code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import os
import torch
import torchvision
import torchvision.transforms as T
import numpy as np
from scipy.sparse import coo_matrix
def load_numpy_data(name,
                    data_path,
                    logger):
    """Load one of the benchmark multi-domain datasets from pre-extracted .npz files.

    Parameters
    ----------
    name : str
        One of "digits", "office_home" or "office31".
    data_path : str
        Directory holding the pre-processed .npz files.
    logger : logging.Logger
        Used to report per-domain instance counts.

    Returns
    -------
    tuple
        (data_names, train_insts, train_labels, test_insts, test_labels, configs),
        where the instance/label entries are lists with one numpy array per
        domain and configs is the model-configuration dict for the dataset.

    Raises
    ------
    ValueError
        If `name` is not a known dataset.
    """
    if name == "digits":
        data_names = ["mnist", "mnist_m", "svhn", "synth_digits"]
        num_trains = 20000
        num_tests = 9000
        input_dim = 2304  # number of features after CovNet
        train_insts, train_labels, test_insts, test_labels = [], [], [], []
        for dataset in data_names:
            data = np.load(os.path.join(data_path,
                                        dataset,
                                        "%s.npz" % dataset))
            logger.info("%s with %d training and %d test instances" % (dataset,
                                                                       data['train_x'].shape[0],
                                                                       data['test_x'].shape[0]))
            # Shuffle, then subsample the training and test splits independently
            ridx = np.arange(data['train_x'].shape[0])
            np.random.shuffle(ridx)
            train_insts.append(data['train_x'][ridx[:num_trains]])
            train_labels.append(data['train_y'][ridx[:num_trains]])
            ridx = np.arange(data['test_x'].shape[0])
            np.random.shuffle(ridx)
            test_insts.append(data['test_x'][ridx[:num_tests]])
            test_labels.append(data['test_y'][ridx[:num_tests]])
        configs = {"input_dim": input_dim,
                   "channels": 3,
                   "conv_layers": [64, 128, 256],
                   "cls_fc_layers": [2048, 1024],
                   "dom_fc_layers": [2048, 2048],
                   "num_classes": 10,
                   "drop_rate": 0.0}
    elif name == "office_home":
        data_names = ['Art', 'Clipart', 'Product', 'Real_World']
        train_insts, train_labels, test_insts, test_labels = _load_feature_splits(
            data_names, data_path, prefix='office_home_', num_trains=2000,
            err_msg="Resnet50 features must be obtained from get_features.py code",
            logger=logger)
        configs = {"input_dim": 2048,
                   "hidden_layers": [1000, 500, 100],
                   "num_classes": 65,
                   "drop_rate": 0.7}
    elif name == "office31":
        data_names = ['Amazon', 'DSLR', 'Webcam']
        train_insts, train_labels, test_insts, test_labels = _load_feature_splits(
            data_names, data_path, prefix='office31_', num_trains=410,
            err_msg="office31 features must be obtained from get_features.py code",
            logger=logger)
        configs = {"input_dim": 2048,
                   "hidden_layers": [1000, 500, 100],
                   "num_classes": 31,
                   "drop_rate": 0.7}
    else:
        raise ValueError("Unknown dataset.")
    return data_names, train_insts, train_labels, test_insts, test_labels, configs


def _load_feature_splits(data_names, data_path, prefix, num_trains, err_msg, logger):
    """Shared loader for the office_home / office31 branches.

    For each domain, loads "<prefix><domain>.npz", shuffles indices over the
    training array, keeps the first `num_trains` for training and — as in the
    original code — indexes the test arrays with the remaining (shuffled)
    training indices.

    Returns (train_insts, train_labels, test_insts, test_labels).
    """
    train_insts, train_labels, test_insts, test_labels = [], [], [], []
    for dataset in data_names:
        data_preprocess_dir = os.path.join(data_path, prefix + dataset.lower() + '.npz')
        assert os.path.exists(data_preprocess_dir), err_msg
        data = np.load(data_preprocess_dir)
        t_samples = data['train_x']
        v_samples = data['test_x']
        t_labels = data['train_y']
        v_labels = data['test_y']
        num_insts = t_labels.shape[0]
        logger.info("%s with %d instances." % (dataset, num_insts))
        ridx = np.arange(num_insts)
        np.random.shuffle(ridx)
        train_insts.append(t_samples[ridx[:num_trains]])
        train_labels.append(t_labels[ridx[:num_trains]])
        test_insts.append(v_samples[ridx[num_trains:]])
        test_labels.append(v_labels[ridx[num_trains:]])
    return train_insts, train_labels, test_insts, test_labels
def data_loader(inputs, targets, batch_size, shuffle=True):
    """Yield (inputs, targets) minibatches from a single domain.

    Parameters
    ----------
    inputs : np.ndarray, shape (n, d)
    targets : np.ndarray, shape (n,)
    batch_size : int
    shuffle : bool
        If True, both arrays are permuted (with the same permutation) first.

    Yields
    ------
    (np.ndarray, np.ndarray)
        Full-size batches, followed by one smaller remainder batch when
        `batch_size` does not divide the number of instances.
    """
    assert inputs.shape[0] == targets.shape[0]
    inputs_size = inputs.shape[0]
    if shuffle:
        random_order = np.arange(inputs_size)
        np.random.shuffle(random_order)
        inputs, targets = inputs[random_order, :], targets[random_order]
    # floor division instead of int(a / b): avoids a float round-trip
    num_blocks = inputs_size // batch_size
    for i in range(num_blocks):
        block = slice(i * batch_size, (i + 1) * batch_size)
        yield inputs[block, :], targets[block]
    if num_blocks * batch_size != inputs_size:
        # remainder batch with the leftover instances
        yield inputs[num_blocks * batch_size:, :], targets[num_blocks * batch_size:]
def multi_data_loader(inputs, targets, batch_size, shuffle=True):
    """
    Both inputs and targets are list of numpy arrays, containing instances and labels from multiple sources.
    Yields one list of batches per domain; each batch is drawn with replacement.
    Note: when shuffle is True the caller's lists are permuted in place.
    """
    assert len(inputs) == len(targets)
    sizes = [arr.shape[0] for arr in inputs]
    n_domains = len(inputs)
    if shuffle:
        # permute every domain in place with its own random order
        for d in range(n_domains):
            perm = np.arange(sizes[d])
            np.random.shuffle(perm)
            inputs[d], targets[d] = inputs[d][perm], targets[d][perm]
    n_batches = int(max(sizes) / batch_size)
    for _ in range(n_batches):
        batch_x, batch_y = [], []
        for d in range(n_domains):
            # sample with replacement so small domains keep up with the largest
            picks = np.random.choice(sizes[d], batch_size)
            batch_x.append(inputs[d][picks])
            batch_y.append(targets[d][picks])
        yield batch_x, batch_y
def loader_gen(loader, mode='inf'):
    """Yield (images, targets) pairs from `loader`; loop forever when mode == 'inf',
    otherwise stop after a single pass."""
    # https://github.com/pytorch/pytorch/issues/1917#issuecomment-479482530
    repeat = (mode == 'inf')
    while True:
        for images, targets in loader:
            yield images, targets
        if not repeat:
            break
| [
"numpy.load",
"os.path.exists",
"numpy.arange",
"numpy.random.choice",
"os.path.join",
"numpy.random.shuffle"
] | [((4699, 4721), 'numpy.arange', 'np.arange', (['inputs_size'], {}), '(inputs_size)\n', (4708, 4721), True, 'import numpy as np\n'), ((4730, 4761), 'numpy.random.shuffle', 'np.random.shuffle', (['random_order'], {}), '(random_order)\n', (4747, 4761), True, 'import numpy as np\n'), ((1046, 1081), 'numpy.arange', 'np.arange', (["data['train_x'].shape[0]"], {}), "(data['train_x'].shape[0])\n", (1055, 1081), True, 'import numpy as np\n'), ((1094, 1117), 'numpy.random.shuffle', 'np.random.shuffle', (['ridx'], {}), '(ridx)\n', (1111, 1117), True, 'import numpy as np\n'), ((1273, 1307), 'numpy.arange', 'np.arange', (["data['test_x'].shape[0]"], {}), "(data['test_x'].shape[0])\n", (1282, 1307), True, 'import numpy as np\n'), ((1320, 1343), 'numpy.random.shuffle', 'np.random.shuffle', (['ridx'], {}), '(ridx)\n', (1337, 1343), True, 'import numpy as np\n'), ((5579, 5604), 'numpy.arange', 'np.arange', (['input_sizes[i]'], {}), '(input_sizes[i])\n', (5588, 5604), True, 'import numpy as np\n'), ((5617, 5643), 'numpy.random.shuffle', 'np.random.shuffle', (['r_order'], {}), '(r_order)\n', (5634, 5643), True, 'import numpy as np\n'), ((5882, 5926), 'numpy.random.choice', 'np.random.choice', (['input_sizes[i]', 'batch_size'], {}), '(input_sizes[i], batch_size)\n', (5898, 5926), True, 'import numpy as np\n'), ((566, 618), 'os.path.join', 'os.path.join', (['data_path', 'dataset', "('%s.npz' % dataset)"], {}), "(data_path, dataset, '%s.npz' % dataset)\n", (578, 618), False, 'import os\n'), ((2163, 2198), 'os.path.exists', 'os.path.exists', (['data_preprocess_dir'], {}), '(data_preprocess_dir)\n', (2177, 2198), False, 'import os\n'), ((2282, 2310), 'numpy.load', 'np.load', (['data_preprocess_dir'], {}), '(data_preprocess_dir)\n', (2289, 2310), True, 'import numpy as np\n'), ((2614, 2637), 'numpy.arange', 'np.arange', (['num_insts[i]'], {}), '(num_insts[i])\n', (2623, 2637), True, 'import numpy as np\n'), ((2650, 2673), 'numpy.random.shuffle', 'np.random.shuffle', (['ridx'], {}), 
'(ridx)\n', (2667, 2673), True, 'import numpy as np\n'), ((3454, 3489), 'os.path.exists', 'os.path.exists', (['data_preprocess_dir'], {}), '(data_preprocess_dir)\n', (3468, 3489), False, 'import os\n'), ((3573, 3601), 'numpy.load', 'np.load', (['data_preprocess_dir'], {}), '(data_preprocess_dir)\n', (3580, 3601), True, 'import numpy as np\n'), ((3905, 3928), 'numpy.arange', 'np.arange', (['num_insts[i]'], {}), '(num_insts[i])\n', (3914, 3928), True, 'import numpy as np\n'), ((3941, 3964), 'numpy.random.shuffle', 'np.random.shuffle', (['ridx'], {}), '(ridx)\n', (3958, 3964), True, 'import numpy as np\n')] |
''' Testing module for nibetaseries.interfaces.nilearn '''
import nibabel as nib
import numpy as np
import pandas as pd
import os
from ..nilearn import AtlasConnectivity, CensorVolumes
def test_censor_volumes(tmp_path, betaseries_file, brainmask_file):
    """CensorVolumes should flag and drop exactly the artificially inflated volume."""
    outlier_file = tmp_path / 'betaseries_outlier.nii.gz'
    # inject an outlier into one volume of the beta series
    outlier_idx = 6
    img = nib.load(str(betaseries_file))
    data = img.get_fdata()
    data[..., outlier_idx] += 1000
    img.__class__(data, img.affine, img.header).to_filename(str(outlier_file))

    interface = CensorVolumes(timeseries_file=str(outlier_file),
                              mask_file=str(brainmask_file))
    res = interface.run()

    censored = nib.load(res.outputs.censored_file)
    assert censored.shape[-1] == img.shape[-1] - 1
    assert res.outputs.outliers[outlier_idx]
def test_atlas_connectivity(betaseries_file, atlas_file, atlas_lut):
    """AtlasConnectivity should write the Fisher z-transformed correlation matrix."""
    # read in test files
    # get_data() was deprecated and removed in nibabel 4.0 -> get_fdata()
    bs_data = nib.load(str(betaseries_file)).get_fdata()
    atlas_lut_df = pd.read_csv(str(atlas_lut), sep='\t')

    # expected output: pairwise correlations with the diagonal masked out
    pcorr = np.corrcoef(bs_data.squeeze())
    # np.NaN alias was removed in NumPy 2.0; np.nan is the canonical spelling
    np.fill_diagonal(pcorr, np.nan)
    regions = atlas_lut_df['regions'].values
    pcorr_df = pd.DataFrame(pcorr, index=regions, columns=regions)
    # Fisher r-to-z transform: atanh(r) == 0.5 * (log(1 + r) - log(1 - r))
    expected_zcorr_df = pcorr_df.apply(lambda x: (np.log(1 + x) - np.log(1 - x)) * 0.5)

    # run instance of AtlasConnectivity
    ac = AtlasConnectivity(timeseries_file=str(betaseries_file),
                           atlas_file=str(atlas_file),
                           atlas_lut=str(atlas_lut))
    res = ac.run()
    output_zcorr_df = pd.read_csv(res.outputs.correlation_matrix,
                                 na_values='n/a',
                                 delimiter='\t',
                                 index_col=0)
    os.remove(res.outputs.correlation_matrix)

    # test equality of the matrices up to 3 decimals
    # NOTE(review): check_less_precise is deprecated in pandas >= 1.1; migrate
    # to rtol/atol when the pinned pandas version allows it.
    pd.testing.assert_frame_equal(output_zcorr_df, expected_zcorr_df,
                                  check_less_precise=3)
| [
"pandas.DataFrame",
"numpy.fill_diagonal",
"os.remove",
"pandas.testing.assert_frame_equal",
"numpy.log",
"nibabel.load",
"pandas.read_csv"
] | [((1173, 1204), 'numpy.fill_diagonal', 'np.fill_diagonal', (['pcorr', 'np.NaN'], {}), '(pcorr, np.NaN)\n', (1189, 1204), True, 'import numpy as np\n'), ((1265, 1316), 'pandas.DataFrame', 'pd.DataFrame', (['pcorr'], {'index': 'regions', 'columns': 'regions'}), '(pcorr, index=regions, columns=regions)\n', (1277, 1316), True, 'import pandas as pd\n'), ((1662, 1755), 'pandas.read_csv', 'pd.read_csv', (['res.outputs.correlation_matrix'], {'na_values': '"""n/a"""', 'delimiter': '"""\t"""', 'index_col': '(0)'}), "(res.outputs.correlation_matrix, na_values='n/a', delimiter='\\t',\n index_col=0)\n", (1673, 1755), True, 'import pandas as pd\n'), ((1859, 1900), 'os.remove', 'os.remove', (['res.outputs.correlation_matrix'], {}), '(res.outputs.correlation_matrix)\n', (1868, 1900), False, 'import os\n'), ((1958, 2049), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['output_zcorr_df', 'expected_zcorr_df'], {'check_less_precise': '(3)'}), '(output_zcorr_df, expected_zcorr_df,\n check_less_precise=3)\n', (1987, 2049), True, 'import pandas as pd\n'), ((777, 812), 'nibabel.load', 'nib.load', (['res.outputs.censored_file'], {}), '(res.outputs.censored_file)\n', (785, 812), True, 'import nibabel as nib\n'), ((1367, 1380), 'numpy.log', 'np.log', (['(1 + x)'], {}), '(1 + x)\n', (1373, 1380), True, 'import numpy as np\n'), ((1383, 1396), 'numpy.log', 'np.log', (['(1 - x)'], {}), '(1 - x)\n', (1389, 1396), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 7 15:49:19 2020
@author: <NAME>
"""
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
import numpy as np
from functools import reduce
from itertools import combinations
from scipy.optimize import bisect, minimize
from sklearn.manifold import MDS
from sklearn.metrics import euclidean_distances
from sklearn.utils import check_random_state
import pandas as pd
''' Calculates the approximate intersection areas based on a projection
of circles onto a 200x200 pixel matrix '''
def calc_overlap_area(circles):
    """Approximate the area of every circle-intersection region by rasterisation.

    The circles are projected onto a 200x200 pixel grid spanning their common
    bounding box; each pixel gets a binary-string id telling which circles
    cover it ('101' = inside circles 0 and 2), and areas are estimated from
    pixel counts.

    Parameters
    ----------
    circles : list of [(cx, cy), r] entries.

    Returns
    -------
    x, y : 2-D coordinate grids of the pixel centres.
    intersectionIds : 2-D array of binary-string region ids.
    intersectionAreas : dict mapping region id -> approximate area (the
        all-zeros "outside every circle" region is excluded).
    """
    left = min(c[0][0] - c[1] for c in circles)
    right = max(c[0][0] + c[1] for c in circles)
    bottom = min(c[0][1] - c[1] for c in circles)
    top = max(c[0][1] + c[1] for c in circles)
    scale_min = min(left, right, bottom, top)
    scale_max = max(left, right, bottom, top)
    granularity = 200
    scale = np.linspace(scale_min, scale_max, granularity)
    x = np.array([scale, ] * granularity)
    y = x.transpose()
    unit = granularity / (scale_max - scale_min)  # pixels per unit length
    # one '0'/'1' character grid per circle, marking pixel membership
    cp = list(map(np.vectorize(lambda b: '1' if b else '0'),
                  [(x - c[0][0]) ** 2 + (y - c[0][1]) ** 2 < c[1] ** 2 for c in circles]))
    intersectionIds = reduce(np.char.add, cp)
    unique, counts = np.unique(intersectionIds, return_counts=True)
    counts = counts.astype(float) / unit ** 2  # pixel counts -> areas
    intersectionAreas = dict(zip(unique, counts))
    # Drop the "outside every circle" region. It can be absent when the circles
    # cover the whole grid, so use pop(default) instead of del (which raised
    # KeyError in that case).
    intersectionAreas.pop('0' * len(cp), None)
    return x, y, intersectionIds, intersectionAreas
# Circular segment area calculation. See http://mathworld.wolfram.com/CircularSegment.html
def circleArea(r, width):
    """Area of a circular segment of height `width` cut from a circle of radius `r`.

    See http://mathworld.wolfram.com/CircularSegment.html
    """
    cap = r * r * np.arccos(1 - width / r)
    triangle = (r - width) * np.sqrt(width * (2 * r - width))
    return cap - triangle
''' Returns the overlap area of two circles of radius r1 and r2 - that
have their centers separated by distance d. Simpler faster
circle intersection for only two circles '''
def circleOverlap(r1, r2, d):
    """Overlap area of two circles of radii r1 and r2 whose centres are distance d apart."""
    # disjoint circles
    if d >= r1 + r2:
        return 0
    # one circle fully inside the other: overlap is the smaller circle's area
    if d <= np.abs(r1 - r2):
        smaller = np.min([r1, r2])
        return np.pi * smaller * smaller
    # lens-shaped overlap: sum of the two circular segments
    w1 = r1 - (d * d - r2 * r2 + r1 * r1) / (2 * d)
    w2 = r2 - (d * d - r1 * r1 + r2 * r2) / (2 * d)
    return circleArea(r1, w1) + circleArea(r2, w2)
''' Returns the distance necessary for two circles of radius r1 + r2 to
have the overlap area 'overlap' '''
def distanceFromIntersectArea(r1, r2, overlap):
    """Centre distance at which circles of radii r1 and r2 overlap by area `overlap`."""
    smaller = np.min([r1, r2])
    # fully contained: the smaller circle's whole area is the overlap
    if smaller * smaller * np.pi <= overlap + 1e-10:
        return np.abs(r1 - r2)
    # otherwise solve circleOverlap(d) == overlap for d in (0, r1 + r2)
    return bisect(lambda d: circleOverlap(r1, r2, d) - overlap, 0, r1 + r2)
''' Given a bunch of sets, and the desired overlaps between these sets - computes
the distance from the actual overlaps to the desired overlaps. Note that
this method ignores overlaps of more than 2 circles '''
def lossFunction(centers, radii, overlaps):
    """Normalised squared error between desired and currently realised overlap areas.

    `centers` is a flat [x0, y0, x1, y1, ...] parameter vector (as used by the
    optimizer); `overlaps` maps region ids to target areas. Overlaps of more
    than two circles are handled through the region ids themselves.
    """
    assert len(centers) % 2 == 0, 'number parameters should be a multiple of 2 (2 xy co-ordinates for center of each circle)'
    assert len(centers) / 2 == len(radii), 'number of centers & number of radii do not match'
    circles = [[(centers[2 * k], centers[2 * k + 1]), radii[k]]
               for k in range(len(centers) // 2)]
    _, _, _, curr_overlap = calc_overlap_area(circles)
    # normalising constant: total variance of the target areas, floored at 1
    sst = max(len(overlaps) * np.var(list(overlaps.values())), 1)
    act_df = pd.DataFrame(list(overlaps.items()), columns=['areaId', 'actual'])
    curr_df = pd.DataFrame(list(curr_overlap.items()), columns=['areaId', 'current'])
    # regions missing on either side count as area 0
    mdf = act_df.merge(curr_df, on='areaId', how='outer').fillna(0)
    mdf['error'] = mdf['actual'] - mdf['current']
    return np.sum(mdf['error'] * mdf['error'] / sst)
'''Computes constrained multidimensional scaling using SMACOF algorithm Parameters'''
def constrainedMDS(dissimilarities, disj_or_sub, n_components=2, init=None,
                   max_iter=300, verbose=0, eps=1e-3, random_state=None):
    """SMACOF-style multidimensional scaling with a disjoint/subset constraint mask.

    Parameters
    ----------
    dissimilarities : np.ndarray, shape (n, n)
        Target pairwise distances between circle centres.
    disj_or_sub : np.ndarray, shape (n, n)
        -1 where a pair of circles is disjoint, +1 where one contains the
        other, 0 otherwise; used to mask the update direction below.
    n_components : int
        Embedding dimension (overridden by init.shape[1] when init is given).
    init : np.ndarray or None
        Optional starting configuration of shape (n, n_components).
    max_iter, verbose, eps, random_state : usual solver knobs.

    Returns
    -------
    (X, stress, n_iter) : final positions, last stress value, iterations used.
    """
    n_samples = dissimilarities.shape[0]
    random_state = check_random_state(random_state)
    if init is None:
        # Randomly choose initial configuration
        X = random_state.rand(n_samples * n_components)
        X = X.reshape((n_samples, n_components))
    else:
        # overrides the parameter p
        n_components = init.shape[1]
        if n_samples != init.shape[0]:
            raise ValueError("init matrix should be of shape (%d, %d)" %
                             (n_samples, n_components))
        X = init
    old_stress = None
    for it in range(max_iter):
        # Compute distance and monotonic regression
        dis = euclidean_distances(X)
        disparities = dissimilarities
        # stress: sum of squared differences between squared distances
        delta = dis**2 - disparities**2
        stress = ((delta.ravel())**2).sum()
        #gradmat = 4 * np.vectorize(lambda b: 0 if b > 0 else 1)((disparities-dis) * disj_or_sub) * delta
        #gradx= np.sum(gradmat * (X[:,0].reshape(-1,1)-X[:,0].reshape(1,-1)), axis=1)
        #grady= np.sum(gradmat * (X[:,1].reshape(-1,1)-X[:,1].reshape(1,-1)), axis=1)
        # Update X using the gradient
        #X = X - np.concatenate((gradx.reshape(-1,1), grady.reshape(-1,1)), axis=1)/(np.sqrt((gradx**2).sum() + (grady**2).sum()))
        # Update X using the Guttman transform
        dis[dis == 0] = 1e-5  # avoid division by zero for coincident points
        ratio = disparities / dis
        B = - ratio
        B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
        # constraint mask: zero out entries where (disparities - dis) * disj_or_sub > 0,
        # i.e. skip updates that would move a constrained pair the wrong way
        B *= np.vectorize(lambda b: 0 if b > 0 else 1)((disparities-dis) * disj_or_sub)
        X = 1. / n_samples * np.dot(B, X)
        # normalisation term used in the convergence test below
        dis = np.sqrt((X ** 2).sum(axis=1)).sum()
        if verbose >= 2:
            print('it: %d, stress %s' % (it, stress))
        if old_stress is not None:
            if abs(old_stress - stress / dis) < eps:
                if verbose:
                    print('breaking at iteration %d with stress %s' % (it,
                                                                       stress))
                break
        old_stress = stress / dis
    return X, stress, it + 1
''' Calculates the intersection between columns of a dataframe '''
def df2areas(df, fineTune=False):
    """Derive circle radii and intersection areas from a 0/1 membership dataframe.

    Each column of `df` is a set and each row an element. Radii are chosen so
    that the circle areas equal the set sizes. Returns
    (labels, radii, actualOverlaps, disjointOverlaps); disjointOverlaps is only
    populated when fineTune is True.
    """
    n_sets = df.shape[1]
    labels = df.columns
    # circle area == set size  =>  r = sqrt(size / pi)
    radii = np.sqrt(df.sum() / np.pi).tolist()
    # intersection of two sets - may be overlapped with other sets - A int B
    actualOverlaps = {}
    for i, j in combinations(range(n_sets), 2):
        key = '0' * i + '1' + '0' * (j - i - 1) + '1' + '0' * (n_sets - j - 1)
        actualOverlaps[key] = np.sum(df.iloc[:, i] & df.iloc[:, j])
    # intersection of two sets only - not overlapped with any other set - A int B int (not C)
    disjointOverlaps = {}
    if fineTune:
        areaId = pd.Series(df.astype(str).values.sum(axis=1))
        vc = areaId.value_counts()
        disjointOverlaps = dict(zip(vc.keys().astype(str).tolist(), vc.tolist()))
    return labels, radii, actualOverlaps, disjointOverlaps
''' Computes the circles from radius and overlap data
If fineTune=False, returns initial estimates - faster & fairly acurate - should be fit for most cases
If fineTune=True, returns optimized estimates - slower but more accurate '''
def getCircles(radii, actualOverlaps, disjointOverlaps, fineTune=False):
    """Compute circle placements from radii and desired overlap areas.

    With fineTune=False, positions come from the constrained MDS embedding of
    pairwise centre distances (fast, approximate). With fineTune=True, the MDS
    result seeds a Nelder-Mead refinement of `lossFunction` over
    `disjointOverlaps` (slower, more accurate).

    Returns a list of [(cx, cy), r] entries, one per input radius.
    """
    distances = np.zeros((len(radii), (len(radii))))
    disj_or_sub = np.zeros((len(radii), (len(radii))))
    for i in range(len(radii)):
        for j in range(i+1, len(radii)):
            # region id with '1' in positions i and j
            combstr = '0'*i+'1'+'0'*(j-i-1)+'1'+'0'*(len(radii)-j-1)
            # centre distance that realises the requested pairwise overlap
            distances[i, j] = distanceFromIntersectArea(radii[i], radii[j], actualOverlaps[combstr])
            distances[j, i] = distances[i, j]
            if actualOverlaps[combstr] == 0:
                # disjoint pair
                disj_or_sub[i, j] = -1
                disj_or_sub[j, i] = -1
            if np.abs(actualOverlaps[combstr] - np.pi*(min(radii[i], radii[j]))**2) < 1e-5:
                # one circle (numerically) contains the other
                disj_or_sub[i, j] = 1
                disj_or_sub[j, i] = 1
    #mds = MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=42,
    #          dissimilarity='precomputed', n_jobs=1)
    #pos = mds.fit(distances).embedding_
    pos, _, _ = constrainedMDS(distances, disj_or_sub, n_components=2, init=None,
                               max_iter=300, verbose=0, eps=1e-3, random_state=42)
    circles = [[(pos[i,0], pos[i,1]), radii[i]] for i in range(len(radii))]
    if fineTune:
        # flatten the positions into the [x0, y0, x1, y1, ...] vector the optimizer expects
        centers = []
        for i in range(pos.shape[0]):
            centers += [pos[i, 0], pos[i, 1]]
        res = minimize(lambda p: lossFunction(p, radii, disjointOverlaps), centers, method='Nelder-Mead', options={'maxiter': 100, 'disp': True})
        centers = list(res['x'])
        circles = []
        for i in range(int(len(centers)/2)):
            circles.append([(centers[2*i+0], centers[2*i+1]), radii[i]])
    return circles
''' Get label positions for each circle avoiding the overlap areas '''
def getLabelPositions(circles, labels):
    """Yield (label, x, y, fontsize) placements that avoid crowded overlap regions.

    For each circle, the label is anchored at (roughly) the median pixel of the
    region covered by the fewest circles that still contains it; if no region
    larger than a small area tolerance exists, the circle centre is used.
    Font size is scaled by the circle radius relative to the largest circle.
    """
    x, y, intersectionIds, curr_overlap = calc_overlap_area(circles)
    # bucket regions by how many circles cover them (1-set, 2-set, ...)
    olapByNset = [dict() for x in range(len(circles))]
    for k in curr_overlap:
        n = k.count('1')
        olapByNset[n-1][k] = curr_overlap[k]
    # ignore regions smaller than 0.1% of the bounding-box area
    areaTol = 0.001*(np.max(x)-np.min(x))*(np.max(y)-np.min(y))
    maxrad = max([c[1] for c in circles])
    for i, (l, c) in enumerate(zip(labels, circles)):
        ls = 15*c[1]/maxrad  # font size proportional to radius
        # for each coverage level, keep only regions that include circle i and are big enough
        olapC = [filterTheDict(ol, lambda elem: (elem[0][i] == '1') and (elem[1] > areaTol)) for ol in olapByNset]
        # pick the lowest coverage level that has any candidate region
        for ol in olapC:
            if len(ol) > 0:
                break
        if ol:
            # largest candidate region at that level
            areaId = max(ol, key=lambda x: ol[x])
        else:
            # no usable region: fall back to the circle centre
            yield l, c[0][0], c[0][1], ls
            continue
        # median pixel of the chosen region (median row, then median column within it)
        indices = np.where(intersectionIds == areaId)
        rndx, rndy = int(np.median(indices[0])), int(np.median(indices[1][np.where(indices[0]==int(np.median(indices[0])))]))
        lx, ly = x[rndx, rndy], y[rndx, rndy]
        yield l, lx, ly, ls
''' Dictionary filter utility function - filter a dictionary basedon criteria'''
def filterTheDict(dictObj, callback):
    """Return a new dict with the items of `dictObj` for which callback((key, value)) is truthy.

    The original insertion order of the kept items is preserved.
    """
    # dict comprehension replaces the manual build-and-insert loop
    return {key: value for key, value in dictObj.items() if callback((key, value))}
''' Plots the Venn diagrams from radius and overlap data '''
def venn(radii, actualOverlaps, disjointOverlaps, labels=None, labelsize='auto', cmap=None, edgecolor='black', fineTune=False):
    """Plot an area-proportional Venn/Euler diagram.

    Parameters
    ----------
    radii, actualOverlaps, disjointOverlaps : as produced by `df2areas`.
    labels : iterable of str or None
        Per-circle labels; omitted when None.
    labelsize : 'auto' or number
        'auto' scales font size by circle radius (see getLabelPositions).
    cmap, edgecolor : forwarded to matplotlib's PatchCollection.
    fineTune : bool
        Forwarded to getCircles (slower Nelder-Mead refinement when True).

    Returns
    -------
    (fig, ax) : the matplotlib figure and axes containing the diagram.
    """
    circles = getCircles(radii, actualOverlaps, disjointOverlaps, fineTune)
    fig, ax = plt.subplots()
    cplots = [plt.Circle(circles[i][0], circles[i][1]) for i in range(len(circles))]
    arr = np.array(radii)  # colour each patch by its radius
    col = PatchCollection(cplots, cmap=cmap, array=arr, edgecolor=edgecolor, alpha=0.5)
    ax.add_collection(col)
    if labels is not None:
        for l, lx, ly, ls in getLabelPositions(circles, labels):
            ls = ls if labelsize=='auto' else labelsize
            ax.annotate(l, xy=(lx, ly), fontsize=ls, ha='center', va='center')
    ax.axis('off')
    ax.set_aspect('equal')  # circles must render as circles
    ax.autoscale()
    return fig, ax
''' Usage Example '''
if __name__ == '__main__':
    # Demo: five random 0/1 membership sets over 50,000 elements
    df = pd.DataFrame(np.random.choice([0,1], size = (50000, 5)), columns=list('ABCDE'))
    labels, radii, actualOverlaps, disjointOverlaps = df2areas(df, fineTune=False)
    fig, ax = venn(radii, actualOverlaps, disjointOverlaps, labels=labels, labelsize='auto', cmap=None, fineTune=False)
    # save and close the figure instead of showing it interactively
    plt.savefig('venn.png', dpi=300, transparent=True)
    plt.close()
| [
"sklearn.utils.check_random_state",
"numpy.sum",
"numpy.abs",
"numpy.unique",
"matplotlib.pyplot.close",
"sklearn.metrics.euclidean_distances",
"numpy.max",
"numpy.linspace",
"numpy.random.choice",
"numpy.arccos",
"matplotlib.pyplot.subplots",
"numpy.vectorize",
"numpy.median",
"numpy.min"... | [((940, 986), 'numpy.linspace', 'np.linspace', (['scale_min', 'scale_max', 'granularity'], {}), '(scale_min, scale_max, granularity)\n', (951, 986), True, 'import numpy as np\n'), ((1000, 1031), 'numpy.array', 'np.array', (['([scale] * granularity)'], {}), '([scale] * granularity)\n', (1008, 1031), True, 'import numpy as np\n'), ((1254, 1277), 'functools.reduce', 'reduce', (['np.char.add', 'cp'], {}), '(np.char.add, cp)\n', (1260, 1277), False, 'from functools import reduce\n'), ((1304, 1350), 'numpy.unique', 'np.unique', (['intersectionIds'], {'return_counts': '(True)'}), '(intersectionIds, return_counts=True)\n', (1313, 1350), True, 'import numpy as np\n'), ((3724, 3765), 'numpy.sum', 'np.sum', (["(mdf['error'] * mdf['error'] / sst)"], {}), "(mdf['error'] * mdf['error'] / sst)\n", (3730, 3765), True, 'import numpy as np\n'), ((4082, 4114), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (4100, 4114), False, 'from sklearn.utils import check_random_state\n'), ((10735, 10749), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10747, 10749), True, 'import matplotlib.pyplot as plt\n'), ((10845, 10860), 'numpy.array', 'np.array', (['radii'], {}), '(radii)\n', (10853, 10860), True, 'import numpy as np\n'), ((10871, 10948), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['cplots'], {'cmap': 'cmap', 'array': 'arr', 'edgecolor': 'edgecolor', 'alpha': '(0.5)'}), '(cplots, cmap=cmap, array=arr, edgecolor=edgecolor, alpha=0.5)\n', (10886, 10948), False, 'from matplotlib.collections import PatchCollection\n'), ((11650, 11700), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""venn.png"""'], {'dpi': '(300)', 'transparent': '(True)'}), "('venn.png', dpi=300, transparent=True)\n", (11661, 11700), True, 'import matplotlib.pyplot as plt\n'), ((11705, 11716), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11714, 11716), True, 'import matplotlib.pyplot as plt\n'), ((2062, 
2077), 'numpy.abs', 'np.abs', (['(r1 - r2)'], {}), '(r1 - r2)\n', (2068, 2077), True, 'import numpy as np\n'), ((2588, 2603), 'numpy.abs', 'np.abs', (['(r1 - r2)'], {}), '(r1 - r2)\n', (2594, 2603), True, 'import numpy as np\n'), ((4686, 4708), 'sklearn.metrics.euclidean_distances', 'euclidean_distances', (['X'], {}), '(X)\n', (4705, 4708), False, 'from sklearn.metrics import euclidean_distances\n'), ((6502, 6551), 'numpy.sum', 'np.sum', (['(df.iloc[:, comb[0]] & df.iloc[:, comb[1]])'], {}), '(df.iloc[:, comb[0]] & df.iloc[:, comb[1]])\n', (6508, 6551), True, 'import numpy as np\n'), ((9824, 9859), 'numpy.where', 'np.where', (['(intersectionIds == areaId)'], {}), '(intersectionIds == areaId)\n', (9832, 9859), True, 'import numpy as np\n'), ((10764, 10804), 'matplotlib.pyplot.Circle', 'plt.Circle', (['circles[i][0]', 'circles[i][1]'], {}), '(circles[i][0], circles[i][1])\n', (10774, 10804), True, 'import matplotlib.pyplot as plt\n'), ((11376, 11417), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': '(50000, 5)'}), '([0, 1], size=(50000, 5))\n', (11392, 11417), True, 'import numpy as np\n'), ((1126, 1167), 'numpy.vectorize', 'np.vectorize', (["(lambda b: '1' if b else '0')"], {}), "(lambda b: '1' if b else '0')\n", (1138, 1167), True, 'import numpy as np\n'), ((1678, 1702), 'numpy.arccos', 'np.arccos', (['(1 - width / r)'], {}), '(1 - width / r)\n', (1687, 1702), True, 'import numpy as np\n'), ((1717, 1749), 'numpy.sqrt', 'np.sqrt', (['(width * (2 * r - width))'], {}), '(width * (2 * r - width))\n', (1724, 1749), True, 'import numpy as np\n'), ((2122, 2138), 'numpy.min', 'np.min', (['[r1, r2]'], {}), '([r1, r2])\n', (2128, 2138), True, 'import numpy as np\n'), ((5535, 5576), 'numpy.vectorize', 'np.vectorize', (['(lambda b: 0 if b > 0 else 1)'], {}), '(lambda b: 0 if b > 0 else 1)\n', (5547, 5576), True, 'import numpy as np\n'), ((5639, 5651), 'numpy.dot', 'np.dot', (['B', 'X'], {}), '(B, X)\n', (5645, 5651), True, 'import numpy as np\n'), ((9329, 
9338), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (9335, 9338), True, 'import numpy as np\n'), ((9339, 9348), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (9345, 9348), True, 'import numpy as np\n'), ((2103, 2119), 'numpy.min', 'np.min', (['[r1, r2]'], {}), '([r1, r2])\n', (2109, 2119), True, 'import numpy as np\n'), ((2508, 2524), 'numpy.min', 'np.min', (['[r1, r2]'], {}), '([r1, r2])\n', (2514, 2524), True, 'import numpy as np\n'), ((2527, 2543), 'numpy.min', 'np.min', (['[r1, r2]'], {}), '([r1, r2])\n', (2533, 2543), True, 'import numpy as np\n'), ((9307, 9316), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (9313, 9316), True, 'import numpy as np\n'), ((9317, 9326), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (9323, 9326), True, 'import numpy as np\n'), ((9885, 9906), 'numpy.median', 'np.median', (['indices[0]'], {}), '(indices[0])\n', (9894, 9906), True, 'import numpy as np\n'), ((9959, 9980), 'numpy.median', 'np.median', (['indices[0]'], {}), '(indices[0])\n', (9968, 9980), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
"""
Filtering.py COPYRIGHT FUJITSU LIMITED 2021
"""
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import traceback
import json
import os
import re
import logging
import unicodedata
import numpy as np
import pandas as pd
import multiprocessing
import itertools
from gensim.models import word2vec
from gensim.models import KeyedVectors
from sklearn.metrics import pairwise_distances
def filt(domain_word_file, domain_text_preprocessed_file, vec):
    """Choose the vocabulary to keep, by decreasing priority of available inputs.

    1. domain_word_file exists        -> its deduplicated term column (list)
    2. preprocessed text file exists  -> unique whitespace-separated tokens (set)
    3. otherwise                      -> every term in the word embedding `vec`

    NOTE(review): branch 2 returns a set while branches 1 and 3 return lists;
    confirm callers tolerate both before unifying.
    """
    # Return only the domain terms if domain_word_file exists
    if os.path.exists(domain_word_file) is True:
        # Read tag data (terms for the field)
        tag_file = pd.read_csv(domain_word_file, header=None)
        tag = list(tag_file[0])
        tag = list(set(tag)) # drop exact duplicates (NB: no case folding is applied here)
        return tag
    # If domain_word_file is absent but the preprocessed text exists,
    # return every unique token of the domain text
    elif os.path.exists(domain_text_preprocessed_file) is True:
        with open(domain_text_preprocessed_file, encoding="utf_8") as f:
            txt = f.read()
        txt_splited_newline = txt.split('\n')
        words_pre1 = []
        words_pre2 = []
        words = []
        # split each line on single spaces into a per-line token list
        for i in range(len(txt_splited_newline)):
            words_pre1.append(txt_splited_newline[i].split(" "))
        words_pre2 = list(itertools.chain.from_iterable(words_pre1)) # flatten 2-D token lists to 1-D
        # Delete duplicates
        words = set(words_pre2)
        if "" in words:
            words.remove("")
        return words
    # If neither file exists, return all terms learned by the word embedding
    else:
        # vec is a 0-d object array wrapping a dict-like mapping term -> vector
        tag = list(vec.item().keys())
        return tag
def check_arg(args, config):
    """Validate the counts and extensions of the -i/-o file lists.

    Expects exactly [.csv, .txt, .npy] inputs and one .npy output; prints a
    diagnostic and returns False on the first violation. `config` is accepted
    for interface symmetry with main() but not inspected.
    """
    expected_inputs = [".csv", ".txt", ".npy"]
    if len(args.input) != len(expected_inputs):
        print("invalid input file(s)")
        return False
    if not all(path.endswith(ext) for path, ext in zip(args.input, expected_inputs)):
        print("invalid input file type")
        return False
    expected_outputs = [".npy"]
    if len(args.output) != len(expected_outputs):
        print("invalid output file(s)")
        return False
    if not all(path.endswith(ext) for path, ext in zip(args.output, expected_outputs)):
        print("invalid output file type")
        return False
    return True
def main(args, config):
    """Load the word-vector file, filter its vocabulary with filt() and save the result.

    args.input  = [domain_word_file (.csv), domain_text_preprocessed_file (.txt), vector_file (.npy)]
    args.output = [filtered term list (.npy)]
    `config` is passed through from the CLI but not used here.
    """
    domain_word_file = args.input[0]
    domain_text_preprocessed_file = args.input[1]
    vector_file = args.input[2]
    output_file = args.output[0]
    # allow_pickle is required: the .npy holds a Python object
    # (filt() accesses it via vec.item().keys())
    vec = np.load(file=vector_file, allow_pickle = True)
    filterddata = filt(domain_word_file, domain_text_preprocessed_file, vec)
    np.save(output_file, filterddata)
    #with open(output_file, 'w') as f:
    #    json.dump(vec, f, indent=2, ensure_ascii=False)
if __name__ == '__main__':
    # CLI entry point: parse arguments, validate them, then run main()
    parser = argparse.ArgumentParser(
        usage = '%(prog)s [options]',
        description =
        '''
example:
  $ python3 ./Filtering.py -c config.json -i tag.csv domain_wakati_preprocessed.txt WordEmbedding.npy -o Filtering.npy
''',
        add_help = True,
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument('-c', '--config', required=True, help="Configuration file path. (ex. config.json)")
    parser.add_argument('-i', '--input', required=True, help="Input file(s)", nargs='*')
    parser.add_argument('-o', '--output', required=True, help="Output file(s)", nargs='*')
    args = parser.parse_args()
    print ("start: " + os.path.basename(__file__))
    print("args: " + str(args))
    # load the JSON configuration referenced by -c
    with open(args.config) as f:
        config = json.load(f)
    print("config:" + str(config))
    # validate file counts/extensions before doing any work
    if check_arg(args, config):
        main(args, config)
    else:
        exit(1)
    print ("finish: " + os.path.basename(__file__))
    exit(0)
| [
"numpy.load",
"numpy.save",
"json.load",
"argparse.ArgumentParser",
"os.path.basename",
"pandas.read_csv",
"os.path.exists",
"itertools.chain.from_iterable"
] | [((2664, 2708), 'numpy.load', 'np.load', ([], {'file': 'vector_file', 'allow_pickle': '(True)'}), '(file=vector_file, allow_pickle=True)\n', (2671, 2708), True, 'import numpy as np\n'), ((2793, 2826), 'numpy.save', 'np.save', (['output_file', 'filterddata'], {}), '(output_file, filterddata)\n', (2800, 2826), True, 'import numpy as np\n'), ((2964, 3236), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""%(prog)s [options]"""', 'description': '"""\nexample:\n $ python3 ./Filtering.py -c config.json -i tag.csv domain_wakati_preprocessed.txt WordEmbedding.npy -o Filtering.npy\n"""', 'add_help': '(True)', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(usage=\'%(prog)s [options]\', description=\n """\nexample:\n $ python3 ./Filtering.py -c config.json -i tag.csv domain_wakati_preprocessed.txt WordEmbedding.npy -o Filtering.npy\n"""\n , add_help=True, formatter_class=argparse.RawTextHelpFormatter)\n', (2987, 3236), False, 'import argparse\n'), ((577, 609), 'os.path.exists', 'os.path.exists', (['domain_word_file'], {}), '(domain_word_file)\n', (591, 609), False, 'import os\n'), ((684, 726), 'pandas.read_csv', 'pd.read_csv', (['domain_word_file'], {'header': 'None'}), '(domain_word_file, header=None)\n', (695, 726), True, 'import pandas as pd\n'), ((3724, 3736), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3733, 3736), False, 'import json\n'), ((1009, 1054), 'os.path.exists', 'os.path.exists', (['domain_text_preprocessed_file'], {}), '(domain_text_preprocessed_file)\n', (1023, 1054), False, 'import os\n'), ((3613, 3639), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (3629, 3639), False, 'import os\n'), ((3883, 3909), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (3899, 3909), False, 'import os\n'), ((1418, 1459), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['words_pre1'], {}), '(words_pre1)\n', (1447, 1459), False, 'import itertools\n')] |
import matplotlib
import numpy as np
def polyfit(dates, levels, p):
"""Returns a tuple (first entry: polynomial of degree p that best fits the data, second entry: shift in dates"""
# Convert dates to floats
x = matplotlib.dates.date2num(dates)
# Find coefficients of best-fit polynomial f(x) of degree p
p_coeff = np.polyfit(x - x[0], levels, p)
poly = np.poly1d(p_coeff)
return (poly, x[0])
| [
"matplotlib.dates.date2num",
"numpy.poly1d",
"numpy.polyfit"
] | [((225, 257), 'matplotlib.dates.date2num', 'matplotlib.dates.date2num', (['dates'], {}), '(dates)\n', (250, 257), False, 'import matplotlib\n'), ((337, 368), 'numpy.polyfit', 'np.polyfit', (['(x - x[0])', 'levels', 'p'], {}), '(x - x[0], levels, p)\n', (347, 368), True, 'import numpy as np\n'), ((380, 398), 'numpy.poly1d', 'np.poly1d', (['p_coeff'], {}), '(p_coeff)\n', (389, 398), True, 'import numpy as np\n')] |
import os
import sys
from matplotlib import colors
sys.path.append(os.getcwd())
import pickle
import json
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import DivergingNorm
import iris
import iris.plot as iplt
import restools
from papers.none2021_ecrad.data import Summary
from papers.none2021_ecrad.extensions import IfsIO, extract_or_interpolate, get_ifs_rel_diff
from comsdk.research import Research
from comsdk.comaux import load_from_json, find_all_files_by_named_regexp
from reducedmodels.transition_to_turbulence import MoehlisFaisstEckhardtModel
#def plot_comparison(ifs_io, ifs_io_rps, avail_data, quantity='geopotential', vmin=-0.00015, vmax=0.00015,
# pressure=500., comp_func=get_rel_diff):
# n_rows = len(ifs_io_rps)
# n_cols = len(ifs_io)
# avail_dirs = list(avail_data.keys())
# titles = list(avail_data.values())
#
# fig = plt.figure(figsize=(12, 7))
# for row in range(1, n_rows + 1):
# ifs_io_rp = ifs_io_rps[row - 1]
# for col in range(1, n_cols + 1):
# ts_i = col - 1
# comp = comp_func(ifs_io, ifs_io_rp, ts_i, avail_dirs[row - 1],
# quantity=quantity, pressure=pressure)
# plt.subplot(n_rows, n_cols, (row - 1)*n_cols + col)
# cf = iplt.contourf(comp, 32, norm=DivergingNorm(vmin=vmin, vcenter=0., vmax=vmax),
# cmap=plt.get_cmap('seismic'))
# plt.gca().coastlines()
# plt.gca().set_title(f'+{ifs_io.time_shift(ts_i)}')
# if col == 1:
# plt.gca().text(-0.1, 0.5, f'{titles[row - 1]}', transform=plt.gca().transAxes,
# va='center', fontsize=14, rotation='vertical')
## cbar = plt.gca().colorbar()
## cbar.set_ticks([min_value, 0., max_value])
# colorbar_axes = plt.gcf().add_axes([0.35, 0.05, 0.3, 0.05])
# colorbar = plt.colorbar(cf, colorbar_axes, orientation='horizontal')
# colorbar.locator = matplotlib.ticker.MaxNLocator(3)
# colorbar.update_ticks()
# plt.show()
if __name__ == '__main__':
plt.style.use('resources/default.mplstyle')
summary = load_from_json(Summary)
res_id = summary.res_id
res = Research.open(res_id)
task_path = res.get_task_path(summary.task_for_oifs_results)
#step_shifts = (0, 64, 128, 192, 256, 320)
step_shifts = (0, 160, 320)
ecrad_runs = ('ecrad_tripleclouds_52bits', 'ecrad_mcica_52bits', 'ecrad_tripleclouds_23bits', 'ecrad_tripleclouds_mixed_precision')
ecrad_run_labels = ('Tripleclouds (52 sbits)', 'McICA (52 sbits)', 'Tripleclouds (23 sbits)', 'Tripleclouds (mixed precision)')
n_timesteps = len(step_shifts)
n_runs = len(ecrad_runs)
quantity='geopotential_height'
#vmin = -0.00015
#vcenter = 0.
#vmax = 0.00015
vmin = 47500
vmax = 58400
vcenter = (vmin + vmax) / 2.
pressure = 500.
ifs_io_ref = IfsIO([os.path.join(task_path, 'hgom', ecrad_runs[0], 'sh', f'{id_}.nc') for id_ in step_shifts],
l91_file=summary.l91_file)
#fig = plt.figure(figsize=(12, 7))
fig, axes = plt.subplots(n_timesteps, n_runs, figsize=(12, 6))
cf_ref = None
for run_i in range(n_runs):
ifs_io = IfsIO([os.path.join(task_path, 'hgom', ecrad_runs[run_i], 'sh', f'{id_}.nc') for id_ in step_shifts],
l91_file=summary.l91_file)
for ts_i in range(n_timesteps):
q = extract_or_interpolate(getattr(ifs_io, quantity)(ts_i), pressure)
plt.sca(axes[ts_i][run_i])
# cf = iplt.contourf(q, 32, norm=DivergingNorm(vmin=vmin, vcenter=vcenter, vmax=vmax),
# cmap=plt.get_cmap('seismic'))
if cf_ref is None:
cf = iplt.contourf(q, 16, cmap=plt.get_cmap('coolwarm'), coords=['latitude', 'longitude'])
cf_ref = cf
else:
cf = iplt.contourf(q, 16, cmap=plt.get_cmap('coolwarm'), vmin=cf_ref.zmin, vmax=cf_ref.zmax, levels=cf_ref.levels, coords=['latitude', 'longitude'])
# Add a contour, and put the result in a variable called contour.
q_rel_diff = get_ifs_rel_diff(ifs_io_ref, ifs_io, ts_i, ecrad_runs[run_i], quantity=quantity, pressure=pressure)
iplt.contour(q_rel_diff, levels=np.array([10**(-2)], dtype=np.float64), colors=['#00ff00'])
iplt.xticks([range()])
plt.gca().coastlines()
if ts_i == 0:
plt.gca().set_title(ecrad_run_labels[run_i], usetex=False, fontsize=12)
if run_i == 0:
plt.gca().text(-0.1, 0.5, f'+{ifs_io.time_shift(ts_i)}', transform=plt.gca().transAxes,
va='center', fontsize=12, rotation='vertical')
# cbar = plt.gca().colorbar()
# cbar.set_ticks([min_value, 0., max_value])
colorbar_axes = plt.gcf().add_axes([0.35, 0.08, 0.3, 0.05])
colorbar = plt.colorbar(cf, colorbar_axes, orientation='horizontal')
colorbar.locator = matplotlib.ticker.MaxNLocator(3)
colorbar.update_ticks()
#avail_data = {
# 'ecrad_ieee_half_precision_v2_16bits': 'ecrad 16 bits',
# 'ecrad_ieee_half_precision_v2': 'ecrad IEEE 10 bits',
#} # dir_name -> descr
#ifs_io_rps = [IfsIO([os.path.join(ifs_experiments_path, f'{dir_}', f'{id_}.nc') for id_ in step_shifts])
# for dir_ in avail_data.keys()]
#plot_comparison(ifs_io, ifs_io_rps, avail_data, quantity='temperature', vmin=-0.05, vmax=0.05)
plt.tight_layout(rect=[0, 0.1, 1, 1], h_pad=0.05, w_pad=1.0)
plt.savefig(f'geopotential_height_comparison_and_rel_error.png', dpi=200)
plt.show()
| [
"matplotlib.pyplot.tight_layout",
"papers.none2021_ecrad.extensions.get_ifs_rel_diff",
"matplotlib.pyplot.show",
"os.path.join",
"matplotlib.pyplot.get_cmap",
"os.getcwd",
"matplotlib.pyplot.gca",
"matplotlib.ticker.MaxNLocator",
"comsdk.comaux.load_from_json",
"matplotlib.pyplot.colorbar",
"mat... | [((68, 79), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (77, 79), False, 'import os\n'), ((2138, 2181), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""resources/default.mplstyle"""'], {}), "('resources/default.mplstyle')\n", (2151, 2181), True, 'import matplotlib.pyplot as plt\n'), ((2197, 2220), 'comsdk.comaux.load_from_json', 'load_from_json', (['Summary'], {}), '(Summary)\n', (2211, 2220), False, 'from comsdk.comaux import load_from_json, find_all_files_by_named_regexp\n'), ((2259, 2280), 'comsdk.research.Research.open', 'Research.open', (['res_id'], {}), '(res_id)\n', (2272, 2280), False, 'from comsdk.research import Research\n'), ((3159, 3209), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_timesteps', 'n_runs'], {'figsize': '(12, 6)'}), '(n_timesteps, n_runs, figsize=(12, 6))\n', (3171, 3209), True, 'import matplotlib.pyplot as plt\n'), ((4978, 5035), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cf', 'colorbar_axes'], {'orientation': '"""horizontal"""'}), "(cf, colorbar_axes, orientation='horizontal')\n", (4990, 5035), True, 'import matplotlib.pyplot as plt\n'), ((5059, 5091), 'matplotlib.ticker.MaxNLocator', 'matplotlib.ticker.MaxNLocator', (['(3)'], {}), '(3)\n', (5088, 5091), False, 'import matplotlib\n'), ((5534, 5594), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0.1, 1, 1]', 'h_pad': '(0.05)', 'w_pad': '(1.0)'}), '(rect=[0, 0.1, 1, 1], h_pad=0.05, w_pad=1.0)\n', (5550, 5594), True, 'import matplotlib.pyplot as plt\n'), ((5599, 5672), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""geopotential_height_comparison_and_rel_error.png"""'], {'dpi': '(200)'}), "(f'geopotential_height_comparison_and_rel_error.png', dpi=200)\n", (5610, 5672), True, 'import matplotlib.pyplot as plt\n'), ((5677, 5687), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5685, 5687), True, 'import matplotlib.pyplot as plt\n'), ((2963, 3028), 'os.path.join', 'os.path.join', (['task_path', '"""hgom"""', 'ecrad_runs[0]', 
'"""sh"""', 'f"""{id_}.nc"""'], {}), "(task_path, 'hgom', ecrad_runs[0], 'sh', f'{id_}.nc')\n", (2975, 3028), False, 'import os\n'), ((3564, 3590), 'matplotlib.pyplot.sca', 'plt.sca', (['axes[ts_i][run_i]'], {}), '(axes[ts_i][run_i])\n', (3571, 3590), True, 'import matplotlib.pyplot as plt\n'), ((4206, 4310), 'papers.none2021_ecrad.extensions.get_ifs_rel_diff', 'get_ifs_rel_diff', (['ifs_io_ref', 'ifs_io', 'ts_i', 'ecrad_runs[run_i]'], {'quantity': 'quantity', 'pressure': 'pressure'}), '(ifs_io_ref, ifs_io, ts_i, ecrad_runs[run_i], quantity=\n quantity, pressure=pressure)\n', (4222, 4310), False, 'from papers.none2021_ecrad.extensions import IfsIO, extract_or_interpolate, get_ifs_rel_diff\n'), ((4919, 4928), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4926, 4928), True, 'import matplotlib.pyplot as plt\n'), ((3284, 3353), 'os.path.join', 'os.path.join', (['task_path', '"""hgom"""', 'ecrad_runs[run_i]', '"""sh"""', 'f"""{id_}.nc"""'], {}), "(task_path, 'hgom', ecrad_runs[run_i], 'sh', f'{id_}.nc')\n", (3296, 3353), False, 'import os\n'), ((4350, 4388), 'numpy.array', 'np.array', (['[10 ** -2]'], {'dtype': 'np.float64'}), '([10 ** -2], dtype=np.float64)\n', (4358, 4388), True, 'import numpy as np\n'), ((4458, 4467), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4465, 4467), True, 'import matplotlib.pyplot as plt\n'), ((3831, 3855), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (3843, 3855), True, 'import matplotlib.pyplot as plt\n'), ((3984, 4008), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (3996, 4008), True, 'import matplotlib.pyplot as plt\n'), ((4523, 4532), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4530, 4532), True, 'import matplotlib.pyplot as plt\n'), ((4638, 4647), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4645, 4647), True, 'import matplotlib.pyplot as plt\n'), ((4705, 4714), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), 
'()\n', (4712, 4714), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import annotations
from enum import Enum
from typing import Optional
from typing import Union
import numpy as np
import torch.nn.functional as F
from torch import Tensor
from onevision.cv.core.image import get_image_size
from onevision.cv.core.image import is_channel_first
from onevision.type import FloatAnyT
from onevision.type import Int2Or3T
from onevision.type import TensorOrArray
from onevision.type import to_size
__all__ = [
"PaddingMode",
"padding_mode_from_int",
"pad_image",
]
# MARK: - Enum
class PaddingMode(Enum):
"""Padding modes. Available padding methods are:
"""
CONSTANT = "constant"
# For torch compatibility
CIRCULAR = "circular"
REFLECT = "reflect"
REPLICATE = "replicate"
# For numpy compatibility
EDGE = "edge"
EMPTY = "empty"
LINEAR_RAMP = "linear_ramp"
MAXIMUM = "maximum"
MEAN = "mean"
MEDIAN = "median"
MINIMUM = "minimum"
SYMMETRIC = "symmetric"
WRAP = "wrap"
@staticmethod
def values() -> list:
return [e.value for e in PaddingMode]
def padding_mode_from_int(i: int) -> PaddingMode:
inverse_modes_mapping = {
0 : PaddingMode.CONSTANT,
1 : PaddingMode.CIRCULAR,
2 : PaddingMode.REFLECT,
3 : PaddingMode.REPLICATE,
4 : PaddingMode.EDGE,
5 : PaddingMode.EMPTY,
6 : PaddingMode.LINEAR_RAMP,
7 : PaddingMode.MAXIMUM,
8 : PaddingMode.MEAN,
9 : PaddingMode.MEDIAN,
10: PaddingMode.MINIMUM,
11: PaddingMode.SYMMETRIC,
12: PaddingMode.WRAP,
}
return inverse_modes_mapping[i]
# MARK: - Functional
def pad_image(
image : TensorOrArray,
pad_size: Int2Or3T,
mode : Union[PaddingMode, str] = "constant",
value : Optional[FloatAnyT] = 0.0,
) -> TensorOrArray:
"""Pad image with `value`.
Args:
image (TensorOrArray[B, C, H, W]/[B, H, W, C]):
Image to be padded.
pad_size (Int2Or3T[H, W, *]):
Padded image size.
mode (PaddingMode, str):
One of the padding modes defined in `PaddingMode`.
Default: `constant`.
value (FloatAnyT, optional):
Fill value for `constant` padding. Default: `0.0`.
Returns:
image (TensorOrArray[B, C, H, W]/[B, H, W, C]):
Padded image.
"""
if image.ndim not in (3, 4):
raise ValueError(f"`image.ndim` must be 3 or 4. "
f"But got: {image.ndim}")
if isinstance(mode, str) and mode not in PaddingMode.values():
raise ValueError(f"`mode` must be one of: {PaddingMode.values()}. "
f"But got {mode}.")
elif isinstance(mode, PaddingMode):
if mode not in PaddingMode:
raise ValueError(f"`mode` must be one of: {PaddingMode}. "
f"But got: {mode}.")
mode = mode.value
if isinstance(image, Tensor):
if mode not in ("constant", "circular", "reflect", "replicate"):
raise ValueError()
if isinstance(image, np.ndarray):
if mode not in ("constant", "edge", "empty", "linear_ramp", "maximum",
"mean", "median", "minimum", "symmetric", "wrap"):
raise ValueError()
h0, w0 = get_image_size(image)
h1, w1 = to_size(pad_size)
# Image size > pad size, do nothing
if (h0 * w0) >= (h1 * w1):
return image
if value is None:
value = 0
pad_h = int(abs(h0 - h1) / 2)
pad_w = int(abs(w0 - w1) / 2)
if isinstance(image, Tensor):
if is_channel_first(image):
pad = (pad_w, pad_w, pad_h, pad_h)
else:
pad = (0, 0, pad_w, pad_w, pad_h, pad_h)
return F.pad(input=image, pad=pad, mode=mode, value=value)
elif isinstance(image, np.ndarray):
if is_channel_first(image):
if image.ndim == 3:
pad_width = ((0, 0), (pad_h, pad_h), (pad_w, pad_w))
else:
pad_width = ((0, 0), (0, 0), (pad_h, pad_h), (pad_w, pad_w))
else:
if image.ndim == 3:
pad_width = ((pad_h, pad_h), (pad_w, pad_w), (0, 0))
else:
pad_width = ((pad_h, pad_h), (pad_w, pad_w), (0, 0), (0, 0))
return np.pad(array=image, pad_width=pad_width, mode=mode, constant_values=value)
return image
| [
"numpy.pad",
"onevision.cv.core.image.is_channel_first",
"onevision.cv.core.image.get_image_size",
"onevision.type.to_size",
"torch.nn.functional.pad"
] | [((3469, 3490), 'onevision.cv.core.image.get_image_size', 'get_image_size', (['image'], {}), '(image)\n', (3483, 3490), False, 'from onevision.cv.core.image import get_image_size\n'), ((3504, 3521), 'onevision.type.to_size', 'to_size', (['pad_size'], {}), '(pad_size)\n', (3511, 3521), False, 'from onevision.type import to_size\n'), ((3773, 3796), 'onevision.cv.core.image.is_channel_first', 'is_channel_first', (['image'], {}), '(image)\n', (3789, 3796), False, 'from onevision.cv.core.image import is_channel_first\n'), ((3927, 3978), 'torch.nn.functional.pad', 'F.pad', ([], {'input': 'image', 'pad': 'pad', 'mode': 'mode', 'value': 'value'}), '(input=image, pad=pad, mode=mode, value=value)\n', (3932, 3978), True, 'import torch.nn.functional as F\n'), ((4030, 4053), 'onevision.cv.core.image.is_channel_first', 'is_channel_first', (['image'], {}), '(image)\n', (4046, 4053), False, 'from onevision.cv.core.image import is_channel_first\n'), ((4476, 4550), 'numpy.pad', 'np.pad', ([], {'array': 'image', 'pad_width': 'pad_width', 'mode': 'mode', 'constant_values': 'value'}), '(array=image, pad_width=pad_width, mode=mode, constant_values=value)\n', (4482, 4550), True, 'import numpy as np\n')] |
import numpy as np
import math
import scipy
from autodp import rdp_bank, dp_bank, fdp_bank, utils
from autodp.mechanism_zoo import LaplaceMechanism, LaplaceSVT_Mechanism,StageWiseMechanism
from autodp.transformer_zoo import Composition
import matplotlib.pyplot as plt
from scipy.stats import norm, laplace
from scipy.special import comb
import matplotlib.font_manager as fm
from autodp.mechanism_zoo import ExactGaussianMechanism, PureDP_Mechanism,SubsampleGaussianMechanism, GaussianMechanism, ComposedGaussianMechanism,GaussianSVT_Mechanism, NoisyScreenMechanism
from autodp.transformer_zoo import Composition, AmplificationBySampling
"""
This experiment corresponding to exp 2 in NeurIPS-20 (Figure 2 (a))
We evaluate SVT variants with the same variance of noise by comparing the composed privacy loss for finishing a fixed length sequence of queries.
rho is from Lap(lambda) -> eps_rho = 1/lambda
nu is from Lap(2lambda) -> eps_nu = 1/lambda
eps = (c+1)/lambda, lambda = (c+1)/eps
To align variance between Gaussian-bassed and Laplace-based approaches, we set sigma_1 = sqrt(2) * lambda_rho
"""
delta = 1e-6
lambda_rho = 120
lambda_nu = 240
sigma_1 = lambda_rho*np.sqrt(2)
sigma_2 = 2*sigma_1
eps_1 = 1.0 / lambda_rho
n = 100000 #the length of the fixed query
margin = 1000
def exp_2a():
eps_a = [] #standard SVT
eps_e = [] #Laplace-SVT (via RDP)
eps_g = [] #Gaussian SVT c>1
eps_g_c = [] #c=1 for Gaussian SVT
eps_i = []
eps_kov = [] #generalized SVT
eps_noisy = []
k_list = [int(1.4**i) for i in range(int(math.floor(math.log(n,1.4)))+1)]
print(len(k_list))
query = np.zeros(n)
rho = np.random.normal(scale=sigma_1)
lap_rho = np.random.laplace(loc=0.0, scale=lambda_rho)
"""
compute eps for noisy screening
p = Prob[ nu > Margin]
q = Prob[ nu - 1 > margin]
count_gau counts #tops in Gaussian-SVT
count_lap counts #tops in Laplace-SVT
"""
count_gau = 0
count_lap = 0
# the following is for data-dependent screening in CVPR-20
p = scipy.stats.norm.logsf(margin, scale=sigma_2)
q = scipy.stats.norm.logsf(margin + 1, scale=sigma_2)
params = {}
params['logp'] = p
params['logq'] = q
per_screen_mech = NoisyScreenMechanism(params, name='NoisyScreen')
per_gaussian_mech = ExactGaussianMechanism(sigma_2,name='GM1')
index = []
compose = Composition()
for idx, qu in enumerate(query):
nu = np.random.normal(scale=sigma_2)
lap_nu = np.random.laplace(loc=0.0, scale=lambda_nu)
if nu >= rho + margin:
count_gau += 1
if lap_nu >= lap_rho + margin:
count_lap += 1
count_gau = max(count_gau, 1)
count_lap = max(count_lap, 1)
if idx in k_list:
index.append(idx)
print('number of queries passing threshold', count_gau)
#eps_a records the standard SVT
eps_a.append(eps_1 * count_lap + eps_1)
# compose data-dependent screening
screen_mech = compose([per_screen_mech], [idx])
gaussian_mech = compose([per_gaussian_mech], [idx])
# standard SVT with RDP calculation
param_lap_svt = {}
param_lap_svt['b'] = lambda_rho
param_lap_svt['k'] = idx
param_lap_svt['c'] = count_lap
lapsvtrdp_mech = LaplaceSVT_Mechanism(param_lap_svt)
eps_e.append(lapsvtrdp_mech.get_approxDP(delta))
# stage-wise generalized SVT, k is the maximum length of each chunk
k = int(idx / np.sqrt(count_gau))
generalized_mech = StageWiseMechanism({'sigma':sigma_1,'k':k, 'c':count_gau})
eps_kov.append(generalized_mech.get_approxDP(delta))
# Gaussian-SVT c>1 with RDP, k is the total length before algorithm stops
gaussianSVT_c = GaussianSVT_Mechanism({'sigma':sigma_1,'k':idx, 'c':count_gau}, rdp_c_1=False)
eps_g.append(gaussianSVT_c.get_approxDP(delta))
#Gaussian-SVT with c=1, we use average_k as the approximate maximum length of each chunk, margin is used in Proposition 10
average_k = int(idx / max(count_gau, 1))
params_SVT = {}
params_SVT['k'] = average_k
params_SVT['sigma'] = sigma_1
params_SVT['margin'] = margin
per_gaussianSVT_mech = GaussianSVT_Mechanism(params_SVT)
gaussianSVT_mech = compose([per_gaussianSVT_mech],[max(count_gau, 1)])
eps_g_c.append(gaussianSVT_mech.get_approxDP(delta))
eps_i.append(gaussian_mech.get_approxDP(delta)) # Gaussian Mechanism
eps_noisy.append(screen_mech.get_approxDP(delta))
import matplotlib
import matplotlib.pyplot as plt
font = {'family': 'times',
'weight': 'bold',
'size': 18}
props = fm.FontProperties(family='Gill Sans', fname='/Library/Fonts/GillSans.ttc')
f, ax = plt.subplots()
plt.figure(num=0, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
plt.loglog(index, eps_a, '-r', linewidth=2)
plt.loglog(index, eps_e, '--g^', linewidth=2)
plt.loglog(index, eps_g, '-c^', linewidth=2)
plt.loglog(index, eps_g_c, '-bs', linewidth=2)
plt.loglog(index, eps_i, '--k', linewidth=2)
plt.loglog(index, eps_noisy, color='brown', linewidth=2)
plt.loglog(index, eps_kov, color='hotpink', linewidth=2)
plt.legend(
['Laplace-SVT (Pure-DP from Lyu et al., 2017)', 'Laplace-SVT (via RDP)', 'Gaussian-SVT c>1 (RDP by Theorem 11)',
'Gaussian-SVT c=1 (RDP by Theorem 8)', 'Gaussian Mechanism', 'Noisy Screening (data-dependent RDP)',
'Stage-wise generalized SVT'], loc='best', fontsize=17)
plt.grid(True)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel(r'Iterations', fontsize=20)
plt.ylabel(r'$\epsilon$', fontsize=20)
ax.set_title('Title', fontproperties=props)
plt.savefig('exp2a.pdf', bbox_inches='tight')
exp_2a() | [
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.figure",
"numpy.random.normal",
"autodp.mechanism_zoo.GaussianSVT_Mechanism",
"autodp.mechanism_zoo.ExactGaussianMechanism",
"matplotlib.font_manager.FontProperties",
"autodp.mechanism_zoo.StageWiseMechanism",
"matplotlib.pyplot.yticks",
"math.log",
"... | [((1169, 1179), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1176, 1179), True, 'import numpy as np\n'), ((1627, 1638), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1635, 1638), True, 'import numpy as np\n'), ((1649, 1680), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'sigma_1'}), '(scale=sigma_1)\n', (1665, 1680), True, 'import numpy as np\n'), ((1695, 1739), 'numpy.random.laplace', 'np.random.laplace', ([], {'loc': '(0.0)', 'scale': 'lambda_rho'}), '(loc=0.0, scale=lambda_rho)\n', (1712, 1739), True, 'import numpy as np\n'), ((2050, 2095), 'scipy.stats.norm.logsf', 'scipy.stats.norm.logsf', (['margin'], {'scale': 'sigma_2'}), '(margin, scale=sigma_2)\n', (2072, 2095), False, 'import scipy\n'), ((2104, 2153), 'scipy.stats.norm.logsf', 'scipy.stats.norm.logsf', (['(margin + 1)'], {'scale': 'sigma_2'}), '(margin + 1, scale=sigma_2)\n', (2126, 2153), False, 'import scipy\n'), ((2238, 2286), 'autodp.mechanism_zoo.NoisyScreenMechanism', 'NoisyScreenMechanism', (['params'], {'name': '"""NoisyScreen"""'}), "(params, name='NoisyScreen')\n", (2258, 2286), False, 'from autodp.mechanism_zoo import ExactGaussianMechanism, PureDP_Mechanism, SubsampleGaussianMechanism, GaussianMechanism, ComposedGaussianMechanism, GaussianSVT_Mechanism, NoisyScreenMechanism\n'), ((2311, 2354), 'autodp.mechanism_zoo.ExactGaussianMechanism', 'ExactGaussianMechanism', (['sigma_2'], {'name': '"""GM1"""'}), "(sigma_2, name='GM1')\n", (2333, 2354), False, 'from autodp.mechanism_zoo import ExactGaussianMechanism, PureDP_Mechanism, SubsampleGaussianMechanism, GaussianMechanism, ComposedGaussianMechanism, GaussianSVT_Mechanism, NoisyScreenMechanism\n'), ((2383, 2396), 'autodp.transformer_zoo.Composition', 'Composition', ([], {}), '()\n', (2394, 2396), False, 'from autodp.transformer_zoo import Composition, AmplificationBySampling\n'), ((4860, 4934), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'family': '"""Gill Sans"""', 'fname': 
'"""/Library/Fonts/GillSans.ttc"""'}), "(family='Gill Sans', fname='/Library/Fonts/GillSans.ttc')\n", (4877, 4934), True, 'import matplotlib.font_manager as fm\n'), ((4947, 4961), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4959, 4961), True, 'import matplotlib.pyplot as plt\n'), ((4966, 5038), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(0)', 'figsize': '(12, 8)', 'dpi': '(80)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=0, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')\n", (4976, 5038), True, 'import matplotlib.pyplot as plt\n'), ((5043, 5086), 'matplotlib.pyplot.loglog', 'plt.loglog', (['index', 'eps_a', '"""-r"""'], {'linewidth': '(2)'}), "(index, eps_a, '-r', linewidth=2)\n", (5053, 5086), True, 'import matplotlib.pyplot as plt\n'), ((5091, 5136), 'matplotlib.pyplot.loglog', 'plt.loglog', (['index', 'eps_e', '"""--g^"""'], {'linewidth': '(2)'}), "(index, eps_e, '--g^', linewidth=2)\n", (5101, 5136), True, 'import matplotlib.pyplot as plt\n'), ((5141, 5185), 'matplotlib.pyplot.loglog', 'plt.loglog', (['index', 'eps_g', '"""-c^"""'], {'linewidth': '(2)'}), "(index, eps_g, '-c^', linewidth=2)\n", (5151, 5185), True, 'import matplotlib.pyplot as plt\n'), ((5190, 5236), 'matplotlib.pyplot.loglog', 'plt.loglog', (['index', 'eps_g_c', '"""-bs"""'], {'linewidth': '(2)'}), "(index, eps_g_c, '-bs', linewidth=2)\n", (5200, 5236), True, 'import matplotlib.pyplot as plt\n'), ((5241, 5285), 'matplotlib.pyplot.loglog', 'plt.loglog', (['index', 'eps_i', '"""--k"""'], {'linewidth': '(2)'}), "(index, eps_i, '--k', linewidth=2)\n", (5251, 5285), True, 'import matplotlib.pyplot as plt\n'), ((5290, 5346), 'matplotlib.pyplot.loglog', 'plt.loglog', (['index', 'eps_noisy'], {'color': '"""brown"""', 'linewidth': '(2)'}), "(index, eps_noisy, color='brown', linewidth=2)\n", (5300, 5346), True, 'import matplotlib.pyplot as plt\n'), ((5351, 5407), 'matplotlib.pyplot.loglog', 'plt.loglog', (['index', 'eps_kov'], {'color': 
'"""hotpink"""', 'linewidth': '(2)'}), "(index, eps_kov, color='hotpink', linewidth=2)\n", (5361, 5407), True, 'import matplotlib.pyplot as plt\n'), ((5412, 5708), 'matplotlib.pyplot.legend', 'plt.legend', (["['Laplace-SVT (Pure-DP from Lyu et al., 2017)', 'Laplace-SVT (via RDP)',\n 'Gaussian-SVT c>1 (RDP by Theorem 11)',\n 'Gaussian-SVT c=1 (RDP by Theorem 8)', 'Gaussian Mechanism',\n 'Noisy Screening (data-dependent RDP)', 'Stage-wise generalized SVT']"], {'loc': '"""best"""', 'fontsize': '(17)'}), "(['Laplace-SVT (Pure-DP from Lyu et al., 2017)',\n 'Laplace-SVT (via RDP)', 'Gaussian-SVT c>1 (RDP by Theorem 11)',\n 'Gaussian-SVT c=1 (RDP by Theorem 8)', 'Gaussian Mechanism',\n 'Noisy Screening (data-dependent RDP)', 'Stage-wise generalized SVT'],\n loc='best', fontsize=17)\n", (5422, 5708), True, 'import matplotlib.pyplot as plt\n'), ((5724, 5738), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5732, 5738), True, 'import matplotlib.pyplot as plt\n'), ((5743, 5766), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (5753, 5766), True, 'import matplotlib.pyplot as plt\n'), ((5771, 5794), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (5781, 5794), True, 'import matplotlib.pyplot as plt\n'), ((5799, 5836), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {'fontsize': '(20)'}), "('Iterations', fontsize=20)\n", (5809, 5836), True, 'import matplotlib.pyplot as plt\n'), ((5842, 5880), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\epsilon$"""'], {'fontsize': '(20)'}), "('$\\\\epsilon$', fontsize=20)\n", (5852, 5880), True, 'import matplotlib.pyplot as plt\n'), ((5934, 5979), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""exp2a.pdf"""'], {'bbox_inches': '"""tight"""'}), "('exp2a.pdf', bbox_inches='tight')\n", (5945, 5979), True, 'import matplotlib.pyplot as plt\n'), ((2447, 2478), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 
'sigma_2'}), '(scale=sigma_2)\n', (2463, 2478), True, 'import numpy as np\n'), ((2496, 2539), 'numpy.random.laplace', 'np.random.laplace', ([], {'loc': '(0.0)', 'scale': 'lambda_nu'}), '(loc=0.0, scale=lambda_nu)\n', (2513, 2539), True, 'import numpy as np\n'), ((3366, 3401), 'autodp.mechanism_zoo.LaplaceSVT_Mechanism', 'LaplaceSVT_Mechanism', (['param_lap_svt'], {}), '(param_lap_svt)\n', (3386, 3401), False, 'from autodp.mechanism_zoo import LaplaceMechanism, LaplaceSVT_Mechanism, StageWiseMechanism\n'), ((3621, 3683), 'autodp.mechanism_zoo.StageWiseMechanism', 'StageWiseMechanism', (["{'sigma': sigma_1, 'k': k, 'c': count_gau}"], {}), "({'sigma': sigma_1, 'k': k, 'c': count_gau})\n", (3639, 3683), False, 'from autodp.mechanism_zoo import LaplaceMechanism, LaplaceSVT_Mechanism, StageWiseMechanism\n'), ((3860, 3947), 'autodp.mechanism_zoo.GaussianSVT_Mechanism', 'GaussianSVT_Mechanism', (["{'sigma': sigma_1, 'k': idx, 'c': count_gau}"], {'rdp_c_1': '(False)'}), "({'sigma': sigma_1, 'k': idx, 'c': count_gau}, rdp_c_1\n =False)\n", (3881, 3947), False, 'from autodp.mechanism_zoo import ExactGaussianMechanism, PureDP_Mechanism, SubsampleGaussianMechanism, GaussianMechanism, ComposedGaussianMechanism, GaussianSVT_Mechanism, NoisyScreenMechanism\n'), ((4375, 4408), 'autodp.mechanism_zoo.GaussianSVT_Mechanism', 'GaussianSVT_Mechanism', (['params_SVT'], {}), '(params_SVT)\n', (4396, 4408), False, 'from autodp.mechanism_zoo import ExactGaussianMechanism, PureDP_Mechanism, SubsampleGaussianMechanism, GaussianMechanism, ComposedGaussianMechanism, GaussianSVT_Mechanism, NoisyScreenMechanism\n'), ((3570, 3588), 'numpy.sqrt', 'np.sqrt', (['count_gau'], {}), '(count_gau)\n', (3577, 3588), True, 'import numpy as np\n'), ((1569, 1585), 'math.log', 'math.log', (['n', '(1.4)'], {}), '(n, 1.4)\n', (1577, 1585), False, 'import math\n')] |
################################################################################
################# Model Module Tests #################
################################################################################
import os
import sys
import unittest
import sklearn
import numpy as np
#### Suppress Sklearn warnings ####
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
###################################
from sklearn.cross_decomposition import PLSRegression
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, BaggingRegressor
from sklearn.linear_model import Lasso
from sklearn.svm import SVR
from pySAR.model import *
class ModelTests(unittest.TestCase):
def setUp(self):
""" Create dummy data. """
self.dummy_X = np.random.ranf(size=100)
self.dummy_X_2 = np.random.ranf(size=50)
self.dummy_Y = np.random.randint(10,size=100)
self.dummy_Y_2 = np.random.randint(20,size=50)
def test_model(self):
""" Test Case to check each model type & its associated parameters & attributes. """
test_models = ['PLSRegression','RandomForestRegressor','AdaBoostRegressor',\
'BaggingRegressor','DecisionTreeRegressor','LinearRegression',\
'Lasso','SVR','KNeighborsRegressor']
#iterate through all available algorithms and test them
for test_mod in range(0,len(test_models)):
model = Model(test_models[test_mod])
#1.)
#checking model object is of the correct sklearn model datatype
self.assertEqual(type(model.model).__name__, test_models[test_mod],
'Model type is not correct, wanted {}, got {} '.format(
test_models[test_mod], type(model.model).__name__
))
#2.) #assert that model has not been fitted
self.assertFalse(model.model_fitted(), 'Model should not be fitted \
on initialisation')
#3.) #verify that parameters input param = {} meaning the default params for the model are used
self.assertEqual(model.parameters,{},
'Default Parameters attribute should be an empty dict, but got {}'.format(model.parameters))
#4.) #verify test split attribute is = 0.2, its default value
self.assertEqual(model.test_split, None,
'Default test split attribute should be None, but got {}'.format(model.test_split))
#5.) #verify that input model type is a valid model for the class
self.assertTrue(model.algorithm in [item.lower() \
for item in model.valid_models],
'Input algorithm {} not in available algorithms: {}'.format(model.algorithm, model.valid_models))
#6.) #verify repr represenation of model object is correct
self.assertEqual(repr(model), test_models[test_mod],
'Repr function should return {}, but got {}'.format(test_models[test_mod], repr(model)))
#7.) #verify algorithm is a regression
self.assertTrue(sklearn.base.is_regressor(model.model),
'Model type should be a sklearn regressor.')
#8.) #fit model and assert it has been fitted
model.train_test_split(self.dummy_X, self.dummy_Y)
model.fit()
self.assertTrue(model.model_fitted(), 'Model has not been fitted')
def test_model_input_closeness(self):
""" Test case for testing the algorithm closeness function used to get the
closest available algorithm to the algorithm input into the class. """
#1.)
model = Model('plsreg')
self.assertEqual(model.algorithm, "plsregression")
self.assertEqual(repr(model), "PLSRegression")
model = Model('randomfor')
self.assertEqual(model.algorithm, "randomforestregressor")
self.assertEqual(repr(model), "RandomForestRegressor")
model = Model('adaboo')
self.assertEqual(model.algorithm, "adaboostregressor")
self.assertEqual(repr(model), "AdaBoostRegressor")
model = Model('bagg')
self.assertEqual(model.algorithm, "baggingregressor")
self.assertEqual(repr(model), "BaggingRegressor")
model = Model('decisiontree')
self.assertEqual(model.algorithm, "decisiontreeregressor")
self.assertEqual(repr(model), "DecisionTreeRegressor")
model = Model('linear')
self.assertEqual(model.algorithm, "linearregression")
self.assertEqual(repr(model), "LinearRegression")
model = Model('lass')
self.assertEqual(model.algorithm, "lasso")
self.assertEqual(repr(model), "Lasso")
model = Model('kneighbors')
self.assertEqual(model.algorithm, "kneighborsregressor")
self.assertEqual(repr(model), "KNeighborsRegressor")
model = Model('sv')
self.assertEqual(model.algorithm, "svr")
self.assertEqual(repr(model), "SVR")
#2.)
with self.assertRaises(ValueError):
bad_model = Model('abcdefg')
with self.assertRaises(ValueError):
bad_model = Model('rand')
with self.assertRaises(ValueError):
bad_model = Model('123')
with self.assertRaises(ValueError):
bad_model = Model('blahblahblah')
def test_train_test_split(self):
""" Testing splitting up dataset into training and test data. """
#1.)
model = Model('plsreg')
X_train, X_test, Y_train, Y_test = model.train_test_split(self.dummy_X, self.dummy_Y)
self.assertTrue(len(X_train) == 80)
self.assertTrue(len(Y_train) == 80)
self.assertTrue(len(X_test) == 20)
self.assertTrue(len(Y_test) == 20)
#2.)
model = Model('plsreg')
with self.assertRaises(ValueError):
X_train, X_test, Y_train, Y_test = model.train_test_split(self.dummy_X_2, self.dummy_Y)
#3.)
model = Model('adaboostreg')
X_train, X_test, Y_train, Y_test = model.train_test_split(self.dummy_X, self.dummy_Y, test_size=0.5)
self.assertTrue(len(X_train) == 50)
self.assertTrue(len(Y_train) == 50)
self.assertTrue(len(X_test) == 50)
self.assertTrue(len(Y_test) == 50)
#4.)
model = Model('bagging')
X_train, X_test, Y_train, Y_test = model.train_test_split(self.dummy_X_2, self.dummy_Y_2, test_size=0.1)
self.assertTrue(len(X_train) == 45)
self.assertTrue(len(Y_train) == 45)
self.assertTrue(len(X_test) == 5)
self.assertTrue(len(Y_test) == 5)
def test_predict(self):
""" Testing the prediction of values for unseen sequences using the trained model. """
#1.)
model = Model('knn')
X_train, X_test, Y_train, Y_test = model.train_test_split(self.dummy_X_2, self.dummy_Y_2)
model.fit()
Y_pred = model.predict()
self.assertIsInstance(Y_pred, np.ndarray)
self.assertEqual(len(Y_pred), len(Y_test))
def test_parameters(self):
""" Testing parameters of Model class. """
#1.)
#create instance of PLS model using Model class & creating instance
# using SKlearn libary, comparing if the parameters of both instances are equal
pls_parameters = {"n_components":20,"scale":False, "max_iter":200}
model = Model(algorithm="PlsRegression",parameters=pls_parameters)
pls_model = PLSRegression(n_components=20, scale="svd", max_iter=200)
for k, v in model.model.get_params().items():
self.assertIn(k, list(pls_model.get_params()))
#2.)
rf_parameters = {"n_estimators":200, "max_depth":50,"min_samples_split":10}
model = Model(algorithm="RandomForest",parameters=rf_parameters)
rf_model = RandomForestRegressor(n_estimators=200, max_depth=50, min_samples_split=10)
for k, v in model.model.get_params().items():
self.assertIn(k, list(rf_model.get_params()))
#3.)
knn_parameters = {"n_neighbors":10, "weights":"distance","algorithm":"ball_tree"}
model = Model(algorithm="KNN",parameters=knn_parameters)
knn_model = KNeighborsRegressor(n_neighbors=10,weights='distance',algorithm="kd_tree")
for k, v in model.model.get_params().items():
self.assertIn(k, list(knn_model.get_params()))
#4.)
svr_parameters = {"kernel":"poly", "degree":5,"coef0":1}
model = Model(algorithm="SVR",parameters=svr_parameters)
svr_model = SVR(kernel='poly', degree=5, coef0=1)
for k, v in model.model.get_params().items():
self.assertIn(k, list(svr_model.get_params()))
#5.)
ada_parameters = {"n_estimators":150, "learning_rate":1.2,"loss":"square"}
model = Model(algorithm="AdaBoost",parameters=ada_parameters)
ada_model = AdaBoostRegressor(n_estimators=150, learning_rate=1.2, loss="square")
for k, v in model.model.get_params().items():
self.assertIn(k, list(ada_model.get_params()))
#6.)
bagging_parameters = {"n_estimators":50, "max_samples":1.5,"max_features":2}
model = Model(algorithm="Bagging",parameters=bagging_parameters)
bagging_model = BaggingRegressor(n_estimators=50, max_samples=1.5, max_features="square")
for k, v in model.model.get_params().items():
self.assertIn(k, list(bagging_model.get_params()))
#7.)
lasso_parameters = {"alpha":1.5, "max_iter":500,"tol":0.004}
model = Model(algorithm="lasso",parameters=lasso_parameters)
lasso_model = Lasso(alpha=1.5, max_iter=500, tol=0.004)
for k, v in model.model.get_params().items():
self.assertIn(k, list(lasso_model.get_params()))
def test_copy(self):
""" Testing model copy function. """
#1.)
pls_parameters = {"n_components":20,"scale":False, "max_iter":200}
model = Model(algorithm="PLSReg",parameters=pls_parameters)
model_copy = model.copy()
self.assertTrue(model_copy == model.model)
#2.)
svr_parameters = {"kernel":"poly", "degree":5,"coef0":1}
model = Model(algorithm="SVR",parameters=svr_parameters)
model_copy = model.copy()
self.assertTrue(model_copy == model.model)
def test_hyperparamter_tuning(self):
pass
def tearDown(self):
del self.dummy_X
del self.dummy_X_2
del self.dummy_Y
del self.dummy_Y_2
if __name__ == '__main__':
#run all model tests
unittest.main(verbosity=2)
| [
"unittest.main",
"sklearn.svm.SVR",
"sklearn.ensemble.AdaBoostRegressor",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.linear_model.Lasso",
"numpy.random.randint",
"sklearn.cross_decomposition.PLSRegression",
"sklearn.base.is_regressor",
"numpy.random.ranf",
"sklearn.ensemble.BaggingRegresso... | [((10482, 10508), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (10495, 10508), False, 'import unittest\n'), ((816, 840), 'numpy.random.ranf', 'np.random.ranf', ([], {'size': '(100)'}), '(size=100)\n', (830, 840), True, 'import numpy as np\n'), ((866, 889), 'numpy.random.ranf', 'np.random.ranf', ([], {'size': '(50)'}), '(size=50)\n', (880, 889), True, 'import numpy as np\n'), ((913, 944), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(100)'}), '(10, size=100)\n', (930, 944), True, 'import numpy as np\n'), ((969, 999), 'numpy.random.randint', 'np.random.randint', (['(20)'], {'size': '(50)'}), '(20, size=50)\n', (986, 999), True, 'import numpy as np\n'), ((7432, 7489), 'sklearn.cross_decomposition.PLSRegression', 'PLSRegression', ([], {'n_components': '(20)', 'scale': '"""svd"""', 'max_iter': '(200)'}), "(n_components=20, scale='svd', max_iter=200)\n", (7445, 7489), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((7785, 7860), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(200)', 'max_depth': '(50)', 'min_samples_split': '(10)'}), '(n_estimators=200, max_depth=50, min_samples_split=10)\n', (7806, 7860), False, 'from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, BaggingRegressor\n'), ((8498, 8535), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""poly"""', 'degree': '(5)', 'coef0': '(1)'}), "(kernel='poly', degree=5, coef0=1)\n", (8501, 8535), False, 'from sklearn.svm import SVR\n'), ((8828, 8897), 'sklearn.ensemble.AdaBoostRegressor', 'AdaBoostRegressor', ([], {'n_estimators': '(150)', 'learning_rate': '(1.2)', 'loss': '"""square"""'}), "(n_estimators=150, learning_rate=1.2, loss='square')\n", (8845, 8897), False, 'from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, BaggingRegressor\n'), ((9199, 9272), 'sklearn.ensemble.BaggingRegressor', 'BaggingRegressor', ([], 
{'n_estimators': '(50)', 'max_samples': '(1.5)', 'max_features': '"""square"""'}), "(n_estimators=50, max_samples=1.5, max_features='square')\n", (9215, 9272), False, 'from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, BaggingRegressor\n'), ((9556, 9597), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': '(1.5)', 'max_iter': '(500)', 'tol': '(0.004)'}), '(alpha=1.5, max_iter=500, tol=0.004)\n', (9561, 9597), False, 'from sklearn.linear_model import Lasso\n'), ((3111, 3149), 'sklearn.base.is_regressor', 'sklearn.base.is_regressor', (['model.model'], {}), '(model.model)\n', (3136, 3149), False, 'import sklearn\n')] |
import Sofa
import SofaTest
from SofaTest.Macro import *
import math
from Compliant import Frame, Vec, Tools, Control, StructuralAPI
from SofaPython import Quaternion
import numpy
import random
import sys
class Shared:
pass
global shared
shared = Shared()
dir = Tools.path( __file__ )
def createScene(node):
# controller
node.createObject('PythonScriptController', filename = __file__, classname = 'Controller' )
# friction coefficient
shared.mu = float( random.randint(0,10) ) / 10.0 # a random mu in [0,1] with 0.1 step
scene = Tools.scene( node )
node.dt = 0.005
style = node.getObject('style')
style.findData('displayFlags').showMappings = True
manager = node.getObject('manager')
manager.response = 'FrictionCompliantContact'
manager.responseParams = 'mu=' + str(shared.mu) +"&horizontalConeProjection=1"
ode = node.getObject('ode')
ode.stabilization = "pre-stabilization"
num = node.createObject('SequentialSolver',
name = 'num',
iterations = 1000,
precision = 1e-20)
node.createObject('LDLTResponse')
proximity = node.getObject('proximity')
proximity.alarmDistance = 0.5
proximity.contactDistance = 0.1
# plane
plane = StructuralAPI.RigidBody( node, 'plane' )
plane.setManually( [0,0,0,0,0,0,1], 1, [1,1,1] )
plane.node.createObject('FixedConstraint')
cm = plane.addCollisionMesh( "mesh/cube.obj", [10,1,10] )
cm.addVisualModel()
# box
box = StructuralAPI.RigidBody( node, 'box' )
box.setFromMesh( 'mesh/cube.obj', 50, [0,2.5,0,0,0,0,1] )
#box.setManually( [0,2.5,0,0,0,0,1], 1, [1,1,1] )
box.dofs.showObject=True
box.dofs.showObjectScale=5
cm = box.addCollisionMesh( "mesh/cube.obj" )
cm.addVisualModel()
# keep an eye on dofs
shared.plane = plane.dofs
shared.box = box.dofs
# scene controller
class Controller(SofaTest.Controller):
muAngle = 0 # the plane angle corresponding to given mu
currentAngle = 0 # the current plane angle
muToTest = 0.1 # stop at this mu to see if the box is sticking or sliding
counter = 0 # to wait at a given mu
def reset(self):
self.muAngle = math.atan(shared.mu)
return 0
def onBeginAnimationStep(self, dt):
# current mu from current plane angle
currentMu = math.tan( self.currentAngle )
if self.counter < 100 : # does not rotate the plane for 100 time steps
self.counter += 1
return 0
# is it a mu we want to test?
if numpy.allclose( self.muToTest, currentMu, 1e-3, 1e-3 ) :
# at the end of 100 time steps, check if the box was sticking or sliding
self.counter = 0
self.muToTest += 0.1
# look at the box velocity along its x-axis
localbox = Quaternion.rotate(Quaternion.conj( Frame.Frame( shared.plane.position[0] ).rotation ),
shared.box.velocity[0][:3])
vel = localbox[0]
#print 'plane/ground angle:', self.currentAngle
#print 'velocity:',vel
#print shared.box.position[0], shared.box.velocity[0][:3]
#print vel, currentMu, shared.mu
testVel = (vel > 1e-1)
if testVel:
testMu = (currentMu>=shared.mu-1e-2)
else:
testMu = (currentMu>=shared.mu)
EXPECT_FALSE( testVel ^ testMu, str(vel)+' '+str(currentMu)+'mu='+str(shared.mu) ) # xor
#print testVel, testMu
#sys.stdout.flush()
# all finished
if currentMu >= shared.mu + .1:
self.sendSuccess()
# update plane orientation
self.currentAngle += 0.001
q = Quaternion.from_euler( [0,0,-self.currentAngle] )
p = shared.plane.position
p[0] = [0,0,0,q[3],q[2],q[1],q[0]]
shared.plane.position = p
return 0
def bwdInitGraph(self,node):
return 0
| [
"math.atan",
"random.randint",
"SofaPython.Quaternion.from_euler",
"math.tan",
"numpy.allclose",
"Compliant.Frame.Frame",
"Compliant.Tools.path",
"Compliant.Tools.scene",
"Compliant.StructuralAPI.RigidBody"
] | [((272, 292), 'Compliant.Tools.path', 'Tools.path', (['__file__'], {}), '(__file__)\n', (282, 292), False, 'from Compliant import Frame, Vec, Tools, Control, StructuralAPI\n'), ((564, 581), 'Compliant.Tools.scene', 'Tools.scene', (['node'], {}), '(node)\n', (575, 581), False, 'from Compliant import Frame, Vec, Tools, Control, StructuralAPI\n'), ((1320, 1358), 'Compliant.StructuralAPI.RigidBody', 'StructuralAPI.RigidBody', (['node', '"""plane"""'], {}), "(node, 'plane')\n", (1343, 1358), False, 'from Compliant import Frame, Vec, Tools, Control, StructuralAPI\n'), ((1572, 1608), 'Compliant.StructuralAPI.RigidBody', 'StructuralAPI.RigidBody', (['node', '"""box"""'], {}), "(node, 'box')\n", (1595, 1608), False, 'from Compliant import Frame, Vec, Tools, Control, StructuralAPI\n'), ((2280, 2300), 'math.atan', 'math.atan', (['shared.mu'], {}), '(shared.mu)\n', (2289, 2300), False, 'import math\n'), ((2431, 2458), 'math.tan', 'math.tan', (['self.currentAngle'], {}), '(self.currentAngle)\n', (2439, 2458), False, 'import math\n'), ((2662, 2716), 'numpy.allclose', 'numpy.allclose', (['self.muToTest', 'currentMu', '(0.001)', '(0.001)'], {}), '(self.muToTest, currentMu, 0.001, 0.001)\n', (2676, 2716), False, 'import numpy\n'), ((4029, 4078), 'SofaPython.Quaternion.from_euler', 'Quaternion.from_euler', (['[0, 0, -self.currentAngle]'], {}), '([0, 0, -self.currentAngle])\n', (4050, 4078), False, 'from SofaPython import Quaternion\n'), ((484, 505), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (498, 505), False, 'import random\n'), ((3006, 3043), 'Compliant.Frame.Frame', 'Frame.Frame', (['shared.plane.position[0]'], {}), '(shared.plane.position[0])\n', (3017, 3043), False, 'from Compliant import Frame, Vec, Tools, Control, StructuralAPI\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Example of simple image plotting.
"""
import sys
import numpy as np
import vispy.plot as vp
canvas = vp.image(np.random.normal(128, 60, (20, 20)).astype(np.ubyte))
# Start up the event loop if this is not an interactive prompt.
if __name__ == '__main__' and sys.flags.interactive == 0:
canvas.app.run()
| [
"numpy.random.normal"
] | [((421, 456), 'numpy.random.normal', 'np.random.normal', (['(128)', '(60)', '(20, 20)'], {}), '(128, 60, (20, 20))\n', (437, 456), True, 'import numpy as np\n')] |
import numpy as np
class VolatilityExtractor:
name = "volatility"
def __init__(
self,
dataset: np.ndarray,
price_changes: np.ndarray,
period: int = 60,
) -> None:
self.dataset = dataset
self.period = period
number_of_samples = len(price_changes)
volatilities = np.zeros((number_of_samples,))
for i in range(number_of_samples):
start = max(0, i - self.period)
volatilities[i] = np.var(price_changes[start:i])
volatilities[0] = volatilities[1]
self.feature_data = volatilities | [
"numpy.zeros",
"numpy.var"
] | [((342, 372), 'numpy.zeros', 'np.zeros', (['(number_of_samples,)'], {}), '((number_of_samples,))\n', (350, 372), True, 'import numpy as np\n'), ((491, 521), 'numpy.var', 'np.var', (['price_changes[start:i]'], {}), '(price_changes[start:i])\n', (497, 521), True, 'import numpy as np\n')] |
from scipy.io import loadmat, savemat
import matplotlib.pyplot as plt
from FCMyoMapNet import UNet
import torch
from torch.autograd import Variable
import numpy as np
TimeScaling = 1000;
TimeScalingFactor =1/TimeScaling
T1sigNum = 4
T1sigAndTi = T1sigNum*2;
# Select one model
modelName = "MyoMapNet_4PreandPostGd" #MyoMapNet_4PostGd; MyoMapNet_4PreandPostGd; MyoMapNet_4PreGd; MyoMapNet_5PreGd;
if modelName=="MyoMapNet_5PreGd":
T1sigNum = 5
else:
T1sigNum = 4
T1sigAndTi = T1sigNum*2
# Construct Model
MyoMapNet = UNet(T1sigAndTi, 1)
MyoMapNet.to(torch.device('cpu'))
#loading trained model
try:
model = torch.load( 'TrainedModels/' +modelName+'.pth', map_location=torch.device('cpu'))
MyoMapNet = torch.nn.DataParallel(MyoMapNet)
MyoMapNet.load_state_dict(model['state_dict'])
print('Model loaded!')
except Exception as e:
print('Can not load model!')
print(e)
print('Start loading demo data')
if T1sigNum == 4:
t1wtiIdx = [0, 1, 2, 3, 5, 6, 7, 8] #For MyoMapNet 4PreGd, 4Pre+PostGd, 4PostGd
else:
t1wtiIdx = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] #Only for MyoMapNet 5PreGd
try:
data = loadmat("Data/Demo/demo_Phantom.mat")
Pre5HBsT1wTIs_in = data['MOLLIT1wTI'] #get T1 weighted signals and corrsponding inversion time
Pre5HBsT1wTIs_double = Pre5HBsT1wTIs_in.astype(np.double)
Pre5HBs_tst_t1w_TI = Pre5HBsT1wTIs_double[:,:,t1wtiIdx,:,:]
PreMOLLIT1MapOffLine_in = data['MOLLIoffLineT1Map']
PreMOLLIT1MapOffLine_double = PreMOLLIT1MapOffLine_in.astype(np.double)
PreMOLLIT1MapOffLineT1 = np.zeros((PreMOLLIT1MapOffLine_double.shape[0], 1, PreMOLLIT1MapOffLine_double.shape[1],
PreMOLLIT1MapOffLine_double.shape[2]))
PreMOLLIT1MapOffLineT1[0, 0, :, :] = PreMOLLIT1MapOffLine_double = PreMOLLIT1MapOffLine_in.astype(np.double)[
0, :, :]
except Exception as e:
print(e)
#Construct input signals and output T1 maps
X = Variable(torch.FloatTensor(Pre5HBs_tst_t1w_TI))
xs = X.shape
X = X.permute((0, 3, 4, 1, 2)).reshape((xs[0] * xs[3] * xs[4], xs[1], xs[2]))
MyoMapNet.eval()
y = MyoMapNet(X)
MyoMapNetT1 = y.reshape((xs[0], 1, xs[3], xs[4]))
print('Displaying T1 maps')
fig, axs = plt.subplots(1, 3)
axs[0].set_title('MyoMapNet')
axs[0].imshow(MyoMapNetT1[0, 0, :, :].data.numpy() * TimeScaling, cmap='jet',
vmin=0, vmax=1500)
axs[0].axis('off')
axs[1].set_title('MOLLI5(3)3')
axs[1].imshow(PreMOLLIT1MapOffLineT1[0, 0, :, :] * TimeScaling, cmap='jet', vmin=0,
vmax=1500)
axs[1].axis('off')
axs[2].set_title('MyoMapNet-MOLLI5(3)3')
axs[2].imshow((MyoMapNetT1[0, 0, :, :].data.numpy()-PreMOLLIT1MapOffLineT1[0, 0, :, :]) * TimeScaling, cmap='jet', vmin=-50,
vmax=50)
axs[2].axis('off')
fig.show() | [
"scipy.io.loadmat",
"numpy.zeros",
"torch.FloatTensor",
"FCMyoMapNet.UNet",
"torch.device",
"torch.nn.DataParallel",
"matplotlib.pyplot.subplots"
] | [((528, 547), 'FCMyoMapNet.UNet', 'UNet', (['T1sigAndTi', '(1)'], {}), '(T1sigAndTi, 1)\n', (532, 547), False, 'from FCMyoMapNet import UNet\n'), ((2277, 2295), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (2289, 2295), True, 'import matplotlib.pyplot as plt\n'), ((561, 580), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (573, 580), False, 'import torch\n'), ((721, 753), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['MyoMapNet'], {}), '(MyoMapNet)\n', (742, 753), False, 'import torch\n'), ((1138, 1175), 'scipy.io.loadmat', 'loadmat', (['"""Data/Demo/demo_Phantom.mat"""'], {}), "('Data/Demo/demo_Phantom.mat')\n", (1145, 1175), False, 'from scipy.io import loadmat, savemat\n'), ((1563, 1699), 'numpy.zeros', 'np.zeros', (['(PreMOLLIT1MapOffLine_double.shape[0], 1, PreMOLLIT1MapOffLine_double.shape\n [1], PreMOLLIT1MapOffLine_double.shape[2])'], {}), '((PreMOLLIT1MapOffLine_double.shape[0], 1,\n PreMOLLIT1MapOffLine_double.shape[1], PreMOLLIT1MapOffLine_double.shape[2])\n )\n', (1571, 1699), True, 'import numpy as np\n'), ((2023, 2060), 'torch.FloatTensor', 'torch.FloatTensor', (['Pre5HBs_tst_t1w_TI'], {}), '(Pre5HBs_tst_t1w_TI)\n', (2040, 2060), False, 'import torch\n'), ((684, 703), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (696, 703), False, 'import torch\n')] |
# 打印坐标
from t1 import Test1
import numpy as np
import matplotlib.pyplot as plt
co1 = Test1().nums()
'''随机生成20个数字,调用已经写好的t1模块'''
class Test2:
def col(co):
lst = []
for i in range(0, len(co), 2):
lst.append((co[i], co[i + 1]))
print(lst)
# @staticmethod
def mat(lst1):
x, y = list(), list()
for i in range(0, len(co1), 2):
x.append(co1[i])
y.append(co1[i + 1])
x1 = np.array(x)
y1 = np.array(y)
plt.scatter(x1, y1)
plt.show()
if __name__ == '__main__':
# random_num = Test2()
a = Test2.col(co1)
b = Test2.mat(a)
| [
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show",
"numpy.array",
"t1.Test1"
] | [((86, 93), 't1.Test1', 'Test1', ([], {}), '()\n', (91, 93), False, 'from t1 import Test1\n'), ((465, 476), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (473, 476), True, 'import numpy as np\n'), ((490, 501), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (498, 501), True, 'import numpy as np\n'), ((511, 530), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x1', 'y1'], {}), '(x1, y1)\n', (522, 530), True, 'import matplotlib.pyplot as plt\n'), ((539, 549), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (547, 549), True, 'import matplotlib.pyplot as plt\n')] |
import argparse
import multiprocessing
import os.path
import numpy as np
import open3d as o3d
import ast
from pykdtree.kdtree import KDTree
PLANAR_IDS = {
6: 1,
7: 1,
8: 2
}
def visualize_pcd_labels(pcd: o3d.geometry.PointCloud, labels: np.array, filename: str = None):
colors = np.concatenate([np.asarray([[0, 0, 0]]), np.random.rand(np.max(labels), 3)])
pcd_for_vis = o3d.geometry.PointCloud()
pcd_for_vis.points = o3d.utility.Vector3dVector(np.asarray(pcd.points))
pcd_for_vis.paint_uniform_color([0, 0, 0])
pcd_for_vis.colors = o3d.utility.Vector3dVector(colors[labels])
if filename is None:
o3d.visualization.draw_geometries([pcd_for_vis])
else:
o3d.io.write_point_cloud(filename, pcd_for_vis)
def pcd_from_carla_line(line: str) -> (o3d.geometry.PointCloud, np.array):
points_packed = ast.literal_eval(line.split(",|,")[1])
points = np.asarray([point_label_id[0] for point_label_id in points_packed])
labels = [point_label_id[1] for point_label_id in points_packed]
labels = np.asarray(list(map(lambda x: PLANAR_IDS[x] if x in PLANAR_IDS else 0, labels)))
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
return pcd, labels
def build_map(data_path) -> o3d.geometry.PointCloud:
map_pcd = o3d.geometry.PointCloud()
with open(data_path) as data_file:
for index, line in enumerate(data_file):
if index % 10 != 0:
continue
frame_pcd, _ = pcd_from_carla_line(line)
map_pcd += frame_pcd
print("Cloud {} loaded!".format(index + 1))
map_pcd = map_pcd.voxel_down_sample(0.2)
return map_pcd
def annotate_map(map_pcd: o3d.geometry.PointCloud, annot_path: str, data_path: str) -> np.array:
mesh_names = [filename[:-4] for filename in os.listdir(annot_path) if filename.endswith(".pcd")]
map_tree = KDTree(np.asarray(map_pcd.points))
map_points_count = np.asarray(map_pcd.points).shape[0]
map_labels = np.zeros(map_points_count, dtype=int)
max_used_label = -1
with open(data_path) as data_file:
for index, line in enumerate(data_file):
if index % 10 != 0:
continue
frame_pcd, frame_labels = pcd_from_carla_line(line)
frame_points = np.asarray(frame_pcd.points)
frame_map_indices = map_tree.query(frame_points, distance_upper_bound=0.2)[1]
frame_labels_not_null_indices = np.where(frame_map_indices < map_points_count)[0]
frame_map_indices_not_null = frame_map_indices[frame_labels_not_null_indices]
map_labels[frame_map_indices_not_null] = frame_labels[frame_labels_not_null_indices]
print("Cloud {} annotations loaded!".format(index + 1))
max_used_label = np.max(map_labels)
for index, mesh_name in enumerate(mesh_names):
mesh_pcd_filename = "{}.pcd".format(mesh_name)
mesh_labels_filename = "{}.npy".format(mesh_name)
mesh_pcd = o3d.io.read_point_cloud(os.path.join(annot_path, mesh_pcd_filename))
mesh_labels = np.load(os.path.join(annot_path, mesh_labels_filename))
mesh_labels += max_used_label + 1
prev_max_used_label = max_used_label
max_used_label = max(max_used_label, np.max(mesh_labels))
mesh_points = np.asarray(mesh_pcd.points)
# Swap y and z axis for UE coordinates and go from cm to metres
mesh_points_z = mesh_points[:, 2].copy()
mesh_points[:, 2] = mesh_points[:, 1]
mesh_points[:, 1] = mesh_points_z
mesh_points /= 100
mesh_map_indices = map_tree.query(mesh_points, distance_upper_bound=0.2)[1]
mesh_labels_not_null_indices = np.where(mesh_map_indices < map_points_count)[0]
mesh_map_indices_not_null = mesh_map_indices[mesh_labels_not_null_indices]
map_labels[mesh_map_indices_not_null] = mesh_labels[mesh_labels_not_null_indices]
print("{0} is ready! ({1}/{2})".format(mesh_name, index + 1, len(mesh_names)))
print("{0} mesh labels: ({1}, {2})".format(mesh_name, prev_max_used_label + 1, max_used_label))
return map_labels
def annotate_frames(data_path: str, output_path: str, map_pcd: o3d.geometry.PointCloud, map_labels: np.array):
map_kd_tree = KDTree(np.asarray(map_pcd.points))
with open(data_path) as data_file:
for index, line in enumerate(data_file):
frame_pcd, _ = pcd_from_carla_line(line)
frame_labels = annotate_frame_with_map(frame_pcd, map_kd_tree, map_labels, map_pcd)
o3d.io.write_point_cloud(os.path.join(output_path, "{:06d}.pcd".format(index)), frame_pcd)
np.save(os.path.join(output_path, "{:06d}.npy".format(index)), frame_labels)
def unpacking_apply_along_axis(params):
"""
Like numpy.apply_along_axis(), but with arguments in a tuple
instead.
This function is useful with multiprocessing.Pool().map(): (1)
map() only handles functions that take a single argument, and (2)
this function can generally be imported from a module, as required
by map().
"""
func1d, axis, arr, args, kwargs = params
return np.apply_along_axis(func1d, axis, arr, *args, **kwargs)
def parallel_apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Like numpy.apply_along_axis(), but takes advantage of multiple
cores.
"""
# Effective axis where apply_along_axis() will be applied by each
# worker (any non-zero axis number would work, so as to allow the use
# of `np.array_split()`, which is only done on axis 0):
effective_axis = 1 if axis == 0 else axis
if effective_axis != axis:
arr = arr.swapaxes(axis, effective_axis)
# Chunks for the mapping (only a few chunks):
chunks = [(func1d, effective_axis, sub_arr, args, kwargs)
for sub_arr in np.array_split(arr, multiprocessing.cpu_count())]
pool = multiprocessing.Pool()
individual_results = pool.map(unpacking_apply_along_axis, chunks)
# Freeing the workers:
pool.close()
pool.join()
return np.concatenate(individual_results)
def get_most_popular_label(frame_indices: np.array, *args, **kwargs) -> int:
map_labels = kwargs['map_labels']
labels_arr = map_labels[frame_indices]
# use '-' for labels to set 0 as the last label to help argmax to choose better
values, counts = np.unique(-labels_arr, return_counts=True)
if len(values) == 1:
return -values[0]
if 0 in values:
counts[values == 0] -= np.count_nonzero(frame_indices == (map_labels.size - 1))
return -values[np.argmax(counts)]
def annotate_frame_with_map(frame_pcd: o3d.geometry.PointCloud, map_kd_tree: KDTree, map_labels: np.array, map_pcd) -> np.array:
# map_pcd.paint_uniform_color([0, 0, 0])
# labels_unique = np.unique(map_labels)
# colors = np.concatenate([np.asarray([[0,0,0]]), np.random.rand(np.max(labels_unique), 3)])
# map_colors = colors[map_labels]
# map_pcd.colors = o3d.utility.Vector3dVector(map_colors)
# frame_pcd.paint_uniform_color([1, 0, 0])
# o3d.visualization.draw_geometries([map_pcd, frame_pcd])
# vis = o3d.visualization.VisualizerWithEditing()
# vis.create_window()
# vis.add_geometry(map_pcd)
# vis.add_geometry(frame_pcd)
# vis.run() # user picks points
# vis.destroy_window()
# picked = vis.get_picked_points()
# print(np.asarray(map_pcd.points)[picked[0]])
# print(np.asarray(map_pcd.points)[picked[1]])
# print(map_labels[picked[0]])
# points with no reference will be marked with len(mapped_frame_pcd) index, so add zero to this index
map_labels = np.concatenate([map_labels, np.asarray([0])])
frame_indices_in_map = map_kd_tree.query(
np.asarray(frame_pcd.points),
distance_upper_bound=0.2,
k=20
)[1]
frame_labels = parallel_apply_along_axis(
get_most_popular_label,
axis=1,
arr=frame_indices_in_map,
map_labels=map_labels
)
return frame_labels
def process(data_path, annot_path, output_path):
map_filename = "carla_map.pcd"
annot_filename = "carla_map.npy"
if os.path.exists(map_filename):
map_pcd = o3d.io.read_point_cloud(map_filename)
else:
map_pcd = build_map(data_path)
o3d.io.write_point_cloud(map_filename, map_pcd)
map_pcd.paint_uniform_color([0, 0, 0])
if os.path.exists(annot_filename):
map_labels = np.load(annot_filename)
else:
map_labels = annotate_map(map_pcd, annot_path, data_path)
np.save(annot_filename, map_labels)
# visualize_pcd_labels(map_pcd, map_labels)
annotate_frames(data_path, output_path, map_pcd, map_labels)
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument(
'data_path',
help='path to measurements file'
)
argparser.add_argument(
'annot_path_path',
help='path to where to save new pcd'
)
argparser.add_argument(
'output_path',
help='path to where to save new pcd'
)
args = argparser.parse_args()
process(args.data_path, args.annot_path_path, args.output_path)
| [
"numpy.load",
"argparse.ArgumentParser",
"numpy.argmax",
"open3d.geometry.PointCloud",
"open3d.visualization.draw_geometries",
"numpy.unique",
"multiprocessing.cpu_count",
"open3d.io.write_point_cloud",
"numpy.apply_along_axis",
"numpy.max",
"numpy.save",
"numpy.asarray",
"open3d.io.read_poi... | [((395, 420), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (418, 420), True, 'import open3d as o3d\n'), ((569, 611), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors[labels]'], {}), '(colors[labels])\n', (595, 611), True, 'import open3d as o3d\n'), ((909, 976), 'numpy.asarray', 'np.asarray', (['[point_label_id[0] for point_label_id in points_packed]'], {}), '([point_label_id[0] for point_label_id in points_packed])\n', (919, 976), True, 'import numpy as np\n'), ((1150, 1175), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (1173, 1175), True, 'import open3d as o3d\n'), ((1193, 1227), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (1219, 1227), True, 'import open3d as o3d\n'), ((1321, 1346), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (1344, 1346), True, 'import open3d as o3d\n'), ((2031, 2068), 'numpy.zeros', 'np.zeros', (['map_points_count'], {'dtype': 'int'}), '(map_points_count, dtype=int)\n', (2039, 2068), True, 'import numpy as np\n'), ((2822, 2840), 'numpy.max', 'np.max', (['map_labels'], {}), '(map_labels)\n', (2828, 2840), True, 'import numpy as np\n'), ((5183, 5238), 'numpy.apply_along_axis', 'np.apply_along_axis', (['func1d', 'axis', 'arr', '*args'], {}), '(func1d, axis, arr, *args, **kwargs)\n', (5202, 5238), True, 'import numpy as np\n'), ((5937, 5959), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (5957, 5959), False, 'import multiprocessing\n'), ((6102, 6136), 'numpy.concatenate', 'np.concatenate', (['individual_results'], {}), '(individual_results)\n', (6116, 6136), True, 'import numpy as np\n'), ((6402, 6444), 'numpy.unique', 'np.unique', (['(-labels_arr)'], {'return_counts': '(True)'}), '(-labels_arr, return_counts=True)\n', (6411, 6444), True, 'import numpy as np\n'), ((8788, 8813), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), 
'()\n', (8811, 8813), False, 'import argparse\n'), ((473, 495), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (483, 495), True, 'import numpy as np\n'), ((645, 693), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[pcd_for_vis]'], {}), '([pcd_for_vis])\n', (678, 693), True, 'import open3d as o3d\n'), ((712, 759), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['filename', 'pcd_for_vis'], {}), '(filename, pcd_for_vis)\n', (736, 759), True, 'import open3d as o3d\n'), ((1927, 1953), 'numpy.asarray', 'np.asarray', (['map_pcd.points'], {}), '(map_pcd.points)\n', (1937, 1953), True, 'import numpy as np\n'), ((3348, 3375), 'numpy.asarray', 'np.asarray', (['mesh_pcd.points'], {}), '(mesh_pcd.points)\n', (3358, 3375), True, 'import numpy as np\n'), ((4311, 4337), 'numpy.asarray', 'np.asarray', (['map_pcd.points'], {}), '(map_pcd.points)\n', (4321, 4337), True, 'import numpy as np\n'), ((6548, 6602), 'numpy.count_nonzero', 'np.count_nonzero', (['(frame_indices == map_labels.size - 1)'], {}), '(frame_indices == map_labels.size - 1)\n', (6564, 6602), True, 'import numpy as np\n'), ((8236, 8273), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['map_filename'], {}), '(map_filename)\n', (8259, 8273), True, 'import open3d as o3d\n'), ((8331, 8378), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['map_filename', 'map_pcd'], {}), '(map_filename, map_pcd)\n', (8355, 8378), True, 'import open3d as o3d\n'), ((8484, 8507), 'numpy.load', 'np.load', (['annot_filename'], {}), '(annot_filename)\n', (8491, 8507), True, 'import numpy as np\n'), ((8592, 8627), 'numpy.save', 'np.save', (['annot_filename', 'map_labels'], {}), '(annot_filename, map_labels)\n', (8599, 8627), True, 'import numpy as np\n'), ((316, 339), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (326, 339), True, 'import numpy as np\n'), ((1978, 2004), 'numpy.asarray', 'np.asarray', (['map_pcd.points'], {}), 
'(map_pcd.points)\n', (1988, 2004), True, 'import numpy as np\n'), ((2331, 2359), 'numpy.asarray', 'np.asarray', (['frame_pcd.points'], {}), '(frame_pcd.points)\n', (2341, 2359), True, 'import numpy as np\n'), ((3304, 3323), 'numpy.max', 'np.max', (['mesh_labels'], {}), '(mesh_labels)\n', (3310, 3323), True, 'import numpy as np\n'), ((3736, 3781), 'numpy.where', 'np.where', (['(mesh_map_indices < map_points_count)'], {}), '(mesh_map_indices < map_points_count)\n', (3744, 3781), True, 'import numpy as np\n'), ((6625, 6642), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (6634, 6642), True, 'import numpy as np\n'), ((7708, 7723), 'numpy.asarray', 'np.asarray', (['[0]'], {}), '([0])\n', (7718, 7723), True, 'import numpy as np\n'), ((7781, 7809), 'numpy.asarray', 'np.asarray', (['frame_pcd.points'], {}), '(frame_pcd.points)\n', (7791, 7809), True, 'import numpy as np\n'), ((356, 370), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (362, 370), True, 'import numpy as np\n'), ((2494, 2540), 'numpy.where', 'np.where', (['(frame_map_indices < map_points_count)'], {}), '(frame_map_indices < map_points_count)\n', (2502, 2540), True, 'import numpy as np\n'), ((5895, 5922), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (5920, 5922), False, 'import multiprocessing\n')] |
import pdb
import torch
import numpy as np
import torchvision.transforms as T
import scipy.signal as signal
class Normalize(object):
    """Min-max normalization, optionally computed in log space.

    Fixes two ``cond and a or b`` anti-patterns from the original:
    * the torch/numpy dispatch relied on tensor truthiness, which raises
      for multi-element tensors;
    * a provided bound whose transform is falsy (e.g. min_v=1 with
      apply_log=True, log(1) == 0) silently fell back to the raw value.
    """
    def __init__(self, min_v=None, max_v=None, apply_log=False):
        if apply_log:
            # Dispatch on type explicitly instead of truthiness tricks.
            self.fn = lambda x: x.log() if isinstance(x, torch.Tensor) else np.log(x)
        else:
            self.fn = lambda x: x
        # Apply fn whenever a bound was actually provided.
        # NOTE(review): with apply_log a bound of 0 now maps to -inf.
        self.min_v = None if min_v is None else self.fn(min_v)
        self.max_v = None if max_v is None else self.fn(max_v)
    def __call__(self, x):
        # Fall back to per-column data extrema for any bound not provided.
        min_vals = self.fn(x.min(0)) if self.min_v is None else self.min_v
        max_vals = self.fn(x.max(0)) if self.max_v is None else self.max_v
        return (self.fn(x) - min_vals) / (max_vals - min_vals + 1e-5)
class QuantileNorm(object):
    """Scale x into the Tukey-fence range derived from its column quartiles."""
    def __call__(self, x):
        q1, q3 = np.quantile(x, [.25, .75], axis=0)
        spread = 1.5 * (q3 - q1)
        upper = q3 + spread
        lower = q1 - spread
        return (x - lower) / (upper - lower)
class Standarize(object):
    """Z-score-like scaling shifted by 0.5; stats default to column stats of x."""
    def __init__(self, mean_v=None, std_v=None, eps=1e-7):
        self.mean_v = mean_v
        self.std_v = std_v
        self.eps = eps
    def __call__(self, x):
        # Use provided statistics when available, otherwise compute from x.
        mu = x.mean(0) if self.mean_v is None else self.mean_v
        sigma = x.std(0) if self.std_v is None else self.std_v
        return (x - mu + .5) / (sigma + self.eps)
class Downsample(object):
    """Decimate a series by keeping every ``sampling_period``-th row."""
    def __init__(self, sampling_period):
        self.sampling_period = sampling_period
    def __call__(self, x):
        # Plain decimation.  An unreachable block-averaging variant that
        # followed the original return statement has been removed.
        return x[::self.sampling_period]
class Differenciate(object):
    """First-order difference along the time axis (output is one row shorter)."""
    def __call__(self, x):
        ahead, behind = x[1:], x[:-1]
        return ahead - behind
class WienerFilter(object):
    """Column-wise Wiener smoothing with window size n."""
    def __init__(self, n, noise_power=None):
        self.n = n
        self.noise_power = noise_power
    def __call__(self, x):
        filtered = x.copy()
        n_cols = x.shape[1]
        for col in range(n_cols):
            filtered[:, col] = signal.wiener(x[:, col], self.n, self.noise_power)
        return filtered
class MovingAverage(object):
    """Column-wise moving average of width n, trimming n samples at each edge."""
    def __init__(self, n):
        self.n = n
    def __call__(self, x):
        kernel = np.ones([self.n]) / self.n
        smoothed = [np.convolve(column, kernel) for column in x.transpose(1, 0)]
        return np.stack(smoothed, 1)[self.n:-self.n]
class RemoveOutliers(object):
    """Drop rows whose selected numeric columns fall outside Tukey fences.

    Does not support adaptive preprocessing: fences are recomputed from
    every array passed to __call__.
    """
    #TODO : deal with cat data and impl use_diff
    def __init__(self, q1, q2, num_ids=None, use_diff=False):
        # q1/q2: lower/upper quantile levels (e.g. 0.25 and 0.75).
        self.q1 = q1
        self.q2 = q2
        # num_ids: indices of the numeric columns to test.
        # NOTE(review): when left as None, x[:, None] inserts a new axis
        # instead of selecting columns — confirm callers always pass indices.
        self.num_ids = num_ids
        self.use_diff = False #! not implem yet
    def __call__(self, x):
        quantiles = np.quantile(x[:, self.num_ids], [self.q1, self.q2], axis=0)
        iqr = quantiles[1] - quantiles[0]
        # Keep only rows strictly inside [q1 - 1.5*IQR, q2 + 1.5*IQR] on
        # every selected column.
        return np.stack(tuple(filter(lambda v: all(v[self.num_ids] > quantiles[0] - 1.5 * iqr)\
                        and all(v[self.num_ids] < quantiles[1] + 1.5 * iqr), x)))
| [
"numpy.quantile",
"numpy.log",
"scipy.signal.wiener",
"numpy.ones",
"numpy.arange"
] | [((794, 830), 'numpy.quantile', 'np.quantile', (['x', '[0.25, 0.75]'], {'axis': '(0)'}), '(x, [0.25, 0.75], axis=0)\n', (805, 830), True, 'import numpy as np\n'), ((2757, 2816), 'numpy.quantile', 'np.quantile', (['x[:, self.num_ids]', '[self.q1, self.q2]'], {'axis': '(0)'}), '(x[:, self.num_ids], [self.q1, self.q2], axis=0)\n', (2768, 2816), True, 'import numpy as np\n'), ((2078, 2126), 'scipy.signal.wiener', 'signal.wiener', (['x[:, i]', 'self.n', 'self.noise_power'], {}), '(x[:, i], self.n, self.noise_power)\n', (2091, 2126), True, 'import scipy.signal as signal\n'), ((282, 291), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (288, 291), True, 'import numpy as np\n'), ((1667, 1713), 'numpy.arange', 'np.arange', (['(0)', 'x.shape[0]', 'self.sampling_period'], {}), '(0, x.shape[0], self.sampling_period)\n', (1676, 1713), True, 'import numpy as np\n'), ((2285, 2302), 'numpy.ones', 'np.ones', (['[self.n]'], {}), '([self.n])\n', (2292, 2302), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import codecs
from subprocess import run
import os
import networkx as nx
# Compact numpy printing for the console output produced below.
np.set_printoptions(precision=2, suppress=True)
def run_mfinder(N_nw):
    """Run the mfinder motif-detection binary on every existing edge file.

    Files are named 'seed=XX.edges' for XX in [0, N_nw); missing files
    are skipped silently.
    """
    for i in range(N_nw):
        fname = 'seed={:02d}.edges'.format(i)
        if os.path.isfile(fname):
            # Pass the argument vector as a list: a single command string
            # without shell=True is only interpreted correctly on Windows.
            run(['mfinder1.2.exe', fname])
def read_motif_data(N_nw):
    """Collect normalized 3-node motif z-scores from mfinder output files.

    Reads 'seed=XX.e_OUT.txt' for XX in [0, N_nw) and returns (x, y):
    x are plot abscissas (0, 2, ..., 24) and y is a list of 13 numpy
    arrays, one per motif, with the per-network normalized z-scores.
    Returns None if any file lacks a motif table.
    """
    # Map mfinder's subgraph IDs to the canonical motif order 0..12.
    id2x = {6: 0, 36: 1, 12: 2, 74: 3, 14: 4, 78: 5, 38: 6, 98: 7, 108: 8, 46: 9, 102: 10, 110: 11, 238: 12}
    # y_data = np.zeros((N, 13))
    y_data = [[] for i in range(13)]
    # for i_seq, i_real in nw_dict.items():
    for i_seed in range(N_nw):
        fname = 'seed={:02d}.e_OUT.txt'.format(i_seed)
        with codecs.open(fname, 'r') as fin:
            # Skip ahead to the motif table header line.
            start = 'ID STATS ZSCORE PVAL [MILI]'
            kk = len(start)
            for line in fin:
                if line[:kk] == start:
                    break
            else:
                print("Warning! no motifs information found in the following file:")
                print(fname, flush=True)
                return
            motif_z = {}
            for _ in range(13):
                # Motif entries come as a data line followed by a blank line.
                line = fin.readline()
                vals = line.split()
                motif_id = int(vals[0])
                # '888888' marks an undefined value in mfinder output; skip it.
                if vals[3] != '888888':
                    motif_z[id2x[motif_id]] = float(vals[3])
                line = fin.readline()
            # Normalization: scale the z-score vector of a network to unit length.
            if motif_z != {}:
                norm = np.sqrt(np.sum(np.array(list(motif_z.values()))**2))
            else:
                norm = 1
            for key, val in motif_z.items():
                motif_z[key] = val / norm
                y_data[key].append(val / norm)
    x = [2*i for i in range(13)]
    y = []
    for row in y_data:
        y.append(np.array(row))
    return x, y
def get_pos(k):
    """Node and label coordinates for the k-th motif glyph along the x axis."""
    x_step, y_drop, half = 2.0, 0.87, 0.09
    layout = {'A': [0.0, half], 'B': [-0.5, 0.0], 'C': [0.5, 0.0], 'label': [0.0, -half]}
    # Shift the whole glyph right by k slots and push it below the axis.
    return {name: [px + x_step * k, py - y_drop] for name, (px, py) in layout.items()}
def plot_motif_scores(N_nw, title):
    """Plot per-motif normalized z-score distributions for N_nw networks.

    Draws a violin plot + mean/std error bars for each of the 13 3-node
    motifs, with small networkx-rendered motif diagrams along the bottom,
    then saves the figure to 'motifs-no88.png' and shows it.
    """
    # Plot motif images at the bottom.  Keys are mfinder subgraph IDs,
    # values are the directed edge lists of each motif.
    motifs_dict = {
        6: [('A','B'), ('A','C')],
        36: [('A','C'), ('B','C')],
        12: [('A','B'), ('B','C')],
        74: [('A','B'), ('B','C'), ('C','B')],
        14: [('A','B'), ('B','C'), ('B','A')],
        78: [('A','B'), ('B','C'), ('B','A'), ('C','B')],
        38: [('A','B'), ('B','C'), ('A', 'C')],
        98: [('A','B'), ('B','C'), ('C', 'A')],
        108: [('A','B'), ('A','C'), ('B','C'), ('C','B')],
        46: [('A','B'), ('B','A'), ('A','C'), ('B','C')],
        102: [('A','B'), ('B','C'), ('C', 'A'), ('A','C')],
        110: [('A','B'), ('B','C'), ('C', 'A'), ('A','C'), ('C', 'B')],
        238: [('A','B'), ('B','A'), ('B','C'), ('C', 'B'), ('A','C'), ('C', 'A')]
    }
    fig, ax = plt.subplots(figsize=(9, 6))
    k = 0
    # for m_id in sorted(motifs_dict.keys()):
    for i in range(13):
        # Fixed display order of the motifs, matching read_motif_data's id2x.
        m_id = [6, 36, 12, 74, 14, 78, 38, 98, 108, 46, 102, 110, 238][i]
        axis = ax
        e_list = motifs_dict[m_id]
        m = nx.DiGraph()
        m.add_nodes_from(['A', 'B', 'C'])
        m.add_edges_from(e_list)
        pos = get_pos(k)
        nx.draw_networkx_nodes( m, pos, node_size=40, node_color=['C4', 'C1', 'C2'], ax=axis)
        nx.draw_networkx_edges( m, pos, node_size=40, width=1.0, arrowsize = 8, ax=axis)
        # nx.draw_networkx_labels(m, pos, font_size=8, ax=axis)
        axis.text(pos['label'][0], pos['label'][1], '%d'%(i+1), horizontalalignment='center')
        k += 1
    x, y = read_motif_data(N_nw)
    means = []
    for kk in range(13):
        print(kk, y[kk])
        # Violin plot
        if y[kk].size != 0:
            parts = ax.violinplot(y[kk], [kk*2], points=60, widths=0.9, showmeans=False, showextrema=False, bw_method=0.5)
            for pc in parts['bodies']:
                pc.set_facecolor('C0')
                # pc.set_edgecolor('black')
                pc.set_alpha(0.4)
            # Mean + error bars
            y_s = np.std(y[kk])
            y_m = np.mean(y[kk])
            means.append(y_m)
            ax.errorbar([kk*2], y_m, yerr=y_s, fmt='o', color='#4764a8', capsize=3.0)
        else:
            means.append(0.0)
    ax.plot(x, means, '-', lw=0.7, color='#4764a8')
    ax.plot([-1.0, 25.0], [0.0, 0.0], '--', lw=0.5, color='black', zorder=0, alpha=0.5)
    ax.set_ylabel('z-score, normalized')
    # ax.text(0.90, 0.95, '{}/{}'.format(len(nw_dict), N_nw), transform=ax.transAxes)
    ax.set_xticks(np.arange(0,25,2))
    ax.set_xticklabels('')
    y_range = np.arange(-0.6, 1, 0.2)
    ax.set_yticks(y_range)
    ax.set_yticklabels(['%.1f'%val for val in y_range])
    ax.tick_params(bottom=False, left=True, labelleft=True, labelbottom=False)
    plt.title(title)
    ax.set_xlim((-1.0, 25.0))
    ax.set_ylim((-1.0, 0.8))
    # ax.set_ylim((-0.5, 0.5))
    ax.grid(alpha = 0.6, linestyle = '--', linewidth = 0.2, color = 'black')
    plt.savefig('motifs-no88.png', dpi=400, bbox_inches = 'tight')
    plt.show()
# ---- Script entry: build a plot title from the working-directory name ----
N_nw = 100
dirpath = os.getcwd()
# Directory names look like 'N=..-Mc=..-p=..[-inv]..'; rewrite that into a
# human-readable title such as 'N=.., Mc=.., p=[..]; inv ..'.
title = dirpath[dirpath.find('N='):]
title = title.replace('-Mc=', ', Mc=')
title = title.replace('-p=', ', p=[')
if title.find('-inv') != -1:
    title = title.replace('-inv', ']; inv')
else:
    title += ']'
title = title.replace('_', ', ')
# run_mfinder(N_nw)
plot_motif_scores(N_nw, title)
| [
"matplotlib.pyplot.title",
"subprocess.run",
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"networkx.draw_networkx_edges",
"codecs.open",
"os.getcwd",
"numpy.std",
"os.path.isfile",
"numpy.mean",
"numpy.arange",
"networkx.draw_networkx_nodes",
"numpy.array",
"networkx.DiGraph",
"ma... | [((126, 173), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'suppress': '(True)'}), '(precision=2, suppress=True)\n', (145, 173), True, 'import numpy as np\n'), ((5144, 5155), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5153, 5155), False, 'import os\n'), ((2886, 2914), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (2898, 2914), True, 'import matplotlib.pyplot as plt\n'), ((4651, 4674), 'numpy.arange', 'np.arange', (['(-0.6)', '(1)', '(0.2)'], {}), '(-0.6, 1, 0.2)\n', (4660, 4674), True, 'import numpy as np\n'), ((4847, 4863), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4856, 4863), True, 'import matplotlib.pyplot as plt\n'), ((5038, 5098), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""motifs-no88.png"""'], {'dpi': '(400)', 'bbox_inches': '"""tight"""'}), "('motifs-no88.png', dpi=400, bbox_inches='tight')\n", (5049, 5098), True, 'import matplotlib.pyplot as plt\n'), ((5105, 5115), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5113, 5115), True, 'import matplotlib.pyplot as plt\n'), ((283, 304), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (297, 304), False, 'import os\n'), ((3143, 3155), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (3153, 3155), True, 'import networkx as nx\n'), ((3264, 3352), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['m', 'pos'], {'node_size': '(40)', 'node_color': "['C4', 'C1', 'C2']", 'ax': 'axis'}), "(m, pos, node_size=40, node_color=['C4', 'C1', 'C2'],\n ax=axis)\n", (3286, 3352), True, 'import networkx as nx\n'), ((3358, 3435), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['m', 'pos'], {'node_size': '(40)', 'width': '(1.0)', 'arrowsize': '(8)', 'ax': 'axis'}), '(m, pos, node_size=40, width=1.0, arrowsize=8, ax=axis)\n', (3380, 3435), True, 'import networkx as nx\n'), ((4590, 4609), 'numpy.arange', 'np.arange', (['(0)', '(25)', '(2)'], {}), '(0, 25, 
2)\n', (4599, 4609), True, 'import numpy as np\n'), ((318, 348), 'subprocess.run', 'run', (["('mfinder1.2.exe ' + fname)"], {}), "('mfinder1.2.exe ' + fname)\n", (321, 348), False, 'from subprocess import run\n'), ((702, 725), 'codecs.open', 'codecs.open', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (713, 725), False, 'import codecs\n'), ((1809, 1822), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (1817, 1822), True, 'import numpy as np\n'), ((4093, 4106), 'numpy.std', 'np.std', (['y[kk]'], {}), '(y[kk])\n', (4099, 4106), True, 'import numpy as np\n'), ((4125, 4139), 'numpy.mean', 'np.mean', (['y[kk]'], {}), '(y[kk])\n', (4132, 4139), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains utilities to run the test suite.
"""
import numpy as np
import mskpy
class TestInstruments():
    """Regression tests for mskpy instrument color corrections."""
    def test_irac(self, test=True):
        """Compare IRAC color corrections against handbook reference values.

        Each template maps a spectral shape (power laws in frequency, or
        blackbodies of various temperatures) to the four-channel reference
        correction factors and an acceptable relative tolerance.  With
        ``test=False`` the deviations are printed but not asserted.
        """
        import astropy.units as u
        from mskpy.util import planck
        from mskpy.instruments import IRAC
        irac = IRAC()
        # Color correction standard values from the IRAC Instrument
        # Handbook.  Those calculations are good to ~1%.
        templates = dict(
            nu_2 = (lambda w: w.value**2 * u.Jy,
                    [1.0037, 1.0040, 1.0052, 1.0111], 0.015),
            nu_1 = (lambda w: w.value**1 * u.Jy,
                    [1.0, 1.0, 1.0, 1.0], 0.015),
            nu0 = (lambda w: w.value**0 * u.Jy,
                   [1.0, 1.0, 1.0, 1.0], 0.015),
            nu1 = (lambda w: w.value**-1 * u.Jy,
                   [1.0037, 1.0040, 1.0052, 1.0113], 0.015),
            nu2 = (lambda w: w.value**-2 * u.Jy,
                   [1.0111, 1.0121, 1.0155, 1.0337], 0.015),
            bb5000 = (lambda x: planck(x, 5000., unit=u.Jy / u.sr) * u.sr,
                      [1.0063, 1.0080, 1.0114, 1.0269], 0.015),
            bb2000 = (lambda x: planck(x, 2000., unit=u.Jy / u.sr) * u.sr,
                      [0.9990, 1.0015, 1.0048, 1.0163], 0.015),
            bb1500 = (lambda x: planck(x, 1500., unit=u.Jy / u.sr) * u.sr,
                      [0.9959, 0.9983, 1.0012, 1.0112], 0.015),
            bb1000 = (lambda x: planck(x, 1000., unit=u.Jy / u.sr) * u.sr,
                      [0.9933, 0.9938, 0.9952, 1.0001], 0.015),
            bb800 = (lambda x: planck(x, 800., unit=u.Jy / u.sr) * u.sr,
                     [0.9953, 0.9927, 0.9921, 0.9928], 0.015),
            bb600 = (lambda x: planck(x, 600., unit=u.Jy / u.sr) * u.sr,
                     [1.0068, 0.9961, 0.9907, 0.9839], 0.015),
            bb400 = (lambda x: planck(x, 400., unit=u.Jy / u.sr) * u.sr,
                     [1.0614, 1.0240, 1.0042, 0.9818], 0.015),
            bb200 = (lambda x: planck(x, 200., unit=u.Jy / u.sr) * u.sr,
                     [1.5138, 1.2929, 1.1717, 1.1215], 0.03)
        )
        # NOTE(review): v is unused; templates[k] re-fetches the same tuple.
        for k, v in templates.items():
            f, K0, rtol = templates[k]
            K = irac.ccorrection(f)
            print(k, (K - K0) / K0)
            if test:
                assert np.allclose(K, K0, rtol=rtol)
| [
"mskpy.instruments.IRAC",
"numpy.allclose",
"mskpy.util.planck"
] | [((353, 359), 'mskpy.instruments.IRAC', 'IRAC', ([], {}), '()\n', (357, 359), False, 'from mskpy.instruments import IRAC\n'), ((2346, 2375), 'numpy.allclose', 'np.allclose', (['K', 'K0'], {'rtol': 'rtol'}), '(K, K0, rtol=rtol)\n', (2357, 2375), True, 'import numpy as np\n'), ((1071, 1106), 'mskpy.util.planck', 'planck', (['x', '(5000.0)'], {'unit': '(u.Jy / u.sr)'}), '(x, 5000.0, unit=u.Jy / u.sr)\n', (1077, 1106), False, 'from mskpy.util import planck\n'), ((1210, 1245), 'mskpy.util.planck', 'planck', (['x', '(2000.0)'], {'unit': '(u.Jy / u.sr)'}), '(x, 2000.0, unit=u.Jy / u.sr)\n', (1216, 1245), False, 'from mskpy.util import planck\n'), ((1349, 1384), 'mskpy.util.planck', 'planck', (['x', '(1500.0)'], {'unit': '(u.Jy / u.sr)'}), '(x, 1500.0, unit=u.Jy / u.sr)\n', (1355, 1384), False, 'from mskpy.util import planck\n'), ((1488, 1523), 'mskpy.util.planck', 'planck', (['x', '(1000.0)'], {'unit': '(u.Jy / u.sr)'}), '(x, 1000.0, unit=u.Jy / u.sr)\n', (1494, 1523), False, 'from mskpy.util import planck\n'), ((1626, 1660), 'mskpy.util.planck', 'planck', (['x', '(800.0)'], {'unit': '(u.Jy / u.sr)'}), '(x, 800.0, unit=u.Jy / u.sr)\n', (1632, 1660), False, 'from mskpy.util import planck\n'), ((1762, 1796), 'mskpy.util.planck', 'planck', (['x', '(600.0)'], {'unit': '(u.Jy / u.sr)'}), '(x, 600.0, unit=u.Jy / u.sr)\n', (1768, 1796), False, 'from mskpy.util import planck\n'), ((1898, 1932), 'mskpy.util.planck', 'planck', (['x', '(400.0)'], {'unit': '(u.Jy / u.sr)'}), '(x, 400.0, unit=u.Jy / u.sr)\n', (1904, 1932), False, 'from mskpy.util import planck\n'), ((2034, 2068), 'mskpy.util.planck', 'planck', (['x', '(200.0)'], {'unit': '(u.Jy / u.sr)'}), '(x, 200.0, unit=u.Jy / u.sr)\n', (2040, 2068), False, 'from mskpy.util import planck\n')] |
from __future__ import division
import pandas as pd
from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser
import numpy as np
import random
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
import os
from collections import Counter, defaultdict
from .utils_figures import get_fdbinsize
from scipy.stats import scoreatpercentile
from sklearn.isotonic import IsotonicRegression
import logging
import warnings
warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\n'
logger = logging.getLogger(__name__)
SEED = 42
class NoDecoyError(ValueError):
    """Raised when the input table contains no decoy PSMs."""
    pass
class WrongInputError(NotImplementedError):
    """Raised for unsupported input file formats (not pepXML/mzid)."""
    pass
class EmptyFileError(ValueError):
    """Raised when the input file yields no PSM rows."""
    pass
def filter_custom(df, fdr, key, is_decoy, reverse, remove_decoy, ratio, formula, correction=None, loglabel=None):
    """Filter a PSM table to the given FDR, preferring the +1-corrected q-value.

    Computes q-values both with and without the +1 decoy correction, then
    filters on the corrected one if it can reach ``fdr`` (or on whichever
    ``correction`` explicitly requests).  ``loglabel`` only tags log messages.
    Returns a copy of the passing rows with 'q'/'q_uncorrected' columns added.
    """
    kw = dict(key=key, is_decoy=is_decoy, reverse=reverse, full_output=True,
              remove_decoy=False, ratio=ratio, formula=formula)
    df = df.copy()
    q = aux.qvalues(df, correction=1, **kw)
    q_uncorr = aux.qvalues(df, correction=0, **kw)
    df['q'] = q['q']
    df['q_uncorrected'] = q_uncorr['q']
    if correction is not None:
        # Caller pinned the q-value flavor explicitly.
        qlabel = 'q' if correction else 'q_uncorrected'
        logger.debug('Explicitly using %s for filtering.', qlabel)
    elif df['q'].min() < fdr:
        logger.debug('Successfully filtered with +1 correction (label = %s).', loglabel)
        qlabel = 'q'
    else:
        # Corrected q-values cannot pass the threshold at all; fall back.
        logger.info('No results for filtering with +1 correction (label = %s). Rerunning without correction...', loglabel)
        qlabel = 'q_uncorrected'
    if remove_decoy:
        df = df[~df[is_decoy]]
    return df[df[qlabel] < fdr].copy()
def convert_tandem_cleave_rule_to_regexp(cleavage_rule):
    """Translate an X!Tandem cleavage rule (e.g. '[RK]|{P}') into a regexp.

    Multiple comma-separated protease rules are OR-ed together; 'X' stands
    for any standard amino acid.
    """
    def get_sense(c_term_rule, n_term_rule):
        # '{...}' denotes an exclusion set; which side carries it determines
        # whether cleavage is N- or C-terminal to the matched residue.
        if '{' in c_term_rule:
            return 'N'
        elif '{' in n_term_rule:
            return 'C'
        else:
            if len(c_term_rule) <= len(n_term_rule):
                return 'C'
            else:
                return 'N'
    def get_cut(cut, no_cut):
        # Normalize both sides to plain residue strings over the standard alphabet.
        aminoacids = set(parser.std_amino_acids)
        cut = ''.join(aminoacids & set(cut))
        if '{' in no_cut:
            no_cut = ''.join(aminoacids & set(no_cut))
            return cut, no_cut
        else:
            # An inclusion set on the no-cut side means everything else blocks.
            no_cut = ''.join(set(parser.std_amino_acids) - set(no_cut))
            return cut, no_cut
    out_rules = []
    for protease in cleavage_rule.split(','):
        protease = protease.replace('X', ''.join(parser.std_amino_acids))
        c_term_rule, n_term_rule = protease.split('|')
        sense = get_sense(c_term_rule, n_term_rule)
        if sense == 'C':
            cut, no_cut = get_cut(c_term_rule, n_term_rule)
        else:
            cut, no_cut = get_cut(n_term_rule, c_term_rule)
        # Lookahead keeps the cleavage point zero-width on the blocked side.
        if no_cut:
            if sense == 'C':
                out_rules.append('([%s](?=[^%s]))' % (cut, no_cut))
            else:
                out_rules.append('([^%s](?=[%s]))' % (no_cut, cut))
        else:
            if sense == 'C':
                out_rules.append('([%s])' % (cut, ))
            else:
                out_rules.append('(?=[%s])' % (cut, ))
    return '|'.join(out_rules)
def calc_TOP3(df):
    """Replace each TOP3 list with the sum of its three largest values (in place)."""
    def _top3_sum(intensities):
        return sum(sorted(intensities, reverse=True)[:3])
    df['TOP3'] = df['TOP3'].apply(_top3_sum)
def calc_NSAF(df):
    """Add normalized spectral abundance factor columns to df (in place)."""
    saf = df['PSMs'] / df['length']
    df['NSAF'] = saf / np.sum(saf)
    # LOG10_NSAF is only added when at least one NSAF value is defined.
    if pd.notna(df['NSAF']).sum():
        df['LOG10_NSAF'] = np.log10(df['NSAF'])
    # Undefined ratios become exact zeros.
    df.loc[pd.isna(df['NSAF']), 'NSAF'] = 0.0
def keywithmaxval(d):
    """Return the key of d whose value is largest (first seen wins on ties).

    Uses max() with a key function instead of building parallel key/value
    lists, which is clearer and avoids two O(n) copies per call.
    """
    return max(d, key=d.get)
def add_protein_groups(df, df_ms1_path, sort_random=True):
    # input: df - pandas dataframe which contains columns "dbname" with protein names and
    # "peptides set" with set of peptide sequences belong to this protein and identified in MS/MS analysis.
    # df_ms1_path - path to the table *_proteins_full_noexclusion.tsv which is output by DirectMS1 analysis.
    # sort_random - if True, the proteins with same scores are chosen randomly. Otherwise, they are chosen
    # in alphabetical order.
    # output: pandas dataframe with new columns "groupleader" (True for protein group leaders) and
    # "all proteins" (list of proteins belong to protein group and separeted by ';')
    pept_prots = defaultdict(set)   # peptide -> proteins containing it
    prot_prots = defaultdict(set)   # protein -> proteins sharing a peptide with it
    prot_pepts = dict()             # protein -> its peptide set
    if not sort_random:
        iter_list = df.sort_values(by='dbname').reset_index(drop=True)[['peptides set', 'dbname']].values
    else:
        iter_list = df.sample(frac=1).reset_index(drop=True)[['peptides set', 'dbname']].values
    for peptides, dbname in iter_list:
        prot_pepts[dbname] = peptides
        for peptide in peptides:
            pept_prots[peptide].add(dbname)
    for prots in pept_prots.values():
        for dbname in prots:
            prot_prots[dbname].update(prots)
    # Greedy set cover: repeatedly promote the protein explaining the most
    # still-unexplained peptides, optionally tie-broken by its DirectMS1 score.
    prot_pepts_count = dict()
    prot_pepts_count2 = dict()
    if not df_ms1_path:
        for k, v in prot_pepts.items():
            prot_pepts_count[k] = len(v)
            prot_pepts_count2[k] = len(v)
    else:
        df_ms1 = pd.read_csv(df_ms1_path, sep='\t')
        ms1s = dict()
        for qval, prot in df_ms1[['score', 'dbname']].values:
            ms1s[prot] = float(qval)
        max_k = max(ms1s.values())
        for k, v in prot_pepts.items():
            # Fractional MS1 bonus (< 1) breaks ties without outweighing a peptide.
            prot_pepts_count[k] = len(v) + ms1s.get(k, 0) / max_k
            prot_pepts_count2[k] = len(v)
    tostay = set()
    while pept_prots:
        bestprot = keywithmaxval(prot_pepts_count)
        tostay.add(bestprot)
        # Remove every peptide explained by the chosen leader from all scores.
        for pep in prot_pepts[bestprot]:
            for k in pept_prots[pep]:
                prot_pepts_count[k] -= 1
                prot_pepts_count2[k] -= 1
            del pept_prots[pep]
    for k, v in list(prot_pepts_count2.items()):
        if v == 0:
            del prot_pepts_count[k]
            del prot_pepts_count2[k]
    df['groupleader'] = df['dbname'].apply(lambda x: x in tostay)
    df['all proteins'] = df['dbname'].apply(lambda x: ';'.join(prot_prots[x]))
def process_fasta(df, path_to_fasta, decoy_prefix, decoy_infix=False):
    """Attach protein sequences from a FASTA file to df (column 'sequence').

    If the FASTA contains no decoy entries at all, decoy sequences are
    recovered by stripping the decoy prefix/infix from the accession and
    looking up the corresponding target entry instead.
    """
    protsS = dict()
    decoy_check_flag = False
    for x in fasta.read(path_to_fasta):
        dbname = x[0].split(' ')[0]
        if not decoy_check_flag:
            if (not decoy_infix and dbname.startswith(decoy_prefix)) or (decoy_infix and decoy_infix in dbname):
                decoy_check_flag = True
        protsS[dbname] = x[1]
    # Try the full description first, then just the accession token.
    df['sequence'] = df['dbname'].apply(lambda x: protsS.get(x, protsS.get(x.split(' ')[0], '')))
    if not decoy_check_flag:
        if not decoy_infix:
            df['sequence'] = df.apply(
                lambda x: x['sequence'] if x['sequence'] else protsS.get(
                    x['dbname'].replace(decoy_prefix, ''), protsS.get(x['dbname'].split(' ')[0].replace(decoy_prefix, ''), '')),
                axis=1)
        else:
            df['sequence'] = df.apply(
                lambda x: x['sequence'] if x['sequence'] else protsS.get(
                    x['dbname'].replace(decoy_infix, ''), protsS.get(x['dbname'].split(' ')[0].replace(decoy_infix, ''), '')),
                axis=1)
    return df
def get_tag_names(columns):
    """Return the isobaric-tag columns, i.e. those prefixed with 'tag_'."""
    return [name for name in columns if name.startswith('tag_')]
def get_proteins_dataframe(df1_f2, decoy_prefix, all_decoys_2, decoy_infix=False, path_to_fasta=False, pif_threshold=0):
    """Aggregate a filtered PSM table into a per-protein DataFrame.

    Accumulates per-protein PSM counts, peptide sets, TOP3 intensities,
    PEP score products and (optionally PIF-thresholded) tag intensities,
    then adds sequence-derived columns (length, coverage, NSAF, TOP3).
    Returns (protein dataframe, tag normalization vector).
    """
    proteins_dict = dict()
    cols = ['protein', 'protein_descr', 'peptide', 'PEP', 'MS1Intensity', 'PIF']
    tagnames = get_tag_names(df1_f2.columns)
    tagsums = []
    for c in tagnames:
        cols.append(c)
        tagsums.append(0.)
    have_pif = 'PIF' in df1_f2.columns
    if not have_pif:
        logger.debug('PIF not found.')
        if pif_threshold > 0:
            logger.warning('PIF not found, threshold will not be applied.')
        # Temporary column so the row unpacking below stays uniform.
        df1_f2['PIF'] = 0.
    for tup in df1_f2[cols].values:
        # avoid star unpacking to support Python 2.7 for a little longer
        (proteins, protein_descriptions, peptide, pep, ms1_i, pif), tags = tup[:6], tup[6:]
        for prot, prot_descr in zip(proteins, protein_descriptions):
            if prot not in proteins_dict:
                # Initialize the accumulator record on first sight.
                proteins_dict[prot] = dict()
                proteins_dict[prot]['dbname'] = prot
                proteins_dict[prot]['description'] = prot_descr
                proteins_dict[prot]['PSMs'] = 0
                proteins_dict[prot]['peptides set'] = set()
                proteins_dict[prot]['sequence'] = ''
                proteins_dict[prot]['NSAF'] = 0
                proteins_dict[prot]['TOP3'] = []
                proteins_dict[prot]['sq'] = 0
                proteins_dict[prot]['score'] = dict()
                proteins_dict[prot]['q-value'] = 1.0
                for tag in tagnames:
                    proteins_dict[prot][tag] = 0.
                if not decoy_infix:
                    proteins_dict[prot]['decoy'] = prot.startswith(decoy_prefix)
                else:
                    proteins_dict[prot]['decoy'] = decoy_infix in prot
                proteins_dict[prot]['decoy2'] = prot in all_decoys_2
                proteins_dict[prot]['decoy1'] = proteins_dict[prot]['decoy'] and not proteins_dict[prot]['decoy2']
            proteins_dict[prot]['peptides set'].add(peptide)
            proteins_dict[prot]['TOP3'].append(ms1_i)
            # Keep the best (lowest) PEP per peptide.
            proteins_dict[prot]['score'][peptide] = min(proteins_dict[prot]['score'].get(peptide, 1.0), pep)
            proteins_dict[prot]['PSMs'] += 1
            if pif > pif_threshold:
                for tag, val in zip(tagnames, tags):
                    proteins_dict[prot][tag] += val
        if not have_pif or pif > pif_threshold:
            for i, (tag, val) in enumerate(zip(tagnames, tags)):
                tagsums[i] += val
    if not have_pif:
        df1_f2.drop('PIF', axis='columns', inplace=True)
    df_proteins = pd.DataFrame.from_dict(proteins_dict, orient='index').reset_index()
    if path_to_fasta:
        df_proteins = process_fasta(df_proteins, path_to_fasta, decoy_prefix, decoy_infix)
    df_proteins['length'] = df_proteins['sequence'].apply(len)
    df_proteins['sq'] = df_proteins.apply(calc_sq, axis=1)
    df_proteins['peptides'] = df_proteins['peptides set'].apply(len)
    df_proteins['PSMs'] = df_proteins.apply(lambda x: max(x['PSMs'], x['peptides']), axis=1)
    # df_proteins.loc[:, ['PSMs', 'peptides']].max(axis=1)
    calc_NSAF(df_proteins)
    calc_TOP3(df_proteins)
    # Protein score: product of its peptides' best PEPs.
    df_proteins['score'] = df_proteins['score'].apply(lambda x: np.prod(list(x.values())))
    norm = np.array(tagsums)
    df_proteins[tagnames] /= norm
    return df_proteins, norm
def calc_sq(df_raw):
    """Percent of the protein sequence covered by its identified peptides.

    L and I are collapsed before matching because they are indistinguishable
    by mass.  Returns 0 for an empty sequence.
    """
    seq = df_raw['sequence'].replace('L', 'I')
    if not seq:
        return 0
    covered = [False] * len(seq)
    for raw_pep in df_raw['peptides set']:
        pep = raw_pep.replace('L', 'I')
        width = len(pep)
        for start in range(len(seq)):
            if seq[start:start + width] == pep:
                # Slice length equals width here, since the comparison matched.
                covered[start:start + width] = [True] * width
    return float(sum(covered)) / len(covered) * 100
def get_output_basename(fname, suffix=''):
    """Base name for output files: fname stripped of directory and extension(s),
    plus an optional suffix."""
    stem, ext = os.path.splitext(os.path.basename(fname))
    if 'pep' not in ext.lower():
        # Not a *.pep.* style name: drop one more extension level
        # (handles double extensions such as 'sample.pep.xml').
        stem = os.path.splitext(stem)[0]
    return stem + suffix
def get_output_folder(folder, fname):
    """Return the directory for output files, creating `folder` if needed.

    When no folder is given, outputs go next to the input file `fname`.
    """
    if folder:
        if not os.path.isdir(folder):
            os.mkdir(folder)
        return folder
    return os.path.dirname(os.path.realpath(fname))
def calc_RT(seq, RC):
    """Predicted retention time of peptide `seq` under retention coefficients RC.

    Returns 0 if achrom raises for any reason (e.g. residues missing from RC).
    """
    try:
        return achrom.calculate_RT(seq, RC)
    except Exception:
        return 0
def is_decoy(proteins, decoy_prefix, decoy_infix=False):
    """True when every protein name is a decoy (by infix if given, else by prefix)."""
    if decoy_infix:
        check = lambda name: decoy_infix in name
    else:
        check = lambda name: name.startswith(decoy_prefix)
    return all(check(name) for name in proteins)
def is_group_specific(proteins, group_prefix, group_infix, decoy_prefix, decoy_infix=None):
    """True when every protein belongs to the group (infix match if given, prefix otherwise)."""
    if group_infix:
        return all(group_infix in name for name in proteins)
    if decoy_infix:
        return all(name.startswith(group_prefix) for name in proteins)
    # Prefix mode: decoy entries carry decoy_prefix in front of the group prefix.
    allowed = (group_prefix, decoy_prefix + group_prefix)
    return all(name.startswith(allowed) for name in proteins)
def is_decoy_2(proteins, decoy_set):
    """True when every protein name belongs to decoy_set (the 'decoy2' half)."""
    return decoy_set.issuperset(proteins)
def split_fasta_decoys(db, decoy_prefix, decoy_infix=None):
    """Deterministically mark half of the decoy proteins in FASTA `db` as decoy2.

    Returns the set of accessions chosen for the decoy2 half (used for FDR
    estimation); the remaining decoys become decoy1 (used for ML training).
    """
    decoy_dbnames = set()
    with fasta.read(db) as f:
        for protein in f:
            dbname = protein.description.split()[0]
            if (decoy_infix and decoy_infix in dbname) or dbname.startswith(decoy_prefix):
                decoy_dbnames.add(dbname)
    # Sorting + fixed seed makes the split reproducible between runs.
    decoy_dbnames = sorted(decoy_dbnames)
    random.seed(SEED)
    all_decoys_2 = set(random.sample(decoy_dbnames, len(decoy_dbnames) // 2))
    logger.debug('Marking %s out of %s decoys as decoy2', len(all_decoys_2), len(decoy_dbnames))
    return all_decoys_2
def split_decoys(df):
    """Split decoys into decoy1 and decoy2 without FASTA file.

    Adds boolean 'decoy2'/'decoy1' columns to df in place and returns the
    set of protein names assigned to the decoy2 half.
    """
    all_decoys = set()
    for proteins in df.loc[df.decoy, 'protein'].values:
        all_decoys.update(proteins)
        # NOTE(review): this logs every decoy PSM's protein list at DEBUG level.
        logger.debug('proteins: %s', proteins)
    all_decoys = sorted(all_decoys) # sort is done for working of random SEED
    random.seed(SEED)
    all_decoys_2 = set(random.sample(all_decoys, int(len(all_decoys) / 2)))
    df['decoy2'] = df['protein'].apply(is_decoy_2, decoy_set=all_decoys_2)
    df['decoy1'] = df.apply(lambda x: x['decoy'] and not x['decoy2'], axis=1)
    return all_decoys_2
def remove_column_hit_rank(df):
    """Keep only rank-1 hits; return df unchanged when 'hit_rank' is absent."""
    if 'hit_rank' not in df.columns:
        return df
    return df[df['hit_rank'] == 1]
def parse_mods(df_raw):
    """Count modifications of a pepXML PSM row as {'mass shift X at AA': n}.

    'modifications' entries look like 'mass@position' with 1-based residue
    positions; position 0 is the N-terminus and len(peptide)+1 the C-terminus.
    Non-list values (e.g. NaN for unmodified peptides) yield an empty dict.
    """
    mods_counter = {}
    sequence, mods = df_raw['peptide'], df_raw['modifications']
    if isinstance(mods, list):
        for mod in mods:
            mod_mass, aa_ind = mod.split('@')
            mod_mass = float(mod_mass)
            aa_ind = int(aa_ind)
            if aa_ind == 0:
                aa = 'N_term'
                # 1.007825 = monoisotopic H; presumably the reported terminal
                # mass includes it and is stripped here to get the bare shift.
                mod_mass = round(mod_mass - 1.007825, 3)
            elif aa_ind == len(sequence) + 1:
                aa = 'C_term'
                # 17.002735 = monoisotopic OH, stripped analogously.
                mod_mass = round(mod_mass - 17.002735, 3)
            else:
                aa = sequence[aa_ind - 1]
                # Reported mass includes the residue; subtract its monoisotopic mass.
                mod_mass = round(mod_mass - mass.std_aa_mass[aa], 3)
            mod_name = 'mass shift %.3f at %s' % (mod_mass, aa)
            mods_counter[mod_name] = mods_counter.get(mod_name, 0) + 1
    return mods_counter
def parse_mods_msgf(df_raw):
    """Count modifications of an MSGF+ PSM row as {'mass shift X at AA': n}."""
    counts = {}
    sequence = df_raw['peptide']
    mods = df_raw['Modification']
    # Unmodified peptides carry a non-list value (e.g. NaN): nothing to count.
    if not isinstance(mods, list):
        return counts
    for mod in mods:
        position = mod['location']
        shift = round(mod['monoisotopicMassDelta'], 3)
        if position == 0:
            residue = 'N_term'
        elif position == len(sequence) + 1:
            residue = 'C_term'
        else:
            residue = sequence[position - 1]
        label = 'mass shift %.3f at %s' % (shift, residue)
        counts[label] = counts.get(label, 0) + 1
    return counts
def add_mod_info(df_raw, mod):
    """Ternary indicator for modification `mod` on one PSM row.

    Returns -1 when the peptide cannot carry the mod (residue absent),
    1 when the mod is present, and 0 when possible but absent.
    """
    residue = mod.split(' at ')[1]
    applicable = 'term' in residue or residue in df_raw['peptide']
    if not applicable:
        return -1
    return 1 if df_raw['mods_counter'].get(mod, 0) >= 1 else 0
def prepare_mods(df):
    """Add one ternary indicator column per modification seen in df (in place)."""
    seen = set()
    for counter in df['mods_counter'].values:
        seen.update(counter.keys())
    for mod in seen:
        df[mod] = df.apply(add_mod_info, axis=1, mod=mod)
def prepare_dataframe(infile_path, decoy_prefix=None, decoy_infix=False, cleavage_rule=False, fdr=0.01, decoy2set=None):
    """Load a pepXML/mzIdentML search result into a feature-annotated DataFrame.

    Normalizes engine-specific columns (Morpheus, MSGF+, MSFragger), filters
    short/NaN peptides, marks decoy PSMs and splits them into decoy1/decoy2
    halves, parses modifications, and calibrates an achrom retention-time
    model on confidently filtered PSMs.  Returns (dataframe, decoy2set).

    Raises WrongInputError for unknown extensions, EmptyFileError for an
    empty table and NoDecoyError when no decoy PSMs are present.
    """
    if not cleavage_rule:
        cleavage_rule = parser.expasy_rules['trypsin']
    if infile_path.lower().endswith('.pep.xml') or infile_path.lower().endswith('.pepxml'):
        df1 = pepxml.DataFrame(infile_path)
        ftype = 'pepxml'
    elif infile_path.lower().endswith('.mzid'):
        # NOTE(review): this branch does not set ftype; if 'MS-GF:EValue' is
        # absent from an mzid file, the ftype check below raises NameError.
        df1 = mzid.DataFrame(infile_path)
    else:
        raise WrongInputError()
    if not df1.shape[0]:
        raise EmptyFileError()
    if 'Morpheus Score' in df1.columns:
        df1 = df1[df1['Morpheus Score'] != 0]
        df1['expect'] = 1 / df1['Morpheus Score']
        df1['num_missed_cleavages'] = df1['peptide'].apply(lambda x: parser.num_sites(x, rule=cleavage_rule))
    if 'MS-GF:EValue' in df1.columns:
        # MSGF search engine: map its column names onto the pepXML-style schema.
        ftype = 'msgf'
        df1['peptide'] = df1['PeptideSequence']
        df1['num_missed_cleavages'] = df1['peptide'].apply(lambda x: parser.num_sites(x, rule=cleavage_rule))
        df1['assumed_charge'] = df1['chargeState']
        df1['spectrum'] = df1['spectrumID']
        df1['massdiff'] = (df1['experimentalMassToCharge'] - df1['calculatedMassToCharge']) * df1['assumed_charge']
        df1['calc_neutral_pep_mass'] = df1['calculatedMassToCharge'] * df1['chargeState'] - df1['chargeState'] * 1.00727649
        df1['protein'] = df1['accession']
        df1['protein_descr'] = df1['protein description']
        df1['expect'] = df1['MS-GF:EValue']
    if set(df1['protein_descr'].str[0]) == {None}:
        # MSFragger: protein name and description arrive fused in one string.
        logger.debug('Adapting MSFragger DataFrame.')
        logger.debug('Proteins before: %s', df1.loc[1, 'protein'])
        protein = df1['protein'].apply(lambda row: [x.split(None, 1) for x in row])
        df1['protein'] = protein.apply(lambda row: [x[0] for x in row])
        try:
            df1['protein_descr'] = protein.apply(lambda row: [x[1] for x in row])
        except IndexError:
            df1['protein_descr'] = protein.apply(lambda row: ['' for x in row])
        logger.debug('Proteins after: %s', df1.loc[1, 'protein'])
    # if any(None in set(df1['protein_descr'].str[0])):
    #     print('HERE')
    #     df1['protein_descr'] = df1.apply(lambda x: x['protein_descr'] if x['protein_descr'] else x['protein'], axis=1)
    df1.loc[pd.isna(df1['protein_descr']), 'protein_descr'] = df1.loc[pd.isna(df1['protein_descr']), 'protein']
    # try:
    #     df1['expect'] = 1.0 / df1['bions_score_neg'].values
    # except:
    #     pass
    df1 = df1[~pd.isna(df1['peptide'])]
    if 'MS1Intensity' not in df1:
        df1['MS1Intensity'] = 0.0
    # Drop very short peptides, which are not informative.
    df1['length'] = df1['peptide'].apply(len)
    df1 = df1[df1['length'] >= 6]
    df1['spectrum'] = df1['spectrum'].apply(lambda x: x.split(' RTINS')[0])
    if 'retention_time_sec' not in df1.columns:
        if 'scan start time' in df1.columns:
            df1['RT exp'] = df1['scan start time']
            df1 = df1.drop(['scan start time', ], axis=1)
        else:
            df1['RT exp'] = 0
    else:
        # Retention times are kept in minutes throughout.
        df1['RT exp'] = df1['retention_time_sec'] / 60
        df1 = df1.drop(['retention_time_sec', ], axis=1)
    df1['massdiff_int'] = df1['massdiff'].apply(lambda x: int(round(x, 0)))
    # 1.003354 = C13-C12 mass difference, for isotope-error-corrected ppm.
    df1['massdiff_ppm'] = 1e6 * (df1['massdiff'] - df1['massdiff_int'] * 1.003354) / df1['calc_neutral_pep_mass']
    df1['decoy'] = df1['protein'].apply(is_decoy, decoy_prefix=decoy_prefix, decoy_infix=decoy_infix)
    if not df1.decoy.sum():
        raise NoDecoyError()
    if decoy2set is None:
        decoy2set = split_decoys(df1)
    else:
        df1['decoy2'] = df1['protein'].apply(lambda p: all(x in decoy2set for x in p))
        df1['decoy1'] = df1['decoy'] & (~df1['decoy2'])
    df1 = remove_column_hit_rank(df1)
    if ftype == 'pepxml':
        df1['mods_counter'] = df1.apply(parse_mods, axis=1)
    elif ftype == 'msgf':
        df1['mods_counter'] = df1.apply(parse_mods_msgf, axis=1)
    prepare_mods(df1)
    pep_ratio = df1['decoy2'].sum() / df1['decoy'].sum()
    # Pre-filter at the given FDR (decoy2-based) to get PSMs for RT calibration.
    df1_f = filter_custom(df1[~df1['decoy1']], fdr=fdr, key='expect', is_decoy='decoy2',
        reverse=False, remove_decoy=False, ratio=pep_ratio, formula=1, correction=None, loglabel='PSMs default')
    num_psms_def = df1_f[~df1_f['decoy2']].shape[0]
    logger.info('Default target-decoy filtering, 1%% PSM FDR: Number of target PSMs = %d', num_psms_def)
    try:
        logger.info('Calibrating retention model...')
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            retention_coefficients = achrom.get_RCs_vary_lcp(df1_f['peptide'].values, df1_f['RT exp'].values)
        df1_f['RT pred'] = df1_f['peptide'].apply(lambda x: calc_RT(x, retention_coefficients))
        df1['RT pred'] = df1['peptide'].apply(lambda x: calc_RT(x, retention_coefficients))
        _, _, r_value, std_value = aux.linear_regression(df1_f['RT pred'], df1_f['RT exp'])
        logger.info('RT model training results: R^2 = %f , std = %f', r_value**2, std_value)
        df1['RT diff'] = df1['RT pred'] - df1['RT exp']
        logger.info('Retention model calibrated successfully.')
    except Exception:
        # Fall back to the published Krokhin coefficients when calibration fails.
        logger.warning('Retention times are probably missing in input file.')
        df1['RT pred'] = df1['peptide'].apply(lambda x: calc_RT(x, achrom.RCs_krokhin_100A_tfa))
        df1['RT diff'] = df1['RT exp']
    return df1, decoy2set
# Whitelist of search-engine score/feature columns that may be fed to the
# classifier; get_features() keeps these plus any 'mass shift*', 'matched_y*'
# and 'matched_b*' columns and drops everything else.
_standard_features = {'calc_neutral_pep_mass', 'bscore', 'yscore',
                'massdiff', 'massdiff_ppm', 'nextscore', 'RT pred', 'RT diff',
                'sumI', 'RT exp', 'precursor_neutral_mass', 'massdiff_int',
                'num_missed_cleavages', 'tot_num_ions', 'num_matched_ions', 'length',
                'ScoreRatio', 'Energy', 'MS2IonCurrent', 'MeanErrorTop7', 'sqMeanErrorTop7', 'StdevErrorTop7',
                'MS-GF:DeNovoScore', 'MS-GF:EValue', 'MS-GF:RawScore', 'MeanErrorAll',
                'MeanRelErrorAll', 'MeanRelErrorTop7', 'NumMatchedMainIons', 'StdevErrorAll',
                'StdevErrorTop7', 'StdevRelErrorAll', 'StdevRelErrorTop7', 'NTermIonCurrentRatio',
                'CTermIonCurrentRatio', 'ExplainedIonCurrentRatio', 'fragmentMT', 'ISOWIDTHDIFF',
                'MS1Intensity', 'sumI_to_MS1Intensity', 'nextscore_std', 'IPGF', 'IPGF2', 'hyperscore', 'PIF'}
def get_features(dataframe):
    """Return the sorted list of dataframe columns usable as ML features.

    A column is kept when it belongs to the known ``_standard_features``
    set or when its name starts with one of the recognized dynamic
    prefixes ('mass shift', 'matched_y', 'matched_b'); everything else
    is excluded.
    """
    dynamic_prefixes = ('mass shift', 'matched_y', 'matched_b')
    to_drop = [
        column for column in dataframe.columns
        if column not in _standard_features
        and not column.startswith(dynamic_prefixes)
    ]
    return sorted(dataframe.columns.drop(to_drop))
def get_X_array(df, feature_columns):
    """Return the selected feature columns of *df* as a 2-D numpy array."""
    feature_frame = df.loc[:, feature_columns]
    return feature_frame.values
def get_Y_array(df):
    """Return the boolean 'decoy1' label column as a float numpy array."""
    labels = df.loc[:, 'decoy1'].values
    return labels.astype(float)
def variant_peptides(allowed_peptides, group_prefix, group_infix):
    """Load the optional allowed-peptides list.

    Parameters
    ----------
    allowed_peptides : str or None
        Path to a whitespace-separated text file whose first column holds
        peptide sequences, or a falsy value to disable the filter.
    group_prefix, group_infix :
        Passed through unchanged.

    Returns
    -------
    tuple
        (set of peptide sequences or None, group_prefix, group_infix)
    """
    if allowed_peptides:
        with open(allowed_peptides) as f:
            # Skip blank/whitespace-only lines: previously ''.split()[0]
            # raised IndexError on files containing empty lines.
            allowed_peptides = {line.split()[0] for line in f if line.strip()}
    else:
        allowed_peptides = None
    return allowed_peptides, group_prefix, group_infix
def filename(outfolder, outbasename, ftype):
    """Build the output file path for a given result table type.

    ftype must be one of: 'psm_full', 'psm', 'pepxml', 'peptide',
    'protein', 'protein_group'; a KeyError is raised otherwise.
    """
    suffixes = {
        'psm_full': '_PSMs_full.tsv',
        'psm': '_PSMs.tsv',
        'pepxml': '.scavager.pep.xml',
        'peptide': '_peptides.tsv',
        'protein': '_proteins.tsv',
        'protein_group': '_protein_groups.tsv',
    }
    basename = outbasename + suffixes[ftype]
    return os.path.join(outfolder, basename)
def get_cat_model(df, feature_columns):
    """Train the CatBoost classifier used to rescore PSMs.

    Three-stage procedure:
      1. 70/30 split; fit with early stopping (od_wait=33) at learning
         rate 0.01 to find a good iteration count.
      2. Rescale the learning rate from the best iteration found and
         repeat the early-stopped fit.
      3. Train the final model on the full data for best_iter / 0.7
         iterations (compensating for the 70% training fraction).

    Parameters
    ----------
    df : pandas.DataFrame with the feature columns and the 'decoy1' label.
    feature_columns : list of column names used as model inputs.

    Returns
    -------
    The fitted CatBoostClassifier.
    """
    logger.info('Starting machine learning...')
    # logger.info(df.shape)
    # df['orig_spectrum'] = df['spectrum'].apply(lambda x: x.split('.')[-3])
    # df['massdiff_abs'] = df['massdiff'].abs()
    # df = df.sort_values(by='massdiff_abs')
    # df = df.drop_duplicates(subset = ['orig_spectrum', 'peptide'])
    # logger.info(df.shape)
    train, test = train_test_split(df, test_size=0.3, random_state=SEED)
    x_train = get_X_array(train, feature_columns)
    y_train = get_Y_array(train)
    x_test = get_X_array(test, feature_columns)
    y_test = get_Y_array(test)
    # Seed numpy before fitting for reproducible results.
    np.random.seed(SEED)
    # model = CatBoostClassifier(iterations=1000, learning_rate=0.05, depth=10, loss_function='Logloss', logging_level='Silent', random_seed=SEED)
    # model.fit(x_train, y_train, use_best_model=True, eval_set=(x_test, y_test))
    # Stage 1: probe run with a fixed learning rate to estimate iterations.
    model = CatBoostClassifier(iterations=10000, learning_rate=0.01, depth=8, loss_function='Logloss', eval_metric='Logloss',
                            od_type='Iter', od_wait=33, random_state=SEED, logging_level='Silent')
    model.fit(x_train, y_train, use_best_model=True, eval_set=(x_test, y_test))
    best_iter = model.best_iteration_
    logger.debug('Best iteration: %d', best_iter)
    # Rescale the learning rate proportionally to the best iteration count
    # (floor of 0.001, rounded to 3 decimals).
    ln_rt = max(0.001, round(0.01 * best_iter / 1000, 3))
    # Stage 2: repeat the early-stopped fit with the adapted learning rate.
    model = CatBoostClassifier(iterations=10000, learning_rate=ln_rt, depth=8, loss_function='Logloss', eval_metric='Logloss',
                            od_type='Iter', od_wait=33, random_state=SEED, logging_level='Silent')
    model.fit(x_train, y_train, use_best_model=True, eval_set=(x_test, y_test))
    best_iter = model.best_iteration_
    logger.debug('Best iteration: %d', best_iter)
    # Stage 3: refit on the full dataframe without early stopping.
    X = get_X_array(df, feature_columns)
    y = get_Y_array(df)
    model = CatBoostClassifier(iterations=int(best_iter / 0.7), learning_rate=ln_rt, depth=8, loss_function='Logloss',
                            random_state=SEED, logging_level='Silent')
    model.fit(X, y)
    logger.debug('Feature importance:')
    # Log features sorted by importance, highest first.
    for fi, fn in sorted(zip(model.feature_importances_, feature_columns), key=lambda x: x[0])[::-1]:
        logger.debug('%s: %s', fi, fn)
    logger.info('Machine learning is finished.')
    return model
def calc_PEP(df, pep_ratio=1.0, reduced=False):
    """Compute posterior error probabilities (PEP) for all PSMs in *df*.

    Unless *reduced*, a CatBoost model is trained on the automatically
    selected features and its predicted decoy1 probability becomes the
    'ML score'; with *reduced* the search-engine 'expect' value is used
    instead. Decoy/target score histograms are then ratioed (scaled by
    pep_ratio) and smoothed with an isotonic regression to map each
    ML score to a PEP.

    Side effects: adds 'ML score', 'PEP' and 'log_score' columns to df.
    """
    if not reduced:
        feature_columns = get_features(df)
        cat_model = get_cat_model(df, feature_columns)
        x_all = get_X_array(df, feature_columns)
        df['ML score'] = cat_model.predict_proba(x_all)[:, 1]
    else:
        df['ML score'] = df['expect']
    # Split into targets and (non-decoy1) decoys for the histogram ratio.
    df0_t = df[~df['decoy']]
    df0_d = df[df['decoy']]
    df0_d = df0_d[~df0_d['decoy1']]
    binsize = min(get_fdbinsize(df0_t['ML score'].values), get_fdbinsize(df0_d['ML score'].values))
    tmp = np.concatenate([df0_t['ML score'].values, df0_d['ML score'].values])
    minv = df['ML score'].min()
    maxv = df['ML score'].max()
    # Clamp the histogram range with the 1st/99th percentiles (stretched by
    # 5%) when the raw min/max are far outliers relative to the percentile.
    lbin_s = scoreatpercentile(tmp, 1.0)
    lbin = minv
    if lbin_s and abs((lbin - lbin_s) / lbin_s) > 1.0:
        lbin = lbin_s * 1.05
    rbin_s = scoreatpercentile(tmp, 99.0)
    rbin = maxv
    if rbin_s and abs((rbin - rbin_s) / rbin_s) > 1.0:
        rbin = rbin_s * 1.05
    rbin += 1.5 * binsize
    logger.debug('cbins: lbin = %s, rbin = %s, binsize = %s', lbin, rbin, binsize)
    cbins = np.arange(lbin, rbin + 2 * binsize, binsize)
    H1, b1 = np.histogram(df0_d['ML score'].values, bins=cbins)
    H2, b2 = np.histogram(df0_t['ML score'].values, bins=cbins)
    # Avoid division by zero in empty target bins.
    H2[H2 == 0] = 1
    # Decoy-to-target ratio per bin, scaled by the decoy population ratio.
    H1_2 = H1 * (1 + 1. / pep_ratio) / H2
    # Enforce a monotone PEP(score) curve bounded to [0, 1].
    ir = IsotonicRegression(y_min=0, y_max=1.0)
    ir.fit(b1[:-1], H1_2)
    df['PEP'] = ir.predict(df['ML score'].values)
    pep_min = df['ML score'].min()
    # Shift negative scores slightly above zero before taking log10.
    df['log_score'] = np.log10(df['ML score'] - ((pep_min - 1e-15) if pep_min < 0 else 0))
def calc_qvals(df, ratio):
    """Compute corrected and uncorrected q-values for target PSMs in place.

    Rows flagged as decoy1 receive q = None; all other rows get q-values
    from pyteomics aux.qvalues computed with (correction=1, column 'q')
    and without (correction=0, column 'q_uncorrected') the correction.
    """
    logger.debug('Q-value calculation started...')
    targets = ~df['decoy1']
    shared_kwargs = dict(key='ML score', is_decoy='decoy2', remove_decoy=False,
                         formula=1, full_output=True, ratio=ratio)
    corrected = aux.qvalues(df[targets], correction=1, **shared_kwargs)
    uncorrected = aux.qvalues(df[targets], correction=0, **shared_kwargs)
    df.loc[targets, 'q'] = corrected['q']
    df.loc[targets, 'q_uncorrected'] = uncorrected['q']
    df.loc[~targets, 'q'] = None
    df.loc[~targets, 'q_uncorrected'] = None
# Canonical column order for each output TSV table type; consumed by
# get_columns_to_output() to select and order whichever of these columns
# are actually present in the dataframe being written out.
_columns_to_output = {
    'psm_full': ['peptide', 'length', 'spectrum', 'q', 'q_uncorrected', 'ML score', 'modifications', 'assumed_charge',
                'num_missed_cleavages', 'num_tol_term', 'peptide_next_aa',
                'peptide_prev_aa', 'calc_neutral_pep_mass', 'massdiff_ppm', 'massdiff_int', 'RT exp', 'RT pred',
                'RT diff', 'protein', 'protein_descr', 'decoy', 'decoy1', 'decoy2', 'PEP',
                'MS1Intensity', 'ISOWIDTHDIFF'],
    'psm': ['peptide', 'length', 'spectrum', 'q', 'q_uncorrected','ML score', 'modifications', 'modified_peptide',
            'assumed_charge', 'num_missed_cleavages', 'num_tol_term', 'peptide_next_aa',
            'peptide_prev_aa', 'calc_neutral_pep_mass', 'massdiff_ppm', 'massdiff_int', 'RT exp', 'RT pred',
            'protein', 'protein_descr', 'decoy', 'PEP', 'MS1Intensity', 'ISOWIDTHDIFF', 'PIF'],
    'peptide': ['peptide', '#PSMs', 'length', 'spectrum', 'q', 'q_uncorrected', 'ML score', 'modifications',
                'assumed_charge', 'num_missed_cleavages', 'num_tol_term', 'peptide_next_aa',
                'peptide_prev_aa', 'calc_neutral_pep_mass', 'massdiff_ppm', 'massdiff_int', 'RT exp',
                'RT pred', 'protein', 'protein_descr', 'decoy', 'PEP', 'MS1Intensity', 'ISOWIDTHDIFF', 'PIF'],
    'protein': ['dbname', 'description', 'PSMs', 'peptides', 'NSAF', 'TOP3', 'sq', 'score', 'q', 'q_uncorrected',
                'length', 'all proteins', 'groupleader'],
}
def get_columns_to_output(columns, out_type):
    """Select and order the columns to write for a given output table.

    Starts from the canonical order in _columns_to_output[out_type],
    keeping only columns actually present. For 'psm_full' every remaining
    column is appended too; for the other table types only 'tag_*'
    columns are appended.
    """
    present = set(columns)
    order = _columns_to_output[out_type]
    labels = [label for label in order if label in present]
    if out_type == 'psm_full':
        # Append the extra columns in their original dataframe order.
        # (Iterating the set difference here previously produced a
        # nondeterministic column order between runs.)
        known = set(order)
        labels.extend(c for c in columns if c not in known)
    if out_type != 'psm_full':
        for c in columns:
            if c[:4] == 'tag_':
                labels.append(c)
    logger.debug('Writing out %s table, q_uncorrected present: %s', out_type, 'q_uncorrected' in labels)
    return labels
def calc_psms(df, df_filt):
    """Store in df['#PSMs'], for every row of *df*, the number of rows in
    *df_filt* that share its peptide sequence (0 when absent)."""
    psm_counts = Counter(df_filt['peptide'])
    df['#PSMs'] = df['peptide'].map(lambda seq: psm_counts.get(seq, 0))
| [
"os.mkdir",
"numpy.sum",
"numpy.random.seed",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"collections.defaultdict",
"numpy.histogram",
"numpy.arange",
"catboost.CatBoostClassifier",
"os.path.join",
"warnings.simplefilter",
"scipy.stats.scoreatpercentile",
"pyteomics.achro... | [((552, 579), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (569, 579), False, 'import logging\n'), ((1011, 1046), 'pyteomics.auxiliary.qvalues', 'aux.qvalues', (['df'], {'correction': '(1)'}), '(df, correction=1, **kw)\n', (1022, 1046), True, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((1062, 1097), 'pyteomics.auxiliary.qvalues', 'aux.qvalues', (['df'], {'correction': '(0)'}), '(df, correction=0, **kw)\n', (1073, 1097), True, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((3406, 3424), 'numpy.sum', 'np.sum', (["df['NSAF']"], {}), "(df['NSAF'])\n", (3412, 3424), True, 'import numpy as np\n'), ((4532, 4548), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (4543, 4548), False, 'from collections import Counter, defaultdict\n'), ((4566, 4582), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (4577, 4582), False, 'from collections import Counter, defaultdict\n'), ((6439, 6464), 'pyteomics.fasta.read', 'fasta.read', (['path_to_fasta'], {}), '(path_to_fasta)\n', (6449, 6464), False, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((10812, 10829), 'numpy.array', 'np.array', (['tagsums'], {}), '(tagsums)\n', (10820, 10829), True, 'import numpy as np\n'), ((11462, 11485), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (11478, 11485), False, 'import os\n'), ((11497, 11523), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (11513, 11523), False, 'import os\n'), ((13032, 13049), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (13043, 13049), False, 'import random\n'), ((13579, 13596), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (13590, 13596), False, 'import random\n'), ((23630, 23687), 'os.path.join', 'os.path.join', (['outfolder', '(outbasename + type_suffix[ftype])'], {}), '(outfolder, 
outbasename + type_suffix[ftype])\n', (23642, 23687), False, 'import os\n'), ((24093, 24147), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'test_size': '(0.3)', 'random_state': 'SEED'}), '(df, test_size=0.3, random_state=SEED)\n', (24109, 24147), False, 'from sklearn.model_selection import train_test_split\n'), ((24314, 24334), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (24328, 24334), True, 'import numpy as np\n'), ((24577, 24770), 'catboost.CatBoostClassifier', 'CatBoostClassifier', ([], {'iterations': '(10000)', 'learning_rate': '(0.01)', 'depth': '(8)', 'loss_function': '"""Logloss"""', 'eval_metric': '"""Logloss"""', 'od_type': '"""Iter"""', 'od_wait': '(33)', 'random_state': 'SEED', 'logging_level': '"""Silent"""'}), "(iterations=10000, learning_rate=0.01, depth=8,\n loss_function='Logloss', eval_metric='Logloss', od_type='Iter', od_wait\n =33, random_state=SEED, logging_level='Silent')\n", (24595, 24770), False, 'from catboost import CatBoostClassifier\n'), ((25031, 25225), 'catboost.CatBoostClassifier', 'CatBoostClassifier', ([], {'iterations': '(10000)', 'learning_rate': 'ln_rt', 'depth': '(8)', 'loss_function': '"""Logloss"""', 'eval_metric': '"""Logloss"""', 'od_type': '"""Iter"""', 'od_wait': '(33)', 'random_state': 'SEED', 'logging_level': '"""Silent"""'}), "(iterations=10000, learning_rate=ln_rt, depth=8,\n loss_function='Logloss', eval_metric='Logloss', od_type='Iter', od_wait\n =33, random_state=SEED, logging_level='Silent')\n", (25049, 25225), False, 'from catboost import CatBoostClassifier\n'), ((26475, 26543), 'numpy.concatenate', 'np.concatenate', (["[df0_t['ML score'].values, df0_d['ML score'].values]"], {}), "([df0_t['ML score'].values, df0_d['ML score'].values])\n", (26489, 26543), True, 'import numpy as np\n'), ((26621, 26648), 'scipy.stats.scoreatpercentile', 'scoreatpercentile', (['tmp', '(1.0)'], {}), '(tmp, 1.0)\n', (26638, 26648), False, 'from scipy.stats import scoreatpercentile\n'), 
((26762, 26790), 'scipy.stats.scoreatpercentile', 'scoreatpercentile', (['tmp', '(99.0)'], {}), '(tmp, 99.0)\n', (26779, 26790), False, 'from scipy.stats import scoreatpercentile\n'), ((27012, 27056), 'numpy.arange', 'np.arange', (['lbin', '(rbin + 2 * binsize)', 'binsize'], {}), '(lbin, rbin + 2 * binsize, binsize)\n', (27021, 27056), True, 'import numpy as np\n'), ((27071, 27121), 'numpy.histogram', 'np.histogram', (["df0_d['ML score'].values"], {'bins': 'cbins'}), "(df0_d['ML score'].values, bins=cbins)\n", (27083, 27121), True, 'import numpy as np\n'), ((27135, 27185), 'numpy.histogram', 'np.histogram', (["df0_t['ML score'].values"], {'bins': 'cbins'}), "(df0_t['ML score'].values, bins=cbins)\n", (27147, 27185), True, 'import numpy as np\n'), ((27258, 27296), 'sklearn.isotonic.IsotonicRegression', 'IsotonicRegression', ([], {'y_min': '(0)', 'y_max': '(1.0)'}), '(y_min=0, y_max=1.0)\n', (27276, 27296), False, 'from sklearn.isotonic import IsotonicRegression\n'), ((27431, 27497), 'numpy.log10', 'np.log10', (["(df['ML score'] - (pep_min - 1e-15 if pep_min < 0 else 0))"], {}), "(df['ML score'] - (pep_min - 1e-15 if pep_min < 0 else 0))\n", (27439, 27497), True, 'import numpy as np\n'), ((27593, 27738), 'pyteomics.auxiliary.qvalues', 'aux.qvalues', (["df[~df['decoy1']]"], {'key': '"""ML score"""', 'is_decoy': '"""decoy2"""', 'remove_decoy': '(False)', 'formula': '(1)', 'full_output': '(True)', 'ratio': 'ratio', 'correction': '(1)'}), "(df[~df['decoy1']], key='ML score', is_decoy='decoy2',\n remove_decoy=False, formula=1, full_output=True, ratio=ratio, correction=1)\n", (27604, 27738), True, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((27754, 27899), 'pyteomics.auxiliary.qvalues', 'aux.qvalues', (["df[~df['decoy1']]"], {'key': '"""ML score"""', 'is_decoy': '"""decoy2"""', 'remove_decoy': '(False)', 'formula': '(1)', 'full_output': '(True)', 'ratio': 'ratio', 'correction': '(0)'}), "(df[~df['decoy1']], key='ML score', 
is_decoy='decoy2',\n remove_decoy=False, formula=1, full_output=True, ratio=ratio, correction=0)\n", (27765, 27899), True, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((30045, 30072), 'collections.Counter', 'Counter', (["df_filt['peptide']"], {}), "(df_filt['peptide'])\n", (30052, 30072), False, 'from collections import Counter, defaultdict\n'), ((3475, 3495), 'pandas.notna', 'pd.notna', (["df['NSAF']"], {}), "(df['NSAF'])\n", (3483, 3495), True, 'import pandas as pd\n'), ((3525, 3545), 'numpy.log10', 'np.log10', (["df['NSAF']"], {}), "(df['NSAF'])\n", (3533, 3545), True, 'import numpy as np\n'), ((5346, 5380), 'pandas.read_csv', 'pd.read_csv', (['df_ms1_path'], {'sep': '"""\t"""'}), "(df_ms1_path, sep='\\t')\n", (5357, 5380), True, 'import pandas as pd\n'), ((11924, 11952), 'pyteomics.achrom.calculate_RT', 'achrom.calculate_RT', (['seq', 'RC'], {}), '(seq, RC)\n', (11943, 11952), False, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((12754, 12768), 'pyteomics.fasta.read', 'fasta.read', (['db'], {}), '(db)\n', (12764, 12768), False, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((16365, 16394), 'pyteomics.pepxml.DataFrame', 'pepxml.DataFrame', (['infile_path'], {}), '(infile_path)\n', (16381, 16394), False, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((20952, 21008), 'pyteomics.auxiliary.linear_regression', 'aux.linear_regression', (["df1_f['RT pred']", "df1_f['RT exp']"], {}), "(df1_f['RT pred'], df1_f['RT exp'])\n", (20973, 21008), True, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((3557, 3576), 'pandas.isna', 'pd.isna', (["df['NSAF']"], {}), "(df['NSAF'])\n", (3564, 3576), True, 'import pandas as pd\n'), ((10132, 10185), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['proteins_dict'], {'orient': '"""index"""'}), "(proteins_dict, 
orient='index')\n", (10154, 10185), True, 'import pandas as pd\n'), ((11603, 11629), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (11619, 11629), False, 'import os\n'), ((11752, 11775), 'os.path.realpath', 'os.path.realpath', (['fname'], {}), '(fname)\n', (11768, 11775), False, 'import os\n'), ((11802, 11823), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (11815, 11823), False, 'import os\n'), ((11837, 11853), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (11845, 11853), False, 'import os\n'), ((16482, 16509), 'pyteomics.mzid.DataFrame', 'mzid.DataFrame', (['infile_path'], {}), '(infile_path)\n', (16496, 16509), False, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((18414, 18443), 'pandas.isna', 'pd.isna', (["df1['protein_descr']"], {}), "(df1['protein_descr'])\n", (18421, 18443), True, 'import pandas as pd\n'), ((18472, 18501), 'pandas.isna', 'pd.isna', (["df1['protein_descr']"], {}), "(df1['protein_descr'])\n", (18479, 18501), True, 'import pandas as pd\n'), ((18632, 18655), 'pandas.isna', 'pd.isna', (["df1['peptide']"], {}), "(df1['peptide'])\n", (18639, 18655), True, 'import pandas as pd\n'), ((20548, 20573), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (20571, 20573), False, 'import warnings\n'), ((20587, 20618), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (20608, 20618), False, 'import warnings\n'), ((20656, 20728), 'pyteomics.achrom.get_RCs_vary_lcp', 'achrom.get_RCs_vary_lcp', (["df1_f['peptide'].values", "df1_f['RT exp'].values"], {}), "(df1_f['peptide'].values, df1_f['RT exp'].values)\n", (20679, 20728), False, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((16814, 16853), 'pyteomics.parser.num_sites', 'parser.num_sites', (['x'], {'rule': 'cleavage_rule'}), '(x, rule=cleavage_rule)\n', (16830, 16853), False, 'from pyteomics import pepxml, 
achrom, auxiliary as aux, mass, fasta, mzid, parser\n'), ((17063, 17102), 'pyteomics.parser.num_sites', 'parser.num_sites', (['x'], {'rule': 'cleavage_rule'}), '(x, rule=cleavage_rule)\n', (17079, 17102), False, 'from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser\n')] |
from __future__ import annotations
from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
from numpy.linalg import pinv
from IMLearn.metrics.loss_functions import mean_square_error
class LinearRegression(BaseEstimator):
    """
    Ordinary Least Squares linear-regression estimator.

    Learns by empirical risk minimization of the residual sum of squares;
    the closed-form minimizer is obtained with the Moore-Penrose
    pseudo-inverse, which numpy computes via a singular value
    decomposition (SVD).
    """

    def __init__(self, include_intercept: bool = True) -> LinearRegression:
        """
        Instantiate a linear regression estimator.

        Parameters
        ----------
        include_intercept: bool, default=True
            Should fitted model include an intercept or not

        Attributes
        ----------
        include_intercept_: bool
            Should fitted model include an intercept or not

        coefs_: ndarray of shape (n_features,) or (n_features+1,)
            Coefficients vector fitted by linear regression; set by
            `LinearRegression.fit`.
        """
        super().__init__()
        self.include_intercept_ = include_intercept
        self.coefs_ = None

    def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
        """
        Fit a least-squares model to the given samples.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to fit an estimator for

        y : ndarray of shape (n_samples, )
            Responses of input data to fit to

        Notes
        -----
        Fits model with or without an intercept depending on the value of
        `self.include_intercept_`
        """
        if self.include_intercept_:
            # Prepend a constant-1 column so the affine model becomes a
            # purely linear one in the augmented design matrix.
            n_samples = np.shape(X)[0]
            X = np.insert(X, 0, np.ones(n_samples), axis=1)
        # Closed-form OLS solution: w = pinv(X) @ y, where pinv is the
        # Moore-Penrose pseudo-inverse (computed through SVD).
        self.coefs_ = np.linalg.pinv(X) @ y

    def _predict(self, X: np.ndarray) -> np.ndarray:
        """
        Predict responses for given samples using the fitted estimator.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to predict responses for

        Returns
        -------
        responses : ndarray of shape (n_samples, )
            Predicted responses of given samples
        """
        if not self.include_intercept_:
            return X @ self.coefs_
        # coefs_[0] is the intercept; the remaining entries multiply the
        # features, so the dimensions line up without augmenting X.
        return (X @ self.coefs_[1:]) + self.coefs_[0]

    def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
        """
        Evaluate performance under the MSE loss function.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Test samples

        y : ndarray of shape (n_samples, )
            True labels of test samples

        Returns
        -------
        loss : float
            Performance under MSE loss function
        """
        predictions = self._predict(X)
        return mean_square_error(predictions, y)
| [
"numpy.shape",
"numpy.linalg.pinv"
] | [((2221, 2238), 'numpy.linalg.pinv', 'np.linalg.pinv', (['X'], {}), '(X)\n', (2235, 2238), True, 'import numpy as np\n'), ((1938, 1949), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1946, 1949), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
def grid2contour(grid):
    """Render a deformation field as overlapping black contour plots.

    grid: array of shape (h, w, 2) with values in (-1, 1), interpreted as
    an image sampling grid (deform field).
    """
    xs = np.arange(-1, 1, 2 / grid.shape[0])
    ys = np.arange(-1, 1, 2 / grid.shape[1])
    mesh_x, mesh_y = np.meshgrid(xs, ys)

    # Shift by 2 so all contour levels are positive (avoids matplotlib's
    # dashed rendering of negative contours).
    z_horizontal = (grid[:, :, 0] + 2)[::-1]  # vertical flip
    z_vertical = grid[:, :, 1] + 2

    plt.figure()
    plt.contour(mesh_x, mesh_y, z_horizontal, 15, colors='k')
    plt.contour(mesh_x, mesh_y, z_vertical, 15, colors='k')
    plt.xticks(())  # remove x, y ticks
    plt.yticks(())
    plt.title('deform field')
    plt.show()
def test():
    """Demo: draw the contour plot of a slightly perturbed regular grid."""
    img_shape = [80, 80]
    # Identity sampling grid with coordinates in (-1, 1).
    x = np.arange(-1, 1, 2/img_shape[0])
    y = np.arange(-1, 1, 2/img_shape[1])
    X, Y = np.meshgrid(x, y)
    regular_grid = np.stack((X,Y), axis=2)
    # Random displacement, normalized to roughly one pixel per axis.
    rand_field = np.random.rand(*img_shape, 2)
    rand_field_norm = rand_field.copy()
    rand_field_norm[:, :, 0] = rand_field_norm[:, :, 0] * 2 / img_shape[1]
    rand_field_norm[:, :, 1] = rand_field_norm[:, :, 1] * 2 / img_shape[0]
    sampling_grid = regular_grid + rand_field_norm
    grid2contour(sampling_grid)
def plotVectorField(filed):
    """Display a 2-D vector field of shape (h, w, 2) as a quiver plot."""
    u_component = filed[:, :, 0]
    v_component = filed[:, :, 1]
    plt.quiver(u_component, v_component)
    plt.show()
if __name__=="__main__":
    # Run the random-deformation contour demo.
    test()
    # Additionally show a quiver demo over a regular 10x10 grid of points.
    x,y=np.mgrid[0:10,0:10]
    plt.quiver(x,y, headwidth=1, scale = 10, headlength=4)
    plt.show()
"matplotlib.pyplot.title",
"numpy.stack",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.quiver",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.contour",
"numpy.random.rand",
"matplotlib.pyplot.xticks"
] | [((209, 244), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(2 / grid.shape[0])'], {}), '(-1, 1, 2 / grid.shape[0])\n', (218, 244), True, 'import numpy as np\n'), ((252, 287), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(2 / grid.shape[1])'], {}), '(-1, 1, 2 / grid.shape[1])\n', (261, 287), True, 'import numpy as np\n'), ((299, 316), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (310, 316), True, 'import numpy as np\n'), ((437, 449), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (447, 449), True, 'import matplotlib.pyplot as plt\n'), ((454, 491), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'Z1', '(15)'], {'colors': '"""k"""'}), "(X, Y, Z1, 15, colors='k')\n", (465, 491), True, 'import matplotlib.pyplot as plt\n'), ((542, 579), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'Z2', '(15)'], {'colors': '"""k"""'}), "(X, Y, Z2, 15, colors='k')\n", (553, 579), True, 'import matplotlib.pyplot as plt\n'), ((686, 711), 'matplotlib.pyplot.title', 'plt.title', (['"""deform field"""'], {}), "('deform field')\n", (695, 711), True, 'import matplotlib.pyplot as plt\n'), ((716, 726), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (724, 726), True, 'import matplotlib.pyplot as plt\n'), ((773, 807), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(2 / img_shape[0])'], {}), '(-1, 1, 2 / img_shape[0])\n', (782, 807), True, 'import numpy as np\n'), ((814, 848), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(2 / img_shape[1])'], {}), '(-1, 1, 2 / img_shape[1])\n', (823, 848), True, 'import numpy as np\n'), ((858, 875), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (869, 875), True, 'import numpy as np\n'), ((895, 919), 'numpy.stack', 'np.stack', (['(X, Y)'], {'axis': '(2)'}), '((X, Y), axis=2)\n', (903, 919), True, 'import numpy as np\n'), ((937, 966), 'numpy.random.rand', 'np.random.rand', (['*img_shape', '(2)'], {}), '(*img_shape, 2)\n', (951, 966), True, 'import numpy as np\n'), ((1312, 1328), 
'matplotlib.pyplot.quiver', 'plt.quiver', (['x', 'y'], {}), '(x, y)\n', (1322, 1328), True, 'import matplotlib.pyplot as plt\n'), ((1333, 1343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1341, 1343), True, 'import matplotlib.pyplot as plt\n'), ((1415, 1468), 'matplotlib.pyplot.quiver', 'plt.quiver', (['x', 'y'], {'headwidth': '(1)', 'scale': '(10)', 'headlength': '(4)'}), '(x, y, headwidth=1, scale=10, headlength=4)\n', (1425, 1468), True, 'import matplotlib.pyplot as plt\n'), ((1474, 1484), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1482, 1484), True, 'import matplotlib.pyplot as plt\n'), ((630, 644), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (640, 644), True, 'import matplotlib.pyplot as plt\n'), ((646, 660), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (656, 660), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from numpy import random as rand
from scipy import sparse as sps
from sklearn.datasets import load_svmlight_file
from sklearn.datasets import dump_svmlight_file
import os
import time as tm
# DO NOT CHANGE THE NAME OF THIS METHOD OR ITS INPUT OUTPUT BEHAVtst_X_Xftst_X_Xftst_X_XfIOR
# INPUT CONVENTION
# X: n x d matrix in csr_matrix format containing d-dim (sparse) features for n test data points
# k: the number of recommendations to return for each test data point in ranked order
# OUTPUT CONVENTION
# The method must return an n x k numpy nd-array (not numpy matrix or scipy matrix) of labels with the i-th row
# containing k labels which it thinks are most appropriate for the i-th test point. Labels must be returned in
# ranked order i.e. the label yPred[i][0] must be considered most appropriate followed by yPred[i][1] and so on
# CAUTION: Make sure that you return (yPred below) an n x k numpy (nd) array and not a numpy/scipy/sparse matrix
# The returned matrix will always be a dense matrix and it terribly slows things down to store it in csr format
# The evaluation code may misbehave and give unexpected results if an nd-array is not returned
def getReco( X, k):
    """Return an n x k numpy nd-array of ranked label recommendations.

    X: n x d csr_matrix of test features; k: number of labels per row.
    The features are dumped to disk, an external shallow model is run
    through a shell script, and its score matrix is read back and each
    row's top-k labels (by descending score) are returned.
    """
    # Find out how many data points we have
    n = X.shape[0]
    # L: number of labels expected in the model's score matrix.
    L = 3400
    # Load and unpack the dummy model
    # The dummy model simply stores the labels in decreasing order of their popularity
    out_path="sandbox/data/Assn2/"
    model_dir = "sandbox/results/Assn2/"
    # Write the test features where the external pipeline expects them,
    # then invoke the pre-trained shallow model via its driver script.
    dump_food(X,out_path)
    os.system("bash shallow/sample_run.sh")
    filename = model_dir + "score_mat"
    # Read the per-row label scores back as a sparse matrix.
    Xp, _ = load_svmlight_file( "%s.txt" %filename, multilabel = True, n_features = L, offset = 1 )
    yPred = np.zeros( (n, k), dtype=int )
    for ind, user in enumerate(Xp):
        d = user.data
        i = user.indices
        # Pair (label index, score) rows, then sort by score descending.
        xf = np.vstack( (i, d) ).T
        xf = xf[xf[:,1].argsort()[::-1]]
        for j in range(0,k):
            yPred[ind][j]=xf[j][0]
    # Clean up the intermediate files produced by this run.
    os.system("rm sandbox/data/Assn2/tst_X_Xf.txt")
    os.system("rm sandbox/results/Assn2/score_mat.txt")
    '''
    # Let us predict a random subset of the 2k most popular labels no matter what the test point
    shortList = model[0:2*k]
    # Make sure we are returning a numpy nd-array and not a numpy matrix or a scipy sparse matrix
    yPred = np.zeros( (n, k) )
    for i in range( n ):
        yPred[i,:] = rand.permutation( shortList )[0:k]
    '''
    return yPred
def dump_food( matrix_test, out_path):
    """Dump test features in the svmlight format expected by the pipeline.

    Writes matrix_test (with a dummy all-zero label column) to a temporary
    file, rewrites the header/data lines into the form the external model
    expects, and saves the result to out_path + 'tst_X_Xf.txt'.
    """
    (n, d) = matrix_test.shape
    # svmlight dumps need a label column; use an all-zero placeholder.
    dummy = sps.csr_matrix( (n, 1) )
    # The "%d %d" comment embeds the matrix dimensions as a header line.
    dump_svmlight_file( matrix_test, dummy, "test_data.X", multilabel = True, zero_based = True, comment = "%d %d" % (n, d) )
    test_ws=open("test_data.X","r")
    test_is=open(out_path+"tst_X_Xf.txt","w")
    lines=test_ws.readlines()
    for i in range(0,len(lines)):
        if(lines[i][0]=='#'):
            # Comment lines: keep only the one whose payload starts with a
            # digit (the "n d" dimensions header), stripping the '# ' prefix;
            # all other comment lines are dropped.
            if(len(lines[i])>2):
                if(not(lines[i][2]<='9' and lines[i][2]>='0')):
                    continue
                else :
                    lines[i]=lines[i][2:]
                    test_is.write(lines[i])
            else :
                continue
        else :
            # Data lines: drop the first character — presumably the empty
            # multilabel field left by dump_svmlight_file; TODO confirm.
            lines[i]=lines[i][1:]
            test_is.write(lines[i])
    test_is.close()
    # NOTE(review): test_ws is never closed explicitly — relies on GC.
    os.system("rm test_data.X")
| [
"sklearn.datasets.dump_svmlight_file",
"numpy.zeros",
"os.system",
"scipy.sparse.csr_matrix",
"sklearn.datasets.load_svmlight_file",
"numpy.vstack"
] | [((1562, 1601), 'os.system', 'os.system', (['"""bash shallow/sample_run.sh"""'], {}), "('bash shallow/sample_run.sh')\n", (1571, 1601), False, 'import os\n'), ((1661, 1746), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (["('%s.txt' % filename)"], {'multilabel': '(True)', 'n_features': 'L', 'offset': '(1)'}), "('%s.txt' % filename, multilabel=True, n_features=L, offset=1\n )\n", (1679, 1746), False, 'from sklearn.datasets import load_svmlight_file\n'), ((1764, 1791), 'numpy.zeros', 'np.zeros', (['(n, k)'], {'dtype': 'int'}), '((n, k), dtype=int)\n', (1772, 1791), True, 'import numpy as np\n'), ((2031, 2078), 'os.system', 'os.system', (['"""rm sandbox/data/Assn2/tst_X_Xf.txt"""'], {}), "('rm sandbox/data/Assn2/tst_X_Xf.txt')\n", (2040, 2078), False, 'import os\n'), ((2084, 2135), 'os.system', 'os.system', (['"""rm sandbox/results/Assn2/score_mat.txt"""'], {}), "('rm sandbox/results/Assn2/score_mat.txt')\n", (2093, 2135), False, 'import os\n'), ((2603, 2625), 'scipy.sparse.csr_matrix', 'sps.csr_matrix', (['(n, 1)'], {}), '((n, 1))\n', (2617, 2625), True, 'from scipy import sparse as sps\n'), ((2633, 2750), 'sklearn.datasets.dump_svmlight_file', 'dump_svmlight_file', (['matrix_test', 'dummy', '"""test_data.X"""'], {'multilabel': '(True)', 'zero_based': '(True)', 'comment': "('%d %d' % (n, d))"}), "(matrix_test, dummy, 'test_data.X', multilabel=True,\n zero_based=True, comment='%d %d' % (n, d))\n", (2651, 2750), False, 'from sklearn.datasets import dump_svmlight_file\n'), ((3344, 3371), 'os.system', 'os.system', (['"""rm test_data.X"""'], {}), "('rm test_data.X')\n", (3353, 3371), False, 'import os\n'), ((1896, 1913), 'numpy.vstack', 'np.vstack', (['(i, d)'], {}), '((i, d))\n', (1905, 1913), True, 'import numpy as np\n')] |
# Basic packages
import numpy as np
import scipy as scipy
import os
import sys
import json
import datetime
import skimage.draw
import cv2
import matplotlib.pyplot as plt
from imgaug import augmenters as iaa # For image augmentation
#%cd drive/MyDrive/
# To find the path for Mask_RCNN
sys.path.insert(1, 'drive/MyDrive/Mask_RCNN')
from mrcnn import utils
# ========================================================================
# ========================================================================
class GhostsDataset(utils.Dataset):
    """Mask R-CNN dataset of "ghost" image artifacts annotated with the
    VGG Image Annotator (VIA).

    Three region classes are recognised, all under the source name "Type":
    Bright (1), Faint (2) and Rays (3).
    """

    def load_ghosts(self, dataset_dir, subset):
        """Load a subset of the ghosts dataset.

        dataset_dir: Root directory of the dataset.
        subset: Subset to load: Training_set, Validation_set or Test_set.
        """
        # Add classes. We have three classes to add.
        self.add_class("Type", 1, "Bright")
        self.add_class("Type", 2, "Faint")
        self.add_class("Type", 3, "Rays")
        # Train or validation dataset?
        assert subset in ["Training_set", "Validation_set", "Test_set"]
        dataset_dir = os.path.join(dataset_dir, subset)
        # VIA saves annotations as one dict keyed by image file name:
        # { '28503151_...jpg...': {'filename': ..., 'regions': [...], 'size': ...}, ... }
        # We mostly care about the x and y coordinates of each region.
        annotations1 = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
        # BUG FIX: the original code filtered the not-yet-defined name
        # `annotations` (NameError). Filter the loaded dict's values instead.
        # VIA keeps images in the JSON even when they have no annotations;
        # skip those unannotated images.
        annotations = [a for a in annotations1.values() if a['regions']]
        # Add images
        for a in annotations:
            # The x/y coordinates of the polygon outlining each object
            # instance are stored in shape_attributes (see format above).
            polygons = [r['shape_attributes'] for r in a['regions']]
            objects = [s['region_attributes']['Type'] for s in a['regions']]
            # Map class names to numeric class ids.
            name_dict = {"Bright": 1, "Faint": 2, "Rays": 3}
            num_ids = [name_dict[obj] for obj in objects]
            # load_mask() needs the image size to convert polygons to masks.
            # VIA doesn't include it in the JSON, so we must read the image.
            image_path = os.path.join(dataset_dir, a['filename'])
            image = skimage.io.imread(image_path)
            height, width = image.shape[:2]
            self.add_image(
                "Type",  # single source name shared by all three classes
                image_id=a['filename'],  # use file name as a unique image id
                path=image_path,
                width=width, height=height,
                polygons=polygons,
                num_ids=num_ids)

    def load_mask(self, image_id):
        """Generate instance masks for an image.

        Returns:
            masks: A uint8 array of shape [height, width, instance count] with
                one mask per instance.
            class_ids: a 1D int32 array of class IDs of the instance masks.
        """
        # Delegate to the parent class for images from other sources.
        # (The original checked the same condition twice; once suffices.)
        info = self.image_info[image_id]
        if info["source"] != "Type":
            return super(GhostsDataset, self).load_mask(image_id)
        num_ids = info['num_ids']
        # One binary [height, width] plane per annotated region.
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        for i, p in enumerate(info["polygons"]):
            name = p['name']
            # VIA supports several region shapes; rasterize each accordingly.
            if name in ('polyline', 'polygon'):
                rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            elif name == 'rect':
                rr, cc = skimage.draw.rectangle(start=(p['y'], p['x']),
                                              extent=(p['height'], p['width']))
            elif name == 'circle':
                # NOTE(review): skimage.draw.circle was renamed to `disk` in
                # scikit-image 0.19 -- confirm the pinned skimage version.
                rr, cc = skimage.draw.circle(p['cy'], p['cx'], p['r'])
            else:
                # Remaining case: an ellipse region.
                rr, cc = skimage.draw.ellipse(p['cy'], p['cx'], p['ry'], p['rx'],
                                           shape=None, rotation=-p['theta'])
            # Clip coordinates to a 400x400 canvas (the annotations appear to
            # assume 400x400 images -- TODO confirm).
            inside = (rr < 400) & (cc < 400)
            mask[rr[inside], cc[inside], i] = 1
        # Return mask and the array of class IDs of each instance.
        return mask, np.array(num_ids, dtype=np.int32)

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "Type":
            return info["path"]
        else:
            super(GhostsDataset, self).image_reference(image_id)
| [
"numpy.array",
"os.path.join",
"sys.path.insert"
] | [((292, 337), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""drive/MyDrive/Mask_RCNN"""'], {}), "(1, 'drive/MyDrive/Mask_RCNN')\n", (307, 337), False, 'import sys\n'), ((1075, 1108), 'os.path.join', 'os.path.join', (['dataset_dir', 'subset'], {}), '(dataset_dir, subset)\n', (1087, 1108), False, 'import os\n'), ((5680, 5713), 'numpy.array', 'np.array', (['num_ids'], {'dtype': 'np.int32'}), '(num_ids, dtype=np.int32)\n', (5688, 5713), True, 'import numpy as np\n'), ((2932, 2972), 'os.path.join', 'os.path.join', (['dataset_dir', "a['filename']"], {}), "(dataset_dir, a['filename'])\n", (2944, 2972), False, 'import os\n'), ((1724, 1773), 'os.path.join', 'os.path.join', (['dataset_dir', '"""via_region_data.json"""'], {}), "(dataset_dir, 'via_region_data.json')\n", (1736, 1773), False, 'import os\n')] |
from flask import Flask, request, jsonify
from flask_cors import CORS
from tensorflow import keras
import pickle
import json
import numpy as np
# Flask application with CORS enabled so a browser front-end served from
# another origin can call the /predict endpoint.
app = Flask(__name__)
CORS(app)
# Intent definitions: each intent has a 'tag' and candidate 'responses'.
with open("intents.json") as file:
    data = json.load(file)
@app.post('/predict')
def predict():
    """Classify the incoming chat message and return a random matching response."""
    user_message = request.get_json().get("message")
    # Trained intent-classification model (reloaded on every request).
    model = keras.models.load_model('chat_model_college')
    # Fitted tokenizer used at training time.
    with open('tokenizer.pickle', 'rb') as handle:
        tokenizer = pickle.load(handle)
    # Label encoder mapping predicted class indices back to intent tags.
    with open('label_encoder.pickle', 'rb') as enc:
        lbl_encoder = pickle.load(enc)
    # Sequence length the model was trained with.
    max_len = 20
    padded = keras.preprocessing.sequence.pad_sequences(
        tokenizer.texts_to_sequences([user_message]),
        truncating='post', maxlen=max_len)
    scores = model.predict(padded)
    tag = lbl_encoder.inverse_transform([np.argmax(scores)])
    # Answer with a random response from the matching intent.
    for intent in data['intents']:
        if intent['tag'] == tag:
            return jsonify({"answer": np.random.choice(intent['responses'])})
# Start the Flask development server when this file is run directly.
if __name__ == "__main__":
    app.run()
| [
"json.load",
"tensorflow.keras.models.load_model",
"numpy.argmax",
"flask_cors.CORS",
"flask.Flask",
"flask.jsonify",
"pickle.load",
"numpy.random.choice",
"flask.request.get_json"
] | [((158, 173), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (163, 173), False, 'from flask import Flask, request, jsonify\n'), ((175, 184), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (179, 184), False, 'from flask_cors import CORS\n'), ((235, 250), 'json.load', 'json.load', (['file'], {}), '(file)\n', (244, 250), False, 'import json\n'), ((385, 430), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['"""chat_model_college"""'], {}), "('chat_model_college')\n", (408, 430), False, 'from tensorflow import keras\n'), ((535, 554), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (546, 554), False, 'import pickle\n'), ((666, 682), 'pickle.load', 'pickle.load', (['enc'], {}), '(enc)\n', (677, 682), False, 'import pickle\n'), ((310, 328), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (326, 328), False, 'from flask import Flask, request, jsonify\n'), ((985, 1002), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (994, 1002), True, 'import numpy as np\n'), ((1155, 1171), 'flask.jsonify', 'jsonify', (['message'], {}), '(message)\n', (1162, 1171), False, 'from flask import Flask, request, jsonify\n'), ((1101, 1133), 'numpy.random.choice', 'np.random.choice', (["i['responses']"], {}), "(i['responses'])\n", (1117, 1133), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from numba import njit
@njit
def njit_thin(points, maps):
    """One absorption pass: for each candidate pixel, copy the label of its
    first positive 4-neighbour into the result.

    points: pair of index arrays (rows, cols) as produced by np.where --
        in this module, the zero-valued "line" pixels (see thinning()).
    maps: 2D label map; values > 0 are fill-region labels.

    Returns a copy of `maps` with absorbable pixels relabelled.
    """
    result = maps.copy()
    h, w = maps.shape[:2]
    for _ in range(len(points[0])):
        x = points[0][_]  # row index of the candidate pixel
        y = points[1][_]  # column index of the candidate pixel
        # Probe the four neighbours (up, left, down, right) in fixed order;
        # take the first positive label and move on to the next pixel.
        if x > 0:
            a = maps[x-1, y]
            if a > 0:
                result[x, y] = a
                continue
        if y > 0:
            a = maps[x, y-1]
            if a > 0:
                result[x, y] = a
                continue
        if x + 1 < h:
            a = maps[x+1, y]
            if a > 0:
                result[x, y] = a
                continue
        if y + 1 < w:
            a = maps[x, y+1]
            if a > 0:
                result[x, y] = a
                continue
    return result
def thinning(fillmap, max_iter=100):
    """Iteratively absorb zero-valued "line" pixels of `fillmap` into the
    neighbouring fill regions, for at most `max_iter` passes or until no
    zero pixels remain."""
    result = fillmap.copy()
    for _ in range(max_iter):
        remaining = np.where(result == 0)
        # Stop early once every pixel carries a region label.
        if len(remaining[0]) == 0:
            break
        result = njit_thin(remaining, result)
    return result
| [
"numpy.where"
] | [((886, 907), 'numpy.where', 'np.where', (['(result == 0)'], {}), '(result == 0)\n', (894, 907), True, 'import numpy as np\n')] |
"""Directly copied from the spacy-transformers library https://raw.githubusercontent.com/explosion/spacy-transformers/master/spacy_transformers/align.py
Pasted here to avoid clashing torch versions
"""
import numpy
from typing import cast, Dict, List, Tuple, Callable, Set, Optional
from spacy_alignments.tokenizations import get_alignments
from spacy.tokens import Span, Token
from thinc.types import Ragged, Floats2d
def get_token_positions(spans: List[Span]) -> Dict[Token, int]:
    """Assign every distinct token of the spans' docs a stable, first-seen
    index -- the token's row in the alignment table."""
    positions: Dict[Token, int] = {}
    for span in spans:
        for tok in span.doc:
            # setdefault inserts only on first sighting, preserving order.
            positions.setdefault(tok, len(positions))
    return positions
def get_alignment_via_offset_mapping(spans: List[Span], token_data) -> Ragged:
    """Build the token->wordpiece alignment using Huggingface's
    char_to_token offset mapping instead of string alignment.

    token_data: a Huggingface BatchEncoding-like object exposing
        char_to_token(batch_index, char_index) and input_ids -- assumed,
        TODO confirm against the caller.
    Returns a Ragged of flattened wordpiece indices per unique token.
    """
    # This function uses the offset mapping provided by Huggingface. I'm not
    # sure whether there's a bug here but I'm getting weird errors.
    # Tokens can occur more than once, and we need the alignment of each token
    # to its place in the concatenated wordpieces array.
    token_positions = get_token_positions(spans)
    alignment: List[Set[int]] = [set() for _ in range(len(token_positions))]
    wp_start = 0
    for i, span in enumerate(spans):
        # `j` is unused; enumerate kept for parity with the original source.
        for j, token in enumerate(span):
            position = token_positions[token]
            # Map every character of the token to its wordpiece (if any).
            for char_idx in range(token.idx, token.idx + len(token)):
                wp_j = token_data.char_to_token(i, char_idx)
                if wp_j is not None:
                    alignment[position].add(wp_start + wp_j)
        # Offset subsequent spans past this span's wordpieces.
        wp_start += len(token_data.input_ids[i])
    # Flatten the per-token index sets into Ragged (data, lengths) form.
    lengths: List[int] = []
    flat: List[int] = []
    for a in alignment:
        lengths.append(len(a))
        flat.extend(sorted(a))
    align = Ragged(numpy.array(flat, dtype="i"), numpy.array(lengths, dtype="i"))
    return align
def get_alignment(
    spans: List[Span],
    wordpieces: List[List[str]],
    special_tokens: Optional[List[str]] = None,
) -> Ragged:
    """Compute a ragged alignment array that records, for each unique token in
    `spans`, the corresponding indices in the flattened `wordpieces` array.
    For instance, imagine you have two overlapping spans:
        [[I, like, walking], [walking, outdoors]]
    And their wordpieces are:
        [[I, like, walk, ing], [walk, ing, out, doors]]
    We want to align "walking" against [walk, ing, walk, ing], which have
    indices [2, 3, 4, 5] once the nested wordpieces list is flattened.
    The nested alignment list would be:
      [[0], [1], [2, 3, 4, 5], [6, 7]]
        I   like    walking    outdoors
    Which gets flattened into the ragged array:
      [0, 1, 2, 3, 4, 5, 6, 7]
      [1, 1, 4, 2]
    The ragged format allows the aligned data to be computed via:
        tokens = Ragged(wp_tensor[align.data], align.lengths)
    This produces a ragged format, indicating which tokens need to be collapsed
    to make the aligned array. The reduction is deferred for a later step, so
    the user can configure it. The indexing is especially efficient in trivial
    cases like this where the indexing array is completely continuous.
    """
    if len(spans) != len(wordpieces):
        raise ValueError("Cannot align batches of different sizes.")
    if special_tokens is None:
        special_tokens = []
    # Tokens can occur more than once, and we need the alignment of each token
    # to its place in the concatenated wordpieces array.
    token_positions = get_token_positions(spans)
    alignment: List[Set[int]] = [set() for _ in range(len(token_positions))]
    wp_start = 0
    for i, (span, wp_toks) in enumerate(zip(spans, wordpieces)):
        sp_toks = [token.text for token in span]
        wp_toks_filtered = wp_toks
        # In the case that the special tokens do not appear in the text, filter
        # them out for alignment purposes so that special tokens like "<s>" are
        # not aligned to the character "s" in the text. (If the special tokens
        # appear in the text, it's not possible to distinguish them from the
        # added special tokens, so they may be aligned incorrectly.)
        if not any([special in span.text for special in special_tokens]):
            wp_toks_filtered = [
                tok if tok not in special_tokens else "" for tok in wp_toks
            ]
        # Character-level alignment between spaCy tokens and wordpieces.
        span2wp, wp2span = get_alignments(sp_toks, wp_toks_filtered)
        for token, wp_js in zip(span, span2wp):
            position = token_positions[token]
            # Shift wordpiece indices into the flattened (concatenated) array.
            alignment[position].update(wp_start + j for j in wp_js)
        wp_start += len(wp_toks)
    # Flatten the per-token index sets into Ragged (data, lengths) form.
    lengths: List[int] = []
    flat: List[int] = []
    for a in alignment:
        lengths.append(len(a))
        flat.extend(sorted(a))
    align = Ragged(numpy.array(flat, dtype="i"), numpy.array(lengths, dtype="i"))
    return align
"spacy_alignments.tokenizations.get_alignments",
"numpy.array"
] | [((1783, 1811), 'numpy.array', 'numpy.array', (['flat'], {'dtype': '"""i"""'}), "(flat, dtype='i')\n", (1794, 1811), False, 'import numpy\n'), ((1813, 1844), 'numpy.array', 'numpy.array', (['lengths'], {'dtype': '"""i"""'}), "(lengths, dtype='i')\n", (1824, 1844), False, 'import numpy\n'), ((4362, 4403), 'spacy_alignments.tokenizations.get_alignments', 'get_alignments', (['sp_toks', 'wp_toks_filtered'], {}), '(sp_toks, wp_toks_filtered)\n', (4376, 4403), False, 'from spacy_alignments.tokenizations import get_alignments\n'), ((4757, 4785), 'numpy.array', 'numpy.array', (['flat'], {'dtype': '"""i"""'}), "(flat, dtype='i')\n", (4768, 4785), False, 'import numpy\n'), ((4787, 4818), 'numpy.array', 'numpy.array', (['lengths'], {'dtype': '"""i"""'}), "(lengths, dtype='i')\n", (4798, 4818), False, 'import numpy\n')] |
#!/usr/bin/env python
"""Check the WeightedSum transformation"""
import numpy as N
from load import ROOT as R
from gna.constructors import Points, stdvector
from gna.env import env
"""Initialize inpnuts"""
arr1 = N.arange(0, 5)
arr2 = -arr1
print( 'Data1:', arr1 )
print( 'Data2:', arr2 )
labels = [ 'arr1', 'arr2' ]
weights = [ 'w1', 'w2' ]
"""Initialize environment"""
p1 = env.globalns.defparameter( weights[0], central=1.0, sigma=0.1 )
p2 = env.globalns.defparameter( weights[1], central=1.0, sigma=0.1 )
env.globalns.printparameters()
"""Initialize transformations"""
points1 = Points( arr1 )
points2 = Points( arr2 )
"""Mode1: a1*w1+a2*w2"""
ws = R.WeightedSum( stdvector(weights), stdvector(labels) )
ws.sum.arr1(points1.points)
ws.sum.arr2(points2.points)
print( 'Mode1: a1*w1+a2*w2' )
print( ' ', p1.value(), p2.value(), ws.sum.sum.data() )
p1.set(2)
print( ' ', p1.value(), p2.value(), ws.sum.sum.data() )
p2.set(2)
print( ' ', p1.value(), p2.value(), ws.sum.sum.data() )
p1.set(1)
p2.set(1)
print()
"""Mode2: a1*w1+a2"""
ws = R.WeightedSum( stdvector(weights[:1]), stdvector(labels) )
ws.sum.arr1(points1.points)
ws.sum.arr2(points2.points)
print( 'Mode2: a1*w1+a2' )
print( ' ', p1.value(), p2.value(), ws.sum.sum.data() )
p1.set(2)
print( ' ', p1.value(), p2.value(), ws.sum.sum.data() )
p2.set(2)
print( ' ', p1.value(), p2.value(), ws.sum.sum.data() )
p1.set(1)
p2.set(1)
print()
"""Mode4: c+a1*w1+a1*w2"""
ws = R.WeightedSum( -10, stdvector(weights), stdvector(labels) )
ws.sum.arr1(points1.points)
ws.sum.arr2(points2.points)
print( 'Mode4: -10+a1*w1+a2*w2' )
print( ' ', p1.value(), p2.value(), ws.sum.sum.data() )
p1.set(2)
print( ' ', p1.value(), p2.value(), ws.sum.sum.data() )
p2.set(2)
print( ' ', p1.value(), p2.value(), ws.sum.sum.data() )
p1.set(1)
p2.set(1)
print()
| [
"gna.constructors.Points",
"gna.env.env.globalns.printparameters",
"gna.env.env.globalns.defparameter",
"gna.constructors.stdvector",
"numpy.arange"
] | [((216, 230), 'numpy.arange', 'N.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (224, 230), True, 'import numpy as N\n'), ((381, 442), 'gna.env.env.globalns.defparameter', 'env.globalns.defparameter', (['weights[0]'], {'central': '(1.0)', 'sigma': '(0.1)'}), '(weights[0], central=1.0, sigma=0.1)\n', (406, 442), False, 'from gna.env import env\n'), ((450, 511), 'gna.env.env.globalns.defparameter', 'env.globalns.defparameter', (['weights[1]'], {'central': '(1.0)', 'sigma': '(0.1)'}), '(weights[1], central=1.0, sigma=0.1)\n', (475, 511), False, 'from gna.env import env\n'), ((515, 545), 'gna.env.env.globalns.printparameters', 'env.globalns.printparameters', ([], {}), '()\n', (543, 545), False, 'from gna.env import env\n'), ((590, 602), 'gna.constructors.Points', 'Points', (['arr1'], {}), '(arr1)\n', (596, 602), False, 'from gna.constructors import Points, stdvector\n'), ((615, 627), 'gna.constructors.Points', 'Points', (['arr2'], {}), '(arr2)\n', (621, 627), False, 'from gna.constructors import Points, stdvector\n'), ((676, 694), 'gna.constructors.stdvector', 'stdvector', (['weights'], {}), '(weights)\n', (685, 694), False, 'from gna.constructors import Points, stdvector\n'), ((696, 713), 'gna.constructors.stdvector', 'stdvector', (['labels'], {}), '(labels)\n', (705, 713), False, 'from gna.constructors import Points, stdvector\n'), ((1065, 1087), 'gna.constructors.stdvector', 'stdvector', (['weights[:1]'], {}), '(weights[:1])\n', (1074, 1087), False, 'from gna.constructors import Points, stdvector\n'), ((1089, 1106), 'gna.constructors.stdvector', 'stdvector', (['labels'], {}), '(labels)\n', (1098, 1106), False, 'from gna.constructors import Points, stdvector\n'), ((1465, 1483), 'gna.constructors.stdvector', 'stdvector', (['weights'], {}), '(weights)\n', (1474, 1483), False, 'from gna.constructors import Points, stdvector\n'), ((1485, 1502), 'gna.constructors.stdvector', 'stdvector', (['labels'], {}), '(labels)\n', (1494, 1502), False, 'from gna.constructors import 
Points, stdvector\n')] |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 10:43, 08/07/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi
from numpy import tanh as nptanh
def itself(x):
    """Identity activation: return the input unchanged."""
    return x
def elu(x, alpha=1):
    """Exponential linear unit: alpha*(exp(x)-1) where x < 0, x elsewhere."""
    negative_branch = alpha * (exp(x) - 1)
    return where(x < 0, negative_branch, x)
def relu(x):
    """Rectified linear unit: elementwise max(0, x)."""
    return maximum(0, x)
def tanh(x):
    """Hyperbolic tangent activation (delegates to numpy's tanh)."""
    return nptanh(x)
def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1.0 / (1.0 + exp(-x))
def derivative_self(x):
    """Derivative of the identity activation (constant 1, returned as a scalar)."""
    return 1
def derivative_elu(x, alpha=1):
    """Derivative of ELU expressed in terms of the ELU *output* x
    (matching derivative_tanh / derivative_sigmoid below):
    for output x < 0, alpha*exp(t) == x + alpha; otherwise 1."""
    return where(x < 0, x + alpha, 1)
def derivative_relu(x):
    """Derivative of ReLU: 0 where x < 0, 1 elsewhere."""
    return where(x < 0, 0, 1)
def derivative_tanh(x):
    """Derivative of tanh in terms of the tanh output x: 1 - x**2."""
    return 1 - power(x, 2)
def derivative_sigmoid(x):
    """Derivative of sigmoid in terms of the sigmoid output x: x * (1 - x)."""
    return multiply(x, 1 - x)
def expand_chebyshev(x):
    """Functional-link expansion: Chebyshev polynomials T1..T5 applied to
    each column of x, concatenated along axis 1 (shape (n, 5*d))."""
    powers = {n: power(x, n) for n in (2, 3, 4, 5)}
    terms = [
        x,                                                  # T1
        2 * powers[2] - 1,                                  # T2
        4 * powers[3] - 3 * x,                              # T3
        8 * powers[4] - 8 * powers[2] + 1,                  # T4
        16 * powers[5] - 20 * powers[3] + 5 * x,            # T5
    ]
    return concatenate(terms, axis=1)
def expand_legendre(x):
    """Functional-link expansion: Legendre polynomials P1..P5 applied to
    each column of x, concatenated along axis 1.

    BUG FIX: the previous P5 term used incorrect coefficients,
    (9*x^5 - 350*x^3 + 75*x)/40; the standard Legendre polynomial is
    P5(x) = (63*x^5 - 70*x^3 + 15*x)/8 (so that P5(1) == 1, like P1..P4).
    """
    x1 = x
    x2 = 1 / 2 * (3 * power(x, 2) - 1)
    x3 = 1 / 2 * (5 * power(x, 3) - 3 * x)
    x4 = 1 / 8 * (35 * power(x, 4) - 30 * power(x, 2) + 3)
    x5 = 1 / 8 * (63 * power(x, 5) - 70 * power(x, 3) + 15 * x)
    return concatenate((x1, x2, x3, x4, x5), axis=1)
def expand_laguerre(x):
    """Functional-link expansion: Laguerre polynomials L1..L5 applied to
    each column of x, concatenated along axis 1."""
    x2, x3, x4, x5 = (power(x, n) for n in (2, 3, 4, 5))
    l1 = -x + 1
    l2 = 1 / 2 * (x2 - 4 * x + 2)
    l3 = 1 / 6 * (-x3 + 9 * x2 - 18 * x + 6)
    l4 = 1 / 24 * (x4 - 16 * x3 + 72 * x2 - 96 * x + 24)
    l5 = 1 / 120 * (-x5 + 25 * x4 - 200 * x3 + 600 * x2 - 600 * x + 120)
    return concatenate((l1, l2, l3, l4, l5), axis=1)
def expand_power(x):
    """Cumulative power expansion: column block k holds x + x^2 + ... + x^k
    for k = 1..5, concatenated along axis 1."""
    running = x
    expansions = [running]
    for degree in range(2, 6):
        running = running + power(x, degree)
        expansions.append(running)
    return concatenate(expansions, axis=1)
def expand_trigonometric(x):
    """Trigonometric functional-link expansion: x followed by
    sin(k*pi*x) + cos(k*pi*x) for harmonics k = 1..4, along axis 1."""
    expansions = [x]
    for harmonic in range(1, 5):
        expansions.append(sin(harmonic * pi * x) + cos(harmonic * pi * x))
    return concatenate(expansions, axis=1)
"numpy.multiply",
"numpy.maximum",
"numpy.tanh",
"numpy.power",
"numpy.where",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"numpy.concatenate"
] | [((990, 1003), 'numpy.maximum', 'maximum', (['(0)', 'x'], {}), '(0, x)\n', (997, 1003), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1030, 1039), 'numpy.tanh', 'nptanh', (['x'], {}), '(x)\n', (1036, 1039), True, 'from numpy import tanh as nptanh\n'), ((1175, 1201), 'numpy.where', 'where', (['(x < 0)', '(x + alpha)', '(1)'], {}), '(x < 0, x + alpha, 1)\n', (1180, 1201), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1239, 1257), 'numpy.where', 'where', (['(x < 0)', '(0)', '(1)'], {}), '(x < 0, 0, 1)\n', (1244, 1257), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1351, 1369), 'numpy.multiply', 'multiply', (['x', '(1 - x)'], {}), '(x, 1 - x)\n', (1359, 1369), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1581, 1622), 'numpy.concatenate', 'concatenate', (['(x1, x2, x3, x4, x5)'], {'axis': '(1)'}), '((x1, x2, x3, x4, x5), axis=1)\n', (1592, 1622), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1877, 1918), 'numpy.concatenate', 'concatenate', (['(x1, x2, x3, x4, x5)'], {'axis': '(1)'}), '((x1, x2, x3, x4, x5), axis=1)\n', (1888, 1918), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2271, 2312), 'numpy.concatenate', 'concatenate', (['(x1, x2, x3, x4, x5)'], {'axis': '(1)'}), '((x1, x2, x3, x4, x5), axis=1)\n', (2282, 2312), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2462, 2503), 'numpy.concatenate', 'concatenate', (['(x1, x2, x3, x4, x5)'], {'axis': '(1)'}), '((x1, x2, x3, x4, x5), axis=1)\n', (2473, 2503), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2721, 2762), 'numpy.concatenate', 'concatenate', (['(x1, x2, x3, x4, x5)'], {'axis': '(1)'}), '((x1, x2, x3, x4, 
x5), axis=1)\n', (2732, 2762), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1299, 1310), 'numpy.power', 'power', (['x', '(2)'], {}), '(x, 2)\n', (1304, 1310), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2361, 2372), 'numpy.power', 'power', (['x', '(2)'], {}), '(x, 2)\n', (2366, 2372), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2387, 2398), 'numpy.power', 'power', (['x', '(3)'], {}), '(x, 3)\n', (2392, 2398), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2413, 2424), 'numpy.power', 'power', (['x', '(4)'], {}), '(x, 4)\n', (2418, 2424), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2439, 2450), 'numpy.power', 'power', (['x', '(5)'], {}), '(x, 5)\n', (2444, 2450), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2555, 2566), 'numpy.sin', 'sin', (['(pi * x)'], {}), '(pi * x)\n', (2558, 2566), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2569, 2580), 'numpy.cos', 'cos', (['(pi * x)'], {}), '(pi * x)\n', (2572, 2580), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2590, 2605), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (2593, 2605), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2608, 2623), 'numpy.cos', 'cos', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (2611, 2623), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2633, 2648), 'numpy.sin', 'sin', (['(3 * pi * x)'], {}), '(3 * pi * x)\n', (2636, 2648), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2651, 2666), 'numpy.cos', 'cos', (['(3 * pi * x)'], {}), 
'(3 * pi * x)\n', (2654, 2666), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2676, 2691), 'numpy.sin', 'sin', (['(4 * pi * x)'], {}), '(4 * pi * x)\n', (2679, 2691), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2694, 2709), 'numpy.cos', 'cos', (['(4 * pi * x)'], {}), '(4 * pi * x)\n', (2697, 2709), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1082, 1089), 'numpy.exp', 'exp', (['(-x)'], {}), '(-x)\n', (1085, 1089), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1421, 1432), 'numpy.power', 'power', (['x', '(2)'], {}), '(x, 2)\n', (1426, 1432), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1450, 1461), 'numpy.power', 'power', (['x', '(3)'], {}), '(x, 3)\n', (1455, 1461), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((948, 954), 'numpy.exp', 'exp', (['x'], {}), '(x)\n', (951, 954), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1483, 1494), 'numpy.power', 'power', (['x', '(4)'], {}), '(x, 4)\n', (1488, 1494), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1501, 1512), 'numpy.power', 'power', (['x', '(2)'], {}), '(x, 2)\n', (1506, 1512), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1531, 1542), 'numpy.power', 'power', (['x', '(5)'], {}), '(x, 5)\n', (1536, 1542), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1550, 1561), 'numpy.power', 'power', (['x', '(3)'], {}), '(x, 3)\n', (1555, 1561), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1682, 1693), 'numpy.power', 'power', (['x', '(2)'], {}), '(x, 2)\n', (1687, 1693), 
False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1721, 1732), 'numpy.power', 'power', (['x', '(3)'], {}), '(x, 3)\n', (1726, 1732), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1979, 1990), 'numpy.power', 'power', (['x', '(2)'], {}), '(x, 2)\n', (1984, 1990), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1765, 1776), 'numpy.power', 'power', (['x', '(4)'], {}), '(x, 4)\n', (1770, 1776), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1784, 1795), 'numpy.power', 'power', (['x', '(2)'], {}), '(x, 2)\n', (1789, 1795), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1824, 1835), 'numpy.power', 'power', (['x', '(5)'], {}), '(x, 5)\n', (1829, 1835), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((1844, 1855), 'numpy.power', 'power', (['x', '(3)'], {}), '(x, 3)\n', (1849, 1855), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2023, 2034), 'numpy.power', 'power', (['x', '(3)'], {}), '(x, 3)\n', (2028, 2034), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2041, 2052), 'numpy.power', 'power', (['x', '(2)'], {}), '(x, 2)\n', (2046, 2052), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2086, 2097), 'numpy.power', 'power', (['x', '(4)'], {}), '(x, 4)\n', (2091, 2097), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2124, 2135), 'numpy.power', 'power', (['x', '(2)'], {}), '(x, 2)\n', (2129, 2135), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2231, 2242), 'numpy.power', 'power', (['x', '(2)'], {}), '(x, 2)\n', (2236, 2242), False, 'from numpy 
import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2105, 2116), 'numpy.power', 'power', (['x', '(3)'], {}), '(x, 3)\n', (2110, 2116), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2211, 2222), 'numpy.power', 'power', (['x', '(3)'], {}), '(x, 3)\n', (2216, 2222), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2172, 2183), 'numpy.power', 'power', (['x', '(5)'], {}), '(x, 5)\n', (2177, 2183), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n'), ((2191, 2202), 'numpy.power', 'power', (['x', '(4)'], {}), '(x, 4)\n', (2196, 2202), False, 'from numpy import where, exp, maximum, power, multiply, concatenate, sin, cos, pi\n')] |
import numpy as np
from typing import List
from common import almost_equal
def correlation(x: List[int], y: List[int]) -> float:
    """Pearson correlation coefficient of two equal-length sequences,
    computed as (sum(x_i*y_i) - n*mean_x*mean_y) / (n*std_x*std_y)."""
    assert len(x) == len(y)
    n = len(x)
    mean_x, mean_y = np.mean(x), np.mean(y)
    # Population standard deviations (np.std default, ddof=0).
    std_x, std_y = np.std(x), np.std(y)
    cross_sum = sum(x_i * y_i for (x_i, y_i) in zip(x, y))
    return (cross_sum - n * mean_x * mean_y) / (n * std_x * std_y)
# Sanity check: the hand-rolled formula must agree with NumPy's corrcoef.
a, b = [0, 14, 1, 10, 5], [2, 6, 8, 5, 6]
assert almost_equal(correlation(a, b), np.corrcoef(a, b)[0][1])
| [
"numpy.std",
"numpy.corrcoef",
"numpy.mean"
] | [((197, 207), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (204, 207), True, 'import numpy as np\n'), ((209, 219), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (216, 219), True, 'import numpy as np\n'), ((247, 256), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (253, 256), True, 'import numpy as np\n'), ((258, 267), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (264, 267), True, 'import numpy as np\n'), ((516, 533), 'numpy.corrcoef', 'np.corrcoef', (['a', 'b'], {}), '(a, b)\n', (527, 533), True, 'import numpy as np\n')] |
"""
Combine metric_generator and attract_repel_clusterer to derive a low dimensional layout
"""
from . import local_files
import numpy as np
import jp_proxy_widget
from jp_doodle.data_tables import widen_notebook
from jp_doodle import dual_canvas
from IPython.display import display
# JavaScript sources (shipped with the package) that must be injected into
# the notebook page before the WebGL clusterer widgets can run.
required_javascript_modules = [
    local_files.vendor_path("js/feedWebGL.js"),
    local_files.vendor_path("js/metric_generator.js"),
    local_files.vendor_path("js/attract_repel_clusterer.js"),
]
# Module-level guard so the JS files are only loaded once per kernel.
REQUIREMENTS_LOADED = False
def load_requirements(widget=None, silent=True, additional=()):
    """
    Load Javascript prerequisites into the notebook page context.

    widget: proxy widget used to inject the JS; when None a throwaway
        JSProxyWidget is created (and displayed, since silent is forced off).
    silent: when False, show a confirmation message in the widget.
    additional: extra JS file paths to load after the required modules.

    Idempotent: repeat calls return immediately via REQUIREMENTS_LOADED.
    """
    global REQUIREMENTS_LOADED
    if REQUIREMENTS_LOADED:
        if not silent:
            print("Not reloading requirements.")
        return
    if widget is None:
        widget = jp_proxy_widget.JSProxyWidget()
        silent = False
    # Make sure jQuery and jQueryUI are loaded.
    widget.check_jquery()
    # load additional jQuery plugin code.
    all_requirements = list(required_javascript_modules) + list(additional)
    widget.load_js_files(all_requirements)
    dual_canvas.load_requirements(widget, silent=silent)
    if not silent:
        # NOTE(review): message says "chart_ipynb" -- looks copied from another
        # project; confirm the intended wording before changing it.
        widget.element.html("<div>Requirements for <b>chart_ipynb</b> have been loaded.</div>")
        display(widget)
    REQUIREMENTS_LOADED = True
class AttractRepelMatrix:
    """Computes, via the WebGL metric_generator widget, the indices of the
    `num_close` nearest and `num_far` farthest neighbours of each vector,
    together with the target distances to use for attraction/repulsion."""

    def __init__(self, vectors, num_close, num_far, close_distance, far_distance):
        """
        vectors: (num_vectors, vector_length) array-like of feature vectors.
        num_close/num_far: neighbour counts per vector.
        close_distance/far_distance: target layout distances for them.
        """
        # BUG FIX: np.float / np.int aliases were removed in NumPy 1.24;
        # use the builtin types (equivalent to float64 / platform int).
        self.vectors = np.array(vectors, dtype=float)
        [self.num_vectors, self.vector_length] = self.vectors.shape
        self.num_close = int(num_close)
        self.num_far = int(num_far)
        self.close_distance = float(close_distance)
        self.far_distance = float(far_distance)

    def get_distance_and_index_matrices(self, using_widget):
        """Run the metric generator in `using_widget` and return a dict with
        the low/high neighbour index matrices, their horizontal stack, and
        the matching target-distance matrix."""
        load_requirements(using_widget)
        using_widget.js_init("""
            debugger;
            var generator = element.metric_generator({
                ravelled_vectors: ravelled_vectors,
                vector_length: vector_length,
                num_vectors: num_vectors,
            })
            element.extremal_indices = generator.calculate_extremal_indices(num_close, num_far);
            generator.lose_context()
        """,
        ravelled_vectors=list(self.vectors.ravel()),
        vector_length=self.vector_length,
        num_vectors=self.num_vectors,
        num_close=self.num_close,
        num_far=self.num_far,
        )
        # Pull the results computed on the JavaScript side back into Python.
        low_indices = using_widget.element.extremal_indices.low_indices.sync_value()
        high_indices = using_widget.element.extremal_indices.high_indices.sync_value()
        indices = np.hstack([np.array(low_indices, dtype=int), np.array(high_indices, dtype=int)])
        distances = np.zeros(indices.shape, dtype=float)
        distances[:, :self.num_close] = self.close_distance
        # Guard num_far == 0: a plain [:, -0:] slice would select EVERY column.
        if self.num_far:
            distances[:, -self.num_far:] = self.far_distance
        return dict(
            low_indices=low_indices,
            high_indices=high_indices,
            indices=indices,
            distances=distances,
        )
class AttractRepelClusterer:
    """Drives the attract_repel_clusterer WebGL widget: iteratively moves
    points so that selected index pairs approach their target distances."""

    def __init__(self, initial_positions, indices, distances, delta=0.1):
        """
        initial_positions: (npositions, dim) starting layout, 2 <= dim <= 4.
        indices: (npositions, k) neighbour index matrix.
        distances: (npositions, k) target distance per index entry.
        delta: relaxation step size.
        """
        self.delta = delta
        # BUG FIX: np.float / np.int aliases were removed in NumPy 1.24;
        # use the builtin types instead.
        self.initial_positions = np.array(initial_positions, dtype=float)
        self.indices = np.array(indices, dtype=int)
        self.distances = np.array(distances, dtype=float)
        (self.npositions, self.dimension) = self.initial_positions.shape
        assert 2 <= self.dimension <= 4, "dimension must be in 2,3,4"
        assert self.indices.shape == self.distances.shape, "indices must match distances"
        (n_ind, self.indices_per_vertex) = self.indices.shape
        assert n_ind == self.npositions, "index rows must match positions rows"

    def install_in_widget(self, in_widget):
        """Attach the clusterer to a jp_proxy_widget and expose position
        accessors (current_positions / centered_positions) on its element."""
        load_requirements(in_widget)
        # The WebGL side always works with 4 components; pad unused ones with 0.
        positions = np.zeros((self.npositions, 4), dtype=float)
        positions[:, :self.dimension] = self.initial_positions
        in_widget.js_init("""
            debugger;
            element.clusterer = element.attract_repel_clusterer({
                positions: positions,
                indices_per_vertex: indices_per_vertex,
                indices: indices,
                index_distances: index_distances,
                delta: delta,
            });
            element.current_positions = function () {
                return element.clusterer.get_positions(dimension);
            };
            element.centered_positions = function (diameter) {
                return element.clusterer.get_centered_positions(diameter, dimension).centered_positions;
            };
        """,
        positions=list(positions.ravel()),
        indices_per_vertex=self.indices_per_vertex,
        indices=list(self.indices.ravel()),
        index_distances=list(self.distances.ravel()),
        dimension=self.dimension,
        delta=self.delta,
        )
        self.widget = in_widget
class DisplayController:
    """Embed feature vectors in 2-4 dimensions and display them as a labelled
    graph that relaxes interactively inside a jp_doodle 3d swatch."""
    def __init__(self, vectors, labels, width, nclose, nfar, dim=3, delta=0.1):
        """
        :param vectors: 2D array-like of feature vectors, one row per item.
        :param labels: one display label per vector (len must match rows).
        :param width: model width of the drawing region.
        :param nclose: number of near neighbours each vector is attracted to.
        :param nfar: number of far neighbours each vector is repelled from.
        :param dim: embedding dimension (2, 3 or 4).
        :param delta: relaxation step size passed to the clusterer.
        """
        # np.float was removed in NumPy 1.24 -- the builtin float is identical.
        self.vectors = np.array(vectors, dtype=float)
        (self.nvectors, self.vlength) = self.vectors.shape
        assert self.nvectors == len(labels)
        self.labels = labels
        self.width = float(width)
        self.nclose = int(nclose)
        self.nfar = int(nfar)
        self.dim = int(dim)
        self.delta = delta
    def set_up_swatch(self):
        """Create the 3d swatch, compute the neighbour structure, install the
        clusterer, and define the javascript draw/relax entry points."""
        from jp_doodle import nd_frame, dual_canvas
        from IPython.display import display
        from jp_doodle.data_tables import widen_notebook
        widen_notebook()
        self.frame = nd_frame.swatch3d(pixels=800, model_height=self.width, auto_show=False)
        self.canvas = self.frame.in_canvas
        display(self.frame.in_canvas.debugging_display())
        # Get the affinity matrix
        A = AttractRepelMatrix(
            vectors=self.vectors,
            num_close=self.nclose,
            num_far=self.nfar,
            close_distance=0.01 * self.width,
            far_distance=self.width,
        )
        D = A.get_distance_and_index_matrices(self.canvas)
        self.indices = D["indices"]
        self.distances = D["distances"]
        self.initial_positions = self.get_initial_positions()
        self.colors = self.get_colors()
        self.clusterer = AttractRepelClusterer(
            initial_positions=self.initial_positions,
            indices=self.indices,
            distances=self.distances,
            delta=self.delta,
        )
        self.clusterer.install_in_widget(self.canvas)
        self.canvas.js_init(
            """
            debugger;
            element.draw_graph = function(names, positions) {
                if (!positions) {
                    var positions = element.centered_positions(width);
                }
                frame.reset();
                for (var i=0; i<positions.length; i++) {
                    var p = positions[i];
                    var label = node_labels[i];
                    var color = colors[label];
                    var name = null;
                    if (names) {
                        name = "node_" + i;
                    }
                    var txt = frame.text({
                        location: p,
                        text: "" + label,
                        background: color,
                        font: "normal 20px Arial",
                        name: name,
                    })
                }
                frame.fit(0.7);
                frame.orbit_all(width);
                return true;
            };
            var status = $("<div>information here</div>").appendTo(element);
            element.relax_graph = function(iterations, min_change) {
                min_change = min_change || 0.01;
                var count = 0;
                var step = function() {
                    element.clusterer.step_and_feedback();
                    // always get 3d positions
                    var info = element.clusterer.get_centered_positions(width, 3)
                    // draw graph with names enabled
                    if ((count % 20) == 0) {
                        element.draw_graph(false, info.centered_positions);
                    }
                    var max_shift = info.max_shift;
                    status.html("" + count + " : " + max_shift);
                    if ((min_change) && (count>0) && (max_shift < min_change)) {
                        status.html("" + count + " change too small " + max_shift);
                        return;
                    }
                    if ((iterations) && (count > iterations)) {
                        status.html("" + count + " iteration limit reached " + max_shift);
                        return;
                    }
                    count += 1
                    // otherwise run again
                    requestAnimationFrame(step);
                };
                step();
                return true;
            };
            element.info = function(text) {
                status.html(text)
            };
            element.draw_graph();
        """,
            node_labels=list(self.labels),
            width=self.width,
            frame=self.frame.element,
            colors=self.colors,
            dimensions=self.dim,
        )
    def relax(self, iterations, min_change=0.1):
        """Run the browser-side relaxation loop, then redraw with node names."""
        self.canvas.element.relax_graph(iterations, min_change).sync_value()
        self.canvas.element.draw_graph(True)
    def get_colors(self):
        """Assign a deterministic css `rgb(...)` colour to each distinct label."""
        labels = list(set(self.labels))
        r = 33
        g = 121
        result = {}
        for label in labels:
            b = (256 - r - g) % 256
            color = "rgb(%s,%s,%s)" % (r, g, b)
            result[label] = color
            # Walk r/g through the colour space for the next label.
            r = (r + 43) % 256
            g = (g + 31) % 256
            # not too yellow
            if r > 200 and g > 200:
                r = r % 200
            # not too white
            if (r + b + g) > 600:
                g = g % 101
                b = b % 123
        return result
    def get_initial_positions(self):
        """Seed positions on deterministic sine curves, then pull each point
        toward the mean of its near neighbours with index <= its own."""
        r = np.arange(self.nvectors)
        positions = np.zeros((self.nvectors, self.dim))
        width = self.width
        for i in range(self.dim):
            # Different incommensurate frequencies per axis spread the points.
            m = np.sqrt(i * 5 + 9)
            positions[:, i] = np.sin(m * r) * (width + 1)
        # pull positions between previous near points
        indices = self.indices
        for i in range(self.nvectors):
            count = 0
            total = 0
            for k in range(self.nclose):
                index = indices[i, k]
                if index <= i:
                    count += 1
                    total = total + positions[index]
            if count > 1:
                positions[i] = total / count
        return positions
| [
"jp_proxy_widget.JSProxyWidget",
"jp_doodle.nd_frame.swatch3d",
"jp_doodle.dual_canvas.load_requirements",
"numpy.zeros",
"jp_doodle.data_tables.widen_notebook",
"IPython.display.display",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.sqrt"
] | [((1140, 1192), 'jp_doodle.dual_canvas.load_requirements', 'dual_canvas.load_requirements', (['widget'], {'silent': 'silent'}), '(widget, silent=silent)\n', (1169, 1192), False, 'from jp_doodle import nd_frame, dual_canvas\n'), ((846, 877), 'jp_proxy_widget.JSProxyWidget', 'jp_proxy_widget.JSProxyWidget', ([], {}), '()\n', (875, 877), False, 'import jp_proxy_widget\n'), ((1316, 1331), 'IPython.display.display', 'display', (['widget'], {}), '(widget)\n', (1323, 1331), False, 'from IPython.display import display\n'), ((1498, 1531), 'numpy.array', 'np.array', (['vectors'], {'dtype': 'np.float'}), '(vectors, dtype=np.float)\n', (1506, 1531), True, 'import numpy as np\n'), ((2797, 2836), 'numpy.zeros', 'np.zeros', (['indices.shape'], {'dtype': 'np.float'}), '(indices.shape, dtype=np.float)\n', (2805, 2836), True, 'import numpy as np\n'), ((3288, 3331), 'numpy.array', 'np.array', (['initial_positions'], {'dtype': 'np.float'}), '(initial_positions, dtype=np.float)\n', (3296, 3331), True, 'import numpy as np\n'), ((3355, 3386), 'numpy.array', 'np.array', (['indices'], {'dtype': 'np.int'}), '(indices, dtype=np.int)\n', (3363, 3386), True, 'import numpy as np\n'), ((3412, 3447), 'numpy.array', 'np.array', (['distances'], {'dtype': 'np.float'}), '(distances, dtype=np.float)\n', (3420, 3447), True, 'import numpy as np\n'), ((3925, 3971), 'numpy.zeros', 'np.zeros', (['(self.npositions, 4)'], {'dtype': 'np.float'}), '((self.npositions, 4), dtype=np.float)\n', (3933, 3971), True, 'import numpy as np\n'), ((5134, 5167), 'numpy.array', 'np.array', (['vectors'], {'dtype': 'np.float'}), '(vectors, dtype=np.float)\n', (5142, 5167), True, 'import numpy as np\n'), ((5681, 5697), 'jp_doodle.data_tables.widen_notebook', 'widen_notebook', ([], {}), '()\n', (5695, 5697), False, 'from jp_doodle.data_tables import widen_notebook\n'), ((5719, 5790), 'jp_doodle.nd_frame.swatch3d', 'nd_frame.swatch3d', ([], {'pixels': '(800)', 'model_height': 'self.width', 'auto_show': '(False)'}), 
'(pixels=800, model_height=self.width, auto_show=False)\n', (5736, 5790), False, 'from jp_doodle import nd_frame, dual_canvas\n'), ((10452, 10476), 'numpy.arange', 'np.arange', (['self.nvectors'], {}), '(self.nvectors)\n', (10461, 10476), True, 'import numpy as np\n'), ((10497, 10532), 'numpy.zeros', 'np.zeros', (['(self.nvectors, self.dim)'], {}), '((self.nvectors, self.dim))\n', (10505, 10532), True, 'import numpy as np\n'), ((10610, 10628), 'numpy.sqrt', 'np.sqrt', (['(i * 5 + 9)'], {}), '(i * 5 + 9)\n', (10617, 10628), True, 'import numpy as np\n'), ((2699, 2734), 'numpy.array', 'np.array', (['low_indices'], {'dtype': 'np.int'}), '(low_indices, dtype=np.int)\n', (2707, 2734), True, 'import numpy as np\n'), ((2736, 2772), 'numpy.array', 'np.array', (['high_indices'], {'dtype': 'np.int'}), '(high_indices, dtype=np.int)\n', (2744, 2772), True, 'import numpy as np\n'), ((10660, 10673), 'numpy.sin', 'np.sin', (['(m * r)'], {}), '(m * r)\n', (10666, 10673), True, 'import numpy as np\n')] |
from modlamp.core import read_fasta
from scipy.stats import describe
from glob import glob
import numpy as np
import pandas as pd

# Summarise sequence-length statistics per dataset (positive vs negative
# class) and overall, writing a LaTeX-friendly table to data.csv
# ("&"-separated columns, rows terminated with "\\ \hline").
lens_pos, lens_neg = [], []
data = []
for p in sorted(glob("data/*/seqs.fasta")):
    # classes.txt holds one 0/1 label per sequence, in fasta order.
    classes_path = p.replace("seqs.fasta", "classes.txt")
    with open(classes_path) as f:
        classes = [int(l.rstrip()) for l in f.readlines()]
    seqs, names = read_fasta(p)
    # NOTE(review): dict(zip(...)) collapses duplicate sequences to a single
    # entry (keeping the last label) -- presumably intentional de-duplication.
    zipped = dict(zip(seqs, classes))
    len_pos = [len(seq) for seq, class_ in zipped.items() if class_ == 1]
    lens_pos += len_pos
    len_neg = [len(seq) for seq, class_ in zipped.items() if class_ == 0]
    lens_neg += len_neg
    des_pos, des_neg = describe(len_pos), describe(len_neg)
    data += [{
        "dataset": p.split("/")[-2].replace("_", "\\_"),
        "pos_neg_nobs": f"{des_pos.nobs}, {des_neg.nobs}",
        "pos_minmax": f"{des_pos.minmax[0]}, {des_pos.minmax[1]}",
        "neg_minmax": f"{des_neg.minmax[0]}, {des_neg.minmax[1]}",
        "pos_mean_std": f"{np.round(des_pos.mean, 2)}, {np.round(np.sqrt(des_pos.variance), 2)}",
        "neg_mean_std": f"{np.round(des_neg.mean, 2)}, {np.round(np.sqrt(des_neg.variance), 2)}",
        "pos_median": np.median(len_pos),
        "neg_median": np.median(len_neg)
    }]
des_pos = describe(lens_pos)
des_neg = describe(lens_neg)
des = describe(lens_pos + lens_neg)
print("+", des_pos, np.median(lens_pos), np.sqrt(des_pos.variance))
print("-", des_neg, np.median(lens_neg), np.sqrt(des_neg.variance))
print(" ", des, np.median(lens_pos + lens_neg), np.sqrt(des.variance))
# pandas 2.0 removed the deprecated line_terminator kwarg; the supported
# spelling (since pandas 1.5) is lineterminator.
pd.DataFrame(data).to_csv("data.csv", sep="&", index=False, lineterminator="\\\\ \\hline \n")
| [
"pandas.DataFrame",
"numpy.median",
"modlamp.core.read_fasta",
"glob.glob",
"scipy.stats.describe",
"numpy.round",
"numpy.sqrt"
] | [((1253, 1271), 'scipy.stats.describe', 'describe', (['lens_pos'], {}), '(lens_pos)\n', (1261, 1271), False, 'from scipy.stats import describe\n'), ((1282, 1300), 'scipy.stats.describe', 'describe', (['lens_neg'], {}), '(lens_neg)\n', (1290, 1300), False, 'from scipy.stats import describe\n'), ((1307, 1336), 'scipy.stats.describe', 'describe', (['(lens_pos + lens_neg)'], {}), '(lens_pos + lens_neg)\n', (1315, 1336), False, 'from scipy.stats import describe\n'), ((186, 211), 'glob.glob', 'glob', (['"""data/*/seqs.fasta"""'], {}), "('data/*/seqs.fasta')\n", (190, 211), False, 'from glob import glob\n'), ((383, 396), 'modlamp.core.read_fasta', 'read_fasta', (['p'], {}), '(p)\n', (393, 396), False, 'from modlamp.core import read_fasta\n'), ((1358, 1377), 'numpy.median', 'np.median', (['lens_pos'], {}), '(lens_pos)\n', (1367, 1377), True, 'import numpy as np\n'), ((1379, 1404), 'numpy.sqrt', 'np.sqrt', (['des_pos.variance'], {}), '(des_pos.variance)\n', (1386, 1404), True, 'import numpy as np\n'), ((1426, 1445), 'numpy.median', 'np.median', (['lens_neg'], {}), '(lens_neg)\n', (1435, 1445), True, 'import numpy as np\n'), ((1447, 1472), 'numpy.sqrt', 'np.sqrt', (['des_neg.variance'], {}), '(des_neg.variance)\n', (1454, 1472), True, 'import numpy as np\n'), ((1490, 1520), 'numpy.median', 'np.median', (['(lens_pos + lens_neg)'], {}), '(lens_pos + lens_neg)\n', (1499, 1520), True, 'import numpy as np\n'), ((1522, 1543), 'numpy.sqrt', 'np.sqrt', (['des.variance'], {}), '(des.variance)\n', (1529, 1543), True, 'import numpy as np\n'), ((654, 671), 'scipy.stats.describe', 'describe', (['len_pos'], {}), '(len_pos)\n', (662, 671), False, 'from scipy.stats import describe\n'), ((673, 690), 'scipy.stats.describe', 'describe', (['len_neg'], {}), '(len_neg)\n', (681, 690), False, 'from scipy.stats import describe\n'), ((1546, 1564), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1558, 1564), True, 'import pandas as pd\n'), ((1174, 1192), 'numpy.median', 
'np.median', (['len_pos'], {}), '(len_pos)\n', (1183, 1192), True, 'import numpy as np\n'), ((1216, 1234), 'numpy.median', 'np.median', (['len_neg'], {}), '(len_neg)\n', (1225, 1234), True, 'import numpy as np\n'), ((983, 1008), 'numpy.round', 'np.round', (['des_pos.mean', '(2)'], {}), '(des_pos.mean, 2)\n', (991, 1008), True, 'import numpy as np\n'), ((1081, 1106), 'numpy.round', 'np.round', (['des_neg.mean', '(2)'], {}), '(des_neg.mean, 2)\n', (1089, 1106), True, 'import numpy as np\n'), ((1021, 1046), 'numpy.sqrt', 'np.sqrt', (['des_pos.variance'], {}), '(des_pos.variance)\n', (1028, 1046), True, 'import numpy as np\n'), ((1119, 1144), 'numpy.sqrt', 'np.sqrt', (['des_neg.variance'], {}), '(des_neg.variance)\n', (1126, 1144), True, 'import numpy as np\n')] |
# coding: utf-8
import gpflow
import numpy as np
import tensorflow as tf
from gpflow.mean_functions import Zero
from . import pZ_construction_singleBP
class AssignGP(
    gpflow.models.model.GPModel, gpflow.models.InternalDataTrainingLossMixin
):
    r"""
    Gaussian Process regression, but where the index to which the data are
    assigned is unknown.
    let f be a vector of GP points (usually longer than the number of data)
    f ~ MVN(0, K)
    and let Z be an (unknown) binary matrix with a single 1 in each row. The
    likelihood is
    y ~ MVN( Z f, \sigma^2 I)
    That is, each element of y is a noisy realization of one (unknown) element
    of f. We use variational Bayes to infer the labels using a sparse prior
    over the Z matrix (i.e. we have narrowed down the choice of which function
    values each y is drawn from).
    """
    def __init__(
        self,
        t,
        XExpanded,
        Y,
        kern,
        indices,
        b,
        phiPrior=None,
        phiInitial=None,
        fDebug=False,
        KConst=None,
    ):
        """Build the branching-assignment GP model.

        :param t: 1D pseudotime vector of length N.
        :param XExpanded: expanded input matrix (3 candidate latent functions
            per data point for the single branch point).
        :param Y: observed data, shape (N, D).
        :param kern: kernel; ``kern.kernels[0]`` must be the branching kernel
            (named "branch_kernel_param") whose ``Bv`` is the branch location.
        :param indices: per-data-point lists of valid columns in the expanded
            Phi matrix (``len(indices) == N``).
        :param b: branch point -- a numpy array with a single element.
        :param phiPrior: optional (N, 2) prior assignment probabilities.
        :param phiInitial: optional (N, 2) initial variational assignments;
            defaults to random probabilities summing to 1 per row.
        :param fDebug: if True, emit tf.print diagnostics during evaluation.
        :param KConst: fixed kernel matrix override -- debugging only.
        """
        super().__init__(
            kernel=kern,
            likelihood=gpflow.likelihoods.Gaussian(),
            mean_function=Zero(),
            num_latent_gps=Y.shape[-1],
        )
        assert len(indices) == t.size, "indices must be size N"
        assert len(t.shape) == 1, "pseudotime should be 1D"
        self.Y = Y
        self.X = XExpanded
        self.N = t.shape[0]
        self.t = t.astype(gpflow.default_float())  # could be DataHolder? advantages
        self.indices = indices
        # Variational parameter: unnormalised log assignment probabilities.
        self.logPhi = gpflow.Parameter(
            np.random.randn(t.shape[0], t.shape[0] * 3)
        )  # 1 branch point => 3 functions
        if phiInitial is None:
            phiInitial = np.ones((self.N, 2)) * 0.5  # dont know anything
            phiInitial[:, 0] = np.random.rand(self.N)
            phiInitial[:, 1] = 1 - phiInitial[:, 0]
        self.fDebug = fDebug
        # Used as p(Z) prior in KL term. This should add to 1 but will do so after UpdatePhPrior
        if phiPrior is None:
            phiPrior = np.ones((self.N, 2)) * 0.5
        # Fix prior term - this is without trunk
        self.pZ = np.ones((t.shape[0], t.shape[0] * 3))
        self.UpdateBranchingPoint(b, phiInitial, prior=phiPrior)
        self.KConst = KConst
        if not fDebug:
            assert KConst is None, "KConst only for debugging"
    def UpdateBranchingPoint(self, b, phiInitial, prior=None):
        """ Function to update branching point and optionally reset initial conditions for variational phi"""
        assert isinstance(b, np.ndarray)
        assert b.size == 1, "Must have scalar branching point"
        self.b = b.astype(gpflow.default_float())  # remember branching value
        assert self.kernel.kernels[0].name == "branch_kernel_param"
        # Propagate the new branch location into the branching kernel.
        self.kernel.kernels[0].Bv = b
        assert (
            self.logPhi.trainable is True
        ), "Phi should not be constant when changing branching location"
        if prior is not None:
            # Rebuild the expanded p(Z) prior for the new branch location.
            self.eZ0 = pZ_construction_singleBP.expand_pZ0Zeros(prior)
        self.pZ = pZ_construction_singleBP.expand_pZ0PureNumpyZeros(self.eZ0, b, self.t)
        self.InitialiseVariationalPhi(phiInitial)
    def InitialiseVariationalPhi(self, phiInitialIn):
        """Set initial state for Phi using branching location to constrain.
        This code has to be consistent with pZ_construction.singleBP.make_matrix to where
        the equality is placed i.e. if x<=b trunk and if x>b branch or vice versa. We use the
        former convention."""
        assert np.allclose(phiInitialIn.sum(1), 1), "probs must sum to 1 %s" % str(
            phiInitialIn
        )
        assert self.b == self.kernel.kernels[0].Bv, "Need to call UpdateBranchingPoint"
        N = self.Y.shape[0]
        assert phiInitialIn.shape[0] == N
        assert phiInitialIn.shape[1] == 2  # run OMGP with K=2 trajectories
        # Expand the (N, 2) branch probabilities to the full (N, 3N) layout.
        phiInitialEx = np.zeros((N, 3 * N))
        # large neg number makes exact zeros, make smaller for added jitter
        phiInitial_invSoftmax = -9.0 * np.ones((N, 3 * N))
        eps = 1e-9
        iterC = 0
        for i, p in enumerate(self.t):
            if p > self.b:  # before branching - it's the root
                phiInitialEx[i, iterC : iterC + 3] = np.hstack(
                    [eps, phiInitialIn[i, :] - eps]
                )
            else:
                # Point lies on the trunk: assign (almost) all mass there.
                phiInitialEx[i, iterC : iterC + 3] = np.array(
                    [1 - 2 * eps, 0 + eps, 0 + eps]
                )
            # Store logits so that softmax(logPhi) reproduces phiInitialEx.
            phiInitial_invSoftmax[i, iterC : iterC + 3] = np.log(
                phiInitialEx[i, iterC : iterC + 3]
            )
            iterC += 3
        assert not np.any(np.isnan(phiInitialEx)), "no nans please " + str(
            np.nonzero(np.isnan(phiInitialEx))
        )
        assert not np.any(phiInitialEx < -eps), "no negatives please " + str(
            np.nonzero(np.isnan(phiInitialEx))
        )
        self.logPhi.assign(phiInitial_invSoftmax)
    def GetPhi(self):
        """ Get Phi matrix, collapsed for each possible entry """
        assert self.b == self.kernel.kernels[0].Bv, "Need to call UpdateBranchingPoint"
        phiExpanded = self.GetPhiExpanded().numpy()
        # Keep only the columns that are valid assignments for each point.
        l = [phiExpanded[i, self.indices[i]] for i in range(len(self.indices))]
        phi = np.asarray(l)
        tolError = 1e-6
        assert np.all(phi.sum(1) <= 1 + tolError)
        assert np.all(phi >= 0 - tolError)
        assert np.all(phi <= 1 + tolError)
        return phi
    def GetPhiExpanded(self):
        """ Shortcut function to get Phi matrix out."""
        return tf.nn.softmax(self.logPhi)
    def objectiveFun(self):
        """Objective function to minimize - log likelihood -log prior.
        Unlike _objective, no gradient calculation is performed."""
        return -self.log_posterior_density() - self.log_prior_density()
    def maximum_log_likelihood_objective(self):
        """Variational lower bound on the log marginal likelihood; the latent
        f is integrated out analytically and only Phi stays variational."""
        print("assignegp_dense compiling model (build_likelihood)")
        N = tf.cast(tf.shape(self.Y)[0], dtype=gpflow.default_float())
        M = tf.shape(self.X)[0]
        D = tf.cast(tf.shape(self.Y)[1], dtype=gpflow.default_float())
        if self.KConst is not None:
            K = tf.cast(self.KConst, gpflow.default_float())
        else:
            K = self.kernel.K(self.X)
        Phi = tf.nn.softmax(self.logPhi)
        # try squashing Phi to avoid numerical errors
        Phi = (1 - 2e-6) * Phi + 1e-6
        sigma2 = self.likelihood.variance
        tau = 1.0 / self.likelihood.variance
        L = (
            tf.linalg.cholesky(K)
            + tf.eye(M, dtype=gpflow.default_float()) * gpflow.default_jitter()
        )
        W = tf.transpose(L) * tf.sqrt(tf.reduce_sum(Phi, 0)) / tf.sqrt(sigma2)
        P = tf.linalg.matmul(W, tf.transpose(W)) + tf.eye(
            M, dtype=gpflow.default_float()
        )
        R = tf.linalg.cholesky(P)
        PhiY = tf.linalg.matmul(tf.transpose(Phi), self.Y)
        LPhiY = tf.linalg.matmul(tf.transpose(L), PhiY)
        if self.fDebug:
            tf.print(Phi, [tf.shape(P), P], name="P", summarize=10)
            tf.print(Phi, [tf.shape(LPhiY), LPhiY], name="LPhiY", summarize=10)
            tf.print(Phi, [tf.shape(K), K], name="K", summarize=10)
            tf.print(Phi, [tau], name="tau", summarize=10)
        c = tf.linalg.triangular_solve(R, LPhiY, lower=True) / sigma2
        # compute KL
        KL = self.build_KL(Phi)
        # a1: Gaussian normalisation constant; a2: log-determinant term;
        # a3: data-fit term; a4: correction from the collapsed f; a5: -KL.
        a1 = -0.5 * N * D * tf.math.log(2.0 * np.pi / tau)
        a2 = (
            -0.5
            * D
            * tf.math.reduce_sum(tf.math.log(tf.math.square(tf.linalg.diag_part(R))))
        )
        a3 = -0.5 * tf.math.reduce_sum(tf.math.square(self.Y)) / sigma2
        a4 = +0.5 * tf.math.reduce_sum(tf.math.square(c))
        a5 = -KL
        if self.fDebug:
            tf.print(a1, [a1], name="a1=")
            tf.print(a2, [a2], name="a2=")
            tf.print(a3, [a3], name="a3=")
            tf.print(a4, [a4], name="a4=")
            tf.print(a5, [a5, Phi], name="a5 and Phi=", summarize=10)
        return a1 + a2 + a3 + a4 + a5
    def predict_f(self, Xnew, full_cov=False):
        """Posterior mean and variance of the latent functions at Xnew.

        :param Xnew: expanded prediction inputs.
        :param full_cov: if True return the full covariance (tiled across the
            D outputs), else the per-point marginal variance.
        :return: (mean, var) tensors.
        """
        M = tf.shape(self.X)[0]
        K = self.kernel.K(self.X)
        Phi = tf.nn.softmax(self.logPhi)
        # try squashing Phi to avoid numerical errors
        Phi = (1 - 2e-6) * Phi + 1e-6
        sigma2 = self.likelihood.variance
        L = (
            tf.linalg.cholesky(K)
            + tf.eye(M, dtype=gpflow.default_float()) * gpflow.default_jitter()
        )
        W = tf.transpose(L) * tf.sqrt(tf.math.reduce_sum(Phi, 0)) / tf.sqrt(sigma2)
        P = tf.linalg.matmul(W, tf.transpose(W)) + tf.eye(
            M, dtype=gpflow.default_float()
        )
        R = tf.linalg.cholesky(P)
        PhiY = tf.linalg.matmul(tf.transpose(Phi), self.Y)
        LPhiY = tf.linalg.matmul(tf.transpose(L), PhiY)
        c = tf.linalg.triangular_solve(R, LPhiY, lower=True) / sigma2
        Kus = self.kernel.K(self.X, Xnew)
        tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)
        tmp2 = tf.linalg.triangular_solve(R, tmp1, lower=True)
        mean = tf.linalg.matmul(tf.transpose(tmp2), c)
        if full_cov:
            var = (
                self.kernel.K(Xnew)
                + tf.linalg.matmul(tf.transpose(tmp2), tmp2)
                - tf.linalg.matmul(tf.transpose(tmp1), tmp1)
            )
            shape = tf.stack([1, 1, tf.shape(self.Y)[1]])
            var = tf.tile(tf.expand_dims(var, 2), shape)
        else:
            var = (
                self.kernel.K_diag(Xnew)
                + tf.math.reduce_sum(tf.math.square(tmp2), 0)
                - tf.math.reduce_sum(tf.math.square(tmp1), 0)
            )
            shape = tf.stack([1, tf.shape(self.Y)[1]])
            var = tf.tile(tf.expand_dims(var, 1), shape)
        return mean, var
    def build_KL(self, Phi):
        """KL divergence sum(Phi * log(Phi / pZ)) between the variational
        assignment distribution Phi and the prior self.pZ."""
        return tf.math.reduce_sum(Phi * tf.math.log(Phi)) - tf.math.reduce_sum(
            Phi * tf.math.log(self.pZ)
        )
| [
"tensorflow.reduce_sum",
"tensorflow.linalg.triangular_solve",
"tensorflow.print",
"numpy.ones",
"numpy.isnan",
"tensorflow.sqrt",
"tensorflow.math.square",
"gpflow.likelihoods.Gaussian",
"tensorflow.nn.softmax",
"tensorflow.math.log",
"gpflow.default_float",
"numpy.random.randn",
"gpflow.me... | [((2206, 2243), 'numpy.ones', 'np.ones', (['(t.shape[0], t.shape[0] * 3)'], {}), '((t.shape[0], t.shape[0] * 3))\n', (2213, 2243), True, 'import numpy as np\n'), ((3980, 4000), 'numpy.zeros', 'np.zeros', (['(N, 3 * N)'], {}), '((N, 3 * N))\n', (3988, 4000), True, 'import numpy as np\n'), ((5355, 5368), 'numpy.asarray', 'np.asarray', (['l'], {}), '(l)\n', (5365, 5368), True, 'import numpy as np\n'), ((5458, 5485), 'numpy.all', 'np.all', (['(phi >= 0 - tolError)'], {}), '(phi >= 0 - tolError)\n', (5464, 5485), True, 'import numpy as np\n'), ((5501, 5528), 'numpy.all', 'np.all', (['(phi <= 1 + tolError)'], {}), '(phi <= 1 + tolError)\n', (5507, 5528), True, 'import numpy as np\n'), ((5650, 5676), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logPhi'], {}), '(self.logPhi)\n', (5663, 5676), True, 'import tensorflow as tf\n'), ((6371, 6397), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logPhi'], {}), '(self.logPhi)\n', (6384, 6397), True, 'import tensorflow as tf\n'), ((6919, 6940), 'tensorflow.linalg.cholesky', 'tf.linalg.cholesky', (['P'], {}), '(P)\n', (6937, 6940), True, 'import tensorflow as tf\n'), ((8260, 8286), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logPhi'], {}), '(self.logPhi)\n', (8273, 8286), True, 'import tensorflow as tf\n'), ((8768, 8789), 'tensorflow.linalg.cholesky', 'tf.linalg.cholesky', (['P'], {}), '(P)\n', (8786, 8789), True, 'import tensorflow as tf\n'), ((9032, 9078), 'tensorflow.linalg.triangular_solve', 'tf.linalg.triangular_solve', (['L', 'Kus'], {'lower': '(True)'}), '(L, Kus, lower=True)\n', (9058, 9078), True, 'import tensorflow as tf\n'), ((9094, 9141), 'tensorflow.linalg.triangular_solve', 'tf.linalg.triangular_solve', (['R', 'tmp1'], {'lower': '(True)'}), '(R, tmp1, lower=True)\n', (9120, 9141), True, 'import tensorflow as tf\n'), ((1494, 1516), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (1514, 1516), False, 'import gpflow\n'), ((1636, 1679), 'numpy.random.randn', 
'np.random.randn', (['t.shape[0]', '(t.shape[0] * 3)'], {}), '(t.shape[0], t.shape[0] * 3)\n', (1651, 1679), True, 'import numpy as np\n'), ((1859, 1881), 'numpy.random.rand', 'np.random.rand', (['self.N'], {}), '(self.N)\n', (1873, 1881), True, 'import numpy as np\n'), ((2728, 2750), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (2748, 2750), False, 'import gpflow\n'), ((4116, 4135), 'numpy.ones', 'np.ones', (['(N, 3 * N)'], {}), '((N, 3 * N))\n', (4123, 4135), True, 'import numpy as np\n'), ((4618, 4658), 'numpy.log', 'np.log', (['phiInitialEx[i, iterC:iterC + 3]'], {}), '(phiInitialEx[i, iterC:iterC + 3])\n', (4624, 4658), True, 'import numpy as np\n'), ((4866, 4893), 'numpy.any', 'np.any', (['(phiInitialEx < -eps)'], {}), '(phiInitialEx < -eps)\n', (4872, 4893), True, 'import numpy as np\n'), ((6117, 6133), 'tensorflow.shape', 'tf.shape', (['self.X'], {}), '(self.X)\n', (6125, 6133), True, 'import tensorflow as tf\n'), ((6603, 6624), 'tensorflow.linalg.cholesky', 'tf.linalg.cholesky', (['K'], {}), '(K)\n', (6621, 6624), True, 'import tensorflow as tf\n'), ((6778, 6793), 'tensorflow.sqrt', 'tf.sqrt', (['sigma2'], {}), '(sigma2)\n', (6785, 6793), True, 'import tensorflow as tf\n'), ((6973, 6990), 'tensorflow.transpose', 'tf.transpose', (['Phi'], {}), '(Phi)\n', (6985, 6990), True, 'import tensorflow as tf\n'), ((7033, 7048), 'tensorflow.transpose', 'tf.transpose', (['L'], {}), '(L)\n', (7045, 7048), True, 'import tensorflow as tf\n'), ((7308, 7354), 'tensorflow.print', 'tf.print', (['Phi', '[tau]'], {'name': '"""tau"""', 'summarize': '(10)'}), "(Phi, [tau], name='tau', summarize=10)\n", (7316, 7354), True, 'import tensorflow as tf\n'), ((7367, 7415), 'tensorflow.linalg.triangular_solve', 'tf.linalg.triangular_solve', (['R', 'LPhiY'], {'lower': '(True)'}), '(R, LPhiY, lower=True)\n', (7393, 7415), True, 'import tensorflow as tf\n'), ((7506, 7536), 'tensorflow.math.log', 'tf.math.log', (['(2.0 * np.pi / tau)'], {}), '(2.0 * np.pi / tau)\n', 
(7517, 7536), True, 'import tensorflow as tf\n'), ((7864, 7894), 'tensorflow.print', 'tf.print', (['a1', '[a1]'], {'name': '"""a1="""'}), "(a1, [a1], name='a1=')\n", (7872, 7894), True, 'import tensorflow as tf\n'), ((7907, 7937), 'tensorflow.print', 'tf.print', (['a2', '[a2]'], {'name': '"""a2="""'}), "(a2, [a2], name='a2=')\n", (7915, 7937), True, 'import tensorflow as tf\n'), ((7950, 7980), 'tensorflow.print', 'tf.print', (['a3', '[a3]'], {'name': '"""a3="""'}), "(a3, [a3], name='a3=')\n", (7958, 7980), True, 'import tensorflow as tf\n'), ((7993, 8023), 'tensorflow.print', 'tf.print', (['a4', '[a4]'], {'name': '"""a4="""'}), "(a4, [a4], name='a4=')\n", (8001, 8023), True, 'import tensorflow as tf\n'), ((8036, 8093), 'tensorflow.print', 'tf.print', (['a5', '[a5, Phi]'], {'name': '"""a5 and Phi="""', 'summarize': '(10)'}), "(a5, [a5, Phi], name='a5 and Phi=', summarize=10)\n", (8044, 8093), True, 'import tensorflow as tf\n'), ((8192, 8208), 'tensorflow.shape', 'tf.shape', (['self.X'], {}), '(self.X)\n', (8200, 8208), True, 'import tensorflow as tf\n'), ((8447, 8468), 'tensorflow.linalg.cholesky', 'tf.linalg.cholesky', (['K'], {}), '(K)\n', (8465, 8468), True, 'import tensorflow as tf\n'), ((8627, 8642), 'tensorflow.sqrt', 'tf.sqrt', (['sigma2'], {}), '(sigma2)\n', (8634, 8642), True, 'import tensorflow as tf\n'), ((8822, 8839), 'tensorflow.transpose', 'tf.transpose', (['Phi'], {}), '(Phi)\n', (8834, 8839), True, 'import tensorflow as tf\n'), ((8882, 8897), 'tensorflow.transpose', 'tf.transpose', (['L'], {}), '(L)\n', (8894, 8897), True, 'import tensorflow as tf\n'), ((8917, 8965), 'tensorflow.linalg.triangular_solve', 'tf.linalg.triangular_solve', (['R', 'LPhiY'], {'lower': '(True)'}), '(R, LPhiY, lower=True)\n', (8943, 8965), True, 'import tensorflow as tf\n'), ((9174, 9192), 'tensorflow.transpose', 'tf.transpose', (['tmp2'], {}), '(tmp2)\n', (9186, 9192), True, 'import tensorflow as tf\n'), ((1155, 1184), 'gpflow.likelihoods.Gaussian', 
'gpflow.likelihoods.Gaussian', ([], {}), '()\n', (1182, 1184), False, 'import gpflow\n'), ((1212, 1218), 'gpflow.mean_functions.Zero', 'Zero', ([], {}), '()\n', (1216, 1218), False, 'from gpflow.mean_functions import Zero\n'), ((1779, 1799), 'numpy.ones', 'np.ones', (['(self.N, 2)'], {}), '((self.N, 2))\n', (1786, 1799), True, 'import numpy as np\n'), ((2112, 2132), 'numpy.ones', 'np.ones', (['(self.N, 2)'], {}), '((self.N, 2))\n', (2119, 2132), True, 'import numpy as np\n'), ((4328, 4370), 'numpy.hstack', 'np.hstack', (['[eps, phiInitialIn[i, :] - eps]'], {}), '([eps, phiInitialIn[i, :] - eps])\n', (4337, 4370), True, 'import numpy as np\n'), ((4480, 4521), 'numpy.array', 'np.array', (['[1 - 2 * eps, 0 + eps, 0 + eps]'], {}), '([1 - 2 * eps, 0 + eps, 0 + eps])\n', (4488, 4521), True, 'import numpy as np\n'), ((4740, 4762), 'numpy.isnan', 'np.isnan', (['phiInitialEx'], {}), '(phiInitialEx)\n', (4748, 4762), True, 'import numpy as np\n'), ((6054, 6070), 'tensorflow.shape', 'tf.shape', (['self.Y'], {}), '(self.Y)\n', (6062, 6070), True, 'import tensorflow as tf\n'), ((6081, 6103), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (6101, 6103), False, 'import gpflow\n'), ((6157, 6173), 'tensorflow.shape', 'tf.shape', (['self.Y'], {}), '(self.Y)\n', (6165, 6173), True, 'import tensorflow as tf\n'), ((6184, 6206), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (6204, 6206), False, 'import gpflow\n'), ((6281, 6303), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (6301, 6303), False, 'import gpflow\n'), ((6681, 6704), 'gpflow.default_jitter', 'gpflow.default_jitter', ([], {}), '()\n', (6702, 6704), False, 'import gpflow\n'), ((6727, 6742), 'tensorflow.transpose', 'tf.transpose', (['L'], {}), '(L)\n', (6739, 6742), True, 'import tensorflow as tf\n'), ((6826, 6841), 'tensorflow.transpose', 'tf.transpose', (['W'], {}), '(W)\n', (6838, 6841), True, 'import tensorflow as tf\n'), ((7792, 7809), 'tensorflow.math.square', 
'tf.math.square', (['c'], {}), '(c)\n', (7806, 7809), True, 'import tensorflow as tf\n'), ((8525, 8548), 'gpflow.default_jitter', 'gpflow.default_jitter', ([], {}), '()\n', (8546, 8548), False, 'import gpflow\n'), ((8571, 8586), 'tensorflow.transpose', 'tf.transpose', (['L'], {}), '(L)\n', (8583, 8586), True, 'import tensorflow as tf\n'), ((8675, 8690), 'tensorflow.transpose', 'tf.transpose', (['W'], {}), '(W)\n', (8687, 8690), True, 'import tensorflow as tf\n'), ((9494, 9516), 'tensorflow.expand_dims', 'tf.expand_dims', (['var', '(2)'], {}), '(var, 2)\n', (9508, 9516), True, 'import tensorflow as tf\n'), ((9819, 9841), 'tensorflow.expand_dims', 'tf.expand_dims', (['var', '(1)'], {}), '(var, 1)\n', (9833, 9841), True, 'import tensorflow as tf\n'), ((4813, 4835), 'numpy.isnan', 'np.isnan', (['phiInitialEx'], {}), '(phiInitialEx)\n', (4821, 4835), True, 'import numpy as np\n'), ((4948, 4970), 'numpy.isnan', 'np.isnan', (['phiInitialEx'], {}), '(phiInitialEx)\n', (4956, 4970), True, 'import numpy as np\n'), ((6753, 6774), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['Phi', '(0)'], {}), '(Phi, 0)\n', (6766, 6774), True, 'import tensorflow as tf\n'), ((6874, 6896), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (6894, 6896), False, 'import gpflow\n'), ((7107, 7118), 'tensorflow.shape', 'tf.shape', (['P'], {}), '(P)\n', (7115, 7118), True, 'import tensorflow as tf\n'), ((7175, 7190), 'tensorflow.shape', 'tf.shape', (['LPhiY'], {}), '(LPhiY)\n', (7183, 7190), True, 'import tensorflow as tf\n'), ((7255, 7266), 'tensorflow.shape', 'tf.shape', (['K'], {}), '(K)\n', (7263, 7266), True, 'import tensorflow as tf\n'), ((7720, 7742), 'tensorflow.math.square', 'tf.math.square', (['self.Y'], {}), '(self.Y)\n', (7734, 7742), True, 'import tensorflow as tf\n'), ((8597, 8623), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['Phi', '(0)'], {}), '(Phi, 0)\n', (8615, 8623), True, 'import tensorflow as tf\n'), ((8723, 8745), 'gpflow.default_float', 
'gpflow.default_float', ([], {}), '()\n', (8743, 8745), False, 'import gpflow\n'), ((9370, 9388), 'tensorflow.transpose', 'tf.transpose', (['tmp1'], {}), '(tmp1)\n', (9382, 9388), True, 'import tensorflow as tf\n'), ((9699, 9719), 'tensorflow.math.square', 'tf.math.square', (['tmp1'], {}), '(tmp1)\n', (9713, 9719), True, 'import tensorflow as tf\n'), ((9945, 9961), 'tensorflow.math.log', 'tf.math.log', (['Phi'], {}), '(Phi)\n', (9956, 9961), True, 'import tensorflow as tf\n'), ((10003, 10023), 'tensorflow.math.log', 'tf.math.log', (['self.pZ'], {}), '(self.pZ)\n', (10014, 10023), True, 'import tensorflow as tf\n'), ((6655, 6677), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (6675, 6677), False, 'import gpflow\n'), ((7645, 7667), 'tensorflow.linalg.diag_part', 'tf.linalg.diag_part', (['R'], {}), '(R)\n', (7664, 7667), True, 'import tensorflow as tf\n'), ((8499, 8521), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (8519, 8521), False, 'import gpflow\n'), ((9309, 9327), 'tensorflow.transpose', 'tf.transpose', (['tmp2'], {}), '(tmp2)\n', (9321, 9327), True, 'import tensorflow as tf\n'), ((9446, 9462), 'tensorflow.shape', 'tf.shape', (['self.Y'], {}), '(self.Y)\n', (9454, 9462), True, 'import tensorflow as tf\n'), ((9637, 9657), 'tensorflow.math.square', 'tf.math.square', (['tmp2'], {}), '(tmp2)\n', (9651, 9657), True, 'import tensorflow as tf\n'), ((9771, 9787), 'tensorflow.shape', 'tf.shape', (['self.Y'], {}), '(self.Y)\n', (9779, 9787), True, 'import tensorflow as tf\n')] |
import multiprocessing
import numpy as np
import pandas as pd
import re
from pathlib import Path
from os import cpu_count
from tables.exceptions import HDF5ExtError
from src.patches import PatchSchema
from src.preset2fxp import *
FXP_CHUNK = 'chunk'
FXP_PARAMS = 'params'
DB_KEY = 'patches'
TAGS_KEY = 'tags'
PATCH_FILE = 'patch'
JOBS = min(4, cpu_count())
def updates(func):
    """Decorator for methods whose effects require a database refresh.

    After the wrapped method finishes, ``self.refresh()`` is called so the
    cached indexes stay in sync; the method's return value is passed through
    unchanged.
    """
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        self.refresh()
        return result
    return wrapper
class PatchDatabase:
    """Model for a pandas-based patch database conforming to a `PatchSchema`."""

    # Main patch table: metadata columns plus one column per synth parameter.
    # ``None`` until a database is bootstrapped or loaded from disk.
    __df: pd.DataFrame = None
    # Boolean tag table: one row per patch, one column per tag name.
    __tags: pd.DataFrame
    # k-NN tag classifier; built on demand by `train_classifier`, not persisted.
    __knn = None
    schema: PatchSchema
    # Cached tag names (columns of __tags); rebuilt by `refresh`.
    tags: pd.Index = pd.Index([])
    # Cached bank names; rebuilt by `refresh`.
    banks = []

    def __init__(self, schema: PatchSchema):
        """Constructs a new `PatchDatabase` instance following the `schema`."""
        self.schema = schema

    def bootstrap(self, root_dir: Path):
        """Creates a new database from the contents of the specified directory and loads the database.

        Files matching the schema's file pattern are parsed in a process pool;
        parameter columns missing from a patch are filled with the schema's
        default values.
        """
        re_file = re.compile(self.schema.file_pattern)
        files = filter(lambda f: re_file.match(f.name) is not None, root_dir.glob('**/*'))

        meta = []
        params = []
        # Running *all* this I/O on a single thread is just so slow...
        with multiprocessing.Pool(processes=JOBS) as pool:
            for patch in pool.imap_unordered(self.schema.read_patchfile, files):
                if patch:
                    params.append(patch['params'])
                    del patch['params']
                    meta.append(patch)

        # Series of default parameter values, used to fill missing parameters.
        init_patch = pd.Series(
            self.schema.values, index=self.schema.params, dtype=self.schema.param_dtype)

        meta_df = pd.DataFrame(meta)
        param_df = pd.DataFrame(params, columns=self.schema.params,
                                dtype=int).fillna(init_patch)

        meta_df['bank'] = pd.Categorical(meta_df['bank'])
        meta_df['tags'] = ''
        # NOTE: `possibilites` (sic) is the attribute name defined by PatchSchema.
        for col, pos in self.schema.possibilites.items():
            meta_df[col] = pd.Categorical(meta_df[col], categories=pos)

        self.__df = meta_df.join(param_df)
        self.__tags = pd.DataFrame(index=self.__df.index, dtype='bool')
        self.refresh()

    # noinspection PyTypeChecker
    def from_disk(self, file):
        """Loads a database from the `file`.

        :raises FileNotFoundError: if `file` does not exist or is not a
            readable HDF5 store.
        """
        try:
            store = pd.HDFStore(file, mode='r')
        except (HDF5ExtError, OSError):
            raise FileNotFoundError
        self.__df = store.get(DB_KEY)
        self.__tags = store.get(TAGS_KEY)
        store.close()
        self.refresh()

    def to_disk(self, file):
        """Saves the active database to the `file`."""
        store = pd.HDFStore(str(file), mode='w')
        store.put(DB_KEY, self.__df, format='table')
        store.put(TAGS_KEY, self.__tags)
        store.close()

    def is_active(self) -> bool:
        """Returns `True` if a database is loaded, `False` otherwise."""
        return self.__df is not None

    def refresh(self):
        """Rebuilds cached indexes for, and cleans up, the active database."""
        self.__clean_tags()
        self.tags = self.__tags.columns
        self.banks = self.get_categories('bank')

    def __return_df(self, mask):
        """Returns a `DataFrame` composed of metadata from the patches in the database represented by the Boolean mask
        `mask`."""
        return self.__df.loc[mask][self.schema.meta_cols]

    def find_patches_by_val(self, find: str, col: str, exact=False, regex=False) -> pd.DataFrame:
        """Finds patches in the database matching `find` value in column `col`, either as a substring (`exact=False`),
        an exact match (`exact=True`), or a regular expression (`regex=True`)."""
        if exact:
            mask = self.__df[col] == find
        else:
            mask = self.__df[col].str.contains(find, case=False, regex=regex)
        return self.__return_df(mask)

    def keyword_search(self, kwd: str) -> pd.DataFrame:
        """Finds metadata of patches in the database whose name matches the specified keyword query."""
        return self.find_patches_by_val(kwd, 'patch_name')

    def find_patches_by_tags(self, tags: list) -> pd.DataFrame:
        """Finds patches in the database tagged with (at least) each tag in `tags`.

        Implicitly returns `None` when any tag in `tags` is unknown.
        """
        try:
            # create masks for each tag, unpack into list, take logical and,
            # reduce into single mask, return slice of dataframe with that mask
            return self.__return_df(np.logical_and.reduce([*(self.__tags[tag] == True for tag in tags)]))
        except KeyError:
            # Unknown tag name: preserve original behavior (return None).
            ...

    def get_tags(self, ind: int) -> list:
        """Returns the tags of the patch at index `ind`."""
        return self.tags[self.__tags.iloc[ind]].to_list()

    def get_categories(self, col: str) -> list:
        """Returns all possible values within a column of categorical data."""
        assert isinstance(self.__df[col].dtype, pd.CategoricalDtype)
        return self.__df[col].cat.categories.to_list()

    def train_classifier(self):
        """Constructs a k-nearest neighbors classifier for patches based on their parameters. The classifier is not
        intended to persist across sessions.

        :raises Exception: if no patch in the database has any tags yet.
        """
        from sklearn.pipeline import Pipeline
        from sklearn.neighbors import KNeighborsClassifier
        from sklearn.preprocessing import StandardScaler

        df = self.__df.drop_duplicates(self.schema.params)
        tags = self.__tags.loc[df.index].fillna(False)
        tagged_mask = tags.any(axis=1)
        # BUG FIX: the original tested `len(tagged_mask) == 0`, which is the
        # number of rows and is 0 only for an empty database. The intent is to
        # require at least one tagged patch to train on.
        if not tagged_mask.any():
            raise Exception('Add some tags and try again.')
        df = df.loc[tagged_mask]
        tags = tags.loc[tagged_mask]

        X = df[self.schema.params].to_numpy()
        y = tags.to_numpy(dtype='bool')

        self.__knn = Pipeline([('scaler', StandardScaler()), ('knn', KNeighborsClassifier(
            n_jobs=JOBS, p=1, weights='distance'))])
        self.__knn.fit(X, y)

    def classify_tags(self):
        """Tags patches based on their parameters using the previously generated classifier model."""
        assert self.__knn is not None, 'Please create a classifier model first.'
        self.__tags |= self.__knn.predict(
            self.__df[self.schema.params].to_numpy())
        self.__update_tags()
        # Drop the instance attribute; the class-level `__knn = None` remains.
        del self.__knn

    def tags_from_val_defs(self, re_defs: dict, col: str):
        """Tags patches in the database, where the patch's `col` value matches a regular expression in `re_defs`,
        with the dictionary key of the matching expression."""
        for tag, pattern in re_defs.items():
            mask = self.__df[col].str.contains(
                pattern, regex=True, flags=re.IGNORECASE)
            self.__tags.loc[mask, tag] = True
        self.__update_tags()

    def change_tags(self, index: int, tags: list, replace: bool = True):
        """Changes the tags of the patch at `index` to `tags`. If `replace` is `False`, `tags` will be added to the
        patch's existing tags."""
        if replace:
            self.__tags.loc[index, :] = False
        self.__tags.loc[index, tags] = True
        self.__update_tags(index)

    @updates
    def remove_duplicates(self):
        """Removes duplicate patches from the database."""
        self.__df = self.__df.drop_duplicates(self.schema.params)

    def __clean_tags(self):
        """Internal use only. Re-fits the tag DataFrame to the patch DataFrame, removes unused tags, sorts columns,
        and fills empty values."""
        df_l = len(self.__df)
        tags_l = len(self.__tags)

        # Re-fit tags df if a patch was removed...
        if df_l < tags_l:
            self.__tags = self.__tags.loc[self.__df.index]
        # ...or added
        elif df_l > tags_l:
            # BUG FIX: `DataFrame.append` returns a new frame; the original
            # discarded the result, so tag rows were never added for new
            # patches. Build the missing rows and concatenate them in.
            new_rows = pd.DataFrame(columns=self.__tags.columns,
                                    index=range(0, df_l - tags_l)).fillna(False)
            self.__tags = pd.concat([self.__tags, new_rows], ignore_index=True)

        # Remove unused tags and sort columns
        self.__tags = self.__tags[sorted(self.__tags.columns[self.__tags.any()], key=lambda s: s.lower())]

    def __update_tags(self, index=None):
        """Internal use only. Updates the stringified tags for the patch at `index` or the entire database, and
        cleans up the tag database."""
        sep = ', '
        self.__tags = self.__tags.fillna(False)
        self.refresh()
        if index is not None:
            patch = self.__tags.iloc[index]
            self.__df.loc[index, 'tags'] = sep.join(self.tags[patch])
        else:
            self.__df['tags'] = self.__tags.apply(lambda row: sep.join(self.tags[row]), axis=1)

    def write_patch(self, index, typ, path: Path):
        """Writes the patch at `index` into a file of type `typ` (either `FXP_CHUNK`, `FXP_PARAMS`, or `PATCH_FILE`)
        at `path`.

        :raises ValueError: if `typ` is not one of the supported file types.
        """
        patch = self.__df.iloc[index]
        if typ == PATCH_FILE:
            self.schema.write_patchfile(patch, path)
        else:
            kwargs = {'plugin_id': self.schema.vst_id, 'plugin_version': None,
                      'label': patch['patch_name'], 'num_params': self.schema.num_params}

            if typ == FXP_PARAMS:
                preset = Preset(params=self.schema.make_fxp_params(
                    patch[self.schema.params].to_numpy(dtype=int)), **kwargs)
            elif typ == FXP_CHUNK:
                preset = ChunkPreset(chunk=self.schema.make_fxp_chunk(
                    patch), **kwargs)
            else:
                raise ValueError(
                    'Cannot write a patch to a file type of %s' % typ)

            write_fxp(preset, str(path))
__all__ = ['PatchDatabase', 'FXP_CHUNK', 'FXP_PARAMS', 'PATCH_FILE']
| [
"pandas.DataFrame",
"pandas.HDFStore",
"sklearn.preprocessing.StandardScaler",
"numpy.logical_and.reduce",
"pandas.Index",
"os.cpu_count",
"sklearn.neighbors.KNeighborsClassifier",
"pandas.Series",
"pandas.Categorical",
"multiprocessing.Pool",
"re.compile"
] | [((345, 356), 'os.cpu_count', 'cpu_count', ([], {}), '()\n', (354, 356), False, 'from os import cpu_count\n'), ((815, 827), 'pandas.Index', 'pd.Index', (['[]'], {}), '([])\n', (823, 827), True, 'import pandas as pd\n'), ((1166, 1202), 're.compile', 're.compile', (['self.schema.file_pattern'], {}), '(self.schema.file_pattern)\n', (1176, 1202), False, 'import re\n'), ((1722, 1813), 'pandas.Series', 'pd.Series', (['self.schema.values'], {'index': 'self.schema.params', 'dtype': 'self.schema.param_dtype'}), '(self.schema.values, index=self.schema.params, dtype=self.schema.\n param_dtype)\n', (1731, 1813), True, 'import pandas as pd\n'), ((1841, 1859), 'pandas.DataFrame', 'pd.DataFrame', (['meta'], {}), '(meta)\n', (1853, 1859), True, 'import pandas as pd\n'), ((2017, 2048), 'pandas.Categorical', 'pd.Categorical', (["meta_df['bank']"], {}), "(meta_df['bank'])\n", (2031, 2048), True, 'import pandas as pd\n'), ((2275, 2324), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.__df.index', 'dtype': '"""bool"""'}), "(index=self.__df.index, dtype='bool')\n", (2287, 2324), True, 'import pandas as pd\n'), ((1417, 1453), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'JOBS'}), '(processes=JOBS)\n', (1437, 1453), False, 'import multiprocessing\n'), ((2164, 2208), 'pandas.Categorical', 'pd.Categorical', (['meta_df[col]'], {'categories': 'pos'}), '(meta_df[col], categories=pos)\n', (2178, 2208), True, 'import pandas as pd\n'), ((2495, 2522), 'pandas.HDFStore', 'pd.HDFStore', (['file'], {'mode': '"""r"""'}), "(file, mode='r')\n", (2506, 2522), True, 'import pandas as pd\n'), ((1879, 1938), 'pandas.DataFrame', 'pd.DataFrame', (['params'], {'columns': 'self.schema.params', 'dtype': 'int'}), '(params, columns=self.schema.params, dtype=int)\n', (1891, 1938), True, 'import pandas as pd\n'), ((4647, 4715), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['[*(self.__tags[tag] == True for tag in tags)]'], {}), '([*(self.__tags[tag] == True for tag in 
tags)])\n', (4668, 4715), True, 'import numpy as np\n'), ((5981, 5997), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5995, 5997), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6008, 6066), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_jobs': 'JOBS', 'p': '(1)', 'weights': '"""distance"""'}), "(n_jobs=JOBS, p=1, weights='distance')\n", (6028, 6066), False, 'from sklearn.neighbors import KNeighborsClassifier\n')] |
import numpy as np
import time
import os
import math
import pickle as pkl
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
"""
Author: <NAME>
Description: In this file are implemented some support function to preprocess EUSAR dataset
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def one_hot_2_label_value(label):
    """
    In EUSAR labels are composed of one hot encoded images for each class.
    This function converts a structure of [classes, dim_x, dim_y] to a
    [dim_x, dim_y] image in which each class has a different value.
    Labels are composed as follows (EUSAR):
                ONE HOT                     SINGLE VALUES       COLOUR
        - Band 0 name forest.png        -->     0           -->  dark green
        - Band 1 name street.png        -->     1           -->  grey
        - Band 2 name field.png         -->     2           -->  lime
        - Band 3 name urban.png         -->     3           -->  red
        - Band 4 name water.png         -->     4           -->  blue
        - All zero values               -->     255         -->  white
    :param label: label patch of shape [C, ps, ps]; assumed one-hot per pixel
        (conflicting pixels should first be removed with `remove_duplicates`)
    :return: single-band uint8 image of shape [ps, ps]
    """
    label_values = np.zeros(label.shape[1:3])
    for i in range(label.shape[0]):
        # Accumulate each class plane scaled by (index + 1); the +1 keeps
        # class 0 distinguishable from unclassified (all-zero) pixels.
        label_values = (label[i] * (i + 1)) + label_values
    # Unclassified pixels become 255; classified ones shift back to 0-based ids.
    label_values = np.where(label_values == 0, 255, (label_values - 1))
    # BUG FIX: `astype` returns a new array; the original discarded its result
    # and returned a float array. Return the uint8 copy as intended.
    return label_values.astype(np.uint8)
def remove_duplicates(img):
    """
    Fixes rare label errors where the same pixel is claimed by two classes.
    Such pixels are judged unreliable and are zeroed out in every class plane.
    :param img: one-hot label stack of shape [C, H, W], modified in place
    :return: the corrected input
    """
    # Count how many class planes claim each pixel.
    claims = np.sum(img, 0)
    # Mask is 0 on ambiguous pixels (claimed more than once), 1 elsewhere.
    valid = np.where(claims > 1, 0, 1)
    for ch in range(img.shape[0]):
        img[ch] = img[ch] * valid
    return img
def normalizer(img, f, center=False, norm1=False):
    """
    Standardize data channel by channel, logging every statistic to `f`.
    :param img: image of shape [C, H, W] to be normalized (modified in place
        unless centering rebinds it)
    :param f: pointer to the log file
    :param center: if True, shift values so they are centered around 0
    :param norm1: scale each channel into [-1, 1]; only applied when `center`
        is also True
    :return: (img, mean, std, center_val, mx) — the normalized image plus all
        normalization parameters needed to recover the original data
    """
    means, stds, maxima = [], [], []
    f.write("Running normalization\n")
    n_channels = img.shape[0]

    # First pass: record per-channel statistics; standardize unless the
    # caller asked for [-1, 1] scaling instead.
    for ch in range(n_channels):
        means.append(np.mean(img[ch]))
        stds.append(np.std(img[ch]))
        f.write("Ch {} mean = {} - std = {}\n".format(ch, means[ch], stds[ch]))
        if not norm1:
            img[ch] = np.true_divide(img[ch] - means[ch], stds[ch])

    if center:
        f.write("Centering values\n")
        # Shift so the midpoint of the global value range sits at 0.
        center_val = (np.max(img) + np.min(img)) / 2
        img = img - center_val
        if norm1:
            f.write("Norm values after centering\n")
        for ch in range(n_channels):
            if norm1:
                maxima.append(np.max(np.abs(img[ch])))
                f.write("Ch {} max = {} \n".format(ch, maxima[ch]))
                img[ch] = np.true_divide(img[ch], maxima[ch])
            else:
                maxima.append(0)
    else:
        center_val = 0
    return img, means, stds, center_val, maxima
def simple_normalizer(img, f, mean, std):
    """
    Standardize data channel by channel using precomputed statistics.
    :param img: image of shape [C, H, W] to normalize (modified in place)
    :param f: pointer to the log file
    :param mean: per-channel mean values
    :param std: per-channel standard deviations
    :return: the normalized image
    """
    f.write("Running normalization\n")
    for ch in range(len(mean)):
        f.write("Ch {} mean = {} - std = {}\n".format(ch, mean[ch], std[ch]))
        # Standardize: subtract the channel mean, divide by the channel std.
        img[ch] = (img[ch] - mean[ch]) / std[ch]
    return img
def cut_tiles_full(tile_path, dest_path, name, patch_size, max_n_bad_pix, overlapping, padding):
    """
    Loads a tile's radar/label/rgb arrays, cuts them into square patches and saves the
    patches to the destination folder. Radar and rgb patches are always stored; a label
    patch is stored only when it contains enough classified pixels. Patch top-left
    coordinates are pickled to posx.pkl / posy.pkl so the tile can be reconstructed.
    :param tile_path: location of input data tile to be cut
    :param dest_path: folder in which data will be stored
    :param name: target tile name
    :param patch_size: patch dimension (patches are patch_size x patch_size)
    :param max_n_bad_pix: despite the name, this is the minimum number of classified
        pixels a label patch must contain to be saved (compared as good_pix_n >= max_n_bad_pix)
    :param overlapping: stride factor between two patches (1.0 means no overlap)
    :param padding: if True, wrap-pad the tile so its sides are multiples of patch_size
    :return: NA
    """
    t = time.time()
    # Log is opened in append mode so successive runs accumulate in one file.
    f = open(os.path.join(dest_path + name + "_" + "log.txt"), "a")
    f.write("Cutting data from{}\n".format(tile_path + ' ' + name))
    f.write("Patch_size {}\n".format(patch_size))
    f.write("Overlapping {}\n".format(overlapping))
    f.write("patch_max_bad_pix_prc {}\n".format(max_n_bad_pix))
    radar = np.load(os.path.join(tile_path, "radar/" + name + "_radar.npy"))
    label = np.load(os.path.join(tile_path, "label/" + name + "_label.npy"))
    f.write("Loaded radar shape {} type {}\n".format(radar.shape, radar.dtype))
    f.write("Loaded label shape {} type {}\n".format(label.shape, label.dtype))
    rgb = np.load(os.path.join(tile_path, "rgb/" + name + "_rgb.npy"))
    f.write("Loaded rgb shape {} type {}\n".format(rgb.shape, rgb.dtype))
    w = radar.shape[1]
    h = radar.shape[2]
    if padding:
        # If padding calculate how big the padding has to be: the remainder of
        # each side is split (roughly in half) between the two edges.
        extra_w = (w % patch_size)
        extra_h = (h % patch_size)
        f.write("Extrapixel w {} h {}\n".format(extra_w, extra_h))
        if extra_w > 0:
            extra_w_l = int(math.ceil((patch_size - extra_w) / 2))
            extra_w_r = int(math.ceil(patch_size - extra_w)) - extra_w_l
        else:
            extra_w_l = 0
            extra_w_r = 0
        if extra_h > 0:
            extra_h_l = int(math.ceil((patch_size - extra_h) / 2))
            extra_h_r = int(math.ceil(patch_size - extra_h)) - extra_h_l
        else:
            extra_h_l = 0
            extra_h_r = 0
        # pad images (wrap mode repeats content from the opposite edge)
        f.write("Extrapixel w_l {} w_r {} h_l {} h_r {}\n".format(extra_w_l, extra_w_r, extra_h_l, extra_h_r))
        radar = np.pad(radar, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        label = np.pad(label, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        f.write("New radar shape {} type {}\n".format(radar.shape, radar.dtype))
        f.write("New label shape {} type {}\n".format(label.shape, label.dtype))
        rgb = np.pad(rgb, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        f.write("New rgb shape {} type {}\n".format(rgb.shape, rgb.dtype))
    # Re-read the (possibly padded) dimensions before sliding the window.
    w = radar.shape[1]
    h = radar.shape[2]
    patch_counter = 0
    step = int(patch_size * overlapping)
    # pass all the image with the right stride
    posx, posy = [], []
    for i in range(0, (w - patch_size + step), step):
        for j in range(0, (h - patch_size + step), step):
            validity_patch = label[:, i:i + patch_size, j:j + patch_size]
            # For a {0, 1} one-hot label patch, count[1] is the number of set
            # (classified) pixels; a patch of all zeros yields a single unique
            # value, hence good_pix_n = 0.
            u, count = np.unique(validity_patch, return_counts=True)
            if len(u) > 1:
                good_pix_n = count[1]
            else:
                good_pix_n = 0
            patch_counter = patch_counter + 1
            posx.append(str(i))
            posy.append(str(j))
            t_n, p_n = patch_name(name, patch_counter)
            # label are saved only with the right number of classified pixel
            if good_pix_n >= max_n_bad_pix:
                np.save(os.path.join(dest_path, "label", t_n + "_" + p_n + "_label.npy"),
                        label[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
            # rgb and radar patches are always stored
            np.save(os.path.join(dest_path, "radar", t_n + "_" + p_n + "_radar.npy"),
                    radar[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
            np.save(os.path.join(dest_path, "rgb", t_n + "_" + p_n + "_rgb.npy"),
                    rgb[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
            f.write("{} Patch ({},{} - {},{}) good_pix_n = {:.3f}\n".format(
                name + "_" + str(patch_counter), i, i+patch_size, j, j+patch_size, good_pix_n))
    f.write("Execution time = {:.2f} s".format(time.time() - t))
    # NOTE(review): these pickle file handles are never closed — rely on GC;
    # consider a `with` block.
    pkl.dump(posx, open(os.path.join(dest_path, "posx.pkl"), "wb"))
    pkl.dump(posy, open(os.path.join(dest_path, "posy.pkl"), "wb"))
    print("Execution time = {:.2f} s".format(time.time() - t))
    f.close()
def cut_tiles_small(tile_path, dest_path, name, patch_size, max_n_bad_pix, overlapping, padding):
    """
    Loads a tile's radar/label/rgb arrays, cuts them into square patches and saves the
    patches to the destination folder. Radar and rgb patches are always stored; a label
    patch is stored only when it contains enough classified pixels.
    "Small" because only a fixed sub-region of the tile (rows/cols 850 to 1490)
    is scanned. Patch top-left coordinates are pickled to posx.pkl / posy.pkl.
    :param tile_path: location of input data tile to be cut
    :param dest_path: folder in which data will be stored
    :param name: target tile name
    :param patch_size: patch dimension (patches are patch_size x patch_size)
    :param max_n_bad_pix: despite the name, this is the minimum number of classified
        pixels a label patch must contain to be saved (compared as good_pix_n >= max_n_bad_pix)
    :param overlapping: stride factor between two patches (1.0 means no overlap)
    :param padding: if True, wrap-pad the tile so its sides are multiples of patch_size
    :return: NA
    """
    t = time.time()
    # Log is opened in append mode so successive runs accumulate in one file.
    f = open(os.path.join(dest_path + name + "_" + "log.txt"), "a")
    f.write("Cutting data from{}\n".format(tile_path + ' ' + name))
    f.write("Patch_size {}\n".format(patch_size))
    f.write("Overlapping {}\n".format(overlapping))
    f.write("patch_max_bad_pix_prc {}\n".format(max_n_bad_pix))
    radar = np.load(os.path.join(tile_path, "radar/" + name + "_radar.npy"))
    label = np.load(os.path.join(tile_path, "label/" + name + "_label.npy"))
    f.write("Loaded radar shape {} type {}\n".format(radar.shape, radar.dtype))
    f.write("Loaded label shape {} type {}\n".format(label.shape, label.dtype))
    rgb = np.load(os.path.join(tile_path, "rgb/" + name + "_rgb.npy"))
    f.write("Loaded rgb shape {} type {}\n".format(rgb.shape, rgb.dtype))
    w = radar.shape[1]
    h = radar.shape[2]
    if padding:
        # If padding calculate how big the padding has to be: the remainder of
        # each side is split (roughly in half) between the two edges.
        extra_w = (w % patch_size)
        extra_h = (h % patch_size)
        f.write("Extrapixel w {} h {}\n".format(extra_w, extra_h))
        if extra_w > 0:
            extra_w_l = int(math.ceil((patch_size - extra_w) / 2))
            extra_w_r = int(math.ceil(patch_size - extra_w)) - extra_w_l
        else:
            extra_w_l = 0
            extra_w_r = 0
        if extra_h > 0:
            extra_h_l = int(math.ceil((patch_size - extra_h) / 2))
            extra_h_r = int(math.ceil(patch_size - extra_h)) - extra_h_l
        else:
            extra_h_l = 0
            extra_h_r = 0
        # pad images (wrap mode repeats content from the opposite edge)
        f.write("Extrapixel w_l {} w_r {} h_l {} h_r {}\n".format(extra_w_l, extra_w_r, extra_h_l, extra_h_r))
        radar = np.pad(radar, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        label = np.pad(label, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        f.write("New radar shape {} type {}\n".format(radar.shape, radar.dtype))
        f.write("New label shape {} type {}\n".format(label.shape, label.dtype))
        rgb = np.pad(rgb, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        f.write("New rgb shape {} type {}\n".format(rgb.shape, rgb.dtype))
    patch_counter = 0
    step = int(patch_size * overlapping)
    # pass all the image with the right stride; only the fixed 850..1490
    # window of the tile is scanned here
    posx, posy = [], []
    for i in range(850, (1490 - patch_size + step), step):
        for j in range(850, (1490 - patch_size + step), step):
            validity_patch = label[:, i:i + patch_size, j:j + patch_size]
            # For a {0, 1} one-hot label patch, count[1] is the number of set
            # (classified) pixels; an all-zero patch yields good_pix_n = 0.
            u, count = np.unique(validity_patch, return_counts=True)
            if len(u) > 1:
                good_pix_n = count[1]
            else:
                good_pix_n = 0
            patch_counter = patch_counter + 1
            posx.append(str(i))
            posy.append(str(j))
            t_n, p_n = patch_name(name, patch_counter)
            # label are saved only with the right number of classified pixel
            if good_pix_n >= max_n_bad_pix:
                np.save(os.path.join(dest_path, "label", t_n + "_" + p_n + "_label.npy"),
                        label[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
            # rgb and radar patches are always stored
            np.save(os.path.join(dest_path, "radar", t_n + "_" + p_n + "_radar.npy"),
                    radar[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
            np.save(os.path.join(dest_path, "rgb", t_n + "_" + p_n + "_rgb.npy"),
                    rgb[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
            f.write("{} Patch ({},{} - {},{}) good_pix_n = {:.3f}\n".format(
                name + "_" + str(patch_counter), i, i+patch_size, j, j+patch_size, good_pix_n))
    f.write("Execution time = {:.2f} s".format(time.time() - t))
    # NOTE(review): these pickle file handles are never closed — rely on GC;
    # consider a `with` block.
    pkl.dump(posx, open(os.path.join(dest_path, "posx.pkl"), "wb"))
    pkl.dump(posy, open(os.path.join(dest_path, "posy.pkl"), "wb"))
    print("Execution time = {:.2f} s".format(time.time() - t))
    f.close()
def cut_tiles(tile_path, dest_path, name, patch_size, max_n_bad_pix, overlapping, padding):
    """
    Loads a tile's radar/label/rgb arrays, cuts them into square patches and saves the
    patches to the destination folder. Unlike `cut_tiles_full`, radar/label/rgb
    patches are all saved (or all skipped) together: a patch is stored only when it
    contains enough classified pixels. Patch top-left coordinates are pickled to
    posx.pkl / posy.pkl.
    :param tile_path: location of input data tile to be cut
    :param dest_path: folder in which data will be stored
    :param name: target tile name
    :param patch_size: patch dimension (patches are patch_size x patch_size)
    :param max_n_bad_pix: despite the name, this is the minimum number of classified
        pixels a patch must contain to be saved (compared as good_pix_n >= max_n_bad_pix)
    :param overlapping: stride factor between two patches (1.0 means no overlap)
    :param padding: if True, wrap-pad the tile so its sides are multiples of patch_size
    :return: NA
    """
    t = time.time()
    # Log is opened in append mode so successive runs accumulate in one file.
    f = open(os.path.join(dest_path + name + "_" + "log.txt"), "a")
    f.write("Cutting data from{}\n".format(tile_path + ' ' + name))
    f.write("Patch_size {}\n".format(patch_size))
    f.write("Overlapping {}\n".format(overlapping))
    f.write("patch_max_bad_pix_prc {}\n".format(max_n_bad_pix))
    radar = np.load(os.path.join(tile_path, "radar/" + name + "_radar.npy"))
    label = np.load(os.path.join(tile_path, "label/" + name + "_label.npy"))
    f.write("Loaded radar shape {} type {}\n".format(radar.shape, radar.dtype))
    f.write("Loaded label shape {} type {}\n".format(label.shape, label.dtype))
    rgb = np.load(os.path.join(tile_path, "rgb/" + name + "_rgb.npy"))
    f.write("Loaded rgb shape {} type {}\n".format(rgb.shape, rgb.dtype))
    w = radar.shape[1]
    h = radar.shape[2]
    if padding:
        # If padding calculate how big the padding has to be: the remainder of
        # each side is split (roughly in half) between the two edges.
        extra_w = (w % patch_size)
        extra_h = (h % patch_size)
        f.write("Extrapixel w {} h {}\n".format(extra_w, extra_h))
        if extra_w > 0:
            extra_w_l = int(math.ceil((patch_size - extra_w) / 2))
            extra_w_r = int(math.ceil(patch_size - extra_w)) - extra_w_l
        else:
            extra_w_l = 0
            extra_w_r = 0
        if extra_h > 0:
            extra_h_l = int(math.ceil((patch_size - extra_h) / 2))
            extra_h_r = int(math.ceil(patch_size - extra_h)) - extra_h_l
        else:
            extra_h_l = 0
            extra_h_r = 0
        # pad images (wrap mode repeats content from the opposite edge)
        f.write("Extrapixel w_l {} w_r {} h_l {} h_r {}\n".format(extra_w_l, extra_w_r, extra_h_l, extra_h_r))
        radar = np.pad(radar, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        label = np.pad(label, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        f.write("New radar shape {} type {}\n".format(radar.shape, radar.dtype))
        f.write("New label shape {} type {}\n".format(label.shape, label.dtype))
        rgb = np.pad(rgb, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        f.write("New rgb shape {} type {}\n".format(rgb.shape, rgb.dtype))
    # Re-read the (possibly padded) dimensions before sliding the window.
    w = radar.shape[1]
    h = radar.shape[2]
    patch_counter = 0
    step = int(patch_size * overlapping)
    # pass all the image with the right stride: i runs over width, j over height
    posx, posy = [], []
    for i in range(0, (w - patch_size + step), step):
        for j in range(0, (h - patch_size + step), step):
            validity_patch = label[:, i:i + patch_size, j:j + patch_size]
            # For a {0, 1} one-hot label patch, count[1] is the number of set
            # (classified) pixels; an all-zero patch yields good_pix_n = 0.
            u, count = np.unique(validity_patch, return_counts=True)
            if len(u) > 1:
                good_pix_n = count[1]
            else:
                good_pix_n = 0
            # store data only if the number of classified pixel of the actual patch are enough
            if good_pix_n >= max_n_bad_pix:
                patch_counter = patch_counter + 1
                posx.append(str(i))
                posy.append(str(j))
                t_n, p_n = patch_name(name, patch_counter)
                np.save(os.path.join(dest_path, "radar", t_n + "_" + p_n + "_radar.npy"),
                        radar[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
                np.save(os.path.join(dest_path, "label", t_n + "_" + p_n + "_label.npy"),
                        label[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
                np.save(os.path.join(dest_path, "rgb", t_n + "_" + p_n + "_rgb.npy"),
                        rgb[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
                f.write("{} Patch ({},{} - {},{}) good_pix_n = {:.3f}\n".format(
                    name + "_" + str(patch_counter), i, i+patch_size, j, j+patch_size, good_pix_n))
    f.write("Execution time = {:.2f} s".format(time.time() - t))
    # NOTE(review): these pickle file handles are never closed — rely on GC;
    # consider a `with` block.
    pkl.dump(posx, open(os.path.join(dest_path, "posx.pkl"), "wb"))
    pkl.dump(posy, open(os.path.join(dest_path, "posy.pkl"), "wb"))
    print("Execution time = {:.2f} s".format(time.time() - t))
    f.close()
def patch_name(tile_id, patch_id):
    """Build fixed-width, zero-padded string identifiers for a tile and a patch.

    Tile ids are left-padded with '0' to 4 characters and patch ids to 7;
    ids already longer than their field width are returned unchanged.

    :param tile_id: number (or name) of the actual tile
    :param patch_id: number of the actual patch
    :return: tuple (tile_id_str, patch_id_str) of padded strings
    """
    # figures of tiles / figures of patches
    tile_field_width = 4
    patch_field_width = 7
    return (str(tile_id).rjust(tile_field_width, '0'),
            str(patch_id).rjust(patch_field_width, '0'))
def cut_tiles_radar(tile_path, dest_path, name, patch_size, max_n_bad_pix, overlapping, padding):
    """
    Loads a tile's radar/label/rgb arrays, cuts them into square patches and saves
    radar and rgb patches to the destination folder (label patches are never saved
    by this variant). Inside a fixed inner region (1500 < i < 7000, 1500 < j < 12500)
    every patch is stored; outside it a patch is stored only when its radar and rgb
    content is varied enough (uniqueness heuristics), which filters out flat /
    empty border areas. Patch top-left coordinates are pickled to posx.pkl /
    posy.pkl so the tile can be reconstructed.
    :param tile_path: location of input data tile to be cut
    :param dest_path: folder in which data will be stored
    :param name: target tile name
    :param patch_size: patch dimension (patches are patch_size x patch_size)
    :param max_n_bad_pix: only logged by this variant; it does not gate saving
    :param overlapping: stride factor between two patches (1.0 means no overlap)
    :param padding: if True, wrap-pad the tile so its sides are multiples of patch_size
    :return: NA
    """
    t = time.time()
    # Log is opened in append mode so successive runs accumulate in one file.
    f = open(os.path.join(dest_path + name + "_" + "log.txt"), "a")
    f.write("Cutting data from{}\n".format(tile_path + ' ' + name))
    f.write("Patch_size {}\n".format(patch_size))
    f.write("Overlapping {}\n".format(overlapping))
    f.write("patch_max_bad_pix_prc {}\n".format(max_n_bad_pix))
    radar = np.load(os.path.join(tile_path, "radar/" + name + "_radar.npy"))
    label = np.load(os.path.join(tile_path, "label/" + name + "_label.npy"))
    rgb = np.load(os.path.join(tile_path, "rgb/" + name + "_rgb.npy"))
    f.write("Loaded radar shape {} type {}\n".format(radar.shape, radar.dtype))
    f.write("Loaded label shape {} type {}\n".format(label.shape, label.dtype))
    f.write("Loaded rgb shape {} type {}\n".format(rgb.shape, rgb.dtype))
    # Build a uint8 preview of the radar tile (channels reordered to BGR->RGB,
    # shifted to non-negative, scaled to 0..255). Only used by the commented-out
    # preview-image code below.
    img_r = radar[[2, 1, 0], :, :]
    img_r = np.rollaxis(img_r, 0, 3)
    minimum = np.min(img_r)
    if minimum < 0:
        img_r = img_r - minimum
    maximum = np.max(img_r)
    img_r = np.divide(img_r, maximum)
    img_r = img_r * 255
    img_r = img_r.astype(np.uint8)
    # Same preview for the optical (rgb) tile.
    img_o = rgb[[2, 1, 0], :, :]
    img_o = np.rollaxis(img_o, 0, 3)
    minimum = np.min(img_o)
    if minimum < 0:
        img_o = img_o - minimum
    maximum = np.max(img_o)
    img_o = np.divide(img_o, maximum)
    img_o = img_o * 255
    img_o = img_o.astype(np.uint8)
    # White patch used by the commented-out preview code to blank rejected areas.
    white = np.ones((patch_size, patch_size, 3)) * 255
    white = white.astype(np.uint8)
    w = radar.shape[1]
    h = radar.shape[2]
    if padding:
        # If padding calculate how big the padding has to be: the remainder of
        # each side is split (roughly in half) between the two edges.
        extra_w = (w % patch_size)
        extra_h = (h % patch_size)
        f.write("Extrapixel w {} h {}\n".format(extra_w, extra_h))
        if extra_w > 0:
            extra_w_l = int(math.ceil((patch_size - extra_w) / 2))
            extra_w_r = int(math.ceil(patch_size - extra_w)) - extra_w_l
        else:
            extra_w_l = 0
            extra_w_r = 0
        if extra_h > 0:
            extra_h_l = int(math.ceil((patch_size - extra_h) / 2))
            extra_h_r = int(math.ceil(patch_size - extra_h)) - extra_h_l
        else:
            extra_h_l = 0
            extra_h_r = 0
        # pad images (wrap mode repeats content from the opposite edge)
        f.write("Extrapixel w_l {} w_r {} h_l {} h_r {}\n".format(extra_w_l, extra_w_r, extra_h_l, extra_h_r))
        radar = np.pad(radar, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        label = np.pad(label, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        # img_o = np.pad(img_o, ((extra_w_l, extra_w_r), (extra_h_l, extra_h_r), (0, 0)), mode="wrap")
        # img_r = np.pad(img_r, ((extra_w_l, extra_w_r), (extra_h_l, extra_h_r), (0, 0)), mode="wrap")
        f.write("New radar shape {} type {}\n".format(radar.shape, radar.dtype))
        f.write("New label shape {} type {}\n".format(label.shape, label.dtype))
        rgb = np.pad(rgb, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode="wrap")
        f.write("New rgb shape {} type {}\n".format(rgb.shape, rgb.dtype))
    f.write("New radar shape {} type {}\n".format(radar.shape, radar.dtype))
    f.write("New label shape {} type {}\n".format(label.shape, label.dtype))
    f.write("New rgb shape {} type {}\n".format(rgb.shape, rgb.dtype))
    # Re-read the (possibly padded) dimensions before sliding the window.
    w = radar.shape[1]
    h = radar.shape[2]
    patch_counter = 0
    step = int(patch_size * overlapping)
    # pass all the image with the right stride
    posx, posy = [], []
    for i in range(0, (w - patch_size + step), step):
        for j in range(0, (h - patch_size + step), step):
            validity_patch = label[:, i:i + patch_size, j:j + patch_size]
            # For a {0, 1} one-hot label patch, count[1] is the number of set
            # (classified) pixels; an all-zero patch yields good_pix_n = 0.
            # Here good_pix_n is only logged — it does not gate saving.
            u, count = np.unique(validity_patch, return_counts=True)
            if len(u) > 1:
                good_pix_n = count[1]
            else:
                good_pix_n = 0
            # label are saved only with the right number of classified pixel
            # rgb and radar patches are always stored
            if not (1500 < i < 7000 and 1500 < j < 12500):
                # Outside the fixed inner region: keep the patch only if it is
                # varied enough (enough distinct channel-sum values in both
                # radar and rgb), filtering out flat/empty areas.
                r = radar[:, i:i + patch_size, j:j + patch_size]
                o = rgb[:, i:i + patch_size, j:j + patch_size]
                r = np.sum(r, 0)
                o = np.sum(o, 0)
                r = np.unique(r)
                o = np.unique(o)
                if len(r) > (patch_size * 28) and len(o) > (patch_size * 4):
                    posx.append(str(i))
                    posy.append(str(j))
                    patch_counter = patch_counter + 1
                    t_n, p_n = patch_name(name, patch_counter)
                    np.save(os.path.join(dest_path, "radar", t_n + "_" + p_n + "_radar.npy"),
                            radar[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
                    np.save(os.path.join(dest_path, "rgb", t_n + "_" + p_n + "_rgb.npy"),
                            rgb[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
                # else:
                #     img_o[i:i + patch_size, j:j + patch_size, :] = white
                #     img_r[i:i + patch_size, j:j + patch_size, :] = white
            else:
                # Inside the inner region: always store the patch.
                posx.append(str(i))
                posy.append(str(j))
                patch_counter = patch_counter + 1
                t_n, p_n = patch_name(name, patch_counter)
                np.save(os.path.join(dest_path, "radar", t_n + "_" + p_n + "_radar.npy"),
                        radar[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
                np.save(os.path.join(dest_path, "rgb", t_n + "_" + p_n + "_rgb.npy"),
                        rgb[:, i:i + patch_size, j:j + patch_size], allow_pickle=True)
            f.write("{} Patch ({},{} - {},{}) good_pix_n = {}\n".format(
                name + "_" + str(patch_counter), i, i + patch_size, j, j + patch_size, good_pix_n))
    # temp = Image.fromarray(img_r, 'RGB')
    # temp.save('radar' + '.png')
    # temp = Image.fromarray(img_o, 'RGB')
    # temp.save('rgb' + '.png')
    f.write("Execution time = {:.2f} s".format(time.time() - t))
    # NOTE(review): these pickle file handles are never closed — rely on GC;
    # consider a `with` block.
    pkl.dump(posx, open(os.path.join(dest_path, "posx.pkl"), "wb"))
    pkl.dump(posy, open(os.path.join(dest_path, "posy.pkl"), "wb"))
    print("Execution time = {:.2f} s".format(time.time() - t))
    f.close()
| [
"numpy.pad",
"numpy.divide",
"numpy.sum",
"numpy.true_divide",
"numpy.abs",
"math.ceil",
"numpy.std",
"numpy.zeros",
"numpy.ones",
"time.time",
"numpy.min",
"numpy.where",
"numpy.max",
"numpy.mean",
"numpy.rollaxis",
"os.path.join",
"numpy.unique"
] | [((1350, 1376), 'numpy.zeros', 'np.zeros', (['label.shape[1:3]'], {}), '(label.shape[1:3])\n', (1358, 1376), True, 'import numpy as np\n'), ((1689, 1739), 'numpy.where', 'np.where', (['(label_values == 0)', '(255)', '(label_values - 1)'], {}), '(label_values == 0, 255, label_values - 1)\n', (1697, 1739), True, 'import numpy as np\n'), ((2172, 2186), 'numpy.sum', 'np.sum', (['img', '(0)'], {}), '(img, 0)\n', (2178, 2186), True, 'import numpy as np\n'), ((2318, 2349), 'numpy.where', 'np.where', (['(channel_tot > 1)', '(0)', '(1)'], {}), '(channel_tot > 1, 0, 1)\n', (2326, 2349), True, 'import numpy as np\n'), ((5384, 5395), 'time.time', 'time.time', ([], {}), '()\n', (5393, 5395), False, 'import time\n'), ((10335, 10346), 'time.time', 'time.time', ([], {}), '()\n', (10344, 10346), False, 'import time\n'), ((15009, 15020), 'time.time', 'time.time', ([], {}), '()\n', (15018, 15020), False, 'import time\n'), ((20589, 20600), 'time.time', 'time.time', ([], {}), '()\n', (20598, 20600), False, 'import time\n'), ((21410, 21434), 'numpy.rollaxis', 'np.rollaxis', (['img_r', '(0)', '(3)'], {}), '(img_r, 0, 3)\n', (21421, 21434), True, 'import numpy as np\n'), ((21449, 21462), 'numpy.min', 'np.min', (['img_r'], {}), '(img_r)\n', (21455, 21462), True, 'import numpy as np\n'), ((21529, 21542), 'numpy.max', 'np.max', (['img_r'], {}), '(img_r)\n', (21535, 21542), True, 'import numpy as np\n'), ((21555, 21580), 'numpy.divide', 'np.divide', (['img_r', 'maximum'], {}), '(img_r, maximum)\n', (21564, 21580), True, 'import numpy as np\n'), ((21686, 21710), 'numpy.rollaxis', 'np.rollaxis', (['img_o', '(0)', '(3)'], {}), '(img_o, 0, 3)\n', (21697, 21710), True, 'import numpy as np\n'), ((21725, 21738), 'numpy.min', 'np.min', (['img_o'], {}), '(img_o)\n', (21731, 21738), True, 'import numpy as np\n'), ((21805, 21818), 'numpy.max', 'np.max', (['img_o'], {}), '(img_o)\n', (21811, 21818), True, 'import numpy as np\n'), ((21831, 21856), 'numpy.divide', 'np.divide', (['img_o', 'maximum'], 
{}), '(img_o, maximum)\n', (21840, 21856), True, 'import numpy as np\n'), ((4482, 4522), 'numpy.true_divide', 'np.true_divide', (['(img[i] - mean[i])', 'std[i]'], {}), '(img[i] - mean[i], std[i])\n', (4496, 4522), True, 'import numpy as np\n'), ((5409, 5457), 'os.path.join', 'os.path.join', (["(dest_path + name + '_' + 'log.txt')"], {}), "(dest_path + name + '_' + 'log.txt')\n", (5421, 5457), False, 'import os\n'), ((5718, 5773), 'os.path.join', 'os.path.join', (['tile_path', "('radar/' + name + '_radar.npy')"], {}), "(tile_path, 'radar/' + name + '_radar.npy')\n", (5730, 5773), False, 'import os\n'), ((5795, 5850), 'os.path.join', 'os.path.join', (['tile_path', "('label/' + name + '_label.npy')"], {}), "(tile_path, 'label/' + name + '_label.npy')\n", (5807, 5850), False, 'import os\n'), ((6030, 6081), 'os.path.join', 'os.path.join', (['tile_path', "('rgb/' + name + '_rgb.npy')"], {}), "(tile_path, 'rgb/' + name + '_rgb.npy')\n", (6042, 6081), False, 'import os\n'), ((7026, 7114), 'numpy.pad', 'np.pad', (['radar', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(radar, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)),\n mode='wrap')\n", (7032, 7114), True, 'import numpy as np\n'), ((7127, 7215), 'numpy.pad', 'np.pad', (['label', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(label, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)),\n mode='wrap')\n", (7133, 7215), True, 'import numpy as np\n'), ((7388, 7475), 'numpy.pad', 'np.pad', (['rgb', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(rgb, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode=\n 'wrap')\n", (7394, 7475), True, 'import numpy as np\n'), ((10360, 10408), 'os.path.join', 'os.path.join', (["(dest_path + name + '_' + 'log.txt')"], {}), "(dest_path + name + '_' + 'log.txt')\n", (10372, 10408), False, 'import os\n'), ((10669, 10724), 'os.path.join', 
'os.path.join', (['tile_path', "('radar/' + name + '_radar.npy')"], {}), "(tile_path, 'radar/' + name + '_radar.npy')\n", (10681, 10724), False, 'import os\n'), ((10746, 10801), 'os.path.join', 'os.path.join', (['tile_path', "('label/' + name + '_label.npy')"], {}), "(tile_path, 'label/' + name + '_label.npy')\n", (10758, 10801), False, 'import os\n'), ((10981, 11032), 'os.path.join', 'os.path.join', (['tile_path', "('rgb/' + name + '_rgb.npy')"], {}), "(tile_path, 'rgb/' + name + '_rgb.npy')\n", (10993, 11032), False, 'import os\n'), ((11977, 12065), 'numpy.pad', 'np.pad', (['radar', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(radar, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)),\n mode='wrap')\n", (11983, 12065), True, 'import numpy as np\n'), ((12078, 12166), 'numpy.pad', 'np.pad', (['label', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(label, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)),\n mode='wrap')\n", (12084, 12166), True, 'import numpy as np\n'), ((12339, 12426), 'numpy.pad', 'np.pad', (['rgb', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(rgb, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode=\n 'wrap')\n", (12345, 12426), True, 'import numpy as np\n'), ((15034, 15082), 'os.path.join', 'os.path.join', (["(dest_path + name + '_' + 'log.txt')"], {}), "(dest_path + name + '_' + 'log.txt')\n", (15046, 15082), False, 'import os\n'), ((15343, 15398), 'os.path.join', 'os.path.join', (['tile_path', "('radar/' + name + '_radar.npy')"], {}), "(tile_path, 'radar/' + name + '_radar.npy')\n", (15355, 15398), False, 'import os\n'), ((15420, 15475), 'os.path.join', 'os.path.join', (['tile_path', "('label/' + name + '_label.npy')"], {}), "(tile_path, 'label/' + name + '_label.npy')\n", (15432, 15475), False, 'import os\n'), ((15655, 15706), 'os.path.join', 'os.path.join', (['tile_path', "('rgb/' + name + 
'_rgb.npy')"], {}), "(tile_path, 'rgb/' + name + '_rgb.npy')\n", (15667, 15706), False, 'import os\n'), ((16651, 16739), 'numpy.pad', 'np.pad', (['radar', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(radar, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)),\n mode='wrap')\n", (16657, 16739), True, 'import numpy as np\n'), ((16752, 16840), 'numpy.pad', 'np.pad', (['label', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(label, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)),\n mode='wrap')\n", (16758, 16840), True, 'import numpy as np\n'), ((17013, 17100), 'numpy.pad', 'np.pad', (['rgb', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(rgb, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode=\n 'wrap')\n", (17019, 17100), True, 'import numpy as np\n'), ((20614, 20662), 'os.path.join', 'os.path.join', (["(dest_path + name + '_' + 'log.txt')"], {}), "(dest_path + name + '_' + 'log.txt')\n", (20626, 20662), False, 'import os\n'), ((20923, 20978), 'os.path.join', 'os.path.join', (['tile_path', "('radar/' + name + '_radar.npy')"], {}), "(tile_path, 'radar/' + name + '_radar.npy')\n", (20935, 20978), False, 'import os\n'), ((21000, 21055), 'os.path.join', 'os.path.join', (['tile_path', "('label/' + name + '_label.npy')"], {}), "(tile_path, 'label/' + name + '_label.npy')\n", (21012, 21055), False, 'import os\n'), ((21075, 21126), 'os.path.join', 'os.path.join', (['tile_path', "('rgb/' + name + '_rgb.npy')"], {}), "(tile_path, 'rgb/' + name + '_rgb.npy')\n", (21087, 21126), False, 'import os\n'), ((21929, 21965), 'numpy.ones', 'np.ones', (['(patch_size, patch_size, 3)'], {}), '((patch_size, patch_size, 3))\n', (21936, 21965), True, 'import numpy as np\n'), ((22876, 22964), 'numpy.pad', 'np.pad', (['radar', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(radar, ((0, 0), (extra_w_l, 
extra_w_r), (extra_h_l, extra_h_r)),\n mode='wrap')\n", (22882, 22964), True, 'import numpy as np\n'), ((22977, 23065), 'numpy.pad', 'np.pad', (['label', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(label, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)),\n mode='wrap')\n", (22983, 23065), True, 'import numpy as np\n'), ((23444, 23531), 'numpy.pad', 'np.pad', (['rgb', '((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r))'], {'mode': '"""wrap"""'}), "(rgb, ((0, 0), (extra_w_l, extra_w_r), (extra_h_l, extra_h_r)), mode=\n 'wrap')\n", (23450, 23531), True, 'import numpy as np\n'), ((3131, 3146), 'numpy.mean', 'np.mean', (['img[i]'], {}), '(img[i])\n', (3138, 3146), True, 'import numpy as np\n'), ((3167, 3181), 'numpy.std', 'np.std', (['img[i]'], {}), '(img[i])\n', (3173, 3181), True, 'import numpy as np\n'), ((3301, 3341), 'numpy.true_divide', 'np.true_divide', (['(img[i] - mean[i])', 'std[i]'], {}), '(img[i] - mean[i], std[i])\n', (3315, 3341), True, 'import numpy as np\n'), ((7937, 7982), 'numpy.unique', 'np.unique', (['validity_patch'], {'return_counts': '(True)'}), '(validity_patch, return_counts=True)\n', (7946, 7982), True, 'import numpy as np\n'), ((9214, 9249), 'os.path.join', 'os.path.join', (['dest_path', '"""posx.pkl"""'], {}), "(dest_path, 'posx.pkl')\n", (9226, 9249), False, 'import os\n'), ((9282, 9317), 'os.path.join', 'os.path.join', (['dest_path', '"""posy.pkl"""'], {}), "(dest_path, 'posy.pkl')\n", (9294, 9317), False, 'import os\n'), ((12852, 12897), 'numpy.unique', 'np.unique', (['validity_patch'], {'return_counts': '(True)'}), '(validity_patch, return_counts=True)\n', (12861, 12897), True, 'import numpy as np\n'), ((14129, 14164), 'os.path.join', 'os.path.join', (['dest_path', '"""posx.pkl"""'], {}), "(dest_path, 'posx.pkl')\n", (14141, 14164), False, 'import os\n'), ((14197, 14232), 'os.path.join', 'os.path.join', (['dest_path', '"""posy.pkl"""'], {}), "(dest_path, 'posy.pkl')\n", (14209, 
14232), False, 'import os\n'), ((17583, 17628), 'numpy.unique', 'np.unique', (['validity_patch'], {'return_counts': '(True)'}), '(validity_patch, return_counts=True)\n', (17592, 17628), True, 'import numpy as np\n'), ((18864, 18899), 'os.path.join', 'os.path.join', (['dest_path', '"""posx.pkl"""'], {}), "(dest_path, 'posx.pkl')\n", (18876, 18899), False, 'import os\n'), ((18932, 18967), 'os.path.join', 'os.path.join', (['dest_path', '"""posy.pkl"""'], {}), "(dest_path, 'posy.pkl')\n", (18944, 18967), False, 'import os\n'), ((24219, 24264), 'numpy.unique', 'np.unique', (['validity_patch'], {'return_counts': '(True)'}), '(validity_patch, return_counts=True)\n', (24228, 24264), True, 'import numpy as np\n'), ((26604, 26639), 'os.path.join', 'os.path.join', (['dest_path', '"""posx.pkl"""'], {}), "(dest_path, 'posx.pkl')\n", (26616, 26639), False, 'import os\n'), ((26672, 26707), 'os.path.join', 'os.path.join', (['dest_path', '"""posy.pkl"""'], {}), "(dest_path, 'posy.pkl')\n", (26684, 26707), False, 'import os\n'), ((3536, 3547), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (3542, 3547), True, 'import numpy as np\n'), ((3550, 3561), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (3556, 3561), True, 'import numpy as np\n'), ((6470, 6507), 'math.ceil', 'math.ceil', (['((patch_size - extra_w) / 2)'], {}), '((patch_size - extra_w) / 2)\n', (6479, 6507), False, 'import math\n'), ((6700, 6737), 'math.ceil', 'math.ceil', (['((patch_size - extra_h) / 2)'], {}), '((patch_size - extra_h) / 2)\n', (6709, 6737), False, 'import math\n'), ((8636, 8700), 'os.path.join', 'os.path.join', (['dest_path', '"""radar"""', "(t_n + '_' + p_n + '_radar.npy')"], {}), "(dest_path, 'radar', t_n + '_' + p_n + '_radar.npy')\n", (8648, 8700), False, 'import os\n'), ((8807, 8867), 'os.path.join', 'os.path.join', (['dest_path', '"""rgb"""', "(t_n + '_' + p_n + '_rgb.npy')"], {}), "(dest_path, 'rgb', t_n + '_' + p_n + '_rgb.npy')\n", (8819, 8867), False, 'import os\n'), ((9172, 9183), 
'time.time', 'time.time', ([], {}), '()\n', (9181, 9183), False, 'import time\n'), ((9371, 9382), 'time.time', 'time.time', ([], {}), '()\n', (9380, 9382), False, 'import time\n'), ((11421, 11458), 'math.ceil', 'math.ceil', (['((patch_size - extra_w) / 2)'], {}), '((patch_size - extra_w) / 2)\n', (11430, 11458), False, 'import math\n'), ((11651, 11688), 'math.ceil', 'math.ceil', (['((patch_size - extra_h) / 2)'], {}), '((patch_size - extra_h) / 2)\n', (11660, 11688), False, 'import math\n'), ((13551, 13615), 'os.path.join', 'os.path.join', (['dest_path', '"""radar"""', "(t_n + '_' + p_n + '_radar.npy')"], {}), "(dest_path, 'radar', t_n + '_' + p_n + '_radar.npy')\n", (13563, 13615), False, 'import os\n'), ((13722, 13782), 'os.path.join', 'os.path.join', (['dest_path', '"""rgb"""', "(t_n + '_' + p_n + '_rgb.npy')"], {}), "(dest_path, 'rgb', t_n + '_' + p_n + '_rgb.npy')\n", (13734, 13782), False, 'import os\n'), ((14087, 14098), 'time.time', 'time.time', ([], {}), '()\n', (14096, 14098), False, 'import time\n'), ((14286, 14297), 'time.time', 'time.time', ([], {}), '()\n', (14295, 14297), False, 'import time\n'), ((16095, 16132), 'math.ceil', 'math.ceil', (['((patch_size - extra_w) / 2)'], {}), '((patch_size - extra_w) / 2)\n', (16104, 16132), False, 'import math\n'), ((16325, 16362), 'math.ceil', 'math.ceil', (['((patch_size - extra_h) / 2)'], {}), '((patch_size - extra_h) / 2)\n', (16334, 16362), False, 'import math\n'), ((18822, 18833), 'time.time', 'time.time', ([], {}), '()\n', (18831, 18833), False, 'import time\n'), ((19021, 19032), 'time.time', 'time.time', ([], {}), '()\n', (19030, 19032), False, 'import time\n'), ((22320, 22357), 'math.ceil', 'math.ceil', (['((patch_size - extra_w) / 2)'], {}), '((patch_size - extra_w) / 2)\n', (22329, 22357), False, 'import math\n'), ((22550, 22587), 'math.ceil', 'math.ceil', (['((patch_size - extra_h) / 2)'], {}), '((patch_size - extra_h) / 2)\n', (22559, 22587), False, 'import math\n'), ((24717, 24729), 'numpy.sum', 
'np.sum', (['r', '(0)'], {}), '(r, 0)\n', (24723, 24729), True, 'import numpy as np\n'), ((24750, 24762), 'numpy.sum', 'np.sum', (['o', '(0)'], {}), '(o, 0)\n', (24756, 24762), True, 'import numpy as np\n'), ((24783, 24795), 'numpy.unique', 'np.unique', (['r'], {}), '(r)\n', (24792, 24795), True, 'import numpy as np\n'), ((24816, 24828), 'numpy.unique', 'np.unique', (['o'], {}), '(o)\n', (24825, 24828), True, 'import numpy as np\n'), ((26562, 26573), 'time.time', 'time.time', ([], {}), '()\n', (26571, 26573), False, 'import time\n'), ((26761, 26772), 'time.time', 'time.time', ([], {}), '()\n', (26770, 26772), False, 'import time\n'), ((3879, 3908), 'numpy.true_divide', 'np.true_divide', (['img[i]', 'mx[i]'], {}), '(img[i], mx[i])\n', (3893, 3908), True, 'import numpy as np\n'), ((6537, 6568), 'math.ceil', 'math.ceil', (['(patch_size - extra_w)'], {}), '(patch_size - extra_w)\n', (6546, 6568), False, 'import math\n'), ((6767, 6798), 'math.ceil', 'math.ceil', (['(patch_size - extra_h)'], {}), '(patch_size - extra_h)\n', (6776, 6798), False, 'import math\n'), ((8407, 8471), 'os.path.join', 'os.path.join', (['dest_path', '"""label"""', "(t_n + '_' + p_n + '_label.npy')"], {}), "(dest_path, 'label', t_n + '_' + p_n + '_label.npy')\n", (8419, 8471), False, 'import os\n'), ((11488, 11519), 'math.ceil', 'math.ceil', (['(patch_size - extra_w)'], {}), '(patch_size - extra_w)\n', (11497, 11519), False, 'import math\n'), ((11718, 11749), 'math.ceil', 'math.ceil', (['(patch_size - extra_h)'], {}), '(patch_size - extra_h)\n', (11727, 11749), False, 'import math\n'), ((13322, 13386), 'os.path.join', 'os.path.join', (['dest_path', '"""label"""', "(t_n + '_' + p_n + '_label.npy')"], {}), "(dest_path, 'label', t_n + '_' + p_n + '_label.npy')\n", (13334, 13386), False, 'import os\n'), ((16162, 16193), 'math.ceil', 'math.ceil', (['(patch_size - extra_w)'], {}), '(patch_size - extra_w)\n', (16171, 16193), False, 'import math\n'), ((16392, 16423), 'math.ceil', 'math.ceil', 
(['(patch_size - extra_h)'], {}), '(patch_size - extra_h)\n', (16401, 16423), False, 'import math\n'), ((18087, 18151), 'os.path.join', 'os.path.join', (['dest_path', '"""radar"""', "(t_n + '_' + p_n + '_radar.npy')"], {}), "(dest_path, 'radar', t_n + '_' + p_n + '_radar.npy')\n", (18099, 18151), False, 'import os\n'), ((18266, 18330), 'os.path.join', 'os.path.join', (['dest_path', '"""label"""', "(t_n + '_' + p_n + '_label.npy')"], {}), "(dest_path, 'label', t_n + '_' + p_n + '_label.npy')\n", (18278, 18330), False, 'import os\n'), ((18445, 18505), 'os.path.join', 'os.path.join', (['dest_path', '"""rgb"""', "(t_n + '_' + p_n + '_rgb.npy')"], {}), "(dest_path, 'rgb', t_n + '_' + p_n + '_rgb.npy')\n", (18457, 18505), False, 'import os\n'), ((22387, 22418), 'math.ceil', 'math.ceil', (['(patch_size - extra_w)'], {}), '(patch_size - extra_w)\n', (22396, 22418), False, 'import math\n'), ((22617, 22648), 'math.ceil', 'math.ceil', (['(patch_size - extra_h)'], {}), '(patch_size - extra_h)\n', (22626, 22648), False, 'import math\n'), ((25860, 25924), 'os.path.join', 'os.path.join', (['dest_path', '"""radar"""', "(t_n + '_' + p_n + '_radar.npy')"], {}), "(dest_path, 'radar', t_n + '_' + p_n + '_radar.npy')\n", (25872, 25924), False, 'import os\n'), ((26039, 26099), 'os.path.join', 'os.path.join', (['dest_path', '"""rgb"""', "(t_n + '_' + p_n + '_rgb.npy')"], {}), "(dest_path, 'rgb', t_n + '_' + p_n + '_rgb.npy')\n", (26051, 26099), False, 'import os\n'), ((25131, 25195), 'os.path.join', 'os.path.join', (['dest_path', '"""radar"""', "(t_n + '_' + p_n + '_radar.npy')"], {}), "(dest_path, 'radar', t_n + '_' + p_n + '_radar.npy')\n", (25143, 25195), False, 'import os\n'), ((25318, 25378), 'os.path.join', 'os.path.join', (['dest_path', '"""rgb"""', "(t_n + '_' + p_n + '_rgb.npy')"], {}), "(dest_path, 'rgb', t_n + '_' + p_n + '_rgb.npy')\n", (25330, 25378), False, 'import os\n'), ((3767, 3781), 'numpy.abs', 'np.abs', (['img[i]'], {}), '(img[i])\n', (3773, 3781), True, 'import 
numpy as np\n')] |
import copy
import json
from argparse import ArgumentParser
from json.decoder import JSONDecoder
from pathlib import Path
from typing import List, Optional, Tuple, Dict, Union
import cv2
from shapely import geometry
import numpy as np
from PIL import Image
def order_points(points):
    """Order four corner points as top-left, top-right, bottom-right, bottom-left.

    Uses the classic heuristic: the top-left corner has the smallest x+y sum,
    the bottom-right the largest; the top-right has the smallest y-x
    difference, the bottom-left the largest.  Inputs that do not contain
    exactly four points are returned unchanged.
    """
    if len(points) != 4:
        return points
    ordered = np.zeros((4, 2), dtype="float32")
    coord_sums = np.array(points).sum(axis=1)
    coord_diffs = np.diff(points, axis=1)
    ordered[0] = points[np.argmin(coord_sums)]   # top-left
    ordered[2] = points[np.argmax(coord_sums)]   # bottom-right
    ordered[1] = points[np.argmin(coord_diffs)]  # top-right
    ordered[3] = points[np.argmax(coord_diffs)]  # bottom-left
    return ordered
class Shape():
    """One labelme shape entry: a label plus its geometry (points and shape type)."""
    def __init__(self, shape):
        # `shape` is a raw dict taken from the "shapes" list of a labelme JSON file.
        self.label = shape['label']
        self.points = shape['points']
        self.type = shape['shape_type']
        self.group_id = shape['group_id']
        self.flags = shape['flags']
    def __repr__(self) -> str:
        return f'Shape({self.label}, {self.points})'
    def is_child(self, parent: 'Shape'):
        """Return True when this shape lies (mostly) inside `parent`.

        For a 4-point shape, the overlap ratio is the fraction of this
        polygon's area that intersects the parent polygon.  For a 2-point
        shape (a line), the ratio is 1.0 when the line's intersection with
        the parent stays within the parent polygon.  A ratio >= 0.6 counts
        as "child"; anything else (including other point counts) does not.
        """
        ratio = 0.
        polyA = geometry.Polygon(parent.points)
        if len(self.points) == 4:
            polyB = geometry.Polygon(self.points)
            if polyA.intersects(polyB):
                ratio = polyA.intersection(polyB).area / polyB.area
        elif len(self.points) == 2:
            line = geometry.LineString(self.points)
            if line.intersection(polyA).within(polyA):
                ratio = 1.0
        return ratio >= 0.6
    def astype(self, othertype):
        """Convert this shape's type in place.

        Only the 2-point 'rectangle' -> 4-point 'polygon' conversion is
        implemented; any other combination raises ValueError.
        """
        assert othertype in ['line', 'polygon', 'rectangle']
        if len(self.points) == 2 and self.type == 'rectangle' and othertype == 'polygon':
            # Normalize the two stored corners to (min, max) coordinates and
            # expand them into the four polygon corners.
            p1, p2 = self.points
            x1 = min(p1[0], p2[0])
            y1 = min(p1[1], p2[1])
            x2 = max(p1[0], p2[0])
            y2 = max(p1[1], p2[1])
            self.points = [[x1, y1], [x2, y1], [x2, y2], [x1, y2]]
            self.type = othertype
        else:
            raise ValueError()
    def find_transform(self, other: 'Shape'):
        """Return the 3x3 perspective matrix mapping this 4-point shape onto `other`.

        Both shapes must have exactly four points; corners are canonically
        ordered first so corresponding corners line up.
        """
        assert len(self.points) == 4 and len(other.points) == 4
        region_src = order_points(self.points)
        region_dst = order_points(other.points)
        src_array = np.array(region_src, np.float32).reshape(-1, 2)
        dst_array = np.array(region_dst, np.float32).reshape(-1, 2)
        M: np.ndarray = cv2.getPerspectiveTransform(src_array, dst_array)
        return M
    def map(self, transform: np.ndarray):
        """Return a new Shape with this shape's (ordered) points mapped through `transform`.

        Label, type, flags, and group_id are copied unchanged.
        """
        src_points = order_points(self.points)
        src_array = np.array(src_points, dtype=np.float32).reshape(-1, 2)
        dst_array = cv2.perspectiveTransform(np.array([src_array]), transform).squeeze(0)
        dst = dst_array.tolist()
        return Shape({
            'label': self.label,
            'points': dst,
            'shape_type': self.type,
            'flags': self.flags,
            'group_id': self.group_id
        })
class Annotation():
    """In-memory representation of a single labelme annotation file."""
    def __init__(self, image_path, image_height, image_width, shapes):
        self.image_path = image_path
        self.image_width = image_width
        self.image_height = image_height
        # Wrap each raw labelme shape dict in a Shape helper object.
        self.shapes = list(map(Shape, shapes))
    def __iter__(self):
        for shape in self.shapes:
            yield shape
    def __len__(self):
        return len(self.shapes)
    def keep_labels(self, labels: List[str]):
        """Keep only the shapes whose label appears in `labels`."""
        self.shapes = [shape for shape in self.shapes if shape.label in labels]
    def remove_labels(self, labels: List[str]):
        """Remove every shape whose label appears in `labels`."""
        self.shapes = [shape for shape in self.shapes if shape.label not in labels]
    @classmethod
    def parse_from_labelme(cls, json_path):
        """Build an Annotation from a labelme JSON file on disk."""
        # Use a context manager so the file handle is closed deterministically
        # (the original version leaked the handle returned by open()).
        with open(json_path, 'rt') as fp:
            json_dict = json.load(fp)
        return cls(json_dict['imagePath'],
                   json_dict['imageHeight'],
                   json_dict['imageWidth'],
                   json_dict['shapes'])
    def __repr__(self) -> str:
        shapes = self.shapes[:5]
        text = f'Annotation({self.image_path}, {self.image_width}x{self.image_height}, {len(self.shapes)}):\n'
        for shape in shapes:
            text += f'\t{shape.__repr__()}\n'
        # Bug fix: compare against the full shape list, not the 5-element
        # slice, so the ellipsis is actually shown when shapes are truncated.
        if len(self.shapes) > 5:
            text += '...\n'
        return text
    def find(self, labels: List[str], first: bool = False):
        """Return the shapes whose label is in `labels`.

        With first=True, return the first match or None when there is none;
        otherwise return the (possibly empty) list of matches.
        """
        results: List[Shape] = [shape for shape in self.shapes if shape.label in labels]
        if first:
            return results[0] if results else None
        return results
    def find_childs(self, ref_shape):
        """Return all shapes lying (mostly) inside `ref_shape`, excluding itself."""
        childs: List[Shape] = [shape for shape in self.shapes if shape.is_child(ref_shape) and shape != ref_shape]
        return childs
    def add_shapes(self, shapes: List[Shape]):
        self.shapes.extend(shapes)
    def remove_shapes(self, shapes: List[Shape]):
        """Remove the given shape objects (membership uses object identity/equality)."""
        self.shapes = [shape for shape in self.shapes if shape not in shapes]
    def to_json(self, path: Path):
        """Serialize this annotation back to a labelme-compatible JSON file."""
        # Context manager ensures the output file is flushed and closed
        # (the original version leaked the open() handle).
        with open(path, 'wt', encoding='utf8') as fp:
            json.dump(self, fp, ensure_ascii=False, indent=4, default=labelme_serializer)
def labelme_serializer(obj):
    """JSON serializer hook for Annotation/Shape objects.

    Pass as `default=` to json.dump; falls back to the object's __dict__
    for anything it does not recognize.
    """
    if isinstance(obj, Annotation):
        # Emit the top-level labelme document structure.
        return {
            'version': '4.5.6',
            'flags': {},
            'shapes': obj.shapes,
            'imageData': None,
            'imagePath': obj.image_path,
            'imageHeight': obj.image_height,
            'imageWidth': obj.image_width,
        }
    if isinstance(obj, Shape):
        # Emit one entry of the labelme "shapes" list.
        return {
            'label': obj.label,
            'points': obj.points,
            "group_id": obj.group_id,
            "shape_type": obj.type,
            "flags": obj.flags,
        }
    return obj.__dict__
if __name__ == "__main__":
    # Propagate annotations from one reference labelme file onto every labelme
    # file in a directory: matching "region" shapes define a perspective
    # transform, and all child shapes of each reference region are mapped
    # through it into the new annotation.
    parser = ArgumentParser()
    parser.add_argument('ref_json', type=str, help='Reference json file which will be duplicated for each image')
    parser.add_argument('region_path', type=str, default=None, help='Path to the file containing region configurations')
    parser.add_argument('json_dir', type=str,
                        help='Directory where the frames are located in')
    # NOTE(review): --ext is parsed but never used below (the glob is
    # hard-coded to *.json); confirm whether it can be removed.
    parser.add_argument('--ext', default='jpg', help='Image extension')
    args = parser.parse_args()
    region_path = Path(args.region_path)
    # The region configuration may be YAML or JSON; it must provide a 'names'
    # list and may optionally provide 'ignore' and 'depend' entries.
    if region_path.suffix == '.yaml':
        import yaml
        region_config = yaml.safe_load(open(region_path, 'rt'))
    elif region_path.suffix == '.json':
        import json
        region_config = json.load(open(region_path, 'rt'))
    else:
        print('Unsupport file type. Should be .yaml or .json')
        exit(-1)
    assert 'names' in region_config.keys()
    back_anno_ref: Annotation = Annotation.parse_from_labelme(args.ref_json)
    if region_config.get('ignore', None) is not None:
        back_anno_ref.remove_labels(region_config['ignore'])
    json_dir = Path(args.json_dir)
    for json_path in json_dir.glob(f'*.json'):
        print(f'Processing {json_path}')
        # Work on a fresh copy of the reference so shapes removed below do not
        # affect the processing of the next file.
        anno_ref: Annotation = copy.deepcopy(back_anno_ref)
        anno_new: Annotation = Annotation.parse_from_labelme(json_path)
        region_news = anno_new.find(region_config['names'])
        if len(region_news) == 0:
            print('Empty region annotations!')
            continue
        anno_new.keep_labels(region_config['names'])
        for region_new in region_news:
            region_ref: Shape = anno_ref.find([region_new.label], first=True)
            if region_ref is None:
                print(f'Not found corresponding region name = {region_new.label} annotation in reference. Skip')
                continue
            # Map every child shape of the reference region into the new file
            # via the region-to-region perspective transform.
            transform = region_ref.find_transform(region_new)
            region_ref_childs = anno_ref.find_childs(region_ref)
            region_new_childs = [child.map(transform) for child in region_ref_childs]
            anno_new.add_shapes(region_new_childs)
            # remove to avoid duplication
            anno_ref.remove_shapes(region_ref_childs)
        if 'depend' in region_config.keys():
            dependence = region_config['depend']
            for depend_region_name, depend_labels in dependence.items():
                # NOTE(review): Annotation.find expects a list of labels;
                # passing the bare string depend_region_name makes the
                # membership test a substring check -- confirm intended.
                region_ref = anno_ref.find(depend_region_name, first=True)
                if region_ref is None:
                    print(f'Unknow depend region name in reference, name = {depend_region_name}. Skip!')
                    continue
                region_new = anno_new.find(depend_region_name, first=True)
                if region_new is None:
                    print(f'Unknow depend region name in new, name = {depend_region_name}. Skip!')
                    continue
                transform = region_ref.find_transform(region_new)
                shapes = anno_ref.find(depend_labels)
                mapped_shapes = [shape.map(transform) for shape in shapes]
                anno_new.add_shapes(mapped_shapes)
                anno_ref.remove_shapes(shapes)
        anno_new.to_json(json_path)
| [
"copy.deepcopy",
"argparse.ArgumentParser",
"shapely.geometry.Polygon",
"numpy.argmax",
"cv2.getPerspectiveTransform",
"numpy.zeros",
"numpy.argmin",
"shapely.geometry.LineString",
"pathlib.Path",
"numpy.diff",
"numpy.array"
] | [((5891, 5907), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (5905, 5907), False, 'from argparse import ArgumentParser\n'), ((6385, 6407), 'pathlib.Path', 'Path', (['args.region_path'], {}), '(args.region_path)\n', (6389, 6407), False, 'from pathlib import Path\n'), ((6992, 7011), 'pathlib.Path', 'Path', (['args.json_dir'], {}), '(args.json_dir)\n', (6996, 7011), False, 'from pathlib import Path\n'), ((325, 358), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': '"""float32"""'}), "((4, 2), dtype='float32')\n", (333, 358), True, 'import numpy as np\n'), ((493, 516), 'numpy.diff', 'np.diff', (['points'], {'axis': '(1)'}), '(points, axis=1)\n', (500, 516), True, 'import numpy as np\n'), ((1051, 1082), 'shapely.geometry.Polygon', 'geometry.Polygon', (['parent.points'], {}), '(parent.points)\n', (1067, 1082), False, 'from shapely import geometry\n'), ((2347, 2396), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src_array', 'dst_array'], {}), '(src_array, dst_array)\n', (2374, 2396), False, 'import cv2\n'), ((7131, 7159), 'copy.deepcopy', 'copy.deepcopy', (['back_anno_ref'], {}), '(back_anno_ref)\n', (7144, 7159), False, 'import copy\n'), ((425, 437), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (434, 437), True, 'import numpy as np\n'), ((464, 476), 'numpy.argmax', 'np.argmax', (['s'], {}), '(s)\n', (473, 476), True, 'import numpy as np\n'), ((542, 557), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (551, 557), True, 'import numpy as np\n'), ((584, 599), 'numpy.argmax', 'np.argmax', (['diff'], {}), '(diff)\n', (593, 599), True, 'import numpy as np\n'), ((1137, 1166), 'shapely.geometry.Polygon', 'geometry.Polygon', (['self.points'], {}), '(self.points)\n', (1153, 1166), False, 'from shapely import geometry\n'), ((371, 387), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (379, 387), True, 'import numpy as np\n'), ((1330, 1362), 'shapely.geometry.LineString', 'geometry.LineString', (['self.points'], {}), 
'(self.points)\n', (1349, 1362), False, 'from shapely import geometry\n'), ((2207, 2239), 'numpy.array', 'np.array', (['region_src', 'np.float32'], {}), '(region_src, np.float32)\n', (2215, 2239), True, 'import numpy as np\n'), ((2275, 2307), 'numpy.array', 'np.array', (['region_dst', 'np.float32'], {}), '(region_dst, np.float32)\n', (2283, 2307), True, 'import numpy as np\n'), ((2524, 2562), 'numpy.array', 'np.array', (['src_points'], {'dtype': 'np.float32'}), '(src_points, dtype=np.float32)\n', (2532, 2562), True, 'import numpy as np\n'), ((2623, 2644), 'numpy.array', 'np.array', (['[src_array]'], {}), '([src_array])\n', (2631, 2644), True, 'import numpy as np\n')] |
import numpy as np
import numpy.testing as npt
import pytest
import lenstronomy.Plots.plot_util as plot_util
class TestPlotUtil(object):
    """Unit tests for lenstronomy.Plots.plot_util helpers."""

    def setup(self):
        pass

    def test_sqrt(self):
        # The sqrt rescaling should shift the image minimum to zero.
        raw = np.random.randn(10, 10)
        rescaled = plot_util.sqrt(raw)
        npt.assert_almost_equal(np.min(rescaled), 0)
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    pytest.main()
| [
"numpy.random.randn",
"numpy.min",
"lenstronomy.Plots.plot_util.sqrt",
"pytest.main"
] | [((381, 394), 'pytest.main', 'pytest.main', ([], {}), '()\n', (392, 394), False, 'import pytest\n'), ((218, 241), 'numpy.random.randn', 'np.random.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (233, 241), True, 'import numpy as np\n'), ((267, 288), 'lenstronomy.Plots.plot_util.sqrt', 'plot_util.sqrt', (['image'], {}), '(image)\n', (281, 288), True, 'import lenstronomy.Plots.plot_util as plot_util\n'), ((321, 343), 'numpy.min', 'np.min', (['image_rescaled'], {}), '(image_rescaled)\n', (327, 343), True, 'import numpy as np\n')] |
import argparse
import sklearn.metrics
import numpy as np
from numba import njit
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
import common
import inputparser
import util
import clustermaker
def convert_clustering_to_assignment(clusters):
  """Flatten `clusters` (a list of vid lists) into a hard assignment.

  Returns `(vids, assign)`, where `vids` are the variant IDs in
  `common.sort_vids` order and `assign[i]` is the index of the cluster
  containing `vids[i]`.
  """
  vid_to_idx = {}
  for cidx, cluster in enumerate(clusters):
    for vid in cluster:
      vid_to_idx[vid] = cidx
  vids = common.sort_vids(vid_to_idx.keys())
  assign = np.array([vid_to_idx[vid] for vid in vids])
  return (vids, assign)
def extract_assignment(paramsfn):
  """Load the clustering from `paramsfn` and return (num_clusters, vids, assign)."""
  clusters = inputparser.load_params(paramsfn)['clusters']
  vids, assign = convert_clustering_to_assignment(clusters)
  return (len(clusters), vids, assign)
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('ssm_fn')
parser.add_argument('params1_fn')
parser.add_argument('params2_fn')
args = parser.parse_args()
variants = inputparser.load_ssms(args.ssm_fn)
vids = common.extract_vids(variants)
V, T, T_prime, omega = inputparser.load_read_counts(variants)
M, S = V.shape
C1, vids1, assign1 = extract_assignment(args.params1_fn)
C2, vids2, assign2 = extract_assignment(args.params2_fn)
assert vids1 == vids2 == vids
hparams = {
'phi_alpha0': 1.,
'phi_beta0': 1.,
'conc': 1e-2,
}
llh1 = clustermaker.calc_llh(V, T_prime, assign1, hparams['phi_alpha0'], hparams['phi_beta0'], hparams['conc'])
llh2 = clustermaker.calc_llh(V, T_prime, assign2, hparams['phi_alpha0'], hparams['phi_beta0'], hparams['conc'])
nlglh1 = -llh1 / (M*S*np.log(2))
nlglh2 = -llh2 / (M*S*np.log(2))
homo, comp, vm = sklearn.metrics.homogeneity_completeness_v_measure(assign1, assign2)
ami = sklearn.metrics.adjusted_mutual_info_score(assign1, assign2)
print(C1, C2, llh1, llh2, nlglh1, nlglh2, homo, comp, vm, ami, sep=',')
if __name__ == '__main__':
main()
| [
"inputparser.load_read_counts",
"argparse.ArgumentParser",
"numpy.log",
"os.path.dirname",
"inputparser.load_ssms",
"common.extract_vids",
"clustermaker.calc_llh",
"numpy.array",
"inputparser.load_params"
] | [((430, 470), 'numpy.array', 'np.array', (['[mapping[vid] for vid in vids]'], {}), '([mapping[vid] for vid in vids])\n', (438, 470), True, 'import numpy as np\n'), ((541, 574), 'inputparser.load_params', 'inputparser.load_params', (['paramsfn'], {}), '(paramsfn)\n', (564, 574), False, 'import inputparser\n'), ((738, 850), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""LOL HI THERE"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='LOL HI THERE', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (761, 850), False, 'import argparse\n'), ((1005, 1039), 'inputparser.load_ssms', 'inputparser.load_ssms', (['args.ssm_fn'], {}), '(args.ssm_fn)\n', (1026, 1039), False, 'import inputparser\n'), ((1049, 1078), 'common.extract_vids', 'common.extract_vids', (['variants'], {}), '(variants)\n', (1068, 1078), False, 'import common\n'), ((1104, 1142), 'inputparser.load_read_counts', 'inputparser.load_read_counts', (['variants'], {}), '(variants)\n', (1132, 1142), False, 'import inputparser\n'), ((1401, 1510), 'clustermaker.calc_llh', 'clustermaker.calc_llh', (['V', 'T_prime', 'assign1', "hparams['phi_alpha0']", "hparams['phi_beta0']", "hparams['conc']"], {}), "(V, T_prime, assign1, hparams['phi_alpha0'], hparams[\n 'phi_beta0'], hparams['conc'])\n", (1422, 1510), False, 'import clustermaker\n'), ((1515, 1624), 'clustermaker.calc_llh', 'clustermaker.calc_llh', (['V', 'T_prime', 'assign2', "hparams['phi_alpha0']", "hparams['phi_beta0']", "hparams['conc']"], {}), "(V, T_prime, assign2, hparams['phi_alpha0'], hparams[\n 'phi_beta0'], hparams['conc'])\n", (1536, 1624), False, 'import clustermaker\n'), ((132, 157), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (147, 157), False, 'import os\n'), ((1644, 1653), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1650, 1653), True, 'import numpy as np\n'), ((1679, 1688), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1685, 1688), True, 
'import numpy as np\n')] |
#!/usr/bin/env python3
import torch
import os
from typing import List
from advattack.error_handling.exception import DatasetNotFoundError
from advattack.data_handling.dataset import Dataset
import codecs
import numpy as np
import matplotlib.pyplot as plt
import glob
from advattack.util.logger import logger
class MNISTDataset(Dataset):
""" MNIST dataset (http://yann.lecun.com/exdb/mnist/)
"""
# TODO: Fix this ugly hard coding ...
logger = logger.getChild("data_handling.dataset.mnist.mnist_dataset.MNISTDataset")
def __init__(self, root_path, feature_transform_fun=None, target_transform_fun=None):
super(MNISTDataset, self).__init__(root_path, feature_transform_fun, target_transform_fun)
def __len__(self):
return self.samples.shape[0]
def visualize_samples(self, min_index, max_index):
plots_per_column = 15 # we might want to make this configurable
plot_count = max_index - min_index + 1
cols = min(plot_count, plots_per_column)
rows = int(plot_count/plots_per_column) + 1
plt.figure(figsize=(plots_per_column, rows))
plt.subplots_adjust(top=0.8, bottom=0, hspace=1, wspace=0.5)
for fig_index, index in enumerate(range(min_index, max_index+1)):
ax = plt.subplot(rows, cols, fig_index+1)
ax.set_axis_off()
pixels, label_index = self[index]
label = self.label_map[label_index]
plt.title(f'idx:{index}\nlbl:{label}')
plt.imshow(pixels, cmap='gray')
plt.show()
@classmethod
def get_config(cls):
resources = {
# {resource_url: (md5_hash, destination_folder, extraction_function)}
'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz': (
"f68b3c2dcbeaaa9fbdd348bbdeb94873", "samples/", MNISTDataset.extract_samples),
'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz': (
"d53e105ee54ea40749a09fcbcd1e9432", "labels/", MNISTDataset.extract_labels),
'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz': (
"9fb629c4189551a2d022fa330f9573f3", "samples/", MNISTDataset.extract_samples),
'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz': (
"ec29112dd5afa0611ce80d1b7f02629c", "labels/", MNISTDataset.extract_labels)
}
return resources
@classmethod
def load(cls, path, feature_transform_fun=None, target_transform_fun=None):
if cls.check_exists(path):
return MNISTDataset(root_path=path)
else:
raise DatasetNotFoundError(f"Dataset not found in {path}.")
@classmethod
def check_exists(cls, root_path) -> bool:
files = glob.glob(os.path.join(root_path, "**/*.pt"))
return len(files) == 4
@classmethod
def create_dataset(cls, root_path) -> str:
resources = cls.get_config()
cls.populate_data_repository(root_path, resources, force_download=True)
return root_path
@classmethod
def extract_labels(cls, source_path):
target_file_name = os.path.splitext(os.path.basename(source_path))[0] + ".pt"
target_path = os.path.join(os.path.dirname(source_path), target_file_name)
unzipped_path = Dataset.extract_gzip(source_path, remove_finished=True)
data = cls.read_label_file(unzipped_path)
with open(target_path, 'wb') as f:
torch.save(data, f)
os.remove(unzipped_path)
@classmethod
def extract_samples(cls, source_path):
target_file_name = os.path.splitext(os.path.basename(source_path))[0] + ".pt"
target_path = os.path.join(os.path.dirname(source_path), target_file_name)
unzipped_path = Dataset.extract_gzip(source_path, remove_finished=True)
data = MNISTDataset.read_image_file(unzipped_path)
with open(target_path, 'wb') as f:
torch.save(data, f)
def load_data_from_disc(self) -> (List, List, List):
"""Method that implements loading functionality of an on disk dataset.
:param folder_path: Path to dataset folder
:param data_files:
:return: samples, targets
"""
samples_paths = sorted(glob.glob(os.path.join(self.root_path, "samples/*.pt")))
labels_paths = sorted(glob.glob(os.path.join(self.root_path, "labels/*.pt")))
MNISTDataset.logger.debug(f"Loading samples from {samples_paths}")
samples = [torch.load(path) for path in samples_paths]
samples_tensor = torch.cat(samples, 0)
samples_tensor = samples_tensor.unsqueeze(-1)
labels = [torch.load(path) for path in labels_paths]
labels_tensor = torch.cat(labels, 0)
label_map = list(range(10))
return samples_tensor, labels_tensor, label_map
# Helper methods to extract the data from the provided raw dataset
@classmethod
def get_int(cls, b):
return int(codecs.encode(b, 'hex'), 16)
@classmethod
def read_label_file(cls, path:str):
with open(path, 'rb') as f:
data = f.read()
assert cls.get_int(data[:4]) == 2049
length = cls.get_int(data[4:8])
parsed = np.frombuffer(data, dtype=np.uint8, offset=8)
torch_tensor = torch.from_numpy(parsed).view(length).long()
torch_tensor = torch_tensor.long()
return torch_tensor
@classmethod
def read_image_file(cls, path:str):
with open(path, 'rb') as f:
data = f.read()
print(cls.get_int(data[:4]))
assert cls.get_int(data[:4]) == 2051
length = cls.get_int(data[4:8])
num_rows = cls.get_int(data[8:12])
num_cols = cls.get_int(data[12:16])
parsed = np.frombuffer(data, dtype=np.uint8, offset=16)
torch_tensor = torch.from_numpy(parsed).view(length, num_rows, num_cols)
torch_tensor = torch_tensor.float()
return torch_tensor
if __name__== "__main__":
from advattack import datasets_path
root_path = os.path.join(datasets_path, MNISTDataset.get_dataset_identifier())
if not MNISTDataset.check_exists(root_path):
root_path = MNISTDataset.create_dataset(root_path=root_path)
dataset = MNISTDataset.load(root_path)
dataset.visualize_samples(min_index=0, max_index=5)
len(dataset)
| [
"matplotlib.pyplot.title",
"os.remove",
"advattack.data_handling.dataset.Dataset.extract_gzip",
"torch.cat",
"advattack.error_handling.exception.DatasetNotFoundError",
"matplotlib.pyplot.figure",
"os.path.join",
"codecs.encode",
"matplotlib.pyplot.imshow",
"os.path.dirname",
"torch.load",
"mat... | [((462, 535), 'advattack.util.logger.logger.getChild', 'logger.getChild', (['"""data_handling.dataset.mnist.mnist_dataset.MNISTDataset"""'], {}), "('data_handling.dataset.mnist.mnist_dataset.MNISTDataset')\n", (477, 535), False, 'from advattack.util.logger import logger\n'), ((1071, 1115), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(plots_per_column, rows)'}), '(figsize=(plots_per_column, rows))\n', (1081, 1115), True, 'import matplotlib.pyplot as plt\n'), ((1124, 1184), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.8)', 'bottom': '(0)', 'hspace': '(1)', 'wspace': '(0.5)'}), '(top=0.8, bottom=0, hspace=1, wspace=0.5)\n', (1143, 1184), True, 'import matplotlib.pyplot as plt\n'), ((1540, 1550), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1548, 1550), True, 'import matplotlib.pyplot as plt\n'), ((3298, 3353), 'advattack.data_handling.dataset.Dataset.extract_gzip', 'Dataset.extract_gzip', (['source_path'], {'remove_finished': '(True)'}), '(source_path, remove_finished=True)\n', (3318, 3353), False, 'from advattack.data_handling.dataset import Dataset\n'), ((3487, 3511), 'os.remove', 'os.remove', (['unzipped_path'], {}), '(unzipped_path)\n', (3496, 3511), False, 'import os\n'), ((3766, 3821), 'advattack.data_handling.dataset.Dataset.extract_gzip', 'Dataset.extract_gzip', (['source_path'], {'remove_finished': '(True)'}), '(source_path, remove_finished=True)\n', (3786, 3821), False, 'from advattack.data_handling.dataset import Dataset\n'), ((4555, 4576), 'torch.cat', 'torch.cat', (['samples', '(0)'], {}), '(samples, 0)\n', (4564, 4576), False, 'import torch\n'), ((4717, 4737), 'torch.cat', 'torch.cat', (['labels', '(0)'], {}), '(labels, 0)\n', (4726, 4737), False, 'import torch\n'), ((1276, 1314), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', '(fig_index + 1)'], {}), '(rows, cols, fig_index + 1)\n', (1287, 1314), True, 'import matplotlib.pyplot as plt\n'), ((1449, 1490), 
'matplotlib.pyplot.title', 'plt.title', (['f"""idx:{index}\nlbl:{label}"""'], {}), '(f"""idx:{index}\nlbl:{label}""")\n', (1458, 1490), True, 'import matplotlib.pyplot as plt\n'), ((1500, 1531), 'matplotlib.pyplot.imshow', 'plt.imshow', (['pixels'], {'cmap': '"""gray"""'}), "(pixels, cmap='gray')\n", (1510, 1531), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2680), 'advattack.error_handling.exception.DatasetNotFoundError', 'DatasetNotFoundError', (['f"""Dataset not found in {path}."""'], {}), "(f'Dataset not found in {path}.')\n", (2647, 2680), False, 'from advattack.error_handling.exception import DatasetNotFoundError\n'), ((2771, 2805), 'os.path.join', 'os.path.join', (['root_path', '"""**/*.pt"""'], {}), "(root_path, '**/*.pt')\n", (2783, 2805), False, 'import os\n'), ((3226, 3254), 'os.path.dirname', 'os.path.dirname', (['source_path'], {}), '(source_path)\n', (3241, 3254), False, 'import os\n'), ((3459, 3478), 'torch.save', 'torch.save', (['data', 'f'], {}), '(data, f)\n', (3469, 3478), False, 'import torch\n'), ((3694, 3722), 'os.path.dirname', 'os.path.dirname', (['source_path'], {}), '(source_path)\n', (3709, 3722), False, 'import os\n'), ((3936, 3955), 'torch.save', 'torch.save', (['data', 'f'], {}), '(data, f)\n', (3946, 3955), False, 'import torch\n'), ((4486, 4502), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (4496, 4502), False, 'import torch\n'), ((4650, 4666), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (4660, 4666), False, 'import torch\n'), ((4964, 4987), 'codecs.encode', 'codecs.encode', (['b', '"""hex"""'], {}), "(b, 'hex')\n", (4977, 4987), False, 'import codecs\n'), ((5229, 5274), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'np.uint8', 'offset': '(8)'}), '(data, dtype=np.uint8, offset=8)\n', (5242, 5274), True, 'import numpy as np\n'), ((5799, 5845), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'np.uint8', 'offset': '(16)'}), '(data, dtype=np.uint8, offset=16)\n', (5812, 5845), True, 
'import numpy as np\n'), ((4259, 4303), 'os.path.join', 'os.path.join', (['self.root_path', '"""samples/*.pt"""'], {}), "(self.root_path, 'samples/*.pt')\n", (4271, 4303), False, 'import os\n'), ((4346, 4389), 'os.path.join', 'os.path.join', (['self.root_path', '"""labels/*.pt"""'], {}), "(self.root_path, 'labels/*.pt')\n", (4358, 4389), False, 'import os\n'), ((3149, 3178), 'os.path.basename', 'os.path.basename', (['source_path'], {}), '(source_path)\n', (3165, 3178), False, 'import os\n'), ((3617, 3646), 'os.path.basename', 'os.path.basename', (['source_path'], {}), '(source_path)\n', (3633, 3646), False, 'import os\n'), ((5873, 5897), 'torch.from_numpy', 'torch.from_numpy', (['parsed'], {}), '(parsed)\n', (5889, 5897), False, 'import torch\n'), ((5302, 5326), 'torch.from_numpy', 'torch.from_numpy', (['parsed'], {}), '(parsed)\n', (5318, 5326), False, 'import torch\n')] |
# Advent of Code 2021
# --- Day 3: Binary Diagnostic ---
# https://adventofcode.com/2021/day/3
#
# Author: NoNonsenseTekkie
import pandas as pd
import numpy as np
def make_headers(n):
return ["c" + str(i+1) for i in range(n)]
def to_binary_matrix(report):
"""
Convert the diagnostic report as list of lines into a list of arrays.
:param report: The input report as a list of lines.
:return: A Numpy 2-D array of a list of arrays of binaries.
"""
arrays = list(map(lambda n: list(n.strip()), report))
return np.array(arrays)
def to_decimal(binary_array):
"""
Join the list of ['1', '0', '1', '1', '0'] into a binary string
and convert into decimal integer.
:param binary_array: The list of binary strings.
:return: The decimal value of the joined binary string.
"""
binary_str = "".join(binary_array)
return int(binary_str, 2)
def get_gamma_array(df: pd.DataFrame):
"""
Get the top most values from the data frame mode as gamma rate.
:param df: The data frame to extract gamma rate.
:return: The gamma rate array.
"""
top = df.mode().to_numpy()
if top.shape[0] > 1:
gamma_list = [top[1][i] if top[1][i] == '1' else top[0][i] for i in range(top.shape[1])]
array = np.array(gamma_list)
else:
array = top[0]
return array
def part_1(df: pd.DataFrame):
"""
Use the binary numbers in your diagnostic report to calculate the gamma rate and epsilon rate, then multiply them
together. What is the power consumption of the submarine?
(Be sure to represent your answer in decimal, not binary.)
:param df: The data frame of the diagnostic report.
:return: The power consumption of the submarine (gamma rate x epsilon rate)
"""
print("\n===== Part 1 =====")
gamma_array, epsilon_array = get_gamma_epsilon(df)
gamma = to_decimal(gamma_array)
epsilon = to_decimal(epsilon_array)
print(f"Gamma = {gamma} | Epsilon = {epsilon}")
return gamma * epsilon
def get_epsilon_array(gamma_array):
"""Flip 0 and 1 for epsilon from gamma"""
return list(map(lambda d: '1' if d == '0' else '0', gamma_array))
def get_gamma_epsilon(df: pd.DataFrame):
gamma_array = get_gamma_array(df)
return gamma_array, get_epsilon_array(gamma_array)
def query_on(df, column, value):
query = f"c{column} == ['{value}']"
return df.query(query)
def part_2(df: pd.DataFrame):
print("\n===== Part 2 =====")
df_oxygen = query_oxygen(df_report, 0)
oxygen = to_decimal(df_oxygen.to_numpy()[0])
print(f"Oxygen scrubber rating:", oxygen)
df_co2 = query_co2(df_report, 0)
co2 = to_decimal(df_co2.to_numpy()[0])
print(f"CO2 scrubber rating:", co2)
return oxygen * co2
def query_oxygen(df: pd.DataFrame, index: int):
if index >= df.shape[1]:
return df
gamma_array = get_gamma_array(df)
query_o = query_on(df, index+1, gamma_array[index])
# print(f"DEBUG: Query on {index}:\n{query_o}")
# print(f"DEBUG: shape = {query_o.shape}")
if query_o.shape[0] == 1:
return query_o
else:
return query_oxygen(query_o, index+1)
def query_co2(df: pd.DataFrame, index: int):
if index >= df.shape[1]:
return df
gamma_array, epsilon_array = get_gamma_epsilon(df)
query = query_on(df, index+1, epsilon_array[index])
# print(f"DEBUG: Query on {index}:\n{query}")
# print(f"DEBUG: shape = {query.shape}")
if query.shape[0] == 1:
return query
else:
return query_co2(query, index+1)
# MAIN STARTS HERE
INPUT = "input.txt"
with open(INPUT) as file:
lines = file.readlines()
binary_arrays = to_binary_matrix(lines)
headers = make_headers(binary_arrays.shape[1])
df_report = pd.DataFrame(binary_arrays, columns=headers)
solution_1 = part_1(df_report)
print(f"The power consumption of the submarine is {solution_1}.")
# Part 2
solution_2 = part_2(df_report)
print(f"The life support rating of the submarine = {solution_2}")
| [
"pandas.DataFrame",
"numpy.array"
] | [((3760, 3804), 'pandas.DataFrame', 'pd.DataFrame', (['binary_arrays'], {'columns': 'headers'}), '(binary_arrays, columns=headers)\n', (3772, 3804), True, 'import pandas as pd\n'), ((544, 560), 'numpy.array', 'np.array', (['arrays'], {}), '(arrays)\n', (552, 560), True, 'import numpy as np\n'), ((1279, 1299), 'numpy.array', 'np.array', (['gamma_list'], {}), '(gamma_list)\n', (1287, 1299), True, 'import numpy as np\n')] |
from typing import Union
import numpy as np
from casadi import MX, SX, DM
class Mapping:
"""
Mapping of index set to a different index set
Example of use:
- to_map = Mapping([0, 1, 1, 3, -1, 1], [3])
- obj = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
- mapped_obj = to_map.map(obj)
Expected result:
- mapped_obj == np.array([0.1, 0.2, 0.2, -0.4, 0, 0.1])
Attributes
----------
map_idx: list[int]
The actual index list that links to the other set, an negative value links to a numerical 0
Methods
-------
map(self, obj: list) -> list
Apply the mapping to an obj
len(self) -> int
Get the len of the mapping
"""
def __init__(self, map_idx: Union[list, tuple, range, np.ndarray]):
"""
Parameters
----------
map_idx: Union[list, tuple, range]
The actual index list that links to the other set
"""
self.map_idx = map_idx
def map(self, obj: Union[tuple, list, np.ndarray, MX, SX, DM]) -> Union[np.ndarray, MX, SX, DM]:
"""
Apply the mapping to an matrix object. The rows are mapped while the columns are preserved as is
Parameters
----------
obj: Union[np.ndarray, MX, SX, DM]
The matrix to map
Returns
-------
The list mapped
"""
# Declare a zero filled object
if isinstance(obj, (tuple, list)):
obj = np.array(obj)
if isinstance(obj, np.ndarray):
if len(obj.shape) == 1:
obj = obj[:, np.newaxis]
mapped_obj = np.zeros((len(self.map_idx), obj.shape[1]))
elif isinstance(obj, (MX, SX, DM)):
mapped_obj = type(obj).zeros(len(self.map_idx), obj.shape[1])
else:
raise RuntimeError("map must be applied on np.ndarray, MX or SX")
# Fill the positive values
index_plus_in_origin = [abs(v) for v in self.map_idx if v is not None and v >= 0]
index_plus_in_new = [i for i, v in enumerate(self.map_idx) if v is not None and v >= 0]
mapped_obj[index_plus_in_new, :] = obj[index_plus_in_origin, :] # Fill the non zeros values
# Fill the negative values
index_minus_in_origin = [abs(v) for v in self.map_idx if v is not None and v < 0]
index_minus_in_new = [i for i, v in enumerate(self.map_idx) if v is not None and v < 0]
mapped_obj[index_minus_in_new, :] = -obj[index_minus_in_origin, :] # Fill the non zeros values
return mapped_obj
@property
def len(self) -> int:
"""
Get the len of the mapping
Returns
-------
The len of the mapping
"""
return len(self.map_idx)
class BiMapping:
"""
Mapping of two index sets between each other
Attributes
----------
to_second: Mapping
The mapping that links the first variable to the second
to_first: Mapping
The mapping that links the second variable to the first
"""
def __init__(
self, to_second: Union[Mapping, int, list, tuple, range], to_first: Union[Mapping, int, list, tuple, range]
):
"""
Parameters
----------
to_second: Union[Mapping, list[int], tuple[int], range]
The mapping that links the first index set to the second
to_first: Union[Mapping, list[int], tuple[int], range]
The mapping that links the second index set to the first
"""
if isinstance(to_second, (list, tuple, range)):
to_second = Mapping(to_second)
if isinstance(to_first, (list, tuple, range)):
to_first = Mapping(to_first)
if not isinstance(to_second, Mapping):
raise RuntimeError("to_second must be a Mapping class")
if not isinstance(to_first, Mapping):
raise RuntimeError("to_first must be a Mapping class")
self.to_second = to_second
self.to_first = to_first
| [
"numpy.array"
] | [((1488, 1501), 'numpy.array', 'np.array', (['obj'], {}), '(obj)\n', (1496, 1501), True, 'import numpy as np\n')] |
# import copy
# h, w = list(map(int, input().split()))
# black_list = []
# white_list = []
# for i in range(h):
# for j, val in enumerate(list(input())):
# if val == "#":
# black_list.append([i,j])
# else:
# white_list.append([i,j])
# max_val = 0
# for white in white_list:
# min_val = w*h
# for black in black_list:
# diff = abs(white[0]-black[0]) + abs(white[1]-black[1])
# if diff < min_val:
# min_val = diff
# if max_val < min_val:
# max_val = min_val
# print(max_val)
import numpy
def main():
height, width = map(int, input().split())
max_value = height + width
values = []
for y in range(height):
values.append([0 if v == '#' else max_value for v in input().strip()])
values = numpy.array(values)
for x in range(width - 1):
a = values[:, x] + 1
b = values[:, x + 1]
values[:, x + 1] = numpy.minimum(values[:, x] + 1, values[:, x + 1])
for x in range(width - 1, 0, -1):
values[:, x - 1] = numpy.minimum(values[:, x] + 1, values[:, x - 1])
for y in range(height - 1):
values[y + 1, :] = numpy.minimum(values[y, :] + 1, values[y + 1, :])
for y in range(height - 1, 0, -1):
values[y - 1, :] = numpy.minimum(values[y, :] + 1, values[y - 1, :])
print(numpy.max(values), flush=True)
if __name__ == '__main__':
main()
| [
"numpy.max",
"numpy.minimum",
"numpy.array"
] | [((763, 782), 'numpy.array', 'numpy.array', (['values'], {}), '(values)\n', (774, 782), False, 'import numpy\n'), ((900, 949), 'numpy.minimum', 'numpy.minimum', (['(values[:, x] + 1)', 'values[:, x + 1]'], {}), '(values[:, x] + 1, values[:, x + 1])\n', (913, 949), False, 'import numpy\n'), ((1016, 1065), 'numpy.minimum', 'numpy.minimum', (['(values[:, x] + 1)', 'values[:, x - 1]'], {}), '(values[:, x] + 1, values[:, x - 1])\n', (1029, 1065), False, 'import numpy\n'), ((1126, 1175), 'numpy.minimum', 'numpy.minimum', (['(values[y, :] + 1)', 'values[y + 1, :]'], {}), '(values[y, :] + 1, values[y + 1, :])\n', (1139, 1175), False, 'import numpy\n'), ((1243, 1292), 'numpy.minimum', 'numpy.minimum', (['(values[y, :] + 1)', 'values[y - 1, :]'], {}), '(values[y, :] + 1, values[y - 1, :])\n', (1256, 1292), False, 'import numpy\n'), ((1304, 1321), 'numpy.max', 'numpy.max', (['values'], {}), '(values)\n', (1313, 1321), False, 'import numpy\n')] |
from src.modeling.models.BNN import BNN
from src.modeling.trainers.BNN_trainer import BNN_trainer
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
from dotmap import DotMap
from src.modeling.layers.FC_v2 import FC
from src.modeling.layers.RecalibrationLayer import RecalibrationLayer
NUM_SAMPLES = 1024
IN_DIM = 100
HIDDEN_DIM = 10
OUT_DIM = 2
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def stub_data():
X = np.random.random(size=(NUM_SAMPLES, IN_DIM))
# W_tru = np.random.random(size=(IN_DIM, OUT_DIM))
# b_tru = 5
# y = np.matmul(X, W_tru) + b_tru
W_hidden = np.random.random(size=(IN_DIM, HIDDEN_DIM))
W_last = np.random.random(size=(HIDDEN_DIM, OUT_DIM))
y_mid = np.matmul(X, W_hidden) + 5
y_mid[y_mid < 0] = 0
# y_mid = sigmoid(y_mid)
y = np.matmul(y_mid, W_last) + 2
return (X, y)
if __name__ == "__main__":
model_config = [
DotMap(
{
"layer_name": "FC",
"input_dim": IN_DIM,
"output_dim": 10,
"activation": "swish",
"weight_decay": 0.05,
"ensemble_size": 1,
}
),
DotMap(
{
"layer_name": "FC",
"input_dim": 10,
"output_dim": OUT_DIM,
"activation": "swish",
"weight_decay": 0.05,
"ensemble_size": 1,
}
),
]
model = BNN(DotMap(name="test"), model_config)
trainer_config = DotMap(
{
"model_dir": "random_thing",
"epochs": 2,
"batch_size": 2,
"num_nets": 1,
}
)
trainer = BNN_trainer(trainer_config, model)
X, y = stub_data()
trainer.train(X, y)
trainer.calibrate(X, y)
| [
"dotmap.DotMap",
"numpy.random.random",
"numpy.exp",
"numpy.matmul",
"src.modeling.trainers.BNN_trainer.BNN_trainer"
] | [((515, 559), 'numpy.random.random', 'np.random.random', ([], {'size': '(NUM_SAMPLES, IN_DIM)'}), '(size=(NUM_SAMPLES, IN_DIM))\n', (531, 559), True, 'import numpy as np\n'), ((685, 728), 'numpy.random.random', 'np.random.random', ([], {'size': '(IN_DIM, HIDDEN_DIM)'}), '(size=(IN_DIM, HIDDEN_DIM))\n', (701, 728), True, 'import numpy as np\n'), ((742, 786), 'numpy.random.random', 'np.random.random', ([], {'size': '(HIDDEN_DIM, OUT_DIM)'}), '(size=(HIDDEN_DIM, OUT_DIM))\n', (758, 786), True, 'import numpy as np\n'), ((1616, 1702), 'dotmap.DotMap', 'DotMap', (["{'model_dir': 'random_thing', 'epochs': 2, 'batch_size': 2, 'num_nets': 1}"], {}), "({'model_dir': 'random_thing', 'epochs': 2, 'batch_size': 2,\n 'num_nets': 1})\n", (1622, 1702), False, 'from dotmap import DotMap\n'), ((1786, 1820), 'src.modeling.trainers.BNN_trainer.BNN_trainer', 'BNN_trainer', (['trainer_config', 'model'], {}), '(trainer_config, model)\n', (1797, 1820), False, 'from src.modeling.trainers.BNN_trainer import BNN_trainer\n'), ((800, 822), 'numpy.matmul', 'np.matmul', (['X', 'W_hidden'], {}), '(X, W_hidden)\n', (809, 822), True, 'import numpy as np\n'), ((889, 913), 'numpy.matmul', 'np.matmul', (['y_mid', 'W_last'], {}), '(y_mid, W_last)\n', (898, 913), True, 'import numpy as np\n'), ((995, 1131), 'dotmap.DotMap', 'DotMap', (["{'layer_name': 'FC', 'input_dim': IN_DIM, 'output_dim': 10, 'activation':\n 'swish', 'weight_decay': 0.05, 'ensemble_size': 1}"], {}), "({'layer_name': 'FC', 'input_dim': IN_DIM, 'output_dim': 10,\n 'activation': 'swish', 'weight_decay': 0.05, 'ensemble_size': 1})\n", (1001, 1131), False, 'from dotmap import DotMap\n'), ((1270, 1407), 'dotmap.DotMap', 'DotMap', (["{'layer_name': 'FC', 'input_dim': 10, 'output_dim': OUT_DIM, 'activation':\n 'swish', 'weight_decay': 0.05, 'ensemble_size': 1}"], {}), "({'layer_name': 'FC', 'input_dim': 10, 'output_dim': OUT_DIM,\n 'activation': 'swish', 'weight_decay': 0.05, 'ensemble_size': 1})\n", (1276, 1407), False, 'from dotmap 
import DotMap\n'), ((1560, 1579), 'dotmap.DotMap', 'DotMap', ([], {'name': '"""test"""'}), "(name='test')\n", (1566, 1579), False, 'from dotmap import DotMap\n'), ((476, 486), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (482, 486), True, 'import numpy as np\n')] |
"""
Linear algebra homework problem 1
"""
import numpy as np
A = np.array([[0.780, 0.563], [0.913, 0.659]])
b = np.array([[0.217], [0.254]])
x = np.array([[1], [-1]])
x_a = np.array([[0.999], [-1.001]])
x_b = np.array([[0.341], [-0.087]])
def calc_residual(x_test):
return b - (A @ x_test)
def part_a():
print(f'Residual of x_a:\n{calc_residual(x_a)}')
print(f'Residual of x_b:\n{calc_residual(x_b)}')
def part_b():
print()
u, s_vals, vt = np.linalg.svd(A)
cond = np.linalg.cond(A)
print(f'Sng. values of A: {s_vals}')
print(f'Condition # of A: {cond}')
print("vt@(x_a - x):")
print(vt@(x_a-x))
print("vt@(x_b - x):")
print(vt@(x_b-x))
print("Well I can see that the matrix is very ill-conditioned. I\'m not 100% sure why the expression vt@(x_b-x) "
"seems to return something very similar to the singular values.")
def part_c():
print()
x_solve = np.linalg.solve(A, b)
print("x-x_solve:")
print(x - x_solve)
print("Ax-b:")
print(A@x - b)
print('The problem is absolutely the roundoff error. Calculating Ax-b, the error is on the order of machine '
'precision, but the solver returns an error more than six orders of magnitude greater.')
def part_d():
print()
print("Residual of x_b:")
print(r_b := calc_residual(x_b))
dx = np.linalg.solve(A, -r_b)
print("x_b - dx:")
print(x_b-dx)
print("Residual of (x_b - dx):")
print(calc_residual(x_b-dx))
print("That's a huge improvement over the original guess. The error of the updated vector is on the order of "
"machine precision.")
if __name__ == '__main__':
print('COMMIT!!!!')
part_a()
part_b()
part_c()
part_d()
| [
"numpy.linalg.solve",
"numpy.linalg.svd",
"numpy.array",
"numpy.linalg.cond"
] | [((68, 109), 'numpy.array', 'np.array', (['[[0.78, 0.563], [0.913, 0.659]]'], {}), '([[0.78, 0.563], [0.913, 0.659]])\n', (76, 109), True, 'import numpy as np\n'), ((115, 143), 'numpy.array', 'np.array', (['[[0.217], [0.254]]'], {}), '([[0.217], [0.254]])\n', (123, 143), True, 'import numpy as np\n'), ((148, 169), 'numpy.array', 'np.array', (['[[1], [-1]]'], {}), '([[1], [-1]])\n', (156, 169), True, 'import numpy as np\n'), ((176, 205), 'numpy.array', 'np.array', (['[[0.999], [-1.001]]'], {}), '([[0.999], [-1.001]])\n', (184, 205), True, 'import numpy as np\n'), ((212, 241), 'numpy.array', 'np.array', (['[[0.341], [-0.087]]'], {}), '([[0.341], [-0.087]])\n', (220, 241), True, 'import numpy as np\n'), ((469, 485), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (482, 485), True, 'import numpy as np\n'), ((497, 514), 'numpy.linalg.cond', 'np.linalg.cond', (['A'], {}), '(A)\n', (511, 514), True, 'import numpy as np\n'), ((930, 951), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (945, 951), True, 'import numpy as np\n'), ((1354, 1378), 'numpy.linalg.solve', 'np.linalg.solve', (['A', '(-r_b)'], {}), '(A, -r_b)\n', (1369, 1378), True, 'import numpy as np\n')] |
import numpy as np
import scipy.misc
from skimage.morphology import skeletonize
def save(sample_at, probabilities, lines, losses, iterations, save_dir='ginn/data'):
    """Export per-sample skeletonized line coordinates and training stats as .js files.

    Builds JavaScript source strings (globals ``dataFileFirst`` / ``dataFileLast``
    plus ``trainingStats``) so a browser-side viewer can load the data with a
    plain <script> tag, and writes one probability image per sample.

    Args:
        sample_at: iteration numbers at which data was sampled.
        probabilities: per-sample probability images.
        lines: per-sample line data; indexed below as line[0]=line id,
            line[1]=x coords, line[2]=y coords, line[3]=unit id.
        losses: per-sample loss values.
        iterations: unused; kept for interface compatibility.
        save_dir: output directory for the .js and .png files.
    """
    # Okay, so this code is a bit weird, but I had to find a work around to saving data for javascript access
    # ... Bear with me...
    # This mess below constructs a javascript variable as a string to save it as a .js file
    objectName = 'dataFileFirst'
    data_string = '{0} = new Object();\n'.format(objectName)
    for sample_id, prob, line, loss, in zip(sample_at, probabilities, lines, losses):
        data_string += '{0}[{1}] = new Object();\n'.format(objectName, sample_id)
        print('Processing {} of {}'.format(sample_id, len(sample_at)))
        for li in range(3):
            data_string += '{0}[{1}][{2}] = new Object();\n'.format(objectName, sample_id, li)
            for ui in range(16):
                data_string += '{0}[{1}][{2}][{3}] = new Object();\n'.format(objectName, sample_id, li, ui)
                data_string += '{0}[{1}][{2}][{3}][\'x\'] = new Object();\n'.format(objectName, sample_id, li, ui)
                data_string += '{0}[{1}][{2}][{3}][\'x\'] = ['.format(objectName, sample_id, li, ui)
                # Select the pixels belonging to this (line id, unit id) pair.
                indices = np.logical_and(line[0] == li, line[3] == ui)
                xis = line[1][indices]
                yis = line[2][indices]
                # Rasterize the selected pixels and thin them to a 1-px skeleton;
                # the skeleton coordinates are what gets serialized.
                # (A dead `idxs` accumulator that was built here but never read
                # has been removed.)
                tmp_img = np.zeros_like(prob)
                tmp_img[xis, yis] = 1
                skeleton = skeletonize(tmp_img).astype(int)
                xis, yis = np.nonzero(skeleton)
                for position, xi in enumerate(xis):
                    if position != len(xis) - 1:
                        data_string += '{0},'.format(xi)
                    else:
                        data_string += '{0}'.format(xi)
                data_string += ']\n'
                data_string += '{0}[{1}][{2}][{3}][\'y\'] = new Object();\n'.format(objectName, sample_id, li, ui)
                data_string += '{0}[{1}][{2}][{3}][\'y\'] = ['.format(objectName, sample_id, li, ui)
                for position, yi in enumerate(yis):
                    if position != len(yis) - 1:
                        data_string += '{0},'.format(yi)
                    else:
                        data_string += '{0}'.format(yi)
                data_string += ']\n'
        # NOTE(review): scipy.misc.imsave was deprecated and removed in modern
        # SciPy (>= 1.2); migrating to imageio.imwrite would require a new dep.
        scipy.misc.imsave('{}/p_{}.png'.format(save_dir,sample_id), (prob))
        if sample_id == 100:
            # NOTE(review): 100 looks like a hard-coded sample id marking the
            # end of the "first" dump -- confirm against the sampling schedule.
            print('SAVING FIRST')
            with open('{}/data_first.js'.format(save_dir), 'w') as f:
                f.write(data_string)
            # Start accumulating a second object for the remaining samples.
            objectName = 'dataFileLast'
            data_string = '{} = new Object();\n'.format(objectName)
        if sample_id == sample_at[-1]:
            print('SAVING LAST')
            with open('{}/data_last.js'.format(save_dir), 'w') as f:
                f.write(data_string)
    # Now store the training statistics
    statString = 'trainingStats = new Object();\n'
    statString += 'trainingStats[\"losses\"] = new Object();\n'
    statString += 'trainingStats[\"losses\"] = ' + str(list(losses)) + ';'
    statString += '\n'
    statString += 'trainingStats[\"iterations\"] = new Object();\n'
    statString += 'trainingStats[\"iterations\"] = ' + str(list(sample_at)) + ';'
    statString += '\n'
    with open('{}/training_stats.js'.format(save_dir), 'w') as f:
        f.write(statString)
| [
"numpy.nonzero",
"numpy.zeros_like",
"skimage.morphology.skeletonize",
"numpy.logical_and"
] | [((1258, 1302), 'numpy.logical_and', 'np.logical_and', (['(line[0] == li)', '(line[3] == ui)'], {}), '(line[0] == li, line[3] == ui)\n', (1272, 1302), True, 'import numpy as np\n'), ((1494, 1513), 'numpy.zeros_like', 'np.zeros_like', (['prob'], {}), '(prob)\n', (1507, 1513), True, 'import numpy as np\n'), ((1640, 1660), 'numpy.nonzero', 'np.nonzero', (['skeleton'], {}), '(skeleton)\n', (1650, 1660), True, 'import numpy as np\n'), ((1580, 1600), 'skimage.morphology.skeletonize', 'skeletonize', (['tmp_img'], {}), '(tmp_img)\n', (1591, 1600), False, 'from skimage.morphology import skeletonize\n')] |
import logging, warnings, json
from abc import ABCMeta, abstractmethod
from os import rename, makedirs
from os.path import join, basename, isfile
from glob import glob
import numpy as np
import torch
import torch.optim as optims
import torch.optim.lr_scheduler as lr_schedulers
from torch.nn import DataParallel as DP
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from linearlr import LinearLR
from ..models import build_model
from ..utils import dist_get_rank, sprint_stats
class BaseTask(metaclass=ABCMeta):
    """Base class for train/test tasks.

    Builds the model, places it on the correct device (DataParallel /
    DistributedDataParallel aware), sets up the optimizer and learning-rate
    schedule for training, and handles checkpoint save/load/resume plus
    timing summaries.
    """

    def __init__(self, args, loader, is_train):
        self.loader = loader
        self.dataset = loader.dataset
        self.is_train = is_train
        self.device = args.device
        self.gather = False
        self.gpu_gather = args.gpu_gather
        self.resume_epoch = 0
        self.has_val_score = False
        self.exp_dir = args.exp_dir
        if self.is_train:
            self.last_lr = args.lr
            self.lr_update_per_epoch = args.lr_update_per_epoch
        self.model = build_model(args, self.dataset)
        logging.debug(str(self.model))
        logging.debug(f'Total number of parameters: {sum([p.numel() for p in self.model.parameters()])}')
        self.rank = dist_get_rank()
        if self.rank >= 0:
            # Distributed run: move the model onto this process's device and
            # wrap it in DDP.
            self.device = torch.cuda.current_device() if args.use_cuda else 'cpu'
            self.model = self.model.to(self.device)
            self.model = DDP(
                self.model,
                [self.device] if args.use_cuda else None,
                find_unused_parameters=True,
            )
        else:
            # Single-process run: optionally use DataParallel over all GPUs.
            if args.use_cuda:
                if torch.cuda.device_count() > 1:
                    self.model = DP(self.model)
            self.model = self.model.to(self.device)
        self.output_device = args.device if args.gpu_gather else 'cpu'
        if is_train:
            logging.debug(f'Optimizer: {args.optim} with base learning rate {args.lr:.6g}')
            self.set_optim(args)
            self.set_lr_schedule(args)
        self.auto_load(args)

    def set_optim(self, args):
        """Create the optimizer named by ``args.optim``.

        SGD and Adam get their full hyper-parameter sets; any other name is
        looked up in ``torch.optim`` and constructed with just the LR.
        """
        if args.optim == 'SGD':
            self.optim = optims.SGD(self.model.parameters(),
                lr=args.lr, weight_decay=args.wd, momentum=args.momentum, nesterov=args.nesterov)
        elif args.optim == 'Adam':
            self.optim = optims.Adam(self.model.parameters(),
                lr=args.lr, weight_decay=args.wd)
        else:
            self.optim = vars(optims)[args.optim](self.model.parameters(), lr=args.lr)

    def set_lr_schedule(self, args):
        """Create the LR scheduler named by ``args.lr_schedule``, if any.

        When ``args.lr_update_per_epoch`` is set the schedule length is
        measured in epochs; otherwise the scheduler's ``last_epoch`` counter
        tracks iterations, and the schedule spans ``n_epoch * len(loader)``
        steps.
        """
        if args.lr_schedule:
            if args.lr_update_per_epoch:
                if args.lr_schedule == 'Linear':
                    self.lr_scheduler = LinearLR(
                        self.optim,
                        T=args.n_epoch,
                        last_epoch=self.resume_epoch - 1,
                    )
                elif args.lr_schedule == 'CosineAnnealing':
                    self.lr_scheduler = lr_schedulers.CosineAnnealingLR(
                        self.optim,
                        T_max=args.n_epoch,
                        last_epoch=self.resume_epoch - 1,
                    )
                elif args.lr_schedule == 'Step':
                    self.lr_scheduler = lr_schedulers.StepLR(self.optim,
                        step_size=args.lr_decay_step_size,
                        gamma=args.lr_schedule_gamma,
                        last_epoch=self.resume_epoch - 1,
                    )
                elif args.lr_schedule == 'MultiStep-ImageNet':
                    # ImageNet-style decay at 30% and 60% of training.
                    self.lr_scheduler = lr_schedulers.MultiStepLR(self.optim,
                        milestones=[round(0.3*args.n_epoch), round(0.6*args.n_epoch)],
                        gamma=args.lr_schedule_gamma,
                        last_epoch=self.resume_epoch - 1,
                    )
                elif args.lr_schedule == 'MultiStep-COCO':
                    # COCO-style decay at 60% and 90% of training.
                    self.lr_scheduler = lr_schedulers.MultiStepLR(self.optim,
                        milestones=[round(0.6*args.n_epoch), round(0.9*args.n_epoch)],
                        gamma=args.lr_schedule_gamma,
                        last_epoch=self.resume_epoch - 1,
                    )
                else:
                    # Fall back to any scheduler class exposed by
                    # torch.optim.lr_scheduler (name + 'LR').
                    self.lr_scheduler = vars(lr_schedulers) \
                        [args.lr_schedule + 'LR'](self.optim,
                        last_epoch=self.resume_epoch - 1,
                    )
            else:
                # using the last_epoch as a counter for number of iters processed so far
                n_iter_epoch = len(self.loader)
                n_iter_total = args.n_epoch*n_iter_epoch
                last_counter = self.resume_epoch*n_iter_epoch - 1
                if args.lr_schedule == 'Linear':
                    self.lr_scheduler = LinearLR(self.optim,
                        T=n_iter_total,
                        last_epoch=last_counter,
                    )
                elif args.lr_schedule == 'CosineAnnealing':
                    self.lr_scheduler = lr_schedulers.CosineAnnealingLR(
                        self.optim,
                        T_max=n_iter_total,
                        last_epoch=last_counter,
                    )
                elif args.lr_schedule == 'Step':
                    self.lr_scheduler = lr_schedulers.StepLR(self.optim,
                        step_size=args.lr_decay_step_size,
                        gamma=args.lr_schedule_gamma,
                        last_epoch=last_counter,
                    )
                else:
                    self.lr_scheduler = vars(lr_schedulers) \
                        [args.lr_schedule + 'LR'](self.optim,
                        last_epoch=last_counter,
                    )

    def get_lr(self):
        """Return the current base learning rate.

        BUGFIX: the original condition was inverted
        (``if not hasattr(self, 'lr_scheduler')``) and then read
        ``self.lr_scheduler`` anyway, raising AttributeError exactly when no
        scheduler was configured. With the fix, the scheduler's last LR is
        returned when one exists and the optimizer's LR otherwise.
        """
        if hasattr(self, 'lr_scheduler'):
            return self.lr_scheduler.get_last_lr()[0]
        else:
            return self.optim.param_groups[0]['lr']

    def update_lr_epoch(self):
        """Step the scheduler once per epoch and log LR changes."""
        if not hasattr(self, 'lr_scheduler'):
            return
        self.lr_scheduler.step()
        lr = self.lr_scheduler.get_last_lr()[0]
        if self.last_lr != lr:
            logging.info(f'Base learning rate updated to {lr:.6g}')
            self.last_lr = lr

    def update_lr_iter(self):
        """Step the scheduler once per iteration (no-op without a scheduler)."""
        if not hasattr(self, 'lr_scheduler'):
            return
        self.lr_scheduler.step()

    def train_mode(self, gather=True):
        """Switch the model to train mode; ``gather`` controls result collection."""
        self.model.train()
        self.gather = gather

    def test_mode(self, gather=True):
        """Switch the model to eval mode; ``gather`` controls result collection."""
        self.model.eval()
        self.gather = gather

    def mark_best_model(self, epoch, score):
        """Record the best epoch and its score in ``best-model.txt``."""
        with open(join(self.exp_dir, 'best-model.txt'), 'w') as f:
            f.write('%d\n%g\n' % (epoch, score))

    def query_best_model(self):
        """Return (epoch, score) recorded by mark_best_model, or (None, None)."""
        file_path = join(self.exp_dir, 'best-model.txt')
        if not isfile(file_path):
            return None, None
        with open(file_path) as f:
            content = f.readlines()
        epoch = int(content[0].strip())
        score = float(content[1].strip())
        return epoch, score

    def save(self, epoch):
        """Save model/optimizer/scheduler state as ``e{epoch:03d}.pth``.

        Only rank <= 0 saves (rank -1 means non-distributed).
        """
        if self.rank > 0:
            return
        out_dict = {}
        if isinstance(self.model, (DP, DDP)):
            # Unwrap so the checkpoint is loadable without DP/DDP.
            out_dict['model'] = self.model.module.state_dict()
        else:
            out_dict['model'] = self.model.state_dict()
        out_dict['optim'] = self.optim.state_dict()
        if hasattr(self, 'lr_scheduler'):
            out_dict['lr_scheduler'] = self.lr_scheduler.state_dict()
        # using a temporary file first to prevent getting
        # corrupted saves in case of a system crash
        tmp_path = join(self.exp_dir, 'temp.pth')
        torch.save(out_dict, tmp_path)
        rename(tmp_path, join(self.exp_dir, 'e%03d.pth' % epoch))

    def load(self, path, strict=False):
        """Load a checkpoint from ``path`` onto this task's device.

        Accepts both the llcv format (dict with 'model'/'optim'/...) and a
        bare model state dict; DP/DDP 'module.' prefixes are stripped.
        """
        if isinstance(self.device, int):
            device = f'cuda:{self.device}'
        else:
            device = self.device
        ckpt = torch.load(path, map_location=device)
        if 'model' in ckpt:
            # llcv format
            model_state_dict = ckpt['model']
            if hasattr(self, 'optim') and 'optim' in ckpt:
                self.optim.load_state_dict(ckpt['optim'])
            if hasattr(self, 'lr_scheduler') and 'lr_scheduler' in ckpt:
                self.lr_scheduler.load_state_dict(ckpt['lr_scheduler'])
        else:
            # simple model-state-dict format
            model_state_dict = ckpt
        # convert both the model and the state_dict to the basic format without DP or DDP wrappers
        if next(iter(model_state_dict)).startswith('module.'):
            model_state_dict = {k[7:]: v for k, v in model_state_dict.items()}
        if isinstance(self.model, (DP, DDP)):
            model = self.model.module
        else:
            model = self.model
        model.load_state_dict(model_state_dict, strict)

    def auto_load(self, args):
        '''
        Automatically finding out which model to load given the arguments

        Training: resume from the latest e*.pth if present, otherwise load
        args.pretrain if given. Testing: load args.ckpt, or args.ckpt_epoch,
        or the recorded best model, or fall back to the latest save.
        '''
        strict = args.load_strict
        if self.is_train:
            self.resume_epoch = 0
            saves = glob(join(args.exp_dir, 'e*.pth'))
            if saves:
                # previously trained, loading the latest model
                self.resume_epoch = max([int(basename(s).split('.')[0][1:]) for s in saves])
                ckpt_path = join(self.exp_dir, 'e%03d.pth' % self.resume_epoch)
                logging.warning('Resume training from the latest model ' + ckpt_path)
                self.load(ckpt_path, strict)
            elif args.pretrain:
                logging.info('Loading pretrained model ' + args.pretrain)
                self.load(args.pretrain, strict)
        else:
            if args.test_init:
                # Test the freshly initialized model; nothing to load.
                return
            if args.ckpt:
                ckpt_path = args.ckpt
                logging.info('Loading checkpoint ' + ckpt_path)
            elif args.ckpt_epoch:
                self.resume_epoch = args.ckpt_epoch
                ckpt_path = join(self.exp_dir, 'e%03d.pth' % self.resume_epoch)
                logging.info('Loading checkpoint with specified epoch ' + ckpt_path)
            else:
                self.resume_epoch, score = self.query_best_model()
                if self.resume_epoch is not None:
                    ckpt_path = join(self.exp_dir, 'e%03d.pth' % self.resume_epoch)
                    logging.info(f'Loading the best checkpoint (score {score:.6g}) from {ckpt_path}')
                else:
                    saves = glob(join(args.exp_dir, 'e*.pth'))
                    if not saves:
                        raise Exception('Cannot find saved models in ' + args.exp_dir)
                    self.resume_epoch = max([int(basename(s).split('.')[0][1:]) for s in saves])
                    ckpt_path = join(self.exp_dir, 'e%03d.pth' % self.resume_epoch)
                    logging.info(f'Loading the latest model from {ckpt_path}')
            self.load(ckpt_path, strict)

    def summarize_timing(self, timing_type, samples, n_warmup, out_dir):
        """Log timing statistics (ms) and optionally dump them as JSON.

        ``samples`` are in seconds; the first ``n_warmup`` entries are
        excluded from the statistics but kept in the dumped sample list.
        """
        assert len(samples) > n_warmup, 'Not enough timing samples after warming up'
        samples = 1e3*np.asarray(samples)  # seconds -> milliseconds
        logging.info(sprint_stats(samples[n_warmup:], timing_type.capitalize() + ' (ms)'))
        if out_dir:
            makedirs(out_dir, exist_ok=True)
            out_dict = {
                timing_type: {
                    'unit': 'ms',
                    'n_warmup': n_warmup,
                    'mean': samples[n_warmup:].mean(),
                    'std': samples[n_warmup:].std(ddof=1),
                    'min': samples[n_warmup:].min(),
                    'max': samples[n_warmup:].max(),
                    'samples': samples.tolist(),
                },
            }
            out_path = join(out_dir, timing_type.replace(' ', '_') + '.json')
            logging.info(f'Saving timing information to {out_path}')
            json.dump(out_dict, open(out_path, 'w'), indent=2)

    @abstractmethod
    def forward(self, data):
        """Run the forward pass on a batch; must be implemented by subclasses."""
        pass

    def backward(self, data):
        pass

    def log_iter(self, str_prefix='', str_suffix=''):
        pass

    def log_iter_tb(self, accu_iter, is_train):
        pass

    def log_epoch(self, str_prefix='', str_suffix=''):
        pass

    def log_epoch_tb(self, epoch, is_train):
        pass

    def dist_gather(self, is_train):
        pass

    def get_test_scores(self, force_update=False):
        pass

    def summarize_test(self, args):
        pass
| [
"linearlr.LinearLR",
"torch.nn.parallel.distributed.DistributedDataParallel",
"logging.debug",
"os.makedirs",
"torch.optim.lr_scheduler.StepLR",
"logging.warning",
"os.path.basename",
"torch.load",
"numpy.asarray",
"torch.cuda.device_count",
"torch.save",
"logging.info",
"os.path.isfile",
... | [((7027, 7063), 'os.path.join', 'join', (['self.exp_dir', '"""best-model.txt"""'], {}), "(self.exp_dir, 'best-model.txt')\n", (7031, 7063), False, 'from os.path import join, basename, isfile\n'), ((7898, 7928), 'os.path.join', 'join', (['self.exp_dir', '"""temp.pth"""'], {}), "(self.exp_dir, 'temp.pth')\n", (7902, 7928), False, 'from os.path import join, basename, isfile\n'), ((7938, 7968), 'torch.save', 'torch.save', (['out_dict', 'tmp_path'], {}), '(out_dict, tmp_path)\n', (7948, 7968), False, 'import torch\n'), ((8230, 8267), 'torch.load', 'torch.load', (['path'], {'map_location': 'device'}), '(path, map_location=device)\n', (8240, 8267), False, 'import torch\n'), ((1484, 1574), 'torch.nn.parallel.distributed.DistributedDataParallel', 'DDP', (['self.model', '([self.device] if args.use_cuda else None)'], {'find_unused_parameters': '(True)'}), '(self.model, [self.device] if args.use_cuda else None,\n find_unused_parameters=True)\n', (1487, 1574), True, 'from torch.nn.parallel.distributed import DistributedDataParallel as DDP\n'), ((1954, 2033), 'logging.debug', 'logging.debug', (['f"""Optimizer: {args.optim} with base learning rate {args.lr:.6g}"""'], {}), "(f'Optimizer: {args.optim} with base learning rate {args.lr:.6g}')\n", (1967, 2033), False, 'import logging, warnings, json\n'), ((6374, 6429), 'logging.info', 'logging.info', (['f"""Base learning rate updated to {lr:.6g}"""'], {}), "(f'Base learning rate updated to {lr:.6g}')\n", (6386, 6429), False, 'import logging, warnings, json\n'), ((7080, 7097), 'os.path.isfile', 'isfile', (['file_path'], {}), '(file_path)\n', (7086, 7097), False, 'from os.path import join, basename, isfile\n'), ((7995, 8034), 'os.path.join', 'join', (['self.exp_dir', "('e%03d.pth' % epoch)"], {}), "(self.exp_dir, 'e%03d.pth' % epoch)\n", (7999, 8034), False, 'from os.path import join, basename, isfile\n'), ((11502, 11521), 'numpy.asarray', 'np.asarray', (['samples'], {}), '(samples)\n', (11512, 11521), True, 'import numpy as 
np\n'), ((11648, 11680), 'os.makedirs', 'makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (11656, 11680), False, 'from os import rename, makedirs\n'), ((12218, 12274), 'logging.info', 'logging.info', (['f"""Saving timing information to {out_path}"""'], {}), "(f'Saving timing information to {out_path}')\n", (12230, 12274), False, 'import logging, warnings, json\n'), ((1349, 1376), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (1374, 1376), False, 'import torch\n'), ((6872, 6908), 'os.path.join', 'join', (['self.exp_dir', '"""best-model.txt"""'], {}), "(self.exp_dir, 'best-model.txt')\n", (6876, 6908), False, 'from os.path import join, basename, isfile\n'), ((9436, 9464), 'os.path.join', 'join', (['args.exp_dir', '"""e*.pth"""'], {}), "(args.exp_dir, 'e*.pth')\n", (9440, 9464), False, 'from os.path import join, basename, isfile\n'), ((9676, 9727), 'os.path.join', 'join', (['self.exp_dir', "('e%03d.pth' % self.resume_epoch)"], {}), "(self.exp_dir, 'e%03d.pth' % self.resume_epoch)\n", (9680, 9727), False, 'from os.path import join, basename, isfile\n'), ((9745, 9814), 'logging.warning', 'logging.warning', (["('Resume training from the latest model ' + ckpt_path)"], {}), "('Resume training from the latest model ' + ckpt_path)\n", (9760, 9814), False, 'import logging, warnings, json\n'), ((10173, 10220), 'logging.info', 'logging.info', (["('Loading checkpoint ' + ckpt_path)"], {}), "('Loading checkpoint ' + ckpt_path)\n", (10185, 10220), False, 'import logging, warnings, json\n'), ((1704, 1729), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1727, 1729), False, 'import torch\n'), ((1769, 1783), 'torch.nn.DataParallel', 'DP', (['self.model'], {}), '(self.model)\n', (1771, 1783), True, 'from torch.nn import DataParallel as DP\n'), ((2832, 2902), 'linearlr.LinearLR', 'LinearLR', (['self.optim'], {'T': 'args.n_epoch', 'last_epoch': '(self.resume_epoch - 1)'}), '(self.optim, T=args.n_epoch, 
last_epoch=self.resume_epoch - 1)\n', (2840, 2902), False, 'from linearlr import LinearLR\n'), ((4967, 5028), 'linearlr.LinearLR', 'LinearLR', (['self.optim'], {'T': 'n_iter_total', 'last_epoch': 'last_counter'}), '(self.optim, T=n_iter_total, last_epoch=last_counter)\n', (4975, 5028), False, 'from linearlr import LinearLR\n'), ((9911, 9968), 'logging.info', 'logging.info', (["('Loading pretrained model ' + args.pretrain)"], {}), "('Loading pretrained model ' + args.pretrain)\n", (9923, 9968), False, 'import logging, warnings, json\n'), ((10338, 10389), 'os.path.join', 'join', (['self.exp_dir', "('e%03d.pth' % self.resume_epoch)"], {}), "(self.exp_dir, 'e%03d.pth' % self.resume_epoch)\n", (10342, 10389), False, 'from os.path import join, basename, isfile\n'), ((10407, 10475), 'logging.info', 'logging.info', (["('Loading checkpoint with specified epoch ' + ckpt_path)"], {}), "('Loading checkpoint with specified epoch ' + ckpt_path)\n", (10419, 10475), False, 'import logging, warnings, json\n'), ((3104, 3206), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'lr_schedulers.CosineAnnealingLR', (['self.optim'], {'T_max': 'args.n_epoch', 'last_epoch': '(self.resume_epoch - 1)'}), '(self.optim, T_max=args.n_epoch, last_epoch=\n self.resume_epoch - 1)\n', (3135, 3206), True, 'import torch.optim.lr_scheduler as lr_schedulers\n'), ((5204, 5297), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'lr_schedulers.CosineAnnealingLR', (['self.optim'], {'T_max': 'n_iter_total', 'last_epoch': 'last_counter'}), '(self.optim, T_max=n_iter_total, last_epoch=\n last_counter)\n', (5235, 5297), True, 'import torch.optim.lr_scheduler as lr_schedulers\n'), ((10647, 10698), 'os.path.join', 'join', (['self.exp_dir', "('e%03d.pth' % self.resume_epoch)"], {}), "(self.exp_dir, 'e%03d.pth' % self.resume_epoch)\n", (10651, 10698), False, 'from os.path import join, basename, isfile\n'), ((10720, 10806), 'logging.info', 'logging.info', (['f"""Loading the best checkpoint (score {score:.6g}) from 
{ckpt_path}"""'], {}), "(\n f'Loading the best checkpoint (score {score:.6g}) from {ckpt_path}')\n", (10732, 10806), False, 'import logging, warnings, json\n'), ((11143, 11194), 'os.path.join', 'join', (['self.exp_dir', "('e%03d.pth' % self.resume_epoch)"], {}), "(self.exp_dir, 'e%03d.pth' % self.resume_epoch)\n", (11147, 11194), False, 'from os.path import join, basename, isfile\n'), ((11216, 11274), 'logging.info', 'logging.info', (['f"""Loading the latest model from {ckpt_path}"""'], {}), "(f'Loading the latest model from {ckpt_path}')\n", (11228, 11274), False, 'import logging, warnings, json\n'), ((3392, 3528), 'torch.optim.lr_scheduler.StepLR', 'lr_schedulers.StepLR', (['self.optim'], {'step_size': 'args.lr_decay_step_size', 'gamma': 'args.lr_schedule_gamma', 'last_epoch': '(self.resume_epoch - 1)'}), '(self.optim, step_size=args.lr_decay_step_size, gamma=\n args.lr_schedule_gamma, last_epoch=self.resume_epoch - 1)\n', (3412, 3528), True, 'import torch.optim.lr_scheduler as lr_schedulers\n'), ((5483, 5610), 'torch.optim.lr_scheduler.StepLR', 'lr_schedulers.StepLR', (['self.optim'], {'step_size': 'args.lr_decay_step_size', 'gamma': 'args.lr_schedule_gamma', 'last_epoch': 'last_counter'}), '(self.optim, step_size=args.lr_decay_step_size, gamma=\n args.lr_schedule_gamma, last_epoch=last_counter)\n', (5503, 5610), True, 'import torch.optim.lr_scheduler as lr_schedulers\n'), ((10859, 10887), 'os.path.join', 'join', (['args.exp_dir', '"""e*.pth"""'], {}), "(args.exp_dir, 'e*.pth')\n", (10863, 10887), False, 'from os.path import join, basename, isfile\n'), ((9599, 9610), 'os.path.basename', 'basename', (['s'], {}), '(s)\n', (9607, 9610), False, 'from os.path import join, basename, isfile\n'), ((11062, 11073), 'os.path.basename', 'basename', (['s'], {}), '(s)\n', (11070, 11073), False, 'from os.path import join, basename, isfile\n')] |
# -*- coding: utf-8 -*-
"""
Trains a MNIST classifier.
"""
import numpy as np
import sys
import os
import pickle
import argparse
import math
import time
from bisect import bisect_left
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as trn
import torchvision.datasets as dset
import torch.nn.functional as F
from torch.autograd import Variable as V
import torchtext
from torchtext import data
from torchtext import datasets
import tqdm
# Fix the numpy RNG so data shuffles are reproducible across runs.
np.random.seed(1)
parser = argparse.ArgumentParser(description='SST OE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Optimization options
parser.add_argument('--epochs', '-e', type=int, default=5, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=64, help='Batch size.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.01, help='The initial learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.5, help='Momentum.')
parser.add_argument('--test_bs', type=int, default=256)
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./snapshots', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='./snapshots', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
parser.add_argument('--mix', dest='mix', action='store_true', help='Mix outliers images with in-dist images.')
# Acceleration
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
args = parser.parse_args()
# ============================ SST ============================ #
# set up fields
TEXT_sst = data.Field(pad_first=True)
LABEL_sst = data.Field(sequential=False)
# make splits for data
train_sst, val_sst, test_sst = datasets.SST.splits(
    TEXT_sst, LABEL_sst, fine_grained=False, train_subtrees=False,
    filter_pred=lambda ex: ex.label != 'neutral')
# build vocab
TEXT_sst.build_vocab(train_sst, max_size=10000)
LABEL_sst.build_vocab(train_sst, max_size=10000)
print('vocab length (including special tokens):', len(TEXT_sst.vocab))
# create our own iterator, avoiding the calls to build_vocab in SST.iters
train_iter_sst, val_iter_sst, test_iter_sst = data.BucketIterator.splits(
    (train_sst, val_sst, test_sst), batch_size=args.batch_size, repeat=False)
# ============================ SST ============================ #
# ============================ WikiText-2 ============================ #
# set up fields
TEXT_wtxt = data.Field(pad_first=True, lower=True)
# make splits for data
train_OE, val_OE, test_OE = datasets.WikiText2.splits(TEXT_wtxt)
# build vocab
# NOTE(review): the vocab for this field is built from the SST training text
# (train_sst.text), not from WikiText itself -- presumably so the outlier
# batches share the in-distribution vocabulary; confirm this is intended.
TEXT_wtxt.build_vocab(train_sst.text, max_size=10000)
print('vocab length (including special tokens):', len(TEXT_wtxt.vocab))
# create our own iterator, avoiding the calls to build_vocab in SST.iters
train_iter_oe, val_iter_oe, test_iter_oe = data.BPTTIterator.splits(
    (train_OE, val_OE, test_OE), batch_size=args.batch_size, bptt_len=15, repeat=False)
# ============================ WikiText-2 ============================ #
# ============================ WikiText-103 ============================ #
# NOTE(review): this section rebinds TEXT_wtxt, train_OE/val_OE/test_OE and the
# *_iter_oe names, so the WikiText-2 iterators built above are discarded after
# being (expensively) constructed. Presumably only one OE source should be
# active at a time -- confirm which one is intended before removing the other.
# set up fields
TEXT_wtxt = data.Field(pad_first=True, lower=True)
# make splits for data
train_OE, val_OE, test_OE = datasets.WikiText103.splits(TEXT_wtxt)
# build vocab
TEXT_wtxt.build_vocab(train_sst.text, max_size=10000)
print('vocab length (including special tokens):', len(TEXT_wtxt.vocab))
# create our own iterator, avoiding the calls to build_vocab in SST.iters
train_iter_oe, val_iter_oe, test_iter_oe = data.BPTTIterator.splits(
    (train_OE, val_OE, test_OE), batch_size=args.batch_size, bptt_len=15, repeat=False)
# ============================ WikiText-103 ============================ #
# NOTE(review): exit() stops the script right after data setup, before any
# model is ever trained -- looks like a debugging guard left in place.
exit()
"torchtext.datasets.SST.splits",
"numpy.random.seed",
"argparse.ArgumentParser",
"torchtext.datasets.WikiText2.splits",
"torchtext.datasets.WikiText103.splits",
"torchtext.data.BPTTIterator.splits",
"torchtext.data.BucketIterator.splits",
"torchtext.data.Field"
] | [((495, 512), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (509, 512), True, 'import numpy as np\n'), ((523, 629), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SST OE"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='SST OE', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (546, 629), False, 'import argparse\n'), ((1749, 1775), 'torchtext.data.Field', 'data.Field', ([], {'pad_first': '(True)'}), '(pad_first=True)\n', (1759, 1775), False, 'from torchtext import data\n'), ((1788, 1816), 'torchtext.data.Field', 'data.Field', ([], {'sequential': '(False)'}), '(sequential=False)\n', (1798, 1816), False, 'from torchtext import data\n'), ((1872, 2005), 'torchtext.datasets.SST.splits', 'datasets.SST.splits', (['TEXT_sst', 'LABEL_sst'], {'fine_grained': '(False)', 'train_subtrees': '(False)', 'filter_pred': "(lambda ex: ex.label != 'neutral')"}), "(TEXT_sst, LABEL_sst, fine_grained=False, train_subtrees\n =False, filter_pred=lambda ex: ex.label != 'neutral')\n", (1891, 2005), False, 'from torchtext import datasets\n'), ((2314, 2419), 'torchtext.data.BucketIterator.splits', 'data.BucketIterator.splits', (['(train_sst, val_sst, test_sst)'], {'batch_size': 'args.batch_size', 'repeat': '(False)'}), '((train_sst, val_sst, test_sst), batch_size=args.\n batch_size, repeat=False)\n', (2340, 2419), False, 'from torchtext import data\n'), ((2589, 2627), 'torchtext.data.Field', 'data.Field', ([], {'pad_first': '(True)', 'lower': '(True)'}), '(pad_first=True, lower=True)\n', (2599, 2627), False, 'from torchtext import data\n'), ((2680, 2716), 'torchtext.datasets.WikiText2.splits', 'datasets.WikiText2.splits', (['TEXT_wtxt'], {}), '(TEXT_wtxt)\n', (2705, 2716), False, 'from torchtext import datasets\n'), ((2976, 3089), 'torchtext.data.BPTTIterator.splits', 'data.BPTTIterator.splits', (['(train_OE, val_OE, test_OE)'], {'batch_size': 'args.batch_size', 'bptt_len': '(15)', 'repeat': 
'(False)'}), '((train_OE, val_OE, test_OE), batch_size=args.\n batch_size, bptt_len=15, repeat=False)\n', (3000, 3089), False, 'from torchtext import data\n'), ((3269, 3307), 'torchtext.data.Field', 'data.Field', ([], {'pad_first': '(True)', 'lower': '(True)'}), '(pad_first=True, lower=True)\n', (3279, 3307), False, 'from torchtext import data\n'), ((3360, 3398), 'torchtext.datasets.WikiText103.splits', 'datasets.WikiText103.splits', (['TEXT_wtxt'], {}), '(TEXT_wtxt)\n', (3387, 3398), False, 'from torchtext import datasets\n'), ((3658, 3771), 'torchtext.data.BPTTIterator.splits', 'data.BPTTIterator.splits', (['(train_OE, val_OE, test_OE)'], {'batch_size': 'args.batch_size', 'bptt_len': '(15)', 'repeat': '(False)'}), '((train_OE, val_OE, test_OE), batch_size=args.\n batch_size, bptt_len=15, repeat=False)\n', (3682, 3771), False, 'from torchtext import data\n')] |
import numpy as np
import pytest
from gym_gridverse.agent import Agent
from gym_gridverse.envs.reset_functions import empty
from gym_gridverse.geometry import Orientation, Position, Shape
from gym_gridverse.grid import Grid
from gym_gridverse.grid_object import (
Color,
Door,
Exit,
Floor,
Key,
NoneGridObject,
Wall,
)
from gym_gridverse.representations.representation import (
default_convert,
default_representation_space,
no_overlap_convert,
no_overlap_representation_space,
)
@pytest.fixture
def default_representation_fixture():
    """Parameters for a 3x3 default observation representation.

    Returns (height, width, max_obj_type, max_obj_state, max_color_value).
    """
    shape = (3, 3)
    return (
        shape[0],
        shape[1],
        Key.type_index,
        Door.num_states(),
        Color.BLUE.value,
    )
def test_default_representation_space(default_representation_fixture):
    """Bounds of the default representation space match the per-cell maxima."""
    height, width, obj_type_max, obj_state_max, color_max = (
        default_representation_fixture
    )

    cell_upper = [obj_type_max, obj_state_max, color_max]
    grid_upper = np.tile(cell_upper, (height, width, 1))

    space = default_representation_space(
        obj_type_max, obj_state_max, color_max, width, height
    )

    np.testing.assert_array_equal(space['grid'].upper_bound, grid_upper)
    np.testing.assert_array_equal(
        space['grid'].lower_bound, np.zeros((height, width, 3), dtype=int)
    )
    np.testing.assert_equal(
        space['agent_id_grid'].upper_bound, np.ones((height, width), dtype=int)
    )
    np.testing.assert_equal(
        space['agent_id_grid'].lower_bound, np.zeros((height, width), dtype=int)
    )
    np.testing.assert_array_equal(space['item'].upper_bound, cell_upper)
    np.testing.assert_array_equal(space['item'].lower_bound, [0, 0, 0])
    np.testing.assert_array_equal(space['agent'].upper_bound, np.ones(6))
    np.testing.assert_array_equal(
        space['agent'].lower_bound, np.array([-1.0, -1.0, 0.0, 0.0, 0.0, 0.0])
    )
def test_default_representation_convert(default_representation_fixture):
    """default_convert encodes the grid, the agent, and its held item."""
    height, width, _, _, _ = default_representation_fixture

    agent = Agent(Position(0, 2), Orientation.N, Key(Color.RED))
    grid = Grid.from_shape((height, width))
    grid[1, 1] = Door(Door.Status.CLOSED, Color.BLUE)

    floor_index = Floor.type_index

    # normalized y, x, then one-hot orientation with North first
    expected_agent = [
        -1.0,
        (4 - width + 1) / (width - 1),
        1.0,
        0.0,
        0.0,
        0.0,
    ]
    expected_item = [
        agent.obj.type_index,
        agent.obj.state_index,
        agent.obj.color.value,
    ]

    # all floor, except the closed blue door at (1, 1)
    expected_grid = np.tile([floor_index, 0, 0], (height, width, 1))
    expected_grid[1, 1] = [
        Door.type_index,
        Door.Status.CLOSED.value,
        Color.BLUE.value,
    ]

    expected_agent_id = np.zeros((height, width), dtype=int)
    expected_agent_id[0, 2] = 1

    rep = default_convert(grid, agent)

    np.testing.assert_array_equal(rep['grid'], expected_grid)
    np.testing.assert_array_equal(rep['agent_id_grid'], expected_agent_id)
    np.testing.assert_array_equal(rep['agent'], expected_agent)
    np.testing.assert_array_equal(rep['item'], expected_item)
@pytest.fixture
def no_overlap_fixture():
    """Parameters for a 4x5 no-overlap state representation.

    Returns (height, width, max_object_type, max_object_status,
    max_color_value); the maxima are hard coded from the objects used above.
    """
    return 4, 5, Exit.type_index, 0, Color.GREEN.value
def test_no_overlap_space(no_overlap_fixture):
    """Bounds of the no-overlap space reflect the offset channel encoding."""
    height, width, type_max, status_max, color_max = no_overlap_fixture

    # per-channel maxima after the no-overlap offsetting scheme
    channel_max = [
        type_max,
        type_max + status_max + 1,
        type_max + status_max + color_max + 2,
    ]
    grid_upper = np.tile(channel_max, (height, width, 1))

    space = no_overlap_representation_space(
        type_max, status_max, color_max, width, height
    )

    np.testing.assert_array_equal(space['grid'].upper_bound, grid_upper)
    np.testing.assert_array_equal(
        space['grid'].lower_bound, np.zeros((height, width, 3), dtype=int)
    )
    np.testing.assert_equal(
        space['agent_id_grid'].upper_bound, np.ones((height, width), dtype=int)
    )
    np.testing.assert_equal(
        space['agent_id_grid'].lower_bound, np.zeros((height, width), dtype=int)
    )
    np.testing.assert_array_equal(space['item'].upper_bound, channel_max)
    np.testing.assert_array_equal(space['item'].lower_bound, [0, 0, 0])
    np.testing.assert_equal(space['agent'].upper_bound, np.ones(6))
    np.testing.assert_equal(
        space['agent'].lower_bound, np.array([-1.0, -1.0, 0.0, 0.0, 0.0, 0.0])
    )
def test_no_overlap_convert(no_overlap_fixture):
height, width, max_object_type, max_object_status, _ = no_overlap_fixture
first_item_status = max_object_type + 1
first_item_color = max_object_type + max_object_status + 2
state = empty(Shape(height, width), random_agent=True)
expected_agent_state = np.array(
[
NoneGridObject.type_index,
first_item_status,
first_item_color,
]
)
expected_grid_state = np.array(
[
[
[
Floor.type_index,
first_item_status,
first_item_color,
]
]
* width
]
* height
)
# we expect walls to be around
expected_grid_state[0, :] = [
Wall.type_index,
first_item_status,
first_item_color,
]
expected_grid_state[height - 1, :] = [
Wall.type_index,
first_item_status,
first_item_color,
]
expected_grid_state[:, 0] = [
Wall.type_index,
first_item_status,
first_item_color,
]
expected_grid_state[:, width - 1] = [
Wall.type_index,
first_item_status,
first_item_color,
]
# we expect exit to be in corner
expected_grid_state[height - 2, width - 2, :] = [
Exit.type_index,
first_item_status,
first_item_color,
]
representation = no_overlap_convert(
state.grid, state.agent, max_object_type, max_object_status
)
expected_agent_id_grid = np.zeros((height, width), dtype=int)
expected_agent_id_grid[state.agent.position.y, state.agent.position.x] = 1
np.testing.assert_array_equal(representation['grid'], expected_grid_state)
np.testing.assert_array_equal(
representation['agent_id_grid'], expected_agent_id_grid
)
np.testing.assert_array_equal(representation['item'], expected_agent_state)
expected_agent_representation = np.zeros(6)
expected_agent_representation[0] = (
2 * state.agent.position.y - state.grid.shape.height + 1
) / (state.grid.shape.height - 1)
expected_agent_representation[1] = (
2 * state.agent.position.x - state.grid.shape.width + 1
) / (state.grid.shape.width - 1)
expected_agent_representation[2 + state.agent.orientation.value] = 1
np.testing.assert_equal(
representation['agent'], expected_agent_representation
)
| [
"gym_gridverse.geometry.Position",
"gym_gridverse.representations.representation.default_convert",
"gym_gridverse.geometry.Shape",
"numpy.testing.assert_array_equal",
"gym_gridverse.grid.Grid.from_shape",
"gym_gridverse.grid_object.Door",
"gym_gridverse.representations.representation.default_representat... | [((719, 736), 'gym_gridverse.grid_object.Door.num_states', 'Door.num_states', ([], {}), '()\n', (734, 736), False, 'from gym_gridverse.grid_object import Color, Door, Exit, Floor, Key, NoneGridObject, Wall\n'), ((1167, 1217), 'numpy.array', 'np.array', (['([[expected_item_space] * width] * height)'], {}), '([[expected_item_space] * width] * height)\n', (1175, 1217), True, 'import numpy as np\n'), ((1231, 1324), 'gym_gridverse.representations.representation.default_representation_space', 'default_representation_space', (['max_obj_type', 'max_obj_state', 'max_color_value', 'width', 'height'], {}), '(max_obj_type, max_obj_state, max_color_value,\n width, height)\n', (1259, 1324), False, 'from gym_gridverse.representations.representation import default_convert, default_representation_space, no_overlap_convert, no_overlap_representation_space\n'), ((1340, 1417), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["space['grid'].upper_bound", 'expected_grid_space'], {}), "(space['grid'].upper_bound, expected_grid_space)\n", (1369, 1417), True, 'import numpy as np\n'), ((1783, 1860), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["space['item'].upper_bound", 'expected_item_space'], {}), "(space['item'].upper_bound, expected_item_space)\n", (1812, 1860), True, 'import numpy as np\n'), ((1879, 1946), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["space['item'].lower_bound", '[0, 0, 0]'], {}), "(space['item'].lower_bound, [0, 0, 0])\n", (1908, 1946), True, 'import numpy as np\n'), ((2353, 2385), 'gym_gridverse.grid.Grid.from_shape', 'Grid.from_shape', (['(height, width)'], {}), '((height, width))\n', (2368, 2385), False, 'from gym_gridverse.grid import Grid\n'), ((2403, 2439), 'gym_gridverse.grid_object.Door', 'Door', (['Door.Status.CLOSED', 'Color.BLUE'], {}), '(Door.Status.CLOSED, Color.BLUE)\n', (2407, 2439), False, 'from 
gym_gridverse.grid_object import Color, Door, Exit, Floor, Key, NoneGridObject, Wall\n'), ((2844, 3104), 'numpy.array', 'np.array', (['[[[floor_index, 0, 0], [floor_index, 0, 0], [floor_index, 0, 0]], [[\n floor_index, 0, 0], [Door.type_index, Door.Status.CLOSED.value, Color.\n BLUE.value], [floor_index, 0, 0]], [[floor_index, 0, 0], [floor_index, \n 0, 0], [floor_index, 0, 0]]]'], {}), '([[[floor_index, 0, 0], [floor_index, 0, 0], [floor_index, 0, 0]],\n [[floor_index, 0, 0], [Door.type_index, Door.Status.CLOSED.value, Color\n .BLUE.value], [floor_index, 0, 0]], [[floor_index, 0, 0], [floor_index,\n 0, 0], [floor_index, 0, 0]]])\n', (2852, 3104), True, 'import numpy as np\n'), ((3529, 3565), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'int'}), '((height, width), dtype=int)\n', (3537, 3565), True, 'import numpy as np\n'), ((3614, 3642), 'gym_gridverse.representations.representation.default_convert', 'default_convert', (['grid', 'agent'], {}), '(grid, agent)\n', (3629, 3642), False, 'from gym_gridverse.representations.representation import default_convert, default_representation_space, no_overlap_convert, no_overlap_representation_space\n'), ((3648, 3720), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["rep['grid']", 'expected_grid_representation'], {}), "(rep['grid'], expected_grid_representation)\n", (3677, 3720), True, 'import numpy as np\n'), ((3725, 3800), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["rep['agent_id_grid']", 'expected_agent_id_grid'], {}), "(rep['agent_id_grid'], expected_agent_id_grid)\n", (3754, 3800), True, 'import numpy as np\n'), ((3805, 3879), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["rep['agent']", 'expected_agent_representation'], {}), "(rep['agent'], expected_agent_representation)\n", (3834, 3879), True, 'import numpy as np\n'), ((3884, 3956), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["rep['item']", 
'expected_item_representation'], {}), "(rep['item'], expected_item_representation)\n", (3913, 3956), True, 'import numpy as np\n'), ((4688, 4737), 'numpy.array', 'np.array', (['([[max_channel_values] * width] * height)'], {}), '([[max_channel_values] * width] * height)\n', (4696, 4737), True, 'import numpy as np\n'), ((4765, 4868), 'gym_gridverse.representations.representation.no_overlap_representation_space', 'no_overlap_representation_space', (['max_object_type', 'max_object_status', 'max_color_value', 'width', 'height'], {}), '(max_object_type, max_object_status,\n max_color_value, width, height)\n', (4796, 4868), False, 'from gym_gridverse.representations.representation import default_convert, default_representation_space, no_overlap_convert, no_overlap_representation_space\n'), ((4884, 4972), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["space['grid'].upper_bound", 'expected_grid_object_space'], {}), "(space['grid'].upper_bound,\n expected_grid_object_space)\n", (4913, 4972), True, 'import numpy as np\n'), ((5334, 5410), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["space['item'].upper_bound", 'max_channel_values'], {}), "(space['item'].upper_bound, max_channel_values)\n", (5363, 5410), True, 'import numpy as np\n'), ((5415, 5482), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["space['item'].lower_bound", '[0, 0, 0]'], {}), "(space['item'].lower_bound, [0, 0, 0])\n", (5444, 5482), True, 'import numpy as np\n'), ((5990, 6064), 'numpy.array', 'np.array', (['[NoneGridObject.type_index, first_item_status, first_item_color]'], {}), '([NoneGridObject.type_index, first_item_status, first_item_color])\n', (5998, 6064), True, 'import numpy as np\n'), ((6153, 6244), 'numpy.array', 'np.array', (['([[[Floor.type_index, first_item_status, first_item_color]] * width] * height)'], {}), '([[[Floor.type_index, first_item_status, first_item_color]] * width\n ] * height)\n', (6161, 6244), True, 'import 
numpy as np\n'), ((7128, 7207), 'gym_gridverse.representations.representation.no_overlap_convert', 'no_overlap_convert', (['state.grid', 'state.agent', 'max_object_type', 'max_object_status'], {}), '(state.grid, state.agent, max_object_type, max_object_status)\n', (7146, 7207), False, 'from gym_gridverse.representations.representation import default_convert, default_representation_space, no_overlap_convert, no_overlap_representation_space\n'), ((7252, 7288), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'int'}), '((height, width), dtype=int)\n', (7260, 7288), True, 'import numpy as np\n'), ((7373, 7447), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["representation['grid']", 'expected_grid_state'], {}), "(representation['grid'], expected_grid_state)\n", (7402, 7447), True, 'import numpy as np\n'), ((7452, 7542), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["representation['agent_id_grid']", 'expected_agent_id_grid'], {}), "(representation['agent_id_grid'],\n expected_agent_id_grid)\n", (7481, 7542), True, 'import numpy as np\n'), ((7557, 7632), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["representation['item']", 'expected_agent_state'], {}), "(representation['item'], expected_agent_state)\n", (7586, 7632), True, 'import numpy as np\n'), ((7670, 7681), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (7678, 7681), True, 'import numpy as np\n'), ((8045, 8124), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["representation['agent']", 'expected_agent_representation'], {}), "(representation['agent'], expected_agent_representation)\n", (8068, 8124), True, 'import numpy as np\n'), ((1502, 1541), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {'dtype': 'int'}), '((height, width, 3), dtype=int)\n', (1510, 1541), True, 'import numpy as np\n'), ((1621, 1656), 'numpy.ones', 'np.ones', (['(height, width)'], {'dtype': 'int'}), '((height, width), dtype=int)\n', (1628, 
1656), True, 'import numpy as np\n'), ((1736, 1772), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'int'}), '((height, width), dtype=int)\n', (1744, 1772), True, 'import numpy as np\n'), ((2009, 2019), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (2016, 2019), True, 'import numpy as np\n'), ((2092, 2134), 'numpy.array', 'np.array', (['[-1.0, -1.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([-1.0, -1.0, 0.0, 0.0, 0.0, 0.0])\n', (2100, 2134), True, 'import numpy as np\n'), ((2295, 2309), 'gym_gridverse.geometry.Position', 'Position', (['(0)', '(2)'], {}), '(0, 2)\n', (2303, 2309), False, 'from gym_gridverse.geometry import Orientation, Position, Shape\n'), ((2326, 2340), 'gym_gridverse.grid_object.Key', 'Key', (['Color.RED'], {}), '(Color.RED)\n', (2329, 2340), False, 'from gym_gridverse.grid_object import Color, Door, Exit, Floor, Key, NoneGridObject, Wall\n'), ((5053, 5092), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {'dtype': 'int'}), '((height, width, 3), dtype=int)\n', (5061, 5092), True, 'import numpy as np\n'), ((5172, 5207), 'numpy.ones', 'np.ones', (['(height, width)'], {'dtype': 'int'}), '((height, width), dtype=int)\n', (5179, 5207), True, 'import numpy as np\n'), ((5287, 5323), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'int'}), '((height, width), dtype=int)\n', (5295, 5323), True, 'import numpy as np\n'), ((5540, 5550), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (5547, 5550), True, 'import numpy as np\n'), ((5617, 5659), 'numpy.array', 'np.array', (['[-1.0, -1.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([-1.0, -1.0, 0.0, 0.0, 0.0, 0.0])\n', (5625, 5659), True, 'import numpy as np\n'), ((5921, 5941), 'gym_gridverse.geometry.Shape', 'Shape', (['height', 'width'], {}), '(height, width)\n', (5926, 5941), False, 'from gym_gridverse.geometry import Orientation, Position, Shape\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from collections import OrderedDict
class Vocab(object):
def __init__(self, vocab_path=None):
# word
self.idx2word = OrderedDict()
self.word2idx = OrderedDict()
self.word_cnt = OrderedDict()
# char
self.idx2char = OrderedDict()
self.char2idx = OrderedDict()
self.char_cnt = OrderedDict()
self.label2idx = None # assign value via dataloader
self.idx2label = None #
# embedding
self.word_embed_dim = None
self.word_embeddings = None
self.char_embed_dim = None
self.char_embeddings = None
# init tokens
self.pad_token = '<pad>'
self.unk_token = '<unk>'
self.init_tokens = [self.pad_token, self.unk_token]
for token in self.init_tokens:
self.add_word(token)
self.add_char(token)
if vocab_path is not None:
self.load_file(vocab_path)
def load_file(self, fname):
with open(fname, 'r') as rf:
for line in rf:
token = line.strip()
if len(token) == 0:
continue
self.add_word(token)
[self.add_char(ch) for ch in token]
def word_size(self):
return len(self.idx2word)
def char_size(self):
return len(self.idx2char)
def get_token_idx(self, token2idx, token):
token = token.lower()
return token2idx[token] \
if token in token2idx \
else token2idx[self.unk_token]
def get_word_idx(self, token):
return self.get_token_idx(self.word2idx, token)
def get_char_idx(self, token):
return self.get_token_idx(self.char2idx, token)
def add_token(self, token2idx, idx2token, token_cnt, token, cnt=1):
token = token.lower()
if token in token2idx:
idx = token2idx[token]
else:
idx = len(token2idx)
token2idx[token] = idx
idx2token[idx] = token
if cnt > 0:
if token in token_cnt:
token_cnt[token] += 1
else:
token_cnt[token] = cnt
return idx
def add_word(self, token, cnt=1):
self.add_token(self.word2idx, self.idx2word, self.word_cnt, token, cnt)
def add_char(self, token, cnt=1):
self.add_token(self.char2idx, self.idx2char, self.char_cnt, token, cnt)
def filter_word_by_cnt(self, min_cnt):
filtered_tokens = [token for token in self.word2idx if self.word_cnt[token] > min_cnt]
self.word2idx = OrderedDict()
self.idx2word = OrderedDict()
self.char2idx = OrderedDict()
self.idx2char = OrderedDict()
for token in self.init_tokens:
self.add_word(token, cnt=0)
self.add_char(token, cnt=0)
for token in filtered_tokens:
self.add_word(token, cnt=0)
[self.add_char(ch, cnt=0) for ch in token]
def load_pretrained_word_embeddings(self, embedding_path, kernel='kv'):
trained_embeddings = OrderedDict()
if kernel == 'gensim':
from gensim.models.word2vec import Word2Vec
w2v_model = Word2Vec.load(embedding_path)
word_dict = w2v_model.wv.vocab
for token in word_dict:
if token not in self.word2idx:
continue
trained_embeddings[token] = w2v_model[token].tolist()
if self.word_embed_dim is None:
self.word_embed_dim = len(list(trained_embeddings[token]))
elif kernel == 'kv':
import pickle
with open(embedding_path, 'rb') as fin:
word_dict = pickle.load(fin)
for token in word_dict:
if token not in self.word2idx:
continue
trained_embeddings[token] = word_dict[token]
if self.word_embed_dim is None:
self.word_embed_dim = len(list(trained_embeddings[token]))
else:
raise NotImplementedError("Not support embedding kernel {}.".format(kernel))
filtered_tokens = trained_embeddings.keys()
self.word2idx = OrderedDict()
self.id2token = OrderedDict()
for token in self.init_tokens:
self.add_word(token, cnt=0)
for token in filtered_tokens:
self.add_word(token, cnt=0)
# load embeddings
self.word_embeddings = np.random.rand(self.word_size(), self.word_embed_dim)
for token in self.word2idx.keys():
if token in trained_embeddings:
self.word_embeddings[self.get_word_idx(token)] = trained_embeddings[token]
def randomly_word_embeddings(self, embed_dim):
self.word_embed_dim = embed_dim
word_size= self.word_size()
self.word_embeddings = np.random.rand(word_size, embed_dim)
for token in self.init_tokens:
self.word_embeddings[self.get_word_idx(token)] = np.zeros([embed_dim])
def randomly_char_embeddings(self, embed_dim):
self.char_embed_dim = embed_dim
char_size = self.char_size()
self.char_embeddings = np.random.rand(char_size, embed_dim)
for token in self.init_tokens:
self.char_embeddings[self.get_char_idx(token)] = np.zeros([embed_dim])
def get_word_vector(self, tokens):
vec = [self.get_word_idx(tok) for tok in tokens]
return vec
def get_char_vector(self, tokens):
vec = []
for token in tokens:
char_vec = []
for ch in token:
char_vec.append(self.get_char_idx(ch))
vec.append(char_vec)
return vec
| [
"numpy.random.rand",
"gensim.models.word2vec.Word2Vec.load",
"numpy.zeros",
"pickle.load",
"collections.OrderedDict"
] | [((182, 195), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (193, 195), False, 'from collections import OrderedDict\n'), ((220, 233), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (231, 233), False, 'from collections import OrderedDict\n'), ((258, 271), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (269, 271), False, 'from collections import OrderedDict\n'), ((312, 325), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (323, 325), False, 'from collections import OrderedDict\n'), ((350, 363), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (361, 363), False, 'from collections import OrderedDict\n'), ((388, 401), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (399, 401), False, 'from collections import OrderedDict\n'), ((2668, 2681), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2679, 2681), False, 'from collections import OrderedDict\n'), ((2706, 2719), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2717, 2719), False, 'from collections import OrderedDict\n'), ((2744, 2757), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2755, 2757), False, 'from collections import OrderedDict\n'), ((2782, 2795), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2793, 2795), False, 'from collections import OrderedDict\n'), ((3156, 3169), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3167, 3169), False, 'from collections import OrderedDict\n'), ((4322, 4335), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4333, 4335), False, 'from collections import OrderedDict\n'), ((4360, 4373), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4371, 4373), False, 'from collections import OrderedDict\n'), ((4980, 5016), 'numpy.random.rand', 'np.random.rand', (['word_size', 'embed_dim'], {}), '(word_size, embed_dim)\n', (4994, 5016), True, 'import numpy as np\n'), ((5299, 5335), 'numpy.random.rand', 'np.random.rand', 
(['char_size', 'embed_dim'], {}), '(char_size, embed_dim)\n', (5313, 5335), True, 'import numpy as np\n'), ((3282, 3311), 'gensim.models.word2vec.Word2Vec.load', 'Word2Vec.load', (['embedding_path'], {}), '(embedding_path)\n', (3295, 3311), False, 'from gensim.models.word2vec import Word2Vec\n'), ((5117, 5138), 'numpy.zeros', 'np.zeros', (['[embed_dim]'], {}), '([embed_dim])\n', (5125, 5138), True, 'import numpy as np\n'), ((5436, 5457), 'numpy.zeros', 'np.zeros', (['[embed_dim]'], {}), '([embed_dim])\n', (5444, 5457), True, 'import numpy as np\n'), ((3800, 3816), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (3811, 3816), False, 'import pickle\n')] |
#!/usr/local/sci/python
#************************************************************************
#
# Plot figures and output numbers for Carbon Monoxide (CMO) section.
# For BAMS SotC 2016
#
#************************************************************************
# SVN Info
# $Rev:: 28 $: Revision of last commit
# $Author:: rdunn $: Author of last commit
# $Date:: 2020-04-09 11:37:08 +0100 (Thu, 09 Apr #$: Date of last commit
#************************************************************************
# START
#************************************************************************
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import iris
import utils # RJHD utilities
import settings
data_loc = "{}/{}/data/CMO/".format(settings.ROOTLOC, settings.YEAR)
reanalysis_loc = "{}/{}/data/RNL/".format(settings.ROOTLOC, settings.YEAR)
image_loc = "{}/{}/images/".format(settings.ROOTLOC, settings.YEAR)
LW = 3
#************************************************************************
def read_ts(filename):
'''
Read the timeseries and return a Timeseries object
:param str filename: file to read
:returns: Timeseries object
'''
indata = np.genfromtxt(filename, dtype=(float), skip_header=1)
year = indata[:, 0]
month = indata[:, ]
times = year + (month-1)/12.
return utils.Timeseries("CO burden", times, indata[:, 2]) # read_ts
#************************************************************************
# Timeseries
print("missing linear trend")
# cmo_monthly = read_ts(data_loc + "co_global_tg_mm_cams.txt")
# minor_tick_interval = 1
# minorLocator = MultipleLocator(minor_tick_interval)
# fig = plt.figure(figsize=(8, 6.5))
# ax = plt.axes([0.13, 0.07, 0.75, 0.86])
# plt.plot(cmo_monthly.times, cmo_monthly.data, 'k', ls='-', lw=LW)
# ax.set_xlim([None, int(settings.YEAR)+2])
# ax.xaxis.set_minor_locator(minorLocator)
# utils.thicken_panel_border(ax)
# for tick in ax.yaxis.get_major_ticks():
# tick.label.set_fontsize(settings.FONTSIZE)
# for tick in ax.xaxis.get_major_ticks():
# tick.label.set_fontsize(settings.FONTSIZE)
# fig.text(0.03, 0.5, "Tg", va='center', rotation='vertical', fontsize=settings.FONTSIZE)
# plt.savefig(image_loc + "CMO_ts{}".format(settings.OUTFMT))
# plt.close()
#************************************************************************
# Global Map
seasonal_list = []
cube_list = iris.load(data_loc + "TCCO_ANO_YEAR_JAS_mean_{}.nc".format(settings.YEAR))
names = np.array([cube.var_name for cube in cube_list])
selected_cube, = np.where(names == "tcco_ano")
cube = cube_list[selected_cube[0]]
cube.coord('latitude').guess_bounds()
cube.coord('longitude').guess_bounds()
bounds = [-100, -20, -15, -10, -5, 0, 5, 10, 15, 20, 100]
utils.plot_smooth_map_iris(image_loc + "CMO_{}_anoms".format(settings.YEAR), cube, settings.COLOURMAP_DICT["composition"], bounds, "Anomalies from 2003-{} (%)".format(settings.YEAR[2:]))
utils.plot_smooth_map_iris(image_loc + "p2.1_CMO_{}_anoms".format(settings.YEAR), cube, settings.COLOURMAP_DICT["composition"], bounds, "Anomalies from 2003-{} (%)".format(settings.YEAR[2:]), figtext="(ab) Carbon Monoxide")
# # Global Map - Jan-Jun
# selected_cube, = np.where(names == "tcco_ano_jan_jun")
# cube = cube_list[selected_cube[0]]
# cube.coord('latitude').guess_bounds()
# cube.coord('longitude').guess_bounds()
# seasonal_list += [cube]
# bounds = [-100, -20, -15, -10, -5, 0, 5, 10, 15, 20, 100]
# utils.plot_smooth_map_iris(image_loc + "CMO_{}_Jan-Jun_anoms".format(settings.YEAR), cube, settings.COLOURMAP_DICT["composition"], bounds, "Anomalies from 2003-{} (%)".format(settings.YEAR[2:]), title="January - June {}".format(settings.YEAR))
# Global Map - Jul-Dec
selected_cube, = np.where(names == "tcco_ano_jul_sep")
cube = cube_list[selected_cube[0]]
cube.coord('latitude').guess_bounds()
cube.coord('longitude').guess_bounds()
seasonal_list += [cube]
bounds = [-100, -20, -15, -10, -5, 0, 5, 10, 15, 20, 100]
utils.plot_smooth_map_iris(image_loc + "CMO_{}_Jul_Sep_anoms".format(settings.YEAR), cube, settings.COLOURMAP_DICT["composition"], bounds, "Anomalies (%)".format(settings.YEAR[2:]), title="July - Sept {}".format(settings.YEAR))
#utils.plot_smooth_map_iris_multipanel(image_loc + "CMO_{}_season_anoms".format(settings.YEAR), seasonal_list, settings.COLOURMAP_DICT["composition"], bounds, "Anomalies from 2003-{} (%)".format(settings.YEAR[2:]), shape=(2,1), title=["January - June {}".format(settings.YEAR), "July - December {}".format(settings.YEAR)], figtext=["(a)","(b)"])
#************************************************************************
# Trend Map
# cube_list = iris.load(data_loc + "SOTC_2015_CO_Trends_Map_Data.nc", "relative total column carbon monoxide linear trend 2003 2015 ") # final space in name necessary
# cube = cube_list[0]
# cube.coord('latitude').guess_bounds()
# cube.coord('longitude').guess_bounds()
# bounds = [-100, -3, -2, -1, -0.5, 0, 0.5, 1, 2, 3, 100]
# print("add zero line")
# utils.plot_smooth_map_iris(image_loc + "CMO_{}_trend".format(settings.YEAR), cube, settings.COLOURMAP_DICT["composition"], bounds, "Trend over 2003-15 (% yr"+r'$^{-1}$'+")")
#************************************************************************
# END
#************************************************************************
| [
"numpy.where",
"numpy.array",
"numpy.genfromtxt",
"utils.Timeseries"
] | [((2747, 2794), 'numpy.array', 'np.array', (['[cube.var_name for cube in cube_list]'], {}), '([cube.var_name for cube in cube_list])\n', (2755, 2794), True, 'import numpy as np\n'), ((2813, 2842), 'numpy.where', 'np.where', (["(names == 'tcco_ano')"], {}), "(names == 'tcco_ano')\n", (2821, 2842), True, 'import numpy as np\n'), ((4007, 4044), 'numpy.where', 'np.where', (["(names == 'tcco_ano_jul_sep')"], {}), "(names == 'tcco_ano_jul_sep')\n", (4015, 4044), True, 'import numpy as np\n'), ((1450, 1501), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'dtype': 'float', 'skip_header': '(1)'}), '(filename, dtype=float, skip_header=1)\n', (1463, 1501), True, 'import numpy as np\n'), ((1599, 1649), 'utils.Timeseries', 'utils.Timeseries', (['"""CO burden"""', 'times', 'indata[:, 2]'], {}), "('CO burden', times, indata[:, 2])\n", (1615, 1649), False, 'import utils\n')] |
import numpy
import trivial
import logging
import itertools
class FourierBasis(trivial.TrivialBasis):
"""Fourier Basis linear function approximation. Requires the ranges for each dimension, and is thus able to
use only sine or cosine (and uses cosine). So, this has half the coefficients that a full Fourier approximation
would use.
From the paper:
<NAME>, <NAME> and <NAME>.
Value Function Approximation in Reinforcement Learning using the Fourier Basis.
In Proceedings of the Twenty-Fifth Conference on Artificial Intelligence, pages 380-385, August 2011.
"""
def __init__(self, nvars, ranges, order=3):
log = logging.getLogger('pyrl.representation.fourier')
nterms = pow(order + 1, nvars)
self.numTerms = nterms
self.order = order
self.ranges = numpy.array(ranges)
iter = itertools.product(range(order + 1), repeat=nvars)
self.multipliers = numpy.array([list(map(int, x)) for x in iter])
log.debug("Numterms: %d Order: %d \n Ranges: %s", self.numTerms, self.order, self.ranges)
def computeFeatures(self, features):
log = logging.getLogger('pyrl.representation.fourier.computeFeatures')
if len(features) == 0:
return numpy.ones((1,))
basisFeatures = numpy.array([self.scale(features[i], i) for i in range(len(features))])
log.info("Features: %s", features)
log.info("Basis Features: %s", basisFeatures)
# return_val = numpy.cos(numpy.pi * numpy.dot(self.multipliers, basisFeatures))
return numpy.cos(numpy.pi * numpy.dot(self.multipliers, basisFeatures)) # return_val
| [
"numpy.dot",
"numpy.array",
"numpy.ones",
"logging.getLogger"
] | [((660, 708), 'logging.getLogger', 'logging.getLogger', (['"""pyrl.representation.fourier"""'], {}), "('pyrl.representation.fourier')\n", (677, 708), False, 'import logging\n'), ((828, 847), 'numpy.array', 'numpy.array', (['ranges'], {}), '(ranges)\n', (839, 847), False, 'import numpy\n'), ((1143, 1207), 'logging.getLogger', 'logging.getLogger', (['"""pyrl.representation.fourier.computeFeatures"""'], {}), "('pyrl.representation.fourier.computeFeatures')\n", (1160, 1207), False, 'import logging\n'), ((1258, 1274), 'numpy.ones', 'numpy.ones', (['(1,)'], {}), '((1,))\n', (1268, 1274), False, 'import numpy\n'), ((1592, 1634), 'numpy.dot', 'numpy.dot', (['self.multipliers', 'basisFeatures'], {}), '(self.multipliers, basisFeatures)\n', (1601, 1634), False, 'import numpy\n')] |
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
##ensembling method of boosted classification tree
def btc(T, X, y, D):
n = X.shape[0]
f = []
w = [1/n]*n
e = []
alpha = []
for t in range(T):
dtc = DecisionTreeClassifier(max_depth=D)
f.append(dtc.fit(X, y, sample_weight=w))
out =[]
#for i in range(n):
# out.append([1.0 if f[t].predict(X[i,].reshape(1, -1), check_input=True) != y[i] else 0.0][0])
out = [f[t].predict(X, check_input=True) != y]
e.append(np.dot(np.array(out),np.array(w))/np.sum(w))
alpha.append((math.log((1-e[t])/e[t]))/2)
for i in range(n):
w[i] = w[i]*math.exp(alpha[t]*[1.0 if f[t].predict(X[i,].reshape(1, -1),
check_input=True) != y[i] else 0.0][0])
def f_ens(x):
return np.sign(sum([alpha[t]*f[t].predict(x, check_input=True) for t in range(T)]))
return f_ens
##testing the tree function defined above
X = pd.read_csv("mushrooms_X.csv").to_numpy()
y = pd.read_csv("mushrooms_Y.csv").to_numpy()
X = X[:,1:]
y = y[:,1]
##sample splitting
test_ix = np.random.choice(X.shape[0], size = int(0.25*X.shape[0]), replace=False)
train_ix = [i for i in range(X.shape[0]) if i not in test_ix]
X_test, y_test = X[test_ix], y[test_ix]
X_train, y_train = X[train_ix], y[train_ix]
T_list = list(range(0,100,5))
T_list[0] = 1
T_list
D = 2
accu = []
for t in T_list:
accu.append(np.sum([y_test == btc(t, X_train, y_train,D)(X_test)])/X_test.shape[0])
print(accu)
##report accuracy to see how good the tree model is | [
"numpy.sum",
"pandas.read_csv",
"sklearn.tree.DecisionTreeClassifier",
"numpy.array",
"math.log"
] | [((321, 356), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': 'D'}), '(max_depth=D)\n', (343, 356), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1152, 1182), 'pandas.read_csv', 'pd.read_csv', (['"""mushrooms_X.csv"""'], {}), "('mushrooms_X.csv')\n", (1163, 1182), True, 'import pandas as pd\n'), ((1198, 1228), 'pandas.read_csv', 'pd.read_csv', (['"""mushrooms_Y.csv"""'], {}), "('mushrooms_Y.csv')\n", (1209, 1228), True, 'import pandas as pd\n'), ((673, 682), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (679, 682), True, 'import numpy as np\n'), ((707, 734), 'math.log', 'math.log', (['((1 - e[t]) / e[t])'], {}), '((1 - e[t]) / e[t])\n', (715, 734), False, 'import math\n'), ((646, 659), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (654, 659), True, 'import numpy as np\n'), ((660, 671), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (668, 671), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Simple example showing the use of start/end time and ping
keyword arguments to ek80.read_raw()
"""
import numpy as np
from echolab2.instruments import EK80
raw_file = 'C:/EK Test Data/EK80/FM/FM_-_70_KHZ_2MS_CAL-Phase0-D20190531-T194722-0.raw'
start_time=np.datetime64('2019-05-31T19:48:35', 'ms')
end_time=np.datetime64('2019-05-31T19:55:00', 'ms')
start_ping = 250
end_ping = 259
print('Reading the raw file %s' % (raw_file))
ek80 = EK80.EK80()
# Read the whole file
ek80.read_raw(raw_file)
print(ek80)
# Read a time span
ek80.read_raw(raw_file, start_time=start_time, end_time=end_time)
print(ek80)
# Read a ping span
ek80.read_raw(raw_file, start_ping=start_ping, end_ping=end_ping)
print(ek80)
"""
Python 3.7.7 (tags/v3.7.7:d7c567b08f, Mar 10 2020, 10:41:24) [MSC v.1900 64 bit (AMD64)] on AKCSL2051-LN20, Standard
>>> Reading the raw file C:/EK80 Test Data/EK80/FM/FM_-_70_KHZ_2MS_CAL-Phase0-D20190531-T194722-0.raw
<class 'echolab2.instruments.EK80.EK80'> at 0x1fd58c1be48
EK80 object contains data from 1 channel:
EKA 240814-0F ES70-18CD :: complex-FM (500, 1270, 4)
data start time: 2019-05-31T19:47:23.984
data end time: 2019-05-31T19:57:35.329
number of pings: 500
<class 'echolab2.instruments.EK80.EK80'> at 0x1fd58c1be48
EK80 object contains data from 1 channel:
EKA 240814-0F ES70-18CD :: complex-FM (315, 1270, 4)
data start time: 2019-05-31T19:48:35.439
data end time: 2019-05-31T19:54:59.349
number of pings: 315
<class 'echolab2.instruments.EK80.EK80'> at 0x1ed2e28ed08
EK80 object contains data from 1 channel:
EKA 240814-0F ES70-18CD :: complex-FM (10, 1270, 4)
data start time: 2019-05-31T19:53:40.119
data end time: 2019-05-31T19:53:52.310
number of pings: 10
"""
| [
"numpy.datetime64",
"echolab2.instruments.EK80.EK80"
] | [((286, 328), 'numpy.datetime64', 'np.datetime64', (['"""2019-05-31T19:48:35"""', '"""ms"""'], {}), "('2019-05-31T19:48:35', 'ms')\n", (299, 328), True, 'import numpy as np\n'), ((338, 380), 'numpy.datetime64', 'np.datetime64', (['"""2019-05-31T19:55:00"""', '"""ms"""'], {}), "('2019-05-31T19:55:00', 'ms')\n", (351, 380), True, 'import numpy as np\n'), ((469, 480), 'echolab2.instruments.EK80.EK80', 'EK80.EK80', ([], {}), '()\n', (478, 480), False, 'from echolab2.instruments import EK80\n')] |
import numpy as np
d_one = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
])
d_two = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0]
])
d_three = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
])
d_four = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0]
])
d_five = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0]
])
d_six = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0]
]) | [
"numpy.array"
] | [((29, 209), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0,\n 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]])\n', (37, 209), True, 'import numpy as np\n'), ((240, 420), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0,\n 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0]])\n', (248, 420), True, 'import numpy as np\n'), ((453, 633), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 0,\n 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]])\n', (461, 633), True, 'import numpy as np\n'), ((665, 845), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 0,\n 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0]])\n', (673, 845), True, 'import numpy as np\n'), ((877, 1057), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 0,\n 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 
1, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0]])\n', (885, 1057), True, 'import numpy as np\n'), ((1088, 1268), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1,\n 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 0,\n 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0]])\n', (1096, 1268), True, 'import numpy as np\n')] |
import numpy
import numpy as np
njobs = 1000
batch_size = 200
nsamples_per_job = 1
def init():
"""
Return an initialization of weights
"""
return np.zeros(10)
def train(data, w, coef_shared):
for i in range(data[0].shape[0]):
idx = int(np.floor(data[0][i] * 10))
coef_shared[idx] += 1
return
train_hogwild = train
def compute_gradient(data, w):
grad = np.zeros(w.shape)
for i in range(data.shape[0]):
idx = int(np.floor(data[i] * 10))
grad[idx] -= 1.
return grad
def get_data():
"""
Return a list of data
Each element in the list is the fraction of data that will be processed by the same worker
"""
ls = []
for i in range(njobs):
ls.append([np.random.rand(batch_size)])
return ls, None
def finish(w, gt):
"""
process trained model
"""
print(np.sum(w), '/', njobs * batch_size)
print(w) | [
"numpy.random.rand",
"numpy.floor",
"numpy.zeros",
"numpy.sum"
] | [((164, 176), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (172, 176), True, 'import numpy as np\n'), ((406, 423), 'numpy.zeros', 'np.zeros', (['w.shape'], {}), '(w.shape)\n', (414, 423), True, 'import numpy as np\n'), ((882, 891), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (888, 891), True, 'import numpy as np\n'), ((272, 297), 'numpy.floor', 'np.floor', (['(data[0][i] * 10)'], {}), '(data[0][i] * 10)\n', (280, 297), True, 'import numpy as np\n'), ((477, 499), 'numpy.floor', 'np.floor', (['(data[i] * 10)'], {}), '(data[i] * 10)\n', (485, 499), True, 'import numpy as np\n'), ((761, 787), 'numpy.random.rand', 'np.random.rand', (['batch_size'], {}), '(batch_size)\n', (775, 787), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# scene3dpanel.py - The Scene3DPanel class.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`Scene3DPanel` class, a FSLeyes view which
draws the scene in 3D.
"""
import logging
import wx
import numpy as np
import fsl.transform.affine as affine
import fsleyes.displaycontext.scene3dopts as scene3dopts
import fsleyes.gl.wxglscene3dcanvas as scene3dcanvas
import fsleyes.profiles.scene3dviewprofile as scene3dviewprofile
import fsleyes.actions as actions
from . import canvaspanel
log = logging.getLogger(__name__)
class Scene3DPanel(canvaspanel.CanvasPanel):
"""The ``Scene3DPanel`` is a :class:`.CanvasPanel` which draws the
contents of the :class:`.OverlayList` as a 3D scene.
The ``Scene3DPanel`` uses a :class:`.Scene3DCanvas`, which manages all of
the GL state and drawing logic. A :class:`.Scene3DViewProfile` instance
is used to manage all of the user interaction logic.
The scene properties are described and changed via a :class:`.Scene3DOpts`
instance, accessible through the :meth:`.CanvasPanel.sceneOpts`
property.
"""
@staticmethod
def defaultLayout():
"""Returns a list of control panel types to be added for the default
3D panel layout.
"""
return ['OverlayDisplayToolBar',
'Scene3DToolBar',
'OverlayListPanel',
'LocationPanel']
@staticmethod
def controlOrder():
"""Returns a list of control panel names, specifying the order in
which they should appear in the FSLeyes ortho panel settings menu.
"""
return ['OverlayListPanel',
'LocationPanel',
'OverlayInfoPanel',
'OverlayDisplayPanel',
'CanvasSettingsPanel',
'AtlasPanel',
'OverlayDisplayToolBar',
'Scene3DToolBar',
'FileTreePanel']
def __init__(self, parent, overlayList, displayCtx, frame):
"""Create a ``Scene3dPanel``.
:arg parent: A :mod:`wx` parent object.
:arg overlayList: A :class:`.OverlayList` instance.
:arg displayCtx: A :class:`.DisplayContext` instance.
:arg frame: The :class:`.FSLeyesFrame` instance.
"""
sceneOpts = scene3dopts.Scene3DOpts(self)
canvaspanel.CanvasPanel.__init__(self,
parent,
overlayList,
displayCtx,
frame,
sceneOpts)
# In 3D, the displaySpace must always be
# set to world, regardless of the parent
# DC value. This can be overridden manually
# however (e.g. through the python shell)
displayCtx.detachDisplaySpace()
displayCtx.defaultDisplaySpace = 'world'
displayCtx.displaySpace = 'world'
contentPanel = self.contentPanel
self.__canvas = scene3dcanvas.WXGLScene3DCanvas(contentPanel,
overlayList,
displayCtx)
opts = self.__canvas.opts
opts.bindProps('pos', displayCtx, 'location')
opts.bindProps('showCursor', sceneOpts)
opts.bindProps('cursorColour', sceneOpts)
opts.bindProps('bgColour', sceneOpts)
opts.bindProps('showLegend', sceneOpts)
opts.bindProps('legendColour', sceneOpts, 'fgColour')
opts.bindProps('labelSize', sceneOpts)
opts.bindProps('occlusion', sceneOpts)
opts.bindProps('light', sceneOpts)
opts.bindProps('lightPos', sceneOpts)
opts.bindProps('lightDistance', sceneOpts)
opts.bindProps('showLight', sceneOpts)
opts.bindProps('zoom', sceneOpts)
opts.bindProps('offset', sceneOpts)
opts.bindProps('rotation', sceneOpts)
opts.bindProps('highDpi', sceneOpts)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.__canvas, flag=wx.EXPAND, proportion=1)
contentPanel.SetSizer(sizer)
self.centrePanelLayout()
self.initProfile(scene3dviewprofile.Scene3DViewProfile)
self.syncLocation = True
def destroy(self):
"""Must be called when this ``Scene3DPanel`` is no longer in use.
"""
self.__canvas.destroy()
self.__canvas = None
canvaspanel.CanvasPanel.destroy(self)
def getGLCanvases(self):
"""Returns all of the :class:`.SliceCanvas` instances contained
within this ``Scene3DPanel``.
"""
return [self.__canvas]
def getActions(self):
"""Overrides :meth:`.ViewPanel.getActions`. Returns a list of actions
that can be executed on this ``Scene3DPanel``, and which will be added
to its view menu.
"""
actionz = [self.screenshot,
self.movieGif,
self.showCommandLineArgs,
self.applyCommandLineArgs,
None,
self.toggleDisplaySync,
self.resetDisplay]
names = [a.actionName if a is not None else None for a in actionz]
return list(zip(names, actionz))
@actions.action
def resetDisplay(self):
"""An action which resets the current camera configuration
(zoom/pan/rotation). See the :meth:`.Scene3DViewProfile.resetDisplay`
method.
"""
self.currentProfile.resetDisplay()
def getMovieFrame(self, overlay, opts):
"""Returns the current movie frame. If the :attr:`movieAxis` is ``3``
(e.g. time series), the volume index is returned. Otherwise the
current rotation matrix is returned.
"""
if self.movieAxis == 3:
return super(Scene3DPanel, self).getMovieFrame(overlay, opts)
else:
return np.copy(self.__canvas.opts.rotation)
def doMovieUpdate(self, overlay, opts):
"""Overrides :meth:`.CanvasPanel.doMovieUpdate`. For x/y/z axis
movies, the scene is rotated. Otherwise (for time) the ``CanvasPanel``
implementation is called.
"""
if self.movieAxis >= 3:
return canvaspanel.CanvasPanel.doMovieUpdate(self, overlay, opts)
else:
canvas = self.__canvas
currot = canvas.opts.rotation
rate = float(self.movieRate)
rateMin = self.getAttribute('movieRate', 'minval')
rateMax = self.getAttribute('movieRate', 'maxval')
rate = 0.1 + 0.9 * (rate - rateMin) / (rateMax - rateMin)
rate = rate * np.pi / 10
rots = [0, 0, 0]
rots[self.movieAxis] = rate
xform = affine.axisAnglesToRotMat(*rots)
xform = affine.concat(xform, currot)
canvas.opts.rotation = xform
return np.copy(xform)
| [
"fsl.transform.affine.axisAnglesToRotMat",
"fsleyes.gl.wxglscene3dcanvas.WXGLScene3DCanvas",
"wx.BoxSizer",
"numpy.copy",
"fsl.transform.affine.concat",
"fsleyes.displaycontext.scene3dopts.Scene3DOpts",
"logging.getLogger"
] | [((609, 636), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (626, 636), False, 'import logging\n'), ((2403, 2432), 'fsleyes.displaycontext.scene3dopts.Scene3DOpts', 'scene3dopts.Scene3DOpts', (['self'], {}), '(self)\n', (2426, 2432), True, 'import fsleyes.displaycontext.scene3dopts as scene3dopts\n'), ((3144, 3214), 'fsleyes.gl.wxglscene3dcanvas.WXGLScene3DCanvas', 'scene3dcanvas.WXGLScene3DCanvas', (['contentPanel', 'overlayList', 'displayCtx'], {}), '(contentPanel, overlayList, displayCtx)\n', (3175, 3214), True, 'import fsleyes.gl.wxglscene3dcanvas as scene3dcanvas\n'), ((4221, 4247), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (4232, 4247), False, 'import wx\n'), ((6147, 6183), 'numpy.copy', 'np.copy', (['self.__canvas.opts.rotation'], {}), '(self.__canvas.opts.rotation)\n', (6154, 6183), True, 'import numpy as np\n'), ((7022, 7054), 'fsl.transform.affine.axisAnglesToRotMat', 'affine.axisAnglesToRotMat', (['*rots'], {}), '(*rots)\n', (7047, 7054), True, 'import fsl.transform.affine as affine\n'), ((7075, 7103), 'fsl.transform.affine.concat', 'affine.concat', (['xform', 'currot'], {}), '(xform, currot)\n', (7088, 7103), True, 'import fsl.transform.affine as affine\n'), ((7165, 7179), 'numpy.copy', 'np.copy', (['xform'], {}), '(xform)\n', (7172, 7179), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from Utils.CV import PNTripletloss_search
from Utils import utils
import warnings
warnings.filterwarnings("ignore")
utils.set_seed(1)
df = pd.read_table("../../data/curatedMetagenomicData/QinJ_2012/counts/QinJ_2012_counts_species.csv",
sep=",", index_col=0)
df = df.dropna(axis=1)
new_columns_names=[]
for x in df.columns:
if '.' in x:
new_columns_names.append(x.split('.')[0] + "-" + x.split('.')[1])
else:
new_columns_names.append(x)
df.columns = new_columns_names
log_df = df.apply(np.log1p,axis=1).T
label_df = pd.read_table("../../data/curatedMetagenomicData/QinJ_2012/QinJ_2012_pData.csv",
sep=",", index_col=0)[['study_condition']]
label_df = label_df.dropna()
label_df.loc[label_df["study_condition"] == "control", "study_condition"] = 0
label_df.loc[label_df["study_condition"] == "T2D", "study_condition"] = 1
data_df = log_df.join(label_df).dropna()
data_arr = np.array(data_df)
X = data_arr[:, :-1].astype(np.float)
y = data_arr[:, -1].astype(np.int)
print(X.shape)
print(y.sum())
acc_df,auc_df,f1_df = PNTripletloss_search(X,y,embedding_dim=24,
support_num=140,query_num=80,
L=[0.05,0.1,0.2,0.3,0.4,0.5],
margins=[1,2,3,4,5,6,7,8,9,10]
,filter_threshold=0.0, cv_num=3)
acc_df.to_csv("MixtureLoss/T2D_MixtureLoss_ACC.csv",sep="\t",header=True,index=True)
auc_df.to_csv("MixtureLoss/T2D_MixtureLoss_AUC.csv",sep="\t",header=True,index=True)
f1_df.to_csv("MixtureLoss/T2D_MixtureLoss_f1.csv",sep="\t",header=True,index=True)
| [
"warnings.filterwarnings",
"Utils.utils.set_seed",
"Utils.CV.PNTripletloss_search",
"numpy.array",
"pandas.read_table"
] | [((155, 188), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (178, 188), False, 'import warnings\n'), ((190, 207), 'Utils.utils.set_seed', 'utils.set_seed', (['(1)'], {}), '(1)\n', (204, 207), False, 'from Utils import utils\n'), ((216, 344), 'pandas.read_table', 'pd.read_table', (['"""../../data/curatedMetagenomicData/QinJ_2012/counts/QinJ_2012_counts_species.csv"""'], {'sep': '""","""', 'index_col': '(0)'}), "(\n '../../data/curatedMetagenomicData/QinJ_2012/counts/QinJ_2012_counts_species.csv'\n , sep=',', index_col=0)\n", (229, 344), True, 'import pandas as pd\n'), ((1048, 1065), 'numpy.array', 'np.array', (['data_df'], {}), '(data_df)\n', (1056, 1065), True, 'import numpy as np\n'), ((1206, 1398), 'Utils.CV.PNTripletloss_search', 'PNTripletloss_search', (['X', 'y'], {'embedding_dim': '(24)', 'support_num': '(140)', 'query_num': '(80)', 'L': '[0.05, 0.1, 0.2, 0.3, 0.4, 0.5]', 'margins': '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]', 'filter_threshold': '(0.0)', 'cv_num': '(3)'}), '(X, y, embedding_dim=24, support_num=140, query_num=80,\n L=[0.05, 0.1, 0.2, 0.3, 0.4, 0.5], margins=[1, 2, 3, 4, 5, 6, 7, 8, 9, \n 10], filter_threshold=0.0, cv_num=3)\n', (1226, 1398), False, 'from Utils.CV import PNTripletloss_search\n'), ((654, 761), 'pandas.read_table', 'pd.read_table', (['"""../../data/curatedMetagenomicData/QinJ_2012/QinJ_2012_pData.csv"""'], {'sep': '""","""', 'index_col': '(0)'}), "('../../data/curatedMetagenomicData/QinJ_2012/QinJ_2012_pData.csv'\n , sep=',', index_col=0)\n", (667, 761), True, 'import pandas as pd\n')] |
from __future__ import print_function, division
import os
import torch
import numpy as np
import pandas as pd
import nibabel as nib
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.nn.functional as F
device = (torch.device('cuda') if torch.cuda.is_available()
else torch.device('cpu'))
class Conv3DNet(nn.Module):
def __init__(self, in_shape3d, n_chans=16, n_out=2):
super().__init__()
self.in_shape3d = in_shape3d
self.n_chans = n_chans
self.n_out = n_out
self.conv1 = nn.Conv3d(1, 16, kernel_size=3, padding=1)
self.conv2 = nn.Conv3d(16, 8, kernel_size=3, padding=1)
D = self.in_shape3d[0] // 4
H = self.in_shape3d[1] // 4
W = self.in_shape3d[2] // 4
self.fc1 = nn.Linear(D * H * W * n_chans // 2, 32)
self.fc2 = nn.Linear(32, self.n_out)
def forward(self, x):
out = F.max_pool3d(torch.tanh(self.conv1(x)), 2)
out = F.max_pool3d(torch.tanh(self.conv2(out)), 2)
out = out.view(-1, self.num_flat_features(out))
out = torch.tanh(self.fc1(out))
out = self.fc2(out)
return out
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
def training_loop_conv(model, train_loader, test_loader, criterion, optimizer, n_epochs):
"""Training loop with training and validation loss."""
loss_vector = np.zeros(n_epochs)
loss_val_vector = np.zeros(n_epochs)
for epoch in range(n_epochs):
loss_train = 0.0
for imgs, labels in train_loader:
imgs = imgs.to(device=device)
labels = labels.to(device=device)
outputs = model(imgs.unsqueeze(1)) #channels are on dim=1
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_train += loss.item()
loss_val = 0.0
for imgs_test, labels_test in test_loader:
imgs_test = imgs_test.to(device=device)
labels_test = labels_test.to(device=device)
outputs_test = model(imgs_test.unsqueeze(1)) #channels are on dim=1
loss_test = criterion(outputs_test, labels_test)
loss_val += loss_test.item()
loss_vector[epoch] = float(loss_train/len(train_loader))
loss_val_vector[epoch] = float(loss_val/len(test_loader))
print("Epoch: %d, Training Loss: %f, Validation Loss: %f"
%(epoch+1, float(loss_train)/len(train_loader), float(loss_val)/len(test_loader)))
return loss_vector, loss_val_vector
def validate_conv(model, train_loader, val_loader):
"""Accuracy in training and in validation."""
for name, loader in [("train", train_loader), ("validation", val_loader)]:
correct = 0
total = 0
with torch.no_grad():
for imgs, labels in loader:
imgs = imgs.to(device=device)
labels = labels.to(device=device)
outputs = model(imgs.unsqueeze(1)) #channels are on dim=1
_, predicted = torch.max(outputs, dim=1)
total += labels.shape[0]
correct += int((predicted == labels).sum())
print("Accuracy {}: {:.2f}".format(name , correct / total)) | [
"torch.nn.Conv3d",
"numpy.zeros",
"torch.cuda.is_available",
"torch.max",
"torch.nn.Linear",
"torch.device",
"torch.no_grad"
] | [((312, 337), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (335, 337), False, 'import torch\n'), ((288, 308), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (300, 308), False, 'import torch\n'), ((354, 373), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (366, 373), False, 'import torch\n'), ((1614, 1632), 'numpy.zeros', 'np.zeros', (['n_epochs'], {}), '(n_epochs)\n', (1622, 1632), True, 'import numpy as np\n'), ((1655, 1673), 'numpy.zeros', 'np.zeros', (['n_epochs'], {}), '(n_epochs)\n', (1663, 1673), True, 'import numpy as np\n'), ((605, 647), 'torch.nn.Conv3d', 'nn.Conv3d', (['(1)', '(16)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(1, 16, kernel_size=3, padding=1)\n', (614, 647), True, 'import torch.nn as nn\n'), ((669, 711), 'torch.nn.Conv3d', 'nn.Conv3d', (['(16)', '(8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(16, 8, kernel_size=3, padding=1)\n', (678, 711), True, 'import torch.nn as nn\n'), ((839, 878), 'torch.nn.Linear', 'nn.Linear', (['(D * H * W * n_chans // 2)', '(32)'], {}), '(D * H * W * n_chans // 2, 32)\n', (848, 878), True, 'import torch.nn as nn\n'), ((898, 923), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'self.n_out'], {}), '(32, self.n_out)\n', (907, 923), True, 'import torch.nn as nn\n'), ((3076, 3091), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3089, 3091), False, 'import torch\n'), ((3334, 3359), 'torch.max', 'torch.max', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (3343, 3359), False, 'import torch\n')] |
# import scholar.scholar as sch
from scipy import spatial
import numpy as np
import pickle as pkl
### Usage from other files ###
# import utils
# v = utils.vecMaster()
# word_list = v.expand(source_words, expansion_method, epsilon)
# def create_vector_object(sourcefile="data/fasttext.wiki.en.vec", destfile="data/fasttext.en", truncate=None):
### NOTE: run this function using python3
# otherwise the dict file is unusable
def create_fasttext_pkl(sourcefile="fasttexttrunc", destfile="data/fasttext.en", truncate=None):
f = open(sourcefile, "r")
firstline = f.readline()
# vector_dict={}
# token_dict={}
tokens = []
vectors = []
for line in f:
token = line[:line.index(' ')]
vector_string = line[line.index(' ') + 1:]
vector = np.fromstring(vector_string, sep=' ')
# token_dict[vector_string] = token
# vector_dict[token] = vector
tokens.append(token)
# tokens.append(unitcode(token.decode('utf-8',errors='ignore')))
vectors.append(vector)
vectors = np.vstack(vectors)
data = {}
data['tokens'] = tokens
data['vectors'] = vectors
f = open(destfile + '.pkl', 'wb')
pkl.dump(data, f, protocol=4)
f.close()
class vecMaster():
def __init__(self, sourcefile='data/fasttext.en.pkl'):
with open(sourcefile, 'rb') as myfile:
data = pkl.load(myfile)
self.token_list = data['tokens']
self.tokens = np.atleast_1d(self.token_list[:50000])
self.vectors = data['vectors'][:50000]
def validate(self, word_list):
valid_words = word_list.copy()
for w in word_list:
if w not in self.tokens:
if ' ' in w:
for sub_w in w.split(' '):
if sub_w not in self.tokens:
print("Word " + w + " not found in vector model. Omitting...")
valid_words.remove(w)
return valid_words
def neighbor_expansion(self, source_words, epsilon=0.35, distance_metric='cosine', k=None):
source_words = self.validate(source_words)
#source_vectors = np.array([self.vectors[np.squeeze(np.argwhere(self.tokens == w))] for w in source_words])
sv = []
for w in source_words:
#if multiword, then average them
if ' ' in w:
words = w.split(' ')
phrase_vectors = np.array([self.vectors[np.squeeze(np.argwhere(self.tokens == w))] for w in words])
sv.append(np.mean(phrase_vectors,axis=0))
else:
#otherwise, take the vector
sv.append(self.vectors[np.squeeze(np.argwhere(self.tokens==w))])
source_vectors = np.vstack(sv)
distances = spatial.distance.cdist(self.vectors, source_vectors, distance_metric)[:,0]
if k is not None:
# find the k nearest
inds = np.argsort( distances )
return np.array( self.tokens[ inds[0:k] ] )
else:
return np.squeeze(self.tokens[np.argwhere(distances < epsilon)])
def mahalanobis_expansion(self, source_words, epsilon=0.25, k=None, sigma=0.00001):
source_words = self.validate(source_words)
source_vectors = np.array([self.vectors[np.squeeze(np.argwhere(self.tokens == w))] for w in source_words])
c = np.cov(source_vectors.T)
c += sigma * np.identity(c.shape[0])
c = np.linalg.inv(c)
#c = np.linalg.pinv(c)
def mahalanobis_squared(u, v, VI=c):
delta = u - v
return np.dot(np.dot(delta, VI), delta)
centroid = np.atleast_2d(np.mean(source_vectors, axis=0))
distances = spatial.distance.cdist(self.vectors, centroid, metric=mahalanobis_squared)[:, 0]
if k is not None:
# find the k nearest
inds = np.argsort( distances )
return np.array( self.tokens[ inds[0:k] ] )
else:
# find anything within radius epsilon (scaled by mean distance)
epsilon = epsilon * np.mean(distances)
return np.squeeze(self.tokens[np.argwhere(distances <= epsilon)])
def naive_centroid_expansion(self, source_words, epsilon=0.25, distance_metric='cosine', k=None):
source_words = self.validate(source_words)
source_vectors = np.array([self.vectors[np.squeeze(np.argwhere(self.tokens == w))] for w in source_words])
centroid = np.atleast_2d(np.mean(source_vectors, axis=0))
distances = spatial.distance.cdist(self.vectors, centroid, distance_metric)[:, 0]
if k is not None:
# find the k nearest
inds = np.argsort( distances )
return np.array( self.tokens[ inds[0:k] ] )
else:
# find anything within radius epsilon (scaled by mean distance)
epsilon = epsilon * np.mean(distances)
return np.squeeze(self.tokens[np.argwhere(distances <= epsilon)])
def bounding_box(self, source_words):
source_words = self.validate(source_words)
source_vectors = np.array([self.vectors[np.squeeze(np.argwhere(self.tokens == w))] for w in source_words])
min_vector = np.min(source_vectors,axis=0)
max_vector = np.max(source_vectors,axis=0)
print(min_vector)
print(max_vector)
print(min_vector.shape)
print(max_vector.shape)
indexes = []
for i in range(len(self.tokens)):
if (np.all(self.vectors[i] >= min_vector) and np.all(self.vectors[i] <= max_vector)):
indexes.append(i)
return np.squeeze(self.tokens)[indexes]
if __name__ == '__main__':
v=vecMaster()
#print(v.neighbor_expansion(['beautiful', 'gorgeous', 'handsome'], k=30))
#print(v.mahalanobis_expansion(['beautiful', 'gorgeous', 'handsome'], k=30))
#print(v.neighbor_expansion(['beautiful', 'gorgeous', 'handsome'], k=30))
#print(v.mahalanobis_expansion(['beautiful', 'gorgeous', 'handsome', 'studly','hot'],k=20))
#print(v.neighbor_expansion(['france', 'germany','guatemala'], k=30))
#print(v.mahalanobis_expansion(['france', 'germany','guatemala'], k=30))
#print(v.neighbor_expansion(['red', 'green','blue','yellow','ruby','orange','maroon'],k=20))
#print(v.mahalanobis_expansion(['red', 'green','blue','yellow','ruby','orange','maroon'],k=20))
# print(v.neighbor_expansion(['beautiful', 'gorgeous']))
# print(v.neighbor_expansion(['red', 'green','blue']))
#print(v.neighbor_expansion(['idiot', 'jerk','stupid','dumb','fat','imbecile','imbecilic','sadistic'], k=30))
#print(v.neighbor_expansion(['idiot', 'jerk','stupid','dumb','fat','imbecile','imbecilic','sadistic'], k=30))
#print(v.mahalanobis_expansion(['idiot', 'jerk','stupid','dumb','fat','imbecile','imbecilic','sadistic'], k=30))
print(v.bounding_box(['clever','smart','intelligent','red','belligerent','the']))
#print(v.mahalanobis_expansion(['genius', 'prodigy','innovator'], k=30))
| [
"scipy.spatial.distance.cdist",
"pickle.dump",
"numpy.identity",
"numpy.all",
"numpy.argwhere",
"numpy.argsort",
"numpy.min",
"numpy.max",
"numpy.linalg.inv",
"pickle.load",
"numpy.array",
"numpy.mean",
"numpy.squeeze",
"numpy.dot",
"numpy.atleast_1d",
"numpy.cov",
"numpy.fromstring"... | [((1051, 1069), 'numpy.vstack', 'np.vstack', (['vectors'], {}), '(vectors)\n', (1060, 1069), True, 'import numpy as np\n'), ((1184, 1213), 'pickle.dump', 'pkl.dump', (['data', 'f'], {'protocol': '(4)'}), '(data, f, protocol=4)\n', (1192, 1213), True, 'import pickle as pkl\n'), ((784, 821), 'numpy.fromstring', 'np.fromstring', (['vector_string'], {'sep': '""" """'}), "(vector_string, sep=' ')\n", (797, 821), True, 'import numpy as np\n'), ((1455, 1493), 'numpy.atleast_1d', 'np.atleast_1d', (['self.token_list[:50000]'], {}), '(self.token_list[:50000])\n', (1468, 1493), True, 'import numpy as np\n'), ((2740, 2753), 'numpy.vstack', 'np.vstack', (['sv'], {}), '(sv)\n', (2749, 2753), True, 'import numpy as np\n'), ((3368, 3392), 'numpy.cov', 'np.cov', (['source_vectors.T'], {}), '(source_vectors.T)\n', (3374, 3392), True, 'import numpy as np\n'), ((3450, 3466), 'numpy.linalg.inv', 'np.linalg.inv', (['c'], {}), '(c)\n', (3463, 3466), True, 'import numpy as np\n'), ((5203, 5233), 'numpy.min', 'np.min', (['source_vectors'], {'axis': '(0)'}), '(source_vectors, axis=0)\n', (5209, 5233), True, 'import numpy as np\n'), ((5254, 5284), 'numpy.max', 'np.max', (['source_vectors'], {'axis': '(0)'}), '(source_vectors, axis=0)\n', (5260, 5284), True, 'import numpy as np\n'), ((1375, 1391), 'pickle.load', 'pkl.load', (['myfile'], {}), '(myfile)\n', (1383, 1391), True, 'import pickle as pkl\n'), ((2775, 2844), 'scipy.spatial.distance.cdist', 'spatial.distance.cdist', (['self.vectors', 'source_vectors', 'distance_metric'], {}), '(self.vectors, source_vectors, distance_metric)\n', (2797, 2844), False, 'from scipy import spatial\n'), ((2929, 2950), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (2939, 2950), True, 'import numpy as np\n'), ((2972, 3004), 'numpy.array', 'np.array', (['self.tokens[inds[0:k]]'], {}), '(self.tokens[inds[0:k]])\n', (2980, 3004), True, 'import numpy as np\n'), ((3414, 3437), 'numpy.identity', 'np.identity', 
(['c.shape[0]'], {}), '(c.shape[0])\n', (3425, 3437), True, 'import numpy as np\n'), ((3656, 3687), 'numpy.mean', 'np.mean', (['source_vectors'], {'axis': '(0)'}), '(source_vectors, axis=0)\n', (3663, 3687), True, 'import numpy as np\n'), ((3709, 3783), 'scipy.spatial.distance.cdist', 'spatial.distance.cdist', (['self.vectors', 'centroid'], {'metric': 'mahalanobis_squared'}), '(self.vectors, centroid, metric=mahalanobis_squared)\n', (3731, 3783), False, 'from scipy import spatial\n'), ((3869, 3890), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (3879, 3890), True, 'import numpy as np\n'), ((3912, 3944), 'numpy.array', 'np.array', (['self.tokens[inds[0:k]]'], {}), '(self.tokens[inds[0:k]])\n', (3920, 3944), True, 'import numpy as np\n'), ((4471, 4502), 'numpy.mean', 'np.mean', (['source_vectors'], {'axis': '(0)'}), '(source_vectors, axis=0)\n', (4478, 4502), True, 'import numpy as np\n'), ((4524, 4587), 'scipy.spatial.distance.cdist', 'spatial.distance.cdist', (['self.vectors', 'centroid', 'distance_metric'], {}), '(self.vectors, centroid, distance_metric)\n', (4546, 4587), False, 'from scipy import spatial\n'), ((4673, 4694), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (4683, 4694), True, 'import numpy as np\n'), ((4716, 4748), 'numpy.array', 'np.array', (['self.tokens[inds[0:k]]'], {}), '(self.tokens[inds[0:k]])\n', (4724, 4748), True, 'import numpy as np\n'), ((5616, 5639), 'numpy.squeeze', 'np.squeeze', (['self.tokens'], {}), '(self.tokens)\n', (5626, 5639), True, 'import numpy as np\n'), ((3596, 3613), 'numpy.dot', 'np.dot', (['delta', 'VI'], {}), '(delta, VI)\n', (3602, 3613), True, 'import numpy as np\n'), ((4071, 4089), 'numpy.mean', 'np.mean', (['distances'], {}), '(distances)\n', (4078, 4089), True, 'import numpy as np\n'), ((4875, 4893), 'numpy.mean', 'np.mean', (['distances'], {}), '(distances)\n', (4882, 4893), True, 'import numpy as np\n'), ((5485, 5522), 'numpy.all', 'np.all', (['(self.vectors[i] >= 
min_vector)'], {}), '(self.vectors[i] >= min_vector)\n', (5491, 5522), True, 'import numpy as np\n'), ((5527, 5564), 'numpy.all', 'np.all', (['(self.vectors[i] <= max_vector)'], {}), '(self.vectors[i] <= max_vector)\n', (5533, 5564), True, 'import numpy as np\n'), ((2539, 2570), 'numpy.mean', 'np.mean', (['phrase_vectors'], {'axis': '(0)'}), '(phrase_vectors, axis=0)\n', (2546, 2570), True, 'import numpy as np\n'), ((3065, 3097), 'numpy.argwhere', 'np.argwhere', (['(distances < epsilon)'], {}), '(distances < epsilon)\n', (3076, 3097), True, 'import numpy as np\n'), ((4132, 4165), 'numpy.argwhere', 'np.argwhere', (['(distances <= epsilon)'], {}), '(distances <= epsilon)\n', (4143, 4165), True, 'import numpy as np\n'), ((4936, 4969), 'numpy.argwhere', 'np.argwhere', (['(distances <= epsilon)'], {}), '(distances <= epsilon)\n', (4947, 4969), True, 'import numpy as np\n'), ((3299, 3328), 'numpy.argwhere', 'np.argwhere', (['(self.tokens == w)'], {}), '(self.tokens == w)\n', (3310, 3328), True, 'import numpy as np\n'), ((4381, 4410), 'numpy.argwhere', 'np.argwhere', (['(self.tokens == w)'], {}), '(self.tokens == w)\n', (4392, 4410), True, 'import numpy as np\n'), ((5125, 5154), 'numpy.argwhere', 'np.argwhere', (['(self.tokens == w)'], {}), '(self.tokens == w)\n', (5136, 5154), True, 'import numpy as np\n'), ((2684, 2713), 'numpy.argwhere', 'np.argwhere', (['(self.tokens == w)'], {}), '(self.tokens == w)\n', (2695, 2713), True, 'import numpy as np\n'), ((2464, 2493), 'numpy.argwhere', 'np.argwhere', (['(self.tokens == w)'], {}), '(self.tokens == w)\n', (2475, 2493), True, 'import numpy as np\n')] |
"""
Waterbirds Dataset
- Reference code: https://github.com/kohpangwei/group_DRO/blob/master/data/cub_dataset.py
- See Group DRO, https://arxiv.org/abs/1911.08731 for more details
This waterbirds is in SupContrast and has data augmentations option.
"""
import os
import numpy as np
import pandas as pd
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from . import model_attributes
# from utils.visualize import plot_data_batch
from copy import deepcopy
class Waterbirds(Dataset):
    """
    Waterbirds dataset from waterbird_complete95_forest2water2 in GroupDRO paper

    Reads ``metadata.csv`` from ``root_dir/<target_name>_<confounder_names>``
    and exposes one split ('train', 'val' or 'test') of the images together
    with class labels, group labels, and the spurious (place) attribute.

    Parameters
    ----------
    root_dir : str
        Directory containing the generated dataset folder.
    target_name : str
        Target column name; combined with ``confounder_names`` to locate
        the data directory.
    confounder_names : list of str
        Confounder column names (only the single 'place' confounder is
        supported, see ``n_confounders``).
    split : str
        One of 'train', 'val', 'test'.
    augment_data : bool, default: False
        Forwarded to ``get_transform_cub`` when no explicit transform is given.
    model_type : str, optional
        Key into ``model_attributes``; a trailing '_pt' suffix is stripped.
    args : argparse.Namespace
        Experiment arguments; only ``args.dataset`` is read here
        ('waterbirds_r' swaps the roles of target and confounder).
    transform : callable, optional
        If given, used for both training and evaluation; otherwise
        transforms are built via ``get_transform_cub``.
    """
    def __init__(self, root_dir, target_name, confounder_names,
                 split, augment_data=False, model_type=None, args=None,
                 transform=None):
        self.root_dir = root_dir
        self.target_name = target_name
        self.confounder_names = confounder_names
        self.model_type = model_type
        # Strip the '_pt' (pretrained) suffix so model_attributes lookups work.
        if '_pt' in model_type:
            self.model_type = model_type[:-3]
        self.augment_data = augment_data
        self.split = split
        # Split names map to the integer codes used in metadata.csv.
        self.split_dict = {
            'train': 0,
            'val': 1,
            'test': 2
        }
        self.data_dir = os.path.join(
            self.root_dir,
            '_'.join([self.target_name] + self.confounder_names))
        if not os.path.exists(self.data_dir):
            raise ValueError(
                f'{self.data_dir} does not exist yet. Please generate the dataset first.')
        # Read in metadata
        self.metadata_df = pd.read_csv(
            os.path.join(self.data_dir, 'metadata.csv'))
        # Filter for data split ('train', 'val', 'test')
        self.metadata_df = self.metadata_df[
            self.metadata_df['split'] == self.split_dict[self.split]]
        # Get the y values
        self.y_array = self.metadata_df['y'].values
        self.n_classes = 2
        # We only support one confounder for CUB for now
        self.confounder_array = self.metadata_df['place'].values
        self.n_confounders = 1
        # Reverse: for 'waterbirds_r', predict the place and treat the bird
        # class as the spurious attribute.
        if args.dataset == 'waterbirds_r':
            self.y_array = self.metadata_df['place'].values
            self.confounder_array = self.metadata_df['y'].values
        # Map to groups: group id = 2 * y + place
        # (assumes both 'y' and 'place' are binary 0/1 — TODO confirm
        # against the generated metadata.csv).
        self.n_groups = pow(2, 2)
        self.group_array = (self.y_array * (self.n_groups / 2) +
                            self.confounder_array).astype('int')
        # Extract filenames and splits
        self.filename_array = self.metadata_df['img_filename'].values
        self.split_array = self.metadata_df['split'].values
        # Play nice with my earlier code
        self.targets = torch.tensor(self.y_array)
        self.targets_all = {'target': np.array(self.y_array),
                            'group_idx': np.array(self.group_array),
                            'spurious': np.array(self.confounder_array),
                            'sub_target': np.array(list(zip(self.y_array, self.confounder_array)))}
        self.group_labels = ['LANDBIRD on land', 'LANDBIRD on water',
                             'WATERBIRD on land', 'WATERBIRD on water']
        if args.dataset == 'waterbirds_r':
            self.group_labels = ['LAND with landbird', 'LAND with waterbird',
                                 'WATER with landbird', 'WATER with waterbird']
        # Set transform: precomputed features skip image transforms entirely.
        if model_attributes[self.model_type]['feature_type'] == 'precomputed':
            self.features_mat = torch.from_numpy(np.load(
                os.path.join(root_dir, 'features', model_attributes[self.model_type]['feature_filename']))).float()
            self.train_transform = None
            self.eval_transform = None
            # Alias the feature matrix under .data as well.
            self.data = self.features_mat
        else:
            self.features_mat = None
            if transform is None:
                self.train_transform = get_transform_cub(
                    self.model_type,
                    train=True,
                    augment_data=augment_data)
                self.eval_transform = get_transform_cub(
                    self.model_type,
                    train=False,
                    augment_data=augment_data)
            else:
                # A single user-supplied transform is used for both phases.
                self.train_transform = transform
                self.eval_transform = transform

    def __len__(self):
        """Number of samples in this split."""
        return len(self.filename_array)

    def __getitem__(self, idx):
        """Return ``(x, y, idx)``: input (features or transformed image),
        class label, and the sample index itself."""
        y = self.targets[idx]  # changed to fit with earlier code
        # g = self.group_array[idx]
        if model_attributes[self.model_type]['feature_type'] == 'precomputed':
            x = self.features_mat[idx, :]
            print('loading from features_mat')
        else:
            img_filename = os.path.join(
                self.data_dir,
                self.filename_array[idx])
            img = Image.open(img_filename).convert('RGB')
            # Figure out split and transform accordingly
            if self.split_array[idx] == self.split_dict['train'] and self.train_transform:
                img = self.train_transform(img)
            elif (self.split_array[idx] in [self.split_dict['val'], self.split_dict['test']] and
                  self.eval_transform):
                img = self.eval_transform(img)
            # Flatten if needed (e.g. for MLP-style models)
            if model_attributes[self.model_type]['flatten']:
                assert img.dim() == 3
                img = img.view(-1)
            x = img
        return x, y, idx

    def group_str(self, group_idx):
        """Return a human-readable description of group ``group_idx``,
        e.g. ``'waterbird_complete95 = 1, forest2water2 = 0'``."""
        # Recover the class label and the confounder bit pattern from
        # the flat group index.
        y = group_idx // (self.n_groups / self.n_classes)
        c = group_idx % (self.n_groups // self.n_classes)
        group_name = f'{self.target_name} = {int(y)}'
        # Binary string of confounder values, least-significant bit first.
        bin_str = format(int(c), f'0{self.n_confounders}b')[::-1]
        for attr_idx, attr_name in enumerate(self.confounder_names):
            group_name += f', {attr_name} = {bin_str[attr_idx]}'
        return group_name
def get_transform_cub(model_type, train, augment_data):
    """Build the image preprocessing pipeline for CUB/Waterbirds.

    Training with augmentation gets a random resized crop plus a
    horizontal flip; every other case gets a deterministic
    resize-and-center-crop. Both variants end with ToTensor and
    ImageNet-statistics normalization.
    """
    resolution = model_attributes[model_type]['target_resolution']
    assert resolution is not None
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    if train and augment_data:
        # Stochastic augmentation for training.
        return transforms.Compose([
            transforms.RandomResizedCrop(
                resolution,
                scale=(0.7, 1.0),
                ratio=(0.75, 1.3333333333333333),
                interpolation=2),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    # Deterministic path: resize to a slightly larger square (256/224
    # ratio), then crop the center back to the target resolution.
    resize_scale = 256.0 / 224.0
    enlarged = (int(resolution[0] * resize_scale), int(resolution[1] * resize_scale))
    return transforms.Compose([
        transforms.Resize(enlarged),
        transforms.CenterCrop(resolution),
        transforms.ToTensor(),
        normalize,
    ])
def load_waterbirds(args, train_shuffle=True,
                    train_transform=None,
                    eval_transform=None):
    """
    Default dataloader setup for Waterbirds
    Args:
    - args (argparse): Experiment arguments
    - train_shuffle (bool): Whether to shuffle training data
    Returns:
    - (train_loader, val_loader, test_loader): Tuple of dataloaders for each split
    """
    # Build the three split datasets. Only the training split receives
    # augment_data and its own transform; val/test share eval_transform.
    datasets = {}
    for split, transform in (('train', train_transform),
                             ('val', eval_transform),
                             ('test', eval_transform)):
        kwargs = dict(target_name=args.target_name,
                      confounder_names=args.confounder_names,
                      split=split,
                      model_type=args.arch,
                      args=args,
                      transform=transform)
        if split == 'train':
            kwargs['augment_data'] = args.augment_data
        datasets[split] = Waterbirds(args.root_dir, **kwargs)
    # Training loader honors the caller's shuffle flag and batch size;
    # evaluation loaders are never shuffled.
    train_loader = DataLoader(datasets['train'],
                              batch_size=args.bs_trn,
                              shuffle=train_shuffle,
                              num_workers=args.num_workers)
    val_loader = DataLoader(datasets['val'], batch_size=args.bs_val,
                            shuffle=False, num_workers=args.num_workers)
    test_loader = DataLoader(datasets['test'], batch_size=args.bs_val,
                             shuffle=False, num_workers=args.num_workers)
    args.num_classes = 2
    return (train_loader, val_loader, test_loader)
# def visualize_waterbirds(dataloader, num_datapoints, title, args, save,
# save_id, ftype='png', target_type='group_idx'):
# # Filter for selected datapoints (in case we use SubsetRandomSampler)
# try:
# subset_indices = dataloader.sampler.indices
# targets = dataloader.dataset.targets_all[target_type][subset_indices]
# subset = True
# except AttributeError:
# targets = dataloader.dataset.targets_all[target_type]
# subset = False
# all_data_indices = []
# for class_ in np.unique(targets):
# class_indices = np.where(targets == class_)[0]
# if subset:
# class_indices = subset_indices[class_indices]
# all_data_indices.extend(class_indices[:num_datapoints])
# plot_data_batch([dataloader.dataset.__getitem__(ix)[0] for ix in all_data_indices],
# mean=np.mean([0.485, 0.456, 0.406]),
# std=np.mean([0.229, 0.224, 0.225]), nrow=8, title=title,
# args=args, save=save, save_id=save_id, ftype=ftype)
def get_resampled_set(dataset, resampled_set_indices, copy_dataset=False):
    """
    Obtain spurious dataset resampled_set

    Restricts all per-sample arrays of a spurious-correlations dataset
    (``y_array``, ``group_array``, ``filename_array``, ``split_array`` and
    every entry of ``targets_all``) to the given indices, and points
    ``targets`` at the restricted ``y_array``.

    Args:
    - dataset (torch.utils.data.Dataset): Spurious correlations dataset
    - resampled_set_indices (int[]): List-like of indices
    - copy_dataset (bool): If true, operate on a deep copy and leave the
      input dataset untouched; otherwise the dataset is mutated in place.

    Returns:
    - The resampled dataset (the copy if ``copy_dataset``, else ``dataset``).
    """
    # Fix: the file only imports `deepcopy` (`from copy import deepcopy`),
    # so the original call `copy.deepcopy(dataset)` raised NameError
    # whenever copy_dataset=True.
    resampled_set = deepcopy(dataset) if copy_dataset else dataset
    resampled_set.y_array = resampled_set.y_array[resampled_set_indices]
    resampled_set.group_array = resampled_set.group_array[resampled_set_indices]
    resampled_set.filename_array = resampled_set.filename_array[resampled_set_indices]
    resampled_set.split_array = resampled_set.split_array[resampled_set_indices]
    # Keep `targets` consistent with the resampled labels.
    resampled_set.targets = resampled_set.y_array
    for target_type, target_val in resampled_set.targets_all.items():
        resampled_set.targets_all[target_type] = target_val[resampled_set_indices]
    return resampled_set
# Refactor for modularity
def load_dataloaders(args, train_shuffle=True,
train_transform=None,
eval_transform=None):
return load_waterbirds(args, train_shuffle,
train_transform, eval_transform)
def visualize_dataset(dataloader, num_datapoints, title, args, save,
                      save_id, ftype='png', target_type='target'):
    # NOTE(review): `visualize_waterbirds` is commented out above (its
    # `plot_data_batch` import is also disabled), so calling this function
    # raises NameError until that helper is restored — confirm before use.
    return visualize_waterbirds(dataloader, num_datapoints, title,
                                args, save, save_id, ftype, target_type)
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomHorizontalFlip",
"os.path.exists",
"torchvision.transforms.RandomResizedCrop",
"PIL.Image.open",
"numpy.array",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Normalize",
"os.path.join",
"torch.tensor",
"torchvision.trans... | [((7903, 8005), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'args.bs_trn', 'shuffle': 'train_shuffle', 'num_workers': 'args.num_workers'}), '(train_set, batch_size=args.bs_trn, shuffle=train_shuffle,\n num_workers=args.num_workers)\n', (7913, 8005), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((8417, 8510), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': 'args.bs_val', 'shuffle': '(False)', 'num_workers': 'args.num_workers'}), '(val_set, batch_size=args.bs_val, shuffle=False, num_workers=args\n .num_workers)\n', (8427, 8510), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((8867, 8961), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'args.bs_val', 'shuffle': '(False)', 'num_workers': 'args.num_workers'}), '(test_set, batch_size=args.bs_val, shuffle=False, num_workers=\n args.num_workers)\n', (8877, 8961), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2712, 2738), 'torch.tensor', 'torch.tensor', (['self.y_array'], {}), '(self.y_array)\n', (2724, 2738), False, 'import torch\n'), ((1391, 1420), 'os.path.exists', 'os.path.exists', (['self.data_dir'], {}), '(self.data_dir)\n', (1405, 1420), False, 'import os\n'), ((1623, 1666), 'os.path.join', 'os.path.join', (['self.data_dir', '"""metadata.csv"""'], {}), "(self.data_dir, 'metadata.csv')\n", (1635, 1666), False, 'import os\n'), ((2777, 2799), 'numpy.array', 'np.array', (['self.y_array'], {}), '(self.y_array)\n', (2785, 2799), True, 'import numpy as np\n'), ((2842, 2868), 'numpy.array', 'np.array', (['self.group_array'], {}), '(self.group_array)\n', (2850, 2868), True, 'import numpy as np\n'), ((2910, 2941), 'numpy.array', 'np.array', (['self.confounder_array'], {}), '(self.confounder_array)\n', (2918, 2941), True, 'import numpy as np\n'), ((4784, 4837), 'os.path.join', 'os.path.join', (['self.data_dir', 'self.filename_array[idx]'], {}), '(self.data_dir, 
self.filename_array[idx])\n', (4796, 4837), False, 'import os\n'), ((6466, 6506), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['target_resolution'], {}), '(target_resolution)\n', (6487, 6506), True, 'import torchvision.transforms as transforms\n'), ((6520, 6541), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6539, 6541), True, 'import torchvision.transforms as transforms\n'), ((6555, 6621), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (6575, 6621), True, 'import torchvision.transforms as transforms\n'), ((6696, 6817), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['target_resolution'], {'scale': '(0.7, 1.0)', 'ratio': '(0.75, 1.3333333333333333)', 'interpolation': '(2)'}), '(target_resolution, scale=(0.7, 1.0), ratio=(\n 0.75, 1.3333333333333333), interpolation=2)\n', (6724, 6817), True, 'import torchvision.transforms as transforms\n'), ((6891, 6924), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (6922, 6924), True, 'import torchvision.transforms as transforms\n'), ((6938, 6959), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6957, 6959), True, 'import torchvision.transforms as transforms\n'), ((6973, 7039), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (6993, 7039), True, 'import torchvision.transforms as transforms\n'), ((4889, 4913), 'PIL.Image.open', 'Image.open', (['img_filename'], {}), '(img_filename)\n', (4899, 4913), False, 'from PIL import Image\n'), ((3565, 3659), 'os.path.join', 'os.path.join', (['root_dir', '"""features"""', "model_attributes[self.model_type]['feature_filename']"], {}), "(root_dir, 'features', model_attributes[self.model_type][\n 
'feature_filename'])\n", (3577, 3659), False, 'import os\n')] |
import os
import random
import shutil
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from shutil import copyfile
from os import getcwd
import cv2
from tensorflow.keras.layers import Conv2D, Input, ZeroPadding2D, BatchNormalization, Activation, MaxPooling2D, Flatten, Dense
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.utils import shuffle
import imutils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras import regularizers
# Sanity checks: current working directory and raw class-folder sizes.
# Fix: os.getcwd is a function — the original `print(os.getcwd)` printed
# the function object instead of the directory.
print(os.getcwd())
print(len(os.listdir('with_mask')))
print(len(os.listdir('without_mask')))
# Create the train/test directory tree. Each directory gets its own
# try/except: the original wrapped all seven mkdir calls in a single try,
# so if the root already existed, none of the sub-directories were created.
for _dir in (
        'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm',
        'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training',
        'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing',
        'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training/withm',
        'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training/withoutm',
        'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing/withm',
        'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing/withoutm',
):
    try:
        os.mkdir(_dir)
    except OSError:
        pass
def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE):
    """Randomly split the non-empty files of SOURCE into two directories.

    Args:
        SOURCE (str): directory containing the files to split.
        TRAINING (str): destination directory for the training share.
        TESTING (str): destination directory for the remaining files.
        SPLIT_SIZE (float): fraction (0-1) of files that go to TRAINING.

    Zero-byte files are skipped with a message. Files are copied, not moved.
    """
    dataset = []
    for unit_data in os.listdir(SOURCE):
        data = os.path.join(SOURCE, unit_data)
        if os.path.getsize(data) > 0:
            dataset.append(unit_data)
        else:
            print('Skipped ' + unit_data)
            print('Invalid file i.e zero size')
    train_set_length = int(len(dataset) * SPLIT_SIZE)
    # BUG FIX 1: the original computed `shuffled_set` but then sliced the
    # UNshuffled `dataset`, so the "random" split was fully deterministic.
    shuffled_set = random.sample(dataset, len(dataset))
    train_set = shuffled_set[0:train_set_length]
    # BUG FIX 2: slicing with `[-test_set_length:]` copied the ENTIRE
    # dataset into TESTING when the test share was 0 (`[-0:]` == whole
    # list); slicing from train_set_length handles that edge case.
    test_set = shuffled_set[train_set_length:]
    for unit_data in train_set:
        copyfile(os.path.join(SOURCE, unit_data),
                 os.path.join(TRAINING, unit_data))
    for unit_data in test_set:
        copyfile(os.path.join(SOURCE, unit_data),
                 os.path.join(TESTING, unit_data))
# Source directories holding the raw images, and the destinations for the
# 80/20 train/test split produced below.
with_mask_dir = 'C:/Users/91797/Desktop/Mask-CNN/with_mask'
training_with_mask_dir = 'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training/withm'
testing_with_mask_dir = 'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing/withm'
without_mask_dir = 'C:/Users/91797/Desktop/Mask-CNN/without_mask'
training_without_mask_dir = 'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training/withoutm'
testing_without_mask_dir = 'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing/withoutm'
split_size = .8  # 80% of the non-empty files go to training
split_data(with_mask_dir,training_with_mask_dir,testing_with_mask_dir,split_size)
split_data(without_mask_dir,training_without_mask_dir,testing_without_mask_dir,split_size)
# Report how many files ended up in each split directory.
print(len(os.listdir(training_with_mask_dir)))
print(len(os.listdir(testing_with_mask_dir)))
print(len(os.listdir(training_without_mask_dir)))
print(len(os.listdir(testing_without_mask_dir)))
# Small CNN: two conv/max-pool stages, then a dense head with dropout and a
# two-way softmax (mask / no-mask), trained with categorical crossentropy.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(100, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(100, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(50,activation='relu'),
    tf.keras.layers.Dense(2, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
Training_dir = 'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training'
# Training pipeline: rescale to [0, 1] plus heavy geometric augmentation.
train_datagen = ImageDataGenerator(rescale=1.0/255,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
# NOTE(review): flow_from_directory assigns class indices alphabetically
# ('withm' before 'withoutm', i.e. 0 = with mask) — verify this matches the
# labels_dict used in the webcam loop (check train_generator.class_indices).
train_generator = train_datagen.flow_from_directory(Training_dir,
                                                    batch_size=10,
                                                    class_mode='categorical',
                                                    target_size=(150,150))
Validation_dir = 'C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing'
# Validation pipeline: rescaling only, no augmentation.
validation_datagen = ImageDataGenerator(rescale=1.0/255)
validation_generator = validation_datagen.flow_from_directory(Validation_dir,batch_size=10,class_mode='categorical',target_size=(150,150))
# NOTE(review): fit_generator is deprecated (removed in recent TF releases);
# model.fit accepts generators directly — confirm the installed TF version.
history = model.fit_generator(train_generator,
                              epochs=20,
                              verbose=1,
                              validation_data=validation_generator)
# Extract the training curves from the Keras History object.
history_data = history.history
acc = history_data['accuracy']
val_acc = history_data['val_accuracy']
loss = history_data['loss']
val_loss = history_data['val_loss']
epochs = range(len(acc))
# Accuracy curves: red = training, blue = validation.
for series, colour, label in ((acc, 'r', "Training Accuracy"),
                              (val_acc, 'b', "Validation Accuracy")):
    plt.plot(epochs, series, colour, label)
plt.title('Training and validation accuracy')
# Loss curves on a fresh figure.
plt.figure()
for series, colour, label in ((loss, 'r', "Training Loss"),
                              (val_loss, 'b', "Validation Loss")):
    plt.plot(epochs, series, colour, label)
plt.title('Training and validation loss')
# Persist the trained network for the live-detection loop.
model.save('mask_trained2.h5')
# --- Live mask detection on webcam frames ---
face_clsfr=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# NOTE(review): flow_from_directory orders classes alphabetically, which
# would make 0 = 'withm' (with mask); this mapping assumes the opposite —
# verify against train_generator.class_indices before trusting the labels.
labels_dict={0:'without_mask',1:'with_mask'}
# BGR box colors: red for class 0, green for class 1.
color_dict={0:(0,0,255),1:(0,255,0)}
size = 4  # downscale factor used to speed up face detection
webcam = cv2.VideoCapture(1)  # camera index 1 — adjust per machine
# We load the xml file
classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
while True:
    (rval, im) = webcam.read()
    im=cv2.flip(im,1,1) #Flip to act as a mirror
    # Resize the image to speed up detection
    mini = cv2.resize(im, (im.shape[1] // size, im.shape[0] // size))
    # detect MultiScale / faces
    faces = classifier.detectMultiScale(mini)
    # Draw rectangles around each face
    for f in faces:
        (x, y, w, h) = [v * size for v in f] #Scale the shape size back up
        #Save just the rectangle faces in SubRecFaces
        face_img = im[y:y+h, x:x+w]
        # Preprocess exactly like training: 150x150 RGB, scaled to [0, 1],
        # with a leading batch dimension of 1.
        resized=cv2.resize(face_img,(150,150))
        normalized=resized/255.0
        reshaped=np.reshape(normalized,(1,150,150,3))
        reshaped = np.vstack([reshaped])
        result=model.predict(reshaped)
        #print(result)
        label=np.argmax(result,axis=1)[0]
        # Face box, filled label background, and the predicted class name.
        cv2.rectangle(im,(x,y),(x+w,y+h),color_dict[label],2)
        cv2.rectangle(im,(x,y-40),(x+w,y),color_dict[label],-1)
        cv2.putText(im, labels_dict[label], (x, y-10),cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,255,255),2)
    # Show the image
    cv2.imshow('LIVE', im)
    key = cv2.waitKey(10)
    # if Esc key is press then break out of the loop
    if key == 27: #The Esc key
        break
# Stop video
webcam.release()
# Close all started windows
cv2.destroyAllWindows()
| [
"matplotlib.pyplot.title",
"os.mkdir",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"numpy.argmax",
"matplotlib.pyplot.figure",
"cv2.rectangle",
"cv2.imshow",
"tensorflow.keras.layers.Flatten",
"numpy.reshape"... | [((3913, 4101), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'rotation_range': '(40)', 'width_shift_range': '(0.2)', 'height_shift_range': '(0.2)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)', 'fill_mode': '"""nearest"""'}), "(rescale=1.0 / 255, rotation_range=40, width_shift_range=\n 0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True, fill_mode='nearest')\n", (3931, 4101), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((4735, 4772), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (4753, 4772), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((5291, 5338), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'acc', '"""r"""', '"""Training Accuracy"""'], {}), "(epochs, acc, 'r', 'Training Accuracy')\n", (5299, 5338), True, 'import matplotlib.pyplot as plt\n'), ((5340, 5393), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_acc', '"""b"""', '"""Validation Accuracy"""'], {}), "(epochs, val_acc, 'b', 'Validation Accuracy')\n", (5348, 5393), True, 'import matplotlib.pyplot as plt\n'), ((5395, 5440), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation accuracy"""'], {}), "('Training and validation accuracy')\n", (5404, 5440), True, 'import matplotlib.pyplot as plt\n'), ((5442, 5454), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5452, 5454), True, 'import matplotlib.pyplot as plt\n'), ((5458, 5502), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss', '"""r"""', '"""Training Loss"""'], {}), "(epochs, loss, 'r', 'Training Loss')\n", (5466, 5502), True, 'import matplotlib.pyplot as plt\n'), ((5504, 5554), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_loss', '"""b"""', '"""Validation Loss"""'], {}), "(epochs, 
val_loss, 'b', 'Validation Loss')\n", (5512, 5554), True, 'import matplotlib.pyplot as plt\n'), ((5558, 5599), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation loss"""'], {}), "('Training and validation loss')\n", (5567, 5599), True, 'import matplotlib.pyplot as plt\n'), ((5647, 5707), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (5668, 5707), False, 'import cv2\n'), ((5818, 5837), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (5834, 5837), False, 'import cv2\n'), ((5892, 5952), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (5913, 5952), False, 'import cv2\n'), ((7269, 7292), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7290, 7292), False, 'import cv2\n'), ((823, 881), 'os.mkdir', 'os.mkdir', (['"""C:/Users/91797/Desktop/Mask-CNN/withm-withoutm"""'], {}), "('C:/Users/91797/Desktop/Mask-CNN/withm-withoutm')\n", (831, 881), False, 'import os\n'), ((887, 954), 'os.mkdir', 'os.mkdir', (['"""C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training"""'], {}), "('C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training')\n", (895, 954), False, 'import os\n'), ((960, 1026), 'os.mkdir', 'os.mkdir', (['"""C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing"""'], {}), "('C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing')\n", (968, 1026), False, 'import os\n'), ((1032, 1105), 'os.mkdir', 'os.mkdir', (['"""C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training/withm"""'], {}), "('C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training/withm')\n", (1040, 1105), False, 'import os\n'), ((1111, 1187), 'os.mkdir', 'os.mkdir', (['"""C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training/withoutm"""'], {}), "('C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/training/withoutm')\n", (1119, 1187), False, 'import 
os\n'), ((1193, 1265), 'os.mkdir', 'os.mkdir', (['"""C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing/withm"""'], {}), "('C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing/withm')\n", (1201, 1265), False, 'import os\n'), ((1271, 1346), 'os.mkdir', 'os.mkdir', (['"""C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing/withoutm"""'], {}), "('C:/Users/91797/Desktop/Mask-CNN/withm-withoutm/testing/withoutm')\n", (1279, 1346), False, 'import os\n'), ((1479, 1497), 'os.listdir', 'os.listdir', (['SOURCE'], {}), '(SOURCE)\n', (1489, 1497), False, 'import os\n'), ((6008, 6026), 'cv2.flip', 'cv2.flip', (['im', '(1)', '(1)'], {}), '(im, 1, 1)\n', (6016, 6026), False, 'import cv2\n'), ((6110, 6168), 'cv2.resize', 'cv2.resize', (['im', '(im.shape[1] // size, im.shape[0] // size)'], {}), '(im, (im.shape[1] // size, im.shape[0] // size))\n', (6120, 6168), False, 'import cv2\n'), ((7051, 7073), 'cv2.imshow', 'cv2.imshow', (['"""LIVE"""', 'im'], {}), "('LIVE', im)\n", (7061, 7073), False, 'import cv2\n'), ((7087, 7102), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (7098, 7102), False, 'import cv2\n'), ((744, 767), 'os.listdir', 'os.listdir', (['"""with_mask"""'], {}), "('with_mask')\n", (754, 767), False, 'import os\n'), ((781, 807), 'os.listdir', 'os.listdir', (['"""without_mask"""'], {}), "('without_mask')\n", (791, 807), False, 'import os\n'), ((2142, 2183), 'shutil.copyfile', 'copyfile', (['temp_train_set', 'final_train_set'], {}), '(temp_train_set, final_train_set)\n', (2150, 2183), False, 'from shutil import copyfile\n'), ((2330, 2369), 'shutil.copyfile', 'copyfile', (['temp_test_set', 'final_test_set'], {}), '(temp_test_set, final_test_set)\n', (2338, 2369), False, 'from shutil import copyfile\n'), ((3089, 3123), 'os.listdir', 'os.listdir', (['training_with_mask_dir'], {}), '(training_with_mask_dir)\n', (3099, 3123), False, 'import os\n'), ((3137, 3170), 'os.listdir', 'os.listdir', (['testing_with_mask_dir'], {}), '(testing_with_mask_dir)\n', (3147, 
3170), False, 'import os\n'), ((3184, 3221), 'os.listdir', 'os.listdir', (['training_without_mask_dir'], {}), '(training_without_mask_dir)\n', (3194, 3221), False, 'import os\n'), ((3235, 3271), 'os.listdir', 'os.listdir', (['testing_without_mask_dir'], {}), '(testing_without_mask_dir)\n', (3245, 3271), False, 'import os\n'), ((3319, 3405), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(100)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': '(150, 150, 3)'}), "(100, (3, 3), activation='relu', input_shape=(150, \n 150, 3))\n", (3341, 3405), True, 'import tensorflow as tf\n'), ((3406, 3440), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (3434, 3440), True, 'import tensorflow as tf\n'), ((3452, 3506), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(100)', '(3, 3)'], {'activation': '"""relu"""'}), "(100, (3, 3), activation='relu')\n", (3474, 3506), True, 'import tensorflow as tf\n'), ((3512, 3546), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (3540, 3546), True, 'import tensorflow as tf\n'), ((3560, 3585), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (3583, 3585), True, 'import tensorflow as tf\n'), ((3592, 3620), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (3615, 3620), True, 'import tensorflow as tf\n'), ((3627, 3671), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (3648, 3671), True, 'import tensorflow as tf\n'), ((3677, 3723), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (3698, 3723), True, 'import tensorflow as tf\n'), ((6498, 6530), 'cv2.resize', 'cv2.resize', (['face_img', '(150, 150)'], {}), '(face_img, (150, 150))\n', (6508, 6530), False, 'import cv2\n'), 
((6581, 6621), 'numpy.reshape', 'np.reshape', (['normalized', '(1, 150, 150, 3)'], {}), '(normalized, (1, 150, 150, 3))\n', (6591, 6621), True, 'import numpy as np\n'), ((6638, 6659), 'numpy.vstack', 'np.vstack', (['[reshaped]'], {}), '([reshaped])\n', (6647, 6659), True, 'import numpy as np\n'), ((6794, 6857), 'cv2.rectangle', 'cv2.rectangle', (['im', '(x, y)', '(x + w, y + h)', 'color_dict[label]', '(2)'], {}), '(im, (x, y), (x + w, y + h), color_dict[label], 2)\n', (6807, 6857), False, 'import cv2\n'), ((6857, 6922), 'cv2.rectangle', 'cv2.rectangle', (['im', '(x, y - 40)', '(x + w, y)', 'color_dict[label]', '(-1)'], {}), '(im, (x, y - 40), (x + w, y), color_dict[label], -1)\n', (6870, 6922), False, 'import cv2\n'), ((6922, 7026), 'cv2.putText', 'cv2.putText', (['im', 'labels_dict[label]', '(x, y - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(255, 255, 255)', '(2)'], {}), '(im, labels_dict[label], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, \n 0.8, (255, 255, 255), 2)\n', (6933, 7026), False, 'import cv2\n'), ((1551, 1572), 'os.path.getsize', 'os.path.getsize', (['data'], {}), '(data)\n', (1566, 1572), False, 'import os\n'), ((6749, 6774), 'numpy.argmax', 'np.argmax', (['result'], {'axis': '(1)'}), '(result, axis=1)\n', (6758, 6774), True, 'import numpy as np\n')] |
"""
Electrodes define any type of sources and receivers used in a survey.
"""
# Copyright 2018-2022 The emsig community.
#
# This file is part of emg3d.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from copy import deepcopy
import numpy as np
from scipy.special import sindg, cosdg
from emg3d import fields, utils
__all__ = [
'Wire', 'Point', 'Dipole', 'Source',
'TxElectricPoint', 'TxMagneticPoint',
'TxElectricDipole', 'TxMagneticDipole',
'TxElectricWire',
'RxElectricPoint', 'RxMagneticPoint',
'rotation', 'point_to_dipole', 'dipole_to_point', 'point_to_square_loop',
]
# BASE ELECTRODE TYPES
class Wire:
"""A wire consists of an arbitrary number of electrodes.
.. note::
Use any of the Tx*/Rx* classes to create sources and receivers, not
this class.
Parameters
----------
coordinates : array_like
Electrode locations of shape (n, 3), where n is the number of
electrodes: ``[[x1, y1, z1], [...], [xn, yn, zn]]``.
"""
# Attributes which are stored/required with to_dict/from_dict.
_serialize = {'coordinates'}
def __init__(self, coordinates):
"""Initiate an electrode."""
# Cast and check dimension and shape.
self._points = np.asarray(np.atleast_2d(coordinates), dtype=float)
if not (self._points.ndim == 2 and self._points.shape[1] == 3):
raise ValueError(
"`coordinates` must be of shape (x, 3), provided: "
f"{coordinates}"
)
def __eq__(self, electrode):
"""Compare two electrodes."""
# Check if same Type.
equal = self.__class__.__name__ == electrode.__class__.__name__
# Check input.
if equal:
for name in self._serialize:
comp = getattr(self, name)
if isinstance(comp, np.ndarray):
equal *= np.allclose(comp, getattr(electrode, name))
else:
equal *= comp == getattr(electrode, name)
return bool(equal)
def __repr__(self):
"""Simple representation."""
s0 = (f"{self.__class__.__name__}: "
f"{self._repr_add if hasattr(self, '_repr_add') else ''}\n")
s1 = (f" center={{{self.center[0]:,.1f}; "
f"{self.center[1]:,.1f}; {self.center[2]:,.1f}}} m; ")
s2 = (f"n={self.segment_n}; l={self.length:,.1f} m")
return s0 + s1 + s2 if len(s1+s2) < 80 else s0 + s1 + "\n " + s2
def copy(self):
"""Return a copy of the Survey."""
return self.from_dict(self.to_dict(True))
def to_dict(self, copy=False):
"""Store the necessary information of the Electrode in a dict.
Parameters
----------
copy : bool, default: False
If True, returns a deep copy of the dict.
Returns
-------
out : dict
Dictionary containing all information to re-create the Electrode.
"""
out = {
'__class__': self.__class__.__name__,
**{prop: getattr(self, prop) for prop in self._serialize},
}
if copy:
return deepcopy(out)
else:
return out
@classmethod
def from_dict(cls, inp):
"""Convert dictionary into its class instance.
Parameters
----------
inp : dict
Dictionary as obtained from the classes' ``to_dict``.
Returns
-------
electrode : {Tx*, Rx*}
A source or receiver instance.
"""
return cls(**{k: v for k, v in inp.items() if k != '__class__'})
    @property
    def points(self):
        """Electrode locations (n, 3)."""
        return self._points
    @property
    def coordinates(self):
        """Electrode coordinate as accepted by its class."""
        # Subclasses that accept a special input format (e.g. point/dipole)
        # store it as ``_coordinates``; otherwise return the raw points.
        if hasattr(self, '_coordinates'):
            return self._coordinates
        else:
            return self._points
    @property
    def xtype(self):
        """Flag of the type of electrodes.
        In reality, all electrodes are electric. But we do idealize some loops
        as theoretical "magnetic dipoles" (TxMagneticDipole, RxMagneticPoint).
        ``xtype`` is a flag for this.
        """
        # Derived from the class name on first access, then cached.
        if not hasattr(self, '_xtype'):
            if 'Magnetic' in self.__class__.__name__:
                self._xtype = 'magnetic'
            else:  # Default
                self._xtype = 'electric'
        return self._xtype
    @property
    def center(self):
        """Center point of all unique electrodes."""
        # ``np.unique`` avoids double-counting the repeated endpoint of a
        # closed loop; cached after the first computation.
        if not hasattr(self, '_center'):
            self._center = np.unique(self.points, axis=0).mean(axis=0)
        return self._center
    @property
    def length(self):
        """Total length of all dipole segments formed by the electrodes."""
        # Also caches the per-segment lengths as a side effect.
        if not hasattr(self, '_length'):
            lengths = np.linalg.norm(np.diff(self.points, axis=0), axis=1)
            self._segment_lengths = lengths
            self._length = lengths.sum()
        return self._length
    @property
    def segment_lengths(self):
        """Length of each individual dipole segment in the wire."""
        if not hasattr(self, '_segment_lengths'):
            _ = self.length  # Sets length and segment_lengths
        return self._segment_lengths
    @property
    def segment_n(self):
        """Number of dipole segments in the wire."""
        return len(self.segment_lengths)
    @property
    def _prefix(self):
        """Prefix used for collecting Tx/Rx in Surveys."""
        # E.g. 'TxElectricDipole' -> 'Tx' + 'TED'[1:] -> 'TxED'.
        name = self.__class__.__name__
        return name[:2] + ''.join(c for c in name if c.isupper())[1:]
class Point(Wire):
    """A point electrode defined by its center, azimuth, and elevation.

    A ``Point`` is the special case of a ``Wire`` with a single electrode;
    it has no extent (an infinitesimal dipole) and is mainly used by
    receivers to sample the field at one location.

    .. note::

        Use any of the Tx*/Rx* classes to create sources and receivers, not
        this class.

    Parameters
    ----------
    coordinates : array_like
        Point defined as (x, y, z, azimuth, elevation)
    """
    def __init__(self, coordinates):
        """Initiate an electric point."""
        if len(coordinates) != 5:
            raise ValueError(
                "Point coordinates are wrong defined. They must be "
                "defined as (x, y, z, azimuth, elevation)."
                f"Provided coordinates: {coordinates}."
            )
        # Keep the full 5-tuple; the parent only receives the location.
        self._coordinates = np.asarray(coordinates, dtype=np.float64).squeeze()
        super().__init__(coordinates[:3])
    def __repr__(self):
        """Simple representation."""
        add = self._repr_add if hasattr(self, '_repr_add') else ''
        s0 = f"{self.__class__.__name__}: {add}\n"
        s1 = (f"    x={self.center[0]:,.1f} m, "
              f"y={self.center[1]:,.1f} m, z={self.center[2]:,.1f} m, ")
        s2 = f"θ={self.azimuth:.1f}°, φ={self.elevation:.1f}°"
        if len(s1 + s2) < 80:
            return s0 + s1 + s2
        return s0 + s1 + "\n    " + s2
    @property
    def azimuth(self):
        """Anticlockwise rotation (°) from x-axis towards y-axis."""
        return self._coordinates[3]
    @property
    def elevation(self):
        """Anticlockwise (upwards) rotation (°) from the xy-plane."""
        return self._coordinates[4]
class Dipole(Wire):
    """A dipole consists of two electrodes in a straight line.
    A ``Dipole`` is a special case of a ``Wire`` that consists of exactly two
    electrodes. A dipole has therefore an azimuth and an elevation. It
    corresponds to one segment in a wire.
    .. note::
        Use any of the Tx*/Rx* classes to create sources and receivers, not
        this class.
    Parameters
    ----------
    coordinates : array_like
        Dipole coordinates. Three formats are accepted:
        - [[x1, y1, z1], [x2, y2, z2]];
        - (x1, x2, y1, y2, z1, z2);
        - (x, y, z, azimuth, elevation); this format takes also the ``length``
          parameter.
    length : float, default: 1.0
        Length of the dipole (m). This parameter is only used if the provided
        coordinates are in the format (x, y, z, azimuth, elevation).
    """
    def __init__(self, coordinates, length=1.0):
        """Initiate an electric dipole."""
        # Cast coordinates.
        coordinates = np.asarray(coordinates, dtype=np.float64).squeeze()
        # Check which format was provided (exactly one flag will be True
        # for valid input).
        is_point = coordinates.shape == (5, )
        is_flat = coordinates.shape == (6, )
        is_dipole = coordinates.shape == (2, 3)
        # Store depending on format.
        if is_point:
            # Add length to attributes which have to be serialized.
            self._serialize = {'length'} | self._serialize
            # If magnetic, get the loop which area corresponds to length.
            if self.xtype == 'magnetic':
                points = point_to_square_loop(coordinates, length)
            # If electric, get the dipole.
            else:
                points = point_to_dipole(coordinates, length)
            # Store length and original input coordinates.
            self._length = length
            self._coordinates = coordinates
        elif is_flat or is_dipole:
            if is_flat:
                # Re-arrange (x1, x2, y1, y2, z1, z2) into two points.
                points = np.array([coordinates[::2], coordinates[1::2]])
                # Store original input.
                self._coordinates = coordinates
            else:
                # Input is already in the format for Electrode.
                points = coordinates
            # If magnetic, get the loop which area corresponds to its length.
            if self.xtype == 'magnetic':
                azimuth, elevation, length = dipole_to_point(points)
                center = tuple(np.sum(points, 0)/2)
                coo = (*center, azimuth, elevation)
                points = point_to_square_loop(coo, length)
                # Store original input.
                self._coordinates = coordinates
            # Ensure the two poles are distinct.
            if np.allclose(points[0, :], points[1, :]):
                raise ValueError(
                    "The two electrodes are identical, use the format "
                    "(x, y, z, azimuth, elevation) instead. "
                    f"Provided coordinates: {coordinates}."
                )
        else:
            raise ValueError(
                "Coordinates are wrong defined. They must be defined either "
                "as a point, (x, y, z, azimuth, elevation), or as two points, "
                "(x1, x2, y1, y2, z1, z2) or [[x1, y1, z1], [x2, y2, z2]]. "
                f"Provided coordinates: {coordinates}."
            )
        super().__init__(points)
    def __repr__(self):
        """Simple representation."""
        s0 = (f"{self.__class__.__name__}: "
              f"{self._repr_add if hasattr(self, '_repr_add') else ''}\n")
        # Point dipole: show center, angles, and length.
        if self.coordinates.size == 5:
            s1 = (f"    center={{{self.center[0]:,.1f}; "
                  f"{self.center[1]:,.1f}; {self.center[2]:,.1f}}} m; ")
            s2 = (f"θ={self.azimuth:.1f}°, φ={self.elevation:.1f}°; "
                  f"l={self.length:,.1f} m")
        # Finite dipole: show both electrodes.
        else:
            s1 = (f"    e1={{{self.points[0, 0]:,.1f}; "
                  f"{self.points[0, 1]:,.1f}; {self.points[0, 2]:,.1f}}} m; ")
            s2 = (f"e2={{{self.points[1, 0]:,.1f}; "
                  f"{self.points[1, 1]:,.1f}; {self.points[1, 2]:,.1f}}} m")
        return s0 + s1 + s2 if len(s1+s2) < 80 else s0 + s1 + "\n    " + s2
    @property
    def azimuth(self):
        """Anticlockwise rotation (°) from x-axis towards y-axis."""
        # Computed once, cached together with the elevation.
        if not hasattr(self, '_azimuth'):
            if len(self.coordinates) == 5:
                # Point format already carries (azimuth, elevation).
                out = self._coordinates[3:]
            else:
                # Derive the angles from the two electrode positions.
                out = dipole_to_point(self._points)[:2]
            self._azimuth, self._elevation = out
        return self._azimuth
    @property
    def elevation(self):
        """Anticlockwise (upwards) rotation (°) from the xy-plane."""
        if not hasattr(self, '_elevation'):
            _ = self.azimuth  # Sets azimuth and elevation
        return self._elevation
# SOURCES
class Source(Wire):
    """A source adds strength to a Wire instance.

    .. note::

        Use any of the Tx* classes to create sources, not this class.

    Parameters
    ----------
    strength : {float, complex}
        Source strength (A).
    """
    # ``strength`` must survive to_dict/from_dict round trips.
    _serialize = {'strength'} | Wire._serialize
    def __init__(self, strength, **kwargs):
        """Initiate an electric source."""
        self._strength = strength
        # Picked up by Wire.__repr__ for the first line.
        self._repr_add = f"{self.strength:,.1f} A;"
        super().__init__(**kwargs)
    @property
    def strength(self):
        """Source strength (A)."""
        return self._strength
    def get_field(self, grid, frequency):
        """Return the source field for the given grid and frequency."""
        return fields.get_source_field(grid, self, frequency)
@utils._known_class
class TxElectricPoint(Source, Point):
    """Electric point source.

    Parameters
    ----------
    coordinates : array_like
        Point coordinates in the format (x, y, z, azimuth, elevation).
    strength : float, default: 1.0
        Source strength (A).
    """
    def __init__(self, coordinates, strength=1.0):
        """Initiate an electric point source."""
        super().__init__(strength=strength, coordinates=coordinates)
@utils._known_class
class TxMagneticPoint(Source, Point):
    """Magnetic point source.

    .. note::

        The magnetic point source is not implemented for magnetic
        permeability.

    Parameters
    ----------
    coordinates : array_like
        Point coordinates in the format (x, y, z, azimuth, elevation).
    strength : float, default: 1.0
        Source strength (A). (The source field is always an electric field;
        here it is the electric field due to a magnetic point.)
    """
    def __init__(self, coordinates, strength=1.0):
        """Initiate an magnetic point source."""
        super().__init__(strength=strength, coordinates=coordinates)
@utils._known_class
class TxElectricDipole(Source, Dipole):
    """Electric dipole source: two electrodes connected by a wire.

    Parameters
    ----------
    coordinates : array_like
        Dipole coordinates. Three formats are accepted:

        - [[x1, y1, z1], [x2, y2, z2]];
        - (x1, x2, y1, y2, z1, z2);
        - (x, y, z, azimuth, elevation); this format takes also the
          ``length`` parameter.
    strength : float, default: 1.0
        Source strength (A).
    length : float, default: 1.0
        Dipole length (m); only used with the point format
        (x, y, z, azimuth, elevation).
    """
    def __init__(self, coordinates, strength=1.0, length=1.0):
        """Initiate an electric dipole source."""
        super().__init__(
            length=length, strength=strength, coordinates=coordinates)
@utils._known_class
class TxMagneticDipole(Source, Dipole):
    """Magnetic dipole source using a square loop perpendicular to the dipole.

    The magnetic dipole is approximated by an electric square loop that is
    perpendicular to, and centered on, the dipole; the loop area equals the
    dipole length so that both represent the same strength.

    Parameters
    ----------
    coordinates : array_like
        Dipole coordinates. Three formats are accepted:

        - [[x1, y1, z1], [x2, y2, z2]];
        - (x1, x2, y1, y2, z1, z2);
        - (x, y, z, azimuth, elevation); this format takes also the
          ``length`` parameter.
    strength : float, default: 1.0
        Source strength (A).
    length : float, default: 1.0
        Dipole length (m); only used with the point format
        (x, y, z, azimuth, elevation).
    """
    def __init__(self, coordinates, strength=1.0, length=1.0):
        """Initiate a magnetic source."""
        super().__init__(
            length=length, strength=strength, coordinates=coordinates)
@utils._known_class
class TxElectricWire(Source, Wire):
    """Electric wire source consisting of a series of dipoles.

    Parameters
    ----------
    coordinates : array_like
        Electrode locations of shape (n, 3), where n is the number of
        electrodes: ``[[x1, y1, z1], [...], [xn, yn, zn]]``.
    strength : float, default: 1.0
        Source strength (A).
    """
    def __init__(self, coordinates, strength=1.0):
        """Initiate an electric wire source."""
        super().__init__(strength=strength, coordinates=coordinates)
# RECEIVERS
class Receiver(Wire):
    """A receiver; positioned either absolutely or relative to a source.

    .. note::

        Use any of the Rx* classes to create receivers, not this class.

    Parameters
    ----------
    relative : bool
        If False, the coordinates are absolute coordinates. If True, the
        coordinates define the offset from the source center.
        Note that ``relative=True`` makes only sense in combination with
        sources, such as is the case in a :class:`emg3d.surveys.Survey`.
    data_type : str
        Data type of the measured responses. Currently implemented is only
        ``'complex'``.
    """
    # ``relative`` and ``data_type`` must survive (de-)serialization.
    _serialize = {'relative', 'data_type'} | Wire._serialize
    def __init__(self, relative, data_type, **kwargs):
        """Initiate a receiver."""
        # Only complex-valued responses are implemented so far.
        dtype = data_type.lower()
        if dtype != 'complex':
            raise ValueError(f"Unknown data type '{data_type}'.")
        self._relative = relative
        self._data_type = dtype
        # Picked up by Wire.__repr__ for the first line.
        self._repr_add = (
            f"{['absolute', 'relative'][self.relative]}; {self.data_type};"
        )
        super().__init__(**kwargs)
    @property
    def relative(self):
        """True if coordinates are relative to source, False if absolute."""
        return self._relative
    @property
    def data_type(self):
        """Data type of the measured responses."""
        return self._data_type
    def center_abs(self, source):
        """Returns points as absolute positions."""
        if not self.relative:
            return self.center
        return source.center + self.center
    def coordinates_abs(self, source):
        """Returns coordinates as absolute positions."""
        center = self.center_abs(source)
        if hasattr(self, 'azimuth'):
            return (*center, self.azimuth, self.elevation)
        return center
@utils._known_class
class RxElectricPoint(Receiver, Point):
    """Electric point receiver (point sampling the field).

    Parameters
    ----------
    coordinates : array_like
        Point defined as (x, y, z, azimuth, elevation)
    relative : bool, default: False
        Absolute coordinates if False; offset from the source center if True
        (only meaningful together with sources, e.g., within a
        :class:`emg3d.surveys.Survey`).
    data_type : str, default: 'complex'
        Data type of the measured responses; only the default is implemented.
    """
    # Tx counterpart of this receiver (presumably for adjoint fields).
    _adjoint_source = TxElectricPoint
    def __init__(self, coordinates, relative=False, data_type='complex'):
        """Initiate an electric point receiver."""
        super().__init__(
            data_type=data_type, relative=relative, coordinates=coordinates
        )
@utils._known_class
class RxMagneticPoint(Receiver, Point):
    """Magnetic point receiver (point sampling the field).

    Parameters
    ----------
    coordinates : array_like
        Point defined as (x, y, z, azimuth, elevation)
    relative : bool, default: False
        Absolute coordinates if False; offset from the source center if True
        (only meaningful together with sources, e.g., within a
        :class:`emg3d.surveys.Survey`).
    data_type : str, default: 'complex'
        Data type of the measured responses; only the default is implemented.
    """
    # Tx counterpart of this receiver (presumably for adjoint fields).
    _adjoint_source = TxMagneticPoint
    def __init__(self, coordinates, relative=False, data_type='complex'):
        """Initiate a magnetic point receiver."""
        super().__init__(
            data_type=data_type, relative=relative, coordinates=coordinates
        )
# ROTATIONS AND CONVERSIONS
def point_to_dipole(point, length, deg=True):
    """Return the two dipole electrodes for a center/angles/length point.

    Spherical to Cartesian.

    Parameters
    ----------
    point : tuple
        Point coordinates in the form of (x, y, z, azimuth, elevation).
    length : float
        Dipole length (m).
    deg : bool, default: True
        Angles are in degrees if True, radians if False.

    Returns
    -------
    dipole : ndarray
        Coordinates of shape (2, 3): [[x1, y1, z1], [x2, y2, z2]].
    """
    # Unit direction scaled to half the dipole length.
    half = rotation(point[3], point[4], deg=deg) * length / 2
    # One electrode half a dipole on each side of the center.
    return point[:3] + np.array([-half, half])
def dipole_to_point(dipole, deg=True):
    """Return azimuth, elevation, and length for an electrode pair.

    Cartesian to spherical.

    Parameters
    ----------
    dipole : ndarray
        Dipole coordinates of shape (2, 3): [[x1, y1, z1], [x2, y2, z2]].
    deg : bool, default: True
        Return angles in degrees if True, radians if False.

    Returns
    -------
    azimuth : float
        Anticlockwise angle from x-axis towards y-axis.
    elevation : float
        Anticlockwise (upwards) angle from the xy-plane towards z-axis.
    length : float
        Dipole length (m).
    """
    # Component-wise distances between the two electrodes.
    dx, dy, dz = np.diff(dipole.T).squeeze()
    length = np.linalg.norm([dx, dy, dz])
    # Angles via the argument of complex numbers:
    # azimuth is equivalent to np.arctan2(dy, dx).
    azimuth = np.angle(dx + 1j*dy, deg=deg)
    horizontal = np.sqrt(dx**2 + dy**2)
    elevation = np.angle(horizontal + 1j*dz, deg=deg)
    return azimuth, elevation, length
def point_to_square_loop(source, area):
    """Return the points of a square loop perpendicular to a source dipole.

    Parameters
    ----------
    source : tuple
        Source dipole coordinates in the form of
        (x, y, z, azimuth, elevation).
    area : float
        Area of the square loop (m^2).

    Returns
    -------
    out : ndarray
        Array of shape (5, 3): the x/y/z-coordinates of the five points
        describing a closed square perpendicular to the dipole.
    """
    # Half of the square's diagonal for the given area.
    half_diagonal = np.sqrt(area/2)
    # Two perpendicular in-plane directions (both normal to the dipole).
    horizontal = rotation(source[3]+90.0, 0.0) * half_diagonal
    vertical = rotation(source[3], source[4]+90.0) * half_diagonal
    # Four corners plus the repeated first corner to close the loop.
    corners = np.stack(
        [horizontal, vertical, -horizontal, -vertical, horizontal])
    return source[:3] + corners
def rotation(azimuth, elevation, deg=True):
    """Rotation factors for a RHS coordinate system with positive z upwards.

    Multiplying the returned factors with a length yields the corresponding
    Cartesian coordinates (the factors are the rotation of a unit radius).

    Spherical convention:

    - azimuth θ: anticlockwise from x-axis towards y-axis, (-180°, +180°].
    - elevation φ: anticlockwise (upwards) from the xy-plane towards z-axis
      [-90°, +90°].
    - radius (m).

    Cartesian convention: x is Easting, y is Northing, z positive up (RHS).

    Parameters
    ----------
    azimuth : float
        Anticlockwise angle from x-axis towards y-axis.
    elevation : float
        Anticlockwise (upwards) angle from the xy-plane towards z-axis.
    deg : bool, default: True
        Angles are in degrees if True, radians if False.

    Returns
    -------
    rot : ndarray
        Rotation factors (x, y, z).
    """
    # Degree-exact trig (sindg/cosdg) when angles are in degrees.
    cos, sin = (cosdg, sindg) if deg else (np.cos, np.sin)
    horizontal = cos(elevation)
    return np.array([cos(azimuth)*horizontal,
                     sin(azimuth)*horizontal,
                     sin(elevation)])
| [
"numpy.stack",
"numpy.atleast_2d",
"copy.deepcopy",
"numpy.sum",
"numpy.angle",
"numpy.asarray",
"numpy.allclose",
"emg3d.fields.get_source_field",
"numpy.diff",
"numpy.array",
"numpy.linalg.norm",
"numpy.unique",
"numpy.sqrt"
] | [((23108, 23136), 'numpy.linalg.norm', 'np.linalg.norm', (['[dx, dy, dz]'], {}), '([dx, dy, dz])\n', (23122, 23136), True, 'import numpy as np\n'), ((23190, 23223), 'numpy.angle', 'np.angle', (['(dx + 1.0j * dy)'], {'deg': 'deg'}), '(dx + 1.0j * dy, deg=deg)\n', (23198, 23223), True, 'import numpy as np\n'), ((23926, 23943), 'numpy.sqrt', 'np.sqrt', (['(area / 2)'], {}), '(area / 2)\n', (23933, 23943), True, 'import numpy as np\n'), ((13850, 13896), 'emg3d.fields.get_source_field', 'fields.get_source_field', (['grid', 'self', 'frequency'], {}), '(grid, self, frequency)\n', (13873, 13896), False, 'from emg3d import fields, utils\n'), ((22384, 22405), 'numpy.array', 'np.array', (['[-xyz, xyz]'], {}), '([-xyz, xyz])\n', (22392, 22405), True, 'import numpy as np\n'), ((24082, 24139), 'numpy.stack', 'np.stack', (['[xyz_hor, xyz_ver, -xyz_hor, -xyz_ver, xyz_hor]'], {}), '([xyz_hor, xyz_ver, -xyz_hor, -xyz_ver, xyz_hor])\n', (24090, 24139), True, 'import numpy as np\n'), ((1773, 1799), 'numpy.atleast_2d', 'np.atleast_2d', (['coordinates'], {}), '(coordinates)\n', (1786, 1799), True, 'import numpy as np\n'), ((3685, 3698), 'copy.deepcopy', 'deepcopy', (['out'], {}), '(out)\n', (3693, 3698), False, 'from copy import deepcopy\n'), ((23067, 23084), 'numpy.diff', 'np.diff', (['dipole.T'], {}), '(dipole.T)\n', (23074, 23084), True, 'import numpy as np\n'), ((23277, 23303), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (23284, 23303), True, 'import numpy as np\n'), ((5432, 5460), 'numpy.diff', 'np.diff', (['self.points'], {'axis': '(0)'}), '(self.points, axis=0)\n', (5439, 5460), True, 'import numpy as np\n'), ((7163, 7204), 'numpy.asarray', 'np.asarray', (['coordinates'], {'dtype': 'np.float64'}), '(coordinates, dtype=np.float64)\n', (7173, 7204), True, 'import numpy as np\n'), ((9048, 9089), 'numpy.asarray', 'np.asarray', (['coordinates'], {'dtype': 'np.float64'}), '(coordinates, dtype=np.float64)\n', (9058, 9089), True, 'import numpy as 
np\n'), ((10805, 10844), 'numpy.allclose', 'np.allclose', (['points[0, :]', 'points[1, :]'], {}), '(points[0, :], points[1, :])\n', (10816, 10844), True, 'import numpy as np\n'), ((5169, 5199), 'numpy.unique', 'np.unique', (['self.points'], {'axis': '(0)'}), '(self.points, axis=0)\n', (5178, 5199), True, 'import numpy as np\n'), ((10042, 10089), 'numpy.array', 'np.array', (['[coordinates[::2], coordinates[1::2]]'], {}), '([coordinates[::2], coordinates[1::2]])\n', (10050, 10089), True, 'import numpy as np\n'), ((10519, 10536), 'numpy.sum', 'np.sum', (['points', '(0)'], {}), '(points, 0)\n', (10525, 10536), True, 'import numpy as np\n')] |
import os
import numpy as np
from spellbook.ml.learn import stack_arrays
def test_stack_arrays():
    """Round-trip three (3, 2) arrays through an .npz file and stack them."""
    f1 = np.random.random((3, 2))
    f2 = np.zeros((3, 2))
    f3 = np.ones((3, 2))
    np.savez("temp.npz", F1=f1, F2=f2, F3=f3)
    loaded = np.load("temp.npz")
    # NOTE: the file is removed before the (lazy) npz object is consumed;
    # this works on POSIX because the file handle stays open.
    os.remove("temp.npz")
    result = stack_arrays(loaded, "F1,F2,F3")
    assert result.shape == (2, 9)
| [
"numpy.load",
"os.remove",
"numpy.zeros",
"numpy.ones",
"numpy.random.random",
"spellbook.ml.learn.stack_arrays",
"numpy.savez"
] | [((114, 138), 'numpy.random.random', 'np.random.random', (['(3, 2)'], {}), '((3, 2))\n', (130, 138), True, 'import numpy as np\n'), ((151, 167), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (159, 167), True, 'import numpy as np\n'), ((180, 195), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (187, 195), True, 'import numpy as np\n'), ((200, 250), 'numpy.savez', 'np.savez', (['"""temp.npz"""'], {'F1': 'data1', 'F2': 'data2', 'F3': 'data3'}), "('temp.npz', F1=data1, F2=data2, F3=data3)\n", (208, 250), True, 'import numpy as np\n'), ((264, 283), 'numpy.load', 'np.load', (['"""temp.npz"""'], {}), "('temp.npz')\n", (271, 283), True, 'import numpy as np\n'), ((288, 309), 'os.remove', 'os.remove', (['"""temp.npz"""'], {}), "('temp.npz')\n", (297, 309), False, 'import os\n'), ((354, 389), 'spellbook.ml.learn.stack_arrays', 'stack_arrays', (['loaded', 'feature_names'], {}), '(loaded, feature_names)\n', (366, 389), False, 'from spellbook.ml.learn import stack_arrays\n')] |
from tensorflow import reduce_sum, concat, reduce_max
from tensorflow.keras import Model
from tensorflow.keras.layers import Layer
from tensorflow.keras.activations import deserialize
from numpy import newaxis,prod
class L_module(Layer):
    """Stack of ``n_L`` learnable linear maps acting on the spatial axis.

    Each of the ``n_L`` maps is a chain of matrices (optional hidden widths,
    then a final (out_dim, in_dim) matrix) applied to the input by matmul,
    followed by the configured activation.
    """
    def __init__(self, n_L, out_dim=None, hidden_units=None, activation='linear', **kws):
        """Store the configuration; weights are created lazily in build().

        Parameters
        ----------
        n_L : int
            Number of independent linear maps.
        out_dim : int, optional
            Output (spatial) dimension; defaults to the input's spatial dim.
        hidden_units : list of int, optional
            Widths of optional intermediate matrices (default: none).
        activation : str or dict, default 'linear'
            Keras activation identifier, resolved via ``deserialize``.
        """
        super(L_module, self).__init__(**kws)
        # FIX: avoid the shared mutable-default pitfall of ``hidden_units=[]``.
        if hidden_units is None:
            hidden_units = []
        self.params = dict(n_L=n_L, activation=activation, hidden_units=hidden_units)
        self.out_dim = out_dim
    def build(self, input_shape):
        # Input assumed channel-last: (..., space_dim, feature_dim); the
        # weights act on the space axis -- TODO confirm against callers.
        space_dim, feature_dim = input_shape[-2:]
        out_dim = self.out_dim or space_dim
        n_L = self.params['n_L']
        hidden_units = self.params['hidden_units']
        # Optional hidden matrices, chained before the final map L.
        self.hidden_layers = []
        in_dim = space_dim
        for i, u in enumerate(hidden_units):
            self.hidden_layers += [self.add_weight(
                shape=(n_L, u, in_dim), initializer='glorot_normal',
                trainable=True, name='Lh_%d' % i)]
            in_dim = u
        self.L = self.add_weight(shape=(n_L, out_dim, in_dim),
                                 initializer='glorot_normal',
                                 trainable=True, name='L')
    def call(self, inputs):
        """Apply the chained matrices, then the activation."""
        x = inputs
        for layer in self.hidden_layers:
            x = layer @ x
        x = self.L @ x
        act = deserialize(self.params['activation'])
        return act(x)
class L_Conv(Model):
    def __init__(self,
                 num_filters: int,
                 kernel_size: int,
                 #stride: int = 1,
                 activation = 'relu',
                 L_hid =[], L_act = 'linear',
                 ):
        """Assumes channel-last input x: (batch, space, features).
        Uses stride to scale the space dimension: out_dim = int(space/stride).
        call: L @ (x @ W + b)

        Parameters
        ----------
        num_filters : int
            Number of output channels.
        kernel_size : int
            Number of kernel "taps"; the L operator produces kernel_size - 1
            transformed copies, the original input is concatenated back in.
        activation : str, default 'relu'
            Activation applied to the summed output.
        L_hid, L_act
            Hidden widths and activation forwarded to L_module.
        """
        super(L_Conv, self).__init__()
        self.num_filters = num_filters
        self.kernel_size = kernel_size
        # Stride is fixed to 1 here; L_Conv_strided overrides it.
        self.stride = 1 # stride
        self.activation = activation
        # n_L = kernel_size - 1 because the original input is concatenated
        # back in during call().
        self.L_params = dict(n_L = kernel_size-1, hidden_units = L_hid, activation = L_act)
    def get_L(self, input_shape):
        # assume channel last: (..., space_dim, feature_dim)
        space_dim, feature_dim = input_shape[-2:]
        out_dim = int(space_dim/self.stride)
        # num_L = kernel-1 b/c original input will be concat
        # L = self.add_weight(shape=(self.kernel_size - 1, out_dim, space_dim),
        #                        initializer='glorot_normal',
        #                        trainable=True, name='L')
        L = L_module(out_dim = out_dim, **self.L_params)
        return L
    def build(self, input_shape):
        # Learnable spatial operator plus per-tap dense weights and biases.
        self.L = self.get_L(input_shape)
        self.w = self.add_weight(shape=(self.kernel_size, input_shape[-1], self.num_filters),
                                initializer='glorot_normal',
                                trainable=True, name = 'w')
        self.b = self.add_weight(shape=(self.kernel_size,1, self.num_filters),
                                initializer='zeros',
                                trainable=True, name = 'b')
        self.activation_layer = deserialize(self.activation)
    def call(self, inputs):
        # Add a kernel axis for broadcasting against the taps.
        x0 = inputs[:,newaxis]
        # (batch, space, features) --> (batch, 1, space, features)
        #x = self.L @ x0
        x = self.L(x0)
        # (batch, 1, space, features) --> (batch, kernel_size, space/stride, features)
        x = concat([x, x0], axis = 1) # add back the original
        x = x @ self.w + self.b
        # (batch, kernel_size, space, features) --> (batch, kernel_size, space, num_filters)
        x = reduce_sum(x, axis = 1)
        return self.activation_layer(x)
class L_Conv_max(L_Conv):
    def __init__(self, stride: int = 1, **kws):
        """Does max_i(L_i L_j x)

        Applies the learned L operator twice and max-pools over one of the
        resulting kernel axes.

        NOTE(review): the ``stride`` parameter is accepted but never stored
        or used -- confirm whether it should set ``self.stride``.
        """
        super(L_Conv_max, self).__init__(**kws)
    def call(self, inputs):
        x0 = inputs[:,newaxis]
        #print(x0.shape)
        # (batch, space, features) --> (batch, 1, space, features)
        #x = self.L @ x0
        x = self.L(x0)
        # (batch, 1, space, features) --> (batch, kernel_size, space, features)
        #print(x.shape)
        x = concat([x, x0], axis = 1) # add back the original
        #print(x.shape)
        # apply L again, on a fresh kernel axis
        x1 = self.L(x[:,:,newaxis])
        # (batch, kernel_size, space, features) --> (batch, kernel_size, kernel_size, space, features)
        #print(x1.shape, x.shape)
        x = concat([x1, x[:,:,newaxis]], axis = 2) # add back the original
        x = x @ self.w + self.b
        # (batch, kernel_size, kernel_size, space, features) --> (batch, kernel_size, kernel_size, space, num_filters)
        x = reduce_sum(x, axis = 1)
        # (batch, kernel_size, space, features)
        # max pooling over the remaining kernel axis
        x = reduce_max(x, axis = 1)
        # (batch, space, features)
        return self.activation_layer(x)
class L_Conv_strided(L_Conv):
    def __init__(self, stride: int = 1, **kws):
        """Strided L_Conv: out_dim = int(space/stride), no residual concat.

        Assumes channel-last input x: (batch, space, features).
        call: L @ (x @ W + b)
        """
        super(L_Conv_strided, self).__init__(**kws)
        self.stride = stride
        # One extra L to make up for the missing residual concatenation.
        self.L_params['n_L'] += 1
    def call(self, inputs):
        # (batch, space, features) --> (batch, 1, space, features)
        expanded = inputs[:, newaxis]
        # --> (batch, kernel_size, space/stride, features)
        mixed = self.L(expanded)
        # --> (batch, kernel_size, space/stride, num_filters)
        out = mixed @ self.w + self.b
        # Collapse the kernel axis.
        out = reduce_sum(out, axis=1)
        return self.activation_layer(out)
"tensorflow.reduce_sum",
"tensorflow.concat",
"numpy.prod",
"tensorflow.reduce_max",
"tensorflow.keras.activations.deserialize"
] | [((590, 611), 'numpy.prod', 'prod', (['input_shape[1:]'], {}), '(input_shape[1:])\n', (594, 611), False, 'from numpy import newaxis, prod\n'), ((1472, 1510), 'tensorflow.keras.activations.deserialize', 'deserialize', (["self.params['activation']"], {}), "(self.params['activation'])\n", (1483, 1510), False, 'from tensorflow.keras.activations import deserialize\n'), ((3286, 3314), 'tensorflow.keras.activations.deserialize', 'deserialize', (['self.activation'], {}), '(self.activation)\n', (3297, 3314), False, 'from tensorflow.keras.activations import deserialize\n'), ((3593, 3616), 'tensorflow.concat', 'concat', (['[x, x0]'], {'axis': '(1)'}), '([x, x0], axis=1)\n', (3599, 3616), False, 'from tensorflow import reduce_sum, concat, reduce_max\n'), ((3784, 3805), 'tensorflow.reduce_sum', 'reduce_sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (3794, 3805), False, 'from tensorflow import reduce_sum, concat, reduce_max\n'), ((4361, 4384), 'tensorflow.concat', 'concat', (['[x, x0]'], {'axis': '(1)'}), '([x, x0], axis=1)\n', (4367, 4384), False, 'from tensorflow import reduce_sum, concat, reduce_max\n'), ((4666, 4704), 'tensorflow.concat', 'concat', (['[x1, x[:, :, newaxis]]'], {'axis': '(2)'}), '([x1, x[:, :, newaxis]], axis=2)\n', (4672, 4704), False, 'from tensorflow import reduce_sum, concat, reduce_max\n'), ((4912, 4933), 'tensorflow.reduce_sum', 'reduce_sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (4922, 4933), False, 'from tensorflow import reduce_sum, concat, reduce_max\n'), ((5027, 5048), 'tensorflow.reduce_max', 'reduce_max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (5037, 5048), False, 'from tensorflow import reduce_sum, concat, reduce_max\n'), ((6033, 6054), 'tensorflow.reduce_sum', 'reduce_sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (6043, 6054), False, 'from tensorflow import reduce_sum, concat, reduce_max\n')] |
# Following is the 2-D array. Print max from axis 0 and min from axis 1
# My Solution
import numpy as np

sampleArray = np.array([[34, 43, 73], [82, 22, 12], [53, 94, 66]])

print("Printing Original array")
print(sampleArray)

# Row-wise minima.
print("\nPrinting amin of Axis 1")
print(sampleArray.min(axis=1))

# Column-wise maxima.
print("\nPrinting amax of Axis 0")
print(sampleArray.max(axis=0))
| [
"numpy.min",
"numpy.max",
"numpy.array"
] | [((121, 173), 'numpy.array', 'np.array', (['[[34, 43, 73], [82, 22, 12], [53, 94, 66]]'], {}), '([[34, 43, 73], [82, 22, 12], [53, 94, 66]])\n', (129, 173), True, 'import numpy as np\n'), ((269, 296), 'numpy.min', 'np.min', (['sampleArray'], {'axis': '(1)'}), '(sampleArray, axis=1)\n', (275, 296), True, 'import numpy as np\n'), ((340, 367), 'numpy.max', 'np.max', (['sampleArray'], {'axis': '(0)'}), '(sampleArray, axis=0)\n', (346, 367), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import Dataset
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import h5py
import random
class BSD500(Dataset):
    """BSD500 patches from a single HDF5 file, with additive Gaussian noise.

    ``data_dir`` is expected to contain the HDF5 file whose keys are the
    individual (pre-processed) patches; ``noise`` is the noise standard
    deviation on the 0-255 scale (divided by 255 before use).
    """
    def __init__(self, data_dir, noise):
        # FIX: was ``super(Dataset, self).__init__()``, which starts the MRO
        # lookup *after* Dataset and therefore skips it.
        super().__init__()
        self.data_dir = data_dir
        self.noise = noise
        data_file = os.listdir(self.data_dir)
        # NOTE(review): assumes the first directory entry is the h5 file;
        # os.listdir order is arbitrary -- confirm the directory layout.
        self.data_path = os.path.join(self.data_dir, data_file[0])
        # FIX: use a context manager so the file is closed even on error.
        with h5py.File(self.data_path, 'r') as h5f:
            self.keys = list(h5f.keys())
        random.shuffle(self.keys)
    def __len__(self):
        """Number of patches in the file."""
        return len(self.keys)
    def __getitem__(self, index):
        """Return (noisy, clean) tensors for patch *index*.

        The file is reopened per call, which keeps the dataset usable with
        multi-worker DataLoaders.
        """
        with h5py.File(self.data_path, 'r') as h5f:
            data = torch.Tensor(np.array(h5f[self.keys[index]]))
        noise = torch.FloatTensor(data.size()).normal_(mean=0, std=self.noise / 255.)
        noisy_data = data + noise
        return noisy_data, data
| [
"h5py.File",
"random.shuffle",
"numpy.array",
"os.path.join",
"os.listdir"
] | [((335, 360), 'os.listdir', 'os.listdir', (['self.data_dir'], {}), '(self.data_dir)\n', (345, 360), False, 'import os\n'), ((386, 427), 'os.path.join', 'os.path.join', (['self.data_dir', 'data_file[0]'], {}), '(self.data_dir, data_file[0])\n', (398, 427), False, 'import os\n'), ((442, 472), 'h5py.File', 'h5py.File', (['self.data_path', '"""r"""'], {}), "(self.data_path, 'r')\n", (451, 472), False, 'import h5py\n'), ((518, 543), 'random.shuffle', 'random.shuffle', (['self.keys'], {}), '(self.keys)\n', (532, 543), False, 'import random\n'), ((667, 697), 'h5py.File', 'h5py.File', (['self.data_path', '"""r"""'], {}), "(self.data_path, 'r')\n", (676, 697), False, 'import h5py\n'), ((757, 775), 'numpy.array', 'np.array', (['h5f[key]'], {}), '(h5f[key])\n', (765, 775), True, 'import numpy as np\n')] |
from typing import Callable, Iterable, Sized
from itertools import product
import numpy as np
def convert_tuple_to_array(elements: Iterable, **kw) -> np.ndarray:
    """Convert *elements* to a numpy array.

    The dtype is ``kw['dtype']`` when given, otherwise inferred from the
    elements themselves via ``np.result_type``.
    """
    dtype = kw["dtype"] if "dtype" in kw else np.result_type(*elements)
    return np.array(elements, dtype=dtype)
def cartesian_product(*arrays: Sized, aggregator: Callable, **kw) -> np.ndarray:
    """
    Computes transformations of the cartesian product of all elements.

    Parameters
    ----------
    arrays: iterable of Sized
        The arrays to product.
    aggregator:
        Callable applied to each item of the product iterator.
        May return a scalar or a numpy ndarray.

    Returns
    -------
    ret:
        Array with one dimension per input array, plus any extra dimensions
        produced by the aggregator.
    """
    values = [aggregator(item, **kw) for item in product(*arrays)]
    res = np.stack(values)
    # One axis per input array ...
    shape = tuple(len(a) for a in arrays)
    # ... plus the aggregator's own output axes, if any.
    if res.ndim > 1:
        shape += res.shape[1:]
    return res.reshape(shape)
| [
"numpy.result_type",
"numpy.array",
"itertools.product"
] | [((279, 310), 'numpy.array', 'np.array', (['elements'], {'dtype': 'dtype'}), '(elements, dtype=dtype)\n', (287, 310), True, 'import numpy as np\n'), ((242, 267), 'numpy.result_type', 'np.result_type', (['*elements'], {}), '(*elements)\n', (256, 267), True, 'import numpy as np\n'), ((885, 901), 'itertools.product', 'product', (['*arrays'], {}), '(*arrays)\n', (892, 901), False, 'from itertools import product\n')] |
# 问题一:处理检验数据
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def read_xlsx(path):
    """Load sheet 'Sheet1' of an Excel workbook, first row as header."""
    return pd.read_excel(path, sheet_name='Sheet1', header=0)
def read_csv(path):
    """Read a comma-delimited CSV file into a DataFrame."""
    return pd.read_csv(path, delimiter=',')
def Bessel(v):
    """Bessel-corrected sample standard deviation of the deviations in v.

    Assumes v already holds deviations from the mean (see less_err), so the
    sum of squares is divided by n - 1 rather than n.
    """
    # Renamed local: the original shadowed the builtin `sum`.
    squared_sum = np.sum(v ** 2)
    return np.sqrt(squared_sum / (len(v) - 1))
def less_err(x):
    """Return the absolute deviations of x from its mean.

    The original recomputed np.mean(x) once per element (O(n^2)); a single
    mean broadcast over the array gives identical values in O(n).
    """
    arr = np.asarray(x)
    return np.abs(arr - np.mean(arr))
def del_index(df, col):
    """For each column name in col, find 3-sigma outlier row positions.

    A row is an outlier when its absolute deviation from the column mean
    (less_err) exceeds three times the Bessel-corrected deviation (Bessel).

    Returns
    -------
    index : list of lists, one list of row positions per column in col.
    """
    index = []
    for c in col:
        v = less_err(df[c].values)
        delta = Bessel(v)
        # Broadcast the scalar threshold instead of materialising an array,
        # and collect the positions where v - 3*delta > 0 directly.
        outliers = np.flatnonzero(v - 3 * delta > 0).tolist()
        index.append(outliers)
    return index
def draw(l, c):
    """Plot series l both as a starred red line and as bars, labelled c."""
    xs = range(len(l.values))
    plt.plot(xs, l.values, '*:r', lw=3, label=c)
    plt.bar(xs, l.values)
    plt.xlabel('Time')
    plt.legend()
    plt.show()
#
def change(l):
    """Re-join negative numbers after a str.split('-').

    Splitting a range string such as '-5-10' on '-' yields ['', '5', '10'];
    an empty entry marks that the following token was negative.  Prefix that
    follower with '-' and drop the empty markers, in place.

    Fixes: the original indexed l[i+1] without a bounds check (IndexError if
    '' is last) and removed items while iterating the same list, which can
    skip consecutive empty entries.
    """
    for i, e in enumerate(l):
        if e == '' and i + 1 < len(l):
            l[i + 1] = '-' + l[i + 1]
    # In-place filter instead of remove-while-iterating.
    l[:] = [e for e in l if e != '']
    return l
# Check whether each column's values fall inside its allowed range; values
# outside the range are set to NaN.
def range_compare(raw_data):
    """Mask out-of-range measurements with NaN and persist the result.

    Per-column bounds are read from col_range.xlsx ('range' given as 'lo-hi',
    possibly with negative endpoints repaired by change()).  Values outside
    [lo, hi] become NaN; the masked frame is written to
    ./data2/325_range_compare.csv and returned.

    Fixes: the original returned None although __main__ assigns the return
    value and passes it on to delta3.
    """
    col_range = read_xlsx('./data/raw_data/col_range.xlsx')
    for k, v in zip(col_range['col'].values, col_range['range'].values):
        raw = v.split('-')
        if len(raw) > 2:
            # More than two tokens means a negative bound was split apart.
            raw = change(raw)
        res = list(map(lambda x: float(x), raw))
        raw_data[k] = np.where(raw_data[k] < res[0], np.nan, raw_data[k])
        raw_data[k] = np.where(raw_data[k] > res[1], np.nan, raw_data[k])
    raw_data.to_csv('./data2/325_range_compare.csv', header=True, index=False)
    return raw_data
# Drop rows containing NaN values and run the 3-delta (3-sigma) outlier test.
def delta3(raw_data,del_col):
    """Drop excluded columns and NaN rows, run the 3-sigma test per column,
    and write the per-column means to ./313mean.csv.

    NOTE(review): this function mutates the module-level global `col`
    (col.remove below) and relies on it being defined by the caller.
    """
    # Remove the excluded sensor columns entirely.
    for c in del_col:
        raw_data=raw_data.drop(c,axis=1)
    # print(raw_data.isna().sum(axis=1))
    # Drop any row that still contains a NaN.
    raw_data=raw_data.dropna(axis=0, how='any')
    print(raw_data)
    # Keep the global column list in sync with the dropped columns.
    for c in del_col:
        col.remove(c)
    # Row positions of 3-sigma outliers, one list per remaining column.
    index=del_index(raw_data,col)
    print(raw_data)
    print(index)
    mean_res=[]
    for i,c in enumerate(col):
        if len(index[i])!=0:
            # NOTE(review): .drop() returns a new Series; this result is
            # discarded, so the outliers are NOT removed before np.mean below.
            raw_data[c].drop(index[i],axis=0)
            # NOTE(review): the mean is appended only for columns that had
            # outliers, so zip(col, mean_res) below can misalign names and
            # values — confirm whether the mean should be computed for every
            # column.
            mean_res.append(np.mean(raw_data[c].values))
    print(dict(zip(col,mean_res)))
    pd.Series(dict(zip(col,mean_res))).to_csv('./313mean.csv',header=False)
if __name__ == '__main__':
    # Column names (iterating a DataFrame yields its column labels); used as
    # a module-level global by delta3/del_index.
    col = list(read_csv('./data/raw_data/column.csv'))
    raw_data=read_csv('./data2/325_range_compare.csv')
    # nan_num=raw_data.isna().sum(axis=0)
    # print(nan_num[nan_num>0])
    # Sensor tags excluded from the 3-sigma test entirely.
    del_col = ['S-ZORB.AT_5201.PV', 'S-ZORB.PDC_2502.PV', 'S-ZORB.SIS_LT_1001.PV', 'S-ZORB.AI_2903.PV',
               'S-ZORB.FT_1204.TOTAL']
    # NOTE(review): range_compare as written above has no return statement,
    # so raw_data becomes None here and delta3 would fail — confirm that
    # range_compare should `return raw_data`.
    raw_data=range_compare(raw_data)
    delta3(raw_data, del_col)
# end
# python
| [
"numpy.abs",
"numpy.sum",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"pandas.read_excel",
"numpy.mean",
"numpy.array",
"numpy.where",
"matplotlib.pyplot.xlabel"
] | [((113, 163), 'pandas.read_excel', 'pd.read_excel', (['path'], {'sheet_name': '"""Sheet1"""', 'header': '(0)'}), "(path, sheet_name='Sheet1', header=0)\n", (126, 163), True, 'import pandas as pd\n'), ((207, 239), 'pandas.read_csv', 'pd.read_csv', (['path'], {'delimiter': '""","""'}), "(path, delimiter=',')\n", (218, 239), True, 'import pandas as pd\n'), ((278, 292), 'numpy.sum', 'np.sum', (['(v ** 2)'], {}), '(v ** 2)\n', (284, 292), True, 'import numpy as np\n'), ((426, 435), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (432, 435), True, 'import numpy as np\n'), ((901, 919), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (911, 919), True, 'import matplotlib.pyplot as plt\n'), ((945, 957), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (955, 957), True, 'import matplotlib.pyplot as plt\n'), ((962, 972), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (970, 972), True, 'import matplotlib.pyplot as plt\n'), ((356, 366), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (363, 366), True, 'import numpy as np\n'), ((386, 397), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (394, 397), True, 'import numpy as np\n'), ((398, 414), 'numpy.array', 'np.array', (['mean_x'], {}), '(mean_x)\n', (406, 414), True, 'import numpy as np\n'), ((563, 591), 'numpy.array', 'np.array', (['[delta for i in v]'], {}), '([delta for i in v])\n', (571, 591), True, 'import numpy as np\n'), ((628, 652), 'numpy.where', 'np.where', (['(less > 0)', '(1)', '(0)'], {}), '(less > 0, 1, 0)\n', (636, 652), True, 'import numpy as np\n'), ((1466, 1517), 'numpy.where', 'np.where', (['(raw_data[k] < res[0])', 'np.nan', 'raw_data[k]'], {}), '(raw_data[k] < res[0], np.nan, raw_data[k])\n', (1474, 1517), True, 'import numpy as np\n'), ((1537, 1588), 'numpy.where', 'np.where', (['(raw_data[k] > res[1])', 'np.nan', 'raw_data[k]'], {}), '(raw_data[k] > res[1], np.nan, raw_data[k])\n', (1545, 1588), True, 'import numpy as np\n'), ((2151, 2178), 'numpy.mean', 
'np.mean', (['raw_data[c].values'], {}), '(raw_data[c].values)\n', (2158, 2178), True, 'import numpy as np\n')] |
from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace
from scipy import interpolate
import matplotlib.pyplot as plt
# Cross-section class that stores x, y points for the cross-section
# and calculates various geometry data
d = 1000
class CrossSection:
    """
    Class that contains geometry of and measurements of a cross-section.
    Parameters
    ----------
    x : x-coordinates of cross-section
    y : y-coordinates of cross-section
    Attributes
    ----------
    x : x-coordinates of cross-section
    y : y-coordinates of cross-section
    xm : coordinates x[i-1]
    xp : coordinates x[i+1]
    ym : coordinates y[i-1]
    yp : coordinates y[i+1]
    A : cross-sectional area
    P : cross-section perimeter
    l : distance between x[i-1], y[i-1] and x, y
    r_l : distance between x, y and reference point
    umx : x-coordinate of reference point
    umy : y-coordinate of reference point
    """
    # Store the perimeter points and build the neighbour arrays.
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.roll()
    # Sets the point of maximum velocity
    def setUMPoint(self, umx, umy):
        """
        Sets umx, umy.
        Parameters
        ----------
        umx : x-coordinate of reference point
        umy : y-coordinate of reference point
        """
        self.umx = umx
        self.umy = umy
    # Create arrays of the previous (xm, ym) and next (xp, yp) neighbours of
    # each perimeter point (cyclic shift by +1 / -1).
    def roll(self):
        """Creates xm, xp, ym, yp.
        """
        self.xm = roll(self.x, 1)
        self.ym = roll(self.y, 1)
        self.xp = roll(self.x, self.x.size-1)
        self.yp = roll(self.y, self.y.size-1)
    # Calculate perimeter and area
    def calcShapeParams(self):
        self.genL()
        self.calcA()
    # l stores difference between each perimeter point
    # pp is the length along perimeter to a point
    # pp[-2] is the channel perimeter
    def genL(self):
        """
        Creates l and P.
        """
        self.l = hypot(self.x - self.xp, self.y - self.yp)
        self.pp = cumsum(self.l)
        self.P = self.pp[-2]
    # Calculates area of the cross-section
    def calcA(self):
        """
        Creates A.
        """
        # Signed shoelace area; sA keeps the orientation sign, A its magnitude.
        self.sA = (self.xm*self.y - self.x*self.ym).sum() * 0.5
        self.A = fabs(self.sA)
    # Generate lengths from maximum velocity point to perimeter points
    def genRL(self):
        """
        Creates r_l.
        """
        self.r_l = hypot(self.x-self.umx, self.y-self.umy)
    # Find left and right points defining a height above the cross-section
    # bottom
    def findLR(self, h):
        """
        Finds left and right index given a height above the
        lowest point in the cross-section.
        Parameters
        ----------
        h : height above the floor
        Returns
        -------
        L : left index of x, y coordinate h above the floor
        R : right index of x, y coordinate h above the floor
        """
        ymin = self.y.min()
        a_h = ymin + h
        # Segments crossing the level a_h downward (L) and upward (R).
        condL = logical_and(self.y > a_h, a_h > self.yp)
        condR = logical_and(self.y < a_h, a_h < self.yp)
        # NOTE: raises IndexError if the level never crosses the perimeter.
        L = where(condL)[0][0] + 1
        R = where(condR)[0][0]
        return L,R
    # Find centroid, maximum velocity position in phreatic cases
    def findCentroid(self):
        """
        Calculates centroid of the cross-section.
        Returns
        -------
        cx : x-coordinate of centroid
        cy : y-coordinate of centroid
        """
        # Standard polygon-centroid formula based on the signed area sA
        # (calcA must have been called first).
        m = self.xm*self.y-self.x*self.ym
        cx = (1/(6*self.sA))*((self.x + self.xm)*m).sum()
        cy = (1/(6*self.sA))*((self.y + self.ym)*m).sum()
        return cx, cy
    # Redraw some length rl away normal to the perimeter
    # It may be advantageous for stability to resample using a spline fit
    # Setting dl sets the number of points defining the cross-section
    ## after resampling.
    def redraw(self, rl, resample=False, dl=d):
        """
        Regenerate cross-section perpendicular to current
        given a distance for each x,y point.
        Parameters
        ----------
        rl : array of distances to move x, y points
        resample : [bool] option to resample points equidistantly along
            perimeter (optional)
        dl : number of points in resampled cross-section (optional;
            defaults to the module-level constant d)
        """
        # Local tangent direction of the perimeter at each point.
        alpha = arctan2(self.xp-self.xm, self.yp-self.ym)
        nx = self.x + sign(self.x)*rl*cos(alpha)
        ny = self.y - sign(self.x)*rl*sin(alpha)
        # Check if we drew inside or outside..
        c = ccw(self.x, self.y, self.xm, self.ym, nx, ny)
        # Flip the offset direction wherever the new point went the wrong way.
        nx[c] = (self.x - sign(self.x)*rl*cos(alpha))[c]
        ny[c] = (self.y + sign(self.x)*rl*sin(alpha))[c]
        #Resample points by fitting spline
        if resample:
            tck, u = interpolate.splprep([nx, ny], u=None, k=1, s=0.0)
            un = linspace(u.min(), u.max(), dl if dl!=nx.size else nx.size)
            nx, ny = interpolate.splev(un, tck, der=0)
        # New coordinates; rotate so the highest point comes first.
        y_roll = ny.size - ny.argmax()
        nx = roll(nx, y_roll)
        ny = roll(ny, y_roll)
        self.x = nx
        self.y = ny
        self.roll()
# Counter clockwise function to determine if we drew points in the correct
# direction
def ccw(x, y, xm, ym, nx, ny):
    """
    Determines if redrawn points are counter clockwise in cross-section
    Parameters
    ----------
    x : x-coordinates of cross-section
    y : y-coordinates of cross-section
    xm : x[i-1]
    ym : y[i-1]
    nx : new x-coordinate
    ny : new y-coordinate
    Returns
    -------
    ccw : Array of bools indicating which new points are counter clockwise
    """
    # Sign of the 2-D cross product of (point - previous) and (new - previous).
    cross = (x - xm) * (ny - ym) - (y - ym) * (nx - xm)
    return cross > 0
# Calculate length of curve defined by points
def calcL(x, y):
    """
    Calculates length of a curve given x,y points
    Parameters
    ----------
    x : x-coordinates of points
    y : y-coordinates of points
    Returns
    -------
    length : length of curve
    """
    # Euclidean length of each consecutive segment, accumulated along the
    # curve; the final accumulated value is the total length.
    segment_lengths = hypot(x[1:] - x[:-1], y[1:] - y[:-1])
    return cumsum(segment_lengths)[-1]
def calcArea(x, y, l=0, r=0):
    """
    Calculates area of a polygon given x,y points
    Parameters
    ----------
    x : x-coordinates of points defining polygon
    y : y-coordinates of points defining polygon
    l : left index of subset of points (optional)
    r : right index of subset of points (optional)
    Returns
    -------
    A - area of polygon
    """
    # Optionally restrict to the [l:r] subset (only when both are non-zero).
    if l and r:
        xs, ys = x[l:r], y[l:r]
    else:
        xs, ys = x, y
    # Shoelace formula; the sign depends on winding order.
    signed = (roll(xs, 1) * ys - xs * roll(ys, 1)).sum()
    return fabs(0.5 * signed)
| [
"numpy.arctan2",
"numpy.logical_and",
"numpy.roll",
"scipy.interpolate.splprep",
"numpy.hypot",
"numpy.cumsum",
"numpy.fabs",
"numpy.where",
"numpy.sin",
"numpy.cos",
"numpy.sign",
"scipy.interpolate.splev"
] | [((5284, 5321), 'numpy.hypot', 'hypot', (['(x[1:] - x[:-1])', '(y[1:] - y[:-1])'], {}), '(x[1:] - x[:-1], y[1:] - y[:-1])\n', (5289, 5321), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((5334, 5353), 'numpy.cumsum', 'cumsum', (['sub_lengths'], {}), '(sub_lengths)\n', (5340, 5353), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((5862, 5876), 'numpy.fabs', 'fabs', (['(0.5 * sA)'], {}), '(0.5 * sA)\n', (5866, 5876), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((1382, 1397), 'numpy.roll', 'roll', (['self.x', '(1)'], {}), '(self.x, 1)\n', (1386, 1397), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((1410, 1425), 'numpy.roll', 'roll', (['self.y', '(1)'], {}), '(self.y, 1)\n', (1414, 1425), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((1438, 1467), 'numpy.roll', 'roll', (['self.x', '(self.x.size - 1)'], {}), '(self.x, self.x.size - 1)\n', (1442, 1467), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((1478, 1507), 'numpy.roll', 'roll', (['self.y', '(self.y.size - 1)'], {}), '(self.y, self.y.size - 1)\n', (1482, 1507), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((1792, 1833), 'numpy.hypot', 'hypot', (['(self.x - self.xp)', '(self.y - self.yp)'], {}), '(self.x - self.xp, self.y - self.yp)\n', (1797, 1833), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((1846, 1860), 'numpy.cumsum', 'cumsum', (['self.l'], {}), '(self.l)\n', (1852, 1860), False, 'from numpy import sin, 
cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((2038, 2051), 'numpy.fabs', 'fabs', (['self.sA'], {}), '(self.sA)\n', (2042, 2051), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((2180, 2223), 'numpy.hypot', 'hypot', (['(self.x - self.umx)', '(self.y - self.umy)'], {}), '(self.x - self.umx, self.y - self.umy)\n', (2185, 2223), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((2665, 2705), 'numpy.logical_and', 'logical_and', (['(self.y > a_h)', '(a_h > self.yp)'], {}), '(self.y > a_h, a_h > self.yp)\n', (2676, 2705), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((2716, 2756), 'numpy.logical_and', 'logical_and', (['(self.y < a_h)', '(a_h < self.yp)'], {}), '(self.y < a_h, a_h < self.yp)\n', (2727, 2756), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((3810, 3855), 'numpy.arctan2', 'arctan2', (['(self.xp - self.xm)', '(self.yp - self.ym)'], {}), '(self.xp - self.xm, self.yp - self.ym)\n', (3817, 3855), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((4426, 4442), 'numpy.roll', 'roll', (['nx', 'y_roll'], {}), '(nx, y_roll)\n', (4430, 4442), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((4450, 4466), 'numpy.roll', 'roll', (['ny', 'y_roll'], {}), '(ny, y_roll)\n', (4454, 4466), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((4201, 4250), 'scipy.interpolate.splprep', 'interpolate.splprep', (['[nx, ny]'], {'u': 'None', 'k': '(1)', 's': '(0.0)'}), '([nx, ny], u=None, k=1, s=0.0)\n', (4220, 4250), False, 'from 
scipy import interpolate\n'), ((4330, 4363), 'scipy.interpolate.splev', 'interpolate.splev', (['un', 'tck'], {'der': '(0)'}), '(un, tck, der=0)\n', (4347, 4363), False, 'from scipy import interpolate\n'), ((2793, 2805), 'numpy.where', 'where', (['condR'], {}), '(condR)\n', (2798, 2805), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((3885, 3895), 'numpy.cos', 'cos', (['alpha'], {}), '(alpha)\n', (3888, 3895), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((3928, 3938), 'numpy.sin', 'sin', (['alpha'], {}), '(alpha)\n', (3931, 3938), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((2764, 2776), 'numpy.where', 'where', (['condL'], {}), '(condL)\n', (2769, 2776), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((3869, 3881), 'numpy.sign', 'sign', (['self.x'], {}), '(self.x)\n', (3873, 3881), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((3912, 3924), 'numpy.sign', 'sign', (['self.x'], {}), '(self.x)\n', (3916, 3924), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((4070, 4080), 'numpy.cos', 'cos', (['alpha'], {}), '(alpha)\n', (4073, 4080), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((4121, 4131), 'numpy.sin', 'sin', (['alpha'], {}), '(alpha)\n', (4124, 4131), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((4054, 4066), 'numpy.sign', 'sign', (['self.x'], {}), '(self.x)\n', (4058, 4066), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, 
logical_and, where, linspace\n'), ((4105, 4117), 'numpy.sign', 'sign', (['self.x'], {}), '(self.x)\n', (4109, 4117), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((5753, 5768), 'numpy.roll', 'roll', (['x[l:r]', '(1)'], {}), '(x[l:r], 1)\n', (5757, 5768), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((5784, 5799), 'numpy.roll', 'roll', (['y[l:r]', '(1)'], {}), '(y[l:r], 1)\n', (5788, 5799), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((5821, 5831), 'numpy.roll', 'roll', (['x', '(1)'], {}), '(x, 1)\n', (5825, 5831), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n'), ((5837, 5847), 'numpy.roll', 'roll', (['y', '(1)'], {}), '(y, 1)\n', (5841, 5847), False, 'from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def _trend_probability(csv_path, column, threshold, curve_label, y_label,
                       start_day, current_day):
    """Shared implementation for gold_probability / bit_probability.

    Fits a degree-40 polynomial to the price series, differentiates the fit
    and returns the fraction of days whose fitted slope magnitude is at most
    `threshold` (i.e. days on which the trend is locally flat).

    Also prints the flat-day count and ratio and shows a plot of the
    derivative curve.
    """
    pd_reader = pd.read_csv(csv_path)
    x = [i for i in range(start_day, current_day + 1)]
    y = pd_reader[column][start_day:current_day + 1]
    # -2147483648 is the sentinel for missing samples in the raw data.
    null = -2147483648
    if pd_reader[column][start_day].astype(int) == null:
        pd_reader[column][start_day] = pd_reader[column][start_day - 1]
    # Forward-fill non-positive, absurdly large, or sentinel samples.
    # NOTE(review): the `> 10000` cap also discards legitimate high bitcoin
    # prices — confirm this is intentional.
    for i in range(start_day, current_day + 1):
        if y[i] <= 0 or y[i] > 10000 or y[i].astype(int) == null:
            y[i] = y[i - 1]
    y_fit = np.polyfit(x, y, 40)
    y_fit_1d = np.poly1d(y_fit)  # polynomial built from the fitted coefficients
    der = np.polyder(y_fit_1d, 1)  # first derivative of the fitted trend
    der1 = der(x)
    trade = 0
    for slope in der1:
        if abs(slope) <= threshold:
            trade += 1
    print(trade)
    print(trade / (current_day - start_day))
    # plot the derivative of the fitted trend
    plt.plot(x, der1, 'c', label=curve_label)
    plt.xlabel('Date')
    plt.ylabel(y_label)
    plt.legend()
    plt.title('trend fitting')
    plt.show()
    return trade / (current_day - start_day)


def gold_probability(start_day=0, current_day=1264):
    """Fraction of days on which the fitted gold price trend is flat
    (|slope| <= 0.3)."""
    return _trend_probability("./LBMA-GOLD.csv", 'USD (PM)', 0.3, 'der1',
                              'USD', start_day, current_day)


def bit_probability(start_day=0, current_day=1825):
    """Fraction of days on which the fitted bitcoin price trend is flat
    (|slope| <= 1)."""
    return _trend_probability("./BCHAIN-MKPRU.csv", 'Value', 1,
                              'Fitting Curve', 'Value', start_day, current_day)
| [
"matplotlib.pyplot.title",
"numpy.poly1d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.polyfit",
"pandas.read_csv",
"numpy.polyder",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((145, 175), 'pandas.read_csv', 'pd.read_csv', (['"""./LBMA-GOLD.csv"""'], {}), "('./LBMA-GOLD.csv')\n", (156, 175), True, 'import pandas as pd\n'), ((608, 628), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(40)'], {}), '(x, y, 40)\n', (618, 628), True, 'import numpy as np\n'), ((644, 660), 'numpy.poly1d', 'np.poly1d', (['y_fit'], {}), '(y_fit)\n', (653, 660), True, 'import numpy as np\n'), ((687, 710), 'numpy.polyder', 'np.polyder', (['y_fit_1d', '(1)'], {}), '(y_fit_1d, 1)\n', (697, 710), True, 'import numpy as np\n'), ((887, 923), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'der1', '"""c"""'], {'label': '"""der1"""'}), "(x, der1, 'c', label='der1')\n", (895, 923), True, 'import matplotlib.pyplot as plt\n'), ((928, 946), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (938, 946), True, 'import matplotlib.pyplot as plt\n'), ((951, 968), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""USD"""'], {}), "('USD')\n", (961, 968), True, 'import matplotlib.pyplot as plt\n'), ((973, 985), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (983, 985), True, 'import matplotlib.pyplot as plt\n'), ((990, 1016), 'matplotlib.pyplot.title', 'plt.title', (['"""trend fitting"""'], {}), "('trend fitting')\n", (999, 1016), True, 'import matplotlib.pyplot as plt\n'), ((1021, 1031), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1029, 1031), True, 'import matplotlib.pyplot as plt\n'), ((1150, 1183), 'pandas.read_csv', 'pd.read_csv', (['"""./BCHAIN-MKPRU.csv"""'], {}), "('./BCHAIN-MKPRU.csv')\n", (1161, 1183), True, 'import pandas as pd\n'), ((1604, 1624), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(40)'], {}), '(x, y, 40)\n', (1614, 1624), True, 'import numpy as np\n'), ((1640, 1656), 'numpy.poly1d', 'np.poly1d', (['y_fit'], {}), '(y_fit)\n', (1649, 1656), True, 'import numpy as np\n'), ((1683, 1706), 'numpy.polyder', 'np.polyder', (['y_fit_1d', '(1)'], {}), '(y_fit_1d, 1)\n', (1693, 1706), True, 'import numpy as np\n'), ((1881, 
1926), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'der1', '"""c"""'], {'label': '"""Fitting Curve"""'}), "(x, der1, 'c', label='Fitting Curve')\n", (1889, 1926), True, 'import matplotlib.pyplot as plt\n'), ((1931, 1949), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (1941, 1949), True, 'import matplotlib.pyplot as plt\n'), ((1954, 1973), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value"""'], {}), "('Value')\n", (1964, 1973), True, 'import matplotlib.pyplot as plt\n'), ((1978, 1990), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1988, 1990), True, 'import matplotlib.pyplot as plt\n'), ((1995, 2021), 'matplotlib.pyplot.title', 'plt.title', (['"""trend fitting"""'], {}), "('trend fitting')\n", (2004, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2026, 2036), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2034, 2036), True, 'import matplotlib.pyplot as plt\n')] |
"""
Generate pseudo-data
====================
"""
import numpy as np
from scipy.stats import poisson
# number of pseudo-data sets and channels
n_pseudo = 100000
n_channels = 6

# detector resolution per channel: five gamma-gamma channels, one bb channel
sigma_gg = 1.5
sigma_bb = 14.
resolutions = [sigma_gg] * 5 + [sigma_bb]

# dummy background model: a flat expectation of 1000 counts in each bin
bins = np.linspace(140, 155, 15)
background = np.full_like(bins, 1000)

if __name__ == "__main__":
    # generate pseudo-data: independent Poisson draws around the background
    # expectation, one .npy file per channel
    np.random.seed(151)
    for c in range(n_channels):
        file_name = f"data/pseudo_channel_{c}"
        d = poisson.rvs([background for _ in range(n_pseudo)])
        np.save(file_name, d)
| [
"numpy.save",
"numpy.zeros_like",
"numpy.random.seed",
"numpy.linspace"
] | [((321, 346), 'numpy.linspace', 'np.linspace', (['(140)', '(155)', '(15)'], {}), '(140, 155, 15)\n', (332, 346), True, 'import numpy as np\n'), ((360, 379), 'numpy.zeros_like', 'np.zeros_like', (['bins'], {}), '(bins)\n', (373, 379), True, 'import numpy as np\n'), ((449, 468), 'numpy.random.seed', 'np.random.seed', (['(151)'], {}), '(151)\n', (463, 468), True, 'import numpy as np\n'), ((628, 649), 'numpy.save', 'np.save', (['file_name', 'd'], {}), '(file_name, d)\n', (635, 649), True, 'import numpy as np\n')] |
from generate_microbench_rates import generate_microbench_rates
import numpy as np
from isolation import get_iso_latency
from lacs import lacs
from mm_default import mm_default
import os
rate1 = 20.0
filenumber = 100
# Generate one microbenchmark rate file per rate2 value.
for rate2 in range(26, 37):
    generate_microbench_rates(filenumber, rate1, rate2)

bandwidth = 614
machine_number = 10
filesize = 100  # 10 GB
cachesize = 400  # 5 GB
delta = 0.1

# read the per-user, per-file request rates from pop.txt
# (one comma-separated row per user)
with open("pop.txt", "r") as f:
    lines = f.readlines()
user_number = len(lines)
file_number = len(lines[0].split(','))
rates = np.zeros((user_number, file_number))
for index_u in range(user_number):
    line = lines[index_u]
    # Fix: np.float was removed in NumPy 1.20+ and np.asfarray is
    # deprecated; parse straight into a float64 row instead.
    rates[index_u, :] = np.array(line.split(','), dtype=float)
# (redundant f.close() removed: the `with` block already closed the file)

# per-machine service rate and cache capacity, expressed in files
mu_vector = np.ones(machine_number) * bandwidth / filesize
c_vector = np.ones(machine_number) * cachesize / filesize
avg_si, user_si, Lambda, Lambda_D = get_iso_latency(mu_vector, c_vector, rates, delta)
# print rates
lacs(mu_vector, c_vector, rates, delta, user_si, 0)
mm_default(mu_vector, c_vector, rates, delta, 0)
| [
"generate_microbench_rates.generate_microbench_rates",
"isolation.get_iso_latency",
"lacs.lacs",
"numpy.zeros",
"mm_default.mm_default",
"numpy.ones"
] | [((251, 302), 'generate_microbench_rates.generate_microbench_rates', 'generate_microbench_rates', (['filenumber', 'rate1', 'rate2'], {}), '(filenumber, rate1, rate2)\n', (276, 302), False, 'from generate_microbench_rates import generate_microbench_rates\n'), ((982, 1032), 'isolation.get_iso_latency', 'get_iso_latency', (['mu_vector', 'c_vector', 'rates', 'delta'], {}), '(mu_vector, c_vector, rates, delta)\n', (997, 1032), False, 'from isolation import get_iso_latency\n'), ((1054, 1105), 'lacs.lacs', 'lacs', (['mu_vector', 'c_vector', 'rates', 'delta', 'user_si', '(0)'], {}), '(mu_vector, c_vector, rates, delta, user_si, 0)\n', (1058, 1105), False, 'from lacs import lacs\n'), ((1110, 1158), 'mm_default.mm_default', 'mm_default', (['mu_vector', 'c_vector', 'rates', 'delta', '(0)'], {}), '(mu_vector, c_vector, rates, delta, 0)\n', (1120, 1158), False, 'from mm_default import mm_default\n'), ((618, 654), 'numpy.zeros', 'np.zeros', (['(user_number, file_number)'], {}), '((user_number, file_number))\n', (626, 654), True, 'import numpy as np\n'), ((841, 864), 'numpy.ones', 'np.ones', (['machine_number'], {}), '(machine_number)\n', (848, 864), True, 'import numpy as np\n'), ((899, 922), 'numpy.ones', 'np.ones', (['machine_number'], {}), '(machine_number)\n', (906, 922), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020, University of Southampton
All rights reserved.
Licensed under the BSD 3-Clause License.
See LICENSE.md file in the project root for full license information.
"""
import copy
import math
import numpy as np
from numpy.random import randn, uniform
from auv_nav.sensors import SyncedOrientationBodyVelocity
from auv_nav.tools.interpolate import interpolate
from oplab import Console
# Particle Filter implementation using classes
# TODO: multiprocessing or multithreading
# TODO: port uncertainty calculation from Jim
# TODO: use 3D gaussian models for USBL
def gaussian_pdf(mu, sigma, x):
    """Evaluate the 1-D Gaussian N(mu, sigma**2) density at x."""
    exponent = -((mu - x) ** 2) / (sigma ** 2) / 2.0
    normaliser = math.sqrt(2.0 * math.pi * (sigma ** 2))
    return math.exp(exponent) / normaliser
class Index:
    """Row indices into the particle state column vector.

    X/Y/Z are position, ROLL/PITCH/YAW the attitude (stored in radians —
    set_movement converts from degrees), VX/VY/VZ the body velocities and
    ALT the altitude.  DIM is the total number of rows.
    """
    X = 0
    Y = 1
    Z = 2
    ROLL = 3
    PITCH = 4
    YAW = 5
    VX = 6
    VY = 7
    VZ = 8
    ALT = 9
    DIM = 10
class Particle:
    """A single particle-filter hypothesis: state vector, trajectory history
    and importance weight."""
    def __init__(self):
        # The real-valued time, in seconds, since some epoch
        self.time = None
        # The state column vector (Index.DIM = 10 rows; see Index for the
        # meaning of each row).
        self.state = np.zeros((Index.DIM, 1), dtype=float)
        # The particle trajectory (list of past state vectors + timestamps)
        self.trajectory = []
        self.trajectory_time = []
        # The measured errors during PF
        self.trajectory_error = []
        # Particle weight
        self.weight = None
    @property
    def eastings(self):
        # Eastings is the Y component of the state vector.
        return self.state[Index.Y, 0]
    @property
    def northings(self):
        # Northings is the X component of the state vector.
        return self.state[Index.X, 0]
    # Particles compare by importance weight only, so lists of particles can
    # be sorted by weight.
    # NOTE(review): defining __eq__ without __hash__ makes Particle
    # unhashable — fine as long as particles are never used as dict/set keys.
    def __eq__(self, other):
        return self.weight == other.weight
    def __lt__(self, other):
        return self.weight < other.weight
class UsblObservationModel:
    """Weights particles against a USBL position fix.

    The fix is reduced to a 1-D zero-mean Gaussian on the 3-D distance
    between a particle and the measured (northings, eastings, depth) point.
    """
    def __init__(self, usbl_noise_sigma_factor):
        # The measurement (unset until set_observation() is called)
        self.x = None
        self.y = None
        self.z = None
        self.std = None
        # Multiplier applied to the combined measurement sigma.
        self.usbl_noise_sigma_factor = usbl_noise_sigma_factor
    def set_observation(self, value):
        # `value` must expose northings/eastings/depth and their *_std.
        self.x = value.northings
        self.y = value.eastings
        self.z = value.depth
        # TODO: Could we use a 3D Gaussian instead of a 1D?
        # Fold the three per-axis standard deviations into one radial sigma.
        sigma = math.sqrt(
            value.northings_std ** 2
            + value.eastings_std ** 2
            + value.depth_std ** 2
        )
        self.std = self.usbl_noise_sigma_factor * sigma
    def measure(self, p):
        """
        Compute an importance weight for particle `p` against the stored
        observation.

        Returns 1.0 when no observation has been set yet.  Otherwise the
        Euclidean distance between the particle state and the fix is
        appended to p.trajectory_error and the weight is a zero-mean
        Gaussian (sigma = self.std) evaluated at that distance.
        @param p Particle to be weighted.
        @return importance weight for the particle (positive).
        """
        weight = 1.0
        if self.x is not None:
            dist = math.sqrt(
                (self.x - p.state[Index.X, 0]) ** 2
                + (self.y - p.state[Index.Y, 0]) ** 2
                + (self.z - p.state[Index.Z, 0]) ** 2
            )
            p.trajectory_error.append(dist)
            weight = gaussian_pdf(0, self.std, dist)
        return weight
class DeadReckoningMovementModel:
    """
    @class MovementModel
    @brief Interface for movement models for particle filters.
    The movement model in a particle filter defines how a particle's state
    changes over time.
    """
    def __init__(
        self, sensors_std, dvl_noise_sigma_factor, imu_noise_sigma_factor
    ):
        # Noise model: nested dict with "factor"/"offset" entries for
        # "position_z", "speed" and "orientation" (read in propagate()).
        self.sensors_std = sensors_std
        # Extra multiplicative gains on DVL (velocity) and IMU (orientation)
        # noise.
        self.dvl_noise_sigma_factor = dvl_noise_sigma_factor
        self.imu_noise_sigma_factor = imu_noise_sigma_factor
        # Most recent dead-reckoning sample, stored as a state-shaped vector.
        self.movement = np.zeros((Index.DIM, 1), dtype=float)
    def set_movement(self, value):
        # Copy a DR sample (depth/roll/pitch/yaw/velocities/altitude
        # attributes) into the movement vector; angles are converted from
        # degrees to radians.
        self.movement[Index.Z, 0] = value.depth
        self.movement[Index.ROLL, 0] = value.roll * math.pi / 180.0
        self.movement[Index.PITCH, 0] = value.pitch * math.pi / 180.0
        self.movement[Index.YAW, 0] = value.yaw * math.pi / 180.0
        self.movement[Index.VX, 0] = value.x_velocity
        self.movement[Index.VY, 0] = value.y_velocity
        self.movement[Index.VZ, 0] = value.z_velocity
        self.movement[Index.ALT, 0] = value.altitude
    def propagate(self, p, dt):
        """
        This is the main method of MovementModel. It takes a state reference as
        argument and is supposed to extract the state's variables and
        manipulate them. dt means delta t and defines the time in seconds that
        has passed since the last filter update.
        @param state Reference to the state that has to be manipulated.
        @param dt time that has passed since the last filter update in seconds.
        """
        depth_std_factor = self.sensors_std["position_z"]["factor"]
        depth_std_offset = self.sensors_std["position_z"]["offset"]
        velocity_std_factor = self.sensors_std["speed"]["factor"]
        velocity_std_offset = self.sensors_std["speed"]["offset"]
        imu_noise_std_offset = self.sensors_std["orientation"]["offset"]
        imu_noise_std_factor = self.sensors_std["orientation"]["factor"]
        k_dvl = self.dvl_noise_sigma_factor
        k_imu = self.imu_noise_sigma_factor
        # Sample a noisy version of a measured state entry: mean is the
        # measurement, sigma = |measurement| * factor + offset, scaled by
        # `gain`.
        def linear_noise(idx, factor, offset, gain=1.0):
            return (
                self.movement[idx, 0]
                + randn() * (self.movement[idx, 0] * factor + offset) * gain
            )
        # Propagate all states except for X and Y: those entries are
        # overwritten with the (noisy) measured values.
        p.state[Index.Z, 0] = linear_noise(
            Index.Z, depth_std_factor, depth_std_offset
        )
        p.state[Index.ROLL, 0] = linear_noise(
            Index.ROLL, imu_noise_std_factor, imu_noise_std_offset, k_imu
        )
        p.state[Index.PITCH, 0] = linear_noise(
            Index.PITCH, imu_noise_std_factor, imu_noise_std_offset, k_imu
        )
        p.state[Index.YAW, 0] = linear_noise(
            Index.YAW, imu_noise_std_factor, imu_noise_std_offset, k_imu
        )
        p.state[Index.VX, 0] = linear_noise(
            Index.VX, velocity_std_factor, velocity_std_offset, k_dvl
        )
        p.state[Index.VY, 0] = linear_noise(
            Index.VY, velocity_std_factor, velocity_std_offset, k_dvl
        )
        p.state[Index.VZ, 0] = linear_noise(
            Index.VZ, velocity_std_factor, velocity_std_offset, k_dvl
        )
        cr = math.cos(p.state[Index.ROLL, 0])
        sr = math.sin(p.state[Index.ROLL, 0])
        cp = math.cos(p.state[Index.PITCH, 0])
        sp = math.sin(p.state[Index.PITCH, 0])
        cy = math.cos(p.state[Index.YAW, 0])
        sy = math.sin(p.state[Index.YAW, 0])
        # Transition matrix: identity everywhere except that X/Y/Z are
        # advanced by the body velocities rotated into the world frame
        # through roll/pitch/yaw (the entries form the body-to-world
        # rotation matrix), multiplied by dt.
        f = np.eye(Index.DIM, dtype=float)
        f[Index.X, Index.VX] = cy * cp * dt
        f[Index.X, Index.VY] = (cy * sp * sr - sy * cr) * dt
        f[Index.X, Index.VZ] = (cy * sp * cr + sy * sr) * dt
        f[Index.Y, Index.VX] = sy * cp * dt
        f[Index.Y, Index.VY] = (sy * sp * sr + cy * cr) * dt
        f[Index.Y, Index.VZ] = (sy * sp * cr - cy * sr) * dt
        f[Index.Z, Index.VX] = -sp * dt
        f[Index.Z, Index.VY] = cp * sr * dt
        f[Index.Z, Index.VZ] = cp * cr * dt
        # Propagate the p.state forward and record the trajectory sample.
        p.state = f @ p.state
        p.time += dt
        p.trajectory.append(p.state)
        p.trajectory_time.append(p.time)
class ParticleFilter:
    """Sequential importance resampling (SIR) particle filter.

    Maintains a population of weighted Particle objects, propagates them
    through a movement model and re-weights them with an observation model.
    """

    def __init__(
        self,
        num_particles,
        movement_model,
        observation_model,
        expected_iterations=0,
    ):
        """
        Args:
            num_particles (int): Number of particles in the filter.
            movement_model: Provides propagate(particle, dt) and
                set_movement(value).
            observation_model: Provides measure(particle) and
                set_observation(value).
            expected_iterations (int, optional): Expected number of
                propagate() calls; the particle cloud is snapshotted
                roughly 20 times over that many iterations.
        """
        # BUGFIX: the original used [Particle()] * num_particles, which
        # fills the list with num_particles references to a *single*
        # Particle instance, so all particles shared state. Build
        # distinct instances instead.
        self.particles = [Particle() for _ in range(num_particles)]
        self.particles_history = []
        self.iteration = 0
        # Snapshot the cloud ~20 times across the expected run.
        self.iteration_step = int(float(expected_iterations) / 20.0)
        self.mm = movement_model
        self.om = observation_model
        for p in self.particles:
            p.weight = 1.0 / float(num_particles)
        self.particles_history.append(self.particles)

    def __str__(self):
        # NOTE(review): this prints p.x / p.y / p.theta, while the rest of
        # this module stores the pose in p.state -- confirm Particle
        # actually exposes these attributes.
        a = (
            "Particle Filter with "
            + str(len(self.particles))
            + " particles.\n"
        )
        for i, p in enumerate(self.particles):
            a += " Particle " + str(i) + "\n"
            a += (
                " (x, y, theta) = ("
                + str(p.x)
                + ", "
                + str(p.y)
                + ", "
                + str(p.theta)
                + ")\n"
            )
            a += " w = " + str(p.weight) + "\n"
        return a

    def set_prior(self, prior):
        """Initialise every particle as an independent deep copy of the prior."""
        for i in range(len(self.particles)):
            self.particles[i] = copy.deepcopy(prior)
            self.particles[i].weight = 1.0 / float(len(self.particles))

    def set_observation(self, value):
        """Forward the latest observation to the observation model."""
        self.om.set_observation(value)

    def set_movement(self, value):
        """Forward the latest movement reading to the movement model."""
        self.mm.set_movement(value)

    def should_resample(self):
        """Resample when the effective particle count drops below half."""
        return self.get_neff() < (len(self.particles) / 2.0)

    def propagate(self, dt):
        """Advance every particle by dt; periodically snapshot the cloud."""
        for p in self.particles:
            self.mm.propagate(p, dt)
        if self.iteration == self.iteration_step:
            self.particles_history.append(copy.deepcopy(self.particles))
            self.iteration = 0
        self.iteration += 1

    def measure(self):
        """Re-weight particles with the observation model and normalize.

        Sorting keeps the highest-weight particle at index 0, which the
        caller uses as the best estimate.
        """
        for p in self.particles:
            p.weight *= self.om.measure(p)
        self.particles.sort(reverse=True)
        self.normalize()

    def normalize(self):
        """Scale all particle weights so they sum to one."""
        norm = np.sum([p.weight for p in self.particles])
        # Avoid division by zero for a fully degenerate population.
        if norm < 1e-20:
            norm += 1e-20
        for p in self.particles:
            p.weight = p.weight / norm

    def resample(self):
        """Importance (low-variance / systematic) resampling."""
        inverse_num = 1.0 / len(self.particles)
        # Random start within the first CDF slot.
        start = uniform() * inverse_num
        cumulative_weight = 0.0
        # Index of the particle currently being drawn from.
        source_index = 0
        cumulative_weight += self.particles[source_index].weight
        new_particles = [None] * len(self.particles)
        for dest_index, p in enumerate(self.particles):
            # Amount of cumulative weight this slot must reach.
            prob_sum = start + inverse_num * dest_index
            # Walk the CDF until the target sum is reached.
            while prob_sum > cumulative_weight:
                source_index += 1
                if source_index >= len(self.particles):
                    source_index = len(self.particles) - 1
                    break
                cumulative_weight += self.particles[source_index].weight
            # Deep copy so resampled clones evolve independently.
            new_particles[dest_index] = copy.deepcopy(
                self.particles[source_index]
            )
        # Replace the population with the resampled one.
        self.particles = new_particles

    def get_neff(self):
        """Return the effective number of particles, 1 / sum(w_i^2)."""
        weights = [p.weight for p in self.particles]
        return 1.0 / np.sum(np.square(weights))
def ParticleToSyncedOrientationBodyVelocity(p):
    """Convert a particle's trajectory into SyncedOrientationBodyVelocity
    messages, one per stored (timestamp, state) pair. Angles are converted
    from radians to degrees."""
    messages = []
    for stamp, state in zip(p.trajectory_time, p.trajectory):
        msg = SyncedOrientationBodyVelocity()
        msg.epoch_timestamp = stamp
        msg.northings = state[Index.X, 0]
        msg.eastings = state[Index.Y, 0]
        msg.depth = state[Index.Z, 0]
        msg.roll = state[Index.ROLL, 0] * 180.0 / math.pi
        msg.pitch = state[Index.PITCH, 0] * 180.0 / math.pi
        msg.yaw = state[Index.YAW, 0] * 180.0 / math.pi
        msg.x_velocity = state[Index.VX, 0]
        msg.y_velocity = state[Index.VY, 0]
        msg.z_velocity = state[Index.VZ, 0]
        msg.altitude = state[Index.ALT, 0]
        messages.append(msg)
    return messages
def get_prior(dr_list, usbl_list):
    """Build the prior particle state from the first DR fix, offset by the
    mean DR-vs-USBL displacement.

    Returns:
        (prior Particle, dr_index, usbl_index) where the indices mark
        where the main filter loop should start consuming each list.
    """
    dr_index = 0
    # Interpolate DR to USBL updates
    dr_eastings = []
    dr_northings = []
    for i in range(len(usbl_list)):
        usbl_t = usbl_list[i].epoch_timestamp
        dr_t = dr_list[dr_index + 1].epoch_timestamp
        # Advance DR until it brackets the current USBL timestamp.
        while dr_index < len(dr_list) - 2 and usbl_t > dr_t:
            usbl_t = usbl_list[i].epoch_timestamp
            dr_t = dr_list[dr_index + 1].epoch_timestamp
            dr_index += 1
        dr_eastings.append(
            interpolate(
                usbl_list[i].epoch_timestamp,
                dr_list[dr_index].epoch_timestamp,
                dr_list[dr_index + 1].epoch_timestamp,
                dr_list[dr_index].eastings,
                dr_list[dr_index + 1].eastings,
            )
        )
        dr_northings.append(
            interpolate(
                usbl_list[i].epoch_timestamp,
                dr_list[dr_index].epoch_timestamp,
                dr_list[dr_index + 1].epoch_timestamp,
                dr_list[dr_index].northings,
                dr_list[dr_index + 1].northings,
            )
        )
    usbl_eastings = [i.eastings for i in usbl_list]
    usbl_northings = [i.northings for i in usbl_list]
    # Mean displacement of USBL relative to DR over all matched pairs.
    eastings_error = [y - x for x, y in zip(dr_eastings, usbl_eastings)]
    northings_error = [y - x for x, y in zip(dr_northings, usbl_northings)]
    eastings_mean = np.mean(eastings_error)
    northings_mean = np.mean(northings_error)
    dr_index = 0
    usbl_index = 0
    usbl_t = usbl_list[usbl_index].epoch_timestamp
    dr_t = dr_list[usbl_index].epoch_timestamp
    # NOTE(review): both loops below read dr_list[usbl_index] for dr_t;
    # this looks like it was meant to be dr_list[dr_index] (first loop) --
    # as written, the first loop compares against a fixed dr_t and the
    # dr_index it produces is discarded by the reset below anyway.
    # Confirm the intended alignment logic.
    while dr_index < len(dr_list) and usbl_t > dr_t:
        usbl_t = usbl_list[usbl_index].epoch_timestamp
        dr_t = dr_list[usbl_index].epoch_timestamp
        dr_index += 1
    while usbl_index < len(usbl_list) and usbl_t < dr_t:
        usbl_t = usbl_list[usbl_index].epoch_timestamp
        dr_t = dr_list[usbl_index].epoch_timestamp
        usbl_index += 1
    # Fix DR to index zero
    dr_index = 0
    # Build state from first known USBL and DR, and use that displacement
    # error at the start of DR.
    x = dr_list[dr_index].northings + northings_mean
    y = dr_list[dr_index].eastings + eastings_mean
    z = dr_list[dr_index].depth
    alt = dr_list[dr_index].altitude
    # Angles are stored in degrees in the DR list; convert to radians.
    roll = dr_list[dr_index].roll * math.pi / 180.0
    pitch = dr_list[dr_index].pitch * math.pi / 180.0
    heading = dr_list[dr_index].yaw * math.pi / 180.0
    vx = dr_list[dr_index].x_velocity
    vy = dr_list[dr_index].y_velocity
    vz = dr_list[dr_index].z_velocity
    prior = Particle()
    # NOTE(review): the mean offsets added to x/y above are subtracted
    # again here, so the prior position equals the raw first DR fix.
    prior.state = np.array(
        [
            [x - northings_mean],
            [y - eastings_mean],
            [z],
            [roll],
            [pitch],
            [heading],
            [vx],
            [vy],
            [vz],
            [alt],
        ]
    )
    prior.time = dr_list[0].epoch_timestamp
    return prior, dr_index, usbl_index
def run_particle_filter(
    usbl_list,
    dr_list,
    num_particles,
    sensors_std,
    dvl_noise_sigma_factor,
    imu_noise_sigma_factor,
    usbl_noise_sigma_factor,
    measurement_update_flag=True,
):
    """Execute the particle filter over the dataset
    Args:
        usbl_list (list): List of USBL measurements
        dr_list (list): List of DR measurements
        num_particles (int): Number of particles
        sensors_std (list): List of sensors standard deviations
        dvl_noise_sigma_factor (float): DVL noise std multiplication factor
        imu_noise_sigma_factor (float): IMU noise std multiplication factor
        usbl_noise_sigma_factor (float): USBL noise std multiplication factor
        measurement_update_flag (bool, optional): Whether to perform updates
            or not.
    Returns:
        List: List containing at position
            0: Output PF localisation
            1: USBL data points used in updates
            2: List of particles over time
            3: Northings STD
            4: Eastings STD
            5: Yaw STD
    """
    # NOTE(review): measurement_update_flag is accepted but never read in
    # this function -- USBL updates are always applied. Confirm intent.
    Console.info("Running Particle Filter with:")
    Console.info("\t* Number of particles: {}".format(num_particles))
    Console.info(
        "\t* DVL noise std: f(x)={}x+{} m/s".format(
            sensors_std["speed"]["factor"], sensors_std["speed"]["offset"]
        )
    )
    Console.info(
        "\t* IMU noise std: f(x)={}x+{} deg".format(
            sensors_std["orientation"]["factor"],
            sensors_std["orientation"]["offset"],
        )
    )
    Console.info(
        "\t* Depth noise std: f(x)={}x+{} meters".format(
            sensors_std["position_z"]["factor"],
            sensors_std["position_z"]["offset"],
        )
    )
    Console.info(
        "\t* USBL noise std: f(x)={}x+{} meters".format(
            sensors_std["position_xy"]["factor"],
            sensors_std["position_xy"]["offset"],
        )
    )
    Console.info("Running {} iterations...".format(len(dr_list)))
    # Initial state and list offsets derived from the first DR/USBL fixes.
    prior, dr_idx, usbl_idx = get_prior(dr_list, usbl_list)
    om = UsblObservationModel(usbl_noise_sigma_factor)
    mm = DeadReckoningMovementModel(
        sensors_std, dvl_noise_sigma_factor, imu_noise_sigma_factor
    )
    pf = ParticleFilter(
        num_particles, mm, om, expected_iterations=len(dr_list)
    )
    pf.set_prior(prior)
    last_t = dr_list[dr_idx].epoch_timestamp
    resampled_usbl_list = []
    # Merge the DR and USBL streams in timestamp order: DR entries drive
    # prediction steps, USBL entries drive measurement updates.
    # Loop through all DR
    while dr_idx < len(dr_list):
        Console.progress(dr_idx, len(dr_list))
        dr_stamp = dr_list[dr_idx].epoch_timestamp
        if usbl_idx < len(usbl_list):
            usbl_stamp = usbl_list[usbl_idx].epoch_timestamp
        else:
            # Fake a posterior USBL measurement to force PF to read DR
            usbl_stamp = dr_stamp + 1
        if dr_stamp < usbl_stamp:
            # Compute delta t
            dt = dr_list[dr_idx].epoch_timestamp - last_t
            # Set the current movement
            pf.set_movement(dr_list[dr_idx])
            # and propagate the filter
            pf.propagate(dt)
            last_t = dr_list[dr_idx].epoch_timestamp
            dr_idx += 1
        elif usbl_idx < len(usbl_list):
            # Compute delta t
            dt = usbl_list[usbl_idx].epoch_timestamp - last_t
            # Set the measurement
            pf.set_observation(usbl_list[usbl_idx])
            # Propagate
            pf.propagate(dt)
            # And measure. Resample if NEFF > 0.5
            pf.measure()
            if pf.should_resample():
                pf.resample()
                resampled_usbl_list.append(usbl_list[usbl_idx])
            last_t = usbl_list[usbl_idx].epoch_timestamp
            usbl_idx += 1
    # Find best particle (measure() sorts highest-weight first)
    best_particle = pf.particles[0]
    # Extract trajectory from it
    pf_list = ParticleToSyncedOrientationBodyVelocity(best_particle)
    # Get remaining bits
    particles_list = pf.particles_history
    # TODO: Compute std
    northings_std = []
    eastings_std = []
    yaw_std = []
    print("Resampled {} times.".format(len(resampled_usbl_list)))
    return [
        pf_list,
        resampled_usbl_list,
        particles_list,
        northings_std,
        eastings_std,
        yaw_std,
    ]
| [
"numpy.random.uniform",
"math.exp",
"copy.deepcopy",
"numpy.sum",
"math.sqrt",
"numpy.random.randn",
"numpy.square",
"numpy.zeros",
"math.sin",
"auv_nav.sensors.SyncedOrientationBodyVelocity",
"numpy.mean",
"numpy.array",
"math.cos",
"auv_nav.tools.interpolate.interpolate",
"oplab.Consol... | [((698, 735), 'math.sqrt', 'math.sqrt', (['(2.0 * math.pi * sigma ** 2)'], {}), '(2.0 * math.pi * sigma ** 2)\n', (707, 735), False, 'import math\n'), ((12914, 12937), 'numpy.mean', 'np.mean', (['eastings_error'], {}), '(eastings_error)\n', (12921, 12937), True, 'import numpy as np\n'), ((12959, 12983), 'numpy.mean', 'np.mean', (['northings_error'], {}), '(northings_error)\n', (12966, 12983), True, 'import numpy as np\n'), ((14128, 14243), 'numpy.array', 'np.array', (['[[x - northings_mean], [y - eastings_mean], [z], [roll], [pitch], [heading],\n [vx], [vy], [vz], [alt]]'], {}), '([[x - northings_mean], [y - eastings_mean], [z], [roll], [pitch],\n [heading], [vx], [vy], [vz], [alt]])\n', (14136, 14243), True, 'import numpy as np\n'), ((15564, 15609), 'oplab.Console.info', 'Console.info', (['"""Running Particle Filter with:"""'], {}), "('Running Particle Filter with:')\n", (15576, 15609), False, 'from oplab import Console\n'), ((749, 762), 'math.exp', 'math.exp', (['num'], {}), '(num)\n', (757, 762), False, 'import math\n'), ((1101, 1138), 'numpy.zeros', 'np.zeros', (['(Index.DIM, 1)'], {'dtype': 'float'}), '((Index.DIM, 1), dtype=float)\n', (1109, 1138), True, 'import numpy as np\n'), ((2134, 2223), 'math.sqrt', 'math.sqrt', (['(value.northings_std ** 2 + value.eastings_std ** 2 + value.depth_std ** 2)'], {}), '(value.northings_std ** 2 + value.eastings_std ** 2 + value.\n depth_std ** 2)\n', (2143, 2223), False, 'import math\n'), ((3667, 3704), 'numpy.zeros', 'np.zeros', (['(Index.DIM, 1)'], {'dtype': 'float'}), '((Index.DIM, 1), dtype=float)\n', (3675, 3704), True, 'import numpy as np\n'), ((6349, 6381), 'math.cos', 'math.cos', (['p.state[Index.ROLL, 0]'], {}), '(p.state[Index.ROLL, 0])\n', (6357, 6381), False, 'import math\n'), ((6395, 6427), 'math.sin', 'math.sin', (['p.state[Index.ROLL, 0]'], {}), '(p.state[Index.ROLL, 0])\n', (6403, 6427), False, 'import math\n'), ((6441, 6474), 'math.cos', 'math.cos', (['p.state[Index.PITCH, 0]'], {}), 
'(p.state[Index.PITCH, 0])\n', (6449, 6474), False, 'import math\n'), ((6488, 6521), 'math.sin', 'math.sin', (['p.state[Index.PITCH, 0]'], {}), '(p.state[Index.PITCH, 0])\n', (6496, 6521), False, 'import math\n'), ((6535, 6566), 'math.cos', 'math.cos', (['p.state[Index.YAW, 0]'], {}), '(p.state[Index.YAW, 0])\n', (6543, 6566), False, 'import math\n'), ((6580, 6611), 'math.sin', 'math.sin', (['p.state[Index.YAW, 0]'], {}), '(p.state[Index.YAW, 0])\n', (6588, 6611), False, 'import math\n'), ((6625, 6655), 'numpy.eye', 'np.eye', (['Index.DIM'], {'dtype': 'float'}), '(Index.DIM, dtype=float)\n', (6631, 6655), True, 'import numpy as np\n'), ((9370, 9379), 'numpy.sum', 'np.sum', (['s'], {}), '(s)\n', (9376, 9379), True, 'import numpy as np\n'), ((11009, 11040), 'auv_nav.sensors.SyncedOrientationBodyVelocity', 'SyncedOrientationBodyVelocity', ([], {}), '()\n', (11038, 11040), False, 'from auv_nav.sensors import SyncedOrientationBodyVelocity\n'), ((2840, 2967), 'math.sqrt', 'math.sqrt', (['((self.x - p.state[Index.X, 0]) ** 2 + (self.y - p.state[Index.Y, 0]) ** 2 +\n (self.z - p.state[Index.Z, 0]) ** 2)'], {}), '((self.x - p.state[Index.X, 0]) ** 2 + (self.y - p.state[Index.Y, \n 0]) ** 2 + (self.z - p.state[Index.Z, 0]) ** 2)\n', (2849, 2967), False, 'import math\n'), ((8497, 8517), 'copy.deepcopy', 'copy.deepcopy', (['prior'], {}), '(prior)\n', (8510, 8517), False, 'import copy\n'), ((9691, 9700), 'numpy.random.uniform', 'uniform', ([], {}), '()\n', (9698, 9700), False, 'from numpy.random import randn, uniform\n'), ((10542, 10585), 'copy.deepcopy', 'copy.deepcopy', (['self.particles[source_index]'], {}), '(self.particles[source_index])\n', (10555, 10585), False, 'import copy\n'), ((12034, 12217), 'auv_nav.tools.interpolate.interpolate', 'interpolate', (['usbl_list[i].epoch_timestamp', 'dr_list[dr_index].epoch_timestamp', 'dr_list[dr_index + 1].epoch_timestamp', 'dr_list[dr_index].eastings', 'dr_list[dr_index + 1].eastings'], {}), '(usbl_list[i].epoch_timestamp, 
dr_list[dr_index].epoch_timestamp,\n dr_list[dr_index + 1].epoch_timestamp, dr_list[dr_index].eastings,\n dr_list[dr_index + 1].eastings)\n', (12045, 12217), False, 'from auv_nav.tools.interpolate import interpolate\n'), ((12356, 12541), 'auv_nav.tools.interpolate.interpolate', 'interpolate', (['usbl_list[i].epoch_timestamp', 'dr_list[dr_index].epoch_timestamp', 'dr_list[dr_index + 1].epoch_timestamp', 'dr_list[dr_index].northings', 'dr_list[dr_index + 1].northings'], {}), '(usbl_list[i].epoch_timestamp, dr_list[dr_index].epoch_timestamp,\n dr_list[dr_index + 1].epoch_timestamp, dr_list[dr_index].northings,\n dr_list[dr_index + 1].northings)\n', (12367, 12541), False, 'from auv_nav.tools.interpolate import interpolate\n'), ((9025, 9054), 'copy.deepcopy', 'copy.deepcopy', (['self.particles'], {}), '(self.particles)\n', (9038, 9054), False, 'import copy\n'), ((10854, 10872), 'numpy.square', 'np.square', (['weights'], {}), '(weights)\n', (10863, 10872), True, 'import numpy as np\n'), ((5333, 5340), 'numpy.random.randn', 'randn', ([], {}), '()\n', (5338, 5340), False, 'from numpy.random import randn, uniform\n')] |
import datetime as dt
import os
import sys
from multiprocessing import Pool
import numpy as np
from scipy.interpolate import griddata
from shutil import copy2, rmtree
import subprocess
#from subprocess import call
from time import time
#import pdb; pdb.set_trace()
#os.environ["OMP_NUM_THREADS"] = "1"
'''
three operations
1. write inputs
2. run simul
3. read input
'''
class Model:
    """Wrapper around an external E4D forward simulation.

    Three operations per run:
      1. write inputs (conductivity field interpolated onto the mesh)
      2. run the simulation (mpirun + e4d)
      3. read the output (predicted data in agu.dpd)
    """

    def __init__(self, params=None):
        """
        Args:
            params (dict, optional): Configuration. Required keys:
                'xyz', 'xyzout', 'xyzoutval', 'elexyz' (interpolation
                geometry). Optional keys: 'deletedir', 'homedir',
                'inputdir', 'ncores', 'parallel'.
        Raises:
            NameError: If any required geometry key is missing.
        """
        self.idx = 0
        self.homedir = os.path.abspath('./')
        self.inputdir = os.path.abspath(os.path.join(self.homedir, "./input_files"))
        self.deletedir = True
        self.outputdir = None
        self.parallel = False
        from psutil import cpu_count  # physical cpu counts
        self.ncores = cpu_count(logical=False)

        if params is not None:
            if 'deletedir' in params:
                self.deletedir = params['deletedir']
            if 'homedir' in params:
                self.homedir = params['homedir']
                self.inputdir = os.path.abspath(os.path.join(self.homedir, "./input_files"))
            if 'inputdir' in params:
                self.inputdir = params['inputdir']
            if 'ncores' in params:
                self.ncores = params['ncores']
            if 'parallel' in params:
                self.parallel = params['parallel']
            # Small-domain interpolation geometry (required).
            if 'xyz' in params:
                self.xyz = params['xyz']
            else:
                raise NameError('xyz is not defined')
            if 'xyzout' in params:
                self.xyzout = params['xyzout']
            else:
                raise NameError('xyzout is not defined')
            if 'xyzoutval' in params:
                self.xyzoutval = params['xyzoutval']
            else:
                raise NameError('xyzoutval is not defined')
            if 'elexyz' in params:
                self.elexyz = params['elexyz']
            else:
                raise NameError('coord is not defined')

    def create_dir(self, idx=None):
        """Create ./simul/simulNNNN and populate it with the input files.

        Returns the absolute path of the created directory.
        """
        mydirbase = "./simul/simul"
        if idx is None:
            idx = self.idx
        mydir = mydirbase + "{0:04d}".format(idx)
        mydir = os.path.abspath(os.path.join(self.homedir, mydir))
        if not os.path.exists(mydir):
            os.makedirs(mydir)
        for filename in os.listdir(self.inputdir):
            copy2(os.path.join(self.inputdir, filename), mydir)
        return mydir

    def cleanup(self, outputdir=None):
        """
        Removes outputdir if specified. Otherwise removes all output files
        in the current working directory.
        """
        import shutil
        import glob
        log = "dummy.log"
        if os.path.exists(log):
            os.remove(log)
        if outputdir is not None and outputdir != os.getcwd():
            if os.path.exists(outputdir):
                shutil.rmtree(outputdir)
        else:
            filelist = glob.glob("*.out")
            filelist += glob.glob("*.sim")
            for file in filelist:
                os.remove(file)

    def interp3d(self, s):
        """Nearest-neighbour interpolation of field s onto the mesh.

        The known points are self.xyz (values s) plus self.xyzout
        (constant value self.xyzoutval); targets are self.elexyz.
        """
        source_coord = np.vstack([self.xyz, self.xyzout])
        s_all = np.vstack([s, self.xyzoutval * np.ones((self.xyzout.shape[0], 1))])
        target_coord = self.elexyz
        s_interp = griddata(source_coord, s_all, target_coord, method='nearest')
        return s_interp

    def run_model(self, s, idx=0):
        """Run one forward simulation for field s in directory simulNNNN.

        Returns the simulated observations (last column of agu.dpd).
        """
        sim_dir = self.create_dir(idx)
        os.chdir(sim_dir)
        # BUGFIX: always restore the working directory, even when the
        # simulation or the output parsing raises.
        try:
            s_interp = self.interp3d(s)
            # E4D expects log-conductivities exponentiated into agu.sig.
            with open("agu.sig", "w") as f:
                f.write("%d 1\n" % (s_interp.shape[0]))
                for i in range(s_interp.shape[0]):
                    f.write("%e\n" % (np.exp(s_interp[i])))
            # BUGFIX: use subprocess.DEVNULL instead of leaking an open
            # /dev/null file handle.
            subprocess.call(
                ["./mpirun", "-np", "6", "--bind-to", "none", "e4d"],
                stdout=subprocess.DEVNULL,
            )
            output = np.loadtxt('agu.dpd', skiprows=1)
            simul_obs = output[:, -1]
        finally:
            os.chdir(self.homedir)
        if self.deletedir:
            rmtree(sim_dir, ignore_errors=True)
        return simul_obs

    def run(self, s, par, ncores=None):
        """Run the model for every column of s.

        Args:
            s: (m, n) array; each column is one realization.
            par (bool): Run realizations in parallel.
            ncores (int, optional): Worker count; defaults to self.ncores.
        Returns:
            (n_obs, n) array of simulated observations.
        """
        if ncores is None:
            ncores = self.ncores
        args_map = [(s[:, i:i + 1], i) for i in range(s.shape[1])]
        if par:
            # BUGFIX: close/join the worker pool (the original leaked it).
            with Pool(processes=ncores) as pool:
                simul_obs = pool.map(self, args_map)
        else:
            simul_obs = [self(item) for item in args_map]
        return np.array(simul_obs).T

    def __call__(self, args):
        """Adapter so Pool.map can dispatch (field, index) tuples."""
        return self.run_model(args[0], args[1])
if __name__ == '__main__':
    import ert
    import numpy as np
    from time import time

    s = np.loadtxt('true_all.txt')
    # s = np.loadtxt("true.txt")
    s = s.reshape(-1, 1)
    # Total number of model cells (91 x 91 x 31 grid).
    # BUGFIX: m was commented out in the original but still used below,
    # which raised a NameError at runtime.
    m = 91 * 91 * 31
    xyz = np.load('xyz.npy')
    xyzout = np.load('xyzout.npy')
    xyzoutval = -4.6052
    # Overwrite the loaded field with a constant background value.
    s = xyzoutval * np.ones((m, 1))
    elexyz = np.load('elexyz.npy')
    # NOTE(review): Model.__init__ looks for the key 'inputdir', so the
    # 'input_dir' entry here is silently ignored (the default path is
    # identical, so behavior is unchanged) -- confirm which key is meant.
    params = {'deletedir': False, 'input_dir': './input_files/', 'xyz': xyz, \
              'xyzout': xyzout, 'elexyz': elexyz, 'xyzoutval': xyzoutval}

    par = False  # parallelization false
    mymodel = ert.Model(params)

    print('(1) single run')
    stime = time()
    simul_obs = mymodel.run(s, par)
    print('simulation run: %f sec' % (time() - stime))
    obs = simul_obs + 0.1 * np.random.randn(simul_obs.shape[0], simul_obs.shape[1])
    # np.savetxt('obs.txt', obs)

    import sys
    sys.exit(0)

    # NOTE: everything below is unreachable because of sys.exit(0) above;
    # remove the exit to run the parallel demo.
    ncores = 2
    nrelzs = 2
    print('(2) parallel run with ncores = %d' % ncores)
    par = True  # parallelization true
    srelz = np.zeros((np.size(s, 0), nrelzs), 'd')
    for i in range(nrelzs):
        srelz[:, i:i + 1] = s + 0.1 * np.random.randn(np.size(s, 0), 1)
    simul_obs_all = mymodel.run(srelz, par, ncores=ncores)
    print(simul_obs_all)
    # use all the physical cores if not specify ncores
    # print('(3) parallel run with all the physical cores')
    # simul_obs_all = mymodel.run(srelz, par)
    # print(simul_obs_all)
| [
"numpy.load",
"os.remove",
"numpy.ones",
"numpy.exp",
"glob.glob",
"shutil.rmtree",
"os.path.join",
"os.chdir",
"psutil.cpu_count",
"os.path.abspath",
"numpy.random.randn",
"os.path.exists",
"numpy.loadtxt",
"numpy.size",
"scipy.interpolate.griddata",
"multiprocessing.Pool",
"os.list... | [((5314, 5340), 'numpy.loadtxt', 'np.loadtxt', (['"""true_all.txt"""'], {}), "('true_all.txt')\n", (5324, 5340), True, 'import numpy as np\n'), ((5432, 5450), 'numpy.load', 'np.load', (['"""xyz.npy"""'], {}), "('xyz.npy')\n", (5439, 5450), True, 'import numpy as np\n'), ((5465, 5486), 'numpy.load', 'np.load', (['"""xyzout.npy"""'], {}), "('xyzout.npy')\n", (5472, 5486), True, 'import numpy as np\n'), ((5566, 5587), 'numpy.load', 'np.load', (['"""elexyz.npy"""'], {}), "('elexyz.npy')\n", (5573, 5587), True, 'import numpy as np\n'), ((5796, 5813), 'ert.Model', 'ert.Model', (['params'], {}), '(params)\n', (5805, 5813), False, 'import ert\n'), ((5885, 5891), 'time.time', 'time', ([], {}), '()\n', (5889, 5891), False, 'from time import time\n'), ((6125, 6136), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6133, 6136), False, 'import sys\n'), ((492, 513), 'os.path.abspath', 'os.path.abspath', (['"""./"""'], {}), "('./')\n", (507, 513), False, 'import os\n'), ((786, 810), 'psutil.cpu_count', 'cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (795, 810), False, 'from psutil import cpu_count\n'), ((2667, 2692), 'os.listdir', 'os.listdir', (['self.inputdir'], {}), '(self.inputdir)\n', (2677, 2692), False, 'import os\n'), ((3058, 3077), 'os.path.exists', 'os.path.exists', (['log'], {}), '(log)\n', (3072, 3077), False, 'import os\n'), ((3520, 3554), 'numpy.vstack', 'np.vstack', (['[self.xyz, self.xyzout]'], {}), '([self.xyz, self.xyzout])\n', (3529, 3554), True, 'import numpy as np\n'), ((3693, 3754), 'scipy.interpolate.griddata', 'griddata', (['source_coord', 's_all', 'target_coord'], {'method': '"""nearest"""'}), "(source_coord, s_all, target_coord, method='nearest')\n", (3701, 3754), False, 'from scipy.interpolate import griddata\n'), ((3871, 3888), 'os.chdir', 'os.chdir', (['sim_dir'], {}), '(sim_dir)\n', (3879, 3888), False, 'import os\n'), ((4326, 4359), 'numpy.loadtxt', 'np.loadtxt', (['"""agu.dpd"""'], {'skiprows': '(1)'}), "('agu.dpd', 
skiprows=1)\n", (4336, 4359), True, 'import numpy as np\n'), ((4412, 4434), 'os.chdir', 'os.chdir', (['self.homedir'], {}), '(self.homedir)\n', (4420, 4434), False, 'import os\n'), ((5531, 5546), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (5538, 5546), True, 'import numpy as np\n'), ((555, 598), 'os.path.join', 'os.path.join', (['self.homedir', '"""./input_files"""'], {}), "(self.homedir, './input_files')\n", (567, 598), False, 'import os\n'), ((2516, 2549), 'os.path.join', 'os.path.join', (['self.homedir', 'mydir'], {}), '(self.homedir, mydir)\n', (2528, 2549), False, 'import os\n'), ((2577, 2598), 'os.path.exists', 'os.path.exists', (['mydir'], {}), '(mydir)\n', (2591, 2598), False, 'import os\n'), ((2613, 2631), 'os.makedirs', 'os.makedirs', (['mydir'], {}), '(mydir)\n', (2624, 2631), False, 'import os\n'), ((3092, 3106), 'os.remove', 'os.remove', (['log'], {}), '(log)\n', (3101, 3106), False, 'import os\n'), ((3187, 3212), 'os.path.exists', 'os.path.exists', (['outputdir'], {}), '(outputdir)\n', (3201, 3212), False, 'import os\n'), ((3295, 3313), 'glob.glob', 'glob.glob', (['"""*.out"""'], {}), "('*.out')\n", (3304, 3313), False, 'import glob\n'), ((3339, 3357), 'glob.glob', 'glob.glob', (['"""*.sim"""'], {}), "('*.sim')\n", (3348, 3357), False, 'import glob\n'), ((4486, 4521), 'shutil.rmtree', 'rmtree', (['sim_dir'], {'ignore_errors': '(True)'}), '(sim_dir, ignore_errors=True)\n', (4492, 4521), False, 'from shutil import copy2, rmtree\n'), ((4841, 4863), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'ncores'}), '(processes=ncores)\n', (4845, 4863), False, 'from multiprocessing import Pool\n'), ((5055, 5074), 'numpy.array', 'np.array', (['simul_obs'], {}), '(simul_obs)\n', (5063, 5074), True, 'import numpy as np\n'), ((6011, 6066), 'numpy.random.randn', 'np.random.randn', (['simul_obs.shape[0]', 'simul_obs.shape[1]'], {}), '(simul_obs.shape[0], simul_obs.shape[1])\n', (6026, 6066), True, 'import numpy as np\n'), ((6297, 6310), 'numpy.size', 
'np.size', (['s', '(0)'], {}), '(s, 0)\n', (6304, 6310), True, 'import numpy as np\n'), ((2713, 2750), 'os.path.join', 'os.path.join', (['self.inputdir', 'filename'], {}), '(self.inputdir, filename)\n', (2725, 2750), False, 'import os\n'), ((3158, 3169), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3167, 3169), False, 'import os\n'), ((3231, 3255), 'shutil.rmtree', 'shutil.rmtree', (['outputdir'], {}), '(outputdir)\n', (3244, 3255), False, 'import shutil\n'), ((3424, 3439), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (3433, 3439), False, 'import os\n'), ((5967, 5973), 'time.time', 'time', ([], {}), '()\n', (5971, 5973), False, 'from time import time\n'), ((1075, 1118), 'os.path.join', 'os.path.join', (['self.homedir', '"""./input_files"""'], {}), "(self.homedir, './input_files')\n", (1087, 1118), False, 'import os\n'), ((3599, 3633), 'numpy.ones', 'np.ones', (['(self.xyzout.shape[0], 1)'], {}), '((self.xyzout.shape[0], 1))\n', (3606, 3633), True, 'import numpy as np\n'), ((6402, 6415), 'numpy.size', 'np.size', (['s', '(0)'], {}), '(s, 0)\n', (6409, 6415), True, 'import numpy as np\n'), ((4105, 4124), 'numpy.exp', 'np.exp', (['s_interp[i]'], {}), '(s_interp[i])\n', (4111, 4124), True, 'import numpy as np\n')] |
import pyBigWig
import os
from os import path
import pandas as pd
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from lxml import html
import requests
import statistics
import gzip
import shutil
from Bio import motifs
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
#from primer3plus import Design
VERSION = 0.6
# Version 1.0 Feature TODO list:
# confirm accurate coordinates for motifs
# confirm position / other attriutes of jaspar scanner
# remove references to primer generation
# convert to new plot mechanic
class EnhancerScan:
""" Class to handle scanning of bigwig files. """
    def __init__(self):
        """Initialise an empty scanner and report locally available tracks.

        Takes no arguments; a genome and tracks are attached later via
        load_track() or load_bed().
        """
        # Widen pandas display limits so result tables are not truncated.
        pd.options.display.max_rows = 4000
        pd.options.display.max_columns = 4000
        # Primary bigwig track and its cached value range.
        self.track1_name = None
        self.track1_bw = None
        self.track1_values = None
        self.track1_min_value = None
        self.track1_max_value = None
        # Optional second track used for track-vs-track comparisons.
        self.track2_name = None
        self.track2_bw = None
        self.track2_values = None
        self.track2_min_value = None
        self.track2_max_value = None
        self.track_comparison_type = None
        self.track_plot_header = None
        # Genome assembly name and the currently selected region.
        self.genome = None
        self.chromosome = None
        self.region_start = None
        self.region_stop = None
        self.region_values = None
        # Region summary statistics, filled in by the scan.
        self.region_max_value = None
        self.region_mean_value = None
        self.region_median_value = None
        self.all_detected_peaks = None
        self.auto_threshold = None
        self.df_results = None
        self.df_motifs = None
        self.reset_results() # also generates a new dataframe for self.df_results and self.df_motifs
        ### Streamlit App functionality
        self.last_figure = None # conforms to streamlit depreciating global pyplot functionality
        print("pyEnhancerScanner version " + str(VERSION) + " beta\n")
        print("The following tracks are locally available:")
        for track in self.list_tracks(): print(track)
        print("")
        print("To download additional tracks, use the download_tracks() function or list them with list_external_tracks().")
def list_external_tracks(self):
""" Alias for download"""
print('External Track List:')
df_quicklist = pd.read_csv('external_tracks.db', delimiter=',', header=0)
#pd.set_option('display.max_colwidth', None) # so it doesnt truncate columns
#print(df_quicklist.loc[:, :'Size'])
#print("")
print("To download one of these tracks, use download_tracks(track_num=X) where X is the track number / index.")
print("You can specify your own download url by download_tracks(url='X').")
return df_quicklist
    def load_track(self, genome, track1, track2=None, track_comparison_type=None, reset=True):
        """Open one (optionally two) bigwig tracks and cache their value ranges.

        Args:
            genome: Genome assembly name the tracks are aligned to.
            track1: Path to the primary bigwig file.
            track2: Optional second bigwig file for comparisons.
            track_comparison_type: How track2 is combined with track1;
                required whenever track2 is given.
            reset: When True, clear any previous scan results first.
        """
        if reset is True:
            self.reset_results()
        if track1 is not None:
            self.genome = genome
            self.track1_name = track1
            self.track1_bw = pyBigWig.open(track1)
            header = self.track1_bw.header()
            self.track1_min_value = header['minVal']
            self.track1_max_value = header['maxVal']
            self.track_plot_header = track1
        if track2 is not None and track_comparison_type is not None:
            self.track2_name = track2
            self.track2_bw = pyBigWig.open(track2)
            track2_header = self.track2_bw.header()
            self.track2_min_value = track2_header['minVal']
            self.track2_max_value = track2_header['maxVal']
            self.track_comparison_type = track_comparison_type
            self.track_plot_header = track1 + ' ' + track_comparison_type + ' ' + track2
        # NOTE(review): by operator precedence this parses as
        # (track1 is None and track2 is None and track_comparison_type is None)
        # or (genome is None) -- a missing genome alone triggers the error,
        # while track2 given without track_comparison_type is silently
        # ignored. Confirm that is intended.
        elif track1 is None and track2 is None and track_comparison_type is None or genome is None:
            return(print("Error: You must load a track by specifying a bigwig track and a genome."))
def download_tracks(self, url = '', track_num = 0):
if url == '' and track_num == 0:
self.list_external_tracks()
elif url != '':
if type(url) is int:
df_quicklist = pd.read_csv('external_tracks.db', delimiter=',', header=0)
url = df_quicklist.loc[url, 'URL_Path']
self.download_url(url)
else:
self.download_url(url)
elif track_num !=0:
df_quicklist = pd.read_csv('external_tracks.db', delimiter=',', header=0)
url = df_quicklist.loc[track_num, 'URL_Path']
self.download_url(url)
def download_jaspar(self, url = ''):
if url == '':
url = 'http://jaspar.genereg.net/download/CORE/JASPAR2020_CORE_vertebrates_non-redundant_pfms_jaspar.txt'
self.download_url(url)
def list_tracks(self):
tracks = []
for bigwig in os.listdir(os.getcwd()):
if bigwig.endswith(".bw"):
if bigwig !='None':
tracks.append(bigwig)
return tracks
def list_bed(self):
list_bedfiles = []
for bedfile in os.listdir(os.getcwd()):
if bedfile.endswith(".bed"):
if bedfile !='None':
list_bedfiles.append(bedfile)
return list_bedfiles
    def load_bed(self, genome, bed_file, track=None):
        """ Load an existing bed file for analyzing regions enhancers.

        Each BED interval becomes one row in self.df_results. When a
        bigwig `track` path is given, mean/max coverage and the genomic
        sequence are computed per interval; otherwise peak values are 0.
        """
        # load a bed file
        # for each entry in bed file, create an entry in df_results.
        self.reset_results()
        self.genome = genome
        ## read in bed file and convert to dataframe (tab-separated,
        ## '#' lines treated as comments, no header row expected)
        df_bed = pd.read_csv(bed_file, sep='\t', comment='#', header=None)
        # Standard 12-column BED names; only as many as the file provides.
        header = ['chrom', 'chromStart', 'chromEnd', 'name', 'score', 'strand', 'thickStart', 'thickEnd', 'itemRgb', 'blockCount', 'blockSizes', 'blockStarts']
        df_bed.columns = header[:len(df_bed.columns)]
        print(df_bed)
        list_region_start = []
        list_region_stop = []
        # convert to df_results, one row per BED interval
        for index, enhancer in df_bed.iterrows():
            chromosome = enhancer['chrom']
            # NOTE(review): self.chromosome ends up as the *last* row's
            # chromosome; region_values below assume a single-chromosome
            # BED file -- confirm multi-chromosome input is not expected.
            self.chromosome = chromosome #from bed files
            start = enhancer['chromStart']
            stop = enhancer['chromEnd']
            name = enhancer['name']
            full_name = enhancer['name']
            size = enhancer['chromEnd'] - enhancer['chromStart']
            # Primer columns are placeholders, filled elsewhere.
            primerf = ''
            primerftemp = ''
            primerr = ''
            primerrtemp = ''
            if track is not None:
                self.load_track(genome, track, reset=False)
                mean_peak_values = self.get_mean_range_values_bed(chromosome, start, stop) #should be based on track
                max_peak_values = self.get_max_range_values_bed(chromosome, start, stop)
                sequence = self.get_sequence(self.genome, chromosome, start, stop)
            else:
                # No coverage track: record zeroed peak statistics.
                mean_peak_values = 0
                max_peak_values = 0
                sequence = self.get_sequence(self.genome, chromosome, start, stop)
            list_region_start.append(start)
            list_region_stop.append(stop)
            self.df_results.loc[len(self.df_results)]=([chromosome, start, stop, name, full_name, mean_peak_values, max_peak_values, size, primerf, primerftemp, primerr, primerrtemp, sequence])
        # Overall region spans the outermost interval boundaries.
        self.region_start = sorted(list_region_start)[0]
        self.region_stop = sorted(list_region_stop)[-1]
        if self.track1_bw != None:
            self.region_values = self.track1_bw.values(self.chromosome, self.region_start, self.region_stop) # but we need a track loaded....
    def enhancer_scanner(self, chromosome, region_start=0, region_stop=0, peak_width=50, peak_height='auto', merge_distance=200, final_size=None, final_mean_peak_values=None, reset=True):
        """
        Scan a genomic region for enhancer-like peaks in the loaded track signal
        and append the detected regions to self.df_results.

        :param chromosome: chromosome name; may also be a full UCSC-style locus
            string like 'chr1:1,000-2,000', in which case the region bounds are
            parsed out of it and the region_* arguments are ignored
        :param region_start: region start coordinate
        :param region_stop: region stop coordinate
        :param peak_width: minimum peak width passed to scipy.signal.find_peaks
        :param peak_height: minimum peak height; 'auto' = 25% of the region max,
            'mean'/'median' = the corresponding region statistic, anything else
            is converted to float and used directly
        :param merge_distance: peaks closer than this many bases are merged
        :param final_size: if set, drop detections smaller than this size in bp
        :param final_mean_peak_values: if set, drop detections whose mean peak
            value is below this threshold
        :param reset: when True, clear any previous results first
        """
        if reset is True:
            self.reset_results()
        #allow direct copy from ucsc genome browser
        if ':' in chromosome:
            temp = chromosome
            chromosome, temp = temp.split(':')
            region_start, region_stop = temp.replace(',','').split('-')
        self.chromosome = chromosome
        self.region_start = int(region_start)
        self.region_stop = int(region_stop)
        ### GET REGION VALUES universal for single or multiple tracks
        self.get_region_values()
        if peak_height == 'auto':
            # auto threshold: a quarter of the strongest signal in the region
            self.auto_threshold = self.region_max_value * .25
            peak_height = self.auto_threshold
            print("")
            print("Using auto detection of peak height: ", self.auto_threshold)
        elif peak_height == 'mean':
            peak_height = float(self.region_mean_value)
        elif peak_height == 'median':
            peak_height = float(self.region_median_value)
        else:
            peak_height = float(peak_height)
        # PEAK DETECTION
        # detect peaks, returns a tuple of an array of peaks and a dictionary or properties
        peaks = signal.find_peaks(self.region_values, width = float(peak_width), height = peak_height)
        # make list of unqiue peak widths as tuples and sort
        # left/right bases are offsets into the region, so shift them to genomic coords
        list_widths = sorted(list(set(zip(list(peaks[1]['left_bases'] + self.region_start), list(peaks[1]['right_bases'] + self.region_start))))) # its fixed for final location from widths now
        print('Total Peaks Detected:', len(list_widths))
        # merge overlapping tuples and tuples within a certain distance
        list_merged = []
        for higher in sorted(list_widths, key=lambda tup: tup[0]): #sorted by lower bound
            if not list_merged:
                list_merged.append(higher)
            else:
                lower = list_merged[-1]
                # test for intersection between lower and higher:
                # we know via sorting that lower[0] <= higher[0]
                if higher[0] <= lower[1]:
                    upper_bound = max(lower[1], higher[1])
                    list_merged[-1] = (lower[0], upper_bound)  # replace by merged interval
                elif higher[0] - lower[1] < merge_distance: #seems to work
                    upper_bound = max(lower[1], higher[1])
                    list_merged[-1] = (lower[0], upper_bound)  # replace by merged interval
                else:
                    list_merged.append(higher)
        self.all_detected_peaks = list_merged
        # update results dataframe
        # 'chrom', 'chromStart', 'chromEnd', 'name', 'full_name', 'mean_peak_values', 'max_peak_values', 'size_bp', 'sequence'
        for peak_num, peak in enumerate(self.all_detected_peaks):
            chromosome = self.chromosome
            start = int(peak[0])
            stop = int(peak[1])
            name = 'P' + str(peak_num+1)
            full_name = 'PEAK_' + str(peak_num+1) + '_' + str(self.chromosome) + ':' + str(self.region_start) + '-' + str(self.region_stop) #TODO change the region to local coords
            size = peak[1] - peak[0]
            # primer fields are filled in later by other tooling
            primerf = ''
            primerftemp = ''
            primerr = ''
            primerrtemp = ''
            # the range helpers index into region_values, hence the region_start shift
            mean_peak_values = self.get_mean_range_values(start-self.region_start, stop-self.region_start)
            max_peak_values = self.get_max_range_values(start-self.region_start, stop-self.region_start)
            sequence = self.get_sequence(self.genome, chromosome, start, stop)
            #columns=['chrom', 'chromStart', 'chromEnd', 'name', 'full_name', 'mean_peak_values', 'max_peak_values', 'size_bp', 'primerF', 'primerFtemp', 'primerR' 'primerRtemp','sequence']
            self.df_results.loc[len(self.df_results)]=([chromosome, start, stop, name, full_name, mean_peak_values, max_peak_values, size, primerf, primerftemp, primerr, primerrtemp, sequence])
        #filter size and mean peak values
        if final_size is not None:
            self.df_results = self.df_results[self.df_results.size_bp >= final_size]
        if final_mean_peak_values is not None:
            self.df_results = self.df_results[self.df_results.mean_peak_values >= final_mean_peak_values]
        print('Final Passed Merge/Filters:', len(self.df_results))
    def motif_scanner(self, tfactor=None, score_threshold=8, plot=True, fig_width=8, fig_height=4, jaspar_data='JASPAR2020_CORE_vertebrates_non-redundant_pfms_jaspar.txt'):
        """ Function to scan detected sequences for transcription factor motifs using JASPAR. Multiple transcription factors can be specificied with a plus sign.
            example: 'OTX2+VSX2'

        :param tfactor: '+'-separated transcription factor name(s); None prints the available names
        :param score_threshold: minimum PSSM score for a hit to be kept
        :param plot: when True, scatter-plot the hits per detected region
        :param fig_width: figure width in inches
        :param fig_height: figure height in inches
        :param jaspar_data: path to a JASPAR-format PFM database file
        """
        if len(self.df_results) < 1:
            raise RuntimeError("You run the ecr_scanner prior to running the motif_scanner!")
        self.df_motifs = pd.DataFrame(columns=['name', 'full_name', 'tfactor', 'motif_coords', 'position', 'strand', 'score', 'rel_score'])
        file_handle = open(jaspar_data)
        tf_dict = {}
        list_tfs = []
        coords = ''
        strand = ''
        # biopython motif parser using jaspar format
        for motif in motifs.parse(file_handle, fmt="jaspar"):
            tf_dict[motif.name] = motif
        if tfactor is None:
            print("You must select a transcription factor to scan for.")
            print(tf_dict.keys())
        # validate each requested factor; list_tfs stays empty if none are valid
        for tf in list(str(tfactor).split('+')):
            if tf not in tf_dict.keys():
                print(tf + " was not found in the database.\n")
                print("You must select an available transcription factor to scan for. Both upper and titlecase are valid posibilities.\n")
                print(tf_dict.keys())
            else:
                list_tfs = list(str(tfactor).split('+'))
        for index, enhancer in self.df_results.iterrows():
            test_seq=Seq(enhancer['sequence'])
            for tfactor in list_tfs:
                self.df_motifs.loc[len(self.df_motifs)]=(enhancer['name'], enhancer['full_name'], tfactor, coords, 0, strand, -1, 0) #dummy value to preserve order for ploting
                pssm = tf_dict[tfactor].pssm
                footprint = len(pssm[0])
                start_pos = 0
                max_score = pssm.max
                min_score = pssm.min
                # NOTE(review): computed but never used below -- hits are filtered
                # with the user-supplied `score_threshold` instead
                abs_score_threshold = (max_score - min_score) * 0.8 + min_score
                for position, score in pssm.search(test_seq):
                    rel_score = (score - min_score) / (max_score - min_score)
                    # non-negative positions are + strand hits (offset from region
                    # start); negative positions are - strand (offset from the end)
                    if int(position) >= 0:
                        strand = '+'
                        start_pos = enhancer['chromStart'] + position
                    if int(position) < 0:
                        strand = '-'
                        start_pos = enhancer['chromEnd'] + position
                    coords = enhancer['chrom'] + ':' + str(start_pos) + '-' + str(start_pos + footprint)
                    if score > score_threshold:
                        self.df_motifs.loc[len(self.df_motifs)]=(enhancer['name'], enhancer['full_name'], tfactor, coords, position, strand, float(score), rel_score)
        if plot is True:
            plt.figure(figsize=(fig_width, fig_height))
            for tfactor in list_tfs:
                df_plot = self.df_motifs.loc[self.df_motifs['tfactor'] == tfactor]
                plt.scatter(x=df_plot.name, y=df_plot.score, alpha=0.75)
            plt.title('JASPAR detected ' + str(list_tfs)+' motif(s)')
            plt.ylim(bottom=score_threshold-1)
            plt.legend(list_tfs, loc='center left', bbox_to_anchor=(1, 0.5))
            plt.xticks(rotation=90)
            plt.ylabel('JASPAR score')
            self.df_motifs = self.df_motifs[self.df_motifs['score'] >= 0] # drop negative scores
        else:
            self.df_motifs = self.df_motifs[self.df_motifs['score'] >= 0] # drop negative scores
        print(self.df_motifs)
def plot_detected_enhancers(self, fig_width=20, fig_height=4, ymax='auto'):
#calculate the x indexes for plotting
x_index = [self.region_start + i for i in range(self.region_stop-self.region_start)]
if ymax is 'auto':
ymax = self.region_max_value
plt.figure(figsize=(fig_width, fig_height))
plt.plot(x_index, self.region_values)
plt.title(self.track_plot_header + ' ' + self.genome + ' ' + self.chromosome + ': ' + str(self.region_start) + ' -> ' + str(self.region_stop-1))
plt.xlabel('Coordinates')
plt.ylabel('Peak Values')
plt.ylim(0, ymax)
for index, enhancer in self.df_results.iterrows():
plt.annotate(enhancer['name'], ((enhancer['chromStart'] + enhancer['chromEnd'])/2, ymax*.90), fontsize=9, color='red')
plt.axvspan(enhancer['chromStart'],enhancer['chromEnd'], 0, ymax, alpha=0.25, color='red')
plt.xticks([self.region_start, (self.region_start+self.region_stop)/2, self.region_stop])
plt.tight_layout()
def plot_detected_mean_peak_values(self, sort=False):
self.df_results.plot.bar(x='name', y='mean_peak_values', title=self.track_plot_header, ylabel='Mean Peak Values')
def plot_detected_max_peak_values(self, sort=False):
self.df_results.plot.bar(x='name', y='max_peak_values', title=self.track_plot_header, ylabel='Max Peak Values')
def plot_detected_size(self, sort=False):
self.df_results.plot.bar(x='name', y='size_bp', title=self.track_plot_header, ylabel='Size (bp)')
def plot_detected_motifs(self, motif, score_threshold=8, fig_width=20, fig_height=4):
#calculate the x indexes for plotting
x_index = [self.region_start + i for i in range(self.region_stop-self.region_start)]
ymax = self.region_max_value
plt.figure(figsize=(fig_width, fig_height))
plt.plot(x_index, self.region_values)
plt.title(motif + ' motifs ' + self.track_plot_header + ' ' + self.genome + ' ' + self.chromosome + ': ' + str(self.region_start) + ' -> ' + str(self.region_stop-1))
plt.xlabel('Coordinates')
plt.ylabel('Peak Values')
plt.ylim(0, ymax)
for index, enhancer in self.df_results.iterrows():
plt.annotate(enhancer['name'], ((enhancer['chromStart'] + enhancer['chromEnd'])/2, ymax*.90), fontsize=9, color='red')
plt.axvspan(enhancer['chromStart'],enhancer['chromEnd'], 0, ymax, alpha=0.25, color='red')
df_plot2 = pd.DataFrame(columns=['motif_name', 'x', 'y']) #calculated values
for index, row in self.df_motifs.iterrows():
start, stop = row.motif_coords.split(':')[1].split('-')
df_plot2 = df_plot2.append({'motif_name':row.tfactor, 'x':(int(stop)+int(start))/2, 'y':ymax/2}, ignore_index=True)
df_plot = df_plot2.loc[df_plot2['motif_name'] == motif]
plt.scatter(x=df_plot.x, y=df_plot.y, alpha=0.75, color='red')
plt.xticks([self.region_start, (self.region_start+self.region_stop)/2, self.region_stop])
plt.tight_layout()
def plot_custom(self, x, y):
fig, ax = plt.subplots()
ax.bar(self.df_results[x], self.df_results[y])
ax.set_title(self.track_plot_header)
ax.set_xlabel(x)
ax.set_ylabel(y)
self.last_figure = fig
def save_bed(self, file_name):
df_bed = pd.DataFrame(columns=['#chrom', 'chromStart', 'chromEnd', 'name', 'score', 'strand'])
#convert df_results into df_bed
for index, row in self.df_results.iterrows():
df_bed.loc[len(df_bed)]=([row['chrom'], row['chromStart'], row['chromEnd'], row['name'], row['max_peak_values'], '+'])
df_bed.to_csv(file_name + '.bed', sep='\t', index=None)
print(file_name + '.bed' + " Saved!")
    def save_genbank(self, file_name):
        """
        Export the scanned region as an annotated GenBank file (<file_name>.gb).

        Fetches the full region sequence from UCSC, annotates every detected
        peak as a 'Peak' feature and every motif hit as a 'TF' feature, with
        coordinates converted from genomic to region-local positions.
        """
        sequence_string = self.get_sequence(self.genome, self.chromosome, self.region_start, self.region_stop)
        sequence_object = Seq(sequence_string)
        #create a record
        genbank_record = SeqRecord(sequence_object,
                           id='123456789', # random accession number
                           name=self.chromosome + str(self.region_start) + str(self.region_stop),
                           description='Autogenerated Genbank file annotated with enhancer scanner data')
        # molecule_type annotation is required by the GenBank writer
        genbank_record.annotations['molecule_type']='DNA'
        #annotate peaks
        for index, peak in self.df_results.iterrows():
            # genomic -> region-local coordinates
            feature = SeqFeature(FeatureLocation(start=peak['chromStart']-self.region_start, end=peak['chromEnd']-self.region_start), type='Peak', strand=0)
            feature.qualifiers['label'] = peak['name']
            #feature.qualifiers['ApEinfo_revcolor'] = '#000000' #black
            #feature.qualifiers['ApEinfo_fwdcolor'] = '#000000' #black
            genbank_record.features.append(feature)
        #annotate motifs
        for index, motif in self.df_motifs.iterrows():
            # motif_coords look like 'chrN:start-stop'
            coords = motif['motif_coords']
            coords = coords.split(':')[1]
            start_pos, stop_pos = coords.split('-')
            feature = SeqFeature(FeatureLocation(start=int(start_pos)-self.region_start, end=int(stop_pos)-self.region_start), type='TF', strand=0)
            feature.qualifiers['label'] = motif['tfactor']
            genbank_record.features.append(feature)
        output_file = open(file_name + '.gb', 'w')
        SeqIO.write(genbank_record, output_file, 'genbank')
        print(file_name + '.gb' + " Saved!")
def save_results(self, file_name):
self.df_results.to_csv(file_name + '.csv', sep=',', index=None)
print(file_name + '.csv' + " Saved!")
def save_motifs(self, file_name):
self.df_motifs.to_csv(file_name + '.csv', sep=',', index=None)
print(file_name + '.csv' + " Saved!")
def reset_results(self):
""" Internal function to reset the results and motif dataframes. """
self.df_results = pd.DataFrame(columns=['chrom', 'chromStart', 'chromEnd', 'name', 'full_name', 'mean_peak_values', 'max_peak_values', 'size_bp', 'primerF', 'primerFtemp', 'primerR', 'primerRtemp','sequence'])
self.df_motifs = pd.DataFrame(columns=['name', 'full_name', 'tfactor', 'motif_coords', 'position', 'strand', 'score', 'rel_score'])
def get_mean_range_values(self, start, stop):
return self.region_values[start:stop+1].mean()
def get_mean_range_values_bed(self, chromosome, start, stop):
return np.array(self.track1_bw.values(chromosome, start, stop)).mean()
def get_max_range_values(self, start, stop):
return self.region_values[start:stop+1].max()
def get_max_range_values_bed(self, chromosome, start, stop):
return np.array(self.track1_bw.values(chromosome, start, stop)).max()
def get_region_values(self):
region_start = int(self.region_start)
region_stop = int(self.region_stop)
chromosome = self.chromosome
# If multiple track comparison
if self.track2_bw != None:
region_values = None
if self.track_comparison_type == 'subtract':
region_values = np.subtract(np.array(self.track1_bw.values(chromosome, region_start, region_stop)), np.array(self.track2_bw.values(chromosome, region_start, region_stop)))
if self.track_comparison_type == 'add':
region_values = np.add(np.array(self.track1_bw.values(chromosome, region_start, region_stop)), np.array(self.track2_bw.values(chromosome, region_start, region_stop)))
if self.track_comparison_type == 'divide':
region_values = np.true_divide(np.array(self.track1_bw.values(chromosome, region_start, region_stop)), np.array(self.track2_bw.values(chromosome, region_start, region_stop)))
if self.track_comparison_type == 'multiply':
region_values = np.multiply(np.array(self.track1_bw.values(chromosome, region_start, region_stop)), np.array(self.track2_bw.values(chromosome, region_start, region_stop)))
self.region_values = np.nan_to_num(region_values, nan=0.0, posinf=100, neginf=0.0).clip(min=0)
self.region_max_value = self.region_values.max()
self.region_mean_value = self.region_values.mean()
else: #only one track grab region values to detect
self.region_values = np.array(self.track1_bw.values(chromosome, region_start, region_stop))
self.region_max_value = self.region_values.max()
self.region_mean_value = self.region_values.mean()
print("Max peak height in this range: ",self.region_max_value)
print("Mean peak height in this range: ", self.region_mean_value)
print("Median peak height in this range: ", self.region_median_value)
print("")
print("Max peak height across whole track: ", self.track1_max_value, self.track2_max_value)
print("Minumum peak height across whole track1: ", self.track1_min_value, self.track2_min_value)
def get_sequence(self, genome, chromosome, start, stop, padding_start=0, padding_stop=0):
#page = requests.get('http://genome.ucsc.edu/cgi-bin/das/mm10/dna?segment=chr14:48645000,48660000')
# be careful: the DAS server uses an index of (+1) for the first base.
padding_start = int(padding_start)
padding_stop = int(padding_stop)
start = int(start)
stop = int(stop)
page = requests.get('http://genome.ucsc.edu/cgi-bin/das/'+genome+'/dna?segment='+chromosome+':'+str(start-padding_start)+','+str(stop+padding_stop))
return html.fromstring(page.content).text_content().strip().replace('\n', '')
def ungzip(self, filepath):
new_filepath = filepath.split('.')[-1]
with gzip.open(filepath, 'rb') as f_in:
with open(new_filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def download_url(self, url):
filename = url.split('/')[-1]
if '?' in filename:
filename = filename.split('?')[0]
if '=' in filename:
filename = filename.split('=')[-1]
filename = filename.replace('%2E', '.')
filename = filename.replace('%2D', '-')
filename = filename.replace('%5F', '_')
#check if file exists already
if path.exists(filename):
print("This track already exists!")
else:
print('Downloading ', filename, '...')
r = requests.get(url)
with open(filename, 'wb') as f:
f.write(r.content)
if int(r.status_code) == 200:
print('Download successful!')
else:
print(r.status_code)
if filename.endswith('gz'):
print('Unzipping...')
self.ungzip(filename)
print('Done.') | [
"Bio.Seq.Seq",
"Bio.SeqIO.write",
"numpy.nan_to_num",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"os.path.exists",
"lxml.html.fromstring",
"matplotlib.pyplot.axvspan",
"requests.get",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.sub... | [((2371, 2429), 'pandas.read_csv', 'pd.read_csv', (['"""external_tracks.db"""'], {'delimiter': '""","""', 'header': '(0)'}), "('external_tracks.db', delimiter=',', header=0)\n", (2382, 2429), True, 'import pandas as pd\n'), ((5765, 5822), 'pandas.read_csv', 'pd.read_csv', (['bed_file'], {'sep': '"""\t"""', 'comment': '"""#"""', 'header': 'None'}), "(bed_file, sep='\\t', comment='#', header=None)\n", (5776, 5822), True, 'import pandas as pd\n'), ((12841, 12959), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['name', 'full_name', 'tfactor', 'motif_coords', 'position', 'strand',\n 'score', 'rel_score']"}), "(columns=['name', 'full_name', 'tfactor', 'motif_coords',\n 'position', 'strand', 'score', 'rel_score'])\n", (12853, 12959), True, 'import pandas as pd\n'), ((13156, 13195), 'Bio.motifs.parse', 'motifs.parse', (['file_handle'], {'fmt': '"""jaspar"""'}), "(file_handle, fmt='jaspar')\n", (13168, 13195), False, 'from Bio import motifs\n'), ((16324, 16367), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(fig_width, fig_height)'}), '(figsize=(fig_width, fig_height))\n', (16334, 16367), True, 'import matplotlib.pyplot as plt\n'), ((16376, 16413), 'matplotlib.pyplot.plot', 'plt.plot', (['x_index', 'self.region_values'], {}), '(x_index, self.region_values)\n', (16384, 16413), True, 'import matplotlib.pyplot as plt\n'), ((16575, 16600), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Coordinates"""'], {}), "('Coordinates')\n", (16585, 16600), True, 'import matplotlib.pyplot as plt\n'), ((16609, 16634), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Peak Values"""'], {}), "('Peak Values')\n", (16619, 16634), True, 'import matplotlib.pyplot as plt\n'), ((16643, 16660), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', 'ymax'], {}), '(0, ymax)\n', (16651, 16660), True, 'import matplotlib.pyplot as plt\n'), ((16964, 17061), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[self.region_start, (self.region_start + self.region_stop) / 2, 
self.\n region_stop]'], {}), '([self.region_start, (self.region_start + self.region_stop) / 2,\n self.region_stop])\n', (16974, 17061), True, 'import matplotlib.pyplot as plt\n'), ((17062, 17080), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17078, 17080), True, 'import matplotlib.pyplot as plt\n'), ((17874, 17917), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(fig_width, fig_height)'}), '(figsize=(fig_width, fig_height))\n', (17884, 17917), True, 'import matplotlib.pyplot as plt\n'), ((17926, 17963), 'matplotlib.pyplot.plot', 'plt.plot', (['x_index', 'self.region_values'], {}), '(x_index, self.region_values)\n', (17934, 17963), True, 'import matplotlib.pyplot as plt\n'), ((18146, 18171), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Coordinates"""'], {}), "('Coordinates')\n", (18156, 18171), True, 'import matplotlib.pyplot as plt\n'), ((18180, 18205), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Peak Values"""'], {}), "('Peak Values')\n", (18190, 18205), True, 'import matplotlib.pyplot as plt\n'), ((18214, 18231), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', 'ymax'], {}), '(0, ymax)\n', (18222, 18231), True, 'import matplotlib.pyplot as plt\n'), ((18546, 18592), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['motif_name', 'x', 'y']"}), "(columns=['motif_name', 'x', 'y'])\n", (18558, 18592), True, 'import pandas as pd\n'), ((18935, 18997), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'df_plot.x', 'y': 'df_plot.y', 'alpha': '(0.75)', 'color': '"""red"""'}), "(x=df_plot.x, y=df_plot.y, alpha=0.75, color='red')\n", (18946, 18997), True, 'import matplotlib.pyplot as plt\n'), ((19015, 19112), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[self.region_start, (self.region_start + self.region_stop) / 2, self.\n region_stop]'], {}), '([self.region_start, (self.region_start + self.region_stop) / 2,\n self.region_stop])\n', (19025, 19112), True, 'import matplotlib.pyplot as plt\n'), ((19113, 19131), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19129, 19131), True, 'import matplotlib.pyplot as plt\n'), ((19184, 19198), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19196, 19198), True, 'import matplotlib.pyplot as plt\n'), ((19437, 19526), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['#chrom', 'chromStart', 'chromEnd', 'name', 'score', 'strand']"}), "(columns=['#chrom', 'chromStart', 'chromEnd', 'name', 'score',\n 'strand'])\n", (19449, 19526), True, 'import pandas as pd\n'), ((20037, 20057), 'Bio.Seq.Seq', 'Seq', (['sequence_string'], {}), '(sequence_string)\n', (20040, 20057), False, 'from Bio.Seq import Seq\n'), ((21485, 21536), 'Bio.SeqIO.write', 'SeqIO.write', (['genbank_record', 'output_file', '"""genbank"""'], {}), "(genbank_record, output_file, 'genbank')\n", (21496, 21536), False, 'from Bio import SeqIO\n'), ((22034, 22234), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['chrom', 'chromStart', 'chromEnd', 'name', 'full_name', 'mean_peak_values',\n 'max_peak_values', 'size_bp', 'primerF', 'primerFtemp', 'primerR',\n 'primerRtemp', 'sequence']"}), "(columns=['chrom', 'chromStart', 'chromEnd', 'name',\n 'full_name', 'mean_peak_values', 'max_peak_values', 'size_bp',\n 'primerF', 'primerFtemp', 'primerR', 'primerRtemp', 'sequence'])\n", (22046, 22234), True, 'import pandas as pd\n'), ((22251, 22369), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['name', 'full_name', 'tfactor', 'motif_coords', 'position', 'strand',\n 'score', 'rel_score']"}), "(columns=['name', 'full_name', 'tfactor', 'motif_coords',\n 'position', 'strand', 'score', 'rel_score'])\n", (22263, 22369), True, 'import pandas as pd\n'), ((26407, 26428), 'os.path.exists', 'path.exists', (['filename'], {}), '(filename)\n', (26418, 26428), False, 'from os import path\n'), ((3111, 3132), 'pyBigWig.open', 'pyBigWig.open', (['track1'], {}), '(track1)\n', (3124, 3132), False, 'import pyBigWig\n'), ((3467, 3488), 'pyBigWig.open', 
'pyBigWig.open', (['track2'], {}), '(track2)\n', (3480, 3488), False, 'import pyBigWig\n'), ((4977, 4988), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4986, 4988), False, 'import os\n'), ((5236, 5247), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5245, 5247), False, 'import os\n'), ((15274, 15317), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(fig_width, fig_height)'}), '(figsize=(fig_width, fig_height))\n', (15284, 15317), True, 'import matplotlib.pyplot as plt\n'), ((15593, 15629), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(score_threshold - 1)'}), '(bottom=score_threshold - 1)\n', (15601, 15629), True, 'import matplotlib.pyplot as plt\n'), ((15640, 15704), 'matplotlib.pyplot.legend', 'plt.legend', (['list_tfs'], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)'}), "(list_tfs, loc='center left', bbox_to_anchor=(1, 0.5))\n", (15650, 15704), True, 'import matplotlib.pyplot as plt\n'), ((15717, 15740), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (15727, 15740), True, 'import matplotlib.pyplot as plt\n'), ((15753, 15779), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""JASPAR score"""'], {}), "('JASPAR score')\n", (15763, 15779), True, 'import matplotlib.pyplot as plt\n'), ((16733, 16860), 'matplotlib.pyplot.annotate', 'plt.annotate', (["enhancer['name']", "((enhancer['chromStart'] + enhancer['chromEnd']) / 2, ymax * 0.9)"], {'fontsize': '(9)', 'color': '"""red"""'}), "(enhancer['name'], ((enhancer['chromStart'] + enhancer[\n 'chromEnd']) / 2, ymax * 0.9), fontsize=9, color='red')\n", (16745, 16860), True, 'import matplotlib.pyplot as plt\n'), ((16864, 16960), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (["enhancer['chromStart']", "enhancer['chromEnd']", '(0)', 'ymax'], {'alpha': '(0.25)', 'color': '"""red"""'}), "(enhancer['chromStart'], enhancer['chromEnd'], 0, ymax, alpha=\n 0.25, color='red')\n", (16875, 16960), True, 'import matplotlib.pyplot as plt\n'), ((18304, 18431), 
'matplotlib.pyplot.annotate', 'plt.annotate', (["enhancer['name']", "((enhancer['chromStart'] + enhancer['chromEnd']) / 2, ymax * 0.9)"], {'fontsize': '(9)', 'color': '"""red"""'}), "(enhancer['name'], ((enhancer['chromStart'] + enhancer[\n 'chromEnd']) / 2, ymax * 0.9), fontsize=9, color='red')\n", (18316, 18431), True, 'import matplotlib.pyplot as plt\n'), ((18435, 18531), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (["enhancer['chromStart']", "enhancer['chromEnd']", '(0)', 'ymax'], {'alpha': '(0.25)', 'color': '"""red"""'}), "(enhancer['chromStart'], enhancer['chromEnd'], 0, ymax, alpha=\n 0.25, color='red')\n", (18446, 18531), True, 'import matplotlib.pyplot as plt\n'), ((25855, 25880), 'gzip.open', 'gzip.open', (['filepath', '"""rb"""'], {}), "(filepath, 'rb')\n", (25864, 25880), False, 'import gzip\n'), ((26560, 26577), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (26572, 26577), False, 'import requests\n'), ((13886, 13911), 'Bio.Seq.Seq', 'Seq', (["enhancer['sequence']"], {}), "(enhancer['sequence'])\n", (13889, 13911), False, 'from Bio.Seq import Seq\n'), ((15454, 15510), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'df_plot.name', 'y': 'df_plot.score', 'alpha': '(0.75)'}), '(x=df_plot.name, y=df_plot.score, alpha=0.75)\n', (15465, 15510), True, 'import matplotlib.pyplot as plt\n'), ((20572, 20680), 'Bio.SeqFeature.FeatureLocation', 'FeatureLocation', ([], {'start': "(peak['chromStart'] - self.region_start)", 'end': "(peak['chromEnd'] - self.region_start)"}), "(start=peak['chromStart'] - self.region_start, end=peak[\n 'chromEnd'] - self.region_start)\n", (20587, 20680), False, 'from Bio.SeqFeature import SeqFeature, FeatureLocation\n'), ((25958, 25989), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (25976, 25989), False, 'import shutil\n'), ((4261, 4319), 'pandas.read_csv', 'pd.read_csv', (['"""external_tracks.db"""'], {'delimiter': '""","""', 'header': '(0)'}), "('external_tracks.db', 
delimiter=',', header=0)\n", (4272, 4319), True, 'import pandas as pd\n'), ((4529, 4587), 'pandas.read_csv', 'pd.read_csv', (['"""external_tracks.db"""'], {'delimiter': '""","""', 'header': '(0)'}), "('external_tracks.db', delimiter=',', header=0)\n", (4540, 4587), True, 'import pandas as pd\n'), ((24145, 24206), 'numpy.nan_to_num', 'np.nan_to_num', (['region_values'], {'nan': '(0.0)', 'posinf': '(100)', 'neginf': '(0.0)'}), '(region_values, nan=0.0, posinf=100, neginf=0.0)\n', (24158, 24206), True, 'import numpy as np\n'), ((25690, 25719), 'lxml.html.fromstring', 'html.fromstring', (['page.content'], {}), '(page.content)\n', (25705, 25719), False, 'from lxml import html\n')] |
import math
import operator
from functools import reduce
import bezier
import cv2
import numpy as np
import pyclipper
from pyclipper import PyclipperOffset
from scipy.interpolate import splprep, splev
from shapely.geometry import Polygon
def compute_two_points_angle(_base_point, _another_point):
    """
    Clockwise angle, in degrees, swept by the +x ray at the base point until it
    coincides with the ray base_point -> another_point (screen coordinates,
    y axis pointing down).
    :param _base_point: pivot point (x, y)
    :param _another_point: target point (x, y)
    :return: angle in degrees, normalized to [0, 360)
    """
    diff_x, diff_y = _another_point[0] - _base_point[0], _another_point[1] - _base_point[1]
    clockwise_angle = 180 + math.degrees(math.atan2(-diff_y, -diff_x))
    # BUGFIX: atan2(+0.0, -1) returns +pi, which mapped the +x direction to
    # 360 instead of 0 -- normalize into [0, 360)
    return clockwise_angle % 360
def get_clockwise_angle_of_two_lines(_center_point, _point_1, _point_2):
    """
    Clockwise angle swept around the center point when rotating from the ray
    towards point 1 onto the ray towards point 2.
    :param _center_point: pivot of the rotation
    :param _point_1: start point
    :param _point_2: end point
    :return: swept angle in degrees
    """
    start_angle = compute_two_points_angle(_center_point, _point_1)
    end_angle = compute_two_points_angle(_center_point, _point_2)
    sweep = end_angle - start_angle
    # wrap negative sweeps around the full circle
    return sweep + 360 if sweep < 0 else sweep
def curved_polygon(_points):
    """
    Smooth a closed polygon with a periodic B-spline interpolation.
    :param _points: polygon vertices, array of shape (N, 2)
    :return: 1000 smoothed points as an integer array of shape (1000, 1, 2)
    """
    # periodic (per=1) smoothing spline through the vertices
    tck, u = splprep(_points.T, u=None, s=1.0, per=1, quiet=2)
    u_new = np.linspace(u.min(), u.max(), 1000)
    x_new, y_new = splev(u_new, tck, der=0)
    # BUGFIX: np.int was removed in NumPy 1.24 -- use the builtin int instead
    return np.array(list(zip(x_new.astype(int), y_new.astype(int)))).reshape((-1, 1, 2))
def approximate_curved_polygon(_contour, point_num=200):
    """
    Fit Bezier curves piecewise to obtain a smooth, closed polygon outline.
    :param _contour: points forming the polygon outline. Array:(N, 2)
    :param point_num: points fitted per segment; larger means smoother. Int
    :return: the smoothed outline points, array of shape (-1, 2)
    """
    smoothed_segments = []
    flat_contour = np.reshape(_contour, (-1, 2))
    # append the first point again at the end so the fitted curve closes
    flat_contour = np.vstack((flat_contour, flat_contour[0, :].reshape((-1, 2))))
    total_points = flat_contour.shape[0]
    for seg_start in range(0, total_points, point_num):
        # take one extra point so consecutive segments share an endpoint and
        # the curve has no gaps
        seg_end = min(seg_start + point_num + 1, total_points)
        nodes = np.transpose(flat_contour[seg_start:seg_end, :])
        # fit a Bezier curve through this segment's nodes
        curve = bezier.Curve(nodes, degree=nodes.shape[1] - 1)
        sampled = curve.evaluate_multi(np.linspace(0.0, 1.0, point_num * 5))
        smoothed_segments.append(np.transpose(sampled))
    return np.array(smoothed_segments).reshape((-1, 2))
def get_region_proportion(_regions, _proportion):
    """
    Compute, for a batch of mask regions, each region's share of the chosen
    measure ('area', 'height' or 'width') relative to the whole batch.
    """
    assert _proportion in {'area', 'height', 'width'}, '不支持的占比计算方式'
    measures = []
    if _proportion == 'area':
        for m_region in _regions:
            measures.append(np.sum(m_region))
    else:
        # np.where gives (rows, cols); index 0 spans the height, 1 the width
        axis = 0 if _proportion == 'height' else 1
        for m_region in _regions:
            coords = np.where(m_region)[axis]
            measures.append(max(coords) - min(coords))
    total = sum(measures)
    return [m_value / total for m_value in measures]
def get_bounding_rectangle(_x, _y):
    """
    Describe the axis-aligned minimal bounding rectangle of a point set.
    :param _x: x values of the points
    :param _y: y values of the points
    :return: (left, top, right, bottom, height, width) of the rectangle
    """
    left, right = min(_x), max(_x)
    top, bottom = min(_y), max(_y)
    return left, top, right, bottom, bottom - top, right - left
def interpolate_points(_points):
    """
    Densify the segments of a polyline: any segment spanning more than ~10px
    on its larger axis gets evenly spaced intermediate points, so later
    polygon interpolation behaves better.
    :param _points: the input points, array-like of shape (N, 2)
    :return: the densified points, array of shape (M, 2); the final input
        point is not re-appended (the caller treats the polyline as closed)
    """
    to_return_points = []
    _points = np.array(_points)
    for m_point_previous, m_point_next in zip(_points, _points[1:]):
        # roughly one interpolated point per 10 pixels on the larger axis span
        m_segments = np.max(np.abs(m_point_previous - m_point_next) // 10)
        if m_segments > 1:
            # BUGFIX: dtype=np.int was removed in NumPy 1.24 -- use builtin int
            new_x = np.linspace(m_point_previous[0], m_point_next[0], num=int(m_segments), endpoint=False,
                                dtype=int)
            new_y = np.linspace(m_point_previous[1], m_point_next[1], num=int(m_segments), endpoint=False,
                                dtype=int)
            to_return_points.append(np.vstack([new_x, new_y]))
        else:
            # short segment: keep only the segment's start point
            to_return_points.append(np.array([[m_point_previous[0]], [m_point_previous[1]]]))
    return np.hstack(to_return_points).T
def get_polygon_region_contour(_region_mask, _mode='max'):
    """
    Extract polygon contour(s) from a binary region mask.
    :param _region_mask: image containing the polygon region(s)
    :param _mode: 'all' returns every contour point set;
                  'max' returns only the largest contour (by area)
    :return: list of contours
    """
    # BUGFIX: OpenCV 3 returns (image, contours, hierarchy) while OpenCV 4
    # returns (contours, hierarchy); indexing [-2] works for both versions
    contours = cv2.findContours(_region_mask.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    to_return_contours = []
    if _mode == 'max':
        to_return_contours = [max(contours, key=cv2.contourArea), ]
    elif _mode == 'all':
        to_return_contours = contours
    return to_return_contours
def concentric_circle_delete_duplicated(_all_centers, _down_scale_ratio=4):
    """
    Lightweight 2D coordinate de-duplication: centers that land in the same
    down-scaled grid cell are merged into their integer-averaged position.
    """
    cell_accumulator = dict()
    for center_x, center_y in _all_centers:
        # bucket key is the down-scaled grid cell the center falls into
        cell_key = '%d_%d' % (center_x // _down_scale_ratio, center_y // _down_scale_ratio)
        if cell_key in cell_accumulator:
            acc_x, acc_y, count = cell_accumulator[cell_key]
            cell_accumulator[cell_key] = (acc_x + center_x, acc_y + center_y, count + 1)
        else:
            cell_accumulator[cell_key] = (center_x, center_y, 1)
    # collapse each occupied cell back into a single averaged center
    return [(acc_x // count, acc_y // count) for acc_x, acc_y, count in cell_accumulator.values()]
def nms(_rectangles, _scores, _nms_threshold):
    """
    Non-maximum suppression over axis-aligned boxes.

    Overlap is measured as intersection over the *smaller* of the two box
    areas, and fully-contained boxes (overlap area equal to the smaller
    area) are always suppressed regardless of the threshold.

    Args:
        _rectangles: (N, 4) array of [x1, y1, x2, y2] boxes
        _scores: (N,) score per box
        _nms_threshold: overlap threshold above which boxes are suppressed
    Returns: list of kept box indices, highest score first
    """
    left, top = _rectangles[:, 0], _rectangles[:, 1]
    right, bottom = _rectangles[:, 2], _rectangles[:, 3]
    box_areas = (right - left + 1) * (bottom - top + 1)
    # Indices sorted by descending score.
    order = np.argsort(_scores)[::-1]
    kept_indices = []
    while order.size > 0:
        best = order[0]
        # The highest-scoring remaining box is always kept.
        kept_indices.append(best)
        rest = order[1:]
        ix1 = np.maximum(left[best], left[rest])
        iy1 = np.maximum(top[best], top[rest])
        ix2 = np.minimum(right[best], right[rest])
        iy2 = np.minimum(bottom[best], bottom[rest])
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        # Per-pair minimum of the two areas.
        smaller = box_areas[rest].copy()
        smaller[box_areas[rest] >= box_areas[best]] = box_areas[best]
        overlap = inter / smaller
        survivors = np.where(np.logical_and(overlap < _nms_threshold, smaller != inter))[0]
        # +1 because the reference box itself was excluded from the overlap math.
        order = order[survivors + 1]
    return kept_indices
def rotate_degree_img(_img, _degree, _center=None, _with_expand=True, _mask=None):
    """
    Rotate an image counter-clockwise by ``_degree`` degrees.

    Args:
        _img: image to rotate
        _degree: rotation angle in degrees (counter-clockwise)
        _center: rotation center; defaults to the image's geometric center
        _with_expand: when True, enlarge the canvas so no content is clipped
        _mask: optional mask rotated with the same transform; may be None
    Returns: (rotated image, rotated mask or None)
    """
    if _mask is not None:
        # NOTE(review): this compares the full image shape against the mask's
        # first two dims, so it only passes for single-channel images -- confirm
        # whether _img.shape[:2] was intended.
        assert _img.shape == _mask.shape[:2], 'mask and shape is not same'
    h, w = _img.shape[:2]
    if _center is None:
        center = (w / 2, h / 2)
    else:
        center = _center
    rotate_matrix = cv2.getRotationMatrix2D(center, _degree, 1)
    # Rotate the two right-hand corners (relative to the center) to estimate
    # how large the canvas must be after rotation.
    top_right = np.array((w - 1, 0)) - np.array(center)
    bottom_right = np.array((w - 1, h - 1)) - np.array(center)
    top_right_after_rotate = rotate_matrix[0:2, 0:2].dot(top_right)
    bottom_right_after_rotate = rotate_matrix[0:2, 0:2].dot(bottom_right)
    if _with_expand:
        # NOTE(review): the *2 doubling assumes the rotation center is the
        # geometric center of the image -- confirm before passing a custom
        # _center together with _with_expand=True.
        new_width = max(int(abs(bottom_right_after_rotate[0] * 2) + 0.5), int(abs(top_right_after_rotate[0] * 2) + 0.5))
        new_height = max(int(abs(top_right_after_rotate[1] * 2) + 0.5),
                         int(abs(bottom_right_after_rotate[1] * 2) + 0.5))
    else:
        new_width = w
        new_height = h
    # Shift the affine transform so the rotated content stays centered on the
    # (possibly enlarged) canvas.
    offset_x = (new_width - w) / 2
    offset_y = (new_height - h) / 2
    rotate_matrix[0, 2] += offset_x
    rotate_matrix[1, 2] += offset_y
    rotated_img = cv2.warpAffine(_img, rotate_matrix, (new_width, new_height))
    if _mask is not None:
        rotated_mask = cv2.warpAffine(_mask, rotate_matrix, (new_width, new_height))
    else:
        rotated_mask = None
    return rotated_img, rotated_mask
def rotate_points(_points, _degree=0, _center=(0, 0)):
    """
    Rotate point(s) counter-clockwise around a pivot.

    Args:
        _points: a single (x, y) point or an (N, 2) array of points
        _degree: rotation angle in degrees
        _center: pivot point
    Returns: rotated point(s), squeezed to the input's dimensionality
    """
    rad = np.deg2rad(_degree)
    cos_a, sin_a = np.cos(rad), np.sin(rad)
    rotation = np.array([[cos_a, -sin_a],
                         [sin_a, cos_a]])
    pivot = np.atleast_2d(_center)
    pts = np.atleast_2d(_points)
    # Translate to the pivot, rotate, translate back.
    shifted = pts.T - pivot.T
    return np.squeeze((rotation @ shifted + pivot.T).T)
def resize_convex_hull_polygon(_convex_hull_points, _resize_ratio):
    """
    Scale a convex-hull polygon radially about its centroid.

    Each vertex is converted to polar coordinates around the centroid, its
    radius is multiplied by ``_resize_ratio``, and the result is truncated
    to integer pixel coordinates.

    Args:
        _convex_hull_points: (N, 2) array of polygon vertices
        _resize_ratio: radial scale factor
    Returns: (N, 2) int array of scaled vertices
    """
    center_point = np.mean(_convex_hull_points, axis=0)
    diff_points = _convex_hull_points - center_point
    r = np.linalg.norm(diff_points, axis=1)
    theta = np.arctan2(diff_points[:, 1], diff_points[:, 0])
    target_r = r * _resize_ratio
    # np.float / np.int were removed in NumPy 1.24; the builtins are the
    # documented replacements and keep the same dtypes.
    to_return_points = np.zeros_like(diff_points, dtype=float)
    to_return_points[:, 0] = target_r * np.cos(theta)
    to_return_points[:, 1] = target_r * np.sin(theta)
    # Truncate toward zero (floor for the positive pixel coordinates here).
    return (to_return_points + center_point).astype(int)
def get_distance(_p1, _p2):
    """
    Euclidean distance between two points.

    Args:
        _p1: first point (array-like)
        _p2: second point (array-like)
    Returns: the Euclidean distance as a float
    """
    return np.linalg.norm(_p1 - _p2)
def resize_with_height(_image, _target_height):
    """
    Proportionally scale an image so its height equals ``_target_height``.

    Args:
        _image: image to scale
        _target_height: desired height in pixels
    Returns: the scaled image
    """
    src_h, src_w = _image.shape[:2]
    shrink_factor = src_h / _target_height
    # Round the derived width up so nothing collapses to zero.
    dst_w = int(np.ceil(src_w / shrink_factor))
    return cv2.resize(_image, (dst_w, _target_height))
def resize_with_width(_image, _target_width):
    """
    Proportionally scale an image so its width equals ``_target_width``.

    Args:
        _image: image to scale
        _target_width: desired width in pixels
    Returns: the scaled image
    """
    src_h, src_w = _image.shape[:2]
    shrink_factor = src_w / _target_width
    # Round the derived height up so nothing collapses to zero.
    dst_h = int(np.ceil(src_h / shrink_factor))
    return cv2.resize(_image, (_target_width, dst_h))
def resize_with_short_side(_image, _target_short_side_size):
    """
    Proportionally scale an image so its shorter side equals the target size.

    Args:
        _image: image to scale
        _target_short_side_size: desired length of the shorter side
    Returns: the scaled image
    """
    src_h, src_w = _image.shape[:2]
    # Width is the shorter (or equal) side: fix the width, derive the height.
    if src_w < src_h:
        return resize_with_width(_image, _target_short_side_size)
    return resize_with_height(_image, _target_short_side_size)
def _compute_image_specific_base(_image, _height_base=None, _width_base=None):
"""
计算图像的宽高在一定基数基础上的最邻近向上取整的宽高
Args:
_image: 图像
_height_base: 高度的基数
_width_base: 宽度的基数
Returns: 最临近高度,最邻近宽度
"""
h, w = _image.shape[:2]
target_h = h
target_w = w
if _height_base is not None:
if h <= _height_base:
target_h = _height_base
else:
target_h = int(np.ceil(h / _height_base) * _height_base)
if _width_base is not None:
if w <= _width_base:
target_w = _width_base
else:
target_w = int(np.ceil(w / _width_base) * _width_base)
return target_h, target_w
def resize_with_specific_base(_image, _height_base=None, _width_base=None):
    """
    Resize an image so its height/width become multiples of the given bases.

    Args:
        _image: image to resize
        _height_base: height base; None keeps the original height
        _width_base: width base; None keeps the original width
    Returns: the resized image
    """
    new_h, new_w = _compute_image_specific_base(_image, _height_base, _width_base)
    return cv2.resize(_image, (new_w, new_h))
def center_pad_image_with_specific_base(_image, _height_base=None, _width_base=None, _pad_value=0):
    """
    Center-pad an image onto a canvas whose height/width are multiples of
    the given bases.

    Args:
        _image: image to pad, shape (H, W, C)
        _height_base: height base; None keeps the original height
        _width_base: width base; None keeps the original width
        _pad_value: fill value for the padded border
    Returns: padded image of shape (target_h, target_w, C)
    """
    h, w = _image.shape[:2]
    target_h, target_w = _compute_image_specific_base(_image, _height_base, _width_base)
    full_size_image = np.ones((target_h, target_w, _image.shape[2]), dtype=_image.dtype) * _pad_value
    left_margin = (target_w - w) // 2
    right_margin = left_margin + w
    top_margin = (target_h - h) // 2
    # BUG FIX: the bottom edge must be top_margin + h (the source height).
    # The original used top_margin + target_h, which made the destination
    # slice taller than the source and raised a broadcast error whenever
    # vertical padding was actually needed.
    bottom_margin = top_margin + h
    full_size_image[top_margin:bottom_margin, left_margin:right_margin, ...] = _image
    return full_size_image
def get_cropped_image(_image, _location):
    """
    Crop a sub-region out of an image using normalized coordinates.

    Args:
        _image: image to crop
        _location: dict with normalized 'top_left_x', 'top_left_y',
            'bottom_right_x', 'bottom_right_y' in [0, 1]
    Returns: a copy of the cropped region (both bounds inclusive)
    """
    img_h, img_w = _image.shape[:2]

    def _abs_coord(key, extent):
        # Normalized values are clamped to [0, 1] before scaling to pixels.
        return int(np.clip(_location[key], a_min=0, a_max=1) * extent)

    x0 = _abs_coord('top_left_x', img_w)
    y0 = _abs_coord('top_left_y', img_h)
    x1 = _abs_coord('bottom_right_x', img_w)
    y1 = _abs_coord('bottom_right_y', img_h)
    return _image.copy()[y0:y1 + 1, x0:x1 + 1, ...]
def get_min_area_bbox(_image, _contour, _scale_ratio=1.0):
    """
    Compute the minimum-area (rotated) bounding box of a contour.

    The returned geometry is normalized by the image size so it can be
    applied to the image at other scales. The angle handling folds OpenCV's
    minAreaRect angle convention into a small correction degree
    (note: mainly solves the awkward-rotation-angle problem).

    Args:
        _image: image the bbox lives in (only its shape is used)
        _contour: contour points, OpenCV style
        _scale_ratio: optional polygon dilation ratio applied before fitting
    Returns: dict with 'degree' plus normalized 'center_x', 'center_y',
        'box_height', 'box_width'
    """
    h, w = _image.shape[:2]
    if _scale_ratio != 1:
        # Dilate the polygon with pyclipper; the offset distance follows the
        # common area * ratio / perimeter heuristic.
        reshaped_contour = _contour.reshape(-1, 2)
        current_polygon = Polygon(reshaped_contour)
        distance = current_polygon.area * _scale_ratio / current_polygon.length
        offset = PyclipperOffset()
        offset.AddPath(reshaped_contour, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
        scaled_contour = np.array(offset.Execute(distance)).reshape(-1, 1, 2)
    else:
        scaled_contour = _contour
    rotated_box = cv2.minAreaRect(scaled_contour)
    # When the reported angle is in [-90, -45], rotate by (angle + 90) and
    # swap width/height so the correction angle stays small.
    if -90 <= rotated_box[2] <= -45:
        to_rotate_degree = rotated_box[2] + 90
        bbox_height, bbox_width = rotated_box[1]
    else:
        to_rotate_degree = rotated_box[2]
        bbox_width, bbox_height = rotated_box[1]
    # Normalizing the geometry makes it easy to apply on the pre-resize image.
    to_return_rotated_box = {
        'degree': int(to_rotate_degree),
        'center_x': rotated_box[0][0] / w,
        'center_y': rotated_box[0][1] / h,
        'box_height': bbox_height / h,
        'box_width': bbox_width / w,
    }
    # if abs(to_rotate_degree) > 10:
    #     cv2.waitKey(0)
    return to_return_rotated_box
def get_rotated_box_roi_from_image(_image, _rotated_box):
    """
    Extract the ROI described by a normalized rotated box.

    The image is first rotated (around the box center, without canvas
    expansion) so the box becomes axis-aligned, then the axis-aligned
    region is cropped out.

    Args:
        _image: source image
        _rotated_box: dict produced by get_min_area_bbox
    Returns: the cropped ROI
    """
    img_h, img_w = _image.shape[:2]
    degree = _rotated_box['degree']
    pivot = (_rotated_box['center_x'] * img_w, _rotated_box['center_y'] * img_h)
    half_h = _rotated_box['box_height'] / 2
    half_w = _rotated_box['box_width'] / 2
    aligned_image, _ = rotate_degree_img(_image, degree, pivot, _with_expand=False)
    crop_region = {
        'top_left_x': _rotated_box['center_x'] - half_w,
        'top_left_y': _rotated_box['center_y'] - half_h,
        'bottom_right_x': _rotated_box['center_x'] + half_w,
        'bottom_right_y': _rotated_box['center_y'] + half_h,
    }
    return get_cropped_image(aligned_image, crop_region)
def get_coordinates_of_rotated_box(_image, _rotated_box):
    """
    Compute the four corner pixel coordinates of a normalized rotated box.

    Corners are rotated around the box center, clamped to the image, and
    returned as int32 in pre-rotation order: top-left, top-right,
    bottom-right, bottom-left.

    Args:
        _image: image the box belongs to (only its shape is used)
        _rotated_box: dict produced by get_min_area_bbox
    Returns: (4, 2) int32 array of corner coordinates
    """
    img_h, img_w = _image.shape[:2]
    cx = _rotated_box['center_x']
    cy = _rotated_box['center_y']
    half_w = _rotated_box['box_width'] / 2
    half_h = _rotated_box['box_height'] / 2
    axis_aligned_corners = np.array([
        [cx - half_w, cy - half_h],
        [cx + half_w, cy - half_h],
        [cx + half_w, cy + half_h],
        [cx - half_w, cy + half_h]
    ])
    corners = np.clip(rotate_points(axis_aligned_corners, _rotated_box['degree'], (cx, cy)), a_min=0, a_max=1)
    # Scale the normalized coordinates back to pixels.
    corners[:, 0] *= img_w
    corners[:, 1] *= img_h
    return corners.astype(np.int32)
def clockwise_sort_points(_point_coordinates):
    """
    Sort 2D points clockwise (image coordinates) starting near the top-left.

    Offsets from the centroid are converted to polar angles and the points
    are sorted by that angle, shifted so the top-left quadrant comes first.

    Args:
        _point_coordinates: iterable of (x, y) points
    Returns: the points sorted clockwise
    """
    coordinate_sums = reduce(lambda x, y: map(operator.add, x, y), _point_coordinates)
    center_point = tuple(
        map(operator.truediv, coordinate_sums, [len(_point_coordinates)] * 2))

    def _polar_angle(coord):
        dx, dy = tuple(map(operator.sub, coord, center_point))
        # Shift by 180 so angles land in [0, 360) with top-left first.
        return (180 + math.degrees(math.atan2(dy, dx))) % 360

    return sorted(_point_coordinates, key=_polar_angle)
| [
"numpy.arctan2",
"numpy.maximum",
"numpy.sum",
"math.atan2",
"numpy.abs",
"numpy.ones",
"numpy.clip",
"numpy.argsort",
"cv2.warpAffine",
"numpy.mean",
"numpy.linalg.norm",
"numpy.sin",
"cv2.minAreaRect",
"bezier.Curve",
"cv2.getRotationMatrix2D",
"numpy.atleast_2d",
"numpy.zeros_like... | [((1231, 1280), 'scipy.interpolate.splprep', 'splprep', (['_points.T'], {'u': 'None', 's': '(1.0)', 'per': '(1)', 'quiet': '(2)'}), '(_points.T, u=None, s=1.0, per=1, quiet=2)\n', (1238, 1280), False, 'from scipy.interpolate import splprep, splev\n'), ((1348, 1372), 'scipy.interpolate.splev', 'splev', (['u_new', 'tck'], {'der': '(0)'}), '(u_new, tck, der=0)\n', (1353, 1372), False, 'from scipy.interpolate import splprep, splev\n'), ((1728, 1757), 'numpy.reshape', 'np.reshape', (['_contour', '(-1, 2)'], {}), '(_contour, (-1, 2))\n', (1738, 1757), True, 'import numpy as np\n'), ((3960, 3977), 'numpy.array', 'np.array', (['_points'], {}), '(_points)\n', (3968, 3977), True, 'import numpy as np\n'), ((8140, 8183), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', '_degree', '(1)'], {}), '(center, _degree, 1)\n', (8163, 8183), False, 'import cv2\n'), ((8950, 9010), 'cv2.warpAffine', 'cv2.warpAffine', (['_img', 'rotate_matrix', '(new_width, new_height)'], {}), '(_img, rotate_matrix, (new_width, new_height))\n', (8964, 9010), False, 'import cv2\n'), ((9407, 9426), 'numpy.deg2rad', 'np.deg2rad', (['_degree'], {}), '(_degree)\n', (9417, 9426), True, 'import numpy as np\n'), ((9566, 9588), 'numpy.atleast_2d', 'np.atleast_2d', (['_center'], {}), '(_center)\n', (9579, 9588), True, 'import numpy as np\n'), ((9602, 9624), 'numpy.atleast_2d', 'np.atleast_2d', (['_points'], {}), '(_points)\n', (9615, 9624), True, 'import numpy as np\n'), ((9636, 9700), 'numpy.squeeze', 'np.squeeze', (['(rotate_matrix @ (points.T - center.T) + center.T).T'], {}), '((rotate_matrix @ (points.T - center.T) + center.T).T)\n', (9646, 9700), True, 'import numpy as np\n'), ((9927, 9963), 'numpy.mean', 'np.mean', (['_convex_hull_points'], {'axis': '(0)'}), '(_convex_hull_points, axis=0)\n', (9934, 9963), True, 'import numpy as np\n'), ((10025, 10060), 'numpy.linalg.norm', 'np.linalg.norm', (['diff_points'], {'axis': '(1)'}), '(diff_points, axis=1)\n', (10039, 10060), 
True, 'import numpy as np\n'), ((10073, 10121), 'numpy.arctan2', 'np.arctan2', (['diff_points[:, 1]', 'diff_points[:, 0]'], {}), '(diff_points[:, 1], diff_points[:, 0])\n', (10083, 10121), True, 'import numpy as np\n'), ((10178, 10220), 'numpy.zeros_like', 'np.zeros_like', (['diff_points'], {'dtype': 'np.float'}), '(diff_points, dtype=np.float)\n', (10191, 10220), True, 'import numpy as np\n'), ((10881, 10927), 'cv2.resize', 'cv2.resize', (['_image', '(target_w, _target_height)'], {}), '(_image, (target_w, _target_height))\n', (10891, 10927), False, 'import cv2\n'), ((11221, 11266), 'cv2.resize', 'cv2.resize', (['_image', '(_target_width, target_h)'], {}), '(_image, (_target_width, target_h))\n', (11231, 11266), False, 'import cv2\n'), ((12699, 12739), 'cv2.resize', 'cv2.resize', (['_image', '(target_w, target_h)'], {}), '(_image, (target_w, target_h))\n', (12709, 12739), False, 'import cv2\n'), ((14882, 14913), 'cv2.minAreaRect', 'cv2.minAreaRect', (['scaled_contour'], {}), '(scaled_contour)\n', (14897, 14913), False, 'import cv2\n'), ((16879, 17130), 'numpy.array', 'np.array', (['[[center_x - half_box_width, center_y - half_box_height], [center_x +\n half_box_width, center_y - half_box_height], [center_x + half_box_width,\n center_y + half_box_height], [center_x - half_box_width, center_y +\n half_box_height]]'], {}), '([[center_x - half_box_width, center_y - half_box_height], [\n center_x + half_box_width, center_y - half_box_height], [center_x +\n half_box_width, center_y + half_box_height], [center_x - half_box_width,\n center_y + half_box_height]])\n', (16887, 17130), True, 'import numpy as np\n'), ((2093, 2141), 'numpy.transpose', 'np.transpose', (['_contour[start_index:end_index, :]'], {}), '(_contour[start_index:end_index, :])\n', (2105, 2141), True, 'import numpy as np\n'), ((2176, 2222), 'bezier.Curve', 'bezier.Curve', (['nodes'], {'degree': '(nodes.shape[1] - 1)'}), '(nodes, degree=nodes.shape[1] - 1)\n', (2188, 2222), False, 'import bezier\n'), ((4637, 
4664), 'numpy.hstack', 'np.hstack', (['to_return_points'], {}), '(to_return_points)\n', (4646, 4664), True, 'import numpy as np\n'), ((6510, 6528), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (6520, 6528), True, 'import numpy as np\n'), ((6700, 6746), 'numpy.maximum', 'np.maximum', (['x1[max_index]', 'x1[score_index[1:]]'], {}), '(x1[max_index], x1[score_index[1:]])\n', (6710, 6746), True, 'import numpy as np\n'), ((6776, 6822), 'numpy.maximum', 'np.maximum', (['y1[max_index]', 'y1[score_index[1:]]'], {}), '(y1[max_index], y1[score_index[1:]])\n', (6786, 6822), True, 'import numpy as np\n'), ((6854, 6900), 'numpy.minimum', 'np.minimum', (['x2[max_index]', 'x2[score_index[1:]]'], {}), '(x2[max_index], x2[score_index[1:]])\n', (6864, 6900), True, 'import numpy as np\n'), ((6933, 6979), 'numpy.minimum', 'np.minimum', (['y2[max_index]', 'y2[score_index[1:]]'], {}), '(y2[max_index], y2[score_index[1:]])\n', (6943, 6979), True, 'import numpy as np\n'), ((6997, 7060), 'numpy.maximum', 'np.maximum', (['(0.0)', '(intersection_right_x - intersection_left_x + 1)'], {}), '(0.0, intersection_right_x - intersection_left_x + 1)\n', (7007, 7060), True, 'import numpy as np\n'), ((7078, 7141), 'numpy.maximum', 'np.maximum', (['(0.0)', '(intersection_bottom_y - intersection_top_y + 1)'], {}), '(0.0, intersection_bottom_y - intersection_top_y + 1)\n', (7088, 7141), True, 'import numpy as np\n'), ((8200, 8220), 'numpy.array', 'np.array', (['(w - 1, 0)'], {}), '((w - 1, 0))\n', (8208, 8220), True, 'import numpy as np\n'), ((8223, 8239), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (8231, 8239), True, 'import numpy as np\n'), ((8259, 8283), 'numpy.array', 'np.array', (['(w - 1, h - 1)'], {}), '((w - 1, h - 1))\n', (8267, 8283), True, 'import numpy as np\n'), ((8286, 8302), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (8294, 8302), True, 'import numpy as np\n'), ((9060, 9121), 'cv2.warpAffine', 'cv2.warpAffine', (['_mask', 'rotate_matrix', 
'(new_width, new_height)'], {}), '(_mask, rotate_matrix, (new_width, new_height))\n', (9074, 9121), False, 'import cv2\n'), ((10261, 10274), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (10267, 10274), True, 'import numpy as np\n'), ((10315, 10328), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10321, 10328), True, 'import numpy as np\n'), ((10850, 10868), 'numpy.ceil', 'np.ceil', (['(w / ratio)'], {}), '(w / ratio)\n', (10857, 10868), True, 'import numpy as np\n'), ((11190, 11208), 'numpy.ceil', 'np.ceil', (['(h / ratio)'], {}), '(h / ratio)\n', (11197, 11208), True, 'import numpy as np\n'), ((13175, 13241), 'numpy.ones', 'np.ones', (['(target_h, target_w, _image.shape[2])'], {'dtype': '_image.dtype'}), '((target_h, target_w, _image.shape[2]), dtype=_image.dtype)\n', (13182, 13241), True, 'import numpy as np\n'), ((14512, 14537), 'shapely.geometry.Polygon', 'Polygon', (['reshaped_contour'], {}), '(reshaped_contour)\n', (14519, 14537), False, 'from shapely.geometry import Polygon\n'), ((14635, 14652), 'pyclipper.PyclipperOffset', 'PyclipperOffset', ([], {}), '()\n', (14650, 14652), False, 'from pyclipper import PyclipperOffset\n'), ((561, 589), 'math.atan2', 'math.atan2', (['(-diff_y)', '(-diff_x)'], {}), '(-diff_y, -diff_x)\n', (571, 589), False, 'import math\n'), ((2260, 2296), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(point_num * 5)'], {}), '(0.0, 1.0, point_num * 5)\n', (2271, 2296), True, 'import numpy as np\n'), ((2331, 2350), 'numpy.transpose', 'np.transpose', (['curve'], {}), '(curve)\n', (2343, 2350), True, 'import numpy as np\n'), ((2376, 2403), 'numpy.array', 'np.array', (['to_return_contour'], {}), '(to_return_contour)\n', (2384, 2403), True, 'import numpy as np\n'), ((2689, 2705), 'numpy.sum', 'np.sum', (['m_region'], {}), '(m_region)\n', (2695, 2705), True, 'import numpy as np\n'), ((10561, 10581), 'numpy.square', 'np.square', (['(_p1 - _p2)'], {}), '(_p1 - _p2)\n', (10570, 10581), True, 'import numpy as np\n'), ((13732, 
13782), 'numpy.clip', 'np.clip', (["_location['top_left_x']"], {'a_min': '(0)', 'a_max': '(1)'}), "(_location['top_left_x'], a_min=0, a_max=1)\n", (13739, 13782), True, 'import numpy as np\n'), ((13809, 13859), 'numpy.clip', 'np.clip', (["_location['top_left_y']"], {'a_min': '(0)', 'a_max': '(1)'}), "(_location['top_left_y'], a_min=0, a_max=1)\n", (13816, 13859), True, 'import numpy as np\n'), ((13890, 13944), 'numpy.clip', 'np.clip', (["_location['bottom_right_x']"], {'a_min': '(0)', 'a_max': '(1)'}), "(_location['bottom_right_x'], a_min=0, a_max=1)\n", (13897, 13944), True, 'import numpy as np\n'), ((13975, 14029), 'numpy.clip', 'np.clip', (["_location['bottom_right_y']"], {'a_min': '(0)', 'a_max': '(1)'}), "(_location['bottom_right_y'], a_min=0, a_max=1)\n", (13982, 14029), True, 'import numpy as np\n'), ((2828, 2846), 'numpy.where', 'np.where', (['m_region'], {}), '(m_region)\n', (2836, 2846), True, 'import numpy as np\n'), ((4075, 4114), 'numpy.abs', 'np.abs', (['(m_point_previous - m_point_next)'], {}), '(m_point_previous - m_point_next)\n', (4081, 4114), True, 'import numpy as np\n'), ((4491, 4516), 'numpy.vstack', 'np.vstack', (['[new_x, new_y]'], {}), '([new_x, new_y])\n', (4500, 4516), True, 'import numpy as np\n'), ((4568, 4624), 'numpy.array', 'np.array', (['[[m_point_previous[0]], [m_point_previous[1]]]'], {}), '([[m_point_previous[0]], [m_point_previous[1]]])\n', (4576, 4624), True, 'import numpy as np\n'), ((7414, 7477), 'numpy.logical_and', 'np.logical_and', (['(iou < _nms_threshold)', '(min_areas != intersection)'], {}), '(iou < _nms_threshold, min_areas != intersection)\n', (7428, 7477), True, 'import numpy as np\n'), ((9458, 9471), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (9464, 9471), True, 'import numpy as np\n'), ((9521, 9534), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (9527, 9534), True, 'import numpy as np\n'), ((9536, 9549), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (9542, 9549), True, 'import numpy as 
np\n'), ((3014, 3032), 'numpy.where', 'np.where', (['m_region'], {}), '(m_region)\n', (3022, 3032), True, 'import numpy as np\n'), ((9474, 9487), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (9480, 9487), True, 'import numpy as np\n'), ((12116, 12141), 'numpy.ceil', 'np.ceil', (['(h / _height_base)'], {}), '(h / _height_base)\n', (12123, 12141), True, 'import numpy as np\n'), ((12295, 12319), 'numpy.ceil', 'np.ceil', (['(w / _width_base)'], {}), '(w / _width_base)\n', (12302, 12319), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
# Global RNG seed, re-applied before every resampling step for reproducibility.
seed = 0
# Number of stratified cross-validation folds.
k_fold=2
def input():
    """
    Load 'out.csv', drop rows containing NaN, and return (values, titles).

    NOTE: the name shadows the builtin ``input`` — kept for caller
    compatibility.

    Returns: (numpy array of the cleaned rows, list of column names)
    """
    frame = pd.read_csv('out.csv')
    column_titles = list(frame.columns.values)
    # Keep only rows with no NaN in any column.
    clean = frame[~np.isnan(frame).any(axis=1)]
    return clean.values, column_titles
def normalize(xtrain, xtest):
    """
    Standardize both sets using the *training* set's mean and std.

    Args:
        xtrain: training feature matrix
        xtest: test feature matrix (scaled with training statistics)
    Returns: (standardized xtrain, standardized xtest)
    """
    train_mean = np.mean(xtrain, axis=0)
    train_std = np.std(xtrain, axis=0)
    return (xtrain - train_mean) / train_std, (xtest - train_mean) / train_std
# Module-level cross-validator: k_fold stratified folds, shuffled
# (no random_state, so fold membership varies between runs).
kf = StratifiedKFold(k_fold,shuffle=True)
# NOTE(review): the `ratio` keyword was deprecated and later removed from
# imbalanced-learn (>= 0.6) in favour of `sampling_strategy` -- confirm the
# pinned imblearn version supports it.
smote = SMOTE(ratio=1.0)
def split_data(X,y):
    """
    Split (X, y) with the module-level StratifiedKFold and SMOTE-oversample
    the training part.

    NOTE(review): the loop overwrites its results on every iteration, so only
    the *last* fold produced by ``kf.split`` is actually returned — confirm
    this single-fold behaviour is intended.
    NOTE(review): ``fit_sample`` was renamed to ``fit_resample`` in newer
    imbalanced-learn releases — verify the pinned version.

    Returns: (xtrain, xtest, ytrain, ytest, smote_xtrain, smote_ytrain)
    """
    for tr, te in kf.split(X,y):
        xtrain, xtest = X[tr], X[te]
        ytrain, ytest = y[tr], y[te]
        sxtrain, sytrain = smote.fit_sample(xtrain,ytrain)
    return xtrain, xtest, ytrain, ytest,sxtrain, sytrain
def kfoldsampling(X, y):
    """
    Build train/test splits under several resampling strategies and save
    each variant as .npy files under result/train_set and result/test_set.

    Variants (file-name prefixes): '' = no resampling, 's' = SMOTE,
    'o' = random oversampling, 'u' = random undersampling. The test set is
    never resampled; only its features are re-standardized per variant.
    """

    def _save_variant(prefix, train_x, train_y, test_x, test_y):
        # Persist one resampling variant under its file-name prefix.
        np.save('result/train_set/%sxtrain' % prefix, train_x)
        np.save('result/train_set/%sytrain' % prefix, train_y)
        np.save('result/test_set/%sxtest' % prefix, test_x)
        np.save('result/test_set/%sytest' % prefix, test_y)

    np.random.seed(seed)
    xtrain, xtest, ytrain, ytest, sxtrain, sytrain = split_data(X, y)
    # No resampling.
    xtrain_s, xtest_s = normalize(xtrain, xtest)
    _save_variant('', xtrain_s, ytrain, xtest_s, ytest)
    # SMOTE oversampling (already computed inside split_data).
    np.random.seed(seed)
    sxtrain_s, sxtest_s = normalize(sxtrain, xtest)
    _save_variant('s', sxtrain_s, sytrain, sxtest_s, ytest)
    # Random oversampling.
    np.random.seed(seed)
    ros = RandomOverSampler(random_state=0)
    oxtrain, oytrain = ros.fit_sample(xtrain, ytrain)
    oxtrain_s, oxtest_s = normalize(oxtrain, xtest)
    _save_variant('o', oxtrain_s, oytrain, oxtest_s, ytest)
    # Random undersampling.
    np.random.seed(seed)
    rus = RandomUnderSampler(random_state=0)
    uxtrain, uytrain = rus.fit_sample(xtrain, ytrain)
    uxtrain_s, uxtest_s = normalize(uxtrain, xtest)
    _save_variant('u', uxtrain_s, uytrain, uxtest_s, ytest)
| [
"imblearn.under_sampling.RandomUnderSampler",
"numpy.save",
"numpy.random.seed",
"pandas.read_csv",
"numpy.std",
"numpy.isnan",
"imblearn.over_sampling.RandomOverSampler",
"numpy.mean",
"sklearn.model_selection.StratifiedKFold",
"imblearn.over_sampling.SMOTE"
] | [((614, 651), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['k_fold'], {'shuffle': '(True)'}), '(k_fold, shuffle=True)\n', (629, 651), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((659, 675), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'ratio': '(1.0)'}), '(ratio=1.0)\n', (664, 675), False, 'from imblearn.over_sampling import SMOTE\n'), ((279, 301), 'pandas.read_csv', 'pd.read_csv', (['"""out.csv"""'], {}), "('out.csv')\n", (290, 301), True, 'import pandas as pd\n'), ((440, 463), 'numpy.mean', 'np.mean', (['xtrain'], {'axis': '(0)'}), '(xtrain, axis=0)\n', (447, 463), True, 'import numpy as np\n'), ((475, 497), 'numpy.std', 'np.std', (['xtrain'], {'axis': '(0)'}), '(xtrain, axis=0)\n', (481, 497), True, 'import numpy as np\n'), ((924, 944), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (938, 944), True, 'import numpy as np\n'), ((1064, 1108), 'numpy.save', 'np.save', (['"""result/train_set/xtrain"""', 'xtrain_s'], {}), "('result/train_set/xtrain', xtrain_s)\n", (1071, 1108), True, 'import numpy as np\n'), ((1109, 1151), 'numpy.save', 'np.save', (['"""result/train_set/ytrain"""', 'ytrain'], {}), "('result/train_set/ytrain', ytrain)\n", (1116, 1151), True, 'import numpy as np\n'), ((1152, 1193), 'numpy.save', 'np.save', (['"""result/test_set/xtest"""', 'xtest_s'], {}), "('result/test_set/xtest', xtest_s)\n", (1159, 1193), True, 'import numpy as np\n'), ((1194, 1233), 'numpy.save', 'np.save', (['"""result/test_set/ytest"""', 'ytest'], {}), "('result/test_set/ytest', ytest)\n", (1201, 1233), True, 'import numpy as np\n'), ((1242, 1262), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1256, 1262), True, 'import numpy as np\n'), ((1309, 1355), 'numpy.save', 'np.save', (['"""result/train_set/sxtrain"""', 'sxtrain_s'], {}), "('result/train_set/sxtrain', sxtrain_s)\n", (1316, 1355), True, 'import numpy as np\n'), ((1356, 1400), 'numpy.save', 'np.save', (['"""result/train_set/sytrain"""', 
'sytrain'], {}), "('result/train_set/sytrain', sytrain)\n", (1363, 1400), True, 'import numpy as np\n'), ((1401, 1444), 'numpy.save', 'np.save', (['"""result/test_set/sxtest"""', 'sxtest_s'], {}), "('result/test_set/sxtest', sxtest_s)\n", (1408, 1444), True, 'import numpy as np\n'), ((1445, 1485), 'numpy.save', 'np.save', (['"""result/test_set/sytest"""', 'ytest'], {}), "('result/test_set/sytest', ytest)\n", (1452, 1485), True, 'import numpy as np\n'), ((1500, 1520), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1514, 1520), True, 'import numpy as np\n'), ((1528, 1561), 'imblearn.over_sampling.RandomOverSampler', 'RandomOverSampler', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1545, 1561), False, 'from imblearn.over_sampling import RandomOverSampler\n'), ((1659, 1705), 'numpy.save', 'np.save', (['"""result/train_set/oxtrain"""', 'oxtrain_s'], {}), "('result/train_set/oxtrain', oxtrain_s)\n", (1666, 1705), True, 'import numpy as np\n'), ((1706, 1750), 'numpy.save', 'np.save', (['"""result/train_set/oytrain"""', 'oytrain'], {}), "('result/train_set/oytrain', oytrain)\n", (1713, 1750), True, 'import numpy as np\n'), ((1751, 1794), 'numpy.save', 'np.save', (['"""result/test_set/oxtest"""', 'oxtest_s'], {}), "('result/test_set/oxtest', oxtest_s)\n", (1758, 1794), True, 'import numpy as np\n'), ((1795, 1835), 'numpy.save', 'np.save', (['"""result/test_set/oytest"""', 'ytest'], {}), "('result/test_set/oytest', ytest)\n", (1802, 1835), True, 'import numpy as np\n'), ((1851, 1871), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1865, 1871), True, 'import numpy as np\n'), ((1879, 1913), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1897, 1913), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((2011, 2057), 'numpy.save', 'np.save', (['"""result/train_set/uxtrain"""', 'uxtrain_s'], {}), "('result/train_set/uxtrain', uxtrain_s)\n", 
(2018, 2057), True, 'import numpy as np\n'), ((2058, 2102), 'numpy.save', 'np.save', (['"""result/train_set/uytrain"""', 'uytrain'], {}), "('result/train_set/uytrain', uytrain)\n", (2065, 2102), True, 'import numpy as np\n'), ((2103, 2146), 'numpy.save', 'np.save', (['"""result/test_set/uxtest"""', 'uxtest_s'], {}), "('result/test_set/uxtest', uxtest_s)\n", (2110, 2146), True, 'import numpy as np\n'), ((2147, 2187), 'numpy.save', 'np.save', (['"""result/test_set/uytest"""', 'ytest'], {}), "('result/test_set/uytest', ytest)\n", (2154, 2187), True, 'import numpy as np\n'), ((345, 357), 'numpy.isnan', 'np.isnan', (['df'], {}), '(df)\n', (353, 357), True, 'import numpy as np\n')] |
import os
import webbrowser
import requests
from bs4 import BeautifulSoup
import pandas as pd
import geocoder
from geopy.geocoders import Nominatim
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
import sklearn
from sklearn.cluster import KMeans
import folium
import numpy as np
import matplotlib.pyplot as plt
from selenium import webdriver
import time
from fuzzywuzzy import fuzz
def save_map(m):
    """Persist a folium map object to 'map.html' in the working directory."""
    m.save('map.html')
def get_geo_location(address):
    """
    Geocode an address with Nominatim.

    Args:
        address: free-form address string
    Returns: [latitude, longitude], or [None, None] when nothing matched
    """
    geocoding_client = Nominatim(user_agent="ny_explorer")
    match = geocoding_client.geocode(address)
    if match:
        return [match.latitude, match.longitude]
    return [None, None]
def get_new_york_data():
    """
    Download the NYC neighborhoods GeoJSON dataset and return it as a table.

    Returns:
        pd.DataFrame with columns Borough, Neighborhood, Latitude, Longitude,
        one row per neighborhood feature.
    """
    NY_DATASET = "https://cocl.us/new_york_dataset"
    resp = requests.get(NY_DATASET).json()
    features = resp['features']
    column_names = ['Borough', 'Neighborhood', 'Latitude', 'Longitude']
    # Collect rows first and build the frame once: DataFrame.append was
    # deprecated and removed in pandas 2.0, and was O(n^2) anyway.
    rows = []
    for data in features:
        properties = data['properties']
        # GeoJSON coordinates are [longitude, latitude].
        lng, lat = data['geometry']['coordinates']
        rows.append({'Borough': properties['borough'],
                     'Neighborhood': properties['name'],
                     'Latitude': lat,
                     'Longitude': lng})
    return pd.DataFrame(rows, columns=column_names)
def get_population_per_neighbourhood(read_from_csv=False):
    """
    Build a (Borough, Neighborhood, Population) table for New York City.

    When ``read_from_csv`` is False, scrapes the Wikipedia list of NYC
    neighborhoods (and each neighborhood's own article for its infobox
    population) and caches the result to population.csv; when True, loads
    the cached population.csv instead.

    Returns:
        pd.DataFrame with columns Borough, Neighborhood, Population, sorted
        by Borough and de-duplicated by Neighborhood (last occurrence wins).
    """
    if not read_from_csv:
        WIKI_LINK = "https://en.wikipedia.org/wiki/Neighborhoods_in_New_York_City"
        ROOT_WIKI_LINK = "https://en.wikipedia.org"
        page = requests.get(WIKI_LINK)
        soup = BeautifulSoup(page.text, 'html.parser')
        population_list = []
        for table_row in soup.select("table.wikitable tr"):
            cells = table_row.findAll('td')
            if len(cells) > 0:
                # Column 0: borough name; strip non-breaking spaces and keep
                # only the first word.
                borough = cells[0].text.strip().replace(
                    '\xa0', ' ').split(' ')[0]
                population = int(cells[3].text.strip().replace(',', ''))
                # Column 4 links to the individual neighborhood articles.
                for item in cells[4].findAll('a'):
                    neighborhood = item.text
                    neighbourhood_page = requests.get(
                        ROOT_WIKI_LINK+item['href'])
                    # NOTE(review): this rebinds `soup`, shadowing the list
                    # page's soup; harmless only because the outer select()
                    # result was already materialized before the loop.
                    soup = BeautifulSoup(
                        neighbourhood_page.text, 'html.parser')
                    table = soup.select("table.infobox tr")
                    # The population figure sits in the row *after* the row
                    # whose header mentions "population".
                    should_record = False
                    for row in table:
                        head = row.find('th')
                        body = row.find('td')
                        if head and 'population' in head.text.lower():
                            should_record = True
                            continue
                        if should_record:
                            # NOTE(review): bare except silently skips rows
                            # whose population cell does not parse as int --
                            # best-effort scraping, but consider narrowing to
                            # (ValueError, AttributeError).
                            try:
                                population_list.append(
                                    [borough, neighborhood, int(body.text.replace(',', ''))])
                            except:
                                pass
                            should_record = False
        df = pd.DataFrame(population_list, columns=[
            "Borough", "Neighborhood", "Population"])
        df.to_csv('population.csv')
    else:
        df = pd.read_csv('population.csv')
    df = df.sort_values(by=['Borough'])
    df = df.drop_duplicates(subset='Neighborhood', keep='last')
    return df
def get_hospital_data(lat, lng, borough, neighborhood):
    """
    Query the Foursquare venue-search API for hospitals near a point.

    Args:
        lat, lng: search center coordinates
        borough, neighborhood: labels copied into every result row
    Returns: pd.DataFrame with columns ID, Name, Latitude, Longitude,
        Borough, Neighborhood — or None when the API call fails.
    """
    radius = 1000
    LIMIT = 100
    VERSION = '20200328'
    # FS_CLIENT_ID = "0LVR2DN1KYTA0PDB1AIBAMAKIVLKWZ0GBEUH3WLZFBDN5OST"
    FS_CLIENT_ID = "A5S2CJNU43XNBJEADGVEDLOR024ZP5BC5KZY2E1F0WT0DZEI"
    # FS_CLIENT_SECRET = "<KEY>"
    FS_CLIENT_SECRET = "<KEY>"
    FS_HOSPITAL_KEY = "<KEY>"
    url = 'https://api.foursquare.com/v2/venues/search?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}&categoryId={}'.format(
        FS_CLIENT_ID,
        FS_CLIENT_SECRET,
        VERSION,
        lat,
        lng,
        radius,
        LIMIT,
        FS_HOSPITAL_KEY)
    response = requests.get(url)
    if response.status_code != 200:
        print("ERROR", response)
        return None
    venues = response.json()["response"]["venues"]
    records = []
    for venue in venues:
        # Venues missing any expected field are skipped entirely.
        try:
            records.append([venue['id'], venue['name'],
                            venue["location"]["lat"], venue["location"]["lng"],
                            borough, neighborhood])
        except KeyError:
            pass
    column_names = ['ID', 'Name', 'Latitude',
                    'Longitude', "Borough", "Neighborhood"]
    return pd.DataFrame(records, columns=column_names)
def get_hospital_per_neighborhood(row):
    """
    Count hospitals near the neighborhood described by ``row``.

    Args:
        row: mapping with Latitude, Longitude, Borough, Neighborhood keys
    Returns: number of hospital venues found (0 when the API call failed)
    """
    hospitals = get_hospital_data(
        row["Latitude"], row["Longitude"], row["Borough"], row["Neighborhood"])
    return 0 if hospitals is None else len(hospitals.index)
def plot_kmeans(dataset):
    """Plot the silhouette score for k = 3..29 KMeans fits of *dataset*.

    Shows the score-vs-k curve and prints the k that maximizes it.
    """
    obs = dataset.copy()
    cluster_range = range(3, 30)
    scores = []
    for k in cluster_range:
        model = KMeans(k, init='k-means++', n_init=10,
                       max_iter=300, tol=0.0001, random_state=10)
        model.fit(obs)
        predicted = model.predict(obs)
        scores.append(sklearn.metrics.silhouette_score(
            obs, predicted, metric='euclidean', random_state=0))
    plt.plot(cluster_range, scores)
    plt.title("Silhouette score values vs Numbers of Clusters ")
    plt.show()
    # Best k is the one with the highest silhouette score.
    optimum_number_of_components = cluster_range[scores.index(max(scores))]
    print("Optimal number of components is:")
    print(optimum_number_of_components)
def hospital_vs_population(row):
    """Return the hospitals-per-resident ratio for one neighborhood row."""
    return row["Hospitals"] / row["Population"]
# create map
def show_bar_chart(df, field="Population", title='Number of Neighborhood for each Borough in New York City', x_label="Borough", y_label="No.of Neighborhood"):
    """Render a bar chart of *field* summed per borough.

    Groups *df* by the 'Borough' column and plots the per-borough sums.
    """
    plt.figure(figsize=(9, 5), dpi=100)
    plt.title(title)
    plt.xlabel(x_label, fontsize=15)
    plt.ylabel(y_label, fontsize=15)
    per_borough_totals = df.groupby('Borough')[field].sum()
    per_borough_totals.plot(kind='bar')
    plt.legend()
    plt.show()
def render_map_clusters(df, df_clusters, kclusters=3):
    """Draw each neighborhood on a folium map as a circle colored by cluster.

    Circle radius is taken from ``df_clusters[:, 1]`` (assumes df_clusters
    supports positional 2-D indexing, i.e. a numpy array -- TODO confirm the
    caller, which appears to pass a DataFrame).

    BUG FIX: the hand-picked ``colours`` list has only three entries, so any
    ``kclusters > 3`` raised IndexError; the ``rainbow`` palette was computed
    but never used.  Clusters beyond the first three now fall back to it.
    The first three clusters keep their original colors.
    """
    map_clusters = folium.Map(
        location=get_geo_location("New York"), zoom_start=11)
    colours = ['red', 'black', 'blue']
    x = np.arange(kclusters)
    ys = [i + x + (i*x)**2 for i in range(kclusters)]
    colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
    rainbow = [colors.rgb2hex(i) for i in colors_array]
    for lat, lon, poi, cluster, bed_per_people in zip(df['Latitude'], df['Longitude'], df['Borough'], df['Cluster Labels'], df_clusters[:, 1]):
        label = folium.Popup(
            str(poi) + ' Cluster ' + str(cluster),
            parse_html=True
        )
        colour = colours[cluster] if cluster < len(colours) else rainbow[cluster]
        folium.CircleMarker(
            [lat, lon],
            radius=bed_per_people,
            popup=label,
            color=colour,
            fill=True,
            fill_color=colour,
            fill_opacity=0.7).add_to(map_clusters)
    save_map(map_clusters)
def get_chunks(lst, n, chunk_id=0):
    """Split *lst* into consecutive pieces of length *n* (last may be
    shorter) and return the piece at index *chunk_id*."""
    pieces = [lst[start:start + n] for start in range(0, len(lst), n)]
    return pieces[chunk_id]
def get_bed_per_hospital():
    """Scrape total and ICU bed counts for a fixed set of NY hospitals.

    Drives Safari via Selenium to render each hospital's print-view profile
    page, parses the beds table, and writes the de-duplicated results to
    ``hospital_beds_.csv``.

    BUG FIXES relative to the original:
    * ``soup.select("table", id="number-of-beds")`` passed an invalid ``id``
      keyword -- ``select()`` takes only a CSS selector.  The kwarg is
      removed (first table on the page is used, as before).
    * ``browser.quit()`` is now in ``finally`` so a failed scrape no longer
      leaks a Safari session.
    * ``hospital_name`` was computed twice; only the stripped value is kept.
    """
    ROOT_URL = "https://profiles.health.ny.gov/hospital/printview/{}"
    NYM_NYC = [
        103016, 106804, 102908, 103035, 102934, 1256608, 105117, 103009, 102974, 103006, 103041, 105086, 103056, 103086, 102973,
        102970, 102950, 103074, 103008, 103007, 102985, 103012, 106809, 102937, 103068, 102944, 102995, 106803, 102916, 105109,
        102914, 102960, 103038, 106810, 106811, 102961, 102940, 102933, 103078, 254693, 103065, 103021, 103080, 103033, 102919,
        105116, 106825, 103084, 103087, 102989, 102929, 106817, 106819, 103073, 103085, 103025
    ]
    NYM_LI = [
        102999, 103062, 102928, 103002, 102980, 103077, 103049, 103011, 102918, 102965, 102994, 102966, 103069, 1189331, 102926,
        103088, 103045, 103000, 103070, 105137, 103082, 102954, 103072
    ]
    BRONX = [
        102908, 106804, 105117, 102973, 102950, 106809, 102937, 103068, 102944, 103078, 103087
    ]
    QUEENS = [
        102974, 103006, 102912, 103074, 103008, 105109, 102933, 103033, 103084
    ]
    HOSPITALS = list(set(NYM_LI + NYM_NYC + BRONX + QUEENS))
    print('Total hospitals', len(HOSPITALS))
    hospital_data = []
    for val in HOSPITALS:
        print("Processing hospital", val)
        url = ROOT_URL.format(val)
        browser = webdriver.Safari()
        try:
            browser.get(url)
            time.sleep(10)  # let the page finish rendering before scraping
            soup = BeautifulSoup(browser.page_source, 'html.parser')
            hospital_name = soup.find('h2').text.strip()
            table = soup.select("table")[0]
            rows = table.findAll('tr')
            icu_beds = 0
            for row in rows:
                # An "intensive care beds" label cell is followed by its count.
                should_record = False
                for td in row.findAll('td'):
                    if "intensive care beds" == td.text.lower():
                        should_record = True
                        continue
                    if should_record:
                        icu_beds = td.text
            # Total bed count sits in the last cell of the last row.
            bed_number = rows[-1].findAll('td')[-1].text
            print(hospital_name, bed_number, icu_beds)
            hospital_data.append([hospital_name, bed_number, icu_beds])
        except Exception as e:
            # Best-effort scrape: log and keep going with the next hospital.
            print(e)
        finally:
            browser.quit()
    df = pd.DataFrame(
        hospital_data, columns=[
            "Hospital Name", "Bed Number", "ICU Bed Number"
        ]
    )
    df = df.drop_duplicates(subset='Hospital Name', keep='last')
    df.to_csv('hospital_beds_.csv')
def get_hospital_per_neighborhood_borough(df):
    """Fetch nearby hospitals for every neighborhood row in *df*.

    Flattens all per-neighborhood Foursquare results into one table and
    writes it to ``hospital_per_boro_nei.csv``.
    """
    column_names = ['ID', 'Name', 'Latitude',
                    'Longitude', "Borough", "Neighborhood"]
    records = []
    for _, row in df.iterrows():
        hospitals = get_hospital_data(
            row["Latitude"], row["Longitude"], row["Borough"], row["Neighborhood"])
        if hospitals is None:
            continue
        for _, hospital in hospitals.iterrows():
            records.append([hospital[column] for column in column_names])
    out_df = pd.DataFrame(records, columns=column_names)
    out_df.to_csv('hospital_per_boro_nei.csv')
# Load previously-scraped inputs:
#   df       - neighborhoods with coordinates
#   h_df     - bed counts per hospital (from get_bed_per_hospital)
#   h_pbn_df - hospitals mapped to borough/neighborhood
df = pd.read_csv("hospitals.csv")
h_df = pd.read_csv("hospital_beds.csv")
h_pbn_df = pd.read_csv("hospital_per_boro_nei.csv")
# One-off scraping steps, kept for reference; rerun to refresh the CSVs.
# get_hospital_per_neighborhood_borough(df)
# get_bed_per_hospital()
def combine_hospital_beds_with_boro_neighborhood(
        hospital_df,
        hospital_boro_nei_df
):
    """Attach a borough/neighborhood to every hospital via fuzzy name match.

    For each bed-count row, the geocoded hospital whose name is the closest
    fuzzy match (token-sort ratio) supplies the Borough and Neighborhood
    columns.  The combined table is de-duplicated, saved to
    ``cleaned_hospital_data.csv``, and returned.
    """
    bed_columns = [
        "Hospital Name", "Bed Number", "ICU Bed Number"
    ]
    location_columns = [
        "Borough", "Neighborhood"
    ]
    combined = []
    for _, bed_row in hospital_df.iterrows():
        best_match = None
        best_score = 0
        for _, loc_row in hospital_boro_nei_df.iterrows():
            score = fuzz.token_sort_ratio(bed_row["Hospital Name"], loc_row["Name"])
            if score > best_score:
                best_score = score
                best_match = (
                    [bed_row[column] for column in bed_columns] +
                    [loc_row[column] for column in location_columns])
        if best_match:
            combined.append(best_match)
    df = pd.DataFrame(combined, columns=bed_columns + location_columns)
    df = df.drop_duplicates(
        subset=["Borough", "Neighborhood", "Hospital Name"], keep="last"
    )
    df.to_csv('cleaned_hospital_data.csv')
    return df
# print(h_df.head())
# print(h_pbn_df.head())
# combine_hospital_beds_with_boro_neighborhood(h_df, h_pbn_df)
# Aggregate bed counts per (Neighborhood, Borough) pair.
c_df = pd.read_csv('cleaned_hospital_data.csv')
print(c_df.head(20))
# NOTE(review): 'Bed Number' comes from scraped text -- if the CSV column is
# read as strings, "sum" concatenates rather than adds; verify the dtype.
c_df = c_df.groupby(
    ["Neighborhood", "Borough"]).agg({'Bed Number': "sum", "ICU Bed Number": "sum"})
print(c_df.head(20))
ny_df = get_new_york_data()
ny_borough_df = get_population_per_neighbourhood(True)
# NOTE(review): set_index returns a NEW frame and the result is discarded --
# these two calls have no effect; inplace=True (or rebinding) was likely
# intended.  Left as-is because the later merge keys on column names anyway.
ny_df.set_index('Neighborhood')
ny_borough_df.set_index('Neighborhood')
# Join coordinates with population per neighborhood.
ny_p_df = pd.merge(ny_df, ny_borough_df)
# df['Hospitals'] = df.apply(lambda row:get_hospital_per_neighborhood(row),axis=1)
def get_bed_per_person(row):
    """Return hospital beds per 100 residents for one neighborhood row."""
    return 100 * row["Bed Number"] / row["Population"]
def get_icu_bed_per_person(row):
    """Return ICU beds per 100 residents for one neighborhood row."""
    return 100 * row["ICU Bed Number"] / row["Population"]
# NOTE(review): another no-op set_index (result discarded); see above.
ny_p_df.set_index('Neighborhood')
# Final per-neighborhood table: beds + ICU beds + population + coordinates.
df = pd.merge(c_df, ny_p_df, how="inner", on=["Borough", "Neighborhood"])
df.to_csv('final_data.csv')
df["Bed per Person"] = df.apply(
    lambda row: get_bed_per_person(row), axis=1)
df["ICU Bed per Person"] = df.apply(
    lambda row: get_icu_bed_per_person(row), axis=1)
# Silhouette scan to pick a cluster count (plot only; k is hard-coded below).
plot_kmeans(df[["Latitude", "Longitude",
                "Bed per Person"]])
# set number of clusters
kclusters = 3
# run k-means clustering
df_clusters = df[["Latitude", "Longitude",
                  "Population", "Bed per Person", "ICU Bed per Person"]]
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(df_clusters)
# check cluster labels generated for each row in the dataframe
print(kmeans.labels_[0:24])
df.insert(0, 'Cluster Labels', kmeans.labels_)
print(df.head(30))
render_map_clusters(df, df_clusters)
show_bar_chart(ny_p_df)
# show_bar_chart(df, "Hospitals", title="Hospitals per Borough", y_label="Hospital")
# show_bar_chart(df, "Hospital vs Population", title="Hospital vs Population Borough", y_label="Hospital vs Population")
# Scatter of population vs bed-per-person with cluster centers overlaid.
y_kmeans = kmeans.predict(df_clusters)
plt.scatter(df_clusters[["Population"]], df_clusters[[
    "Bed per Person"]], c=y_kmeans, s=50, cmap='viridis')
centers = kmeans.cluster_centers_
# NOTE(review): centers[:, 0] is the Latitude column of the fitted features,
# not Population -- the center overlay axes don't match the scatter axes.
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)
plt.xlabel('Population')
plt.ylabel('Bed per Person')
plt.show()
# Same scatter repeated with a different center column (centers[:, 2]).
y_kmeans = kmeans.predict(df_clusters)
plt.scatter(df_clusters[["Population"]], df_clusters[[
    "Bed per Person"]], c=y_kmeans, s=50, cmap='viridis')
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 2], c='black', s=200, alpha=0.5)
plt.xlabel('Population')
plt.ylabel('Bed per Person')
plt.show()
render_map_clusters(df, df_clusters)
# Peek at a few rows from each cluster.
print(df[(df['Cluster Labels'] == 0)].head())
print(df[(df['Cluster Labels'] == 2)].head())
print(df[(df['Cluster Labels'] == 1)].head())
| [
"matplotlib.pyplot.title",
"fuzzywuzzy.fuzz.token_sort_ratio",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.colors.rgb2hex",
"numpy.arange",
"pandas.DataFrame",
"sklearn.cluster.KMeans",
"pandas.merge",
"requests.get",
"selenium.webdriver.Safari",
"matplotlib.pyplot.show",
"mat... | [((10915, 10943), 'pandas.read_csv', 'pd.read_csv', (['"""hospitals.csv"""'], {}), "('hospitals.csv')\n", (10926, 10943), True, 'import pandas as pd\n'), ((10951, 10983), 'pandas.read_csv', 'pd.read_csv', (['"""hospital_beds.csv"""'], {}), "('hospital_beds.csv')\n", (10962, 10983), True, 'import pandas as pd\n'), ((10995, 11035), 'pandas.read_csv', 'pd.read_csv', (['"""hospital_per_boro_nei.csv"""'], {}), "('hospital_per_boro_nei.csv')\n", (11006, 11035), True, 'import pandas as pd\n'), ((12274, 12314), 'pandas.read_csv', 'pd.read_csv', (['"""cleaned_hospital_data.csv"""'], {}), "('cleaned_hospital_data.csv')\n", (12285, 12314), True, 'import pandas as pd\n'), ((12634, 12664), 'pandas.merge', 'pd.merge', (['ny_df', 'ny_borough_df'], {}), '(ny_df, ny_borough_df)\n', (12642, 12664), True, 'import pandas as pd\n'), ((12970, 13038), 'pandas.merge', 'pd.merge', (['c_df', 'ny_p_df'], {'how': '"""inner"""', 'on': "['Borough', 'Neighborhood']"}), "(c_df, ny_p_df, how='inner', on=['Borough', 'Neighborhood'])\n", (12978, 13038), True, 'import pandas as pd\n'), ((14038, 14150), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df_clusters[['Population']]", "df_clusters[['Bed per Person']]"], {'c': 'y_kmeans', 's': '(50)', 'cmap': '"""viridis"""'}), "(df_clusters[['Population']], df_clusters[['Bed per Person']], c\n =y_kmeans, s=50, cmap='viridis')\n", (14049, 14150), True, 'import matplotlib.pyplot as plt\n'), ((14193, 14263), 'matplotlib.pyplot.scatter', 'plt.scatter', (['centers[:, 0]', 'centers[:, 1]'], {'c': '"""black"""', 's': '(200)', 'alpha': '(0.5)'}), "(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)\n", (14204, 14263), True, 'import matplotlib.pyplot as plt\n'), ((14264, 14288), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Population"""'], {}), "('Population')\n", (14274, 14288), True, 'import matplotlib.pyplot as plt\n'), ((14289, 14317), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Bed per Person"""'], {}), "('Bed per Person')\n", (14299, 
14317), True, 'import matplotlib.pyplot as plt\n'), ((14318, 14328), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14326, 14328), True, 'import matplotlib.pyplot as plt\n'), ((14369, 14481), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df_clusters[['Population']]", "df_clusters[['Bed per Person']]"], {'c': 'y_kmeans', 's': '(50)', 'cmap': '"""viridis"""'}), "(df_clusters[['Population']], df_clusters[['Bed per Person']], c\n =y_kmeans, s=50, cmap='viridis')\n", (14380, 14481), True, 'import matplotlib.pyplot as plt\n'), ((14524, 14594), 'matplotlib.pyplot.scatter', 'plt.scatter', (['centers[:, 0]', 'centers[:, 2]'], {'c': '"""black"""', 's': '(200)', 'alpha': '(0.5)'}), "(centers[:, 0], centers[:, 2], c='black', s=200, alpha=0.5)\n", (14535, 14594), True, 'import matplotlib.pyplot as plt\n'), ((14595, 14619), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Population"""'], {}), "('Population')\n", (14605, 14619), True, 'import matplotlib.pyplot as plt\n'), ((14620, 14648), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Bed per Person"""'], {}), "('Bed per Person')\n", (14630, 14648), True, 'import matplotlib.pyplot as plt\n'), ((14649, 14659), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14657, 14659), True, 'import matplotlib.pyplot as plt\n'), ((544, 579), 'geopy.geocoders.Nominatim', 'Nominatim', ([], {'user_agent': '"""ny_explorer"""'}), "(user_agent='ny_explorer')\n", (553, 579), False, 'from geopy.geocoders import Nominatim\n'), ((1023, 1057), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_names'}), '(columns=column_names)\n', (1035, 1057), True, 'import pandas as pd\n'), ((4387, 4404), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4399, 4404), False, 'import requests\n'), ((5055, 5104), 'pandas.DataFrame', 'pd.DataFrame', (['venue_details'], {'columns': 'column_names'}), '(venue_details, columns=column_names)\n', (5067, 5104), True, 'import pandas as pd\n'), ((5841, 5894), 'matplotlib.pyplot.plot', 
'plt.plot', (['number_of_clusters', 'silhouette_score_values'], {}), '(number_of_clusters, silhouette_score_values)\n', (5849, 5894), True, 'import matplotlib.pyplot as plt\n'), ((5899, 5959), 'matplotlib.pyplot.title', 'plt.title', (['"""Silhouette score values vs Numbers of Clusters """'], {}), "('Silhouette score values vs Numbers of Clusters ')\n", (5908, 5959), True, 'import matplotlib.pyplot as plt\n'), ((5964, 5974), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5972, 5974), True, 'import matplotlib.pyplot as plt\n'), ((6449, 6484), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 5)', 'dpi': '(100)'}), '(figsize=(9, 5), dpi=100)\n', (6459, 6484), True, 'import matplotlib.pyplot as plt\n'), ((6489, 6505), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6498, 6505), True, 'import matplotlib.pyplot as plt\n'), ((6510, 6542), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {'fontsize': '(15)'}), '(x_label, fontsize=15)\n', (6520, 6542), True, 'import matplotlib.pyplot as plt\n'), ((6547, 6579), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {'fontsize': '(15)'}), '(y_label, fontsize=15)\n', (6557, 6579), True, 'import matplotlib.pyplot as plt\n'), ((6640, 6652), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6650, 6652), True, 'import matplotlib.pyplot as plt\n'), ((6657, 6667), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6665, 6667), True, 'import matplotlib.pyplot as plt\n'), ((6865, 6885), 'numpy.arange', 'np.arange', (['kclusters'], {}), '(kclusters)\n', (6874, 6885), True, 'import numpy as np\n'), ((10122, 10212), 'pandas.DataFrame', 'pd.DataFrame', (['hospital_data'], {'columns': "['Hospital Name', 'Bed Number', 'ICU Bed Number']"}), "(hospital_data, columns=['Hospital Name', 'Bed Number',\n 'ICU Bed Number'])\n", (10134, 10212), True, 'import pandas as pd\n'), ((10822, 10862), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'column_names'}), '(data, 
columns=column_names)\n', (10834, 10862), True, 'import pandas as pd\n'), ((11926, 11991), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': '(column_names + boro_neig_column_names)'}), '(data, columns=column_names + boro_neig_column_names)\n', (11938, 11991), True, 'import pandas as pd\n'), ((1935, 1958), 'requests.get', 'requests.get', (['WIKI_LINK'], {}), '(WIKI_LINK)\n', (1947, 1958), False, 'import requests\n'), ((1974, 2013), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.text', '"""html.parser"""'], {}), "(page.text, 'html.parser')\n", (1987, 2013), False, 'from bs4 import BeautifulSoup\n'), ((3415, 3500), 'pandas.DataFrame', 'pd.DataFrame', (['population_list'], {'columns': "['Borough', 'Neighborhood', 'Population']"}), "(population_list, columns=['Borough', 'Neighborhood', 'Population']\n )\n", (3427, 3500), True, 'import pandas as pd\n'), ((3582, 3611), 'pandas.read_csv', 'pd.read_csv', (['"""population.csv"""'], {}), "('population.csv')\n", (3593, 3611), True, 'import pandas as pd\n'), ((5522, 5607), 'sklearn.cluster.KMeans', 'KMeans', (['i'], {'init': '"""k-means++"""', 'n_init': '(10)', 'max_iter': '(300)', 'tol': '(0.0001)', 'random_state': '(10)'}), "(i, init='k-means++', n_init=10, max_iter=300, tol=0.0001,\n random_state=10)\n", (5528, 5607), False, 'from sklearn.cluster import KMeans\n'), ((7013, 7030), 'matplotlib.colors.rgb2hex', 'colors.rgb2hex', (['i'], {}), '(i)\n', (7027, 7030), True, 'import matplotlib.colors as colors\n'), ((9075, 9093), 'selenium.webdriver.Safari', 'webdriver.Safari', ([], {}), '()\n', (9091, 9093), False, 'from selenium import webdriver\n'), ((13509, 13553), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'kclusters', 'random_state': '(0)'}), '(n_clusters=kclusters, random_state=0)\n', (13515, 13553), False, 'from sklearn.cluster import KMeans\n'), ((867, 891), 'requests.get', 'requests.get', (['NY_DATASET'], {}), '(NY_DATASET)\n', (879, 891), False, 'import requests\n'), ((5740, 5825), 
'sklearn.metrics.silhouette_score', 'sklearn.metrics.silhouette_score', (['obs', 'labels'], {'metric': '"""euclidean"""', 'random_state': '(0)'}), "(obs, labels, metric='euclidean',\n random_state=0)\n", (5772, 5825), False, 'import sklearn\n'), ((9148, 9162), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (9158, 9162), False, 'import time\n'), ((9221, 9255), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (9234, 9255), False, 'from bs4 import BeautifulSoup\n'), ((11543, 11600), 'fuzzywuzzy.fuzz.token_sort_ratio', 'fuzz.token_sort_ratio', (["row['Hospital Name']", "hrow['Name']"], {}), "(row['Hospital Name'], hrow['Name'])\n", (11564, 11600), False, 'from fuzzywuzzy import fuzz\n'), ((7349, 7503), 'folium.CircleMarker', 'folium.CircleMarker', (['[lat, lon]'], {'radius': 'bed_per_people', 'popup': 'label', 'color': 'colours[cluster]', 'fill': '(True)', 'fill_color': 'colours[cluster]', 'fill_opacity': '(0.7)'}), '([lat, lon], radius=bed_per_people, popup=label, color=\n colours[cluster], fill=True, fill_color=colours[cluster], fill_opacity=0.7)\n', (7368, 7503), False, 'import folium\n'), ((2492, 2535), 'requests.get', 'requests.get', (["(ROOT_WIKI_LINK + item['href'])"], {}), "(ROOT_WIKI_LINK + item['href'])\n", (2504, 2535), False, 'import requests\n'), ((2586, 2639), 'bs4.BeautifulSoup', 'BeautifulSoup', (['neighbourhood_page.text', '"""html.parser"""'], {}), "(neighbourhood_page.text, 'html.parser')\n", (2599, 2639), False, 'from bs4 import BeautifulSoup\n')] |
import argparse
import os, sys
import numpy as np
import pandas as pd
import pickle as pk
import torch
import torch.nn as nn
from torch.autograd import grad
from torch.utils.data import SubsetRandomSampler
import torchvision.datasets as datasets
import torchvision.transforms as transforms
sys.path.append('../')
from eigen import dominant_hessian_eigs
import resnet
# Command-line interface.
# BUG FIX: the long string was passed as the positional ``prog`` argument of
# ArgumentParser (which replaces the program name in the usage line) instead
# of ``description``, and its two literals were concatenated without a
# separating space ("testset").
parser = argparse.ArgumentParser(
        description='Gathers statistics of a model on the test '
        'set, and saves these statistics to a pickle file in the model directory')
parser.add_argument('datadir', type=str,
        metavar='DIR', help='Directory where ImageNet data is saved')
parser.add_argument('--model-path', type=str, required=True,metavar='PATH',
        help='Path to saved PyTorch model')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--model', type=str, default='resnet50',
        choices=['smoothresnet50','resnet50'], help='Model')
parser.add_argument('--num-images', type=int, default=50000,metavar='N',
        help='total number of images to attack (default: 50000)')
parser.add_argument('--batch-size', type=int, default=100,metavar='N',
        help='number of images to attack at a time')
parser.add_argument('--norm', default='L2', choices=['L2', 'Linf'],
        help='norm measuring adversarial perturbations')
parser.add_argument('--curvature', action='store_true', help='Compute curvature statistics. This can take a long time (up to a day on four GPUs)')
args = parser.parse_args()
# Seed both torch and numpy so the image subset and any stochastic ops repeat.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
print('Arguments:')
for p in vars(args).items():
    print('  ',p[0]+': ',p[1])
print('\n')
has_cuda = torch.cuda.is_available()
# Data and model loading code
# ----------------------------
# Pick which of the 50k validation images to evaluate (random subset when
# fewer than all are requested).
if args.num_images<50000:
    IX = np.random.choice(50000, size=args.num_images, replace=False)
else:
    IX = np.arange(50000)
IX = torch.from_numpy(IX)
sampler = SubsetRandomSampler(IX)
# ImageNet validation loader: resize, center-crop to 288, tensorize.
valdir =os.path.join(args.datadir, 'validation/')
loader = torch.utils.data.DataLoader(
    dataset = datasets.ImageFolder(valdir, transforms.Compose(
        [transforms.Resize(int(288*1.14)),
        transforms.CenterCrop(288),
        transforms.ToTensor()])),
    sampler=sampler,
    batch_size=args.batch_size, shuffle=False,
    num_workers=4, pin_memory=True)
# Instantiate the requested resnet variant and load the saved weights.
model = getattr(resnet, args.model)().cuda()
Nsamples=args.num_images
Nclasses=Nc=1000
savedict = torch.load(args.model_path,map_location='cpu')
model.load_state_dict(savedict['state_dict'])
# Fold ImageNet normalization into the model as a frozen BatchNorm layer so
# inputs can stay in [0,1] (gradients w.r.t. raw pixels remain meaningful).
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
model = nn.Sequential(nn.BatchNorm2d(3,affine=False), model)
model[0].running_var = torch.tensor(std)**2
model[0].running_mean = torch.tensor(mean)
model.eval()
# Inference only: no parameter gradients needed (input grads still flow).
for p in model.parameters():
    p.requires_grad_(False)
if has_cuda:
    model = model.cuda()
    if torch.cuda.device_count()>1:
        model = nn.DataParallel(model)
def criterion(z,y):
    """Top-5 margin loss per sample: max_{i not in top5} p_i - p_y.

    Negative when the true class is confidently inside the top five;
    non-negative indicates a (near) top-5 error.
    """
    probs = z.softmax(dim=-1)
    rows = torch.arange(z.shape[0], device=z.device)
    sorted_probs = probs.sort(dim=-1, descending=True)[0]
    # Largest probability among classes ranked 6th and below.
    best_outside_top5 = sorted_probs[:, 5:].max(dim=-1)[0]
    return best_outside_top5 - probs[rows, y]
# Pre-allocate one slot per evaluated image for each statistic.
Loss = torch.zeros(Nsamples).cuda()
NormGradLoss = torch.zeros(Nsamples).cuda()
if args.curvature and args.norm=='L2':
    LambdaMaxLoss = torch.zeros(Nsamples).cuda()
Top1 = torch.zeros(Nsamples,dtype=torch.uint8).cuda()
Rank = torch.zeros(Nsamples,dtype=torch.int64).cuda()
Top5 = torch.zeros(Nsamples,dtype=torch.uint8).cuda()
sys.stdout.write('\nRunning through dataloader:\n')
# Jx[b] below extracts, for each image, the sorted-rank index of its true
# label; Jx is a (batch_size, Nc) grid of column indices 0..Nc-1.
Jx = torch.arange(Nc).cuda().view(1,-1)
Jx = Jx.expand(args.batch_size, Nc)
# Main evaluation loop: per image, record the margin loss, the rank of the
# true label, top-1/top-5 hits, the gradient norm of the loss w.r.t. the
# input, and (optionally) the largest Hessian eigenvalue.
# NOTE(review): Jx, the rank extraction, and the ix/arange bookkeeping all
# assume every batch has exactly args.batch_size images -- a final partial
# batch (num_images not divisible by batch_size) would break; confirm.
for i, (x,y) in enumerate(loader):
    sys.stdout.write('   Completed [%6.2f%%]\r'%(100*i*args.batch_size/Nsamples))
    sys.stdout.flush()
    x, y = x.cuda(), y.cuda()
    # Input gradients are needed for the gradient-norm statistic.
    x.requires_grad_(True)
    yhat = model(x)
    p = yhat.softmax(dim=-1)
    psort , jsort = p.sort(dim=-1,descending=True)
    # Rank (0-based) of the true label in the sorted class probabilities.
    b = jsort==y.view(-1,1)
    rank = Jx[b]
    pmax = psort[:,0]
    logpmax = pmax.log()
    p5,ix5 = psort[:,0:5], jsort[:,0:5]
    ix1 = jsort[:,0]
    sump5 = p5.sum(dim=-1)
    loss = criterion(yhat, y)
    g = grad(loss.sum(),x)[0]
    # Dual norm of the perturbation norm: L2<->L2, Linf<->L1.
    if args.norm=='L2':
        gn = g.view(len(y),-1).norm(dim=-1)
    elif args.norm=='Linf':
        gn = g.view(len(y),-1).norm(p=1,dim=-1)
    if args.curvature and args.norm=='L2':
        lmin, lmax = dominant_hessian_eigs(lambda z: criterion(model(z),y).sum(), x, fd=False,
                tol=1e-3, maxiters=100)
    top1 = ix1==y
    top5 = (ix5==y.view(args.batch_size,1)).sum(dim=-1)
    # Scatter this batch's results into the global buffers.
    ix = torch.arange(i*args.batch_size, (i+1)*args.batch_size,device=x.device)
    Loss[ix] = loss.detach()
    Rank[ix]= rank.detach()
    Top1[ix] = top1.detach()
    Top5[ix] = top5.detach().type(torch.uint8)
    NormGradLoss[ix] = gn.detach()
    if args.curvature:
        LambdaMaxLoss[ix] = lmax.detach()
sys.stdout.write('   Completed [%6.2f%%]\r'%(100.))
# Collect the per-image statistics into a DataFrame.
# BUG FIX: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin ``bool`` is the documented replacement (it is what the old alias
# resolved to), so behavior is unchanged on older NumPy too.
df = pd.DataFrame({'loss':Loss.cpu().numpy(),
    'top1':np.array(Top1.cpu().numpy(),dtype=bool),
    'top5':np.array(Top5.cpu().numpy(), dtype=bool),
    'norm_grad_loss':NormGradLoss.cpu().numpy(),
    'rank': Rank.cpu().numpy()})
if args.curvature and args.norm=='L2':
    df['lambda_max_loss'] = LambdaMaxLoss.cpu().numpy()
# Summarize accuracy, gradient norms, and first/second-order lower bounds on
# the adversarial distance, then pickle everything next to the checkpoint.
print('\n\ntop1 error: %.2f%%,\ttop5 error: %.2f%%'%(100-df['top1'].sum()/Nsamples*100, 100-df['top5'].sum()/Nsamples*100))
Lmax = NormGradLoss.max()
Lmean = NormGradLoss.mean()
dualnorm = 'L1' if args.norm=='Linf' else 'L2'
print('mean & max gradient norm (%s): %.2f, %.2f'%(dualnorm, Lmean, Lmax))
if args.curvature and args.norm=='L2':
    Cmax = LambdaMaxLoss.max()
    Cmean = LambdaMaxLoss.mean()
    print('mean & max curvature (%s): %.2g, %.2g'%(dualnorm, Cmean, Cmax))
# Margin to misclassification; clamp(0) zeroes images already misclassified.
LossGap = (-Loss).clamp(0)
# First-order (Lipschitz) lower bound on the distance to the boundary.
Lbound = LossGap/Lmax
df['Lbound'] = Lbound.cpu().numpy()
print('mean 1st order lower bound on adversarial distance (%s): %.2g'%(args.norm, Lbound.mean()))
if args.curvature and args.norm=='L2':
    # Second-order bound from the quadratic model with curvature Cmax.
    Cbound = 1/Cmax*(-NormGradLoss + (NormGradLoss.pow(2) + 2*Cmax*LossGap).sqrt())
    df['Cbound'] = Cbound.cpu().numpy()
    print('mean 2nd order lower bound on adversarial distance (L2): %.2g'%Cbound.mean())
# Categorize each image: top1 / top5-but-not-top1 / mis-classified.
ix1 = np.array(df['top1'], dtype=bool)
ix5 = np.array(df['top5'], dtype=bool)
ix15 = np.logical_or(ix5,ix1)
ixw = np.logical_not(np.logical_or(ix1, ix5))
df['type'] = pd.DataFrame(ix1.astype(np.int8) + ix5.astype(np.int8))
d = {0:'mis-classified',1:'top5',2:'top1'}
df['type'] = df['type'].map(d)
df['type'] = df['type'].astype('category')
# Keep the original dataset indices so rows can be traced back to images.
df['ix'] = IX.numpy()
basename = args.model_path.split('.pth.tar')
pklpath = basename[0]+'-stats-%s.pkl'%args.norm
df.to_pickle(pklpath)
| [
"sys.stdout.write",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.cuda.device_count",
"numpy.arange",
"torch.arange",
"sys.stdout.flush",
"os.path.join",
"sys.path.append",
"torch.load",
"numpy.random.choice",
"torch.zeros",
"torchvision.transforms.CenterCrop",
"torch.manual_seed"... | [((291, 313), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (306, 313), False, 'import os, sys\n'), ((382, 531), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Gathers statistics of a model on the testset, and saves these statistics to a pickle file in the model directory"""'], {}), "(\n 'Gathers statistics of a model on the testset, and saves these statistics to a pickle file in the model directory'\n )\n", (405, 531), False, 'import argparse\n'), ((1503, 1531), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1520, 1531), False, 'import torch\n'), ((1532, 1557), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1546, 1557), True, 'import numpy as np\n'), ((1663, 1688), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1686, 1688), False, 'import torch\n'), ((1885, 1905), 'torch.from_numpy', 'torch.from_numpy', (['IX'], {}), '(IX)\n', (1901, 1905), False, 'import torch\n'), ((1917, 1940), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['IX'], {}), '(IX)\n', (1936, 1940), False, 'from torch.utils.data import SubsetRandomSampler\n'), ((1950, 1991), 'os.path.join', 'os.path.join', (['args.datadir', '"""validation/"""'], {}), "(args.datadir, 'validation/')\n", (1962, 1991), False, 'import os, sys\n'), ((2535, 2582), 'torch.load', 'torch.load', (['args.model_path'], {'map_location': '"""cpu"""'}), "(args.model_path, map_location='cpu')\n", (2545, 2582), False, 'import torch\n'), ((2812, 2830), 'torch.tensor', 'torch.tensor', (['mean'], {}), '(mean)\n', (2824, 2830), False, 'import torch\n'), ((3624, 3677), 'sys.stdout.write', 'sys.stdout.write', (['"""\nRunning through dataloader:\n"""'], {}), '("""\nRunning through dataloader:\n""")\n', (3640, 3677), False, 'import os, sys\n'), ((5028, 5080), 'sys.stdout.write', 'sys.stdout.write', (["(' Completed [%6.2f%%]\\r' % 100.0)"], {}), "(' Completed [%6.2f%%]\\r' % 
100.0)\n", (5044, 5080), False, 'import os, sys\n'), ((6399, 6431), 'numpy.array', 'np.array', (["df['top1']"], {'dtype': 'bool'}), "(df['top1'], dtype=bool)\n", (6407, 6431), True, 'import numpy as np\n'), ((6438, 6470), 'numpy.array', 'np.array', (["df['top5']"], {'dtype': 'bool'}), "(df['top5'], dtype=bool)\n", (6446, 6470), True, 'import numpy as np\n'), ((6478, 6501), 'numpy.logical_or', 'np.logical_or', (['ix5', 'ix1'], {}), '(ix5, ix1)\n', (6491, 6501), True, 'import numpy as np\n'), ((1787, 1847), 'numpy.random.choice', 'np.random.choice', (['(50000)'], {'size': 'args.num_images', 'replace': '(False)'}), '(50000, size=args.num_images, replace=False)\n', (1803, 1847), True, 'import numpy as np\n'), ((1863, 1879), 'numpy.arange', 'np.arange', (['(50000)'], {}), '(50000)\n', (1872, 1879), True, 'import numpy as np\n'), ((2704, 2735), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(3)'], {'affine': '(False)'}), '(3, affine=False)\n', (2718, 2735), True, 'import torch.nn as nn\n'), ((2767, 2784), 'torch.tensor', 'torch.tensor', (['std'], {}), '(std)\n', (2779, 2784), False, 'import torch\n'), ((3110, 3151), 'torch.arange', 'torch.arange', (['z.shape[0]'], {'device': 'z.device'}), '(z.shape[0], device=z.device)\n', (3122, 3151), False, 'import torch\n'), ((3791, 3879), 'sys.stdout.write', 'sys.stdout.write', (["(' Completed [%6.2f%%]\\r' % (100 * i * args.batch_size / Nsamples))"], {}), "(' Completed [%6.2f%%]\\r' % (100 * i * args.batch_size /\n Nsamples))\n", (3807, 3879), False, 'import os, sys\n'), ((3872, 3890), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3888, 3890), False, 'import os, sys\n'), ((4723, 4800), 'torch.arange', 'torch.arange', (['(i * args.batch_size)', '((i + 1) * args.batch_size)'], {'device': 'x.device'}), '(i * args.batch_size, (i + 1) * args.batch_size, device=x.device)\n', (4735, 4800), False, 'import torch\n'), ((6522, 6545), 'numpy.logical_or', 'np.logical_or', (['ix1', 'ix5'], {}), '(ix1, ix5)\n', (6535, 6545), True, 
'import numpy as np\n'), ((2950, 2975), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2973, 2975), False, 'import torch\n'), ((2995, 3017), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (3010, 3017), True, 'import torch.nn as nn\n'), ((3298, 3319), 'torch.zeros', 'torch.zeros', (['Nsamples'], {}), '(Nsamples)\n', (3309, 3319), False, 'import torch\n'), ((3342, 3363), 'torch.zeros', 'torch.zeros', (['Nsamples'], {}), '(Nsamples)\n', (3353, 3363), False, 'import torch\n'), ((3466, 3506), 'torch.zeros', 'torch.zeros', (['Nsamples'], {'dtype': 'torch.uint8'}), '(Nsamples, dtype=torch.uint8)\n', (3477, 3506), False, 'import torch\n'), ((3520, 3560), 'torch.zeros', 'torch.zeros', (['Nsamples'], {'dtype': 'torch.int64'}), '(Nsamples, dtype=torch.int64)\n', (3531, 3560), False, 'import torch\n'), ((3574, 3614), 'torch.zeros', 'torch.zeros', (['Nsamples'], {'dtype': 'torch.uint8'}), '(Nsamples, dtype=torch.uint8)\n', (3585, 3614), False, 'import torch\n'), ((3430, 3451), 'torch.zeros', 'torch.zeros', (['Nsamples'], {}), '(Nsamples)\n', (3441, 3451), False, 'import torch\n'), ((3681, 3697), 'torch.arange', 'torch.arange', (['Nc'], {}), '(Nc)\n', (3693, 3697), False, 'import torch\n'), ((2201, 2227), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(288)'], {}), '(288)\n', (2222, 2227), True, 'import torchvision.transforms as transforms\n'), ((2257, 2278), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2276, 2278), True, 'import torchvision.transforms as transforms\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Load required packages
from psychopy import core, event, misc
from iViewXAPI import*
from iViewXAPIReturnCodes import*
import subprocess
import numpy as np
import os
from threading import Thread
import helpers
import time
# Numeric IDs the iView X SDK uses to identify eye-tracker servers, device
# models, and tracking modes (passed to the iViewXAPI calls).
ET_server_dict = {'iViewX':0, 'iViewXOEM':1, 'iViewNG':2}
ET_device_dict = {'NONE':0, 'RED':1, 'REDm':2, 'RED250Mobile':2, 'HiSpeed':3,
                  'MRI':4, 'HED':5, 'Custom':7, 'REDn':8}
tracking_mode_dict = {'SMART_BINOCULAR':0, 'MONOCULAR_LEFT':1,
                      'MONOCULAR_RIGHT':2, 'BINOCULAR':3,
                      'SMART_MONOCULAR':4}
#%% To construct a nested Class for sample
# self.sample.leftEye.gazeX = x
# self.sample.leftEye.gazeY = y
# self.sample.rightEye.gazeX = x
# self.sample.rightEye.gazeY = y
class Dir:
    """Mutable holder for one eye's gaze coordinates (initialized to 0)."""

    def __init__(self):
        # Updated in place by whoever records gaze samples.
        self.gazeX = 0
        self.gazeY = 0
class Eye:
    """Binocular gaze sample: one Dir record per eye.

    Enables nested access such as ``sample.leftEye.gazeX``.
    """

    def __init__(self):
        self.leftEye = Dir()
        self.rightEye = Dir()
#%%
class Connect(Thread):
"""
Basic functionally to communicate with and manage SMI eye trackers
"""
    def __init__(self):
        '''
        Constructs an instance of the SMITE interface, with specified settings.
        If settings is not provided, the name of an eyeTracker should
        be given, e.g., RED-m
        '''
        # Create gaze sample
        self.sample = Eye()                 # latest left/right gaze sample
        self.clock = core.Clock()           # PsychoPy clock for timestamps
        self.connect_timeout = 30 # in seconds
        # NOTE(review): mouse presumably emulates gaze in this dummy class
        # (init() only prints 'Dummy connected') -- confirm against callers.
        self.mouse = event.Mouse()
        self.buf = []                       # buffer of recorded samples
#%%
    def init(self):
        ''' Dummy connect: only logs that the (simulated) tracker is ready. '''
        print('Dummy connected')
#%%
    def abort_calibration(self):
        ''' Aborts calibration
        All system supported

        NOTE: dummy stub -- only logs the call, no tracker I/O.
        '''
        print('abort_calibration')
#%%
    def abort_calibration_point(self):
        ''' Aborts the current calibration point
        Supported systems: REDn, RED250 Mobile

        NOTE: dummy stub -- only logs the call, no tracker I/O.
        '''
        print('abort_calibration_points')
#%%
    def accept_calibration_point(self):
        ''' Accepts the current calibration point
        All system supported

        NOTE: dummy stub -- only logs the call, no tracker I/O.
        '''
        print('accept_calibration_points')
#%%
def change_calibration_point(self, number, positionX, positionY):
''' Change calibration point 'number' to a new position (positionX, positionY)
All system supported (WARNING: should not be done on the remotes unless
you REALLY know what you're doing. So don't do it.)
'''
print('change_calibration_point')
#%%
def clear_aoi(self):
''' Removes all trigger AOIs
Not supported. Use your own code and data from the buffer instead
Supported systems: RED, RED-m, HiSpeed
'''
print('clear_aoi')
#%%
def clear_recording_buffer(self):
''' Clears recording buffer from all recorded data
Supported systems: all
'''
print('clear_recording_buffer')
#%%
def configure_filter(self, filter_type=0, filter_action=1):
'''
Queries or sets filter parameters. The usage of the parameter data depends on the parameter action
Args: filter_type: 0 - averaging disabled, 1 - averaging enabled
filter_action: 0 - query the current filter status (output passed to variable 'filter_status'),
1 - configure filter parameters
Supported systems: all but REDn
WARNING: For some reason, the only combination that works is filter_type=0, filter_action=1,
which means that avaraging is disabled. On the other hand, this is not problem. If you want averaged data, just
average data from the left and the right eye yourself.
Let me know if you know how to fix it.
Returns:
filter_status - outputs the current filter status (does not work)
'''
print('configure_filter')
return None
#%%
def connect(self, ip_listen, port_listen, ip_send, port_send,
connect_timeout=30):
''' Connect to eye tracker server
Supported systems: all
'''
print('connect')
#%%
def continue_eye_tracking(self):
'''
Wakes up and enables the eye tracking application from suspend mode to continue processing gaze
data. The application can be set to suspend mode by calling iV_PauseEyetracking
Supported systems: all but RED and HiSpeed
'''
print('continue_eye_tracking')
#%%
def continue_recording(self, msg):
'''
Continues gaze data recording. iV_ContinueRecording does not return until gaze recording is continued.
Before it can be continued, the data needs to be paused using. iV_PauseRecording. Additionally this
function allows a message to be stored inside the idf data buffer.
Supported systems: all
'''
print('continue_recording')
#%%
def define_aoi(self, aoi_data):
'''
Defines an AOI. The API can handle up to 20 AOIs
Supported systems: all but RED-n and RED250 mobile.
Args:
aoi_data - struct, see SDK manual for description
'''
print('define_aoi')
#%%
def define_aoi_port(self, port):
'''
Selects a port for sending out TTL trigger
Supported systems: all but RED-n and RED250 mobile.
Args:
port - int
'''
print('define_aoi_port')
#%%
def delete_red_geometry(self, profile):
'''
Deletes the geometry setup with the given profile name. It is not possible
to delete a geometry profile if it is currently in use.
See chapter Setting up RED Geometry in the iView X SDK Manual.
Supported systems: all but HiSpeed
'''
print('delete_red_geometry')
#%%
def disable_aoi(self, aoi_name):
'''
Disables all AOIs with the given name.
Supported systems: all but RED-n and RED250 mobile.
Args:
port - int
'''
print('disable_aoi')
#%%
def disable_aoi_group(aoi_group):
'''
Disables an AOI group
Supported systems: all but RED-n and RED250 mobile.
Args:
port - int
'''
print('disable_aoi_group')
#%%
def disable_gaze_data_filter(self):
'''
Disables the raw data filter. The gaze data filter can be enabled using
iV_EnableGazeDataFilter.
Supported systems: all
'''
print('disable_gaze_data_filter')
#%%
def disable_processor_high_performance_mode(self):
'''
Disables a CPU high performance mode allowing the CPU to reduce the performance.
Supported systems: all but RED and Hi-Speed
'''
print('disable_processor_high_performance_mode')
#%%
def disconnect(self):
''' Disconnects the eye tracker
Supported systems: all
'''
print('disconnect')
#%%
def enable_aoi(self, aoi_name):
'''
Enables all AOIs with the given name
Supported systems: all but RED-n and RED250 mobile.
'''
print('enable_aoi')
#%%
def enable_aoi_group(self, aoi_group):
'''
Disables an AOI group
Supported systems: all but RED-n and RED250 mobile.
'''
print('enable_aoi_group')
#%%
def enable_gaze_data_filter(self):
'''
This API bilateral filter was implemented due to special human-computer
interaction (HCI) application requirements. It smoothes gaze position data in EyeDataStruct::gazeX and
EyeDataStruct::gazeY contained in SampleStruct, e.g. obtained by iV_GetSample. The gaze data filter
can be disabled using iV_DisableGazeDataFilter
'''
print('enable_aoi_group')
#%%
def enable_processor_high_performance_mode(self):
'''
Enables a CPU high performance mode allowing the CPU to reduce the performance.
Supported systems: all but RED and Hi-Speed
'''
print('enable_processor_high_performance_mode')
#%%
def get_accuracy(self, visualization = 0):
''' Get accuracy. Only possible after a successful validation
If the parameter visualization is set to 1 the accuracy
data will be visualized in a dialog window.
'''
print('get_accuracy')
return (None, None, None, None)
#%%
def get_accuracy_image(self, fname=None):
''' Returns validation screen image and optinally save it to disk
'''
print('get_accuracy_image')
#%%
def get_aoi_output_value(self):
'''
Returns the current AOI value.
Supported systems: all
'''
print('get_aoi_output_value')
return None
#%%
def get_calibration_parameter(self):
''' Updates stored calibrationData information with currently selected
parameters.
Supported systems: RED-n and RED250 Mobile
'''
print('get_calibration_parameter')
return None
#%%
def get_calibration_point(self, calibration_point_number):
''' Delivers information about a calibration point.
Supported systems: all
'''
print('get_calibration_point')
return None
#%%
def get_calibration_quality(self, calibration_point_number):
''' Delivers fixation quality information about a calibration point.
If the passed parameter left or right is NULL, no data will be returned
Supported systems: RED-n and RED250 Mobile
'''
print('get_calibration_quality')
return None, None
#%%
def get_calibration_quality_image(self):
'''
Same functionally as get_accuracy_image
Supported systems: RED-n and RED250 Mobile
'''
print('get_calibration_quality_image')
return None
#%%
def get_calibration_status(self):
''' Updates calibrationStatus information.
The client needs to be connected to the iView eye tracking server.
Supported systems: all
'''
print('get_calibration_status')
return None
#%%
def get_current_calibration_point(self):
''' Updates data in currentCalibrationPoint with the current calibration
point position
Supported systems: all
'''
print('get_current_calibration_point')
return None, None
#%%
def get_current_RED_geometry(self):
'''
Supported systems: all but HiSpeed
'''
print('get_current_RED_geometry')
return None
#%%
def get_current_time_stamp(self):
''' Provides the current eye tracker timestamp in microseconds
Supported systems: all
'''
print('get_current_time_stamp')
return None
#%%
def get_device_name(self):
''' Queries the device name information of the connected device.
Supported systems: all but RED and HiSpeed
'''
print('get_device_name')
return None
#%%
def get_event(self):
''' Updates data from eventDataSample with current event data.
Supported systems: all but RED-n professional
'''
print('get_event')
return None
#%%
def get_eye_image(self):
''' Updates imageData with current eye image (format: monochrome 8bpp).
Supported systems: ToDo
'''
print('#%%')
return None, None
#%%
def get_feature_key(self):
''' Gets the device specific feature key. Used for RED-OEM, RED250mobile and REDn devices only
Supported systems: RED-n and RED250 Mobile
'''
print('get_feature_key')
return None
#%%
def get_gaze_channel_quality(self):
''' Retrieve gaze quality data. Fills qualityData with validated accuracy results. Before quality data is
accessible the system needs to be validated with iV_Validate
Supported systems: RED-n and RED250 Mobile
'''
print('get_gaze_channel_quality')
return None
#%%
def get_recording_state(self):
''' Queries the recording state of the eye tracking server.
This function can be used to check if the eye
tracking server is currently performing a recording.
Supported systems: RED-n and RED250 Mobile
'''
print('get_recording_state')
return None
#%%
def get_RED_geometry(self):
''' Gets the geometry data of a requested profile without selecting them.
Supported systems: all but HiSpeed
'''
print('get_RED_geometry')
return None
#%%
def get_sample(self):
''' Updates data in sampleData with current eye tracking data.
Supported systems: all
'''
print('get_sample')
return None
#%%
def get_scene_video(self):
''' Updates imageData with current scene video image (format: RGB 24bpp)
Not Supported
'''
#%%
def get_serial_number(self):
''' Retrieve the serial number information of the connected device.
Supported systems: all but RED and HiSpeed
'''
print('get_serial_number')
return None
#%%
def get_speed_mode(self):
''' This function retrieves the speed modes used and supported by the
connected iView eye tracking server
speedModes:
int numberOfSpeedModes - number of supported speed modes
int speedMode - the current sampling frequency
int speedModes - an array of sampling frequencies supported by the connected iView eye tracking server;
int version - version of the current data structure
Supported systems: RED-n and RED250 Mobile
'''
print('get_speed_mode')
return None
#%%
def get_system_info(self):
'''
int API_Buildnumber build number of iView X SDK in use
int API_MajorVersion - major version number of iView X SDK in use
int API_MinorVersion - minor version number of iView X SDK in use
int iV_Buildnumber - build number of iView eye tracking server in use
enum ETDevice- iV_ETDevice type of eye tracking device
int iV_MajorVersion - major version number of iView eye tracking server in use
int iV_MinorVersion - major version number of iView eye tracking server in use
int samplerate
ETDevice {
NONE = 0, RED = 1, REDm = 2, HiSpeed = 3,
MRI = 4, HED = 5, Custom = 7, REDn = 8 }
Supported systems: all
'''
print('get_system_info')
return None
#%%
def get_tracking_mode(self):
''' Get eye tracking mode (see set_tracking_mode)
'''
print('get_tracking_mode')
return None
#%%
def get_tracking_monitor(self):
''' Returns validation screen image and optinally save it to disk
The tracking monitor image depicts the positions of both eyes and shows notification arrows
if the participant is not properly positioned infront of the eye tracker.
The tracking monitor is useful to validate the positioning before and
during a recording session.
Supported systems: all but HiSpeed
'''
print('get_tracking_mode')
return None, None
#%%
def get_tracking_status(self):
''' Updates trackingStatus with current tracking status.
This function can be used to get the current eye positions.
Supported systems: all
'''
print('get_tracking_status')
return None
#%%
def get_use_calibration_key(self):
''' Gets the currently set interaction key status for the calibration and validation process.
If enableKeys is 0 all available user interaction keys:
• SPACE for accepting calibration/validation points
• ESC for aborting calibration/validation
• TAB for skipping a point (only SMI iViewRED 4.2 or later)
are disabled.
Supported systems: RED-n and RED250 Mobile
'''
print('get_use_calibration_key')
return None
#%%
def hide_accuracy_monitor(self):
''' Hides accuracy monitor window which can be opened by iV_ShowAccuracyMonitor.
Supported systems: all
'''
print('hide_accuracy_monitor')
#%%
def hide_eye_image_monitor(self):
''' Hides eye image monitor window which can be opened by iV_ShowEyeImageMonitor.
Supported systems: all but RED-n professional
'''
print('hide_eye_image_monitor')
#%%
def hide_scene_video_monitor(self):
''' Hides scene video monitor window which can be opened by iV_ShowSceneVideoMonitor.
Not Supported
'''
#%%
def hide_tracking_monitor(self):
''' Hides tracking monitor window which can be opened by iV_ShowTrackingMonitor
Supported systems: all but HiSpeed
'''
print('hide_tracking_monitor')
#%%
def is_connected(self):
''' Checks if connection to iView eye tracking server is still established.
Supported systems: all
Returns:
res - 1 if intended functionality has been fulfilled
0 if no connection established
'''
print('is_connected')
return None
#%%
def load_calibration(self, name):
''' Loads a previously saved calibration. A calibration has to be saved by using iV_SaveCalibration.
Supported systems: all
'''
print('load_calibration')
#%%
def log(self, msg):
''' Writes logMessage into log file
Supported systems: all
'''
print('log')
#%%
def pause_eye_tracking(self):
''' Suspend the eye tracking application and disables calculation of gaze data.
The application can be reactivated by calling iV_ContinueEyetracking.
Supported systems: all but RED and HiSpeed
'''
print('pause_eye_tracking')
#%%
def pause_recording(self):
''' Pauses gaze data recording. iV_PauseRecording does not return until
gaze recording is paused.
Supported systems: all
'''
print('pause_recording')
#%%
def quit_server(self):
''' Disconnects and closes iView eye tracking server.
After this function has been called no other function
or application can communicate with iView eye tracking server.
'''
print('quit_server')
#%%
def recalibrate_one_point(self, calibration_point_number):
''' Restarts a calibration procedure with a point from the latest calibration process.
The point is specified
by its index in the calibration point profile (counted from 1). If the
requested point is not found, an error
code will be returned. The number of calibration points can be retrieved
via iV_GetCalibrationQuality
Supported systems: RED-n and RED250 Mobile
'''
print('recalibrate_one_point')
#%%
def release_aoi_port(self):
''' Releases the port for sending TTL trigger.
Supported systems: all but RED-n and RED250 Mobile
'''
print('release_aoi_port')
#%%
def remove_aoi(self, name):
''' Removes all AOIs with the given name.
Supported systems: all but RED-n and RED250 Mobile
'''
print('remove_aoi')
#%%
def reset_calibration_points(self):
''' Resets the positions of the calibration points
Supported systems: all
'''
print('reset_calibration_points')
#%%
def save_calibration(self, name):
''' Saves a calibration with a custom name. To save a calibration it
is required that a successful calibration already has been completed.
Supported systems: all
'''
print('save_calibration')
#%%
def save_data(self, filename, description = "",
user = None, overwrite=0):
''' Writes recorded data buffer to disc.
The data recording needs to be stopped using iV_StopRecording
before the data buffer can be saved to given location.
The filename can include the path. If the connected eye tracking device
is an HED, scene video buffer is written, too. iV_SaveData will not return
until the data has been saved.
If there is already a file with a certain name 'name.idf', this file will not
be overwritten, but save as another file with name 'name_1.idf'.
Args:
filename - full path including the filename of the data file being created
description - Optional experiment description tag stored in the idf file. This tag is available in BeGaze and in the text export from an idf file.
user - Optional name of test person. This tag is available in BeGaze and in the text export
from an idf file.
overwrite - Overwriting policy.
• 0: do not overwrite file filename if it already exists
• 1: overwrite file filename if it already exists
Supported systems: all
'''
# Set the use equal to the filename if not explicitly given
if user == None:
user = filename
# Split filename into path and filename
path, filename = os.path.split(filename)
assert(len(path) > 0), "Filename must have a path"
assert(len(filename) > 0), "Filename must be given"
# Check if a '.idf was added to the filename. If so, remove it
ext = os.path.splitext(filename)[1]
if '.idf' in ext:
filename = filename.strip('.idf')
print('save_data')
#%%
def select_RED_geometry(self, profile):
''' Selects a predefined geometry profile.
Supported systems: all but HiSpeed
'''
print('select_RED_geometry')
#%%
def send_command(self, cmd):
''' Sends a remote command to iView eye tracking server.
Please refer to the iView X help file for further information about remote commands.
Supported systems: all
'''
print('send_command')
#%%
def send_image_message(self, msg):
''' Sends a text message to iView X idf recording data file.
If the etMessage has the suffix ".jpg", ".bmp",
".png", or ".avi" BeGaze will separate the data buffer
automatically into according trials.
Supported systems: all
'''
print('send_image_message')
#%%
def set_aoi_hit_callback(self, callback_function):
''' Sets a callback function for the AOI hit functions.
Supported systems: all but RED-n and RED250 Mobile
'''
print('set_aoi_hit_callback')
#%%
def set_calibration_callback(self, callback_function):
''' Sets a callback function for the AOI hit functions.
Supported systems: All
'''
print('set_calibration_callback')
#%%
def set_connection_timeout(self, time):
''' Defines a customized timeout for how long iV_Connect tries to
connect to iView eye tracking server.
Supported systems: all but RED-n professional
'''
print('set_connection_timeout')
#%%
def set_event_callback(self, callback_function):
''' Sets a callback function for the event data.
The function will be called if a real-time detected fixation has
been started or ended.
Supported systems: all but RED-n professional
'''
print('set_event_callback')
#%%
def set_event_detection_parameters(self, name):
''' Defines the detection parameter for online fixation detection algorithm.
Supported systems: all but RED-n professional
'''
#%%
def set_eye_image_callback(self, callback_function):
''' Sets a callback function for the eye image data.
Supported systems: all but RED-n professional and RED-mx
'''
print('set_eye_image_callback')
#%%
def set_licence(self, key):
''' Sets the customer license (required only for OEM devices!).
Supported systems: RED-n and RED-n scientific
'''
print('set_licence')
#%%
def set_logger(self, log_level=1, filename='iv_logfile'):
''' Sets the customer license (required only for OEM devices!).
ToDo: What log levels are there and what do they mean?
Supported systems: all
'''
print('set_logger')
#%%
def set_resolution(self, stimulus_width, stimulus_height):
''' Sets the customer license (required only for OEM devices!).
Defines a fixed resolution independent to the screen resolution of
chosen display device defined in iV_-SetupCalibration function.
Could be useful when using real-time data with a screen with low resolution.
Supported systems: all
'''
print('set_resolution')
#%%
def set_RED_geometry(self, function_name):
''' Define the eye trackers stand alone and monitor integrated geometry
Supported systems: all but HiSpeed
'''
print('set_RED_geometry')
#%%
def set_sample_callback(self, function_name):
''' Sets a callback function for the raw sample data.
The function will be called if iView eye tracking server
has calculated a new data sample.
Attention: Algorithms with high processor usage and long calculation
time should not run within this callback due to a higher probability of data loss
Supported systems: all
'''
print('set_sample_callback')
#%%
def set_scene_video_callback(self, name):
''' Sets a callback function for the scene video image data.
The function will be called if a new scene video
image is available. The image format is RGB 24bpp.
Not Supported.
'''
#%%
def set_speed_mode(self, samplingrate):
''' This function requests the iView eye tracking server to switch the
eye tracking frequency to the specified value. Use iV_GetSpeedModes
to get the available speed modes for the connected eye tracking device.
Supported systems: RED-n
'''
if self.set_sampling_freq_allowed:
print('set_speed_mode')
else:
print("WARNING: set_speed_mode is not supported on this eye tracker")
#%%
def set_tracking_mode(self, mode):
''' This function is available with SMI iViewRED 4.4 or later and replaces the iV_SetTrackingParameter
function
e.g., set_tracking_mode(self, 'SMART_BINOCULAR')
Eye tracking modes:
smart_binocular: tracks both eye separately, but can handle temporal monocular loss (default)
smart_binocular_right/left: both eye visible, but one one dominant (e.g., obvious squinting)
monocular_right/left: only one eye visible
0 - SmartBinocular SmartBinocular mode.
1 - MonocularLeft Monocular mode using only the left eye.
2 - MonocularRight Monocular mode using only the right eye.
3 - Binocular Binocular mode.
4 - SmartMonocular SmartMonocular mode.
Supported systems: all but RED and HiSpeed
'''
if self.set_tracking_mode_allowed:
assert (mode == 'SMART_BINOCULAR' or
mode == 'MONOCULAR_LEFT' or
mode == 'MONOCULAR_RIGHT' or
mode == 'BINOCULAR' or
mode == 'SMART_MONOCULAR')
print('set_tracking_mode')
else:
print("WARNING: set_tracking_mode is not supported on this eye tracker")
#%%
def set_tracking_monitor_callback(self, function_name):
''' Sets a callback function for the tracking monitor image data.
The function will be called if a new tracking
monitor image was calculated. The image format is BGR 24bpp
Supported systems: all but HiSpeed
'''
print('set_tracking_monitor_callback')
#%%
def set_tracking_parameter(self, eye_type=0, parameter_type=4, activate=1):
''' Sets iView eye tracking server tracking parameters.
See Eye Tracking Parameter subsection and iView eye tracking server
manual for further explanations. Important note: This function can
strongly affect tracking stability of your iView X and
eyetracking-server system. Only experienced users should use this
function.
Args:
eye_type - select specific eye (0 is left, 1 is right)
parameter_type - parameter to set (see manual)
activate - new value for selected parameter
Supported systems: ToDo
'''
print('set_tracking_parameter')
#%%
def setup_calibration_parameters(self,
autoaccept=1,
bg_color=0,
screen=1,
fg_color=0,
cal_method=5,
cal_speed=1,
target_size=20,
target_shape=2):
"""
Sets the calibration and validation visualization parameter.
Setup calibration parameters (but do not initiate calibration)
An option to define position of calibration point
1 - autoAccept
2- background Brightness
3- displayDevice
4 - foreground Brightness
5 - cal method
6 - speed (cal)
7 - target Filename[256]
8 - targetShape
9 - targetSize
10 - visualization
Supported systems: all
"""
print('setup_calibration_parameters')
#%%
def setup_debug_mode(self, enable_debug_mode=False):
'''Enables or disables the debug mode for the current connection.
The debug mode disables the automatic
connection termination after 5 seconds of an unresponsive server or client.
This can happen e.g. during
debugging a client application. Beware: the debug mode must not be enabled
for production code, as it
makes the connection status detection of all API functions unreliable!
Supported systems: ?
'''
print('setup_debug_mode')
#%%
def setup_ltp_recording(self, port_name, enable_recording):
'''Enables or disables the LPT signal recording functionality.
Not Supported.
'''
print('setup_ltp_recording')
#%%
def set_use_calibration_key(self, mode):
''' Sets and resets the interaction keys during the calibration and validation process.
See get_use_calibration_key
'''
print('set_use_calibration_key')
#%%
def show_accuracy_monitor(self):
'''The validated accuracy results will be visualized in a separate window.
Before the image can be drawn the calibration needs to be performed with
iV_Calibrate and validated with iV_Validate.
Supported systems: all
'''
print('show_accuracy_monitor')
#%%
def show_eye_image_monitor(self):
'''Visualizes eye image in a separate window while the participant is
beeing tracked (equal to image obtained with iV_GetEyeImage).
Supported systems: all but RED-n professional and RED-m mx
'''
print('show_eye_image_monitor')
#%%
def show_scene_video_monitor(self):
'''Visualizes scene video in separate window.
Only available for HED devices.
Not Supported.
'''
#%%
def show_tracking_monitor(self):
'''Visualizes RED tracking monitor in a separate window.
Supported systems: all but HiSpeed
'''
print('show_tracking_monitor')
#%%
def start_iview_server(self, et_application):
'''Starts the iView eye tracking server application. Depending on the PC,
it may take several seconds to start the iView eye tracking server application.
The connection needs to be established separately using iV_Connect.
The connection timeout can be extended using iV_SetConnectionTimeout.
Supported systems: all
'''
print('start_iview_server')
#%%
def start_recording(self):
''' Starts gaze data recording
Supported systems: all
'''
print('start_recording')
#%%
def stop_recording(self):
''' Stops gaze data recording
Supported systems: all
'''
print('stop_recording')
#%%
def test_ttl(self, value):
'''Sends a TTL value to defined port. Define a port with iV_DefineAOIPort
Supported systems: all
'''
print('test_ttl')
#%%
def validate_iview(self):
''' Starts a validation procedure. To proceed, the participant needs to
be tracked and has to fixate the validation point.
Depending on the validation settings (which can be changed using iV_SetupCalibration
and iV_SetUseCalibrationKeys) the user can accept the validation points
manually (by pressing [SPACE] or calling iV_AcceptCalibrationPoint) or
abort the validation (by pressing [ESC] or calling iV_AbortCalibration)
If the validation is visualized by the
API (CalibrationStruct::visualization is set to 1) the function will not
return until the validation has been finished (closed automatically)
or aborted (by using [ESC]).
If the CalibrationStruct::visualization is set to 0, the function call returns immediately.
The user has to implement the visualization of validation points.
Information about the current validation point can be retrieved with
iV_GetCurrentCalibrationPoint or with setting up the calibration callback using
iV_SetCalibrationCallback.
'''
print('validate_iview')
###############################################################################
'''
Below are convenience functions that extends the basic iview
functionally about or/and make calls more transparent
'''
###############################################################################
#%%
def set_cal_positions(self, cal_positions):
"""
Sets the positions of the calibration locations
cal_positions is a dict: {1:[x,y],2:[x,y],....}
"""
if cal_positions:
for k in cal_positions.keys():
self.change_calibration_point(k, cal_positions[k][0], cal_positions[k][1])
#%%
def set_begaze_trial_image(self, imname):
'''
imname - ex. 'testimage.jpg'
The filename should not include a path
'''
# Skip the path if there is one
filename = os.path.split(imname)[1]
# Get the file extension
ext = os.path.splitext(imname)[1]
# check extention is one of the supported ones
assert(len([i for i in ['.png','.jpg','.jpeg','.bmp','.avi'] if ext == i]) > 0), "Filename not supported"
self.send_image_message(imname)
#%%
def set_begaze_mouse_click(self, which, x, y):
''' Make BeGaze understand that a mouse click has happened
'''
assert which in 'left' or which in 'right', 'SMITE: SMI BeGaze mouse press must be for ''left'' or ''right'' mouse button'
self.send_image_message('UE-mouseclick {} x={} y={}'.format(which, x, y))
#%%
def set_begaze_key_press(self, string):
''' can use this to send any string into BeGaze event stream (do
not know length limit). We advise to keep this short
special format to achieve this
'''
self.send_image_message('UE-keypress {}'.format(string))
#%%
def start_eye_image_recording(self, image_name, path):
''' Starts eye image recording
Example: start_eye_image_recording('test',"c:\\eyeimages\\" )
'''
self.send_command(' '.join(["ET_EVB 1", image_name, path]))
#%%
def stop_eye_image_recording(self):
''' Stops eye image recording
'''
self.send_command("ET_EVE")
#%%
def get_latest_sample(self):
''' Simulates gaze position with mouse
ToDO use same struct as gaze data
'''
x, y = self.mouse.getPos()
xy = np.array([[x, y]])
mon = self.win.monitor
# Convert to SMI-coordinate system
if 'norm' in self.win.units:
xy = helpers.psychopy2smi(xy, mon, units='norm')
elif 'deg' in self.win.units:
xy = helpers.psychopy2smi(xy, mon, units='deg')
elif 'pix' in self.win.units:
xy = helpers.psychopy2smi(xy, mon, units='pix')
# Here put x and y values in struct
x = xy[0][0]
y = xy[0][1]
self.sample.leftEye.gazeX = x
self.sample.leftEye.gazeY = y
self.sample.rightEye.gazeX = x
self.sample.rightEye.gazeY = y
return self.sample
#%%
def get_headbox_coordinates(self):
''' Get headbox coordinates
'''
return self.get_tracking_status()
#%%
def increment_trial_number(self):
''' Increments trial number in iview X buffer.
'''
self.send_command("ET_INC")
#%%
    @WINFUNCTYPE(None, CSample)
    def sample_callback(sample):
        ''' Callback function for sample data.

        Registered with the iView API via WINFUNCTYPE and invoked with each
        new CSample.
        NOTE(review): 'self' is not defined in this scope -- the ctypes
        callback only receives 'sample', so the append below would raise a
        NameError if this callback ever fires. Confirm the intended buffer
        target (e.g. a module-level buffer) with the author.
        '''
        # Append data to buffer
        self.buf.append(sample)
#%%
    def consume_buffer_data(self):
        ''' Return all buffered samples and remove them from the ring buffer '''
        return self.buf.get_all()
#%%
    def peek_buffer_data(self):
        ''' Return the buffered samples without consuming them '''
        return self.buf.peek()
#%%
    def start_buffer(self, sample_buffer_length=3):
        ''' Starts the sampling thread that continuously fills a ring buffer
        with gaze samples (see run()).
        Args:
            sample_buffer_length - maximum number of samples kept in the buffer
        '''
        Thread.__init__(self)
        # Initialize the ring buffer
        self.buf = helpers.RingBuffer(maxlen=sample_buffer_length)
        # Stop flag polled by run(); raised to terminate the thread
        self.__stop = False
        self.start()
#%%
def run(self):
# Called by the e.g., et.start()
# Continously read data into the ringbuffer (convert to deg)
while True:
if self.__stop:
break
# Get samples and store in ringbuffer
sample = self.get_latest_sample()
self.buf.append(sample)
time.sleep(0.01)
#%%
def record_eye_images(self,name = 'img', dur = 1, recorded_eye = 0):
'''
Records eye images (without overlays for dur s)
recorded_eye = 0 actually means right eye (wrong in SDK)
'''
self.stop_recording()
self.set_tracking_parameter(recorded_eye,3,0)
self.set_tracking_parameter(recorded_eye,4,0)
self.set_tracking_parameter(recorded_eye,5,0)
core.wait(0.1)
self.start_eye_image_recording(name)
core.wait(dur)
self.stop_eye_image_recording()
self.set_tracking_parameter(recorded_eye,3,1)
self.set_tracking_parameter(recorded_eye,4,1)
self.set_tracking_parameter(recorded_eye,5,1)
#%%
def enable_bilateral_filter(self):
'''
This API bilateral filter was implemented due to special human-computer
interaction (HCI) application requirements. It smoothes gaze position data in EyeDataStruct::gazeX and
EyeDataStruct::gazeY contained in SampleStruct, e.g. obtained by iV_GetSample. The gaze data filter
can be disabled using iV_DisableGazeDataFilter
'''
self.enable_gaze_data_filter()
#%%
def disable_bilateral_filter(self):
'''
Disables bilateral filter
'''
self.disable_gaze_data_filter()
#%%
def delete_temp_idf_file(self):
''' Remove temp idf-files (otherwise the iview
server complains that there are unsaved data)
'''
try:
subprocess.Popen([(''.join(['del /F /S /Q /A "',
self.constants.temp_folder_path,
'\*.idf"']))])
except NameError:
print('Could not delete temp idf files')
#%%
def de_init(self, close_et_server=False):
self.disable_processor_high_performance_mode()
self.disconnect()
if close_et_server:
self.quit_server()
#%%
def average_data(self, average = False):
'''
Should data be averaged across the eyes?
'''
if average:
self.configure_filter(filter_type=1, filter_action=1)
else:
self.configure_filter(filter_type=0, filter_action=1)
| [
"psychopy.event.Mouse",
"threading.Thread.__init__",
"psychopy.core.wait",
"helpers.psychopy2smi",
"helpers.RingBuffer",
"time.sleep",
"numpy.array",
"os.path.splitext",
"os.path.split",
"psychopy.core.Clock"
] | [((1531, 1543), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (1541, 1543), False, 'from psychopy import core, event, misc\n'), ((1613, 1626), 'psychopy.event.Mouse', 'event.Mouse', ([], {}), '()\n', (1624, 1626), False, 'from psychopy import core, event, misc\n'), ((24008, 24031), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (24021, 24031), False, 'import os\n'), ((40558, 40576), 'numpy.array', 'np.array', (['[[x, y]]'], {}), '([[x, y]])\n', (40566, 40576), True, 'import numpy as np\n'), ((42092, 42113), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (42107, 42113), False, 'from threading import Thread\n'), ((42179, 42226), 'helpers.RingBuffer', 'helpers.RingBuffer', ([], {'maxlen': 'sample_buffer_length'}), '(maxlen=sample_buffer_length)\n', (42197, 42226), False, 'import helpers\n'), ((43176, 43190), 'psychopy.core.wait', 'core.wait', (['(0.1)'], {}), '(0.1)\n', (43185, 43190), False, 'from psychopy import core, event, misc\n'), ((43255, 43269), 'psychopy.core.wait', 'core.wait', (['dur'], {}), '(dur)\n', (43264, 43269), False, 'from psychopy import core, event, misc\n'), ((24237, 24263), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (24253, 24263), False, 'import os\n'), ((38901, 38922), 'os.path.split', 'os.path.split', (['imname'], {}), '(imname)\n', (38914, 38922), False, 'import os\n'), ((38982, 39006), 'os.path.splitext', 'os.path.splitext', (['imname'], {}), '(imname)\n', (38998, 39006), False, 'import os\n'), ((40705, 40748), 'helpers.psychopy2smi', 'helpers.psychopy2smi', (['xy', 'mon'], {'units': '"""norm"""'}), "(xy, mon, units='norm')\n", (40725, 40748), False, 'import helpers\n'), ((42688, 42704), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (42698, 42704), False, 'import time\n'), ((40804, 40846), 'helpers.psychopy2smi', 'helpers.psychopy2smi', (['xy', 'mon'], {'units': '"""deg"""'}), "(xy, mon, units='deg')\n", (40824, 40846), False, 
'import helpers\n'), ((40902, 40944), 'helpers.psychopy2smi', 'helpers.psychopy2smi', (['xy', 'mon'], {'units': '"""pix"""'}), "(xy, mon, units='pix')\n", (40922, 40944), False, 'import helpers\n')] |
"""Main alignment class"""
import logging
import time
import typing as ty
import warnings
import numpy as np
from .utilities import check_xy, convert_peak_values_to_index, generate_function, shift, time_loop
METHODS = ["pchip", "zero", "slinear", "quadratic", "cubic", "linear"]
LOGGER = logging.getLogger(__name__)
class Aligner:
    """Main alignment class"""
    # Placeholders for configuration values; the real values are assigned through
    # the property setters invoked from ``__init__``.
    _method, _gaussian_ratio, _gaussian_resolution, _gaussian_width, _n_iterations = None, None, None, None, None
    # Placeholders for the synthetic target signal and grid-search state built in ``_initialize``.
    _corr_sig_l, _corr_sig_x, _corr_sig_y, _reduce_range_factor, _scale_range = None, None, None, None, None
    _search_space, _computed = None, False
    def __init__(
        self,
        x: np.ndarray,
        array: ty.Optional[np.ndarray],
        peaks: ty.Iterable[float],
        method: str = "cubic",
        width: float = 10,
        ratio: float = 2.5,
        resolution: int = 100,
        iterations: int = 5,
        grid_steps: int = 20,
        shift_range: ty.Optional[ty.Tuple[int, int]] = None,
        weights: ty.Optional[ty.List[float]] = None,
        return_shifts: bool = False,
        align_by_index: bool = False,
        only_shift: bool = False,
    ):
        """Signal calibration and alignment by reference peaks
        A simplified version of the MSALIGN function found in MATLAB (see references for link)
        This version of the msalign function accepts most of the parameters that MATLAB's function accepts with the
        following exceptions: GroupValue, ShowPlotValue. A number of other parameters is allowed, although they have
        been renamed to comply with PEP8 conventions. The Python version is 8-60 times slower than the MATLAB
        implementation, which is mostly caused by a really slow instantiation of the
        `scipy.interpolate.PchipInterpolator` interpolator. In order to speed things up, I've also included several
        other interpolation methods which are significantly faster and give similar results.
        References
        ----------
        Monchamp, P., Andrade-Cetto, L., Zhang, J.Y., and Henson, R. (2007) Signal Processing Methods for Mass
        Spectrometry. In Systems Bioinformatics: An Engineering Case-Based Approach, G. Alterovitz and M.F. Ramoni, eds.
        Artech House Publishers).
        MSALIGN: https://nl.mathworks.com/help/bioinfo/ref/msalign.html
        Parameters
        ----------
        x : np.ndarray
            1D array of separation units (N). The number of elements of xvals must equal the number of elements of
            zvals.shape[1]
        array : np.ndarray
            2D array of intensities that must have common separation units (M x N) where M is the number of vectors
            and N is number of points in the vector
        peaks : list
            list of reference peaks that must be found in the xvals vector
        method : str
            interpolation method. Default: 'cubic'. MATLAB version uses 'pchip' which is significantly slower in Python
        weights: list (optional)
            list of weights associated with the list of peaks. Must be the same length as list of peaks
        width : float (optional)
            width of the gaussian peak in separation units. Default: 10
        ratio : float (optional)
            scaling value that determines the size of the window around every alignment peak. The synthetic signal is
            compared to the input signal within these regions. Default: 2.5
        resolution : int (optional)
            Default: 100
        iterations : int (optional)
            number of iterations. Increasing this value will (slightly) slow down the function but will improve
            performance. Default: 5
        grid_steps : int (optional)
            number of steps to be used in the grid search. Default: 20
        shift_range : list / numpy array (optional)
            maximum allowed shifts. Default: [-100, 100]
        only_shift : bool
            determines if signal should be shifted (True) or rescaled (False). Default: True
        return_shifts : bool
            decide whether shift parameter `shift_opt` should also be returned. Default: False
        align_by_index : bool
            decide whether alignment should be done based on index rather than `xvals` array. Default: False
        """
        self.x = np.asarray(x)
        if array is not None:
            # check_xy validates that the x-vector and intensity array are compatible
            self.array = check_xy(self.x, np.asarray(array))
        else:
            # no data yet - keep an empty (0 x N) array so downstream shapes stay valid
            self.array = np.empty((0, len(self.x)))
        self.n_signals = self.array.shape[0]
        self.array_aligned = np.zeros_like(self.array)
        self.peaks = list(peaks)
        # set attributes
        self.n_peaks = len(self.peaks)
        # accessible attributes
        self.scale_opt = np.ones((self.n_signals, 1), dtype=np.float32)
        self.shift_opt = np.zeros((self.n_signals, 1), dtype=np.float32)
        self.shift_values = np.zeros_like(self.shift_opt)
        # each assignment below goes through a validating property setter
        self.method = method
        self.gaussian_ratio = ratio
        self.gaussian_resolution = resolution
        self.gaussian_width = width
        self.n_iterations = iterations
        self.grid_steps = grid_steps
        if shift_range is None:
            shift_range = [-100, 100]
        self.shift_range = shift_range
        if weights is None:
            weights = np.ones(self.n_peaks)
        self.weights = weights
        # return shift vector
        self._return_shifts = return_shifts
        # If the number of points is equal to 1, then only shift
        if self.n_peaks == 1:
            only_shift = True
        if only_shift and not align_by_index:
            align_by_index = True
            LOGGER.warning("Only computing shifts - changed `align_by_index` to `True`.")
        # align signals by index rather than peak value
        self._align_by_index = align_by_index
        # align by index - rather than aligning to arbitrary non-integer values in the xvals, you can instead
        # use index of those values
        if self._align_by_index:
            self.peaks = convert_peak_values_to_index(self.x, self.peaks)
            self.x = np.arange(self.x.shape[0])
            LOGGER.debug(f"Aligning by index - peak positions: {self.peaks}")
        self._only_shift = only_shift
        self._initialize()
    @property
    def method(self):
        """Interpolation method."""
        return self._method
    @method.setter
    def method(self, value: str):
        if value not in METHODS:
            raise ValueError(f"Method `{value}` not found in the method options: {METHODS}")
        self._method = value
    @property
    def gaussian_ratio(self):
        """Gaussian ratio."""
        return self._gaussian_ratio
    @gaussian_ratio.setter
    def gaussian_ratio(self, value: float):
        if value <= 0:
            raise ValueError("Value of 'ratio' must be above 0!")
        self._gaussian_ratio = value
    @property
    def gaussian_resolution(self):
        """Gaussian resolution of every Gaussian pulse (number of points)."""
        return self._gaussian_resolution
    @gaussian_resolution.setter
    def gaussian_resolution(self, value: float):
        if value <= 0:
            raise ValueError("Value of 'resolution' must be above 0!")
        self._gaussian_resolution = value
    @property
    def gaussian_width(self):
        """Width of the Gaussian pulse in std dev of the Gaussian pulses (in X)."""
        return self._gaussian_width
    @gaussian_width.setter
    def gaussian_width(self, value: float):
        self._gaussian_width = value
    @property
    def n_iterations(self):
        """Total number of iterations - increase to improve accuracy."""
        return self._n_iterations
    @n_iterations.setter
    def n_iterations(self, value: int):
        if value < 1 or not isinstance(value, int):
            raise ValueError("Value of 'iterations' must be above 0 and be an integer!")
        self._n_iterations = value
    @property
    def grid_steps(self):
        """Number of steps used in each grid-search iteration - increase to improve accuracy."""
        return self._grid_steps
    @grid_steps.setter
    def grid_steps(self, value: int):
        if value < 1 or not isinstance(value, int):
            raise ValueError("Value of 'iterations' must be above 0 and be an integer!")
        self._grid_steps = value
    @property
    def shift_range(self):
        """Allowed (min, max) shift range."""
        return self._shift_range
    @shift_range.setter
    def shift_range(self, value: ty.Tuple[float, float]):
        if len(value) != 2:
            raise ValueError(
                "Number of 'shift_values' is not correct. Shift range accepts" " numpy array with two values."
            )
        if np.diff(value) == 0:
            raise ValueError("Values of 'shift_values' must not be the same!")
        self._shift_range = np.asarray(value)
    @property
    def weights(self):
        """Per-peak weights used when building the synthetic target signal."""
        return self._weights
    @weights.setter
    def weights(self, value: ty.Optional[ty.Iterable[float]]):
        if value is None:
            value = np.ones(self.n_peaks)
        if not isinstance(value, ty.Iterable):
            raise ValueError("Weights must be provided as an iterable.")
        if len(value) != self.n_peaks:
            raise ValueError("Number of weights does not match the number of peaks.")
        self._weights = np.asarray(value)
    def _initialize(self):
        """Prepare dataset for alignment"""
        # check that values for gaussian_width are valid
        gaussian_widths = np.zeros((self.n_peaks, 1))
        for i in range(self.n_peaks):
            gaussian_widths[i] = self.gaussian_width
        # set the synthetic target signal: one Gaussian pulse per reference peak,
        # sampled at `gaussian_resolution + 1` points within a +/- ratio*width window
        corr_sig_x = np.zeros((self.gaussian_resolution + 1, self.n_peaks))
        corr_sig_y = np.zeros((self.gaussian_resolution + 1, self.n_peaks))
        gaussian_resolution_range = np.arange(0, self.gaussian_resolution + 1)
        for i in range(self.n_peaks):
            left_l = self.peaks[i] - self.gaussian_ratio * gaussian_widths[i]  # noqa
            right_l = self.peaks[i] + self.gaussian_ratio * gaussian_widths[i]  # noqa
            corr_sig_x[:, i] = left_l + (gaussian_resolution_range * (right_l - left_l) / self.gaussian_resolution)
            corr_sig_y[:, i] = self.weights[i] * np.exp(
                -np.square((corr_sig_x[:, i] - self.peaks[i]) / gaussian_widths[i])  # noqa
            )
        self._corr_sig_l = (self.gaussian_resolution + 1) * self.n_peaks
        # flatten column-major so all samples of one pulse stay contiguous
        self._corr_sig_x = corr_sig_x.flatten("F")
        self._corr_sig_y = corr_sig_y.flatten("F")
        # set reduce_range_factor to take 5 points of the previous ranges or half of
        # the previous range if grid_steps < 10
        self._reduce_range_factor = min(0.5, 5 / self.grid_steps)
        # set scl such that the maximum peak can shift no more than the limits imposed by shift when scaling
        self._scale_range = 1 + self.shift_range / max(self.peaks)
        if self._only_shift:
            # scaling disabled: lock the scale factor to exactly 1
            self._scale_range = np.array([1, 1])
        # create the mesh-grid only once
        mesh_a, mesh_b = np.meshgrid(
            np.divide(np.arange(0, self.grid_steps), self.grid_steps - 1),
            np.divide(np.arange(0, self.grid_steps), self.grid_steps - 1),
        )
        # search space tiled once per iteration; `compute` indexes pairs of columns per iteration
        self._search_space = np.tile(
            np.vstack([mesh_a.flatten(order="F"), mesh_b.flatten(order="F")]).T, [1, self._n_iterations]
        )
    def run(self, n_iterations: ty.Optional[int] = None):
        """Execute the alignment procedure for each signal in the 2D array and collate the shift/scale vectors"""
        self.n_iterations = n_iterations or self.n_iterations
        # iterate for every signal
        t_start = time.time()
        # main loop: searches for the optimum values of Scale and Shift factors by search over a multi-resolution
        # grid, getting better at each iteration. Increasing the number of iterations improves the shift and scale
        # parameters
        for n_signal, y in enumerate(self.array):
            self.shift_opt[n_signal], self.scale_opt[n_signal] = self.compute(y)
        LOGGER.debug(f"Processed {self.n_signals} signals " + time_loop(t_start, self.n_signals + 1, self.n_signals))
        self._computed = True
    def compute(self, y: np.ndarray) -> ty.Tuple[float, float]:
        """Compute correction factors.
        This function does not set value in any of the class attributes so can be used in a iterator where values
        are computed lazily.
        """
        _scale_range = np.array([-0.5, 0.5])
        # initial values are always overwritten in the first loop pass
        # (the `n_iterations` setter guarantees at least one iteration)
        scale_opt, shift_opt = 0.0, 1.0
        # set to back to the user input arguments (or default)
        _shift = self.shift_range.copy()
        _scale = self._scale_range.copy()
        # generate interpolation function for each signal - instantiation of the interpolator can be quite slow,
        # so you can slightly increase the number of iterations without significant slowdown of the process
        func = generate_function(self.method, self.x, y)
        # iterate to estimate the shift and scale - at each iteration, the grid search is readjusted and the
        # shift/scale values are optimized further
        for n_iter in range(self.n_iterations):
            # scale and shift search space
            # NOTE(review): at n_iter == 0 the column index (n_iter * 2) - 2 is negative and
            # wraps to the end of the tiled search space; confirm this mirrors upstream msalign
            scale_grid = _scale[0] + self._search_space[:, (n_iter * 2) - 2] * np.diff(_scale)
            shift_grid = _shift[0] + self._search_space[:, (n_iter * 2) + 1] * np.diff(_shift)
            # candidate x-positions: every (scale, shift) pair applied to the synthetic signal
            temp = (
                np.reshape(scale_grid, (scale_grid.shape[0], 1)) * np.reshape(self._corr_sig_x, (1, self._corr_sig_l))
                + np.tile(shift_grid, [self._corr_sig_l, 1]).T
            )
            # interpolate at each iteration. Need to remove NaNs which can be introduced by certain (e.g.
            # PCHIP) interpolator
            temp = np.nan_to_num(func(temp.flatten("C")).reshape(temp.shape))
            # determine the best position (maximum correlation with the synthetic target)
            i_max = np.dot(temp, self._corr_sig_y).argmax()
            # save optimum value
            scale_opt = scale_grid[i_max]
            shift_opt = shift_grid[i_max]
            # readjust grid for next iteration_reduce_range_factor
            _scale = scale_opt + _scale_range * np.diff(_scale) * self._reduce_range_factor
            _shift = shift_opt + _scale_range * np.diff(_shift) * self._reduce_range_factor
        return shift_opt, scale_opt
    def apply(self, return_shifts: ty.Optional[bool] = None):
        """Align the signals against the computed values"""
        if not self._computed:
            warnings.warn("Aligning data without computing optimal alignment parameters", UserWarning)
        self._return_shifts = return_shifts if return_shifts is not None else self._return_shifts
        if self._only_shift:
            self.shift()
        else:
            self.align()
        # return aligned data and shifts
        if self._return_shifts:
            return self.array_aligned, self.shift_values
        # only return data
        return self.array_aligned
    def align(self, shift_opt: ty.Optional[np.ndarray] = None, scale_opt: ty.Optional[np.ndarray] = None):
        """Realign array based on the optimized shift and scale parameters
        Parameters
        ----------
        shift_opt: Optional[np.ndarray]
            vector containing values by which to shift the array
        scale_opt : Optional[np.ndarray]
            vector containing values by which to rescale the array
        """
        t_start = time.time()
        if shift_opt is None:
            shift_opt = self.shift_opt
        if scale_opt is None:
            scale_opt = self.scale_opt
        # realign based on provided values
        for iteration, y in enumerate(self.array):
            # interpolate back to the original domain
            self.array_aligned[iteration] = self._apply(y, shift_opt[iteration], scale_opt[iteration])
        self.shift_values = self.shift_opt
        LOGGER.debug(f"Re-aligned {self.n_signals} signals " + time_loop(t_start, self.n_signals + 1, self.n_signals))
    def _apply(self, y: np.ndarray, shift_value: float, scale_value: float):
        """Apply alignment correction to array `y`."""
        # build the interpolator over the shifted/scaled domain, then evaluate on the original x
        func = generate_function(self.method, (self.x - shift_value) / scale_value, y)
        return np.nan_to_num(func(self.x))
    def shift(self, shift_opt: ty.Optional[np.ndarray] = None):
        """Quickly shift array based on the optimized shift parameters.
        This method does not interpolate but rather moves the data left and right without applying any scaling.
        Parameters
        ----------
        shift_opt: Optional[np.ndarray]
            vector containing values by which to shift the array
        """
        t_start = time.time()
        if shift_opt is None:
            # integer shifts only - values are rounded before use
            shift_opt = np.round(self.shift_opt).astype(np.int32)
        # quickly shift based on provided values
        for iteration, y in enumerate(self.array):
            self.array_aligned[iteration] = self._shift(y, shift_opt[iteration])
        self.shift_values = shift_opt
        LOGGER.debug(f"Re-aligned {self.n_signals} signals " + time_loop(t_start, self.n_signals + 1, self.n_signals))
    @staticmethod
    def _shift(y: np.ndarray, shift_value: float):
        """Apply shift correction to array `y`."""
        # `shift` here resolves to the module-level utility imported from .utilities,
        # not to the `Aligner.shift` method (class scope is not in the lookup chain)
        return shift(y, -int(shift_value))
| [
"numpy.zeros_like",
"numpy.asarray",
"numpy.square",
"numpy.zeros",
"numpy.ones",
"time.time",
"numpy.diff",
"numpy.arange",
"numpy.array",
"numpy.reshape",
"numpy.tile",
"numpy.dot",
"warnings.warn",
"numpy.round",
"logging.getLogger"
] | [((291, 318), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (308, 318), False, 'import logging\n'), ((4288, 4301), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (4298, 4301), True, 'import numpy as np\n'), ((4534, 4559), 'numpy.zeros_like', 'np.zeros_like', (['self.array'], {}), '(self.array)\n', (4547, 4559), True, 'import numpy as np\n'), ((4716, 4762), 'numpy.ones', 'np.ones', (['(self.n_signals, 1)'], {'dtype': 'np.float32'}), '((self.n_signals, 1), dtype=np.float32)\n', (4723, 4762), True, 'import numpy as np\n'), ((4788, 4835), 'numpy.zeros', 'np.zeros', (['(self.n_signals, 1)'], {'dtype': 'np.float32'}), '((self.n_signals, 1), dtype=np.float32)\n', (4796, 4835), True, 'import numpy as np\n'), ((4864, 4893), 'numpy.zeros_like', 'np.zeros_like', (['self.shift_opt'], {}), '(self.shift_opt)\n', (4877, 4893), True, 'import numpy as np\n'), ((8855, 8872), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (8865, 8872), True, 'import numpy as np\n'), ((9434, 9451), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (9444, 9451), True, 'import numpy as np\n'), ((9607, 9634), 'numpy.zeros', 'np.zeros', (['(self.n_peaks, 1)'], {}), '((self.n_peaks, 1))\n', (9615, 9634), True, 'import numpy as np\n'), ((9790, 9844), 'numpy.zeros', 'np.zeros', (['(self.gaussian_resolution + 1, self.n_peaks)'], {}), '((self.gaussian_resolution + 1, self.n_peaks))\n', (9798, 9844), True, 'import numpy as np\n'), ((9866, 9920), 'numpy.zeros', 'np.zeros', (['(self.gaussian_resolution + 1, self.n_peaks)'], {}), '((self.gaussian_resolution + 1, self.n_peaks))\n', (9874, 9920), True, 'import numpy as np\n'), ((9958, 10000), 'numpy.arange', 'np.arange', (['(0)', '(self.gaussian_resolution + 1)'], {}), '(0, self.gaussian_resolution + 1)\n', (9967, 10000), True, 'import numpy as np\n'), ((11791, 11802), 'time.time', 'time.time', ([], {}), '()\n', (11800, 11802), False, 'import time\n'), ((12616, 12637), 'numpy.array', 'np.array', 
(['[-0.5, 0.5]'], {}), '([-0.5, 0.5])\n', (12624, 12637), True, 'import numpy as np\n'), ((15529, 15540), 'time.time', 'time.time', ([], {}), '()\n', (15538, 15540), False, 'import time\n'), ((16754, 16765), 'time.time', 'time.time', ([], {}), '()\n', (16763, 16765), False, 'import time\n'), ((5277, 5298), 'numpy.ones', 'np.ones', (['self.n_peaks'], {}), '(self.n_peaks)\n', (5284, 5298), True, 'import numpy as np\n'), ((6077, 6103), 'numpy.arange', 'np.arange', (['self.x.shape[0]'], {}), '(self.x.shape[0])\n', (6086, 6103), True, 'import numpy as np\n'), ((8727, 8741), 'numpy.diff', 'np.diff', (['value'], {}), '(value)\n', (8734, 8741), True, 'import numpy as np\n'), ((9143, 9164), 'numpy.ones', 'np.ones', (['self.n_peaks'], {}), '(self.n_peaks)\n', (9150, 9164), True, 'import numpy as np\n'), ((11106, 11122), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (11114, 11122), True, 'import numpy as np\n'), ((14643, 14737), 'warnings.warn', 'warnings.warn', (['"""Aligning data without computing optimal alignment parameters"""', 'UserWarning'], {}), "('Aligning data without computing optimal alignment parameters',\n UserWarning)\n", (14656, 14737), False, 'import warnings\n'), ((4374, 4391), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (4384, 4391), True, 'import numpy as np\n'), ((11225, 11254), 'numpy.arange', 'np.arange', (['(0)', 'self.grid_steps'], {}), '(0, self.grid_steps)\n', (11234, 11254), True, 'import numpy as np\n'), ((11300, 11329), 'numpy.arange', 'np.arange', (['(0)', 'self.grid_steps'], {}), '(0, self.grid_steps)\n', (11309, 11329), True, 'import numpy as np\n'), ((13435, 13450), 'numpy.diff', 'np.diff', (['_scale'], {}), '(_scale)\n', (13442, 13450), True, 'import numpy as np\n'), ((13530, 13545), 'numpy.diff', 'np.diff', (['_shift'], {}), '(_shift)\n', (13537, 13545), True, 'import numpy as np\n'), ((13583, 13631), 'numpy.reshape', 'np.reshape', (['scale_grid', '(scale_grid.shape[0], 1)'], {}), '(scale_grid, 
(scale_grid.shape[0], 1))\n', (13593, 13631), True, 'import numpy as np\n'), ((13634, 13685), 'numpy.reshape', 'np.reshape', (['self._corr_sig_x', '(1, self._corr_sig_l)'], {}), '(self._corr_sig_x, (1, self._corr_sig_l))\n', (13644, 13685), True, 'import numpy as np\n'), ((13704, 13746), 'numpy.tile', 'np.tile', (['shift_grid', '[self._corr_sig_l, 1]'], {}), '(shift_grid, [self._corr_sig_l, 1])\n', (13711, 13746), True, 'import numpy as np\n'), ((14044, 14074), 'numpy.dot', 'np.dot', (['temp', 'self._corr_sig_y'], {}), '(temp, self._corr_sig_y)\n', (14050, 14074), True, 'import numpy as np\n'), ((16820, 16844), 'numpy.round', 'np.round', (['self.shift_opt'], {}), '(self.shift_opt)\n', (16828, 16844), True, 'import numpy as np\n'), ((10402, 10468), 'numpy.square', 'np.square', (['((corr_sig_x[:, i] - self.peaks[i]) / gaussian_widths[i])'], {}), '((corr_sig_x[:, i] - self.peaks[i]) / gaussian_widths[i])\n', (10411, 10468), True, 'import numpy as np\n'), ((14318, 14333), 'numpy.diff', 'np.diff', (['_scale'], {}), '(_scale)\n', (14325, 14333), True, 'import numpy as np\n'), ((14410, 14425), 'numpy.diff', 'np.diff', (['_shift'], {}), '(_shift)\n', (14417, 14425), True, 'import numpy as np\n')] |
import os
from glob import glob
import numpy as np
from PIL import Image
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
from jpeg_eigen import jpeg_recompress_pil, jpeg_feature
def _jpeg_path(root, raw_path):
    """Map a RAW .png path to the matching .jpg path (same stem) under ``root``."""
    stem = os.path.splitext(os.path.basename(raw_path))[0]
    return os.path.join(root, stem + '.jpg')


def _cached_features(features_path, file_list, label):
    """Return JPEG eigen-features for ``file_list``, extracting and caching on first use.

    The stacked feature array is saved to ``features_path``; subsequent runs
    load it from disk instead of recomputing.  ``label`` is only used in the
    progress messages ('PS' or 'PIL').
    """
    if os.path.exists(features_path):
        print('Loading features for {} images'.format(label))
        return np.load(features_path)
    print('Extracting features for {} images'.format(label))
    features = np.stack([jpeg_feature(file_path) for file_path in tqdm(file_list)])
    np.save(features_path, features)
    return features


def main():
    """Train and evaluate a classifier separating Photoshop JPEGs from PIL JPEGs.

    Pipeline:
      1. Re-compress every RAW png with PIL, reusing the quantization tables of
         the corresponding Photoshop JPEG so both encoders use the same settings.
      2. Extract (and cache) JPEG eigen-features for both JPEG sets.
      3. Fit a random forest on a 50/50 split and report the test ROC AUC.
    """
    # Parameters ---
    ps_root = 'data/ps/'
    raw_root = 'data/raw/'
    pil_root = 'data/pil/'
    ps_features_path = 'data/ps.npy'
    pil_features_path = 'data/pil.npy'

    raw_file_list = glob(raw_root + '*.png')
    ps_file_list = [_jpeg_path(ps_root, raw_path) for raw_path in raw_file_list]
    pil_file_list = [_jpeg_path(pil_root, raw_path) for raw_path in raw_file_list]

    print('Compressing RAW to PIL with PS quantization matrix')
    for raw_file_path, ps_file_path, pil_file_path in tqdm(
            zip(raw_file_list, ps_file_list, pil_file_list), total=len(raw_file_list)):
        if not os.path.exists(pil_file_path):
            # Reuse the Photoshop quantization tables so the PIL output differs
            # only in the encoder implementation, not in compression settings.
            # Context manager closes the image handle (the original leaked it).
            with Image.open(ps_file_path) as img_ps:
                qtables_in = img_ps.quantization
            jpeg_recompress_pil(raw_file_path, pil_file_path, qtables_in=qtables_in)

    features_ps = _cached_features(ps_features_path, ps_file_list, 'PS')
    features_pil = _cached_features(pil_features_path, pil_file_list, 'PIL')

    # Deterministic 50/50 split over image indices: the PS and PIL variants of
    # the same source image always end up on the same side of the split.
    np.random.seed(197)
    rand_idxs = np.random.permutation(np.arange(len(raw_file_list)))
    train_idxs = rand_idxs[:len(raw_file_list) // 2]
    test_idxs = rand_idxs[len(raw_file_list) // 2:]

    # Label 0 = Photoshop, 1 = PIL.
    features_train = np.concatenate((features_ps[train_idxs], features_pil[train_idxs]), axis=0)
    labels_train = np.concatenate((np.zeros(len(train_idxs)), np.ones(len(train_idxs))))
    features_test = np.concatenate((features_ps[test_idxs], features_pil[test_idxs]), axis=0)
    labels_test = np.concatenate((np.zeros(len(test_idxs)), np.ones(len(test_idxs))))

    clf = RandomForestClassifier()
    clf.fit(features_train, labels_train)
    pred_test = clf.predict_proba(features_test)[:, 1]  # P(class 1) = P(PIL)
    auc_score = roc_auc_score(labels_test, pred_test)
    print('Test AUC: {:.2f}'.format(auc_score))


if __name__ == '__main__':
    main()
| [
"sklearn.ensemble.RandomForestClassifier",
"numpy.stack",
"tqdm.tqdm",
"numpy.save",
"numpy.random.seed",
"numpy.load",
"os.path.exists",
"sklearn.metrics.roc_auc_score",
"PIL.Image.open",
"jpeg_eigen.jpeg_recompress_pil",
"jpeg_eigen.jpeg_feature",
"os.path.splitext",
"glob.glob",
"numpy.... | [((459, 483), 'glob.glob', 'glob', (["(raw_root + '*.png')"], {}), "(raw_root + '*.png')\n", (463, 483), False, 'from glob import glob\n'), ((2056, 2075), 'numpy.random.seed', 'np.random.seed', (['(197)'], {}), '(197)\n', (2070, 2075), True, 'import numpy as np\n'), ((2272, 2347), 'numpy.concatenate', 'np.concatenate', (['(features_ps[train_idxs], features_pil[train_idxs])'], {'axis': '(0)'}), '((features_ps[train_idxs], features_pil[train_idxs]), axis=0)\n', (2286, 2347), True, 'import numpy as np\n'), ((2458, 2531), 'numpy.concatenate', 'np.concatenate', (['(features_ps[test_idxs], features_pil[test_idxs])'], {'axis': '(0)'}), '((features_ps[test_idxs], features_pil[test_idxs]), axis=0)\n', (2472, 2531), True, 'import numpy as np\n'), ((2629, 2653), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (2651, 2653), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2769, 2806), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels_test', 'pred_test'], {}), '(labels_test, pred_test)\n', (2782, 2806), False, 'from sklearn.metrics import roc_auc_score\n'), ((1203, 1235), 'os.path.exists', 'os.path.exists', (['ps_features_path'], {}), '(ps_features_path)\n', (1217, 1235), False, 'import os\n'), ((1341, 1359), 'tqdm.tqdm', 'tqdm', (['ps_file_list'], {}), '(ps_file_list)\n', (1345, 1359), False, 'from tqdm import tqdm\n'), ((1439, 1460), 'numpy.stack', 'np.stack', (['features_ps'], {}), '(features_ps)\n', (1447, 1460), True, 'import numpy as np\n'), ((1469, 1507), 'numpy.save', 'np.save', (['ps_features_path', 'features_ps'], {}), '(ps_features_path, features_ps)\n', (1476, 1507), True, 'import numpy as np\n'), ((1588, 1613), 'numpy.load', 'np.load', (['ps_features_path'], {}), '(ps_features_path)\n', (1595, 1613), True, 'import numpy as np\n'), ((1626, 1659), 'os.path.exists', 'os.path.exists', (['pil_features_path'], {}), '(pil_features_path)\n', (1640, 1659), False, 'import os\n'), ((1768, 1787), 
'tqdm.tqdm', 'tqdm', (['pil_file_list'], {}), '(pil_file_list)\n', (1772, 1787), False, 'from tqdm import tqdm\n'), ((1870, 1892), 'numpy.stack', 'np.stack', (['features_pil'], {}), '(features_pil)\n', (1878, 1892), True, 'import numpy as np\n'), ((1901, 1941), 'numpy.save', 'np.save', (['pil_features_path', 'features_pil'], {}), '(pil_features_path, features_pil)\n', (1908, 1941), True, 'import numpy as np\n'), ((2024, 2050), 'numpy.load', 'np.load', (['pil_features_path'], {}), '(pil_features_path)\n', (2031, 2050), True, 'import numpy as np\n'), ((982, 1011), 'os.path.exists', 'os.path.exists', (['pil_file_path'], {}), '(pil_file_path)\n', (996, 1011), False, 'import os\n'), ((1034, 1058), 'PIL.Image.open', 'Image.open', (['ps_file_path'], {}), '(ps_file_path)\n', (1044, 1058), False, 'from PIL import Image\n'), ((1116, 1188), 'jpeg_eigen.jpeg_recompress_pil', 'jpeg_recompress_pil', (['raw_file_path', 'pil_file_path'], {'qtables_in': 'qtables_in'}), '(raw_file_path, pil_file_path, qtables_in=qtables_in)\n', (1135, 1188), False, 'from jpeg_eigen import jpeg_recompress_pil, jpeg_feature\n'), ((1389, 1415), 'jpeg_eigen.jpeg_feature', 'jpeg_feature', (['ps_file_path'], {}), '(ps_file_path)\n', (1401, 1415), False, 'from jpeg_eigen import jpeg_recompress_pil, jpeg_feature\n'), ((1818, 1845), 'jpeg_eigen.jpeg_feature', 'jpeg_feature', (['pil_file_path'], {}), '(pil_file_path)\n', (1830, 1845), False, 'from jpeg_eigen import jpeg_recompress_pil, jpeg_feature\n'), ((540, 567), 'os.path.splitext', 'os.path.splitext', (['file_path'], {}), '(file_path)\n', (556, 567), False, 'import os\n'), ((695, 722), 'os.path.splitext', 'os.path.splitext', (['file_path'], {}), '(file_path)\n', (711, 722), False, 'import os\n')] |
from tripletpairs.kineticmodelling import timeresolvedmodels
from tripletpairs.kineticmodelling import KineticSimulation
import numpy as np
from matplotlib import pyplot as plt
###############################################################################
# SET UP THE KINETIC MODEL
###############################################################################
# Merrifield kinetic model; the attributes below are its rate constants.
# Names suggest: kSF/k_SF forward/backward singlet fission, kDISS triplet-pair
# dissociation, kTTA triplet-triplet annihilation, k*NR non-radiative decay,
# kSNR singlet decay, G generation rate.
# NOTE(review): confirm exact meanings and units against the tripletpairs docs.
m = timeresolvedmodels.Merrifield()
m.kSF = 0.05
m.k_SF = 0.05
m.kDISS = 5e-3
m.kTTA = 1e-23
m.kRELAX = 0
m.kSSA = 0
m.kTTNR = 1e-5
m.kTNR = 1e-5
m.kSNR = 0.06
m.G = 1e17
#m.t = np.logspace(-5, 5, 100)
###############################################################################
# SET THE SPIN HAMILTONIAN PARAMETERS
###############################################################################
# Parameters forwarded to sim.set_spin_hamiltonian_parameters below; presumably
# J is an exchange coupling and D, E zero-field-splitting parameters, with rAB a
# unit vector and alpha/beta/gamma/theta/phi orientation angles - TODO confirm.
J = 0
D = 5e-6
E = D/3
X = D/1000
rAB = (np.cos(86.37*np.pi/180), 0, np.sin(86.37*np.pi/180))
alpha = np.pi/2
beta = -118.32*np.pi/180
gamma = np.pi/2
theta = np.pi/4
phi = 0
# Magnetic field strengths to sweep, in tesla (plotted below as 1000*B in mT).
B = np.linspace(0, 0.25, 10)
###############################################################################
# DO THE SIMULATION
###############################################################################
sim = KineticSimulation(m)
sim.set_spin_hamiltonian_parameters(J, X, rAB, D, E, alpha, beta, gamma, B, theta, phi)
# Solve for the singlet (S1), total triplet-pair (TT_total) and free-triplet
# (T1) populations at every field strength.
sim.simulate_state_populations(['S1', 'TT_total', 'T1'])
# Convolve the populations with an instrument response function of width 2
# (presumably ns, matching the time axis below - confirm units).
sim.convolve_populations_with_irf(2)
# Magnetic field effect on S1, averaged over two different time windows.
mfe1 = sim.calculate_mfe('S1', time_range=(20, 30))
mfe2 = sim.calculate_mfe('S1', time_range=(100, 200))
###############################################################################
# PLOT SOME RESULTS
###############################################################################
fig, axes = plt.subplots(nrows=2, ncols=1, gridspec_kw={'hspace': 0.3, 'height_ratios': [2, 1.5]}, figsize=(6, 8))
# kinetics at B = 0
ax = axes[0]
ax.loglog(sim.times, sim.state_populations['S1'][:, 0], color='darkred', label=r'S$_1$')
ax.loglog(sim.times, sim.state_populations['TT_total'][:, 0], color='blue', label='TT')
ax.loglog(sim.times, sim.state_populations['T1'][:, 0], color='purple', label=r'T$_1$')
ax.set_xlim([1, 1e5])
ax.set_ylim([1e12, 1e18])
ax.set_xlabel('Time (ns)', fontsize=14)
ax.set_ylabel(r'Population (cm$^{-3}$)', fontsize=14)
ax.tick_params(axis='both', labelsize=14)
ax.legend(fontsize=14, frameon=False)
# magnetic field effect
ax = axes[1]
# MFE shown as a percentage; field converted from T to mT for the axis.
ax.plot(1000*B, 100*mfe1, color='seagreen', label='20-30ns')
ax.plot(1000*B, 100*mfe2, color='lime', label='100-200ns')
ax.axhline(0, color='0.5', linewidth=1)
ax.set_xlim([-5, 250])
ax.set_xlabel('Magnetic Field Strength (mT)', fontsize=14)
ax.set_ylabel('MFE (%)', fontsize=14)
ax.tick_params(axis='both', labelsize=14)
ax.legend(fontsize=14, frameon=False)
| [
"tripletpairs.kineticmodelling.timeresolvedmodels.Merrifield",
"tripletpairs.kineticmodelling.KineticSimulation",
"numpy.sin",
"numpy.cos",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((372, 403), 'tripletpairs.kineticmodelling.timeresolvedmodels.Merrifield', 'timeresolvedmodels.Merrifield', ([], {}), '()\n', (401, 403), False, 'from tripletpairs.kineticmodelling import timeresolvedmodels\n'), ((950, 974), 'numpy.linspace', 'np.linspace', (['(0)', '(0.25)', '(10)'], {}), '(0, 0.25, 10)\n', (961, 974), True, 'import numpy as np\n'), ((1164, 1184), 'tripletpairs.kineticmodelling.KineticSimulation', 'KineticSimulation', (['m'], {}), '(m)\n', (1181, 1184), False, 'from tripletpairs.kineticmodelling import KineticSimulation\n'), ((1668, 1774), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(1)', 'gridspec_kw': "{'hspace': 0.3, 'height_ratios': [2, 1.5]}", 'figsize': '(6, 8)'}), "(nrows=2, ncols=1, gridspec_kw={'hspace': 0.3, 'height_ratios':\n [2, 1.5]}, figsize=(6, 8))\n", (1680, 1774), True, 'from matplotlib import pyplot as plt\n'), ((812, 839), 'numpy.cos', 'np.cos', (['(86.37 * np.pi / 180)'], {}), '(86.37 * np.pi / 180)\n', (818, 839), True, 'import numpy as np\n'), ((840, 867), 'numpy.sin', 'np.sin', (['(86.37 * np.pi / 180)'], {}), '(86.37 * np.pi / 180)\n', (846, 867), True, 'import numpy as np\n')] |
"""
単変量特徴選択
個々の特徴量とターゲットとの間に統計的に顕著な関係がるかどうかを計算する。
最も高い確信度で関連している特徴量が選択される。
この方法は、計算が高速でモデルを構築する必要がないが、
個々の特徴量を個別に考慮するために他の特徴量と組み合わさって意味のある特徴量は捨てられる。
特徴選択後に使われるモデルとは完全に独立である。
selectPercentile:全部の特徴量に対する上位の割合を指定
selectKBest:使用される上位の特徴量
"""
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import numpy as np
import scipy as sp
from sklearn.feature_selection import SelectPercentile, SelectKBest, f_regression
def spt(X_train, y_train, X_test, y_test, X_name_list, percentile=20):
    """Univariate feature selection keeping the top ``percentile`` % of features.

    Each feature is scored independently against the target with the
    F-statistic of a univariate regression (``f_regression``) and the
    highest-scoring ``percentile`` percent of features is kept.  A mask of
    the selected features is plotted and the selected names are printed.

    Note: because each feature is scored in isolation, features that are
    only informative in combination with others are discarded.

    :param X_train: training feature matrix
    :param y_train: training target vector
    :param X_test: test feature matrix
    :param y_test: unused; kept for interface compatibility
    :param X_name_list: feature name list,
        e.g. ``X_name_list = list(dfR2.drop('Corrosion', axis=1).columns)``
    :param percentile: percentage of features to keep (default 20)
    :return: (X_train_selected, X_test_selected, X_name_selected)
    """
    tick_positions = list(range(len(X_name_list)))
    select = SelectPercentile(score_func=f_regression, percentile=percentile)
    select.fit(X_train, y_train)
    X_train_selected = select.transform(X_train)
    X_test_selected = select.transform(X_test)
    print("SelectPercentile {} %".format(percentile))
    print("X_train.shape: {}".format(X_train.shape))
    print("X_train_selected.shape:{}".format(X_train_selected.shape))
    # Boolean mask over the original features: True = kept.
    mask = select.get_support()
    plt.matshow(mask.reshape(1, -1), cmap="YlGnBu")
    plt.xlabel("SelectPercentile {} %".format(percentile))
    plt.yticks(())
    plt.xticks(tick_positions, X_name_list, rotation=90)
    plt.show()
    # Names of the selected features, in original column order.
    X_name_selected = [name for name, keep in zip(X_name_list, mask) if keep]
    print(X_name_selected)
    return X_train_selected, X_test_selected, X_name_selected
def skb(X_train, y_train, X_test, y_test, X_name_list, k=5):
    """
    Univariate feature selection.

    Computes whether each individual feature has a statistically
    significant relationship with the target and keeps the ``k``
    best-scoring features (``SelectKBest`` with ``f_regression``
    scoring).  Also plots a mask of the kept features.

    :param X_train: training feature matrix
    :param y_train: training target values
    :param X_test: test feature matrix
    :param y_test: test target values (unused; kept for interface symmetry)
    :param X_name_list: feature name list
    :param k: number of top-scoring features to keep
    :return: X_train_selected, X_test_selected, X_name_selected
    """
    tick_positions = list(range(0, len(X_name_list)))
    select = SelectKBest(score_func=f_regression, k=k)
    select.fit(X_train, y_train)
    X_train_selected = select.transform(X_train)
    X_test_selected = select.transform(X_test)
    print("SelectKBest {}".format(k))
    print("X_train.shape: {}".format(X_train.shape))
    print("X_train_selected.shape:{}".format(X_train_selected.shape))
    mask = select.get_support()
    # Visualize which features were kept (dark cells = selected).
    plt.matshow(mask.reshape(1, -1), cmap="YlGnBu")
    plt.xlabel("SelectKBest {}".format(k))
    plt.yticks(())
    plt.xticks(tick_positions, X_name_list, rotation=90)
    plt.show()
    # Names of the selected features, in their original column order.
    X_name_selected = [X_name_list[i] for i in np.where(mask)[0]]
    print(X_name_selected)
    return X_train_selected, X_test_selected, X_name_selected
| [
"seaborn.set_style",
"matplotlib.pyplot.show",
"matplotlib.pyplot.yticks",
"numpy.where",
"sklearn.feature_selection.SelectPercentile",
"matplotlib.pyplot.xticks",
"sklearn.feature_selection.SelectKBest"
] | [((308, 334), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (321, 334), True, 'import seaborn as sns\n'), ((1002, 1066), 'sklearn.feature_selection.SelectPercentile', 'SelectPercentile', ([], {'score_func': 'f_regression', 'percentile': 'percentile'}), '(score_func=f_regression, percentile=percentile)\n', (1018, 1066), False, 'from sklearn.feature_selection import SelectPercentile, SelectKBest, f_regression\n'), ((1582, 1596), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (1592, 1596), True, 'import matplotlib.pyplot as plt\n'), ((1602, 1642), 'matplotlib.pyplot.xticks', 'plt.xticks', (['Xl', 'X_name_list'], {'rotation': '(90)'}), '(Xl, X_name_list, rotation=90)\n', (1612, 1642), True, 'import matplotlib.pyplot as plt\n'), ((1648, 1658), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1656, 1658), True, 'import matplotlib.pyplot as plt\n'), ((1710, 1732), 'numpy.where', 'np.where', (['(mask == True)'], {}), '(mask == True)\n', (1718, 1732), True, 'import numpy as np\n'), ((2346, 2387), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'f_regression', 'k': 'k'}), '(score_func=f_regression, k=k)\n', (2357, 2387), False, 'from sklearn.feature_selection import SelectPercentile, SelectKBest, f_regression\n'), ((2822, 2836), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (2832, 2836), True, 'import matplotlib.pyplot as plt\n'), ((2842, 2882), 'matplotlib.pyplot.xticks', 'plt.xticks', (['Xl', 'X_name_list'], {'rotation': '(90)'}), '(Xl, X_name_list, rotation=90)\n', (2852, 2882), True, 'import matplotlib.pyplot as plt\n'), ((2888, 2898), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2896, 2898), True, 'import matplotlib.pyplot as plt\n'), ((2948, 2970), 'numpy.where', 'np.where', (['(mask == True)'], {}), '(mask == True)\n', (2956, 2970), True, 'import numpy as np\n')] |
"""
bin modis data into regular latitude and longitude bins
"""
import numpy as np
def reproj_L1B(raw_data, raw_x, raw_y, xlim, ylim, res):
    '''
    =========================================================================================
    Reproject MODIS L1B file to a regular grid
    -----------------------------------------------------------------------------------------
    d_array, x_array, y_array, bin_count = reproj_L1B(raw_data, raw_x, raw_y, xlim, ylim, res)
    -----------------------------------------------------------------------------------------
    Input:
            raw_data: L1B data, N*M 2-D array.
            raw_x: longitude info. N*M 2-D array.
            raw_y: latitude info. N*M 2-D array.
            xlim: range of longitude, a list.
            ylim: range of latitude, a list.
            res: resolution, single value.
    Output:
            d_array: L1B reprojected data (bin means; NaN for empty bins).
            x_array: reprojected longitude (bin means; NaN for empty bins).
            y_array: reprojected latitude (bin means; NaN for empty bins).
            bin_count: how many raw data points were included in each grid cell.
    Note:
            function does not perform well if "res" is larger than the resolution
            of the input data.  Sizes of "raw_data", "raw_x", "raw_y" must agree.
    =========================================================================================
    '''
    x_bins = np.arange(xlim[0], xlim[1], res)
    y_bins = np.arange(ylim[0], ylim[1], res)
    x_indices = np.searchsorted(x_bins, raw_x.flat, 'right')
    y_indices = np.searchsorted(y_bins, raw_y.flat, 'right')
    # np.float/np.int were removed in NumPy 1.24; use the builtin types.
    shape = (len(y_bins), len(x_bins))
    y_array = np.zeros(shape, dtype=float)
    x_array = np.zeros(shape, dtype=float)
    d_array = np.zeros(shape, dtype=float)
    bin_count = np.zeros(shape, dtype=int)
    # Accumulate sums and counts per bin.
    for n in range(len(y_indices)):
        bin_row = y_indices[n] - 1  # '-1' because we used 'right' in np.searchsorted
        bin_col = x_indices[n] - 1
        bin_count[bin_row, bin_col] += 1
        x_array[bin_row, bin_col] += raw_x.flat[n]
        y_array[bin_row, bin_col] += raw_y.flat[n]
        d_array[bin_row, bin_col] += raw_data.flat[n]
    # Convert sums to means in one vectorized pass; empty bins become NaN.
    filled = bin_count > 0
    for arr in (x_array, y_array, d_array):
        arr[filled] /= bin_count[filled]
        arr[~filled] = np.nan
    return d_array, x_array, y_array, bin_count
| [
"numpy.arange",
"numpy.searchsorted"
] | [((1438, 1470), 'numpy.arange', 'np.arange', (['xlim[0]', 'xlim[1]', 'res'], {}), '(xlim[0], xlim[1], res)\n', (1447, 1470), True, 'import numpy as np\n'), ((1483, 1515), 'numpy.arange', 'np.arange', (['ylim[0]', 'ylim[1]', 'res'], {}), '(ylim[0], ylim[1], res)\n', (1492, 1515), True, 'import numpy as np\n'), ((1531, 1575), 'numpy.searchsorted', 'np.searchsorted', (['x_bins', 'raw_x.flat', '"""right"""'], {}), "(x_bins, raw_x.flat, 'right')\n", (1546, 1575), True, 'import numpy as np\n'), ((1591, 1635), 'numpy.searchsorted', 'np.searchsorted', (['y_bins', 'raw_y.flat', '"""right"""'], {}), "(y_bins, raw_y.flat, 'right')\n", (1606, 1635), True, 'import numpy as np\n')] |
import numpy as np
import qutip
from .constants import *
from scipy.linalg import eig
def adiabatic_passage_f_sweep(atom, F, f_res, f_range, B_0, B_rf):
    """Sweep the RF detuning at a fixed static field.

    Returns the sorted dressed-state energies for each detuning step and
    the corresponding RF frequencies (angular frequency divided by 2*pi).
    B_0 and B_rf are given in Gauss and converted to Tesla internally.
    """
    b_static = B_0 * 1e-4   # Gauss -> Tesla
    b_drive = B_rf * 1e-4
    # Lande g-factor of the hyperfine level F.
    ff1 = F * (F + 1)
    ii1 = atom.I * (atom.I + 1)
    jj1 = atom.J * (atom.J + 1)
    g_F = (atom.g_J * (ff1 - ii1 + jj1) + atom.g_I * (ff1 + ii1 - jj1)) / (2 * ff1)
    larmor = g_F * mu_bohr * b_static / hbar
    quadratic = ((atom.g_J - atom.g_I) ** 2 * mu_bohr ** 2 * b_static ** 2
                 / (4 * hbar * h * 2 * np.pi * atom.delta_E_hf))
    rabi = mu_bohr * g_F * b_drive / hbar
    # Spin operators are detuning-independent, so build them once.
    jx = np.mat(qutip.jmat(F, "x"))
    jz = np.mat(qutip.jmat(F, "z"))
    quad_op = np.mat(1 - (2 * qutip.jmat(F, "z") / (2 * atom.I + 1)) ** 2)
    H_drive = hbar * rabi / 2 * jx
    detunings = np.linspace(-f_range / 2, f_range / 2, f_res)
    levels = np.zeros((f_res, int(2 * F + 1)))
    for idx, delta in enumerate(detunings):
        H = hbar * delta * jz + hbar * quadratic * quad_op + H_drive
        levels[idx] = sorted(np.real(eig(H, right=False)))
    return levels, (larmor + detunings) / (2 * np.pi)
def adiabatic_passage_B_sweep(atom, F, B_res, B_range, f, B_rf):
    """Sweep the static field around resonance at a fixed RF frequency f.

    Returns the sorted dressed-state energies for each field value and
    the swept field values themselves (in Tesla).  B_rf and B_range are
    given in Gauss and converted to Tesla internally.
    """
    b_drive = B_rf * 1e-4   # Gauss -> Tesla
    # Lande g-factor of the hyperfine level F.
    ff1 = F * (F + 1)
    ii1 = atom.I * (atom.I + 1)
    jj1 = atom.J * (atom.J + 1)
    g_F = (atom.g_J * (ff1 - ii1 + jj1) + atom.g_I * (ff1 + ii1 - jj1)) / (2 * ff1)
    # Field at which the Larmor frequency equals the RF frequency.
    center_field = 2 * np.pi * f * hbar / (g_F * mu_bohr)
    rabi = mu_bohr * g_F * b_drive / hbar
    # Spin operators are field-independent, so build them once.
    jz = np.mat(qutip.jmat(F, "z"))
    quad_op = np.mat(1 - (2 * qutip.jmat(F, "z") / (2 * atom.I + 1)) ** 2)
    H_drive = rabi / 2 * np.mat(qutip.jmat(F, "x"))
    fields = center_field + 1e-4 * np.linspace(-B_range / 2, B_range / 2, B_res)
    levels = np.zeros((B_res, int(2 * F + 1)))
    for idx, b0 in enumerate(fields):
        larmor = g_F * mu_bohr * b0 / hbar
        quadratic = ((atom.g_J - atom.g_I) ** 2 * mu_bohr ** 2 * b0 ** 2
                     / (4 * hbar * h * 2 * np.pi * atom.delta_E_hf))
        H = (larmor - 2 * np.pi * f) * jz + quadratic * quad_op + H_drive
        levels[idx] = sorted(np.real(eig(H, right=False)))
    return levels, fields
| [
"qutip.jmat",
"scipy.linalg.eig",
"numpy.linspace"
] | [((601, 646), 'numpy.linspace', 'np.linspace', (['(-f_range / 2)', '(f_range / 2)', 'f_res'], {}), '(-f_range / 2, f_range / 2, f_res)\n', (612, 646), True, 'import numpy as np\n'), ((774, 792), 'qutip.jmat', 'qutip.jmat', (['F', '"""x"""'], {}), "(F, 'x')\n", (784, 792), False, 'import qutip\n'), ((1517, 1535), 'qutip.jmat', 'qutip.jmat', (['F', '"""x"""'], {}), "(F, 'x')\n", (1527, 1535), False, 'import qutip\n'), ((1614, 1659), 'numpy.linspace', 'np.linspace', (['(-B_range / 2)', '(B_range / 2)', 'B_res'], {}), '(-B_range / 2, B_range / 2, B_res)\n', (1625, 1659), True, 'import numpy as np\n'), ((878, 896), 'qutip.jmat', 'qutip.jmat', (['F', '"""z"""'], {}), "(F, 'z')\n", (888, 896), False, 'import qutip\n'), ((1059, 1078), 'scipy.linalg.eig', 'eig', (['H'], {'right': '(False)'}), '(H, right=False)\n', (1062, 1078), False, 'from scipy.linalg import eig\n'), ((1922, 1940), 'qutip.jmat', 'qutip.jmat', (['F', '"""z"""'], {}), "(F, 'z')\n", (1932, 1940), False, 'import qutip\n'), ((2076, 2082), 'scipy.linalg.eig', 'eig', (['H'], {}), '(H)\n', (2079, 2082), False, 'from scipy.linalg import eig\n'), ((946, 964), 'qutip.jmat', 'qutip.jmat', (['F', '"""z"""'], {}), "(F, 'z')\n", (956, 964), False, 'import qutip\n'), ((1979, 1997), 'qutip.jmat', 'qutip.jmat', (['F', '"""z"""'], {}), "(F, 'z')\n", (1989, 1997), False, 'import qutip\n')] |
# Python modules
import os
import struct
import pprint
pp = pprint.PrettyPrinter(depth=2)
# 3rd party modules
import pydicom
import pydicom.dicomio
import numpy as np
# Our modules
import vespa.analysis.fileio.raw_reader as raw_reader
import vespa.common.util.config as util_config
import vespa.common.util.misc as util_misc
from vespa.common.base_transform import transformation_matrix
from vespa.common.mrs_data_raw import DataRaw, DataRawFidsum
from vespa.common.constants import Deflate
# need for inline processing - no wx
try:
import wx # just a test here
from vespa.analysis.fileio.dicom_browser_dialog import SiemensMrsBrowser
except:
SiemensMrsBrowser = None
# DICOM standard tags
TAG_SOP_CLASS_UID = (0x0008, 0x0016)
class RawReaderDicomSiemens(raw_reader.RawReader):
    """ Read a single Siemens DICOM file into an DataRaw object. """

    def __init__(self):
        raw_reader.RawReader.__init__(self)

    def pickfile(self, default_path=""):
        """ Default is multiple filenames that will be 'loaded into the screen' """
        # Fall back to the last-used browse path stored in the Vespa config,
        # then to the user's documents directory, when no valid path given.
        if not os.path.exists(default_path):
            default_path = util_config.VespaConfig()["general"].get("last_dicom_browse_path", "")
        if not os.path.exists(default_path):
            default_path = util_misc.get_documents_dir()

        # SiemensMrsBrowser is None when wx could not be imported (inline,
        # non-GUI processing); in that case no files can be picked.
        if SiemensMrsBrowser is not None:
            dialog = SiemensMrsBrowser(multi_select=self.multiple,
                                      default_path=default_path,
                                      show_tags=False,
                                      preview_size=None)
            dialog.ShowModal()
            self.filenames = dialog.filenames
        else:
            self.filenames = []

        if self.filenames:
            # Remember the browse location for next time.  `dialog` is only
            # bound in the GUI branch, but filenames is empty otherwise.
            config = util_config.VespaConfig()
            config["general"]["last_dicom_browse_path"] = dialog.path
            config.write()

        return bool(self.filenames)

    def read_raw(self, filename, ignore_data, *args, **kwargs):
        """
        Given Siemens DICOM filename, return a populated DataRaw object

        The sop_class_uid flags if this is the older proprietary Siemens hack
        format or the newer DICOM standard MR Spectroscopy Storage object.

        Ignore data has no effect on this parser

        """
        # the .IMA format is a DICOM standard, but Siemens stores a lot of
        # information inside a private and very complicated header with its own
        # data storage format, we have to get that information out along with
        # the data. we start by reading in the DICOM file completely
        dataset = pydicom.dicomio.read_file(filename)

        sop_class_uid = pydicom.uid.UID(str(dataset['SOPClassUID'].value.upper()))

        # Dispatch on the SOP class: DICOM-standard MR Spectroscopy Storage
        # object vs. Siemens' proprietary CSA-header layout.
        if sop_class_uid.name == 'MR Spectroscopy Storage':
            d = _get_parameters_dicom_sop(dataset)
        else:
            d = _get_parameters_siemens_proprietary(dataset)

        d["data_source"] = filename

        return [DataRaw(attributes=d),]
class RawReaderDicomSiemensFidsum(RawReaderDicomSiemens):
    """Reader that parses Siemens DICOM files like the base class but
    returns the result wrapped in a DataRawFidsum object."""

    def __init__(self):
        RawReaderDicomSiemens.__init__(self)

    def read_raw(self, filename, ignore_data=False, *args, **kwargs):
        """Parse via the base class, then re-inflate the DataRaw into a
        DataRawFidsum through its dictionary representation."""
        base_raw, = super().read_raw(filename, ignore_data)
        fidsum = DataRawFidsum(base_raw.deflate(Deflate.DICTIONARY))
        return [fidsum]
#################### Internal functions start here ###############
CSA1 = 0
CSA2 = 1
ima_types = {
"floats": ["NumberOfAverages", "RSatPositionSag", "PercentPhaseFieldOfView",
"RSatOrientationSag", "MixingTime", "PercentPhaseFieldOfView",
"RSatPositionCor", "InversionTime", "RepetitionTime",
"VoiThickness", "TransmitterReferenceAmplitude",
"ImageOrientationPatient", "SliceThickness",
"RSatOrientationTra", "PixelBandwidth", "SAR",
"PixelSpacing", "ImagePositionPatient", "VoiPosition",
"SliceLocation","FlipAngle", "VoiInPlaneRotation", "VoiPhaseFoV",
"SliceMeasurementDuration", "HammingFilterWidth",
"RSatPositionTra", "MagneticFieldStrength", "VoiOrientation",
"PercentSampling", "EchoTime", "VoiReadoutFoV", "RSatThickness",
"RSatOrientationCor", "ImagingFrequency", "TriggerTime",
"dBdt", "TransmitterCalibration", "PhaseGradientAmplitude",
"ReadoutGradientAmplitude", "SelectionGradientAmplitude",
"GradientDelayTime", "dBdt_max", "t_puls_max", "dBdt_thresh",
"dBdt_limit", "SW_korr_faktor", "Stim_lim", "Stim_faktor"],
"integers": ["Rows", "Columns", "DataPointColumns",
"SpectroscopyAcquisitionOut-of-planePhaseSteps",
"EchoPartitionPosition", "AcquisitionMatrix",
"NumberOfFrames", "EchoNumbers", "RealDwellTime",
"EchoTrainLength", "EchoLinePosition",
"EchoColumnPosition", "SpectroscopyAcquisitionDataColumns",
"SpectroscopyAcquisitionPhaseColumns",
"SpectroscopyAcquisitionPhaseRows", "RfWatchdogMask",
"NumberOfPhaseEncodingSteps", "DataPointRows",
"UsedPatientWeight", "NumberOfPrescans",
"Stim_mon_mode", "Operation_mode_flag", "CoilId",
"MiscSequenceParam", "MrProtocolVersion",
"ProtocolSliceNumber"],
"strings": ["ReferencedImageSequence", "ScanningSequence", "SequenceName",
"ImagedNucleus", "TransmittingCoil", "PhaseEncodingDirection",
"VariableFlipAngleFlag", "SequenceMask",
"AcquisitionMatrixText", "MultistepIndex",
"DataRepresentation", "SignalDomainColumns",
"k-spaceFiltering", "ResonantNucleus",
"ImaCoilString", "FrequencyCorrection",
"WaterReferencedPhaseCorrection", "SequenceFileOwner",
"CoilForGradient", "CoilForGradient2",
"PositivePCSDirections", ],
}
def _read_csa_header(csa_header_bytes):
    """Parse a Siemens CSA header blob (CSA1 or CSA2 layout) into a dict
    mapping tag names to a converted item, or a list of items when a tag
    holds more than one."""
    # CSA2 begins with the magic "SV10" followed by four junk bytes, then
    # the tag count and a delimiter (value 77) as two uint32s.  CSA1 has
    # just the count and delimiter.  The per-tag structure is shared, but
    # the meaning of the item-size fields differs between versions.
    if csa_header_bytes[:4] == "SV10".encode('latin-1'):
        n_tags, _delim = struct.unpack("<II", csa_header_bytes[8:16])
        offset = 16
        fmt_version = CSA2
    else:
        n_tags, _delim = struct.unpack("<II", csa_header_bytes[:8])
        offset = 8
        fmt_version = CSA1

    parsed = {}
    for _tag in range(n_tags):
        # Fixed 84-byte tag record: 64-byte name, vm, 4-byte vr,
        # syngo datatype, item count, delimiter.
        record = csa_header_bytes[offset:(offset + 84)]
        tag_name, _vm, _vr, _syngo_dt, n_items, _d = struct.unpack("<64si4siii", record)
        offset += 84

        # The name field is fixed-width but null-terminated inside.
        tag_name = tag_name.decode('latin-1').split("\x00", 1)[0]

        items = []
        for _item in range(n_items):
            sizes = struct.unpack("<4L", csa_header_bytes[offset:(offset + 16)])
            offset += 16
            if fmt_version == CSA2:
                length = sizes[1]
                # Clamp truncated final items to the end of the buffer.
                if (offset + length) > len(csa_header_bytes):
                    length = len(csa_header_bytes) - offset
            else:
                length = sizes[0]

            value, = struct.unpack("<%ds" % length, csa_header_bytes[offset:(offset + length)])
            value = value.decode('latin-1').split("\x00", 1)[0]

            # Convert according to the declared tag type; unknown tags and
            # string tags keep the raw decoded value.
            if length > 0:
                if tag_name in ima_types["floats"]:
                    value = float(value)
                elif tag_name in ima_types["integers"]:
                    value = int(value)

            items.append(value)
            offset += length
            offset += (4 - (length % 4)) % 4  # advance to next 4-byte boundary

        # Single-item tags are stored unwrapped.
        parsed[tag_name] = items[0] if len(items) == 1 else items
    return parsed
def _get_parameters_dicom_sop(dataset):
    """
    Returns a subset of the parameters from a Pydicom dataset.

    Parses a DICOM-standard MR Spectroscopy Storage object: spectral data
    comes from the SpectroscopyData element and geometry comes from the
    shared/per-frame functional group sequences.  Returns a dict suitable
    for constructing a DataRaw object.
    """
    # get shape of the data (slices, rows, columns, fid_points)
    # (0x5200,0x9229) = Shared Functional Groups Sequence.
    section = dataset[0x5200,0x9229][0][0x0018,0x9103][0]
    data_shape = (section["SpectroscopyAcquisitionOutOfPlanePhaseSteps"].value,
                  section["SpectroscopyAcquisitionPhaseRows"].value,
                  section["SpectroscopyAcquisitionPhaseColumns"].value,
                  section["SpectroscopyAcquisitionDataColumns"].value, )

    # Data is stored as interleaved (real, imaginary) float pairs.
    data_iter = iter(dataset['SpectroscopyData'].value)     # (0x5600, 0x0020)
    data = [complex(r, i) for r, i in zip(data_iter, data_iter)]
    complex_data = np.fromiter(data, dtype=np.complex64)
    complex_data.shape = data_shape
    # NOTE(review): conjugation flips the spectral axis convention --
    # presumably to match Vespa's expected orientation; confirm if changed.
    complex_data = complex_data.conjugate()

    try:
        # Build the voxel->patient transform from orientation, position and
        # the three slab thicknesses.
        iorient = dataset[0x5200,0x9229][0][0x0020,0x9116][0]['ImageOrientationPatient'].value
        row_vector = np.array(iorient[0:3])
        col_vector = np.array(iorient[3:6])
        voi_position = dataset[0x5200,0x9230][0][0x0020,0x9113][0]['ImagePositionPatient'].value
        voxel_size = [dataset[0x0018,0x9126][0]['SlabThickness'].value,
                      dataset[0x0018,0x9126][1]['SlabThickness'].value,
                      dataset[0x0018,0x9126][2]['SlabThickness'].value]
        tform = transformation_matrix(row_vector, col_vector, voi_position, voxel_size)
    except Exception:
        # Geometry tags missing/malformed - fall back to a default voxel
        # size and no transform.  (Was a bare `except:`, which also caught
        # KeyboardInterrupt/SystemExit.)
        voxel_size = np.array([20.0, 20.0, 20.0])
        tform = None

    params = {'is_dicom_sop': True,
              'sw'        : dataset["SpectralWidth"].value,
              'frequency' : dataset["TransmitterFrequency"].value,
              'resppm'    : 4.7,
              'echopeak'  : 0.0,
              'nucleus'   : dataset["ResonantNucleus"].value,
              'seqte'     : dataset[0x5200,0x9229][0][0x0018,0x9114][0]['EffectiveEchoTime'].value,
              'seqtr'     : dataset[0x5200,0x9229][0][0x0018,0x9112][0]['RepetitionTime'].value,
              'voxel_dimensions' : voxel_size,
              'header'    : str(dataset),
              'transform' : tform,
              'data'      : complex_data}

    return params
def _get_parameters_siemens_proprietary(dataset):
    """ Returns a subset of the parameters from a Pydicom dataset.

    Parses Siemens' older proprietary format: acquisition metadata lives
    in a private CSA image header and the spectral data in a private
    CSA non-image element.  Returns a dict suitable for constructing a
    DataRaw object.
    """
    #--------------------------------------------------------------------------
    # Find Image CSA Header - search tags (0029, 00xx), Siemens start xx at 10
    xx = 0x0010
    header_index = 0
    while (0x0029, xx) in dataset:
        if dataset[0x0029, xx].value == "SIEMENS CSA HEADER":
            header_index = xx
        xx += 1

    # check that we have found the header
    if header_index == 0:
        raise KeyError("Could not find header index")

    # now we know which tag contains the CSA image header info: (0029, xx10)
    csa_header_bytes = dataset[0x0029, 0x0100 * header_index + 0x0010].value
    csa_header = _read_csa_header(csa_header_bytes)

    # could also get the Series CSA Header info: (0029, xx20), but we don't

    # get data shape (slices, rows, columns, fid_points)
    data_shape = (csa_header["SpectroscopyAcquisitionOut-of-planePhaseSteps"],
                  csa_header["Rows"],
                  csa_header["Columns"],
                  csa_header["DataPointColumns"], )

    #--------------------------------------------------------------------------
    # Find CSA Non-Image Data - search tags (0029, 00xx), start xx at 10
    xx = 0x0010
    data_index = 0
    while (0x7fe1, xx) in dataset:
        if dataset[0x7fe1, xx].value == "SIEMENS CSA NON-IMAGE":
            data_index = xx
        xx += 1

    # check that we have found the data
    if data_index == 0:
        raise KeyError("Could not find data index")

    # extract the actual data bytes
    csa_data_bytes = dataset[0x7fe1, 0x0100 * data_index + 0x0010].value

    # data stored in string as 4 byte floats in (real, imaginary) pairs
    data = struct.unpack("<%df" % (len(csa_data_bytes) / 4), csa_data_bytes)
    data_iter = iter(data)
    data = [complex(r, i) for r, i in zip(data_iter, data_iter)]
    complex_data = np.fromiter(data, dtype=np.complex64)
    complex_data.shape = data_shape

    try:
        # Build the voxel->patient transform from orientation/position.
        row_vector = np.array(csa_header['ImageOrientationPatient'][0:3])
        col_vector = np.array(csa_header['ImageOrientationPatient'][3:6])
        voi_position = np.array(csa_header["VoiPosition"])
        # voxel_size = (*csa_header["PixelSpacing"],csa_header["SliceThickness"])
        # Since VB13 these are used for voxel_size, and ReadoutFoV and PhaseFoV are swapped
        voxel_size = [csa_header["VoiReadoutFoV"],
                      csa_header["VoiPhaseFoV"],
                      csa_header["VoiThickness"]]
        tform = transformation_matrix(row_vector, col_vector, voi_position, voxel_size)
    except Exception:
        # Geometry tags missing/malformed - fall back to a default voxel
        # size and no transform.  (Was a bare `except:`, which also caught
        # KeyboardInterrupt/SystemExit.)
        voxel_size = [20.0, 20.0, 20.0]
        tform = None

    header = '\n--- DICOM TAGS -------\n'+str(dataset)
    header += '\n\n--- CSA IMAGE HEADER -------\n'+pp.pformat(csa_header)

    params = {'is_dicom_sop': False,
              'sw'        : 1.0 / (csa_header["RealDwellTime"] * 1e-9),
              'frequency' : csa_header["ImagingFrequency"],
              'resppm'    : 4.7,
              'echopeak'  : 0.0,
              'nucleus'   : '1H',
              'seqte'     : csa_header["EchoTime"],
              'seqtr'     : csa_header["RepetitionTime"],
              'voxel_dimensions' : voxel_size,
              'header'    : header,
              'transform' : tform,
              'data'      : complex_data}

    return params
| [
"vespa.common.util.config.VespaConfig",
"vespa.common.base_transform.transformation_matrix",
"struct.unpack",
"os.path.exists",
"vespa.analysis.fileio.dicom_browser_dialog.SiemensMrsBrowser",
"pydicom.dicomio.read_file",
"vespa.common.mrs_data_raw.DataRaw",
"pprint.PrettyPrinter",
"vespa.analysis.fi... | [((60, 89), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'depth': '(2)'}), '(depth=2)\n', (80, 89), False, 'import pprint\n'), ((9886, 9923), 'numpy.fromiter', 'np.fromiter', (['data'], {'dtype': 'np.complex64'}), '(data, dtype=np.complex64)\n', (9897, 9923), True, 'import numpy as np\n'), ((13380, 13417), 'numpy.fromiter', 'np.fromiter', (['data'], {'dtype': 'np.complex64'}), '(data, dtype=np.complex64)\n', (13391, 13417), True, 'import numpy as np\n'), ((903, 938), 'vespa.analysis.fileio.raw_reader.RawReader.__init__', 'raw_reader.RawReader.__init__', (['self'], {}), '(self)\n', (932, 938), True, 'import vespa.analysis.fileio.raw_reader as raw_reader\n'), ((2622, 2657), 'pydicom.dicomio.read_file', 'pydicom.dicomio.read_file', (['filename'], {}), '(filename)\n', (2647, 2657), False, 'import pydicom\n'), ((6841, 6885), 'struct.unpack', 'struct.unpack', (['"""<II"""', 'csa_header_bytes[8:16]'], {}), "('<II', csa_header_bytes[8:16])\n", (6854, 6885), False, 'import struct\n'), ((6982, 7024), 'struct.unpack', 'struct.unpack', (['"""<II"""', 'csa_header_bytes[:8]'], {}), "('<II', csa_header_bytes[:8])\n", (6995, 7024), False, 'import struct\n'), ((7284, 7322), 'struct.unpack', 'struct.unpack', (['"""<64si4siii"""', 'the_bytes'], {}), "('<64si4siii', the_bytes)\n", (7297, 7322), False, 'import struct\n'), ((10133, 10155), 'numpy.array', 'np.array', (['iorient[0:3]'], {}), '(iorient[0:3])\n', (10141, 10155), True, 'import numpy as np\n'), ((10180, 10202), 'numpy.array', 'np.array', (['iorient[3:6]'], {}), '(iorient[3:6])\n', (10188, 10202), True, 'import numpy as np\n'), ((10535, 10606), 'vespa.common.base_transform.transformation_matrix', 'transformation_matrix', (['row_vector', 'col_vector', 'voi_position', 'voxel_size'], {}), '(row_vector, col_vector, voi_position, voxel_size)\n', (10556, 10606), False, 'from vespa.common.base_transform import transformation_matrix\n'), ((13488, 13540), 'numpy.array', 'np.array', 
(["csa_header['ImageOrientationPatient'][0:3]"], {}), "(csa_header['ImageOrientationPatient'][0:3])\n", (13496, 13540), True, 'import numpy as np\n'), ((13565, 13617), 'numpy.array', 'np.array', (["csa_header['ImageOrientationPatient'][3:6]"], {}), "(csa_header['ImageOrientationPatient'][3:6])\n", (13573, 13617), True, 'import numpy as np\n'), ((13642, 13677), 'numpy.array', 'np.array', (["csa_header['VoiPosition']"], {}), "(csa_header['VoiPosition'])\n", (13650, 13677), True, 'import numpy as np\n'), ((14020, 14091), 'vespa.common.base_transform.transformation_matrix', 'transformation_matrix', (['row_vector', 'col_vector', 'voi_position', 'voxel_size'], {}), '(row_vector, col_vector, voi_position, voxel_size)\n', (14041, 14091), False, 'from vespa.common.base_transform import transformation_matrix\n'), ((1082, 1110), 'os.path.exists', 'os.path.exists', (['default_path'], {}), '(default_path)\n', (1096, 1110), False, 'import os\n'), ((1225, 1253), 'os.path.exists', 'os.path.exists', (['default_path'], {}), '(default_path)\n', (1239, 1253), False, 'import os\n'), ((1282, 1311), 'vespa.common.util.misc.get_documents_dir', 'util_misc.get_documents_dir', ([], {}), '()\n', (1309, 1311), True, 'import vespa.common.util.misc as util_misc\n'), ((1376, 1488), 'vespa.analysis.fileio.dicom_browser_dialog.SiemensMrsBrowser', 'SiemensMrsBrowser', ([], {'multi_select': 'self.multiple', 'default_path': 'default_path', 'show_tags': '(False)', 'preview_size': 'None'}), '(multi_select=self.multiple, default_path=default_path,\n show_tags=False, preview_size=None)\n', (1393, 1488), False, 'from vespa.analysis.fileio.dicom_browser_dialog import SiemensMrsBrowser\n'), ((1774, 1799), 'vespa.common.util.config.VespaConfig', 'util_config.VespaConfig', ([], {}), '()\n', (1797, 1799), True, 'import vespa.common.util.config as util_config\n'), ((2983, 3004), 'vespa.common.mrs_data_raw.DataRaw', 'DataRaw', ([], {'attributes': 'd'}), '(attributes=d)\n', (2990, 3004), False, 'from 
vespa.common.mrs_data_raw import DataRaw, DataRawFidsum\n'), ((7792, 7864), 'struct.unpack', 'struct.unpack', (['"""<4L"""', 'csa_header_bytes[header_offset:header_offset + 16]'], {}), "('<4L', csa_header_bytes[header_offset:header_offset + 16])\n", (7805, 7864), False, 'import struct\n'), ((8308, 8355), 'struct.unpack', 'struct.unpack', (["('<%ds' % item_length)", 'item_bytes'], {}), "('<%ds' % item_length, item_bytes)\n", (8321, 8355), False, 'import struct\n'), ((10677, 10705), 'numpy.array', 'np.array', (['[20.0, 20.0, 20.0]'], {}), '([20.0, 20.0, 20.0])\n', (10685, 10705), True, 'import numpy as np\n'), ((1139, 1164), 'vespa.common.util.config.VespaConfig', 'util_config.VespaConfig', ([], {}), '()\n', (1162, 1164), True, 'import vespa.common.util.config as util_config\n')] |
''' PolynomialFiltering.components.EmpFmpPair
(C) Copyright 2019 - Blue Lightning Development, LLC.
<NAME>. <EMAIL>
SPDX-License-Identifier: MIT
See separate LICENSE file for full text
'''
from abc import abstractmethod
from math import isnan;
from numpy import array, diag, zeros, sqrt, transpose
from numpy import array as vector
from polynomialfiltering.components.AbstractRecursiveFilter import AbstractRecursiveFilter
from polynomialfiltering.components.ExpandingMemoryPolynomialFilter import makeEMP, EMPBase
from polynomialfiltering.components.FadingMemoryPolynomialFilter import makeFMP, FMPBase
class EmpFmpPair(AbstractRecursiveFilter) :
    """
    Filter composed of an expanding memory and a fading memory filter of the same order.

    The EMP filter is used to initialize and after the sample number when the 0th order
    variance of the EMP filter matches that variance of the FMP at the configured theta
    fading factor, we switch to the FMP filter.  See Morrison 1969, Section 13.8
    """
    # NOTE(review): the '''@ name : type''' strings below appear to be
    # machine-readable annotations for a code translator; leave them as-is.
    '''@ emp : EMPBase'''
    '''@ fmp : FMPBase'''
    '''@ current : AbstractRecursiveFilter'''

    def __init__(self, order : int, theta : float, tau : float) :
        super().__init__(order, tau);
        # NOTE(review): this string literal sits *after* the super() call, so
        # it is not a real docstring; kept in place for the translator tooling.
        """
        Constructor

        Arguments:
            order - integer polynomial orer
            theta - fading factor
            tau - nominal time step
        """
        self.emp = makeEMP(order, tau);        # initialization-phase filter
        self.fmp = makeFMP(order, theta, tau)  # steady-state filter
        self.current = self.emp;                # filter currently in use

    def start(self, t : float, Z : vector) -> None:
        """@super"""
        # (Re)starting always begins with the expanding-memory filter.
        self.current = self.emp;
        self.current.start(t, Z)

    def predict(self, t : float) -> vector :
        """@super"""
        return self.current.predict(t)

    def update(self, t : float, Zstar : vector, e : float) -> vector:
        """@super"""
        '''@ innovation : vector'''
        innovation = self.current.update(t, Zstar, e)
        # Switch EMP -> FMP once enough samples have been absorbed that the
        # EMP variance matches the FMP variance at theta (Morrison 13.8);
        # the switch is one-way because `current` never reverts to the EMP
        # except via start().
        if (self.current == self.emp) :
            if (self.emp.getN() >= self.emp.nSwitch(self.fmp.getTheta())) :
                self.fmp.copyState( self.emp );
                self.current = self.fmp;
        return innovation;

    def getN(self)->int:
        """@super"""
        return self.current.getN()

    def getTau(self) -> float:
        """@super"""
        return self.current.getTau()

    def getTime(self) -> float:
        """@super"""
        return self.current.getTime()

    def getState(self) -> vector:
        """@super"""
        return self.current.getState()

    def getVRF(self) -> array:
        """@super"""
        return self.current.getVRF()

    def _gammaParameter(self, t : float, dtau : float) -> float: # pragma: no cover
        """@none | stub to meet interface; never used"""
        return 0

    def _gamma(self, n : float) -> vector: # pragma: no cover
        """@none | stub to meet interface; never used"""
        return zeros([self.order+1,1])

    def _VRF(self) -> array: # pragma: no cover
        """@none | stub to meet interface; never used"""
        return zeros([self.order+1, self.order+1])
| [
"polynomialfiltering.components.ExpandingMemoryPolynomialFilter.makeEMP",
"polynomialfiltering.components.FadingMemoryPolynomialFilter.makeFMP",
"numpy.zeros"
] | [((1447, 1466), 'polynomialfiltering.components.ExpandingMemoryPolynomialFilter.makeEMP', 'makeEMP', (['order', 'tau'], {}), '(order, tau)\n', (1454, 1466), False, 'from polynomialfiltering.components.ExpandingMemoryPolynomialFilter import makeEMP, EMPBase\n'), ((1487, 1513), 'polynomialfiltering.components.FadingMemoryPolynomialFilter.makeFMP', 'makeFMP', (['order', 'theta', 'tau'], {}), '(order, theta, tau)\n', (1494, 1513), False, 'from polynomialfiltering.components.FadingMemoryPolynomialFilter import makeFMP, FMPBase\n'), ((2978, 3004), 'numpy.zeros', 'zeros', (['[self.order + 1, 1]'], {}), '([self.order + 1, 1])\n', (2983, 3004), False, 'from numpy import array, diag, zeros, sqrt, transpose\n'), ((3127, 3166), 'numpy.zeros', 'zeros', (['[self.order + 1, self.order + 1]'], {}), '([self.order + 1, self.order + 1])\n', (3132, 3166), False, 'from numpy import array, diag, zeros, sqrt, transpose\n')] |
from collections import namedtuple
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from wind_repower_usa.config import EXTERNAL_DIR
# Immutable record describing one wind-turbine model.  `power_curve` is a
# callable mapping wind speed (m/s) to electrical power (kW); capacities
# and lengths use the units encoded in the field names.
Turbine = namedtuple('Turbine', ('name',
                               'file_name',
                               'power_curve',
                               'capacity_mw',
                               'rotor_diameter_m',
                               'hub_height_m'))
def new_turbine_models():
    """All turbine models considered as repowering candidates."""
    candidates = (se_34m114, e138ep3, se_42m140, e126)
    return candidates
def turbine_from_nrel_sam(wind_turbine_model, file_name, name=None):
    """Pass one row of the CSV as `wind_turbine_model` and a machine-readable name as `file_name`"""
    speeds = [float(v) for v in wind_turbine_model['Wind Speed Array'].split('|')]
    powers_kw = [float(v) for v in wind_turbine_model['Power Curve Array'].split('|')]

    assert powers_kw[-1] == 0., "no cut-off in power curve, need to be fixed manually"

    # Extend the curve to 70 m/s so interpolation never runs out of range,
    # and anchor it at zero wind speed if the data does not start there.
    speeds.append(70.)
    powers_kw.append(0.)
    if speeds[0] > 0.:
        speeds.insert(0, 0.)
        powers_kw.insert(0, 0.)

    return Turbine(
        name=name or wind_turbine_model['Name'],
        file_name=file_name,
        power_curve=interp1d(speeds, powers_kw),
        capacity_mw=wind_turbine_model['KW Rating'] * 1e-3,
        rotor_diameter_m=wind_turbine_model['Rotor Diameter'],
        hub_height_m=None,
    )
# Turbine power-curve library shipped with NREL SAM; rows 1-2 after the
# header are skipped (presumably units/metadata -- TODO confirm).
wind_turbine_models = pd.read_csv(EXTERNAL_DIR / 'nrel-sam-powercurves' /
                                  'nrel-sam-wind-turbines.csv', skiprows=[1, 2])

# Specific turbine models built from rows of the NREL SAM table
# (the iloc indices refer to positions in that CSV).
se_34m114 = turbine_from_nrel_sam(wind_turbine_models.iloc[267], 'se_34m114',
                                  name='Senvion 3.4M114 (3.4MW, 114m)')
vestas_v42_600 = turbine_from_nrel_sam(wind_turbine_models.iloc[145], 'vestas_v42_600')
northwind100 = turbine_from_nrel_sam(wind_turbine_models.iloc[118], 'northwind100')
e44 = turbine_from_nrel_sam(wind_turbine_models.iloc[165], 'e44')
def power_curve_ge15_77():
    """Interpolated power curve (m/s -> kW) for the GE1.5-77.

    Sources:
    https://www.ge.com/in/wind-energy/1.5-MW-wind-turbine
    Hub height: 65 / 80m
    https://www.nrel.gov/docs/fy15osti/63684.pdf page 21
    https://geosci.uchicago.edu/~moyer/GEOS24705/Readings/GEA14954C15-MW-Broch.pdf
    """
    # Samples every 1.5 m/s up to 25.5 m/s, then zero output from 29 m/s on.
    speeds = np.hstack((np.arange(0, 27, step=1.5), [29., 70]))
    ramp_kw = [0., 0., 0., 70., 210., 520., 930., 1280., 1470, 1500.]
    output_kw = ramp_kw + [1500.] * 8 + [0., 0.]
    return interp1d(speeds, output_kw)
# GE1.5-77 instance: 1.5 MW, 77 m rotor. Two hub heights (65/80 m) exist
# per the curve sources above; none is modelled here.
ge15_77 = Turbine(
    name='GE1.5-77 (1.5MW, 77m)',
    file_name='ge15_77',
    power_curve=power_curve_ge15_77(),
    capacity_mw=1.5,
    rotor_diameter_m=77.,
    hub_height_m=None,
)
def power_curve_e138ep3():
    """Interpolated power curve (m/s -> kW) for the Enercon E-138 EP3.

    https://www.enercon.de/en/products/ep-3/e-138-ep3/
    Rated power 3,500 kW
    Rotor diameter 138,6 m
    Hub height in meter 81 / 111 / 131 / 160
    """
    # One sample per m/s from 0 to 25, cut-off (0 kW) at 26 m/s held to 70.
    speeds = np.hstack((np.linspace(0, 25, num=26), [26, 70]))
    ramp = [0., 0., 0., 30., 200., 490., 950., 1400., 2050., 2550., 3100., 3400., 3480]
    plateau_and_decline = [3500., 3500., 3500., 3500., 3500., 3500., 3500., 3500.,
                           3480., 3410., 3300, 3200., 3000.]
    output_kw = ramp + plateau_and_decline + [0., 0.]
    return interp1d(speeds, output_kw)
# Enercon E-138 EP3 instance: 3.5 MW, 138.6 m rotor. Several hub heights
# exist (81/111/131/160 m per the curve docstring); none is fixed here.
e138ep3 = Turbine(
    name='Enercon E-138 EP3 (3.5MW, 138m)',
    file_name='e138ep3',
    power_curve=power_curve_e138ep3(),
    capacity_mw=3.5,
    rotor_diameter_m=138.6,
    hub_height_m=None,
)
def power_curve_se_42m140():
    """Interpolated power curve (m/s -> kW) for the Senvion 4.2M140.

    Read off by hand from the official spec sheet:
    https://www.senvion.com/global/en/products-services/wind-turbines/4xm/
    """
    # 1 m/s resolution up to 3 m/s, 2 m/s steps from 4 to 26, cut-off at 27.
    speeds = np.hstack(([0., 1., 2., 3.], np.arange(4, 27, step=2), [27., 40.]))
    output_kw = ([0.] * 4
                 + [300., 1050., 2700., 4000.]
                 + [4200.] * 5
                 + [4000., 2500., 650.]
                 + [0.] * 2)
    return interp1d(speeds, output_kw)
# Senvion 4.2M140 instance: 4.2 MW, 140 m rotor; hub height not modelled.
se_42m140 = Turbine(
    name='Senvion 4.2M140 (4.2MW, 140m)',
    file_name='se_42m140',
    power_curve=power_curve_se_42m140(),
    capacity_mw=4.2,
    rotor_diameter_m=140.,
    hub_height_m=None,
)
def power_curve_e126():
    """Interpolated power curve (m/s -> kW) for the Enercon E-126
    (7.580MW Onshore).

    Extracted by hand from the official datasheet:
    https://www.enercon.de/produkte/ep-8/e-126/
    See also:
    https://www.enercon.de/fileadmin/Redakteur/Medien-Portal/broschueren/pdf/en/ENERCON_Produkt_en_06_2015.pdf
    Cut-out: 28-34m/s
    """
    # 1 m/s resolution from 0 to 28 m/s, zero output from 34 m/s on.
    speeds = np.hstack((np.arange(0, 29, step=1), [34., 70.]))
    ramp = [0., 0., 0., 100., 200., 400., 800., 1200., 2000., 2800., 3700., 4900.,
            5700., 6500., 7000., 7300.]
    output_kw = ramp + [7580.] * 13 + [0., 0.]
    return interp1d(speeds, output_kw)
# Enercon E-126 instance: 7.58 MW, 127 m rotor, 135 m hub height.
e126 = Turbine(
    name='Enercon E-126 (7.58MW, 127m)',
    file_name='e126',
    power_curve=power_curve_e126(),
    capacity_mw=7.58,
    rotor_diameter_m=127.,
    hub_height_m=135,
)
| [
"pandas.read_csv",
"numpy.arange",
"collections.namedtuple",
"numpy.linspace",
"scipy.interpolate.interp1d"
] | [((175, 289), 'collections.namedtuple', 'namedtuple', (['"""Turbine"""', "('name', 'file_name', 'power_curve', 'capacity_mw', 'rotor_diameter_m',\n 'hub_height_m')"], {}), "('Turbine', ('name', 'file_name', 'power_curve', 'capacity_mw',\n 'rotor_diameter_m', 'hub_height_m'))\n", (185, 289), False, 'from collections import namedtuple\n'), ((1552, 1654), 'pandas.read_csv', 'pd.read_csv', (["(EXTERNAL_DIR / 'nrel-sam-powercurves' / 'nrel-sam-wind-turbines.csv')"], {'skiprows': '[1, 2]'}), "(EXTERNAL_DIR / 'nrel-sam-powercurves' /\n 'nrel-sam-wind-turbines.csv', skiprows=[1, 2])\n", (1563, 1654), True, 'import pandas as pd\n'), ((2551, 2587), 'scipy.interpolate.interp1d', 'interp1d', (['wind_speeds', 'generation_kw'], {}), '(wind_speeds, generation_kw)\n', (2559, 2587), False, 'from scipy.interpolate import interp1d\n'), ((3333, 3369), 'scipy.interpolate.interp1d', 'interp1d', (['wind_speeds', 'generation_kw'], {}), '(wind_speeds, generation_kw)\n', (3341, 3369), False, 'from scipy.interpolate import interp1d\n'), ((4020, 4056), 'scipy.interpolate.interp1d', 'interp1d', (['wind_speeds', 'generation_kw'], {}), '(wind_speeds, generation_kw)\n', (4028, 4056), False, 'from scipy.interpolate import interp1d\n'), ((4940, 4976), 'scipy.interpolate.interp1d', 'interp1d', (['wind_speeds', 'generation_kw'], {}), '(wind_speeds, generation_kw)\n', (4948, 4976), False, 'from scipy.interpolate import interp1d\n'), ((1315, 1351), 'scipy.interpolate.interp1d', 'interp1d', (['wind_speeds', 'generation_kw'], {}), '(wind_speeds, generation_kw)\n', (1323, 1351), False, 'from scipy.interpolate import interp1d\n'), ((2399, 2425), 'numpy.arange', 'np.arange', (['(0)', '(27)'], {'step': '(1.5)'}), '(0, 27, step=1.5)\n', (2408, 2425), True, 'import numpy as np\n'), ((3043, 3069), 'numpy.linspace', 'np.linspace', (['(0)', '(25)'], {'num': '(26)'}), '(0, 25, num=26)\n', (3054, 3069), True, 'import numpy as np\n'), ((3822, 3846), 'numpy.arange', 'np.arange', (['(4)', '(27)'], {'step': 
'(2)'}), '(4, 27, step=2)\n', (3831, 3846), True, 'import numpy as np\n'), ((4629, 4653), 'numpy.arange', 'np.arange', (['(0)', '(29)'], {'step': '(1)'}), '(0, 29, step=1)\n', (4638, 4653), True, 'import numpy as np\n')] |
import numpy as np
from edutorch.nn import Linear
from tests.gradient_check import estimate_gradients
def test_linear_forward() -> None:
    """Forward pass of Linear matches hand-computed reference values."""
    batch, n_out = 2, 3
    feature_shape = (4, 5, 6)
    n_features = int(np.prod(feature_shape))

    x = np.linspace(-0.1, 0.5, num=batch * n_features).reshape(batch, *feature_shape)
    model = Linear(batch, n_out)
    model.w = np.linspace(-0.2, 0.3, num=n_features * n_out).reshape(n_features, n_out)
    model.b = np.linspace(-0.3, 0.1, num=n_out)

    expected = np.array(
        [[1.49834967, 1.70660132, 1.91485297], [3.25553199, 3.5141327, 3.77273342]]
    )
    assert np.allclose(model(x), expected)
def test_linear_backward() -> None:
    """Analytic gradients of Linear agree with numerical estimates."""
    x = np.random.randn(10, 2, 3)
    params = {"w": np.random.randn(6, 5), "b": np.random.randn(5)}
    dout = np.random.randn(10, 5)

    model = Linear(10, 5)
    dx_num, dw_num, db_num = estimate_gradients(model, dout, x, params)
    model(x)
    dx, dw, db = model.backward(dout)

    for numeric, analytic in ((dx_num, dx), (dw_num, dw), (db_num, db)):
        assert np.allclose(numeric, analytic)
| [
"numpy.random.randn",
"numpy.allclose",
"edutorch.nn.Linear",
"numpy.array",
"numpy.linspace",
"numpy.prod",
"tests.gradient_check.estimate_gradients"
] | [((400, 429), 'edutorch.nn.Linear', 'Linear', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (406, 429), False, 'from edutorch.nn import Linear\n'), ((554, 592), 'numpy.linspace', 'np.linspace', (['(-0.3)', '(0.1)'], {'num': 'output_dim'}), '(-0.3, 0.1, num=output_dim)\n', (565, 592), True, 'import numpy as np\n'), ((631, 721), 'numpy.array', 'np.array', (['[[1.49834967, 1.70660132, 1.91485297], [3.25553199, 3.5141327, 3.77273342]]'], {}), '([[1.49834967, 1.70660132, 1.91485297], [3.25553199, 3.5141327, \n 3.77273342]])\n', (639, 721), True, 'import numpy as np\n'), ((743, 772), 'numpy.allclose', 'np.allclose', (['out', 'correct_out'], {}), '(out, correct_out)\n', (754, 772), True, 'import numpy as np\n'), ((819, 844), 'numpy.random.randn', 'np.random.randn', (['(10)', '(2)', '(3)'], {}), '(10, 2, 3)\n', (834, 844), True, 'import numpy as np\n'), ((853, 874), 'numpy.random.randn', 'np.random.randn', (['(6)', '(5)'], {}), '(6, 5)\n', (868, 874), True, 'import numpy as np\n'), ((883, 901), 'numpy.random.randn', 'np.random.randn', (['(5)'], {}), '(5)\n', (898, 901), True, 'import numpy as np\n'), ((913, 935), 'numpy.random.randn', 'np.random.randn', (['(10)', '(5)'], {}), '(10, 5)\n', (928, 935), True, 'import numpy as np\n'), ((949, 962), 'edutorch.nn.Linear', 'Linear', (['(10)', '(5)'], {}), '(10, 5)\n', (955, 962), False, 'from edutorch.nn import Linear\n'), ((1023, 1065), 'tests.gradient_check.estimate_gradients', 'estimate_gradients', (['model', 'dout', 'x', 'params'], {}), '(model, dout, x, params)\n', (1041, 1065), False, 'from tests.gradient_check import estimate_gradients\n'), ((1133, 1156), 'numpy.allclose', 'np.allclose', (['dx_num', 'dx'], {}), '(dx_num, dx)\n', (1144, 1156), True, 'import numpy as np\n'), ((1168, 1191), 'numpy.allclose', 'np.allclose', (['dw_num', 'dw'], {}), '(dw_num, dw)\n', (1179, 1191), True, 'import numpy as np\n'), ((1203, 1226), 'numpy.allclose', 'np.allclose', (['db_num', 'db'], {}), '(db_num, db)\n', (1214, 
1226), True, 'import numpy as np\n'), ((234, 254), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (241, 254), True, 'import numpy as np\n'), ((286, 306), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (293, 306), True, 'import numpy as np\n'), ((501, 521), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (508, 521), True, 'import numpy as np\n'), ((316, 354), 'numpy.linspace', 'np.linspace', (['(-0.1)', '(0.5)'], {'num': 'input_size'}), '(-0.1, 0.5, num=input_size)\n', (327, 354), True, 'import numpy as np\n'), ((444, 483), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(0.3)'], {'num': 'weight_size'}), '(-0.2, 0.3, num=weight_size)\n', (455, 483), True, 'import numpy as np\n')] |
# Program 09c: Phase portrait and Poincare section of a nonautonomous ODE.
# See Figure 9.11(b).
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
# Plot window limits for the phase portrait.
xmin, xmax = -2, 2
ymin, ymax = -2, 2
# Parameters of the periodically forced oscillator in dx_dt below:
# damping k, forcing frequency omega, forcing amplitude gamma.
k = 0.3
omega = 1.25
gamma = 0.5
def dx_dt(x, t):
    """Right-hand side of the forced oscillator as a first-order system."""
    pos, vel = x
    return [vel, pos - k * vel - pos ** 3 + gamma * np.cos(omega * t)]
# Phase portrait
# Integrate from (x, y) = (1, 0) over t in [0, 500] and plot the trajectory.
t = np.linspace(0, 500, 10000)
xs = odeint(dx_dt, [1,0], t)
plt.plot(xs[:, 0], xs[:, 1], 'r-', lw=0.5)
plt.xlabel('x', fontsize=15)
plt.ylabel('y', fontsize=15)
plt.tick_params(labelsize=15)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.title('Phase portrait')

# The Poincare section.
# Integrate over 4000 forcing periods (period 2*pi/omega) with 4000 samples
# per period, then keep exactly one sample per period (every 4000th point).
# NOTE: 16,000,000 integration points - this step is slow and memory-heavy.
fig, ax = plt.subplots(figsize=(6, 6))
t = np.linspace(0, 4000 * (2*np.pi) / omega, 16000000)
xs = odeint(dx_dt, [1, 0], t)
x = [xs[4000*i, 0] for i in range(4000)]
y = [xs[4000*i, 1] for i in range(4000)]
ax.scatter(x, y, color='blue', s=0.1)
plt.xlabel('x', fontsize=15)
plt.ylabel('y', fontsize=15)
plt.tick_params(labelsize=15)
plt.title('The Poincare section')
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"scipy.integrate.odeint",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplo... | [((364, 390), 'numpy.linspace', 'np.linspace', (['(0)', '(500)', '(10000)'], {}), '(0, 500, 10000)\n', (375, 390), True, 'import numpy as np\n'), ((396, 420), 'scipy.integrate.odeint', 'odeint', (['dx_dt', '[1, 0]', 't'], {}), '(dx_dt, [1, 0], t)\n', (402, 420), False, 'from scipy.integrate import odeint\n'), ((420, 462), 'matplotlib.pyplot.plot', 'plt.plot', (['xs[:, 0]', 'xs[:, 1]', '"""r-"""'], {'lw': '(0.5)'}), "(xs[:, 0], xs[:, 1], 'r-', lw=0.5)\n", (428, 462), True, 'import matplotlib.pyplot as plt\n'), ((463, 491), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {'fontsize': '(15)'}), "('x', fontsize=15)\n", (473, 491), True, 'import matplotlib.pyplot as plt\n'), ((492, 520), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {'fontsize': '(15)'}), "('y', fontsize=15)\n", (502, 520), True, 'import matplotlib.pyplot as plt\n'), ((521, 550), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(15)'}), '(labelsize=15)\n', (536, 550), True, 'import matplotlib.pyplot as plt\n'), ((551, 571), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (559, 571), True, 'import matplotlib.pyplot as plt\n'), ((572, 592), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (580, 592), True, 'import matplotlib.pyplot as plt\n'), ((593, 620), 'matplotlib.pyplot.title', 'plt.title', (['"""Phase portrait"""'], {}), "('Phase portrait')\n", (602, 620), True, 'import matplotlib.pyplot as plt\n'), ((656, 684), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (668, 684), True, 'import matplotlib.pyplot as plt\n'), ((689, 741), 'numpy.linspace', 'np.linspace', (['(0)', '(4000 * (2 * np.pi) / omega)', '(16000000)'], {}), '(0, 4000 * (2 * np.pi) / omega, 16000000)\n', (700, 741), True, 'import numpy as np\n'), ((745, 769), 'scipy.integrate.odeint', 'odeint', (['dx_dt', '[1, 0]', 't'], {}), '(dx_dt, [1, 0], t)\n', (751, 769), 
False, 'from scipy.integrate import odeint\n'), ((892, 920), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {'fontsize': '(15)'}), "('x', fontsize=15)\n", (902, 920), True, 'import matplotlib.pyplot as plt\n'), ((921, 949), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {'fontsize': '(15)'}), "('y', fontsize=15)\n", (931, 949), True, 'import matplotlib.pyplot as plt\n'), ((950, 979), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(15)'}), '(labelsize=15)\n', (965, 979), True, 'import matplotlib.pyplot as plt\n'), ((980, 1013), 'matplotlib.pyplot.title', 'plt.title', (['"""The Poincare section"""'], {}), "('The Poincare section')\n", (989, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1025), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1023, 1025), True, 'import matplotlib.pyplot as plt\n'), ((325, 342), 'numpy.cos', 'np.cos', (['(omega * t)'], {}), '(omega * t)\n', (331, 342), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
#%%
# 删除文件中的空白行
# 参考:https://www.cnblogs.com/billyzh/p/5851429.html
def delblankline(infile, outfile):
    """Copy `infile` to `outfile` with blank lines removed.

    A line counts as blank when it contains only whitespace. Returns a
    numpy array holding, for each blank line encountered, the number of
    non-blank lines seen before it (i.e. its index in the cleaned file).

    Reference: https://www.cnblogs.com/billyzh/p/5851429.html
    """
    blank_line = []
    line_cnt = 0
    # Context managers guarantee both handles are closed even on error
    # (the original left them open if an exception occurred in between).
    with open(infile, 'r') as infopen, open(outfile, 'w') as outfopen:
        for line in infopen:
            if line.split():
                outfopen.writelines(line)
                line_cnt = line_cnt + 1
            else:
                # writelines("") was a no-op in the original; only the
                # position of the blank line is recorded.
                blank_line.append(line_cnt)
    return np.array(blank_line)
#####################################
########## remove blank lines and load the data ##########
# Hardware (modelsim) results under test.
filename = '../../05_modelsim/cnn-result-data_under_test.txt'
delblankline(filename, filename+'.del.txt')
x = np.loadtxt(filename+'.del.txt')
# Floating-point reference results from the Keras model.
filename = '../../../python/keras_cnn/isa-npu/ver_compare/sp-5.txt'
blank_line = delblankline(filename, filename+'.del.txt')[1:]
y = np.loadtxt(filename+'.del.txt')
layer = np.zeros(y.shape)
# blank_line holds the start index of each blank-separated section of the
# reference file - presumably one section per network layer (TODO confirm).
for t in range(blank_line.shape[0]):
    layer[blank_line[t]] = t
###################################################################
NO = 10 # dimension of the output vector
plt.figure(figsize=(9,9))
x2 = x
# Compare only the overlapping prefix of the two result vectors.
x = x[:y.shape[0]]
# The 2**16 divisor rescales the raw values - presumably fixed-point with
# 16 fractional bits (TODO confirm against the RTL).
error = np.absolute(x-y)/2**16
error_rel = error/(y/2**16 + 1e-3)
error_out = np.absolute(x[x.shape[0]-NO:x.shape[0]]-y[y.shape[0]-NO:y.shape[0]])/2**16
plt.subplot(4,1,1); plt.plot(error); plt.title('absolute error for every step'); plt.xlabel('step'); plt.ylabel('absolute error');
plt.subplot(4,1,2); plt.plot(y/2**16); plt.title('float-point result for every step'); plt.xlabel('step'); plt.ylabel('fp result');
plt.subplot(4,1,3); plt.plot(error_rel); plt.title('relative error for every step [%f]'%(np.mean(error_rel))); plt.xlabel('step'); plt.ylabel('relative error'); plt.ylim((0, 5))
plt.subplot(4,1,4); plt.plot(error_out); plt.title('absolute error for output'); plt.xlabel('output index'); plt.ylabel('absolute error');
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.20, right=0.9, hspace=0.6, wspace=0.5) | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.absolute",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"numpy.loadtxt",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.... | [((783, 816), 'numpy.loadtxt', 'np.loadtxt', (["(filename + '.del.txt')"], {}), "(filename + '.del.txt')\n", (793, 816), True, 'import numpy as np\n'), ((948, 981), 'numpy.loadtxt', 'np.loadtxt', (["(filename + '.del.txt')"], {}), "(filename + '.del.txt')\n", (958, 981), True, 'import numpy as np\n'), ((988, 1005), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (996, 1005), True, 'import numpy as np\n'), ((1158, 1184), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 9)'}), '(figsize=(9, 9))\n', (1168, 1184), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1383), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (1374, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1383, 1398), 'matplotlib.pyplot.plot', 'plt.plot', (['error'], {}), '(error)\n', (1391, 1398), True, 'import matplotlib.pyplot as plt\n'), ((1400, 1442), 'matplotlib.pyplot.title', 'plt.title', (['"""absolute error for every step"""'], {}), "('absolute error for every step')\n", (1409, 1442), True, 'import matplotlib.pyplot as plt\n'), ((1444, 1462), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""step"""'], {}), "('step')\n", (1454, 1462), True, 'import matplotlib.pyplot as plt\n'), ((1464, 1492), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""absolute error"""'], {}), "('absolute error')\n", (1474, 1492), True, 'import matplotlib.pyplot as plt\n'), ((1494, 1514), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(2)'], {}), '(4, 1, 2)\n', (1505, 1514), True, 'import matplotlib.pyplot as plt\n'), ((1514, 1535), 'matplotlib.pyplot.plot', 'plt.plot', (['(y / 2 ** 16)'], {}), '(y / 2 ** 16)\n', (1522, 1535), True, 'import matplotlib.pyplot as plt\n'), ((1533, 1579), 'matplotlib.pyplot.title', 'plt.title', (['"""float-point result for every step"""'], {}), "('float-point result for every step')\n", (1542, 1579), True, 'import matplotlib.pyplot as plt\n'), ((1581, 1599), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""step"""'], {}), "('step')\n", (1591, 1599), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1624), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fp result"""'], {}), "('fp result')\n", (1611, 1624), True, 'import matplotlib.pyplot as plt\n'), ((1626, 1646), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(3)'], {}), '(4, 1, 3)\n', (1637, 1646), True, 'import matplotlib.pyplot as plt\n'), ((1646, 1665), 'matplotlib.pyplot.plot', 'plt.plot', (['error_rel'], {}), '(error_rel)\n', (1654, 1665), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1755), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""step"""'], {}), "('step')\n", (1747, 1755), True, 'import matplotlib.pyplot as plt\n'), ((1757, 1785), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""relative error"""'], {}), "('relative error')\n", (1767, 1785), True, 'import matplotlib.pyplot as plt\n'), ((1787, 1803), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 5)'], {}), '((0, 5))\n', (1795, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1824), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(4)'], {}), '(4, 1, 4)\n', (1815, 1824), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1843), 'matplotlib.pyplot.plot', 'plt.plot', (['error_out'], {}), '(error_out)\n', (1832, 1843), True, 'import matplotlib.pyplot as plt\n'), ((1845, 1883), 'matplotlib.pyplot.title', 'plt.title', (['"""absolute error for output"""'], {}), "('absolute error for output')\n", (1854, 1883), True, 'import matplotlib.pyplot as plt\n'), ((1885, 1911), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""output index"""'], {}), "('output index')\n", (1895, 1911), True, 'import matplotlib.pyplot as plt\n'), ((1913, 1941), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""absolute error"""'], {}), "('absolute error')\n", (1923, 1941), True, 'import matplotlib.pyplot as plt\n'), ((1943, 2034), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.92)', 'bottom': 
'(0.08)', 'left': '(0.2)', 'right': '(0.9)', 'hspace': '(0.6)', 'wspace': '(0.5)'}), '(top=0.92, bottom=0.08, left=0.2, right=0.9, hspace=0.6,\n wspace=0.5)\n', (1962, 2034), True, 'import matplotlib.pyplot as plt\n'), ((556, 576), 'numpy.array', 'np.array', (['blank_line'], {}), '(blank_line)\n', (564, 576), True, 'import numpy as np\n'), ((1218, 1236), 'numpy.absolute', 'np.absolute', (['(x - y)'], {}), '(x - y)\n', (1229, 1236), True, 'import numpy as np\n'), ((1288, 1362), 'numpy.absolute', 'np.absolute', (['(x[x.shape[0] - NO:x.shape[0]] - y[y.shape[0] - NO:y.shape[0]])'], {}), '(x[x.shape[0] - NO:x.shape[0]] - y[y.shape[0] - NO:y.shape[0]])\n', (1299, 1362), True, 'import numpy as np\n'), ((1715, 1733), 'numpy.mean', 'np.mean', (['error_rel'], {}), '(error_rel)\n', (1722, 1733), True, 'import numpy as np\n')] |
import sys
import os
import argparse
import time
import gensim
import random
sys.path.insert(0, '../markov/')
import markov_python3
import numpy as np
import scipy.spatial.distance
class Sentence(object):
    """Iterate over whitespace-tokenized, lower-cased lines of every file
    in a directory (the streamed-corpus shape passed to gensim Word2Vec
    elsewhere in this script)."""

    def __init__(self, dirname):
        self.dirname = dirname

    def __iter__(self):
        for fname in os.listdir(self.dirname):
            # 'with' closes every corpus file; the original leaked the
            # handle of each opened file.
            with open(os.path.join(self.dirname, fname)) as corpus_file:
                for line in corpus_file:
                    yield line.lower().split()
def process_arguments(args):
    """Parse the command-line arguments into a plain dict."""
    parser = argparse.ArgumentParser(description='configure the irc clients')
    for option, text in (
            ('--path', 'the path to a folder containing text files'),
            ('--google_path', 'the path to the pretrained google model'),
            ('--method', 'the test function to use')):
        parser.add_argument(option, action='store', help=text)
    return vars(parser.parse_args(args))
def avg_feature_vector(words, model, num_features):
    """Average the embedding vectors of all in-vocabulary `words`.

    Out-of-vocabulary words are skipped; if no word is known at all, the
    zero vector of length `num_features` is returned.
    """
    total = np.zeros((num_features,), dtype="float64")
    known = 0
    # Membership is checked against model.vocab (old gensim API); indexing
    # the model with a word yields its embedding vector.
    for word in words:
        if word in model.vocab:
            known = known + 1
            total = np.add(total, model[word])
    if known > 0:
        total = np.divide(total, known)
    return total
def first_testing(model_selftrained, features):
    """Print cosine similarities between a few hand-picked sentences."""
    raw = [
        'This attribution is putting the other in a condition of authority and assurance',
        'condition of a superior joke, that by rise up is to say the promotion of new statuses, new powers.',
        'Feels stange, of course, being perceived as not human.',
        'of human expression',
        'This joke is putting other into a weird condition',
    ]
    # Strip punctuation and lower-case before averaging the word vectors.
    cleaned = [s.replace('.', '').lower().replace(',', '') for s in raw]
    vectors = [avg_feature_vector(s.split(), model=model_selftrained, num_features=features)
               for s in cleaned]
    # Compare sentence a against b, itself, e and d (cosine similarity).
    for other in (vectors[1], vectors[0], vectors[4], vectors[3]):
        print(1 - scipy.spatial.distance.cosine(vectors[0], other))
def second_training(google_model, path, features):
    """Sample random sentence pairs from the corpus under `path` and print
    the most cosine-similar pairs according to `google_model`.

    Loads every line of every file, draws 1,000,000 random pairs, scores
    each pair via cosine similarity of averaged word vectors and prints the
    30 best-scoring pairs.
    """
    lines = []
    for fname in os.listdir(path):
        # 'with' closes each corpus file (the original leaked the handles).
        with open(os.path.join(path, fname)) as corpus_file:
            for line in corpus_file:
                lines.append(line.lower())
    log = []
    print('Collected ' + str(len(lines)) + ' lines.')
    t0 = time.time()
    for i in range(1000000):
        random_a = random.choice(lines)
        random_b = random.choice(lines)
        random_a_vec = avg_feature_vector(random_a.split(), model=google_model, num_features=features)
        random_b_vec = avg_feature_vector(random_b.split(), model=google_model, num_features=features)
        similarity = 1 - scipy.spatial.distance.cosine(random_a_vec, random_b_vec)
        log.append((random_a, random_b, similarity))
    t1 = time.time()
    print('calculating all vectors took ' + str(t1-t0) + 's')
    # Sort by similarity, best first (the original's lambda shadowed 'log').
    log.sort(key=lambda entry: entry[2], reverse=True)
    print('Best results:')
    for i in range(30):
        print('Index: ' + str(i))
        print(log[i][0])
        print(log[i][1])
        print('with similarity: ' + str(log[i][2]))
def train_markovs(path, max_markov=30):
    """Train one Markov chain per text file under `path`.

    Chains built from files with 200 lines or fewer are discarded. At most
    `max_markov` chains are returned.
    """
    markovs = []
    for fname in os.listdir(path):
        # Stop once the cap is reached. The original tested '>' which let
        # one chain too many (max_markov + 1) accumulate before breaking.
        if len(markovs) >= max_markov:
            break
        print('Start training markov from ' + fname)
        markov_chain = markov_python3.Markov(prefix=fname)
        line_count = 0
        # 'with' closes the corpus file (the original leaked the handle).
        with open(os.path.join(path, fname)) as corpus_file:
            for line in corpus_file:
                markov_chain.add_line_to_index(line.lower().split())
                line_count += 1
        print('Done training markov from ' + fname)
        # Chains from very short files carry too little material - drop them.
        if line_count > 200:
            markovs.append(markov_chain)
    return markovs
def third_testing(path, google_path, features):
    """For every trained Markov chain, generate sentences and print the
    closest corpus line under both a self-trained word2vec model and the
    pretrained Google model.

    NOTE(review): the *_google names below are computed from
    model_selftrained and the *_own names from the Google model loaded
    here - the labels appear swapped relative to their sources (including
    the 'closest via google' / 'closest via own model' output). Verify
    which pairing was intended.
    """
    markovs = train_markovs(path=path, max_markov=120)
    print('Done Training Markovs')
    # Pretrained Google News vectors (old gensim load API).
    model = gensim.models.Word2Vec.load_word2vec_format(google_path, binary=True)
    print('Done loading Google model')
    # Word2vec trained on the local corpus streamed by Sentence.
    model_selftrained = gensim.models.Word2Vec(Sentence(path), min_count=5, size=features, workers=8)
    print('Done training own model')
    _t0 = time.time()
    # Pre-compute one averaged vector per corpus line under both models so
    # the comparison loop below only does cosine distances.
    lines_vectors_google = []
    lines_vectors_own = []
    for fname in os.listdir(path):
        for line in open(os.path.join(path, fname)):
            line_low = line.lower()
            vector_google = avg_feature_vector(line_low.split(), model=model_selftrained, num_features=features)
            vector_own = avg_feature_vector(line_low.split(), model=model, num_features=features)
            lines_vectors_google.append((line_low, vector_google))
            lines_vectors_own.append((line_low, vector_own))
    _t1 = time.time()
    print('Calculating all vectors on google/own models for sentences from own corpus done')
    print('That took ' + str(int(_t1-_t0)) + 's. It was done for ' + str(len(lines_vectors_google)) + ' lines')
    markov_dict = {}
    for markov in markovs:
        generated_sentences = []
        print('----------------------------------------')
        print(markov.prefix)
        # Generate 10 sentences per chain and find the most similar corpus
        # line under each model (linear scan over all pre-computed vectors).
        for i in range(10):
            t0 = time.time()
            sentence = markov.generate(max_words=100)
            sentence_vec_google = avg_feature_vector(' '.join(sentence).lower().split(), model=model_selftrained, num_features=features)
            sentence_vec_own = avg_feature_vector(' '.join(sentence).lower().split(), model=model, num_features=features)
            # iterating through all vectors of all existing text lines from our corpus
            biggest_similarity_google = 0.0
            biggest_similarity_sentence_google = ''
            biggest_similarity_own = 0.0
            biggest_similarity_sentence_own = ''
            for index_corpus_line in range(len(lines_vectors_google)):
                vec_google = lines_vectors_google[index_corpus_line][1]
                vec_own = lines_vectors_own[index_corpus_line][1]
                similarity_google = 1 - scipy.spatial.distance.cosine(vec_google, sentence_vec_google)
                similarity_own = 1 - scipy.spatial.distance.cosine(vec_own, sentence_vec_own)
                if similarity_google > biggest_similarity_google:
                    biggest_similarity_google = similarity_google
                    biggest_similarity_sentence_google = lines_vectors_google[index_corpus_line][0]
                if similarity_own > biggest_similarity_own:
                    biggest_similarity_own = similarity_own
                    biggest_similarity_sentence_own = lines_vectors_own[index_corpus_line][0]
            t1 = time.time()
            print('----------------------------------------')
            print('markov: ' + ' '.join(sentence))
            print('closest via google: ' + biggest_similarity_sentence_google)
            print('closest via own model: ' + biggest_similarity_sentence_own)
            #print('Calculating this took: ' + str(int(t1-t0)) + 's')
            generated_sentences.append(sentence)
        markov_dict[markov] = generated_sentences
if __name__ == '__main__':
    # Parse CLI options; features is the word-vector dimensionality used
    # throughout (300, presumably to match the pretrained Google vectors -
    # confirm against the model file).
    params = process_arguments(sys.argv[1:])
    features = 300
    method = params['method']
    path = params['path']
    google_path = params['google_path']
    # NOTE(review): "method in 'first'" is a substring test, not equality -
    # e.g. method 'f', 'irs' or '' also selects this branch. '==' was most
    # likely intended (same for the 'google' and 'markov' branches below).
    if method in 'first':
        sentences = Sentence(path)
        model_selftrained = gensim.models.Word2Vec(sentences, min_count=5, size=features, workers=8)
        first_testing(model_selftrained, features=features)
    elif method in 'google':
        t0 = time.time()
        model = gensim.models.Word2Vec.load_word2vec_format(google_path, binary=True)
        t1 = time.time()
        print('Loading the google model took ' + str(t1-t0) + 's')
        second_training(google_model=model, path=path, features=features)
    elif method in 'markov':
        # loading google model and own model (for comparing)
third_testing(path=path, google_path=google_path, features=features) | [
"numpy.divide",
"argparse.ArgumentParser",
"gensim.models.Word2Vec.load_word2vec_format",
"numpy.zeros",
"sys.path.insert",
"random.choice",
"time.time",
"gensim.models.Word2Vec",
"markov_python3.Markov",
"numpy.add",
"os.path.join",
"os.listdir"
] | [((77, 109), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../markov/"""'], {}), "(0, '../markov/')\n", (92, 109), False, 'import sys\n'), ((529, 593), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""configure the irc clients"""'}), "(description='configure the irc clients')\n", (552, 593), False, 'import argparse\n'), ((1082, 1124), 'numpy.zeros', 'np.zeros', (['(num_features,)'], {'dtype': '"""float64"""'}), "((num_features,), dtype='float64')\n", (1090, 1124), True, 'import numpy as np\n'), ((3442, 3458), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3452, 3458), False, 'import os\n'), ((3660, 3671), 'time.time', 'time.time', ([], {}), '()\n', (3669, 3671), False, 'import time\n'), ((4132, 4143), 'time.time', 'time.time', ([], {}), '()\n', (4141, 4143), False, 'import time\n'), ((4519, 4535), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4529, 4535), False, 'import os\n'), ((5198, 5267), 'gensim.models.Word2Vec.load_word2vec_format', 'gensim.models.Word2Vec.load_word2vec_format', (['google_path'], {'binary': '(True)'}), '(google_path, binary=True)\n', (5241, 5267), False, 'import gensim\n'), ((5457, 5468), 'time.time', 'time.time', ([], {}), '()\n', (5466, 5468), False, 'import time\n'), ((5582, 5598), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (5592, 5598), False, 'import os\n'), ((6038, 6049), 'time.time', 'time.time', ([], {}), '()\n', (6047, 6049), False, 'import time\n'), ((316, 340), 'os.listdir', 'os.listdir', (['self.dirname'], {}), '(self.dirname)\n', (326, 340), False, 'import os\n'), ((1541, 1570), 'numpy.divide', 'np.divide', (['featureVec', 'nwords'], {}), '(featureVec, nwords)\n', (1550, 1570), True, 'import numpy as np\n'), ((3720, 3740), 'random.choice', 'random.choice', (['lines'], {}), '(lines)\n', (3733, 3740), False, 'import random\n'), ((3760, 3780), 'random.choice', 'random.choice', (['lines'], {}), '(lines)\n', (3773, 3780), False, 'import random\n'), ((4669, 4704), 
'markov_python3.Markov', 'markov_python3.Markov', ([], {'prefix': 'fname'}), '(prefix=fname)\n', (4690, 4704), False, 'import markov_python3\n'), ((8669, 8741), 'gensim.models.Word2Vec', 'gensim.models.Word2Vec', (['sentences'], {'min_count': '(5)', 'size': 'features', 'workers': '(8)'}), '(sentences, min_count=5, size=features, workers=8)\n', (8691, 8741), False, 'import gensim\n'), ((1404, 1435), 'numpy.add', 'np.add', (['featureVec', 'model[word]'], {}), '(featureVec, model[word])\n', (1410, 1435), True, 'import numpy as np\n'), ((3485, 3510), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), '(path, fname)\n', (3497, 3510), False, 'import os\n'), ((4753, 4778), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), '(path, fname)\n', (4765, 4778), False, 'import os\n'), ((5625, 5650), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), '(path, fname)\n', (5637, 5650), False, 'import os\n'), ((6468, 6479), 'time.time', 'time.time', ([], {}), '()\n', (6477, 6479), False, 'import time\n'), ((7939, 7950), 'time.time', 'time.time', ([], {}), '()\n', (7948, 7950), False, 'import time\n'), ((8844, 8855), 'time.time', 'time.time', ([], {}), '()\n', (8853, 8855), False, 'import time\n'), ((8872, 8941), 'gensim.models.Word2Vec.load_word2vec_format', 'gensim.models.Word2Vec.load_word2vec_format', (['google_path'], {'binary': '(True)'}), '(google_path, binary=True)\n', (8915, 8941), False, 'import gensim\n'), ((8955, 8966), 'time.time', 'time.time', ([], {}), '()\n', (8964, 8966), False, 'import time\n'), ((371, 404), 'os.path.join', 'os.path.join', (['self.dirname', 'fname'], {}), '(self.dirname, fname)\n', (383, 404), False, 'import os\n')] |
'''
A* algorithm
use networkx
'''
import networkx as nx
from copy import deepcopy
import numpy as np
from config import CFG
import datetime
import time
import math
class A_star:
    """A*-style best-first search over environment states.

    States are stored as nodes of a networkx graph; each node records its
    state, a network-predicted value, its depth from the root, a score
    f(n), and a parent pointer.  f(n) = log_{CFG.value_decay_rate}(value)
    + depth (see get_score); the open-list node with the lowest f(n) is
    expanded next.

    NOTE(review): this code uses the ``G.node`` attribute view, which was
    removed in networkx 2.4 -- it requires networkx < 2.4 (``G.nodes`` is
    the modern spelling).
    """
    def __init__(self, net, A_star_sim = 1024, check_repeat_state = True):
        '''
        :param net: neural network used to predict state values
        :param A_star_sim: maximum number of node expansions per search
        :param check_repeat_state:
            if True: duplicate states are merged (search graph)
            if False: duplicates are only checked against the parent (search tree)
        '''
        self.env = None
        self.net = net
        self.A_star_sim = A_star_sim
        self.check_repeat_state = check_repeat_state
        # Full duplicate detection subsumes the cheaper parent-only check,
        # so exactly one of the two is active at a time.
        if self.check_repeat_state:
            self.check_parent_node = False
        else:
            self.check_parent_node = True
    def expand_node(self):
        """Expand the selected node: apply every valid action to its state,
        add the (deduplicated) children, then move the node from the open
        list to the closed list."""
        # Restore the environment to the selected node's state.
        self.env.state = deepcopy(self.G.node[self.selected_node]['state'])
        valid_moves = self.env.get_valid_moves()
        # Per-action value predictions for (state, target); indexed below as value_pi[i].
        value_pi = self.net.predict_value_policy([self.env.state, self.env.target_state])
        do_not_check_parent = False
        for i in range(len(valid_moves)):
            if valid_moves[i][0] != 0:
                # Re-copy the state for every action: execute_action mutates it.
                self.env.state = deepcopy(self.G.node[self.selected_node]['state'])
                self.env.execute_action(valid_moves[i])
                same_state = False
                same_parent_state = False
                if self.check_repeat_state:
                    same_state = self.check_same_state()
                if self.check_parent_node: # tree mode: only guard against undoing the previous move
                    if not do_not_check_parent:
                        same_parent_state = self.check_same_parent_node()
                        if same_parent_state:
                            # Presumably at most one action recreates the parent
                            # state, so skip the check for the remaining actions.
                            do_not_check_parent = True
                if not same_state and not same_parent_state:
                    self.store_node_infos(i, value_pi[i]) # add new node
            # impossible action: still recorded for the root (node number -1)
            # so the root policy keeps one entry per action
            else:
                if self.selected_node == 0:
                    self.child_node_Nsa.append([-1, 0, i])
        # add expanded node to closed node list and drop it from the open list
        self.closed_node_list.append([self.selected_node, self.G.node[self.selected_node]['score']])
        for open_idx in self.open_node_list:
            if open_idx[0] == self.selected_node:
                self.open_node_list.remove([self.selected_node, open_idx[1]])
                break
    def check_same_parent_node(self):
        """Return True if the candidate state in self.env equals the state of
        the selected node's parent (i.e. the action undid the last move)."""
        same_state = np.array_equal(self.G.node[self.G.node[self.selected_node]['parent_node']]['state'], self.env.state)
        if same_state:
            return True
        else:
            return False
    def check_same_state(self):
        """Return True if self.env.state already exists in the open or closed
        list; on an open-list hit, re-parent the duplicate when the new path
        through the selected node is shorter (lower score)."""
        same_state_exist = False
        # check for same state in open node list
        for i in range(len(self.open_node_list)):
            # if same state exist
            if np.array_equal(self.env.state, self.G.node[self.open_node_list[i][0]]['state']):
                same_state_exist = True
                self.get_score(self.selected_node)
                # Candidate score when reaching the duplicate through the
                # selected node (one extra step).
                tmp_score = self.G.node[self.selected_node]['score'] + 1
                if self.open_node_list[i][1] > tmp_score:
                    self.open_node_list[i][1] = deepcopy(tmp_score)
                    self.G.node[self.open_node_list[i][0]]['parent_node'] = self.selected_node
                break
        # check for same state in closed node list
        if not same_state_exist:
            for i in range(len(self.closed_node_list)):
                if np.array_equal(self.env.state, self.G.node[self.closed_node_list[i][0]]['state']):
                    same_state_exist = True
                    break
        return same_state_exist
    def store_node_infos(self, action_num, predicted_value):
        """Create a child node of the selected node from self.env.state and
        put it on the open list; root children are also tracked in
        child_node_Nsa as [node number, visit count, action index]."""
        # create new node and store infos
        new_node_num = len(self.G.node)
        self.G.add_node(new_node_num)
        self.G.add_edge(self.selected_node, new_node_num)
        self.G.node[new_node_num]['state'] = deepcopy(self.env.state)
        self.get_value(new_node_num, predicted_value)
        self.G.node[new_node_num]['dist_from_root'] = self.G.node[self.selected_node]['dist_from_root'] + 1
        self.get_score(new_node_num)
        self.G.node[new_node_num]['parent_node'] = self.selected_node
        self.open_node_list.append([new_node_num, self.G.node[new_node_num]['score']])
        if self.selected_node == 0:
            self.child_node_Nsa.append([new_node_num, 0, action_num])
    def select_node(self):
        '''
        Select the node with the lowest score f(n) in the open node list
        and store it in self.selected_node.
        '''
        open_node_score_list =[i[1] for i in self.open_node_list]
        chosen_score = min(open_node_score_list)
        chosen_idx = open_node_score_list.index(chosen_score)
        self.selected_node = self.open_node_list[chosen_idx][0]
    def get_Nsa(self):
        """Accumulate visit counts: each closed node is credited to the root
        child whose subtree contains it (found by walking parent pointers
        up to a node listed in child_node_Nsa), then copy the counts into
        self.pi_Nsa."""
        for closed_node_idx in range(len(self.closed_node_list)):
            node_num = self.closed_node_list[closed_node_idx][0]
            child_node_found = False
            while True:
                for child_node_num in range(len(self.child_node_Nsa)):
                    if self.child_node_Nsa[child_node_num][0] == node_num:
                        child_node_found = True
                        self.child_node_Nsa[child_node_num][1] += 1
                        break
                if child_node_found == True:
                    break
                node_num = self.G.node[node_num]['parent_node']
        for i in range(len(self.child_node_Nsa)):
            self.pi_Nsa.append(self.child_node_Nsa[i][1])
    def get_score(self, node_num):
        # f(n) = h(n) + g(n): heuristic term log base CFG.value_decay_rate of
        # the predicted value, plus the depth from the root.
        self.G.node[node_num]['score'] = math.log(self.G.node[node_num]['value'], CFG.value_decay_rate) + self.G.node[node_num]['dist_from_root']
    def get_value(self, node_num, predicted_value):
        """Store the node's value; if the environment reports the target state
        reached, force the value to 1 and remember the target node."""
        done, _ = self.env.check_target_state_reached()
        if done:
            predicted_value = 1.
            self.target_state_reached = True
            self.target_node_num = node_num
        else:
            if predicted_value == 0:
                # store a very small number if the predicted value is 0, since h(n) uses log
                predicted_value = 10**(-100)
        self.G.node[node_num]['value'] = predicted_value
    ## Writing complete; debug verification still needed (translated from Korean)
    def get_tar_reached_child_node(self):
        """Return the index (into child_node_Nsa) of the root child lying on
        the path from the root to the target-reaching node, or -1."""
        child_node_found = False
        node_num = self.target_node_num
        action_num = -1
        while True:
            for child_node_num in range(len(self.child_node_Nsa)):
                if self.child_node_Nsa[child_node_num][0] == node_num:
                    child_node_found = True
                    action_num = child_node_num
                    break
            if child_node_found:
                break
            node_num = self.G.node[node_num]['parent_node']
        return action_num
    def get_pi(self):
        """Return the policy vector: normalized root-child visit counts, or a
        one-hot vector on the action leading to the target if it was reached."""
        # calculate pi with the Nsa list
        if not self.target_state_reached:
            self.get_Nsa()
            sum_Nsa = sum(self.pi_Nsa)
            Nsa_list = []
            for i in range(len(self.pi_Nsa)):
                Nsa_list.append(self.pi_Nsa[i]/sum_Nsa)
            psa_vector = np.array(Nsa_list)
        else:
            child_node_num = self.get_tar_reached_child_node()
            psa_vector = [0] * self.env.action_size
            psa_vector[child_node_num] = 1
        return psa_vector
    def search(self, env):
        """Run up to A_star_sim expansions starting from env's current state
        and return the resulting policy vector; env.state is restored to the
        root state afterwards."""
        self.env = env
        self.open_node_list = [] # [node number, f(n)]
        self.closed_node_list = []
        self.selected_node = 0
        self.pi_Nsa = []
        self.target_state_reached = False
        self.target_node_num = -1
        self.child_node_Nsa = []
        self.G = nx.Graph()
        self.G.add_node(0)
        self.G.node[0]['state'] = self.env.state
        self.get_value(0, 0) # (node number, predicted value(value pi))
        self.G.node[0]['dist_from_root'] = 0
        self.get_score(0) # in this code f(n) will be called score
        self.G.node[0]['parent_node'] = 0
        self.open_node_list.append([0, self.G.node[0]['score']])
        for _ in range(self.A_star_sim):
            self.expand_node()
            self.select_node()
            if self.target_state_reached:
                break
        # The root is not inside any root child's subtree; drop it before counting.
        del self.closed_node_list[0]
        self.env.state = deepcopy(self.G.node[0]['state'])
        psa_vector = self.get_pi()
        return psa_vector
| [
"copy.deepcopy",
"networkx.Graph",
"numpy.array",
"numpy.array_equal",
"math.log"
] | [((833, 883), 'copy.deepcopy', 'deepcopy', (["self.G.node[self.selected_node]['state']"], {}), "(self.G.node[self.selected_node]['state'])\n", (841, 883), False, 'from copy import deepcopy\n'), ((2522, 2627), 'numpy.array_equal', 'np.array_equal', (["self.G.node[self.G.node[self.selected_node]['parent_node']]['state']", 'self.env.state'], {}), "(self.G.node[self.G.node[self.selected_node]['parent_node']][\n 'state'], self.env.state)\n", (2536, 2627), True, 'import numpy as np\n'), ((4045, 4069), 'copy.deepcopy', 'deepcopy', (['self.env.state'], {}), '(self.env.state)\n', (4053, 4069), False, 'from copy import deepcopy\n'), ((7773, 7783), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (7781, 7783), True, 'import networkx as nx\n'), ((8388, 8421), 'copy.deepcopy', 'deepcopy', (["self.G.node[0]['state']"], {}), "(self.G.node[0]['state'])\n", (8396, 8421), False, 'from copy import deepcopy\n'), ((2924, 3003), 'numpy.array_equal', 'np.array_equal', (['self.env.state', "self.G.node[self.open_node_list[i][0]]['state']"], {}), "(self.env.state, self.G.node[self.open_node_list[i][0]]['state'])\n", (2938, 3003), True, 'import numpy as np\n'), ((5721, 5783), 'math.log', 'math.log', (["self.G.node[node_num]['value']", 'CFG.value_decay_rate'], {}), "(self.G.node[node_num]['value'], CFG.value_decay_rate)\n", (5729, 5783), False, 'import math\n'), ((7228, 7246), 'numpy.array', 'np.array', (['Nsa_list'], {}), '(Nsa_list)\n', (7236, 7246), True, 'import numpy as np\n'), ((1173, 1223), 'copy.deepcopy', 'deepcopy', (["self.G.node[self.selected_node]['state']"], {}), "(self.G.node[self.selected_node]['state'])\n", (1181, 1223), False, 'from copy import deepcopy\n'), ((3573, 3659), 'numpy.array_equal', 'np.array_equal', (['self.env.state', "self.G.node[self.closed_node_list[i][0]]['state']"], {}), "(self.env.state, self.G.node[self.closed_node_list[i][0]][\n 'state'])\n", (3587, 3659), True, 'import numpy as np\n'), ((3276, 3295), 'copy.deepcopy', 'deepcopy', (['tmp_score'], {}), 
'(tmp_score)\n', (3284, 3295), False, 'from copy import deepcopy\n')] |
from tensorflow.keras.applications.imagenet_utils import preprocess_input as efficientnet_preprocess_input
from tensorflow.keras.layers import Activation
from tensorflow.keras.backend import sigmoid, constant
from tensorflow.keras.initializers import Initializer
from torch.nn import ConvTranspose2d, init
from torch import Tensor
import numpy as np
import math
from skimage.transform import rescale
from skimage.util import pad as padding
from scipy.ndimage.filters import gaussian_filter
class Swish(Activation):
    """Keras Activation subclass that reports itself as 'Swish'.

    Wraps an arbitrary activation callable so that model summaries and
    serialization use the custom name instead of the generic one.
    """

    def __init__(self, activation, **kwargs):
        # Zero-argument super() is equivalent to super(Swish, self) in Python 3.
        super().__init__(activation, **kwargs)
        self.__name__ = 'Swish'
def swish1(x):
    """Standard Swish activation: x * sigmoid(x).

    Args:
        x: Keras tensor
            Input tensor
    Returns:
        Output tensor of the Swish transformation.
    """
    gate = sigmoid(x)
    return x * gate
def eswish(x):
    """E-swish activation: beta * x * sigmoid(x) with beta fixed at 1.25.

    Args:
        x: Keras tensor
            Input tensor
    Returns:
        Output tensor of the E-swish transformation.
    """
    return 1.25 * x * sigmoid(x)
class keras_BilinearWeights(Initializer):
    """
    A Keras initializer producing fixed bilinear-upsampling weights,
    adapted from an implementation shared in the tensorlayer issue tracker
    (https://github.com/tensorlayer/tensorlayer/issues/53).
    """

    def __init__(self, shape=None, dtype=None):
        self.shape = shape
        self.dtype = dtype

    def __call__(self, shape=None, dtype=None):
        """Return a (k, k, c, c) constant tensor performing bilinear upsampling.

        The `dtype` argument is deliberately ignored: the kernel is always
        built as float32.  (The original line read `self.dtype = type=np.float32`,
        which needlessly shadowed the builtin `type`; it is now a plain
        assignment with identical behavior.)
        """
        if shape:
            self.shape = shape
        self.dtype = np.float32  # Overwrites argument

        filter_size = self.shape[0]
        num_channels = self.shape[2]

        # Build the 2-D bilinear interpolation kernel.
        bilinear_kernel = np.zeros([filter_size, filter_size], dtype=self.dtype)
        scale_factor = (filter_size + 1) // 2
        if filter_size % 2 == 1:
            center = scale_factor - 1
        else:
            center = scale_factor - 0.5
        for x in range(filter_size):
            for y in range(filter_size):
                bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * \
                                        (1 - abs(y - center) / scale_factor)

        # Place the same kernel on each channel's diagonal: no cross-channel mixing.
        weights = np.zeros((filter_size, filter_size, num_channels, num_channels))
        for i in range(num_channels):
            weights[:, :, i, i] = bilinear_kernel
        return constant(value=weights)

    def get_config(self):
        """Return the configuration needed to re-create this initializer."""
        return {'shape': self.shape}
class pytorch_BilinearConvTranspose2d(ConvTranspose2d):
    """
    A transposed convolution whose weights are initialized to perform bilinear
    upsampling, by mjstevens777
    (https://gist.github.com/mjstevens777/9d6771c45f444843f9e3dce6a401b183).
    """
    def __init__(self, channels, kernel_size, stride, groups=1):
        """Set up the layer.

        Parameters
        ----------
        channels: int
            The number of input and output channels
        kernel_size: int
            Side length of the square transposed-convolution kernel
        stride: int or tuple
            The amount of upsampling to do
        groups: int
            Set to 1 for a standard convolution. Set equal to channels to
            make sure there is no cross-talk between channels.
        """
        if isinstance(stride, int):
            stride = (stride, stride)

        assert groups in (1, channels), "Must use no grouping, " + \
            "or one group per channel"

        # Padding chosen so that the output is exactly stride times larger.
        padding = (stride[0] - 1, stride[1] - 1)
        super().__init__(
            channels, channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups)

    def reset_parameters(self):
        """Reset the weight and bias to fixed bilinear-upsampling values."""
        # `init.constant` was deprecated in favor of the in-place
        # `init.constant_`; use the supported spelling.
        init.constant_(self.bias, 0)
        init.constant_(self.weight, 0)

        bilinear_kernel = self.bilinear_kernel(self.kernel_size[0])
        for i in range(self.in_channels):
            # groups == 1: kernel goes on the diagonal (i, i);
            # groups == channels: each group has a single filter at index 0.
            if self.groups == 1:
                j = i
            else:
                j = 0
            self.weight.data[i, j] = bilinear_kernel

    @staticmethod
    def bilinear_kernel(kernel_size):
        """Generate a (kernel_size, kernel_size) bilinear upsampling kernel."""
        bilinear_kernel = np.zeros([kernel_size, kernel_size])
        scale_factor = (kernel_size + 1) // 2
        if kernel_size % 2 == 1:
            center = scale_factor - 1
        else:
            center = scale_factor - 0.5
        for x in range(kernel_size):
            for y in range(kernel_size):
                bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * \
                                        (1 - abs(y - center) / scale_factor)
        return Tensor(bilinear_kernel)
def resize(source_array, target_height, target_width):
    """
    Resizes an image or image-like Numpy array to be no larger than (target_height, target_width) or (target_height, target_width, c).

    Args:
        source_array: ndarray
            Numpy array of shape (h, w) or (h, w, 3)
        target_height: int
            Desired maximum height
        target_width: int
            Desired maximum width

    Returns:
        Resized Numpy array.
    """
    # Get height and width of source array
    source_height, source_width = source_array.shape[:2]

    # Pick the scale that fits the more constrained dimension.
    target_ratio = target_height / target_width
    source_ratio = source_height / source_width
    if target_ratio > source_ratio:
        scale = target_width / source_width
    else:
        scale = target_height / source_height

    # Only treat the last axis as channels when the input actually has one:
    # the original always passed multichannel=True, which mis-handles the
    # documented (h, w) grayscale case by leaving the width axis unscaled.
    # NOTE(review): `multichannel` was removed in scikit-image 0.19+ in
    # favour of `channel_axis` -- confirm the pinned skimage version.
    resized_array = rescale(source_array, scale,
                            multichannel=(source_array.ndim == 3))

    return resized_array
def pad(source_array, target_height, target_width):
    """
    Pads an image or image-like Numpy array with zeros to fit the target-size.

    Args:
        source_array: ndarray
            Numpy array of shape (h, w) or (h, w, 3)
        target_height: int
            Height of padded image
        target_width: int
            Width of padded image

    Returns:
        Zero-padded Numpy array of shape (target_height, target_width) or (target_height, target_width, c).
    """
    # Get height and width of source array
    source_height, source_width = source_array.shape[:2]

    # Ensure array is resized properly before padding
    if (source_height > target_height) or (source_width > target_width):
        source_array = resize(source_array, target_height, target_width)
        source_height, source_width = source_array.shape[:2]

    # Compute symmetric padding; any odd remainder goes to the right/bottom.
    pad_left = int((target_width - source_width) / 2)
    pad_top = int((target_height - source_height) / 2)
    pad_right = int(target_width - source_width - pad_left)
    pad_bottom = int(target_height - source_height - pad_top)
    paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
    has_channels_dim = len(source_array.shape) == 3
    if has_channels_dim:
        paddings.append([0, 0])

    # skimage.util.pad was only a deprecated alias of numpy.pad (removed in
    # scikit-image 0.19), so call numpy directly.
    target_array = np.pad(source_array, paddings, 'constant')

    return target_array
def preprocess(batch, resolution, lite=False):
    """
    Preprocess a batch of frames according to model preferences.

    Args:
        batch: ndarray
            Numpy array of shape (n, h, w, 3)
        resolution: int
            Input height and width of model to utilize
        lite: boolean
            Defines if EfficientPose Lite model is used

    Returns:
        Preprocessed Numpy array of shape (n, resolution, resolution, 3).
    """
    prepared = []
    for frame in batch:
        # Fit each frame inside a quadratic (resolution x resolution) canvas,
        # then convert normalized pixels back to absolute RGB values.
        scaled = resize(frame, resolution, resolution)
        squared = pad(scaled, resolution, resolution)
        prepared.append(np.uint8(255 * squared))
    batch = np.asarray(prepared)

    # Lite models use 'tf' scaling, full models use 'torch' normalization.
    mode = 'tf' if lite else 'torch'
    return efficientnet_preprocess_input(batch, mode=mode)
def extract_coordinates(frame_output, frame_height, frame_width, real_time=False):
    """
    Extract body-part coordinates from a stack of confidence maps.

    Args:
        frame_output: ndarray
            Numpy array of shape (h, w, c), one confidence map per body part
        frame_height: int
            Height of relevant frame
        frame_width: int
            Width of relevant frame
        real_time: boolean
            Defines if processing is performed in real-time

    Returns:
        List of (body_part_name, x, y) tuples with coordinates normalized to
        the original frame's aspect ratio; low-confidence real-time detections
        are flagged with negative coordinates.
    """
    body_parts = ['head_top', 'upper_neck', 'right_shoulder', 'right_elbow', 'right_wrist', 'thorax', 'left_shoulder', 'left_elbow', 'left_wrist', 'pelvis', 'right_hip', 'right_knee', 'right_ankle', 'left_hip', 'left_knee', 'left_ankle']
    confidence = 0.3
    output_height, output_width = frame_output.shape[0:2]

    frame_coords = []
    for part_index in range(frame_output.shape[-1]):
        heatmap = frame_output[..., part_index]
        # Offline mode smooths the map before locating the peak.
        if not real_time:
            heatmap = gaussian_filter(heatmap, sigma=1.)
        flat_index = np.argmax(heatmap)
        row, col = divmod(int(flat_index), output_width)

        if real_time and heatmap[row, col] < confidence:
            # Flag low-confidence detections with negative coordinates.
            peak_x, peak_y = -0.5, -0.5
        else:
            # Shift to the pixel center.
            peak_x, peak_y = col + 0.5, row + 0.5

        # Normalize to [0, 1] in heatmap space.
        peak_x /= output_width
        peak_y /= output_height

        # Undo the quadratic padding applied during preprocessing.
        if frame_width > frame_height:
            margin = (frame_width - frame_height) / (2 * frame_width)
            peak_y = (peak_y - margin) / (1.0 - (2 * margin))
            if peak_y < 0.0:
                peak_y = -0.5 / output_height
            elif peak_y > 1.0:
                peak_y = 1.0
        elif frame_width < frame_height:
            margin = (frame_height - frame_width) / (2 * frame_height)
            peak_x = (peak_x - margin) / (1.0 - (2 * margin))
            if peak_x < 0.0:
                peak_x = -0.5 / output_width
            elif peak_x > 1.0:
                peak_x = 1.0

        frame_coords.append((body_parts[part_index], peak_x, peak_y))

    return frame_coords
def display_body_parts(image, image_draw, coordinates, image_height=1024, image_width=1024, marker_radius=5):
    """
    Draw one colored circular marker per predicted body part.

    Args:
        image: PIL Image
            The loaded image the coordinate predictions are inferred for
        image_draw: PIL ImageDraw module
            Module for performing drawing operations
        coordinates: List
            Predicted body part coordinates in image (normalized to [0, 1])
        image_height: int
            Height of image
        image_width: int
            Width of image
        marker_radius: int
            Radius of marker

    Returns:
        Instance of PIL image with annotated body part predictions.
    """
    # One fixed color per body part, in the order of the coordinates list.
    body_part_colors = ['#fff142', '#fff142', '#576ab1', '#5883c4', '#56bdef', '#f19718', '#d33592', '#d962a6', '#e18abd', '#f19718', '#8ac691', '#a3d091', '#bedb8f', '#7b76b7', '#907ab8', '#a97fb9']

    for index, (_, norm_x, norm_y) in enumerate(coordinates):
        center_x = norm_x * image_width
        center_y = norm_y * image_height
        bounding_box = [(center_x - marker_radius, center_y - marker_radius),
                        (center_x + marker_radius, center_y + marker_radius)]
        image_draw.ellipse(bounding_box, fill=body_part_colors[index])

    return image
def display_segments(image, image_draw, coordinates, image_height=1024, image_width=1024, segment_width=5):
    """
    Draw line segments connecting associated body parts.

    Args:
        image: PIL Image
            The loaded image the coordinate predictions are inferred for
        image_draw: PIL ImageDraw module
            Module for performing drawing operations
        coordinates: List
            Predicted body part coordinates in image (normalized to [0, 1])
        image_height: int
            Height of image
        image_width: int
            Width of image
        segment_width: int
            Width of association line between markers

    Returns:
        Instance of PIL image with annotated body part segments.
    """
    # Skeleton topology: index pairs into the coordinates list; each segment
    # is colored after its end body part.
    segments = [(0, 1), (1, 5), (5, 2), (5, 6), (5, 9), (2, 3), (3, 4), (6, 7), (7, 8), (9, 10), (9, 13), (10, 11), (11, 12), (13, 14), (14, 15)]
    segment_colors = ['#fff142', '#fff142', '#576ab1', '#5883c4', '#56bdef', '#f19718', '#d33592', '#d962a6', '#e18abd', '#f19718', '#8ac691', '#a3d091', '#bedb8f', '#7b76b7', '#907ab8', '#a97fb9']

    for start_index, end_index in segments:
        _, start_x, start_y = coordinates[start_index]
        _, end_x, end_y = coordinates[end_index]
        start_point = (start_x * image_width, start_y * image_height)
        end_point = (end_x * image_width, end_y * image_height)
        image_draw.line([start_point, end_point], fill=segment_colors[end_index], width=segment_width)

    return image
def display_camera(cv2, frame, coordinates, frame_height, frame_width):
    """
    Annotate and display a camera frame with predicted body parts and segments.

    Args:
        cv2: OpenCV
            Imported OpenCV instance
        frame: ndarray
            Numpy array of shape (h, w, 3)
        coordinates: List
            Predicted body part coordinates in frame (normalized to [0, 1];
            negative values flag low-confidence detections and are skipped)
        frame_height: int
            Height of frame
        frame_width: int
            Width of frame
    """
    # Skeleton topology and per-body-part BGR colors.
    segments = [(0, 1), (1, 5), (5, 2), (5, 6), (5, 9), (2, 3), (3, 4), (6, 7), (7, 8), (9, 10), (9, 13), (10, 11), (11, 12), (13, 14), (14, 15)]
    body_part_colors = [(66, 241, 255), (66, 241, 255), (177, 106, 87), (196, 131, 88), (239, 189, 86), (24, 151, 241), (146, 53, 211), (166, 98, 217), (189, 138, 225), (24, 151, 241), (145, 198, 138), (145, 208, 163), (143, 219, 190), (183, 118, 123), (184, 122, 144), (185, 127, 169)]

    # Each body part gets its circle drawn exactly once.
    unmarked = set(range(len(body_part_colors)))
    for start_index, end_index in segments:
        _, start_nx, start_ny = coordinates[start_index]
        _, end_nx, end_ny = coordinates[end_index]
        start_point = (int(start_nx * frame_width), int(start_ny * frame_height))
        end_point = (int(end_nx * frame_width), int(end_ny * frame_height))
        # Skip segments touching a flagged (negative) coordinate.
        if min(start_point + end_point) >= 0:
            cv2.line(frame, start_point, end_point, color=body_part_colors[start_index], thickness=2)
            if start_index in unmarked:
                cv2.circle(frame, start_point, radius=3, color=body_part_colors[start_index], thickness=2)
                unmarked.remove(start_index)
            if end_index in unmarked:
                cv2.circle(frame, end_point, radius=3, color=body_part_colors[end_index], thickness=2)
                unmarked.remove(end_index)

    # Mirror for a selfie-style view, upscale, and display.
    frame = cv2.resize(cv2.flip(frame, 1), (1000, 1000))
    cv2.imshow('EfficientPose (Groos et al., 2020)', frame)
"scipy.ndimage.filters.gaussian_filter",
"numpy.uint8",
"tensorflow.keras.backend.sigmoid",
"skimage.transform.rescale",
"tensorflow.keras.applications.imagenet_utils.preprocess_input",
"numpy.argmax",
"numpy.asarray",
"skimage.util.pad",
"numpy.zeros",
"math.floor",
"torch.Tensor",
"torch.nn.... | [((5696, 5743), 'skimage.transform.rescale', 'rescale', (['source_array', 'scale'], {'multichannel': '(True)'}), '(source_array, scale, multichannel=True)\n', (5703, 5743), False, 'from skimage.transform import rescale\n'), ((7119, 7162), 'skimage.util.pad', 'padding', (['source_array', 'paddings', '"""constant"""'], {}), "(source_array, paddings, 'constant')\n", (7126, 7162), True, 'from skimage.util import pad as padding\n'), ((8053, 8070), 'numpy.asarray', 'np.asarray', (['batch'], {}), '(batch)\n', (8063, 8070), True, 'import numpy as np\n'), ((946, 956), 'tensorflow.keras.backend.sigmoid', 'sigmoid', (['x'], {}), '(x)\n', (953, 956), False, 'from tensorflow.keras.backend import sigmoid, constant\n'), ((1219, 1229), 'tensorflow.keras.backend.sigmoid', 'sigmoid', (['x'], {}), '(x)\n', (1226, 1229), False, 'from tensorflow.keras.backend import sigmoid, constant\n'), ((1871, 1925), 'numpy.zeros', 'np.zeros', (['[filter_size, filter_size]'], {'dtype': 'self.dtype'}), '([filter_size, filter_size], dtype=self.dtype)\n', (1879, 1925), True, 'import numpy as np\n'), ((2384, 2448), 'numpy.zeros', 'np.zeros', (['(filter_size, filter_size, num_channels, num_channels)'], {}), '((filter_size, filter_size, num_channels, num_channels))\n', (2392, 2448), True, 'import numpy as np\n'), ((2561, 2584), 'tensorflow.keras.backend.constant', 'constant', ([], {'value': 'weights'}), '(value=weights)\n', (2569, 2584), False, 'from tensorflow.keras.backend import sigmoid, constant\n'), ((3823, 3850), 'torch.nn.init.constant', 'init.constant', (['self.bias', '(0)'], {}), '(self.bias, 0)\n', (3836, 3850), False, 'from torch.nn import ConvTranspose2d, init\n'), ((3859, 3888), 'torch.nn.init.constant', 'init.constant', (['self.weight', '(0)'], {}), '(self.weight, 0)\n', (3872, 3888), False, 'from torch.nn import ConvTranspose2d, init\n'), ((4283, 4319), 'numpy.zeros', 'np.zeros', (['[kernel_size, kernel_size]'], {}), '([kernel_size, kernel_size])\n', (4291, 4319), True, 
'import numpy as np\n'), ((4742, 4765), 'torch.Tensor', 'Tensor', (['bilinear_kernel'], {}), '(bilinear_kernel)\n', (4748, 4765), False, 'from torch import Tensor\n'), ((7959, 7980), 'numpy.uint8', 'np.uint8', (['(255 * frame)'], {}), '(255 * frame)\n', (7967, 7980), True, 'import numpy as np\n'), ((8134, 8181), 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'efficientnet_preprocess_input', (['batch'], {'mode': '"""tf"""'}), "(batch, mode='tf')\n", (8163, 8181), True, 'from tensorflow.keras.applications.imagenet_utils import preprocess_input as efficientnet_preprocess_input\n'), ((8208, 8258), 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'efficientnet_preprocess_input', (['batch'], {'mode': '"""torch"""'}), "(batch, mode='torch')\n", (8237, 8258), True, 'from tensorflow.keras.applications.imagenet_utils import preprocess_input as efficientnet_preprocess_input\n'), ((9579, 9594), 'numpy.argmax', 'np.argmax', (['conf'], {}), '(conf)\n', (9588, 9594), True, 'import numpy as np\n'), ((9526, 9558), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['conf'], {'sigma': '(1.0)'}), '(conf, sigma=1.0)\n', (9541, 9558), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((9618, 9654), 'math.floor', 'math.floor', (['(max_index / output_width)'], {}), '(max_index / output_width)\n', (9628, 9654), False, 'import math\n')] |
'''
This script creates json files which can be used to render Manhattan plots.
'''
# TODO: combine with QQ.
from ..utils import chrom_order
from ..conf_utils import conf
from ..file_utils import VariantFileReader, write_json, common_filepaths
from .load_utils import MaxPriorityQueue, parallelize_per_pheno
import numpy as np
import math,time
def timeit(f):
    """Decorator that prints the wall-clock runtime of each call to *f*.

    The wrapped function's return value is passed through unchanged.
    """
    from functools import wraps

    @wraps(f)  # preserve f.__name__/__doc__ on the wrapper
    def timed(*args, **kw):
        start = time.time()
        result = f(*args, **kw)
        elapsed = time.time() - start
        print(f"func:{f.__name__} took: {round(elapsed,4)} sec")
        return result
    return timed
@timeit
def run(argv):
    """Entry point: build Manhattan-plot JSON files for every phenotype in parallel."""
    def input_paths(pheno):
        # Path of the per-phenotype association results.
        return common_filepaths['pheno'](pheno['phenocode'])

    def output_paths(pheno):
        # Path of the per-phenotype Manhattan JSON to write.
        return common_filepaths['manhattan'](pheno['phenocode'])

    parallelize_per_pheno(
        get_input_filepaths=input_paths,
        get_output_filepaths=output_paths,
        convert=create_manhattan,
        cmd='manhattan',
    )
@timeit
def create_manhattan(pheno):
    """Write the Manhattan JSON for a single phenotype."""
    phenocode = pheno['phenocode']
    source_path = common_filepaths['pheno'](phenocode)
    target_path = common_filepaths['manhattan'](phenocode)
    make_json_file(source_path, target_path)
@timeit
def make_json_file(result_file, output_file, write_as_given=False):
    """Bin the variants from *result_file*, flag peaks, and write the
    Manhattan JSON payload to *output_file*."""
    bin_length = int(3e6)
    neglog10_bin_size = 0.05   # p-value bin width on the -log10 scale (0.05, 0.1, ...)
    neglog10_bin_digits = 2    # rounding precision for the binned values

    with VariantFileReader(result_file) as variants:
        variant_bins, unbinned_variants = bin_variants(
            variants,
            bin_length,
            neglog10_bin_size,
            neglog10_bin_digits,
        )

    # Marks peak variants in-place on the unbinned list.
    np_label(unbinned_variants)

    payload = {
        'variant_bins': variant_bins,
        'unbinned_variants': unbinned_variants,
    }
    write_json(filepath=output_file, data=payload, write_as_given=write_as_given)
def rounded_neglog10(pval, neglog10_pval_bin_size, neglog10_pval_bin_digits):
    """Floor -log10(pval) to its bin's lower edge, rounded for output."""
    neglog = -math.log10(pval)
    bin_floor = neglog // neglog10_pval_bin_size * neglog10_pval_bin_size
    return round(bin_floor, neglog10_pval_bin_digits)
def get_pvals_and_pval_extents(pvals, neglog10_pval_bin_size):
    """Compress sorted -log10 p-values into isolated points and runs.

    Adjacent values closer than ~1.1 bins are merged into one run; runs of a
    single value are returned as points, longer runs as [start, end] extents.
    Returns the tuple (points, extents).
    """
    # expects that neglog10_pval_bin_size is the distance between adjacent bins
    ordered = sorted(pvals)
    runs = [[ordered[0], ordered[0]]]
    for value in ordered:
        # 1.1 gives a little slack for floating-point rounding of bin edges.
        if runs[-1][1] + neglog10_pval_bin_size * 1.1 > value:
            runs[-1][1] = value
        else:
            runs.append([value, value])

    single_pvals, extents = [], []
    for start, end in runs:
        if start == end:
            single_pvals.append(start)
        else:
            extents.append([start, end])
    return (single_pvals, extents)
# TODO: convert bins from {(chrom, pos): []} to {chrom:{pos:[]}}?
@timeit
def bin_variants(variant_iterator, bin_length, neglog10_pval_bin_size, neglog10_pval_bin_digits):
    """Split variants into a small "unbinned" set of top hits plus positional bins.

    The most significant variants (bounded by conf.manhattan_num_unbinned,
    with a separate, smaller budget for the HLA region on chromosome 6) are
    kept individually; everything else is collapsed into (chromosome,
    position-bin) buckets that only record which -log10 p-value bins occur.

    Returns (binned_variants, unbinned_variants).
    """
    bins = {}  # (chrom_key, pos_bin) -> bin dict
    unbinned_variant_pq = MaxPriorityQueue()
    chrom_n_bins = {}  # chrom_key -> highest pos_bin seen (for the unroll below)
    def bin_variant(variant):
        # Collapse a variant into its positional bin, creating the bin on demand.
        chrom_key = chrom_order[variant['chrom']]
        pos_bin = variant['pos'] // bin_length
        chrom_n_bins[chrom_key] = max(chrom_n_bins.get(chrom_key,0), pos_bin)
        if (chrom_key, pos_bin) in bins:
            bin = bins[(chrom_key, pos_bin)]
        else:
            bin = {"chrom": variant['chrom'],
                   "startpos": pos_bin * bin_length,
                   "neglog10_pvals": set()}
            bins[(chrom_key, pos_bin)] = bin
        #TODO review with juha
        # Prefer a precomputed -log10 p ('mlogp') when present; otherwise
        # derive it from the raw p-value.
        if 'mlogp' in variant:
            bin["neglog10_pvals"].add(round(variant['mlogp'] // neglog10_pval_bin_size * neglog10_pval_bin_size, neglog10_pval_bin_digits))
        else:
            bin["neglog10_pvals"].add(rounded_neglog10(variant['pval'], neglog10_pval_bin_size, neglog10_pval_bin_digits))
    # put most-significant variants into the priority queues and bin the rest
    # (queue overflow evicts the worst entry, which then gets binned)
    hla_variant_pq =MaxPriorityQueue()
    gw_sig_pq = MaxPriorityQueue()  # NOTE(review): never used below -- dead variable?
    for variant in variant_iterator:
        # HLA-region variants on chr6 compete in their own, smaller queue so
        # they cannot crowd out hits elsewhere in the genome.
        if variant['chrom']=="6" and variant['pos'] > conf.hla_begin and variant['pos'] < conf.hla_end:
            hla_variant_pq.add(variant, variant['pval'])
            if( len(hla_variant_pq) > conf.manhattan_hla_num_unbinned ):
                old = hla_variant_pq.pop()
                bin_variant(old)
            continue
        else:
            unbinned_variant_pq.add(variant, variant['pval'])
            if len(unbinned_variant_pq) > conf.manhattan_num_unbinned:
                old = unbinned_variant_pq.pop()
                # Variants below this p-value threshold stay unbinned regardless
                # of the queue-size budget.
                if old['pval'] < conf.manhattan_unbin_anyway_pval:
                    unbinned_variant_pq.add(old, old['pval'])
                else:
                    bin_variant(old)
    # Keep only HLA variants at least as significant as the weakest
    # genome-wide unbinned variant.
    max_p = unbinned_variant_pq.peek()
    add_hla = list(hla_variant_pq.pop_all())
    for v in filter(lambda x: x['pval']<=max_p['pval'], add_hla ):
        unbinned_variant_pq.add(v, v['pval'])
    unbinned_variants = list(unbinned_variant_pq.pop_all())
    # unroll bins into simple array (preserving chromosomal order)
    binned_variants = []
    for chrom_key in sorted(chrom_n_bins.keys()):
        for pos_key in range(int(1+chrom_n_bins[chrom_key])):
            b = bins.get((chrom_key, pos_key), None)
            if b and len(b['neglog10_pvals']) != 0:
                b['neglog10_pvals'], b['neglog10_pval_extents'] = get_pvals_and_pval_extents(b['neglog10_pvals'], neglog10_pval_bin_size)
                # Represent each bin by its center position.
                b['pos'] = int(b['startpos'] + bin_length/2)
                del b['startpos']
                binned_variants.append(b)
    return binned_variants, unbinned_variants
@timeit
def np_label(variants,check = False):
    """Flag peak variants in-place (sets variant['peak'] = True).

    Numpy version of label_peaks: per chromosome, repeatedly take the
    lowest-p-value variant still unmasked and mask out all variants within
    conf.within_pheno_mask_around_peak of its position.

    :param variants: list of variant dicts (mutated in place)
    :param check: when True (or for inputs under 1000 variants) cross-check
        the result against the pure-python label_peaks implementation
    """
    chroms = {}
    print(len(variants))
    for v in variants:
        #kind of like a defaultdict. if it's the first variant of the chromosome it initalizes an empty list.
        #Now that the value must be a list, we can just append the variant to the chrom specific variant list
        chroms.setdefault(v['chrom'], []).append(v)
    peak_variants = []
    for vs in chroms.values():
        print(f"chrom:{vs[0]['chrom']}")
        # (pos, pval) rows for the whole chromosome, plus a lookup back to
        # the original dicts keyed by position.
        var_array = np.zeros((len(vs),2))
        pos_dict = {}
        for i,v in enumerate(vs):
            var_array[i] = v['pos'],v['pval']
            pos_dict[v['pos']] = v
        while len(var_array):
            # lowest p-value among the variants still unmasked is the next peak
            min_pval_idx = np.argmin(var_array[:,1])
            pos = var_array[min_pval_idx][0]
            # drop everything within the masking window around the peak
            # (including the peak itself, since |pos - pos| == 0)
            filter_mask = np.abs(var_array[:,0] - pos) > conf.within_pheno_mask_around_peak
            var_array = var_array[filter_mask]
            # NOTE(review): pos is a numpy float here while pos_dict keys are
            # the original 'pos' values -- this relies on hash(2.0) == hash(2).
            best_assoc = pos_dict[pos]
            best_assoc['peak'] = True
            peak_variants.append(best_assoc)
    if check or len(variants) < 1000:
        # label_peaks returns the same (now mutated) dict objects, so list
        # equality validates that both algorithms pick identical peaks.
        assert label_peaks(variants) == peak_variants
        print('new method works')
def label_peaks(variants):
    """Greedy per-chromosome peak picking (pure-python reference version).

    Repeatedly takes the most significant remaining variant on each
    chromosome and discards every variant within
    conf.within_pheno_mask_around_peak of its position.

    Returns the list of selected peak variant dicts.
    """
    print(len(variants))
    grouped = {}
    for variant in variants:
        # setdefault acts like a defaultdict: the first variant of each
        # chromosome creates its list, later ones just append.
        grouped.setdefault(variant['chrom'], []).append(variant)

    peak_variants = []
    for chrom_variants in grouped.values():
        print(f"chrom:{chrom_variants[0]['chrom']}")
        while chrom_variants:
            best = min(chrom_variants, key=lambda assoc: assoc['pval'])
            window = conf.within_pheno_mask_around_peak
            chrom_variants = [v for v in chrom_variants
                              if abs(v['pos'] - best['pos']) > window]
            peak_variants.append(best)
    return peak_variants
| [
"math.log10",
"numpy.abs",
"numpy.argmin",
"time.time"
] | [((408, 419), 'time.time', 'time.time', ([], {}), '()\n', (417, 419), False, 'import math, time\n'), ((465, 476), 'time.time', 'time.time', ([], {}), '()\n', (474, 476), False, 'import math, time\n'), ((6239, 6265), 'numpy.argmin', 'np.argmin', (['var_array[:, 1]'], {}), '(var_array[:, 1])\n', (6248, 6265), True, 'import numpy as np\n'), ((6391, 6420), 'numpy.abs', 'np.abs', (['(var_array[:, 0] - pos)'], {}), '(var_array[:, 0] - pos)\n', (6397, 6420), True, 'import numpy as np\n'), ((1823, 1839), 'math.log10', 'math.log10', (['pval'], {}), '(pval)\n', (1833, 1839), False, 'import math, time\n')] |
import numpy as np
from bokeh.io import curdoc, show
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Slider
from bokeh.plotting import figure
N = 100
x_ = np.linspace(0, 10, 200)
y_ = np.linspace(0, 10, 200)
z_ = np.linspace(0, 10, N)
x, y, z = np.meshgrid(x_, y_, z_, indexing='xy')
data = np.sin(x+z)*np.cos(y)
source = ColumnDataSource(data=dict(image=[data[:, :, 0]]))
p = figure(x_range=(0, 10), y_range=(0, 10))
p.image(image='image', x=0, y=0, dw=10, dh=10, source=source, palette="Spectral11")
slider = Slider(start=0, end=(N-1), value=0, step=1, title="Frame")
def update(attr, old, new):
source.data = dict(image=[data[:, :, slider.value]])
slider.on_change('value', update)
curdoc().add_root(column(p, slider))
show(p) | [
"numpy.meshgrid",
"bokeh.plotting.figure",
"bokeh.models.Slider",
"bokeh.io.show",
"bokeh.io.curdoc",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"bokeh.layouts.column"
] | [((186, 209), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(200)'], {}), '(0, 10, 200)\n', (197, 209), True, 'import numpy as np\n'), ((215, 238), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(200)'], {}), '(0, 10, 200)\n', (226, 238), True, 'import numpy as np\n'), ((244, 265), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', 'N'], {}), '(0, 10, N)\n', (255, 265), True, 'import numpy as np\n'), ((277, 315), 'numpy.meshgrid', 'np.meshgrid', (['x_', 'y_', 'z_'], {'indexing': '"""xy"""'}), "(x_, y_, z_, indexing='xy')\n", (288, 315), True, 'import numpy as np\n'), ((412, 452), 'bokeh.plotting.figure', 'figure', ([], {'x_range': '(0, 10)', 'y_range': '(0, 10)'}), '(x_range=(0, 10), y_range=(0, 10))\n', (418, 452), False, 'from bokeh.plotting import figure\n'), ((547, 605), 'bokeh.models.Slider', 'Slider', ([], {'start': '(0)', 'end': '(N - 1)', 'value': '(0)', 'step': '(1)', 'title': '"""Frame"""'}), "(start=0, end=N - 1, value=0, step=1, title='Frame')\n", (553, 605), False, 'from bokeh.models import ColumnDataSource, Slider\n'), ((766, 773), 'bokeh.io.show', 'show', (['p'], {}), '(p)\n', (770, 773), False, 'from bokeh.io import curdoc, show\n'), ((324, 337), 'numpy.sin', 'np.sin', (['(x + z)'], {}), '(x + z)\n', (330, 337), True, 'import numpy as np\n'), ((336, 345), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (342, 345), True, 'import numpy as np\n'), ((746, 763), 'bokeh.layouts.column', 'column', (['p', 'slider'], {}), '(p, slider)\n', (752, 763), False, 'from bokeh.layouts import column\n'), ((728, 736), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (734, 736), False, 'from bokeh.io import curdoc, show\n')] |
#!/usr/bin/env python
from __future__ import print_function
import os.path as osp
import sys
import itertools, pkg_resources, sys
from distutils.version import LooseVersion
if LooseVersion(pkg_resources.get_distribution("chainer").version) >= LooseVersion('7.0.0') and \
sys.version_info.major == 2:
print('''Please install chainer <= 7.0.0:
sudo pip install chainer==6.7.0
c.f https://github.com/jsk-ros-pkg/jsk_recognition/pull/2485
''', file=sys.stderr)
sys.exit(1)
if [p for p in list(itertools.chain(*[pkg_resources.find_distributions(_) for _ in sys.path])) if "cupy-" in p.project_name ] == []:
print('''Please install CuPy
sudo pip install cupy-cuda[your cuda version]
i.e.
sudo pip install cupy-cuda91
''', file=sys.stderr)
sys.exit(1)
import chainer
from chainer import cuda
import chainer.serializers as S
from chainer import Variable
import cv2
from distutils.version import LooseVersion
import numpy as np
import cv_bridge
from dynamic_reconfigure.server import Server
from jsk_perception.cfg import FastRCNNConfig as Config
from jsk_recognition_msgs.msg import Rect, RectArray
from jsk_recognition_msgs.msg import ClassificationResult
import jsk_recognition_utils
from jsk_recognition_utils.chainermodels import VGG16FastRCNN
from jsk_recognition_utils.chainermodels import VGG_CNN_M_1024
from jsk_recognition_utils.nms import nms
from jsk_topic_tools import ConnectionBasedTransport
import message_filters
import rospkg
import rospy
from sensor_msgs.msg import Image
def img_preprocessing(orig_img, pixel_means, max_size=1000, scale=600):
img = orig_img.astype(np.float32, copy=True)
img -= pixel_means
im_size_min = np.min(img.shape[0:2])
im_size_max = np.max(img.shape[0:2])
im_scale = float(scale) / float(im_size_min)
if np.rint(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
return img.transpose([2, 0, 1]).astype(np.float32), im_scale
class FastRCNN(ConnectionBasedTransport):
def __init__(self, model, target_names, pixel_means, use_gpu):
super(FastRCNN, self).__init__()
self._srv = Server(Config, self.configCallback)
self.model = model
self._pub_rects = self.advertise('~output/rect_array',
RectArray, queue_size=1)
self._pub_class = self.advertise('~output/class',
ClassificationResult, queue_size=1)
self.target_names = target_names
self.pixel_means = np.array(pixel_means, dtype=np.float32)
self.use_gpu = use_gpu
self.classifier_name = rospy.get_param("~classifier_name", rospy.get_name())
def configCallback(self, config, level):
self.nms_thresh = config.nms_thresh
self.conf_thresh = config.conf_thresh
return config
def subscribe(self):
self._sub = message_filters.Subscriber('~input', Image)
self._sub_rects = message_filters.Subscriber('~input/rect_array',
RectArray)
use_async = rospy.get_param('~approximate_sync', False)
queue_size = rospy.get_param('~queue_size', 100)
subs = [self._sub, self._sub_rects]
if use_async:
slop = rospy.get_param('~slop', 0.1)
sync = message_filters.ApproximateTimeSynchronizer(
subs, queue_size, slop)
else:
sync = message_filters.TimeSynchronizer(subs, queue_size)
sync.registerCallback(self._detect)
def unsubscribe(self):
self._sub.unregister()
self._sub_rects.unregister()
def _detect(self, imgmsg, rects_msg):
bridge = cv_bridge.CvBridge()
im_orig = bridge.imgmsg_to_cv2(imgmsg, desired_encoding='bgr8')
im, im_scale = img_preprocessing(im_orig, self.pixel_means)
rects_orig = jsk_recognition_utils.rects_msg_to_ndarray(rects_msg)
if len(rects_orig) == 0:
return
rects = rects_orig * im_scale
scores, bbox_pred = self._im_detect(im, rects)
rects = RectArray(header=imgmsg.header)
labels = []
label_proba = []
for cls_id in range(1, len(self.target_names)):
_cls = scores[:, cls_id][:, np.newaxis]
_bbx = bbox_pred[:, cls_id * 4: (cls_id + 1) * 4]
dets = np.hstack((_bbx, _cls))
keep = nms(dets, self.nms_thresh)
dets = dets[keep, :]
orig_rects = cuda.cupy.asnumpy(rects_orig)[keep, :]
inds = np.where(dets[:, -1] >= self.conf_thresh)[0]
for i in inds:
_bbox = dets[i, :4]
x1, y1, x2, y2 = orig_rects[i]
width = x2 - x1
height = y2 - y1
center_x = x1 + 0.5 * width
center_y = y1 + 0.5 * height
dx, dy, dw, dh = _bbox
_center_x = dx * width + center_x
_center_y = dy * height + center_y
_width = np.exp(dw) * width
_height = np.exp(dh) * height
x1 = _center_x - 0.5 * _width
y1 = _center_y - 0.5 * _height
x2 = _center_x + 0.5 * _width
y2 = _center_y + 0.5 * _height
rect = Rect(x=x1, y=y1, width=x2-x1, height=y2-y1)
rects.rects.append(rect)
labels.append(cls_id)
label_proba.append(dets[:, -1][i])
# publish classification result
clss = ClassificationResult(
header=imgmsg.header,
classifier=self.classifier_name,
target_names=self.target_names,
labels=labels,
label_names=[self.target_names[l] for l in labels],
label_proba=label_proba,
)
self._pub_rects.publish(rects)
self._pub_class.publish(clss)
def _im_detect(self, im, rects):
xp = cuda.cupy if self.use_gpu else np
im = xp.asarray(im)
rects = xp.asarray(rects)
x_data = im[xp.newaxis, :, :, :]
# batch_indices is always 0 when batch size is 1
batch_indices = xp.zeros((len(rects), 1), dtype=np.float32)
rects = xp.hstack((batch_indices, rects))
if LooseVersion(chainer.__version__).version[0] < 2:
x = Variable(x_data, volatile=True)
rects_val = Variable(rects, volatile=True)
self.model.train = False
cls_score, bbox_pred = self.model(x, rects_val)
else:
with chainer.using_config('train', False), \
chainer.no_backprop_mode():
x = Variable(x_data)
rects_val = Variable(rects)
cls_score, bbox_pred = self.model(x, rects_val)
scores = cuda.to_cpu(cls_score.data)
bbox_pred = cuda.to_cpu(bbox_pred.data)
return scores, bbox_pred
def main():
rospy.init_node('fast_rcnn_caffenet')
# get parameters
try:
model_name = rospy.get_param('~model')
except KeyError as e:
rospy.logerr('Unspecified rosparam: {0}'.format(e))
sys.exit(1)
gpu = rospy.get_param('~gpu', -1)
use_gpu = True if gpu >= 0 else False
# setup model
PKG = 'jsk_perception'
rp = rospkg.RosPack()
data_path = osp.join(rp.get_path(PKG), 'trained_data')
if model_name == 'vgg_cnn_m_1024':
model = VGG_CNN_M_1024()
chainermodel = osp.join(data_path, 'vgg_cnn_m_1024.chainermodel')
elif model_name == 'vgg16':
model = VGG16FastRCNN()
chainermodel = osp.join(data_path, 'vgg16_fast_rcnn.chainermodel')
else:
rospy.logerr('Unsupported model: {0}'.format(model_name))
sys.exit(1)
rospy.loginfo('Loading chainermodel')
S.load_hdf5(chainermodel, model)
if use_gpu:
model.to_gpu(gpu)
rospy.loginfo('Finished loading chainermodel')
# assumptions
target_names = [
'__background__',
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'pottedplant',
'sheep',
'sofa',
'train',
'tvmonitor',
]
pixel_means = [102.9801, 115.9465, 122.7717]
fast_rcnn = FastRCNN(
model=model, target_names=target_names,
pixel_means=pixel_means, use_gpu=use_gpu)
rospy.spin()
if __name__ == '__main__':
main()
| [
"jsk_recognition_utils.rects_msg_to_ndarray",
"jsk_recognition_msgs.msg.RectArray",
"jsk_recognition_msgs.msg.ClassificationResult",
"chainer.no_backprop_mode",
"numpy.exp",
"rospy.get_name",
"os.path.join",
"jsk_recognition_utils.chainermodels.VGG16FastRCNN",
"message_filters.TimeSynchronizer",
"... | [((475, 486), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (483, 486), False, 'import itertools, pkg_resources, sys\n'), ((767, 778), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (775, 778), False, 'import itertools, pkg_resources, sys\n'), ((1681, 1703), 'numpy.min', 'np.min', (['img.shape[0:2]'], {}), '(img.shape[0:2])\n', (1687, 1703), True, 'import numpy as np\n'), ((1722, 1744), 'numpy.max', 'np.max', (['img.shape[0:2]'], {}), '(img.shape[0:2])\n', (1728, 1744), True, 'import numpy as np\n'), ((1911, 2001), 'cv2.resize', 'cv2.resize', (['img', 'None', 'None'], {'fx': 'im_scale', 'fy': 'im_scale', 'interpolation': 'cv2.INTER_LINEAR'}), '(img, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.\n INTER_LINEAR)\n', (1921, 2001), False, 'import cv2\n'), ((7041, 7078), 'rospy.init_node', 'rospy.init_node', (['"""fast_rcnn_caffenet"""'], {}), "('fast_rcnn_caffenet')\n", (7056, 7078), False, 'import rospy\n'), ((7274, 7301), 'rospy.get_param', 'rospy.get_param', (['"""~gpu"""', '(-1)'], {}), "('~gpu', -1)\n", (7289, 7301), False, 'import rospy\n'), ((7399, 7415), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (7413, 7415), False, 'import rospkg\n'), ((7860, 7897), 'rospy.loginfo', 'rospy.loginfo', (['"""Loading chainermodel"""'], {}), "('Loading chainermodel')\n", (7873, 7897), False, 'import rospy\n'), ((7902, 7934), 'chainer.serializers.load_hdf5', 'S.load_hdf5', (['chainermodel', 'model'], {}), '(chainermodel, model)\n', (7913, 7934), True, 'import chainer.serializers as S\n'), ((7981, 8027), 'rospy.loginfo', 'rospy.loginfo', (['"""Finished loading chainermodel"""'], {}), "('Finished loading chainermodel')\n", (7994, 8027), False, 'import rospy\n'), ((8633, 8645), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (8643, 8645), False, 'import rospy\n'), ((246, 267), 'distutils.version.LooseVersion', 'LooseVersion', (['"""7.0.0"""'], {}), "('7.0.0')\n", (258, 267), False, 'from distutils.version import LooseVersion\n'), ((1801, 1832), 
'numpy.rint', 'np.rint', (['(im_scale * im_size_max)'], {}), '(im_scale * im_size_max)\n', (1808, 1832), True, 'import numpy as np\n'), ((2257, 2292), 'dynamic_reconfigure.server.Server', 'Server', (['Config', 'self.configCallback'], {}), '(Config, self.configCallback)\n', (2263, 2292), False, 'from dynamic_reconfigure.server import Server\n'), ((2653, 2692), 'numpy.array', 'np.array', (['pixel_means'], {'dtype': 'np.float32'}), '(pixel_means, dtype=np.float32)\n', (2661, 2692), True, 'import numpy as np\n'), ((3013, 3056), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input"""', 'Image'], {}), "('~input', Image)\n", (3039, 3056), False, 'import message_filters\n'), ((3083, 3141), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input/rect_array"""', 'RectArray'], {}), "('~input/rect_array', RectArray)\n", (3109, 3141), False, 'import message_filters\n'), ((3215, 3258), 'rospy.get_param', 'rospy.get_param', (['"""~approximate_sync"""', '(False)'], {}), "('~approximate_sync', False)\n", (3230, 3258), False, 'import rospy\n'), ((3280, 3315), 'rospy.get_param', 'rospy.get_param', (['"""~queue_size"""', '(100)'], {}), "('~queue_size', 100)\n", (3295, 3315), False, 'import rospy\n'), ((3819, 3839), 'cv_bridge.CvBridge', 'cv_bridge.CvBridge', ([], {}), '()\n', (3837, 3839), False, 'import cv_bridge\n'), ((4001, 4054), 'jsk_recognition_utils.rects_msg_to_ndarray', 'jsk_recognition_utils.rects_msg_to_ndarray', (['rects_msg'], {}), '(rects_msg)\n', (4043, 4054), False, 'import jsk_recognition_utils\n'), ((4217, 4248), 'jsk_recognition_msgs.msg.RectArray', 'RectArray', ([], {'header': 'imgmsg.header'}), '(header=imgmsg.header)\n', (4226, 4248), False, 'from jsk_recognition_msgs.msg import Rect, RectArray\n'), ((5651, 5859), 'jsk_recognition_msgs.msg.ClassificationResult', 'ClassificationResult', ([], {'header': 'imgmsg.header', 'classifier': 'self.classifier_name', 'target_names': 'self.target_names', 'labels': 'labels', 'label_names': 
'[self.target_names[l] for l in labels]', 'label_proba': 'label_proba'}), '(header=imgmsg.header, classifier=self.classifier_name,\n target_names=self.target_names, labels=labels, label_names=[self.\n target_names[l] for l in labels], label_proba=label_proba)\n', (5671, 5859), False, 'from jsk_recognition_msgs.msg import ClassificationResult\n'), ((6914, 6941), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['cls_score.data'], {}), '(cls_score.data)\n', (6925, 6941), False, 'from chainer import cuda\n'), ((6962, 6989), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['bbox_pred.data'], {}), '(bbox_pred.data)\n', (6973, 6989), False, 'from chainer import cuda\n'), ((7131, 7156), 'rospy.get_param', 'rospy.get_param', (['"""~model"""'], {}), "('~model')\n", (7146, 7156), False, 'import rospy\n'), ((7530, 7546), 'jsk_recognition_utils.chainermodels.VGG_CNN_M_1024', 'VGG_CNN_M_1024', ([], {}), '()\n', (7544, 7546), False, 'from jsk_recognition_utils.chainermodels import VGG_CNN_M_1024\n'), ((7570, 7620), 'os.path.join', 'osp.join', (['data_path', '"""vgg_cnn_m_1024.chainermodel"""'], {}), "(data_path, 'vgg_cnn_m_1024.chainermodel')\n", (7578, 7620), True, 'import os.path as osp\n'), ((2791, 2807), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (2805, 2807), False, 'import rospy\n'), ((3401, 3430), 'rospy.get_param', 'rospy.get_param', (['"""~slop"""', '(0.1)'], {}), "('~slop', 0.1)\n", (3416, 3430), False, 'import rospy\n'), ((3450, 3517), 'message_filters.ApproximateTimeSynchronizer', 'message_filters.ApproximateTimeSynchronizer', (['subs', 'queue_size', 'slop'], {}), '(subs, queue_size, slop)\n', (3493, 3517), False, 'import message_filters\n'), ((3568, 3618), 'message_filters.TimeSynchronizer', 'message_filters.TimeSynchronizer', (['subs', 'queue_size'], {}), '(subs, queue_size)\n', (3600, 3618), False, 'import message_filters\n'), ((4483, 4506), 'numpy.hstack', 'np.hstack', (['(_bbx, _cls)'], {}), '((_bbx, _cls))\n', (4492, 4506), True, 'import numpy as np\n'), ((4526, 4552), 
'jsk_recognition_utils.nms.nms', 'nms', (['dets', 'self.nms_thresh'], {}), '(dets, self.nms_thresh)\n', (4529, 4552), False, 'from jsk_recognition_utils.nms import nms\n'), ((6451, 6482), 'chainer.Variable', 'Variable', (['x_data'], {'volatile': '(True)'}), '(x_data, volatile=True)\n', (6459, 6482), False, 'from chainer import Variable\n'), ((6507, 6537), 'chainer.Variable', 'Variable', (['rects'], {'volatile': '(True)'}), '(rects, volatile=True)\n', (6515, 6537), False, 'from chainer import Variable\n'), ((7251, 7262), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7259, 7262), False, 'import itertools, pkg_resources, sys\n'), ((7669, 7684), 'jsk_recognition_utils.chainermodels.VGG16FastRCNN', 'VGG16FastRCNN', ([], {}), '()\n', (7682, 7684), False, 'from jsk_recognition_utils.chainermodels import VGG16FastRCNN\n'), ((7708, 7759), 'os.path.join', 'osp.join', (['data_path', '"""vgg16_fast_rcnn.chainermodel"""'], {}), "(data_path, 'vgg16_fast_rcnn.chainermodel')\n", (7716, 7759), True, 'import os.path as osp\n'), ((7844, 7855), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7852, 7855), False, 'import itertools, pkg_resources, sys\n'), ((192, 233), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""chainer"""'], {}), "('chainer')\n", (222, 233), False, 'import itertools, pkg_resources, sys\n'), ((4611, 4640), 'chainer.cuda.cupy.asnumpy', 'cuda.cupy.asnumpy', (['rects_orig'], {}), '(rects_orig)\n', (4628, 4640), False, 'from chainer import cuda\n'), ((4670, 4711), 'numpy.where', 'np.where', (['(dets[:, -1] >= self.conf_thresh)'], {}), '(dets[:, -1] >= self.conf_thresh)\n', (4678, 4711), True, 'import numpy as np\n'), ((5421, 5468), 'jsk_recognition_msgs.msg.Rect', 'Rect', ([], {'x': 'x1', 'y': 'y1', 'width': '(x2 - x1)', 'height': '(y2 - y1)'}), '(x=x1, y=y1, width=x2 - x1, height=y2 - y1)\n', (5425, 5468), False, 'from jsk_recognition_msgs.msg import Rect, RectArray\n'), ((6666, 6702), 'chainer.using_config', 'chainer.using_config', 
(['"""train"""', '(False)'], {}), "('train', False)\n", (6686, 6702), False, 'import chainer\n'), ((6723, 6749), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (6747, 6749), False, 'import chainer\n'), ((6771, 6787), 'chainer.Variable', 'Variable', (['x_data'], {}), '(x_data)\n', (6779, 6787), False, 'from chainer import Variable\n'), ((6816, 6831), 'chainer.Variable', 'Variable', (['rects'], {}), '(rects)\n', (6824, 6831), False, 'from chainer import Variable\n'), ((5146, 5156), 'numpy.exp', 'np.exp', (['dw'], {}), '(dw)\n', (5152, 5156), True, 'import numpy as np\n'), ((5191, 5201), 'numpy.exp', 'np.exp', (['dh'], {}), '(dh)\n', (5197, 5201), True, 'import numpy as np\n'), ((6385, 6418), 'distutils.version.LooseVersion', 'LooseVersion', (['chainer.__version__'], {}), '(chainer.__version__)\n', (6397, 6418), False, 'from distutils.version import LooseVersion\n'), ((525, 560), 'pkg_resources.find_distributions', 'pkg_resources.find_distributions', (['_'], {}), '(_)\n', (557, 560), False, 'import itertools, pkg_resources, sys\n')] |
import metpy.calc as mpcalc
from metpy.units import units
import numpy as np
import xarray as xr
import os
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import warnings
warnings.simplefilter('ignore')
# open netCDF4 file with xarray and parse data to CF standard using Metpy
ds = xr.open_dataset('data/isentropic_example.nc').metpy.parse_cf()
data_proj = ds.t.metpy.cartopy_crs
time = ds.time.values
# extract atmospehric variables
temperature = ds.t
lat = temperature.metpy.y
lon = temperature.metpy.x
mixing = ds.q
z = ds.z
u = ds.u
v = ds.v
# Can have different vertical levels for wind and thermodynamic variables
# Find and select the common levels
press = temperature.metpy.vertical
common_levels = np.intersect1d(press, u.metpy.vertical)
temperature = temperature.metpy.sel(vertical=common_levels)
u = u.metpy.sel(vertical=common_levels)
v = v.metpy.sel(vertical=common_levels)
# Get common pressure levels as a data array
press = press.metpy.sel(vertical=common_levels)
# Needed to make numpy broadcasting work between 1D pressure and other 3D arrays
# Use .metpy.unit_array to get numpy array with units rather than xarray DataArray
pressure_for_calc = press.metpy.unit_array[:, None, None]
mixing['units'] = 'dimensionless'
# Interpolate all the data
isen_level = np.array([290, 295, 300, 305, 310]) * units.kelvin
# use Metoy to interpolate data
ret = mpcalc.isentropic_interpolation(isen_level, press, temperature, mixing, u, v)
isen_press, isen_mixing, isen_u, isen_v = ret
# Squeeze the returned arrays
isen_press = isen_press.squeeze()
isen_mixing = isen_mixing.squeeze()
isen_u = isen_u.squeeze()
isen_v = isen_v.squeeze()
# search through the image directory to see if the plot already exists
for isen_level_idx, isen_lvl in enumerate(isen_level):
fname = f'imgs/isentropic/{isen_lvl.m}K_{str(time)[0:13]}.png'
print(fname)
if os.path.isfile(fname):
print('Already have it')
continue
# smoothe the data to get a better snapshot of synoptic conditions
isen_press = mpcalc.smooth_gaussian(isen_press.squeeze(), 9)
isen_u = mpcalc.smooth_gaussian(isen_u.squeeze(), 9)
isen_v = mpcalc.smooth_gaussian(isen_v.squeeze(), 9)
# use .values because we don't care about using DataArray
dx, dy = mpcalc.lat_lon_grid_deltas(lon.values, lat.values)
lift = -mpcalc.advection(isen_press[isen_level_idx], [isen_u[isen_level_idx],
isen_v[isen_level_idx]], [dx, dy], dim_order='yx')
# plot isentropic ascent and pressure levels
fig = plt.figure(figsize=(14, 8), dpi=200)
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal(central_longitude=-100))
fig.patch.set_facecolor('white')
ax.add_feature(cfeature.COASTLINE)
levels = np.arange(300, 1000, 25)
cntr = ax.contour(lon, lat, isen_press[isen_level_idx], transform=data_proj, colors='black', levels=levels)
cntr.clabel(fmt='%d')
# plot isentropic wind in knots
lon_slice = slice(None, None, 5)
lat_slice = slice(None, None, 5)
ax.barbs(lon[lon_slice], lat[lat_slice],
isen_u[isen_level_idx, lon_slice, lat_slice].to('knots').magnitude,
isen_v[isen_level_idx, lon_slice, lat_slice].to('knots').magnitude,
transform=data_proj, zorder=2, length=5, regrid_shape=50)
# plot isentropic vertical motion in microbar/s
levels = np.arange(-6, 7)
cs = ax.contourf(lon, lat, lift.to('microbar/s'), levels=levels, cmap='RdBu',
transform=data_proj, extend='both')
plt.colorbar(cs)
# add US/State boundaries using Cartopy
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.BORDERS, linewidth=2)
ax.add_feature(cfeature.STATES, linestyle=':')
ax.set_extent((-120, -70, 25, 55), crs=data_proj)
# plt.show()
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
| [
"cartopy.crs.LambertConformal",
"warnings.simplefilter",
"matplotlib.pyplot.close",
"xarray.open_dataset",
"matplotlib.pyplot.colorbar",
"metpy.calc.isentropic_interpolation",
"os.path.isfile",
"metpy.calc.lat_lon_grid_deltas",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.arange",
"metpy.... | [((219, 250), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (240, 250), False, 'import warnings\n'), ((758, 797), 'numpy.intersect1d', 'np.intersect1d', (['press', 'u.metpy.vertical'], {}), '(press, u.metpy.vertical)\n', (772, 797), True, 'import numpy as np\n'), ((1424, 1501), 'metpy.calc.isentropic_interpolation', 'mpcalc.isentropic_interpolation', (['isen_level', 'press', 'temperature', 'mixing', 'u', 'v'], {}), '(isen_level, press, temperature, mixing, u, v)\n', (1455, 1501), True, 'import metpy.calc as mpcalc\n'), ((1333, 1368), 'numpy.array', 'np.array', (['[290, 295, 300, 305, 310]'], {}), '([290, 295, 300, 305, 310])\n', (1341, 1368), True, 'import numpy as np\n'), ((1920, 1941), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (1934, 1941), False, 'import os\n'), ((2321, 2371), 'metpy.calc.lat_lon_grid_deltas', 'mpcalc.lat_lon_grid_deltas', (['lon.values', 'lat.values'], {}), '(lon.values, lat.values)\n', (2347, 2371), True, 'import metpy.calc as mpcalc\n'), ((2624, 2660), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 8)', 'dpi': '(200)'}), '(figsize=(14, 8), dpi=200)\n', (2634, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2843, 2867), 'numpy.arange', 'np.arange', (['(300)', '(1000)', '(25)'], {}), '(300, 1000, 25)\n', (2852, 2867), True, 'import numpy as np\n'), ((3461, 3477), 'numpy.arange', 'np.arange', (['(-6)', '(7)'], {}), '(-6, 7)\n', (3470, 3477), True, 'import numpy as np\n'), ((3621, 3637), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cs'], {}), '(cs)\n', (3633, 3637), True, 'import matplotlib.pyplot as plt\n'), ((3972, 4011), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""'}), "(fname, bbox_inches='tight')\n", (3983, 4011), True, 'import matplotlib.pyplot as plt\n'), ((4016, 4030), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4025, 4030), True, 'import matplotlib.pyplot as plt\n'), 
((2384, 2508), 'metpy.calc.advection', 'mpcalc.advection', (['isen_press[isen_level_idx]', '[isen_u[isen_level_idx], isen_v[isen_level_idx]]', '[dx, dy]'], {'dim_order': '"""yx"""'}), "(isen_press[isen_level_idx], [isen_u[isen_level_idx],\n isen_v[isen_level_idx]], [dx, dy], dim_order='yx')\n", (2400, 2508), True, 'import metpy.calc as mpcalc\n'), ((331, 376), 'xarray.open_dataset', 'xr.open_dataset', (['"""data/isentropic_example.nc"""'], {}), "('data/isentropic_example.nc')\n", (346, 376), True, 'import xarray as xr\n'), ((2706, 2751), 'cartopy.crs.LambertConformal', 'ccrs.LambertConformal', ([], {'central_longitude': '(-100)'}), '(central_longitude=-100)\n', (2727, 2751), True, 'import cartopy.crs as ccrs\n')] |
import sys
import os
HOME=os.environ['HOME']
sys.path.insert(1,HOME+'/github/StreamingSVM')
from operations import LoadLibsvm
import numpy as np
training_filepath = sys.argv[1]
n_features = int(sys.argv[2])
training_loader = LoadLibsvm.LoadLibSVM(filename=training_filepath, n_features=n_features)
x_training, y_training = training_loader.load_all_data()
totalpoints = len(x_training) * n_features
zeropoints = totalpoints - np.count_nonzero(x_training)
print(float(zeropoints)/float(totalpoints)*100.0)
| [
"operations.LoadLibsvm.LoadLibSVM",
"numpy.count_nonzero",
"sys.path.insert"
] | [((45, 94), 'sys.path.insert', 'sys.path.insert', (['(1)', "(HOME + '/github/StreamingSVM')"], {}), "(1, HOME + '/github/StreamingSVM')\n", (60, 94), False, 'import sys\n'), ((227, 299), 'operations.LoadLibsvm.LoadLibSVM', 'LoadLibsvm.LoadLibSVM', ([], {'filename': 'training_filepath', 'n_features': 'n_features'}), '(filename=training_filepath, n_features=n_features)\n', (248, 299), False, 'from operations import LoadLibsvm\n'), ((427, 455), 'numpy.count_nonzero', 'np.count_nonzero', (['x_training'], {}), '(x_training)\n', (443, 455), True, 'import numpy as np\n')] |
from numba import i4
from numba.core.types import string
from numba.experimental import jitclass
from numpy import inf, zeros
from numpy.random import randint
# region @jitclass
@jitclass({
'nameAlg': string,
'ts': i4,
'row': i4,
'col': i4,
'param': i4[:],
'current': i4[:],
'total': i4[:],
})
# endregion
class NormalCore:
def __init__(self, row: int, col: int):
self.nameAlg = 'MIDAS'
self.ts: int = 1
self.row = row
self.col = col
self.param = randint(1, 1 << 16, 2 * row).astype(i4)
self.current = zeros(row * col, i4)
self.total = zeros(row * col, i4)
@staticmethod
def ChiSquaredTest(a: float, s: float, t: float) -> float:
return 0 if s == 0 or t - 1 == 0 else pow((a - s / t) * t, 2) / (s * (t - 1))
def Call(self, src: int, dst: int, ts: int) -> float:
if self.ts < ts:
self.current *= 0
self.ts = ts
minCurrent = minTotal = inf
for i in range(self.row):
i = i * self.col + ((src + 347 * dst) * self.param[i] + self.param[i + self.row]) % self.col
self.current[i] += 1
self.total[i] += 1
minCurrent = min(minCurrent, self.current[i])
minTotal = min(minTotal, self.total[i])
return self.ChiSquaredTest(minCurrent, minTotal, ts)
| [
"numpy.zeros",
"numpy.random.randint",
"numba.experimental.jitclass"
] | [((180, 295), 'numba.experimental.jitclass', 'jitclass', (["{'nameAlg': string, 'ts': i4, 'row': i4, 'col': i4, 'param': i4[:],\n 'current': i4[:], 'total': i4[:]}"], {}), "({'nameAlg': string, 'ts': i4, 'row': i4, 'col': i4, 'param': i4[:],\n 'current': i4[:], 'total': i4[:]})\n", (188, 295), False, 'from numba.experimental import jitclass\n'), ((523, 543), 'numpy.zeros', 'zeros', (['(row * col)', 'i4'], {}), '(row * col, i4)\n', (528, 543), False, 'from numpy import inf, zeros\n'), ((559, 579), 'numpy.zeros', 'zeros', (['(row * col)', 'i4'], {}), '(row * col, i4)\n', (564, 579), False, 'from numpy import inf, zeros\n'), ((466, 494), 'numpy.random.randint', 'randint', (['(1)', '(1 << 16)', '(2 * row)'], {}), '(1, 1 << 16, 2 * row)\n', (473, 494), False, 'from numpy.random import randint\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.