code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
def binary_classification_metrics(prediction, ground_truth):
    '''
    Computes metrics for binary classification.

    Arguments:
      prediction, np array of bool (num_samples) - model predictions
      ground_truth, np array of bool (num_samples) - true labels

    Returns:
      precision, recall, f1, accuracy - classification metrics.
      Any metric whose denominator is zero (e.g. precision when nothing
      was predicted positive) is reported as 0 instead of raising
      ZeroDivisionError.
    '''
    # Coerce to boolean arrays so truthy int inputs (0/1) behave the same
    # as the documented bool arrays.
    prediction = np.asarray(prediction, dtype=bool)
    ground_truth = np.asarray(ground_truth, dtype=bool)

    # Vectorized confusion-matrix counts (replaces the per-sample loop).
    true_positives = np.count_nonzero(prediction & ground_truth)
    false_positives = np.count_nonzero(prediction & ~ground_truth)
    false_negatives = np.count_nonzero(~prediction & ground_truth)

    predicted_positive = true_positives + false_positives
    actual_positive = true_positives + false_negatives

    precision = true_positives / predicted_positive if predicted_positive else 0
    recall = true_positives / actual_positive if actual_positive else 0
    # Harmonic mean, written without x**-1 so precision/recall of 0 is safe.
    f1 = (2 * precision * recall / (precision + recall)
          if (precision + recall) else 0)
    accuracy = (np.count_nonzero(prediction == ground_truth) / len(prediction)
                if len(prediction) else 0)
    return precision, recall, f1, accuracy
def multiclass_accuracy(prediction, ground_truth):
    '''
    Computes accuracy for multiclass classification.

    Arguments:
      prediction, np array of int (num_samples) - model predictions
      ground_truth, np array of int (num_samples) - true labels

    Returns:
      accuracy - ratio of accurate predictions to total samples
    '''
    num_correct = np.count_nonzero(prediction == ground_truth)
    num_total = len(prediction)
    return num_correct / num_total
| [
"numpy.count_nonzero"
] | [((1723, 1767), 'numpy.count_nonzero', 'np.count_nonzero', (['(prediction == ground_truth)'], {}), '(prediction == ground_truth)\n', (1739, 1767), True, 'import numpy as np\n'), ((1222, 1266), 'numpy.count_nonzero', 'np.count_nonzero', (['(prediction == ground_truth)'], {}), '(prediction == ground_truth)\n', (1238, 1266), True, 'import numpy as np\n')] |
import numpy as np
def GetUserDataFunc(news_title,train_user_id_sample,train_user,train_sess,train_label,train_user_id):
    """Build a closure that assembles one user's training batch.

    The returned function maps a user id to (click, sample, label) where
    click and sample are news-title arrays gathered from `news_title` by
    index, and label is an array of per-sample labels.
    """
    def _get_user_data(uid):
        clicks, sessions, labels = [], [], []
        for sid in train_user_id_sample[uid]:
            clicks.append(train_user['click'][train_user_id[sid]])
            sessions.append(train_sess[sid])
            labels.append(train_label[sid])
        # Index into the title table to turn id lists into feature arrays.
        click_titles = news_title[np.array(clicks)]
        session_titles = news_title[np.array(sessions)]
        return click_titles, session_titles, np.array(labels)
    return _get_user_data
def add_noise(weights, lambd):
    """Perturb every weight array in-place with elementwise Laplace(0, lambd)
    noise and return the (mutated) list."""
    for w in weights:
        # In-place += keeps the original ndarray objects, matching callers
        # that hold references to the arrays.
        w += np.random.laplace(scale=lambd, size=w.shape)
    return weights
def fed_single_update(model,doc_encoder,user_encoder,num,lambd,get_user_data,train_uid_table):
    """One round of federated averaging over `num` randomly sampled users.

    Each sampled user trains the model once from the round's starting
    weights; the per-user encoder weights are then averaged, weighted by
    the user's sample count, and written back into the encoders.
    Returns the mean of the per-user training losses.
    """
    # Sample `num` users without replacement.
    random_index = np.random.permutation(len(train_uid_table))[:num]
    all_news_weights = []
    all_user_weights = []
    # Snapshot of the global weights every user starts from this round.
    old_news_weight = doc_encoder.get_weights()
    old_user_weight = user_encoder.get_weights()
    sample_nums = []
    loss = []
    for uinx in random_index:
        # Reset encoders so each user's local update starts from the
        # same global model, not from the previous user's result.
        doc_encoder.set_weights(old_news_weight)
        user_encoder.set_weights(old_user_weight)
        uid = train_uid_table[uinx]
        click,sample,label = get_user_data(uid)
        #print(label)
        # Single full-batch fit on this user's local data.
        g = model.fit([sample,click],label,batch_size = label.shape[0],verbose=False)
        loss.append(g.history['loss'][0])
        news_weight = doc_encoder.get_weights()
        user_weight = user_encoder.get_weights()
        if lambd>0:
            # Laplace perturbation of the local update (privacy noise).
            news_weight = add_noise(news_weight,lambd)
            user_weight = add_noise(user_weight,lambd)
        #noise =
        #weight += noise
        all_news_weights.append(news_weight)
        all_user_weights.append(user_weight)
        sample_nums.append(label.shape[0])
    # Normalize sample counts into averaging weights.
    sample_nums = np.array(sample_nums)
    sample_nums = sample_nums/sample_nums.sum()
    # Per-layer weighted average across users (zip groups layer-wise).
    doc_weights = [np.average(weights, axis=0,weights=sample_nums) for weights in zip(*all_news_weights)]
    user_weights = [np.average(weights, axis=0,weights=sample_nums) for weights in zip(*all_user_weights)]
    doc_encoder.set_weights(doc_weights)
    user_encoder.set_weights(user_weights)
    loss = np.array(loss).mean()
    #print('average loss',loss)
    return loss | [
"numpy.array",
"numpy.random.laplace",
"numpy.average"
] | [((1960, 1981), 'numpy.array', 'np.array', (['sample_nums'], {}), '(sample_nums)\n', (1968, 1981), True, 'import numpy as np\n'), ((423, 438), 'numpy.array', 'np.array', (['click'], {}), '(click)\n', (431, 438), True, 'import numpy as np\n'), ((456, 472), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (464, 472), True, 'import numpy as np\n'), ((489, 504), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (497, 504), True, 'import numpy as np\n'), ((731, 784), 'numpy.random.laplace', 'np.random.laplace', ([], {'scale': 'lambd', 'size': 'weights[i].shape'}), '(scale=lambd, size=weights[i].shape)\n', (748, 784), True, 'import numpy as np\n'), ((2054, 2102), 'numpy.average', 'np.average', (['weights'], {'axis': '(0)', 'weights': 'sample_nums'}), '(weights, axis=0, weights=sample_nums)\n', (2064, 2102), True, 'import numpy as np\n'), ((2161, 2209), 'numpy.average', 'np.average', (['weights'], {'axis': '(0)', 'weights': 'sample_nums'}), '(weights, axis=0, weights=sample_nums)\n', (2171, 2209), True, 'import numpy as np\n'), ((2348, 2362), 'numpy.array', 'np.array', (['loss'], {}), '(loss)\n', (2356, 2362), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 4 18:14:29 2016
@author: becker
"""
import numpy as np
import scipy.linalg as linalg
from simfempy import solvers
class RaviartThomas(solvers.solver.Application):
    """
    Lowest-order Raviart-Thomas (RT0) finite-element utilities on a
    simplicial mesh.

    RT0 basis function: sigma * 0.5 * |S|/|K| (x-x_N)
    """
    def __init__(self, **kwargs):
        # NOTE(review): passing `self` explicitly to super().__init__ hands
        # the parent class an extra positional argument -- verify against
        # solvers.solver.Application's signature.
        super().__init__(self, **kwargs)
    def setMesh(self, mesh):
        """Attach a mesh and precompute the midpoint of every face."""
        self.mesh = mesh
        # Vertex mean of each face = face midpoint.
        self.pointsf = self.mesh.points[self.mesh.faces].mean(axis=1)
        # meshes.plotmesh.plotmeshWithNumbering(self.mesh, localnumbering=True)
    def solve(self):
        # Delegates to the linear solver provided by the Application base.
        return self.solveLinear()
    def rt(self, ic, x):
        """Evaluate the dim+1 RT0 basis functions of cell `ic` at point `x`.

        Returns an array of shape (dim+1, dim): one row per local face.
        """
        # NOTE(review): xn, yn, zn are unpacked but never used below.
        xn, yn, zn, dim = self.mesh.points[:, 0], self.mesh.points[:, 1], self.mesh.points[:, 2], self.mesh.dimension
        base = np.zeros((dim+1,dim), dtype=np.float64)
        for ii in range(dim+1):
            ie = self.mesh.facesOfCells[ic, ii]
            iv = self.mesh.simplices[ic, ii]
            # sigma fixes orientation; |normal| / (dim * |K|) is the RT0 scale.
            scale = self.mesh.sigma[ic,ii] * linalg.norm( self.mesh.normals[ie]) / self.mesh.dV[ic]/dim
            for jj in range(dim):
                base[ii, jj] = scale * (x[jj] - self.mesh.points[iv, jj])
            # base[ii] = scale * (x - self.mesh.points[iv])[::dim]
        return base
    def rteval(self, ic, x, y, vc):
        """Evaluate the RT0 field with coefficients `vc`, and its gradient,
        at point (x, y) in triangle `ic` (2-D-only code path)."""
        v = np.zeros(2, dtype=np.float64)
        dv = np.zeros( (2,2), dtype=np.float64)
        for ii in range(3):
            ie = self.mesh.facesOfCells[ic, ii]
            # NOTE(review): uses self.mesh.triangles here but
            # self.mesh.simplices in rt() -- confirm both attributes exist.
            iv = self.mesh.triangles[ic, ii]
            sigma = self.mesh.sigma[ic, ii]
            scale = 0.5 * linalg.norm( self.mesh.normals[ie]) / self.mesh.dV[ic]
            v[0] += sigma * scale * (x - self.mesh.x[iv]) * vc[ii]
            v[1] += sigma * scale * (y - self.mesh.y[iv]) * vc[ii]
            # Gradient of RT0 basis is diagonal and constant per face term.
            dv[0,0] += sigma * scale * vc[ii]
            dv[0,1] += 0.0
            dv[1,0] += 0.0
            dv[1,1] += sigma * scale * vc[ii]
        return (v, dv)
    def rtpgrad(self, ic, x, y):
        """Return the three RT0 basis values at (x, y) in triangle `ic`
        together with their (scalar) divergence-like gradients."""
        base = np.zeros((3,2), dtype=np.float64)
        basegrad = np.zeros((3), dtype=np.float64)
        for ii in range(3):
            ie = self.mesh.facesOfCells[ic, ii]
            iv = self.mesh.triangles[ic, ii]
            sigma = self.mesh.sigma[ic,ii]
            scale = 0.5 * linalg.norm( self.mesh.normals[ie]) / self.mesh.dV[ic]
            base[ii, 0] = sigma * scale * (x - self.mesh.x[iv])
            base[ii, 1] = sigma * scale * (y - self.mesh.y[iv])
            basegrad[ii] = sigma * scale
        return (base, basegrad)
    def computeVCells(self, u):
        """Reconstruct the RT0 velocity at each cell center from the
        per-face coefficient vector `u` (length nfaces)."""
        ncells, nfaces, nnodes, dim = self.mesh.ncells, self.mesh.nfaces, self.mesh.nnodes, self.mesh.dimension
        x, y, z = self.mesh.points.T
        assert u.shape[0] == nfaces
        v = np.zeros((dim,ncells))
        for ic in range(ncells):
            rt = self.rt(ic, self.mesh.pointsc[ic])
            v[:,ic] = np.dot( rt.T , u[ self.mesh.facesOfCells[ic]] )
        return v
    def computeVEdges(self, u):
        """Average the RT0 velocity at face midpoints, then per cell
        (2-D-only code path)."""
        nfaces = self.mesh.nfaces
        ncells = self.mesh.ncells
        xf, yf, zf = self.pointsf[:, 0], self.pointsf[:, 1], self.pointsf[:, 2]
        assert u.shape[0] == nfaces
        vex = np.zeros(nfaces)
        vey = np.zeros(nfaces)
        for ie in range(nfaces):
            xe, ye = xf[ie], yf[ie]
            ic = self.mesh.cellsOfFaces[ie]
            # Keep only real neighbor cells (-1 marks a boundary side).
            ic = ic[np.where(ic!=-1)]
            # NOTE(review): rt() is defined as rt(self, ic, x) taking a point
            # array, but is called below as self.rt(ic, xe, ye) with two
            # scalars -- this arity mismatch would raise a TypeError.
            # Confirm which rt signature is current.
            if len(ic)==1:
                rt0 = self.rt(ic[0], xe, ye)
                vex[ie] = np.dot(rt0[:, 0], u[ self.mesh.facesOfCells[ic[0], :]])
                vey[ie] = np.dot(rt0[:, 1], u[ self.mesh.facesOfCells[ic[0], :]])
            else:
                # Interior face: average the reconstruction from both cells.
                rt0 = self.rt(ic[0], xe, ye)
                vx0 = np.dot( rt0[:, 0] , u[ self.mesh.facesOfCells[ic[0], :]] )
                vy0 = np.dot( rt0[:, 1] , u[ self.mesh.facesOfCells[ic[0], :]])
                rt1 = self.rt(ic[1], xe, ye)
                vx1 = np.dot(rt1[:, 0], u[ self.mesh.facesOfCells[ic[1], :]])
                vy1 = np.dot(rt1[:, 1], u[ self.mesh.facesOfCells[ic[1], :]])
                vex[ie] = 0.5*(vx0+vx1)
                vey[ie] = 0.5 * (vy0 + vy1)
        vx = np.zeros(ncells)
        vy = np.zeros(ncells)
        for ic in range(ncells):
            for ii in range(3):
                ie = self.mesh.facesOfCells[ic,ii]
                vx[ic] += vex[ie]/3.0
                vy[ic] += vey[ie] / 3.0
        return vx, vy
    def computeVNodes(self, u):
        """Reconstruct a nodal velocity field from the per-face coefficient
        vector `u`, distance-weighting the contributions of the faces in
        each node's boundary patch."""
        nfaces = self.mesh.nfaces
        nnodes = self.mesh.nnodes
        x = self.mesh.x
        y = self.mesh.y
        assert u.shape[0] == nfaces
        v0 = np.zeros(nnodes)
        v1 = np.zeros(nnodes)
        for iv in range( self.mesh.nnodes):
            cv = np.array((x[iv],y[iv]))
            patches = self.mesh.patches_bnodes[iv]
            # Column 5 holds the face index; -1 marks an absent face.
            patches2 = patches[np.where(patches[:,5]!=-1)[0]]
            npatch = patches2.shape[0]
            dist = np.zeros(npatch)
            for i,patch in enumerate(patches2):
                ie = patch[5]
                # NOTE(review): self.xedge / self.yedge are never assigned in
                # this class (setMesh only sets self.pointsf) -- this would
                # raise AttributeError; confirm whether pointsf was intended.
                ce = np.array((self.xedge[ie],self.yedge[ie])) - cv
                dist[i] = linalg.norm(ce)
            dist /= np.sum(dist)
            for i,patch in enumerate(patches2):
                ie = patch[5]
                ud = u[ie]*dist[i]/linalg.norm( self.mesh.normals[ie])
                # ud = u[ie]/linalg.norm( self.mesh.normals[ie])/float(npatch)
                v0[iv] += ud * self.mesh.normals[ie][0]
                v1[iv] += ud * self.mesh.normals[ie][1]
        return v0,v1
# ------------------------------------- #
if __name__ == '__main__':
    # This module currently ships without a standalone demo or self-test.
    print("so far no test")
| [
"numpy.where",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.sum",
"scipy.linalg.norm"
] | [((782, 824), 'numpy.zeros', 'np.zeros', (['(dim + 1, dim)'], {'dtype': 'np.float64'}), '((dim + 1, dim), dtype=np.float64)\n', (790, 824), True, 'import numpy as np\n'), ((1300, 1329), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float64'}), '(2, dtype=np.float64)\n', (1308, 1329), True, 'import numpy as np\n'), ((1343, 1377), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.float64'}), '((2, 2), dtype=np.float64)\n', (1351, 1377), True, 'import numpy as np\n'), ((1984, 2018), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float64'}), '((3, 2), dtype=np.float64)\n', (1992, 2018), True, 'import numpy as np\n'), ((2037, 2066), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float64'}), '(3, dtype=np.float64)\n', (2045, 2066), True, 'import numpy as np\n'), ((2753, 2776), 'numpy.zeros', 'np.zeros', (['(dim, ncells)'], {}), '((dim, ncells))\n', (2761, 2776), True, 'import numpy as np\n'), ((3182, 3198), 'numpy.zeros', 'np.zeros', (['nfaces'], {}), '(nfaces)\n', (3190, 3198), True, 'import numpy as np\n'), ((3213, 3229), 'numpy.zeros', 'np.zeros', (['nfaces'], {}), '(nfaces)\n', (3221, 3229), True, 'import numpy as np\n'), ((4140, 4156), 'numpy.zeros', 'np.zeros', (['ncells'], {}), '(ncells)\n', (4148, 4156), True, 'import numpy as np\n'), ((4170, 4186), 'numpy.zeros', 'np.zeros', (['ncells'], {}), '(ncells)\n', (4178, 4186), True, 'import numpy as np\n'), ((4606, 4622), 'numpy.zeros', 'np.zeros', (['nnodes'], {}), '(nnodes)\n', (4614, 4622), True, 'import numpy as np\n'), ((4636, 4652), 'numpy.zeros', 'np.zeros', (['nnodes'], {}), '(nnodes)\n', (4644, 4652), True, 'import numpy as np\n'), ((2884, 2927), 'numpy.dot', 'np.dot', (['rt.T', 'u[self.mesh.facesOfCells[ic]]'], {}), '(rt.T, u[self.mesh.facesOfCells[ic]])\n', (2890, 2927), True, 'import numpy as np\n'), ((4714, 4738), 'numpy.array', 'np.array', (['(x[iv], y[iv])'], {}), '((x[iv], y[iv]))\n', (4722, 4738), True, 'import numpy as np\n'), ((4910, 4926), 'numpy.zeros', 'np.zeros', 
(['npatch'], {}), '(npatch)\n', (4918, 4926), True, 'import numpy as np\n'), ((5135, 5147), 'numpy.sum', 'np.sum', (['dist'], {}), '(dist)\n', (5141, 5147), True, 'import numpy as np\n'), ((3364, 3382), 'numpy.where', 'np.where', (['(ic != -1)'], {}), '(ic != -1)\n', (3372, 3382), True, 'import numpy as np\n'), ((3480, 3534), 'numpy.dot', 'np.dot', (['rt0[:, 0]', 'u[self.mesh.facesOfCells[ic[0], :]]'], {}), '(rt0[:, 0], u[self.mesh.facesOfCells[ic[0], :]])\n', (3486, 3534), True, 'import numpy as np\n'), ((3562, 3616), 'numpy.dot', 'np.dot', (['rt0[:, 1]', 'u[self.mesh.facesOfCells[ic[0], :]]'], {}), '(rt0[:, 1], u[self.mesh.facesOfCells[ic[0], :]])\n', (3568, 3616), True, 'import numpy as np\n'), ((3703, 3757), 'numpy.dot', 'np.dot', (['rt0[:, 0]', 'u[self.mesh.facesOfCells[ic[0], :]]'], {}), '(rt0[:, 0], u[self.mesh.facesOfCells[ic[0], :]])\n', (3709, 3757), True, 'import numpy as np\n'), ((3784, 3838), 'numpy.dot', 'np.dot', (['rt0[:, 1]', 'u[self.mesh.facesOfCells[ic[0], :]]'], {}), '(rt0[:, 1], u[self.mesh.facesOfCells[ic[0], :]])\n', (3790, 3838), True, 'import numpy as np\n'), ((3909, 3963), 'numpy.dot', 'np.dot', (['rt1[:, 0]', 'u[self.mesh.facesOfCells[ic[1], :]]'], {}), '(rt1[:, 0], u[self.mesh.facesOfCells[ic[1], :]])\n', (3915, 3963), True, 'import numpy as np\n'), ((3987, 4041), 'numpy.dot', 'np.dot', (['rt1[:, 1]', 'u[self.mesh.facesOfCells[ic[1], :]]'], {}), '(rt1[:, 1], u[self.mesh.facesOfCells[ic[1], :]])\n', (3993, 4041), True, 'import numpy as np\n'), ((5099, 5114), 'scipy.linalg.norm', 'linalg.norm', (['ce'], {}), '(ce)\n', (5110, 5114), True, 'import scipy.linalg as linalg\n'), ((1571, 1605), 'scipy.linalg.norm', 'linalg.norm', (['self.mesh.normals[ie]'], {}), '(self.mesh.normals[ie])\n', (1582, 1605), True, 'import scipy.linalg as linalg\n'), ((2261, 2295), 'scipy.linalg.norm', 'linalg.norm', (['self.mesh.normals[ie]'], {}), '(self.mesh.normals[ie])\n', (2272, 2295), True, 'import scipy.linalg as linalg\n'), ((4821, 4850), 'numpy.where', 
'np.where', (['(patches[:, 5] != -1)'], {}), '(patches[:, 5] != -1)\n', (4829, 4850), True, 'import numpy as np\n'), ((5026, 5068), 'numpy.array', 'np.array', (['(self.xedge[ie], self.yedge[ie])'], {}), '((self.xedge[ie], self.yedge[ie]))\n', (5034, 5068), True, 'import numpy as np\n'), ((5261, 5295), 'scipy.linalg.norm', 'linalg.norm', (['self.mesh.normals[ie]'], {}), '(self.mesh.normals[ie])\n', (5272, 5295), True, 'import scipy.linalg as linalg\n'), ((994, 1028), 'scipy.linalg.norm', 'linalg.norm', (['self.mesh.normals[ie]'], {}), '(self.mesh.normals[ie])\n', (1005, 1028), True, 'import scipy.linalg as linalg\n')] |
import numpy as np
import scipy.io as sio
import h5py
# import hdf5storage
import os
import csv
import re
# def saveMatv7(fname, data, version=None):
# path = os.path.dirname(fname)
# name = os.path.basename(fname)
# hdf5storage.write(data, path, fname, matlab_compatible=True)
def load_data(filename, delimiter=r'[ ,\t]+'):
    """Load a delimited text file into a 2-D object array of string fields.

    Arguments:
      filename - path to the text file
      delimiter - regex splitting each line into fields
                  (default: runs of spaces, commas, or tabs)

    Returns:
      np.ndarray of dtype object; rows are lines, entries are field strings.
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    rows = [re.split(delimiter, line.strip()) for line in lines]
    # `np.object` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `object` is the supported equivalent.
    return np.array(rows, dtype=object)
def load_mat(filename):
    """Load a MATLAB .mat file into a dict of arrays.

    Tries scipy first (handles pre-7.3 formats); falls back to h5py for
    MATLAB v7.3 files, which are HDF5 containers that scipy cannot read.
    """
    try:
        return sio.loadmat(filename)
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt
    # and SystemExit). scipy raises NotImplementedError for v7.3 files and
    # ValueError/OSError for unreadable ones.
    except (NotImplementedError, ValueError, OSError):
        dataset = {}
        with h5py.File(filename, 'r') as f:
            for k in f.keys():
                dataset[k] = np.array(f[k])
        return dataset
| [
"numpy.array",
"scipy.io.loadmat",
"h5py.File"
] | [((459, 491), 'numpy.array', 'np.array', (['lines'], {'dtype': 'np.object'}), '(lines, dtype=np.object)\n', (467, 491), True, 'import numpy as np\n'), ((532, 553), 'scipy.io.loadmat', 'sio.loadmat', (['filename'], {}), '(filename)\n', (543, 553), True, 'import scipy.io as sio\n'), ((585, 609), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (594, 609), False, 'import h5py\n'), ((655, 669), 'numpy.array', 'np.array', (['f[k]'], {}), '(f[k])\n', (663, 669), True, 'import numpy as np\n')] |
import os
from multiprocessing import Pool
from dataclasses import dataclass
from functools import partial
import json
import numpy as np
import argparse
@dataclass
class Params:
    """Static configuration and lookup tables shared by worker processes."""
    num_general_tags: int  # size of the general-tag vocabulary (array rows)
    num_characters: int  # size of the character vocabulary (array columns)
    general_tag_ids: dict[str, int]  # general tag name -> row index
    character_ids: dict[str, int]  # character name -> column index
    # Character tag -> implied base character; may be None (see main()).
    character_implications: dict[str, str]
    solo_heuristic: bool  # if True, count() skips posts with >1 character
@dataclass
class CountData:
    """Aggregated counts produced by count() for one metadata file."""
    num_posts: int  # posts kept after filtering
    gc_count: np.ndarray  # (num_general_tags, num_characters) co-occurrences
    general_count: np.ndarray  # per-general-tag post counts
    character_count: np.ndarray  # per-character post counts
def count(params: Params, filename: str) -> CountData:
    """Accumulate tag statistics over one metadata file (one JSON object
    per line, each with a 'tags' list).

    Returns a CountData holding the number of posts kept, per-general-tag
    and per-character post counts, and the general x character
    co-occurrence matrix.
    """
    num_posts = 0
    general_count = np.zeros(params.num_general_tags, dtype=np.uint32)
    character_count = np.zeros(params.num_characters, dtype=np.uint32)
    gc_count = np.zeros(
        (params.num_general_tags, params.num_characters), dtype=np.uint32)
    # Context manager closes the file deterministically (the original
    # iterated a bare open(), relying on GC to release the handle).
    with open(filename, 'r', encoding='utf-8') as f:
        for line in f:
            tags = json.loads(line)['tags']
            if params.solo_heuristic:
                # Count distinct characters, resolving implications so a
                # derived tag and its base character count only once.
                if params.character_implications:
                    num_characters = len(frozenset(
                        params.character_implications.get(tag['name'], tag['name'])
                        for tag in tags if tag['category'] == '4'
                    ))
                else:
                    num_characters = sum(
                        1 for tag in tags if tag['category'] == '4')
                # Skip multi-character posts: their general tags cannot be
                # attributed to a single character.
                if num_characters > 1:
                    continue
            general_tags = list(frozenset(
                params.general_tag_ids[tag['name']] for tag in tags
                if tag['category'] == '0' and tag['name'] in params.general_tag_ids
            ))
            characters = list(frozenset(
                params.character_ids[tag['name']] for tag in tags
                if tag['category'] == '4' and tag['name'] in params.character_ids
            ))
            num_posts += 1
            general_count[general_tags] += 1
            character_count[characters] += 1
            for c in characters:
                gc_count[general_tags, c] += 1
    return CountData(
        num_posts=num_posts,
        gc_count=gc_count,
        general_count=general_count,
        character_count=character_count
    )
def main(args: argparse.Namespace):
    """Compute naive-Bayes tag->character association scores and save them.

    Writes two float32 arrays to args.output via np.savez_compressed:
      a - (num_general_tags, num_characters) per-tag log-likelihood-ratio
          weights
      b - (num_characters,) per-character bias terms
    """
    if args.mapping:
        mappings = json.load(open(args.mapping, 'r', encoding='utf-8'))
    else:
        mappings = None
    general_tags = open(args.general, 'r',
                        encoding='utf-8').read().splitlines()
    characters = open(args.character, 'r',
                      encoding='utf-8').read().splitlines()
    num_general_tags = len(general_tags)
    num_characters = len(characters)
    general_tag_ids = {x: i for (i, x) in enumerate(general_tags)}
    if mappings:
        # Fold mapped general tags onto the index of their canonical target.
        general_tag_mappings = mappings['general']
        for mapping in general_tag_mappings.values():
            for from_tag, to_tag in mapping.items():
                if (not from_tag in general_tag_ids) and (to_tag in general_tag_ids):
                    general_tag_ids[from_tag] = general_tag_ids[to_tag]
    character_ids = {x: i for (i, x) in enumerate(characters)}
    if mappings:
        character_mappings = mappings['character']
        character_implications = character_mappings['implications']
        # Aliased character names share the index of their canonical name.
        for from_tag, to_tag in character_mappings['aliases'].items():
            if to_tag in character_ids:
                character_ids[from_tag] = character_ids[to_tag]
    else:
        character_implications = None
    params = Params(num_general_tags=num_general_tags,
                    num_characters=num_characters,
                    general_tag_ids=general_tag_ids,
                    character_ids=character_ids,
                    character_implications=character_implications,
                    solo_heuristic=args.solo_heuristic)
    count_with_params = partial(count, params)
    num_posts = 0
    general_count = np.zeros(num_general_tags, dtype=np.uint32)
    character_count = np.zeros(num_characters, dtype=np.uint32)
    gc_count = np.zeros((num_general_tags, num_characters), dtype=np.uint32)
    filenames = (os.path.join(args.metadata_dir, filename)
                 for filename in os.listdir(args.metadata_dir))
    if args.processes == 1:
        results = map(count_with_params, filenames)
    else:
        pool = Pool(args.processes)
        results = pool.map(count_with_params, filenames)
    # Reduce the per-file counts into global totals.
    for result in results:
        num_posts += result.num_posts
        general_count += result.general_count
        character_count += result.character_count
        gc_count += result.gc_count
    # Laplace-smoothed conditional frequencies of each general tag given
    # character present (freq_c) and character absent (freq_nc).
    freq_c = (gc_count + args.smoothing) / \
        (character_count + 2 * args.smoothing)
    freq_nc = (general_count[:, None] - gc_count + args.smoothing) / \
        (num_posts - character_count + 2 * args.smoothing)
    # Log-odds weights: a post's score for a character is
    # a[its general tags, c].sum() + b[c].
    a = np.log(freq_c) + np.log(1 - freq_nc) - \
        np.log(freq_nc) - np.log(1 - freq_c)
    b = np.sum(np.log(1 - freq_c) - np.log(1 - freq_nc), axis=0)
    if args.calibration_heuristic:
        # A heuristic for compensating overconfident score of naive Bayes classifier
        # because of its assumption that features are independent.
        # This is a totally ad-hoc solution, but since we are mainly interested in
        # the ranking of tags and this heuristic modifies only the scale of scores,
        # we are OK with it.
        mean_general_count = np.sum(general_count) / num_posts
        a /= mean_general_count
        b /= mean_general_count
    a = a.astype(np.float32)
    b = b.astype(np.float32)
    np.savez_compressed(args.output, a=a, b=b)
# Command-line entry point: parse options and run the pipeline.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('metadata_dir', metavar='metadata-dir')
    parser.add_argument('-g', '--general', required=True,
                        help='File containing list of general tags')
    parser.add_argument('-c', '--character', required=True,
                        help='File containing list of characters')
    parser.add_argument('-m', '--mapping',
                        help='File containing tag mappings')
    parser.add_argument('-s', '--smoothing', type=float, default=0.1,
                        help='Laplace (additive) smoothing parameter')
    parser.add_argument('--solo-heuristic',
                        action=argparse.BooleanOptionalAction, default=True,
                        help='Use only posts tagged with single character')
    parser.add_argument('--calibration-heuristic',
                        action=argparse.BooleanOptionalAction, default=True,
                        help='Calibrate scores')
    parser.add_argument('-p', '--processes', type=int, default=1)
    parser.add_argument('-o', '--output', required=True)
    args = parser.parse_args()
    main(args)
| [
"json.loads",
"os.listdir",
"argparse.ArgumentParser",
"numpy.log",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"functools.partial",
"multiprocessing.Pool",
"numpy.savez_compressed"
] | [((600, 650), 'numpy.zeros', 'np.zeros', (['params.num_general_tags'], {'dtype': 'np.uint32'}), '(params.num_general_tags, dtype=np.uint32)\n', (608, 650), True, 'import numpy as np\n'), ((673, 721), 'numpy.zeros', 'np.zeros', (['params.num_characters'], {'dtype': 'np.uint32'}), '(params.num_characters, dtype=np.uint32)\n', (681, 721), True, 'import numpy as np\n'), ((737, 812), 'numpy.zeros', 'np.zeros', (['(params.num_general_tags, params.num_characters)'], {'dtype': 'np.uint32'}), '((params.num_general_tags, params.num_characters), dtype=np.uint32)\n', (745, 812), True, 'import numpy as np\n'), ((3744, 3766), 'functools.partial', 'partial', (['count', 'params'], {}), '(count, params)\n', (3751, 3766), False, 'from functools import partial\n'), ((3806, 3849), 'numpy.zeros', 'np.zeros', (['num_general_tags'], {'dtype': 'np.uint32'}), '(num_general_tags, dtype=np.uint32)\n', (3814, 3849), True, 'import numpy as np\n'), ((3872, 3913), 'numpy.zeros', 'np.zeros', (['num_characters'], {'dtype': 'np.uint32'}), '(num_characters, dtype=np.uint32)\n', (3880, 3913), True, 'import numpy as np\n'), ((3929, 3990), 'numpy.zeros', 'np.zeros', (['(num_general_tags, num_characters)'], {'dtype': 'np.uint32'}), '((num_general_tags, num_characters), dtype=np.uint32)\n', (3937, 3990), True, 'import numpy as np\n'), ((5456, 5498), 'numpy.savez_compressed', 'np.savez_compressed', (['args.output'], {'a': 'a', 'b': 'b'}), '(args.output, a=a, b=b)\n', (5475, 5498), True, 'import numpy as np\n'), ((5541, 5566), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5564, 5566), False, 'import argparse\n'), ((4009, 4050), 'os.path.join', 'os.path.join', (['args.metadata_dir', 'filename'], {}), '(args.metadata_dir, filename)\n', (4021, 4050), False, 'import os\n'), ((4221, 4241), 'multiprocessing.Pool', 'Pool', (['args.processes'], {}), '(args.processes)\n', (4225, 4241), False, 'from multiprocessing import Pool\n'), ((4796, 4814), 'numpy.log', 'np.log', (['(1 - 
freq_c)'], {}), '(1 - freq_c)\n', (4802, 4814), True, 'import numpy as np\n'), ((893, 909), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (903, 909), False, 'import json\n'), ((4084, 4113), 'os.listdir', 'os.listdir', (['args.metadata_dir'], {}), '(args.metadata_dir)\n', (4094, 4113), False, 'import os\n'), ((4778, 4793), 'numpy.log', 'np.log', (['freq_nc'], {}), '(freq_nc)\n', (4784, 4793), True, 'import numpy as np\n'), ((4830, 4848), 'numpy.log', 'np.log', (['(1 - freq_c)'], {}), '(1 - freq_c)\n', (4836, 4848), True, 'import numpy as np\n'), ((4851, 4870), 'numpy.log', 'np.log', (['(1 - freq_nc)'], {}), '(1 - freq_nc)\n', (4857, 4870), True, 'import numpy as np\n'), ((5294, 5315), 'numpy.sum', 'np.sum', (['general_count'], {}), '(general_count)\n', (5300, 5315), True, 'import numpy as np\n'), ((4729, 4743), 'numpy.log', 'np.log', (['freq_c'], {}), '(freq_c)\n', (4735, 4743), True, 'import numpy as np\n'), ((4746, 4765), 'numpy.log', 'np.log', (['(1 - freq_nc)'], {}), '(1 - freq_nc)\n', (4752, 4765), True, 'import numpy as np\n')] |
###Creating the model
import numpy as np
# Add do_mpc to path. This is not necessary if it was installed via pip.
import sys
sys.path.append('../../')
# Import do_mpc package:
import do_mpc
model_type = 'continuous' # either 'discrete' or 'continuous'
##Model variables
model = do_mpc.model.Model(model_type)
#phi_1 = model.set_variable(var_type='_x', var_name='phi_1', shape=(1,1))
#phi_2 = model.set_variable(var_type='_x', var_name='phi_2', shape=(1,1))
#phi_3 = model.set_variable(var_type='_x', var_name='phi_3', shape=(1,1))
# NOTE(review): the phi_1/phi_2/phi_3 declarations above are commented out,
# yet phi_1..phi_3 are still referenced below (print, model.x lookups,
# set_rhs, the MPC objective and bounds). As written the script raises
# NameError at the first such use; either restore the declarations or
# finish migrating to the PV_*/L_* states introduced here.
PV_FC_25 = model.set_variable(var_type='_x', var_name='PV_FC_25', shape=(1,1))
L_max = model.set_variable(var_type='_x', var_name='L_max', shape=(1,1))
L_min = model.set_variable(var_type='_x', var_name='L_min', shape=(1,1))
# NOTE(review): Python name PV_MV_25 vs var_name 'MV_FC_25' -- confirm
# which identifier is intended.
PV_MV_25 = model.set_variable(var_type='_x', var_name='MV_FC_25', shape=(1,1))
PV_FC_10 = model.set_variable(var_type='_x', var_name='PV_FC_10', shape=(1,1))
# Variables can also be vectors:
dphi = model.set_variable(var_type='_x', var_name='dphi', shape=(3,1))
# Two states for the desired (set) motor position:
phi_m_1_set = model.set_variable(var_type='_u', var_name='phi_m_1_set')
phi_m_2_set = model.set_variable(var_type='_u', var_name='phi_m_2_set')
# Two additional states for the true motor position:
phi_1_m = model.set_variable(var_type='_x', var_name='phi_1_m', shape=(1,1))
phi_2_m = model.set_variable(var_type='_x', var_name='phi_2_m', shape=(1,1))
print('phi_1={}, with phi_1.shape={}'.format(phi_1, phi_1.shape))
print('dphi={}, with dphi.shape={}'.format(dphi, dphi.shape))
##Query variable
model.x
model.x['phi_1']
bool(model.x['phi_1'] == phi_1)
model.x['dphi',0]
model.x.labels()
##Model parameters
# As shown in the table above, we can use Long names or short names for the variable type.
Theta_1 = model.set_variable('parameter', 'Theta_1')
Theta_2 = model.set_variable('parameter', 'Theta_2')
Theta_3 = model.set_variable('parameter', 'Theta_3')
# 'C' is spring constant and 'd' is damping constant of three disks
c = np.array([2.697, 2.66, 3.05, 2.86])*1e-3
d = np.array([6.78, 8.01, 8.82])*1e-5
##Right-hand-side equation
model.set_rhs('phi_1', dphi[0])
model.set_rhs('phi_2', dphi[1])
model.set_rhs('phi_3', dphi[2])
from casadi import *
# Continuous-time angular accelerations of the three coupled disks:
# spring coupling to neighbors/motors plus damping.
dphi_next = vertcat(
    -c[0]/Theta_1*(phi_1-phi_1_m)-c[1]/Theta_1*(phi_1-phi_2)-d[0]/Theta_1*dphi[0],
    -c[1]/Theta_2*(phi_2-phi_1)-c[2]/Theta_2*(phi_2-phi_3)-d[1]/Theta_2*dphi[1],
    -c[2]/Theta_3*(phi_3-phi_2)-c[3]/Theta_3*(phi_3-phi_2_m)-d[2]/Theta_3*dphi[2],
)
model.set_rhs('dphi', dphi_next)
tau = 1e-2
# First-order servo dynamics towards the commanded motor positions.
model.set_rhs('phi_1_m', 1/tau*(phi_m_1_set - phi_1_m))
model.set_rhs('phi_2_m', 1/tau*(phi_m_2_set - phi_2_m))
model.setup()
###Configurating MPC controller
mpc = do_mpc.controller.MPC(model)
##Optimizer parameters
setup_mpc = {
    'n_horizon': 20,
    't_step': 0.1,
    'n_robust': 1,
    'store_full_solution': True,
}
mpc.set_param(**setup_mpc)
##Objective function
# NOTE(review): phi_1/phi_2/phi_3 are never defined (their declarations are
# commented out in the model section), so these lines raise NameError.
mterm = phi_1**2 + phi_2**2 + phi_3**2
lterm = phi_1**2 + phi_2**2 + phi_3**2
mpc.set_objective(mterm=mterm, lterm=lterm)
# Penalty on input changes between consecutive steps.
mpc.set_rterm(
    phi_m_1_set=1e-2,
    phi_m_2_set=1e-2
)
##Constrains
# Lower bounds on states:
mpc.bounds['lower','_x', 'phi_1'] = -2*np.pi
mpc.bounds['lower','_x', 'phi_2'] = -2*np.pi
mpc.bounds['lower','_x', 'phi_3'] = -2*np.pi
# Upper bounds on states
mpc.bounds['upper','_x', 'phi_1'] = 2*np.pi
mpc.bounds['upper','_x', 'phi_2'] = 2*np.pi
mpc.bounds['upper','_x', 'phi_3'] = 2*np.pi
# Lower bounds on inputs:
mpc.bounds['lower','_u', 'phi_m_1_set'] = -2*np.pi
mpc.bounds['lower','_u', 'phi_m_2_set'] = -2*np.pi
# Lower bounds on inputs:
mpc.bounds['upper','_u', 'phi_m_1_set'] = 2*np.pi
mpc.bounds['upper','_u', 'phi_m_2_set'] = 2*np.pi
##Scaling
mpc.scaling['_x', 'phi_1'] = 2
mpc.scaling['_x', 'phi_2'] = 2
mpc.scaling['_x', 'phi_3'] = 2
##Uncertain parameters
inertia_mass_1 = 2.25*1e-4*np.array([1., 0.9, 1.1])
inertia_mass_2 = 2.25*1e-4*np.array([1., 0.9, 1.1])
inertia_mass_3 = 2.25*1e-4*np.array([1.])
mpc.set_uncertainty_values(
    Theta_1 = inertia_mass_1,
    Theta_2 = inertia_mass_2,
    Theta_3 = inertia_mass_3
)
# NOTE(review): the assignments and set_uncertainty_values() call below
# repeat the block above verbatim -- the duplicate appears redundant.
inertia_mass_1 = 2.25*1e-4*np.array([1., 0.9, 1.1])
inertia_mass_2 = 2.25*1e-4*np.array([1., 0.9, 1.1])
inertia_mass_3 = 2.25*1e-4*np.array([1.])
mpc.set_uncertainty_values(
    Theta_1 = inertia_mass_1,
    Theta_2 = inertia_mass_2,
    Theta_3 = inertia_mass_3
)
##Setup
mpc.setup()
### Configurating the simulator
simulator = do_mpc.simulator.Simulator(model)
# Instead of supplying a dict with the splat operator (**), as with the optimizer.set_param(),
# we can also use keywords (and call the method multiple times, if necessary):
##Simulator parameters
simulator.set_param(t_step = 0.1)
##Uncertain parameters
p_template = simulator.get_p_template()
type(p_template)
p_template.keys()
def p_fun(t_now):
    # Time-invariant "true" parameter values used by the simulator
    # (t_now is ignored).
    p_template['Theta_1'] = 2.25e-4
    p_template['Theta_2'] = 2.25e-4
    p_template['Theta_3'] = 2.25e-4
    return p_template
simulator.set_p_fun(p_fun)
## Running the optimizer
# NOTE(review): x0 is never defined in this script, so mpc.make_step(x0)
# raises NameError -- an initial state must be constructed first.
u0 = mpc.make_step(x0)
| [
"do_mpc.controller.MPC",
"numpy.array",
"do_mpc.simulator.Simulator",
"sys.path.append",
"do_mpc.model.Model"
] | [((127, 152), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (142, 152), False, 'import sys\n'), ((282, 312), 'do_mpc.model.Model', 'do_mpc.model.Model', (['model_type'], {}), '(model_type)\n', (300, 312), False, 'import do_mpc\n'), ((2730, 2758), 'do_mpc.controller.MPC', 'do_mpc.controller.MPC', (['model'], {}), '(model)\n', (2751, 2758), False, 'import do_mpc\n'), ((4452, 4485), 'do_mpc.simulator.Simulator', 'do_mpc.simulator.Simulator', (['model'], {}), '(model)\n', (4478, 4485), False, 'import do_mpc\n'), ((2016, 2051), 'numpy.array', 'np.array', (['[2.697, 2.66, 3.05, 2.86]'], {}), '([2.697, 2.66, 3.05, 2.86])\n', (2024, 2051), True, 'import numpy as np\n'), ((2063, 2091), 'numpy.array', 'np.array', (['[6.78, 8.01, 8.82]'], {}), '([6.78, 8.01, 8.82])\n', (2071, 2091), True, 'import numpy as np\n'), ((3877, 3902), 'numpy.array', 'np.array', (['[1.0, 0.9, 1.1]'], {}), '([1.0, 0.9, 1.1])\n', (3885, 3902), True, 'import numpy as np\n'), ((3929, 3954), 'numpy.array', 'np.array', (['[1.0, 0.9, 1.1]'], {}), '([1.0, 0.9, 1.1])\n', (3937, 3954), True, 'import numpy as np\n'), ((3981, 3996), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (3989, 3996), True, 'import numpy as np\n'), ((4145, 4170), 'numpy.array', 'np.array', (['[1.0, 0.9, 1.1]'], {}), '([1.0, 0.9, 1.1])\n', (4153, 4170), True, 'import numpy as np\n'), ((4197, 4222), 'numpy.array', 'np.array', (['[1.0, 0.9, 1.1]'], {}), '([1.0, 0.9, 1.1])\n', (4205, 4222), True, 'import numpy as np\n'), ((4249, 4264), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (4257, 4264), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
class DataGenerator:
    """Load a CSV file and split its columns into feature and label arrays."""

    def __init__(self, file_path, names=None, features=None, labels=None):
        """
        Arguments:
          file_path - path of the CSV file to read
          names - column names passed through to pandas.read_csv
          features - column names to extract into self.features
          labels - column names to extract into self.labels

        After construction, self.features and self.labels are numpy arrays
        of shape (num_rows, num_selected_columns).
        """
        raw_data = pd.read_csv(file_path, names=names)
        # Treat missing selections as "no columns" instead of crashing on
        # None (the defaults) when iterated below.
        features = features if features is not None else []
        labels = labels if labels is not None else []
        self.features = pd.DataFrame()
        self.labels = pd.DataFrame()
        # pop() removes each column from raw_data, so a name listed in both
        # `features` and `labels` would raise KeyError on the second access.
        for i, feat in enumerate(features):
            self.features.insert(i, feat, raw_data.pop(feat))
        for i, label in enumerate(labels):
            self.labels.insert(i, label, raw_data.pop(label))
        # Cast to numpy arrays for downstream consumers.
        # (Removed leftover debug print of the labels.)
        self.features = np.array(self.features)
        self.labels = np.array(self.labels)
| [
"pandas.DataFrame",
"numpy.array",
"pandas.read_csv"
] | [((156, 191), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'names': 'names'}), '(file_path, names=names)\n', (167, 191), True, 'import pandas as pd\n'), ((216, 230), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (228, 230), True, 'import pandas as pd\n'), ((253, 267), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (265, 267), True, 'import pandas as pd\n'), ((583, 606), 'numpy.array', 'np.array', (['self.features'], {}), '(self.features)\n', (591, 606), True, 'import numpy as np\n'), ((629, 650), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (637, 650), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division
import numpy as np
import cv2
from . import Tracker
from ..utils import dict2tuple
from ..utils.complex import real, fft2, ifft2, complex_add, complex_mul, complex_div, fftshift
from ..descriptors.fhog import fast_hog
class TrackerKCF(Tracker):
def __init__(self, **kargs):
super(TrackerKCF, self).__init__('KCF')
self.parse_args(**kargs)
self._correlation = self.setup_kernel(self.cfg.kernel_type)
def parse_args(self, **kargs):
self.cfg = {
'lambda_': 1e-4,
'padding': 1.5,
'output_sigma_factor': 0.125,
'interp_factor': 0.012,
'sigma': 0.6,
'poly_a': 1,
'poly_b': 7,
'cell_size': 4,
'kernel_type': 'gaussian'}
for key, val in kargs.items():
self.cfg.update({key: val})
self.cfg = dict2tuple(self.cfg)
def setup_kernel(self, kernel_type):
assert kernel_type in ['linear', 'polynomial', 'gaussian']
if kernel_type == 'linear':
return lambda x1, x2: self._linear_correlation(x1, x2)
elif kernel_type == 'polynomial':
return lambda x1, x2: self._polynomial_correlation(
x1, x2, self.cfg.poly_a, self.cfg.poly_b)
elif kernel_type == 'gaussian':
return lambda x1, x2: self._gaussian_correlation(
x1, x2, self.cfg.sigma)
def init(self, image, init_rect):
# initialize parameters
self.resize_image = False
if np.sqrt(init_rect[2:].prod()) > 100:
self.resize_image = True
init_rect = init_rect / 2
self.t_center = init_rect[:2] + init_rect[2:] / 2
self.t_sz = init_rect[2:]
mod = self.cfg.cell_size * 2
self.padded_sz = self.t_sz * (1 + self.cfg.padding)
self.padded_sz = self.padded_sz.astype(int) // mod * mod + mod
# get feature size and initialize hanning window
if image.ndim == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
if self.resize_image:
size = (int(image.shape[1] / 2), int(image.shape[0] / 2))
image = cv2.resize(image, size)
self.z = self._crop(image, self.t_center, self.padded_sz)
self.z = fast_hog(np.float32(self.z), self.cfg.cell_size)
self.feat_sz = self.z.shape
self.hann_window = np.outer(
np.hanning(self.feat_sz[0]),
np.hanning(self.feat_sz[1])).astype(np.float32)
self.hann_window = self.hann_window[:, :, np.newaxis]
self.z *= self.hann_window
# create gaussian labels
output_sigma = self.cfg.output_sigma_factor * \
np.sqrt(np.prod(self.feat_sz[:2])) / (1 + self.cfg.padding)
rs, cs = np.ogrid[:self.feat_sz[0], :self.feat_sz[1]]
rs, cs = rs - self.feat_sz[0] // 2, cs - self.feat_sz[1] // 2
y = np.exp(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))
self.yf = fft2(y)
# train classifier
k = self._correlation(self.z, self.z)
self.alphaf = complex_div(self.yf, complex_add(fft2(k), self.cfg.lambda_))
def update(self, image):
if image.ndim == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
if self.resize_image:
size = (int(image.shape[1] / 2), int(image.shape[0] / 2))
image = cv2.resize(image, size)
# locate target
x = self._crop(image, self.t_center, self.padded_sz)
x = self.hann_window * fast_hog(np.float32(x), self.cfg.cell_size)
k = self._correlation(x, self.z)
score = real(ifft2(complex_mul(self.alphaf, fft2(k))))
offset = self._locate_target(score)
self.t_center += offset * self.cfg.cell_size
# limit the estimated bounding box to be overlapped with the image
self.t_center = np.clip(
self.t_center, -self.t_sz / 2 + 2,
image.shape[1::-1] + self.t_sz / 2 - 1)
# update model
new_z = self._crop(image, self.t_center, self.padded_sz)
new_z = self.hann_window * fast_hog(np.float32(new_z), self.cfg.cell_size)
k = self._correlation(new_z, new_z)
new_alphaf = complex_div(self.yf, complex_add(fft2(k), self.cfg.lambda_))
self.alphaf = (1 - self.cfg.interp_factor) * self.alphaf + \
self.cfg.interp_factor * new_alphaf
self.z = (1 - self.cfg.interp_factor) * self.z + \
self.cfg.interp_factor * new_z
bndbox = np.concatenate([
self.t_center - self.t_sz / 2, self.t_sz])
if self.resize_image:
bndbox = bndbox * 2
return bndbox
def _crop(self, image, center, size):
corners = np.zeros(4, dtype=int)
corners[:2] = np.floor(center - size / 2).astype(int)
corners[2:] = corners[:2] + size
pads = np.concatenate(
(-corners[:2], corners[2:] - image.shape[1::-1]))
pads = np.maximum(0, pads)
if np.any(pads > 0):
corners = np.concatenate((
corners[:2] + pads[:2],
corners[2:] - pads[2:])).astype(int)
patch = image[corners[1]:corners[3], corners[0]:corners[2]]
if np.any(pads > 0):
patch = cv2.copyMakeBorder(
patch, pads[1], pads[3], pads[0], pads[2],
borderType=cv2.BORDER_REPLICATE)
return patch
def _linear_correlation(self, x1, x2):
xcorr = np.zeros((self.feat_sz[0], self.feat_sz[1]), np.float32)
for i in range(self.feat_sz[2]):
xcorr_ = cv2.mulSpectrums(
fft2(x1[:, :, i]), fft2(x2[:, :, i]), 0, conjB=True)
xcorr_ = real(ifft2(xcorr_))
xcorr += xcorr_
xcorr = fftshift(xcorr)
return xcorr / x1.size
def _polynomial_correlation(self, x1, x2, a, b):
xcorr = np.zeros((self.feat_sz[0], self.feat_sz[1]), np.float32)
for i in range(self.feat_sz[2]):
xcorr_ = cv2.mulSpectrums(
fft2(x1[:, :, i]), fft2(x2[:, :, i]), 0, conjB=True)
xcorr_ = real(ifft2(xcorr_))
xcorr += xcorr_
xcorr = fftshift(xcorr)
out = (xcorr / x1.size + a) ** b
return out
def _gaussian_correlation(self, x1, x2, sigma):
xcorr = np.zeros((self.feat_sz[0], self.feat_sz[1]), np.float32)
for i in range(self.feat_sz[2]):
xcorr_ = cv2.mulSpectrums(
fft2(x1[:, :, i]), fft2(x2[:, :, i]), 0, conjB=True)
xcorr_ = real(ifft2(xcorr_))
xcorr += xcorr_
xcorr = fftshift(xcorr)
out = (np.sum(x1 * x1) + np.sum(x2 * x2) - 2.0 * xcorr) / x1.size
out[out < 0] = 0
out = np.exp(-out / self.cfg.sigma ** 2)
return out
def _locate_target(self, score):
def subpixel_peak(left, center, right):
divisor = 2 * center - left - right
if abs(divisor) < 1e-3:
return 0
return 0.5 * (right - left) / divisor
_, _, _, max_loc = cv2.minMaxLoc(score)
loc = np.float32(max_loc)
if max_loc[0] in range(1, score.shape[1] - 1):
loc[0] += subpixel_peak(
score[max_loc[1], max_loc[0] - 1],
score[max_loc[1], max_loc[0]],
score[max_loc[1], max_loc[0] + 1])
if max_loc[1] in range(1, score.shape[0] - 1):
loc[1] += subpixel_peak(
score[max_loc[1] - 1, max_loc[0]],
score[max_loc[1], max_loc[0]],
score[max_loc[1] + 1, max_loc[0]])
offset = loc - np.float32(score.shape[1::-1]) / 2
return offset
class TrackerDCF(TrackerKCF):
def __init__(self, **kargs):
kargs.update({'kernel_type': 'linear'})
super(TrackerDCF, self).__init__(**kargs)
| [
"numpy.clip",
"numpy.hanning",
"numpy.prod",
"cv2.resize",
"cv2.copyMakeBorder",
"numpy.floor",
"numpy.any",
"numpy.exp",
"cv2.minMaxLoc",
"numpy.zeros",
"numpy.sum",
"numpy.concatenate",
"cv2.cvtColor",
"numpy.maximum",
"numpy.float32"
] | [((2941, 2995), 'numpy.exp', 'np.exp', (['(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))'], {}), '(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))\n', (2947, 2995), True, 'import numpy as np\n'), ((3902, 3988), 'numpy.clip', 'np.clip', (['self.t_center', '(-self.t_sz / 2 + 2)', '(image.shape[1::-1] + self.t_sz / 2 - 1)'], {}), '(self.t_center, -self.t_sz / 2 + 2, image.shape[1::-1] + self.t_sz /\n 2 - 1)\n', (3909, 3988), True, 'import numpy as np\n'), ((4545, 4603), 'numpy.concatenate', 'np.concatenate', (['[self.t_center - self.t_sz / 2, self.t_sz]'], {}), '([self.t_center - self.t_sz / 2, self.t_sz])\n', (4559, 4603), True, 'import numpy as np\n'), ((4763, 4785), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'int'}), '(4, dtype=int)\n', (4771, 4785), True, 'import numpy as np\n'), ((4904, 4968), 'numpy.concatenate', 'np.concatenate', (['(-corners[:2], corners[2:] - image.shape[1::-1])'], {}), '((-corners[:2], corners[2:] - image.shape[1::-1]))\n', (4918, 4968), True, 'import numpy as np\n'), ((4997, 5016), 'numpy.maximum', 'np.maximum', (['(0)', 'pads'], {}), '(0, pads)\n', (5007, 5016), True, 'import numpy as np\n'), ((5029, 5045), 'numpy.any', 'np.any', (['(pads > 0)'], {}), '(pads > 0)\n', (5035, 5045), True, 'import numpy as np\n'), ((5260, 5276), 'numpy.any', 'np.any', (['(pads > 0)'], {}), '(pads > 0)\n', (5266, 5276), True, 'import numpy as np\n'), ((5508, 5564), 'numpy.zeros', 'np.zeros', (['(self.feat_sz[0], self.feat_sz[1])', 'np.float32'], {}), '((self.feat_sz[0], self.feat_sz[1]), np.float32)\n', (5516, 5564), True, 'import numpy as np\n'), ((5917, 5973), 'numpy.zeros', 'np.zeros', (['(self.feat_sz[0], self.feat_sz[1])', 'np.float32'], {}), '((self.feat_sz[0], self.feat_sz[1]), np.float32)\n', (5925, 5973), True, 'import numpy as np\n'), ((6355, 6411), 'numpy.zeros', 'np.zeros', (['(self.feat_sz[0], self.feat_sz[1])', 'np.float32'], {}), '((self.feat_sz[0], self.feat_sz[1]), np.float32)\n', (6363, 6411), True, 'import numpy as np\n'), ((6776, 
6810), 'numpy.exp', 'np.exp', (['(-out / self.cfg.sigma ** 2)'], {}), '(-out / self.cfg.sigma ** 2)\n', (6782, 6810), True, 'import numpy as np\n'), ((7104, 7124), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['score'], {}), '(score)\n', (7117, 7124), False, 'import cv2\n'), ((7139, 7158), 'numpy.float32', 'np.float32', (['max_loc'], {}), '(max_loc)\n', (7149, 7158), True, 'import numpy as np\n'), ((2048, 2087), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2BGR'], {}), '(image, cv2.COLOR_GRAY2BGR)\n', (2060, 2087), False, 'import cv2\n'), ((2208, 2231), 'cv2.resize', 'cv2.resize', (['image', 'size'], {}), '(image, size)\n', (2218, 2231), False, 'import cv2\n'), ((2324, 2342), 'numpy.float32', 'np.float32', (['self.z'], {}), '(self.z)\n', (2334, 2342), True, 'import numpy as np\n'), ((3257, 3296), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2BGR'], {}), '(image, cv2.COLOR_GRAY2BGR)\n', (3269, 3296), False, 'import cv2\n'), ((3417, 3440), 'cv2.resize', 'cv2.resize', (['image', 'size'], {}), '(image, size)\n', (3427, 3440), False, 'import cv2\n'), ((5298, 5397), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['patch', 'pads[1]', 'pads[3]', 'pads[0]', 'pads[2]'], {'borderType': 'cv2.BORDER_REPLICATE'}), '(patch, pads[1], pads[3], pads[0], pads[2], borderType=\n cv2.BORDER_REPLICATE)\n', (5316, 5397), False, 'import cv2\n'), ((3567, 3580), 'numpy.float32', 'np.float32', (['x'], {}), '(x)\n', (3577, 3580), True, 'import numpy as np\n'), ((4143, 4160), 'numpy.float32', 'np.float32', (['new_z'], {}), '(new_z)\n', (4153, 4160), True, 'import numpy as np\n'), ((4808, 4835), 'numpy.floor', 'np.floor', (['(center - size / 2)'], {}), '(center - size / 2)\n', (4816, 4835), True, 'import numpy as np\n'), ((7665, 7695), 'numpy.float32', 'np.float32', (['score.shape[1::-1]'], {}), '(score.shape[1::-1])\n', (7675, 7695), True, 'import numpy as np\n'), ((2449, 2476), 'numpy.hanning', 'np.hanning', (['self.feat_sz[0]'], {}), '(self.feat_sz[0])\n', (2459, 2476), True, 
'import numpy as np\n'), ((2490, 2517), 'numpy.hanning', 'np.hanning', (['self.feat_sz[1]'], {}), '(self.feat_sz[1])\n', (2500, 2517), True, 'import numpy as np\n'), ((2745, 2770), 'numpy.prod', 'np.prod', (['self.feat_sz[:2]'], {}), '(self.feat_sz[:2])\n', (2752, 2770), True, 'import numpy as np\n'), ((5069, 5133), 'numpy.concatenate', 'np.concatenate', (['(corners[:2] + pads[:2], corners[2:] - pads[2:])'], {}), '((corners[:2] + pads[:2], corners[2:] - pads[2:]))\n', (5083, 5133), True, 'import numpy as np\n'), ((6678, 6693), 'numpy.sum', 'np.sum', (['(x1 * x1)'], {}), '(x1 * x1)\n', (6684, 6693), True, 'import numpy as np\n'), ((6696, 6711), 'numpy.sum', 'np.sum', (['(x2 * x2)'], {}), '(x2 * x2)\n', (6702, 6711), True, 'import numpy as np\n')] |
#! /usr/bin/env python2
import numpy as np
import cv2
def computeOcclusionsFromConsistency(flow, backflow, threshold=7.0):
""" Compute occlusions from backward-forward consistency.
Parameters
----------
flow : array of array of ndimages
Array of flows, so that flow[0][0] is the horizontal component of the
backward flow, flow[1][1] the vertical component of the forward flow and
so on.
backflow : array of array of ndimages
Similar to flow, but here each ndimage contains the motion towards the
reference frame.
Example: flow[0] is the flow from t-1 to t.
threshold : float
The relative threshold above which the flow is considered to be invalid
or an occlusion.
"""
h,w = flow[0][0].shape
def getWarpedError(uf,vf,ub,vb):
y,x = np.mgrid[:uf.shape[0],:uf.shape[1]]
u_warped = cv2.remap(ub,
(x+uf).astype('float32'),
(y+vf).astype('float32'),
interpolation=cv2.INTER_LINEAR)
v_warped = cv2.remap(vb,
(x+uf).astype('float32'),
(y+vf).astype('float32'),
interpolation=cv2.INTER_LINEAR)
valid = (y+vf >= 0) * (y+vf < y.shape[0]) * (x+uf >= 0) * (x+uf < x.shape[1])
err = np.sqrt((u_warped+uf)**2 + (v_warped+vf)**2)
return err, valid==0
error_backward, invalid_backward = getWarpedError(
flow[0][0],
flow[0][1],
backflow[0][0],
backflow[0][1])
error_forward, invalid_forward = getWarpedError(
flow[1][0],
flow[1][1],
backflow[1][0],
backflow[1][1])
thresh_bwd = threshold
thresh_fwd = threshold
# old way to compute occlusions
occ_backward = np.logical_or(error_backward > thresh_bwd, invalid_backward)
occ_forward = np.logical_or(error_forward > thresh_fwd, invalid_forward)
# Properly add invalid regions
invalid_both = invalid_backward * invalid_forward
invalid_only_forward = np.logical_and(invalid_forward, invalid_backward==0)
invalid_only_backward = np.logical_and(invalid_backward, invalid_forward==0)
occ_backward[invalid_only_forward] = 0
occ_forward[invalid_only_backward] = 0
occ_both = occ_backward * occ_forward
return occ_backward, occ_forward, occ_both
| [
"numpy.sqrt",
"numpy.logical_or",
"numpy.logical_and"
] | [((1842, 1902), 'numpy.logical_or', 'np.logical_or', (['(error_backward > thresh_bwd)', 'invalid_backward'], {}), '(error_backward > thresh_bwd, invalid_backward)\n', (1855, 1902), True, 'import numpy as np\n'), ((1921, 1979), 'numpy.logical_or', 'np.logical_or', (['(error_forward > thresh_fwd)', 'invalid_forward'], {}), '(error_forward > thresh_fwd, invalid_forward)\n', (1934, 1979), True, 'import numpy as np\n'), ((2097, 2151), 'numpy.logical_and', 'np.logical_and', (['invalid_forward', '(invalid_backward == 0)'], {}), '(invalid_forward, invalid_backward == 0)\n', (2111, 2151), True, 'import numpy as np\n'), ((2178, 2232), 'numpy.logical_and', 'np.logical_and', (['invalid_backward', '(invalid_forward == 0)'], {}), '(invalid_backward, invalid_forward == 0)\n', (2192, 2232), True, 'import numpy as np\n'), ((1371, 1423), 'numpy.sqrt', 'np.sqrt', (['((u_warped + uf) ** 2 + (v_warped + vf) ** 2)'], {}), '((u_warped + uf) ** 2 + (v_warped + vf) ** 2)\n', (1378, 1423), True, 'import numpy as np\n')] |
# Copyright (c) 2020 <NAME>. All Rights Reserved.
#
# Use is subject to license terms.
#
# Author: <NAME>
# Date: Nov. 18 2020
import json
import logging
import logging.config
import os
import random
import sys
from argparse import ArgumentParser
from datetime import datetime
from types import FunctionType
import matplotlib.pyplot as plt
import numpy as np
from .config_reader import ConfigReader
from .runner import Runner, seconds_to_string
from .util import get_gpu_stats
logger = logging.getLogger(__name__)
class Fork:
date_format = "%Y-%m-%d %H:%M:%S"
def __init__(self, config_filename):
self._load_args()
if self.args.config_path:
self.config = ConfigReader(self.args.config_path)
else:
self.config = ConfigReader(config_filename)
@property
def data(self):
return self.config.data
@property
def meta(self):
return self.data["meta"]
@property
def log_dir(self):
return self.data["log_dir"]
@property
def learning_rate(self):
return self.data["learning_rate"]
@property
def verbose(self):
return self.data["verbose"]
@property
def cache(self):
return self.data["cache"]
@property
def seed(self):
return self.data["seed"]
@property
def prefetch(self):
return self.data["prefetch"]
@property
def gpu_id(self):
return self.data["gpu_id"]
def __getattr__(self, item):
return self.data[item]
def _set_seed(self):
random.seed(self.seed)
np.random.seed(self.seed)
def _load_args(self):
parser = ArgumentParser()
parser.add_argument("--config", dest="config_path", default=None)
parser.add_argument("--name", dest="thread_name", default=None)
parser.add_argument("--gpu", dest="gpu_id", default=-1)
parser.add_argument("--id", dest="thread_id", default=None)
parser.add_argument("--log_config", dest="log_config", default=None)
self.args = parser.parse_args()
self.script_name = sys.argv[0]
def _load_defaults(self):
self.data.setdefault("gpu_id", self.args.gpu_id)
def _load_logging_config(self):
if not self.args.log_config:
log_fname = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "config", "logging.config"
)
else:
log_fname = self.args.log_config
try:
with open(log_fname, "r") as f:
data = json.load(f)
logging.config.dictConfig(data)
except ValueError as ex:
logging.basicConfig(
format="%(asctime)s.%(msecs)06d: %(name)s] %(message)s",
datefmt=self.date_format,
level=logging.DEBUG,
)
logger.error(ex)
def is_runner(self):
"""
Checks if current thread is the runner thread. If there are multiple generated run_configs in ConfigReader
then thread is classified as runner in order to carry responsibility of ingesting the multiple configs.
:return: True if runner.
:rtype: bool
"""
run_configs, _ = self.config.gen_run_configs()
return len(run_configs) > 1
def run(self):
"""
Starts the model training processes. In situations with multiple configs a new Fork instance will be spawned with
a specific configuration loadout and data. Further logic is then controlled via the public interface methods.
The public interface methods will be called in the following order:
1. `set_visible_gpus`
2. `get_filepaths`
3. `get_datasets`
4. `get_model`
5. `model_summary` (if configured verbose >= 1)
6. `get_metrics`
7. `get_optimizer`
8. `get_loss`
9. `model_compile`
10. `get_callbacks`
11. `model_fit`
12. `model_save`
13. `model_evaluate` (if test dataset present)
14. `save_metric`
15. `plot_metric`
16. `save_history`
Afterwards a final configuration file with all the run information will be saved in the generated folder.
:return: None
"""
self._load_defaults()
self._load_logging_config()
if self.is_runner():
runner = Runner(self.config)
runner.run(self.script_name, gpu_indices=self.get_available_gpu_indices())
else:
self._set_seed()
self.set_visible_gpus()
self.meta["data"] = {}
run_results = self.meta["data"]
start_time = datetime.now()
run_results["start_time"] = start_time.timestamp()
run_results["start_time_string"] = start_time.strftime(self.date_format)
train_fp, test_fp, valid_fp = self.get_filepaths()
train_dataset, test_dataset, valid_dataset = self.get_datasets(
train_fp, test_fp, valid_fp
)
model = self.get_model()
if self.verbose >= 1:
self.model_summary(model)
metrics = self.get_metrics()
optimizer = self.get_optimizer()
loss = self.get_loss()
self.model_compile(model, optimizer, loss, metrics)
callbacks = self.get_callbacks()
history = self.model_fit(
model,
train_dataset,
self.epochs,
valid_dataset,
callbacks,
self.verbose,
)
self.model_save(model)
end_time = datetime.now()
run_results["end_time"] = end_time.timestamp()
run_results["end_time_string"] = end_time.strftime(self.date_format)
run_time = end_time - start_time
run_results["total_time"] = run_time.total_seconds()
run_results["total_time_string"] = seconds_to_string(
run_time.total_seconds()
)
if test_dataset:
results = self.model_evaluate(model, test_dataset)
logger.info("Evaluation results: %s", str(results))
run_results["results"] = results
for metric in metrics + ["loss"]:
self.save_metric(run_results, history, metric)
self.plot_metric(history, metric)
with open(os.path.join(self.log_dir, "history.json"), "w") as f:
self.save_history(history, f)
with open(os.path.join(self.log_dir, "run_data.json"), "w") as f:
json.dump(self.data, f)
def get_available_gpu_indices(self):
"""
Gets a list of available GPUs indices to train on. Defaults to using nvidia-smi.
:return: List of available GPUs.
:rtype: list
"""
gpus = get_gpu_stats()
indices = [gpu.id for gpu in gpus]
return indices
def set_visible_gpus(self):
"""
Uses self.gpu_id in order to restrict the training sessions visible GPUs.
:return: None
"""
os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpu_id)
def model_summary(self, model):
"""
Used for displaying the model architecture summary to the user. Called when verbose > 1.
:param model: Model to be displayed.
:type model: object
:return: None
"""
raise NotImplementedError()
def get_model(self) -> object:
"""
Builds the model for use during the training sequence.
:return: Deep learning model to be compiled and fit.
:rtype: object
"""
raise NotImplementedError()
def get_callbacks(self):
"""
:return: a list of callbacks to pass to the model during the fit stage. Defaults to None.
:rtype: list, or None
"""
return None
def get_metrics(self) -> list:
"""
:return: a list of metrics to pass to the model during the compile stage.
:rtype: list
"""
raise NotImplementedError()
def get_optimizer(self) -> FunctionType:
"""
:return: the model optimizer for use in the compile stage.
:rtype: object
"""
raise NotImplementedError()
def get_loss(self) -> FunctionType:
"""
:return: the loss function for use in the compile stage.
:rtype: object
"""
raise NotImplementedError()
def model_compile(self, model, optimizer, loss, metrics) -> object:
"""
Compiles the model for training
:param model: Model to be compiled
:type model: object
:param optimizer: Training optimizer
:type optimizer: function
:param loss: Loss function to train against
:type loss: function
:param metrics: Metrics to record
:type metrics: list
:return: None
"""
raise NotImplementedError()
def model_fit(
self, model, train_set, epochs, valid_set, callbacks, verbose
) -> object:
"""
Trains the compiled model and returns the history of training.
:param model: Model to train
:type model: object
:param train_set: Training dataset to fit against
:type train_set: generator or list
:param epochs: Number of epochs to train
:type epochs: int
:param valid_set: Validation dataset
:type valid_set: generator or list
:param callbacks: A list of training callbacks
:type callbacks: list
:param verbose: Verbosity of training
:type verbose: int
:return: History object representing the model training
:rtype: dict
"""
raise NotImplementedError()
def model_evaluate(self, model, test_set):
"""
Evaluates the model against the provided test set.
:param model: Model to evaluate
:type model: object
:param test_set: Dataset to test the model against
:type test_set: generator or list
:return: Evaluation results
"""
raise NotImplementedError()
def model_save(self, model):
"""
Save the model to disk.
:param model: Trained model to save
:type model: object
:return: None
"""
raise NotImplementedError()
def preprocess(self, record):
"""
Preprocesses the data record into appropriate format for the model
:param record: A single record information to preprocess during dataset mapping for feeding into the model.
:type record: object
:return: A preprocessed record to be fed into the model.
:rtype: object
"""
raise NotImplementedError()
def train_preprocess(self, record):
"""
Preprocessor specifically for training records. Defaults to preprocess.
"""
return self.preprocess(record)
def test_preprocess(self, record):
"""
Preprocessor specifically for test records. Defaults to preprocess.
"""
return self.preprocess(record)
def valid_preprocess(self, record):
"""
Preprocessor specifically for validation records. Defaults to preprocess.
"""
return self.preprocess(record)
def get_filepaths(self):
"""
Gets the filepaths to the data that will then be processed by get_dataset.
:return: train_filepaths, test_filepaths, valid_filepaths
:rtype: (list, list, list)
"""
train_filepaths = [
os.path.join(self.data_dir, x)
for x in os.listdir(self.data_dir)
if x.startswith(self.train_prefix)
]
test_filepaths = [
os.path.join(self.data_dir, x)
for x in os.listdir(self.data_dir)
if x.startswith(self.test_prefix)
]
valid_filepaths = [
os.path.join(self.data_dir, x)
for x in os.listdir(self.data_dir)
if x.startswith(self.valid_prefix)
]
return train_filepaths, test_filepaths, valid_filepaths
def get_datasets(self, train_fp, test_fp, valid_fp):
"""
Gets the datasets to be passed into the model for training and evaluation.
:param train_fp: List of filepaths of training data.
:type train_fp: list
:param test_fp: List of filepaths of test data.
:type test_fp: list
:param valid_fp: List of filepaths of validation data.
:type valid_fp: list
:return: A generator for each train_set, test_set, valid_set to pass into the model for training.
"""
raise NotImplementedError()
def plot(self, metric, epochs, train_metrics, val_metrics):
"""
Plots the specific metric validation and training metrics against epochs and saves the graph into the configured
log directory.
:param metric: String representation of the metric being plotted.
:type metric: string
:param epochs: An array of each epoch the model performed [1..N]
:type epochs: list
:param train_metrics: Training values at each epoch step.
:type train_metrics: list
:param val_metrics: Validation values at each epoch step.
:type val_metrics: list
:return: None
"""
plt.plot(epochs, train_metrics)
plt.plot(epochs, val_metrics)
# plt.gca().set_ylim(0,-1)# sets the vertical range within [0, -1]
plt.title("Training and Validation " + metric)
plt.xlabel("Epochs")
plt.ylabel(metric.capitalize())
plt.legend(["train_" + metric.lower(), "val_" + metric.lower()])
plt.savefig(
os.path.join(self.log_dir, metric + ".jpg"), bbox_inches="tight", dpi=150
)
plt.clf()
def plot_metric(self, history, metric):
"""
Takes the history object and the provided metric and graphs them using plot.
:param history (dict): History of model training.
:param metric (string): Metric to plot.
:return: None
"""
raise NotImplementedError()
def save_metric(self, run_results, history, metric):
"""
Takes the history object and the provided metric and stores the latest value into the provided dictionary.
:param run_results: Dictionary to store the last metric value in.
:type run_results: dict
:param history: History of model training.
:type history: dict
:param metric: Metric to plot.
:type metric: string
:return: None
"""
raise NotImplementedError()
def save_history(self, history, out_file):
"""
Saves the model history object to the designated output file.
:param history: Training history generated during model_fit.
:type history: dict
:param out_file: File object to save history to.
:type out_File: file object
:return: None
"""
raise NotImplementedError()
| [
"logging.getLogger",
"logging.basicConfig",
"os.listdir",
"argparse.ArgumentParser",
"logging.config.dictConfig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"os.path.join",
"random.seed",
"datetime.datetime.now",
"os.path.dirname",
"numpy.random.seed",
"... | [((489, 516), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (506, 516), False, 'import logging\n'), ((1558, 1580), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (1569, 1580), False, 'import random\n'), ((1589, 1614), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (1603, 1614), True, 'import numpy as np\n'), ((1659, 1675), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1673, 1675), False, 'from argparse import ArgumentParser\n'), ((13515, 13546), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'train_metrics'], {}), '(epochs, train_metrics)\n', (13523, 13546), True, 'import matplotlib.pyplot as plt\n'), ((13555, 13584), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_metrics'], {}), '(epochs, val_metrics)\n', (13563, 13584), True, 'import matplotlib.pyplot as plt\n'), ((13668, 13714), 'matplotlib.pyplot.title', 'plt.title', (["('Training and Validation ' + metric)"], {}), "('Training and Validation ' + metric)\n", (13677, 13714), True, 'import matplotlib.pyplot as plt\n'), ((13723, 13743), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (13733, 13743), True, 'import matplotlib.pyplot as plt\n'), ((13982, 13991), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (13989, 13991), True, 'import matplotlib.pyplot as plt\n'), ((2577, 2608), 'logging.config.dictConfig', 'logging.config.dictConfig', (['data'], {}), '(data)\n', (2602, 2608), False, 'import logging\n'), ((4739, 4753), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4751, 4753), False, 'from datetime import datetime\n'), ((5731, 5745), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5743, 5745), False, 'from datetime import datetime\n'), ((11728, 11758), 'os.path.join', 'os.path.join', (['self.data_dir', 'x'], {}), '(self.data_dir, x)\n', (11740, 11758), False, 'import os\n'), ((11902, 11932), 'os.path.join', 'os.path.join', 
(['self.data_dir', 'x'], {}), '(self.data_dir, x)\n', (11914, 11932), False, 'import os\n'), ((12076, 12106), 'os.path.join', 'os.path.join', (['self.data_dir', 'x'], {}), '(self.data_dir, x)\n', (12088, 12106), False, 'import os\n'), ((13890, 13933), 'os.path.join', 'os.path.join', (['self.log_dir', "(metric + '.jpg')"], {}), "(self.log_dir, metric + '.jpg')\n", (13902, 13933), False, 'import os\n'), ((2552, 2564), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2561, 2564), False, 'import json\n'), ((2654, 2781), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s.%(msecs)06d: %(name)s] %(message)s"""', 'datefmt': 'self.date_format', 'level': 'logging.DEBUG'}), "(format='%(asctime)s.%(msecs)06d: %(name)s] %(message)s',\n datefmt=self.date_format, level=logging.DEBUG)\n", (2673, 2781), False, 'import logging\n'), ((6711, 6734), 'json.dump', 'json.dump', (['self.data', 'f'], {}), '(self.data, f)\n', (6720, 6734), False, 'import json\n'), ((11780, 11805), 'os.listdir', 'os.listdir', (['self.data_dir'], {}), '(self.data_dir)\n', (11790, 11805), False, 'import os\n'), ((11954, 11979), 'os.listdir', 'os.listdir', (['self.data_dir'], {}), '(self.data_dir)\n', (11964, 11979), False, 'import os\n'), ((12128, 12153), 'os.listdir', 'os.listdir', (['self.data_dir'], {}), '(self.data_dir)\n', (12138, 12153), False, 'import os\n'), ((2343, 2368), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2358, 2368), False, 'import os\n'), ((6515, 6557), 'os.path.join', 'os.path.join', (['self.log_dir', '"""history.json"""'], {}), "(self.log_dir, 'history.json')\n", (6527, 6557), False, 'import os\n'), ((6639, 6682), 'os.path.join', 'os.path.join', (['self.log_dir', '"""run_data.json"""'], {}), "(self.log_dir, 'run_data.json')\n", (6651, 6682), False, 'import os\n')] |
from __future__ import print_function
import datetime
import glob
import json
import multiprocessing
import os
import pickle
import sys
import warnings
from collections import Counter, defaultdict
from string import digits
import re
import plotly.plotly as py
from plotly.graph_objs import *
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dateutil import parser
from gensim.models import KeyedVectors
from joblib import Parallel, delayed
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.model_selection import train_test_split
from sklearn import metrics
sys.path.insert(0, os.path.dirname(__file__) + '../2_helpers')
sys.path.insert(0, os.path.dirname(__file__) + '../5_fact_checking_models')
from decoder import decoder
from metrics import ndcg_score
warnings.filterwarnings("ignore", category=DeprecationWarning)
DIR = os.path.dirname(__file__) + '../../3_Data/'
WNL = WordNetLemmatizer()
NLTK_STOPWORDS = set(stopwords.words('english'))
num_cores = multiprocessing.cpu_count()
num_jobs = round(num_cores * 3 / 4)
fact_to_words = {}
# word_vectors = KeyedVectors.load_word2vec_format('model_data/word2vec_twitter_model/word2vec_twitter_model.bin', binary=True, unicode_errors='ignore')
def datetime_converter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
def tokenize_text(text, only_retweets=False):
    """Tokenize a tweet.

    With only_retweets=True, return the lowercased @-handles mentioned in
    the text (with '@' and ':' stripped). Otherwise return lemmatized,
    lowercased word tokens with English stopwords removed.
    """
    if only_retweets:
        handles = []
        remaining = text
        while '@' in remaining:
            candidate = remaining[remaining.find('@'):]
            if ' ' in candidate:
                candidate = candidate[:candidate.find(' ')]
            handles.append(candidate)
            # drop every occurrence of this handle before rescanning
            remaining = remaining.replace(candidate, '')
        return [h.replace('@', '').replace(':', '').lower() for h in handles if '@' in h]
    tokenizer = RegexpTokenizer(r'\w+')
    return [WNL.lemmatize(token.lower())
            for token in tokenizer.tokenize(text)
            if token.lower() not in NLTK_STOPWORDS]
def get_data():
    """Load the facts and fact-transactions JSON files found under DIR.

    Returns:
        (facts, transactions) decoded via the project's `decoder` object hook.
    """
    fact_file = glob.glob(DIR + 'facts.json')[0]
    transactions_file = glob.glob(DIR + 'factTransaction.json')[0]
    # Use context managers so the file handles are closed deterministically
    # (the original leaked both handles via json.load(open(...))).
    with open(fact_file) as f:
        facts = json.load(f, object_hook=decoder)
    with open(transactions_file) as f:
        transactions = json.load(f, object_hook=decoder)
    return facts, transactions
def get_users():
    """Load per-user tweet JSON files, keeping only users with a known label.

    A user is kept when `was_correct != -1` (i.e. the label is defined).
    """
    user_files = glob.glob(DIR + 'user_tweets/' + 'user_*.json')
    print('{} users'.format(len(user_files)))
    if len(user_files) < 10: print('WRONG DIR?')
    users = []
    for user_file in user_files:
        # Context manager closes each handle; the original leaked one per file.
        with open(user_file) as f:
            user = json.loads(f.readline(), object_hook=decoder)
        if int(user.was_correct) != -1:
            users.append(user)
    print('Kept {} users'.format(len(users)))
    return users
def get_relevant_tweets(user):
    """Return the user's tweets whose tokens are, on average, close to the fact topic.

    A tweet is kept when the mean word2vec distance of its in-vocabulary
    tokens to the fact's topic words is below 0.8.

    NOTE(review): relies on the module-level `word_vectors`, whose load is
    commented out at the top of the file — calling this raises NameError
    until that model is loaded.
    """
    kept = []
    topic_words = fact_to_words[user.fact]
    for tweet in user.tweets:
        token_distances = []
        for token in tokenize_text(tweet['text'], only_retweets=False):
            if token in word_vectors.vocab:
                token_distances.append(
                    np.average(word_vectors.distances(token, other_words=topic_words)))
        if np.average(np.asarray(token_distances)) < 0.8:
            kept.append(tweet)
    return kept
def build_fact_topics():
    """Load the annotated facts file and assemble a per-fact term list.

    `fact_terms` is the concatenation of the digit-stripped tokenized text,
    the lowercased words of high-confidence entities (similarityScore >= 0.6),
    and the fact's topic label.
    """
    print("Build fact topics")
    annotated_path = glob.glob(DIR + 'facts_annotated.json')[0]
    facts_df = pd.read_json(annotated_path)
    strip_digits = str.maketrans('', '', digits)
    facts_df['text_parsed'] = facts_df['text'].map(
        lambda t: tokenize_text(t.translate(strip_digits)))

    def flatten_entities(ents):
        # lowercased surface-form words of confidently-linked entities
        words = []
        for ent in ents:
            if ent['similarityScore'] >= 0.6:
                words.extend(ent['surfaceForm'].lower().split())
        return words

    facts_df['entities_parsed'] = facts_df['entities'].map(flatten_entities)
    facts_df['topic'] = facts_df['topic'].map(lambda t: [t])
    facts_df['fact_terms'] = facts_df['text_parsed'] + facts_df['entities_parsed'] + facts_df['topic']
    return facts_df
def get_user_edges(users):
    """Collect, per user, the twitter handles they retweeted/mentioned.

    Users with one or fewer total mentions are skipped.

    Returns:
        user_to_links: list of [user_id, [handle, ...]].
        y: np.array of [user_id, was_correct + 0.01], aligned with user_to_links.
    """
    user_to_links = []
    y = []
    for user in users:
        user_links = []
        # relevant_tweets = get_relevant_tweets(user)
        for tweet in user.tweets:
            # idiom: extend with the mention list instead of a manual append loop;
            # the original's unused counter `i` is dropped.
            user_links.extend(tokenize_text(tweet['text'], only_retweets=True))
        if len(user_links) <= 1: continue
        user_to_links.append([user.user_id, user_links])
        # +0.01 keeps the label strictly positive (presumably for ranking); TODO confirm
        y.append([user.user_id, user.was_correct + 0.01])
    return user_to_links, np.asarray(y)
def build_graph(user_to_links, user_to_weight):
    """Build a directed user -> mentioned-handle graph and prune degree<=1 nodes.

    `user_to_weight` is currently unused (weighted edges are commented out).
    """
    graph = nx.DiGraph()
    sources = [entry[0] for entry in user_to_links]
    targets = {handle for _, handles in user_to_links for handle in handles}
    node_list = sources + list(targets)
    print(len(node_list))
    graph.add_nodes_from(node_list)
    graph.add_edges_from((uid, handle) for uid, handles in user_to_links for handle in handles)
    # G.add_weighted_edges_from([(userlinks[0],v,user_to_weight[i]) for i, userlinks in enumerate(user_to_links) for v in userlinks[1]])
    low_degree = [node for node, deg in dict(nx.degree(graph)).items() if deg <= 1]
    graph.remove_nodes_from(low_degree)
    return graph
def get_ranks(user_to_links, G, pageRank, alpha=0.85):
    """Score each user from the PageRank mass of the accounts they link to.

    For each (user, links) pair, sums pageRank[l] / degree(l) over the linked
    nodes present in `pageRank`, then applies the damped PageRank update
    (1 - alpha) / alpha + alpha * sum.
    """
    ranks = []
    for _uid, neighbours in user_to_links:
        weighted_mass = sum(pageRank[node] / G.degree(node)
                            for node in neighbours if node in pageRank)
        ranks.append((1 - alpha) / alpha + alpha * weighted_mass)
    return ranks
def graph_plot(G):
    """Render the mention graph with plotly (kamada-kawai layout), after
    pruning nodes with degree <= 10.

    NOTE(review): this function looks broken as written:
      - `labels` (trace4 text) and `layout` (Figure kwarg) are never defined
        anywhere in this file -> NameError at runtime.
      - `pos` is indexed by `range(N)`, but the graph's nodes are user-id
        strings/handles, not 0..N-1 integers -> KeyError unless nodes were
        relabeled. Confirm intent before relying on this.
    Also mutates G in place (removes low-degree nodes).
    """
    print(len(G.nodes()))
    # prune weakly-connected nodes to keep the plot readable
    obsolete_nodes = [k for k, v in dict(nx.degree(G)).items() if v <= 10]
    G.remove_nodes_from(obsolete_nodes)
    print(len(G.nodes()))
    pos = nx.kamada_kawai_layout(G)
    N = len(G.nodes())
    # node coordinates
    Xv = [pos[k][0] for k in range(N)]
    Yv = [pos[k][1] for k in range(N)]
    # edge polylines; None breaks the line between consecutive edges
    Xed = []
    Yed = []
    for edge in G.edges():
        Xed += [pos[edge[0]][0], pos[edge[1]][0], None]
        Yed += [pos[edge[0]][1], pos[edge[1]][1], None]
    trace3 = Scatter(x=Xed, y=Yed, mode='lines', line=Line(color='rgb(210,210,210)', width=1), hoverinfo='none')
    trace4 = Scatter(x=Xv, y=Yv, mode='markers', name='net',
                     marker=Marker(symbol='dot', size=5, color='#6959CD', line=Line(color='rgb(50,50,50)', width=0.5)),
                     text=labels, hoverinfo='text')
    annot = "This networkx.Graph has the Fruchterman-Reingold layout<br>Code:" + \
            "<a href='http://nbviewer.ipython.org/gist/empet/07ea33b2e4e0b84193bd'> [2]</a>"
    data1 = Data([trace3, trace4])
    fig1 = Figure(data=data1, layout=layout)
    fig1['layout']['annotations'][0]['text'] = annot
    plot(py.iplot(fig1, filename='Coautorship-network-nx'))
def rank_users(users):
    """Rank users by propagating PageRank credibility from the accounts they mention.

    Builds the mention graph over all users, computes PageRank, scores a
    held-out split via get_ranks, and reports NDCG against the correctness labels.
    """
    print("Creating nodes")
    user_to_links, user_to_weight = get_user_edges(users)
    # Only the held-out split is scored; the graph is built on all users.
    # (train split unused -> discarded; dead `global fact_to_words` and the
    # never-matching `pr_cred_users` dict from the original are removed.)
    _, X_test, _, y_test = train_test_split(user_to_links, user_to_weight)
    print("Building graph..")
    G = build_graph(user_to_links, user_to_weight)
    graph_plot(G)
    pr = nx.pagerank(G)
    pred = get_ranks(X_test, G, pr)
    print(sorted(np.asarray([e for e in zip(pred, [y[1] for y in y_test])]), reverse=True, key=lambda x: x[0]))
    ndgc = ndcg_score([y[1] for y in y_test], pred)
    print("NDCG: {}".format(ndgc))
# Entry point: load labelled users and run the PageRank ranking pipeline.
# Guarded so importing this module for its helpers does not trigger the run.
if __name__ == '__main__':
    users = get_users()
    rank_users(users)
| [
"plotly.plotly.iplot",
"networkx.degree",
"nltk.corpus.stopwords.words",
"sklearn.model_selection.train_test_split",
"networkx.DiGraph",
"nltk.stem.WordNetLemmatizer",
"metrics.ndcg_score",
"multiprocessing.cpu_count",
"numpy.asarray",
"os.path.dirname",
"nltk.tokenize.RegexpTokenizer",
"netwo... | [((1120, 1182), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (1143, 1182), False, 'import warnings\n'), ((1241, 1260), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (1258, 1260), False, 'from nltk.stem import WordNetLemmatizer\n'), ((1323, 1350), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1348, 1350), False, 'import multiprocessing\n'), ((1190, 1215), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1205, 1215), False, 'import os\n'), ((1282, 1308), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1297, 1308), False, 'from nltk.corpus import stopwords\n'), ((1722, 1745), 'nltk.tokenize.RegexpTokenizer', 'RegexpTokenizer', (['"""\\\\w+"""'], {}), "('\\\\w+')\n", (1737, 1745), False, 'from nltk.tokenize import RegexpTokenizer\n'), ((2691, 2738), 'glob.glob', 'glob.glob', (["(DIR + 'user_tweets/' + 'user_*.json')"], {}), "(DIR + 'user_tweets/' + 'user_*.json')\n", (2700, 2738), False, 'import glob\n'), ((3815, 3838), 'pandas.read_json', 'pd.read_json', (['fact_file'], {}), '(fact_file)\n', (3827, 3838), True, 'import pandas as pd\n'), ((5232, 5244), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (5242, 5244), True, 'import networkx as nx\n'), ((6304, 6329), 'networkx.kamada_kawai_layout', 'nx.kamada_kawai_layout', (['G'], {}), '(G)\n', (6326, 6329), True, 'import networkx as nx\n'), ((7489, 7536), 'sklearn.model_selection.train_test_split', 'train_test_split', (['user_to_links', 'user_to_weight'], {}), '(user_to_links, user_to_weight)\n', (7505, 7536), False, 'from sklearn.model_selection import train_test_split\n'), ((7647, 7661), 'networkx.pagerank', 'nx.pagerank', (['G'], {}), '(G)\n', (7658, 7661), True, 'import networkx as nx\n'), ((8038, 8078), 'metrics.ndcg_score', 'ndcg_score', (['[y[1] for y in y_test]', 
'pred'], {}), '([y[1] for y in y_test], pred)\n', (8048, 8078), False, 'from metrics import ndcg_score\n'), ((940, 965), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (955, 965), False, 'import os\n'), ((1003, 1028), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1018, 1028), False, 'import os\n'), ((2389, 2418), 'glob.glob', 'glob.glob', (["(DIR + 'facts.json')"], {}), "(DIR + 'facts.json')\n", (2398, 2418), False, 'import glob\n'), ((2446, 2485), 'glob.glob', 'glob.glob', (["(DIR + 'factTransaction.json')"], {}), "(DIR + 'factTransaction.json')\n", (2455, 2485), False, 'import glob\n'), ((3757, 3796), 'glob.glob', 'glob.glob', (["(DIR + 'facts_annotated.json')"], {}), "(DIR + 'facts_annotated.json')\n", (3766, 3796), False, 'import glob\n'), ((5160, 5173), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (5170, 5173), True, 'import numpy as np\n'), ((7263, 7312), 'plotly.plotly.iplot', 'py.iplot', (['fig1'], {'filename': '"""Coautorship-network-nx"""'}), "(fig1, filename='Coautorship-network-nx')\n", (7271, 7312), True, 'import plotly.plotly as py\n'), ((3575, 3604), 'numpy.asarray', 'np.asarray', (['distance_to_topic'], {}), '(distance_to_topic)\n', (3585, 3604), True, 'import numpy as np\n'), ((5710, 5722), 'networkx.degree', 'nx.degree', (['G'], {}), '(G)\n', (5719, 5722), True, 'import networkx as nx\n'), ((6193, 6205), 'networkx.degree', 'nx.degree', (['G'], {}), '(G)\n', (6202, 6205), True, 'import networkx as nx\n')] |
import os
import shutil
from typing import Tuple
import pandas as pd
from EvaluationUtils.descriptive_stats import create_collage
from Utils.union_find import UnionFind
from Animator.consolidation_api import CharacterDetectionOutput
import numpy as np
from numpy import linalg as LA
from collections import Counter
from Animator.cluster_logic import ConsolidationEvaluator
import Animator.clustering_methods as cm
from .detection_mapping import DetectionMapping
from Animator.utils import recreate_dir, create_dir_if_not_exist
from sklearn.metrics.pairwise import cosine_similarity
class MockCharacterConsolidator(object):
    """Post-processes character-detection clusters (DBSCAN/OPTICS labels):
    drops insignificant clusters and outlier samples, keeps a cluster-size
    percentile, and merges over-segmented clusters by feature similarity.
    """

    def __init__(self, detected_bboxes, min_cluster_significance, keep_cluster_percentile, min_cluster_size,
                 dbscan_cluster_labels):
        """Cache per-detection features/ids/confidences plus the tuning knobs.

        Args:
            detected_bboxes: iterable of detection objects exposing Features,
                KeyFrameIndex, Id, Confidence, ThumbnailId.
            min_cluster_significance: clusters scoring below this are dropped.
            keep_cluster_percentile: cumulative size fraction of clusters to keep.
            min_cluster_size: clusters smaller than this are treated as noise.
            dbscan_cluster_labels: initial per-detection cluster labels (<0 = noise).
        """
        features = np.asarray([detected_bbox.Features for detected_bbox in detected_bboxes])
        self.KeyFrameIndices = [detected_bbox.KeyFrameIndex for detected_bbox in detected_bboxes]
        self.FEATURES = features
        self.IDS = np.asarray([detected_bbox.Id for detected_bbox in detected_bboxes])
        self.DetectionConfidence = np.asarray([detected_bbox.Confidence for detected_bbox in detected_bboxes])
        self.DetectionThumbnailId = np.asarray([detected_bbox.ThumbnailId for detected_bbox in detected_bboxes])
        self.MinClusterSignificance = min_cluster_significance
        self.MinClusterSize = min_cluster_size
        self.KeepClusterPercentile = keep_cluster_percentile
        self.DbscanClusteringLabels = dbscan_cluster_labels

    @property
    def post_process_cluster_significance(self):
        """Run the full post-processing pipeline.

        Returns:
            (ids, cluster_labels, k, best_bbox_ids, id_to_cluster_significance,
             id_to_sample_significance) where negative labels mark discarded
            detections.
        """
        # handle small input
        actual_input_size = self.FEATURES.shape[0]
        # load clustered data
        cluster_labels = self.DbscanClusteringLabels
        k_estimate = len(set([c for c in cluster_labels if c >= 0]))
        print('K estimated to be {} for {} bboxes'.format(k_estimate, actual_input_size))
        # find best thumbnail per cluster and cluster significance
        best_bbox_ids, cluster_significance, id_to_cluster_label, id_to_sample_significance, cluster_centers = \
            self.get_cluster_centers_best_bbox_and_significance(cluster_labels)
        # filter insignificant clusters (using negative cluster ids)
        cluster_labels, actual_best_k, best_bbox_ids, cluster_significance = \
            self.filter_insignificant_clusters(id_to_cluster_label, best_bbox_ids, cluster_significance)
        # filter insignificant samples per cluster
        cluster_labels, best_bbox_ids, actual_best_k = self.filter_insignificant_samples_per_cluster(cluster_labels)
        # keep cluster percentile
        cluster_labels, best_bbox_ids, actual_best_k = self.keep_cluster_percentile(cluster_labels, best_bbox_ids)
        # recompute best thumbnail / significance after the sample-level filtering
        best_bbox_ids, cluster_significance, id_to_cluster_label, id_to_sample_significance, cluster_centers = \
            self.get_cluster_centers_best_bbox_and_significance(cluster_labels)
        # filter insignificant clusters (using negative cluster ids)
        cluster_labels, actual_best_k, best_bbox_ids, cluster_significance = \
            self.filter_insignificant_clusters(id_to_cluster_label, best_bbox_ids, cluster_significance)
        # re-assign best thumbnail
        best_bbox_ids, cluster_significance, id_to_cluster_label, id_to_sample_significance, cluster_centers = \
            self.get_cluster_centers_best_bbox_and_significance(cluster_labels)
        print(f'[STATS]#5: Filtered insignificant clusters: {k_estimate-actual_best_k}')
        # get and merge the potential over-segmented clusters
        cluster_ids_to_merge, _ = self.should_consolidate_clusters(cluster_labels)
        cluster_labels, actual_best_k = MockCharacterConsolidator.merge_clusters(cluster_ids_to_merge, cluster_labels)
        # re-assign best thumbnail
        best_bbox_ids, cluster_significance, id_to_cluster_label, id_to_sample_significance, cluster_centers = \
            self.get_cluster_centers_best_bbox_and_significance(cluster_labels)
        # print silhouette index
        ConsolidationEvaluator.unsupervised_evaluate_clusters(self.FEATURES, cluster_labels, 'OPTICS_ReCluster')
        return self.IDS, cluster_labels, actual_best_k, best_bbox_ids, cluster_significance, id_to_sample_significance

    def get_cluster_centers_best_bbox_and_significance(self, cluster_predictions, cluster_centers=None):
        """Per cluster: pick the medoid bbox (closest to the median center) and
        score cluster/sample significance from detection confidence + distance.

        Clusters with a negative label or fewer than MinClusterSize members get
        significance 0.0 and no best thumbnail.
        """
        best_thumbnails = []
        id_to_cluster_significance = dict()
        id_to_cluster_label = dict()
        id_to_sample_significance = dict()
        if not cluster_centers:
            cluster_centers = dict()
        for cluster_id in set(cluster_predictions):
            current_cluster_elements = self.FEATURES[cluster_predictions == cluster_id, :]
            cluster_size = current_cluster_elements.shape[0]
            current_cluster_bbox_ids = self.IDS[cluster_predictions == cluster_id]
            # ignore noise clusters
            if cluster_id >= 0 and cluster_size >= self.MinClusterSize:
                # reuse a precomputed center when supplied, else the median
                cluster_centers[cluster_id] = np.median(current_cluster_elements, axis=0) \
                    if cluster_centers is None or len(cluster_centers) == 0 or cluster_id not in cluster_centers \
                    else cluster_centers[cluster_id]
                cluster_detection_confidences = self.DetectionConfidence[cluster_predictions == cluster_id]
                # calculate cluster significance
                distance_from_center, closest_to_center_idx = self.calculate_distances_from_cluster_center(
                    cluster_centers[cluster_id], cluster_size, current_cluster_elements)
                best_thumbnails.append(current_cluster_bbox_ids[closest_to_center_idx])
                for d, c, bbox_id in zip(distance_from_center, cluster_detection_confidences, current_cluster_bbox_ids):
                    id_to_sample_significance[bbox_id] = cm.get_score(c, d)
                sig_scores = np.asarray(
                    [id_to_sample_significance[bbox_id_current_cluster] for bbox_id_current_cluster in
                     current_cluster_bbox_ids])
                cluster_significance = np.median(sig_scores)
            else:
                cluster_significance = 0.0
            for bbox_id in current_cluster_bbox_ids:
                id_to_cluster_significance[bbox_id] = cluster_significance
                id_to_cluster_label[bbox_id] = cluster_id
        return best_thumbnails, id_to_cluster_significance, id_to_cluster_label, id_to_sample_significance, cluster_centers

    def filter_insignificant_clusters(self, id_to_cluster_label, best_bbox_ids, cluster_significance):
        """Reassign members of low-significance or undersized clusters to unique
        negative (noise) labels; prune their entries from best_bbox_ids."""
        cluster_labels = []
        insignificant_cluster_id = -1
        filtered_cluster_ids = set()
        filtered_clusters_confidences = set()
        label_to_cluster_size = Counter(id_to_cluster_label.values())
        for bbox_prediction_id in self.IDS:
            # in case of significant cluster do nothing
            if cluster_significance[bbox_prediction_id] >= self.MinClusterSignificance and \
                    label_to_cluster_size[id_to_cluster_label[bbox_prediction_id]] >= self.MinClusterSize:
                cluster_labels.append(id_to_cluster_label[bbox_prediction_id])
                continue
            # update insignificant cluster
            if bbox_prediction_id in best_bbox_ids:
                best_bbox_ids.remove(bbox_prediction_id)
            # keep stats for telemetry
            filtered_cluster_ids.add(id_to_cluster_label[bbox_prediction_id])
            filtered_clusters_confidences.add(cluster_significance[bbox_prediction_id])
            id_to_cluster_label[bbox_prediction_id] = insignificant_cluster_id
            cluster_labels.append(insignificant_cluster_id)
            insignificant_cluster_id -= 1
        if len(filtered_cluster_ids) > 0:
            min_conf = min(filtered_clusters_confidences)
            max_conf = max(filtered_clusters_confidences)
            print(f'Filtered {len(filtered_cluster_ids)} clusters due to a score of range[{min_conf}, {max_conf}]')
        actual_best_k = len(set([cl for bbox_id, cl in id_to_cluster_label.items() if cl >= 0]))
        return np.asarray(cluster_labels), actual_best_k, best_bbox_ids, cluster_significance

    @staticmethod
    def calculate_distances_from_cluster_center(current_cluster_center, cluster_size, current_cluster_elements):
        """Return (distances normalized by the farthest member, index of the
        member closest to the center). A singleton cluster gets ([0.0], [0])."""
        if cluster_size <= 1:
            return np.asarray([0.0]), np.asarray([0])
        l2_norm_with_center = LA.norm(
            np.repeat([current_cluster_center], cluster_size, axis=0) - current_cluster_elements, axis=1)
        closest_to_center_idx = np.argmin(l2_norm_with_center)
        fares_from_center_idx = np.argmax(l2_norm_with_center)
        # calculate cluster significance
        distance_from_center = l2_norm_with_center / l2_norm_with_center[fares_from_center_idx]
        return distance_from_center, closest_to_center_idx

    def filter_insignificant_samples_per_cluster(self, cluster_labels):
        """Iteratively drop per-cluster outliers (score below Q25 - 1*IQR),
        marking them with unique negative labels, until each cluster is stable."""
        best_bbox_ids = []
        bbox_id_to_index = dict(zip(self.IDS, range(len(self.IDS))))
        smallest_label = min(cluster_labels)
        insignificant_cluster_label = -1 if smallest_label >= 0 else smallest_label - 1
        for cluster_id in set(cluster_labels):
            while True:
                current_cluster_elements = self.FEATURES[cluster_labels == cluster_id, :]
                cluster_size = current_cluster_elements.shape[0]
                current_cluster_bbox_ids = self.IDS[cluster_labels == cluster_id]
                if cluster_size < self.MinClusterSize:
                    # regard outliers as noise
                    for id_to_discard in current_cluster_bbox_ids:
                        cluster_labels[bbox_id_to_index[id_to_discard]] = insignificant_cluster_label
                        insignificant_cluster_label -= 1
                    break
                current_cluster_center = np.median(current_cluster_elements, axis=0)
                cluster_detection_confidences = self.DetectionConfidence[cluster_labels == cluster_id]
                distance_from_center, closest_to_center_idx = self.calculate_distances_from_cluster_center(
                    current_cluster_center, cluster_size, current_cluster_elements)
                id_to_sample_significance = dict()
                for d, c, bbox_id in zip(distance_from_center, cluster_detection_confidences, current_cluster_bbox_ids):
                    id_to_sample_significance[bbox_id] = cm.get_score(c, d)
                sig_scores = list(id_to_sample_significance.values())
                q25 = np.percentile(sig_scores, 25)
                q75 = np.percentile(sig_scores, 75)
                iqr = q75 - q25
                thresh = max(q25 - 1. * iqr, 0)
                ids_to_discard = [bbox_id for bbox_id in current_cluster_bbox_ids
                                  if id_to_sample_significance[bbox_id] < thresh]
                if len(ids_to_discard) == 0:
                    # Bug fix: record the medoid's bbox *id* (callers compare this
                    # list against self.IDS); the original appended the positional
                    # index inside the cluster instead.
                    best_bbox_ids.append(current_cluster_bbox_ids[closest_to_center_idx])
                    break
                # regard outliers as noise
                for id_to_discard in ids_to_discard:
                    cluster_labels[bbox_id_to_index[id_to_discard]] = insignificant_cluster_label
                    insignificant_cluster_label -= 1
                    discarded_thumbnailid = self.DetectionThumbnailId[bbox_id_to_index[id_to_discard]]
                    print(f'Filtered out bbox_id: {id_to_discard} with thumbnail: {discarded_thumbnailid} since it is'
                          f' an outlier of cluster: {cluster_id} with significance (or distance score from center) of: '
                          f'{id_to_sample_significance[id_to_discard]:.4f} where the threshold was: {thresh:.4f}')
        actual_best_k = len(set(lab for lab in cluster_labels if lab >= 0))
        return cluster_labels, best_bbox_ids, actual_best_k

    def keep_cluster_percentile(self, cluster_labels, best_bbox_ids):
        """Keep the largest clusters whose cumulative size fraction is within
        KeepClusterPercentile (always keeping at least 5 clusters); reassign the
        rest to unique negative labels."""
        if self.KeepClusterPercentile == 1.0:
            print('KeepClusterPercentile is 100%: No filtering by KeepClusterPercentile...')
            return cluster_labels, best_bbox_ids, len(set(lab for lab in cluster_labels if lab >= 0))
        smallest_label = min(cluster_labels)
        insignificant_cluster_label = -1 if smallest_label >= 0 else smallest_label - 1
        filtered_cluster_ids = set()
        id_to_cluster_label = dict(zip(self.IDS, cluster_labels))
        label_to_cluster_size = Counter(id_to_cluster_label.values())
        valid_cluster_stats = sorted([{'label': lab, 'size': cluster_size}
                                      for lab, cluster_size in label_to_cluster_size.items()
                                      if lab >= 0], key=lambda cs: cs['size'], reverse=True)
        new_cluster_labels = []
        total_valid_points = sum([vcs['size'] for vcs in valid_cluster_stats])
        cumsum_buffer = 0.
        for vcs in valid_cluster_stats:
            vcs['percentage'] = 1. * vcs['size'] / total_valid_points
            vcs['cumsum'] = vcs['percentage'] + cumsum_buffer
            vcs['is_valid'] = vcs['cumsum'] <= self.KeepClusterPercentile or len(valid_cluster_stats) <= 5
            cumsum_buffer += vcs['percentage']
        label_to_validity = dict([(vcs['label'], vcs['is_valid']) for vcs in valid_cluster_stats])
        for bbox_prediction_id in self.IDS:
            # in case of significant cluster do nothing
            if id_to_cluster_label[bbox_prediction_id] < 0 or \
                    label_to_validity[id_to_cluster_label[bbox_prediction_id]]:
                new_cluster_labels.append(int(id_to_cluster_label[bbox_prediction_id]))
                continue
            # update insignificant cluster
            if bbox_prediction_id in best_bbox_ids:
                best_bbox_ids.remove(bbox_prediction_id)
            # keep stats for telemetry
            filtered_cluster_ids.add(id_to_cluster_label[bbox_prediction_id])
            id_to_cluster_label[bbox_prediction_id] = insignificant_cluster_label
            new_cluster_labels.append(int(insignificant_cluster_label))
            insignificant_cluster_label -= 1
        n_filtered_clusters = len(filtered_cluster_ids)
        if n_filtered_clusters > 0:
            if n_filtered_clusters > 20:
                print(f'Filtered {n_filtered_clusters} clusters due to percentile cutoff.')
            else:
                print('The following clusters were filtered due to percentile cutoff: {}'.format(filtered_cluster_ids))
        actual_best_k = len(set([cl for bbox_id, cl in id_to_cluster_label.items() if cl >= 0]))
        return np.asarray(new_cluster_labels), best_bbox_ids, actual_best_k

    def should_consolidate_clusters(self, cluster_predictions):
        """Compute pairwise mean cosine similarity between valid clusters and
        propose up to n//2 merges whose similarity exceeds a data-driven cutoff
        (the 97.5th similarity quantile, clamped to [0.63, 0.69])."""
        valid_clusters = sorted(cid for cid in set(cluster_predictions) if cid >= 0)
        n = len(valid_clusters)
        if n <= 1:
            raise Exception('Something went wrong... Found a single (or no) cluster/s!')
        cluster_sim = np.zeros([n, n], dtype=float)
        features = self.FEATURES[cluster_predictions >= 0, :]
        cluster_ids = cluster_predictions[cluster_predictions >= 0]
        # (the original built an unused `partition` dict here; removed as dead code)
        for i in range(n):
            left_cluster_id = valid_clusters[i]
            left_cluster_feats = features[cluster_ids == left_cluster_id, :]
            for j in range(i+1, n):
                right_cluster_id = valid_clusters[j]
                right_cluster_feats = features[cluster_ids == right_cluster_id, :]
                cosine_sim = cosine_similarity(left_cluster_feats, right_cluster_feats)
                cluster_sim[i, j] = cosine_sim.mean()
                cluster_sim[j, i] = cosine_sim.mean()
        m_merges = n // 2
        top_3 = largest_indices(cluster_sim, 2*m_merges)
        couples_indices = []
        couples_sims = []
        top_percentile_cluster_distance = max(.63, min(.69, np.quantile(cluster_sim.flatten(), 0.975)))
        print(f'Using the 0.6 <= 99th percentile <= 0.7 as the merging cutoff: {top_percentile_cluster_distance:.4f}')
        for i in range(m_merges):
            left_index = top_3[0][2*i]
            right_index = top_3[0][2*i+1]
            clusters_cosine_similarity = cluster_sim[left_index, right_index]
            # take only cluster sim of more than the 99th percentile
            if clusters_cosine_similarity < top_percentile_cluster_distance:
                print(f'Skipping the merge of cluster: {valid_clusters[left_index]} with cluster:'
                      f' {valid_clusters[right_index]} due to similarity of: '
                      f'{clusters_cosine_similarity:.4f} < {top_percentile_cluster_distance:.4f}')
                continue
            couples_indices.append((valid_clusters[left_index], valid_clusters[right_index]))
            couples_sims.append(clusters_cosine_similarity)
        print(f'The top {m_merges} coupled clusters are {couples_indices} with similarities: {couples_sims}')
        print(f'[STATS]#5.5: Num consolidated clusters: {len(couples_indices)}')
        return couples_indices, couples_sims

    @staticmethod
    def merge_clusters(cluster_ids_to_merge: list, cluster_labels: np.ndarray) -> Tuple[np.ndarray, int]:
        """Relabel each group of to-be-merged clusters to the group's smallest
        cluster id; returns (relabeled array, number of valid clusters)."""
        if len(cluster_ids_to_merge) == 0:
            valid_clusters = set(c for c in cluster_labels if c >= 0)
            return cluster_labels, len(valid_clusters)
        grouped_cluster_ids = UnionFind.disjoint_sets(cluster_ids_to_merge)
        cluster_to_rep = dict()
        for c_group in grouped_cluster_ids:
            rep = min(c_group)
            for cluster_id in c_group:
                cluster_to_rep[cluster_id] = rep
        reassigned_cluster_labels = cluster_labels.copy()
        for i in range(cluster_labels.shape[0]):
            reassigned_cluster_labels[i] = cluster_to_rep.get(cluster_labels[i], cluster_labels[i])
        valid_grouped_clusters = set(c for c in reassigned_cluster_labels if c >= 0)
        return reassigned_cluster_labels, len(valid_grouped_clusters)
def largest_indices(ary, n):
    """Return the multi-dimensional indices of the n largest values of a numpy
    array, ordered from largest to smallest value."""
    flat = ary.ravel()
    top_unsorted = np.argpartition(flat, -n)[-n:]
    top_ordered = top_unsorted[np.argsort(-flat[top_unsorted])]
    return np.unravel_index(top_ordered, ary.shape)
def post_process_single_episode(eval_root, ser, role):
    """Re-cluster one series/role's character detections, write collage images
    and per-cluster group folders, and return a stats row (dict) for the
    summary dataframe."""
    # tuning knobs for the consolidator
    _min_cluster_sig = 0.725
    _keep_cluster_percentile = .975
    _min_cluster_size = 3
    ser_path = os.path.join(eval_root, '', ser)
    role_path = os.path.join(ser_path, '', role)
    detection_output_path = os.path.join(role_path, 'animationdetectionoutput.json')
    print('Series: {}, Role: {}'.format(ser, role))
    character_detections = CharacterDetectionOutput.read_from_json(detection_output_path)
    grouping_output_path = os.path.join(role_path, 'animationgroupingoutput.json')
    mapping = DetectionMapping.parse_index(detection_output_path, grouping_output_path)
    id_to_group = {mapp.Id: mapp.BoxesConsolidation for mapp in mapping}
    # detections present in only one of the two indices cannot be matched
    detection_id_set = set(d.ThumbnailId for d in character_detections.CharacterBoundingBoxes)
    grouping_id_set = set(m.ThumbnailId for m in mapping)
    xor_groups = (detection_id_set - grouping_id_set) | (grouping_id_set - detection_id_set)
    if len(xor_groups) > 0:
        print('The following ids are a mismatch between detection and grouping:\n{}. SKIPPING THEM!\n' \
              .format(xor_groups))
        # Bug fix: materialize the filter. The original assigned a lazy
        # `filter` generator, which was exhausted by the first iteration
        # below and left every later pass over CharacterBoundingBoxes empty.
        character_detections.CharacterBoundingBoxes = \
            list(filter(lambda detection: detection.ThumbnailId not in xor_groups,
                        character_detections.CharacterBoundingBoxes))
    cluster_labels = np.asarray([id_to_group[d.Id]
                                for d in character_detections.CharacterBoundingBoxes if d.Id in id_to_group])
    k_recluster = len(set([c for c in cluster_labels if c >= 0]))
    print('DBSCAN k={}'.format(k_recluster))
    mock_grouper = MockCharacterConsolidator(character_detections.CharacterBoundingBoxes, _min_cluster_sig,
                                              _keep_cluster_percentile, _min_cluster_size, cluster_labels)
    ids, cluster_labels, final_k, best_bbox_ids, _cluster_significance, _id_to_sample_significance = \
        mock_grouper.post_process_cluster_significance
    id_to_cluster_label = dict(zip(ids, cluster_labels))
    print('Results: k={}'.format(final_k))
    # cluster label -> significance, for the surviving (non-negative) clusters
    cluster_label_to_sig = {
        id_to_cluster_label[post_id]: _cluster_significance[post_id]
        if post_id in _cluster_significance
        else 0.
        for post_id in ids
        if id_to_cluster_label[post_id] >= 0
    }
    ordered_cluster_significances = sorted(cluster_label_to_sig.items(), key=lambda tup: tup[1],
                                           reverse=True)
    print(''.join(['ClusterId:{} -> Sig:{:.4f}\n'.format(t[0], t[1]) for t in ordered_cluster_significances]))
    # copy significant clusters collage
    significant_collage_repo = recreate_dir(role_path, 'significant_collages')
    groups_repo = recreate_dir(role_path, 'groups')
    source_detections_repo = os.path.join(role_path, 'animationdetectionoriginalimages')
    for cid, sig in ordered_cluster_significances:
        cluster_bbox_ids = ids[cluster_labels == cid]
        cluster_collage_thumbnail_ids = [detection.ThumbnailId for detection in
                                         character_detections.CharacterBoundingBoxes if
                                         detection.Id in cluster_bbox_ids]
        collage_images = [os.path.join(source_detections_repo, '{}.jpg'.format(bbox_thumb_id))
                          for bbox_thumb_id in cluster_collage_thumbnail_ids]
        cluster_collage_name = 'Cluster_{}Sig_{:.4f}'.format(cid, sig)
        target_collage_path = os.path.join(significant_collage_repo, '{}.jpg'.format(cluster_collage_name))
        create_collage(collage_images, target_collage_path)
        # also copy the raw detection crops into a per-cluster folder
        cluster_group_repo = create_dir_if_not_exist(groups_repo, f'cluster_{cid}')
        for source_det_im_path in collage_images:
            det_file_name = os.path.basename(source_det_im_path)
            dest_det_path = os.path.join(cluster_group_repo, det_file_name)
            shutil.copy(source_det_im_path, dest_det_path)
    # keep predictions in a dataframe
    num_bboxes = sum(1. for c in cluster_labels if c >= 0)
    avg_bbox_per_cluster = num_bboxes / final_k
    pred_row = dict(NumProposals_1=len(list(character_detections.CharacterBoundingBoxes)), InitialK_2=-1,
                    DbscanK_3=-1, ReClusterK_4=k_recluster, DicardedK_5=k_recluster - final_k, FinalK_6=final_k,
                    AvgNumProposalsPerCluster=avg_bbox_per_cluster, SeriesName=ser, Role=role, ValidBoxes=num_bboxes)
    print(f'[STATS]#6: Final N clusters: {len(ordered_cluster_significances)}')
    print(f'[STATS]#7: Num boxes per cluster: {num_bboxes/len(ordered_cluster_significances)}')
    return pred_row
def post_processor_main():
    """Run post-processing for every series/role under eval_root and dump a
    TSV of per-episode clustering stats."""
    eval_root = r'..\TripletsSeNet'
    stats_df_path = r'..\TripletsSeNet\GroupingStats\ClusteringStats.tsv'
    predictions_df = pd.DataFrame({'SeriesName': [], 'Role': [], 'NumProposals_1': [], 'InitialK_2': [],
                                   'DbscanK_3': [], 'ReClusterK_4': [], 'DicardedK_5': [], 'FinalK_6': [],
                                   'AvgNumProposalsPerCluster': []})
    # Bug fix: test the full path. The original called os.path.isdir(s) on the
    # bare entry name (relative to the CWD), which silently filtered out every
    # series whenever the CWD was not eval_root.
    series = [s for s in os.listdir(eval_root)
              if s not in ['Transformers', 'DextersLab', 'Cars']
              and os.path.isdir(os.path.join(eval_root, s))]
    for ser in series:
        for role in ['Training']:
            pred_row = post_process_single_episode(eval_root, ser, role)
            predictions_df = predictions_df.append(pred_row, ignore_index=True)
            print('Finished an episode...')
    predictions_df.to_csv(stats_df_path, header=True, sep='\t')
    return
# if __name__ == '__main__':
# post_processor_main()
# print('Done!')
| [
"numpy.argsort",
"Animator.consolidation_api.CharacterDetectionOutput.read_from_json",
"os.listdir",
"numpy.repeat",
"sklearn.metrics.pairwise.cosine_similarity",
"Animator.utils.recreate_dir",
"Utils.union_find.UnionFind.disjoint_sets",
"numpy.asarray",
"EvaluationUtils.descriptive_stats.create_col... | [((18743, 18779), 'numpy.unravel_index', 'np.unravel_index', (['indices', 'ary.shape'], {}), '(indices, ary.shape)\n', (18759, 18779), True, 'import numpy as np\n'), ((18943, 18975), 'os.path.join', 'os.path.join', (['eval_root', '""""""', 'ser'], {}), "(eval_root, '', ser)\n", (18955, 18975), False, 'import os\n'), ((18992, 19024), 'os.path.join', 'os.path.join', (['ser_path', '""""""', 'role'], {}), "(ser_path, '', role)\n", (19004, 19024), False, 'import os\n'), ((19053, 19109), 'os.path.join', 'os.path.join', (['role_path', '"""animationdetectionoutput.json"""'], {}), "(role_path, 'animationdetectionoutput.json')\n", (19065, 19109), False, 'import os\n'), ((19190, 19252), 'Animator.consolidation_api.CharacterDetectionOutput.read_from_json', 'CharacterDetectionOutput.read_from_json', (['detection_output_path'], {}), '(detection_output_path)\n', (19229, 19252), False, 'from Animator.consolidation_api import CharacterDetectionOutput\n'), ((19280, 19335), 'os.path.join', 'os.path.join', (['role_path', '"""animationgroupingoutput.json"""'], {}), "(role_path, 'animationgroupingoutput.json')\n", (19292, 19335), False, 'import os\n'), ((20131, 20243), 'numpy.asarray', 'np.asarray', (['[id_to_group[d.Id] for d in character_detections.CharacterBoundingBoxes if \n d.Id in id_to_group]'], {}), '([id_to_group[d.Id] for d in character_detections.\n CharacterBoundingBoxes if d.Id in id_to_group])\n', (20141, 20243), True, 'import numpy as np\n'), ((21428, 21475), 'Animator.utils.recreate_dir', 'recreate_dir', (['role_path', '"""significant_collages"""'], {}), "(role_path, 'significant_collages')\n", (21440, 21475), False, 'from Animator.utils import recreate_dir, create_dir_if_not_exist\n'), ((21494, 21527), 'Animator.utils.recreate_dir', 'recreate_dir', (['role_path', '"""groups"""'], {}), "(role_path, 'groups')\n", (21506, 21527), False, 'from Animator.utils import recreate_dir, create_dir_if_not_exist\n'), ((21558, 21617), 
'os.path.join', 'os.path.join', (['role_path', '"""animationdetectionoriginalimages"""'], {}), "(role_path, 'animationdetectionoriginalimages')\n", (21570, 21617), False, 'import os\n'), ((23552, 23750), 'pandas.DataFrame', 'pd.DataFrame', (["{'SeriesName': [], 'Role': [], 'NumProposals_1': [], 'InitialK_2': [],\n 'DbscanK_3': [], 'ReClusterK_4': [], 'DicardedK_5': [], 'FinalK_6': [],\n 'AvgNumProposalsPerCluster': []}"], {}), "({'SeriesName': [], 'Role': [], 'NumProposals_1': [],\n 'InitialK_2': [], 'DbscanK_3': [], 'ReClusterK_4': [], 'DicardedK_5': [\n ], 'FinalK_6': [], 'AvgNumProposalsPerCluster': []})\n", (23564, 23750), True, 'import pandas as pd\n'), ((850, 923), 'numpy.asarray', 'np.asarray', (['[detected_bbox.Features for detected_bbox in detected_bboxes]'], {}), '([detected_bbox.Features for detected_bbox in detected_bboxes])\n', (860, 923), True, 'import numpy as np\n'), ((1074, 1141), 'numpy.asarray', 'np.asarray', (['[detected_bbox.Id for detected_bbox in detected_bboxes]'], {}), '([detected_bbox.Id for detected_bbox in detected_bboxes])\n', (1084, 1141), True, 'import numpy as np\n'), ((1177, 1252), 'numpy.asarray', 'np.asarray', (['[detected_bbox.Confidence for detected_bbox in detected_bboxes]'], {}), '([detected_bbox.Confidence for detected_bbox in detected_bboxes])\n', (1187, 1252), True, 'import numpy as np\n'), ((1289, 1365), 'numpy.asarray', 'np.asarray', (['[detected_bbox.ThumbnailId for detected_bbox in detected_bboxes]'], {}), '([detected_bbox.ThumbnailId for detected_bbox in detected_bboxes])\n', (1299, 1365), True, 'import numpy as np\n'), ((4188, 4296), 'Animator.cluster_logic.ConsolidationEvaluator.unsupervised_evaluate_clusters', 'ConsolidationEvaluator.unsupervised_evaluate_clusters', (['self.FEATURES', 'cluster_labels', '"""OPTICS_ReCluster"""'], {}), "(self.FEATURES,\n cluster_labels, 'OPTICS_ReCluster')\n", (4241, 4296), False, 'from Animator.cluster_logic import ConsolidationEvaluator\n'), ((8793, 8823), 'numpy.argmin', 
'np.argmin', (['l2_norm_with_center'], {}), '(l2_norm_with_center)\n', (8802, 8823), True, 'import numpy as np\n'), ((8856, 8886), 'numpy.argmax', 'np.argmax', (['l2_norm_with_center'], {}), '(l2_norm_with_center)\n', (8865, 8886), True, 'import numpy as np\n'), ((15275, 15304), 'numpy.zeros', 'np.zeros', (['[n, n]'], {'dtype': 'float'}), '([n, n], dtype=float)\n', (15283, 15304), True, 'import numpy as np\n'), ((17918, 17963), 'Utils.union_find.UnionFind.disjoint_sets', 'UnionFind.disjoint_sets', (['cluster_ids_to_merge'], {}), '(cluster_ids_to_merge)\n', (17941, 17963), False, 'from Utils.union_find import UnionFind\n'), ((18651, 18676), 'numpy.argpartition', 'np.argpartition', (['flat', '(-n)'], {}), '(flat, -n)\n', (18666, 18676), True, 'import numpy as np\n'), ((18704, 18730), 'numpy.argsort', 'np.argsort', (['(-flat[indices])'], {}), '(-flat[indices])\n', (18714, 18730), True, 'import numpy as np\n'), ((22326, 22377), 'EvaluationUtils.descriptive_stats.create_collage', 'create_collage', (['collage_images', 'target_collage_path'], {}), '(collage_images, target_collage_path)\n', (22340, 22377), False, 'from EvaluationUtils.descriptive_stats import create_collage\n'), ((22407, 22461), 'Animator.utils.create_dir_if_not_exist', 'create_dir_if_not_exist', (['groups_repo', 'f"""cluster_{cid}"""'], {}), "(groups_repo, f'cluster_{cid}')\n", (22430, 22461), False, 'from Animator.utils import recreate_dir, create_dir_if_not_exist\n'), ((8321, 8347), 'numpy.asarray', 'np.asarray', (['cluster_labels'], {}), '(cluster_labels)\n', (8331, 8347), True, 'import numpy as np\n'), ((14829, 14859), 'numpy.asarray', 'np.asarray', (['new_cluster_labels'], {}), '(new_cluster_labels)\n', (14839, 14859), True, 'import numpy as np\n'), ((22540, 22576), 'os.path.basename', 'os.path.basename', (['source_det_im_path'], {}), '(source_det_im_path)\n', (22556, 22576), False, 'import os\n'), ((22605, 22652), 'os.path.join', 'os.path.join', (['cluster_group_repo', 'det_file_name'], {}), 
'(cluster_group_repo, det_file_name)\n', (22617, 22652), False, 'import os\n'), ((22665, 22711), 'shutil.copy', 'shutil.copy', (['source_det_im_path', 'dest_det_path'], {}), '(source_det_im_path, dest_det_path)\n', (22676, 22711), False, 'import shutil\n'), ((23837, 23858), 'os.listdir', 'os.listdir', (['eval_root'], {}), '(eval_root)\n', (23847, 23858), False, 'import os\n'), ((6067, 6191), 'numpy.asarray', 'np.asarray', (['[id_to_sample_significance[bbox_id_current_cluster] for\n bbox_id_current_cluster in current_cluster_bbox_ids]'], {}), '([id_to_sample_significance[bbox_id_current_cluster] for\n bbox_id_current_cluster in current_cluster_bbox_ids])\n', (6077, 6191), True, 'import numpy as np\n'), ((6269, 6290), 'numpy.median', 'np.median', (['sig_scores'], {}), '(sig_scores)\n', (6278, 6290), True, 'import numpy as np\n'), ((8581, 8598), 'numpy.asarray', 'np.asarray', (['[0.0]'], {}), '([0.0])\n', (8591, 8598), True, 'import numpy as np\n'), ((8600, 8615), 'numpy.asarray', 'np.asarray', (['[0]'], {}), '([0])\n', (8610, 8615), True, 'import numpy as np\n'), ((8667, 8724), 'numpy.repeat', 'np.repeat', (['[current_cluster_center]', 'cluster_size'], {'axis': '(0)'}), '([current_cluster_center], cluster_size, axis=0)\n', (8676, 8724), True, 'import numpy as np\n'), ((10091, 10134), 'numpy.median', 'np.median', (['current_cluster_elements'], {'axis': '(0)'}), '(current_cluster_elements, axis=0)\n', (10100, 10134), True, 'import numpy as np\n'), ((10773, 10802), 'numpy.percentile', 'np.percentile', (['sig_scores', '(25)'], {}), '(sig_scores, 25)\n', (10786, 10802), True, 'import numpy as np\n'), ((10825, 10854), 'numpy.percentile', 'np.percentile', (['sig_scores', '(75)'], {}), '(sig_scores, 75)\n', (10838, 10854), True, 'import numpy as np\n'), ((15942, 16000), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['left_cluster_feats', 'right_cluster_feats'], {}), '(left_cluster_feats, right_cluster_feats)\n', (15959, 16000), False, 'from 
sklearn.metrics.pairwise import cosine_similarity\n'), ((23914, 23930), 'os.path.isdir', 'os.path.isdir', (['s'], {}), '(s)\n', (23927, 23930), False, 'import os\n'), ((5183, 5226), 'numpy.median', 'np.median', (['current_cluster_elements'], {'axis': '(0)'}), '(current_cluster_elements, axis=0)\n', (5192, 5226), True, 'import numpy as np\n'), ((6018, 6036), 'Animator.clustering_methods.get_score', 'cm.get_score', (['c', 'd'], {}), '(c, d)\n', (6030, 6036), True, 'import Animator.clustering_methods as cm\n'), ((10661, 10679), 'Animator.clustering_methods.get_score', 'cm.get_score', (['c', 'd'], {}), '(c, d)\n', (10673, 10679), True, 'import Animator.clustering_methods as cm\n')] |
from ennemi import estimate_entropy, estimate_mi, pairwise_mi
import numpy as np
from matplotlib import rcParams
import matplotlib.pyplot as plt
import pandas as pd
rcParams["lines.markersize"] = 12
N = 200
week = np.arange(N)
rng = np.random.default_rng(1234)
# The weather is completely determined by temperature, air pressure and wind
# NOTE: This is not a realistic weather model! :)
actual_temp = 15 + 5*np.sin(week / 8) + rng.normal(0, 3, N)
actual_press = 1000 + 30*np.sin(week / 3) + rng.normal(0, 4, N)
wind_dir = rng.choice(["N", "E", "S", "W"], N, p=[0.15, 0.15, 0.4, 0.3])
weather = np.full(N, "cloudy")
weather[(actual_press > 1015) & (actual_temp > 18)] = "clear"
weather[(weather=="cloudy") & (wind_dir=="W")] = "rainy"
weather[(weather=="cloudy") & (wind_dir=="S") & rng.choice([0,1], N)] = "rainy"
# The measurements for these are not accurate either
temp = np.round(actual_temp + rng.normal(0, 1, N))
press = np.round(actual_press + rng.normal(0, 1, N))
# Create a pandas data frame out of the measurements
data = pd.DataFrame({"Weather": weather, "Temp": temp, "Press": press, "Wind": wind_dir})
print("Sample of the data:")
print(data)
# Plot the weather for one "year"
for (forecast, marker, color) in [("cloudy", "$\u2601$", "gray"),
("clear", "$\u2600$", "orange"),
("rainy", "$\u2602$", "blue")]:
plt.scatter(week[weather==forecast], temp[weather==forecast],
marker=marker, color=color)
plt.title("Weather for Entropyville")
plt.xlabel("Week")
plt.ylabel("Temperature")
plt.xlim((0, 50))
plt.savefig("discrete_temp_weather.png", transparent=True)
#
# Fix-up step for pandas DataFrames
#
print("\nFix up data")
# Not the most optimal code, but sufficient in small example
data2 = data.drop(columns=["Weather", "Wind"])
data2["Wind"] = 0
data2.loc[data["Wind"] == "E", "Wind"] = 1
data2.loc[data["Wind"] == "S", "Wind"] = 2
data2.loc[data["Wind"] == "W", "Wind"] = 3
data2["Weather"] = 0
data2.loc[data["Weather"] == "cloudy", "Weather"] = 1
data2.loc[data["Weather"] == "clear", "Weather"] = 2
print(data2)
print(data2.dtypes)
#
# Correlation between continuous variables and weather
#
print("\MI between continuous variables and weather")
print(estimate_mi(data2["Weather"], data2[["Temp", "Press"]], discrete_y=True))
print("Entropy of Weather")
print(estimate_entropy(data2["Weather"], discrete=True))
#
# Conditioning on temperature
#
print("\nConditioned on temperature")
print(estimate_mi(data2["Weather"], data2["Press"], cond=data2["Temp"], discrete_y=True))
#
# Wind
#
print("\nMI between wind and weather")
print(estimate_mi(data2["Weather"], data2["Wind"], discrete_y=True, discrete_x=True))
print("MI between wind and continuous variables")
print(estimate_mi(data2["Wind"], data2[["Temp", "Press"]], discrete_y=True))
# Uncomment to get a warning
#print("\nConditioned on temperature and pressure")
#print(estimate_mi(data2["Weather"], data2["Wind"],
# cond=data2[["Temp","Press"]], discrete_y=True, discrete_x=True))
#
# Pairwise MI
#
print("\nPairwise MI")
print(pairwise_mi(data2, discrete=[False, False, True, True]))
| [
"ennemi.pairwise_mi",
"matplotlib.pyplot.title",
"numpy.random.default_rng",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"ennemi.estimate_mi",
"numpy.sin",
"matplotlib.pyplot.xlabel",
"ennemi.estimate_entropy",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"numpy.full",
"matp... | [((216, 228), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (225, 228), True, 'import numpy as np\n'), ((235, 262), 'numpy.random.default_rng', 'np.random.default_rng', (['(1234)'], {}), '(1234)\n', (256, 262), True, 'import numpy as np\n'), ((599, 619), 'numpy.full', 'np.full', (['N', '"""cloudy"""'], {}), "(N, 'cloudy')\n", (606, 619), True, 'import numpy as np\n'), ((1038, 1124), 'pandas.DataFrame', 'pd.DataFrame', (["{'Weather': weather, 'Temp': temp, 'Press': press, 'Wind': wind_dir}"], {}), "({'Weather': weather, 'Temp': temp, 'Press': press, 'Wind':\n wind_dir})\n", (1050, 1124), True, 'import pandas as pd\n'), ((1499, 1536), 'matplotlib.pyplot.title', 'plt.title', (['"""Weather for Entropyville"""'], {}), "('Weather for Entropyville')\n", (1508, 1536), True, 'import matplotlib.pyplot as plt\n'), ((1537, 1555), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Week"""'], {}), "('Week')\n", (1547, 1555), True, 'import matplotlib.pyplot as plt\n'), ((1556, 1581), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Temperature"""'], {}), "('Temperature')\n", (1566, 1581), True, 'import matplotlib.pyplot as plt\n'), ((1582, 1599), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, 50)'], {}), '((0, 50))\n', (1590, 1599), True, 'import matplotlib.pyplot as plt\n'), ((1600, 1658), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""discrete_temp_weather.png"""'], {'transparent': '(True)'}), "('discrete_temp_weather.png', transparent=True)\n", (1611, 1658), True, 'import matplotlib.pyplot as plt\n'), ((1400, 1498), 'matplotlib.pyplot.scatter', 'plt.scatter', (['week[weather == forecast]', 'temp[weather == forecast]'], {'marker': 'marker', 'color': 'color'}), '(week[weather == forecast], temp[weather == forecast], marker=\n marker, color=color)\n', (1411, 1498), True, 'import matplotlib.pyplot as plt\n'), ((2267, 2339), 'ennemi.estimate_mi', 'estimate_mi', (["data2['Weather']", "data2[['Temp', 'Press']]"], {'discrete_y': '(True)'}), "(data2['Weather'], 
data2[['Temp', 'Press']], discrete_y=True)\n", (2278, 2339), False, 'from ennemi import estimate_entropy, estimate_mi, pairwise_mi\n'), ((2375, 2424), 'ennemi.estimate_entropy', 'estimate_entropy', (["data2['Weather']"], {'discrete': '(True)'}), "(data2['Weather'], discrete=True)\n", (2391, 2424), False, 'from ennemi import estimate_entropy, estimate_mi, pairwise_mi\n'), ((2507, 2593), 'ennemi.estimate_mi', 'estimate_mi', (["data2['Weather']", "data2['Press']"], {'cond': "data2['Temp']", 'discrete_y': '(True)'}), "(data2['Weather'], data2['Press'], cond=data2['Temp'],\n discrete_y=True)\n", (2518, 2593), False, 'from ennemi import estimate_entropy, estimate_mi, pairwise_mi\n'), ((2649, 2727), 'ennemi.estimate_mi', 'estimate_mi', (["data2['Weather']", "data2['Wind']"], {'discrete_y': '(True)', 'discrete_x': '(True)'}), "(data2['Weather'], data2['Wind'], discrete_y=True, discrete_x=True)\n", (2660, 2727), False, 'from ennemi import estimate_entropy, estimate_mi, pairwise_mi\n'), ((2785, 2854), 'ennemi.estimate_mi', 'estimate_mi', (["data2['Wind']", "data2[['Temp', 'Press']]"], {'discrete_y': '(True)'}), "(data2['Wind'], data2[['Temp', 'Press']], discrete_y=True)\n", (2796, 2854), False, 'from ennemi import estimate_entropy, estimate_mi, pairwise_mi\n'), ((3111, 3166), 'ennemi.pairwise_mi', 'pairwise_mi', (['data2'], {'discrete': '[False, False, True, True]'}), '(data2, discrete=[False, False, True, True])\n', (3122, 3166), False, 'from ennemi import estimate_entropy, estimate_mi, pairwise_mi\n'), ((412, 428), 'numpy.sin', 'np.sin', (['(week / 8)'], {}), '(week / 8)\n', (418, 428), True, 'import numpy as np\n'), ((476, 492), 'numpy.sin', 'np.sin', (['(week / 3)'], {}), '(week / 3)\n', (482, 492), True, 'import numpy as np\n')] |
'''
Class to process UMAP and HDBSCAN together
@dlegor
'''
from pandas.core.base import DataError
import pandas as pd
import numpy as np
import umap
import hdbscan
__all__=['Embedding_Output']
class Embedding_Output:
'''
Class to estimate the embedding
and detect the outliers from the embedding
'''
def __init__(self,n_neighbors:int=15,min_dist:float=0.1,
n_components:int=2,metric_umap:str='euclidean',metric_hdb:str='euclidean',
min_cluster_size:int=15,min_sample:int=5,get_embedding:bool=True,get_outliers:bool=True,quantile_limit:float=0.9,
output:bool=False):
self.n_neighbors=n_neighbors
self.min_dist=min_dist
self.n_components=n_components
self.metric_umap = metric_umap
self.metric_hdb=metric_hdb
self.min_cluster_size=min_cluster_size
self.min_sample=min_sample
self.get_embedding=get_embedding
self.get_outliers=get_outliers
self.quantile_limit=quantile_limit
self.output=output
# self.file_output=file_output
def _validation_data(self,X):
if all(X.select_dtypes('float').dtypes==float):
pass
else:
raise DataError
def fit(self,X):
self._validation_data(X)
#UMAP
hyperbolic_mapper = umap.UMAP(output_metric='hyperboloid',
metric=self.metric_umap,n_neighbors=self.n_neighbors,
min_dist=self.min_dist,n_components=self.n_components,
random_state=42).fit(X)
self._shape_embedding=hyperbolic_mapper.embedding_.shape
#HDBSCAN
clusterer = hdbscan.HDBSCAN(min_samples=self.min_sample,
min_cluster_size=self.min_cluster_size,
metric=self.metric_hdb).fit(hyperbolic_mapper.embedding_)
#Outliers
threshold = pd.Series(clusterer.outlier_scores_).quantile(self.quantile_limit)
outliers = np.where(clusterer.outlier_scores_ > threshold)[0]
if self.output:
return hyperbolic_mapper.embedding_,X.index.isin(outliers).astype(int),clusterer.labels_
def __repr__(self) -> str:
print('Embedding_Outliers')
| [
"numpy.where",
"pandas.Series",
"umap.UMAP",
"hdbscan.HDBSCAN"
] | [((1929, 1976), 'numpy.where', 'np.where', (['(clusterer.outlier_scores_ > threshold)'], {}), '(clusterer.outlier_scores_ > threshold)\n', (1937, 1976), True, 'import numpy as np\n'), ((1335, 1511), 'umap.UMAP', 'umap.UMAP', ([], {'output_metric': '"""hyperboloid"""', 'metric': 'self.metric_umap', 'n_neighbors': 'self.n_neighbors', 'min_dist': 'self.min_dist', 'n_components': 'self.n_components', 'random_state': '(42)'}), "(output_metric='hyperboloid', metric=self.metric_umap, n_neighbors\n =self.n_neighbors, min_dist=self.min_dist, n_components=self.\n n_components, random_state=42)\n", (1344, 1511), False, 'import umap\n'), ((1637, 1750), 'hdbscan.HDBSCAN', 'hdbscan.HDBSCAN', ([], {'min_samples': 'self.min_sample', 'min_cluster_size': 'self.min_cluster_size', 'metric': 'self.metric_hdb'}), '(min_samples=self.min_sample, min_cluster_size=self.\n min_cluster_size, metric=self.metric_hdb)\n', (1652, 1750), False, 'import hdbscan\n'), ((1843, 1879), 'pandas.Series', 'pd.Series', (['clusterer.outlier_scores_'], {}), '(clusterer.outlier_scores_)\n', (1852, 1879), True, 'import pandas as pd\n')] |
import unittest
from os.path import dirname, join
from pathlib import Path
import numpy as np
from jmetal.core.quality_indicator import GenerationalDistance, InvertedGenerationalDistance, EpsilonIndicator, \
HyperVolume
class GenerationalDistanceTestCases(unittest.TestCase):
""" Class including unit tests for class GenerationalDistance
"""
def test_should_constructor_create_a_non_null_object(self) -> None:
indicator = GenerationalDistance([])
self.assertIsNotNone(indicator)
def test_get_name_return_the_right_value(self):
self.assertEqual("Generational Distance", GenerationalDistance([]).get_name())
def test_get_short_name_return_the_right_value(self):
self.assertEqual("GD", GenerationalDistance([]).get_short_name())
def test_case1(self):
"""
Case 1. Reference front: [[1.0, 1.0]], front: [[1.0, 1.0]]
Expected result: the distance to the nearest point of the reference front is 0.0
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0]]))
front = np.array([[1.0, 1.0]])
result = indicator.compute(front)
self.assertEqual(0.0, result)
def test_case2(self):
"""
Case 2. Reference front: [[1.0, 1.0], [2.0, 2.0], front: [[1.0, 1.0]]
Expected result: the distance to the nearest point of the reference front is 0.0
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0], [2.0, 2.0]]))
front = np.array([[1.0, 1.0]])
result = indicator.compute(front)
self.assertEqual(0.0, result)
def test_case3(self):
"""
Case 3. Reference front: [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], front: [[1.0, 1.0, 1.0]]
Expected result: the distance to the nearest point of the reference front is 0.0. Example with three objectives
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]))
front = np.array([[1.0, 1.0, 1.0]])
result = indicator.compute(front)
self.assertEqual(0.0, result)
def test_case4(self):
"""
Case 4. reference front: [[1.0, 1.0], [2.0, 2.0]], front: [[1.5, 1.5]]
Expected result: the distance to the nearest point of the reference front is the euclidean distance to any of the
points of the reference front
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0], [2.0, 2.0]]))
front = np.array([[1.5, 1.5]])
result = indicator.compute(front)
self.assertEqual(np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2)), result)
self.assertEqual(np.sqrt(pow(2.0 - 1.5, 2) + pow(2.0 - 1.5, 2)), result)
def test_case5(self):
"""
Case 5. reference front: [[1.0, 1.0], [2.1, 2.1]], front: [[1.5, 1.5]]
Expected result: the distance to the nearest point of the reference front is the euclidean distance
to the nearest point of the reference front ([1.0, 1.0])
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0], [2.1, 2.1]]))
front = np.array([[1.5, 1.5]])
result = indicator.compute(front)
self.assertEqual(np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2)), result)
self.assertEqual(np.sqrt(pow(2.0 - 1.5, 2) + pow(2.0 - 1.5, 2)), result)
def test_case6(self):
"""
Case 6. reference front: [[1.0, 1.0], [2.1, 2.1]], front: [[1.5, 1.5], [2.2, 2.2]]
Expected result: the distance to the nearest point of the reference front is the average of the sum of each point
of the front to the nearest point of the reference front
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0], [2.1, 2.1]]))
front = np.array([[1.5, 1.5], [2.2, 2.2]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2))
distance_of_second_point = np.sqrt(pow(2.1 - 2.2, 2) + pow(2.1 - 2.2, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point) / 2.0, result)
def test_case7(self):
"""
Case 7. reference front: [[1.0, 1.0], [2.1, 2.1]], front: [[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]]
Expected result: the distance to the nearest point of the reference front is the sum of each point of the front to the
nearest point of the reference front
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0], [2.1, 2.1]]))
front = np.array([[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2))
distance_of_second_point = np.sqrt(pow(2.1 - 2.2, 2) + pow(2.1 - 2.2, 2))
distance_of_third_point = np.sqrt(pow(2.1 - 1.9, 2) + pow(2.1 - 1.9, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point + distance_of_third_point) / 3.0, result)
class InvertedGenerationalDistanceTestCases(unittest.TestCase):
""" Class including unit tests for class InvertedGenerationalDistance
"""
def test_should_constructor_create_a_non_null_object(self) -> None:
indicator = InvertedGenerationalDistance([])
self.assertIsNotNone(indicator)
def test_get_name_return_the_right_value(self):
self.assertEqual("Inverted Generational Distance", InvertedGenerationalDistance([]).get_name())
def test_get_short_name_return_the_right_value(self):
self.assertEqual("IGD", InvertedGenerationalDistance([]).get_short_name())
def test_case1(self):
"""
Case 1. Reference front: [[1.0, 1.0]], front: [[1.0, 1.0]]
Expected result = 0.0
Comment: simplest case
:return:
"""
indicator = InvertedGenerationalDistance(np.array([[1.0, 1.0]]))
front = np.array([[1.0, 1.0]])
result = indicator.compute(front)
self.assertEqual(0.0, result)
def test_case2(self):
"""
Case 2. Reference front: [[1.0, 1.0], [2.0, 2.0], front: [[1.0, 1.0]]
Expected result: average of the sum of the distances of the points of the reference front to the front
:return:
"""
indicator = InvertedGenerationalDistance(np.array([[1.0, 1.0], [2.0, 2.0]]))
front = np.array([[1.0, 1.0]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.0, 2) + pow(1.0 - 1.0, 2))
distance_of_second_point = np.sqrt(pow(2.0 - 1.0, 2) + pow(2.0 - 1.0, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point) / 2.0, result)
def test_case3(self):
"""
Case 3. Reference front: [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], front: [[1.0, 1.0, 1.0]]
Expected result: average of the sum of the distances of the points of the reference front to the front.
Example with three objectives
:return:
"""
indicator = InvertedGenerationalDistance(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]))
front = np.array([[1.0, 1.0, 1.0]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.0, 2) + pow(1.0 - 1.0, 2) + pow(1.0 - 1.0, 2))
distance_of_second_point = np.sqrt(pow(2.0 - 1.0, 2) + pow(2.0 - 1.0, 2) + pow(2.0 - 1.0, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point) / 2.0, result)
def test_case4(self):
"""
Case 4. reference front: [[1.0, 1.0], [2.1, 2.1]], front: [[1.5, 1.5], [2.2, 2.2]]
Expected result: average of the sum of the distances of the points of the reference front to the front.
Example with three objectives
:return:
"""
indicator = InvertedGenerationalDistance(np.array([[1.0, 1.0], [2.1, 2.1]]))
front = np.array([[1.5, 1.5], [2.2, 2.2]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2))
distance_of_second_point = np.sqrt(pow(2.1 - 2.2, 2) + pow(2.1 - 2.2, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point) / 2.0, result)
def test_case5(self):
"""
Case 5. reference front: [[1.0, 1.0], [2.1, 2.1]], front: [[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]]
Expected result: average of the sum of the distances of the points of the reference front to the front.
Example with three objectives
:return:
"""
indicator = InvertedGenerationalDistance(np.array([[1.0, 1.0], [2.0, 2.0]]))
front = np.array([[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2))
distance_of_second_point = np.sqrt(pow(2.0 - 1.9, 2) + pow(2.0 - 1.9, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point) / 2.0, result)
class EpsilonIndicatorTestCases(unittest.TestCase):
""" Class including unit tests for class EpsilonIndicator
"""
def test_should_constructor_create_a_non_null_object(self) -> None:
indicator = EpsilonIndicator(np.array([[1.0, 1.0], [2.0, 2.0]]))
self.assertIsNotNone(indicator)
class HyperVolumeTestCases(unittest.TestCase):
def setUp(self):
self.file_path = dirname(join(dirname(__file__)))
def test_should_hypervolume_return_5_0(self):
reference_point = [2, 2, 2]
front = np.array([[1, 0, 1], [0, 1, 0]])
hv = HyperVolume(reference_point)
value = hv.compute(front)
self.assertEqual(5.0, value)
def test_should_hypervolume_return_the_correct_value_when_applied_to_the_ZDT1_reference_front(self):
filename = 'jmetal/core/test/ZDT1.pf'
front = []
if Path(filename).is_file():
with open(filename) as file:
for line in file:
vector = [float(x) for x in line.split()]
front.append(vector)
else:
print("error")
reference_point = [1, 1]
hv = HyperVolume(reference_point)
value = hv.compute(np.array(front))
self.assertAlmostEqual(0.666, value, delta=0.001)
if __name__ == '__main__':
unittest.main()
| [
"pathlib.Path",
"jmetal.core.quality_indicator.InvertedGenerationalDistance",
"numpy.array",
"os.path.dirname",
"jmetal.core.quality_indicator.GenerationalDistance",
"jmetal.core.quality_indicator.HyperVolume",
"unittest.main"
] | [((10395, 10410), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10408, 10410), False, 'import unittest\n'), ((450, 474), 'jmetal.core.quality_indicator.GenerationalDistance', 'GenerationalDistance', (['[]'], {}), '([])\n', (470, 474), False, 'from jmetal.core.quality_indicator import GenerationalDistance, InvertedGenerationalDistance, EpsilonIndicator, HyperVolume\n'), ((1094, 1116), 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (1102, 1116), True, 'import numpy as np\n'), ((1528, 1550), 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (1536, 1550), True, 'import numpy as np\n'), ((2019, 2046), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0]])\n', (2027, 2046), True, 'import numpy as np\n'), ((2530, 2552), 'numpy.array', 'np.array', (['[[1.5, 1.5]]'], {}), '([[1.5, 1.5]])\n', (2538, 2552), True, 'import numpy as np\n'), ((3173, 3195), 'numpy.array', 'np.array', (['[[1.5, 1.5]]'], {}), '([[1.5, 1.5]])\n', (3181, 3195), True, 'import numpy as np\n'), ((3842, 3876), 'numpy.array', 'np.array', (['[[1.5, 1.5], [2.2, 2.2]]'], {}), '([[1.5, 1.5], [2.2, 2.2]])\n', (3850, 3876), True, 'import numpy as np\n'), ((4614, 4660), 'numpy.array', 'np.array', (['[[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]]'], {}), '([[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]])\n', (4622, 4660), True, 'import numpy as np\n'), ((5309, 5341), 'jmetal.core.quality_indicator.InvertedGenerationalDistance', 'InvertedGenerationalDistance', (['[]'], {}), '([])\n', (5337, 5341), False, 'from jmetal.core.quality_indicator import GenerationalDistance, InvertedGenerationalDistance, EpsilonIndicator, HyperVolume\n'), ((5967, 5989), 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (5975, 5989), True, 'import numpy as np\n'), ((6431, 6453), 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (6439, 6453), True, 'import numpy as np\n'), ((7179, 7206), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0]]'], {}), 
'([[1.0, 1.0, 1.0]])\n', (7187, 7206), True, 'import numpy as np\n'), ((7959, 7993), 'numpy.array', 'np.array', (['[[1.5, 1.5], [2.2, 2.2]]'], {}), '([[1.5, 1.5], [2.2, 2.2]])\n', (7967, 7993), True, 'import numpy as np\n'), ((8717, 8763), 'numpy.array', 'np.array', (['[[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]]'], {}), '([[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]])\n', (8725, 8763), True, 'import numpy as np\n'), ((9607, 9639), 'numpy.array', 'np.array', (['[[1, 0, 1], [0, 1, 0]]'], {}), '([[1, 0, 1], [0, 1, 0]])\n', (9615, 9639), True, 'import numpy as np\n'), ((9654, 9682), 'jmetal.core.quality_indicator.HyperVolume', 'HyperVolume', (['reference_point'], {}), '(reference_point)\n', (9665, 9682), False, 'from jmetal.core.quality_indicator import GenerationalDistance, InvertedGenerationalDistance, EpsilonIndicator, HyperVolume\n'), ((10230, 10258), 'jmetal.core.quality_indicator.HyperVolume', 'HyperVolume', (['reference_point'], {}), '(reference_point)\n', (10241, 10258), False, 'from jmetal.core.quality_indicator import GenerationalDistance, InvertedGenerationalDistance, EpsilonIndicator, HyperVolume\n'), ((1054, 1076), 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (1062, 1076), True, 'import numpy as np\n'), ((1476, 1510), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0]]'], {}), '([[1.0, 1.0], [2.0, 2.0]])\n', (1484, 1510), True, 'import numpy as np\n'), ((1957, 2001), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]'], {}), '([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])\n', (1965, 2001), True, 'import numpy as np\n'), ((2478, 2512), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0]]'], {}), '([[1.0, 1.0], [2.0, 2.0]])\n', (2486, 2512), True, 'import numpy as np\n'), ((3121, 3155), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.1, 2.1]]'], {}), '([[1.0, 1.0], [2.1, 2.1]])\n', (3129, 3155), True, 'import numpy as np\n'), ((3790, 3824), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.1, 2.1]]'], {}), '([[1.0, 1.0], [2.1, 2.1]])\n', (3798, 
3824), True, 'import numpy as np\n'), ((4562, 4596), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.1, 2.1]]'], {}), '([[1.0, 1.0], [2.1, 2.1]])\n', (4570, 4596), True, 'import numpy as np\n'), ((5927, 5949), 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (5935, 5949), True, 'import numpy as np\n'), ((6379, 6413), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0]]'], {}), '([[1.0, 1.0], [2.0, 2.0]])\n', (6387, 6413), True, 'import numpy as np\n'), ((7117, 7161), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]'], {}), '([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])\n', (7125, 7161), True, 'import numpy as np\n'), ((7907, 7941), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.1, 2.1]]'], {}), '([[1.0, 1.0], [2.1, 2.1]])\n', (7915, 7941), True, 'import numpy as np\n'), ((8665, 8699), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0]]'], {}), '([[1.0, 1.0], [2.0, 2.0]])\n', (8673, 8699), True, 'import numpy as np\n'), ((9298, 9332), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0]]'], {}), '([[1.0, 1.0], [2.0, 2.0]])\n', (9306, 9332), True, 'import numpy as np\n'), ((10286, 10301), 'numpy.array', 'np.array', (['front'], {}), '(front)\n', (10294, 10301), True, 'import numpy as np\n'), ((9483, 9500), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (9490, 9500), False, 'from os.path import dirname, join\n'), ((9937, 9951), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (9941, 9951), False, 'from pathlib import Path\n'), ((618, 642), 'jmetal.core.quality_indicator.GenerationalDistance', 'GenerationalDistance', (['[]'], {}), '([])\n', (638, 642), False, 'from jmetal.core.quality_indicator import GenerationalDistance, InvertedGenerationalDistance, EpsilonIndicator, HyperVolume\n'), ((745, 769), 'jmetal.core.quality_indicator.GenerationalDistance', 'GenerationalDistance', (['[]'], {}), '([])\n', (765, 769), False, 'from jmetal.core.quality_indicator import GenerationalDistance, 
InvertedGenerationalDistance, EpsilonIndicator, HyperVolume\n'), ((5494, 5526), 'jmetal.core.quality_indicator.InvertedGenerationalDistance', 'InvertedGenerationalDistance', (['[]'], {}), '([])\n', (5522, 5526), False, 'from jmetal.core.quality_indicator import GenerationalDistance, InvertedGenerationalDistance, EpsilonIndicator, HyperVolume\n'), ((5630, 5662), 'jmetal.core.quality_indicator.InvertedGenerationalDistance', 'InvertedGenerationalDistance', (['[]'], {}), '([])\n', (5658, 5662), False, 'from jmetal.core.quality_indicator import GenerationalDistance, InvertedGenerationalDistance, EpsilonIndicator, HyperVolume\n')] |
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
from utils import sigmoid
class Model:
    """Base container for a named network architecture.

    Stores an identifier together with the hidden-layer count (depth)
    and the number of units per hidden layer (width), which subclasses
    use later when building their TensorFlow graphs.
    """

    def __init__(self, name, depth, width):
        # Keep the architecture description on the instance.
        self.name = name
        self.depth = depth
        self.width = width
class Classifier(Model):
    """Base class for binary classifiers (two output classes)."""

    def __init__(self, name, depth=2, width=20):
        super().__init__(name, depth, width)
        # All classifiers in this module are binary.
        self.n_classes = 2

    @classmethod
    def create(cls, name, clf_settings):
        """Factory: build the classifier described by ``clf_settings``.

        ``clf_settings`` must contain a ``'type'`` key; every other entry
        is forwarded to the chosen classifier's constructor.
        Raises ValueError for an unknown type.
        """
        clf_type = clf_settings['type']
        registry = {'Linear': LinearClassifier,
                    'DNN': DNNClassifier}
        # Reject types we have no implementation for.
        if clf_type not in registry:
            raise ValueError('Unknown Classifier type {}.'.format(clf_type))
        # Forward every setting except 'type' to the constructor.
        kwargs = {key: val for key, val in clf_settings.items() if key != 'type'}
        return registry[clf_type](name='{}_{}_clf'.format(name, clf_type), **kwargs)
class DNNClassifier(Classifier):
    """Fully connected feed-forward binary classifier (TF1 graph mode)."""

    def __init__(self, name, depth, width):
        super().__init__(name, depth, width)

    def build_forward(self, x_in):
        """Build the forward graph: a stack of ReLU layers and a softmax head.

        Sets ``self.logits``, ``self.output`` (probability of class 1 as an
        (N, 1) column) and ``self.tf_vars`` (all variables created under
        this model's scope).
        """
        with tf.variable_scope(self.name):
            hidden = x_in
            # Stack `depth` fully connected ReLU layers of size `width`.
            for _ in range(self.depth):
                hidden = layers.relu(hidden, self.width)
            self.logits = layers.linear(hidden, self.n_classes)
            # Probability of the positive class, reshaped to a column vector.
            probs = layers.softmax(self.logits)
            self.output = tf.reshape(probs[:, 1], shape=(-1, 1))
        self.tf_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)

    def build_loss(self, labels):
        """Attach the mean softmax cross-entropy loss for integer labels."""
        print('--- Building classifier loss')
        # Flatten the labels and one-hot encode them for the loss op.
        flat_labels = tf.reshape(labels, shape=[-1])
        one_hot = tf.one_hot(flat_labels, depth=self.n_classes)
        xent = tf.nn.softmax_cross_entropy_with_logits_v2(labels=one_hot, logits=self.logits)
        self.loss = tf.reduce_mean(xent)
class LinearClassifier(DNNClassifier):
    """Degenerate DNNClassifier with no hidden layers: a single linear map.

    Any extra keyword settings from the factory are accepted and ignored.
    """

    def __init__(self, name, **kwargs):
        # depth=0 / width=0 makes build_forward skip every hidden layer
        super().__init__(name, depth=0, width=0)
class Adversary(Model):
    """Base class for adversaries that try to recover the protected
    variable Z from the classifier output f(X).

    Extra keyword arguments passed to the constructor are attached to the
    instance as attributes.
    """
    def __init__(self, name, depth=2, width=20, **kwargs):
        super().__init__(name, depth, width)
        self.loss = None     # set by build_loss in subclasses
        self.tf_vars = None  # set by build_loss in subclasses
        for kw in kwargs:
            setattr(self, kw, kwargs[kw])
    @classmethod
    def create(cls, name, adv_settings):
        """Factory: build the adversary described by adv_settings.

        adv_settings must contain a 'type' key; the remaining entries are
        forwarded to the constructor of the chosen subclass.
        """
        adv_type = adv_settings['type']
        classes = {'Dummy':DummyAdversary,
                   'GMM':GMMAdversary,
                   'MINE':MINEAdversary,
                   # BUG FIX: 'MINEf' previously mapped to MINEAdversary,
                   # which made the MINEfAdversary class unreachable from
                   # this factory.
                   'MINEf':MINEfAdversary,
                   'JS':JSAdversary,
                   'PtEst':PtEstAdversary}
        # check if implemented
        if adv_type not in classes:
            raise ValueError('Unknown Adversary type {}.'.format(adv_type))
        # return the right one
        adversary = classes[adv_type]
        kwargs = adv_settings.copy()
        kwargs.pop('type')
        return adversary(name='{}_{}_adv'.format(name, adv_type), **kwargs)
class PtEstAdversary(Adversary):
    """Point-estimate adversary: regresses Z directly from f(X) with an MLP
    and uses the mean squared error as its loss."""
    def __init__(self, name, depth=2, width=20, **kwargs):
        super().__init__(name, depth, width, **kwargs)
    def build_loss(self, fX, Z):
        """Build the regression network fX -> Z and set self.loss to the MSE."""
        # forward pass
        with tf.variable_scope(self.name):
            # input layer
            layer = fX
            # hidden layers
            for _ in range(self.depth):
                layer = layers.relu(layer, self.width)
            # output layer
            self.output = layers.linear(layer, 1)
        # variables
        self.tf_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
        # create the loss
        self.loss = tf.reduce_mean((self.output - Z)**2)
class DummyAdversary(Adversary):
    """No-op adversary: its loss converges to zero and only touches fX so
    the graph stays connected to the classifier weights."""
    def __init__(self, name, **kwargs):
        super().__init__(name, **kwargs)
    def build_loss(self, fX, Z):
        with tf.variable_scope(self.name):
            dummy_var = tf.Variable(0.1, name='dummy')
            self.loss = dummy_var**2 # i.e. goes to zero
            self.loss += 0 * tf.reduce_mean(fX) # and connects to the classifier weights
        self.tf_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
class GMMAdversary(Adversary):
    """Adversary modelling p(Z | f(X)) as a Gaussian mixture whose
    parameters (mu, sigma, pi per component) are predicted by an MLP; the
    loss is the mean negative log-likelihood of Z under that mixture."""
    def __init__(self, name, depth=2, width=20, n_components=5, **kwargs):
        super().__init__(name, depth, width, **kwargs)
        self.nll_pars = None
        self.n_components = n_components
    def build_loss(self, fX, Z):
        # nll network
        self._make_nll(fX)
        # loss
        self._make_loss(Z)
    def _make_nll(self, fX):
        """Build the MLP mapping fX to the mixture parameters; stores the
        concatenated (mu, sigma, pi) tensor in self.nll_pars."""
        print('--- Building GMM nll model')
        n_components = self.n_components
        with tf.variable_scope(self.name):
            # define the input layer
            layer = fX
            # define the output of a network (depends on number of components)
            for _ in range(self.depth):
                layer = layers.relu(layer, self.width)
            # output layer: (mu, sigma, amplitude) for each component
            output = layers.linear(layer, 3*n_components)
            # make sure sigmas are positive and pis are normalised
            mu = output[:, :n_components]
            sigma = tf.exp(output[:, n_components:2*n_components])
            pi = tf.nn.softmax(output[:, 2*n_components:])
            # interpret the output layers as nll parameters
            self.nll_pars = tf.concat([mu, sigma, pi], axis=1)
        self.tf_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
    def _make_loss(self, Z):
        """Set self.loss to the mean negative log-likelihood of Z under the
        predicted mixture (maximum-likelihood objective)."""
        print('--- Building GMM loss')
        # for convenience
        n_components = self.n_components
        # build the pdf (max likelihood principle)
        mu = self.nll_pars[:, :n_components]
        sigma = self.nll_pars[:, n_components:2*n_components]
        pi = self.nll_pars[:, 2*n_components:]
        likelihood = 0
        for c in range(n_components):
            # normalisation: pi_c / (sqrt(2 pi) sigma_c), as a column vector
            norm_vec = tf.reshape(pi[:, c] * (1. / np.sqrt(2. * np.pi)) / sigma[:, c], shape=(-1, 1))
            # exponential
            mu_vec = tf.reshape(mu[:, c], shape=(-1, 1))
            sigma_vec = tf.reshape(sigma[:, c], shape=(-1, 1))
            exp = tf.math.exp(-(Z - mu_vec) ** 2 / (2. * sigma_vec ** 2))
            # add to likelihood
            likelihood += norm_vec * exp
        # make the loss
        nll = - tf.math.log(likelihood)
        self.loss = tf.reduce_mean(nll)
class VariationalAdversary(Adversary):
    """Base class for variational mutual-information adversaries.

    build_Ts constructs a statistics network T evaluated on joint samples
    (fX, Z) and on shuffled (product-of-marginals) samples (fX, Z'); the
    two outputs self.T_xy and self.T_x_y are combined into a bound by the
    subclasses (MINE, MINE-f, JS).
    """
    def build_Ts(self, fX, Z):
        print('--- Building Ts')
        # store the input placeholders
        fX = tf.reshape(fX, shape=(-1, 1))
        Z = tf.reshape(Z, shape=(-1, 1))
        # aliases
        x_in = fX
        y_in = Z
        # use scope to keep track of vars
        with tf.variable_scope(self.name):
            # shuffle one of them
            y_shuffle = tf.random_shuffle(y_in)
            x_conc = tf.concat([x_in, x_in], axis=0)
            y_conc = tf.concat([y_in, y_shuffle], axis=0)
            # compute the forward pass
            layer_x = layers.linear(x_conc, self.width)
            layer_y = layers.linear(y_conc, self.width)
            layer = tf.nn.relu(layer_x + layer_y)
            for _ in range(self.depth):
                layer = layers.relu(layer, self.width)
            output = layers.linear(layer, 1)
            # split in T_xy and T_x_y
            N_batch = tf.shape(x_in)[0]
            self.T_xy = output[:N_batch]
            self.T_x_y = output[N_batch:]
        # save variables
        self.tf_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
class MINEfAdversary(VariationalAdversary):
    """MINE-f adversary: f-divergence lower bound on mutual information."""
    def build_loss(self, fX, Z):
        # statistics network shared by all variational adversaries
        self.build_Ts(fX, Z)
        # f-divergence bound E[T_xy] - E[exp(T_x_y - 1)], negated so that
        # minimising the loss maximises the bound
        joint_term = tf.reduce_mean(self.T_xy)
        marginal_term = tf.reduce_mean(tf.math.exp(self.T_x_y - 1.))
        self.loss = marginal_term - joint_term
class MINEAdversary(VariationalAdversary):
    """MINE adversary: Donsker-Varadhan lower bound on the KL divergence
    (arXiv 1801.04062)."""
    def build_loss(self, fX, Z):
        # statistics network shared by all variational adversaries
        self.build_Ts(fX, Z)
        # Donsker-Varadhan bound E[T_xy] - log E[exp(T_x_y)], negated so
        # that minimising the loss maximises the bound
        joint_term = tf.reduce_mean(self.T_xy)
        log_partition = tf.math.log(tf.reduce_mean(tf.math.exp(self.T_x_y)))
        self.loss = log_partition - joint_term
class JSAdversary(VariationalAdversary):
    """Jensen-Shannon adversary: GAN-style discriminator objective built
    from a sigmoid on the statistics network output."""
    def build_loss(self, fX, Z):
        # statistics network shared by all variational adversaries
        self.build_Ts(fX, Z)
        # discriminator log-likelihood on joint vs shuffled samples,
        # negated so that minimising the loss maximises the JS bound
        pos_term = tf.reduce_mean(tf.math.log(sigmoid(self.T_xy)))
        neg_term = tf.reduce_mean(tf.math.log(1. - sigmoid(self.T_x_y)))
        self.loss = -(pos_term + neg_term)
| [
"tensorflow.shape",
"numpy.sqrt",
"tensorflow.math.log",
"tensorflow.nn.softmax",
"tensorflow.math.exp",
"tensorflow.reduce_mean",
"tensorflow.contrib.layers.linear",
"tensorflow.random_shuffle",
"tensorflow.concat",
"tensorflow.variable_scope",
"tensorflow.Variable",
"tensorflow.contrib.layer... | [((1567, 1632), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'self.name'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)\n', (1584, 1632), True, 'import tensorflow as tf\n'), ((3666, 3731), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'self.name'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)\n', (3683, 3731), True, 'import tensorflow as tf\n'), ((3779, 3817), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((self.output - Z) ** 2)'], {}), '((self.output - Z) ** 2)\n', (3793, 3817), True, 'import tensorflow as tf\n'), ((4265, 4330), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'self.name'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)\n', (4282, 4330), True, 'import tensorflow as tf\n'), ((5656, 5721), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'self.name'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)\n', (5673, 5721), True, 'import tensorflow as tf\n'), ((6659, 6678), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['nll'], {}), '(nll)\n', (6673, 6678), True, 'import tensorflow as tf\n'), ((6851, 6880), 'tensorflow.reshape', 'tf.reshape', (['fX'], {'shape': '(-1, 1)'}), '(fX, shape=(-1, 1))\n', (6861, 6880), True, 'import tensorflow as tf\n'), ((6893, 6921), 'tensorflow.reshape', 'tf.reshape', (['Z'], {'shape': '(-1, 1)'}), '(Z, shape=(-1, 1))\n', (6903, 6921), True, 'import tensorflow as tf\n'), ((7823, 7888), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'self.name'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)\n', (7840, 7888), True, 'import tensorflow as tf\n'), ((1154, 1182), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.name'], {}), '(self.name)\n', (1171, 1182), True, 'import tensorflow as tf\n'), ((1419, 1455), 
'tensorflow.contrib.layers.linear', 'layers.linear', (['layer', 'self.n_classes'], {}), '(layer, self.n_classes)\n', (1432, 1455), True, 'import tensorflow.contrib.layers as layers\n'), ((1781, 1811), 'tensorflow.reshape', 'tf.reshape', (['labels'], {'shape': '[-1]'}), '(labels, shape=[-1])\n', (1791, 1811), True, 'import tensorflow as tf\n'), ((1900, 1978), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'labels': 'one_hot', 'logits': 'self.logits'}), '(labels=one_hot, logits=self.logits)\n', (1942, 1978), True, 'import tensorflow as tf\n'), ((3340, 3368), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.name'], {}), '(self.name)\n', (3357, 3368), True, 'import tensorflow as tf\n'), ((3598, 3621), 'tensorflow.contrib.layers.linear', 'layers.linear', (['layer', '(1)'], {}), '(layer, 1)\n', (3611, 3621), True, 'import tensorflow.contrib.layers as layers\n'), ((4010, 4038), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.name'], {}), '(self.name)\n', (4027, 4038), True, 'import tensorflow as tf\n'), ((4064, 4094), 'tensorflow.Variable', 'tf.Variable', (['(0.1)'], {'name': '"""dummy"""'}), "(0.1, name='dummy')\n", (4075, 4094), True, 'import tensorflow as tf\n'), ((4877, 4905), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.name'], {}), '(self.name)\n', (4894, 4905), True, 'import tensorflow as tf\n'), ((5235, 5273), 'tensorflow.contrib.layers.linear', 'layers.linear', (['layer', '(3 * n_components)'], {}), '(layer, 3 * n_components)\n', (5248, 5273), True, 'import tensorflow.contrib.layers as layers\n'), ((5402, 5450), 'tensorflow.exp', 'tf.exp', (['output[:, n_components:2 * n_components]'], {}), '(output[:, n_components:2 * n_components])\n', (5408, 5450), True, 'import tensorflow as tf\n'), ((5466, 5509), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['output[:, 2 * n_components:]'], {}), '(output[:, 2 * n_components:])\n', (5479, 5509), True, 'import tensorflow as tf\n'), ((5597, 
5631), 'tensorflow.concat', 'tf.concat', (['[mu, sigma, pi]'], {'axis': '(1)'}), '([mu, sigma, pi], axis=1)\n', (5606, 5631), True, 'import tensorflow as tf\n'), ((6327, 6362), 'tensorflow.reshape', 'tf.reshape', (['mu[:, c]'], {'shape': '(-1, 1)'}), '(mu[:, c], shape=(-1, 1))\n', (6337, 6362), True, 'import tensorflow as tf\n'), ((6387, 6425), 'tensorflow.reshape', 'tf.reshape', (['sigma[:, c]'], {'shape': '(-1, 1)'}), '(sigma[:, c], shape=(-1, 1))\n', (6397, 6425), True, 'import tensorflow as tf\n'), ((6444, 6500), 'tensorflow.math.exp', 'tf.math.exp', (['(-(Z - mu_vec) ** 2 / (2.0 * sigma_vec ** 2))'], {}), '(-(Z - mu_vec) ** 2 / (2.0 * sigma_vec ** 2))\n', (6455, 6500), True, 'import tensorflow as tf\n'), ((6615, 6638), 'tensorflow.math.log', 'tf.math.log', (['likelihood'], {}), '(likelihood)\n', (6626, 6638), True, 'import tensorflow as tf\n'), ((7032, 7060), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.name'], {}), '(self.name)\n', (7049, 7060), True, 'import tensorflow as tf\n'), ((7133, 7156), 'tensorflow.random_shuffle', 'tf.random_shuffle', (['y_in'], {}), '(y_in)\n', (7150, 7156), True, 'import tensorflow as tf\n'), ((7178, 7209), 'tensorflow.concat', 'tf.concat', (['[x_in, x_in]'], {'axis': '(0)'}), '([x_in, x_in], axis=0)\n', (7187, 7209), True, 'import tensorflow as tf\n'), ((7231, 7267), 'tensorflow.concat', 'tf.concat', (['[y_in, y_shuffle]'], {'axis': '(0)'}), '([y_in, y_shuffle], axis=0)\n', (7240, 7267), True, 'import tensorflow as tf\n'), ((7330, 7363), 'tensorflow.contrib.layers.linear', 'layers.linear', (['x_conc', 'self.width'], {}), '(x_conc, self.width)\n', (7343, 7363), True, 'import tensorflow.contrib.layers as layers\n'), ((7386, 7419), 'tensorflow.contrib.layers.linear', 'layers.linear', (['y_conc', 'self.width'], {}), '(y_conc, self.width)\n', (7399, 7419), True, 'import tensorflow.contrib.layers as layers\n'), ((7440, 7469), 'tensorflow.nn.relu', 'tf.nn.relu', (['(layer_x + layer_y)'], {}), '(layer_x + layer_y)\n', (7450, 
7469), True, 'import tensorflow as tf\n'), ((7588, 7611), 'tensorflow.contrib.layers.linear', 'layers.linear', (['layer', '(1)'], {}), '(layer, 1)\n', (7601, 7611), True, 'import tensorflow.contrib.layers as layers\n'), ((1329, 1359), 'tensorflow.contrib.layers.relu', 'layers.relu', (['layer', 'self.width'], {}), '(layer, self.width)\n', (1340, 1359), True, 'import tensorflow.contrib.layers as layers\n'), ((3513, 3543), 'tensorflow.contrib.layers.relu', 'layers.relu', (['layer', 'self.width'], {}), '(layer, self.width)\n', (3524, 3543), True, 'import tensorflow.contrib.layers as layers\n'), ((4181, 4199), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['fX'], {}), '(fX)\n', (4195, 4199), True, 'import tensorflow as tf\n'), ((5112, 5142), 'tensorflow.contrib.layers.relu', 'layers.relu', (['layer', 'self.width'], {}), '(layer, self.width)\n', (5123, 5142), True, 'import tensorflow.contrib.layers as layers\n'), ((7535, 7565), 'tensorflow.contrib.layers.relu', 'layers.relu', (['layer', 'self.width'], {}), '(layer, self.width)\n', (7546, 7565), True, 'import tensorflow.contrib.layers as layers\n'), ((7673, 7687), 'tensorflow.shape', 'tf.shape', (['x_in'], {}), '(x_in)\n', (7681, 7687), True, 'import tensorflow as tf\n'), ((8059, 8084), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.T_xy'], {}), '(self.T_xy)\n', (8073, 8084), True, 'import tensorflow as tf\n'), ((8332, 8357), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.T_xy'], {}), '(self.T_xy)\n', (8346, 8357), True, 'import tensorflow as tf\n'), ((1493, 1520), 'tensorflow.contrib.layers.softmax', 'layers.softmax', (['self.logits'], {}), '(self.logits)\n', (1507, 1520), True, 'import tensorflow.contrib.layers as layers\n'), ((8102, 8131), 'tensorflow.math.exp', 'tf.math.exp', (['(self.T_x_y - 1.0)'], {}), '(self.T_x_y - 1.0)\n', (8113, 8131), True, 'import tensorflow as tf\n'), ((8387, 8410), 'tensorflow.math.exp', 'tf.math.exp', (['self.T_x_y'], {}), '(self.T_x_y)\n', (8398, 8410), True, 'import tensorflow 
as tf\n'), ((8622, 8640), 'utils.sigmoid', 'sigmoid', (['self.T_xy'], {}), '(self.T_xy)\n', (8629, 8640), False, 'from utils import sigmoid\n'), ((6228, 6248), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (6235, 6248), True, 'import numpy as np\n'), ((8677, 8696), 'utils.sigmoid', 'sigmoid', (['self.T_x_y'], {}), '(self.T_x_y)\n', (8684, 8696), False, 'from utils import sigmoid\n')] |
#!/usr/bin/env python3
'''
Created on 14 Jan 2022
NOTE: This code has been adapted from the week 1 lab of COMP 0037
'''
from statistics import mean
import matplotlib.pyplot as plt
import numpy as np
from bandits.bandit import Bandit
from bandits.bandit import BanditEnvironment
from bandits.upper_confidence_bound_agent import UpperConfidenceBoundAgent
from bandits.performance_measures import compute_percentage_of_optimal_actions_selected
from bandits.performance_measures import compute_regret
if __name__ == '__main__':
    # Build a 4-armed bandit environment with closely spaced mean rewards,
    # so the agent has to work to single out the best arm (index 3).
    environment = BanditEnvironment(4)
    environment.set_bandit(0, Bandit(4, 1))
    environment.set_bandit(1, Bandit(4.1, 1))
    environment.set_bandit(2, Bandit(3.9, 1))
    environment.set_bandit(3, Bandit(4.2, 1))

    number_of_steps = 100000

    # Tick positions used by the plots.
    y_label = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    x_label = [0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0]

    plotmean_reward = []

    # Confidence levels to sweep; the dense list below is for the total
    # mean reward plot. For the per-step plots use the shorter sweep:
    # i = [0.1, 0.5, 1, 2, 3, 4, 5, 10]
    i = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.3, 1.6, 2, 3]

    for c in i:
        agent = UpperConfidenceBoundAgent(environment, c)
        # Per-step record of the arm chosen and the reward it returned.
        reward_history = np.zeros(number_of_steps)
        action_history = np.zeros(number_of_steps)
        for p in range(number_of_steps):
            action_history[p], reward_history[p] = agent.step()
        plotmean_reward.append(np.mean(reward_history))

        # NOTE: alternative per-confidence-level plots (percentage of
        # optimal actions via compute_percentage_of_optimal_actions_selected,
        # cumulative regret via compute_regret, and the raw action_history)
        # can be produced here; see the module imports.

    # Summary plot: total mean reward achieved at each confidence level.
    plt.plot(i, plotmean_reward, 'o-')
    plt.title("Total Mean Reward vs Confidence Level")
    plt.xlabel("Confidence Level")
    plt.ylabel("Total Mean Reward")
    plt.xlim(0, 2)
    plt.xticks(x_label)
    plt.grid()
    plt.show()
| [
"numpy.mean",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"bandits.upper_confidence_bound_agent.UpperConfidenceBoundAgent",
"numpy.zeros",
"bandits.bandit.Bandit",
"matplotlib.pyplot.title",
"matplotlib.p... | [((590, 610), 'bandits.bandit.BanditEnvironment', 'BanditEnvironment', (['(4)'], {}), '(4)\n', (607, 610), False, 'from bandits.bandit import BanditEnvironment\n'), ((3310, 3344), 'matplotlib.pyplot.plot', 'plt.plot', (['i', 'plotmean_reward', '"""o-"""'], {}), "(i, plotmean_reward, 'o-')\n", (3318, 3344), True, 'import matplotlib.pyplot as plt\n'), ((3348, 3398), 'matplotlib.pyplot.title', 'plt.title', (['"""Total Mean Reward vs Confidence Level"""'], {}), "('Total Mean Reward vs Confidence Level')\n", (3357, 3398), True, 'import matplotlib.pyplot as plt\n'), ((3404, 3434), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Confidence Level"""'], {}), "('Confidence Level')\n", (3414, 3434), True, 'import matplotlib.pyplot as plt\n'), ((3440, 3471), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total Mean Reward"""'], {}), "('Total Mean Reward')\n", (3450, 3471), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3491), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(2)'], {}), '(0, 2)\n', (3485, 3491), True, 'import matplotlib.pyplot as plt\n'), ((3496, 3515), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_label'], {}), '(x_label)\n', (3506, 3515), True, 'import matplotlib.pyplot as plt\n'), ((3523, 3533), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3531, 3533), True, 'import matplotlib.pyplot as plt\n'), ((3539, 3549), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3547, 3549), True, 'import matplotlib.pyplot as plt\n'), ((672, 684), 'bandits.bandit.Bandit', 'Bandit', (['(4)', '(1)'], {}), '(4, 1)\n', (678, 684), False, 'from bandits.bandit import Bandit\n'), ((721, 735), 'bandits.bandit.Bandit', 'Bandit', (['(4.1)', '(1)'], {}), '(4.1, 1)\n', (727, 735), False, 'from bandits.bandit import Bandit\n'), ((768, 782), 'bandits.bandit.Bandit', 'Bandit', (['(3.9)', '(1)'], {}), '(3.9, 1)\n', (774, 782), False, 'from bandits.bandit import Bandit\n'), ((815, 829), 'bandits.bandit.Bandit', 'Bandit', (['(4.2)', '(1)'], {}), '(4.2, 
1)\n', (821, 829), False, 'from bandits.bandit import Bandit\n'), ((1269, 1310), 'bandits.upper_confidence_bound_agent.UpperConfidenceBoundAgent', 'UpperConfidenceBoundAgent', (['environment', 'c'], {}), '(environment, c)\n', (1294, 1310), False, 'from bandits.upper_confidence_bound_agent import UpperConfidenceBoundAgent\n'), ((1380, 1405), 'numpy.zeros', 'np.zeros', (['number_of_steps'], {}), '(number_of_steps)\n', (1388, 1405), True, 'import numpy as np\n'), ((1432, 1457), 'numpy.zeros', 'np.zeros', (['number_of_steps'], {}), '(number_of_steps)\n', (1440, 1457), True, 'import numpy as np\n'), ((1666, 1689), 'numpy.mean', 'np.mean', (['reward_history'], {}), '(reward_history)\n', (1673, 1689), True, 'import numpy as np\n')] |
import sys
import os
import pickle
import cv2
import numpy as np
CAFFE_PYTHON_PATH = os.path.join(os.path.dirname(__file__), "../python")
sys.path.insert(0, CAFFE_PYTHON_PATH)
import caffe
from Dataset import GetDataset
from ACT_utils import *
from copy import deepcopy
K = 6
IMGSIZE = 300
MEAN = np.array([[[104, 117, 123]]], dtype=np.float32)
NFLOWS = 5
def extract_tubelets(dname, gpu=-1, redo=False):
    """Extract the tubelets for a given dataset

    args:
        - dname: dataset name (example: 'JHMDB')
        - gpu (default -1): use gpu given in argument, or use cpu if -1
        - redo: whether or not to recompute already computed files

    save a pickle file for each frame
    the file contains a tuple (dets, dets_all)
        - dets is a numpy array with 2+4*K columns containing the tubelets starting at this frame after per-class nms at 0.45 and thresholding the scores at 0.01
            the columns are <label> <score> and then <x1> <y1> <x2> <y2> for each of the frame in the tubelet
        - dets_all contains the tubelets obtained after a global nms at 0.7 and thresholding the scores at 0.01
            it is a numpy array with 4*K + L + 1 columns containing the coordinates of the tubelets and the scores for all labels

    note: this version is inefficient: it is better to estimate the per-frame features once
    """
    d = GetDataset(dname)
    if gpu >= 0:
        caffe.set_mode_gpu()
        caffe.set_device(gpu)
    model_dir = os.path.join(os.path.dirname(__file__), '../models/ACT-detector/', dname)
    output_dir = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
    # load the RGB network
    rgb_proto = os.path.join(model_dir, "deploy_RGB.prototxt")
    rgb_model = os.path.join(model_dir, "RGB.caffemodel")
    net_rgb = caffe.Net(rgb_proto, caffe.TEST, weights=rgb_model)
    # load the FLOW5 network
    flo_proto = os.path.join(model_dir, "deploy_FLOW5.prototxt")
    flo_model = os.path.join(model_dir, "FLOW5.caffemodel")
    net_flo = caffe.Net(flo_proto, caffe.TEST, weights=flo_model)
    vlist = d.test_vlist()
    for iv, v in enumerate(vlist):
        print("Processing video {:d}/{:d}: {:s}".format( iv+1, len(vlist), v))
        h, w = d.resolution(v)
        # network output is normalized between 0,1 ; so we will multiply it by the following array
        resolution_array = np.array([w,h,w,h]*K, dtype=np.float32)
        # now process each frame
        for i in range(1, 1 + d.nframes(v) - K + 1):
            outfile = os.path.join(output_dir, d.frame_format(v,i) + ".pkl")
            # skip if already computed
            if os.path.isfile(outfile) and not redo:
                continue
            # read the frames for the forward
            kwargs_rgb = {}
            kwargs_flo = {}
            for j in range(K):
                im = cv2.imread(d.imfile(v, i + j))
                if im is None:
                    print("Image {:s} does not exist".format(d.imfile(v, i+j)))
                    return
                imscale = cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR)
                kwargs_rgb['data_stream' + str(j)] = np.transpose(imscale-MEAN, (2, 0, 1))[None, :, :, :]
                imf = [cv2.imread(d.flowfile(v, min(d.nframes(v), i + j + iflow))) for iflow in range(NFLOWS)]
                # BUG FIX: the original check `np.any(imf) is None` can never
                # be True (np.any returns a numpy bool, not None), so missing
                # flow images went undetected; test each image explicitly.
                if any(im_flow is None for im_flow in imf):
                    print("Flow image {:s} does not exist".format(d.flowfile(v, i+j)))
                    return
                imscalef = [cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR) for im in imf]
                timscale = [np.transpose(im-MEAN, (2, 0, 1))[None, :, :, :] for im in imscalef]
                kwargs_flo['data_stream' + str(j) + 'flow'] = np.concatenate(timscale, axis=1)
            # compute rgb and flow scores
            # two forward passes: one for the rgb and one for the flow
            net_rgb.forward(end="mbox_conf_flatten", **kwargs_rgb) # forward of rgb with confidence and regression
            net_flo.forward(end="mbox_conf_flatten", **kwargs_flo) # forward of flow5 with confidence and regression
            # compute late fusion of rgb and flow scores (keep regression from rgb)
            # use net_rgb for standard detections, net_flo for having all boxes
            scores = 0.5 * (net_rgb.blobs['mbox_conf_flatten'].data + net_flo.blobs['mbox_conf_flatten'].data)
            net_rgb.blobs['mbox_conf_flatten'].data[...] = scores
            net_flo.blobs['mbox_conf_flatten'].data[...] = scores
            net_flo.blobs['mbox_loc'].data[...] = net_rgb.blobs['mbox_loc'].data
            # two forward passes, only for the last layer
            # dets is the detections after per-class NMS and thresholding (standard)
            # dets_all contains all the scores and regressions for all tubelets
            dets = net_rgb.forward(start='detection_out')['detection_out'][0, 0, :, 1:]
            dets_all = net_flo.forward(start='detection_out_full')['detection_out_full'][0, 0, :, 1:]
            # parse detections with per-class NMS
            if dets.shape[0] == 1 and np.all(dets == -1):
                dets = np.empty((0, dets.shape[1]), dtype=np.float32)
            dets[:, 2:] *= resolution_array # network output was normalized in [0..1]
            dets[:, 0] -= 1 # label 0 was background, come back to label in [0..nlabels-1]
            dets[:, 2::2] = np.maximum(0, np.minimum(w, dets[:, 2::2]))
            dets[:, 3::2] = np.maximum(0, np.minimum(h, dets[:, 3::2]))
            # parse detections with global NMS at 0.7 (top 300)
            # coordinates were normalized in [0..1]
            dets_all[:, 0:4*K] *= resolution_array
            dets_all[:, 0:4*K:2] = np.maximum(0, np.minimum(w, dets_all[:, 0:4*K:2]))
            dets_all[:, 1:4*K:2] = np.maximum(0, np.minimum(h, dets_all[:, 1:4*K:2]))
            idx = nms_tubelets(np.concatenate((dets_all[:, :4*K], np.max(dets_all[:, 4*K+1:], axis=1)[:, None]), axis=1), 0.7, 300)
            dets_all = dets_all[idx, :]
            # save file
            if not os.path.isdir(os.path.dirname(outfile)):
                os.system('mkdir -p ' + os.path.dirname(outfile))
            with open(outfile, 'wb') as fid:
                pickle.dump((dets, dets_all), fid)
def load_frame_detections(d, vlist, dirname, nms):
    """Aggregate the per-frame boxes of all extracted tubelets.

    args:
        - d: dataset object or dataset name (converted via GetDataset)
        - vlist: list of test videos
        - dirname: directory containing the extracted tubelet pickles
        - nms: per-class 2D NMS threshold applied in every frame

    returns a numpy array with one row per kept detection:
    <video_index> <frame_index> <ilabel> <score> <x1> <y1> <x2> <y2>
    """
    if isinstance(d, str):
        d = GetDataset(d)
    alldets = [] # list of numpy array with <video_index> <frame_index> <ilabel> <score> <x1> <y1> <x2> <y2>
    for iv, v in enumerate(vlist):
        h,w = d.resolution(v)
        # aggregate the results for each frame
        vdets = {i: np.empty((0,6), dtype=np.float32) for i in range(1, 1 + d.nframes(v))} # x1, y1, x2, y2, score, ilabel
        # load results for each starting frame
        for i in range(1, 1 + d.nframes(v) - K + 1):
            resname = os.path.join(dirname, d.frame_format(v,i) + '.pkl')
            if not os.path.isfile(resname):
                print("ERROR: Missing extracted tubelets "+resname)
                sys.exit()
            with open(resname, 'rb') as fid:
                dets, _ = pickle.load(fid)
            if dets.size == 0:
                continue
            # dets columns are <label> <score> <x1 y1 x2 y2> per frame;
            # slice out the box of frame k and append (box, score, label)
            for k in range(K):
                vdets[i+k] = np.concatenate( (vdets[i+k],dets[:,np.array([2+4*k,3+4*k,4+4*k,5+4*k,1,0])] ), axis=0)
        # Perform NMS in each frame
        for i in vdets:
            idx = np.empty((0,), dtype=np.int32)
            for ilabel in range(d.nlabels):
                a = np.where(vdets[i][:,5] == ilabel)[0]
                if a.size == 0:
                    continue
                idx = np.concatenate((idx, a[nms2d(vdets[i][vdets[i][:, 5] == ilabel, :5], nms)]), axis=0)
            if idx.size == 0:
                continue
            # reorder kept rows into the output column layout
            alldets.append(np.concatenate((iv * np.ones((idx.size, 1), dtype=np.float32), i * np.ones((idx.size, 1), dtype=np.float32), vdets[i][idx, :][:, np.array([5, 4, 0, 1, 2, 3], dtype=np.int32)]), axis=1))
    return np.concatenate(alldets, axis=0)
def frameAP(dname, th=0.5, redo=False):
    """Compute and print the frame-level average precision for every class.

    args:
        - dname: dataset name
        - th: IoU threshold above which a detection counts as a true positive
        - redo: whether or not to recompute cached results
    """
    d = GetDataset(dname)
    dirname = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
    eval_file = os.path.join(dirname, "frameAP{:g}.pkl".format(th))
    if os.path.isfile(eval_file) and not redo:
        with open(eval_file, 'rb') as fid:
            res = pickle.load(fid)
    else:
        vlist = d.test_vlist()
        # load per-frame detections
        alldets = load_frame_detections(d, vlist, dirname, 0.3)
        res = {}
        # compute AP for each class
        for ilabel,label in enumerate(d.labels):
            # detections of this class
            detections = alldets[alldets[:, 2] == ilabel, :]
            # load ground-truth of this class, keyed by (video_index, frame)
            gt = {}
            for iv, v in enumerate(vlist):
                tubes = d.gttubes(v)
                if not ilabel in tubes:
                    continue
                for tube in tubes[ilabel]:
                    for i in range(tube.shape[0]):
                        k = (iv, int(tube[i, 0]))
                        if not k in gt:
                            gt[k] = []
                        gt[k].append(tube[i, 1:5].tolist())
            for k in gt:
                gt[k] = np.array( gt[k] )
            # pr will be an array containing precision-recall values
            pr = np.empty((detections.shape[0] + 1, 2), dtype=np.float32)# precision,recall
            pr[0, 0] = 1.0
            pr[0, 1] = 0.0
            fn = sum([g.shape[0] for g in gt.values()]) # false negatives
            fp = 0 # false positives
            tp = 0 # true positives
            # sweep the detections in decreasing score order
            for i, j in enumerate(np.argsort(-detections[:,3])):
                k = (int(detections[j,0]), int(detections[j,1]))
                box = detections[j, 4:8]
                ispositive = False
                if k in gt:
                    ious = iou2d(gt[k], box)
                    amax = np.argmax(ious)
                    if ious[amax] >= th:
                        ispositive = True
                        # each ground-truth box can be matched only once
                        gt[k] = np.delete(gt[k], amax, 0)
                        if gt[k].size == 0:
                            del gt[k]
                if ispositive:
                    tp += 1
                    fn -= 1
                else:
                    fp += 1
                pr[i+1, 0] = float(tp) / float(tp + fp)
                pr[i+1, 1] = float(tp) / float(tp + fn)
            res[label] = pr
        # save results
        with open(eval_file, 'wb') as fid:
            pickle.dump(res, fid)
    # display results
    ap = 100*np.array([pr_to_ap(res[label]) for label in d.labels])
    print("frameAP")
    # BUG FIX: each row previously formatted the empty string '' instead of
    # the class name, so the table printed blank labels; print the label,
    # consistent with frameAP_error's summary table.
    for il, label in enumerate(d.labels):
        print("{:20s} {:8.2f}".format(label, ap[il]))
    print("{:20s} {:8.2f}".format("mAP", np.mean(ap)))
    print("")
def frameAP_error(dname, th=0.5, redo=False):
    """Frame-level AP with an error breakdown per class.

    For every false positive, attribute the error to one of:
        - EL: localization (right class, right frame, IoU < th)
        - EC: classification (IoU >= th with an object of another class)
        - ET: timing (the video contains the action, but not at this frame)
        - EO: other
    Also reports EM, the missed detections (1 - final recall).

    args:
        - dname: dataset name
        - th: IoU threshold for a true positive
        - redo: whether or not to recompute cached results
    """
    d = GetDataset(dname)
    dirname = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
    eval_file = os.path.join(dirname, "frameAP{:g}ErrorAnalysis.pkl".format(th))
    if os.path.isfile(eval_file) and not redo:
        with open(eval_file, 'rb') as fid:
            res = pickle.load(fid)
    else:
        vlist = d.test_vlist()
        # load per-frame detections
        alldets = load_frame_detections(d, vlist, dirname, 0.3)
        res = {}
        # compute AP for each class
        for ilabel,label in enumerate(d.labels):
            # detections of this class
            detections = alldets[alldets[:, 2] == ilabel, :]
            gt = {}       # ground truth of this class, keyed by (video_index, frame)
            othergt = {}  # ground truth of the other classes
            labellist = {}
            for iv, v in enumerate(vlist):
                tubes = d.gttubes(v)
                # BUG FIX: labellist is looked up with the video *index*
                # (k[0]) below, so it must be keyed by iv, not by the video
                # name v.
                labellist[iv] = tubes.keys()
                for il in tubes:
                    for tube in tubes[il]:
                        for i in range(tube.shape[0]):
                            k = (iv, int(tube[i, 0]))
                            if il == ilabel:
                                if k not in gt:
                                    gt[k] = []
                                gt[k].append(tube[i, 1:5].tolist())
                            else:
                                if k not in othergt:
                                    othergt[k] = []
                                othergt[k].append(tube[i, 1:5].tolist())
            for k in gt:
                gt[k] = np.array(gt[k])
            for k in othergt:
                othergt[k] = np.array(othergt[k])
            # dupgt keeps the full ground truth; gt is consumed as boxes match
            dupgt = deepcopy(gt)
            # pr will be an array containing precision-recall values and 4 types of errors:
            # localization, classification, timing, others
            pr = np.empty((detections.shape[0] + 1, 6), dtype=np.float32)# precision, recall
            pr[0, 0] = 1.0
            pr[0, 1:] = 0.0
            fn = sum([g.shape[0] for g in gt.values()]) # false negatives
            fp = 0 # false positives
            tp = 0 # true positives
            EL = 0 # localization errors
            EC = 0 # classification error: overlap >=0.5 with an another object
            EO = 0 # other errors
            ET = 0 # timing error: the video contains the action but not at this frame
            for i, j in enumerate(np.argsort(-detections[:,3])):
                k = (int(detections[j, 0]), int(detections[j,1]))
                box = detections[j, 4:8]
                ispositive = False
                if k in dupgt:
                    if k in gt:
                        ious = iou2d(gt[k], box)
                        amax = np.argmax(ious)
                    if k in gt and ious[amax] >= th:
                        ispositive = True
                        gt[k] = np.delete(gt[k], amax, 0)
                        if gt[k].size == 0:
                            del gt[k]
                    else:
                        EL += 1
                elif k in othergt:
                    ious = iou2d(othergt[k], box)
                    if np.max(ious) >= th:
                        EC += 1
                    else:
                        EO += 1
                elif ilabel in labellist[k[0]]:
                    ET += 1
                else:
                    EO += 1
                if ispositive:
                    tp += 1
                    fn -= 1
                else:
                    fp += 1
                pr[i+1, 0] = float(tp)/float(tp+fp)
                pr[i+1, 1] = float(tp)/float(tp+fn)
                pr[i+1, 2] = float(EL)/float(tp+fp)
                pr[i+1, 3] = float(EC)/float(tp+fp)
                pr[i+1, 4] = float(ET)/float(tp+fp)
                pr[i+1, 5] = float(EO)/float(tp+fp)
            res[label] = pr
        # save results
        with open(eval_file, 'wb') as fid:
            pickle.dump(res, fid)
    # display results
    AP = 100*np.array([pr_to_ap(res[label][:,[0, 1]]) for label in d.labels])
    othersap = [100*np.array([pr_to_ap(res[label][:,[j, 1]]) for label in d.labels]) for j in range(2, 6)]
    EL = othersap[0]
    EC = othersap[1]
    ET = othersap[2]
    EO = othersap[3]
    EM = 100 - 100*np.array([res[label][-1, 1] for label in d.labels]) # missed detections = 1 - recall
    LIST = [AP, EL, EC, ET, EO, EM]
    print("Error Analysis")
    print("")
    print("{:20s} {:8s} {:8s} {:8s} {:8s} {:8s} {:8s}".format('label', ' AP ', ' Loc. ', ' Cls. ', ' Time ', ' Other ', ' missed '))
    print("")
    for il, label in enumerate(d.labels):
        print("{:20s} ".format(label) + " ".join(["{:8.2f}".format(L[il]) for L in LIST]))
    print("")
    print("{:20s} ".format("mean") + " ".join(["{:8.2f}".format(np.mean(L)) for L in LIST]))
    print("")
def frameMABO(dname, redo=False):
    """Compute and print the per-class Mean Average Best Overlap (MABO)
    of per-frame detections against the ground-truth boxes.

    Per-frame detections are reassembled from the K-frame tubelet pickles;
    for each ground-truth box the best IoU over all detections of that
    frame is recorded. Results are cached in frameMABO.pkl.

    Parameters
    ----------
    dname : str
        Dataset name, passed to GetDataset.
    redo : bool
        If True, recompute even when a cached frameMABO.pkl exists.
    """
    d = GetDataset(dname)
    dirname = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
    eval_file = os.path.join(dirname, "frameMABO.pkl")
    if os.path.isfile(eval_file) and not redo:
        # reuse the cached per-class best-overlap lists
        with open(eval_file, 'rb') as fid:
            BO = pickle.load(fid)
    else:
        vlist = d.test_vlist()
        BO = {l: [] for l in d.labels}  # best overlap per class
        for v in vlist:
            gt = d.gttubes(v)
            h, w = d.resolution(v)
            # load per-frame detections
            vdets = {i: np.empty((0, 4), dtype=np.float32) for i in range(1, 1 + d.nframes(v))}
            # load results for each chunk; each tubelet contributes K per-frame boxes
            for i in range(1, 1 + d.nframes(v) - K + 1):
                resname = os.path.join(dirname, d.frame_format(v, i) + '.pkl')
                if not os.path.isfile(resname):
                    print("ERROR: Missing extracted tubelets " + resname)
                    sys.exit()
                with open(resname, 'rb') as fid:
                    dets, _ = pickle.load(fid)
                for k in range(K):
                    vdets[i + k] = np.concatenate((vdets[i + k], dets[:, 2 + 4 * k:6 + 4 * k]), axis=0)
            # for each frame, record the best overlap for every gt box
            for i in range(1, 1 + d.nframes(v)):
                for ilabel in gt:
                    label = d.labels[ilabel]
                    for t in gt[ilabel]:
                        # the gt tube does not cover frame i
                        if not i in t[:, 0]:
                            continue
                        gtbox = t[t[:, 0] == i, 1:5]  # box of gt tube at frame i
                        if vdets[i].size == 0:  # we missed it
                            BO[label].append(0)
                            continue
                        ious = iou2d(vdets[i], gtbox)
                        BO[label].append(np.max(ious))
        # save file
        with open(eval_file, 'wb') as fid:
            pickle.dump(BO, fid)
    # print MABO results
    ABO = {la: 100 * np.mean(np.array(BO[la])) for la in d.labels}  # average best overlap
    for la in d.labels:
        print("{:20s} {:6.2f}".format(la, ABO[la]))
    # BUGFIX: dict.values() is a view in Python 3 and np.array(view) yields a
    # 0-d object array, so the original np.mean(np.array(ABO.values())) fails;
    # materialize the values first (works in Python 2 as well).
    print("{:20s} {:6.2f}".format("MABO", np.mean(list(ABO.values()))))
def frameCLASSIF(dname, redo=False):
    """Measure per-class classification accuracy on ground-truth boxes.

    For every gt box, the class-score vectors of all tubelets overlapping it
    by >= 0.7 IoU (over the K-frame windows covering that frame) are summed,
    and the prediction counts as correct when the argmax matches the gt
    label. Per-class accuracies are cached in frameCLASSIF.pkl.

    Parameters
    ----------
    dname : str
        Dataset name, passed to GetDataset.
    redo : bool
        If True, recompute even when a cached result file exists.
    """
    d = GetDataset(dname)
    dirname = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
    eval_file = os.path.join(dirname, "frameCLASSIF.pkl")
    if os.path.isfile(eval_file) and not redo:
        with open(eval_file, 'rb') as fid:
            CLASSIF = pickle.load(fid)
    else:
        vlist = d.test_vlist()
        # per-class correct / total counters
        CORRECT = [0 for ilabel in range(d.nlabels)]
        TOTAL = [0 for ilabel in range(d.nlabels)]
        for v in vlist:
            nframes = d.nframes(v)
            # load all tubelets
            VDets = {}
            for startframe in range(1, nframes + 2 - K):
                resname = os.path.join(dirname, d.frame_format(v, startframe) + '.pkl')
                if not os.path.isfile(resname):
                    print("ERROR: Missing extracted tubelets " + resname)
                    sys.exit()
                with open(resname, 'rb') as fid:
                    _, VDets[startframe] = pickle.load(fid)
            # iterate over ground-truth
            tubes = d.gttubes(v)
            for ilabel in tubes:
                for g in tubes[ilabel]:
                    for i in range(g.shape[0]):
                        frame = int(g[i, 0])
                        # just in case a tube is longer than the video
                        if frame > nframes:
                            continue
                        gtbox = g[i, 1:5]
                        scores = np.zeros((d.nlabels,), dtype=np.float32)
                        # average the score over the 6 frames
                        # (all K-frame windows whose span covers `frame`)
                        for sf in range(max(1, frame - K + 1), min(nframes - K + 1, frame) + 1):
                            # IoU of every tubelet's box at `frame` with the gt box
                            overlaps = iou2d(VDets[sf][:, 4*(frame-sf):4*(frame-sf)+4], gtbox)
                            # accumulate class scores of well-overlapping tubelets
                            scores += np.sum(VDets[sf][overlaps >= 0.7, 4*K + 1:],axis=0)
                        # check classif
                        if np.argmax(scores) == ilabel:
                            CORRECT[ilabel] += 1
                        TOTAL[ilabel] += 1
        # NOTE(review): raises ZeroDivisionError if a class has no gt boxes
        # in the test split — presumably never happens; confirm.
        CLASSIF = [float(CORRECT[ilabel]) / float(TOTAL[ilabel]) for ilabel in range(d.nlabels)]
        with open(eval_file, 'wb') as fid:
            pickle.dump(CLASSIF, fid)
    # print classif results
    for il, la in enumerate(d.labels):
        print("{:20s} {:6.2f}".format(la, 100*CLASSIF[il]))
    print("{:20s} {:6.2f}".format("CLASSIF", 100*np.mean(np.array(CLASSIF))))
def BuildTubes(dname, redo=False):
    """Link per-frame tubelet detections into video-long action tubes and
    write one <video>_tubes.pkl per test video.

    For each class, tubelets are greedily linked frame by frame: every live
    tube is extended with the highest-scoring new tubelet whose boxes
    overlap its last tubelet by >= 0.2 IoU; a tube not extended for 5
    frames is closed. Tubes with mean score < 0.01 or shorter than 15
    frames are discarded; per-frame boxes and scores of the kept tubes are
    averaged over the overlapping tubelets.

    Parameters
    ----------
    dname : str
        Dataset name, passed to GetDataset.
    redo : bool
        If True, rebuild tubes even when <video>_tubes.pkl already exists.
    """
    d = GetDataset(dname)
    dirname = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
    vlist = d.test_vlist()
    for iv, v in enumerate(vlist):
        print("Processing video {:d}/{:d}: {:s}".format(iv + 1, len(vlist), v))
        outfile = os.path.join(dirname, v + "_tubes.pkl")
        if os.path.isfile(outfile) and not redo:
            continue
        RES = {}
        nframes = d.nframes(v)
        # load detected tubelets
        VDets = {}
        for startframe in range(1, nframes + 2 - K):
            resname = os.path.join(dirname, d.frame_format(v, startframe) + '.pkl')
            if not os.path.isfile(resname):
                print("ERROR: Missing extracted tubelets " + resname)
                sys.exit()
            with open(resname, 'rb') as fid:
                _, VDets[startframe] = pickle.load(fid)
        for ilabel in range(d.nlabels):
            FINISHED_TUBES = []
            CURRENT_TUBES = []  # tubes is a list of tuple (frame, lstubelets)
            def tubescore(tt):
                # mean of the per-tubelet class scores along the tube
                return np.mean(np.array([tt[i][1][-1] for i in range(len(tt))]))
            for frame in range(1, d.nframes(v) + 2 - K):
                # load boxes of the new frame and do nms while keeping Nkeep highest scored
                # BUGFIX: range(...) cannot be concatenated to a list in
                # Python 3; wrap it in list() (also valid in Python 2).
                # Nx(4K+1) with (x1 y1 x2 y2)*K ilabel-score
                ltubelets = VDets[frame][:, list(range(4*K)) + [4*K + 1 + ilabel]]
                idx = nms_tubelets(ltubelets, 0.3, top_k=10)
                ltubelets = ltubelets[idx, :]
                # just start new tubes
                if frame == 1:
                    for i in range(ltubelets.shape[0]):
                        CURRENT_TUBES.append([(1, ltubelets[i, :])])
                    continue
                # sort current tubes according to average score
                avgscore = [tubescore(t) for t in CURRENT_TUBES]
                argsort = np.argsort(-np.array(avgscore))
                CURRENT_TUBES = [CURRENT_TUBES[i] for i in argsort]
                # loop over tubes
                finished = []
                for it, t in enumerate(CURRENT_TUBES):
                    # compute ious between the last box of t and ltubelets
                    last_frame, last_tubelet = t[-1]
                    ious = []
                    offset = frame - last_frame
                    if offset < K:
                        # windows still overlap: average IoU over the shared frames
                        nov = K - offset
                        ious = sum([iou2d(ltubelets[:, 4*iov:4*iov+4], last_tubelet[4*(iov+offset):4*(iov+offset+1)]) for iov in range(nov)])/float(nov)
                    else:
                        # no temporal overlap: compare first new box to last old box
                        ious = iou2d(ltubelets[:, :4], last_tubelet[4*K-4:4*K])
                    valid = np.where(ious >= 0.2)[0]
                    if valid.size > 0:
                        # take the one with maximum score
                        idx = valid[np.argmax(ltubelets[valid, -1])]
                        CURRENT_TUBES[it].append((frame, ltubelets[idx, :]))
                        ltubelets = np.delete(ltubelets, idx, axis=0)
                    else:
                        # no match: close the tube after 5 frames without extension
                        if offset >= 5:
                            finished.append(it)
                # finished tubes that are done
                for it in finished[::-1]:  # process in reverse order to delete them with the right index
                    FINISHED_TUBES.append(CURRENT_TUBES[it][:])
                    del CURRENT_TUBES[it]
                # start new tubes from the unmatched tubelets
                for i in range(ltubelets.shape[0]):
                    CURRENT_TUBES.append([(frame, ltubelets[i, :])])
            # all tubes are not finished
            FINISHED_TUBES += CURRENT_TUBES
            # build real tubes
            output = []
            for t in FINISHED_TUBES:
                score = tubescore(t)
                # drop low-scoring tubes
                if score < 0.01:
                    continue
                beginframe = t[0][0]
                endframe = t[-1][0] + K - 1
                length = endframe + 1 - beginframe
                # delete tubes with short duration
                if length < 15:
                    continue
                # build final tubes by averaging the tubelets
                out = np.zeros((length, 6), dtype=np.float32)  # frame, x1, y1, x2, y2, score
                out[:, 0] = np.arange(beginframe, endframe + 1)
                n_per_frame = np.zeros((length, 1), dtype=np.int32)
                for i in range(len(t)):
                    frame, box = t[i]
                    for k in range(K):
                        out[frame-beginframe+k, 1:5] += box[4*k:4*k+4]
                        out[frame-beginframe+k, -1] += box[-1]
                        n_per_frame[frame-beginframe+k, 0] += 1
                out[:, 1:] /= n_per_frame
                output.append((out, score))
            RES[ilabel] = output
        with open(outfile, 'wb') as fid:
            pickle.dump(RES, fid)
def videoAP(dname, th=0.5, redo=False):
    """Compute video-level average precision (video-AP) at spatio-temporal
    IoU threshold `th` from the per-video tube files written by BuildTubes,
    then print per-class AP and the mAP. Precision/recall curves are cached
    in videoAP<th>.pkl.

    Parameters
    ----------
    dname : str
        Dataset name, passed to GetDataset.
    th : float
        Spatio-temporal IoU threshold for a detection to count as positive.
    redo : bool
        If True, recompute even when a cached result file exists.
    """
    d = GetDataset(dname)
    dirname = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
    eval_file = os.path.join(dirname, "videoAP{:g}.pkl".format(th))
    if os.path.isfile(eval_file) and not redo:
        with open(eval_file, 'rb') as fid:
            res = pickle.load(fid)
    else:
        vlist = d.test_vlist()
        # load detections
        # alldets = for each label in 1..nlabels, list of tuple (v,score,tube as Kx5 array)
        alldets = {ilabel: [] for ilabel in range(d.nlabels)}
        for v in vlist:
            tubename = os.path.join(dirname, v + '_tubes.pkl')
            if not os.path.isfile(tubename):
                print("ERROR: Missing extracted tubes " + tubename)
                sys.exit()
            with open(tubename, 'rb') as fid:
                tubes = pickle.load(fid)
            for ilabel in range(d.nlabels):
                ltubes = tubes[ilabel]
                # 3D (spatio-temporal) non-maximum suppression at 0.3 overlap
                idx = nms3dt(ltubes, 0.3)
                alldets[ilabel] += [(v, ltubes[i][1], ltubes[i][0]) for i in idx]
        # compute AP for each class
        res = {}
        for ilabel in range(d.nlabels):
            detections = alldets[ilabel]
            # load ground-truth
            gt = {}
            for v in vlist:
                tubes = d.gttubes(v)
                if not ilabel in tubes:
                    continue
                gt[v] = tubes[ilabel]
                if len(gt[v]) == 0:
                    del gt[v]
            # precision,recall
            pr = np.empty((len(detections) + 1, 2), dtype=np.float32)
            pr[0, 0] = 1.0
            pr[0, 1] = 0.0
            fn = sum([len(g) for g in gt.values()])  # false negatives
            fp = 0  # false positives
            tp = 0  # true positives
            # greedy matching in decreasing score order; matched gt tubes are
            # removed so each can be claimed only once
            for i, j in enumerate(np.argsort(-np.array([dd[1] for dd in detections]))):
                v, score, tube = detections[j]
                ispositive = False
                if v in gt:
                    ious = [iou3dt(g, tube) for g in gt[v]]
                    amax = np.argmax(ious)
                    if ious[amax] >= th:
                        ispositive = True
                        del gt[v][amax]
                        if len(gt[v]) == 0:
                            del gt[v]
                if ispositive:
                    tp += 1
                    fn -= 1
                else:
                    fp += 1
                pr[i+1, 0] = float(tp) / float(tp + fp)
                pr[i+1, 1] = float(tp) / float(tp + fn)
            res[d.labels[ilabel]] = pr
        # save results
        with open(eval_file, 'wb') as fid:
            pickle.dump(res, fid)
    # display results
    ap = 100 * np.array([pr_to_ap(res[label]) for label in d.labels])
    # NOTE(review): the header below says "frameAP" although these are
    # video-level APs — looks like a copy-paste from frameAP; confirm intended.
    print("frameAP")
    for il, _ in enumerate(d.labels):
        print("{:20s} {:8.2f}".format('', ap[il]))
    print("{:20s} {:8.2f}".format("mAP", np.mean(ap)))
    print("")
if __name__=="__main__":
    # CLI entry point: the first argument is executed verbatim as Python
    # source, e.g. `python eval.py "videoAP('UCF101')"`.
    # SECURITY NOTE(review): exec() on a command-line string runs arbitrary
    # code — acceptable only for a trusted research script, never on
    # untrusted input.
    exec(sys.argv[1])
| [
"sys.path.insert",
"numpy.argsort",
"numpy.array",
"sys.exit",
"copy.deepcopy",
"numpy.arange",
"numpy.mean",
"numpy.where",
"numpy.delete",
"Dataset.GetDataset",
"numpy.max",
"numpy.empty",
"numpy.concatenate",
"numpy.ones",
"pickle.load",
"numpy.argmax",
"numpy.any",
"os.path.isf... | [((140, 177), 'sys.path.insert', 'sys.path.insert', (['(0)', 'CAFFE_PYTHON_PATH'], {}), '(0, CAFFE_PYTHON_PATH)\n', (155, 177), False, 'import sys\n'), ((300, 347), 'numpy.array', 'np.array', (['[[[104, 117, 123]]]'], {'dtype': 'np.float32'}), '([[[104, 117, 123]]], dtype=np.float32)\n', (308, 347), True, 'import numpy as np\n'), ((100, 125), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (115, 125), False, 'import os\n'), ((1355, 1372), 'Dataset.GetDataset', 'GetDataset', (['dname'], {}), '(dname)\n', (1365, 1372), False, 'from Dataset import GetDataset\n'), ((1681, 1727), 'os.path.join', 'os.path.join', (['model_dir', '"""deploy_RGB.prototxt"""'], {}), "(model_dir, 'deploy_RGB.prototxt')\n", (1693, 1727), False, 'import os\n'), ((1744, 1785), 'os.path.join', 'os.path.join', (['model_dir', '"""RGB.caffemodel"""'], {}), "(model_dir, 'RGB.caffemodel')\n", (1756, 1785), False, 'import os\n'), ((1800, 1851), 'caffe.Net', 'caffe.Net', (['rgb_proto', 'caffe.TEST'], {'weights': 'rgb_model'}), '(rgb_proto, caffe.TEST, weights=rgb_model)\n', (1809, 1851), False, 'import caffe\n'), ((1902, 1950), 'os.path.join', 'os.path.join', (['model_dir', '"""deploy_FLOW5.prototxt"""'], {}), "(model_dir, 'deploy_FLOW5.prototxt')\n", (1914, 1950), False, 'import os\n'), ((1967, 2010), 'os.path.join', 'os.path.join', (['model_dir', '"""FLOW5.caffemodel"""'], {}), "(model_dir, 'FLOW5.caffemodel')\n", (1979, 2010), False, 'import os\n'), ((2025, 2076), 'caffe.Net', 'caffe.Net', (['flo_proto', 'caffe.TEST'], {'weights': 'flo_model'}), '(flo_proto, caffe.TEST, weights=flo_model)\n', (2034, 2076), False, 'import caffe\n'), ((8215, 8246), 'numpy.concatenate', 'np.concatenate', (['alldets'], {'axis': '(0)'}), '(alldets, axis=0)\n', (8229, 8246), True, 'import numpy as np\n'), ((8296, 8313), 'Dataset.GetDataset', 'GetDataset', (['dname'], {}), '(dname)\n', (8306, 8313), False, 'from Dataset import GetDataset\n'), ((11339, 11356), 'Dataset.GetDataset', 
'GetDataset', (['dname'], {}), '(dname)\n', (11349, 11356), False, 'from Dataset import GetDataset\n'), ((16305, 16322), 'Dataset.GetDataset', 'GetDataset', (['dname'], {}), '(dname)\n', (16315, 16322), False, 'from Dataset import GetDataset\n'), ((16429, 16467), 'os.path.join', 'os.path.join', (['dirname', '"""frameMABO.pkl"""'], {}), "(dirname, 'frameMABO.pkl')\n", (16441, 16467), False, 'import os\n'), ((18674, 18691), 'Dataset.GetDataset', 'GetDataset', (['dname'], {}), '(dname)\n', (18684, 18691), False, 'from Dataset import GetDataset\n'), ((18797, 18838), 'os.path.join', 'os.path.join', (['dirname', '"""frameCLASSIF.pkl"""'], {}), "(dirname, 'frameCLASSIF.pkl')\n", (18809, 18838), False, 'import os\n'), ((21261, 21278), 'Dataset.GetDataset', 'GetDataset', (['dname'], {}), '(dname)\n', (21271, 21278), False, 'from Dataset import GetDataset\n'), ((26397, 26414), 'Dataset.GetDataset', 'GetDataset', (['dname'], {}), '(dname)\n', (26407, 26414), False, 'from Dataset import GetDataset\n'), ((1399, 1419), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (1417, 1419), False, 'import caffe\n'), ((1428, 1449), 'caffe.set_device', 'caffe.set_device', (['gpu'], {}), '(gpu)\n', (1444, 1449), False, 'import caffe\n'), ((1480, 1505), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1495, 1505), False, 'import os\n'), ((1571, 1596), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1586, 1596), False, 'import os\n'), ((2385, 2429), 'numpy.array', 'np.array', (['([w, h, w, h] * K)'], {'dtype': 'np.float32'}), '([w, h, w, h] * K, dtype=np.float32)\n', (2393, 2429), True, 'import numpy as np\n'), ((6495, 6508), 'Dataset.GetDataset', 'GetDataset', (['d'], {}), '(d)\n', (6505, 6508), False, 'from Dataset import GetDataset\n'), ((8341, 8366), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (8356, 8366), False, 'import os\n'), ((8488, 8513), 'os.path.isfile', 'os.path.isfile', (['eval_file'], 
{}), '(eval_file)\n', (8502, 8513), False, 'import os\n'), ((11384, 11409), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (11399, 11409), False, 'import os\n'), ((11544, 11569), 'os.path.isfile', 'os.path.isfile', (['eval_file'], {}), '(eval_file)\n', (11558, 11569), False, 'import os\n'), ((16351, 16376), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (16366, 16376), False, 'import os\n'), ((16480, 16505), 'os.path.isfile', 'os.path.isfile', (['eval_file'], {}), '(eval_file)\n', (16494, 16505), False, 'import os\n'), ((18719, 18744), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (18734, 18744), False, 'import os\n'), ((18851, 18876), 'os.path.isfile', 'os.path.isfile', (['eval_file'], {}), '(eval_file)\n', (18865, 18876), False, 'import os\n'), ((21307, 21332), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (21322, 21332), False, 'import os\n'), ((21531, 21570), 'os.path.join', 'os.path.join', (['dirname', "(v + '_tubes.pkl')"], {}), "(dirname, v + '_tubes.pkl')\n", (21543, 21570), False, 'import os\n'), ((26443, 26468), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (26458, 26468), False, 'import os\n'), ((26585, 26610), 'os.path.isfile', 'os.path.isfile', (['eval_file'], {}), '(eval_file)\n', (26599, 26610), False, 'import os\n'), ((6760, 6794), 'numpy.empty', 'np.empty', (['(0, 6)'], {'dtype': 'np.float32'}), '((0, 6), dtype=np.float32)\n', (6768, 6794), True, 'import numpy as np\n'), ((7583, 7613), 'numpy.empty', 'np.empty', (['(0,)'], {'dtype': 'np.int32'}), '((0,), dtype=np.int32)\n', (7591, 7613), True, 'import numpy as np\n'), ((8589, 8605), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (8600, 8605), False, 'import pickle\n'), ((9670, 9726), 'numpy.empty', 'np.empty', (['(detections.shape[0] + 1, 2)'], {'dtype': 'np.float32'}), '((detections.shape[0] + 1, 2), dtype=np.float32)\n', (9678, 9726), True, 'import 
numpy as np\n'), ((10978, 10999), 'pickle.dump', 'pickle.dump', (['res', 'fid'], {}), '(res, fid)\n', (10989, 10999), False, 'import pickle\n'), ((11256, 11267), 'numpy.mean', 'np.mean', (['ap'], {}), '(ap)\n', (11263, 11267), True, 'import numpy as np\n'), ((11645, 11661), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (11656, 11661), False, 'import pickle\n'), ((13028, 13040), 'copy.deepcopy', 'deepcopy', (['gt'], {}), '(gt)\n', (13036, 13040), False, 'from copy import deepcopy\n'), ((13211, 13267), 'numpy.empty', 'np.empty', (['(detections.shape[0] + 1, 6)'], {'dtype': 'np.float32'}), '((detections.shape[0] + 1, 6), dtype=np.float32)\n', (13219, 13267), True, 'import numpy as np\n'), ((15335, 15356), 'pickle.dump', 'pickle.dump', (['res', 'fid'], {}), '(res, fid)\n', (15346, 15356), False, 'import pickle\n'), ((15672, 15723), 'numpy.array', 'np.array', (['[res[label][-1, 1] for label in d.labels]'], {}), '([res[label][-1, 1] for label in d.labels])\n', (15680, 15723), True, 'import numpy as np\n'), ((16580, 16596), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (16591, 16596), False, 'import pickle\n'), ((18956, 18972), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (18967, 18972), False, 'import pickle\n'), ((20983, 21008), 'pickle.dump', 'pickle.dump', (['CLASSIF', 'fid'], {}), '(CLASSIF, fid)\n', (20994, 21008), False, 'import pickle\n'), ((21591, 21614), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (21605, 21614), False, 'import os\n'), ((26326, 26347), 'pickle.dump', 'pickle.dump', (['RES', 'fid'], {}), '(RES, fid)\n', (26337, 26347), False, 'import pickle\n'), ((26686, 26702), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (26697, 26702), False, 'import pickle\n'), ((26980, 27019), 'os.path.join', 'os.path.join', (['dirname', "(v + '_tubes.pkl')"], {}), "(dirname, v + '_tubes.pkl')\n", (26992, 27019), False, 'import os\n'), ((29218, 29239), 'pickle.dump', 'pickle.dump', (['res', 'fid'], {}), 
'(res, fid)\n', (29229, 29239), False, 'import pickle\n'), ((29485, 29496), 'numpy.mean', 'np.mean', (['ap'], {}), '(ap)\n', (29492, 29496), True, 'import numpy as np\n'), ((2664, 2687), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (2678, 2687), False, 'import os\n'), ((3090, 3156), 'cv2.resize', 'cv2.resize', (['im', '(IMGSIZE, IMGSIZE)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR)\n', (3100, 3156), False, 'import cv2\n'), ((3796, 3828), 'numpy.concatenate', 'np.concatenate', (['timscale'], {'axis': '(1)'}), '(timscale, axis=1)\n', (3810, 3828), True, 'import numpy as np\n'), ((5218, 5236), 'numpy.all', 'np.all', (['(dets == -1)'], {}), '(dets == -1)\n', (5224, 5236), True, 'import numpy as np\n'), ((5261, 5307), 'numpy.empty', 'np.empty', (['(0, dets.shape[1])'], {'dtype': 'np.float32'}), '((0, dets.shape[1]), dtype=np.float32)\n', (5269, 5307), True, 'import numpy as np\n'), ((5528, 5556), 'numpy.minimum', 'np.minimum', (['w', 'dets[:, 2::2]'], {}), '(w, dets[:, 2::2])\n', (5538, 5556), True, 'import numpy as np\n'), ((5600, 5628), 'numpy.minimum', 'np.minimum', (['h', 'dets[:, 3::2]'], {}), '(h, dets[:, 3::2])\n', (5610, 5628), True, 'import numpy as np\n'), ((5848, 5885), 'numpy.minimum', 'np.minimum', (['w', 'dets_all[:, 0:4 * K:2]'], {}), '(w, dets_all[:, 0:4 * K:2])\n', (5858, 5885), True, 'import numpy as np\n'), ((5934, 5971), 'numpy.minimum', 'np.minimum', (['h', 'dets_all[:, 1:4 * K:2]'], {}), '(h, dets_all[:, 1:4 * K:2])\n', (5944, 5971), True, 'import numpy as np\n'), ((6368, 6402), 'pickle.dump', 'pickle.dump', (['(dets, dets_all)', 'fid'], {}), '((dets, dets_all), fid)\n', (6379, 6402), False, 'import pickle\n'), ((7078, 7101), 'os.path.isfile', 'os.path.isfile', (['resname'], {}), '(resname)\n', (7092, 7101), False, 'import os\n'), ((7187, 7197), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7195, 7197), False, 'import sys\n'), ((7270, 7286), 'pickle.load', 'pickle.load', 
(['fid'], {}), '(fid)\n', (7281, 7286), False, 'import pickle\n'), ((9553, 9568), 'numpy.array', 'np.array', (['gt[k]'], {}), '(gt[k])\n', (9561, 9568), True, 'import numpy as np\n'), ((9993, 10022), 'numpy.argsort', 'np.argsort', (['(-detections[:, 3])'], {}), '(-detections[:, 3])\n', (10003, 10022), True, 'import numpy as np\n'), ((12910, 12925), 'numpy.array', 'np.array', (['gt[k]'], {}), '(gt[k])\n', (12918, 12925), True, 'import numpy as np\n'), ((12986, 13006), 'numpy.array', 'np.array', (['othergt[k]'], {}), '(othergt[k])\n', (12994, 13006), True, 'import numpy as np\n'), ((13778, 13807), 'numpy.argsort', 'np.argsort', (['(-detections[:, 3])'], {}), '(-detections[:, 3])\n', (13788, 13807), True, 'import numpy as np\n'), ((16867, 16901), 'numpy.empty', 'np.empty', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (16875, 16901), True, 'import numpy as np\n'), ((18331, 18351), 'pickle.dump', 'pickle.dump', (['BO', 'fid'], {}), '(BO, fid)\n', (18342, 18351), False, 'import pickle\n'), ((18408, 18424), 'numpy.array', 'np.array', (['BO[la]'], {}), '(BO[la])\n', (18416, 18424), True, 'import numpy as np\n'), ((21937, 21960), 'os.path.isfile', 'os.path.isfile', (['resname'], {}), '(resname)\n', (21951, 21960), False, 'import os\n'), ((22048, 22058), 'sys.exit', 'sys.exit', ([], {}), '()\n', (22056, 22058), False, 'import sys\n'), ((22156, 22172), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (22167, 22172), False, 'import pickle\n'), ((25661, 25700), 'numpy.zeros', 'np.zeros', (['(length, 6)'], {'dtype': 'np.float32'}), '((length, 6), dtype=np.float32)\n', (25669, 25700), True, 'import numpy as np\n'), ((25729, 25764), 'numpy.arange', 'np.arange', (['beginframe', '(endframe + 1)'], {}), '(beginframe, endframe + 1)\n', (25738, 25764), True, 'import numpy as np\n'), ((25792, 25829), 'numpy.zeros', 'np.zeros', (['(length, 1)'], {'dtype': 'np.int32'}), '((length, 1), dtype=np.int32)\n', (25800, 25829), True, 'import numpy as np\n'), 
((27039, 27063), 'os.path.isfile', 'os.path.isfile', (['tubename'], {}), '(tubename)\n', (27053, 27063), False, 'import os\n'), ((27149, 27159), 'sys.exit', 'sys.exit', ([], {}), '()\n', (27157, 27159), False, 'import sys\n'), ((27244, 27260), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (27255, 27260), False, 'import pickle\n'), ((3210, 3249), 'numpy.transpose', 'np.transpose', (['(imscale - MEAN)', '(2, 0, 1)'], {}), '(imscale - MEAN, (2, 0, 1))\n', (3222, 3249), True, 'import numpy as np\n'), ((3393, 3404), 'numpy.any', 'np.any', (['imf'], {}), '(imf)\n', (3399, 3404), True, 'import numpy as np\n'), ((3556, 3622), 'cv2.resize', 'cv2.resize', (['im', '(IMGSIZE, IMGSIZE)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR)\n', (3566, 3622), False, 'import cv2\n'), ((6213, 6237), 'os.path.dirname', 'os.path.dirname', (['outfile'], {}), '(outfile)\n', (6228, 6237), False, 'import os\n'), ((7678, 7712), 'numpy.where', 'np.where', (['(vdets[i][:, 5] == ilabel)'], {}), '(vdets[i][:, 5] == ilabel)\n', (7686, 7712), True, 'import numpy as np\n'), ((10282, 10297), 'numpy.argmax', 'np.argmax', (['ious'], {}), '(ious)\n', (10291, 10297), True, 'import numpy as np\n'), ((17137, 17160), 'os.path.isfile', 'os.path.isfile', (['resname'], {}), '(resname)\n', (17151, 17160), False, 'import os\n'), ((17256, 17266), 'sys.exit', 'sys.exit', ([], {}), '()\n', (17264, 17266), False, 'import sys\n'), ((17363, 17379), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (17374, 17379), False, 'import pickle\n'), ((17465, 17533), 'numpy.concatenate', 'np.concatenate', (['(vdets[i + k], dets[:, 2 + 4 * k:6 + 4 * k])'], {'axis': '(0)'}), '((vdets[i + k], dets[:, 2 + 4 * k:6 + 4 * k]), axis=0)\n', (17479, 17533), True, 'import numpy as np\n'), ((19428, 19451), 'os.path.isfile', 'os.path.isfile', (['resname'], {}), '(resname)\n', (19442, 19451), False, 'import os\n'), ((19547, 19557), 'sys.exit', 'sys.exit', ([], {}), '()\n', 
(19555, 19557), False, 'import sys\n'), ((19667, 19683), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (19678, 19683), False, 'import pickle\n'), ((21195, 21212), 'numpy.array', 'np.array', (['CLASSIF'], {}), '(CLASSIF)\n', (21203, 21212), True, 'import numpy as np\n'), ((28577, 28592), 'numpy.argmax', 'np.argmax', (['ious'], {}), '(ious)\n', (28586, 28592), True, 'import numpy as np\n'), ((3666, 3700), 'numpy.transpose', 'np.transpose', (['(im - MEAN)', '(2, 0, 1)'], {}), '(im - MEAN, (2, 0, 1))\n', (3678, 3700), True, 'import numpy as np\n'), ((6280, 6304), 'os.path.dirname', 'os.path.dirname', (['outfile'], {}), '(outfile)\n', (6295, 6304), False, 'import os\n'), ((10434, 10459), 'numpy.delete', 'np.delete', (['gt[k]', 'amax', '(0)'], {}), '(gt[k], amax, 0)\n', (10443, 10459), True, 'import numpy as np\n'), ((14095, 14110), 'numpy.argmax', 'np.argmax', (['ious'], {}), '(ious)\n', (14104, 14110), True, 'import numpy as np\n'), ((14259, 14284), 'numpy.delete', 'np.delete', (['gt[k]', 'amax', '(0)'], {}), '(gt[k], amax, 0)\n', (14268, 14284), True, 'import numpy as np\n'), ((16219, 16229), 'numpy.mean', 'np.mean', (['L'], {}), '(L)\n', (16226, 16229), True, 'import numpy as np\n'), ((20189, 20229), 'numpy.zeros', 'np.zeros', (['(d.nlabels,)'], {'dtype': 'np.float32'}), '((d.nlabels,), dtype=np.float32)\n', (20197, 20229), True, 'import numpy as np\n'), ((23232, 23250), 'numpy.array', 'np.array', (['avgscore'], {}), '(avgscore)\n', (23240, 23250), True, 'import numpy as np\n'), ((24046, 24067), 'numpy.where', 'np.where', (['(ious >= 0.2)'], {}), '(ious >= 0.2)\n', (24054, 24067), True, 'import numpy as np\n'), ((24369, 24402), 'numpy.delete', 'np.delete', (['ltubelets', 'idx'], {'axis': '(0)'}), '(ltubelets, idx, axis=0)\n', (24378, 24402), True, 'import numpy as np\n'), ((28321, 28359), 'numpy.array', 'np.array', (['[dd[1] for dd in detections]'], {}), '([dd[1] for dd in detections])\n', (28329, 28359), True, 'import numpy as np\n'), ((6037, 6076), 
'numpy.max', 'np.max', (['dets_all[:, 4 * K + 1:]'], {'axis': '(1)'}), '(dets_all[:, 4 * K + 1:], axis=1)\n', (6043, 6076), True, 'import numpy as np\n'), ((8034, 8074), 'numpy.ones', 'np.ones', (['(idx.size, 1)'], {'dtype': 'np.float32'}), '((idx.size, 1), dtype=np.float32)\n', (8041, 8074), True, 'import numpy as np\n'), ((8080, 8120), 'numpy.ones', 'np.ones', (['(idx.size, 1)'], {'dtype': 'np.float32'}), '((idx.size, 1), dtype=np.float32)\n', (8087, 8120), True, 'import numpy as np\n'), ((14535, 14547), 'numpy.max', 'np.max', (['ious'], {}), '(ious)\n', (14541, 14547), True, 'import numpy as np\n'), ((18229, 18241), 'numpy.max', 'np.max', (['ious'], {}), '(ious)\n', (18235, 18241), True, 'import numpy as np\n'), ((20547, 20601), 'numpy.sum', 'np.sum', (['VDets[sf][overlaps >= 0.7, 4 * K + 1:]'], {'axis': '(0)'}), '(VDets[sf][overlaps >= 0.7, 4 * K + 1:], axis=0)\n', (20553, 20601), True, 'import numpy as np\n'), ((20691, 20708), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (20700, 20708), True, 'import numpy as np\n'), ((24224, 24255), 'numpy.argmax', 'np.argmax', (['ltubelets[valid, -1]'], {}), '(ltubelets[valid, -1])\n', (24233, 24255), True, 'import numpy as np\n'), ((7452, 7512), 'numpy.array', 'np.array', (['[2 + 4 * k, 3 + 4 * k, 4 + 4 * k, 5 + 4 * k, 1, 0]'], {}), '([2 + 4 * k, 3 + 4 * k, 4 + 4 * k, 5 + 4 * k, 1, 0])\n', (7460, 7512), True, 'import numpy as np\n'), ((8142, 8186), 'numpy.array', 'np.array', (['[5, 4, 0, 1, 2, 3]'], {'dtype': 'np.int32'}), '([5, 4, 0, 1, 2, 3], dtype=np.int32)\n', (8150, 8186), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import os
import poppy
from .main import GeminiPrimary
# Classes for dealing with AO Telemetry sets
class GPI_Globals(object):
    """ Container for same constants as gpilib's gpi_globals,
    with same variable names to ease porting of code. Plus some
    other variables as needed."""
    # Actuator counts across the tweeter and woofer deformable mirrors
    gpi_tweet_n = 48
    gpi_woof_n = 9
    # Number of tweeter actuator pitches across the primary diameter
    gpi_numacross=43.2
    # Actuator pitches projected onto the Gemini primary (same units as
    # GeminiPrimary.primary_diameter — presumably meters; confirm);
    # the woofer pitch is 5.5 tweeter pitches.
    gpi_tweet_spacing = GeminiPrimary.primary_diameter/gpi_numacross
    gpi_woof_spacing = GeminiPrimary.primary_diameter/gpi_numacross*5.5
    # below ones are not in gpi_globals
    # Pupil-center coordinates in actuator units, per grid
    pupil_center_subap = 23
    pupil_center_tweeter=23.5
    pupil_center_woofer=4
class DeformableMirror(poppy.AnalyticOpticalElement):
""" Generic deformable mirror, of the continuous face sheet variety"""
def __init__(self, shape=(10,10)):
poppy.OpticalElement.__init__(self, planetype=poppy.poppy_core._PUPIL)
self._shape = shape # number of actuators
self._surface = np.zeros(shape) # array for the DM surface WFE
self.numacross = shape[0] # number of actuators across diameter of
# the optic's cleared aperture (may be
# less than full diameter of array)
self.actuator_spacing = 1.0/self.numacross # distance between actuators,
# projected onto the primary
self.pupil_center = (shape[0]-1.)/2 # center of clear aperture in actuator units
# (may be offset from center of DM)
    @property
    def shape(self):
        # Actuator grid dimensions (ny, nx), fixed at construction time.
        return self._shape
    @property
    def surface(self):
        """ The surface shape of the deformable mirror, in
        **meters** """
        # Returns the internal array itself, not a copy: in-place
        # modifications by the caller would change the DM state.
        return self._surface
def set_surface(self, new_surface, units='nm'):
""" Set the entire surface shape of the DM.
Parameters
-------------
new_surface : 2d ndarray
Desired DM surface shape
(note that wavefront error will be 2x this)
units : string
Right now this *must* be 'nm' for nanometers,
which is the default. Other units may be added later
if needed.
"""
assert new_surface.shape == self.shape
if units!='nm':
raise NotImplementedError("Units other than nanometers not yet implemented.")
self._surface[:] = np.asarray(new_surface, dtype=float)*1e-9
def set_actuator(self, actx, acty, new_value, units='nm'):
""" Set the entire surface shape of the DM.
Parameters
-------------
actx, acty : integers
Coordinates of the actuator you wish to control
new_value : float
Desired surface height for that actuator
(note that wavefront error will be 2x this)
units : string
Right now this *must* be 'nm' for nanometers,
which is the default. Other units may be added later
if needed.
Example
-----------
dm.set_actuator(12,22, 123.4)
"""
# FIXME do something more comprehensive with units
assert units=='nm'
if actx < 0 or actx > self.shape[1]-1:
raise ValueError("X axis coordinate is out of range")
if acty < 0 or acty > self.shape[0]-1:
raise ValueError("Y axis coordinate is out of range")
self._surface[acty, actx] = new_value*1e-9
def get_coordinates(self, one_d=False):
""" Y and X coordinates for the actuators
Parameters
------------
one_d : bool
Return 1-dimensional arrays of coordinates per axis?
Default is to return 2D arrays with same shape as full array.
"""
y_act = (np.arange(self.shape[0])-self.pupil_center)*self.actuator_spacing
x_act = (np.arange(self.shape[1])-self.pupil_center)*self.actuator_spacing
if not one_d: # convert to 2D
y_act.shape = (self.shape[0],1)
y_act = y_act * np.ones( (1, self.shape[1]))
x_act.shape = (1, self.shape[1])
x_act = x_act * np.ones( (self.shape[0], 1))
return y_act, x_act
def get_opd(self,wave):
""" Return the surface optical path delay for the optic.
Interpolates from the current optic surface state onto the
desired coordinates for the wave.
CAUTION: This right now uses a fairly simple representation
of the actuator influence function, which should not be
taken too seriously just yet.
"""
# the following could be replaced with a higher fidelity model if needed
interpolated_surface = self._get_surface_via_gaussian_influence_functions(wave)
return interpolated_surface
#phasor = np.exp(1.j * 2 * np.pi * interpolated_surface/wave.wavelength)
#return phasor
def _get_surface_via_gaussian_influence_functions(self, wave):
""" Infer a finely-sampled surface from simple Gaussian influence functions centered on
each actuator.
Work in progress, oversimplified, not a great representation of the true influence function
"""
y, x = wave.coordinates()
y_act, x_act = self.get_coordinates(one_d=True)
interpolated_surface = np.zeros(wave.shape)
crosstalk = 0.15 # amount of crosstalk on advancent actuator
sigma = self.actuator_spacing/np.sqrt((-np.log(crosstalk)))
pixelscale = x[0,1]-x[0,0] # scale of x,y
boxsize = (3*sigma)/pixelscale # half size for subarray
for yi, yc in enumerate(y_act):
for xi, xc in enumerate(x_act):
if self._surface[yi,xi] == 0: continue
# 2d Gaussian
r = ((x - xc)**2 + (y-yc)**2)/sigma**2
interpolated_surface += self._surface[yi,xi] * np.exp(-r)
return interpolated_surface
def display(self, annotate=False, grid=False, what='opd', crosshairs=False, *args, **kwargs):
    """Display this optic by first computing it onto a grid.

    Parameters
    ----------
    annotate : bool
        Overplot the actuator coordinates after display?
    grid : bool
        Overplot the actuator grid lines after display?
    what : str
        What to display: 'intensity', 'surface', 'phase', or 'opd'.
        'both' is not yet implemented here.
    crosshairs : bool
        Display crosshairs indicating the center?

    Additional positional and keyword arguments (e.g. wavelength,
    npix, ax, nrows/row, colorbar, colorbar_orientation, opd_vmax,
    title) are forwarded to poppy.AnalyticOpticalElement.display.
    """
    if what == 'both':
        raise NotImplementedError('still need to implement display both mode for display_actuators')
    kwargs['crosshairs'] = crosshairs
    kwargs['what'] = what
    result = poppy.AnalyticOpticalElement.display(self, *args, **kwargs)
    if annotate:
        self.annotate()
    if grid:
        self.annotate_grid()
    return result
def display_actuators(self, annotate=False, grid=True, what='surface', crosshairs=False, *args, **kwargs):
    """Display the optical surface, viewed as discrete actuators.

    Parameters
    ----------
    annotate : bool
        Annotate coordinates and types of actuators on the display? Default false.
    grid : bool
        Annotate grid of actuators on the display? Default true.
    what : string
        What to display: 'intensity' transmission, 'surface' or 'phase', or 'both'
    """
    # Temporarily masquerade as a regular OpticalElement sampled on the
    # actuator grid, so the parent class display machinery can be reused.
    self.amplitude = np.ones_like(self.surface)
    self.opd = self.surface
    self.pixelscale = self.actuator_spacing
    if what == 'surface':  # back compatibility for older poppy syntax
        what = 'phase'
    result = poppy.OpticalElement.display(self, what=what, crosshairs=crosshairs, **kwargs)
    # Un-set the temporary attributes again: this element is analytic
    # and does not normally carry them.
    del self.pixelscale
    del self.opd
    del self.amplitude
    if annotate:
        self.annotate()
    if grid:
        self.annotate_grid()
    return result
def annotate(self, marker='o', **kwargs):
    """Overplot the actuator coordinates on an already-existing pupil display."""
    yc, xc = self.get_coordinates()
    ax = plt.gca()
    # Temporarily disable autoscaling so the overlay does not rescale
    # the prior plot's X, Y limits, then restore the previous state.
    saved_autoscale = (ax._autoscaleXon, ax._autoscaleYon)
    ax.autoscale(False)
    plt.scatter(xc, yc, marker=marker, **kwargs)
    ax._autoscaleXon, ax._autoscaleYon = saved_autoscale
def annotate_grid(self, linestyle=":", color="black", **kwargs):
    """Overplot grid lines at the actuator cell boundaries."""
    y_act, x_act = self.get_coordinates(one_d=True)
    ax = plt.gca()
    half_cell = self.actuator_spacing / 2
    for x_pos in x_act:
        plt.axvline(x_pos + half_cell, linestyle=linestyle, color=color)
    for y_pos in y_act:
        plt.axhline(y_pos + half_cell, linestyle=linestyle, color=color)
class GPITweeter(DeformableMirror):
    """High-order "tweeter" deformable mirror model, optionally including
    a model of the MEMS print-through pattern on the mirror surface."""
    def __init__(self, mems_print_through=True):
        DeformableMirror.__init__(self, shape=(GPI_Globals.gpi_tweet_n, GPI_Globals.gpi_tweet_n))
        self.name = "<NAME>"
        self.numacross = GPI_Globals.gpi_numacross
        self.actuator_spacing = GPI_Globals.gpi_tweet_spacing
        self.pupil_center = GPI_Globals.pupil_center_tweeter
        self.pupil_diam = GPI_Globals.gpi_tweet_n*GPI_Globals.gpi_tweet_spacing # for display, projected full area around 48x48 subaps
        self.mems_print_through = mems_print_through
        self._mems_print_through_amplitude = 15e-9
        # 15 nm, estimated from http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.513.4649&rep=rep1&type=pdf
        my_path=os.path.abspath(os.path.dirname(__file__))
        # FITS map flagging each actuator's operability (DEAD/WEAK/etc.), shipped alongside this module.
        self._actuator_type_info = fits.open(os.path.join(my_path, 'data','GPI_tweeter_actuators.fits'))
    @property
    def bad_actuators(self):
        """Returns a list of [x, y, label] entries for the nonoperable
        actuators, where label is 'DEAD' or 'WEAK'."""
        act_map = self._actuator_type_info
        wflagged = np.where( ( act_map[0].data == act_map[0].header['DEAD']) |
                ( act_map[0].data == act_map[0].header['WEAK']) )
        output = []
        for i in range(len(wflagged[0])):
            yc,xc = wflagged[0][i], wflagged[1][i]
            label = 'DEAD' if (act_map[0].data[yc,xc] == act_map[0].header['DEAD'] ) else 'WEAK'
            output.append([xc,yc,label])
        return output
    def get_opd(self, wave):
        # Base DM surface OPD, plus (optionally) the MEMS print-through pattern.
        opd = DeformableMirror.get_opd(self,wave)
        if self.mems_print_through:
            mems_print_through_opd = self._get_opd_MEMS_print_through(wave)
            opd += mems_print_through_opd
        return opd
    def _get_opd_MEMS_print_through(self,wave):
        """ DM surface print through """
        # GPI tweeter actuators are reimaged to 18 cm subapertures
        # Boston DM print through info in:
        #    http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.513.4649&rep=rep1&type=pdf
        #    ao4elt.lesia.obspm.fr/sites/ao4elt/IMG/ppt/Bifano.ppt
        # in horizontal direction, the print through is about 35/190 pixels = 18% of the width
        # in the vertical direction, closer to 31%, but it's more like 2 narrow bars each 10% wide
        # and there's a 10% wide dot in the middle of it too
        #printthrough properties:
        pt_col_width = 0.18
        # 15 nm, estimated from http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.513.4649&rep=rep1&type=pdf
        pt_col_value = self._mems_print_through_amplitude
        pt_row_width = 0.10
        pt_row_value = -1 * self._mems_print_through_amplitude
        if not isinstance(wave, poppy.Wavefront): # pragma: no cover
            raise ValueError("getPhasor must be called with a Wavefront to define the spacing")
        assert (wave.planetype == poppy.poppy_core._PUPIL)
        opd = np.zeros(wave.shape)
        y, x = wave.coordinates()
        pixscale = x[0,1] - x[0,0]
        # NOTE(review): both masks below threshold on pt_col_width, and
        # pt_row_width is computed but never used -- confirm whether the
        # first mask was meant to use pt_row_width instead.
        opd[np.mod(x,self.actuator_spacing) <= (self.actuator_spacing*pt_col_width)] += pt_row_value
        opd[np.mod(y,self.actuator_spacing) <= (self.actuator_spacing*pt_col_width)] += pt_col_value
        return opd
        #phasor = np.exp(1.j * 2 * np.pi * opd/wave.wavelength)
        #return phasor
    def annotate(self, markbad=True, badmarker='o', marker='+', **kwargs):
        """Overplot actuator coordinates; optionally highlight flagged actuators."""
        # first plot all the normal ones
        DeformableMirror.annotate(self, marker=marker, **kwargs)
        if markbad:
            # now the less-than-good ones
            yc, xc = self.get_coordinates()
            ax = plt.gca()
            autoscale_state = (ax._autoscaleXon, ax._autoscaleYon)
            ax.autoscale(False)
            act_map = self._actuator_type_info
            for act_type, color in zip(['DEAD', 'COUPLED', 'WEAK','VARIABLE'],
                    ['red', 'orange', 'brown', 'magenta']):
                wflagged = np.where(act_map[0].data == act_map[0].header[act_type])
                plt.scatter(xc[wflagged], yc[wflagged], marker=badmarker, color=color)
            ax._autoscaleXon, ax._autoscaleYon = autoscale_state
class GPIWoofer(DeformableMirror):
    """Low-order "woofer" deformable mirror model."""

    def __init__(self):
        DeformableMirror.__init__(self, shape=(GPI_Globals.gpi_woof_n, GPI_Globals.gpi_woof_n))
        self.name = "<NAME>"
        # For display: projected full area around the tweeter's 48x48 subapertures.
        self.pupil_diam = 8.6
        self.numacross = GPI_Globals.gpi_numacross
        self.actuator_spacing = GPI_Globals.gpi_woof_spacing
        self.pupil_center = GPI_Globals.pupil_center_woofer

    def annotate(self, marker='s', color='teal', s=50, alpha=0.4, **kwargs):
        """Annotate the DM actuator coordinates.

        Applies some cosmetic defaults so woofer actuators are visually
        distinct from tweeter actuators.
        """
        DeformableMirror.annotate(self, marker=marker, color=color, s=s, alpha=alpha, **kwargs)
| [
"poppy.AnalyticOpticalElement.display",
"numpy.ones_like",
"numpy.ones",
"numpy.where",
"matplotlib.pyplot.gca",
"numpy.log",
"numpy.asarray",
"os.path.join",
"matplotlib.pyplot.axhline",
"os.path.dirname",
"numpy.zeros",
"poppy.OpticalElement.__init__",
"numpy.mod",
"numpy.exp",
"matplo... | [((879, 949), 'poppy.OpticalElement.__init__', 'poppy.OpticalElement.__init__', (['self'], {'planetype': 'poppy.poppy_core._PUPIL'}), '(self, planetype=poppy.poppy_core._PUPIL)\n', (908, 949), False, 'import poppy\n'), ((1041, 1056), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1049, 1056), True, 'import numpy as np\n'), ((5445, 5465), 'numpy.zeros', 'np.zeros', (['wave.shape'], {}), '(wave.shape)\n', (5453, 5465), True, 'import numpy as np\n'), ((7361, 7420), 'poppy.AnalyticOpticalElement.display', 'poppy.AnalyticOpticalElement.display', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (7397, 7420), False, 'import poppy\n'), ((8227, 8253), 'numpy.ones_like', 'np.ones_like', (['self.surface'], {}), '(self.surface)\n', (8239, 8253), True, 'import numpy as np\n'), ((8513, 8591), 'poppy.OpticalElement.display', 'poppy.OpticalElement.display', (['self'], {'what': 'what', 'crosshairs': 'crosshairs'}), '(self, what=what, crosshairs=crosshairs, **kwargs)\n', (8541, 8591), False, 'import poppy\n'), ((9075, 9084), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9082, 9084), True, 'import matplotlib.pyplot as plt\n'), ((9320, 9364), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xc', 'yc'], {'marker': 'marker'}), '(xc, yc, marker=marker, **kwargs)\n', (9331, 9364), True, 'import matplotlib.pyplot as plt\n'), ((9567, 9576), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9574, 9576), True, 'import matplotlib.pyplot as plt\n'), ((10919, 11028), 'numpy.where', 'np.where', (["((act_map[0].data == act_map[0].header['DEAD']) | (act_map[0].data ==\n act_map[0].header['WEAK']))"], {}), "((act_map[0].data == act_map[0].header['DEAD']) | (act_map[0].data ==\n act_map[0].header['WEAK']))\n", (10927, 11028), True, 'import numpy as np\n'), ((12776, 12796), 'numpy.zeros', 'np.zeros', (['wave.shape'], {}), '(wave.shape)\n', (12784, 12796), True, 'import numpy as np\n'), ((2528, 2564), 'numpy.asarray', 'np.asarray', (['new_surface'], {'dtype': 
'float'}), '(new_surface, dtype=float)\n', (2538, 2564), True, 'import numpy as np\n'), ((9613, 9689), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(x + self.actuator_spacing / 2)'], {'linestyle': 'linestyle', 'color': 'color'}), '(x + self.actuator_spacing / 2, linestyle=linestyle, color=color)\n', (9624, 9689), True, 'import matplotlib.pyplot as plt\n'), ((9725, 9801), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(y + self.actuator_spacing / 2)'], {'linestyle': 'linestyle', 'color': 'color'}), '(y + self.actuator_spacing / 2, linestyle=linestyle, color=color)\n', (9736, 9801), True, 'import matplotlib.pyplot as plt\n'), ((10581, 10606), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (10596, 10606), False, 'import os\n'), ((10653, 10712), 'os.path.join', 'os.path.join', (['my_path', '"""data"""', '"""GPI_tweeter_actuators.fits"""'], {}), "(my_path, 'data', 'GPI_tweeter_actuators.fits')\n", (10665, 10712), False, 'import os\n'), ((13483, 13492), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13490, 13492), True, 'import matplotlib.pyplot as plt\n'), ((3903, 3927), 'numpy.arange', 'np.arange', (['self.shape[0]'], {}), '(self.shape[0])\n', (3912, 3927), True, 'import numpy as np\n'), ((3986, 4010), 'numpy.arange', 'np.arange', (['self.shape[1]'], {}), '(self.shape[1])\n', (3995, 4010), True, 'import numpy as np\n'), ((4163, 4190), 'numpy.ones', 'np.ones', (['(1, self.shape[1])'], {}), '((1, self.shape[1]))\n', (4170, 4190), True, 'import numpy as np\n'), ((4266, 4293), 'numpy.ones', 'np.ones', (['(self.shape[0], 1)'], {}), '((self.shape[0], 1))\n', (4273, 4293), True, 'import numpy as np\n'), ((12880, 12912), 'numpy.mod', 'np.mod', (['x', 'self.actuator_spacing'], {}), '(x, self.actuator_spacing)\n', (12886, 12912), True, 'import numpy as np\n'), ((12981, 13013), 'numpy.mod', 'np.mod', (['y', 'self.actuator_spacing'], {}), '(y, self.actuator_spacing)\n', (12987, 13013), True, 'import numpy as np\n'), ((13818, 13874), 
'numpy.where', 'np.where', (['(act_map[0].data == act_map[0].header[act_type])'], {}), '(act_map[0].data == act_map[0].header[act_type])\n', (13826, 13874), True, 'import numpy as np\n'), ((13891, 13961), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xc[wflagged]', 'yc[wflagged]'], {'marker': 'badmarker', 'color': 'color'}), '(xc[wflagged], yc[wflagged], marker=badmarker, color=color)\n', (13902, 13961), True, 'import matplotlib.pyplot as plt\n'), ((5584, 5601), 'numpy.log', 'np.log', (['crosstalk'], {}), '(crosstalk)\n', (5590, 5601), True, 'import numpy as np\n'), ((6024, 6034), 'numpy.exp', 'np.exp', (['(-r)'], {}), '(-r)\n', (6030, 6034), True, 'import numpy as np\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_Pairwise_Distance.ipynb (unless otherwise specified).
__all__ = ['USE_64', 'gpu_dist_matrix', 'component_mixture_dist_matrix']
# Cell
import numpy as np
import numba as nb
from numba import cuda
from scipy.spatial.distance import pdist
# Cell
# Precision selection: 64-bit floats by default.
USE_64 = True
if USE_64:
    bits, np_type = 64, np.float64
else:
    bits, np_type = 32, np.float32
# Cell
@cuda.jit("void(float{}[:, :], float{}[:, :])".format(bits, bits))
def _euclidian_distance_matrix(mat, out):
    """CUDA kernel: squared euclidian distance between all rows of `mat`.

    Each thread (i, j) with j > i computes ||mat[i] - mat[j]||^2 and
    writes it to out[i, j]. Only the strict upper triangle of `out` is
    written; the diagonal and lower triangle are left untouched, so the
    caller must pre-initialize `out`.
    """
    m = mat.shape[0]
    n = mat.shape[1]
    i, j = cuda.grid(2)
    d = 0
    if i < m and j > i and j < m:
        # calculate ||x - y||^2
        for k in range(n):
            tmp = mat[i, k] - mat[j, k]
            d += tmp * tmp
        out[i, j] = d
# Cell
def gpu_dist_matrix(mat):
    """Calculate the squared euclidian distance between all pairs of rows.

    Returns an (n, n) array whose strict upper triangle holds the pairwise
    squared distances; the diagonal and lower triangle are zero (the kernel
    only writes entries with j > i).
    """
    rows = mat.shape[0]
    block_dim = (16, 16)
    grid_dim = (int(rows/block_dim[0] + 1), int(rows/block_dim[1] + 1))
    stream = cuda.stream()
    mat2 = cuda.to_device(np.asarray(mat, dtype=np_type), stream=stream)
    # BUG FIX: cuda.device_array returns *uninitialized* memory, but the
    # kernel only fills the strict upper triangle, so the diagonal and
    # lower triangle previously contained garbage. Start from zeros.
    out2 = cuda.to_device(np.zeros((rows, rows), dtype=np_type), stream=stream)
    _euclidian_distance_matrix[grid_dim, block_dim](mat2, out2)
    out = out2.copy_to_host(stream=stream)
    return out
# Cell
@cuda.jit("void(float{}[:, :], float{}[:, :], float{}[:, :])".format(bits, bits, bits))
def _pairwise_distance_matrix(compMat, mixMat, out):
    """CUDA kernel: squared euclidian distance between rows of two matrices.

    Each thread (i, j) computes ||compMat[i] - mixMat[j]||^2 and writes it
    to out[i, j]; every in-bounds entry of `out` is written.
    """
    nC = compMat.shape[0]
    nM = mixMat.shape[0]
    dim = compMat.shape[1]
    i, j = cuda.grid(2)
    d = 0
    if i < nC and j < nM:
        # calculate ||c_i - m_j||^2
        for k in range(dim):
            tmp = compMat[i, k] - mixMat[j, k]
            d += tmp * tmp
        out[i, j] = d
# Cell
def component_mixture_dist_matrix(compMat, mixMat):
    """Squared euclidian distance between every row of `compMat` and every row of `mixMat`.

    Returns an array of shape (compMat.shape[0], mixMat.shape[0]).
    """
    n_comp = compMat.shape[0]
    n_mix = mixMat.shape[0]
    block_dim = (16, 16)
    grid_dim = (int(n_comp / block_dim[0] + 1), int(n_mix / block_dim[1] + 1))
    stream = cuda.stream()
    comp_dev = cuda.to_device(np.asarray(compMat, dtype=np_type), stream=stream)
    mix_dev = cuda.to_device(np.asarray(mixMat, dtype=np_type), stream=stream)
    result_dev = cuda.device_array((n_comp, n_mix))
    _pairwise_distance_matrix[grid_dim, block_dim](comp_dev, mix_dev, result_dev)
    return result_dev.copy_to_host(stream=stream)
"numba.cuda.device_array",
"numba.cuda.grid",
"numba.cuda.stream",
"numpy.asarray"
] | [((670, 682), 'numba.cuda.grid', 'cuda.grid', (['(2)'], {}), '(2)\n', (679, 682), False, 'from numba import cuda\n'), ((1136, 1149), 'numba.cuda.stream', 'cuda.stream', ([], {}), '()\n', (1147, 1149), False, 'from numba import cuda\n'), ((1234, 1265), 'numba.cuda.device_array', 'cuda.device_array', (['(rows, rows)'], {}), '((rows, rows))\n', (1251, 1265), False, 'from numba import cuda\n'), ((1729, 1741), 'numba.cuda.grid', 'cuda.grid', (['(2)'], {}), '(2)\n', (1738, 1741), False, 'from numba import cuda\n'), ((2276, 2289), 'numba.cuda.stream', 'cuda.stream', ([], {}), '()\n', (2287, 2289), False, 'from numba import cuda\n'), ((2461, 2499), 'numba.cuda.device_array', 'cuda.device_array', (['(compRows, mixRows)'], {}), '((compRows, mixRows))\n', (2478, 2499), False, 'from numba import cuda\n'), ((1176, 1206), 'numpy.asarray', 'np.asarray', (['mat'], {'dtype': 'np_type'}), '(mat, dtype=np_type)\n', (1186, 1206), True, 'import numpy as np\n'), ((2320, 2354), 'numpy.asarray', 'np.asarray', (['compMat'], {'dtype': 'np_type'}), '(compMat, dtype=np_type)\n', (2330, 2354), True, 'import numpy as np\n'), ((2400, 2433), 'numpy.asarray', 'np.asarray', (['mixMat'], {'dtype': 'np_type'}), '(mixMat, dtype=np_type)\n', (2410, 2433), True, 'import numpy as np\n')] |
#%%
import os
import gym
import pybullet as p
import numpy as np
import pybullet_envs
from gym import wrappers
from models import Agent
from utils import plot_learning_curve
# Build the environment and the SAC-style agent sized to its spaces.
env = gym.make('InvertedPendulumBulletEnv-v0')
agent = Agent(input_dims=env.observation_space.shape, env=env,
        n_actions=env.action_space.shape[0])
# uncomment this line and do a mkdir tmp && mkdir tmp/video if you want to
# record video of the agent playing the game.
# env = wrappers.Monitor(env, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'videos/'), video_callable=lambda episode_id: True, force=True)
filename = 'inverted_pendulum.png'
# Learning-curve plot is written next to this script under plots/.
figure_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plots/' + filename)
# Start the "best score" tracker at the environment's minimum reward.
best_score = env.reward_range[0]
score_history = []
#%%
n_games = 5
load_checkpoint = True
if load_checkpoint:
    # Resume from saved weights and open the on-screen renderer.
    agent.load_models()
    env.render(mode='human')
#%%
# Main episode loop: act, store the transition, and learn every step.
for i in range(n_games):
    observation = env.reset()
    done = False
    score = 0
    while not done:
        action = agent.choose_action(observation)
        observation_, reward, done, info = env.step(action)
        score += reward
        agent.remember(observation, action, reward, observation_, done)
        agent.learn()
        observation = observation_
        # p.stepSimulation()
        # NOTE(review): connecting and disconnecting a pybullet client on
        # every environment step looks expensive/unusual -- confirm this
        # is intended rather than a one-time connection outside the loop.
        physicsClient = p.connect(p.DIRECT)
        env.render(mode='human')
        p.disconnect(physicsClient)
    score_history.append(score)
    # Rolling average over the last 100 episodes; checkpoint on improvement.
    avg_score = np.mean(score_history[-100:])
    if avg_score > best_score:
        best_score = avg_score
        agent.save_models()
    print('episode ', i, 'score %.1f' % score, 'avg_score %.1f' % avg_score)
# Only plot the learning curve for fresh training runs (not when
# evaluating a loaded checkpoint).
if not load_checkpoint:
    x = [i+1 for i in range(n_games)]
    plot_learning_curve(x, score_history, figure_file)
| [
"numpy.mean",
"pybullet.connect",
"os.path.realpath",
"models.Agent",
"gym.make",
"pybullet.disconnect",
"utils.plot_learning_curve"
] | [((182, 222), 'gym.make', 'gym.make', (['"""InvertedPendulumBulletEnv-v0"""'], {}), "('InvertedPendulumBulletEnv-v0')\n", (190, 222), False, 'import gym\n'), ((231, 327), 'models.Agent', 'Agent', ([], {'input_dims': 'env.observation_space.shape', 'env': 'env', 'n_actions': 'env.action_space.shape[0]'}), '(input_dims=env.observation_space.shape, env=env, n_actions=env.\n action_space.shape[0])\n', (236, 327), False, 'from models import Agent\n'), ((1489, 1518), 'numpy.mean', 'np.mean', (['score_history[-100:]'], {}), '(score_history[-100:])\n', (1496, 1518), True, 'import numpy as np\n'), ((1755, 1805), 'utils.plot_learning_curve', 'plot_learning_curve', (['x', 'score_history', 'figure_file'], {}), '(x, score_history, figure_file)\n', (1774, 1805), False, 'from utils import plot_learning_curve\n'), ((683, 709), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (699, 709), False, 'import os\n'), ((1334, 1353), 'pybullet.connect', 'p.connect', (['p.DIRECT'], {}), '(p.DIRECT)\n', (1343, 1353), True, 'import pybullet as p\n'), ((1395, 1422), 'pybullet.disconnect', 'p.disconnect', (['physicsClient'], {}), '(physicsClient)\n', (1407, 1422), True, 'import pybullet as p\n')] |
#from https://github.com/pytorch/examples/blob/master/mnist/main.py
from __future__ import print_function
import argparse
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
from os.path import join as oj
import torch.utils.data as utils
from torchvision import datasets, transforms
import numpy as np
from model import Net
import os
import sys
import pickle as pkl
from copy import deepcopy
from params_save import S # class to save objects
sys.path.append('../../src')
import score_funcs
from score_funcs import gradient_sum,eg_scores_2d,cdep
import cd
import random
# Directory where result pickles for this experiment are written.
model_path = "../../models/ColorMNIST_test"
# NOTE: os was already imported above; this re-import is harmless.
import os
os.makedirs(model_path, exist_ok= True)
torch.backends.cudnn.deterministic = True #this makes results reproducible.
def save(p, out_name):
    """Pickle the results object ``p`` to ``model_path/out_name.pkl``.

    BUG FIX: the original ignored its ``p`` argument and pickled the
    module-level ``s`` instead (identical at the current call site,
    ``save(s, pid)``, but wrong for any other caller). It also never
    closed the file handle it opened; use a context manager.
    """
    os.makedirs(model_path, exist_ok=True)
    with open(os.path.join(model_path, out_name + '.pkl'), 'wb') as f:
        pkl.dump(p._dict(), f)
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=0, metavar='S',
help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--regularizer_rate', type=float, default=0.0, metavar='N',
help='how heavy to regularize lower order interaction (AKA color)')
parser.add_argument('--grad_method', type=int, default=0, metavar='N',
help='which gradient method is used - Grad or CD')
args = parser.parse_args()
s = S(args.epochs)
use_cuda = not args.no_cuda and torch.cuda.is_available()
regularizer_rate = args.regularizer_rate
s.regularizer_rate = regularizer_rate
num_blobs = 8
s.num_blobs = num_blobs
s.seed = args.seed
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 0, 'pin_memory': True, 'worker_init_fn':np.random.seed(12)} if use_cuda else {}
x_numpy_train = np.load(oj("../../data/ColorMNIST", "train_x.npy"))
prob = (x_numpy_train.sum(axis = 1) > 0.0).mean(axis = 0).reshape(-1)
prob /=prob.sum()
mean = x_numpy_train.mean(axis = (0,2,3))
std = x_numpy_train.std(axis = (0,2,3))
#x_numpy /= std[None, :, None, None,]
#x_numpy -= mean[None, :, None, None,]
def load_dataset(name):
    """Load the ColorMNIST split ``name`` ('train'/'val'/'test') as a TensorDataset.

    Images are standardized in place with the training-set channel
    mean/std computed above.
    """
    images = np.load(oj("../../data/ColorMNIST", name + "_x.npy"))
    images -= mean[None, :, None, None,]
    images /= std[None, :, None, None,]
    labels = np.load(oj("../../data/ColorMNIST", name + "_y.npy"))
    image_tensor = torch.Tensor(images)
    label_tensor = torch.Tensor(labels).type(torch.int64)
    return utils.TensorDataset(image_tensor, label_tensor)
train_dataset = load_dataset("train")
val_dataset = load_dataset("val")
test_dataset = load_dataset("test")
# Seed every RNG in play for reproducibility.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
train_loader = utils.DataLoader(train_dataset,
    batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = utils.DataLoader(val_dataset,
    batch_size=args.test_batch_size, shuffle=True, **kwargs)
test_loader = utils.DataLoader(test_dataset,
    batch_size=args.test_batch_size, shuffle=True, **kwargs)
# One single-pixel mask per image position: blobs[k] is a 28x28 array
# with a 1 at (k // 28, k % 28).
blobs = np.zeros((28*28,28,28))
for i in range(28):
    for j in range(28):
        blobs[i*28+j, i, j] =1
model = Net().to(device)
optimizer = optim.Adam(model.parameters(), weight_decay = 0.001)
def train(args, model, device, train_loader, optimizer, epoch, regularizer_rate, until_batch = -1):
    """Train for one epoch, optionally adding an attribution regularizer.

    The regularizer penalizes attribution on `num_blobs` randomly sampled
    pixel positions. args.grad_method selects the attribution:
    0 = CDEP (contextual decomposition), 1 = gradient sum,
    2 = expected gradients. If `until_batch` >= 0, stop after that batch.
    Logs loss/accuracy/regularizer values into the global `s` every
    `args.log_interval` batches.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if until_batch !=-1 and batch_idx > until_batch:
            break
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        if regularizer_rate !=0:
            add_loss = torch.zeros(1,).cuda()
            # Sample blob positions weighted by where digits tend to be.
            blob_idxs = np.random.choice(28*28, size = num_blobs, p = prob)
            if args.grad_method ==0:
                # CDEP: single backward pass through loss + penalty.
                for i in range(num_blobs):
                    add_loss += score_funcs.cdep(model, data, blobs[blob_idxs[i]],model_type = 'mnist')
                (regularizer_rate*add_loss+loss).backward()
            elif args.grad_method ==1:
                # Gradient-sum penalty: backprop penalty and loss separately.
                for i in range(num_blobs):
                    add_loss +=score_funcs.gradient_sum(data, target, torch.FloatTensor(blobs[blob_idxs[i]]).to(device), model, F.nll_loss)
                (regularizer_rate*add_loss).backward()
                loss = F.nll_loss(output, target)
                loss.backward()
            elif args.grad_method ==2:
                # Expected-gradients penalty, computed per sample.
                for j in range(len(data)):
                    for i in range(num_blobs):
                        add_loss +=(score_funcs.eg_scores_2d(model, data, j, target, 50) * torch.FloatTensor(blobs[blob_idxs[i]]).to(device)).sum()
                (regularizer_rate*add_loss).backward()
                loss = F.nll_loss(output, target)
                loss.backward()
        else:
            add_loss =torch.zeros(1,)
            loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            pred = output.argmax(dim=1, keepdim=True)
            acc = 100.*pred.eq(target.view_as(pred)).sum().item()/len(target)
            s.losses_train.append(loss.item())
            s.accs_train.append(acc)
            s.cd.append(add_loss.item())
def test(args, model, device, dataset_loader, is_test = False):
    """Evaluate `model` on `dataset_loader` and return the mean NLL loss.

    Records metrics into the global results object `s`: under
    acc_test/loss_test when `is_test` is True, otherwise appended to
    losses_dev/accs_dev.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in dataset_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(dataset_loader.dataset)
    if is_test:
        s.acc_test = 100. * correct / len(dataset_loader.dataset)
        s.loss_test = test_loss
        # BUG FIX: the format string began with '\Test' (a literal
        # backslash + T); it was meant to start with a newline like the
        # val branch below.
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(dataset_loader.dataset),
            100. * correct / len(dataset_loader.dataset)))
    else:
        s.losses_dev.append(test_loss)
        s.accs_dev.append(100. * correct / len(dataset_loader.dataset))
        print('\nVal set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(dataset_loader.dataset),
            100. * correct / len(dataset_loader.dataset)))
    return test_loss
# Early-stopping training driver: keep the weights with the best val loss.
best_model_weights = None
best_test_loss = 100000
# patience = 0 means training stops after the first non-improving epoch.
patience = 0
cur_patience = 0
for epoch in range(1, args.epochs + 1):
    train(args, model, device, train_loader, optimizer, epoch, regularizer_rate)
    test_loss = test(args, model, device, val_loader)
    if test_loss < best_test_loss:
        cur_patience = 0
        best_test_loss = test_loss
        best_model_weights = deepcopy(model.state_dict())
    else:
        cur_patience +=1
    if cur_patience > patience:
        break
# Restore the best checkpoint before the final test-set evaluation.
model.load_state_dict(best_model_weights)
s.dataset= "Color"
test(args, model, device, test_loader, is_test = True)
if args.grad_method ==0:
    s.method = "CDEP"
elif args.grad_method ==2:
    s.method = "EGradients"
else:
    s.method = "Grad"
#s.model_weights = best_model_weights
# Re-seed from entropy, then build a random run id.
# NOTE: np.random.randint(0, 9) draws digits 0-8 (the upper bound is exclusive).
np.random.seed()
pid = ''.join(["%s" % np.random.randint(0, 9) for num in range(0, 20)])
save(s, pid)
| [
"torch.cuda.is_available",
"sys.path.append",
"argparse.ArgumentParser",
"torch.nn.functional.nll_loss",
"params_save.S",
"numpy.random.seed",
"numpy.random.choice",
"torch.Tensor",
"torch.utils.data.TensorDataset",
"score_funcs.cdep",
"score_funcs.eg_scores_2d",
"model.Net",
"torch.device",... | [((485, 513), 'sys.path.append', 'sys.path.append', (['"""../../src"""'], {}), "('../../src')\n", (500, 513), False, 'import sys\n'), ((666, 704), 'os.makedirs', 'os.makedirs', (['model_path'], {'exist_ok': '(True)'}), '(model_path, exist_ok=True)\n', (677, 704), False, 'import os\n'), ((963, 1023), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (986, 1023), False, 'import argparse\n'), ((2445, 2459), 'params_save.S', 'S', (['args.epochs'], {}), '(args.epochs)\n', (2446, 2459), False, 'from params_save import S\n'), ((2665, 2708), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (2677, 2708), False, 'import torch\n'), ((3658, 3686), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3675, 3686), False, 'import torch\n'), ((3687, 3720), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3709, 3720), False, 'import torch\n'), ((3721, 3746), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3735, 3746), True, 'import numpy as np\n'), ((3747, 3769), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (3758, 3769), False, 'import random\n'), ((3785, 3872), 'torch.utils.data.DataLoader', 'utils.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size, shuffle=True,\n **kwargs)\n', (3801, 3872), True, 'import torch.utils.data as utils\n'), ((3890, 3980), 'torch.utils.data.DataLoader', 'utils.DataLoader', (['val_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(True)'}), '(val_dataset, batch_size=args.test_batch_size, shuffle=True,\n **kwargs)\n', (3906, 3980), True, 'import torch.utils.data as utils\n'), ((3996, 4088), 'torch.utils.data.DataLoader', 'utils.DataLoader', (['test_dataset'], 
{'batch_size': 'args.test_batch_size', 'shuffle': '(True)'}), '(test_dataset, batch_size=args.test_batch_size, shuffle=\n True, **kwargs)\n', (4012, 4088), True, 'import torch.utils.data as utils\n'), ((4105, 4132), 'numpy.zeros', 'np.zeros', (['(28 * 28, 28, 28)'], {}), '((28 * 28, 28, 28))\n', (4113, 4132), True, 'import numpy as np\n'), ((8455, 8471), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (8469, 8471), True, 'import numpy as np\n'), ((828, 866), 'os.makedirs', 'os.makedirs', (['model_path'], {'exist_ok': '(True)'}), '(model_path, exist_ok=True)\n', (839, 866), False, 'import os\n'), ((2492, 2517), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2515, 2517), False, 'import torch\n'), ((2841, 2883), 'os.path.join', 'oj', (['"""../../data/ColorMNIST"""', '"""train_x.npy"""'], {}), "('../../data/ColorMNIST', 'train_x.npy')\n", (2843, 2883), True, 'from os.path import join as oj\n'), ((3392, 3413), 'torch.Tensor', 'torch.Tensor', (['x_numpy'], {}), '(x_numpy)\n', (3404, 3413), False, 'import torch\n'), ((3483, 3522), 'torch.utils.data.TensorDataset', 'utils.TensorDataset', (['x_tensor', 'y_tensor'], {}), '(x_tensor, y_tensor)\n', (3502, 3522), True, 'import torch.utils.data as utils\n'), ((2776, 2794), 'numpy.random.seed', 'np.random.seed', (['(12)'], {}), '(12)\n', (2790, 2794), True, 'import numpy as np\n'), ((3181, 3225), 'os.path.join', 'oj', (['"""../../data/ColorMNIST"""', "(name + '_x.npy')"], {}), "('../../data/ColorMNIST', name + '_x.npy')\n", (3183, 3225), True, 'from os.path import join as oj\n'), ((3332, 3376), 'os.path.join', 'oj', (['"""../../data/ColorMNIST"""', "(name + '_y.npy')"], {}), "('../../data/ColorMNIST', name + '_y.npy')\n", (3334, 3376), True, 'from os.path import join as oj\n'), ((4215, 4220), 'model.Net', 'Net', ([], {}), '()\n', (4218, 4220), False, 'from model import Net\n'), ((4697, 4723), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {}), '(output, target)\n', (4707, 
4723), True, 'import torch.nn.functional as F\n'), ((6526, 6541), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6539, 6541), False, 'import torch\n'), ((896, 939), 'os.path.join', 'os.path.join', (['model_path', "(out_name + '.pkl')"], {}), "(model_path, out_name + '.pkl')\n", (908, 939), False, 'import os\n'), ((3429, 3450), 'torch.Tensor', 'torch.Tensor', (['y_numpy'], {}), '(y_numpy)\n', (3441, 3450), False, 'import torch\n'), ((4836, 4885), 'numpy.random.choice', 'np.random.choice', (['(28 * 28)'], {'size': 'num_blobs', 'p': 'prob'}), '(28 * 28, size=num_blobs, p=prob)\n', (4852, 4885), True, 'import numpy as np\n'), ((5995, 6009), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (6006, 6009), False, 'import torch\n'), ((8494, 8517), 'numpy.random.randint', 'np.random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (8511, 8517), True, 'import numpy as np\n'), ((4789, 4803), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (4800, 4803), False, 'import torch\n'), ((5001, 5071), 'score_funcs.cdep', 'score_funcs.cdep', (['model', 'data', 'blobs[blob_idxs[i]]'], {'model_type': '"""mnist"""'}), "(model, data, blobs[blob_idxs[i]], model_type='mnist')\n", (5017, 5071), False, 'import score_funcs\n'), ((5435, 5461), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {}), '(output, target)\n', (5445, 5461), True, 'import torch.nn.functional as F\n'), ((6707, 6750), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (6717, 6750), True, 'import torch.nn.functional as F\n'), ((5851, 5877), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {}), '(output, target)\n', (5861, 5877), True, 'import torch.nn.functional as F\n'), ((5286, 5324), 'torch.FloatTensor', 'torch.FloatTensor', (['blobs[blob_idxs[i]]'], {}), '(blobs[blob_idxs[i]])\n', (5303, 5324), False, 'import torch\n'), ((5660, 5712), 'score_funcs.eg_scores_2d', 
'score_funcs.eg_scores_2d', (['model', 'data', 'j', 'target', '(50)'], {}), '(model, data, j, target, 50)\n', (5684, 5712), False, 'import score_funcs\n'), ((5715, 5753), 'torch.FloatTensor', 'torch.FloatTensor', (['blobs[blob_idxs[i]]'], {}), '(blobs[blob_idxs[i]])\n', (5732, 5753), False, 'import torch\n')] |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import yfinance as yf
from pandas_datareader import data as pdr
import xlsxwriter
import requests
from yahoo_fin import stock_info as si
import pickle
import bs4 as bs
# You need to change this to a convenient spot on your own hard drive.
my_path = '/Users/shashank/Downloads/Code/Finance'
# Minimum win rate a calendar day must exceed (fraction of analyzed years
# with a positive return) to count as a seasonal "up" day; used throughout.
threshold = 0.80
# You need to go to Yahoo and download a list of the S&P 500 components. Make sure to save it to
# a CSV file with column headers that include "Symbol", "Date" and "Close"
def save_spx_tickers():
    """Scrape the current S&P 500 constituent list from Wikipedia.

    The tickers are cached to 'spxTickers.pickle' in the working directory
    and also returned as a list of strings.
    """
    resp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    soup = bs.BeautifulSoup(resp.text, 'lxml')
    table = soup.find('table', {'class': 'wikitable sortable'})
    tickers = []
    # Skip the header row; the first cell of each data row is the symbol.
    for row in table.findAll('tr')[1:]:
        cells = row.find_all('td')
        tickers.append(cells[0].text.strip())
    with open('spxTickers.pickle', 'wb') as f:
        pickle.dump(tickers, f)
    return tickers
sp500_tickers = save_spx_tickers()
# Make the ticker symbols readable by Yahoo Finance
sp500_tickers = [item.replace(".", "-") for item in sp500_tickers]
# Upload a list of the S&P 500 components downloaded from Yahoo.
mylist= []
mylist2 = []
df_sp500_tickers = pd.DataFrame(list(zip(sp500_tickers)), columns =['Symbol'])
# This module loops through the S&P 500 tickers, downloads the data from Yahoo and creates a separate CSV
# file of historical data for each ticker (e.g. AAPL.csv).
# Skip this routine if you already have the CSV files available.
'''
for index, ticker in df_sp500_tickers.iterrows():
global df
my_ticker = ticker['Symbol']
yf_ticker = yf.Ticker(my_ticker)
data = yf_ticker.history(period="max")
df = pd.DataFrame(data)
df.reset_index(level=0, inplace=True)
df['Symbol'] = my_ticker
df = df[['Symbol','Date','Close']]
df.drop_duplicates(subset ="Date", keep = 'first', inplace = True) #Yahoo has a tendency to duplicate the last row.
df.to_csv(path_or_buf = my_path + "/data/" + my_ticker +".csv", index=False)
'''
# Creates the dataframe container for the stats data.
df_tradelist = pd.DataFrame(index=[], columns=['my_ticker', 'hold_per', 'pct_uprows', 'max_up_return', 'min_up_return', 'avg_up_return', 'avg_down_return', 'exp_return', 'stdev_returns', 'pct_downside', 'worst_return', 'least_pain_pt', 'total_years', 'max_consec_beat', 'best_buy_date', 'best_sell_date', 'analyzed_years'])
df_tradelist.head()
# Convert prices to holding period returns based on 20 trading days per month.
def convert_prices_to_periods():
    """Convert the global price frame `df` into holding-period returns.

    Uses `pct_change` over the global `dperiods` trading days (20 days per
    month) and stores the rounded result in the global `dfr`.
    """
    global dperiods
    global dfr
    returns = df.pct_change(periods=dperiods)
    returns = returns.reset_index(level=0)
    returns = returns.rename(columns={'Close':'Returns'})
    dfr = returns.round(4)
# Separate out the date column into separate month, year and day values.
def separate_date_column():
    """Split the 'Date' column of global `dfr` into Month/Day/Year columns
    plus the combined 'M-D' key used later as the pivot index."""
    global dfr
    dates = pd.DatetimeIndex(dfr['Date'])
    dfr['Month'] = dates.month
    dfr['Day'] = dates.day
    dfr['Year'] = dates.year
    dfr['M-D'] = dfr['Month'].astype(str) + '-' + dfr['Day'].astype(str)
    # Allow the full frame to print without truncation when inspected.
    pd.set_option('display.max_rows', len(dfr))
# Pivot the table to show years across the top and Month-Day values in the first column on the left.
def pivot_the_table():
    """Pivot global `dfr` so each calendar day ('M-D') is a row and each
    year is a column of returns; the result is stored in global `dfr_pivot`."""
    global dfr_pivot
    pivoted = dfr.pivot(index='M-D', columns='Year', values='Returns')
    pivoted.reset_index(level=0, inplace=True)
    dfr_pivot = pd.DataFrame(pivoted)
    dfr_pivot.columns.name = "Index"
    # The pivot left NaNs on weekends/holidays; carry forward the previous
    # trading day's value.
    dfr_pivot.fillna(method='ffill', inplace=True)
    # Results are only evaluated through 12/31/2019, so drop any partial
    # 2020 column.
    if 2020 in dfr_pivot.columns:
        dfr_pivot.drop(2020, axis=1, inplace=True)
# Add additional calculated columns to facilitate statistic calculations for each stock.
def add_calculated_items():
    """Append per-calendar-day summary columns (counts, win rates, mean/std
    returns, extremes) to the global `dfr_pivot` year-by-year returns table.

    NOTE: every assignment below adds a column, so the
    `len(dfr_pivot.columns) - k` slice offsets grow in step with the columns
    already added; each slice is intended to cover only the original year
    columns.
    """
    global dfr_pivot
    global lookback
    global start
    # The lookback figure is the number (must be an integer) of years back from last year (2019) that you want to include in
    # analysis, i.e. the calculations below. It's probably a good idea to keep it at 20 years or less
    # to reflect more recent market conditions.
    lookback = 20
    start = 1
    # Column 0 is the 'M-D' label, so year columns start at index 1.
    if lookback > len(dfr_pivot.columns) - 1:
        start = 1
    else:
        start = len(dfr_pivot.columns) - lookback
    dfr_pivot['YearCount'] = dfr_pivot.count(axis=1, numeric_only=True)
    dfr_pivot['Lookback'] = lookback
    # -2 skips the two columns just added (YearCount, Lookback).
    dfr_pivot['UpCount'] = dfr_pivot[dfr_pivot.iloc[:,start:len(dfr_pivot.columns)-2] > 0].count(axis=1)
    # NOTE(review): no offset here, so YearCount/Lookback/UpCount fall inside
    # the `< 0` mask; harmless because those counts are never negative.
    dfr_pivot['DownCount'] = dfr_pivot[dfr_pivot.iloc[:,start:len(dfr_pivot.columns)] < 0].count(axis=1)
    dfr_pivot['PctUp'] = dfr_pivot['UpCount']/dfr_pivot['Lookback']
    dfr_pivot['PctDown'] = dfr_pivot['DownCount']/dfr_pivot['Lookback']
    # The -6 .. -10 offsets below each exclude the summary columns
    # accumulated so far, leaving only the year columns in the slice.
    dfr_pivot['AvgReturn'] = dfr_pivot.iloc[:,start:len(dfr_pivot.columns)-6].mean(axis=1)
    dfr_pivot['StDevReturns'] = dfr_pivot.iloc[:,start:len(dfr_pivot.columns)-7].std(axis=1)
    # Roughly a one-standard-deviation downside estimate for the day.
    dfr_pivot['67PctDownside'] = dfr_pivot['AvgReturn']-dfr_pivot['StDevReturns']
    dfr_pivot['MaxReturn'] = dfr_pivot.iloc[:,start:len(dfr_pivot.columns)-9].max(axis=1)
    dfr_pivot['MinReturn'] = dfr_pivot.iloc[:,start:len(dfr_pivot.columns)-10].min(axis=1)
# Add a fictional date column in Python date/time format so the table can be sorted by date. Then sort by Date.
# Reset the index and round the float values to 4 decimals.
def sortbydate_resetindex_export():
    """Attach a synthetic year-2000 date to each 'M-D' row of global
    `dfr_pivot` so the table can be sorted chronologically, then sort,
    reset the index and round the float values to 4 decimals."""
    global dfr_pivot
    synthetic = '2000-' + dfr_pivot['M-D'].astype(str)
    dfr_pivot['Date'] = pd.to_datetime(synthetic, infer_datetime_format=True)
    dfr_pivot.sort_values(by='Date', ascending=True, inplace=True)
    dfr_pivot.reset_index(inplace=True)
    dfr_pivot = dfr_pivot.round(4)
# Calculate the trading statistics for the rolling holding periods for the stock.
def calc_trading_stats():
    """Compute the seasonal trading statistics for the current ticker and
    holding period from the global `dfr_pivot` table, publishing the
    results as module-level globals consumed by filter_and_append_stats().

    Bug fixes vs. the previous version:
      * the consecutive-beat streak counter now resets to 0 (not 1) after
        a miss, which previously over-counted every streak after the first;
      * a streak that runs to the final row is now recorded (it used to be
        silently dropped because the max was only updated on a miss);
      * the bare `except:` clauses are narrowed to `except Exception`.
    """
    global interval
    global dfr_pivot
    global pct_uprows
    global max_up_return
    global min_up_return
    global avg_up_return
    global avg_down_return
    global exp_return
    global stdev_returns
    global pct_downside
    global worst_return
    global least_pain_pt
    global total_years
    global n_consec
    global max_n_consec
    global max_consec_beat
    global best_sell_date
    global best_buy_date
    global analyzed_years
    global lookback
    # Fraction of calendar days whose historical win rate beats `threshold`.
    pct_uprows = (dfr_pivot.loc[dfr_pivot['PctUp'] > threshold, 'PctUp'].count() / dfr_pivot.loc[:, 'PctUp'].count()).astype(float).round(4)
    max_up_return = dfr_pivot.loc[dfr_pivot['PctUp'] > threshold, 'MaxReturn'].max()
    min_up_return = dfr_pivot.loc[dfr_pivot['PctUp'] > threshold, 'MinReturn'].min()
    avg_up_return = dfr_pivot.loc[dfr_pivot['PctUp'] > 0.5, 'AvgReturn'].mean()
    avg_up_return = np.float64(avg_up_return).round(4)
    avg_down_return = dfr_pivot.loc[dfr_pivot['PctDown'] > 0.5, 'AvgReturn'].mean()
    avg_down_return = np.float64(avg_down_return).round(4)
    exp_return = round(dfr_pivot['AvgReturn'].mean(), 4)
    stdev_returns = dfr_pivot['StDevReturns'].mean()
    stdev_returns = np.float64(stdev_returns).round(4)
    worst_return = dfr_pivot['MinReturn'].min()
    # One standard deviation below the expected return.
    pct_downside = exp_return - stdev_returns
    pct_downside = np.float64(pct_downside).round(4)
    least_pain_pt = dfr_pivot.loc[dfr_pivot['PctUp'] > threshold, '67PctDownside'].max()
    total_years = dfr_pivot['YearCount'].max()
    analyzed_years = lookback
    # Longest run of consecutive calendar days beating the threshold.
    n_consec = 0
    max_n_consec = 0
    for x in dfr_pivot['PctUp']:
        if (x > threshold):
            n_consec += 1
        else:
            # Streak broken: record it, then restart from zero.
            max_n_consec = max(n_consec, max_n_consec)
            n_consec = 0
    # A streak that reaches the final row must also be considered.
    max_n_consec = max(n_consec, max_n_consec)
    max_consec_beat = max_n_consec
    try:
        best_sell_date = dfr_pivot.loc[dfr_pivot['67PctDownside'] == least_pain_pt, 'M-D'].iloc[0]
    except Exception:
        # No row matched (e.g. least_pain_pt is NaN) -> no recommendation.
        best_sell_date = "nan"
    try:
        # Walk back `interval` calendar rows from the sell date to get the buy date.
        row = dfr_pivot.loc[dfr_pivot['M-D'] == best_sell_date, 'M-D'].index[0] - interval
        col = dfr_pivot.columns.get_loc('M-D')
        best_buy_date = dfr_pivot.iloc[row,col]
    except Exception:
        best_buy_date = "nan"
# If the pct_uprows and history conditions are met, then create the array of stat values and append
# it to the recommended trade list.
def filter_and_append_stats():
    """Assemble the current ticker's stats into `statsdata`/`df_statsdata`
    and, if the ticker qualifies, append a row to the global trade list.

    Qualification: more than 10% of calendar days beat the threshold AND
    at least a decade of price history is available.

    Fix: `DataFrame.append` was deprecated in pandas 1.4 and removed in
    2.0; the row is now added with the equivalent `pd.concat`.
    """
    global statsdata
    global df_statsdata
    global df_tradelist
    # Save the stats data separately to export to Excel for further research on each ticker if desired.
    statsdata = np.array([my_ticker, hold_per, pct_uprows, max_up_return, min_up_return, avg_up_return, avg_down_return, exp_return, stdev_returns, pct_downside, worst_return, least_pain_pt, total_years, max_consec_beat, best_buy_date, best_sell_date, analyzed_years])
    df_statsdata = pd.DataFrame(statsdata.reshape(-1, len(statsdata)), columns=['my_ticker', 'hold_per', 'pct_uprows', 'max_up_return', 'min_up_return', 'avg_up_return', 'avg_down_return', 'exp_return', 'stdev_returns', 'pct_downside', 'worst_return', 'least_pain_pt', 'total_years', 'max_consec_beat', 'best_buy_date', 'best_sell_date', 'analyzed_years'])
    if pct_uprows > 0.1:
        if total_years > 9:
            new_row = pd.DataFrame([dict(zip(df_tradelist.columns, statsdata))])
            df_tradelist = pd.concat([df_tradelist, new_row], ignore_index=True)
# This module grabs each ticker file, transforms it and calculates the statistics needed for a 90 day holding period.
def calc_3month_returns():
    """Run the full transform/statistics pipeline for a 90-day (3-month)
    holding period on the current ticker and stash the pivot table and
    stats in the `dfr_3mo` / `df_statsdata_3mo` globals."""
    global dfr, dfr_pivot, df_tradelist, dfr_3mo, df_statsdata_3mo
    global threshold, hold_per, dperiods, interval
    # 60 trading days ~ 3 calendar months; `interval` is in calendar days.
    dperiods = 60
    hold_per = "3 Mos"
    interval = 90
    convert_prices_to_periods()
    separate_date_column()
    pivot_the_table()
    add_calculated_items()
    sortbydate_resetindex_export()
    # dfr_pivot can be exported to CSV here for further research if desired.
    # Keep a snapshot of the pivot for the Excel export.
    dfr_3mo = pd.DataFrame(dfr_pivot)
    calc_trading_stats()
    filter_and_append_stats()
    # Keep a snapshot of the stats row for the Excel export.
    df_statsdata_3mo = df_statsdata.copy()
# This module grabs each ticker file, transforms it and calculates the statistics needed for a 60 day holding period.
def calc_2month_returns():
    """Run the full transform/statistics pipeline for a 60-day (2-month)
    holding period on the current ticker and stash the pivot table and
    stats in the `dfr_2mo` / `df_statsdata_2mo` globals."""
    global dfr, dfr_pivot, df_tradelist, dfr_2mo, df_statsdata_2mo
    global threshold, hold_per, dperiods, interval
    # 40 trading days ~ 2 calendar months; `interval` is in calendar days.
    dperiods = 40
    hold_per = "2 Mos"
    interval = 60
    convert_prices_to_periods()
    separate_date_column()
    pivot_the_table()
    add_calculated_items()
    sortbydate_resetindex_export()
    # dfr_pivot can be exported to CSV here for further research if desired.
    # Keep a snapshot of the pivot for the Excel export.
    dfr_2mo = pd.DataFrame(dfr_pivot)
    calc_trading_stats()
    filter_and_append_stats()
    # Keep a snapshot of the stats row for the Excel export.
    df_statsdata_2mo = df_statsdata.copy()
# This module grabs each ticker file, transforms it and calculates the statistics needed for a 30 day holding period.
def calc_1month_returns():
    """Run the full transform/statistics pipeline for a 30-day (1-month)
    holding period on the current ticker and stash the pivot table and
    stats in the `dfr_1mo` / `df_statsdata_1mo` globals."""
    global dfr, dfr_pivot, df_tradelist, dfr_1mo, df_statsdata_1mo
    global threshold, hold_per, dperiods, interval
    # 20 trading days ~ 1 calendar month; `interval` is in calendar days.
    dperiods = 20
    hold_per = "1 Mo"
    interval = 30
    convert_prices_to_periods()
    separate_date_column()
    pivot_the_table()
    add_calculated_items()
    sortbydate_resetindex_export()
    # dfr_pivot can be exported to CSV here for further research if desired.
    # Keep a snapshot of the pivot for the Excel export.
    dfr_1mo = pd.DataFrame(dfr_pivot)
    calc_trading_stats()
    filter_and_append_stats()
    # Keep a snapshot of the stats row for the Excel export.
    df_statsdata_1mo = df_statsdata.copy()
# Build and export an Excel file for each ticker using XlsxWriter
def export_to_excel():
    """Write the per-ticker stats and the three holding-period pivot tables
    to a formatted Excel workbook named <ticker>.xlsx in the data folder."""
    excel_file_path = my_path + "/data/" + my_ticker + ".xlsx"
    # XlsxWriter engine is required for the conditional formatting below.
    writer = pd.ExcelWriter(excel_file_path, engine='xlsxwriter')
    # One 'Stats' sheet stacking all three holding periods, plus one sheet
    # per returns pivot table.
    df_statsdata_1mo.to_excel(writer, sheet_name='Stats', index=False)
    df_statsdata_2mo.to_excel(writer, sheet_name='Stats', startrow=2, header=False, index=False)
    df_statsdata_3mo.to_excel(writer, sheet_name='Stats', startrow=3, header=False, index=False)
    dfr_1mo.to_excel(writer, sheet_name='1 Mo Returns', index=False)
    dfr_2mo.to_excel(writer, sheet_name='2 Mo Returns', index=False)
    dfr_3mo.to_excel(writer, sheet_name='3 Mo Returns', index=False)
    workbook = writer.book
    return_sheets = [writer.sheets[name] for name in ('1 Mo Returns', '2 Mo Returns', '3 Mo Returns')]
    # Highlight positive returns in green and freeze the header/label panes
    # on every returns sheet.
    end_column = dfr_1mo.columns.get_loc("YearCount")
    grn_format = workbook.add_format({'bg_color': '#C6EFCE','font_color': '#006100'})
    for sheet in return_sheets:
        sheet.conditional_format(1, 2, 365, end_column - 1,{'type':'cell','criteria':'>','value':0,'format':grn_format})
        sheet.freeze_panes(1, 2)
    writer.save()
# Read CSV files by ticker, transform and extract stats from each one.
for index, ticker in df_sp500_tickers.iterrows():
global dfr
my_ticker = ticker['Symbol']
df = pd.read_csv (my_path + "/data/" + my_ticker + ".csv")
df.set_index('Date', inplace=True)
df = df['Close']
df = pd.DataFrame(df, columns=['Close'])
calc_1month_returns()
calc_2month_returns()
calc_3month_returns()
export_to_excel()
# Make a copy and convert the trade list to a Pandas dataframe.
df_tradelist_copy = df_tradelist.copy()
df_tradelist = pd.DataFrame(df_tradelist)
#df_tradelist.to_csv(path_or_buf = my_path + "/df_tradelist.csv", index=False)
#df_tradelist_copy.to_csv(path_or_buf = my_path + "/df_tradelist_copy.csv", index=False)
# Clean it up by removing rows with NaN's and infinity values and dropping duplicates.
df_tradelist.replace("inf", np.nan, inplace=True)
df_tradelist.dropna(inplace=True)
df_tradelist = df_tradelist[~df_tradelist.max_up_return.str.contains("nan")]
df_tradelist = df_tradelist[~df_tradelist.avg_down_return.str.contains("nan")]
df_tradelist.sort_values(by=['pct_uprows'], ascending=False)
df_tradelist.drop_duplicates(subset ="my_ticker", keep = 'first', inplace = True)
df_tradelist.tail(10)
df_tradelist.head()
#df_tradelist.shape
# Export the trade list to CSV files for execution and/or further research if desired.
df_tradelist.to_csv(path_or_buf = my_path + "/df_tradelist.csv", index=False) | [
"pickle.dump",
"pandas.read_csv",
"pandas.DatetimeIndex",
"numpy.float64",
"requests.get",
"bs4.BeautifulSoup",
"numpy.array",
"pandas.DataFrame",
"pandas.ExcelWriter",
"pandas.to_datetime"
] | [((2223, 2547), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[]', 'columns': "['my_ticker', 'hold_per', 'pct_uprows', 'max_up_return', 'min_up_return',\n 'avg_up_return', 'avg_down_return', 'exp_return', 'stdev_returns',\n 'pct_downside', 'worst_return', 'least_pain_pt', 'total_years',\n 'max_consec_beat', 'best_buy_date', 'best_sell_date', 'analyzed_years']"}), "(index=[], columns=['my_ticker', 'hold_per', 'pct_uprows',\n 'max_up_return', 'min_up_return', 'avg_up_return', 'avg_down_return',\n 'exp_return', 'stdev_returns', 'pct_downside', 'worst_return',\n 'least_pain_pt', 'total_years', 'max_consec_beat', 'best_buy_date',\n 'best_sell_date', 'analyzed_years'])\n", (2235, 2547), True, 'import pandas as pd\n'), ((15195, 15221), 'pandas.DataFrame', 'pd.DataFrame', (['df_tradelist'], {}), '(df_tradelist)\n', (15207, 15221), True, 'import pandas as pd\n'), ((611, 684), 'requests.get', 'requests.get', (['"""https://en.wikipedia.org/wiki/List_of_S%26P_500_companies"""'], {}), "('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\n", (623, 684), False, 'import requests\n'), ((696, 731), 'bs4.BeautifulSoup', 'bs.BeautifulSoup', (['resp.text', '"""lxml"""'], {}), "(resp.text, 'lxml')\n", (712, 731), True, 'import bs4 as bs\n'), ((3579, 3602), 'pandas.DataFrame', 'pd.DataFrame', (['dfr_pivot'], {}), '(dfr_pivot)\n', (3591, 3602), True, 'import pandas as pd\n'), ((5916, 5977), 'pandas.to_datetime', 'pd.to_datetime', (["dfr_pivot['Date']"], {'infer_datetime_format': '(True)'}), "(dfr_pivot['Date'], infer_datetime_format=True)\n", (5930, 5977), True, 'import pandas as pd\n'), ((8855, 9119), 'numpy.array', 'np.array', (['[my_ticker, hold_per, pct_uprows, max_up_return, min_up_return,\n avg_up_return, avg_down_return, exp_return, stdev_returns, pct_downside,\n worst_return, least_pain_pt, total_years, max_consec_beat,\n best_buy_date, best_sell_date, analyzed_years]'], {}), '([my_ticker, hold_per, pct_uprows, max_up_return, min_up_return,\n avg_up_return, 
avg_down_return, exp_return, stdev_returns, pct_downside,\n worst_return, least_pain_pt, total_years, max_consec_beat,\n best_buy_date, best_sell_date, analyzed_years])\n', (8863, 9119), True, 'import numpy as np\n'), ((10467, 10490), 'pandas.DataFrame', 'pd.DataFrame', (['dfr_pivot'], {}), '(dfr_pivot)\n', (10479, 10490), True, 'import pandas as pd\n'), ((11488, 11511), 'pandas.DataFrame', 'pd.DataFrame', (['dfr_pivot'], {}), '(dfr_pivot)\n', (11500, 11511), True, 'import pandas as pd\n'), ((12522, 12545), 'pandas.DataFrame', 'pd.DataFrame', (['dfr_pivot'], {}), '(dfr_pivot)\n', (12534, 12545), True, 'import pandas as pd\n'), ((12981, 13033), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['excel_file_path'], {'engine': '"""xlsxwriter"""'}), "(excel_file_path, engine='xlsxwriter')\n", (12995, 13033), True, 'import pandas as pd\n'), ((14796, 14848), 'pandas.read_csv', 'pd.read_csv', (["(my_path + '/data/' + my_ticker + '.csv')"], {}), "(my_path + '/data/' + my_ticker + '.csv')\n", (14807, 14848), True, 'import pandas as pd\n'), ((14919, 14954), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {'columns': "['Close']"}), "(df, columns=['Close'])\n", (14931, 14954), True, 'import pandas as pd\n'), ((1004, 1027), 'pickle.dump', 'pickle.dump', (['tickers', 'f'], {}), '(tickers, f)\n', (1015, 1027), False, 'import pickle\n'), ((3028, 3057), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["dfr['Date']"], {}), "(dfr['Date'])\n", (3044, 3057), True, 'import pandas as pd\n'), ((3081, 3110), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["dfr['Date']"], {}), "(dfr['Date'])\n", (3097, 3110), True, 'import pandas as pd\n'), ((3133, 3162), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["dfr['Date']"], {}), "(dfr['Date'])\n", (3149, 3162), True, 'import pandas as pd\n'), ((7134, 7159), 'numpy.float64', 'np.float64', (['avg_up_return'], {}), '(avg_up_return)\n', (7144, 7159), True, 'import numpy as np\n'), ((7275, 7302), 'numpy.float64', 'np.float64', (['avg_down_return'], {}), 
'(avg_down_return)\n', (7285, 7302), True, 'import numpy as np\n'), ((7442, 7467), 'numpy.float64', 'np.float64', (['stdev_returns'], {}), '(stdev_returns)\n', (7452, 7467), True, 'import numpy as np\n'), ((7590, 7614), 'numpy.float64', 'np.float64', (['pct_downside'], {}), '(pct_downside)\n', (7600, 7614), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets.mnist import load_data as load_mnist
def create_pairs(x, digit_indices, num_classes, **kwargs):
    '''Build alternating positive/negative sample pairs for pair training.

    For every class, consecutive same-class samples form a positive pair
    (label 1), each immediately followed by a cross-class negative pair
    (label 0) drawn from a randomly chosen other class.

    Args:
        x: array of samples, indexable by the entries of digit_indices.
        digit_indices: per-class arrays of sample indices into x.
        num_classes: number of classes represented in digit_indices.
        **kwargs: optional 'samples_per_class' to cap pairs per class.

    Returns:
        (pairs, labels) as numpy arrays.
    '''
    pairs = []
    labels = []
    n = kwargs.get('samples_per_class')
    if n is None:
        # One fewer than the smallest class size so index i+1 stays valid.
        n = min(len(digit_indices[d]) for d in range(num_classes)) - 1
    for d in range(num_classes):
        for i in range(n):
            a, b = digit_indices[d][i], digit_indices[d][i + 1]
            pairs.append([x[a], x[b]])
            # Pick a different class for the negative pair.
            offset = np.random.randint(1, num_classes)
            other = (d + offset) % num_classes
            pairs.append([x[digit_indices[d][i]], x[digit_indices[other][i]]])
            labels.extend([1, 0])
    return np.array(pairs), np.array(labels)
def create_sets(data, num_classes, **kwargs):
    """Create training and test positive/negative pair sets.

    Args:
        data: dict with 'train' and 'test' keys, each an (x, y) tuple.
        num_classes: number of distinct labels in y.
        **kwargs: forwarded to create_pairs (e.g. samples_per_class).

    Returns:
        ((tr_pairs, tr_y), (te_pairs, te_y))

    Fix: the class count was previously hard-coded to 10 in both
    create_pairs calls, which raised IndexError for any other num_classes.
    """
    (x_train, y_train), (x_test, y_test) = data['train'], data['test']
    digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)]
    tr_pairs, tr_y = create_pairs(x_train, digit_indices, num_classes, **kwargs)
    digit_indices = [np.where(y_test == i)[0] for i in range(num_classes)]
    te_pairs, te_y = create_pairs(x_test, digit_indices, num_classes, **kwargs)
    return (tr_pairs, tr_y), (te_pairs, te_y)
def generate(x, y, batch_size, repetitions):
    """Yield mini-batches of paired inputs, cycling over the data.

    Args:
        x: array of shape (n_samples, 2, ...) holding sample pairs.
        y: labels of shape (n_samples,).
        batch_size: samples per batch (the last batch may be smaller).
        repetitions: number of passes over the data to yield.

    Yields:
        ([left_batch, right_batch], label_batch) tuples.
    """
    n_samples = y.shape[0]
    n_batches, remainder = divmod(n_samples, batch_size)
    if remainder:
        # A short final batch covers the leftover samples.
        n_batches += 1
    for _ in range(repetitions):
        for b in range(n_batches):
            lo = b * batch_size
            hi = lo + batch_size
            yield [x[lo:hi, 0, ...], x[lo:hi, 1, ...]], y[lo:hi]
def load(n_classes, input_shape, **kwargs):
    """Download MNIST, scale pixels to [0, 1], reshape both splits, and
    build the positive/negative pair sets via create_sets.

    Args:
        n_classes: number of digit classes to pair up.
        input_shape: per-sample shape, e.g. (28, 28, 1).
        **kwargs: forwarded to create_sets / create_pairs.
    """
    print(f"Loading mnist with: shape={input_shape}, n_classes={n_classes}")
    (x_train, y_train), (x_test, y_test) = load_mnist()
    # Scale pixel values into [0, 1].
    x_train = x_train.astype(np.float64) / 255.
    x_test = x_test.astype(np.float64) / 255.
    # Prepend a wildcard batch dimension and reshape both splits.
    full_shape = (-1,) + input_shape
    x_train = x_train.reshape(*full_shape)
    x_test = x_test.reshape(*full_shape)
    data = dict(train=(x_train, y_train), test=(x_test, y_test))
    return create_sets(data, n_classes, **kwargs)
| [
"numpy.where",
"numpy.array",
"numpy.random.randint",
"tensorflow.keras.datasets.mnist.load_data"
] | [((2271, 2283), 'tensorflow.keras.datasets.mnist.load_data', 'load_mnist', ([], {}), '()\n', (2281, 2283), True, 'from tensorflow.keras.datasets.mnist import load_data as load_mnist\n'), ((865, 880), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (873, 880), True, 'import numpy as np\n'), ((882, 898), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (890, 898), True, 'import numpy as np\n'), ((649, 682), 'numpy.random.randint', 'np.random.randint', (['(1)', 'num_classes'], {}), '(1, num_classes)\n', (666, 682), True, 'import numpy as np\n'), ((1094, 1116), 'numpy.where', 'np.where', (['(y_train == i)'], {}), '(y_train == i)\n', (1102, 1116), True, 'import numpy as np\n'), ((1243, 1264), 'numpy.where', 'np.where', (['(y_test == i)'], {}), '(y_test == i)\n', (1251, 1264), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
""" spectrogram.py: Utilities for dealing with spectrograms
"""
from scipy import signal
import numpy as np
from opensoundscape.audio import Audio
from opensoundscape.helpers import min_max_scale, linear_scale
import warnings
import pickle
class Spectrogram:
    """Immutable spectrogram container.

    Holds a 2-d array of decibel values plus its frequency (rows) and time
    (columns) axes. Instances cannot be modified after creation; every
    transformation returns a new Spectrogram.

    Fixes vs. the previous version:
      * limit_db_range no longer mutates self.spectrogram in place
        (it clipped the shared array, violating the immutability contract);
      * from_file now declares the implicit `cls` parameter, so the file
        path argument is no longer silently bound to the class object.
    """

    __slots__ = ("frequencies", "times", "spectrogram", "decibel_limits")

    def __init__(self, spectrogram, frequencies, times):
        """Validate and store the arrays on the immutable instance.

        Args:
            spectrogram: 2-d np.ndarray of shape (n_freqs, n_times)
            frequencies: 1-d np.ndarray of shape (n_freqs,)
            times: 1-d np.ndarray of shape (n_times,)

        Raises:
            TypeError: on wrong types, wrong dimensionality, or mismatched shapes.
        """
        if not isinstance(spectrogram, np.ndarray):
            raise TypeError(
                f"Spectrogram.spectrogram should be a np.ndarray [shape=(n, m)]. Got {spectrogram.__class__}"
            )
        if not isinstance(frequencies, np.ndarray):
            raise TypeError(
                f"Spectrogram.frequencies should be an np.ndarray [shape=(n,)]. Got {frequencies.__class__}"
            )
        if not isinstance(times, np.ndarray):
            raise TypeError(
                f"Spectrogram.times should be an np.ndarray [shape=(m,)]. Got {times.__class__}"
            )
        if spectrogram.ndim != 2:
            raise TypeError(
                f"spectrogram should be a np.ndarray [shape=(n, m)]. Got {spectrogram.shape}"
            )
        if frequencies.ndim != 1:
            raise TypeError(
                f"frequencies should be an np.ndarray [shape=(n,)]. Got {frequencies.shape}"
            )
        if times.ndim != 1:
            raise TypeError(
                f"times should be an np.ndarray [shape=(m,)]. Got {times.shape}"
            )
        if spectrogram.shape != (frequencies.shape[0], times.shape[0]):
            raise TypeError(
                f"Dimension mismatch, spectrogram.shape: {spectrogram.shape}, frequencies.shape: {frequencies.shape}, times.shape: {times.shape}"
            )

        # __setattr__ is disabled below, so attributes are set via object's.
        super(Spectrogram, self).__setattr__("frequencies", frequencies)
        super(Spectrogram, self).__setattr__("times", times)
        super(Spectrogram, self).__setattr__("spectrogram", spectrogram)
        super(Spectrogram, self).__setattr__("decibel_limits", (-100, -20))

    @classmethod
    def from_audio(
        cls,
        audio,
        window_type="hann",
        window_samples=512,
        overlap_samples=256,
        decibel_limits=(-100, -20),
    ):
        """
        create a Spectrogram object from an Audio object

        Args:
            window_type="hann": see scipy.signal.spectrogram docs for description of window parameter
            window_samples=512: number of audio samples per spectrogram window (pixel)
            overlap_samples=256: number of samples shared by consecutive windows
            decibel_limits = (-100,-20) : limit the dB values to (min,max) (lower values set to min, higher values set to max)

        Returns:
            opensoundscape.spectrogram.Spectrogram object
        """
        if not isinstance(audio, Audio):
            raise TypeError("Class method expects Audio class as input")

        frequencies, times, spectrogram = signal.spectrogram(
            audio.samples,
            audio.sample_rate,
            window=window_type,
            nperseg=window_samples,
            noverlap=overlap_samples,
            scaling="spectrum",
        )

        # convert to decibels
        # -> avoid RuntimeWarning by setting negative values to -np.inf (mapped to min_db later)
        spectrogram = 10 * np.log10(
            spectrogram, where=spectrogram > 0, out=np.full(spectrogram.shape, -np.inf)
        )

        # limit the decibel range (-100 to -20 dB by default)
        # values below lower limit set to lower limit, values above upper limit set to uper limit
        min_db, max_db = decibel_limits
        spectrogram[spectrogram > max_db] = max_db
        spectrogram[spectrogram < min_db] = min_db

        new_obj = cls(spectrogram, frequencies, times)
        # record the actual limits used (init defaults to (-100, -20))
        super(Spectrogram, new_obj).__setattr__("decibel_limits", decibel_limits)
        return new_obj

    @classmethod
    def from_file(cls, file):
        """
        create a Spectrogram object from a file

        Args:
            file: path of image to load

        Returns:
            opensoundscape.spectrogram.Spectrogram object
        """
        # Fix: this classmethod previously omitted `cls`, so the path
        # argument was bound to the class object instead.
        raise NotImplementedError(
            "Loading Spectrograms from images is not implemented yet"
        )

    def __setattr__(self, name, value):
        # Enforce immutability: all attributes are set in __init__ via
        # object.__setattr__ and may never be rebound afterwards.
        raise AttributeError("Spectrogram's cannot be modified")

    def __repr__(self):
        return f"<Spectrogram(spectrogram={self.spectrogram.shape}, frequencies={self.frequencies.shape}, times={self.times.shape})>"

    def min_max_scale(self, feature_range=(0, 1)):
        """
        Linearly rescale spectrogram values to a range of values using
        in_range as minimum and maximum

        Args:
            feature_range: tuple of (low,high) values for output

        Returns:
            Spectrogram object with values rescaled to feature_range
        """
        if len(feature_range) != 2:
            raise AttributeError(
                "Error: `feature_range` doesn't look like a 2-element tuple?"
            )
        if feature_range[1] < feature_range[0]:
            raise AttributeError("Error: `feature_range` isn't increasing?")

        return Spectrogram(
            min_max_scale(self.spectrogram, feature_range=feature_range),
            self.frequencies,
            self.times,
        )

    def linear_scale(self, feature_range=(0, 1)):
        """
        Linearly rescale spectrogram values to a range of values
        using in_range as decibel_limits

        Args:
            feature_range: tuple of (low,high) values for output

        Returns:
            Spectrogram object with values rescaled to feature_range
        """
        if len(feature_range) != 2:
            raise AttributeError(
                "Error: `feature_range` doesn't look like a 2-element tuple?"
            )
        if feature_range[1] < feature_range[0]:
            raise AttributeError("Error: `feature_range` isn't increasing?")

        return Spectrogram(
            linear_scale(
                self.spectrogram, in_range=self.decibel_limits, out_range=feature_range
            ),
            self.frequencies,
            self.times,
        )

    def limit_db_range(self, min_db=-100, max_db=-20):
        """ Limit the decibel values of the spectrogram to range from min_db to max_db

        values less than min_db are set to min_db
        values greater than max_db are set to max_db

        similar to Audacity's gain and range parameters

        Args:
            min_db: values lower than this are set to this
            max_db: values higher than this are set to this

        Returns:
            Spectrogram object with db range applied
        """
        # Fix: clip a copy — previously this mutated self.spectrogram in
        # place, breaking the class's immutability guarantee.
        _spec = self.spectrogram.copy()

        _spec[_spec > max_db] = max_db
        _spec[_spec < min_db] = min_db

        return Spectrogram(_spec, self.frequencies, self.times)

    def bandpass(self, min_f, max_f):
        """ extract a frequency band from a spectrogram

        crops the 2-d array of the spectrograms to the desired frequency range

        Args:
            min_f: low frequency in Hz for bandpass
            max_f: high frequency in Hz for bandpass

        Returns:
            bandpassed spectrogram object
        """

        # find indices of the frequencies in spec_freq closest to min_f and max_f
        lowest_index = np.abs(self.frequencies - min_f).argmin()
        highest_index = np.abs(self.frequencies - max_f).argmin()

        # take slices of the spectrogram and spec_freq that fall within desired range
        # (note: the row at highest_index itself is excluded by the slice)
        return Spectrogram(
            self.spectrogram[lowest_index:highest_index, :],
            self.frequencies[lowest_index:highest_index],
            self.times,
        )

    def trim(self, start_time, end_time):
        """ extract a time segment from a spectrogram

        Args:
            start_time: in seconds
            end_time: in seconds

        Returns:
            spectrogram object from extracted time segment
        """

        # find indices of the times in self.times closest to min_t and max_t
        lowest_index = np.abs(self.times - start_time).argmin()
        highest_index = np.abs(self.times - end_time).argmin()

        # take slices of the spectrogram and spec_freq that fall within desired range
        # (note: the column at highest_index itself is excluded by the slice)
        return Spectrogram(
            self.spectrogram[:, lowest_index:highest_index],
            self.frequencies,
            self.times[lowest_index:highest_index],
        )

    def plot(self, inline=True, fname=None, show_colorbar=False):
        """Plot the spectrogram with matplotlib.pyplot

        Args:
            inline=True:
            fname=None: specify a string path to save the plot to (ending in .png/.pdf)
            show_colorbar: include image legend colorbar from pyplot
        """
        from matplotlib import pyplot as plt

        plt.pcolormesh(self.times, self.frequencies, self.spectrogram, shading="auto")
        plt.xlabel("time (sec)")
        plt.ylabel("frequency (Hz)")
        if show_colorbar:
            plt.colorbar()

        # if fname is not None, save to file path fname
        if fname:
            plt.savefig(fname)

        # if not saving to file, check if a matplotlib backend is available
        if inline:
            import os

            if os.environ.get("MPLBACKEND") is None:
                warnings.warn("MPLBACKEND is 'None' in os.environ. Skipping plot.")
            else:
                plt.show()

    def amplitude(self, freq_range=None):
        """create an amplitude vs time signal from spectrogram

        by summing pixels in the vertical dimension

        Args
            freq_range=None: sum Spectrogrm only in this range of [low, high] frequencies in Hz
            (if None, all frequencies are summed)

        Returns:
            a time-series array of the vertical sum of spectrogram value

        """
        if freq_range is None:
            return np.sum(self.spectrogram, 0)
        else:
            return np.sum(self.bandpass(freq_range[0], freq_range[1]).spectrogram, 0)

    def net_amplitude(
        self, signal_band, reject_bands=None
    ):  # used to be called "net_power_signal" which is misleading (not power)
        """create amplitude signal in signal_band and subtract amplitude from reject_bands

        rescale the signal and reject bands by dividing by their bandwidths in Hz
        (amplitude of each reject_band is divided by the total bandwidth of all reject_bands.
        amplitude of signal_band is divided by badwidth of signal_band. )

        Args:
            signal_band: [low,high] frequency range in Hz (positive contribution)
            reject band: list of [low,high] frequency ranges in Hz (negative contribution)

        return: time-series array of net amplitude """

        # find the amplitude signal for the desired frequency band
        signal_band_amplitude = self.amplitude(signal_band)

        signal_band_bandwidth = signal_band[1] - signal_band[0]

        # rescale amplitude by 1 / size of frequency band in Hz ("amplitude per unit Hz" ~= color on a spectrogram)
        net_amplitude = signal_band_amplitude / signal_band_bandwidth

        # then subtract the energy in the the reject_bands from the signal_band_amplitude to get net_amplitude
        if not (reject_bands is None):
            # we sum up the sizes of the rejection bands (to not overweight signal_band)
            reject_bands = np.array(reject_bands)
            reject_bands_total_bandwidth = sum(reject_bands[:, 1] - reject_bands[:, 0])

            # subtract reject_band_amplitude
            for reject_band in reject_bands:
                reject_band_amplitude = self.amplitude(reject_band)
                net_amplitude = net_amplitude - (
                    reject_band_amplitude / reject_bands_total_bandwidth
                )

            # negative signal shouldn't be kept, because it means reject was stronger than signal. Zero it:
            net_amplitude = [max(0, s) for s in net_amplitude]

        return net_amplitude

    def to_image(self, shape=None, mode="RGB", spec_range=[-100, -20]):
        """
        create a Pillow Image from spectrogram
        linearly rescales values from db_range (default [-100, -20]) to [255,0]
        (ie, -20 db is loudest -> black, -100 db is quietest -> white)

        Args:
            destination: a file path (string)
            shape=None: tuple of image dimensions, eg (224,224)
            mode="RGB": RGB for 3-channel color or "L" for 1-channel grayscale
            spec_range=[-100,-20]: the lowest and highest possible values in the spectrogram

        Returns:
            Pillow Image object
        """
        from PIL import Image

        # rescale spec_range to [255, 0]
        array = linear_scale(self.spectrogram, in_range=spec_range, out_range=(255, 0))

        # create and save pillow Image
        # we pass the array upside-down to create right-side-up image
        image = Image.fromarray(array[::-1, :])
        image = image.convert(mode)
        if shape is not None:
            image = image.resize(shape)

        return image
| [
"numpy.abs",
"PIL.Image.fromarray",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"scipy.signal.spectrogram",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"os.environ.get",
"matplotlib.pyplot.pcolormesh",
"numpy.sum",
"opensoundscape.helpers.linear_scale",
"numpy.array"... | [((3021, 3167), 'scipy.signal.spectrogram', 'signal.spectrogram', (['audio.samples', 'audio.sample_rate'], {'window': 'window_type', 'nperseg': 'window_samples', 'noverlap': 'overlap_samples', 'scaling': '"""spectrum"""'}), "(audio.samples, audio.sample_rate, window=window_type,\n nperseg=window_samples, noverlap=overlap_samples, scaling='spectrum')\n", (3039, 3167), False, 'from scipy import signal\n'), ((8979, 9057), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['self.times', 'self.frequencies', 'self.spectrogram'], {'shading': '"""auto"""'}), "(self.times, self.frequencies, self.spectrogram, shading='auto')\n", (8993, 9057), True, 'from matplotlib import pyplot as plt\n'), ((9066, 9090), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (9076, 9090), True, 'from matplotlib import pyplot as plt\n'), ((9099, 9127), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency (Hz)"""'], {}), "('frequency (Hz)')\n", (9109, 9127), True, 'from matplotlib import pyplot as plt\n'), ((13044, 13115), 'opensoundscape.helpers.linear_scale', 'linear_scale', (['self.spectrogram'], {'in_range': 'spec_range', 'out_range': '(255, 0)'}), '(self.spectrogram, in_range=spec_range, out_range=(255, 0))\n', (13056, 13115), False, 'from opensoundscape.helpers import min_max_scale, linear_scale\n'), ((13242, 13273), 'PIL.Image.fromarray', 'Image.fromarray', (['array[::-1, :]'], {}), '(array[::-1, :])\n', (13257, 13273), False, 'from PIL import Image\n'), ((5286, 5346), 'opensoundscape.helpers.min_max_scale', 'min_max_scale', (['self.spectrogram'], {'feature_range': 'feature_range'}), '(self.spectrogram, feature_range=feature_range)\n', (5299, 5346), False, 'from opensoundscape.helpers import min_max_scale, linear_scale\n'), ((6090, 6180), 'opensoundscape.helpers.linear_scale', 'linear_scale', (['self.spectrogram'], {'in_range': 'self.decibel_limits', 'out_range': 'feature_range'}), '(self.spectrogram, 
in_range=self.decibel_limits, out_range=\n feature_range)\n', (6102, 6180), False, 'from opensoundscape.helpers import min_max_scale, linear_scale\n'), ((9166, 9180), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (9178, 9180), True, 'from matplotlib import pyplot as plt\n'), ((9268, 9286), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (9279, 9286), True, 'from matplotlib import pyplot as plt\n'), ((10061, 10088), 'numpy.sum', 'np.sum', (['self.spectrogram', '(0)'], {}), '(self.spectrogram, 0)\n', (10067, 10088), True, 'import numpy as np\n'), ((11570, 11592), 'numpy.array', 'np.array', (['reject_bands'], {}), '(reject_bands)\n', (11578, 11592), True, 'import numpy as np\n'), ((7475, 7507), 'numpy.abs', 'np.abs', (['(self.frequencies - min_f)'], {}), '(self.frequencies - min_f)\n', (7481, 7507), True, 'import numpy as np\n'), ((7541, 7573), 'numpy.abs', 'np.abs', (['(self.frequencies - max_f)'], {}), '(self.frequencies - max_f)\n', (7547, 7573), True, 'import numpy as np\n'), ((8222, 8253), 'numpy.abs', 'np.abs', (['(self.times - start_time)'], {}), '(self.times - start_time)\n', (8228, 8253), True, 'import numpy as np\n'), ((8287, 8316), 'numpy.abs', 'np.abs', (['(self.times - end_time)'], {}), '(self.times - end_time)\n', (8293, 8316), True, 'import numpy as np\n'), ((9421, 9449), 'os.environ.get', 'os.environ.get', (['"""MPLBACKEND"""'], {}), "('MPLBACKEND')\n", (9435, 9449), False, 'import os\n'), ((9475, 9542), 'warnings.warn', 'warnings.warn', (['"""MPLBACKEND is \'None\' in os.environ. Skipping plot."""'], {}), '("MPLBACKEND is \'None\' in os.environ. Skipping plot.")\n', (9488, 9542), False, 'import warnings\n'), ((9577, 9587), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9585, 9587), True, 'from matplotlib import pyplot as plt\n'), ((3464, 3499), 'numpy.full', 'np.full', (['spectrogram.shape', '(-np.inf)'], {}), '(spectrogram.shape, -np.inf)\n', (3471, 3499), True, 'import numpy as np\n')] |
#!python -m unittest tests.test_processing
import numba
import numpy as np
import pandas as pd
import tqdm
import h5py
import random
import statsmodels.stats.multitest
import urllib.request, json
import os
import socket
import re
import Bio.PDB.MMCIF2Dict
from itertools import groupby
import unittest
from scipy.spatial.transform import Rotation as R
from Bio import PDB
from structuremap.processing import download_alphafold_cif, \
download_alphafold_pae, \
format_alphafold_data, \
get_3d_dist, \
rotate_vector_around_axis, \
get_angle, \
get_paired_error, \
get_neighbors, \
annotate_accessibility, \
smooth_score, \
get_smooth_score, \
get_avg_3d_dist, \
get_avg_1d_dist, \
find_idr_pattern, \
annotate_proteins_with_idr_pattern, \
extend_flexible_pattern, \
get_extended_flexible_pattern, \
get_mod_ptm_fraction
# Folder containing this test module.
THIS_FOLDER = os.path.dirname(__file__)
# Static test fixtures (cif/pdb/hdf/csv files) live in <repo>/data/test_files.
# Note: os.path.dirname already returns a str, so no f-string wrapper is needed.
TEST_FOLDER = os.path.join(
    os.path.dirname(THIS_FOLDER),
    "data",
    "test_files",
)
class TestProcessing(unittest.TestCase):
    """Unit tests for structuremap.processing.

    Most tests run the processing functions on small hand-constructed arrays
    or DataFrames and compare against precomputed expected values. The two
    download tests require network access, and several tests read fixture
    files from TEST_FOLDER.
    """

    def test_download_alphafold_cif(self, ):
        """Download CIF files and classify accessions as valid / invalid / already present."""
        valid, invalid, existing = download_alphafold_cif(
            proteins=['O15552','Q5VSL9','Q7Z6M3','O15552yy'],
            out_folder=TEST_FOLDER)
        # 'Q5VSL9' is newly downloaded, 'O15552yy' is not a valid accession,
        # and the other two files already exist in the fixture folder.
        np.testing.assert_equal(valid, np.array(['Q5VSL9']))
        np.testing.assert_equal(invalid, np.array(['O15552yy']))
        np.testing.assert_equal(existing, np.array(['O15552','Q7Z6M3']))
        # remove the freshly downloaded file so the test is repeatable
        os.remove(
            os.path.join(
                TEST_FOLDER,
                'Q5VSL9.cif'
            )
        )

    def test_download_alphafold_pae(self, ):
        """Download predicted-aligned-error files; same valid/invalid/existing split as the CIF test."""
        valid, invalid, existing = download_alphafold_pae(
            proteins=['O15552','Q5VSL9','Q7Z6M3','O15552yy'],
            out_folder=TEST_FOLDER)
        np.testing.assert_equal(valid, np.array(['Q5VSL9']))
        np.testing.assert_equal(invalid, np.array(['O15552yy']))
        np.testing.assert_equal(existing, np.array(['O15552','Q7Z6M3']))
        # remove the freshly downloaded file so the test is repeatable
        os.remove(
            os.path.join(
                TEST_FOLDER,
                'pae_Q5VSL9.hdf'
            )
        )

    def test_format_alphafold_data(self, ):
        """Format the fixture CIF data and compare to a stored reference CSV."""
        alphafold_formatted = format_alphafold_data(
            directory=TEST_FOLDER, protein_ids=["Q7Z6M3","O15552"])
        alphafold_formatted_ini = pd.read_csv(
            os.path.join(
                TEST_FOLDER,
                'test_alphafold_annotation.csv'
            )
        )
        pd.testing.assert_frame_equal(alphafold_formatted, alphafold_formatted_ini, check_dtype=False)

    def test_get_3d_dist(self, ):
        """Euclidean distance between residue coordinates, checked to 6 decimals."""
        x = np.array([1.1,1.1,1.1,1.1,5.1])
        y = np.array([1.1,2.1,3.1,1.1,10.1])
        z = np.array([1.1,3.1,5.1,1.1,4.1])
        # coordinates as an (n, 3) array: one row per residue
        coordinate_array = np.vstack([x,y,z]).T
        np.testing.assert_equal(2.236068, np.round(get_3d_dist(coordinate_array, coordinate_array, 0, 1), decimals=6))
        np.testing.assert_equal(4.472136, np.round(get_3d_dist(coordinate_array, coordinate_array, 0, 2), decimals=6))
        # distance is symmetric in its two indices
        np.testing.assert_equal(4.472136, np.round(get_3d_dist(coordinate_array, coordinate_array, 2, 0), decimals=6))

    def rotate_vector_around_axis_scipy(self, vector, axis, theta):
        """Reference implementation via scipy Rotation; used to validate rotate_vector_around_axis."""
        theta = np.radians(theta)
        axis_norm = axis / np.linalg.norm(axis)
        r = R.from_rotvec(theta * axis_norm)
        return(r.apply(vector))

    def test_rotate_vector_around_axis(self, ):
        """Compare the package rotation against the scipy reference to 10 decimals."""
        v = np.array([3.0, 5.0, 0.0])
        a = np.array([4.0, 4.0, 1.0])
        t = 90
        res_real = rotate_vector_around_axis(v, a, t)
        res_scipy = self.rotate_vector_around_axis_scipy(v, a, t)
        np.testing.assert_almost_equal(res_real, res_scipy, decimal=10)

    def test_get_angle(self, ):
        """Angles computed from alpha/beta/carbonyl/amide backbone coordinates."""
        x_a = np.array([1.1,1.1,1.1])
        y_a = np.array([1.1,2.1,-3.1])
        z_a = np.array([1.1,3.1,5.1])
        # np.nan in the beta coordinates exercises the glycine case below
        x_b = np.array([1.5,np.nan,1.5])
        y_b = np.array([1.5,2.5,3.5])
        z_b = np.array([1.5,3.5,5.5])
        x_c = np.array([1.5,1.5,10.6])
        y_c = np.array([1.5,2.5,11.6])
        z_c = np.array([1.5,3.5,5.6])
        x_n = np.array([4.5,1.8,1.5])
        y_n = np.array([40.5,7.8,3.5])
        z_n = np.array([3.5,3.8,5.5])
        coordinate_array_a = np.vstack([x_a,y_a,z_a]).T
        coordinate_array_b = np.vstack([x_b,y_b,z_b]).T
        coordinate_array_c = np.vstack([x_c,y_c,z_c]).T
        coordinate_array_n = np.vstack([x_n,y_n,z_n]).T
        np.testing.assert_equal(39.231520,
                                np.round(get_angle(coordinate_array_a, coordinate_array_b,
                                                   coordinate_array_c, coordinate_array_n,
                                                   0, 1), decimals=6))
        np.testing.assert_equal(91.140756,
                                np.round(get_angle(coordinate_array_a, coordinate_array_b,
                                                   coordinate_array_c, coordinate_array_n,
                                                   0, 2), decimals=6))
        np.testing.assert_equal(47.168228,
                                np.round(get_angle(coordinate_array_a, coordinate_array_b,
                                                   coordinate_array_c, coordinate_array_n,
                                                   2, 0), decimals=6))
        # test gly (residue 1 has nan beta coordinates)
        np.testing.assert_equal(93.985035,
                                np.round(get_angle(coordinate_array_a, coordinate_array_b,
                                                   coordinate_array_c, coordinate_array_n,
                                                   1, 2), decimals=6))

    def test_get_paired_error(self, ):
        """Look up pairwise aligned-error values from the error matrix by position."""
        pos = np.array([1,2,3])
        error = np.array([[0,2,10],[1,0,5],[10,4,0]])
        np.testing.assert_equal(2, get_paired_error(pos, error, 0,1))
        np.testing.assert_equal(0, get_paired_error(pos, error, 2,2))
        # with a gap in the position list, index 1 maps to position 3
        pos = np.array([1,3])
        np.testing.assert_equal(10, get_paired_error(pos, error, 0,1))

    def test_get_neighbors(self, ):
        """Count neighboring residues within varying distance and angle cutoffs."""
        idxl = np.array([0,1,2])
        x_a = np.array([1.1,1.1,1.1])
        y_a = np.array([1.1,2.1,-3.1])
        z_a = np.array([1.1,3.1,5.1])
        x_b = np.array([1.5,np.nan,1.5])
        y_b = np.array([1.5,2.5,3.5])
        z_b = np.array([1.5,3.5,5.5])
        x_c = np.array([1.5,1.5,10.6])
        y_c = np.array([1.5,2.5,11.6])
        z_c = np.array([1.5,3.5,5.6])
        x_n = np.array([4.5,1.8,1.5])
        y_n = np.array([40.5,7.8,3.5])
        z_n = np.array([3.5,3.8,5.5])
        coordinate_array_a = np.vstack([x_a,y_a,z_a]).T
        coordinate_array_b = np.vstack([x_b,y_b,z_b]).T
        coordinate_array_c = np.vstack([x_c,y_c,z_c]).T
        coordinate_array_n = np.vstack([x_n,y_n,z_n]).T
        pos=np.array([1,2,3])
        error = np.array([[0,2,10],[1,0,5],[10,4,0]])
        # tight distance and angle cutoff
        np.testing.assert_equal(np.array([1, 0, 0]),
                                get_neighbors(idxl, coordinate_array_a, coordinate_array_b,
                                              coordinate_array_c, coordinate_array_n,
                                              pos, error, 5, 40))
        # same distance, wider angle
        np.testing.assert_equal(np.array([1, 1, 0]),
                                get_neighbors(idxl, coordinate_array_a, coordinate_array_b,
                                              coordinate_array_c, coordinate_array_n,
                                              pos, error, 5, 150))
        # wide distance and angle: every residue sees both others
        np.testing.assert_equal(np.array([2, 2, 2]),
                                get_neighbors(idxl, coordinate_array_a, coordinate_array_b,
                                              coordinate_array_c, coordinate_array_n,
                                              pos, error, 50, 140))

    def test_annotate_accessibility(self, ):
        """Compare half-sphere exposure annotation against Biopython's HSExposureCB."""
        radius = 12.0
        alphafold_annotation = pd.read_csv(
            os.path.join(
                TEST_FOLDER,
                'test_alphafold_annotation.csv'
            )
        )
        res_accessability = annotate_accessibility(
            df=alphafold_annotation[alphafold_annotation.protein_id=="Q7Z6M3"],
            max_dist=12,
            max_angle=90,
            error_dir=None)
        # comparison to https://biopython.org/docs/dev/api/Bio.PDB.HSExposure.html#Bio.PDB.HSExposure.HSExposureCB
        with open(
            os.path.join(
                TEST_FOLDER,
                'Q7Z6M3.pdb'
            )
        ) as pdbfile:
            p=PDB.PDBParser()
            s=p.get_structure('X', pdbfile)
            m=s[0]
            hse=PDB.HSExposureCB(m, radius)
            residue_list=PDB.Selection.unfold_entities(m,'R')
            res_hse = []
            for r in residue_list:
                res_hse.append(r.xtra['EXP_HSE_B_U'])
        np.testing.assert_equal(np.array(res_hse), res_accessability.nAA_12_90_nopae.values)
        # @ToDo: test with actual error_dir

    def test_smooth_score(self, ):
        """Moving average with half_window=1; edge values are averaged over the truncated window."""
        np.testing.assert_equal(np.array([1.5, 2. , 3. , 4. , 4.5]),smooth_score(score=np.array([1,2,3,4,5]), half_window=1))

    def test_get_smooth_score(self, ):
        """Per-protein smoothing of multiple score columns in a DataFrame."""
        testdata = pd.DataFrame({'protein_id':[1,1,1,1,1,1,2,2,2,2,2,2],
                               'protein_number':[1,1,1,1,1,1,2,2,2,2,2,2],
                               'position':[1,2,3,4,5,6,1,2,3,4,5,6],
                               'score':[1,2,3,4,5,6,7,8,9,10,11,12],
                               'score_2':[10,20,30,40,50,60,70,80,90,100,110,120]})
        test_res = get_smooth_score(testdata, np.array(['score','score_2']), [1])
        # smoothing must not leak across the protein boundary (positions 6 -> 1)
        np.testing.assert_equal([1.5,2,3,4,5,5.5,7.5,8,9,10,11,11.5], test_res.score_smooth1.values)
        np.testing.assert_equal([15,20,30,40,50,55,75,80,90,100,110,115], test_res.score_2_smooth1.values)

    def test_get_avg_3d_dist(self, ):
        """Average (or min) 3D distance between index sets with aligned-error corrections."""
        x = np.array([1.1,1.1,1.1,1.1,1.1,1.1])
        y = np.array([1.1,2.1,3.1,1.1,10.1,20.1])
        z = np.array([1.1,3.1,5.1,10.1,11.1,12.1])
        pos = np.array([1,2,3,4,5,6])
        error = np.array([[0,2,10,2,3,4],[1,0,5,3,2,9],[10,4,0,3,6,7],[10,4,5,0,6,7],[10,4,5,3,0,7],[10,4,0,3,6,0]])
        coordinate_array = np.vstack([x,y,z]).T
        np.testing.assert_equal(6.976812, np.round(get_avg_3d_dist(np.array([0,4]), coordinate_array, pos, error), decimals=6))
        np.testing.assert_equal(3.5, np.round(get_avg_3d_dist(np.array([0,2]), coordinate_array, pos, error), decimals=6))
        np.testing.assert_equal(5.668168, np.round(get_avg_3d_dist(np.array([0,3,4]), coordinate_array, pos, error), decimals=6))
        # metric='min' takes the minimum pairwise distance instead of the mean
        np.testing.assert_equal(4.666667, np.round(get_avg_3d_dist(np.array([0,3,4]), coordinate_array, pos, error, metric='min'), decimals=6))
        # error_operation='plus' adds the aligned error to each distance
        np.testing.assert_equal(14, np.round(get_avg_3d_dist(np.array([0,4]), coordinate_array, pos, error, error_operation='plus'), decimals=6))
        error = 0.1*error
        np.testing.assert_equal(13.876812, np.round(get_avg_3d_dist(np.array([0,4]), coordinate_array, pos, error, error_operation='plus'), decimals=6))
        # repeat with a gapped position list: index 2 maps to position 5
        x = np.array([1.1,1.1,1.1,1.1])
        y = np.array([1.1,1.1,10.1,20.1])
        z = np.array([1.1,10.1,11.1,12.1])
        pos = np.array([1,4,5,6])
        error = np.array([[0,2,10,2,3,4],[1,0,5,3,2,9],[10,4,0,3,6,7],[10,4,5,0,6,7],[10,4,5,3,0,7],[10,4,0,3,6,0]])
        coordinate_array = np.vstack([x,y,z]).T
        np.testing.assert_equal(6.976812, np.round(get_avg_3d_dist(np.array([0,2]), coordinate_array, pos, error), decimals=6))

    def test_get_avg_1d_dist(self, ):
        """Average (or min) sequence distance between index sets."""
        pos = np.array([1,2,3,4,5,6])
        np.testing.assert_equal(4, np.round(get_avg_1d_dist(np.array([0,4]), pos), decimals=6))
        np.testing.assert_equal(2.666667, np.round(get_avg_1d_dist(np.array([0,3,4]), pos), decimals=6))
        np.testing.assert_equal(1.666667, np.round(get_avg_1d_dist(np.array([0,3,4]), pos, metric='min'), decimals=6))
        # gapped position list
        pos = np.array([1,4,5,6])
        np.testing.assert_equal(4, np.round(get_avg_1d_dist(np.array([0,2]), pos), decimals=6))
        np.testing.assert_equal(2.666667, np.round(get_avg_1d_dist(np.array([0,1,2]), pos), decimals=6))

    def test_find_idr_pattern(self, ):
        """Detect a short-IDR pattern (long IDR, short gap, long IDR) and its start/end positions."""
        # element format: [is_idr, segment_length]
        assert find_idr_pattern(idr_list = [[0,300],[1,10],[0,500],[1,500]])[0] == True
        assert find_idr_pattern(idr_list = [[0,300],[1,50],[0,500]])[0] == False
        assert find_idr_pattern(idr_list = [[0,50],[0,50],[1,50],[0,500]])[0] == False
        assert find_idr_pattern(idr_list = [[0,30],[0,300],[1,50],[0,50]])[0] == False
        assert find_idr_pattern(idr_list = [[0,30]])[0] == False
        # second return value carries the [start] and [end] positions of the match
        assert find_idr_pattern(idr_list = [[0,300],[1,10],[0,500],[1,500]])[1][0][0] == [301]
        assert find_idr_pattern(idr_list = [[0,300],[1,10],[0,500],[1,500]])[1][0][1] == [310]
        assert find_idr_pattern(idr_list = [[1,10],[0,300],[1,10],[0,500],[1,500]])[1][0][0] == [311]
        assert find_idr_pattern(idr_list = [[1,10],[0,300],[1,10],[0,500],[1,500]])[1][0][1] == [320]

    def test_annotate_proteins_with_idr_pattern(self, ):
        """Mark the residues of a matched short-IDR pattern in the flexible_pattern column."""
        testdata = pd.DataFrame({'protein_id':[1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2],
                               'protein_number':[1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2],
                               'position':[1,2,3,4,5,6,7,8,9,10,11,12,1,2,3,4,5,6],
                               'IDR':[0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,1,0,0]})
        test_res = annotate_proteins_with_idr_pattern(testdata, 3, 3)
        # only protein 1's 3-residue IDR flanked by >=3 ordered residues matches
        np.testing.assert_equal([0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                list(test_res.flexible_pattern.values))

    def test_extend_flexible_pattern(self, ):
        """Binary pattern extension by one position on each side of every run of ones."""
        np.testing.assert_equal(np.array([1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0]),
                                extend_flexible_pattern(np.array([1,1,1,0,0,0,0,1,1,0,0,0,0]),1))

    def test_get_extended_flexible_pattern(self, ):
        """Per-protein pattern extension of several columns with different window sizes."""
        testdata = pd.DataFrame({'protein_id':[1,1,1,1,1,1,2,2,2,2,2,2],
                               'protein_number':[1,1,1,1,1,1,2,2,2,2,2,2],
                               'position':[1,2,3,4,5,6,1,2,3,4,5,6],
                               'score':[1,1,0,0,0,1,1,1,0,0,0,0],
                               'score_2':[0,0,0,0,0,0,0,0,0,0,0,1]})
        test_res = get_extended_flexible_pattern(testdata, np.array(['score','score_2']), [1])
        np.testing.assert_equal([1,1,1,0,1,1,1,1,1,0,0,0], test_res.score_extended_1.values)
        test_res = get_extended_flexible_pattern(testdata, np.array(['score','score_2']), [2])
        np.testing.assert_equal([1,1,1,1,1,1,1,1,1,1,0,0], test_res.score_extended_2.values)
        # extension must not cross the protein boundary
        np.testing.assert_equal([0,0,0,0,0,0,0,0,0,1,1,1], test_res.score_2_extended_2.values)

    def test_get_mod_ptm_fraction(self, ):
        """Fraction of PTM sites near modified residues, for real vs randomized distances."""
        # Example with 2 proteins and 2 randomizations
        # 1st protein with 3 modified lysines and 3 STY sites > 1 phospho
        # 2nd protein with 2 modified lysines and 4 STY sites > 2 phospho
        distances = [
            [[[10, 20, 30], [2, 10, 20], [5, 8, 30]],  # protein 1 > real
             [[30, 20, 50], [20, 10, 20], [50, 10, 30]],  # protein 1 > random 1
             [[20, 50, 10], [50, 40, 10], [50, 20, 30]]],  # protein 1 > random 2
            [[[10, 10, 30, 50], [50, 10, 5, 50]],  # protein 2 > real
             [[50, 20, 30, 40], [20, 20, 10, 80]],  # protein 2 > random 1
             [[15, 10, 30, 10], [10, 10, 20, 20]]]]  # protein 2 > random 2
        mod_idx = [[0],  # protein 1
                   [1, 2]]  # protein 2
        modidied_fraction = get_mod_ptm_fraction(
            distances, mod_idx, min_dist=0, max_dist=10)
        # Hand-computed expectation for min_dist=0, max_dist=10:
        # Real:
        # n_aa: 1,2,2,2,2
        # n_mod: 1,1,1,1,2
        # final: 9,6
        # Random 1:
        # n_aa: 0,1,1,0,1
        # n_mod: 0,0,0,0,1
        # final: 3,1
        # Random 2:
        # n_aa: 1,1,0,2,2
        # n_mod: 0,0,0,1,1
        # final: 6,2
        # Fractions: 0.66, 0.33, 0.33
        np.testing.assert_almost_equal(
            modidied_fraction,
            [0.66666666, 0.33333333, 0.33333333])
        # narrowing the window to [5, 10] drops one real count in protein 1
        modidied_fraction = get_mod_ptm_fraction(
            distances, mod_idx, min_dist=5, max_dist=10)
        np.testing.assert_almost_equal(
            modidied_fraction,
            [0.5, 0.33333333, 0.33333333])
# Entry point so the suite can also be run directly with
# `python -m unittest tests.test_processing` or by executing this file.
if __name__ == "__main__":
    unittest.main()
| [
"numpy.radians",
"scipy.spatial.transform.Rotation.from_rotvec",
"numpy.testing.assert_equal",
"numpy.array",
"structuremap.processing.annotate_accessibility",
"numpy.linalg.norm",
"unittest.main",
"pandas.testing.assert_frame_equal",
"structuremap.processing.get_paired_error",
"Bio.PDB.HSExposure... | [((903, 928), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (918, 928), False, 'import os\n'), ((16296, 16311), 'unittest.main', 'unittest.main', ([], {}), '()\n', (16309, 16311), False, 'import unittest\n'), ((1150, 1253), 'structuremap.processing.download_alphafold_cif', 'download_alphafold_cif', ([], {'proteins': "['O15552', 'Q5VSL9', 'Q7Z6M3', 'O15552yy']", 'out_folder': 'TEST_FOLDER'}), "(proteins=['O15552', 'Q5VSL9', 'Q7Z6M3', 'O15552yy'],\n out_folder=TEST_FOLDER)\n", (1172, 1253), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((1681, 1784), 'structuremap.processing.download_alphafold_pae', 'download_alphafold_pae', ([], {'proteins': "['O15552', 'Q5VSL9', 'Q7Z6M3', 'O15552yy']", 'out_folder': 'TEST_FOLDER'}), "(proteins=['O15552', 'Q5VSL9', 'Q7Z6M3', 'O15552yy'],\n out_folder=TEST_FOLDER)\n", (1703, 1784), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((2210, 2288), 'structuremap.processing.format_alphafold_data', 'format_alphafold_data', ([], {'directory': 'TEST_FOLDER', 'protein_ids': "['Q7Z6M3', 'O15552']"}), "(directory=TEST_FOLDER, protein_ids=['Q7Z6M3', 'O15552'])\n", (2231, 2288), False, 'from structuremap.processing import download_alphafold_cif, 
download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((2484, 2582), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['alphafold_formatted', 'alphafold_formatted_ini'], {'check_dtype': '(False)'}), '(alphafold_formatted, alphafold_formatted_ini,\n check_dtype=False)\n', (2513, 2582), True, 'import pandas as pd\n'), ((2626, 2661), 'numpy.array', 'np.array', (['[1.1, 1.1, 1.1, 1.1, 5.1]'], {}), '([1.1, 1.1, 1.1, 1.1, 5.1])\n', (2634, 2661), True, 'import numpy as np\n'), ((2670, 2706), 'numpy.array', 'np.array', (['[1.1, 2.1, 3.1, 1.1, 10.1]'], {}), '([1.1, 2.1, 3.1, 1.1, 10.1])\n', (2678, 2706), True, 'import numpy as np\n'), ((2715, 2750), 'numpy.array', 'np.array', (['[1.1, 3.1, 5.1, 1.1, 4.1]'], {}), '([1.1, 3.1, 5.1, 1.1, 4.1])\n', (2723, 2750), True, 'import numpy as np\n'), ((3237, 3254), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (3247, 3254), True, 'import numpy as np\n'), ((3315, 3347), 'scipy.spatial.transform.Rotation.from_rotvec', 'R.from_rotvec', (['(theta * axis_norm)'], {}), '(theta * axis_norm)\n', (3328, 3347), True, 'from scipy.spatial.transform import Rotation as R\n'), ((3441, 3466), 'numpy.array', 'np.array', (['[3.0, 5.0, 0.0]'], {}), '([3.0, 5.0, 0.0])\n', (3449, 3466), True, 'import numpy as np\n'), ((3479, 3504), 'numpy.array', 'np.array', (['[4.0, 4.0, 1.0]'], {}), '([4.0, 4.0, 1.0])\n', (3487, 3504), True, 'import numpy as np\n'), ((3540, 3574), 'structuremap.processing.rotate_vector_around_axis', 'rotate_vector_around_axis', (['v', 'a', 't'], {}), '(v, a, t)\n', (3565, 3574), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, 
rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((3650, 3713), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res_real', 'res_scipy'], {'decimal': '(10)'}), '(res_real, res_scipy, decimal=10)\n', (3680, 3713), True, 'import numpy as np\n'), ((3761, 3786), 'numpy.array', 'np.array', (['[1.1, 1.1, 1.1]'], {}), '([1.1, 1.1, 1.1])\n', (3769, 3786), True, 'import numpy as np\n'), ((3799, 3825), 'numpy.array', 'np.array', (['[1.1, 2.1, -3.1]'], {}), '([1.1, 2.1, -3.1])\n', (3807, 3825), True, 'import numpy as np\n'), ((3838, 3863), 'numpy.array', 'np.array', (['[1.1, 3.1, 5.1]'], {}), '([1.1, 3.1, 5.1])\n', (3846, 3863), True, 'import numpy as np\n'), ((3876, 3904), 'numpy.array', 'np.array', (['[1.5, np.nan, 1.5]'], {}), '([1.5, np.nan, 1.5])\n', (3884, 3904), True, 'import numpy as np\n'), ((3917, 3942), 'numpy.array', 'np.array', (['[1.5, 2.5, 3.5]'], {}), '([1.5, 2.5, 3.5])\n', (3925, 3942), True, 'import numpy as np\n'), ((3955, 3980), 'numpy.array', 'np.array', (['[1.5, 3.5, 5.5]'], {}), '([1.5, 3.5, 5.5])\n', (3963, 3980), True, 'import numpy as np\n'), ((3993, 4019), 'numpy.array', 'np.array', (['[1.5, 1.5, 10.6]'], {}), '([1.5, 1.5, 10.6])\n', (4001, 4019), True, 'import numpy as np\n'), ((4032, 4058), 'numpy.array', 'np.array', (['[1.5, 2.5, 11.6]'], {}), '([1.5, 2.5, 11.6])\n', (4040, 4058), True, 'import numpy as np\n'), ((4071, 4096), 'numpy.array', 'np.array', (['[1.5, 3.5, 5.6]'], {}), '([1.5, 3.5, 5.6])\n', (4079, 4096), True, 'import numpy as np\n'), ((4109, 4134), 'numpy.array', 'np.array', (['[4.5, 1.8, 1.5]'], {}), '([4.5, 1.8, 1.5])\n', (4117, 4134), True, 'import numpy as np\n'), ((4147, 4173), 'numpy.array', 'np.array', (['[40.5, 7.8, 3.5]'], {}), '([40.5, 7.8, 3.5])\n', (4155, 
4173), True, 'import numpy as np\n'), ((4186, 4211), 'numpy.array', 'np.array', (['[3.5, 3.8, 5.5]'], {}), '([3.5, 3.8, 5.5])\n', (4194, 4211), True, 'import numpy as np\n'), ((5694, 5713), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (5702, 5713), True, 'import numpy as np\n'), ((5728, 5773), 'numpy.array', 'np.array', (['[[0, 2, 10], [1, 0, 5], [10, 4, 0]]'], {}), '([[0, 2, 10], [1, 0, 5], [10, 4, 0]])\n', (5736, 5773), True, 'import numpy as np\n'), ((5921, 5937), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (5929, 5937), True, 'import numpy as np\n'), ((6060, 6079), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (6068, 6079), True, 'import numpy as np\n'), ((6092, 6117), 'numpy.array', 'np.array', (['[1.1, 1.1, 1.1]'], {}), '([1.1, 1.1, 1.1])\n', (6100, 6117), True, 'import numpy as np\n'), ((6130, 6156), 'numpy.array', 'np.array', (['[1.1, 2.1, -3.1]'], {}), '([1.1, 2.1, -3.1])\n', (6138, 6156), True, 'import numpy as np\n'), ((6169, 6194), 'numpy.array', 'np.array', (['[1.1, 3.1, 5.1]'], {}), '([1.1, 3.1, 5.1])\n', (6177, 6194), True, 'import numpy as np\n'), ((6207, 6235), 'numpy.array', 'np.array', (['[1.5, np.nan, 1.5]'], {}), '([1.5, np.nan, 1.5])\n', (6215, 6235), True, 'import numpy as np\n'), ((6248, 6273), 'numpy.array', 'np.array', (['[1.5, 2.5, 3.5]'], {}), '([1.5, 2.5, 3.5])\n', (6256, 6273), True, 'import numpy as np\n'), ((6286, 6311), 'numpy.array', 'np.array', (['[1.5, 3.5, 5.5]'], {}), '([1.5, 3.5, 5.5])\n', (6294, 6311), True, 'import numpy as np\n'), ((6324, 6350), 'numpy.array', 'np.array', (['[1.5, 1.5, 10.6]'], {}), '([1.5, 1.5, 10.6])\n', (6332, 6350), True, 'import numpy as np\n'), ((6363, 6389), 'numpy.array', 'np.array', (['[1.5, 2.5, 11.6]'], {}), '([1.5, 2.5, 11.6])\n', (6371, 6389), True, 'import numpy as np\n'), ((6402, 6427), 'numpy.array', 'np.array', (['[1.5, 3.5, 5.6]'], {}), '([1.5, 3.5, 5.6])\n', (6410, 6427), True, 'import numpy as np\n'), ((6440, 6465), 'numpy.array', 
'np.array', (['[4.5, 1.8, 1.5]'], {}), '([4.5, 1.8, 1.5])\n', (6448, 6465), True, 'import numpy as np\n'), ((6478, 6504), 'numpy.array', 'np.array', (['[40.5, 7.8, 3.5]'], {}), '([40.5, 7.8, 3.5])\n', (6486, 6504), True, 'import numpy as np\n'), ((6517, 6542), 'numpy.array', 'np.array', (['[3.5, 3.8, 5.5]'], {}), '([3.5, 3.8, 5.5])\n', (6525, 6542), True, 'import numpy as np\n'), ((6779, 6798), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (6787, 6798), True, 'import numpy as np\n'), ((6813, 6858), 'numpy.array', 'np.array', (['[[0, 2, 10], [1, 0, 5], [10, 4, 0]]'], {}), '([[0, 2, 10], [1, 0, 5], [10, 4, 0]])\n', (6821, 6858), True, 'import numpy as np\n'), ((8015, 8155), 'structuremap.processing.annotate_accessibility', 'annotate_accessibility', ([], {'df': "alphafold_annotation[alphafold_annotation.protein_id == 'Q7Z6M3']", 'max_dist': '(12)', 'max_angle': '(90)', 'error_dir': 'None'}), "(df=alphafold_annotation[alphafold_annotation.\n protein_id == 'Q7Z6M3'], max_dist=12, max_angle=90, error_dir=None)\n", (8037, 8155), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((9126, 9430), 'pandas.DataFrame', 'pd.DataFrame', (["{'protein_id': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], 'protein_number': [1, \n 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], 'position': [1, 2, 3, 4, 5, 6, 1, 2, \n 3, 4, 5, 6], 'score': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],\n 'score_2': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120]}"], {}), "({'protein_id': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2],\n 'protein_number': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], 'position': [1,\n 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6], 'score': 
[1, 2, 3, 4, 5, 6, 7, 8, 9, \n 10, 11, 12], 'score_2': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, \n 120]})\n", (9138, 9430), True, 'import pandas as pd\n'), ((9575, 9682), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['[1.5, 2, 3, 4, 5, 5.5, 7.5, 8, 9, 10, 11, 11.5]', 'test_res.score_smooth1.values'], {}), '([1.5, 2, 3, 4, 5, 5.5, 7.5, 8, 9, 10, 11, 11.5],\n test_res.score_smooth1.values)\n', (9598, 9682), True, 'import numpy as np\n'), ((9676, 9789), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['[15, 20, 30, 40, 50, 55, 75, 80, 90, 100, 110, 115]', 'test_res.score_2_smooth1.values'], {}), '([15, 20, 30, 40, 50, 55, 75, 80, 90, 100, 110, 115],\n test_res.score_2_smooth1.values)\n', (9699, 9789), True, 'import numpy as np\n'), ((9826, 9866), 'numpy.array', 'np.array', (['[1.1, 1.1, 1.1, 1.1, 1.1, 1.1]'], {}), '([1.1, 1.1, 1.1, 1.1, 1.1, 1.1])\n', (9834, 9866), True, 'import numpy as np\n'), ((9874, 9916), 'numpy.array', 'np.array', (['[1.1, 2.1, 3.1, 1.1, 10.1, 20.1]'], {}), '([1.1, 2.1, 3.1, 1.1, 10.1, 20.1])\n', (9882, 9916), True, 'import numpy as np\n'), ((9924, 9967), 'numpy.array', 'np.array', (['[1.1, 3.1, 5.1, 10.1, 11.1, 12.1]'], {}), '([1.1, 3.1, 5.1, 10.1, 11.1, 12.1])\n', (9932, 9967), True, 'import numpy as np\n'), ((9977, 10005), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (9985, 10005), True, 'import numpy as np\n'), ((10017, 10156), 'numpy.array', 'np.array', (['[[0, 2, 10, 2, 3, 4], [1, 0, 5, 3, 2, 9], [10, 4, 0, 3, 6, 7], [10, 4, 5, 0,\n 6, 7], [10, 4, 5, 3, 0, 7], [10, 4, 0, 3, 6, 0]]'], {}), '([[0, 2, 10, 2, 3, 4], [1, 0, 5, 3, 2, 9], [10, 4, 0, 3, 6, 7], [10,\n 4, 5, 0, 6, 7], [10, 4, 5, 3, 0, 7], [10, 4, 0, 3, 6, 0]])\n', (10025, 10156), True, 'import numpy as np\n'), ((11033, 11063), 'numpy.array', 'np.array', (['[1.1, 1.1, 1.1, 1.1]'], {}), '([1.1, 1.1, 1.1, 1.1])\n', (11041, 11063), True, 'import numpy as np\n'), ((11073, 11105), 'numpy.array', 'np.array', (['[1.1, 1.1, 
10.1, 20.1]'], {}), '([1.1, 1.1, 10.1, 20.1])\n', (11081, 11105), True, 'import numpy as np\n'), ((11115, 11148), 'numpy.array', 'np.array', (['[1.1, 10.1, 11.1, 12.1]'], {}), '([1.1, 10.1, 11.1, 12.1])\n', (11123, 11148), True, 'import numpy as np\n'), ((11160, 11182), 'numpy.array', 'np.array', (['[1, 4, 5, 6]'], {}), '([1, 4, 5, 6])\n', (11168, 11182), True, 'import numpy as np\n'), ((11196, 11335), 'numpy.array', 'np.array', (['[[0, 2, 10, 2, 3, 4], [1, 0, 5, 3, 2, 9], [10, 4, 0, 3, 6, 7], [10, 4, 5, 0,\n 6, 7], [10, 4, 5, 3, 0, 7], [10, 4, 0, 3, 6, 0]]'], {}), '([[0, 2, 10, 2, 3, 4], [1, 0, 5, 3, 2, 9], [10, 4, 0, 3, 6, 7], [10,\n 4, 5, 0, 6, 7], [10, 4, 5, 3, 0, 7], [10, 4, 0, 3, 6, 0]])\n', (11204, 11335), True, 'import numpy as np\n'), ((11528, 11556), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (11536, 11556), True, 'import numpy as np\n'), ((11887, 11909), 'numpy.array', 'np.array', (['[1, 4, 5, 6]'], {}), '([1, 4, 5, 6])\n', (11895, 11909), True, 'import numpy as np\n'), ((13028, 13334), 'pandas.DataFrame', 'pd.DataFrame', (["{'protein_id': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2],\n 'protein_number': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2\n ], 'position': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6\n ], 'IDR': [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]}"], {}), "({'protein_id': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,\n 2, 2], 'protein_number': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, \n 2, 2, 2], 'position': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, \n 4, 5, 6], 'IDR': [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]})\n", (13040, 13334), True, 'import pandas as pd\n'), ((13367, 13417), 'structuremap.processing.annotate_proteins_with_idr_pattern', 'annotate_proteins_with_idr_pattern', (['testdata', '(3)', '(3)'], {}), '(testdata, 3, 3)\n', (13401, 13417), False, 'from structuremap.processing import download_alphafold_cif, 
download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((13878, 14159), 'pandas.DataFrame', 'pd.DataFrame', (["{'protein_id': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], 'protein_number': [1, \n 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], 'position': [1, 2, 3, 4, 5, 6, 1, 2, \n 3, 4, 5, 6], 'score': [1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], 'score_2':\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]}"], {}), "({'protein_id': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2],\n 'protein_number': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], 'position': [1,\n 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6], 'score': [1, 1, 0, 0, 0, 1, 1, 1, 0, \n 0, 0, 0], 'score_2': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]})\n", (13890, 14159), True, 'import pandas as pd\n'), ((14322, 14422), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['[1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0]', 'test_res.score_extended_1.values'], {}), '([1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0], test_res.\n score_extended_1.values)\n', (14345, 14422), True, 'import numpy as np\n'), ((14510, 14610), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]', 'test_res.score_extended_2.values'], {}), '([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], test_res.\n score_extended_2.values)\n', (14533, 14610), True, 'import numpy as np\n'), ((14603, 14705), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]', 'test_res.score_2_extended_2.values'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], test_res.\n score_2_extended_2.values)\n', (14626, 14705), True, 'import numpy as np\n'), ((15522, 15587), 'structuremap.processing.get_mod_ptm_fraction', 'get_mod_ptm_fraction', (['distances', 'mod_idx'], {'min_dist': '(0)', 
'max_dist': '(10)'}), '(distances, mod_idx, min_dist=0, max_dist=10)\n', (15542, 15587), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((15929, 16021), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['modidied_fraction', '[0.66666666, 0.33333333, 0.33333333]'], {}), '(modidied_fraction, [0.66666666, 0.33333333, \n 0.33333333])\n', (15959, 16021), True, 'import numpy as np\n'), ((16070, 16135), 'structuremap.processing.get_mod_ptm_fraction', 'get_mod_ptm_fraction', (['distances', 'mod_idx'], {'min_dist': '(5)', 'max_dist': '(10)'}), '(distances, mod_idx, min_dist=5, max_dist=10)\n', (16090, 16135), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((16157, 16242), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['modidied_fraction', '[0.5, 0.33333333, 0.33333333]'], {}), '(modidied_fraction, [0.5, 0.33333333, 0.33333333]\n )\n', (16187, 16242), True, 'import numpy as np\n'), ((964, 992), 'os.path.dirname', 'os.path.dirname', (['THIS_FOLDER'], {}), '(THIS_FOLDER)\n', (979, 992), False, 'import os\n'), ((1312, 1332), 'numpy.array', 'np.array', (["['Q5VSL9']"], {}), "(['Q5VSL9'])\n", (1320, 1332), True, 'import numpy as np\n'), ((1375, 1397), 'numpy.array', 'np.array', 
(["['O15552yy']"], {}), "(['O15552yy'])\n", (1383, 1397), True, 'import numpy as np\n'), ((1441, 1471), 'numpy.array', 'np.array', (["['O15552', 'Q7Z6M3']"], {}), "(['O15552', 'Q7Z6M3'])\n", (1449, 1471), True, 'import numpy as np\n'), ((1504, 1543), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""Q5VSL9.cif"""'], {}), "(TEST_FOLDER, 'Q5VSL9.cif')\n", (1516, 1543), False, 'import os\n'), ((1843, 1863), 'numpy.array', 'np.array', (["['Q5VSL9']"], {}), "(['Q5VSL9'])\n", (1851, 1863), True, 'import numpy as np\n'), ((1906, 1928), 'numpy.array', 'np.array', (["['O15552yy']"], {}), "(['O15552yy'])\n", (1914, 1928), True, 'import numpy as np\n'), ((1972, 2002), 'numpy.array', 'np.array', (["['O15552', 'Q7Z6M3']"], {}), "(['O15552', 'Q7Z6M3'])\n", (1980, 2002), True, 'import numpy as np\n'), ((2035, 2078), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""pae_Q5VSL9.hdf"""'], {}), "(TEST_FOLDER, 'pae_Q5VSL9.hdf')\n", (2047, 2078), False, 'import os\n'), ((2361, 2419), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_alphafold_annotation.csv"""'], {}), "(TEST_FOLDER, 'test_alphafold_annotation.csv')\n", (2373, 2419), False, 'import os\n'), ((2774, 2794), 'numpy.vstack', 'np.vstack', (['[x, y, z]'], {}), '([x, y, z])\n', (2783, 2794), True, 'import numpy as np\n'), ((3282, 3302), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (3296, 3302), True, 'import numpy as np\n'), ((4240, 4266), 'numpy.vstack', 'np.vstack', (['[x_a, y_a, z_a]'], {}), '([x_a, y_a, z_a])\n', (4249, 4266), True, 'import numpy as np\n'), ((4296, 4322), 'numpy.vstack', 'np.vstack', (['[x_b, y_b, z_b]'], {}), '([x_b, y_b, z_b])\n', (4305, 4322), True, 'import numpy as np\n'), ((4352, 4378), 'numpy.vstack', 'np.vstack', (['[x_c, y_c, z_c]'], {}), '([x_c, y_c, z_c])\n', (4361, 4378), True, 'import numpy as np\n'), ((4408, 4434), 'numpy.vstack', 'np.vstack', (['[x_n, y_n, z_n]'], {}), '([x_n, y_n, z_n])\n', (4417, 4434), True, 'import numpy as np\n'), ((5801, 5835), 
'structuremap.processing.get_paired_error', 'get_paired_error', (['pos', 'error', '(0)', '(1)'], {}), '(pos, error, 0, 1)\n', (5817, 5835), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((5871, 5905), 'structuremap.processing.get_paired_error', 'get_paired_error', (['pos', 'error', '(2)', '(2)'], {}), '(pos, error, 2, 2)\n', (5887, 5905), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((5973, 6007), 'structuremap.processing.get_paired_error', 'get_paired_error', (['pos', 'error', '(0)', '(1)'], {}), '(pos, error, 0, 1)\n', (5989, 6007), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((6571, 6597), 'numpy.vstack', 'np.vstack', (['[x_a, y_a, z_a]'], {}), '([x_a, y_a, z_a])\n', (6580, 6597), True, 'import numpy as np\n'), ((6627, 6653), 'numpy.vstack', 'np.vstack', (['[x_b, y_b, z_b]'], {}), '([x_b, y_b, z_b])\n', (6636, 6653), True, 'import numpy as 
np\n'), ((6683, 6709), 'numpy.vstack', 'np.vstack', (['[x_c, y_c, z_c]'], {}), '([x_c, y_c, z_c])\n', (6692, 6709), True, 'import numpy as np\n'), ((6739, 6765), 'numpy.vstack', 'np.vstack', (['[x_n, y_n, z_n]'], {}), '([x_n, y_n, z_n])\n', (6748, 6765), True, 'import numpy as np\n'), ((6884, 6903), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (6892, 6903), True, 'import numpy as np\n'), ((6937, 7059), 'structuremap.processing.get_neighbors', 'get_neighbors', (['idxl', 'coordinate_array_a', 'coordinate_array_b', 'coordinate_array_c', 'coordinate_array_n', 'pos', 'error', '(5)', '(40)'], {}), '(idxl, coordinate_array_a, coordinate_array_b,\n coordinate_array_c, coordinate_array_n, pos, error, 5, 40)\n', (6950, 7059), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((7181, 7200), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (7189, 7200), True, 'import numpy as np\n'), ((7234, 7357), 'structuremap.processing.get_neighbors', 'get_neighbors', (['idxl', 'coordinate_array_a', 'coordinate_array_b', 'coordinate_array_c', 'coordinate_array_n', 'pos', 'error', '(5)', '(150)'], {}), '(idxl, coordinate_array_a, coordinate_array_b,\n coordinate_array_c, coordinate_array_n, pos, error, 5, 150)\n', (7247, 7357), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, 
extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((7479, 7498), 'numpy.array', 'np.array', (['[2, 2, 2]'], {}), '([2, 2, 2])\n', (7487, 7498), True, 'import numpy as np\n'), ((7532, 7656), 'structuremap.processing.get_neighbors', 'get_neighbors', (['idxl', 'coordinate_array_a', 'coordinate_array_b', 'coordinate_array_c', 'coordinate_array_n', 'pos', 'error', '(50)', '(140)'], {}), '(idxl, coordinate_array_a, coordinate_array_b,\n coordinate_array_c, coordinate_array_n, pos, error, 50, 140)\n', (7545, 7656), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((7871, 7929), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_alphafold_annotation.csv"""'], {}), "(TEST_FOLDER, 'test_alphafold_annotation.csv')\n", (7883, 7929), False, 'import os\n'), ((8467, 8482), 'Bio.PDB.PDBParser', 'PDB.PDBParser', ([], {}), '()\n', (8480, 8482), False, 'from Bio import PDB\n'), ((8562, 8589), 'Bio.PDB.HSExposureCB', 'PDB.HSExposureCB', (['m', 'radius'], {}), '(m, radius)\n', (8578, 8589), False, 'from Bio import PDB\n'), ((8615, 8652), 'Bio.PDB.Selection.unfold_entities', 'PDB.Selection.unfold_entities', (['m', '"""R"""'], {}), "(m, 'R')\n", (8644, 8652), False, 'from Bio import PDB\n'), ((8799, 8816), 'numpy.array', 'np.array', (['res_hse'], {}), '(res_hse)\n', (8807, 8816), True, 'import numpy as np\n'), ((8973, 9008), 'numpy.array', 'np.array', (['[1.5, 2.0, 3.0, 4.0, 4.5]'], {}), '([1.5, 2.0, 3.0, 4.0, 4.5])\n', (8981, 9008), True, 'import numpy as np\n'), ((9531, 9561), 'numpy.array', 'np.array', (["['score', 'score_2']"], {}), "(['score', 'score_2'])\n", (9539, 9561), 
True, 'import numpy as np\n'), ((10146, 10166), 'numpy.vstack', 'np.vstack', (['[x, y, z]'], {}), '([x, y, z])\n', (10155, 10166), True, 'import numpy as np\n'), ((11325, 11345), 'numpy.vstack', 'np.vstack', (['[x, y, z]'], {}), '([x, y, z])\n', (11334, 11345), True, 'import numpy as np\n'), ((13657, 13706), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0]'], {}), '([1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0])\n', (13665, 13706), True, 'import numpy as np\n'), ((14278, 14308), 'numpy.array', 'np.array', (["['score', 'score_2']"], {}), "(['score', 'score_2'])\n", (14286, 14308), True, 'import numpy as np\n'), ((14466, 14496), 'numpy.array', 'np.array', (["['score', 'score_2']"], {}), "(['score', 'score_2'])\n", (14474, 14496), True, 'import numpy as np\n'), ((2846, 2899), 'structuremap.processing.get_3d_dist', 'get_3d_dist', (['coordinate_array', 'coordinate_array', '(0)', '(1)'], {}), '(coordinate_array, coordinate_array, 0, 1)\n', (2857, 2899), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((2965, 3018), 'structuremap.processing.get_3d_dist', 'get_3d_dist', (['coordinate_array', 'coordinate_array', '(0)', '(2)'], {}), '(coordinate_array, coordinate_array, 0, 2)\n', (2976, 3018), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, 
get_mod_ptm_fraction\n'), ((3084, 3137), 'structuremap.processing.get_3d_dist', 'get_3d_dist', (['coordinate_array', 'coordinate_array', '(2)', '(0)'], {}), '(coordinate_array, coordinate_array, 2, 0)\n', (3095, 3137), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((4520, 4619), 'structuremap.processing.get_angle', 'get_angle', (['coordinate_array_a', 'coordinate_array_b', 'coordinate_array_c', 'coordinate_array_n', '(0)', '(1)'], {}), '(coordinate_array_a, coordinate_array_b, coordinate_array_c,\n coordinate_array_n, 0, 1)\n', (4529, 4619), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((4816, 4915), 'structuremap.processing.get_angle', 'get_angle', (['coordinate_array_a', 'coordinate_array_b', 'coordinate_array_c', 'coordinate_array_n', '(0)', '(2)'], {}), '(coordinate_array_a, coordinate_array_b, coordinate_array_c,\n coordinate_array_n, 0, 2)\n', (4825, 4915), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, 
extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((5112, 5211), 'structuremap.processing.get_angle', 'get_angle', (['coordinate_array_a', 'coordinate_array_b', 'coordinate_array_c', 'coordinate_array_n', '(2)', '(0)'], {}), '(coordinate_array_a, coordinate_array_b, coordinate_array_c,\n coordinate_array_n, 2, 0)\n', (5121, 5211), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((5428, 5527), 'structuremap.processing.get_angle', 'get_angle', (['coordinate_array_a', 'coordinate_array_b', 'coordinate_array_c', 'coordinate_array_n', '(1)', '(2)'], {}), '(coordinate_array_a, coordinate_array_b, coordinate_array_c,\n coordinate_array_n, 1, 2)\n', (5437, 5527), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((8345, 8384), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""Q7Z6M3.pdb"""'], {}), "(TEST_FOLDER, 'Q7Z6M3.pdb')\n", (8357, 8384), False, 'import os\n'), ((12163, 12229), 'structuremap.processing.find_idr_pattern', 'find_idr_pattern', ([], {'idr_list': '[[0, 300], [1, 10], [0, 500], [1, 500]]'}), '(idr_list=[[0, 300], [1, 10], [0, 500], [1, 500]])\n', (12179, 12229), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, 
get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((12251, 12307), 'structuremap.processing.find_idr_pattern', 'find_idr_pattern', ([], {'idr_list': '[[0, 300], [1, 50], [0, 500]]'}), '(idr_list=[[0, 300], [1, 50], [0, 500]])\n', (12267, 12307), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((12332, 12396), 'structuremap.processing.find_idr_pattern', 'find_idr_pattern', ([], {'idr_list': '[[0, 50], [0, 50], [1, 50], [0, 500]]'}), '(idr_list=[[0, 50], [0, 50], [1, 50], [0, 500]])\n', (12348, 12396), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((12419, 12483), 'structuremap.processing.find_idr_pattern', 'find_idr_pattern', ([], {'idr_list': '[[0, 30], [0, 300], [1, 50], [0, 50]]'}), '(idr_list=[[0, 30], [0, 300], [1, 50], [0, 50]])\n', (12435, 12483), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, 
annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((12506, 12542), 'structuremap.processing.find_idr_pattern', 'find_idr_pattern', ([], {'idr_list': '[[0, 30]]'}), '(idr_list=[[0, 30]])\n', (12522, 12542), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((13764, 13813), 'numpy.array', 'np.array', (['[1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]'], {}), '([1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0])\n', (13772, 13813), True, 'import numpy as np\n'), ((9028, 9053), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (9036, 9053), True, 'import numpy as np\n'), ((10235, 10251), 'numpy.array', 'np.array', (['[0, 4]'], {}), '([0, 4])\n', (10243, 10251), True, 'import numpy as np\n'), ((10358, 10374), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (10366, 10374), True, 'import numpy as np\n'), ((10487, 10506), 'numpy.array', 'np.array', (['[0, 3, 4]'], {}), '([0, 3, 4])\n', (10495, 10506), True, 'import numpy as np\n'), ((10617, 10636), 'numpy.array', 'np.array', (['[0, 3, 4]'], {}), '([0, 3, 4])\n', (10625, 10636), True, 'import numpy as np\n'), ((10756, 10772), 'numpy.array', 'np.array', (['[0, 4]'], {}), '([0, 4])\n', (10764, 10772), True, 'import numpy as np\n'), ((10935, 10951), 'numpy.array', 'np.array', (['[0, 4]'], {}), '([0, 4])\n', (10943, 10951), True, 'import numpy as np\n'), ((11414, 11430), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (11422, 11430), True, 'import numpy as 
np\n'), ((11612, 11628), 'numpy.array', 'np.array', (['[0, 4]'], {}), '([0, 4])\n', (11620, 11628), True, 'import numpy as np\n'), ((11715, 11734), 'numpy.array', 'np.array', (['[0, 3, 4]'], {}), '([0, 3, 4])\n', (11723, 11734), True, 'import numpy as np\n'), ((11820, 11839), 'numpy.array', 'np.array', (['[0, 3, 4]'], {}), '([0, 3, 4])\n', (11828, 11839), True, 'import numpy as np\n'), ((11967, 11983), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (11975, 11983), True, 'import numpy as np\n'), ((12070, 12089), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (12078, 12089), True, 'import numpy as np\n'), ((12572, 12638), 'structuremap.processing.find_idr_pattern', 'find_idr_pattern', ([], {'idr_list': '[[0, 300], [1, 10], [0, 500], [1, 500]]'}), '(idr_list=[[0, 300], [1, 10], [0, 500], [1, 500]])\n', (12588, 12638), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((12667, 12733), 'structuremap.processing.find_idr_pattern', 'find_idr_pattern', ([], {'idr_list': '[[0, 300], [1, 10], [0, 500], [1, 500]]'}), '(idr_list=[[0, 300], [1, 10], [0, 500], [1, 500]])\n', (12683, 12733), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((12762, 12837), 'structuremap.processing.find_idr_pattern', 
'find_idr_pattern', ([], {'idr_list': '[[1, 10], [0, 300], [1, 10], [0, 500], [1, 500]]'}), '(idr_list=[[1, 10], [0, 300], [1, 10], [0, 500], [1, 500]])\n', (12778, 12837), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n'), ((12864, 12939), 'structuremap.processing.find_idr_pattern', 'find_idr_pattern', ([], {'idr_list': '[[1, 10], [0, 300], [1, 10], [0, 500], [1, 500]]'}), '(idr_list=[[1, 10], [0, 300], [1, 10], [0, 500], [1, 500]])\n', (12880, 12939), False, 'from structuremap.processing import download_alphafold_cif, download_alphafold_pae, format_alphafold_data, get_3d_dist, rotate_vector_around_axis, get_angle, get_paired_error, get_neighbors, annotate_accessibility, smooth_score, get_smooth_score, get_avg_3d_dist, get_avg_1d_dist, find_idr_pattern, annotate_proteins_with_idr_pattern, extend_flexible_pattern, get_extended_flexible_pattern, get_mod_ptm_fraction\n')] |
import numpy as np
import torch
import torch.nn as nn
from sklearn.cluster import KMeans
class SPClustering(nn.Module):
def __init__(self,d=256, k =10):
super(SPClustering, self).__init__()
self.d = d
self.k = k
def myKNN(self, S, k, sigma=1.0):
N = len(S)
A = np.zeros((N, N))
for i in range(N):
dist_with_index = zip(S[i], range(N))
dist_with_index = sorted(dist_with_index, key=lambda x: x[0])
neighbours_id = [dist_with_index[m][1] for m in range(k + 1)] # xi's k nearest neighbours
for j in neighbours_id: # xj is xi's neighbour
A[i][j] = np.exp(-S[i][j] / 2 / sigma / sigma)
A[j][i] = A[i][j] # mutually
return A
def calLaplacianMatrix(self, adjacentMatrix):
# compute the Degree Matrix: D=sum(A)
degreeMatrix = np.sum(adjacentMatrix, axis=1)
# print degreeMatrix
# compute the Laplacian Matrix: L=D-A
laplacianMatrix = np.diag(degreeMatrix) - adjacentMatrix
# normailze
# D^(-1/2) L D^(-1/2)
sqrtDegreeMatrix = np.diag(1.0 / (degreeMatrix ** (0.5)))
return np.dot(np.dot(sqrtDegreeMatrix, laplacianMatrix), sqrtDegreeMatrix)
def euclidDistance(self, x1, x2, sqrt_flag=False):
res = np.sum((x1-x2)**2)
if sqrt_flag:
res = np.sqrt(res)
return res
def calEuclidDistanceMatrix(self, X):
X = np.array(X)
S = np.zeros((len(X), len(X)))
for i in range(len(X)):
for j in range(i+1, len(X)):
S[i][j] = 1.0 * self.euclidDistance(X[i], X[j])
S[j][i] = S[i][j]
return S
def forward(self, nodes, labels):
Similarity = self.calEuclidDistanceMatrix(nodes)
Adjacent = self.myKNN(Similarity, k=self.k)
Laplacian = self.calLaplacianMatrix(Adjacent)
x, V = np.linalg.eig(Laplacian)
x = zip(x, range(len(x)))
x = sorted(x, key=lambda x: x[0])
H = np.vstack([V[:, i] for (v, i) in x]).T
sp_kmeans = KMeans(n_clusters=2).fit(H).cluster_centers_
| [
"sklearn.cluster.KMeans",
"numpy.sqrt",
"numpy.linalg.eig",
"numpy.diag",
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.vstack"
] | [((311, 327), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (319, 327), True, 'import numpy as np\n'), ((891, 921), 'numpy.sum', 'np.sum', (['adjacentMatrix'], {'axis': '(1)'}), '(adjacentMatrix, axis=1)\n', (897, 921), True, 'import numpy as np\n'), ((1140, 1174), 'numpy.diag', 'np.diag', (['(1.0 / degreeMatrix ** 0.5)'], {}), '(1.0 / degreeMatrix ** 0.5)\n', (1147, 1174), True, 'import numpy as np\n'), ((1333, 1355), 'numpy.sum', 'np.sum', (['((x1 - x2) ** 2)'], {}), '((x1 - x2) ** 2)\n', (1339, 1355), True, 'import numpy as np\n'), ((1479, 1490), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1487, 1490), True, 'import numpy as np\n'), ((1938, 1962), 'numpy.linalg.eig', 'np.linalg.eig', (['Laplacian'], {}), '(Laplacian)\n', (1951, 1962), True, 'import numpy as np\n'), ((1023, 1044), 'numpy.diag', 'np.diag', (['degreeMatrix'], {}), '(degreeMatrix)\n', (1030, 1044), True, 'import numpy as np\n'), ((1201, 1242), 'numpy.dot', 'np.dot', (['sqrtDegreeMatrix', 'laplacianMatrix'], {}), '(sqrtDegreeMatrix, laplacianMatrix)\n', (1207, 1242), True, 'import numpy as np\n'), ((1392, 1404), 'numpy.sqrt', 'np.sqrt', (['res'], {}), '(res)\n', (1399, 1404), True, 'import numpy as np\n'), ((2051, 2085), 'numpy.vstack', 'np.vstack', (['[V[:, i] for v, i in x]'], {}), '([V[:, i] for v, i in x])\n', (2060, 2085), True, 'import numpy as np\n'), ((669, 705), 'numpy.exp', 'np.exp', (['(-S[i][j] / 2 / sigma / sigma)'], {}), '(-S[i][j] / 2 / sigma / sigma)\n', (675, 705), True, 'import numpy as np\n'), ((2110, 2130), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)'}), '(n_clusters=2)\n', (2116, 2130), False, 'from sklearn.cluster import KMeans\n')] |
import os
import math
from functools import singledispatch
from typing import overload, Union
import numba
import numpy as np
from .util import (
TempFileHolder,
glue_csv,
glue_hdf,
glue_parquet,
parse_csv,
parse_hdf,
parse_parquet,
_parallel_argsort,
)
try:
import pandas as pd
pandas_import = True
except ModuleNotFoundError:
pandas_import = False
if pandas_import:
    # fmt: off
    # The @overload stubs only inform static type checkers that the return
    # type mirrors the input type (DataFrame in -> DataFrame out, ndarray
    # in -> ndarray out).  Runtime dispatch happens via functools.singledispatch.
    @overload
    def quantile_normalize(data: pd.DataFrame,
                           axis: int = 1,
                           target: Union[None, np.ndarray] = None,
                           ncpus: int = 1,
                           ) -> pd.DataFrame: ...
    @overload
    def quantile_normalize(data: np.ndarray,
                           axis: int = 1,
                           target: Union[None, np.ndarray] = None,
                           ncpus: int = 1,
                           ) -> np.ndarray: ...
    # fmt: on

    @singledispatch
    def quantile_normalize(
        data: Union[pd.DataFrame, np.ndarray],
        axis: int = 1,
        target: Union[None, np.ndarray] = None,
        ncpus: int = 1,
    ) -> Union[pd.DataFrame, np.ndarray]:
        """
        Quantile normalize your array/dataframe.

        It does quantile normalization in the "correct" way in the sense that
        it takes the mean of duplicate values instead of ignoring them.

        Args:
            data: numpy.ndarray or pandas.DataFrame to be normalized
            axis: axis along to normalize. Axis=1 (default) normalizes each
                column/sample which gives them identical distributions.
                Axis=0 normalizes each row/feature giving them all identical
                distributions.
            target: distribution to normalize onto
            ncpus: number of cpus to use for normalization

        Returns: a quantile normalized copy of the input.

        Raises:
            NotImplementedError: when ``data`` is neither a DataFrame nor an
                ndarray (no singledispatch implementation is registered).
        """
        raise NotImplementedError(
            f"quantile_normalize not implemented for type {type(data)}"
        )

    @quantile_normalize.register(pd.DataFrame)
    def quantile_normalize_pd(
        data: pd.DataFrame,
        axis: int = 1,
        target: Union[None, np.ndarray] = None,
        ncpus: int = 1,
    ) -> pd.DataFrame:
        """
        Quantile normalize a pandas DataFrame.

        Thin wrapper around the ndarray implementation: normalizes a copy of
        the underlying values (as float) and writes them back in place, so
        the result keeps the original index, columns, and layout.

        Args:
            data: pandas.DataFrame to be normalized
            axis: axis along to normalize (1 = per column, 0 = per row);
                forwarded to quantile_normalize_np, which handles both cases
            target: distribution to normalize onto
            ncpus: number of cpus to use for normalization

        Returns: a quantile normalized copy of the input DataFrame.
        """
        qn_data = data.copy()
        # NOTE(review): the previous if axis == 0 / else had two byte-identical
        # branches, so the conditional was dead code and its "transpose here"
        # comment was stale; axis handling lives in quantile_normalize_np.
        qn_data[:] = quantile_normalize_np(
            qn_data.values.astype(float), axis, target, ncpus
        )
        return qn_data
    def incremental_quantile_normalize(
        infile: str,
        outfile: str,
        rowchunksize: int = 100_000,
        colchunksize: int = 8,
        ncpus: int = 1,
    ) -> None:
        """
        Memory-efficient quantile normalization implementation by splitting
        the task into sequential subtasks, and writing the intermediate results
        to disk instead of keeping them in memory. This makes the memory
        footprint independent of the input table, however also slower..
        Args:
            infile: path to input table. The table can be either a csv-like file
                of which the delimiter is auto detected. Or the infile can be a
                hdf file, which requires to be stored with format=table.
            outfile: path to the output table. Has the same layout and delimiter
                as the input file. If the input is csv-like, the output is csv-
                like. If the input is hdf, then the output is hdf.
            rowchunksize: how many rows to read/write at the same time when
                combining intermediate results. More is faster, but also uses
                more memory.
            colchunksize: how many columns to use at the same time when
                calculating the mean and normalizing. More is faster, but also
                uses more memory.
            ncpus: The number of cpus to use. Scales diminishingly, and more
                than four is generally not useful.
        """
        # Stage 0: detect the on-disk format from the extension and read only
        # the table's metadata (column names and index), not the data itself.
        if infile.endswith((".hdf", ".h5")):
            dataformat = "hdf"
            columns, index = parse_hdf(infile)
        elif infile.endswith((".csv", ".tsv", ".txt")):
            dataformat = "csv"
            columns, index, delimiter = parse_csv(infile)
        # NOTE: (".parquet") is a plain string, not a 1-tuple; endswith
        # accepts either, so this works as written.
        elif infile.endswith((".parquet")):
            dataformat = "parquet"
            columns, index, index_used, schema = parse_parquet(infile)
        else:
            raise NotImplementedError(
                "Only HDF ('.hdf', '.h5'), "
                "text ('.csv', '.tsv', '.txt'), "
                "and parquet ('.parquet') formats are supported."
            )
        # now scan the table for which columns and indices it contains
        nr_cols = len(columns)
        nr_rows = len(index)
        # store intermediate tables
        tmp_vals = []
        tmp_sorted_vals = []
        tmp_idxs = []
        # calculate the target (rank means)
        target = np.zeros(nr_rows)
        with TempFileHolder() as tfh:
            # loop over our column chunks and keep updating our target
            # Stage 1: one pass over the columns in chunks of colchunksize,
            # accumulating the per-rank means into ``target`` and caching the
            # sorted data/indices per chunk in temp .npy files.
            for i in range(math.ceil(nr_cols / colchunksize)):
                col_start, col_end = (
                    i * colchunksize,
                    np.clip((i + 1) * colchunksize, 0, nr_cols),
                )
                # read relevant columns
                if dataformat == "hdf":
                    with pd.HDFStore(infile) as hdf:
                        assert len(hdf.keys()) == 1
                        key = hdf.keys()[0]
                        # the comprehension's ``i`` shadows the chunk index
                        # only inside its own scope (Python 3 semantics)
                        cols = [
                            hdf.select_column(key, columns[i])
                            for i in range(col_start, col_end)
                        ]
                        df = pd.concat(cols, axis=1).astype("float32")
                elif dataformat == "csv":
                    df = pd.read_csv(
                        infile,
                        sep=delimiter,
                        comment="#",
                        index_col=0,
                        usecols=[0, *list(range(col_start + 1, col_end + 1))],
                    ).astype("float32")
                elif dataformat == "parquet":
                    df = pd.read_parquet(
                        infile, columns=columns[col_start:col_end]
                    )
                # get the rank means
                data, sorted_idx = _parallel_argsort(
                    df.values, ncpus, df.values.dtype
                )
                del df
                sorted_vals = np.take_along_axis(
                    data,
                    sorted_idx,
                    axis=0,
                )
                rankmeans = np.mean(sorted_vals, axis=1)
                # update the target
                # streaming mean: this chunk contributes with weight
                # chunk_size / columns_seen_so_far
                target += (rankmeans - target) * (
                    (col_end - col_start) / (col_end)
                )
                # save all our intermediate stuff
                tmp_vals.append(
                    tfh.get_filename(prefix="qnorm_", suffix=".npy")
                )
                tmp_sorted_vals.append(
                    tfh.get_filename(prefix="qnorm_", suffix=".npy")
                )
                tmp_idxs.append(
                    tfh.get_filename(prefix="qnorm_", suffix=".npy")
                )
                np.save(tmp_vals[-1], data)
                np.save(tmp_sorted_vals[-1], sorted_vals)
                np.save(tmp_idxs[-1], sorted_idx)
                del data, sorted_idx, sorted_vals
            # now that we have our target we can start normalizing in chunks
            qnorm_tmp = []
            # store intermediate results
            # and start with our index and store it
            index_tmpfiles = []
            for chunk in np.array_split(
                index, math.ceil(len(index) / rowchunksize)
            ):
                index_tmpfiles.append(
                    tfh.get_filename(prefix="qnorm_", suffix=".p")
                )
                pd.DataFrame(chunk).to_pickle(
                    index_tmpfiles[-1], compression=None
                )
            qnorm_tmp.append(index_tmpfiles)
            del index
            # for each column chunk quantile normalize it onto our distribution
            # Stage 2: re-load each cached chunk and map it onto ``target``,
            # writing the normalized values back out in row chunks.
            for i in range(math.ceil(nr_cols / colchunksize)):
                # read the relevant columns in
                data = np.load(tmp_vals[i], allow_pickle=True)
                sorted_idx = np.load(tmp_idxs[i], allow_pickle=True)
                sorted_vals = np.load(tmp_sorted_vals[i], allow_pickle=True)
                # quantile normalize
                qnormed = _numba_accel_qnorm(
                    data, sorted_idx, sorted_vals, target
                )
                del data, sorted_idx, sorted_vals
                # store it in tempfile
                col_tmpfiles = []
                for j, chunk in enumerate(
                    np.array_split(
                        qnormed, math.ceil(qnormed.shape[0] / rowchunksize)
                    )
                ):
                    tmpfile = tfh.get_filename(
                        prefix=f"qnorm_{i}_{j}_", suffix=".npy"
                    )
                    col_tmpfiles.append(tmpfile)
                    np.save(tmpfile, chunk)
                del qnormed, chunk
                qnorm_tmp.append(col_tmpfiles)
            if os.path.exists(outfile):
                os.remove(outfile)
            # glue the separate files together and save them
            # Stage 3: stitch the row/column chunk files into the output
            # table in the same format as the input.
            if dataformat == "hdf":
                glue_hdf(outfile, columns, qnorm_tmp)
            elif dataformat == "csv":
                glue_csv(outfile, columns, qnorm_tmp, delimiter)
            elif dataformat == "parquet":
                glue_parquet(outfile, columns, qnorm_tmp, index_used, schema)
else:
    # Fallback definition used when pandas could not be imported (see the
    # enclosing ``if pandas_import`` / ``else``): the public API is then
    # ndarray-only. As above, this base only raises; the real implementation
    # is registered for np.ndarray below.
    @singledispatch
    def quantile_normalize(
        data: np.ndarray,
        axis: int = 1,
        target: Union[None, np.ndarray] = None,
        ncpus: int = 1,
    ) -> np.ndarray:
        """
        Quantile normalize your array.
        It does quantile normalization in the "correct" way in the sense that
        it takes the mean of duplicate values instead of ignoring them.
        Args:
            data: numpy.ndarray or pandas.DataFrame to be normalized
            axis: axis along to normalize. Axis=1 (default) normalizes each
                column/sample which gives them identical distributions.
                Axis=0 normalizes each row/feature giving them all identical
                distributions.
            target: distribution to normalize onto
            ncpus: number of cpus to use for normalization
        Returns: a quantile normalized copy of the input.
        """
        raise NotImplementedError(
            f"quantile_normalize not implemented for type {type(data)}"
        )
@quantile_normalize.register(np.ndarray)
def quantile_normalize_np(
_data: np.ndarray,
axis: int = 1,
target: Union[None, np.ndarray] = None,
ncpus: int = 1,
) -> np.ndarray:
# check for supported dtypes
if not np.issubdtype(_data.dtype, np.number):
raise ValueError(
f"The type of your data ({_data.dtype}) is is not "
f"supported, and might lead to undefined behaviour. "
f"Please use numeric data only."
)
# numba does not (yet) support smaller
elif any(
np.issubdtype(_data.dtype, dtype) for dtype in [np.int32, np.float32]
):
dtype = np.float32
else:
dtype = np.float64
# take a transposed view of our data if axis is one
if axis == 0:
_data = np.transpose(_data)
elif axis == 1:
pass
else:
raise ValueError(
f"qnorm only supports 2 dimensional data, so the axis"
f"has to be either 0 or 1, but you set axis to "
f"{axis}."
)
# sort the array, single process or multiprocessing
if ncpus == 1:
# single process sorting
data = _data.astype(dtype=dtype)
# we do the sorting outside of numba because the numpy implementation
# is faster, and numba does not support the axis argument.
sorted_idx = np.argsort(data, axis=0)
elif ncpus > 1:
# multiproces sorting
data, sorted_idx = _parallel_argsort(_data, ncpus, dtype)
else:
raise ValueError("The number of cpus needs to be a positive integer.")
sorted_val = np.take_along_axis(data, sorted_idx, axis=0)
if target is None:
# if no target supplied get the (sorted) rowmeans
target = np.mean(sorted_val, axis=1)
else:
# otherwise make sure target is correct data type and shape
if not isinstance(target, np.ndarray):
try:
target = np.array(target)
except Exception:
raise ValueError(
"The target could not be converted to a " "numpy.ndarray."
)
if target.ndim != 1:
raise ValueError(
f"The target array should be a 1-dimensionsal vector, however "
f"you supplied a vector with {target.ndim} dimensions"
)
if target.shape[0] != data.shape[0]:
raise ValueError(
f"The target array does not contain the same amount of values "
f"({target.shape[0]}) as the data contains rows "
f"({data.shape[0]})"
)
if not np.issubdtype(target.dtype, np.number):
raise ValueError(
f"The type of your target ({data.dtype}) is is not "
f"supported, and might lead to undefined behaviour. "
f"Please use numeric data only."
)
target = np.sort(target.astype(dtype=dtype))
final_res = _numba_accel_qnorm(data, sorted_idx, sorted_val, target)
if axis == 0:
final_res = final_res.T
return final_res
    @numba.jit(nopython=True, fastmath=True, cache=True)
    def _numba_accel_qnorm(
        qnorm: np.ndarray,
        sorted_idx: np.ndarray,
        sorted_val: np.ndarray,
        target: np.ndarray,
    ) -> np.ndarray:
        """
        numba accelerated "actual" qnorm normalization.

        Writes the normalized values into ``qnorm`` in place (and also
        returns it): for every column, runs of equal sorted values are
        replaced by the mean of their corresponding ``target`` ranks.

        NOTE(review): if ``sorted_val`` contains NaN, the equality test
        below is False even for the element against itself, so ``n`` stays 0
        and ``i += n`` would never advance -- a possible infinite loop on
        NaN input. Confirm callers guarantee NaN-free data.
        """
        # get the shape of the input
        n_rows = qnorm.shape[0]
        n_cols = qnorm.shape[1]
        for col_i in range(n_cols):
            i = 0
            # we fill out a column not from lowest index to highest index,
            # but we fill out qnorm from lowest value to highest value
            while i < n_rows:
                n = 0
                val = 0.0
                # since there might be duplicate numbers in a column, we search for
                # all the indices that have these duplicate numbers. Then we take
                # the mean of their rowmeans.
                while (
                    i + n < n_rows
                    and sorted_val[i, col_i] == sorted_val[i + n, col_i]
                ):
                    val += target[i + n]
                    n += 1
                # fill out qnorm with our new value
                if n > 0:
                    val /= n
                for j in range(n):
                    idx = sorted_idx[i + j, col_i]
                    qnorm[idx, col_i] = val
                i += n
        return qnorm
| [
"numpy.clip",
"numpy.mean",
"os.path.exists",
"math.ceil",
"pandas.read_parquet",
"numpy.argsort",
"numpy.issubdtype",
"numpy.zeros",
"numba.jit",
"os.remove",
"numpy.array",
"pandas.HDFStore",
"pandas.DataFrame",
"numpy.take_along_axis",
"numpy.transpose",
"pandas.concat",
"numpy.sa... | [((14198, 14249), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'fastmath': '(True)', 'cache': '(True)'}), '(nopython=True, fastmath=True, cache=True)\n', (14207, 14249), False, 'import numba\n'), ((12697, 12741), 'numpy.take_along_axis', 'np.take_along_axis', (['data', 'sorted_idx'], {'axis': '(0)'}), '(data, sorted_idx, axis=0)\n', (12715, 12741), True, 'import numpy as np\n'), ((5219, 5236), 'numpy.zeros', 'np.zeros', (['nr_rows'], {}), '(nr_rows)\n', (5227, 5236), True, 'import numpy as np\n'), ((11336, 11373), 'numpy.issubdtype', 'np.issubdtype', (['_data.dtype', 'np.number'], {}), '(_data.dtype, np.number)\n', (11349, 11373), True, 'import numpy as np\n'), ((11883, 11902), 'numpy.transpose', 'np.transpose', (['_data'], {}), '(_data)\n', (11895, 11902), True, 'import numpy as np\n'), ((12449, 12473), 'numpy.argsort', 'np.argsort', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (12459, 12473), True, 'import numpy as np\n'), ((12841, 12868), 'numpy.mean', 'np.mean', (['sorted_val'], {'axis': '(1)'}), '(sorted_val, axis=1)\n', (12848, 12868), True, 'import numpy as np\n'), ((9619, 9642), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (9633, 9642), False, 'import os\n'), ((13725, 13763), 'numpy.issubdtype', 'np.issubdtype', (['target.dtype', 'np.number'], {}), '(target.dtype, np.number)\n', (13738, 13763), True, 'import numpy as np\n'), ((5374, 5407), 'math.ceil', 'math.ceil', (['(nr_cols / colchunksize)'], {}), '(nr_cols / colchunksize)\n', (5383, 5407), False, 'import math\n'), ((6793, 6837), 'numpy.take_along_axis', 'np.take_along_axis', (['data', 'sorted_idx'], {'axis': '(0)'}), '(data, sorted_idx, axis=0)\n', (6811, 6837), True, 'import numpy as np\n'), ((6945, 6973), 'numpy.mean', 'np.mean', (['sorted_vals'], {'axis': '(1)'}), '(sorted_vals, axis=1)\n', (6952, 6973), True, 'import numpy as np\n'), ((7568, 7595), 'numpy.save', 'np.save', (['tmp_vals[-1]', 'data'], {}), '(tmp_vals[-1], data)\n', (7575, 7595), 
True, 'import numpy as np\n'), ((7612, 7653), 'numpy.save', 'np.save', (['tmp_sorted_vals[-1]', 'sorted_vals'], {}), '(tmp_sorted_vals[-1], sorted_vals)\n', (7619, 7653), True, 'import numpy as np\n'), ((7670, 7703), 'numpy.save', 'np.save', (['tmp_idxs[-1]', 'sorted_idx'], {}), '(tmp_idxs[-1], sorted_idx)\n', (7677, 7703), True, 'import numpy as np\n'), ((8522, 8555), 'math.ceil', 'math.ceil', (['(nr_cols / colchunksize)'], {}), '(nr_cols / colchunksize)\n', (8531, 8555), False, 'import math\n'), ((8628, 8667), 'numpy.load', 'np.load', (['tmp_vals[i]'], {'allow_pickle': '(True)'}), '(tmp_vals[i], allow_pickle=True)\n', (8635, 8667), True, 'import numpy as np\n'), ((8697, 8736), 'numpy.load', 'np.load', (['tmp_idxs[i]'], {'allow_pickle': '(True)'}), '(tmp_idxs[i], allow_pickle=True)\n', (8704, 8736), True, 'import numpy as np\n'), ((8767, 8813), 'numpy.load', 'np.load', (['tmp_sorted_vals[i]'], {'allow_pickle': '(True)'}), '(tmp_sorted_vals[i], allow_pickle=True)\n', (8774, 8813), True, 'import numpy as np\n'), ((9660, 9678), 'os.remove', 'os.remove', (['outfile'], {}), '(outfile)\n', (9669, 9678), False, 'import os\n'), ((11651, 11684), 'numpy.issubdtype', 'np.issubdtype', (['_data.dtype', 'dtype'], {}), '(_data.dtype, dtype)\n', (11664, 11684), True, 'import numpy as np\n'), ((13036, 13052), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (13044, 13052), True, 'import numpy as np\n'), ((5507, 5550), 'numpy.clip', 'np.clip', (['((i + 1) * colchunksize)', '(0)', 'nr_cols'], {}), '((i + 1) * colchunksize, 0, nr_cols)\n', (5514, 5550), True, 'import numpy as np\n'), ((9497, 9520), 'numpy.save', 'np.save', (['tmpfile', 'chunk'], {}), '(tmpfile, chunk)\n', (9504, 9520), True, 'import numpy as np\n'), ((5675, 5694), 'pandas.HDFStore', 'pd.HDFStore', (['infile'], {}), '(infile)\n', (5686, 5694), True, 'import pandas as pd\n'), ((8241, 8260), 'pandas.DataFrame', 'pd.DataFrame', (['chunk'], {}), '(chunk)\n', (8253, 8260), True, 'import pandas as pd\n'), ((9210, 
9252), 'math.ceil', 'math.ceil', (['(qnormed.shape[0] / rowchunksize)'], {}), '(qnormed.shape[0] / rowchunksize)\n', (9219, 9252), False, 'import math\n'), ((6470, 6529), 'pandas.read_parquet', 'pd.read_parquet', (['infile'], {'columns': 'columns[col_start:col_end]'}), '(infile, columns=columns[col_start:col_end])\n', (6485, 6529), True, 'import pandas as pd\n'), ((6013, 6036), 'pandas.concat', 'pd.concat', (['cols'], {'axis': '(1)'}), '(cols, axis=1)\n', (6022, 6036), True, 'import pandas as pd\n')] |
# Copyright (C) 2011 <NAME>
#
# This file was originally part of phonopy.
#
# Phonopy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Phonopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with phonopy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
class Atoms:
    """Minimal ASE-compatible Atoms container.

    Only the functionality needed by phonopy is implemented: a lattice
    (``cell``), fractional/Cartesian positions, chemical symbols, atomic
    numbers, masses and magnetic moments. Symbols, numbers and masses are
    derived from each other (via the module-level ``atom_data`` and
    ``symbol_map`` tables) when only one of them is supplied.
    """
    def __init__(self,
                 symbols=None,
                 positions=None,
                 numbers=None,
                 masses=None,
                 magmoms=None,
                 scaled_positions=None,
                 cell=None,
                 pbc=None):
        # cell: 3x3 float array of lattice vectors (or None)
        if cell is None:
            self.cell = None
        else:
            self.cell = np.array(cell, dtype=float)
        # positions: Cartesian input is converted to fractional coordinates;
        # fractional (scaled) input is stored directly
        self.scaled_positions = None
        if self.cell is not None and positions is not None:
            self.set_positions(positions)
        if scaled_positions is not None:
            self.set_scaled_positions(scaled_positions)
        # Atom symbols
        self.symbols = symbols
        # Atomic numbers
        if numbers is None:
            self.numbers = None
        else:
            self.numbers = np.array(numbers, dtype=int)
        # masses
        self.set_masses(masses)
        # (initial) magnetic moments
        self.set_magnetic_moments(magmoms)
        # derive whichever of symbols/numbers was not given from the other
        if self.numbers is not None:
            self.numbers_to_symbols()
        elif self.symbols is not None:
            self.symbols_to_numbers()
        # fall back to standard atomic masses when none were supplied
        if self.symbols is not None and self.masses is None:
            self.symbols_to_masses()
        # NOTE: ``pbc`` is accepted for ASE API compatibility but unused.

    def set_cell(self, cell):
        """Set the lattice vectors as a 3x3 float array."""
        self.cell = np.array(cell, dtype=float)

    def get_cell(self):
        """Return a copy of the lattice vectors."""
        return self.cell.copy()

    def set_positions(self, cart_positions):
        """Set atomic positions from Cartesian coordinates."""
        self.scaled_positions = np.dot(cart_positions,
                                       np.linalg.inv(self.cell))

    def get_positions(self):
        """Return Cartesian positions (fractional coordinates times cell)."""
        return np.dot(self.scaled_positions, self.cell)

    def set_scaled_positions(self, scaled_positions):
        """Set atomic positions in fractional (cell) coordinates."""
        self.scaled_positions = np.array(scaled_positions, dtype=float)

    def get_scaled_positions(self):
        """Return a copy of the fractional coordinates."""
        return self.scaled_positions.copy()

    def set_masses(self, masses):
        """Set per-atom masses, or clear them with None."""
        if masses is None:
            self.masses = None
        else:
            self.masses = np.array(masses, dtype=float)

    def get_masses(self):
        """Return a copy of the per-atom masses.

        Note: raises AttributeError when masses are unset (None),
        mirroring the historical behaviour of this class.
        """
        return self.masses.copy()

    def set_magnetic_moments(self, magmoms):
        """Set per-atom initial magnetic moments, or clear them with None."""
        if magmoms is None:
            self.magmoms = None
        else:
            self.magmoms = np.array(magmoms, dtype=float)

    def get_magnetic_moments(self):
        """Return a copy of the magnetic moments, or None if unset."""
        if self.magmoms is None:
            return None
        return self.magmoms.copy()

    def set_chemical_symbols(self, symbols):
        """Set the list of chemical symbols."""
        self.symbols = symbols

    def get_chemical_symbols(self):
        """Return a shallow copy of the chemical symbols."""
        return self.symbols[:]

    def get_number_of_atoms(self):
        """Return the number of atoms (rows of scaled_positions)."""
        return len(self.scaled_positions)

    def get_atomic_numbers(self):
        """Return a copy of the atomic numbers."""
        return self.numbers.copy()

    def numbers_to_symbols(self):
        """Derive chemical symbols from atomic numbers via atom_data."""
        self.symbols = [atom_data[n][1] for n in self.numbers]

    def symbols_to_numbers(self):
        """Derive atomic numbers from chemical symbols via symbol_map."""
        self.numbers = np.array([symbol_map[s]
                                 for s in self.symbols])

    def symbols_to_masses(self):
        """Derive standard atomic masses from chemical symbols."""
        self.masses = np.array([atom_data[symbol_map[s]][3]
                                for s in self.symbols])

    def get_volume(self):
        """Return the cell volume (determinant of the cell matrix)."""
        return np.linalg.det(self.cell)
atom_data = [
[ 0, "X", "X", 0], # 0
[ 1, "H", "Hydrogen", 1.00794], # 1
[ 2, "He", "Helium", 4.002602], # 2
[ 3, "Li", "Lithium", 6.941], # 3
[ 4, "Be", "Beryllium", 9.012182], # 4
[ 5, "B", "Boron", 10.811], # 5
[ 6, "C", "Carbon", 12.0107], # 6
[ 7, "N", "Nitrogen", 14.0067], # 7
[ 8, "O", "Oxygen", 15.9994], # 8
[ 9, "F", "Fluorine", 18.9984032], # 9
[ 10, "Ne", "Neon", 20.1797], # 10
[ 11, "Na", "Sodium", 22.98976928], # 11
[ 12, "Mg", "Magnesium", 24.3050], # 12
[ 13, "Al", "Aluminium", 26.9815386], # 13
[ 14, "Si", "Silicon", 28.0855], # 14
[ 15, "P", "Phosphorus", 30.973762], # 15
[ 16, "S", "Sulfur", 32.065], # 16
[ 17, "Cl", "Chlorine", 35.453], # 17
[ 18, "Ar", "Argon", 39.948], # 18
[ 19, "K", "Potassium", 39.0983], # 19
[ 20, "Ca", "Calcium", 40.078], # 20
[ 21, "Sc", "Scandium", 44.955912], # 21
[ 22, "Ti", "Titanium", 47.867], # 22
[ 23, "V", "Vanadium", 50.9415], # 23
[ 24, "Cr", "Chromium", 51.9961], # 24
[ 25, "Mn", "Manganese", 54.938045], # 25
[ 26, "Fe", "Iron", 55.845], # 26
[ 27, "Co", "Cobalt", 58.933195], # 27
[ 28, "Ni", "Nickel", 58.6934], # 28
[ 29, "Cu", "Copper", 63.546], # 29
[ 30, "Zn", "Zinc", 65.38], # 30
[ 31, "Ga", "Gallium", 69.723], # 31
[ 32, "Ge", "Germanium", 72.64], # 32
[ 33, "As", "Arsenic", 74.92160], # 33
[ 34, "Se", "Selenium", 78.96], # 34
[ 35, "Br", "Bromine", 79.904], # 35
[ 36, "Kr", "Krypton", 83.798], # 36
[ 37, "Rb", "Rubidium", 85.4678], # 37
[ 38, "Sr", "Strontium", 87.62], # 38
[ 39, "Y", "Yttrium", 88.90585], # 39
[ 40, "Zr", "Zirconium", 91.224], # 40
[ 41, "Nb", "Niobium", 92.90638], # 41
[ 42, "Mo", "Molybdenum", 95.96], # 42
[ 43, "Tc", "Technetium", 0], # 43
[ 44, "Ru", "Ruthenium", 101.07], # 44
[ 45, "Rh", "Rhodium", 102.90550], # 45
[ 46, "Pd", "Palladium", 106.42], # 46
[ 47, "Ag", "Silver", 107.8682], # 47
[ 48, "Cd", "Cadmium", 112.411], # 48
[ 49, "In", "Indium", 114.818], # 49
[ 50, "Sn", "Tin", 118.710], # 50
[ 51, "Sb", "Antimony", 121.760], # 51
[ 52, "Te", "Tellurium", 127.60], # 52
[ 53, "I", "Iodine", 126.90447], # 53
[ 54, "Xe", "Xenon", 131.293], # 54
[ 55, "Cs", "Caesium", 132.9054519], # 55
[ 56, "Ba", "Barium", 137.327], # 56
[ 57, "La", "Lanthanum", 138.90547], # 57
[ 58, "Ce", "Cerium", 140.116], # 58
[ 59, "Pr", "Praseodymium", 140.90765], # 59
[ 60, "Nd", "Neodymium", 144.242], # 60
[ 61, "Pm", "Promethium", 0], # 61
[ 62, "Sm", "Samarium", 150.36], # 62
[ 63, "Eu", "Europium", 151.964], # 63
[ 64, "Gd", "Gadolinium", 157.25], # 64
[ 65, "Tb", "Terbium", 158.92535], # 65
[ 66, "Dy", "Dysprosium", 162.500], # 66
[ 67, "Ho", "Holmium", 164.93032], # 67
[ 68, "Er", "Erbium", 167.259], # 68
[ 69, "Tm", "Thulium", 168.93421], # 69
[ 70, "Yb", "Ytterbium", 173.054], # 70
[ 71, "Lu", "Lutetium", 174.9668], # 71
[ 72, "Hf", "Hafnium", 178.49], # 72
[ 73, "Ta", "Tantalum", 180.94788], # 73
[ 74, "W", "Tungsten", 183.84], # 74
[ 75, "Re", "Rhenium", 186.207], # 75
[ 76, "Os", "Osmium", 190.23], # 76
[ 77, "Ir", "Iridium", 192.217], # 77
[ 78, "Pt", "Platinum", 195.084], # 78
[ 79, "Au", "Gold", 196.966569], # 79
[ 80, "Hg", "Mercury", 200.59], # 80
[ 81, "Tl", "Thallium", 204.3833], # 81
[ 82, "Pb", "Lead", 207.2], # 82
[ 83, "Bi", "Bismuth", 208.98040], # 83
[ 84, "Po", "Polonium", 0], # 84
[ 85, "At", "Astatine", 0], # 85
[ 86, "Rn", "Radon", 0], # 86
[ 87, "Fr", "Francium", 0], # 87
[ 88, "Ra", "Radium", 0], # 88
[ 89, "Ac", "Actinium", 0], # 89
[ 90, "Th", "Thorium", 232.03806], # 90
[ 91, "Pa", "Protactinium", 231.03588], # 91
[ 92, "U", "Uranium", 238.02891], # 92
[ 93, "Np", "Neptunium", 0], # 93
[ 94, "Pu", "Plutonium", 0], # 94
[ 95, "Am", "Americium", 0], # 95
[ 96, "Cm", "Curium", 0], # 96
[ 97, "Bk", "Berkelium", 0], # 97
[ 98, "Cf", "Californium", 0], # 98
[ 99, "Es", "Einsteinium", 0], # 99
[100, "Fm", "Fermium", 0], # 100
[101, "Md", "Mendelevium", 0], # 101
[102, "No", "Nobelium", 0], # 102
[103, "Lr", "Lawrencium", 0], # 103
[104, "Rf", "Rutherfordium", 0], # 104
[105, "Db", "Dubnium", 0], # 105
[106, "Sg", "Seaborgium", 0], # 106
[107, "Bh", "Bohrium", 0], # 107
[108, "Hs", "Hassium", 0], # 108
[109, "Mt", "Meitnerium", 0], # 109
[110, "Ds", "Darmstadtium", 0], # 110
[111, "Rg", "Roentgenium", 0], # 111
[112, "Cn", "Copernicium", 0], # 112
[113, "Uut", "Ununtrium", 0], # 113
[114, "Uuq", "Ununquadium", 0], # 114
[115, "Uup", "Ununpentium", 0], # 115
[116, "Uuh", "Ununhexium", 0], # 116
[117, "Uus", "Ununseptium", 0], # 117
[118, "Uuo", "Ununoctium", 0], # 118
]
symbol_map = {
"H":1,
"He":2,
"Li":3,
"Be":4,
"B":5,
"C":6,
"N":7,
"O":8,
"F":9,
"Ne":10,
"Na":11,
"Mg":12,
"Al":13,
"Si":14,
"P":15,
"S":16,
"Cl":17,
"Ar":18,
"K":19,
"Ca":20,
"Sc":21,
"Ti":22,
"V":23,
"Cr":24,
"Mn":25,
"Fe":26,
"Co":27,
"Ni":28,
"Cu":29,
"Zn":30,
"Ga":31,
"Ge":32,
"As":33,
"Se":34,
"Br":35,
"Kr":36,
"Rb":37,
"Sr":38,
"Y":39,
"Zr":40,
"Nb":41,
"Mo":42,
"Tc":43,
"Ru":44,
"Rh":45,
"Pd":46,
"Ag":47,
"Cd":48,
"In":49,
"Sn":50,
"Sb":51,
"Te":52,
"I":53,
"Xe":54,
"Cs":55,
"Ba":56,
"La":57,
"Ce":58,
"Pr":59,
"Nd":60,
"Pm":61,
"Sm":62,
"Eu":63,
"Gd":64,
"Tb":65,
"Dy":66,
"Ho":67,
"Er":68,
"Tm":69,
"Yb":70,
"Lu":71,
"Hf":72,
"Ta":73,
"W":74,
"Re":75,
"Os":76,
"Ir":77,
"Pt":78,
"Au":79,
"Hg":80,
"Tl":81,
"Pb":82,
"Bi":83,
"Po":84,
"At":85,
"Rn":86,
"Fr":87,
"Ra":88,
"Ac":89,
"Th":90,
"Pa":91,
"U":92,
"Np":93,
"Pu":94,
"Am":95,
"Cm":96,
"Bk":97,
"Cf":98,
"Es":99,
"Fm":100,
"Md":101,
"No":102,
"Lr":103,
"Rf":104,
"Db":105,
"Sg":106,
"Bh":107,
"Hs":108,
"Mt":109,
"Ds":110,
"Rg":111,
"Cn":112,
"Uut":113,
"Uuq":114,
"Uup":115,
"Uuh":116,
"Uus":117,
"Uuo":118,
}
| [
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"numpy.linalg.det"
] | [((2269, 2296), 'numpy.array', 'np.array', (['cell'], {'dtype': 'float'}), '(cell, dtype=float)\n', (2277, 2296), True, 'import numpy as np\n'), ((2566, 2606), 'numpy.dot', 'np.dot', (['self.scaled_positions', 'self.cell'], {}), '(self.scaled_positions, self.cell)\n', (2572, 2606), True, 'import numpy as np\n'), ((2694, 2733), 'numpy.array', 'np.array', (['scaled_positions'], {'dtype': 'float'}), '(scaled_positions, dtype=float)\n', (2702, 2733), True, 'import numpy as np\n'), ((3813, 3860), 'numpy.array', 'np.array', (['[symbol_map[s] for s in self.symbols]'], {}), '([symbol_map[s] for s in self.symbols])\n', (3821, 3860), True, 'import numpy as np\n'), ((3950, 4011), 'numpy.array', 'np.array', (['[atom_data[symbol_map[s]][3] for s in self.symbols]'], {}), '([atom_data[symbol_map[s]][3] for s in self.symbols])\n', (3958, 4011), True, 'import numpy as np\n'), ((4086, 4110), 'numpy.linalg.det', 'np.linalg.det', (['self.cell'], {}), '(self.cell)\n', (4099, 4110), True, 'import numpy as np\n'), ((1247, 1274), 'numpy.array', 'np.array', (['cell'], {'dtype': 'float'}), '(cell, dtype=float)\n', (1255, 1274), True, 'import numpy as np\n'), ((1720, 1748), 'numpy.array', 'np.array', (['numbers'], {'dtype': 'int'}), '(numbers, dtype=int)\n', (1728, 1748), True, 'import numpy as np\n'), ((2495, 2519), 'numpy.linalg.inv', 'np.linalg.inv', (['self.cell'], {}), '(self.cell)\n', (2508, 2519), True, 'import numpy as np\n'), ((2948, 2977), 'numpy.array', 'np.array', (['masses'], {'dtype': 'float'}), '(masses, dtype=float)\n', (2956, 2977), True, 'import numpy as np\n'), ((3186, 3216), 'numpy.array', 'np.array', (['magmoms'], {'dtype': 'float'}), '(magmoms, dtype=float)\n', (3194, 3216), True, 'import numpy as np\n')] |
# python3 Steven
import random
import numpy as np
from svg.file import SVGFileV2
from svg.basic import clip_float, draw_only_path, add_style_path, draw_path
from svg.basic import draw_circle, draw_any, random_color
from svgFunction import circleFuc, getCirclePoints, heartFuc, getRectanglePoints
from svg.geo_transformation import rotation_pts_xy, rotation_pts_xy_point
from common import gImageOutputPath
def addNoise(x, y, alpha=2):
    """Return (x, y) with i.i.d. Gaussian jitter of scale ``alpha`` added."""
    jitter_x = alpha * np.random.randn(len(x))
    jitter_y = alpha * np.random.randn(len(y))
    return x + jitter_x, y + jitter_y
def drawOnePathcSVG(svg, ptX, ptY, width=1, onlyPath=True):
    """Draw the closed polyline through (ptX, ptY) as one SVG path.

    Args:
        svg: SVGFileV2-like object providing ``draw``.
        ptX, ptY: sequences of x / y coordinates (same length).
        width: stroke width, used only when onlyPath is False.
        onlyPath: if True, pass only the raw path-data string to svg.draw
            (a shared style set via add_style_path is expected); otherwise
            wrap it in a styled <path> with a random stroke color.
    """
    x = ptX[0]
    y = ptY[0]
    # absolute move to the first point, then line-to commands
    path = 'M %.1f %.1f L ' % (x, y)
    for x, y in zip(ptX, ptY):
        path = path + ' ' + str(clip_float(x)) + ' ' + str(clip_float(y))
    # 'z' closes the outline back to the starting point
    path = path + 'z'
    if onlyPath:
        # NOTE(review): the bare path string is passed to svg.draw() here,
        # while draw_heart_curve() wraps it in draw_only_path(path) --
        # confirm svg.draw accepts a raw "d" string.
        svg.draw((path))
    else:
        svg.draw(draw_path(path, stroke_width=width, color=random_color()))
def getCirclePtsSVG(svg, r=1, N=10, offsetX=50, offsetY=50, noise=True, onlyPath=True):
    """Sample N points of a circle of radius r, shifted by (offsetX, offsetY).

    Args:
        svg: unused here; kept for call-site compatibility.
        r: circle radius.
        N: number of sample points.
        offsetX, offsetY: translation applied to every point.
        noise: if True, jitter the points with Gaussian noise (addNoise).
        onlyPath: unused here; kept for call-site compatibility.

    Returns:
        Tuple (ptX, ptY) of coordinate arrays.
    """
    ptX, ptY = getCirclePoints(r=r, N=N, func=circleFuc)
    ptX = ptX + offsetX
    ptY = ptY + offsetY
    if noise:
        ptX, ptY = addNoise(ptX, ptY)
    return ptX, ptY
def drawRandomPath():
    """Render a sequence of drifting, jittered circle outlines to an SVG file.

    Writes ``randomShapePath.svg`` under gImageOutputPath. Each iteration
    grows the radius and shifts the center randomly before drawing one
    noisy circle path. Output is nondeterministic (uses random noise).
    """
    file = gImageOutputPath + r'\randomShapePath.svg'
    H, W = 500, 1000
    svg = SVGFileV2(file, W, H)
    # single-color mode emits raw paths with one shared style;
    # otherwise each path gets its own random color
    singleColor = False
    if singleColor:
        onlyPath = True
        color = '#33FFC9'
        svg.draw(add_style_path(stroke=color, stroke_width=1, fill='transparent'))
    else:
        onlyPath = False
    times = 200
    r = 1
    offsetX = 50  # W//2 #
    offsetY = H // 2
    for _ in range(times):
        # grow the radius and drift the center a little every iteration
        r = r + random.random() * 2
        # r = r + random.normalvariate(mu=0,sigma=1)*8
        offsetX = offsetX + random.random() * 5  # 8
        # offsetX = offsetX + random.normalvariate(mu=0,sigma=1)*1
        # offsetY = offsetY + random.random()*1
        # offsetX = 50 + random.random()*10
        # offsetY = 50 + random.random()*2
        ptX, ptY = getCirclePtsSVG(svg, r=r, N=80, offsetX=offsetX, offsetY=offsetY, noise=True, onlyPath=onlyPath)
        drawOnePathcSVG(svg, ptX, ptY, onlyPath=onlyPath)
def draw_heart_curve():
    """Render a filled heart curve to ``heartPath.svg`` under gImageOutputPath.

    Samples the upper and lower halves of the heart function (heartFuc),
    flips y for the SVG coordinate system, and draws one closed path.
    """
    file = gImageOutputPath + r'\heartPath.svg'
    H, W = 100, 100
    svg = SVGFileV2(file, W, H)
    offsetX = W // 2
    offsetY = H // 2
    svg.draw(add_style_path(stroke='red', stroke_width=0.5, fill='red'))
    N = 100
    r = 30
    # NOTE(review): the start point's y is not negated like the sampled
    # points below -- confirm this is the intended anchor in SVG coords.
    path = 'M %.1f %.1f L ' % (0 + offsetX, heartFuc(0, r) + offsetY)  # start point
    x = np.linspace(-r, r, N)
    y = heartFuc(x, r=r)  # Up part points of heart curve, set sqrt value positive
    xr = np.flip(x)  # Down part points of heart curve, set sqrt value negative
    yr = heartFuc(xr, r=r, up=False)
    x = np.concatenate((x, xr), axis=0)
    y = np.concatenate((y, yr), axis=0) * -1  # *-1 svg coordinate system different from standard cod system
    # print('x=', x)
    # print('y=', y)
    x = x + offsetX
    y = y + offsetY
    for i, j in zip(x, y):
        path = path + ' ' + str(clip_float(i)) + ' ' + str(clip_float(j))
    svg.draw(draw_only_path(path))
    svg.close()
def drawRandomCirclePath(svg):
    """Draw concentric circle art onto ``svg`` in one of several styles.

    The hard-coded local ``style`` selects the variant:
    - styles[0]: concentric circle outlines of growing radius.
    - styles[1] (active): random subsets of circle sample points drawn as
      small dots of random radius/color.
    - styles[2] is declared but has no implementation branch below.

    Output is nondeterministic (random radii, sampling and colors).
    """
    W, H = svg.get_size()
    styles = ['circles', 'circle points', 'circle points random']
    onlyPath = False
    times = 100
    r = 2
    offsetX = W // 2
    offsetY = H // 2
    style = styles[1]
    if style == styles[0]:
        for _ in range(times):
            r = r + random.random() * 8
            ptX, ptY = getCirclePtsSVG(svg, r=r, N=200, offsetX=offsetX, offsetY=offsetY, noise=False, onlyPath=onlyPath)
            drawOnePathcSVG(svg, ptX, ptY, onlyPath=onlyPath)
    elif style == styles[1]:
        times = 10
        for _ in range(times):
            r = r + random.random() * 18
            ptX, ptY = getCirclePtsSVG(svg, r=r, N=20, offsetX=offsetX, offsetY=offsetY, noise=False, onlyPath=onlyPath)
            # draw more dots on larger circles
            ptNumber = int(5 * r)
            # ptX = np.random.choice(ptX, ptNumber)
            ptX = ptX.reshape((len(ptX), 1))
            ptY = ptY.reshape((len(ptY), 1))
            pts = np.concatenate((ptX, ptY), axis=1)
            # print(ptX.shape, pts.shape)
            # pick ptNumber random points (with replacement) from the circle
            ptsIndex = np.random.randint(len(pts), size=ptNumber)
            # print('ptsIndex=', ptsIndex, len(pts))
            pts = pts[ptsIndex, :]
            for i in pts:
                # print('i=', i)
                # ra = 0.5
                # random dot radius in [0.2, 3.0)
                ra = np.random.random() * (3 - 0.2) + 0.2
                ra = clip_float(ra)
                x = clip_float(i[0])
                y = clip_float(i[1])
                svg.draw(draw_circle(x, y, radius=ra, color=random_color()))
def getRectanglePtsSVG(w, h, N=10, noise=True):
    """Sample N points along a w-by-h rectangle outline, optionally jittered.

    Returns the tuple (ptX, ptY, center) produced by getRectanglePoints,
    with Gaussian noise (alpha=0.8) applied to the coordinates when
    ``noise`` is True.
    """
    pts_x, pts_y, center = getRectanglePoints(w=w, h=h, N=N)
    if not noise:
        return pts_x, pts_y, center
    pts_x, pts_y = addNoise(pts_x, pts_y, alpha=0.8)
    return pts_x, pts_y, center
def drawRandomRectanglePath(svg):
    """Draw nested, optionally rotated/jittered rectangle outlines onto svg.

    The hard-coded local ``style`` selects the variant:
    - styles[0]: growing noisy rectangles anchored near the top-left.
    - styles[1]: growing rectangles rotated about the origin over one turn.
    - styles[2] (active): drifting rectangles rotated about their own center.

    Output is nondeterministic (random sizes/offsets).
    Removed a leftover debug ``print(w, H)`` at the end of the function.
    """
    W, H = svg.get_size()
    styles = ['rectangle', 'rectangle roation', 'rotataion Center']
    onlyPath = False
    times = 80
    w = 2
    h = w
    offsetX = 5
    offsetY = 5
    style = styles[2]
    if style == styles[0]:
        for _ in range(times):
            w = w + random.random() * 4
            h = w
            ptX, ptY, _ = getRectanglePtsSVG(w, h, N=20, noise=True)
            ptX = ptX + offsetX
            ptY = ptY + offsetY
            drawOnePathcSVG(svg, ptX, ptY, onlyPath=onlyPath)
    elif style == styles[1]:
        times = 150
        offsetX = W // 2
        offsetY = H // 2
        theta = 0
        for _ in range(times):
            w = w + random.random() * 1
            h = w
            ptX, ptY, center = getRectanglePtsSVG(w, h, N=20, noise=False)
            # rotate about the origin, sweeping one full turn over all frames
            ptX, ptY = rotation_pts_xy(ptX, ptY, theta)
            theta = theta + 2 * np.pi / (times - 1)
            ptX = ptX + offsetX
            ptY = ptY + offsetY
            drawOnePathcSVG(svg, ptX, ptY, width=0.5, onlyPath=onlyPath)
    elif style == styles[2]:
        times = 120
        offsetX = 20  # W//2
        offsetY = 20  # H//2
        theta = 0
        for _ in range(times):
            # drift right and grow a little every frame
            offsetX = offsetX + random.random() * 1  # 8
            w = w + random.random() * 1
            h = w
            ptX, ptY, center = getRectanglePtsSVG(w, h, N=30, noise=True)
            # rotate each rectangle about its own center
            ptX, ptY = rotation_pts_xy_point(ptX, ptY, center, theta)
            theta = theta + 2 * np.pi / (times - 1)
            ptX = ptX + offsetX
            ptY = ptY + offsetY
            drawOnePathcSVG(svg, ptX, ptY, width=0.5, onlyPath=onlyPath)
def drawAllTypePath(svg):
    """Draw a demo sheet of SVG <path> elements onto ``svg``.

    Renders a 3x3 grid of cubic Bezier curves with increasing "bulge" per
    row, followed by one green elliptical-arc path, all inside a <g> group.

    BUGFIX: the sixth curve previously started with 'MM 130 30 ...';
    'MM' is not a valid SVG path command (moveto is a single 'M').
    """
    svg.set_title('draw path')
    g = svg.draw(draw_any('g', opacity=1.0))
    anyDict = {}
    anyDict['stroke'] = 'black'
    anyDict['fill'] = 'transparent'
    # 3x3 grid of cubic Bezier curves; each row bulges further downward
    bezier_paths = [
        'M 10 10 C 20 20, 40 20, 50 10',
        'M 70 10 C 70 20, 110 20, 110 10',
        'M 130 10 C 120 20, 180 20, 170 10',
        'M 10 30 C 20 50, 40 50, 50 30',
        'M 70 30 C 70 50, 110 50, 110 30',
        'M 130 30 C 120 50, 180 50, 170 30',
        'M 10 50 C 20 80, 40 80, 50 50',
        'M 70 50 C 70 80, 110 80, 110 50',
        'M 130 50 C 120 80, 180 80, 170 50',
    ]
    for d in bezier_paths:
        anyDict['d'] = d
        svg.draw_node(g, draw_any('path', **anyDict))
    # elliptical-arc demo: line segments joined by two arcs
    anyDict['d'] = 'M 10 315 \
           L 110 215 \
           A 30 50 0 0 1 162.55 162.45 \
           L 172.55 152.45 \
           A 30 50 -45 0 1 215.1 109.9 \
           L 315 10'
    anyDict['fill'] = 'green'
    anyDict['stroke-width'] = '2'
    svg.draw_node(g, draw_any('path', **anyDict))
if __name__ == '__main__':
    # Demo entry point: renders one of the drawing routines above into an
    # SVG file under gImageOutputPath. Swap the commented calls below to
    # try the other demos.
    # drawRandomPath()
    # draw_heart_curve()
    file = gImageOutputPath + r'\randomShapePath.svg'
    svg = SVGFileV2(file, W=200, H=200, border=True)
    drawRandomCirclePath(svg)
    # drawRandomRectanglePath(svg)
    # drawAllTypePath(svg)
| [
"svgFunction.getRectanglePoints",
"numpy.flip",
"numpy.random.random",
"svgFunction.heartFuc",
"svg.basic.draw_any",
"svg.basic.add_style_path",
"svg.geo_transformation.rotation_pts_xy_point",
"numpy.linspace",
"svgFunction.getCirclePoints",
"svg.basic.draw_only_path",
"random.random",
"numpy.... | [((1031, 1072), 'svgFunction.getCirclePoints', 'getCirclePoints', ([], {'r': 'r', 'N': 'N', 'func': 'circleFuc'}), '(r=r, N=N, func=circleFuc)\n', (1046, 1072), False, 'from svgFunction import circleFuc, getCirclePoints, heartFuc, getRectanglePoints\n'), ((1303, 1324), 'svg.file.SVGFileV2', 'SVGFileV2', (['file', 'W', 'H'], {}), '(file, W, H)\n', (1312, 1324), False, 'from svg.file import SVGFileV2\n'), ((2268, 2289), 'svg.file.SVGFileV2', 'SVGFileV2', (['file', 'W', 'H'], {}), '(file, W, H)\n', (2277, 2289), False, 'from svg.file import SVGFileV2\n'), ((2524, 2545), 'numpy.linspace', 'np.linspace', (['(-r)', 'r', 'N'], {}), '(-r, r, N)\n', (2535, 2545), True, 'import numpy as np\n'), ((2554, 2570), 'svgFunction.heartFuc', 'heartFuc', (['x'], {'r': 'r'}), '(x, r=r)\n', (2562, 2570), False, 'from svgFunction import circleFuc, getCirclePoints, heartFuc, getRectanglePoints\n'), ((2640, 2650), 'numpy.flip', 'np.flip', (['x'], {}), '(x)\n', (2647, 2650), True, 'import numpy as np\n'), ((2727, 2754), 'svgFunction.heartFuc', 'heartFuc', (['xr'], {'r': 'r', 'up': '(False)'}), '(xr, r=r, up=False)\n', (2735, 2754), False, 'from svgFunction import circleFuc, getCirclePoints, heartFuc, getRectanglePoints\n'), ((2764, 2795), 'numpy.concatenate', 'np.concatenate', (['(x, xr)'], {'axis': '(0)'}), '((x, xr), axis=0)\n', (2778, 2795), True, 'import numpy as np\n'), ((4737, 4770), 'svgFunction.getRectanglePoints', 'getRectanglePoints', ([], {'w': 'w', 'h': 'h', 'N': 'N'}), '(w=w, h=h, N=N)\n', (4755, 4770), False, 'from svgFunction import circleFuc, getCirclePoints, heartFuc, getRectanglePoints\n'), ((8289, 8331), 'svg.file.SVGFileV2', 'SVGFileV2', (['file'], {'W': '(200)', 'H': '(200)', 'border': '(True)'}), '(file, W=200, H=200, border=True)\n', (8298, 8331), False, 'from svg.file import SVGFileV2\n'), ((2347, 2405), 'svg.basic.add_style_path', 'add_style_path', ([], {'stroke': '"""red"""', 'stroke_width': '(0.5)', 'fill': '"""red"""'}), "(stroke='red', 
stroke_width=0.5, fill='red')\n", (2361, 2405), False, 'from svg.basic import clip_float, draw_only_path, add_style_path, draw_path\n'), ((2804, 2835), 'numpy.concatenate', 'np.concatenate', (['(y, yr)'], {'axis': '(0)'}), '((y, yr), axis=0)\n', (2818, 2835), True, 'import numpy as np\n'), ((3104, 3124), 'svg.basic.draw_only_path', 'draw_only_path', (['path'], {}), '(path)\n', (3118, 3124), False, 'from svg.basic import clip_float, draw_only_path, add_style_path, draw_path\n'), ((6697, 6723), 'svg.basic.draw_any', 'draw_any', (['"""g"""'], {'opacity': '(1.0)'}), "('g', opacity=1.0)\n", (6705, 6723), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((6884, 6911), 'svg.basic.draw_any', 'draw_any', (['"""path"""'], {}), "('path', **anyDict)\n", (6892, 6911), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((6987, 7014), 'svg.basic.draw_any', 'draw_any', (['"""path"""'], {}), "('path', **anyDict)\n", (6995, 7014), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((7092, 7119), 'svg.basic.draw_any', 'draw_any', (['"""path"""'], {}), "('path', **anyDict)\n", (7100, 7119), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((7193, 7220), 'svg.basic.draw_any', 'draw_any', (['"""path"""'], {}), "('path', **anyDict)\n", (7201, 7220), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((7296, 7323), 'svg.basic.draw_any', 'draw_any', (['"""path"""'], {}), "('path', **anyDict)\n", (7304, 7323), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((7402, 7429), 'svg.basic.draw_any', 'draw_any', (['"""path"""'], {}), "('path', **anyDict)\n", (7410, 7429), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((7503, 7530), 'svg.basic.draw_any', 'draw_any', (['"""path"""'], {}), "('path', **anyDict)\n", (7511, 7530), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((7606, 7633), 'svg.basic.draw_any', 'draw_any', 
(['"""path"""'], {}), "('path', **anyDict)\n", (7614, 7633), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((7711, 7738), 'svg.basic.draw_any', 'draw_any', (['"""path"""'], {}), "('path', **anyDict)\n", (7719, 7738), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((8118, 8145), 'svg.basic.draw_any', 'draw_any', (['"""path"""'], {}), "('path', **anyDict)\n", (8126, 8145), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((1437, 1501), 'svg.basic.add_style_path', 'add_style_path', ([], {'stroke': 'color', 'stroke_width': '(1)', 'fill': '"""transparent"""'}), "(stroke=color, stroke_width=1, fill='transparent')\n", (1451, 1501), False, 'from svg.basic import clip_float, draw_only_path, add_style_path, draw_path\n'), ((760, 773), 'svg.basic.clip_float', 'clip_float', (['y'], {}), '(y)\n', (770, 773), False, 'from svg.basic import clip_float, draw_only_path, add_style_path, draw_path\n'), ((1657, 1672), 'random.random', 'random.random', ([], {}), '()\n', (1670, 1672), False, 'import random\n'), ((1761, 1776), 'random.random', 'random.random', ([], {}), '()\n', (1774, 1776), False, 'import random\n'), ((2475, 2489), 'svgFunction.heartFuc', 'heartFuc', (['(0)', 'r'], {}), '(0, r)\n', (2483, 2489), False, 'from svgFunction import circleFuc, getCirclePoints, heartFuc, getRectanglePoints\n'), ((3075, 3088), 'svg.basic.clip_float', 'clip_float', (['j'], {}), '(j)\n', (3085, 3088), False, 'from svg.basic import clip_float, draw_only_path, add_style_path, draw_path\n'), ((4100, 4134), 'numpy.concatenate', 'np.concatenate', (['(ptX, ptY)'], {'axis': '(1)'}), '((ptX, ptY), axis=1)\n', (4114, 4134), True, 'import numpy as np\n'), ((5721, 5753), 'svg.geo_transformation.rotation_pts_xy', 'rotation_pts_xy', (['ptX', 'ptY', 'theta'], {}), '(ptX, ptY, theta)\n', (5736, 5753), False, 'from svg.geo_transformation import rotation_pts_xy, rotation_pts_xy_point\n'), ((909, 923), 'svg.basic.random_color', 'random_color', 
([], {}), '()\n', (921, 923), False, 'from svg.basic import draw_circle, draw_any, random_color\n'), ((3459, 3474), 'random.random', 'random.random', ([], {}), '()\n', (3472, 3474), False, 'import random\n'), ((4498, 4512), 'svg.basic.clip_float', 'clip_float', (['ra'], {}), '(ra)\n', (4508, 4512), False, 'from svg.basic import clip_float, draw_only_path, add_style_path, draw_path\n'), ((4533, 4549), 'svg.basic.clip_float', 'clip_float', (['i[0]'], {}), '(i[0])\n', (4543, 4549), False, 'from svg.basic import clip_float, draw_only_path, add_style_path, draw_path\n'), ((4570, 4586), 'svg.basic.clip_float', 'clip_float', (['i[1]'], {}), '(i[1])\n', (4580, 4586), False, 'from svg.basic import clip_float, draw_only_path, add_style_path, draw_path\n'), ((5182, 5197), 'random.random', 'random.random', ([], {}), '()\n', (5195, 5197), False, 'import random\n'), ((6313, 6359), 'svg.geo_transformation.rotation_pts_xy_point', 'rotation_pts_xy_point', (['ptX', 'ptY', 'center', 'theta'], {}), '(ptX, ptY, center, theta)\n', (6334, 6359), False, 'from svg.geo_transformation import rotation_pts_xy, rotation_pts_xy_point\n'), ((733, 746), 'svg.basic.clip_float', 'clip_float', (['x'], {}), '(x)\n', (743, 746), False, 'from svg.basic import clip_float, draw_only_path, add_style_path, draw_path\n'), ((3048, 3061), 'svg.basic.clip_float', 'clip_float', (['i'], {}), '(i)\n', (3058, 3061), False, 'from svg.basic import clip_float, draw_only_path, add_style_path, draw_path\n'), ((3763, 3778), 'random.random', 'random.random', ([], {}), '()\n', (3776, 3778), False, 'import random\n'), ((5584, 5599), 'random.random', 'random.random', ([], {}), '()\n', (5597, 5599), False, 'import random\n'), ((4440, 4458), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4456, 4458), True, 'import numpy as np\n'), ((6132, 6147), 'random.random', 'random.random', ([], {}), '()\n', (6145, 6147), False, 'import random\n'), ((6177, 6192), 'random.random', 'random.random', ([], {}), '()\n', (6190, 
6192), False, 'import random\n'), ((4647, 4661), 'svg.basic.random_color', 'random_color', ([], {}), '()\n', (4659, 4661), False, 'from svg.basic import draw_circle, draw_any, random_color\n')] |
import pygame
from pygame.locals import *
import numpy as np
import sys
class Board:
    """Morris-style game board rendered with pygame.

    Board state is a 7x7 numpy grid where 0 = empty playable spot,
    1 = white pawn, 2 = black pawn, 3 = not part of the board.
    """

    def __init__(self, width, height):
        """Create the window and initialise the empty board state."""
        pygame.init()
        self.width = width
        self.height = height
        self.wstep = self.width / 8   # horizontal grid spacing
        self.hstep = self.height / 8  # vertical grid spacing
        self.whitePlays = True        # white moves first
        self.screen = pygame.display.set_mode((width, height), 0, 32)
        # background, line, white-pawn, black-pawn colors
        self.colors = [(255, 255, 255), (0, 0, 0), (255, 178, 102), (80, 40, 0)]
        # [row, column] of every playable intersection
        self.empty_board = [[0, 0], [0, 3], [0, 6],
                            [1, 1], [1, 3], [1, 5],
                            [2, 2], [2, 3], [2, 4],
                            [3, 0], [3, 1], [3, 2], [3, 4], [3, 5], [3, 6],
                            [4, 2], [4, 3], [4, 4],
                            [5, 1], [5, 3], [5, 5],
                            [6, 0], [6, 3], [6, 6]]
        self.board = np.ones((7, 7)) * 3  # 3 = unplayable by default
        for coords in self.empty_board:
            self.board[coords[0], coords[1]] = 0  # mark playable spots empty

    def draw_board(self):
        """Draw the three concentric squares, connecting lines and spot markers."""
        # Consistency fix: reuse the spacings computed in __init__ instead of
        # recomputing width/8 and height/8 locally.
        wstep = self.wstep
        hstep = self.hstep
        self.screen.fill(self.colors[0])
        for i in range(1, 4):
            pygame.draw.rect(self.screen, self.colors[1],
                             [wstep*i, hstep*i, wstep*(6-2*(i-1)), hstep*(6-2*(i-1))], 3)
        pygame.draw.line(self.screen, self.colors[1], [wstep, hstep*4], [wstep*3, hstep*4], 3)
        pygame.draw.line(self.screen, self.colors[1], [wstep*5, hstep*4], [wstep*7, hstep*4], 3)
        pygame.draw.line(self.screen, self.colors[1], [wstep*4, hstep], [wstep*4, hstep*3], 3)
        pygame.draw.line(self.screen, self.colors[1], [wstep*4, hstep*5], [wstep*4, hstep*7], 3)
        for coord in self.empty_board:
            self.draw_coord(coord, 4, self.colors[1])

    # draws to the coordinates [row, column] (starts by 1)
    def draw_coord(self, coords, radius, col):
        """Draw a filled circle with a black outline at board coords [row, col]."""
        y = coords[0]
        x = coords[1]
        h = (y + 1) * self.hstep
        w = (x + 1) * self.wstep
        pygame.draw.circle(self.screen, col, [w, h], radius)
        pygame.draw.circle(self.screen, (0, 0, 0), [w, h], radius, 1)

    def set_pawn(self, coords):
        """Place the current player's pawn at coords and pass the turn."""
        player = 1 if self.whitePlays else 2
        self.draw_coord(coords, 15, self.colors[player + 1])
        self.board[coords[0], coords[1]] = player
        self.whitePlays = not self.whitePlays

    def play_move(self):
        """Let the (dummy) computer opponent answer."""
        self.dummy_thinking()

    def dummy_thinking(self):
        """Put a pawn on the first empty playable spot found (row-major scan)."""
        for i, row in enumerate(self.board):
            for j, spot in enumerate(row):
                if spot == 0:
                    self.set_pawn([i, j])
                    return

    def start(self):
        """Main loop: draw the board, handle clicks, let the computer reply."""
        self.draw_board()
        while True:
            for event in pygame.event.get():
                if event.type == pygame.MOUSEBUTTONUP:
                    pos = pygame.mouse.get_pos()
                    # detect clicked positions and set pawns
                    for coords in self.empty_board:
                        w = (coords[1] + 1) * self.wstep
                        h = (coords[0] + 1) * self.hstep
                        if w - self.wstep/3 < pos[0] < w + self.wstep/3 and \
                                h - self.hstep/3 < pos[1] < h + self.hstep/3:
                            # BUG FIX: only play on an empty spot; the original
                            # let a click overwrite an existing pawn.
                            if self.board[coords[0], coords[1]] == 0:
                                self.set_pawn(coords)
                                # reply
                                self.play_move()
                                print("ok")
                            break
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
            pygame.display.update()
| [
"pygame.draw.circle",
"numpy.ones",
"pygame.init",
"pygame.draw.line",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.quit",
"pygame.mouse.get_pos",
"pygame.draw.rect",
"sys.exit",
"pygame.display.update"
] | [((134, 147), 'pygame.init', 'pygame.init', ([], {}), '()\n', (145, 147), False, 'import pygame\n'), ((333, 380), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height)', '(0)', '(32)'], {}), '((width, height), 0, 32)\n', (356, 380), False, 'import pygame\n'), ((1275, 1372), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', 'self.colors[1]', '[wstep, hstep * 4]', '[wstep * 3, hstep * 4]', '(3)'], {}), '(self.screen, self.colors[1], [wstep, hstep * 4], [wstep * \n 3, hstep * 4], 3)\n', (1291, 1372), False, 'import pygame\n'), ((1370, 1471), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', 'self.colors[1]', '[wstep * 5, hstep * 4]', '[wstep * 7, hstep * 4]', '(3)'], {}), '(self.screen, self.colors[1], [wstep * 5, hstep * 4], [\n wstep * 7, hstep * 4], 3)\n', (1386, 1471), False, 'import pygame\n'), ((1467, 1564), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', 'self.colors[1]', '[wstep * 4, hstep]', '[wstep * 4, hstep * 3]', '(3)'], {}), '(self.screen, self.colors[1], [wstep * 4, hstep], [wstep * \n 4, hstep * 3], 3)\n', (1483, 1564), False, 'import pygame\n'), ((1562, 1663), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', 'self.colors[1]', '[wstep * 4, hstep * 5]', '[wstep * 4, hstep * 7]', '(3)'], {}), '(self.screen, self.colors[1], [wstep * 4, hstep * 5], [\n wstep * 4, hstep * 7], 3)\n', (1578, 1663), False, 'import pygame\n'), ((1968, 2020), 'pygame.draw.circle', 'pygame.draw.circle', (['self.screen', 'col', '[w, h]', 'radius'], {}), '(self.screen, col, [w, h], radius)\n', (1986, 2020), False, 'import pygame\n'), ((2029, 2090), 'pygame.draw.circle', 'pygame.draw.circle', (['self.screen', '(0, 0, 0)', '[w, h]', 'radius', '(1)'], {}), '(self.screen, (0, 0, 0), [w, h], radius, 1)\n', (2047, 2090), False, 'import pygame\n'), ((872, 887), 'numpy.ones', 'np.ones', (['(7, 7)'], {}), '((7, 7))\n', (879, 887), True, 'import numpy as np\n'), ((1159, 1289), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', 
'self.colors[1]', '[wstep * i, hstep * i, wstep * (6 - 2 * (i - 1)), hstep * (6 - 2 * (i - 1))]', '(3)'], {}), '(self.screen, self.colors[1], [wstep * i, hstep * i, wstep *\n (6 - 2 * (i - 1)), hstep * (6 - 2 * (i - 1))], 3)\n', (1175, 1289), False, 'import pygame\n'), ((2757, 2775), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2773, 2775), False, 'import pygame\n'), ((3544, 3567), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3565, 3567), False, 'import pygame\n'), ((2858, 2880), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (2878, 2880), False, 'import pygame\n'), ((3487, 3500), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3498, 3500), False, 'import pygame\n'), ((3521, 3531), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3529, 3531), False, 'import sys\n')] |
# Runs a simple Metropolis-Hastings (ie MCMC) algoritm to simulate an
# exponential distribution, which has the probability density
# p(x)=exp(-x/m), where m>0.
#
# Author: <NAME>, 2019.
# Website: hpaulkeeler.com
# Repository: github.com/hpaulkeeler/posts
import numpy as np  # arrays and random number generation
import matplotlib.pyplot as plt  # plotting

plt.close('all')  # close any stale figures

numbSim = 10 ** 4  # number of random variables simulated
numbSteps = 25     # number of steps for the Markov process
numbBins = 50      # number of bins for the histogram
sigma = 1          # standard deviation of the normal proposal step
m = 2  # parameter (i.e. mean) of the distribution to be simulated


def fun_p(x):
    """Target density: exp(-x/m)/m for x >= 0, and 0 otherwise (works elementwise)."""
    value = np.exp(-x / m) / m
    return value * (x >= 0)
xRand = np.random.uniform(0, 1, numbSim)  # random initial values
probCurrent = fun_p(xRand)  # current transition probabilities

for jj in range(numbSteps):
    # take a (normally distributed) random step
    zRand = xRand + sigma * np.random.normal(0, 1, numbSim)
    probProposal = fun_p(zRand)  # proposed probability

    # acceptance/rejection step
    booleAccept = np.random.uniform(0, 1, numbSim) < probProposal / probCurrent
    # update state of random walk / Markov chain
    xRand[booleAccept] = zRand[booleAccept]
    # update transition probabilities
    probCurrent[booleAccept] = probProposal[booleAccept]

# histogram section: empirical probability density
# BUG FIX: `density` was passed the *type* `bool` (which only worked because a
# type object is truthy); pass True explicitly to normalise the histogram.
pdfEmp, binEdges = np.histogram(xRand, bins=numbBins, density=True)
xValues = (binEdges[1:] + binEdges[:-1]) / 2  # mid-points of bins
# analytic solution of the probability density
pdfExact = fun_p(xValues)

# Plotting
plt.plot(xValues, pdfExact)
plt.scatter(xValues, pdfEmp, marker='x', c='r')
plt.grid(True)
plt.xlabel('x')
plt.ylabel('p(x)')
plt.show()
| [
"numpy.random.normal",
"numpy.histogram",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.pyplot.scatter",
"numpy.random.uniform",
"matplotlib.pyplot.show"
] | [((386, 402), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (395, 402), True, 'import matplotlib.pyplot as plt\n'), ((803, 835), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'numbSim'], {}), '(0, 1, numbSim)\n', (820, 835), True, 'import numpy as np\n'), ((1594, 1642), 'numpy.histogram', 'np.histogram', (['xRand'], {'bins': 'numbBins', 'density': 'bool'}), '(xRand, bins=numbBins, density=bool)\n', (1606, 1642), True, 'import numpy as np\n'), ((1795, 1822), 'matplotlib.pyplot.plot', 'plt.plot', (['xValues', 'pdfExact'], {}), '(xValues, pdfExact)\n', (1803, 1822), True, 'import matplotlib.pyplot as plt\n'), ((1823, 1870), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xValues', 'pdfEmp'], {'marker': '"""x"""', 'c': '"""r"""'}), "(xValues, pdfEmp, marker='x', c='r')\n", (1834, 1870), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1886), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1880, 1886), True, 'import matplotlib.pyplot as plt\n'), ((1888, 1903), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1898, 1903), True, 'import matplotlib.pyplot as plt\n'), ((1905, 1923), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""p(x)"""'], {}), "('p(x)')\n", (1915, 1923), True, 'import matplotlib.pyplot as plt\n'), ((1925, 1935), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1933, 1935), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1304), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'numbSim'], {}), '(0, 1, numbSim)\n', (1289, 1304), True, 'import numpy as np\n'), ((739, 753), 'numpy.exp', 'np.exp', (['(-x / m)'], {}), '(-x / m)\n', (745, 753), True, 'import numpy as np\n'), ((983, 1014), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'numbSim'], {}), '(0, 1, numbSim)\n', (999, 1014), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
# Use SimHei so CJK text in titles/labels renders instead of empty boxes.
plt.rcParams['font.sans-serif'] = 'SimHei'
# Keep the minus sign renderable while a CJK font is active.
plt.rcParams['axes.unicode_minus'] = False
def make_a_figure():
    """Plot y=x and y=x^2, annotate the point (2, 4), save to pic/line.png and show.

    Fixes: the Y-axis label was set with plt.xlabel (overwriting 'X'), and the
    mkdir/savefig/show block was duplicated verbatim.
    """
    data = np.arange(10)
    plt.figure(figsize=(8, 6))
    plt.title('line')
    plt.xlabel('X')
    plt.ylabel('Y')  # was plt.xlabel('Y'), which clobbered the X label
    plt.xlim(0, 5)
    plt.ylim(0, 100)
    plt.xticks(range(0, 12, 2))
    plt.yticks(range(0, 120, 20))
    plt.plot(data, data)  # y=x
    plt.plot(data, data ** 2)  # y=x*x
    plt.plot([2], [4], 'o')  # mark the intersection point
    plt.annotate('(2,4)', xy=(2, 4), xytext=(2, 4))
    os.makedirs('pic', exist_ok=True)  # idiomatic replacement for exists+mkdir
    plt.savefig('pic/line.png')
    plt.show()
def make_sub_figure():
    """Two stacked subplots: powers of x on top, sin/cos below.

    Fixes: both Y-axis labels were set with plt.xlabel, overwriting the
    X-axis labels.
    """
    data = np.arange(0, np.pi * 2, 0.01)
    fig = plt.figure(figsize=(8, 6))  # canvas size

    fig.add_subplot(2, 1, 1)
    plt.title('line')
    plt.xlabel('X')
    plt.ylabel('Y')  # was plt.xlabel('Y')
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.xticks(np.arange(0, 1.2, 0.2))
    plt.yticks(np.arange(0, 1.2, 0.2))
    plt.plot(data, data ** 2)
    plt.plot(data, data ** 4)
    plt.legend(['y=x^2', 'y=x^4'])

    fig.add_subplot(2, 1, 2)
    plt.title('sin/cos')
    plt.xlabel('rad')
    plt.ylabel('value')  # was plt.xlabel('value')
    plt.xlim(0, np.pi * 2)
    plt.ylim(-1, 1)
    plt.xticks(np.arange(0, np.pi * 2.5, np.pi * 0.5))
    plt.yticks(np.arange(-1, 1.5, 0.5))
    plt.plot(data, np.sin(data))
    plt.plot(data, np.cos(data))
    plt.legend(['sin', 'cos'])
    plt.show()
def make_a_bar():
    """Vertical and horizontal bar charts of random per-weekday totals."""
    week = np.array(['周一', '周二', '周三', '周四', '周五', '周六', '周七'])
    total = np.random.randint(1000, 5000, size=7)
    # NOTE(review): only 5 RGB rows are generated for 7 bars -- matplotlib
    # cycles the color list; confirm this is intended.
    color = np.random.rand(15).reshape(5, 3)

    fig = plt.figure(figsize=(8, 6))
    fig.add_subplot(2, 1, 1)
    plt.bar(week, total, color=color)
    fig.add_subplot(2, 1, 2)
    plt.barh(week, total, color=color)
    plt.show()
def make_a_hist():
    """Histogram of three uniform-random samples (3k/4k/5k points) with a legend."""
    sample_sizes = [3000, 4000, 5000]
    samples = [np.random.randint(0, size, size) for size in sample_sizes]
    edges = [0, 100, 500, 1000, 2000, 3000, 4000, 5000]
    plt.hist(samples, bins=edges, label=['3k', '4k', '5k'])
    plt.legend()
    plt.show()
def make_a_pie():
    """Exploded, shadowed pie chart of pet counts with percentage labels."""
    counts = np.array([6, 1, 2])
    names = ['Dog', 'Cat', 'Pig']
    pie_style = dict(
        autopct='%1.2f%%',
        colors=['red', 'yellow', 'green'],
        labeldistance=1.2,   # where the name labels sit
        pctdistance=0.5,     # where the percentage text sits
        explode=[0.1, 0.1, 0.1],
        shadow=True,
        startangle=90,
    )
    plt.pie(counts, labels=names, **pie_style)
    plt.legend()
    plt.show()
def make_a_scatter():
    """Scatter plot of 1000 Gaussian points with random colors and marker sizes."""
    xs, ys = np.random.randn(1000), np.random.randn(1000)
    rgb = np.random.rand(3000).reshape(1000, 3)  # one RGB row per point
    sizes = np.random.randint(0, 100, 1000)  # marker sizes
    plt.scatter(xs, ys, color=rgb, s=sizes, alpha=0.5)
    plt.show()
def make_a_box():
    """Notched box plot of random head counts, one box per year column."""
    population = np.random.randint(90, 150, 15).reshape(5, 3)
    years = ['2018', '2019', '2020']
    plt.title('1-5年级总人口')
    plt.boxplot(population, notch=True, labels=years, meanline=True)
    plt.show()
if __name__ == '__main__':
    # Run one demo at a time; uncomment the chart you want to preview.
    make_a_figure()
    # make_sub_figure()
    # make_a_bar()
    # make_a_hist()
    # make_a_pie()
    # make_a_scatter()
    # make_a_box()
| [
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.hist",
"numpy.random.rand",
"matplotlib.pyplot.annotate",
"numpy.array",
"numpy.sin",
"numpy.arange",
"os.path.exists",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.barh",
"os.mkdir",
"matplotlib.pyplot.scatter",
"... | [((200, 213), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (209, 213), True, 'import numpy as np\n'), ((220, 246), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (230, 246), True, 'import matplotlib.pyplot as plt\n'), ((249, 266), 'matplotlib.pyplot.title', 'plt.title', (['"""line"""'], {}), "('line')\n", (258, 266), True, 'import matplotlib.pyplot as plt\n'), ((269, 284), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (279, 284), True, 'import matplotlib.pyplot as plt\n'), ((287, 302), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Y"""'], {}), "('Y')\n", (297, 302), True, 'import matplotlib.pyplot as plt\n'), ((305, 319), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(5)'], {}), '(0, 5)\n', (313, 319), True, 'import matplotlib.pyplot as plt\n'), ((322, 338), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(100)'], {}), '(0, 100)\n', (330, 338), True, 'import matplotlib.pyplot as plt\n'), ((403, 423), 'matplotlib.pyplot.plot', 'plt.plot', (['data', 'data'], {}), '(data, data)\n', (411, 423), True, 'import matplotlib.pyplot as plt\n'), ((433, 458), 'matplotlib.pyplot.plot', 'plt.plot', (['data', '(data ** 2)'], {}), '(data, data ** 2)\n', (441, 458), True, 'import matplotlib.pyplot as plt\n'), ((471, 494), 'matplotlib.pyplot.plot', 'plt.plot', (['[2]', '[4]', '"""o"""'], {}), "([2], [4], 'o')\n", (479, 494), True, 'import matplotlib.pyplot as plt\n'), ((497, 544), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""(2,4)"""'], {'xy': '(2, 4)', 'xytext': '(2, 4)'}), "('(2,4)', xy=(2, 4), xytext=(2, 4))\n", (509, 544), True, 'import matplotlib.pyplot as plt\n'), ((600, 627), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pic/line.png"""'], {}), "('pic/line.png')\n", (611, 627), True, 'import matplotlib.pyplot as plt\n'), ((630, 640), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (638, 640), True, 'import matplotlib.pyplot as plt\n'), ((696, 723), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pic/line.png"""'], {}), "('pic/line.png')\n", (707, 723), True, 'import matplotlib.pyplot as plt\n'), ((726, 736), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (734, 736), True, 'import matplotlib.pyplot as plt\n'), ((771, 800), 'numpy.arange', 'np.arange', (['(0)', '(np.pi * 2)', '(0.01)'], {}), '(0, np.pi * 2, 0.01)\n', (780, 800), True, 'import numpy as np\n'), ((807, 833), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (817, 833), True, 'import matplotlib.pyplot as plt\n'), ((876, 893), 'matplotlib.pyplot.title', 'plt.title', (['"""line"""'], {}), "('line')\n", (885, 893), True, 'import matplotlib.pyplot as plt\n'), ((896, 911), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (906, 911), True, 'import matplotlib.pyplot as plt\n'), ((914, 929), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Y"""'], {}), "('Y')\n", (924, 929), True, 'import matplotlib.pyplot as plt\n'), ((932, 946), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (940, 946), True, 'import matplotlib.pyplot as plt\n'), ((949, 963), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (957, 963), True, 'import matplotlib.pyplot as plt\n'), ((1040, 1065), 'matplotlib.pyplot.plot', 'plt.plot', (['data', '(data ** 2)'], {}), '(data, data ** 2)\n', (1048, 1065), True, 'import matplotlib.pyplot as plt\n'), ((1068, 1093), 'matplotlib.pyplot.plot', 'plt.plot', (['data', '(data ** 4)'], {}), '(data, data ** 4)\n', (1076, 1093), True, 'import matplotlib.pyplot as plt\n'), ((1096, 1126), 'matplotlib.pyplot.legend', 'plt.legend', (["['y=x^2', 'y=x^4']"], {}), "(['y=x^2', 'y=x^4'])\n", (1106, 1126), True, 'import matplotlib.pyplot as plt\n'), ((1161, 1181), 'matplotlib.pyplot.title', 'plt.title', (['"""sin/cos"""'], {}), "('sin/cos')\n", (1170, 1181), True, 'import matplotlib.pyplot as plt\n'), ((1184, 1201), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""rad"""'], {}), "('rad')\n", (1194, 1201), True, 'import matplotlib.pyplot as plt\n'), ((1204, 1223), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""value"""'], {}), "('value')\n", (1214, 1223), True, 'import matplotlib.pyplot as plt\n'), ((1226, 1248), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(np.pi * 2)'], {}), '(0, np.pi * 2)\n', (1234, 1248), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1266), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1259, 1266), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1448), 'matplotlib.pyplot.legend', 'plt.legend', (["['sin', 'cos']"], {}), "(['sin', 'cos'])\n", (1432, 1448), True, 'import matplotlib.pyplot as plt\n'), ((1451, 1461), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1459, 1461), True, 'import matplotlib.pyplot as plt\n'), ((1491, 1543), 'numpy.array', 'np.array', (["['周一', '周二', '周三', '周四', '周五', '周六', '周七']"], {}), "(['周一', '周二', '周三', '周四', '周五', '周六', '周七'])\n", (1499, 1543), True, 'import numpy as np\n'), ((1554, 1591), 'numpy.random.randint', 'np.random.randint', (['(1000)', '(5000)'], {'size': '(7)'}), '(1000, 5000, size=7)\n', (1571, 1591), True, 'import numpy as np\n'), ((1641, 1667), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (1651, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1702, 1735), 'matplotlib.pyplot.bar', 'plt.bar', (['week', 'total'], {'color': 'color'}), '(week, total, color=color)\n', (1709, 1735), True, 'import matplotlib.pyplot as plt\n'), ((1770, 1804), 'matplotlib.pyplot.barh', 'plt.barh', (['week', 'total'], {'color': 'color'}), '(week, total, color=color)\n', (1778, 1804), True, 'import matplotlib.pyplot as plt\n'), ((1807, 1817), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1815, 1817), True, 'import matplotlib.pyplot as plt\n'), ((1986, 2022), 'matplotlib.pyplot.hist', 'plt.hist', (['x'], {'bins': 'bins', 'label': 'labels'}), '(x, bins=bins, 
label=labels)\n', (1994, 2022), True, 'import matplotlib.pyplot as plt\n'), ((2025, 2037), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2035, 2037), True, 'import matplotlib.pyplot as plt\n'), ((2040, 2050), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2048, 2050), True, 'import matplotlib.pyplot as plt\n'), ((2080, 2099), 'numpy.array', 'np.array', (['[6, 1, 2]'], {}), '([6, 1, 2])\n', (2088, 2099), True, 'import numpy as np\n'), ((2253, 2429), 'matplotlib.pyplot.pie', 'plt.pie', (['data'], {'labels': 'pet', 'autopct': '"""%1.2f%%"""', 'colors': "['red', 'yellow', 'green']", 'labeldistance': '(1.2)', 'pctdistance': '(0.5)', 'explode': '[0.1, 0.1, 0.1]', 'shadow': '(True)', 'startangle': '(90)'}), "(data, labels=pet, autopct='%1.2f%%', colors=['red', 'yellow',\n 'green'], labeldistance=1.2, pctdistance=0.5, explode=[0.1, 0.1, 0.1],\n shadow=True, startangle=90)\n", (2260, 2429), True, 'import matplotlib.pyplot as plt\n'), ((2454, 2466), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2464, 2466), True, 'import matplotlib.pyplot as plt\n'), ((2469, 2479), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2477, 2479), True, 'import matplotlib.pyplot as plt\n'), ((2510, 2531), 'numpy.random.randn', 'np.random.randn', (['(1000)'], {}), '(1000)\n', (2525, 2531), True, 'import numpy as np\n'), ((2538, 2559), 'numpy.random.randn', 'np.random.randn', (['(1000)'], {}), '(1000)\n', (2553, 2559), True, 'import numpy as np\n'), ((2617, 2648), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (2634, 2648), True, 'import numpy as np\n'), ((2659, 2708), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'color': 'color', 's': 'size', 'alpha': '(0.5)'}), '(x, y, color=color, s=size, alpha=0.5)\n', (2670, 2708), True, 'import matplotlib.pyplot as plt\n'), ((2711, 2721), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2719, 2721), True, 'import matplotlib.pyplot as 
plt\n'), ((2834, 2855), 'matplotlib.pyplot.title', 'plt.title', (['"""1-5年级总人口"""'], {}), "('1-5年级总人口')\n", (2843, 2855), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2917), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['data'], {'notch': '(True)', 'labels': 'labels', 'meanline': '(True)'}), '(data, notch=True, labels=labels, meanline=True)\n', (2869, 2917), True, 'import matplotlib.pyplot as plt\n'), ((2920, 2930), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2928, 2930), True, 'import matplotlib.pyplot as plt\n'), ((555, 576), 'os.path.exists', 'os.path.exists', (['"""pic"""'], {}), "('pic')\n", (569, 576), False, 'import os\n'), ((582, 597), 'os.mkdir', 'os.mkdir', (['"""pic"""'], {}), "('pic')\n", (590, 597), False, 'import os\n'), ((651, 672), 'os.path.exists', 'os.path.exists', (['"""pic"""'], {}), "('pic')\n", (665, 672), False, 'import os\n'), ((678, 693), 'os.mkdir', 'os.mkdir', (['"""pic"""'], {}), "('pic')\n", (686, 693), False, 'import os\n'), ((977, 999), 'numpy.arange', 'np.arange', (['(0)', '(1.2)', '(0.2)'], {}), '(0, 1.2, 0.2)\n', (986, 999), True, 'import numpy as np\n'), ((1014, 1036), 'numpy.arange', 'np.arange', (['(0)', '(1.2)', '(0.2)'], {}), '(0, 1.2, 0.2)\n', (1023, 1036), True, 'import numpy as np\n'), ((1280, 1318), 'numpy.arange', 'np.arange', (['(0)', '(np.pi * 2.5)', '(np.pi * 0.5)'], {}), '(0, np.pi * 2.5, np.pi * 0.5)\n', (1289, 1318), True, 'import numpy as np\n'), ((1333, 1356), 'numpy.arange', 'np.arange', (['(-1)', '(1.5)', '(0.5)'], {}), '(-1, 1.5, 0.5)\n', (1342, 1356), True, 'import numpy as np\n'), ((1375, 1387), 'numpy.sin', 'np.sin', (['data'], {}), '(data)\n', (1381, 1387), True, 'import numpy as np\n'), ((1406, 1418), 'numpy.cos', 'np.cos', (['data'], {}), '(data)\n', (1412, 1418), True, 'import numpy as np\n'), ((1845, 1871), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n', 'n'], {}), '(0, n, n)\n', (1862, 1871), True, 'import numpy as np\n'), ((1602, 1620), 'numpy.random.rand', 
'np.random.rand', (['(15)'], {}), '(15)\n', (1616, 1620), True, 'import numpy as np\n'), ((2570, 2590), 'numpy.random.rand', 'np.random.rand', (['(3000)'], {}), '(3000)\n', (2584, 2590), True, 'import numpy as np\n'), ((2751, 2781), 'numpy.random.randint', 'np.random.randint', (['(90)', '(150)', '(15)'], {}), '(90, 150, 15)\n', (2768, 2781), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright 2017 Kakao, Recommendation Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import fire
import h5py
import numpy as np
import six
from six.moves import zip, cPickle
from tqdm import tqdm
def get_size(data_path, div):
    """Return the number of samples (rows of the 'img' dataset) in a split.

    Args:
        data_path: path to the HDF5 data file.
        div: split name inside the file (e.g. 'train' or 'dev').
    """
    # Open read-only and close the handle when done; the original opened the
    # file with h5py's default mode and leaked the handle.
    with h5py.File(data_path, 'r') as data:
        return data[div]['img'].shape[0]
def toss_answer(data_path, div):
    """Yield the ground-truth category index (argmax of one-hot 'cate') per sample."""
    # Open read-only; the file stays open for the generator's lifetime and is
    # closed when iteration finishes (the original leaked the handle).
    with h5py.File(data_path, 'r') as data:
        cate = data[div]['cate']  # hoist the dataset lookup out of the loop
        for i in range(cate.shape[0]):
            yield np.argmax(cate[i])
def toss_chunk_answer(data_path, div):
    """Yield ground-truth (b, m, s, d) index tuples in chunks of up to 1M samples."""
    chunk_sz = 1000000
    # Open read-only and close when iteration finishes (the original leaked
    # the handle).
    with h5py.File(data_path, 'r') as data:
        h = data[div]
        size = h['img'].shape[0]
        for start in range(0, size, chunk_sz):
            end = min(start + chunk_sz, size)
            b = h['bindex'][start:end]
            m = h['mindex'][start:end]
            s = h['sindex'][start:end]
            d = h['dindex'][start:end]
            # zip already yields the (b, m, s, d) tuples the original rebuilt
            # with an identity comprehension.
            yield list(zip(b, m, s, d))
#yield np.argmax(h['cate'][start:end], axis=1)
def evaluate(predict_path, data_path, div, y_vocab_path, log_path):
    """Score a prediction TSV against ground truth and write per-depth accuracy.

    Usage example:
        python evaluate.py evaluate predict.tsv ./data/train/data.h5py dev \\
            ./data/y_vocab.py3.cPickle score.txt

    Args:
        predict_path: TSV with "pid\\tb\\tm\\ts\\td" per line (predicted indices).
        data_path: HDF5 file holding the ground-truth indices.
        div: split name inside the HDF5 file (e.g. 'dev').
        y_vocab_path: pickled joint category vocabulary (loaded for
            compatibility; the per-depth vocabularies below do the work).
        log_path: file the accuracy report is written to.
    """
    y_vocab = cPickle.loads(open(y_vocab_path, 'rb').read())
    inv_y_vocab = {v: k for k, v in six.iteritems(y_vocab)}  # kept for parity; unused below

    b_vocab = cPickle.loads(open("./data/b_vocab.cPickle", 'rb').read())
    m_vocab = cPickle.loads(open("./data/m_vocab.cPickle", 'rb').read())
    s_vocab = cPickle.loads(open("./data/s_vocab.cPickle", 'rb').read())
    d_vocab = cPickle.loads(open("./data/d_vocab.cPickle", 'rb').read())
    inv_b_vocab = {i: s for s, i in six.iteritems(b_vocab)}
    inv_m_vocab = {i: s for s, i in six.iteritems(m_vocab)}
    inv_s_vocab = {i: s for s, i in six.iteritems(s_vocab)}
    inv_d_vocab = {i: s for s, i in six.iteritems(d_vocab)}

    # defaultdict(int) replaces defaultdict(lambda: 0); with it, the original
    # n.get(depth, 0) + 1 dance collapses to n[depth] += 1.
    hit, n = defaultdict(int), defaultdict(int)
    print('loading ground-truth...')
    with open(predict_path, 'r') as fin:  # close the prediction file when done
        for bx in toss_chunk_answer(data_path, div):
            # fin is consumed incrementally: each chunk's zip resumes where
            # the previous chunk's iteration stopped.
            for p, y in tqdm(zip(fin, bx), desc='bomb', total=len(bx)):
                # y = (b, m, s, d) ground-truth indices for this sample
                pid, b, m, s, d = p.split('\t')
                b, m, s, d = list(map(int, [b, m, s, d]))  # our prediction
                gt = [inv_b_vocab[y[0]], inv_m_vocab[y[1]],
                      inv_s_vocab[y[2]], inv_d_vocab[y[3]]]  # ground truth
                for depth, _p, _g in zip(['b', 'm', 's', 'd'],
                                         [b, m, s, d],
                                         gt):
                    if _g == -1:
                        continue  # -1 marks "no label at this depth"
                    n[depth] += 1        # samples counted at this depth
                    if _p == _g:
                        hit[depth] += 1  # correct at this depth
    with open(log_path, 'w') as f:
        for d in ['b', 'm', 's', 'd']:
            if n[d] > 0:
                print('%s-Accuracy: %.3f(%s/%s)' % (d, hit[d] / float(n[d]), hit[d], n[d]))
                f.write('%s-Accuracy: %.3f(%s/%s) \n' % (d, hit[d] / float(n[d]), hit[d], n[d]))
        # weighted mean over the four depths
        score = sum([hit[d] / float(n[d]) * w
                     for d, w in zip(['b', 'm', 's', 'd'],
                                     [1.0, 1.2, 1.3, 1.4])]) / 4.0
        print('score: %.3f' % score)
        f.write('score: %.3f\n' % score)
# CLI entry point: exposes `evaluate` as a subcommand via python-fire,
# e.g. `python evaluate.py evaluate <pred.tsv> <data.h5py> dev <vocab> <log>`.
if __name__ == '__main__':
    fire.Fire({'evaluate': evaluate})
| [
"fire.Fire",
"numpy.argmax",
"h5py.File",
"collections.defaultdict",
"six.iteritems",
"six.moves.zip"
] | [((4937, 4970), 'fire.Fire', 'fire.Fire', (["{'evaluate': evaluate}"], {}), "({'evaluate': evaluate})\n", (4946, 4970), False, 'import fire\n'), ((800, 820), 'h5py.File', 'h5py.File', (['data_path'], {}), '(data_path)\n', (809, 820), False, 'import h5py\n'), ((914, 934), 'h5py.File', 'h5py.File', (['data_path'], {}), '(data_path)\n', (923, 934), False, 'import h5py\n'), ((1083, 1103), 'h5py.File', 'h5py.File', (['data_path'], {}), '(data_path)\n', (1092, 1103), False, 'import h5py\n'), ((3202, 3225), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (3213, 3225), False, 'from collections import defaultdict\n'), ((3226, 3249), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (3237, 3249), False, 'from collections import defaultdict\n'), ((1010, 1033), 'numpy.argmax', 'np.argmax', (["h['cate'][i]"], {}), "(h['cate'][i])\n", (1019, 1033), True, 'import numpy as np\n'), ((2592, 2614), 'six.iteritems', 'six.iteritems', (['y_vocab'], {}), '(y_vocab)\n', (2605, 2614), False, 'import six\n'), ((2950, 2972), 'six.iteritems', 'six.iteritems', (['b_vocab'], {}), '(b_vocab)\n', (2963, 2972), False, 'import six\n'), ((3010, 3032), 'six.iteritems', 'six.iteritems', (['m_vocab'], {}), '(m_vocab)\n', (3023, 3032), False, 'import six\n'), ((3070, 3092), 'six.iteritems', 'six.iteritems', (['s_vocab'], {}), '(s_vocab)\n', (3083, 3092), False, 'import six\n'), ((3130, 3152), 'six.iteritems', 'six.iteritems', (['d_vocab'], {}), '(d_vocab)\n', (3143, 3152), False, 'import six\n'), ((3503, 3515), 'six.moves.zip', 'zip', (['fin', 'bx'], {}), '(fin, bx)\n', (3506, 3515), False, 'from six.moves import zip, cPickle\n'), ((4027, 4070), 'six.moves.zip', 'zip', (["['b', 'm', 's', 'd']", '[b, m, s, d]', 'gt'], {}), "(['b', 'm', 's', 'd'], [b, m, s, d], gt)\n", (4030, 4070), False, 'from six.moves import zip, cPickle\n'), ((1463, 1478), 'six.moves.zip', 'zip', (['b', 'm', 's', 'd'], {}), '(b, m, s, d)\n', (1466, 1478), 
False, 'from six.moves import zip, cPickle\n'), ((4725, 4772), 'six.moves.zip', 'zip', (["['b', 'm', 's', 'd']", '[1.0, 1.2, 1.3, 1.4]'], {}), "(['b', 'm', 's', 'd'], [1.0, 1.2, 1.3, 1.4])\n", (4728, 4772), False, 'from six.moves import zip, cPickle\n')] |
import configargparse
import torch
import torch.optim as optim
import sys
sys.path.append('../')
from environments import MountainCarEnv, Continuous_MountainCarEnv
from models.agents import NFQAgent
from models.networks import NFQNetwork, ContrastiveNFQNetwork
from util import get_logger, close_logger, load_models, make_reproducible, save_models
import matplotlib.pyplot as plt
import numpy as np
import itertools
import seaborn as sns
import tqdm
# def generate_data(init_experience=400, dataset='train'):
# env_bg = Continuous_MountainCarEnv(group=0)
# env_fg = Continuous_MountainCarEnv(group=1)
# bg_rollouts = []
# fg_rollouts = []
# if init_experience > 0:
# for _ in range(init_experience):
# rollout_bg, episode_cost = env_bg.generate_rollout(
# None, render=False, group=0, dataset=dataset
# )
# rollout_fg, episode_cost = env_fg.generate_rollout(
# None, render=False, group=1, dataset=dataset
# )
# bg_rollouts.extend(rollout_bg)
# fg_rollouts.extend(rollout_fg)
# bg_rollouts.extend(fg_rollouts)
# all_rollouts = bg_rollouts.copy()
# return all_rollouts, env_bg, env_fg
#
# is_contrastive=False
# epoch = 100
# evaluations = 10
# verbose=True
# print("Generating Data")
# train_rollouts, train_env_bg, train_env_fg = generate_data(dataset='train')
# test_rollouts, eval_env_bg, eval_env_fg = generate_data(dataset='test')
#
# nfq_net = ContrastiveNFQNetwork(
# state_dim=train_env_bg.state_dim, is_contrastive=is_contrastive
# )
# optimizer = optim.Adam(nfq_net.parameters(), lr=1e-1)
#
# nfq_agent = NFQAgent(nfq_net, optimizer)
#
# # NFQ Main loop
# bg_success_queue = [0] * 3
# fg_success_queue = [0] * 3
# epochs_fg = 0
# eval_fg = 0
# for k, epoch in enumerate(tqdm.tqdm(range(epoch + 1))):
# state_action_b, target_q_values, groups = nfq_agent.generate_pattern_set(
# train_rollouts
# )
# X = state_action_b
# train_groups = groups
#
# if not nfq_net.freeze_shared:
# loss = nfq_agent.train((state_action_b, target_q_values, groups))
#
# eval_episode_length_fg, eval_success_fg, eval_episode_cost_fg = 0, 0, 0
# if nfq_net.freeze_shared:
# eval_fg += 1
#
# if eval_fg > 50:
# loss = nfq_agent.train((state_action_b, target_q_values, groups))
#
# (eval_episode_length_bg, eval_success_bg, eval_episode_cost_bg) = nfq_agent.evaluate_car(eval_env_bg, render=False)
# (eval_episode_length_fg,eval_success_fg, eval_episode_cost_fg) = nfq_agent.evaluate_car(eval_env_fg, render=False)
#
# bg_success_queue = bg_success_queue[1:]
# bg_success_queue.append(1 if eval_success_bg else 0)
#
# fg_success_queue = fg_success_queue[1:]
# fg_success_queue.append(1 if eval_success_fg else 0)
#
# printed_bg = False
# printed_fg = False
#
# if sum(bg_success_queue) == 3 and not nfq_net.freeze_shared == True:
# if epochs_fg == 0:
# epochs_fg = epoch
# printed_bg = True
# nfq_net.freeze_shared = True
# if verbose:
# print("FREEZING SHARED")
# if is_contrastive:
# for param in nfq_net.layers_shared.parameters():
# param.requires_grad = False
# for param in nfq_net.layers_last_shared.parameters():
# param.requires_grad = False
# for param in nfq_net.layers_fg.parameters():
# param.requires_grad = True
# for param in nfq_net.layers_last_fg.parameters():
# param.requires_grad = True
# else:
# for param in nfq_net.layers_fg.parameters():
# param.requires_grad = False
# for param in nfq_net.layers_last_fg.parameters():
# param.requires_grad = False
#
# optimizer = optim.Adam(
# itertools.chain(
# nfq_net.layers_fg.parameters(),
# nfq_net.layers_last_fg.parameters(),
# ),
# lr=1e-1,
# )
# nfq_agent._optimizer = optimizer
#
#
# if sum(fg_success_queue) == 3:
# printed_fg = True
# break
#
# eval_env_bg.step_number = 0
# eval_env_fg.step_number = 0
#
# eval_env_bg.max_steps = 1000
# eval_env_fg.max_steps = 1000
#
# performance_fg = []
# performance_bg = []
# num_steps_bg = []
# num_steps_fg = []
# total = 0
import configargparse
import torch
import torch.optim as optim
import sys
sys.path.append('../')
from environments import MountainCarEnv, Continuous_MountainCarEnv
from models.agents import NFQAgent
from models.networks import NFQNetwork, ContrastiveNFQNetwork
from util import get_logger, close_logger, load_models, make_reproducible, save_models
import matplotlib.pyplot as plt
import numpy as np
import itertools
import seaborn as sns
import tqdm
def generate_data(init_experience=50, bg_only=False, continuous=False, agent=None):
    """Collect rollouts from the background (and optionally foreground) env.

    Args:
        init_experience: number of rollouts to collect per environment.
        bg_only: if True, skip the foreground environment entirely.
        continuous: if True, use the continuous-action mountain-car env.
        agent: policy passed to ``generate_rollout`` (None for random).

    Returns:
        (all_rollouts, env_bg, env_fg): the concatenated background-then-
        foreground transitions and the two environment instances.
    """
    env_cls = Continuous_MountainCarEnv if continuous else MountainCarEnv
    env_bg = env_cls(group=0)
    env_fg = env_cls(group=1)
    bg_rollouts, fg_rollouts = [], []
    for _ in range(init_experience):
        rollout, _cost = env_bg.generate_rollout(agent, render=False, group=0)
        bg_rollouts.extend(rollout)
        if not bg_only:
            rollout, _cost = env_fg.generate_rollout(agent, render=False, group=1)
            fg_rollouts.extend(rollout)
    # Background transitions first, then foreground, matching the original.
    bg_rollouts.extend(fg_rollouts)
    return bg_rollouts.copy(), env_bg, env_fg
# Collect background-only rollouts for training and evaluation
# (discrete MountainCarEnv; no foreground data in this configuration).
train_rollouts, train_env_bg, train_env_fg = generate_data(bg_only=True, continuous=False)
test_rollouts, eval_env_bg, eval_env_fg = generate_data(bg_only=True, continuous=False)
is_contrastive = False
epochs = 100
evaluations = 1
# Q-network shared by both groups; `deep=True` presumably selects a deeper
# architecture — confirm in models.networks.
nfq_net = ContrastiveNFQNetwork(
    state_dim=train_env_bg.state_dim, is_contrastive=is_contrastive, deep=True
)
optimizer = optim.Adam(nfq_net.parameters(), lr=1e-1)
nfq_agent = NFQAgent(nfq_net, optimizer)
# NFQ Main loop
# Sliding windows of the last 3 evaluation outcomes per group; the shared
# layers are frozen once the background succeeds 3 times in a row.
bg_success_queue = [0] * 3
fg_success_queue = [0] * 3
epochs_fg = 0
eval_fg = 0
train_rewards = [r[2] for r in train_rollouts]
test_rewards = [r[2] for r in test_rollouts]
print("Average Train Reward: " + str(np.average(train_rewards)) + " Average Test Reward: " + str(np.average(test_rewards)))
print("Epochs: " + str(epochs))
# Main NFQ training loop.  Phase 1 trains on background rollouts until the
# background evaluation succeeds 3 times in a row; then the shared layers
# are frozen and (in this background-only script) training stops.
for k, ep in enumerate(tqdm.tqdm(range(epochs + 1))):
    state_action_b, target_q_values, groups = nfq_agent.generate_pattern_set(train_rollouts)
    if not nfq_net.freeze_shared:
        loss = nfq_agent.train((state_action_b, target_q_values, groups))
    eval_episode_length_fg, eval_success_fg, eval_episode_cost_fg = 0, 0, 0
    if nfq_net.freeze_shared:
        eval_fg += 1
        # After freezing, wait 50 epochs before resuming training of the
        # remaining (foreground) parameters.
        if eval_fg > 50:
            loss = nfq_agent.train((state_action_b, target_q_values, groups))
    (eval_episode_length_bg, eval_success_bg, eval_episode_cost_bg) = nfq_agent.evaluate_car(eval_env_bg, render=False)
    #(eval_episode_length_fg, eval_success_fg, eval_episode_cost_fg) = nfq_agent.evaluate_car(eval_env_fg, render=False)
    # Slide the background success window and record the latest outcome.
    bg_success_queue = bg_success_queue[1:]
    bg_success_queue.append(1 if eval_success_bg else 0)
    #fg_success_queue = fg_success_queue[1:]
    #fg_success_queue.append(1 if eval_success_fg else 0)
    if sum(bg_success_queue) == 3 and not nfq_net.freeze_shared == True:
        if epochs_fg == 0:
            epochs_fg = ep
        nfq_net.freeze_shared = True
        print("FREEZING SHARED")
        if is_contrastive:
            # Contrastive net: freeze shared trunk, unfreeze foreground head.
            for param in nfq_net.layers_shared.parameters():
                param.requires_grad = False
            for param in nfq_net.layers_last_shared.parameters():
                param.requires_grad = False
            for param in nfq_net.layers_fg.parameters():
                param.requires_grad = True
            for param in nfq_net.layers_last_fg.parameters():
                param.requires_grad = True
        else:
            for param in nfq_net.layers_fg.parameters():
                param.requires_grad = False
            for param in nfq_net.layers_last_fg.parameters():
                param.requires_grad = False
        # Fresh optimizer over only the foreground parameters.
        optimizer = optim.Adam(
            itertools.chain(
                nfq_net.layers_fg.parameters(),
                nfq_net.layers_last_fg.parameters(),
            ),
            lr=1e-1,
        )
        nfq_agent._optimizer = optimizer
        # NOTE(review): this `break` ends training right after the freeze,
        # so the fg-only phase above is never reached in this script.
        break
    if sum(fg_success_queue) == 3:
        break
    # Regenerate on-policy rollouts with the current agent.
    train_rollouts, train_env_bg, train_env_fg = generate_data(bg_only=True, continuous=False, agent=nfq_agent)
    test_rollouts, eval_env_bg, eval_env_fg = generate_data(bg_only=True, continuous=False, agent=nfq_agent)
    train_rewards = [r[2] for r in train_rollouts]
    test_rewards = [r[2] for r in test_rollouts]
    print("Average Train Reward: " + str(np.average(train_rewards)) + " Average Test Reward: " + str(np.average(test_rewards)))
    if ep % 10 == 0:
        # Periodic rendered evaluation on the background environment.
        for it in range(evaluations):
            (
                eval_episode_length_bg,
                eval_success_bg,
                eval_episode_cost_bg,
            ) = nfq_agent.evaluate_car(eval_env_bg, render=True)
            print(eval_episode_length_bg, eval_success_bg, eval_episode_cost_bg)
train_env_bg.close()
eval_env_bg.close()
"numpy.average",
"models.agents.NFQAgent",
"environments.MountainCarEnv",
"environments.Continuous_MountainCarEnv",
"sys.path.append",
"models.networks.ContrastiveNFQNetwork"
] | [((74, 96), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (89, 96), False, 'import sys\n'), ((4540, 4562), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (4555, 4562), False, 'import sys\n'), ((6060, 6162), 'models.networks.ContrastiveNFQNetwork', 'ContrastiveNFQNetwork', ([], {'state_dim': 'train_env_bg.state_dim', 'is_contrastive': 'is_contrastive', 'deep': '(True)'}), '(state_dim=train_env_bg.state_dim, is_contrastive=\n is_contrastive, deep=True)\n', (6081, 6162), False, 'from models.networks import NFQNetwork, ContrastiveNFQNetwork\n'), ((6231, 6259), 'models.agents.NFQAgent', 'NFQAgent', (['nfq_net', 'optimizer'], {}), '(nfq_net, optimizer)\n', (6239, 6259), False, 'from models.agents import NFQAgent\n'), ((5038, 5072), 'environments.Continuous_MountainCarEnv', 'Continuous_MountainCarEnv', ([], {'group': '(0)'}), '(group=0)\n', (5063, 5072), False, 'from environments import MountainCarEnv, Continuous_MountainCarEnv\n'), ((5090, 5124), 'environments.Continuous_MountainCarEnv', 'Continuous_MountainCarEnv', ([], {'group': '(1)'}), '(group=1)\n', (5115, 5124), False, 'from environments import MountainCarEnv, Continuous_MountainCarEnv\n'), ((5152, 5175), 'environments.MountainCarEnv', 'MountainCarEnv', ([], {'group': '(0)'}), '(group=0)\n', (5166, 5175), False, 'from environments import MountainCarEnv, Continuous_MountainCarEnv\n'), ((5193, 5216), 'environments.MountainCarEnv', 'MountainCarEnv', ([], {'group': '(1)'}), '(group=1)\n', (5207, 5216), False, 'from environments import MountainCarEnv, Continuous_MountainCarEnv\n'), ((6546, 6570), 'numpy.average', 'np.average', (['test_rewards'], {}), '(test_rewards)\n', (6556, 6570), True, 'import numpy as np\n'), ((9175, 9199), 'numpy.average', 'np.average', (['test_rewards'], {}), '(test_rewards)\n', (9185, 9199), True, 'import numpy as np\n'), ((6486, 6511), 'numpy.average', 'np.average', (['train_rewards'], {}), '(train_rewards)\n', (6496, 6511), True, 'import 
numpy as np\n'), ((9115, 9140), 'numpy.average', 'np.average', (['train_rewards'], {}), '(train_rewards)\n', (9125, 9140), True, 'import numpy as np\n')] |
import tqdm
import argparse
import torch
import numpy as np
import cv2
import glob
import streamer_pytorch as streamer
# Command-line options: pick exactly one input source (webcam / image list /
# image folder / video list), plus looping and visualization toggles.
parser = argparse.ArgumentParser(description='.')
parser.add_argument(
    '--camera', action="store_true", help="whether to use webcam.")
parser.add_argument(
    '--images', default="", nargs="*", help="paths of image.")
parser.add_argument(
    '--image_folder', default=None, help="path of image folder.")
parser.add_argument(
    '--videos', default="", nargs="*", help="paths of video.")
parser.add_argument(
    '--loop', action="store_true", help="whether to repeat images/video.")
parser.add_argument(
    '--vis', action="store_true", help="whether to visualize.")
args = parser.parse_args()
def visulization(data):
    """Display the first sample of a batch in an OpenCV window.

    The input is assumed to be a CHW tensor batch normalized to [-1, 1]
    (TODO confirm against the streamer); it is denormalized to [0, 255],
    converted to RGB, upscaled 2x and shown for one tick.
    """
    frame = data[0].numpy()
    frame = frame.transpose(1, 2, 0)               # CHW -> HWC
    frame = np.uint8((frame * 0.5 + 0.5) * 255.0)  # [-1, 1] -> [0, 255]
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame = cv2.resize(frame, (0, 0), fx=2, fy=2)
    cv2.imshow('window', frame)
    cv2.waitKey(1)
# Choose the input stream by priority: webcam > video list > image list >
# image folder.  Looping is implemented by repeating the source list.
if args.camera:
    data_stream = streamer.CaptureStreamer(pad=False)
elif len(args.videos) > 0:
    data_stream = streamer.VideoListStreamer(
        args.videos * (10 if args.loop else 1))
elif len(args.images) > 0:
    data_stream = streamer.ImageListStreamer(
        args.images * (10000 if args.loop else 1))
elif args.image_folder is not None:
    images = sorted(glob.glob(args.image_folder+'/*.jpg'))
    images += sorted(glob.glob(args.image_folder+'/*.png'))
    data_stream = streamer.ImageListStreamer(
        images * (10 if args.loop else 1))
# Single-worker loader, batch size 1; pin_memory off since frames are only
# visualized on the CPU here.
loader = torch.utils.data.DataLoader(
    data_stream,
    batch_size=1,
    num_workers=1,
    pin_memory=False,
)
try:
    # Drain the stream; optionally show each frame.
    for data in tqdm.tqdm(loader):
        if args.vis:
            visulization(data)
except Exception as e:
    print (e)
del data_stream
| [
"numpy.uint8",
"streamer_pytorch.CaptureStreamer",
"argparse.ArgumentParser",
"tqdm.tqdm",
"streamer_pytorch.ImageListStreamer",
"cv2.imshow",
"cv2.cvtColor",
"torch.utils.data.DataLoader",
"streamer_pytorch.VideoListStreamer",
"cv2.resize",
"cv2.waitKey",
"glob.glob"
] | [((130, 170), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""."""'}), "(description='.')\n", (153, 170), False, 'import argparse\n'), ((1620, 1711), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['data_stream'], {'batch_size': '(1)', 'num_workers': '(1)', 'pin_memory': '(False)'}), '(data_stream, batch_size=1, num_workers=1,\n pin_memory=False)\n', (1647, 1711), False, 'import torch\n'), ((872, 888), 'numpy.uint8', 'np.uint8', (['window'], {}), '(window)\n', (880, 888), True, 'import numpy as np\n'), ((902, 941), 'cv2.cvtColor', 'cv2.cvtColor', (['window', 'cv2.COLOR_BGR2RGB'], {}), '(window, cv2.COLOR_BGR2RGB)\n', (914, 941), False, 'import cv2\n'), ((956, 994), 'cv2.resize', 'cv2.resize', (['window', '(0, 0)'], {'fx': '(2)', 'fy': '(2)'}), '(window, (0, 0), fx=2, fy=2)\n', (966, 994), False, 'import cv2\n'), ((1000, 1028), 'cv2.imshow', 'cv2.imshow', (['"""window"""', 'window'], {}), "('window', window)\n", (1010, 1028), False, 'import cv2\n'), ((1033, 1047), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1044, 1047), False, 'import cv2\n'), ((1084, 1119), 'streamer_pytorch.CaptureStreamer', 'streamer.CaptureStreamer', ([], {'pad': '(False)'}), '(pad=False)\n', (1108, 1119), True, 'import streamer_pytorch as streamer\n'), ((1753, 1770), 'tqdm.tqdm', 'tqdm.tqdm', (['loader'], {}), '(loader)\n', (1762, 1770), False, 'import tqdm\n'), ((1165, 1231), 'streamer_pytorch.VideoListStreamer', 'streamer.VideoListStreamer', (['(args.videos * (10 if args.loop else 1))'], {}), '(args.videos * (10 if args.loop else 1))\n', (1191, 1231), True, 'import streamer_pytorch as streamer\n'), ((1286, 1355), 'streamer_pytorch.ImageListStreamer', 'streamer.ImageListStreamer', (['(args.images * (10000 if args.loop else 1))'], {}), '(args.images * (10000 if args.loop else 1))\n', (1312, 1355), True, 'import streamer_pytorch as streamer\n'), ((1538, 1599), 'streamer_pytorch.ImageListStreamer', 'streamer.ImageListStreamer', 
(['(images * (10 if args.loop else 1))'], {}), '(images * (10 if args.loop else 1))\n', (1564, 1599), True, 'import streamer_pytorch as streamer\n'), ((1421, 1460), 'glob.glob', 'glob.glob', (["(args.image_folder + '/*.jpg')"], {}), "(args.image_folder + '/*.jpg')\n", (1430, 1460), False, 'import glob\n'), ((1481, 1520), 'glob.glob', 'glob.glob', (["(args.image_folder + '/*.png')"], {}), "(args.image_folder + '/*.png')\n", (1490, 1520), False, 'import glob\n')] |
# Static-typing test file for numpy's arraysetops functions: each
# `reveal_type` call is paired with an `# E:` comment stating the type the
# type checker is expected to reveal.  The `# E:` comments are assertions
# consumed by the test harness — do not edit them casually.
import numpy as np
import numpy.typing as npt
# Array fixtures of various dtypes (declared only; never assigned here).
AR_b: npt.NDArray[np.bool_]
AR_i8: npt.NDArray[np.int64]
AR_f8: npt.NDArray[np.float64]
AR_M: npt.NDArray[np.datetime64]
AR_O: npt.NDArray[np.object_]
AR_LIKE_f8: list[float]
reveal_type(np.ediff1d(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[{int8}]]
reveal_type(np.ediff1d(AR_i8, to_end=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.ediff1d(AR_M)) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(np.ediff1d(AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
reveal_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.intersect1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.intersect1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(np.intersect1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.intersect1d(AR_f8, AR_f8, return_indices=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.setxor1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.setxor1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(np.setxor1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.in1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.in1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.in1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.in1d(AR_f8, AR_LIKE_f8, invert=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.isin(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.isin(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.isin(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.isin(AR_f8, AR_LIKE_f8, invert=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.union1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.union1d(AR_M, AR_M)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(np.union1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.setdiff1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.setdiff1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(np.setdiff1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.unique(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(np.unique(AR_LIKE_f8, axis=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.unique(AR_f8, return_index=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_LIKE_f8, return_index=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_f8, return_inverse=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_LIKE_f8, return_inverse=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_f8, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_LIKE_f8, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_f8, return_index=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_f8, return_inverse=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_LIKE_f8, return_inverse=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
| [
"numpy.intersect1d",
"numpy.union1d",
"numpy.unique",
"numpy.in1d",
"numpy.ediff1d",
"numpy.isin",
"numpy.setdiff1d",
"numpy.setxor1d"
] | [((236, 252), 'numpy.ediff1d', 'np.ediff1d', (['AR_b'], {}), '(AR_b)\n', (246, 252), True, 'import numpy as np\n'), ((312, 347), 'numpy.ediff1d', 'np.ediff1d', (['AR_i8'], {'to_end': '[1, 2, 3]'}), '(AR_i8, to_end=[1, 2, 3])\n', (322, 347), True, 'import numpy as np\n'), ((408, 424), 'numpy.ediff1d', 'np.ediff1d', (['AR_M'], {}), '(AR_M)\n', (418, 424), True, 'import numpy as np\n'), ((495, 511), 'numpy.ediff1d', 'np.ediff1d', (['AR_O'], {}), '(AR_O)\n', (505, 511), True, 'import numpy as np\n'), ((578, 619), 'numpy.ediff1d', 'np.ediff1d', (['AR_LIKE_f8'], {'to_begin': '[1, 1.5]'}), '(AR_LIKE_f8, to_begin=[1, 1.5])\n', (588, 619), True, 'import numpy as np\n'), ((677, 705), 'numpy.intersect1d', 'np.intersect1d', (['AR_i8', 'AR_i8'], {}), '(AR_i8, AR_i8)\n', (691, 705), True, 'import numpy as np\n'), ((766, 812), 'numpy.intersect1d', 'np.intersect1d', (['AR_M', 'AR_M'], {'assume_unique': '(True)'}), '(AR_M, AR_M, assume_unique=True)\n', (780, 812), True, 'import numpy as np\n'), ((882, 910), 'numpy.intersect1d', 'np.intersect1d', (['AR_f8', 'AR_i8'], {}), '(AR_f8, AR_i8)\n', (896, 910), True, 'import numpy as np\n'), ((967, 1016), 'numpy.intersect1d', 'np.intersect1d', (['AR_f8', 'AR_f8'], {'return_indices': '(True)'}), '(AR_f8, AR_f8, return_indices=True)\n', (981, 1016), True, 'import numpy as np\n'), ((1169, 1194), 'numpy.setxor1d', 'np.setxor1d', (['AR_i8', 'AR_i8'], {}), '(AR_i8, AR_i8)\n', (1180, 1194), True, 'import numpy as np\n'), ((1255, 1298), 'numpy.setxor1d', 'np.setxor1d', (['AR_M', 'AR_M'], {'assume_unique': '(True)'}), '(AR_M, AR_M, assume_unique=True)\n', (1266, 1298), True, 'import numpy as np\n'), ((1368, 1393), 'numpy.setxor1d', 'np.setxor1d', (['AR_f8', 'AR_i8'], {}), '(AR_f8, AR_i8)\n', (1379, 1393), True, 'import numpy as np\n'), ((1451, 1472), 'numpy.in1d', 'np.in1d', (['AR_i8', 'AR_i8'], {}), '(AR_i8, AR_i8)\n', (1458, 1472), True, 'import numpy as np\n'), ((1537, 1576), 'numpy.in1d', 'np.in1d', (['AR_M', 'AR_M'], {'assume_unique': 
'(True)'}), '(AR_M, AR_M, assume_unique=True)\n', (1544, 1576), True, 'import numpy as np\n'), ((1641, 1662), 'numpy.in1d', 'np.in1d', (['AR_f8', 'AR_i8'], {}), '(AR_f8, AR_i8)\n', (1648, 1662), True, 'import numpy as np\n'), ((1727, 1766), 'numpy.in1d', 'np.in1d', (['AR_f8', 'AR_LIKE_f8'], {'invert': '(True)'}), '(AR_f8, AR_LIKE_f8, invert=True)\n', (1734, 1766), True, 'import numpy as np\n'), ((1832, 1853), 'numpy.isin', 'np.isin', (['AR_i8', 'AR_i8'], {}), '(AR_i8, AR_i8)\n', (1839, 1853), True, 'import numpy as np\n'), ((1918, 1957), 'numpy.isin', 'np.isin', (['AR_M', 'AR_M'], {'assume_unique': '(True)'}), '(AR_M, AR_M, assume_unique=True)\n', (1925, 1957), True, 'import numpy as np\n'), ((2022, 2043), 'numpy.isin', 'np.isin', (['AR_f8', 'AR_i8'], {}), '(AR_f8, AR_i8)\n', (2029, 2043), True, 'import numpy as np\n'), ((2108, 2147), 'numpy.isin', 'np.isin', (['AR_f8', 'AR_LIKE_f8'], {'invert': '(True)'}), '(AR_f8, AR_LIKE_f8, invert=True)\n', (2115, 2147), True, 'import numpy as np\n'), ((2213, 2237), 'numpy.union1d', 'np.union1d', (['AR_i8', 'AR_i8'], {}), '(AR_i8, AR_i8)\n', (2223, 2237), True, 'import numpy as np\n'), ((2298, 2320), 'numpy.union1d', 'np.union1d', (['AR_M', 'AR_M'], {}), '(AR_M, AR_M)\n', (2308, 2320), True, 'import numpy as np\n'), ((2390, 2414), 'numpy.union1d', 'np.union1d', (['AR_f8', 'AR_i8'], {}), '(AR_f8, AR_i8)\n', (2400, 2414), True, 'import numpy as np\n'), ((2472, 2498), 'numpy.setdiff1d', 'np.setdiff1d', (['AR_i8', 'AR_i8'], {}), '(AR_i8, AR_i8)\n', (2484, 2498), True, 'import numpy as np\n'), ((2559, 2603), 'numpy.setdiff1d', 'np.setdiff1d', (['AR_M', 'AR_M'], {'assume_unique': '(True)'}), '(AR_M, AR_M, assume_unique=True)\n', (2571, 2603), True, 'import numpy as np\n'), ((2673, 2699), 'numpy.setdiff1d', 'np.setdiff1d', (['AR_f8', 'AR_i8'], {}), '(AR_f8, AR_i8)\n', (2685, 2699), True, 'import numpy as np\n'), ((2757, 2773), 'numpy.unique', 'np.unique', (['AR_f8'], {}), '(AR_f8)\n', (2766, 2773), True, 'import numpy as np\n'), 
((2836, 2865), 'numpy.unique', 'np.unique', (['AR_LIKE_f8'], {'axis': '(0)'}), '(AR_LIKE_f8, axis=0)\n', (2845, 2865), True, 'import numpy as np\n'), ((2922, 2957), 'numpy.unique', 'np.unique', (['AR_f8'], {'return_index': '(True)'}), '(AR_f8, return_index=True)\n', (2931, 2957), True, 'import numpy as np\n'), ((3068, 3108), 'numpy.unique', 'np.unique', (['AR_LIKE_f8'], {'return_index': '(True)'}), '(AR_LIKE_f8, return_index=True)\n', (3077, 3108), True, 'import numpy as np\n'), ((3213, 3250), 'numpy.unique', 'np.unique', (['AR_f8'], {'return_inverse': '(True)'}), '(AR_f8, return_inverse=True)\n', (3222, 3250), True, 'import numpy as np\n'), ((3361, 3403), 'numpy.unique', 'np.unique', (['AR_LIKE_f8'], {'return_inverse': '(True)'}), '(AR_LIKE_f8, return_inverse=True)\n', (3370, 3403), True, 'import numpy as np\n'), ((3508, 3544), 'numpy.unique', 'np.unique', (['AR_f8'], {'return_counts': '(True)'}), '(AR_f8, return_counts=True)\n', (3517, 3544), True, 'import numpy as np\n'), ((3655, 3696), 'numpy.unique', 'np.unique', (['AR_LIKE_f8'], {'return_counts': '(True)'}), '(AR_LIKE_f8, return_counts=True)\n', (3664, 3696), True, 'import numpy as np\n'), ((3801, 3857), 'numpy.unique', 'np.unique', (['AR_f8'], {'return_index': '(True)', 'return_inverse': '(True)'}), '(AR_f8, return_index=True, return_inverse=True)\n', (3810, 3857), True, 'import numpy as np\n'), ((4009, 4070), 'numpy.unique', 'np.unique', (['AR_LIKE_f8'], {'return_index': '(True)', 'return_inverse': '(True)'}), '(AR_LIKE_f8, return_index=True, return_inverse=True)\n', (4018, 4070), True, 'import numpy as np\n'), ((4216, 4271), 'numpy.unique', 'np.unique', (['AR_f8'], {'return_index': '(True)', 'return_counts': '(True)'}), '(AR_f8, return_index=True, return_counts=True)\n', (4225, 4271), True, 'import numpy as np\n'), ((4423, 4483), 'numpy.unique', 'np.unique', (['AR_LIKE_f8'], {'return_index': '(True)', 'return_counts': '(True)'}), '(AR_LIKE_f8, return_index=True, return_counts=True)\n', (4432, 4483), True, 
'import numpy as np\n'), ((4629, 4686), 'numpy.unique', 'np.unique', (['AR_f8'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(AR_f8, return_inverse=True, return_counts=True)\n', (4638, 4686), True, 'import numpy as np\n'), ((4838, 4900), 'numpy.unique', 'np.unique', (['AR_LIKE_f8'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(AR_LIKE_f8, return_inverse=True, return_counts=True)\n', (4847, 4900), True, 'import numpy as np\n'), ((5046, 5122), 'numpy.unique', 'np.unique', (['AR_f8'], {'return_index': '(True)', 'return_inverse': '(True)', 'return_counts': '(True)'}), '(AR_f8, return_index=True, return_inverse=True, return_counts=True)\n', (5055, 5122), True, 'import numpy as np\n'), ((5315, 5401), 'numpy.unique', 'np.unique', (['AR_LIKE_f8'], {'return_index': '(True)', 'return_inverse': '(True)', 'return_counts': '(True)'}), '(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts\n =True)\n', (5324, 5401), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 15 13:51:33 2020
@author: montanelli
"""
# Standard imports:
import numpy as np
from scipy.linalg import toeplitz
def fbmc(F):
    """Enforce the BMC-I symmetry conditions for the DFS coefficients F.

    F is treated as an n x n array of double-Fourier-sphere (DFS)
    coefficients (n assumed even; steps 3-6 additionally index with
    int(n/4), so presumably n is divisible by 4 -- TODO confirm).
    A corrected copy is returned; F itself is not modified.  Each step
    subtracts the minimum Frobenius-norm correction C that makes the
    selected modes satisfy one symmetry constraint A @ X = 0 (or X @ B = 0).
    """
    # Get the dimension:
    n = len(F)
    Fbmc = F.copy()
    # %% Step 1: enforce f_{j,k} = -f_{-j,k} for odd k.
    # Extract odd modes in k and all modes in j:
    idx_k = 2*np.arange(int(n/2)) + 1
    idx_j = np.arange(n)
    idx_k, idx_j = np.meshgrid(idx_k, idx_j)
    Fodd = F[idx_j, idx_k]
    # Matrices: J shifts by one row, fliplr(J) pairs mode j with mode -j.
    I = np.eye(int(n/2)+1, n)
    col = np.zeros(int(n/2)+1)
    col[1] = 1
    row = np.zeros(n)
    J = toeplitz(col, row)
    A = I + np.fliplr(J)
    A[-1, int(n/2)] = 1
    # Minimum Frobenius-norm solution (correction projected onto row space of A):
    C = A.T @ np.linalg.inv(A @ A.T) @ (A @ Fodd)
    Fbmc[idx_j, idx_k] = F[idx_j, idx_k] - C
    # %% Step 2: enforce f_{j,k} = f_{-j,k} for even k.
    # Extract even modes in k and all modes in j, and enforce pole condition:
    idx_k = 2*np.arange(int(n/2))
    idx_j = np.arange(n)
    idx_k, idx_j = np.meshgrid(idx_k, idx_j)
    Feven = F[idx_j, idx_k]
    # Matrices:
    I = np.eye(int(n/2), n)
    col = np.zeros(int(n/2))
    col[1] = 1
    row = np.zeros(n)
    J = toeplitz(col, row)
    A = I - np.fliplr(J)
    A[0, :] = 1
    # Extra row P encodes the pole condition via alternating signs (-1)^j:
    P = np.zeros([1, n])
    P[0, :] = (-1)**np.arange(n)
    A = np.concatenate((P, A), axis=0)
    # Minimum Frobenius-norm solution:
    C = A.T @ np.linalg.inv(A @ A.T) @ (A @ Feven)
    Fbmc[idx_j, idx_k] = F[idx_j, idx_k] - C
    # %% Step 3: enforce Re(f_{j,k}) = -Re(f_{j,-k}) for odd k.
    # Extract odd modes in k and all modes in j:
    idx_k = 2*np.arange(int(n/2)) + 1
    idx_j = np.arange(n)
    idx_k, idx_j = np.meshgrid(idx_k, idx_j)
    Fodd = np.real(Fbmc[idx_j, idx_k])
    # Matrices (constraint acts on columns, hence right-multiplication below):
    I = np.eye(int(n/4), int(n/2))
    B = I + np.fliplr(I)
    # Minimum Frobenius-norm solution:
    C = (Fodd @ B.T) @ np.linalg.inv(B @ B.T) @ B
    Fbmc[idx_j, idx_k] = Fbmc[idx_j, idx_k] - C
    # %% Step 4: enforce Re(f_{j,k}) = Re(f_{j,-k}) for even k.
    # Extract even modes in k (but exclude k=-n/2, 0) and all modes in j:
    idx_k = 2*np.arange(1, int(n/4))
    idx_k = np.concatenate((idx_k, 2*np.arange(int(n/4)+1, int(n/2))))
    idx_j = np.arange(n)
    idx_k, idx_j = np.meshgrid(idx_k, idx_j)
    Feven = np.real(Fbmc[idx_j, idx_k])
    # Matrices:
    I = np.eye(int(n/4)-1, int(n/2)-2)
    B = I - np.fliplr(I)
    # Minimum Frobenius-norm solution:
    C = (Feven @ B.T) @ np.linalg.inv(B @ B.T) @ B
    Fbmc[idx_j, idx_k] = Fbmc[idx_j, idx_k] - C
    # %% Step 5: enforce Im(f_{j,k}) = Im(f_{j,-k}) for odd k.
    # Extract odd modes in k and all modes in j:
    idx_k = 2*np.arange(int(n/2)) + 1
    idx_j = np.arange(n)
    idx_k, idx_j = np.meshgrid(idx_k, idx_j)
    Fodd = np.imag(Fbmc[idx_j, idx_k])
    # Matrices:
    I = np.eye(int(n/4), int(n/2))
    B = I - np.fliplr(I)
    # Minimum Frobenius-norm solution (correction applied to the imaginary part only):
    C = (Fodd @ B.T) @ np.linalg.inv(B @ B.T) @ B
    Fbmc[idx_j, idx_k] = Fbmc[idx_j, idx_k] - 1j*C
    # %% Step 6: enforce Im(f_{j,k}) = -Im(f_{j,-k}) for even k.
    # Extract even modes in k and all modes in j:
    idx_k = 2*np.arange(int(n/2))
    idx_j = np.arange(n)
    idx_k, idx_j = np.meshgrid(idx_k, idx_j)
    Feven = np.imag(Fbmc[idx_j, idx_k])
    # Matrices:
    I = np.eye(int(n/4)+1, int(n/2))
    col = np.zeros(int(n/4)+1)
    col[1] = 1
    row = np.zeros(int(n/2))
    J = toeplitz(col, row)
    B = I + np.fliplr(J)
    B[B==2] = 1
    # Minimum Frobenius-norm solution:
    C = (Feven @ B.T) @ np.linalg.inv(B @ B.T) @ B
    Fbmc[idx_j, idx_k] = Fbmc[idx_j, idx_k] - 1j*C
return Fbmc | [
"numpy.fliplr",
"numpy.real",
"numpy.zeros",
"scipy.linalg.toeplitz",
"numpy.linalg.inv",
"numpy.concatenate",
"numpy.meshgrid",
"numpy.imag",
"numpy.arange"
] | [((509, 521), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (518, 521), True, 'import numpy as np\n'), ((541, 566), 'numpy.meshgrid', 'np.meshgrid', (['idx_k', 'idx_j'], {}), '(idx_k, idx_j)\n', (552, 566), True, 'import numpy as np\n'), ((701, 712), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (709, 712), True, 'import numpy as np\n'), ((721, 739), 'scipy.linalg.toeplitz', 'toeplitz', (['col', 'row'], {}), '(col, row)\n', (729, 739), False, 'from scipy.linalg import toeplitz\n'), ((1122, 1134), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1131, 1134), True, 'import numpy as np\n'), ((1154, 1179), 'numpy.meshgrid', 'np.meshgrid', (['idx_k', 'idx_j'], {}), '(idx_k, idx_j)\n', (1165, 1179), True, 'import numpy as np\n'), ((1311, 1322), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1319, 1322), True, 'import numpy as np\n'), ((1331, 1349), 'scipy.linalg.toeplitz', 'toeplitz', (['col', 'row'], {}), '(col, row)\n', (1339, 1349), False, 'from scipy.linalg import toeplitz\n'), ((1399, 1415), 'numpy.zeros', 'np.zeros', (['[1, n]'], {}), '([1, n])\n', (1407, 1415), True, 'import numpy as np\n'), ((1457, 1487), 'numpy.concatenate', 'np.concatenate', (['(P, A)'], {'axis': '(0)'}), '((P, A), axis=0)\n', (1471, 1487), True, 'import numpy as np\n'), ((1805, 1817), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1814, 1817), True, 'import numpy as np\n'), ((1837, 1862), 'numpy.meshgrid', 'np.meshgrid', (['idx_k', 'idx_j'], {}), '(idx_k, idx_j)\n', (1848, 1862), True, 'import numpy as np\n'), ((1874, 1901), 'numpy.real', 'np.real', (['Fbmc[idx_j, idx_k]'], {}), '(Fbmc[idx_j, idx_k])\n', (1881, 1901), True, 'import numpy as np\n'), ((2390, 2402), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2399, 2402), True, 'import numpy as np\n'), ((2422, 2447), 'numpy.meshgrid', 'np.meshgrid', (['idx_k', 'idx_j'], {}), '(idx_k, idx_j)\n', (2433, 2447), True, 'import numpy as np\n'), ((2460, 2487), 'numpy.real', 'np.real', (['Fbmc[idx_j, idx_k]'], {}), 
'(Fbmc[idx_j, idx_k])\n', (2467, 2487), True, 'import numpy as np\n'), ((2890, 2902), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2899, 2902), True, 'import numpy as np\n'), ((2922, 2947), 'numpy.meshgrid', 'np.meshgrid', (['idx_k', 'idx_j'], {}), '(idx_k, idx_j)\n', (2933, 2947), True, 'import numpy as np\n'), ((2959, 2986), 'numpy.imag', 'np.imag', (['Fbmc[idx_j, idx_k]'], {}), '(Fbmc[idx_j, idx_k])\n', (2966, 2986), True, 'import numpy as np\n'), ((3382, 3394), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (3391, 3394), True, 'import numpy as np\n'), ((3414, 3439), 'numpy.meshgrid', 'np.meshgrid', (['idx_k', 'idx_j'], {}), '(idx_k, idx_j)\n', (3425, 3439), True, 'import numpy as np\n'), ((3452, 3479), 'numpy.imag', 'np.imag', (['Fbmc[idx_j, idx_k]'], {}), '(Fbmc[idx_j, idx_k])\n', (3459, 3479), True, 'import numpy as np\n'), ((3621, 3639), 'scipy.linalg.toeplitz', 'toeplitz', (['col', 'row'], {}), '(col, row)\n', (3629, 3639), False, 'from scipy.linalg import toeplitz\n'), ((752, 764), 'numpy.fliplr', 'np.fliplr', (['J'], {}), '(J)\n', (761, 764), True, 'import numpy as np\n'), ((1362, 1374), 'numpy.fliplr', 'np.fliplr', (['J'], {}), '(J)\n', (1371, 1374), True, 'import numpy as np\n'), ((1436, 1448), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1445, 1448), True, 'import numpy as np\n'), ((1970, 1982), 'numpy.fliplr', 'np.fliplr', (['I'], {}), '(I)\n', (1979, 1982), True, 'import numpy as np\n'), ((2560, 2572), 'numpy.fliplr', 'np.fliplr', (['I'], {}), '(I)\n', (2569, 2572), True, 'import numpy as np\n'), ((3055, 3067), 'numpy.fliplr', 'np.fliplr', (['I'], {}), '(I)\n', (3064, 3067), True, 'import numpy as np\n'), ((3652, 3664), 'numpy.fliplr', 'np.fliplr', (['J'], {}), '(J)\n', (3661, 3664), True, 'import numpy as np\n'), ((847, 869), 'numpy.linalg.inv', 'np.linalg.inv', (['(A @ A.T)'], {}), '(A @ A.T)\n', (860, 869), True, 'import numpy as np\n'), ((1546, 1568), 'numpy.linalg.inv', 'np.linalg.inv', (['(A @ A.T)'], {}), '(A @ A.T)\n', 
(1559, 1568), True, 'import numpy as np\n'), ((2046, 2068), 'numpy.linalg.inv', 'np.linalg.inv', (['(B @ B.T)'], {}), '(B @ B.T)\n', (2059, 2068), True, 'import numpy as np\n'), ((2641, 2663), 'numpy.linalg.inv', 'np.linalg.inv', (['(B @ B.T)'], {}), '(B @ B.T)\n', (2654, 2663), True, 'import numpy as np\n'), ((3131, 3153), 'numpy.linalg.inv', 'np.linalg.inv', (['(B @ B.T)'], {}), '(B @ B.T)\n', (3144, 3153), True, 'import numpy as np\n'), ((3749, 3771), 'numpy.linalg.inv', 'np.linalg.inv', (['(B @ B.T)'], {}), '(B @ B.T)\n', (3762, 3771), True, 'import numpy as np\n')] |
from func import *
import numpy as np
class TwoLayerNet:
    """Two-layer fully connected net: affine -> sigmoid -> affine -> softmax.

    Parameters are kept in ``self.params`` under the keys 'W1', 'b1',
    'W2', 'b2'.  Activation helpers (sigmoid, softmax, cross_entropy_error,
    sigmoid_grad) come from the `func` module.
    """

    def __init__(self, input_size: int, hidden_size: int, output_size: int, weight_init_std: float = 0.01):
        # Weights are drawn from a scaled standard Gaussian; biases start at zero.
        self.params = {
            'W1': weight_init_std * np.random.randn(input_size, hidden_size),
            'b1': np.zeros(hidden_size),
            'W2': weight_init_std * np.random.randn(hidden_size, output_size),
            'b2': np.zeros(output_size),
        }

    def predict(self, x: np.ndarray) -> np.ndarray:
        """Forward pass: return the softmax output of the network for input x."""
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        hidden = sigmoid(np.dot(x, W1) + b1)
        return softmax(np.dot(hidden, W2) + b2)

    def loss(self, x: np.ndarray, t: np.ndarray) -> float:
        """Cross-entropy loss of the network's prediction for x against labels t."""
        return cross_entropy_error(self.predict(x), t)

    def accuracy(self, x: np.ndarray, t: np.ndarray) -> float:
        """Fraction of samples whose predicted class matches the one-hot label t."""
        predicted = np.argmax(self.predict(x), axis=1)
        expected = np.argmax(t, axis=1)
        # Count matching class indices and normalise by the batch size.
        return np.sum(predicted == expected) / x.shape[0]

    def gradient(self, x: np.ndarray, t: np.ndarray) -> dict:
        """Backprop: return gradients of the loss w.r.t. every parameter."""
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        batch_size = x.shape[0]
        # Forward pass, keeping the pre-activation a1 for the backward pass.
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        y = softmax(np.dot(z1, W2) + b2)
        # Backward pass (softmax + cross-entropy gives dy = (y - t) / N).
        dy = (y - t) / batch_size
        grads = {
            'W2': np.dot(z1.T, dy),
            'b2': np.sum(dy, axis=0),
        }
        da1 = sigmoid_grad(a1) * np.dot(dy, W2.T)
        grads['W1'] = np.dot(x.T, da1)
        grads['b1'] = np.sum(da1, axis=0)
        return grads
| [
"numpy.argmax",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.random.randn"
] | [((293, 314), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (301, 314), True, 'import numpy as np\n'), ((423, 444), 'numpy.zeros', 'np.zeros', (['output_size'], {}), '(output_size)\n', (431, 444), True, 'import numpy as np\n'), ((1265, 1285), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (1274, 1285), True, 'import numpy as np\n'), ((1294, 1314), 'numpy.argmax', 'np.argmax', (['t'], {'axis': '(1)'}), '(t, axis=1)\n', (1303, 1314), True, 'import numpy as np\n'), ((2049, 2065), 'numpy.dot', 'np.dot', (['z1.T', 'dy'], {}), '(z1.T, dy)\n', (2055, 2065), True, 'import numpy as np\n'), ((2084, 2102), 'numpy.sum', 'np.sum', (['dy'], {'axis': '(0)'}), '(dy, axis=0)\n', (2090, 2102), True, 'import numpy as np\n'), ((2114, 2130), 'numpy.dot', 'np.dot', (['dy', 'W2.T'], {}), '(dy, W2.T)\n', (2120, 2130), True, 'import numpy as np\n'), ((2182, 2198), 'numpy.dot', 'np.dot', (['x.T', 'da1'], {}), '(x.T, da1)\n', (2188, 2198), True, 'import numpy as np\n'), ((2217, 2236), 'numpy.sum', 'np.sum', (['da1'], {'axis': '(0)'}), '(da1, axis=0)\n', (2223, 2236), True, 'import numpy as np\n'), ((228, 268), 'numpy.random.randn', 'np.random.randn', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (243, 268), True, 'import numpy as np\n'), ((357, 398), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (372, 398), True, 'import numpy as np\n'), ((697, 710), 'numpy.dot', 'np.dot', (['x', 'W1'], {}), '(x, W1)\n', (703, 710), True, 'import numpy as np\n'), ((747, 761), 'numpy.dot', 'np.dot', (['z1', 'W2'], {}), '(z1, W2)\n', (753, 761), True, 'import numpy as np\n'), ((1509, 1523), 'numpy.sum', 'np.sum', (['(y == t)'], {}), '(y == t)\n', (1515, 1523), True, 'import numpy as np\n'), ((1897, 1910), 'numpy.dot', 'np.dot', (['x', 'W1'], {}), '(x, W1)\n', (1903, 1910), True, 'import numpy as np\n'), ((1946, 1960), 'numpy.dot', 'np.dot', (['z1', 'W2'], {}), '(z1, W2)\n', 
(1952, 1960), True, 'import numpy as np\n')] |
import json
import os
os.environ["OMP_NUM_THREADS"]="1"
# os.environ["MUJOCO_GL"]="osmesa"
import numpy as np
import multiprocessing as mp
import math
from trajopt.envs.mujoco_env import MujocoEnv
import trimesh
from pose_model_estimator import get_mesh_list, compute_mujoco_int_transform
import shutil
from xml.dom import minidom
from trajopt.utils import generate_perturbed_actions
from trajopt.sandbox.examples.herb_pushing_mppi import convex_decomp_target_object_env
import random
import cv2
from pyquaternion import Quaternion
import pickle
from optparse import OptionParser
import traceback
import pybullet as p
from dm_control.mujoco.engine import Camera
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
from functools import partial
from trajopt.envs.herb_pushing_env import HerbEnv
# Objects available for scene generation.  The first six entries are
# "downloaded" (non-YCB) assets; everything appended below comes from the
# YCB object set.  Code elsewhere slices target_objects[6:] to decide which
# asset library a mesh belongs to, so the split must stay at index 6.
target_objects=["maytoni",
                "potted_plant_2",
                "lamp_1",
                "cup_1",
                "vase_1",
                "vase_2"]
# YCB objects (named by their YCB dataset identifiers).
target_objects+=["002_master_chef_can", "003_cracker_box", "004_sugar_box", "005_tomato_soup_can","006_mustard_bottle","007_tuna_fish_can", "008_pudding_box", "009_gelatin_box",
    "010_potted_meat_can",
    "011_banana",
    "012_strawberry",
    "013_apple",
    "014_lemon",
    "015_peach",
    "016_pear",
    "017_orange",
    "018_plum",
    "019_pitcher_base",
    "021_bleach_cleanser",
    "024_bowl",
    "025_mug",
    "026_sponge",
    "035_power_drill",
    "036_wood_block",
    "053_mini_soccer_ball",
    "055_baseball",
    "056_tennis_ball",
    "057_racquetball",
    "061_foam_brick",
    "077_rubiks_cube"]
# Command-line options for the scene-generation script.
parser = OptionParser()
# Path to the ShapeNetCore.v2 dataset root.
parser.add_option("--shapenet_filepath", dest="shapenet_filepath", default='/media/willie/fda8d90f-998c-425a-9823-a20479be9e98/data/ShapeNetCore.v2/')
# Path to pre-computed convex decompositions of the ShapeNet objects.
parser.add_option("--shapenet_decomp_filepath", dest="shapenet_decomp_filepath", default='/media/willie/fda8d90f-998c-425a-9823-a20479be9e98/data/shapenet_conv_decmops/')
# Root project directory.
parser.add_option("--top_dir", dest="top_dir", default='/media/willie/fda8d90f-998c-425a-9823-a20479be9e98/data/cluttered_manipulation_scenes/')
# Root project dir + /inhand_datagen (holds the instance/table split JSON files).
parser.add_option("--instances_dir", dest="instances_dir", default='/home/willie/workspace/SSC/inhand_datagen')
# Directory the generated data is written to.
parser.add_option("--save_dir", dest="save_dir", default='/media/willie/fda8d90f-998c-425a-9823-a20479be9e98/data/cluttered_manipulation_scenes/')
parser.add_option("--num_threads", dest="num_threads", type="int", default=12)
(options, args) = parser.parse_args()
def add_object(scene_name, object_name, mesh_name, xpos, y_pos, size, color, rot, other_objects, run_id, top_dir):
    """Append a single free-floating mesh object to a MuJoCo scene XML.

    Registers a <mesh> asset plus a <body> (with one geom and a free joint)
    in the file at `scene_name`, which is rewritten in place.

    Args:
        scene_name: path of the scene XML file (modified in place).
        object_name: suffix used for the generated mesh/body/geom/joint names.
        mesh_name: asset identifier; resolved from the YCB library when it is
            one of the YCB entries of ``target_objects`` (index 6 onward),
            otherwise from the downloaded assets.
        xpos, y_pos: table-plane position of the body.
        size: uniform scale applied to the mesh.
        color: sequence of 3 RGB floats for the geom.
        rot: sequence of 3 Euler angles for the body.
        other_objects: unused here; kept for signature compatibility.
        run_id: forwarded to compute_mujoco_int_transform.
        top_dir: project root used to resolve asset paths.

    Returns:
        (body_name, geom_names): name of the new body and list with the
        single generated geom name.
    """
    # BUG FIX: the library lookup must key on the MESH name, not the
    # generated object name.  Callers pass object_name like '0', which is
    # never in target_objects, so YCB meshes previously resolved to the
    # wrong (downloaded-assets) path.
    if mesh_name in target_objects[6:]:
        mesh_filename = os.path.join(top_dir, f'herb_reconf/assets/ycb_objects/{mesh_name}/google_16k/nontextured.stl')
        asset_type = 'ycb'
    else:
        mesh_filename = os.path.join(top_dir, f'herb_reconf/cluttered_scenes/assets/downloaded_assets/{mesh_name}/scene.stl')
        asset_type = 'downloaded'
    # The returned center is not used here, but the call is kept because it
    # may produce side effects used downstream -- TODO confirm.
    compute_mujoco_int_transform(mesh_filename, run_id, size=size)
    mesh = trimesh.load(mesh_filename)
    # Lift the body so the mesh's lowest vertex sits at table height (z = 0.3).
    z_offset = 0.3 - mesh.bounds[0, 2]

    xmldoc = minidom.parse(scene_name)

    # Register the mesh asset.
    assets = xmldoc.getElementsByTagName('asset')[0]
    new_mesh = xmldoc.createElement('mesh')
    new_mesh.setAttribute('name', f'gen_mesh_{object_name}')
    new_mesh.setAttribute('class', 'geom0')
    new_mesh.setAttribute('scale', f'{size} {size} {size}')
    if asset_type == 'ycb':
        new_mesh.setAttribute('file', f'ycb_objects/{mesh_name}/google_16k/nontextured.stl')
    else:
        new_mesh.setAttribute('file', f'downloaded_assets/{mesh_name}/scene.stl')
    assets.appendChild(new_mesh)

    # Create the body holding one mesh geom and a free joint.
    world_body = xmldoc.getElementsByTagName('worldbody')[0]
    new_body = xmldoc.createElement('body')
    body_name = f'gen_body_{object_name}'
    new_body.setAttribute('name', body_name)
    new_body.setAttribute('pos', f'{xpos} {y_pos} {z_offset}')
    new_body.setAttribute('euler', f'{rot[0]} {rot[1]} {rot[2]}')

    geom_names = []
    new_geom = xmldoc.createElement('geom')
    geom_name = f'gen_geom_{object_name}'
    geom_names.append(geom_name)
    new_geom.setAttribute('name', geom_name)
    new_geom.setAttribute('class', '/')
    new_geom.setAttribute('type', 'mesh')
    new_geom.setAttribute('rgba', f'{color[0]} {color[1]} {color[2]} 1')
    new_geom.setAttribute('mesh', f'gen_mesh_{object_name}')
    new_body.appendChild(new_geom)

    new_joint = xmldoc.createElement('joint')
    new_joint.setAttribute('name', f'gen_joint_{object_name}')
    new_joint.setAttribute('class', '/')
    new_joint.setAttribute('type', 'free')
    new_body.appendChild(new_joint)
    world_body.appendChild(new_body)

    with open(scene_name, "w") as f:
        xmldoc.writexml(f)
    return body_name, geom_names
def transform_to_camera_vector(vector, camera_pos, lookat_pos, camera_up_vector):
    """Map a 3D world-frame point into the camera frame.

    The view matrix comes from pybullet and is column-major, hence the
    Fortran-order reshape.  Returns the transformed 3-vector.
    """
    view = np.array(
        p.computeViewMatrix(camera_pos, lookat_pos, camera_up_vector)
    ).reshape(4, 4, order='F')
    homogeneous = np.concatenate((vector, np.array([1])))
    return view.dot(homogeneous)[:3]
#transform robot hand meshes into current pose
def make_known_meshes(known_meshes, physics, geom_names):
    """Return copies of `known_meshes` posed at their current geom transforms.

    For each mesh, the rotation from ``physics.named.data.geom_xmat`` is
    applied first, then the translation from ``geom_xpos`` (both looked up
    by the matching entry of `geom_names`).  The input meshes are not mutated.
    """
    posed_meshes = []
    for idx, mesh in enumerate(known_meshes):
        geom = geom_names[idx]
        posed = mesh.copy()
        # Rotate about the origin first ...
        rotation = np.eye(4)
        rotation[0:3, 0:3] = np.reshape(physics.named.data.geom_xmat[geom], (3, 3))
        posed.apply_transform(rotation)
        # ... then translate to the geom's world position.
        translation = np.eye(4)
        translation[0:3, 3] = physics.named.data.geom_xpos[geom]
        posed.apply_transform(translation)
        posed_meshes.append(posed)
    return posed_meshes
#enable/disable gravity in xml
def set_gravity(scene_name, set_unset):
    """Toggle gravity in a MuJoCo scene XML, rewriting the file in place.

    A truthy `set_unset` enables standard gravity ("0 0 -9.81"); a falsy
    value zeroes it out ("0 0 0").
    """
    doc = minidom.parse(scene_name)
    option_node = doc.getElementsByTagName('option')[0]
    gravity = "0 0 -9.81" if set_unset else "0 0 0"
    option_node.setAttribute('gravity', gravity)
    with open(scene_name, "w") as f:
        doc.writexml(f)
def add_camera(scene_name, cam_name, cam_pos, cam_target, cam_id):
    """Add a target-tracking camera to a MuJoCo scene XML (in place).

    Appends a <camera> in 'targetbody' mode at `cam_pos` plus an invisible
    target <body> named ``added_cam_target_<cam_id>`` at `cam_target` for
    the camera to track.
    """
    doc = minidom.parse(scene_name)
    world = doc.getElementsByTagName('worldbody')[0]
    target_name = f'added_cam_target_{cam_id}'

    cam = doc.createElement('camera')
    for attr, value in (('name', cam_name),
                        ('mode', 'targetbody'),
                        ('pos', f'{cam_pos[0]} {cam_pos[1]} {cam_pos[2]}'),
                        ('target', target_name)):
        cam.setAttribute(attr, value)
    world.appendChild(cam)

    # Invisible, non-colliding box body the camera points at.
    body = doc.createElement('body')
    body.setAttribute('name', target_name)
    body.setAttribute('pos', f'{cam_target[0]} {cam_target[1]} {cam_target[2]}')
    geom = doc.createElement('geom')
    for attr, value in (('name', f'added_cam_target_geom_{cam_id}'),
                        ('class', '/'),
                        ('type', 'box'),
                        ('contype', '0'),
                        ('conaffinity', '0'),
                        ('group', '1'),
                        ('size', "1 1 1"),
                        ('rgba', '0 0 0 0')):
        geom.setAttribute(attr, value)
    body.appendChild(geom)
    world.appendChild(body)

    with open(scene_name, "w") as f:
        doc.writexml(f)
def add_objects(scene_name, object_name, mesh_names, pos, size, color, rot, run_id, other_objects):
    """Insert a multi-mesh free body into a MuJoCo scene XML (in place).

    One <mesh> asset is registered per entry of `mesh_names` and a single
    body named ``gen_body_<object_name>`` is added with one geom per mesh
    plus a free joint.  Every generated geom name is also appended to
    `other_objects` (mutated in place).

    Returns:
        (body_name, geom_names): the new body's name and the list of the
        generated geom names.
    """
    doc = minidom.parse(scene_name)

    # Register one mesh asset per part of the object.
    asset_node = doc.getElementsByTagName('asset')[0]
    for mesh_ind, mesh_file in enumerate(mesh_names):
        mesh_node = doc.createElement('mesh')
        mesh_node.setAttribute('name', f'gen_mesh_{object_name}_{mesh_ind}')
        mesh_node.setAttribute('class', 'geom0')
        mesh_node.setAttribute('scale', f'{size} {size} {size}')
        mesh_node.setAttribute('file', mesh_file)
        asset_node.appendChild(mesh_node)

    body_name = f'gen_body_{object_name}'
    body_node = doc.createElement('body')
    body_node.setAttribute('name', body_name)
    body_node.setAttribute('pos', f'{pos[0]} {pos[1]} {pos[2]}')
    body_node.setAttribute('euler', f'{rot[0]} {rot[1]} {rot[2]}')

    # One geom per registered mesh asset.
    geom_names = []
    for geom_ind in range(len(mesh_names)):
        geom_name = f'gen_geom_{object_name}_{geom_ind}'
        other_objects.append(geom_name)
        geom_names.append(geom_name)
        geom_node = doc.createElement('geom')
        geom_node.setAttribute('name', geom_name)
        geom_node.setAttribute('class', '/')
        geom_node.setAttribute('type', 'mesh')
        geom_node.setAttribute('rgba', f'{color[0]} {color[1]} {color[2]} 1')
        geom_node.setAttribute('mesh', f'gen_mesh_{object_name}_{geom_ind}')
        body_node.appendChild(geom_node)

    joint_node = doc.createElement('joint')
    joint_node.setAttribute('name', f'gen_joint_{object_name}')
    joint_node.setAttribute('class', '/')
    joint_node.setAttribute('type', 'free')
    joint_node.setAttribute('damping', '0.001')
    body_node.appendChild(joint_node)

    world_body = doc.getElementsByTagName('worldbody')[0]
    world_body.appendChild(body_node)

    with open(scene_name, "w") as f:
        doc.writexml(f)
    return body_name, geom_names
def move_object_in_xml(scene_name, object_name, object_pos, object_rot):
    """Set the pos/quat of the worldbody child named `object_name` (in place).

    Args:
        scene_name: path of the scene XML file (rewritten in place).
        object_name: value of the 'name' attribute of the target element.
        object_pos: sequence of 3 position components.
        object_rot: sequence of 4 quaternion components (w x y z per MuJoCo
            convention -- TODO confirm with callers).

    Raises:
        ValueError: if no worldbody child has the requested name.
            (The old code fell through and silently mutated the LAST child,
            and crashed on elements without a 'name' attribute.)
    """
    xmldoc = minidom.parse(scene_name)
    world_body = xmldoc.getElementsByTagName('worldbody')[0]
    target = None
    for node in world_body.childNodes:
        # getAttribute returns '' for missing attributes instead of raising.
        if isinstance(node, minidom.Element) and node.getAttribute('name') == object_name:
            target = node
            break
    if target is None:
        raise ValueError(f"no worldbody element named {object_name!r} in {scene_name}")
    target.setAttribute('pos', f'{object_pos[0]} {object_pos[1]} {object_pos[2]}')
    target.setAttribute('quat', f'{object_rot[0]} {object_rot[1]} {object_rot[2]} {object_rot[3]}')
    with open(scene_name, "w") as f:
        xmldoc.writexml(f)
def move_object(e, ind, pos):
    """Teleport free object `ind` to `pos` and zero its velocity.

    The qpos/qvel slice offsets (22 + 7*ind, 21 + 6*ind) encode the layout
    of this environment's state vector: presumably the robot occupies the
    leading entries and each free object contributes 7 qpos / 6 qvel
    entries -- TODO confirm against the scene XML.
    """
    qpos = e.data.qpos.ravel().copy()
    qvel = e.data.qvel.ravel().copy()
    pos_start = 22 + 7 * ind
    vel_start = 21 + 6 * ind
    qpos[pos_start:pos_start + 3] = pos
    qvel[vel_start:vel_start + 6] = 0
    e.set_state(qpos, qvel)
def get_visible_pixels(env, object_num):
    """Count pixels equal to `object_num` in a 480x640 segmentation render.

    Renders camera 1 of `env` with segmentation=True and counts matching
    entries.  NOTE(review): if the segmentation render returns an extra
    channel dimension, the count spans all channels -- confirm against the
    renderer's output shape.
    """
    seg_map = env.model.render(
        height=480, width=640, camera_id=1, depth=False, segmentation=True)
    return np.sum(seg_map == object_num)
def get_random_object_params(upright_chance):
min_size=0.5
max_size=4.0
object_color=np.random.uniform(size=3)
object_size=np.random.uniform(low=min_size, high=max_size, size=1)[0]
if random.random()<upright_chance:
object_rot=np.array([0,0,np.random.uniform(low=0, high=2*math.pi, size=1)[0]])
else:
object_rot=np.random.uniform(low=0, high=2*math.pi, size=3)
return object_size, object_color, object_rot
def get_num_contacts(e, body_num):
    """Count contact slots involving generated object `body_num`.

    Scans ``e.model._data.contact``; for each contact, both geom ids
    (indices 10 and 11 of the contact record) are resolved to names, and a
    hit is counted per geom whose name looks like 'gen_geom_object_<num>_...'
    with a matching object number.  A contact between two generated objects
    therefore counts twice.
    """
    count = 0
    for contact in e.model._data.contact:
        # Indices 10 and 11 hold the ids of the two geoms in contact.
        for geom_id in (contact[10], contact[11]):
            name = e.model.model.id2name(geom_id, "geom")
            if 'gen_geom_object' in name and int(name.split('_')[3]) == body_num:
                count += 1
    return count
#@profile
def gen_data(scene_num, shapenet_filepath, shapenet_decomp_filepath, instances_dir, top_dir, save_dir, target_object, task, num_generated, run_id):
global_gauss_std=np.array([0.25, 0.25])
if task=='hard_pushing' or task=='grasping':
global_gauss_center=np.array([0.0, 0])
else:
global_gauss_center=np.array([0.05, -0.35])
prob_upright=0.8
max_height=0.35
lib_type='downloaded' if target_object in target_objects[0:6] else 'ycb'
np.random.seed(scene_num)
target_obj_geom_id=72
train_or_test='training_set'
num_images=10000000
box=trimesh.creation.box(np.array([0.1, 0.1, 0.1]))
min_object_scale=1.0
max_object_scale=4.0
print(f'generating {train_or_test} dataset')
training_instances_filename = os.path.join(instances_dir, 'training_instances.json')
test_instances_filename = os.path.join(instances_dir, 'novel_class_test_instances.json')
train_models = json.load(open(training_instances_filename))
test_models = json.load(open(test_instances_filename))
object_ids = train_models if train_or_test == 'training_set' else test_models
training_tables_filename = os.path.join(instances_dir, 'training_shapenet_tables.json')
test_tables_filename = os.path.join(instances_dir, 'test_shapenet_tables.json')
train_tables = json.load(open(training_tables_filename))
test_tables = json.load(open(test_tables_filename))
valid_tables = train_tables if train_or_test == 'training_set' else test_tables
new_object_ids=[]
for cat in object_ids:
for obj_id in object_ids[cat]:
new_object_ids.append((cat, obj_id))
object_ids=new_object_ids
temp = json.load(open(shapenet_filepath + 'taxonomy.json'))
taxonomy_dict = {x['name'] : x['synsetId'] for x in temp}
# weirdly, the synsets in the taxonomy file are not the same as what's in the ShapeNetCore.v2 directory. Filter this out
synsets_in_dir = os.listdir(shapenet_filepath)
synsets_in_dir.remove('taxonomy.json')
taxonomy_dict = {k:v for (k,v) in taxonomy_dict.items() if v in synsets_in_dir}
# selected_index = np.random.randint(0, object_ids.shape[0])
# useful synsets for simulation
useful_named_synsets = [
'ashcan,trash can,garbage can,wastebin,ash bin,ash-bin,ashbin,dustbin,trash barrel,trash bin',
'bag,traveling bag,travelling bag,grip,suitcase',
'birdhouse',
'bottle',
'bowl',
'camera,photographic camera',
'can,tin,tin can',
'cap',
'clock',
'computer keyboard,keypad',
'dishwasher,dish washer,dishwashing machine',
'display,video display',
'helmet',
'jar',
'knife',
'laptop,laptop computer',
'loudspeaker,speaker,speaker unit,loudspeaker system,speaker system',
'microwave,microwave oven',
'mug',
'pillow',
'printer,printing machine',
'remote control,remote',
'telephone,phone,telephone set',
'cellular telephone,cellular phone,cellphone,cell,mobile phone',
'washer,automatic washer,washing machine'
]
included_meshes=[]
geom_names=[]
pred_obj_meshes=[]
if os.path.exists(save_dir+f'/{target_object}/'):
shutil.rmtree(save_dir+f'/{target_object}/')
os.mkdir(save_dir+f'/{target_object}/')
# num_generated=thread_num
# view_num=thread_num
generated=False
occlusions=[]
thread_num=scene_num
env_info={} #name: (occlusion level, start poses)
print('a')
while not generated:#num_generated<num_images/num_threads:
try:
scene_xml_file=os.path.join(top_dir, f'herb_reconf/{task}_scene.xml')
decomp_scene_xml_file=os.path.join(save_dir, f'{target_object}_{task}_{num_generated}_decomp_scene.xml')
shutil.copyfile(scene_xml_file, decomp_scene_xml_file)
gen_scene_xml_file=os.path.join(save_dir, f'{target_object}_{task}_{num_generated}_scene.xml')
shutil.copyfile(scene_xml_file, gen_scene_xml_file)
#print('b')
if target_object in target_objects[6:]:
mesh_filename=os.path.join(top_dir, f'herb_reconf/assets/ycb_objects/{target_object}/google_16k/nontextured.stl')
else:
mesh_filename=os.path.join(top_dir, f'herb_reconf/cluttered_scenes/assets/downloaded_assets/{target_object}/scene.stl')
#choose num objects, pos dist center (on table)
num_objects=random.randint(3,10)
_, color, rot=get_random_object_params(prob_upright)
if task=='hard_pushing' or task=="grasping":
#add_object(decomp_scene_xml_file, '0', target_object, 0, 0, 1, color, rot, [], id, top_dir)
add_object(gen_scene_xml_file, '0', target_object, 0, 0, 1, color, rot, [], id, top_dir)
#scene_file, _, other_objects=convex_decomp_target_object_env(gen_scene_xml_file, 'gen_body_0', mesh_filename, save_dir, run_id, top_dir, new_scene_name=decomp_scene_xml_file)
elif task=='easy_pushing':
#add_object(decomp_scene_xml_file, '0', target_object, -0.05,-0.35, 1, color, rot, [], id, top_dir)
add_object(gen_scene_xml_file, '0', target_object, -0.05,-0.35, 1, color, rot, [], id, top_dir)
scene_file, _, other_objects=convex_decomp_target_object_env(gen_scene_xml_file, 'gen_body_0', mesh_filename, save_dir, run_id, top_dir, new_scene_name=decomp_scene_xml_file)
#other_objects=['gen_geom_0']
print('c')
#drop one by one onto table
if target_object in target_objects[6:]:
push_object=trimesh.load(os.path.join(top_dir, f'herb_reconf/assets/ycb_objects/{target_object}/google_16k/nontextured.stl'))
else:
push_object=trimesh.load(os.path.join(top_dir, f'herb_reconf/cluttered_scenes/assets/downloaded_assets/{target_object}/scene.stl'))
e=HerbEnv(decomp_scene_xml_file, box, task=task, obs=False, push_mesh_vertices=box, skip=1)
sigma=1.0*np.ones(e.action_dim)
sigma[0:7]=sigma[0:7]*(e.action_space.high[0:7]-e.action_space.low[0:7])
sigma[0]=1*sigma[0]
sigma[1]=0.5*sigma[1]
sigma[2]=2*sigma[1]
sigma[3]=0.5*sigma[3]
sigma[7:]=sigma[7:]*(e.action_space.high[14:22]-e.action_space.low[14:22])
sigma[7:]=50*sigma[7:]
sigma[5]=sigma[5]
sigma[-2:]=20*sigma[-2:]
filter_coefs = [sigma, 0.25, 0.8, 0.0, np.concatenate((e.action_space.high[0:7]-e.action_space.low[0:7], e.action_space.high[14:22]-e.action_space.low[14:22])), np.concatenate((e.action_space.low[0:7], e.action_space.low[14:22])), np.concatenate((e.action_space.high[0:7], e.action_space.high[14:22]))]
state=e.get_env_state().copy()
e.set_env_state(state)
base_act=np.repeat(np.expand_dims(state['qp'][:15], axis=0), 1000, axis=0)
act, vel=generate_perturbed_actions(state, base_act, filter_coefs, 0.5, 0.15, state['qp'][4], 1.59, hand_open=0, move=False)
for added_object_ind in range(num_objects):
for step in range(100):
# if step%50==0:
# rgb=e.model.render(height=480, width=640, camera_id=0, depth=False, segmentation=False)
# cv2.imshow('rbg', rgb)
# cv2.waitKey(20)
e.step(act[step])
state=e.get_env_state().copy()
real_env=HerbEnv(gen_scene_xml_file, box, task=task, obs=False, push_mesh_vertices=box, skip=1)
real_env.set_env_state(state)
real_env.sim.physics.forward()
print('d')
rgb=real_env.model.render(height=480, width=640, camera_id=1, depth=False, segmentation=False)
cv2.imshow('first rbg', rgb)
cv2.waitKey(20)
visible_pix=get_visible_pixels(real_env, 72)
unobscured_visible_pix=visible_pix
#choose objects, add to scene
added_objects=0
obj_mesh_filenames=[]
obj_initial_positions=[]
obj_colors=[]
obj_scales=[]
print('e')
while added_objects<num_objects:
selected_index = np.random.randint(0, len(object_ids))
obj_id = object_ids[selected_index]
obj_cat=taxonomy_dict[obj_id[0]]
obj_mesh_filename = shapenet_filepath + f'/{obj_cat}/{obj_id[1]}/models/model_normalized.obj'
object_mesh=trimesh.load(obj_mesh_filename)
stl_obj_mesh_filename=os.path.join(top_dir, f'assets/model_normalized_{thread_num}_{added_objects}.stl')
object_mesh.export(stl_obj_mesh_filename)
object_color=np.random.uniform(size=3)
object_size=np.random.uniform(low=min_object_scale, high=max_object_scale, size=1)[0]
diag=np.sqrt(np.sum(np.square(object_mesh.bounds[0]-object_mesh.bounds[1])))
object_size*=0.1/diag
object_rot=np.random.uniform(low=0, high=2*math.pi, size=3)
if random.random()<prob_upright:
object_rot[:2]=0
object_drop_pos=np.random.normal(loc=global_gauss_center, scale=global_gauss_std)
object_mesh=trimesh.load(stl_obj_mesh_filename)
if object_mesh.faces.shape[0]>200000:
print('too many mesh faces!')
continue
obj_mesh_filenames+=[obj_mesh_filename]
obj_initial_positions.append(object_drop_pos)
obj_colors.append(object_color)
obj_scales.append(object_size)
#load conv decomp meshes
mesh_names=[]
mesh_masses=[]
decomp_shapenet_decomp_filepath=os.path.join(shapenet_decomp_filepath, f'{obj_cat}/{obj_id[1]}')
for mesh_file in os.listdir(decomp_shapenet_decomp_filepath):
decomp_object_mesh=trimesh.load(os.path.join(decomp_shapenet_decomp_filepath, mesh_file))
trimesh.repair.fix_inversion(decomp_object_mesh)
if decomp_object_mesh.faces.shape[0]>5 and decomp_object_mesh.mass>10e-8:
obj_mesh_filename=os.path.join(shapenet_decomp_filepath, f'{obj_cat}/{obj_id[1]}', mesh_file[:-3]+'stl')
decomp_object_mesh.export(obj_mesh_filename)
mesh_names.append(obj_mesh_filename)
mesh_masses.append(decomp_object_mesh.mass)
if len(mesh_names)>25:
heavy_inds=np.argsort(np.array(mesh_masses))
new_mesh_names=[]
for ind in range(25):
new_mesh_names.append(mesh_names[heavy_inds[-ind]])
mesh_names=new_mesh_names
add_objects(decomp_scene_xml_file, f'object_{added_objects}_{thread_num}', mesh_names, [50,50,-5-added_objects], object_size, object_color, object_rot, thread_num, other_objects)
add_objects(gen_scene_xml_file, f'object_{added_objects}_{thread_num}', [stl_obj_mesh_filename], [50,50,-5-added_objects], object_size, object_color, object_rot, thread_num, other_objects)
added_objects+=1
#drop one by one onto table
e=HerbEnv(decomp_scene_xml_file, box, task=task, obs=False, push_mesh_vertices=box, skip=1)
new_state=e.get_env_state().copy()
new_state['qp'][:state['qp'].shape[0]]=state['qp']
e.set_env_state(new_state)
e.sim.physics.forward()
for added_object_ind in range(num_objects):
obj_drop_pos=obj_initial_positions[added_object_ind]
max_h=0.75+max_height
min_h=max_height
move_object(e, added_object_ind, [obj_drop_pos[0], obj_drop_pos[1], max_h])
e.sim.physics.forward()
a=e.model._data.contact
init_num_contacts=get_num_contacts(e, added_object_ind)
best_h=max_h
for i in range(10):
height=(max_h+min_h)/2.0
move_object(e, added_object_ind, [obj_drop_pos[0], obj_drop_pos[1], height])
e.sim.physics.forward()
num_contacts=get_num_contacts(e, added_object_ind)
if num_contacts>init_num_contacts:
min_h=height
best_h=min_h
else:
max_h=height
move_object(e, added_object_ind, [obj_drop_pos[0], obj_drop_pos[1], height])
for step in range(100):
if step%10==0:
rgb=e.model.render(height=480, width=640, camera_id=0, depth=False, segmentation=False)
cv2.imshow('rbg', rgb)
cv2.waitKey(20)
e.step(act[step])
state=e.get_env_state().copy()
real_env=HerbEnv(gen_scene_xml_file, box, task=task, obs=False, push_mesh_vertices=box, skip=1)
real_env.set_env_state(state)
real_env.sim.physics.forward()
rgb=real_env.model.render(height=480, width=640, camera_id=1, depth=False, segmentation=False)
cv2.imshow('final rbg', rgb)
cv2.waitKey(20)
rgb=e.model.render(height=480, width=640, camera_id=1, depth=False, segmentation=False)
cv2.imshow('final decomp', rgb)
cv2.imwrite(save_dir+f'/{target_object}_{task}_{num_generated}_img.png', rgb)
cv2.waitKey(20)
visible_pix=get_visible_pixels(real_env, 72)
with open(save_dir+f'/{target_object}_{task}_{num_generated}_scene_info.p', 'wb') as save_file:
pickle.dump((visible_pix/unobscured_visible_pix, state), save_file)
num_generated+=1
except:
print('gen error!')
traceback.print_exc()
def abortable_worker(func, *args, **kwargs):
    """Run ``func(*args, **kwargs)`` on a single worker thread with a time limit.

    The ``timeout`` keyword (seconds; default ``None`` = wait forever) is
    consumed here and NOT forwarded to *func*; every other keyword argument
    is passed through to *func* (the original silently discarded them).
    On timeout, ``multiprocessing.TimeoutError`` is re-raised so the caller
    (e.g. an outer ``Pool.apply_async``) sees the failure.

    Returns whatever *func* returns.
    """
    timeout = kwargs.pop('timeout', None)
    pool = ThreadPool(1)
    try:
        res = pool.apply_async(func, args=args, kwds=kwargs)
        try:
            return res.get(timeout)  # wait up to `timeout` seconds for func
        except multiprocessing.TimeoutError:
            print("Aborting due to timeout")
            raise
    finally:
        # The original leaked the pool. terminate() is non-blocking for
        # ThreadPool workers and releases the pool's bookkeeping threads
        # whether func finished or timed out.
        pool.terminate()
if __name__ == '__main__':
    # Single-scene run: generate one 'hard_pushing' scene for the "maytoni"
    # object.  NOTE(review): positional args appear to be (scene_num,
    # shapenet_filepath, shapenet_decomp_filepath, instances_dir, top_dir,
    # save_dir, target_object, task, ?, ?) -- the commented sweep below
    # passes a different tail, so confirm the last two zeros against
    # gen_data's signature.
    gen_data(0, options.shapenet_filepath, options.shapenet_decomp_filepath, options.instances_dir, options.top_dir, options.save_dir, "maytoni", 'hard_pushing', 0, 0)
    # Original multi-process sweep over every target object / scene number,
    # bounding each task to 240 s via abortable_worker; kept for reference.
    # num_processes=options.num_threads
    # pool = mp.Pool(processes=num_processes, maxtasksperchild=1)
    # for target_object_ind in range(len(target_objects)):
    #     for scene_num in range(1000):
    #         abortable_func = partial(abortable_worker, gen_data, timeout=240)
    #         pool.apply_async(abortable_func, args=(scene_num, options.shapenet_filepath, options.shapenet_decomp_filepath, options.instances_dir, options.top_dir, options.save_dir, 0.0))
    #         pool.close()
    #         pool.join()
    # parallel_runs = [pool.apply_async(gen_data, args= for i in range(num_processes)]
    # results = [p.get() for p in parallel_runs]
| [
"pybullet.computeViewMatrix",
"trajopt.utils.generate_perturbed_actions",
"cv2.imshow",
"numpy.array",
"os.path.exists",
"os.listdir",
"xml.dom.minidom.parse",
"numpy.reshape",
"numpy.random.seed",
"os.mkdir",
"numpy.concatenate",
"traceback.print_exc",
"cv2.waitKey",
"random.randint",
"... | [((1585, 1599), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (1597, 1599), False, 'from optparse import OptionParser\n'), ((3183, 3245), 'pose_model_estimator.compute_mujoco_int_transform', 'compute_mujoco_int_transform', (['mesh_filename', 'run_id'], {'size': 'size'}), '(mesh_filename, run_id, size=size)\n', (3211, 3245), False, 'from pose_model_estimator import get_mesh_list, compute_mujoco_int_transform\n'), ((3306, 3333), 'trimesh.load', 'trimesh.load', (['mesh_filename'], {}), '(mesh_filename)\n', (3318, 3333), False, 'import trimesh\n'), ((4964, 4989), 'xml.dom.minidom.parse', 'minidom.parse', (['scene_name'], {}), '(scene_name)\n', (4977, 4989), False, 'from xml.dom import minidom\n'), ((8124, 8185), 'pybullet.computeViewMatrix', 'p.computeViewMatrix', (['camera_pos', 'lookat_pos', 'camera_up_vector'], {}), '(camera_pos, lookat_pos, camera_up_vector)\n', (8143, 8185), True, 'import pybullet as p\n'), ((9190, 9215), 'xml.dom.minidom.parse', 'minidom.parse', (['scene_name'], {}), '(scene_name)\n', (9203, 9215), False, 'from xml.dom import minidom\n'), ((9546, 9571), 'xml.dom.minidom.parse', 'minidom.parse', (['scene_name'], {}), '(scene_name)\n', (9559, 9571), False, 'from xml.dom import minidom\n'), ((10838, 10863), 'xml.dom.minidom.parse', 'minidom.parse', (['scene_name'], {}), '(scene_name)\n', (10851, 10863), False, 'from xml.dom import minidom\n'), ((15650, 15675), 'xml.dom.minidom.parse', 'minidom.parse', (['scene_name'], {}), '(scene_name)\n', (15663, 15675), False, 'from xml.dom import minidom\n'), ((16714, 16740), 'numpy.sum', 'np.sum', (['(segs == object_num)'], {}), '(segs == object_num)\n', (16720, 16740), True, 'import numpy as np\n'), ((16837, 16862), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(3)'}), '(size=3)\n', (16854, 16862), True, 'import numpy as np\n'), ((17863, 17885), 'numpy.array', 'np.array', (['[0.25, 0.25]'], {}), '([0.25, 0.25])\n', (17871, 17885), True, 'import numpy as np\n'), ((18171, 
18196), 'numpy.random.seed', 'np.random.seed', (['scene_num'], {}), '(scene_num)\n', (18185, 18196), True, 'import numpy as np\n'), ((18489, 18543), 'os.path.join', 'os.path.join', (['instances_dir', '"""training_instances.json"""'], {}), "(instances_dir, 'training_instances.json')\n", (18501, 18543), False, 'import os\n'), ((18574, 18636), 'os.path.join', 'os.path.join', (['instances_dir', '"""novel_class_test_instances.json"""'], {}), "(instances_dir, 'novel_class_test_instances.json')\n", (18586, 18636), False, 'import os\n'), ((18878, 18938), 'os.path.join', 'os.path.join', (['instances_dir', '"""training_shapenet_tables.json"""'], {}), "(instances_dir, 'training_shapenet_tables.json')\n", (18890, 18938), False, 'import os\n'), ((18966, 19022), 'os.path.join', 'os.path.join', (['instances_dir', '"""test_shapenet_tables.json"""'], {}), "(instances_dir, 'test_shapenet_tables.json')\n", (18978, 19022), False, 'import os\n'), ((19688, 19717), 'os.listdir', 'os.listdir', (['shapenet_filepath'], {}), '(shapenet_filepath)\n', (19698, 19717), False, 'import os\n'), ((20977, 21024), 'os.path.exists', 'os.path.exists', (["(save_dir + f'/{target_object}/')"], {}), "(save_dir + f'/{target_object}/')\n", (20991, 21024), False, 'import os\n'), ((21081, 21122), 'os.mkdir', 'os.mkdir', (["(save_dir + f'/{target_object}/')"], {}), "(save_dir + f'/{target_object}/')\n", (21089, 21122), False, 'import os\n'), ((32289, 32302), 'multiprocessing.dummy.Pool', 'ThreadPool', (['(1)'], {}), '(1)\n', (32299, 32302), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((32313, 32343), 'pybullet.apply_async', 'p.apply_async', (['func'], {'args': 'args'}), '(func, args=args)\n', (32326, 32343), True, 'import pybullet as p\n'), ((2861, 2960), 'os.path.join', 'os.path.join', (['top_dir', 'f"""herb_reconf/assets/ycb_objects/{mesh_name}/google_16k/nontextured.stl"""'], {}), "(top_dir,\n f'herb_reconf/assets/ycb_objects/{mesh_name}/google_16k/nontextured.stl')\n", (2873, 2960), 
False, 'import os\n'), ((3008, 3118), 'os.path.join', 'os.path.join', (['top_dir', 'f"""herb_reconf/cluttered_scenes/assets/downloaded_assets/{mesh_name}/scene.stl"""'], {}), "(top_dir,\n f'herb_reconf/cluttered_scenes/assets/downloaded_assets/{mesh_name}/scene.stl'\n )\n", (3020, 3118), False, 'import os\n'), ((8666, 8675), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (8672, 8675), True, 'import numpy as np\n'), ((8703, 8779), 'numpy.reshape', 'np.reshape', (['physics.named.data.geom_xmat[geom_names[known_mesh_ind]]', '(3, 3)'], {}), '(physics.named.data.geom_xmat[geom_names[known_mesh_ind]], (3, 3))\n', (8713, 8779), True, 'import numpy as np\n'), ((8854, 8863), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (8860, 8863), True, 'import numpy as np\n'), ((16879, 16933), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'min_size', 'high': 'max_size', 'size': '(1)'}), '(low=min_size, high=max_size, size=1)\n', (16896, 16933), True, 'import numpy as np\n'), ((16944, 16959), 'random.random', 'random.random', ([], {}), '()\n', (16957, 16959), False, 'import random\n'), ((17092, 17142), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(2 * math.pi)', 'size': '(3)'}), '(low=0, high=2 * math.pi, size=3)\n', (17109, 17142), True, 'import numpy as np\n'), ((17963, 17981), 'numpy.array', 'np.array', (['[0.0, 0]'], {}), '([0.0, 0])\n', (17971, 17981), True, 'import numpy as np\n'), ((18020, 18043), 'numpy.array', 'np.array', (['[0.05, -0.35]'], {}), '([0.05, -0.35])\n', (18028, 18043), True, 'import numpy as np\n'), ((18314, 18339), 'numpy.array', 'np.array', (['[0.1, 0.1, 0.1]'], {}), '([0.1, 0.1, 0.1])\n', (18322, 18339), True, 'import numpy as np\n'), ((21032, 21078), 'shutil.rmtree', 'shutil.rmtree', (["(save_dir + f'/{target_object}/')"], {}), "(save_dir + f'/{target_object}/')\n", (21045, 21078), False, 'import shutil\n'), ((8204, 8225), 'numpy.array', 'np.array', (['view_matrix'], {}), '(view_matrix)\n', (8212, 8225), True, 'import 
numpy as np\n'), ((8285, 8298), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (8293, 8298), True, 'import numpy as np\n'), ((21428, 21482), 'os.path.join', 'os.path.join', (['top_dir', 'f"""herb_reconf/{task}_scene.xml"""'], {}), "(top_dir, f'herb_reconf/{task}_scene.xml')\n", (21440, 21482), False, 'import os\n'), ((21517, 21603), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{target_object}_{task}_{num_generated}_decomp_scene.xml"""'], {}), "(save_dir,\n f'{target_object}_{task}_{num_generated}_decomp_scene.xml')\n", (21529, 21603), False, 'import os\n'), ((21612, 21666), 'shutil.copyfile', 'shutil.copyfile', (['scene_xml_file', 'decomp_scene_xml_file'], {}), '(scene_xml_file, decomp_scene_xml_file)\n', (21627, 21666), False, 'import shutil\n'), ((21698, 21773), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{target_object}_{task}_{num_generated}_scene.xml"""'], {}), "(save_dir, f'{target_object}_{task}_{num_generated}_scene.xml')\n", (21710, 21773), False, 'import os\n'), ((21786, 21837), 'shutil.copyfile', 'shutil.copyfile', (['scene_xml_file', 'gen_scene_xml_file'], {}), '(scene_xml_file, gen_scene_xml_file)\n', (21801, 21837), False, 'import shutil\n'), ((22321, 22342), 'random.randint', 'random.randint', (['(3)', '(10)'], {}), '(3, 10)\n', (22335, 22342), False, 'import random\n'), ((23178, 23332), 'trajopt.sandbox.examples.herb_pushing_mppi.convex_decomp_target_object_env', 'convex_decomp_target_object_env', (['gen_scene_xml_file', '"""gen_body_0"""', 'mesh_filename', 'save_dir', 'run_id', 'top_dir'], {'new_scene_name': 'decomp_scene_xml_file'}), "(gen_scene_xml_file, 'gen_body_0',\n mesh_filename, save_dir, run_id, top_dir, new_scene_name=\n decomp_scene_xml_file)\n", (23209, 23332), False, 'from trajopt.sandbox.examples.herb_pushing_mppi import convex_decomp_target_object_env\n'), ((23831, 23924), 'trajopt.envs.herb_pushing_env.HerbEnv', 'HerbEnv', (['decomp_scene_xml_file', 'box'], {'task': 'task', 'obs': '(False)', 'push_mesh_vertices': 
'box', 'skip': '(1)'}), '(decomp_scene_xml_file, box, task=task, obs=False,\n push_mesh_vertices=box, skip=1)\n', (23838, 23924), False, 'from trajopt.envs.herb_pushing_env import HerbEnv\n'), ((24877, 24997), 'trajopt.utils.generate_perturbed_actions', 'generate_perturbed_actions', (['state', 'base_act', 'filter_coefs', '(0.5)', '(0.15)', "state['qp'][4]", '(1.59)'], {'hand_open': '(0)', 'move': '(False)'}), "(state, base_act, filter_coefs, 0.5, 0.15, state[\n 'qp'][4], 1.59, hand_open=0, move=False)\n", (24903, 24997), False, 'from trajopt.utils import generate_perturbed_actions\n'), ((25450, 25541), 'trajopt.envs.herb_pushing_env.HerbEnv', 'HerbEnv', (['gen_scene_xml_file', 'box'], {'task': 'task', 'obs': '(False)', 'push_mesh_vertices': 'box', 'skip': '(1)'}), '(gen_scene_xml_file, box, task=task, obs=False, push_mesh_vertices=\n box, skip=1)\n', (25457, 25541), False, 'from trajopt.envs.herb_pushing_env import HerbEnv\n'), ((25790, 25818), 'cv2.imshow', 'cv2.imshow', (['"""first rbg"""', 'rgb'], {}), "('first rbg', rgb)\n", (25800, 25818), False, 'import cv2\n'), ((25831, 25846), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (25842, 25846), False, 'import cv2\n'), ((29446, 29539), 'trajopt.envs.herb_pushing_env.HerbEnv', 'HerbEnv', (['decomp_scene_xml_file', 'box'], {'task': 'task', 'obs': '(False)', 'push_mesh_vertices': 'box', 'skip': '(1)'}), '(decomp_scene_xml_file, box, task=task, obs=False,\n push_mesh_vertices=box, skip=1)\n', (29453, 29539), False, 'from trajopt.envs.herb_pushing_env import HerbEnv\n'), ((31158, 31249), 'trajopt.envs.herb_pushing_env.HerbEnv', 'HerbEnv', (['gen_scene_xml_file', 'box'], {'task': 'task', 'obs': '(False)', 'push_mesh_vertices': 'box', 'skip': '(1)'}), '(gen_scene_xml_file, box, task=task, obs=False, push_mesh_vertices=\n box, skip=1)\n', (31165, 31249), False, 'from trajopt.envs.herb_pushing_env import HerbEnv\n'), ((31449, 31477), 'cv2.imshow', 'cv2.imshow', (['"""final rbg"""', 'rgb'], {}), "('final rbg', 
rgb)\n", (31459, 31477), False, 'import cv2\n'), ((31490, 31505), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (31501, 31505), False, 'import cv2\n'), ((31631, 31662), 'cv2.imshow', 'cv2.imshow', (['"""final decomp"""', 'rgb'], {}), "('final decomp', rgb)\n", (31641, 31662), False, 'import cv2\n'), ((31675, 31754), 'cv2.imwrite', 'cv2.imwrite', (["(save_dir + f'/{target_object}_{task}_{num_generated}_img.png')", 'rgb'], {}), "(save_dir + f'/{target_object}_{task}_{num_generated}_img.png', rgb)\n", (31686, 31754), False, 'import cv2\n'), ((31765, 31780), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (31776, 31780), False, 'import cv2\n'), ((21970, 22078), 'os.path.join', 'os.path.join', (['top_dir', 'f"""herb_reconf/assets/ycb_objects/{target_object}/google_16k/nontextured.stl"""'], {}), "(top_dir,\n f'herb_reconf/assets/ycb_objects/{target_object}/google_16k/nontextured.stl'\n )\n", (21982, 22078), False, 'import os\n'), ((22118, 22232), 'os.path.join', 'os.path.join', (['top_dir', 'f"""herb_reconf/cluttered_scenes/assets/downloaded_assets/{target_object}/scene.stl"""'], {}), "(top_dir,\n f'herb_reconf/cluttered_scenes/assets/downloaded_assets/{target_object}/scene.stl'\n )\n", (22130, 22232), False, 'import os\n'), ((23943, 23964), 'numpy.ones', 'np.ones', (['e.action_dim'], {}), '(e.action_dim)\n', (23950, 23964), True, 'import numpy as np\n'), ((24422, 24551), 'numpy.concatenate', 'np.concatenate', (['(e.action_space.high[0:7] - e.action_space.low[0:7], e.action_space.high[14\n :22] - e.action_space.low[14:22])'], {}), '((e.action_space.high[0:7] - e.action_space.low[0:7], e.\n action_space.high[14:22] - e.action_space.low[14:22]))\n', (24436, 24551), True, 'import numpy as np\n'), ((24544, 24612), 'numpy.concatenate', 'np.concatenate', (['(e.action_space.low[0:7], e.action_space.low[14:22])'], {}), '((e.action_space.low[0:7], e.action_space.low[14:22]))\n', (24558, 24612), True, 'import numpy as np\n'), ((24614, 24684), 'numpy.concatenate', 
'np.concatenate', (['(e.action_space.high[0:7], e.action_space.high[14:22])'], {}), '((e.action_space.high[0:7], e.action_space.high[14:22]))\n', (24628, 24684), True, 'import numpy as np\n'), ((24800, 24840), 'numpy.expand_dims', 'np.expand_dims', (["state['qp'][:15]"], {'axis': '(0)'}), "(state['qp'][:15], axis=0)\n", (24814, 24840), True, 'import numpy as np\n'), ((26574, 26605), 'trimesh.load', 'trimesh.load', (['obj_mesh_filename'], {}), '(obj_mesh_filename)\n', (26586, 26605), False, 'import trimesh\n'), ((26644, 26730), 'os.path.join', 'os.path.join', (['top_dir', 'f"""assets/model_normalized_{thread_num}_{added_objects}.stl"""'], {}), "(top_dir,\n f'assets/model_normalized_{thread_num}_{added_objects}.stl')\n", (26656, 26730), False, 'import os\n'), ((26814, 26839), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(3)'}), '(size=3)\n', (26831, 26839), True, 'import numpy as np\n'), ((27100, 27150), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(2 * math.pi)', 'size': '(3)'}), '(low=0, high=2 * math.pi, size=3)\n', (27117, 27150), True, 'import numpy as np\n'), ((27267, 27332), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'global_gauss_center', 'scale': 'global_gauss_std'}), '(loc=global_gauss_center, scale=global_gauss_std)\n', (27283, 27332), True, 'import numpy as np\n'), ((27361, 27396), 'trimesh.load', 'trimesh.load', (['stl_obj_mesh_filename'], {}), '(stl_obj_mesh_filename)\n', (27373, 27396), False, 'import trimesh\n'), ((27910, 27974), 'os.path.join', 'os.path.join', (['shapenet_decomp_filepath', 'f"""{obj_cat}/{obj_id[1]}"""'], {}), "(shapenet_decomp_filepath, f'{obj_cat}/{obj_id[1]}')\n", (27922, 27974), False, 'import os\n'), ((28008, 28051), 'os.listdir', 'os.listdir', (['decomp_shapenet_decomp_filepath'], {}), '(decomp_shapenet_decomp_filepath)\n', (28018, 28051), False, 'import os\n'), ((31988, 32057), 'pickle.dump', 'pickle.dump', (['(visible_pix / unobscured_visible_pix, state)', 'save_file'], 
{}), '((visible_pix / unobscured_visible_pix, state), save_file)\n', (31999, 32057), False, 'import pickle\n'), ((32161, 32182), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (32180, 32182), False, 'import traceback\n'), ((17009, 17059), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(2 * math.pi)', 'size': '(1)'}), '(low=0, high=2 * math.pi, size=1)\n', (17026, 17059), True, 'import numpy as np\n'), ((23548, 23656), 'os.path.join', 'os.path.join', (['top_dir', 'f"""herb_reconf/assets/ycb_objects/{target_object}/google_16k/nontextured.stl"""'], {}), "(top_dir,\n f'herb_reconf/assets/ycb_objects/{target_object}/google_16k/nontextured.stl'\n )\n", (23560, 23656), False, 'import os\n'), ((23708, 23822), 'os.path.join', 'os.path.join', (['top_dir', 'f"""herb_reconf/cluttered_scenes/assets/downloaded_assets/{target_object}/scene.stl"""'], {}), "(top_dir,\n f'herb_reconf/cluttered_scenes/assets/downloaded_assets/{target_object}/scene.stl'\n )\n", (23720, 23822), False, 'import os\n'), ((26868, 26938), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'min_object_scale', 'high': 'max_object_scale', 'size': '(1)'}), '(low=min_object_scale, high=max_object_scale, size=1)\n', (26885, 26938), True, 'import numpy as np\n'), ((27168, 27183), 'random.random', 'random.random', ([], {}), '()\n', (27181, 27183), False, 'import random\n'), ((28183, 28231), 'trimesh.repair.fix_inversion', 'trimesh.repair.fix_inversion', (['decomp_object_mesh'], {}), '(decomp_object_mesh)\n', (28211, 28231), False, 'import trimesh\n'), ((26978, 27034), 'numpy.square', 'np.square', (['(object_mesh.bounds[0] - object_mesh.bounds[1])'], {}), '(object_mesh.bounds[0] - object_mesh.bounds[1])\n', (26987, 27034), True, 'import numpy as np\n'), ((28105, 28161), 'os.path.join', 'os.path.join', (['decomp_shapenet_decomp_filepath', 'mesh_file'], {}), '(decomp_shapenet_decomp_filepath, mesh_file)\n', (28117, 28161), False, 'import os\n'), ((28368, 28461), 
'os.path.join', 'os.path.join', (['shapenet_decomp_filepath', 'f"""{obj_cat}/{obj_id[1]}"""', "(mesh_file[:-3] + 'stl')"], {}), "(shapenet_decomp_filepath, f'{obj_cat}/{obj_id[1]}', mesh_file[\n :-3] + 'stl')\n", (28380, 28461), False, 'import os\n'), ((28734, 28755), 'numpy.array', 'np.array', (['mesh_masses'], {}), '(mesh_masses)\n', (28742, 28755), True, 'import numpy as np\n'), ((30979, 31001), 'cv2.imshow', 'cv2.imshow', (['"""rbg"""', 'rgb'], {}), "('rbg', rgb)\n", (30989, 31001), False, 'import cv2\n'), ((31026, 31041), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (31037, 31041), False, 'import cv2\n')] |
import numpy as np

# 1-D vector holding the integers 0..23.
a = np.arange(24)
print(a.ndim)  # -> 1
# View the same 24 values as a 2 x 4 x 3 array (no copy).
b = np.reshape(a, (2, 4, 3))
print(b.ndim) | [
"numpy.arange"
] | [((21, 34), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (30, 34), True, 'import numpy as np\n')] |
import finalGetDigits
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVR
from sklearn.model_selection import KFold

# get digits data X (training input) and y (target output), plus the
# held-out test split X_te / y_te
X, y, X_te, y_te = finalGetDigits.getDataSet()

# Exhaustive grid search for an RBF support-vector regressor:
#   penC       <- penalty parameter C of the error term
#   tubEpsilon <- the epsilon-tube within which no penalty is associated
#   paramGamma <- RBF kernel coefficient
bestC = 0
bestEpsilon = 0
bestGamma = 0
bestScore = float('-inf')
score = 0
for penC in np.logspace(6, 12, num=7, base=2):
    for tubEpsilon in np.linspace(0.5, 2.5, num=21):
        for paramGamma in np.logspace(-6, -2, num=5, base=2):
            # NOTE(review): the fold count is re-drawn at random (2..10)
            # for every parameter combination, so CV scores are not
            # computed under identical conditions across the grid --
            # confirm this is intended.
            kf = KFold(n_splits=np.random.randint(2, 11))
            cvscore = []
            for train, validation in kf.split(X):
                X_train, X_validation, y_train, y_validation = X[train, :], X[validation, :], y[train], y[validation]
                # here we create the SVR
                svr = SVR(C=penC, epsilon=tubEpsilon, gamma=paramGamma, kernel='rbf', verbose=False)
                # here we train the SVR
                svr.fit(X_train, y_train)
                # now we get E_out for validation set
                score = svr.score(X_validation, y_validation)
                cvscore.append(score)
            # average CV score
            score = sum(cvscore) / len(cvscore)
            if (score > bestScore):
                bestScore = score
                bestC = penC
                bestEpsilon = tubEpsilon
                bestGamma = paramGamma
                print("BEST! -> C " + str(penC) + ", epsilon " + str(tubEpsilon) + ", gamma " + str(paramGamma) + ". Testing set CV score: %f" % score)
            else:
                print("C " + str(penC) + ", epsilon " + str(tubEpsilon) + ", gamma " + str(paramGamma) + ". Testing set CV score: %f" % score)

# here we create the final SVR with the best hyper-parameters found above
svr = SVR(C=bestC, epsilon=bestEpsilon, gamma=bestGamma, kernel='rbf', verbose=True)
# here we train the final SVR on the full training set
svr.fit(X, y)
# E_out in training
print("Training set score: %f" % svr.score(X, y))
# here test the final SVR and get E_out for testing set
ypred = svr.predict(X_te)
score = svr.score(X_te, y_te)
print("Testing set score: %f" % score)

# min-max scale the test inputs into [0, 1] so the first two features can
# be used as 2-D plot coordinates
x_min, x_max = np.min(X_te, axis=0), np.max(X_te, axis=0)
X_te = (X_te - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_te.shape[0]):
    # BUGFIX: the 'spectral' colormap was renamed 'nipy_spectral' in
    # matplotlib 2.0 and the old alias removed in 2.2, so plt.cm.spectral
    # raises AttributeError on any recent matplotlib.
    plt.text(X_te[i, 0], X_te[i, 1], str(y_te[i]), color=plt.cm.nipy_spectral(round(ypred[i]) / 10.), fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
plt.axis('off')
plt.tight_layout()
plt.show()
| [
"matplotlib.pyplot.xticks",
"finalGetDigits.getDataSet",
"matplotlib.pyplot.axis",
"numpy.max",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"numpy.random.randint",
"matplotlib.pyplot.tight_layout",
"numpy.min",
"numpy.logspace",
"sklearn.svm.SVR",
"matplotlib.py... | [((222, 249), 'finalGetDigits.getDataSet', 'finalGetDigits.getDataSet', ([], {}), '()\n', (247, 249), False, 'import finalGetDigits\n'), ((449, 482), 'numpy.logspace', 'np.logspace', (['(6)', '(12)'], {'num': '(7)', 'base': '(2)'}), '(6, 12, num=7, base=2)\n', (460, 482), True, 'import numpy as np\n'), ((1663, 1741), 'sklearn.svm.SVR', 'SVR', ([], {'C': 'bestC', 'epsilon': 'bestEpsilon', 'gamma': 'bestGamma', 'kernel': '"""rbf"""', 'verbose': '(True)'}), "(C=bestC, epsilon=bestEpsilon, gamma=bestGamma, kernel='rbf', verbose=True)\n", (1666, 1741), False, 'from sklearn.svm import SVR\n'), ((2104, 2130), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (2114, 2130), True, 'import matplotlib.pyplot as plt\n'), ((2298, 2312), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2308, 2312), True, 'import matplotlib.pyplot as plt\n'), ((2313, 2327), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2323, 2327), True, 'import matplotlib.pyplot as plt\n'), ((2328, 2343), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2336, 2343), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2362), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2360, 2362), True, 'import matplotlib.pyplot as plt\n'), ((2364, 2374), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2372, 2374), True, 'import matplotlib.pyplot as plt\n'), ((504, 533), 'numpy.linspace', 'np.linspace', (['(0.5)', '(2.5)'], {'num': '(21)'}), '(0.5, 2.5, num=21)\n', (515, 533), True, 'import numpy as np\n'), ((2020, 2040), 'numpy.min', 'np.min', (['X_te'], {'axis': '(0)'}), '(X_te, axis=0)\n', (2026, 2040), True, 'import numpy as np\n'), ((2042, 2062), 'numpy.max', 'np.max', (['X_te'], {'axis': '(0)'}), '(X_te, axis=0)\n', (2048, 2062), True, 'import numpy as np\n'), ((557, 591), 'numpy.logspace', 'np.logspace', (['(-6)', '(-2)'], {'num': '(5)', 'base': '(2)'}), '(-6, 
-2, num=5, base=2)\n', (568, 591), True, 'import numpy as np\n'), ((863, 941), 'sklearn.svm.SVR', 'SVR', ([], {'C': 'penC', 'epsilon': 'tubEpsilon', 'gamma': 'paramGamma', 'kernel': '"""rbf"""', 'verbose': '(False)'}), "(C=penC, epsilon=tubEpsilon, gamma=paramGamma, kernel='rbf', verbose=False)\n", (866, 941), False, 'from sklearn.svm import SVR\n'), ((619, 643), 'numpy.random.randint', 'np.random.randint', (['(2)', '(11)'], {}), '(2, 11)\n', (636, 643), True, 'import numpy as np\n')] |
import numpy as np
from glhe.aggregation.agg_types import AggregationTypes
from glhe.aggregation.base_agg import BaseAgg
class NoAgg(BaseAgg):
    """Non-aggregating load history: every time step is kept verbatim."""

    Type = AggregationTypes.NO_AGG

    def __init__(self, inputs):
        super().__init__(inputs)

    def aggregate(self, time: int, energy: float):
        """Append one (energy, dt) sample; repeated calls for the same
        simulation time (solver iterations) are ignored."""
        if time == self.prev_update_time:
            # still iterating within the current time step -- already logged
            return
        self.energy = np.append(self.energy, energy)
        self.dts = np.append(self.dts, time - self.prev_update_time)
        self.prev_update_time = time

    def calc_temporal_superposition(self, time_step: int) -> float:
        """Convolve the full load-increment history with the g-function(s).

        Includes all thermal history before the present time plus the
        not-yet-logged current step of length *time_step*.
        """
        # average heat rate over each historical interval, then the
        # step-to-step increments of that rate
        rates = self.energy / self.dts
        delta_q = np.diff(rates, prepend=0)
        # elapsed time from the end of each interval to the end of the
        # current step; reversed cumulative sum gives those horizons
        intervals = np.append(self.dts, time_step)
        elapsed = np.flipud(np.cumsum(np.flipud(intervals)))[:-1]
        lntts = np.log(elapsed / self.ts)
        g = self.interp_g(lntts)
        if self.interp_g_b:
            # convolution against the combined "g" and "g_b" responses
            return float(np.dot(delta_q, g + self.interp_g_b(lntts)))
        # convolution against the "g" response only
        return float(np.dot(delta_q, g))

    def get_g_value(self, time_step: int) -> float:
        pass  # pragma: no cover

    def get_g_b_value(self, time_step: int) -> float:
        pass  # pragma: no cover

    def get_q_prev(self) -> float:
        pass  # pragma: no cover
| [
"numpy.add",
"numpy.flipud",
"numpy.log",
"numpy.diff",
"numpy.append",
"numpy.dot",
"glhe.aggregation.base_agg.BaseAgg.__init__"
] | [((288, 318), 'glhe.aggregation.base_agg.BaseAgg.__init__', 'BaseAgg.__init__', (['self', 'inputs'], {}), '(self, inputs)\n', (304, 318), False, 'from glhe.aggregation.base_agg import BaseAgg\n'), ((510, 540), 'numpy.append', 'np.append', (['self.energy', 'energy'], {}), '(self.energy, energy)\n', (519, 540), True, 'import numpy as np\n'), ((602, 625), 'numpy.append', 'np.append', (['self.dts', 'dt'], {}), '(self.dts, dt)\n', (611, 625), True, 'import numpy as np\n'), ((912, 933), 'numpy.diff', 'np.diff', (['q'], {'prepend': '(0)'}), '(q, prepend=0)\n', (919, 933), True, 'import numpy as np\n'), ((977, 1007), 'numpy.append', 'np.append', (['self.dts', 'time_step'], {}), '(self.dts, time_step)\n', (986, 1007), True, 'import numpy as np\n'), ((1082, 1105), 'numpy.log', 'np.log', (['(times / self.ts)'], {}), '(times / self.ts)\n', (1088, 1105), True, 'import numpy as np\n'), ((1467, 1480), 'numpy.dot', 'np.dot', (['dq', 'g'], {}), '(dq, g)\n', (1473, 1480), True, 'import numpy as np\n'), ((1044, 1058), 'numpy.flipud', 'np.flipud', (['dts'], {}), '(dts)\n', (1053, 1058), True, 'import numpy as np\n'), ((1360, 1374), 'numpy.add', 'np.add', (['g', 'g_b'], {}), '(g, g_b)\n', (1366, 1374), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 11 09:16:20 2021
@author: Administrator
"""
import pickle
import os
import numpy as np
import matplotlib.pyplot as plt
import DI_distance as dist
from sklearn import manifold
import umap
from gudhi.wasserstein.barycenter import lagrangian_barycenter
def read_pickle_object(path):
    """Deserialize and return the object stored in the pickle file at *path*."""
    with open(path, 'rb') as handle:
        loaded = pickle.load(handle)
    return loaded
def make_diagramlist(pathname, diagramlist):
    """Append the Wasserstein barycenter of each persistence-diagram file.

    Reads every pickle under ``<cwd>\\PH Diagrams\\<pathname with spaces
    replaced by underscores>\\``, strips the last column of each diagram in
    the file's diagram set, computes the lagrangian barycenter of the set and
    appends it to *diagramlist*. Files with an empty diagram set are reported
    on stdout and skipped.

    Args:
        pathname: dataset name; spaces become underscores in the folder name.
        diagramlist: list the barycenters are appended to (mutated in place).

    Returns:
        tuple: (diagramlist, len(diagramlist)) — the list and its new length,
        used by the caller as a cumulative slice index.
    """
    path = os.getcwd()+"\\PH Diagrams\\" + pathname.replace(' ','_') + "\\"
    for file in os.listdir(path):
        filepath = path + file
        phdiagram = read_pickle_object(filepath)['diagram']
        # Skip empty diagram sets *before* the barycenter call; the original
        # code ran lagrangian_barycenter first and only then checked for
        # emptiness, wasting (or failing) the expensive computation.
        if not phdiagram:
            print(filepath)
            continue
        for i in range(len(phdiagram)):
            phdiagram[i] = np.delete(phdiagram[i], -1, axis=1)
        # this function may take time if phdiagram is large
        bary = lagrangian_barycenter(pdiagset=phdiagram)
        diagramlist.append(bary)
    return diagramlist, len(diagramlist)
# Barycenter diagrams accumulated across all dataset classes.
diagramlist = []
# MedMNIST dataset names; make_diagramlist turns spaces into underscores
# when resolving each data directory.
namelist = ['breastmnist class',
            'chestmnist class',
            'dermamnist class',
            'octmnist class',
            'organmnist axial class',
            'organmnist coronal class',
            'organmnist sagittal class',
            'pathmnist class',
            'pneumoniamnist class',
            'retinamnist class']
indxlist = []
# obtain list of diagrams; indxlist records the cumulative diagram count
# after each dataset so the 2D embeddings can later be sliced per dataset.
for name in namelist:
    diagramlist, index = make_diagramlist(name, diagramlist)
    indxlist.append(index)
# log inf error: small offset so np.log never sees 0 (avoids -inf).
ep = 1e-4
# compute distance matrix (all i,j pairs; no symmetry shortcut is taken)
Dmatrix = np.zeros((index,index))
for i in range(0,index):
    for j in range(0,index):
        dataA = np.log(diagramlist[i]+ep)
        dataB = np.log(diagramlist[j]+ep)
        # NOTE(review): only the third return value is used; assumes
        # dist.myDistance(a, b, 100) returns (dilation inverse, bottleneck
        # distance, minimal bottleneck distance) -- confirm against DI_distance.
        dilaInv, bottleneckDistance, minBottleneckDistance = dist.myDistance(dataA, dataB, 100)
        Dmatrix[i,j] = minBottleneckDistance
# mds visualization of the precomputed pairwise distance matrix
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=10,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(Dmatrix).embedding_
# Prepend 0 so indxlist[i]:indxlist[i+1] slices out dataset i's rows below.
indxlist = [0] + indxlist
plt.figure()
for i in range(len(namelist)):
    plt.scatter(pos[indxlist[i]:indxlist[i+1]+1, 0], pos[indxlist[i]:indxlist[i+1]+1, 1], label = namelist[i])
plt.legend()
plt.savefig('mds_visual.png')
# umap visualization on the same distance matrix
RANDOM_SEED = 1
reducer = umap.UMAP(
    n_neighbors = 5, # default: 15
    n_components = 2, # 2D atlas
    metric = 'precomputed', # we have already computed the pairwise distances
    min_dist = .05, # default: 0.1
    spread = 1, # default: 1
    random_state = RANDOM_SEED)
embedding = reducer.fit_transform(Dmatrix)
# Plot the UMAP atlas
plt.figure()
for i in range(len(namelist)):
    plt.scatter(embedding[indxlist[i]:indxlist[i+1]+1, 0], embedding[indxlist[i]:indxlist[i+1]+1, 1], label = namelist[i])
plt.legend()
plt.savefig('umap_visual.png')
| [
"os.listdir",
"matplotlib.pyplot.savefig",
"gudhi.wasserstein.barycenter.lagrangian_barycenter",
"DI_distance.myDistance",
"numpy.delete",
"numpy.log",
"pickle.load",
"os.getcwd",
"numpy.zeros",
"matplotlib.pyplot.figure",
"umap.UMAP",
"matplotlib.pyplot.scatter",
"sklearn.manifold.MDS",
"... | [((1711, 1735), 'numpy.zeros', 'np.zeros', (['(index, index)'], {}), '((index, index))\n', (1719, 1735), True, 'import numpy as np\n'), ((2062, 2176), 'sklearn.manifold.MDS', 'manifold.MDS', ([], {'n_components': '(2)', 'max_iter': '(3000)', 'eps': '(1e-09)', 'random_state': '(10)', 'dissimilarity': '"""precomputed"""', 'n_jobs': '(1)'}), "(n_components=2, max_iter=3000, eps=1e-09, random_state=10,\n dissimilarity='precomputed', n_jobs=1)\n", (2074, 2176), False, 'from sklearn import manifold\n'), ((2259, 2271), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2269, 2271), True, 'import matplotlib.pyplot as plt\n'), ((2417, 2429), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2427, 2429), True, 'import matplotlib.pyplot as plt\n'), ((2431, 2460), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""mds_visual.png"""'], {}), "('mds_visual.png')\n", (2442, 2460), True, 'import matplotlib.pyplot as plt\n'), ((2516, 2634), 'umap.UMAP', 'umap.UMAP', ([], {'n_neighbors': '(5)', 'n_components': '(2)', 'metric': '"""precomputed"""', 'min_dist': '(0.05)', 'spread': '(1)', 'random_state': 'RANDOM_SEED'}), "(n_neighbors=5, n_components=2, metric='precomputed', min_dist=\n 0.05, spread=1, random_state=RANDOM_SEED)\n", (2525, 2634), False, 'import umap\n'), ((2881, 2893), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2891, 2893), True, 'import matplotlib.pyplot as plt\n'), ((3051, 3063), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3061, 3063), True, 'import matplotlib.pyplot as plt\n'), ((3065, 3095), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""umap_visual.png"""'], {}), "('umap_visual.png')\n", (3076, 3095), True, 'import matplotlib.pyplot as plt\n'), ((572, 588), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (582, 588), False, 'import os\n'), ((2309, 2426), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pos[indxlist[i]:indxlist[i + 1] + 1, 0]', 'pos[indxlist[i]:indxlist[i + 1] + 1, 1]'], {'label': 
'namelist[i]'}), '(pos[indxlist[i]:indxlist[i + 1] + 1, 0], pos[indxlist[i]:\n indxlist[i + 1] + 1, 1], label=namelist[i])\n', (2320, 2426), True, 'import matplotlib.pyplot as plt\n'), ((2931, 3060), 'matplotlib.pyplot.scatter', 'plt.scatter', (['embedding[indxlist[i]:indxlist[i + 1] + 1, 0]', 'embedding[indxlist[i]:indxlist[i + 1] + 1, 1]'], {'label': 'namelist[i]'}), '(embedding[indxlist[i]:indxlist[i + 1] + 1, 0], embedding[\n indxlist[i]:indxlist[i + 1] + 1, 1], label=namelist[i])\n', (2942, 3060), True, 'import matplotlib.pyplot as plt\n'), ((397, 416), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (408, 416), False, 'import pickle\n'), ((833, 874), 'gudhi.wasserstein.barycenter.lagrangian_barycenter', 'lagrangian_barycenter', ([], {'pdiagset': 'phdiagram'}), '(pdiagset=phdiagram)\n', (854, 874), False, 'from gudhi.wasserstein.barycenter import lagrangian_barycenter\n'), ((1810, 1837), 'numpy.log', 'np.log', (['(diagramlist[i] + ep)'], {}), '(diagramlist[i] + ep)\n', (1816, 1837), True, 'import numpy as np\n'), ((1853, 1880), 'numpy.log', 'np.log', (['(diagramlist[j] + ep)'], {}), '(diagramlist[j] + ep)\n', (1859, 1880), True, 'import numpy as np\n'), ((1941, 1975), 'DI_distance.myDistance', 'dist.myDistance', (['dataA', 'dataB', '(100)'], {}), '(dataA, dataB, 100)\n', (1956, 1975), True, 'import DI_distance as dist\n'), ((781, 816), 'numpy.delete', 'np.delete', (['phdiagram[i]', '(-1)'], {'axis': '(1)'}), '(phdiagram[i], -1, axis=1)\n', (790, 816), True, 'import numpy as np\n'), ((491, 502), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (500, 502), False, 'import os\n')] |
'''
Created on Aug 21, 2017
@author: optas
'''
import warnings
import numpy as np
from sklearn.neighbors import NearestNeighbors
from scipy.sparse.linalg import eigs
from numpy.linalg import norm
from .. fundamentals import Graph
from .. utils.linalg_utils import l2_norm
def greedy_match_pc_to_pc(from_pc, to_pc):
    """For each point of ``from_pc``, find its single nearest neighbor in
    ``to_pc`` under euclidean distance.

    Returns:
        (indices, distances) of the closest ``to_pc`` point per query point.
    """
    matcher = NearestNeighbors(n_neighbors=1)
    matcher.fit(to_pc)
    dists, idx = matcher.kneighbors(from_pc)
    return idx, dists
def chamfer_pseudo_distance(pc1, pc2):
    """Symmetric sum of nearest-neighbor distances between two point clouds."""
    _, forward = greedy_match_pc_to_pc(pc1, pc2)
    _, backward = greedy_match_pc_to_pc(pc2, pc1)
    return np.sum(forward) + np.sum(backward)
def laplacian_spectrum(pc, n_evecs, k=6):
    '''Compute the smallest eigenpairs of the normalized graph Laplacian of a
    k-nearest-neighbor graph built over the point cloud.

    Args:
        pc: point cloud object exposing k_nearest_neighbors(k).
        n_evecs: (int) number of requested eigenpairs (one extra is computed).
        k: (int) number of nearest neighbors each point is connected with in the constructed Adjacency
           matrix that will be used to derive the Laplacian.

    Returns:
        (evals, evecs) sorted by ascending eigenvalue.

    Raises:
        ValueError: if the kNN graph is not a single connected component.
    '''
    neighbors_ids, distances = pc.k_nearest_neighbors(k)
    A = Graph.knn_to_adjacency(neighbors_ids, distances)
    if Graph.connected_components(A)[0] != 1:
        raise ValueError('Graph has more than one connected component, increase k.')
    # Symmetrize: kNN relations are not mutual, so A may be asymmetric.
    A = (A + A.T) / 2.0
    L = Graph.adjacency_to_laplacian(A, 'norm').astype('f4')
    # sigma=-10e-1 (= -1.0) with which='LM' puts eigs in shift-invert mode,
    # returning the eigenvalues closest to the small end of the spectrum.
    evals, evecs = eigs(L, n_evecs + 1, sigma=-10e-1, which='LM')
    # Warn when more than 1% of the eigenvector mass is imaginary before it is
    # discarded below.
    if np.any(l2_norm(evecs.imag, axis=0) / l2_norm(evecs.real, axis=0) > 1.0 / 100):
        warnings.warn('Produced eigen-vectors are complex and contain significant mass on the imaginary part.')
    evecs = evecs.real # eigs returns complex values by default.
    evals = evals.real
    index = np.argsort(evals) # Sort evals from smallest to largest
    evals = evals[index]
    evecs = evecs[:, index]
    return evals, evecs
def unit_cube_grid_point_cloud(resolution, clip_sphere=False):
    '''Returns the center coordinates of each cell of a 3D grid with resolution^3 cells,
    that is placed in the unit-cube (coordinates in [-0.5, 0.5] per axis).

    Args:
        resolution (int): number of cells per axis (must be >= 2).
        clip_sphere (bool): if True, drop the "corner" cells that lie outside
            the unit-sphere (radius 0.5) and return a flat (N, 3) array
            instead of the (resolution, resolution, resolution, 3) grid.

    Returns:
        (grid, spacing): float32 cell-center coordinates and the distance
        between adjacent cell centers along one axis.
    '''
    grid = np.ndarray((resolution, resolution, resolution, 3), np.float32)
    spacing = 1.0 / float(resolution - 1)
    # range() instead of the Python-2-only xrange(): the original raised
    # NameError on Python 3.
    for i in range(resolution):
        for j in range(resolution):
            for k in range(resolution):
                grid[i, j, k, 0] = i * spacing - 0.5
                grid[i, j, k, 1] = j * spacing - 0.5
                grid[i, j, k, 2] = k * spacing - 0.5
    if clip_sphere:
        grid = grid.reshape(-1, 3)
        grid = grid[norm(grid, axis=1) <= 0.5]
    return grid, spacing
"numpy.argsort",
"numpy.sum",
"numpy.ndarray",
"sklearn.neighbors.NearestNeighbors",
"numpy.linalg.norm",
"warnings.warn",
"scipy.sparse.linalg.eigs"
] | [((1257, 1301), 'scipy.sparse.linalg.eigs', 'eigs', (['L', '(n_evecs + 1)'], {'sigma': '(-1.0)', 'which': '"""LM"""'}), "(L, n_evecs + 1, sigma=-1.0, which='LM')\n", (1261, 1301), False, 'from scipy.sparse.linalg import eigs\n'), ((1606, 1623), 'numpy.argsort', 'np.argsort', (['evals'], {}), '(evals)\n', (1616, 1623), True, 'import numpy as np\n'), ((2039, 2102), 'numpy.ndarray', 'np.ndarray', (['(resolution, resolution, resolution, 3)', 'np.float32'], {}), '((resolution, resolution, resolution, 3), np.float32)\n', (2049, 2102), True, 'import numpy as np\n'), ((675, 685), 'numpy.sum', 'np.sum', (['d1'], {}), '(d1)\n', (681, 685), True, 'import numpy as np\n'), ((688, 698), 'numpy.sum', 'np.sum', (['d2'], {}), '(d2)\n', (694, 698), True, 'import numpy as np\n'), ((1398, 1511), 'warnings.warn', 'warnings.warn', (['"""Produced eigen-vectors are complex and contain significant mass on the imaginary part."""'], {}), "(\n 'Produced eigen-vectors are complex and contain significant mass on the imaginary part.'\n )\n", (1411, 1511), False, 'import warnings\n'), ((414, 445), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (430, 445), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((2491, 2509), 'numpy.linalg.norm', 'norm', (['grid'], {'axis': '(1)'}), '(grid, axis=1)\n', (2495, 2509), False, 'from numpy.linalg import norm\n')] |
# Copyright (c) 2019 Alliance for Sustainable Energy, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
import csv
# import y_helper_functions
class DER_Dispatch_base:
    """Base class for a DER (distributed energy resource) dispatch application.

    Accumulates per-timestep feeder measurements, sends control messages back
    to the GridAPPS-D simulator through *output_fn*, and writes the collected
    results to CSV files at the end of a run.

    NOTE(review): several attributes used by input()/output()/process_results()
    (num_regs, reg_list, reg_tap, vvc_message, simulation_name, fn, vn,
    resFolder) are never initialized in __init__ and must be provided by a
    subclass or earlier setup code -- confirm against the concrete apps.
    """
    def __init__(self, output_fn):
        """
        Args:
            output_fn: callable that receives the output message dict destined
                for the GridAPPS-D simulator (see output()).
        """
        self.output_fn = output_fn
        self.present_step = 0
        # Per-timestep result accumulators; these lists grow during the run.
        self.res_allVolt = []
        self.res_PV_Pout = []
        self.res_PV_Qout = []
        self.subKW = []
        self.subKVAr = []
        self.loadDemandKw = []
        self.loadDemandKvar = []
        self.pvGenerationKw = []
        self.pvGenerationKvar = []
        self.v = []
        self.vmin = []
        self.vmax = []
        self.vmean = []
        self.regPos = []
        self.capState = []
        self.losses = []
        self.PV_Ppower_output = []
        self.PV_Qpower_output = []
        # Results are flushed in chunks of this many steps (see process_results).
        self.buffer_count = 10
        self.slack_number = 6
        # Time-step horizon; presumably two days of 1-minute steps -- confirm.
        self.Tmax = 1440 * 2
        self.startH = 0 # in hour
        self.stepsize = 60
        # self.fidselect = '_67AB291F-DCCD-31B7-B499-338206B9828F' # j1
        # self.ymatirx_dir = 'J1_feeder'
        # self.fidselect = '_40B1F0FA-8150-071D-A08F-CD104687EA5D' # ieee123pv
        # self.ymatirx_dir = './IEEE123Bus'
        # self.fidselect = '_E407CBB6-8C8D-9BC9-589C-AB83FBF0826D' # ieee123
        # self.ymatirx_dir = './IEEE123BusFinal'
        # self.ymatirx_dir = '../notebooks/123bus/'
        # self.fidselect = '_4ACDF48A-0C6F-8C70-02F8-09AABAFFA2F5' # ieee13
        # self.ymatirx_dir = './IEEE13'
    def ybus_setup(self):
        """Placeholder for constructing the feeder Y-bus model.

        Currently a no-op; the model queries and y-matrix construction below
        are retained as commented-out scaffolding.
        """
        # get loads
        # get pvs
        # get y matrix
        # get nodes
        # self.node_names = query_model.get_node_names()
        # self.source = query_model.get_source()
        # self.pvs = query_model.get_pv_query()
        # self.loads = query_model.get_loads_query()
        # self.yMatirx = query_model.get_y_matirx()
        # TODO get feeder name
        # TODO request ymatrix, wait until we get it
        # request_ybus.request_ybus()
        # TODO read data files from directory that is returned
        # TODO run scripts for ymatix with out load and ymatrix with load
        # base_no_ysparse.csv will be the no load ymatirx and
        # base_load_ysparse.csv will be the load ymatrix
        # with open('opendsscmd_noload.txt', 'w') as out:
        #     out.writelines(y_helper_functions.no_load_txt)
        # with open('opendsscmd_load.txt', 'w') as out:
        #     out.writelines(y_helper_functions.no_load_txt)
        # self.lookup = {'A': '1', 'B': '2', 'C': '3', 'N':'4','S1':'1','S2':'2', 's1':'1','s2':'2', 's1\ns2': ['1','2'],'': '1.2.3'} # TODO s1 s12?
        # current_dir = os.getcwd()
        # if 'der_dispatch_app' not in current_dir:
        #     current_dir = os.path.join(current_dir,'der_dispatch_app')
        # current_dir = os.path.join(current_dir, self.ymatirx_dir )
        # nodelist_file = os.path.join(current_dir, 'base_load_nodelist.csv')
        # Ysparse_file = os.path.join(current_dir, 'base_load_ysparse.csv')
        # self.AllNodeNames = y_helper_functions.get_nodelist(nodelist_file)
        # self.Ymatrix =y_helper_functions.construct_Ymatrix(Ysparse_file, self.AllNodeNames)
        pass
    def input(self, der_message_dict):
        """
        Updates the internal state of the feeder measurements being monitored
        and controlled from output from the GridAPPS-D simulator.

        Args:
            der_message_dict: message dict from the simulator; stored on the
                instance. NOTE(review): the regulator taps below are read from
                self.vvc_message, not from this argument -- confirm intended.
        """
        self.der_message_dict = der_message_dict
        #TODO get load p and q
        # Initialize regulator tap dict
        for reg_index in range(self.num_regs):
            self.reg_tap[self.reg_list[reg_index]] = [0] * 3 # 3-phase taps
        # Update regulator taps
        for reg_index in range(self.num_regs):
            self.reg_tap[self.reg_list[reg_index]][0] = self.vvc_message[self.simulation_name][self.reg_list[reg_index]]['tap_A']
            self.reg_tap[self.reg_list[reg_index]][1] = self.vvc_message[self.simulation_name][self.reg_list[reg_index]]['tap_B']
            self.reg_tap[self.reg_list[reg_index]][2] = self.vvc_message[self.simulation_name][self.reg_list[reg_index]]['tap_C']
    def output(self, PVsystem):
        """
        Collect all regulator and capacitor control actions and formulate the
        message dictionary to send to the GridAPPS-D simulator and pass that in
        as an argument to the function specified by output_fn.

        Args:
            PVsystem: iterable of PV descriptors. NOTE(review): the setpoint
                loop body is still a stub (pass) -- no control values are
                actually written yet.

        :return None.
        """
        self.output_dict = {}
        self.output_dict[self.simulation_name] = {}
        ## Set control values
        count = 0
        for pv in PVsystem:
            # dss.run_command('edit ' + str(pv["name"]) + ' Pmpp=' + str(x1[count]) + ' kvar=' + str(x1[count + NPV]))
            # Set PV setpoints
            pass
        # Hand a copy to the caller-supplied sink so later mutation of
        # self.output_dict cannot affect the message already sent.
        self.output_fn(self.output_dict.copy())
    def process_results(self):
        """Flush accumulated results to disk.

        Writes the main time-series to the already-open handle self.fn, the
        per-step voltage rows to self.vn, and the PV P/Q outputs to
        PVoutput_P.csv / PVoutput_Q.csv under self.resFolder.
        """
        print('\n********** Processing results ! ********************\n')
        nstep = int(np.ceil(self.Tmax-self.startH*60))
        # First step of the not-yet-flushed buffer chunk.
        last = self.present_step // self.buffer_count * self.buffer_count
        tseries = np.asarray(range(last, nstep))
        if self.loadDemandKvar:
            # kW/kVAr columns are scaled by 1/1000 (kW -> MW, kVAr -> MVAr).
            self.d = np.column_stack((tseries, np.asarray(self.loadDemandKw) / 1000, np.asarray(self.loadDemandKvar) / 1000,
                             np.asarray(self.subKW) / 1000, np.asarray(self.subKVAr) / 1000, np.asarray(self.pvGenerationKw) / 1000,
                             np.asarray(self.pvGenerationKvar) / 1000,
                             np.asarray(self.vmean), np.asarray(self.vmax), np.asarray(self.vmin), np.asarray(self.capState),
                             np.asarray(self.losses) / 1000)) #np.asarray(regPos),
            np.savetxt(self.fn, self.d, fmt='%.8e', delimiter=',')
        if self.v:
            self.d = np.row_stack(self.v)
            np.savetxt(self.vn, self.d, fmt='%.6e', delimiter=',')
        self.fn.flush()
        self.vn.flush()
        with open(os.path.join(self.resFolder,'PVoutput_P.csv'),'w') as f:
            csvwriter = csv.writer(f)
            csvwriter.writerows(self.PV_Ppower_output)
        with open(os.path.join(self.resFolder,'PVoutput_Q.csv'),'w') as f:
            csvwriter = csv.writer(f)
            csvwriter.writerows(self.PV_Qpower_output)
"numpy.ceil",
"csv.writer",
"os.path.join",
"numpy.asarray",
"numpy.row_stack",
"numpy.savetxt"
] | [((7285, 7339), 'numpy.savetxt', 'np.savetxt', (['self.vn', 'self.d'], {'fmt': '"""%.6e"""', 'delimiter': '""","""'}), "(self.vn, self.d, fmt='%.6e', delimiter=',')\n", (7295, 7339), True, 'import numpy as np\n'), ((6405, 6442), 'numpy.ceil', 'np.ceil', (['(self.Tmax - self.startH * 60)'], {}), '(self.Tmax - self.startH * 60)\n', (6412, 6442), True, 'import numpy as np\n'), ((7161, 7215), 'numpy.savetxt', 'np.savetxt', (['self.fn', 'self.d'], {'fmt': '"""%.8e"""', 'delimiter': '""","""'}), "(self.fn, self.d, fmt='%.8e', delimiter=',')\n", (7171, 7215), True, 'import numpy as np\n'), ((7256, 7276), 'numpy.row_stack', 'np.row_stack', (['self.v'], {}), '(self.v)\n', (7268, 7276), True, 'import numpy as np\n'), ((7488, 7501), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (7498, 7501), False, 'import csv\n'), ((7657, 7670), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (7667, 7670), False, 'import csv\n'), ((7407, 7453), 'os.path.join', 'os.path.join', (['self.resFolder', '"""PVoutput_P.csv"""'], {}), "(self.resFolder, 'PVoutput_P.csv')\n", (7419, 7453), False, 'import os\n'), ((7576, 7622), 'os.path.join', 'os.path.join', (['self.resFolder', '"""PVoutput_Q.csv"""'], {}), "(self.resFolder, 'PVoutput_Q.csv')\n", (7588, 7622), False, 'import os\n'), ((6965, 6987), 'numpy.asarray', 'np.asarray', (['self.vmean'], {}), '(self.vmean)\n', (6975, 6987), True, 'import numpy as np\n'), ((6989, 7010), 'numpy.asarray', 'np.asarray', (['self.vmax'], {}), '(self.vmax)\n', (6999, 7010), True, 'import numpy as np\n'), ((7012, 7033), 'numpy.asarray', 'np.asarray', (['self.vmin'], {}), '(self.vmin)\n', (7022, 7033), True, 'import numpy as np\n'), ((7035, 7060), 'numpy.asarray', 'np.asarray', (['self.capState'], {}), '(self.capState)\n', (7045, 7060), True, 'import numpy as np\n'), ((6642, 6671), 'numpy.asarray', 'np.asarray', (['self.loadDemandKw'], {}), '(self.loadDemandKw)\n', (6652, 6671), True, 'import numpy as np\n'), ((6680, 6711), 'numpy.asarray', 'np.asarray', 
(['self.loadDemandKvar'], {}), '(self.loadDemandKvar)\n', (6690, 6711), True, 'import numpy as np\n'), ((6753, 6775), 'numpy.asarray', 'np.asarray', (['self.subKW'], {}), '(self.subKW)\n', (6763, 6775), True, 'import numpy as np\n'), ((6784, 6808), 'numpy.asarray', 'np.asarray', (['self.subKVAr'], {}), '(self.subKVAr)\n', (6794, 6808), True, 'import numpy as np\n'), ((6817, 6848), 'numpy.asarray', 'np.asarray', (['self.pvGenerationKw'], {}), '(self.pvGenerationKw)\n', (6827, 6848), True, 'import numpy as np\n'), ((6890, 6923), 'numpy.asarray', 'np.asarray', (['self.pvGenerationKvar'], {}), '(self.pvGenerationKvar)\n', (6900, 6923), True, 'import numpy as np\n'), ((7095, 7118), 'numpy.asarray', 'np.asarray', (['self.losses'], {}), '(self.losses)\n', (7105, 7118), True, 'import numpy as np\n')] |
import numpy as np
import math
def main():
    """Demo entry point: compare a template vector against two classes and
    report which class lies closer (smaller euclidean distance).
    """
    class_a = [0.2, 1, 0.7]
    class_b = [0.3, 0.9, 0.8]
    template = [0.2, 0.9, 0.9]
    dist_a = calculate_euclidian_distance(template, class_a)
    dist_b = calculate_euclidian_distance(template, class_b)
    winner = class_a if dist_a <= dist_b else class_b
    message = ('Template: {0};\nClass A: {2}\n - dist: {4}\nClass B: {3};\n'
               ' - dist: {5}\nThe class closest to the template is the class: {1}.')
    print(message.format(template, winner, class_a, class_b, dist_a, dist_b))
def calculate_euclidian_distance(_template, _class):
    """# Calculate Euclidian Distance

    Computes the euclidean (L2) distance between two equal-length vectors.

    Args:
        _template (list): list with the template samples.
        _class (list): list to compare with template.

    Returns:
        float: distance between template and class.
    """
    # np.linalg.norm of the difference vector == sqrt(sum of squared diffs),
    # replacing the hand-rolled math.sqrt(sum(...)) expression.
    return float(np.linalg.norm(np.asarray(_template) - np.asarray(_class)))
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"numpy.array"
] | [((946, 965), 'numpy.array', 'np.array', (['_template'], {}), '(_template)\n', (954, 965), True, 'import numpy as np\n'), ((968, 984), 'numpy.array', 'np.array', (['_class'], {}), '(_class)\n', (976, 984), True, 'import numpy as np\n')] |
import numpy as np
from scipy.optimize import check_grad
from value_iter import value_iter
from utils import norm_distr, laplace_distr, printoptions
def compute_g(mdp, policy, p_0, T, d_last_step_list, expected_features_list):
    """Gradient helper for RLSP (see rlsp below): per possible final state,
    accumulate feature-count deviation terms weighted by trajectory mass.

    Args:
        mdp: MDP exposing nS, nA, num_features, f_matrix and sparse
            T_matrix / T_matrix_transpose. The reshapes below imply shapes
            (nS*nA, nS) and (nS, nS*nA) respectively.
        policy: time-indexed policies, each of shape (nS, nA).
        p_0: initial state distribution (unused here; kept for signature
            parity with the sibling compute_* helpers).
        T: horizon length.
        d_last_step_list: per-timestep state marginals from
            compute_d_last_step(..., return_all=True).
        expected_features_list: per-timestep expectations from
            compute_feature_expectations.

    Returns:
        np.ndarray of shape (nS, num_features).
    """
    nS, nA, nF = mdp.nS, mdp.nA, mdp.num_features
    # base case
    G = np.zeros((nS, nF))
    # recursive case
    for t in range(T-1):
        # G(s') = \sum_{s, a} p(a | s) p(s' | s, a) [ p(s) g(s, a) + G_prev[s] ]
        # p(s) is given by d_last_step_list[t]
        # g(s, a) = f(s) - F(s) + \sum_{s'} p(s' | s, a) F(s')
        # Distribute the addition to get three different terms:
        # First term: p(s) [f(s') - F(s')]
        # Second term: p(s) \sum_{s2} p(s2 | s, a) F(s2)
        # Third term: G_prev[s]
        g_first = mdp.f_matrix - expected_features_list[t]
        g_second = mdp.T_matrix.dot(expected_features_list[t+1])
        g_second = g_second.reshape((nS, nA, nF))
        g_total = np.expand_dims(g_first, axis=1) + g_second
        prob_s_a = np.expand_dims(d_last_step_list[t].reshape(nS), axis=1) * policy[t]
        G_value = np.expand_dims(prob_s_a, axis=2) * g_total
        G_value = mdp.T_matrix_transpose.dot(G_value.reshape((nS * nA, nF)))
        G_recurse = np.expand_dims(policy[t], axis=-1) * np.expand_dims(G, axis=1)
        G_recurse = mdp.T_matrix_transpose.dot(G_recurse.reshape((nS * nA, nF)))
        G = G_value + G_recurse
    return G
def compute_d_last_step(mdp, policy, p_0, T, gamma=1, verbose=False, return_all=False):
    """Propagate the initial state distribution through the time-indexed
    policy for T steps and return the final (last-step) occupancy measure.

    With return_all=True, also return the list of per-step distributions.
    """
    distribution = p_0
    history = [p_0]
    for step in range(T - 1):
        # D(s') = \sum_{s, a} D_prev(s) * p(a | s) * p(s' | s, a)
        joint = np.expand_dims(distribution, axis=1) * policy[step]
        distribution = mdp.T_matrix_transpose.dot(joint.flatten())
        if verbose is True:
            print(distribution)
        if return_all:
            history.append(distribution)
    if return_all:
        return distribution, history
    return distribution
def compute_feature_expectations(mdp, policy, p_0, T):
    """Backward recursion for per-state expected feature counts over T steps
    under the time-indexed policy.

    Returns the horizon-T expectations together with the list of expectations
    for every timestep, ordered from t = 0 to t = T - 1.
    """
    n_states, n_actions, n_features = mdp.nS, mdp.nA, mdp.num_features
    current = mdp.f_matrix
    per_step = [current]
    for step in reversed(range(T - 1)):
        # F(s) = f(s) + \sum_{a, s'} p(a | s) * p(s' | s, a) * F(s')
        successor = mdp.T_matrix.dot(current)
        successor = successor.reshape((n_states, n_actions, n_features))
        weighted = successor * np.expand_dims(policy[step], axis=2)
        current = mdp.f_matrix + weighted.sum(axis=1)
        per_step.append(current)
    return current, per_step[::-1]
def rlsp(mdp, s_current, p_0, horizon, temp=1, epochs=1, learning_rate=0.2,
         r_prior=None, r_vec=None, threshold=1e-3, check_grad_flag=False):
    """The RLSP algorithm: infer a reward vector by gradient ascent on the
    log-likelihood of observing state *s_current* after *horizon* steps.

    Args:
        mdp: MDP exposing nS/nA/num_features, f_matrix and transition matrices.
        s_current: index of the observed current state.
        p_0: initial state distribution (length mdp.nS).
        horizon: number of time steps the environment has been running.
        temp: Boltzmann temperature for the soft-optimal policy.
        epochs: maximum number of gradient-ascent steps.
        learning_rate: gradient-ascent step size.
        r_prior: optional prior exposing logdistr_grad()/logpdf(), or None.
        r_vec: optional initial reward vector; random if None.
        threshold: stop early when the gradient norm drops below this.
        check_grad_flag: if True, numerically verify gradients (slow).

    Returns:
        The inferred reward vector (length mdp.num_features).
    """
    def compute_grad(r_vec):
        # Compute the Boltzmann rational policy \pi_{s,a} = \exp(Q_{s,a} - V_s)
        policy = value_iter(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp)
        d_last_step, d_last_step_list = compute_d_last_step(
            mdp, policy, p_0, horizon, return_all=True)
        if d_last_step[s_current] == 0:
            print('Error in om_method: No feasible trajectories!')
            return r_vec
        expected_features, expected_features_list = compute_feature_expectations(
            mdp, policy, p_0, horizon)
        G = compute_g(mdp, policy, p_0, horizon, d_last_step_list, expected_features_list)
        # Compute the gradient
        dL_dr_vec = G[s_current] / d_last_step[s_current]
        # Gradient of the prior (identity check: `!= None` was a PEP 8 E711
        # violation and could misbehave for priors overriding __ne__)
        if r_prior is not None: dL_dr_vec += r_prior.logdistr_grad(r_vec)
        return dL_dr_vec
    def compute_log_likelihood(r_vec):
        policy = value_iter(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp)
        d_last_step = compute_d_last_step(mdp, policy, p_0, horizon)
        log_likelihood = np.log(d_last_step[s_current])
        if r_prior is not None: log_likelihood += np.sum(r_prior.logpdf(r_vec))
        return log_likelihood
    def get_grad(_):
        """dummy function for use with check_grad()"""
        return dL_dr_vec
    if r_vec is None:
        r_vec = 0.01*np.random.randn(mdp.f_matrix.shape[1])
    print('Initial reward vector: {}'.format(r_vec))
    if check_grad_flag: grad_error_list=[]
    for i in range(epochs):
        dL_dr_vec = compute_grad(r_vec)
        if check_grad_flag:
            grad_error_list.append(check_grad(compute_log_likelihood, get_grad, r_vec))
        # Gradient ascent
        r_vec = r_vec + learning_rate * dL_dr_vec
        # with printoptions(precision=4, suppress=True):
        #     print('Epoch {}; Reward vector: {}'.format(i, r_vec))
        #     if check_grad_flag: print('grad error: {}'.format(grad_error_list[-1]))
        if np.linalg.norm(dL_dr_vec) < threshold:
            if check_grad_flag:
                print()
                print('Max grad error: {}'.format(np.amax(np.asarray(grad_error_list))))
                print('Median grad error: {}'.format(np.median(np.asarray(grad_error_list))))
            break
    return r_vec
| [
"value_iter.value_iter",
"numpy.log",
"numpy.asarray",
"numpy.sum",
"numpy.zeros",
"numpy.expand_dims",
"numpy.linalg.norm",
"scipy.optimize.check_grad",
"numpy.random.randn"
] | [((305, 323), 'numpy.zeros', 'np.zeros', (['(nS, nF)'], {}), '((nS, nF))\n', (313, 323), True, 'import numpy as np\n'), ((2931, 2986), 'value_iter.value_iter', 'value_iter', (['mdp', '(1)', '(mdp.f_matrix @ r_vec)', 'horizon', 'temp'], {}), '(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp)\n', (2941, 2986), False, 'from value_iter import value_iter\n'), ((3722, 3777), 'value_iter.value_iter', 'value_iter', (['mdp', '(1)', '(mdp.f_matrix @ r_vec)', 'horizon', 'temp'], {}), '(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp)\n', (3732, 3777), False, 'from value_iter import value_iter\n'), ((3872, 3902), 'numpy.log', 'np.log', (['d_last_step[s_current]'], {}), '(d_last_step[s_current])\n', (3878, 3902), True, 'import numpy as np\n'), ((951, 982), 'numpy.expand_dims', 'np.expand_dims', (['g_first'], {'axis': '(1)'}), '(g_first, axis=1)\n', (965, 982), True, 'import numpy as np\n'), ((1101, 1133), 'numpy.expand_dims', 'np.expand_dims', (['prob_s_a'], {'axis': '(2)'}), '(prob_s_a, axis=2)\n', (1115, 1133), True, 'import numpy as np\n'), ((1242, 1276), 'numpy.expand_dims', 'np.expand_dims', (['policy[t]'], {'axis': '(-1)'}), '(policy[t], axis=-1)\n', (1256, 1276), True, 'import numpy as np\n'), ((1279, 1304), 'numpy.expand_dims', 'np.expand_dims', (['G'], {'axis': '(1)'}), '(G, axis=1)\n', (1293, 1304), True, 'import numpy as np\n'), ((1730, 1755), 'numpy.expand_dims', 'np.expand_dims', (['D'], {'axis': '(1)'}), '(D, axis=1)\n', (1744, 1755), True, 'import numpy as np\n'), ((2400, 2433), 'numpy.expand_dims', 'np.expand_dims', (['policy[t]'], {'axis': '(2)'}), '(policy[t], axis=2)\n', (2414, 2433), True, 'import numpy as np\n'), ((2477, 2508), 'numpy.sum', 'np.sum', (['future_features'], {'axis': '(1)'}), '(future_features, axis=1)\n', (2483, 2508), True, 'import numpy as np\n'), ((4154, 4192), 'numpy.random.randn', 'np.random.randn', (['mdp.f_matrix.shape[1]'], {}), '(mdp.f_matrix.shape[1])\n', (4169, 4192), True, 'import numpy as np\n'), ((4776, 4801), 'numpy.linalg.norm', 
'np.linalg.norm', (['dL_dr_vec'], {}), '(dL_dr_vec)\n', (4790, 4801), True, 'import numpy as np\n'), ((4422, 4473), 'scipy.optimize.check_grad', 'check_grad', (['compute_log_likelihood', 'get_grad', 'r_vec'], {}), '(compute_log_likelihood, get_grad, r_vec)\n', (4432, 4473), False, 'from scipy.optimize import check_grad\n'), ((4929, 4956), 'numpy.asarray', 'np.asarray', (['grad_error_list'], {}), '(grad_error_list)\n', (4939, 4956), True, 'import numpy as np\n'), ((5023, 5050), 'numpy.asarray', 'np.asarray', (['grad_error_list'], {}), '(grad_error_list)\n', (5033, 5050), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Configure folder for RCC drift correction testing.
Note: This test takes approximately 20-30 minutes.
Hazen 09/18
"""
import numpy
import storm_analysis.simulator.emitters_in_clusters as emittersInClusters
import storm_analysis.simulator.emitters_on_lines as emittersOnLines
import storm_analysis.diagnostics.rcc.settings as settings
def configure():
    """Populate the working folder for the RCC drift-correction test."""
    # Clustered localizations.
    print("Creating clustered localizations file.")
    emittersInClusters.emittersInClusters(
        "cluster_list.hdf5", 50, 200, 1.0,
        sx=settings.x_size, sy=settings.y_size)
    # Localizations on lines.
    print("Creating lines localizations file.")
    emittersOnLines.emittersOnLines(
        "lines_list.hdf5", 50, 100000,
        sx=settings.x_size, sy=settings.y_size)
    # Per-frame drift trajectories used by the simulations to displace the
    # localizations; each ramps from 0 to the total drift inclusive.
    step_x = settings.x_drift/settings.n_frames
    step_y = settings.y_drift/settings.n_frames
    step_z = settings.z_drift/settings.n_frames
    drift_x = numpy.arange(0.0, settings.x_drift + 0.5 * step_x, step_x)
    drift_y = numpy.arange(0.0, settings.y_drift + 0.5 * step_y, step_y)
    drift_z = numpy.arange(0.0, settings.z_drift + 0.5 * step_z, step_z)
    drift_data = numpy.zeros((drift_x.size, 3))
    drift_data[:, 0] = drift_x
    drift_data[:, 1] = drift_y
    numpy.savetxt("drift_xy.txt", drift_data)
    drift_data[:, 2] = drift_z
    numpy.savetxt("drift_xyz.txt", drift_data)
# Allow running this module directly as a configuration script.
if (__name__ == "__main__"):
    configure()
| [
"storm_analysis.simulator.emitters_on_lines.emittersOnLines",
"storm_analysis.simulator.emitters_in_clusters.emittersInClusters",
"numpy.zeros",
"numpy.savetxt",
"numpy.arange"
] | [((499, 616), 'storm_analysis.simulator.emitters_in_clusters.emittersInClusters', 'emittersInClusters.emittersInClusters', (['"""cluster_list.hdf5"""', '(50)', '(200)', '(1.0)'], {'sx': 'settings.x_size', 'sy': 'settings.y_size'}), "('cluster_list.hdf5', 50, 200, 1.0, sx\n =settings.x_size, sy=settings.y_size)\n", (536, 616), True, 'import storm_analysis.simulator.emitters_in_clusters as emittersInClusters\n'), ((927, 1034), 'storm_analysis.simulator.emitters_on_lines.emittersOnLines', 'emittersOnLines.emittersOnLines', (['"""lines_list.hdf5"""', '(50)', '(100000)'], {'sx': 'settings.x_size', 'sy': 'settings.y_size'}), "('lines_list.hdf5', 50, 100000, sx=settings.\n x_size, sy=settings.y_size)\n", (958, 1034), True, 'import storm_analysis.simulator.emitters_on_lines as emittersOnLines\n'), ((1337, 1387), 'numpy.arange', 'numpy.arange', (['(0.0)', '(settings.x_drift + 0.5 * dx)', 'dx'], {}), '(0.0, settings.x_drift + 0.5 * dx, dx)\n', (1349, 1387), False, 'import numpy\n'), ((1451, 1501), 'numpy.arange', 'numpy.arange', (['(0.0)', '(settings.y_drift + 0.5 * dy)', 'dy'], {}), '(0.0, settings.y_drift + 0.5 * dy, dy)\n', (1463, 1501), False, 'import numpy\n'), ((1561, 1611), 'numpy.arange', 'numpy.arange', (['(0.0)', '(settings.z_drift + 0.5 * dz)', 'dz'], {}), '(0.0, settings.z_drift + 0.5 * dz, dz)\n', (1573, 1611), False, 'import numpy\n'), ((1630, 1660), 'numpy.zeros', 'numpy.zeros', (['(drift_x.size, 3)'], {}), '((drift_x.size, 3))\n', (1641, 1660), False, 'import numpy\n'), ((1730, 1771), 'numpy.savetxt', 'numpy.savetxt', (['"""drift_xy.txt"""', 'drift_data'], {}), "('drift_xy.txt', drift_data)\n", (1743, 1771), False, 'import numpy\n'), ((1811, 1853), 'numpy.savetxt', 'numpy.savetxt', (['"""drift_xyz.txt"""', 'drift_data'], {}), "('drift_xyz.txt', drift_data)\n", (1824, 1853), False, 'import numpy\n')] |
import numpy as np
import pylab
def plot_all():
    """Plot four common activation functions (Sigmoid, Tanh, ReLU, PReLU)
    in a 2x2 subplot grid and show the figure.

    Each curve is evaluated on 200 evenly spaced points in [-10, 10] using
    vectorised numpy expressions instead of per-element Python loops.
    """
    x = np.linspace(-10, 10, num=200)
    # Name/value pairs for each panel; PReLU uses a fixed negative slope of 0.3.
    curves = [
        ("Sigmoid", 1 / (1 + np.exp(-x))),
        ("Tanh", np.tanh(x)),          # equivalent to 2*sigmoid(2x) - 1
        ("ReLU", np.maximum(x, 0)),
        ("PReLU", np.where(x < 0, 0.3 * x, x)),
    ]
    pylab.figure()
    for panel, (title, y) in enumerate(curves, start=1):
        pylab.subplot(2, 2, panel)
        pylab.plot(x, y, 'yellowgreen')
        pylab.grid()
        pylab.title(title)
    pylab.show()
# Render all four activation-function panels when the script runs.
plot_all()
| [
"pylab.title",
"pylab.subplot",
"pylab.plot",
"pylab.grid",
"pylab.figure",
"numpy.exp",
"numpy.linspace",
"pylab.show"
] | [((66, 95), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)'], {'num': '(200)'}), '(-10, 10, num=200)\n', (77, 95), True, 'import numpy as np\n'), ((180, 209), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)'], {'num': '(200)'}), '(-10, 10, num=200)\n', (191, 209), True, 'import numpy as np\n'), ((302, 331), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)'], {'num': '(200)'}), '(-10, 10, num=200)\n', (313, 331), True, 'import numpy as np\n'), ((486, 515), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)'], {'num': '(200)'}), '(-10, 10, num=200)\n', (497, 515), True, 'import numpy as np\n'), ((674, 688), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (686, 688), False, 'import pylab\n'), ((694, 716), 'pylab.subplot', 'pylab.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (707, 716), False, 'import pylab\n'), ((721, 768), 'pylab.plot', 'pylab.plot', (['sigmoid_x', 'sigmoid_y', '"""yellowgreen"""'], {}), "(sigmoid_x, sigmoid_y, 'yellowgreen')\n", (731, 768), False, 'import pylab\n'), ((773, 785), 'pylab.grid', 'pylab.grid', ([], {}), '()\n', (783, 785), False, 'import pylab\n'), ((790, 812), 'pylab.title', 'pylab.title', (['"""Sigmoid"""'], {}), "('Sigmoid')\n", (801, 812), False, 'import pylab\n'), ((818, 840), 'pylab.subplot', 'pylab.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (831, 840), False, 'import pylab\n'), ((845, 886), 'pylab.plot', 'pylab.plot', (['tanh_x', 'tanh_y', '"""yellowgreen"""'], {}), "(tanh_x, tanh_y, 'yellowgreen')\n", (855, 886), False, 'import pylab\n'), ((891, 903), 'pylab.grid', 'pylab.grid', ([], {}), '()\n', (901, 903), False, 'import pylab\n'), ((908, 927), 'pylab.title', 'pylab.title', (['"""Tanh"""'], {}), "('Tanh')\n", (919, 927), False, 'import pylab\n'), ((933, 955), 'pylab.subplot', 'pylab.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (946, 955), False, 'import pylab\n'), ((960, 1001), 'pylab.plot', 'pylab.plot', (['ReLU_x', 'ReLU_y', '"""yellowgreen"""'], {}), "(ReLU_x, ReLU_y, 
'yellowgreen')\n", (970, 1001), False, 'import pylab\n'), ((1006, 1018), 'pylab.grid', 'pylab.grid', ([], {}), '()\n', (1016, 1018), False, 'import pylab\n'), ((1023, 1042), 'pylab.title', 'pylab.title', (['"""ReLU"""'], {}), "('ReLU')\n", (1034, 1042), False, 'import pylab\n'), ((1048, 1070), 'pylab.subplot', 'pylab.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (1061, 1070), False, 'import pylab\n'), ((1075, 1118), 'pylab.plot', 'pylab.plot', (['PReLU_x', 'PReLU_y', '"""yellowgreen"""'], {}), "(PReLU_x, PReLU_y, 'yellowgreen')\n", (1085, 1118), False, 'import pylab\n'), ((1123, 1135), 'pylab.grid', 'pylab.grid', ([], {}), '()\n', (1133, 1135), False, 'import pylab\n'), ((1140, 1160), 'pylab.title', 'pylab.title', (['"""PReLU"""'], {}), "('PReLU')\n", (1151, 1160), False, 'import pylab\n'), ((1166, 1178), 'pylab.show', 'pylab.show', ([], {}), '()\n', (1176, 1178), False, 'import pylab\n'), ((122, 140), 'numpy.exp', 'np.exp', (['(-1 * value)'], {}), '(-1 * value)\n', (128, 140), True, 'import numpy as np\n'), ((238, 260), 'numpy.exp', 'np.exp', (['(-1 * 2 * value)'], {}), '(-1 * 2 * value)\n', (244, 260), True, 'import numpy as np\n')] |
""" Run weighted retraining for shapes with the optimal model """
import sys
import logging
import itertools
from tqdm.auto import tqdm
import argparse
from pathlib import Path
import numpy as np
import torch
import pytorch_lightning as pl
# My imports
from weighted_retraining.shapes.shapes_data import WeightedNumpyDataset
from weighted_retraining.shapes.shapes_model import ShapesVAE
from weighted_retraining import utils
from weighted_retraining.opt_scripts import base as wr_base
def retrain_model(model, datamodule, save_dir, version_str, num_epochs, gpu):
    """Fine-tune ``model`` on ``datamodule`` for ``num_epochs`` epochs.

    ``num_epochs`` may be a fraction below 1, in which case a single epoch
    runs over that fraction of the training batches.  Logs and checkpoints
    are written under ``save_dir``/``version_str``.
    """
    # Silence lightning's logger so it does not disturb the progress bars.
    pl._logger.setLevel(logging.CRITICAL)
    progress = utils.SubmissivePlProgressbar(process_position=1)

    # Custom logger and checkpoint writer rooted at the requested directory.
    logger = pl.loggers.TensorBoardLogger(
        save_dir=save_dir, version=version_str, name=""
    )
    saver = pl.callbacks.ModelCheckpoint(save_last=True, monitor="loss/val")

    # Translate the (possibly fractional) epoch count into trainer settings.
    if num_epochs < 1:
        max_epochs, train_fraction = 1, num_epochs
    elif num_epochs == int(num_epochs):
        max_epochs, train_fraction = int(num_epochs), 1.0
    else:
        raise ValueError(f"invalid num epochs {num_epochs}")

    trainer = pl.Trainer(
        gpus=1 if gpu else 0,
        max_epochs=max_epochs,
        limit_train_batches=train_fraction,
        limit_val_batches=1,
        checkpoint_callback=saver,
        terminate_on_nan=True,
        logger=logger,
        callbacks=[progress],
    )
    trainer.fit(model, datamodule)
def _batch_decode_z_and_props(model, z, args, filter_unique=True):
    """Decode latent vectors in batches and compute their property values.

    Args:
        model: VAE exposing ``decode_deterministic`` (takes a batch of latent
            vectors, returns decoded images shaped [B, C, H, W]).
        z: Latent points to decode (torch tensor, [N, latent_dim]).
        args: Namespace whose ``property_key`` selects the property function.
        filter_unique: If True, deduplicate the decoded images and also
            return the matching latent points.

    Returns:
        ``(z_decode, z_prop, z)`` when ``filter_unique`` is True, otherwise
        ``(z_decode, z_prop)``.

    Raises:
        ValueError: If ``args.property_key`` names an unknown property.
    """
    # Decode all points in manageable batches; no gradients are needed.
    z_decode = []
    batch_size = 1000
    for j in range(0, len(z), batch_size):
        with torch.no_grad():
            img = model.decode_deterministic(z[j : j + batch_size])
            img = img.cpu().numpy()
        z_decode.append(img)
        del img
    # Concatenate all points and round to integer pixel values.
    z_decode = np.concatenate(z_decode, axis=0)
    z_decode = np.around(z_decode)  # convert to int
    z_decode = z_decode[:, 0, ...]  # keep the single image channel
    if filter_unique:
        z_decode, uniq_indices = np.unique(
            z_decode, axis=0, return_index=True
        )  # Unique elements only
        z = z.cpu().numpy()[uniq_indices]
    # Calculate objective function values.
    if args.property_key == "areas":
        z_prop = np.sum(z_decode, axis=(-1, -2))
    else:
        # Bugfix: report the offending key. ``args.property`` does not exist
        # on the parsed namespace and raised AttributeError instead.
        raise ValueError(args.property_key)
    if filter_unique:
        return z_decode, z_prop, z
    else:
        return z_decode, z_prop
def latent_optimization(args, model, datamodule, num_queries_to_do):
    """Exhaustively grid-search the latent space with the optimal model.

    A regular grid over [-opt_bounds, opt_bounds]^latent_dim is decoded and
    scored; the top ``num_queries_to_do`` points by descending property
    value are returned as (decoded points, scores, latent queries).
    """
    axis = np.linspace(-args.opt_bounds, args.opt_bounds, args.opt_grid_len)
    grid = np.array(
        list(itertools.product(axis, repeat=model.latent_dim)), dtype=np.float32
    )
    z_grid = torch.as_tensor(grid, device=model.device)

    decoded, scores, latents = _batch_decode_z_and_props(model, z_grid, args)

    # Indices sorted by descending score (the property is being maximised).
    best = np.argsort(-scores)[:num_queries_to_do]
    return decoded[best], scores[best], latents[best]
def latent_sampling(args, model, datamodule, num_queries_to_do, filter_unique=True):
    """Decode standard-normal latent samples and return them with their properties."""
    noise = torch.randn(
        num_queries_to_do, model.latent_dim, device=model.device
    )
    return _batch_decode_z_and_props(
        model, noise, args, filter_unique=filter_unique
    )
def main_loop(args):
    """Run the weighted-retraining latent-space optimisation loop.

    Alternates between (optionally) retraining the VAE on the growing,
    weighted dataset and querying new points with the chosen strategy
    ("opt" = exhaustive latent grid search, "sample" = random sampling),
    until ``args.query_budget`` points have been acquired.  Checkpoints,
    augmented datasets, and a ``results.npz`` summary are written under
    ``args.result_root``.
    """
    # Seeding
    pl.seed_everything(args.seed)
    # Make results directory (raises if it already exists, by design)
    result_dir = Path(args.result_root).resolve()
    result_dir.mkdir(parents=True)
    data_dir = result_dir / "data"
    data_dir.mkdir()
    # Load data
    datamodule = WeightedNumpyDataset(args, utils.DataWeighter(args))
    datamodule.setup("fit")
    # Load model
    model = ShapesVAE.load_from_checkpoint(args.pretrained_model_file)
    model.beta = model.hparams.beta_final  # Override any beta annealing
    # Set up results tracking
    results = dict(
        opt_points=[],
        opt_latent_points=[],
        opt_point_properties=[],
        opt_model_version=[],
        params=str(sys.argv),
        sample_points=[],
        sample_versions=[],
        sample_properties=[],
        latent_space_snapshots=[],
        latent_space_snapshot_version=[],
    )
    # Set up latent space snapshot: a fixed grid decoded after every retrain.
    results["latent_space_grid"] = np.array(
        list(itertools.product(np.arange(-4, 4.01, 0.5), repeat=model.latent_dim)),
        dtype=np.float32,
    )
    # Set up some stuff for the progress bar
    num_retrain = int(np.ceil(args.query_budget / args.retraining_frequency))
    postfix = dict(
        retrain_left=num_retrain, best=-float("inf"), n_train=len(datamodule.data_train)
    )
    # Main loop
    with tqdm(
        total=args.query_budget, dynamic_ncols=True, smoothing=0.0, file=sys.stdout
    ) as pbar:
        for ret_idx in range(num_retrain):
            pbar.set_postfix(postfix)
            pbar.set_description("retraining")
            # Decide whether to retrain
            samples_so_far = args.retraining_frequency * ret_idx
            # Optionally do retraining (first round may use a longer schedule)
            num_epochs = args.n_retrain_epochs
            if ret_idx == 0 and args.n_init_retrain_epochs is not None:
                num_epochs = args.n_init_retrain_epochs
            if num_epochs > 0:
                retrain_dir = result_dir / "retraining"
                version = f"retrain_{samples_so_far}"
                retrain_model(
                    model, datamodule, retrain_dir, version, num_epochs, args.gpu
                )
            # Draw samples for logs!
            if args.samples_per_model > 0:
                pbar.set_description("sampling")
                sample_x, sample_y = latent_sampling(
                    args, model, datamodule, args.samples_per_model, filter_unique=False
                )
                # Append to results dict
                results["sample_points"].append(sample_x)
                results["sample_properties"].append(sample_y)
                results["sample_versions"].append(ret_idx)
            # Take latent snapshot: decode the fixed grid with the current model
            latent_snapshot = _batch_decode_z_and_props(
                model,
                torch.as_tensor(results["latent_space_grid"], device=model.device),
                args,
                filter_unique=False,
            )[0]
            results["latent_space_snapshots"].append(latent_snapshot)
            results["latent_space_snapshot_version"].append(ret_idx)
            # Update progress bar
            postfix["retrain_left"] -= 1
            pbar.set_postfix(postfix)
            pbar.set_description("querying")
            # Do querying! (last round may be truncated to the remaining budget)
            num_queries_to_do = min(
                args.retraining_frequency, args.query_budget - samples_so_far
            )
            if args.lso_strategy == "opt":
                x_new, y_new, z_query = latent_optimization(
                    args, model, datamodule, num_queries_to_do
                )
            elif args.lso_strategy == "sample":
                x_new, y_new, z_query = latent_sampling(
                    args, model, datamodule, num_queries_to_do
                )
            else:
                raise NotImplementedError(args.lso_strategy)
            # Append new points to dataset
            datamodule.append_train_data(x_new, y_new)
            # Save a new dataset
            new_data_file = (
                data_dir / f"train_data_iter{samples_so_far + num_queries_to_do}.npz"
            )
            np.savez_compressed(
                str(new_data_file),
                data=datamodule.data_train,
                **{args.property_key: datamodule.prop_train},
            )
            # Save results (overwritten every round so partial runs are usable)
            results["opt_latent_points"] += [z for z in z_query]
            results["opt_points"] += [x for x in x_new]
            results["opt_point_properties"] += [y for y in y_new]
            results["opt_model_version"] += [ret_idx] * len(x_new)
            np.savez_compressed(str(result_dir / "results.npz"), **results)
            # Final update of progress bar
            postfix["best"] = max(postfix["best"], float(y_new.max()))
            postfix["n_train"] = len(datamodule.data_train)
            pbar.set_postfix(postfix)
            pbar.update(n=num_queries_to_do)
if __name__ == "__main__":
    # arguments and argument checking; each helper registers its own flag
    # group on the shared parser.
    parser = argparse.ArgumentParser()
    parser = WeightedNumpyDataset.add_model_specific_args(parser)
    parser = utils.DataWeighter.add_weight_args(parser)
    parser = wr_base.add_common_args(parser)
    # Optimal model arguments (bounds and resolution of the latent grid search)
    opt_group = parser.add_argument_group(title="opt-model")
    opt_group.add_argument("--opt_bounds", type=float, default=4)
    opt_group.add_argument("--opt_grid_len", type=float, default=50)
    args = parser.parse_args()
    main_loop(args)
| [
"pytorch_lightning._logger.setLevel",
"torch.as_tensor",
"weighted_retraining.shapes.shapes_model.ShapesVAE.load_from_checkpoint",
"weighted_retraining.opt_scripts.base.add_common_args",
"weighted_retraining.shapes.shapes_data.WeightedNumpyDataset.add_model_specific_args",
"numpy.argsort",
"pytorch_ligh... | [((631, 668), 'pytorch_lightning._logger.setLevel', 'pl._logger.setLevel', (['logging.CRITICAL'], {}), '(logging.CRITICAL)\n', (650, 668), True, 'import pytorch_lightning as pl\n'), ((686, 735), 'weighted_retraining.utils.SubmissivePlProgressbar', 'utils.SubmissivePlProgressbar', ([], {'process_position': '(1)'}), '(process_position=1)\n', (715, 735), False, 'from weighted_retraining import utils\n'), ((790, 867), 'pytorch_lightning.loggers.TensorBoardLogger', 'pl.loggers.TensorBoardLogger', ([], {'save_dir': 'save_dir', 'version': 'version_str', 'name': '""""""'}), "(save_dir=save_dir, version=version_str, name='')\n", (818, 867), True, 'import pytorch_lightning as pl\n'), ((901, 965), 'pytorch_lightning.callbacks.ModelCheckpoint', 'pl.callbacks.ModelCheckpoint', ([], {'save_last': '(True)', 'monitor': '"""loss/val"""'}), "(save_last=True, monitor='loss/val')\n", (929, 965), True, 'import pytorch_lightning as pl\n'), ((1304, 1535), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'gpus': '(1 if gpu else 0)', 'max_epochs': 'max_epochs', 'limit_train_batches': 'limit_train_batches', 'limit_val_batches': '(1)', 'checkpoint_callback': 'checkpointer', 'terminate_on_nan': '(True)', 'logger': 'tb_logger', 'callbacks': '[train_pbar]'}), '(gpus=1 if gpu else 0, max_epochs=max_epochs, limit_train_batches\n =limit_train_batches, limit_val_batches=1, checkpoint_callback=\n checkpointer, terminate_on_nan=True, logger=tb_logger, callbacks=[\n train_pbar])\n', (1314, 1535), True, 'import pytorch_lightning as pl\n'), ((2198, 2230), 'numpy.concatenate', 'np.concatenate', (['z_decode'], {'axis': '(0)'}), '(z_decode, axis=0)\n', (2212, 2230), True, 'import numpy as np\n'), ((2246, 2265), 'numpy.around', 'np.around', (['z_decode'], {}), '(z_decode)\n', (2255, 2265), True, 'import numpy as np\n'), ((3006, 3071), 'numpy.linspace', 'np.linspace', (['(-args.opt_bounds)', 'args.opt_bounds', 'args.opt_grid_len'], {}), '(-args.opt_bounds, args.opt_bounds, 
args.opt_grid_len)\n', (3017, 3071), True, 'import numpy as np\n'), ((3168, 3207), 'numpy.array', 'np.array', (['latent_grid'], {'dtype': 'np.float32'}), '(latent_grid, dtype=np.float32)\n', (3176, 3207), True, 'import numpy as np\n'), ((3227, 3276), 'torch.as_tensor', 'torch.as_tensor', (['latent_grid'], {'device': 'model.device'}), '(latent_grid, device=model.device)\n', (3242, 3276), False, 'import torch\n'), ((3379, 3402), 'numpy.argsort', 'np.argsort', (['(-1 * z_prop)'], {}), '(-1 * z_prop)\n', (3389, 3402), True, 'import numpy as np\n'), ((3847, 3916), 'torch.randn', 'torch.randn', (['num_queries_to_do', 'model.latent_dim'], {'device': 'model.device'}), '(num_queries_to_do, model.latent_dim, device=model.device)\n', (3858, 3916), False, 'import torch\n'), ((4048, 4077), 'pytorch_lightning.seed_everything', 'pl.seed_everything', (['args.seed'], {}), '(args.seed)\n', (4066, 4077), True, 'import pytorch_lightning as pl\n'), ((4394, 4452), 'weighted_retraining.shapes.shapes_model.ShapesVAE.load_from_checkpoint', 'ShapesVAE.load_from_checkpoint', (['args.pretrained_model_file'], {}), '(args.pretrained_model_file)\n', (4424, 4452), False, 'from weighted_retraining.shapes.shapes_model import ShapesVAE\n'), ((9022, 9047), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9045, 9047), False, 'import argparse\n'), ((9061, 9113), 'weighted_retraining.shapes.shapes_data.WeightedNumpyDataset.add_model_specific_args', 'WeightedNumpyDataset.add_model_specific_args', (['parser'], {}), '(parser)\n', (9105, 9113), False, 'from weighted_retraining.shapes.shapes_data import WeightedNumpyDataset\n'), ((9127, 9169), 'weighted_retraining.utils.DataWeighter.add_weight_args', 'utils.DataWeighter.add_weight_args', (['parser'], {}), '(parser)\n', (9161, 9169), False, 'from weighted_retraining import utils\n'), ((9183, 9214), 'weighted_retraining.opt_scripts.base.add_common_args', 'wr_base.add_common_args', (['parser'], {}), '(parser)\n', (9206, 9214), True, 
'from weighted_retraining.opt_scripts import base as wr_base\n'), ((2393, 2439), 'numpy.unique', 'np.unique', (['z_decode'], {'axis': '(0)', 'return_index': '(True)'}), '(z_decode, axis=0, return_index=True)\n', (2402, 2439), True, 'import numpy as np\n'), ((2657, 2688), 'numpy.sum', 'np.sum', (['z_decode'], {'axis': '(-1, -2)'}), '(z_decode, axis=(-1, -2))\n', (2663, 2688), True, 'import numpy as np\n'), ((3095, 3148), 'itertools.product', 'itertools.product', (['unit_line'], {'repeat': 'model.latent_dim'}), '(unit_line, repeat=model.latent_dim)\n', (3112, 3148), False, 'import itertools\n'), ((4310, 4334), 'weighted_retraining.utils.DataWeighter', 'utils.DataWeighter', (['args'], {}), '(args)\n', (4328, 4334), False, 'from weighted_retraining import utils\n'), ((5156, 5210), 'numpy.ceil', 'np.ceil', (['(args.query_budget / args.retraining_frequency)'], {}), '(args.query_budget / args.retraining_frequency)\n', (5163, 5210), True, 'import numpy as np\n'), ((5353, 5439), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': 'args.query_budget', 'dynamic_ncols': '(True)', 'smoothing': '(0.0)', 'file': 'sys.stdout'}), '(total=args.query_budget, dynamic_ncols=True, smoothing=0.0, file=sys.\n stdout)\n', (5357, 5439), False, 'from tqdm.auto import tqdm\n'), ((1957, 1972), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1970, 1972), False, 'import torch\n'), ((4125, 4147), 'pathlib.Path', 'Path', (['args.result_root'], {}), '(args.result_root)\n', (4129, 4147), False, 'from pathlib import Path\n'), ((5003, 5027), 'numpy.arange', 'np.arange', (['(-4)', '(4.01)', '(0.5)'], {}), '(-4, 4.01, 0.5)\n', (5012, 5027), True, 'import numpy as np\n'), ((6824, 6890), 'torch.as_tensor', 'torch.as_tensor', (["results['latent_space_grid']"], {'device': 'model.device'}), "(results['latent_space_grid'], device=model.device)\n", (6839, 6890), False, 'import torch\n')] |
#! -*- coding: utf-8 -*-
#---------------------------------
# Module imports
#---------------------------------
import os
import argparse
import json
import pickle
import cv2
import numpy as np
import pandas as pd
#---------------------------------
# Constants
#---------------------------------
#---------------------------------
# Functions
#---------------------------------
def ArgParser():
    """Parse and return the command-line arguments for this converter."""
    p = argparse.ArgumentParser(
        description='CIFAR-10データセットをAI Dashboardのカスタムデータセットの形式に変換する',
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # Register the supported options.
    p.add_argument(
        '--input_dir', dest='input_dir', type=str, default='input', required=False,
        help='CIFAR-10データセット(cifar-10-batches-py)のディレクトリパス',
    )
    p.add_argument(
        '--output_dir', dest='output_dir', type=str, default='output', required=False,
        help='カスタムデータセットの出力ディレクトリ',
    )
    p.add_argument(
        '--n_data', dest='n_data', type=int, default=0, required=False,
        help='取得するデータサンプル数(0以下指定で全データを取得)',
    )
    return p.parse_args()
def load_cifar10_dataset(input_dir):
    """Load the CIFAR-10 python-format dataset from ``input_dir``.

    Args:
        input_dir: Path to the extracted ``cifar-10-batches-py`` directory.

    Returns:
        Tuple ``(train_images, train_labels, test_images, test_labels)``.
        Image arrays are shaped ``[N, 32, 32, 3]`` (HWC channel order).
    """
    # --- local function for unpickle ---
    def _unpickle(path):
        # CIFAR-10 batches are pickled with byte-string keys (b'data', b'labels').
        with open(path, 'rb') as fo:
            return pickle.load(fo, encoding='bytes')
    # --- training data: concatenate the five training batches ---
    train_data_list = ["data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4", "data_batch_5"]
    batch = _unpickle(os.path.join(input_dir, train_data_list[0]))
    train_images = batch[b'data']
    train_labels = batch[b'labels'].copy()
    for train_data in train_data_list[1:]:
        batch = _unpickle(os.path.join(input_dir, train_data))
        train_images = np.vstack((train_images, batch[b'data']))
        train_labels = np.hstack((train_labels, batch[b'labels']))
    # --- test data ---
    batch = _unpickle(os.path.join(input_dir, "test_batch"))
    test_images = batch[b'data']
    test_labels = batch[b'labels'].copy()
    # --- reshape flat rows to images: [N, C, H, W] -> [N, H, W, C] ---
    train_images = train_images.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
    test_images = test_images.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
    return train_images, train_labels, test_images, test_labels
def save_image_files(images, image_shape, labels, output_dir, name='images', n_data=0):
    """Write images as PNG files plus an ``info.json`` index of records.

    Args:
        images: Sequence of image arrays.
        image_shape: Shape each image is reshaped to before writing.
        labels: Class id per image (same ordering as ``images``).
        output_dir: Destination directory; ``info.json`` is written here.
        name: Sub-directory (and path prefix stored in the index) for the PNGs.
        n_data: Number of samples to export; <= 0 (or larger than the data)
            means export everything.
    """
    os.makedirs(os.path.join(output_dir, name), exist_ok=True)
    # Clamp the requested sample count to the available data.
    if ((n_data <= 0) or (n_data > len(images))):
        n_data = len(images)
    records = []
    for i, (image, label) in enumerate(zip(images[0:n_data], labels[0:n_data])):
        image_file = os.path.join(name, f'{i:08}.png')
        cv2.imwrite(os.path.join(output_dir, image_file), image.reshape(image_shape))
        records.append({'id': i, 'file': image_file, 'class_id': int(label)})
    # --- save image file metadata as a JSON array of records ---
    # (written directly; the previous DataFrame -> to_json -> loads -> dump
    # roundtrip produced the same output with two extra conversions)
    with open(os.path.join(output_dir, 'info.json'), 'w') as f:
        json.dump(records, f, ensure_ascii=False, indent=4)
    return None
def main():
    """Entry point: parse arguments, load CIFAR-10, and export both splits."""
    # --- parse command-line arguments ---
    args = ArgParser()
    print('args.input_dir : {}'.format(args.input_dir))
    print('args.output_dir : {}'.format(args.output_dir))
    print('args.n_data : {}'.format(args.n_data))
    # --- load the CIFAR-10 dataset ---
    train_images, train_labels, test_images, test_labels = load_cifar10_dataset(args.input_dir)
    # --- export the train split, then the test split ---
    splits = [
        ('train_data', train_images, train_labels),
        ('test_data', test_images, test_labels),
    ]
    for sub_dir, split_images, split_labels in splits:
        dest = os.path.join(args.output_dir, sub_dir)
        os.makedirs(dest, exist_ok=True)
        save_image_files(split_images, split_images.shape[1:], split_labels, dest, name='images', n_data=args.n_data)
    return
#---------------------------------
# Main entry point
#---------------------------------
if __name__ == '__main__':
    # Run the conversion pipeline when executed as a script.
    main()
| [
"argparse.ArgumentParser",
"os.makedirs",
"numpy.hstack",
"os.path.join",
"pickle.load",
"numpy.vstack",
"pandas.DataFrame"
] | [((393, 535), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CIFAR-10データセットをAI Dashboardのカスタムデータセットの形式に変換する"""', 'formatter_class': 'argparse.RawTextHelpFormatter'}), "(description=\n 'CIFAR-10データセットをAI Dashboardのカスタムデータセットの形式に変換する', formatter_class=\n argparse.RawTextHelpFormatter)\n", (416, 535), False, 'import argparse\n'), ((2943, 2972), 'pandas.DataFrame', 'pd.DataFrame', (['dict_image_file'], {}), '(dict_image_file)\n', (2955, 2972), True, 'import pandas as pd\n'), ((3528, 3571), 'os.path.join', 'os.path.join', (['args.output_dir', '"""train_data"""'], {}), "(args.output_dir, 'train_data')\n", (3540, 3571), False, 'import os\n'), ((3573, 3611), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (3584, 3611), False, 'import os\n'), ((3779, 3821), 'os.path.join', 'os.path.join', (['args.output_dir', '"""test_data"""'], {}), "(args.output_dir, 'test_data')\n", (3791, 3821), False, 'import os\n'), ((3823, 3861), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (3834, 3861), False, 'import os\n'), ((1359, 1402), 'os.path.join', 'os.path.join', (['input_dir', 'train_data_list[0]'], {}), '(input_dir, train_data_list[0])\n', (1371, 1402), False, 'import os\n'), ((1601, 1646), 'numpy.vstack', 'np.vstack', (["(train_images, dict_data[b'data'])"], {}), "((train_images, dict_data[b'data']))\n", (1610, 1646), True, 'import numpy as np\n'), ((1664, 1711), 'numpy.hstack', 'np.hstack', (["(train_labels, dict_data[b'labels'])"], {}), "((train_labels, dict_data[b'labels']))\n", (1673, 1711), True, 'import numpy as np\n'), ((1784, 1818), 'os.path.join', 'os.path.join', (['input_dir', 'test_data'], {}), '(input_dir, test_data)\n', (1796, 1818), False, 'import os\n'), ((2332, 2362), 'os.path.join', 'os.path.join', (['output_dir', 'name'], {}), '(output_dir, name)\n', (2344, 2362), False, 'import os\n'), ((2547, 2580), 'os.path.join', 
'os.path.join', (['name', 'f"""{i:08}.png"""'], {}), "(name, f'{i:08}.png')\n", (2559, 2580), False, 'import os\n'), ((1161, 1194), 'pickle.load', 'pickle.load', (['fo'], {'encoding': '"""bytes"""'}), "(fo, encoding='bytes')\n", (1172, 1194), False, 'import pickle\n'), ((1547, 1582), 'os.path.join', 'os.path.join', (['input_dir', 'train_data'], {}), '(input_dir, train_data)\n', (1559, 1582), False, 'import os\n'), ((2632, 2668), 'os.path.join', 'os.path.join', (['output_dir', 'image_file'], {}), '(output_dir, image_file)\n', (2644, 2668), False, 'import os\n'), ((2984, 3021), 'os.path.join', 'os.path.join', (['output_dir', '"""info.json"""'], {}), "(output_dir, 'info.json')\n", (2996, 3021), False, 'import os\n')] |
import os
import warnings
import numpy as np
import cv2
import create_patch
import caffe
import pdb
def get_patch(bottom_data, c, output_size):
    """Crop a square patch from one image of ``bottom_data`` and resize it.

    Args:
        bottom_data: Batch of images shaped [N, 3, H, W].
        c: Candidate descriptor; c[1] = image index in the batch,
           c[2], c[3] = patch centre (row, col), c[4] = patch side length.
           Values may arrive as whole-number floats (from np.hstack).
        output_size: Side length of the returned square patch.

    Returns:
        Float array shaped [3, output_size, output_size]; regions of the
        patch falling outside the image are zero-padded.
    """
    h, w = bottom_data.shape[2:]
    # Cast to int so the values can be used as array indices regardless of
    # whether the descriptor arrived as a float array.
    n, row, col, size = int(c[1]), int(c[2]), int(c[3]), int(c[4])
    patch_img = np.zeros((3, size, size))
    # Source window inside the image, clipped at the borders.
    im_r0 = max(0, row - size // 2)
    im_c0 = max(0, col - size // 2)
    im_r1 = min(h, row + size // 2)
    im_c1 = min(w, col + size // 2)  # bugfix: clip columns by width, not height
    # Destination window inside the zero-initialised patch.
    p_r0 = max(0, size // 2 - row)
    p_c0 = max(0, size // 2 - col)
    p_r1 = min(size, h + size // 2 - row)
    p_c1 = min(size, w + size // 2 - col)
    patch_img[:, p_r0:p_r1, p_c0:p_c1] = bottom_data[n, :, im_r0:im_r1, im_c0:im_c1].copy()
    # HWC for cv2.resize, then back to CHW ('float' replaces the removed np.float alias).
    patch_img = patch_img.transpose((1, 2, 0))
    resized = cv2.resize(patch_img, (output_size, output_size))
    return resized.transpose((2, 0, 1)).astype(float)
class RandomSamplingLayer(caffe.Layer):
    """Caffe python layer that samples random labelled patches from a batch.

    bottom[0]: image batch — presumably [N, 3, H, W] given the CHW->HWC
    transposes below (TODO confirm); bottom[1]: matching segmentation batch.
    top[0]: sampled patches [num, C, output_size, output_size];
    top[1]: class id of each patch [num, 1].
    """
    def setup(self, bottom, top):
        """Parse layer parameters from ``param_str`` and validate blob counts."""
        warnings.filterwarnings("ignore")
        # NOTE(review): eval on param_str is the usual caffe-python idiom,
        # but it executes arbitrary code from the prototxt.
        params = eval(self.param_str)
        self.output_size = params['output_size']
        self.num = params['num']
        self.by_ovlp = params['by_ovlp']
        self.minsz = params['minsz']
        self.maxsz = params['maxsz']
        if self.num % bottom[0].data.shape[0] != 0:
            raise Exception("num should be divided by batch size.")
        # Patches drawn per input image (Python-2 integer division assumed).
        self.num_cand = self.num / bottom[0].data.shape[0]
        if len(top) != 2:
            raise Exception("Need exact two tops.")
        if len(bottom) != 2:
            raise Exception("Need exact two bottoms.")
    def reshape(self, bottom, top):
        # top[0]: all sampled patches; top[1]: one class label per patch.
        top[0].reshape(self.num, bottom[0].data.shape[1], self.output_size, self.output_size)
        top[1].reshape(self.num, 1)
    def forward(self, bottom, top):
        idx = 0
        for i in range(bottom[0].data.shape[0]):
            # CHW -> HWC copies for the patch-proposal helper.
            img = bottom[0].data[i,...].transpose((1,2,0)).copy()
            seg = bottom[1].data[i,...].transpose((1,2,0)).copy()
            patch, cls = create_patch.createRandomPatchImg(img, seg, self.num_cand, [self.minsz, self.maxsz], 0.1, self.output_size, by_ovlp=self.by_ovlp, show=False)
            if patch.shape[0] != self.num_cand:
                raise Exception("Number of patches not consistent: %d vs. %d" % (patch.shape[0], self.num_cand))
            for k in range(patch.shape[0]):
                # Descriptor passed to get_patch: [class, image index, patch params...].
                top[0].data[idx,...] = get_patch(bottom[0].data, np.hstack((np.array([cls[k], i]), patch[k,:])), self.output_size)
                top[1].data[idx,:] = cls[k]
                idx += 1
        # Debug-only visual dump of the sampled patches (disabled; ends in pdb).
        if False:
            if not os.path.isdir('output/class/'):
                os.makedirs('output/class/')
            show_data = top[0].data.copy()
            # Undo per-channel mean subtraction — presumably BGR ImageNet-style
            # means (104/117/123); verify against the data layer.
            show_data[:,0,...] += 104
            show_data[:,1,...] += 117
            show_data[:,2,...] += 123
            num = np.zeros((21,), dtype=np.int)
            for i in range(show_data.shape[0]):
                cv2.imwrite('output/class/' + str(top[1].data[i,:].astype(np.int)) + '_' + str(num[top[1].data[i,:].astype(np.int)])+ '.jpg', show_data[i,...].transpose((1,2,0)).astype(np.uint8))
                num[top[1].data[i,:].astype(np.int)] += 1
            pdb.set_trace()
    def backward(self, top, propagate_down, bottom):
        # Sampling layer: no gradient flows back to the inputs.
        pass
class GraphToTripletLayer(caffe.Layer):
    """Caffe python layer that arranges a batch into (anchor, positive, negative) triplets.

    bottom[0]: data blob [N, ...]; bottom[1]: one label per sample.
    top[0]/top[1]/top[2]: anchor / positive / negative, each shaped like
    bottom[0].  When no negative exists for a sample (index -1), its top[2]
    slice is zeroed and receives no gradient in backward.
    """
    def setup(self, bottom, top):
        warnings.filterwarnings("ignore")
        self.N = bottom[0].data.shape[0]
        if len(top) != 3:
            raise Exception("Need exact three tops")
        if len(bottom) != 2:
            raise Exception("Need exact two bottoms")
    def reshape(self, bottom, top):
        # All three outputs mirror the input blob's shape.
        top[0].reshape(*bottom[0].data.shape)
        top[1].reshape(*bottom[0].data.shape)
        top[2].reshape(*bottom[0].data.shape)
    def forward(self, bottom, top):
        # First pass: visit samples in random order; each anchor may only pick
        # its positive/negative from samples already visited ("in the graph").
        in_graph = []
        self.triplet_idx = -1 * np.ones((self.N, 3), dtype=np.int)
        labels = bottom[1].data[...]
        for i in np.random.permutation(self.N):
            self.triplet_idx[i,0] = i
            label = labels[i]
            pos_cand = [idx for idx in in_graph if labels[idx] == label]
            neg_cand = [idx for idx in in_graph if labels[idx] != label]
            if len(pos_cand) != 0:
                self.triplet_idx[i,1] = np.random.choice(pos_cand)
            if len(neg_cand) != 0:
                self.triplet_idx[i,2] = np.random.choice(neg_cand)
            in_graph.append(i)
        # Second pass: fill still-missing positives/negatives from the full
        # batch; the anchor itself is the positive of last resort.
        for i in range(self.N):
            if self.triplet_idx[i,1] == -1:
                label = labels[i]
                pos_cand = [idx for idx in in_graph if labels[idx] == label and idx != i]
                if len(pos_cand) != 0:
                    self.triplet_idx[i,1] = np.random.choice(pos_cand)
                else:
                    self.triplet_idx[i,1] = i
            if self.triplet_idx[i,2] == -1:
                label = labels[i]
                neg_cand = [idx for idx in in_graph if labels[idx] != label]
                if len(neg_cand) != 0:
                    self.triplet_idx[i,2] = np.random.choice(neg_cand)
        # Materialise the triplets into the three output blobs.
        for i in range(self.N):
            top[0].data[i,...] = bottom[0].data[self.triplet_idx[i,0],...]
            top[1].data[i,...] = bottom[0].data[self.triplet_idx[i,1],...]
            if self.triplet_idx[i,2] != -1:
                top[2].data[i,...] = bottom[0].data[self.triplet_idx[i,2],...]
            else:
                top[2].data[i,...] = 0.
    def backward(self, top, propagate_down, bottom):
        # Accumulate gradients from all three tops back into the shared input
        # blob (a sample may appear in several triplet roles).
        bottom[0].diff[...] = 0.
        for i in range(self.N):
            bottom[0].diff[self.triplet_idx[i,0],...] += top[0].diff[i,...]
            bottom[0].diff[self.triplet_idx[i,1],...] += top[1].diff[i,...]
            if self.triplet_idx[i,2] != -1:
                bottom[0].diff[self.triplet_idx[i,2],...] += top[2].diff[i,...]
| [
"create_patch.createRandomPatchImg",
"numpy.ones",
"os.makedirs",
"numpy.random.choice",
"numpy.array",
"numpy.zeros",
"os.path.isdir",
"pdb.set_trace",
"cv2.resize",
"warnings.filterwarnings",
"numpy.random.permutation"
] | [((194, 219), 'numpy.zeros', 'np.zeros', (['(3, c[4], c[4])'], {}), '((3, c[4], c[4]))\n', (202, 219), True, 'import numpy as np\n'), ((802, 835), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (825, 835), False, 'import warnings\n'), ((3197, 3230), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (3220, 3230), False, 'import warnings\n'), ((3787, 3816), 'numpy.random.permutation', 'np.random.permutation', (['self.N'], {}), '(self.N)\n', (3808, 3816), True, 'import numpy as np\n'), ((1836, 1981), 'create_patch.createRandomPatchImg', 'create_patch.createRandomPatchImg', (['img', 'seg', 'self.num_cand', '[self.minsz, self.maxsz]', '(0.1)', 'self.output_size'], {'by_ovlp': 'self.by_ovlp', 'show': '(False)'}), '(img, seg, self.num_cand, [self.minsz,\n self.maxsz], 0.1, self.output_size, by_ovlp=self.by_ovlp, show=False)\n', (1869, 1981), False, 'import create_patch\n'), ((2688, 2717), 'numpy.zeros', 'np.zeros', (['(21,)'], {'dtype': 'np.int'}), '((21,), dtype=np.int)\n', (2696, 2717), True, 'import numpy as np\n'), ((3032, 3047), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3045, 3047), False, 'import pdb\n'), ((3698, 3732), 'numpy.ones', 'np.ones', (['(self.N, 3)'], {'dtype': 'np.int'}), '((self.N, 3), dtype=np.int)\n', (3705, 3732), True, 'import numpy as np\n'), ((2436, 2466), 'os.path.isdir', 'os.path.isdir', (['"""output/class/"""'], {}), "('output/class/')\n", (2449, 2466), False, 'import os\n'), ((2484, 2512), 'os.makedirs', 'os.makedirs', (['"""output/class/"""'], {}), "('output/class/')\n", (2495, 2512), False, 'import os\n'), ((4107, 4133), 'numpy.random.choice', 'np.random.choice', (['pos_cand'], {}), '(pos_cand)\n', (4123, 4133), True, 'import numpy as np\n'), ((4209, 4235), 'numpy.random.choice', 'np.random.choice', (['neg_cand'], {}), '(neg_cand)\n', (4225, 4235), True, 'import numpy as np\n'), ((633, 682), 'cv2.resize', 'cv2.resize', (['patch_img', 
'(output_size, output_size)'], {}), '(patch_img, (output_size, output_size))\n', (643, 682), False, 'import cv2\n'), ((4550, 4576), 'numpy.random.choice', 'np.random.choice', (['pos_cand'], {}), '(pos_cand)\n', (4566, 4576), True, 'import numpy as np\n'), ((4883, 4909), 'numpy.random.choice', 'np.random.choice', (['neg_cand'], {}), '(neg_cand)\n', (4899, 4909), True, 'import numpy as np\n'), ((2259, 2280), 'numpy.array', 'np.array', (['[cls[k], i]'], {}), '([cls[k], i])\n', (2267, 2280), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import my_txtutils

# Network geometry -- these must match the values used when the checkpoint
# below was trained and saved!
ALPHASIZE = my_txtutils.ALPHASIZE
NLAYERS = 4
INTERNALSIZE = 1024

shahnameh = "./checkpoints/rnn_train_1492872774-1088500000"
# use topn=10 for all but the last which works with topn=2 for Shakespeare and topn=3 for Python
author = shahnameh
meta_graph = "./checkpoints/rnn_train_1492872774-1088500000.meta"

ncnt = 0  # characters printed on the current console line (for wrapping)

with tf.Session() as sess:
    # Rebuild the graph from the .meta file and restore the trained weights.
    new_saver = tf.train.import_meta_graph(meta_graph)
    new_saver.restore(sess, author)

    # Open files in text mode with an explicit encoding. Python 3 text file
    # objects accept/return str directly, so no manual encode()/decode() is
    # needed. (The original mixed Python 2 byte-string idioms into Python 3
    # code: str.decode() raises AttributeError and writing bytes to a
    # text-mode file raises TypeError.)
    with open("test.txt", "r", encoding="utf8") as input_file:
        init_text = input_file.read()
    encoded_text = my_txtutils.encode_text(init_text)

    with open("sher.txt", "w", encoding="utf8") as out_file:
        # Zero initial RNN state: [BATCHSIZE, INTERNALSIZE * NLAYERS]
        h = np.zeros([1, INTERNALSIZE * NLAYERS], dtype=np.float32)

        # Echo the priming text into the output file.
        out_file.write(init_text)

        # Feed the priming text one character at a time to warm up the state.
        for i in range(len(encoded_text) - 1):
            # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
            y = np.array([[encoded_text[i]]])
            yo, h = sess.run(['Yo:0', 'H:0'], feed_dict={'X:0': y, 'pkeep:0': 1., 'Hin:0': h, 'batchsize:0': 1})

        # Generate 50 characters, sampling from the model's output distribution.
        y = np.array([[encoded_text[-1]]])
        for i in range(50):
            yo, h = sess.run(['Yo:0', 'H:0'], feed_dict={'X:0': y, 'pkeep:0': 1., 'Hin:0': h, 'batchsize:0': 1})
            # If sampling is done from the topn most likely characters, the
            # generated text is more credible and more "english". If topn is
            # not set, it defaults to the full distribution (ALPHASIZE).
            # Recommended: topn = 10 for intermediate checkpoints, topn=2 for
            # fully trained checkpoints.
            c = my_txtutils.sample_from_probabilities(yo, topn=1)
            y = np.array([[c]])  # next input: shape [1, 1]
            if c == 37:  # skip this code -- TODO confirm what 37 maps to in the alphabet
                continue
            c = chr(my_txtutils.convert_to_alphabet(c))
            print(c, end="")
            out_file.write(c)
            # Hard-wrap console output at 100 characters per line.
            if c == '\n':
                ncnt = 0
            else:
                ncnt += 1
            if ncnt == 100:
                print("")
                ncnt = 0
| [
"tensorflow.Session",
"numpy.array",
"numpy.zeros",
"tensorflow.train.import_meta_graph",
"my_txtutils.sample_from_probabilities",
"my_txtutils.encode_text",
"my_txtutils.convert_to_alphabet"
] | [((424, 436), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (434, 436), True, 'import tensorflow as tf\n'), ((462, 500), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['meta_graph'], {}), '(meta_graph)\n', (488, 500), True, 'import tensorflow as tf\n'), ((677, 711), 'my_txtutils.encode_text', 'my_txtutils.encode_text', (['init_text'], {}), '(init_text)\n', (700, 711), False, 'import my_txtutils\n'), ((817, 872), 'numpy.zeros', 'np.zeros', (['[1, INTERNALSIZE * NLAYERS]'], {'dtype': 'np.float32'}), '([1, INTERNALSIZE * NLAYERS], dtype=np.float32)\n', (825, 872), True, 'import numpy as np\n'), ((1183, 1213), 'numpy.array', 'np.array', (['[[encoded_text[-1]]]'], {}), '([[encoded_text[-1]]])\n', (1191, 1213), True, 'import numpy as np\n'), ((1035, 1064), 'numpy.array', 'np.array', (['[[encoded_text[i]]]'], {}), '([[encoded_text[i]]])\n', (1043, 1064), True, 'import numpy as np\n'), ((1679, 1728), 'my_txtutils.sample_from_probabilities', 'my_txtutils.sample_from_probabilities', (['yo'], {'topn': '(1)'}), '(yo, topn=1)\n', (1716, 1728), False, 'import my_txtutils\n'), ((1741, 1756), 'numpy.array', 'np.array', (['[[c]]'], {}), '([[c]])\n', (1749, 1756), True, 'import numpy as np\n'), ((1929, 1963), 'my_txtutils.convert_to_alphabet', 'my_txtutils.convert_to_alphabet', (['c'], {}), '(c)\n', (1960, 1963), False, 'import my_txtutils\n')] |
params = [
('epochs', [75]),
('batch_size', [64]),
('validation_split', [0.]),
('filters', [128, 256]),
('kernel_size', [5]),
('conv_activation', ['relu', 'tanh']),
('conv_l2_regularizer', [0.001]),
('dropout_rate', [0., 0.2, 0.5]),
('dense_activation', ['relu', 'tanh']),
('dense_l2_regularizer', [0.01]),
('activation', ['sigmoid']),
('optimizer', ['nadam']),
('loss_function', ['binary_crossentropy']),
('units', [32, 64, 128]),
('trainable', [False]),
('dense_layers', [1, 2, 3]),
]
"""
params = [
('epochs', [1]),
('batch_size', [128]),
('validation_split', [0.]),
('filters', [1]),
('kernel_size', [3]),
('conv_activation', ['relu']),
('conv_l2_regularizer', [0.]),
('dropout_rate', [0.9]),
('dense_activation', ['relu']),
('dense_l2_regularizer', [0.]),
('activation', ['sigmoid']),
('optimizer', ['nadam']),
('loss_function', ['binary_crossentropy']),
('units', [4]),
]
"""
param_grid = dict(params)
import os
# Silence TensorFlow's C++ INFO/WARNING logs; must be set before TF imports.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
# Make the project importable whether we run from the project root or from a
# subdirectory: try the current directory first, then fall back to the parent.
try:
    sys.path.insert(0, '.')
    from constants import Const
    sys.path.insert(0, Const.ROOT)
except Exception:  # narrowed from a bare `except:` so Ctrl-C/SystemExit still propagate
    sys.path.insert(0, '..')
    from constants import Const
import utils
from ItemSelector import ItemSelector
from MyClassifier import MyClassifier, MultilabelKerasClassifier, KerasClassifier, Model
from MyOneVsRestClassifier import MyOneVsRestClassifier
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from tensorflow.keras import backend as K
from tensorflow.keras import optimizers, regularizers
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import (GRU, LSTM, RNN, Bidirectional,
Dense, Dropout, Lambda, RepeatVector,
TimeDistributed, Concatenate)
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import (AveragePooling1D, GlobalMaxPooling1D,
MaxPooling1D)
from tensorflow.keras import Input, Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import sequence, text
import dill
import numpy as np
import matplotlib.pyplot as plt
import itertools
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import FeatureUnion, Pipeline
class CNNCategoryExtractor(MyClassifier):
    """Multi-label CNN category extractor for review sentences.

    Predicts which of the four aspect categories -- food, service, price,
    place -- a review mentions. A 1-D convolutional network over word
    embeddings produces one sigmoid output per category; the outputs are
    binarized with ``self.threshold``.
    """

    def __init__(self, threshold = 0.5, **kwargs):
        super().__init__(**kwargs)
        self.MODEL_PATH = Const.CE_ROOT + 'model/cnn/CNN.model'
        self.WE_PATH = Const.WE_ROOT + 'embedding_matrix.pkl'

        self.target_names = ['food', 'service', 'price', 'place']
        self.cnn_model = None
        # A sigmoid output >= threshold is treated as a positive label.
        self.threshold = threshold

        # Allow arbitrary attribute overrides via keyword arguments.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def fit(self, X, y,
        filters = 320,
        kernel_size = 5,
        conv_activation = 'tanh',
        conv_l2_regularizer = 0.01,
        dropout_rate = 0.6,
        dense_activation = 'relu',
        dense_l2_regularizer = 0.01,
        activation = 'sigmoid',
        optimizer = "nadam",
        loss_function = 'binary_crossentropy',
        units = 256,
        trainable = False,
        dense_layers = 1,
        is_save = False,
        show_summary = False,
        **kwargs):
        """Build a fresh model with the given hyperparameters and train it.

        Remaining **kwargs (epochs, batch_size, verbose, ...) are forwarded
        to Keras ``Model.fit``. Set ``is_save=True`` to persist the trained
        model to MODEL_PATH.
        """
        self.cnn_model = self._create_model(
            filters,
            kernel_size,
            conv_activation,
            conv_l2_regularizer,
            dropout_rate,
            dense_activation,
            dense_l2_regularizer,
            activation,
            optimizer,
            loss_function,
            units,
            trainable,
            dense_layers,
        )
        if show_summary:
            self.cnn_model.summary()
        mode = kwargs.get('mode', 'train_validate_split')
        if mode == "train_validate_split":
            # NOTE(review): if a caller ever passes mode explicitly it is
            # still inside kwargs here and Keras fit() would reject it --
            # confirm callers never do.
            self.cnn_model.fit(
                X, y,
                **kwargs
            )
        if is_save:
            self.cnn_model.save(self.MODEL_PATH)

    def predict(self, X):
        """Return a hard 0/1 label matrix by thresholding the sigmoid outputs."""
        y_pred = self.cnn_model.predict(X)
        y_pred[y_pred >= self.threshold] = 1.
        y_pred[y_pred < self.threshold] = 0.
        return y_pred

    def predict_proba(self, X):
        """Return the raw sigmoid probabilities (no thresholding).

        (Fix: the original read ``self.threshold`` into an unused local.)
        """
        return self.cnn_model.predict(X)

    def _fit_train_validate_split(self, X, y):
        # Not implemented; plain train/validate fitting happens in fit().
        pass

    def _fit_gridsearch_cv(self, X, y, param_grid, **kwargs):
        """Exhaustive 5-fold grid search over ``param_grid``.

        Prints per-candidate macro F1/precision/recall, dumps the full
        ``cv_results_`` to output/gridsearch_cnn.pkl and, when refitting is
        enabled, saves the refit best estimator to model/cnn/best.model.
        """
        from sklearn.model_selection import GridSearchCV
        np.random.seed(7)

        # Wrap the Keras model builder in an sklearn-compatible estimator.
        model = MultilabelKerasClassifier(build_fn = self._create_model, verbose=0)

        # Metric used to refit the best candidate; falsy disables refitting.
        IS_REFIT = kwargs.get('is_refit', 'f1_macro')

        grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=5, refit=IS_REFIT, verbose=1, scoring=['f1_macro', 'precision_macro', 'recall_macro'])
        grid_result = grid.fit(X, y)

        print(grid_result.cv_results_.keys())
        means = [grid_result.cv_results_['mean_test_f1_macro'], grid_result.cv_results_['mean_test_precision_macro'], grid_result.cv_results_['mean_test_recall_macro']]
        stds = [grid_result.cv_results_['std_test_f1_macro'], grid_result.cv_results_['std_test_precision_macro'], grid_result.cv_results_['std_test_recall_macro']]
        for mean, stdev in zip(means, stds):
            print("\n{} ({})".format(mean, stdev))
        params = grid_result.best_params_
        print("with:", params)

        with open('output/gridsearch_cnn.pkl', 'wb') as fo:
            dill.dump(grid_result.cv_results_, fo)

        if IS_REFIT:
            grid.best_estimator_.model.save('model/cnn/best.model')

    def _create_model(
        self,

        filters = 320,
        kernel_size = 5,
        conv_activation = 'tanh',
        conv_l2_regularizer = 0.01,
        dropout_rate = 0.6,
        dense_activation = 'relu',
        dense_l2_regularizer = 0.01,
        activation = 'sigmoid',
        optimizer = "nadam",
        loss_function = 'binary_crossentropy',
        units = 256,
        trainable = False,
        dense_layers = 1,

        **kwargs
    ):
        """Build and compile the CNN.

        Architecture: embedding -> Conv1D -> global max pooling ->
        ``dense_layers`` dense+dropout blocks -> 4-unit output layer
        (one sigmoid per category). Returns the compiled, untrained model.
        """
        K.clear_session()  # drop any previous graph so layer names do not clash
        MAX_SEQUENCE_LENGTH = kwargs.get("max_sequence_length", 150)

        # Define Architecture
        layer_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
        layer_embedding = self._load_embedding(self.WE_PATH, trainable=trainable, vocabulary_size=15000, embedding_vector_length=500)(layer_input)
        layer_conv = Conv1D(filters=filters, kernel_size=kernel_size, padding='same', activation=conv_activation,
            kernel_regularizer=regularizers.l2(conv_l2_regularizer))(layer_embedding)
        layer_pooling = GlobalMaxPooling1D()(layer_conv)
        layer_dropout = Dropout(dropout_rate, seed=7)(layer_pooling)
        for i in range(dense_layers):
            layer_dense = Dense(units, activation=dense_activation, kernel_regularizer=regularizers.l2(dense_l2_regularizer))(layer_dropout)
            layer_dropout = Dropout(dropout_rate, seed=7)(layer_dense)
        layer_softmax = Dense(4, activation=activation)(layer_dropout)

        # Create and compile the model.
        cnn_model = Model(inputs=layer_input, outputs=layer_softmax)
        cnn_model.compile(loss=loss_function, optimizer=optimizer, metrics=['accuracy'])
        return cnn_model

    def load_best_model(self):
        """Replace the current model with the checkpointed grid-search best."""
        best_model = load_model(Const.CE_ROOT + 'model/cnn/best.model')
        del self.cnn_model
        self.cnn_model = best_model

    def _get_features(self, x):
        # Inputs are already padded index sequences; no extra features needed.
        return x

    def load_weights(self, path):
        """Rebuild the default architecture and load weights from ``path``.

        Fix: the original discarded the model returned by _create_model(),
        so ``self.cnn_model`` could still be ``None`` here and the
        subsequent ``load_weights`` call would fail with AttributeError.
        """
        self.cnn_model = self._create_model()
        self.cnn_model.load_weights(path)

    def set_threshold(self, thresh):
        """Set the decision threshold used by predict()."""
        self.threshold = thresh

    def get_threshold(self):
        """Return the current decision threshold."""
        return self.threshold
def cnn():
    """Evaluate the best saved CNN category extractor on the test split.

    As committed, training and grid search are disabled; the function
    restores the best checkpointed model and scores it on the test set at
    each candidate decision threshold in ``thresh_to_try``.
    """
    # Initialize data.
    X, y, X_test, y_test = utils.get_ce_dataset()
    # X_train, X_validate, y_train, y_validate = train_test_split(X, y, test_size=0.20, random_state=7)

    # Candidate decision thresholds. The full sweep is kept for reference;
    # only 0.5 is evaluated by default. (The original assigned the full list
    # and immediately overwrote it -- dead code.)
    # thresh_to_try = [0.2, 0.3, 0.4, 0.5, 0.55, 0.6, 0.65, 0.7, 0.725, 0.75, 0.775, 0.8, 0.825, 0.85, 0.875, 0.9, 0.925, 0.95, 0.975, 0.999]
    thresh_to_try = [0.5]

    # Make the model.
    np.random.seed(7)  # reproducibility
    # checkpointer = ModelCheckpoint(filepath='model/cnn/weights/CNN.hdf5', verbose=1, save_best_only=True)
    ce = CNNCategoryExtractor()

    # Fitting / hyperparameter search is disabled; the model below is
    # loaded from the checkpoint produced by a previous run.
    # ce._fit_new_gridsearch_cv(X, y, params, thresholds=thresh_to_try, score_verbose=True)

    # Load best estimator and score it.
    ce.load_best_model()
    ce.cnn_model.summary()
    for thresh in thresh_to_try:
        print("\nTHRESH: {}".format(thresh))
        ce.set_threshold(thresh)
        ce.score(X_test, y_test)
def get_wrong_preds(data='train'):
    """Print every review whose predicted label vector differs from the truth.

    Args:
        data: 'train' to inspect the training split, 'test' for the test
            split. (Fix: the original shadowed this parameter with a
            hard-coded ``data = 'train'``, so passing 'test' was silently
            ignored.)
    """
    ce = CNNCategoryExtractor()
    ce.load_best_model()
    ce.set_threshold(0.5)

    X, y, X_test, y_test, df, df_test = utils.get_ce_dataset(return_df=True)
    if data == 'test':
        df = df_test
        X = X_test
        y = y_test
    print(len(df))

    y_pred = ce.predict(X)
    ce.score(X, y)

    cnt = 0
    for i, (review, y_pred_single, y_single) in enumerate(zip(df['review'], y_pred, y.values.tolist())):
        y_pred_single = list(y_pred_single)
        if y_pred_single != y_single:
            cnt += 1
            print("=================={}==================".format(i))
            print(review)
            print('PRED:', y_pred_single)
            print('ACTL:', y_single)
            print()
    print(cnt, "sentences misclassified")
if __name__ == "__main__":
    # Time the evaluation run; swap in get_wrong_preds to inspect errors.
    utils.time_log(cnn)
    # utils.time_log(get_wrong_preds)
| [
"sklearn.model_selection.GridSearchCV",
"sys.path.insert",
"utils.time_log",
"MyClassifier.Model",
"utils.get_ce_dataset",
"MyClassifier.MultilabelKerasClassifier",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.GlobalMaxPooling1D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.m... | [((1102, 1125), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""."""'], {}), "(0, '.')\n", (1117, 1125), False, 'import sys\n'), ((1162, 1192), 'sys.path.insert', 'sys.path.insert', (['(0)', 'Const.ROOT'], {}), '(0, Const.ROOT)\n', (1177, 1192), False, 'import sys\n'), ((8595, 8617), 'utils.get_ce_dataset', 'utils.get_ce_dataset', ([], {}), '()\n', (8615, 8617), False, 'import utils\n'), ((8932, 8949), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (8946, 8949), True, 'import numpy as np\n'), ((10261, 10297), 'utils.get_ce_dataset', 'utils.get_ce_dataset', ([], {'return_df': '(True)'}), '(return_df=True)\n', (10281, 10297), False, 'import utils\n'), ((11105, 11124), 'utils.time_log', 'utils.time_log', (['cnn'], {}), '(cnn)\n', (11119, 11124), False, 'import utils\n'), ((1205, 1229), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (1220, 1229), False, 'import sys\n'), ((4863, 4880), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (4877, 4880), True, 'import numpy as np\n'), ((4931, 4996), 'MyClassifier.MultilabelKerasClassifier', 'MultilabelKerasClassifier', ([], {'build_fn': 'self._create_model', 'verbose': '(0)'}), '(build_fn=self._create_model, verbose=0)\n', (4956, 4996), False, 'from MyClassifier import MyClassifier, MultilabelKerasClassifier, KerasClassifier, Model\n'), ((5145, 5291), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'model', 'param_grid': 'param_grid', 'cv': '(5)', 'refit': 'IS_REFIT', 'verbose': '(1)', 'scoring': "['f1_macro', 'precision_macro', 'recall_macro']"}), "(estimator=model, param_grid=param_grid, cv=5, refit=IS_REFIT,\n verbose=1, scoring=['f1_macro', 'precision_macro', 'recall_macro'])\n", (5157, 5291), False, 'from sklearn.model_selection import GridSearchCV\n'), ((6637, 6654), 'tensorflow.keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (6652, 6654), True, 'from tensorflow.keras import backend as 
K\n'), ((6777, 6812), 'tensorflow.keras.Input', 'Input', ([], {'shape': '(MAX_SEQUENCE_LENGTH,)'}), '(shape=(MAX_SEQUENCE_LENGTH,))\n', (6782, 6812), False, 'from tensorflow.keras import Input, Sequential\n'), ((7647, 7695), 'MyClassifier.Model', 'Model', ([], {'inputs': 'layer_input', 'outputs': 'layer_softmax'}), '(inputs=layer_input, outputs=layer_softmax)\n', (7652, 7695), False, 'from MyClassifier import MyClassifier, MultilabelKerasClassifier, KerasClassifier, Model\n'), ((8116, 8166), 'tensorflow.keras.models.load_model', 'load_model', (["(Const.CE_ROOT + 'model/cnn/best.model')"], {}), "(Const.CE_ROOT + 'model/cnn/best.model')\n", (8126, 8166), False, 'from tensorflow.keras.models import load_model\n'), ((6037, 6075), 'dill.dump', 'dill.dump', (['grid_result.cv_results_', 'fo'], {}), '(grid_result.cv_results_, fo)\n', (6046, 6075), False, 'import dill\n'), ((7180, 7200), 'tensorflow.keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (7198, 7200), False, 'from tensorflow.keras.layers import AveragePooling1D, GlobalMaxPooling1D, MaxPooling1D\n'), ((7237, 7266), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout_rate'], {'seed': '(7)'}), '(dropout_rate, seed=7)\n', (7244, 7266), False, 'from tensorflow.keras.layers import GRU, LSTM, RNN, Bidirectional, Dense, Dropout, Lambda, RepeatVector, TimeDistributed, Concatenate\n'), ((7556, 7587), 'tensorflow.keras.layers.Dense', 'Dense', (['(4)'], {'activation': 'activation'}), '(4, activation=activation)\n', (7561, 7587), False, 'from tensorflow.keras.layers import GRU, LSTM, RNN, Bidirectional, Dense, Dropout, Lambda, RepeatVector, TimeDistributed, Concatenate\n'), ((7489, 7518), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout_rate'], {'seed': '(7)'}), '(dropout_rate, seed=7)\n', (7496, 7518), False, 'from tensorflow.keras.layers import GRU, LSTM, RNN, Bidirectional, Dense, Dropout, Lambda, RepeatVector, TimeDistributed, Concatenate\n'), ((7101, 7137), 
'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['conv_l2_regularizer'], {}), '(conv_l2_regularizer)\n', (7116, 7137), False, 'from tensorflow.keras import optimizers, regularizers\n'), ((7407, 7444), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['dense_l2_regularizer'], {}), '(dense_l2_regularizer)\n', (7422, 7444), False, 'from tensorflow.keras import optimizers, regularizers\n')] |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown] slideshow={"slide_type": "slide"}
# # MNIST with CNN
# %%
import pickle
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from mlcourse.config import Config
from mlcourse.utils.data import show_dataset
from pytorch_model_summary import summary
from sklearn.metrics import (
ConfusionMatrixDisplay,
classification_report,
confusion_matrix,
)
from skorch import NeuralNetClassifier
from skorch.callbacks import Checkpoint, EarlyStopping, LRScheduler
from skorch.helper import predefined_split
from torch.utils.data import Subset
from torchvision.transforms.functional import InterpolationMode
# %%
config = Config()
# %%
# Hyperparameters. NOTE(review): input_size, num_classes, num_epochs,
# batch_size and learning_rate are defined here, but the skorch classifiers
# further down hard-code their own batch_size/max_epochs/lr -- confirm which
# values are authoritative.
input_size = 28 * 28
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.005
# Train on GPU when available, otherwise fall back to the CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# %% slideshow={"slide_type": "subslide"}
# Resize every image to 28x28 and convert it to a float tensor in [0, 1].
mnist_transforms = transforms.Compose(
    [transforms.Resize((28, 28)), transforms.ToTensor()]
)
# %%
# MNIST train/test splits; downloaded into ./data on first run.
train_dataset = torchvision.datasets.MNIST(
    root="./data", train=True, transform=mnist_transforms, download=True
)
test_dataset = torchvision.datasets.MNIST(
    root="./data", train=False, transform=mnist_transforms, download=True
)
# %%
# Convolutional feature extractor only (classification head commented out);
# used below to inspect the flattened feature size via the model summaries.
partial_model = nn.Sequential(
    nn.Conv2d(1, 10, kernel_size=5),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(10, 20, kernel_size=5),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Flatten(),
    # nn.Linear(320, 60),
    # nn.ReLU(),
    # nn.Linear(60, 10),
)
print(summary(partial_model, torch.zeros((1, 1, 28, 28)), show_input=True))
print(summary(partial_model, torch.zeros((1, 1, 28, 28))))
# %%
# Full model: the same conv stack plus a two-layer classification head.
# No final Softmax -- CrossEntropyLoss below expects raw logits.
conv_model = nn.Sequential(
    nn.Conv2d(1, 10, kernel_size=5),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(10, 20, kernel_size=5),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Flatten(),
    nn.Linear(320, 60),
    nn.ReLU(),
    nn.Linear(60, 10),
    # nn.Softmax(dim=1),
)
# %%
print(summary(conv_model, torch.zeros((1, 1, 28, 28))))
print(summary(conv_model, torch.zeros((1, 1, 28, 28)), show_input=True))
# %%
# skorch wrapper: trains conv_model with cross-entropy on raw logits.
# NOTE(review): train_split=predefined_split(test_dataset) monitors on the
# *test* set -- fine for a demo, but it leaks if used for model selection.
cnn_classifier = NeuralNetClassifier(
    conv_model,
    criterion=nn.CrossEntropyLoss,
    batch_size=100,
    max_epochs=2,
    lr=0.1,
    iterator_train__shuffle=True,
    train_split=predefined_split(test_dataset),
    device=device,
)
# %%
cnn_classifier.fit(train_dataset, None)  # y=None: labels come from the Dataset
# %%
cnn_classifier.partial_fit(train_dataset, None)  # continue training (2 more epochs)
# %%
y_pred_cnn = cnn_classifier.predict(test_dataset)
# %%
# Ground-truth labels, extracted once from the (image, label) pairs.
y_test = np.array([y for _, y in test_dataset])
# %%
print(classification_report(y_test, y_pred_cnn))
# %%
print(confusion_matrix(y_test, y_pred_cnn))
# %%
# Graphical confusion matrix.
plt.figure(figsize=(10, 8))
ax = plt.axes()
ConfusionMatrixDisplay.from_predictions(y_test, y_pred_cnn, ax=ax)
# %% [markdown]
#
# ## Finding Misclassified Images
# %%
def find_misclassified_images(y_pred=y_pred_cnn):
    """Return the indices of test samples where *y_pred* disagrees with y_test."""
    mismatches = y_test != y_pred
    return mismatches.nonzero()[0]
# %%
find_misclassified_images(y_pred_cnn)
# %%
# Dataset view that contains only the misclassified test images.
misclassified_ds = Subset(test_dataset, find_misclassified_images())
# %%
show_dataset(misclassified_ds)
# %% [markdown]
#
# ## Data Augmentation (V2)
# %%
# Augmentation pipeline: sometimes upsample to 56x56, random-resized-crop
# back down, and (with probability 0.5) add a small rotation/translation
# jitter; finally normalize the size and convert to a tensor.
augmented_transforms = transforms.Compose(
    [
        transforms.RandomApply(
            [
                transforms.Resize((56, 56)),
                transforms.RandomResizedCrop(
                    28, (0.8, 1.0), interpolation=InterpolationMode.BICUBIC
                ),
                transforms.RandomApply(
                    [
                        transforms.RandomAffine(
                            degrees=15.0,
                            # NOTE(review): translate=(0.08, 0.8) allows up
                            # to 80% vertical shift -- possibly meant 0.08;
                            # confirm.
                            translate=(0.08, 0.8),
                            interpolation=InterpolationMode.BICUBIC,
                        )
                    ],
                    0.5,
                ),
            ]
        ),
        transforms.Resize((28, 28)),
        transforms.ToTensor(),
    ]
)
# %%
# Training set that applies the augmentations on every access.
augmented_train_dataset = torchvision.datasets.MNIST(
    root="./data", train=True, transform=augmented_transforms, download=True
)
# %%
# Same architecture, retrained from scratch on the augmented data with Adam.
cnn_classifier = NeuralNetClassifier(
    conv_model,
    criterion=nn.CrossEntropyLoss,
    batch_size=100,
    max_epochs=2,
    optimizer=torch.optim.Adam,
    lr=1e-3,
    iterator_train__shuffle=True,
    train_split=predefined_split(test_dataset),
    device=device,
)
# %%
cnn_classifier.fit(augmented_train_dataset, None)
# %% [markdown]
#
# ## Callbacks
# %%
# Decay the learning rate by 10x every 5 epochs.
step_lr_scheduler = LRScheduler(policy="StepLR", step_size=5, gamma=0.1)
# %%
# Pickle the whole classifier whenever validation accuracy hits a new best.
checkpoint = Checkpoint(
    f_pickle="mnist_cnn.pkl",
    dirname=config.model_dir_path.as_posix(),
    monitor="valid_acc_best",
)
# %%
# Stop when validation accuracy has not improved for 5 consecutive epochs.
early_stopping = EarlyStopping(monitor="valid_acc", patience=5, lower_is_better=False)
# %%
# Long training run governed by the three callbacks above.
cnn_classifier = NeuralNetClassifier(
    conv_model,
    criterion=nn.CrossEntropyLoss,
    batch_size=100,
    max_epochs=200,
    optimizer=torch.optim.Adam,
    lr=1e-3,
    iterator_train__shuffle=True,
    train_split=predefined_split(test_dataset),
    callbacks=[step_lr_scheduler, checkpoint, early_stopping],
    device=device,
)
# %%
cnn_classifier.fit(augmented_train_dataset, None)
# %%
# Restore the best checkpointed classifier from disk.
with open(config.model_dir_path / "mnist_cnn.pkl", "rb") as file:
    loaded_classifier = pickle.load(file)
# %%
y_pred_loaded = loaded_classifier.predict(test_dataset)
# %%
print(classification_report(y_test, y_pred_loaded))
# %%
print(confusion_matrix(y_test, y_pred_loaded))
# %% [markdown]
# ## Workshop: Fashion MNIST with a CNN
#
# Train a convolutional network that can classify images from the
# Fashion MNIST dataset.
#
# (Reminder: the Torch `Dataset` for Fashion MNIST can be created with the
# `torchvision.datasets.FashionMNIST` class.)
# %%
| [
"torch.nn.ReLU",
"sklearn.metrics.classification_report",
"mlcourse.utils.data.show_dataset",
"numpy.array",
"skorch.callbacks.EarlyStopping",
"torch.cuda.is_available",
"numpy.where",
"torch.nn.Flatten",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomResizedCrop",
"sklearn.met... | [((1015, 1023), 'mlcourse.config.Config', 'Config', ([], {}), '()\n', (1021, 1023), False, 'from mlcourse.config import Config\n'), ((1343, 1444), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': '"""./data"""', 'train': '(True)', 'transform': 'mnist_transforms', 'download': '(True)'}), "(root='./data', train=True, transform=\n mnist_transforms, download=True)\n", (1369, 1444), False, 'import torchvision\n'), ((1461, 1563), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': '"""./data"""', 'train': '(False)', 'transform': 'mnist_transforms', 'download': '(True)'}), "(root='./data', train=False, transform=\n mnist_transforms, download=True)\n", (1487, 1563), False, 'import torchvision\n'), ((2815, 2853), 'numpy.array', 'np.array', (['[y for _, y in test_dataset]'], {}), '([y for _, y in test_dataset])\n', (2823, 2853), True, 'import numpy as np\n'), ((2965, 2992), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (2975, 2992), True, 'import matplotlib.pyplot as plt\n'), ((2998, 3008), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (3006, 3008), True, 'import matplotlib.pyplot as plt\n'), ((3009, 3075), 'sklearn.metrics.ConfusionMatrixDisplay.from_predictions', 'ConfusionMatrixDisplay.from_predictions', (['y_test', 'y_pred_cnn'], {'ax': 'ax'}), '(y_test, y_pred_cnn, ax=ax)\n', (3048, 3075), False, 'from sklearn.metrics import ConfusionMatrixDisplay, classification_report, confusion_matrix\n'), ((3352, 3382), 'mlcourse.utils.data.show_dataset', 'show_dataset', (['misclassified_ds'], {}), '(misclassified_ds)\n', (3364, 3382), False, 'from mlcourse.utils.data import show_dataset\n'), ((4217, 4322), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': '"""./data"""', 'train': '(True)', 'transform': 'augmented_transforms', 'download': '(True)'}), "(root='./data', train=True, transform=\n augmented_transforms, download=True)\n", 
(4243, 4322), False, 'import torchvision\n'), ((4721, 4773), 'skorch.callbacks.LRScheduler', 'LRScheduler', ([], {'policy': '"""StepLR"""', 'step_size': '(5)', 'gamma': '(0.1)'}), "(policy='StepLR', step_size=5, gamma=0.1)\n", (4732, 4773), False, 'from skorch.callbacks import Checkpoint, EarlyStopping, LRScheduler\n'), ((4936, 5005), 'skorch.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""valid_acc"""', 'patience': '(5)', 'lower_is_better': '(False)'}), "(monitor='valid_acc', patience=5, lower_is_better=False)\n", (4949, 5005), False, 'from skorch.callbacks import Checkpoint, EarlyStopping, LRScheduler\n'), ((1143, 1168), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1166, 1168), False, 'import torch\n'), ((1607, 1638), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(10)'], {'kernel_size': '(5)'}), '(1, 10, kernel_size=5)\n', (1616, 1638), True, 'import torch.nn as nn\n'), ((1644, 1653), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1651, 1653), True, 'import torch.nn as nn\n'), ((1659, 1674), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1671, 1674), True, 'import torch.nn as nn\n'), ((1680, 1712), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)'], {'kernel_size': '(5)'}), '(10, 20, kernel_size=5)\n', (1689, 1712), True, 'import torch.nn as nn\n'), ((1718, 1727), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1725, 1727), True, 'import torch.nn as nn\n'), ((1733, 1748), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1745, 1748), True, 'import torch.nn as nn\n'), ((1754, 1766), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (1764, 1766), True, 'import torch.nn as nn\n'), ((2011, 2042), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(10)'], {'kernel_size': '(5)'}), '(1, 10, kernel_size=5)\n', (2020, 2042), True, 'import torch.nn as nn\n'), ((2048, 2057), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2055, 2057), True, 'import torch.nn as nn\n'), ((2063, 2078), 'torch.nn.MaxPool2d', 
'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (2075, 2078), True, 'import torch.nn as nn\n'), ((2084, 2116), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)'], {'kernel_size': '(5)'}), '(10, 20, kernel_size=5)\n', (2093, 2116), True, 'import torch.nn as nn\n'), ((2122, 2131), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2129, 2131), True, 'import torch.nn as nn\n'), ((2137, 2152), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (2149, 2152), True, 'import torch.nn as nn\n'), ((2158, 2170), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (2168, 2170), True, 'import torch.nn as nn\n'), ((2176, 2194), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(60)'], {}), '(320, 60)\n', (2185, 2194), True, 'import torch.nn as nn\n'), ((2200, 2209), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2207, 2209), True, 'import torch.nn as nn\n'), ((2215, 2232), 'torch.nn.Linear', 'nn.Linear', (['(60)', '(10)'], {}), '(60, 10)\n', (2224, 2232), True, 'import torch.nn as nn\n'), ((2866, 2907), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred_cnn'], {}), '(y_test, y_pred_cnn)\n', (2887, 2907), False, 'from sklearn.metrics import ConfusionMatrixDisplay, classification_report, confusion_matrix\n'), ((2921, 2957), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred_cnn'], {}), '(y_test, y_pred_cnn)\n', (2937, 2957), False, 'from sklearn.metrics import ConfusionMatrixDisplay, classification_report, confusion_matrix\n'), ((5504, 5521), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5515, 5521), False, 'import pickle\n'), ((5596, 5640), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred_loaded'], {}), '(y_test, y_pred_loaded)\n', (5617, 5640), False, 'from sklearn.metrics import ConfusionMatrixDisplay, classification_report, confusion_matrix\n'), ((5654, 5693), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred_loaded'], {}), '(y_test, 
y_pred_loaded)\n', (5670, 5693), False, 'from sklearn.metrics import ConfusionMatrixDisplay, classification_report, confusion_matrix\n'), ((1267, 1294), 'torchvision.transforms.Resize', 'transforms.Resize', (['(28, 28)'], {}), '((28, 28))\n', (1284, 1294), True, 'import torchvision.transforms as transforms\n'), ((1296, 1317), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1315, 1317), True, 'import torchvision.transforms as transforms\n'), ((1867, 1894), 'torch.zeros', 'torch.zeros', (['(1, 1, 28, 28)'], {}), '((1, 1, 28, 28))\n', (1878, 1894), False, 'import torch\n'), ((1943, 1970), 'torch.zeros', 'torch.zeros', (['(1, 1, 28, 28)'], {}), '((1, 1, 28, 28))\n', (1954, 1970), False, 'import torch\n'), ((2293, 2320), 'torch.zeros', 'torch.zeros', (['(1, 1, 28, 28)'], {}), '((1, 1, 28, 28))\n', (2304, 2320), False, 'import torch\n'), ((2349, 2376), 'torch.zeros', 'torch.zeros', (['(1, 1, 28, 28)'], {}), '((1, 1, 28, 28))\n', (2360, 2376), False, 'import torch\n'), ((2591, 2621), 'skorch.helper.predefined_split', 'predefined_split', (['test_dataset'], {}), '(test_dataset)\n', (2607, 2621), False, 'from skorch.helper import predefined_split\n'), ((3196, 3222), 'numpy.where', 'np.where', (['(y_test != y_pred)'], {}), '(y_test != y_pred)\n', (3204, 3222), True, 'import numpy as np\n'), ((4117, 4144), 'torchvision.transforms.Resize', 'transforms.Resize', (['(28, 28)'], {}), '((28, 28))\n', (4134, 4144), True, 'import torchvision.transforms as transforms\n'), ((4154, 4175), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4173, 4175), True, 'import torchvision.transforms as transforms\n'), ((4552, 4582), 'skorch.helper.predefined_split', 'predefined_split', (['test_dataset'], {}), '(test_dataset)\n', (4568, 4582), False, 'from skorch.helper import predefined_split\n'), ((5236, 5266), 'skorch.helper.predefined_split', 'predefined_split', (['test_dataset'], {}), '(test_dataset)\n', (5252, 5266), False, 'from 
skorch.helper import predefined_split\n'), ((3548, 3575), 'torchvision.transforms.Resize', 'transforms.Resize', (['(56, 56)'], {}), '((56, 56))\n', (3565, 3575), True, 'import torchvision.transforms as transforms\n'), ((3593, 3683), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(28)', '(0.8, 1.0)'], {'interpolation': 'InterpolationMode.BICUBIC'}), '(28, (0.8, 1.0), interpolation=\n InterpolationMode.BICUBIC)\n', (3621, 3683), True, 'import torchvision.transforms as transforms\n'), ((3804, 3910), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '(15.0)', 'translate': '(0.08, 0.8)', 'interpolation': 'InterpolationMode.BICUBIC'}), '(degrees=15.0, translate=(0.08, 0.8), interpolation=\n InterpolationMode.BICUBIC)\n', (3827, 3910), True, 'import torchvision.transforms as transforms\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Some plotting examples
#
# https://seaborn.pydata.org/examples/index.html
# NOTE: this file is a Jupyter-notebook export; the `# In[n]:` markers are
# cell boundaries and `get_ipython()` only exists when run under IPython.
# In[1]:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
# In[23]:
from importlib import reload
# Re-import the plotting modules (useful after upgrading/editing them mid-session).
reload(mpl)
reload(plt)
reload(sns)
# In[28]:
# Show the local directory where seaborn caches its example datasets.
sns.get_data_home()
# In[29]:
fmri = sns.load_dataset('fmri')
# In[30]:
# Bare expression: displays the DataFrame when evaluated in a notebook cell.
fmri
# In[31]:
sns.set_theme(style="darkgrid")
# fMRI signal over time, colored by brain region, line style by event type.
sns.lineplot(x="timepoint", y="signal",
             hue="region", style="event",
             data=fmri)
# # Faceted logistic regression
# In[35]:
sns.set_theme(style='darkgrid')
df = sns.load_dataset('titanic')
df.head()
# In[36]:
# Make a custom palette with gendered colors
pal = dict(male="#6495ED", female="#F08080")
# In[38]:
# Show the survival probability as a function of age and sex
g = sns.lmplot(x="age", y="survived", col="sex", hue="sex", data=df,
               palette=pal, y_jitter=.02, logistic=True, truncate=False)
g.set(xlim=(0, 80), ylim=(-.05, 1.05))
# # Tutorial from beginning
# https://seaborn.pydata.org/tutorial/function_overview.html
# In[39]:
penguins = sns.load_dataset('penguins')
penguins
# ## Axes level functions (e.g. `histplot`, `scatterplot`)
# In[48]:
sns.histplot(data=penguins, x='flipper_length_mm', hue='species', multiple='stack')
# In[46]:
# `multiple` controls how the per-species distributions are combined.
sns.kdeplot(data=penguins, x='flipper_length_mm', hue='species', multiple='stack', alpha=0.5)
# In[45]:
sns.kdeplot(data=penguins, x='flipper_length_mm', hue='species', multiple='layer', alpha=0.5)
# In[44]:
sns.kdeplot(data=penguins, x='flipper_length_mm', hue='species', multiple='fill', alpha=0.5)
# ## Figure level functions (`displot`, `relplot`, `catplot`)
# In[49]:
sns.displot(data=penguins, x='flipper_length_mm', hue='species', multiple='stack')
# In[51]:
sns.displot(data=penguins, x='flipper_length_mm', hue='species', multiple='stack', kind='kde', alpha=0.5)
# ## Faceting with figure level functions
# In[52]:
sns.displot(data=penguins, x='flipper_length_mm', hue='species', col='species')
# ## Axes level functions make self contained plots
# In[57]:
# Place two axes-level plots side by side on one matplotlib figure.
f, axs = plt.subplots(1, 2, figsize=(8,4), gridspec_kw=dict(width_ratios=[4, 3]))
sns.scatterplot(data=penguins, x='flipper_length_mm', y='bill_length_mm', hue='species', ax=axs[0])
sns.histplot(data=penguins, x='species', hue='species', shrink=0.8, alpha=0.8, legend=False, ax=axs[1])
# In[70]:
# Same layout, plus tight_layout() to remove overlap between the subplots.
f, axs = plt.subplots(1, 2, figsize=(8,4), gridspec_kw=dict(width_ratios=[4, 3]))
sns.scatterplot(data=penguins, x='flipper_length_mm', y='bill_length_mm', hue='species', ax=axs[0])
sns.histplot(data=penguins, x='species', hue='species', shrink=0.8, alpha=0.8, legend=False, ax=axs[1])
f.tight_layout()
# ## Figure level functions own the figure
# In[68]:
tips = sns.load_dataset('tips')
g = sns.relplot(data=tips, x='total_bill', y='tip')
# Figure-level functions return a FacetGrid; the underlying Axes is g.ax.
g.ax.axline(xy1=(10, 2), slope=0.2, color='r', dashes=(5, 2))
# ## Customize plot from a figure level function
# In[72]:
g = sns.relplot(data=penguins, x='flipper_length_mm', y='bill_length_mm', col='sex')
g.set_axis_labels('Flipper length (mm)', 'Bill length (mm)')
# ## Specify figure size
# * Axes level functions
# * size determined by axes layout of the figure
# * Figure level functions
# * matplotlib: set `figsize` in `plt.subplots` or `mpl.Figure.set_size_inches()`
# * seaborn: `height`, `aspect` parameters (width = height * aspect)
# * Parameters correspond to size of each subplot
# Matplotlib
# In[73]:
f, ax = plt.subplots()
# In[74]:
f, ax = plt.subplots(1, 2, sharey=True)
# Seaborn FacetGrid
# In[75]:
g = sns.FacetGrid(penguins)
# In[76]:
g = sns.FacetGrid(penguins, col='sex')
# In[79]:
g = sns.FacetGrid(penguins, col='sex', height=3.5, aspect=1.2)
# ## Combine multiple views on the data
# `jointplot` and `pairplot`
# In[80]:
sns.jointplot(data=penguins, x='flipper_length_mm', y='bill_length_mm', hue='species')
# In[81]:
sns.pairplot(data=penguins, hue='species')
# In[82]:
sns.jointplot(data=penguins, x='flipper_length_mm', y='bill_length_mm', hue='species', kind='hist')
# # Data structures accepted by Seaborn
# https://seaborn.pydata.org/tutorial/data_structure.html
# ## Clean up messy data
# In[83]:
anagrams = sns.load_dataset('anagrams')
anagrams
# In[87]:
# Reshape wide -> long so each row is one (subject, attention, solutions) score.
anagrams_long = anagrams.melt(id_vars=['subidr', 'attnr'], var_name='solutions', value_name='score')
anagrams_long.head()
# Plot the average score as a function of attention and number of solutions
# In[94]:
sns.catplot(data=anagrams_long, x='solutions', y='score', hue='attnr', kind='point')
# ## Options for visualizing long form data
# In[96]:
flights = sns.load_dataset('flights')
flights_dict = flights.to_dict()
sns.relplot(data=flights_dict, x="year", y="passengers", hue="month", kind="line")
# In[97]:
flights.head()
# In[100]:
type(flights_dict['year'])
# In[102]:
flights_avg = flights.groupby('year').mean()
flights_avg
# In[104]:
sns.relplot(data=flights_avg, x='year', y='passengers', kind='line')
# Or, pass vectors
# In[105]:
year = flights_avg.index
passengers = flights_avg['passengers']
sns.relplot(x=year, y=passengers, kind='line')
# ### Collections with different length
# In[107]:
flights_wide = flights.pivot(index="year", columns="month", values="passengers")
two_series = [flights_wide.loc[:1955, 'Jan'], flights_wide.loc[1952:, 'Aug']]
sns.relplot(data=two_series, kind='line')
# ### remove index info
# In[108]:
two_series = [s.to_numpy() for s in two_series]
sns.relplot(data=two_series, kind='line')
# # Visualizing statistical relationships
# https://seaborn.pydata.org/tutorial/relational.html
# `relplot` (figure level)
#
# * `scatterplot()` (with `kind='scatter'`)
# * `lineplot()` (with `kind='line'`)
# ## Relate variables with scatter plots
# In[109]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style="darkgrid")
# In[110]:
tips = sns.load_dataset('tips')
sns.relplot(x='total_bill', y='tip', data=tips)
# ### Add a 3rd dimension using `hue` (change colors of points)
# In[112]:
sns.relplot(x='total_bill', y='tip', data=tips, hue='smoker')
# ### Different marker style for each class
# In[113]:
sns.relplot(x='total_bill', y='tip', data=tips, hue='smoker', style='smoker')
# ### 4D using hue and style
# In[114]:
sns.relplot(x='total_bill', y='tip', data=tips, hue='smoker', style='time')
# ### If hue is a numeric value, uses a sequential palette
# In[115]:
sns.relplot(x="total_bill", y="tip", hue="size", data=tips);
# ### Customize palette
# In[116]:
sns.relplot(x="total_bill", y="tip", hue="size", data=tips, palette="ch:r=-0.5, l=0.75");
# ### Change size of points
# In[118]:
sns.relplot(x="total_bill", y="tip", size="size", data=tips);
# In[123]:
sns.relplot(x="total_bill", y="tip", size="size", sizes=(15,200), data=tips, alpha=0.7);
# ## Emphasize continuity with line plots
# by default, the data is sorted by `x` before plotting with `lineplot`
# In[124]:
# Random-walk time series for the line-plot examples.
df = pd.DataFrame(dict(time=np.arange(500),
                       value=np.random.randn(500).cumsum()))
g = sns.relplot(x="time", y="value", kind="line", data=df)
g.fig.autofmt_xdate()
# #### Aggregation and representing uncertainty
# In[125]:
fmri = sns.load_dataset('fmri')
fmri
# Confidence intervals are computed using bootstrapping.
#
# May be time-intensive for larger datasets
# In[132]:
sns.relplot(x='timepoint', y='signal', kind='line', data=fmri)
# In[135]:
fmri.query("timepoint == 5").agg(['mean', 'std'])
# #### No confidence interval
#
# plots the mean of each point
# In[128]:
sns.relplot(x='timepoint', y='signal', ci=None, kind='line', data=fmri)
# #### Show standard deviation
# In[129]:
sns.relplot(x='timepoint', y='signal', ci='sd', kind='line', data=fmri)
# #### Turn off aggregation
# In[130]:
sns.relplot(x='timepoint', y='signal', estimator=None, kind='line', data=fmri)
# ## Plotting subsets of data with semantic mappings
# In[136]:
sns.relplot(x='timepoint', y='signal', hue='event', kind='line', data=fmri)
# In[137]:
sns.relplot(x='timepoint', y='signal', hue='region', style='event', kind='line', data=fmri)
# In[139]:
sns.relplot(x='timepoint', y='signal', hue='region', style='event', kind='line', markers=True, dashes=False, data=fmri)
# ## Plotting with date data
# In[146]:
# Same random walk, but indexed by calendar dates instead of integers.
df = pd.DataFrame(dict(time=pd.date_range("2021-01-01", periods=500),
                       value=np.random.randn(500).cumsum()))
g = sns.relplot(x="time", y="value", kind="line", data=df)
g.fig.autofmt_xdate()
g.ax.grid(False)
# ## Show multiple relationships with facets (`col` variable)
# In[147]:
sns.relplot(x='total_bill', y='tip', hue='smoker', col='time', data=tips)
# In[148]:
sns.relplot(x="timepoint", y="signal", hue="subject",
            col="region", row="event", height=3,
            kind="line", estimator=None, data=fmri);
# In[149]:
sns.relplot(x="timepoint", y="signal", hue="event", style="event",
            col="subject", col_wrap=5,
            height=3, aspect=.75, linewidth=2.5,
            kind="line", data=fmri.query("region == 'frontal'"));
# # Visualizing distributions of data
# The distributions module contains several functions designed to answer questions such as these. The axes-level functions are `histplot()`, `kdeplot()`, `ecdfplot()`, and `rugplot()`. They are grouped together within the figure-level `displot()`, `jointplot()`, and `pairplot()` functions.
# ## Plot univariate histograms
# In[151]:
penguins = sns.load_dataset('penguins')
sns.displot(penguins, x='flipper_length_mm');
# In[152]:
sns.displot(penguins, x='flipper_length_mm', binwidth=3);
# In[154]:
sns.displot(penguins, x='flipper_length_mm', bins=25);
# In[155]:
tips = sns.load_dataset('tips')
sns.displot(tips, x='size')
# ### specify the precise bin breaks by passing an array to bins
# In[156]:
sns.displot(tips, x="size", bins=[1, 2, 3, 4, 5, 6, 7])
# ### setting discrete=True, which chooses bin breaks that represent the unique values in a dataset with bars that are centered on their corresponding value
# In[163]:
sns.displot(tips, x="size", discrete=True, shrink=0.8, alpha=0.5)
# In[167]:
sns.displot(penguins, x="flipper_length_mm", col='sex', )
# ## `FacetGrid.map`
# In[169]:
# axes_style() as a context manager applies the style only to plots created
# inside the with-block.
with sns.axes_style("white"):
    g = sns.FacetGrid(tips, row="sex", col="smoker", margin_titles=True, height=2.5)
    g.map(sns.scatterplot, "total_bill", "tip", color="#334488")
    g.set_axis_labels("Total bill (US Dollars)", "Tip")
    g.set(xticks=[10, 30, 50], yticks=[2, 6, 10])
    g.fig.subplots_adjust(wspace=.02, hspace=.02)
# In[168]:
g = sns.FacetGrid(tips, col="smoker", margin_titles=True, height=4)
g.map(plt.scatter, "total_bill", "tip", color="#338844", edgecolor="white", s=50, lw=1)
# Draw the same reference line on every facet.
for ax in g.axes.flat:
    ax.axline((0, 0), slope=.2, c=".2", ls="--", zorder=0)
g.set(xlim=(0, 60), ylim=(0, 14))
# ### Custom function to map to `FacetGrid`
# In[170]:
from scipy import stats
def quantile_plot(x, **kwargs):
    """Scatter the values of ``x`` against their theoretical quantiles (QQ plot).

    Extra keyword arguments are forwarded to ``plt.scatter`` so this can be
    mapped over a ``FacetGrid`` (which passes e.g. ``color``).
    """
    theo_q, ordered_vals = stats.probplot(x, fit=False)
    plt.scatter(ordered_vals, theo_q, **kwargs)
# Map the custom plotting function over each facet: one QQ plot per sex.
g = sns.FacetGrid(tips, col="sex", height=4)
g.map(quantile_plot, "total_bill")
# In[ ]:
| [
"seaborn.histplot",
"seaborn.catplot",
"seaborn.scatterplot",
"seaborn.pairplot",
"numpy.arange",
"pandas.date_range",
"seaborn.load_dataset",
"matplotlib.pyplot.scatter",
"seaborn.axes_style",
"seaborn.displot",
"seaborn.lineplot",
"scipy.stats.probplot",
"seaborn.jointplot",
"numpy.rando... | [((325, 336), 'importlib.reload', 'reload', (['mpl'], {}), '(mpl)\n', (331, 336), False, 'from importlib import reload\n'), ((337, 348), 'importlib.reload', 'reload', (['plt'], {}), '(plt)\n', (343, 348), False, 'from importlib import reload\n'), ((349, 360), 'importlib.reload', 'reload', (['sns'], {}), '(sns)\n', (355, 360), False, 'from importlib import reload\n'), ((375, 394), 'seaborn.get_data_home', 'sns.get_data_home', ([], {}), '()\n', (392, 394), True, 'import seaborn as sns\n'), ((416, 440), 'seaborn.load_dataset', 'sns.load_dataset', (['"""fmri"""'], {}), "('fmri')\n", (432, 440), True, 'import seaborn as sns\n'), ((474, 505), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (487, 505), True, 'import seaborn as sns\n'), ((506, 585), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'hue': '"""region"""', 'style': '"""event"""', 'data': 'fmri'}), "(x='timepoint', y='signal', hue='region', style='event', data=fmri)\n", (518, 585), True, 'import seaborn as sns\n'), ((667, 698), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (680, 698), True, 'import seaborn as sns\n'), ((705, 732), 'seaborn.load_dataset', 'sns.load_dataset', (['"""titanic"""'], {}), "('titanic')\n", (721, 732), True, 'import seaborn as sns\n'), ((926, 1054), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': '"""age"""', 'y': '"""survived"""', 'col': '"""sex"""', 'hue': '"""sex"""', 'data': 'df', 'palette': 'pal', 'y_jitter': '(0.02)', 'logistic': '(True)', 'truncate': '(False)'}), "(x='age', y='survived', col='sex', hue='sex', data=df, palette=\n pal, y_jitter=0.02, logistic=True, truncate=False)\n", (936, 1054), True, 'import seaborn as sns\n'), ((1220, 1248), 'seaborn.load_dataset', 'sns.load_dataset', (['"""penguins"""'], {}), "('penguins')\n", (1236, 1248), True, 'import seaborn as sns\n'), ((1332, 1420), 'seaborn.histplot', 'sns.histplot', 
([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'hue': '"""species"""', 'multiple': '"""stack"""'}), "(data=penguins, x='flipper_length_mm', hue='species', multiple=\n 'stack')\n", (1344, 1420), True, 'import seaborn as sns\n'), ((1430, 1528), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'hue': '"""species"""', 'multiple': '"""stack"""', 'alpha': '(0.5)'}), "(data=penguins, x='flipper_length_mm', hue='species', multiple=\n 'stack', alpha=0.5)\n", (1441, 1528), True, 'import seaborn as sns\n'), ((1538, 1636), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'hue': '"""species"""', 'multiple': '"""layer"""', 'alpha': '(0.5)'}), "(data=penguins, x='flipper_length_mm', hue='species', multiple=\n 'layer', alpha=0.5)\n", (1549, 1636), True, 'import seaborn as sns\n'), ((1646, 1743), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'hue': '"""species"""', 'multiple': '"""fill"""', 'alpha': '(0.5)'}), "(data=penguins, x='flipper_length_mm', hue='species', multiple=\n 'fill', alpha=0.5)\n", (1657, 1743), True, 'import seaborn as sns\n'), ((1816, 1903), 'seaborn.displot', 'sns.displot', ([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'hue': '"""species"""', 'multiple': '"""stack"""'}), "(data=penguins, x='flipper_length_mm', hue='species', multiple=\n 'stack')\n", (1827, 1903), True, 'import seaborn as sns\n'), ((1913, 2023), 'seaborn.displot', 'sns.displot', ([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'hue': '"""species"""', 'multiple': '"""stack"""', 'kind': '"""kde"""', 'alpha': '(0.5)'}), "(data=penguins, x='flipper_length_mm', hue='species', multiple=\n 'stack', kind='kde', alpha=0.5)\n", (1924, 2023), True, 'import seaborn as sns\n'), ((2076, 2155), 'seaborn.displot', 'sns.displot', ([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'hue': '"""species"""', 'col': '"""species"""'}), "(data=penguins, 
x='flipper_length_mm', hue='species', col='species')\n", (2087, 2155), True, 'import seaborn as sns\n'), ((2306, 2409), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'y': '"""bill_length_mm"""', 'hue': '"""species"""', 'ax': 'axs[0]'}), "(data=penguins, x='flipper_length_mm', y='bill_length_mm',\n hue='species', ax=axs[0])\n", (2321, 2409), True, 'import seaborn as sns\n'), ((2406, 2514), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'penguins', 'x': '"""species"""', 'hue': '"""species"""', 'shrink': '(0.8)', 'alpha': '(0.8)', 'legend': '(False)', 'ax': 'axs[1]'}), "(data=penguins, x='species', hue='species', shrink=0.8, alpha=\n 0.8, legend=False, ax=axs[1])\n", (2418, 2514), True, 'import seaborn as sns\n'), ((2606, 2709), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'y': '"""bill_length_mm"""', 'hue': '"""species"""', 'ax': 'axs[0]'}), "(data=penguins, x='flipper_length_mm', y='bill_length_mm',\n hue='species', ax=axs[0])\n", (2621, 2709), True, 'import seaborn as sns\n'), ((2706, 2814), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'penguins', 'x': '"""species"""', 'hue': '"""species"""', 'shrink': '(0.8)', 'alpha': '(0.8)', 'legend': '(False)', 'ax': 'axs[1]'}), "(data=penguins, x='species', hue='species', shrink=0.8, alpha=\n 0.8, legend=False, ax=axs[1])\n", (2718, 2814), True, 'import seaborn as sns\n'), ((2892, 2916), 'seaborn.load_dataset', 'sns.load_dataset', (['"""tips"""'], {}), "('tips')\n", (2908, 2916), True, 'import seaborn as sns\n'), ((2921, 2968), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'tips', 'x': '"""total_bill"""', 'y': '"""tip"""'}), "(data=tips, x='total_bill', y='tip')\n", (2932, 2968), True, 'import seaborn as sns\n'), ((3099, 3184), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'y': '"""bill_length_mm"""', 'col': '"""sex"""'}), "(data=penguins, x='flipper_length_mm', 
y='bill_length_mm', col='sex'\n )\n", (3110, 3184), True, 'import seaborn as sns\n'), ((3626, 3640), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3638, 3640), True, 'import matplotlib.pyplot as plt\n'), ((3663, 3694), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '(True)'}), '(1, 2, sharey=True)\n', (3675, 3694), True, 'import matplotlib.pyplot as plt\n'), ((3734, 3757), 'seaborn.FacetGrid', 'sns.FacetGrid', (['penguins'], {}), '(penguins)\n', (3747, 3757), True, 'import seaborn as sns\n'), ((3776, 3810), 'seaborn.FacetGrid', 'sns.FacetGrid', (['penguins'], {'col': '"""sex"""'}), "(penguins, col='sex')\n", (3789, 3810), True, 'import seaborn as sns\n'), ((3829, 3887), 'seaborn.FacetGrid', 'sns.FacetGrid', (['penguins'], {'col': '"""sex"""', 'height': '(3.5)', 'aspect': '(1.2)'}), "(penguins, col='sex', height=3.5, aspect=1.2)\n", (3842, 3887), True, 'import seaborn as sns\n'), ((3973, 4064), 'seaborn.jointplot', 'sns.jointplot', ([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'y': '"""bill_length_mm"""', 'hue': '"""species"""'}), "(data=penguins, x='flipper_length_mm', y='bill_length_mm', hue\n ='species')\n", (3986, 4064), True, 'import seaborn as sns\n'), ((4074, 4116), 'seaborn.pairplot', 'sns.pairplot', ([], {'data': 'penguins', 'hue': '"""species"""'}), "(data=penguins, hue='species')\n", (4086, 4116), True, 'import seaborn as sns\n'), ((4131, 4235), 'seaborn.jointplot', 'sns.jointplot', ([], {'data': 'penguins', 'x': '"""flipper_length_mm"""', 'y': '"""bill_length_mm"""', 'hue': '"""species"""', 'kind': '"""hist"""'}), "(data=penguins, x='flipper_length_mm', y='bill_length_mm', hue\n ='species', kind='hist')\n", (4144, 4235), True, 'import seaborn as sns\n'), ((4382, 4410), 'seaborn.load_dataset', 'sns.load_dataset', (['"""anagrams"""'], {}), "('anagrams')\n", (4398, 4410), True, 'import seaborn as sns\n'), ((4647, 4736), 'seaborn.catplot', 'sns.catplot', ([], {'data': 'anagrams_long', 'x': 
'"""solutions"""', 'y': '"""score"""', 'hue': '"""attnr"""', 'kind': '"""point"""'}), "(data=anagrams_long, x='solutions', y='score', hue='attnr', kind\n ='point')\n", (4658, 4736), True, 'import seaborn as sns\n'), ((4801, 4828), 'seaborn.load_dataset', 'sns.load_dataset', (['"""flights"""'], {}), "('flights')\n", (4817, 4828), True, 'import seaborn as sns\n'), ((4862, 4949), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'flights_dict', 'x': '"""year"""', 'y': '"""passengers"""', 'hue': '"""month"""', 'kind': '"""line"""'}), "(data=flights_dict, x='year', y='passengers', hue='month', kind=\n 'line')\n", (4873, 4949), True, 'import seaborn as sns\n'), ((5103, 5171), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'flights_avg', 'x': '"""year"""', 'y': '"""passengers"""', 'kind': '"""line"""'}), "(data=flights_avg, x='year', y='passengers', kind='line')\n", (5114, 5171), True, 'import seaborn as sns\n'), ((5271, 5317), 'seaborn.relplot', 'sns.relplot', ([], {'x': 'year', 'y': 'passengers', 'kind': '"""line"""'}), "(x=year, y=passengers, kind='line')\n", (5282, 5317), True, 'import seaborn as sns\n'), ((5534, 5575), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'two_series', 'kind': '"""line"""'}), "(data=two_series, kind='line')\n", (5545, 5575), True, 'import seaborn as sns\n'), ((5664, 5705), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'two_series', 'kind': '"""line"""'}), "(data=two_series, kind='line')\n", (5675, 5705), True, 'import seaborn as sns\n'), ((6067, 6098), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (6080, 6098), True, 'import seaborn as sns\n'), ((6121, 6145), 'seaborn.load_dataset', 'sns.load_dataset', (['"""tips"""'], {}), "('tips')\n", (6137, 6145), True, 'import seaborn as sns\n'), ((6146, 6193), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'data': 'tips'}), "(x='total_bill', y='tip', data=tips)\n", (6157, 6193), True, 'import seaborn as sns\n'), 
((6274, 6335), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'data': 'tips', 'hue': '"""smoker"""'}), "(x='total_bill', y='tip', data=tips, hue='smoker')\n", (6285, 6335), True, 'import seaborn as sns\n'), ((6396, 6473), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'data': 'tips', 'hue': '"""smoker"""', 'style': '"""smoker"""'}), "(x='total_bill', y='tip', data=tips, hue='smoker', style='smoker')\n", (6407, 6473), True, 'import seaborn as sns\n'), ((6519, 6594), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'data': 'tips', 'hue': '"""smoker"""', 'style': '"""time"""'}), "(x='total_bill', y='tip', data=tips, hue='smoker', style='time')\n", (6530, 6594), True, 'import seaborn as sns\n'), ((6670, 6729), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'hue': '"""size"""', 'data': 'tips'}), "(x='total_bill', y='tip', hue='size', data=tips)\n", (6681, 6729), True, 'import seaborn as sns\n'), ((6771, 6864), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'hue': '"""size"""', 'data': 'tips', 'palette': '"""ch:r=-0.5, l=0.75"""'}), "(x='total_bill', y='tip', hue='size', data=tips, palette=\n 'ch:r=-0.5, l=0.75')\n", (6782, 6864), True, 'import seaborn as sns\n'), ((6905, 6965), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'size': '"""size"""', 'data': 'tips'}), "(x='total_bill', y='tip', size='size', data=tips)\n", (6916, 6965), True, 'import seaborn as sns\n'), ((6982, 7075), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'size': '"""size"""', 'sizes': '(15, 200)', 'data': 'tips', 'alpha': '(0.7)'}), "(x='total_bill', y='tip', size='size', sizes=(15, 200), data=\n tips, alpha=0.7)\n", (6993, 7075), True, 'import seaborn as sns\n'), ((7311, 7365), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""time"""', 'y': 
'"""value"""', 'kind': '"""line"""', 'data': 'df'}), "(x='time', y='value', kind='line', data=df)\n", (7322, 7365), True, 'import seaborn as sns\n'), ((7459, 7483), 'seaborn.load_dataset', 'sns.load_dataset', (['"""fmri"""'], {}), "('fmri')\n", (7475, 7483), True, 'import seaborn as sns\n'), ((7609, 7671), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', kind='line', data=fmri)\n", (7620, 7671), True, 'import seaborn as sns\n'), ((7817, 7888), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'ci': 'None', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', ci=None, kind='line', data=fmri)\n", (7828, 7888), True, 'import seaborn as sns\n'), ((7936, 8007), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'ci': '"""sd"""', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', ci='sd', kind='line', data=fmri)\n", (7947, 8007), True, 'import seaborn as sns\n'), ((8052, 8130), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'estimator': 'None', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', estimator=None, kind='line', data=fmri)\n", (8063, 8130), True, 'import seaborn as sns\n'), ((8200, 8275), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'hue': '"""event"""', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', hue='event', kind='line', data=fmri)\n", (8211, 8275), True, 'import seaborn as sns\n'), ((8291, 8387), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'hue': '"""region"""', 'style': '"""event"""', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', hue='region', style='event', kind=\n 'line', data=fmri)\n", (8302, 8387), True, 'import seaborn as sns\n'), ((8398, 8522), 'seaborn.relplot', 
'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'hue': '"""region"""', 'style': '"""event"""', 'kind': '"""line"""', 'markers': '(True)', 'dashes': '(False)', 'data': 'fmri'}), "(x='timepoint', y='signal', hue='region', style='event', kind=\n 'line', markers=True, dashes=False, data=fmri)\n", (8409, 8522), True, 'import seaborn as sns\n'), ((8698, 8752), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""time"""', 'y': '"""value"""', 'kind': '"""line"""', 'data': 'df'}), "(x='time', y='value', kind='line', data=df)\n", (8709, 8752), True, 'import seaborn as sns\n'), ((8870, 8943), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'hue': '"""smoker"""', 'col': '"""time"""', 'data': 'tips'}), "(x='total_bill', y='tip', hue='smoker', col='time', data=tips)\n", (8881, 8943), True, 'import seaborn as sns\n'), ((8959, 9094), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'hue': '"""subject"""', 'col': '"""region"""', 'row': '"""event"""', 'height': '(3)', 'kind': '"""line"""', 'estimator': 'None', 'data': 'fmri'}), "(x='timepoint', y='signal', hue='subject', col='region', row=\n 'event', height=3, kind='line', estimator=None, data=fmri)\n", (8970, 9094), True, 'import seaborn as sns\n'), ((9741, 9769), 'seaborn.load_dataset', 'sns.load_dataset', (['"""penguins"""'], {}), "('penguins')\n", (9757, 9769), True, 'import seaborn as sns\n'), ((9770, 9814), 'seaborn.displot', 'sns.displot', (['penguins'], {'x': '"""flipper_length_mm"""'}), "(penguins, x='flipper_length_mm')\n", (9781, 9814), True, 'import seaborn as sns\n'), ((9831, 9887), 'seaborn.displot', 'sns.displot', (['penguins'], {'x': '"""flipper_length_mm"""', 'binwidth': '(3)'}), "(penguins, x='flipper_length_mm', binwidth=3)\n", (9842, 9887), True, 'import seaborn as sns\n'), ((9904, 9957), 'seaborn.displot', 'sns.displot', (['penguins'], {'x': '"""flipper_length_mm"""', 'bins': '(25)'}), "(penguins, x='flipper_length_mm', 
bins=25)\n", (9915, 9957), True, 'import seaborn as sns\n'), ((9981, 10005), 'seaborn.load_dataset', 'sns.load_dataset', (['"""tips"""'], {}), "('tips')\n", (9997, 10005), True, 'import seaborn as sns\n'), ((10006, 10033), 'seaborn.displot', 'sns.displot', (['tips'], {'x': '"""size"""'}), "(tips, x='size')\n", (10017, 10033), True, 'import seaborn as sns\n'), ((10115, 10170), 'seaborn.displot', 'sns.displot', (['tips'], {'x': '"""size"""', 'bins': '[1, 2, 3, 4, 5, 6, 7]'}), "(tips, x='size', bins=[1, 2, 3, 4, 5, 6, 7])\n", (10126, 10170), True, 'import seaborn as sns\n'), ((10344, 10409), 'seaborn.displot', 'sns.displot', (['tips'], {'x': '"""size"""', 'discrete': '(True)', 'shrink': '(0.8)', 'alpha': '(0.5)'}), "(tips, x='size', discrete=True, shrink=0.8, alpha=0.5)\n", (10355, 10409), True, 'import seaborn as sns\n'), ((10425, 10480), 'seaborn.displot', 'sns.displot', (['penguins'], {'x': '"""flipper_length_mm"""', 'col': '"""sex"""'}), "(penguins, x='flipper_length_mm', col='sex')\n", (10436, 10480), True, 'import seaborn as sns\n'), ((10859, 10922), 'seaborn.FacetGrid', 'sns.FacetGrid', (['tips'], {'col': '"""smoker"""', 'margin_titles': '(True)', 'height': '(4)'}), "(tips, col='smoker', margin_titles=True, height=4)\n", (10872, 10922), True, 'import seaborn as sns\n'), ((11338, 11378), 'seaborn.FacetGrid', 'sns.FacetGrid', (['tips'], {'col': '"""sex"""', 'height': '(4)'}), "(tips, col='sex', height=4)\n", (11351, 11378), True, 'import seaborn as sns\n'), ((10525, 10548), 'seaborn.axes_style', 'sns.axes_style', (['"""white"""'], {}), "('white')\n", (10539, 10548), True, 'import seaborn as sns\n'), ((10558, 10634), 'seaborn.FacetGrid', 'sns.FacetGrid', (['tips'], {'row': '"""sex"""', 'col': '"""smoker"""', 'margin_titles': '(True)', 'height': '(2.5)'}), "(tips, row='sex', col='smoker', margin_titles=True, height=2.5)\n", (10571, 10634), True, 'import seaborn as sns\n'), ((11263, 11291), 'scipy.stats.probplot', 'stats.probplot', (['x'], {'fit': '(False)'}), '(x, 
fit=False)\n', (11277, 11291), False, 'from scipy import stats\n'), ((11296, 11332), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xr', 'quantiles'], {}), '(xr, quantiles, **kwargs)\n', (11307, 11332), True, 'import matplotlib.pyplot as plt\n'), ((7230, 7244), 'numpy.arange', 'np.arange', (['(500)'], {}), '(500)\n', (7239, 7244), True, 'import numpy as np\n'), ((8591, 8631), 'pandas.date_range', 'pd.date_range', (['"""2021-01-01"""'], {'periods': '(500)'}), "('2021-01-01', periods=500)\n", (8604, 8631), True, 'import pandas as pd\n'), ((7275, 7295), 'numpy.random.randn', 'np.random.randn', (['(500)'], {}), '(500)\n', (7290, 7295), True, 'import numpy as np\n'), ((8662, 8682), 'numpy.random.randn', 'np.random.randn', (['(500)'], {}), '(500)\n', (8677, 8682), True, 'import numpy as np\n')] |
from models.Model import Model
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn import svm
class SVM(Model):
    """Support-vector-machine classifier with grid-searched hyperparameters.

    Workflow: feature_selection() -> train() -> test(). Train/test guards
    enforce this ordering by raising ValueError when called out of order.
    """

    def __init__(self, params):
        self.params = params
        self.featureList = []
        # Sentinels: -1 means "not trained yet".
        self.acc = -1
        # BUGFIX: previously train_set_size was only set inside train(), so
        # calling test() before train() raised AttributeError instead of the
        # intended ValueError guard below.
        self.train_set_size = -1
        # used for the name of the prediction file
        self.name = "SVM"

    def feature_selection(self, x_train, y_train):
        """Select the features used for training and return them.

        A hand-picked list of (numerical/encoded) columns is used; y_train is
        accepted for interface compatibility but not consulted.
        """
        # NOTE: an earlier automatic selection of all non-object dtypes was
        # dead code (immediately overwritten) and has been removed.
        self.featureList = ["Pclass", "Sex", "Age", "Fare_PP", "Embarked", "Age*Class",
                            "Ticket_firstchar", "FamilySize", "FamilySize_cat", "Embarked_1",
                            "Embarked_2", "Embarked_3", "Title_1", "Title_2", "Title_3", "Title_4", "Title_5"]
        print(self.featureList)
        return self.featureList

    # train the model with the features determined in feature_selection()
    def train(self, train_X, train_Y, model_args):
        """Grid-search an SVC (C, kernel) with 10-fold CV and keep the best fit.

        Stores the fitted GridSearchCV in self.clf, the best CV accuracy in
        self.acc, and the training-set size in self.train_set_size.
        """
        if not self.featureList:
            raise ValueError('No features selected. Please first run feature selection.')
        # save training-set size, prepare the data, and select the features
        self.train_set_size = len(train_X)
        train_X = np.array(train_X[self.featureList])
        train_Y = np.array(train_Y)
        print("Training model..")
        param_grid = [
            {'C': [0.01, 0.1, 1], 'kernel': ['linear', 'rbf']}
        ]
        # optimize on SVM with no added in probability estimates
        clf_raw = svm.SVC()
        self.clf = GridSearchCV(clf_raw, param_grid, cv=10, scoring="accuracy", n_jobs=2)
        self.clf.fit(train_X, train_Y)
        print(self.clf.best_params_)
        self.acc = self.clf.best_score_
        print("Model with best parameters, train set avg CV accuracy:", self.acc)

    # predict the test set
    def test(self, X_test, labels):
        """Predict X_test and store [label, prediction] pairs in self.predictions."""
        if not self.featureList:
            raise ValueError('No features selected. Please first run feature selection.')
        if self.train_set_size == -1:
            raise ValueError("Couldn't determine training set size, did you run feature_selection and train first?")
        X_test = np.array(X_test[self.featureList])
        y_pred = self.clf.predict(X_test)
        # Pair each sample's label/id with its predicted class.
        self.predictions = [[labels[i], prediction] for i, prediction in enumerate(y_pred)]
| [
"numpy.array",
"sklearn.model_selection.GridSearchCV",
"sklearn.svm.SVC"
] | [((1226, 1261), 'numpy.array', 'np.array', (['train_X[self.featureList]'], {}), '(train_X[self.featureList])\n', (1234, 1261), True, 'import numpy as np\n'), ((1280, 1297), 'numpy.array', 'np.array', (['train_Y'], {}), '(train_Y)\n', (1288, 1297), True, 'import numpy as np\n'), ((1513, 1522), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (1520, 1522), False, 'from sklearn import svm\n'), ((1542, 1612), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf_raw', 'param_grid'], {'cv': '(10)', 'scoring': '"""accuracy"""', 'n_jobs': '(2)'}), "(clf_raw, param_grid, cv=10, scoring='accuracy', n_jobs=2)\n", (1554, 1612), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2435, 2469), 'numpy.array', 'np.array', (['X_test[self.featureList]'], {}), '(X_test[self.featureList])\n', (2443, 2469), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position
"""
Parse MultiNest *stats output file.
"""
from __future__ import absolute_import, print_function
__all__ = ['UNITS', 'parse', 'main']
from collections import OrderedDict
from os.path import abspath, dirname
import sys
import numpy as np
if __name__ == '__main__' and __package__ is None:
RETRO_DIR = dirname(dirname(abspath(__file__)))
if RETRO_DIR not in sys.path:
sys.path.append(RETRO_DIR)
from retro.utils.misc import expand
UNITS = dict(
x='m', y='m', z='m', t='ns', track_zenith='deg', track_azimuth='deg',
track_energy='GeV', cascade_energy='GeV'
)
# TODO: handle multi-modal output
def parse(stats, cube_dims):
    """Parse the mean/sigma table of a MultiNest *stats output file.

    Parameters
    ----------
    stats : iterable of str
        Lines of the stats file (e.g. from open(file).readlines()).
    cube_dims : sequence of str
        Dimension names in the order used for the MultiNest hypercube;
        must contain 'energy' and 'track_fraction'.

    Returns
    -------
    points, errors : OrderedDict
        Mean and one-sigma values per dimension, with derived
        'track_energy' and 'cascade_energy' entries appended.

    Raises
    ------
    ValueError
        If no line starting with "Dim No." is found.
    """
    header_idx = None
    for idx, line in enumerate(stats):
        if line.startswith('Dim No.'):
            header_idx = idx
            break
    if header_idx is None:
        raise ValueError('Could not find line with "Dim No." in file')

    table = stats[header_idx + 1 : header_idx + 1 + len(cube_dims)]
    points = OrderedDict()
    errors = OrderedDict()
    for dim, row in zip(cube_dims, table):
        mean, sigma = (float(tok) for tok in row.strip().split()[1:])
        points[dim] = mean
        errors[dim] = sigma

    # Derived quantities: split the total energy into track and cascade parts.
    tfrac = points['track_fraction']
    points['track_energy'] = points['energy'] * tfrac
    points['cascade_energy'] = points['energy'] * (1 - tfrac)
    errors['track_energy'] = errors['energy'] * tfrac
    errors['cascade_energy'] = errors['energy'] * (1 - tfrac)
    return points, errors
def main():
    """Load the stats file given on the command line and print parsed results.

    Usage: script.py <path-to-multinest-*stats-file>
    """
    # BUGFIX: the original called parse(contents) without the required
    # cube_dims argument, which always raised TypeError before the
    # except-ValueError handler could matter.  The ordering below is assumed
    # from the dimensions printed further down and the derived quantities
    # computed by parse() — TODO confirm against the actual MultiNest
    # hypercube ordering used in the fits.
    cube_dims = ['t', 'x', 'y', 'z', 'track_zenith', 'track_azimuth',
                 'energy', 'track_fraction']
    with open(expand(sys.argv[1]), 'r') as f:
        contents = f.readlines()
    try:
        points, errors = parse(contents, cube_dims)
    except ValueError:
        print('Failed to parse file "{}"'.format(sys.argv[1]))
        raise
    for dim in 't x y z track_zenith track_azimuth track_energy cascade_energy'.split():
        pt = points[dim]
        sd = errors[dim]
        # zenith/azimuth are stored in radians; convert for display
        if dim in ['track_zenith', 'track_azimuth']:
            pt = np.rad2deg(pt)
            sd = np.rad2deg(sd)
        print('{:14s} = {:8.3f} +/- {:5.1f} {}'.format(dim, pt, sd, UNITS[dim]))
if __name__ == '__main__':
main()
| [
"retro.utils.misc.expand",
"collections.OrderedDict",
"os.path.abspath",
"numpy.rad2deg",
"sys.path.append"
] | [((1376, 1389), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1387, 1389), False, 'from collections import OrderedDict\n'), ((1403, 1416), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1414, 1416), False, 'from collections import OrderedDict\n'), ((476, 502), 'sys.path.append', 'sys.path.append', (['RETRO_DIR'], {}), '(RETRO_DIR)\n', (491, 502), False, 'import sys\n'), ((414, 431), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (421, 431), False, 'from os.path import abspath, dirname\n'), ((2013, 2032), 'retro.utils.misc.expand', 'expand', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (2019, 2032), False, 'from retro.utils.misc import expand\n'), ((2439, 2453), 'numpy.rad2deg', 'np.rad2deg', (['pt'], {}), '(pt)\n', (2449, 2453), True, 'import numpy as np\n'), ((2471, 2485), 'numpy.rad2deg', 'np.rad2deg', (['sd'], {}), '(sd)\n', (2481, 2485), True, 'import numpy as np\n')] |
# Copyright (C) 2020 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy as np
import gwbench.snr as snr_mod
def calc_fisher_cov_matrices(del_hf_list, psd, f, only_fisher=0, df=None, cond_sup=1e15):
    """Build the symmetric Fisher matrix from waveform derivatives and, when
    it is well conditioned, its inverse (the covariance matrix).

    Returns (fisher, cov, wc_fisher, cond_num); cov is None when the Fisher
    matrix is ill conditioned or when only the Fisher matrix was requested.
    """
    dim = len(del_hf_list)
    fisher = np.zeros((dim, dim))
    # Fill the upper triangle (incl. diagonal) and mirror it, since the
    # scalar product is symmetric in its first two arguments.
    for row in range(dim):
        for col in range(row, dim):
            val = snr_mod.scalar_product_freq_array(
                del_hf_list[row], del_hf_list[col], psd, f, df)
            fisher[row, col] = val
            fisher[col, row] = val
    wc_fisher, cond_num = check_well_conditioned(fisher, cond_sup)
    # return cov=None, if Fisher not well conditioned OR if only fisher is wanted
    cov = calc_cov_from_fisher(fisher, wc_fisher and not only_fisher)
    return fisher, cov, wc_fisher, cond_num
def calc_cond_number(fisher):
    """Return the condition number of `fisher`: the ratio of the largest to
    the smallest eigenvalue magnitude."""
    eigenvalues, _ = np.linalg.eig(fisher)
    magnitudes = np.abs(eigenvalues)
    return np.amax(magnitudes) / np.amin(magnitudes)
def check_well_conditioned(fisher, cond_sup=1e15):
    """Check whether `fisher`'s condition number is below `cond_sup`.

    A `cond_sup` of None disables the check (treated as infinity).
    Returns (is_well_conditioned, condition_number).
    """
    threshold = np.inf if cond_sup is None else cond_sup
    cond_num = calc_cond_number(fisher)
    return cond_num < threshold, cond_num
def calc_cov_from_fisher(fisher, wc_fisher):
    """Invert `fisher` when `wc_fisher` is truthy; otherwise return None."""
    if not wc_fisher:
        return None
    return np.linalg.inv(fisher)
def inv_err_from_fisher_cov(fisher, cov, by_element=0):
    """Quantify how far fisher @ cov deviates from the identity matrix.

    Returns None when cov is None.  Otherwise returns the maximum absolute
    deviation: overall (by_element falsy), or per index as a 1-D array whose
    i-th entry is the larger of the i-th row and i-th column maxima
    (by_element truthy).
    """
    if cov is None:
        return None
    size = cov.shape[0]
    deviation = np.abs(np.matmul(fisher, cov) - np.identity(size))
    if not by_element:
        return np.amax(deviation)
    per_index = [max(np.amax(deviation[:, i]), np.amax(deviation[i]))
                 for i in range(size)]
    return np.array(per_index)
def get_errs_from_cov(cov, deriv_variables):
    """Map each parameter name to its 1-sigma error sqrt(|cov[i, i]|).

    Returns None when cov is None (ill-conditioned Fisher matrix).
    """
    if cov is None:
        return None
    return {name: np.sqrt(np.abs(cov[i, i]))
            for i, name in enumerate(deriv_variables)}
| [
"numpy.identity",
"numpy.abs",
"numpy.linalg.eig",
"gwbench.snr.scalar_product_freq_array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.matmul",
"numpy.amax",
"numpy.arange"
] | [((914, 930), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (922, 930), True, 'import numpy as np\n'), ((944, 956), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (953, 956), True, 'import numpy as np\n'), ((1535, 1556), 'numpy.linalg.eig', 'np.linalg.eig', (['fisher'], {}), '(fisher)\n', (1548, 1556), True, 'import numpy as np\n'), ((980, 1057), 'gwbench.snr.scalar_product_freq_array', 'snr_mod.scalar_product_freq_array', (['del_hf_list[i]', 'del_hf_list[i]', 'psd', 'f', 'df'], {}), '(del_hf_list[i], del_hf_list[i], psd, f, df)\n', (1013, 1057), True, 'import gwbench.snr as snr_mod\n'), ((1071, 1090), 'numpy.arange', 'np.arange', (['(i + 1)', 'n'], {}), '(i + 1, n)\n', (1080, 1090), True, 'import numpy as np\n'), ((1858, 1879), 'numpy.linalg.inv', 'np.linalg.inv', (['fisher'], {}), '(fisher)\n', (1871, 1879), True, 'import numpy as np\n'), ((2031, 2056), 'numpy.identity', 'np.identity', (['cov.shape[0]'], {}), '(cov.shape[0])\n', (2042, 2056), True, 'import numpy as np\n'), ((1114, 1191), 'gwbench.snr.scalar_product_freq_array', 'snr_mod.scalar_product_freq_array', (['del_hf_list[i]', 'del_hf_list[j]', 'psd', 'f', 'df'], {}), '(del_hf_list[i], del_hf_list[j], psd, f, df)\n', (1147, 1191), True, 'import gwbench.snr as snr_mod\n'), ((1576, 1587), 'numpy.abs', 'np.abs', (['EWs'], {}), '(EWs)\n', (1582, 1587), True, 'import numpy as np\n'), ((1597, 1608), 'numpy.abs', 'np.abs', (['EWs'], {}), '(EWs)\n', (1603, 1608), True, 'import numpy as np\n'), ((2254, 2266), 'numpy.amax', 'np.amax', (['res'], {}), '(res)\n', (2261, 2266), True, 'import numpy as np\n'), ((2078, 2100), 'numpy.matmul', 'np.matmul', (['fisher', 'cov'], {}), '(fisher, cov)\n', (2087, 2100), True, 'import numpy as np\n'), ((2463, 2480), 'numpy.abs', 'np.abs', (['cov[i, i]'], {}), '(cov[i, i])\n', (2469, 2480), True, 'import numpy as np\n'), ((2158, 2176), 'numpy.amax', 'np.amax', (['res[:, i]'], {}), '(res[:, i])\n', (2165, 2176), True, 'import numpy as np\n'), ((2176, 2191), 
'numpy.amax', 'np.amax', (['res[i]'], {}), '(res[i])\n', (2183, 2191), True, 'import numpy as np\n')] |
import os
from tqdm import tqdm
import parse
import numpy as np
import pandas as pd
try:
from __init__ import ROOT
except ImportError:
ROOT = None
from misc.elc import GIW_EVENT_MAPPING
from misc import utils
data_path_giw = "Gaze-In-Wild/LabelData"
data_path_giw_data = "Gaze-In-Wild/ProcessData"
def parse_giw(root, **kwargs):
    """Parse Gaze-in-Wild label (and, when available, gaze) .mat files.

    Parameters
    ----------
    root : str
        Dataset root directory containing the GIW label/process folders.
    **kwargs
        Unused; accepted for a uniform parser interface.

    Returns
    -------
    list of (pandas.DataFrame, str)
        One (etdata, relative-save-path) tuple per label file.  etdata holds
        columns t, x, y, status (True = valid sample) and evt (remapped via
        GIW_EVENT_MAPPING).
    """
    dataset_name = "giw"
    fname_fmt = "{fname}_Lbr_{coder:d}"
    data_dir = os.path.join(root, data_path_giw)
    print(f"Parsing {dataset_name} from {data_dir}...")
    files = utils.dir_walk(data_dir, "mat")
    data_accum = []
    for fpath in tqdm(files):
        fdir, fname = utils.split_path(fpath)
        _p = parse.parse(fname_fmt, fname)
        coder = _p["coder"]
        data = utils.loadmat(fpath)["LabelData"]
        assert coder == data["LbrIdx"]
        # Number of labelled samples; needed below by the no-gaze fallback.
        # BUGFIX: previously this was computed *after* being used in the
        # else-branch, raising NameError on the first file without gaze data.
        _l = len(data["Labels"])
        # try to load gaze data
        _fname = _p["fname"]
        fpath_gaze = os.path.join(root, data_path_giw_data, f"{_fname}.mat")
        if os.path.exists(fpath_gaze):
            data_gaze = utils.loadmat(fpath_gaze)["ProcessData"]
            x, y = data_gaze["ETG"]["POR"].T
            # label data with confidence < 0.3 as trackloss (Kothari et al. 2020, pp.6)
            trackloss = data_gaze["ETG"]["Confidence"] < 0.3
            x[trackloss] = np.nan
            y[trackloss] = np.nan
        else:
            # No gaze recording: emit placeholder zeros and flag every sample
            # as trackloss.  BUGFIX: np.bool was removed in NumPy >= 1.24;
            # the builtin bool is what the deprecated alias resolved to.
            x = np.zeros(_l, dtype=np.float32)
            y = np.zeros(_l, dtype=np.float32)
            trackloss = np.ones(_l, dtype=bool)
        etdata = pd.DataFrame(
            {
                "t": data["T"],
                "x": x,
                "y": y,
                "status": ~trackloss,
                "evt": data["Labels"],
            }
        )
        etdata.replace({"evt": GIW_EVENT_MAPPING}, inplace=True)
        rdir = os.path.relpath(fdir, data_dir)
        spath = os.path.join(f"{dataset_name}_{coder}", rdir, _fname)
        data_accum.append((etdata, spath))
    return data_accum
| [
"misc.utils.split_path",
"os.path.exists",
"parse.parse",
"numpy.ones",
"tqdm.tqdm",
"os.path.join",
"misc.utils.dir_walk",
"numpy.zeros",
"pandas.DataFrame",
"misc.utils.loadmat",
"os.path.relpath"
] | [((424, 457), 'os.path.join', 'os.path.join', (['root', 'data_path_giw'], {}), '(root, data_path_giw)\n', (436, 457), False, 'import os\n'), ((527, 558), 'misc.utils.dir_walk', 'utils.dir_walk', (['data_dir', '"""mat"""'], {}), "(data_dir, 'mat')\n", (541, 558), False, 'from misc import utils\n'), ((596, 607), 'tqdm.tqdm', 'tqdm', (['files'], {}), '(files)\n', (600, 607), False, 'from tqdm import tqdm\n'), ((631, 654), 'misc.utils.split_path', 'utils.split_path', (['fpath'], {}), '(fpath)\n', (647, 654), False, 'from misc import utils\n'), ((668, 697), 'parse.parse', 'parse.parse', (['fname_fmt', 'fname'], {}), '(fname_fmt, fname)\n', (679, 697), False, 'import parse\n'), ((897, 952), 'os.path.join', 'os.path.join', (['root', 'data_path_giw_data', 'f"""{_fname}.mat"""'], {}), "(root, data_path_giw_data, f'{_fname}.mat')\n", (909, 952), False, 'import os\n'), ((964, 990), 'os.path.exists', 'os.path.exists', (['fpath_gaze'], {}), '(fpath_gaze)\n', (978, 990), False, 'import os\n'), ((1488, 1583), 'pandas.DataFrame', 'pd.DataFrame', (["{'t': data['T'], 'x': x, 'y': y, 'status': ~trackloss, 'evt': data['Labels']}"], {}), "({'t': data['T'], 'x': x, 'y': y, 'status': ~trackloss, 'evt':\n data['Labels']})\n", (1500, 1583), True, 'import pandas as pd\n'), ((1778, 1809), 'os.path.relpath', 'os.path.relpath', (['fdir', 'data_dir'], {}), '(fdir, data_dir)\n', (1793, 1809), False, 'import os\n'), ((1826, 1879), 'os.path.join', 'os.path.join', (['f"""{dataset_name}_{coder}"""', 'rdir', '_fname'], {}), "(f'{dataset_name}_{coder}', rdir, _fname)\n", (1838, 1879), False, 'import os\n'), ((741, 761), 'misc.utils.loadmat', 'utils.loadmat', (['fpath'], {}), '(fpath)\n', (754, 761), False, 'from misc import utils\n'), ((1355, 1385), 'numpy.zeros', 'np.zeros', (['_l'], {'dtype': 'np.float32'}), '(_l, dtype=np.float32)\n', (1363, 1385), True, 'import numpy as np\n'), ((1410, 1436), 'numpy.ones', 'np.ones', (['_l'], {'dtype': 'np.bool'}), '(_l, dtype=np.bool)\n', (1417, 1436), True, 
'import numpy as np\n'), ((1016, 1041), 'misc.utils.loadmat', 'utils.loadmat', (['fpath_gaze'], {}), '(fpath_gaze)\n', (1029, 1041), False, 'from misc import utils\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 17:13:47 2020
@author: jeremiasknoblauch
Description: Some helper files for running splits with NPL
"""
import numpy as np
from npl.NPL import NPL
import os
import pandas as pd
def get_data(data_path, data_name):
    """Load the whitespace-separated dataset file <data_path><data_name>.txt.

    The last column is returned as the integer label vector Y; all preceding
    columns form the feature matrix X.
    """
    frame = pd.read_csv(data_path + data_name + ".txt", sep=" ", header=None)
    values = np.array(frame)
    X = values[:, :-1]
    Y = values[:, -1].astype(int)
    return X, Y
def get_test_performance(X, Y, num_splits, train_proportion, L, B,
                         save_path, data_name,
                         print_option = True):
    """Evaluate NPL classifiers over repeated random train/test splits.

    For each of `num_splits` seeded splits, an NPL sampler with parameter `L`
    draws `B` posterior samples on the training portion and is evaluated on
    the held-out remainder.  Both the TVD-based predictor (predict) and the
    KLD/log-loss predictor (predict_log_loss) are scored, and all per-split
    metrics are written as text files under `save_path/data_name/`.

    Args:
        X: feature matrix of shape (n, d).
        Y: label vector of length n -- presumably binary 0/1, since the
            probabilistic accuracy below treats exp(log_probs) as the
            probability of class 1; TODO confirm against NPL.predict.
        num_splits: number of random train/test splits (also the RNG seeds).
        train_proportion: fraction of the n samples used for training.
        L: parameter forwarded to the NPL constructor.
        B: number of posterior samples drawn per split.
        save_path: directory under which a `data_name` folder is created.
        data_name: dataset name, used as the results sub-folder.
        print_option: when True, print per-split progress and metrics.
    """
    # Create the NPL object
    npl_sampler = NPL(L)
    n, d = X.shape
    n_train = int(np.floor(n * train_proportion))
    n_test = int(n - n_train)  # NOTE(review): currently unused
    # Loop over the splits
    for i in range(0, num_splits):
        # notify user of split
        if print_option:
            print("Starting to process split " + str(i) + " / " + str(num_splits))
        # Create the split for seed; seeding with the split index makes
        # every split reproducible
        np.random.seed(i)
        train_indices = np.random.choice(n, size = n_train, replace=False)
        test_indices = np.setdiff1d(np.linspace(0,n-1,n, dtype=int), train_indices)
        X_train = X[train_indices,:]
        Y_train = Y[train_indices]
        X_test = X[test_indices,:]
        Y_test = Y[test_indices]
        # Sample from NPL object, based on the train proportion of (X,Y)
        npl_sampler.draw_samples(Y_train, X_train,B, display_opt=False)
        # Test on the remainder of (X,Y)
        log_probs, accuracy, cross_entropy = npl_sampler.predict(Y_test, X_test)
        log_probs_init, accuracy_init, cross_entropy_init = npl_sampler.predict_log_loss(Y_test, X_test)
        # |p*y + (p-1)*(1-y)| = probability assigned to the true label
        # (p for y=1, 1-p for y=0), with p = exp(log_probs)
        accuracy_prob = np.abs(np.exp(log_probs) * Y_test[:,np.newaxis]
                        + (np.exp(log_probs)-1) * (1-Y_test[:,np.newaxis]))
        accuracy_prob_init = np.abs(np.exp(log_probs_init) * Y_test[:,np.newaxis]
                        + (np.exp(log_probs_init)-1) * (1-Y_test[:,np.newaxis]))
        # notify user of results
        if print_option:
            print("TVD accuracy ", np.mean(accuracy))
            print("KLD accuracy ", np.mean(accuracy_init))
            print("TVD probabilistic accuracy ", np.mean(accuracy_prob))
            print("KLD probabilistic accuracy ", np.mean(accuracy_prob_init))
            print("TVD cross entropy ", np.mean(cross_entropy))
            print("KLD cross entropy ", np.mean(cross_entropy_init))
        # save the results to path; zero-pad the split index so files sort
        if i < 10:
            num = "0" + str(i)
        else:
            num = str(i)
        # create a folder with the data name in which to save all results
        file_path = save_path + "/" + data_name + "/"
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        # log probs
        np.savetxt(file_path + num + "_log_probs_TVD.txt", log_probs)
        np.savetxt(file_path + num + "_log_probs_KLD.txt", log_probs_init)
        # accuracy
        np.savetxt(file_path + num + "_accuracy_TVD.txt", accuracy)
        np.savetxt(file_path + num + "_accuracy_KLD.txt", accuracy_init)
        # probabilistic accuracy
        np.savetxt(file_path + num + "_probabilistic_accuracy_TVD.txt",
                   accuracy_prob)
        np.savetxt(file_path + num + "_probabilistic_accuracy_KLD.txt",
                   accuracy_prob_init)
        # cross-entropy
        np.savetxt(file_path + num + "_cross_entropy_TVD.txt", cross_entropy)
        np.savetxt(file_path + num + "_cross_entropy_KLD.txt", cross_entropy_init)
| [
"os.path.exists",
"numpy.mean",
"pandas.read_csv",
"os.makedirs",
"numpy.random.choice",
"npl.NPL.NPL",
"numpy.floor",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"numpy.random.seed",
"numpy.savetxt"
] | [((307, 372), 'pandas.read_csv', 'pd.read_csv', (["(data_path + data_name + '.txt')"], {'sep': '""" """', 'header': 'None'}), "(data_path + data_name + '.txt', sep=' ', header=None)\n", (318, 372), True, 'import pandas as pd\n'), ((815, 821), 'npl.NPL.NPL', 'NPL', (['L'], {}), '(L)\n', (818, 821), False, 'from npl.NPL import NPL\n'), ((381, 395), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (389, 395), True, 'import numpy as np\n'), ((411, 425), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (419, 425), True, 'import numpy as np\n'), ((859, 889), 'numpy.floor', 'np.floor', (['(n * train_proportion)'], {}), '(n * train_proportion)\n', (867, 889), True, 'import numpy as np\n'), ((1185, 1202), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (1199, 1202), True, 'import numpy as np\n'), ((1227, 1275), 'numpy.random.choice', 'np.random.choice', (['n'], {'size': 'n_train', 'replace': '(False)'}), '(n, size=n_train, replace=False)\n', (1243, 1275), True, 'import numpy as np\n'), ((3099, 3160), 'numpy.savetxt', 'np.savetxt', (["(file_path + num + '_log_probs_TVD.txt')", 'log_probs'], {}), "(file_path + num + '_log_probs_TVD.txt', log_probs)\n", (3109, 3160), True, 'import numpy as np\n'), ((3169, 3235), 'numpy.savetxt', 'np.savetxt', (["(file_path + num + '_log_probs_KLD.txt')", 'log_probs_init'], {}), "(file_path + num + '_log_probs_KLD.txt', log_probs_init)\n", (3179, 3235), True, 'import numpy as np\n'), ((3263, 3322), 'numpy.savetxt', 'np.savetxt', (["(file_path + num + '_accuracy_TVD.txt')", 'accuracy'], {}), "(file_path + num + '_accuracy_TVD.txt', accuracy)\n", (3273, 3322), True, 'import numpy as np\n'), ((3331, 3395), 'numpy.savetxt', 'np.savetxt', (["(file_path + num + '_accuracy_KLD.txt')", 'accuracy_init'], {}), "(file_path + num + '_accuracy_KLD.txt', accuracy_init)\n", (3341, 3395), True, 'import numpy as np\n'), ((3437, 3515), 'numpy.savetxt', 'np.savetxt', (["(file_path + num + '_probabilistic_accuracy_TVD.txt')", 
'accuracy_prob'], {}), "(file_path + num + '_probabilistic_accuracy_TVD.txt', accuracy_prob)\n", (3447, 3515), True, 'import numpy as np\n'), ((3544, 3631), 'numpy.savetxt', 'np.savetxt', (["(file_path + num + '_probabilistic_accuracy_KLD.txt')", 'accuracy_prob_init'], {}), "(file_path + num + '_probabilistic_accuracy_KLD.txt',\n accuracy_prob_init)\n", (3554, 3631), True, 'import numpy as np\n'), ((3680, 3749), 'numpy.savetxt', 'np.savetxt', (["(file_path + num + '_cross_entropy_TVD.txt')", 'cross_entropy'], {}), "(file_path + num + '_cross_entropy_TVD.txt', cross_entropy)\n", (3690, 3749), True, 'import numpy as np\n'), ((3758, 3832), 'numpy.savetxt', 'np.savetxt', (["(file_path + num + '_cross_entropy_KLD.txt')", 'cross_entropy_init'], {}), "(file_path + num + '_cross_entropy_KLD.txt', cross_entropy_init)\n", (3768, 3832), True, 'import numpy as np\n'), ((1314, 1349), 'numpy.linspace', 'np.linspace', (['(0)', '(n - 1)', 'n'], {'dtype': 'int'}), '(0, n - 1, n, dtype=int)\n', (1325, 1349), True, 'import numpy as np\n'), ((2996, 3021), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (3010, 3021), False, 'import os\n'), ((3035, 3057), 'os.makedirs', 'os.makedirs', (['file_path'], {}), '(file_path)\n', (3046, 3057), False, 'import os\n'), ((2352, 2369), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (2359, 2369), True, 'import numpy as np\n'), ((2406, 2428), 'numpy.mean', 'np.mean', (['accuracy_init'], {}), '(accuracy_init)\n', (2413, 2428), True, 'import numpy as np\n'), ((2479, 2501), 'numpy.mean', 'np.mean', (['accuracy_prob'], {}), '(accuracy_prob)\n', (2486, 2501), True, 'import numpy as np\n'), ((2552, 2579), 'numpy.mean', 'np.mean', (['accuracy_prob_init'], {}), '(accuracy_prob_init)\n', (2559, 2579), True, 'import numpy as np\n'), ((2621, 2643), 'numpy.mean', 'np.mean', (['cross_entropy'], {}), '(cross_entropy)\n', (2628, 2643), True, 'import numpy as np\n'), ((2685, 2712), 'numpy.mean', 'np.mean', 
(['cross_entropy_init'], {}), '(cross_entropy_init)\n', (2692, 2712), True, 'import numpy as np\n'), ((1939, 1956), 'numpy.exp', 'np.exp', (['log_probs'], {}), '(log_probs)\n', (1945, 1956), True, 'import numpy as np\n'), ((2103, 2125), 'numpy.exp', 'np.exp', (['log_probs_init'], {}), '(log_probs_init)\n', (2109, 2125), True, 'import numpy as np\n'), ((2018, 2035), 'numpy.exp', 'np.exp', (['log_probs'], {}), '(log_probs)\n', (2024, 2035), True, 'import numpy as np\n'), ((2187, 2209), 'numpy.exp', 'np.exp', (['log_probs_init'], {}), '(log_probs_init)\n', (2193, 2209), True, 'import numpy as np\n')] |
import os
import argparse
import cv2
import numpy as np
import pandas as pd
import sys
sys.path.append('..')
nose_rect_index = {
'aflw':[8,9,12,13,14],
'wflw':[51,52,53,54,55,56,57,58,58,64,68]
}
def get_rect(index_array, landmarks, img_shape):
    """Compute a jittered, image-clipped bounding box around chosen landmarks.

    Args:
        index_array: indices of the landmarks to enclose.
        landmarks: sequence of (x, y) points, shape (-1, 2).
        img_shape: image shape as (height, width[, channels]).

    Returns:
        ((left_top_x, left_top_y), (right_bottom_x, right_bottom_y)) as ints;
        each side is expanded outwards by a random margin of up to 14/1000 of
        the image extent and clipped to the image bounds.
    """
    pts = np.array([landmarks[i] for i in index_array], dtype=np.float32)
    # argmax/argmin over axis 0 give the row index of the extreme x and y
    row_of_max_x, row_of_max_y = np.argmax(pts, axis=0)
    row_of_min_x, row_of_min_y = np.argmin(pts, axis=0)
    h, w = img_shape[0], img_shape[1]

    def _jitter(extent):
        # random outward margin: up to 14/1000 of the image extent
        return np.random.randint(0, 15) / 1000.0 * extent

    left = int(max(0, pts[row_of_min_x][0] - _jitter(w)))
    top = int(max(0, pts[row_of_min_y][1] - _jitter(h)))
    right = int(min(w, pts[row_of_max_x][0] + _jitter(w)))
    bottom = int(min(h, pts[row_of_max_y][1] + _jitter(h)))
    return (left, top), (right, bottom)
# return (point_landmarks[a0_min][0],point_landmarks[a1_min][1]),(point_landmarks[a0_max][0],point_landmarks[a1_max][1])
def create_folder(str_path):
    """Ensure every parent directory of `str_path` exists.

    Creates the directories one level at a time (like os.makedirs, but keeps
    the per-level log messages) and warns when `str_path` itself already
    exists, which here means the image already holds at least one processed
    face.

    Args:
        str_path: '/'-separated path to a file whose parent directories
            should exist.
    """
    paths = str_path.split('/')
    temp_folder = ""
    # BUGFIX: the previous loop never created the first path component, so
    # os.mkdir on the second component failed when the root was missing.
    for part in paths[:-1]:
        if not part:
            # skip empty components (e.g. leading '/' of an absolute path)
            continue
        temp_folder = part if not temp_folder else os.path.join(temp_folder, part)
        if not os.path.exists(temp_folder):
            print("{} not exist , created.".format(temp_folder))
            os.mkdir(temp_folder)
    if os.path.exists(str_path):
        print("{} exist more than one face.".format(str_path))
def parse_args():
    """Build and evaluate the CLI for the nose-image dataset generator.

    Returns:
        argparse.Namespace with Dataset, Train_type and Gen_folder fields.
    """
    parser = argparse.ArgumentParser(description='Gen Nose image dataset')
    parser.add_argument('--Dataset', type=str, default="wflw",
                        help='experiment dataset name')
    parser.add_argument('--Train_type', type=str, default="test",
                        help='experiment dataset type')
    parser.add_argument('--Gen_folder', type=str, default="nose_images",
                        help='generate dataset folder')
    return parser.parse_args()
def main():
    """Cut the nose region out of every landmark-annotated image and paste it
    onto an (initially black) canvas under data/<Dataset>/<Gen_folder>.

    Reads data/<Dataset>/face_landmarks_<Dataset>_<Train_type>.csv; each row
    holds an image name followed by flattened (x, y) landmark coordinates.
    When the target image already exists (images with several faces), the new
    nose patch is added onto the existing canvas.
    """
    args = parse_args()
    csv_file = os.path.join('data', args.Dataset, "face_landmarks_{}_{}.csv".format(args.Dataset, args.Train_type))
    image_folder = os.path.join('data', args.Dataset, "images")
    Gen_folder = os.path.join('data', args.Dataset, args.Gen_folder)
    if not os.path.exists(Gen_folder):
        os.mkdir(Gen_folder)
        print("Create folder : {}".format(Gen_folder))
    landmark_frame = pd.read_csv(csv_file)
    for i in range(landmark_frame.shape[0]):
        image_name = landmark_frame.iloc[i, 0]
        image_path = os.path.join(image_folder, image_name)
        gen_image_path = os.path.join(Gen_folder, image_name)
        img = cv2.imread(image_path)
        if os.path.exists(gen_image_path):
            # A face from this image was already processed: draw onto it.
            print("{} have processed one face, add face inplace.".format(gen_image_path))
            nose_img = cv2.imread(gen_image_path)
        else:
            # BUGFIX: dtype was np.int, which was removed in NumPy >= 1.24,
            # and cv2.imwrite cannot write int64 anyway; uint8 matches the
            # cv2.imread output being copied in below.
            nose_img = np.zeros(img.shape, dtype=np.uint8)
        # Landmark columns start at 4 for wflw and at 5 for the other datasets.
        if args.Dataset == 'wflw':
            landmarks = landmark_frame.iloc[i, 4:].values
        else:
            landmarks = landmark_frame.iloc[i, 5:].values
        landmarks = landmarks.astype('int').reshape(-1, 2)
        left_top, right_bottom = get_rect(nose_rect_index[args.Dataset], landmarks, img.shape)
        # Copy only the (jittered) nose rectangle from the source image.
        nose_img[left_top[1]:right_bottom[1], left_top[0]:right_bottom[0]] = \
            img[left_top[1]:right_bottom[1], left_top[0]:right_bottom[0]]
        create_folder(gen_image_path)
        cv2.imwrite(gen_image_path, nose_img)
if __name__=="__main__":
main()
| [
"os.path.exists",
"cv2.imwrite",
"argparse.ArgumentParser",
"pandas.read_csv",
"os.path.join",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"os.mkdir",
"numpy.argmin",
"sys.path.append",
"cv2.imread"
] | [((88, 109), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (103, 109), False, 'import sys\n'), ((431, 494), 'numpy.array', 'np.array', (['[landmarks[i] for i in index_array]'], {'dtype': 'np.float32'}), '([landmarks[i] for i in index_array], dtype=np.float32)\n', (439, 494), True, 'import numpy as np\n'), ((515, 549), 'numpy.argmax', 'np.argmax', (['point_landmarks'], {'axis': '(0)'}), '(point_landmarks, axis=0)\n', (524, 549), True, 'import numpy as np\n'), ((570, 604), 'numpy.argmin', 'np.argmin', (['point_landmarks'], {'axis': '(0)'}), '(point_landmarks, axis=0)\n', (579, 604), True, 'import numpy as np\n'), ((1570, 1594), 'os.path.exists', 'os.path.exists', (['str_path'], {}), '(str_path)\n', (1584, 1594), False, 'import os\n'), ((1694, 1755), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Gen Nose image dataset"""'}), "(description='Gen Nose image dataset')\n", (1717, 1755), False, 'import argparse\n'), ((2345, 2389), 'os.path.join', 'os.path.join', (['"""data"""', 'args.Dataset', '"""images"""'], {}), "('data', args.Dataset, 'images')\n", (2357, 2389), False, 'import os\n'), ((2405, 2456), 'os.path.join', 'os.path.join', (['"""data"""', 'args.Dataset', 'args.Gen_folder'], {}), "('data', args.Dataset, args.Gen_folder)\n", (2417, 2456), False, 'import os\n'), ((2601, 2622), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (2612, 2622), True, 'import pandas as pd\n'), ((1383, 1422), 'os.path.join', 'os.path.join', (['temp_folder', 'paths[i + 1]'], {}), '(temp_folder, paths[i + 1])\n', (1395, 1422), False, 'import os\n'), ((2466, 2492), 'os.path.exists', 'os.path.exists', (['Gen_folder'], {}), '(Gen_folder)\n', (2480, 2492), False, 'import os\n'), ((2502, 2522), 'os.mkdir', 'os.mkdir', (['Gen_folder'], {}), '(Gen_folder)\n', (2510, 2522), False, 'import os\n'), ((2736, 2774), 'os.path.join', 'os.path.join', (['image_folder', 'image_name'], {}), '(image_folder, image_name)\n', (2748, 
2774), False, 'import os\n'), ((2799, 2835), 'os.path.join', 'os.path.join', (['Gen_folder', 'image_name'], {}), '(Gen_folder, image_name)\n', (2811, 2835), False, 'import os\n'), ((2939, 2961), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2949, 2961), False, 'import cv2\n'), ((2974, 3004), 'os.path.exists', 'os.path.exists', (['gen_image_path'], {}), '(gen_image_path)\n', (2988, 3004), False, 'import os\n'), ((4080, 4117), 'cv2.imwrite', 'cv2.imwrite', (['gen_image_path', 'nose_img'], {}), '(gen_image_path, nose_img)\n', (4091, 4117), False, 'import cv2\n'), ((1435, 1462), 'os.path.exists', 'os.path.exists', (['temp_folder'], {}), '(temp_folder)\n', (1449, 1462), False, 'import os\n'), ((1541, 1562), 'os.mkdir', 'os.mkdir', (['temp_folder'], {}), '(temp_folder)\n', (1549, 1562), False, 'import os\n'), ((3119, 3145), 'cv2.imread', 'cv2.imread', (['gen_image_path'], {}), '(gen_image_path)\n', (3129, 3145), False, 'import cv2\n'), ((3183, 3216), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': 'np.int'}), '(img.shape, dtype=np.int)\n', (3191, 3216), True, 'import numpy as np\n'), ((659, 683), 'numpy.random.randint', 'np.random.randint', (['(0)', '(15)'], {}), '(0, 15)\n', (676, 683), True, 'import numpy as np\n'), ((761, 785), 'numpy.random.randint', 'np.random.randint', (['(0)', '(15)'], {}), '(0, 15)\n', (778, 785), True, 'import numpy as np\n'), ((879, 903), 'numpy.random.randint', 'np.random.randint', (['(0)', '(15)'], {}), '(0, 15)\n', (896, 903), True, 'import numpy as np\n'), ((996, 1020), 'numpy.random.randint', 'np.random.randint', (['(0)', '(15)'], {}), '(0, 15)\n', (1013, 1020), True, 'import numpy as np\n')] |
# encoding: utf-8
"""
@version: ??
@author: Mouse
@license: Apache Licence
@contact: <EMAIL>
@software: PyCharm
@file: homework_1.py
@time: 2018/5/2 22:03
"""
import tensorflow as tf
import numpy as np
def main():
    """
    Use placeholders to compute Y = A @ B + C (TensorFlow 1.x graph mode).
    :return: None; prints the intermediate product and the final result.
    """
    # Define three placeholder nodes a, b, c
    a = tf.placeholder(tf.float32, [3, 4])
    b = tf.placeholder(tf.float32, [4, 3])
    c = tf.placeholder(tf.float32, [3, 3])
    # Compute a @ b
    mul = tf.matmul(a, b)
    y = tf.add(mul, c)
    # Use the default graph
    with tf.Session() as sess:
        # Run each step, feeding concrete values for the placeholders
        np.random.seed(5)
        ax = sess.run(mul, feed_dict={a: np.random.random((3, 4)), b: np.random.random((4, 3))})
        print(ax)
        # Feed the precomputed product directly into `mul` to evaluate y = mul + c
        y = sess.run(y, feed_dict={mul: ax, c: np.random.random((3, 3))})
        print(y)
    # Leaving the context closes the session and frees resources; no Session.close() needed
if __name__ == '__main__':
main()
| [
"numpy.random.random",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.add",
"numpy.random.seed",
"tensorflow.matmul"
] | [((305, 339), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[3, 4]'], {}), '(tf.float32, [3, 4])\n', (319, 339), True, 'import tensorflow as tf\n'), ((348, 382), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[4, 3]'], {}), '(tf.float32, [4, 3])\n', (362, 382), True, 'import tensorflow as tf\n'), ((391, 425), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[3, 3]'], {}), '(tf.float32, [3, 3])\n', (405, 425), True, 'import tensorflow as tf\n'), ((448, 463), 'tensorflow.matmul', 'tf.matmul', (['a', 'b'], {}), '(a, b)\n', (457, 463), True, 'import tensorflow as tf\n'), ((472, 486), 'tensorflow.add', 'tf.add', (['mul', 'c'], {}), '(mul, c)\n', (478, 486), True, 'import tensorflow as tf\n'), ((509, 521), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (519, 521), True, 'import tensorflow as tf\n'), ((559, 576), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (573, 576), True, 'import numpy as np\n'), ((618, 642), 'numpy.random.random', 'np.random.random', (['(3, 4)'], {}), '((3, 4))\n', (634, 642), True, 'import numpy as np\n'), ((647, 671), 'numpy.random.random', 'np.random.random', (['(4, 3)'], {}), '((4, 3))\n', (663, 671), True, 'import numpy as np\n'), ((739, 763), 'numpy.random.random', 'np.random.random', (['(3, 3)'], {}), '((3, 3))\n', (755, 763), True, 'import numpy as np\n')] |
'''
File: PointSpeckleProc.py
Description: Processes for ultrasound point speckles
History:
Date Programmer SAR# - Description
---------- ---------- ----------------------------
Author: <EMAIL> 04FEB2020 - Created
Requirements:
numpy
scipy
Known Bug:
None
All rights reserved.
'''
_version='2.4.0'
import logging
logger = logging.getLogger(__name__)
import os
import numpy as np
from scipy.ndimage import shift
from scipy import signal
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.filters import uniform_filter
from scipy.ndimage.measurements import variance
from scipy.stats import norm
import processFunc
#Optional dependancies
def gKern(std, kernlen=7, normalize=None):
    """Return an N-D separable Gaussian kernel.

    Parameters
    ----------
    std : sequence of float
        Standard deviation (in samples) along each axis.
    kernlen : int, float or sequence, optional
        Kernel length per axis, expressed in units of that axis' std
        (default 7, i.e. each axis spans roughly 7 sigma).
    normalize : float, optional
        When given, rescale the kernel so its peak equals this value.

    Returns
    -------
    numpy.ndarray
        Kernel of shape (int(kernlen[0]*std[0]), ..., int(kernlen[N-1]*std[N-1])).

    Notes
    -----
    BUGFIX: the original crashed — np.zeros() was called on a list of arrays,
    the per-axis window length multiplied an int by the whole ``std`` sequence,
    and the function returned an undefined ``gkern2d``.  This version
    implements the evident intent as a separable outer product.  It also uses
    ``signal.windows.gaussian`` since ``signal.gaussian`` was removed from
    modern SciPy.
    """
    if isinstance(kernlen, (float, int)):
        kernlen = (kernlen * np.ones(len(std))).astype(int)
    # One 1-D Gaussian window per axis; length scales with that axis' std.
    windows = [
        signal.windows.gaussian(max(1, int(kernlen[n] * std[n])), std=std[n])
        for n in range(len(std))
    ]
    # Separable N-D kernel = outer product of the per-axis windows.
    gkern = windows[0]
    for win in windows[1:]:
        gkern = np.multiply.outer(gkern, win)
    if normalize:
        gkern *= normalize / gkern.max()
    return gkern
def getSpecklePoint(image,smoothingSize,prefilter=None):
    """Reduce an image to isolated local-maximum "speckle" points.

    Keeps only pixels that are local maxima within a +/-smoothingSize
    neighbourhood along the trailing axes (zeroing everything else), then
    collapses clusters of surviving points into a single averaged point.

    Args:
        image: object exposing ``.data`` (ndarray) and ``.clone()``.
        smoothingSize: per-axis neighbourhood half-size for the trailing axes
            of ``image.data``; leading axes are left untouched.
        prefilter: if ``'gaussian'``, maxima are detected on a Gaussian-smoothed
            copy (sigma = smoothingSize) using only the +/-1 neighbourhood.

    Returns:
        A new image object with non-maximum pixels zeroed and clustered
        maxima merged to their mean position/value.
    """
    newImage=image.clone()
    # Number of leading axes (e.g. batch/frame) excluded from the neighbourhood.
    extraDim=len(image.data.shape)-len(smoothingSize)
    if prefilter=='gaussian':
        # Smooth first, then compare each voxel against its +/-1 shifted copies.
        tempArray=gaussian_filter(image.data, [*np.zeros(extraDim).astype(int),*smoothingSize],mode='constant')
        i=[]
        # NOTE(review): reshape((2,-1)) hard-codes two offset axes, so this
        # appears to assume len(smoothingSize) == 2 — confirm for other ranks.
        adjust=np.mgrid[tuple([slice(-1,2)]*len(smoothingSize))].reshape((2,-1)).T
        for adj in adjust:
            i.append(shift(tempArray,[*np.zeros(extraDim).astype(int),*adj],order=0))
        i=np.array(i)
        # Zero each pixel exceeded by any neighbour of the smoothed image.
        newImage.data[np.max(i,axis=0)>tempArray]=0
    else:
        i=[]
        sliceList=[]
        for n in range(len(smoothingSize)):
            sliceList.append(slice(-smoothingSize[n],smoothingSize[n]+1))
        # Offsets covering the full +/-smoothingSize neighbourhood.
        adjust=np.mgrid[tuple(sliceList)].reshape((2,-1)).T
        for adj in adjust:
            i.append(shift(image.data,[*np.zeros(extraDim).astype(int),*adj],order=0))
        i=np.array(i)
        # Zero each pixel exceeded by any shifted copy of the raw image.
        newImage.data[np.max(i,axis=0)>image.data]=0
    #remove duplicate: merge clusters of surviving points into one point each
    nnzInd=np.transpose(np.nonzero(newImage.data))
    for checkInd in nnzInd:
        if newImage.data[tuple(checkInd)]>0:
            currentCheckIndex=checkInd.copy()
            # Iteratively re-centre on the mean of nearby surviving points
            # (at most 10 refinement steps).
            for maxN in range(10):
                sliceList=list(currentCheckIndex[:extraDim])
                addind=np.zeros(len(smoothingSize),dtype=int)
                for nn in range(len(smoothingSize)):
                    sliceList.append(slice(max(0,currentCheckIndex[nn+extraDim]-int(np.ceil(smoothingSize[nn]))),min(newImage.data.shape[nn+extraDim],currentCheckIndex[nn+extraDim]+int(np.ceil(smoothingSize[nn]))+1)))
                    addind[nn]=max(0,currentCheckIndex[nn+extraDim]-int(np.ceil(smoothingSize[nn])))
                repeatedInd=np.transpose(np.nonzero(newImage.data[tuple(sliceList)]))
                if repeatedInd.shape[0]>1:
                    temp=np.around(np.mean(repeatedInd,axis=0)).astype(int)+addind
                    if np.all(temp==currentCheckIndex[extraDim:]):
                        break
                    else:
                        currentCheckIndex[extraDim:]=temp.copy()
                else:
                    break
            sliceList=list(currentCheckIndex[:extraDim])
            # NOTE(review): emptyslice is assigned but never used.
            emptyslice=False
            for nn in range(len(smoothingSize)):
                sliceList.append(slice(max(0,currentCheckIndex[nn+extraDim]-int(np.ceil(smoothingSize[nn]))),min(newImage.data.shape[nn+extraDim],currentCheckIndex[nn+extraDim]+int(np.ceil(smoothingSize[nn]))+1)))
            # Replace the whole cluster by a single point holding the cluster mean.
            temp_avg=newImage.data[tuple(sliceList)][newImage.data[tuple(sliceList)]>0].mean()
            newImage.data[tuple(sliceList)]=0
            newImage.data[tuple(currentCheckIndex)]=temp_avg
    return newImage
def reduceRepeat(image,checkSize,removeSingleton=0):
    """Down-weight points that recur across the second axis (e.g. time frames).

    For every frame ``base`` along axis 1, counts in how many other frames a
    point appears within +/-checkSize of the same location, then rescales the
    frame so repeated points are attenuated; optionally removes points seen
    in at most ``removeSingleton`` frames.

    Args:
        image: object exposing ``.data`` (ndarray, frames on axis 1) and ``.clone()``.
        checkSize: (y, x) half-size of the match window between frames.
        removeSingleton: if > 0, zero points whose frame-count is <= this value.

    Returns:
        A new image object with per-frame values rescaled.
    """
    # NOTE(review): shadows the builtin ``reduce``; equals (num frames - 1).
    reduce=image.data.shape[1]-1
    newImage=image.clone()
    for base in range(image.data.shape[1]):
        # ii[0] is the occupancy mask of the base frame itself.
        ii=[image.data[:,base].astype(bool)]
        for t in range(image.data.shape[1]):
            if t==base:
                continue
            i=[]
            # Dilate frame t by the check window so nearby points count as matches.
            for y in range(-checkSize[0],checkSize[0]+1):
                for x in range(-checkSize[1],checkSize[1]+1):
                    i.append(shift(image.data[:,t],[0,y,x],order=0))
            ii.append(np.any(np.array(i),axis=0))
        ii=np.array(ii).astype(int)
        # Number of frames (incl. base) containing each point, floored at 1.
        temp_sum=np.maximum(np.sum(ii,axis=0),1).astype(float)
        if removeSingleton>0:
            newImage.data[:,base][temp_sum<=removeSingleton]=0
        # Attenuation falls linearly with the repeat count.
        newImage.data[:,base]*=(1-(temp_sum-1)/reduce)/temp_sum
    return newImage
def reduceNonRandom(image,sigmas,densityApprox=None,dimSigmaFactor=1.,average=False,truncate=0.5,dim='t',useCorrDet=False):
    """Down-weight speckle points whose local density looks non-random.

    Builds a Gaussian density estimate of the occupied voxels (summed over
    axis 1), models the expected density under randomness with a normal
    distribution, and scales each point by the likelihood of its observed
    density.

    Args:
        image: object exposing ``.data``, ``.dim`` (axis-name list) and ``.clone()``.
        sigmas: per-axis Gaussian sigmas for the density estimate.
        densityApprox: optional override for the occupancy density.
        dimSigmaFactor: width factor of the density model; 0 disables the
            reweighting, negative values return the raw likelihood array.
        average: if True, also normalise by an isotropic (mean-sigma) density.
        truncate: Gaussian truncation used only for the isotropic filter.
        dim: name of the axis looked up in ``image.dim``.
        useCorrDet: if True, additionally scale by the determinant of the local
            point-coordinate correlation matrix (needs a large sample size).

    Returns:
        A new image object with reweighted values, or a plain ndarray of
        likelihoods when ``dimSigmaFactor`` is negative.
    """
    oneNzeroArray=image.data.astype(bool).astype(float)
    # Peak response of the Gaussian filter to a unit impulse (normalisation).
    normVal=(1./gaussian_filter(np.ones(np.ones(len(sigmas)).astype(int)), sigmas,mode='constant')).max()
    imgSigmaData=normVal*gaussian_filter(oneNzeroArray, [*np.zeros(len(image.data.shape)-len(sigmas)).astype(int),*sigmas],mode='constant')
    imgSigmaData=imgSigmaData.sum(axis=1)
    sigmaAvg=np.ones(len(sigmas))*np.mean(sigmas)
    if average:
        normValAvg=(1./gaussian_filter(np.ones(np.ones(len(sigmaAvg)).astype(int)), sigmaAvg,mode='constant',truncate=truncate)).max()
        imgSigmaAvgData=normValAvg*gaussian_filter(oneNzeroArray, [*np.zeros(len(image.data.shape)-len(sigmaAvg)).astype(int),*sigmaAvg],mode='constant',truncate=truncate)
        imgSigmaAvgData=imgSigmaAvgData.sum(axis=1)
    if densityApprox:
        density=densityApprox
    else:
        # Fraction of occupied voxels in the whole array.
        density=float(oneNzeroArray[oneNzeroArray>0].size)/oneNzeroArray.size
    dimInd=image.dim.index(dim)
    dimIndSlice=[slice(None)]*dimInd
    newImage=image.clone()
    if dimSigmaFactor!=0:
        # Expected density if points were random, and its assumed spread.
        mean=normVal*density*(image.data.shape[dimInd]-1)+1
        std=(mean-1)*abs(dimSigmaFactor)
        logger.info('Using mean and std of: {0:.3f} , {1:.3f}'.format(mean,std))
        nProb=norm(mean,std)
        # Scale so that a point at exactly the expected density keeps weight 1.
        normalize=1./nProb.pdf(mean)
        for t in range(image.data.shape[dimInd]):
            if average:
                newImage.data[(*dimIndSlice,t)][newImage.data[(*dimIndSlice,t)]>0]*=normalize*nProb.pdf(imgSigmaData[newImage.data[(*dimIndSlice,t)]>0])/imgSigmaAvgData[newImage.data[(*dimIndSlice,t)]>0]
            elif dimSigmaFactor<0:
                newImage.data[(*dimIndSlice,t)][newImage.data[(*dimIndSlice,t)]>0]=normalize*nProb.pdf(imgSigmaData[newImage.data[(*dimIndSlice,t)]>0])
            else:
                newImage.data[(*dimIndSlice,t)][newImage.data[(*dimIndSlice,t)]>0]*=normalize*nProb.pdf(imgSigmaData[newImage.data[(*dimIndSlice,t)]>0])
    if dimSigmaFactor<0:
        # Negative factor: caller wants the raw likelihood array, not an image.
        return newImage.data
    if useCorrDet:
        logger.warning('Warning, use only when sample size is large.')
        corrdet=[]
        posALL=[]
        maxCorrdet=0
        for t in range(image.data.shape[dimInd]):
            pos=np.array(np.nonzero(image.data[(*dimIndSlice,t)]>0))
            posALL.append(pos.copy())
            corrdet.append(np.zeros(pos.shape[1]))
            for n in range(pos.shape[1]):
                # Points within 2*sigmaAvg of the current point (last two axes).
                nearbyPosInd=np.nonzero(np.all(np.logical_and(pos[-2:]>=(pos[-2:,n]-np.ceil(sigmaAvg*2)).reshape((-1,1)),pos[-2:]<=(pos[-2:,n]+np.ceil(sigmaAvg*2)).reshape((-1,1))),axis=0))[0]
                if len(nearbyPosInd)>(3**2.):
                    temp=np.corrcoef(pos[:,nearbyPosInd])
                    if np.any(np.isnan(temp)):
                        temp=0
                    else:
                        temp=np.linalg.det(temp)
                    corrdet[-1][n]=temp
                elif np.any(pos[-2:,n]<np.ceil(sigmaAvg*2)) or np.any((pos[-2:,n]+np.ceil(sigmaAvg*2))>=image.data.shape[-2:]):
                    # Border points are flagged with -1 and later left unscaled.
                    corrdet[-1][n]=-1
            if maxCorrdet<corrdet[-1].max():
                maxCorrdet=corrdet[-1].max()
        if maxCorrdet<0.5:
            logger.warning('Warning, determinant or correlation matrix has a maximum value of {0:.3e}'.format(maxCorrdet))
        for t in range(image.data.shape[dimInd]):
            corrdet[t]=corrdet[t]/maxCorrdet
            corrdet[t][corrdet[t]<0]=1.
            newImage.data[(*dimIndSlice,t)][tuple(posALL[t])]*=corrdet[t]
    return newImage
def applyThreshold(image, threshold):
    """Zero out values at or below a cutoff derived from the frame count.

    The cutoff is 255 minus ``threshold`` steps of size int(255 / number of
    frames on axis 1); the input image is left untouched.
    """
    step = int(255 / image.data.shape[1])
    cutoff = 255 - threshold * step
    result = image.clone()
    result.data[result.data <= cutoff] = 0
    return result
def singleOutFilter(speckleImageArray, sigma):
    """Keep only above-threshold points supported by nearby neighbours.

    A point is deleted unless at least two of the four diagonal sigma-offset
    corner windows around it contain another above-threshold point; points
    within 2*sigma of the array border are always kept.  The threshold is
    10% of the array maximum.

    Args:
        speckleImageArray: ndarray of speckle values.
        sigma: per-axis window size for the corner neighbourhood checks
            (applied to the last two axes).

    Returns:
        A new ndarray of the same shape holding only the surviving points.
    """
    threshold=speckleImageArray.max()*0.1
    newpoints=np.transpose(np.nonzero(speckleImageArray>threshold))
    val=speckleImageArray[np.nonzero(speckleImageArray>threshold)].copy()
    logger.info('Single out '+repr(len(newpoints))+' number of points')
    # Iterate backwards so deletions do not shift the indices still to visit.
    # NOTE(review): earlier points are checked against the already-pruned set,
    # so the result is order-dependent.
    for n in range(len(newpoints)-1,-1,-1):
        # toRemove counts down; the point is deleted while it stays non-zero.
        toRemove=2
        for nn in range(-2,0,-1):
            # Border points (last two axes) are never removed.
            if newpoints[n][nn]<(2*sigma[nn]) or newpoints[n][nn]>(speckleImageArray.shape[nn]-2*sigma[nn]-1):
                toRemove=0
        # Check the four diagonal corner windows for neighbouring points.
        for nn in [[1,1],[1,-2],[-2,1],[-2,-2]]:
            if not(toRemove):
                break
            mincoord=newpoints[n][-2:]+np.array(nn)*sigma
            if np.any(np.logical_and(newpoints[:,-2:]>=mincoord,newpoints[:,-2:]<=(mincoord+sigma))):
                toRemove-=1
        if toRemove:
            newpoints=np.delete(newpoints,n,axis=0)
            val=np.delete(val,n,axis=0)
        elif toRemove==1:
            # NOTE(review): unreachable — toRemove==1 is truthy, so the branch
            # above always wins and this debug log can never fire.
            logger.debug('at least i have 1')
    logger.info(' to '+repr(len(newpoints))+' number of points')
    newpoints=np.around(newpoints).astype(int)
    newImageArray=np.zeros(speckleImageArray.shape)
    newImageArray[tuple(newpoints.T)]=val
    return newImageArray
def spreadSpeckle(image,spreadSize,overlay=False,overlayFunc=np.max,averageSigma=True,dim='t'):
    """Blur speckle points into smooth blobs and rescale towards 0-255.

    Args:
        image: object exposing ``.data``, ``.dim``, ``.clone()`` and ``.removeDim()``.
        spreadSize: per-axis Gaussian sigma for the spreading.
        overlay: if True, first collapse axis ``dim`` with ``overlayFunc``.
        overlayFunc: reducer used when overlaying (default ``np.max``).
        averageSigma: if True, use one isotropic sigma equal to mean(spreadSize).
        dim: name of the axis collapsed when ``overlay`` is set.

    Returns:
        A new image object; values scaled so the 99th percentile of the input
        maps to 255, then clipped to at most 255.
    """
    newImg=image.clone()
    percent99=np.percentile(newImg.data[newImg.data>0],99)
    if averageSigma:
        spreadSizeAvg=np.ones(len(spreadSize))*np.mean(spreadSize)
    else:
        spreadSizeAvg=spreadSize
    # Peak response of the Gaussian to a unit impulse; used to normalise.
    normVal=(1./gaussian_filter(np.ones(np.ones(len(spreadSizeAvg)).astype(int)), spreadSizeAvg,mode='constant')).max()
    if overlay:
        newImg.removeDim(dim)
        newImg.data=overlayFunc(image.data,axis=image.dim.index(dim))
    # NOTE(review): mode='wrap' makes the spreading periodic at the borders.
    newImg.data=normVal*gaussian_filter(newImg.data, [*np.zeros(len(newImg.data.shape)-len(spreadSizeAvg)).astype(int),*spreadSizeAvg],mode='wrap')
    newImg.data*=255/percent99
    newImg.data=np.minimum(255,newImg.data)
    return newImg
def speckleTransform(speckleImageArray,transformFolder,fromTime,toTime=None,totalTimeSteps=None,Eulerian=True,highErrorDim=3):
    """Propagate speckle points through precomputed frame-to-frame transforms.

    Point coordinates are read from the non-zero entries of
    ``speckleImageArray`` and mapped through transform files named
    ``<transformFolder>/tstep<A>to<B>_0.txt`` via
    ``processFunc.transform_img2img``.

    Args:
        speckleImageArray: ndarray whose non-zero voxels are the points.
        transformFolder: directory holding the per-step transform files.
        fromTime: time index the points belong to.
        toTime: target time index; if None (with Eulerian and totalTimeSteps),
            the points are propagated to every time step.
        totalTimeSteps: total number of time steps in the cyclic sequence.
        Eulerian: if True, chain the per-step transforms (cyclically); if
            False, apply the single fromTime->toTime transform file.
        highErrorDim: controls down-weighting of values at time steps with a
            high forward/backward interpolation error (all-time-steps case
            only); may be an int, True/0, or a callable on the error profile.

    Returns:
        ndarray: the transformed speckle images stacked over time steps when
        toTime is None (Eulerian, totalTimeSteps given), otherwise a single
        image array.
    """
    fromTime=int(fromTime)
    if type(toTime)!=type(None):
        toTime=int(toTime)
    # Point coordinates in reversed (x, y, ...) axis order, plus their values.
    pos=np.transpose(np.nonzero(speckleImageArray))[:,::-1]
    val=speckleImageArray[np.nonzero(speckleImageArray)].copy()
    if Eulerian:
        if totalTimeSteps:
            #Forward pass: chain transforms with increasing time (cyclic).
            currentTime=fromTime
            if type(toTime)==type(None):
                temp_toTime=fromTime-1
            else:
                temp_toTime=toTime
            if temp_toTime<0:
                temp_toTime=totalTimeSteps-1
            posF=[pos.copy()]
            incr=1
            Fcount=0
            while currentTime!=temp_toTime:
                if currentTime+incr<totalTimeSteps:
                    nextTime=currentTime+incr
                else:
                    nextTime=0
                file=transformFolder+'/tstep'+str(currentTime)+'to'+str(nextTime)+'_0.txt'
                if not(os.path.isfile(file)):
                    logger.error('ERROR '+file+' does not exist')
                posF.append(processFunc.transform_img2img(posF[-1],file,savePath=transformFolder))
                currentTime=nextTime
                Fcount+=1
            #Backward pass: chain transforms with decreasing time (cyclic).
            currentTime=fromTime
            if type(toTime)==type(None):
                temp_toTime=fromTime+1
            else:
                temp_toTime=toTime
            if temp_toTime>=totalTimeSteps:
                temp_toTime=0
            posB=[pos.copy()]
            incr=-1
            Bcount=0
            while currentTime!=temp_toTime:
                if currentTime+incr>=0:
                    nextTime=currentTime+incr
                else:
                    nextTime=totalTimeSteps-1
                file=transformFolder+'/tstep'+str(currentTime)+'to'+str(nextTime)+'_0.txt'
                if not(os.path.isfile(file)):
                    logger.error('ERROR '+file+' does not exist')
                posB.append(processFunc.transform_img2img(posB[-1],file,savePath=transformFolder))
                currentTime=nextTime
                Bcount+=1
            if type(toTime)==type(None):
                # Blend forward and backward passes, weighting each time step
                # by its distance from fromTime along either direction.
                Fratio=1./(1.+np.arange(totalTimeSteps)/np.arange(totalTimeSteps,0,-1))
                posF=np.roll(np.array(posF),fromTime,axis=0)
                Fratio=np.roll(Fratio,fromTime)
                posB=np.roll(np.array(posB)[::-1],fromTime+1,axis=0)
                newpos=Fratio.reshape((-1,1,1))*posF+(1-Fratio.reshape((-1,1,1)))*posB
            else:
                # Single target: average the two passes weighted by path length.
                newpos=np.array([(Bcount*posF[-1]+Fcount*posB[-1])/(Fcount+Bcount)])
        else:
            # No cycle length known: walk straight from fromTime to toTime.
            currentTime=fromTime
            newpos=pos.copy()
            if currentTime>toTime:
                incr=-1
            elif currentTime<toTime:
                incr=1
            while currentTime!=toTime:
                file=transformFolder+'/tstep'+str(currentTime)+'to'+str(currentTime+incr)+'_0.txt'
                if not(os.path.isfile(file)):
                    logger.error('ERROR '+file+' does not exist')
                newpos=processFunc.transform_img2img(newpos,file,savePath=transformFolder)
                currentTime+=incr
            newpos=np.array([newpos])
    else:
        # Lagrangian: one direct transform file from fromTime to toTime.
        file=transformFolder+'/tstep'+str(fromTime)+'to'+str(toTime)+'_0.txt'
        if not(os.path.isfile(file)):
            logger.error('ERROR '+file+' does not exist')
        newpos=np.array([processFunc.transform_img2img(pos,file,savePath=transformFolder)])
    if type(toTime)==type(None) and Eulerian and totalTimeSteps:
        runN=totalTimeSteps
    else:
        runN=1
    newArray=[]
    newpos=np.around(newpos).astype(int)
    if highErrorDim and runN>1:
        # Interpolation error peaks midway between the two passes.
        error=Fratio*(1-Fratio)*totalTimeSteps
        # NOTE(review): highErrorDim==0 can never be reached here because the
        # enclosing `if highErrorDim` already excludes 0 (falsy).
        if highErrorDim==True or highErrorDim==0:
            accScaling=1./np.maximum(1.,error)
        elif isinstance(highErrorDim,int):
            if highErrorDim<int((len(error)+1)/2):
                accScaling=1./np.maximum(1.,error/error[np.argmin(error)-highErrorDim])
            else:
                accScaling=np.ones(runN)
        else:
            # Assumed to be a callable mapping the error profile to scalings.
            accScaling=highErrorDim(error)
    else:
        accScaling=np.ones(runN)
    for n in range(runN):
        newArray.append(speckleImageArray.copy())
        # Keep only points that land inside the image bounds.
        get=np.all(np.logical_and(newpos[n]>=0,newpos[n]<np.array(speckleImageArray.shape)[::-1]),axis=-1)
        temppos=newpos[n][get]
        tempval=val[get]
        newArray[-1][:]=0
        newArray[-1][tuple(temppos[:,::-1].T)]=tempval.copy()*accScaling[n]
    if runN==1:
        newArray=newArray[0]
    return np.array(newArray)
| [
"logging.getLogger",
"numpy.array",
"numpy.arange",
"numpy.mean",
"numpy.delete",
"numpy.max",
"numpy.argmin",
"numpy.maximum",
"numpy.ceil",
"numpy.ones",
"numpy.corrcoef",
"processFunc.transform_img2img",
"os.path.isfile",
"numpy.isnan",
"numpy.around",
"numpy.nonzero",
"numpy.roll... | [((368, 395), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (385, 395), False, 'import logging\n'), ((1010, 1027), 'numpy.zeros', 'np.zeros', (['gkern1d'], {}), '(gkern1d)\n', (1018, 1027), True, 'import numpy as np\n'), ((9879, 9912), 'numpy.zeros', 'np.zeros', (['speckleImageArray.shape'], {}), '(speckleImageArray.shape)\n', (9887, 9912), True, 'import numpy as np\n'), ((10116, 10163), 'numpy.percentile', 'np.percentile', (['newImg.data[newImg.data > 0]', '(99)'], {}), '(newImg.data[newImg.data > 0], 99)\n', (10129, 10163), True, 'import numpy as np\n'), ((10723, 10751), 'numpy.minimum', 'np.minimum', (['(255)', 'newImg.data'], {}), '(255, newImg.data)\n', (10733, 10751), True, 'import numpy as np\n'), ((15411, 15429), 'numpy.array', 'np.array', (['newArray'], {}), '(newArray)\n', (15419, 15429), True, 'import numpy as np\n'), ((1836, 1847), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (1844, 1847), True, 'import numpy as np\n'), ((2246, 2257), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (2254, 2257), True, 'import numpy as np\n'), ((2358, 2383), 'numpy.nonzero', 'np.nonzero', (['newImage.data'], {}), '(newImage.data)\n', (2368, 2383), True, 'import numpy as np\n'), ((5400, 5415), 'numpy.mean', 'np.mean', (['sigmas'], {}), '(sigmas)\n', (5407, 5415), True, 'import numpy as np\n'), ((6264, 6279), 'scipy.stats.norm', 'norm', (['mean', 'std'], {}), '(mean, std)\n', (6268, 6279), False, 'from scipy.stats import norm\n'), ((8850, 8891), 'numpy.nonzero', 'np.nonzero', (['(speckleImageArray > threshold)'], {}), '(speckleImageArray > threshold)\n', (8860, 8891), True, 'import numpy as np\n'), ((15000, 15013), 'numpy.ones', 'np.ones', (['runN'], {}), '(runN)\n', (15007, 15013), True, 'import numpy as np\n'), ((953, 998), 'scipy.signal.gaussian', 'signal.gaussian', (['(kernlen[n] * std)'], {'std': 'std[n]'}), '(kernlen[n] * std, std=std[n])\n', (968, 998), False, 'from scipy import signal\n'), ((9604, 9635), 
'numpy.delete', 'np.delete', (['newpoints', 'n'], {'axis': '(0)'}), '(newpoints, n, axis=0)\n', (9613, 9635), True, 'import numpy as np\n'), ((9650, 9675), 'numpy.delete', 'np.delete', (['val', 'n'], {'axis': '(0)'}), '(val, n, axis=0)\n', (9659, 9675), True, 'import numpy as np\n'), ((9828, 9848), 'numpy.around', 'np.around', (['newpoints'], {}), '(newpoints)\n', (9837, 9848), True, 'import numpy as np\n'), ((10229, 10248), 'numpy.mean', 'np.mean', (['spreadSize'], {}), '(spreadSize)\n', (10236, 10248), True, 'import numpy as np\n'), ((11005, 11034), 'numpy.nonzero', 'np.nonzero', (['speckleImageArray'], {}), '(speckleImageArray)\n', (11015, 11034), True, 'import numpy as np\n'), ((14027, 14045), 'numpy.array', 'np.array', (['[newpos]'], {}), '([newpos])\n', (14035, 14045), True, 'import numpy as np\n'), ((14149, 14169), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (14163, 14169), False, 'import os\n'), ((14467, 14484), 'numpy.around', 'np.around', (['newpos'], {}), '(newpos)\n', (14476, 14484), True, 'import numpy as np\n'), ((1870, 1887), 'numpy.max', 'np.max', (['i'], {'axis': '(0)'}), '(i, axis=0)\n', (1876, 1887), True, 'import numpy as np\n'), ((2280, 2297), 'numpy.max', 'np.max', (['i'], {'axis': '(0)'}), '(i, axis=0)\n', (2286, 2297), True, 'import numpy as np\n'), ((4624, 4636), 'numpy.array', 'np.array', (['ii'], {}), '(ii)\n', (4632, 4636), True, 'import numpy as np\n'), ((7246, 7289), 'numpy.nonzero', 'np.nonzero', (['(image.data[*dimIndSlice, t] > 0)'], {}), '(image.data[*dimIndSlice, t] > 0)\n', (7256, 7289), True, 'import numpy as np\n'), ((7355, 7377), 'numpy.zeros', 'np.zeros', (['pos.shape[1]'], {}), '(pos.shape[1])\n', (7363, 7377), True, 'import numpy as np\n'), ((8917, 8958), 'numpy.nonzero', 'np.nonzero', (['(speckleImageArray > threshold)'], {}), '(speckleImageArray > threshold)\n', (8927, 8958), True, 'import numpy as np\n'), ((9453, 9541), 'numpy.logical_and', 'np.logical_and', (['(newpoints[:, -2:] >= mincoord)', 
'(newpoints[:, -2:] <= mincoord + sigma)'], {}), '(newpoints[:, -2:] >= mincoord, newpoints[:, -2:] <= mincoord +\n sigma)\n', (9467, 9541), True, 'import numpy as np\n'), ((11070, 11099), 'numpy.nonzero', 'np.nonzero', (['speckleImageArray'], {}), '(speckleImageArray)\n', (11080, 11099), True, 'import numpy as np\n'), ((13153, 13178), 'numpy.roll', 'np.roll', (['Fratio', 'fromTime'], {}), '(Fratio, fromTime)\n', (13160, 13178), True, 'import numpy as np\n'), ((13375, 13446), 'numpy.array', 'np.array', (['[(Bcount * posF[-1] + Fcount * posB[-1]) / (Fcount + Bcount)]'], {}), '([(Bcount * posF[-1] + Fcount * posB[-1]) / (Fcount + Bcount)])\n', (13383, 13446), True, 'import numpy as np\n'), ((13906, 13975), 'processFunc.transform_img2img', 'processFunc.transform_img2img', (['newpos', 'file'], {'savePath': 'transformFolder'}), '(newpos, file, savePath=transformFolder)\n', (13935, 13975), False, 'import processFunc\n'), ((14255, 14321), 'processFunc.transform_img2img', 'processFunc.transform_img2img', (['pos', 'file'], {'savePath': 'transformFolder'}), '(pos, file, savePath=transformFolder)\n', (14284, 14321), False, 'import processFunc\n'), ((14652, 14674), 'numpy.maximum', 'np.maximum', (['(1.0)', 'error'], {}), '(1.0, error)\n', (14662, 14674), True, 'import numpy as np\n'), ((3269, 3313), 'numpy.all', 'np.all', (['(temp == currentCheckIndex[extraDim:])'], {}), '(temp == currentCheckIndex[extraDim:])\n', (3275, 3313), True, 'import numpy as np\n'), ((4592, 4603), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (4600, 4603), True, 'import numpy as np\n'), ((4677, 4695), 'numpy.sum', 'np.sum', (['ii'], {'axis': '(0)'}), '(ii, axis=0)\n', (4683, 4695), True, 'import numpy as np\n'), ((7685, 7718), 'numpy.corrcoef', 'np.corrcoef', (['pos[:, nearbyPosInd]'], {}), '(pos[:, nearbyPosInd])\n', (7696, 7718), True, 'import numpy as np\n'), ((9412, 9424), 'numpy.array', 'np.array', (['nn'], {}), '(nn)\n', (9420, 9424), True, 'import numpy as np\n'), ((11793, 11813), 
'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (11807, 11813), False, 'import os\n'), ((11910, 11981), 'processFunc.transform_img2img', 'processFunc.transform_img2img', (['posF[-1]', 'file'], {'savePath': 'transformFolder'}), '(posF[-1], file, savePath=transformFolder)\n', (11939, 11981), False, 'import processFunc\n'), ((12689, 12709), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (12703, 12709), False, 'import os\n'), ((12806, 12877), 'processFunc.transform_img2img', 'processFunc.transform_img2img', (['posB[-1]', 'file'], {'savePath': 'transformFolder'}), '(posB[-1], file, savePath=transformFolder)\n', (12835, 12877), False, 'import processFunc\n'), ((13098, 13112), 'numpy.array', 'np.array', (['posF'], {}), '(posF)\n', (13106, 13112), True, 'import numpy as np\n'), ((13794, 13814), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (13808, 13814), False, 'import os\n'), ((14900, 14913), 'numpy.ones', 'np.ones', (['runN'], {}), '(runN)\n', (14907, 14913), True, 'import numpy as np\n'), ((4523, 4566), 'scipy.ndimage.shift', 'shift', (['image.data[:, t]', '[0, y, x]'], {'order': '(0)'}), '(image.data[:, t], [0, y, x], order=0)\n', (4528, 4566), False, 'from scipy.ndimage import shift\n'), ((7748, 7762), 'numpy.isnan', 'np.isnan', (['temp'], {}), '(temp)\n', (7756, 7762), True, 'import numpy as np\n'), ((7851, 7870), 'numpy.linalg.det', 'np.linalg.det', (['temp'], {}), '(temp)\n', (7864, 7870), True, 'import numpy as np\n'), ((13207, 13221), 'numpy.array', 'np.array', (['posB'], {}), '(posB)\n', (13215, 13221), True, 'import numpy as np\n'), ((15147, 15180), 'numpy.array', 'np.array', (['speckleImageArray.shape'], {}), '(speckleImageArray.shape)\n', (15155, 15180), True, 'import numpy as np\n'), ((1553, 1571), 'numpy.zeros', 'np.zeros', (['extraDim'], {}), '(extraDim)\n', (1561, 1571), True, 'import numpy as np\n'), ((13011, 13036), 'numpy.arange', 'np.arange', (['totalTimeSteps'], {}), '(totalTimeSteps)\n', (13020, 
13036), True, 'import numpy as np\n'), ((13037, 13069), 'numpy.arange', 'np.arange', (['totalTimeSteps', '(0)', '(-1)'], {}), '(totalTimeSteps, 0, -1)\n', (13046, 13069), True, 'import numpy as np\n'), ((3005, 3031), 'numpy.ceil', 'np.ceil', (['smoothingSize[nn]'], {}), '(smoothingSize[nn])\n', (3012, 3031), True, 'import numpy as np\n'), ((7950, 7971), 'numpy.ceil', 'np.ceil', (['(sigmaAvg * 2)'], {}), '(sigmaAvg * 2)\n', (7957, 7971), True, 'import numpy as np\n'), ((1779, 1797), 'numpy.zeros', 'np.zeros', (['extraDim'], {}), '(extraDim)\n', (1787, 1797), True, 'import numpy as np\n'), ((2189, 2207), 'numpy.zeros', 'np.zeros', (['extraDim'], {}), '(extraDim)\n', (2197, 2207), True, 'import numpy as np\n'), ((3198, 3226), 'numpy.mean', 'np.mean', (['repeatedInd'], {'axis': '(0)'}), '(repeatedInd, axis=0)\n', (3205, 3226), True, 'import numpy as np\n'), ((3697, 3723), 'numpy.ceil', 'np.ceil', (['smoothingSize[nn]'], {}), '(smoothingSize[nn])\n', (3704, 3723), True, 'import numpy as np\n'), ((7993, 8014), 'numpy.ceil', 'np.ceil', (['(sigmaAvg * 2)'], {}), '(sigmaAvg * 2)\n', (8000, 8014), True, 'import numpy as np\n'), ((2799, 2825), 'numpy.ceil', 'np.ceil', (['smoothingSize[nn]'], {}), '(smoothingSize[nn])\n', (2806, 2825), True, 'import numpy as np\n'), ((3798, 3824), 'numpy.ceil', 'np.ceil', (['smoothingSize[nn]'], {}), '(smoothingSize[nn])\n', (3805, 3824), True, 'import numpy as np\n'), ((14823, 14839), 'numpy.argmin', 'np.argmin', (['error'], {}), '(error)\n', (14832, 14839), True, 'import numpy as np\n'), ((2900, 2926), 'numpy.ceil', 'np.ceil', (['smoothingSize[nn]'], {}), '(smoothingSize[nn])\n', (2907, 2926), True, 'import numpy as np\n'), ((7505, 7526), 'numpy.ceil', 'np.ceil', (['(sigmaAvg * 2)'], {}), '(sigmaAvg * 2)\n', (7512, 7526), True, 'import numpy as np\n'), ((7564, 7585), 'numpy.ceil', 'np.ceil', (['(sigmaAvg * 2)'], {}), '(sigmaAvg * 2)\n', (7571, 7585), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
from matplotlib import patches
import shutil
import torch
import numpy as np
def deboxing(bbox):
    """Convert an [x_min, y_min, x_max, y_max] box to (corner, height, width)."""
    x_min, y_min, x_max, y_max = bbox[0], bbox[1], bbox[2], bbox[3]
    return [x_min, y_min], y_max - y_min, x_max - x_min
def _draw_boxes(ax, boxes, color):
    """Draw one unfilled rectangle per bounding box on *ax* in *color*."""
    for bbox in boxes:
        corner, height, width = deboxing(bbox)
        rect = patches.Rectangle(
            corner, width, height, linewidth=1, edgecolor=color, facecolor='none'
        )
        ax.add_patch(rect)


def show_each_image(sample, boxes=None, pred_boxes=None):
    """
    plot the images with or without the bounding boxes

    Args:
        sample: image array; channel 0 is shown in grayscale.
        boxes: optional ground-truth boxes, drawn in green.
        pred_boxes: optional predicted boxes, drawn in red.
    """
    plt.figure(figsize=(16, 16))
    plt.imshow(sample[..., 0], plt.cm.gray)
    ax = plt.gca()
    # The two loops differed only in the box list and edge color; the shared
    # drawing logic now lives in _draw_boxes.
    if boxes is not None:
        _draw_boxes(ax, boxes, [0, 1, 0])
    if pred_boxes is not None:
        _draw_boxes(ax, pred_boxes, [1, 0, 0])
    plt.show()
def save_checkpoint(state, is_best, checkpoint_path, best_model_path):
    """
    Persist a training checkpoint, duplicating it as the best model if flagged.

    state: checkpoint we want to save
    is_best: is this the best checkpoint; min validation loss
    checkpoint_path: path to save checkpoint
    best_model_path: path to save best model
    """
    torch.save(state, checkpoint_path)
    if not is_best:
        return
    # The best-so-far model gets its own copy alongside the rolling checkpoint.
    shutil.copyfile(checkpoint_path, best_model_path)
def collate_fn(batch):
    """Transpose a batch of samples into per-field tuples (DataLoader collate)."""
    transposed = zip(*batch)
    return tuple(transposed)
def g_to_rgb(image):
    """Replicate a 2-D grayscale image into a channel-first 3-channel array."""
    channels_last = np.repeat(image[:, :, np.newaxis], 3, axis=2)
    # Move the channel axis to the front: (H, W, 3) -> (3, H, W).
    channels_first = np.transpose(channels_last, axes=[2, 0, 1])
    return channels_first
| [
"matplotlib.pyplot.imshow",
"matplotlib.patches.Rectangle",
"numpy.repeat",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.figure",
"shutil.copyfile",
"torch.save",
"numpy.transpose",
"matplotlib.pyplot.show"
] | [((392, 420), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 16))\n', (402, 420), True, 'import matplotlib.pyplot as plt\n'), ((425, 464), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sample[..., 0]', 'plt.cm.gray'], {}), '(sample[..., 0], plt.cm.gray)\n', (435, 464), True, 'import matplotlib.pyplot as plt\n'), ((474, 483), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (481, 483), True, 'import matplotlib.pyplot as plt\n'), ((1053, 1063), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1061, 1063), True, 'import matplotlib.pyplot as plt\n'), ((1409, 1443), 'torch.save', 'torch.save', (['state', 'checkpoint_path'], {}), '(state, checkpoint_path)\n', (1419, 1443), False, 'import torch\n'), ((1726, 1771), 'numpy.repeat', 'np.repeat', (['image[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(image[:, :, np.newaxis], 3, axis=2)\n', (1735, 1771), True, 'import numpy as np\n'), ((1784, 1815), 'numpy.transpose', 'np.transpose', (['b'], {'axes': '[2, 0, 1]'}), '(b, axes=[2, 0, 1])\n', (1796, 1815), True, 'import numpy as np\n'), ((1590, 1639), 'shutil.copyfile', 'shutil.copyfile', (['checkpoint_path', 'best_model_path'], {}), '(checkpoint_path, best_model_path)\n', (1605, 1639), False, 'import shutil\n'), ((607, 703), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['corner', 'width', 'height'], {'linewidth': '(1)', 'edgecolor': '[0, 1, 0]', 'facecolor': '"""none"""'}), "(corner, width, height, linewidth=1, edgecolor=[0, 1, 0],\n facecolor='none')\n", (624, 703), False, 'from matplotlib import patches\n'), ((894, 990), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['corner', 'width', 'height'], {'linewidth': '(1)', 'edgecolor': '[1, 0, 0]', 'facecolor': '"""none"""'}), "(corner, width, height, linewidth=1, edgecolor=[1, 0, 0],\n facecolor='none')\n", (911, 990), False, 'from matplotlib import patches\n')] |
import sqlalchemy
import logging
import os
import sys
import numpy as np
import pandas as pd
import pandas.io.sql
from itertools import chain, product
from functools import reduce
from datetime import datetime, timedelta, date
try:
from repoze.lru import lru_cache
except ImportError:
from functools import lru_cache
# One-day timedelta64 unit; dividing an interval by `day` yields a float
# number of days, e.g. (date1 - date2) / day.
day = np.timedelta64(1, 'D')
def create_engine():
    """Create a SQLAlchemy engine for PostgreSQL from PG* environment variables."""
    credentials = {
        'host': os.environ['PGHOST'],
        'db': os.environ['PGDATABASE'],
        'user': os.environ['PGUSER'],
        'pwd': os.environ['PGPASSWORD'],
    }
    url = 'postgresql://{user}:{pwd}@{host}:5432/{db}'.format(**credentials)
    return sqlalchemy.create_engine(url)
def create_db():
    """Build a PgSQLDatabase wrapper around a freshly created engine."""
    return PgSQLDatabase(create_engine())
def execute_sql(sql, engine):
    """Run *sql* inside a committed transaction on a fresh engine connection."""
    connection = engine.connect()
    transaction = connection.begin()
    connection.execute(sql)
    transaction.commit()
def mtime(path):
    """Return the last-modification time of *path* as a datetime."""
    stat_result = os.stat(path)
    return datetime.fromtimestamp(stat_result.st_mtime)
def parse_dates(df, inplace=True, *args, **kwargs):
    """
    Parse all datetime.date and datetime.datetime columns into pandas datetimes.

    Args:
        df: DataFrame whose object columns may hold date/datetime instances.
        inplace: when True, modify df and return None; otherwise convert a copy.
        *args, **kwargs: forwarded to pd.to_datetime.

    Returns:
        The converted copy when inplace is False, else None.
    """
    if not inplace:
        df = df.copy()
    for c in df.columns:
        i = df[c].first_valid_index()
        # BUG FIX: ``.ix`` was removed in pandas 1.0; ``.loc`` is the
        # label-based equivalent for fetching the first valid entry.
        if i is not None and type(df[c].loc[i]) in (date, datetime):
            df[c] = pd.to_datetime(df[c], *args, **kwargs)
    if not inplace:
        return df
def touch(path):
    """Create *path* if missing and refresh its access/modification times."""
    with open(path, 'a'):
        pass
    os.utime(path, None)
def get_subdirs(directory):
    """
    Returns: a list of subdirectories of the given directory
    """
    subdirs = []
    for name in os.listdir(directory):
        full_path = os.path.join(directory, name)
        if os.path.isdir(full_path):
            subdirs.append(full_path)
    return subdirs
def intersect(sets):
    """Intersection of a non-empty collection of sets."""
    return reduce(lambda accumulated, current: accumulated & current, sets)
def union(sets):
    """Union of a collection of sets; the empty collection yields an empty set."""
    return reduce(lambda accumulated, current: accumulated | current, sets, set())
def to_float(*args):
    """
    cast numpy arrays to float32
    if there's more than one, return an array
    """
    converted = [np.array(value, dtype=np.float32) for value in args]
    if len(converted) == 1:
        return converted[0]
    return converted
def timestamp(year, month, day):
    """
    Convenient constructor for pandas Timestamp
    """
    return pd.Timestamp(year=year, month=month, day=day)
# Reference instant used as the zero point for day counting.
epoch = np.datetime64(0, 'ns')


def date_to_days(date):
    """
    Number of days since epoch
    """
    elapsed = date - epoch
    return elapsed/day
def date_ceil(month, day):
    """Return a callable snapping a timestamp forward to the next (month, day)."""
    def ceil(t):
        anniversary = timestamp(t.year, month, day)
        if anniversary >= t:
            return anniversary
        return timestamp(t.year+1, month, day)
    return ceil
def date_floor(month, day):
    """Return a callable snapping a timestamp back to the previous (month, day)."""
    def floor(t):
        anniversary = timestamp(t.year, month, day)
        if anniversary <= t:
            return anniversary
        return timestamp(t.year-1, month, day)
    return floor
def eqattr(object1, object2, attr):
    """Whether both objects expose *attr* and the two values compare equal."""
    if not hasattr(object1, attr) or not hasattr(object2, attr):
        return False
    return getattr(object1, attr) == getattr(object2, attr)
def get_attr(name):
    """
    get a class or function by name

    Resolves a dotted path like 'pkg.mod.Name' by importing the module part
    and fetching the trailing attribute.
    """
    dot = name.rfind('.')
    attr_name = str(name[dot+1:])
    module_name = str(name[:dot])
    module = __import__(module_name, fromlist=[attr_name])
    return getattr(module, attr_name)
def init_object(name, **kwargs):
    """Instantiate the class (or call the function) named by *name* with kwargs."""
    factory = get_attr(name)
    return factory(**kwargs)
def randtimedelta(low, high, size):
    """Return an object array of *size* random timedeltas of [low, high) days."""
    day_counts = np.random.randint(low, high, size=size)
    deltas = np.empty(shape=size, dtype=timedelta)
    for idx in range(size):
        deltas[idx] = timedelta(int(day_counts[idx]))
    return deltas
def randdates(start, end, size):
    """Return an object array of *size* random dates within [start, end)."""
    offsets = randtimedelta(0, (end-start).days, size)
    dates = np.empty(shape=size, dtype=datetime)
    for idx in range(size):
        dates[idx] = start + offsets[idx]
    return dates
def mode(series):
    """
    pandas mode is "empty if nothing has 2+ occurrences."
    this method always returns something:
    nan if the series is empty/nan), breaking ties arbitrarily
    """
    if not series.notnull().any():
        return np.nan
    counts = series.value_counts()
    return counts.idxmax()
def get_collinear(df, tol=.1, verbose=False):
    """Return names of columns whose QR diagonal entry is below *tol* in magnitude."""
    _, r = np.linalg.qr(df)
    diag = r.diagonal()
    if verbose:
        for idx in range(len(diag)):
            if np.abs(diag[idx]) < tol:
                print(r[:, idx])  # TODO print equation with column names!
    return [df.columns[idx] for idx in range(len(diag)) if np.abs(diag[idx]) < tol]
def drop_collinear(df, tol=.1, verbose=True):
    """Remove the collinear columns reported by get_collinear from *df* in place."""
    collinear = get_collinear(df, tol=tol)
    if collinear and verbose:
        logging.info('Dropping collinear columns: ' + str(collinear))
    df.drop(collinear, axis=1, inplace=True)
    return df
def cross_join(left, right, lsuffix='_left', rsuffix='_right'):
    """Cartesian product of two frames via a shared constant index.

    Note: mutates both inputs' indexes (sets them to all zeros).
    """
    for frame in (left, right):
        frame.index = np.zeros(len(frame))
    return left.join(right, lsuffix=lsuffix, rsuffix=rsuffix)
def dict_merge(*dict_args):
    '''
    Given any number of dicts, shallow copy and merge into a new dict,
    precedence goes to key value pairs in latter dicts.
    '''
    return {key: value
            for mapping in dict_args
            for key, value in mapping.items()}
def dict_subset(d, keys):
    """Restrict *d* to the given keys, silently skipping any that are missing."""
    result = {}
    for key in keys:
        if key in d:
            result[key] = d[key]
    return result
def drop_constant_column_levels(df):
    """
    drop the levels of a multi-level column dataframe which are constant
    operates in place
    """
    columns = df.columns
    # Drop from the highest level index first so earlier positions stay valid.
    constant = [i for i, level in enumerate(columns.levels) if len(level) <= 1]
    for i in reversed(constant):
        columns = columns.droplevel(i)
    df.columns = columns
def list_expand(d, prefix=None):
    """
    Recursively expand dictionaries into lists
    e.g. list_expand({1:{2:[3,4]}, 5:[6]}) == [(1,2,3), (1,2,4), (5,6)]
    """
    if prefix is None:
        prefix = tuple()
    # The container type is fixed for the whole loop, so test it once.
    if isinstance(d, dict):
        for key in d:
            yield from list_expand(d[key], prefix=tuple(chain(prefix, (key,))))
    else:
        for item in d:
            yield tuple(chain(prefix, make_list(item)))
def dict_expand(d, prefix=None):
    """
    Recursively expand subdictionaries returning dictionary
    dict_expand({1:{2:3}, 4:5}) = {(1,2):3, 4:5}
    """
    flat = {}
    for key, value in d.items():
        if isinstance(value, dict):
            flat.update(dict_expand(value, prefix=key))
        else:
            flat[key] = value
    if prefix is None:
        return flat
    return {make_tuple(prefix) + make_tuple(key): value
            for key, value in flat.items()}
def nunique(iterable):
    """Count distinct values, falling back to equality for unhashable items."""
    try:
        return len(set(iterable))
    except TypeError:
        # Unhashable items: compare pairwise instead of hashing.
        distinct = []
        for item in iterable:
            if item not in distinct:
                distinct.append(item)
        return len(distinct)
def dict_diff(dicts):
    """
    Subset dictionaries to keys which map to multiple values
    """
    changed = set()
    for key in union(set(d.keys()) for d in dicts):
        seen = []
        for d in dicts:
            # A key absent from any dict counts as differing.
            if key not in d:
                changed.add(key)
                break
            seen.append(d[key])
            if nunique(seen) > 1:
                changed.add(key)
                break
    return [dict_subset(d, changed) for d in dicts]
def make_list(a):
    """Wrap *a* in a list unless it is exactly a list or tuple (no subclasses)."""
    if type(a) in (list, tuple):
        return list(a)
    return [a]
def make_tuple(a):
    """Wrap *a* in a tuple unless it is exactly a list or tuple (no subclasses)."""
    if type(a) in (list, tuple):
        return tuple(a)
    return (a,)
def get_collection_values(a):
    """Return dict values or the iterable itself; reject non-iterables."""
    if not hasattr(a, '__iter__'):
        raise ValueError("Must pass iterable")
    if isinstance(a, dict):
        return a.values()
    return a
def dict_product(*d, **kwargs):
    """
    cartesian product of dict whose values are lists
    Args:
        d: dictionary to take product of. multiple dictionaries will first
            be merged by dict_merge
        kwargs: additional kwargs for convenience
    Returns:
        a list of dictionaries with the same keys as d and kwargs
    """
    merged = dict(dict_merge(*d), **kwargs)
    # Non-list values are held out and re-attached to every product entry.
    holdout = {key: value for key, value in merged.items()
               if not isinstance(value, list)}
    varying = {key: value for key, value in merged.items() if key not in holdout}
    if varying:
        keys, value_lists = zip(*varying.items())
        dicts = [dict_filter_none(dict(zip(keys, combo)))
                 for combo in product(*value_lists)]
    else:
        dicts = [{}]
    for entry in dicts:
        entry.update(holdout)
    return dicts
def dict_filter_none(d):
    """
    filter none values from dict
    """
    result = {}
    for key, value in d.items():
        if value is not None:
            result[key] = value
    return result
def list_filter_none(l):
    """Return a copy of l without the None entries (other falsy values kept)."""
    return list(filter(lambda v: v is not None, l))
def dict_update_union(d1, d2):
    """
    Update a set-valued dictionary in place.

    For keys present in both, d1[k] is unioned with d2[k].  For keys only
    in d2, a *copy* of d2[k] is stored: the previous implementation stored
    a shared reference, so later mutations of d1[k] silently leaked back
    into d2.

    Args:
        d1: dict mapping keys to sets; modified in place.
        d2: dict mapping keys to sets (or iterables); left unchanged.
    """
    for k, values in d2.items():
        if k in d1:
            d1[k].update(values)
        else:
            # Copy to avoid aliasing d2's set into d1.
            d1[k] = set(values)
def set_dtypes(df, dtypes):
    """
    Cast DataFrame columns in place to the requested dtypes.

    Args:
        df: pandas DataFrame, modified in place.
        dtypes: either a single dtype applied to every column, or a dict
            mapping column name -> dtype (columns not listed, or mapped
            to None, are left untouched).
    """
    per_column = isinstance(dtypes, dict)
    for column in df.columns:
        target = dtypes.get(column) if per_column else dtypes
        # Skip the cast when no dtype applies or the column already matches.
        if target is not None and df[column].dtype != target:
            df[column] = df[column].astype(target)
@lru_cache(maxsize=500)
def read_file(filename):
    """Return the full text contents of filename, caching up to 500 files."""
    with open(filename) as handle:
        contents = handle.read()
    return contents
def conditional_join(left, right, left_on, right_on, condition,
                     lsuffix='_left', rsuffix='_right'):
    """
    Join two DataFrames on an arbitrary boolean condition.

    Builds the cross product of left[left_on] and right[right_on], keeps
    the rows for which condition(cross) is True, then merges the surviving
    index pairs back onto the full frames.

    Args:
        left, right: DataFrames to join.
        left_on, right_on: column lists handed to condition.
        condition: callable mapping the cross-joined frame to a boolean mask.
        lsuffix, rsuffix: suffixes used when the two index columns collide.

    Returns:
        The joined DataFrame with a fresh RangeIndex.
    """
    left_index = left[left_on].reset_index()
    right_index = right[right_on].reset_index()
    join_table = cross_join(left_index, right_index, lsuffix=lsuffix, rsuffix=rsuffix)
    join_table = join_table[condition(join_table)]
    # reset_index() names an unnamed index 'index'; mirror that here.
    lindex = left.index.name if left.index.name is not None else 'index'
    # BUG FIX: this previously read left.index.name even when testing
    # right.index.name, so a named right index was silently ignored.
    rindex = right.index.name if right.index.name is not None else 'index'
    if lindex == rindex:
        lindex = lindex + lsuffix
        rindex = rindex + rsuffix
    df = left.merge(join_table[[lindex, rindex]], left_index=True, right_on=lindex)
    df = df.merge(right, left_on=rindex, right_index=True)
    df.drop(labels=[lindex, rindex], axis=1, inplace=True)
    df.reset_index(drop=True, inplace=True)
    return df
class PgSQLDatabase(pandas.io.sql.SQLDatabase):
    # PostgreSQL-specific SQLDatabase that moves data in bulk through the
    # ``psql`` command-line client (COPY ... FROM/TO STDIN in CSV form)
    # instead of row-by-row SQL, for speed on large frames.
    # FIXME Schema is pulled from Meta object, shouldn't actually be part of signature!
    def to_sql(self, frame, name, if_exists='fail', index=True,
               index_label=None, schema=None, chunksize=None,
               dtype=None, pk=None, prefixes=None, raise_on_error=True):
        """
        Write records stored in a DataFrame to a SQL database.
        Parameters
        ----------
        frame : DataFrame
        name : string
            Name of SQL table
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, do nothing.
            - replace: If table exists, drop it, recreate it, and insert data.
            - append: If table exists, insert data. Create if does not exist.
        index : boolean, default True
            Write DataFrame index as a column
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        schema : string, default None
            Name of SQL schema in database to write to (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQLDatabase object.
        chunksize : int, default None
            If not None, then rows will be written in batches of this size at a
            time. If None, all rows will be written at once.
        dtype : dict of column name to SQL type, default None
            Optional specifying the datatype for columns. The SQL type should
            be a SQLAlchemy type.
        pk: name of column(s) to set as primary keys
        raise_on_error: if True, exit the process when psql returns a
            non-zero status; otherwise the status is returned to the caller.
        Returns
        -------
        int : exit status of the psql subprocess.
        """
        # Let pandas create (and, for 'replace', recreate) the target table;
        # the data itself is loaded below via COPY, not via this table object.
        table = pandas.io.sql.SQLTable(name, self, frame=frame, index=index,
                         if_exists=if_exists, index_label=index_label,
                         schema=schema, dtype=dtype)
        existed = table.exists()
        table.create()
        replaced = existed and if_exists == 'replace'
        table_name = name
        if schema is not None:
            table_name = schema + '.' + table_name
        # Add the primary key only when the table was just created (either it
        # did not exist before, or it was dropped and recreated by 'replace').
        if pk is not None and ((not existed) or replaced):
            if isinstance(pk, str):
                pks = pk
            else:
                pks = ", ".join(pk)
            sql = "ALTER TABLE {table_name} ADD PRIMARY KEY ({pks})".format(
                table_name=table_name, pks=pks)
            self.execute(sql)
        from subprocess import Popen, PIPE, STDOUT
        # Quote every column name so mixed-case / reserved-word columns work.
        columns = frame.index.names + list(frame.columns) if index else frame.columns
        columns = str.join(",", map(lambda c: '"' + c + '"', columns))
        # Bulk-load by streaming the frame as CSV into psql's COPY ... FROM STDIN.
        # psql connection parameters come from the environment (PGHOST etc.).
        sql = "COPY {table_name} ({columns}) FROM STDIN WITH (FORMAT CSV, HEADER TRUE)".format(
            table_name=table_name, columns=columns)
        p = Popen(['psql', '-c', sql], stdout=PIPE, stdin=PIPE, stderr=STDOUT,
              universal_newlines=True)
        frame.to_csv(p.stdin, index=index)
        psql_out = p.communicate()[0]
        # NOTE(review): the trailing comma turns this statement into a
        # one-element tuple expression; harmless, but likely a typo.
        logging.info(psql_out),
        r = p.wait()
        if raise_on_error and (r > 0):
            sys.exit(r)
        return r
    def read_table(self, name, schema=None):
        """Read an entire table (optionally schema-qualified) as a DataFrame."""
        table_name = name
        if schema is not None:
            table_name = schema + '.' + table_name
        return self.read_query('select * from %s' % table_name)
    def read_sql(self, query, raise_on_error=True, **kwargs):
        """
        Run a query through psql's COPY ... TO STDOUT and parse the CSV
        output with pandas.  Extra kwargs are forwarded to pd.read_csv.
        Exits the process on psql failure when raise_on_error is True.
        """
        from subprocess import Popen, PIPE, STDOUT
        sql = "COPY (%s) TO STDOUT WITH (FORMAT CSV, HEADER TRUE)" % query
        p = Popen(['psql', '-c', sql], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
        df = pd.read_csv(p.stdout, **kwargs)
        psql_out = p.communicate()
        logging.info(psql_out[0].decode(),)
        r = p.wait()
        if raise_on_error and (r > 0):
            sys.exit(r)
        return df
def indent(s, n_spaces=2, initial=True):
    """
    Indent every new line of s.

    Args:
        n_spaces: number of spaces to use for indentation
        initial: whether the very first line is indented too
    """
    pad = ' ' * n_spaces
    result = s.replace('\n', '\n' + pad)
    return pad + result if initial else result
def is_instance_collection(c, cls):
    """
    Args:
        c: any object
        cls: a class or a list/tuple of classes
    Returns: True if c is a non-empty collection whose values are each an
        instance of at least one of the specified classes.
    """
    # Must be iterable and non-empty before inspecting the values.
    if not hasattr(c, '__iter__') or len(c) == 0:
        return False
    classes = tuple(make_list(cls))
    return all(isinstance(value, classes)
               for value in get_collection_values(c))
| [
"itertools.chain",
"pandas.read_csv",
"numpy.array",
"sys.exit",
"datetime.timedelta",
"logging.info",
"pandas.to_datetime",
"os.listdir",
"numpy.linalg.qr",
"subprocess.Popen",
"itertools.product",
"numpy.empty",
"numpy.datetime64",
"numpy.abs",
"functools.reduce",
"numpy.timedelta64"... | [((408, 430), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (422, 430), True, 'import numpy as np\n'), ((2233, 2255), 'numpy.datetime64', 'np.datetime64', (['(0)', '"""ns"""'], {}), "(0, 'ns')\n", (2246, 2255), True, 'import numpy as np\n'), ((9030, 9052), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(500)'}), '(maxsize=500)\n', (9039, 9052), False, 'from functools import lru_cache\n'), ((1417, 1437), 'os.utime', 'os.utime', (['path', 'None'], {}), '(path, None)\n', (1425, 1437), False, 'import os\n'), ((1728, 1760), 'functools.reduce', 'reduce', (['(lambda a, b: a & b)', 'sets'], {}), '(lambda a, b: a & b, sets)\n', (1734, 1760), False, 'from functools import reduce\n'), ((2171, 2222), 'pandas.Timestamp', 'pd.Timestamp', (["('%04d-%02d-%02d' % (year, month, day))"], {}), "('%04d-%02d-%02d' % (year, month, day))\n", (2183, 2222), True, 'import pandas as pd\n'), ((3215, 3252), 'numpy.empty', 'np.empty', ([], {'shape': 'size', 'dtype': 'timedelta'}), '(shape=size, dtype=timedelta)\n', (3223, 3252), True, 'import numpy as np\n'), ((3261, 3300), 'numpy.random.randint', 'np.random.randint', (['low', 'high'], {'size': 'size'}), '(low, high, size=size)\n', (3278, 3300), True, 'import numpy as np\n'), ((3414, 3450), 'numpy.empty', 'np.empty', ([], {'shape': 'size', 'dtype': 'datetime'}), '(shape=size, dtype=datetime)\n', (3422, 3450), True, 'import numpy as np\n'), ((3943, 3959), 'numpy.linalg.qr', 'np.linalg.qr', (['df'], {}), '(df)\n', (3955, 3959), True, 'import numpy as np\n'), ((1557, 1586), 'os.path.join', 'os.path.join', (['directory', 'name'], {}), '(directory, name)\n', (1569, 1586), False, 'import os\n'), ((1963, 1992), 'numpy.array', 'np.array', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (1971, 1992), True, 'import numpy as np\n'), ((3342, 3357), 'datetime.timedelta', 'timedelta', (['r[i]'], {}), '(r[i])\n', (3351, 3357), False, 'from datetime import datetime, timedelta, 
date\n'), ((12945, 13040), 'subprocess.Popen', 'Popen', (["['psql', '-c', sql]"], {'stdout': 'PIPE', 'stdin': 'PIPE', 'stderr': 'STDOUT', 'universal_newlines': '(True)'}), "(['psql', '-c', sql], stdout=PIPE, stdin=PIPE, stderr=STDOUT,\n universal_newlines=True)\n", (12950, 13040), False, 'from subprocess import Popen, PIPE, STDOUT\n'), ((13693, 13759), 'subprocess.Popen', 'Popen', (["['psql', '-c', sql]"], {'stdout': 'PIPE', 'stdin': 'PIPE', 'stderr': 'STDOUT'}), "(['psql', '-c', sql], stdout=PIPE, stdin=PIPE, stderr=STDOUT)\n", (13698, 13759), False, 'from subprocess import Popen, PIPE, STDOUT\n'), ((13773, 13804), 'pandas.read_csv', 'pd.read_csv', (['p.stdout'], {}), '(p.stdout, **kwargs)\n', (13784, 13804), True, 'import pandas as pd\n'), ((941, 954), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (948, 954), False, 'import os\n'), ((1288, 1326), 'pandas.to_datetime', 'pd.to_datetime', (['df[c]', '*args'], {}), '(df[c], *args, **kwargs)\n', (1302, 1326), True, 'import pandas as pd\n'), ((1611, 1632), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1621, 1632), False, 'import os\n'), ((13145, 13167), 'logging.info', 'logging.info', (['psql_out'], {}), '(psql_out)\n', (13157, 13167), False, 'import logging\n'), ((13242, 13253), 'sys.exit', 'sys.exit', (['r'], {}), '(r)\n', (13250, 13253), False, 'import sys\n'), ((13958, 13969), 'sys.exit', 'sys.exit', (['r'], {}), '(r)\n', (13966, 13969), False, 'import sys\n'), ((1662, 1691), 'os.path.join', 'os.path.join', (['directory', 'name'], {}), '(directory, name)\n', (1674, 1691), False, 'import os\n'), ((4050, 4065), 'numpy.abs', 'np.abs', (['diag[i]'], {}), '(diag[i])\n', (4056, 4065), True, 'import numpy as np\n'), ((4201, 4216), 'numpy.abs', 'np.abs', (['diag[i]'], {}), '(diag[i])\n', (4207, 4216), True, 'import numpy as np\n'), ((8123, 8139), 'itertools.product', 'product', (['*values'], {}), '(*values)\n', (8130, 8139), False, 'from itertools import chain, product\n'), ((5733, 5752), 
'itertools.chain', 'chain', (['prefix', '(k,)'], {}), '(prefix, (k,))\n', (5738, 5752), False, 'from itertools import chain, product\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pygame
import numpy as np
class bgColor(object):
    """Trivial holder for the screen background colour."""
    def __init__(self, color):
        # Stored under the attribute name the rest of the game code expects.
        self.background = color
class tank(object):
    # Player tank drawn directly onto a pygame surface: a circular body, a
    # gun barrel pointing right, and two horizontal tracks.
    def __init__(self, x, y, size, screen):
        # (x, y) is the body centre in screen pixels; size scales all parts.
        self.xinit, self.yinit, self.screen = x, y, screen
        self.tankRADIUS = size
        self.tankGUN = 2*size
        self.tankTRACK = size
        self.tankCOLOR = pygame.Color('black')
        # Bottom-right corner of the bounding box (not used elsewhere in
        # this class).
        self.xfinit, self.yfinit = x+size, y+size
    def showTank(self, showCOLOR = False, angle=False):
        # Draw the tank in showCOLOR; any falsy value falls back to the
        # default colour.  Drawing in the background colour erases the tank.
        if showCOLOR:
            pass
        else:
            showCOLOR = self.tankCOLOR
        # Body.
        pygame.draw.circle(self.screen, showCOLOR,
                (self.xinit, self.yinit), self.tankRADIUS)
        # Gun barrel, fixed pointing right.
        pygame.draw.line(self.screen, showCOLOR,
                [self.xinit, self.yinit],
                [self.xinit+self.tankGUN,
                self.yinit], 5)
        # Top track.
        pygame.draw.line(self.screen, showCOLOR,
                [self.xinit-self.tankRADIUS-1, self.yinit-self.tankRADIUS-1],
                [self.xinit+self.tankRADIUS-1, self.yinit-self.tankRADIUS-1],
                10)
        # Bottom track.
        pygame.draw.line(self.screen, showCOLOR,
                [self.xinit+self.tankRADIUS-1, self.yinit+self.tankRADIUS-1],
                [self.xinit-self.tankRADIUS-1, self.yinit+self.tankRADIUS-1],
                10)
        # NOTE(review): the angle parameter is accepted but never used; the
        # commented-out code below suggests barrel rotation was planned.
        if angle:
            pass
        # self.yinit = int(self.yinit+angle)
        # pygame.draw.line(self.screen, showCOLOR,
        #                  [self.xinit, self.yinit],
        #
        #                  [self.xinit+self.tankGUN,
        #                  self.yinit], 5)
    def updateTank(self, bgCOLOR, pressed = False, mouse = False):
        # Erase-and-redraw update.  pressed is a pygame key code
        # (100 == ord('d') moves right, 97 == ord('a') moves left); mouse is
        # presumably the cursor y coordinate used to aim -- confirm against
        # the caller.
        if pressed:
            self.showTank(bgCOLOR)
            if pressed == 100:
                self.xinit = self.xinit+5
            if pressed == 97:
                self.xinit = self.xinit-5
            # NOTE(review): the x position is passed as the angle argument;
            # showTank ignores angle, so this is harmless but looks
            # unintended.
            self.showTank(self.tankCOLOR, self.xinit)
        if mouse:
            self.showTank(bgCOLOR)
            # NOTE(review): np.tanh of the slope is used where an arctangent
            # would normally give the aim angle -- confirm intent.
            angle = np.tanh((self.yinit-mouse)/(self.xinit+self.tankRADIUS))
            angle = angle*180/np.pi
            self.showTank(self.tankCOLOR, angle)
        else:
            self.showTank(bgCOLOR)
self.showTank(self.tankCOLOR) | [
"pygame.Color",
"pygame.draw.circle",
"numpy.tanh",
"pygame.draw.line"
] | [((415, 436), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (427, 436), False, 'import pygame\n'), ((645, 735), 'pygame.draw.circle', 'pygame.draw.circle', (['self.screen', 'showCOLOR', '(self.xinit, self.yinit)', 'self.tankRADIUS'], {}), '(self.screen, showCOLOR, (self.xinit, self.yinit), self.\n tankRADIUS)\n', (663, 735), False, 'import pygame\n'), ((767, 882), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', 'showCOLOR', '[self.xinit, self.yinit]', '[self.xinit + self.tankGUN, self.yinit]', '(5)'], {}), '(self.screen, showCOLOR, [self.xinit, self.yinit], [self.\n xinit + self.tankGUN, self.yinit], 5)\n', (783, 882), False, 'import pygame\n'), ((963, 1156), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', 'showCOLOR', '[self.xinit - self.tankRADIUS - 1, self.yinit - self.tankRADIUS - 1]', '[self.xinit + self.tankRADIUS - 1, self.yinit - self.tankRADIUS - 1]', '(10)'], {}), '(self.screen, showCOLOR, [self.xinit - self.tankRADIUS - 1,\n self.yinit - self.tankRADIUS - 1], [self.xinit + self.tankRADIUS - 1, \n self.yinit - self.tankRADIUS - 1], 10)\n', (979, 1156), False, 'import pygame\n'), ((1216, 1409), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', 'showCOLOR', '[self.xinit + self.tankRADIUS - 1, self.yinit + self.tankRADIUS - 1]', '[self.xinit - self.tankRADIUS - 1, self.yinit + self.tankRADIUS - 1]', '(10)'], {}), '(self.screen, showCOLOR, [self.xinit + self.tankRADIUS - 1,\n self.yinit + self.tankRADIUS - 1], [self.xinit - self.tankRADIUS - 1, \n self.yinit + self.tankRADIUS - 1], 10)\n', (1232, 1409), False, 'import pygame\n'), ((2186, 2248), 'numpy.tanh', 'np.tanh', (['((self.yinit - mouse) / (self.xinit + self.tankRADIUS))'], {}), '((self.yinit - mouse) / (self.xinit + self.tankRADIUS))\n', (2193, 2248), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import numpy as np
# Multiple linear regression of CO2 emissions on engine features,
# following the usual scikit-learn train/test split workflow.
df = pd.read_csv("FuelConsumption.csv")
# take a look at the dataset
df.head()
cdf = df[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY','FUELCONSUMPTION_COMB','CO2EMISSIONS']]
cdf.head(9)
# Visual sanity check: emissions vs engine size for the full dataset.
plt.scatter(cdf.ENGINESIZE, cdf.CO2EMISSIONS, color='blue')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()
# Random ~80/20 train/test split via a boolean mask.
msk = np.random.rand(len(df)) < 0.8
train = cdf[msk]
test = cdf[~msk]
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()
from sklearn import linear_model
regr = linear_model.LinearRegression()
# Fit on three predictors; y is the emissions column.
x = np.asanyarray(train[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
y = np.asanyarray(train[['CO2EMISSIONS']])
regr.fit (x, y)
# The coefficients
print ('Coefficients: ', regr.coef_)
# Evaluate on the held-out split: mean squared error and R^2.
y_hat= regr.predict(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
x = np.asanyarray(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
y = np.asanyarray(test[['CO2EMISSIONS']])
print("Residual sum of squares: %.2f"
      % np.mean((y_hat - y) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(x, y)) | [
"numpy.mean",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.asanyarray",
"matplotlib.pyplot.scatter",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show"
] | [((96, 130), 'pandas.read_csv', 'pd.read_csv', (['"""FuelConsumption.csv"""'], {}), "('FuelConsumption.csv')\n", (107, 130), True, 'import pandas as pd\n'), ((305, 364), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cdf.ENGINESIZE', 'cdf.CO2EMISSIONS'], {'color': '"""blue"""'}), "(cdf.ENGINESIZE, cdf.CO2EMISSIONS, color='blue')\n", (316, 364), True, 'import matplotlib.pyplot as plt\n'), ((366, 391), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Engine size"""'], {}), "('Engine size')\n", (376, 391), True, 'import matplotlib.pyplot as plt\n'), ((392, 414), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Emission"""'], {}), "('Emission')\n", (402, 414), True, 'import matplotlib.pyplot as plt\n'), ((415, 425), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (423, 425), True, 'import matplotlib.pyplot as plt\n'), ((498, 561), 'matplotlib.pyplot.scatter', 'plt.scatter', (['train.ENGINESIZE', 'train.CO2EMISSIONS'], {'color': '"""blue"""'}), "(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')\n", (509, 561), True, 'import matplotlib.pyplot as plt\n'), ((563, 588), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Engine size"""'], {}), "('Engine size')\n", (573, 588), True, 'import matplotlib.pyplot as plt\n'), ((589, 611), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Emission"""'], {}), "('Emission')\n", (599, 611), True, 'import matplotlib.pyplot as plt\n'), ((612, 622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (620, 622), True, 'import matplotlib.pyplot as plt\n'), ((664, 695), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (693, 695), False, 'from sklearn import linear_model\n'), ((700, 773), 'numpy.asanyarray', 'np.asanyarray', (["train[['ENGINESIZE', 'CYLINDERS', 'FUELCONSUMPTION_COMB']]"], {}), "(train[['ENGINESIZE', 'CYLINDERS', 'FUELCONSUMPTION_COMB']])\n", (713, 773), True, 'import numpy as np\n'), ((776, 814), 'numpy.asanyarray', 'np.asanyarray', (["train[['CO2EMISSIONS']]"], {}), 
"(train[['CO2EMISSIONS']])\n", (789, 814), True, 'import numpy as np\n'), ((969, 1041), 'numpy.asanyarray', 'np.asanyarray', (["test[['ENGINESIZE', 'CYLINDERS', 'FUELCONSUMPTION_COMB']]"], {}), "(test[['ENGINESIZE', 'CYLINDERS', 'FUELCONSUMPTION_COMB']])\n", (982, 1041), True, 'import numpy as np\n'), ((1044, 1081), 'numpy.asanyarray', 'np.asanyarray', (["test[['CO2EMISSIONS']]"], {}), "(test[['CO2EMISSIONS']])\n", (1057, 1081), True, 'import numpy as np\n'), ((1128, 1153), 'numpy.mean', 'np.mean', (['((y_hat - y) ** 2)'], {}), '((y_hat - y) ** 2)\n', (1135, 1153), True, 'import numpy as np\n')] |
from __future__ import print_function
from orphics import maps,io,cosmology,catalogs,stats,mpi
from pixell import enmap,reproject
import numpy as np
import os,sys
from soapack import interfaces as sints
import tilec.fg as tfg
import tilec.utils as tutils
import random
# Fixed seed so the random-point baseline and shuffles are reproducible.
np.random.seed(100)
cversion = 'joint'
region1 = 'deep56'
region2 = 'boss'
# Alternative catalogs (ACT clusters, BOSS CMASS) kept for reference.
# cols = catalogs.load_fits("AdvACT.fits",['RAdeg','DECdeg','SNR'])
# ras = cols['RAdeg']
# decs = cols['DECdeg']
# sns = cols['SNR']
# iras = ras[sns>5]
# idecs = decs[sns>5]
# fname = os.environ['WORK'] + "/data/boss/boss_dr12/galaxy_DR12v5_CMASS_South.fits"
# cols = catalogs.load_fits(fname,['RA','DEC'])
# iras1 = cols['RA']
# idecs1 = cols['DEC']
# fname = os.environ['WORK'] + "/data/boss/boss_dr12/galaxy_DR12v5_CMASS_North.fits"
# cols = catalogs.load_fits(fname,['RA','DEC'])
# iras2 = cols['RA']
# idecs2 = cols['DEC']
# iras = np.append(iras1,iras2)
# idecs = np.append(idecs1,idecs2)
# Catalog in use: redMaPPer DR8 cluster positions.
fname = os.environ['WORK'] + "/data/boss/sdss_dr8/redmapper_dr8_public_v6.3_catalog.fits"
cols = catalogs.load_fits(fname,['RA','DEC'])
iras = cols['RA']
idecs = cols['DEC']
# Region masks, binarized at 0.99.
mask1 = sints.get_act_mr3_crosslinked_mask(region1)
mask1[mask1<0.99] = 0
mask2 = sints.get_act_mr3_crosslinked_mask(region2)
mask2[mask2<0.99] = 0
dm1 = sints.ACTmr3(region=mask1,calibrated=True)
dm2 = sints.ACTmr3(region=mask2,calibrated=True)
# Inverse-variance maps used later as per-cutout stacking weights.
wt1 = dm1.get_coadd_ivar("s15",region1,"pa2_f150")
wt2 = dm2.get_coadd_ivar("s15",region2,"pa2_f150")
# Keep only catalog entries that fall on unmasked pixels of each region.
ras1,decs1 = catalogs.select_based_on_mask(iras,idecs,mask1)
ras2,decs2 = catalogs.select_based_on_mask(iras,idecs,mask2)
tdir = '/scratch/r/rbond/msyriac/data/depot/tilec/v1.0.0_rc_20190919'
solution = 'tsz'
# tILC product file names: y map, CIB-deprojected, CMB-deprojected, plus beams.
yfile1 = tutils.get_generic_fname(tdir,region1,solution,None,cversion)
cfile1 = tutils.get_generic_fname(tdir,region1,solution,'cib',cversion)
dfile1 = tutils.get_generic_fname(tdir,region1,solution,'cmb',cversion)
ybfile1 = tutils.get_generic_fname(tdir,region1,solution,None,cversion,beam=True)
cbfile1 = tutils.get_generic_fname(tdir,region1,solution,'cib',cversion,beam=True)
yfile2 = tutils.get_generic_fname(tdir,region2,solution,None,cversion)
cfile2 = tutils.get_generic_fname(tdir,region2,solution,'cib',cversion)
dfile2 = tutils.get_generic_fname(tdir,region2,solution,'cmb',cversion)
ybfile2 = tutils.get_generic_fname(tdir,region2,solution,None,cversion,beam=True)
cbfile2 = tutils.get_generic_fname(tdir,region2,solution,'cib',cversion,beam=True)
cmap1 = enmap.read_map(cfile1)
cmap2 = enmap.read_map(cfile2)
dmap1 = enmap.read_map(dfile1)
dmap2 = enmap.read_map(dfile2)
modlmap1 = cmap1.modlmap()
modlmap2 = cmap2.modlmap()
# Reconvolve the y maps to the CIB-deprojected beam so the three maps can
# be compared cutout-for-cutout.
lsy1,by1 = np.loadtxt(ybfile1,unpack=True)
by12d = maps.interp(lsy1,by1)(modlmap1)
lsc1,bc1 = np.loadtxt(cbfile1,unpack=True)
bc12d = maps.interp(lsc1,bc1)(modlmap1)
beam_ratio1 = bc12d/by12d
beam_ratio1[~np.isfinite(beam_ratio1)] = 0
lsy2,by2 = np.loadtxt(ybfile2,unpack=True)
by22d = maps.interp(lsy2,by2)(modlmap2)
# ls,bc2 = np.loadtxt(cbfile2,unpack=True)
# NOTE(review): the boss beam ratio reuses the deep56 CIB beam (lsc1, bc1);
# the commented-out line above suggests the boss beam was intended -- confirm.
bc22d = maps.interp(lsc1,bc1)(modlmap2)
beam_ratio2 = bc22d/by22d
beam_ratio2[~np.isfinite(beam_ratio2)] = 0
ymap1 = maps.filter_map(enmap.read_map(yfile1),beam_ratio1)
ymap2 = maps.filter_map(enmap.read_map(yfile2),beam_ratio2)
# Cutout geometry: 40 arcmin wide at 0.5 arcmin pixels.
arcmin = 40.
pix = 0.5
def get_cuts(mask, ymap, cmap, dmap, wtmap, ra, dec, arcmin, pix):
    """
    Extract square cutouts of side arcmin (at pix arcmin/pixel) around
    (ra, dec) degrees from the mask, the three component maps and the
    weight map.

    Returns:
        (mask_cut, y_cut, c_cut, d_cut, mean_weight), or five Nones when
        the position falls off the map or touches masked pixels.
    """
    npix = int(arcmin / pix)
    ra_rad, dec_rad = np.deg2rad(ra), np.deg2rad(dec)
    mcut = reproject.cutout(mask, ra=ra_rad, dec=dec_rad, npix=npix)
    # Reject positions off the footprint or on a fully masked cutout.
    if mcut is None or np.any(mcut) <= 0:
        return None, None, None, None, None
    ycut, ccut, dcut, wcut = [
        reproject.cutout(m, ra=ra_rad, dec=dec_rad, npix=npix)
        for m in (ymap, cmap, dmap, wtmap)]
    return mcut, ycut, ccut, dcut, wcut.mean()
def do(ymap,cmap,dmap,mask,ras,decs,wt):
    """
    MPI-parallel stacking of y / CIB-deprojected / CMB-deprojected cutouts
    at the catalog positions (ras, decs), weighted per cutout by the mean
    of wt, with a random-point baseline for each rank.

    Returns (rank, ystack, cstack, dstack, N, cents, y1ds, c1ds, d1ds,
    ry1d, rc1d, rd1d) on rank 0 and (rank, None, ..., None) elsewhere.
    """
    # Shuffle positions in place (note: mutates the caller's arrays) so
    # each rank's contiguous task slice is a random subset.
    combined = list(zip(ras, decs))
    random.shuffle(combined)
    ras[:], decs[:] = zip(*combined)
    Nrand = 400
    njobs = len(ras)
    comm,rank,my_tasks = mpi.distribute(njobs)
    print("Rank %d starting" % rank)
    s = stats.Stats(comm)
    i = 0
    for task in my_tasks:
        ra = ras[task]
        dec = decs[task]
        mcut,ycut,ccut,dcut,weight = get_cuts(mask,ymap,cmap,dmap,wt,ra,dec,arcmin,pix)
        if mcut is None: continue
        # On each rank's first accepted source: build the radial binner and
        # measure the random-point baseline profiles once.
        if i==0:
            modrmap = np.rad2deg(ycut.modrmap())*60.
            bin_edges = np.arange(0.,15.,1.0)
            binner = stats.bin2D(modrmap,bin_edges)
            rras,rdecs = catalogs.random_catalog(ymap.shape,ymap.wcs,Nrand,edge_avoid_deg=4.)
            nrej = 0
            for rra,rdec in zip(rras,rdecs):
                rmcut,rycut,rccut,rdcut,rweight = get_cuts(mask,ymap,cmap,dmap,wt,rra,rdec,arcmin,pix)
                if rmcut is None:
                    nrej = nrej + 1
                    continue
                cents,ry1d = binner.bin(rycut)
                cents,rc1d = binner.bin(rccut)
                cents,rd1d = binner.bin(rdcut)
                s.add_to_stats("rc1d",rc1d*1e6)
                s.add_to_stats("ry1d",ry1d*1e6)
                s.add_to_stats("rd1d",rd1d*1e6)
            if rank==0: print(Nrand-nrej, " accepted")
        # Radial profiles (scaled to 1e6) and ivar-weighted 2D stacks.
        cents,y1d = binner.bin(ycut)
        cents,c1d = binner.bin(ccut)
        cents,d1d = binner.bin(dcut)
        s.add_to_stats("c1d",c1d*1e6)
        s.add_to_stats("y1d",y1d*1e6)
        s.add_to_stats("d1d",d1d*1e6)
        s.add_to_stack("cstack",ccut*1e6*weight)
        s.add_to_stack("dstack",dcut*1e6*weight)
        s.add_to_stack("ystack",ycut*1e6*weight)
        s.add_to_stats("sum",(weight,))
        i = i + 1
        if i%10==0 and rank==0: print(i)
    print("Rank %d done " % rank)
    # Collective reduction across ranks; must be called on every rank.
    s.get_stats()
    s.get_stacks()
    if rank==0:
        # Undo the mean over total weight: multiply the stack mean by the
        # summed weights to recover the weighted sum, then attach a WCS.
        N = s.vectors['sum'].sum()
        ystack = s.stacks['ystack'] * N
        cstack = s.stacks['cstack'] * N
        dstack = s.stacks['dstack'] * N
        y1ds = s.vectors['y1d']
        c1ds = s.vectors['c1d']
        d1ds = s.vectors['d1d']
        ry1d = s.stats['ry1d']['mean']
        rc1d = s.stats['rc1d']['mean']
        rd1d = s.stats['rd1d']['mean']
        _,nwcs = enmap.geometry(pos=(0,0),shape=ystack.shape,res=np.deg2rad(0.5/60.))
        return rank,enmap.enmap(ystack,nwcs),enmap.enmap(cstack,nwcs),enmap.enmap(dstack,nwcs),N,cents,y1ds,c1ds,d1ds,ry1d,rc1d,rd1d
    else:
        return rank,None,None,None,None,None,None,None,None,None,None,None
# Helper: write a high-resolution plot of a stack into $WORK.
hplot = lambda x,y: io.hplot(x,os.environ['WORK']+"/"+y,ticks=5,tick_unit='arcmin',grid=True,colorbar=True,color='gray',upgrade=4,quantile=1e-3)
# Stack on the deep56 region and plot the rank-0 results.
print("Starting deep56")
rank,ystack1,cstack1,dstack1,i1,cents,y1ds1,c1ds1,d1ds1,ry1d1,rc1d1,rd1d1 = do(ymap1,cmap1,dmap1,mask1,ras1,decs1,wt1)
if rank == 0:
    print(i1)
    hplot(ystack1,"fig_all_cmass_ystack_%s_%s" % (cversion,'deep56'))
    hplot(cstack1,"fig_all_cmass_cstack_%s_%s" % (cversion,'deep56'))
    hplot(dstack1,"fig_all_cmass_dstack_%s_%s" % (cversion,'deep56'))
# Stack on the boss region.
print("Starting boss")
rank,ystack2,cstack2,dstack2,i2,cents,y1ds2,c1ds2,d1ds2,ry1d2,rc1d2,rd1d2 = do(ymap2,cmap2,dmap2,mask2,ras2,decs2,wt2)
if rank == 0:
    print(i2)
    hplot(ystack2,"fig_all_cmass_ystack_%s_%s" % (cversion,'boss'))
    hplot(cstack2,"fig_all_cmass_cstack_%s_%s" % (cversion,'boss'))
    hplot(dstack2,"fig_all_cmass_dstack_%s_%s" % (cversion,'boss'))
# Combine the two regions into count-weighted average stacks.
# NOTE(review): everything from here down runs on every MPI rank, but the
# stacks and 1d profiles returned by do() are None on rank != 0 -- this
# block looks like it belongs under the rank == 0 branch above; confirm
# before running with more than one rank.
ystack = (ystack1+ystack2)/(i1+i2)
cstack = (cstack1+cstack2)/(i1+i2)
dstack = (dstack1+dstack2)/(i1+i2)
hplot(ystack,"fig_all_cmass_ystack_%s_%s" % (cversion,'both'))
hplot(cstack,"fig_all_cmass_cstack_%s_%s" % (cversion,'both'))
hplot(dstack,"fig_all_cmass_dstack_%s_%s" % (cversion,'both'))
# Per-region mean radial profiles and their errors on the mean.
sy1 = stats.get_stats(y1ds1)
sc1 = stats.get_stats(c1ds1)
sd1 = stats.get_stats(d1ds1)
sy2 = stats.get_stats(y1ds2)
sc2 = stats.get_stats(c1ds2)
sd2 = stats.get_stats(d1ds2)
y1 = sy1['mean']
ey1 = sy1['errmean']
c1 = sc1['mean']
ec1 = sc1['errmean']
d1 = sd1['mean']
ed1 = sd1['errmean']
y2 = sy2['mean']
ey2 = sy2['errmean']
c2 = sc2['mean']
ec2 = sc2['errmean']
d2 = sd2['mean']
ed2 = sd2['errmean']
# Profile plot: random-subtracted profiles (solid) per region/component,
# plus the raw (un-subtracted) profiles faint and offset by 0.1 arcmin.
pl = io.Plotter(xlabel='$\\theta$ (arcmin)',ylabel='Filtered $Y (\\times 10^6)$')
pl.add_err(cents,y1-ry1d1,yerr=ey1,label="deep56",ls="none",marker="x",markersize=8,elinewidth=2,mew=2,color='C0')
pl.add_err(cents,c1-rc1d1,yerr=ec1,label="deep56 no dust",ls="none",marker="o",markersize=8,elinewidth=2,mew=2,color='C0')
pl.add_err(cents,d1-rd1d1,yerr=ed1,label="deep56 no cmb",ls="none",marker="_",markersize=8,elinewidth=2,mew=2,color='C0')
pl.add_err(cents,y2-ry1d2,yerr=ey2,label="boss",ls="none",marker="x",markersize=8,elinewidth=2,mew=2,color='C1')
pl.add_err(cents,c2-rc1d2,yerr=ec2,label="boss no dust",ls="none",marker="o",markersize=8,elinewidth=2,mew=2,color='C1')
pl.add_err(cents,d2-rd1d2,yerr=ed2,label="boss no cmb",ls="none",marker="_",markersize=8,elinewidth=2,mew=2,color='C1')
pl.add_err(cents+0.1,y1,yerr=ey1,ls="none",marker="x",markersize=8,elinewidth=2,mew=2,color='C0',alpha=0.2)
pl.add_err(cents+0.1,c1,yerr=ec1,ls="none",marker="o",markersize=8,elinewidth=2,mew=2,color='C0',alpha=0.2)
pl.add_err(cents+0.1,d1,yerr=ed1,ls="none",marker="_",markersize=8,elinewidth=2,mew=2,color='C0',alpha=0.2)
pl.add_err(cents+0.1,y2,yerr=ey2,ls="none",marker="x",markersize=8,elinewidth=2,mew=2,color='C1',alpha=0.2)
pl.add_err(cents+0.1,c2,yerr=ec2,ls="none",marker="_",markersize=8,elinewidth=2,mew=2,color='C1',alpha=0.2)
pl.add_err(cents+0.1,d2,yerr=ed2,ls="none",marker="o",markersize=8,elinewidth=2,mew=2,color='C1',alpha=0.2)
pl.hline(y=0)
pl.done(os.environ['WORK']+"/"+'fig_boss_yprofile.png')
| [
"orphics.maps.interp",
"orphics.stats.get_stats",
"numpy.isfinite",
"numpy.arange",
"orphics.stats.Stats",
"orphics.mpi.distribute",
"pixell.enmap.enmap",
"orphics.io.Plotter",
"orphics.stats.bin2D",
"numpy.random.seed",
"soapack.interfaces.get_act_mr3_crosslinked_mask",
"random.shuffle",
"o... | [((270, 289), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (284, 289), True, 'import numpy as np\n'), ((1048, 1088), 'orphics.catalogs.load_fits', 'catalogs.load_fits', (['fname', "['RA', 'DEC']"], {}), "(fname, ['RA', 'DEC'])\n", (1066, 1088), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((1138, 1181), 'soapack.interfaces.get_act_mr3_crosslinked_mask', 'sints.get_act_mr3_crosslinked_mask', (['region1'], {}), '(region1)\n', (1172, 1181), True, 'from soapack import interfaces as sints\n'), ((1212, 1255), 'soapack.interfaces.get_act_mr3_crosslinked_mask', 'sints.get_act_mr3_crosslinked_mask', (['region2'], {}), '(region2)\n', (1246, 1255), True, 'from soapack import interfaces as sints\n'), ((1285, 1328), 'soapack.interfaces.ACTmr3', 'sints.ACTmr3', ([], {'region': 'mask1', 'calibrated': '(True)'}), '(region=mask1, calibrated=True)\n', (1297, 1328), True, 'from soapack import interfaces as sints\n'), ((1334, 1377), 'soapack.interfaces.ACTmr3', 'sints.ACTmr3', ([], {'region': 'mask2', 'calibrated': '(True)'}), '(region=mask2, calibrated=True)\n', (1346, 1377), True, 'from soapack import interfaces as sints\n'), ((1494, 1543), 'orphics.catalogs.select_based_on_mask', 'catalogs.select_based_on_mask', (['iras', 'idecs', 'mask1'], {}), '(iras, idecs, mask1)\n', (1523, 1543), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((1555, 1604), 'orphics.catalogs.select_based_on_mask', 'catalogs.select_based_on_mask', (['iras', 'idecs', 'mask2'], {}), '(iras, idecs, mask2)\n', (1584, 1604), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((1703, 1768), 'tilec.utils.get_generic_fname', 'tutils.get_generic_fname', (['tdir', 'region1', 'solution', 'None', 'cversion'], {}), '(tdir, region1, solution, None, cversion)\n', (1727, 1768), True, 'import tilec.utils as tutils\n'), ((1774, 1840), 'tilec.utils.get_generic_fname', 'tutils.get_generic_fname', (['tdir', 'region1', 
'solution', '"""cib"""', 'cversion'], {}), "(tdir, region1, solution, 'cib', cversion)\n", (1798, 1840), True, 'import tilec.utils as tutils\n'), ((1846, 1912), 'tilec.utils.get_generic_fname', 'tutils.get_generic_fname', (['tdir', 'region1', 'solution', '"""cmb"""', 'cversion'], {}), "(tdir, region1, solution, 'cmb', cversion)\n", (1870, 1912), True, 'import tilec.utils as tutils\n'), ((1919, 1995), 'tilec.utils.get_generic_fname', 'tutils.get_generic_fname', (['tdir', 'region1', 'solution', 'None', 'cversion'], {'beam': '(True)'}), '(tdir, region1, solution, None, cversion, beam=True)\n', (1943, 1995), True, 'import tilec.utils as tutils\n'), ((2001, 2078), 'tilec.utils.get_generic_fname', 'tutils.get_generic_fname', (['tdir', 'region1', 'solution', '"""cib"""', 'cversion'], {'beam': '(True)'}), "(tdir, region1, solution, 'cib', cversion, beam=True)\n", (2025, 2078), True, 'import tilec.utils as tutils\n'), ((2085, 2150), 'tilec.utils.get_generic_fname', 'tutils.get_generic_fname', (['tdir', 'region2', 'solution', 'None', 'cversion'], {}), '(tdir, region2, solution, None, cversion)\n', (2109, 2150), True, 'import tilec.utils as tutils\n'), ((2156, 2222), 'tilec.utils.get_generic_fname', 'tutils.get_generic_fname', (['tdir', 'region2', 'solution', '"""cib"""', 'cversion'], {}), "(tdir, region2, solution, 'cib', cversion)\n", (2180, 2222), True, 'import tilec.utils as tutils\n'), ((2228, 2294), 'tilec.utils.get_generic_fname', 'tutils.get_generic_fname', (['tdir', 'region2', 'solution', '"""cmb"""', 'cversion'], {}), "(tdir, region2, solution, 'cmb', cversion)\n", (2252, 2294), True, 'import tilec.utils as tutils\n'), ((2301, 2377), 'tilec.utils.get_generic_fname', 'tutils.get_generic_fname', (['tdir', 'region2', 'solution', 'None', 'cversion'], {'beam': '(True)'}), '(tdir, region2, solution, None, cversion, beam=True)\n', (2325, 2377), True, 'import tilec.utils as tutils\n'), ((2383, 2460), 'tilec.utils.get_generic_fname', 'tutils.get_generic_fname', (['tdir', 
'region2', 'solution', '"""cib"""', 'cversion'], {'beam': '(True)'}), "(tdir, region2, solution, 'cib', cversion, beam=True)\n", (2407, 2460), True, 'import tilec.utils as tutils\n'), ((2465, 2487), 'pixell.enmap.read_map', 'enmap.read_map', (['cfile1'], {}), '(cfile1)\n', (2479, 2487), False, 'from pixell import enmap, reproject\n'), ((2496, 2518), 'pixell.enmap.read_map', 'enmap.read_map', (['cfile2'], {}), '(cfile2)\n', (2510, 2518), False, 'from pixell import enmap, reproject\n'), ((2527, 2549), 'pixell.enmap.read_map', 'enmap.read_map', (['dfile1'], {}), '(dfile1)\n', (2541, 2549), False, 'from pixell import enmap, reproject\n'), ((2558, 2580), 'pixell.enmap.read_map', 'enmap.read_map', (['dfile2'], {}), '(dfile2)\n', (2572, 2580), False, 'from pixell import enmap, reproject\n'), ((2648, 2680), 'numpy.loadtxt', 'np.loadtxt', (['ybfile1'], {'unpack': '(True)'}), '(ybfile1, unpack=True)\n', (2658, 2680), True, 'import numpy as np\n'), ((2731, 2763), 'numpy.loadtxt', 'np.loadtxt', (['cbfile1'], {'unpack': '(True)'}), '(cbfile1, unpack=True)\n', (2741, 2763), True, 'import numpy as np\n'), ((2885, 2917), 'numpy.loadtxt', 'np.loadtxt', (['ybfile2'], {'unpack': '(True)'}), '(ybfile2, unpack=True)\n', (2895, 2917), True, 'import numpy as np\n'), ((2688, 2710), 'orphics.maps.interp', 'maps.interp', (['lsy1', 'by1'], {}), '(lsy1, by1)\n', (2699, 2710), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((2771, 2793), 'orphics.maps.interp', 'maps.interp', (['lsc1', 'bc1'], {}), '(lsc1, bc1)\n', (2782, 2793), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((2925, 2947), 'orphics.maps.interp', 'maps.interp', (['lsy2', 'by2'], {}), '(lsy2, by2)\n', (2936, 2947), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((3008, 3030), 'orphics.maps.interp', 'maps.interp', (['lsc1', 'bc1'], {}), '(lsc1, bc1)\n', (3019, 3030), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((3135, 
3157), 'pixell.enmap.read_map', 'enmap.read_map', (['yfile1'], {}), '(yfile1)\n', (3149, 3157), False, 'from pixell import enmap, reproject\n'), ((3195, 3217), 'pixell.enmap.read_map', 'enmap.read_map', (['yfile2'], {}), '(yfile2)\n', (3209, 3217), False, 'from pixell import enmap, reproject\n'), ((4068, 4092), 'random.shuffle', 'random.shuffle', (['combined'], {}), '(combined)\n', (4082, 4092), False, 'import random\n'), ((4194, 4215), 'orphics.mpi.distribute', 'mpi.distribute', (['njobs'], {}), '(njobs)\n', (4208, 4215), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((4261, 4278), 'orphics.stats.Stats', 'stats.Stats', (['comm'], {}), '(comm)\n', (4272, 4278), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((6574, 6716), 'orphics.io.hplot', 'io.hplot', (['x', "(os.environ['WORK'] + '/' + y)"], {'ticks': '(5)', 'tick_unit': '"""arcmin"""', 'grid': '(True)', 'colorbar': '(True)', 'color': '"""gray"""', 'upgrade': '(4)', 'quantile': '(0.001)'}), "(x, os.environ['WORK'] + '/' + y, ticks=5, tick_unit='arcmin', grid\n =True, colorbar=True, color='gray', upgrade=4, quantile=0.001)\n", (6582, 6716), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((7802, 7824), 'orphics.stats.get_stats', 'stats.get_stats', (['y1ds1'], {}), '(y1ds1)\n', (7817, 7824), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((7835, 7857), 'orphics.stats.get_stats', 'stats.get_stats', (['c1ds1'], {}), '(c1ds1)\n', (7850, 7857), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((7868, 7890), 'orphics.stats.get_stats', 'stats.get_stats', (['d1ds1'], {}), '(d1ds1)\n', (7883, 7890), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((7902, 7924), 'orphics.stats.get_stats', 'stats.get_stats', (['y1ds2'], {}), '(y1ds2)\n', (7917, 7924), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((7935, 7957), 
'orphics.stats.get_stats', 'stats.get_stats', (['c1ds2'], {}), '(c1ds2)\n', (7950, 7957), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((7968, 7990), 'orphics.stats.get_stats', 'stats.get_stats', (['d1ds2'], {}), '(d1ds2)\n', (7983, 7990), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((8283, 8361), 'orphics.io.Plotter', 'io.Plotter', ([], {'xlabel': '"""$\\\\theta$ (arcmin)"""', 'ylabel': '"""Filtered $Y (\\\\times 10^6)$"""'}), "(xlabel='$\\\\theta$ (arcmin)', ylabel='Filtered $Y (\\\\times 10^6)$')\n", (8293, 8361), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((2842, 2866), 'numpy.isfinite', 'np.isfinite', (['beam_ratio1'], {}), '(beam_ratio1)\n', (2853, 2866), True, 'import numpy as np\n'), ((3079, 3103), 'numpy.isfinite', 'np.isfinite', (['beam_ratio2'], {}), '(beam_ratio2)\n', (3090, 3103), True, 'import numpy as np\n'), ((3481, 3493), 'numpy.any', 'np.any', (['mcut'], {}), '(mcut)\n', (3487, 3493), True, 'import numpy as np\n'), ((4679, 4751), 'orphics.catalogs.random_catalog', 'catalogs.random_catalog', (['ymap.shape', 'ymap.wcs', 'Nrand'], {'edge_avoid_deg': '(4.0)'}), '(ymap.shape, ymap.wcs, Nrand, edge_avoid_deg=4.0)\n', (4702, 4751), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((3354, 3368), 'numpy.deg2rad', 'np.deg2rad', (['ra'], {}), '(ra)\n', (3364, 3368), True, 'import numpy as np\n'), ((3374, 3389), 'numpy.deg2rad', 'np.deg2rad', (['dec'], {}), '(dec)\n', (3384, 3389), True, 'import numpy as np\n'), ((3577, 3591), 'numpy.deg2rad', 'np.deg2rad', (['ra'], {}), '(ra)\n', (3587, 3591), True, 'import numpy as np\n'), ((3597, 3612), 'numpy.deg2rad', 'np.deg2rad', (['dec'], {}), '(dec)\n', (3607, 3612), True, 'import numpy as np\n'), ((3672, 3686), 'numpy.deg2rad', 'np.deg2rad', (['ra'], {}), '(ra)\n', (3682, 3686), True, 'import numpy as np\n'), ((3692, 3707), 'numpy.deg2rad', 'np.deg2rad', (['dec'], {}), '(dec)\n', (3702, 3707), True, 
'import numpy as np\n'), ((3767, 3781), 'numpy.deg2rad', 'np.deg2rad', (['ra'], {}), '(ra)\n', (3777, 3781), True, 'import numpy as np\n'), ((3787, 3802), 'numpy.deg2rad', 'np.deg2rad', (['dec'], {}), '(dec)\n', (3797, 3802), True, 'import numpy as np\n'), ((3863, 3877), 'numpy.deg2rad', 'np.deg2rad', (['ra'], {}), '(ra)\n', (3873, 3877), True, 'import numpy as np\n'), ((3883, 3898), 'numpy.deg2rad', 'np.deg2rad', (['dec'], {}), '(dec)\n', (3893, 3898), True, 'import numpy as np\n'), ((4583, 4608), 'numpy.arange', 'np.arange', (['(0.0)', '(15.0)', '(1.0)'], {}), '(0.0, 15.0, 1.0)\n', (4592, 4608), True, 'import numpy as np\n'), ((4626, 4657), 'orphics.stats.bin2D', 'stats.bin2D', (['modrmap', 'bin_edges'], {}), '(modrmap, bin_edges)\n', (4637, 4657), False, 'from orphics import maps, io, cosmology, catalogs, stats, mpi\n'), ((6354, 6379), 'pixell.enmap.enmap', 'enmap.enmap', (['ystack', 'nwcs'], {}), '(ystack, nwcs)\n', (6365, 6379), False, 'from pixell import enmap, reproject\n'), ((6379, 6404), 'pixell.enmap.enmap', 'enmap.enmap', (['cstack', 'nwcs'], {}), '(cstack, nwcs)\n', (6390, 6404), False, 'from pixell import enmap, reproject\n'), ((6404, 6429), 'pixell.enmap.enmap', 'enmap.enmap', (['dstack', 'nwcs'], {}), '(dstack, nwcs)\n', (6415, 6429), False, 'from pixell import enmap, reproject\n'), ((6312, 6334), 'numpy.deg2rad', 'np.deg2rad', (['(0.5 / 60.0)'], {}), '(0.5 / 60.0)\n', (6322, 6334), True, 'import numpy as np\n')] |
import os
import random
import cv2
import numpy as np
import torch
import torchvision.datasets as datasets
import tqdm
from dg_util.python_utils import drawing
from dg_util.python_utils import pytorch_util as pt_util
from dg_util.python_utils.persistent_dataloader import PersistentDataLoader
from sklearn.decomposition import PCA
from torch.utils.data.dataloader import DataLoader
import arg_parser
from datasets.r2v2_dataset import R2V2Dataset
from models.vince_model import VinceModel
from utils.transforms import RepeatedImagenetTransform
from utils.transforms import StandardVideoTransform
from utils.util_functions import to_uint8
"""
Example run command
python visualizations/view_nearest_neighbors.py \
--title sample_mosaic \
--description none \
--checkpoint-dir logs/moco/MocoImagenetModel/checkpoints_r18-b-256-q-65536-fsize-64-vid-ibc-4-no-self/ \
--data-path /home/xkcd/datasets/r2v2_large_with_ids/ \
--num-workers 80 --backbone ResNet18 --pytorch-gpu-ids 0 --feature-extractor-gpu-ids 0 \
-b 512 \
"""
# Number of query images sampled for the nearest-neighbor mosaics.
NUM_QUERIES = 100
# Neighbors retrieved (and drawn) per query image.
NUM_NEIGHBORS = 10
# Upper bound on the gallery size that queries are matched against.
NUM_TO_COMPARE = 50000
# Dataset split used for the ImageNet gallery directory.
data_subset = "val"
# Parsed once at import time; module-level helpers below read this config.
args = arg_parser.parse_args()
def get_data_item(data):
    """Pull the image tensor out of a dataloader batch.

    Accepts either a dict with a "data" entry or a (data, label)
    pair; the singleton frame dimension (dim 1) is squeezed away.
    Raises NotImplementedError for any other batch layout.
    """
    if isinstance(data, dict):
        batch = data["data"]
    elif isinstance(data, (list, tuple)):
        batch, _label = data
    else:
        raise NotImplementedError
    return batch.squeeze(1)
def dataset_nn(model, data_loader):
    """Extract embeddings and raw images for a gallery of dataset samples.

    Runs `model.get_embeddings` over batches from `data_loader` until
    roughly NUM_TO_COMPARE samples are collected, reduces embeddings to
    64-d with PCA when the backbone emits a wider feature, and
    L2-normalizes them so dot products are cosine similarities.

    Returns:
        features_array: (N, <=64) float32 torch tensor on model.device.
        image_array: (N, H, W, 3) uint8 numpy array of the inputs.
    """
    with torch.no_grad():
        # Round NUM_TO_COMPARE up to a whole number of batches, capped by the dataset size.
        num_to_compare = min(int(NUM_TO_COMPARE / args.batch_size + 1) * args.batch_size, len(data_loader.dataset))
        # Get features
        image_array = np.zeros((num_to_compare, args.input_height, args.input_width, 3), dtype=np.uint8)
        features_array = None
        data_ind = 0
        pbar = tqdm.tqdm(total=num_to_compare)
        for data in data_loader:
            data = get_data_item(data)
            data_size = data.shape[0]
            data = data.to(model.device)
            output = model.get_embeddings({"data": data, "batch_type": ("images", len(data))})
            features = output["extracted_features"]
            if features_array is None:
                # Lazily sized: feature width is only known after the first forward pass.
                feature_size = features.shape[1]
                features_array = torch.zeros((num_to_compare, feature_size), dtype=torch.float32, device=model.device)
            # Bug fix: clamp the final (possibly partial) write so the slice
            # assignment cannot overflow the preallocated buffers when
            # num_to_compare is not an exact multiple of the batch size
            # (the clamped slice with an unclamped right-hand side raised a
            # shape-mismatch error).
            take = min(data_size, num_to_compare - data_ind)
            features_array[data_ind: data_ind + take] = features[:take]
            data = to_uint8(data)
            image_array[data_ind: data_ind + take] = data[:take]
            data_ind += data_size
            pbar.update(data_size)
            if data_ind >= num_to_compare:
                break
        pbar.close()
        if features_array.shape[1] != 64:
            # Project wider embeddings to 64-d so every backbone is compared
            # in the same-sized space.
            features_array_new = pt_util.to_numpy(features_array)
            pca = PCA(n_components=64)
            features_array_new = pca.fit_transform(features_array_new)
            features_array_new = pt_util.from_numpy(features_array_new).to(features_array.device)
            features_array = features_array_new
        features_array = torch.nn.functional.normalize(features_array, dim=-1)
        return features_array, image_array
def draw_nns(source_features, source_images, source_name, target_features=None, target_images=None, target_name=None):
    """Render nearest-neighbor strips from a source gallery into a target gallery.

    For NUM_QUERIES randomly chosen source images, finds the NUM_NEIGHBORS
    most similar target images (by dot product of the already L2-normalized
    features) and writes one JPEG strip per query (query first, padded,
    then its neighbors) under args.checkpoint_dir.

    When target_* is omitted, the search runs within the source gallery
    itself and the trivial self-match (rank 0) is dropped.
    """
    skip_first = False
    if target_features is None:
        # Self-search: the top hit is the query itself, so retrieve one
        # extra neighbor and drop the first column below.
        target_features = source_features
        target_images = source_images
        target_name = source_name
        skip_first = True
    num_to_compare = target_features.shape[0]
    # Fixed seeds so the same queries are selected across runs/galleries.
    torch.manual_seed(0)
    random.seed(0)
    np.random.seed(0)
    rand_selection = np.sort(np.random.choice(source_features.shape[0], NUM_QUERIES, replace=False))
    query_features = source_features[rand_selection]
    # Features are unit-norm, so matrix product == cosine similarity.
    dists = torch.mm(query_features, target_features.T)
    val, neighbors = torch.topk(dists, k=(NUM_NEIGHBORS + int(skip_first)), dim=1, sorted=True, largest=True)
    if skip_first:
        neighbors = neighbors[:, 1:]
    # Gather the neighbor images: (NUM_QUERIES, NUM_NEIGHBORS, H, W, 3).
    neighbors = target_images[pt_util.to_numpy(neighbors)]
    os.makedirs(
        os.path.join(args.checkpoint_dir, "neighbors_from_%s_to_%s" % (source_name, target_name)), exist_ok=True
    )
    # Get images
    for ii in tqdm.tqdm(range(neighbors.shape[0])):
        images = []
        image = source_images[rand_selection[ii]].copy()
        # Pad the query so it is visually separated from its neighbors.
        image = np.pad(image, ((10, 10), (10, 10), (0, 0)), "constant")
        images.append(image)
        for jj in range(neighbors.shape[1]):
            image = neighbors[ii, jj].copy()
            images.append(image)
        subplot = drawing.subplot(images, 1, neighbors.shape[1] + 1, args.input_width, args.input_height, border=5)
        cv2.imwrite(
            os.path.join(
                args.checkpoint_dir,
                "neighbors_from_%s_to_%s" % (source_name, target_name),
                "bsize_%06d_%03d.jpg" % (num_to_compare, ii),
            ),
            # RGB -> BGR for OpenCV.
            subplot[:, :, ::-1],
        )
def main():
    """Build nearest-neighbor mosaics within and across the YouTube (R2V2)
    and ImageNet validation galleries using a restored VinceModel.

    Produces four sets of strips: youtube->youtube, imagenet->imagenet,
    imagenet->youtube, and youtube->imagenet.
    """
    with torch.no_grad():
        torch_devices = args.pytorch_gpu_ids
        device = "cuda:" + str(torch_devices[0])
        model = VinceModel(args)
        model.restore()
        model.eval()
        model.to(device)
        yt_dataset = R2V2Dataset(
            args, "val", transform=StandardVideoTransform(args.input_size, "val"), num_images_to_return=1
        )
        # Reseed before each gallery so dataloader shuffling is reproducible.
        torch.manual_seed(0)
        random.seed(0)
        np.random.seed(0)
        data_loader = PersistentDataLoader(
            yt_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_workers,
            pin_memory=True,
            collate_fn=R2V2Dataset.collate_fn,
            worker_init_fn=R2V2Dataset.worker_init_fn,
        )
        yt_features, yt_images = dataset_nn(model, data_loader)
        del data_loader
        draw_nns(yt_features, yt_images, "youtube")

        torch.manual_seed(0)
        random.seed(0)
        np.random.seed(0)
        valdir = os.path.join(args.imagenet_data_path, data_subset)
        transform = RepeatedImagenetTransform(args.input_height, data_subset="val", repeats=1)
        imagenet_dataset = datasets.ImageFolder(valdir, transform)
        data_loader = DataLoader(
            imagenet_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True
        )
        imagenet_features, imagenet_images = dataset_nn(model, data_loader)
        del data_loader
        draw_nns(imagenet_features, imagenet_images, "imagenet")
        draw_nns(imagenet_features, imagenet_images, "imagenet", yt_features, yt_images, "youtube")
        draw_nns(yt_features, yt_images, "youtube", imagenet_features, imagenet_images, "imagenet")
# Script entry point: generate the nearest-neighbor visualizations.
if __name__ == "__main__":
    main()
| [
"sklearn.decomposition.PCA",
"torch.utils.data.dataloader.DataLoader",
"utils.util_functions.to_uint8",
"torchvision.datasets.ImageFolder",
"numpy.random.seed",
"models.vince_model.VinceModel",
"dg_util.python_utils.pytorch_util.to_numpy",
"utils.transforms.StandardVideoTransform",
"numpy.random.cho... | [((1134, 1157), 'arg_parser.parse_args', 'arg_parser.parse_args', ([], {}), '()\n', (1155, 1157), False, 'import arg_parser\n'), ((3567, 3587), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (3584, 3587), False, 'import torch\n'), ((3592, 3606), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (3603, 3606), False, 'import random\n'), ((3611, 3628), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3625, 3628), True, 'import numpy as np\n'), ((3797, 3840), 'torch.mm', 'torch.mm', (['query_features', 'target_features.T'], {}), '(query_features, target_features.T)\n', (3805, 3840), False, 'import torch\n'), ((1500, 1515), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1513, 1515), False, 'import torch\n'), ((1679, 1766), 'numpy.zeros', 'np.zeros', (['(num_to_compare, args.input_height, args.input_width, 3)'], {'dtype': 'np.uint8'}), '((num_to_compare, args.input_height, args.input_width, 3), dtype=np\n .uint8)\n', (1687, 1766), True, 'import numpy as np\n'), ((1829, 1860), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': 'num_to_compare'}), '(total=num_to_compare)\n', (1838, 1860), False, 'import tqdm\n'), ((3658, 3728), 'numpy.random.choice', 'np.random.choice', (['source_features.shape[0]', 'NUM_QUERIES'], {'replace': '(False)'}), '(source_features.shape[0], NUM_QUERIES, replace=False)\n', (3674, 3728), True, 'import numpy as np\n'), ((4038, 4065), 'dg_util.python_utils.pytorch_util.to_numpy', 'pt_util.to_numpy', (['neighbors'], {}), '(neighbors)\n', (4054, 4065), True, 'from dg_util.python_utils import pytorch_util as pt_util\n'), ((4092, 4185), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('neighbors_from_%s_to_%s' % (source_name, target_name))"], {}), "(args.checkpoint_dir, 'neighbors_from_%s_to_%s' % (source_name,\n target_name))\n", (4104, 4185), False, 'import os\n'), ((4366, 4421), 'numpy.pad', 'np.pad', (['image', '((10, 10), (10, 10), (0, 0))', '"""constant"""'], {}), "(image, ((10, 
10), (10, 10), (0, 0)), 'constant')\n", (4372, 4421), True, 'import numpy as np\n'), ((4593, 4695), 'dg_util.python_utils.drawing.subplot', 'drawing.subplot', (['images', '(1)', '(neighbors.shape[1] + 1)', 'args.input_width', 'args.input_height'], {'border': '(5)'}), '(images, 1, neighbors.shape[1] + 1, args.input_width, args.\n input_height, border=5)\n', (4608, 4695), False, 'from dg_util.python_utils import drawing\n'), ((4990, 5005), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5003, 5005), False, 'import torch\n'), ((5117, 5133), 'models.vince_model.VinceModel', 'VinceModel', (['args'], {}), '(args)\n', (5127, 5133), False, 'from models.vince_model import VinceModel\n'), ((5363, 5383), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (5380, 5383), False, 'import torch\n'), ((5392, 5406), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (5403, 5406), False, 'import random\n'), ((5415, 5432), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5429, 5432), True, 'import numpy as np\n'), ((5455, 5663), 'dg_util.python_utils.persistent_dataloader.PersistentDataLoader', 'PersistentDataLoader', (['yt_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)', 'collate_fn': 'R2V2Dataset.collate_fn', 'worker_init_fn': 'R2V2Dataset.worker_init_fn'}), '(yt_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True, collate_fn=R2V2Dataset.\n collate_fn, worker_init_fn=R2V2Dataset.worker_init_fn)\n', (5475, 5663), False, 'from dg_util.python_utils.persistent_dataloader import PersistentDataLoader\n'), ((5901, 5921), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (5918, 5921), False, 'import torch\n'), ((5930, 5944), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (5941, 5944), False, 'import random\n'), ((5953, 5970), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5967, 5970), True, 
'import numpy as np\n'), ((5988, 6038), 'os.path.join', 'os.path.join', (['args.imagenet_data_path', 'data_subset'], {}), '(args.imagenet_data_path, data_subset)\n', (6000, 6038), False, 'import os\n'), ((6059, 6133), 'utils.transforms.RepeatedImagenetTransform', 'RepeatedImagenetTransform', (['args.input_height'], {'data_subset': '"""val"""', 'repeats': '(1)'}), "(args.input_height, data_subset='val', repeats=1)\n", (6084, 6133), False, 'from utils.transforms import RepeatedImagenetTransform\n'), ((6161, 6200), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['valdir', 'transform'], {}), '(valdir, transform)\n', (6181, 6200), True, 'import torchvision.datasets as datasets\n'), ((6223, 6344), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['imagenet_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(imagenet_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True)\n', (6233, 6344), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((2456, 2470), 'utils.util_functions.to_uint8', 'to_uint8', (['data'], {}), '(data)\n', (2464, 2470), False, 'from utils.util_functions import to_uint8\n'), ((2785, 2817), 'dg_util.python_utils.pytorch_util.to_numpy', 'pt_util.to_numpy', (['features_array'], {}), '(features_array)\n', (2801, 2817), True, 'from dg_util.python_utils import pytorch_util as pt_util\n'), ((2836, 2856), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(64)'}), '(n_components=64)\n', (2839, 2856), False, 'from sklearn.decomposition import PCA\n'), ((3103, 3156), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['features_array'], {'dim': '(-1)'}), '(features_array, dim=-1)\n', (3132, 3156), False, 'import torch\n'), ((4724, 4863), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('neighbors_from_%s_to_%s' % (source_name, target_name))", "('bsize_%06d_%03d.jpg' % 
(num_to_compare, ii))"], {}), "(args.checkpoint_dir, 'neighbors_from_%s_to_%s' % (source_name,\n target_name), 'bsize_%06d_%03d.jpg' % (num_to_compare, ii))\n", (4736, 4863), False, 'import os\n'), ((2281, 2371), 'torch.zeros', 'torch.zeros', (['(num_to_compare, feature_size)'], {'dtype': 'torch.float32', 'device': 'model.device'}), '((num_to_compare, feature_size), dtype=torch.float32, device=\n model.device)\n', (2292, 2371), False, 'import torch\n'), ((5274, 5320), 'utils.transforms.StandardVideoTransform', 'StandardVideoTransform', (['args.input_size', '"""val"""'], {}), "(args.input_size, 'val')\n", (5296, 5320), False, 'from utils.transforms import StandardVideoTransform\n'), ((2961, 2999), 'dg_util.python_utils.pytorch_util.from_numpy', 'pt_util.from_numpy', (['features_array_new'], {}), '(features_array_new)\n', (2979, 2999), True, 'from dg_util.python_utils import pytorch_util as pt_util\n')] |
# -*- coding: utf-8 -*-
'''
Texas A&M University Sounding Rocketry Team
SRT-6 | 2018-2019
%-------------------------------------------------------------%
TAMU SRT
_____ __ _____ __ __
/ ___/______ __ _____ ___/ / / ___/__ ___ / /________ / /
/ (_ / __/ _ \/ // / _ \/ _ / / /__/ _ \/ _ \/ __/ __/ _ \/ /
\___/_/ \___/\_,_/_//_/\_,_/ \___/\___/_//_/\__/_/ \___/_/
%-------------------------------------------------------------%
Filepath:
gc/srt_gc_launchGui/srt_gc_launchTools.py
Developers:
(C) <NAME> 20181108
(L) <NAME> ########
Description:
<description>
Input(s):
<none>
Output(s):
<outputs>
'''
# Installed modules --> Utilities
import numpy as np
class Object(object):
    '''
    Minimal attribute bag: an empty class whose instances act as
    ad-hoc namespaces for dynamically assigned fields.
    '''
    pass
class Tools():

    def resize(self,grid,rowStretch,colStretch):
        '''
        Row & Column Resizing

        Applies per-index stretch factors to a Qt grid layout.
        grid       : QGridLayout-like object with setRowStretch/setColumnStretch
        rowStretch : sequence of row stretch factors (index = row number)
        colStretch : sequence of column stretch factors (index = column number)
        '''
        for i, stretch in enumerate(rowStretch):
            grid.setRowStretch(i, stretch)
        for i, stretch in enumerate(colStretch):
            grid.setColumnStretch(i, stretch)

    def extrap(self,t,x,tq,dt):
        '''
        Data Extrapolation

        Linearly extrapolates the signal x(t) forward by tq seconds using
        the slope over roughly the last tq worth of samples (dt = sample
        period).  Fixes from review:
          - the two branch comments were swapped in the original;
          - round(tq/dt) == 1 previously divided by zero (t[-1]-t[-1]);
          - a single-sample history now returns the last value instead of
            raising.
        '''
        if len(t) < 2:
            # Not enough history to form a slope; hold the last value.
            return x[-1]
        # No. past values to use in linear fit (clamped to >= 2 so the
        # finite difference below is always well-defined)
        n = max(int(round(tq/dt)), 2)
        if (n < len(t)):
            # Only use past n packets
            dxdt = (x[-1] - x[-n])/(t[-1] - t[-n])
        else:
            # Use all past packets
            dxdt = (x[-1] - x[0])/(t[-1] - t[0])
        xq = x[-1] + tq*dxdt
        return xq

    def vapPress(self,T0):
        '''
        Determine N2O Vapor Pressure

        T0 : saturation temperature [deg F]
        Returns the N2O vapor pressure [psi] from an Antoine-style
        correlation evaluated in Kelvin.
        '''
        f2r = 459.67            # Fahrenheit -> Rankine offset
        r2k = 0.55556           # Rankine -> Kelvin factor
        pa2psi = 0.0001450377   # Pa -> psi
        G = [96.512,-4045,-12.277,0.0000289,2]  # correlation coefficients
        T0 = (T0 + f2r)*r2k     # convert to Kelvin
        pSat0 = np.exp(G[0] + G[1]/T0 + G[2]*np.log(T0) + G[3]*pow(T0,G[4])) # Initial vapor pressure of N2O [Pa]
        pSat0 = pSat0*pa2psi    # convert to psi
        return pSat0
"numpy.log"
] | [((2114, 2124), 'numpy.log', 'np.log', (['T0'], {}), '(T0)\n', (2120, 2124), True, 'import numpy as np\n')] |
# coding=utf-8
# Based on:
# HuggingFace Transformers
# See https://github.com/huggingface/transformers/LICENSE for details.
#################################################
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil
import pdb
from typing import Dict, List, Tuple
import time
import hashlib
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from sklearn.metrics import average_precision_score
from models import (
WEIGHTS_NAME,
AdamW,
PreTrainedModel,
RobertaConfig,
RobertaForMaskedLM,
get_linear_schedule_with_warmup,
)
from data import video_data_helper
from data.video_data_helper import binarize
from utils.ava_eval_helper import evaluate_ava
logger = logging.getLogger(__name__)

# Supported model architectures: key -> (config class, masked-LM class).
MODEL_CLASSES = {
    "roberta": (RobertaConfig, RobertaForMaskedLM),
}

# Placeholder feature vector used for the [CLS]/[SEP]-style boundary slots.
ZERO_FEAT2304 = np.zeros((2304,))

# AVA evaluation window (seconds within each video).
EVAL_START_SEC = 902 # inclusive
EVAL_END_SEC = 1799 # not inclusive

# Precomputed SlowFast-baseline evaluation metadata.
# NOTE(review): pickle.load on checked-in project files - trusted input assumed.
with open('data/ava/slowfast_baseline_outputs/ava_eval_data.pkl', 'rb') as f:
    (excluded_keys,
     class_whitelist,
     categories,
     groundtruth,
     video_idx_to_name) = pickle.load(f)
with open('data/ava/slowfast_baseline_outputs/predictions-29.4.pkl', 'rb') as f:
    (all_preds,
     all_ori_boxes,
     all_metadata) = pickle.load(f)
# Inverse of video_idx_to_name, for name-based lookups.
video_name_to_idx = {video_idx_to_name[key] : key for key in range(len(video_idx_to_name))}
logger.info(video_name_to_idx)
logger.info(video_idx_to_name)

# Projection weights used by proj(); populated elsewhere at runtime.
proj_W = None
proj_b = None
def proj(x):
    """Apply the module-level affine projection: x @ proj_W + proj_b.

    proj_W / proj_b start out as None and are assigned elsewhere at
    runtime, so this must only be called once they are populated.
    """
    projected = torch.matmul(x, proj_W)
    return projected + proj_b
class VideoDataset(Dataset):
    """Dataset of temporal spans of person-box features extracted from videos.

    Each item is a list of examples; an example packs, for every box in a
    span of `args.secs_per_example` seconds: track ("link") ids, several
    positional/scene-id encodings, optional action labels, precomputed
    visual features, and spatial box encodings (see construct_example).
    Three modes are supported via args: action recognition, long-term
    feature training, and pretraining; args.same_movie additionally groups
    spans by video so two spans from the same movie can be drawn.
    """

    def __init__(self, args, evaluate):
        self.evaluate = evaluate
        self.secs_per_example = args.secs_per_example
        self.all_features = video_data_helper.load_features(
            args.eval_feature_file if evaluate else args.train_feature_file,
            args,
        )
        # Which annotation loader to use depends on the training mode.
        if args.action_recognition:
            self.videos = video_data_helper.load_video_data(
                args.eval_data_file if evaluate else args.train_data_file,
                args,
            )
        elif args.train_long_term:
            self.videos, self.val_set, self.test_set = video_data_helper.load_mc_video_data(args, evaluate)
        else:
            if evaluate:
                self.videos = video_data_helper.load_video_data(
                    args.eval_data_file if evaluate else args.train_data_file,
                    args,
                )
            else:
                self.videos, self.val_set, self.test_set = video_data_helper.load_mc_video_data(args, evaluate)
        self.args = args
        # self.spans holds candidate (video_name, center_sec, tail_sec)
        # windows; exactly one of center_sec/tail_sec is set per entry.
        self.spans = []
        if args.action_recognition:
            for video_name in self.videos.keys():
                v = self.videos[video_name]
                # for action recognition only, both train and test use 15 min only.
                for center_sec in range(EVAL_START_SEC, EVAL_END_SEC):
                    # Keep the window only if at least one second in it has boxes.
                    if sum(
                        [sec in v.keys()
                         for sec in range(center_sec - self.secs_per_example // 2,
                                          center_sec + self.secs_per_example // 2)
                         ]) > 0:
                        self.spans.append((video_name, center_sec, None))
            if evaluate:
                # Repeat spans so evaluation samples each one multiple times.
                self.spans = self.spans * args.eval_sample_x
        if args.same_movie:
            # Keyed by video so __getitem__ can draw two spans from one movie.
            self.spans = {}
        # NOTE(review): this tail-anchored pass also runs in the
        # action_recognition case, appending tail spans on top of the
        # center spans collected above - confirm that is intended.
        for video_name in self.videos.keys():
            v = self.videos[video_name]
            if args.same_movie:
                positive_id = video_name
            # complete spans
            range_start = min(v.keys()) + self.secs_per_example - 1
            range_end = max(v.keys()) + 1
            # Stride 60s for (non-end-task) evaluation, otherwise every second.
            gap = 60 if (self.evaluate and not args.is_end_task) else 1
            found_long_span = False
            for tail_sec in range(range_start, range_end, gap):
                if sum(
                    [sec in v.keys()
                     for sec in range(tail_sec + 1 - self.secs_per_example,
                                      tail_sec + 1)
                     ]) > 0:
                    if args.same_movie:
                        if positive_id not in self.spans:
                            self.spans[positive_id] = []
                        self.spans[positive_id].append((video_name, None, tail_sec))
                    else:
                        self.spans.append((video_name, None, tail_sec))
                    found_long_span = True
            if not found_long_span and args.train_long_term:
                # Guarantee at least one span per video for long-term training.
                self.spans.append((video_name, None, range_end - 1))
        self.force_len = None
        print(len(set([x[0] for x in self.spans])), 'videos in spans in total')
        print(len(self.videos), 'video data loaded in total')

    def __len__(self):
        """Dataset length; depends on mode (end-task training counts unique
        videos x epochs, same_movie counts all grouped spans)."""
        if self.args.is_end_task and not self.evaluate:
            return len(set([x[0] for x in self.spans])) * int(self.args.num_train_epochs)
        if self.force_len is not None:
            return self.force_len
        if self.args.same_movie:
            return sum([len(x) for x in self.spans.values()])
        return len(self.spans)

    def __getitem__(self, item):
        """Return a list of one example (or two from the same movie when
        args.same_movie), each produced by construct_example and suffixed
        with the source video name."""
        if self.args.same_movie:
            if self.evaluate:
                positive_id = list(self.spans.keys())[item % len(self.spans.keys())]
            else:
                positive_id = random.choice(list(self.spans.keys()))
            selected = [random.choice(self.spans[positive_id]) for _ in range(2)]
        else:
            if self.evaluate:
                selected = [self.spans[item % len(self.spans)]]
            else:
                selected = [random.choice(self.spans)]
        ret = []
        construct_func = self.construct_example
        for video_name, center_start, tail_start in selected:
            # Retry with random tail anchors until a non-empty example is built.
            for _ in range(100):
                one_ex = construct_func(
                    video_name,
                    center_start=center_start,
                    tail_start=tail_start
                )
                if one_ex is not None:
                    break
                v = self.videos[video_name]
                tail_start = random.choice(range(min(v.keys()), max(v.keys()) + 1))
            ret.append(one_ex + [video_name])
        return ret

    def construct_example(self, video_name, center_start=None, tail_start=None):
        """Build one training example anchored either at a center second
        (window grows outward in both directions) or at a tail second
        (window extends backward).  Returns None when the span contains no
        boxes; otherwise a list of tensors: link ids, three positional-id
        variants, three scene-id variants, action labels, long-term labels,
        features, spatial encodings, plus the raw seconds and box strings.
        """
        def get_spatial_encoding(box, perturb=0.0):
            # box is a comma-separated "x0,y0,x1,y1" string; optionally
            # jitter the corners during training and append the box area.
            box = [float(x) for x in box.split(',')]
            if perturb > 0 and not self.evaluate:
                p0 = (box[2] - box[0]) * perturb
                p1 = (box[3] - box[1]) * perturb
                box = [
                    box[0] + p0 * random.uniform(-1.0, 1.0),
                    box[1] + p1 * random.uniform(-1.0, 1.0),
                    box[2] + p0 * random.uniform(-1.0, 1.0),
                    box[3] + p1 * random.uniform(-1.0, 1.0),
                ]
            box.append((box[2] - box[0]) * (box[3] - box[1]))
            return np.array(box)

        args = self.args
        is_pretrain = (not args.action_recognition) and (not args.train_long_term)
        is_mc = not args.action_recognition and not (is_pretrain and self.evaluate)
        video = self.videos[video_name]
        if is_mc:
            # Per-video feature archives on disk for the movie-clip modes.
            video_features = np.load(
                os.path.join(args.mc_train_feature_file, video_name + '.npz'),
                allow_pickle=True,
            )['a'].item()
        else:
            video_features = self.all_features[video_name] if (
                self.all_features is not None) else None
        ex_link_ids = []
        ex_scene_ids = []
        ex_boxes = []
        ex_secs = []
        ex_actions = []
        ex_long_term = []
        ex_features = []
        ex_spatial = []
        # NOTE(review): all_tube_exs, before_action, cur_mc_feat_ava and
        # original_ex_secs below are assigned but never used.
        all_tube_exs = {}
        for shift_idx, sec_shift in enumerate(range(self.secs_per_example)):
            # Center anchoring alternates +1, -1, +2, -2, ... around center_start.
            if center_start is not None:
                if sec_shift % 2 == 0:
                    sec = center_start + (sec_shift + 1) // 2
                    auged_sec = center_start + (shift_idx + 1) // 2
                else:
                    sec = center_start - (sec_shift + 1) // 2
                    auged_sec = center_start - (shift_idx + 1) // 2
            # Tail anchoring walks backward from tail_start.
            if tail_start is not None:
                sec = tail_start - sec_shift
                auged_sec = tail_start - shift_idx
            if sec in video:
                for box, (scene_id, link_id, actions) in video[sec].items():
                    # Reserve 4 slots for special tokens / boundaries.
                    if len(ex_link_ids) < args.max_position_embeddings - 4:
                        ex_link_ids.append(link_id)
                        ex_secs.append(auged_sec)
                        ex_scene_ids.append(scene_id)
                        ex_boxes.append(box)
                        if args.action_recognition:
                            ex_actions.append(binarize(actions))
                        if args.train_long_term:
                            before_action = actions
                            ex_long_term.append(actions)
                        cur_feat = video_features[sec][box]
                        cur_mc_feat_ava = None
                        if is_mc:
                            cur_mc_feat_ava = cur_feat
                        ex_features.append(cur_feat)
                        ex_spatial.append(get_spatial_encoding(box, 0.2))
        if len(ex_secs) == 0:
            return None
        original_ex_secs = ex_secs
        assert (max(ex_secs) - min(ex_secs)) < args.secs_per_example
        halfway = args.max_position_embeddings // 2
        if tail_start is None:
            tail_start = max(ex_secs)
        if center_start is None:
            center_start = (max(ex_secs) + min(ex_secs)) // 2
        # Three positional encodings: forward, backward, and centered.
        increasing_pos_ids = [x - min(ex_secs) for x in ex_secs]
        decreasing_pos_ids = [max(ex_secs) - x for x in ex_secs]
        center_pos_ids = [max(0, x - center_start + halfway) for x in ex_secs]
        increasing_scene_ids = [x - min(ex_scene_ids) for x in ex_scene_ids]
        decreasing_scene_ids = [max(ex_scene_ids) - x for x in ex_scene_ids]
        # The "center" scene id is the scene of the second closest to center_start.
        dists = [abs(x - center_start) for x in ex_secs]
        for dist, tmp_scene_id in zip(dists, ex_scene_ids):
            if dist == min(dists):
                center_scene_id = tmp_scene_id
        center_scene_ids = [max(0, x - center_scene_id + halfway) for x in ex_scene_ids]
        # Randomly permute link ids so absolute track identity carries no signal.
        n_links = len(set(ex_link_ids))
        rand_link_ids = dict(zip(
            list(set(ex_link_ids)),
            random.sample(range(n_links), n_links),
        ))
        ex_link_ids = [rand_link_ids[x] + 2 for x in ex_link_ids]
        if args.action_recognition:
            ex_actions = [binarize([])] + ex_actions + [binarize([])]
        else:
            ex_actions = []
        if args.train_long_term:
            ex_long_term = [-1] + ex_long_term + [-1]
        else:
            ex_long_term = []
        # Prepend/append boundary tokens (ids 0 and 1) around every sequence.
        ex_link_ids = [0] + ex_link_ids + [1] # end doesn't belong to a link
        increasing_pos_ids = [0] + [x + 2 for x in increasing_pos_ids] + [1] # end can have a new pos
        decreasing_pos_ids = [0] + [x + 2 for x in decreasing_pos_ids] + [1] # end can have a new pos
        center_pos_ids = [0] + [x + 2 for x in center_pos_ids] + [1] # end can have a new pos
        increasing_scene_ids = [0] + [x + 2 for x in increasing_scene_ids] + [1]
        decreasing_scene_ids = [0] + [x + 2 for x in decreasing_scene_ids] + [1]
        center_scene_ids = [0] + [x + 2 for x in center_scene_ids] + [1]
        ex_features = [ZERO_FEAT2304] + ex_features + [ZERO_FEAT2304]
        ex_spatial = [ex_spatial[0] * 0.0] + ex_spatial + [ex_spatial[0] * 0.0]
        # The final +2 shifts all ids past the reserved padding/special range.
        return [torch.tensor(ex_link_ids) + 2,
                torch.tensor(increasing_pos_ids) + 2,
                torch.tensor(decreasing_pos_ids) + 2,
                torch.tensor(center_pos_ids) + 2,
                torch.tensor(increasing_scene_ids) + 2,
                torch.tensor(decreasing_scene_ids) + 2,
                torch.tensor(center_scene_ids) + 2,
                torch.tensor(ex_actions),
                torch.tensor(ex_long_term),
                torch.from_numpy(np.ascontiguousarray(ex_features)),
                torch.tensor(ex_spatial),
                ex_secs,
                ex_boxes,
                ]
def set_seed(args):
    """Seed all RNGs (python, numpy, torch, CUDA) with a rank-dependent
    value so distributed workers draw different but reproducible streams."""
    derived_seed = args.seed + args.local_rank + 1
    torch.manual_seed(derived_seed)
    np.random.seed(derived_seed)
    random.seed(derived_seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(derived_seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
    """Delete the oldest checkpoints so at most args.save_total_limit remain.

    No-op when save_total_limit is unset (None/0) or negative.  Bug fix:
    a negative limit previously fell through the `if not` check and
    deleted every checkpoint (len - negative > len candidates); the
    commented-out `<= 0` guard in the original made the intent clear.
    """
    if not args.save_total_limit:
        return
    if args.save_total_limit < 0:
        return
    # Check if we should delete older checkpoint(s)
    checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
    if len(checkpoints_sorted) <= args.save_total_limit:
        return
    number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)
    checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
    for checkpoint in checkpoints_to_be_deleted:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
        shutil.rmtree(checkpoint)
def get_mask_indices(x_batch, masked_indices, features=None):
    """Select whole id-groups of each example for masking, in place.

    Args:
        x_batch: (batch, seq) int tensor of group ids; ids 1/2/3 are reserved
            for padding/start/end and are never masked.
        masked_indices: (batch, seq) int tensor, mutated in place; positions
            belonging to every selected group are set to 1.
        features: optional (batch, seq, dim) tensor; positions whose feature
            vector sums to zero are treated as padding and excluded from the
            candidate id set.

    Returns:
        The (mutated) masked_indices tensor.
    """
    use_pos = None
    if features is not None:
        # All-zero feature rows mark padded positions.
        use_pos = (features.sum(axis=2) != 0)
    for i in range(x_batch.shape[0]):
        if use_pos is not None:
            cur_x = x_batch[i][use_pos[i]]
        else:
            cur_x = x_batch[i]
        x_ids = set(cur_x.tolist()) - set([1, 2, 3])  # remove padding, start, and end
        assert 0 not in x_ids
        if len(x_ids) == 0:
            # NOTE(review): only non-1024-dim features warn on empty examples —
            # presumably the 1024-dim modality expects them; confirm.
            if features is not None and features.shape[-1] != 1024:
                logger.info('warning: no masked elements in example')
            continue
        group_mask = {}
        # Re-sample until at least one group is chosen (15% chance per group).
        while sum(group_mask.values()) < 1:
            group_mask = {x_id: int(np.random.choice([0, 1], p=[0.85, 0.15])) for x_id in x_ids}
        for x_id in x_ids:
            if group_mask[x_id] == 1:
                assert x_id > 3
                # Mask every position of the batch row that carries this id.
                masked_indices[i, x_batch[i] == x_id] = 1
        assert masked_indices[i].sum() > 0
    return masked_indices
def perform_masking(masked_indices, inputs_embed_batch, contents):
    """BERT-style corruption of continuous input features, in place.

    Of the positions flagged in ``masked_indices``: ~80% are overwritten with
    the sentinel value -10 (the continuous analogue of [MASK]), ~10% are
    replaced with feature vectors sampled from unmasked content positions,
    and the remainder are left unchanged.

    Args:
        masked_indices: (batch, seq) bool tensor of positions to corrupt.
        inputs_embed_batch: (batch, seq, dim) feature tensor, mutated in place.
        contents: (batch, seq) bool tensor marking real content positions,
            used as the sampling pool for random replacement.

    Returns:
        The (mutated) inputs_embed_batch.
    """
    mask_dim = inputs_embed_batch.shape[2]
    # 80% of the time, replace masked input features with the [MASK] sentinel (-10)
    indices_replaced = torch.bernoulli(
        torch.full(masked_indices.shape, 0.8)
    ).bool() & masked_indices
    feat_mask = indices_replaced.view((
        indices_replaced.shape[0],
        indices_replaced.shape[1],
        1)).expand(-1, -1, mask_dim)
    inputs_embed_batch[feat_mask] = -10
    # 10% of the time (0.5 of the remaining 20%), replace with a random feature vector
    indices_random = torch.bernoulli(torch.full(masked_indices.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    # Pool of replacement vectors: content positions not already [MASK]-ed.
    not_replaced = inputs_embed_batch[(~indices_replaced & contents)].reshape((-1, mask_dim))
    num_to_sample = int(indices_random.sum())
    num_features = not_replaced.shape[0]
    if num_to_sample > 0 and num_features > 0:
        random_indices = np.random.choice(num_features, num_to_sample)
        inputs_embed_batch[indices_random[:, :, None].repeat(1, 1, mask_dim)] = not_replaced[random_indices].reshape((-1,))
    return inputs_embed_batch
def mask_tokens(link_batch: torch.Tensor,
                inc_scene_batch: torch.Tensor,
                action_batch: torch.Tensor,
                soft_label_batch: torch.Tensor,
                inputs_embed_batch: torch.Tensor,
                center_pos_batch: torch.Tensor,
                args,
                is_eval=False,
                dec_pos_batch=None) -> Tuple[torch.Tensor, torch.Tensor]:
    """Select mask positions and corrupt inputs/labels for masked prediction.

    Whole link-id groups are chosen via ``get_mask_indices``; labels
    (``action_batch``, ``soft_label_batch``) are set to -100 everywhere except
    masked positions so the loss is computed only there, and input features at
    masked positions are corrupted via ``perform_masking``.

    Returns (action_batch, inputs_embed_batch, soft_label_batch,
    masked_indices); NOTE(review): the declared 2-tuple return annotation does
    not match the actual 4-tuple.
    """
    masked_indices = torch.zeros(link_batch.shape, dtype=torch.int64)
    scene_masked_indices = None  # NOTE(review): assigned but never used
    masked_indices = get_mask_indices(link_batch, masked_indices, inputs_embed_batch)
    masked_indices = masked_indices.bool()
    # Content positions exclude padding (1), start (2) and end (3) ids.
    contents = (center_pos_batch != 1) & (center_pos_batch != 2) & (center_pos_batch != 3)
    out_masked_indices = masked_indices.clone().detach()
    if args.action_recognition:
        action_batch[~out_masked_indices[:, :, None].expand(-1, -1, args.num_action_classes)] = -100 # We only compute loss on masked tokens
    if args.use_soft_labels:
        soft_label_batch[~out_masked_indices[:, :, None].expand(-1, -1, args.soft_label_dim)] = -100
    # NOTE(review): `indices_replaced` is never used below; the bernoulli draw
    # still consumes RNG state, so removing it would change downstream randomness.
    indices_replaced = torch.bernoulli(torch.full(link_batch.shape, 0.8)).bool() & masked_indices
    if args.mask_sep:
        if not args.mask_sep_no_mask:
            # Mask each feature segment separately (only the action segment is supported).
            start = 0
            for cur_feat_dim in args.all_feat_dims:
                if cur_feat_dim == args.action_feat_dim:
                    cur_masked_indices = masked_indices
                else:
                    assert False
                inputs_embed_batch[:, :, start:start + cur_feat_dim] = perform_masking(
                    cur_masked_indices,
                    inputs_embed_batch[:, :, start:start + cur_feat_dim], contents)
                start += cur_feat_dim
    # NOTE(review): this call is unconditional, so with mask_sep the first
    # feature segment appears to be corrupted twice — confirm intended.
    inputs_embed_batch[:, :, :args.action_feat_dim] = perform_masking(
        masked_indices, inputs_embed_batch[:, :, :args.action_feat_dim], contents)
    # The rest of the time (10% of the time) the masked input tokens are kept unchanged
    return (action_batch, inputs_embed_batch, soft_label_batch, masked_indices)
def shared_collate(all_examples: List[torch.Tensor]):
    """Collate dataset examples into padded batch tensors plus meta lists.

    Each element of ``all_examples`` is a 1- or 2-tuple of 14-field examples;
    2-tuples are flattened so both halves become separate batch rows.  The
    first 7 fields (id sequences) are padded with 1, fields 8-9 (label
    tensors) with -100, and the last 5 fields are returned as plain lists.
    ``pad_sequence`` is presumably torch.nn.utils.rnn.pad_sequence — confirm
    against the file's imports.
    """
    if len(all_examples[0]) == 1:
        all_examples = [x[0] for x in all_examples]
    elif len(all_examples[0]) == 2:
        all_examples = [x[0] for x in all_examples] + [x[1] for x in all_examples]
    assert len(all_examples[0]) == 14
    zipped = list(zip(*all_examples))
    # Fields 10-14 (features, spatial, secs, boxes, video names) pass through unpadded.
    meta = [list(examples) for examples in zipped[9:]]
    padding_value = 1
    padding_values = [padding_value] * 7 + [-100] * 2
    return [pad_sequence(list(examples), batch_first=True, padding_value=padding_values[i])
            for i, examples in enumerate(zipped[:9])] + meta
def prepare_model_input(link_batch,
                        inc_pos_batch, dec_pos_batch, center_pos_batch,
                        inc_scene_batch, dec_scene_batch, center_scene_batch,
                        action_batch,
                        feature_batch, spatial_batch, sec_batch,
                        args, is_eval=False):
    """Pad features, apply masking, and move every batch tensor to args.device.

    Returns a 13-tuple consumed positionally by the train/eval loops:
    (action_batch, link_batch, inc/dec/center position ids,
    inc/dec/center scene ids, inputs_embed_batch, outputs_embed_batch,
    spatial_batch, soft_label_batch, target_locations).
    """
    inputs_embed_batch = pad_feature_batch(feature_batch, args.device)
    # `proj` is a module-level callable defined elsewhere in this file —
    # presumably projects features to soft-label logits; TODO confirm.
    soft_label_batch = proj(inputs_embed_batch) if args.use_soft_labels else None
    spatial_batch = pad_feature_batch(spatial_batch, args.device)
    if args.action_recognition:
        # Keep an uncorrupted copy of the features as the prediction target.
        outputs_embed_batch = inputs_embed_batch.clone().detach()
    else:
        outputs_embed_batch = None
    if args.mask_sep:
        # Sanity check only: every configured feature segment must be 2304-dim.
        start = 0
        for cur_feat_dim in args.all_feat_dims:
            if cur_feat_dim == 2304:
                pass
            else:
                assert False
            start += cur_feat_dim
    (action_batch, inputs_embed_batch, soft_label_batch,
     target_locations) = mask_tokens(
        link_batch,
        inc_scene_batch,
        action_batch,
        soft_label_batch,
        inputs_embed_batch,
        center_pos_batch,
        args,
        is_eval=is_eval,
        dec_pos_batch=dec_pos_batch)
    if action_batch is not None:
        action_batch = action_batch.to(args.device)
    target_locations = target_locations.to(args.device)
    link_batch = link_batch.to(args.device)
    inc_pos_batch = inc_pos_batch.to(args.device)
    dec_pos_batch = dec_pos_batch.to(args.device)
    center_pos_batch = center_pos_batch.to(args.device)
    inc_scene_batch = inc_scene_batch.to(args.device)
    dec_scene_batch = dec_scene_batch.to(args.device)
    center_scene_batch = center_scene_batch.to(args.device)
    return (action_batch, link_batch,
            inc_pos_batch, dec_pos_batch, center_pos_batch,
            inc_scene_batch, dec_scene_batch, center_scene_batch,
            inputs_embed_batch, outputs_embed_batch, spatial_batch,
            soft_label_batch,
            target_locations)
def freeze(mod):
    """Disable gradients for every parameter of ``mod`` and log the count."""
    frozen = 0
    for param in mod.parameters():
        param.requires_grad = False
        frozen += 1
    logger.info('freeze {} ({} params)'.format(mod, frozen))
def pad_feature_batch(feature_batch, device):
    """Stack a list of (len_i, dim) tensors into a zero-padded (B, max_len, dim) tensor on ``device``."""
    n_examples = len(feature_batch)
    longest = max(len(feats) for feats in feature_batch)
    feat_dim = feature_batch[0][0].shape[0]
    padded = torch.zeros((n_examples, longest, feat_dim), device=device)
    for row, feats in enumerate(feature_batch):
        padded[row, :len(feats)] = feats.to(device)
    return padded
def train(args, train_dataset, model: PreTrainedModel) -> Tuple[int, float]:
    """Train ``model`` on ``train_dataset`` and return (global_step, avg loss).

    Handles dataloader construction, optional head initialization / backbone
    freezing for action recognition, AdamW + linear-warmup scheduling,
    resumption from a checkpoint, apex fp16, (Distributed)DataParallel,
    periodic evaluation/logging, and checkpoint saving with rotation.
    """
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    def collate(all_examples: List[torch.Tensor]):
        return shared_collate(all_examples)
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        batch_size=args.train_batch_size,
        collate_fn=collate,
        num_workers=args.num_workers,
        pin_memory=True,
    )
    # Total optimization steps: explicit max_steps wins; end tasks run one epoch.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        if args.is_end_task:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    tmp_model = model.module if hasattr(model, "module") else model
    if args.action_recognition:
        # Initialize the action head from short-term model weights; copy the
        # decoder from the pretrained checkpoint when shapes match.
        logger.warn('Initializing final W/b')
        state_dict = torch.load(
            args.short_term_model_weights,
            map_location="cpu",
        )
        tmp_model.action_lm_head.decoder_feat.weight = nn.Parameter(state_dict['model_state']['head.projection.weight'])
        tmp_model.action_lm_head.decoder_feat.bias = nn.Parameter(state_dict['model_state']['head.projection.bias'])
        pretrained_state_dict = torch.load(args.force_load_checkpoint, map_location="cpu")
        tmp_weight = pretrained_state_dict['action_lm_head.decoder.weight']
        if tmp_model.action_lm_head.decoder.weight.shape == tmp_weight.shape:
            logger.warn('init pretrained weights')
            tmp_model.action_lm_head.decoder.weight = nn.Parameter(tmp_weight)
            tmp_bias = pretrained_state_dict['action_lm_head.decoder.bias']
            tmp_model.action_lm_head.bias = nn.Parameter(tmp_bias)
            tmp_model.action_lm_head.decoder.bias = tmp_model.action_lm_head.bias
        else:
            logger.warn('Not init pretrained weights {} {} not match'.format(
                tmp_model.action_lm_head.decoder.weight.shape,
                tmp_weight.shape
            ))
    if args.action_recognition:
        # Freeze the backbone and shared head layers; only decoders train.
        freeze(tmp_model.roberta)
        if hasattr(tmp_model, 'lm_head'):
            freeze(tmp_model.lm_head.dense)
        if hasattr(tmp_model, 'action_lm_head'):
            freeze(tmp_model.action_lm_head.dense)
        if hasattr(tmp_model, 'lm_head'):
            freeze(tmp_model.lm_head.layer_norm)
        if hasattr(tmp_model, 'action_lm_head'):
            freeze(tmp_model.action_lm_head.layer_norm)
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    # NOTE(review): the four lists below are populated but never used.
    rbt_no_d = []
    final_no_d = []
    rbt_d = []
    final_d = []
    for n, p in model.named_parameters():
        if any(nd in n for nd in no_decay):
            if 'roberta' in n:
                rbt_no_d.append(p)
            else:
                final_no_d.append(p)
        else:
            if 'roberta' in n:
                rbt_d.append(p)
            else:
                final_d.append(p)
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    # 10% of total steps used for linear warmup (args.warmup_steps is not used here).
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=round(t_total * 0.1), num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist
    if (
        args.model_name_or_path
        and os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt"))
        and os.path.isfile(os.path.join(args.model_name_or_path, "scheduler.pt"))
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
        logger.info("loading optimizer and scheduler from {}".format(args.model_name_or_path))
    if (
        args.force_load_checkpoint_opt
        and os.path.isfile(os.path.join(args.force_load_checkpoint_opt, "optimizer.pt"))
        and os.path.isfile(os.path.join(args.force_load_checkpoint_opt, "scheduler.pt"))
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.force_load_checkpoint_opt, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.force_load_checkpoint_opt, "scheduler.pt")))
        logger.info("loading optimizer and scheduler from {}".format(args.force_load_checkpoint_opt))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    steps_remain_in_current_epoch = -1
    # Check if continuing training from a checkpoint
    if args.model_name_or_path and os.path.exists(args.model_name_or_path):
        try:
            # set global_step to global_step of last saved checkpoint from model path
            checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", global_step)
            logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            logger.info(" Starting fine-tuning.")
    if args.force_load_checkpoint_opt:
        try:
            # set global_step to global_step of last saved checkpoint from model path
            checkpoint_suffix = args.force_load_checkpoint_opt.split("-")[-1].split("/")[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_remain_in_current_epoch = (len(train_dataloader) // args.gradient_accumulation_steps) - steps_trained_in_current_epoch
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", global_step)
            # logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
            logger.info(" Will train only %d steps in the first epoch", steps_remain_in_current_epoch)
        except ValueError:
            logger.info(" Starting fine-tuning.")
    tr_loss, logging_loss = 0.0, 0.0
    lm_action_loss, same_movie_loss = 0.0, 0.0
    model = model.to(args.device)
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, 1 if args.is_end_task else int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args)  # Added here for reproducibility
    logger.info(model)
    is_first_epoch = True
    for cur_epoch in train_iterator:
        if steps_remain_in_current_epoch > -1:
            # Shrink the first (resumed) epoch to the remaining steps only,
            # then restore the full dataset length for later epochs.
            tr_d = train_dataloader.dataset
            if is_first_epoch:
                original_dataset_len = len(tr_d)
                tr_d.force_len = steps_remain_in_current_epoch * args.train_batch_size
                is_first_epoch = False
            else:
                tr_d.force_len = original_dataset_len
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, (link_batch,
                   inc_pos_batch, dec_pos_batch, center_pos_batch,
                   inc_scene_batch, dec_scene_batch, center_scene_batch,
                   action_batch, long_term_batch,
                   feature_batch, spatial_batch, sec_batch, box_batch,
                   video_name_batch) in enumerate(epoch_iterator):
            (action_batch, link_batch,
             inc_pos_batch, dec_pos_batch, center_pos_batch,
             inc_scene_batch, dec_scene_batch, center_scene_batch,
             inputs_embed_batch, outputs_embed_batch, spatial_batch,
             soft_label_batch,
             target_locations) = prepare_model_input(
                link_batch,
                inc_pos_batch, dec_pos_batch, center_pos_batch,
                inc_scene_batch, dec_scene_batch, center_scene_batch,
                action_batch,
                feature_batch, spatial_batch, sec_batch, args)
            model.train()
            outputs = model(
                link_ids=None if args.no_link_ids else link_batch,
                inc_scene_ids=None if args.no_scene_ids else inc_scene_batch,
                dec_scene_ids=None if args.no_scene_ids else dec_scene_batch,
                center_scene_ids=None if args.no_scene_ids else center_scene_batch,
                inc_position_ids=None if args.no_pos_ids else inc_pos_batch,
                dec_position_ids=None if args.no_pos_ids else dec_pos_batch,
                center_position_ids=None if args.no_pos_ids else center_pos_batch,
                action_labels=action_batch,
                long_term_labels=long_term_batch,
                inputs_embeds=inputs_embed_batch,
                outputs_embeds=outputs_embed_batch,
                spatial_codes=spatial_batch,
                soft_labels=soft_label_batch,
                target_locations=target_locations,
                secs=sec_batch,
                boxes=box_batch,
                args=args)
            losses = outputs[0]  # model outputs are always tuple in transformers (see doc)
            if step == 0:
                logger.info(losses)
            if args.n_gpu > 1:
                loss = sum([loss.mean() for loss in losses.values()])  # mean() to average on multi-gpu parallel training
            else:
                loss = sum(losses.values())
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            if 'lm_action' in losses:
                lm_action_loss += losses['lm_action'].mean().item()
            if 'same_movie' in losses:
                same_movie_loss += losses['same_movie'].mean().item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                # Decide whether to evaluate/log at this optimization step.
                if len(args.eval_epochs) == 0:
                    do_eval = (step == len(train_dataloader) - 1 and args.is_end_task) \
                              or (args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0)
                else:
                    epoch_len = len(train_dataloader) // int(args.num_train_epochs)
                    if (step + 1) % epoch_len == 0:
                        do_eval = (step + 1) in [int(x) * epoch_len for x in args.eval_epochs.strip().split(',')]
                    else:
                        do_eval = False
                if do_eval:
                    # Log metrics
                    if (
                        args.local_rank == -1 and args.evaluate_during_training and not args.is_end_task
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model)
                    logger.info(("lr", scheduler.get_lr()[0], global_step))
                    logger.info(
                        (
                            'training loss',
                            (tr_loss - logging_loss) / args.logging_steps,
                            global_step,
                        )
                    )
                    logger.info(
                        (
                            'same_movie_loss',
                            same_movie_loss / args.logging_steps,
                        )
                    )
                    logger.info(
                        (
                            'lm_action_loss',
                            lm_action_loss / args.logging_steps,
                        )
                    )
                    same_movie_loss = 0.0
                    lm_action_loss = 0.0
                    logging_loss = tr_loss
                if args.save_steps == -1:
                    # save_steps == -1 ties checkpointing to the eval schedule.
                    do_save = do_eval
                else:
                    do_save = (args.local_rank in [-1, 0]) and (args.save_steps > 0) and (global_step % args.save_steps == 0)
                if do_save:
                    checkpoint_prefix = "checkpoint"
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
                    os.makedirs(output_dir, exist_ok=True)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    _rotate_checkpoints(args, checkpoint_prefix)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    return global_step, tr_loss / global_step
def evaluate_action_recognition(bert_all_preds, args):
    """Aggregate per-token sigmoid predictions into per-(video, sec, box)
    scores and compute AVA-style mean AP.

    Relies on module-level globals (video_name_to_idx, all_preds, all_metadata,
    all_ori_boxes, evaluate_ava, excluded_keys, class_whitelist, categories,
    groundtruth, video_idx_to_name) defined elsewhere in this file.

    Returns:
        mean AP as a percentage (float).
    """
    logger.info('bert output to dict')
    bert_preds = {}
    for pred_batch, video_name_batch, sec_batch, box_batch, is_center in bert_all_preds:
        pred_batch = torch.sigmoid(pred_batch)
        for i in range(len(video_name_batch)):
            video_idx = video_name_to_idx[video_name_batch[i]]
            secs = sec_batch[i]
            boxes = box_batch[i]
            for j, (ex_sec, ex_box) in enumerate(zip(secs, boxes)):
                # The j + 1 offset skips the leading start token in the
                # prediction sequence.
                if not is_center[i, j + 1]:
                    continue
                if video_idx not in bert_preds:
                    bert_preds[video_idx] = {}
                # Normalize singleton sec/box entries to lists.
                if isinstance(ex_sec, int):
                    sec_list = [ex_sec]
                    box_list = [ex_box]
                else:
                    sec_list = ex_sec
                    box_list = ex_box
                for sec, box in zip(sec_list, box_list):
                    if sec not in bert_preds[video_idx]:
                        bert_preds[video_idx][sec] = {}
                    if box in bert_preds[video_idx][sec]:
                        bert_preds[video_idx][sec][box].append(pred_batch[i, j + 1])
                    else:
                        bert_preds[video_idx][sec][box] = [pred_batch[i, j + 1]]
    logger.info('set all_preds to bert')
    used_count = 0
    all_preds[:, :] = 0.0
    for i in range(all_preds.shape[0]):
        video_idx = int(all_metadata[i][0])
        sec = int(all_metadata[i][1])
        # Box key matches the '%.03f'-formatted string used at insertion time.
        box = ','.join(['%.03f' % x for x in all_ori_boxes[i][1:]])
        if video_idx in bert_preds \
                and sec in bert_preds[video_idx] \
                and box in bert_preds[video_idx][sec]:
            pred_list = bert_preds[video_idx][sec][box]
            # Average the predictions collected for the same box.
            all_preds[i, :] = sum(pred_list) / len(pred_list)
            used_count += 1
    logger.info('%d predictions used' % used_count)
    logger.info('%d predictions in total' % all_preds.shape[0])
    mean_ap = evaluate_ava(
        all_preds,
        all_ori_boxes,
        all_metadata.tolist(),
        excluded_keys,
        class_whitelist,
        categories,
        groundtruth=groundtruth,
        video_idx_to_name=video_idx_to_name,
    )
    return mean_ap * 100.0
def softmax(x):
    """Numerically stable softmax over all entries of x."""
    shifted_exp = np.exp(x - x.max())
    return shifted_exp / shifted_exp.sum()
def evaluate(args, model: PreTrainedModel, prefix="") -> Dict:
    """Evaluate ``model`` on the validation VideoDataset.

    Computes masked-LM perplexities and, depending on flags, AVA mAP
    (action_recognition) and long-term task metrics (accuracy or MSE,
    aggregated per video and optionally split into val/test).  Writes the
    metrics to ``eval_results.txt`` under args.output_dir and returns them
    as a dict.
    """
    logger.info(model)
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_output_dir = args.output_dir
    eval_dataset = VideoDataset(args, evaluate=True)
    if args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir, exist_ok=True)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    def collate(all_examples: List[torch.Tensor]):
        return shared_collate(all_examples)
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(
        eval_dataset,
        sampler=eval_sampler,
        batch_size=args.eval_batch_size,
        collate_fn=collate,
        num_workers=args.num_workers_eval,
        pin_memory=True,
    )
    # multi-gpu evaluate
    if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    all_eval_loss = 0.0
    long_term_top1 = 0.0
    long_term_count = 0
    nb_eval_steps = 0
    eval_example_count = 0
    model.eval()
    all_preds = []
    all_states = []  # NOTE(review): appended-to nowhere; appears unused
    for (link_batch,
         inc_pos_batch, dec_pos_batch, center_pos_batch,
         inc_scene_batch, dec_scene_batch, center_scene_batch,
         action_batch, long_term_batch,
         feature_batch, spatial_batch, sec_batch, box_batch,
         video_name_batch) in tqdm(eval_dataloader, desc="Evaluating"):
        (action_batch, link_batch,
         inc_pos_batch, dec_pos_batch, center_pos_batch,
         inc_scene_batch, dec_scene_batch, center_scene_batch,
         inputs_embed_batch, outputs_embed_batch, spatial_batch,
         soft_label_batch,
         target_locations) = prepare_model_input(
            link_batch,
            inc_pos_batch, dec_pos_batch, center_pos_batch,
            inc_scene_batch, dec_scene_batch, center_scene_batch,
            action_batch,
            feature_batch, spatial_batch, sec_batch, args, is_eval=True)
        with torch.no_grad():
            outputs = model(
                link_ids=None if args.no_link_ids else link_batch,
                inc_scene_ids=None if args.no_scene_ids else inc_scene_batch,
                dec_scene_ids=None if args.no_scene_ids else dec_scene_batch,
                center_scene_ids=None if args.no_scene_ids else center_scene_batch,
                inc_position_ids=None if args.no_pos_ids else inc_pos_batch,
                dec_position_ids=None if args.no_pos_ids else dec_pos_batch,
                center_position_ids=None if args.no_pos_ids else center_pos_batch,
                action_labels=action_batch,
                long_term_labels=long_term_batch,
                inputs_embeds=inputs_embed_batch,
                outputs_embeds=outputs_embed_batch,
                spatial_codes=spatial_batch,
                soft_labels=soft_label_batch,
                target_locations=target_locations,
                secs=sec_batch,
                boxes=box_batch,
                args=args)
        losses = outputs[0]
        if args.action_recognition:
            # Keep per-box predictions plus a mask of labeled (masked) tokens.
            all_preds.append((
                outputs[1]['pred'].cpu(), video_name_batch, sec_batch, box_batch,
                (action_batch[:, :, 0] != -100).cpu()
            ))
        if args.train_long_term:
            lt_pred = outputs[1]['long_term_logits'].cpu()
            lt_labels = long_term_batch[:, 1]
            if args.num_long_term_classes == -1:
                # Regression mode: a single scalar output per example.
                lt_pred = lt_pred[:, 0]
            all_preds.append((video_name_batch, lt_pred, lt_labels))
            if args.num_long_term_classes > 0:
                lt_pred = outputs[1]['long_term_logits'].argmax(dim=1).cpu()
                lt_labels = long_term_batch[:, 1]
                long_term_top1 += (lt_pred == lt_labels).sum()
                long_term_count += lt_labels.shape[0]
        if args.mask_sep:
            eval_loss += losses['lm_action'].mean().item()
            all_eval_loss += sum([loss.mean() for loss in losses.values()]).item()
        else:
            eval_loss += sum([loss.mean() for loss in losses.values()]).item()
        eval_example_count += inc_pos_batch.shape[0]
        nb_eval_steps += 1
    mean_ap = 0.0
    if args.action_recognition:
        start_eval = time.time()
        mean_ap = evaluate_action_recognition(all_preds, args)
        logger.info('eval done in {} secs'.format(time.time() - start_eval))
    clip_mse = []
    split_result = {}
    if args.train_long_term:
        # Aggregate per-clip predictions into per-video predictions:
        # summed softmax for classification, a list of scalars for regression.
        pred_agg = {}
        video_label = {}
        for video_name_batch, pred_batch, label_batch in all_preds:
            for i in range(len(video_name_batch)):
                v_name = video_name_batch[i]
                if v_name not in pred_agg:
                    if args.num_long_term_classes > 0:
                        pred_agg[v_name] = softmax(pred_batch[i])
                    else:
                        pred_agg[v_name] = [pred_batch[i]]
                    video_label[v_name] = label_batch[i]
                else:
                    if args.num_long_term_classes > 0:
                        pred_agg[v_name] += softmax(pred_batch[i])
                    else:
                        pred_agg[v_name].append(pred_batch[i])
                    assert video_label[v_name] == label_batch[i]
                if args.num_long_term_classes == -1:
                    clip_mse.append(
                        (pred_batch[i] - label_batch[i]) ** 2.0
                    )
        for split in (['val', 'test'] if args.three_split else ['val']):
            agg_sm_correct, agg_count = 0.0, 0.0
            mse = []
            for v_name in pred_agg.keys():
                if args.three_split and split == 'val':
                    if v_name not in eval_dataset.val_set:
                        continue
                if args.three_split and split == 'test':
                    if v_name not in eval_dataset.test_set:
                        continue
                if args.num_long_term_classes > 0:
                    if pred_agg[v_name].argmax() == video_label[v_name]:
                        agg_sm_correct += 1
                else:
                    mse.append(
                        (np.mean(pred_agg[v_name]) - video_label[v_name]) ** 2.0
                    )
                agg_count += 1
            if args.num_long_term_classes > 0:
                acc = 100.0 * agg_sm_correct / agg_count
                split_result[split] = f'{acc} {agg_sm_correct} {agg_count}'
            else:
                split_result[split] = f'{np.mean(mse)} {len(mse)}'
    eval_loss = eval_loss / nb_eval_steps
    all_eval_loss = all_eval_loss / nb_eval_steps
    perplexity = torch.exp(torch.tensor(eval_loss))
    total_perplexity = torch.exp(torch.tensor(all_eval_loss))
    if long_term_count > 0:
        long_term_top1 = float(long_term_top1) / float(long_term_count)
    # NOTE(review): np.mean(clip_mse) yields NaN (with a warning) when
    # clip_mse is empty, e.g. when train_long_term is off.
    result = {"perplexity": perplexity,
              "all_eval_loss": all_eval_loss,
              "total_perplexity": total_perplexity,
              "map": mean_ap,
              "clip_mse": np.mean(clip_mse),
              "long_term_top1": long_term_top1,
              }
    for split in split_result.keys():
        result['agg_' + split] = split_result[split]
    output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results {} ({} examples) *****".format(prefix, eval_example_count))
        for key in sorted(result.keys()):
            logger.info(" %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))
    return result
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--train_data_file", default=None, type=str, required=True, help="The input training data file (a text file)."
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--model_type", type=str, required=True, help="The model architecture to be trained or fine-tuned.",
)
# Other parameters
parser.add_argument(
"--eval_data_file",
default=None,
type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).",
)
parser.add_argument(
"--line_by_line",
action="store_true",
help="Whether distinct lines of text in the dataset are to be handled as distinct sequences.",
)
parser.add_argument(
"--should_continue", action="store_true", help="Whether to continue from latest checkpoint in output_dir"
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
help="The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.",
)
parser.add_argument(
"--mlm", action="store_true", help="Train with masked-language modeling loss instead of language modeling."
)
parser.add_argument(
"--mlm_probability", type=float, default=0.15, help="Ratio of tokens to mask for masked language modeling loss"
)
parser.add_argument(
"--config_name",
default=None,
type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.",
)
parser.add_argument(
"--cache_dir",
default=None,
type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step."
)
parser.add_argument("--per_gpu_train_batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=4, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=10.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=4000, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=4000, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--save_total_limit",
type=int,
default=None,
help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--max_iter", type=int, default=-1, help="")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
parser.add_argument("--secs_per_example", type=int, default=60, help="Number of secs per example.")
parser.add_argument("--get_mc_states_name", type=str, default="binary_task", help="")
parser.add_argument("--same_movie", action="store_true", help="")
parser.add_argument("--same_movie_temperature", type=float, default=0.2, help="")
parser.add_argument("--same_movie_weight", type=float, default=1.0, help="")
parser.add_argument("--train_long_term", action="store_true", help="")
parser.add_argument("--train_long_term_linear", action="store_true", help="")
parser.add_argument("--train_long_term_dropout", action="store_true", help="")
parser.add_argument("--long_term_task_name", type=str, default="relationship", help="")
parser.add_argument("--num_long_term_classes", type=int, default=-1, help="")
parser.add_argument("--eval_epochs", default="", type=str, help="")
parser.add_argument("--num_workers", type=int, default=16, help="Number of DataLoader workers.")
parser.add_argument("--num_workers_eval", type=int, default=2, help="Number of DataLoader workers.")
parser.add_argument("--force_load_checkpoint", type=str, default="", help="Force-load checkpoint path.")
parser.add_argument("--force_load_checkpoint_opt", type=str, default=None, help="Force-load checkpoint path.")
parser.add_argument("--init_final", action="store_true", help="")
parser.add_argument("--train_feature_file", default=None, type=str, required=True, help="")
parser.add_argument("--mc_train_feature_file", default=None, type=str, help="")
parser.add_argument("--eval_feature_file", default=None, type=str, required=True, help="")
parser.add_argument("--exp", default='', type=str, required=True, help="")
parser.add_argument("--num_action_classes", type=int, default=80, help="")
parser.add_argument("--max_position_embeddings", type=int, default=258, help="")
parser.add_argument("--action_recognition", action="store_true", help="")
parser.add_argument("--num_hidden_layers", type=int, default=3, help="")
parser.add_argument("--num_attention_heads", type=int, default=12, help="")
parser.add_argument("--action_feat_dim", type=int, default=2304, help="")
parser.add_argument("--feat_dim", type=int, default=2304, help="")
parser.add_argument("--action_loss_weight", default=1.0, type=float, help="")
parser.add_argument("--no_link_ids", action="store_true", help="")
parser.add_argument("--no_scene_ids", action="store_true", help="")
parser.add_argument("--no_pos_ids", action="store_true", help="")
parser.add_argument("--use_soft_labels", action="store_true", help="")
parser.add_argument("--mask_sep", action="store_true", help="")
parser.add_argument("--mask_sep_no_mask", action="store_true", help="")
parser.add_argument("--temperature", default=1.0, type=float, help="")
parser.add_argument("--eval_sample_x", default=10, type=int, help="")
parser.add_argument("--three_split", action="store_true", help="")
parser.add_argument("--short_term_model_weights", default='data/ava/SLOWFAST_32x2_R101_50_50.pkl', type=str, help="")
parser.add_argument("--debug", action="store_true", help="")
parser.add_argument("--use_good_quality", action="store_true", help="")
args = parser.parse_args()
args.is_end_task = args.train_long_term or args.action_recognition
args.all_feat_dims = [2304]
if args.model_type in ["bert", "roberta", "distilbert", "camembert"] and not args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling)."
)
if args.eval_data_file is None and args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument."
)
if args.should_continue:
sorted_checkpoints = _sorted_checkpoints(args)
if len(sorted_checkpoints) == 0:
raise ValueError("Used --should_continue but no checkpoint was found in --output_dir.")
else:
args.model_name_or_path = sorted_checkpoints[-1]
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
config_class, model_class = MODEL_CLASSES[args.model_type]
if args.config_name:
config = config_class.from_pretrained(args.config_name, cache_dir=args.cache_dir)
elif args.model_name_or_path:
config = config_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
else:
config = config_class()
if args.model_name_or_path:
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
args=args,
)
else:
logger.info("Training new model from scratch")
model = model_class(config=config)
model.to(args.device)
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
global proj_W
global proj_b
tmp_state_dict = torch.load(
args.short_term_model_weights,
map_location="cpu",
)
proj_W = torch.tensor(tmp_state_dict['model_state']['head.projection.weight'].numpy()).float().T # 2304, 80
proj_b = torch.tensor(tmp_state_dict['model_state']['head.projection.bias'].numpy()).float() # 80
args.soft_label_dim = 80
proj_W = proj_W.to(args.device)
proj_b = proj_b.to(args.device)
# Training
if args.do_train:
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
train_dataset = VideoDataset(args, evaluate=False)
if args.local_rank == 0:
torch.distributed.barrier()
global_step, tr_loss = train(args, train_dataset, model)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
if args.is_end_task and args.local_rank in [-1, 0]:
evaluate(args, model)
# Script entry point: main() (above) parses CLI arguments, sets up
# (optionally distributed) training, and runs training/evaluation.
if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"apex.amp.scale_loss",
"torch.utils.data.DataLoader",
"torch.cuda.device_count",
"numpy.ascontiguousarray",
"numpy.array",
"torch.utils.data.distributed.DistributedSampler",
"apex.amp.initialize",
"data.video_data_helper.binarize",
"torch.cuda.is_available",
"torch.distribut... | [((1929, 1956), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1946, 1956), False, 'import logging\n'), ((2048, 2065), 'numpy.zeros', 'np.zeros', (['(2304,)'], {}), '((2304,))\n', (2056, 2065), True, 'import numpy as np\n'), ((2319, 2333), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2330, 2333), False, 'import pickle\n'), ((2472, 2486), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2483, 2486), False, 'import pickle\n'), ((13593, 13610), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (13604, 13610), False, 'import random\n'), ((13615, 13635), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (13629, 13635), True, 'import numpy as np\n'), ((13640, 13663), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (13657, 13663), False, 'import torch\n'), ((17957, 18005), 'torch.zeros', 'torch.zeros', (['link_batch.shape'], {'dtype': 'torch.int64'}), '(link_batch.shape, dtype=torch.int64)\n', (17968, 18005), False, 'import torch\n'), ((22914, 22968), 'torch.zeros', 'torch.zeros', (['(batch_size, max_len, dim)'], {'device': 'device'}), '((batch_size, max_len, dim), device=device)\n', (22925, 22968), False, 'import torch\n'), ((23510, 23668), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'sampler': 'train_sampler', 'batch_size': 'args.train_batch_size', 'collate_fn': 'collate', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(train_dataset, sampler=train_sampler, batch_size=args.\n train_batch_size, collate_fn=collate, num_workers=args.num_workers,\n pin_memory=True)\n', (23520, 23668), False, 'from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\n'), ((26749, 26835), 'models.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'eps': 'args.adam_epsilon'}), '(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.\n adam_epsilon)\n', (26754, 26835), False, 'from 
models import WEIGHTS_NAME, AdamW, PreTrainedModel, RobertaConfig, RobertaForMaskedLM, get_linear_schedule_with_warmup\n'), ((42739, 42770), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_dataset'], {}), '(eval_dataset)\n', (42756, 42770), False, 'from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\n'), ((42793, 42953), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_dataset'], {'sampler': 'eval_sampler', 'batch_size': 'args.eval_batch_size', 'collate_fn': 'collate', 'num_workers': 'args.num_workers_eval', 'pin_memory': '(True)'}), '(eval_dataset, sampler=eval_sampler, batch_size=args.\n eval_batch_size, collate_fn=collate, num_workers=args.num_workers_eval,\n pin_memory=True)\n', (42803, 42953), False, 'from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\n'), ((43830, 43870), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""'}), "(eval_dataloader, desc='Evaluating')\n", (43834, 43870), False, 'from tqdm import tqdm, trange\n'), ((49849, 49906), 'os.path.join', 'os.path.join', (['eval_output_dir', 'prefix', '"""eval_results.txt"""'], {}), "(eval_output_dir, prefix, 'eval_results.txt')\n", (49861, 49906), False, 'import os\n'), ((50268, 50293), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (50291, 50293), False, 'import argparse\n'), ((61167, 61362), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': '(logging.INFO if args.local_rank in [-1, 0] else logging.WARN)'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else\n logging.WARN)\n", (61186, 61362), False, 'import logging\n'), ((62917, 62978), 'torch.load', 'torch.load', (['args.short_term_model_weights'], {'map_location': '"""cpu"""'}), 
"(args.short_term_model_weights, map_location='cpu')\n", (62927, 62978), False, 'import torch\n'), ((2696, 2719), 'torch.matmul', 'torch.matmul', (['x', 'proj_W'], {}), '(x, proj_W)\n', (2708, 2719), False, 'import torch\n'), ((2918, 3024), 'data.video_data_helper.load_features', 'video_data_helper.load_features', (['(args.eval_feature_file if evaluate else args.train_feature_file)', 'args'], {}), '(args.eval_feature_file if evaluate else\n args.train_feature_file, args)\n', (2949, 3024), False, 'from data import video_data_helper\n'), ((13695, 13727), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (13721, 13727), False, 'import torch\n'), ((15247, 15272), 'shutil.rmtree', 'shutil.rmtree', (['checkpoint'], {}), '(checkpoint)\n', (15260, 15272), False, 'import shutil\n'), ((17242, 17287), 'numpy.random.choice', 'np.random.choice', (['num_features', 'num_to_sample'], {}), '(num_features, num_to_sample)\n', (17258, 17287), True, 'import numpy as np\n'), ((23297, 23325), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (23310, 23325), False, 'from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\n'), ((23356, 23389), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (23374, 23389), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((24295, 24356), 'torch.load', 'torch.load', (['args.short_term_model_weights'], {'map_location': '"""cpu"""'}), "(args.short_term_model_weights, map_location='cpu')\n", (24305, 24356), False, 'import torch\n'), ((24447, 24512), 'torch.nn.Parameter', 'nn.Parameter', (["state_dict['model_state']['head.projection.weight']"], {}), "(state_dict['model_state']['head.projection.weight'])\n", (24459, 24512), True, 'import torch.nn as nn\n'), ((24566, 24629), 'torch.nn.Parameter', 'nn.Parameter', 
(["state_dict['model_state']['head.projection.bias']"], {}), "(state_dict['model_state']['head.projection.bias'])\n", (24578, 24629), True, 'import torch.nn as nn\n'), ((24663, 24721), 'torch.load', 'torch.load', (['args.force_load_checkpoint'], {'map_location': '"""cpu"""'}), "(args.force_load_checkpoint, map_location='cpu')\n", (24673, 24721), False, 'import torch\n'), ((28423, 28486), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': 'args.fp16_opt_level'}), '(model, optimizer, opt_level=args.fp16_opt_level)\n', (28437, 28486), False, 'from apex import amp\n'), ((28595, 28623), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (28616, 28623), False, 'import torch\n'), ((28741, 28884), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[args.local_rank]', 'output_device': 'args.local_rank', 'find_unused_parameters': '(True)'}), '(model, device_ids=[args.\n local_rank], output_device=args.local_rank, find_unused_parameters=True)\n', (28782, 28884), False, 'import torch\n'), ((29790, 29829), 'os.path.exists', 'os.path.exists', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (29804, 29829), False, 'import os\n'), ((32769, 32854), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""', 'disable': '(args.local_rank not in [-1, 0])'}), "(train_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0]\n )\n", (32773, 32854), False, 'from tqdm import tqdm, trange\n'), ((39917, 39942), 'torch.sigmoid', 'torch.sigmoid', (['pred_batch'], {}), '(pred_batch)\n', (39930, 39942), False, 'import torch\n'), ((42449, 42492), 'os.makedirs', 'os.makedirs', (['eval_output_dir'], {'exist_ok': '(True)'}), '(eval_output_dir, exist_ok=True)\n', (42460, 42492), False, 'import os\n'), ((43114, 43142), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (43135, 43142), False, 'import torch\n'), 
((46822, 46833), 'time.time', 'time.time', ([], {}), '()\n', (46831, 46833), False, 'import time\n'), ((49268, 49291), 'torch.tensor', 'torch.tensor', (['eval_loss'], {}), '(eval_loss)\n', (49280, 49291), False, 'import torch\n'), ((49326, 49353), 'torch.tensor', 'torch.tensor', (['all_eval_loss'], {}), '(all_eval_loss)\n', (49338, 49353), False, 'import torch\n'), ((49652, 49669), 'numpy.mean', 'np.mean', (['clip_mse'], {}), '(clip_mse)\n', (49659, 49669), True, 'import numpy as np\n'), ((60250, 60281), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (60264, 60281), False, 'import os\n'), ((60294, 60321), 'os.listdir', 'os.listdir', (['args.output_dir'], {}), '(args.output_dir)\n', (60304, 60321), False, 'import os\n'), ((60808, 60833), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (60831, 60833), False, 'import torch\n'), ((60939, 60977), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (60960, 60977), False, 'import torch\n'), ((60995, 61032), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (61007, 61032), False, 'import torch\n'), ((61041, 61093), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (61077, 61093), False, 'import torch\n'), ((61747, 61774), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (61772, 61774), False, 'import torch\n'), ((62670, 62697), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (62695, 62697), False, 'import torch\n'), ((3119, 3222), 'data.video_data_helper.load_video_data', 'video_data_helper.load_video_data', (['(args.eval_data_file if evaluate else args.train_data_file)', 'args'], {}), '(args.eval_data_file if evaluate else args\n .train_data_file, args)\n', (3152, 3222), False, 'from data import video_data_helper\n'), 
((8205, 8218), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (8213, 8218), True, 'import numpy as np\n'), ((13276, 13300), 'torch.tensor', 'torch.tensor', (['ex_actions'], {}), '(ex_actions)\n', (13288, 13300), False, 'import torch\n'), ((13318, 13344), 'torch.tensor', 'torch.tensor', (['ex_long_term'], {}), '(ex_long_term)\n', (13330, 13344), False, 'import torch\n'), ((13431, 13455), 'torch.tensor', 'torch.tensor', (['ex_spatial'], {}), '(ex_spatial)\n', (13443, 13455), False, 'import torch\n'), ((24982, 25006), 'torch.nn.Parameter', 'nn.Parameter', (['tmp_weight'], {}), '(tmp_weight)\n', (24994, 25006), True, 'import torch.nn as nn\n'), ((25127, 25149), 'torch.nn.Parameter', 'nn.Parameter', (['tmp_bias'], {}), '(tmp_bias)\n', (25139, 25149), True, 'import torch.nn as nn\n'), ((27097, 27150), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""optimizer.pt"""'], {}), "(args.model_name_or_path, 'optimizer.pt')\n", (27109, 27150), False, 'import os\n'), ((27179, 27232), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""scheduler.pt"""'], {}), "(args.model_name_or_path, 'scheduler.pt')\n", (27191, 27232), False, 'import os\n'), ((27663, 27723), 'os.path.join', 'os.path.join', (['args.force_load_checkpoint_opt', '"""optimizer.pt"""'], {}), "(args.force_load_checkpoint_opt, 'optimizer.pt')\n", (27675, 27723), False, 'import os\n'), ((27752, 27812), 'os.path.join', 'os.path.join', (['args.force_load_checkpoint_opt', '"""scheduler.pt"""'], {}), "(args.force_load_checkpoint_opt, 'scheduler.pt')\n", (27764, 27812), False, 'import os\n'), ((44455, 44470), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (44468, 44470), False, 'import torch\n'), ((63413, 63440), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (63438, 63440), False, 'import torch\n'), ((63673, 63700), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (63698, 63700), False, 'import torch\n'), ((3355, 3407), 
'data.video_data_helper.load_mc_video_data', 'video_data_helper.load_mc_video_data', (['args', 'evaluate'], {}), '(args, evaluate)\n', (3391, 3407), False, 'from data import video_data_helper\n'), ((6660, 6698), 'random.choice', 'random.choice', (['self.spans[positive_id]'], {}), '(self.spans[positive_id])\n', (6673, 6698), False, 'import random\n'), ((12907, 12932), 'torch.tensor', 'torch.tensor', (['ex_link_ids'], {}), '(ex_link_ids)\n', (12919, 12932), False, 'import torch\n'), ((12954, 12986), 'torch.tensor', 'torch.tensor', (['increasing_pos_ids'], {}), '(increasing_pos_ids)\n', (12966, 12986), False, 'import torch\n'), ((13008, 13040), 'torch.tensor', 'torch.tensor', (['decreasing_pos_ids'], {}), '(decreasing_pos_ids)\n', (13020, 13040), False, 'import torch\n'), ((13062, 13090), 'torch.tensor', 'torch.tensor', (['center_pos_ids'], {}), '(center_pos_ids)\n', (13074, 13090), False, 'import torch\n'), ((13112, 13146), 'torch.tensor', 'torch.tensor', (['increasing_scene_ids'], {}), '(increasing_scene_ids)\n', (13124, 13146), False, 'import torch\n'), ((13168, 13202), 'torch.tensor', 'torch.tensor', (['decreasing_scene_ids'], {}), '(decreasing_scene_ids)\n', (13180, 13202), False, 'import torch\n'), ((13224, 13254), 'torch.tensor', 'torch.tensor', (['center_scene_ids'], {}), '(center_scene_ids)\n', (13236, 13254), False, 'import torch\n'), ((13379, 13412), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ex_features'], {}), '(ex_features)\n', (13399, 13412), True, 'import numpy as np\n'), ((27335, 27388), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""optimizer.pt"""'], {}), "(args.model_name_or_path, 'optimizer.pt')\n", (27347, 27388), False, 'import os\n'), ((27436, 27489), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""scheduler.pt"""'], {}), "(args.model_name_or_path, 'scheduler.pt')\n", (27448, 27489), False, 'import os\n'), ((27915, 27975), 'os.path.join', 'os.path.join', (['args.force_load_checkpoint_opt', 
'"""optimizer.pt"""'], {}), "(args.force_load_checkpoint_opt, 'optimizer.pt')\n", (27927, 27975), False, 'import os\n'), ((28023, 28083), 'os.path.join', 'os.path.join', (['args.force_load_checkpoint_opt', '"""scheduler.pt"""'], {}), "(args.force_load_checkpoint_opt, 'scheduler.pt')\n", (28035, 28083), False, 'import os\n'), ((29357, 29391), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (29389, 29391), False, 'import torch\n'), ((3477, 3580), 'data.video_data_helper.load_video_data', 'video_data_helper.load_video_data', (['(args.eval_data_file if evaluate else args.train_data_file)', 'args'], {}), '(args.eval_data_file if evaluate else args\n .train_data_file, args)\n', (3510, 3580), False, 'from data import video_data_helper\n'), ((3712, 3764), 'data.video_data_helper.load_mc_video_data', 'video_data_helper.load_mc_video_data', (['args', 'evaluate'], {}), '(args, evaluate)\n', (3748, 3764), False, 'from data import video_data_helper\n'), ((6872, 6897), 'random.choice', 'random.choice', (['self.spans'], {}), '(self.spans)\n', (6885, 6897), False, 'import random\n'), ((11936, 11948), 'data.video_data_helper.binarize', 'binarize', (['[]'], {}), '([])\n', (11944, 11948), False, 'from data.video_data_helper import binarize\n'), ((14066, 14088), 'os.path.getmtime', 'os.path.getmtime', (['path'], {}), '(path)\n', (14082, 14088), False, 'import os\n'), ((16000, 16040), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'p': '[0.85, 0.15]'}), '([0, 1], p=[0.85, 0.15])\n', (16016, 16040), True, 'import numpy as np\n'), ((16538, 16575), 'torch.full', 'torch.full', (['masked_indices.shape', '(0.8)'], {}), '(masked_indices.shape, 0.8)\n', (16548, 16575), False, 'import torch\n'), ((18926, 18959), 'torch.full', 'torch.full', (['link_batch.shape', '(0.8)'], {}), '(link_batch.shape, 0.8)\n', (18936, 18959), False, 'import torch\n'), ((35383, 35414), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, 
optimizer)\n', (35397, 35414), False, 'from apex import amp\n'), ((38598, 38636), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (38609, 38636), False, 'import os\n'), ((46947, 46958), 'time.time', 'time.time', ([], {}), '()\n', (46956, 46958), False, 'import time\n'), ((60728, 60753), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (60751, 60753), False, 'import torch\n'), ((11906, 11918), 'data.video_data_helper.binarize', 'binarize', (['[]'], {}), '([])\n', (11914, 11918), False, 'from data.video_data_helper import binarize\n'), ((16904, 16941), 'torch.full', 'torch.full', (['masked_indices.shape', '(0.5)'], {}), '(masked_indices.shape, 0.5)\n', (16914, 16941), False, 'import torch\n'), ((35926, 35954), 'apex.amp.master_params', 'amp.master_params', (['optimizer'], {}), '(optimizer)\n', (35943, 35954), False, 'from apex import amp\n'), ((38919, 38964), 'os.path.join', 'os.path.join', (['output_dir', '"""training_args.bin"""'], {}), "(output_dir, 'training_args.bin')\n", (38931, 38964), False, 'import os\n'), ((39165, 39205), 'os.path.join', 'os.path.join', (['output_dir', '"""optimizer.pt"""'], {}), "(output_dir, 'optimizer.pt')\n", (39177, 39205), False, 'import os\n'), ((39262, 39302), 'os.path.join', 'os.path.join', (['output_dir', '"""scheduler.pt"""'], {}), "(output_dir, 'scheduler.pt')\n", (39274, 39302), False, 'import os\n'), ((49121, 49133), 'numpy.mean', 'np.mean', (['mse'], {}), '(mse)\n', (49128, 49133), True, 'import numpy as np\n'), ((7896, 7921), 'random.uniform', 'random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (7910, 7921), False, 'import random\n'), ((7957, 7982), 'random.uniform', 'random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (7971, 7982), False, 'import random\n'), ((8018, 8043), 'random.uniform', 'random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (8032, 8043), False, 'import random\n'), ((8079, 8104), 'random.uniform', 
'random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (8093, 8104), False, 'import random\n'), ((8526, 8587), 'os.path.join', 'os.path.join', (['args.mc_train_feature_file', "(video_name + '.npz')"], {}), "(args.mc_train_feature_file, video_name + '.npz')\n", (8538, 8587), False, 'import os\n'), ((10063, 10080), 'data.video_data_helper.binarize', 'binarize', (['actions'], {}), '(actions)\n', (10071, 10080), False, 'from data.video_data_helper import binarize\n'), ((48773, 48798), 'numpy.mean', 'np.mean', (['pred_agg[v_name]'], {}), '(pred_agg[v_name])\n', (48780, 48798), True, 'import numpy as np\n')] |
import numpy as np
from PySide2.QtCore import Qt
from hexrd.ui.hexrd_config import HexrdConfig
from utils import select_files_when_asked
def test_load_data(qtbot, main_window, default_config_path, default_data_path):
    """Load an instrument config plus image files and verify real frames replace dummies."""
    def only_dummy_frames():
        # A dummy image series holds exactly one frame filled with ones.
        series_dict = HexrdConfig().imageseries_dict
        return all(
            len(series) == 1 and np.all(series[0] == 1)
            for series in series_dict.values()
        )

    # Sanity check: the detector we are about to load must not exist yet.
    assert 'GE' not in HexrdConfig().detectors

    # Open the instrument configuration file.
    with select_files_when_asked(default_config_path):
        main_window.ui.action_open_config_file.triggered.emit()

    # The instrument config should now be loaded with a single GE detector.
    loaded = HexrdConfig().detectors
    assert len(loaded) == 1
    assert 'GE' in loaded

    # Before any data is read, only placeholder frames should be present.
    assert only_dummy_frames()

    dialog = main_window.simple_image_series_dialog

    # Click "Select Image Files" (with the file dialog mocked), then "Read".
    with select_files_when_asked(default_data_path):
        qtbot.mouseClick(dialog.ui.image_files, Qt.LeftButton)
    qtbot.mouseClick(dialog.ui.read, Qt.LeftButton)

    assert not only_dummy_frames()
    series = HexrdConfig().imageseries_dict['GE']
    assert len(series) == 480
| [
"numpy.all",
"utils.select_files_when_asked",
"hexrd.ui.hexrd_config.HexrdConfig"
] | [((332, 376), 'utils.select_files_when_asked', 'select_files_when_asked', (['default_config_path'], {}), '(default_config_path)\n', (355, 376), False, 'from utils import select_files_when_asked\n'), ((506, 519), 'hexrd.ui.hexrd_config.HexrdConfig', 'HexrdConfig', ([], {}), '()\n', (517, 519), False, 'from hexrd.ui.hexrd_config import HexrdConfig\n'), ((969, 1011), 'utils.select_files_when_asked', 'select_files_when_asked', (['default_data_path'], {}), '(default_data_path)\n', (992, 1011), False, 'from utils import select_files_when_asked\n'), ((275, 288), 'hexrd.ui.hexrd_config.HexrdConfig', 'HexrdConfig', ([], {}), '()\n', (286, 288), False, 'from hexrd.ui.hexrd_config import HexrdConfig\n'), ((1179, 1192), 'hexrd.ui.hexrd_config.HexrdConfig', 'HexrdConfig', ([], {}), '()\n', (1190, 1192), False, 'from hexrd.ui.hexrd_config import HexrdConfig\n'), ((635, 648), 'hexrd.ui.hexrd_config.HexrdConfig', 'HexrdConfig', ([], {}), '()\n', (646, 648), False, 'from hexrd.ui.hexrd_config import HexrdConfig\n'), ((712, 731), 'numpy.all', 'np.all', (['(ims[0] == 1)'], {}), '(ims[0] == 1)\n', (718, 731), True, 'import numpy as np\n')] |
import numpy as np
import random
from numba import jit
def indicator(S, n):
    """Return a length-n float vector with 1.0 at each index in S and 0.0 elsewhere."""
    vec = np.zeros(n)
    for idx in S:
        vec[idx] = 1
    return vec
def sample_live_icm(g, num_graphs):
    '''
    Draw num_graphs live-edge graphs from the independent cascade model on g.

    Each edge (u, v) of g is kept independently with probability
    g[u][v]['p']; every node of g appears in every sample.
    '''
    import networkx as nx
    samples = []
    for _ in range(num_graphs):
        live = nx.Graph()
        live.add_nodes_from(g.nodes())
        # Keep each edge independently according to its propagation probability.
        live.add_edges_from(
            (u, v) for u, v in g.edges() if random.random() < g[u][v]['p']
        )
        samples.append(live)
    return samples
def f_all_influmax_multlinear(x, Gs, Ps, ws):
    '''
    Multilinear-extension objective for a live-edge influence maximization
    problem, averaged uniformly over the sampled coverage instances.

    x: continuous decision variables (per-node seeding probabilities)
    Gs/Ps/ws: sampled coverage instances as built by live_edge_to_adjlist
    '''
    num_samples = len(Gs)
    # Each sampled graph contributes with equal weight 1/num_samples.
    uniform_weights = np.full(num_samples, 1. / num_samples)
    return objective_live_edge(x, Gs, Ps, ws, uniform_weights)
def make_multilinear_objective_samples(live_graphs, target_nodes, selectable_nodes, p_attend):
    '''
    Build a multilinear-extension objective from sampled live-edge graphs.

    live_graphs: list of networkx graphs containing sampled live edges
    target_nodes: nodes counted towards the objective
    selectable_nodes: nodes eligible to be chosen as seeds
    p_attend: per-node probability of being influenced when seeded

    Returns a function f_all(x) taking decision variables over
    selectable_nodes only.
    '''
    adj, probs, comp_weights = live_edge_to_adjlist(live_graphs, target_nodes, p_attend)
    total_nodes = len(live_graphs[0])

    def f_all(x):
        # Embed the selectable-node decisions into a full-length vector.
        full_x = np.zeros(total_nodes)
        full_x[selectable_nodes] = x
        return f_all_influmax_multlinear(full_x, adj, probs, comp_weights)

    return f_all
def make_multilinear_gradient_samples(live_graphs, target_nodes, selectable_nodes, p_attend):
    '''
    Build a stochastic gradient oracle for the multilinear extension of the
    influence maximization problem defined by sampled live-edge graphs.

    live_graphs: list of networkx graphs containing sampled live edges
    target_nodes: nodes counted towards the objective
    selectable_nodes: nodes eligible to be chosen as seeds
    p_attend: per-node probability of being influenced when seeded

    Returns gradient(x, batch_size): an unbiased gradient estimate over
    batch_size uniformly sampled instances, restricted to selectable_nodes.
    '''
    import random
    adj, probs, comp_weights = live_edge_to_adjlist(live_graphs, target_nodes, p_attend)
    total_nodes = len(live_graphs[0])

    def gradient(x, batch_size):
        # Embed the selectable-node decisions into a full-length vector.
        full_x = np.zeros(total_nodes)
        full_x[selectable_nodes] = x
        # Uniformly sample a minibatch of coverage instances.
        idx = random.sample(range(len(adj)), batch_size)
        batch_G = [adj[i] for i in idx]
        batch_P = [probs[i] for i in idx]
        batch_w = [comp_weights[i] for i in idx]
        # Uniform 1/batch_size weights; only the first batch_size entries
        # of this array are consumed by gradient_live_edge.
        uniform = 1. / batch_size * np.ones(len(adj))
        grad = gradient_live_edge(full_x, batch_G, batch_P, batch_w, uniform)
        return grad[selectable_nodes]

    return gradient
def live_edge_to_adjlist(live_edge_graphs, target_nodes, p_attend):
    '''
    Convert sampled live-edge graphs to the flat-array coverage format used
    by the jit-compiled objective/gradient routines.

    For each live-edge graph g:
      - the entry of Gs is an (n_components x max_component_size) int array;
        row i lists the nodes of the i-th connected component of g, with
        unused slots padded by -1.
      - the entry of Ps mirrors Gs, holding p_attend[v] for each listed node.
      - the entry of ws holds, per component, the number of target nodes it
        contains (its contribution to the objective when covered).

    Returns (Gs, Ps, ws): three parallel lists, one entry per input graph.
    '''
    import networkx as nx
    Gs = []
    Ps = []
    ws = []
    target_nodes = set(target_nodes)
    for g in live_edge_graphs:
        cc = list(nx.connected_components(g))
        n = len(cc)
        max_degree = max(len(c) for c in cc)
        # BUGFIX: dtype=np.int was deprecated in NumPy 1.20 and removed in
        # 1.24 (AttributeError on modern NumPy); the builtin int gives the
        # same default integer dtype. np.full also replaces the separate
        # zeros + "[:] = -1" fill.
        G_array = np.full((n, max_degree), -1, dtype=int)
        P = np.zeros((n, max_degree))
        for i in range(n):
            for j, v in enumerate(cc[i]):
                G_array[i, j] = v
                P[i, j] = p_attend[v]
        Gs.append(G_array)
        Ps.append(P)
        # Weight of each component = number of target nodes it covers.
        w = np.zeros(n)
        for i in range(n):
            w[i] = len(target_nodes.intersection(cc[i]))
        ws.append(w)
    return Gs, Ps, ws
@jit
def gradient_live_edge(x, Gs, Ps, ws, weights):
'''
Gradient wrt x of the live edge influence maximization model.
x: current probability of seeding each node
Gs/Ps/ws represent the input graphs, as defined in live_edge_to_adjlist
'''
grad = np.zeros((len(x)))
for i in range(len(Gs)):
grad += weights[i]*gradient_coverage(x, Gs[i], Ps[i], ws[i])
grad /= len(x)
return grad
@jit
def objective_live_edge(x, Gs, Ps, ws, weights):
'''
Objective in the live edge influence maximization model, where nodes are
seeded with probability in the corresponding entry of x.
Gs/Ps/ws represent the input graphs, as defined in live_edge_to_adjlist
weights: probability of each graph occurring
'''
total = 0
for i in range(len(Gs)):
total += weights[i] * objective_coverage(x, Gs[i], Ps[i], ws[i])
return total
'''
The following functions compute gradients/objective values for the multilinear relaxation
of a (probabilistic) coverage function. The function is represented by the arrays G and P.
Each row of G is a set to be covered, with the entries of the row giving the items that will
cover it (terminated with -1s). The corresponding entry of P gives the probability that
the item will cover that set (independently of all others).
Corresponding to each row of G is an entry in the vector w, which gives the contribution
to the objective from covering that set.
'''
@jit
def gradient_coverage(x, G, P, w):
'''
Calculates gradient of the objective at fractional point x.
x: fractional point as a vector. Should be reshapable into a matrix giving
probability of choosing copy i of node u.
G: graph (adjacency list)
P: probability on each edge.
w: weights for nodes in R
'''
grad = np.zeros((x.shape[0]))
#process gradient entries one node at a time
for v in range(G.shape[0]):
p_all_fail = 1
for j in range(G.shape[1]):
if G[v, j] == -1:
break
p_all_fail *= 1 - x[G[v, j]]*P[v, j]
for j in range(G.shape[1]):
u = G[v, j]
if u == -1:
break
#0/0 should be 0 here
if p_all_fail == 0:
p_others_fail = 0
else:
p_others_fail = p_all_fail/(1 - x[u]*P[v, j])
grad[u] += w[v]*P[v, j]*p_others_fail
return grad
@jit
def marginal_coverage(x, G, P, w):
'''
Returns marginal probability that each RHS vertex is reached.
'''
probs = np.ones((G.shape[0]))
for v in range(G.shape[0]):
for j in range(G.shape[1]):
if G[v, j] == -1:
break
u = G[v, j]
probs[v] *= 1 - x[u]*P[v, j]
probs = 1 - probs
return probs
@jit
def objective_coverage(x, G, P, w):
'''
Weighted objective value: the expected weight of the RHS nodes that are reached.
'''
return np.dot(w, marginal_coverage(x, G, P, w))
| [
"numpy.ones",
"networkx.Graph",
"networkx.connected_components",
"numpy.zeros",
"random.random"
] | [((85, 96), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (93, 96), True, 'import numpy as np\n'), ((6334, 6354), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (6342, 6354), True, 'import numpy as np\n'), ((7086, 7105), 'numpy.ones', 'np.ones', (['G.shape[0]'], {}), '(G.shape[0])\n', (7093, 7105), True, 'import numpy as np\n'), ((431, 441), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (439, 441), True, 'import networkx as nx\n'), ((1065, 1075), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1072, 1075), True, 'import numpy as np\n'), ((4027, 4066), 'numpy.zeros', 'np.zeros', (['(n, max_degree)'], {'dtype': 'np.int'}), '((n, max_degree), dtype=np.int)\n', (4035, 4066), True, 'import numpy as np\n'), ((4079, 4104), 'numpy.zeros', 'np.zeros', (['(n, max_degree)'], {}), '((n, max_degree))\n', (4087, 4104), True, 'import numpy as np\n'), ((4330, 4341), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (4338, 4341), True, 'import numpy as np\n'), ((3914, 3940), 'networkx.connected_components', 'nx.connected_components', (['g'], {}), '(g)\n', (3937, 3940), True, 'import networkx as nx\n'), ((523, 538), 'random.random', 'random.random', ([], {}), '()\n', (536, 538), False, 'import random\n')] |
#!/usr/local/bin/python3
import math
import numpy as np
# get prime factors unduplicatedly, optimized
def getPrimes( num ):
pset = set()
if 0 == num % 2:
pset.add(2)
for i in range(3, num+1, 2):
if 0 == num % i:
isprime = True
for j in range(3, int(math.sqrt(i))+1, 2):
if 0 == i % j and i != j:
isprime = False
break
if isprime:
pset.add(i)
if len(pset) == 0:
pset.add(num)
return pset
# get prime factor lists, optimized: sorted by set length
def getPrimesLists( start, end ):
plist = list()
for i in range(start, end+1):
plist.append(getPrimes(i))
plist.sort(key=lambda ps:len(ps))
return plist
# find frequent itemsets, to be optimized: implemented in multi-round map-reduce
def findFrequentItemsets( buckets, candset, cursize, thrd ):
# print(len(buckets), len(candset), cursize, thrd)
filist = list()
newcandset = list()
# count frequent item sets in current loop
for itemset in buckets:
if len(itemset) == cursize:
maybe = False
if len(candset) == 0:
maybe = True
else:
for cand in candset:
if set(cand).issubset(set(itemset)):
maybe = True
if maybe:
count = 0
for bucket in buckets:
if set(itemset).issubset(set(bucket)):
count += 1
if count >= thrd:
existed = False
for check in filist:
if itemset == check:
existed = True
break
if not existed:
filist.append(itemset)
break
# construct candidate item sets for next loop
# print(filist)
for i in range(len(filist)-1):
for j in range(i+1, len(filist)):
cand = list(set(filist[i]).union(set(filist[j])))
if len(cand) == cursize+1:
existed = False
for check in newcandset:
if cand == check:
existed = True
break
if not existed:
newcandset.append(cand)
if len(newcandset) == 0:
return filist
# next loop
filist.extend(findFrequentItemsets( buckets, newcandset, cursize+1, thrd ))
# return current result
return filist
# sort frequent itemsets list & output
def sortFISandOutput( filist, outputfile ):
outlist = list()
dtype = list()
order = list()
for i in filist:
outlist.append(tuple(sorted(list(i))))
print(outlist)
maxfield = len(outlist[len(outlist)-1])
for i in range(1, maxfield+1):
dtype.append((str(i), int))
order.append(str(i))
# print(dtype, order)
outlist = np.array(outlist, dtype = dtype)
outlist.sort(order = order)
with open(outputfile, 'w') as f:
for out in outlist:
# print(out)
for i in out:
f.write("%2d\t" % i)
f.write("\n")
return 0
if __name__ == "__main__":
start = 2
end = 10000
dimstart = 3
threshold = 50
outputfile = './B.txt'
buckets = getPrimesLists(start, end)
sortFISandOutput( findFrequentItemsets(buckets, [], dimstart, threshold), outputfile)
# print( findFrequentItemsets(buckets, set([]), dimstart, threshold))
| [
"numpy.array",
"math.sqrt"
] | [((3074, 3104), 'numpy.array', 'np.array', (['outlist'], {'dtype': 'dtype'}), '(outlist, dtype=dtype)\n', (3082, 3104), True, 'import numpy as np\n'), ((303, 315), 'math.sqrt', 'math.sqrt', (['i'], {}), '(i)\n', (312, 315), False, 'import math\n')] |
import numpy as np
import h5py
def generator_index(N_max, N_size, F_max, F_size=3, seed=None):
''' Generate index for N and F
Args:
N_max: video clip total size
N_size: select size
F_max: frame total size
F_size: frame size = 3
seed: None, or 0..10000
'''
if seed is not None:
rng_N_index = np.random.RandomState(seed)
rng_F_index = np.random.RandomState(seed+1)
while True:
N_st = rng_N_index.randint(0, N_max-N_size+1)
F_st = rng_F_index.randint(0, F_max-F_size+1)
yield list(range(N_st, N_st+N_size)), list(range(F_st, F_st+F_size))
else:
N_st = 0
F_st = 0
while True:
yield list(range(N_st, N_st+N_size)), list(range(F_st, F_st+F_size))
F_st = (F_st+1) % (F_max-F_size+1) # increase frame by 1
if F_st==0: # increase video number by N_size
N_st = (N_st+N_size) % (N_max-N_size+1)
def generator_dataset_5d_array(h5_file, key_list, batch_size=8, random_seed=None):
''' Generate 3-frame images [batch_size,3,H,W,C]
Args:
h5_file: h5 file
key_list: key selected from h5py
batch_size: 8
random_seed: None, or 0..10000
'''
with h5py.File(h5_file, 'r') as f_h5:
N = f_h5['/N'][...].item()
for idx_list, frame_list in generator_index(N, batch_size, F_max=7, F_size=3, seed=random_seed):
# print(idx_list, frame_list)
out_list = list()
for k in key_list:
out_list.append(f_h5[k][idx_list,:,:,:,:][:,frame_list,:,:,:]) # h5py don't allow two index list
yield out_list
if __name__ == "__main__":
for out_list in generator_dataset_5d_array('../dataset_h5/train_data.h5', ['/im'], batch_size=2, random_seed=100):
print(out_list[0].shape) | [
"numpy.random.RandomState",
"h5py.File"
] | [((377, 404), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (398, 404), True, 'import numpy as np\n'), ((428, 459), 'numpy.random.RandomState', 'np.random.RandomState', (['(seed + 1)'], {}), '(seed + 1)\n', (449, 459), True, 'import numpy as np\n'), ((1346, 1369), 'h5py.File', 'h5py.File', (['h5_file', '"""r"""'], {}), "(h5_file, 'r')\n", (1355, 1369), False, 'import h5py\n')] |
#!/usr/bin/env python3
######################################################
##
## Testing OBSTACLE_DISTANCE messages with ArduPilot and Mission Planner
##
######################################################
# Set MAVLink protocol to 2.
import os
os.environ["MAVLINK20"] = "1"
# Import the libraries
import sys
import numpy as np
import time
import argparse
import threading
from time import sleep
from apscheduler.schedulers.background import BackgroundScheduler
from dronekit import connect
######################################################
## Reconfigurable parameters ##
######################################################
# Default configurations for connection to the FCU
connection_string_default = '/dev/ttyUSB0'
connection_baudrate_default = 921600
# Enable/disable each message/function individually
enable_msg_obstacle_distance = True
enable_msg_distance_sensor = False
obstacle_distance_msg_hz = 15.0
# lock for thread synchronization
lock = threading.Lock()
# FCU connection variables
vehicle = None
is_vehicle_connected = False
######################################################
## Parsing user' inputs ##
######################################################
parser = argparse.ArgumentParser(description='Reboots vehicle')
parser.add_argument('--connect',
help="Vehicle connection target string. If not specified, a default string will be used.")
parser.add_argument('--baudrate', type=float,
help="Vehicle connection baudrate. If not specified, a default value will be used.")
args = parser.parse_args()
connection_string = args.connect
connection_baudrate = args.baudrate
# Using default values if no specified inputs
if not connection_string:
connection_string = connection_string_default
print("INFO: Using default connection_string", connection_string)
else:
print("INFO: Using connection_string", connection_string)
if not connection_baudrate:
connection_baudrate = connection_baudrate_default
print("INFO: Using default connection_baudrate", connection_baudrate)
else:
print("INFO: Using connection_baudrate", connection_baudrate)
######################################################
## Functions - MAVLink ##
######################################################
# https://mavlink.io/en/messages/common.html#OBSTACLE_DISTANCE
def send_obstacle_distance_message():
#
# Set the parameters for obstacle distance here
#
# [0] [35] [71] <- Output: distances[72]
# | | | <- step = width / 72
# --------------- <- horizontal line
# \ | /
# \ | /
# \ | /
# ^ \ | / ^
# | \ | / |
#start \ | / end
# Camera <- Input: depth image, obtained from depth camera (implemented in d4xx_to_mavlink.py)
#
angle_start = -39.5 # -FOV/2
angle_end = 39.5 # 39.5 - real camera (2 arcs), <= 69.0: 2 arcs, > 70.0: 3 arcs
FOV = angle_end - angle_start
angle_offset = angle_start
distances_array_length = 72
increment_f = FOV / distances_array_length
min_dist_cm = 10
max_dist_cm = 800
cur_dist_cm = 200
# Setup the distances array with the same value (cur_dist_cm)
distances = np.ones((distances_array_length,), dtype=np.uint16) * cur_dist_cm
current_time_us = int(round(time.time() * 1000000))
msg = vehicle.message_factory.obstacle_distance_encode(
current_time_us, # us Timestamp (UNIX time or time since system boot)
0, # sensor_type, defined here: https://mavlink.io/en/messages/common.html#MAV_DISTANCE_SENSOR
distances, # distances, uint16_t[72], cm
0, # increment, uint8_t, deg
min_dist_cm, # min_distance, uint16_t, cm
max_dist_cm, # max_distance, uint16_t, cm
increment_f, # increment_f, float, deg
angle_offset, # angle_offset, float, deg
12 # MAV_FRAME, vehicle-front aligned: https://mavlink.io/en/messages/common.html#MAV_FRAME_BODY_FRD
)
vehicle.send_mavlink(msg)
vehicle.flush()
# https://mavlink.io/en/messages/common.html#DISTANCE_SENSOR
def send_distance_sensor_message():
# Use this to rotate all processed data
camera_facing_angle_degree = 0
orientation = int(camera_facing_angle_degree / 45)
min_dist_cm = 10
max_dist_cm = 800
curr_dist_cm = 100
current_time_ms = int(round(time.time() * 1000))
msg = vehicle.message_factory.distance_sensor_encode(
current_time_ms,# ms Timestamp (UNIX time or time since system boot) (ignored)
min_dist_cm, # min_distance, uint16_t, cm
max_dist_cm, # min_distance, uint16_t, cm
curr_dist_cm, # current_distance, uint16_t, cm
0, # type : 0 (ignored)
0, # id : 0 (ignored)
orientation, # orientation
0 # covariance : 0 (ignored)
)
vehicle.send_mavlink(msg)
vehicle.flush()
def send_msg_to_gcs(text_to_be_sent):
# MAV_SEVERITY: 0=EMERGENCY 1=ALERT 2=CRITICAL 3=ERROR, 4=WARNING, 5=NOTICE, 6=INFO, 7=DEBUG, 8=ENUM_END
# Defined here: https://mavlink.io/en/messages/common.html#MAV_SEVERITY
# MAV_SEVERITY = 3 will let the message be displayed on Mission Planner HUD, but 6 is ok for QGroundControl
if is_vehicle_connected == True:
text_msg = 'OA: ' + text_to_be_sent
status_msg = vehicle.message_factory.statustext_encode(
6, # MAV_SEVERITY
text_msg.encode() # max size is char[50]
)
vehicle.send_mavlink(status_msg)
vehicle.flush()
print("INFO: " + text_to_be_sent)
else:
print("INFO: Vehicle not connected. Cannot send text message to Ground Control Station (GCS)")
# Request a timesync update from the flight controller, for future work.
# TODO: Inspect the usage of timesync_update
def update_timesync(ts=0, tc=0):
if ts == 0:
ts = int(round(time.time() * 1000))
msg = vehicle.message_factory.timesync_encode(
tc, # tc1
ts # ts1
)
vehicle.send_mavlink(msg)
vehicle.flush()
# Establish connection to the FCU
def vehicle_connect():
global vehicle, is_vehicle_connected
if vehicle == None:
try:
vehicle = connect(connection_string, wait_ready = True, baud = connection_baudrate, source_system = 1)
except Exception as e:
print(e)
sleep(1)
except:
print('Connection error! Retrying...')
sleep(1)
if vehicle == None:
is_vehicle_connected = False
return False
else:
is_vehicle_connected = True
return True
######################################################
## Main code starts here ##
######################################################
print("INFO: Connecting to vehicle.")
while (not vehicle_connect()):
pass
print("INFO: Vehicle connected.")
# Send MAVlink messages in the background at pre-determined frequencies
sched = BackgroundScheduler()
if enable_msg_obstacle_distance:
sched.add_job(send_obstacle_distance_message, 'interval', seconds = 1/obstacle_distance_msg_hz)
send_msg_to_gcs('Sending obstacle distance messages to FCU')
elif enable_msg_distance_sensor:
sched.add_job(send_distance_sensor_message, 'interval', seconds = 1/obstacle_distance_msg_hz)
send_msg_to_gcs('Sending distance sensor messages to FCU')
else:
send_msg_to_gcs('Nothing to do. Check params to enable something')
vehicle.close()
print("INFO: Realsense pipe and vehicle object closed.")
sys.exit()
sched.start()
try:
while True:
pass
except KeyboardInterrupt:
send_msg_to_gcs('Closing the script...')
except Exception as e:
print(e)
pass
except:
send_msg_to_gcs('ERROR: Depth camera disconnected')
finally:
vehicle.close()
sys.exit()
| [
"numpy.ones",
"argparse.ArgumentParser",
"threading.Lock",
"time.sleep",
"sys.exit",
"time.time",
"dronekit.connect",
"apscheduler.schedulers.background.BackgroundScheduler"
] | [((998, 1014), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1012, 1014), False, 'import threading\n'), ((1263, 1317), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Reboots vehicle"""'}), "(description='Reboots vehicle')\n", (1286, 1317), False, 'import argparse\n'), ((7325, 7346), 'apscheduler.schedulers.background.BackgroundScheduler', 'BackgroundScheduler', ([], {}), '()\n', (7344, 7346), False, 'from apscheduler.schedulers.background import BackgroundScheduler\n'), ((8184, 8194), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8192, 8194), False, 'import sys\n'), ((3366, 3417), 'numpy.ones', 'np.ones', (['(distances_array_length,)'], {'dtype': 'np.uint16'}), '((distances_array_length,), dtype=np.uint16)\n', (3373, 3417), True, 'import numpy as np\n'), ((7902, 7912), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7910, 7912), False, 'import sys\n'), ((6562, 6652), 'dronekit.connect', 'connect', (['connection_string'], {'wait_ready': '(True)', 'baud': 'connection_baudrate', 'source_system': '(1)'}), '(connection_string, wait_ready=True, baud=connection_baudrate,\n source_system=1)\n', (6569, 6652), False, 'from dronekit import connect\n'), ((3465, 3476), 'time.time', 'time.time', ([], {}), '()\n', (3474, 3476), False, 'import time\n'), ((4649, 4660), 'time.time', 'time.time', ([], {}), '()\n', (4658, 4660), False, 'import time\n'), ((6719, 6727), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (6724, 6727), False, 'from time import sleep\n'), ((6807, 6815), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (6812, 6815), False, 'from time import sleep\n'), ((6231, 6242), 'time.time', 'time.time', ([], {}), '()\n', (6240, 6242), False, 'import time\n')] |
import argparse
import asyncio
import csv
import logging
import os
import random
import time
import statistics
import numpy as np
from nepytune.benchmarks.query_runner import get_query_runner
from nepytune.benchmarks.connection_pool import NeptuneConnectionPool
QUERY_NAMES = [
'get_sibling_attrs', 'undecided_user_check', 'undecided_user_audience',
'brand_interaction_audience', 'get_all_transient_ids_in_household',
'early_website_adopters'
]
parser = argparse.ArgumentParser(description="Run query benchmarks")
parser.add_argument("--users", type=int, default=10)
parser.add_argument("--samples", type=int, default=1000)
parser.add_argument("--queries", default=['all'], type=str,
nargs='+', choices=QUERY_NAMES + ['all'])
parser.add_argument("--verbose", action='store_true')
parser.add_argument("--csv", action="store_true")
parser.add_argument("--output", type=str, default="results")
args = parser.parse_args()
if args.queries == ['all']:
args.queries = QUERY_NAMES
if (args.verbose):
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
sem = asyncio.Semaphore(args.users)
def custom_exception_handler(loop, context):
"""Stop event loop if exception occurs."""
loop.default_exception_handler(context)
exception = context.get('exception')
if isinstance(exception, Exception):
print(context)
loop.stop()
async def run_query(query_runner, sample, semaphore, pool):
"""Run query with limit on concurrent connections."""
async with semaphore:
return await query_runner.run(sample, pool)
async def run(query, samples, pool):
"""Run query benchmark tasks."""
query_runner = get_query_runner(query, samples)
logger.info("Initializing query data.")
await asyncio.gather(query_runner.initialize())
queries = []
logger.info("Running benchmark.")
for i in range(samples):
queries.append(asyncio.create_task(run_query(query_runner, i, sem, pool)))
results = await asyncio.gather(*queries)
logger.info(f"Successful queries: {query_runner.succeded}")
logger.info(f"Failed queries: {query_runner.failed}")
benchmark_results = [result for result in results if result]
return benchmark_results, query_runner.succeded, query_runner.failed
def stats(results):
"""Print statistics for benchmark results."""
print(f"Samples: {args.samples}")
print(f"Mean: {statistics.mean(results)}s")
print(f"Median: {statistics.median(results)}s")
a = np.array(results)
for percentile in [50, 90, 99, 99.9, 99.99]:
result = np.percentile(a, percentile)
print(f"{percentile} percentile: {result}s")
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.set_exception_handler(custom_exception_handler)
pool = NeptuneConnectionPool(args.users)
try:
loop.run_until_complete(pool.create())
for query in args.queries:
logger.info(f"Benchmarking query: {query}")
logger.info(f"Concurrent users: {args.users}")
results, succeded, failed = loop.run_until_complete(run(query, args.samples, pool))
stats([measure[2] for measure in results])
if args.csv:
dst = f"{args.output}/{query}-{args.samples}-{args.users}.csv"
with open(dst, "w") as f:
writer = csv.writer(f)
for measure in results:
writer.writerow(measure)
query_stats = f"{args.output}/{query}-{args.samples}-{args.users}-stats.csv"
with open(query_stats, "w") as f:
writer = csv.writer(f)
writer.writerow([succeded, failed])
finally:
loop.run_until_complete(pool.destroy())
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
| [
"logging.basicConfig",
"logging.getLogger",
"nepytune.benchmarks.connection_pool.NeptuneConnectionPool",
"statistics.mean",
"argparse.ArgumentParser",
"csv.writer",
"nepytune.benchmarks.query_runner.get_query_runner",
"statistics.median",
"numpy.array",
"asyncio.Semaphore",
"asyncio.gather",
"... | [((470, 529), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run query benchmarks"""'}), "(description='Run query benchmarks')\n", (493, 529), False, 'import argparse\n'), ((1076, 1176), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'level', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=level, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (1095, 1176), False, 'import logging\n'), ((1181, 1208), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1198, 1208), False, 'import logging\n'), ((1216, 1245), 'asyncio.Semaphore', 'asyncio.Semaphore', (['args.users'], {}), '(args.users)\n', (1233, 1245), False, 'import asyncio\n'), ((1803, 1835), 'nepytune.benchmarks.query_runner.get_query_runner', 'get_query_runner', (['query', 'samples'], {}), '(query, samples)\n', (1819, 1835), False, 'from nepytune.benchmarks.query_runner import get_query_runner\n'), ((2626, 2643), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (2634, 2643), True, 'import numpy as np\n'), ((2833, 2857), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2855, 2857), False, 'import asyncio\n'), ((2927, 2960), 'nepytune.benchmarks.connection_pool.NeptuneConnectionPool', 'NeptuneConnectionPool', (['args.users'], {}), '(args.users)\n', (2948, 2960), False, 'from nepytune.benchmarks.connection_pool import NeptuneConnectionPool\n'), ((2121, 2145), 'asyncio.gather', 'asyncio.gather', (['*queries'], {}), '(*queries)\n', (2135, 2145), False, 'import asyncio\n'), ((2710, 2738), 'numpy.percentile', 'np.percentile', (['a', 'percentile'], {}), '(a, percentile)\n', (2723, 2738), True, 'import numpy as np\n'), ((2537, 2561), 'statistics.mean', 'statistics.mean', (['results'], {}), '(results)\n', (2552, 2561), False, 'import statistics\n'), ((2587, 2613), 'statistics.median', 'statistics.median', (['results'], {}), '(results)\n', (2604, 2613), 
False, 'import statistics\n'), ((3493, 3506), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3503, 3506), False, 'import csv\n'), ((3772, 3785), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3782, 3785), False, 'import csv\n')] |
import os
from collections import OrderedDict
import numpy as np
np.set_printoptions(suppress=True)
import matplotlib as mpl
from matplotlib import cm
import matplotlib.pyplot as plt
from time import time
from copy import copy
class designer():
def __init__(self,ff,weight,method='D'):
'''
input:
------
ff: 2-D array. Rows represent points in the pool; columns represent parameters
involved in the direvative.
weight: 1-D array. Its length equals to the total number of points in the pool,
or the numnber of rows of 'ff'.
method: The criterion used for the optimization, default is D-optimal method.
'''
self.ff = ff
self.m = ff.shape[1] # number of parameters
self.weight = weight
self.N_candidates = np.sum(weight!=0)
self.method = method
self.d = 0 # sensitivity function
self.d_max = 0 # initialize the maximum of sensitivity.
self.id_minimax = None
self.M = 0 # information matrix
self.M_inv = self.M # information matrix inverse
self.psi_iter = [] # all the optimal criteria over the iterative procedure
self.phi_iter = [] # all the sensitivity function ove the iterative procedure
self.weight_iter = []
def cal_criterion(self,local=False):
self.M = 0
for i,f in enumerate(self.ff):
self.M += self.weight[i] * np.outer(f,f)
self.M_inv = np.linalg.inv(self.M)
if self.method == 'D':
self.d = np.array([f @ self.M_inv @ f for f in self.ff])
if local==False:
self.id_minimax = np.argmax(self.d)
self.d_max = self.d[self.id_minimax]
else:
self.id_minimax = np.argmin(np.ma.array(self.d,mask=(self.weight==0)))
def collect(self):
self.psi_iter.append(np.linalg.det(self.M_inv))
self.phi_iter.append(self.d_max)
self.weight_iter.append(self.weight)
def update_design(self, alpha, action='add'):
if action == 'add':
alpha_s = alpha
elif action == 'remove':
p_s = self.weight[self.id_minimax]
alpha_s = -min(alpha, p_s/(1-p_s))
else:
print("Design not updated")
return 1
self.weight = self.weight * (1-alpha_s) # reduce current design by alpha
self.weight[self.id_minimax] += alpha_s # add the new point weighted by alpha
self.weight = self.weight / sum(self.weight) # renormalize weight
return 0
def optimize(self,verbose=False,delta=1e-5,max_steps=1e6,remove=False):
if delta == None:
threshold = 0 # no limit on "d_max"
else:
threshold = self.m / (1-delta)
# the stop condition: either maximum steps or threshold met.
stop = lambda s: s >= max_steps or self.d_max <= threshold
step = 0
self.cal_criterion(local=False)
self.collect()
while not stop(step):
step += 1
alpha = 1 / (1+step+self.N_candidates) # step length
self.cal_criterion(local=False)
if self.update_design(alpha,action='add'):
break
if remove == True:
self.cal_criterion(local=True)
if self.update_design(alpha,action='remove'):
break
self.collect()
if verbose:
print('Iteration steps: {}'.format(step))
print('criterion: {:.3f}'.format(self.m/self.d_max))
def timer():
pass
| [
"numpy.ma.array",
"numpy.argmax",
"numpy.linalg.det",
"numpy.sum",
"numpy.array",
"numpy.linalg.inv",
"numpy.outer",
"numpy.set_printoptions"
] | [((65, 99), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (84, 99), True, 'import numpy as np\n'), ((825, 844), 'numpy.sum', 'np.sum', (['(weight != 0)'], {}), '(weight != 0)\n', (831, 844), True, 'import numpy as np\n'), ((1481, 1502), 'numpy.linalg.inv', 'np.linalg.inv', (['self.M'], {}), '(self.M)\n', (1494, 1502), True, 'import numpy as np\n'), ((1556, 1605), 'numpy.array', 'np.array', (['[(f @ self.M_inv @ f) for f in self.ff]'], {}), '([(f @ self.M_inv @ f) for f in self.ff])\n', (1564, 1605), True, 'import numpy as np\n'), ((1660, 1677), 'numpy.argmax', 'np.argmax', (['self.d'], {}), '(self.d)\n', (1669, 1677), True, 'import numpy as np\n'), ((1879, 1904), 'numpy.linalg.det', 'np.linalg.det', (['self.M_inv'], {}), '(self.M_inv)\n', (1892, 1904), True, 'import numpy as np\n'), ((1446, 1460), 'numpy.outer', 'np.outer', (['f', 'f'], {}), '(f, f)\n', (1454, 1460), True, 'import numpy as np\n'), ((1782, 1824), 'numpy.ma.array', 'np.ma.array', (['self.d'], {'mask': '(self.weight == 0)'}), '(self.d, mask=self.weight == 0)\n', (1793, 1824), True, 'import numpy as np\n')] |
import torch
import os
import argparse
import numpy as np
import hparams as hp
from data.utils import parse_path_file
from model.generator.melgan import MelGANGenerator
from .synthesize import Synthesizer
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def load_data(audio_index_path, mel_index_path, index_list):
audio_index = parse_path_file(audio_index_path)
mel_index = parse_path_file(mel_index_path)
audio_list = []
mel_list = []
for index in index_list:
audio_list.append(np.load(audio_index[index]))
mel_list.append(torch.from_numpy(np.load(mel_index[index])))
return audio_list, mel_list
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str)
parser.add_argument('--audio_index_path', type=str, default=os.path.join("dataset", "audio", "eval"))
parser.add_argument('--mel_index_path', type=str, default=os.path.join("dataset", "mel", "eval"))
args = parser.parse_args()
synthesizer = Synthesizer(args.checkpoint_path)
audio_list, mel_list = load_data(args.audio_index_path, args.mel_index_path, [0, 1, 2, 3, 4, 5])
| [
"argparse.ArgumentParser",
"data.utils.parse_path_file",
"os.path.join",
"torch.cuda.is_available",
"numpy.load"
] | [((358, 391), 'data.utils.parse_path_file', 'parse_path_file', (['audio_index_path'], {}), '(audio_index_path)\n', (373, 391), False, 'from data.utils import parse_path_file\n'), ((408, 439), 'data.utils.parse_path_file', 'parse_path_file', (['mel_index_path'], {}), '(mel_index_path)\n', (423, 439), False, 'from data.utils import parse_path_file\n'), ((705, 730), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (728, 730), False, 'import argparse\n'), ((239, 264), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (262, 264), False, 'import torch\n'), ((533, 560), 'numpy.load', 'np.load', (['audio_index[index]'], {}), '(audio_index[index])\n', (540, 560), True, 'import numpy as np\n'), ((850, 890), 'os.path.join', 'os.path.join', (['"""dataset"""', '"""audio"""', '"""eval"""'], {}), "('dataset', 'audio', 'eval')\n", (862, 890), False, 'import os\n'), ((954, 992), 'os.path.join', 'os.path.join', (['"""dataset"""', '"""mel"""', '"""eval"""'], {}), "('dataset', 'mel', 'eval')\n", (966, 992), False, 'import os\n'), ((603, 628), 'numpy.load', 'np.load', (['mel_index[index]'], {}), '(mel_index[index])\n', (610, 628), True, 'import numpy as np\n')] |
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from misc.hdd_save_load import save_json, load_json
import numpy as np
from misc.stick_breaking import stick_breaking_construction_sticky_hdp
class StickyHDPPrior:
    """Hyper-prior of a sticky HDP: gamma/beta priors on the concentration
    parameters together with the base measure and the truncation level."""
    # file name used by store() / load()
    fn = 'sticky_hdp_prior.json'
    def __init__(self, max_z, alpha_kappa0, rho0, gamma0, H0):
        """Keep every hyper-parameter that describes the prior.

        Args:
            max_z: truncation level of the stick-breaking construction.
            alpha_kappa0: (shape, rate) of the gamma prior on alpha + kappa.
            rho0: (a, b) of the beta prior on the stickiness ratio rho.
            gamma0: (shape, rate) of the gamma prior on gamma.
            H0: base measure.
        """
        self.alpha_kappa0 = alpha_kappa0
        self.rho0 = rho0
        self.gamma0 = gamma0
        self.H0 = H0
        self.max_z = max_z
    def prior_sample(self):
        """Draw [alpha_kappa, rho, gamma, beta, pi] from the prior."""
        # numpy's gamma takes (shape, scale) while we store (shape, rate)
        alpha_kappa = np.random.gamma(self.alpha_kappa0[0], 1 / self.alpha_kappa0[1])
        rho = np.random.beta(self.rho0[0], self.rho0[1])
        gamma = np.random.gamma(self.gamma0[0], 1 / self.gamma0[1])
        # split the joint concentration into the sticky / non-sticky parts
        kappa = rho * alpha_kappa
        alpha = (1 - rho) * alpha_kappa
        beta, pi = stick_breaking_construction_sticky_hdp(alpha, kappa, gamma, self.max_z, self.H0)
        return [alpha_kappa, rho, gamma, beta, pi]
    def max_likely_sample(self):
        """Like prior_sample(), but with hyper-parameters fixed at the prior means."""
        alpha_kappa = self.alpha_kappa0[0] / self.alpha_kappa0[1]
        rho = self.rho0[0] / (self.rho0[0] + self.rho0[1])
        gamma = self.gamma0[0] / self.gamma0[1]
        kappa = rho * alpha_kappa
        alpha = (1 - rho) * alpha_kappa
        beta, pi = stick_breaking_construction_sticky_hdp(alpha, kappa, gamma, self.max_z, self.H0)
        return [alpha_kappa, rho, gamma, beta, pi]
    @staticmethod
    def load(folder):
        """Rebuild a StickyHDPPrior from the JSON file written by store()."""
        d = load_json(folder, StickyHDPPrior.fn)
        return StickyHDPPrior(
            d['max_z'],
            [d['alpha_kappa_shape'], d['alpha_kappa_rate']],
            [d['rho_shape_a'], d['rho_shape_b']],
            [d['gamma_shape'], d['gamma_rate']],
            d['H0'],
        )
    def store(self, folder):
        """Serialize all hyper-parameters as JSON inside ``folder``."""
        save_json(folder, StickyHDPPrior.fn, {
            'alpha_kappa_shape': self.alpha_kappa0[0],
            'alpha_kappa_rate': self.alpha_kappa0[1],
            'rho_shape_a': self.rho0[0],
            'rho_shape_b': self.rho0[1],
            'gamma_shape': self.gamma0[0],
            'gamma_rate': self.gamma0[1],
            'H0': self.H0,
            'max_z': self.max_z,
        })
| [
"numpy.random.beta",
"misc.hdd_save_load.load_json",
"misc.stick_breaking.stick_breaking_construction_sticky_hdp",
"numpy.random.gamma",
"misc.hdd_save_load.save_json"
] | [((1709, 1772), 'numpy.random.gamma', 'np.random.gamma', (['self.alpha_kappa0[0]', '(1 / self.alpha_kappa0[1])'], {}), '(self.alpha_kappa0[0], 1 / self.alpha_kappa0[1])\n', (1724, 1772), True, 'import numpy as np\n'), ((1787, 1829), 'numpy.random.beta', 'np.random.beta', (['self.rho0[0]', 'self.rho0[1]'], {}), '(self.rho0[0], self.rho0[1])\n', (1801, 1829), True, 'import numpy as np\n'), ((1846, 1897), 'numpy.random.gamma', 'np.random.gamma', (['self.gamma0[0]', '(1 / self.gamma0[1])'], {}), '(self.gamma0[0], 1 / self.gamma0[1])\n', (1861, 1897), True, 'import numpy as np\n'), ((2053, 2138), 'misc.stick_breaking.stick_breaking_construction_sticky_hdp', 'stick_breaking_construction_sticky_hdp', (['alpha', 'kappa', 'gamma', 'self.max_z', 'self.H0'], {}), '(alpha, kappa, gamma, self.max_z, self.H0\n )\n', (2091, 2138), False, 'from misc.stick_breaking import stick_breaking_construction_sticky_hdp\n'), ((2569, 2654), 'misc.stick_breaking.stick_breaking_construction_sticky_hdp', 'stick_breaking_construction_sticky_hdp', (['alpha', 'kappa', 'gamma', 'self.max_z', 'self.H0'], {}), '(alpha, kappa, gamma, self.max_z, self.H0\n )\n', (2607, 2654), False, 'from misc.stick_breaking import stick_breaking_construction_sticky_hdp\n'), ((2832, 2868), 'misc.hdd_save_load.load_json', 'load_json', (['folder', 'StickyHDPPrior.fn'], {}), '(folder, StickyHDPPrior.fn)\n', (2841, 2868), False, 'from misc.hdd_save_load import save_json, load_json\n'), ((3647, 3690), 'misc.hdd_save_load.save_json', 'save_json', (['folder', 'StickyHDPPrior.fn', 'sdict'], {}), '(folder, StickyHDPPrior.fn, sdict)\n', (3656, 3690), False, 'from misc.hdd_save_load import save_json, load_json\n')] |
'''
Description: 使用梯度下降法求解一元线性回归方程 y(x,w) = w0 + w1 * x 系数
Author: xuzf
Date: 2021-03-21 07:55:04
FilePath: \algorithm-toy\Gradient-descent\gd.py
'''
import numpy as np
def grad_descent_fit(X, Y, steps, lr):
    """Fit the line y = w0 + w1 * x by batch gradient descent.

    Args:
        X, Y: equally long sequences of sample inputs and targets.
        steps: number of gradient updates to perform.
        lr: learning rate.

    Returns:
        (w0, w1): intercept and slope after ``steps`` iterations.
    """
    xs = np.array(X)
    ys = np.array(Y)
    n = len(X)
    # random initialisation (consumes two draws from the global RNG)
    w0 = np.random.random()
    w1 = np.random.random()
    for _ in range(steps):
        # residuals of the current model on every sample
        residual = w1 * xs + w0 - ys
        # analytic gradients of the mean squared error
        grad_w0 = 2 * np.sum(residual) / n
        grad_w1 = 2 * np.dot(residual, xs) / n
        w0 -= lr * grad_w0
        w1 -= lr * grad_w1
    return w0, w1
| [
"numpy.random.random",
"numpy.array",
"numpy.dot",
"numpy.sum"
] | [((223, 234), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (231, 234), True, 'import numpy as np\n'), ((249, 260), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (257, 260), True, 'import numpy as np\n'), ((299, 317), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (315, 317), True, 'import numpy as np\n'), ((327, 345), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (343, 345), True, 'import numpy as np\n'), ((461, 479), 'numpy.sum', 'np.sum', (['Loss_array'], {}), '(Loss_array)\n', (467, 479), True, 'import numpy as np\n'), ((506, 533), 'numpy.dot', 'np.dot', (['Loss_array', 'X_array'], {}), '(Loss_array, X_array)\n', (512, 533), True, 'import numpy as np\n')] |
from itertools import chain
from statistics import mode
import numpy as np
import pandas as pd
from bpdb import set_trace
from numpy.linalg import inv
class State_Log:
    """Collects time-stamped state estimates, true states, control inputs and
    NEES statistics for one agent so they can be exported as pandas frames."""
    def __init__(self, env, agent):
        # env supplies the true clock/rover states; agent owns the estimator
        # whose output is being logged.
        self.env = env
        self.agent = agent
        self.define_logs()
    def define_logs(self):
        """(Re)initialize every log container as an empty list."""
        self.log_t = []
        self.log_t_estimate = []
        self.log_x = []
        self.log_P = []
        self._log_u = []
        self.log_x_true = []
        self.log_epsilon_x = []
    def log_state(self):
        """Append the current estimate (x, P), both timestamps and the truth."""
        t = self.agent.clock.magic_time()
        x, P = self.agent.estimator.get_state_estimate()
        t_estimate = self.agent.estimator.filt.get_time_estimate()
        self.log_t.append(t)
        self.log_t_estimate.append(t_estimate)
        self.log_x.append(x)
        self.log_P.append(P)
        x_true = self.get_true()
        self.log_x_true.append(x_true)
    def get_true(self):
        """Assemble the true state vector: interleaved clock (b, b_dot) pairs,
        followed by the concatenated rover states (empty if there are none)."""
        # Clock states; agents without a clock contribute NaN placeholders
        b_values = [
            self.env.agent_dict[agent].clock.b
            if hasattr(self.env.agent_dict[agent], "clock")
            else np.nan
            for agent in self.env.agent_clocks_to_be_estimated
        ]
        b_dot_values = [
            self.env.agent_dict[agent].clock.b_dot
            if hasattr(self.env.agent_dict[agent], "clock")
            else np.nan
            for agent in self.env.agent_clocks_to_be_estimated
        ]
        # zip + chain interleaves: [b0, b_dot0, b1, b_dot1, ...]
        clock_states = np.array(list(chain(*zip(b_values, b_dot_values))))
        if self.env.ROVER_NAMES:
            rover_states = np.concatenate(
                [
                    self.env.dynamics.get_true_state(rover_name)
                    for rover_name in self.env.ROVER_NAMES
                ]
            )
        else:
            rover_states = np.empty(0)
        x_true = np.concatenate([clock_states, rover_states])
        return x_true
    def log_u(self):
        """Record the control input evaluated at the current time estimate."""
        t_estimate = self.agent.estimator.filt.get_time_estimate()
        u = self.env.dynamics.u(t_estimate)
        self._log_u.append(u)
    def log_NEES_errors(self):
        """Append the normalized estimation error squared e' P^-1 e (state only)."""
        # State
        x = self.agent.estimator.filt.x.copy()
        P = self.agent.estimator.filt.P.copy()
        x_true = self.get_true()
        e_x = x - x_true
        epsilon_x = e_x @ inv(P) @ e_x
        # Measurements
        # e_z = self._log_residuals[-1]
        # epsilon_z = e_z @ inv(self._log_P_yy[-1]) @ e_z
        self.log_epsilon_x.append(epsilon_x)
        # self.log_epsilon_z.append(epsilon_z)
    def get_state_log_df(self):
        """Build a DataFrame with timestamps, estimates, errors, truths, the
        per-state sigmas (sqrt of the covariance diagonal over time) and — when
        not in ROS mode — the logged control inputs; raw P matrices go into 'P'."""
        if not self.env.ros:
            data = np.hstack(
                [np.array(self.log_t)[np.newaxis].T]
                + [np.array(self.log_t_estimate)[np.newaxis].T]
                + [np.stack(self.log_x)]
                + [np.stack(self.log_x) - np.stack(self.log_x_true)]
                + [np.stack(self.log_x_true)]
                + [
                    # dstack stacks P over time along axis 2; [i, i, :] is the
                    # time series of the i-th diagonal entry
                    np.sqrt(np.dstack(self.log_P)[i, i, :])[np.newaxis].T
                    for i in range(self.env.NUM_STATES)
                ]
                + [np.stack(self._log_u)]
            )
        else:
            data = np.hstack(
                [np.array(self.log_t)[np.newaxis].T]
                + [np.array(self.log_t_estimate)[np.newaxis].T]
                + [np.stack(self.log_x)]
                + [np.stack(self.log_x) - np.stack(self.log_x_true)]
                + [np.stack(self.log_x_true)]
                + [
                    np.sqrt(np.dstack(self.log_P)[i, i, :])[np.newaxis].T
                    for i in range(self.env.NUM_STATES)
                ]
            )
        state_names = self.env.STATE_NAMES
        if not self.env.ros:
            control_names = list(
                chain.from_iterable(
                    [
                        ["u_{}_{}".format(v, rover_name) for v in self.env.dim_names]
                        for rover_name in self.env.ROVER_NAMES
                    ]
                )
            )
        else:
            control_names = []
        columns = (
            ["t", "t_estimate"]
            + state_names
            + ["{}_error".format(var) for var in state_names]
            + ["{}_true".format(var) for var in state_names]
            + ["{}_sigma".format(var) for var in state_names]
            + control_names
        )
        df = pd.DataFrame(data=data, columns=columns)
        df["P"] = self.log_P
        return df
    def get_P(self):
        # NOTE(review): np.dstack yields a 3-D array and pd.DataFrame rejects
        # input with more than two dimensions — verify this method is actually
        # used / intended.
        data = np.dstack(self.log_P)
        df = pd.DataFrame(data=data, columns=["P"])
        return df
| [
"numpy.dstack",
"numpy.stack",
"numpy.array",
"numpy.linalg.inv",
"numpy.empty",
"numpy.concatenate",
"pandas.DataFrame"
] | [((1815, 1859), 'numpy.concatenate', 'np.concatenate', (['[clock_states, rover_states]'], {}), '([clock_states, rover_states])\n', (1829, 1859), True, 'import numpy as np\n'), ((4323, 4363), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'columns'}), '(data=data, columns=columns)\n', (4335, 4363), True, 'import pandas as pd\n'), ((4450, 4471), 'numpy.dstack', 'np.dstack', (['self.log_P'], {}), '(self.log_P)\n', (4459, 4471), True, 'import numpy as np\n'), ((4486, 4524), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': "['P']"}), "(data=data, columns=['P'])\n", (4498, 4524), True, 'import pandas as pd\n'), ((1785, 1796), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1793, 1796), True, 'import numpy as np\n'), ((2276, 2282), 'numpy.linalg.inv', 'inv', (['P'], {}), '(P)\n', (2279, 2282), False, 'from numpy.linalg import inv\n'), ((3057, 3078), 'numpy.stack', 'np.stack', (['self._log_u'], {}), '(self._log_u)\n', (3065, 3078), True, 'import numpy as np\n'), ((3384, 3409), 'numpy.stack', 'np.stack', (['self.log_x_true'], {}), '(self.log_x_true)\n', (3392, 3409), True, 'import numpy as np\n'), ((2843, 2868), 'numpy.stack', 'np.stack', (['self.log_x_true'], {}), '(self.log_x_true)\n', (2851, 2868), True, 'import numpy as np\n'), ((3274, 3294), 'numpy.stack', 'np.stack', (['self.log_x'], {}), '(self.log_x)\n', (3282, 3294), True, 'import numpy as np\n'), ((3315, 3335), 'numpy.stack', 'np.stack', (['self.log_x'], {}), '(self.log_x)\n', (3323, 3335), True, 'import numpy as np\n'), ((3338, 3363), 'numpy.stack', 'np.stack', (['self.log_x_true'], {}), '(self.log_x_true)\n', (3346, 3363), True, 'import numpy as np\n'), ((2733, 2753), 'numpy.stack', 'np.stack', (['self.log_x'], {}), '(self.log_x)\n', (2741, 2753), True, 'import numpy as np\n'), ((2774, 2794), 'numpy.stack', 'np.stack', (['self.log_x'], {}), '(self.log_x)\n', (2782, 2794), True, 'import numpy as np\n'), ((2797, 2822), 'numpy.stack', 'np.stack', 
(['self.log_x_true'], {}), '(self.log_x_true)\n', (2805, 2822), True, 'import numpy as np\n'), ((3459, 3480), 'numpy.dstack', 'np.dstack', (['self.log_P'], {}), '(self.log_P)\n', (3468, 3480), True, 'import numpy as np\n'), ((2918, 2939), 'numpy.dstack', 'np.dstack', (['self.log_P'], {}), '(self.log_P)\n', (2927, 2939), True, 'import numpy as np\n'), ((3155, 3175), 'numpy.array', 'np.array', (['self.log_t'], {}), '(self.log_t)\n', (3163, 3175), True, 'import numpy as np\n'), ((3210, 3239), 'numpy.array', 'np.array', (['self.log_t_estimate'], {}), '(self.log_t_estimate)\n', (3218, 3239), True, 'import numpy as np\n'), ((2614, 2634), 'numpy.array', 'np.array', (['self.log_t'], {}), '(self.log_t)\n', (2622, 2634), True, 'import numpy as np\n'), ((2669, 2698), 'numpy.array', 'np.array', (['self.log_t_estimate'], {}), '(self.log_t_estimate)\n', (2677, 2698), True, 'import numpy as np\n')] |
def blend(color, factor):
    """Blend one 0-255 channel towards white; factor 1 leaves it unchanged."""
    faded = 255 - (255 - color) * factor
    return int(faded)
def rgb(red, green, blue, factor=1):
    """Return color as #rrggbb for the given color values."""
    channels = (blend(red, factor), blend(green, factor), blend(blue, factor))
    return '#%02x%02x%02x' % channels
# Named palette used throughout the plots; the "light*" variants are the same
# hue blended towards white via rgb(..., factor).
colors = {
    'blue': rgb(55, 126, 184),
    'lightblue': rgb(55, 126, 184, 0.6),
    'oldgreen': rgb(77, 175, 74),
    'oldlightgreen': rgb(77, 175, 74, 0.6),
    'orange': rgb(255, 127, 0),
    'lightorange': rgb(255, 127, 0, 0.6),
    'red': rgb(228, 26, 28),
    'lightred': rgb(228, 26, 28, 0.75),
    'black': rgb(0, 0, 0),
    'morton': "#FFD200",
    'cuckoo': "#FF7F00",
    'lightmorton': rgb(255, 210, 0, 0.6),
    'lightcuckoo': rgb(255, 127, 0, 0.6),
    'xor': "#CD161C",
    'bloom': "#23507A",
    'lightbloom': rgb(35, 80, 122, 0.6),
    'fuse': "#29021d",
    'lightviolet': "#984EA3",
    'violet': "#67356F",
    'lightgreen': "#4DAF4A",
    'green': "#3C893A",
    'turquoise': "#45E2CD",
    'pink': "#F028F0",
}
from matplotlib.ticker import FuncFormatter
def kilos(x, pos):
    """Tick formatter: render x in thousands, e.g. 2000 -> '2\\,K'."""
    scaled = x * 1e-3
    return '%1.0f\\,K' % scaled
def millions(x, pos=None):
    """Tick formatter: render x in millions; zero stays '0'."""
    return '%1.0f\\,M' % (x * 1e-6) if x else '0'
def billions(x, pos):
    """Tick formatter: megas below 1e8, one-decimal gigas below 1e10, integer gigas above."""
    if x == 0:
        return '0'
    if x < 1e8:
        return '%1.0f\\,M' % (x * 1e-6)
    if x < 1e10:
        return '%1.1f\\,G' % (x * 1e-9)
    return '%1.0f\\,G' % (x * 1e-9)
def billions2(x, pos):
    """Tick formatter: integer gigas; zero stays '0'."""
    return '0' if x == 0 else '%1.0f\\,G' % (x * 1e-9)
def speedup(x, pos=None):
    """Format a relative speedup as a signed LaTeX percentage, e.g. 0.5 -> '+50\\%'."""
    prefix = '+' if x > 0 else ''
    return '%s%.0f\\%%' % (prefix, x * 100)
def perc(x, pos):
    """Tick formatter: render a ratio as an integer LaTeX percentage."""
    return '%.0f\\%%' % (x * 100)
def perc2(x, pos):
    """Tick formatter: render an already-scaled percentage value."""
    return '%.0f\\%%' % x
# Ready-to-use matplotlib tick formatters.
# NOTE: `kilos` rebinds (shadows) the function of the same name above; only
# the FuncFormatter-wrapped object remains accessible afterwards.
kilos = FuncFormatter(kilos)
mills = FuncFormatter(millions)
gigs = FuncFormatter(billions)
gigs2 = FuncFormatter(billions2)
percent = FuncFormatter(perc)
percent2 = FuncFormatter(perc2)
# Matplotlib marker codes keyed by human-readable shape name.
markers = {
    'circle': 'o',
    'triangle': '^',
    'square': 's',
    'diamond': 'D'
}
def extract(rows: [dict], index: int, xaxis: str, yaxis: str):
    """Return the raw ``yaxis`` column of benchmark row ``index``."""
    row = rows[index]
    return row[yaxis]
def extract_time(rows: [dict], index: int, xaxis: str, yaxis: str):
    """Return the wall-clock time of benchmark row ``index``."""
    row = rows[index]
    return row['real_time']
def extract_throughput(rows: [dict], index: int, xaxis: str, yaxis: str):
    """Operations per second: lookups for 'Count' fixtures, inserts otherwise."""
    row = rows[index]
    count_key = 'n_elements_lookup' if row['fixture'] == 'Count' else 'n_elements_build'
    return row[count_key] * 1000 / row['real_time']
def extract_speedup(rows: [dict], index: int, xaxis: str, yaxis: str):
    """Speedup of row ``index`` relative to the first (baseline) row."""
    baseline = rows[0]['real_time']
    return baseline / rows[index]['real_time']
# Maps a y-axis metric name to the function that extracts it from a row list.
yconverter = {
    'time': extract_time,
    'throughput': extract_throughput,
    'speedup': extract_speedup,
    'DTLB-misses': extract,
    'ITLB-misses': extract,
    'L1D-misses': extract,
    'L1I-misses': extract,
    'LLC-misses': extract,
    'branch-misses': extract,
    'cycles': extract,
    'instructions': extract,
    'task-clock': extract,
    'avg_size': extract,
    'size': extract,
    'bits': extract,
    'retries': extract,
    'fpr': extract
}
# Preferred axis scale for each x-axis variable.
xscale = {
    'k': 'linear',
    's': 'linear',
    'n_threads': 'linear',
    'n_partitions': 'log',
    'n_elements_build': 'log',
    'n_elements_lookup': 'log',
    'shared_elements': 'linear'
}
import pandas as pd
def read_benchmark(path: str):
    """Parse a benchmark CSV export into per-benchmark lists of row dicts.

    The ``name`` column is expected to look like
    ``<Name>_<k>/<fixture>/<s*100>/<threads>/<partitions>/<n_build>/<n_lookup>/<shared*100>``;
    the encoded fields are decoded into dedicated columns.

    Args:
        path: file path (or file-like object) accepted by ``pd.read_csv``.

    Returns:
        dict mapping base benchmark name -> list of row dicts; each row is
        enriched with a ``throughput`` (ops/s) for Construct/Count fixtures.
    """
    csv = pd.read_csv(path)
    split_name = csv['name'].apply(lambda x: x.split('/')[0].split('_'))
    csv['k'] = split_name.apply(lambda x: int(x[len(x) - 1]))
    csv['fixture'] = csv['name'].apply(lambda x: x.split('/')[1])
    csv['s'] = csv['name'].apply(lambda x: float(x.split('/')[2]) / 100)
    csv['n_threads'] = csv['name'].apply(lambda x: int(x.split('/')[3]))
    csv['n_partitions'] = csv['name'].apply(lambda x: int(x.split('/')[4]))
    csv['n_elements_build'] = csv['name'].apply(lambda x: int(x.split('/')[5]))
    csv['n_elements_lookup'] = csv['name'].apply(lambda x: int(x.split('/')[6]))
    csv['shared_elements'] = csv['name'].apply(lambda x: float(x.split('/')[7]) / 100)
    csv['name'] = split_name.apply(lambda x: "_".join(x[0:(len(x) - 1)]))
    data = {}
    for _, row in csv.iterrows():
        data_row = {}
        # Series.iteritems() was removed in pandas 2.0; items() is equivalent.
        for label, item in row.items():
            data_row[label] = item
        if data_row['fixture'] == 'Construct':
            data_row['throughput'] = data_row['n_elements_build'] / (data_row['duration'] / 1000)
        elif data_row['fixture'] == 'Count' or data_row['fixture'] == 'MTCount':
            data_row['throughput'] = data_row['n_elements_lookup'] / (data_row['duration'] / 1000)
        name = row['name']
        if name not in data:
            data[name] = []
        data[name].append(data_row)
    return data
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, FuncFormatter, FixedLocator
from matplotlib.patches import Rectangle
import numpy as np
# Select the pgf backend so figures are emitted as LaTeX-compatible output.
# NOTE(review): pyplot is already imported above; switching backends after
# that import works in recent matplotlib versions — verify for the pinned one.
matplotlib.use('pgf')
def latexify(fig_width=None, fig_height=None, columns=1):
    """Set up matplotlib's RC params for LaTeX plotting.
    Call this before plotting a figure.
    Parameters
    ----------
    fig_width : float, optional, inches
    fig_height : float, optional, inches
    columns : {1, 2}
    """
    # code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
    # Width and max height in inches for IEEE journals taken from
    # computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
    assert (columns in [1, 2])
    if fig_width is None:
        fig_width = 3.39 if columns == 1 else 6.9  # width in inches
    if fig_height is None:
        golden_mean = (np.sqrt(5) - 1.0) / 2.0  # Aesthetic ratio
        fig_height = fig_width * golden_mean  # height in inches
    MAX_HEIGHT_INCHES = 32.0
    if fig_height > MAX_HEIGHT_INCHES:
        # Fixed: the original concatenated str + float, which raised a
        # TypeError whenever this warning actually fired.
        print(f"WARNING: fig_height too large: {fig_height} "
              f"so will reduce to {MAX_HEIGHT_INCHES} inches.")
        fig_height = MAX_HEIGHT_INCHES
    params = {'backend': 'ps',
              'pgf.rcfonts': False,
              'axes.labelsize': 7,  # fontsize for x and y labels (was 10)
              'axes.titlesize': 7,
              'font.size': 7,  # was 10
              'legend.fontsize': 7,  # was 8 # was 10
              'legend.handlelength': 1,
              'legend.handletextpad': 0.5,
              'legend.labelspacing': 0.1,  # was 0.1
              'legend.columnspacing': 1.5,
              'legend.borderpad': 0.3,
              'xtick.labelsize': 7,
              'ytick.labelsize': 7,
              'axes.labelpad': 1,
              'axes.titlepad': 3,
              'text.usetex': True,
              'figure.figsize': [fig_width, fig_height],
              'font.family': 'serif',
              'text.latex.preamble': r'\usepackage{hyperref} \usepackage{amssymb} \usepackage{ifsym} \usepackage[T1]{fontenc} \usepackage{libertine} \usepackage{graphicx}',
              'pgf.preamble': r'\usepackage{hyperref} \usepackage{amssymb} \usepackage{ifsym} \usepackage[T1]{fontenc} \usepackage{libertine} \usepackage{graphicx}'
              }
    matplotlib.rcParams.update(params)
def logPrintFormat(x, pos):
    """Tick formatter for log axes: render x as a LaTeX power of ten.

    Negative exponents use a scaled-down minus sign. The exponent is always
    wrapped in braces so multi-digit powers render correctly (the previous
    version produced '$10^10$' instead of '$10^{10}$' for x >= 1e10).
    """
    if x < 1:
        return "$10^{{\\scalebox{{0.75}}[1.0]{{-}}{}}}$".format(round(-math.log10(x)))
    else:
        return "$10^{{{}}}$".format(round(math.log10(x)))
def format_axes(ax, xscale='linear', yscale='linear'):
    """Apply the paper's axis styling: thin black left/bottom spines, outward
    ticks, a major y-grid, and power-of-ten tick labels on log axes.
    Returns the styled axes."""
    spine_color = 'black'
    # hide the top/right frame, keep thin left/bottom spines
    for spine in ['top', 'right']:
        ax.spines[spine].set_visible(False)
    for spine in ['left', 'bottom']:
        ax.spines[spine].set_color(spine_color)
        ax.spines[spine].set_linewidth(0.5)
    ax.set_xscale(xscale)
    ax.set_yscale(yscale)
    if yscale == 'log':
        # decade major ticks labelled as 10^k, unlabelled minor decade subdivisions
        locmaj = matplotlib.ticker.LogLocator(base=10, numticks=12)
        ax.yaxis.set_major_locator(locmaj)
        ax.yaxis.set_major_formatter(FuncFormatter(logPrintFormat))
        locmin = matplotlib.ticker.LogLocator(base=10.0, subs=(np.arange(0, 1, 0.1)), numticks=12)
        ax.yaxis.set_minor_locator(locmin)
        ax.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())
    else:
        ax.yaxis.set_minor_locator(AutoMinorLocator(n=2))
        # NOTE(review): grid(b=...) was renamed to visible= in matplotlib 3.5 —
        # confirm against the pinned matplotlib version.
        ax.yaxis.grid(b=True, which='minor', linestyle=':')
    ax.xaxis.set_ticks_position('bottom')
    ax.xaxis.set_tick_params(direction='out', color=spine_color)
    if xscale == 'log':
        locmaj = matplotlib.ticker.LogLocator(base=10, numticks=12)
        ax.xaxis.set_major_locator(locmaj)
        ax.xaxis.set_major_formatter(FuncFormatter(logPrintFormat))
        locmin = matplotlib.ticker.LogLocator(base=10.0, subs=(np.arange(0, 1, 0.1)), numticks=12)
        ax.xaxis.set_minor_locator(locmin)
        ax.xaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())
    ax.yaxis.set_ticks_position('left')
    ax.yaxis.set_tick_params(direction='out', color=spine_color)
    ax.yaxis.grid(b=True, which='major')
    ax.tick_params(axis='both', which='major', pad=0.5)
    return ax
def format_axins(ax, xscale='linear', yscale='linear'):
    """Style an inset axes: full thin frame, hidden x axis, gridded y axis
    without tick labels or tick marks. Returns the styled axes."""
    spine_color = 'black'
    for spine in ['left', 'top', 'right', 'bottom']:
        ax.spines[spine].set_color(spine_color)
        ax.spines[spine].set_linewidth(0.5)
    ax.set_xscale(xscale)
    ax.set_yscale(yscale)
    ax.xaxis.set_visible(False)
    # ax.yaxis.set_visible(False)
    if yscale == 'log':
        locmaj = matplotlib.ticker.LogLocator(base=10, numticks=12)
        ax.yaxis.set_major_locator(locmaj)
        ax.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())
    else:
        ax.yaxis.grid(b=True, which='minor', linestyle=':')
    ax.yaxis.grid(b=True, which='major')
    ax.set_yticklabels([])
    ax.xaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())
    # hide the physical tick marks on the y axis (labels are already empty)
    for tic in ax.yaxis.get_major_ticks():
        tic.tick1line.set_visible(False)
    for tic in ax.yaxis.get_minor_ticks():
        tic.tick1line.set_visible(False)
    return ax
def barAxes(ax):
    """Draw the grid behind the bars of a bar chart."""
    ax.set_axisbelow(True)
def cm2inch(value):
    """Convert centimetres to inches."""
    inches_per_cm = 1 / 2.54
    return value * inches_per_cm
def reorderLegend(ax=None, order=None, unique=False):
    """Return the axes' legend (handles, labels) sorted alphabetically by
    label, optionally reordered by ``order`` and de-duplicated by label."""
    if ax is None: ax = plt.gca()
    handles, labels = ax.get_legend_handles_labels()
    labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))  # sort both labels and handles by labels
    if order is not None:  # Sort according to a given list (not necessarily complete)
        # labels missing from `order` sort last via np.inf
        keys = dict(zip(order, range(len(order))))
        labels, handles = zip(*sorted(zip(labels, handles), key=lambda t, keys=keys: keys.get(t[0], np.inf)))
    if unique:
        labels, handles = zip(*unique_everseen(zip(labels, handles), key=labels))  # Keep only the first of each handle
    return handles, labels
def unique_everseen(seq, key=None):
    """Return the elements of ``seq`` whose key has not been seen before.

    NOTE(review): unlike the itertools recipe of the same name, ``key`` here
    must be an *iterable of precomputed keys* aligned with ``seq`` (it is
    zipped, not called); the default ``None`` would fail — confirm callers
    always pass a key sequence, as reorderLegend does.
    """
    seen = set()
    seen_add = seen.add
    # seen_add(k) returns None (falsy), so it records k as a side effect
    return [x for x, k in zip(seq, key) if not (k in seen or seen_add(k))]
import os
def savefig(path):
    """Save the current figure as both <path>.pgf and <path>.pdf, creating
    any missing parent directories first."""
    out_dir = os.path.dirname(path)
    # exist_ok avoids the check-then-create race of the former
    # os.path.exists guard; the `if` tolerates a bare filename (empty dirname).
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    plt.savefig(path + ".pgf", bbox_inches='tight', pad_inches=0)
    plt.savefig(path + ".pdf", bbox_inches='tight', pad_inches=0)
import math
def analyzeFPR(data, skyline):
    """Build a bits-per-key -> best-FPR skyline per filter family.

    Cuckoo/Morton benchmark names encode extra parameters (bucket size b,
    and for Morton also cr and ota) which are parsed out of the name; their
    runs are merged under the family names 'Cuckoo' / 'Morton'. Mutates
    ``skyline`` in place: skyline[family][bits] keeps the lowest-FPR
    failure-free configuration at that (quarter-bit rounded) size.
    """
    for name in data.keys():
        b = 0
        cr = 0
        ota = 0
        if 'Cuckoo' in name and not 'Opt' in name:
            b = int(name.replace('Cuckoo', ''))
            skyline_name = 'Cuckoo'
        elif 'Morton' in name and not 'Opt' in name:
            b = int(name.replace('Morton', '').split('_')[0])
            cr = int(name.replace('Morton', '').split('_')[1])
            ota = int(name.replace('Morton', '').split('_')[2])
            skyline_name = f'Morton'
        else:
            b = 0
            skyline_name = name
        if skyline_name not in skyline.keys():
            skyline[skyline_name] = {}
        last_bits = 0
        for benchmark in data[name]:
            k = benchmark['k']
            # quantize bits-per-key to quarter bits
            bits = round(benchmark['bits'] * 4) / 4
            failures = benchmark['failures']
            fpr = benchmark['fpr']
            s = benchmark['s']
            if failures == 100:
                # fully failed run: fall back to the requested size s * k
                bits = round(k * s * 4) / 4
            if bits == last_bits:
                # avoid key collisions between consecutive equal sizes
                bits += 0.25
            last_bits = bits
            if failures <= 0 and (bits not in skyline[skyline_name].keys() or skyline[skyline_name][bits]['fpr'] > fpr):
                skyline[skyline_name][bits] = {'k': k, 'fpr': fpr, 's': s, 'b': b, 'cr': cr, 'ota': ota}
def analyzeStacked(data, skyline):
    """Group per-optimization throughputs under their base benchmark name.

    Mutates ``skyline`` in place so that
    ``skyline[base][fixture][optimization] == {'t': throughput}`` for every
    failure-free run; 'InitialiseData' entries are skipped.
    """
    for name, runs in data.items():
        if name == 'InitialiseData':
            continue
        base, optimization = name.split('_', 1)
        bucket = skyline.setdefault(base, {'Construct': {}, 'Count': {}})
        for run in runs:
            if run['failures'] <= 0:
                bucket[run['fixture']][optimization] = {'t': run['throughput']}
def analyzeFailures(data, skyline, xaxis):
    """Keep, per benchmark and x value, the smallest failure-free space overhead.

    Mutates ``skyline`` in place: skyline[name][x] = {'fpr', 's'} holds the
    minimal overhead ``s`` among runs with zero failures whose measured bits
    stayed within 5% of the requested budget s * k.
    """
    for name, runs in data.items():
        entry = skyline.setdefault(name, {})
        for run in runs:
            x = run[xaxis]
            s = run['s']
            within_budget = run['bits'] <= s * run['k'] * 1.05
            improves = x not in entry or entry[x]['s'] > s
            if within_budget and run['failures'] == 0 and improves:
                entry[x] = {'fpr': run['fpr'], 's': s}
def analyzeHashing(data, skyline):
    """Collect per-fixture durations, then compute each hash function's speedup
    relative to Cityhash for every filter family.

    Mutates ``skyline``: first pass keeps the first error-free duration per
    (name, fixture); second pass writes ``skyline[f'{name}{hash}'][fixture]
    ['speedup'] = cityhash_t / t`` (or -2 for zero/NaN durations).
    NOTE(review): the second pass assumes every <family><hashfunc> combination
    produced at least one error-free run — missing entries would raise KeyError.
    """
    for name in data.keys():
        if name not in skyline:
            skyline[name] = {}
        for benchmark in data[name]:
            fixture = benchmark['fixture']
            t = benchmark['duration']
            f = benchmark['failures']
            # first error-free run wins; later runs of the same fixture are ignored
            if f == 0 and not (benchmark['error_occurred'] == True) and fixture not in skyline[name].keys():
                skyline[name][fixture] = {'t': t}
    for name in {'BloomBlocked', 'Cuckoo', 'Xor', 'Morton'}:
        for fixture in skyline[f'{name}Cityhash']:
            city = skyline[f'{name}Cityhash'][fixture]['t']
            for hashfunc in {'Murmur', 'Fasthash', 'Mul'}:
                t = skyline[f'{name}{hashfunc}'][fixture]['t']
                # -2 is a sentinel for "no valid measurement"
                skyline[f'{name}{hashfunc}'][fixture]['speedup'] = -2 if t == 0 or math.isnan(t) else city / t
def analyzeCompetitors(data, skyline):
    """Record the throughput of every failure-free run per benchmark fixture.

    Mutates ``skyline`` in place: skyline[name][fixture] = {'t': throughput};
    later failure-free runs of the same fixture overwrite earlier ones.
    """
    for name, runs in data.items():
        entry = skyline.setdefault(name, {'Construct': {}, 'Count': {}})
        for run in runs:
            if run['failures'] <= 0:
                entry[run['fixture']] = {'t': run['throughput']}
def analyzePerKey(data, skyline):
    """Build a per-element skyline (fastest time per build size) per benchmark.

    For every run the duration and hardware counters are normalized per key;
    runs whose filter grew more than 5% over the unpartitioned baseline size
    (captured when n_partitions == 1, i.e. p == 0), failed, or errored are
    discarded. Mutates ``skyline[name][fixture][n_elements_build]`` keeping
    the entry with the smallest per-key time t.
    """
    for name in data.keys():
        if name not in skyline.keys():
            skyline[name] = {'Construct': {}, 'Count': {}}
        size1 = 0
        for benchmark in data[name]:
            fixture = benchmark['fixture']
            n_elements_build = benchmark['n_elements_build']
            n_elements_lookup = benchmark['n_elements_lookup']
            n_elements = n_elements_build if fixture == 'Construct' else n_elements_lookup
            size = benchmark['size']
            f = benchmark['failures']
            # ns per key (duration presumably in ms — TODO confirm units)
            t = benchmark['duration'] * 1e6 / n_elements
            throughput = n_elements * 1e3 / benchmark['duration']
            dtlb = benchmark['DTLB-misses'] / n_elements
            l1d = benchmark['L1D-misses'] / n_elements
            llc = benchmark['LLC-misses'] / n_elements
            p = math.log(benchmark['n_partitions'], 2)
            if p == 0:
                # remember the unpartitioned size as the baseline
                size1 = size
            if f == 0 and size / size1 < 1.05 and not (benchmark['error_occurred'] == True) and (
                    n_elements_build not in skyline[name][fixture].keys() or skyline[name][fixture][n_elements_build]['t'] > t):
                skyline[name][fixture][n_elements_build] = {'size': size, 't': t, 'dtlb': dtlb, 'l1d': l1d, 'llc': llc, 'p': p, 'throughput': throughput}
def analyzeCorridor(data, skyline):
    """Collect, per benchmark / fixture / build size, the run statistics for
    every partition count (log2), to later plot the min/max timing corridor.

    Mutates ``skyline`` in place:
    skyline[name][fixture][n_elements_build][p] = {'size', 't', 'p'};
    'InitialiseData' entries, failed and errored runs are skipped.
    """
    for name in data.keys():
        if name == "InitialiseData":
            continue
        if name not in skyline:
            skyline[name] = {'Count': {}, 'Construct': {}}
        for benchmark in data[name]:
            n_elements_build = benchmark['n_elements_build']
            fixture = benchmark['fixture']
            size = benchmark['size']
            f = benchmark['failures']
            t = benchmark['duration']
            # partition count expressed as its base-2 logarithm
            p = math.log(benchmark['n_partitions'], 2)
            if f == 0 and not (benchmark['error_occurred'] == True):
                if not n_elements_build in skyline[name][fixture]:
                    skyline[name][fixture][n_elements_build] = {}
                skyline[name][fixture][n_elements_build][p] = {'size': size, 't': t, 'p': p}
def analyzeSIMD(data, skyline):
    """Merge partitioned and unpartitioned lookup runs under one name, keeping
    the fastest configuration per build size.

    The 'Partitioned' tag is stripped from the name (flag 'e' records whether
    the winning run was partitioned). Runs whose size exceeds the
    unpartitioned baseline (captured at p == 0) by more than 5%, failed, or
    errored are discarded. Mutates ``skyline[name]['Count'][n_build]``.
    """
    for name in data.keys():
        size1 = 0
        skyline_name = name.replace('Partitioned', '')
        if skyline_name not in skyline:
            skyline[skyline_name] = {'Count': {}}
        for benchmark in data[name]:
            n_elements_build = benchmark['n_elements_build']
            size = benchmark['size']
            f = benchmark['failures']
            t = benchmark['duration']
            p = math.log(benchmark['n_partitions'], 2)
            e = 'Partitioned' in name
            if p == 0:
                # unpartitioned run defines the baseline size
                size1 = size
            if f == 0 and size / size1 < 1.05 and not (benchmark['error_occurred'] == True) and (
                    n_elements_build not in skyline[skyline_name]['Count'].keys() or skyline[skyline_name]['Count'][n_elements_build]['t'] > t):
                skyline[skyline_name]['Count'][n_elements_build] = {'size': size, 't': t, 'p': p, 'e': e}
def analyzeMultiThreading(prefix, data, skyline, numa=False, partitions=None):
    """Keep the fastest run per thread count for the 100M-element benchmark.

    Results are stored under '<prefix>_<name>'. If ``partitions`` is given,
    partitioned runs with a different partition count are skipped. Mutates
    ``skyline[f'{prefix}_{name}'][n_threads]`` in place.
    NOTE(review): the ``numa`` flag is accepted but never used here — confirm
    whether it was meant to filter anything.
    """
    for name in data.keys():
        size1 = 0
        skyline_name = f'{prefix}_{name}'
        if skyline_name not in skyline:
            skyline[skyline_name] = {}
        for benchmark in data[name]:
            n_elements_build = benchmark['n_elements_build']
            size = benchmark['size']
            f = benchmark['failures']
            t = benchmark['duration']
            p = math.log(benchmark['n_partitions'], 2)
            n_threads = benchmark['n_threads']
            e = 'Partitioned' in name
            if e and partitions is not None and benchmark['n_partitions'] != partitions:
                continue
            # only the 100M-element configuration is analyzed
            if n_elements_build == 100000000 and f == 0 and not (benchmark['error_occurred'] == True) and (n_threads not in skyline[skyline_name].keys() or skyline[skyline_name][n_threads]['t'] > t):
                skyline[skyline_name][n_threads] = {'size': size, 't': t, 'p': p, 'e': e}
def plotFPR(config, skyline, ax, yaxis):
    """Plot ``yaxis`` (e.g. 'fpr') over bits-per-key for every configured
    filter, restricted to 5..25 bits. Returns the line handles for legends."""
    ax.set_xlabel('Bits per key ($m/n$)')
    # slight padding around the 5..25 bits-per-key window
    ax.set_xlim(4.75, 25.25)
    handles = []
    for name in config.keys():
        x = []
        y = []
        for bits in sorted(list(skyline[name])):
            if 5 <= bits <= 25:
                x.append(bits)
                y.append(skyline[name][bits][yaxis])
        handles.append(ax.plot(x, y, label=config[name]['label'], color=config[name]['color'], linestyle=config[name]['linestyle'], linewidth=config[name]['linewidth'])[0])
    return handles
def plotFailure(config, skyline, ax, k=True):
    """Plot the minimal achievable space overhead s per x value (k or size)
    for every configured filter. Returns the line handles for legends."""
    ax.set_xlabel("Minimal space overhead $s$")
    handles = []
    for name in config.keys():
        x = []
        y = []
        for i in sorted(list(skyline[name])):
            # k mode restricts to 1..32 hash functions; otherwise a per-filter
            # lower bound from the config applies
            if (k and 1 <= i <= 32) or (not k and i >= config[name]['min']):
                x.append(i)
                y.append(skyline[name][i]['s'])
        handles.append(ax.plot(x, y, label=config[name]['label'], color=config[name]['color'], linewidth=1)[0])
    return handles
def plotStacked(config, opt_config, skyline, ax):
    """Draw paired build/lookup stacked throughput bars for every filter.

    Each filter in ``config`` gets two stacked bars (Construct left, Count
    right); stack segments correspond to the successive optimizations listed
    in ``config[name][fixture]``. An FPR/size annotation is placed above each
    pair and a two-row legend is attached to the current figure.
    (Cleanup: removed a leftover debug ``print`` and dead commented-out code.)
    """
    width = 0.4
    offset = 0.02
    p = []  # legend handles: one per stack segment of the last filter's Count bar
    for i, name in enumerate(config.keys()):
        for (fixture, factor) in [('Construct', -1), ('Count', 1)]:
            bottom = 0
            for opt in config[name][fixture]:
                last_opt = opt.split('_')[-1]
                throughput = skyline[name][fixture][opt]['t']
                # each segment spans from the previous optimization's level up
                b = ax.bar([i + (width + offset) / 2 * factor], [throughput - bottom], width, bottom=bottom, color=opt_config[last_opt]['color'], zorder=10, edgecolor='gray', linewidth=0)
                if fixture == 'Count' and i == len(config.keys()) - 1:
                    p.append(b)
                bottom = throughput
            ax.text(b[0].get_x() + width / 2, -3e6, 'Lookup' if fixture == 'Count' else 'Build', ha='center', va='top', color='k', fontsize=5, zorder=20)
        props = dict(facecolor='white', boxstyle='square,pad=0.2', alpha=1, lw=0.5)
        ax.text(b[0].get_x(), bottom + 10e6, f"FPR: {config[name]['fpr']}\nSize: {config[name]['size']}", ha='center', va='bottom', bbox=props, color='k', fontsize=5, zorder=20)
    ax.axhline(y=0, color='k', linestyle='-', lw=1, zorder=20)
    ax.yaxis.set_major_formatter(mills)
    allfig = plt.gcf()
    label0 = 'Baseline (\\hyperref[s:addr]{Section 4.1} \\& \\hyperref[s:hash]{Section 4.2})'
    label1 = 'Partitioning (\\hyperref[s:part]{Section 4.3})'
    label2 = 'Vectorization (\\hyperref[s:vec]{Section 4.4})'
    allfig.legend((p[0],), (label0,), ncol=2, bbox_to_anchor=(0.1, 1.075), loc='upper left', borderaxespad=0, frameon=False, columnspacing=1)
    allfig.legend((p[1], p[2]), (label1, label2), ncol=2, bbox_to_anchor=(0.1, 1), loc='upper left', borderaxespad=0, frameon=False, columnspacing=1)
def plotHashing(config, skyline, ax):
    """Bar-plot each hash function's speedup-over-Cityhash minus one, as
    build/lookup groups per filter family. Returns {hash_func: bar handle}."""
    handles = {}
    width = 0.175
    offset = 0.025
    for i, name in enumerate(["BloomBlocked", "Cuckoo", "Morton", "Xor"]):
        for (fixture, factor, color) in [('Construct', -1, 'color1'), ('Count', 1, 'color1')]:
            for j, hash_func in enumerate(config.keys()):
                # NOTE: local `speedup` shadows the module-level formatter function
                speedup = skyline[f'{name}{hash_func}'][fixture]['speedup']
                handles[hash_func] = ax.bar([i + factor * (width + offset) + width / 2 * config[hash_func]['factor']], [speedup - 1], width, 0, color=config[hash_func][color], zorder=10)
            ax.text(i + factor * (width + offset), -0.02, 'Lookup' if fixture == 'Count' else 'Build', ha='center', va='top', color='k', fontsize=5, zorder=20)
    return handles
def plotCompetitors(config, fixture, skyline, ax, legend=True):
    """Draw side-by-side competitor-vs-ours throughput bars per filter, with a
    bracket and a signed speedup annotation over each pair."""
    width = 0.35
    ind = []
    label = []
    p = {}
    for i, name in enumerate(config.keys()):
        ind.append(i)
        label.append(config[name]['label'])
        t_competitor = skyline[name][fixture]['t']
        t_our = skyline[config[name]['competitor']][fixture]['t']
        p['Competitor'] = ax.bar([i - width / 2], [t_competitor], width, color=colors['blue'], zorder=10)
        p['Ours'] = ax.bar([i + width / 2], [t_our], width, color=colors['orange'], zorder=10)
        bar0 = p['Competitor'][0]
        bar1 = p['Ours'][0]
        # NOTE(review): posText is computed but never used
        posText = (bar0.get_height() + bar1.get_height()) / 2
        # the vertical connector sits over the shorter bar's center
        if t_competitor <= t_our:
            middle = bar0.get_x() + bar0.get_width() / 2
        else:
            middle = bar1.get_x() + bar1.get_width() / 2
        height = max(bar0.get_height(), bar1.get_height())
        ax.plot([bar0.get_x(), bar0.get_x() + bar0.get_width() * 2], [height, height], 'k-', lw=0.5, zorder=20)
        ax.plot([middle, middle], [bar0.get_height(), bar1.get_height()], 'k-', lw=0.5, zorder=20)
        ax.text(bar1.get_x(), height + 0.005e9, speedup(t_our / t_competitor - 1), ha='center', va='bottom', color='k')
        ax.text(bar1.get_x() + width / 2, -1e6, 'Lookup' if fixture == 'Count' else 'Build', ha='center', va='top', color='k', fontsize=5, zorder=20)
    l = ax.legend((p['Competitor'], p['Ours']), ('Competitor', 'Ours'), labelspacing=0, ncol=2, bbox_to_anchor=(0.99, 1), borderaxespad=0, framealpha=1, edgecolor='black', fancybox=False)
    l.set_visible(legend)
    l.get_frame().set_linewidth(0.5)
    ax.set_xticks(ind)
    ax.set_xticklabels(label, rotation=45, ha='right', rotation_mode="anchor")
    ax.yaxis.set_major_formatter(gigs)
    ax.set_ylim(0, 0.41e9)
    ax.axhline(y=0, color='k', linestyle='-', lw=1, zorder=20)
def plotFilterSize(config, fixture, skyline, ax, yaxis, min_size, max_size, y_caches, TLB=False):
    """Plot metric `yaxis` over filter size (KiB) for every filter in `config`.

    Draws vertical cache-boundary markers (L1/L2/L3, or dTLB/L2 TLB when
    TLB=True) that fall inside (min_size, max_size), then one line per filter.
    NaN metric values are replaced by -2 so they plot off-scale instead of
    breaking the line. Returns the list of ax.plot() results (one per filter).
    """
    ax.set_xlabel('Filter size $m$ [KiB]')
    # Speedup plots get a zero baseline.
    if 'speedup' in yaxis:
        ax.axhline(y=0, color='k', linestyle='-', lw=1)
    # Cache / TLB capacities in KiB (19.25 MiB LLC on the benchmark machine).
    caches = {'L1': 32, 'L2': 1024, 'L3': 1024 * 19.25} if not TLB else {'dTLB': 256, 'L2 TLB': 6144}
    # Hoisted loop-invariant: the label box style is the same for every marker.
    props = dict(boxstyle='round', facecolor='white', alpha=1, lw=0.75)
    for name in caches:
        if min_size < caches[name] < max_size:
            ax.axvline(x=caches[name], color='k', linestyle='-', lw=0.75)
            ax.text(caches[name], y_caches, name, horizontalalignment='center', bbox=props, fontsize=6)
    handles = []
    for name in config.keys():
        x = []
        y = []
        # Fix: `sorted(list(d))` -> `sorted(d)` (redundant list() copy).
        for n_elements in sorted(skyline[name][fixture]):
            point = skyline[name][fixture][n_elements]
            size = point['size'] / 1024
            if min_size < size < max_size:
                x.append(size)
                val = point[yaxis]
                if math.isnan(val):
                    val = -2  # sentinel: plotted below the visible y-range
                y.append(val)
        handles.append(ax.plot(x, y, label=config[name]['label'], color=config[name]['color'], linestyle=config[name]['linestyle'], linewidth=config[name]['linewidth'], clip_on=False))
    return handles
def plotCorridor(config, skyline, ax, fixture, min_size, max_size, y_caches):
    """Plot the best partition count plus its 5%-tolerance corridor per filter.

    For every filter in `config`, draws a line of the optimal partition count
    ('min') over filter size (KiB) and a shaded band spanning the 'corridor5'
    range (partition counts within 5% of the best throughput). Cache-boundary
    markers inside (min_size, max_size) are drawn as vertical lines.

    Fixes: now returns the line handles (consistent with plotFilterSize; callers
    that ignore the previous None return are unaffected); removed the unused
    y1/y2 corridor-10% series that only fed a commented-out fill_between.
    """
    ax.set_xlabel("Filter size in [KiB]")
    caches = {'L1': 32, 'L2': 1024, 'L3': 1024 * 19.25}
    props = dict(boxstyle='round', facecolor='white', alpha=1, lw=0.75)
    for name in caches:
        if min_size < caches[name] < max_size:
            ax.axvline(x=caches[name], color='k', linestyle='-', lw=0.75)
            ax.text(caches[name], y_caches, name, horizontalalignment='center', bbox=props, fontsize=6)
    handles = []
    for name in config.keys():
        x = []
        y = []
        y15 = []  # lower edge of the 5% corridor
        y25 = []  # upper edge of the 5% corridor
        for n_elements in sorted(skyline[name][fixture]):
            point = skyline[name][fixture][n_elements]
            size = point['size'] / 1024
            if min_size < size < max_size:
                x.append(size)
                y.append(point['min'])
                y15.append(min(point['corridor5']))
                y25.append(max(point['corridor5']))
        ax.fill_between(x, y15, y25, color=config[name]['color'], linestyle=config[name]['linestyle'], linewidth=config[name]['linewidth'], alpha=0.25)
        handles.append(ax.plot(x, y, label=config[name]['label'], color=config[name]['color'], linestyle=config[name]['linestyle'], linewidth=config[name]['linewidth'])[0])
    return handles
def plotScaleup(config, skyline, ax, base_name, datapoints):
    """Plot multi-threading scale-up curves for one filter family.

    config:     dict of name postfixes -> line style ('label', 'color', 'linewidth').
    skyline:    skyline[f'{base_name}{postfix}'][n_threads]['scaleup'] measurements.
    base_name:  common benchmark-name prefix for all curves.
    datapoints: thread counts to plot, used as categorical x tick labels.
    """
    # Fixes: removed unused local `x`; hoisted the loop-invariant label list
    # out of the per-curve loop; dropped the needless f-string around the label.
    lbls = [str(d) for d in datapoints]
    for postfix in config.keys():
        name = f'{base_name}{postfix}'
        y = []
        for n_threads in datapoints:
            val = skyline[name][n_threads]['scaleup']
            if math.isnan(val):
                val = 0  # missing/failed measurement -> plot at the bottom
            y.append(val)
        ax.plot(lbls, y, label=config[postfix]['label'], color=config[postfix]['color'], linestyle='solid', linewidth=config[postfix]['linewidth'])
# --- Introduction figure: stacked optimization-impact bars ---------------------
# Ingest build ('construct') and lookup ('count') CSVs for every filter into a
# single skyline dict; analyzeStacked (defined earlier in the file) accumulates
# per-benchmark results keyed by benchmark name.
skylineStacked = {}
analyzeStacked(read_benchmark('../benchmark/paper/introduction/bloom_blocked_stacked_construct.csv'), skylineStacked)
analyzeStacked(read_benchmark('../benchmark/paper/introduction/bloom_blocked_stacked_count.csv'), skylineStacked)
analyzeStacked(read_benchmark('../benchmark/paper/introduction/bloom_sectorized_stacked_construct.csv'), skylineStacked)
analyzeStacked(read_benchmark('../benchmark/paper/introduction/bloom_sectorized_stacked_count.csv'), skylineStacked)
analyzeStacked(read_benchmark('../benchmark/paper/introduction/cuckoo_stacked_construct.csv'), skylineStacked)
analyzeStacked(read_benchmark('../benchmark/paper/introduction/cuckoo_stacked_count.csv'), skylineStacked)
analyzeStacked(read_benchmark('../benchmark/paper/introduction/morton_stacked_construct.csv'), skylineStacked)
analyzeStacked(read_benchmark('../benchmark/paper/introduction/morton_stacked_count.csv'), skylineStacked)
analyzeStacked(read_benchmark('../benchmark/paper/introduction/xor_stacked_construct.csv'), skylineStacked)
analyzeStacked(read_benchmark('../benchmark/paper/introduction/xor_stacked_count.csv'), skylineStacked)
# Per-filter metadata for the stacked plot: which optimization stages are
# stacked for build ('Construct') and lookup ('Count'), plus the
# false-positive rate / filter size displayed in the figure.
config = {
    'BloomBlocked': {'label': 'Bloom',
                     'Construct': ['Lemire_Murmur', 'Lemire_Murmur_Partitioned'],
                     'Count': ['Lemire_Murmur', 'Lemire_Murmur_Partitioned', 'Lemire_Murmur_Partitioned_SIMD'],
                     'fpr': '0.41\%', 'size': '143 MiB'},
    'Cuckoo': {'label': 'Cuckoo',
               'Construct': ['Lemire_Murmur', 'Lemire_Murmur_Partitioned'],
               'Count': ['Lemire_Murmur', 'Lemire_Murmur_Partitioned', 'Lemire_Murmur_Partitioned_SIMD'],
               'fpr': '2.93\%', 'size': '101 MiB'},
    # Fix: this entry is the Morton filter; its label was 'Cuckoo' (copy-paste
    # slip — the fpr/size values already differed from the Cuckoo entry).
    'Morton': {'label': 'Morton',
               'Construct': ['Lemire_Murmur', 'Lemire_Murmur_Partitioned'],
               'Count': ['Lemire_Murmur', 'Lemire_Murmur_Partitioned', 'Lemire_Murmur_Partitioned_SIMD'],
               'fpr': '0.42\%', 'size': '132 MiB'},
    'Xor': {'label': 'Xor',
            'Construct': ['Lemire_Murmur', 'Lemire_Murmur_Partitioned'],
            'Count': ['Lemire_Murmur', 'Lemire_Murmur_Partitioned', 'Lemire_Murmur_Partitioned_SIMD'],
            'fpr': '0.39\%', 'size': '117 MiB'},
}
# Stage colors for the stacked bars. All baseline hash/addressing variants map
# to one 'Baseline' legend entry; the two real optimization stages get their
# own colors.
lightred = rgb(228, 26, 28, 0.8)
opt_config = {
    'Baseline': {'label': 'Baseline', 'color': lightred},
    'Lemire': {'label': 'Baseline', 'color': lightred},
    'Murmur': {'label': 'Baseline', 'color': lightred},
    'Mul': {'label': 'Baseline', 'color': lightred},
    'SIMD': {'label': 'Vectorization', 'color': colors['lightorange']},
    'Partitioning': {'label': 'Partitioning', 'color': colors['lightgreen']},
    'Partitioned': {'label': 'Partitioning', 'color': colors['lightgreen']},
}
# Render at paper column width; x tick labels are TeX with \hyperref links into
# the paper's sections.
latexify(cm2inch(8.5), cm2inch(3.8), 2)
fig = plt.figure()
ax = fig.add_subplot(111)
format_axes(ax)
plotStacked(config, opt_config, skylineStacked, ax)
# ax.legend().set_visible(False)
ax.set_xticks([0, 1, 2, 3])
ax.set_xticklabels([
    "\\textbf{Bloom}\n(\\hyperref[s:bloom]{Section 3.1})",
    "\\textbf{Cuckoo}\n(\\hyperref[s:cuckoo]{Section 3.2.1})",
    "\\textbf{Morton}\n(\\hyperref[s:morton]{Section 3.2.2})",
    "\\textbf{Xor}\n(\\hyperref[s:xor]{Section 3.2.3})"
])
ax.tick_params(axis='x', which='major', pad=2.5)
ax.set_ylabel("Throughput [Keys/s]")
ax.set_ylim(0, 215e6)
savefig("./pdf/introduction/stacked")
# --- Background figures: false-positive rates and build-failure rates ----------
# Ingest the FPR sweeps for all filter variants.
skylineFPR = {}
analyzeFPR(read_benchmark('../benchmark/paper/background/bloom/bloom_fpr.csv'), skylineFPR)
analyzeFPR(read_benchmark('../benchmark/paper/background/fingerprintbased/cuckoo/fingerprintbased_cuckoo_fpr.csv'), skylineFPR)
analyzeFPR(read_benchmark('../benchmark/paper/background/fingerprintbased/cuckoo_opt/fingerprintbased_cuckoo_opt_fpr.csv'), skylineFPR)
analyzeFPR(read_benchmark('../benchmark/paper/background/fingerprintbased/morton_opt/fingerprintbased_morton_opt_fpr.csv'), skylineFPR)
analyzeFPR(read_benchmark('../benchmark/paper/background/fingerprintbased/morton2/fingerprintbased_morton2_fpr.csv'), skylineFPR)
analyzeFPR(read_benchmark('../benchmark/paper/background/fingerprintbased/morton3/fingerprintbased_morton3_fpr.csv'), skylineFPR)
analyzeFPR(read_benchmark('../benchmark/paper/background/fingerprintbased/morton7/fingerprintbased_morton7_fpr.csv'), skylineFPR)
analyzeFPR(read_benchmark('../benchmark/paper/background/fingerprintbased/xor/fingerprintbased_xor_fpr.csv'), skylineFPR)
analyzeFPR(read_benchmark('../benchmark/paper/background/fingerprintbased/fuse/fingerprintbased_fuse_fpr.csv'), skylineFPR)
# Build-failure rate vs. number of keys ...
skylineFailureElements = {}
analyzeFailures(read_benchmark('../benchmark/paper/background/fingerprintbased/xor/fingerprintbased_xor_failure_elements.csv'), skylineFailureElements, 'n_elements_build')
analyzeFailures(read_benchmark('../benchmark/paper/background/fingerprintbased/fuse/fingerprintbased_fuse_failure_elements.csv'), skylineFailureElements, 'n_elements_build')
analyzeFailures(read_benchmark('../benchmark/paper/background/fingerprintbased/morton_opt/fingerprintbased_morton_opt_failure_elements.csv'), skylineFailureElements, 'n_elements_build')
analyzeFailures(read_benchmark('../benchmark/paper/background/fingerprintbased/cuckoo_opt/fingerprintbased_cuckoo_opt_failure_elements.csv'), skylineFailureElements, 'n_elements_build')
# ... and vs. fingerprint size k.
skylineFailureK = {}
analyzeFailures(read_benchmark('../benchmark/paper/background/fingerprintbased/xor/fingerprintbased_xor_failure_k.csv'), skylineFailureK, 'k')
analyzeFailures(read_benchmark('../benchmark/paper/background/fingerprintbased/fuse/fingerprintbased_fuse_failure_k.csv'), skylineFailureK, 'k')
analyzeFailures(read_benchmark('../benchmark/paper/background/fingerprintbased/morton_opt/fingerprintbased_morton_opt_failure_k.csv'), skylineFailureK, 'k')
analyzeFailures(read_benchmark('../benchmark/paper/background/fingerprintbased/cuckoo_opt/fingerprintbased_cuckoo_opt_failure_k.csv'), skylineFailureK, 'k')
# Line styles for the Bloom-variant FPR plot.
configBloom = {
    'BloomSectorized256': {'label': 'Sectorized', 'color': colors['lightred'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1},
    'BloomBlocked64': {'label': 'Register-blocked', 'color': colors['lightorange'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1},
    'BloomCacheSectorized2': {'label': 'Cache-sectorized', 'color': colors['orange'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1},
    'BloomStandard': {'label': 'Na\\"ive', 'color': colors['blue'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1},
    'BloomBlocked512': {'label': 'Cache-blocked', 'color': colors['bloom'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1},
}
# Line styles for the fingerprint-based FPR plot; the un-optimized Morton /
# Cuckoo variants are drawn dashed and unlabeled.
configFingerprintFPR = {
    'Morton': {'label': '', 'color': colors['lightmorton'], 'marker': markers['circle'], 'linestyle': 'dashed', 'linewidth': 0.75},
    'Cuckoo': {'label': '', 'color': colors['lightcuckoo'], 'marker': markers['circle'], 'linestyle': 'dashed', 'linewidth': 0.75},
    'MortonOpt': {'label': 'Morton', 'color': colors['morton'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1},
    'CuckooOpt': {'label': 'Cuckoo', 'color': colors['cuckoo'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1},
    'Xor': {'label': 'Xor', 'color': colors['xor'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1},
    'Fuse': {'label': 'Xor (Fuse)', 'color': colors['fuse'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1},
}
# Line styles for the optimal-k / failure plots ('min' is presumably the
# smallest key count used by the failure sweep -- TODO confirm in analyzeFailures).
configFingerprintK = {
    'MortonOpt': {'label': 'Morton filter', 'color': colors['morton'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1, 'min': 1000},
    'CuckooOpt': {'label': 'Cuckoo filter', 'color': colors['cuckoo'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1, 'min': 100},
    'Xor': {'label': 'Xor filter', 'color': colors['xor'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1, 'min': 100},
    'Fuse': {'label': 'Xor filter (Fuse)', 'color': colors['fuse'], 'marker': markers['circle'], 'linestyle': 'solid', 'linewidth': 1, 'min': 100},
}
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import TransformedBbox, BboxPatch, BboxConnector
latexify(cm2inch(9), cm2inch(3.5), 2)
def plot_inset(parent_axes, inset_axes, loc1a=1, loc1b=1, loc2a=2, loc2b=2, **kwargs):
    """Outline the region magnified by *inset_axes* on *parent_axes*.

    Draws a dashed rectangle around the inset's view limits (in the parent's
    data coordinates) and two dashed connector lines from inset corners
    (loc1a/loc2a) to rectangle corners (loc1b/loc2b). Extra keyword arguments
    are accepted for call-site compatibility but are not used.

    Returns (rectangle patch, connector 1, connector 2).
    """
    rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)
    pp = BboxPatch(rect, fill=False, color='w', ec='gray', lw=0.5, zorder=2, ls='--')
    parent_axes.add_patch(pp)

    def _connect(inset_corner, rect_corner):
        # One dashed connector line, drawn unclipped so it may leave the axes.
        conn = BboxConnector(inset_axes.bbox, rect, loc1=inset_corner, loc2=rect_corner, ec='gray', lw=0.5, zorder=2, ls='--')
        parent_axes.add_patch(conn)
        conn.set_clip_on(False)
        return conn

    p1 = _connect(loc1a, loc1b)
    p2 = _connect(loc2a, loc2b)
    return pp, p1, p2
# --- Figure: Bloom-variant FPR (left, with zoom inset) and optimal k (right) ---
if True:  # figure cell; kept as a toggle like the other figure cells
    fig = plt.figure()
    ax0 = fig.add_subplot(121)
    handles = plotFPR(configBloom, skylineFPR, ax0, 'fpr')
    ax0.set_ylabel("False-positive rate $\\varepsilon$")
    format_axes(ax0, 'linear', 'log')
    ax0.set_xticks([5, 10, 15, 20, 25])
    # 2x zoom inset on the interesting low-k region.
    axins = zoomed_inset_axes(ax0, 2, loc='lower left')
    # axins = inset_axes(ax0, 0.25, 0.25, loc='lower left', bbox_to_anchor=(0, 0)) # no zoom
    plotFPR(configBloom, skylineFPR, axins, 'fpr')
    axins.set_xlim(6, 10.5)
    axins.set_ylim(0.009, 0.065)
    format_axins(axins, 'linear', 'log')
    patch, pp1, pp2 = plot_inset(ax0, axins, loc1a=2, loc1b=3, loc2a=1, loc2b=4, fc="none", ec="0.5", lw=0.2)
    # Reorder the legend entries into presentation order.
    handles = [handles[3], handles[4], handles[0], handles[2], handles[1]]
    allfig = plt.gcf()
    allfig.legend(handles=[handles[0], handles[2], handles[1], handles[4], handles[3]], bbox_to_anchor=(0.5, 1.1), loc='upper center', ncol=3, borderaxespad=0, frameon=False)
    ax1 = fig.add_subplot(122)
    plotFPR(configBloom, skylineFPR, ax1, 'k')
    ax1.set_ylabel("Optimal $k$")
    format_axes(ax1)
    ax1.set_xticks([5, 10, 15, 20, 25])
    plt.tight_layout()
    savefig("./pdf/background/bloom_fpr")
# --- Figure: fingerprint-based FPR (left) and optimal k (right), each with two
# zoom insets (low-k and high-k regions).
if True:
    fig = plt.figure()
    ax0 = fig.add_subplot(121)
    plotFPR(configFingerprintFPR, skylineFPR, ax0, 'fpr')
    ax0.legend().set_visible(False)
    ax0.set_ylabel("False-positive rate $\\varepsilon$")
    format_axes(ax0, 'linear', 'log')
    ax0.set_xticks([5, 10, 15, 20, 25])
    axins = zoomed_inset_axes(ax0, 2, loc='lower left')
    # axins = inset_axes(ax0, 0.25, 0.25, loc='lower left', bbox_to_anchor=(0, 0)) # no zoom
    plotFPR(configFingerprintFPR, skylineFPR, axins, 'fpr')
    axins.set_xlim(6, 9.5)
    axins.set_ylim(6e-3, 0.15)
    format_axins(axins, 'linear', 'log')
    patch, pp1, pp2 = plot_inset(ax0, axins, loc1a=2, loc1b=3, loc2a=1, loc2b=4, fc="none", ec="0.5", lw=0.2)
    axins = zoomed_inset_axes(ax0, 2, loc='upper right', borderpad=0)
    # axins = inset_axes(ax0, 0.25, 0.25, loc='lower left', bbox_to_anchor=(0, 0)) # no zoom
    plotFPR(configFingerprintFPR, skylineFPR, axins, 'fpr')
    axins.set_xlim(20, 24)
    axins.set_ylim(5e-7, 2e-5)
    format_axins(axins, 'linear', 'log')
    patch, pp1, pp2 = plot_inset(ax0, axins, loc1a=3, loc1b=2, loc2a=4, loc2b=1, fc="none", ec="0.5")
    ax1 = fig.add_subplot(122)
    handles = plotFPR(configFingerprintK, skylineFPR, ax1, 'k')
    ax1.set_ylabel("Optimal $k$")
    format_axes(ax1)
    ax1.set_xticks([5, 10, 15, 20, 25])
    ax1.set_yticks([5, 10, 15, 20])
    axins = zoomed_inset_axes(ax1, 2, loc='upper left', bbox_to_anchor=(0, 1.05), bbox_transform=ax1.transAxes)
    # axins = inset_axes(ax0, 0.25, 0.25, loc='lower left', bbox_to_anchor=(0, 0)) # no zoom
    plotFPR(configFingerprintK, skylineFPR, axins, 'k')
    axins.set_xlim(6, 9.5)
    axins.set_ylim(3.5, 8.5)
    format_axins(axins, 'linear', 'linear')
    # Minor tick at 7.5 is only a grid helper; hide its tick mark below.
    axins.yaxis.set_minor_locator(matplotlib.ticker.FixedLocator([7.5]))
    axins.set_yticks([5])
    for tic in axins.yaxis.get_minor_ticks():
        tic.tick1line.set_visible(False)
    patch, pp1, pp2 = plot_inset(ax1, axins, loc1a=3, loc1b=2, loc2a=4, loc2b=1, fc="none", ec="0.5", lw=0.2)
    axins = zoomed_inset_axes(ax1, 2, loc='lower right', bbox_to_anchor=(1.04, -0.02), bbox_transform=ax1.transAxes)
    # axins = inset_axes(ax0, 0.25, 0.25, loc='lower left', bbox_to_anchor=(0, 0)) # no zoom
    plotFPR(configFingerprintK, skylineFPR, axins, 'k')
    axins.set_xlim(20, 24)
    axins.set_ylim(16.5, 21.5)
    format_axins(axins, 'linear', 'linear')
    axins.yaxis.set_minor_locator(matplotlib.ticker.FixedLocator([17.5]))
    axins.set_yticks([20])
    for tic in axins.yaxis.get_minor_ticks():
        tic.tick1line.set_visible(False)
    patch, pp1, pp2 = plot_inset(ax1, axins, loc1a=2, loc1b=3, loc2a=1, loc2b=4, fc="none", ec="0.5")
    handles[0], handles[1] = handles[1], handles[0]
    allfig = plt.gcf()
    allfig.legend(handles=handles, bbox_to_anchor=(0.5, 1.05), loc='upper center', ncol=4, borderaxespad=0, frameon=False)
    plt.tight_layout()
    savefig("./pdf/background/fingerprintbased_fpr")
# --- Figure: minimal scale factor s before build failure, vs. fingerprint size
# (left) and vs. number of keys (right).
if True:
    fig = plt.figure()
    ax0 = fig.add_subplot(121)
    plotFailure(configFingerprintK, skylineFailureK, ax0, True)
    ax0.set_ylabel("Minimal scale factor $s$")
    ax0.set_xlabel("Fingerprint size $k$ [bits]")
    ax0.set_ylim(1.0, 1.62)
    ax0.set_xlim(0, 32.5)
    format_axes(ax0)
    ax0.set_yticks([1, 1.2, 1.4, 1.6])
    ax1 = fig.add_subplot(122)
    handles = plotFailure(configFingerprintK, skylineFailureElements, ax1, False)
    ax1.set_ylim(1.0, 1.62)
    ax1.set_xlabel("Number of keys $n$ (log scale)")
    ax1.set_xlim(1e2, 1.5e7)
    format_axes(ax1, 'log', 'linear')
    ax1.set_xticks([1e2, 1e3, 1e4, 1e5, 1e6, 1e7])
    ax1.set_yticks([1, 1.2, 1.4, 1.6])
    # allfig = plt.gcf()
    # allfig.legend(handles=handles, bbox_to_anchor=(0.5, 1.05), loc='upper center', ncol=4, borderaxespad=0, frameon=False, columnspacing=2, handlelength=1)
    plt.tight_layout()
    savefig("./pdf/background/fingerprintbased_failure")
plt.close()
# --- Optimization: addressing (modulo reduction) -------------------------------
skylineAddressing = {}
analyzePerKey(read_benchmark('../benchmark/paper/optimization/addressing/addressing_construct.csv'), skylineAddressing)
analyzePerKey(read_benchmark('../benchmark/paper/optimization/addressing/addressing_count.csv'), skylineAddressing)
# Derive the per-metric speedup of Magic / Lemire addressing relative to the
# power-of-two baseline; -2 flags missing/invalid measurements (plotted
# off-scale by plotFilterSize).
for name in {'BloomBlocked', 'Cuckoo', 'Xor', 'Morton'}:
    for fixture in {'Construct', 'Count'}:
        for n_elements in skylineAddressing[f'{name}PowerOfTwo'][fixture]:
            powerOfTwo = skylineAddressing[f'{name}PowerOfTwo'][fixture][n_elements]
            for addr in {'Magic', 'Lemire'}:
                if n_elements in skylineAddressing[f'{name}{addr}'][fixture]:
                    relative = skylineAddressing[f'{name}{addr}'][fixture][n_elements]
                    for attribute in {'t', 'dtlb', 'l1d', 'llc', 'throughput'}:
                        if relative[attribute] == 0 or math.isnan(relative[attribute]):
                            relative[f'{attribute}_speedup'] = -2
                        else:
                            relative[f'{attribute}_speedup'] = powerOfTwo[attribute] / relative[attribute] - 1
# Per-filter line styles for the addressing-speedup figure (Lemire reduction
# vs. the power-of-two baseline derived above).
config = {
    'MortonLemire': {'label': 'Morton filter', 'color': colors['morton'], 'linestyle': 'solid', 'linewidth': 1},
    'BloomBlockedLemire': {'label': 'Bloom filter', 'color': colors['bloom'], 'linestyle': 'solid', 'linewidth': 1},
    'CuckooLemire': {'label': 'Cuckoo filter', 'color': colors['cuckoo'], 'linestyle': 'solid', 'linewidth': 1},
    'XorLemire': {'label': 'Xor filter', 'color': colors['xor'], 'linestyle': 'solid', 'linewidth': 1},
}
latexify(cm2inch(9), cm2inch(3.5), 2)
fig = plt.figure()
ax = fig.add_subplot(121)
plotFilterSize(config, 'Construct', skylineAddressing, ax, 't_speedup', 1e2, 1.1e5, -0.62)
ax.set_title('\\textbf{Build}', pad=1)
# Fix: '%' escaped as '\%' for the TeX renderer (an unescaped '%' starts a TeX
# comment and truncates the label), matching the other '\%' labels in this file.
ax.set_ylabel("Speedup [\%]")
ax.set_ylim(-0.75, 1.26)
ax.set_xlim(1e2, 1.2e5)
format_axes(ax, 'log', 'linear')
ax.set_xticks([1e2, 1e3, 1e4, 1e5])
ax.set_yticks([-0.5, 0, 0.5, 1])
# Fix: '+50%' -> '+50\%' (was the only unescaped entry in this list).
ax.set_yticklabels(["-50\%", "0\%", "+50\%", "~~~~+100\%"], rotation=90, verticalalignment='center')
ax = fig.add_subplot(122)
plotFilterSize(config, 'Count', skylineAddressing, ax, 't_speedup', 1e2, 1.1e5, -0.62)
ax.set_title('\\textbf{Lookup}', pad=1)
ax.set_ylim(-0.75, 1.26)
ax.set_xlim(1e2, 1.2e5)
format_axes(ax, 'log', 'linear')
ax.set_xticks([1e2, 1e3, 1e4, 1e5])
ax.set_yticks([-0.5, 0, 0.5, 1])
ax.set_yticklabels(["-50\%", "0\%", "+50\%", "~~~~+100\%"], rotation=90, verticalalignment='center')
# Reorder the legend entries into presentation order.
handles, labels = ax.get_legend_handles_labels()
handles[0], handles[1], handles[2] = handles[1], handles[2], handles[0]
labels[0], labels[1], labels[2] = labels[1], labels[2], labels[0]
allfig = plt.gcf()
allfig.legend(handles, labels, ncol=4, bbox_to_anchor=(0.5, 1.0), loc='upper center', borderaxespad=0, frameon=False, columnspacing=1)
plt.tight_layout()
savefig("./pdf/optimization/addressing")
plt.close()
# --- Optimization: hash function comparison (MurmurMix vs. multiplicative) -----
skylineHashing = {}
analyzeHashing(read_benchmark('../benchmark/paper/optimization/hashing/hashing_count.csv'), skylineHashing)
analyzeHashing(read_benchmark('../benchmark/paper/optimization/hashing/hashing_construct.csv'), skylineHashing)
# 'factor' shifts the bar group left (-1) or right (+1) of each tick.
config = {
    'Murmur': {'label': 'murmur', 'color1': colors['blue'], 'color2': colors['lightblue'], 'factor': -1},
    'Mul': {'label': 'mul', 'color1': colors['orange'], 'color2': colors['lightorange'], 'factor': 1},
}
latexify(cm2inch(8.5), cm2inch(2), 2)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.axhline(y=0, color='k', linestyle='-', lw=1, zorder=20)
ax.set_ylabel("Speedup [\%]")
handles = plotHashing(config, skylineHashing, ax)
format_axes(ax)
ax.set_yticks([0, 0.25, 0.5, 0.75])
ax.set_ylim(-0, 0.77)
ax.set_xticks([0, 1, 2, 3])
ax.set_xticklabels(["\\textbf{Bloom}", "\\textbf{Cuckoo}", "\\textbf{Morton}", "\\textbf{Xor}"])
ax.tick_params(axis='x', which='major', pad=2.5)
ax.yaxis.set_major_formatter(speedup)
legend = ax.legend((handles['Murmur'], handles['Mul']), ('MurmurMix', 'Mul'), labelspacing=0, ncol=2, bbox_to_anchor=(0.99, 1), borderaxespad=0, framealpha=1, edgecolor='black', fancybox=False)
legend.get_frame().set_linewidth(0.5)
# allfig = plt.gcf()
# allfig.legend((handles['Murmur'], handles['Mul']), ('Murmur', 'Mul'), ncol=2, bbox_to_anchor=(0.5, 1.05), loc='upper center', borderaxespad=0, frameon=False, columnspacing=2)
savefig("./pdf/optimization/hashing")
plt.close()
# --- Optimization: partitioning ------------------------------------------------
skylinePartitioning = {}
analyzePerKey(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_construct.csv'), skylinePartitioning)
analyzePerKey(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_count.csv'), skylinePartitioning)
# analyzePerKey(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_construct_small.csv'), skylinePartitioning)
# analyzePerKey(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_construct_large.csv'), skylinePartitioning)
# analyzePerKey(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_count_small.csv'), skylinePartitioning)
# analyzePerKey(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_count_large.csv'), skylinePartitioning)
# analyzePerKey(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_count_huge1.csv'), skylinePartitioning)
# analyzePerKey(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_count_huge2.csv'), skylinePartitioning)
# analyzePerKey(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_count_huge3.csv'), skylinePartitioning)
# Unpartitioned variants are dashed and unlabeled; the partitioned variants
# carry the legend labels.
config = {
    'BloomBlocked': {'label': '', 'color': colors['bloom'], 'linestyle': 'dashed', 'linewidth': 1},
    'Morton': {'label': '', 'color': colors['morton'], 'linestyle': 'dashed', 'linewidth': 1},
    'Cuckoo': {'label': '', 'color': colors['cuckoo'], 'linestyle': 'dashed', 'linewidth': 1},
    'Xor': {'label': '', 'color': colors['xor'], 'linestyle': 'dashed', 'linewidth': 1},
    'BloomBlockedPartitioned': {'label': 'Bloom filter', 'color': colors['bloom'], 'linestyle': 'solid', 'linewidth': 1},
    'MortonPartitioned': {'label': 'Morton filter', 'color': colors['morton'], 'linestyle': 'solid', 'linewidth': 1},
    'CuckooPartitioned': {'label': 'Cuckoo filter', 'color': colors['cuckoo'], 'linestyle': 'solid', 'linewidth': 1},
    'XorPartitioned': {'label': 'Xor filter', 'color': colors['xor'], 'linestyle': 'solid', 'linewidth': 1},
}
latexify(cm2inch(9), cm2inch(3.1), 2)
fig = plt.figure()
ax = fig.add_subplot(121)
plotFilterSize(config, 'Construct', skylinePartitioning, ax, 't', 1e1, 1.3e6, 223)
ax.set_title('\\textbf{Build}')
ax.set_ylabel("Execution time per key [ns]")  # fix label typo: "Exection" -> "Execution"
ax.set_ylim(0, 252)
ax.set_xlim(1e1, 1.4e6)
format_axes(ax, 'log', 'linear')
ax.set_xticks([1e1, 1e2, 1e3, 1e4, 1e5, 1e6])
ax.set_yticks([0, 100, 200])
ax.set_yticklabels(["0", "100", "200"], rotation=90, verticalalignment='center')
ax = fig.add_subplot(122)
plotFilterSize(config, 'Count', skylinePartitioning, ax, 't', 1e1, 1.3e6, 78)
ax.set_title('\\textbf{Lookup}')
ax.set_ylim(0, 88)
ax.set_xlim(1e1, 1.4e6)
format_axes(ax, 'log', 'linear')
ax.set_xticks([1e1, 1e2, 1e3, 1e4, 1e5, 1e6])
ax.set_yticks([0, 25, 50, 75])
ax.set_yticklabels(["0", "25", "50", "75"], rotation=90, verticalalignment='center')
# Reorder the legend entries into presentation order.
handles, labels = ax.get_legend_handles_labels()
handles[1], handles[2] = handles[2], handles[1]
labels[1], labels[2] = labels[2], labels[1]
allfig = plt.gcf()
allfig.legend(handles, labels, ncol=4, bbox_to_anchor=(0.5, 1.02), loc='upper center', borderaxespad=0, frameon=False, columnspacing=1)
plt.tight_layout()
savefig("./pdf/optimization/partitioning_t")
# Second partitioning figure: dTLB misses during build, LLC misses during lookup.
latexify(cm2inch(9), cm2inch(2.6), 2)
fig = plt.figure()
ax = fig.add_subplot(121)
plotFilterSize(config, 'Construct', skylinePartitioning, ax, 'dtlb', 1e1, 1.3e6, 7.1, True)
ax.set_ylabel("dTLB-misses per key")
ax.yaxis.set_label_coords(-0.1, 0.35)
ax.set_ylim(-0.25, 7.75)
ax.set_xlim(1e1, 1.4e6)
format_axes(ax, 'log', 'linear')
ax.set_xticks([1e1, 1e2, 1e3, 1e4, 1e5, 1e6])
ax.set_yticks([0, 2, 4, 6])
ax = fig.add_subplot(122)
plotFilterSize(config, 'Count', skylinePartitioning, ax, 'llc', 1e1, 1.3e6, 2.99)
ax.set_ylabel("LLC-misses per key")
ax.yaxis.set_label_coords(-0.1, 0.35)
ax.set_ylim(0, 3.25)
ax.set_xlim(1e1, 1.4e6)
format_axes(ax, 'log', 'linear')
ax.set_xticks([1e1, 1e2, 1e3, 1e4, 1e5, 1e6])
ax.set_yticks([0, 1, 2, 3])
plt.tight_layout()
savefig("./pdf/optimization/partitioning_misses")
# --- Optimization: optimal partition-count corridor ----------------------------
skylinePartition = {}
analyzeCorridor(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_count.csv'), skylinePartition)
analyzeCorridor(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_construct.csv'), skylinePartition)
# For every filter / fixture / key count, find the partition count with the
# lowest time and collect the partition counts whose time is within 10%
# ('corridor') and 5% ('corridor5') of that optimum.
skylineCorridor = {}
for name in {'BloomBlocked', 'Cuckoo', 'Xor'}:
    skylineCorridor[name] = {'Count': {}, 'Construct': {}}
    for fixture in ['Count', 'Construct']:
        partitionName = f'{name}Partitioned'
        for n_elements in skylinePartition[partitionName][fixture]:
            ts = {}  # all throughputs, keyed by partition count
            minT = float('inf')
            minTId = 0
            # Splice the unpartitioned measurement in as partition count 0.0
            # so it competes with the partitioned variants.
            if 0.0 in skylinePartition[name][fixture][n_elements]:
                skylinePartition[partitionName][fixture][n_elements][0.0] = skylinePartition[name][fixture][n_elements][0.0]
            for partitionNum in skylinePartition[partitionName][fixture][n_elements]:
                partitioned = skylinePartition[partitionName][fixture][n_elements][partitionNum]
                ts[partitionNum] = partitioned['t']
                if partitioned['t'] < minT:
                    minTId = partitionNum
                    minT = partitioned['t']
            skylineCorridor[name][fixture][n_elements] = {}
            skylineCorridor[name][fixture][n_elements]['size'] = skylinePartition[partitionName][fixture][n_elements][minTId]['size']
            skylineCorridor[name][fixture][n_elements]['min'] = minTId
            skylineCorridor[name][fixture][n_elements]['corridor'] = []
            skylineCorridor[name][fixture][n_elements]['corridor5'] = []
            for partitionNum in ts:
                t = ts[partitionNum]
                if t < (minT / 0.9):
                    skylineCorridor[name][fixture][n_elements]['corridor'].append(partitionNum)
                if t < (minT / 0.95):
                    skylineCorridor[name][fixture][n_elements]['corridor5'].append(partitionNum)
configConstruct = {
    'BloomBlocked': {'label': 'bloom', 'color': colors['bloom'], 'linestyle': 'solid', 'linewidth': 1},
    'Cuckoo': {'label': 'cuckoo', 'color': colors['cuckoo'], 'linestyle': 'solid', 'linewidth': 1},
    'Xor': {'label': 'xor', 'color': colors['xor'], 'linestyle': 'solid', 'linewidth': 1},
}
configCount = {
    'BloomBlocked': {'label': 'bloom', 'color': colors['bloom'], 'linestyle': 'solid', 'linewidth': 1},
    'Cuckoo': {'label': 'cuckoo', 'color': colors['cuckoo'], 'linestyle': 'solid', 'linewidth': 1},
    # 'Xor': {'label': 'xor', 'color': colors['xor'], 'linestyle': 'solid', 'linewidth': 1},
}
latexify(cm2inch(8.5), cm2inch(3.5), 2)
fig = plt.figure()
ax = fig.add_subplot(121)
plotCorridor(configConstruct, skylineCorridor, ax, 'Construct', 1e1, 1.3e6, 12.92)
format_axes(ax, 'log', 'linear')
ax.set_xlim(1e1, 1.4e6)
ax.set_ylim(-0.5, 14.5)
ax.set_ylabel("Number of Partitions", labelpad=4)
format_axes(ax, 'log', 'linear')
ax.set_title('\\textbf{Build}')
ax.set_yticks([0, 4, 8, 12])
ax.set_xticks([1e1, 1e2, 1e3, 1e4, 1e5, 1e6])
# Tick values appear to be log2 partition counts, rendered as powers of two.
ax.set_yticklabels(["$0$", "$2^{4}$", "$2^{8}$", "$2^{12}$"], horizontalalignment="left", x=-0.075)
ax = fig.add_subplot(122)
plotCorridor(configCount, skylineCorridor, ax, 'Count', 1e1, 1.3e6, 12.92)
format_axes(ax, 'log', 'linear')
ax.set_xlim(1e1, 1.4e6)
ax.set_ylim(-0.5, 14.5)
format_axes(ax, 'log', 'linear')
ax.set_yticks([0, 4, 8, 12])
ax.set_yticklabels([])
ax.set_xticks([1e1, 1e2, 1e3, 1e4, 1e5, 1e6])
ax.set_title('\\textbf{Lookup}')
plt.tight_layout()
savefig("./pdf/optimization/partitioning_corridor")
# --- Optimization: SIMD lookup speedup -----------------------------------------
skylineSIMD = {}
analyzeSIMD(read_benchmark('../benchmark/paper/optimization/partitioning/partitioning_count.csv'), skylineSIMD)
analyzeSIMD(read_benchmark('../benchmark/paper/optimization/simd/simd_count.csv'), skylineSIMD)
# Speedup of each SIMD variant over its scalar counterpart; -2 flags
# missing/invalid measurements (plotted off-scale by plotFilterSize).
for name in {'BloomBlocked', 'Cuckoo', 'Morton', 'Xor'}:
    for n_elements in skylineSIMD[f'{name}SIMD']['Count']:
        simd = skylineSIMD[f'{name}SIMD']['Count'][n_elements]
        base_name = name.replace('Horizontal', '').replace('Vertical', '')
        if n_elements in skylineSIMD[base_name]['Count']:
            scalar = skylineSIMD[base_name]['Count'][n_elements]
            # NOTE(review): `relative` is assigned but never used afterwards.
            relative = {'size': simd['size']}
            simd['speedup'] = -2 if simd['t'] == 0 or math.isnan(simd['t']) else scalar['t'] / simd['t'] - 1
# Per-filter line styles for the SIMD speedup figure.
config = {
    'MortonSIMD': {'label': 'Morton filter', 'color': colors['morton'], 'linestyle': 'solid', 'linewidth': 1},
    'CuckooSIMD': {'label': 'Cuckoo filter', 'color': colors['cuckoo'], 'linestyle': 'solid', 'linewidth': 1},
    'BloomBlockedSIMD': {'label': 'Bloom filter', 'color': colors['bloom'], 'linestyle': 'solid', 'linewidth': 1},
    'XorSIMD': {'label': 'Xor filter', 'color': colors['xor'], 'linestyle': 'solid', 'linewidth': 1},
}
latexify(cm2inch(8.5), cm2inch(2.5), 2)
fig = plt.figure()
ax = fig.add_subplot(111)
handles = plotFilterSize(config, 'Count', skylineSIMD, ax, 'speedup', 1e0, 1.3e6, 0.95)
format_axes(ax, 'log', 'linear')
# Fix: '%' escaped as '\%' for the TeX renderer (an unescaped '%' starts a TeX
# comment and truncates the label), matching the other '\%' labels in this file.
ax.set_ylabel("Speedup [\%]")
ax.set_xlim(1e0, 1.4e6)
ax.set_ylim(-0.15, 1.05)
ax.yaxis.set_major_formatter(speedup)
# Reorder the legend entries into presentation order.
handles, labels = ax.get_legend_handles_labels()
handles[0], handles[2] = handles[2], handles[0]
labels[0], labels[2] = labels[2], labels[0]
allfig = plt.gcf()
allfig.legend(handles, labels, ncol=4, bbox_to_anchor=(0.45, 1.04), loc='upper center', borderaxespad=0, frameon=False, columnspacing=1)
savefig("./pdf/optimization/simd")
# --- Optimization: multi-threading scale-up (three machines) -------------------
skylineMultiThreading = {}
analyzeMultiThreading('Skylake_Count', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_count_9900X.csv'), skylineMultiThreading)
analyzeMultiThreading('Skylake_Count', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_mtcount_9900X.csv'), skylineMultiThreading)
analyzeMultiThreading('Skylake_Construct', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_construct_9900X.csv'), skylineMultiThreading)
analyzeMultiThreading('Ryzen_Count', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_count_ryzen3000.csv'), skylineMultiThreading)
analyzeMultiThreading('Ryzen_Count', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_mtcount_ryzen3000.csv'), skylineMultiThreading)
analyzeMultiThreading('Ryzen_Construct', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_construct_ryzen3000.csv'), skylineMultiThreading)
# NUMA runs pass True as the extra flag (meaning defined by analyzeMultiThreading).
analyzeMultiThreading('Ryzen_Count', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_count_ryzen3000_numa.csv'), skylineMultiThreading, True)
analyzeMultiThreading('Ryzen_Count', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_mtcount_ryzen3000_numa.csv'), skylineMultiThreading, True)
analyzeMultiThreading('Ryzen_Construct', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_construct_ryzen3000_numa.csv'), skylineMultiThreading, True)
analyzeMultiThreading('Xeon_Count', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_count_xeon.csv'), skylineMultiThreading)
analyzeMultiThreading('Xeon_Count', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_mtcount_xeon.csv'), skylineMultiThreading)
analyzeMultiThreading('Xeon_Construct', read_benchmark('../benchmark/paper/optimization/multi-threading/multi_threading_construct_xeon.csv'), skylineMultiThreading)
# Scale-up = single-threaded time / multi-threaded time; 0 flags missing or
# invalid measurements. Note name.replace('MT', '') strips every 'MT'
# occurrence, which maps e.g. '...MTPartitioned' to its single-threaded
# '...Partitioned' baseline.
for machine in {'Skylake', 'Xeon', 'Ryzen'}:
    for filtername in {'BloomBlocked', 'Cuckoo', 'Morton', 'Xor'}:
        for name in {f'{machine}_Count_{filtername}MT', f'{machine}_Count_{filtername}PartitionedMT', f'{machine}_Count_{filtername}MTPartitioned', f'{machine}_Construct_{filtername}PartitionedMT'}:
            if name in skylineMultiThreading:
                singlethreaded = skylineMultiThreading[name.replace('MT', '')][1]
                for n_threads in skylineMultiThreading[name]:
                    multithreaded = skylineMultiThreading[name][n_threads]
                    multithreaded['scaleup'] = 0 if multithreaded['t'] == 0 or math.isnan(multithreaded['t']) else singlethreaded['t'] / multithreaded['t']
# Curve styles for the Bloom scale-up plot (build and three lookup variants).
config = {
    'Construct_BloomBlockedPartitionedMT': {'label': 'Build\\textsuperscript{Part+MT}', 'color': colors['bloom'], 'linestyle': 'solid', 'linewidth': 1},
    'Count_BloomBlockedMT': {'label': 'Lookup\\textsuperscript{MT}', 'color': colors['cuckoo'], 'linestyle': 'solid', 'linewidth': 1},
    'Count_BloomBlockedPartitionedMT': {'label': 'Lookup\\textsuperscript{Part+MT}', 'color': colors['morton'], 'linestyle': 'solid', 'linewidth': 1},
    'Count_BloomBlockedMTPartitioned': {'label': 'Lookup\\textsuperscript{MT+Part}', 'color': colors['xor'], 'linestyle': 'solid', 'linewidth': 1},
}
def format(ax, title):
    """Apply the per-machine axis styling to a scale-up subplot.

    Sets labels, limits and machine-specific tick positions, and shades
    the x-range where thread counts exceed the physical cores (SMT).

    NOTE(review): shadows the builtin ``format``; kept as-is because the
    calls below rely on this name.

    :param ax: matplotlib axes to style
    :param title: machine name ('Skylake-X', 'Ryzen' or 'Xeon Gold') that
        selects the machine-specific layout
    """
    ax.set_xlabel("Threads")
    format_axes(ax, 'linear', 'linear')
    ax.set_ylabel("Scale-Up")
    ax.set_title(f'\\textbf{"{" + title + "}"}', pad=1)
    ax.legend().set_visible(False)
    ax.set_ylim(1)
    if title == 'Skylake-X':
        # Compressed x-axis: fixed tick positions are relabelled with the
        # actual thread counts.
        ax.set_xlim(0, 14.25)
        ax.xaxis.set_major_locator(matplotlib.ticker.FixedLocator([0, 1, 3, 5, 7, 9, 11.5, 14]))
        ax.set_xticklabels([1, 2, 4, 6, 8, 10, 15, 20])
        ax.yaxis.set_major_locator(matplotlib.ticker.FixedLocator([1, 5, 10]))
        ax.yaxis.set_minor_locator(matplotlib.ticker.FixedLocator([3, 7.5]))
        ind = np.arange(15)
        ylim = ax.get_ylim()
        # Grey rectangle + label marking the SMT (hyper-threading) region.
        ax.add_patch(Rectangle((ind[-6] + .01, ylim[0]), 8, ylim[1] - ylim[0], color='#000000', alpha=0.15, linewidth=0))
        ax.text(9 + 5.25 * 0.5, ylim[0] + (ylim[1] - ylim[0]) * .04, '\\textsf{\\textbf{SMT}}', ha='center', va='bottom', color='w')
    if title == 'Ryzen':
        ax.set_xlim(0, 19.30)
        ax.set_ylim(1, 16.1)
        ax.xaxis.set_major_locator(matplotlib.ticker.FixedLocator([0, 3, 7, 11, 15, 17, 19]))
        ax.set_xticklabels([1, 4, 8, 12, 16, 24, 32])
        ax.yaxis.set_major_locator(matplotlib.ticker.FixedLocator([1, 8, 16]))
        ax.yaxis.set_minor_locator(matplotlib.ticker.FixedLocator([4.5, 12]))
        ind = np.arange(20)
        ylim = ax.get_ylim()
        ax.add_patch(Rectangle((ind[-5] + .01, ylim[0]), 8, ylim[1] - ylim[0], color='#000000', alpha=0.15, linewidth=0))
        ax.text(15 + 4.3 * 0.5, ylim[0] + (ylim[1] - ylim[0]) * .05, '\\textsf{\\textbf{SMT}}', ha='center', va='bottom', color='w')
    if title == 'Xeon Gold':
        ax.set_xlim(0, 27.3)
        ax.xaxis.set_major_locator(matplotlib.ticker.FixedLocator([0, 3, 7, 11, 15, 19, 23, 25, 27]))
        ax.set_xticklabels([1, 4, 8, 12, 16, 20, 24, 36, 48])
        ax.yaxis.set_major_locator(matplotlib.ticker.FixedLocator([1, 12, 24]))
        ax.yaxis.set_minor_locator(matplotlib.ticker.FixedLocator([6.5, 18]))
        ind = np.arange(28)
        ylim = ax.get_ylim()
        ax.add_patch(Rectangle((ind[-5] + .01, ylim[0]), 8, ylim[1] - ylim[0], color='#000000', alpha=0.15, linewidth=0))
        ax.text(23 + 4.3 * 0.5, ylim[0] + (ylim[1] - ylim[0]) * .05, '\\textsf{\\textbf{SMT}}', ha='center', va='bottom', color='w')
# Assemble the three-machine scale-up figure (Skylake-X, Ryzen, Xeon Gold)
# and save it; 'datapoints' lists the thread counts measured per machine.
latexify(cm2inch(8.5), cm2inch(4.5), 2)
fig = plt.figure()
ax0 = fig.add_subplot(221)
ax1 = fig.add_subplot(222)
ax2 = fig.add_subplot(212)
datapoints = list(range(1, 11)) + [12, 14, 16, 18, 20]
plotScaleup(config, skylineMultiThreading, ax0, 'Skylake_', datapoints)
format(ax0, 'Skylake-X')
datapoints = list(range(1, 17)) + [20, 24, 28, 32]
plotScaleup(config, skylineMultiThreading, ax1, 'Ryzen_', datapoints)
format(ax1, 'Ryzen')
datapoints = list(range(1, 25)) + [30, 36, 42, 48]
plotScaleup(config, skylineMultiThreading, ax2, 'Xeon_', datapoints)
format(ax2, 'Xeon Gold')
# One shared legend above all three subplots.
handles, labels = ax0.get_legend_handles_labels()
allfig = plt.gcf()
allfig.legend(handles, labels, ncol=4, bbox_to_anchor=(0.5, 1.05), loc='upper center', borderaxespad=0, frameon=False)
plt.tight_layout(h_pad=-0.5)
fig.subplots_adjust(bottom=0.15)
savefig("./pdf/optimization/multithreading")
# Competitor experiments: load build/lookup benchmarks for every competitor
# implementation and for our matching configurations.
skylineCompetitors = {}
analyzeCompetitors(read_benchmark('../benchmark/paper/experiments/competitors/competitors_bsd_construct.csv'), skylineCompetitors)
analyzeCompetitors(read_benchmark('../benchmark/paper/experiments/competitors/competitors_bsd_count.csv'), skylineCompetitors)
analyzeCompetitors(read_benchmark('../benchmark/paper/experiments/competitors/competitors_cuckoo_construct.csv'), skylineCompetitors)
analyzeCompetitors(read_benchmark('../benchmark/paper/experiments/competitors/competitors_cuckoo_count.csv'), skylineCompetitors)
analyzeCompetitors(read_benchmark('../benchmark/paper/experiments/competitors/competitors_impala_construct.csv'), skylineCompetitors)
analyzeCompetitors(read_benchmark('../benchmark/paper/experiments/competitors/competitors_impala_count.csv'), skylineCompetitors)
analyzeCompetitors(read_benchmark('../benchmark/paper/experiments/competitors/competitors_xor_construct.csv'), skylineCompetitors)
analyzeCompetitors(read_benchmark('../benchmark/paper/experiments/competitors/competitors_xor_count.csv'), skylineCompetitors)
# Maps each competitor skyline key to its plot label and to the key of our
# implementation it is compared against (labels use LaTeX \cite commands).
config = {
    'BSD_Bloom_Blocked512': {'label': 'cache-blocked\nBloom filter~\cite{Lang19}', 'competitor': 'Bloom_Blocked512'},
    'BSD_Bloom_Blocked32': {'label': 'register-blocked\nBloom filter~\cite{Lang19}', 'competitor': 'Bloom_Blocked32'},
    'BSD_Bloom_Grouped2': {'label': 'cache-sectorized\nBloom filter~\cite{Lang19}', 'competitor': 'Bloom_Grouped2'},
    'Impala_Bloom_Sectorized256_AVX2': {'label': 'sectorized\nBloom filter~\cite{Kornacker15}', 'competitor': 'Bloom_Sectorized256_AVX2'},
    'Efficient_Cuckoo_Standard4_Scalar': {'label': 'Cuckoo filter~\cite{Fan14}', 'competitor': 'Cuckoo_Standard4_Scalar'},
    'AMD_Morton_Standard3_Scalar': {'label': 'Morton filter~\cite{Breslow18}', 'competitor': 'Morton_Standard3_Scalar'},
    'Fastfilter_Xor_Standard_Scalar': {'label': 'Xor filter~\cite{Graf20}', 'competitor': 'Xor_Standard_Scalar'},
}
def plotCompetitors(config, fixture, skyline, ax, legend=True):
width = 0.4
ind = []
label = []
p = {}
for fixture in ['Construct', 'Count']:
for i, name in enumerate(config.keys()):
ind.append(i)
label.append(config[name]['label'])
t_competitor = skyline[name][fixture]['t']
t_our = skyline[config[name]['competitor']][fixture]['t']
p['Competitor'] = ax.bar([(-0.01 if fixture == 'Construct' else 0.01) + i + width / 4 * (-3 if fixture == 'Construct' else 1)], [t_competitor], width / 2, color=colors['blue'], zorder=10)
p['Ours'] = ax.bar([(-0.01 if fixture == 'Construct' else 0.01) + i + width / 4 * (-1 if fixture == 'Construct' else 3)], [t_our], width / 2, color=colors['orange'], zorder=10)
bar0 = p['Competitor'][0]
bar1 = p['Ours'][0]
posText = (bar0.get_height() + bar1.get_height()) / 2
if t_competitor <= t_our:
middle = bar0.get_x() + bar0.get_width() / 2
else:
middle = bar1.get_x() + bar1.get_width() / 2
height = max(bar0.get_height(), bar1.get_height())
ax.plot([bar0.get_x(), bar0.get_x() + bar0.get_width() * 2], [height, height], 'k-', lw=0.5, zorder=20)
ax.plot([middle, middle], [bar0.get_height(), bar1.get_height()], 'k-', lw=0.5, zorder=20)
ax.text(bar1.get_x() + (-0.05 if fixture == 'Construct' and t_our / t_competitor - 1 > 0.99 else 0), height + 0.005e9, '\\textbf{' + speedup(t_our / t_competitor - 1) + '}', ha='center',
va='bottom', fontsize=6, color='k')
ax.text(bar0.get_x() + width / 2 + (0.05 if fixture == 'Count' else 0), -4e6, 'Lookup' if fixture == 'Count' else 'Build', ha='center', va='top', color='k', fontsize=4, zorder=20)
# ax.text(bar1.get_x() + width / 2 + 0.05, -1e6, 'Lookup' if fixture == 'Count' else 'Build', ha='center', va='top', color='k', fontsize=4, zorder=20)
l = ax.legend((p['Competitor'], p['Ours']), ('Competitor', 'Ours'), labelspacing=0, ncol=2, bbox_to_anchor=(0.99, 1), borderaxespad=0, framealpha=1, edgecolor='black', fancybox=False)
l.set_visible(legend)
l.get_frame().set_linewidth(0.5)
ax.set_xticks(ind)
ax.set_xticklabels(label, rotation=30, ha='right', rotation_mode="anchor", fontsize=6)
ax.yaxis.set_major_formatter(gigs)
ax.set_ylim(0, 0.41e9)
ax.set_xlim(-0.55, 6.45)
ax.set_yticks([0, 0.2e9, 0.4e9])
ax.axhline(y=0, color='k', linestyle='-', lw=1, zorder=20)
# Build and save the competitor comparison figure.
# NOTE(review): the first latexify() call is immediately overridden by the
# second one -- the first appears to be a leftover.
latexify(cm2inch(8.5), cm2inch(4), 2)
# 6
latexify(cm2inch(9), cm2inch(6.5), 1)
fig = plt.figure()
ax = fig.add_subplot(211)
format_axes(ax)
plotCompetitors(config, 'Count', skylineCompetitors, ax)
# ax.set_xticklabels([])
# ax.set_title('\\textbf{Lookup}')
ax.set_ylabel("Throughput [Keys/s]")
# ax = fig.add_subplot(212)
# format_axes(ax)
# plotCompetitors(config, 'Construct', skylineCompetitors, ax, False)
# ax.set_title('\\textbf{Build}')
# ax.set_ylabel("Throughput [Keys/s]")
plt.tight_layout()
fig.subplots_adjust(bottom=0.22)  # leave room for the rotated tick labels
savefig("./pdf/experiments/competitors")
| [
"matplotlib.ticker.NullFormatter",
"matplotlib.ticker.LogLocator",
"numpy.sqrt",
"pandas.read_csv",
"math.log",
"math.log10",
"matplotlib.ticker.AutoMinorLocator",
"numpy.arange",
"os.path.exists",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pyplot.close",
"mpl_toolkits.axes_grid1.inset_loc... | [((1990, 2010), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['kilos'], {}), '(kilos)\n', (2003, 2010), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, FixedLocator\n'), ((2019, 2042), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['millions'], {}), '(millions)\n', (2032, 2042), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, FixedLocator\n'), ((2050, 2073), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['billions'], {}), '(billions)\n', (2063, 2073), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, FixedLocator\n'), ((2082, 2106), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['billions2'], {}), '(billions2)\n', (2095, 2106), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, FixedLocator\n'), ((2117, 2136), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['perc'], {}), '(perc)\n', (2130, 2136), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, FixedLocator\n'), ((2148, 2168), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['perc2'], {}), '(perc2)\n', (2161, 2168), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, FixedLocator\n'), ((5191, 5212), 'matplotlib.use', 'matplotlib.use', (['"""pgf"""'], {}), "('pgf')\n", (5205, 5212), False, 'import matplotlib\n'), ((31192, 31204), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (31202, 31204), True, 'import matplotlib.pyplot as plt\n'), ((44089, 44101), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (44099, 44101), True, 'import matplotlib.pyplot as plt\n'), ((45140, 45149), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (45147, 45149), True, 'import matplotlib.pyplot as plt\n'), ((45286, 45304), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (45302, 45304), True, 'import matplotlib.pyplot as plt\n'), ((45346, 45357), 'matplotlib.pyplot.close', 'plt.close', 
([], {}), '()\n', (45355, 45357), True, 'import matplotlib.pyplot as plt\n'), ((45869, 45881), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (45879, 45881), True, 'import matplotlib.pyplot as plt\n'), ((46801, 46812), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (46810, 46812), True, 'import matplotlib.pyplot as plt\n'), ((48911, 48923), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (48921, 48923), True, 'import matplotlib.pyplot as plt\n'), ((49869, 49878), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (49876, 49878), True, 'import matplotlib.pyplot as plt\n'), ((50016, 50034), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (50032, 50034), True, 'import matplotlib.pyplot as plt\n'), ((50125, 50137), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (50135, 50137), True, 'import matplotlib.pyplot as plt\n'), ((50823, 50841), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (50839, 50841), True, 'import matplotlib.pyplot as plt\n'), ((53536, 53548), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (53546, 53548), True, 'import matplotlib.pyplot as plt\n'), ((54377, 54395), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (54393, 54395), True, 'import matplotlib.pyplot as plt\n'), ((55709, 55721), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (55719, 55721), True, 'import matplotlib.pyplot as plt\n'), ((56135, 56144), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (56142, 56144), True, 'import matplotlib.pyplot as plt\n'), ((62045, 62057), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (62055, 62057), True, 'import matplotlib.pyplot as plt\n'), ((62641, 62650), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (62648, 62650), True, 'import matplotlib.pyplot as plt\n'), ((62771, 62799), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(-0.5)'}), '(h_pad=-0.5)\n', (62787, 
62799), True, 'import matplotlib.pyplot as plt\n'), ((67471, 67483), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (67481, 67483), True, 'import matplotlib.pyplot as plt\n'), ((67871, 67889), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (67887, 67889), True, 'import matplotlib.pyplot as plt\n'), ((3622, 3639), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (3633, 3639), True, 'import pandas as pd\n'), ((7386, 7420), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (['params'], {}), '(params)\n', (7412, 7420), False, 'import matplotlib\n'), ((11161, 11182), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (11176, 11182), False, 'import os\n'), ((11244, 11305), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + '.pgf')"], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "(path + '.pgf', bbox_inches='tight', pad_inches=0)\n", (11255, 11305), True, 'import matplotlib.pyplot as plt\n'), ((11310, 11371), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + '.pdf')"], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "(path + '.pdf', bbox_inches='tight', pad_inches=0)\n", (11321, 11371), True, 'import matplotlib.pyplot as plt\n'), ((21537, 21546), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (21544, 21546), True, 'import matplotlib.pyplot as plt\n'), ((36836, 36894), 'mpl_toolkits.axes_grid1.inset_locator.TransformedBbox', 'TransformedBbox', (['inset_axes.viewLim', 'parent_axes.transData'], {}), '(inset_axes.viewLim, parent_axes.transData)\n', (36851, 36894), False, 'from mpl_toolkits.axes_grid1.inset_locator import TransformedBbox, BboxPatch, BboxConnector\n'), ((36905, 36981), 'mpl_toolkits.axes_grid1.inset_locator.BboxPatch', 'BboxPatch', (['rect'], {'fill': '(False)', 'color': '"""w"""', 'ec': '"""gray"""', 'lw': '(0.5)', 'zorder': '(2)', 'ls': '"""--"""'}), "(rect, fill=False, color='w', ec='gray', lw=0.5, zorder=2, ls='--')\n", (36914, 36981), False, 'from 
mpl_toolkits.axes_grid1.inset_locator import TransformedBbox, BboxPatch, BboxConnector\n'), ((37022, 37125), 'mpl_toolkits.axes_grid1.inset_locator.BboxConnector', 'BboxConnector', (['inset_axes.bbox', 'rect'], {'loc1': 'loc1a', 'loc2': 'loc1b', 'ec': '"""gray"""', 'lw': '(0.5)', 'zorder': '(2)', 'ls': '"""--"""'}), "(inset_axes.bbox, rect, loc1=loc1a, loc2=loc1b, ec='gray', lw=\n 0.5, zorder=2, ls='--')\n", (37035, 37125), False, 'from mpl_toolkits.axes_grid1.inset_locator import TransformedBbox, BboxPatch, BboxConnector\n'), ((37186, 37289), 'mpl_toolkits.axes_grid1.inset_locator.BboxConnector', 'BboxConnector', (['inset_axes.bbox', 'rect'], {'loc1': 'loc2a', 'loc2': 'loc2b', 'ec': '"""gray"""', 'lw': '(0.5)', 'zorder': '(2)', 'ls': '"""--"""'}), "(inset_axes.bbox, rect, loc1=loc2a, loc2=loc2b, ec='gray', lw=\n 0.5, zorder=2, ls='--')\n", (37199, 37289), False, 'from mpl_toolkits.axes_grid1.inset_locator import TransformedBbox, BboxPatch, BboxConnector\n'), ((37385, 37397), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (37395, 37397), True, 'import matplotlib.pyplot as plt\n'), ((37636, 37679), 'mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes', 'zoomed_inset_axes', (['ax0', '(2)'], {'loc': '"""lower left"""'}), "(ax0, 2, loc='lower left')\n", (37653, 37679), False, 'from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, inset_axes\n'), ((38125, 38134), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (38132, 38134), True, 'import matplotlib.pyplot as plt\n'), ((38488, 38506), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (38504, 38506), True, 'import matplotlib.pyplot as plt\n'), ((38569, 38581), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (38579, 38581), True, 'import matplotlib.pyplot as plt\n'), ((38855, 38898), 'mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes', 'zoomed_inset_axes', (['ax0', '(2)'], {'loc': '"""lower left"""'}), "(ax0, 2, loc='lower left')\n", (38872, 
38898), False, 'from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, inset_axes\n'), ((39274, 39331), 'mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes', 'zoomed_inset_axes', (['ax0', '(2)'], {'loc': '"""upper right"""', 'borderpad': '(0)'}), "(ax0, 2, loc='upper right', borderpad=0)\n", (39291, 39331), False, 'from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, inset_axes\n'), ((39926, 40029), 'mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes', 'zoomed_inset_axes', (['ax1', '(2)'], {'loc': '"""upper left"""', 'bbox_to_anchor': '(0, 1.05)', 'bbox_transform': 'ax1.transAxes'}), "(ax1, 2, loc='upper left', bbox_to_anchor=(0, 1.05),\n bbox_transform=ax1.transAxes)\n", (39943, 40029), False, 'from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, inset_axes\n'), ((40584, 40692), 'mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes', 'zoomed_inset_axes', (['ax1', '(2)'], {'loc': '"""lower right"""', 'bbox_to_anchor': '(1.04, -0.02)', 'bbox_transform': 'ax1.transAxes'}), "(ax1, 2, loc='lower right', bbox_to_anchor=(1.04, -0.02),\n bbox_transform=ax1.transAxes)\n", (40601, 40692), False, 'from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, inset_axes\n'), ((41296, 41305), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (41303, 41305), True, 'import matplotlib.pyplot as plt\n'), ((41433, 41451), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (41449, 41451), True, 'import matplotlib.pyplot as plt\n'), ((41525, 41537), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (41535, 41537), True, 'import matplotlib.pyplot as plt\n'), ((42384, 42402), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (42400, 42402), True, 'import matplotlib.pyplot as plt\n'), ((42464, 42475), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (42473, 42475), True, 'import matplotlib.pyplot as plt\n'), ((8003, 8053), 'matplotlib.ticker.LogLocator', 
'matplotlib.ticker.LogLocator', ([], {'base': '(10)', 'numticks': '(12)'}), '(base=10, numticks=12)\n', (8031, 8053), False, 'import matplotlib\n'), ((8657, 8707), 'matplotlib.ticker.LogLocator', 'matplotlib.ticker.LogLocator', ([], {'base': '(10)', 'numticks': '(12)'}), '(base=10, numticks=12)\n', (8685, 8707), False, 'import matplotlib\n'), ((9643, 9693), 'matplotlib.ticker.LogLocator', 'matplotlib.ticker.LogLocator', ([], {'base': '(10)', 'numticks': '(12)'}), '(base=10, numticks=12)\n', (9671, 9693), False, 'import matplotlib\n'), ((9981, 10014), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (10012, 10014), False, 'import matplotlib\n'), ((10372, 10381), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10379, 10381), True, 'import matplotlib.pyplot as plt\n'), ((11194, 11213), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (11208, 11213), False, 'import os\n'), ((11223, 11239), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (11234, 11239), False, 'import os\n'), ((40309, 40346), 'matplotlib.ticker.FixedLocator', 'matplotlib.ticker.FixedLocator', (['[7.5]'], {}), '([7.5])\n', (40339, 40346), False, 'import matplotlib\n'), ((40974, 41012), 'matplotlib.ticker.FixedLocator', 'matplotlib.ticker.FixedLocator', (['[17.5]'], {}), '([17.5])\n', (41004, 41012), False, 'import matplotlib\n'), ((60305, 60318), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (60314, 60318), True, 'import numpy as np\n'), ((61006, 61019), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (61015, 61019), True, 'import numpy as np\n'), ((61698, 61711), 'numpy.arange', 'np.arange', (['(28)'], {}), '(28)\n', (61707, 61711), True, 'import numpy as np\n'), ((8134, 8163), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['logPrintFormat'], {}), '(logPrintFormat)\n', (8147, 8163), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, FixedLocator\n'), ((8344, 8377), 
'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (8375, 8377), False, 'import matplotlib\n'), ((8424, 8445), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', ([], {'n': '(2)'}), '(n=2)\n', (8440, 8445), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, FixedLocator\n'), ((8788, 8817), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['logPrintFormat'], {}), '(logPrintFormat)\n', (8801, 8817), False, 'from matplotlib.ticker import AutoMinorLocator, FuncFormatter, FixedLocator\n'), ((8998, 9031), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (9029, 9031), False, 'import matplotlib\n'), ((9774, 9807), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (9805, 9807), False, 'import matplotlib\n'), ((15815, 15853), 'math.log', 'math.log', (["benchmark['n_partitions']", '(2)'], {}), "(benchmark['n_partitions'], 2)\n", (15823, 15853), False, 'import math\n'), ((16773, 16811), 'math.log', 'math.log', (["benchmark['n_partitions']", '(2)'], {}), "(benchmark['n_partitions'], 2)\n", (16781, 16811), False, 'import math\n'), ((17560, 17598), 'math.log', 'math.log', (["benchmark['n_partitions']", '(2)'], {}), "(benchmark['n_partitions'], 2)\n", (17568, 17598), False, 'import math\n'), ((18514, 18552), 'math.log', 'math.log', (["benchmark['n_partitions']", '(2)'], {}), "(benchmark['n_partitions'], 2)\n", (18522, 18552), False, 'import math\n'), ((28189, 28204), 'math.isnan', 'math.isnan', (['val'], {}), '(val)\n', (28199, 28204), False, 'import math\n'), ((60017, 60077), 'matplotlib.ticker.FixedLocator', 'matplotlib.ticker.FixedLocator', (['[0, 1, 3, 5, 7, 9, 11.5, 14]'], {}), '([0, 1, 3, 5, 7, 9, 11.5, 14])\n', (60047, 60077), False, 'import matplotlib\n'), ((60170, 60212), 'matplotlib.ticker.FixedLocator', 'matplotlib.ticker.FixedLocator', (['[1, 5, 10]'], {}), '([1, 5, 10])\n', (60200, 60212), False, 'import matplotlib\n'), 
((60249, 60289), 'matplotlib.ticker.FixedLocator', 'matplotlib.ticker.FixedLocator', (['[3, 7.5]'], {}), '([3, 7.5])\n', (60279, 60289), False, 'import matplotlib\n'), ((60369, 60473), 'matplotlib.patches.Rectangle', 'Rectangle', (['(ind[-6] + 0.01, ylim[0])', '(8)', '(ylim[1] - ylim[0])'], {'color': '"""#000000"""', 'alpha': '(0.15)', 'linewidth': '(0)'}), "((ind[-6] + 0.01, ylim[0]), 8, ylim[1] - ylim[0], color='#000000',\n alpha=0.15, linewidth=0)\n", (60378, 60473), False, 'from matplotlib.patches import Rectangle\n'), ((60722, 60779), 'matplotlib.ticker.FixedLocator', 'matplotlib.ticker.FixedLocator', (['[0, 3, 7, 11, 15, 17, 19]'], {}), '([0, 3, 7, 11, 15, 17, 19])\n', (60752, 60779), False, 'import matplotlib\n'), ((60870, 60912), 'matplotlib.ticker.FixedLocator', 'matplotlib.ticker.FixedLocator', (['[1, 8, 16]'], {}), '([1, 8, 16])\n', (60900, 60912), False, 'import matplotlib\n'), ((60949, 60990), 'matplotlib.ticker.FixedLocator', 'matplotlib.ticker.FixedLocator', (['[4.5, 12]'], {}), '([4.5, 12])\n', (60979, 60990), False, 'import matplotlib\n'), ((61070, 61174), 'matplotlib.patches.Rectangle', 'Rectangle', (['(ind[-5] + 0.01, ylim[0])', '(8)', '(ylim[1] - ylim[0])'], {'color': '"""#000000"""', 'alpha': '(0.15)', 'linewidth': '(0)'}), "((ind[-5] + 0.01, ylim[0]), 8, ylim[1] - ylim[0], color='#000000',\n alpha=0.15, linewidth=0)\n", (61079, 61174), False, 'from matplotlib.patches import Rectangle\n'), ((61397, 61462), 'matplotlib.ticker.FixedLocator', 'matplotlib.ticker.FixedLocator', (['[0, 3, 7, 11, 15, 19, 23, 25, 27]'], {}), '([0, 3, 7, 11, 15, 19, 23, 25, 27])\n', (61427, 61462), False, 'import matplotlib\n'), ((61561, 61604), 'matplotlib.ticker.FixedLocator', 'matplotlib.ticker.FixedLocator', (['[1, 12, 24]'], {}), '([1, 12, 24])\n', (61591, 61604), False, 'import matplotlib\n'), ((61641, 61682), 'matplotlib.ticker.FixedLocator', 'matplotlib.ticker.FixedLocator', (['[6.5, 18]'], {}), '([6.5, 18])\n', (61671, 61682), False, 'import matplotlib\n'), 
((61762, 61866), 'matplotlib.patches.Rectangle', 'Rectangle', (['(ind[-5] + 0.01, ylim[0])', '(8)', '(ylim[1] - ylim[0])'], {'color': '"""#000000"""', 'alpha': '(0.15)', 'linewidth': '(0)'}), "((ind[-5] + 0.01, ylim[0]), 8, ylim[1] - ylim[0], color='#000000',\n alpha=0.15, linewidth=0)\n", (61771, 61866), False, 'from matplotlib.patches import Rectangle\n'), ((5922, 5932), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (5929, 5932), True, 'import numpy as np\n'), ((7600, 7613), 'math.log10', 'math.log10', (['x'], {}), '(x)\n', (7610, 7613), False, 'import math\n'), ((8228, 8248), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {}), '(0, 1, 0.1)\n', (8237, 8248), True, 'import numpy as np\n'), ((8882, 8902), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {}), '(0, 1, 0.1)\n', (8891, 8902), True, 'import numpy as np\n'), ((25824, 25839), 'math.isnan', 'math.isnan', (['val'], {}), '(val)\n', (25834, 25839), False, 'import math\n'), ((7536, 7549), 'math.log10', 'math.log10', (['x'], {}), '(x)\n', (7546, 7549), False, 'import math\n'), ((55153, 55174), 'math.isnan', 'math.isnan', (["simd['t']"], {}), "(simd['t'])\n", (55163, 55174), False, 'import math\n'), ((14596, 14609), 'math.isnan', 'math.isnan', (['t'], {}), '(t)\n', (14606, 14609), False, 'import math\n'), ((43343, 43374), 'math.isnan', 'math.isnan', (['relative[attribute]'], {}), '(relative[attribute])\n', (43353, 43374), False, 'import math\n'), ((59010, 59040), 'math.isnan', 'math.isnan', (["multithreaded['t']"], {}), "(multithreaded['t'])\n", (59020, 59040), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 06 17:14:41 2015
@author: poesch
"""
import matplotlib.pyplot as plt
import numpy as np
def heatmap(data, gridcolor='face', cmap=None, colorbar=False):
    """Draw *data* as a pcolormesh heatmap with one value annotation per cell.

    data      -- 2d numpy array to display as heatmap
    gridcolor -- edge color of the cell borders ('face' hides the grid)
    cmap      -- matplotlib colormap, or None for the default
    colorbar  -- if True, attach a colorbar for the mesh

    Raises ValueError if *data* is not two-dimensional.
    """
    if data.ndim != 2:
        raise ValueError("Only supporting 2d arrays")
    # Use a distinct name for the mesh; the original reused 'heatmap',
    # shadowing the function itself inside its own body.
    mesh = plt.pcolormesh(data, edgecolors=gridcolor, cmap=cmap)
    lo, hi = np.min(data), np.max(data)
    # Cells in the lowest 20% of the value range get white text for
    # contrast against the dark colormap end.  (The unused upper threshold
    # at 80% was removed.)
    threshold = lo + (hi - lo) * 0.2
    for y in range(data.shape[0]):
        for x in range(data.shape[1]):
            plt.text(x + 0.5, y + 0.5, '%.0f' % data[y, x],
                     horizontalalignment='center',
                     verticalalignment='center',
                     color='black' if threshold < data[y, x] else 'white')
    if colorbar:
        plt.colorbar(mesh)
if __name__ == '__main__':
    # Demo: a mostly-zero 10x10 integer grid with two marked cells.
    data = np.zeros((10, 10), dtype=int)
    data[1, 2] = 2
    data[5, 5] = 5
    plt.subplot(121)
    heatmap(data, gridcolor='#00004f')
plt.show() | [
"matplotlib.pyplot.text",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.pcolormesh",
"numpy.max",
"numpy.array",
"numpy.min",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((450, 503), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['data'], {'edgecolors': 'gridcolor', 'cmap': 'cmap'}), '(data, edgecolors=gridcolor, cmap=cmap)\n', (464, 503), True, 'import matplotlib.pyplot as plt\n'), ((1135, 1482), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 2, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [0, 0, 0, 0, 0, 5, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 2, 0, 0, 0, 0, 0, 0, 0], [\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 5, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])\n', (1143, 1482), True, 'import numpy as np\n'), ((1623, 1639), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1634, 1639), True, 'import matplotlib.pyplot as plt\n'), ((1705, 1715), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1713, 1715), True, 'import matplotlib.pyplot as plt\n'), ((530, 542), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (536, 542), True, 'import numpy as np\n'), ((544, 556), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (550, 556), True, 'import numpy as np\n'), ((1046, 1067), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['heatmap'], {}), '(heatmap)\n', (1058, 1067), True, 'import matplotlib.pyplot as plt\n'), ((696, 856), 'matplotlib.pyplot.text', 'plt.text', (['(x + 0.5)', '(y + 0.5)', "('%.0f' % data[y, x])"], {'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'color': "('black' if b < data[y, x] else 'white')"}), "(x + 0.5, y + 0.5, '%.0f' % data[y, x], horizontalalignment=\n 'center', verticalalignment='center', color='black' if b < data[y, 
x] else\n 'white')\n", (704, 856), True, 'import matplotlib.pyplot as plt\n')] |
import tensorflow as tf
import numpy as np
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
## ======================================================================
## ======================================================================
def crop_and_concat_layer(inputs, axis=-1):
    '''
    Centre-crop every feature map in *inputs* to the spatial size of the
    first one and concatenate them along *axis*.

    The first tensor in the list defines the target size; each remaining
    tensor is cropped symmetrically around its centre.  The inputs may
    have different numbers of channels.
    :param inputs: list of tensors of equal rank (4-D or 5-D) but possibly
        different spatial sizes
    :param axis: axis along which to concatenate
    :return: the concatenated feature map tensor
    '''
    target_shape = inputs[0].get_shape().as_list()
    rank = len(target_shape)
    pieces = [inputs[0]]
    for tensor in inputs[1:]:
        # Symmetric crop offset per dimension (only spatial entries used).
        offset = np.subtract(tensor.get_shape().as_list(), target_shape) // 2
        if rank == 5:  # volumetric (3D) feature maps
            begin = (0, offset[1], offset[2], offset[3], 0)
            size = (-1, target_shape[1], target_shape[2], target_shape[3], -1)
        elif rank == 4:  # planar (2D) feature maps
            begin = (0, offset[1], offset[2], 0)
            size = (-1, target_shape[1], target_shape[2], -1)
        else:
            raise ValueError('Unexpected number of dimensions on tensor: %d' % rank)
        pieces.append(tf.slice(tensor, begin, size))
    return tf.concat(pieces, axis=axis)
## ======================================================================
## ======================================================================
def pad_to_size(bottom, output_size):
    '''
    Zero-pad the tensor *bottom* symmetrically so that its shape becomes
    *output_size*; for an odd size difference the extra row/column goes to
    the bottom/right.
    TODO: implement for 3D data
    :param bottom: 4-D input tensor (batch, height, width, channels)
    :param output_size: desired full shape, same rank as *bottom*
    :return: the zero-padded tensor
    :raises NotImplementedError: for 5-D (3D image) input
    :raises ValueError: for any other rank
    '''
    input_size = bottom.get_shape().as_list()
    size_diff = np.subtract(output_size, input_size)
    pad_size = size_diff // 2
    odd_bit = np.mod(size_diff, 2)
    if len(input_size) == 4:
        padded = tf.pad(bottom, paddings=[[0,0],
                                         [pad_size[1], pad_size[1] + odd_bit[1]],
                                         [pad_size[2], pad_size[2] + odd_bit[2]],
                                         [0,0]])
        return padded
    elif len(input_size) == 5:
        raise NotImplementedError('This layer has not yet been extended to 3D')
    else:
        # Bug fix: the original formatted the *list* input_size with '%d',
        # which raised a TypeError instead of the intended ValueError.
        # Report the offending rank instead.
        raise ValueError('Unexpected input size: %d' % len(input_size))
## ======================================================================
# reshape
## ======================================================================
def reshape_like(target, size, name):
    '''
    Bilinearly resize *target* to the given spatial *size*.
    target: tensor to be resized
    size: (new_height, new_width) the target tensor should be resized to
    name: name of the resize operation
    '''
    return tf.image.resize(
        target, size,
        method=tf.image.ResizeMethod.BILINEAR,
        name=name)
## ======================================================================
## ======================================================================
def _add_summaries(op, weights, biases):
    """Attach Tensorboard histogram summaries for a layer's parameters and activations."""
    # The [:-2] presumably strips a trailing ':0' tensor suffix from the
    # variable name -- TODO confirm against the variable naming scheme.
    tf.summary.histogram(weights.name[:-2], weights)
    if biases:
        tf.summary.histogram(biases.name[:-2], biases)
    tf.summary.histogram(op.op.name + '/activations', op)
| [
"logging.basicConfig",
"tensorflow.slice",
"tensorflow.pad",
"tensorflow.image.resize",
"numpy.subtract",
"tensorflow.concat",
"tensorflow.summary.histogram",
"numpy.mod"
] | [((58, 131), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)s %(message)s')\n", (77, 131), False, 'import logging\n'), ((1733, 1768), 'tensorflow.concat', 'tf.concat', (['concat_inputs'], {'axis': 'axis'}), '(concat_inputs, axis=axis)\n', (1742, 1768), True, 'import tensorflow as tf\n'), ((2157, 2193), 'numpy.subtract', 'np.subtract', (['output_size', 'input_size'], {}), '(output_size, input_size)\n', (2168, 2193), True, 'import numpy as np\n'), ((2239, 2259), 'numpy.mod', 'np.mod', (['size_diff', '(2)'], {}), '(size_diff, 2)\n', (2245, 2259), True, 'import numpy as np\n'), ((3114, 3193), 'tensorflow.image.resize', 'tf.image.resize', (['target', 'size'], {'method': 'tf.image.ResizeMethod.BILINEAR', 'name': 'name'}), '(target, size, method=tf.image.ResizeMethod.BILINEAR, name=name)\n', (3129, 3193), True, 'import tensorflow as tf\n'), ((3448, 3496), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['weights.name[:-2]', 'weights'], {}), '(weights.name[:-2], weights)\n', (3468, 3496), True, 'import tensorflow as tf\n'), ((3563, 3616), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["(op.op.name + '/activations')", 'op'], {}), "(op.op.name + '/activations', op)\n", (3583, 3616), True, 'import tensorflow as tf\n'), ((2309, 2437), 'tensorflow.pad', 'tf.pad', (['bottom'], {'paddings': '[[0, 0], [pad_size[1], pad_size[1] + odd_bit[1]], [pad_size[2], pad_size[2] +\n odd_bit[2]], [0, 0]]'}), '(bottom, paddings=[[0, 0], [pad_size[1], pad_size[1] + odd_bit[1]], [\n pad_size[2], pad_size[2] + odd_bit[2]], [0, 0]])\n', (2315, 2437), True, 'import tensorflow as tf\n'), ((3512, 3558), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['biases.name[:-2]', 'biases'], {}), '(biases.name[:-2], biases)\n', (3532, 3558), True, 'import tensorflow as tf\n'), ((985, 1022), 'numpy.subtract', 'np.subtract', (['larger_size', 'output_size'], {}), 
'(larger_size, output_size)\n', (996, 1022), True, 'import numpy as np\n'), ((1105, 1241), 'tensorflow.slice', 'tf.slice', (['inputs[ii]', '(0, start_crop[1], start_crop[2], start_crop[3], 0)', '(-1, output_size[1], output_size[2], output_size[3], -1)'], {}), '(inputs[ii], (0, start_crop[1], start_crop[2], start_crop[3], 0), (\n -1, output_size[1], output_size[2], output_size[3], -1))\n', (1113, 1241), True, 'import tensorflow as tf\n'), ((1389, 1494), 'tensorflow.slice', 'tf.slice', (['inputs[ii]', '(0, start_crop[1], start_crop[2], 0)', '(-1, output_size[1], output_size[2], -1)'], {}), '(inputs[ii], (0, start_crop[1], start_crop[2], 0), (-1, output_size\n [1], output_size[2], -1))\n', (1397, 1494), True, 'import tensorflow as tf\n')] |
import numpy as np
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
from collections import Counter
import tweepy
import _pickle
import h5py
import sqlite3
import datetime
def most_common(lst):
    """Return the element of *lst* with the highest occurrence count."""
    candidates = set(lst)
    return max(candidates, key=lambda item: lst.count(item))
def load_offline(str):
with open(str, 'rb') as f:
dump = _pickle.load(f)
return dump
# Vocabulary lookup (word -> integer index), built offline and shipped with the app.
word2index = load_offline('app/static/models/word_dict.pkl')
def init_model():
    """Load the four pre-trained Keras models and grab the default TF graph.

    Returns (lstm, perceptron, bilstm, cnn, graph).
    NOTE(review): the bilstm model is never compiled here -- confirm that
    is intentional (predict-only use may not need it).
    """
    lstm = load_model('app/static/models/lstm_1.h5')
    conv = load_model('app/static/models/cnn.h5')
    perceptron = load_model('app/static/models/percept_1.h5')
    conv.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    bidirectional = load_model('app/static/models/bilstm.h5')
    lstm.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    perceptron.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    graph = tf.get_default_graph()
    return lstm, perceptron, bidirectional, conv, graph
# Load all models (and the TF graph) once at import time.
lmodel, percep, bilstm, cnn, graph = init_model()
# NOTE(review): Twitter API credentials are hard-coded and committed here;
# they should be rotated and moved to configuration/environment variables.
auth = tweepy.OAuthHandler('8cBEjwKgM262KDcoJExVPgfwU','<KEY>')
auth.set_access_token('<KEY>','<KEY>')
api = tweepy.API(auth)
def pencode(text):
    """Encode *text* as a multi-hot bag-of-words vector over the vocabulary.

    vector[word2index[w]] is set to 1 for every known word w of the
    whitespace-split text; unknown words are ignored.

    Fix: the previous version wrote ``vector[i] = 0`` (i = token position)
    for unknown words, which could clear a feature bit already set by an
    earlier known word.
    """
    vector = np.zeros(len(word2index))
    for word in text.split(' '):
        try:
            vector[word2index[word]] = 1
        except KeyError:
            # Out-of-vocabulary word: leave the vector untouched.
            pass
    return vector
def lencode(text):
    """Map *text* to vocabulary indices and zero-pad the sequence to length 100.

    Unknown words are encoded as index 0.
    """
    indices = []
    for token in text.split(' '):
        try:
            idx = word2index[token]
        except KeyError:
            idx = 0
        indices.append(idx)
    return pad_sequences([indices], maxlen=100, value=0.)
def lencode_cnn(text):
    """Map *text* to vocabulary indices and zero-pad the sequence to length 200
    (the CNN model's input length). Unknown words are encoded as index 0.
    """
    indices = []
    for token in text.split(' '):
        try:
            idx = word2index[token]
        except KeyError:
            idx = 0
        indices.append(idx)
    return pad_sequences([indices], maxlen=200, value=0.)
def get_most_count(x):
    """Return the most frequent element of iterable *x*."""
    counts = Counter(x)
    ((winner, _count),) = counts.most_common(1)
    return winner
def get_date():
    """Return today's date as an ISO 'YYYY-MM-DD' string."""
    return str(datetime.date.today())
def predictor(query, c_sqlite, conn, currency, current_price):
    """Run all four models on one sentence, persist the majority sentiment,
    and return the per-model class labels as [lstm, perceptron, bilstm, cnn].

    NOTE(review): the bilstm prediction runs outside ``graph.as_default()``
    while the other three run inside it -- confirm this is intentional.
    """
    with graph.as_default():
        lstm_raw = lmodel.predict(lencode(query))
        cnn_raw = cnn.predict(lencode_cnn(query))
        percept_raw = percep.predict(np.expand_dims(pencode(query), axis=0))
    lstm_label = np.argmax(lstm_raw, axis=1)
    cnn_label = np.argmax(cnn_raw, axis=1)
    percept_label = np.argmax(percept_raw, axis=1)
    bilstm_label = np.argmax(bilstm.predict(lencode(query)), axis=1)
    labels = [lstm_label.tolist()[0], percept_label.tolist()[0],
              bilstm_label.tolist()[0], cnn_label.tolist()[0]]
    # Store the majority vote together with price and currency for history plots.
    c_sqlite.execute("INSERT INTO sentiments VALUES (?,?,?,?)",
                     (get_date(), get_most_count(labels), current_price, currency))
    conn.commit()
    return labels
def get_db_results(c_sqlite, currency):
    """Return parallel lists (dates, positive-percentage) for *currency*.

    Groups the stored sentiments by day and computes the share of positive
    (sent=1) rows per day, as a percentage of all rows that day.
    """
    dates = []
    percents = []
    rows = c_sqlite.execute('SELECT timedate, SUM(case when sent=0 then 1 else 0 end) AS `negative`, SUM(case when sent=1 then 1 else 0 end) AS `positive`, COUNT(sent) AS `total` FROM sentiments where currency=? GROUP BY timedate', [currency])
    for timedate, _negative, positive, total in rows:
        dates.append(timedate)
        percents.append(positive / total * 100)
    return dates, percents
def processing_results(query, currency, current_price):
    """Predict sentiment for every sentence in *query* and aggregate.

    Returns a 7-tuple: (per-model majority labels dict,
    [negative, positive] sentence counts, overall majority score,
    per-sentence majority labels, the query itself, its length,
    and the (dates, positive-percent) DB history for *currency*).
    """
    conn = sqlite3.connect("app/static/tweet.db",check_same_thread=False)
    c = conn.cursor()
    predict_list = []
    line_sentiment = []
    # Predict each non-empty sentence; predictor() also persists its result.
    for t in query:
        if not t == '':
            p = predictor(t, c, conn, currency, current_price)
            line_sentiment.append(most_common(p))
            predict_list.append(p)
    # Key insertion order must match the column order of the labels
    # returned by predictor(): [lstm, perceptron, bilstm, cnn].
    data = {'LSTM network': 0,
            'Perceptron network':0,
            'Bi-LSTM':0,
            'Convolutional Neural Network':0
            }
    # Majority label per model, taken across all sentences (columns).
    predict_list = np.array(predict_list)
    i = 0
    for key in data:
        data[key] = get_most_count(predict_list[:, i])
        i += 1
    # Count sentences per sentiment: index 0 = negative, 1 = positive.
    predict_list = predict_list.tolist()
    emotion_sents = [0, 0]
    for p in predict_list:
        if most_common(p) == 0:
            emotion_sents[0] += 1
        else:
            emotion_sents[1] += 1
    # Overall score: majority vote across the four models' majority labels.
    score = most_common(list(data.values()))
    db_date_list, db_percent_list = get_db_results(c, currency)
    conn.close()
    return data, emotion_sents, score, line_sentiment, query, len(query), (db_date_list, db_percent_list)
| [
"keras.models.load_model",
"sqlite3.connect",
"numpy.argmax",
"_pickle.load",
"collections.Counter",
"numpy.array",
"tweepy.API",
"keras.preprocessing.sequence.pad_sequences",
"tensorflow.get_default_graph",
"tweepy.OAuthHandler"
] | [((1164, 1221), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['"""8cBEjwKgM262KDcoJExVPgfwU"""', '"""<KEY>"""'], {}), "('8cBEjwKgM262KDcoJExVPgfwU', '<KEY>')\n", (1183, 1221), False, 'import tweepy\n'), ((1268, 1284), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (1278, 1284), False, 'import tweepy\n'), ((502, 543), 'keras.models.load_model', 'load_model', (['"""app/static/models/lstm_1.h5"""'], {}), "('app/static/models/lstm_1.h5')\n", (512, 543), False, 'from keras.models import load_model\n'), ((558, 596), 'keras.models.load_model', 'load_model', (['"""app/static/models/cnn.h5"""'], {}), "('app/static/models/cnn.h5')\n", (568, 596), False, 'from keras.models import load_model\n'), ((617, 661), 'keras.models.load_model', 'load_model', (['"""app/static/models/percept_1.h5"""'], {}), "('app/static/models/percept_1.h5')\n", (627, 661), False, 'from keras.models import load_model\n'), ((769, 810), 'keras.models.load_model', 'load_model', (['"""app/static/models/bilstm.h5"""'], {}), "('app/static/models/bilstm.h5')\n", (779, 810), False, 'from keras.models import load_model\n'), ((1012, 1034), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1032, 1034), True, 'import tensorflow as tf\n'), ((1674, 1720), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[vector]'], {'maxlen': '(100)', 'value': '(0.0)'}), '([vector], maxlen=100, value=0.0)\n', (1687, 1720), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1944, 1990), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[vector]'], {'maxlen': '(200)', 'value': '(0.0)'}), '([vector], maxlen=200, value=0.0)\n', (1957, 1990), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((3386, 3449), 'sqlite3.connect', 'sqlite3.connect', (['"""app/static/tweet.db"""'], {'check_same_thread': '(False)'}), "('app/static/tweet.db', check_same_thread=False)\n", (3401, 3449), False, 'import sqlite3\n'), ((3821, 3843), 
'numpy.array', 'np.array', (['predict_list'], {}), '(predict_list)\n', (3829, 3843), True, 'import numpy as np\n'), ((374, 389), '_pickle.load', '_pickle.load', (['f'], {}), '(f)\n', (386, 389), False, 'import _pickle\n'), ((2394, 2417), 'numpy.argmax', 'np.argmax', (['lout'], {'axis': '(1)'}), '(lout, axis=1)\n', (2403, 2417), True, 'import numpy as np\n'), ((2430, 2456), 'numpy.argmax', 'np.argmax', (['cnn_out'], {'axis': '(1)'}), '(cnn_out, axis=1)\n', (2439, 2456), True, 'import numpy as np\n'), ((2471, 2501), 'numpy.argmax', 'np.argmax', (['percept_out'], {'axis': '(1)'}), '(percept_out, axis=1)\n', (2480, 2501), True, 'import numpy as np\n'), ((2563, 2592), 'numpy.argmax', 'np.argmax', (['bilstm_out'], {'axis': '(1)'}), '(bilstm_out, axis=1)\n', (2572, 2592), True, 'import numpy as np\n'), ((2048, 2058), 'collections.Counter', 'Counter', (['x'], {}), '(x)\n', (2055, 2058), False, 'from collections import Counter\n')] |
# coding:utf-8
# Linear regression with one variable (gradient descent), exercise-style script.
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LogNorm
from mpl_toolkits.mplot3d import axes3d, Axes3D
from computeCost import *
from gradientDescent import *
from plotData import *
# ===================== Part 1: Plot the data =====================
print('画出数据...')
# Load the txt data
data = np.loadtxt('ex1data1.txt', delimiter=',', usecols=(0, 1))
# First column of the file is X, second column is y
X = data[:, 0]
y = data[:, 1]
m = y.size
# Turn on pyplot interactive mode
plt.ion()
plt.figure(0)
plot_data(X, y) # plotting helper imported from plotData
input('程序已暂停,按“回车”或者其他键继续...')
# ===================== Part 2: Gradient descent =====================
print('运行梯度下降法...')
X = np.c_[np.ones(m), X] # prepend a column of ones (intercept term)
theta = np.zeros(2) # initialize the fit parameters
# Gradient descent settings
iterations = 1500 # number of iterations
alpha = 0.01 # learning rate
# Compute and display the initial cost
print('初始化代价函数: ' + str(compute_cost(X, y, theta)) + ' (这个值大概为32.07)')
# Run the iterations, recording the cost at each step
theta, J_history = gradient_descent(X, y, theta, alpha, iterations)
print('通过梯度下降法计算的θ: ' + str(theta.reshape(2)))
# Plot the linear fit
plt.figure(0)
line1, = plt.plot(X[:, 1], np.dot(X, theta), label='Linear Regression')
plt.legend(handles=[line1])
plt.show()
input('程序已暂停, 按“回车”或其他按键继续...')
# Predict values for populations of 35,000 and 70,000
predict1 = np.dot(np.array([1, 3.5]), theta)
print('当人口 = 35,000, 预测的数字为 {:0.3f} (这个数值应该约为 4519.77)'.format(predict1*10000))
predict2 = np.dot(np.array([1, 7]), theta)
print('当人口 = 70,000, 预测的数字为 {:0.3f} (这个数值应该约为 45342.45)'.format(predict2*10000))
input('程序已暂停,按“回车”或者其他键继续...')
# ===================== Part 3: Visualizing J(theta0, theta1) =====================
print('可视化 J(theta0, theta1) ...')
# Evenly spaced parameter grids over [-10, 10] and [-1, 4]
theta0_vals = np.linspace(-10, 10, 100)
theta1_vals = np.linspace(-1, 4, 100)
# Build the mesh grid
xs, ys = np.meshgrid(theta0_vals, theta1_vals)
# Allocate J over the grid
J_vals = np.zeros(xs.shape)
# Evaluate the cost at every grid point
for i in range(0, theta0_vals.size):
    for j in range(0, theta1_vals.size):
        t = np.array([theta0_vals[i], theta1_vals[j]])
        J_vals[i][j] = compute_cost(X, y, t)
J_vals = np.transpose(J_vals)
# 3D surface plot of the cost
fig1 = plt.figure(1)
ax = fig1.gca(projection='3d')
ax.plot_surface(xs, ys, J_vals)
plt.xlabel(r'$\theta_0$')
plt.ylabel(r'$\theta_1$')
plt.show()
plt.figure(2)
lvls = np.logspace(-2, 3, 20)
# Contour plot with log-spaced levels
plt.contour(xs, ys, J_vals, levels=lvls, norm=LogNorm())
# Mark the computed minimum on the contour plot
plt.plot(theta[0], theta[1], c='r', marker="x")
plt.show()
input('ex1已结束,按“回车”或者其他键退出...')
| [
"numpy.ones",
"matplotlib.pyplot.ylabel",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.logspace",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.dot",
"matplotlib.pyplot.ion",
"numpy.meshgrid",
"numpy.loadtxt"... | [((323, 380), 'numpy.loadtxt', 'np.loadtxt', (['"""ex1data1.txt"""'], {'delimiter': '""","""', 'usecols': '(0, 1)'}), "('ex1data1.txt', delimiter=',', usecols=(0, 1))\n", (333, 380), True, 'import numpy as np\n'), ((455, 464), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (462, 464), True, 'import matplotlib.pyplot as plt\n'), ((465, 478), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (475, 478), True, 'import matplotlib.pyplot as plt\n'), ((697, 708), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (705, 708), True, 'import numpy as np\n'), ((1025, 1038), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (1035, 1038), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1138), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[line1]'}), '(handles=[line1])\n', (1121, 1138), True, 'import matplotlib.pyplot as plt\n'), ((1139, 1149), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1147, 1149), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1663), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(100)'], {}), '(-10, 10, 100)\n', (1649, 1663), True, 'import numpy as np\n'), ((1678, 1701), 'numpy.linspace', 'np.linspace', (['(-1)', '(4)', '(100)'], {}), '(-1, 4, 100)\n', (1689, 1701), True, 'import numpy as np\n'), ((1720, 1757), 'numpy.meshgrid', 'np.meshgrid', (['theta0_vals', 'theta1_vals'], {}), '(theta0_vals, theta1_vals)\n', (1731, 1757), True, 'import numpy as np\n'), ((1781, 1799), 'numpy.zeros', 'np.zeros', (['xs.shape'], {}), '(xs.shape)\n', (1789, 1799), True, 'import numpy as np\n'), ((2000, 2020), 'numpy.transpose', 'np.transpose', (['J_vals'], {}), '(J_vals)\n', (2012, 2020), True, 'import numpy as np\n'), ((2036, 2049), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2046, 2049), True, 'import matplotlib.pyplot as plt\n'), ((2113, 2138), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta_0$"""'], {}), "('$\\\\theta_0$')\n", (2123, 
2138), True, 'import matplotlib.pyplot as plt\n'), ((2139, 2164), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\theta_1$"""'], {}), "('$\\\\theta_1$')\n", (2149, 2164), True, 'import matplotlib.pyplot as plt\n'), ((2165, 2175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2173, 2175), True, 'import matplotlib.pyplot as plt\n'), ((2177, 2190), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (2187, 2190), True, 'import matplotlib.pyplot as plt\n'), ((2198, 2220), 'numpy.logspace', 'np.logspace', (['(-2)', '(3)', '(20)'], {}), '(-2, 3, 20)\n', (2209, 2220), True, 'import numpy as np\n'), ((2299, 2346), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[0]', 'theta[1]'], {'c': '"""r"""', 'marker': '"""x"""'}), "(theta[0], theta[1], c='r', marker='x')\n", (2307, 2346), True, 'import matplotlib.pyplot as plt\n'), ((2347, 2357), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2355, 2357), True, 'import matplotlib.pyplot as plt\n'), ((1066, 1082), 'numpy.dot', 'np.dot', (['X', 'theta'], {}), '(X, theta)\n', (1072, 1082), True, 'import numpy as np\n'), ((1227, 1245), 'numpy.array', 'np.array', (['[1, 3.5]'], {}), '([1, 3.5])\n', (1235, 1245), True, 'import numpy as np\n'), ((1352, 1368), 'numpy.array', 'np.array', (['[1, 7]'], {}), '([1, 7])\n', (1360, 1368), True, 'import numpy as np\n'), ((661, 671), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (668, 671), True, 'import numpy as np\n'), ((1902, 1944), 'numpy.array', 'np.array', (['[theta0_vals[i], theta1_vals[j]]'], {}), '([theta0_vals[i], theta1_vals[j]])\n', (1910, 1944), True, 'import numpy as np\n'), ((2275, 2284), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (2282, 2284), False, 'from matplotlib.colors import LogNorm\n')] |
import numpy as np
def greedy_max(g):
    """Repeatedly delete the highest-degree vertex until no edges remain,
    and return the number of surviving vertices."""
    matrix = np.array(g.get_adjacency()._get_data())
    while np.sum(matrix) > 0:
        degrees = np.sum(matrix, axis=1)
        victim = np.argmax(degrees)
        matrix = np.delete(np.delete(matrix, victim, 0), victim, 1)
    return matrix.shape[0]
def greedy_min(g):
    """Repeatedly pick the lowest-degree vertex and delete all of its
    neighbours, until no edges remain; return the survivor count."""
    matrix = np.array(g.get_adjacency()._get_data())
    while np.sum(matrix) > 0:
        degrees = np.sum(matrix, axis=1)
        keeper = np.argmin(degrees)
        neighbours = np.nonzero(matrix[keeper, :])
        matrix = np.delete(matrix, neighbours, 0)
        matrix = np.delete(matrix, neighbours, 1)
    return matrix.shape[0]
| [
"numpy.sum",
"numpy.nonzero",
"numpy.delete"
] | [((99, 110), 'numpy.sum', 'np.sum', (['adj'], {}), '(adj)\n', (105, 110), True, 'import numpy as np\n'), ((179, 205), 'numpy.delete', 'np.delete', (['adj', 'max_row', '(0)'], {}), '(adj, max_row, 0)\n', (188, 205), True, 'import numpy as np\n'), ((220, 246), 'numpy.delete', 'np.delete', (['adj', 'max_row', '(1)'], {}), '(adj, max_row, 1)\n', (229, 246), True, 'import numpy as np\n'), ((351, 362), 'numpy.sum', 'np.sum', (['adj'], {}), '(adj)\n', (357, 362), True, 'import numpy as np\n'), ((437, 464), 'numpy.nonzero', 'np.nonzero', (['adj[min_row, :]'], {}), '(adj[min_row, :])\n', (447, 464), True, 'import numpy as np\n'), ((478, 506), 'numpy.delete', 'np.delete', (['adj', 'to_remove', '(0)'], {}), '(adj, to_remove, 0)\n', (487, 506), True, 'import numpy as np\n'), ((521, 549), 'numpy.delete', 'np.delete', (['adj', 'to_remove', '(1)'], {}), '(adj, to_remove, 1)\n', (530, 549), True, 'import numpy as np\n'), ((144, 163), 'numpy.sum', 'np.sum', (['adj'], {'axis': '(1)'}), '(adj, axis=1)\n', (150, 163), True, 'import numpy as np\n'), ((396, 415), 'numpy.sum', 'np.sum', (['adj'], {'axis': '(1)'}), '(adj, axis=1)\n', (402, 415), True, 'import numpy as np\n')] |
import numpy as np
# from sklearn.datasets import fetch_mldata
# import matplotlib.pyplot as plt
# from tensorflow.contrib import keras
def add_bias_feature(X):
    """Prepend a column of ones (the bias/intercept feature) to *X*."""
    ones_column = np.ones(len(X))
    return np.column_stack((ones_column, X))
def sigmoid(x):
    """Logistic function 1 / (1 + e^(-x)), applied element-wise."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
def hypotheses(W, X):
    """Logistic-regression hypothesis: sigmoid of the linear scores X @ W."""
    return sigmoid(X.dot(W))
def cost(W, X, Y, eps=0.01):
    """Smoothed binary cross-entropy between labels *Y* and hypotheses(W, X).

    *eps* pads the log arguments so log(0) never occurs.
    """
    h = hypotheses(W, X)
    log_likelihood = Y * np.log(h + eps) + (1 - Y) * np.log(1 - h + eps)
    return -log_likelihood.mean()
def gradient_step(W, X, Y, learning_rate=0.01):
    """One batch gradient-descent update of W for logistic regression."""
    residuals = hypotheses(W, X) - Y
    gradient = X.T.dot(residuals) / len(residuals)
    return W - gradient * learning_rate
def prepare_lin_reg():
    """Train a linear (one-layer) classifier on MNIST by batch gradient
    descent, display one prediction, and save the weights.

    NOTE(review): the ``fetch_mldata`` and ``plt`` imports at the top of
    this file are commented out, so this function raises NameError as
    written; ``fetch_mldata`` was also removed from scikit-learn
    (``fetch_openml`` is the replacement).
    """
    mnist_dir = './../../../data/mnist'
    mnist = fetch_mldata('MNIST original', data_home=mnist_dir)
    examples_count = mnist.data.shape[0]
    labels = mnist.target.astype(int)
    # Scale pixel values into [0, 1].
    normalized_pixels_nobias = mnist.data / 255
    # One-hot encode the digit labels into an (examples, 10) matrix.
    one_hot_labels = np.zeros((examples_count, 10))
    one_hot_labels[np.arange(examples_count), labels] = 1
    print(one_hot_labels[0])
    def display_mnist_elem(index):
        # NOTE(review): uses the enclosing ``rand_no`` (assigned later in the
        # outer function, resolved at call time) instead of the ``index`` parameter.
        img = mnist.data[rand_no]
        pixels = img.reshape(28, 28) / 255
        plt.imshow(pixels, cmap='gray')
        plt.show()
        print('label:', labels[rand_no])
        print('label as a one-hot vector:', one_hot_labels[rand_no])
    normalized_pixels = add_bias_feature(normalized_pixels_nobias)
    # Random train/test split: the first 60000 shuffled indices become training data.
    rand_numbers = np.arange(examples_count)
    np.random.shuffle(rand_numbers)
    train_count = 60000
    train_numbers = rand_numbers[:train_count]
    # NOTE(review): ``i in train_numbers`` scans a 60000-element array per
    # element (O(n*m)); converting the indices to a set first would be far faster.
    X_train = np.array([normalized_pixels[i] for i in range(examples_count) if i in train_numbers])
    Y_train = np.array([one_hot_labels[i] for i in range(examples_count) if i in train_numbers])
    X_test = np.array([normalized_pixels[i] for i in range(examples_count) if i not in train_numbers])
    Y_test = np.array([mnist.target[i] for i in range(examples_count) if i not in train_numbers])
    W = np.random.random((785, 10)) # 784 + bias feature
    # costs = []
    steps = 1000
    for i in range(steps):
        print(i)
        W = gradient_step(W, X_train, Y_train)
        # print(cost(W, X_train, Y_train))
    # Show one random example together with the model's prediction.
    rand_no = np.random.randint(0, examples_count)
    display_mnist_elem(rand_no)
    img_pixels = normalized_pixels[rand_no]
    predicted_H = hypotheses(W, img_pixels)
    predicted_class = np.argmax(predicted_H)
    print('predicted hypotheses:', predicted_H)
    print('predicted_class:', predicted_class)
    np.save('mnist_linear_reg.weights', W)
def prepare_nn():
    """Train a small dense Keras network (784 -> 100 relu -> 10 softmax)
    on MNIST and save it to 'mnist.keras'.

    NOTE(review): the ``fetch_mldata`` and ``keras`` imports at the top of
    this file are commented out, so this function raises NameError as written.
    """
    mnist_dir = './../../../data/mnist'
    print('fetching')
    mnist = fetch_mldata('MNIST original', data_home=mnist_dir)
    examples_count = mnist.data.shape[0]
    labels = mnist.target.astype(int)
    normalized_pixels_nobias = mnist.data / 255
    # One-hot encode the digit labels into an (examples, 10) matrix.
    one_hot_labels = np.zeros((examples_count, 10))
    one_hot_labels[np.arange(examples_count), labels] = 1
    normalized_pixels = normalized_pixels_nobias
    # normalization is important
    normalized_pixels = normalized_pixels
    # Random train/test split: the first 60000 shuffled indices become training data.
    rand_numbers = np.arange(examples_count)
    np.random.shuffle(rand_numbers)
    train_count = 60000
    train_numbers = rand_numbers[:train_count]
    # NOTE(review): membership tests against an array make this O(n*m);
    # a set of indices would be much faster.
    X_train = np.array([normalized_pixels[i] for i in range(examples_count) if i in train_numbers])
    Y_train = np.array([one_hot_labels[i] for i in range(examples_count) if i in train_numbers])
    X_test = np.array([normalized_pixels[i] for i in range(examples_count) if i not in train_numbers])
    Y_test = np.array([one_hot_labels[i] for i in range(examples_count) if i not in train_numbers])
    model = keras.models.Sequential([
        keras.layers.Dense(100, activation='relu', input_shape=(784,)),
        keras.layers.Dense(10, activation='softmax') # input shape inferred automatically, yaay!
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(), # fresh Adam optimizer instance (original comment was a Polish pun on "Adam")
                  metrics=['accuracy'])
    model.fit(X_train, Y_train, epochs=10)
    model.save('mnist.keras')
# prepare_nn() | [
"numpy.random.random",
"numpy.log",
"numpy.argmax",
"numpy.exp",
"numpy.random.randint",
"numpy.zeros",
"numpy.save",
"numpy.arange",
"numpy.random.shuffle"
] | [((965, 995), 'numpy.zeros', 'np.zeros', (['(examples_count, 10)'], {}), '((examples_count, 10))\n', (973, 995), True, 'import numpy as np\n'), ((1453, 1478), 'numpy.arange', 'np.arange', (['examples_count'], {}), '(examples_count)\n', (1462, 1478), True, 'import numpy as np\n'), ((1483, 1514), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_numbers'], {}), '(rand_numbers)\n', (1500, 1514), True, 'import numpy as np\n'), ((1996, 2023), 'numpy.random.random', 'np.random.random', (['(785, 10)'], {}), '((785, 10))\n', (2012, 2023), True, 'import numpy as np\n'), ((2230, 2266), 'numpy.random.randint', 'np.random.randint', (['(0)', 'examples_count'], {}), '(0, examples_count)\n', (2247, 2266), True, 'import numpy as np\n'), ((2409, 2431), 'numpy.argmax', 'np.argmax', (['predicted_H'], {}), '(predicted_H)\n', (2418, 2431), True, 'import numpy as np\n'), ((2533, 2571), 'numpy.save', 'np.save', (['"""mnist_linear_reg.weights"""', 'W'], {}), "('mnist_linear_reg.weights', W)\n", (2540, 2571), True, 'import numpy as np\n'), ((2867, 2897), 'numpy.zeros', 'np.zeros', (['(examples_count, 10)'], {}), '((examples_count, 10))\n', (2875, 2897), True, 'import numpy as np\n'), ((3102, 3127), 'numpy.arange', 'np.arange', (['examples_count'], {}), '(examples_count)\n', (3111, 3127), True, 'import numpy as np\n'), ((3132, 3163), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_numbers'], {}), '(rand_numbers)\n', (3149, 3163), True, 'import numpy as np\n'), ((238, 248), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (244, 248), True, 'import numpy as np\n'), ((396, 411), 'numpy.log', 'np.log', (['(h + eps)'], {}), '(h + eps)\n', (402, 411), True, 'import numpy as np\n'), ((424, 443), 'numpy.log', 'np.log', (['(1 - h + eps)'], {}), '(1 - h + eps)\n', (430, 443), True, 'import numpy as np\n'), ((1015, 1040), 'numpy.arange', 'np.arange', (['examples_count'], {}), '(examples_count)\n', (1024, 1040), True, 'import numpy as np\n'), ((2917, 2942), 'numpy.arange', 'np.arange', 
(['examples_count'], {}), '(examples_count)\n', (2926, 2942), True, 'import numpy as np\n')] |
import time
import wave
import numpy as np
import scipy as sp
from scipy import signal
from scipy.io import wavfile
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
plt.style.use('dark_background') # comment out for "light" theme
plt.rcParams["font.size"] = 16
class TrackedArray():
    """Array wrapper that records every element access for later replay.

    kind="full" additionally snapshots the entire array after each access,
    which the animation code uses to redraw the bars frame by frame.
    """

    def __init__(self, arr, kind="minimal"):
        # Private copy: the caller's array is never mutated through us.
        self.arr = np.copy(arr)
        self.kind = kind
        self.reset()

    def reset(self):
        """Discard the recorded access history."""
        self.indices = []
        self.values = []
        self.access_type = []
        self.full_copies = []

    def track(self, key, access_type):
        """Record a single access of *access_type* at index *key*."""
        self.indices.append(key)
        self.values.append(self.arr[key])
        self.access_type.append(access_type)
        if self.kind == "full":
            self.full_copies.append(np.copy(self.arr))

    def GetActivity(self, idx=None):
        """Return (index, op) for access *idx*, or the full history when idx is None."""
        if idx is None:
            return list(zip(self.indices, self.access_type))
        return (self.indices[idx], self.access_type[idx])

    def __delitem__(self, key):
        # Log first so the value being deleted is captured.
        self.track(key, "del")
        del self.arr[key]

    def __getitem__(self, key):
        # Log first (records the value about to be read).
        self.track(key, "get")
        return self.arr[key]

    def __setitem__(self, key, value):
        # Write first so track() records the new value.
        self.arr[key] = value
        self.track(key, "set")

    def __len__(self):
        return len(self.arr)

    def __str__(self):
        return str(self.arr)

    def __repr__(self):
        return repr(self.arr)
def freq_map(x, x_min=0, x_max=1000, freq_min=120, freq_max=1200):
    """Linearly map *x* from [x_min, x_max] onto a frequency in
    [freq_min, freq_max] (clamped at the endpoints, as np.interp does)."""
    domain = [x_min, x_max]
    frequency_range = [freq_min, freq_max]
    return np.interp(x, domain, frequency_range)
def freq_sample(freq, dt=1./60., samplerate=44100, oversample=2):
    """Create a sine sample at *freq* Hz spanning dt*oversample seconds.

    The middle ``dt`` seconds play at full amplitude; the padding on either
    side is linearly faded in/out so adjacent chunks don't click.

    Fix: ``np.int`` was removed in NumPy 1.24 -- the builtin ``int``
    (identical truncation for these positive values) is used instead.
    """
    mid_samples = int(dt * samplerate)
    pad_samples = int(mid_samples * (oversample - 1) / 2)
    total_samples = mid_samples + 2 * pad_samples
    y = np.sin(2 * np.pi * freq * np.linspace(0, dt, total_samples))
    # Fade in over the leading pad and fade out over the trailing pad.
    y[:pad_samples] = y[:pad_samples] * np.linspace(0, 1, pad_samples)
    y[- pad_samples:] = y[len(y) - pad_samples:] * \
        np.linspace(1, 0, pad_samples)
    return y
# Visualization / sonification parameters.
N = 30 # number of values to sort (bar count; also the x-axis limit)
FPS = 60 # frames per second for the animation and audio chunking
OVERSAMPLE = 2 # each tone lasts OVERSAMPLE chunks, with fade in/out
F_SAMPLE = 44100 # audio sample rate in Hz
arr = np.round(np.linspace(0, 1000, N), 0) # N evenly spaced values in [0, 1000]
np.random.seed(0)
np.random.shuffle(arr)
# Wrap the array so every access is recorded ("full" also snapshots it per access).
arr = TrackedArray(arr, "full")
np.random.seed(0)
# ##############################################
# ########### DEMO 1 - Insertion Sort ##########
# ##############################################
# sorter = "Insertion"
# t0 = time.perf_counter()
# i = 1
# while (i < len(arr)):
# j = i
# while ((j > 0) and (arr[j-1] > arr[j])):
# temp = arr[j-1]
# arr[j-1] = arr[j]
# arr[j] = temp
# j -= 1
# i += 1
# t_ex = time.perf_counter() - t0
# ##############################################
##############################################
########### DEMO 2 - Quick sort ##############
##############################################
sorter = "Quick" # label used in the figure title, wav filename and frame filenames
def quicksort(A, lo, hi):
    """Recursively sort A[lo..hi] in place using partition()."""
    if lo >= hi:
        return
    pivot_index = partition(A, lo, hi)
    quicksort(A, lo, pivot_index - 1)
    quicksort(A, pivot_index + 1, hi)
def partition(A, lo, hi):
    """Partition A[lo..hi] around the pivot A[hi]; return the pivot's final index.

    The element-by-element temp swaps are kept (instead of tuple swaps) so
    the get/set sequence seen by a tracking wrapper stays identical.
    """
    pivot = A[hi]
    boundary = lo
    for scan in range(lo, hi):
        if A[scan] < pivot:
            held = A[boundary]
            A[boundary] = A[scan]
            A[scan] = held
            boundary += 1
    held = A[boundary]
    A[boundary] = A[hi]
    A[hi] = held
    return boundary
# Time the sort; the tracked array records every access quicksort makes.
t0 = time.perf_counter()
quicksort(arr, 0, len(arr)-1)
t_ex = time.perf_counter() - t0
##############################################
print(f"---------- {sorter} Sort ----------")
print(f"Array Sorted in {t_ex*1E3:.1f} ms | {len(arr.full_copies):.0f} "
      f"array access operations were performed")
# Synthesize one audio tone per recorded access value.
# NOTE(review): np.int and np.float were removed in NumPy 1.24 -- the
# lines below raise AttributeError on modern NumPy (use int/float instead).
wav_data = np.zeros(np.int(F_SAMPLE*len(arr.values)*1./FPS), dtype=np.float)
dN = np.int(F_SAMPLE * 1./FPS) # how many samples is each chunk
for i, value in enumerate(arr.values):
    freq = freq_map(value)
    sample = freq_sample(freq, dt=1./FPS, samplerate=F_SAMPLE,
                         oversample=OVERSAMPLE)
    # Center each (oversampled) tone on its chunk; neighbouring tones overlap-add.
    idx_0 = np.int((i+0.5)*dN - len(sample)/2)
    idx_1 = idx_0 + len(sample)
    try:
        wav_data[idx_0:idx_1] = wav_data[idx_0:idx_1] + sample
    except ValueError:
        print(f"Failed to generate {i:.0f}th index sample")
# Normalize to full-scale 16-bit PCM and write the soundtrack.
wav_data = (2**15*(wav_data/np.max(np.abs(wav_data)))).astype(np.int16)
sp.io.wavfile.write(f"{sorter}_sound.wav", F_SAMPLE, wav_data)
# Bar-chart figure that update() redraws once per recorded access.
fig, ax = plt.subplots(figsize=(16, 8))
container = ax.bar(np.arange(0, len(arr), 1),
                   arr.full_copies[0], align="edge", width=0.8)
fig.suptitle(f"{sorter} sort")
ax.set(xlabel="Index", ylabel="Value")
ax.set_xlim([0, N])
txt = ax.text(0.01, 0.99, "", ha="left", va="top", transform=ax.transAxes)
def update(frame):
    """FuncAnimation callback: redraw all bars for array snapshot *frame*."""
    txt.set_text(f"Accesses = {frame}")
    # Reset every bar to the snapshot heights and the default color.
    for bar, bar_height in zip(container.patches, arr.full_copies[frame]):
        bar.set_height(bar_height)
        bar.set_color("#1f77b4")
    # Highlight the element touched by this access: magenta = read, red = write.
    touched, operation = arr.GetActivity(frame)
    if operation == "get":
        container.patches[touched].set_color("magenta")
    elif operation == "set":
        container.patches[touched].set_color("red")
    fig.savefig(f"frames/{sorter}_frame{frame:05.0f}.png")
    return (txt, *container)
# One animation frame per recorded access; interval matches the audio chunk rate.
ani = FuncAnimation(fig, update, frames=range(len(arr.full_copies)),
                    blit=True, interval=1000./FPS, repeat=False)
| [
"numpy.copy",
"numpy.abs",
"time.perf_counter",
"matplotlib.pyplot.style.use",
"numpy.linspace",
"numpy.random.seed",
"scipy.io.wavfile.write",
"numpy.interp",
"numpy.int",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle"
] | [((205, 237), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (218, 237), True, 'import matplotlib.pyplot as plt\n'), ((2519, 2536), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2533, 2536), True, 'import numpy as np\n'), ((2538, 2560), 'numpy.random.shuffle', 'np.random.shuffle', (['arr'], {}), '(arr)\n', (2555, 2560), True, 'import numpy as np\n'), ((2599, 2616), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2613, 2616), True, 'import numpy as np\n'), ((3717, 3736), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3734, 3736), False, 'import time\n'), ((4114, 4142), 'numpy.int', 'np.int', (['(F_SAMPLE * 1.0 / FPS)'], {}), '(F_SAMPLE * 1.0 / FPS)\n', (4120, 4142), True, 'import numpy as np\n'), ((4683, 4745), 'scipy.io.wavfile.write', 'sp.io.wavfile.write', (['f"""{sorter}_sound.wav"""', 'F_SAMPLE', 'wav_data'], {}), "(f'{sorter}_sound.wav', F_SAMPLE, wav_data)\n", (4702, 4745), True, 'import scipy as sp\n'), ((4759, 4788), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (4771, 4788), True, 'import matplotlib.pyplot as plt\n'), ((1800, 1850), 'numpy.interp', 'np.interp', (['x', '[x_min, x_max]', '[freq_min, freq_max]'], {}), '(x, [x_min, x_max], [freq_min, freq_max])\n', (1809, 1850), True, 'import numpy as np\n'), ((2031, 2054), 'numpy.int', 'np.int', (['(dt * samplerate)'], {}), '(dt * samplerate)\n', (2037, 2054), True, 'import numpy as np\n'), ((2074, 2116), 'numpy.int', 'np.int', (['(mid_samples * (oversample - 1) / 2)'], {}), '(mid_samples * (oversample - 1) / 2)\n', (2080, 2116), True, 'import numpy as np\n'), ((2490, 2513), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', 'N'], {}), '(0, 1000, N)\n', (2501, 2513), True, 'import numpy as np\n'), ((3780, 3799), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3797, 3799), False, 'import time\n'), ((398, 410), 'numpy.copy', 'np.copy', 
(['arr'], {}), '(arr)\n', (405, 410), True, 'import numpy as np\n'), ((2275, 2305), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'pad_samples'], {}), '(0, 1, pad_samples)\n', (2286, 2305), True, 'import numpy as np\n'), ((2369, 2399), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', 'pad_samples'], {}), '(1, 0, pad_samples)\n', (2380, 2399), True, 'import numpy as np\n'), ((2199, 2232), 'numpy.linspace', 'np.linspace', (['(0)', 'dt', 'total_samples'], {}), '(0, dt, total_samples)\n', (2210, 2232), True, 'import numpy as np\n'), ((833, 850), 'numpy.copy', 'np.copy', (['self.arr'], {}), '(self.arr)\n', (840, 850), True, 'import numpy as np\n'), ((4643, 4659), 'numpy.abs', 'np.abs', (['wav_data'], {}), '(wav_data)\n', (4649, 4659), True, 'import numpy as np\n')] |
import numpy as np
from matplotlib import pyplot as plt
from constraints import generate_constraints_function
def plot_convergence(xs, objective, robot_arm, show=False):
    """
    Plot the convergence of an optimization algorithm in three panels:
    distance to the final iterate, objective values over iterations, and the
    total absolute equality-constraint violation. The figure is saved to
    figures/equality_constraint.pdf and optionally shown interactively.

    Input:
        xs        - The inputs generated iteratively by the optimization
                    method, given as column vectors in an n times i array,
                    where n is the dimension of the domain space, and i is
                    the number of iterations performed by the method.
        objective - The function to be minimized by the optimization method.
        robot_arm - Problem instance forwarded to
                    generate_constraints_function to build the equality
                    constraint function.
        show      - If True, display the figure with plt.show() after saving.
    """
    # Make sure the xs are never mutated (because I'm a bad programmer).
    # NOTE: this also makes the caller's array read-only as a side effect.
    xs.setflags(write=False)
    # Dimension of domain space and number of method iterations
    n, i = xs.shape
    # The final solution of the method is used as a numerical reference
    # solution
    minimizer = xs[:, -1]
    minimum = objective(minimizer)
    # Euclidean distance from every iterate to the final minimizer
    remaining_distance = np.linalg.norm(
        xs - minimizer.reshape((n, 1)),
        ord=2,
        axis=0
    )
    assert remaining_distance.shape == (i,)
    # Calculate the decrement in the objective values
    objective_values = np.apply_along_axis(
        objective,
        axis=0,
        arr=xs
    )
    remaining_decline = objective_values - minimum
    assert remaining_decline.shape == (i,)
    # Calculate the change in constraint values over time
    constraints_func = generate_constraints_function(robot_arm)
    constraints_values = np.apply_along_axis(
        constraints_func,
        axis=0,
        arr=xs
    )
    print('Constraint_values; ', constraints_values)
    # Total absolute constraint violation per iteration
    constraints_values = np.sum(np.abs(constraints_values), axis=0)
    # Create three subplots, one showing convergence of the minimizer,
    # the other showing the convergence to the minimum (input vs. output),
    # and the third the values of the constraints over time
    plt.style.use('ggplot')
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    # Plot minimizer convergence.
    # Bug fix: plt.subplot('131') with a string argument was deprecated in
    # Matplotlib 3.3 and removed later; use the integer form instead.
    ax = plt.subplot(1, 3, 1)
    ax.plot(remaining_distance[:-1])
    ax.set_yscale('log')
    ax.set_title('Convergence towards \n optimal configuration')
    ax.set_ylabel(r'$||\Theta - \Theta^*||$')
    ax.set_xlabel(r'Iteration number')
    # Plot minimum convergence
    ax = plt.subplot(1, 3, 2)
    ax.plot(objective_values)
    ax.set_title('Objective values')
    ax.set_ylabel(r'$E(\Theta)$')
    ax.set_xlabel(r'Iteration number')
    # Plot values of constraints
    ax = plt.subplot(1, 3, 3)
    ax.plot(constraints_values)
    ax.set_title('Equality constraint values')
    ax.set_ylabel(r'$\sum_{i=1}^{s}(|c_i,x(\Theta)| + |c_i,y(\Theta)|)$')
    ax.set_xlabel(r'Iteration number')
    fig = plt.gcf()
    fig.set_size_inches(18.5, 5)
    plt.savefig('figures/equality_constraint.pdf', bbox_inches='tight')
    if show:
        plt.show()
| [
"numpy.abs",
"matplotlib.pyplot.savefig",
"constraints.generate_constraints_function",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.style.use",
"numpy.apply_along_axis",
"numpy.linalg.norm",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
] | [((1107, 1156), 'numpy.linalg.norm', 'np.linalg.norm', (['remaining_distance'], {'ord': '(2)', 'axis': '(0)'}), '(remaining_distance, ord=2, axis=0)\n', (1121, 1156), True, 'import numpy as np\n'), ((1309, 1355), 'numpy.apply_along_axis', 'np.apply_along_axis', (['objective'], {'axis': '(0)', 'arr': 'xs'}), '(objective, axis=0, arr=xs)\n', (1328, 1355), True, 'import numpy as np\n'), ((1562, 1602), 'constraints.generate_constraints_function', 'generate_constraints_function', (['robot_arm'], {}), '(robot_arm)\n', (1591, 1602), False, 'from constraints import generate_constraints_function\n'), ((1628, 1681), 'numpy.apply_along_axis', 'np.apply_along_axis', (['constraints_func'], {'axis': '(0)', 'arr': 'xs'}), '(constraints_func, axis=0, arr=xs)\n', (1647, 1681), True, 'import numpy as np\n'), ((2044, 2067), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (2057, 2067), True, 'from matplotlib import pyplot as plt\n'), ((2072, 2099), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (2078, 2099), True, 'from matplotlib import pyplot as plt\n'), ((2104, 2134), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (2110, 2134), True, 'from matplotlib import pyplot as plt\n'), ((2178, 2196), 'matplotlib.pyplot.subplot', 'plt.subplot', (['"""131"""'], {}), "('131')\n", (2189, 2196), True, 'from matplotlib import pyplot as plt\n'), ((2450, 2468), 'matplotlib.pyplot.subplot', 'plt.subplot', (['"""132"""'], {}), "('132')\n", (2461, 2468), True, 'from matplotlib import pyplot as plt\n'), ((2652, 2670), 'matplotlib.pyplot.subplot', 'plt.subplot', (['"""133"""'], {}), "('133')\n", (2663, 2670), True, 'from matplotlib import pyplot as plt\n'), ((2874, 2883), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2881, 2883), True, 'from matplotlib import pyplot as plt\n'), ((2921, 2988), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""figures/equality_constraint.pdf"""'], {'bbox_inches': '"""tight"""'}), "('figures/equality_constraint.pdf', bbox_inches='tight')\n", (2932, 2988), True, 'from matplotlib import pyplot as plt\n'), ((1797, 1823), 'numpy.abs', 'np.abs', (['constraints_values'], {}), '(constraints_values)\n', (1803, 1823), True, 'import numpy as np\n'), ((3019, 3029), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3027, 3029), True, 'from matplotlib import pyplot as plt\n')] |
# Copyright(c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import unittest
import pandas as pd
import numpy as np
from cmdty_storage import multi_factor as mf, CmdtyStorage, three_factor_seasonal_value, \
multi_factor_value
from datetime import date
import itertools
from tests import utils
# TODO regression with antithetic
class TestSpotPriceSim(unittest.TestCase):
    """Regression test pinning MultiFactorSpotSim output for a fixed seed."""
    def test_regression(self):
        # Three-factor daily model: each entry is (mean reversion, vol curve).
        factors = [  # Tuples where 1st element is factor mean-reversion, 2nd element is factor vol curve
            (0.0, {date(2020, 8, 1): 0.35,
                   '2021-01-15': 0.29,  # Can use string to specify forward delivery date
                   date(2021, 7, 30): 0.32}),
            # factor vol can also be specified as a pandas Series
            (2.5, pd.Series(data=[0.15, 0.18, 0.21],
                            index=pd.PeriodIndex(data=['2020-08-01', '2021-01-15', '2021-07-30'], freq='D'))),
            (16.2, {date(2020, 8, 1): 0.95,
                    '2021-01-15': 0.92,
                    date(2021, 7, 30): 0.89}),
        ]
        # Instantaneous correlation matrix between the three factors.
        factor_corrs = np.array([
            [1.0, 0.6, 0.3],
            [0.6, 1.0, 0.4],
            [0.3, 0.4, 1.0]
        ])
        # Like with factor vol, the fwd_curve can be a pandas Series object
        fwd_curve = {
            '2020-08-01': 56.85,
            pd.Period('2021-01-15', freq='D'): 59.08,
            date(2021, 7, 30): 62.453
        }
        current_date = date(2020, 7, 27)
        # Demonstrates different ways to specify spot periods to simulate. Easier in practice to just use
        # keys of fwd_curve
        spot_periods_to_sim = [pd.Period('2020-08-01'), '2021-01-15', date(2021, 7, 30)]
        random_seed = 12
        spot_simulator = mf.MultiFactorSpotSim('D', factors, factor_corrs, current_date, fwd_curve,
                                              spot_periods_to_sim, random_seed)
        num_sims = 4
        sim_spot_prices = spot_simulator.simulate(num_sims)
        # NOTE(review): len() appears to give the number of simulated periods
        # while [i] selects simulation i — confirm against the simulate() API.
        self.assertEqual(3, len(sim_spot_prices))
        sim1 = sim_spot_prices[0]
        self.assertEqual(52.59976397688973, sim1['2020-08-01'])
        self.assertEqual(57.559631642935514, sim1['2021-01-15'])
        self.assertEqual(89.40526992772634, sim1['2021-07-30'])
        sim2 = sim_spot_prices[1]
        self.assertEqual(46.1206448628463, sim2['2020-08-01'])
        self.assertEqual(72.0381089486175, sim2['2021-01-15'])
        self.assertEqual(85.18869803117379, sim2['2021-07-30'])
        sim3 = sim_spot_prices[2]
        self.assertEqual(58.15838580682589, sim3['2020-08-01'])
        self.assertEqual(82.49607173562342, sim3['2021-01-15'])
        self.assertEqual(138.68587285875978, sim3['2021-07-30'])
        sim4 = sim_spot_prices[3]
        self.assertEqual(65.500441945042979, sim4['2020-08-01'])
        self.assertEqual(42.812676607997183, sim4['2021-01-15'])
        self.assertEqual(76.586790647813046, sim4['2021-07-30'])
class TestMultiFactorModel(unittest.TestCase):
    """Analytic sanity checks on MultiFactorModel integrated vol/corr/covar."""
    # 25 near-dated daily periods plus 25 periods ten years out, covering both
    # short and very long forward horizons.
    _short_plus_long_indices = pd.period_range(start='2020-09-01', periods=25, freq='D') \
        .append(pd.period_range(start='2030-09-01', periods=25, freq='D'))
    # One factor with zero mean reversion.
    _1f_0_mr_model = mf.MultiFactorModel('D', [(0.0, {'2020-09-01': 0.36, '2020-10-01': 0.29, '2020-11-01': 0.23})])
    # One factor with positive mean reversion.
    _1f_pos_mr_model = mf.MultiFactorModel('D', [(2.5, pd.Series(data=np.linspace(0.65, 0.38, num=50),
                                                              index=_short_plus_long_indices))])
    # Canonical two-factor model: one non-mean-reverting and one
    # mean-reverting factor, correlated at 0.87.
    _2f_canonical_model = mf.MultiFactorModel('D',
                                            factors=[(0.0, pd.Series(data=np.linspace(0.53, 0.487, num=50),
                                                                       index=_short_plus_long_indices)),
                                                     (2.5, pd.Series(data=np.linspace(1.45, 1.065, num=50),
                                                                       index=_short_plus_long_indices))],
                                            factor_corrs=0.87)  # If only 2 factors can supply a float for factor_corrs rather than a matrix
    def test_single_non_mean_reverting_factor_implied_vol_equals_factor_vol(self):
        fwd_contract = '2020-09-01'
        implied_vol = self._1f_0_mr_model.integrated_vol(date(2020, 8, 5), date(2020, 8, 30), '2020-09-01')
        factor_vol = self._1f_0_mr_model._factors[0][1][fwd_contract]
        self.assertEqual(factor_vol, implied_vol)
    def test_single_non_mean_reverting_factor_correlations_equal_one(self):
        # With a single driving factor all forward points are perfectly correlated.
        self._assert_cross_correlations_all_one(date(2020, 8, 1), date(2020, 9, 1), self._1f_0_mr_model)
    def test_single_mean_reverting_factor_correlations_equal_one(self):
        self._assert_cross_correlations_all_one(date(2020, 5, 1), date(2020, 9, 1), self._1f_pos_mr_model)
    def _assert_cross_correlations_all_one(self, obs_start, obs_end, model: mf.MultiFactorModel):
        # Helper: assert integrated correlation is ~1 for every distinct pair
        # of forward points of the model's first factor.
        fwd_points = model._factors[0][1].keys()
        for fwd_point_1, fwd_point_2 in itertools.product(fwd_points, fwd_points):
            if fwd_point_1 != fwd_point_2:
                corr = model.integrated_corr(obs_start, obs_end, fwd_point_1, fwd_point_2)
                self.assertAlmostEqual(1.0, corr, places=14)
    def test_single_mean_reverting_factor_variance_far_in_future_equals_zero(self):
        # Mean reversion kills the variance contribution for distant delivery.
        variance = self._1f_pos_mr_model.integrated_variance('2020-08-05', '2020-09-01', fwd_contract='2030-09-15')
        self.assertAlmostEqual(0.0, variance, places=14)
    def test_2f_canonical_vol_far_in_future_equal_non_mr_vol(self):
        # Far out, the mean-reverting factor contributes nothing, so implied
        # vol collapses to the non-mean-reverting factor's vol.
        fwd_contract = '2030-09-15'
        implied_vol = self._2f_canonical_model.integrated_vol('2020-08-05', '2021-08-05', fwd_contract)
        non_mr_factor_vol = self._2f_canonical_model._factors[0][1][fwd_contract]
        self.assertAlmostEqual(non_mr_factor_vol, implied_vol, places=10)
    def test_diff_corr_types_give_same_results(self):
        # factor_corrs may be given as float, int, float ndarray or int
        # ndarray; all four must produce identical integrated covariances.
        factors = [(0.0, pd.Series(data=np.linspace(0.53, 0.487, num=50),
                                    index=self._short_plus_long_indices)),
                   (2.5, pd.Series(data=np.linspace(1.45, 1.065, num=50),
                                    index=self._short_plus_long_indices))]
        two_f_model_float_corr = mf.MultiFactorModel('D', factors=factors, factor_corrs=0.0)
        two_f_model_int_corr = mf.MultiFactorModel('D', factors=factors, factor_corrs=0)
        two_f_model_float_array_corr = mf.MultiFactorModel('D', factors=factors, factor_corrs=np.array([[1.0, 0.0],
                                                                                                 [0.0, 1.0]]))
        two_f_model_int_array_corr = mf.MultiFactorModel('D', factors=factors, factor_corrs=np.array([[1, 0],
                                                                                               [0, 1]]))
        two_f_model_float_corr_covar = two_f_model_float_corr.integrated_covar(date(2020, 8, 5),
                                                                               date(2020, 8, 30), '2020-09-01',
                                                                               '2020-09-20')
        two_f_model_float_array_corr_covar = two_f_model_float_array_corr.integrated_covar(date(2020, 8, 5),
                                                                                           date(2020, 8, 30),
                                                                                           '2020-09-01', '2020-09-20')
        two_f_model_int_corr_covar = two_f_model_int_corr.integrated_covar(date(2020, 8, 5),
                                                                           date(2020, 8, 30), '2020-09-01',
                                                                           '2020-09-20')
        two_f_model_int_array_corr_covar = two_f_model_int_array_corr.integrated_covar(date(2020, 8, 5),
                                                                                       date(2020, 8, 30), '2020-09-01',
                                                                                       '2020-09-20')
        self.assertEqual(two_f_model_float_corr_covar, two_f_model_float_array_corr_covar)
        self.assertEqual(two_f_model_float_corr_covar, two_f_model_int_corr_covar)
        self.assertEqual(two_f_model_float_corr_covar, two_f_model_int_array_corr_covar)
# TODO test MultiFactorModel.for_3_factor_seasonal
class TestMultiFactorValue(unittest.TestCase):
    """Regression tests pinning storage valuations (NPV, deltas, sim panel
    shapes) for fixed seeds under the multi-factor and 3-factor seasonal
    models."""
    def test_multi_factor_value_regression(self):
        # Storage facility: daily granularity, constant rates and costs.
        storage_start = '2019-12-01'
        storage_end = '2020-04-01'
        constant_injection_rate = 700.0
        constant_withdrawal_rate = 700.0
        constant_injection_cost = 1.23
        constant_withdrawal_cost = 0.98
        min_inventory = 0.0
        max_inventory = 100000.0
        cmdty_storage = CmdtyStorage('D', storage_start, storage_end, constant_injection_cost,
                                     constant_withdrawal_cost, min_inventory=min_inventory,
                                     max_inventory=max_inventory,
                                     max_injection_rate=constant_injection_rate,
                                     max_withdrawal_rate=constant_withdrawal_rate)
        inventory = 0.0
        val_date = '2019-08-29'
        # Two-level forward curve: low before mid-March 2020, high after.
        low_price = 23.87
        high_price = 150.32
        date_switch_high_price = '2020-03-12'
        forward_curve = utils.create_piecewise_flat_series([low_price, high_price, high_price],
                                                           [val_date, date_switch_high_price,
                                                            storage_end], freq='D')
        flat_interest_rate = 0.03
        interest_rate_curve = pd.Series(index=pd.period_range(val_date, '2020-06-01', freq='D'))
        interest_rate_curve[:] = flat_interest_rate
        # Multi-Factor parameters
        mean_reversion = 16.2
        spot_volatility = pd.Series(index=pd.period_range(val_date, '2020-06-01', freq='D'))
        spot_volatility[:] = 1.15
        # Settlement rule: cash flows settle on the 20th of the next month.
        def twentieth_of_next_month(period): return period.asfreq('M').asfreq('D', 'end') + 20
        long_term_vol = pd.Series(index=pd.period_range(val_date, '2020-06-01', freq='D'))
        long_term_vol[:] = 0.14
        factors = [(0.0, long_term_vol),
                   (mean_reversion, spot_volatility)]
        factor_corrs = 0.64
        progresses = []
        def on_progress(progress): progresses.append(progress)
        # Simulation parameter
        num_sims = 500
        seed = 11
        fwd_sim_seed = seed  # Temporarily set to pass regression tests
        basis_funcs = '1 + x0 + x0**2 + x1 + x1*x1'
        discount_deltas = False
        multi_factor_val = multi_factor_value(cmdty_storage, val_date, inventory, forward_curve,
                                              interest_rate_curve, twentieth_of_next_month,
                                              factors, factor_corrs, num_sims,
                                              basis_funcs, discount_deltas,
                                              seed=seed,
                                              fwd_sim_seed=fwd_sim_seed,
                                              on_progress_update=on_progress)
        self.assertAlmostEqual(multi_factor_val.npv, 1780380.7581833513, places=6)
        self.assertEqual(123, len(multi_factor_val.deltas))  # TODO look into why deltas is longer than the intrinsic profile
        self.assertEqual(123, len(multi_factor_val.expected_profile))
        self.assertEqual(progresses[-1], 1.0)
        self.assertEqual(245, len(progresses))
        self.assertEqual(1703773.0757192627, multi_factor_val.intrinsic_npv)
        self.assertEqual(123, len(multi_factor_val.intrinsic_profile))
        # Every per-simulation output panel is (num periods, num sims).
        self.assertEqual((123, num_sims), multi_factor_val.sim_spot_regress.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_spot_valuation.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_inventory.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_inject_withdraw.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_cmdty_consumed.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_inventory_loss.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_net_volume.shape)
    def test_three_factor_seasonal_regression(self):
        # Same storage setup as above, valued with the 3-factor seasonal model.
        storage_start = '2019-12-01'
        storage_end = '2020-04-01'
        constant_injection_rate = 700.0
        constant_withdrawal_rate = 700.0
        constant_injection_cost = 1.23
        constant_withdrawal_cost = 0.98
        min_inventory = 0.0
        max_inventory = 100000.0
        cmdty_storage = CmdtyStorage('D', storage_start, storage_end, constant_injection_cost,
                                     constant_withdrawal_cost, min_inventory=min_inventory,
                                     max_inventory=max_inventory,
                                     max_injection_rate=constant_injection_rate,
                                     max_withdrawal_rate=constant_withdrawal_rate)
        inventory = 0.0
        val_date = '2019-08-29'
        low_price = 23.87
        high_price = 150.32
        date_switch_high_price = '2020-03-12'
        forward_curve = utils.create_piecewise_flat_series([low_price, high_price, high_price],
                                                           [val_date, date_switch_high_price,
                                                            storage_end], freq='D')
        flat_interest_rate = 0.03
        interest_rate_curve = pd.Series(index=pd.period_range(val_date, '2020-06-01', freq='D'))
        interest_rate_curve[:] = flat_interest_rate
        # Multi-Factor parameters
        spot_mean_reversion = 16.2
        spot_volatility = 1.15
        seasonal_volatility = 0.18
        long_term_vol = 0.14
        # Settlement rule: cash flows settle on the 20th of the next month.
        def twentieth_of_next_month(period): return period.asfreq('M').asfreq('D', 'end') + 20
        progresses = []
        def on_progress(progress): progresses.append(progress)
        # Simulation parameter
        num_sims = 500
        seed = 11
        fwd_sim_seed = seed  # Temporarily set to pass regression tests
        basis_funcs = '1 + x_st + x_sw + x_lt + x_st**2 + x_sw**2 + x_lt**2'
        discount_deltas = False
        multi_factor_val = three_factor_seasonal_value(cmdty_storage, val_date, inventory, forward_curve,
                                                       interest_rate_curve, twentieth_of_next_month,
                                                       spot_mean_reversion, spot_volatility, long_term_vol,
                                                       seasonal_volatility,
                                                       num_sims,
                                                       basis_funcs,
                                                       discount_deltas,
                                                       seed=seed,
                                                       fwd_sim_seed=fwd_sim_seed,
                                                       on_progress_update=on_progress)
        self.assertAlmostEqual(multi_factor_val.npv, 1766460.137569665, places=6)
        self.assertEqual(123, len(multi_factor_val.deltas))  # TODO look into why deltas is longer than the intrinsic profile
        self.assertEqual(123, len(multi_factor_val.expected_profile))
        self.assertEqual(progresses[-1], 1.0)
        self.assertEqual(245, len(progresses))
        self.assertEqual(1703773.0757192627, multi_factor_val.intrinsic_npv)
        self.assertEqual(123, len(multi_factor_val.intrinsic_profile))
        self.assertEqual((123, num_sims), multi_factor_val.sim_spot_regress.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_spot_valuation.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_inventory.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_inject_withdraw.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_cmdty_consumed.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_inventory_loss.shape)
        self.assertEqual((123, num_sims), multi_factor_val.sim_net_volume.shape)
if __name__ == '__main__':
    # Allow running this module directly to execute the full test suite.
    unittest.main()
| [
"cmdty_storage.multi_factor.MultiFactorSpotSim",
"cmdty_storage.CmdtyStorage",
"cmdty_storage.multi_factor.MultiFactorModel",
"cmdty_storage.three_factor_seasonal_value",
"itertools.product",
"tests.utils.create_piecewise_flat_series",
"numpy.array",
"numpy.linspace",
"pandas.period_range",
"datet... | [((4232, 4331), 'cmdty_storage.multi_factor.MultiFactorModel', 'mf.MultiFactorModel', (['"""D"""', "[(0.0, {'2020-09-01': 0.36, '2020-10-01': 0.29, '2020-11-01': 0.23})]"], {}), "('D', [(0.0, {'2020-09-01': 0.36, '2020-10-01': 0.29,\n '2020-11-01': 0.23})])\n", (4251, 4331), True, 'from cmdty_storage import multi_factor as mf, CmdtyStorage, three_factor_seasonal_value, multi_factor_value\n'), ((17357, 17372), 'unittest.main', 'unittest.main', ([], {}), '()\n', (17370, 17372), False, 'import unittest\n'), ((2141, 2202), 'numpy.array', 'np.array', (['[[1.0, 0.6, 0.3], [0.6, 1.0, 0.4], [0.3, 0.4, 1.0]]'], {}), '([[1.0, 0.6, 0.3], [0.6, 1.0, 0.4], [0.3, 0.4, 1.0]])\n', (2149, 2202), True, 'import numpy as np\n'), ((2506, 2523), 'datetime.date', 'date', (['(2020)', '(7)', '(27)'], {}), '(2020, 7, 27)\n', (2510, 2523), False, 'from datetime import date\n'), ((2798, 2910), 'cmdty_storage.multi_factor.MultiFactorSpotSim', 'mf.MultiFactorSpotSim', (['"""D"""', 'factors', 'factor_corrs', 'current_date', 'fwd_curve', 'spot_periods_to_sim', 'random_seed'], {}), "('D', factors, factor_corrs, current_date, fwd_curve,\n spot_periods_to_sim, random_seed)\n", (2819, 2910), True, 'from cmdty_storage import multi_factor as mf, CmdtyStorage, three_factor_seasonal_value, multi_factor_value\n'), ((4152, 4209), 'pandas.period_range', 'pd.period_range', ([], {'start': '"""2030-09-01"""', 'periods': '(25)', 'freq': '"""D"""'}), "(start='2030-09-01', periods=25, freq='D')\n", (4167, 4209), True, 'import pandas as pd\n'), ((6054, 6095), 'itertools.product', 'itertools.product', (['fwd_points', 'fwd_points'], {}), '(fwd_points, fwd_points)\n', (6071, 6095), False, 'import itertools\n'), ((7299, 7358), 'cmdty_storage.multi_factor.MultiFactorModel', 'mf.MultiFactorModel', (['"""D"""'], {'factors': 'factors', 'factor_corrs': '(0.0)'}), "('D', factors=factors, factor_corrs=0.0)\n", (7318, 7358), True, 'from cmdty_storage import multi_factor as mf, CmdtyStorage, 
three_factor_seasonal_value, multi_factor_value\n'), ((7390, 7447), 'cmdty_storage.multi_factor.MultiFactorModel', 'mf.MultiFactorModel', (['"""D"""'], {'factors': 'factors', 'factor_corrs': '(0)'}), "('D', factors=factors, factor_corrs=0)\n", (7409, 7447), True, 'from cmdty_storage import multi_factor as mf, CmdtyStorage, three_factor_seasonal_value, multi_factor_value\n'), ((9900, 10157), 'cmdty_storage.CmdtyStorage', 'CmdtyStorage', (['"""D"""', 'storage_start', 'storage_end', 'constant_injection_cost', 'constant_withdrawal_cost'], {'min_inventory': 'min_inventory', 'max_inventory': 'max_inventory', 'max_injection_rate': 'constant_injection_rate', 'max_withdrawal_rate': 'constant_withdrawal_rate'}), "('D', storage_start, storage_end, constant_injection_cost,\n constant_withdrawal_cost, min_inventory=min_inventory, max_inventory=\n max_inventory, max_injection_rate=constant_injection_rate,\n max_withdrawal_rate=constant_withdrawal_rate)\n", (9912, 10157), False, 'from cmdty_storage import multi_factor as mf, CmdtyStorage, three_factor_seasonal_value, multi_factor_value\n'), ((10473, 10608), 'tests.utils.create_piecewise_flat_series', 'utils.create_piecewise_flat_series', (['[low_price, high_price, high_price]', '[val_date, date_switch_high_price, storage_end]'], {'freq': '"""D"""'}), "([low_price, high_price, high_price], [\n val_date, date_switch_high_price, storage_end], freq='D')\n", (10507, 10608), False, 'from tests import utils\n'), ((11787, 12048), 'cmdty_storage.multi_factor_value', 'multi_factor_value', (['cmdty_storage', 'val_date', 'inventory', 'forward_curve', 'interest_rate_curve', 'twentieth_of_next_month', 'factors', 'factor_corrs', 'num_sims', 'basis_funcs', 'discount_deltas'], {'seed': 'seed', 'fwd_sim_seed': 'fwd_sim_seed', 'on_progress_update': 'on_progress'}), '(cmdty_storage, val_date, inventory, forward_curve,\n interest_rate_curve, twentieth_of_next_month, factors, factor_corrs,\n num_sims, basis_funcs, discount_deltas, seed=seed, 
fwd_sim_seed=\n fwd_sim_seed, on_progress_update=on_progress)\n', (11805, 12048), False, 'from cmdty_storage import multi_factor as mf, CmdtyStorage, three_factor_seasonal_value, multi_factor_value\n'), ((13783, 14040), 'cmdty_storage.CmdtyStorage', 'CmdtyStorage', (['"""D"""', 'storage_start', 'storage_end', 'constant_injection_cost', 'constant_withdrawal_cost'], {'min_inventory': 'min_inventory', 'max_inventory': 'max_inventory', 'max_injection_rate': 'constant_injection_rate', 'max_withdrawal_rate': 'constant_withdrawal_rate'}), "('D', storage_start, storage_end, constant_injection_cost,\n constant_withdrawal_cost, min_inventory=min_inventory, max_inventory=\n max_inventory, max_injection_rate=constant_injection_rate,\n max_withdrawal_rate=constant_withdrawal_rate)\n", (13795, 14040), False, 'from cmdty_storage import multi_factor as mf, CmdtyStorage, three_factor_seasonal_value, multi_factor_value\n'), ((14356, 14491), 'tests.utils.create_piecewise_flat_series', 'utils.create_piecewise_flat_series', (['[low_price, high_price, high_price]', '[val_date, date_switch_high_price, storage_end]'], {'freq': '"""D"""'}), "([low_price, high_price, high_price], [\n val_date, date_switch_high_price, storage_end], freq='D')\n", (14390, 14491), False, 'from tests import utils\n'), ((15421, 15745), 'cmdty_storage.three_factor_seasonal_value', 'three_factor_seasonal_value', (['cmdty_storage', 'val_date', 'inventory', 'forward_curve', 'interest_rate_curve', 'twentieth_of_next_month', 'spot_mean_reversion', 'spot_volatility', 'long_term_vol', 'seasonal_volatility', 'num_sims', 'basis_funcs', 'discount_deltas'], {'seed': 'seed', 'fwd_sim_seed': 'fwd_sim_seed', 'on_progress_update': 'on_progress'}), '(cmdty_storage, val_date, inventory,\n forward_curve, interest_rate_curve, twentieth_of_next_month,\n spot_mean_reversion, spot_volatility, long_term_vol,\n seasonal_volatility, num_sims, basis_funcs, discount_deltas, seed=seed,\n fwd_sim_seed=fwd_sim_seed, 
on_progress_update=on_progress)\n', (15448, 15745), False, 'from cmdty_storage import multi_factor as mf, CmdtyStorage, three_factor_seasonal_value, multi_factor_value\n'), ((2393, 2426), 'pandas.Period', 'pd.Period', (['"""2021-01-15"""'], {'freq': '"""D"""'}), "('2021-01-15', freq='D')\n", (2402, 2426), True, 'import pandas as pd\n'), ((2447, 2464), 'datetime.date', 'date', (['(2021)', '(7)', '(30)'], {}), '(2021, 7, 30)\n', (2451, 2464), False, 'from datetime import date\n'), ((2689, 2712), 'pandas.Period', 'pd.Period', (['"""2020-08-01"""'], {}), "('2020-08-01')\n", (2698, 2712), True, 'import pandas as pd\n'), ((2728, 2745), 'datetime.date', 'date', (['(2021)', '(7)', '(30)'], {}), '(2021, 7, 30)\n', (2732, 2745), False, 'from datetime import date\n'), ((4076, 4133), 'pandas.period_range', 'pd.period_range', ([], {'start': '"""2020-09-01"""', 'periods': '(25)', 'freq': '"""D"""'}), "(start='2020-09-01', periods=25, freq='D')\n", (4091, 4133), True, 'import pandas as pd\n'), ((5333, 5349), 'datetime.date', 'date', (['(2020)', '(8)', '(5)'], {}), '(2020, 8, 5)\n', (5337, 5349), False, 'from datetime import date\n'), ((5351, 5368), 'datetime.date', 'date', (['(2020)', '(8)', '(30)'], {}), '(2020, 8, 30)\n', (5355, 5368), False, 'from datetime import date\n'), ((5629, 5645), 'datetime.date', 'date', (['(2020)', '(8)', '(1)'], {}), '(2020, 8, 1)\n', (5633, 5645), False, 'from datetime import date\n'), ((5647, 5663), 'datetime.date', 'date', (['(2020)', '(9)', '(1)'], {}), '(2020, 9, 1)\n', (5651, 5663), False, 'from datetime import date\n'), ((5807, 5823), 'datetime.date', 'date', (['(2020)', '(5)', '(1)'], {}), '(2020, 5, 1)\n', (5811, 5823), False, 'from datetime import date\n'), ((5825, 5841), 'datetime.date', 'date', (['(2020)', '(9)', '(1)'], {}), '(2020, 9, 1)\n', (5829, 5841), False, 'from datetime import date\n'), ((7984, 8000), 'datetime.date', 'date', (['(2020)', '(8)', '(5)'], {}), '(2020, 8, 5)\n', (7988, 8000), False, 'from datetime import date\n'), 
((8081, 8098), 'datetime.date', 'date', (['(2020)', '(8)', '(30)'], {}), '(2020, 8, 30)\n', (8085, 8098), False, 'from datetime import date\n'), ((8298, 8314), 'datetime.date', 'date', (['(2020)', '(8)', '(5)'], {}), '(2020, 8, 5)\n', (8302, 8314), False, 'from datetime import date\n'), ((8407, 8424), 'datetime.date', 'date', (['(2020)', '(8)', '(30)'], {}), '(2020, 8, 30)\n', (8411, 8424), False, 'from datetime import date\n'), ((8620, 8636), 'datetime.date', 'date', (['(2020)', '(8)', '(5)'], {}), '(2020, 8, 5)\n', (8624, 8636), False, 'from datetime import date\n'), ((8713, 8730), 'datetime.date', 'date', (['(2020)', '(8)', '(30)'], {}), '(2020, 8, 30)\n', (8717, 8730), False, 'from datetime import date\n'), ((8922, 8938), 'datetime.date', 'date', (['(2020)', '(8)', '(5)'], {}), '(2020, 8, 5)\n', (8926, 8938), False, 'from datetime import date\n'), ((9027, 9044), 'datetime.date', 'date', (['(2020)', '(8)', '(30)'], {}), '(2020, 8, 30)\n', (9031, 9044), False, 'from datetime import date\n'), ((7542, 7576), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (7550, 7576), True, 'import numpy as np\n'), ((7774, 7800), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (7782, 7800), True, 'import numpy as np\n'), ((10804, 10853), 'pandas.period_range', 'pd.period_range', (['val_date', '"""2020-06-01"""'], {'freq': '"""D"""'}), "(val_date, '2020-06-01', freq='D')\n", (10819, 10853), True, 'import pandas as pd\n'), ((11014, 11063), 'pandas.period_range', 'pd.period_range', (['val_date', '"""2020-06-01"""'], {'freq': '"""D"""'}), "(val_date, '2020-06-01', freq='D')\n", (11029, 11063), True, 'import pandas as pd\n'), ((11236, 11285), 'pandas.period_range', 'pd.period_range', (['val_date', '"""2020-06-01"""'], {'freq': '"""D"""'}), "(val_date, '2020-06-01', freq='D')\n", (11251, 11285), True, 'import pandas as pd\n'), ((14687, 14736), 'pandas.period_range', 'pd.period_range', (['val_date', 
'"""2020-06-01"""'], {'freq': '"""D"""'}), "(val_date, '2020-06-01', freq='D')\n", (14702, 14736), True, 'import pandas as pd\n'), ((1586, 1602), 'datetime.date', 'date', (['(2020)', '(8)', '(1)'], {}), '(2020, 8, 1)\n', (1590, 1602), False, 'from datetime import date\n'), ((1719, 1736), 'datetime.date', 'date', (['(2021)', '(7)', '(30)'], {}), '(2021, 7, 30)\n', (1723, 1736), False, 'from datetime import date\n'), ((1996, 2012), 'datetime.date', 'date', (['(2020)', '(8)', '(1)'], {}), '(2020, 8, 1)\n', (2000, 2012), False, 'from datetime import date\n'), ((2080, 2097), 'datetime.date', 'date', (['(2021)', '(7)', '(30)'], {}), '(2021, 7, 30)\n', (2084, 2097), False, 'from datetime import date\n'), ((1899, 1972), 'pandas.PeriodIndex', 'pd.PeriodIndex', ([], {'data': "['2020-08-01', '2021-01-15', '2021-07-30']", 'freq': '"""D"""'}), "(data=['2020-08-01', '2021-01-15', '2021-07-30'], freq='D')\n", (1913, 1972), True, 'import pandas as pd\n'), ((4398, 4429), 'numpy.linspace', 'np.linspace', (['(0.65)', '(0.38)'], {'num': '(50)'}), '(0.65, 0.38, num=50)\n', (4409, 4429), True, 'import numpy as np\n'), ((7010, 7042), 'numpy.linspace', 'np.linspace', (['(0.53)', '(0.487)'], {'num': '(50)'}), '(0.53, 0.487, num=50)\n', (7021, 7042), True, 'import numpy as np\n'), ((7158, 7190), 'numpy.linspace', 'np.linspace', (['(1.45)', '(1.065)'], {'num': '(50)'}), '(1.45, 1.065, num=50)\n', (7169, 7190), True, 'import numpy as np\n'), ((4658, 4690), 'numpy.linspace', 'np.linspace', (['(0.53)', '(0.487)'], {'num': '(50)'}), '(0.53, 0.487, num=50)\n', (4669, 4690), True, 'import numpy as np\n'), ((4873, 4905), 'numpy.linspace', 'np.linspace', (['(1.45)', '(1.065)'], {'num': '(50)'}), '(1.45, 1.065, num=50)\n', (4884, 4905), True, 'import numpy as np\n')] |
import numpy as np
import logging
logger = logging.getLogger(__name__)
def get_n_unique_rows(edge):
    """Return the number of distinct rows in the 2-D array-like ``edge``.

    Bug fix: the previous implementation converted the rows to tuples and
    called ``np.unique`` without ``axis=0``; NumPy flattens such input, so it
    counted unique scalar *values* rather than unique rows. Passing
    ``axis=0`` makes the uniqueness row-wise, matching get_unique_rows.
    """
    edge_unique = np.unique(edge, axis=0)
    return edge_unique.shape[0]
def get_unique_rows(edge):
    """Return ``edge`` with duplicate rows removed (np.unique row-wise,
    which also sorts the rows lexicographically)."""
    return np.unique(edge, axis=0)
def get_depth_of_nested_list(l):
    """Return the nesting depth of ``l``: 0 for a non-list (the original
    returned False, which compares equal to 0), 1 for a flat list, 2 for a
    list of lists, and so on.

    Bug fix: the original inner ``max(map(depth, L))`` raised ValueError for
    any empty list because ``max`` was applied to an empty sequence;
    ``default=0`` gives an empty list depth 1, consistent with a flat list.
    """
    if not isinstance(l, list):
        return 0
    return max(map(get_depth_of_nested_list, l), default=0) + 1
| [
"logging.getLogger",
"numpy.unique"
] | [((44, 71), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (61, 71), False, 'import logging\n'), ((160, 174), 'numpy.unique', 'np.unique', (['new'], {}), '(new)\n', (169, 174), True, 'import numpy as np\n'), ((280, 303), 'numpy.unique', 'np.unique', (['edge'], {'axis': '(0)'}), '(edge, axis=0)\n', (289, 303), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import sys
'''
A basic script to graph test score quantities and calculate statistical data.
'''
def retrieve():
    """Read integer test scores, one per line, from the file named by the
    first command-line argument; exit with a message on any failure."""
    try:
        with open(sys.argv[1]) as handle:
            lines = handle.readlines()
        return [int(line.strip()) for line in lines]
    except FileNotFoundError:
        sys.exit('\nFile was not found.\n')
    except Exception:
        sys.exit('''
        Something went wrong...
        Usage:
            python stats.py <file>
        ''')
def build(values, entry):
'''
Builds a DataFrame of the statistical results. The DataFrame is output to
both the console and a results.txt file
'''
df = pd.DataFrame(pd.Series([
np.amin(values),
np.amax(values),
np.mean(values),
np.median(values),
np.var(values),
np.std(values)
], [
'Min',
'Max',
'Mean',
'Median',
'Variance',
'Standard Deviation'
]),
columns=[
entry
]
)
print("\n", df, "\n")
try:
with open('results.txt', 'w') as output:
output.write(df.to_string())
except Exception as e:
print(e)
def display(x, y, title=None):
plt.bar(x, y, label='Scores')
if title:
plt.title(title)
plt.xlabel('Score')
plt.ylabel('Quantity')
plt.legend()
plt.show()
def main():
values = retrieve()
title = input('\n\tEnter a name for these scores:\n\t')
build(values, title=title)
# x is the domain of scores, y is the quantities of each score
x = np.array([i for i in range(min(values), max(values) + 1)])
y = np.array([0 for i in range(max(values) + 1)])
# Adds up score quantities
for j in values:
y[j] += 1
display(x, y[min(values):], title)
if __name__ == '__main__':
main()
| [
"numpy.mean",
"numpy.median",
"numpy.amin",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.std",
"matplotlib.pyplot.bar",
"numpy.var",
"sys.exit",
"matplotlib.pyplot.title",
"numpy.amax",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1355, 1384), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y'], {'label': '"""Scores"""'}), "(x, y, label='Scores')\n", (1362, 1384), True, 'from matplotlib import pyplot as plt\n'), ((1428, 1447), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Score"""'], {}), "('Score')\n", (1438, 1447), True, 'from matplotlib import pyplot as plt\n'), ((1452, 1474), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Quantity"""'], {}), "('Quantity')\n", (1462, 1474), True, 'from matplotlib import pyplot as plt\n'), ((1479, 1491), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1489, 1491), True, 'from matplotlib import pyplot as plt\n'), ((1496, 1506), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1504, 1506), True, 'from matplotlib import pyplot as plt\n'), ((1407, 1423), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1416, 1423), True, 'from matplotlib import pyplot as plt\n'), ((416, 453), 'sys.exit', 'sys.exit', (['"""\nFile was not found.\n"""'], {}), '("""\nFile was not found.\n""")\n', (424, 453), False, 'import sys\n'), ((487, 620), 'sys.exit', 'sys.exit', (['"""\n Something went wrong...\n Usage:\n python stats.py <file>\n """'], {}), '(\n """\n Something went wrong...\n Usage:\n python stats.py <file>\n """\n )\n', (495, 620), False, 'import sys\n'), ((828, 843), 'numpy.amin', 'np.amin', (['values'], {}), '(values)\n', (835, 843), True, 'import numpy as np\n'), ((853, 868), 'numpy.amax', 'np.amax', (['values'], {}), '(values)\n', (860, 868), True, 'import numpy as np\n'), ((878, 893), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (885, 893), True, 'import numpy as np\n'), ((903, 920), 'numpy.median', 'np.median', (['values'], {}), '(values)\n', (912, 920), True, 'import numpy as np\n'), ((930, 944), 'numpy.var', 'np.var', (['values'], {}), '(values)\n', (936, 944), True, 'import numpy as np\n'), ((954, 968), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (960, 968), True, 'import numpy as np\n')] |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import scipy.integrate as integrate
from scipy.optimize import brentq as root
import math
import numpy as np
import scipy.special as scp
from scipy.special import iv
# In[2]:
def rvonmises(n, mu, kappa):
vm = np.zeros(n)
a = 1 + (1 + 4 * (kappa**2))**0.5
b = (a - (2 * a)**0.5)/(2 * kappa)
r = (1 + b**2)/(2 * b)
obs = 0
while (obs < n):
U1 = np.random.uniform(0, 1, 1)
z = np.cos(np.pi * U1)
f = (1 + r * z)/(r + z)
c = kappa * (r - f)
U2 = np.random.uniform(0, 1, 1)
if (c * (2 - c) - U2 > 0):
U3 = np.random.uniform(0, 1, 1)
vm[obs] = np.sign(U3 - 0.5) * math.acos(f) + mu
vm[obs] = vm[obs] % (2 * np.pi)
obs = obs + 1
else:
if (math.log(c/U2) + 1 - c >= 0):
U3 = np.random.uniform(0, 1, 1)
vm[obs] = np.sign(U3 - 0.5) * math.acos(f) + mu
vm[obs] = vm[obs] % (2 * math.pi)
obs = obs + 1
return(vm)
# In[3]:
def dvonmises(x, mu, kappa, log = False):
if (type(x) == int):
x = [x]
if (type(x) == float):
x = [x]
vm = np.zeros(len(x))
if (log):
if (kappa == 0):
vm = np.log(np.repreat(1/(2*pi), len(x)))
elif (kappa < 100000):
vm = -(np.log(2*math.pi)+np.log(scp.ive(0, kappa)) + kappa) + kappa*(np.cos(np.subtract(x - mu)))
else:
if (((x-mu)%(2*math.pi))==0):
vm = math.inf
else:
vm = -math.inf
else:
if (kappa == 0):
vm = np.repeat(1/(2*np.pi), len(x))
elif (kappa < 100000):
vm = 1/(2 * np.pi * scp.ive(0, kappa)) * (np.exp(np.subtract(np.cos(np.subtract(x, mu)), 1)))**kappa
else:
if (np.mod(np.subtract(x, mu),(2*np.pi))==0):
vm = math.inf
else:
vm = 0
return(vm)
# In[21]:
def pvonmises(q, mu, kappa, tol = 1e-020):
from_ = mu - np.pi
mu = (mu - from_) % (2 * np.pi)
if (type(q) == int):
q = [q]
if(type(q) == float):
q =[q]
q = np.mod(np.subtract(q, from_), (2 * np.pi))
q = np.mod(q,(2 * np.pi))
n = len(q)
mu = mu % (2 * np.pi)
def pvm_mu0(q, kappa, tol):
flag = True
p = 1
sum_ = 0
while (flag):
term = (iv(p, kappa) * np.sin(np.multiply(q, p)))/p
sum_ = sum_ + term
p = p + 1
if (abs(term) < tol):
flag = False
return(np.divide(q,(2 * np.pi)) + sum_/(np.pi * iv(0, kappa)))
result = np.repeat(np.nan, n)
if (mu == 0):
for i in range(0,n):
result[i] = pvm_mu0(q[i], kappa, tol)
else:
for i in range(0,n):
if (q[i] <= mu):
upper = (q[i] - mu) % (2 * np.pi)
if (upper == 0):
upper = 2 * np.pi
lower = (-mu) % (2 * np.pi)
result[i] = pvm_mu0(upper, kappa, tol) - pvm_mu0(lower, kappa, tol)
else:
upper = q[i] - mu
lower = mu % (2 * np.pi)
result[i] = pvm_mu0(upper, kappa, tol) + pvm_mu0(lower, kappa, tol)
return(result)
# In[63]:
def qvonmises(p, mu = 0 , kappa = None, from_ = None, tol = np.finfo(float).eps**0.6):
epsilon = 10 * np.finfo(float).eps ##epsilon is Python equivalent of .Machine$double.eps
if (type(p) == int):
p = np.array([p])
elif (type(p) == float):
p = np.array([p])
else:
p = np.array(p)
if (np.any(p > 1)):
raise ValueError("p must be in [0,1]")
elif (np.any(p < 0)):
raise ValueError("p must be in [0,1]")
if (pd.isnull(from_)):
from_ = mu - np.pi
n = p.size
mu = (mu - from_)%(2 * np.pi)
if (pd.isnull(kappa)):
raise ValueError("kappa must be provided")
def zeroPvonmisesRad(x, p, mu, kappa):
if (np.isnan(x)):
y = np.nan
else:
integration = integrate.quad(lambda x: dvonmises(x, mu, kappa), 0, x)
y = integration[0] - p ##integration[0] will give the value
return(y);
value = np.repeat(np.nan, p.size)
for i in range(p.size):
try:
value[i] = root(lambda x: zeroPvonmisesRad(x, p[i], mu, kappa), 0, 2 * np.pi - epsilon)
except:
pass
if(p[i] < (10 * epsilon)):
value[i] = 0
elif (p[i] > (1 - 10 * epsilon)):
value[i] = 2 * np.pi - epsilon
value += from_
return(value)
| [
"math.acos",
"numpy.log",
"math.log",
"numpy.array",
"numpy.mod",
"numpy.divide",
"numpy.multiply",
"numpy.repeat",
"numpy.subtract",
"scipy.special.iv",
"numpy.any",
"numpy.isnan",
"numpy.cos",
"numpy.sign",
"numpy.finfo",
"scipy.special.ive",
"pandas.isnull",
"numpy.zeros",
"nu... | [((285, 296), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (293, 296), True, 'import numpy as np\n'), ((2257, 2277), 'numpy.mod', 'np.mod', (['q', '(2 * np.pi)'], {}), '(q, 2 * np.pi)\n', (2263, 2277), True, 'import numpy as np\n'), ((2690, 2710), 'numpy.repeat', 'np.repeat', (['np.nan', 'n'], {}), '(np.nan, n)\n', (2699, 2710), True, 'import numpy as np\n'), ((3668, 3681), 'numpy.any', 'np.any', (['(p > 1)'], {}), '(p > 1)\n', (3674, 3681), True, 'import numpy as np\n'), ((3814, 3830), 'pandas.isnull', 'pd.isnull', (['from_'], {}), '(from_)\n', (3823, 3830), True, 'import pandas as pd\n'), ((3929, 3945), 'pandas.isnull', 'pd.isnull', (['kappa'], {}), '(kappa)\n', (3938, 3945), True, 'import pandas as pd\n'), ((4340, 4365), 'numpy.repeat', 'np.repeat', (['np.nan', 'p.size'], {}), '(np.nan, p.size)\n', (4349, 4365), True, 'import numpy as np\n'), ((447, 473), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (464, 473), True, 'import numpy as np\n'), ((486, 504), 'numpy.cos', 'np.cos', (['(np.pi * U1)'], {}), '(np.pi * U1)\n', (492, 504), True, 'import numpy as np\n'), ((578, 604), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (595, 604), True, 'import numpy as np\n'), ((2213, 2234), 'numpy.subtract', 'np.subtract', (['q', 'from_'], {}), '(q, from_)\n', (2224, 2234), True, 'import numpy as np\n'), ((3557, 3570), 'numpy.array', 'np.array', (['[p]'], {}), '([p])\n', (3565, 3570), True, 'import numpy as np\n'), ((3742, 3755), 'numpy.any', 'np.any', (['(p < 0)'], {}), '(p < 0)\n', (3748, 3755), True, 'import numpy as np\n'), ((4067, 4078), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (4075, 4078), True, 'import numpy as np\n'), ((657, 683), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (674, 683), True, 'import numpy as np\n'), ((2620, 2643), 'numpy.divide', 'np.divide', (['q', '(2 * np.pi)'], {}), '(q, 2 * np.pi)\n', (2629, 
2643), True, 'import numpy as np\n'), ((3396, 3411), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3404, 3411), True, 'import numpy as np\n'), ((3442, 3457), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3450, 3457), True, 'import numpy as np\n'), ((3612, 3625), 'numpy.array', 'np.array', (['[p]'], {}), '([p])\n', (3620, 3625), True, 'import numpy as np\n'), ((3648, 3659), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (3656, 3659), True, 'import numpy as np\n'), ((895, 921), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (912, 921), True, 'import numpy as np\n'), ((706, 723), 'numpy.sign', 'np.sign', (['(U3 - 0.5)'], {}), '(U3 - 0.5)\n', (713, 723), True, 'import numpy as np\n'), ((726, 738), 'math.acos', 'math.acos', (['f'], {}), '(f)\n', (735, 738), False, 'import math\n'), ((2445, 2457), 'scipy.special.iv', 'iv', (['p', 'kappa'], {}), '(p, kappa)\n', (2447, 2457), False, 'from scipy.special import iv\n'), ((2661, 2673), 'scipy.special.iv', 'iv', (['(0)', 'kappa'], {}), '(0, kappa)\n', (2663, 2673), False, 'from scipy.special import iv\n'), ((844, 860), 'math.log', 'math.log', (['(c / U2)'], {}), '(c / U2)\n', (852, 860), False, 'import math\n'), ((948, 965), 'numpy.sign', 'np.sign', (['(U3 - 0.5)'], {}), '(U3 - 0.5)\n', (955, 965), True, 'import numpy as np\n'), ((968, 980), 'math.acos', 'math.acos', (['f'], {}), '(f)\n', (977, 980), False, 'import math\n'), ((1879, 1897), 'numpy.subtract', 'np.subtract', (['x', 'mu'], {}), '(x, mu)\n', (1890, 1897), True, 'import numpy as np\n'), ((2467, 2484), 'numpy.multiply', 'np.multiply', (['q', 'p'], {}), '(q, p)\n', (2478, 2484), True, 'import numpy as np\n'), ((1458, 1477), 'numpy.subtract', 'np.subtract', (['(x - mu)'], {}), '(x - mu)\n', (1469, 1477), True, 'import numpy as np\n'), ((1761, 1778), 'scipy.special.ive', 'scp.ive', (['(0)', 'kappa'], {}), '(0, kappa)\n', (1768, 1778), True, 'import scipy.special as scp\n'), ((1389, 1408), 'numpy.log', 
'np.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (1395, 1408), True, 'import numpy as np\n'), ((1414, 1431), 'scipy.special.ive', 'scp.ive', (['(0)', 'kappa'], {}), '(0, kappa)\n', (1421, 1431), True, 'import scipy.special as scp\n'), ((1809, 1827), 'numpy.subtract', 'np.subtract', (['x', 'mu'], {}), '(x, mu)\n', (1820, 1827), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from profit.dataset.splitters.stratified_splitter import StratifiedSplitter
@pytest.fixture
def cls_dataset():
# Create dataset with 10 features and last column as labels
X = np.random.random((30, 10))
y = np.concatenate([np.zeros(20), np.ones(10)]).astype(np.int)
return np.concatenate([X, y.reshape(-1,1)], axis=1)
@pytest.fixture
def cls_label():
y = np.concatenate([np.zeros(20), np.ones(10)]).astype(np.int)
return y
@pytest.fixture
def reg_dataset():
X = np.random.random((100, 10))
y = np.random.random(size=100).astype(np.float) * 10 # range[0,10]
return np.concatenate([X, y.reshape(-1,1)], axis=1)
def test_classification_split(cls_dataset):
splitter = StratifiedSplitter()
# Split using default values: 0.8 for train, 0.1 for val, and 0.1 for test
train, valid, test = splitter.train_valid_test_split(cls_dataset, return_idxs=False)
assert type(train) == np.ndarray
assert train.shape[0] == 24
assert valid.shape[0] == 3
assert test.shape[0] == 3
# Each set should contain the same ratio of positive to negative labels
# For our ex, this is 1/3 true labels and 2/3 false labels, same ratio as full dataset
assert (train[:,-1] == 0).sum() == 16
assert (train[:,-1] == 1).sum() == 8
assert (valid[:,-1] == 0).sum() == 2
assert (valid[:,-1] == 1).sum() == 1
assert (test[:,-1] == 0).sum() == 2
assert (test[:,-1] == 1).sum() == 1
# Split using 0.5 for train, 0.3 for val, and 0.2 for test
train, valid, test = splitter.train_valid_test_split(
cls_dataset, frac_train=0.5, frac_valid=0.3, frac_test=0.2,
return_idxs=False)
assert type(train) == np.ndarray
assert train.shape[0] == 15
assert valid.shape[0] == 9
assert test.shape[0] == 6
# Each set should contain the same ratio of positive to negative labels
# For our ex, this is 1/3 true labels and 2/3 false labels, same ratio as full dataset
assert (train[:,-1] == 0).sum() == 10
assert (train[:,-1] == 1).sum() == 5
assert (valid[:,-1] == 0).sum() == 6
assert (valid[:,-1] == 1).sum() == 3
assert (test[:,-1] == 0).sum() == 4
assert (test[:,-1] == 1).sum() == 2
def test_regression_split(reg_dataset):
splitter = StratifiedSplitter()
# Split using default values: 0.8 for train, 0.1 for val, and 0.1 for test
train, valid, test = splitter.train_valid_test_split(reg_dataset, return_idxs=False)
assert type(train) == np.ndarray
assert train.shape[0] == 80
assert valid.shape[0] == 10
assert test.shape[0] == 10
assert 4.25 < train[:, -1].mean() < 5.75
assert 4.25 < valid[:, -1].mean() < 5.75
assert 4.25 < test[:, -1].mean() < 5.75
# Split using 0.5 for train, 0.3 for val, and 0.2 for test
train, valid, test = splitter.train_valid_test_split(
reg_dataset, frac_train=0.5, frac_valid=0.3, frac_test=0.2,
return_idxs=False)
assert type(train) == np.ndarray
assert train.shape[0] == 50
assert valid.shape[0] == 30
assert test.shape[0] == 20
assert 4.25 < train[:, -1].mean() < 5.75
assert 4.25 < valid[:, -1].mean() < 5.75
assert 4.25 < test[:, -1].mean() < 5.75
| [
"numpy.random.random",
"numpy.zeros",
"numpy.ones",
"profit.dataset.splitters.stratified_splitter.StratifiedSplitter"
] | [((219, 245), 'numpy.random.random', 'np.random.random', (['(30, 10)'], {}), '((30, 10))\n', (235, 245), True, 'import numpy as np\n'), ((529, 556), 'numpy.random.random', 'np.random.random', (['(100, 10)'], {}), '((100, 10))\n', (545, 556), True, 'import numpy as np\n'), ((745, 765), 'profit.dataset.splitters.stratified_splitter.StratifiedSplitter', 'StratifiedSplitter', ([], {}), '()\n', (763, 765), False, 'from profit.dataset.splitters.stratified_splitter import StratifiedSplitter\n'), ((2308, 2328), 'profit.dataset.splitters.stratified_splitter.StratifiedSplitter', 'StratifiedSplitter', ([], {}), '()\n', (2326, 2328), False, 'from profit.dataset.splitters.stratified_splitter import StratifiedSplitter\n'), ((565, 591), 'numpy.random.random', 'np.random.random', ([], {'size': '(100)'}), '(size=100)\n', (581, 591), True, 'import numpy as np\n'), ((270, 282), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (278, 282), True, 'import numpy as np\n'), ((284, 295), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (291, 295), True, 'import numpy as np\n'), ((428, 440), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (436, 440), True, 'import numpy as np\n'), ((442, 453), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (449, 453), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import time
import os
import math
def Binaryfind(TermList, target):
start = 0
end = len(TermList) - 1
while start < end:
middle =int((start + end)/ 2)
midpoint = TermList[middle]
if midpoint > target:
end = middle - 1
if start==end:
return start
elif midpoint < target:
start = middle + 1
if start==end:
return start
else:
#print("middle",middle)
return middle
def get_doc_set(term_dict,term_list):
term_set=set(term_dict.index)
doc_set=set()
for term in term_list:
if term in term_set:
docs=eval(term_dict.loc[term]["posting_list"])
docs=list(docs.keys())
for doc in docs:
doc_set.add(doc)
# print(doc_set)
return doc_set
#total_doc_num=len(filenames = os.listdir("./tmp_dir"))
def Topk(term_dict,csv_file_path,query,k=10):
"""
term_dict:存放term_dict的DataFrame 即term_dict=pd.read_csv(term_dict_file)
csv_file_path:存放vector_model的csv文件夹路径
query:查询字符串
k:最多返回k个
返回值:得分最高的docid列表
"""
term_list=list(term_dict.index)
query=query.split(" ")
query_vector=np.zeros(len(term_list))# 建立查询向量
doc_set=get_doc_set(term_dict,query)
for qterm in query:
idx = Binaryfind(term_list,qterm)
# print(idx)
if term_list[idx]==qterm:# 可能词项列表中没有query的词汇
query_vector[idx]=1
query_vector_norm= np.linalg.norm(query_vector)
if sum(query_vector)==0: return [] #如果向量表中没有对应词项,直接返回空列表
sim_dict={}# 每个doc对应的的余弦相似度
filenames = os.listdir(csv_file_path)
#filenames = sorted(filenames, key=lambda x: int(x.split(".")[0]))
traversed_files=0
for file in filenames:
tmp_df=pd.read_csv(csv_file_path+"/"+file)
for index,row in tmp_df.iterrows():
doc_id=traversed_files*100+index+1
if doc_id in doc_set:
doc_vector=np.array(row[1:])
sim_dict[doc_id]=sum(doc_vector*query_vector)/(np.linalg.norm(doc_vector) *query_vector_norm)
traversed_files+=1
sim_dict=sorted(sim_dict.items(), key = lambda kv:(kv[1], kv[0]), reverse=True)
topk_list=[]
for _ in sim_dict[0:k]:
# print(_)
if _[1]!=0:
topk_list.append(_[0])
else : break;
# print(sim_dict[0:k])
return topk_list
def NewTopK(term_dict,vector_model,query,k=10):
term_list=list(term_dict.index)
query=query.split(" ")
term_idx_set=set()
for qterm in query:
idx = Binaryfind(term_list,qterm)
# print(idx)
if term_list[idx]==qterm:# 可能词项列表中没有query的词汇
term_idx_set.add(idx)
if len(term_idx_set)==0: return [] # 如果查询的词在词项词典中不存在,直接返回空列表
query_vector_norm=math.sqrt(len(query))# 查询向量的模就是根号下查询向量的长度
doc_vectors=list(vector_model["values"])# [[(5,3),(6,1),(200,1.5)],[(4,0.8),(6,1.2),(100,1.5)]]
sim_dict={}# 每个doc对应的的余弦相似度
i=1
for doc_vector in doc_vectors:
doc_vector=eval(doc_vector)
fenzi=0
doc_vector_norm=0# 文档向量的模
for x in doc_vector:# x[0]是词项索引,x[1]是tf-idf
if x[0] in term_idx_set:
fenzi+=x[1]
doc_vector_norm+=x[1]*x[1]
doc_vector_norm=math.sqrt(doc_vector_norm)
if fenzi!=0: sim_dict[i]=fenzi/(doc_vector_norm*query_vector_norm) # 如果文档和查询一个匹配都没有,不需要加入词典
i+=1
sim_dict=sorted(sim_dict.items(), key = lambda kv:(kv[1], kv[0]), reverse=True)
topk_list=[]
if len(sim_dict)>k:
topk_list=sim_dict[0:k]
else: topk_list=sim_dict
return topk_list | [
"os.listdir",
"pandas.read_csv",
"math.sqrt",
"numpy.array",
"numpy.linalg.norm"
] | [((1538, 1566), 'numpy.linalg.norm', 'np.linalg.norm', (['query_vector'], {}), '(query_vector)\n', (1552, 1566), True, 'import numpy as np\n'), ((1677, 1702), 'os.listdir', 'os.listdir', (['csv_file_path'], {}), '(csv_file_path)\n', (1687, 1702), False, 'import os\n'), ((1838, 1877), 'pandas.read_csv', 'pd.read_csv', (["(csv_file_path + '/' + file)"], {}), "(csv_file_path + '/' + file)\n", (1849, 1877), True, 'import pandas as pd\n'), ((3334, 3360), 'math.sqrt', 'math.sqrt', (['doc_vector_norm'], {}), '(doc_vector_norm)\n', (3343, 3360), False, 'import math\n'), ((2026, 2043), 'numpy.array', 'np.array', (['row[1:]'], {}), '(row[1:])\n', (2034, 2043), True, 'import numpy as np\n'), ((2107, 2133), 'numpy.linalg.norm', 'np.linalg.norm', (['doc_vector'], {}), '(doc_vector)\n', (2121, 2133), True, 'import numpy as np\n')] |
import numpy as np
def sigmoid(z):
return 1/(1 + np.exp(-z))
def sigmoid_gradient(z):
"""
Derivative of sigmoid(z)
"""
sigmoid_grad = sigmoid(z)*(1 - sigmoid(z))
return sigmoid_grad
def roll(nn_params, layer_sizes):
"""
nn_params: long array of weights
layer_sizes: vector of length 3 containing input, hidden, and output layer sizes (# nodes)
returns:
weight_mat_1, weight_mat_2, two weight matrices for going from layer 1 to layer 2, and layer 2 to layer 3.
"""
n_in, n_hid, n_out = layer_sizes
len_t1 = n_hid * (n_in + 1)
len_t2 = n_out * (n_hid + 1)
weight_mat_1 = np.reshape(nn_params[:len_t1], (n_hid, n_in + 1))
weight_mat_2 = np.reshape(nn_params[len_t1:], (n_out, n_hid + 1))
return weight_mat_1, weight_mat_2
def unroll(weights_mat_1, weight_mat_2):
"""
Take in two weight matrices, output one long array of weights. Undo this again with 'Roll'
"""
reshaped_weights_1 = np.reshape(weights_mat_1, np.size(weights_mat_1))
reshaped_weights_2 = np.reshape(weight_mat_2, np.size(weight_mat_2))
combined_weights = np.concatenate((reshaped_weights_1, reshaped_weights_2))
nn_params = np.reshape(combined_weights, np.size(combined_weights))
return nn_params
def get_activations(weights_1, weights_2, input_data):
# Now calculate the activations
a1 = input_data # activations of layer 1 are just the input values
a1 = np.insert(a1, 0, 1, axis=1) # First, add col of ones to input_data, which are the bias units
a1 = np.transpose(a1)
a2 = sigmoid(np.dot(weights_1, a1))
a2 = np.insert(a2, 0, 1, axis=0) # add row of ones
a3 = sigmoid(np.dot(weights_2, a2)) # our outputs, a Kxm vector (K = nb classes)
return [a1, a2, a3]
def one_hot_encode(y, num_classes, m):
labels = np.zeros((num_classes, m))
labels[y, range(m)] = 1 # y = 1,2, ... 10 for images 1,2, .. 0
return labels
def get_cost(labels, prediction_mat, weight_mat_1, weight_mat_2, reg_param, num_train_points):
"""
Calculate the cost of the current configuration.
param labels: Kxm matrix with true labels. K = number of classes, num_train_points = number of training examples.
each column contains all zeros and a single one at the correct class
param prediction_mat: Kxm matrix containing predictions, each column is single training example.
param weight_mat_1: s1xs2 matrix, weight matrix connecting input layer to first hidden layer. s1 is number of
nodes in hidden layer, s2 number of inputs + bias.
param weight_mat_2: Kx(s1+1) matrix, weight matrix connecting hidden layer to output layer. K is number of output
classes, s1 number of nodes in hidden layer.
param reg_param: float, regularisation parameter
param num_train_points: int, number of training examples
returns:
J: float, cost of current configuration
"""
# weight_mat_1, weight_mat_2 = roll(nn_params, layer_sizes)
# Now calculate overall cost
pred_mat = -labels*np.log(prediction_mat) - (1 - labels)*np.log(1.0 - prediction_mat)
cost = 1.0/num_train_points*np.sum(pred_mat)
# Add regularization term for cost. No regularizaton for the bias units (indexed by 0)
# reg_theta1 = np.copy(weight_mat_1[:, 1:])
# reg_theta2 = np.copy(weight_mat_2[:, 1:])
# J_reg = reg_param/(2.0*num_train_points)*(np.sum(reg_theta1**2) + np.sum(reg_theta2**2))
cost_regularize = reg_param/(2.0*num_train_points)*(np.sum(weight_mat_1[:, 1:]**2) + np.sum(weight_mat_2[:, 1:]**2))
cost += cost_regularize
return cost
def get_cost_gradient(nn_params, layer_sizes, input_data, y, reg_param):
"""
layer_sizes: vector containing number of units in each of the layers, e.g. [400, 4, 10] (input, hidden, output)
get_cost_gradient Implements the neural network cost function for a two layer
neural network which performs classification
"""
weight_mat_1, weight_mat_2 = roll(nn_params, layer_sizes)
num_train_points = input_data.shape[0]
# Setup some useful variables (num_train_points = number of training examples)
# activations is vector [a1, a2, a3] where a3 are final outputs
activations = get_activations(weight_mat_1, weight_mat_2, input_data)
# One hot encode the correct labels
labels = one_hot_encode(y, layer_sizes[-1], num_train_points)
# Now calculate overall cost
cost = get_cost(labels, activations[-1], weight_mat_1, weight_mat_2, reg_param, num_train_points)
# Gradient using back propagation
grad = perform_back_prop(labels, activations, weight_mat_1, weight_mat_2, reg_param, num_train_points)
return cost, grad
def perform_back_prop(labels, activations, weight_mat_1, weight_mat_2, reg_param, num_train_points):
"""
Function for backpropagation algorithm to provide the cost gradient.
returns:
grad, long array containing gradients of cost with respect to each theta (Unroll used
to create the long array).
"""
# weight_mat_1, weight_mat_2 = roll(nn_params, layer_sizes)
[a1, a2, a3] = activations
delta3 = a3 - labels
# delta3 is a Kxm matrix. One column denotes
# one training example. The length of the column is the number
# of classes we have. So in a given column, delta3 gives the
# %error of that output node, which is just the difference
# between our prediction of that output node (a3) and
# the actual value (labels).
temp = np.dot(np.transpose(weight_mat_2), delta3)
delta2 = temp[1:, :]*sigmoid_gradient(np.dot(weight_mat_1, a1))
delta_1 = np.dot(delta2, np.transpose(a1))
delta_2 = np.dot(delta3, np.transpose(a2))
# Regularization terms. We want to add reg_param/num_train_points*Theta but
# We do not want to regularize the thetas linked to the bias units
# Which corresponds to the first column in theta.
theta1_reg = np.copy(weight_mat_1)
theta1_reg[:,0] = 0
theta2_reg = np.copy(weight_mat_2)
theta2_reg[:,0] = 0
theta1_grad = 1.0/num_train_points*(delta_1 + reg_param*theta1_reg)
theta2_grad = 1.0/num_train_points*(delta_2 + reg_param*theta2_reg)
grad = unroll(theta1_grad, theta2_grad)
# grad = [np.ravel(theta1_grad), np.ravel(theta2_grad)]
return grad
def initialize_weights(n_in, n_out, eps):
"""
Randomly initialise weights.
"""
np.random.seed(3)
w = np.random.uniform(size = (n_out, 1 + n_in))
w = w*2*eps - eps
return w
def predict(weight_mat_1, weight_mat_2, data_mat):
"""
Predict the label of an input given a trained neural network
param weight_mat_1: s1xs2 matrix, weight matrix connecting input layer to first hidden layer. s1 is number of
nodes in hidden layer, s2 number of inputs + bias.
param weight_mat_2: Kx(s1+1) matrix, weight matrix connecting hidden layer to output layer. K is number of output
classes, s1 number of nodes in hidden layer.
param data_mat: mxn matrix, each row is an image
returns:
prediction_array: m-dim array containing predicted digit for each input sample
prob: m-dim array, probability of an input belonging to its predicted class
"""
outputs = get_activations(weight_mat_1, weight_mat_2, data_mat)[-1]
m = data_mat.shape[0]
# data_mat = np.insert(data_mat, 0, 1, axis=1) #add column of ones at start, bias units
# h1 = sigmoid(np.dot(data_mat, weight_mat_1.T))
# h1 = np.insert(h1, 0, 1, axis = 1)
# h2 = sigmoid(np.dot(h1, weight_mat_2.T)) #each row is sample, each col is prob to be in certain class
prediction_array = np.argmax(outputs, axis=0) # indices of col with max value
prob = outputs[prediction_array, np.arange(m)] # probs of digit being the predicted label
return prediction_array, prob
| [
"numpy.insert",
"numpy.copy",
"numpy.reshape",
"numpy.size",
"numpy.log",
"numpy.argmax",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.random.seed",
"numpy.concatenate",
"numpy.random.uniform",
"numpy.transpose",
"numpy.arange"
] | [((647, 696), 'numpy.reshape', 'np.reshape', (['nn_params[:len_t1]', '(n_hid, n_in + 1)'], {}), '(nn_params[:len_t1], (n_hid, n_in + 1))\n', (657, 696), True, 'import numpy as np\n'), ((716, 766), 'numpy.reshape', 'np.reshape', (['nn_params[len_t1:]', '(n_out, n_hid + 1)'], {}), '(nn_params[len_t1:], (n_out, n_hid + 1))\n', (726, 766), True, 'import numpy as np\n'), ((1130, 1186), 'numpy.concatenate', 'np.concatenate', (['(reshaped_weights_1, reshaped_weights_2)'], {}), '((reshaped_weights_1, reshaped_weights_2))\n', (1144, 1186), True, 'import numpy as np\n'), ((1454, 1481), 'numpy.insert', 'np.insert', (['a1', '(0)', '(1)'], {'axis': '(1)'}), '(a1, 0, 1, axis=1)\n', (1463, 1481), True, 'import numpy as np\n'), ((1556, 1572), 'numpy.transpose', 'np.transpose', (['a1'], {}), '(a1)\n', (1568, 1572), True, 'import numpy as np\n'), ((1627, 1654), 'numpy.insert', 'np.insert', (['a2', '(0)', '(1)'], {'axis': '(0)'}), '(a2, 0, 1, axis=0)\n', (1636, 1654), True, 'import numpy as np\n'), ((1841, 1867), 'numpy.zeros', 'np.zeros', (['(num_classes, m)'], {}), '((num_classes, m))\n', (1849, 1867), True, 'import numpy as np\n'), ((5958, 5979), 'numpy.copy', 'np.copy', (['weight_mat_1'], {}), '(weight_mat_1)\n', (5965, 5979), True, 'import numpy as np\n'), ((6022, 6043), 'numpy.copy', 'np.copy', (['weight_mat_2'], {}), '(weight_mat_2)\n', (6029, 6043), True, 'import numpy as np\n'), ((6431, 6448), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (6445, 6448), True, 'import numpy as np\n'), ((6457, 6498), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n_out, 1 + n_in)'}), '(size=(n_out, 1 + n_in))\n', (6474, 6498), True, 'import numpy as np\n'), ((7707, 7733), 'numpy.argmax', 'np.argmax', (['outputs'], {'axis': '(0)'}), '(outputs, axis=0)\n', (7716, 7733), True, 'import numpy as np\n'), ((1010, 1032), 'numpy.size', 'np.size', (['weights_mat_1'], {}), '(weights_mat_1)\n', (1017, 1032), True, 'import numpy as np\n'), ((1084, 1105), 'numpy.size', 
'np.size', (['weight_mat_2'], {}), '(weight_mat_2)\n', (1091, 1105), True, 'import numpy as np\n'), ((1232, 1257), 'numpy.size', 'np.size', (['combined_weights'], {}), '(combined_weights)\n', (1239, 1257), True, 'import numpy as np\n'), ((1595, 1616), 'numpy.dot', 'np.dot', (['weights_1', 'a1'], {}), '(weights_1, a1)\n', (1601, 1616), True, 'import numpy as np\n'), ((1690, 1711), 'numpy.dot', 'np.dot', (['weights_2', 'a2'], {}), '(weights_2, a2)\n', (1696, 1711), True, 'import numpy as np\n'), ((3147, 3163), 'numpy.sum', 'np.sum', (['pred_mat'], {}), '(pred_mat)\n', (3153, 3163), True, 'import numpy as np\n'), ((5532, 5558), 'numpy.transpose', 'np.transpose', (['weight_mat_2'], {}), '(weight_mat_2)\n', (5544, 5558), True, 'import numpy as np\n'), ((5666, 5682), 'numpy.transpose', 'np.transpose', (['a1'], {}), '(a1)\n', (5678, 5682), True, 'import numpy as np\n'), ((5713, 5729), 'numpy.transpose', 'np.transpose', (['a2'], {}), '(a2)\n', (5725, 5729), True, 'import numpy as np\n'), ((56, 66), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (62, 66), True, 'import numpy as np\n'), ((3048, 3070), 'numpy.log', 'np.log', (['prediction_mat'], {}), '(prediction_mat)\n', (3054, 3070), True, 'import numpy as np\n'), ((3086, 3114), 'numpy.log', 'np.log', (['(1.0 - prediction_mat)'], {}), '(1.0 - prediction_mat)\n', (3092, 3114), True, 'import numpy as np\n'), ((3504, 3536), 'numpy.sum', 'np.sum', (['(weight_mat_1[:, 1:] ** 2)'], {}), '(weight_mat_1[:, 1:] ** 2)\n', (3510, 3536), True, 'import numpy as np\n'), ((3537, 3569), 'numpy.sum', 'np.sum', (['(weight_mat_2[:, 1:] ** 2)'], {}), '(weight_mat_2[:, 1:] ** 2)\n', (3543, 3569), True, 'import numpy as np\n'), ((5610, 5634), 'numpy.dot', 'np.dot', (['weight_mat_1', 'a1'], {}), '(weight_mat_1, a1)\n', (5616, 5634), True, 'import numpy as np\n'), ((7804, 7816), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (7813, 7816), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.