code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""evaluate model performance
TODO
- Evaluate by window and by participant (rewrite to make windows)
"""
import torch
import torch.nn.functional as F
import torchaudio
from transformers import AutoConfig, Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification
import numpy as np
import pandas as pd
import os
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
# Path to the fine-tuned wav2vec2 checkpoint that will be evaluated.
MODEL_PATH = os.path.join("model", "xlsr_autism_stories", "checkpoint-10")
# NOTE(review): the variable is named TEST but this loads the *train* split CSV — confirm intended.
TEST = pd.read_csv(os.path.join("data", "splits", "stories_train_data_gender_False.csv"))
# Column of the CSV that holds the ground-truth class label.
LABEL_COL = "Diagnosis"
def speech_file_to_array_fn(path, sampling_rate):
    """Load an audio file and resample it to `sampling_rate`; return a 1-D numpy array."""
    waveform, source_rate = torchaudio.load(path)
    resample = torchaudio.transforms.Resample(source_rate, sampling_rate)
    return resample(waveform).squeeze().numpy()
def predict(path, sampling_rate):
    """Classify a single audio file.

    Uses the module-level `processor`, `model`, `config` and `device`.

    Parameters
    ----------
    path : str
        Path to the audio file.
    sampling_rate : int
        Target sampling rate expected by the feature extractor.

    Returns
    -------
    (str, float)
        Predicted label and the softmax confidence for that label.
    """
    speech = speech_file_to_array_fn(path, sampling_rate)
    features = processor(speech, sampling_rate=sampling_rate, return_tensors="pt", padding=True)
    input_values = features.input_values.to(device)
    attention_mask = features.attention_mask.to(device)
    with torch.no_grad():
        logits = model(input_values, attention_mask=attention_mask).logits
    scores = F.softmax(logits, dim=1).detach().cpu().numpy()[0]
    # Compute the argmax once instead of twice (original evaluated np.argmax twice).
    best = int(np.argmax(scores))
    pred = config.id2label[best]
    confidence = scores[best]
    return pred, confidence
def add_predicted_and_confidence(df):
    """Row-wise helper for DataFrame.apply: attach model prediction and its confidence."""
    label, score = predict(df["file"], target_sampling_rate)
    df["pred"] = label
    df["confidence"] = score
    return df
# setup model
# Run on GPU when available; inputs are moved to this device before the forward pass.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config = AutoConfig.from_pretrained(MODEL_PATH)
processor = Wav2Vec2FeatureExtractor.from_pretrained(MODEL_PATH)
# The feature extractor dictates the sampling rate the model was trained with.
target_sampling_rate = processor.sampling_rate
model = Wav2Vec2ForSequenceClassification.from_pretrained(MODEL_PATH).to(device)
# load test data
# apply predictions
# axis=1 applies the predictor once per row (one audio file per row).
test = TEST.apply(add_predicted_and_confidence, axis=1)
print(confusion_matrix(test[LABEL_COL], test["pred"]))
print(classification_report(test[LABEL_COL], test["pred"]))
acc = accuracy_score(test[LABEL_COL], test["pred"])
print(f"accuracy: {acc}") | [
"torch.nn.functional.softmax",
"transformers.AutoConfig.from_pretrained",
"sklearn.metrics.classification_report",
"torchaudio.load",
"transformers.Wav2Vec2ForSequenceClassification.from_pretrained",
"os.path.join",
"numpy.argmax",
"transformers.Wav2Vec2FeatureExtractor.from_pretrained",
"torchaudio... | [((418, 479), 'os.path.join', 'os.path.join', (['"""model"""', '"""xlsr_autism_stories"""', '"""checkpoint-10"""'], {}), "('model', 'xlsr_autism_stories', 'checkpoint-10')\n", (430, 479), False, 'import os\n'), ((1706, 1744), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1732, 1744), False, 'from transformers import AutoConfig, Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification\n'), ((1757, 1809), 'transformers.Wav2Vec2FeatureExtractor.from_pretrained', 'Wav2Vec2FeatureExtractor.from_pretrained', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1797, 1809), False, 'from transformers import AutoConfig, Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification\n'), ((2156, 2201), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test[LABEL_COL]', "test['pred']"], {}), "(test[LABEL_COL], test['pred'])\n", (2170, 2201), False, 'from sklearn.metrics import confusion_matrix, classification_report, accuracy_score\n'), ((499, 568), 'os.path.join', 'os.path.join', (['"""data"""', '"""splits"""', '"""stories_train_data_gender_False.csv"""'], {}), "('data', 'splits', 'stories_train_data_gender_False.csv')\n", (511, 568), False, 'import os\n'), ((680, 701), 'torchaudio.load', 'torchaudio.load', (['path'], {}), '(path)\n', (695, 701), False, 'import torchaudio\n'), ((718, 779), 'torchaudio.transforms.Resample', 'torchaudio.transforms.Resample', (['_sampling_rate', 'sampling_rate'], {}), '(_sampling_rate, sampling_rate)\n', (748, 779), False, 'import torchaudio\n'), ((2041, 2088), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test[LABEL_COL]', "test['pred']"], {}), "(test[LABEL_COL], test['pred'])\n", (2057, 2088), False, 'from sklearn.metrics import confusion_matrix, classification_report, accuracy_score\n'), ((2096, 2148), 'sklearn.metrics.classification_report', 'classification_report', (['test[LABEL_COL]', "test['pred']"], {}), "(test[LABEL_COL], test['pred'])\n", (2117, 
2148), False, 'from sklearn.metrics import confusion_matrix, classification_report, accuracy_score\n'), ((1163, 1178), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1176, 1178), False, 'import torch\n'), ((1347, 1364), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (1356, 1364), True, 'import numpy as np\n'), ((1390, 1407), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (1399, 1407), True, 'import numpy as np\n'), ((1659, 1684), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1682, 1684), False, 'import torch\n'), ((1865, 1926), 'transformers.Wav2Vec2ForSequenceClassification.from_pretrained', 'Wav2Vec2ForSequenceClassification.from_pretrained', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1914, 1926), False, 'from transformers import AutoConfig, Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification\n'), ((1269, 1293), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (1278, 1293), True, 'import torch.nn.functional as F\n')] |
# authors: anonymous
import numpy as np
import time
# Select an action based on the action-state function with a softmax strategy
def softmax_action(Q, s):
    """Sample an action for state `s` with probabilities softmax(Q[s, :]).

    The row maximum is subtracted before exponentiating for numerical
    stability: the probabilities are mathematically unchanged, but large
    Q-values no longer overflow np.exp into inf/NaN.
    """
    shifted = Q[s, :] - np.max(Q[s, :])
    exp_q = np.exp(shifted)
    proba = exp_q / exp_q.sum()
    nb_actions = Q.shape[1]
    return np.random.choice(nb_actions, p=proba)
# Select the greedy action for state s from the action-state function
def best_action(Q, s):
    """Return the index of the maximum of Q[s, :]."""
    return Q[s].argmax()
# Baseline policy: a row-wise softmax over a given function Q.
def compute_baseline(Q):
    """Return a (nb_states, nb_actions) matrix whose rows are softmax(Q[state])."""
    exp_q = np.exp(Q)
    return exp_q / exp_q.sum(axis=1, keepdims=True)
# Print a message prefixed with a colour-coded timestamp (ANSI escape codes).
def prt(s):
    """Print `s` preceded by the current local time, each in its own ANSI colour."""
    stamp_fmt = ';'.join(['0', '30', '41'])
    msg_fmt = ';'.join(['0', '31', '40'])
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    stamp_part = '\x1b[%sm %s \x1b[0m' % (stamp_fmt, stamp)
    msg_part = '\x1b[%sm %s \x1b[0m' % (msg_fmt, s)
    print(stamp_part + ' ' + msg_part)
# The reward function is defined on SxS, but we need it on SxA.
# This function makes the transformation based on the transition function P:
# reward[s, a] = sum_s' P[s, a, s'] * R[s, s'].
def get_reward_model(P, R):
    """Convert a reward on SxS' into a reward on SxA via the transition tensor P."""
    return (P * R[:, None, :]).sum(axis=2)
# Compute the performance of a policy given the corresponding action-state function
def compute_perf(env, gamma, Q=None, nb_trajectories=1000, max_steps=50, model=None, bootstrap=False, strategy_best=True):
    """Estimate the expected discounted return by rolling out episodes.

    Actions come from `model.predict(state, bootstrap)` when a model is given,
    otherwise from the greedy (`strategy_best=True`) or softmax policy derived
    from Q. Returns the mean discounted return over `nb_trajectories` episodes.
    """
    returns = []
    for _ in range(nb_trajectories):
        discounted = 0
        step = 0
        state = env.reset()
        if model is not None:
            model.new_episode()
        done = False
        while not done and step < max_steps:
            if model is not None:
                action = model.predict(int(state), bootstrap)
            elif strategy_best:
                action = best_action(Q, int(state))
            else:
                action = softmax_action(Q, int(state))
            state, reward, next_state, is_done = env.step(action)
            done = is_done
            discounted += reward * gamma ** step
            step += 1
            state = next_state
        returns.append(discounted)
    return np.mean(returns)
# Monte-Carlo estimate of the behaviour policy's Q from a batch of trajectories.
def compute_q_pib_est(gamma, nb_states, nb_actions, batch):
    """Average the discounted return observed after each (state, action) pair.

    Each trajectory element is [action, state, next_state, reward]; returns are
    accumulated backwards through the episode. Pairs never visited divide 0/0,
    which nan_to_num maps back to 0.
    """
    visits = np.zeros((nb_states, nb_actions))
    q_est = np.zeros((nb_states, nb_actions))
    for trajectory in batch:
        ret = 0
        for action, state, _next_state, reward in reversed(trajectory):
            visits[state, action] += 1
            ret = reward + gamma * ret
            q_est[state, action] += ret
    return np.nan_to_num(np.divide(q_est, visits))
# Generate a batch of trajectories under the stochastic policy pi.
def generate_batch(nb_trajectories, env, pi, easter_egg, max_steps=50):
    """Roll out episodes and return (list of trajectories, flat list of transitions).

    Each transition is [action, state, next_state, reward]; an episode ends
    after `max_steps` steps or when the environment reports termination.
    """
    trajectories = []
    for _ in range(nb_trajectories):
        episode = []
        state = env.reset()
        done = False
        steps = 0
        while steps < max_steps and not done:
            action = np.random.choice(pi.shape[1], p=pi[state])
            state, reward, next_state, done = env.step(action, easter_egg)
            episode.append([action, state, next_state, reward])
            state = next_state
            steps += 1
        trajectories.append(episode)
    flat = [transition for episode in trajectories for transition in episode]
    return trajectories, flat
| [
"numpy.mean",
"numpy.nan_to_num",
"numpy.divide",
"numpy.random.choice",
"numpy.argmax",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.einsum",
"time.localtime",
"numpy.arange"
] | [((249, 286), 'numpy.random.choice', 'np.random.choice', (['nb_actions'], {'p': 'proba'}), '(nb_actions, p=proba)\n', (265, 286), True, 'import numpy as np\n'), ((385, 403), 'numpy.argmax', 'np.argmax', (['Q[s, :]'], {}), '(Q[s, :])\n', (394, 403), True, 'import numpy as np\n'), ((523, 532), 'numpy.exp', 'np.exp', (['Q'], {}), '(Q)\n', (529, 532), True, 'import numpy as np\n'), ((1103, 1132), 'numpy.einsum', 'np.einsum', (['"""ijk,ik->ij"""', 'P', 'R'], {}), "('ijk,ik->ij', P, R)\n", (1112, 1132), True, 'import numpy as np\n'), ((1376, 1402), 'numpy.arange', 'np.arange', (['nb_trajectories'], {}), '(nb_trajectories)\n', (1385, 1402), True, 'import numpy as np\n'), ((2013, 2033), 'numpy.mean', 'np.mean', (['cum_rew_arr'], {}), '(cum_rew_arr)\n', (2020, 2033), True, 'import numpy as np\n'), ((2256, 2289), 'numpy.zeros', 'np.zeros', (['(nb_states, nb_actions)'], {}), '((nb_states, nb_actions))\n', (2264, 2289), True, 'import numpy as np\n'), ((2304, 2337), 'numpy.zeros', 'np.zeros', (['(nb_states, nb_actions)'], {}), '((nb_states, nb_actions))\n', (2312, 2337), True, 'import numpy as np\n'), ((2545, 2585), 'numpy.divide', 'np.divide', (['q_pib_est', 'count_state_action'], {}), '(q_pib_est, count_state_action)\n', (2554, 2585), True, 'import numpy as np\n'), ((2595, 2619), 'numpy.nan_to_num', 'np.nan_to_num', (['q_pib_est'], {}), '(q_pib_est)\n', (2608, 2619), True, 'import numpy as np\n'), ((2765, 2791), 'numpy.arange', 'np.arange', (['nb_trajectories'], {}), '(nb_trajectories)\n', (2774, 2791), True, 'import numpy as np\n'), ((176, 191), 'numpy.exp', 'np.exp', (['Q[s, :]'], {}), '(Q[s, :])\n', (182, 191), True, 'import numpy as np\n'), ((542, 566), 'numpy.sum', 'np.sum', (['baseline'], {'axis': '(1)'}), '(baseline, axis=1)\n', (548, 566), True, 'import numpy as np\n'), ((2937, 2979), 'numpy.random.choice', 'np.random.choice', (['pi.shape[1]'], {'p': 'pi[state]'}), '(pi.shape[1], p=pi[state])\n', (2953, 2979), True, 'import numpy as np\n'), ((192, 207), 
'numpy.exp', 'np.exp', (['Q[s, :]'], {}), '(Q[s, :])\n', (198, 207), True, 'import numpy as np\n'), ((832, 848), 'time.localtime', 'time.localtime', ([], {}), '()\n', (846, 848), False, 'import time\n')] |
import numpy as np
import os
from struct import unpack
from .defaultreader import DefaultReader
class StlReader(DefaultReader):
    """
    Reader for STL mesh files, supporting both the binary and the ASCII variant.

    @type _facets: dict[str, list[tuple[tuple[float]]]]
    @type _norms: dict[str, list[tuple[float]]]
    """
    def __init__(self):
        # Maps object name -> list of triangles / list of normals, filled by read().
        self._facets = {}
        self._norms = {}
    @staticmethod
    def read_binary(file_path):
        """
        Created on Thu Nov 19 06:37:35 2013
        @author: <NAME>
        Reads a Binary file and
        Returns Header,Points,Normals,Vertex1,Vertex2,Vertex3
        Source: http://sukhbinder.wordpress.com/2013/11/28/binary-stl-file-reader-in-python-powered-by-numpy/
        @type file_path: str
        @rtype:
        """
        fp = open(file_path, 'rb')
        # Binary STL layout: 80-byte header, uint32 facet count, then fixed-size records.
        header = fp.read(80)
        nn = fp.read(4)
        number_of_facets = unpack('i', nn)[0]
        # One 50-byte record per facet: normal, three vertices, 2-byte attribute count.
        record_dtype = np.dtype([
            ('normals', np.float32, (3,)),
            ('Vertex1', np.float32, (3,)),
            ('Vertex2', np.float32, (3,)),
            ('Vertex3', np.float32, (3,)),
            ('atttr', '<i2', (1,))
        ])
        data = np.fromfile(fp, dtype=record_dtype, count=number_of_facets)
        fp.close()
        normals = data['normals']
        vertex_1 = data['Vertex1']
        vertex_2 = data['Vertex2']
        vertex_3 = data['Vertex3']
        # p = np.append(vertex_1, vertex_2, axis=0)
        # p = np.append(p, vertex_3, axis=0)  # list(v1)
        # points = np.array(list(set(tuple(p1) for p1 in p)))
        return header, normals, vertex_1, vertex_2, vertex_3
    @staticmethod
    def parse_askii_verticle(input_stream):
        """
        Parse one vertex line of the form:
        'vertex 0.0 0.0 0.0'
        @param input_stream:
        @rtype: (float, float, float)
        """
        _, verticle_x, verticle_y, verticle_z = input_stream.readline().strip().split(' ')
        return float(verticle_x), float(verticle_y), float(verticle_z),
    @staticmethod
    def parse_askii_triangle(input_stream):
        """
        Parse one 'outer loop' ... 'endloop' section containing:
        'vertex 0.0 0.0 0.0' x3
        @param input_stream:
        @rtype: ((float, float, float), (float, float, float), (float, float, float))
        """
        assert input_stream.readline().strip().startswith("outer loop")
        triangle = (
            StlReader.parse_askii_verticle(input_stream),
            StlReader.parse_askii_verticle(input_stream),
            StlReader.parse_askii_verticle(input_stream))
        assert input_stream.readline().strip().startswith("endloop")
        return triangle
    @staticmethod
    def parse_askii_list_of_facets(input_stream):
        """
        Lazily yield (normal, triangle) pairs until 'endsolid' is reached.
        Each facet looks like:
        'facet normal 0.0 -1.0 0.0'
        'outer loop'
        'vertex 0.0 0.0 0.0' x3
        'endloop'
        'endfacet'
        @param input_stream:
        @rtype: collections.Iterable[((float, float, float), ((float, float, float), (float, float, float), (float, float, float)))]
        """
        line = input_stream.readline().strip()
        while not line.startswith("endsolid"):
            # NOTE: normal components are yielded as strings here, not floats.
            _, _, normal_x, normal_y, normal_z = line.split(' ')
            triangle = StlReader.parse_askii_triangle(input_stream)
            assert input_stream.readline().strip().startswith("endfacet")
            yield (normal_x, normal_y, normal_z), triangle
            line = input_stream.readline().strip()
    @staticmethod
    def parse_askii_solids(input_stream):
        """
        Lazily yield (name, facet-generator) pairs for every solid in the stream:
        'solid cube_corner'
        'facet normal 0.0 -1.0 0.0'
        'outer loop'
        'vertex 0.0 0.0 0.0' x3
        'endloop'
        'endfacet'
        'endsolid'
        @param input_stream:
        @rtype: collections.Iterable[(str, collections.Iterable[((float, float, float), ((float, float, float), (float, float, float), (float, float, float)))]])]
        """
        line = input_stream.readline()
        while line:
            line = line.strip()
            assert line.startswith("solid"), line
            _, name = line.split(' ', 1)
            # print(line)
            # The inner generator must be consumed before the next iteration
            # advances the shared stream.
            yield name, StlReader.parse_askii_list_of_facets(input_stream)
            line = input_stream.readline()
        input_stream.close()
    @staticmethod
    def read_askii_stl(file_path):
        """
        Open an ASCII STL file and return the lazy solid parser over it.
        @type file_path: str
        @rtype: collections.Iterable[(str, collections.Iterable[((float, float, float), ((float, float, float), (float, float, float), (float, float, float)))]])]
        """
        assert os.path.exists(file_path), "Bad path: {}".format(file_path)
        return StlReader.parse_askii_solids(open(file_path, 'r'))
    @staticmethod
    def _is_ascii_stl(file_path):
        """
        Heuristic format check: ASCII STL files start with 'solid'.
        NOTE(review): some binary STL files also start their 80-byte header with
        'solid', which would misclassify them here — confirm against real inputs.
        @type file_path: str
        @rtype: bool
        """
        with open(file_path, 'rb') as input_data:
            line = input_data.readline()
        if line.startswith(b'solid'):
            return True
        else:
            return False
    def read(self, file_path):
        """
        Parse `file_path` (ASCII or binary STL) and populate _facets/_norms.
        @type file_path: str
        @rtype: None
        """
        del self._facets
        del self._norms
        self._facets = {}
        self._norms = {}
        if StlReader._is_ascii_stl(file_path):
            for name, facets in StlReader.read_askii_stl(file_path):
                assert name not in self._facets, "Objects in file are not unique"
                self._facets[name] = []
                self._norms[name] = []
                for normal, (v1, v2, v3) in facets:
                    self._facets[name].append((v1, v2, v3))
                    self._norms[name].append(normal)
        else:
            # Binary STL has no object names; everything goes under "obj".
            head, n, v1, v2, v3 = StlReader.read_binary(file_path)
            self._facets["obj"] = []
            self._norms["obj"] = []
            for norm, vertex_1, vertex_2, vertex_3 in zip(n, v1, v2, v3):
                # yield (tuple(i), tuple(j), tuple(k))
                self._facets["obj"].append((vertex_1, vertex_2, vertex_3))
                self._norms["obj"].append(norm)
    def get_names(self):
        """
        Names of the objects parsed by the last read().
        @rtype: collections.Iterable[str]
        """
        return self._facets.keys()
    def get_facets(self, name=None):
        """
        Yield triangles for one named object, or for all objects when name is None.
        @rtype: collections.Iterable[((float, float, float), (float, float, float), (float, float, float))]
        """
        if name:
            assert name in self._facets, "Unknown object: {}".format(name)
            for facet in self._facets[name]:
                yield facet
        else:
            assert name is None, "Unknown object: {}".format(name)
            for name in self._facets:
                for facet in self._facets[name]:
                    yield facet
    def has_triangular_facets(self):
        """
        Whether all facets are triangles (STL facets always are).
        @rtype: bool
        """
        # todo: is this always the case?
        return True
| [
"os.path.exists",
"numpy.dtype",
"struct.unpack",
"numpy.fromfile"
] | [((879, 1046), 'numpy.dtype', 'np.dtype', (["[('normals', np.float32, (3,)), ('Vertex1', np.float32, (3,)), ('Vertex2',\n np.float32, (3,)), ('Vertex3', np.float32, (3,)), ('atttr', '<i2', (1,))]"], {}), "([('normals', np.float32, (3,)), ('Vertex1', np.float32, (3,)), (\n 'Vertex2', np.float32, (3,)), ('Vertex3', np.float32, (3,)), ('atttr',\n '<i2', (1,))])\n", (887, 1046), True, 'import numpy as np\n'), ((1123, 1182), 'numpy.fromfile', 'np.fromfile', (['fp'], {'dtype': 'record_dtype', 'count': 'number_of_facets'}), '(fp, dtype=record_dtype, count=number_of_facets)\n', (1134, 1182), True, 'import numpy as np\n'), ((4403, 4428), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (4417, 4428), False, 'import os\n'), ((837, 852), 'struct.unpack', 'unpack', (['"""i"""', 'nn'], {}), "('i', nn)\n", (843, 852), False, 'from struct import unpack\n')] |
import sys, argparse
sys.path.append('game/')
import flappy_wrapped as game
import cv2
import numpy as np
import collections
import torch
import torch.nn as nn
import torch.optim as optim
# 3x3 sharpening kernel applied to every processed frame.
KERNEL = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
def processFrame(frame):
    """Crop, grey-scale, downsample, threshold and sharpen a frame into an 84x84 float image in [0, 1]."""
    cropped = frame[55:288, 0:400]
    grey = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
    small = cv2.resize(grey, (84, 84), interpolation=cv2.INTER_AREA)
    _, binary = cv2.threshold(small, 50, 255, cv2.THRESH_BINARY)
    sharpened = cv2.filter2D(binary, -1, KERNEL)
    return sharpened.astype(np.float64) / 255.0
# Dueling DQN: a shared conv trunk feeding separate advantage and value heads.
class DDQN(nn.Module):
    def __init__(self, input_shape, nactions):
        """Build the conv feature extractor, advantage head (fca) and value head (fcv)."""
        super(DDQN, self).__init__()
        self.nactions = nactions
        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[0], 32, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=2, stride=1),
            nn.ReLU()
        )
        flat_size = self._get_conv_out(input_shape)
        self.fca = nn.Sequential(
            nn.Linear(flat_size, 512),
            nn.ReLU(),
            nn.Linear(512, nactions)
        )
        self.fcv = nn.Sequential(
            nn.Linear(flat_size, 512),
            nn.ReLU(),
            nn.Linear(512, 1)
        )
    def _get_conv_out(self, shape):
        """Number of features the conv trunk emits for a single input of `shape`."""
        probe = self.conv(torch.zeros(1, *shape))
        return int(np.prod(probe.size()))
    def forward(self, x):
        """Dueling aggregation: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)."""
        feats = self.conv(x).view(x.size()[0], -1)
        advantage = self.fca(feats)
        value = self.fcv(feats).expand(x.size(0), self.nactions)
        return value + advantage - advantage.mean(1).unsqueeze(1).expand(x.size(0), self.nactions)
STATE_DIM = 4
SKIP_FRAME = 2
INITIAL_SKIP = [0,1,0,1,0,1,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1]
# Play a fixed opening action sequence; keep the last STATE_DIM processed frames.
def initial_autoplay(env):
    """Step the game through INITIAL_SKIP[:-1] and return a deque of recent frames.

    The original four consecutive loops over [:-7], [-7:-5], [-5:-3] and [-3:-1]
    together cover exactly INITIAL_SKIP[:-1], so a single pass is equivalent.
    """
    state = collections.deque(maxlen=STATE_DIM)
    for action in INITIAL_SKIP[:-1]:
        frame, reward, done = env.frame_step(action)
        state.append(processFrame(frame))
    return state
if __name__=='__main__':
    device = torch.device( "cuda" if torch.cuda.is_available() else "cpu" )
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", required=True, help="Model file to load")
    env = game.GameState()
    args = parser.parse_args()
    net = DDQN( (STATE_DIM,84,84), 2 ).to(device)
    # NOTE(review): --model is parsed but never used; the checkpoint path below
    # is hardcoded — presumably args.model was intended here. Confirm.
    net.load_state_dict(torch.load('checkpoints/flappy_best_model.dat'))
    input("Please Press Enter to Start")
    state = initial_autoplay(env)
    total_rewards = 0
    # Play forever: pick the greedy action, repeat it for SKIP_FRAME extra frames,
    # then push the newest processed frame into the rolling state.
    while True:
        state_v = torch.tensor(np.array([state],copy=False),dtype=torch.float32).to(device)
        action = int(torch.argmax(net(state_v)))
        frame,reward,done = env.frame_step(action)
        total_rewards += reward
        for _ in range(SKIP_FRAME):
            frame,reward,done = env.frame_step(action)
            total_rewards += reward
            if done:
                break
        frame = processFrame(frame)
        state.append(frame)
| [
"torch.nn.ReLU",
"collections.deque",
"argparse.ArgumentParser",
"cv2.threshold",
"torch.load",
"flappy_wrapped.GameState",
"cv2.filter2D",
"torch.nn.Conv2d",
"numpy.array",
"torch.cuda.is_available",
"cv2.cvtColor",
"torch.nn.Linear",
"cv2.resize",
"sys.path.append",
"torch.zeros"
] | [((21, 45), 'sys.path.append', 'sys.path.append', (['"""game/"""'], {}), "('game/')\n", (36, 45), False, 'import sys, argparse\n'), ((198, 249), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n', (206, 249), True, 'import numpy as np\n'), ((325, 364), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (337, 364), False, 'import cv2\n'), ((410, 467), 'cv2.resize', 'cv2.resize', (['frame', '(84, 84)'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, (84, 84), interpolation=cv2.INTER_AREA)\n', (420, 467), False, 'import cv2\n'), ((481, 529), 'cv2.threshold', 'cv2.threshold', (['frame', '(50)', '(255)', 'cv2.THRESH_BINARY'], {}), '(frame, 50, 255, cv2.THRESH_BINARY)\n', (494, 529), False, 'import cv2\n'), ((539, 570), 'cv2.filter2D', 'cv2.filter2D', (['frame', '(-1)', 'KERNEL'], {}), '(frame, -1, KERNEL)\n', (551, 570), False, 'import cv2\n'), ((2014, 2049), 'collections.deque', 'collections.deque', ([], {'maxlen': 'STATE_DIM'}), '(maxlen=STATE_DIM)\n', (2031, 2049), False, 'import collections\n'), ((2751, 2776), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2774, 2776), False, 'import sys, argparse\n'), ((2875, 2891), 'flappy_wrapped.GameState', 'game.GameState', ([], {}), '()\n', (2889, 2891), True, 'import flappy_wrapped as game\n'), ((2997, 3044), 'torch.load', 'torch.load', (['"""checkpoints/flappy_best_model.dat"""'], {}), "('checkpoints/flappy_best_model.dat')\n", (3007, 3044), False, 'import torch\n'), ((827, 881), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_shape[0]', '(32)'], {'kernel_size': '(4)', 'stride': '(2)'}), '(input_shape[0], 32, kernel_size=4, stride=2)\n', (836, 881), True, 'import torch.nn as nn\n'), ((892, 901), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (899, 901), True, 'import torch.nn as nn\n'), ((915, 957), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)'], {'kernel_size': '(3)', 
'stride': '(2)'}), '(32, 64, kernel_size=3, stride=2)\n', (924, 957), True, 'import torch.nn as nn\n'), ((968, 977), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (975, 977), True, 'import torch.nn as nn\n'), ((991, 1033), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(2)', 'stride': '(1)'}), '(64, 64, kernel_size=2, stride=1)\n', (1000, 1033), True, 'import torch.nn as nn\n'), ((1044, 1053), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1051, 1053), True, 'import torch.nn as nn\n'), ((1184, 1213), 'torch.nn.Linear', 'nn.Linear', (['conv_out_size', '(512)'], {}), '(conv_out_size, 512)\n', (1193, 1213), True, 'import torch.nn as nn\n'), ((1228, 1237), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1235, 1237), True, 'import torch.nn as nn\n'), ((1251, 1275), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'nactions'], {}), '(512, nactions)\n', (1260, 1275), True, 'import torch.nn as nn\n'), ((1343, 1372), 'torch.nn.Linear', 'nn.Linear', (['conv_out_size', '(512)'], {}), '(conv_out_size, 512)\n', (1352, 1372), True, 'import torch.nn as nn\n'), ((1385, 1394), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1392, 1394), True, 'import torch.nn as nn\n'), ((1408, 1425), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1)'], {}), '(512, 1)\n', (1417, 1425), True, 'import torch.nn as nn\n'), ((1502, 1524), 'torch.zeros', 'torch.zeros', (['(1)', '*shape'], {}), '(1, *shape)\n', (1513, 1524), False, 'import torch\n'), ((2699, 2724), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2722, 2724), False, 'import torch\n'), ((3195, 3224), 'numpy.array', 'np.array', (['[state]'], {'copy': '(False)'}), '([state], copy=False)\n', (3203, 3224), True, 'import numpy as np\n')] |
# -*- coding:utf8 -*
import heapq
import logging
import time
import pandas as pd
import numpy as np
import random
from ..util import sample_ints
from .model_based_tuner import ModelOptimizer, knob2point, point2knob
logger = logging.getLogger('autotvm')
class RegOptimizer(ModelOptimizer):
    # Model optimizer that ranks every point of the config space by predicted
    # score and returns the best unvisited ones.
    def __init__(self, task, n_iter=500, temp=(1, 0), persistent=True, parallel_size=128,
                 early_stop=50, log_interval=50):
        super(RegOptimizer, self).__init__()
        self.task = task
        # Size of each knob dimension of the tuning config space.
        self.dims = [len(x) for x in self.task.config_space.space_map.values()]
        self.n_iter = n_iter
        self.temp = temp
        self.persistent = persistent
        self.parallel_size = min(parallel_size, len(self.task.config_space))
        self.early_stop = early_stop or 1e9
        self.log_interval = log_interval
        self.points = None
        self.find_maximums_count = 0
    def find_maximums(self, model, num, exclusive):
        # Typical call: maximums = self.model_optimizer.find_maximums(base_model, self.plan_size, self.visited)
        """Find maximum of a cost model
        Note we use cost model to predict GFLOPS, so we should find the maximum
        Parameters
        ----------
        model: CostModel
            Cost model
        num: int -----> common value corresponds to plan_size = 64
            The number of returned maximum points
        exclusive: set, optional
            The excluded set of this optimizer. Return results won't include any
            elements in this set.
        """
        #print("regOptimizer find maximums!")
        # Enumerate every index of the config space (starting at 1) and drop
        # the already-visited ones.
        points = np.array(np.arange(1, len(self.task.config_space), 1)).astype('int32')
        pointset = set(points)
        result = pointset - exclusive
        points = np.array(list(result))
        # <class 'numpy.ndarray'>
        if len(points) == 0:
            print("result is null!")
            return list(points)
        scores = model.predict(points)
        # NOTE(review): the 64 below is hardcoded rather than using `num`;
        # with <= 64 candidates all of them are returned unranked — confirm intended.
        if scores.shape[0] > 64:
            # print("case1")
            new_points = self.top_num(points, scores, num, exclusive) # top 64
        if scores.shape[0] <= 64:
            # print("case2")
            return list(points)
        return new_points
    def top_num(self, points, scores, num, exclusive):
        # Return up to `num` point indices with the highest predicted scores,
        # skipping excluded points and bailing out after scanning half the space.
        # tic = time.time()
        points = points.reshape((-1, 1))
        scores = scores.reshape((-1, 1))
        data = np.append(points, scores, axis=1)
        dataframe = pd.DataFrame(data, columns=['index', 'score'])
        sorteddf = dataframe.sort_values(by="score", ascending=False)
        res = []
        count = 0
        ex_count = 0
        config_space = len(self.task.config_space)
        for i, row in sorteddf.iterrows():
            # print(row['index'],'---->' ,row['score'])
            ex_count += 1
            if not exclusive.__contains__(row['index']) and count < num and row['score'] > 1e-9:
                res.append(int(row['index']))
                count += 1
            if count == num or ex_count >= config_space // 2 or len(exclusive) >= config_space // 2:
                # print("top num break")
                break
        # print("top num cost time:", time.time() - tic)
        return res
    def top_num_expand(self, points, scores, num, exclusive):
        # Like top_num, but first collect num * expand_factors best candidates,
        # then randomly sample `num` of them to add exploration.
        # tic = time.time()
        points = points.reshape((-1, 1))
        scores = scores.reshape((-1, 1))
        data = np.append(points, scores, axis=1)
        dataframe = pd.DataFrame(data, columns=['index', 'score'])
        sorteddf = dataframe.sort_values(by="score", ascending=False)
        res = []
        count = 0
        expand_factors = 2.0
        for i, row in sorteddf.iterrows():
            # print(row['index'], '---->',row['score'])
            if not exclusive.__contains__(row['index']) and count < num * expand_factors:
                res.append(row['index'])
                count += 1
            if count == num * expand_factors:
                break
        res_expand_temp = random.sample(res, num)
        res_expand = []
        for r in res_expand_temp:
            res_expand.append(int(r))
        # print("top num expand cost time:", time.time() - tic)
        return res_expand
| [
"logging.getLogger",
"random.sample",
"numpy.append",
"pandas.DataFrame"
] | [((227, 255), 'logging.getLogger', 'logging.getLogger', (['"""autotvm"""'], {}), "('autotvm')\n", (244, 255), False, 'import logging\n'), ((2453, 2486), 'numpy.append', 'np.append', (['points', 'scores'], {'axis': '(1)'}), '(points, scores, axis=1)\n', (2462, 2486), True, 'import numpy as np\n'), ((2507, 2553), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['index', 'score']"}), "(data, columns=['index', 'score'])\n", (2519, 2553), True, 'import pandas as pd\n'), ((3454, 3487), 'numpy.append', 'np.append', (['points', 'scores'], {'axis': '(1)'}), '(points, scores, axis=1)\n', (3463, 3487), True, 'import numpy as np\n'), ((3508, 3554), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['index', 'score']"}), "(data, columns=['index', 'score'])\n", (3520, 3554), True, 'import pandas as pd\n'), ((4040, 4063), 'random.sample', 'random.sample', (['res', 'num'], {}), '(res, num)\n', (4053, 4063), False, 'import random\n')] |
import numpy as np
import pandas as pd
import sqlite3
import datetime as dt
from bs4 import BeautifulSoup as BS
from os.path import basename
import time
import requests
import csv
import re
import pickle
def name_location_scrapper(url): # scrapes a list of teams and their urls
    """Fetch `url` and return parallel lists of squad hrefs and team names.

    Reads the first table's body and pulls every anchor out of the cells
    marked data-stat="squad".
    """
    page = requests.get(url)
    soup = BS(page.content, 'html.parser')
    first_table = soup.find_all('table')[0]
    body = first_table.find_all('tbody')[0]
    squad_cells = body.find_all('td', attrs={'data-stat': 'squad'})
    team_link = []
    team_name = []
    for cell in squad_cells:
        for anchor in cell.find_all('a'):
            team_link.append(anchor['href'])
            team_name.append(anchor.text)
    return team_link, team_name
def epl_link_cleaner(lst_of_team_urls, team_name): # reworks epl team website endings to complete urls
    """Build full fbref history-page URLs from squad hrefs and their team names."""
    links = []
    for href, name in zip(lst_of_team_urls, team_name):
        squad_id = href.split('/')[3]
        links.append('https://fbref.com/en/squads/' + squad_id + '/history/'
                     + name + '-Stats-and-History')
    return links
def pickler(input, output): # pickles variables needing saving
    """Serialize `input` to the file path `output` with the highest pickle protocol."""
    with open(output, 'wb') as fh:
        pickle.dump(input, fh, pickle.HIGHEST_PROTOCOL)
def unpickler(file): # unpickles those variables
    """Load and return the object pickled at path `file`."""
    with open(file, 'rb') as fh:
        return pickle.load(fh)
def team_domestic_league_df_creator(lst): # starts epl team statistics tables
    """Seed the per-table DataFrame dict from the first team URL in `lst`.

    Downloads the team badge image into the current directory, parses the
    first three HTML tables on the page into DataFrames (keyed 0..2), pickles
    the dict to 'df_dict.pickle' and returns it.
    """
    url = lst[0]
    r = requests.get(url)
    soup = BS(r.content,'html.parser')
    badge = soup.find_all('img',attrs={'class':'teamlogo'})
    badge_pic = badge[0]['src']
    # Save the badge using only its basename, in the current working directory.
    with open(basename(badge_pic),'wb') as f:
        f.write(requests.get(badge_pic).content)
    tables = soup.find_all('table')
    tabs = tables[:3]  # only the first three tables on the page are used
    df_dict = {}
    for table in range(len(tabs)):
        bodys = tabs[table].find_all('tbody')
        heads = tabs[table].find_all('thead')
        for head in heads:
            hds = head.find_all('th')
            cols = [hd.text for hd in hds[1:]]  # skip the leading season header cell
        rows = bodys[0].find_all('tr')
        data = []
        seasons = []
        for row in rows:
            row_tds = row.find_all('td')
            # The season label sits in the row-scoped <th>, not in a <td>.
            yrs = row.find_all('th',attrs={'scope':'row'})
            yr = [y.text for y in yrs]
            r = [rtd.text for rtd in row_tds]
            data.append(r)
            seasons.append(yr)
        df = pd.DataFrame(data,columns=cols)
        df['year'] = seasons
        df_dict[table] = df
    pickler(df_dict,'df_dict.pickle')
    return df_dict
def team_df_appender(lst,dom_df,icup_df,dcup_df): # appends epl team by team statistics to the table created above
    """Scrape every remaining team URL and grow the three results DataFrames.

    For each team page: saves the badge image, parses every captioned table,
    and concatenates rows into the domestic-league, international-cup and
    domestic-cup DataFrames. Progress is pickled after each table so a dropped
    connection loses at most one team. Returns the three updated DataFrames.

    NOTE(review): the pickle paths below are absolute paths on the author's
    machine — they will need adjusting to run elsewhere.
    """
    for site in lst[1:]:
        url = site
        r = requests.get(url)
        print(url)
        soup = BS(r.content,'lxml')
        badge = soup.find_all('img',attrs={'class':'teamlogo'})
        badge_pic = badge[0]['src']
        with open(basename(badge_pic),'wb') as f:
            f.write(requests.get(badge_pic).content)
        tables = soup.find_all('table')
        df_dict = {}
        caption_text = []
        # Pair each table with its <caption> text so tables can be routed by name.
        for tab in tables:
            cap = tab.select('caption')
            for c in cap:
                caption_text.append(c.get_text(strip=True))
        for tabs,caps in zip(range(len(tables)),caption_text):
            df_dict[caps] = tables[tabs]
        for table_name in df_dict.keys():
            bodys = df_dict[table_name].find_all('tbody')
            heads = df_dict[table_name].find_all('thead')
            for head in heads:
                hds = head.find_all('th')
                cols = [hd.text for hd in hds[1:]]
            rows = bodys[0].find_all('tr')
            seasons = []
            data = []
            for row in rows:
                row_tds = row.find_all('td')
                yrs = row.find_all('th',attrs={'scope':'row'})
                yr = [y.text for y in yrs]
                r = [rtd.text for rtd in row_tds]
                data.append(r)
                seasons.append(yr)
            df = pd.DataFrame(data,columns=cols)
            df['year'] = seasons
            # Route the parsed table into the matching accumulator; the broad
            # excepts keep the scrape going when a page's table layout differs.
            if table_name == 'Domestic Leagues Results Table':
                try:
                    dom_df = pd.concat([dom_df,df],axis=0,join='outer')
                    dom_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_league_df_working.pickle' # saves progress in case of
                    # connection issues
                    pickler(dom_df,dom_file)
                except:
                    print(f'{url} dom_league passed!! Try again')
            elif table_name == 'International Cup Results Table':
                try:
                    icup_df = pd.concat([icup_df,df],axis=0,join='outer')
                    icup_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_intnl_cup_df_working.pickle'
                    pickler(icup_df,icup_file)
                except:
                    print(f'{url} icup passed!! Try again')
            elif table_name == 'Domestic Cup Results Table':
                try:
                    dcup_df = pd.concat([dcup_df,df],axis=0,join='outer')
                    dcup_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_cup_df_working.pickle'
                    pickler(dcup_df,dcup_file)
                except:
                    print(f'{url} dcup passed!! Try again')
        # Random 0-4s pause between teams to stay polite to the server.
        wait = np.random.randint(5,size=1)
        time.sleep(wait)
    return dom_df, icup_df, dcup_df
def nfl_team_df_creator(lst):
    """Scrape the first NFL team page in ``lst`` and start the stats table.

    Downloads the team logo image into the working directory, then parses
    the page's first statistics table (most recent 22 seasons) into a
    DataFrame with one row per season and a trailing ``year`` column.
    """
    page = requests.get(lst[0])
    soup = BS(page.content, 'html.parser')
    logo_src = soup.find_all('img', attrs={'class': 'teamlogo'})[0]['src']
    with open(basename(logo_src), 'wb') as logo_file:
        logo_file.write(requests.get(logo_src).content)
    tables = soup.find_all('table')
    bodies = soup.find_all('tbody')
    heads = soup.find_all('thead')
    # the last header row wins; its cells (minus the leading season
    # column) become the DataFrame columns
    for header_row in heads[0].find_all('tr'):
        header_cells = header_row.find_all('th')
        cols = [cell.text for cell in header_cells[1:]]
    data = []
    y_played = []
    # keep only the first 22 body rows (seasons 2020 back to 2002)
    for body_row in bodies[0].find_all('tr')[:22]:
        season_cells = body_row.find_all('th', attrs={'scope': 'row'})
        stat_cells = body_row.find_all('td')
        data.append([cell.text for cell in stat_cells])
        y_played.append([cell.text for cell in season_cells])
    df = pd.DataFrame(data, columns=cols)
    df['year'] = y_played
    return df
def nfl_df_appender(df_to_append, lst):
    """Append season statistics for every remaining NFL team.

    Parameters
    ----------
    df_to_append : pandas.DataFrame
        Table started by ``nfl_team_df_creator``; one team's seasons.
    lst : list of str
        Team page URLs. ``lst[0]`` is skipped because it was already
        scraped by ``nfl_team_df_creator``.

    Returns
    -------
    pandas.DataFrame
        The input table with every remaining team's seasons concatenated
        on. Progress is pickled after each team in case the connection
        drops.
    """
    for url in lst[1:]:
        print(url)
        r = requests.get(url)
        soup = BS(r.content, 'html.parser')
        badge = soup.find_all('img', attrs={'class': 'teamlogo'})
        badge_pic = badge[0]['src']
        with open(basename(badge_pic), 'wb') as f:
            f.write(requests.get(badge_pic).content)
        tables = soup.find_all('table')
        tbod = soup.find_all('tbody')
        thead = soup.find_all('thead')
        head_rows = thead[0].find_all('tr')
        # last header row wins; drop the leading season column
        for hr in head_rows:
            hds = hr.find_all('th')
            cols = [hd.text for hd in hds[1:]]
        trows = tbod[0].find_all('tr')
        data = []
        y_played = []
        for tr in trows[:22]:  # table rows for seasons 2020 - 2002
            tds = tr.find_all('td')
            yrs = tr.find_all('th', attrs={'scope': 'row'})
            data.append([td_.text for td_ in tds])
            y_played.append([y.text for y in yrs])
        df = pd.DataFrame(data, columns=cols)
        df['year'] = y_played
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported equivalent (and matches the EPL scraper's style).
        df_to_append = pd.concat([df_to_append, df], axis=0)
        # save progress after every team in case of connection issues
        pickler(df_to_append, '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/working_nfl_df.pickle')
        wait = np.random.randint(5, size=1)
        time.sleep(wait)
    return df_to_append
if __name__ == '__main__':
    # NOTE(review): this is a staged scraping pipeline -- several steps
    # re-load data that earlier steps pickled, so it is evidently meant
    # to be run a few lines at a time rather than top to bottom.
    team_link, team_name = name_location_scrapper('https://fbref.com/en/players/') # creates epl team url locations
    team_links = epl_link_cleaner(team_link,team_name) # cleans url locations to connectable website pages
    pickler(team_links,'team_links.pickle') # saves the names and urls of epl teams
    team_urls = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data//pickles/epl_team_links.pickle')
    team_urls = [x.replace(' ','-') for x in team_urls] # fixes an issue with teams with 2 names to replace the space between names with a dash
    team_start_df = team_domestic_league_df_creator(team_urls) # starts epl team stats table
    team_starter_df = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_df_dict.pickle')
    domestic_df = team_starter_df[0] # creates a variable for one of the 3 tables scraped for each team
    intnl_cup_df = team_starter_df[1] # creates a variable for one of the 3 tables scraped for each team
    dom_cup_df = team_starter_df[2] # creates a variable for one of the 3 tables scraped for each team
    dom_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_league_df.pickle' # path to the pickled epl team domestic league stats table
    d_lg_df = unpickler(dom_file)
    int_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_intnl_cup_df.pickle' # path to the pickled epl team international cup stats table
    i_cp_df = unpickler(int_file)
    domcup_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_cup_df.pickle' # path to the pickled epl team domestic cup stats table
    d_cp_df = unpickler(domcup_file)
    domestic_df, intnl_cup_df, dom_cup_df = team_df_appender(lst=team_urls,dom_df=domestic_df,icup_df=intnl_cup_df,dcup_df=dom_cup_df) # fills out the 3 started tables
    # final destinations for the three completed EPL tables
    dom_full = r'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_league_df_full.pickle'
    icup_full = r'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_intnl_cup_df_full.pickle'
    dcup_full = r'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_cup_df_full.pickle'
    pickler(domestic_df,dom_full)
    pickler(intnl_cup_df,icup_full)
    pickler(dom_cup_df,dcup_full)
    nfl_team_urls = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_team_links.pickle') # starts nfl team stats table
    nfl_start_df = nfl_team_df_creator(nfl_team_urls)
    pickler(nfl_start_df,'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_start_df.pickle')
    nfl_start_df = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_start_df.pickle') # finish nfl team stats table
    nfl_df = nfl_df_appender(nfl_start_df,nfl_team_urls)
    pickler(nfl_start_df,'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_df.pickle')
"pickle.dump",
"pickle.load",
"requests.get",
"time.sleep",
"bs4.BeautifulSoup",
"numpy.random.randint",
"os.path.basename",
"pandas.DataFrame",
"pandas.concat"
] | [((295, 312), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (307, 312), False, 'import requests\n'), ((324, 352), 'bs4.BeautifulSoup', 'BS', (['r.content', '"""html.parser"""'], {}), "(r.content, 'html.parser')\n", (326, 352), True, 'from bs4 import BeautifulSoup as BS\n'), ((1488, 1505), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1500, 1505), False, 'import requests\n'), ((1517, 1545), 'bs4.BeautifulSoup', 'BS', (['r.content', '"""html.parser"""'], {}), "(r.content, 'html.parser')\n", (1519, 1545), True, 'from bs4 import BeautifulSoup as BS\n'), ((5850, 5867), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5862, 5867), False, 'import requests\n'), ((5879, 5907), 'bs4.BeautifulSoup', 'BS', (['r.content', '"""html.parser"""'], {}), "(r.content, 'html.parser')\n", (5881, 5907), True, 'from bs4 import BeautifulSoup as BS\n'), ((6701, 6733), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (6713, 6733), True, 'import pandas as pd\n'), ((1198, 1244), 'pickle.dump', 'pickle.dump', (['input', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(input, f, pickle.HIGHEST_PROTOCOL)\n', (1209, 1244), False, 'import pickle\n'), ((1359, 1373), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1370, 1373), False, 'import pickle\n'), ((2405, 2437), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (2417, 2437), True, 'import pandas as pd\n'), ((2738, 2755), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2750, 2755), False, 'import requests\n'), ((2790, 2811), 'bs4.BeautifulSoup', 'BS', (['r.content', '"""lxml"""'], {}), "(r.content, 'lxml')\n", (2792, 2811), True, 'from bs4 import BeautifulSoup as BS\n'), ((5651, 5679), 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': '(1)'}), '(5, size=1)\n', (5668, 5679), True, 'import numpy as np\n'), ((5687, 5703), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (5697, 5703), False, 
'import time\n'), ((6965, 6982), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6977, 6982), False, 'import requests\n'), ((6998, 7026), 'bs4.BeautifulSoup', 'BS', (['r.content', '"""html.parser"""'], {}), "(r.content, 'html.parser')\n", (7000, 7026), True, 'from bs4 import BeautifulSoup as BS\n'), ((7862, 7894), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (7874, 7894), True, 'import pandas as pd\n'), ((8119, 8147), 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': '(1)'}), '(5, size=1)\n', (8136, 8147), True, 'import numpy as np\n'), ((8155, 8171), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (8165, 8171), False, 'import time\n'), ((1651, 1670), 'os.path.basename', 'basename', (['badge_pic'], {}), '(badge_pic)\n', (1659, 1670), False, 'from os.path import basename\n'), ((4043, 4075), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (4055, 4075), True, 'import pandas as pd\n'), ((6013, 6032), 'os.path.basename', 'basename', (['badge_pic'], {}), '(badge_pic)\n', (6021, 6032), False, 'from os.path import basename\n'), ((1699, 1722), 'requests.get', 'requests.get', (['badge_pic'], {}), '(badge_pic)\n', (1711, 1722), False, 'import requests\n'), ((2929, 2948), 'os.path.basename', 'basename', (['badge_pic'], {}), '(badge_pic)\n', (2937, 2948), False, 'from os.path import basename\n'), ((6061, 6084), 'requests.get', 'requests.get', (['badge_pic'], {}), '(badge_pic)\n', (6073, 6084), False, 'import requests\n'), ((7144, 7163), 'os.path.basename', 'basename', (['badge_pic'], {}), '(badge_pic)\n', (7152, 7163), False, 'from os.path import basename\n'), ((2981, 3004), 'requests.get', 'requests.get', (['badge_pic'], {}), '(badge_pic)\n', (2993, 3004), False, 'import requests\n'), ((4221, 4266), 'pandas.concat', 'pd.concat', (['[dom_df, df]'], {'axis': '(0)', 'join': '"""outer"""'}), "([dom_df, df], axis=0, join='outer')\n", (4230, 4266), True, 
'import pandas as pd\n'), ((7196, 7219), 'requests.get', 'requests.get', (['badge_pic'], {}), '(badge_pic)\n', (7208, 7219), False, 'import requests\n'), ((4881, 4927), 'pandas.concat', 'pd.concat', (['[icup_df, df]'], {'axis': '(0)', 'join': '"""outer"""'}), "([icup_df, df], axis=0, join='outer')\n", (4890, 4927), True, 'import pandas as pd\n'), ((5313, 5359), 'pandas.concat', 'pd.concat', (['[dcup_df, df]'], {'axis': '(0)', 'join': '"""outer"""'}), "([dcup_df, df], axis=0, join='outer')\n", (5322, 5359), True, 'import pandas as pd\n')] |
#python routines for estimating energy resolution and intensity
#<NAME>
#Updated 2-19-2013 to include tube efficiency
import sys
#sys.path.append('/SNS/users/19g/SEQUOIA/commissioning/python')
from unit_convert import E2V,E2K
import numpy as np
from numpy import pi, log, exp, sqrt, tanh, linspace, radians, zeros
from slit_pack import Slit_pack
from scipy.interpolate import interp1d
from pylab import figure, plot, subplot, show, xlabel, ylabel, title
from UB import Bmat_gen,gen_rec_latt,Bmat
class Chopper_spec(object):
    """
    Model of a direct-geometry (Fermi chopper) spectrometer.

    Parameters
    ----------
    instr_name : name of the instrument
    L : a three vector of distances, all in m
        L[0] = moderator to chopper distance
        L[1] = chopper to sample distance
        L[2] = sample to detector distance
    slit_pack : an instance of the Slit_pack class
    hphilims, vphilims : horizontal/vertical detector angular limits
        (in degrees -- ``plot_qrange`` applies ``radians()`` to them)
    w : width of the detector in m
    h : height of the detector in m
    sw : sample width in m
    sh : sample height in m
    He_press : He pressure in the detector tubes in atm
    He_T : He temperature used for the tube number density (K)
    mod_file : a moderator file provided by the neutronics group
    """
    def __init__(self,instr_name,L,slit_pack,hphilims,vphilims,w=0.0254,h=0.01,sw=0.05,sh=0.05,He_press=10,He_T=300.0,mod_file='source_sct521_bu_17_1.dat'):
        self.instr_name=instr_name
        self.L=L
        self.w=w
        self.h=h
        self.sw=sw
        self.sh=sh
        self.hphilims=hphilims
        self.vphilims=vphilims
        self.slit_pack=slit_pack
        self.He_press=He_press
        self.He_T=He_T
        # detector pixel volume: cylinder of diameter w and height h
        self.pix_vol=pi*self.w*self.w/4.0*self.h
        # He number density (presumably m^-3 from the ideal-gas law;
        # 7.336e26 converts atm/K -- TODO confirm the constant)
        self.num_density=self.He_press*7.336e26/self.He_T
        self.mod_file=mod_file
        # interpolating function: energy (eV) -> moderator flux
        self.I_func=read_mod_file(mod_file)
    def domega_in(self,Ei,Ef,nu):
        """
        Energy resolution (meV) at incident energy Ei and final energy Ef
        (both meV) for chopper frequency nu (Hz), combining the moderator,
        chopper and detector time widths via ``domega``.
        """
        dtm=H2Omod_dt(Ei)
        dtc=self.slit_pack.Fermi_dt(nu)
        dtd=det_dt(self.w,Ef)
        return domega(Ei,Ef,self.L,dtm,dtc,dtd)
    def dE(self,Ei,nu):
        """
        Energy width (meV) selected by the chopper opening time at
        incident energy Ei (meV) and chopper frequency nu (Hz).
        5.227e-6 is m_n/2 in meV/(m/s)^2, i.e. E = 5.227e-6 * v**2.
        """
        vi=E2V(Ei)
        dtc=self.slit_pack.Fermi_dt(nu)
        return 2*5.227e-6*vi**3.0*dtc/self.L[0]
    def flux(self,filename,Ei,Ef,nu,He_flag=0):
        """Calculate a number proportional to the flux on sample given an
        incident energy Ei (meV) and chopper frequency nu (Hz).

        ``filename`` is unused (the moderator interpolator built in
        ``__init__`` is used instead). If ``He_flag`` is set, the result
        is multiplied by the He tube detection efficiency
        1 - exp(-sigma*n*w), with a 1/v cross section term -- the
        1.1734E-21 constant presumably encodes sigma(v)*v; confirm.
        """
        v=E2V(Ei)
        T=self.slit_pack.Fermi_T(nu,v)
        # moderator brightness * chopper energy window * sample solid
        # angle seen from the moderator * chopper transmission
        I_return=self.I_func(Ei/1000.0)*self.dE(Ei,nu)*self.sw*self.sh/((self.L[0]+self.L[1])**2.0)*T
        if He_flag:
            I_return=I_return*(1-exp(-1.1734E-21/(E2V(Ef))*self.num_density*self.w))
        return I_return
def domega(Ei, Ef, L, dtm, dtc, dtd):
    """
    Energy resolution of a direct-geometry chopper spectrometer.

    Parameters
    ----------
    Ei, Ef : incident and final energy in meV
    L : three-element sequence of distances in m --
        L[0] moderator to chopper, L[1] chopper to sample,
        L[2] sample to detector
    dtm : moderator pulse width (s)
    dtc : chopper pulse width (s)
    dtd : detector time uncertainty (s)
    """
    mn = 1.674e-5/1.602  # neutron mass, scaled so the result comes out in meV
    vi = E2V(Ei)
    vf = E2V(Ef)
    # each timing uncertainty contributes in quadrature
    mod_term = ((vi**3.0)/L[0]+(vf**3.0)*L[1]/L[0]/L[2])**2.0*(dtm**2.0)
    chop_term = ((vi**3.0)/L[0]+(vf**3.0)*(L[1]+L[0])/L[0]/L[2])**2.0*(dtc**2.0)
    det_term = ((vf**3.0)/L[2])**2.0*(dtd)**2.0
    return mn*sqrt(mod_term + chop_term + det_term)
def interpmod_dt(Ein, filename):
    """Return the moderator pulse FWHM (seconds) at energy ``Ein`` (meV),
    linearly interpolated from the tabulated moderator file.
    """
    energies_eV, _flux, widths_us = read_file(filename)
    energies_meV = np.array(energies_eV) * 1000
    # tabulated widths are in microseconds -> convert to seconds
    return np.interp(Ein, energies_meV, np.array(widths_us)) * 1e-6
def H2Omod_dt(E):
    """Return the time width (seconds) of the moderator neutron pulse
    as a function of energy E (meV).

    Empirical fit: ln(dt/microseconds) is modelled as a double-linear
    tanh crossover in ln(E) via ``m1tanhm2``; the result is converted
    from microseconds to seconds.
    """
    # (the original had a no-op ``E = E`` here, removed)
    x = log(E)
    # empirical fit coefficients [m1, m2, x0, w, y0, A] for m1tanhm2
    p = [-0.4494, -0.046, 4.3672, 0.8530, 3.7389, 0.1271]
    return exp(m1tanhm2(x, p)) * 1e-6
def det_dt(w, Ef):
    """Return the detector time uncertainty (s): the transit time of a
    neutron with final energy Ef (meV) across a detector of width w (m).
    """
    vf = E2V(Ef)
    return w / vf
def m1tanhm2(x, p):
    """Blend two linear terms with a tanh crossover.

    ``p`` packs six coefficients [m1, m2, x0, w, y0, A]: the two linear
    terms m1*x and m2*x are weighted by complementary tanh sigmoids
    centred at x0 with width w, plus an offset y0 and an additional tanh
    step of amplitude A.
    """
    slope_a, slope_b, x0, w, y0, A = p[0], p[1], p[2], p[3], p[4], p[5]
    s = tanh((x - x0) / w)
    return (1 + s) / 2.0 * slope_a * x + (1 - s) / 2.0 * slope_b * x + y0 + A * s
def read_file(filename):
    """Parse a moderator file into energy, flux and pulse-width lists.

    Expected format: any number of leading '#' comment lines, then
    whitespace-separated data rows (blank lines allowed), terminated by
    another '#' line. Column 0 is energy (eV), column 2 is flux
    (n/pulse/sr/eV) and column 8 is the pulse width (microseconds).

    Returns
    -------
    (E, flux, dt) : three lists of floats
    """
    with open(filename) as fid:
        lines = fid.readlines()
    idx = 0
    E = []
    flux = []
    dt = []
    # skip the '#' header block
    while '#' in lines[idx]:
        idx += 1
    # read data rows until the trailing '#' line
    while '#' not in lines[idx]:
        parts = lines[idx].split()
        if parts:
            # float() replaces the original eval(): same numeric values,
            # without executing arbitrary file content
            E.append(float(parts[0]))
            flux.append(float(parts[2]))
            dt.append(float(parts[8]))
        idx += 1
    return E, flux, dt
def read_mod_file(filename):
    """Read a moderator file from the neutronics group and build a flux
    interpolator.

    Skips the '#' header lines, then reads whitespace-separated rows
    until a trailing '#' line: column 0 is energy (eV), column 2 is flux.

    Returns
    -------
    scipy.interpolate.interp1d
        Linear interpolator mapping energy (eV) to flux.
    """
    with open(filename) as fid:
        lines = fid.readlines()
    idx = 0
    E = []
    flux = []
    # skip the '#' header block
    while '#' in lines[idx]:
        idx += 1
    # read data rows until the trailing '#' line
    while '#' not in lines[idx]:
        parts = lines[idx].split()
        if parts:
            # float() replaces the original eval(): same numeric values,
            # without executing arbitrary file content
            E.append(float(parts[0]))
            flux.append(float(parts[2]))
        idx += 1
    flux_func = interp1d(E, flux, kind='linear')
    return flux_func
def plot_flux(nu, Ei, Ef, Spec, He_flag=1):
    """Plot relative flux and energy resolution versus chopper frequency.

    Parameters
    ----------
    nu : array of chopper frequencies (Hz)
    Ei : incident energy (meV)
    Ef : final energy (meV)
    Spec : an instance of a spectrometer class (examples are given at
        the bottom of this file)
    He_flag : if true, include the He tube detection efficiency

    Produces a two-panel figure: a number proportional to flux on top,
    the energy resolution below.
    """
    dw = Spec.domega_in(Ei, Ef, nu)
    # Spec.flux is vectorized over nu, so a single call suffices
    I = Spec.flux('source_sct521_bu_17_1.dat', Ei, Ef, nu, He_flag=He_flag)
    figure()
    subplot(2, 1, 1)
    plot(nu, I, 'bo')
    ylabel('I (arb. units)')
    # raw strings keep the TeX '\h' commands literal without triggering
    # invalid-escape warnings on modern Python
    title(('$E_i$ = %g (meV),$E_f$ = %g (meV), '
           r'$\hbar\omega$ = %g (meV)'
           '\n Slit_Pack:%s ')
          % (Ei, Ef, Ei - Ef, Spec.slit_pack.name))
    subplot(2, 1, 2)
    plot(nu, dw, 'bo')
    ylabel(r'$d(\hbar\omega)$ (meV)')
    xlabel(r'$\nu$ (Hz)')
    show()
def plot_res_omega(nu, Ei, omega, Spec):
    """Plot the energy resolution as a function of energy transfer.

    Parameters
    ----------
    nu : Fermi chopper speed (Hz)
    Ei : incident energy (meV)
    omega : numpy array of energy transfers (meV)
    Spec : one of the spectrometers defined at the end of the file

    Returns
    -------
    [omega, dw] : the energy transfers and the matching resolutions (meV)
    """
    Ef = Ei - omega
    dw = Spec.domega_in(Ei, Ef, nu)
    figure()
    plot(omega, dw, 'bo')
    # raw strings keep the TeX '\h' commands literal without triggering
    # invalid-escape warnings on modern Python
    ylabel(r'$d(\hbar\omega)$ (meV)')
    xlabel(r'$\hbar\omega$ (meV)')
    title((r'$E_i$ = %d meV $\nu$ = %d Hz ' '\n SlitPack:%s')
          % (Ei, nu, Spec.slit_pack.name))
    show()
    return [omega, dw]
def plot_qrange(Ei, wmin, spec, UB=None):
    """Plot the Q ranges accessible by a spectrometer.

    Given an incident energy Ei (meV), a minimum energy transfer wmin
    (meV), a spectrometer instance ``spec`` and a crystal orientation
    matrix ``UB`` (defaults to the identity), plot the accessible Qx,
    Qy, Qz components and |Q| versus energy transfer.
    Predefined spectrometers are given at the end of this file.
    """
    # None-sentinel instead of a mutable default list
    if UB is None:
        UB = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    ki = E2K(Ei)
    omega = linspace(wmin, Ei * 0.9, 100)
    Ef = Ei - omega
    kf = E2K(Ef)
    hphilims = radians(spec.hphilims)
    vphilims = radians(spec.vphilims)
    # np.sin / np.cos / np.array / np.dot must be qualified: these names
    # are not in the ``from numpy import ...`` list at the top of the
    # file, so the original bare calls raised NameError.
    Qxmax = -kf * np.sin(hphilims[1])
    Qxmin = -kf * np.sin(hphilims[0])
    Qxmin2 = -kf * np.sin(hphilims[2])
    Qxmax2 = -kf * np.sin(hphilims[3])
    Qymax = -kf * np.sin(vphilims[1])
    Qymin = -kf * np.sin(vphilims[0])
    Qymin2 = -kf * np.sin(vphilims[2])
    Qymax2 = -kf * np.sin(vphilims[3])
    Qzmax = ki - kf * np.cos(hphilims[1])
    Qzmin = ki - kf * np.cos(hphilims[0])
    Qzmax2 = ki - kf * np.cos(hphilims[3])
    Qzmin2 = ki - kf * np.cos(hphilims[2])
    Qmins = np.array([Qxmin, Qymin, Qzmin])
    Qmins2 = np.array([Qxmin2, Qymin2, Qzmin2])
    Qmaxs = np.array([Qxmax, Qymax, Qzmax])
    Qmaxs2 = np.array([Qxmax2, Qymax2, Qzmax2])
    hklmins = np.dot(UB, Qmins)
    hklmaxs = np.dot(UB, Qmaxs)
    hklmins2 = np.dot(UB, Qmins2)
    hklmaxs2 = np.dot(UB, Qmaxs2)
    figure()
    # hold('on') removed: pylab.hold no longer exists (matplotlib >= 2.0
    # always holds by default)
    xlbs = ['$Q_x$', '$Q_y$', '$Q_z$']
    for idx in range(3):
        subplot(2, 2, idx + 1)
        plot_qlims(hklmins, hklmaxs, hklmins2, hklmaxs2, omega, idx)
        xlabel(xlbs[idx])
    subplot(2, 2, 4)
    abs_tt = abs(np.array(hphilims))
    tthetamin = min(abs_tt)
    tthetamax = max(abs_tt)
    Qmin = sqrt(ki * ki + kf * kf - 2. * ki * kf * np.cos(tthetamin))
    Qmax = sqrt(ki * ki + kf * kf - 2. * ki * kf * np.cos(tthetamax))
    plot(Qmin, omega, 'r')
    plot(Qmax, omega, 'b')
    xlabel('|Q|')
    ylabel(r'$\omega$')
    show()
def plot_qlims(mins, maxs, mins2, maxs2, omega, idx):
    """Overlay the component-``idx`` Q limits against energy transfer:
    the first pair of limit arrays in blue, the second pair in red.
    """
    for limits, color in ((mins, 'b'), (maxs, 'b'), (mins2, 'r'), (maxs2, 'r')):
        plot(limits[idx, :], omega, color)
    ylabel('$\omega$')
#define default slit packages
# Slit_pack arguments appear to be (slit spacing in m, a curvature
# parameter, label) -- see the Slit_pack class; TODO confirm meanings.
SEQ_100=Slit_pack(0.00203,0.58,'SEQ-100-2.03-AST')
SEQ_700=Slit_pack(0.00356,1.53,'SEQ-700-3.56-AST')
ARCS_100=Slit_pack(0.00152,0.58,'ARCS 100')
ARCS_300=Slit_pack(0.00305,1.00,'ARCS 300')
ARCS_700_2=Slit_pack(0.00152,1.53,'ARCS 700 2')
ARCS_700_3=Slit_pack(0.00356,1.53,'ARCS 700 3')
ARCS_700_sf=Slit_pack(0.0005,1.53,'ARCS-700-0.5-AST')
SEQ_1000=Slit_pack(0.0015,1.83,'SEQ-1000-1.5-AST')
# Pre-built spectrometer configurations:
# Chopper_spec(name,
#              [moderator-chopper, chopper-sample, sample-detector] (m),
#              slit_pack, horizontal phi limits, vertical phi limits)
SEQUOIA=Chopper_spec('SEQUOIA',[18.0,2.0,5.5],SEQ_100,[2.1,60.0,-5.3,-30.0],[6.7,18.0,-7.5,-18.0])
SEQUOIA_sloppy=Chopper_spec('SEQUOIA',[18.0,2.0,5.5],SEQ_700,[2.1,60.0,-5.3,-30.0],[6.7,18.0,-7.5,-18.0])
SEQUOIA_700_superfine=Chopper_spec('SEQUOIA',[18.0,2.0,5.5],ARCS_700_sf,[2.1,60.0,-5.3,-30.0],[6.7,18.0,-7.5,-18.0])
SEQUOIA_1000=Chopper_spec('SEQUOIA',[18.0,2.0,5.5],SEQ_1000,[2.1,60.0,-5.3,-30.0],[6.7,18.0,-7.5,-18.0])
ARCS=Chopper_spec('ARCS',[11.6,2.0,3.0],ARCS_100,[2.1,90.0,-5.3,-30.0],[6.7,30.0,-7.5,-30.0])
ARCS_700_fine=Chopper_spec('ARCS',[11.6,2.0,3.0],ARCS_700_2,[2.1,90.0,-5.3,-30.0],[6.7,30.0,-7.5,-30.0])
ARCS_700_sloppy=Chopper_spec('ARCS',[11.6,2.0,3.0],ARCS_700_3,[2.1,90.0,-5.3,-30.0],[6.7,30.0,-7.5,-30.0])
ARCS_700_superfine=Chopper_spec('ARCS',[11.6,2.0,3.0],ARCS_700_sf,[2.1,90.0,-5.3,-30.0],[6.7,30.0,-7.5,-30.0])
| [
"pylab.title",
"numpy.radians",
"numpy.sqrt",
"pylab.subplot",
"pylab.plot",
"numpy.log",
"pylab.xlabel",
"unit_convert.E2V",
"numpy.tanh",
"scipy.interpolate.interp1d",
"pylab.figure",
"unit_convert.E2K",
"numpy.linspace",
"numpy.array",
"pylab.ylabel",
"slit_pack.Slit_pack",
"pylab... | [((8689, 8733), 'slit_pack.Slit_pack', 'Slit_pack', (['(0.00203)', '(0.58)', '"""SEQ-100-2.03-AST"""'], {}), "(0.00203, 0.58, 'SEQ-100-2.03-AST')\n", (8698, 8733), False, 'from slit_pack import Slit_pack\n'), ((8740, 8784), 'slit_pack.Slit_pack', 'Slit_pack', (['(0.00356)', '(1.53)', '"""SEQ-700-3.56-AST"""'], {}), "(0.00356, 1.53, 'SEQ-700-3.56-AST')\n", (8749, 8784), False, 'from slit_pack import Slit_pack\n'), ((8792, 8828), 'slit_pack.Slit_pack', 'Slit_pack', (['(0.00152)', '(0.58)', '"""ARCS 100"""'], {}), "(0.00152, 0.58, 'ARCS 100')\n", (8801, 8828), False, 'from slit_pack import Slit_pack\n'), ((8836, 8871), 'slit_pack.Slit_pack', 'Slit_pack', (['(0.00305)', '(1.0)', '"""ARCS 300"""'], {}), "(0.00305, 1.0, 'ARCS 300')\n", (8845, 8871), False, 'from slit_pack import Slit_pack\n'), ((8882, 8920), 'slit_pack.Slit_pack', 'Slit_pack', (['(0.00152)', '(1.53)', '"""ARCS 700 2"""'], {}), "(0.00152, 1.53, 'ARCS 700 2')\n", (8891, 8920), False, 'from slit_pack import Slit_pack\n'), ((8930, 8968), 'slit_pack.Slit_pack', 'Slit_pack', (['(0.00356)', '(1.53)', '"""ARCS 700 3"""'], {}), "(0.00356, 1.53, 'ARCS 700 3')\n", (8939, 8968), False, 'from slit_pack import Slit_pack\n'), ((8979, 9022), 'slit_pack.Slit_pack', 'Slit_pack', (['(0.0005)', '(1.53)', '"""ARCS-700-0.5-AST"""'], {}), "(0.0005, 1.53, 'ARCS-700-0.5-AST')\n", (8988, 9022), False, 'from slit_pack import Slit_pack\n'), ((9030, 9073), 'slit_pack.Slit_pack', 'Slit_pack', (['(0.0015)', '(1.83)', '"""SEQ-1000-1.5-AST"""'], {}), "(0.0015, 1.83, 'SEQ-1000-1.5-AST')\n", (9039, 9073), False, 'from slit_pack import Slit_pack\n'), ((3083, 3090), 'unit_convert.E2V', 'E2V', (['Ei'], {}), '(Ei)\n', (3086, 3090), False, 'from unit_convert import E2V, E2K\n'), ((3099, 3106), 'unit_convert.E2V', 'E2V', (['Ef'], {}), '(Ef)\n', (3102, 3106), False, 'from unit_convert import E2V, E2K\n'), ((3632, 3638), 'numpy.log', 'log', (['E'], {}), '(E)\n', (3635, 3638), False, 'from numpy import pi, log, exp, sqrt, tanh, 
linspace, radians, zeros\n'), ((5145, 5177), 'scipy.interpolate.interp1d', 'interp1d', (['E', 'flux'], {'kind': '"""linear"""'}), "(E, flux, kind='linear')\n", (5153, 5177), False, 'from scipy.interpolate import interp1d\n'), ((5843, 5851), 'pylab.figure', 'figure', ([], {}), '()\n', (5849, 5851), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((5857, 5873), 'pylab.subplot', 'subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (5864, 5873), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((5877, 5894), 'pylab.plot', 'plot', (['nu', 'I', '"""bo"""'], {}), "(nu, I, 'bo')\n", (5881, 5894), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((5898, 5922), 'pylab.ylabel', 'ylabel', (['"""I (arb. units)"""'], {}), "('I (arb. units)')\n", (5904, 5922), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((5928, 6068), 'pylab.title', 'title', (['("""$E_i$ = %g (meV),$E_f$ = %g (meV), $\\\\hbar\\\\omega$ = %g (meV)\n Slit_Pack:%s """\n % (Ei, Ef, Ei - Ef, Spec.slit_pack.name))'], {}), '(\n """$E_i$ = %g (meV),$E_f$ = %g (meV), $\\\\hbar\\\\omega$ = %g (meV)\n Slit_Pack:%s """\n % (Ei, Ef, Ei - Ef, Spec.slit_pack.name))\n', (5933, 6068), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((6052, 6068), 'pylab.subplot', 'subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (6059, 6068), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((6072, 6090), 'pylab.plot', 'plot', (['nu', 'dw', '"""bo"""'], {}), "(nu, dw, 'bo')\n", (6076, 6090), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((6094, 6128), 'pylab.ylabel', 'ylabel', (['"""$d(\\\\hbar\\\\omega)$ (meV)"""'], {}), "('$d(\\\\hbar\\\\omega)$ (meV)')\n", (6100, 6128), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((6132, 6153), 'pylab.xlabel', 'xlabel', 
(['"""$\\\\nu$ (Hz)"""'], {}), "('$\\\\nu$ (Hz)')\n", (6138, 6153), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((6159, 6165), 'pylab.show', 'show', ([], {}), '()\n', (6163, 6165), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((6547, 6555), 'pylab.figure', 'figure', ([], {}), '()\n', (6553, 6555), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((6560, 6581), 'pylab.plot', 'plot', (['omega', 'dw', '"""bo"""'], {}), "(omega, dw, 'bo')\n", (6564, 6581), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((6584, 6618), 'pylab.ylabel', 'ylabel', (['"""$d(\\\\hbar\\\\omega)$ (meV)"""'], {}), "('$d(\\\\hbar\\\\omega)$ (meV)')\n", (6590, 6618), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((6621, 6652), 'pylab.xlabel', 'xlabel', (['"""$\\\\hbar\\\\omega$ (meV)"""'], {}), "('$\\\\hbar\\\\omega$ (meV)')\n", (6627, 6652), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((6655, 6748), 'pylab.title', 'title', (['("""$E_i$ = %d meV $\\\\nu$ = %d Hz \n SlitPack:%s""" % (Ei, nu, Spec.\n slit_pack.name))'], {}), '("""$E_i$ = %d meV $\\\\nu$ = %d Hz \n SlitPack:%s""" % (Ei, nu, Spec.\n slit_pack.name))\n', (6660, 6748), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((6742, 6748), 'pylab.show', 'show', ([], {}), '()\n', (6746, 6748), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((7174, 7181), 'unit_convert.E2K', 'E2K', (['Ei'], {}), '(Ei)\n', (7177, 7181), False, 'from unit_convert import E2V, E2K\n'), ((7191, 7220), 'numpy.linspace', 'linspace', (['wmin', '(Ei * 0.9)', '(100)'], {}), '(wmin, Ei * 0.9, 100)\n', (7199, 7220), False, 'from numpy import pi, log, exp, sqrt, tanh, linspace, radians, zeros\n'), ((7238, 7245), 'unit_convert.E2K', 'E2K', (['Ef'], {}), '(Ef)\n', (7241, 7245), False, 'from 
unit_convert import E2V, E2K\n'), ((7258, 7280), 'numpy.radians', 'radians', (['spec.hphilims'], {}), '(spec.hphilims)\n', (7265, 7280), False, 'from numpy import pi, log, exp, sqrt, tanh, linspace, radians, zeros\n'), ((7293, 7315), 'numpy.radians', 'radians', (['spec.vphilims'], {}), '(spec.vphilims)\n', (7300, 7315), False, 'from numpy import pi, log, exp, sqrt, tanh, linspace, radians, zeros\n'), ((7949, 7957), 'pylab.figure', 'figure', ([], {}), '()\n', (7955, 7957), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8141, 8157), 'pylab.subplot', 'subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (8148, 8157), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8340, 8362), 'pylab.plot', 'plot', (['Qmin', 'omega', '"""r"""'], {}), "(Qmin, omega, 'r')\n", (8344, 8362), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8364, 8386), 'pylab.plot', 'plot', (['Qmax', 'omega', '"""b"""'], {}), "(Qmax, omega, 'b')\n", (8368, 8386), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8388, 8401), 'pylab.xlabel', 'xlabel', (['"""|Q|"""'], {}), "('|Q|')\n", (8394, 8401), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8405, 8424), 'pylab.ylabel', 'ylabel', (['"""$\\\\omega$"""'], {}), "('$\\\\omega$')\n", (8411, 8424), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8427, 8433), 'pylab.show', 'show', ([], {}), '()\n', (8431, 8433), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8501, 8531), 'pylab.plot', 'plot', (['mins[idx, :]', 'omega', '"""b"""'], {}), "(mins[idx, :], omega, 'b')\n", (8505, 8531), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8532, 8562), 'pylab.plot', 'plot', (['maxs[idx, :]', 'omega', '"""b"""'], {}), "(maxs[idx, :], omega, 'b')\n", (8536, 8562), False, 'from pylab import 
figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8563, 8594), 'pylab.plot', 'plot', (['mins2[idx, :]', 'omega', '"""r"""'], {}), "(mins2[idx, :], omega, 'r')\n", (8567, 8594), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8595, 8626), 'pylab.plot', 'plot', (['maxs2[idx, :]', 'omega', '"""r"""'], {}), "(maxs2[idx, :], omega, 'r')\n", (8599, 8626), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8627, 8646), 'pylab.ylabel', 'ylabel', (['"""$\\\\omega$"""'], {}), "('$\\\\omega$')\n", (8633, 8646), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((1958, 1965), 'unit_convert.E2V', 'E2V', (['Ei'], {}), '(Ei)\n', (1961, 1965), False, 'from unit_convert import E2V, E2K\n'), ((2194, 2201), 'unit_convert.E2V', 'E2V', (['Ei'], {}), '(Ei)\n', (2197, 2201), False, 'from unit_convert import E2V, E2K\n'), ((3122, 3333), 'numpy.sqrt', 'sqrt', (['((vi ** 3.0 / L[0] + vf ** 3.0 * L[1] / L[0] / L[2]) ** 2.0 * dtm ** 2.0 + \n (vi ** 3.0 / L[0] + vf ** 3.0 * (L[1] + L[0]) / L[0] / L[2]) ** 2.0 * \n dtc ** 2.0 + (vf ** 3.0 / L[2]) ** 2.0 * dtd ** 2.0)'], {}), '((vi ** 3.0 / L[0] + vf ** 3.0 * L[1] / L[0] / L[2]) ** 2.0 * dtm ** \n 2.0 + (vi ** 3.0 / L[0] + vf ** 3.0 * (L[1] + L[0]) / L[0] / L[2]) ** \n 2.0 * dtc ** 2.0 + (vf ** 3.0 / L[2]) ** 2.0 * dtd ** 2.0)\n', (3126, 3333), False, 'from numpy import pi, log, exp, sqrt, tanh, linspace, radians, zeros\n'), ((3850, 3857), 'unit_convert.E2V', 'E2V', (['Ef'], {}), '(Ef)\n', (3853, 3857), False, 'from unit_convert import E2V, E2K\n'), ((8035, 8057), 'pylab.subplot', 'subplot', (['(2)', '(2)', '(idx + 1)'], {}), '(2, 2, idx + 1)\n', (8042, 8057), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((8120, 8137), 'pylab.xlabel', 'xlabel', (['xlbs[idx]'], {}), '(xlbs[idx])\n', (8126, 8137), False, 'from pylab import figure, plot, subplot, show, xlabel, ylabel, title\n'), ((3473, 3485), 
'numpy.array', 'np.array', (['dt'], {}), '(dt)\n', (3481, 3485), True, 'import numpy as np\n'), ((4063, 4081), 'numpy.tanh', 'tanh', (['((x - x0) / w)'], {}), '((x - x0) / w)\n', (4067, 4081), False, 'from numpy import pi, log, exp, sqrt, tanh, linspace, radians, zeros\n'), ((3455, 3466), 'numpy.array', 'np.array', (['E'], {}), '(E)\n', (3463, 3466), True, 'import numpy as np\n'), ((4001, 4019), 'numpy.tanh', 'tanh', (['((x - x0) / w)'], {}), '((x - x0) / w)\n', (4005, 4019), False, 'from numpy import pi, log, exp, sqrt, tanh, linspace, radians, zeros\n'), ((4031, 4049), 'numpy.tanh', 'tanh', (['((x - x0) / w)'], {}), '((x - x0) / w)\n', (4035, 4049), False, 'from numpy import pi, log, exp, sqrt, tanh, linspace, radians, zeros\n'), ((2518, 2525), 'unit_convert.E2V', 'E2V', (['Ef'], {}), '(Ef)\n', (2521, 2525), False, 'from unit_convert import E2V, E2K\n')] |
from dna_features_viewer import BiopythonTranslator
import numpy as np
from copy import deepcopy
from Bio import SeqIO
import flametree
import matplotlib.pyplot as plt
from geneblocks import DiffBlocks
from .biotools import (
annotate_record,
sequence_to_biopython_record,
sequences_differences_segments,
crop_record,
)
def new_sequence_from_cutting_solution(solution, sequence):
    """Return a new sequence with all mutations in the cutting solution.

    Input sequence and returned sequence are ATGC strings; each entry of
    ``solution`` with a nonzero ``n_mutations`` carries a
    ``mutated_region`` of the form (start, mutated_subsequence).
    """
    chars = list(sequence)
    for operation in solution:
        if operation.get("n_mutations", 0) == 0:
            continue  # nothing to apply for this fragment
        start, mutated = operation["mutated_region"]
        chars[start:start + len(mutated)] = list(mutated)
    return "".join(chars)
def write_report_for_cutting_solution(
    solution, target, sequence, left_flank="", right_flank="", display_positions=False,
):
    """Write a complete report for Type IIS arbitrary sequence assembly.

    Parameters
    -----------
    solution
      The solution returned by an OverhangsSelector's ``cut_sequence`` method.
    target
      Either a path to a folder, a zip, or "@memory" to return raw ZIP file
      data instead of writing files. If ``target`` points to an existing
      folder/zip, it will be completely overwritten.
    sequence
      Sequence to be cut (can be a record)
    left_flank
      Left flank to be added to every fragment
    right_flank
      Right flank to be added to every fragment
    display_positions
      If True, the exact coordinate of each cut will be reported in the plot.
    """
    root = flametree.file_tree(target, replace=True)
    if isinstance(left_flank, str):
        left_flank = sequence_to_biopython_record(left_flank)
        annotate_record(left_flank, label="left_flank")
    if isinstance(right_flank, str):
        right_flank = sequence_to_biopython_record(right_flank)
        annotate_record(right_flank, label="right_flank")
    if hasattr(sequence, "seq"):
        record = sequence
        sequence = str(record.seq)
    else:
        record = sequence_to_biopython_record(sequence)
    # COMPUTE THE EDITED SEQUENCE (MAY BE EQUAL TO ORIGINAL IF NO EDITS)
    new_sequence = new_sequence_from_cutting_solution(solution, sequence)
    edited_segments = sequences_differences_segments(sequence, new_sequence)
    blocks = DiffBlocks.from_sequences(sequence, new_sequence).merged()
    # NOTE(review): `sequence` is a plain string at this point, so this branch
    # is never taken; possibly `record` was meant here — verify intent.
    if hasattr(sequence, "features"):
        ax, _ = blocks.plot(separate_axes=True)
    else:
        ax = blocks.plot(separate_axes=False)
    ax.set_title("Edits in new sequence vs. original")
    ax.figure.savefig(
        root._file("edits.pdf").open("wb"), format="pdf", bbox_inches="tight"
    )
    plt.close(ax.figure)
    # PLOT SUMMARY FIGURE
    plot_record = sequence_to_biopython_record(sequence)
    # BUG FIX: `display_positions` was previously hard-coded to False here,
    # silently disabling the function parameter of the same name.
    for o in solution:
        start, end = o["location"], o["location"] + len(o["sequence"])
        label = (
            "%s\n(%d)" % (o["sequence"], o["location"])
            if display_positions
            else o["sequence"]
        )
        annotate_record(plot_record, (start, end, 0), label=label)
    translator = BiopythonTranslator()
    gr = translator.translate_record(plot_record)
    ax, _ = gr.plot(with_ruler=False, figure_width=max(8, len(solution) / 2))
    ax.set_title(
        "Selected overhangs", loc="left", fontdict=dict(weight="bold", fontsize=13),
    )
    ax.set_ylim(top=ax.get_ylim()[1] + 2)
    # Overlay the edited positions as red dots along the sequence axis
    xx = [x for (a, b) in edited_segments for x in range(a, b)]
    ax.plot(xx, [0 for x in xx], marker="o", c="r", lw=0, label="sequence edits")
    L = len(sequence)
    ax.set_xlim(-0.1 * L, 1.1 * L)
    ax.legend(loc=2, fontsize=12)
    locs = sorted([o["location"] for o in solution])
    diffs = np.diff(locs)
    text = "Segment size: %d +/- %d bp. (mean +/- 1std)" % (diffs.mean(), diffs.std(),)
    ax.text(
        L / 2,
        -1,
        text,
        horizontalalignment="center",
        verticalalignment="top",
        fontsize=14,
    )
    ax.figure.savefig(
        root._file("summary_plot.pdf").open("wb"), format="pdf", bbox_inches="tight",
    )
    plt.close(ax.figure)
    # WRITE GENBANK RECORD OF FINAL SEQUENCE
    report_record = deepcopy(record)
    report_record.seq = sequence_to_biopython_record(new_sequence).seq
    for (start, end) in edited_segments:
        annotate_record(report_record, (int(start), int(end), 0), label="!edited")
    for o in solution:
        start = int(o["location"])
        end = int(o["location"] + len(o["sequence"]))
        annotate_record(report_record, (start, end, 0), label="overhang")
        annotate_record(report_record, (start, end, 0), label="@DoNotModify")
    SeqIO.write(report_record, root._file("final_sequence.gb"), "genbank")
    # WRITE GENBANK RECORDS OF ALL FRAGMENTS
    sequences = []
    fragments_records_dir = root._dir("fragments_records")
    overhang_length = len(solution[0]["sequence"])
    # Ensure the solution covers both sequence extremities so that the
    # pairwise fragment iteration below spans the whole sequence.
    if solution[0]["location"] != 0:
        solution = [{"location": 0, "sequence": sequence[:overhang_length]}] + solution
    if solution[-1]["location"] != L - overhang_length:
        solution = solution + [
            {
                "location": L - overhang_length,
                "sequence": sequence[L - overhang_length :],
            }
        ]
    for i, (o1, o2) in enumerate(zip(solution, solution[1:])):
        seqname = "fragment_%02d" % (i + 1)
        start, end = o1["location"], o2["location"] + len(o2["sequence"])
        fragment = crop_record(report_record, start, end)
        seqrecord = left_flank + fragment + right_flank
        seqrecord.annotations["molecule_type"] = "DNA"
        SeqIO.write(seqrecord, fragments_records_dir._file(seqname + ".gb"), "genbank")
        sequences.append(";".join([seqname, str(seqrecord.seq)]))
    root._file("fragments_sequences.csv").write("\n".join(sequences))
    root._file("overhangs_list.csv").write(", ".join([o["sequence"] for o in solution]))
    return root._close()
| [
"dna_features_viewer.BiopythonTranslator",
"geneblocks.DiffBlocks.from_sequences",
"numpy.diff",
"matplotlib.pyplot.close",
"flametree.file_tree",
"copy.deepcopy"
] | [((1705, 1746), 'flametree.file_tree', 'flametree.file_tree', (['target'], {'replace': '(True)'}), '(target, replace=True)\n', (1724, 1746), False, 'import flametree\n'), ((2826, 2846), 'matplotlib.pyplot.close', 'plt.close', (['ax.figure'], {}), '(ax.figure)\n', (2835, 2846), True, 'import matplotlib.pyplot as plt\n'), ((3288, 3309), 'dna_features_viewer.BiopythonTranslator', 'BiopythonTranslator', ([], {}), '()\n', (3307, 3309), False, 'from dna_features_viewer import BiopythonTranslator\n'), ((3949, 3962), 'numpy.diff', 'np.diff', (['locs'], {}), '(locs)\n', (3956, 3962), True, 'import numpy as np\n'), ((4322, 4342), 'matplotlib.pyplot.close', 'plt.close', (['ax.figure'], {}), '(ax.figure)\n', (4331, 4342), True, 'import matplotlib.pyplot as plt\n'), ((4411, 4427), 'copy.deepcopy', 'deepcopy', (['record'], {}), '(record)\n', (4419, 4427), False, 'from copy import deepcopy\n'), ((2459, 2508), 'geneblocks.DiffBlocks.from_sequences', 'DiffBlocks.from_sequences', (['sequence', 'new_sequence'], {}), '(sequence, new_sequence)\n', (2484, 2508), False, 'from geneblocks import DiffBlocks\n')] |
import argparse
from io import BytesIO as _BytesIO
from pathlib import Path
import numpy as _np
import pandas as _pd
from urllib import request as _rqs
from datetime import datetime
from scipy.interpolate import InterpolatedUnivariateSpline
from gn_lib.gn_io.common import path2bytes
from gn_lib.gn_datetime import gpsweekD
def extrap_df_data(df, column_list=None, order=2):
    """
    Extrapolate missing (NaN) values in an ERP-like dataframe, in place.

    A spline of degree ``order`` is fitted through the non-NaN samples of each
    column (using the dataframe index as abscissa) and evaluated at the NaN
    positions.

    :param df: dataframe with a numeric index (e.g. MJD)
    :param column_list: columns to fill; defaults to ["LOD", "LODsig"]
        (BUG FIX: previously a mutable default argument)
    :param order: spline degree ``k`` for InterpolatedUnivariateSpline
    """
    if column_list is None:
        column_list = ["LOD", "LODsig"]
    for para in column_list:
        known = df[para][~df[para].isna()]
        s = InterpolatedUnivariateSpline(known.index.values, known.values, k=order)
        x_fill = df[para][df[para].isna()].index.values
        # Evaluate the spline at the missing abscissae and fill in place
        fill_dict = dict(zip(x_fill, s(x_fill)))
        df.loc[:, para] = df[para].fillna(value=fill_dict)
def mjd_convert(dt):
    """Convert datetime ``dt`` to the corresponding Modified Julian Date.

    Reference point: 51544.00 == 2000-01-01 00:00:00 (microseconds ignored).
    """
    epoch = datetime(2000, 1, 1)
    delta = dt - epoch
    return 51544.00 + delta.days + delta.seconds / 86400
def erp_outfile(datetime_epoch: datetime, output_dir: Path):
    """
    Download the latest IERS daily EOP series and write an IGU-style
    earth-rotation-parameter (.erp) file for the given epoch.

    :param datetime_epoch: epoch of interest (time expected to be 00:00:00)
    :param output_dir: directory the igu<gpsweek+day>_<suffix>.erp file is written to
    """
    mjd = mjd_convert(datetime_epoch)
    # Remove any stale local copy before downloading a fresh IERS file
    if Path("finals.daily.iau2000.txt").is_file():
        Path("finals.daily.iau2000.txt").unlink()
    iers_url = "https://datacenter.iers.org/data/latestVersion/finals.daily.iau2000.txt"
    iau2000_daily_file = Path.cwd() / "finals.daily.iau2000.txt"
    _rqs.urlretrieve(iers_url, filename=iau2000_daily_file)
    byte_file = path2bytes(str(iau2000_daily_file))
    # Fixed-width parse of the IERS "finals" format; usecols keeps the MJD
    # plus the pole / UT1-UTC / LOD values and their formal errors.
    iers_df = _pd.read_fwf(
        _BytesIO(byte_file),
        widths=[2, 2, 2, 9, 3, 9, 9, 10, 9, 3, 10, 10, 8, 7, 7, 6, 9, 10, 9, 10, 10, 11, 10, 10],
        usecols=[3, 5, 6, 7, 8, 10, 11, 12, 13] + list(range(15, 19)),
        header=None,
        dtype=float,
    )
    # The downloaded file is no longer needed once parsed into a dataframe
    iau2000_daily_file.unlink()
    cols = [
        "MJD",
        "Xpole",
        "Xsig",
        "Ypole",
        "Ysig",
        "UT1-UTC",
        "UTsig",
        "LOD",
        "LODsig",
        "Xrt",
        "Xrtsig",
        "Yrt",
        "Yrtsig",
    ]
    iers_df.columns = cols
    # Keep a window from 10 days before to ~3 days after the target epoch
    erp_df = iers_df[(iers_df["MJD"] > mjd - 10) & (iers_df["MJD"] < mjd + 3.1)]
    # Rescale from IERS units to the integer-scaled units of the ERP format
    # (arcseconds -> 1e-6 arcsec, seconds -> 0.1 usec, etc.).
    # NOTE(review): erp_df is a slice of iers_df, so these .loc assignments may
    # emit pandas' SettingWithCopyWarning; a .copy() on the slice would silence it.
    erp_df.loc[:, "Xpole"] = erp_df.loc[:, "Xpole"] * 10 ** 6
    erp_df.loc[:, "Xsig"] = erp_df.loc[:, "Xsig"] * 10 ** 6
    erp_df.loc[:, "Ypole"] = erp_df.loc[:, "Ypole"] * 10 ** 6
    erp_df.loc[:, "Ysig"] = erp_df.loc[:, "Ysig"] * 10 ** 6
    erp_df.loc[:, "UT1-UTC"] = erp_df.loc[:, "UT1-UTC"] * 10 ** 7
    erp_df.loc[:, "UTsig"] = erp_df.loc[:, "UTsig"] * 10 ** 7
    erp_df.loc[:, "Xrt"] = erp_df.loc[:, "Xrt"] * 10 ** 3
    erp_df.loc[:, "Xrtsig"] = erp_df.loc[:, "Xrtsig"] * 10 ** 3
    erp_df.loc[:, "Yrt"] = erp_df.loc[:, "Yrt"] * 10 ** 3
    erp_df.loc[:, "Yrtsig"] = erp_df.loc[:, "Yrtsig"] * 10 ** 3
    erp_df.loc[:, "LOD"] = erp_df.loc[:, "LOD"] * 10 ** 4
    erp_df.loc[:, "LODsig"] = erp_df.loc[:, "LODsig"] * 10 ** 4
    days = erp_df["MJD"].values
    erp_df = erp_df.set_index("MJD")
    # Densify to a 6-hourly (0.25 day) grid and interpolate interior gaps
    ndf = _pd.DataFrame(index=_np.arange(start=days[0], stop=days[-1] + 1, step=0.25))
    ndf.index.name = "MJD"
    edf = ndf.merge(erp_df, left_index=True, right_index=True, how="outer").interpolate(limit_area="inside")
    # LOD/LODsig stop earlier than the other series: extrapolate them by spline
    extrap_df_data(edf, column_list=["LOD", "LODsig"])
    edf = edf.reset_index()
    edf = edf.dropna()
    cols_order = [
        "MJD",
        "Xpole",
        "Ypole",
        "UT1-UTC",
        "LOD",
        "Xsig",
        "Ysig",
        "UTsig",
        "LODsig",
        "Xrt",
        "Yrt",
        "Xrtsig",
        "Yrtsig",
    ]
    erp_out = edf[cols_order]
    # Nr/Nf/Nt (numbers of contributing solutions) are not available here: zeros
    erp_out.insert(loc=9, column="Nt", value=_np.zeros(len(edf)))
    erp_out.insert(loc=9, column="Nf", value=_np.zeros(len(edf)))
    erp_out.insert(loc=9, column="Nr", value=_np.zeros(len(edf)))
    erp_out = erp_out[erp_out["MJD"] > mjd - 3]
    # Select the grid rows whose fractional MJD matches the epoch's fraction
    out_vals = erp_out[erp_out["MJD"].apply(str).str.endswith("." + str(int(str(mjd + 0.5).split(".")[1])))].values
    # Write file out, with template header of IGU format
    template = [
        "version 2\n",
        "Source: Xpole,Ypole,Xrt,Yrt,LOD: weighted average of centres;\n",
        "        UT1-UTC: integrated from the 5th day prior to Bull. A\n",
        "        last non-predicted value.\n",
        "\n",
        "Orbits: to be used with the IGS Ultra Rapid Orbits (IGU)\n",
        "\n",
        "   MJD      Xpole   Ypole  UT1-UTC    LOD  Xsig  Ysig   UTsig  LODsig  Nr  Nf  Nt     Xrt    Yrt   Xrtsig  Yrtsig\n",
        '           (10**-6") (0.1 usec)   (10**-6")  (0.1 usec)             (10**-6"/d)    (10**-6"/d)\n',
    ]
    # Append one fixed-width data row per selected epoch
    for row in out_vals:
        temp_row = f"{row[0]:.02f}{row[1].astype(int):8}{row[2].astype(int):8}{row[3].astype(int):9}{row[4].astype(int):7}{row[5].astype(int):6}{row[6].astype(int):6}{row[7].astype(int):8}{row[8].astype(int):8}{row[9].astype(int):4}{row[10].astype(int):3}{row[11].astype(int):3}{row[12].astype(int):7}{row[13].astype(int):7}{row[14].astype(int):7}{row[15].astype(int):7}\n"
        template += [temp_row]
    # GPS week + weekday for the filename, e.g. igu21541_00.erp
    gps_date = gpsweekD(datetime_epoch.strftime("%Y"), datetime_epoch.strftime("%j"), wkday_suff=True)
    # Hour-of-day style suffix derived from the fractional part of the MJD
    file_suffix = f'_{int(int(str(mjd).split(".")[1].ljust(2,"0"))*0.24):02}'
    file_name = f"igu{gps_date}{file_suffix}.erp"
    with open(output_dir / file_name, "w") as out_file:
        out = out_file.writelines(template)
        # NOTE(review): writelines() returns None, so this prints "None"
        print(out)
if __name__ == "__main__":
    # Introduce command line parser
    parser = argparse.ArgumentParser(description="Create an EPR file based on IERS daily data")
    # Command line function arguments
    parser.add_argument(
        "datetime_string",
        help="""
            DateTime string of format:
            'YYYY-MM-DD hh:mm:ss'.
            Pass argument in brackets:
            e.g. create_erp_file.py "2021-04-17 00:00:00"
            At the moment time must be passed as 00:00:00
            """,
    )
    parser.add_argument(
        "-output_dir", "--output_dir", default=".",
        help="Directory the .erp file is written to. Default: current directory",
    )
    parser.add_argument(
        "-file_suff", "--file_suff",
        help="Deprecated and ignored: the suffix is now derived from the epoch",
    )
    args = parser.parse_args()
    if args.file_suff:
        print("Warning: -file_suff is ignored; the file suffix is derived from the epoch")
    # BUG FIX: erp_outfile(datetime_epoch, output_dir) was previously called
    # with the raw date string and a nonexistent `file_suffix` keyword, which
    # always raised a TypeError. Parse the string and pass an output Path.
    datetime_epoch = datetime.strptime(args.datetime_string, "%Y-%m-%d %H:%M:%S")
    erp_outfile(datetime_epoch, Path(args.output_dir))
| [
"datetime.datetime",
"argparse.ArgumentParser",
"urllib.request.urlretrieve",
"pathlib.Path.cwd",
"pathlib.Path",
"io.BytesIO",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.arange"
] | [((957, 977), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (965, 977), False, 'from datetime import datetime\n'), ((1482, 1537), 'urllib.request.urlretrieve', '_rqs.urlretrieve', (['iers_url'], {'filename': 'iau2000_daily_file'}), '(iers_url, filename=iau2000_daily_file)\n', (1498, 1537), True, 'from urllib import request as _rqs\n'), ((5533, 5620), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create an EPR file based on IERS daily data"""'}), "(description=\n 'Create an EPR file based on IERS daily data')\n", (5556, 5620), False, 'import argparse\n'), ((607, 652), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['xs', 'ys'], {'k': 'order'}), '(xs, ys, k=order)\n', (635, 652), False, 'from scipy.interpolate import InterpolatedUnivariateSpline\n'), ((1438, 1448), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1446, 1448), False, 'from pathlib import Path\n'), ((1627, 1646), 'io.BytesIO', '_BytesIO', (['byte_file'], {}), '(byte_file)\n', (1635, 1646), True, 'from io import BytesIO as _BytesIO\n'), ((1230, 1262), 'pathlib.Path', 'Path', (['"""finals.daily.iau2000.txt"""'], {}), "('finals.daily.iau2000.txt')\n", (1234, 1262), False, 'from pathlib import Path\n'), ((3086, 3141), 'numpy.arange', '_np.arange', ([], {'start': 'days[0]', 'stop': '(days[-1] + 1)', 'step': '(0.25)'}), '(start=days[0], stop=days[-1] + 1, step=0.25)\n', (3096, 3141), True, 'import numpy as _np\n'), ((1282, 1314), 'pathlib.Path', 'Path', (['"""finals.daily.iau2000.txt"""'], {}), "('finals.daily.iau2000.txt')\n", (1286, 1314), False, 'from pathlib import Path\n')] |
import os
from dataclasses import dataclass
import imageio
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
def get_callbacks(save_path, lr_schedule, prefix=None):
    """ Creates training callbacks.

    Arguments:
        save_path: the logs and checkpoints will be stored here.
        lr_schedule: learning rate schedule function (epoch -> learning rate).
        prefix: prefix for the file names (default is `checkpoint`).

    Returns:
        (callbacks, checkpoint_path) tuple.
    """
    if prefix is None:
        prefix = "checkpoint"
    log_path = os.path.join(save_path, "logs", prefix)
    checkpoint_path = os.path.join(save_path, "checkpoints",
                                   "%s.ckpt" % prefix)
    lr_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_schedule)
    tboard = tf.keras.callbacks.TensorBoard(
        log_dir=log_path, histogram_freq=0, write_graph=True, write_images=False,
        update_freq=1500, profile_batch="10,20", embeddings_freq=0,
        embeddings_metadata=None)
    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1,
                                                 save_freq=5000)
    nan_callback = tf.keras.callbacks.TerminateOnNaN()
    # BUG FIX: the LearningRateScheduler was previously constructed but never
    # registered, so the `lr_schedule` argument had no effect on training.
    # (The unused ReduceLROnPlateau instance was removed for the same reason.)
    callbacks = [cp_callback, tboard, nan_callback, lr_scheduler]
    return callbacks, checkpoint_path
def get_lr_schedule(initial_lrate, num_epochs, num_steps,
                    end_lr_coefficient=0.95):
    """Linear (power=1, non-cyclic) polynomial decay from ``initial_lrate``
    down to ``initial_lrate * end_lr_coefficient`` over the whole training
    run (``num_epochs * num_steps`` optimizer steps)."""
    total_steps = num_epochs * num_steps
    final_lrate = initial_lrate * end_lr_coefficient
    return tf.keras.optimizers.schedules.PolynomialDecay(
        initial_lrate, total_steps,
        end_learning_rate=final_lrate,
        power=1.0, cycle=False)
def visualize_mixture_weights(model, domains, num_templates, num_layers):
    """ Returns a matrix of mixture weights for visualization.

    Arguments:
        model: target model.
        domains: list of domain names.
        num_templates: number of templates in the model.
        num_layers: number of resblock layers in the model.

    Returns:
        np.ndarray of shape (num_layers, num_domains * (num_templates + 1));
        every (num_templates + 1)-th column stays zero as a visual separator
        between domains.
    """
    num_domains = len(domains)
    k = num_templates + 1
    mix_w_matr = np.zeros((num_layers, num_domains * k))
    for domain_idx, domain in enumerate(domains):
        mix_w_arr = np.zeros((num_layers, num_templates))
        for i in range(num_layers):
            # Prefer a shared mixture layer; fall back to the domain-specific
            # one when no shared layer with that name exists.
            try:
                mw = model.get_layer("shared_mix_%s" % (i))
            except ValueError:
                # BUG FIX: was a bare `except:`; Keras' get_layer raises
                # ValueError for an unknown layer name, so catch only that.
                mw = model.get_layer("%s_mix_%s" % (domain, i))
            if len(mw.trainable_variables) == 0:
                mw_weights = mw.non_trainable_variables[0]
            else:
                mw_weights = mw.trainable_variables[0]
            mix_w_arr[i] = tf.nn.softmax(mw_weights, axis=1).numpy()
        mix_w_matr[:, domain_idx * k: (domain_idx + 1) * k - 1] = mix_w_arr
    return mix_w_matr
class LRTensorBoard(tf.keras.callbacks.TensorBoard):
    """TensorBoard callback that additionally logs the optimizer's
    current learning rate under the key ``"lr"``."""

    def __init__(self, log_dir, **kwargs):
        super().__init__(log_dir=log_dir, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        """Injects the current learning rate into the epoch logs, then
        delegates to the regular TensorBoard handler."""
        logs = logs or {}
        logs["lr"] = K.eval(self.model.optimizer.lr)
        super().on_epoch_end(epoch, logs)
class VisualizeCallback(tf.keras.callbacks.Callback):
    """
    Mixture weights visualization callback.

    Periodically renders the model's mixture weights to a PNG file.

    Arguments:
        save_path: the mixture weight plots will be saved in this directory.
        domains: domain names.
        num_templates: number of templates.
        num_layers: number of layers.
        frequency: save the mixture weights every `frequency` epochs.
    """
    def __init__(self, save_path, domains, num_templates, num_layers,
                 frequency=10, **args):
        self.frequency = frequency
        self.save_path = os.path.abspath(save_path)
        self.domains = domains
        self.num_templates = num_templates
        self.num_layers = num_layers
        # exist_ok avoids the check-then-create race of the previous
        # `if not os.path.exists(...)` guard.
        os.makedirs(self.save_path, exist_ok=True)
        super().__init__(**args)

    def on_epoch_end(self, epoch, logs=None):
        """Writes the mixture weight image at the end of every
        `self.frequency`-th epoch.

        Arguments:
            epoch: number of epoch.
            logs: unused; kept for the Keras callback interface.
        """
        if epoch % self.frequency == 0:
            mw_img = visualize_mixture_weights(self.model, domains=self.domains,
                                             num_templates=self.num_templates,
                                             num_layers=self.num_layers)
            fname = os.path.join(self.save_path, "mixtures_%d.png" % epoch)
            imageio.imwrite(fname, mw_img)
def restore_model(ckpt_path, model):
    """Restore model weights from a checkpoint.

    Returns True when the weights were loaded, False when the checkpoint
    is incompatible with the model (ValueError from load_weights).
    """
    try:
        model.load_weights(ckpt_path).expect_partial()
    except ValueError:
        print("could not restore weights from %s" % ckpt_path)
        return False
    print("Restored weights from %s" % ckpt_path)
    return True
@dataclass
class TrainingParameters:
    """ Model fitting parameters class.

    Arguments:
        num_epochs: number of epochs.
        start_epoch: epoch to resume from (must not exceed num_epochs).
        num_steps: number of steps per epoch.
        lr: learning rate.
        lsmooth: label smoothing parameter.
        save_path: experiment files save path.
        name: experiment name.
        ckpt_path: checkpoint path.
        exp_path: experiment directory; derived from save_path/name when
            not given explicitly.
    """
    save_path: str = "./experiments/"
    name: str = "default"
    num_epochs: int = 100
    start_epoch: int = 0
    num_steps: int = 1000
    lr: float = 2*1e-3
    lsmooth: float = 0.
    ckpt_path: str = ""
    num_layers: int = 16
    num_templates: int = 4
    batch_size: int = 32
    restore: bool = False
    copy_weights: bool = False
    exp_path: str = ""

    def __post_init__(self):
        # BUG FIX: `exp_path` previously defaulted to a value computed once at
        # class-definition time from the *class* defaults, so instances built
        # with a custom save_path/name silently kept "./experiments/default".
        # Derive it per instance unless the caller supplied one explicitly.
        if not self.exp_path:
            self.exp_path = os.path.join(self.save_path, self.name)

    def init_from_args(self, args):
        """Initializes the fields from parsed command-line arguments."""
        self.num_epochs = args.num_epochs
        self.start_epoch = args.start_epoch
        assert self.start_epoch <= self.num_epochs, \
            "start_epoch must not exceed num_epochs"
        self.num_steps = args.num_steps
        self.lsmooth = args.lsmooth
        self.lr = args.lr
        self.name = args.name
        self.save_path = args.save_path
        self.exp_path = os.path.join(self.save_path, self.name)
        self.ckpt_path = args.ckpt_path
        self.num_layers = args.num_blocks
        self.num_templates = args.num_templates
        self.batch_size = args.batch_size
        self.restore = args.restore > 0
        self.copy_weights = args.copy_weights > 0
| [
"os.path.exists",
"tensorflow.keras.backend.eval",
"numpy.sqrt",
"tensorflow.keras.callbacks.TensorBoard",
"os.makedirs",
"imageio.imwrite",
"tensorflow.keras.callbacks.LearningRateScheduler",
"os.path.join",
"tensorflow.keras.optimizers.schedules.PolynomialDecay",
"numpy.zeros",
"tensorflow.nn.... | [((464, 503), 'os.path.join', 'os.path.join', (['save_path', '"""logs"""', 'prefix'], {}), "(save_path, 'logs', prefix)\n", (476, 503), False, 'import os\n'), ((524, 582), 'os.path.join', 'os.path.join', (['save_path', '"""checkpoints"""', "('%s.ckpt' % prefix)"], {}), "(save_path, 'checkpoints', '%s.ckpt' % prefix)\n", (536, 582), False, 'import os\n'), ((759, 812), 'tensorflow.keras.callbacks.LearningRateScheduler', 'tf.keras.callbacks.LearningRateScheduler', (['lr_schedule'], {}), '(lr_schedule)\n', (799, 812), True, 'import tensorflow as tf\n'), ((824, 1023), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_path', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(False)', 'update_freq': '(1500)', 'profile_batch': '"""10,20"""', 'embeddings_freq': '(0)', 'embeddings_metadata': 'None'}), "(log_dir=log_path, histogram_freq=0,\n write_graph=True, write_images=False, update_freq=1500, profile_batch=\n '10,20', embeddings_freq=0, embeddings_metadata=None)\n", (854, 1023), True, 'import tensorflow as tf\n'), ((1050, 1165), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': 'checkpoint_path', 'save_weights_only': '(True)', 'verbose': '(1)', 'save_freq': '(5000)'}), '(filepath=checkpoint_path,\n save_weights_only=True, verbose=1, save_freq=5000)\n', (1084, 1165), True, 'import tensorflow as tf\n'), ((1332, 1367), 'tensorflow.keras.callbacks.TerminateOnNaN', 'tf.keras.callbacks.TerminateOnNaN', ([], {}), '()\n', (1365, 1367), True, 'import tensorflow as tf\n'), ((1611, 1782), 'tensorflow.keras.optimizers.schedules.PolynomialDecay', 'tf.keras.optimizers.schedules.PolynomialDecay', (['initial_lrate', '(num_epochs * num_steps)'], {'end_learning_rate': '(initial_lrate * end_lr_coefficient)', 'power': '(1.0)', 'cycle': '(False)'}), '(initial_lrate, num_epochs *\n num_steps, end_learning_rate=initial_lrate * end_lr_coefficient, power=\n 
1.0, cycle=False)\n', (1656, 1782), True, 'import tensorflow as tf\n'), ((2169, 2226), 'numpy.zeros', 'np.zeros', (['(num_layers, num_domains * (num_templates + 1))'], {}), '((num_layers, num_domains * (num_templates + 1)))\n', (2177, 2226), True, 'import numpy as np\n'), ((5682, 5711), 'os.path.join', 'os.path.join', (['save_path', 'name'], {}), '(save_path, name)\n', (5694, 5711), False, 'import os\n'), ((2311, 2348), 'numpy.zeros', 'np.zeros', (['(num_layers, num_templates)'], {}), '((num_layers, num_templates))\n', (2319, 2348), True, 'import numpy as np\n'), ((3781, 3807), 'os.path.abspath', 'os.path.abspath', (['save_path'], {}), '(save_path)\n', (3796, 3807), False, 'import os\n'), ((6093, 6132), 'os.path.join', 'os.path.join', (['self.save_path', 'self.name'], {}), '(self.save_path, self.name)\n', (6105, 6132), False, 'import os\n'), ((683, 695), 'numpy.sqrt', 'np.sqrt', (['(0.1)'], {}), '(0.1)\n', (690, 695), True, 'import numpy as np\n'), ((4073, 4103), 'os.path.exists', 'os.path.exists', (['self.save_path'], {}), '(self.save_path)\n', (4087, 4103), False, 'import os\n'), ((4111, 4138), 'os.makedirs', 'os.makedirs', (['self.save_path'], {}), '(self.save_path)\n', (4122, 4138), False, 'import os\n'), ((4614, 4669), 'os.path.join', 'os.path.join', (['self.save_path', "('mixtures_%d.png' % epoch)"], {}), "(self.save_path, 'mixtures_%d.png' % epoch)\n", (4626, 4669), False, 'import os\n'), ((4676, 4706), 'imageio.imwrite', 'imageio.imwrite', (['fname', 'mw_img'], {}), '(fname, mw_img)\n', (4691, 4706), False, 'import imageio\n'), ((3208, 3239), 'tensorflow.keras.backend.eval', 'K.eval', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (3214, 3239), True, 'from tensorflow.keras import backend as K\n'), ((2688, 2721), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['mw_weights'], {'axis': '(1)'}), '(mw_weights, axis=1)\n', (2701, 2721), True, 'import tensorflow as tf\n')] |
"""
Loads all the images in the "data/" directory.
This folder should contain 6400 images:
- for a number of times the identity operation is applied, k in (1-32):
- for a number of iteration it in (0-99):
- we have 2 images:
- one input image: "Input f k_it.BMP"
- one output image: "Output f k_it.BMP"
The expected number of images is therefore 32 * 100 * 2 = 6400.
The input image corresponds to an uniform array, filled with a single value.
The output image corresponds to the result we get when applying our supposedly
identity function k times.
For each k (number of times the identity operation is applied in (1-32)), we compute
the mean of the average of the difference between the input and the output. This
gives us 32 scalar values, each of them corresponding to the systematic bias introduced
by the application of k repetitions of out identity function.
For each input/output pair, we then compute the PSNR,
after having compensated the systematic bias by adding the above
term.
For each Output image, we then compute the STD of the output.
"""
import numpy as np
import math
import scipy.misc
def psnr(img1, img2):
    """Peak signal-to-noise ratio between two images, assuming a maximum
    pixel value of 255. Returns 100 for identical images (MSE == 0)."""
    mean_sq_err = np.mean(np.square(img1 - img2))
    if mean_sq_err == 0:
        return 100
    max_pixel = 255.0
    return 20 * math.log10(max_pixel / math.sqrt(mean_sq_err))
## Load all images
# inputs[0-31][0-99] : all original inputs
inputs = []
# outputs[0-31][0-99] : result images
outputs = []
directory = 'data/'
for it in range(32):
    inputs.append([])
    outputs.append([])
    for k in range(100):
        # File names encode the iteration count (1-32) and the sample index (00000-00099)
        i_img_filename = directory + 'Input f ' + str(it+1) + '_{:05d}.BMP'.format(k)
        o_img_filename = directory + 'Output f ' + str(it+1) + '_{:05d}.BMP'.format(k)
        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this
        # script requires an old SciPy (or should migrate to imageio.imread).
        # [:,:,0] keeps only the first channel of the BMP.
        i_img = scipy.misc.imread(i_img_filename)[:,:,0]
        o_img = scipy.misc.imread(o_img_filename)[:,:,0]
        inputs[it].append(i_img)
        outputs[it].append(o_img)
## Compute constant term to compensate for systematic noise
# (one value per # of iterations, applied to the whole batch of 100 images)
# NOTE(review): if imread yields uint8 arrays, the subtraction below wraps
# around instead of going negative — presumably a signed/float cast is
# intended here; verify the dtype returned by imread.
constant_terms = []
for it in range(32):
    mean_differences = []
    for k in range(100):
        mean_differences.append((inputs[it][k] - outputs[it][k]).mean())
    val = sum(mean_differences) / 100
    constant_terms.append(val)
# Add constant terms to compensate for systematic bias
for it in range(32):
    for k in range(100):
        outputs[it][k] = outputs[it][k] + constant_terms[it]
## Compute mean PSNR
# One average PSNR per number of iterations, over the 100 input/output pairs
mean_psnr_values = []
for it in range(32):
    mean_psnr = []
    for k in range(100):
        mean_psnr.append(psnr(inputs[it][k], outputs[it][k]))
    mean_psnr = sum(mean_psnr) / 100
    mean_psnr_values.append(mean_psnr)
## Compute mean STD
# One average output STD per number of iterations
mean_std_values = []
for it in range(32):
    mean_std = []
    for k in range(100):
        mean_std.append(outputs[it][k].std())
    mean_std = sum(mean_std) / 100
    mean_std_values.append(mean_std)
# Prepend the baseline: mean STD over all *input* images (0 iterations)
baseline_std = []
for it in range(32):
    for k in range(100):
        baseline_std.append(inputs[it][k].std())
mean_std_values = [sum(baseline_std)/len(baseline_std)] + mean_std_values
## Print results
print('bias:')
print(constant_terms)
print('')
print('PSNR:')
print(mean_psnr_values)
print('')
print('STD:')
print(mean_std_values)
| [
"numpy.mean",
"math.sqrt"
] | [((1161, 1188), 'numpy.mean', 'np.mean', (['((img1 - img2) ** 2)'], {}), '((img1 - img2) ** 2)\n', (1168, 1188), True, 'import numpy as np\n'), ((1278, 1292), 'math.sqrt', 'math.sqrt', (['mse'], {}), '(mse)\n', (1287, 1292), False, 'import math\n')] |
from cluster.preprocess.pre_node_feed import PreNodeFeed
import os,h5py
import numpy as np
class PreNodeFeedText2FastText(PreNodeFeed):
    """
    Feed node that serves raw text chunks from HDF5 files (dataset
    'rawdata') for FastText-style training.
    """
    def run(self, conf_data):
        """
        Run the parent feed pipeline, then (re)load this node's parameters.
        """
        super(PreNodeFeedText2FastText, self).run(conf_data)
        self._init_node_parm(conf_data['node_id'])

    def _convert_data_format(self, file_path, index):
        """
        Return the [index.start, index.stop) slice of the 'rawdata' dataset
        as a single-element nested list, with '#' padding entries removed.
        :param file_path: path to the HDF5 file
        :param index: object with .start/.stop attributes
        :return: [list of non-'#' values]
        """
        # BUG FIX: the previous try/finally closed `h5file` in `finally` even
        # when h5py.File itself failed, raising NameError on the unbound name
        # (and `raise Exception(e)` discarded the original exception type).
        # The context manager guarantees a safe close in every case.
        with h5py.File(file_path, mode='r') as h5file:
            data_set = h5file['rawdata'][index.start: index.stop]
            return [data_set[np.logical_not(data_set == '#')].tolist()]

    def data_size(self):
        """
        Number of rows in the current input file's 'rawdata' dataset.
        """
        with h5py.File(self.input_paths[self.pointer], mode='r') as h5file:
            return h5file['rawdata'].len()
"numpy.logical_not",
"h5py.File"
] | [((561, 591), 'h5py.File', 'h5py.File', (['file_path'], {'mode': '"""r"""'}), "(file_path, mode='r')\n", (570, 591), False, 'import os, h5py\n'), ((970, 1021), 'h5py.File', 'h5py.File', (['self.input_paths[self.pointer]'], {'mode': '"""r"""'}), "(self.input_paths[self.pointer], mode='r')\n", (979, 1021), False, 'import os, h5py\n'), ((728, 759), 'numpy.logical_not', 'np.logical_not', (["(data_set == '#')"], {}), "(data_set == '#')\n", (742, 759), True, 'import numpy as np\n')] |
import gensim
import numpy as np
import torch
import torch.nn as nn
# Default compute device for tensors created in this module (GPU when available).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def load_word2vec(word2vec_data_path):
    """Load a binary word2vec file into a gensim KeyedVectors model."""
    return gensim.models.KeyedVectors.load_word2vec_format(word2vec_data_path, binary=True)
def generate_word_map_from_word2vec_model(word2vec_model):
    """Map every vocabulary word of a gensim word2vec model to its index.

    Indices follow the insertion order of ``model.wv.vocab`` (which matches
    the original ``list(vocab.keys())`` ordering, as dicts preserve
    insertion order).

    Returns:
        dict mapping word -> integer id.
    """
    # Iterate the vocab mapping directly — the previous version materialized
    # the key list and kept an unused `vocab` local.
    return {word: word_id
            for word_id, word in enumerate(word2vec_model.wv.vocab)}
def add_special_word_to_embedding_vectors(embedding_vectors, word_map, word):
    """Append a random embedding row for ``word`` if it is not yet mapped.

    Args:
        embedding_vectors: np.ndarray
            shape: (n_vocab, n_dimension)
            the embedding vectors, always generated by gensim.model.wv.vectors
        word_map: dict mapping word -> row index; mutated in place when the
            word is added.
        word: the special word to register.

    Returns:
        (embedding_vectors, word_map) — unchanged when the word already
        exists, otherwise the extended matrix and the updated map.
    """
    if word_map.get(word) is not None:
        return embedding_vectors, word_map
    n_vocab, n_dimension = embedding_vectors.shape
    random_row = np.random.rand(1, n_dimension)
    extended = np.append(embedding_vectors, random_row, axis=0)
    word_map[word] = n_vocab
    return extended, word_map
# deprecated!
def convert_words_to_word_embeddings(embedding_vectors, word_map, words, default_dim=300):
    """DEPRECATED. Look up the embedding row for each known word.

    Words missing from ``word_map`` are skipped entirely. (The previous
    version computed a ``default_dim`` zero vector for unknown words but
    then `continue`d past the append, so the zero vector was dead code;
    it is removed here without changing the output.)

    Args:
        embedding_vectors: np.ndarray of shape (n_vocab, n_dimension).
        word_map: dict mapping word -> row index.
        words: iterable of words to look up.
        default_dim: unused; kept for interface compatibility.

    Returns:
        list of embedding rows for the words found in ``word_map``.
    """
    word_embeddings = []
    for word in words:
        if word_map.get(word) is None:
            continue  # unknown word: skipped (see docstring)
        word_embeddings.append(embedding_vectors[word_map[word]])
    return word_embeddings
def create_embedding_layer(embedding_vectors, is_trainable=False):
    """Create a torch embedding layer initialized from pretrained vectors.

    Args:
        embedding_vectors: np.ndarray of shape (n_vocab, n_dimension).
        is_trainable: when True the embedding weights receive gradient
            updates during training; when False (default) they are frozen.

    Returns:
        (embedding_layer, num_embeddings, embedding_dim)
    """
    num_embeddings, embedding_dim = embedding_vectors.shape
    # torch.as_tensor replaces the legacy FloatTensor(data, device=...)
    # constructor, which rejects non-CPU devices in modern torch.
    weights = torch.as_tensor(embedding_vectors, dtype=torch.float32, device=device)
    # BUG FIX: previously the layer was always frozen — from_pretrained
    # defaults to freeze=True, and the old code additionally set
    # requires_grad = False exactly when is_trainable was True.
    embedding_layer = nn.Embedding.from_pretrained(weights, freeze=not is_trainable)
    return embedding_layer, num_embeddings, embedding_dim
| [
"numpy.random.rand",
"gensim.models.KeyedVectors.load_word2vec_format",
"numpy.append",
"torch.cuda.is_available",
"torch.FloatTensor",
"torch.nn.Embedding.from_pretrained"
] | [((191, 276), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['word2vec_data_path'], {'binary': '(True)'}), '(word2vec_data_path, binary=True\n )\n', (238, 276), False, 'import gensim\n'), ((979, 1009), 'numpy.random.rand', 'np.random.rand', (['(1)', 'n_dimension'], {}), '(1, n_dimension)\n', (993, 1009), True, 'import numpy as np\n'), ((1034, 1086), 'numpy.append', 'np.append', (['embedding_vectors', 'word_embedding'], {'axis': '(0)'}), '(embedding_vectors, word_embedding, axis=0)\n', (1043, 1086), True, 'import numpy as np\n'), ((1842, 1893), 'torch.FloatTensor', 'torch.FloatTensor', (['embedding_vectors'], {'device': 'device'}), '(embedding_vectors, device=device)\n', (1859, 1893), False, 'import torch\n'), ((1916, 1953), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['weights'], {}), '(weights)\n', (1944, 1953), True, 'import torch.nn as nn\n'), ((101, 126), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (124, 126), False, 'import torch\n')] |
"""
<NAME>
Calculation of curvature using the method outlined in <NAME> et. al 2004
Per face curvature is calculated and per vertex curvature is calculated by weighting the
per-face curvatures. I have vectorized the code where possible.
"""
import numpy as np
from numpy.core.umath_tests import inner1d
from .utils import fastcross,normr
def RotateCoordinateSystem(up, vp, nf):
    """
    Rotate the vertex coordinate axes (up, vp) into the plane defined by
    the face normal nf.
    INPUT:
        up,vp - vectors to be rotated (vertex coordinate system)
        nf - face normal
    OUTPUT:
        r_new_u,r_new_v - rotated coordinate system
    """
    plane_normal = np.cross(up, vp)
    plane_normal = plane_normal / np.sqrt(np.dot(plane_normal, plane_normal))
    cos_angle = np.dot(nf, plane_normal)
    if cos_angle <= -1:
        # Normals are antiparallel: the rotation is a half turn, flip both axes
        return -up, -vp
    perp = nf - cos_angle * plane_normal
    dperp = (plane_normal + nf) / (1.0 + cos_angle)
    r_new_u = up - dperp * np.dot(perp, up)
    r_new_v = vp - dperp * np.dot(perp, vp)
    return r_new_u, r_new_v
def ProjectCurvatureTensor(uf, vf, nf, old_ku, old_kuv, old_kv, up, vp):
    """
    Project the per-face curvature tensor variables into the vertex
    coordinate system.
    INPUT:
        uf,vf - face coordinate system
        nf - face normal
        old_ku,old_kuv,old_kv - face curvature tensor variables
        up,vp - vertex coordinate system
    OUTPUT:
        new_ku,new_kuv,new_kv - vertex curvature tensor variables
    """
    rot_u, rot_v = RotateCoordinateSystem(up, vp, nf)
    # Components of the rotated vertex axes expressed in the face basis
    u1 = np.dot(rot_u, uf)
    v1 = np.dot(rot_u, vf)
    u2 = np.dot(rot_v, uf)
    v2 = np.dot(rot_v, vf)
    # Congruence transform of the symmetric 2x2 curvature tensor: K' = R K R^T
    ku_term = u1 * old_ku + v1 * old_kuv
    kv_term = u1 * old_kuv + v1 * old_kv
    new_ku = u1 * ku_term + v1 * kv_term
    new_kuv = u2 * ku_term + v2 * kv_term
    new_kv = u2 * (u2 * old_ku + v2 * old_kuv) + v2 * (u2 * old_kuv + v2 * old_kv)
    return new_ku, new_kuv, new_kv
def GetVertexNormalsExtra(vertices,faces,FaceNormals,e0,e1,e2):
    """
    In addition to vertex normals this also returns the mixed area weights per vertex
    which is used in calculating the curvature at the vertex from per face curvature values
    We could have calculated them separately, but doing both at once is efficient.
    The calculations involve loops over the faces and vertices in serial and are not easily vectorized
    INPUT:
    Vertices : vertices
    Faces : vertex connectivity
    FaceNormals : Outer Normal per face, having magnitude equal to area of face
    e0,e1,e2 : edge vectors
    OUTPUT:
    VertNormals : Unit normal at the vertex
    wfp : Mixed area weights per vertex, as per Meyer 2002
    OTHER:
    Avertex : Mixed area associated with a vertex. Meyer 2002
    Acorner : part of Avertex associated to 
    """
    #edge lengths
    de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2)
    de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2)
    de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2)
    # L2: squared edge lengths per face; ew: cotangent-style edge weights.
    # A non-positive ew column marks an obtuse angle at the opposite corner.
    L2=np.c_[de0**2,de1**2,de2**2]
    ew=np.c_[L2[:,0]*(L2[:,1]+L2[:,2]-L2[:,0]),L2[:,1]*(L2[:,2]+L2[:,0]-L2[:,1]),L2[:,2]*(L2[:,0]+L2[:,1]-L2[:,2])]
    #calculate face area
    Af=np.sqrt(FaceNormals[:,0]**2+FaceNormals[:,1]**2+FaceNormals[:,2]**2)
    Avertex =np.zeros(vertices.shape[0])
    VertNormals =np.zeros(vertices.shape)
    #Calculate weights according to N.Max [1999] for normals
    wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis]
    wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis]
    wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis]
    # Scatter-add each face's weighted normal onto its three corner vertices;
    # np.bincount with weights accumulates per vertex index without a Python loop.
    verts=faces.T[0]
    for j in [0,1,2]:
        VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j])
    verts=faces.T[1]
    for j in [0,1,2]:
        VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j])
    verts=faces.T[2]
    for j in [0,1,2]:
        VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j])
    # Voronoi corner areas (valid for non-obtuse triangles only).
    Acorner=(0.5*Af/(ew[:,0]+ew[:,1]+ew[:,2]))[:,np.newaxis]*np.c_[ew[:,1]+ew[:,2], ew[:,2]+ew[:,0], ew[:,0]+ew[:,1]]
    #Change the area to barycentric area for obtuse triangles
    for i,f in enumerate(faces):
        if ew[i,0]<=0:
            Acorner[i,2]=-0.25*L2[i,1]*Af[i]/(sum(e0[i]*e1[i]))
            Acorner[i,1]=-0.25*L2[i,2]*Af[i]/(sum(e0[i]*e2[i]))
            Acorner[i,0]=Af[i]-Acorner[i,1]-Acorner[i,2]
        elif ew[i,1]<=0:
            Acorner[i,2]=-0.25*L2[i,0]*Af[i]/(sum(e1[i]*e0[i]))
            Acorner[i,0]=-0.25*L2[i,2]*Af[i]/(sum(e1[i]*e2[i]))
            Acorner[i,1]=Af[i]-Acorner[i,0]-Acorner[i,2]
        elif ew[i,2]<=0:
            Acorner[i,0]=-0.25*L2[i,1]*Af[i]/(sum(e2[i]*e1[i]))
            Acorner[i,1]=-0.25*L2[i,0]*Af[i]/(sum(e2[i]*e0[i]))
            Acorner[i,2]=Af[i]-Acorner[i,0]-Acorner[i,1]
    #Accumulate Avertex from Acorner.
    for j,verts in enumerate(faces.T):
        Avertex+=np.bincount(verts,minlength=vertices.shape[0],weights=Acorner[:,j])
    VertNormals=normr(VertNormals)
    #calculate voronoi weights
    wfp=Acorner/Avertex[faces]
    return VertNormals,wfp
def CalcCurvature(vertices,faces):
    """
    CalcCurvature receives a list of vertices and faces
    and the normal at each vertex and calculates the second fundamental
    matrix and the curvature by least squares, by inverting the 3x3 Normal matrix
    INPUT:
    vertices -nX3 array of vertices
    faces -mX3 array of faces
    VertexNormals - nX3 matrix (n=number of vertices) containing the normal at each vertex
    FaceNormals - mX3 matrix (m = number of faces) containing the normal of each face
    OUTPUT:
    FaceSFM - a list of 2x2 np arrays of (m = number of faces) second fundamental tensor at the faces
    VertexSFM - a list of 2x2 np arrays (n = number of vertices) second fundamental tensor at the vertices
    Other Parameters
    wfp : mx3 array of vertex voronoi cell area/Mixed area weights as given in Meyer 2002
    up,vp : local coordinate system at each vertex
    e0,e1,e2 : edge vectors
    """
    #list of 2x2 arrays for each vertex
    VertexSFM = [np.zeros([2,2]) for i in vertices]
    up = np.zeros(vertices.shape)
    # Edge vectors: e_k is the edge opposite corner k of each face.
    e0=vertices[faces[:,2]]-vertices[faces[:,1]]
    e1=vertices[faces[:,0]]-vertices[faces[:,2]]
    e2=vertices[faces[:,1]]-vertices[faces[:,0]]
    e0_norm=normr(e0)
    e1_norm=normr(e1)
    e2_norm=normr(e2)
    FaceNormals=0.5*fastcross(e1,e2) #not unit length. holds the area which is needed next
    VertNormals,wfp=GetVertexNormalsExtra(vertices,faces,FaceNormals,e0,e1,e2)
    FaceNormals=normr(FaceNormals)
    #Calculate initial coordinate system
    up[faces[:,0]]=e2_norm
    up[faces[:,1]]=e0_norm
    up[faces[:,2]]=e1_norm
    #Calculate initial vertex coordinate system
    up=fastcross(up,VertNormals)
    up=normr(up)
    vp=fastcross(VertNormals,up)
    # (e0_norm, B) span each face plane and form the face coordinate system.
    B=normr(fastcross(FaceNormals,e0_norm))
    nfaces=faces.shape[0]
    # Build a least square problem at each face to get the SFM at each face and solve it using the normal equation
    scale=1.0/np.sqrt(np.sum((e0[0,:]**2+e1[0,:]**2+e2[0,:]**2)/3.0))
    # AT stacks, per face, the 6x3 design matrix rows built from edge/frame
    # inner products; the trailing .T puts the face axis first.
    AT = scale*np.array([[inner1d(e0,e0_norm), inner1d(e0,B), np.zeros(nfaces)],
                        [np.zeros(nfaces), inner1d(e0,e0_norm), inner1d(e0,B)],
                        [inner1d(e1,e0_norm), inner1d(e1,B), np.zeros(nfaces)],
                        [np.zeros(nfaces), inner1d(e1,e0_norm), inner1d(e1,B)],
                        [inner1d(e2,e0_norm), inner1d(e2,B), np.zeros(nfaces)],
                        [np.zeros(nfaces), inner1d(e2,e0_norm), inner1d(e2,B)]]).T
    A = np.transpose(AT,axes=(0,2,1)).copy()
    # Normal differences along each edge form the right-hand side.
    dn0=VertNormals[faces[:,2]]-VertNormals[faces[:,1]]
    dn1=VertNormals[faces[:,0]]-VertNormals[faces[:,2]]
    dn2=VertNormals[faces[:,1]]-VertNormals[faces[:,0]]
    b= scale*np.array([inner1d(dn0,e0_norm),
                       inner1d(dn0,B ),
                       inner1d(dn1,e0_norm),
                       inner1d(dn1,B ),
                       inner1d(dn2,e0_norm),
                       inner1d(dn2,B )]).T[:,:,np.newaxis]
    # Least-squares solve per face via pseudo-inverse (rcond=-1 keeps the
    # legacy "machine-precision" cutoff behaviour of np.linalg.pinv).
    X1=np.array([np.linalg.pinv(a,-1) for a in A])
    X = np.matmul(X1,b)
    #now calculate curvature per vertex as weighted sum of the face curvature
    for i,f in enumerate(faces):
        for j in [0,1,2]:
            new_ku,new_kuv,new_kv = ProjectCurvatureTensor(e0_norm[i],B[i],FaceNormals[i],X[i][0],X[i][1],X[i][2],up[f[j]],vp[f[j]])
            VertexSFM[f[j]]+=wfp[i,j]*np.array([[new_ku,new_kuv],[new_kuv,new_kv]]).squeeze()
    return VertexSFM,VertNormals
def GetCurvatures(vertices,faces):
    """Compute per-vertex curvatures of a triangle mesh.

    INPUT : vertices,faces
    OUTPUT: Gaussian Curvature, Mean Curvature, vertex normals
    """
    sfm_list, vert_normals = CalcCurvature(vertices, faces)
    # Stack the list of 2x2 tensors into an (n, 2, 2) array and slice out
    # the principal components in one go instead of per-tensor loops.
    tensors = np.array(sfm_list)
    ku = tensors[:, 0, 0]
    kuv = tensors[:, 0, 1]
    kv = tensors[:, 1, 1]
    gaussian = ku * kv - kuv ** 2
    mean = 0.5 * (ku + kv)
    return gaussian, mean, vert_normals
| [
"numpy.sqrt",
"numpy.cross",
"numpy.linalg.pinv",
"numpy.core.umath_tests.inner1d",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.matmul",
"numpy.transpose",
"numpy.bincount"
] | [((695, 711), 'numpy.cross', 'np.cross', (['up', 'vp'], {}), '(up, vp)\n', (703, 711), True, 'import numpy as np\n'), ((3044, 3098), 'numpy.sqrt', 'np.sqrt', (['(e0[:, 0] ** 2 + e0[:, 1] ** 2 + e0[:, 2] ** 2)'], {}), '(e0[:, 0] ** 2 + e0[:, 1] ** 2 + e0[:, 2] ** 2)\n', (3051, 3098), True, 'import numpy as np\n'), ((3095, 3149), 'numpy.sqrt', 'np.sqrt', (['(e1[:, 0] ** 2 + e1[:, 1] ** 2 + e1[:, 2] ** 2)'], {}), '(e1[:, 0] ** 2 + e1[:, 1] ** 2 + e1[:, 2] ** 2)\n', (3102, 3149), True, 'import numpy as np\n'), ((3146, 3200), 'numpy.sqrt', 'np.sqrt', (['(e2[:, 0] ** 2 + e2[:, 1] ** 2 + e2[:, 2] ** 2)'], {}), '(e2[:, 0] ** 2 + e2[:, 1] ** 2 + e2[:, 2] ** 2)\n', (3153, 3200), True, 'import numpy as np\n'), ((3381, 3466), 'numpy.sqrt', 'np.sqrt', (['(FaceNormals[:, 0] ** 2 + FaceNormals[:, 1] ** 2 + FaceNormals[:, 2] ** 2)'], {}), '(FaceNormals[:, 0] ** 2 + FaceNormals[:, 1] ** 2 + FaceNormals[:, 2] **\n 2)\n', (3388, 3466), True, 'import numpy as np\n'), ((3472, 3499), 'numpy.zeros', 'np.zeros', (['vertices.shape[0]'], {}), '(vertices.shape[0])\n', (3480, 3499), True, 'import numpy as np\n'), ((3520, 3544), 'numpy.zeros', 'np.zeros', (['vertices.shape'], {}), '(vertices.shape)\n', (3528, 3544), True, 'import numpy as np\n'), ((6409, 6433), 'numpy.zeros', 'np.zeros', (['vertices.shape'], {}), '(vertices.shape)\n', (6417, 6433), True, 'import numpy as np\n'), ((8477, 8493), 'numpy.matmul', 'np.matmul', (['X1', 'b'], {}), '(X1, b)\n', (8486, 8493), True, 'import numpy as np\n'), ((9090, 9134), 'numpy.array', 'np.array', (['[VSFM[0, 0] for VSFM in VertexSFM]'], {}), '([VSFM[0, 0] for VSFM in VertexSFM])\n', (9098, 9134), True, 'import numpy as np\n'), ((9149, 9193), 'numpy.array', 'np.array', (['[VSFM[0, 1] for VSFM in VertexSFM]'], {}), '([VSFM[0, 1] for VSFM in VertexSFM])\n', (9157, 9193), True, 'import numpy as np\n'), ((9208, 9252), 'numpy.array', 'np.array', (['[VSFM[1, 1] for VSFM in VertexSFM]'], {}), '([VSFM[1, 1] for VSFM in VertexSFM])\n', (9216, 9252), True, 
'import numpy as np\n'), ((724, 772), 'numpy.sqrt', 'np.sqrt', (['(nrp[0] ** 2 + nrp[1] ** 2 + nrp[2] ** 2)'], {}), '(nrp[0] ** 2 + nrp[1] ** 2 + nrp[2] ** 2)\n', (731, 772), True, 'import numpy as np\n'), ((3843, 3910), 'numpy.bincount', 'np.bincount', (['verts'], {'minlength': 'vertices.shape[0]', 'weights': 'wfv1[:, j]'}), '(verts, minlength=vertices.shape[0], weights=wfv1[:, j])\n', (3854, 3910), True, 'import numpy as np\n'), ((3978, 4045), 'numpy.bincount', 'np.bincount', (['verts'], {'minlength': 'vertices.shape[0]', 'weights': 'wfv2[:, j]'}), '(verts, minlength=vertices.shape[0], weights=wfv2[:, j])\n', (3989, 4045), True, 'import numpy as np\n'), ((4113, 4180), 'numpy.bincount', 'np.bincount', (['verts'], {'minlength': 'vertices.shape[0]', 'weights': 'wfv3[:, j]'}), '(verts, minlength=vertices.shape[0], weights=wfv3[:, j])\n', (4124, 4180), True, 'import numpy as np\n'), ((5132, 5202), 'numpy.bincount', 'np.bincount', (['verts'], {'minlength': 'vertices.shape[0]', 'weights': 'Acorner[:, j]'}), '(verts, minlength=vertices.shape[0], weights=Acorner[:, j])\n', (5143, 5202), True, 'import numpy as np\n'), ((6357, 6373), 'numpy.zeros', 'np.zeros', (['[2, 2]'], {}), '([2, 2])\n', (6365, 6373), True, 'import numpy as np\n'), ((7347, 7408), 'numpy.sum', 'np.sum', (['((e0[0, :] ** 2 + e1[0, :] ** 2 + e2[0, :] ** 2) / 3.0)'], {}), '((e0[0, :] ** 2 + e1[0, :] ** 2 + e2[0, :] ** 2) / 3.0)\n', (7353, 7408), True, 'import numpy as np\n'), ((7899, 7931), 'numpy.transpose', 'np.transpose', (['AT'], {'axes': '(0, 2, 1)'}), '(AT, axes=(0, 2, 1))\n', (7911, 7931), True, 'import numpy as np\n'), ((8432, 8453), 'numpy.linalg.pinv', 'np.linalg.pinv', (['a', '(-1)'], {}), '(a, -1)\n', (8446, 8453), True, 'import numpy as np\n'), ((7424, 7444), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e0', 'e0_norm'], {}), '(e0, e0_norm)\n', (7431, 7444), False, 'from numpy.core.umath_tests import inner1d\n'), ((7445, 7459), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e0', 'B'], {}), 
'(e0, B)\n', (7452, 7459), False, 'from numpy.core.umath_tests import inner1d\n'), ((7460, 7476), 'numpy.zeros', 'np.zeros', (['nfaces'], {}), '(nfaces)\n', (7468, 7476), True, 'import numpy as np\n'), ((7505, 7521), 'numpy.zeros', 'np.zeros', (['nfaces'], {}), '(nfaces)\n', (7513, 7521), True, 'import numpy as np\n'), ((7523, 7543), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e0', 'e0_norm'], {}), '(e0, e0_norm)\n', (7530, 7543), False, 'from numpy.core.umath_tests import inner1d\n'), ((7544, 7558), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e0', 'B'], {}), '(e0, B)\n', (7551, 7558), False, 'from numpy.core.umath_tests import inner1d\n'), ((7586, 7606), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e1', 'e0_norm'], {}), '(e1, e0_norm)\n', (7593, 7606), False, 'from numpy.core.umath_tests import inner1d\n'), ((7607, 7621), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e1', 'B'], {}), '(e1, B)\n', (7614, 7621), False, 'from numpy.core.umath_tests import inner1d\n'), ((7622, 7638), 'numpy.zeros', 'np.zeros', (['nfaces'], {}), '(nfaces)\n', (7630, 7638), True, 'import numpy as np\n'), ((7667, 7683), 'numpy.zeros', 'np.zeros', (['nfaces'], {}), '(nfaces)\n', (7675, 7683), True, 'import numpy as np\n'), ((7685, 7705), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e1', 'e0_norm'], {}), '(e1, e0_norm)\n', (7692, 7705), False, 'from numpy.core.umath_tests import inner1d\n'), ((7706, 7720), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e1', 'B'], {}), '(e1, B)\n', (7713, 7720), False, 'from numpy.core.umath_tests import inner1d\n'), ((7748, 7768), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e2', 'e0_norm'], {}), '(e2, e0_norm)\n', (7755, 7768), False, 'from numpy.core.umath_tests import inner1d\n'), ((7769, 7783), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e2', 'B'], {}), '(e2, B)\n', (7776, 7783), False, 'from numpy.core.umath_tests import inner1d\n'), ((7784, 7800), 'numpy.zeros', 'np.zeros', (['nfaces'], {}), '(nfaces)\n', (7792, 7800), True, 
'import numpy as np\n'), ((7829, 7845), 'numpy.zeros', 'np.zeros', (['nfaces'], {}), '(nfaces)\n', (7837, 7845), True, 'import numpy as np\n'), ((7847, 7867), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e2', 'e0_norm'], {}), '(e2, e0_norm)\n', (7854, 7867), False, 'from numpy.core.umath_tests import inner1d\n'), ((7868, 7882), 'numpy.core.umath_tests.inner1d', 'inner1d', (['e2', 'B'], {}), '(e2, B)\n', (7875, 7882), False, 'from numpy.core.umath_tests import inner1d\n'), ((8136, 8157), 'numpy.core.umath_tests.inner1d', 'inner1d', (['dn0', 'e0_norm'], {}), '(dn0, e0_norm)\n', (8143, 8157), False, 'from numpy.core.umath_tests import inner1d\n'), ((8183, 8198), 'numpy.core.umath_tests.inner1d', 'inner1d', (['dn0', 'B'], {}), '(dn0, B)\n', (8190, 8198), False, 'from numpy.core.umath_tests import inner1d\n'), ((8230, 8251), 'numpy.core.umath_tests.inner1d', 'inner1d', (['dn1', 'e0_norm'], {}), '(dn1, e0_norm)\n', (8237, 8251), False, 'from numpy.core.umath_tests import inner1d\n'), ((8277, 8292), 'numpy.core.umath_tests.inner1d', 'inner1d', (['dn1', 'B'], {}), '(dn1, B)\n', (8284, 8292), False, 'from numpy.core.umath_tests import inner1d\n'), ((8324, 8345), 'numpy.core.umath_tests.inner1d', 'inner1d', (['dn2', 'e0_norm'], {}), '(dn2, e0_norm)\n', (8331, 8345), False, 'from numpy.core.umath_tests import inner1d\n'), ((8371, 8386), 'numpy.core.umath_tests.inner1d', 'inner1d', (['dn2', 'B'], {}), '(dn2, B)\n', (8378, 8386), False, 'from numpy.core.umath_tests import inner1d\n'), ((8792, 8840), 'numpy.array', 'np.array', (['[[new_ku, new_kuv], [new_kuv, new_kv]]'], {}), '([[new_ku, new_kuv], [new_kuv, new_kv]])\n', (8800, 8840), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from PIL import Image
from pathlib import Path
from typing import Tuple, Union
def binarise_mask(mask: Union[np.ndarray, str, Path]) -> np.ndarray:
    """ Split the mask into a set of binary masks.
    Assume the mask is already binary masks of [N, Height, Width], or
    grayscale mask of [Height, Width] with different values
    representing different objects, 0 as background.

    Returns an [N, Height, Width] boolean array with one slice per object.
    """
    # get numpy array from image file
    if isinstance(mask, (str, Path)):
        mask = np.array(Image.open(mask))
    # convert to numpy array
    mask = np.asarray(mask)
    # if it is a boolean array, consider it's already binarised
    if mask.ndim == 3:
        # np.bool_ is the NumPy boolean scalar type; the np.bool alias was
        # deprecated in NumPy 1.20 and removed in 1.24.
        assert np.issubdtype(mask.dtype, np.bool_), "'mask' should be binary."
        return mask
    assert mask.ndim == 2, "'mask' should have at least 2 channels."
    # remove background
    obj_values = np.unique(mask)[1:]
    # get the binary masks for each color (instance)
    binary_masks = mask == obj_values[:, None, None]
    return binary_masks
def colorise_binary_mask(
    binary_mask: np.ndarray, color: Tuple[int, int, int] = (2, 166, 101)
) -> np.ndarray:
    """ Set the color for the instance in the mask. """
    height, width = binary_mask.shape[0], binary_mask.shape[1]
    # Start from an all-black RGB image and paint the instance pixels.
    colored_mask = np.zeros((height, width, 3), dtype=np.uint8)
    colored_mask[binary_mask] = color
    return colored_mask
def transparentise_mask(
    colored_mask: np.ndarray, alpha: float = 0.5
) -> np.ndarray:
    """ Return a mask with fully transparent background and alpha-transparent
    instances.
    Assume channel is the third dimension of mask, and no alpha channel.
    """
    assert (
        colored_mask.shape[2] == 3
    ), "'colored_mask' should be of 3-channels RGB."
    # A pixel belongs to an instance when any RGB channel is non-zero.
    has_color = np.any(colored_mask != 0, axis=2)
    # Instance pixels get alpha*255, background pixels get 0.
    alpha_channel = np.where(has_color, alpha * 255, 0).astype(np.uint8)
    return np.dstack([colored_mask, alpha_channel])
def merge_binary_masks(binary_masks: np.ndarray) -> np.ndarray:
    """ Merge binary masks into one grayscale mask.
    Assume binary_masks is of [N, Height, Width].
    """
    # Instance i (0-based) is labelled i+1 so 0 stays the background value.
    labels = np.arange(1, len(binary_masks) + 1)
    labelled = binary_masks * labels[:, None, None]
    # Later instances win where masks overlap, matching the max reduction.
    return labelled.max(axis=0).astype(np.uint8)
| [
"numpy.dstack",
"PIL.Image.open",
"numpy.unique",
"numpy.asarray",
"numpy.max",
"numpy.issubdtype",
"numpy.zeros"
] | [((667, 683), 'numpy.asarray', 'np.asarray', (['mask'], {}), '(mask)\n', (677, 683), True, 'import numpy as np\n'), ((1596, 1616), 'numpy.dstack', 'np.dstack', (['[r, g, b]'], {}), '([r, g, b])\n', (1605, 1616), True, 'import numpy as np\n'), ((2227, 2264), 'numpy.dstack', 'np.dstack', (['[colored_mask, alpha_mask]'], {}), '([colored_mask, alpha_mask])\n', (2236, 2264), True, 'import numpy as np\n'), ((787, 821), 'numpy.issubdtype', 'np.issubdtype', (['mask.dtype', 'np.bool'], {}), '(mask.dtype, np.bool)\n', (800, 821), True, 'import numpy as np\n'), ((981, 996), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (990, 996), True, 'import numpy as np\n'), ((608, 624), 'PIL.Image.open', 'Image.open', (['mask'], {}), '(mask)\n', (618, 624), False, 'from PIL import Image\n'), ((1409, 1428), 'numpy.zeros', 'np.zeros', (['[3, h, w]'], {}), '([3, h, w])\n', (1417, 1428), True, 'import numpy as np\n'), ((2611, 2640), 'numpy.max', 'np.max', (['labeled_masks'], {'axis': '(0)'}), '(labeled_masks, axis=0)\n', (2617, 2640), True, 'import numpy as np\n')] |
import bpy
import numpy as np
from smorgasbord.common.decorate import register
from smorgasbord.common.io import get_vecs, get_scalars
def get_red(arr):
    """Return the red channel (column 0) of an RGBA array as a flat array."""
    red_column = arr[:, 0]
    return red_column.ravel()
def get_green(arr):
    """Return the green channel (column 1) of an RGBA array as a flat array."""
    green_column = arr[:, 1]
    return green_column.ravel()
def get_blue(arr):
    """Return the blue channel (column 2) of an RGBA array as a flat array."""
    blue_column = arr[:, 2]
    return blue_column.ravel()
def avg_rgb(arr):
    """Average the RGB channels per row, dropping the trailing alpha column."""
    rgb_only = arr[:, :-1]
    return rgb_only.mean(axis=1)
@register
class VertexColorToGroup(bpy.types.Operator):
    """Blender operator that converts the active vertex color layer of every
    selected mesh object into a vertex group of the same name, using one of
    several color-to-weight conversion methods."""
    bl_idname = "object.vertex_color_to_group"
    bl_label = "Vertex Color to Group"
    bl_description = (
        "For every selected object, converts the active vertex color "
        "layer into a eponymous vertex group, given a conversion method"
    )
    bl_options = {'REGISTER', 'UNDO'}
    # Menu(s) this operator is appended to by the registration decorator.
    menus = [bpy.types.MESH_MT_vertex_group_context_menu]

    # Selects which module-level converter (get_red/get_green/get_blue/avg_rgb)
    # maps RGBA colors to scalar weights.
    method: bpy.props.EnumProperty(
        name="Method",
        description="Method used to calculate scalar weights from rgb colors",
        items=(
            ('AVG', "Average", "Average all channels"),
            ('RED', "Red", "Pass red channel"),
            ('GRE', "Green", "Pass green channel"),
            ('BLU', "Blue", "Pass blue channel"),
        ),
        default='AVG',
    )

    @classmethod
    def poll(cls, context):
        # Only available in object mode with at least one editable selection.
        return context.mode == 'OBJECT' and \
            len(context.selected_editable_objects) > 0

    def execute(self, context):
        """Convert the active vertex color layer of each selected mesh to a
        new vertex group named after the layer."""
        if self.method == 'RED':
            meth = get_red
        elif self.method == 'GRE':
            meth = get_green
        elif self.method == 'BLU':
            meth = get_blue
        else:
            meth = avg_rgb

        for o in context.selected_editable_objects:
            if o.type != 'MESH':
                continue

            cols = o.data.vertex_colors.active
            if not cols:
                continue

            # Get colors of all mesh loops
            # These loops don't always match the vertex count and
            # are not stored at the correct vertex indices.
            cs = get_vecs(cols.data, attr='color', vecsize=4)

            # For every loop, get its vertex index
            lops = o.data.loops
            vindcs = get_scalars(lops, attr='vertex_index', dtype=np.int)

            # Find the indices of 'vindcs' at which a unique entry is
            # found for the first time.
            _, i_vindcs = np.unique(vindcs, return_index=True)

            # This index list 'i_vindcs' filters out all redundant
            # entries in 'cs' and sorts them so each color lands at
            # the index of its corresponding vertex.
            # Then calculate the (unique) weights of the colors via
            # the chosen method.
            weights = meth(cs[i_vindcs])
            u_weights = np.unique(weights)

            vg = o.vertex_groups.new(name=cols.name)
            # vg.add takes one weight per call, so batch vertices by weight.
            for w in u_weights:
                # Get the indices of one weight value and add it to
                # the vertex group.
                indcs = np.where(weights == w)[0]
                vg.add(indcs.tolist(), w, 'REPLACE')
        return {'FINISHED'}
# This is an example calculation of the above execute function.
# cs =
# [[0,0,0],
# [1,0,0],
# [0,1,0],
# [1,0,0],
# [1,1,0]]
# vindcs = [1, 3, 2, 3, 0]
# _, i_vindcs = [0, 1, 2, 3] [4, 0, 2, 1]
# cs[i_vindcs] =
# [[1,1,0],
# [0,0,0],
# [0,1,0],
# [1,0,0]]
# weights = [.6, 0, .3, .3]
# u_weights = [0, .3, .6]
# for 0 in u_weights:
# indcs = [1]
# vg.add(indcs, 0)
# for .3 in u_weights:
# indcs = [2, 3]
# vg.add(indcs, .3)
# for .6 in u_weights:
# indcs = [0]
# vg.add(indcs, .6)
| [
"numpy.unique",
"smorgasbord.common.io.get_scalars",
"numpy.average",
"numpy.where",
"bpy.props.EnumProperty",
"smorgasbord.common.io.get_vecs"
] | [((363, 394), 'numpy.average', 'np.average', (['arr[:, :-1]'], {'axis': '(1)'}), '(arr[:, :-1], axis=1)\n', (373, 394), True, 'import numpy as np\n'), ((825, 1132), 'bpy.props.EnumProperty', 'bpy.props.EnumProperty', ([], {'name': '"""Method"""', 'description': '"""Method used to calculate scalar weights from rgb colors"""', 'items': "(('AVG', 'Average', 'Average all channels'), ('RED', 'Red',\n 'Pass red channel'), ('GRE', 'Green', 'Pass green channel'), ('BLU',\n 'Blue', 'Pass blue channel'))", 'default': '"""AVG"""'}), "(name='Method', description=\n 'Method used to calculate scalar weights from rgb colors', items=((\n 'AVG', 'Average', 'Average all channels'), ('RED', 'Red',\n 'Pass red channel'), ('GRE', 'Green', 'Pass green channel'), ('BLU',\n 'Blue', 'Pass blue channel')), default='AVG')\n", (847, 1132), False, 'import bpy\n'), ((2017, 2061), 'smorgasbord.common.io.get_vecs', 'get_vecs', (['cols.data'], {'attr': '"""color"""', 'vecsize': '(4)'}), "(cols.data, attr='color', vecsize=4)\n", (2025, 2061), False, 'from smorgasbord.common.io import get_vecs, get_scalars\n'), ((2167, 2219), 'smorgasbord.common.io.get_scalars', 'get_scalars', (['lops'], {'attr': '"""vertex_index"""', 'dtype': 'np.int'}), "(lops, attr='vertex_index', dtype=np.int)\n", (2178, 2219), False, 'from smorgasbord.common.io import get_vecs, get_scalars\n'), ((2357, 2393), 'numpy.unique', 'np.unique', (['vindcs'], {'return_index': '(True)'}), '(vindcs, return_index=True)\n', (2366, 2393), True, 'import numpy as np\n'), ((2749, 2767), 'numpy.unique', 'np.unique', (['weights'], {}), '(weights)\n', (2758, 2767), True, 'import numpy as np\n'), ((2982, 3004), 'numpy.where', 'np.where', (['(weights == w)'], {}), '(weights == w)\n', (2990, 3004), True, 'import numpy as np\n')] |
import torch
import numpy
from deep_signature.nn.datasets import DeepSignatureEuclideanArclengthTupletsOnlineDataset
from deep_signature.nn.datasets import DeepSignatureEquiaffineArclengthTupletsOnlineDataset
from deep_signature.nn.datasets import DeepSignatureAffineArclengthTupletsOnlineDataset
from deep_signature.nn.networks import DeepSignatureArcLengthNet
from deep_signature.nn.losses import ArcLengthLoss
from deep_signature.nn.losses import ArcLengthLoss2
from deep_signature.nn.trainers import ModelTrainer
from common import settings
from common import utils as common_utils
from argparse import ArgumentParser
if __name__ == '__main__':
    # Entry point: parse CLI options, build online tuplet datasets for the
    # chosen transformation group, and train the arc-length network with LBFGS.
    torch.set_default_dtype(torch.float64)
    parser = ArgumentParser()
    parser.add_argument("--group", dest="group")
    parser.add_argument("--epochs", dest="epochs", default=settings.arclength_default_epochs, type=int)
    parser.add_argument("--continue_training", dest="continue_training", default=settings.arclength_default_continue_training, type=bool)
    parser.add_argument("--train_buffer_size", dest="train_buffer_size", default=settings.arclength_default_train_buffer_size, type=int)
    parser.add_argument("--validation_buffer_size", dest="validation_buffer_size", default=settings.arclength_default_validation_buffer_size, type=int)
    parser.add_argument("--train_batch_size", dest="train_batch_size", default=settings.arclength_default_train_batch_size, type=int)
    parser.add_argument("--validation_batch_size", dest="validation_batch_size", default=settings.arclength_default_validation_batch_size, type=int)
    parser.add_argument("--train_dataset_size", dest="train_dataset_size", default=settings.arclength_default_train_dataset_size, type=int)
    parser.add_argument("--validation_dataset_size", dest="validation_dataset_size", default=settings.arclength_default_validation_dataset_size, type=int)
    parser.add_argument("--learning_rate", dest="learning_rate", default=settings.arclength_default_learning_rate, type=float)
    parser.add_argument("--validation_split", dest="validation_split", default=settings.arclength_default_validation_split, type=float)
    parser.add_argument("--supporting_points_count", dest="supporting_points_count", default=settings.arclength_default_supporting_points_count, type=int)
    parser.add_argument("--anchor_points_count", dest="anchor_points_count", default=settings.arclength_default_anchor_points_count, type=int)
    parser.add_argument("--multimodality", dest="multimodality", default=settings.arclength_default_multimodality, type=int)
    parser.add_argument("--min_offset", dest="min_offset", default=settings.arclength_default_min_offset, type=int)
    parser.add_argument("--max_offset", dest="max_offset", default=settings.arclength_default_max_offset, type=int)
    parser.add_argument("--num_workers_train", dest="num_workers_train", default=settings.arclength_default_num_workers_train, type=int)
    parser.add_argument("--num_workers_validation", dest="num_workers_validation", default=settings.arclength_default_num_workers_validation, type=int)
    parser.add_argument("--history_size", dest="history_size", default=settings.arclength_default_history_size, type=int)
    args = parser.parse_args()
    # Select the dataset class and results directory for the requested group.
    OnlineDataset = None
    results_base_dir_path = None
    if args.group == 'euclidean':
        OnlineDataset = DeepSignatureEuclideanArclengthTupletsOnlineDataset
        results_base_dir_path = settings.level_curves_euclidean_arclength_tuplets_results_dir_path
    elif args.group == 'equiaffine':
        OnlineDataset = DeepSignatureEquiaffineArclengthTupletsOnlineDataset
        results_base_dir_path = settings.level_curves_equiaffine_arclength_tuplets_results_dir_path
    elif args.group == 'affine':
        OnlineDataset = DeepSignatureAffineArclengthTupletsOnlineDataset
        results_base_dir_path = settings.level_curves_affine_arclength_tuplets_results_dir_path
    train_dataset = OnlineDataset(
        dataset_size=args.train_dataset_size,
        dir_path=settings.level_curves_dir_path_train,
        multimodality=args.multimodality,
        replace=True,
        buffer_size=args.train_buffer_size,
        num_workers=args.num_workers_train,
        supporting_points_count=args.supporting_points_count,
        min_offset=args.min_offset,
        max_offset=args.max_offset,
        anchor_points_count=args.anchor_points_count)
    validation_dataset = OnlineDataset(
        dataset_size=args.validation_dataset_size,
        dir_path=settings.level_curves_dir_path_validation,
        multimodality=args.multimodality,
        replace=False,
        buffer_size=args.validation_buffer_size,
        num_workers=args.num_workers_validation,
        supporting_points_count=args.supporting_points_count,
        min_offset=args.min_offset,
        max_offset=args.max_offset,
        anchor_points_count=args.anchor_points_count)
    # NOTE(review): the validation dataset is started and immediately stopped —
    # presumably to fill its buffer once before training; confirm intent.
    validation_dataset.start()
    validation_dataset.stop()
    train_dataset.start()
    model = DeepSignatureArcLengthNet(sample_points=args.supporting_points_count, transformation_group_type=args.group).cuda()
    print(model)
    # Optionally resume from the most recent checkpoint in the results dir.
    if args.continue_training:
        latest_subdir = common_utils.get_latest_subdirectory(results_base_dir_path)
        results = numpy.load(f"{latest_subdir}/results.npy", allow_pickle=True).item()
        model.load_state_dict(torch.load(results['model_file_path'], map_location=torch.device('cuda')))
    optimizer = torch.optim.LBFGS(model.parameters(), lr=args.learning_rate, line_search_fn='strong_wolfe', history_size=args.history_size)
    # optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    loss_fn = ArcLengthLoss(anchor_points_count=args.anchor_points_count)
    model_trainer = ModelTrainer(model=model, loss_functions=[loss_fn], optimizer=optimizer)
    model_trainer.fit(
        train_dataset=train_dataset,
        validation_dataset=validation_dataset,
        epochs=args.epochs,
        train_batch_size=args.train_batch_size,
        validation_batch_size=args.validation_batch_size,
        validation_split=args.validation_split,
        results_base_dir_path=results_base_dir_path)
| [
"common.utils.get_latest_subdirectory",
"deep_signature.nn.losses.ArcLengthLoss",
"deep_signature.nn.trainers.ModelTrainer",
"argparse.ArgumentParser",
"deep_signature.nn.networks.DeepSignatureArcLengthNet",
"torch.set_default_dtype",
"numpy.load",
"torch.device"
] | [((656, 694), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float64'], {}), '(torch.float64)\n', (679, 694), False, 'import torch\n'), ((709, 725), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (723, 725), False, 'from argparse import ArgumentParser\n'), ((5678, 5737), 'deep_signature.nn.losses.ArcLengthLoss', 'ArcLengthLoss', ([], {'anchor_points_count': 'args.anchor_points_count'}), '(anchor_points_count=args.anchor_points_count)\n', (5691, 5737), False, 'from deep_signature.nn.losses import ArcLengthLoss\n'), ((5758, 5830), 'deep_signature.nn.trainers.ModelTrainer', 'ModelTrainer', ([], {'model': 'model', 'loss_functions': '[loss_fn]', 'optimizer': 'optimizer'}), '(model=model, loss_functions=[loss_fn], optimizer=optimizer)\n', (5770, 5830), False, 'from deep_signature.nn.trainers import ModelTrainer\n'), ((5197, 5256), 'common.utils.get_latest_subdirectory', 'common_utils.get_latest_subdirectory', (['results_base_dir_path'], {}), '(results_base_dir_path)\n', (5233, 5256), True, 'from common import utils as common_utils\n'), ((5009, 5120), 'deep_signature.nn.networks.DeepSignatureArcLengthNet', 'DeepSignatureArcLengthNet', ([], {'sample_points': 'args.supporting_points_count', 'transformation_group_type': 'args.group'}), '(sample_points=args.supporting_points_count,\n transformation_group_type=args.group)\n', (5034, 5120), False, 'from deep_signature.nn.networks import DeepSignatureArcLengthNet\n'), ((5275, 5336), 'numpy.load', 'numpy.load', (['f"""{latest_subdir}/results.npy"""'], {'allow_pickle': '(True)'}), "(f'{latest_subdir}/results.npy', allow_pickle=True)\n", (5285, 5336), False, 'import numpy\n'), ((5426, 5446), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5438, 5446), False, 'import torch\n')] |
"""Work out the optimum mass for maximum cannonball range"""
import sympy as sym
import numpy as np
import matplotlib.pyplot as plt
import atmosphere
import pycollo
from pycollo.functions import cubic_spline
# state variables
r = sym.Symbol("r") # downrange distance
h = sym.Symbol("h") # height (above sea level?)
v = sym.Symbol("v") # velocity
y = sym.Symbol("y") # velocity angle
# state parameter
radius = sym.Symbol("rad")
#atmospheric density spline
rho = cubic_spline(h,atmosphere.altitudes,atmosphere.rho_data)
#constants
g = 9.81
density = 7870  # steel, kg/m^3
Cd = 0.5
cannon_energy = 400000  # launch kinetic-energy budget, J
# cannonball parameters
m = 4/3*np.pi*radius**3*density
sa = np.pi*radius**2
#drag
D = 0.5*rho*v**2*sa*Cd
# Point-mass projectile dynamics with aerodynamic drag, expressed in
# flight-path coordinates (speed v and flight-path angle y).
state_equations = {
    r: v*sym.cos(y),
    h: v*sym.sin(y),
    v: -D/m-g*sym.sin(y),
    y: -g*sym.cos(y)/v
}
problem = pycollo.OptimalControlProblem("Optimising Cannonball Radius",parameter_variables=[radius])
phase = problem.new_phase("parabola",[r,h,v,y])
phase.state_equations = state_equations
# Maximise downrange distance (pycollo minimises, hence the negation).
problem.objective_function = -phase.final_state_variables[0]
phase.bounds.initial_time = 0.0
phase.bounds.final_time = [1,3600] # unlikely to take an hour to land
phase.bounds.initial_state_constraints = {
    r: 0.0,
    h: 0.0,
}
phase.bounds.state_variables = {
    r: [0,1e6],
    h: [0,np.max(atmosphere.altitudes)],
    v: [1,1e6],
    y: [-np.pi/2,np.pi/2]
}
phase.bounds.final_state_constraints = {
    h: 0,
}
# The launch kinetic energy is capped by the cannon's energy budget.
phase.path_constraints = [1/2*m*v**2]
phase.bounds.path_constraints = [[0,cannon_energy]]
problem.bounds.parameter_variables = {radius:[0,10]} # 20 metre diameter cannon ball is unlikely to go very far
problem.guess.parameter_variables = [0.05]
phase.guess.time = [0, 60]
phase.guess.state_variables = [[0, 1000], [0, 0], [1,1], [0,0]]
problem.settings.max_mesh_iterations=5
problem.initialise()
problem.solve()
optimal_radius = problem.solution.parameter[0]
print(f"""Cannonball radius: {optimal_radius} m
Cannonball mass: {m.subs(radius,optimal_radius)} kg
Launch angle: {np.rad2deg(problem.solution.state[0][3][0])} degrees
Maximum range: {problem.solution.state[0][0][-1]} m""")
plt.plot(problem.solution.state[0][0],problem.solution.state[0][1])
plt.ylabel("Altitude")
plt.xlabel("Range")
plt.show() | [
"sympy.sin",
"sympy.Symbol",
"sympy.cos",
"pycollo.functions.cubic_spline",
"matplotlib.pyplot.ylabel",
"pycollo.OptimalControlProblem",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.rad2deg",
"matplotlib.pyplot.show"
] | [((232, 247), 'sympy.Symbol', 'sym.Symbol', (['"""r"""'], {}), "('r')\n", (242, 247), True, 'import sympy as sym\n'), ((273, 288), 'sympy.Symbol', 'sym.Symbol', (['"""h"""'], {}), "('h')\n", (283, 288), True, 'import sympy as sym\n'), ((321, 336), 'sympy.Symbol', 'sym.Symbol', (['"""v"""'], {}), "('v')\n", (331, 336), True, 'import sympy as sym\n'), ((352, 367), 'sympy.Symbol', 'sym.Symbol', (['"""y"""'], {}), "('y')\n", (362, 367), True, 'import sympy as sym\n'), ((412, 429), 'sympy.Symbol', 'sym.Symbol', (['"""rad"""'], {}), "('rad')\n", (422, 429), True, 'import sympy as sym\n'), ((465, 523), 'pycollo.functions.cubic_spline', 'cubic_spline', (['h', 'atmosphere.altitudes', 'atmosphere.rho_data'], {}), '(h, atmosphere.altitudes, atmosphere.rho_data)\n', (477, 523), False, 'from pycollo.functions import cubic_spline\n'), ((819, 914), 'pycollo.OptimalControlProblem', 'pycollo.OptimalControlProblem', (['"""Optimising Cannonball Radius"""'], {'parameter_variables': '[radius]'}), "('Optimising Cannonball Radius',\n parameter_variables=[radius])\n", (848, 914), False, 'import pycollo\n'), ((2100, 2168), 'matplotlib.pyplot.plot', 'plt.plot', (['problem.solution.state[0][0]', 'problem.solution.state[0][1]'], {}), '(problem.solution.state[0][0], problem.solution.state[0][1])\n', (2108, 2168), True, 'import matplotlib.pyplot as plt\n'), ((2168, 2190), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altitude"""'], {}), "('Altitude')\n", (2178, 2190), True, 'import matplotlib.pyplot as plt\n'), ((2191, 2210), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Range"""'], {}), "('Range')\n", (2201, 2210), True, 'import matplotlib.pyplot as plt\n'), ((2211, 2221), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2219, 2221), True, 'import matplotlib.pyplot as plt\n'), ((725, 735), 'sympy.cos', 'sym.cos', (['y'], {}), '(y)\n', (732, 735), True, 'import sympy as sym\n'), ((746, 756), 'sympy.sin', 'sym.sin', (['y'], {}), '(y)\n', (753, 756), True, 'import sympy as 
sym\n'), ((1289, 1317), 'numpy.max', 'np.max', (['atmosphere.altitudes'], {}), '(atmosphere.altitudes)\n', (1295, 1317), True, 'import numpy as np\n'), ((772, 782), 'sympy.sin', 'sym.sin', (['y'], {}), '(y)\n', (779, 782), True, 'import sympy as sym\n'), ((794, 804), 'sympy.cos', 'sym.cos', (['y'], {}), '(y)\n', (801, 804), True, 'import sympy as sym\n'), ((1998, 2041), 'numpy.rad2deg', 'np.rad2deg', (['problem.solution.state[0][3][0]'], {}), '(problem.solution.state[0][3][0])\n', (2008, 2041), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torchvision import models
from utils import process_image
import json
from torch import nn, optim
from collections import OrderedDict
#loads a checkpoint and rebuilds the model
def load_checkpoint(filepath):
    """Rebuild a flower-classifier model from a training checkpoint.

    Args:
        filepath: Path to a checkpoint saved by the training script. It must
            contain 'arch', 'dropout', 'hidden_units', 'learning_rate',
            'model_state_dict', 'classifier', 'class_to_idx' and
            'optimizer_state_dict' entries.

    Returns:
        The restored model with frozen feature-extractor weights.

    Raises:
        ValueError: If the checkpoint architecture is not vgg16 or vgg11.
    """
    checkpoint = torch.load(filepath)
    if checkpoint['arch'] == "vgg16":
        model = models.vgg16(pretrained=True)
    elif checkpoint['arch'] == "vgg11":
        model = models.vgg11(pretrained=True)
    else:
        # BUG FIX: the original only printed here and then crashed with a
        # NameError because ``model`` was never assigned. Fail loudly instead.
        raise ValueError("please choose either vgg16 or vgg11")
    # Freeze the pretrained convolutional feature extractor.
    for param in model.parameters():
        param.requires_grad = False
    dropout = checkpoint['dropout']
    hidden_units = checkpoint['hidden_units']
    learning_rate = checkpoint['learning_rate']
    # Rebuild the classifier head so its parameter shapes match the saved
    # state dict before load_state_dict() runs.
    classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(25088, hidden_units)),
        ('relu', nn.ReLU()),
        ('dropout', nn.Dropout(dropout)),
        ('fc2', nn.Linear(hidden_units, 102)),
        ('output', nn.LogSoftmax(dim=1))
    ]))
    model.classifier = classifier
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    model.load_state_dict(checkpoint['model_state_dict'])
    # The fully trained classifier saved in the checkpoint replaces the
    # freshly built head (weights included).
    model.classifier = checkpoint['classifier']
    model.class_to_idx = checkpoint['class_to_idx']
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    return model
def predict(image_path, model, topk, gpu):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Args:
        image_path: Path to the image file to classify.
        model: Trained network; its ``class_to_idx`` maps class labels to
            output indices.
        topk: Number of top-ranked classes to return.
        gpu: If truthy (and CUDA is available), run inference on the GPU.

    Returns:
        (prob, label): two lists of length ``topk``, sorted from most to
        least likely.
    '''
    device = torch.device("cuda" if gpu and torch.cuda.is_available() else "cpu")
    model.eval()
    image = process_image(image_path, gpu)
    image = image.unsqueeze_(0)  # add the batch dimension
    model = model.to(device)
    image = image.to(device)
    with torch.no_grad():
        # Call the model directly (instead of model.forward) so hooks run.
        output = model(image)
    output = output.to(device)
    # The classifier ends in LogSoftmax (see load_checkpoint), so exp()
    # recovers the probabilities.
    probabilities = torch.exp(output).data
    # One topk call yields both values and indices; the original called
    # torch.topk twice on the same tensor.
    top_probs, top_idx = torch.topk(probabilities, topk)
    prob = top_probs.tolist()[0]   # probabilities
    index = top_idx.tolist()[0]    # output indices
    idx_to_class = {v: k for k, v in model.class_to_idx.items()}
    label = [idx_to_class[idx] for idx in index]
    return prob, label
def sanity_check(prob, classes, cat_to_name_path):
    """Map predicted class ids to human-readable category names.

    Args:
        prob: Predicted probabilities; kept for interface compatibility but
            not needed for the mapping itself.
        classes: Predicted class ids, used as keys into the mapping file.
        cat_to_name_path: Path to a JSON file mapping class id -> name.

    Returns:
        List of category names, in the same order as ``classes``.
    """
    with open(cat_to_name_path, 'r') as f:
        cat_to_name = json.load(f)
    # The original also computed the single best label here but never used
    # or returned it; that dead code has been removed.
    return [cat_to_name[cl] for cl in classes]
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.load",
"torch.topk",
"numpy.argmax",
"torch.exp",
"torchvision.models.vgg11",
"utils.process_image",
"torch.cuda.is_available",
"torch.nn.NLLLoss",
"torch.nn.Linear",
"torch.nn.LogSoftmax",
"json.load",
"torch.no_grad",
"torchvision.models.vgg1... | [((266, 286), 'torch.load', 'torch.load', (['filepath'], {}), '(filepath)\n', (276, 286), False, 'import torch\n'), ((1217, 1229), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (1227, 1229), False, 'from torch import nn, optim\n'), ((1813, 1843), 'utils.process_image', 'process_image', (['image_path', 'gpu'], {}), '(image_path, gpu)\n', (1826, 1843), False, 'from utils import process_image\n'), ((2534, 2549), 'numpy.argmax', 'np.argmax', (['prob'], {}), '(prob)\n', (2543, 2549), True, 'import numpy as np\n'), ((346, 375), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (358, 375), False, 'from torchvision import models\n'), ((1948, 1963), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1961, 1963), False, 'import torch\n'), ((2059, 2076), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (2068, 2076), False, 'import torch\n'), ((2496, 2508), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2505, 2508), False, 'import json\n'), ((433, 462), 'torchvision.models.vgg11', 'models.vgg11', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (445, 462), False, 'from torchvision import models\n'), ((1745, 1770), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1768, 1770), False, 'import torch\n'), ((865, 895), 'torch.nn.Linear', 'nn.Linear', (['(25088)', 'hidden_units'], {}), '(25088, hidden_units)\n', (874, 895), False, 'from torch import nn, optim\n'), ((939, 948), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (946, 948), False, 'from torch import nn, optim\n'), ((995, 1014), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1005, 1014), False, 'from torch import nn, optim\n'), ((1057, 1085), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(102)'], {}), '(hidden_units, 102)\n', (1066, 1085), False, 'from torch import nn, optim\n'), ((1131, 1151), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', 
(1144, 1151), False, 'from torch import nn, optim\n'), ((2101, 2132), 'torch.topk', 'torch.topk', (['probabilities', 'topk'], {}), '(probabilities, topk)\n', (2111, 2132), False, 'import torch\n'), ((2176, 2207), 'torch.topk', 'torch.topk', (['probabilities', 'topk'], {}), '(probabilities, topk)\n', (2186, 2207), False, 'import torch\n')] |
#!/usr/bin/env python
# Copyright 2016-2019 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import tikzplotlib
import numpy as np
import pandas as pd
from collections import Counter
import argparse
def plot_barchart(prediction, estimators=10, label_type=None, output_tex=None,
                  output_png=None):
    '''
    Make a barchart of the top X hyperparameters settings of the ranked
    estimators in all cross validation iterations.

    Parameters
    ----------
    prediction: filepath, mandatory
        Path pointing to the .hdf5 file which was is the output of the
        trainclassifier function.

    estimators: integer, default 10
        Number of hyperparameter settings/estimators used in each cross
        validation. The settings are ranked, so when supplying e.g. 10,
        the best 10 settings in each cross validation setting will be used.

    label_type: string, default None
        The name of the label predicted by the estimator. If None,
        the first label from the prediction file will be used.

    output_tex: filepath, optional
        If given, the barchart will be written to this tex file.

    output_png: filepath, optional
        If given, the barchart will be written to this png file.

    Returns
    ----------
    fig: matplotlib figure
        The figure in which the barchart is plotted.
    '''
    # Load input prediction
    prediction = pd.read_hdf(prediction)

    # Determine for which label we extract the estimator
    keys = prediction.keys()
    if label_type is None:
        label_type = keys[0]

    try:
        prediction = prediction[label_type]
    except KeyError:
        # Multiclass reroute
        prediction = prediction[keys[0]]

    # Stack the settings of the top ranked estimators of every
    # cross-validation iteration into one dictionary.
    parameters = dict()
    for est in prediction.classifiers:
        for n_setting in range(0, estimators):
            # Extract parameter settings of nth estimator
            parameters_all = est.cv_results_['params'][n_setting]
            for k, value in parameters_all.items():
                parameters.setdefault(k, list()).append(value)

    # Count for every parameter how many times a setting occurs
    counts = count_parameters(parameters)

    # Normalize the values
    normalization_factor = len(prediction.classifiers) * estimators

    # Make the barplot
    fig = plot_bars(counts, normalization_factor)

    # Save the output
    if output_tex is not None:
        print(f'Saving barchart to {output_tex}.')
        tikzplotlib.save(output_tex)

    if output_png is not None:
        print(f'Saving barchart to {output_png}.')
        fig.savefig(output_png, bbox_inches='tight', pad_inches=0, dpi=500)

    # BUG FIX: the docstring promised the figure, but the original never
    # returned it.
    return fig
def plot_bars(params, normalization_factor=None, figwidth=40, fontsize=30,
              spacing=2):
    """Draw a horizontal barchart of how often boolean settings were 'True'.

    Args:
        params: Output of count_parameters(): {setting: {value: count}}.
            Only settings whose values include 'True' or 'False' are drawn.
        normalization_factor: Divisor applied to all counts; when None, the
            largest count is used so the longest bar spans the full axis.
        figwidth: Width (and height) of the square figure, in inches.
        fontsize: Font size for tick labels and the x-axis label.
        spacing: Number of bar slots per setting; slots in between stay empty.

    Returns:
        The matplotlib figure containing the chart.
    """
    # Count how often each boolean feature group was switched on.
    # NOTE: the original seeded numpy's global RNG here, but nothing below
    # is random, so that dead call has been removed.
    ntimes_groups = list()
    groups = list()
    for key in params.keys():
        values = params[key]
        # Check if parameter is a boolean
        if 'True' in values or 'False' in values:
            # Settings that were never 'True' still get a zero-length bar.
            ntimes_groups.append(values.get('True', 0))
            groups.append(key)

    # Normalize the values in order to not make the figure too large.
    if normalization_factor is None:
        normalization_factor = max(ntimes_groups) if ntimes_groups else 1
    normalization_factor = float(normalization_factor)  # needed for percentages
    if normalization_factor == 0:
        # Guard: all-zero counts would otherwise divide by zero.
        normalization_factor = 1.0
    ntimes_groups = [x / normalization_factor for x in ntimes_groups]

    # Create the figure for the barchart
    plt.rcdefaults()
    fig, ax = plt.subplots()
    fig.set_figwidth(figwidth)
    fig.set_figheight(figwidth)
    ax.set_xlim(0, 1)

    # Spread the bars out: one filled slot per group, the rest left empty.
    y_pos = np.arange(len(groups) * spacing)
    ntimes_groups_plot = list()
    groups_plot = list()
    num = 0
    for i in range(len(groups) * spacing):
        if i % spacing == 0:
            ntimes_groups_plot.append(ntimes_groups[num])
            groups_plot.append(groups[num])
            num += 1
        else:
            # Empty entry to fill up spacing
            ntimes_groups_plot.append(0.0)
            groups_plot.append('')

    ax.barh(y_pos, ntimes_groups_plot, align='center',
            color='steelblue', ecolor='black')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(groups_plot)
    ax.tick_params(axis='both', labelsize=fontsize)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel('Percentage', fontsize=fontsize)
    return fig
def count_parameters(parameters):
    """Tally how often each value occurs per hyperparameter setting.

    Args:
        parameters: Mapping {setting name: list of observed values}.

    Returns:
        Mapping {setting name: {value: occurrence count}}. Settings whose
        values cannot be hashed (e.g. lists) are omitted entirely.
    """
    tallies = dict()
    for setting, values in parameters.items():
        try:
            tallies[setting] = dict(Counter(values))
        except TypeError:
            # Unhashable values cannot be counted; drop this setting.
            pass
    return tallies
def paracheck(parameters):
    """Print and return the fraction of iterations using each feature group.

    NOTE: Deprecated.

    Args:
        parameters: Mapping of setting name to a list of per-iteration
            values ('True'/'False' strings, or texture-family names for
            'texture_features').

    Returns:
        Mapping of setting name to its usage ratio (plus the mean
        polynomial degree when 'degree' is present).
    """
    summary = dict()
    total = float(len(parameters['semantic_features']))

    def _ratio_of(values, target):
        # Fraction of iterations in which this setting equals ``target``.
        return sum([v == target for v in values]) / total

    # Boolean feature groups that are always present.
    for group in ['semantic', 'patient', 'orientation', 'histogram', 'shape']:
        key = group + '_features'
        ratio = _ratio_of(parameters[key], 'True')
        label = 'Semantic' if group == 'semantic' else group
        print(label + ": " + str(ratio))
        summary[key] = ratio

    # Boolean feature groups that may be absent from older results.
    for group in ['coliage', 'phase', 'vessel', 'log']:
        key = group + '_features'
        if key in parameters:
            ratio = _ratio_of(parameters[key], 'True')
            print(group + ": " + str(ratio))
            summary[key] = ratio

    # The texture setting is multi-valued: one ratio per texture family,
    # plus 'all' ('True') and 'no' ('False').
    texture_values = parameters['texture_features']
    for setting, tag in [('True', 'all'), ('False', 'no'),
                         ('Gabor', 'Gabor'), ('LBP', 'LBP'),
                         ('GLCM', 'GLCM'), ('GLRLM', 'GLRLM'),
                         ('GLSZM', 'GLSZM'), ('NGTDM', 'NGTDM')]:
        ratio = _ratio_of(texture_values, setting)
        print("texture_" + tag + ": " + str(ratio))
        summary['texture_' + tag + '_features'] = ratio

    if 'degree' in parameters.keys():
        mean_degree = np.mean(parameters['degree'])
        print("Polynomial Degree: " + str(mean_degree))
        summary['polynomial_degree'] = mean_degree

    return summary
def main():
    """Command-line entry point: parse arguments and plot the barchart."""
    parser = argparse.ArgumentParser(description='Plot a Barchart.')
    parser.add_argument('-prediction', '--prediction', metavar='prediction',
                        nargs='+', dest='prediction', type=str, required=True,
                        help='Prediction file (HDF)')
    parser.add_argument('-estimators', '--estimators', metavar='estimator',
                        nargs='+', dest='estimators', type=str, required=False,
                        help='Number of estimators to evaluate in each cross validation.')
    parser.add_argument('-label_type', '--label_type', metavar='label_type',
                        nargs='+', dest='label_type', type=str, required=False,
                        help='Key of the label which was predicted.')
    parser.add_argument('-output_tex', '--output_tex', metavar='output_tex',
                        nargs='+', dest='output_tex', type=str, required=True,
                        help='Output file path (.tex)')
    parser.add_argument('-output_png', '--output_png', metavar='output_png',
                        nargs='+', dest='output_png', type=str, required=True,
                        help='Output file path (.png)')
    args = parser.parse_args()

    # nargs='+' wraps every option value in a list; flatten to scalars.
    if type(args.prediction) is list:
        args.prediction = ''.join(args.prediction)

    # BUG FIX: the original flattened a non-existent ``args.output`` here,
    # which always raised AttributeError; flatten the two real output paths.
    if type(args.output_tex) is list:
        args.output_tex = ''.join(args.output_tex)

    if type(args.output_png) is list:
        args.output_png = ''.join(args.output_png)

    if type(args.estimators) is list:
        args.estimators = int(args.estimators[0])

    if type(args.label_type) is list:
        args.label_type = ''.join(args.label_type)

    plot_barchart(prediction=args.prediction,
                  estimators=args.estimators,
                  label_type=args.label_type,
                  output_tex=args.output_tex,
                  output_png=args.output_png)


if __name__ == '__main__':
    main()
| [
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.use",
"collections.Counter",
"numpy.random.seed",
"tikzplotlib.save",
"matplotlib.pyplot.rcdefaults",
"matplotlib.pyplot.subplots",
"pandas.read_hdf"
] | [((737, 758), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (751, 758), False, 'import matplotlib\n'), ((2107, 2130), 'pandas.read_hdf', 'pd.read_hdf', (['prediction'], {}), '(prediction)\n', (2118, 2130), True, 'import pandas as pd\n'), ((3724, 3748), 'numpy.random.seed', 'np.random.seed', (['(19680801)'], {}), '(19680801)\n', (3738, 3748), True, 'import numpy as np\n'), ((4598, 4614), 'matplotlib.pyplot.rcdefaults', 'plt.rcdefaults', ([], {}), '()\n', (4612, 4614), True, 'import matplotlib.pyplot as plt\n'), ((4629, 4643), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4641, 4643), True, 'import matplotlib.pyplot as plt\n'), ((10445, 10500), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot a Barchart."""'}), "(description='Plot a Barchart.')\n", (10468, 10500), False, 'import argparse\n'), ((3382, 3410), 'tikzplotlib.save', 'tikzplotlib.save', (['output_tex'], {}), '(output_tex)\n', (3398, 3410), False, 'import tikzplotlib\n'), ((10388, 10398), 'numpy.mean', 'np.mean', (['f'], {}), '(f)\n', (10395, 10398), True, 'import numpy as np\n'), ((5847, 5862), 'collections.Counter', 'Counter', (['values'], {}), '(values)\n', (5854, 5862), False, 'from collections import Counter\n'), ((10337, 10347), 'numpy.mean', 'np.mean', (['f'], {}), '(f)\n', (10344, 10347), True, 'import numpy as np\n')] |
import eel
import numpy as np
import datetime
def rotate(arr, x, y, z):
    """Rotate 3-D point(s) ``arr`` by Euler angles x, y, z (radians).

    The combined matrix is Rz(z) @ Ry(y) @ Rx(x); the result is cast to
    float32.
    """
    cx, sx = np.cos(x), np.sin(x)
    cy, sy = np.cos(y), np.sin(y)
    cz, sz = np.cos(z), np.sin(z)
    rotation = np.array([
        [cz * cy, cz * sy * sx - sz * cx, cz * sy * cx + sz * sx],
        [sz * cy, sz * sy * sx + cz * cx, sz * sy * cx - cz * sx],
        [-sy,     cy * sx,                cy * cx],
    ])
    return rotation.dot(arr).astype(np.float32)
def get_fps(fps, start):
    """Report frames counted since the last reset.

    ``start`` is accepted for call-site symmetry but not read; the caller
    always receives a zeroed counter and a fresh timestamp.
    """
    print(fps)
    now = datetime.datetime.now()
    return 0, now
def next_step(b, r, f):
    """Advance the animation one frame.

    Rotates the vertex columns ``b`` about the cube centre (module-level
    ``offset``) by the current angular velocity ``r`` (scaled by ``deg``),
    nudges ``r`` toward fresh Gaussian noise, and bumps the frame count.
    Returns (vertices, velocity, frame_count, []).
    """
    angles = (r[0] * deg, r[1] * deg, r[2] * deg)
    b = rotate(b - offset, *angles) + offset
    jitter = np.random.normal(0, sigma, 3)
    r = 0.99 * r + 0.01 * jitter
    return b, r, f + 1, []
# Animation constants: per-frame angular step, and the cube's placement
# and size inside the 620x620 canvas.
deg = np.pi/1024
offset = 300
scale = 125
# Eight corners of a cube (columns), scaled and shifted to canvas coordinates.
box = np.array([[-1, -1, -1, -1, 1, 1, 1, 1],
          [-1, -1, 1, 1, -1, -1, 1, 1],
          [-1, 1, -1, 1, -1, 1, -1, 1]])*scale+offset
# Pairs of corner indices forming the cube's 12 edges.
connections = np.array([0,1,0,2,0,4,1,3,1,5,2,3,2,6,3,7,4,5,4,6,5,7,6,7])
# v holds the 12 edges as 24 endpoint columns (two columns per edge).
v = np.zeros((3,24))
for c in range(len(connections)//2):
    v[:,c*2:c*2+2] = box[:, connections[c*2:c*2+2]] # building the lines
# Initial random angular velocity; sigma controls how lively the tumble is.
sigma = 10
r = np.random.normal(0, sigma, 3)
fps = 0
start = datetime.datetime.now()
coordinates = []
# Launch the eel front end (app/index.html) without blocking this loop.
eel.init('app')
eel.start('index.html', size=(620,620), block=False)
while True:
    # Once at least a second has elapsed, print the frame count and reset it.
    if (datetime.datetime.now() - start).seconds:
        fps, start = get_fps(fps, start)
    # Project to 2D by dropping z; reshape to 12 lines of (x1, y1, x2, y2).
    coordinates = v[:2].T.reshape(12,4).tolist()
    eel.sleep(0.000001)#0.007)
    eel.drawLines(coordinates)
    v, r, fps, coordinates = next_step(v, r, fps)
| [
"numpy.random.normal",
"eel.sleep",
"eel.start",
"eel.init",
"numpy.array",
"numpy.zeros",
"datetime.datetime.now",
"numpy.dot",
"numpy.cos",
"numpy.sin",
"eel.drawLines"
] | [((957, 1043), 'numpy.array', 'np.array', (['[0, 1, 0, 2, 0, 4, 1, 3, 1, 5, 2, 3, 2, 6, 3, 7, 4, 5, 4, 6, 5, 7, 6, 7]'], {}), '([0, 1, 0, 2, 0, 4, 1, 3, 1, 5, 2, 3, 2, 6, 3, 7, 4, 5, 4, 6, 5, 7,\n 6, 7])\n', (965, 1043), True, 'import numpy as np\n'), ((1022, 1039), 'numpy.zeros', 'np.zeros', (['(3, 24)'], {}), '((3, 24))\n', (1030, 1039), True, 'import numpy as np\n'), ((1162, 1191), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', '(3)'], {}), '(0, sigma, 3)\n', (1178, 1191), True, 'import numpy as np\n'), ((1208, 1231), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1229, 1231), False, 'import datetime\n'), ((1250, 1265), 'eel.init', 'eel.init', (['"""app"""'], {}), "('app')\n", (1258, 1265), False, 'import eel\n'), ((1266, 1319), 'eel.start', 'eel.start', (['"""index.html"""'], {'size': '(620, 620)', 'block': '(False)'}), "('index.html', size=(620, 620), block=False)\n", (1275, 1319), False, 'import eel\n'), ((1462, 1478), 'eel.sleep', 'eel.sleep', (['(1e-06)'], {}), '(1e-06)\n', (1471, 1478), False, 'import eel\n'), ((1490, 1516), 'eel.drawLines', 'eel.drawLines', (['coordinates'], {}), '(coordinates)\n', (1503, 1516), False, 'import eel\n'), ((96, 105), 'numpy.cos', 'np.cos', (['z'], {}), '(z)\n', (102, 105), True, 'import numpy as np\n'), ((107, 116), 'numpy.sin', 'np.sin', (['z'], {}), '(z)\n', (113, 116), True, 'import numpy as np\n'), ((118, 127), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (124, 127), True, 'import numpy as np\n'), ((151, 160), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (157, 160), True, 'import numpy as np\n'), ((162, 171), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (168, 171), True, 'import numpy as np\n'), ((173, 182), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (179, 182), True, 'import numpy as np\n'), ((545, 568), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (566, 568), False, 'import datetime\n'), ((825, 930), 'numpy.array', 'np.array', (['[[-1, -1, -1, -1, 1, 1, 
1, 1], [-1, -1, 1, 1, -1, -1, 1, 1], [-1, 1, -1, 1,\n -1, 1, -1, 1]]'], {}), '([[-1, -1, -1, -1, 1, 1, 1, 1], [-1, -1, 1, 1, -1, -1, 1, 1], [-1, \n 1, -1, 1, -1, 1, -1, 1]])\n', (833, 930), True, 'import numpy as np\n'), ((456, 476), 'numpy.dot', 'np.dot', (['rot_mat', 'arr'], {}), '(rot_mat, arr)\n', (462, 476), True, 'import numpy as np\n'), ((713, 742), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', '(3)'], {}), '(0, sigma, 3)\n', (729, 742), True, 'import numpy as np\n'), ((1337, 1360), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1358, 1360), False, 'import datetime\n')] |
import matplotlib.pyplot as plt
import cv2
import numpy as np
def plot_imgs(imgs, titles=None, cmap='brg', ylabel='', normalize=True, ax=None,
r=(0, 1), dpi=100):
n = len(imgs)
if not isinstance(cmap, list):
cmap = [cmap]*n
if ax is None:
_, ax = plt.subplots(1, n, figsize=(6*n, 6), dpi=dpi)
if n == 1:
ax = [ax]
else:
if not isinstance(ax, list):
ax = [ax]
assert len(ax) == len(imgs)
for i in range(n):
if len(imgs[i].shape) == 3:
if imgs[i].shape[-1] == 3:
imgs[i] = imgs[i][..., ::-1] # BGR to RGB
elif imgs[i].shape[-1] == 1:
imgs[i] = imgs[i][..., 0]
if len(imgs[i].shape) == 2 and cmap[i] == 'brg':
cmap[i] = 'gray'
ax[i].imshow(imgs[i], cmap=plt.get_cmap(cmap[i]),
vmin=None if normalize else r[0],
vmax=None if normalize else r[1])
if titles:
ax[i].set_title(titles[i])
ax[i].get_yaxis().set_ticks([])
ax[i].get_xaxis().set_ticks([])
for spine in ax[i].spines.values(): # remove frame
spine.set_visible(False)
ax[0].set_ylabel(ylabel)
plt.tight_layout()
def draw_datches(img1, kp1, img2, kp2, matches, color=None, kp_radius=5,
                 thickness=2, margin=20):
    """Draw keypoint matches between two images placed side by side.

    Args:
        img1, img2: Input images (both grayscale HxW or both color HxWxC).
        kp1, kp2: cv2.KeyPoint lists for img1 and img2 respectively.
        matches: cv2.DMatch list linking the two keypoint sets.
        color: Fixed (B, G, R) color for all matches; random per match
            when None.
        kp_radius: Radius of the keypoint circles.
        thickness: Line/circle thickness.
        margin: Blank gap (pixels) between the two images.

    Returns:
        A new image containing both inputs with match lines drawn on top.
    """
    # Create a white canvas large enough for both images plus the margin.
    if len(img1.shape) == 3:
        new_shape = (max(img1.shape[0], img2.shape[0]),
                     img1.shape[1]+img2.shape[1]+margin,
                     img1.shape[2])
    elif len(img1.shape) == 2:
        new_shape = (max(img1.shape[0],
                     img2.shape[0]),
                     img1.shape[1]+img2.shape[1]+margin)
    new_img = np.ones(new_shape, type(img1.flat[0]))*255
    # Place original images
    new_img[0:img1.shape[0], 0:img1.shape[1]] = img1
    new_img[0:img2.shape[0],
            img1.shape[1]+margin:img1.shape[1]+img2.shape[1]+margin] = img2
    # Draw lines between matches
    if color:
        c = color
    for m in matches:
        # Generate random color for RGB/BGR and grayscale images as needed.
        if not color:
            if len(img1.shape) == 3:
                c = np.random.randint(0, 256, 3)
            else:
                # BUG FIX: np.random.randint(0, 256) returns a scalar, so the
                # c[0]/c[1]/c[2] indexing below raised IndexError for
                # grayscale images; repeat one intensity across three slots.
                c = (np.random.randint(0, 256),) * 3
        c = (int(c[0]), int(c[1]), int(c[2]))
        # NOTE(review): end1 pairs kp1 with m.trainIdx and end2 pairs kp2
        # with m.queryIdx; confirm this matches how the matcher was invoked.
        end1 = tuple(np.round(kp1[m.trainIdx].pt).astype(int))
        end2 = tuple(np.round(kp2[m.queryIdx].pt).astype(int)
                     + np.array([img1.shape[1]+margin, 0]))
        cv2.line(new_img, end1, end2, c, thickness, lineType=cv2.LINE_AA)
        cv2.circle(new_img, end1, kp_radius, c, thickness, lineType=cv2.LINE_AA)
        cv2.circle(new_img, end2, kp_radius, c, thickness, lineType=cv2.LINE_AA)
    return new_img
| [
"cv2.line",
"numpy.array",
"cv2.circle",
"numpy.random.randint",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"numpy.round",
"matplotlib.pyplot.get_cmap"
] | [((1245, 1263), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1261, 1263), True, 'import matplotlib.pyplot as plt\n'), ((291, 338), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'n'], {'figsize': '(6 * n, 6)', 'dpi': 'dpi'}), '(1, n, figsize=(6 * n, 6), dpi=dpi)\n', (303, 338), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2632), 'cv2.line', 'cv2.line', (['new_img', 'end1', 'end2', 'c', 'thickness'], {'lineType': 'cv2.LINE_AA'}), '(new_img, end1, end2, c, thickness, lineType=cv2.LINE_AA)\n', (2575, 2632), False, 'import cv2\n'), ((2641, 2713), 'cv2.circle', 'cv2.circle', (['new_img', 'end1', 'kp_radius', 'c', 'thickness'], {'lineType': 'cv2.LINE_AA'}), '(new_img, end1, kp_radius, c, thickness, lineType=cv2.LINE_AA)\n', (2651, 2713), False, 'import cv2\n'), ((2722, 2794), 'cv2.circle', 'cv2.circle', (['new_img', 'end2', 'kp_radius', 'c', 'thickness'], {'lineType': 'cv2.LINE_AA'}), '(new_img, end2, kp_radius, c, thickness, lineType=cv2.LINE_AA)\n', (2732, 2794), False, 'import cv2\n'), ((844, 865), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap[i]'], {}), '(cmap[i])\n', (856, 865), True, 'import matplotlib.pyplot as plt\n'), ((2230, 2258), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(3)'], {}), '(0, 256, 3)\n', (2247, 2258), True, 'import numpy as np\n'), ((2297, 2322), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (2314, 2322), True, 'import numpy as np\n'), ((2522, 2559), 'numpy.array', 'np.array', (['[img1.shape[1] + margin, 0]'], {}), '([img1.shape[1] + margin, 0])\n', (2530, 2559), True, 'import numpy as np\n'), ((2395, 2423), 'numpy.round', 'np.round', (['kp1[m.trainIdx].pt'], {}), '(kp1[m.trainIdx].pt)\n', (2403, 2423), True, 'import numpy as np\n'), ((2458, 2486), 'numpy.round', 'np.round', (['kp2[m.queryIdx].pt'], {}), '(kp2[m.queryIdx].pt)\n', (2466, 2486), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import math
from bisect import bisect_right
import torch
class WarmupLrScheduler(torch.optim.lr_scheduler._LRScheduler):
    """Base scheduler that scales base LRs by a warmup ratio, then defers
    to a subclass-defined main schedule.

    During the first ``warmup_iter`` steps the ratio ramps from
    ``warmup_ratio`` to 1, either linearly or exponentially; afterwards
    ``get_main_ratio()`` (implemented by subclasses) takes over.
    """

    def __init__(
            self,
            optimizer,
            warmup_iter,
            warmup_ratio=5e-4,
            warmup='exp',
            last_epoch=-1,
    ):
        self.warmup_iter = warmup_iter
        self.warmup_ratio = warmup_ratio
        self.warmup = warmup
        super(WarmupLrScheduler, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # Every param group is scaled by the same ratio.
        scale = self.get_lr_ratio()
        return [scale * base_lr for base_lr in self.base_lrs]

    def get_lr_ratio(self):
        if self.last_epoch < self.warmup_iter:
            return self.get_warmup_ratio()
        return self.get_main_ratio()

    def get_main_ratio(self):
        # Subclasses define the post-warmup schedule.
        raise NotImplementedError

    def get_warmup_ratio(self):
        assert self.warmup in ('linear', 'exp')
        progress = self.last_epoch / self.warmup_iter
        if self.warmup == 'linear':
            return self.warmup_ratio + (1.0 - self.warmup_ratio) * progress
        # 'exp': geometric ramp from warmup_ratio up to 1.
        return self.warmup_ratio ** (1.0 - progress)
class WarmupPolyLrScheduler(WarmupLrScheduler):
    """Polynomial decay after warmup: ratio = (1 - progress) ** power."""

    def __init__(
            self,
            optimizer,
            power,
            max_iter,
            warmup_iter=500,
            warmup_ratio=5e-4,
            warmup='exp',
            last_epoch=-1,
    ):
        self.power = power
        self.max_iter = max_iter
        super(WarmupPolyLrScheduler, self).__init__(
            optimizer, warmup_iter, warmup_ratio, warmup, last_epoch)

    def get_main_ratio(self):
        # Progress through the post-warmup portion of training, in [0, 1].
        steps_done = self.last_epoch - self.warmup_iter
        total_decay_steps = self.max_iter - self.warmup_iter
        progress = steps_done / total_decay_steps
        return (1 - progress) ** self.power
class WarmupExpLrScheduler(WarmupLrScheduler):
    """Exponential decay after warmup: ratio = gamma ** (steps // interval)."""

    def __init__(
            self,
            optimizer,
            gamma,
            interval=1,
            warmup_iter=500,
            warmup_ratio=5e-4,
            warmup='exp',
            last_epoch=-1,
    ):
        self.gamma = gamma
        self.interval = interval
        super(WarmupExpLrScheduler, self).__init__(
            optimizer, warmup_iter, warmup_ratio, warmup, last_epoch)

    def get_main_ratio(self):
        # Decay once every ``interval`` post-warmup steps.
        steps_done = self.last_epoch - self.warmup_iter
        return self.gamma ** (steps_done // self.interval)
class WarmupCosineLrScheduler(WarmupLrScheduler):
    """Cosine annealing after warmup.

    After ``warmup_iter`` steps the ratio follows half a cosine wave from 1
    down to ``eta_ratio`` over the remaining ``max_iter - warmup_iter``
    steps.
    """

    def __init__(
            self,
            optimizer,
            max_iter,
            eta_ratio=0,
            warmup_iter=500,
            warmup_ratio=5e-4,
            warmup='exp',
            last_epoch=-1,
    ):
        self.eta_ratio = eta_ratio
        self.max_iter = max_iter
        super(WarmupCosineLrScheduler, self).__init__(
            optimizer, warmup_iter, warmup_ratio, warmup, last_epoch)

    def get_main_ratio(self):
        real_iter = self.last_epoch - self.warmup_iter
        real_max_iter = self.max_iter - self.warmup_iter
        # BUG FIX: the cosine term previously used ``self.last_epoch`` even
        # though ``real_iter`` was computed (and left unused), so the
        # annealing did not start at ratio 1 right after warmup.
        return self.eta_ratio + (1 - self.eta_ratio) * (
            1 + math.cos(math.pi * real_iter / real_max_iter)) / 2
class WarmupStepLrScheduler(WarmupLrScheduler):
    """Step decay after warmup: multiply by ``gamma`` at each milestone."""

    def __init__(
            self,
            optimizer,
            milestones: list,
            gamma=0.1,
            warmup_iter=500,
            warmup_ratio=5e-4,
            warmup='exp',
            last_epoch=-1,
    ):
        self.milestones = milestones
        self.gamma = gamma
        super(WarmupStepLrScheduler, self).__init__(
            optimizer, warmup_iter, warmup_ratio, warmup, last_epoch)

    def get_main_ratio(self):
        # The number of milestones already passed is the decay exponent.
        steps_done = self.last_epoch - self.warmup_iter
        n_decays = bisect_right(self.milestones, steps_done)
        return self.gamma ** n_decays
if __name__ == "__main__":
    # Demo: trace a warmup + polynomial-decay schedule and plot it.
    net = torch.nn.Conv2d(3, 16, 3, 1, 1)
    optim = torch.optim.SGD(net.parameters(), lr=1e-3)

    max_iter = 20000
    lr_scheduler = WarmupPolyLrScheduler(optim, 0.9, max_iter, 200, 0.1, 'linear', -1)

    history = []
    for _ in range(max_iter):
        current_lr = lr_scheduler.get_lr()[0]
        print(current_lr)
        history.append(current_lr)
        lr_scheduler.step()

    import matplotlib
    import matplotlib.pyplot as plt
    import numpy as np
    history = np.array(history)
    plt.plot(np.arange(len(history)), history)
    plt.grid()
    plt.show()
| [
"matplotlib.pyplot.grid",
"torch.nn.Conv2d",
"math.cos",
"numpy.array",
"bisect.bisect_right",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((4201, 4232), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(3)', '(16)', '(3)', '(1)', '(1)'], {}), '(3, 16, 3, 1, 1)\n', (4216, 4232), False, 'import torch\n'), ((4640, 4653), 'numpy.array', 'np.array', (['lrs'], {}), '(lrs)\n', (4648, 4653), True, 'import numpy as np\n'), ((4715, 4725), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4723, 4725), True, 'import matplotlib.pyplot as plt\n'), ((4730, 4740), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4738, 4740), True, 'import matplotlib.pyplot as plt\n'), ((4688, 4704), 'numpy.arange', 'np.arange', (['n_lrs'], {}), '(n_lrs)\n', (4697, 4704), True, 'import numpy as np\n'), ((4098, 4138), 'bisect.bisect_right', 'bisect_right', (['self.milestones', 'real_iter'], {}), '(self.milestones, real_iter)\n', (4110, 4138), False, 'from bisect import bisect_right\n'), ((3455, 3506), 'math.cos', 'math.cos', (['(math.pi * self.last_epoch / real_max_iter)'], {}), '(math.pi * self.last_epoch / real_max_iter)\n', (3463, 3506), False, 'import math\n')] |
import numpy as np
import utils
from dataset_specifications.dataset import Dataset
class ConstNoiseSet(Dataset):
    """Toy 1-D dataset: y = x + Gaussian noise with a constant std dev."""

    def __init__(self):
        super().__init__()
        self.name = "const_noise"
        # Noise variance is 0.25, i.e. std dev 0.5.
        self.std_dev = np.sqrt(0.25)

    def get_support(self, x):
        # A band of +/- two standard deviations around x.
        half_width = 2 * self.std_dev
        return (x - half_width, x + half_width)

    def sample(self, n):
        # Inputs are uniform on [-1, 1); targets add zero-mean noise.
        xs = np.random.uniform(low=-1., high=1., size=n)
        ys = xs + np.random.normal(loc=0., scale=self.std_dev, size=n)
        return np.stack((xs, ys), axis=1)

    def get_pdf(self, x):
        return utils.get_gaussian_pdf(x, self.std_dev)
| [
"numpy.random.normal",
"utils.get_gaussian_pdf",
"numpy.sqrt",
"numpy.stack",
"numpy.random.uniform"
] | [((224, 237), 'numpy.sqrt', 'np.sqrt', (['(0.25)'], {}), '(0.25)\n', (231, 237), True, 'import numpy as np\n'), ((360, 405), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1.0)', 'high': '(1.0)', 'size': 'n'}), '(low=-1.0, high=1.0, size=n)\n', (377, 405), True, 'import numpy as np\n'), ((420, 473), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'self.std_dev', 'size': 'n'}), '(loc=0.0, scale=self.std_dev, size=n)\n', (436, 473), True, 'import numpy as np\n'), ((513, 539), 'numpy.stack', 'np.stack', (['(xs, ys)'], {'axis': '(1)'}), '((xs, ys), axis=1)\n', (521, 539), True, 'import numpy as np\n'), ((582, 621), 'utils.get_gaussian_pdf', 'utils.get_gaussian_pdf', (['x', 'self.std_dev'], {}), '(x, self.std_dev)\n', (604, 621), False, 'import utils\n')] |
import os
import sys
import json
import logging
import numpy as np
logging.basicConfig(level=logging.INFO)
from robo.solver.hyperband_datasets_size import HyperBand_DataSubsets
from hpolib.benchmarks.ml.surrogate_svm import SurrogateSVM
run_id = int(sys.argv[1])
seed = int(sys.argv[2])
rng = np.random.RandomState(seed)
dataset = "surrogate"
f = SurrogateSVM(path="/mhome/kleinaa/experiments/fabolas/dataset/svm_on_mnist_grid", rng=rng)
output_path = "/mhome/kleinaa/experiments/fabolas_journal/results/svm_%s/hyperband_last_seen_incumbent_%d" % (dataset, run_id)
os.makedirs(output_path, exist_ok=True)
eta = 3.
# Number of brackets.  Use eta here (previously a hard-coded log(3)) so
# changing eta keeps B consistent.
B = -int(np.log(f.s_min) / np.log(eta))
print(B)
opt = HyperBand_DataSubsets(f, eta, eta**(-(B-1)), output_path=output_path, rng=rng)
opt.run(int(20 / B * 1.5))
# Collect the test error of every incumbent and its cumulative runtime.
test_error = []
runtime = []
cum_cost = 0
for i, c in enumerate(opt.incumbents):
    test_error.append(f.objective_function_test(c)["function_value"])
    cum_cost += opt.time_func_eval_incumbent[i]
    runtime.append(opt.runtime[i] + cum_cost)
# Build the results dict once, after the loop.  Previously it was
# recreated on every iteration and was *undefined* (NameError) when
# there were no incumbents at all.
results = dict()
results["test_error"] = test_error
results["runtime"] = runtime
results["run_id"] = run_id
with open(os.path.join(output_path, 'results_%d.json' % run_id), 'w') as fh:
    json.dump(results, fh)
| [
"logging.basicConfig",
"os.makedirs",
"json.dump",
"numpy.log",
"os.path.join",
"hpolib.benchmarks.ml.surrogate_svm.SurrogateSVM",
"robo.solver.hyperband_datasets_size.HyperBand_DataSubsets",
"numpy.random.RandomState"
] | [((68, 107), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (87, 107), False, 'import logging\n'), ((298, 325), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (319, 325), True, 'import numpy as np\n'), ((353, 448), 'hpolib.benchmarks.ml.surrogate_svm.SurrogateSVM', 'SurrogateSVM', ([], {'path': '"""/mhome/kleinaa/experiments/fabolas/dataset/svm_on_mnist_grid"""', 'rng': 'rng'}), "(path=\n '/mhome/kleinaa/experiments/fabolas/dataset/svm_on_mnist_grid', rng=rng)\n", (365, 448), False, 'from hpolib.benchmarks.ml.surrogate_svm import SurrogateSVM\n'), ((572, 611), 'os.makedirs', 'os.makedirs', (['output_path'], {'exist_ok': '(True)'}), '(output_path, exist_ok=True)\n', (583, 611), False, 'import os\n'), ((675, 760), 'robo.solver.hyperband_datasets_size.HyperBand_DataSubsets', 'HyperBand_DataSubsets', (['f', 'eta', '(eta ** -(B - 1))'], {'output_path': 'output_path', 'rng': 'rng'}), '(f, eta, eta ** -(B - 1), output_path=output_path, rng=rng\n )\n', (696, 760), False, 'from robo.solver.hyperband_datasets_size import HyperBand_DataSubsets\n'), ((1246, 1268), 'json.dump', 'json.dump', (['results', 'fh'], {}), '(results, fh)\n', (1255, 1268), False, 'import json\n'), ((631, 646), 'numpy.log', 'np.log', (['f.s_min'], {}), '(f.s_min)\n', (637, 646), True, 'import numpy as np\n'), ((647, 656), 'numpy.log', 'np.log', (['(3)'], {}), '(3)\n', (653, 656), True, 'import numpy as np\n'), ((1171, 1224), 'os.path.join', 'os.path.join', (['output_path', "('results_%d.json' % run_id)"], {}), "(output_path, 'results_%d.json' % run_id)\n", (1183, 1224), False, 'import os\n')] |
import numpy as np
from slugnet.activation import ReLU, Softmax
from slugnet.layers import Convolution, Dense, MeanPooling, Flatten
from slugnet.loss import SoftmaxCategoricalCrossEntropy as SCCE
from slugnet.model import Model
from slugnet.optimizers import SGD
from slugnet.data.mnist import get_mnist
X, y = get_mnist()
X = X.reshape((-1, 1, 28, 28)) / 255.0
# Subsample 1000 examples using ONE shared index permutation so X and y
# stay aligned.  The previous code re-seeded the RNG and permuted each
# array separately, which is brittle (any extra RNG draw in between
# silently misaligns inputs and labels).  With the same seed this draws
# the identical permutation as before.
np.random.seed(100)
subset = np.random.permutation(len(X))[:1000]
X = X[subset]
y = y[subset]
model = Model(lr=0.001, n_epoch=100, batch_size=3, loss=SCCE(),
              metrics=['loss', 'accuracy'], optimizer=SGD())
model.add_layer(Convolution(1, (3, 3), inshape=(None, 1, 28, 28)))
model.add_layer(MeanPooling((2, 2)))
model.add_layer(Convolution(2, (4, 4)))
model.add_layer(MeanPooling((2, 2)))
model.add_layer(Flatten())
model.add_layer(Dense(10, activation=Softmax()))
model.fit(X, y)
| [
"slugnet.activation.Softmax",
"slugnet.layers.Convolution",
"slugnet.optimizers.SGD",
"slugnet.layers.Flatten",
"slugnet.data.mnist.get_mnist",
"numpy.random.seed",
"slugnet.loss.SoftmaxCategoricalCrossEntropy",
"slugnet.layers.MeanPooling",
"numpy.random.permutation"
] | [((314, 325), 'slugnet.data.mnist.get_mnist', 'get_mnist', ([], {}), '()\n', (323, 325), False, 'from slugnet.data.mnist import get_mnist\n'), ((365, 384), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (379, 384), True, 'import numpy as np\n'), ((421, 440), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (435, 440), True, 'import numpy as np\n'), ((389, 413), 'numpy.random.permutation', 'np.random.permutation', (['X'], {}), '(X)\n', (410, 413), True, 'import numpy as np\n'), ((445, 469), 'numpy.random.permutation', 'np.random.permutation', (['y'], {}), '(y)\n', (466, 469), True, 'import numpy as np\n'), ((620, 669), 'slugnet.layers.Convolution', 'Convolution', (['(1)', '(3, 3)'], {'inshape': '(None, 1, 28, 28)'}), '(1, (3, 3), inshape=(None, 1, 28, 28))\n', (631, 669), False, 'from slugnet.layers import Convolution, Dense, MeanPooling, Flatten\n'), ((687, 706), 'slugnet.layers.MeanPooling', 'MeanPooling', (['(2, 2)'], {}), '((2, 2))\n', (698, 706), False, 'from slugnet.layers import Convolution, Dense, MeanPooling, Flatten\n'), ((724, 746), 'slugnet.layers.Convolution', 'Convolution', (['(2)', '(4, 4)'], {}), '(2, (4, 4))\n', (735, 746), False, 'from slugnet.layers import Convolution, Dense, MeanPooling, Flatten\n'), ((764, 783), 'slugnet.layers.MeanPooling', 'MeanPooling', (['(2, 2)'], {}), '((2, 2))\n', (775, 783), False, 'from slugnet.layers import Convolution, Dense, MeanPooling, Flatten\n'), ((801, 810), 'slugnet.layers.Flatten', 'Flatten', ([], {}), '()\n', (808, 810), False, 'from slugnet.layers import Convolution, Dense, MeanPooling, Flatten\n'), ((534, 540), 'slugnet.loss.SoftmaxCategoricalCrossEntropy', 'SCCE', ([], {}), '()\n', (538, 540), True, 'from slugnet.loss import SoftmaxCategoricalCrossEntropy as SCCE\n'), ((596, 601), 'slugnet.optimizers.SGD', 'SGD', ([], {}), '()\n', (599, 601), False, 'from slugnet.optimizers import SGD\n'), ((849, 858), 'slugnet.activation.Softmax', 'Softmax', ([], {}), '()\n', 
(856, 858), False, 'from slugnet.activation import ReLU, Softmax\n')] |
#!/usr/bin/env python
from __future__ import print_function
import MV2
import cdms2
import vcs
import genutil
import glob
import numpy
# import time
import datetime
from genutil import StringConstructor
import os
import pkg_resources
# Locate the installed "pcmdi_metrics" distribution's shared data
# directory (used below to find the vcs portrait script).
pmp_egg_path = pkg_resources.resource_filename(
    pkg_resources.Requirement.parse("pcmdi_metrics"), "share")
def is_dark_color_type(R, G, B, A):
    """Return True when the RGB color (components on a 0-100 scale) is dark.

    The alpha channel *A* is accepted for call-compatibility but ignored.
    """
    # Perceptive luminance -- the human eye is most sensitive to green.
    luminance = (0.299 * R + 0.587 * G + 0.114 * B) / 100.
    return (1 - luminance) > .5
class Values(object):
    """Settings for the numeric values optionally drawn inside each cell."""
    __slots__ = ("show", "array", "text",
                 "lightcolor", "darkcolor", "format")
    def __init__(self, show=False, array=None,
                 lightcolor="white", darkcolor="black", format="{0:.2f}"):
        # Whether to draw values, and an optional override array to draw.
        self.show = show
        self.array = array
        # Text colors used on dark vs. light cell backgrounds.
        self.lightcolor = lightcolor
        self.darkcolor = darkcolor
        # str.format template applied to each value.
        self.format = format
        # Text object used for rendering, centered in each cell.
        self.text = vcs.createtext()
        self.text.valign = "half"
        self.text.halign = "center"
class Xs(object):
    """Lightweight holder for a pair of x-coordinates."""
    __slots__ = ("x1", "x2")
    def __init__(self, x1, x2):
        self.x1, self.x2 = x1, x2
class Ys(object):
    """Lightweight holder for a pair of y-coordinates."""
    __slots__ = ("y1", "y2")
    def __init__(self, y1, y2):
        self.y1, self.y2 = y1, y2
class XYs(object):
    """Lightweight holder for an (x1, x2, y1, y2) rectangle."""
    __slots__ = ("x1", "x2", "y1", "y2")
    def __init__(self, x1, x2, y1, y2):
        self.x1, self.x2 = x1, x2
        self.y1, self.y2 = y1, y2
class Plot_defaults(object):
    """Default styling/configuration for a portrait plot.

    Holds the data-area extent, colormap/levels, tick/label text settings,
    the optional logo and the time stamp drawn on the canvas.  Kept
    byte-stable because vcs object creation mutates global canvas state.
    """
    __slots__ = ["x1", "x2", "y1", "y2", "levels", "colormap",
                 "fillareacolors", "legend", "_logo",
                 "xticorientation", "yticorientation",
                 "parameterorientation", "tictable",
                 "parametertable", "draw_mesh", "values",
                 "missing_color", "xtic1", "xtic2", "ytic1", "ytic2",
                 "time_stamp"]
    def getlogo(self):
        # Accessor backing the `logo` property below.
        return self._logo
    def setlogo(self, value):
        # Only None or a path string is accepted; any other value is
        # silently ignored and the previous logo (if any) is kept.
        if value is None or isinstance(value, str):
            self._logo = vcs.utils.Logo(value)
    logo = property(getlogo, setlogo)
    def __init__(self):
        # Normalized canvas coordinates of the data area.
        self.x1 = .12
        self.x2 = .84
        self.y1 = .17
        self.y2 = .8
        # Levels/colors; None means "derive from the data at plot time".
        self.levels = None
        self.colormap = None
        self.fillareacolors = None
        # Legend box placed just right of the data area.
        self.legend = XYs(.89, .91, self.y1, self.y2)
        # X ticks
        self.xticorientation = vcs.createtextorientation()
        self.xticorientation.angle = 360 - 90
        self.xticorientation.halign = 'right'
        self.xticorientation.height = 10
        # Y ticks
        self.yticorientation = vcs.createtextorientation()
        self.yticorientation.angle = 0
        self.yticorientation.halign = 'right'
        self.yticorientation.height = 10
        # Ticks table
        self.tictable = vcs.createtexttable()
        # parameters text settings
        self.parameterorientation = vcs.createtextorientation()
        self.parameterorientation.angle = 0
        self.parameterorientation.halign = 'center'
        self.parameterorientation.height = 20
        self.parametertable = vcs.createtexttable()
        # values in cell setting
        self.values = Values()
        # Defaults
        self.draw_mesh = 'y'
        self.missing_color = 3
        # Tick extents; None leaves the template's defaults untouched.
        self.xtic1 = Ys(None, None)
        self.xtic2 = Ys(None, None)
        self.ytic1 = Xs(None, None)
        self.ytic2 = Xs(None, None)
        # Set the logo textorientation
        self.logo = None
        # Set the time stamp
        time_stamp = vcs.createtext()
        time_stamp.height = 10
        time_stamp.halign = 'center'
        time_stamp.path = 'right'
        time_stamp.valign = 'half'
        time_stamp.x = [0.9]
        time_stamp.y = [0.96]
        self.time_stamp = time_stamp
class Portrait(object):
    """Portrait-plot builder: gathers statistic files and plots a matrix."""
    # NOTE(review): this is spelled `__slots_` (single trailing
    # underscore), so it does NOT activate Python's __slots__ machinery;
    # instances keep a normal __dict__.  That appears load-bearing:
    # __init__ sets dynamic per-parameter attributes not listed here, so
    # "fixing" the spelling would break the class.  Confirm before touching.
    __slots_ = [
        "verbose", "files_structure",
        "exclude", "parameters_list",
        "dummies", "auto_dummies", "grouped",
        "slaves", "altered", "aliased",
        "portrait_types", "PLOT_SETTINGS", "x", "bg"
    ]
def __init__(self, files_structure=None, exclude=[], **kw):
''' initialize the portrait object, from file structure'''
if "x" in kw:
self.x = kw["x"]
else:
self.x = vcs.init()
scr_file = os.path.join(
pmp_egg_path,
"pmp",
"graphics",
'vcs',
'portraits.scr')
self.x.scriptrun(scr_file)
self.verbose = False # output files looked for to the screen
self.files_structure = files_structure
self.exclude = exclude
# First determine the list of parameters on which we can have a
# portrait
self.parameters_list = []
self.dummies = []
self.auto_dummies = []
self.grouped = []
self.slaves = {}
self.altered = {}
self.aliased = {}
self.portrait_types = {}
self.PLOT_SETTINGS = Plot_defaults()
if files_structure is not None:
sp = files_structure.split('%(')
for s in sp:
i = s.find(')')
if i > -1: # to avoid the leading path
val = s[:i]
if not (
val in self.parameters_list or
val in ['files_structure', 'exclude']):
self.parameters_list.append(s[:i])
self.parameters_list.append('component')
self.parameters_list.append('statistic')
self.parameters_list.append('time_domain')
for p in self.parameters_list:
setattr(self, p, None)
for k in list(kw.keys()):
setattr(self, k, kw[k])
def alter_parameter(
self, parameter=None, x=None, y=None, size=None, color=None):
if parameter is not None:
self.altered[parameter] = {
'x': x,
'y': y,
'size': size,
'color': color}
else:
if color is not None:
self.PLOT_SETTINGS.parametertable.color = color
if size is not None:
self.PLOT_SETTINGS.parameterorientation.size = size
    def string_construct(self, nms):
        """Build the label strings for the parameters named in *nms*.

        Returns a tuple (t1, t2, t3):
          t1 - single space-separated string of the parameter names
               (slaved parameter names included next to their master),
          t2 - list of space-separated value strings, one per combination
               (cartesian product over all parameters in *nms*),
          t3 - list of '%%%'-joined, alias-resolved value strings aligned
               with t2.
        """
        # Seed t1/t2 with the first parameter; a slaved master also
        # carries its slaves' names/values.
        n = nms[0]
        if n not in list(self.slaves.keys()):
            t1 = [n + ' ' for nn in getattr(self, n)]
            t2 = [str(nn) + ' ' for nn in getattr(self, n)]
        else:
            slavs = self.slaves[n]
            nm = ''
            for i in slavs:
                nm = nm + ' ' + i
            t1 = [n + nm + ' ' for nn in getattr(self, n)]
            v1 = [res for res in getattr(self, n)]
            vals = []
            for i in range(len(v1)):
                tmp = ''
                for a in v1[i]:
                    if not a == '':
                        tmp += ' ' + str(a) + ' '
                    else:
                        # Empty slave values are encoded as 'NONE'.
                        tmp += ' NONE' + ' '
                vals.append(tmp)
            t2 = [nn for nn in vals]
        # Cartesian-product the remaining parameters into t1/t2.
        for n in nms[1:]:
            if n not in list(self.slaves.keys()):
                t1 = [' ' + t + ' ' + n for t in t1 for nn in getattr(self, n)]
                t2 = [
                    ' ' +
                    t +
                    ' ' +
                    str(nn) for t in t2 for nn in getattr(
                        self,
                        n)]
            else:
                slavs = self.slaves[n]
                nm = ' '
                for i in slavs:
                    nm = ' ' + nm + ' ' + i
                t1b = [n + nm for nn in getattr(self, n)]
                v1 = [res for res in getattr(self, n)]
                vals = []
                for i in range(len(v1)):
                    tmp = ''
                    for a in v1[i]:
                        if not a == '':
                            tmp += ' ' + str(a)
                        else:
                            tmp += ' NONE'
                    vals.append(tmp)
                t2b = [nn for nn in vals]
                t1 = [t + tb for t in t1 for tb in t1b]
                t2 = [t + tb for t in t2 for tb in t2b]
        # Resolve aliases token-by-token; t3 joins them with '%%%' so the
        # caller can split unambiguously.
        t3 = []
        t1 = t1[0]
        sp = t1.split()
        n = len(sp)
        for tmp in t2:
            if isinstance(tmp, int):
                tmp = str(tmp)
            t = []
            tt = tmp.split()
            for i in range(n):
                t.append(self.makestring(sp[i], tt[i]))
            t3.append("%%%".join(t))
        return t1, t2, t3
def set(self, portrait_type, parameter=None, values=None):
if portrait_type.lower() == 'absolute':
if 'relative' in list(self.portrait_types.keys()):
del(self.portrait_types['relative'])
elif portrait_type.lower() == 'relative':
if not isinstance(parameter, str):
raise 'Parameter must be a string'
if not isinstance(values, (list, tuple)):
raise 'values must be a list or tuple'
self.portrait_types['relative'] = [parameter, values]
elif portrait_type.lower() == 'difference':
if not isinstance(parameter, str):
raise 'Parameter must be a string'
if not isinstance(values, (list, tuple)):
raise 'values must be a list or tuple'
self.portrait_types['difference'] = [parameter, values]
elif portrait_type.lower() in ['mean', 'average']:
if not isinstance(parameter, str):
raise 'Parameter must be a string'
if not isinstance(values, (list, tuple)):
raise 'values must be a list or tuple'
self.portrait_types['mean'] = [parameter, values]
else:
raise RuntimeError(
'Error type:"%s" not supported at this time' %
(portrait_type))
def dummy(self, parameter, which_dummy=''):
''' Sets a parameter as dummy, i.e. all possible values will be used'''
val = getattr(self, which_dummy + 'dummies')
if parameter not in val:
val.append(parameter)
setattr(self, which_dummy + 'dummies', val)
setattr(self, parameter, None)
def group(self, param1, param2):
''' sets 2 multiple values of parameters on the same axis'''
added = 0
for i in range(len(self.grouped)):
g = self.grouped[i]
if param1 in g:
if param2 not in g:
added = 1
self.grouped[i].append(param2)
elif param2 in g:
added = 1
self.grouped[i].append(param1)
if not added:
self.grouped.append([param1, param2])
def slave(self, master, slave):
''' defines a parameter as a slave of a master parameter'''
if master in list(self.slaves.keys()):
v = self.slaves[master]
if slave not in v:
v.append(slave)
self.dummy(slave, which_dummy='auto_')
self.slaves[master] = v
else:
self.slaves[master] = [slave]
self.dummy(slave, which_dummy='auto_')
def alias(self, parameter, values):
if isinstance(values, dict):
self.aliased[parameter] = values
else:
oldvalue = getattr(self, parameter)
if parameter in list(self.slaves.keys()):
ov = []
for n in oldvalue:
ov.append(n[0])
oldvalue = ov
n = len(oldvalue)
if len(values) != n:
raise 'Error aliasing ' + parameter + ' you submitted ' + \
str(len(values)) + ' aliases but it should be:' + str(n)
dic = {}
for i in range(n):
dic[oldvalue[i]] = values[i]
self.aliased[parameter] = dic
def makestring(self, parameter, value):
if parameter in list(self.aliased.keys()):
dic = self.aliased[parameter]
if value in list(dic.keys()):
return dic[value]
else:
return value
else:
return value
def makeaxis(self, names, axis_length):
"""
Create the axis with the names, etc.. .for portrait plot
Usage:
makeaxis(self,names,axis_length)
Returns:
a cdms axis
"""
# Now creates the axis names
t1, t2, t3 = self.string_construct(names)
sp1 = t1.split()
axis_names = []
for i in range(len(t2)):
nm = ''
sp2 = t3[i].split('%%%')
for j in range(len(sp2)):
if not sp1[j] in self.dummies and not sp2[j] == 'NONE':
# print sp2,j
if not sp2[j][0] == '_':
nm += ' ' + sp2[j]
else:
nm += ' ' + sp2[j][1:]
axis_names.append(nm)
dic = {}
for i in range(len(axis_names)):
dic[i] = axis_names[i]
y = cdms2.createAxis(list(range(axis_length)))
y.names = repr(dic)
nm = []
for t in sp1:
if t not in self.dummies:
nm.append(t)
nm = "___".join(nm)
y.id = nm
return y
def rank(self, data, axis=0):
if axis not in [0, 1]:
if not isinstance(axis, str):
raise 'Ranking error, axis can only be 1 or 2 or name'
else:
nms = data.getAxisIds()
for i in range(len(nms)):
nm = nms[i]
if axis in nm.split('___'):
axis = i
if axis not in [0, 1]:
raise 'Ranking error, axis can only be 1 or 2 or name'
if data.ndim > 2:
raise "Ranking error, array can only be 2D"
if axis == 1:
data = MV2.transpose(data)
a0 = MV2.argsort(data.filled(1.E20), axis=0)
n = a0.shape[0]
b = MV2.zeros(a0.shape, MV2.float)
sh = a0[1].shape
for i in range(n):
Indx = MV2.ones(sh) * i
c = MV2.array(a0[i].filled(n - 1))
b = genutil.arrayindexing.set(b, c, Indx)
m = data.mask
if m is not None:
b = MV2.masked_where(m, b)
else:
b = MV2.array(b)
n = MV2.count(b, 0)
n.setAxis(0, b.getAxis(1))
b, n = genutil.grower(b, n)
b = 100. * b / (n - 1)
b.setAxisList(data.getAxisList())
if axis == 1:
b = MV2.transpose(b)
data = MV2.transpose(data)
return b
def rank_nD(self, data, axis=0):
if axis not in [0, 1]:
if not isinstance(axis, str):
raise 'Ranking error, axis can only be 1 or 2 or name'
else:
nms = data.getAxisIds()
for i in range(len(nms)):
nm = nms[i]
if axis in nm.split('___'):
axis = i
if axis not in [0, 1]:
raise 'Ranking error, axis can only be 1 or 2 or name'
if axis != 0:
data = data(order=(str(axis) + '...'))
a0 = MV2.argsort(data.filled(1.E20), axis=0)
n = a0.shape[0]
b = MV2.zeros(a0.shape, MV2.float)
sh = a0[1].shape
for i in range(n):
Indx = MV2.ones(sh) * i
c = MV2.array(a0[i].filled(n - 1))
b = genutil.arrayindexing.set(b, c, Indx)
m = data.mask
if m is not None:
b = MV2.masked_where(m, b)
else:
b = MV2.array(b)
n = MV2.count(b, 0)
n.setAxisList(b.getAxisList()[1:])
b, n = genutil.grower(b, n)
b = 100. * b / (n - 1)
b.setAxisList(data.getAxisList())
if axis != 0:
st = ''
for i in range(axis):
st += str(i + 1)
st += '0...'
data = data(order=st)
b = b(order=st)
return b
def get(self):
if 'difference' in list(self.portrait_types.keys()):
d = self.portrait_types['difference']
setattr(self, d[0], d[1][0])
a1 = self._get()
setattr(self, d[0], d[1][1])
a2 = self._get()
return a1 - a2
elif 'mean' in list(self.portrait_types.keys()):
d = self.portrait_types['mean']
setattr(self, d[0], d[1][0])
# This picked up by flake8
# probably needs double check
# used to be +=
tmp = self._get()
for v in d[1][1:]:
setattr(self, d[0], v)
tmp += self._get()
return tmp / len(d[1])
else:
return self._get()
    def _get(self):
        """Return the portrait matrix, optionally as a relative ranking.

        With a 'relative' portrait type configured, the real matrix is
        ranked (0-100) against the matrices obtained for each reference
        value of the configured parameter; otherwise __get() is returned
        unchanged.
        """
        if 'relative' in list(self.portrait_types.keys()):
            d = self.portrait_types['relative']
            vals = d[1]
            real_value = getattr(self, d[0])
            real = self.__get()
            # Stack one matrix per reference value along a new axis 0.
            setattr(self, d[0], vals[0])
            a0 = self.__get()
            sh = list(a0.shape)
            sh.insert(0, 1)
            a0 = MV2.reshape(a0, sh)
            for v in vals[1:]:
                setattr(self, d[0], v)
                tmp = self.__get()
                tmp = MV2.reshape(tmp, sh)
                a0 = MV2.concatenate((a0, tmp))
            # Sort the stack, then locate each real value's position in
            # its per-cell sorted column via searchsorted.
            a0 = MV2.sort(a0, 0).filled()
            real2 = real.filled()
            a0 = MV2.reshape(a0, (a0.shape[0], sh[1] * sh[2]))
            real2 = MV2.reshape(real2, (sh[1] * sh[2],))
            a0 = MV2.transpose(a0)
            indices = []
            for i in range(len(real2)):
                indices.append(MV2.searchsorted(a0[i], real2[i]))
            indices = MV2.array(indices)
            indices = MV2.reshape(indices, (sh[1], sh[2]))
            if not ((real.mask is None) or (real.mask is MV2.nomask)):
                indices = MV2.masked_where(real.mask, indices)
            # Normalize by the count of valid (non-fill) reference values.
            a = MV2.masked_equal(a0, 1.e20)
            a = MV2.count(a, 1)
            a = MV2.reshape(a, indices.shape)
            indices = indices / a * 100
            # Restore the parameter's original value before returning.
            setattr(self, d[0], real_value)
            indices.setAxisList(real.getAxisList())
            # print indices.shape
            return indices
        else:
            return self.__get()
def __get(self):
nfree = 0
names = []
for p in self.parameters_list:
if p not in self.dummies and p not in self.auto_dummies:
v = getattr(self, p)
if v is None \
or \
(isinstance(v, (list, tuple)) and len(v) > 1):
already = 0
for pn in names:
if p == pn:
already = 1
elif isinstance(pn, list):
if p in pn:
already = 1
if already == 0:
nfree += 1
added = 0
for g in self.grouped:
if p in g:
names.append(g)
added = 1
if added == 0:
names.append(p)
if nfree != 2:
raise 'Error MUST end up with 2 multiple values ! (we have ' + str(
nfree) + ':' + str(names) + ')'
# Now determines length of each axis
axes_length = [1, 1]
# First make sure with have 2 list of parameters
for i in range(2):
if not isinstance(names[i], list):
names[i] = [names[i]]
for n in names[i]:
v = getattr(self, n)
if v is None:
if n == 'component':
axes_length[i] *= 28
elif n == 'time_domain':
axes_length[i] *= 19
else:
raise 'Error, ' + n + \
' is not defined correctly, please' + \
' specify which values you wish to extract'
else:
axes_length[i] *= len(v)
# Creates the dummy array
output = MV2.ones((axes_length[0], axes_length[1]))
# Now mask everywhere
output = MV2.masked_equal(output, 1)
# Indices for filling
i = 0
j = 0
# First creates the filler object and sets all the fixed values !
F = StringConstructor(self.files_structure)
# Ok let's fill it
for p in self.parameters_list:
if p not in self.dummies and p not in self.auto_dummies:
v = getattr(self, p)
if isinstance(v, (list, tuple)):
if len(v) == 1:
v = v[0]
if p in list(self.slaves.keys()):
# vslvs = v[1:]
v = v[0]
setattr(F, p, v)
if p in list(self.slaves.keys()):
slvs = self.slaves[p]
for js in range(len(slvs)):
s = slvs[js]
setattr(F, s, slvs[js])
else:
setattr(F, p, '*')
else:
if p in list(self.slaves.keys()):
# vslvs = v[1:]
v = v[0]
setattr(F, p, v)
if p in list(self.slaves.keys()):
slvs = self.slaves[p]
for js in range(len(slvs)):
s = slvs[js]
setattr(F, s, slvs[js])
else:
setattr(F, p, '*')
# fnms=F()
nms = names[0] + names[1]
t1, t2, t3 = self.string_construct(nms)
output = output.ravel()
sp1 = t1.split()
n = len(sp1)
for i in range(len(t2)):
sp2 = t2[i].split()
for j in range(n):
v = sp2[j]
if sp1[j] == 'time_domain':
try:
v = int(v)
except Exception:
pass
if v == 'NONE':
v = ''
setattr(F, sp1[j], v)
# print 'Search string is:',fnms
# f=os.popen('ls '+F()).readlines()
# ip,op,ep=os.popen3('ls '+F())
if self.verbose:
print('command line:', F())
# f=op.readlines()
f = glob.glob(F())
# print 'F is:',f
files = []
for file in f:
files.append(file)
for e in self.exclude:
if file.find(e) > -1:
files.pop(-1)
break
if self.verbose:
print('files:', files)
try:
# now we get the one value needed in this file
f = cdms2.open(files[0])
V = f[F.statistic]
component = F.component
time_domain = F.time_domain
if isinstance(component, str):
dic = eval(f.components)
for k in list(dic.keys()):
if dic[k] == F.component:
component = k
if isinstance(F.time_domain, str):
dic = eval(f.time_domain)
for k in list(dic.keys()):
if dic[k] == F.time_domain:
time_domain = k
value = V(
time_domain=time_domain,
component=component,
squeeze=1)
output[i] = value
# In case sometihng goes wrong (like modle not processed or
# inexsitant for this var, etc...)
f.close()
except Exception:
pass
output = MV2.reshape(output, (axes_length[0], axes_length[1]))
output.id = 'portrait plot'
yaxis = self.makeaxis(names[0], axes_length[0])
xaxis = self.makeaxis(names[1], axes_length[1])
output.setAxis(0, yaxis)
output.setAxis(1, xaxis)
# Makes the dim with the most element on the X axis
if axes_length[0] > axes_length[1]:
output = MV2.transpose(output)
return output
def decorate(self, output, ynm, xnm):
x = cdms2.createAxis(list(range(len(xnm))))
y = cdms2.createAxis(list(range(len(ynm))))
try:
del(x.name)
del(y.name)
del(output.name)
except Exception:
pass
nm = '___'.join(xnm)
x.id = nm
dic = {}
for i in range(len(xnm)):
dic[i] = xnm[i]
x.names = repr(dic)
nm = '___'.join(ynm)
y.id = nm
y.original_id = output.getAxis(0,).id
output.setAxis(0, y)
dic = {}
for i in range(len(ynm)):
dic[i] = ynm[i]
y.names = repr(dic)
x.original_id = output.getAxis(1,).id
output.setAxis(1, x)
return
    def generateTemplate(self):
        """Build the vcs template for the portrait plot from PLOT_SETTINGS.

        Switches off decorations we do not use, positions the data box,
        tick labels and legend, and returns the template.
        """
        template = vcs.createtemplate()
        # Now sets all the things for the template...
        # Sets a bunch of template attributes to off
        for att in [
                'line1', 'line2', 'line3', 'line4',
                'box2', 'box3', 'box4',
                'min', 'max', 'mean',
                'xtic1', 'xtic2',
                'ytic1', 'ytic2',
                'xvalue', 'yvalue', 'zvalue', 'tvalue',
                'xunits', 'yunits', 'zunits', 'tunits',
                'source', 'title', 'dataname',
        ]:
            a = getattr(template, att)
            setattr(a, 'priority', 0)
        for att in [
                'xname', 'yname',
        ]:
            a = getattr(template, att)
            setattr(a, 'priority', 0)
        # Data box and its outline follow the configured data area.
        template.data.x1 = self.PLOT_SETTINGS.x1
        template.data.x2 = self.PLOT_SETTINGS.x2
        template.data.y1 = self.PLOT_SETTINGS.y1
        template.data.y2 = self.PLOT_SETTINGS.y2
        template.box1.x1 = self.PLOT_SETTINGS.x1
        template.box1.x2 = self.PLOT_SETTINGS.x2
        template.box1.y1 = self.PLOT_SETTINGS.y1
        template.box1.y2 = self.PLOT_SETTINGS.y2
        template.xname.y = self.PLOT_SETTINGS.y2 + .02
        template.yname.x = self.PLOT_SETTINGS.x2 + .01
        # Tick labels use the shared text table / orientations.
        template.xlabel1.y = self.PLOT_SETTINGS.y1
        template.xlabel2.y = self.PLOT_SETTINGS.y2
        template.xlabel1.texttable = self.PLOT_SETTINGS.tictable
        template.xlabel2.texttable = self.PLOT_SETTINGS.tictable
        template.xlabel1.textorientation = \
            self.PLOT_SETTINGS.xticorientation
        template.xlabel2.textorientation = \
            self.PLOT_SETTINGS.xticorientation
        template.ylabel1.x = self.PLOT_SETTINGS.x1
        template.ylabel2.x = self.PLOT_SETTINGS.x2
        template.ylabel1.texttable = self.PLOT_SETTINGS.tictable
        template.ylabel2.texttable = self.PLOT_SETTINGS.tictable
        template.ylabel1.textorientation = \
            self.PLOT_SETTINGS.yticorientation
        template.ylabel2.textorientation = \
            self.PLOT_SETTINGS.yticorientation
        # Optional tick extents: only enabled when explicitly configured.
        if self.PLOT_SETTINGS.xtic1.y1 is not None:
            template.xtic1.y1 = self.PLOT_SETTINGS.xtic1.y1
            template.xtic1.priority = 1
        if self.PLOT_SETTINGS.xtic1.y2 is not None:
            template.xtic1.y2 = self.PLOT_SETTINGS.xtic1.y2
            template.xtic1.priority = 1
        if self.PLOT_SETTINGS.xtic2.y1 is not None:
            template.xmintic2.y1 = self.PLOT_SETTINGS.xtic2.y1
            template.xmintic2.priority = 1
        if self.PLOT_SETTINGS.xtic2.y2 is not None:
            template.xmintic2.y2 = self.PLOT_SETTINGS.xtic2.y2
            template.xmintic2.priority = 1
        if self.PLOT_SETTINGS.ytic1.x1 is not None:
            template.ytic1.x1 = self.PLOT_SETTINGS.ytic1.x1
            template.ytic1.priority = 1
        if self.PLOT_SETTINGS.ytic1.x2 is not None:
            template.ytic1.x2 = self.PLOT_SETTINGS.ytic1.x2
            template.ytic1.priority = 1
        if self.PLOT_SETTINGS.ytic2.x1 is not None:
            template.ymintic2.priority = 1
            template.ymintic2.x1 = self.PLOT_SETTINGS.ytic2.x1
        if self.PLOT_SETTINGS.ytic2.x2 is not None:
            template.ymintic2.priority = 1
            template.ymintic2.x2 = self.PLOT_SETTINGS.ytic2.x2
        template.legend.x1 = self.PLOT_SETTINGS.legend.x1
        template.legend.x2 = self.PLOT_SETTINGS.legend.x2
        template.legend.y1 = self.PLOT_SETTINGS.legend.y1
        template.legend.y2 = self.PLOT_SETTINGS.legend.y2
        # Reuse the 'crap22' text orientation if it already exists.
        try:
            tmp = vcs.createtextorientation('crap22')
        except Exception:
            tmp = vcs.gettextorientation('crap22')
        tmp.height = 12
        # tmp.halign = 'center'
        # template.legend.texttable = tmp
        template.legend.textorientation = tmp
        return template
def _repr_png_(self):
import tempfile
tmp = tempfile.mktemp() + ".png"
self.x.png(tmp)
f = open(tmp, "rb")
st = f.read()
f.close()
return st
def plot(self, data=None, mesh=None, template=None,
meshfill=None, x=None, bg=0, multiple=1.1):
self.bg = bg
# Create the vcs canvas
if x is not None:
self.x = x
# Continents bug
# x.setcontinentstype(0)
# gets the thing to plot !
if data is None:
data = self.get()
# Do we use a predefined template ?
if template is None:
template = self.generateTemplate()
else:
if isinstance(template, vcs.template.P):
tid = template.name
elif isinstance(template, str):
tid = template
else:
raise 'Error cannot understand what you mean by template=' + \
str(template)
template = vcs.createtemplate(source=tid)
# Do we use a predefined meshfill ?
if meshfill is None:
mtics = {}
for i in range(100):
mtics[i - .5] = ''
meshfill = vcs.createmeshfill()
meshfill.xticlabels1 = eval(data.getAxis(1).names)
meshfill.yticlabels1 = eval(data.getAxis(0).names)
meshfill.datawc_x1 = -.5
meshfill.datawc_x2 = data.shape[1] - .5
meshfill.datawc_y1 = -.5
meshfill.datawc_y2 = data.shape[0] - .5
meshfill.mesh = self.PLOT_SETTINGS.draw_mesh
meshfill.missing = self.PLOT_SETTINGS.missing_color
meshfill.xticlabels2 = meshfill.xticlabels1
meshfill.yticlabels2 = meshfill.yticlabels1
meshfill.xmtics2 = mtics
meshfill.ymtics2 = mtics
if self.PLOT_SETTINGS.colormap is None:
self.set_colormap()
elif self.x.getcolormapname() != self.PLOT_SETTINGS.colormap:
self.x.setcolormap(self.PLOT_SETTINGS.colormap)
if self.PLOT_SETTINGS.levels is None:
min, max = vcs.minmax(data)
if max != 0:
max = max + .000001
levs = vcs.mkscale(min, max)
else:
levs = self.PLOT_SETTINGS.levels
if len(levs) > 1:
meshfill.levels = levs
if self.PLOT_SETTINGS.fillareacolors is None:
if self.PLOT_SETTINGS.colormap is None:
# Default colormap only use range 16->40
cols = vcs.getcolors(
levs, list(range(144, 156)), split=1)
else:
cols = vcs.getcolors(levs, split=1)
meshfill.fillareacolors = cols
else:
meshfill.fillareacolors = self.PLOT_SETTINGS.fillareacolors
# Now creates the mesh associated
n = int(multiple)
ntot = int((multiple - n) * 10 + .1)
sh = list(data.shape)
sh.append(2)
Indx = MV2.indices((sh[0], sh[1]))
Y = Indx[0]
X = Indx[1]
if ntot == 1:
sh.append(4)
M = MV2.zeros(sh)
M[:, :, 0, 0] = Y - .5
M[:, :, 1, 0] = X - .5
M[:, :, 0, 1] = Y - .5
M[:, :, 1, 1] = X + .5
M[:, :, 0, 2] = Y + .5
M[:, :, 1, 2] = X + .5
M[:, :, 0, 3] = Y + .5
M[:, :, 1, 3] = X - .5
M = MV2.reshape(M, (sh[0] * sh[1], 2, 4))
elif ntot == 2:
sh.append(3)
M = MV2.zeros(sh)
M[:, :, 0, 0] = Y - .5
M[:, :, 1, 0] = X - .5
M[:, :, 0, 1] = Y + .5 - (n - 1)
M[:, :, 1, 1] = X - 0.5 + (n - 1)
M[:, :, 0, 2] = Y + .5
M[:, :, 1, 2] = X + .5
M = MV2.reshape(M, (sh[0] * sh[1], 2, 3))
elif ntot == 3:
design = int((multiple - n) * 100 + .1)
if design == 33:
sh.append(3)
M = MV2.zeros(sh)
if n == 1:
M[:, :, 0, 0] = Y - .5
M[:, :, 1, 0] = X - .5
M[:, :, 0, 1] = Y + .5
M[:, :, 1, 1] = X
M[:, :, 0, 2] = Y + .5
M[:, :, 1, 2] = X - .5
elif n == 2:
M[:, :, 0, 0] = Y - .5
M[:, :, 1, 0] = X - .5
M[:, :, 0, 1] = Y + .5
M[:, :, 1, 1] = X
M[:, :, 0, 2] = Y - .5
M[:, :, 1, 2] = X + .5
elif n == 3:
M[:, :, 0, 0] = Y + .5
M[:, :, 1, 0] = X + .5
M[:, :, 0, 1] = Y + .5
M[:, :, 1, 1] = X
M[:, :, 0, 2] = Y - .5
M[:, :, 1, 2] = X + .5
M = MV2.reshape(M, (sh[0] * sh[1], 2, 3))
elif design == 32:
sh.append(5)
M = MV2.zeros(sh)
M[:, :, 0, 0] = Y
M[:, :, 1, 0] = X
d = .5 / MV2.sqrt(3.)
if n == 1:
M[:, :, 0, 1] = Y + .5
M[:, :, 1, 1] = X
M[:, :, 0, 2] = Y + .5
M[:, :, 1, 2] = X - .5
M[:, :, 0, 3] = Y - d
M[:, :, 1, 3] = X - .5
# dummy point for n==1 or 3
M[:, :, 0, 4] = Y
M[:, :, 1, 4] = X
if n == 2:
M[:, :, 0, 1] = Y - d
M[:, :, 1, 1] = X - .5
M[:, :, 0, 2] = Y - .5
M[:, :, 1, 2] = X - .5
M[:, :, 0, 3] = Y - .5
M[:, :, 1, 3] = X + .5
M[:, :, 0, 4] = Y - d
M[:, :, 1, 4] = X + .5
elif n == 3:
M[:, :, 0, 1] = Y + .5
M[:, :, 1, 1] = X
M[:, :, 0, 2] = Y + .5
M[:, :, 1, 2] = X + .5
M[:, :, 0, 3] = Y - d
M[:, :, 1, 3] = X + .5
# dummy point for n==1 or 3
M[:, :, 0, 4] = Y
M[:, :, 1, 4] = X
M = MV2.reshape(M, (sh[0] * sh[1], 2, 5))
else:
sh.append(5)
M = MV2.zeros(sh)
M[:, :, 0, 0] = Y
M[:, :, 1, 0] = X
d = 1. / 3.
if n == 1:
M[:, :, 0, 1] = Y + .5
M[:, :, 1, 1] = X
M[:, :, 0, 2] = Y + .5
M[:, :, 1, 2] = X - .5
M[:, :, 0, 3] = Y - d
M[:, :, 1, 3] = X - .5
# dummy point for n==1 or 3
M[:, :, 0, 4] = Y
M[:, :, 1, 4] = X
if n == 2:
M[:, :, 0, 1] = Y - d
M[:, :, 1, 1] = X - .5
M[:, :, 0, 2] = Y - .5
M[:, :, 1, 2] = X - .5
M[:, :, 0, 3] = Y - .5
M[:, :, 1, 3] = X + .5
M[:, :, 0, 4] = Y - d
M[:, :, 1, 4] = X + .5
elif n == 3:
M[:, :, 0, 1] = Y + .5
M[:, :, 1, 1] = X
M[:, :, 0, 2] = Y + .5
M[:, :, 1, 2] = X + .5
M[:, :, 0, 3] = Y - d
M[:, :, 1, 3] = X + .5
# dummy point for n==1 or 3
M[:, :, 0, 4] = Y
M[:, :, 1, 4] = X
M = MV2.reshape(M, (sh[0] * sh[1], 2, 5))
elif ntot == 4:
sh.append(3)
M = MV2.zeros(sh)
M[:, :, 0, 0] = Y
M[:, :, 1, 0] = X
if n == 1:
M[:, :, 0, 1] = Y + .5
M[:, :, 1, 1] = X + .5
M[:, :, 0, 2] = Y + .5
M[:, :, 1, 2] = X - .5
elif n == 2:
M[:, :, 0, 1] = Y + .5
M[:, :, 1, 1] = X - .5
M[:, :, 0, 2] = Y - .5
M[:, :, 1, 2] = X - .5
elif n == 3:
M[:, :, 0, 1] = Y - .5
M[:, :, 1, 1] = X - .5
M[:, :, 0, 2] = Y - .5
M[:, :, 1, 2] = X + .5
elif n == 4:
M[:, :, 0, 1] = Y - .5
M[:, :, 1, 1] = X + .5
M[:, :, 0, 2] = Y + .5
M[:, :, 1, 2] = X + .5
M = MV2.reshape(M, (sh[0] * sh[1], 2, 3))
else:
raise RuntimeError(
"Portrait plot support only up to 4 subcells at the moment")
else:
if isinstance(meshfill, vcs.meshfill.P):
tid = mesh.id
elif isinstance(meshfill, str):
tid = mesh
else:
raise 'Error cannot understand what you mean by meshfill=' + \
str(meshfill)
meshfill = vcs.createmeshfill(source=tid)
if mesh is None:
mesh = M
raveled = MV2.ravel(data)
self.x.plot(raveled, mesh, template, meshfill,
bg=self.bg, continents=0)
# If required plot values
if self.PLOT_SETTINGS.values.show:
self.draw_values(raveled, mesh, meshfill, template)
# Now prints the rest of the title, etc...
# but only if n==1
if n == 1:
axes_param = []
for a in data.getAxis(0).id.split('___'):
axes_param.append(a)
for a in data.getAxis(1).id.split('___'):
axes_param.append(a)
nparam = 0
for p in self.parameters_list:
if p not in self.dummies and \
p not in self.auto_dummies and \
p not in axes_param:
nparam += 1
if self.verbose:
print('NPARAM:', nparam)
if nparam > 0:
for i in range(nparam):
j = MV2.ceil(float(nparam) / (i + 1.))
if j <= i:
break
npc = i # number of lines
npl = int(j) # number of coulmns
if npc * npl < nparam:
npl += 1
# computes space between each line
dl = (.95 - template.data.y2) / npl
dc = .9 / npc
npci = 0 # counter for columns
npli = 0 # counter for lines
for p in self.parameters_list:
if p not in self.dummies and \
p not in self.auto_dummies and \
p not in axes_param:
txt = self.x.createtext(
None,
self.PLOT_SETTINGS.parametertable.name,
None,
self.PLOT_SETTINGS.parameterorientation.name)
value = getattr(self, p)
if (isinstance(value, (list, tuple)) and
len(value) == 1):
txt.string = p + ':' + \
str(self.makestring(p, value[0]))
display = 1
elif isinstance(value, (str, int, float)):
txt.string = p + ':' + \
str(self.makestring(p, value))
display = 1
else:
display = 0
if display:
# Now figures out where to put these...
txt.x = [(npci) * dc + dc / 2. + .05]
txt.y = [1. - (npli) * dl - dl / 2.]
npci += 1
if npci >= npc:
npci = 0
npli += 1
if p in list(self.altered.keys()):
dic = self.altered[p]
if dic['size'] is not None:
txt.size = dic['size']
if dic['color'] is not None:
txt.color = dic['color']
if dic['x'] is not None:
txt.x = dic['x']
if dic['y'] is not None:
txt.y = dic['y']
self.x.plot(txt, bg=self.bg, continents=0)
if self.PLOT_SETTINGS.time_stamp is not None:
# sp = time.ctime().split()
# sp = sp[:3] + [sp[-1]]
# self.PLOT_SETTINGS.time_stamp.string = ''.join(sp)
sp = "{:v%Y%m%d}".format(datetime.datetime.now())
self.PLOT_SETTINGS.time_stamp.string = sp
self.x.plot(
self.PLOT_SETTINGS.time_stamp,
bg=self.bg,
continents=0)
if self.PLOT_SETTINGS.logo is not None:
self.PLOT_SETTINGS.logo.plot(self.x, bg=self.bg)
return mesh, template, meshfill
    def draw_values(self, raveled, mesh, meshfill, template):
        """Overlay each cell's numeric value on the portrait-plot mesh.

        Arguments:
            raveled: flattened (possibly masked) per-cell data values.
            mesh: cell-corner coordinate array matching ``raveled``
                (y coordinates in ``[:, 0]``, x coordinates in ``[:, 1]``).
            meshfill: meshfill graphics method (levels, colors, world coords).
            template: vcs template providing the data-area viewport.
        """
        # Values to use (data or user passed)
        if self.PLOT_SETTINGS.values.array is None:
            data = MV2.array(raveled)
        else:
            data = MV2.ravel(self.PLOT_SETTINGS.values.array)
            if isinstance(raveled, numpy.ma.core.MaskedArray):
                # combine the user-supplied array's mask with the data mask
                data.mask = data.mask + raveled.mask
        # Now remove masked values
        if data.mask is not numpy.ma.nomask:  # we have missing
            indices = numpy.argwhere(numpy.ma.logical_not(data.mask))
            data = data.take(indices).filled(0)[:, 0]
            M = mesh.filled()[indices][:, 0]
            raveled = raveled.take(indices).filled(0.)[:, 0]
        else:
            M = mesh.filled()
        # Baricenters of each cell polygon
        xcenters = numpy.average(M[:, 1], axis=-1)
        ycenters = numpy.average(M[:, 0], axis=-1)
        self.PLOT_SETTINGS.values.text.viewport = [template.data.x1, template.data.x2,
                                                   template.data.y1, template.data.y2]
        # Use meshfill's world coordinates when set (1.e20 means "unset"),
        # otherwise fall back to the mesh extent.
        if not numpy.allclose(meshfill.datawc_x1, 1.e20):
            self.PLOT_SETTINGS.values.text.worldcoordinate = [meshfill.datawc_x1,
                                                              meshfill.datawc_x2,
                                                              meshfill.datawc_y1,
                                                              meshfill.datawc_y2]
        else:
            self.PLOT_SETTINGS.values.text.worldcoordinate = [M[:, 1].min(),
                                                              M[:, 1].max(),
                                                              M[:, 0].min(),
                                                              M[:, 0].max()]
        self.PLOT_SETTINGS.values.text.string = [
            self.PLOT_SETTINGS.values.format.format(value) for value in data]
        # Now that we have the formatted values we need get the longest string
        lengths = [len(txt) for txt in self.PLOT_SETTINGS.values.text.string]
        longest = max(lengths)
        index = lengths.index(longest)
        tmptxt = vcs.createtext()
        tmptxt.string = self.PLOT_SETTINGS.values.text.string[index]
        tmptxt.x = xcenters[index]
        tmptxt.y = ycenters[index]
        # bounding box of the cell holding the longest label
        smallY = M[index, 0, :].min()
        bigY = M[index, 0, :].max()
        smallX = M[index, 1, :].min()
        bigX = M[index, 1, :].max()
        tmptxt.worldcoordinate = self.PLOT_SETTINGS.values.text.worldcoordinate
        tmptxt.viewport = self.PLOT_SETTINGS.values.text.viewport
        # Now try to shrink until it fits (at most 1% overflow in each axis)
        extent = self.x.gettextextent(tmptxt)[0]
        while ((extent[1] - extent[0]) / (bigX - smallX) > 1.01 or
                (extent[3] - extent[2]) / (bigY - smallY) > 1.01) and \
                tmptxt.height >= 1:
            tmptxt.height -= 1
            extent = self.x.gettextextent(tmptxt)[0]
        self.PLOT_SETTINGS.values.text.height = tmptxt.height
        # Finally we need to split into two text objects for dark and light background
        # Step 1: figure out each bin color type (dark/light)
        colormap = self.x.colormap
        if colormap is None:
            colormap = vcs._colorMap
        cmap = vcs.getcolormap(colormap)
        colors = meshfill.fillareacolors
        dark_bins = [
            is_dark_color_type(
                *cmap.getcolorcell(color)) for color in colors]
        # Step 2: put values into bin (color where they land)
        bins = meshfill.levels[1:-1]
        binned = numpy.digitize(raveled, bins)
        isdark = [dark_bins[indx] for indx in binned]
        tmptxt = vcs.createtext(
            Tt_source=self.PLOT_SETTINGS.values.text.Tt_name,
            To_source=self.PLOT_SETTINGS.values.text.To_name)
        # Light-colored labels on dark cells, dark labels on light cells.
        for pick, color in [(numpy.argwhere(isdark), self.PLOT_SETTINGS.values.lightcolor),
                            (numpy.argwhere(numpy.logical_not(isdark)), self.PLOT_SETTINGS.values.darkcolor)]:
            tmptxt.x = xcenters.take(pick)[:, 0].tolist()
            tmptxt.y = ycenters.take(pick)[:, 0].tolist()
            tmptxt.string = numpy.array(
                self.PLOT_SETTINGS.values.text.string).take(pick)[
                :, 0].tolist()
            tmptxt.color = color
            self.x.plot(tmptxt, bg=self.bg, continents=0)
def set_colormap(self):
self.x.setcolormap("bl_rd_12")
| [
"vcs.createtext",
"vcs.createtextorientation",
"numpy.logical_not",
"vcs.createtemplate",
"MV2.array",
"numpy.array",
"pkg_resources.Requirement.parse",
"genutil.arrayindexing.set",
"numpy.ma.logical_not",
"MV2.count",
"MV2.transpose",
"MV2.ones",
"MV2.sqrt",
"MV2.concatenate",
"MV2.mask... | [((287, 335), 'pkg_resources.Requirement.parse', 'pkg_resources.Requirement.parse', (['"""pcmdi_metrics"""'], {}), "('pcmdi_metrics')\n", (318, 335), False, 'import pkg_resources\n'), ((912, 928), 'vcs.createtext', 'vcs.createtext', ([], {}), '()\n', (926, 928), False, 'import vcs\n'), ((2475, 2502), 'vcs.createtextorientation', 'vcs.createtextorientation', ([], {}), '()\n', (2500, 2502), False, 'import vcs\n'), ((2685, 2712), 'vcs.createtextorientation', 'vcs.createtextorientation', ([], {}), '()\n', (2710, 2712), False, 'import vcs\n'), ((2885, 2906), 'vcs.createtexttable', 'vcs.createtexttable', ([], {}), '()\n', (2904, 2906), False, 'import vcs\n'), ((2978, 3005), 'vcs.createtextorientation', 'vcs.createtextorientation', ([], {}), '()\n', (3003, 3005), False, 'import vcs\n'), ((3178, 3199), 'vcs.createtexttable', 'vcs.createtexttable', ([], {}), '()\n', (3197, 3199), False, 'import vcs\n'), ((3601, 3617), 'vcs.createtext', 'vcs.createtext', ([], {}), '()\n', (3615, 3617), False, 'import vcs\n'), ((4363, 4432), 'os.path.join', 'os.path.join', (['pmp_egg_path', '"""pmp"""', '"""graphics"""', '"""vcs"""', '"""portraits.scr"""'], {}), "(pmp_egg_path, 'pmp', 'graphics', 'vcs', 'portraits.scr')\n", (4375, 4432), False, 'import os\n'), ((14131, 14161), 'MV2.zeros', 'MV2.zeros', (['a0.shape', 'MV2.float'], {}), '(a0.shape, MV2.float)\n', (14140, 14161), False, 'import MV2\n'), ((14494, 14509), 'MV2.count', 'MV2.count', (['b', '(0)'], {}), '(b, 0)\n', (14503, 14509), False, 'import MV2\n'), ((14560, 14580), 'genutil.grower', 'genutil.grower', (['b', 'n'], {}), '(b, n)\n', (14574, 14580), False, 'import genutil\n'), ((15437, 15467), 'MV2.zeros', 'MV2.zeros', (['a0.shape', 'MV2.float'], {}), '(a0.shape, MV2.float)\n', (15446, 15467), False, 'import MV2\n'), ((15799, 15814), 'MV2.count', 'MV2.count', (['b', '(0)'], {}), '(b, 0)\n', (15808, 15814), False, 'import MV2\n'), ((15873, 15893), 'genutil.grower', 'genutil.grower', (['b', 'n'], {}), '(b, n)\n', 
(15887, 15893), False, 'import genutil\n'), ((20467, 20509), 'MV2.ones', 'MV2.ones', (['(axes_length[0], axes_length[1])'], {}), '((axes_length[0], axes_length[1]))\n', (20475, 20509), False, 'import MV2\n'), ((20557, 20584), 'MV2.masked_equal', 'MV2.masked_equal', (['output', '(1)'], {}), '(output, 1)\n', (20573, 20584), False, 'import MV2\n'), ((20729, 20768), 'genutil.StringConstructor', 'StringConstructor', (['self.files_structure'], {}), '(self.files_structure)\n', (20746, 20768), False, 'from genutil import StringConstructor\n'), ((24344, 24397), 'MV2.reshape', 'MV2.reshape', (['output', '(axes_length[0], axes_length[1])'], {}), '(output, (axes_length[0], axes_length[1]))\n', (24355, 24397), False, 'import MV2\n'), ((25592, 25612), 'vcs.createtemplate', 'vcs.createtemplate', ([], {}), '()\n', (25610, 25612), False, 'import vcs\n'), ((39427, 39442), 'MV2.ravel', 'MV2.ravel', (['data'], {}), '(data)\n', (39436, 39442), False, 'import MV2\n'), ((44458, 44489), 'numpy.average', 'numpy.average', (['M[:, 1]'], {'axis': '(-1)'}), '(M[:, 1], axis=-1)\n', (44471, 44489), False, 'import numpy\n'), ((44509, 44540), 'numpy.average', 'numpy.average', (['M[:, 0]'], {'axis': '(-1)'}), '(M[:, 0], axis=-1)\n', (44522, 44540), False, 'import numpy\n'), ((45798, 45814), 'vcs.createtext', 'vcs.createtext', ([], {}), '()\n', (45812, 45814), False, 'import vcs\n'), ((46925, 46950), 'vcs.getcolormap', 'vcs.getcolormap', (['colormap'], {}), '(colormap)\n', (46940, 46950), False, 'import vcs\n'), ((47227, 47256), 'numpy.digitize', 'numpy.digitize', (['raveled', 'bins'], {}), '(raveled, bins)\n', (47241, 47256), False, 'import numpy\n'), ((47328, 47447), 'vcs.createtext', 'vcs.createtext', ([], {'Tt_source': 'self.PLOT_SETTINGS.values.text.Tt_name', 'To_source': 'self.PLOT_SETTINGS.values.text.To_name'}), '(Tt_source=self.PLOT_SETTINGS.values.text.Tt_name, To_source=\n self.PLOT_SETTINGS.values.text.To_name)\n', (47342, 47447), False, 'import vcs\n'), ((2108, 2129), 'vcs.utils.Logo', 
'vcs.utils.Logo', (['value'], {}), '(value)\n', (2122, 2129), False, 'import vcs\n'), ((4333, 4343), 'vcs.init', 'vcs.init', ([], {}), '()\n', (4341, 4343), False, 'import vcs\n'), ((14022, 14041), 'MV2.transpose', 'MV2.transpose', (['data'], {}), '(data)\n', (14035, 14041), False, 'import MV2\n'), ((14313, 14350), 'genutil.arrayindexing.set', 'genutil.arrayindexing.set', (['b', 'c', 'Indx'], {}), '(b, c, Indx)\n', (14338, 14350), False, 'import genutil\n'), ((14416, 14438), 'MV2.masked_where', 'MV2.masked_where', (['m', 'b'], {}), '(m, b)\n', (14432, 14438), False, 'import MV2\n'), ((14469, 14481), 'MV2.array', 'MV2.array', (['b'], {}), '(b)\n', (14478, 14481), False, 'import MV2\n'), ((14692, 14708), 'MV2.transpose', 'MV2.transpose', (['b'], {}), '(b)\n', (14705, 14708), False, 'import MV2\n'), ((14728, 14747), 'MV2.transpose', 'MV2.transpose', (['data'], {}), '(data)\n', (14741, 14747), False, 'import MV2\n'), ((15619, 15656), 'genutil.arrayindexing.set', 'genutil.arrayindexing.set', (['b', 'c', 'Indx'], {}), '(b, c, Indx)\n', (15644, 15656), False, 'import genutil\n'), ((15721, 15743), 'MV2.masked_where', 'MV2.masked_where', (['m', 'b'], {}), '(m, b)\n', (15737, 15743), False, 'import MV2\n'), ((15774, 15786), 'MV2.array', 'MV2.array', (['b'], {}), '(b)\n', (15783, 15786), False, 'import MV2\n'), ((17321, 17340), 'MV2.reshape', 'MV2.reshape', (['a0', 'sh'], {}), '(a0, sh)\n', (17332, 17340), False, 'import MV2\n'), ((17630, 17675), 'MV2.reshape', 'MV2.reshape', (['a0', '(a0.shape[0], sh[1] * sh[2])'], {}), '(a0, (a0.shape[0], sh[1] * sh[2]))\n', (17641, 17675), False, 'import MV2\n'), ((17696, 17732), 'MV2.reshape', 'MV2.reshape', (['real2', '(sh[1] * sh[2],)'], {}), '(real2, (sh[1] * sh[2],))\n', (17707, 17732), False, 'import MV2\n'), ((17750, 17767), 'MV2.transpose', 'MV2.transpose', (['a0'], {}), '(a0)\n', (17763, 17767), False, 'import MV2\n'), ((17921, 17939), 'MV2.array', 'MV2.array', (['indices'], {}), '(indices)\n', (17930, 17939), False, 'import 
MV2\n'), ((17962, 17998), 'MV2.reshape', 'MV2.reshape', (['indices', '(sh[1], sh[2])'], {}), '(indices, (sh[1], sh[2]))\n', (17973, 17998), False, 'import MV2\n'), ((18149, 18176), 'MV2.masked_equal', 'MV2.masked_equal', (['a0', '(1e+20)'], {}), '(a0, 1e+20)\n', (18165, 18176), False, 'import MV2\n'), ((18193, 18208), 'MV2.count', 'MV2.count', (['a', '(1)'], {}), '(a, 1)\n', (18202, 18208), False, 'import MV2\n'), ((18225, 18254), 'MV2.reshape', 'MV2.reshape', (['a', 'indices.shape'], {}), '(a, indices.shape)\n', (18236, 18254), False, 'import MV2\n'), ((24739, 24760), 'MV2.transpose', 'MV2.transpose', (['output'], {}), '(output)\n', (24752, 24760), False, 'import MV2\n'), ((29132, 29167), 'vcs.createtextorientation', 'vcs.createtextorientation', (['"""crap22"""'], {}), "('crap22')\n", (29157, 29167), False, 'import vcs\n'), ((29478, 29495), 'tempfile.mktemp', 'tempfile.mktemp', ([], {}), '()\n', (29493, 29495), False, 'import tempfile\n'), ((30434, 30464), 'vcs.createtemplate', 'vcs.createtemplate', ([], {'source': 'tid'}), '(source=tid)\n', (30452, 30464), False, 'import vcs\n'), ((30653, 30673), 'vcs.createmeshfill', 'vcs.createmeshfill', ([], {}), '()\n', (30671, 30673), False, 'import vcs\n'), ((32600, 32627), 'MV2.indices', 'MV2.indices', (['(sh[0], sh[1])'], {}), '((sh[0], sh[1]))\n', (32611, 32627), False, 'import MV2\n'), ((39330, 39360), 'vcs.createmeshfill', 'vcs.createmeshfill', ([], {'source': 'tid'}), '(source=tid)\n', (39348, 39360), False, 'import vcs\n'), ((43839, 43857), 'MV2.array', 'MV2.array', (['raveled'], {}), '(raveled)\n', (43848, 43857), False, 'import MV2\n'), ((43891, 43933), 'MV2.ravel', 'MV2.ravel', (['self.PLOT_SETTINGS.values.array'], {}), '(self.PLOT_SETTINGS.values.array)\n', (43900, 43933), False, 'import MV2\n'), ((44730, 44771), 'numpy.allclose', 'numpy.allclose', (['meshfill.datawc_x1', '(1e+20)'], {}), '(meshfill.datawc_x1, 1e+20)\n', (44744, 44771), False, 'import numpy\n'), ((14233, 14245), 'MV2.ones', 'MV2.ones', (['sh'], 
{}), '(sh)\n', (14241, 14245), False, 'import MV2\n'), ((15539, 15551), 'MV2.ones', 'MV2.ones', (['sh'], {}), '(sh)\n', (15547, 15551), False, 'import MV2\n'), ((17468, 17488), 'MV2.reshape', 'MV2.reshape', (['tmp', 'sh'], {}), '(tmp, sh)\n', (17479, 17488), False, 'import MV2\n'), ((17510, 17536), 'MV2.concatenate', 'MV2.concatenate', (['(a0, tmp)'], {}), '((a0, tmp))\n', (17525, 17536), False, 'import MV2\n'), ((18096, 18132), 'MV2.masked_where', 'MV2.masked_where', (['real.mask', 'indices'], {}), '(real.mask, indices)\n', (18112, 18132), False, 'import MV2\n'), ((23334, 23354), 'cdms2.open', 'cdms2.open', (['files[0]'], {}), '(files[0])\n', (23344, 23354), False, 'import cdms2\n'), ((29212, 29244), 'vcs.gettextorientation', 'vcs.gettextorientation', (['"""crap22"""'], {}), "('crap22')\n", (29234, 29244), False, 'import vcs\n'), ((31590, 31606), 'vcs.minmax', 'vcs.minmax', (['data'], {}), '(data)\n', (31600, 31606), False, 'import vcs\n'), ((31699, 31720), 'vcs.mkscale', 'vcs.mkscale', (['min', 'max'], {}), '(min, max)\n', (31710, 31720), False, 'import vcs\n'), ((32752, 32765), 'MV2.zeros', 'MV2.zeros', (['sh'], {}), '(sh)\n', (32761, 32765), False, 'import MV2\n'), ((33098, 33135), 'MV2.reshape', 'MV2.reshape', (['M', '(sh[0] * sh[1], 2, 4)'], {}), '(M, (sh[0] * sh[1], 2, 4))\n', (33109, 33135), False, 'import MV2\n'), ((44179, 44210), 'numpy.ma.logical_not', 'numpy.ma.logical_not', (['data.mask'], {}), '(data.mask)\n', (44199, 44210), False, 'import numpy\n'), ((47497, 47519), 'numpy.argwhere', 'numpy.argwhere', (['isdark'], {}), '(isdark)\n', (47511, 47519), False, 'import numpy\n'), ((17554, 17569), 'MV2.sort', 'MV2.sort', (['a0', '(0)'], {}), '(a0, 0)\n', (17562, 17569), False, 'import MV2\n'), ((17864, 17897), 'MV2.searchsorted', 'MV2.searchsorted', (['a0[i]', 'real2[i]'], {}), '(a0[i], real2[i])\n', (17880, 17897), False, 'import MV2\n'), ((33213, 33226), 'MV2.zeros', 'MV2.zeros', (['sh'], {}), '(sh)\n', (33222, 33226), False, 'import MV2\n'), ((33502, 
33539), 'MV2.reshape', 'MV2.reshape', (['M', '(sh[0] * sh[1], 2, 3)'], {}), '(M, (sh[0] * sh[1], 2, 3))\n', (33513, 33539), False, 'import MV2\n'), ((43273, 43296), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (43294, 43296), False, 'import datetime\n'), ((47604, 47629), 'numpy.logical_not', 'numpy.logical_not', (['isdark'], {}), '(isdark)\n', (47621, 47629), False, 'import numpy\n'), ((32214, 32242), 'vcs.getcolors', 'vcs.getcolors', (['levs'], {'split': '(1)'}), '(levs, split=1)\n', (32227, 32242), False, 'import vcs\n'), ((33714, 33727), 'MV2.zeros', 'MV2.zeros', (['sh'], {}), '(sh)\n', (33723, 33727), False, 'import MV2\n'), ((34680, 34717), 'MV2.reshape', 'MV2.reshape', (['M', '(sh[0] * sh[1], 2, 3)'], {}), '(M, (sh[0] * sh[1], 2, 3))\n', (34691, 34717), False, 'import MV2\n'), ((37931, 37944), 'MV2.zeros', 'MV2.zeros', (['sh'], {}), '(sh)\n', (37940, 37944), False, 'import MV2\n'), ((38835, 38872), 'MV2.reshape', 'MV2.reshape', (['M', '(sh[0] * sh[1], 2, 3)'], {}), '(M, (sh[0] * sh[1], 2, 3))\n', (38846, 38872), False, 'import MV2\n'), ((34810, 34823), 'MV2.zeros', 'MV2.zeros', (['sh'], {}), '(sh)\n', (34819, 34823), False, 'import MV2\n'), ((36259, 36296), 'MV2.reshape', 'MV2.reshape', (['M', '(sh[0] * sh[1], 2, 5)'], {}), '(M, (sh[0] * sh[1], 2, 5))\n', (36270, 36296), False, 'import MV2\n'), ((36376, 36389), 'MV2.zeros', 'MV2.zeros', (['sh'], {}), '(sh)\n', (36385, 36389), False, 'import MV2\n'), ((37816, 37853), 'MV2.reshape', 'MV2.reshape', (['M', '(sh[0] * sh[1], 2, 5)'], {}), '(M, (sh[0] * sh[1], 2, 5))\n', (37827, 37853), False, 'import MV2\n'), ((47815, 47865), 'numpy.array', 'numpy.array', (['self.PLOT_SETTINGS.values.text.string'], {}), '(self.PLOT_SETTINGS.values.text.string)\n', (47826, 47865), False, 'import numpy\n'), ((34929, 34942), 'MV2.sqrt', 'MV2.sqrt', (['(3.0)'], {}), '(3.0)\n', (34937, 34942), False, 'import MV2\n')] |
import random
import numpy as np
def divided_training_test(examples_matrix, lbls, train_prec):
concatenated_examples_lbs = np.concatenate((examples_matrix, lbls), axis=1)
np.random.shuffle(concatenated_examples_lbs)
size_of_vector = np.shape(concatenated_examples_lbs)[1]
size_of_matrix = len(concatenated_examples_lbs)
size_of_training = int(size_of_matrix * train_prec)
size_of_cv = int(size_of_matrix * ((1 - train_prec) / 2))
size_of_test = size_of_cv
if (size_of_matrix % 2) == 1:
size_of_test += 1
training = concatenated_examples_lbs[0:size_of_training, :]
validation = concatenated_examples_lbs[size_of_training:size_of_training+size_of_cv, :]
test = concatenated_examples_lbs[size_of_training+size_of_cv:size_of_matrix, :]
training_ex, training_lbls = split_to_ex_lbls(size_of_vector, training)
validation_ex, validation_lbls = split_to_ex_lbls(size_of_vector, validation)
test_ex, test_lbls = split_to_ex_lbls(size_of_vector, test)
return training_ex, training_lbls, validation_ex, validation_lbls, test_ex, test_lbls
def split_to_ex_lbls(size_of_vector, concatened_vec):
    """Split a concatenated matrix into feature columns and the label column.

    Args:
        size_of_vector: total number of columns in ``concatened_vec``.
        concatened_vec: 2-D array whose last column holds the labels.

    Returns:
        Tuple (features, labels): all but the last column, and the last
        column kept as a 2-D (n, 1) array.
    """
    feature_cols = concatened_vec[:, 0:size_of_vector - 1]
    label_col = concatened_vec[:, size_of_vector - 1:size_of_vector]
    return feature_cols, label_col
| [
"numpy.shape",
"numpy.concatenate",
"numpy.random.shuffle"
] | [((129, 176), 'numpy.concatenate', 'np.concatenate', (['(examples_matrix, lbls)'], {'axis': '(1)'}), '((examples_matrix, lbls), axis=1)\n', (143, 176), True, 'import numpy as np\n'), ((181, 225), 'numpy.random.shuffle', 'np.random.shuffle', (['concatenated_examples_lbs'], {}), '(concatenated_examples_lbs)\n', (198, 225), True, 'import numpy as np\n'), ((248, 283), 'numpy.shape', 'np.shape', (['concatenated_examples_lbs'], {}), '(concatenated_examples_lbs)\n', (256, 283), True, 'import numpy as np\n')] |
""" Prepare Tests
Script generating and a set of parameters for simulations.
Parameters are saved as set in `parameters/test_set`
To use just run
python test_set
script does not take any command line arguments or flags.
The script is intended to provide a simple way to describe
what experiments to perform.
"""
import pickle
import numpy.random as nr
import numpy as np
from sortedcontainers import SortedSet
# Sorted collections of (degree, oversampling, noise) triples and of every
# polynomial degree that appears in the test set.
test_set = SortedSet([])
all_degrees = SortedSet([])
new_params = False  # set True to regenerate the random polynomial parameters
n_tests = 100  # number of random parameter vectors per polynomial degree
noises = np.linspace(-6, 1, 100)  # noise scales; only the upper half is used
for polynomial_degree in [5]:
    for oversampling in [1, 2, 4, 8]:
        for noise_scale in noises[50:]:
            test_set.add((polynomial_degree, oversampling, noise_scale))
            all_degrees.add(polynomial_degree)
if new_params:
    # Draw fresh random coefficients, pinning the constant term to 1.
    for polynomial_degree in all_degrees:
        params = 2 * nr.randn(n_tests, polynomial_degree)
        params[:, 0] = 1
        # NOTE(review): file name ends with ".csv." (trailing dot) -- confirm
        # this matches what the consuming scripts expect.
        np.savetxt("parameters/polynomials{}.csv.".format(polynomial_degree), params, delimiter=",")
# Persist the parameter set for the simulation scripts to pick up.
with open("parameters/test_set", "wb") as out_file:
    pickle.dump(list(test_set), out_file)
| [
"numpy.linspace",
"sortedcontainers.SortedSet",
"numpy.random.randn"
] | [((433, 446), 'sortedcontainers.SortedSet', 'SortedSet', (['[]'], {}), '([])\n', (442, 446), False, 'from sortedcontainers import SortedSet\n'), ((461, 474), 'sortedcontainers.SortedSet', 'SortedSet', (['[]'], {}), '([])\n', (470, 474), False, 'from sortedcontainers import SortedSet\n'), ((518, 541), 'numpy.linspace', 'np.linspace', (['(-6)', '(1)', '(100)'], {}), '(-6, 1, 100)\n', (529, 541), True, 'import numpy as np\n'), ((850, 886), 'numpy.random.randn', 'nr.randn', (['n_tests', 'polynomial_degree'], {}), '(n_tests, polynomial_degree)\n', (858, 886), True, 'import numpy.random as nr\n')] |
from __future__ import absolute_import
from ..coordinate import Coordinate
from ..roi import Roi
from .shared_graph_provider import\
SharedGraphProvider, SharedSubGraph
from ..graph import Graph, DiGraph
from pymongo import MongoClient, ASCENDING, ReplaceOne, UpdateOne
from pymongo.errors import BulkWriteError, WriteError
import logging
import numpy as np
import networkx as nx
logger = logging.getLogger(__name__)
class MongoDbGraphProvider(SharedGraphProvider):
'''Provides shared graphs stored in a MongoDB.
Nodes are assumed to have at least an attribute ``id``. If the have a
position attribute (set via argument ``position_attribute``, defaults to
``position``), it will be used for geometric slicing (see ``__getitem__``).
Edges are assumed to have at least attributes ``u``, ``v``.
Arguments:
db_name (``string``):
The name of the MongoDB database.
host (``string``, optional):
The URL of the MongoDB host.
mode (``string``, optional):
One of ``r``, ``r+``, or ``w``. Defaults to ``r+``. ``w`` drops the
node, edge, and meta collections.
directed (``bool``):
True if the graph is directed, false otherwise. If None, attempts
to read value from existing database. If not found, defaults to
false.
nodes_collection (``string``):
edges_collection (``string``):
meta_collection (``string``):
Names of the nodes, edges. and meta collections, should they differ
from ``nodes``, ``edges``, and ``meta``.
endpoint_names (``list`` or ``tuple`` with two elements):
What keys to use for the start and end of an edge. Default is
['u', 'v']
position_attribute (``string`` or list of ``string``s, optional):
The node attribute(s) that contain position information. This will
be used for slicing subgraphs via ``__getitem__``. If a single
string, the attribute is assumed to be an array. If a list, each
entry denotes the position coordinates in order (e.g.,
`position_z`, `position_y`, `position_x`).
'''
def __init__(
self,
db_name,
host=None,
mode='r+',
directed=None,
total_roi=None,
nodes_collection='nodes',
edges_collection='edges',
endpoint_names=None,
meta_collection='meta',
position_attribute='position'):
self.db_name = db_name
self.host = host
self.mode = mode
self.directed = directed
self.total_roi = total_roi
self.nodes_collection_name = nodes_collection
self.edges_collection_name = edges_collection
self.endpoint_names = ['u', 'v'] if endpoint_names is None\
else endpoint_names
self.meta_collection_name = meta_collection
self.client = None
self.database = None
self.nodes = None
self.edges = None
self.meta = None
self.position_attribute = position_attribute
try:
self.__connect()
if mode != 'w':
if self.db_name not in self.client.list_database_names():
logger.warn("Opened with read mode %s, but no db with name"
"%s found in client at %s"
% (mode, self.db_name, self.host))
self.__open_db()
if mode == 'w':
logger.info(
"dropping collections %s, %s, and %s",
self.nodes_collection_name,
self.edges_collection_name,
self.meta_collection_name)
self.__open_collections()
self.nodes.drop()
self.edges.drop()
self.meta.drop()
collection_names = self.database.list_collection_names()
if meta_collection in collection_names:
metadata = self.__get_metadata()
if metadata:
self.__check_metadata(metadata)
else:
self.__set_metadata()
else:
self.__set_metadata()
if nodes_collection not in collection_names:
self.__create_node_collection()
if edges_collection not in collection_names:
self.__create_edge_collection()
except Exception as e:
self.__disconnect()
raise e
    def __del__(self):
        # Best-effort cleanup: release the Mongo connection when the provider
        # is garbage-collected (disconnecting twice is safe).
        self.__disconnect()
    def read_nodes(self, roi, attr_filter=None, read_attrs=None):
        '''Return a list of nodes within roi.

        Arguments:

            roi (``daisy.Roi``):

                Get nodes that fall within this roi

            attr_filter (``dict``):

                Only return nodes that have attribute=value for
                each attribute value pair in attr_filter.

            read_attrs (``list`` of ``string``):

                Attributes to return. Others will be ignored
        '''

        logger.debug("Querying nodes in %s", roi)
        if attr_filter is None:
            attr_filter = {}
        try:
            self.__connect()
            self.__open_db()
            self.__open_collections()
            # Combine the spatial query with one equality clause per filter.
            pos_query = self.__pos_query(roi)
            query_list = [pos_query]
            for attr, value in attr_filter.items():
                query_list.append({attr: value})

            projection = {'_id': False}
            if read_attrs is not None:
                # Restrict the projection to the requested attributes, but
                # always include id and the position attribute(s).
                projection['id'] = True
                if type(self.position_attribute) == list:
                    for a in self.position_attribute:
                        projection[a] = True
                else:
                    projection[self.position_attribute] = True
                for attr in read_attrs:
                    projection[attr] = True

            nodes = self.nodes.find({'$and': query_list}, projection)
            nodes = list(nodes)

        except Exception as e:
            # drop the connection on failure so a retry reconnects cleanly
            self.__disconnect()
            raise e

        for node in nodes:
            # ids are stored as signed int64 in Mongo; expose them as uint64
            node['id'] = np.uint64(node['id'])

        return nodes
def num_nodes(self, roi):
'''Return the number of nodes in the roi.'''
try:
self.__connect()
self.__open_db()
self.__open_collections()
num = self.nodes.count(self.__pos_query(roi))
except Exception as e:
self.__disconnect()
raise e
return num
    def has_edges(self, roi):
        '''Returns true if there is at least one edge in the roi.'''

        try:
            self.__connect()
            self.__open_db()
            self.__open_collections()

            # Edges are anchored at their source node, so first collect the
            # ids of all nodes inside the roi.
            nodes = list(self.nodes.find(self.__pos_query(roi)))

            # no nodes -> no edges
            if len(nodes) == 0:
                return False

            node_ids = list([int(np.int64(n['id'])) for n in nodes])

            # limit query to 1M node IDs (otherwise we might exceed the 16MB
            # BSON document size limit)
            length = len(node_ids)
            query_size = 1000000
            num_chunks = (length - 1)//query_size + 1
            for i in range(num_chunks):
                i_b = i*query_size
                i_e = min((i + 1)*query_size, len(node_ids))
                assert i_b < len(node_ids)

                # stop at the first chunk that yields any edge
                query = {self.endpoint_names[0]:
                         {'$in': node_ids[i_b:i_e]}}
                if self.edges.find_one(query) is not None:
                    return True
            if num_chunks > 0:
                assert i_e == len(node_ids)
        except Exception as e:
            # drop the connection on failure so a retry reconnects cleanly
            self.__disconnect()
            raise e
        return False
    def read_edges(self, roi, nodes=None, attr_filter=None, read_attrs=None):
        '''Returns a list of edges within roi.

        Arguments:

            roi (``daisy.Roi``):

                Get nodes that fall within this roi

            nodes (``dict``):

                Return edges with sources in this nodes list. If none,
                reads nodes in roi using read_nodes. Dictionary format
                is string attribute -> value, including 'id' as an attribute.

            attr_filter (``dict``):

                Only return nodes that have attribute=value for
                each attribute value pair in attr_filter.

            read_attrs (``list`` of ``string``):

                Attributes to return. Others will be ignored
        '''
        if nodes is None:
            nodes = self.read_nodes(roi)
        node_ids = list([int(np.int64(n['id'])) for n in nodes])
        logger.debug("found %d nodes", len(node_ids))
        logger.debug("looking for edges with u in %s", node_ids[:100])
        u, v = self.endpoint_names
        edges = []
        if attr_filter is None:
            attr_filter = {}
        try:
            self.__connect()
            self.__open_db()
            self.__open_collections()

            # limit query to 1M node IDs (otherwise we might exceed the 16MB
            # BSON document size limit)
            length = len(node_ids)
            query_size = 1000000
            num_chunks = (length - 1)//query_size + 1
            # one equality clause per attribute filter
            filters = []
            for attr, value in attr_filter.items():
                filters.append({attr: value})
            projection = {'_id': False}
            if read_attrs is not None:
                # always include both endpoints in the projection
                projection[u] = True
                projection[v] = True
                for attr in read_attrs:
                    projection[attr] = True
            for i in range(num_chunks):
                i_b = i*query_size
                i_e = min((i + 1)*query_size, len(node_ids))
                assert i_b < len(node_ids)

                # edges are anchored at their source endpoint
                endpoint_query = {self.endpoint_names[0]:
                                  {'$in': node_ids[i_b:i_e]}}
                if attr_filter:
                    query = {'$and': filters + [endpoint_query]}
                else:
                    query = endpoint_query
                edges += self.edges.find(query, projection)
            if num_chunks > 0:
                assert i_e == len(node_ids)
            logger.debug("found %d edges", len(edges))
            logger.debug("first 100 edges read: %s", edges[:100])
        except Exception as e:
            # drop the connection on failure so a retry reconnects cleanly
            self.__disconnect()
            raise e

        for edge in edges:
            # endpoints are stored as signed int64; expose them as uint64
            edge[u] = np.uint64(edge[u])
            edge[v] = np.uint64(edge[v])
        return edges
def __getitem__(self, roi):
return self.get_graph(roi)
    def get_graph(
            self,
            roi,
            nodes_filter=None,
            edges_filter=None,
            node_attrs=None,
            edge_attrs=None):
        ''' Return a graph within roi, optionally filtering by
        node and edge attributes.

        Arguments:

            roi (``daisy.Roi``):

                Get nodes and edges whose source is within this roi

            nodes_filter (``dict``):
            edges_filter (``dict``):

                Only return nodes/edges that have attribute=value for
                each attribute value pair in nodes/edges_filter.

            node_attrs (``list`` of ``string``):

                Only return these attributes for nodes. Other
                attributes will be ignored, but id and position attribute(s)
                will always be included. If None (default), return all attrs.

            edge_attrs (``list`` of ``string``):

                Only return these attributes for edges. Other
                attributes will be ignored, but source and target
                will always be included. If None (default), return all attrs.
        '''
        nodes = self.read_nodes(
            roi,
            attr_filter=nodes_filter,
            read_attrs=node_attrs)
        edges = self.read_edges(
            roi,
            nodes=nodes,
            attr_filter=edges_filter,
            read_attrs=edge_attrs)
        u, v = self.endpoint_names
        # Convert Mongo documents to networkx-style (id, data) and
        # (u, v, data) tuples, dropping the key fields from the data dicts.
        node_list = [
            (n['id'], self.__remove_keys(n, ['id']))
            for n in nodes]
        edge_list = [
            (e[u], e[v], self.__remove_keys(e, [u, v]))
            for e in edges]
        # pick the directed or undirected subgraph flavor
        if self.directed:
            graph = MongoDbSubDiGraph(
                self,
                roi)
        else:
            # create the subgraph
            graph = MongoDbSubGraph(
                self,
                roi)
        graph.add_nodes_from(node_list)
        graph.add_edges_from(edge_list)
        return graph
def __remove_keys(self, dictionary, keys):
'''Removes given keys from dictionary.'''
for key in keys:
del dictionary[key]
return dictionary
def __connect(self):
'''Connects to Mongo client'''
if not self.client:
self.client = MongoClient(self.host)
def __open_db(self):
'''Opens Mongo database'''
if not self.database:
self.database = self.client[self.db_name]
def __open_collections(self):
'''Opens the node, edge, and meta collections'''
if not self.nodes:
self.nodes = self.database[self.nodes_collection_name]
self.edges = self.database[self.edges_collection_name]
self.meta = self.database[self.meta_collection_name]
def __get_metadata(self):
'''Gets metadata out of the meta collection and returns it
as a dictionary.'''
self.__open_collections()
metadata = self.meta.find_one({}, {"_id": False})
return metadata
def __disconnect(self):
'''Closes the mongo client and removes references
to all collections and databases'''
self.nodes = None
self.edges = None
self.meta = None
self.database = None
if self.client:
self.client.close()
self.client = None
    def __create_node_collection(self):
        '''Creates the node collection, including indexes'''
        self.__open_db()
        self.__open_collections()
        if type(self.position_attribute) == list:
            # compound index over the per-dimension position attributes
            self.nodes.create_index(
                [
                    (key, ASCENDING)
                    for key in self.position_attribute
                ],
                name='position')
        else:
            # NOTE(review): the index key is hard-coded to 'position' here
            # (matching the hard-coded "position.%d" in __pos_query); a scalar
            # position_attribute with any other name would not be indexed --
            # confirm this is intended.
            self.nodes.create_index(
                [
                    ('position', ASCENDING)
                ],
                name='position')
        # node ids must be unique
        self.nodes.create_index(
            [
                ('id', ASCENDING)
            ],
            name='id',
            unique=True)
def __create_edge_collection(self):
'''Creates the edge collection, including indexes'''
self.__open_db()
self.__open_collections()
u, v = self.endpoint_names
self.edges.create_index(
[
(u, ASCENDING),
(v, ASCENDING)
],
name='incident',
unique=True)
    def __check_metadata(self, metadata):
        '''Checks if the provided metadata matches the existing
        metadata in the meta collection, raising ValueError on mismatch.
        Unset constructor arguments (directed/total_roi) are filled in
        from the stored metadata.'''
        if self.directed is None:
            # adopt the stored directedness
            assert metadata['directed'] is not None,\
                "Meta collection exists but does not contain "\
                "directed information"
            self.directed = metadata['directed']
        elif metadata['directed'] != self.directed:
            # NOTE(review): these error messages are missing a space between
            # the two string fragments (e.g. "matchdirected") -- confirm.
            raise ValueError((
                "Input parameter directed={} does not match"
                "directed value {} already in stored metadata")
                .format(self.directed, metadata['directed']))

        if self.total_roi is None:
            # adopt the stored total roi, if any
            if 'total_roi_offset' in metadata\
                    and 'total_roi_shape' in metadata:
                offset = metadata['total_roi_offset']
                shape = metadata['total_roi_shape']
                self.total_roi = Roi(offset, shape)
        else:
            # stored offset/shape are plain lists; compare accordingly
            offset = self.total_roi.get_offset()
            if list(offset) != metadata['total_roi_offset']:
                raise ValueError((
                    "Input total_roi offset {} does not match"
                    "total_roi offset {} already stored in metadata")
                    .format(
                        self.total_roi.get_offset(),
                        metadata['total_roi_offset']))
            if list(self.total_roi.get_shape()) != metadata['total_roi_shape']:
                raise ValueError((
                    "Input total_roi shape {} does not match"
                    "total_roi shape {} already stored in metadata")
                    .format(
                        self.total_roi.get_shape(),
                        metadata['total_roi_shape']))
    def __set_metadata(self):
        '''Sets the metadata in the meta collection to the provided values'''
        if not self.directed:
            # default is false
            self.directed = False
        meta_data = {'directed': self.directed}
        # if total_roi not specified, don't write it
        if self.total_roi:
            meta_data['total_roi_offset'] = self.total_roi.get_offset()
            meta_data['total_roi_shape'] = self.total_roi.get_shape()

        self.__open_collections()
        # It's possible that another worker has already inserted the metadata -
        # upsert to keep only one document in the collection
        self.meta.replace_one(meta_data, meta_data, upsert=True)
    def __pos_query(self, roi):
        '''Generates a mongo query selecting nodes whose position lies in
        ``roi`` (half-open: >= begin, < end). Unbounded roi sides (None) are
        translated to +/- infinity so they match everything.'''
        begin = roi.get_begin()
        end = roi.get_end()
        if type(self.position_attribute) == list:
            # one range clause per named position attribute
            assert len(self.position_attribute) == roi.dims, (
                'Number of position attributes does not match number of '
                'dimensions')
            return {
                key: {
                    k: v
                    for k, v in zip(
                        ["$gte", "$lt"],
                        [
                            b if b is not None else float("-inf"),
                            e if e is not None else float("inf"),
                        ],
                    )
                }
                for key, b, e in zip(self.position_attribute, begin, end)
            }
        else:
            # scalar case: position is stored as an array, addressed
            # per-dimension as "position.0", "position.1", ...
            # NOTE(review): the attribute name is hard-coded to 'position'
            # here, ignoring a non-default scalar position_attribute --
            # confirm intended (matches __create_node_collection).
            return {
                "position.%d"
                % d: {
                    k: v
                    for k, v in zip(
                        ["$gte", "$lt"],
                        [
                            b if b is not None else float("-inf"),
                            e if e is not None else float("inf"),
                        ],
                    )
                }
                for d, (b, e) in enumerate(zip(begin, end))
            }
class MongoDbSharedSubGraph(SharedSubGraph):
    '''A networkx-style (di)graph view of a MongoDB-backed graph, restricted
    to a region of interest. Reads happen through the in-memory graph; the
    ``write_*``/``update_*`` methods push changes back to the provider's
    node and edge collections.
    '''
    def __init__(
            self,
            graph_provider,
            roi):
        super().__init__()
        self.provider = graph_provider
        self.roi = roi
        # Each subgraph holds its own client and collection handles, taken
        # from the provider's connection settings.
        self.client = MongoClient(self.provider.host)
        self.database = self.client[self.provider.db_name]
        self.nodes_collection = self.database[
            self.provider.nodes_collection_name]
        self.edges_collection = self.database[
            self.provider.edges_collection_name]
    def write_nodes(
            self,
            roi=None,
            attributes=None,
            fail_if_exists=False,
            fail_if_not_exists=False,
            delete=False):
        '''Write this subgraph's nodes that lie inside ``roi`` (default: the
        subgraph's own roi) to the nodes collection.

        Args:
            attributes: if given, only these node attributes are written.
            fail_if_exists, fail_if_not_exists: raise if a matching document
                does / does not already exist (mutually exclusive).
            delete: not implemented.
        '''
        assert not delete, "Delete not implemented"
        assert not(fail_if_exists and fail_if_not_exists),\
            "Cannot have fail_if_exists and fail_if_not_exists simultaneously"
        if self.provider.mode == 'r':
            raise RuntimeError("Trying to write to read-only DB")
        if roi is None:
            roi = self.roi
        logger.debug("Writing nodes")
        nodes = []
        for node_id, data in self.nodes(data=True):
            # Nodes pulled in by boundary edges may lie outside roi - skip.
            if not self.__contains(roi, node_id):
                logger.debug(
                    "Skipping node {} with data {} because not in roi {}"
                    .format(node_id, data, roi))
                continue
            # int(np.int64(...)) forces a plain Python int for BSON.
            node = {
                'id': int(np.int64(node_id))
            }
            if not attributes:
                node.update(data)
            else:
                for key in data:
                    if key in attributes:
                        node[key] = data[key]
            nodes.append(node)
        if len(nodes) == 0:
            return
        try:
            self.__write(self.nodes_collection, ['id'], nodes,
                         fail_if_exists=fail_if_exists,
                         fail_if_not_exists=fail_if_not_exists,
                         delete=delete)
        except BulkWriteError as e:
            logger.error(e.details)
            raise
    def write_edges(
            self,
            roi=None,
            attributes=None,
            fail_if_exists=False,
            fail_if_not_exists=False,
            delete=False):
        '''Write this subgraph's edges whose ``u`` endpoint lies inside
        ``roi`` (default: the subgraph's own roi) to the edges collection.

        Args:
            attributes: if given, only these edge attributes are written.
            fail_if_exists, fail_if_not_exists: raise if a matching document
                does / does not already exist (mutually exclusive).
            delete: not implemented.
        '''
        assert not delete, "Delete not implemented"
        assert not(fail_if_exists and fail_if_not_exists),\
            "Cannot have fail_if_exists and fail_if_not_exists simultaneously"
        if self.provider.mode == 'r':
            raise RuntimeError("Trying to write to read-only DB")
        if roi is None:
            roi = self.roi
        logger.debug("Writing edges in %s", roi)
        edges = []
        u_name, v_name = self.provider.endpoint_names
        for u, v, data in self.edges(data=True):
            if not self.is_directed():
                # Canonical ordering for undirected edges: u is the smaller id.
                u, v = min(u, v), max(u, v)
            if not self.__contains(roi, u):
                logger.debug(
                    ("Skipping edge with u {}, v {}," +
                     "and data {} because u not in roi {}")
                    .format(u, v, data, roi))
                continue
            edge = {
                u_name: int(np.int64(u)),
                v_name: int(np.int64(v)),
            }
            if not attributes:
                edge.update(data)
            else:
                for key in data:
                    if key in attributes:
                        edge[key] = data[key]
            edges.append(edge)
        if len(edges) == 0:
            logger.debug("No edges to insert in %s", roi)
            return
        try:
            self.__write(self.edges_collection, [u_name, v_name], edges,
                         fail_if_exists=fail_if_exists,
                         fail_if_not_exists=fail_if_not_exists,
                         delete=delete)
        except BulkWriteError as e:
            logger.error(e.details)
            raise
    def update_node_attrs(
            self,
            roi=None,
            attributes=None):
        '''Update (``$set``) attributes of this subgraph's nodes inside
        ``roi`` in the nodes collection. Documents are matched by id; with
        ``attributes`` given, only those keys are updated.'''
        if self.provider.mode == 'r':
            raise RuntimeError("Trying to write to read-only DB")
        if roi is None:
            roi = self.roi
        logger.debug("Updating node attributes")
        updates = []
        for node_id, data in self.nodes(data=True):
            if not self.__contains(roi, node_id):
                logger.debug(
                    "Skipping node {} with data {} because not in roi {}"
                    .format(node_id, data, roi))
                continue
            _filter = {
                'id': int(np.int64(node_id))
            }
            if not attributes:
                update = {'$set': data}
            else:
                update = {}
                for key in data:
                    if key in attributes:
                        update[key] = data[key]
                if not update:
                    logger.info("Skipping node %s with data %s"
                                " - no attributes to update"
                                % (node_id, data))
                    continue
                update = {'$set': update}
            updates.append(UpdateOne(_filter, update))
        if len(updates) == 0:
            return
        try:
            self.nodes_collection.bulk_write(updates, ordered=False)
        except BulkWriteError as e:
            logger.error(e.details)
            raise
    def update_edge_attrs(
            self,
            roi=None,
            attributes=None):
        '''Update (``$set``) attributes of this subgraph's edges whose ``u``
        endpoint lies inside ``roi``, matching documents by endpoint pair.'''
        if self.provider.mode == 'r':
            raise RuntimeError("Trying to write to read-only DB")
        if roi is None:
            roi = self.roi
        logger.debug("Updating edge attributes")
        updates = []
        u_name, v_name = self.provider.endpoint_names
        for u, v, data in self.edges(data=True):
            if not self.is_directed():
                # Same canonical endpoint ordering used by write_edges.
                u, v = min(u, v), max(u, v)
            if not self.__contains(roi, u):
                logger.debug(
                    ("Skipping edge with u {}, v {}," +
                     "and data {} because u not in roi {}")
                    .format(u, v, data, roi))
                continue
            _filter = {
                u_name: int(np.int64(u)),
                v_name: int(np.int64(v)),
            }
            if not attributes:
                update = {'$set': data}
            else:
                update = {}
                for key in data:
                    if key in attributes:
                        update[key] = data[key]
                if not update:
                    logger.info("Skipping edge %s -> %s with data %s"
                                "- no attributes to update"
                                % (u, v, data))
                    continue
                update = {'$set': update}
            updates.append(UpdateOne(_filter, update))
        if len(updates) == 0:
            logger.info("No updates in roi %s" % roi)
            return
        try:
            self.edges_collection.bulk_write(updates, ordered=False)
        except BulkWriteError as e:
            logger.error(e.details)
            raise
    def get_connected_components(self):
        '''Returns a list of connected components as networkx (di)graphs'''
        subgraphs = []
        if self.is_directed():
            node_set_generator = nx.weakly_connected_components(self)
        else:
            node_set_generator = nx.connected_components(self)
        for node_set in node_set_generator:
            edge_set = self.edges(node_set, data=True)
            if self.is_directed():
                g = nx.DiGraph()
            else:
                g = nx.Graph()
            # Copy node attributes along with the node ids.
            g.add_nodes_from([(node, self.nodes[node]) for node in node_set])
            g.add_edges_from(edge_set)
            subgraphs.append(g)
        return subgraphs
    def __write(self, collection, match_fields, docs,
                fail_if_exists=False, fail_if_not_exists=False, delete=False):
        '''Writes documents to provided mongo collection, checking for restricitons.
        Args:
            collection (``pymongo.collection``):
                The collection to write the documents into.
            match_fields (``list`` of ``string``):
                The set of fields to match to be considered the same document.
            docs (``dict`` or ``bson``):
                The documents to insert into the collection
            fail_if_exists, fail_if_not_exists, delete (``bool``):
                see write_nodes or write_edges for explanations of these flags
        '''
        assert not delete, "Delete not implemented"
        # Build the per-document match filters from the key fields.
        match_docs = []
        for doc in docs:
            match_doc = {}
            for field in match_fields:
                match_doc[field] = doc[field]
            match_docs.append(match_doc)
        if fail_if_exists:
            self.__write_fail_if_exists(collection, match_docs, docs)
        elif fail_if_not_exists:
            self.__write_fail_if_not_exists(collection, match_docs, docs)
        else:
            self.__write_no_flags(collection, match_docs, docs)
    def __write_no_flags(self, collection, old_docs, new_docs):
        # Plain upsert: replace matching documents, insert the rest.
        bulk_query = [ReplaceOne(old, new, upsert=True)
                      for old, new in zip(old_docs, new_docs)]
        collection.bulk_write(bulk_query, ordered=False)
    def __write_fail_if_exists(self, collection, old_docs, new_docs):
        # NOTE(review): collection.find() returns a cursor object, which is
        # always truthy regardless of whether any document matches - this
        # check likely raises unconditionally; count_documents(old, limit=1)
        # would be the explicit existence test. TODO confirm against pymongo.
        for old in old_docs:
            if collection.find(old):
                raise WriteError(
                    "Found existing doc %s and fail_if_exists set to True."
                    " Aborting write for all docs." % old)
        collection.insert_many(new_docs)
    def __write_fail_if_not_exists(self, collection, old_docs, new_docs):
        # NOTE(review): same cursor-truthiness concern as in
        # __write_fail_if_exists - `not collection.find(old)` is likely
        # always False. TODO confirm against pymongo.
        for old in old_docs:
            if not collection.find(old):
                raise WriteError(
                    "Did not find existing doc %s and fail_if_not_exists "
                    "set to True. Aborting write for all docs." % old)
        bulk_query = [ReplaceOne(old, new, upsert=False)
                      for old, new in zip(old_docs, new_docs)]
        result = collection.bulk_write(bulk_query, ordered=False)
        assert len(new_docs) == result.matched_count,\
            ("Supposed to replace %s docs, but only replaced %s"
                % (len(new_docs), result.matched_count))
    def __contains(self, roi, node):
        '''Determines if the given node is inside the given roi'''
        node_data = self.nodes[node]
        # Some nodes are outside of the originally requested ROI (they have
        # been pulled in by edges leaving the ROI). These nodes have no
        # attributes, so we can't perform an inclusion test. However, we
        # know they are outside of the subgraph ROI, and therefore also
        # outside of 'roi', whatever it is.
        coordinate = []
        if type(self.provider.position_attribute) == list:
            for pos_attr in self.provider.position_attribute:
                if pos_attr not in node_data:
                    return False
                coordinate.append(node_data[pos_attr])
        else:
            if self.provider.position_attribute not in node_data:
                return False
            coordinate = node_data[self.provider.position_attribute]
        logger.debug("Checking if coordinate {} is inside roi {}"
                     .format(coordinate, roi))
        return roi.contains(Coordinate(coordinate))
    def is_directed(self):
        # Abstract here; concrete subclasses return a constant.
        raise RuntimeError("not implemented in %s" % self.name())
class MongoDbSubGraph(MongoDbSharedSubGraph, Graph):
    """Undirected MongoDB-backed subgraph."""

    def __init__(
            self,
            graph_provider,
            roi):
        # MongoDbSharedSubGraph precedes Graph in the MRO, so its
        # __init__ is the one invoked by super() here.
        super().__init__(graph_provider, roi)

    def is_directed(self):
        """This subgraph type is always undirected."""
        return False
class MongoDbSubDiGraph(MongoDbSharedSubGraph, DiGraph):
    """Directed MongoDB-backed subgraph."""

    def __init__(
            self,
            graph_provider,
            roi):
        # MongoDbSharedSubGraph precedes DiGraph in the MRO, so its
        # __init__ is the one invoked by super() here.
        super().__init__(graph_provider, roi)

    def is_directed(self):
        """This subgraph type is always directed."""
        return True
| [
"logging.getLogger",
"numpy.int64",
"networkx.DiGraph",
"networkx.Graph",
"networkx.connected_components",
"pymongo.UpdateOne",
"numpy.uint64",
"networkx.weakly_connected_components",
"pymongo.ReplaceOne",
"pymongo.MongoClient",
"pymongo.errors.WriteError"
] | [((394, 421), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (411, 421), False, 'import logging\n'), ((19336, 19367), 'pymongo.MongoClient', 'MongoClient', (['self.provider.host'], {}), '(self.provider.host)\n', (19347, 19367), False, 'from pymongo import MongoClient, ASCENDING, ReplaceOne, UpdateOne\n'), ((6266, 6287), 'numpy.uint64', 'np.uint64', (["node['id']"], {}), "(node['id'])\n", (6275, 6287), True, 'import numpy as np\n'), ((10620, 10638), 'numpy.uint64', 'np.uint64', (['edge[u]'], {}), '(edge[u])\n', (10629, 10638), True, 'import numpy as np\n'), ((10661, 10679), 'numpy.uint64', 'np.uint64', (['edge[v]'], {}), '(edge[v])\n', (10670, 10679), True, 'import numpy as np\n'), ((13130, 13152), 'pymongo.MongoClient', 'MongoClient', (['self.host'], {}), '(self.host)\n', (13141, 13152), False, 'from pymongo import MongoClient, ASCENDING, ReplaceOne, UpdateOne\n'), ((26540, 26576), 'networkx.weakly_connected_components', 'nx.weakly_connected_components', (['self'], {}), '(self)\n', (26570, 26576), True, 'import networkx as nx\n'), ((26624, 26653), 'networkx.connected_components', 'nx.connected_components', (['self'], {}), '(self)\n', (26647, 26653), True, 'import networkx as nx\n'), ((28415, 28448), 'pymongo.ReplaceOne', 'ReplaceOne', (['old', 'new'], {'upsert': '(True)'}), '(old, new, upsert=True)\n', (28425, 28448), False, 'from pymongo import MongoClient, ASCENDING, ReplaceOne, UpdateOne\n'), ((29279, 29313), 'pymongo.ReplaceOne', 'ReplaceOne', (['old', 'new'], {'upsert': '(False)'}), '(old, new, upsert=False)\n', (29289, 29313), False, 'from pymongo import MongoClient, ASCENDING, ReplaceOne, UpdateOne\n'), ((24322, 24348), 'pymongo.UpdateOne', 'UpdateOne', (['_filter', 'update'], {}), '(_filter, update)\n', (24331, 24348), False, 'from pymongo import MongoClient, ASCENDING, ReplaceOne, UpdateOne\n'), ((26032, 26058), 'pymongo.UpdateOne', 'UpdateOne', (['_filter', 'update'], {}), '(_filter, update)\n', (26041, 26058), False, 
'from pymongo import MongoClient, ASCENDING, ReplaceOne, UpdateOne\n'), ((26809, 26821), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (26819, 26821), True, 'import networkx as nx\n'), ((26860, 26870), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (26868, 26870), True, 'import networkx as nx\n'), ((28728, 28840), 'pymongo.errors.WriteError', 'WriteError', (["('Found existing doc %s and fail_if_exists set to True. Aborting write for all docs.'\n % old)"], {}), "(\n 'Found existing doc %s and fail_if_exists set to True. Aborting write for all docs.'\n % old)\n", (28738, 28840), False, 'from pymongo.errors import BulkWriteError, WriteError\n'), ((29091, 29214), 'pymongo.errors.WriteError', 'WriteError', (["('Did not find existing doc %s and fail_if_not_exists set to True. Aborting write for all docs.'\n % old)"], {}), "(\n 'Did not find existing doc %s and fail_if_not_exists set to True. Aborting write for all docs.'\n % old)\n", (29101, 29214), False, 'from pymongo.errors import BulkWriteError, WriteError\n'), ((8775, 8792), 'numpy.int64', 'np.int64', (["n['id']"], {}), "(n['id'])\n", (8783, 8792), True, 'import numpy as np\n'), ((20562, 20579), 'numpy.int64', 'np.int64', (['node_id'], {}), '(node_id)\n', (20570, 20579), True, 'import numpy as np\n'), ((22324, 22335), 'numpy.int64', 'np.int64', (['u'], {}), '(u)\n', (22332, 22335), True, 'import numpy as np\n'), ((22366, 22377), 'numpy.int64', 'np.int64', (['v'], {}), '(v)\n', (22374, 22377), True, 'import numpy as np\n'), ((23742, 23759), 'numpy.int64', 'np.int64', (['node_id'], {}), '(node_id)\n', (23750, 23759), True, 'import numpy as np\n'), ((25413, 25424), 'numpy.int64', 'np.int64', (['u'], {}), '(u)\n', (25421, 25424), True, 'import numpy as np\n'), ((25455, 25466), 'numpy.int64', 'np.int64', (['v'], {}), '(v)\n', (25463, 25466), True, 'import numpy as np\n'), ((7077, 7094), 'numpy.int64', 'np.int64', (["n['id']"], {}), "(n['id'])\n", (7085, 7094), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import pytest
from audiomentations import TanhDistortion
from audiomentations.core.utils import calculate_rms
class TestTanhDistortion(unittest.TestCase):
    """TanhDistortion must preserve dtype/shape, reduce the peak, and keep
    the RMS approximately unchanged."""

    def test_single_channel(self):
        signal = np.random.normal(0, 0.1, size=(2048,)).astype(np.float32)
        sample_rate = 16000
        transform = TanhDistortion(min_distortion=0.2, max_distortion=0.6, p=1.0)
        processed = transform(samples=signal, sample_rate=sample_rate)
        self.assertEqual(signal.dtype, processed.dtype)
        self.assertEqual(signal.shape, processed.shape)
        # Saturation compresses peaks but the RMS is renormalized.
        assert np.amax(processed) < np.amax(signal)
        assert calculate_rms(processed) == pytest.approx(calculate_rms(signal), abs=1e-3)

    def test_multichannel(self):
        num_channels = 3
        signal = np.random.normal(0, 0.1, size=(num_channels, 5555)).astype(np.float32)
        sample_rate = 16000
        transform = TanhDistortion(min_distortion=0.05, max_distortion=0.6, p=1.0)
        processed = transform(samples=signal, sample_rate=sample_rate)
        self.assertEqual(signal.dtype, processed.dtype)
        self.assertEqual(signal.shape, processed.shape)
        for channel in range(num_channels):
            # Every channel must actually change, while its RMS stays close.
            assert not np.allclose(signal[channel], processed[channel])
            assert calculate_rms(processed[channel]) == pytest.approx(
                calculate_rms(signal[channel]), abs=1e-3
            )
| [
"numpy.random.normal",
"numpy.allclose",
"audiomentations.core.utils.calculate_rms",
"audiomentations.TanhDistortion",
"numpy.amax"
] | [((353, 414), 'audiomentations.TanhDistortion', 'TanhDistortion', ([], {'min_distortion': '(0.2)', 'max_distortion': '(0.6)', 'p': '(1.0)'}), '(min_distortion=0.2, max_distortion=0.6, p=1.0)\n', (367, 414), False, 'from audiomentations import TanhDistortion\n'), ((1005, 1067), 'audiomentations.TanhDistortion', 'TanhDistortion', ([], {'min_distortion': '(0.05)', 'max_distortion': '(0.6)', 'p': '(1.0)'}), '(min_distortion=0.05, max_distortion=0.6, p=1.0)\n', (1019, 1067), False, 'from audiomentations import TanhDistortion\n'), ((642, 668), 'numpy.amax', 'np.amax', (['distorted_samples'], {}), '(distorted_samples)\n', (649, 668), True, 'import numpy as np\n'), ((671, 687), 'numpy.amax', 'np.amax', (['samples'], {}), '(samples)\n', (678, 687), True, 'import numpy as np\n'), ((703, 735), 'audiomentations.core.utils.calculate_rms', 'calculate_rms', (['distorted_samples'], {}), '(distorted_samples)\n', (716, 735), False, 'from audiomentations.core.utils import calculate_rms\n'), ((247, 285), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(2048,)'}), '(0, 0.1, size=(2048,))\n', (263, 285), True, 'import numpy as np\n'), ((766, 788), 'audiomentations.core.utils.calculate_rms', 'calculate_rms', (['samples'], {}), '(samples)\n', (779, 788), False, 'from audiomentations.core.utils import calculate_rms\n'), ((886, 937), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(num_channels, 5555)'}), '(0, 0.1, size=(num_channels, 5555))\n', (902, 937), True, 'import numpy as np\n'), ((1341, 1386), 'numpy.allclose', 'np.allclose', (['samples[i]', 'distorted_samples[i]'], {}), '(samples[i], distorted_samples[i])\n', (1352, 1386), True, 'import numpy as np\n'), ((1406, 1441), 'audiomentations.core.utils.calculate_rms', 'calculate_rms', (['distorted_samples[i]'], {}), '(distorted_samples[i])\n', (1419, 1441), False, 'from audiomentations.core.utils import calculate_rms\n'), ((1476, 1501), 'audiomentations.core.utils.calculate_rms', 
'calculate_rms', (['samples[i]'], {}), '(samples[i])\n', (1489, 1501), False, 'from audiomentations.core.utils import calculate_rms\n')] |
import os
import urllib.request
import csv
import yaml
from flask import Flask, request, jsonify
import numpy as np
from package.preprocessing import read_data, preprocess
from package.model_utils import train_model
from package.app_util import json_to_row
app = Flask(__name__)
# read in configuration (YAML with random_seed, model_params, app host/port)
with open('./deploy/service_cfg.yaml', 'r') as cfg_file:
    cfg = yaml.safe_load(cfg_file)
DATA_PATH = './data/'
DATA_FILE = 'wine_data.csv'
# seeded RNG so preprocessing (train/test split) is reproducible
random_state = np.random.RandomState(cfg['random_seed'])
# we are going to keep our base data here
if not os.path.exists(DATA_PATH):
    os.mkdir(DATA_PATH)
# get the data (downloaded fresh from the UCI repository on every start)
urllib.request.urlretrieve(
    'https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data',
    filename=DATA_PATH + DATA_FILE
)
# get data and preprocess: read_data yields (X, y), preprocess splits and scales
preprocessing = preprocess(
    *read_data(DATA_PATH + DATA_FILE), random_state
)
X_train, X_test, y_train, y_test = preprocessing['data']
scaler = preprocessing['scaler']
# train at import time; `results` holds at least the fitted 'model'
# (and presumably 'val_perf' - see main_page; TODO confirm in train_model)
results = train_model(
    X_train,
    y_train,
    validation_data=(X_test, y_test),
    params=cfg.get('model_params', None)
)
# create the app routes
# service status message on the main page
@app.route('/')
def main_page():
    """
    Status message served at the service root.

    Returns:
        str: "Model service is active.", extended with the validation
        performance when the training results contain a 'val_perf' entry.
    """
    main_message = 'Model service is active.'
    if 'val_perf' in results:
        # Bug fix: the original f-string was f'...{0}.', which interpolates
        # the literal 0 instead of the stored metric (and lacked a space).
        main_message += f" Validation performance: {results['val_perf']}."
    return main_message
# create predict POST method
@app.route('/predict', methods=['POST'])
def serve_model():
    """Score one observation posted to /predict and return {'score': ...}."""
    payload = request.get_data()
    # Convert the raw JSON body into a single feature row, then scale it
    # with the scaler fitted on the training data.
    row = json_to_row(payload)
    row = scaler.transform(row)
    prediction = results['model'].predict(row)
    return jsonify({'score': prediction[0]})
# bind address taken from the 'app' section of the service config
host = cfg.get('app').get('host')
port = cfg.get('app').get('port')
if __name__ == '__main__':
    # run the Flask development server when executed as a script
    app.run(host=host, port=port)
| [
"os.path.exists",
"flask.Flask",
"flask.request.get_data",
"package.preprocessing.read_data",
"yaml.safe_load",
"package.app_util.json_to_row",
"os.mkdir",
"numpy.random.RandomState",
"flask.jsonify"
] | [((267, 282), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (272, 282), False, 'from flask import Flask, request, jsonify\n'), ((466, 507), 'numpy.random.RandomState', 'np.random.RandomState', (["cfg['random_seed']"], {}), "(cfg['random_seed'])\n", (487, 507), True, 'import numpy as np\n'), ((375, 399), 'yaml.safe_load', 'yaml.safe_load', (['cfg_file'], {}), '(cfg_file)\n', (389, 399), False, 'import yaml\n'), ((558, 583), 'os.path.exists', 'os.path.exists', (['DATA_PATH'], {}), '(DATA_PATH)\n', (572, 583), False, 'import os\n'), ((589, 608), 'os.mkdir', 'os.mkdir', (['DATA_PATH'], {}), '(DATA_PATH)\n', (597, 608), False, 'import os\n'), ((1542, 1560), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (1558, 1560), False, 'from flask import Flask, request, jsonify\n'), ((1572, 1592), 'package.app_util.json_to_row', 'json_to_row', (['content'], {}), '(content)\n', (1583, 1592), False, 'from package.app_util import json_to_row\n'), ((1679, 1705), 'flask.jsonify', 'jsonify', (["{'score': out[0]}"], {}), "({'score': out[0]})\n", (1686, 1705), False, 'from flask import Flask, request, jsonify\n'), ((834, 866), 'package.preprocessing.read_data', 'read_data', (['(DATA_PATH + DATA_FILE)'], {}), '(DATA_PATH + DATA_FILE)\n', (843, 866), False, 'from package.preprocessing import read_data, preprocess\n')] |
import unittest
import numpy
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
    'in_shape': [(2, 3, 8, 6), (2, 1, 4, 6)],
}))
class TestResizeImagesForwardIdentity(unittest.TestCase):
    """Resizing to the input resolution must reproduce the input exactly."""

    def setUp(self):
        # Random float32 batch with the parameterized NCHW shape.
        batch = numpy.random.uniform(size=self.in_shape)
        self.x = batch.astype(numpy.float32)

    def check_forward(self, x, output_shape):
        resized = functions.resize_images(x, output_shape)
        testing.assert_allclose(resized.data, x)

    def test_forward_cpu(self):
        self.check_forward(self.x, output_shape=self.in_shape[2:])

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), output_shape=self.in_shape[2:])
class TestResizeImagesForwardDownScale(unittest.TestCase):
    """Halving a 4x4 image made of constant 2x2 quadrants must yield one
    pixel per quadrant."""

    in_shape = (2, 2, 4, 4)
    output_shape = (2, 2, 2, 2)

    def setUp(self):
        # Quadrant values laid out as [[top-left, top-right],
        #                              [bottom-left, bottom-right]].
        quadrants = numpy.array([[1, 3], [2, 4]], dtype=numpy.float32)
        # Expand each quadrant value into a constant 2x2 block.
        self.x = numpy.zeros(self.in_shape, dtype=numpy.float32)
        self.x[...] = numpy.kron(
            quadrants, numpy.ones((2, 2), dtype=numpy.float32))
        # Expected output keeps exactly one sample per quadrant.
        self.out = numpy.zeros(self.output_shape, dtype=numpy.float32)
        self.out[...] = quadrants

    def check_forward(self, x, output_shape):
        resized = functions.resize_images(x, output_shape)
        testing.assert_allclose(resized.data, self.out)

    def test_forward_cpu(self):
        self.check_forward(self.x, output_shape=self.output_shape[2:])

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(
            cuda.to_gpu(self.x), output_shape=self.output_shape[2:])
class TestResizeImagesForwardUpScale(unittest.TestCase):
    """Upscaling 2x2 -> 3x3 must bilinearly interpolate between corners."""

    in_shape = (1, 1, 2, 2)
    output_shape = (1, 1, 3, 3)

    def setUp(self):
        # Corner values: [[1, 3], [2, 4]].
        self.x = numpy.zeros(self.in_shape, dtype=numpy.float32)
        self.x[0, 0] = numpy.array([[1, 3], [2, 4]], dtype=numpy.float32)
        # Bilinear interpolation fills the midpoints between corners.
        self.out = numpy.zeros(self.output_shape, dtype=numpy.float32)
        self.out[0, 0] = numpy.array(
            [[1., 2., 3.],
             [1.5, 2.5, 3.5],
             [2., 3., 4.]],
            dtype=numpy.float32)

    def check_forward(self, x, output_shape):
        resized = functions.resize_images(x, output_shape)
        testing.assert_allclose(resized.data, self.out)

    def test_forward_cpu(self):
        self.check_forward(self.x, output_shape=self.output_shape[2:])

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(
            cuda.to_gpu(self.x), output_shape=self.output_shape[2:])
@testing.parameterize(*testing.product({
    'in_shape': [(2, 3, 8, 6), (2, 1, 4, 6)],
    'output_shape': [(10, 5), (3, 4)]
}))
class TestResizeImagesBackward(unittest.TestCase):
    """Numerical gradient check of ResizeImages for up- and down-scaling."""

    def setUp(self):
        def random_f32(shape):
            return numpy.random.uniform(size=shape).astype(numpy.float32)

        self.x = random_f32(self.in_shape)
        # Upstream gradients have the batch/channel dims of the input and
        # the spatial dims of the resized output.
        grad_shape = self.in_shape[:2] + self.output_shape
        self.grads = random_f32(grad_shape)

    def check_backward(self, x, output_shape, grads):
        gradient_check.check_backward(
            functions.ResizeImages(output_shape),
            (x,), (grads,), dtype='d', atol=1e-2, rtol=1e-3, eps=1e-5)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.output_shape, self.grads)

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), self.output_shape,
                            cuda.to_gpu(self.grads))
# Discover and run this module's tests when executed directly.
testing.run_module(__name__, __file__)
| [
"chainer.functions.ResizeImages",
"chainer.testing.condition.retry",
"chainer.testing.run_module",
"chainer.testing.product",
"numpy.zeros",
"numpy.array",
"chainer.functions.resize_images",
"numpy.random.uniform",
"chainer.testing.assert_allclose",
"chainer.cuda.to_gpu"
] | [((3757, 3795), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (3775, 3795), False, 'from chainer import testing\n'), ((3443, 3461), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (3458, 3461), False, 'from chainer.testing import condition\n'), ((3582, 3600), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (3597, 3600), False, 'from chainer.testing import condition\n'), ((545, 585), 'chainer.functions.resize_images', 'functions.resize_images', (['x', 'output_shape'], {}), '(x, output_shape)\n', (568, 585), False, 'from chainer import functions\n'), ((594, 628), 'chainer.testing.assert_allclose', 'testing.assert_allclose', (['y.data', 'x'], {}), '(y.data, x)\n', (617, 628), False, 'from chainer import testing\n'), ((245, 304), 'chainer.testing.product', 'testing.product', (["{'in_shape': [(2, 3, 8, 6), (2, 1, 4, 6)]}"], {}), "({'in_shape': [(2, 3, 8, 6), (2, 1, 4, 6)]})\n", (260, 304), False, 'from chainer import testing\n'), ((1017, 1064), 'numpy.zeros', 'numpy.zeros', (['self.in_shape'], {'dtype': 'numpy.float32'}), '(self.in_shape, dtype=numpy.float32)\n', (1028, 1064), False, 'import numpy\n'), ((1217, 1268), 'numpy.zeros', 'numpy.zeros', (['self.output_shape'], {'dtype': 'numpy.float32'}), '(self.output_shape, dtype=numpy.float32)\n', (1228, 1268), False, 'import numpy\n'), ((1460, 1500), 'chainer.functions.resize_images', 'functions.resize_images', (['x', 'output_shape'], {}), '(x, output_shape)\n', (1483, 1500), False, 'from chainer import functions\n'), ((1509, 1550), 'chainer.testing.assert_allclose', 'testing.assert_allclose', (['y.data', 'self.out'], {}), '(y.data, self.out)\n', (1532, 1550), False, 'from chainer import testing\n'), ((1958, 2005), 'numpy.zeros', 'numpy.zeros', (['self.in_shape'], {'dtype': 'numpy.float32'}), '(self.in_shape, dtype=numpy.float32)\n', (1969, 2005), False, 'import numpy\n'), ((2150, 2201), 
'numpy.zeros', 'numpy.zeros', (['self.output_shape'], {'dtype': 'numpy.float32'}), '(self.output_shape, dtype=numpy.float32)\n', (2161, 2201), False, 'import numpy\n'), ((2233, 2323), 'numpy.array', 'numpy.array', (['[[1.0, 2.0, 3.0], [1.5, 2.5, 3.5], [2.0, 3.0, 4.0]]'], {'dtype': 'numpy.float32'}), '([[1.0, 2.0, 3.0], [1.5, 2.5, 3.5], [2.0, 3.0, 4.0]], dtype=\n numpy.float32)\n', (2244, 2323), False, 'import numpy\n'), ((2423, 2463), 'chainer.functions.resize_images', 'functions.resize_images', (['x', 'output_shape'], {}), '(x, output_shape)\n', (2446, 2463), False, 'from chainer import functions\n'), ((2472, 2513), 'chainer.testing.assert_allclose', 'testing.assert_allclose', (['y.data', 'self.out'], {}), '(y.data, self.out)\n', (2495, 2513), False, 'from chainer import testing\n'), ((2787, 2885), 'chainer.testing.product', 'testing.product', (["{'in_shape': [(2, 3, 8, 6), (2, 1, 4, 6)], 'output_shape': [(10, 5), (3, 4)]}"], {}), "({'in_shape': [(2, 3, 8, 6), (2, 1, 4, 6)], 'output_shape':\n [(10, 5), (3, 4)]})\n", (2802, 2885), False, 'from chainer import testing\n'), ((803, 822), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (814, 822), False, 'from chainer import cuda\n'), ((1742, 1761), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (1753, 1761), False, 'from chainer import cuda\n'), ((2705, 2724), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (2716, 2724), False, 'from chainer import cuda\n'), ((3328, 3364), 'chainer.functions.ResizeImages', 'functions.ResizeImages', (['output_shape'], {}), '(output_shape)\n', (3350, 3364), False, 'from chainer import functions\n'), ((3662, 3681), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (3673, 3681), False, 'from chainer import cuda\n'), ((3730, 3753), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.grads'], {}), '(self.grads)\n', (3741, 3753), False, 'from chainer import cuda\n'), ((410, 450), 'numpy.random.uniform', 
'numpy.random.uniform', ([], {'size': 'self.in_shape'}), '(size=self.in_shape)\n', (430, 450), False, 'import numpy\n'), ((2983, 3023), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'self.in_shape'}), '(size=self.in_shape)\n', (3003, 3023), False, 'import numpy\n'), ((3144, 3186), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'output_shape_4d'}), '(size=output_shape_4d)\n', (3164, 3186), False, 'import numpy\n')] |
import sys
import numpy as np
import io
from termcolor import colored, cprint
import glob
import os
import subprocess
import shutil
import xml.etree.ElementTree as ET
import itk
import vtk
import vtk.util.numpy_support
from CommonUtils import *
def rename(inname, outDir, extension_addition, extension_change=''):
    """
    Map an input file path into ``outDir``, optionally tagging the name.

    Args:
        inname: input file path.
        outDir: directory the output file should live in.
        extension_addition: tag inserted before the file extension
            (e.g. ``'isores'`` turns ``x.nrrd`` into ``x.isores.nrrd``);
            pass ``''`` to skip.
        extension_change: replacement extension (e.g. ``'.txt'``); pass
            ``''`` to keep the original extension.

    Returns:
        The derived output path.
    """
    initPath = os.path.dirname(inname)
    outname = inname.replace(initPath, outDir)
    current_extension = "." + inname.split(".")[-1]
    if extension_addition != '':
        outname = outname.replace(current_extension, '.' + extension_addition + current_extension)
    if extension_change != '':
        outname = outname.replace(current_extension, extension_change)
    # Bug fix: cprint was previously handed a tuple, which printed the tuple
    # repr ("('Input Filename : ', ...)") instead of a readable message.
    cprint("Input Filename : " + inname, 'cyan')
    cprint("Output Filename : " + outname, 'yellow')
    print("######################################\n")
    return outname
def applyIsotropicResampling(outDir, inDataList, isoSpacing=1.0, recenter=True, isBinary=True):
    """
    Resample every image in ``inDataList`` to isotropic spacing via the
    ``shapeworks`` CLI, writing results into ``outDir``.

    Returns the list of output file paths.
    """
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    outDataList = []
    for inname in inDataList:
        print("\n########### Resampling ###############")
        outname = rename(inname, outDir, 'isores')
        outDataList.append(outname)
        # Binary volumes are antialiased before resampling and re-binarized
        # afterwards so the label boundary survives interpolation.
        cmd = ["shapeworks", "read-image", "--name", inname]
        if isBinary:
            cmd.append("antialias")
        cmd += ["isoresample", "--isospacing", str(isoSpacing)]
        if isBinary:
            cmd.append("binarize")
        if recenter:
            cmd.append("recenter-image")
        cmd += ["write-image", "--name", outname]
        print("Calling cmd:\n" + " ".join(cmd))
        subprocess.check_call(cmd)
    return outDataList
def getOrigin(inname):
    """
    Query the origin of an image via the ``WriteImageInfoToText`` tool.

    The tool writes three sidecar text files (origin/spacing/size); the
    origin file is read and all three are removed before returning.

    Returns:
        list[str]: lines of the origin file (one coordinate per line, plus
        a trailing empty entry from the final newline).
    """
    infoPrefix = "_".join(inname.split("_")[:3])
    cmd = ["WriteImageInfoToText", "--inFilename", inname, "--outPrefix", infoPrefix]
    subprocess.check_call(cmd)
    # Bug fix: read via a context manager so the handle is closed even if
    # reading raises (the original leaked the file object on error).
    with open(infoPrefix + "_origin.txt", "r") as origin_file:
        origin = origin_file.read().split("\n")
    # Remove every sidecar file the info writer produced.
    for suffix in ("_origin.txt", "_spacing.txt", "_size.txt"):
        os.remove(infoPrefix + suffix)
    return origin
def center(outDir, inDataList):
    """
    Recenter each image in ``inDataList`` into ``outDir`` and record the
    applied translation next to each output.

    For every output ``x.nrrd`` a sibling ``x.translation.txt`` is written
    containing the space-separated origin shift (original - new).

    Returns:
        list of output image paths.
    """
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    outDataList = []
    for inname in inDataList:
        # center
        print("\n########### Centering ###############")
        outname = rename(inname, outDir, 'center')
        outDataList.append(outname)
        cmd = ["shapeworks", "read-image", "--name", inname]
        cmd.extend(["recenter-image"])
        cmd.extend(["write-image", "--name", outname])
        print("Calling cmd:\n" + " ".join(cmd))
        subprocess.check_call(cmd)
        # Get translation: the shift between original and recentered origins.
        original_origin = getOrigin(inname)
        new_origin = getOrigin(outname)
        translation = [float(original_origin[dim]) - float(new_origin[dim])
                       for dim in range(0, 3)]
        # Bug fix: write through a context manager so the handle is always
        # closed; the space-separated format matches the original
        # str(list).replace(...) output exactly.
        translation_file = outname[:-4] + "translation.txt"
        with open(translation_file, "w+") as out_trans:
            out_trans.write(" ".join(str(t) for t in translation))
    return outDataList
def applyPadding(outDir, inDataList, padSize, padValue=0):
    """
    Pad every image in ``inDataList`` by ``padSize`` voxels filled with
    ``padValue``, writing results into ``outDir``.

    Returns the list of output file paths.
    """
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    outDataList = []
    for inname in inDataList:
        print("\n########### Padding ###############")
        outname = rename(inname, outDir, 'pad')
        outDataList.append(outname)
        cmd = ["shapeworks", "read-image", "--name", inname,
               "pad", "--padding", str(padSize), "--value", str(padValue),
               "write-image", "--name", outname]
        print("Calling cmd:\n" + " ".join(cmd))
        subprocess.check_call(cmd)
    return outDataList
def applyCOMAlignment(outDir, inDataListSeg, raw=None):
    """
    This function takes in a filelist and produces the center of mass aligned
    files in the appropriate directory. If ``raw`` is provided, then it also
    applies the same transformation on the corresponding list of raw files
    (MRI/CT ...).

    Returns [outDataListSeg, outDataListImg] when raw files are given,
    otherwise just outDataListSeg.
    """
    # NOTE: the default was a mutable `raw=[]`; None is equivalent under the
    # truthiness test below and avoids the shared-mutable-default pitfall.
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    if raw:
        inDataListImg = raw
        rawoutDir = os.path.join(outDir, 'images')
        if not os.path.exists(rawoutDir):
            os.makedirs(rawoutDir)
        binaryoutDir = os.path.join(outDir, 'segmentations')
        if not os.path.exists(binaryoutDir):
            os.makedirs(binaryoutDir)
        outDataListSeg = []
        outDataListImg = []
        # process the segmentation/raw pairs in lockstep
        for innameSeg, innameImg in zip(inDataListSeg, inDataListImg):
            print("\n############# COM Alignment ###############")
            outnameSeg = rename(innameSeg, binaryoutDir, 'com')
            # the external tool writes the applied transform to this file
            paramname = outnameSeg.replace('.nrrd', '.txt')
            outDataListSeg.append(outnameSeg)
            outnameImg = rename(innameImg, rawoutDir, 'com')
            outDataListImg.append(outnameImg)
            execCommand = ["TranslateShapeToImageOrigin", "--inFilename", innameSeg, "--outFilename", outnameSeg, "--useCenterOfMass", "1",
                           "--parameterFilename", paramname, "--MRIinFilename", innameImg, "--MRIoutFilename", outnameImg]
            subprocess.check_call(execCommand)
        return [outDataListSeg, outDataListImg]
    else:
        outDataListSeg = []
        for inname in inDataListSeg:
            print("\n############# COM Alignment ###############")
            outname = rename(inname, outDir, 'com')
            paramname = outname.replace('.nrrd', '.txt')
            outDataListSeg.append(outname)
            execCommand = ["TranslateShapeToImageOrigin", "--inFilename", inname, "--outFilename", outname, "--useCenterOfMass", "1",
                           "--parameterFilename", paramname]
            subprocess.check_call(execCommand)
        return outDataListSeg
def create_tpSmooth_xml(xmlfilename, smoothingIterations, ref_dtnrrdfilename, ref_isonrrdfilename, ref_tpdtnrrdfilename):
    """Write the XML parameter file for TopologyPreservingSmoothing.

    Maps the input distance transform (ref_dtnrrdfilename) to the smoothed
    outputs (ref_isonrrdfilename / ref_tpdtnrrdfilename) with fixed
    level-set smoothing parameters; only smoothingIterations varies.
    """
    root = ET.Element('sample')
    propagationScale = ET.SubElement(root, 'propagationScale')
    propagationScale.text = "\n 20.0 \n"
    alpha = ET.SubElement(root, 'alpha')
    alpha.text = "\n 10.5 \n"
    beta = ET.SubElement(root, 'beta')
    beta.text = "\n 10.0 \n"
    isoVal = ET.SubElement(root, 'isoValue')
    isoVal.text = "\n 0.0 \n"
    smoothing_iterations = ET.SubElement(root, 'smoothing_iterations')
    smoothing_iterations.text = "\n " + str(smoothingIterations) + " \n"
    verbose = ET.SubElement(root, 'verbose')
    verbose.text = "\n 1 \n"
    inputs = ET.SubElement(root, 'inputs')
    inputs.text = "\n " + ref_dtnrrdfilename + " \n"
    outputs = ET.SubElement(root, 'outputs')
    outputs.text = "\n " + ref_isonrrdfilename + " \n"
    dtFiles = ET.SubElement(root, 'dtFiles')
    dtFiles.text = "\n " + ref_tpdtnrrdfilename + " \n"
    data = ET.tostring(root, encoding='unicode')
    # write via a context manager so the handle is closed deterministically
    # (the original left the file open and relied on GC to flush it)
    with open(xmlfilename, "w+") as xml_file:
        xml_file.write(data)
def FindReferenceImage(inDataList):
    """
    Find the median file among the inputs: the sample whose zero-padded
    image is closest (in the L2 sense) to the mean of all padded images.
    """
    images = []
    shapes = []
    for fname in inDataList:
        arr = itk.GetArrayFromImage(itk.imread(fname))
        images.append(arr)
        shapes.append(arr.shape)
    # common target shape: the per-axis maximum over all inputs
    target = np.max(shapes, axis=0)
    padded = []
    for arr in images:
        pad_widths = tuple((0, target[d] - arr.shape[d]) for d in range(3))
        padded.append(np.pad(arr, pad_widths, mode='constant', constant_values=0))
    stack = np.asarray(padded)
    # mean image over the cohort, then pick the closest sample to it
    mean_img = np.sum(stack, axis=0) / len(inDataList)
    distances = np.sqrt(np.sum((stack - mean_img) ** 2, axis=(1, 2, 3)))
    idx = np.argmin(distances)
    print(" ")
    print("############# Reference File #############")
    cprint(("The reference file for rigid alignment is found"), 'green')
    cprint(("Output Median Filename : ", inDataList[idx]), 'yellow')
    print("###########################################")
    print(" ")
    return inDataList[idx]
def applyRigidAlignment(parentDir, inDataListSeg, inDataListImg, refFile, antialiasIterations=20,
                        smoothingIterations=1, isoValue=0, icpIterations=10, processRaw=False):
    """
    This function takes in filelists (binary and raw) and produces rigidly
    aligned files in the appropriate directory. If the processRaw flag is set
    True, then it also applies the same transformation on the corresponding
    list of raw files (MRI/CT ...).

    The reference segmentation is converted to a smoothed distance transform
    once; each input is converted the same way and ICP-registered onto it.
    Returns [outSegDataList, outRawDataList] when processRaw, else the list
    of aligned segmentation filenames.
    """
    outDir = os.path.join(parentDir, 'aligned')
    transoutDir = os.path.join(outDir, 'transformations')
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    if not os.path.exists(transoutDir):
        os.makedirs(transoutDir)
    # identify the reference scan
    refDir = os.path.join(outDir, 'reference')
    if not os.path.exists(refDir):
        os.makedirs(refDir)
    initPath = os.path.dirname(refFile)
    newRefFile = refFile.replace(initPath, refDir)
    ref_dtnrrdfilename = newRefFile.replace('.nrrd', '.DT.nrrd')
    ref_tpdtnrrdfilename = newRefFile.replace('.nrrd', '.tpSmoothDT.nrrd')
    ref_isonrrdfilename = newRefFile.replace('.nrrd', '.ISO.nrrd')
    ref_binnrrdfilename = newRefFile.replace('.nrrd', '.BIN.nrrd')
    # reference image processing: label extraction, hole filling,
    # antialiasing, fast marching, topology-preserving smoothing, threshold
    execCommand = ["ExtractGivenLabelImage", "--inFilename", refFile, "--outFilename", refFile, "--labelVal", " 1"]
    subprocess.check_call(execCommand)
    execCommand = ["CloseHoles", "--inFilename", refFile, "--outFilename", refFile]
    subprocess.check_call(execCommand)
    execCommand = ["shapeworks", "read-image", "--name", refFile, "antialias", "--numiterations", str(antialiasIterations), "write-image", "--name", ref_dtnrrdfilename]
    subprocess.check_call(execCommand)
    execCommand = ["FastMarching", "--inFilename", ref_dtnrrdfilename, "--outFilename", ref_dtnrrdfilename, "--isoValue", str(isoValue)]
    subprocess.check_call(execCommand)
    xmlfilename = newRefFile.replace('.nrrd', '.tpSmoothDT.xml')
    create_tpSmooth_xml(xmlfilename, smoothingIterations, ref_dtnrrdfilename, ref_isonrrdfilename, ref_tpdtnrrdfilename)
    create_cpp_xml(xmlfilename, xmlfilename)
    execCommand = ["TopologyPreservingSmoothing", xmlfilename]
    subprocess.check_call(execCommand)
    execCommand = ["ThresholdImages", "--inFilename", ref_tpdtnrrdfilename, "--outFilename", ref_binnrrdfilename, "--lowerThresholdLevel", "-0.000001"]
    subprocess.check_call(execCommand)
    if processRaw:
        rawoutDir = os.path.join(outDir, 'images')
        # BUGFIX: the original called os.path.join(outDir + 'segmentations'),
        # concatenating the strings instead of joining path components
        binaryoutDir = os.path.join(outDir, 'segmentations')
        if not os.path.exists(rawoutDir):
            os.makedirs(rawoutDir)
        if not os.path.exists(binaryoutDir):
            os.makedirs(binaryoutDir)
        outRawDataList = []
        outSegDataList = []
        for seginname, rawinname in zip(inDataListSeg, inDataListImg):
            initPath = os.path.dirname(seginname)
            segoutname = seginname.replace(initPath, binaryoutDir)
            segoutname = segoutname.replace('.nrrd', '.aligned.nrrd')
            transoutname = seginname.replace(initPath, transoutDir)
            transformation = transoutname.replace('.nrrd', '.transformationMatrix.txt')
            outSegDataList.append(segoutname)
            initPath = os.path.dirname(rawinname)
            rawoutname = rawinname.replace(initPath, rawoutDir)
            rawoutname = rawoutname.replace('.nrrd', '.aligned.nrrd')
            outRawDataList.append(rawoutname)
            dtnrrdfilename = segoutname.replace('.aligned.nrrd', '.aligned.DT.nrrd')
            tpdtnrrdfilename = segoutname.replace('.aligned.nrrd', '.aligned.tpSmoothDT.nrrd')
            isonrrdfilename = segoutname.replace('.aligned.nrrd', '.aligned.ISO.nrrd')
            binnrrdfilename = segoutname.replace('.aligned.nrrd', '.aligned.BIN.nrrd')
            print(" ")
            print("############# Rigid Alignment #############")
            cprint(("Input Segmentation Filename : ", seginname), 'cyan')
            cprint(("Input Reference Filename : ", refFile), 'cyan')
            cprint(("Input Raw Filename : ", rawinname), 'cyan')
            cprint(("Output Segmentation Filename : ", segoutname), 'yellow')
            cprint(("Output Raw Filename : ", rawoutname), 'yellow')
            cprint(("Output Transformation Matrix : ", transformation), 'yellow')
            print("###########################################")
            print(" ")
            execCommand = ["ExtractGivenLabelImage", "--inFilename", seginname, "--outFilename", seginname, "--labelVal", "1"]
            subprocess.check_call(execCommand)
            execCommand = ["CloseHoles", "--inFilename", seginname, "--outFilename", seginname]
            subprocess.check_call(execCommand)
            execCommand = ["shapeworks", "read-image", "--name", seginname, "antialias", "--numiterations", str(antialiasIterations), "write-image", "--name", dtnrrdfilename]
            subprocess.check_call(execCommand)
            execCommand = ["FastMarching", "--inFilename", dtnrrdfilename, "--outFilename", dtnrrdfilename, "--isoValue", str(isoValue)]
            subprocess.check_call(execCommand)
            xmlfilename = segoutname.replace('.aligned.nrrd', '.aligned.tpSmoothDT.xml')
            create_tpSmooth_xml(xmlfilename, smoothingIterations, dtnrrdfilename, isonrrdfilename, tpdtnrrdfilename)
            create_cpp_xml(xmlfilename, xmlfilename)
            execCommand = ["TopologyPreservingSmoothing", xmlfilename]
            subprocess.check_call(execCommand)
            # register onto the reference and apply the transform to seg + raw
            execCommand = ["ICPRigid3DImageRegistration", "--targetDistanceMap", ref_tpdtnrrdfilename, "--sourceDistanceMap", tpdtnrrdfilename, "--sourceSegmentation", seginname, "--sourceRaw", rawinname, "--icpIterations", str(
                icpIterations), "--visualizeResult", "0", "--solutionSegmentation", segoutname, "--solutionRaw", rawoutname, "--solutionTransformation", transformation]
            subprocess.check_call(execCommand)
        return [outSegDataList, outRawDataList]
    else:
        outDataList = []
        for inname in inDataListSeg:
            initPath = os.path.dirname(inname)
            outname = inname.replace(initPath, outDir)
            outname = outname.replace('.nrrd', '.aligned.nrrd')
            transoutname = inname.replace(initPath, transoutDir)
            # BUGFIX: was '.tarnsormationMatrix.txt' — inconsistent misspelled
            # name vs the processRaw branch
            transformation = transoutname.replace('.nrrd', '.transformationMatrix.txt')
            outDataList.append(outname)
            dtnrrdfilename = outname.replace('.aligned.nrrd', '.aligned.DT.nrrd')
            tpdtnrrdfilename = outname.replace('.aligned.nrrd', '.aligned.tpSmoothDT.nrrd')
            isonrrdfilename = outname.replace('.aligned.nrrd', '.aligned.ISO.nrrd')
            binnrrdfilename = outname.replace('.aligned.nrrd', '.aligned.BIN.nrrd')
            print(" ")
            print("############# Rigid Alignment #############")
            cprint(("Input Segmentation Filename : ", inname), 'cyan')
            cprint(("Input Reference Filename : ", refFile), 'cyan')
            cprint(("Output Segmentation Filename : ", outname), 'yellow')
            cprint(("Output Transformation Matrix : ", transformation), 'yellow')
            print("###########################################")
            print(" ")
            execCommand = ["ExtractGivenLabelImage", "--inFilename", inname, "--outFilename", inname, "--labelVal", "1"]
            subprocess.check_call(execCommand)
            execCommand = ["CloseHoles", "--inFilename", inname, "--outFilename", inname]
            subprocess.check_call(execCommand)
            execCommand = ["shapeworks", "read-image", "--name", inname, "antialias", "--numiterations", str(antialiasIterations), "write-image", "--name", dtnrrdfilename]
            subprocess.check_call(execCommand)
            execCommand = ["FastMarching", "--inFilename", dtnrrdfilename, "--outFilename", dtnrrdfilename, "--isoValue", str(isoValue)]
            subprocess.check_call(execCommand)
            xmlfilename = outname.replace('.aligned.nrrd', '.aligned.tpSmoothDT.xml')
            create_tpSmooth_xml(xmlfilename, smoothingIterations, dtnrrdfilename, isonrrdfilename, tpdtnrrdfilename)
            create_cpp_xml(xmlfilename, xmlfilename)
            execCommand = ["TopologyPreservingSmoothing", xmlfilename]
            subprocess.check_call(execCommand)
            execCommand = ["ICPRigid3DImageRegistration", "--targetDistanceMap", ref_tpdtnrrdfilename, "--sourceDistanceMap", tpdtnrrdfilename, "--sourceSegmentation", inname, "--icpIterations", str(
                icpIterations), "--visualizeResult", "0", "--solutionSegmentation", outname, "--solutionTransformation", transformation]
            subprocess.check_call(execCommand)
        return outDataList
def applyCropping(parentDir, inDataListSeg, inDataListImg, paddingSize=10, processRaw=False):
    """
    Crop all inputs to the largest bounding box discovered over the whole
    segmentation list (padded by paddingSize voxels). When processRaw is
    True the matching raw images are cropped with the same box.
    """
    outDir = os.path.join(parentDir, 'cropped')
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    cropinfoDir = os.path.join(outDir, 'crop_info')
    if not os.path.exists(cropinfoDir):
        os.makedirs(cropinfoDir)
    # write every segmentation filename into one list file for the BB tool
    txtfile = os.path.join(cropinfoDir, "_dataList.txt")
    with open(txtfile, 'w') as filehandle:
        for listitem in inDataListSeg:
            filehandle.write('%s\n' % listitem)
    outPrefix = os.path.join(cropinfoDir, "largest_bounding_box")
    cmd = ["FindLargestBoundingBox", "--paddingSize", str(paddingSize),
           "--inFilename", txtfile, "--outPrefix", outPrefix]
    subprocess.check_call(cmd)
    # read all the bounding box files produced by the tool
    bb0 = np.loadtxt(outPrefix + "_bb0.txt")
    bb1 = np.loadtxt(outPrefix + "_bb1.txt")
    bb2 = np.loadtxt(outPrefix + "_bb2.txt")
    smI0 = np.loadtxt(outPrefix + "_smallestIndex0.txt")
    smI1 = np.loadtxt(outPrefix + "_smallestIndex1.txt")
    smI2 = np.loadtxt(outPrefix + "_smallestIndex2.txt")
    if processRaw:
        rawoutDir = os.path.join(outDir, 'images')
        binaryoutDir = os.path.join(outDir, 'segmentations')
        if not os.path.exists(rawoutDir):
            os.makedirs(rawoutDir)
        if not os.path.exists(binaryoutDir):
            os.makedirs(binaryoutDir)
        outDataListSeg = []
        outDataListImg = []
        for innameSeg, innameImg in zip(inDataListSeg, inDataListImg):
            outnameSeg = innameSeg.replace(os.path.dirname(innameSeg), binaryoutDir)
            outnameSeg = outnameSeg.replace('.nrrd', '.cropped.nrrd')
            outDataListSeg.append(outnameSeg)
            outnameImg = innameImg.replace(os.path.dirname(innameImg), rawoutDir)
            outnameImg = outnameImg.replace('.nrrd', '.cropped.nrrd')
            outDataListImg.append(outnameImg)
            print(" ")
            print("############## Cropping ##############")
            cprint(("Input Segmentation Filename : ", innameSeg), 'cyan')
            cprint(("Input Image Filename : ", innameImg), 'cyan')
            cprint(("Output Segmentation Filename : ", outnameSeg), 'yellow')
            cprint(("Output Image Filename : ", outnameImg), 'yellow')
            print("######################################")
            print(" ")
            cmd = ["CropImages", "--inFilename", innameSeg, "--outFilename", outnameSeg,
                   "--bbX", str(bb0), "--bbY", str(bb1), "--bbZ", str(bb2),
                   "--startingIndexX", str(smI0), "--startingIndexY", str(smI1),
                   "--startingIndexZ", str(smI2),
                   "--MRIinFilename", innameImg, "--MRIoutFilename", outnameImg]
            subprocess.check_call(cmd)
        return [outDataListSeg, outDataListImg]
    else:
        outDataList = []
        for inname in inDataListSeg:
            outname = inname.replace(os.path.dirname(inname), outDir)
            outname = outname.replace('.nrrd', '.cropped.nrrd')
            outDataList.append(outname)
            print(" ")
            print("############## Cropping ##############")
            cprint(("Input Filename : ", inname), 'cyan')
            cprint(("Output Filename : ", outname), 'yellow')
            print("######################################")
            print(" ")
            cmd = ["CropImages", "--inFilename", inname, "--outFilename", outname,
                   "--bbX", str(bb0), "--bbY", str(bb1), "--bbZ", str(bb2),
                   "--startingIndexX", str(smI0), "--startingIndexY", str(smI1),
                   "--startingIndexZ", str(smI2)]
            subprocess.check_call(cmd)
        return outDataList
def create_meshfromDT_xml(xmlfilename, tpdtnrrdfilename, vtkfilename):
    """Write the XML parameter file for MeshFromDistanceTransforms.

    Maps the smoothed distance transform (tpdtnrrdfilename) to the output
    mesh (vtkfilename) with fixed mesh-extraction parameters.
    """
    # write via a context manager; also avoids shadowing the name `file`
    with open(xmlfilename, "w+") as xml_file:
        xml_file.write("<lsSmootherIterations>\n1.0\n</lsSmootherIterations>")
        xml_file.write("<targetReduction>\n0.0001\n</targetReduction>")
        xml_file.write("<featureAngle>\n30.0\n</featureAngle>")
        xml_file.write("<preserveTopology>\n1\n</preserveTopology>")
        xml_file.write("<inputs>\n" + str(tpdtnrrdfilename) + "\n</inputs>")
        xml_file.write("<outputs>\n" + str(vtkfilename) + "\n</outputs>")
def applyDistanceTransforms(parentDir, inDataList, antialiasIterations=20, smoothingIterations=1, isoValue=0, percentage=50):
    """Convert each segmentation into a smoothed distance transform.

    Per-file pipeline: extract label 1, close holes, antialias, fast
    marching, topology-preserving smoothing; the final smoothed DT is
    copied into the 'distance_transforms' directory.

    ``percentage`` is kept for backward compatibility; it was only used by
    dead local variables in the original implementation.

    Returns the list of final distance-transform filenames.
    """
    outDir = os.path.join(parentDir, 'groom_and_meshes')
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    finalDTDir = os.path.join(parentDir, 'distance_transforms')
    if not os.path.exists(finalDTDir):
        os.makedirs(finalDTDir)
    outDataList = []
    for inname in inDataList:
        initPath = os.path.dirname(inname)
        outname = inname.replace(initPath, outDir)
        dtnrrdfilename = outname.replace('.nrrd', '.DT.nrrd')
        tpdtnrrdfilename = outname.replace('.nrrd', '.tpSmoothDT.nrrd')
        isonrrdfilename = outname.replace('.nrrd', '.ISO.nrrd')
        # (removed unused vtkfilename / vtkfilename_preview dead locals)
        finalnm = tpdtnrrdfilename.replace(outDir, finalDTDir)
        outDataList.append(finalnm)
        execCommand = ["ExtractGivenLabelImage", "--inFilename", inname, "--outFilename", inname, "--labelVal", "1"]
        subprocess.check_call(execCommand)
        execCommand = ["CloseHoles", "--inFilename", inname, "--outFilename", inname]
        subprocess.check_call(execCommand)
        execCommand = ["shapeworks", "read-image", "--name", inname, "antialias", "--numiterations", str(antialiasIterations), "write-image", "--name", dtnrrdfilename]
        subprocess.check_call(execCommand)
        execCommand = ["FastMarching", "--inFilename", dtnrrdfilename, "--outFilename", dtnrrdfilename, "--isoValue", str(isoValue)]
        subprocess.check_call(execCommand)
        xmlfilename = outname.replace('.nrrd', '.tpSmoothDT.xml')
        create_tpSmooth_xml(xmlfilename, smoothingIterations, dtnrrdfilename, isonrrdfilename, tpdtnrrdfilename)
        create_cpp_xml(xmlfilename, xmlfilename)
        execCommand = ["TopologyPreservingSmoothing", xmlfilename]
        subprocess.check_call(execCommand)
        # keep the smoothed DT in the dedicated output directory
        shutil.copy(tpdtnrrdfilename, finalDTDir)
    return outDataList
### Mesh Grooming
# Reflects images and meshes to reference side
def anatomyPairsToSingles(outDir, seg_list, img_list, reference_side):
    """Reflect images and meshes so all anatomies are on the reference side.

    For every image, the segmentation on the reference side (if any) is
    copied through unchanged; a segmentation on the opposite side is
    reflected (together with its image) via the external ReflectVolumes /
    ReflectMesh tools.

    Returns (meshList, imageList) of output segmentation and image paths.

    Raises:
        ValueError: if reference_side is not 'left' or 'right'.
    """
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    outSegDir = os.path.join(outDir, "segmentations")
    if not os.path.exists(outSegDir):
        os.mkdir(outSegDir)
    outImgDir = os.path.join(outDir, "images")
    if not os.path.exists(outImgDir):
        os.mkdir(outImgDir)
    # the side mapping is loop-invariant, so resolve it once up front;
    # the original printed an error on bad input and then crashed with a
    # NameError — fail fast with a clear exception instead
    if reference_side == 'right':
        ref = 'R'
        flip = 'L'
    elif reference_side == 'left':
        ref = 'L'
        flip = 'R'
    else:
        raise ValueError("reference side must be 'left' or 'right'.")
    imageList = []
    meshList = []
    for img in img_list:
        img_name = os.path.basename(img)
        prefix = img_name.split("_")[0]
        # find segmentations for the reference and the flipped side
        ref_prefix = prefix + "_" + ref
        flip_prefix = prefix + "_" + flip
        ref_seg = 'None'
        flip_seg = 'None'
        for seg in seg_list:
            if ref_prefix in seg:
                ref_seg = seg
            elif flip_prefix in seg:
                flip_seg = seg
        # if we have a ref seg, copy image and seg over with appropriate name
        if ref_seg != 'None':
            seg_out = ref_seg.replace(os.path.dirname(ref_seg), outSegDir)
            meshList.append(seg_out)
            shutil.copy(ref_seg, seg_out)
            img_out = img.replace(os.path.dirname(img), outImgDir)
            img_out = img_out.replace(prefix, ref_prefix)
            imageList.append(img_out)
            shutil.copy(img, img_out)
        # if we have a seg for the non-ref side, reflect it
        if flip_seg != 'None':
            print("\n############## Reflecting ###############")
            img_out = rename(img, outImgDir, 'reflect').replace(prefix, flip_prefix)
            imageList.append(img_out)
            centerFilename = os.path.join(outDir, prefix + "_origin.txt")
            execCommand = ["ReflectVolumes", "--inFilename", img, "--outFilename", img_out, "--centerFilename", centerFilename, "--inputDirection", "0"]
            subprocess.check_call(execCommand)
            print("\n############## Reflecting ###############")
            seg_out = rename(flip_seg, outSegDir, 'reflect')
            meshList.append(seg_out)
            execCommand = ["ReflectMesh", "--inFilename", flip_seg, "--outFilename", seg_out, "--reflectCenterFilename", centerFilename, "--inputDirection", "0", "--meshFormat", flip_seg.split(".")[-1]]
            subprocess.check_call(execCommand)
    return meshList, imageList
# rasterization for meshes to DT
def MeshesToVolumes(outDir, meshList, imgList):
    """Rasterize meshes into binary volumes (with distance transforms).

    For each mesh: convert vtk/stl to ply if needed, look up the matching
    image (by filename prefix) to copy its origin/size/spacing metadata,
    write a GenerateBinaryAndDTImagesFromMeshes XML parameter file, run the
    tool, and move the produced volume and DT into ``outDir``.

    Returns the list of output segmentation (.nrrd) filenames.
    """
    segList= []
    if not os.path.exists(outDir):
        os.mkdir(outDir)
    for mesh in meshList:
        mesh_name = os.path.basename(mesh)
        extension = mesh_name.split(".")[-1]
        # prefix = first two underscore-separated tokens, used to match the image
        prefix = mesh_name.split("_")[0] + "_" + mesh_name.split("_")[1]
        # change to ply if needed (downstream tools consume .ply)
        if extension == "vtk":
            mesh_vtk = mesh
            mesh = mesh[:-4] + ".ply"
            execCommand = ["vtk2ply", mesh_vtk, mesh]
            subprocess.check_call(execCommand)
        if extension == "stl":
            mesh_vtk = mesh
            mesh = mesh[:-4] + ".ply"
            execCommand = ["stl2ply", mesh_vtk, mesh]
            subprocess.check_call(execCommand)
        # get the image whose filename contains this prefix
        # NOTE(review): if no image matches, `image` is undefined on the first
        # iteration (NameError) or stale from a previous one — verify inputs
        for image_file in imgList:
            if prefix in image_file:
                image = image_file
        # write origin, size, and spacing info to text files
        infoPrefix = os.path.join(outDir, prefix)
        execCommand = ["WriteImageInfoToText","--inFilename",image, "--outPrefix", infoPrefix]
        subprocess.check_call(execCommand)
        # read origin, size, and spacing data back from the text files
        data ={}
        origin_file = open(infoPrefix + "_origin.txt", "r")
        text = origin_file.read()
        data["origin"] = text.split("\n")
        origin_file.close()
        size_file = open(infoPrefix + "_size.txt", "r")
        text = size_file.read()
        data["size"] = text.split("\n")
        size_file.close()
        spacing_file = open(infoPrefix + "_spacing.txt", "r")
        text = spacing_file.read()
        # first spacing component — embedded in the tool's output filenames below
        spacingX = text.split("\n")[0]
        data["spacing"] = text.split("\n")
        spacing_file.close()
        # write the XML parameter file for GenerateBinaryAndDTImagesFromMeshes
        xmlfilename=infoPrefix + "_GenerateBinaryAndDT.xml"
        if os.path.exists(xmlfilename):
            os.remove(xmlfilename)
        xml = open(xmlfilename, "a")
        xml.write("<?xml version=\"1.0\" ?>\n")
        xml.write("<mesh>\n")
        xml.write(mesh+"\n")
        xml.write("</mesh>\n")
        # write origin, size, and spacing data as <key_x>/<key_y>/<key_z> tags
        for key,value in data.items():
            index = 0
            for dim in ["x","y","z"]:
                xml.write("<" + key + "_" + dim + ">" + str(value[index]) + "</" + key + "_" + dim + ">\n")
                index += 1
        xml.close()
        print("########### Turning Mesh To Volume ##############")
        segFile = rename(mesh, outDir, "", ".nrrd")
        # call generate binary and DT
        execCommand = ["GenerateBinaryAndDTImagesFromMeshes", xmlfilename]
        subprocess.check_call(execCommand)
        # move the rasterized volume produced by the tool to its final name
        output_volume = mesh.replace(".ply", ".rasterized_sp" + str(spacingX) + ".nrrd")
        shutil.move(output_volume, segFile)
        segList.append(segFile)
        # move the output distance transform alongside it
        output_DT = mesh.replace(".ply", ".DT_sp" + str(spacingX) + ".nrrd")
        dtFile = segFile.replace(".nrrd", "_DT.nrrd")
        shutil.move(output_DT, dtFile)
    return segList
def ClipBinaryVolumes(outDir, segList, cutting_plane_points):
    """Clip every binary volume with the given cutting plane.

    For each segmentation a ClipVolume XML parameter file is written next
    to the output and the external ``ClipVolume`` tool is invoked.

    Returns the list of clipped output filenames.
    """
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    outListSeg = []
    for seg in segList:
        print("\n############## Clipping ##############")
        seg_out = rename(seg, outDir, "clipped")
        outListSeg.append(seg_out)
        # (re)write the parameter file for ClipVolume
        xmlfilename = seg_out.replace(".nrrd", ".xml")
        if os.path.exists(xmlfilename):
            os.remove(xmlfilename)
        # the cutting-plane points are written space-separated
        points = str(cutting_plane_points)[1:-1].replace(",", "")
        # context manager guarantees the handle is closed before ClipVolume runs
        with open(xmlfilename, "a") as xml:
            xml.write("<?xml version=\"1.0\" ?>\n")
            xml.write("<num_shapes>1</num_shapes>\n")
            xml.write("<inputs>\n")
            xml.write(seg + "\n")
            xml.write("</inputs>\n")
            xml.write("<outputs>\n")
            xml.write(seg_out + "\n")
            xml.write("</outputs>\n")
            xml.write("<cutting_planes> " + points + " </cutting_planes>")
        execCommand = ["ClipVolume", xmlfilename]
        subprocess.check_call(execCommand)
    return outListSeg
def SelectCuttingPlane(input_file):
    """Interactively select a cutting plane on a mesh.

    Converts the input (nrrd/ply/stl) to a .vtk polydata file via external
    tools if needed, shows it in a VTK window with an implicit-plane widget,
    and when the window is closed returns three points spanning the chosen
    plane as a 3x3 numpy array.
    """
    # derive the .vtk companion filename from the input's extension
    file_format = input_file.split(".")[-1]
    input_vtk = input_file.replace(file_format, "vtk")
    if file_format == "nrrd":
        print("\nCreating mesh from: " + input_file)
        print("\nSaving as: " + input_vtk)
        xml_filename = os.path.join(os.path.dirname(input_file), "cutting_plane_nrrd2vtk.xml")
        create_meshfromDT_xml(xml_filename, input_file, input_vtk)
        execCommand = ["MeshFromDistanceTransforms", xml_filename]
        subprocess.check_call(execCommand)
        print("Calling cmd:\n"+" ".join(execCommand))
    elif file_format == "ply":
        execCommand = ["ply2vtk", input_file, input_vtk]
        subprocess.check_call(execCommand)
        print("Calling cmd:\n"+" ".join(execCommand))
    elif file_format == "stl":
        execCommand = ["stl2vtk", input_file, input_vtk]
        subprocess.check_call(execCommand)
        print("Calling cmd:\n"+" ".join(execCommand))
    elif file_format == "vtk":
        pass
    else:
        print("Error, file format unrecognized: " + input_file)
    ## VTK interactive window
    print('\n Use the interactive window to select your cutting plane. When you are content with your selection, simply close the window. \n')
    # read polydata from the .vtk file
    reader = vtk.vtkPolyDataReader()
    reader.SetFileName(input_vtk)
    reader.ReadAllVectorsOn()
    reader.ReadAllScalarsOn()
    reader.Update()
    # bounds and center are used to place the camera and the plane widget
    data = reader.GetOutput()
    (xmin, xmax, ymin, ymax, zmin, zmax) = data.GetBounds()
    (xcenter, ycenter, zcenter) = data.GetCenter()
    # create mapper
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(data)
    # the actor is a grouping mechanism
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    # create a camera looking at the mesh center
    camera = vtk.vtkCamera()
    camera.SetFocalPoint(xcenter, ycenter, zcenter)
    camera.SetPosition(100, -300, -50)
    camera.SetViewUp(0,0,1)
    # create a renderer with a gradient background
    renderer = vtk.vtkRenderer()
    renderer.SetActiveCamera(camera);
    renderer.SetBackground(0.2, 0.2, 0.5)
    renderer.SetBackground2(0.4, 0.4, 1.0)
    renderer.SetGradientBackground(True)
    renderer.AddActor(actor)
    # create a render window
    render_window = vtk.vtkRenderWindow()
    render_window.AddRenderer(renderer)
    render_window.SetSize(1000,1000)
    # create a render-window interactor
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(render_window)
    iren.Initialize()
    # implicit-plane representation placed around the actor's bounds
    rep = vtk.vtkImplicitPlaneRepresentation()
    rep.SetPlaceFactor(1.25)
    rep.PlaceWidget(actor.GetBounds())
    rep.SetNormal(0,0,1)
    # create the implicit-plane widget and activate it
    plane_widget = vtk.vtkImplicitPlaneWidget2()
    plane_widget.SetInteractor(iren)
    plane_widget.SetRepresentation(rep)
    plane_widget.On()
    iren.Initialize()
    iren.Start()
    # after the window closes: use the origin as one point of the plane and
    # the normal to solve for two others
    (o1,o2,o3) = rep.GetOrigin()
    (n1,n2,n3) = rep.GetNormal()
    # using x = 1 and y = -1, solve the plane equation for z
    pt1_z = (-n1+(n1*o1)+n2+(n2*o2)+(n3*o3))/n3
    # using x = -1 and y = 1, solve for z
    pt2_z = (n1+(n1*o1)-n2+(n2*o2)+(n3*o3))/n3
    # avoid a degenerate/duplicate point when the origin sits on the z-axis
    if o1 == 0 and o2 == 0:
        o1 = -1
        o2 = -1
    return np.array([[o1, o2, o3], [1, -1, pt1_z], [-1, 1, pt2_z]])
| [
"vtk.vtkImplicitPlaneRepresentation",
"numpy.array",
"vtk.vtkPolyDataReader",
"os.remove",
"os.path.exists",
"itk.imread",
"shutil.move",
"numpy.asarray",
"numpy.max",
"vtk.vtkRenderer",
"os.mkdir",
"vtk.vtkImplicitPlaneWidget2",
"subprocess.check_call",
"vtk.vtkCamera",
"vtk.vtkRenderWi... | [((434, 457), 'os.path.dirname', 'os.path.dirname', (['inname'], {}), '(inname)\n', (449, 457), False, 'import os\n'), ((795, 840), 'termcolor.cprint', 'cprint', (["('Input Filename : ', inname)", '"""cyan"""'], {}), "(('Input Filename : ', inname), 'cyan')\n", (801, 840), False, 'from termcolor import colored, cprint\n'), ((845, 894), 'termcolor.cprint', 'cprint', (["('Output Filename : ', outname)", '"""yellow"""'], {}), "(('Output Filename : ', outname), 'yellow')\n", (851, 894), False, 'from termcolor import colored, cprint\n'), ((2125, 2151), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {}), '(cmd)\n', (2146, 2151), False, 'import subprocess\n'), ((2296, 2333), 'os.remove', 'os.remove', (["(infoPrefix + '_origin.txt')"], {}), "(infoPrefix + '_origin.txt')\n", (2305, 2333), False, 'import os\n'), ((2338, 2376), 'os.remove', 'os.remove', (["(infoPrefix + '_spacing.txt')"], {}), "(infoPrefix + '_spacing.txt')\n", (2347, 2376), False, 'import os\n'), ((2381, 2416), 'os.remove', 'os.remove', (["(infoPrefix + '_size.txt')"], {}), "(infoPrefix + '_size.txt')\n", (2390, 2416), False, 'import os\n'), ((6611, 6631), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""sample"""'], {}), "('sample')\n", (6621, 6631), True, 'import xml.etree.ElementTree as ET\n'), ((6655, 6694), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""propagationScale"""'], {}), "(root, 'propagationScale')\n", (6668, 6694), True, 'import xml.etree.ElementTree as ET\n'), ((6748, 6776), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""alpha"""'], {}), "(root, 'alpha')\n", (6761, 6776), True, 'import xml.etree.ElementTree as ET\n'), ((6819, 6846), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""beta"""'], {}), "(root, 'beta')\n", (6832, 6846), True, 'import xml.etree.ElementTree as ET\n'), ((6889, 6920), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""isoValue"""'], {}), "(root, 
'isoValue')\n", (6902, 6920), True, 'import xml.etree.ElementTree as ET\n'), ((6978, 7021), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""smoothing_iterations"""'], {}), "(root, 'smoothing_iterations')\n", (6991, 7021), True, 'import xml.etree.ElementTree as ET\n'), ((7109, 7139), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""verbose"""'], {}), "(root, 'verbose')\n", (7122, 7139), True, 'import xml.etree.ElementTree as ET\n'), ((7182, 7211), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""inputs"""'], {}), "(root, 'inputs')\n", (7195, 7211), True, 'import xml.etree.ElementTree as ET\n'), ((7279, 7309), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""outputs"""'], {}), "(root, 'outputs')\n", (7292, 7309), True, 'import xml.etree.ElementTree as ET\n'), ((7379, 7409), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""dtFiles"""'], {}), "(root, 'dtFiles')\n", (7392, 7409), True, 'import xml.etree.ElementTree as ET\n'), ((7477, 7514), 'xml.etree.ElementTree.tostring', 'ET.tostring', (['root'], {'encoding': '"""unicode"""'}), "(root, encoding='unicode')\n", (7488, 7514), True, 'import xml.etree.ElementTree as ET\n'), ((7881, 7900), 'numpy.max', 'np.max', (['DIM'], {'axis': '(0)'}), '(DIM, axis=0)\n', (7887, 7900), True, 'import numpy as np\n'), ((8312, 8378), 'termcolor.cprint', 'cprint', (['"""The reference file for rigid alignment is found"""', '"""green"""'], {}), "('The reference file for rigid alignment is found', 'green')\n", (8318, 8378), False, 'from termcolor import colored, cprint\n'), ((8385, 8449), 'termcolor.cprint', 'cprint', (["('Output Median Filename : ', inDataList[idx])", '"""yellow"""'], {}), "(('Output Median Filename : ', inDataList[idx]), 'yellow')\n", (8391, 8449), False, 'from termcolor import colored, cprint\n'), ((9038, 9072), 'os.path.join', 'os.path.join', (['parentDir', '"""aligned"""'], {}), "(parentDir, 'aligned')\n", (9050, 9072), False, 
'import os\n'), ((9091, 9130), 'os.path.join', 'os.path.join', (['outDir', '"""transformations"""'], {}), "(outDir, 'transformations')\n", (9103, 9130), False, 'import os\n'), ((9316, 9349), 'os.path.join', 'os.path.join', (['outDir', '"""reference"""'], {}), "(outDir, 'reference')\n", (9328, 9349), False, 'import os\n'), ((9428, 9452), 'os.path.dirname', 'os.path.dirname', (['refFile'], {}), '(refFile)\n', (9443, 9452), False, 'import os\n'), ((9933, 9967), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (9954, 9967), False, 'import subprocess\n'), ((10057, 10091), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (10078, 10091), False, 'import subprocess\n'), ((10265, 10299), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (10286, 10299), False, 'import subprocess\n'), ((10452, 10486), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (10473, 10486), False, 'import subprocess\n'), ((10786, 10820), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (10807, 10820), False, 'import subprocess\n'), ((10977, 11011), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (10998, 11011), False, 'import subprocess\n'), ((17846, 17880), 'os.path.join', 'os.path.join', (['parentDir', '"""cropped"""'], {}), "(parentDir, 'cropped')\n", (17858, 17880), False, 'import os\n'), ((17962, 17995), 'os.path.join', 'os.path.join', (['outDir', '"""crop_info"""'], {}), "(outDir, 'crop_info')\n", (17974, 17995), False, 'import os\n'), ((18143, 18185), 'os.path.join', 'os.path.join', (['cropinfoDir', '"""_dataList.txt"""'], {}), "(cropinfoDir, '_dataList.txt')\n", (18155, 18185), False, 'import os\n'), ((18332, 18381), 'os.path.join', 'os.path.join', (['cropinfoDir', '"""largest_bounding_box"""'], {}), "(cropinfoDir, 
'largest_bounding_box')\n", (18344, 18381), False, 'import os\n'), ((18526, 18560), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (18547, 18560), False, 'import subprocess\n'), ((18623, 18657), 'numpy.loadtxt', 'np.loadtxt', (["(outPrefix + '_bb0.txt')"], {}), "(outPrefix + '_bb0.txt')\n", (18633, 18657), True, 'import numpy as np\n'), ((18668, 18702), 'numpy.loadtxt', 'np.loadtxt', (["(outPrefix + '_bb1.txt')"], {}), "(outPrefix + '_bb1.txt')\n", (18678, 18702), True, 'import numpy as np\n'), ((18713, 18747), 'numpy.loadtxt', 'np.loadtxt', (["(outPrefix + '_bb2.txt')"], {}), "(outPrefix + '_bb2.txt')\n", (18723, 18747), True, 'import numpy as np\n'), ((18759, 18804), 'numpy.loadtxt', 'np.loadtxt', (["(outPrefix + '_smallestIndex0.txt')"], {}), "(outPrefix + '_smallestIndex0.txt')\n", (18769, 18804), True, 'import numpy as np\n'), ((18816, 18861), 'numpy.loadtxt', 'np.loadtxt', (["(outPrefix + '_smallestIndex1.txt')"], {}), "(outPrefix + '_smallestIndex1.txt')\n", (18826, 18861), True, 'import numpy as np\n'), ((18873, 18918), 'numpy.loadtxt', 'np.loadtxt', (["(outPrefix + '_smallestIndex2.txt')"], {}), "(outPrefix + '_smallestIndex2.txt')\n", (18883, 18918), True, 'import numpy as np\n'), ((22373, 22416), 'os.path.join', 'os.path.join', (['parentDir', '"""groom_and_meshes"""'], {}), "(parentDir, 'groom_and_meshes')\n", (22385, 22416), False, 'import os\n'), ((22498, 22544), 'os.path.join', 'os.path.join', (['parentDir', '"""distance_transforms"""'], {}), "(parentDir, 'distance_transforms')\n", (22510, 22544), False, 'import os\n'), ((24600, 24637), 'os.path.join', 'os.path.join', (['outDir', '"""segmentations"""'], {}), "(outDir, 'segmentations')\n", (24612, 24637), False, 'import os\n'), ((24720, 24750), 'os.path.join', 'os.path.join', (['outDir', '"""images"""'], {}), "(outDir, 'images')\n", (24732, 24750), False, 'import os\n'), ((32484, 32507), 'vtk.vtkPolyDataReader', 'vtk.vtkPolyDataReader', ([], {}), '()\n', 
(32505, 32507), False, 'import vtk\n'), ((32811, 32834), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (32832, 32834), False, 'import vtk\n'), ((32917, 32931), 'vtk.vtkActor', 'vtk.vtkActor', ([], {}), '()\n', (32929, 32931), False, 'import vtk\n'), ((32994, 33009), 'vtk.vtkCamera', 'vtk.vtkCamera', ([], {}), '()\n', (33007, 33009), False, 'import vtk\n'), ((33168, 33185), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (33183, 33185), False, 'import vtk\n'), ((33428, 33449), 'vtk.vtkRenderWindow', 'vtk.vtkRenderWindow', ([], {}), '()\n', (33447, 33449), False, 'import vtk\n'), ((33570, 33601), 'vtk.vtkRenderWindowInteractor', 'vtk.vtkRenderWindowInteractor', ([], {}), '()\n', (33599, 33601), False, 'import vtk\n'), ((33674, 33710), 'vtk.vtkImplicitPlaneRepresentation', 'vtk.vtkImplicitPlaneRepresentation', ([], {}), '()\n', (33708, 33710), False, 'import vtk\n'), ((33874, 33903), 'vtk.vtkImplicitPlaneWidget2', 'vtk.vtkImplicitPlaneWidget2', ([], {}), '()\n', (33901, 33903), False, 'import vtk\n'), ((34446, 34502), 'numpy.array', 'np.array', (['[[o1, o2, o3], [1, -1, pt1_z], [-1, 1, pt2_z]]'], {}), '([[o1, o2, o3], [1, -1, pt1_z], [-1, 1, pt2_z]])\n', (34454, 34502), True, 'import numpy as np\n'), ((1193, 1215), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (1207, 1215), False, 'import os\n'), ((1225, 1244), 'os.makedirs', 'os.makedirs', (['outDir'], {}), '(outDir)\n', (1236, 1244), False, 'import os\n'), ((1914, 1940), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {}), '(cmd)\n', (1935, 1940), False, 'import subprocess\n'), ((2479, 2501), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (2493, 2501), False, 'import os\n'), ((2511, 2530), 'os.makedirs', 'os.makedirs', (['outDir'], {}), '(outDir)\n', (2522, 2530), False, 'import os\n'), ((2990, 3016), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {}), '(cmd)\n', (3011, 3016), False, 'import subprocess\n'), ((3729, 
3751), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (3743, 3751), False, 'import os\n'), ((3761, 3780), 'os.makedirs', 'os.makedirs', (['outDir'], {}), '(outDir)\n', (3772, 3780), False, 'import os\n'), ((4264, 4290), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {}), '(cmd)\n', (4285, 4290), False, 'import subprocess\n'), ((4648, 4670), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (4662, 4670), False, 'import os\n'), ((4680, 4699), 'os.makedirs', 'os.makedirs', (['outDir'], {}), '(outDir)\n', (4691, 4699), False, 'import os\n'), ((4758, 4788), 'os.path.join', 'os.path.join', (['outDir', '"""images"""'], {}), "(outDir, 'images')\n", (4770, 4788), False, 'import os\n'), ((4889, 4926), 'os.path.join', 'os.path.join', (['outDir', '"""segmentations"""'], {}), "(outDir, 'segmentations')\n", (4901, 4926), False, 'import os\n'), ((7957, 8101), 'numpy.pad', 'np.pad', (['IMG[i]', '((0, ref_dim[0] - DIM[i][0]), (0, ref_dim[1] - DIM[i][1]), (0, ref_dim[2] -\n DIM[i][2]))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(IMG[i], ((0, ref_dim[0] - DIM[i][0]), (0, ref_dim[1] - DIM[i][1]), (\n 0, ref_dim[2] - DIM[i][2])), mode='constant', constant_values=0)\n", (7963, 8101), True, 'import numpy as np\n'), ((9143, 9165), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (9157, 9165), False, 'import os\n'), ((9175, 9194), 'os.makedirs', 'os.makedirs', (['outDir'], {}), '(outDir)\n', (9186, 9194), False, 'import os\n'), ((9206, 9233), 'os.path.exists', 'os.path.exists', (['transoutDir'], {}), '(transoutDir)\n', (9220, 9233), False, 'import os\n'), ((9243, 9267), 'os.makedirs', 'os.makedirs', (['transoutDir'], {}), '(transoutDir)\n', (9254, 9267), False, 'import os\n'), ((9361, 9383), 'os.path.exists', 'os.path.exists', (['refDir'], {}), '(refDir)\n', (9375, 9383), False, 'import os\n'), ((9393, 9412), 'os.makedirs', 'os.makedirs', (['refDir'], {}), '(refDir)\n', (9404, 9412), False, 'import 
os\n'), ((11053, 11083), 'os.path.join', 'os.path.join', (['outDir', '"""images"""'], {}), "(outDir, 'images')\n", (11065, 11083), False, 'import os\n'), ((11107, 11145), 'os.path.join', 'os.path.join', (["(outDir + 'segmentations')"], {}), "(outDir + 'segmentations')\n", (11119, 11145), False, 'import os\n'), ((17892, 17914), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (17906, 17914), False, 'import os\n'), ((17924, 17943), 'os.makedirs', 'os.makedirs', (['outDir'], {}), '(outDir)\n', (17935, 17943), False, 'import os\n'), ((18007, 18034), 'os.path.exists', 'os.path.exists', (['cropinfoDir'], {}), '(cropinfoDir)\n', (18021, 18034), False, 'import os\n'), ((18044, 18068), 'os.makedirs', 'os.makedirs', (['cropinfoDir'], {}), '(cropinfoDir)\n', (18055, 18068), False, 'import os\n'), ((18958, 18988), 'os.path.join', 'os.path.join', (['outDir', '"""images"""'], {}), "(outDir, 'images')\n", (18970, 18988), False, 'import os\n'), ((19012, 19049), 'os.path.join', 'os.path.join', (['outDir', '"""segmentations"""'], {}), "(outDir, 'segmentations')\n", (19024, 19049), False, 'import os\n'), ((22428, 22450), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (22442, 22450), False, 'import os\n'), ((22460, 22479), 'os.makedirs', 'os.makedirs', (['outDir'], {}), '(outDir)\n', (22471, 22479), False, 'import os\n'), ((22556, 22582), 'os.path.exists', 'os.path.exists', (['finalDTDir'], {}), '(finalDTDir)\n', (22570, 22582), False, 'import os\n'), ((22592, 22615), 'os.makedirs', 'os.makedirs', (['finalDTDir'], {}), '(finalDTDir)\n', (22603, 22615), False, 'import os\n'), ((22725, 22755), 'os.path.dirname', 'os.path.dirname', (['inDataList[i]'], {}), '(inDataList[i])\n', (22740, 22755), False, 'import os\n'), ((23403, 23437), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (23424, 23437), False, 'import subprocess\n'), ((23535, 23569), 'subprocess.check_call', 'subprocess.check_call', 
(['execCommand'], {}), '(execCommand)\n', (23556, 23569), False, 'import subprocess\n'), ((23747, 23781), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (23768, 23781), False, 'import subprocess\n'), ((23925, 23959), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (23946, 23959), False, 'import subprocess\n'), ((24271, 24305), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (24292, 24305), False, 'import subprocess\n'), ((24315, 24356), 'shutil.copy', 'shutil.copy', (['tpdtnrrdfilename', 'finalDTDir'], {}), '(tpdtnrrdfilename, finalDTDir)\n', (24326, 24356), False, 'import shutil\n'), ((24532, 24554), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (24546, 24554), False, 'import os\n'), ((24564, 24583), 'os.makedirs', 'os.makedirs', (['outDir'], {}), '(outDir)\n', (24575, 24583), False, 'import os\n'), ((24649, 24674), 'os.path.exists', 'os.path.exists', (['outSegDir'], {}), '(outSegDir)\n', (24663, 24674), False, 'import os\n'), ((24684, 24703), 'os.mkdir', 'os.mkdir', (['outSegDir'], {}), '(outSegDir)\n', (24692, 24703), False, 'import os\n'), ((24762, 24787), 'os.path.exists', 'os.path.exists', (['outImgDir'], {}), '(outImgDir)\n', (24776, 24787), False, 'import os\n'), ((24797, 24816), 'os.mkdir', 'os.mkdir', (['outImgDir'], {}), '(outImgDir)\n', (24805, 24816), False, 'import os\n'), ((24898, 24919), 'os.path.basename', 'os.path.basename', (['img'], {}), '(img)\n', (24914, 24919), False, 'import os\n'), ((27111, 27133), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (27125, 27133), False, 'import os\n'), ((27143, 27159), 'os.mkdir', 'os.mkdir', (['outDir'], {}), '(outDir)\n', (27151, 27159), False, 'import os\n'), ((27206, 27228), 'os.path.basename', 'os.path.basename', (['mesh'], {}), '(mesh)\n', (27222, 27228), False, 'import os\n'), ((27987, 28015), 'os.path.join', 'os.path.join', 
(['outDir', 'prefix'], {}), '(outDir, prefix)\n', (27999, 28015), False, 'import os\n'), ((28119, 28153), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (28140, 28153), False, 'import subprocess\n'), ((28838, 28865), 'os.path.exists', 'os.path.exists', (['xmlfilename'], {}), '(xmlfilename)\n', (28852, 28865), False, 'import os\n'), ((29619, 29653), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (29640, 29653), False, 'import subprocess\n'), ((29781, 29816), 'shutil.move', 'shutil.move', (['output_volume', 'segFile'], {}), '(output_volume, segFile)\n', (29792, 29816), False, 'import shutil\n'), ((30013, 30043), 'shutil.move', 'shutil.move', (['output_DT', 'dtFile'], {}), '(output_DT, dtFile)\n', (30024, 30043), False, 'import shutil\n'), ((30158, 30180), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (30172, 30180), False, 'import os\n'), ((30190, 30209), 'os.makedirs', 'os.makedirs', (['outDir'], {}), '(outDir)\n', (30201, 30209), False, 'import os\n'), ((30485, 30512), 'os.path.exists', 'os.path.exists', (['xmlfilename'], {}), '(xmlfilename)\n', (30499, 30512), False, 'import os\n'), ((31089, 31123), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (31110, 31123), False, 'import subprocess\n'), ((31667, 31701), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (31688, 31701), False, 'import subprocess\n'), ((4804, 4829), 'os.path.exists', 'os.path.exists', (['rawoutDir'], {}), '(rawoutDir)\n', (4818, 4829), False, 'import os\n'), ((4843, 4865), 'os.makedirs', 'os.makedirs', (['rawoutDir'], {}), '(rawoutDir)\n', (4854, 4865), False, 'import os\n'), ((4942, 4970), 'os.path.exists', 'os.path.exists', (['binaryoutDir'], {}), '(binaryoutDir)\n', (4956, 4970), False, 'import os\n'), ((4984, 5009), 'os.makedirs', 'os.makedirs', (['binaryoutDir'], {}), '(binaryoutDir)\n', (4995, 
5009), False, 'import os\n'), ((5798, 5832), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (5819, 5832), False, 'import subprocess\n'), ((6412, 6446), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (6433, 6446), False, 'import subprocess\n'), ((7785, 7810), 'itk.imread', 'itk.imread', (['inDataList[i]'], {}), '(inDataList[i])\n', (7795, 7810), False, 'import itk\n'), ((8110, 8125), 'numpy.asarray', 'np.asarray', (['IMG'], {}), '(IMG)\n', (8120, 8125), True, 'import numpy as np\n'), ((11162, 11187), 'os.path.exists', 'os.path.exists', (['rawoutDir'], {}), '(rawoutDir)\n', (11176, 11187), False, 'import os\n'), ((11201, 11223), 'os.makedirs', 'os.makedirs', (['rawoutDir'], {}), '(rawoutDir)\n', (11212, 11223), False, 'import os\n'), ((11240, 11268), 'os.path.exists', 'os.path.exists', (['binaryoutDir'], {}), '(binaryoutDir)\n', (11254, 11268), False, 'import os\n'), ((11282, 11307), 'os.makedirs', 'os.makedirs', (['binaryoutDir'], {}), '(binaryoutDir)\n', (11293, 11307), False, 'import os\n'), ((11469, 11495), 'os.path.dirname', 'os.path.dirname', (['seginname'], {}), '(seginname)\n', (11484, 11495), False, 'import os\n'), ((11519, 11546), 'os.path.basename', 'os.path.basename', (['seginname'], {}), '(seginname)\n', (11535, 11546), False, 'import os\n'), ((11951, 11977), 'os.path.dirname', 'os.path.dirname', (['rawinname'], {}), '(rawinname)\n', (11966, 11977), False, 'import os\n'), ((12001, 12028), 'os.path.basename', 'os.path.basename', (['rawinname'], {}), '(rawinname)\n', (12017, 12028), False, 'import os\n'), ((12664, 12725), 'termcolor.cprint', 'cprint', (["('Input Segmentation Filename : ', seginname)", '"""cyan"""'], {}), "(('Input Segmentation Filename : ', seginname), 'cyan')\n", (12670, 12725), False, 'from termcolor import colored, cprint\n'), ((12738, 12794), 'termcolor.cprint', 'cprint', (["('Input Reference Filename : ', refFile)", '"""cyan"""'], {}), "(('Input 
Reference Filename : ', refFile), 'cyan')\n", (12744, 12794), False, 'from termcolor import colored, cprint\n'), ((12807, 12859), 'termcolor.cprint', 'cprint', (["('Input Raw Filename : ', rawinname)", '"""cyan"""'], {}), "(('Input Raw Filename : ', rawinname), 'cyan')\n", (12813, 12859), False, 'from termcolor import colored, cprint\n'), ((12872, 12937), 'termcolor.cprint', 'cprint', (["('Output Segmentation Filename : ', segoutname)", '"""yellow"""'], {}), "(('Output Segmentation Filename : ', segoutname), 'yellow')\n", (12878, 12937), False, 'from termcolor import colored, cprint\n'), ((12950, 13006), 'termcolor.cprint', 'cprint', (["('Output Raw Filename : ', rawoutname)", '"""yellow"""'], {}), "(('Output Raw Filename : ', rawoutname), 'yellow')\n", (12956, 13006), False, 'from termcolor import colored, cprint\n'), ((13019, 13088), 'termcolor.cprint', 'cprint', (["('Output Transformation Matrix : ', transformation)", '"""yellow"""'], {}), "(('Output Transformation Matrix : ', transformation), 'yellow')\n", (13025, 13088), False, 'from termcolor import colored, cprint\n'), ((13316, 13350), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (13337, 13350), False, 'import subprocess\n'), ((13459, 13493), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (13480, 13493), False, 'import subprocess\n'), ((13681, 13715), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (13702, 13715), False, 'import subprocess\n'), ((13882, 13916), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (13903, 13916), False, 'import subprocess\n'), ((14260, 14294), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (14281, 14294), False, 'import subprocess\n'), ((14708, 14742), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (14729, 14742), 
False, 'import subprocess\n'), ((14936, 14959), 'os.path.dirname', 'os.path.dirname', (['inname'], {}), '(inname)\n', (14951, 14959), False, 'import os\n'), ((15714, 15772), 'termcolor.cprint', 'cprint', (["('Input Segmentation Filename : ', inname)", '"""cyan"""'], {}), "(('Input Segmentation Filename : ', inname), 'cyan')\n", (15720, 15772), False, 'from termcolor import colored, cprint\n'), ((15785, 15841), 'termcolor.cprint', 'cprint', (["('Input Reference Filename : ', refFile)", '"""cyan"""'], {}), "(('Input Reference Filename : ', refFile), 'cyan')\n", (15791, 15841), False, 'from termcolor import colored, cprint\n'), ((15854, 15916), 'termcolor.cprint', 'cprint', (["('Output Segmentation Filename : ', outname)", '"""yellow"""'], {}), "(('Output Segmentation Filename : ', outname), 'yellow')\n", (15860, 15916), False, 'from termcolor import colored, cprint\n'), ((15929, 15998), 'termcolor.cprint', 'cprint', (["('Output Transformation Matrix : ', transformation)", '"""yellow"""'], {}), "(('Output Transformation Matrix : ', transformation), 'yellow')\n", (15935, 15998), False, 'from termcolor import colored, cprint\n'), ((16221, 16255), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (16242, 16255), False, 'import subprocess\n'), ((16359, 16393), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (16380, 16393), False, 'import subprocess\n'), ((16579, 16613), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (16600, 16613), False, 'import subprocess\n'), ((16781, 16815), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (16802, 16815), False, 'import subprocess\n'), ((17157, 17191), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (17178, 17191), False, 'import subprocess\n'), ((17544, 17578), 'subprocess.check_call', 'subprocess.check_call', 
(['execCommand'], {}), '(execCommand)\n', (17565, 17578), False, 'import subprocess\n'), ((19065, 19090), 'os.path.exists', 'os.path.exists', (['rawoutDir'], {}), '(rawoutDir)\n', (19079, 19090), False, 'import os\n'), ((19104, 19126), 'os.makedirs', 'os.makedirs', (['rawoutDir'], {}), '(rawoutDir)\n', (19115, 19126), False, 'import os\n'), ((19142, 19170), 'os.path.exists', 'os.path.exists', (['binaryoutDir'], {}), '(binaryoutDir)\n', (19156, 19170), False, 'import os\n'), ((19184, 19209), 'os.makedirs', 'os.makedirs', (['binaryoutDir'], {}), '(binaryoutDir)\n', (19195, 19209), False, 'import os\n'), ((19415, 19441), 'os.path.dirname', 'os.path.dirname', (['innameSeg'], {}), '(innameSeg)\n', (19430, 19441), False, 'import os\n'), ((19648, 19674), 'os.path.dirname', 'os.path.dirname', (['innameImg'], {}), '(innameImg)\n', (19663, 19674), False, 'import os\n'), ((19950, 20011), 'termcolor.cprint', 'cprint', (["('Input Segmentation Filename : ', innameSeg)", '"""cyan"""'], {}), "(('Input Segmentation Filename : ', innameSeg), 'cyan')\n", (19956, 20011), False, 'from termcolor import colored, cprint\n'), ((20024, 20078), 'termcolor.cprint', 'cprint', (["('Input Image Filename : ', innameImg)", '"""cyan"""'], {}), "(('Input Image Filename : ', innameImg), 'cyan')\n", (20030, 20078), False, 'from termcolor import colored, cprint\n'), ((20091, 20156), 'termcolor.cprint', 'cprint', (["('Output Segmentation Filename : ', outnameSeg)", '"""yellow"""'], {}), "(('Output Segmentation Filename : ', outnameSeg), 'yellow')\n", (20097, 20156), False, 'from termcolor import colored, cprint\n'), ((20169, 20227), 'termcolor.cprint', 'cprint', (["('Output Image Filename : ', outnameImg)", '"""yellow"""'], {}), "(('Output Image Filename : ', outnameImg), 'yellow')\n", (20175, 20227), False, 'from termcolor import colored, cprint\n'), ((20683, 20717), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (20704, 20717), False, 'import 
subprocess\n'), ((20907, 20930), 'os.path.dirname', 'os.path.dirname', (['inname'], {}), '(inname)\n', (20922, 20930), False, 'import os\n'), ((21185, 21230), 'termcolor.cprint', 'cprint', (["('Input Filename : ', inname)", '"""cyan"""'], {}), "(('Input Filename : ', inname), 'cyan')\n", (21191, 21230), False, 'from termcolor import colored, cprint\n'), ((21243, 21292), 'termcolor.cprint', 'cprint', (["('Output Filename : ', outname)", '"""yellow"""'], {}), "(('Output Filename : ', outname), 'yellow')\n", (21249, 21292), False, 'from termcolor import colored, cprint\n'), ((21663, 21697), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (21684, 21697), False, 'import subprocess\n'), ((25765, 25794), 'shutil.copy', 'shutil.copy', (['ref_seg', 'seg_out'], {}), '(ref_seg, seg_out)\n', (25776, 25794), False, 'import shutil\n'), ((25970, 25995), 'shutil.copy', 'shutil.copy', (['img', 'img_out'], {}), '(img, img_out)\n', (25981, 25995), False, 'import shutil\n'), ((26304, 26348), 'os.path.join', 'os.path.join', (['outDir', "(prefix + '_origin.txt')"], {}), "(outDir, prefix + '_origin.txt')\n", (26316, 26348), False, 'import os\n'), ((26514, 26548), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (26535, 26548), False, 'import subprocess\n'), ((26927, 26961), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (26948, 26961), False, 'import subprocess\n'), ((27544, 27578), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (27565, 27578), False, 'import subprocess\n'), ((27742, 27776), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (27763, 27776), False, 'import subprocess\n'), ((28879, 28901), 'os.remove', 'os.remove', (['xmlfilename'], {}), '(xmlfilename)\n', (28888, 28901), False, 'import os\n'), ((30526, 30548), 'os.remove', 'os.remove', (['xmlfilename'], {}), 
'(xmlfilename)\n', (30535, 30548), False, 'import os\n'), ((31466, 31493), 'os.path.dirname', 'os.path.dirname', (['input_file'], {}), '(input_file)\n', (31481, 31493), False, 'import os\n'), ((31852, 31886), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (31873, 31886), False, 'import subprocess\n'), ((25679, 25703), 'os.path.dirname', 'os.path.dirname', (['ref_seg'], {}), '(ref_seg)\n', (25694, 25703), False, 'import os\n'), ((25829, 25849), 'os.path.dirname', 'os.path.dirname', (['img'], {}), '(img)\n', (25844, 25849), False, 'import os\n'), ((32037, 32071), 'subprocess.check_call', 'subprocess.check_call', (['execCommand'], {}), '(execCommand)\n', (32058, 32071), False, 'import subprocess\n'), ((8190, 8205), 'numpy.asarray', 'np.asarray', (['IMG'], {}), '(IMG)\n', (8200, 8205), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
import cv2
from PIL import Image
import time
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'mayavi'))
sys.path.append(os.path.join(BASE_DIR, '../kitti'))
import kitti_util as utils
# import cPickle as pickle
import pickle
from kitti_object import *
def non_max_suppression(boxes, overlapThresh):
    """Greedy non-maximum suppression on axis-aligned 2D boxes.

    Args:
        boxes: (N, 5) array with rows [x1, y1, x2, y2, score].
        overlapThresh: a box is suppressed when its overlap ratio with an
            already-picked box exceeds this value.

    Returns:
        List of indices (into ``boxes``) of the surviving boxes, ordered
        from highest to lowest score.
    """
    # Nothing to do for an empty detection set.
    if len(boxes) == 0:
        return []
    # Integer coordinates would make the ratio division below truncate,
    # so promote to float once up front.
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    # Precompute every box's area; reused for each overlap ratio.
    area = (x2 - x1) * (y2 - y1)
    # Sort candidates by score (column 4), ascending, so the
    # highest-scoring remaining box is always at the end of ``idxs``.
    # (A previous comment claimed this sorted by the bottom-right
    # y-coordinate; it never did.)
    idxs = np.argsort(boxes[:, 4])
    pick = []
    while len(idxs) > 0:
        # Pick the highest-scoring remaining box.
        i = idxs[-1]
        pick.append(i)
        rest = idxs[:-1]
        # Intersection rectangle of the picked box with each remaining box.
        xx1 = np.maximum(x1[i], x1[rest])
        yy1 = np.maximum(y1[i], y1[rest])
        xx2 = np.minimum(x2[i], x2[rest])
        yy2 = np.minimum(y2[i], y2[rest])
        # Clamp to zero: disjoint boxes have no intersection.
        w = np.maximum(0, xx2 - xx1)
        h = np.maximum(0, yy2 - yy1)
        # Overlap ratio: intersection area over the *other* box's area.
        overlap = (w * h) / area[rest]
        # Keep only the boxes that do not overlap the picked one too much
        # (equivalent to the original np.delete of [last] + offenders).
        idxs = rest[overlap <= overlapThresh]
    return pick
class ProposalObject(object):
    """Lightweight container for a single 3D region proposal.

    ``box_3d`` is laid out as [x, y, z, l, w, h, ry]: box centre,
    box dimensions, and rotation about the vertical axis.
    """

    def __init__(self, box_3d, score=0.0, type='Car', roi_features=None):
        # Box centre (x, y, z).
        self.t = box_3d[0:3]
        # Length, width, height and yaw, unpacked in one go.
        self.l, self.w, self.h, self.ry = box_3d[3:7]
        self.score = score
        self.type = type
        self.roi_features = roi_features
class kitti_object_avod(kitti_object):
    """KITTI dataset wrapper that additionally serves AVOD RPN proposals."""

    def __init__(self, root_dir, split='training'):
        """``root_dir`` contains the ``training`` and ``testing`` folders."""
        self.root_dir = root_dir
        self.split = split
        self.split_dir = os.path.join(root_dir, split)

        # Fixed KITTI sample counts per split.
        sample_counts = {'training': 7481, 'testing': 7518}
        if split not in sample_counts:
            print('Unknown split: %s' % (split))
            exit(-1)
        self.num_samples = sample_counts[split]

        self.image_dir = os.path.join(self.split_dir, 'image_2')
        self.calib_dir = os.path.join(self.split_dir, 'calib')
        self.lidar_dir = os.path.join(self.split_dir, 'velodyne')
        self.label_dir = os.path.join(self.split_dir, 'label_2')
        self.proposal_dir = os.path.join(self.split_dir, 'proposal')

    def np_read_lines(self, filename, lines):
        """Parse the whitespace-separated rows of ``filename`` whose
        0-based line numbers appear in ``lines`` into a 2-D float array."""
        rows = []
        with open(filename, 'rb') as fp:
            for lineno, raw in enumerate(fp):
                if lineno in lines:
                    rows.append(np.fromstring(raw, dtype=float, sep=' '))
        return np.array(rows)

    def get_proposals(self, idx, rpn_score_threshold=0.1, nms_iou_thres=0.3):
        """Load the AVOD region proposals (and their ROI features) for
        sample ``idx``, keeping only those whose RPN score exceeds
        ``rpn_score_threshold``. ``nms_iou_thres`` is currently unused:
        the BEV NMS step was disabled upstream.
        """
        assert idx < self.num_samples
        txt_path = os.path.join(self.proposal_dir, '%06d.txt' % (idx))
        roi_path = os.path.join(self.proposal_dir, '%06d_roi.txt' % (idx))

        raw = np.loadtxt(txt_path)
        row_ids = np.arange(0, len(raw))
        # Each row: [x, y, z, l, w, h, ry, score].
        score_keep = raw[:, 7] > rpn_score_threshold
        boxes_3d = raw[:, 0:7][score_keep]
        row_ids = row_ids[score_keep]

        proposals = [ProposalObject(b) for b in boxes_3d]

        # Project every surviving box via the calibration. The corner
        # geometry is not consumed further here (the NMS that used it is
        # disabled), but get_calibration/compute_box_3d are kept so
        # behaviour matches the original pipeline.
        corner_list = []
        corner_scores = []
        calib = self.get_calibration(idx)
        for prop in proposals:
            _, corners = utils.compute_box_3d(prop, calib.P)
            corner_list.append(corners)
            corner_scores.append(prop.score)

        roi_features = self.np_read_lines(roi_path, row_ids)
        kept_scores = raw[:, 7][score_keep]
        for prop, score, feat in zip(proposals, kept_scores, roi_features):
            prop.score = score
            prop.roi_features = feat
        return proposals
| [
"numpy.minimum",
"kitti_util.compute_box_3d",
"numpy.where",
"os.path.join",
"numpy.argsort",
"os.path.dirname",
"numpy.array",
"numpy.loadtxt",
"os.path.abspath",
"numpy.maximum",
"numpy.fromstring",
"sys.path.append"
] | [((150, 175), 'os.path.dirname', 'os.path.dirname', (['BASE_DIR'], {}), '(BASE_DIR)\n', (165, 175), False, 'import os\n'), ((176, 201), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (191, 201), False, 'import sys\n'), ((112, 137), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (127, 137), False, 'import os\n'), ((218, 250), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mayavi"""'], {}), "(ROOT_DIR, 'mayavi')\n", (230, 250), False, 'import os\n'), ((268, 302), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""../kitti"""'], {}), "(BASE_DIR, '../kitti')\n", (280, 302), False, 'import os\n'), ((1106, 1129), 'numpy.argsort', 'np.argsort', (['boxes[:, 4]'], {}), '(boxes[:, 4])\n', (1116, 1129), True, 'import numpy as np\n'), ((1604, 1638), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[idxs[:last]]'], {}), '(x1[i], x1[idxs[:last]])\n', (1614, 1638), True, 'import numpy as np\n'), ((1653, 1687), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[idxs[:last]]'], {}), '(y1[i], y1[idxs[:last]])\n', (1663, 1687), True, 'import numpy as np\n'), ((1702, 1736), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[idxs[:last]]'], {}), '(x2[i], x2[idxs[:last]])\n', (1712, 1736), True, 'import numpy as np\n'), ((1751, 1785), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[idxs[:last]]'], {}), '(y2[i], y2[idxs[:last]])\n', (1761, 1785), True, 'import numpy as np\n'), ((1921, 1945), 'numpy.maximum', 'np.maximum', (['(0)', '(xx2 - xx1)'], {}), '(0, xx2 - xx1)\n', (1931, 1945), True, 'import numpy as np\n'), ((1958, 1982), 'numpy.maximum', 'np.maximum', (['(0)', '(yy2 - yy1)'], {}), '(0, yy2 - yy1)\n', (1968, 1982), True, 'import numpy as np\n'), ((2948, 2977), 'os.path.join', 'os.path.join', (['root_dir', 'split'], {}), '(root_dir, split)\n', (2960, 2977), False, 'import os\n'), ((3351, 3390), 'os.path.join', 'os.path.join', (['self.split_dir', '"""image_2"""'], {}), "(self.split_dir, 'image_2')\n", (3363, 3390), False, 'import os\n'), 
((3416, 3453), 'os.path.join', 'os.path.join', (['self.split_dir', '"""calib"""'], {}), "(self.split_dir, 'calib')\n", (3428, 3453), False, 'import os\n'), ((3479, 3519), 'os.path.join', 'os.path.join', (['self.split_dir', '"""velodyne"""'], {}), "(self.split_dir, 'velodyne')\n", (3491, 3519), False, 'import os\n'), ((3545, 3584), 'os.path.join', 'os.path.join', (['self.split_dir', '"""label_2"""'], {}), "(self.split_dir, 'label_2')\n", (3557, 3584), False, 'import os\n'), ((3613, 3653), 'os.path.join', 'os.path.join', (['self.split_dir', '"""proposal"""'], {}), "(self.split_dir, 'proposal')\n", (3625, 3653), False, 'import os\n'), ((4017, 4030), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (4025, 4030), True, 'import numpy as np\n'), ((4177, 4226), 'os.path.join', 'os.path.join', (['self.proposal_dir', "('%06d.txt' % idx)"], {}), "(self.proposal_dir, '%06d.txt' % idx)\n", (4189, 4226), False, 'import os\n'), ((4251, 4304), 'os.path.join', 'os.path.join', (['self.proposal_dir', "('%06d_roi.txt' % idx)"], {}), "(self.proposal_dir, '%06d_roi.txt' % idx)\n", (4263, 4304), False, 'import os\n'), ((4336, 4367), 'numpy.loadtxt', 'np.loadtxt', (['proposals_file_path'], {}), '(proposals_file_path)\n', (4346, 4367), True, 'import numpy as np\n'), ((5032, 5066), 'kitti_util.compute_box_3d', 'utils.compute_box_3d', (['obj', 'calib.P'], {}), '(obj, calib.P)\n', (5052, 5066), True, 'import kitti_util as utils\n'), ((2196, 2229), 'numpy.where', 'np.where', (['(overlap > overlapThresh)'], {}), '(overlap > overlapThresh)\n', (2204, 2229), True, 'import numpy as np\n'), ((3959, 4000), 'numpy.fromstring', 'np.fromstring', (['line'], {'dtype': 'float', 'sep': '""" """'}), "(line, dtype=float, sep=' ')\n", (3972, 4000), True, 'import numpy as np\n')] |
from pandas import read_csv
import numpy as np
# Calculates 3 of missing values given in the original csv
def calculateMissingValues(data):
missing_data = data.isnull().sum()
print("Missing values for each feature (feature | # missing values): ")
print(missing_data)
# Finds average age of patients
# Replaces nan values with 0
# Converts 'Age' column to list
def findAverageAge(data):
    """Print the average patient age, rounded to the nearest year.

    Column 0 of ``data`` holds the ages; row 0 is the header row (the CSV
    is read with ``header=None``), so it is skipped. NaNs in all ten
    feature columns are first replaced with 0 in-place, meaning missing
    ages count as 0 toward the average. Side effect: ``data`` is mutated
    for the later pipeline steps.
    """
    feature_cols = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    # BUG FIX: the np.NaN alias was removed in NumPy 2.0; np.nan is the
    # canonical (and identical) spelling.
    data[feature_cols] = data[feature_cols].replace(np.nan, 0)
    age = data[0].tolist()
    del age[0]  # drop the header cell ("Age")
    age = list(map(float, age))
    average_age = round(sum(age) / len(age))
    print(f"Average age: {average_age} years old")
# Calculates range of 'Glucose' column
# Converts 'Glucose' column to a list and then a numpy array
# Swaps nan values with the mean of the numpy array
# Gets min and max values and calculates range
def glucoseRange(data):
    """Print the range (max - min) of the glucose column (column 2).

    Zeros across the ten feature columns are treated as missing: they are
    replaced in-place with NaN and then filled with the column means.
    A defensive second pass swaps any remaining NaN glucose value for the
    mean of the non-NaN entries before taking the range. Row 0 (the
    header) is skipped. Side effect: ``data`` is mutated.
    """
    feature_cols = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    # BUG FIX: the np.NaN alias was removed in NumPy 2.0; np.nan is the
    # canonical (and identical) spelling.
    data[feature_cols] = data[feature_cols].replace(0, np.nan)
    data.fillna(data.mean(), inplace=True)
    glucose = data[2].tolist()
    del glucose[0]  # drop the header cell
    glucose = list(map(float, glucose))
    glucose = np.array(glucose)
    # Replace any leftover NaNs with the masked mean of the valid values.
    x = np.where(np.isnan(glucose), np.ma.array(glucose, mask=np.isnan(glucose)).mean(axis=0), glucose)
    max_val = np.amax(x)
    min_val = np.amin(x)
    r = round(max_val - min_val)
    print(f"Glucose range: {r} ng/ml")
if __name__ == "__main__":
    # Load the raw CSV; header=None keeps the name row as data row 0,
    # which each step below skips explicitly.
    dataset = read_csv('dataR2-w-missing.csv', header=None)
    # Run the summary steps in order; the last two mutate ``dataset``.
    for step in (calculateMissingValues, findAverageAge, glucoseRange):
        step(dataset)
| [
"numpy.amin",
"pandas.read_csv",
"numpy.array",
"numpy.isnan",
"numpy.amax"
] | [((1175, 1192), 'numpy.array', 'np.array', (['glucose'], {}), '(glucose)\n', (1183, 1192), True, 'import numpy as np\n'), ((1313, 1323), 'numpy.amax', 'np.amax', (['x'], {}), '(x)\n', (1320, 1323), True, 'import numpy as np\n'), ((1339, 1349), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (1346, 1349), True, 'import numpy as np\n'), ((1471, 1516), 'pandas.read_csv', 'read_csv', (['"""dataR2-w-missing.csv"""'], {'header': 'None'}), "('dataR2-w-missing.csv', header=None)\n", (1479, 1516), False, 'from pandas import read_csv\n'), ((1211, 1228), 'numpy.isnan', 'np.isnan', (['glucose'], {}), '(glucose)\n', (1219, 1228), True, 'import numpy as np\n'), ((1256, 1273), 'numpy.isnan', 'np.isnan', (['glucose'], {}), '(glucose)\n', (1264, 1273), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Extract methylation from fast5 files into a RocksDB file. Also has an interface to read those values.
Created on Thursday, 25. July 2019.
"""
import glob
import os.path
def main():
    """Parse command-line arguments and configure logging when --verbose.

    Returns the parsed argparse.Namespace with: input_fast5_dir, mod_data,
    processes, verbose.
    """
    import argparse
    parser = argparse.ArgumentParser(description="Extract methylation from fast5 files")
    parser.add_argument("input_fast5_dir", type=str,
                        help="Input dir of fast5 files [default:%(default)s]")
    parser.add_argument("-d", "--mod_data",
                        help="Database to store the modifications to [default:%(default)s]",
                        default="base_mods.rocksdb")
    parser.add_argument("-p", "--processes", type=int,
                        # Fixed: help text was copy-pasted from --mod_data.
                        help="Number of parallel worker processes [default:%(default)s]",
                        default=1)
    parser.add_argument("-V", "--verbose", default=False, action="store_true",
                        help="Be more verbose with output [default:%(default)s]")
    args = parser.parse_args()
    import logging
    if args.verbose:
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s:%(funcName)s:%(levelname)s:%(message)s')
    return args
class MethylDB(object):
    """Store per-read base-modification likelihoods in RocksDB.

    Keys are the 16 raw bytes of the read UUID; values are uint8 likelihood
    arrays (and, optionally, the read sequence under ``uuid_bytes + b"/seq"``).
    When a queue ``q`` is supplied, no database is opened and every write is
    pushed onto the queue instead (worker-process mode; the main process
    drains the queue into the database).
    """
    def __init__(self, db_name, q=None):
        import rocksdb
        self._db_name = db_name
        self._q = q
        if self._q is None:
            opts = rocksdb.Options()
            opts.create_if_missing = True
            opts.max_open_files = 300000
            opts.max_open_files = -1  # Dangerous
            opts.write_buffer_size = 2 * 512 * 1024 ** 2
            opts.max_write_buffer_number = 3
            opts.target_file_size_base = 512 * 1024 ** 2  # MB
            # opts.compression = rocksdb.CompressionType.zlib_compression
            opts.table_factory = rocksdb.BlockBasedTableFactory(
                filter_policy=rocksdb.BloomFilterPolicy(10),
                # cache_index_and_filter_blocks=True,
                # optimize_filters_for_hits=True,
                block_cache=rocksdb.LRUCache(5 * (1024 ** 3)),
                block_size=64 * 1024,
                block_cache_compressed=rocksdb.LRUCache(500 * (1024 ** 2)))
            self._db = rocksdb.DB(self._db_name, opts)
    # Indices into the basecaller's ModBaseProbs table (see update_fast5).
    A, mA, C, mC, G, T = 0, 1, 2, 3, 4, 5
    def close(self):
        # python-rocksdb has no explicit close(); dropping the handle closes it.
        # Fixed: guarded so that queue-mode instances (which never open a DB)
        # no longer raise AttributeError here.
        if self._q is None:
            del (self._db)
    def _put(self, *args):
        # Route writes either to the worker queue or directly to the database.
        if self._q:
            self._q.put(args)
        else:
            self._db.put(*args)
    def __len__(self):
        "Return approximate number of key-value pairs in the database"
        return int(self._db.get_property(b"rocksdb.estimate-num-keys"))
    def put(self, read_id, likelihoods, sequence=None):
        """Store the likelihood array (and optional sequence) for one read.

        read_id is a UUID string; likelihoods must be a one-dimensional
        numpy uint8 array.
        """
        from uuid import UUID
        from numpy import uint8, ndarray
        assert isinstance(likelihoods, ndarray), "Likelihoods must be of type ndarray"
        assert likelihoods.ndim == 1, "Likelihoods must be one dimensional"
        assert likelihoods.dtype == uint8, "Likelihoods must be of dtype uint8"
        read_uuid = UUID(read_id)
        self._put(read_uuid.bytes, likelihoods.tobytes())
        if sequence is not None:
            self._put(read_uuid.bytes + b"/seq", sequence.encode("ascii"))
    def get(self, read_id, with_sequence=False):
        """Return the uint8 likelihood array for read_id, or None if absent.

        With with_sequence=True, return a (likelihoods, sequence_bytes) tuple.
        """
        import numpy as np
        from uuid import UUID
        read_uuid = UUID(read_id)
        mod_data = self._db.get(read_uuid.bytes)
        if mod_data is None:
            likelihoods = None
        else:
            likelihoods = np.frombuffer(mod_data, dtype=np.uint8)
        if with_sequence:
            likelihoods = likelihoods, self._db.get(read_uuid.bytes + b"/seq")
        return likelihoods
    def update_fast5(self, fast5_filepath, mod_index=3, verbose=False):
        """Update (i.e. add or change) the methylation data for reads in the given fast5 file.
        mod_index gives the index of the modification call table to store in the database.
        Default is mC modification. Indices: A,mA,C,mC,G,T = 0,1,2,3,4,5"""
        from ont_fast5_api.fast5_interface import get_fast5_file
        import numpy as np
        import logging as log
        if verbose:
            from tqdm import tqdm
        else:
            def tqdm(x):
                return x
        log.info("Processing file {}".format(fast5_filepath))
        UNMODIFIED_BASES = [b"A", b"A", b"C", b"C", b"G", b"T"]
        assert mod_index >= 0 and mod_index < len(UNMODIFIED_BASES), "mod_index must be in the range 0-5."
        BASE = UNMODIFIED_BASES[mod_index]
        log.info("Looking for modification {} of base {}.".format(mod_index, BASE))
        with get_fast5_file(fast5_filepath, mode="r") as f5:
            for read_id in tqdm(f5.get_read_ids()):
                read = f5.get_read(read_id)
                latest_basecall = read.get_latest_analysis('Basecall_1D')
                mod_base_table = read.get_analysis_dataset(
                    latest_basecall, 'BaseCalled_template/ModBaseProbs')
                if mod_base_table is None:
                    log.info("No ModBaseProbs for {}".format(read_id))
                    continue
                fastq = read.get_analysis_dataset(
                    latest_basecall, 'BaseCalled_template/Fastq')
                if fastq is None:
                    log.info("No Fastq for {}".format(read_id))
                    continue
                seq_title, seq, _, qvals, _ = fastq.split("\n")
                # Keep, for the positions holding the unmodified base, the
                # likelihood of the requested modification.
                # Fixed: np.fromstring(seq, "|S1") used the deprecated (now
                # removed) binary mode; frombuffer on the ASCII-encoded bytes
                # is the supported equivalent.
                seq_bases = np.frombuffer(seq.encode("ascii"), dtype="S1")
                mod_likelihoods = mod_base_table[seq_bases == BASE, mod_index]
                self.put(read_id, mod_likelihoods)
def _fast5_putter(fname, q):
    """Worker: parse one fast5 file and push its key/value pairs onto queue q."""
    import logging as log
    import os
    pid = os.getpid()
    log.info("Processing {} to {} in process {}.".format(fname, q, pid))
    worker_db = MethylDB(None, q=q)
    worker_db.update_fast5(fname)
    return fname, str(q)
if __name__ == '__main__':
    args = main()
    # Open (or create) the RocksDB store in the main process.
    mdb = MethylDB(args.mod_data)
    import logging as log
    log.info(args)
    indir = args.input_fast5_dir
    fnlist = glob.glob(os.path.join(indir, '*.fast5'))
    log.info(f'Total files={len(fnlist)}')
    if args.processes == 1:
        # Single-process mode: parse each file and write straight to the DB.
        for fn in fnlist:
            mdb.update_fast5(fn, verbose=args.verbose)
    elif args.processes > 1:
        # Multi-process mode: worker processes parse fast5 files and push
        # key/value pairs onto a shared queue; only this (main) process
        # writes to the database (single-writer pattern).
        from tqdm import tqdm
        import multiprocessing as mp
        import itertools as it
        from queue import Empty
        procs = min(args.processes, len(fnlist))
        log.info("Will run {} processes in parallel.".format(procs))
        m = mp.Manager()
        q = m.Queue(1000)
        with mp.Pool(procs) as pool:
            # Read the fast5 files in parallel and put the results in queue q
            read_result = pool.starmap_async(_fast5_putter, zip(fnlist, it.repeat(q)))
            # Drain the queue until every worker has finished; it.repeat(True)
            # gives tqdm an endless iterator (throughput display, no total).
            for _ in tqdm(it.repeat(True)):
                try:
                    data = q.get(timeout=1)
                except Empty:
                    # Queue empty: either all work is done, or workers are
                    # still busy parsing — poll and keep waiting.
                    if read_result.ready():
                        # log.info("Fast5 processing is ready. Got {}".format(read_result.get()))
                        log.info("Fast5 processing is ready.")
                        break
                    else:
                        log.info("Stalling... Fast5 processing takes time.")
                else:
                    # Forward the (key, value) pair from a worker into RocksDB.
                    mdb._db.put(*data)
| [
"logging.basicConfig",
"rocksdb.Options",
"uuid.UUID",
"itertools.repeat",
"argparse.ArgumentParser",
"os.path.join",
"ont_fast5_api.fast5_interface.get_fast5_file",
"multiprocessing.Pool",
"os.getpid",
"rocksdb.BloomFilterPolicy",
"multiprocessing.Manager",
"numpy.frombuffer",
"rocksdb.DB",... | [((267, 342), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract methylation from fast5 files"""'}), "(description='Extract methylation from fast5 files')\n", (290, 342), False, 'import argparse\n'), ((6265, 6279), 'logging.info', 'log.info', (['args'], {}), '(args)\n', (6273, 6279), True, 'import logging as log\n'), ((1094, 1199), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s:%(funcName)s:%(levelname)s:%(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s:%(funcName)s:%(levelname)s:%(message)s')\n", (1113, 1199), False, 'import logging\n'), ((3084, 3097), 'uuid.UUID', 'UUID', (['read_id'], {}), '(read_id)\n', (3088, 3097), False, 'from uuid import UUID\n'), ((3443, 3456), 'uuid.UUID', 'UUID', (['read_id'], {}), '(read_id)\n', (3447, 3456), False, 'from uuid import UUID\n'), ((6337, 6367), 'os.path.join', 'os.path.join', (['indir', '"""*.fast5"""'], {}), "(indir, '*.fast5')\n", (6349, 6367), False, 'import os\n'), ((1430, 1447), 'rocksdb.Options', 'rocksdb.Options', ([], {}), '()\n', (1445, 1447), False, 'import rocksdb\n'), ((2277, 2308), 'rocksdb.DB', 'rocksdb.DB', (['self._db_name', 'opts'], {}), '(self._db_name, opts)\n', (2287, 2308), False, 'import rocksdb\n'), ((3606, 3645), 'numpy.frombuffer', 'np.frombuffer', (['mod_data'], {'dtype': 'np.uint8'}), '(mod_data, dtype=np.uint8)\n', (3619, 3645), True, 'import numpy as np\n'), ((4755, 4795), 'ont_fast5_api.fast5_interface.get_fast5_file', 'get_fast5_file', (['fast5_filepath'], {'mode': '"""r"""'}), "(fast5_filepath, mode='r')\n", (4769, 4795), False, 'from ont_fast5_api.fast5_interface import get_fast5_file\n'), ((6053, 6064), 'os.getpid', 'os.getpid', ([], {}), '()\n', (6062, 6064), False, 'import os\n'), ((6813, 6825), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (6823, 6825), True, 'import multiprocessing as mp\n'), ((6866, 6880), 'multiprocessing.Pool', 'mp.Pool', 
(['procs'], {}), '(procs)\n', (6873, 6880), True, 'import multiprocessing as mp\n'), ((1921, 1950), 'rocksdb.BloomFilterPolicy', 'rocksdb.BloomFilterPolicy', (['(10)'], {}), '(10)\n', (1946, 1950), False, 'import rocksdb\n'), ((2096, 2127), 'rocksdb.LRUCache', 'rocksdb.LRUCache', (['(5 * 1024 ** 3)'], {}), '(5 * 1024 ** 3)\n', (2112, 2127), False, 'import rocksdb\n'), ((2216, 2249), 'rocksdb.LRUCache', 'rocksdb.LRUCache', (['(500 * 1024 ** 2)'], {}), '(500 * 1024 ** 2)\n', (2232, 2249), False, 'import rocksdb\n'), ((7081, 7096), 'itertools.repeat', 'it.repeat', (['(True)'], {}), '(True)\n', (7090, 7096), True, 'import itertools as it\n'), ((7040, 7052), 'itertools.repeat', 'it.repeat', (['q'], {}), '(q)\n', (7049, 7052), True, 'import itertools as it\n'), ((5723, 5748), 'numpy.fromstring', 'np.fromstring', (['seq', '"""|S1"""'], {}), "(seq, '|S1')\n", (5736, 5748), True, 'import numpy as np\n'), ((7360, 7398), 'logging.info', 'log.info', (['"""Fast5 processing is ready."""'], {}), "('Fast5 processing is ready.')\n", (7368, 7398), True, 'import logging as log\n'), ((7479, 7531), 'logging.info', 'log.info', (['"""Stalling... Fast5 processing takes time."""'], {}), "('Stalling... Fast5 processing takes time.')\n", (7487, 7531), True, 'import logging as log\n')] |
###############################################################################
# Version: 1.1
# Last modified on: 3 April, 2016
# Developers: <NAME>
# email: m_(DOT)_epitropakis_(AT)_lancaster_(DOT)_ac_(DOT)_uk
###############################################################################
from builtins import object
import os
import numpy as np
# UNCOMMENT APPROPRIATELY
# MINMAX = 1 # Minimization
MINMAX = -1 # Maximization
class CFunction(object):
    """Base class for composition benchmark functions.

    A composition combines ``nofunc`` component functions in ``dim``
    dimensions.  Subclasses fill in the shift vectors (``_O``), rotation
    matrices (``_M``), stretch factors (``_lambda``), component callables
    (``_function``), biases and sigmas; ``_evaluate_inner`` then mixes the
    components with distance-based weights.
    """
    _dim = -1
    _nofunc = -1
    _C = 2000.0
    _M = None        # list of per-component rotation matrices
    _weight = None   # per-component mixture weights (recomputed per point)
    _fi = None       # per-component raw function values at the current point
    _z = None        # rotated/scaled coordinates of the current point
    _f_bias = 0
    _fmaxi = None    # per-component normalization values f_i(5 * ones)
    _tmpx = None
    def __init__(self, dim, nofunc):
        """Prepare an empty composition of `nofunc` components in `dim`
        dimensions and load the shared optima data file."""
        self._dim = dim
        self._nofunc = nofunc
        self._lbound = []
        self._ubound = []
        self._O = []
        self._lambda = []
        self._function = []
        self._bias = []
        self._sigma = []
        # Load optima
        self.path = os.path.abspath(os.path.dirname(__file__))
        file_path = os.path.join(self.path, "data/optima.dat")
        self.o = np.loadtxt(file_path)
    def function_data_file(self, fn, dim):
        """Path of the rotation-matrix data file for composition `fn` in `dim` dimensions."""
        return os.path.join(self.path, "data/CF{}_M_D{}.dat".format(fn, dim))
    def evaluate(self, x):
        """Evaluate the benchmark at point x; implemented by subclasses."""
        pass
    def get_lbound(self, ivar):
        """Lower bound of variable `ivar` (asserts 0 <= ivar < dim)."""
        # Fixed: the assert message formatted two %d specifiers with a single
        # argument ("..." % ivar, self._dim), which raised TypeError instead
        # of AssertionError when the check failed.
        assert 0 <= ivar < self._dim, \
            "ivar is not in valid variable range: %d not in [0,%d]" % (ivar, self._dim)
        return self._lbound[ivar]
    def get_ubound(self, ivar):
        """Upper bound of variable `ivar` (asserts 0 <= ivar < dim)."""
        assert 0 <= ivar < self._dim, \
            "ivar is not in valid variable range: %d not in [0,%d]" % (ivar, self._dim)
        return self._ubound[ivar]
    def _evaluate_inner(self, x):
        """Weighted mixture of the (normalized, biased) component values at x."""
        if self._function is None:
            raise NameError("Composition functions' dict is uninitialized")
        self._fi = np.zeros(self._nofunc)
        self._calculate_weights(x)
        for i in range(self._nofunc):
            self._transform_to_z(x, i)
            self._fi[i] = self._function[i](self._z)
        tmpsum = np.zeros(self._nofunc)
        for i in range(self._nofunc):
            # scale each component to a common magnitude (C / fmax) and add its bias
            tmpsum[i] = self._weight[i] * (self._C * self._fi[i] / self._fmaxi[i] + self._bias[i])
        return sum(tmpsum) * MINMAX + self._f_bias
    def _calculate_weights(self, x):
        """Gaussian distance-based weights of x to each component optimum,
        sharpened so the closest component dominates, then normalized."""
        self._weight = np.zeros(self._nofunc)
        for i in range(self._nofunc):
            mysum = sum((x-self._O[i])**2)
            self._weight[i] = np.exp(-mysum/(2.0 * self._dim * self._sigma[i] * self._sigma[i]))
        maxw = np.max(self._weight)
        # maxi = self._weight.argmax(axis=0)
        maxw10 = maxw**10
        for i in range(self._nofunc):
            if self._weight[i] != maxw:
                # if i != maxi:
                self._weight[i] = self._weight[i] * (1.0 - maxw10)
        mysum = np.sum(self._weight)
        for i in range(self._nofunc):
            if mysum == 0.0:
                # all weights collapsed to zero: fall back to uniform weights
                self._weight[i] = 1.0 / (1.0 * self._nofunc)
            else:
                self._weight[i] = self._weight[i] / mysum
    def _calculate_fmaxi(self):
        """Normalization constants: each component evaluated (rotated/scaled
        but unshifted) at the point 5*ones."""
        self._fmaxi = np.zeros(self._nofunc)
        if self._function is None:
            raise NameError('Composition functions\' dict is uninitialized')
        x5 = 5 * np.ones(self._dim)
        for i in range(self._nofunc):
            self._transform_to_z_noshift(x5, i)
            self._fmaxi[i] = self._function[i](self._z)
    def _transform_to_z_noshift(self, x, index):
        """z = (x / lambda_index) @ M_index (no shift by the optimum)."""
        tmpx = np.divide(x, self._lambda[index])
        self._z = np.dot(tmpx, self._M[index])
    def _transform_to_z(self, x, index):
        """z = ((x - O_index) / lambda_index) @ M_index."""
        tmpx = np.divide((x - self._O[index]), self._lambda[index])
        self._z = np.dot(tmpx, self._M[index])
    def _load_rotmat(self, fname):
        """Read `nofunc` (dim x dim) rotation matrices from the text file
        `fname` into self._M; matrices are stored as consecutive blocks of
        `dim` whitespace-separated rows."""
        self._M = []
        with open(fname, 'r') as f:
            tmp = np.zeros((self._dim, self._dim))
            cline = 0
            ctmp = 0
            for line in f:
                line = line.split()
                if line:
                    line = [float(i) for i in line]
                    # re initialize array when reached dim
                    if ctmp % self._dim == 0:
                        tmp = np.zeros((self._dim, self._dim))
                        ctmp = 0
                    # add line to tmp
                    tmp[ctmp] = line[:self._dim]
                    # if we loaded self._nofunc * self._dim-1 lines break
                    if cline >= self._nofunc * self._dim-1:
                        break
                    # add array to _M when it is fully created
                    if cline % self._dim == 0:
                        self._M.append(tmp)
                    ctmp = ctmp + 1
                    cline = cline + 1
# Sphere function
def sphere(x):
    """Sphere function: sum of squared coordinates."""
    return np.sum(x ** 2)
# Rastrigin's function
def rastrigin(x):
    """Rastrigin's function."""
    oscillation = 10. * np.cos(2. * np.pi * x)
    return np.sum(x ** 2 - oscillation + 10)
# Griewank's function
def grienwank(x):
    """Griewank's function (original spelling of the name kept)."""
    denominators = np.sqrt(1.0 + np.arange(x.shape[0]))
    quadratic = np.sum(x ** 2) / 4000.0
    oscillatory = np.prod(np.cos(x / denominators))
    return quadratic - oscillatory + 1.0
# Weierstrass's function
def weierstrass(x):
    """Weierstrass's function (alpha=0.5, beta=3, kmax=20)."""
    alpha = 0.5
    beta = 3.0
    kmax = 20
    k = np.arange(kmax + 1)
    amplitudes = alpha ** k
    frequencies = 2.0 * np.pi * beta ** k
    dimensions = x.shape[0]
    # constant offset so the global optimum value is zero at x = 0
    offset = -dimensions * np.sum(amplitudes * np.cos(frequencies * 0.5))
    total = 0
    for i in range(dimensions):
        total += np.sum(amplitudes * np.cos(frequencies * (x[i] + 0.5)))
    return total + offset
def f8f2(x):
    """Griewank-of-Rosenbrock building block on the coordinate pair (x[0], x[1])."""
    rosenbrock = 100.0 * (x[0] ** 2 - x[1]) ** 2 + (1.0 - x[0]) ** 2
    return 1.0 + rosenbrock ** 2 / 4000.0 - np.cos(rosenbrock)
# FEF8F2 function
def fef8f2(x):
    """Expanded F8F2: f8f2 summed over the ring of consecutive coordinate pairs
    (each pair shifted by +1), wrapping from the last coordinate back to the first."""
    dims = x.shape[0]
    total = 0
    for idx in range(dims - 1):
        total += f8f2(x[[idx, idx + 1]] + 1)
    total += f8f2(x[[dims - 1, 0]] + 1)
    return total
| [
"numpy.ones",
"numpy.arange",
"os.path.join",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"os.path.dirname",
"numpy.cos",
"numpy.loadtxt",
"numpy.divide"
] | [((1001, 1043), 'os.path.join', 'os.path.join', (['self.path', '"""data/optima.dat"""'], {}), "(self.path, 'data/optima.dat')\n", (1013, 1043), False, 'import os\n'), ((1061, 1082), 'numpy.loadtxt', 'np.loadtxt', (['file_path'], {}), '(file_path)\n', (1071, 1082), True, 'import numpy as np\n'), ((2031, 2053), 'numpy.zeros', 'np.zeros', (['self._nofunc'], {}), '(self._nofunc)\n', (2039, 2053), True, 'import numpy as np\n'), ((2238, 2260), 'numpy.zeros', 'np.zeros', (['self._nofunc'], {}), '(self._nofunc)\n', (2246, 2260), True, 'import numpy as np\n'), ((2511, 2533), 'numpy.zeros', 'np.zeros', (['self._nofunc'], {}), '(self._nofunc)\n', (2519, 2533), True, 'import numpy as np\n'), ((2727, 2747), 'numpy.max', 'np.max', (['self._weight'], {}), '(self._weight)\n', (2733, 2747), True, 'import numpy as np\n'), ((3014, 3034), 'numpy.sum', 'np.sum', (['self._weight'], {}), '(self._weight)\n', (3020, 3034), True, 'import numpy as np\n'), ((3294, 3316), 'numpy.zeros', 'np.zeros', (['self._nofunc'], {}), '(self._nofunc)\n', (3302, 3316), True, 'import numpy as np\n'), ((3704, 3737), 'numpy.divide', 'np.divide', (['x', 'self._lambda[index]'], {}), '(x, self._lambda[index])\n', (3713, 3737), True, 'import numpy as np\n'), ((3785, 3813), 'numpy.dot', 'np.dot', (['tmpx', 'self._M[index]'], {}), '(tmpx, self._M[index])\n', (3791, 3813), True, 'import numpy as np\n'), ((3917, 3967), 'numpy.divide', 'np.divide', (['(x - self._O[index])', 'self._lambda[index]'], {}), '(x - self._O[index], self._lambda[index])\n', (3926, 3967), True, 'import numpy as np\n'), ((4017, 4045), 'numpy.dot', 'np.dot', (['tmpx', 'self._M[index]'], {}), '(tmpx, self._M[index])\n', (4023, 4045), True, 'import numpy as np\n'), ((5507, 5526), 'numpy.arange', 'np.arange', (['(kmax + 1)'], {}), '(kmax + 1)\n', (5516, 5526), True, 'import numpy as np\n'), ((5824, 5834), 'numpy.cos', 'np.cos', (['f2'], {}), '(f2)\n', (5830, 5834), True, 'import numpy as np\n'), ((954, 979), 'os.path.dirname', 'os.path.dirname', 
(['__file__'], {}), '(__file__)\n', (969, 979), False, 'import os\n'), ((2645, 2713), 'numpy.exp', 'np.exp', (['(-mysum / (2.0 * self._dim * self._sigma[i] * self._sigma[i]))'], {}), '(-mysum / (2.0 * self._dim * self._sigma[i] * self._sigma[i]))\n', (2651, 2713), True, 'import numpy as np\n'), ((3447, 3465), 'numpy.ones', 'np.ones', (['self._dim'], {}), '(self._dim)\n', (3454, 3465), True, 'import numpy as np\n'), ((4158, 4190), 'numpy.zeros', 'np.zeros', (['(self._dim, self._dim)'], {}), '((self._dim, self._dim))\n', (4166, 4190), True, 'import numpy as np\n'), ((5265, 5286), 'numpy.arange', 'np.arange', (['x.shape[0]'], {}), '(x.shape[0])\n', (5274, 5286), True, 'import numpy as np\n'), ((5550, 5569), 'numpy.arange', 'np.arange', (['(kmax + 1)'], {}), '(kmax + 1)\n', (5559, 5569), True, 'import numpy as np\n'), ((5303, 5317), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (5309, 5317), True, 'import numpy as np\n'), ((5333, 5346), 'numpy.cos', 'np.cos', (['(x / i)'], {}), '(x / i)\n', (5339, 5346), True, 'import numpy as np\n'), ((5608, 5624), 'numpy.cos', 'np.cos', (['(c2 * 0.5)'], {}), '(c2 * 0.5)\n', (5614, 5624), True, 'import numpy as np\n'), ((5680, 5705), 'numpy.cos', 'np.cos', (['(c2 * (x[i] + 0.5))'], {}), '(c2 * (x[i] + 0.5))\n', (5686, 5705), True, 'import numpy as np\n'), ((5184, 5207), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * x)'], {}), '(2.0 * np.pi * x)\n', (5190, 5207), True, 'import numpy as np\n'), ((4509, 4541), 'numpy.zeros', 'np.zeros', (['(self._dim, self._dim)'], {}), '((self._dim, self._dim))\n', (4517, 4541), True, 'import numpy as np\n')] |
"""
A module that contains a metaclass mixin that provides GF(2^m) arithmetic using explicit calculation.
"""
import numba
import numpy as np
from ._main import FieldClass, DirMeta
# Module-level placeholders for the field arithmetic used by the jitted
# kernels below.  GF2mMeta._set_globals() rebinds them to the compiled
# "calculate" versions before JIT compilation, and _reset_globals()
# restores these pure-Python defaults afterwards.
MULTIPLY = lambda a, b, *args: a * b
RECIPROCAL = lambda a, *args: 1 / a
class GF2mMeta(FieldClass, DirMeta):
    """
    A metaclass for all GF(2^m) classes.
    """
    # pylint: disable=no-value-for-parameter
    # Need to have a unique cache of "calculate" functions for GF(2^m)
    _FUNC_CACHE_CALCULATE = {}
    def __init__(cls, name, bases, namespace, **kwargs):
        super().__init__(name, bases, namespace, **kwargs)
        cls._prime_subfield = kwargs["prime_subfield"]
        cls.compile(kwargs["compile"])
        # Determine if the irreducible polynomial is primitive
        if cls._is_primitive_poly is None:
            # TODO: Clean this up
            # The check: evaluate the irreducible polynomial at the primitive
            # element using this field's arithmetic; a result of 0 means the
            # primitive element is one of its roots.
            coeffs = cls.irreducible_poly.coeffs.view(np.ndarray).astype(cls.dtypes[-1])
            x = np.array(cls.primitive_element, dtype=cls.dtypes[-1], ndmin=1)
            add = cls._func_python("add")
            multiply = cls._func_python("multiply")
            cls._is_primitive_poly = cls._function_python("poly_evaluate")(coeffs, x, add, multiply, cls.characteristic, cls.degree, cls._irreducible_poly_int)[0] == 0
    def _compile_ufuncs(cls):
        super()._compile_ufuncs()
        # Some explicit calculation functions are faster than using lookup tables. See https://github.com/mhostetter/galois/pull/92#issuecomment-835552639.
        # In GF(2^m), addition and subtraction are both XOR and negation is
        # the identity, so plain NumPy ufuncs can be used directly.
        cls._ufuncs["add"] = np.bitwise_xor
        cls._ufuncs["negative"] = np.positive
        cls._ufuncs["subtract"] = np.bitwise_xor
    def _set_globals(cls, name):
        # Rebind the module-level placeholders to this field's compiled
        # arithmetic before JIT-compiling `name`, since the jitted kernels
        # below reference MULTIPLY/RECIPROCAL as globals.
        global MULTIPLY, RECIPROCAL
        if name in ["reciprocal", "divide", "power", "log"]:
            MULTIPLY = cls._func_calculate("multiply")
        if name in ["divide", "power"]:
            RECIPROCAL = cls._func_calculate("reciprocal")
    def _reset_globals(cls):
        # Restore the pure-Python placeholder implementations.
        global MULTIPLY, RECIPROCAL
        MULTIPLY = cls._func_python("multiply")
        RECIPROCAL = cls._func_python("reciprocal")
    ###############################################################################
    # Arithmetic functions using explicit calculation
    ###############################################################################
    @staticmethod
    def _add_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
        """
        Not actually used. `np.bitwise_xor()` is faster.
        """
        return a ^ b
    @staticmethod
    def _negative_calculate(a, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
        """
        Not actually used. `np.positive()` is faster.
        """
        return a
    @staticmethod
    def _subtract_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
        """
        Not actually used. `np.bitwise_xor()` is faster.
        """
        return a ^ b
    @staticmethod
    @numba.extending.register_jitable
    def _multiply_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
        """
        a in GF(2^m), can be represented as a degree m-1 polynomial a(x) in GF(2)[x]
        b in GF(2^m), can be represented as a degree m-1 polynomial b(x) in GF(2)[x]
        p(x) in GF(2)[x] with degree m is the irreducible polynomial of GF(2^m)
        a * b = c
              = (a(x) * b(x)) % p(x) in GF(2)
              = c(x)
              = c
        """
        ORDER = CHARACTERISTIC**DEGREE
        # Re-order operands such that a > b so the while loop has less loops
        if b > a:
            a, b = b, a
        c = 0
        while b > 0:
            if b & 0b1:
                c ^= a  # Add a(x) to c(x)
            b >>= 1  # Divide b(x) by x
            a <<= 1  # Multiply a(x) by x
            if a >= ORDER:
                a ^= IRREDUCIBLE_POLY  # Compute a(x) % p(x)
        return c
    @staticmethod
    @numba.extending.register_jitable
    def _reciprocal_calculate(a, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
        """
        From Fermat's Little Theorem:
        a in GF(p^m)
        a^(p^m - 1) = 1
        a * a^-1 = 1
        a * a^-1 = a^(p^m - 1)
            a^-1 = a^(p^m - 2)
        """
        if a == 0:
            raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
        ORDER = CHARACTERISTIC**DEGREE
        exponent = ORDER - 2
        # Square-and-multiply exponentiation of a to the power ORDER - 2.
        result_s = a  # The "squaring" part
        result_m = 1  # The "multiplicative" part
        while exponent > 1:
            if exponent % 2 == 0:
                result_s = MULTIPLY(result_s, result_s, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY)
                exponent //= 2
            else:
                result_m = MULTIPLY(result_m, result_s, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY)
                exponent -= 1
        result = MULTIPLY(result_m, result_s, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY)
        return result
    @staticmethod
    @numba.extending.register_jitable
    def _divide_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
        # a / b computed as a * b^-1; division by zero is an error, 0 / b = 0.
        if b == 0:
            raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
        if a == 0:
            return 0
        else:
            b_inv = RECIPROCAL(b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY)
            return MULTIPLY(a, b_inv, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY)
    @staticmethod
    @numba.extending.register_jitable
    def _power_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
        """
        Square and Multiply Algorithm
        a^13 = (1) * (a)^13
             = (a) * (a)^12
             = (a) * (a^2)^6
             = (a) * (a^4)^3
             = (a * a^4) * (a^4)^2
             = (a * a^4) * (a^8)
             = result_m * result_s
        """
        if a == 0 and b < 0:
            raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
        if b == 0:
            return 1
        elif b < 0:
            # Negative exponent: invert the base, then raise to |b|.
            a = RECIPROCAL(a, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY)
            b = abs(b)
        result_s = a  # The "squaring" part
        result_m = 1  # The "multiplicative" part
        while b > 1:
            if b % 2 == 0:
                result_s = MULTIPLY(result_s, result_s, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY)
                b //= 2
            else:
                result_m = MULTIPLY(result_m, result_s, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY)
                b -= 1
        result = MULTIPLY(result_m, result_s, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY)
        return result
    @staticmethod
    @numba.extending.register_jitable
    def _log_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
        """
        TODO: Replace this with more efficient algorithm
        a = α^m
        b is a primitive element of the field
        c = log(a, b)
        a = b^c
        """
        if a == 0:
            raise ArithmeticError("Cannot compute the discrete logarithm of 0 in a Galois field.")
        # Naive algorithm
        # NOTE(review): if no exponent matches (a not in the subgroup
        # generated by b), the final loop index is returned silently —
        # callers appear to rely on b being a generator; confirm.
        ORDER = CHARACTERISTIC**DEGREE
        result = 1
        for i in range(0, ORDER - 1):
            if result == a:
                break
            result = MULTIPLY(result, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY)
        return i
    ###############################################################################
    # Ufuncs written in NumPy operations (not JIT compiled)
    ###############################################################################
    @staticmethod
    def _sqrt(a):
        """
        Fact 3.42 from https://cacr.uwaterloo.ca/hac/about/chap3.pdf.
        """
        field = type(a)
        return a ** (field.characteristic**(field.degree - 1))
| [
"numpy.array"
] | [((958, 1020), 'numpy.array', 'np.array', (['cls.primitive_element'], {'dtype': 'cls.dtypes[-1]', 'ndmin': '(1)'}), '(cls.primitive_element, dtype=cls.dtypes[-1], ndmin=1)\n', (966, 1020), True, 'import numpy as np\n')] |
from keras.backend import expand_dims
from keras.datasets.mnist import load_data
from keras.models import Sequential
from numpy.random import randint
from numpy.random import randn
from numpy import zeros
from numpy import ones
def loadDataset():
    """Load MNIST training images of the digit 8, scaled to [-1, 1].

    Returns a float32 array of shape (n, 28, 28, 1).
    """
    # load mnist dataset (labels of the test split are unused)
    (trainX, trainY), (_, _) = load_data()
    # filter to a single digit (just for testing purposes); do this on the raw
    # NumPy arrays so plain NumPy boolean indexing is used rather than relying
    # on backend-tensor mask indexing
    X = trainX[trainY == 8]
    # expand to 3d, i.e. add channel dimension
    X = expand_dims(X, axis=-1)
    # convert from unsigned ints to floats
    X = X.numpy().astype('float32')
    # convert scale from 0,255 to -1,1
    X = (X - 127.5) / 127.5
    return X
def generateRealTrainingSamples(dataset, sampleNum):
    """Draw sampleNum random real images from dataset, labeled 1 ("real")."""
    chosen = randint(0, dataset.shape[0], sampleNum)
    samples = dataset[chosen]
    labels = ones((sampleNum, 1))
    return samples, labels
def generateFakeTrainingSamples(generator: Sequential, latentDim, sampleNum):
    """Synthesize sampleNum images with the generator, labeled 0 ("fake")."""
    latent_points = generateLatentPoints(latentDim, sampleNum)
    images = generator.predict(latent_points)
    labels = zeros((sampleNum, 1))
    return images, labels
def generateFakeTrainingGanSamples(latentDim, sampleNum):
    """Return sampleNum latent vectors together with all-ones labels."""
    latent_points = generateLatentPoints(latentDim, sampleNum)
    labels = ones((sampleNum, 1))
    return latent_points, labels
def generateLatentPoints(latentDim, sampleNum):
    """Sample sampleNum standard-normal points of dimension latentDim,
    returned as a (sampleNum, latentDim) array."""
    flat = randn(latentDim * sampleNum)
    return flat.reshape((sampleNum, latentDim))
"numpy.ones",
"keras.datasets.mnist.load_data",
"numpy.random.randint",
"numpy.zeros",
"keras.backend.expand_dims",
"numpy.random.randn"
] | [((304, 315), 'keras.datasets.mnist.load_data', 'load_data', ([], {}), '()\n', (313, 315), False, 'from keras.datasets.mnist import load_data\n'), ((371, 399), 'keras.backend.expand_dims', 'expand_dims', (['trainX'], {'axis': '(-1)'}), '(trainX, axis=-1)\n', (382, 399), False, 'from keras.backend import expand_dims\n'), ((726, 765), 'numpy.random.randint', 'randint', (['(0)', 'dataset.shape[0]', 'sampleNum'], {}), '(0, dataset.shape[0], sampleNum)\n', (733, 765), False, 'from numpy.random import randint\n'), ((794, 814), 'numpy.ones', 'ones', (['(sampleNum, 1)'], {}), '((sampleNum, 1))\n', (798, 814), False, 'from numpy import ones\n'), ((1008, 1029), 'numpy.zeros', 'zeros', (['(sampleNum, 1)'], {}), '((sampleNum, 1))\n', (1013, 1029), False, 'from numpy import zeros\n'), ((1164, 1184), 'numpy.ones', 'ones', (['(sampleNum, 1)'], {}), '((sampleNum, 1))\n', (1168, 1184), False, 'from numpy import ones\n'), ((1263, 1291), 'numpy.random.randn', 'randn', (['(latentDim * sampleNum)'], {}), '(latentDim * sampleNum)\n', (1268, 1291), False, 'from numpy.random import randn\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 13:15:24 2021
Animates streams of points/particles given their starting and ending locations
and number of particles in each flow.
@author: Mateusz
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy.random import uniform
from foodwebviz.utils import squeeze_map
# here some global design choices
FPS = 60  # animation frames per second
# time between frames in ms
INTERVAL_BETWEEN_FRAMES = 1000 / FPS
# how long should the animation be in s
ANIMATION_LEGHT = 1  # (misspelling kept: the name is referenced as-is)
# global velocity along the lines so that it loops back on itself
VELOCITY = 0.1  # 1/(5*ANIMATION_LEGHT)
# shared styling for the text boxes drawn on the figure
BOX_PARAMS = {'facecolor': 'white',
              'alpha': 0.7,
              'edgecolor': 'white',
              'pad': 0.1}
def particles_in_one_flow(flows, x1, x2, y1, y2, start_node, max_part, map_fun):
    '''
    Create the particles of a single flow.

    Particles travel from (x1, y1) to (x2, y2); column 'start' records
    start_node so a color can be assigned later.  They are placed uniformly
    at random along the line, with 's' in [0, 1] tracing their progress,
    and jittered perpendicular to the travel direction so that larger
    flows look broader.
    '''
    dx = x2 - x1
    dy = y2 - y1
    # the particle count is normalized by the path length
    n_particles = int(flows * np.sqrt(dx**2 + dy**2) / 20)
    progress = uniform(0, 1, n_particles)
    # larger flows get a broader band of particles
    width = squeeze_map(flows, 1, max_part, map_fun, 0.05, 3)
    origin_x = np.full(n_particles, x1)
    origin_y = np.full(n_particles, y1)
    # jitter perpendicular to the direction of travel
    origin_x += uniform(-width / 2, width / 2, n_particles) if dy != 0.0 else 0.0
    origin_y += uniform(-width / 2, width / 2, n_particles) if dx != 0.0 else 0.0
    return pd.DataFrame({'s': progress,
                         'x': origin_x + progress * dx,
                         'y': origin_y + progress * dy,
                         'x1': origin_x,
                         'y1': origin_y,
                         'lx': dx,
                         'ly': dy,
                         'start': start_node})
def init_particles(network_image, include_imports, include_exports, max_part, map_fun):
    '''
    Given the network image with node positions and the number of particles
    flowing between them, initialize particles for every flow.

    Returns a DataFrame with one row per particle (columns as produced by
    particles_in_one_flow), with the index reset.
    '''
    xs = network_image.nodes.x
    ys = network_image.nodes.y
    # number of particles along a system flow / import / export
    partNumber_sys_flows, partNumber_imports, partNumber_exports = network_image.particle_numbers
    # Collect the per-flow frames and concatenate once at the end.
    # Fixed: DataFrame.append (removed in pandas 2.0, quadratic when chained)
    # is replaced by a single pd.concat.
    frames = []
    for i in xs.index:
        for j in xs.index:
            # first the system flows; we do nothing for zero flows
            if partNumber_sys_flows.loc[i, j] != 0.0:
                frames.append(
                    particles_in_one_flow(partNumber_sys_flows.loc[i, j], xs[i], xs[j], ys[i], ys[j],
                                          start_node=i, max_part=max_part, map_fun=map_fun))
        if include_imports:
            # imports rise vertically from the bottom (y = 0) to the node
            frames.append(
                particles_in_one_flow(partNumber_imports.loc[i], xs[i], xs[i], 0.0, ys[i],
                                      start_node=i, max_part=max_part, map_fun=map_fun))
        if include_exports:
            # exports leave horizontally towards the nearer side of the frame
            frames.append(
                particles_in_one_flow(partNumber_exports.loc[i], xs[i],
                                      0.0 if xs[i] < 50 else 100.0, ys[i], ys[i],
                                      start_node=i, max_part=max_part, map_fun=map_fun))
    particles = pd.concat(frames) if frames else pd.DataFrame()
    return particles.reset_index()
def _get_color_for_trophic_level(df, y, max_luminance, cmap):
# uses only the trophic level to define color on a continuous scale
def set_color(z, minVal, maxVal, max_luminance=0.85):
return np.interp(z, (minVal, maxVal), (0, max_luminance))
return [cmap(x) for x in set_color(df[y], df[y].min(), df[y].max(), max_luminance)]
def assign_colors(particles, netIm, max_luminance, cmap):
    '''
    Color each particle according to its start node's trophic level.

    Side effects: adds/overwrites the 'color' column on both netIm.nodes
    and particles; returns particles.
    '''
    node_colors = _get_color_for_trophic_level(netIm.nodes, 'y', max_luminance, cmap=cmap)
    netIm.nodes['color'] = node_colors
    particles['color'] = particles.apply(
        lambda row: netIm.nodes.loc[row['start'], 'color'], axis='columns')
    return particles
# # RULES TO UPDATE FRAMES
# def getVelocity(row, lx, ly): # get axial velocity along lx given the direction (lx,ly)
# return(VELOCITY*row[lx]/np.sqrt(row[lx]**2+row[ly]**2))
def move_particles(particles, alpha, t, max_width):
    """Advance every particle along its flow line by a time step t.

    Updates the 's', 'x', 'y' and 'alpha' columns of `particles` in place.
    """
    def _import_boost(row):
        # vertical import flows have a short way to go: quadruple their
        # speed so they are noticed, unless they reach high trophic levels
        if row['lx'] == 0 and np.abs(row['ly']) < 20:
            return 4 * VELOCITY
        return 0.0

    def _fade(x, max_width):
        # Translate the position s in [0, 1] along the edge into an alpha
        # (transparency) factor: 1 at both ends, dipping towards the middle,
        # with the floor and the exponent adapted to max_width as a proxy
        # for the complexity of the network.
        min_alpha = 1 / max_width
        exponent_correction = 2 * int(max_width / 8)
        value = min([1, (np.abs(x - 0.5)**(2 + exponent_correction)) * 2**(2 + exponent_correction)])
        return max([value, min_alpha])

    # advance s (constant velocity plus the import boost) and wrap into [0, 1]
    particles['s'] = (particles['s'] + VELOCITY * t +
                      particles.apply(_import_boost, axis='columns') * t) % 1
    # translate progress back into x/y coordinates
    particles['x'] = particles['x1'] + particles['lx'] * particles['s']
    particles['y'] = particles['y1'] + particles['ly'] * particles['s']
    # fade particles that are far from both the source and the target
    particles['alpha'] = alpha * particles['s'].apply(_fade, max_width=max_width)
# adds a vertex in position x,y with biomass b to axis ax, given the largest biomass maxBio
def _add_vertex(row, ax, min_bio, max_bio, r_min, r_max, font_size,
                alpha, map_fun=np.log10, list_of_abbrev=None):
    """Draw one node as a circle with a text label on axis *ax*.

    The circle radius is the node biomass squeezed (by *map_fun*, e.g.
    log10) into [r_min, r_max].  Names longer than 16 characters are
    abbreviated and the mapping "abbrev = original" is appended to
    *list_of_abbrev* so the caller can render a legend.

    BUG FIX: *list_of_abbrev* previously defaulted to a mutable ``[]``
    that was shared across every call relying on the default; it now
    defaults to ``None`` and a fresh list is created per call.
    """
    if list_of_abbrev is None:
        list_of_abbrev = []
    radius = squeeze_map(row['Biomass'], min_bio, max_bio, map_fun, r_min, r_max)
    name = row['Names'].replace('PrimProd', '').strip()
    if len(name) > 16:
        old_name = name
        # abbreviate long names: keep at most 3 characters per word
        name = ' '.join(map(lambda x: x if len(x) <= 3 else f'{x[:3]}.', name.split(' ')))
        list_of_abbrev.append(f'{name} = {old_name}\n')
    # alternate labels above/below the node to reduce overlap
    vert_shift = 0.08 if (row['x_rank'] % 2) == 1 or (row['TrophicLevel'] == 1.0 and font_size > 20) else -0.1
    txt = plt.text(max(row['x'] - len(name) * 0.03 * font_size, 1),
                   min(row['y'] + np.sign(vert_shift) * radius + vert_shift * font_size, 98),
                   name,
                   fontsize=font_size)
    txt.set_bbox(BOX_PARAMS)
    ax.add_patch(plt.Circle((row['x'], row['y']), radius, color=row['color'], alpha=alpha))
def add_vertices(ax, yDf, r_min, r_max, font_size, alpha, map_fun=np.log10):
    '''
    adds circular vertices to the axis ax and a legend of abbreviated names

    BUG FIX: the previous code called ``yDf.sort_values(by='x')`` and
    ``yDf.sort_values(by='y')`` and discarded both results —
    ``sort_values`` returns a new frame, so the sorting never took effect.
    The sorted frame is now kept ('y' primary, 'x' secondary, matching the
    apparent intent of sorting by 'x' and then by 'y').
    '''
    yDf = yDf.sort_values(by=['y', 'x'])
    list_of_abbrev = []
    yDf.apply(_add_vertex,
              axis='columns',
              ax=ax,
              r_min=r_min,
              r_max=r_max,
              min_bio=np.min(yDf['Biomass']),
              max_bio=np.max(yDf['Biomass']),
              font_size=font_size,
              alpha=alpha,
              map_fun=map_fun,
              list_of_abbrev=list_of_abbrev)
    list_of_abbrev.sort()
    abbrev_leg = plt.text(100, 17, f"Abbreviations used:\n{''.join(list_of_abbrev)}",
                          fontsize=font_size,
                          horizontalalignment='right',
                          verticalalignment='bottom')
    abbrev_leg.set_bbox(BOX_PARAMS)
def create_layer(frame, particles, netIm, alpha, t=INTERVAL_BETWEEN_FRAMES, max_width=8, particle_size=2):
    """Render one animation layer: advance the particles and scatter them.

    Parameters
    ----------
    frame : int
        Frame index; unused here, kept for the animation callback signature.
    particles : pandas.DataFrame
        Particle state, mutated in place by ``move_particles``.
    netIm :
        Network image holder; unused here, kept for signature compatibility.
    alpha : float
        Base particle opacity.
    t : float
        Time step between frames.
    max_width : float
        Largest edge width, used to tune the fading profile.
    particle_size : float
        Scatter marker size.
    """
    def add_transparency_to_color(particle_row):
        '''
        set transparency within RGBA colours
        '''
        new_color = list(particle_row['color'])
        # continuous cmaps have longer tuple with alpha as the fourth item:
        new_color[3] = particle_row['alpha']
        return tuple(new_color)
    move_particles(particles, alpha, t, max_width)
    plt.scatter(particles['x'], particles['y'],
                s=particle_size,
                # make particles fade except when around their target or start nodes
                c=particles.apply(add_transparency_to_color, axis='columns'),
                # BUG FIX: edgecolors was a set literal {'none'}; matplotlib
                # expects a color spec, so pass the string 'none' directly
                edgecolors='none')
    plt.xlim(0, 100)
    plt.ylim(0, 100)
    plt.gca().axis('off')
| [
"numpy.abs",
"matplotlib.pyplot.Circle",
"numpy.sqrt",
"foodwebviz.utils.squeeze_map",
"matplotlib.pyplot.gca",
"numpy.min",
"numpy.max",
"numpy.sign",
"numpy.interp",
"numpy.random.uniform",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim"
] | [((1375, 1402), 'numpy.random.uniform', 'uniform', (['(0)', '(1)', 'flow_density'], {}), '(0, 1, flow_density)\n', (1382, 1402), False, 'from numpy.random import uniform\n'), ((1532, 1581), 'foodwebviz.utils.squeeze_map', 'squeeze_map', (['flows', '(1)', 'max_part', 'map_fun', '(0.05)', '(3)'], {}), '(flows, 1, max_part, map_fun, 0.05, 3)\n', (1543, 1581), False, 'from foodwebviz.utils import squeeze_map\n'), ((1872, 2011), 'pandas.DataFrame', 'pd.DataFrame', (["{'s': s, 'x': x1_new + s * lx, 'y': y1_new + s * ly, 'x1': x1_new, 'y1':\n y1_new, 'lx': lx, 'ly': ly, 'start': start_node}"], {}), "({'s': s, 'x': x1_new + s * lx, 'y': y1_new + s * ly, 'x1':\n x1_new, 'y1': y1_new, 'lx': lx, 'ly': ly, 'start': start_node})\n", (1884, 2011), True, 'import pandas as pd\n'), ((2657, 2671), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2669, 2671), True, 'import pandas as pd\n'), ((6435, 6503), 'foodwebviz.utils.squeeze_map', 'squeeze_map', (["row['Biomass']", 'min_bio', 'max_bio', 'map_fun', 'r_min', 'r_max'], {}), "(row['Biomass'], min_bio, max_bio, map_fun, r_min, r_max)\n", (6446, 6503), False, 'from foodwebviz.utils import squeeze_map\n'), ((9052, 9068), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (9060, 9068), True, 'import matplotlib.pyplot as plt\n'), ((9074, 9090), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(100)'], {}), '(0, 100)\n', (9082, 9090), True, 'import matplotlib.pyplot as plt\n'), ((1709, 1753), 'numpy.random.uniform', 'uniform', (['(-width / 2)', '(width / 2)', 'flow_density'], {}), '(-width / 2, width / 2, flow_density)\n', (1716, 1753), False, 'from numpy.random import uniform\n'), ((1791, 1835), 'numpy.random.uniform', 'uniform', (['(-width / 2)', '(width / 2)', 'flow_density'], {}), '(-width / 2, width / 2, flow_density)\n', (1798, 1835), False, 'from numpy.random import uniform\n'), ((3922, 3972), 'numpy.interp', 'np.interp', (['z', '(minVal, maxVal)', '(0, max_luminance)'], {}), '(z, (minVal, 
maxVal), (0, max_luminance))\n', (3931, 3972), True, 'import numpy as np\n'), ((7190, 7263), 'matplotlib.pyplot.Circle', 'plt.Circle', (["(row['x'], row['y'])", 'radius'], {'color': "row['color']", 'alpha': 'alpha'}), "((row['x'], row['y']), radius, color=row['color'], alpha=alpha)\n", (7200, 7263), True, 'import matplotlib.pyplot as plt\n'), ((7653, 7675), 'numpy.min', 'np.min', (["yDf['Biomass']"], {}), "(yDf['Biomass'])\n", (7659, 7675), True, 'import numpy as np\n'), ((7700, 7722), 'numpy.max', 'np.max', (["yDf['Biomass']"], {}), "(yDf['Biomass'])\n", (7706, 7722), True, 'import numpy as np\n'), ((9096, 9105), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9103, 9105), True, 'import matplotlib.pyplot as plt\n'), ((1337, 1363), 'numpy.sqrt', 'np.sqrt', (['(lx ** 2 + ly ** 2)'], {}), '(lx ** 2 + ly ** 2)\n', (1344, 1363), True, 'import numpy as np\n'), ((4932, 4949), 'numpy.abs', 'np.abs', (["row['ly']"], {}), "(row['ly'])\n", (4938, 4949), True, 'import numpy as np\n'), ((7016, 7035), 'numpy.sign', 'np.sign', (['vert_shift'], {}), '(vert_shift)\n', (7023, 7035), True, 'import numpy as np\n'), ((5488, 5503), 'numpy.abs', 'np.abs', (['(x - 0.5)'], {}), '(x - 0.5)\n', (5494, 5503), True, 'import numpy as np\n')] |
import numpy as np
import torch
from .features_implementation import FeaturesImplementation
class PyTorchFeatures(FeaturesImplementation):
    """Feature map backed by a list of PyTorch modules, one per feature."""

    def __init__(self, tensor_list, device=None):
        # one torch module per output feature
        self._phi = tensor_list
        self._device = device

    def __call__(self, *args):
        # concatenate the inputs and promote to a 2-D batch tensor
        batch = torch.from_numpy(np.atleast_2d(self._concatenate(args)))
        # evaluate every feature module and stack along a trailing axis
        stacked = torch.stack([module.forward(batch) for module in self._phi], dim=-1)
        out = stacked.detach().numpy()
        # a single sample is returned without the batch dimension
        return out[0] if out.shape[0] == 1 else out

    @property
    def size(self):
        """Number of feature modules."""
        return len(self._phi)
| [
"numpy.atleast_2d",
"torch.stack"
] | [((457, 484), 'torch.stack', 'torch.stack', (['y_list'], {'dim': '(-1)'}), '(y_list, dim=-1)\n', (468, 484), False, 'import torch\n'), ((352, 368), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (365, 368), True, 'import numpy as np\n')] |
import os
import cv2
import numpy as np
import tensorflow as tf
from keras.models import load_model
from styx_msgs.msg import TrafficLight
class TLClassifier(object):
    """Two-stage traffic-light classifier: a frozen TF object-detection
    graph localizes a traffic-light box, then a small Keras model
    classifies the cropped light's color."""
    def __init__(self):
        # all model files are loaded relative to this module's directory
        cwd = os.path.dirname(os.path.realpath(__file__))
        # load the keras Lenet model from the tl_classifier.h5 file
        self.class_model = load_model(cwd+'/tl_classifier.h5')
        # keep the graph keras loaded into, so predict() can re-enter it later
        self.class_graph = tf.get_default_graph()
        # separate detection graph for finding traffic-light boxes in images
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            grahDef = tf.GraphDef()
            with open(cwd+"/frozen_inference_graph.pb", 'rb') as file:
                grahDef.ParseFromString(file.read())
                tf.import_graph_def(grahDef, name="" )
            # one long-lived session reused for every classification call
            self.session = tf.Session(graph=self.detection_graph)
            # cache the tensor handles used by session.run
            self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
            self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
            self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
            self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
            self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
        # maps the classifier's argmax index (0/1/2) to the ROS message enum
        self.tl_classes = [ TrafficLight.RED, TrafficLight.YELLOW, TrafficLight.GREEN ]
    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight);
                TrafficLight.UNKNOWN when no plausible light box is found
        """
        light_classification = TrafficLight.UNKNOWN
        box = None
        with self.detection_graph.as_default():
            # swap R and B channels (COLOR_RGB2BGR; the swap is symmetric,
            # so this works for either incoming channel order)
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            # the detector expects a batch dimension
            tf_image_input = np.expand_dims(image,axis=0)
            # run the detector to get candidate boxes
            (detection_boxes, detection_scores, detection_classes, num_detections) = self.session.run(
                [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
                feed_dict={self.image_tensor: tf_image_input})
            detection_boxes = np.squeeze(detection_boxes)
            detection_classes = np.squeeze(detection_classes)
            detection_scores = np.squeeze(detection_scores)
            # class 10 is "traffic light" in the COCO label map used by this
            # detector; keep boxes with score > 0.3. Note: there is no break,
            # so the LAST qualifying detection wins.
            for i, detection_class in enumerate(detection_classes.tolist()):
                if detection_class == 10 and detection_scores[i] > 0.3:
                    dim = image.shape[0:2]
                    height, width = dim[0], dim[1]
                    # boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels
                    box = np.array([int(detection_boxes[i][0]*height), int(detection_boxes[i][1]*width),
                        int(detection_boxes[i][2]*height), int(detection_boxes[i][3]*width)])
                    box_h = box[2] - box[0]
                    box_w = box[3] - box[1]
                    # too small to be a traffic light
                    if box_h < 20 or box_w < 20:
                        box = None
        if box is None:
            return light_classification
        # crop the detected region and resize to 32x32, the input shape the
        # keras classifier was trained on
        class_image = cv2.resize(image[box[0]:box[2], box[1]:box[3]], (32,32))
        img_resize = np.expand_dims(class_image, axis=0).astype('float32')
        with self.class_graph.as_default():
            predict = self.class_model.predict(img_resize)
            light_classification = self.tl_classes[np.argmax(predict)]
        return light_classification
| [
"tensorflow.Graph",
"keras.models.load_model",
"tensorflow.Session",
"numpy.argmax",
"tensorflow.GraphDef",
"numpy.squeeze",
"os.path.realpath",
"cv2.cvtColor",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"cv2.resize",
"tensorflow.get_default_graph"
] | [((374, 411), 'keras.models.load_model', 'load_model', (["(cwd + '/tl_classifier.h5')"], {}), "(cwd + '/tl_classifier.h5')\n", (384, 411), False, 'from keras.models import load_model\n'), ((437, 459), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (457, 459), True, 'import tensorflow as tf\n'), ((576, 586), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (584, 586), True, 'import tensorflow as tf\n'), ((3719, 3776), 'cv2.resize', 'cv2.resize', (['image[box[0]:box[2], box[1]:box[3]]', '(32, 32)'], {}), '(image[box[0]:box[2], box[1]:box[3]], (32, 32))\n', (3729, 3776), False, 'import cv2\n'), ((250, 276), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (266, 276), False, 'import os\n'), ((657, 670), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (668, 670), True, 'import tensorflow as tf\n'), ((878, 916), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph'}), '(graph=self.detection_graph)\n', (888, 916), True, 'import tensorflow as tf\n'), ((2029, 2067), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (2041, 2067), False, 'import cv2\n'), ((2098, 2127), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2112, 2127), True, 'import numpy as np\n'), ((2496, 2523), 'numpy.squeeze', 'np.squeeze', (['detection_boxes'], {}), '(detection_boxes)\n', (2506, 2523), True, 'import numpy as np\n'), ((2556, 2585), 'numpy.squeeze', 'np.squeeze', (['detection_classes'], {}), '(detection_classes)\n', (2566, 2585), True, 'import numpy as np\n'), ((2617, 2645), 'numpy.squeeze', 'np.squeeze', (['detection_scores'], {}), '(detection_scores)\n', (2627, 2645), True, 'import numpy as np\n'), ((811, 848), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['grahDef'], {'name': '""""""'}), "(grahDef, name='')\n", (830, 848), True, 'import tensorflow as tf\n'), ((3798, 3833), 'numpy.expand_dims', 
'np.expand_dims', (['class_image'], {'axis': '(0)'}), '(class_image, axis=0)\n', (3812, 3833), True, 'import numpy as np\n'), ((4007, 4025), 'numpy.argmax', 'np.argmax', (['predict'], {}), '(predict)\n', (4016, 4025), True, 'import numpy as np\n')] |
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import cv2 as cv
import numpy as np
import os
class ROI_transforms(object):
    """Random augmentation pipeline that turns a stored defect ROI mask into
    a new synthetic gray-level defect mask of fixed size ``self.size``."""
    def __init__(self, size=(128, 128)):
        # NOTE(review): super(ROI_transforms).__init__() is missing `self`,
        # so it is a no-op; harmless here because the base class is object.
        super(ROI_transforms).__init__()
        self.size = size
        # total pixel count of the output mask
        self.area = self.size[0] * self.size[1]
        # maximum allowed foreground fraction ("prospect" — attribute name
        # keeps the original typo for compatibility)
        self.prosepect_pmax = 0.1
        self.morph_method = [cv.MORPH_DILATE, cv.MORPH_ERODE, cv.MORPH_OPEN, cv.MORPH_CLOSE]
        # gray-value assignment strategies used for binary masks
        self.gray_mask_method = [self.rand_mask, self.gauss_mask, self.const_mask]
    def transform_bit(self, x):
        """Augment a binary mask: tight-crop, random rotate, optionally
        scale and apply morphology, then pad/crop to size and re-binarize."""
        x = self.get_shape_mask(x)
        x = self.rotate(x, angle=[0, 359])
        if random.random() > 0.5:
            x = self.scale(x, [0.5, 2], [0.5, 2])
        if random.random() > 0.5:
            x = self.random_morphological_processing(x, k_size=[3, 11])
        # x = self.align_img(x)
        x = self.crop_or_pad(x)
        _, x = cv.threshold(x, 20, 255, cv.THRESH_BINARY)
        return x
    def transform_gray(self, x):
        """Augment a grayscale mask: tight-crop, random rotate, optionally
        scale, then pad/crop to size (gray values preserved)."""
        x = self.get_shape_mask(x)
        x = self.rotate(x, angle=[0, 359])
        if random.random() > 0.5:
            x = self.scale(x, [0.5, 2], [0.5, 2])
        # x = self.align_img(x)
        x = self.crop_or_pad(x)
        return x
    def get_new_roi(self, mask):
        """
        Generate a new augmented gray-level defect mask from ``mask``.
        :param mask: (ndarray.uint8)[Height, Width]
        :return: uint8 mask of shape ``self.size``
        """
        # decide whether the mask is binary or grayscale: if it has pixels
        # with gray values in the (20, 230) range, treat it as grayscale
        _, mask_dist = cv.threshold(mask, 20, 255, cv.THRESH_TOZERO)
        _, mask_dist = cv.threshold(mask_dist, 230, 255, cv.THRESH_TOZERO_INV)
        if np.count_nonzero(mask_dist) < 5:
            # binary mask branch
            # 1. augment the mask shape
            mask = self.transform_bit(mask)
            # 2. assign gray values in a random [low, high) band
            mask = mask.astype(np.float32) / 255
            low = np.random.randint(0, 150)
            high = low + np.random.randint(50, 105)
            # NOTE(review): np.random.randint's upper bound is exclusive, so
            # with 3 strategies only index 0 or 1 is ever picked —
            # const_mask is never selected; confirm whether intended.
            mask = self.gray_mask_method[np.random.randint(0, len(self.gray_mask_method) - 1)](mask, low, high)
            if np.random.random() < 0.7:
                # smooth the random noise with a random odd kernel
                k = np.random.randint(1, 5) * 2 + 1
                cv.GaussianBlur(mask, (k, k), k, mask)
        else:
            # grayscale mask branch: augment the shape, then apply a random
            # linear gain/offset transform to the gray values
            mask = self.transform_gray(mask)
            scale = 0.8 + 0.4 * np.random.rand()
            offset = np.random.randint(-10, 10)
            cv.convertScaleAbs(mask, mask, scale, offset)
        mask = mask.astype(np.uint8)
        # if mask.shape != self.size:
        #     cv.imshow("1", mask)
        #     cv.imshow("2", self.crop_or_pad(mask))
        #     cv.waitKey(0)
        return mask
    def get_shape_mask(self, x):
        """Tight-crop ``x`` to the bounding box of its nonzero pixels and
        shrink it until the foreground stays under the allowed fraction.
        Returns a small random white rectangle when ``x`` is nearly empty."""
        if np.count_nonzero(x) < 20:
            return np.ones((np.random.randint(5, 15), np.random.randint(5, 15)), dtype=np.uint8) * 255
        Row = np.argwhere(np.sum(x, axis=0) != 0)
        Col = np.argwhere(np.sum(x, axis=1) != 0)
        x = x[np.min(Col): np.max(Col) + 1, np.min(Row): np.max(Row) + 1]
        # limit the number of foreground pixels
        while np.count_nonzero(x) > self.area * self.prosepect_pmax:
            scale = np.random.random()
            scale = scale if scale > 0.5 else 0.5
            x = cv.resize(src=x, dsize=(int(x.shape[1]*scale), int(x.shape[0]*scale)), interpolation=cv.INTER_NEAREST)
        return x
    # rotation
    def rotate(self, x, angle=0):
        """Rotate ``x`` by ``angle`` degrees (or a random angle drawn from
        ``[angle[0], angle[1])`` when a list is given); pads first so nothing
        is clipped, then tight-crops again."""
        H, W = x.shape
        if isinstance(angle, list):
            assert len(angle) == 2
            angle = np.random.randint(angle[0], angle[1])
        x = np.pad(x, ((W//2, W//2), (H//2, H//2)), mode="constant", constant_values=0)
        H, W = x.shape
        m = cv.getRotationMatrix2D((W//2, H//2), angle, scale=1)
        x = cv.warpAffine(x, m, (x.shape[1], x.shape[0]))
        x = self.get_shape_mask(x)
        return x
    # morphological processing
    def random_morphological_processing(self, x, k_size=3):
        """Apply one random morphological op (dilate/erode/open/close) with
        an elliptical kernel; returns the input when the result is almost
        empty."""
        if isinstance(k_size, list):
            k_size = np.random.randint(k_size[0], k_size[1])
        # force an odd kernel size
        k_size = k_size // 2 * 2 + 1
        element = cv.getStructuringElement(cv.MORPH_ELLIPSE, (k_size, k_size))
        param = {"src": x, "kernel": element}
        param["op"] = self.morph_method[random.randint(0, len(self.morph_method) - 1)]
        y = cv.morphologyEx(**param)
        if np.sum(y)//255 < 10:
            return x
        return y
    # scaling
    def scale(self, x, scaleX_factor=1, scaleY_factor=1):
        """Rescale ``x`` by the given (or randomly drawn) per-axis factors.

        NOTE(review): cv.resize cannot reshape its dst argument in place
        from Python when the size changes, so the returned ``x`` may be the
        original, unscaled array — verify against a sample run.
        """
        if isinstance(scaleX_factor, list):
            assert len(scaleX_factor) == 2
            scaleX_factor = scaleX_factor[0] + (scaleX_factor[1] - scaleX_factor[0]) * np.random.rand()
        if isinstance(scaleY_factor, list):
            assert len(scaleY_factor) == 2
            scaleY_factor = scaleY_factor[0] + (scaleY_factor[1] - scaleY_factor[0]) * np.random.rand()
        cv.resize(x, (int(x.shape[1] * scaleX_factor), int(x.shape[0] * scaleY_factor)), x,
                  interpolation=cv.INTER_LINEAR)
        return x
    # restore to target size
    # def align_img(self, x):
    #     # if np.random.random() < 0.2:
    #     #     x = self.resize(x)
    #     # else:
    #     #     x = self.crop_or_pad(x)
    #     #
    #     x = self.crop_or_pad(x)
    #     cv.threshold(x, 20, 255, cv.THRESH_BINARY, x)
    #     return x
    def resize(self, x):
        """Resize via np.resize (repeats/truncates data, no interpolation)."""
        x = np.resize(x, self.size)
        return x
    def crop_or_pad(self, x):
        """Randomly crop or zero-pad ``x`` to ``self.size``; retries when the
        crop lands on an almost-black region, falling back to np.resize
        after 5 failed attempts."""
        y = None
        cnt = 0
        while y is None or np.sum(y)//255 < 10:
            H = x.shape[0] - self.size[0]
            W = x.shape[1] - self.size[1]
            if H < 0:
                H = -H
                pad_top = random.randint(0, H)
                y = np.pad(x, ((pad_top, H - pad_top), (0, 0)), mode="constant", constant_values=0)
            else:
                crop_top = random.randint(0, H)
                y = x[crop_top: crop_top + self.size[0]]
            if W < 0:
                W = -W
                pad_left = random.randint(0, W)
                y = np.pad(y, ((0, 0), (pad_left, W - pad_left)), mode="constant", constant_values=0)
            else:
                crop_left = random.randint(0, W)
                y = y[:, crop_left: crop_left + self.size[1]]
            # the crop sometimes lands on an all-black region; count the
            # failures and fall back to a plain resize
            if np.sum(y)//255 < 10:
                cnt += 1
                if cnt >= 5:
                    return np.resize(x, self.size).astype(np.uint8)
        return y
    # random gray values for the mask
    def rand_mask(self, mask, low, high):
        """Fill the mask with per-pixel uniform random grays in [low, high)."""
        gray_mask = np.random.randint(low, high, mask.shape) * mask
        return gray_mask
    def gauss_mask(self, mask, low, high):
        """Modulate the mask with a 2-D Gaussian profile rescaled to span
        [low, high], then pad/crop to the target size."""
        mask = self.get_shape_mask(mask)
        gauss_x = cv.getGaussianKernel(mask.shape[1], mask.shape[1])
        gauss_y = cv.getGaussianKernel(mask.shape[0], mask.shape[0])
        # outer product of the two 1-D kernels gives the 2-D profile
        kyx = np.multiply(gauss_y, np.transpose(gauss_x))
        mask = mask * kyx
        Max = np.max(mask)
        # minimum over the nonzero region only
        Min = np.min(np.where(mask == 0, Max, mask))
        gray_mask = low + (mask - Min) / (Max - Min) * (high - low)
        gray_mask = np.where(gray_mask > 0, gray_mask, 0)
        gray_mask = self.crop_or_pad(gray_mask)
        return gray_mask
    def const_mask(self, mask, *args):
        """Fill the mask with one random constant gray value."""
        gray_mask = mask * np.random.randint(0, 255)
        return gray_mask
# def genRandImg1(size,mask):
# path = './gg'
# picture_rand = os.listdir(path)
# len_rand_picture = len(picture_rand)
# x = random.randint(0, len_rand_picture - 1)
# name_image = picture_rand[x]
# picture = cv.imread(path + '/' + name_image, 0)
# # print(type(picture))
# picture = cv.resize(picture, (128,128))
# # print(picture)
# # _, mask_pict = cv.threshold(picture, 150, 255, cv.THRESH_BINARY)
# #
# # cv2.imshow('image',mask_pict)
# # cv2.waitKey()
# picture = picture.astype(np.float)
# return picture
def get_new_image(img, gray_mask):
    """Blend a synthetic gray-level defect mask into an image.

    With probability ~0.2 the defect gray values simply cover the image
    ("paste"); otherwise the defect modulates the underlying pixel
    intensities.  Returns a uint8 image of the same shape as ``img``.
    """
    defect = gray_mask.astype(np.float32)
    # 0/1 indicator of the defect region
    region = np.where(defect > 0, 1, 0)
    background = img * (1 - region)
    if random.random() > 0.8:
        # hard cover: paste the defect gray values on top of the image
        blended = background + defect * region
    else:
        # modulate: scale the original intensity by the defect gray value
        blended = background + region * img * (1 + (defect - 127.5) / 127.5)
    return np.clip(blended, 0, 255).astype(np.uint8)
def smooth_edge(new_img, mask):
    """Soften the seam between a pasted defect and the background.

    Builds a thin band around the defect contour (dilated minus eroded
    binary mask) and, inside that band only, replaces the image with its
    Gaussian-blurred version.  Returns a uint8 image.
    """
    _, binary = cv.threshold(mask, 1, 255, cv.THRESH_BINARY)
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5))
    dilated = cv.morphologyEx(binary, cv.MORPH_DILATE, kernel)
    eroded = cv.morphologyEx(binary, cv.MORPH_ERODE, kernel)
    # 0..1 weight of the contour band
    band = ((dilated - eroded) / 255).astype(np.float32)
    blurred = cv.GaussianBlur(new_img, (5, 5), 5)
    merged = new_img * (1 - band) + blurred * band
    return merged.astype(np.uint8)
class DefectiveGenerator(object):
    """Synthesizes defective training images by blending randomly augmented
    defect ROIs (loaded from disk) with random noise into clean images."""
    def __init__(self,dir_Database,shape_Img,Limit_ROI=(20,10000),withDatabase=True):
        """
        :param dir_Database: directory holding the defect ROI images
        :param shape_Img: image size [height, width]
        :param Limit_ROI: ROI bounding-box size limits [lower, upper]
        :param withDatabase: True: load ROIs from disk; False: generate ROIs
            algorithmically.  NOTE(review): currently unused in this class.
        """
        self.dir_Database=dir_Database
        self.height_Img = shape_Img[0]
        self.width_Img=shape_Img[1]
        self.lowerLimit_ROI=Limit_ROI[0]
        self.upperLimit_ROI=Limit_ROI[1]
        #
        self.roi_transform = ROI_transforms()
        # load the ROI file list from the database directory
        self.names_ROIs,self.num_ROIs=self.loadROIs(self.dir_Database)
        if self.num_ROIs<1:
            print("the dataset is empty!")
    def loadROIs(self,dir):
        """Recursively collect .bmp/.PNG mask file paths under ``dir``.
        :return: (list of file paths, count)
        """
        # ROIs=os.listdir(dir)
        # walk the directory tree recursively
        ROIs = list()
        for root, dirs, files in os.walk(dir):
            # print(root)
            for file in files:
                if file.endswith(".bmp") or file.endswith(".PNG"):
                    ROIs.append(os.path.join(root, file))
        num_ROI=len(ROIs)
        # message: "number of local ROIs used: {}"
        print('采用本地ROI个数为{}'.format(num_ROI))
        return ROIs,num_ROI
    def genRandImg(self,size):
        """Generate a random-noise image: uniform integers around a random
        mean with a random fluctuation range; returned as float.
        NOTE(review): np.float is removed in NumPy>=1.24 — use float there."""
        mean=random.randint(-125,125)
        fluct=random.randint(1,100)
        low=mean-fluct #+(mean-fluct<0)*abs(mean-fluct)
        high=mean+fluct #-(mean+fluct>255)*abs(255-(mean+fluct))
        img=np.random.randint(low,high,size)
        img=img.astype(np.float)
        return img
    def genRandImg1(self,size):
        """Pick a random texture image from ./gg, load it as grayscale and
        resize it to 128x128.  NOTE(review): ``size`` is ignored here."""
        path = './gg'
        picture_rand = os.listdir(path)
        len_rand_picture = len(picture_rand)
        x = random.randint(0, len_rand_picture - 1)
        name_image = picture_rand[x]
        picture = cv.imread(path + '/' + name_image, 0)
        # print(type(picture))
        picture = cv.resize(picture, (128,128))
        # cv.imshow('image',picture)
        # cv.waitKey()
        # print(picture)
        # _, mask_pict = cv.threshold(picture, 150, 255, cv.THRESH_BINARY)
        #
        # cv.imshow('image',picture)
        # cv.waitKey(5000)
        # picture = picture.astype(np.float)
        # cv.imshow('image',picture)
        # cv.waitKey(5000)
        return picture
    def apply(self,img):
        """Blend one randomly augmented defect ROI into ``img``.
        :param img: grayscale image of shape (height_Img, width_Img)
        :return: (defective image as uint8, binary 0/1 defect mask as uint8)
        """
        ROI=self.randReadROI()
        # grayscale mask processing:
        #   1. extract the minimal bounding rectangle
        #   2. random rotation and scaling
        #   3. restore to the target size
        # binary mask processing (ROI augmentation):
        #   1. extract the minimal bounding rectangle of the shape
        #   2. random rotation and scaling
        #   3. morphological processing
        #   4. restore to the target size
        # get_new_roi returns a gray-level ROI; binarize it to a 0/1 mask
        ROI_new = self.roi_transform.get_new_roi(ROI)
        ROI_new = np.where(ROI_new > 0, 1, 0).astype(np.uint8)
        #
        # cv.imshow('mask',ROI_new)
        # cv.waitKey(5000)
        # randomly choose the fill source: pure noise or a texture image
        randd = random.randint(0, 1)
        if randd == 0:
            img_rand = self.genRandImg([self.height_Img, self.width_Img])
        else:
            img_rand = self.genRandImg1([self.height_Img, self.width_Img])
        img_new = img.astype(np.float)
        # randomly choose the blend mode: replace the region or add on top
        rand = random.randint(0, 1)
        if rand == 0:
            img_new = img_new * (1 - ROI_new) + img_rand * ROI_new
        else:
            img_new = img_new + img_rand * ROI_new
        # img_new = img_new * (1 - ROI_new) + img_rand * ROI_new
        img_new = np.clip(img_new, 0, 255).astype(np.uint8)
        # ROI_new = (ROI_new * 255).astype(np.uint8)
        # cv.imshow('mask',img_new)
        # cv.imshow('mask', img_rand)
        # cv.waitKey(5000)
        return img_new, ROI_new
        # img_new = get_new_image(img, ROI_new)
        #
        # img_new = smooth_edge(img_new, ROI_new)
        # cv.imshow("img", img)
        # cv.imshow("ROI", ROI)
        # cv.imshow("ROI_new", ROI_new)
        # cv.imshow("img_new", img_new)
        # cv.waitKey(0)
        #
        # img_rand=self.genRandImg([self.height_Img, self.width_Img])
        # img_new=img.astype(np.float)
        # rand = np.random.randint(0, 1)
        # if rand==0:
        #     img_new=img_new*(1-ROI_new)+img_rand*ROI_new
        # else:
        #     img_new = img_new + img_rand * ROI_new
        # img_new = img_new * (1 - ROI_new) + img_rand * ROI_new
        # img_new=np.clip(img_new, 0, 255).astype(np.uint8)
        # ROI_new=(ROI_new*255).astype(np.uint8)
        # return img_new, ROI_new
    def randReadROI(self):
        """Keep sampling random mask files until one with enough foreground
        (sum > 5 after low-value suppression) is found; returns the mask."""
        while(1):
            rand = random.randint(0, self.num_ROIs - 1)
            name_Img = self.names_ROIs[rand]
            img_Label = cv.imread(name_Img, 0)
            # zero out near-black pixels in place
            cv.threshold(img_Label, 20, 255, cv.THRESH_TOZERO, img_Label)
            if np.sum(img_Label) > 5:
                return img_Label
    def randVaryROI(self,ROI):
        """Placeholder hook: returns the ROI unchanged."""
        return ROI
    # def randMoveROI(self,ROI):
    #     # size of the image domain
    #     Height_Domain = self.height_Img
    #     Width_Domain= self.width_Img
    #     # coordinates of the ROI region
    #     Rows,Cols = np.nonzero(ROI)
    #     # bounding-rectangle size of the ROI region
    #     Width_ROI=np.max(Cols)-np.min(Cols)
    #     Height_ROI=np.max(Rows)-np.min(Rows)
    #     # random top-left starting coordinate for the ROI
    #     Row_Upleft=random.randint(0,Height_Domain-Height_ROI-1)
    #     Col_Upleft = random.randint(0, Width_Domain - Width_ROI-1)
    #     Rows=Rows-np.min(Rows)+Row_Upleft
    #     Cols=Cols-np.min(Cols)+Col_Upleft
    #     ROI_new=np.zeros([Height_Domain,Width_Domain])
    #     ROI_new[Rows,Cols]=1
    #     return ROI_new
    # def genRandImg(self,size):
    #     mean=random.randint(-125,125)
    #     fluct=random.randint(1,100)
    #     low=mean-fluct    #+(mean-fluct<0)*abs(mean-fluct)
    #     high=mean+fluct   #-(mean+fluct>255)*abs(255-(mean+fluct))
    #     img=np.random.randint(low,high,size)
    #     img=img.astype(np.float)
    #     #
    #     return img
class RepairDataset(BaseDataset):
"""
"""
    def __init__(self, opt):
        """Initialize this dataset class.
        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')  # create a path '/path/to/data/trainA'
        self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))   # load images from '/path/to/data/trainA'
        self.A_size = len(self.A_paths)  # get the size of dataset A
        self.B_size = self.A_size  # dataset B mirrors A (pairs are generated on the fly)
        btoA = self.opt.direction == 'BtoA'
        input_nc = self.opt.output_nc if btoA else self.opt.input_nc       # get the number of channels of input image
        output_nc = self.opt.input_nc if btoA else self.opt.output_nc      # get the number of channels of output image
        # augmentation transform applied before defect synthesis (train only)
        self.transform=self.get_transform()
        self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
        self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
        # defect synthesizer; ztb1 data uses 128x128 images
        self.defectGen =DefectiveGenerator("../datasets/masks", (128, 128))
        self.phase=opt.phase
    def __getitem__(self, index):
        """Return a data point and its metadata information.
        Parameters:
            index (int) -- a random integer for data indexing
        Returns a dictionary that contains A, B, A_paths (and B_mask in test
        phase):
            A (tensor)       -- an image in the input domain (defective or clean)
            B (tensor)       -- its corresponding image in the target domain
            A_paths (str)    -- image paths
            B_paths (str)    -- image paths
        """
        A_path = self.A_paths[index % self.A_size]  # make sure index is within then range
        if self.opt.serial_batches:   # make sure index is within then range
            index_B = index % self.B_size
        else:   # randomize the index for domain B to avoid fixed pairs.
            # NOTE(review): index_B is computed but never used below — confirm
            # whether domain-B sampling was intentionally dropped.
            index_B = random.randint(0, self.B_size - 1)
        # print(A_img.size)
        # if self.phase=="train":
        #     B_img = Image.open(A_path).convert('L')
        #     # positive/negative sampling:
        #     if np.random.random() < 0.5:
        #         B_img = self.transform(B_img)
        #         A_img,_ = self.defectGen.apply((np.array(B_img)))
        #         A_img=Image.fromarray(A_img)
        #     # apply image transformation
        #     A = self.transform_A(A_img)
        #     B = self.transform_B(B_img)
        #     return {'A': A, 'B': B, 'A_paths': A_path}
        if self.phase == "train":
            B_img = Image.open(A_path).convert('L')
            B_img = self.transform(B_img)
            # alternate defective (even index) and clean (odd index) samples
            if index % 2 == 0:
                A_img, _ = self.defectGen.apply((np.array(B_img)))
                # cv.imshow('image',A_img)
                # cv.waitKey(5000)
                A_img = Image.fromarray(A_img)
                # apply image transformation
                A = self.transform_A(A_img)
                B = self.transform_B(B_img)
                # defective sample
                return {'A': A, 'B': B, 'A_paths': A_path}
            else:
                B = self.transform_B(B_img)
                # positive (clean) sample: A equals B
                return {'A': B, 'B': B, 'A_paths': A_path}
        elif self.phase=="test":
            A_img = Image.open(A_path).convert('L')
            A = self.transform_A(A_img)
            # B_mask_path=A_path.replace("testA","mask")
            # dirname,fname=os.path.split(B_mask_path)
            # B_mask_path=os.path.join(dirname,"groundT_"+fname)
            # B_mask= Image.open(B_mask_path).convert('L')
            # B_mask = self.transform_A(B_mask)
            # the test set has no mask; use the original image instead
            B_mask = A
            return {'A': A, 'B': A, 'A_paths': A_path,'B_mask': B_mask}
    def __len__(self):
        """Return the total number of images in the dataset, i.e. the size
        of image set A (set B mirrors A in this dataset)."""
        return self.A_size
def get_transform(self):
import torchvision.transforms as transforms
l=[]
l.append(transforms.RandomHorizontalFlip())
l.append(transforms.RandomVerticalFlip())
l.append(transforms.Resize([128, 128]))
# l.append(transforms.RandomResizedCrop( 256, scale=(0.8, 1.0), ratio=(3. / 4., 4. / 3.)))
return transforms.Compose(l) | [
"numpy.clip",
"cv2.convertScaleAbs",
"numpy.random.rand",
"numpy.count_nonzero",
"numpy.array",
"os.walk",
"os.listdir",
"numpy.where",
"cv2.threshold",
"numpy.random.random",
"numpy.max",
"cv2.getGaussianKernel",
"numpy.resize",
"data.base_dataset.get_transform",
"numpy.min",
"data.ba... | [((7939, 7968), 'numpy.where', 'np.where', (['(gray_mask > 0)', '(1)', '(0)'], {}), '(gray_mask > 0, 1, 0)\n', (7947, 7968), True, 'import numpy as np\n'), ((8506, 8550), 'cv2.threshold', 'cv.threshold', (['mask', '(1)', '(255)', 'cv.THRESH_BINARY'], {}), '(mask, 1, 255, cv.THRESH_BINARY)\n', (8518, 8550), True, 'import cv2 as cv\n'), ((8565, 8615), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(5, 5)'], {}), '(cv.MORPH_ELLIPSE, (5, 5))\n', (8589, 8615), True, 'import cv2 as cv\n'), ((8634, 8681), 'cv2.morphologyEx', 'cv.morphologyEx', (['mask', 'cv.MORPH_DILATE', 'element'], {}), '(mask, cv.MORPH_DILATE, element)\n', (8649, 8681), True, 'import cv2 as cv\n'), ((8699, 8745), 'cv2.morphologyEx', 'cv.morphologyEx', (['mask', 'cv.MORPH_ERODE', 'element'], {}), '(mask, cv.MORPH_ERODE, element)\n', (8714, 8745), True, 'import cv2 as cv\n'), ((8836, 8871), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['new_img', '(5, 5)', '(5)'], {}), '(new_img, (5, 5), 5)\n', (8851, 8871), True, 'import cv2 as cv\n'), ((975, 1017), 'cv2.threshold', 'cv.threshold', (['x', '(20)', '(255)', 'cv.THRESH_BINARY'], {}), '(x, 20, 255, cv.THRESH_BINARY)\n', (987, 1017), True, 'import cv2 as cv\n'), ((1518, 1563), 'cv2.threshold', 'cv.threshold', (['mask', '(20)', '(255)', 'cv.THRESH_TOZERO'], {}), '(mask, 20, 255, cv.THRESH_TOZERO)\n', (1530, 1563), True, 'import cv2 as cv\n'), ((1587, 1642), 'cv2.threshold', 'cv.threshold', (['mask_dist', '(230)', '(255)', 'cv.THRESH_TOZERO_INV'], {}), '(mask_dist, 230, 255, cv.THRESH_TOZERO_INV)\n', (1599, 1642), True, 'import cv2 as cv\n'), ((3579, 3666), 'numpy.pad', 'np.pad', (['x', '((W // 2, W // 2), (H // 2, H // 2))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(x, ((W // 2, W // 2), (H // 2, H // 2)), mode='constant',\n constant_values=0)\n", (3585, 3666), True, 'import numpy as np\n'), ((3690, 3746), 'cv2.getRotationMatrix2D', 'cv.getRotationMatrix2D', (['(W // 2, H // 2)', 'angle'], {'scale': 
'(1)'}), '((W // 2, H // 2), angle, scale=1)\n', (3712, 3746), True, 'import cv2 as cv\n'), ((3755, 3800), 'cv2.warpAffine', 'cv.warpAffine', (['x', 'm', '(x.shape[1], x.shape[0])'], {}), '(x, m, (x.shape[1], x.shape[0]))\n', (3768, 3800), True, 'import cv2 as cv\n'), ((4079, 4139), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(k_size, k_size)'], {}), '(cv.MORPH_ELLIPSE, (k_size, k_size))\n', (4103, 4139), True, 'import cv2 as cv\n'), ((4285, 4309), 'cv2.morphologyEx', 'cv.morphologyEx', ([], {}), '(**param)\n', (4300, 4309), True, 'import cv2 as cv\n'), ((5323, 5346), 'numpy.resize', 'np.resize', (['x', 'self.size'], {}), '(x, self.size)\n', (5332, 5346), True, 'import numpy as np\n'), ((6669, 6719), 'cv2.getGaussianKernel', 'cv.getGaussianKernel', (['mask.shape[1]', 'mask.shape[1]'], {}), '(mask.shape[1], mask.shape[1])\n', (6689, 6719), True, 'import cv2 as cv\n'), ((6738, 6788), 'cv2.getGaussianKernel', 'cv.getGaussianKernel', (['mask.shape[0]', 'mask.shape[0]'], {}), '(mask.shape[0], mask.shape[0])\n', (6758, 6788), True, 'import cv2 as cv\n'), ((6888, 6900), 'numpy.max', 'np.max', (['mask'], {}), '(mask)\n', (6894, 6900), True, 'import numpy as np\n'), ((7044, 7081), 'numpy.where', 'np.where', (['(gray_mask > 0)', 'gray_mask', '(0)'], {}), '(gray_mask > 0, gray_mask, 0)\n', (7052, 7081), True, 'import numpy as np\n'), ((8120, 8135), 'random.random', 'random.random', ([], {}), '()\n', (8133, 8135), False, 'import random\n'), ((9846, 9858), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (9853, 9858), False, 'import os\n'), ((10188, 10213), 'random.randint', 'random.randint', (['(-125)', '(125)'], {}), '(-125, 125)\n', (10202, 10213), False, 'import random\n'), ((10227, 10249), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (10241, 10249), False, 'import random\n'), ((10385, 10419), 'numpy.random.randint', 'np.random.randint', (['low', 'high', 'size'], {}), '(low, high, size)\n', (10402, 10419), 
True, 'import numpy as np\n'), ((10548, 10564), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (10558, 10564), False, 'import os\n'), ((10622, 10661), 'random.randint', 'random.randint', (['(0)', '(len_rand_picture - 1)'], {}), '(0, len_rand_picture - 1)\n', (10636, 10661), False, 'import random\n'), ((10717, 10754), 'cv2.imread', 'cv.imread', (["(path + '/' + name_image)", '(0)'], {}), "(path + '/' + name_image, 0)\n", (10726, 10754), True, 'import cv2 as cv\n'), ((10804, 10834), 'cv2.resize', 'cv.resize', (['picture', '(128, 128)'], {}), '(picture, (128, 128))\n', (10813, 10834), True, 'import cv2 as cv\n'), ((11687, 11707), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (11701, 11707), False, 'import random\n'), ((11948, 11968), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (11962, 11968), False, 'import random\n'), ((14929, 14960), 'data.base_dataset.BaseDataset.__init__', 'BaseDataset.__init__', (['self', 'opt'], {}), '(self, opt)\n', (14949, 14960), False, 'from data.base_dataset import BaseDataset, get_transform\n'), ((14982, 15025), 'os.path.join', 'os.path.join', (['opt.dataroot', "(opt.phase + 'A')"], {}), "(opt.dataroot, opt.phase + 'A')\n", (14994, 15025), False, 'import os\n'), ((15675, 15723), 'data.base_dataset.get_transform', 'get_transform', (['self.opt'], {'grayscale': '(input_nc == 1)'}), '(self.opt, grayscale=input_nc == 1)\n', (15688, 15723), False, 'from data.base_dataset import BaseDataset, get_transform\n'), ((15753, 15802), 'data.base_dataset.get_transform', 'get_transform', (['self.opt'], {'grayscale': '(output_nc == 1)'}), '(self.opt, grayscale=output_nc == 1)\n', (15766, 15802), False, 'from data.base_dataset import BaseDataset, get_transform\n'), ((19139, 19160), 'torchvision.transforms.Compose', 'transforms.Compose', (['l'], {}), '(l)\n', (19157, 19160), True, 'import torchvision.transforms as transforms\n'), ((717, 732), 'random.random', 'random.random', ([], {}), '()\n', (730, 
732), False, 'import random\n'), ((801, 816), 'random.random', 'random.random', ([], {}), '()\n', (814, 816), False, 'import random\n'), ((1158, 1173), 'random.random', 'random.random', ([], {}), '()\n', (1171, 1173), False, 'import random\n'), ((1654, 1681), 'numpy.count_nonzero', 'np.count_nonzero', (['mask_dist'], {}), '(mask_dist)\n', (1670, 1681), True, 'import numpy as np\n'), ((1862, 1887), 'numpy.random.randint', 'np.random.randint', (['(0)', '(150)'], {}), '(0, 150)\n', (1879, 1887), True, 'import numpy as np\n'), ((2391, 2417), 'numpy.random.randint', 'np.random.randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (2408, 2417), True, 'import numpy as np\n'), ((2451, 2496), 'cv2.convertScaleAbs', 'cv.convertScaleAbs', (['mask', 'mask', 'scale', 'offset'], {}), '(mask, mask, scale, offset)\n', (2469, 2496), True, 'import cv2 as cv\n'), ((2755, 2774), 'numpy.count_nonzero', 'np.count_nonzero', (['x'], {}), '(x)\n', (2771, 2774), True, 'import numpy as np\n'), ((3089, 3108), 'numpy.count_nonzero', 'np.count_nonzero', (['x'], {}), '(x)\n', (3105, 3108), True, 'import numpy as np\n'), ((3164, 3182), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3180, 3182), True, 'import numpy as np\n'), ((3528, 3565), 'numpy.random.randint', 'np.random.randint', (['angle[0]', 'angle[1]'], {}), '(angle[0], angle[1])\n', (3545, 3565), True, 'import numpy as np\n'), ((3984, 4023), 'numpy.random.randint', 'np.random.randint', (['k_size[0]', 'k_size[1]'], {}), '(k_size[0], k_size[1])\n', (4001, 4023), True, 'import numpy as np\n'), ((6493, 6533), 'numpy.random.randint', 'np.random.randint', (['low', 'high', 'mask.shape'], {}), '(low, high, mask.shape)\n', (6510, 6533), True, 'import numpy as np\n'), ((6824, 6845), 'numpy.transpose', 'np.transpose', (['gauss_x'], {}), '(gauss_x)\n', (6836, 6845), True, 'import numpy as np\n'), ((6922, 6952), 'numpy.where', 'np.where', (['(mask == 0)', 'Max', 'mask'], {}), '(mask == 0, Max, mask)\n', (6930, 6952), True, 'import numpy as 
np\n'), ((7222, 7247), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (7239, 7247), True, 'import numpy as np\n'), ((8398, 8422), 'numpy.clip', 'np.clip', (['new_img', '(0)', '(255)'], {}), '(new_img, 0, 255)\n', (8405, 8422), True, 'import numpy as np\n'), ((13299, 13335), 'random.randint', 'random.randint', (['(0)', '(self.num_ROIs - 1)'], {}), '(0, self.num_ROIs - 1)\n', (13313, 13335), False, 'import random\n'), ((13405, 13427), 'cv2.imread', 'cv.imread', (['name_Img', '(0)'], {}), '(name_Img, 0)\n', (13414, 13427), True, 'import cv2 as cv\n'), ((13440, 13501), 'cv2.threshold', 'cv.threshold', (['img_Label', '(20)', '(255)', 'cv.THRESH_TOZERO', 'img_Label'], {}), '(img_Label, 20, 255, cv.THRESH_TOZERO, img_Label)\n', (13452, 13501), True, 'import cv2 as cv\n'), ((15096, 15142), 'data.image_folder.make_dataset', 'make_dataset', (['self.dir_A', 'opt.max_dataset_size'], {}), '(self.dir_A, opt.max_dataset_size)\n', (15108, 15142), False, 'from data.image_folder import make_dataset\n'), ((16733, 16767), 'random.randint', 'random.randint', (['(0)', '(self.B_size - 1)'], {}), '(0, self.B_size - 1)\n', (16747, 16767), False, 'import random\n'), ((18891, 18924), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (18922, 18924), True, 'import torchvision.transforms as transforms\n'), ((18943, 18974), 'torchvision.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', ([], {}), '()\n', (18972, 18974), True, 'import torchvision.transforms as transforms\n'), ((18993, 19022), 'torchvision.transforms.Resize', 'transforms.Resize', (['[128, 128]'], {}), '([128, 128])\n', (19010, 19022), True, 'import torchvision.transforms as transforms\n'), ((1913, 1939), 'numpy.random.randint', 'np.random.randint', (['(50)', '(105)'], {}), '(50, 105)\n', (1930, 1939), True, 'import numpy as np\n'), ((2067, 2085), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2083, 2085), True, 
'import numpy as np\n'), ((2186, 2224), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['mask', '(k, k)', 'k', 'mask'], {}), '(mask, (k, k), k, mask)\n', (2201, 2224), True, 'import cv2 as cv\n'), ((2910, 2927), 'numpy.sum', 'np.sum', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2916, 2927), True, 'import numpy as np\n'), ((2960, 2977), 'numpy.sum', 'np.sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (2966, 2977), True, 'import numpy as np\n'), ((4321, 4330), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (4327, 4330), True, 'import numpy as np\n'), ((5631, 5651), 'random.randint', 'random.randint', (['(0)', 'H'], {}), '(0, H)\n', (5645, 5651), False, 'import random\n'), ((5672, 5751), 'numpy.pad', 'np.pad', (['x', '((pad_top, H - pad_top), (0, 0))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(x, ((pad_top, H - pad_top), (0, 0)), mode='constant', constant_values=0)\n", (5678, 5751), True, 'import numpy as np\n'), ((5797, 5817), 'random.randint', 'random.randint', (['(0)', 'H'], {}), '(0, H)\n', (5811, 5817), False, 'import random\n'), ((5947, 5967), 'random.randint', 'random.randint', (['(0)', 'W'], {}), '(0, W)\n', (5961, 5967), False, 'import random\n'), ((5988, 6073), 'numpy.pad', 'np.pad', (['y', '((0, 0), (pad_left, W - pad_left))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(y, ((0, 0), (pad_left, W - pad_left)), mode='constant',\n constant_values=0)\n", (5994, 6073), True, 'import numpy as np\n'), ((6116, 6136), 'random.randint', 'random.randint', (['(0)', 'W'], {}), '(0, W)\n', (6130, 6136), False, 'import random\n'), ((11553, 11580), 'numpy.where', 'np.where', (['(ROI_new > 0)', '(1)', '(0)'], {}), '(ROI_new > 0, 1, 0)\n', (11561, 11580), True, 'import numpy as np\n'), ((12207, 12231), 'numpy.clip', 'np.clip', (['img_new', '(0)', '(255)'], {}), '(img_new, 0, 255)\n', (12214, 12231), True, 'import numpy as np\n'), ((13517, 13534), 'numpy.sum', 'np.sum', (['img_Label'], {}), '(img_Label)\n', (13523, 13534), True, 'import numpy as np\n'), 
((17617, 17639), 'PIL.Image.fromarray', 'Image.fromarray', (['A_img'], {}), '(A_img)\n', (17632, 17639), False, 'from PIL import Image\n'), ((2353, 2369), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2367, 2369), True, 'import numpy as np\n'), ((2998, 3009), 'numpy.min', 'np.min', (['Col'], {}), '(Col)\n', (3004, 3009), True, 'import numpy as np\n'), ((3028, 3039), 'numpy.min', 'np.min', (['Row'], {}), '(Row)\n', (3034, 3039), True, 'import numpy as np\n'), ((4622, 4638), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4636, 4638), True, 'import numpy as np\n'), ((4813, 4829), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4827, 4829), True, 'import numpy as np\n'), ((5455, 5464), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (5461, 5464), True, 'import numpy as np\n'), ((6254, 6263), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (6260, 6263), True, 'import numpy as np\n'), ((17343, 17361), 'PIL.Image.open', 'Image.open', (['A_path'], {}), '(A_path)\n', (17353, 17361), False, 'from PIL import Image\n'), ((17497, 17512), 'numpy.array', 'np.array', (['B_img'], {}), '(B_img)\n', (17505, 17512), True, 'import numpy as np\n'), ((2138, 2161), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (2155, 2161), True, 'import numpy as np\n'), ((2809, 2833), 'numpy.random.randint', 'np.random.randint', (['(5)', '(15)'], {}), '(5, 15)\n', (2826, 2833), True, 'import numpy as np\n'), ((2835, 2859), 'numpy.random.randint', 'np.random.randint', (['(5)', '(15)'], {}), '(5, 15)\n', (2852, 2859), True, 'import numpy as np\n'), ((3011, 3022), 'numpy.max', 'np.max', (['Col'], {}), '(Col)\n', (3017, 3022), True, 'import numpy as np\n'), ((3041, 3052), 'numpy.max', 'np.max', (['Row'], {}), '(Row)\n', (3047, 3052), True, 'import numpy as np\n'), ((10016, 10040), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (10028, 10040), False, 'import os\n'), ((18069, 18087), 'PIL.Image.open', 'Image.open', 
(['A_path'], {}), '(A_path)\n', (18079, 18087), False, 'from PIL import Image\n'), ((6356, 6379), 'numpy.resize', 'np.resize', (['x', 'self.size'], {}), '(x, self.size)\n', (6365, 6379), True, 'import numpy as np\n')] |
""" TensorMONK :: layers :: CarryResidue """
__all__ = ["ResidualOriginal", "ResidualComplex", "ResidualInverted",
"ResidualShuffle", "ResidualNeXt",
"SEResidualComplex", "SEResidualNeXt",
"SimpleFire", "CarryModular", "DenseBlock",
"ContextNet_Bottleneck", "SeparableConvolution", "MBBlock"]
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .convolution import Convolution
from ..activations import Activations
from ..regularizations import DropOut
from .utils import check_strides, check_residue, update_kwargs, compute_flops
from copy import deepcopy
import random
def drop_connect(tensor: torch.Tensor, p: float) -> torch.Tensor:
    r"""Randomly zero out whole samples of a batch (drop-connect) and rescale
    the survivors by 1 / (1 - p) so the expected value is unchanged.

    Args:
        tensor: input of shape (n, ...) - dropped per sample along dim 0.
        p: drop probability in [0, 1).
    """
    n = tensor.size(0)
    # Bernoulli(1 - p) mask per sample: floor(U[0, 1) + 1 - p) is 1 w.p. 1 - p
    keep = (torch.rand(n, dtype=tensor.dtype) + 1 - p).floor()
    if keep.sum() == 0:
        # guarantee at least one surviving sample in the batch
        keep[random.randint(0, n - 1)] = 1
    keep = keep.view(-1, *([1] * (tensor.dim() - 1))).to(tensor.device)
    return tensor / (1 - p) * keep
class SEBlock(nn.Module):
    r"""Squeeze-and-Excitation (https://arxiv.org/pdf/1709.01507.pdf):
    global average pool, 1x1 channel reduction by r with ReLU, 1x1 channel
    expansion with sigmoid, then a channel-wise rescale of the input.

    Args:
        tensor_size: expected input size (batch, channels, height, width)
        r: channel reduction ratio, default = 16
    """

    def __init__(self, tensor_size, r=16, **kwargs):
        super(SEBlock, self).__init__()
        msg = "x".join(["_"] + [str(x) for x in tensor_size[1:]]) + " += "
        # 1x1 conv weights: squeeze (C -> C//r) and excitation (C//r -> C)
        self.squeeze = nn.Parameter(torch.randn(
            tensor_size[1] // r, tensor_size[1], 1, 1))
        self.excitation = nn.Parameter(torch.randn(
            tensor_size[1], tensor_size[1] // r, 1, 1))
        nn.init.kaiming_uniform_(self.squeeze)
        nn.init.kaiming_uniform_(self.excitation)
        msg += "(pool -> conv({}) -> ".format(
            "x".join(map(str, self.squeeze.shape))) + "relu -> "
        msg += "conv({}) -> ".format(
            "x".join(map(str, self.excitation.shape))) + "sigm)"
        self.show_msg = msg
        self.r = r
        self.tensor_size = tensor_size

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        # squeeze the spatial map to 1x1, then compute channel attention
        pooled = F.avg_pool2d(tensor, tensor.shape[2:])
        attn = F.relu(F.conv2d(pooled, self.squeeze, None))
        attn = torch.sigmoid(F.conv2d(attn, self.excitation, None))
        return tensor * attn

    def flops(self):
        # two 1x1 convs on the pooled map + pool and per-element rescale
        return self.tensor_size[1] * self.r * 2 + \
            np.prod(self.tensor_size[1:]) * 2

    def __repr__(self):
        return self.show_msg
# =========================================================================== #
class ResidualOriginal(nn.Module):
    r""" Residual block with two 3x3 convolutions - used in ResNet18 and
    ResNet34. All args are similar to Convolution.

    Extra args:
        dropconnect: when True, dropout probability p is used to drop whole
            samples (drop_connect) after the convolutions instead of DropOut
        seblock: when True, applies SEBlock after the second convolution
        r: channel reduction ratio for seblock, default = 16
    """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=1, weight_nm=False, equalized=False,
                 shift=False, bias=False, dropblock=False,
                 dropconnect=False, seblock=False, r=16, **kwargs):
        super(ResidualOriginal, self).__init__()
        self.is_dropconnect = dropconnect
        self.p = dropout
        # dropconnect replaces standard dropout (DropOut gets probability 0)
        dropout = 0. if dropconnect else dropout
        self.dropout = DropOut(tensor_size, dropout, dropblock, **kwargs)
        # keep an untouched copy of kwargs for Activations before
        # update_kwargs fills in convolution defaults
        kwgs = deepcopy(kwargs)
        kwargs = update_kwargs(kwargs, None, None, None, None, True,
                               activation, 0., normalization, pre_nm, None,
                               weight_nm, equalized, shift, bias)
        self.Block1 = Convolution(tensor_size, filter_size, out_channels,
                                  strides, **kwargs)
        self.Block2 = Convolution(self.Block1.tensor_size, filter_size,
                                  out_channels, 1, **kwargs)
        if seblock:
            self.seblock = SEBlock(self.Block2.tensor_size, r)
        # 1x1 convolution on the shortcut when stride/channels change shape
        if check_residue(strides, tensor_size, out_channels):
            if not pre_nm:
                # post-norm: activation is applied after the addition instead
                kwargs["activation"] = ""
            self.edit_residue = Convolution(tensor_size, 1, out_channels,
                                            strides, **kwargs)
        if not pre_nm and activation is not None:
            if activation.lower() in Activations.available():
                self.activation = Activations(self.Block2.tensor_size,
                                              activation.lower(), **kwgs)
        self.tensor_size = self.Block2.tensor_size

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        if self.dropout is not None:  # for dropout
            tensor = self.dropout(tensor)
        residue = self.edit_residue(tensor) if hasattr(self, "edit_residue") \
            else tensor
        tensor = self.Block2(self.Block1(tensor))
        if hasattr(self, "seblock"):
            tensor = self.seblock(tensor)
        if self.is_dropconnect and self.p > 0.:
            tensor = drop_connect(tensor, self.p)
        tensor = tensor + residue
        # post-norm activation applied after the residual addition
        if hasattr(self, "activation"):
            tensor = self.activation(tensor)
        return tensor

    def flops(self):
        # sub-module flops + one addition per output element (residue)
        return compute_flops(self) + np.prod(self.tensor_size[1:])
# =========================================================================== #
class ResidualComplex(nn.Module):
    r"""Bottleneck Residual block with 1x1 (out_channels//4), 3x3
    (out_channels//4), and 1x1 (out_channels) convolution - used in ResNet50,
    ResNet101, and ResNet152. All args are similar to Convolution.

    Extra args:
        dropconnect: when True, dropout probability p is used to drop whole
            samples (drop_connect) after the convolutions instead of DropOut
        seblock: when True, applies SEBlock after the last convolution
        r: channel reduction ratio for seblock, default = 16
    """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=1, weight_nm=False, equalized=False,
                 shift=False, bias=False, dropblock=False,
                 dropconnect=False, seblock=False, r=16, **kwargs):
        super(ResidualComplex, self).__init__()
        self.is_dropconnect = dropconnect
        self.p = dropout
        # dropconnect replaces standard dropout (DropOut gets probability 0)
        dropout = 0. if dropconnect else dropout
        self.dropout = DropOut(tensor_size, dropout, dropblock, **kwargs)
        # untouched copy for Activations before update_kwargs fills defaults
        kwgs = deepcopy(kwargs)
        kwargs = update_kwargs(kwargs, None, None, None, None, True,
                               activation, 0., normalization, pre_nm, None,
                               weight_nm, equalized, shift, bias)
        self.Block1 = Convolution(tensor_size, 1, out_channels//4, strides,
                                  **kwargs)
        self.Block2 = \
            Convolution(self.Block1.tensor_size, filter_size, out_channels//4,
                        1, groups=groups, **kwargs)
        if not pre_nm:
            # post-norm: the last conv has no activation; it is applied
            # after the residual addition
            kwargs["activation"] = ""
        self.Block3 = \
            Convolution(self.Block2.tensor_size, 1, out_channels, 1, **kwargs)
        if seblock:
            self.seblock = SEBlock(self.Block3.tensor_size, r)
        # 1x1 convolution on the shortcut when stride/channels change shape
        if check_residue(strides, tensor_size, out_channels):
            self.edit_residue = Convolution(tensor_size, 1, out_channels,
                                            strides, **kwargs)
        if not pre_nm and activation is not None:
            if activation.lower() in Activations.available():
                self.activation = Activations(self.Block3.tensor_size,
                                              activation.lower(), **kwgs)
        self.tensor_size = self.Block3.tensor_size

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        if self.dropout is not None:  # for dropout
            tensor = self.dropout(tensor)
        residue = self.edit_residue(tensor) if hasattr(self, "edit_residue") \
            else tensor
        tensor = self.Block3(self.Block2(self.Block1(tensor)))
        if hasattr(self, "seblock"):
            tensor = self.seblock(tensor)
        if self.is_dropconnect and self.p > 0.:
            tensor = drop_connect(tensor, self.p)
        tensor = tensor + residue
        # post-norm activation applied after the residual addition
        if hasattr(self, "activation"):
            tensor = self.activation(tensor)
        return tensor

    def flops(self):
        # sub-module flops + one addition per output element (residue)
        return compute_flops(self) + np.prod(self.tensor_size[1:])
# =========================================================================== #
class SEResidualComplex(nn.Module):
    r"""Bottleneck Residual block with squeeze and excitation added. All args
    are similar to Convolution.
    Implemented - https://arxiv.org/pdf/1709.01507.pdf
    Args:
        r: channel reduction factor, default = 16
    """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=1, weight_nm=False, equalized=False,
                 shift=False, bias=False, dropblock=False, r=16, **kwargs):
        super(SEResidualComplex, self).__init__()
        self.dropout = DropOut(tensor_size, dropout, dropblock, **kwargs)
        # untouched copy for Activations before update_kwargs fills defaults
        kwgs = deepcopy(kwargs)
        kwargs = update_kwargs(kwargs, None, None, None, None, True,
                               activation, 0., normalization, pre_nm, None,
                               weight_nm, equalized, shift, bias)
        self.Block1 = Convolution(tensor_size, 1, out_channels//4, strides,
                                  **kwargs)
        self.Block2 = \
            Convolution(self.Block1.tensor_size, filter_size, out_channels//4,
                        1, groups=groups, **kwargs)
        if not pre_nm:
            # post-norm: activation applied after the residual addition
            kwargs["activation"] = ""
        self.Block3 = \
            Convolution(self.Block2.tensor_size, 1, out_channels, 1, **kwargs)
        # SE branch: global pool -> 1x1 reduce (relu) -> 1x1 expand (sigm)
        se = [nn.AvgPool2d(self.Block3.tensor_size[2:], stride=(1, 1)),
              Convolution((1, out_channels, 1, 1), 1, out_channels//r, 1,
                          False, "relu"),
              Convolution((1, out_channels//r, 1, 1), 1, out_channels, 1,
                          False, "sigm")]
        self.SE = nn.Sequential(*se)
        # 1x1 convolution on the shortcut when stride/channels change shape
        if check_residue(strides, tensor_size, out_channels):
            self.edit_residue = Convolution(tensor_size, 1, out_channels,
                                            strides, **kwargs)
        if not pre_nm and activation is not None:
            if activation.lower() in Activations.available():
                self.activation = Activations(self.Block3.tensor_size,
                                              activation.lower(), **kwgs)
        self.tensor_size = self.Block3.tensor_size

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        if self.dropout is not None:  # for dropout
            tensor = self.dropout(tensor)
        residue = self.edit_residue(tensor) if hasattr(self, "edit_residue") \
            else tensor
        tensor = self.Block3(self.Block2(self.Block1(tensor)))
        # channel-wise rescale by the squeeze-and-excitation attention
        tensor = tensor * self.SE(tensor)
        tensor = tensor + residue
        if hasattr(self, "activation"):
            tensor = self.activation(tensor)
        return tensor

    def flops(self):
        # sub-module flops + residual addition + SE rescale per element
        return compute_flops(self) + np.prod(self.tensor_size[1:]) * 2
# =========================================================================== #
class ResidualNeXt(nn.Module):
    r"""Bottleneck Residual block with 1x1 (out_channels//2), 3x3
    (out_channels//2, & groups = out_channels//2), and 1x1 (out_channels)
    convolution. All args are similar to Convolution.
    Implemented - https://arxiv.org/pdf/1611.05431.pdf

    Extra args:
        dropconnect: when True, dropout probability p is used to drop whole
            samples (drop_connect) after the convolutions instead of DropOut
        seblock: when True, applies SEBlock after the last convolution
        r: channel reduction ratio for seblock, default = 16
    """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=32, weight_nm=False, equalized=False,
                 shift=False, bias=False, dropblock=False,
                 dropconnect=False, seblock=False, r=16, **kwargs):
        super(ResidualNeXt, self).__init__()
        self.is_dropconnect = dropconnect
        self.p = dropout
        # dropconnect replaces standard dropout (DropOut gets probability 0)
        dropout = 0. if dropconnect else dropout
        self.dropout = DropOut(tensor_size, dropout, dropblock, **kwargs)
        kwargs = update_kwargs(kwargs, None, None, None, None, True,
                               activation, 0., normalization, pre_nm, None,
                               weight_nm, equalized, shift, bias)
        self.Block1 = Convolution(tensor_size, 1, out_channels//2, 1, **kwargs)
        # grouped 3x3 convolution carries the stride (cardinality = groups)
        self.Block2 = \
            Convolution(self.Block1.tensor_size, filter_size, out_channels//2,
                        strides, groups=groups, **kwargs)
        self.Block3 = \
            Convolution(self.Block2.tensor_size, 1, out_channels, 1, **kwargs)
        if seblock:
            self.seblock = SEBlock(self.Block3.tensor_size, r)
        # 1x1 convolution (no activation) on the shortcut on shape change
        if check_residue(strides, tensor_size, out_channels):
            kwargs["activation"] = ""
            self.edit_residue = Convolution(tensor_size, 1, out_channels,
                                            strides, **kwargs)
        self.tensor_size = self.Block3.tensor_size

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        if self.dropout is not None:  # for dropout
            tensor = self.dropout(tensor)
        residue = self.edit_residue(tensor) if hasattr(self, "edit_residue") \
            else tensor
        tensor = self.Block3(self.Block2(self.Block1(tensor)))
        if hasattr(self, "seblock"):
            tensor = self.seblock(tensor)
        if self.is_dropconnect and self.p > 0.:
            tensor = drop_connect(tensor, self.p)
        tensor = tensor + residue
        return tensor

    def flops(self):
        # sub-module flops + one addition per output element (residue)
        return compute_flops(self) + np.prod(self.tensor_size[1:])
# =========================================================================== #
class SEResidualNeXt(nn.Module):
    r"""Custom module combining both Squeeze-and-Excitation and ResNeXt. All
    args are similar to SEResidualComplex.
    """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=32, weight_nm=False, equalized=False,
                 shift=False, bias=False, dropblock=False, r=16, **kwargs):
        super(SEResidualNeXt, self).__init__()
        self.dropout = DropOut(tensor_size, dropout, dropblock, **kwargs)
        kwargs = update_kwargs(kwargs, None, None, None, None, True,
                               activation, 0., normalization, pre_nm, None,
                               weight_nm, equalized, shift, bias)
        self.Block1 = Convolution(tensor_size, 1, out_channels//2, 1, **kwargs)
        # grouped 3x3 convolution carries the stride (cardinality = groups)
        self.Block2 = \
            Convolution(self.Block1.tensor_size, filter_size, out_channels//2,
                        strides, groups=groups, **kwargs)
        self.Block3 = \
            Convolution(self.Block2.tensor_size, 1, out_channels, 1, **kwargs)
        # SE branch: global pool -> 1x1 reduce (relu) -> 1x1 expand (sigm)
        se = [nn.AvgPool2d(self.Block3.tensor_size[2:], stride=(1, 1)),
              Convolution((1, out_channels, 1, 1), 1, out_channels//r, 1,
                          False, "relu"),
              Convolution((1, out_channels//r, 1, 1), 1, out_channels, 1,
                          False, "sigm")]
        self.SE = nn.Sequential(*se)
        # 1x1 convolution (no activation) on the shortcut on shape change
        if check_residue(strides, tensor_size, out_channels):
            kwargs["activation"] = ""
            self.edit_residue = Convolution(tensor_size, 1, out_channels,
                                            strides, **kwargs)
        self.tensor_size = self.Block3.tensor_size

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        if self.dropout is not None:  # for dropout
            tensor = self.dropout(tensor)
        residue = self.edit_residue(tensor) if hasattr(self, "edit_residue")\
            else tensor
        tensor = self.Block3(self.Block2(self.Block1(tensor)))
        # SE channel rescale, then residual addition
        return tensor * self.SE(tensor) + residue

    def flops(self):
        # sub-module flops + residual addition per output element
        return compute_flops(self) + np.prod(self.tensor_size[1:])
# =========================================================================== #
class SeparableConvolution(nn.Module):
    r"""Depth-wise separable convolution: a depth-wise filter_size
    convolution (groups = in_channels) followed by a point-wise 1x1
    projection to out_channels. All args are similar to Convolution.
    """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=1, weight_nm=False, equalized=False,
                 shift=False, bias=False, dropblock=False, **kwargs):
        super(SeparableConvolution, self).__init__()
        self.dropout = DropOut(tensor_size, dropout, dropblock, **kwargs)
        # depth-wise stage: one filter per input channel, carries the stride
        self.Block1 = Convolution(
            tensor_size, filter_size, tensor_size[1], strides, pad, activation,
            0., normalization, pre_nm, tensor_size[1], weight_nm, equalized,
            shift, bias, dropblock, **kwargs)
        # point-wise stage: plain 1x1 projection, no activation
        self.Block2 = Convolution(self.Block1.tensor_size, 1, out_channels,
                                  1, True, None)
        self.tensor_size = self.Block2.tensor_size

    def forward(self, tensor):
        if self.dropout is not None:  # optional DropOut/DropBlock
            tensor = self.dropout(tensor)
        depthwise = self.Block1(tensor)
        return self.Block2(depthwise)

    def flops(self):
        # both stages are accounted for by the sub-convolutions
        return compute_flops(self)
# =========================================================================== #
class ResidualInverted(nn.Module):
    r""" Support for MobileNetV2 - https://arxiv.org/pdf/1801.04381.pdf
    All args are similar to Convolution.

    Extra args:
        t: expansion factor for the hidden (depth-wise) channels
        t_in: when True, expand from tensor_size[1]; else from out_channels
        dropconnect: when True, dropout probability p is used to drop whole
            samples (drop_connect) after the convolutions instead of DropOut
        seblock: when True, applies SEBlock after the last convolution
        r: channel reduction ratio for seblock, default = 16
    """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=1, weight_nm=False, equalized=False,
                 shift=False, bias=False, dropblock=False, t=1, t_in=False,
                 dropconnect=False, seblock=False, r=16, **kwargs):
        super(ResidualInverted, self).__init__()
        self.is_dropconnect = dropconnect
        self.p = dropout
        # dropconnect replaces standard dropout (DropOut gets probability 0)
        dropout = 0. if dropconnect else dropout
        self.dropout = DropOut(tensor_size, dropout, dropblock, **kwargs)
        kwargs = update_kwargs(kwargs, None, None, None, None, True,
                               activation, 0., normalization, pre_nm, None,
                               weight_nm, equalized, shift, bias)
        # expanded hidden width = base channels * expansion factor t
        channels = int((tensor_size[1] if t_in else out_channels) * t)
        self.Block1 = Convolution(tensor_size, 1, channels, 1, **kwargs)
        # depth-wise convolution (groups == channels) carries the stride
        self.Block2 = \
            Convolution(self.Block1.tensor_size, filter_size, channels,
                        strides, groups=channels, **kwargs)
        # linear bottleneck: projection back to out_channels, no activation
        kwargs["activation"] = ""
        self.Block3 = Convolution(self.Block2.tensor_size, 1, out_channels,
                                  1, **kwargs)
        if seblock:
            self.seblock = SEBlock(self.Block3.tensor_size, r)
        # no residual connection when spatial size changes (strided)
        self.skip_residue = True if check_strides(strides) else False
        if not self.skip_residue and tensor_size[1] != out_channels:
            self.edit_residue = Convolution(tensor_size, 1, out_channels,
                                            strides, **kwargs)
        self.tensor_size = self.Block3.tensor_size

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        if self.dropout is not None:  # for dropout
            tensor = self.dropout(tensor)
        if not self.skip_residue:  # For strides > 1
            residue = self.edit_residue(tensor) if \
                hasattr(self, "edit_residue") else tensor
        tensor = self.Block3(self.Block2(self.Block1(tensor)))
        if hasattr(self, "seblock"):
            tensor = self.seblock(tensor)
        if self.is_dropconnect and self.p > 0.:
            tensor = drop_connect(tensor, self.p)
        return tensor if self.skip_residue else tensor + residue

    def flops(self):
        # residue addition
        flops = 0 if self.skip_residue else np.prod(self.tensor_size[1:])
        return compute_flops(self) + flops
# =========================================================================== #
class ChannelShuffle(nn.Module):
    r"""Channel shuffle from ShuffleNet - https://arxiv.org/pdf/1707.01083.pdf
    Rearranges channels so information mixes across group convolutions:
    (N, g*c, H, W) -> view (N, g, c, H, W) -> swap g and c -> flatten back.
    """
    def __init__(self, groups, *args, **kwargs):
        super(ChannelShuffle, self).__init__()
        self.groups = groups

    def forward(self, tensor):
        n, _, h, w = tensor.size()
        # split channels into (groups, channels_per_group) and interleave
        shuffled = tensor.view(n, self.groups, -1, h, w).transpose(2, 1)
        return shuffled.contiguous().view(n, -1, h, w).contiguous()
class ResidualShuffle(nn.Module):
    r""" ShuffleNet supporting block - https://arxiv.org/pdf/1707.01083.pdf
    All args are similar to Convolution. """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=4, weight_nm=False, equalized=False,
                 shift=False, bias=False, dropblock=False, **kwargs):
        super(ResidualShuffle, self).__init__()
        self.dropout = DropOut(tensor_size, dropout, dropblock, **kwargs)
        # untouched copy of kwargs for the final Activations module
        kwgs = deepcopy(kwargs)
        kwargs = update_kwargs(kwargs, None, None, out_channels, None,
                               True, activation, 0., normalization, pre_nm,
                               groups, weight_nm, equalized, shift, bias)
        # grouped 1x1 -> channel shuffle -> grouped 3x3 -> grouped 1x1
        self.Block1 = Convolution(tensor_size, 1, **kwargs)
        self.Shuffle = ChannelShuffle(groups)
        # no activation on the remaining convs; applied after the addition
        kwargs["activation"] = ""
        self.Block2 = Convolution(self.Block1.tensor_size, filter_size,
                                  strides=strides, **kwargs)
        self.Block3 = Convolution(self.Block2.tensor_size, 1, **kwargs)
        self._flops = 0
        # shortcut: three cases depending on stride and channel change
        if check_strides(strides) and tensor_size[1] == out_channels:
            # strided, same channels: average pool matches spatial size
            sz = strides + (1 if strides % 2 == 0 else 0)
            self.edit_residue = nn.AvgPool2d(sz, strides, sz//2)
            self._flops += tensor_size[1]*self.Block3.tensor_size[2] * \
                self.Block3.tensor_size[3]*(sz*sz+1)
        elif not check_strides(strides) and tensor_size[1] != out_channels:
            # unstrided, channel change: 1x1 convolution
            self.edit_residue = Convolution(tensor_size, 1, out_channels,
                                            **kwargs)
        elif check_strides(strides) and tensor_size[1] != out_channels:
            # strided and channel change: pool then 1x1 convolution
            sz = strides + (1 if strides % 2 == 0 else 0)
            t_size = (1, tensor_size[1], self.Block3.tensor_size[2],
                      self.Block3.tensor_size[3])
            self.edit_residue = [nn.AvgPool2d(3, 2, 1),
                                 Convolution(t_size, 1, **kwargs)]
            self.edit_residue = nn.Sequential(*self.edit_residue)
            self._flops = tensor_size[1]*self.Block3.tensor_size[2] * \
                self.Block3.tensor_size[3]*(sz*sz+1)
        self.tensor_size = self.Block3.tensor_size
        if activation in ("maxo", "rmxo"):  # switch to retain out_channels
            activation = "relu"
        self.Activation = Activations(self.Block3.tensor_size,
                                      activation, **kwgs)

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        if self.dropout is not None:
            tensor = self.dropout(tensor)
        residue = self.edit_residue(tensor) if hasattr(self, "edit_residue") \
            else tensor
        tensor = self.Block3(self.Block2(self.Shuffle(self.Block1(tensor))))
        # activation applied after the residual addition
        return self.Activation(tensor + residue)

    def flops(self):
        # sub-module flops + residual addition + shortcut pooling cost
        return compute_flops(self)+np.prod(self.tensor_size[1:])+self._flops
# =========================================================================== #
class SimpleFire(nn.Module):
    r"""SqueezeNet Fire block - https://arxiv.org/pdf/1602.07360.pdf
    A 1x1 squeeze convolution followed by parallel 3x3 and 1x1 expand
    convolutions whose outputs are concatenated on the channel axis.
    All args are similar to Convolution.
    """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=1, weight_nm=False, equalized=False,
                 shift=False, bias=False, dropblock=False, **kwargs):
        super(SimpleFire, self).__init__()
        self.dropout = DropOut(tensor_size, dropout, dropblock, **kwargs)
        kwargs = update_kwargs(kwargs, None, None, None,
                               None, True, None, 0., normalization, pre_nm,
                               groups, weight_nm, equalized, shift, bias)
        n_squeeze = out_channels//4            # squeeze path width
        n_expand3 = out_channels//2            # 3x3 expand path width
        n_expand1 = out_channels - n_expand3   # 1x1 expand path width
        # squeeze: 1x1 bottleneck without activation
        self.Shrink = Convolution(tensor_size, 1, n_squeeze, 1,
                                  activation=None, **kwargs)
        # expand: parallel 3x3 and 1x1 branches (concatenated in forward)
        self.Block3x3 = Convolution(self.Shrink.tensor_size, filter_size,
                                    n_expand3, strides,
                                    activation=activation, **kwargs)
        self.Block1x1 = Convolution(self.Shrink.tensor_size, 1,
                                    n_expand1, strides,
                                    activation=activation, **kwargs)
        self.tensor_size = (1, out_channels) + self.Block3x3.tensor_size[2:]

    def forward(self, tensor):
        if self.dropout is not None:
            tensor = self.dropout(tensor)
        squeezed = self.Shrink(tensor)
        branches = (self.Block3x3(squeezed), self.Block1x1(squeezed))
        return torch.cat(branches, 1)

    def flops(self):
        # the sub-convolutions account for all multiply-adds
        return compute_flops(self)
# =========================================================================== #
class CarryModular(nn.Module):
    r"""Similar to residual connection that concatenate the output to the input
    when in_channels is less than out_channels. When in_channels is equal to
    out_channels, removes the first growth_rate input channels
    (tensor[:, :growth_rate]) and concatenates the new growth_rate at the end
    (tensor[:, -growth_rate:]).
    All args are similar to Convolution and requires out_channels >=
    tensor_size[1].
    Args:
        growth_rate: out_channels of each sub block
        block: any convolutional block is accepted (nn.Sequential or nn.Module)
            default = SimpleFire
        carry_network: only active when strides is >1. When string input =
            "avg", does average pooling else max pool. Also, accepts
            nn.Sequential/nn.Module. default = average pool.
    """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=1, weight_nm=False, equalized=False,
                 shift=False, bias=False, dropblock=False, growth_rate=32,
                 block=SimpleFire, carry_network="avg", **kwargs):
        super(CarryModular, self).__init__()
        pad = True  # concatenation requires matching spatial sizes
        self.dropout = DropOut(tensor_size, dropout, dropblock, **kwargs)
        if tensor_size[1] < out_channels:  # adjusts growth_rate
            growth_rate = out_channels - tensor_size[1]
        else:
            # NOTE(review): self.dynamic is set but never read in forward;
            # the docstring's channel-rotation behavior is not implemented.
            self.dynamic = True

        self.network1 = block \
            if isinstance(block, torch.nn.modules.container.Sequential) else \
            block(tensor_size, filter_size, growth_rate, strides, pad,
                  activation, 0., normalization, pre_nm, groups, weight_nm,
                  equalized, shift, bias, **kwargs)

        self._flops = 0
        if check_strides(strides):
            if isinstance(carry_network, str):
                self.network2 = nn.AvgPool2d((3, 3), stride=(2, 2), padding=1)\
                    if carry_network.lower() == "avg" else \
                    nn.MaxPool2d((3, 3), stride=(2, 2), padding=1)
                self._flops = tensor_size[1]*(tensor_size[2]//2) * \
                    (tensor_size[3]//2) * (3*3+1)
            elif (isinstance(carry_network, list) or
                  isinstance(carry_network, tuple)):
                self.network2 = nn.Sequential(*carry_network)
            elif isinstance(carry_network,
                            torch.nn.modules.container.Sequential):
                self.network2 = carry_network
            else:
                raise NotImplementedError
        else:
            # BUG FIX: forward always calls self.network2, but it was only
            # defined for strides > 1 and forward crashed with strides == 1.
            # An empty Sequential is an identity carry of the input.
            self.network2 = nn.Sequential()

        if isinstance(block, torch.nn.modules.container.Sequential):
            _tensor_size = self.network1[-1].tensor_size
        else:
            _tensor_size = self.network1.tensor_size
        self.tensor_size = (_tensor_size[0], out_channels,
                            _tensor_size[2], _tensor_size[3])

    def forward(self, tensor):
        # BUG FIX: dropout was gated on hasattr(self, "pre_network"), an
        # attribute this class never sets, so the configured DropOut was
        # silently skipped. Apply self.dropout like the sibling blocks do.
        if self.dropout is not None:
            tensor = self.dropout(tensor)
        return torch.cat((self.network1(tensor), self.network2(tensor)), 1)

    def flops(self):
        # sub-module flops + carry-network pooling cost
        return compute_flops(self) + self._flops
# =========================================================================== #
class DenseBlock(nn.Module):
    r""" For DenseNet - https://arxiv.org/pdf/1608.06993.pdf
    All args are similar to Convolution and requires out_channels =
    tensor_size[1] + growth_rate * n_blocks.
    Args:
        growth_rate: out_channels of each sub block
        block: any convolutional block is accepted, default = Convolution
        n_blocks: number of sub blocks, default = 4
        multiplier: growth_rate multiplier for 1x1 convolution
    """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=1, weight_nm=False, equalized=False,
                 shift=False, bias=False, dropblock=False,
                 growth_rate=16, block=Convolution, n_blocks=4,
                 multiplier=4, **kwargs):
        super(DenseBlock, self).__init__()
        assert out_channels == tensor_size[1] + growth_rate * n_blocks, \
            "DenseBlock -- out_channels != tensor_size[1]+growth_rate*n_blocks"
        kwargs = update_kwargs(kwargs, None, None, None, None, True,
                               activation, 0., normalization, pre_nm,
                               groups, weight_nm, equalized, shift, bias)
        self.dropout = DropOut(tensor_size, dropout, dropblock, **kwargs)
        # tensor_size is mutated in place as channels accumulate below
        tensor_size = list(tensor_size)
        self._flops = 0
        if check_strides(strides):  # Update tensor_size
            tensor_size[0] = 1
            sz = strides + (1 if strides % 2 == 0 else 0)
            # probe the pooled spatial size with a throw-away tensor
            tensor_size = list(F.avg_pool2d(torch.rand(*tensor_size),
                                            sz, strides, sz//2).size())
            self.pool = nn.AvgPool2d(sz, strides, sz//2)
            self._flops += np.prod(tensor_size[1:]) * (sz*sz+1)
        # each sub block: 1x1 (growth_rate*multiplier) -> filter_size
        # (growth_rate); its output is concatenated onto the running input
        for n in range(1, n_blocks+1):
            c = growth_rate*multiplier
            t_size = (1, c, tensor_size[2], tensor_size[3])
            dense = [block(tuple(tensor_size), 1, c, **kwargs),
                     block(t_size, filter_size, growth_rate, **kwargs)]
            setattr(self, "block" + str(n), nn.Sequential(*dense))
            tensor_size[1] += growth_rate
        self.n_blocks = n_blocks
        self.tensor_size = (tensor_size[0], out_channels, tensor_size[2],
                            tensor_size[3])

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        if self.dropout is not None:
            tensor = self.dropout(tensor)
        if hasattr(self, "pool"):
            tensor = self.pool(tensor)
        # dense connectivity: every sub block sees all previous features
        for n in range(1, self.n_blocks+1):
            tensor = torch.cat((tensor,
                                getattr(self, "block"+str(n))(tensor)), 1)
        return tensor

    def flops(self):
        # sub-module flops + concatenation estimate + pooling cost
        return compute_flops(self) + np.prod(self.tensor_size[1:])*3*3 + \
            self._flops
# =========================================================================== #
class ContextNet_Bottleneck(nn.Module):
    r""" bottleneck for contextnet - https://arxiv.org/pdf/1805.04554.pdf
    - Table 1 """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="relu", dropout=0., normalization=None,
                 pre_nm=False, groups=1, weight_nm=False, equalized=False,
                 shift=False, expansion=1, *args, **kwargs):
        super(ContextNet_Bottleneck, self).__init__()
        # optional dropout applied to the input in forward
        if dropout > 0.:
            self.pre_network = nn.Dropout2d(dropout)
        kwargs = update_kwargs(kwargs, tensor_size, filter_size, out_channels,
                               strides, True, activation, 0., normalization,
                               pre_nm, groups, weight_nm, equalized, shift)
        # the four stages differ only in a few kwargs; overrides accumulate
        # (e.g. Block1x1 inherits strides=1 set by the previous stage)
        stages = (
            ("Block1x1_t", {"filter_size": 1,
                            "out_channels": tensor_size[1]*expansion,
                            "strides": 1}),
            ("Block3x3_DW11", {"filter_size": filter_size,
                               "out_channels": tensor_size[1]*expansion,
                               "strides": strides,
                               "groups": tensor_size[1]}),
            # cross check -- why name Block3x3_DW12? (it is a 1x1 conv)
            ("Block3x3_DW12", {"filter_size": 1,
                               "out_channels": tensor_size[1]*expansion,
                               "strides": 1,
                               "groups": groups}),
            ("Block1x1", {"filter_size": 1,
                          "out_channels": out_channels,
                          "activation": "",
                          "groups": groups}),
        )
        self.network = nn.Sequential()
        for stage_name, overrides in stages:
            if len(self.network) > 0:
                # chain: each stage consumes the previous stage's size
                kwargs["tensor_size"] = self.network[-1].tensor_size
            kwargs.update(overrides)
            self.network.add_module(stage_name, Convolution(**kwargs))
        # 1x1 projection of the residue when channels/stride change
        if check_residue(strides, tensor_size, out_channels):
            kwargs.update({"tensor_size": tensor_size, "filter_size": 1,
                           "out_channels": out_channels, "strides": strides,
                           "activation": ""})
            self.edit_residue = Convolution(**kwargs)
        self.tensor_size = self.network[-1].tensor_size

    def forward(self, tensor):
        if hasattr(self, "pre_network"):  # for dropout
            tensor = self.pre_network(tensor)
        out = self.network(tensor)
        residue = self.edit_residue(tensor) if hasattr(self, "edit_residue") \
            else tensor
        return out + residue

    def flops(self):
        # sub-module flops + one addition per output element (residual sum)
        return compute_flops(self) + np.prod(self.tensor_size[1:])
# =========================================================================== #
class MBBlock(nn.Module):
    r""" Support for EfficientNets - https://arxiv.org/pdf/1905.11946.pdf
    Mobile inverted bottleneck: optional 1x1 expansion -> depthwise
    convolution -> optional squeeze-and-excitation -> linear 1x1 projection,
    with a drop-connect residual when input and output shapes match.

    Args (not in Convolution):
        expansion (int): Expansion factor of tensor_size[1] for depthwise
            convolution. initial 1x1 convolution is ignored when 1.
            default = 1
        seblock (bool): Adds Squeeze and Excitation block.
            default = False
        r (int): factor for squeeze and excitation
            default = 4
        ichannels (int): This overwrites expansion parameter
            default = None
    """
    def __init__(self, tensor_size, filter_size, out_channels, strides=1,
                 pad=True, activation="swish", dropout=0.,
                 normalization="batch", pre_nm=False,
                 expansion=1, seblock=False, r=4, ichannels=None, **kwargs):
        super(MBBlock, self).__init__()
        self.p = dropout  # drop-connect probability, used in forward
        if ichannels is None:
            ichannels = int(tensor_size[1] * expansion)
        if ichannels != tensor_size[1]:
            # 1x1 expansion to ichannels
            self.expand = Convolution(tensor_size, 1, ichannels, 1, True,
                                      activation, 0., normalization, pre_nm,
                                      **kwargs)
        # BUGFIX: mirror the condition that created self.expand (same test
        # forward uses). The old check `expansion > 1` fed the un-expanded
        # tensor_size to the depthwise conv whenever `ichannels` was passed
        # explicitly while expansion stayed 1, and raised AttributeError
        # when expansion > 1 but ichannels == tensor_size[1].
        t_size = self.expand.tensor_size if hasattr(self, "expand") else \
            tensor_size
        self.depthwise = Convolution(t_size, filter_size, ichannels, strides,
                                     True, activation, 0., normalization,
                                     pre_nm, groups=ichannels, **kwargs)
        if seblock:
            # squeeze-and-excitation: 1x1 down to tensor_size[1]//r, then
            # 1x1 back up with sigmoid gating (global pool done in forward)
            self.squeeze = Convolution(self.depthwise.tensor_size, 1,
                                       tensor_size[1]//r, 1, True, activation,
                                       bias=True)
            self.excitation = Convolution(self.squeeze.tensor_size, 1,
                                          self.depthwise.tensor_size[1], 1,
                                          True, "sigm", bias=True)
        # linear 1x1 projection (no activation)
        self.shrink = Convolution(self.depthwise.tensor_size, 1, out_channels,
                                  1, True, None, 0., normalization,
                                  pre_nm, **kwargs)
        self.tensor_size = self.shrink.tensor_size

    def forward(self, tensor):
        o = self.expand(tensor) if hasattr(self, "expand") else tensor
        o = self.depthwise(o)
        if hasattr(self, "squeeze"):
            # channel-wise gating computed from globally pooled features
            o = o * self.excitation(self.squeeze(F.adaptive_avg_pool2d(o, 1)))
        o = self.shrink(o)
        if tensor.shape[1:] == o.shape[1:]:
            # residual path with drop-connect during training only
            if self.p > 0. and self.training:
                o = drop_connect(o, self.p)
            return tensor + o
        return o
# from tensormonk.layers import Convolution
# from tensormonk.activations import Activations
# from tensormonk.regularizations import DropOut
# from tensormonk.layers.utils import check_strides, check_residue
# from tensormonk.layers.utils import update_kwargs, compute_flops
# tensor_size = (3, 64, 10, 10)
# x = torch.rand(*tensor_size)
# test = ResidualOriginal(tensor_size, 3, 64, 2, False, "relu", 0.,
# "batch", False)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = ResidualComplex(tensor_size, 3, 64, 2, False, "relu", 0., "batch",
# False)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = ResidualInverted(tensor_size, 3, 64, 1, False, "relu", 0., "batch",
# False)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = ResidualShuffle(tensor_size, 3, 64, 2, False, "relu", 0., "batch",
# False)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = SimpleFire(tensor_size, 3, 64, 2, False, "relu", 0.1, None, False)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = CarryModular(tensor_size, 3, 128, 2, False, "relu", 0., None, False)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = SEResidualComplex(tensor_size, 3, 64, 2, False, "relu", 0., "batch",
# False)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = ResidualNeXt(tensor_size, 3, 64, 2, False, "relu", 0., "batch", False)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = SEResidualNeXt(tensor_size, 3, 64, 2, False, "relu", 0., "batch",
# False)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = DenseBlock(tensor_size, 3, 128, 2, True, "relu", 0., "batch", False)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = ContextNet_Bottleneck(tensor_size, 3, 128, 1)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = MBBlock(tensor_size, 3, 64, 1, True, "swish", 0.5, seblock=True)
# test(torch.rand(*tensor_size)).size()
# test
# %timeit test(x).size()
| [
"torch.nn.functional.conv2d",
"numpy.prod",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Sequential",
"torch.nn.Dropout2d",
"torch.nn.functional.avg_pool2d",
"torch.nn.init.kaiming_uniform_",
"torch.nn.MaxPool2d",
"copy.deepcopy",
"torch.nn.AvgPool2d",
"random.randint",
"torch.rand",
... | [((1428, 1466), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['self.squeeze'], {}), '(self.squeeze)\n', (1452, 1466), True, 'import torch.nn as nn\n'), ((1475, 1516), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['self.excitation'], {}), '(self.excitation)\n', (1499, 1516), True, 'import torch.nn as nn\n'), ((1878, 1916), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['tensor', 'tensor.shape[2:]'], {}), '(tensor, tensor.shape[2:])\n', (1890, 1916), True, 'import torch.nn.functional as F\n'), ((3075, 3091), 'copy.deepcopy', 'deepcopy', (['kwargs'], {}), '(kwargs)\n', (3083, 3091), False, 'from copy import deepcopy\n'), ((5851, 5867), 'copy.deepcopy', 'deepcopy', (['kwargs'], {}), '(kwargs)\n', (5859, 5867), False, 'from copy import deepcopy\n'), ((8602, 8618), 'copy.deepcopy', 'deepcopy', (['kwargs'], {}), '(kwargs)\n', (8610, 8618), False, 'from copy import deepcopy\n'), ((9593, 9611), 'torch.nn.Sequential', 'nn.Sequential', (['*se'], {}), '(*se)\n', (9606, 9611), True, 'import torch.nn as nn\n'), ((14737, 14755), 'torch.nn.Sequential', 'nn.Sequential', (['*se'], {}), '(*se)\n', (14750, 14755), True, 'import torch.nn as nn\n'), ((20628, 20644), 'copy.deepcopy', 'deepcopy', (['kwargs'], {}), '(kwargs)\n', (20636, 20644), False, 'from copy import deepcopy\n'), ((31957, 31972), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (31970, 31972), True, 'import torch.nn as nn\n'), ((837, 861), 'random.randint', 'random.randint', (['(0)', '(n - 1)'], {}), '(0, n - 1)\n', (851, 861), False, 'import random\n'), ((1247, 1301), 'torch.randn', 'torch.randn', (['(tensor_size[1] // r)', 'tensor_size[1]', '(1)', '(1)'], {}), '(tensor_size[1] // r, tensor_size[1], 1, 1)\n', (1258, 1301), False, 'import torch\n'), ((1353, 1407), 'torch.randn', 'torch.randn', (['tensor_size[1]', '(tensor_size[1] // r)', '(1)', '(1)'], {}), '(tensor_size[1], tensor_size[1] // r, 1, 1)\n', (1364, 1407), False, 'import torch\n'), ((1937, 1969), 
'torch.nn.functional.conv2d', 'F.conv2d', (['se', 'self.squeeze', 'None'], {}), '(se, self.squeeze, None)\n', (1945, 1969), True, 'import torch.nn.functional as F\n'), ((1998, 2033), 'torch.nn.functional.conv2d', 'F.conv2d', (['se', 'self.excitation', 'None'], {}), '(se, self.excitation, None)\n', (2006, 2033), True, 'import torch.nn.functional as F\n'), ((4879, 4908), 'numpy.prod', 'np.prod', (['self.tensor_size[1:]'], {}), '(self.tensor_size[1:])\n', (4886, 4908), True, 'import numpy as np\n'), ((7778, 7807), 'numpy.prod', 'np.prod', (['self.tensor_size[1:]'], {}), '(self.tensor_size[1:])\n', (7785, 7807), True, 'import numpy as np\n'), ((9285, 9341), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['self.Block3.tensor_size[2:]'], {'stride': '(1, 1)'}), '(self.Block3.tensor_size[2:], stride=(1, 1))\n', (9297, 9341), True, 'import torch.nn as nn\n'), ((13160, 13189), 'numpy.prod', 'np.prod', (['self.tensor_size[1:]'], {}), '(self.tensor_size[1:])\n', (13167, 13189), True, 'import numpy as np\n'), ((14429, 14485), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['self.Block3.tensor_size[2:]'], {'stride': '(1, 1)'}), '(self.Block3.tensor_size[2:], stride=(1, 1))\n', (14441, 14485), True, 'import torch.nn as nn\n'), ((15445, 15474), 'numpy.prod', 'np.prod', (['self.tensor_size[1:]'], {}), '(self.tensor_size[1:])\n', (15452, 15474), True, 'import numpy as np\n'), ((19311, 19340), 'numpy.prod', 'np.prod', (['self.tensor_size[1:]'], {}), '(self.tensor_size[1:])\n', (19318, 19340), True, 'import numpy as np\n'), ((21397, 21431), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['sz', 'strides', '(sz // 2)'], {}), '(sz, strides, sz // 2)\n', (21409, 21431), True, 'import torch.nn as nn\n'), ((29945, 29979), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['sz', 'strides', '(sz // 2)'], {}), '(sz, strides, sz // 2)\n', (29957, 29979), True, 'import torch.nn as nn\n'), ((31678, 31699), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout'], {}), '(dropout)\n', (31690, 31699), True, 'import torch.nn as nn\n'), 
((33819, 33848), 'numpy.prod', 'np.prod', (['self.tensor_size[1:]'], {}), '(self.tensor_size[1:])\n', (33826, 33848), True, 'import numpy as np\n'), ((2130, 2159), 'numpy.prod', 'np.prod', (['self.tensor_size[1:]'], {}), '(self.tensor_size[1:])\n', (2137, 2159), True, 'import numpy as np\n'), ((10654, 10683), 'numpy.prod', 'np.prod', (['self.tensor_size[1:]'], {}), '(self.tensor_size[1:])\n', (10661, 10683), True, 'import numpy as np\n'), ((23002, 23031), 'numpy.prod', 'np.prod', (['self.tensor_size[1:]'], {}), '(self.tensor_size[1:])\n', (23009, 23031), True, 'import numpy as np\n'), ((30005, 30029), 'numpy.prod', 'np.prod', (['tensor_size[1:]'], {}), '(tensor_size[1:])\n', (30012, 30029), True, 'import numpy as np\n'), ((30361, 30382), 'torch.nn.Sequential', 'nn.Sequential', (['*dense'], {}), '(*dense)\n', (30374, 30382), True, 'import torch.nn as nn\n'), ((745, 778), 'torch.rand', 'torch.rand', (['n'], {'dtype': 'tensor.dtype'}), '(n, dtype=tensor.dtype)\n', (755, 778), False, 'import torch\n'), ((22164, 22197), 'torch.nn.Sequential', 'nn.Sequential', (['*self.edit_residue'], {}), '(*self.edit_residue)\n', (22177, 22197), True, 'import torch.nn as nn\n'), ((26871, 26917), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(3, 3)'], {'stride': '(2, 2)', 'padding': '(1)'}), '((3, 3), stride=(2, 2), padding=1)\n', (26883, 26917), True, 'import torch.nn as nn\n'), ((27000, 27046), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3, 3)'], {'stride': '(2, 2)', 'padding': '(1)'}), '((3, 3), stride=(2, 2), padding=1)\n', (27012, 27046), True, 'import torch.nn as nn\n'), ((27304, 27333), 'torch.nn.Sequential', 'nn.Sequential', (['*carry_network'], {}), '(*carry_network)\n', (27317, 27333), True, 'import torch.nn as nn\n'), ((22042, 22063), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (22054, 22063), True, 'import torch.nn as nn\n'), ((31003, 31032), 'numpy.prod', 'np.prod', (['self.tensor_size[1:]'], {}), '(self.tensor_size[1:])\n', (31010, 31032), 
True, 'import numpy as np\n'), ((36353, 36380), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['o', '(1)'], {}), '(o, 1)\n', (36374, 36380), True, 'import torch.nn.functional as F\n'), ((29823, 29847), 'torch.rand', 'torch.rand', (['*tensor_size'], {}), '(*tensor_size)\n', (29833, 29847), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import os
import sys
# ensure `tests` directory path is on top of Python's module search
filedir = os.path.dirname(__file__)
sys.path.insert(0, filedir)
# Remove duplicates *after* the front entry. Searching from index 1 keeps
# the copy just inserted; the old `sys.path.index(filedir)` always found
# index 0 first, so a pre-existing duplicate deeper in sys.path caused the
# loop to strip `filedir` from sys.path entirely.
while filedir in sys.path[1:]:
    sys.path.pop(sys.path.index(filedir, 1))  # avoid duplication
import pytest
import numpy as np
import matplotlib.pyplot as plt
import contextlib, io
from copy import deepcopy
from backend import BASEDIR, tempdir, notify, _get_test_names
from deeptrain.util.misc import pass_on_error, argspec
from deeptrain.util.algorithms import ordered_shuffle
from deeptrain.util import TimeseriesPreprocessor
from deeptrain.util.data_loaders import DataLoader
from deeptrain import DataGenerator
# directory containing the bundled test datasets
datadir = os.path.join(BASEDIR, 'tests', 'data')

# baseline DataGenerator configuration; tests deepcopy and tweak it
DATAGEN_CFG = dict(
    data_path=os.path.join(datadir, 'image', 'train'),
    labels_path=os.path.join(datadir, 'image', 'train', 'labels.h5'),
    batch_size=128,
    shuffle=True,
)

# test-name -> status mapping consumed by the @notify decorator
tests_done = {}
@notify(tests_done)
def test_advance_batch():
    """Exercise DataGenerator.advance_batch on valid and invalid configs."""
    cfg = deepcopy(DATAGEN_CFG)
    cfg['superbatch_path'] = os.path.join(datadir, 'image', 'train')
    # nominal configuration: must succeed outright
    gen = DataGenerator(**cfg)
    gen.advance_batch()
    # batch size that does not fit the data: failure tolerated
    cfg['batch_size'] = 31
    gen = DataGenerator(**cfg)
    pass_on_error(gen.advance_batch)
    # nothing left to process: failure tolerated
    cfg['batch_size'] = 256
    gen = DataGenerator(**cfg)
    gen.set_nums_to_process = []
    pass_on_error(gen.advance_batch)
    # invalid data_loader name: constructor itself should error
    cfg['data_loader'] = 'pigeon'
    pass_on_error(DataGenerator, **cfg)
@notify(tests_done)
def test_shuffle():
    """Superbatch preloading + batch advance with group-batch shuffling."""
    cfg = deepcopy(DATAGEN_CFG)
    cfg.update(shuffle_group_batches=True,
               superbatch_path=os.path.join(datadir, 'image', 'train'),
               batch_size=64)
    gen = DataGenerator(**cfg)
    gen.preload_superbatch()
    gen.advance_batch()
@notify(tests_done)
def test_kwargs():
    """DataGenerator must accept both group-shuffling flags together."""
    cfg = deepcopy(DATAGEN_CFG)
    cfg.update(shuffle_group_batches=True, shuffle_group_samples=True)
    DataGenerator(**cfg)
@notify(tests_done)
def test_data_loader():
    """Exercise DataLoader/DataGenerator with hdf5, numpy-lz4f, callable and
    invalid loaders; sub-tests whose first argument is `C` are harvested
    from locals() at the bottom and run against a fresh config copy."""
    def _test_auto_hdf5(C):
        # loader inferred automatically from the files in data_path
        dg = DataGenerator(**C)
        dg.advance_batch()
    def _test_hdf5(C):
        # loader requested explicitly by name
        C['data_loader'] = 'hdf5'
        dg = DataGenerator(**C)
        dg.advance_batch()
    def _test_exceptions(C):
        # unknown loader name must raise (pass_on_error swallows it)
        C['data_loader'] = 'invalid_loader'
        pass_on_error(DataGenerator, **C)
    def _test_lz4f_dataset(C):
        del C['labels_path']
        C['data_path'] = os.path.join(datadir, 'image_lz4f', 'train',
                                      '128batch__1.npy')
        # lz4f data without loader / batch shape: expected to fail
        pass_on_error(DataGenerator, **C)
        C['data_loader'] = 'numpy-lz4f'
        pass_on_error(DataGenerator, **C)
        # with data_batch_shape supplied, construction should succeed
        C['data_batch_shape'] = (128, 28, 28, 1)
        DataGenerator(**C)
    def _test_unknown(C):
        # arbitrary callable accepted as loader
        C['data_loader'] = lambda x: x
        C['data_path'] = os.path.join(datadir, 'image_lz4f', 'train',
                                      '128batch__1.npy')
        pass_on_error(DataGenerator, **C)
    def _test_validate_args(C):
        # invalid constructor arguments for DataLoader itself
        pass_on_error(DataLoader, 1, 1)
        kw = dict(path=C['data_path'], loader=1, filepaths=None)
        pass_on_error(DataLoader, **kw)
        kw['filepaths'] = ['x']
        pass_on_error(DataLoader, **kw)
    # all sub-tests run on the timeseries dataset
    _C = deepcopy(DATAGEN_CFG)
    _C['data_path'] = os.path.join(datadir, 'timeseries_split', 'train')
    _C['labels_path'] = os.path.join(datadir, 'timeseries_split', 'train',
                                     'labels.h5')
    _C['batch_size'] = 128
    # harvest the sub-tests defined above by inspecting locals()
    names, fns = zip(*locals().items())
    for name, fn in zip(names, fns):
        if hasattr(fn, '__code__') and argspec(fn)[0] == 'C':
            C = deepcopy(_C)
            fn(C)
        # NOTE(review): this print sits outside the `if`, so it also runs
        # for non-function locals (e.g. `_C`), where fn.__name__ would
        # raise AttributeError -- confirm the intended indentation
        print("Passed", fn.__name__)
@notify(tests_done)
def test_labels_loaders():
    """DataGenerator must tolerate running without any labels at all."""
    def _test_no_loader():
        cfg = deepcopy(DATAGEN_CFG)
        cfg.update(labels_loader=None, labels_path=None)
        DataGenerator(**cfg)
    _test_no_loader()
@notify(tests_done)
def test_preprocessors():
    """Validate TimeseriesPreprocessor construction and the
    `start_increment` setter's type/state checks; sub-tests named
    `_test_*` are harvested from locals() at the bottom."""
    def _test_uninstantiated(C):
        # DataGenerator should instantiate the class from configs itself
        C['preprocessor'] = TimeseriesPreprocessor
        C['preprocessor_configs'] = dict(window_size=5)
        DataGenerator(**C)
    def _test_instantiated(C):
        TimeseriesPreprocessor(window_size=5)
    def _test_start_increment(C):
        pp = TimeseriesPreprocessor(window_size=25, start_increments=None)
        try:
            pp.start_increment = 5
            # shouldn't be able to set with start_increments = None
            assert False, ("shouldn't be able to set `start_increment`"
                           "with `start_increments == None`")
        except ValueError:
            pass
        pp = TimeseriesPreprocessor(window_size=25, start_increments=[0, 5])
        pp.start_increment = 5  # should throw a warning
        try:
            # setter must reject non-int values
            pp.start_increment = 5.0
            assert False, "shouldn't be able to set `start_increment` to a float"
        except ValueError:
            pass
    def _test_start_increment_warning(C):
        pp = TimeseriesPreprocessor(window_size=25, start_increments=[0, 5])
        # capture stdout to check the warning is actually printed
        str_io = io.StringIO()
        with contextlib.redirect_stdout(str_io):
            pp.start_increment = 4
        output = str_io.getvalue()
        assert "WARNING:" in output, "print(%s)" % output
    # run every `_test_*` sub-test on a fresh config copy
    names, fns = zip(*locals().items())
    for name, fn in zip(names, fns):
        if name.startswith('_test_') or name.startswith('test_'):
            C = deepcopy(DATAGEN_CFG)
            fn(C)
@notify(tests_done)
def test_shuffle_group_batches():
    """Ensure reshape doesn't mix batch and spatial dimensions"""
    batch_size = 64
    data = np.random.randn(128, 28, 28, 1)
    onehots = np.random.randint(0, 2, (128, 10))
    data_shape, onehots_shape = data.shape, onehots.shape
    # expected halves before grouping
    expected = {'x0': data[:batch_size], 'x1': data[batch_size:],
                'y0': onehots[:batch_size], 'y1': onehots[batch_size:]}
    data = data.reshape(-1, batch_size, *data_shape[1:])
    onehots = onehots.reshape(-1, batch_size, *onehots_shape[1:])
    grouped = {'x0': data[0], 'x1': data[1],
               'y0': onehots[0], 'y1': onehots[1]}
    # grouping must split along the batch axis only, samples stay intact
    for key in ('x0', 'x1', 'y0', 'y1'):
        adiff = np.sum(np.abs(grouped[key] - expected[key]))
        assert adiff == 0, ("%s absdiff: %s" % (key, adiff))
    # shuffling then undoing the grouping must restore the original shapes
    data, onehots = ordered_shuffle(data, onehots)
    data = data.reshape(*data_shape)
    onehots = onehots.reshape(*onehots_shape)
    assert (data.shape == data_shape) and (onehots.shape == onehots_shape)
@notify(tests_done)
def test_infer_info():
    """Data-info inference should fail gracefully on unusable data dirs."""
    def _run_with_data_path(dirpath):
        # DataGenerator cannot infer anything from `dirpath`; the failure
        # is tolerated via pass_on_error
        cfg = deepcopy(DATAGEN_CFG)
        cfg['data_path'] = dirpath
        pass_on_error(DataGenerator, **cfg)

    # completely empty directory
    with tempdir() as dirpath:
        _run_with_data_path(dirpath)
    # directory containing only a file with an unsupported extension
    with tempdir() as dirpath:
        plt.plot([0, 1])
        plt.gcf().savefig(os.path.join(dirpath, "img.png"))
        _run_with_data_path(dirpath)
@notify(tests_done)
def test_warnings_and_exceptions():
    """Push DataGenerator through invalid configurations and internal edge
    cases; `pass_on_error` lets the expected failures pass through while
    keeping the suite alive."""
    def _test_init():
        # nonexistent superbatch path
        C = deepcopy(DATAGEN_CFG)
        C['superbatch_set_nums'] = 'all'
        C['superbatch_path'] = 'x'
        pass_on_error(DataGenerator, **C)
        # non-string labels_path / data_path
        C = deepcopy(DATAGEN_CFG)
        C['labels_path'] = 1
        pass_on_error(DataGenerator, **C)
        C['data_path'] = 1
        pass_on_error(DataGenerator, **C)
    def _test_misc():
        C = deepcopy(DATAGEN_CFG)
        dg = DataGenerator(**C)
        # request a set_num that is not in the (fake) superbatch
        dg.superbatch = {'1': 1, '2': 2}
        dg.superbatch_set_nums = ['3']
        pass_on_error(dg._get_next_batch, set_num='3', warn=True)
        # labels lookup with empty all_labels
        dg.all_labels = {}
        pass_on_error(dg._get_next_labels, set_num='3')
        # load_data / load_labels should reject non-callable values
        pass_on_error(setattr, dg, 'load_data', 1)
        pass_on_error(setattr, dg, 'load_labels', 1)
        # single-file data_path with no labels
        with tempdir() as dirpath:
            path = os.path.join(dirpath, "arr.npy")
            np.save(path, np.array([1]))
            C = deepcopy(DATAGEN_CFG)
            C['labels_path'] = None
            C['data_path'] = path
            pass_on_error(DataGenerator, **C)
    def _test_make_group_batch_and_labels():
        C = deepcopy(DATAGEN_CFG)
        dg = DataGenerator(**C)
        # mismatched batch/labels lengths must be rejected
        dg.batch = np.random.randn(128, 10)
        dg.labels = np.random.randn(129, 10)
        pass_on_error(dg._make_group_batch_and_labels, n_batches=2)
        # matching lengths: each shuffle-flag combination must succeed
        dg.shuffle_group_samples = True
        dg.labels = dg.batch.copy()
        dg._make_group_batch_and_labels(n_batches=2)
        dg.labels_path = None
        dg._make_group_batch_and_labels(n_batches=2)
        dg.shuffle_group_batches = True
        dg.shuffle_group_samples = False
        dg._make_group_batch_and_labels(n_batches=2)
    def _test_infer_and_set_info():
        C = deepcopy(DATAGEN_CFG)
        with tempdir() as dirpath:
            path = os.path.join(dirpath, "arr.npy")
            np.save(path, np.array([1]))
            C['labels_path'] = None
            # DataLoader instances are accepted for data and labels
            C['data_loader'] = DataLoader(path, loader='numpy')
            DataGenerator(**C)
            C['labels_loader'] = DataLoader(path, loader='numpy')
            DataGenerator(**C)
            # an unrelated class instance is rejected
            C['data_loader'] = DataGenerator
            pass_on_error(DataGenerator, **C)
            # the DataLoader class itself (uninstantiated) is accepted
            C['labels_loader'] = None
            C['data_loader'] = DataLoader
            DataGenerator(**C)
            C['labels_loader'] = DataGenerator
            pass_on_error(DataGenerator, **C)
    _test_init()
    _test_misc()
    _test_make_group_batch_and_labels()
    _test_infer_and_set_info()
# register every test_* function of this module in tests_done -- presumably
# consumed by the @notify decorator's pass/fail bookkeeping (see backend)
tests_done.update({name: None for name in _get_test_names(__name__)})
if __name__ == '__main__':
    # allow running this file directly; -s disables pytest output capturing
    pytest.main([__file__, "-s"])
| [
"deeptrain.util.algorithms.ordered_shuffle",
"sys.path.insert",
"backend.tempdir",
"deeptrain.DataGenerator",
"numpy.array",
"copy.deepcopy",
"backend.notify",
"deeptrain.util.misc.argspec",
"matplotlib.pyplot.plot",
"pytest.main",
"sys.path.index",
"io.StringIO",
"deeptrain.util.misc.pass_o... | [((123, 148), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (138, 148), False, 'import os\n'), ((149, 176), 'sys.path.insert', 'sys.path.insert', (['(0)', 'filedir'], {}), '(0, filedir)\n', (164, 176), False, 'import sys\n'), ((707, 745), 'os.path.join', 'os.path.join', (['BASEDIR', '"""tests"""', '"""data"""'], {}), "(BASEDIR, 'tests', 'data')\n", (719, 745), False, 'import os\n'), ((952, 970), 'backend.notify', 'notify', (['tests_done'], {}), '(tests_done)\n', (958, 970), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((1432, 1450), 'backend.notify', 'notify', (['tests_done'], {}), '(tests_done)\n', (1438, 1450), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((1713, 1731), 'backend.notify', 'notify', (['tests_done'], {}), '(tests_done)\n', (1719, 1731), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((1883, 1901), 'backend.notify', 'notify', (['tests_done'], {}), '(tests_done)\n', (1889, 1901), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((3610, 3628), 'backend.notify', 'notify', (['tests_done'], {}), '(tests_done)\n', (3616, 3628), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((3836, 3854), 'backend.notify', 'notify', (['tests_done'], {}), '(tests_done)\n', (3842, 3854), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((5374, 5392), 'backend.notify', 'notify', (['tests_done'], {}), '(tests_done)\n', (5380, 5392), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((6392, 6410), 'backend.notify', 'notify', (['tests_done'], {}), '(tests_done)\n', (6398, 6410), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((6970, 6988), 'backend.notify', 'notify', (['tests_done'], {}), '(tests_done)\n', (6976, 6988), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((1005, 
1026), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (1013, 1026), False, 'from copy import deepcopy\n'), ((1054, 1093), 'os.path.join', 'os.path.join', (['datadir', '"""image"""', '"""train"""'], {}), "(datadir, 'image', 'train')\n", (1066, 1093), False, 'import os\n'), ((1103, 1121), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (1116, 1121), False, 'from deeptrain import DataGenerator\n'), ((1180, 1198), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (1193, 1198), False, 'from deeptrain import DataGenerator\n'), ((1203, 1234), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['dg.advance_batch'], {}), '(dg.advance_batch)\n', (1216, 1234), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((1271, 1289), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (1284, 1289), False, 'from deeptrain import DataGenerator\n'), ((1326, 1357), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['dg.advance_batch'], {}), '(dg.advance_batch)\n', (1339, 1357), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((1395, 1428), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (1408, 1428), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((1479, 1500), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (1487, 1500), False, 'from copy import deepcopy\n'), ((1566, 1605), 'os.path.join', 'os.path.join', (['datadir', '"""image"""', '"""train"""'], {}), "(datadir, 'image', 'train')\n", (1578, 1605), False, 'import os\n'), ((1640, 1658), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (1653, 1658), False, 'from deeptrain import DataGenerator\n'), ((1759, 1780), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (1767, 1780), False, 'from copy import deepcopy\n'), ((1861, 1879), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), 
'(**C)\n', (1874, 1879), False, 'from deeptrain import DataGenerator\n'), ((3133, 3154), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (3141, 3154), False, 'from copy import deepcopy\n'), ((3177, 3227), 'os.path.join', 'os.path.join', (['datadir', '"""timeseries_split"""', '"""train"""'], {}), "(datadir, 'timeseries_split', 'train')\n", (3189, 3227), False, 'import os\n'), ((3252, 3315), 'os.path.join', 'os.path.join', (['datadir', '"""timeseries_split"""', '"""train"""', '"""labels.h5"""'], {}), "(datadir, 'timeseries_split', 'train', 'labels.h5')\n", (3264, 3315), False, 'import os\n'), ((5511, 5542), 'numpy.random.randn', 'np.random.randn', (['(128)', '(28)', '(28)', '(1)'], {}), '(128, 28, 28, 1)\n', (5526, 5542), True, 'import numpy as np\n'), ((5556, 5590), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(128, 10)'], {}), '(0, 2, (128, 10))\n', (5573, 5590), True, 'import numpy as np\n'), ((6246, 6269), 'deeptrain.util.algorithms.ordered_shuffle', 'ordered_shuffle', (['gb', 'lb'], {}), '(gb, lb)\n', (6261, 6269), False, 'from deeptrain.util.algorithms import ordered_shuffle\n'), ((9609, 9638), 'pytest.main', 'pytest.main', (["[__file__, '-s']"], {}), "([__file__, '-s'])\n", (9620, 9638), False, 'import pytest\n'), ((225, 248), 'sys.path.index', 'sys.path.index', (['filedir'], {}), '(filedir)\n', (239, 248), False, 'import sys\n'), ((781, 820), 'os.path.join', 'os.path.join', (['datadir', '"""image"""', '"""train"""'], {}), "(datadir, 'image', 'train')\n", (793, 820), False, 'import os\n'), ((838, 890), 'os.path.join', 'os.path.join', (['datadir', '"""image"""', '"""train"""', '"""labels.h5"""'], {}), "(datadir, 'image', 'train', 'labels.h5')\n", (850, 890), False, 'import os\n'), ((1967, 1985), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (1980, 1985), False, 'from deeptrain import DataGenerator\n'), ((2084, 2102), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (2097, 2102), False, 
'from deeptrain import DataGenerator\n'), ((2212, 2245), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (2225, 2245), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((2332, 2395), 'os.path.join', 'os.path.join', (['datadir', '"""image_lz4f"""', '"""train"""', '"""128batch__1.npy"""'], {}), "(datadir, 'image_lz4f', 'train', '128batch__1.npy')\n", (2344, 2395), False, 'import os\n'), ((2442, 2475), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (2455, 2475), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((2525, 2558), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (2538, 2558), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((2617, 2635), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (2630, 2635), False, 'from deeptrain import DataGenerator\n'), ((2727, 2790), 'os.path.join', 'os.path.join', (['datadir', '"""image_lz4f"""', '"""train"""', '"""128batch__1.npy"""'], {}), "(datadir, 'image_lz4f', 'train', '128batch__1.npy')\n", (2739, 2790), False, 'import os\n'), ((2837, 2870), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (2850, 2870), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((2912, 2943), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataLoader', '(1)', '(1)'], {}), '(DataLoader, 1, 1)\n', (2925, 2943), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((3018, 3049), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataLoader'], {}), '(DataLoader, **kw)\n', (3031, 3049), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((3091, 3122), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataLoader'], {}), '(DataLoader, **kw)\n', (3104, 3122), False, 'from 
deeptrain.util.misc import pass_on_error, argspec\n'), ((3695, 3716), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (3703, 3716), False, 'from copy import deepcopy\n'), ((3791, 3809), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (3804, 3809), False, 'from deeptrain import DataGenerator\n'), ((4029, 4047), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (4042, 4047), False, 'from deeptrain import DataGenerator\n'), ((4088, 4125), 'deeptrain.util.TimeseriesPreprocessor', 'TimeseriesPreprocessor', ([], {'window_size': '(5)'}), '(window_size=5)\n', (4110, 4125), False, 'from deeptrain.util import TimeseriesPreprocessor\n'), ((4174, 4235), 'deeptrain.util.TimeseriesPreprocessor', 'TimeseriesPreprocessor', ([], {'window_size': '(25)', 'start_increments': 'None'}), '(window_size=25, start_increments=None)\n', (4196, 4235), False, 'from deeptrain.util import TimeseriesPreprocessor\n'), ((4545, 4608), 'deeptrain.util.TimeseriesPreprocessor', 'TimeseriesPreprocessor', ([], {'window_size': '(25)', 'start_increments': '[0, 5]'}), '(window_size=25, start_increments=[0, 5])\n', (4567, 4608), False, 'from deeptrain.util import TimeseriesPreprocessor\n'), ((4898, 4961), 'deeptrain.util.TimeseriesPreprocessor', 'TimeseriesPreprocessor', ([], {'window_size': '(25)', 'start_increments': '[0, 5]'}), '(window_size=25, start_increments=[0, 5])\n', (4920, 4961), False, 'from deeptrain.util import TimeseriesPreprocessor\n'), ((4980, 4993), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (4991, 4993), False, 'import contextlib, io\n'), ((5873, 5891), 'numpy.abs', 'np.abs', (['(gb[0] - x0)'], {}), '(gb[0] - x0)\n', (5879, 5891), True, 'import numpy as np\n'), ((5914, 5932), 'numpy.abs', 'np.abs', (['(gb[1] - x1)'], {}), '(gb[1] - x1)\n', (5920, 5932), True, 'import numpy as np\n'), ((5955, 5973), 'numpy.abs', 'np.abs', (['(lb[0] - y0)'], {}), '(lb[0] - y0)\n', (5961, 5973), True, 'import numpy as np\n'), ((5996, 6014), 
'numpy.abs', 'np.abs', (['(lb[1] - y1)'], {}), '(lb[1] - y1)\n', (6002, 6014), True, 'import numpy as np\n'), ((6479, 6500), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (6487, 6500), False, 'from copy import deepcopy\n'), ((6671, 6692), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (6679, 6692), False, 'from copy import deepcopy\n'), ((7059, 7080), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (7067, 7080), False, 'from copy import deepcopy\n'), ((7165, 7198), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (7178, 7198), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((7212, 7233), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (7220, 7233), False, 'from copy import deepcopy\n'), ((7271, 7304), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (7284, 7304), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((7341, 7374), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (7354, 7374), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((7410, 7431), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (7418, 7431), False, 'from copy import deepcopy\n'), ((7445, 7463), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (7458, 7463), False, 'from deeptrain import DataGenerator\n'), ((7552, 7609), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['dg._get_next_batch'], {'set_num': '"""3"""', 'warn': '(True)'}), "(dg._get_next_batch, set_num='3', warn=True)\n", (7565, 7609), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((7646, 7693), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['dg._get_next_labels'], {'set_num': '"""3"""'}), "(dg._get_next_labels, set_num='3')\n", (7659, 7693), 
False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((7703, 7745), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['setattr', 'dg', '"""load_data"""', '(1)'], {}), "(setattr, dg, 'load_data', 1)\n", (7716, 7745), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((7754, 7798), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['setattr', 'dg', '"""load_labels"""', '(1)'], {}), "(setattr, dg, 'load_labels', 1)\n", (7767, 7798), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((8140, 8161), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (8148, 8161), False, 'from copy import deepcopy\n'), ((8175, 8193), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (8188, 8193), False, 'from deeptrain import DataGenerator\n'), ((8214, 8238), 'numpy.random.randn', 'np.random.randn', (['(128)', '(10)'], {}), '(128, 10)\n', (8229, 8238), True, 'import numpy as np\n'), ((8259, 8283), 'numpy.random.randn', 'np.random.randn', (['(129)', '(10)'], {}), '(129, 10)\n', (8274, 8283), True, 'import numpy as np\n'), ((8292, 8351), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['dg._make_group_batch_and_labels'], {'n_batches': '(2)'}), '(dg._make_group_batch_and_labels, n_batches=2)\n', (8305, 8351), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((8750, 8771), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (8758, 8771), False, 'from copy import deepcopy\n'), ((9179, 9212), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (9192, 9212), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((9294, 9312), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (9307, 9312), False, 'from deeptrain import DataGenerator\n'), ((9365, 9398), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (9378, 9398), False, 
'from deeptrain.util.misc import pass_on_error, argspec\n'), ((3535, 3547), 'copy.deepcopy', 'deepcopy', (['_C'], {}), '(_C)\n', (3543, 3547), False, 'from copy import deepcopy\n'), ((5007, 5041), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['str_io'], {}), '(str_io)\n', (5033, 5041), False, 'import contextlib, io\n'), ((5331, 5352), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (5339, 5352), False, 'from copy import deepcopy\n'), ((6514, 6523), 'backend.tempdir', 'tempdir', ([], {}), '()\n', (6521, 6523), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((6585, 6618), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (6598, 6618), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((6706, 6715), 'backend.tempdir', 'tempdir', ([], {}), '()\n', (6713, 6715), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((6740, 6756), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]'], {}), '([0, 1])\n', (6748, 6756), True, 'import matplotlib.pyplot as plt\n'), ((6870, 6903), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (6883, 6903), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((7813, 7822), 'backend.tempdir', 'tempdir', ([], {}), '()\n', (7820, 7822), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((7854, 7886), 'os.path.join', 'os.path.join', (['dirpath', '"""arr.npy"""'], {}), "(dirpath, 'arr.npy')\n", (7866, 7886), False, 'import os\n'), ((7944, 7965), 'copy.deepcopy', 'deepcopy', (['DATAGEN_CFG'], {}), '(DATAGEN_CFG)\n', (7952, 7965), False, 'from copy import deepcopy\n'), ((8048, 8081), 'deeptrain.util.misc.pass_on_error', 'pass_on_error', (['DataGenerator'], {}), '(DataGenerator, **C)\n', (8061, 8081), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((8785, 8794), 
'backend.tempdir', 'tempdir', ([], {}), '()\n', (8792, 8794), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((8826, 8858), 'os.path.join', 'os.path.join', (['dirpath', '"""arr.npy"""'], {}), "(dirpath, 'arr.npy')\n", (8838, 8858), False, 'import os\n'), ((8967, 8999), 'deeptrain.util.data_loaders.DataLoader', 'DataLoader', (['path'], {'loader': '"""numpy"""'}), "(path, loader='numpy')\n", (8977, 8999), False, 'from deeptrain.util.data_loaders import DataLoader\n'), ((9012, 9030), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (9025, 9030), False, 'from deeptrain import DataGenerator\n'), ((9065, 9097), 'deeptrain.util.data_loaders.DataLoader', 'DataLoader', (['path'], {'loader': '"""numpy"""'}), "(path, loader='numpy')\n", (9075, 9097), False, 'from deeptrain.util.data_loaders import DataLoader\n'), ((9110, 9128), 'deeptrain.DataGenerator', 'DataGenerator', ([], {}), '(**C)\n', (9123, 9128), False, 'from deeptrain import DataGenerator\n'), ((9549, 9574), 'backend._get_test_names', '_get_test_names', (['__name__'], {}), '(__name__)\n', (9564, 9574), False, 'from backend import BASEDIR, tempdir, notify, _get_test_names\n'), ((6787, 6819), 'os.path.join', 'os.path.join', (['dirpath', '"""img.png"""'], {}), "(dirpath, 'img.png')\n", (6799, 6819), False, 'import os\n'), ((7913, 7926), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (7921, 7926), True, 'import numpy as np\n'), ((8885, 8898), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (8893, 8898), True, 'import numpy as np\n'), ((3496, 3507), 'deeptrain.util.misc.argspec', 'argspec', (['fn'], {}), '(fn)\n', (3503, 3507), False, 'from deeptrain.util.misc import pass_on_error, argspec\n'), ((6769, 6778), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6776, 6778), True, 'import matplotlib.pyplot as plt\n')] |
import os
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import graphviz as gv
class HiddenMarkovModel:
    """A discrete Hidden Markov Model.

    Stores the transition and emission probabilities as labelled pandas
    DataFrames and provides the Forward, Backward and Viterbi algorithms,
    plus a graphviz-based visualization of the model structure.
    """
    def __init__(
        self,
        observable_states,
        hidden_states,
        transition_matrix,
        emission_matrix,
        title="HMM",
    ):
        """Initialization function for HiddenMarkovModel
        Attributes:
            observable_states (list): A list containing the name of each observable state.
            hidden_states (list): A list containing the name of each hidden state.
            transition_matrix (2-D list): A matrix containing the transition probabilities.
            emission_matrix (2-D list): A matrix containing the emission probabilities.
            title (str): Title for the HMM project. Output files will be named with this attribute.
        """
        self.observable_states = observable_states
        self.hidden_states = hidden_states
        # Rows/columns carry the state names so a probability can be read as
        # transition_matrix.loc[from_state, to_state].
        self.transition_matrix = pd.DataFrame(
            data=transition_matrix, columns=hidden_states, index=hidden_states
        )
        # Rows are hidden states, columns are observable states.
        self.emission_matrix = pd.DataFrame(
            data=emission_matrix, columns=observable_states, index=hidden_states
        )
        # Initial state probabilities: stationary distribution of the
        # transition matrix (eigenvector for eigenvalue 1).
        self.pi = self._calculate_stationary_distribution()
        self.title = title
    def print_model_info(self):
        """Prints the model in a readable manner."""
        print("*" * 50)
        print(f"Observable States: {self.observable_states}")
        print(f"Emission Matrix:\n{self.emission_matrix}")
        print(f"Hidden States: {self.hidden_states}")
        print(f"Transition Matrix:\n{self.transition_matrix}")
        print(f"Initial Probabilities: {self.pi}")
    def visualize_model(self, output_dir="outputs", notebook=False):
        """Creates a transition and emission graph of the model.
        Args:
            output_dir (str): A directory will be created with this name. If the directory already exists then an error will be raised.
            notebook (bool): Whether the model should be visualized for a notebook or a script. If False, then a png will be displayed. If True then the output will be displayed in the IPython cell.
        Raises:
            FileExistsError: If ``output_dir`` already exists.
        """
        try:
            os.mkdir(output_dir)
        except FileExistsError:
            raise FileExistsError(
                "Directory already exists! Please provide a different output directory!"
            )
        output_loc = output_dir + "/" + self.title
        G = nx.MultiDiGraph()
        G.add_nodes_from(self.hidden_states)
        # Get transition probabilities
        # Blue edges: hidden-state to hidden-state transitions.
        hidden_edges = self._get_markov_edges(self.transition_matrix)
        for (origin, destination), weight in hidden_edges.items():
            G.add_edge(origin, destination, weight=weight, label=weight, color="blue")
        # Get emission probabilities
        # Red edges: hidden-state to observable-state emissions.
        emission_edges = self._get_markov_edges(self.emission_matrix)
        for (origin, destination), weight in emission_edges.items():
            G.add_edge(origin, destination, weight=weight, label=weight, color="red")
        # Create graph and draw with edge labels
        # NOTE(review): `pos` and `edge_labels` are computed but never used by
        # the graphviz rendering below; write_dot embeds labels itself.
        pos = nx.drawing.nx_pydot.graphviz_layout(G, prog="dot")
        edge_labels = {(n1, n2): d["label"] for n1, n2, d in G.edges(data=True)}
        nx.drawing.nx_pydot.write_dot(G, output_loc + ".dot")
        s = gv.Source.from_file(output_loc + ".dot", format="png")
        if notebook:
            from IPython.display import display
            display(s)
            return
        s.view()
    def forward(self, input_seq):
        """Runs the Forward Algorithm.
        Args:
            input_seq (list): A list of the observed input sequence, encoded
                as integer column indices into the emission matrix.
        Returns:
            alpha (np.array): A matrix of the alpha values.
            probs (numpy.float64): The computed probability of the input sequence.
        """
        input_seq = np.array(input_seq)
        n_states = len(self.hidden_states)
        T = len(input_seq)
        # Convert DataFrame to np.array
        emission_matrix = self.emission_matrix.values
        transition_matrix = self.transition_matrix.values
        # Initialize alpha
        alpha = np.zeros((n_states, T))
        # Base case: alpha(s, 0) = pi(s) * b_s(o_0).
        alpha[:, 0] = self.pi * emission_matrix[:, input_seq[0]]
        # Recursion: alpha(s, t) = b_s(o_t) * sum_s' alpha(s', t-1) * a(s', s).
        for t in range(1, T):
            for s in range(n_states):
                alpha[s, t] = emission_matrix[s, input_seq[t]] * np.sum(
                    alpha[:, t - 1] * transition_matrix[:, s]
                )
        # Sequence likelihood = sum of alpha over states at the final time step.
        probs = alpha[:, -1].sum()
        return alpha, probs
    def backward(self, input_seq):
        """Runs the Backward Algorithm.
        Args:
            input_seq (list): A list of the observed input sequence, encoded
                as integer column indices into the emission matrix.
        Returns:
            beta (np.array): A matrix of the beta values.
            probs (numpy.float64): The computed probability of the input sequence.
        """
        input_seq = np.array(input_seq)
        n_states = len(self.hidden_states)
        T = len(input_seq)
        # Convert DataFrame to np.array
        emission_matrix = self.emission_matrix.values
        transition_matrix = self.transition_matrix.values
        # Initialize beta starting from last
        beta = np.zeros((n_states, T))
        # Base case: beta(s, T-1) = 1.
        beta[:, T - 1] = 1.0
        # Recursion (backwards in time):
        # beta(s, t) = sum_s' b_s'(o_{t+1}) * beta(s', t+1) * a(s, s').
        for t in range(T - 2, -1, -1):
            for s in range(n_states):
                beta[s, t] = np.sum(
                    emission_matrix[:, input_seq[t + 1]]
                    * beta[:, t + 1]
                    * transition_matrix[s, :]
                )
        # Sequence likelihood from the t=0 betas weighted by pi and b_s(o_0).
        probs = sum(self.pi * emission_matrix[:, input_seq[0]] * beta[:, 0])
        return beta, probs
    def viterbi(self, input_seq):
        """Runs the Viterbi Algorithm.
        Args:
            input_seq (list): A list of the observed input sequence, encoded
                as integer column indices into the emission matrix.
        Returns:
            path (np.array): The output path for given input sequence.
            delta (np.array): A matrix of the delta values.
            phi (numpy.array): A matrix of the phi values.
        """
        input_seq = np.array(input_seq)
        n_states = len(self.hidden_states)
        T = len(input_seq)
        # Convert DataFrame to np.array
        emission_matrix = self.emission_matrix.values
        transition_matrix = self.transition_matrix.values
        # Initial blank path
        path = np.zeros(T, dtype=int)
        # Delta = Highest probability of any path that reaches state i
        delta = np.zeros((n_states, T))
        # Phi = Argmax by time step for each state
        phi = np.zeros((n_states, T))
        # Initialize delta
        delta[:, 0] = self.pi * emission_matrix[:, input_seq[0]]
        print("*" * 50)
        print("Starting Forward Walk")
        # Forward pass: delta(s, t) is the best-path score ending in state s
        # at time t; phi(s, t) records which previous state achieved it.
        for t in range(1, T):
            for s in range(n_states):
                delta[s, t] = (
                    np.max(delta[:, t - 1] * transition_matrix[:, s])
                    * emission_matrix[s, input_seq[t]]
                )
                phi[s, t] = np.argmax(delta[:, t - 1] * transition_matrix[:, s])
                print(f"State={s} : Sequence={t} | phi[{s}, {t}]={phi[s, t]}")
        print("*" * 50)
        print("Start Backtrace")
        # Backtrace: start from the best final state and follow phi backwards.
        path[T - 1] = np.argmax(delta[:, T - 1])
        for t in range(T - 2, -1, -1):
            # phi stores floats; the assignment into the int `path` coerces it.
            path[t] = phi[path[t + 1], [t + 1]]
            print(f"Path[{t}]={path[t]}")
        return path, delta, phi
    def _calculate_stationary_distribution(self):
        """Calculates the initial stationary distribution for the model.
        Returns:
            stationary (np.array): The stationary distribution.
        """
        # Left eigenvector of the transition matrix for eigenvalue 1
        # (computed as a right eigenvector of the transpose).
        eig_vals, eig_vects = np.linalg.eig(self.transition_matrix.T.values)
        _eig_vects = eig_vects[:, np.isclose(eig_vals, 1)]
        _eig_vects = _eig_vects[:, 0]
        # Normalize so the probabilities sum to 1, then drop the imaginary part.
        stationary = _eig_vects / _eig_vects.sum()
        stationary = stationary.real
        return stationary
    def _get_markov_edges(self, matrix):
        """Returns the edges between two states.
        Args:
            matrix (pd.DataFrame): A matrix attribute of the model.
        Returns:
            edges: A dictionary mapping (row_state, col_state) to the
                corresponding probability.
        """
        edges = {}
        for col in matrix.columns:
            for row in matrix.index:
                edges[(row, col)] = matrix.loc[row, col]
        return edges
def print_forward_result(alpha, a_prob):
    """Display the outcome of the Forward Algorithm.

    Args:
        alpha (np.array): A matrix of the alpha values.
        a_prob (numpy.float64): The computed probability from the alpha values.
    """
    separator = "*" * 50
    print(separator)
    print(f"Alpha:\n{alpha}\nProbability of sequence: {a_prob}")
def print_backward_result(beta, b_prob):
    """Display the outcome of the Backward Algorithm.

    Args:
        beta (np.array): A matrix of the beta values.
        b_prob (numpy.float64): The computed probability from the beta values.
    """
    separator = "*" * 50
    print(separator)
    print(f"Beta:\n{beta}\nProbability of sequence: {b_prob}")
def print_viterbi_result(input_seq, observable_states, hidden_states, path, delta, phi):
    """Display the outcome of the Viterbi Algorithm.

    Args:
        input_seq (list): A list of the observed input sequence.
        observable_states (list): A list containing the name of each observable state.
        hidden_states (list): A list containing the name of each hidden state.
        path (np.array): The output path for given input sequence.
        delta (np.array): A matrix of the delta values.
        phi (numpy.array): A matrix of the phi values.
    """
    print("*" * 50)
    print("Viterbi Result")
    print(f"Delta:\n{delta}")
    print(f"Phi:\n{phi}")
    # Decode the integer-encoded path and observations back to their names.
    decoded_states = [hidden_states[step] for step in path]
    observed_names = [observable_states[obs] for obs in input_seq]
    result_table = (
        pd.DataFrame().assign(Observation=observed_names).assign(BestPath=decoded_states)
    )
    print(f"Result:\n{result_table}")
| [
"IPython.display.display",
"networkx.MultiDiGraph",
"numpy.isclose",
"numpy.linalg.eig",
"networkx.drawing.nx_pydot.graphviz_layout",
"graphviz.Source.from_file",
"numpy.argmax",
"networkx.drawing.nx_pydot.write_dot",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"os.mkdir",
"pan... | [((986, 1071), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'transition_matrix', 'columns': 'hidden_states', 'index': 'hidden_states'}), '(data=transition_matrix, columns=hidden_states, index=hidden_states\n )\n', (998, 1071), True, 'import pandas as pd\n'), ((1120, 1207), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'emission_matrix', 'columns': 'observable_states', 'index': 'hidden_states'}), '(data=emission_matrix, columns=observable_states, index=\n hidden_states)\n', (1132, 1207), True, 'import pandas as pd\n'), ((2490, 2507), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (2505, 2507), True, 'import networkx as nx\n'), ((3144, 3194), 'networkx.drawing.nx_pydot.graphviz_layout', 'nx.drawing.nx_pydot.graphviz_layout', (['G'], {'prog': '"""dot"""'}), "(G, prog='dot')\n", (3179, 3194), True, 'import networkx as nx\n'), ((3284, 3337), 'networkx.drawing.nx_pydot.write_dot', 'nx.drawing.nx_pydot.write_dot', (['G', "(output_loc + '.dot')"], {}), "(G, output_loc + '.dot')\n", (3313, 3337), True, 'import networkx as nx\n'), ((3351, 3405), 'graphviz.Source.from_file', 'gv.Source.from_file', (["(output_loc + '.dot')"], {'format': '"""png"""'}), "(output_loc + '.dot', format='png')\n", (3370, 3405), True, 'import graphviz as gv\n'), ((3887, 3906), 'numpy.array', 'np.array', (['input_seq'], {}), '(input_seq)\n', (3895, 3906), True, 'import numpy as np\n'), ((4174, 4197), 'numpy.zeros', 'np.zeros', (['(n_states, T)'], {}), '((n_states, T))\n', (4182, 4197), True, 'import numpy as np\n'), ((4900, 4919), 'numpy.array', 'np.array', (['input_seq'], {}), '(input_seq)\n', (4908, 4919), True, 'import numpy as np\n'), ((5204, 5227), 'numpy.zeros', 'np.zeros', (['(n_states, T)'], {}), '((n_states, T))\n', (5212, 5227), True, 'import numpy as np\n'), ((6033, 6052), 'numpy.array', 'np.array', (['input_seq'], {}), '(input_seq)\n', (6041, 6052), True, 'import numpy as np\n'), ((6321, 6343), 'numpy.zeros', 'np.zeros', (['T'], {'dtype': 'int'}), '(T, 
dtype=int)\n', (6329, 6343), True, 'import numpy as np\n'), ((6431, 6454), 'numpy.zeros', 'np.zeros', (['(n_states, T)'], {}), '((n_states, T))\n', (6439, 6454), True, 'import numpy as np\n'), ((6520, 6543), 'numpy.zeros', 'np.zeros', (['(n_states, T)'], {}), '((n_states, T))\n', (6528, 6543), True, 'import numpy as np\n'), ((7185, 7211), 'numpy.argmax', 'np.argmax', (['delta[:, T - 1]'], {}), '(delta[:, T - 1])\n', (7194, 7211), True, 'import numpy as np\n'), ((7623, 7669), 'numpy.linalg.eig', 'np.linalg.eig', (['self.transition_matrix.T.values'], {}), '(self.transition_matrix.T.values)\n', (7636, 7669), True, 'import numpy as np\n'), ((2235, 2255), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (2243, 2255), False, 'import os\n'), ((3488, 3498), 'IPython.display.display', 'display', (['s'], {}), '(s)\n', (3495, 3498), False, 'from IPython.display import display\n'), ((5364, 5455), 'numpy.sum', 'np.sum', (['(emission_matrix[:, input_seq[t + 1]] * beta[:, t + 1] * transition_matrix[\n s, :])'], {}), '(emission_matrix[:, input_seq[t + 1]] * beta[:, t + 1] *\n transition_matrix[s, :])\n', (5370, 5455), True, 'import numpy as np\n'), ((6973, 7025), 'numpy.argmax', 'np.argmax', (['(delta[:, t - 1] * transition_matrix[:, s])'], {}), '(delta[:, t - 1] * transition_matrix[:, s])\n', (6982, 7025), True, 'import numpy as np\n'), ((7704, 7727), 'numpy.isclose', 'np.isclose', (['eig_vals', '(1)'], {}), '(eig_vals, 1)\n', (7714, 7727), True, 'import numpy as np\n'), ((4397, 4446), 'numpy.sum', 'np.sum', (['(alpha[:, t - 1] * transition_matrix[:, s])'], {}), '(alpha[:, t - 1] * transition_matrix[:, s])\n', (4403, 4446), True, 'import numpy as np\n'), ((6822, 6871), 'numpy.max', 'np.max', (['(delta[:, t - 1] * transition_matrix[:, s])'], {}), '(delta[:, t - 1] * transition_matrix[:, s])\n', (6828, 6871), True, 'import numpy as np\n'), ((9806, 9820), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9818, 9820), True, 'import pandas as pd\n')] |
import pandas as pd
import quandl
import math
import numpy as np
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import datetime
import matplotlib.pyplot as plt
from matplotlib import style
import pickle
# Forecast future 'Adj. Close' prices for GOOGL with a pre-trained linear
# regression model, then plot the history together with the forecast.
style.use('ggplot')
# Download the full Google (Alphabet) price history from Quandl.
df = quandl.get('WIKI/GOOGL')
# print(df.head())
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume',] ]
# High-to-close spread as a percentage of the close (volatility proxy).
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close'] *100.0
# Open-to-close daily percent change.
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] *100.0
df = df [['Adj. Close', 'HL_PCT' , 'PCT_change', 'Adj. Volume']]
# print(df.head())
forecast_col = 'Adj. Close'
# Sentinel value so missing data is treated as an outlier instead of breaking the model.
df.fillna(-99999, inplace =True)
# Forecast horizon: 1% of the dataset length, in days.
forecast_out = int(math.ceil(0.01*len(df)))
print(forecast_out)
# The label is the close price `forecast_out` rows into the future.
df['label'] = df[forecast_col].shift(-forecast_out)
print (df.tail())
X = np.array(df.drop(['label'],1))
X = preprocessing.scale(X)
# The newest rows have no label (shifted off the end); keep them aside to predict on.
X_lately = X[-forecast_out:]
X = X[:-forecast_out]
df.dropna(inplace =True)
y = np.array(df['label'])
# NOTE(review): sklearn.cross_validation was removed in modern scikit-learn;
# newer code would use sklearn.model_selection.train_test_split.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
# clf = LinearRegression(n_jobs=-1)
# clf.fit(X_train, y_train)
# with open ('linearregression.pickle', 'wb') as f:
#     pickle.dump(clf,f)
# Load a previously trained model instead of refitting on every run.
pickle_in = open('linearregression.pickle','rb')
clf = pickle.load(pickle_in)
accuracy = clf.score(X_test, y_test)
# print(accuracy)
forecast_set = clf.predict(X_lately)
print(forecast_set, accuracy, forecast_out )
# Append one row per forecast value, dated one day after the previous row,
# so history and forecast can be plotted on a common date axis.
df['Forecast'] = np.nan
last_date = df.iloc[-1].name
last_unix = last_date.timestamp()
one_day = 86400
next_unix = last_unix + one_day
for i in forecast_set:
    next_date = datetime.datetime.fromtimestamp(next_unix)
    next_unix += one_day
    df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)] + [i]
df["Adj. Close"].plot()
df["Forecast"].plot()
plt.legend(loc=4)
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
| [
"datetime.datetime.fromtimestamp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"pickle.load",
"numpy.array",
"quandl.get",
"matplotlib.style.use",
"sklearn.cross_validation.train_test_split",
"sklearn.preprocessing.scale",
"matplotlib.pyplot.show"
] | [((265, 284), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (274, 284), False, 'from matplotlib import style\n'), ((290, 314), 'quandl.get', 'quandl.get', (['"""WIKI/GOOGL"""'], {}), "('WIKI/GOOGL')\n", (300, 314), False, 'import quandl\n'), ((896, 918), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (915, 918), False, 'from sklearn import preprocessing, cross_validation, svm\n'), ((1001, 1022), 'numpy.array', 'np.array', (["df['label']"], {}), "(df['label'])\n", (1009, 1022), True, 'import numpy as np\n'), ((1059, 1113), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (1092, 1113), False, 'from sklearn import preprocessing, cross_validation, svm\n'), ((1309, 1331), 'pickle.load', 'pickle.load', (['pickle_in'], {}), '(pickle_in)\n', (1320, 1331), False, 'import pickle\n'), ((1827, 1844), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (1837, 1844), True, 'import matplotlib.pyplot as plt\n'), ((1845, 1863), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (1855, 1863), True, 'import matplotlib.pyplot as plt\n'), ((1864, 1883), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (1874, 1883), True, 'import matplotlib.pyplot as plt\n'), ((1884, 1894), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1892, 1894), True, 'import matplotlib.pyplot as plt\n'), ((1645, 1687), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['next_unix'], {}), '(next_unix)\n', (1676, 1687), False, 'import datetime\n')] |
"""
@AmineHorseman
Sep, 12th, 2016
"""
import tensorflow as tf
from tflearn import DNN
import time
import numpy as np
import argparse
import dlib
import cv2
import os
from skimage.feature import hog
from parameters import DATASET, TRAINING, NETWORK, VIDEO_PREDICTOR
from model import build_model
window_size = 24
window_step = 6
def load_model():
    """Build the network and restore its pretrained weights from disk.

    Returns:
        The tflearn DNN wrapping the network. If the checkpoint file is
        missing, an error message is printed and the unloaded model is
        returned instead.
    """
    graph = tf.Graph()
    with graph.as_default():
        print("loading pretrained model...")
        model = DNN(build_model())
        if not os.path.isfile(TRAINING.save_model_path):
            print("Error: file '{}' not found".format(TRAINING.save_model_path))
        else:
            model.load(TRAINING.save_model_path)
    return model
def get_landmarks(image, rects, predictor):
    """Return the 2-D facial landmark coordinates for a single detected face.

    Adapted from http://bit.ly/2cj7Fpq

    Args:
        image: The image to extract landmarks from.
        rects: Face bounding boxes; exactly one is expected.
        predictor: A dlib shape predictor callable as predictor(image, rect).

    Returns:
        np.matrix: One (x, y) row per landmark point.

    Raises:
        ValueError: If zero faces or more than one face were detected.
    """
    # The original code raised the undefined names TooManyFaces / NoFaces,
    # which produced a NameError at runtime; raise ValueError instead.
    if len(rects) > 1:
        raise ValueError("Too many faces detected (expected exactly one)")
    if len(rects) == 0:
        raise ValueError("No faces detected")
    return np.matrix([[p.x, p.y] for p in predictor(image, rects[0]).parts()])
def sliding_hog_windows(image):
    """Collect HOG descriptors from windows slid across the whole image.

    Windows of side `window_size` are sampled every `window_step` pixels in
    both directions, and the per-window descriptors are concatenated into a
    single flat feature list.
    """
    features = []
    for top in range(0, NETWORK.input_size, window_step):
        for left in range(0, NETWORK.input_size, window_step):
            patch = image[top:top + window_size, left:left + window_size]
            descriptor = hog(patch, orientations=8, pixels_per_cell=(8, 8),
                             cells_per_block=(1, 1), visualise=False)
            features.extend(descriptor)
    return features
def predict(image, model, shape_predictor=None):
    """Classify the emotion shown in a preprocessed face image.

    Depending on the NETWORK configuration flags, the model is fed either
    the raw image tensor alone, or the image tensor together with facial
    landmarks (optionally combined with HOG features).

    Args:
        image: Grayscale face image, assumed to be NETWORK.input_size square
            (the whole image is treated as one face rectangle) — TODO confirm.
        model: The trained tflearn model returned by load_model().
        shape_predictor: dlib shape predictor used to extract landmarks;
            required when any landmark-based NETWORK flag is enabled.

    Returns:
        (emotion, confidence) tuple as produced by get_emotion().
    """
    # get landmarks
    if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks or NETWORK.use_hog_sliding_window_and_landmarks:
        # Treat the full image as the face bounding box.
        face_rects = [dlib.rectangle(
            left=0, top=0, right=NETWORK.input_size, bottom=NETWORK.input_size)]
        face_landmarks = np.array(
            [get_landmarks(image, face_rects, shape_predictor)])
        features = face_landmarks
        if NETWORK.use_hog_sliding_window_and_landmarks:
            # Landmarks + HOG descriptors from many small sliding windows.
            hog_features = sliding_hog_windows(image)
            hog_features = np.asarray(hog_features)
            face_landmarks = face_landmarks.flatten()
            features = np.concatenate((face_landmarks, hog_features))
        else:
            # Landmarks + a single whole-image HOG descriptor.
            # (visualise=True makes hog() return a (features, image) pair;
            # the rendered image is discarded.)
            hog_features, _ = hog(image, orientations=8, pixels_per_cell=(16, 16),
                                    cells_per_block=(1, 1), visualise=True)
            hog_features = np.asarray(hog_features)
            face_landmarks = face_landmarks.flatten()
            features = np.concatenate((face_landmarks, hog_features))
        tensor_image = image.reshape(
            [-1, NETWORK.input_size, NETWORK.input_size, 1])
        predicted_label = model.predict(
            [tensor_image, features.reshape((1, -1))])
        return get_emotion(predicted_label[0])
    else:
        # Image-only model: no landmark/HOG auxiliary input.
        tensor_image = image.reshape(
            [-1, NETWORK.input_size, NETWORK.input_size, 1])
        predicted_label = model.predict(tensor_image)
        return get_emotion(predicted_label[0])
    # NOTE(review): unreachable — both branches above return.
    return None
def get_emotion(label):
    """Return (emotion_name, confidence) for the highest-scoring class.

    Args:
        label: Array of per-class scores in the fixed order
            angry / happy / sad / surprise / neutral.

    Returns:
        Tuple of the winning emotion's name (from VIDEO_PREDICTOR.emotions)
        and its raw score.
    """
    if VIDEO_PREDICTOR.print_emotions:
        print("- Angry: {0:.1f}%\n- Happy: {1:.1f}%\n- Sad: {2:.1f}%\n- Surprise: {3:.1f}%\n- Neutral: {4:.1f}%".format(
            label[0]*100, label[1]*100, label[2]*100, label[3]*100, label[4]*100))
    scores = label.tolist()
    best = max(scores)
    return VIDEO_PREDICTOR.emotions[scores.index(best)], best
def starting(image):
    """Detect a face in an image file, crop it, and predict its emotion.

    Args:
        image (str): Path to the input image file.

    Returns:
        (cropped, emotion, confidence_pct): the cropped face region (BGR),
        the predicted emotion name, and the confidence as a percentage.
    """
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    img = cv2.imread(image)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    # NOTE(review): if several faces are detected only the last one survives
    # the loop; if none are detected, `gray`/`cropped` below raise NameError.
    for (x, y, w, h) in faces:
        # cv2.rectangle(img, (x,y), (x+w, y+h), (255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        # Crop with a 25% margin around the detected box.
        cropped = img[y - int(h/4):y + h + int(h/4), x - int(w/4):x + w + int(w/4)]
        # `gray` is rebound here to the 48x48 model input for the cropped face.
        gray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
        gray = cv2.resize(gray, (48, 48))
    # Round-trip through disk so the prediction reads the same 48x48 input.
    cv2.imwrite('new_image.jpg', gray)
    model = load_model()
    image = cv2.imread('new_image.jpg', 0)
    shape_predictor = dlib.shape_predictor(DATASET.shape_predictor_path)
    start_time = time.time()
    emotion, confidence = predict(image, model, shape_predictor)
    total_time = time.time() - start_time
    print("Prediction: {0} (confidence: {1:.1f}%)".format(emotion, confidence*100))
    print("time: {0:.1f} sec".format(total_time))
    return cropped,emotion,confidence*100
# parse arg to see if we need to launch training now or not yet
# parser = argparse.ArgumentParser()
# parser.add_argument("-i", "--image", help="Image file to predict")
# args = parser.parse_args()
# if args.image:
# if os.path.isfile(args.image):
# model = load_model()
# image = cv2.imread(args.image, 0)
# shape_predictor = dlib.shape_predictor(DATASET.shape_predictor_path)
# start_time = time.time()
# emotion, confidence = predict(image, model, shape_predictor)
# total_time = time.time() - start_time
# print("Prediction: {0} (confidence: {1:.1f}%)".format(
# emotion, confidence*100))
# print("time: {0:.1f} sec".format(total_time))
# else:
# print("Error: file '{}' not found".format(args.image))
| [
"cv2.imwrite",
"tensorflow.Graph",
"tflearn.DNN",
"dlib.rectangle",
"numpy.asarray",
"dlib.shape_predictor",
"os.path.isfile",
"model.build_model",
"skimage.feature.hog",
"cv2.cvtColor",
"time.time",
"numpy.concatenate",
"cv2.CascadeClassifier",
"cv2.resize",
"cv2.imread"
] | [((3370, 3430), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (3391, 3430), False, 'import cv2\n'), ((3441, 3458), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (3451, 3458), False, 'import cv2\n'), ((3470, 3507), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3482, 3507), False, 'import cv2\n'), ((3827, 3868), 'cv2.cvtColor', 'cv2.cvtColor', (['cropped', 'cv2.COLOR_BGR2GRAY'], {}), '(cropped, cv2.COLOR_BGR2GRAY)\n', (3839, 3868), False, 'import cv2\n'), ((3880, 3906), 'cv2.resize', 'cv2.resize', (['gray', '(48, 48)'], {}), '(gray, (48, 48))\n', (3890, 3906), False, 'import cv2\n'), ((3911, 3945), 'cv2.imwrite', 'cv2.imwrite', (['"""new_image.jpg"""', 'gray'], {}), "('new_image.jpg', gray)\n", (3922, 3945), False, 'import cv2\n'), ((3983, 4013), 'cv2.imread', 'cv2.imread', (['"""new_image.jpg"""', '(0)'], {}), "('new_image.jpg', 0)\n", (3993, 4013), False, 'import cv2\n'), ((4036, 4086), 'dlib.shape_predictor', 'dlib.shape_predictor', (['DATASET.shape_predictor_path'], {}), '(DATASET.shape_predictor_path)\n', (4056, 4086), False, 'import dlib\n'), ((4104, 4115), 'time.time', 'time.time', ([], {}), '()\n', (4113, 4115), False, 'import time\n'), ((465, 478), 'model.build_model', 'build_model', ([], {}), '()\n', (476, 478), False, 'from model import build_model\n'), ((495, 507), 'tflearn.DNN', 'DNN', (['network'], {}), '(network)\n', (498, 507), False, 'from tflearn import DNN\n'), ((519, 559), 'os.path.isfile', 'os.path.isfile', (['TRAINING.save_model_path'], {}), '(TRAINING.save_model_path)\n', (533, 559), False, 'import os\n'), ((4198, 4209), 'time.time', 'time.time', ([], {}), '()\n', (4207, 4209), False, 'import time\n'), ((1626, 1713), 'dlib.rectangle', 'dlib.rectangle', ([], {'left': '(0)', 'top': '(0)', 'right': 'NETWORK.input_size', 'bottom': 'NETWORK.input_size'}), '(left=0, top=0, 
right=NETWORK.input_size, bottom=NETWORK.\n input_size)\n', (1640, 1713), False, 'import dlib\n'), ((1995, 2019), 'numpy.asarray', 'np.asarray', (['hog_features'], {}), '(hog_features)\n', (2005, 2019), True, 'import numpy as np\n'), ((2097, 2143), 'numpy.concatenate', 'np.concatenate', (['(face_landmarks, hog_features)'], {}), '((face_landmarks, hog_features))\n', (2111, 2143), True, 'import numpy as np\n'), ((2188, 2284), 'skimage.feature.hog', 'hog', (['image'], {'orientations': '(8)', 'pixels_per_cell': '(16, 16)', 'cells_per_block': '(1, 1)', 'visualise': '(True)'}), '(image, orientations=8, pixels_per_cell=(16, 16), cells_per_block=(1, 1),\n visualise=True)\n', (2191, 2284), False, 'from skimage.feature import hog\n'), ((2342, 2366), 'numpy.asarray', 'np.asarray', (['hog_features'], {}), '(hog_features)\n', (2352, 2366), True, 'import numpy as np\n'), ((2444, 2490), 'numpy.concatenate', 'np.concatenate', (['(face_landmarks, hog_features)'], {}), '((face_landmarks, hog_features))\n', (2458, 2490), True, 'import numpy as np\n'), ((377, 387), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (385, 387), True, 'import tensorflow as tf\n'), ((1270, 1366), 'skimage.feature.hog', 'hog', (['window'], {'orientations': '(8)', 'pixels_per_cell': '(8, 8)', 'cells_per_block': '(1, 1)', 'visualise': '(False)'}), '(window, orientations=8, pixels_per_cell=(8, 8), cells_per_block=(1, 1),\n visualise=False)\n', (1273, 1366), False, 'from skimage.feature import hog\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 14:29:03 2020
@author: <NAME>
Script used to create the formatted table for lag phase calculation by DMFit
DMFit requires data to be formatted in a specific way to be analysed by the
Excel add-in DMFit. In brief, the excel file needs to have two spreadsheets:
- one named Logc, containing the data to be analyized. This spreadsheet
should have three columns:
+ logc: contains the label of the strains for each data point
+ Time: contains the time points
+ 0: contains the OD values
- another named index0 containing only one column titled logc and listing
all the strains used.
For more information, please refer to the DMFit user manual included in the
package. DMFit can be found at:
https://www.combase.cc/index.php/en/8-category-en-gb/21-tools
(accessed April 21st 2020)
"""
from pathlib import Path
import pandas as pd
import numpy as np
def Technical_replicate_mean(FileName):
    """Average the OD values of technical replicates in one plate-reader file.

    The raw Excel export is read, its metadata header discarded, and the
    rows sorted so that each strain's three technical replicates sit in
    consecutive rows. Each triplet is then averaged per time point. The
    final four rows (water blanks) are averaged separately as one group.

    Args:
        FileName: Path to the raw plate-reader Excel file.

    Returns:
        list: One averaged OD time series (pandas Series) per strain, with
        the water-blank mean appended last.
    """
    raw = pd.read_excel(FileName, sheet_name="All Cycles")
    # Rows 0-9 hold metadata; the OD readings live in rows 10-74.
    data = raw[10:75]
    # Sorting by the strain-label column groups technical replicates together
    # (needed when the plate layout was randomised).
    by_strain = data.sort_values('Unnamed: 3')
    # Drop the well-name/label columns, keeping only the OD readings.
    od_values = by_strain.loc[:, 'Unnamed: 292':]
    means = []
    # Walk the table three rows at a time and average each triplet.
    for start in range(0, (len(od_values) - 5), 3):
        triplet = od_values.iloc[[start, start + 1, start + 2]]
        means.append(triplet.mean(axis=0))
    # The four water wells are averaged as their own group.
    water = od_values.iloc[[60, 61, 62, 63]]
    means.append(water.mean(axis=0))
    return means
# Calculating the average of the technical replicates for each biological
# replicate.
# BUGFIX: the paths must be pathlib.Path objects so that the "/" join
# operator used below (pathToBioRep1Data / nameOfRep1File) works; plain
# strings raise TypeError. Replace the placeholders with your own paths.
pathToBioRep1Data = Path("<Path to raw data Excel files for replicate 1")
pathToBioRep2Data = Path("<Path to raw data Excel files for replicate 2")
pathToBioRep3Data = Path("<Path to raw data Excel files for replicate 3")
nameOfRep1File = "<Name of Excel file for replicate 1"
nameOfRep2File = "<Name of Excel file for replicate 2"
nameOfRep3File = "<Name of Excel file for replicate 3"
# In my case:
# pathToBioRep1Data = Path("/Users/user/Documents/DPhil_Projects/Collaborations/KrishnaKumar_etal_DropletPrinting/RawData/Bacterial-Growth-Kinetics/2019-10-11_StrainsRav_GrowthCurves-1/")
# pathToBioRep2Data = Path("/Users/user/Documents/DPhil_Projects/Collaborations/KrishnaKumar_etal_DropletPrinting/RawData/Bacterial-Growth-Kinetics/2019-10-18_StrainsRav_GrowthCurves-2/")
# pathToBioRep3Data = Path("/Users/user/Documents/DPhil_Projects/Collaborations/KrishnaKumar_etal_DropletPrinting/RawData/Bacterial-Growth-Kinetics/2019-10-24_StrainsRav_GrowthCurves-3/")
# nameOfRep1File = "2019-10-11-TML_StrainRav_GrowthCurves-1.xlsx"
# nameOfRep2File = "2019-10-18-TML_StrainsRav_GrowthCurves-2.xlsx"
# nameOfRep3File = "2019-10-24-TML_StrainsRav_GrowthCurves-3.xlsx"
br1 = Technical_replicate_mean(pathToBioRep1Data / nameOfRep1File)
br2 = Technical_replicate_mean(pathToBioRep2Data / nameOfRep2File)
br3 = Technical_replicate_mean(pathToBioRep3Data / nameOfRep3File)
# If one wants to do the analysis on the average of the biological replicates,
# this calculates it.
mean_biorep = []
for j in range(21):  # one entry per strain/condition measured on the plate
    # Column-bind the three biological replicates for strain j, then average
    # them per time point.
    temp_concatenated_data = pd.concat((br1[j], br2[j], br3[j]), axis=1)
    mean_biorep.append(temp_concatenated_data.mean(axis=1))
def reformatingData(dataSet):
# This function reformats the data from being organised as one row per
# strain and a time value per column to the layout explained at the top
# of the script.
# This renames all the keys in the time series to the strain label.
dataSet[0] = dataSet[0].rename(lambda x: 'WT')
dataSet[1] = dataSet[1].rename(lambda x: 'WT GFP')
dataSet[2] = dataSet[2].rename(lambda x: 'WT RFP')
dataSet[3] = dataSet[3].rename(lambda x: 'E2')
dataSet[4] = dataSet[4].rename(lambda x: 'E2 GFP')
dataSet[5] = dataSet[5].rename(lambda x: 'E2 RFP')
dataSet[6] = dataSet[6].rename(lambda x: 'E7')
dataSet[7] = dataSet[7].rename(lambda x: 'E7 RFP')
dataSet[8] = dataSet[8].rename(lambda x: 'E8')
dataSet[9] = dataSet[9].rename(lambda x: 'E8 RFP')
dataSet[10] = dataSet[10].rename(lambda x: 'A')
dataSet[11] = dataSet[11].rename(lambda x: 'A GFP')
dataSet[12] = dataSet[12].rename(lambda x: 'A RFP')
dataSet[13] = dataSet[13].rename(lambda x: 'btuB')
dataSet[14] = dataSet[14].rename(lambda x: 'btuB GFP')
dataSet[15] = dataSet[15].rename(lambda x: 'btuB RFP')
dataSet[16] = dataSet[16].rename(lambda x: 'pC001')
dataSet[17] = dataSet[17].rename(lambda x: 'ImmE2 Ypet')
dataSet[18] = dataSet[18].rename(lambda x: 'ImmE2 NeonGreen')
time = np.arange(0, 1440, 5).tolist() # Creates a vector with the time
# points
# Converts the time series to dataframes and inserts/adds the time vector
# to the data.
for j in range(21):
dataSet[j] = dataSet[j].to_frame()
dataSet[j].insert(0, 'Time', time, True)
data = dataSet[0]
for k in range(18):
data = pd.concat([data, dataSet[k+1]])
data.rename(columns={0: 'logc'})
return(data)
# Here each biological replicates is reformatted using the function
# reformattingData above
data_br1 = reformatingData(br1)
data_br2 = reformatingData(br2)
data_br3 = reformatingData(br3)
# Reformatting the data the same way for the mean of the biological repliactes
data_mean = reformatingData(mean_biorep)
index0 = pd.DataFrame(['WT',
'WT GFP',
'WT RFP',
'E2',
'E2 GFP',
'E2 RFP',
'E7',
'E7 RFP',
'E8',
'E8 RFP',
'A',
'A GFP',
'A RFP',
'btuB',
'btuB GFP',
'btuB RFP',
'pC001',
'ImmE2 Ypet',
'ImmE2 NeonGreen'],
columns=['logc'])
# The following section saves the reformatted data to an excel file with the
# various sheets DMFit requires.
pathToOutput = "<Path to where the excel file should be saved>"
nameOfOutput = "<Name of output file>.xlsx"
# Example, in my case for replicate 1:
# pathToOutput = Path("/Users/user/Documents/DPhil_Projects/Collaborations/KrishnaKumar_etal_DropletPrinting/Methods/Bacterial-Growth-Kinetics/GrowthCurves_Lag/Test/")
# nameOfOutput = "2020-02-01-TML_LagPhase_data-br1.xlsx"
# This then creates a file containing the reformatted data for biological
# replicate 1. Biological replicates 2 and 3 can be outputted by filling an
# appropriate name for the output file above and replacing data_br1 by data_br2
# or data_br3 below.
writer = pd.ExcelWriter(pathToOutput / nameOfOutput, engine='xlsxwriter')
data_br1.to_excel(writer, sheet_name='Logc', index_label='logc')
# data_mean.to_excel(writer, sheet_name='Logc', index_label='logc')
index0.to_excel(writer, sheet_name='Index0', index=False)
writer.save()
writer.close()
| [
"pandas.read_excel",
"pandas.DataFrame",
"pandas.ExcelWriter",
"pandas.concat",
"numpy.arange"
] | [((6298, 6519), 'pandas.DataFrame', 'pd.DataFrame', (["['WT', 'WT GFP', 'WT RFP', 'E2', 'E2 GFP', 'E2 RFP', 'E7', 'E7 RFP', 'E8',\n 'E8 RFP', 'A', 'A GFP', 'A RFP', 'btuB', 'btuB GFP', 'btuB RFP',\n 'pC001', 'ImmE2 Ypet', 'ImmE2 NeonGreen']"], {'columns': "['logc']"}), "(['WT', 'WT GFP', 'WT RFP', 'E2', 'E2 GFP', 'E2 RFP', 'E7',\n 'E7 RFP', 'E8', 'E8 RFP', 'A', 'A GFP', 'A RFP', 'btuB', 'btuB GFP',\n 'btuB RFP', 'pC001', 'ImmE2 Ypet', 'ImmE2 NeonGreen'], columns=['logc'])\n", (6310, 6519), True, 'import pandas as pd\n'), ((7692, 7756), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['(pathToOutput / nameOfOutput)'], {'engine': '"""xlsxwriter"""'}), "(pathToOutput / nameOfOutput, engine='xlsxwriter')\n", (7706, 7756), True, 'import pandas as pd\n'), ((1113, 1161), 'pandas.read_excel', 'pd.read_excel', (['FileName'], {'sheet_name': '"""All Cycles"""'}), "(FileName, sheet_name='All Cycles')\n", (1126, 1161), True, 'import pandas as pd\n'), ((4087, 4130), 'pandas.concat', 'pd.concat', (['(br1[j], br2[j], br3[j])'], {'axis': '(1)'}), '((br1[j], br2[j], br3[j]), axis=1)\n', (4096, 4130), True, 'import pandas as pd\n'), ((5889, 5922), 'pandas.concat', 'pd.concat', (['[data, dataSet[k + 1]]'], {}), '([data, dataSet[k + 1]])\n', (5898, 5922), True, 'import pandas as pd\n'), ((5534, 5555), 'numpy.arange', 'np.arange', (['(0)', '(1440)', '(5)'], {}), '(0, 1440, 5)\n', (5543, 5555), True, 'import numpy as np\n')] |
import keras
from keras.models import load_model
from keras import backend as K
import math
import sys
import argparse
import numpy as np
import scipy.io as sio
import os
import glob
import h5py
import cv2
import gc
''' This code is based on <NAME>., <NAME>., & Arganda-Carreras,
I. (2017). "Vision-Based Fall Detection with Convolutional Neural Networks"
Wireless Communications and Mobile Computing, 2017.
Also, new features were added by <NAME> working in
Semantix.
'''
''' Documentation: class Fextractor
This class has a few methods:
extract
The only method that should be called outside of this class is:
extract: receives a CNN already trained until the last two full connected
layers and extract features from optical flows extracted from a video.
A feature is the result from a feedforward using a stack of optical flows,
later these features will be used for training these last two layers.
'''
class Fextractor:
def __init__(self, classes, id):
self.num_features = 4096
self.folders = []
self.classes = classes
self.classes_dirs = []
self.classes_videos = []
self.class_value = []
self.data_images = []
self.data_images_1 = []
self.x_size = 224
self.y_size = 224
self.id = id
def extract(self, stream, model, data_folder):
print("### Model loading", flush=True)
extractor_model = load_model(model)
features_file = stream + '_features_' + self.id + '.h5'
labels_file = stream + '_labels_' + self.id + '.h5'
samples_file = stream + '_samples_' + self.id + '.h5'
num_file = stream + '_num_' + self.id + '.h5'
features_key = 'features'
labels_key = 'labels'
samples_key = 'samples'
num_key = 'num'
sliding_height = 10
'''
Function to load the optical flow stacks, do a feed-forward through
the feature extractor (VGG16) and store the output feature vectors in
the file 'features_file' and the labels in 'labels_file'.
Input:
* extractor_model: CNN model until the last two layers.
* features_file: path to the hdf5 file where the extracted features are
going to be stored
* labels_file: path to the hdf5 file where the labels of the features
are going to be stored
* samples_file: path to the hdf5 file where the number of stacks in
each video is going to be stored
* num_file: path to the hdf5 file where the number of fall and not fall
videos are going to be stored
* features_key: name of the key for the hdf5 file to store the features
* labels_key: name of the key for the hdf5 file to store the labels
* samples_key: name of the key for the hdf5 file to store the samples
* num_key: name of the key for the hdf5 file to store the num
* data_folder: folder with class0 and class1 folders
* sliding_height: height of stack to process
'''
try:
flow_mean = sio.loadmat('flow_mean.mat')['image_mean']
except:
print("***********************************************************",
file=sys.stderr)
print("A flow_mean.mat file with mean values for your trained CNN",
file=sys.stderr)
print("should be in the same directory as fextractor.py. This",
file=sys.stderr)
print("file also needs a image_mean key", file=sys.stderr)
print("***********************************************************",
file=sys.stderr)
exit(1)
dirs = []
num_class = []
# File to store the extracted features and datasets to store them
# IMPORTANT NOTE: 'w' mode totally erases previous data
print("### Creating h5 files", flush=True)
h5features = h5py.File(features_file,'w')
h5labels = h5py.File(labels_file,'w')
h5samples = h5py.File(samples_file, 'w')
h5num_classes = h5py.File(num_file, 'w')
cams = ['cam1', 'cam2', 'cam3', 'cam4', 'cam5', 'cam6', 'cam7', 'cam8']
datas_in_cam = dict()
videos_in_cam = dict()
cam_video_count = dict()
for c in self.classes:
datas_in_cam[c] = dict()
videos_in_cam[c] = dict()
cam_video_count[c] = dict()
h5features.create_group(c)
h5labels.create_group(c)
h5samples.create_group(c)
for cam in cams:
datas_in_cam[c][cam] = 0
videos_in_cam[c][cam] = 0
cam_video_count[c][cam] = 0
h5features[c].create_group(cam)
h5labels[c].create_group(cam)
h5samples[c].create_group(cam)
if stream == 'temporal':
file_name = '/flow_x*.jpg'
file_name_1 = '/flow_y*.jpg'
elif stream == 'pose':
file_name = '/pose_*.jpg'
elif stream == 'spatial':
file_name = '/frame_*.jpg'
else:
print("INVALID STREAM ERROR")
exit(1)
for c in range(len(self.classes)):
num_class.append(0)
if self.classes[c] != 'Falls' and self.classes[c] != 'NotFalls':
print("Sorry. Classes possibles are Falls and NotFalls, its \
hardcoded and will be expanded really soon. Its being \
used inside Extracting Features for, setting label value")
exit(1)
for dir in self.classes_dirs[c]:
check_size = glob.glob(data_folder + self.classes[c] + '/' +
dir + '/flow_x*.jpg')
self.data = glob.glob(data_folder + self.classes[c] + '/' +
dir + file_name)
if int(len(check_size)) >= sliding_height:
# search with cam is being used in this dir
# dir is something like: chute01cam2 or chute01cam2_00
num_class[-1] += 1
for cam in cams:
if cam in dir:
videos_in_cam[self.classes[c]][cam] += 1
if stream == 'temporal':
datas_in_cam[self.classes[c]][cam] = datas_in_cam[self.classes[c]][cam] + len(self.data) - sliding_height + 1
else:
datas_in_cam[self.classes[c]][cam] = datas_in_cam[self.classes[c]][cam] + len(self.data) - sliding_height
self.folders.append(data_folder + self.classes[c] + '/' + dir)
dirs.append(dir)
self.class_value.append(self.classes[c])
datasets_f = dict()
datasets_l = dict()
datasets_s = dict()
for c in self.classes:
datasets_f[c] = dict()
datasets_l[c] = dict()
datasets_s[c] = dict()
for cam in cams:
datasets_f[c][cam] = h5features[c][cam].create_dataset(cam, shape=(datas_in_cam[c][cam], self.num_features), dtype='float64')
datasets_l[c][cam] = h5labels[c][cam].create_dataset(cam, shape=(datas_in_cam[c][cam], 1), dtype='float64')
datasets_s[c][cam] = h5samples[c][cam].create_dataset(cam, shape=(videos_in_cam[c][cam], 1), dtype='int32')
dataset_num = h5num_classes.create_dataset(num_key, shape=(len(self.classes), 1),
dtype='int32')
for c in range(len(self.classes)):
dataset_num[c] = num_class[c]
number = 0
cam_cont_sum = 0
cont = dict()
for c in self.classes:
cont[c] = dict()
for cam in cams:
cam_cont_sum += datas_in_cam[c][cam]
cont[c][cam] = 0
progress_cams = 0.0
print("### Extracting Features", flush=True)
for folder, dir, classe in zip(self.folders, dirs, self.class_value):
self.update_progress(progress_cams/cam_cont_sum)
self.data_images = glob.glob(folder + file_name)
self.data_images.sort()
if stream == 'temporal':
self.data_images_1 = glob.glob(folder + file_name_1)
self.data_images_1.sort()
elif stream == 'spatial' or stream == 'pose':
self.data_images = self.data_images[:-sliding_height]
else:
print("INVALID STREAM ERROR")
exit(1)
label = -1
if classe == 'Falls':
label = 0
else:
label = 1
#label = glob.glob(data_folder + classe + '/' + dir + '/' + '*.npy')
#label_values = np.load(label[0])
if stream == 'temporal':
nb_datas = len(self.data_images) - sliding_height + 1
elif stream == 'spatial' or 'pose':
nb_datas = len(self.data_images)
else:
print("INVALID STREAM ERROR")
exit(1)
amount_datas = 100
fraction_datas = nb_datas // amount_datas
iterr = iter(self.data_images)
image_c = 0
for fraction in range(fraction_datas):
if stream == 'temporal':
flow = np.zeros(shape=(self.x_size, self.y_size, 2*sliding_height,
amount_datas), dtype=np.float64)
for i in range(amount_datas + sliding_height -1):
flow_x_file = self.data_images[image_c]
flow_y_file = self.data_images_1[image_c]
image_c += 1
img_x = cv2.imread(flow_x_file, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(flow_y_file, cv2.IMREAD_GRAYSCALE)
# Assign an image i to the jth stack in the kth position,
# but also in the j+1th stack in the k+1th position and so
# on (for sliding window)
for s in list(reversed(range(min(sliding_height,i+1)))):
if i-s < amount_datas:
flow[:,:,2*s, i-s] = img_x
flow[:,:,2*s+1,i-s] = img_y
del img_x,img_y
gc.collect()
# Restore last images from previous fraction to start next
# fraction
image_c = image_c - sliding_height + 1
# Subtract mean
flow = flow - np.tile(flow_mean[...,np.newaxis],
(1, 1, 1, flow.shape[3]))
# Transpose for channel ordering (Tensorflow in this case)
flow = np.transpose(flow, (3, 0, 1, 2))
predictions = np.zeros((amount_datas, self.num_features),
dtype=np.float64)
truth = np.zeros((amount_datas, 1), dtype='int8')
# Process each stack: do the feed-forward pass and store in the
# hdf5 file the output
for i in range(amount_datas):
prediction = extractor_model.predict(
np.expand_dims(flow[i, ...], 0))
predictions[i, ...] = prediction
#truth[i] = self.get_media_optflow(label_values, i+(fraction*amount_datas), sliding_height)
truth[i] = label
else:
predictions = np.zeros((amount_datas, self.num_features),
dtype=np.float64)
truth = np.zeros((amount_datas, 1), dtype='int8')
# Process each stack: do the feed-forward pass and store in the
# hdf5 file the output
for i in range(amount_datas):
frame = next(iterr)
frame = cv2.imread(frame)
predictions[i, ...] = extractor_model.predict(np.expand_dims(frame, 0))
truth[i] = label
for cam in cams:
if cam in dir:
datasets_f[classe][cam][cont[classe][cam]:cont[classe][cam]+amount_datas,:] = predictions
datasets_l[classe][cam][cont[classe][cam]:cont[classe][cam]+amount_datas,:] = truth
cont[classe][cam] += amount_datas
progress_cams += amount_datas
break
amount_datas = nb_datas % amount_datas
predictions = np.zeros((amount_datas, self.num_features),
dtype=np.float64)
truth = np.zeros((amount_datas, 1), dtype='int8')
if stream == 'temporal':
flow = np.zeros(shape=(self.x_size, self.y_size, 2*sliding_height,
amount_datas), dtype=np.float64)
for i in range(amount_datas + sliding_height - 1):
flow_x_file = self.data_images[image_c]
flow_y_file = self.data_images_1[image_c]
image_c += 1
img_x = cv2.imread(flow_x_file, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(flow_y_file, cv2.IMREAD_GRAYSCALE)
# Assign an image i to the jth stack in the kth position,
# but also in the j+1th stack in the k+1th position and so on
# (for sliding window)
for s in list(reversed(range(min(sliding_height,i+1)))):
if i-s < amount_datas:
flow[:,:,2*s, i-s] = img_x
flow[:,:,2*s+1,i-s] = img_y
del img_x,img_y
gc.collect()
# Subtract mean
flow = flow - np.tile(flow_mean[...,np.newaxis],
(1, 1, 1, flow.shape[3]))
# Transpose for channel ordering (Tensorflow in this case)
flow = np.transpose(flow, (3, 0, 1, 2))
# Process each stack: do the feed-forward pass and store in the
# hdf5 file the output
for i in range(amount_datas):
prediction = extractor_model.predict(np.expand_dims(flow[i, ...],
0))
predictions[i, ...] = prediction
# todo: this 100 value is related to initial amount_datas
#truth[i] = self.get_media_optflow(label_values, fraction_datas* 100 + i, sliding_height)
truth[i] = label
else:
# Process each stack: do the feed-forward pass and store in the
# hdf5 file the output
for i in range(amount_datas):
frame = next(iterr)
frame = cv2.imread(frame)
predictions[i, ...] = extractor_model.predict(np.expand_dims(frame, 0))
truth[i] = label
for cam in cams:
if cam in dir:
datasets_f[classe][cam][cont[classe][cam]:cont[classe][cam]+amount_datas,:] = predictions
datasets_l[classe][cam][cont[classe][cam]:cont[classe][cam]+amount_datas,:] = truth
cont[classe][cam] += amount_datas
progress_cams += amount_datas
datasets_s[classe][cam][cam_video_count[classe][cam]] = nb_datas
cam_video_count[classe][cam] += 1
break
h5features.close()
h5labels.close()
h5samples.close()
h5num_classes.close()
def update_progress(self, workdone):
print("\rProgress: [{0:50s}] {1:.1f}%".format('#' * int(workdone * 50), workdone*100), end="", flush=True)
def get_media_optflow(self, label_values, i, sliding_height):
soma = 0
for j in range(i, i + sliding_height):
soma += label_values[i]
if soma / sliding_height >= 0.5:
return 1
else:
return 0
def get_dirs(self, data_folder):
for c in self.classes:
self.classes_dirs.append([f for f in os.listdir(data_folder + c)
if os.path.isdir(os.path.join(data_folder, c, f))])
self.classes_dirs[-1].sort()
self.classes_videos.append([])
for f in self.classes_dirs[-1]:
self.classes_videos[-1].append(data_folder + c+ '/' + f +
'/' + f + '.avi')
self.classes_videos[-1].sort()
if __name__ == '__main__':
print("***********************************************************",
file=sys.stderr)
print(" SEMANTIX - UNICAMP DATALAB 2018", file=sys.stderr)
print("***********************************************************",
file=sys.stderr)
argp = argparse.ArgumentParser(description='Do feature extraction tasks')
argp.add_argument("-data", dest='data_folder', type=str, nargs=1,
help='Usage: -data <path_to_your_data_folder>', required=True)
argp.add_argument("-streams", dest='streams', type=str, nargs='+',
help='So far, spatial, temporal, pose and its combinations \
Usage: -streams spatial temporal',
required=True)
argp.add_argument("-class", dest='classes', type=str, nargs='+',
help='Usage: -class <class0_name> <class1_name>..<n-th_class_name>',
required=True)
argp.add_argument("-id", dest='id', type=str, nargs=1,
help='Usage: -id <identifier_to_this_features>', required=True)
try:
args = argp.parse_args()
except:
argp.print_help(sys.stderr)
exit(1)
for stream in args.streams:
print("STREAM: " + stream)
fextractor = Fextractor(args.classes, args.id[0])
fextractor.get_dirs(args.data_folder[0])
fextractor.extract(stream, 'VGG16_' + stream, args.data_folder[0])
K.clear_session()
'''
todo: criar excecoes para facilitar o uso
'''
'''
todo: impressao dupla de help se -h ou --help eh passado
'''
| [
"numpy.tile",
"os.listdir",
"keras.models.load_model",
"argparse.ArgumentParser",
"scipy.io.loadmat",
"os.path.join",
"h5py.File",
"numpy.zeros",
"keras.backend.clear_session",
"gc.collect",
"numpy.expand_dims",
"numpy.transpose",
"cv2.imread",
"glob.glob"
] | [((17493, 17559), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Do feature extraction tasks"""'}), "(description='Do feature extraction tasks')\n", (17516, 17559), False, 'import argparse\n'), ((1476, 1493), 'keras.models.load_model', 'load_model', (['model'], {}), '(model)\n', (1486, 1493), False, 'from keras.models import load_model\n'), ((3992, 4021), 'h5py.File', 'h5py.File', (['features_file', '"""w"""'], {}), "(features_file, 'w')\n", (4001, 4021), False, 'import h5py\n'), ((4040, 4067), 'h5py.File', 'h5py.File', (['labels_file', '"""w"""'], {}), "(labels_file, 'w')\n", (4049, 4067), False, 'import h5py\n'), ((4087, 4115), 'h5py.File', 'h5py.File', (['samples_file', '"""w"""'], {}), "(samples_file, 'w')\n", (4096, 4115), False, 'import h5py\n'), ((4140, 4164), 'h5py.File', 'h5py.File', (['num_file', '"""w"""'], {}), "(num_file, 'w')\n", (4149, 4164), False, 'import h5py\n'), ((18613, 18630), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (18628, 18630), True, 'from keras import backend as K\n'), ((8326, 8355), 'glob.glob', 'glob.glob', (['(folder + file_name)'], {}), '(folder + file_name)\n', (8335, 8355), False, 'import glob\n'), ((13026, 13087), 'numpy.zeros', 'np.zeros', (['(amount_datas, self.num_features)'], {'dtype': 'np.float64'}), '((amount_datas, self.num_features), dtype=np.float64)\n', (13034, 13087), True, 'import numpy as np\n'), ((13129, 13170), 'numpy.zeros', 'np.zeros', (['(amount_datas, 1)'], {'dtype': '"""int8"""'}), "((amount_datas, 1), dtype='int8')\n", (13137, 13170), True, 'import numpy as np\n'), ((3131, 3159), 'scipy.io.loadmat', 'sio.loadmat', (['"""flow_mean.mat"""'], {}), "('flow_mean.mat')\n", (3142, 3159), True, 'import scipy.io as sio\n'), ((5733, 5802), 'glob.glob', 'glob.glob', (["(data_folder + self.classes[c] + '/' + dir + '/flow_x*.jpg')"], {}), "(data_folder + self.classes[c] + '/' + dir + '/flow_x*.jpg')\n", (5742, 5802), False, 'import glob\n'), ((5882, 5946), 
'glob.glob', 'glob.glob', (["(data_folder + self.classes[c] + '/' + dir + file_name)"], {}), "(data_folder + self.classes[c] + '/' + dir + file_name)\n", (5891, 5946), False, 'import glob\n'), ((8467, 8498), 'glob.glob', 'glob.glob', (['(folder + file_name_1)'], {}), '(folder + file_name_1)\n', (8476, 8498), False, 'import glob\n'), ((13232, 13330), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.x_size, self.y_size, 2 * sliding_height, amount_datas)', 'dtype': 'np.float64'}), '(shape=(self.x_size, self.y_size, 2 * sliding_height, amount_datas),\n dtype=np.float64)\n', (13240, 13330), True, 'import numpy as np\n'), ((14508, 14540), 'numpy.transpose', 'np.transpose', (['flow', '(3, 0, 1, 2)'], {}), '(flow, (3, 0, 1, 2))\n', (14520, 14540), True, 'import numpy as np\n'), ((9582, 9680), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.x_size, self.y_size, 2 * sliding_height, amount_datas)', 'dtype': 'np.float64'}), '(shape=(self.x_size, self.y_size, 2 * sliding_height, amount_datas),\n dtype=np.float64)\n', (9590, 9680), True, 'import numpy as np\n'), ((11121, 11153), 'numpy.transpose', 'np.transpose', (['flow', '(3, 0, 1, 2)'], {}), '(flow, (3, 0, 1, 2))\n', (11133, 11153), True, 'import numpy as np\n'), ((11189, 11250), 'numpy.zeros', 'np.zeros', (['(amount_datas, self.num_features)'], {'dtype': 'np.float64'}), '((amount_datas, self.num_features), dtype=np.float64)\n', (11197, 11250), True, 'import numpy as np\n'), ((11308, 11349), 'numpy.zeros', 'np.zeros', (['(amount_datas, 1)'], {'dtype': '"""int8"""'}), "((amount_datas, 1), dtype='int8')\n", (11316, 11349), True, 'import numpy as np\n'), ((11945, 12006), 'numpy.zeros', 'np.zeros', (['(amount_datas, self.num_features)'], {'dtype': 'np.float64'}), '((amount_datas, self.num_features), dtype=np.float64)\n', (11953, 12006), True, 'import numpy as np\n'), ((12064, 12105), 'numpy.zeros', 'np.zeros', (['(amount_datas, 1)'], {'dtype': '"""int8"""'}), "((amount_datas, 1), dtype='int8')\n", (12072, 12105), True, 'import 
numpy as np\n'), ((13611, 13656), 'cv2.imread', 'cv2.imread', (['flow_x_file', 'cv2.IMREAD_GRAYSCALE'], {}), '(flow_x_file, cv2.IMREAD_GRAYSCALE)\n', (13621, 13656), False, 'import cv2\n'), ((13685, 13730), 'cv2.imread', 'cv2.imread', (['flow_y_file', 'cv2.IMREAD_GRAYSCALE'], {}), '(flow_y_file, cv2.IMREAD_GRAYSCALE)\n', (13695, 13730), False, 'import cv2\n'), ((14228, 14240), 'gc.collect', 'gc.collect', ([], {}), '()\n', (14238, 14240), False, 'import gc\n'), ((14324, 14385), 'numpy.tile', 'np.tile', (['flow_mean[..., np.newaxis]', '(1, 1, 1, flow.shape[3])'], {}), '(flow_mean[..., np.newaxis], (1, 1, 1, flow.shape[3]))\n', (14331, 14385), True, 'import numpy as np\n'), ((15409, 15426), 'cv2.imread', 'cv2.imread', (['frame'], {}), '(frame)\n', (15419, 15426), False, 'import cv2\n'), ((9984, 10029), 'cv2.imread', 'cv2.imread', (['flow_x_file', 'cv2.IMREAD_GRAYSCALE'], {}), '(flow_x_file, cv2.IMREAD_GRAYSCALE)\n', (9994, 10029), False, 'import cv2\n'), ((10062, 10107), 'cv2.imread', 'cv2.imread', (['flow_y_file', 'cv2.IMREAD_GRAYSCALE'], {}), '(flow_y_file, cv2.IMREAD_GRAYSCALE)\n', (10072, 10107), False, 'import cv2\n'), ((10642, 10654), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10652, 10654), False, 'import gc\n'), ((10925, 10986), 'numpy.tile', 'np.tile', (['flow_mean[..., np.newaxis]', '(1, 1, 1, flow.shape[3])'], {}), '(flow_mean[..., np.newaxis], (1, 1, 1, flow.shape[3]))\n', (10932, 10986), True, 'import numpy as np\n'), ((12360, 12377), 'cv2.imread', 'cv2.imread', (['frame'], {}), '(frame)\n', (12370, 12377), False, 'import cv2\n'), ((14765, 14796), 'numpy.expand_dims', 'np.expand_dims', (['flow[i, ...]', '(0)'], {}), '(flow[i, ...], 0)\n', (14779, 14796), True, 'import numpy as np\n'), ((15493, 15517), 'numpy.expand_dims', 'np.expand_dims', (['frame', '(0)'], {}), '(frame, 0)\n', (15507, 15517), True, 'import numpy as np\n'), ((16770, 16797), 'os.listdir', 'os.listdir', (['(data_folder + c)'], {}), '(data_folder + c)\n', (16780, 16797), False, 'import 
os\n'), ((11642, 11673), 'numpy.expand_dims', 'np.expand_dims', (['flow[i, ...]', '(0)'], {}), '(flow[i, ...], 0)\n', (11656, 11673), True, 'import numpy as np\n'), ((12448, 12472), 'numpy.expand_dims', 'np.expand_dims', (['frame', '(0)'], {}), '(frame, 0)\n', (12462, 12472), True, 'import numpy as np\n'), ((16840, 16871), 'os.path.join', 'os.path.join', (['data_folder', 'c', 'f'], {}), '(data_folder, c, f)\n', (16852, 16871), False, 'import os\n')] |
import unittest
import numpy as np
from sklearn.datasets import make_classification
from skactiveml.classifier import ParzenWindowClassifier
from skactiveml.stream import (
FixedUncertainty,
VariableUncertainty,
Split,
RandomVariableUncertainty,
)
class TemplateTestUncertaintyZliobaite:
def setUp(self):
# initialise valid data to test uncertainty parameters
rand = np.random.RandomState(0)
stream_length = 100
train_init_size = 10
X, y = make_classification(
n_samples=stream_length + train_init_size,
random_state=rand.randint(2**31 - 1),
shuffle=True,
)
self.X = X[:train_init_size, :]
self.candidates = X[train_init_size:, :]
self.y = y[:train_init_size]
self.clf = ParzenWindowClassifier()
self.kwargs = dict(
candidates=self.candidates, clf=self.clf, X=self.X, y=self.y
)
def test_init_param_budget(self):
# budget must be defined as a float greater than 0
query_strategy = self.get_query_strategy()(budget=[])
self.assertRaises(TypeError, query_strategy.query, **(self.kwargs))
query_strategy = self.get_query_strategy()(budget="string")
self.assertRaises(TypeError, query_strategy.query, **(self.kwargs))
query_strategy = self.get_query_strategy()(budget=-1)
self.assertRaises(TypeError, query_strategy.query, **(self.kwargs))
def test_init_param_budget_manager(self):
# budgetmanager must be defined as an object of an budget manager
# class
query_strategy = self.get_query_strategy()(budget_manager=[])
self.assertRaises(TypeError, query_strategy.query, **(self.kwargs))
def test_init_param_random_state(self):
query_strategy = self.get_query_strategy()(
random_state="string",
)
self.assertRaises(ValueError, query_strategy.query, **(self.kwargs))
def test_query_param_candidates(self):
# candidates must be defined as a two dimensinal array
query_strategy = self.get_query_strategy()()
self.assertRaises(
ValueError,
query_strategy.query,
candidates=1,
clf=self.clf,
X=self.X,
y=self.y,
)
self.assertRaises(
ValueError,
query_strategy.query,
candidates=None,
clf=self.clf,
X=self.X,
y=self.y,
)
self.assertRaises(
ValueError,
query_strategy.query,
candidates=np.ones(5),
clf=self.clf,
X=self.X,
y=self.y,
)
def test_query_param_clf(self):
# clf must be defined as a classifier
query_strategy = self.get_query_strategy()()
self.assertRaises(
TypeError,
query_strategy.query,
candidates=self.candidates,
clf="string",
X=self.X,
y=self.y,
)
query_strategy = self.get_query_strategy()()
self.assertRaises(
TypeError,
query_strategy.query,
candidates=self.candidates,
clf=1,
X=self.X,
y=self.y,
)
def test_query_param_X(self):
# X must be defined as a two dimensinal array and must be equal in
# length to y
query_strategy = self.get_query_strategy()()
self.assertRaises(
ValueError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=1,
y=self.y,
fit_clf=True,
)
self.assertRaises(
ValueError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=None,
y=self.y,
fit_clf=True,
)
self.assertRaises(
ValueError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=np.ones(5),
y=self.y,
fit_clf=True,
)
self.assertRaises(
ValueError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=self.X[1:],
y=self.y,
fit_clf=True,
)
def test_query_param_y(self):
# y must be defined as a one Dimensional array and must be equal in
# length to X
query_strategy = self.get_query_strategy()()
self.assertRaises(
TypeError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=self.X,
y=1,
fit_clf=True,
)
self.assertRaises(
TypeError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=self.X,
y=None,
fit_clf=True,
)
self.assertRaises(
ValueError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=self.X,
y=self.y[1:],
fit_clf=True,
)
def test_query_param_sample_weight(self):
# sample weight needs to be a list that can be convertet to float
# equal in size of y
query_strategy = self.get_query_strategy()()
self.assertRaises(
TypeError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=self.X,
y=self.y[1:],
sample_weight="string",
fit_clf=True,
)
self.assertRaises(
ValueError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=self.X,
y=self.y[1:],
sample_weight=["string", "numbers", "test"],
fit_clf=True,
)
self.assertRaises(
ValueError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=self.X,
y=self.y[1:],
sample_weight=[1],
fit_clf=True,
)
def test_query_param_fit_clf(self):
# fit_clf needs to be a boolean
query_strategy = self.get_query_strategy()()
self.assertRaises(
TypeError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=self.X,
y=self.y,
fit_clf="string",
)
self.assertRaises(
TypeError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=self.X,
y=self.y,
fit_clf=1,
)
def test_query_param_return_utilities(self):
# return_utilities needs to be a boolean
query_strategy = self.get_query_strategy()()
self.assertRaises(
TypeError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=self.X,
y=self.y[1:],
return_utilities="string",
)
self.assertRaises(
TypeError,
query_strategy.query,
candidates=self.candidates,
clf=self.clf,
X=self.X,
y=self.y[1:],
return_utilities=1,
)
class TestSplit(TemplateTestUncertaintyZliobaite, unittest.TestCase):
def get_query_strategy(self):
return Split
class TestFixedUncertainty(TemplateTestUncertaintyZliobaite, unittest.TestCase):
def get_query_strategy(self):
return FixedUncertainty
class TestVariableUncertainty(
TemplateTestUncertaintyZliobaite, unittest.TestCase
):
def get_query_strategy(self):
return VariableUncertainty
class TestRandomVariableUncertainty(
TemplateTestUncertaintyZliobaite, unittest.TestCase
):
def get_query_strategy(self):
return RandomVariableUncertainty
| [
"skactiveml.classifier.ParzenWindowClassifier",
"numpy.ones",
"numpy.random.RandomState"
] | [((407, 431), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (428, 431), True, 'import numpy as np\n'), ((812, 836), 'skactiveml.classifier.ParzenWindowClassifier', 'ParzenWindowClassifier', ([], {}), '()\n', (834, 836), False, 'from skactiveml.classifier import ParzenWindowClassifier\n'), ((2621, 2631), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2628, 2631), True, 'import numpy as np\n'), ((4108, 4118), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (4115, 4118), True, 'import numpy as np\n')] |
"""Solve problems that have manufactured solutions."""
import unittest
import numpy as np
from skfem.models.poisson import laplace, mass
from skfem.mesh import MeshHex, MeshLine, MeshQuad, MeshTet, MeshTri
from skfem.element import (ElementHex1, ElementHexS2,
ElementLineP1, ElementLineP2, ElementLineMini,
ElementQuad1, ElementQuad2, ElementTetP1,
ElementTriP2)
from skfem.assembly import FacetBasis, InteriorBasis
from skfem import asm, condense, solve, LinearForm
class Line1D(unittest.TestCase):
    """Solve u'' = 0 with u(0) = 0 and u'(1) = 1 on the unit interval.

    The exact solution is u(x) = x, which the finite element solution
    reproduces exactly at the nodes.
    """
    e = ElementLineP1()
    def runTest(self):
        mesh = MeshLine(np.linspace(0., 1.))
        mesh.refine(2)
        interior = InteriorBasis(mesh, self.e)
        facets = FacetBasis(mesh, self.e)
        @LinearForm
        def neumann_load(v, w):
            # Unit flux applied only at the right endpoint x = 1.
            return v * (w.x[0] == 1.)
        stiffness = asm(laplace, interior)
        load = asm(neumann_load, facets)
        dirichlet = mesh.nodes_satisfying(lambda x: x == 0.0)
        free = interior.complement_dofs(dirichlet)  # noqa E741
        solution = solve(*condense(stiffness, load, I=free))  # noqa E741
        np.testing.assert_array_almost_equal(
            solution[interior.nodal_dofs[0]], mesh.p[0], -10)
class Line1DP2(Line1D):
    # Same mixed Dirichlet/Neumann problem with quadratic line elements.
    e = ElementLineP2()
class Line1DMini(Line1D):
    # Same problem with the MINI line element.
    e = ElementLineMini()
class LineNegative1D(unittest.TestCase):
    """Solve u'' = 0 with u(0) = 0 and u'(1) = -1.

    The exact solution is u(x) = -x.
    """
    e = ElementLineP1()
    def runTest(self):
        mesh = MeshLine(np.linspace(0., 1.))
        mesh.refine(2)
        basis = InteriorBasis(mesh, self.e)
        mesh.define_boundary('left', lambda x: x[0] == 0.0)
        mesh.define_boundary('right', lambda x: x[0] == 1.0)
        right_facets = FacetBasis(mesh, self.e, facets=mesh.boundaries['right'])
        @LinearForm
        def neumann_load(v, w):
            # -1 flux on the right boundary (w.x[0] == 1 there).
            return -w.x[0] * v
        stiffness = asm(laplace, basis)
        load = asm(neumann_load, right_facets)
        fixed = basis.find_dofs()['left'].all()
        free = basis.complement_dofs(fixed)  # noqa E741
        solution = solve(*condense(stiffness, load, I=free))  # noqa E741
        np.testing.assert_array_almost_equal(
            solution[basis.nodal_dofs[0]], -mesh.p[0], -10)
class LineNegative1DP2(LineNegative1D):
    # Same negative-flux problem with quadratic line elements.
    e = ElementLineP2()
class LineNegative1DMini(LineNegative1D):
    # Same problem with the MINI line element.
    e = ElementLineMini()
class LineNeumann1D(unittest.TestCase):
    """Solve -u'' + eps*u = 0 with u'(0) = 1 and u'(1) = 1.

    The exact solution is u(x) = x - 0.5.
    """
    e = ElementLineP1()
    def runTest(self):
        mesh = MeshLine(np.linspace(0., 1.))
        mesh.refine(2)
        basis = InteriorBasis(mesh, self.e)
        facet_basis = FacetBasis(mesh, self.e)
        @LinearForm
        def neumann_load(v, w):
            # +1 flux at x = 1 and -1 flux at x = 0.
            return v * (w.x[0] == 1) - v * (w.x[0] == 0)
        stiffness = asm(laplace, basis)
        mass_matrix = asm(mass, basis)
        load = asm(neumann_load, facet_basis)
        # The tiny mass term regularizes the otherwise singular pure-Neumann system.
        solution = solve(stiffness + 1e-6 * mass_matrix, load)
        np.testing.assert_array_almost_equal(
            solution[basis.nodal_dofs[0]], mesh.p[0] - .5, -4)
class LineNeumann1DP2(LineNeumann1D):
    # Same regularized Neumann problem with quadratic line elements.
    e = ElementLineP2()
class LineNeumann1DMini(LineNeumann1D):
    # Same problem with the MINI line element.
    e = ElementLineMini()
class TestExactHexElement(unittest.TestCase):
    """Check that functions lying in the element's space are solved exactly.

    Each function in ``funs`` is used as Dirichlet data for the Laplace
    problem; because it is in the finite element space, the computed
    solution must match it to round-off.
    """
    mesh = MeshHex
    elem = ElementHex1
    funs = [
        lambda x: 1 + x[0] * x[1] * x[2],
        lambda x: 1 + x[0] * x[1] + x[1] * x[2] + x[0],
    ]
    def set_bc(self, fun, basis):
        """Evaluate *fun* at the mesh vertices to build the boundary data."""
        return fun(basis.mesh.p)
    def runTest(self):
        m = self.mesh()
        m.refine(4)
        basis = InteriorBasis(m, self.elem())
        A = asm(laplace, basis)
        boundary = basis.get_dofs().all()
        free = basis.complement_dofs(boundary)
        for fun in self.funs:
            exact = self.set_bc(fun, basis)
            expected = exact.copy()
            computed = solve(*condense(A, 0 * exact, x=exact, I=free))
            self.assertLessEqual(np.sum(computed - expected), 1e-10)
class TestExactHexS2(TestExactHexElement):
    # Serendipity hex element; only a constant test function is used here.
    elem = ElementHexS2
    funs = [
        lambda x: 1 + 0 * x[0],
    ]
    def set_bc(self, fun, basis):
        # Higher-order elements have dofs off the vertices, so evaluate the
        # boundary function at every dof location rather than at mesh.p.
        return fun(basis.doflocs)
class TestExactQuadElement(TestExactHexElement):
    # Bilinear quadrilateral element.
    mesh = MeshQuad
    elem = ElementQuad1
    funs = [
        lambda x: 1 + 0 * x[0],
        lambda x: 1 + x[0] + x[1] + x[0] * x[1],
    ]
class TestExactTetElement(TestExactHexElement):
    # Linear tetrahedral element; constants and affine functions are exact.
    mesh = MeshTet
    elem = ElementTetP1
    funs = [
        lambda x: 1 + 0 * x[0],
        lambda x: 1 + x[0] + x[1] + x[2],
    ]
class TestExactTriElementP2(TestExactHexElement):
    # Quadratic triangle; boundary data is evaluated at dof locations.
    mesh = MeshTri
    elem = ElementTriP2
    funs = [
        lambda x: 1 + 0 * x[0],
        lambda x: 1 + x[0] + x[1] + x[0] * x[1],
    ]
    def set_bc(self, fun, basis):
        return fun(basis.doflocs)
class TestExactQuadElement2(TestExactTriElementP2):
    # Quadratic quadrilateral, reusing the P2 dof-location boundary data.
    mesh = MeshQuad
    elem = ElementQuad2
if __name__ == '__main__':
    unittest.main()
| [
"numpy.testing.assert_array_almost_equal",
"skfem.asm",
"skfem.condense",
"unittest.main",
"skfem.element.ElementLineP1",
"skfem.assembly.InteriorBasis",
"skfem.element.ElementLineP2",
"skfem.solve",
"numpy.linspace",
"numpy.sum",
"skfem.assembly.FacetBasis",
"skfem.element.ElementLineMini"
] | [((708, 723), 'skfem.element.ElementLineP1', 'ElementLineP1', ([], {}), '()\n', (721, 723), False, 'from skfem.element import ElementHex1, ElementHexS2, ElementLineP1, ElementLineP2, ElementLineMini, ElementQuad1, ElementQuad2, ElementTetP1, ElementTriP2\n'), ((1304, 1319), 'skfem.element.ElementLineP2', 'ElementLineP2', ([], {}), '()\n', (1317, 1319), False, 'from skfem.element import ElementHex1, ElementHexS2, ElementLineP1, ElementLineP2, ElementLineMini, ElementQuad1, ElementQuad2, ElementTetP1, ElementTriP2\n'), ((1356, 1373), 'skfem.element.ElementLineMini', 'ElementLineMini', ([], {}), '()\n', (1371, 1373), False, 'from skfem.element import ElementHex1, ElementHexS2, ElementLineP1, ElementLineP2, ElementLineMini, ElementQuad1, ElementQuad2, ElementTetP1, ElementTriP2\n'), ((1539, 1554), 'skfem.element.ElementLineP1', 'ElementLineP1', ([], {}), '()\n', (1552, 1554), False, 'from skfem.element import ElementHex1, ElementHexS2, ElementLineP1, ElementLineP2, ElementLineMini, ElementQuad1, ElementQuad2, ElementTetP1, ElementTriP2\n'), ((2281, 2296), 'skfem.element.ElementLineP2', 'ElementLineP2', ([], {}), '()\n', (2294, 2296), False, 'from skfem.element import ElementHex1, ElementHexS2, ElementLineP1, ElementLineP2, ElementLineMini, ElementQuad1, ElementQuad2, ElementTetP1, ElementTriP2\n'), ((2349, 2366), 'skfem.element.ElementLineMini', 'ElementLineMini', ([], {}), '()\n', (2364, 2366), False, 'from skfem.element import ElementHex1, ElementHexS2, ElementLineP1, ElementLineP2, ElementLineMini, ElementQuad1, ElementQuad2, ElementTetP1, ElementTriP2\n'), ((2543, 2558), 'skfem.element.ElementLineP1', 'ElementLineP1', ([], {}), '()\n', (2556, 2558), False, 'from skfem.element import ElementHex1, ElementHexS2, ElementLineP1, ElementLineP2, ElementLineMini, ElementQuad1, ElementQuad2, ElementTetP1, ElementTriP2\n'), ((3087, 3102), 'skfem.element.ElementLineP2', 'ElementLineP2', ([], {}), '()\n', (3100, 3102), False, 'from skfem.element import ElementHex1, 
ElementHexS2, ElementLineP1, ElementLineP2, ElementLineMini, ElementQuad1, ElementQuad2, ElementTetP1, ElementTriP2\n'), ((3153, 3170), 'skfem.element.ElementLineMini', 'ElementLineMini', ([], {}), '()\n', (3168, 3170), False, 'from skfem.element import ElementHex1, ElementHexS2, ElementLineP1, ElementLineP2, ElementLineMini, ElementQuad1, ElementQuad2, ElementTetP1, ElementTriP2\n'), ((4825, 4840), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4838, 4840), False, 'import unittest\n'), ((823, 847), 'skfem.assembly.InteriorBasis', 'InteriorBasis', (['m', 'self.e'], {}), '(m, self.e)\n', (836, 847), False, 'from skfem.assembly import FacetBasis, InteriorBasis\n'), ((861, 882), 'skfem.assembly.FacetBasis', 'FacetBasis', (['m', 'self.e'], {}), '(m, self.e)\n', (871, 882), False, 'from skfem.assembly import FacetBasis, InteriorBasis\n'), ((988, 1004), 'skfem.asm', 'asm', (['laplace', 'ib'], {}), '(laplace, ib)\n', (991, 1004), False, 'from skfem import asm, condense, solve, LinearForm\n'), ((1017, 1039), 'skfem.asm', 'asm', (['boundary_flux', 'fb'], {}), '(boundary_flux, fb)\n', (1020, 1039), False, 'from skfem import asm, condense, solve, LinearForm\n'), ((1200, 1270), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['u[ib.nodal_dofs[0]]', 'm.p[0]', '(-10)'], {}), '(u[ib.nodal_dofs[0]], m.p[0], -10)\n', (1236, 1270), True, 'import numpy as np\n'), ((1654, 1678), 'skfem.assembly.InteriorBasis', 'InteriorBasis', (['m', 'self.e'], {}), '(m, self.e)\n', (1667, 1678), False, 'from skfem.assembly import FacetBasis, InteriorBasis\n'), ((1807, 1858), 'skfem.assembly.FacetBasis', 'FacetBasis', (['m', 'self.e'], {'facets': "m.boundaries['right']"}), "(m, self.e, facets=m.boundaries['right'])\n", (1817, 1858), False, 'from skfem.assembly import FacetBasis, InteriorBasis\n'), ((1957, 1973), 'skfem.asm', 'asm', (['laplace', 'ib'], {}), '(laplace, ib)\n', (1960, 1973), False, 'from skfem import asm, condense, solve, LinearForm\n'), ((1986, 
2008), 'skfem.asm', 'asm', (['boundary_flux', 'fb'], {}), '(boundary_flux, fb)\n', (1989, 2008), False, 'from skfem import asm, condense, solve, LinearForm\n'), ((2159, 2230), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['u[ib.nodal_dofs[0]]', '(-m.p[0])', '(-10)'], {}), '(u[ib.nodal_dofs[0]], -m.p[0], -10)\n', (2195, 2230), True, 'import numpy as np\n'), ((2658, 2682), 'skfem.assembly.InteriorBasis', 'InteriorBasis', (['m', 'self.e'], {}), '(m, self.e)\n', (2671, 2682), False, 'from skfem.assembly import FacetBasis, InteriorBasis\n'), ((2696, 2717), 'skfem.assembly.FacetBasis', 'FacetBasis', (['m', 'self.e'], {}), '(m, self.e)\n', (2706, 2717), False, 'from skfem.assembly import FacetBasis, InteriorBasis\n'), ((2842, 2858), 'skfem.asm', 'asm', (['laplace', 'ib'], {}), '(laplace, ib)\n', (2845, 2858), False, 'from skfem import asm, condense, solve, LinearForm\n'), ((2871, 2884), 'skfem.asm', 'asm', (['mass', 'ib'], {}), '(mass, ib)\n', (2874, 2884), False, 'from skfem import asm, condense, solve, LinearForm\n'), ((2897, 2919), 'skfem.asm', 'asm', (['boundary_flux', 'fb'], {}), '(boundary_flux, fb)\n', (2900, 2919), False, 'from skfem import asm, condense, solve, LinearForm\n'), ((2932, 2955), 'skfem.solve', 'solve', (['(L + 1e-06 * M)', 'b'], {}), '(L + 1e-06 * M, b)\n', (2937, 2955), False, 'from skfem import asm, condense, solve, LinearForm\n'), ((2964, 3039), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['u[ib.nodal_dofs[0]]', '(m.p[0] - 0.5)', '(-4)'], {}), '(u[ib.nodal_dofs[0]], m.p[0] - 0.5, -4)\n', (3000, 3039), True, 'import numpy as np\n'), ((3576, 3592), 'skfem.asm', 'asm', (['laplace', 'ib'], {}), '(laplace, ib)\n', (3579, 3592), False, 'from skfem import asm, condense, solve, LinearForm\n'), ((769, 790), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (780, 790), True, 'import numpy as np\n'), ((1600, 1621), 'numpy.linspace', 'np.linspace', 
(['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1611, 1621), True, 'import numpy as np\n'), ((2604, 2625), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2615, 2625), True, 'import numpy as np\n'), ((1157, 1176), 'skfem.condense', 'condense', (['L', 'b'], {'I': 'I'}), '(L, b, I=I)\n', (1165, 1176), False, 'from skfem import asm, condense, solve, LinearForm\n'), ((2116, 2135), 'skfem.condense', 'condense', (['L', 'b'], {'I': 'I'}), '(L, b, I=I)\n', (2124, 2135), False, 'from skfem import asm, condense, solve, LinearForm\n'), ((3836, 3850), 'numpy.sum', 'np.sum', (['(x - Xh)'], {}), '(x - Xh)\n', (3842, 3850), True, 'import numpy as np\n'), ((3773, 3801), 'skfem.condense', 'condense', (['A', '(0 * x)'], {'x': 'x', 'I': 'I'}), '(A, 0 * x, x=x, I=I)\n', (3781, 3801), False, 'from skfem import asm, condense, solve, LinearForm\n')] |
## emotionProcessor-threaded.py
## This is a variation of the emotionProcessor class.
## The main difference between the two classes is that this
## class utilizes python's threading module to collect the
## audio metrics.
## Since this proved to offer little to no performance gains
## while still expending extra resources, this class was not
## utilized in the final build of the software. This class
## may, however, prove to be useful to future researchers
## looking to improve the performance of the AEDS softare.
## This class is included purely for educational purposes.
## All alterations made to this class from emotionProcessor.py
## were made by <NAME>.
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
from scipy.io import wavfile
from scipy.fftpack import fft
import wave
import numpy
import math
from python_speech_features import mfcc
from python_speech_features import delta
from python_speech_features import logfbank
import scipy.io.wavfile as wav
from pydub import AudioSegment
from pydub.silence import split_on_silence
from statistics import *
import numpy as np
import multiprocessing
from multiprocessing import *
import threading
class EmotionProcessor(object):
    """Extract prosodic metrics (pitch, volume, tone, word gaps) from a .wav file.

    The ``*Proc2`` variants are thread targets used by ``collectMetrics``:
    each delegates to its single-result counterpart and additionally stores
    its result in a shared ``results_dict``. This removes the copy-pasted
    bodies the original methods carried.
    """

    def __init__(self, fname):
        # Path to the .wav file to analyze.
        self.fname = fname

    def __enter__(self):
        return self

    def __exit__(self, exception, value, traceback):
        self.close()

    def close(self):
        """Release held resources.

        Each extractor opens and closes its file per call, so there is
        nothing to free; this method exists so that ``__exit__`` (which
        calls it) no longer raises AttributeError when the class is used
        as a context manager.
        """
        pass

    # mfccProc: extracts the MFCCs from the audio file.
    def mfccProc(self):
        """Return the 2-D array of MFCC features for the audio file.

        The delta-MFCC and log-fbank features the original computed were
        never used, so they have been dropped.
        """
        (rate, sig) = audioBasicIO.readAudioFile(self.fname)
        # nfft=1103 pairs with the assumed 44.1 kHz sample rate.
        return mfcc(sig, samplerate=44100, nfft=1103)

    def mfccProc2(self, results_dict):
        """Thread target: compute MFCCs and store their spread as "tone"."""
        mfcc_feat = self.mfccProc()
        # Per-frame standard deviation, then the spread of those deviations.
        frame_devs = [stdev(frame) for frame in mfcc_feat]
        results_dict["tone"] = stdev(frame_devs)
        return mfcc_feat

    def pitchProc(self):
        """Return the short-term feature track used as the pitch proxy.

        NOTE(review): ``info[0][1]`` is row 1 of pyAudioAnalysis's
        short-term feature matrix — confirm this is the intended feature.
        """
        [Fs, x] = audioBasicIO.readAudioFile(self.fname)
        info = audioFeatureExtraction.stFeatureExtraction(
            x, Fs, 0.050 * Fs, 0.025 * Fs)
        return info[0][1]

    def pitchProc2(self, results_dict):
        """Thread target: compute the pitch track and store it under "pitch"."""
        print("pitchProc2")
        track = self.pitchProc()
        results_dict["pitch"] = track
        return track

    def volumeProc(self):
        """Return the FFT frequency axis (0 .. Nyquist bin) for the file.

        NOTE(review): the original also computed a normalized power
        spectrum, but only the frequency array was ever returned, so that
        dead computation has been removed.
        """
        freq, snd = wavfile.read(self.fname)
        n = len(snd)
        # Number of unique (non-mirrored) FFT bins.
        unique = int(math.ceil((n + 1) / 2.0))
        return numpy.arange(0, unique, 1.0) * (freq / n)

    def volumeProc2(self, results_dict):
        """Thread target: compute the frequency axis and store it under "volume"."""
        freq_axis = self.volumeProc()
        results_dict["volume"] = freq_axis
        return freq_axis

    ## gapProc: extraction of the gaps between consecutive words.
    ## Output: a list containing the length of every non-silent chunk.
    def gapProc(self):
        """Return the lengths (ms) of the chunks produced by silence splitting."""
        sound_file = AudioSegment.from_wav(self.fname)
        audio_chunks = split_on_silence(
            sound_file,
            # must be silent for at least 1 ms
            min_silence_len=1,
            # consider it silent if quieter than 5 dBFS
            silence_thresh=5)
        return [len(chunk) for chunk in audio_chunks]

    def gapProc2(self, results_dict):
        """Thread target: compute chunk lengths and store them under "wordGap"."""
        chunk_lengths = self.gapProc()
        results_dict["wordGap"] = chunk_lengths
        return chunk_lengths

    ## collectMetrics:
    ## Collects the audio metrics using the above methods on worker threads.
    def collectMetrics(self):
        """Run the four extractors concurrently and return the user profile.

        Returns:
            numpy array: [pitch stdev, tone, volume stdev,
                          word-gap stdev, word-gap count].
        """
        print("Collecting Metrics")
        results_dict = {"pitch": [], "volume": [], "tone": [],
                        "wordGap": [], "wordGaplen": []}
        print("Creating process")
        workers = [
            threading.Thread(target=self.pitchProc2, args=(results_dict,)),
            threading.Thread(target=self.volumeProc2, args=(results_dict,)),
            threading.Thread(target=self.mfccProc2, args=(results_dict,)),
            threading.Thread(target=self.gapProc2, args=(results_dict,)),
        ]
        print("Starting process")
        for worker in workers:
            worker.start()
        print("Ending Processes")
        for worker in workers:
            worker.join()
        pitch = stdev(results_dict["pitch"])
        volume = stdev(results_dict["volume"])
        tone = results_dict["tone"]
        wordGap = results_dict["wordGap"]
        wordGaplen = len(wordGap)
        # stdev needs at least two data points; the original crashed with a
        # StatisticsError when exactly one gap was found.
        wordGap = stdev(wordGap) if wordGaplen >= 2 else 0
        return np.array([pitch, tone, volume, wordGap, wordGaplen])
| [
"pyAudioAnalysis.audioBasicIO.readAudioFile",
"math.ceil",
"pydub.silence.split_on_silence",
"numpy.arange",
"python_speech_features.delta",
"python_speech_features.logfbank",
"python_speech_features.mfcc",
"numpy.array",
"pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction",
"scipy.io.wavf... | [((1720, 1758), 'pyAudioAnalysis.audioBasicIO.readAudioFile', 'audioBasicIO.readAudioFile', (['self.fname'], {}), '(self.fname)\n', (1746, 1758), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((1824, 1862), 'python_speech_features.mfcc', 'mfcc', (['sig'], {'samplerate': '(44100)', 'nfft': '(1103)'}), '(sig, samplerate=44100, nfft=1103)\n', (1828, 1862), False, 'from python_speech_features import mfcc\n'), ((1946, 1965), 'python_speech_features.delta', 'delta', (['mfcc_feat', '(2)'], {}), '(mfcc_feat, 2)\n', (1951, 1965), False, 'from python_speech_features import delta\n'), ((2044, 2063), 'python_speech_features.logfbank', 'logfbank', (['sig', 'rate'], {}), '(sig, rate)\n', (2052, 2063), False, 'from python_speech_features import logfbank\n'), ((2164, 2202), 'pyAudioAnalysis.audioBasicIO.readAudioFile', 'audioBasicIO.readAudioFile', (['self.fname'], {}), '(self.fname)\n', (2190, 2202), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((2268, 2306), 'python_speech_features.mfcc', 'mfcc', (['sig'], {'samplerate': '(44100)', 'nfft': '(1103)'}), '(sig, samplerate=44100, nfft=1103)\n', (2272, 2306), False, 'from python_speech_features import mfcc\n'), ((2390, 2409), 'python_speech_features.delta', 'delta', (['mfcc_feat', '(2)'], {}), '(mfcc_feat, 2)\n', (2395, 2409), False, 'from python_speech_features import delta\n'), ((2488, 2507), 'python_speech_features.logfbank', 'logfbank', (['sig', 'rate'], {}), '(sig, rate)\n', (2496, 2507), False, 'from python_speech_features import logfbank\n'), ((2788, 2826), 'pyAudioAnalysis.audioBasicIO.readAudioFile', 'audioBasicIO.readAudioFile', (['self.fname'], {}), '(self.fname)\n', (2814, 2826), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((2841, 2913), 'pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction', 'audioFeatureExtraction.stFeatureExtraction', (['x', 'Fs', '(0.05 * Fs)', '(0.025 * Fs)'], {}), '(x, Fs, 0.05 * Fs, 0.025 * Fs)\n', (2883, 2913), False, 'from pyAudioAnalysis 
import audioFeatureExtraction\n'), ((3028, 3066), 'pyAudioAnalysis.audioBasicIO.readAudioFile', 'audioBasicIO.readAudioFile', (['self.fname'], {}), '(self.fname)\n', (3054, 3066), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((3081, 3153), 'pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction', 'audioFeatureExtraction.stFeatureExtraction', (['x', 'Fs', '(0.05 * Fs)', '(0.025 * Fs)'], {}), '(x, Fs, 0.05 * Fs, 0.025 * Fs)\n', (3123, 3153), False, 'from pyAudioAnalysis import audioFeatureExtraction\n'), ((3292, 3316), 'scipy.io.wavfile.read', 'wavfile.read', (['self.fname'], {}), '(self.fname)\n', (3304, 3316), False, 'from scipy.io import wavfile\n'), ((3400, 3407), 'scipy.fftpack.fft', 'fft', (['s1'], {}), '(s1)\n', (3403, 3407), False, 'from scipy.fftpack import fft\n'), ((3922, 3946), 'scipy.io.wavfile.read', 'wavfile.read', (['self.fname'], {}), '(self.fname)\n', (3934, 3946), False, 'from scipy.io import wavfile\n'), ((4030, 4037), 'scipy.fftpack.fft', 'fft', (['s1'], {}), '(s1)\n', (4033, 4037), False, 'from scipy.fftpack import fft\n'), ((4839, 4872), 'pydub.AudioSegment.from_wav', 'AudioSegment.from_wav', (['self.fname'], {}), '(self.fname)\n', (4860, 4872), False, 'from pydub import AudioSegment\n'), ((4897, 4962), 'pydub.silence.split_on_silence', 'split_on_silence', (['sound_file'], {'min_silence_len': '(1)', 'silence_thresh': '(5)'}), '(sound_file, min_silence_len=1, silence_thresh=5)\n', (4913, 4962), False, 'from pydub.silence import split_on_silence\n'), ((6555, 6588), 'pydub.AudioSegment.from_wav', 'AudioSegment.from_wav', (['self.fname'], {}), '(self.fname)\n', (6576, 6588), False, 'from pydub import AudioSegment\n'), ((6613, 6678), 'pydub.silence.split_on_silence', 'split_on_silence', (['sound_file'], {'min_silence_len': '(1)', 'silence_thresh': '(5)'}), '(sound_file, min_silence_len=1, silence_thresh=5)\n', (6629, 6678), False, 'from pydub.silence import split_on_silence\n'), ((8480, 8542), 'threading.Thread', 'threading.Thread', 
([], {'target': 'self.pitchProc2', 'args': '(results_dict,)'}), '(target=self.pitchProc2, args=(results_dict,))\n', (8496, 8542), False, 'import threading\n'), ((8592, 8655), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.volumeProc2', 'args': '(results_dict,)'}), '(target=self.volumeProc2, args=(results_dict,))\n', (8608, 8655), False, 'import threading\n'), ((8705, 8766), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.mfccProc2', 'args': '(results_dict,)'}), '(target=self.mfccProc2, args=(results_dict,))\n', (8721, 8766), False, 'import threading\n'), ((8816, 8876), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.gapProc2', 'args': '(results_dict,)'}), '(target=self.gapProc2, args=(results_dict,))\n', (8832, 8876), False, 'import threading\n'), ((9949, 10001), 'numpy.array', 'np.array', (['[pitch, tone, volume, wordGap, wordGaplen]'], {}), '([pitch, tone, volume, wordGap, wordGaplen])\n', (9957, 10001), True, 'import numpy as np\n'), ((3458, 3482), 'math.ceil', 'math.ceil', (['((n + 1) / 2.0)'], {}), '((n + 1) / 2.0)\n', (3467, 3482), False, 'import math\n'), ((3700, 3728), 'numpy.arange', 'numpy.arange', (['(0)', 'unique', '(1.0)'], {}), '(0, unique, 1.0)\n', (3712, 3728), False, 'import numpy\n'), ((4088, 4112), 'math.ceil', 'math.ceil', (['((n + 1) / 2.0)'], {}), '((n + 1) / 2.0)\n', (4097, 4112), False, 'import math\n'), ((4330, 4358), 'numpy.arange', 'numpy.arange', (['(0)', 'unique', '(1.0)'], {}), '(0, unique, 1.0)\n', (4342, 4358), False, 'import numpy\n')] |
import sys
sys.path.append("..")
from faigen.data.sequence import Dna2VecList,regex_filter
import pandas as pd
import numpy as np
import os
from functools import partial
import configargparse
from pathlib import Path
from Bio.SeqRecord import SeqRecord
import yaml
from pathlib import Path
import os
from shutil import copy
from tqdm import tqdm
def filter_by_count(df: pd.DataFrame, min: int = 1) -> pd.DataFrame:
    """Return a copy of *df* keeping only rows with ``seq_count`` >= *min*.

    The result is re-indexed from 0. The original implementation indexed
    ``res.index`` by positional labels, which only worked for a default
    RangeIndex; a plain boolean mask is correct for any index.

    Args:
        df: Frame with a numeric ``seq_count`` column.
        min: Minimum sequence count (inclusive) a row must have to be kept.
    """
    keep = np.asarray(df.seq_count.values) >= min
    return df[keep].reset_index(drop=True)
def filter_by_label(df: pd.DataFrame, word: str) -> pd.DataFrame:
    """Return a copy of *df* keeping only rows whose ``label`` contains *word*.

    Substring containment (``word in label``), re-indexed from 0. Replaces
    the original build-inverse-mask-then-drop dance with a direct keep mask.

    Args:
        df: Frame with a string ``label`` column.
        word: Substring that must appear in a row's label for it to be kept.
    """
    keep = [word in label for label in df.label.values]
    return df[keep].reset_index(drop=True)
def main():
    """Copy genome files into train/valid/test folders under a hard-coded root.

    NOTE(review): several defects are visible and should be fixed:
    - ``short_list`` is never defined anywhere in this chunk, so the copy
      loop below raises NameError as written.
    - the parsed CLI ``args`` (including -i, -o, -split, -portions) are
      never used; the output path and 0.75 split are hard-coded instead.
    - a ``test`` folder is created but never populated.
    """
    argp = configargparse.get_argument_parser()
    argp.add_argument('-i', help='input label inventory csv', type=str)
    argp.add_argument('-o', help='output folder', type=str)
    argp.add_argument('-lsi', help='label selector (comma delimited numbers)', type=str)
    argp.add_argument('-lsr', help='regular expression for labeling', type=str)
    argp.add_argument('-rxkeep', help='keep if regular expression found', type=str)
    argp.add_argument('-rxdrop', help='drop if regular expression found', type=str)
    argp.add_argument('-d', help='label delimiter', type=str, default=" ")
    argp.add_argument('-split', help='split by folders, coma delimited string', type=str, default="train,valid,test")
    argp.add_argument('-portions', help='split by folders, coma delimited string', type=str, default="0.7,0.2,0.1")
    args = {k:v for k,v in vars(argp.parse_args()).items()}
    out = Path('/home/serge/database/data/genomes/ncbi-genomes-2019-04-07')
    folders = {
        'train': out / "Bacillus" / "train",
        'valid': out / "Bacillus" / "valid",
        'test': out / "Bacillus" / "test"
    }
    # Ensure the destination folders exist.
    for k in folders:
        if not os.path.exists(folders[k]):
            os.makedirs(folders[k])
    # NOTE(review): the inner loop variable ``i`` shadows the outer row index.
    for i in tqdm(range(short_list.shape[0])):
        cnt = short_list.loc[i, "seq_count"]
        train = int(0.75 * cnt)
        valid = cnt - train
        files = short_list.loc[i, "files"]
        for i in range(cnt):
            copy(files[i], folders["train"]) if i < train else copy(files[i], folders["valid"])
"os.path.exists",
"os.makedirs",
"pathlib.Path",
"numpy.asarray",
"configargparse.get_argument_parser",
"shutil.copy",
"sys.path.append"
] | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((872, 908), 'configargparse.get_argument_parser', 'configargparse.get_argument_parser', ([], {}), '()\n', (906, 908), False, 'import configargparse\n'), ((1762, 1827), 'pathlib.Path', 'Path', (['"""/home/serge/database/data/genomes/ncbi-genomes-2019-04-07"""'], {}), "('/home/serge/database/data/genomes/ncbi-genomes-2019-04-07')\n", (1766, 1827), False, 'from pathlib import Path\n'), ((2019, 2045), 'os.path.exists', 'os.path.exists', (['folders[k]'], {}), '(folders[k])\n', (2033, 2045), False, 'import os\n'), ((2059, 2082), 'os.makedirs', 'os.makedirs', (['folders[k]'], {}), '(folders[k])\n', (2070, 2082), False, 'import os\n'), ((462, 494), 'numpy.asarray', 'np.asarray', (['res.seq_count.values'], {}), '(res.seq_count.values)\n', (472, 494), True, 'import numpy as np\n'), ((2320, 2352), 'shutil.copy', 'copy', (['files[i]', "folders['train']"], {}), "(files[i], folders['train'])\n", (2324, 2352), False, 'from shutil import copy\n'), ((2371, 2403), 'shutil.copy', 'copy', (['files[i]', "folders['valid']"], {}), "(files[i], folders['valid'])\n", (2375, 2403), False, 'from shutil import copy\n')] |
"""A pickleable wrapper for sharing NumPy ndarrays between processes using multiprocessing shared memory."""
import numpy as np
import multiprocessing.shared_memory as shm
class SharedNDArray:
    """A pickleable wrapper for sharing NumPy ndarrays between processes using
    multiprocessing shared memory.

    SharedNDArrays are designed to be sent over multiprocessing.Pipe and Queue
    without serializing or transmitting the underlying ndarray or buffer. While
    the associated file descriptor is closed when the SharedNDArray is garbage
    collected, the underlying buffer is not released when the process ends: you
    must manually call the unlink() method from the last process to use it.

    Attributes:
        array: The wrapped NumPy ndarray, backed by shared memory.
    """

    def __init__(self, shape, dtype=np.float64, name=None):
        """Creates a new SharedNDArray.

        If name is left blank, a new shared memory segment is created using a
        system defined name.

        Args:
            shape: Shape of the wrapped ndarray.
            dtype: Data type of the wrapped ndarray.
            name: Optional; the filesystem path of the underlying shared memory.

        Returns:
            A new SharedNDArray of the given shape and dtype and backed by the
            given optional name.

        Raises:
            SharedNDArrayError: if an error occurs.
        """
        # Assign first so __del__ is safe even if SharedMemory() raises below;
        # the original raised AttributeError from __del__ in that case.
        self._shm = None
        size = int(np.prod(shape)) * np.dtype(dtype).itemsize
        if name:
            # Attach to an existing segment; shape/dtype must match the creator's.
            self._shm = shm.SharedMemory(name)
        else:
            self._shm = shm.SharedMemory(None, create=True, size=size)
        self.array = np.ndarray(shape, dtype, self._shm.buf, order='C')

    @classmethod
    def copy(cls, arr):
        """Creates a new SharedNDArray that is a copy of the given ndarray.

        Args:
            arr: The ndarray to copy.

        Returns:
            A new SharedNDArray object with the given ndarray's shape and data
            type and a copy of its data.

        Raises:
            SharedNDArrayError: if an error occurs.
        """
        new_shm = cls.zeros_like(arr)
        new_shm.array[:] = arr
        return new_shm

    @classmethod
    def zeros_like(cls, arr):
        """Creates a new zero-filled SharedNDArray with the shape and dtype of
        the given ndarray.

        Raises:
            SharedNDArrayError: if an error occurs.
        """
        # Freshly created shared memory is zero-filled by the OS.
        return cls(arr.shape, arr.dtype)

    def unlink(self):
        """Marks the underlying shared memory for deletion.

        This method should be called exactly once from one process. Failure to
        call it before all processes exit will result in a memory leak! It will
        raise SharedNDArrayError if the underlying shared memory was already
        marked for deletion from any process.

        Raises:
            SharedNDArrayError: if an error occurs.
        """
        self._shm.unlink()

    def __del__(self):
        # Guard: __init__ may have failed before a segment was attached.
        if getattr(self, '_shm', None) is not None:
            self._shm.close()

    def __getstate__(self):
        # Pickle only (shape, dtype, segment name) — never the buffer itself.
        return self.array.shape, self.array.dtype, self._shm.name

    def __setstate__(self, state):
        # Re-attach to the named segment in the receiving process.
        self.__init__(*state)
| [
"numpy.prod",
"numpy.dtype",
"numpy.ndarray",
"multiprocessing.shared_memory.SharedMemory"
] | [((1652, 1702), 'numpy.ndarray', 'np.ndarray', (['shape', 'dtype', 'self._shm.buf'], {'order': '"""C"""'}), "(shape, dtype, self._shm.buf, order='C')\n", (1662, 1702), True, 'import numpy as np\n'), ((1523, 1545), 'multiprocessing.shared_memory.SharedMemory', 'shm.SharedMemory', (['name'], {}), '(name)\n', (1539, 1545), True, 'import multiprocessing.shared_memory as shm\n'), ((1584, 1630), 'multiprocessing.shared_memory.SharedMemory', 'shm.SharedMemory', (['None'], {'create': '(True)', 'size': 'size'}), '(None, create=True, size=size)\n', (1600, 1630), True, 'import multiprocessing.shared_memory as shm\n'), ((1439, 1453), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1446, 1453), True, 'import numpy as np\n'), ((1457, 1472), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (1465, 1472), True, 'import numpy as np\n')] |
import torch
import argparse
import numpy as np
import sys
sys.path.insert(0, "/home/jwieting/min-risk-cl-simile/min-risk")
from fairseq.tasks.sim_utils import Example
from fairseq.tasks.sim_models import WordAveraging
from sacremoses import MosesDetokenizer
# Command-line interface for the SIM scoring script.
parser = argparse.ArgumentParser()
parser.add_argument('--length-penalty', type=float, default=0.25, metavar='D',
                    help='Weight of length penalty on SIM term.')
# FIX: both file arguments previously carried the copy-pasted help text
# 'path to save checkpoints'; they are actually input text files.
parser.add_argument('--sys-file', help='path to the system output file (one sentence per line)')
parser.add_argument('--src-file', help='path to the source/reference file (one sentence per line)')
args = parser.parse_args()
def score_output(args):
    """Score system outputs against source sentences with the SIM model.

    Loads the pretrained WordAveraging SIM model (CPU only), builds one
    Example per line of the system/source files, scores each pair with the
    model's scoring function and prints the mean score.

    Args:
        args: argparse namespace with ``sys_file`` and ``src_file`` paths.
    """
    detok = MosesDetokenizer('en')
    model = torch.load('simile-mrt/cl_sim/model.wmt.all.lc.100.0.0_25.pt',
                       map_location='cpu')
    state_dict = model['state_dict']
    vocab_words = model['vocab']
    sim_args = model['args']
    # Force CPU evaluation regardless of how the model was trained.
    sim_args.gpu = -1
    model = WordAveraging(sim_args, vocab_words, sp_file="simile-mrt/cl_sim/wmt.all.lc.sp.50k.model")
    model.load_state_dict(state_dict, strict=True)
    lower_case = sim_args.lower_case

    def make_example(sentence):
        """Detokenize, optionally lowercase, sentencepiece-encode and embed one sentence."""
        sentence = detok.detokenize(sentence.split())
        if lower_case:
            sentence = sentence.lower()
        sentence = model.sp.EncodeAsPieces(sentence)
        wp1 = Example(" ".join(sentence), lower=lower_case)
        wp1.populate_embeddings(model.vocab)
        return wp1

    # FIX: the original opened both files and never closed them (leaked
    # handles); context managers guarantee closure.
    with open(args.sys_file, 'r') as f_sys:
        lines_sys = f_sys.readlines()
    with open(args.src_file, 'r') as f_src:
        lines_src = f_src.readlines()

    scores = []
    for sys_line, src_line in zip(lines_sys, lines_src):
        wp1 = make_example(sys_line.strip())
        wp2 = make_example(src_line.strip())
        wx1, wl1, wm1 = model.torchify_batch([wp1])
        wx2, wl2, wm2 = model.torchify_batch([wp2])
        score = model.scoring_function(wx1, wm1, wl1, wx2, wm2, wl2)
        scores.append(score.item())
    print("XLSIM-SIM: {0}".format(np.mean(scores)))
score_output(args)
| [
"numpy.mean",
"sys.path.insert",
"argparse.ArgumentParser",
"sacremoses.MosesDetokenizer",
"torch.load",
"fairseq.tasks.sim_models.WordAveraging"
] | [((60, 124), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/jwieting/min-risk-cl-simile/min-risk"""'], {}), "(0, '/home/jwieting/min-risk-cl-simile/min-risk')\n", (75, 124), False, 'import sys\n'), ((271, 296), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (294, 296), False, 'import argparse\n'), ((641, 663), 'sacremoses.MosesDetokenizer', 'MosesDetokenizer', (['"""en"""'], {}), "('en')\n", (657, 663), False, 'from sacremoses import MosesDetokenizer\n'), ((677, 764), 'torch.load', 'torch.load', (['"""simile-mrt/cl_sim/model.wmt.all.lc.100.0.0_25.pt"""'], {'map_location': '"""cpu"""'}), "('simile-mrt/cl_sim/model.wmt.all.lc.100.0.0_25.pt', map_location\n ='cpu')\n", (687, 764), False, 'import torch\n'), ((938, 1032), 'fairseq.tasks.sim_models.WordAveraging', 'WordAveraging', (['sim_args', 'vocab_words'], {'sp_file': '"""simile-mrt/cl_sim/wmt.all.lc.sp.50k.model"""'}), "(sim_args, vocab_words, sp_file=\n 'simile-mrt/cl_sim/wmt.all.lc.sp.50k.model')\n", (951, 1032), False, 'from fairseq.tasks.sim_models import WordAveraging\n'), ((2220, 2235), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (2227, 2235), True, 'import numpy as np\n')] |
import numpy as np
from collections import defaultdict
class Agent:
    """Tabular agent with an epsilon-greedy policy and a Q-learning
    (SarsaMax) update; epsilon decays as 1/episode (floored at 0.001)."""

    def __init__(self, nA=6, epsilon=1., alpha=0.2, gamma=1., episode=1):
        """ Initialize agent.

        Params
        ======
        - nA: number of actions available to the agent
        - epsilon: initial exploration rate for the epsilon-greedy policy
        - alpha: learning rate
        - gamma: discount factor
        - episode: episode counter used to decay epsilon (epsilon = 1/episode)
        """
        self.nA = nA
        # Q-table: state -> array of nA action values, zero-initialised on first access.
        self.Q = defaultdict(lambda: np.zeros(self.nA))
        self.epsilon = epsilon
        self.alpha = alpha
        self.gamma = gamma
        self.episode = episode

    def select_action(self, state):
        """ Given the state, select an action epsilon-greedily.

        Params
        ======
        - state: the current state of the environment

        Returns
        =======
        - action: an integer, compatible with the task's action space
        """
        if state in self.Q:
            # FIX: hoist the argmax out of the comprehension (the original
            # recomputed np.argmax once per action).
            best = np.argmax(self.Q[state])
            # Greedy action keeps the remaining probability mass; every
            # other action gets epsilon/nA.
            prob = [1 - self.epsilon + self.epsilon * 1. / self.nA if a == best
                    else self.epsilon * 1. / self.nA for a in range(self.nA)]
            return np.random.choice(np.arange(self.nA), p=prob)
        # Unseen state: choose uniformly at random.
        return np.random.choice(np.arange(self.nA))

    def step(self, state, action, reward, next_state, done):
        """ Update the agent's knowledge, using the most recently sampled tuple.

        Params
        ======
        - state: the previous state of the environment
        - action: the agent's previous choice of action
        - reward: last reward received
        - next_state: the current state of the environment
        - done: whether the episode is complete (True or False)
        """
        # Decay exploration: epsilon = 1/episode, floored at 0.001.
        self.epsilon = max(1. / self.episode, 0.001)
        if done:
            self.episode += 1
            # Terminal transition: no bootstrap term.
            self.Q[state][action] += self.alpha * (reward - self.Q[state][action])
        else:
            # Q-learning (SarsaMax) target: bootstrap from the greedy value.
            # FIX: the original also computed
            # `next_action = self.select_action(next_state)` whose result was
            # never used (dead code that also consumed RNG draws); removed.
            expected_val = np.max(self.Q[next_state])
            self.Q[state][action] += self.alpha * (reward + self.gamma * expected_val - self.Q[state][action])
| [
"numpy.zeros",
"numpy.argmax",
"numpy.arange",
"numpy.max"
] | [((2215, 2241), 'numpy.max', 'np.max', (['self.Q[next_state]'], {}), '(self.Q[next_state])\n', (2221, 2241), True, 'import numpy as np\n'), ((328, 345), 'numpy.zeros', 'np.zeros', (['self.nA'], {}), '(self.nA)\n', (336, 345), True, 'import numpy as np\n'), ((983, 1001), 'numpy.arange', 'np.arange', (['self.nA'], {}), '(self.nA)\n', (992, 1001), True, 'import numpy as np\n'), ((1076, 1094), 'numpy.arange', 'np.arange', (['self.nA'], {}), '(self.nA)\n', (1085, 1094), True, 'import numpy as np\n'), ((867, 891), 'numpy.argmax', 'np.argmax', (['self.Q[state]'], {}), '(self.Q[state])\n', (876, 891), True, 'import numpy as np\n')] |
"""FEMTO dataset."""
import os
from pathlib import Path
import itertools
import json
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import pandas as pd
# from scipy.io import loadmat
_DESCRIPTION = """
FEMTO-ST bearing dataset used in the IEEE PHM 2012 Data Challenge for RUL (remaining useful lifetime) estimation.
Description
===========
This dataset consists of run-to-failure experiments carried on the PRONOSTIA platform. Data provided by this platform corresponds to normally degraded bearings, which means that the defects are not initially initiated on the bearings and that each degraded bearing contains almost all the types of defects (balls, rings and cage).
Data are acquired under three operating conditions (rotating speed and load force):
Condition 1. 1800 rpm and 4000 N: folders Bearing1_x
Condition 2. 1650 rpm and 4200 N: folders Bearing2_x
Condition 3. 1500 rpm and 5000 N: folders Bearing3_x
In order to avoid propagation of damages to the whole test bed (and for security reasons), all tests were stopped when the amplitude of the vibration signal overpassed 20g which is used as definition of the RUL.
Provided data contains the records of two acclerometers and one temperature sensor, and are splitted into the learning set of 6 experiments and the test set (truncated + full) of 11 experiments. The goal is to estimate the RUL on the test set. The learning set was quite small while the spread of the life duration of all bearings was very wide (from 1h to 7h).
Actual RULs (in second) of Test set
-----------------------------------
- Bearing1_3: 5730
- Bearing1_4: 339
- Bearing1_5: 1619
- Bearing1_6: 1460
- Bearing1_7: 7570
- Bearing2_3: 7530
- Bearing2_4: 1390
- Bearing2_5: 3090
- Bearing2_6: 1290
- Bearing2_7: 580
- Bearing3_3: 820
Homepage
--------
https://ti.arc.nasa.gov/tech/dash/groups/pcoe/prognostic-data-repository/#femto
http://www.femto-st.fr/
https://github.com/Lucky-Loek/ieee-phm-2012-data-challenge-dataset
Original data
=============
Format: CSV files
Number of channels: not fixed, up to 3
Vibration signals (horizontal and vertical)
- Sampling frequency: 25.6 kHz
- Recordings: 2560 (i.e. 1/10 s) are recorded each 10 seconds
Temperature signals
- Sampling frequency: 10 Hz
- Recordings: 600 samples are recorded each minute
Download
--------
https://github.com/Lucky-Loek/ieee-phm-2012-data-challenge-dataset
"""
_CITATION = """
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. PRONOSTIA: An Experimental Platform for Bearings Accelerated Life Test. IEEE International Conference on Prognostics and Health Management, Denver, CO, USA, 2012
"""
_SPLIT_PATH_MATCH = {
'train': 'Learning_set',
'test': 'Test_set',
'full_test': 'Full_Test_Set'
}
_PARSER_MATCH = {
'Bearing1': 1, # 'condition 1',
'Bearing2': 2, # 'condition 2',
'Bearing3': 3, # 'condition 3',
}
_DATA_URLS = 'https://github.com/Lucky-Loek/ieee-phm-2012-data-challenge-dataset/archive/refs/heads/master.zip'
class FEMTO(tfds.core.GeneratorBasedBuilder):
    """DatasetBuilder for the FEMTO (PRONOSTIA / IEEE PHM 2012) bearing dataset."""
    VERSION = tfds.core.Version('1.0.0')
    RELEASE_NOTES = {
        '1.0.0': 'Initial release.',
    }

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata."""
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict({
                # Number of channels is named or not fixed
                'signal': {
                    'vibration': tfds.features.Tensor(shape=(None, 2), dtype=tf.float64),
                    'temperature': tfds.features.Tensor(shape=(None, ), dtype=tf.float64),
                },
                'metadata': {
                    'OperatingCondition': tf.int32,  # 1..3, see _PARSER_MATCH
                    'OriginalSplit': tf.string,     # train / test / full_test
                    'FileName': tf.string,          # original filename with path
                }
            }),
            # No (input, target) tuple: RUL labels are external to the files.
            supervised_keys=None,
            homepage='',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Map each split name to a generator over its subfolder."""
        if dl_manager._manual_dir.exists():  # prefer to use manually downloaded data
            datadir = dl_manager._manual_dir
        else:  # automatically download data
            # Force ZIP extraction: the method cannot always be deduced from the files.
            _resource = tfds.download.Resource(url=_DATA_URLS, extract_method=tfds.download.ExtractMethod.ZIP)
            datadir = dl_manager.download_and_extract(_resource)
        return {
            sp: self._generate_examples(datadir/fn, sp) for sp, fn in _SPLIT_PATH_MATCH.items()
        }

    def _generate_examples(self, path, split):
        """Yield (key, example) pairs from every CSV file found under `path`.

        Files named ``acc*`` carry the two vibration channels (last two
        columns); files named ``temp*`` carry the temperature channel (last
        column). Anything else is skipped.
        """
        for fp in path.rglob('*.csv'):
            # Sniff the delimiter from the first line (files mix ';' and ',').
            with open(fp, 'r') as ff:
                sep = ';' if ';' in ff.readline() else ','
            dm = pd.read_csv(fp, sep=sep, header=None)
            assert dm.shape[1] >= 5
            if fp.name[:3] == 'acc':
                _signal = {
                    'vibration': dm.iloc[:, -2:].values,
                    'temperature': np.array([])
                }
            elif fp.name[:4] == 'temp':
                _signal = {
                    'vibration': np.array([]).reshape((-1, 2)),
                    'temperature': dm.iloc[:, -1].values,
                }
            else:
                continue
            metadata = {
                'OperatingCondition': _PARSER_MATCH[fp.parts[-2].split('_')[0]],
                'OriginalSplit': split,
                'FileName': os.path.join(*fp.parts[-3:])
            }
            yield hash(frozenset(metadata.items())), {
                'signal': _signal,
                'metadata': metadata
            }

    @staticmethod
    def get_references():
        """Return the BibTeX references shipped next to this module, or None.

        FIX: the original used a bare ``except: pass`` which swallowed every
        exception (including KeyboardInterrupt); narrowed to OSError, which
        covers a missing or unreadable .bib file.
        """
        try:
            with open(Path(__file__).parent / 'Exported Items.bib') as fp:
                return fp.read()
        except OSError:
            return None
| [
"pandas.read_csv",
"tensorflow_datasets.features.Tensor",
"pathlib.Path",
"os.path.join",
"tensorflow_datasets.core.Version",
"numpy.array",
"tensorflow_datasets.download.Resource"
] | [((3092, 3118), 'tensorflow_datasets.core.Version', 'tfds.core.Version', (['"""1.0.0"""'], {}), "('1.0.0')\n", (3109, 3118), True, 'import tensorflow_datasets as tfds\n'), ((4707, 4798), 'tensorflow_datasets.download.Resource', 'tfds.download.Resource', ([], {'url': '_DATA_URLS', 'extract_method': 'tfds.download.ExtractMethod.ZIP'}), '(url=_DATA_URLS, extract_method=tfds.download.\n ExtractMethod.ZIP)\n', (4729, 4798), True, 'import tensorflow_datasets as tfds\n'), ((5378, 5415), 'pandas.read_csv', 'pd.read_csv', (['fp'], {'sep': 'sep', 'header': 'None'}), '(fp, sep=sep, header=None)\n', (5389, 5415), True, 'import pandas as pd\n'), ((5930, 5958), 'os.path.join', 'os.path.join', (['*fp.parts[-3:]'], {}), '(*fp.parts[-3:])\n', (5942, 5958), False, 'import os\n'), ((5569, 5581), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5577, 5581), True, 'import numpy as np\n'), ((3499, 3554), 'tensorflow_datasets.features.Tensor', 'tfds.features.Tensor', ([], {'shape': '(None, 2)', 'dtype': 'tf.float64'}), '(shape=(None, 2), dtype=tf.float64)\n', (3519, 3554), True, 'import tensorflow_datasets as tfds\n'), ((3583, 3636), 'tensorflow_datasets.features.Tensor', 'tfds.features.Tensor', ([], {'shape': '(None,)', 'dtype': 'tf.float64'}), '(shape=(None,), dtype=tf.float64)\n', (3603, 3636), True, 'import tensorflow_datasets as tfds\n'), ((6147, 6161), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (6151, 6161), False, 'from pathlib import Path\n'), ((5669, 5681), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5677, 5681), True, 'import numpy as np\n')] |
import sys
import tempfile
from qgis.core import (
QgsApplication,
QgsProcessingFeedback,
QgsVectorLayer,
QgsRasterLayer,
QgsProject
)
from qgis.analysis import QgsNativeAlgorithms
import argparse
from pathlib import Path
# --- CLI -------------------------------------------------------------------
parser = argparse.ArgumentParser(description="Prepare the houston dataset for roadnet")
parser.add_argument('--data_dir', type=str, default="/home/andrea/Downloads/Final RGB HR Imagery/5", help="dir containing the image")
args = parser.parse_args()
# --- Headless QGIS bootstrap -----------------------------------------------
# See https://gis.stackexchange.com/a/155852/4972 for details about the prefix
QgsApplication.setPrefixPath('/usr', True)
qgs = QgsApplication([], False)  # False: run without a GUI
qgs.initQgis()
# Append the path where processing plugin can be found
sys.path.append('/usr/share/qgis/python/plugins')
import processing
from processing.core.Processing import Processing
Processing.initialize()
QgsApplication.processingRegistry().addProvider(QgsNativeAlgorithms())
# --- Locate inputs ---------------------------------------------------------
data_dir = Path(args.data_dir)
# The tile number is the last path component of data_dir (e.g. ".../5" -> "5").
number = args.data_dir.split('/')[-1]
tif_file = [img for img in data_dir.rglob('UH_NAD*.tif')][0]
osm_file = [o for o in data_dir.rglob('centerline.geojson')][0]
print(data_dir)
print(tif_file)
print(osm_file)
# Load raster layer
rlayer = QgsRasterLayer(tif_file.as_posix(), tif_file.name.replace('.tif', ''))
if not rlayer.isValid():
    print("Layer failed to load!")
else:
    QgsProject.instance().addMapLayer(rlayer)
# Resample the raster layer and save to a file
path_to_resampled = (data_dir / "Houston-{}.tif".format(number)).as_posix()
print(path_to_resampled)
parameters = {
    "INPUT": rlayer,
    "RESAMPLING": 0,
    "TARGET_RESOLUTION": 0.21,  # presumably metres/pixel — confirm layer CRS units
    "DATA_TYPE": 0,
    "TARGET_EXTENT": rlayer.extent(),
    "OUTPUT": path_to_resampled
}
processing.run("gdal:warpreproject", parameters)
resampled_layer = QgsRasterLayer(path_to_resampled, "resampled_tif")
if not resampled_layer.isValid():
    print("Layer failed to load!")
else:
    QgsProject.instance().addMapLayer(resampled_layer)
# Load vector layer
vlayer = QgsVectorLayer(osm_file.as_posix(), "centerline", "ogr")
if not vlayer.isValid():
    print("Layer failed to load: vlayer")
else:
    QgsProject.instance().addMapLayer(vlayer)
# Reproject the vector layer
#temp_dir = tempfile.TemporaryDirectory()
parameters = {
    "INPUT": vlayer,
    "TARGET_CRS": "EPSG:26915",
    "OUTPUT": 'memory:Reprojected'
}
reproj_layer = processing.run("native:reprojectlayer", parameters)['OUTPUT']
if not reproj_layer.isValid():
    print("Layer failed to load: reproj layer")
else:
    QgsProject.instance().addMapLayer(reproj_layer)
# Rasterize the vector layer
path_to_rasterization = (data_dir / "centerline.tif").as_posix()
parameters = {
    "INPUT": reproj_layer,
    "BURN": 1,  # pixel value burned where a centreline crosses
    "UNITS": 1,
    "WIDTH": 0.21,   # matches the resampled raster resolution above
    "HEIGHT": 0.21,
    "EXTENT": resampled_layer.extent(),
    "DATA_TYPE": 0,
    "OUTPUT": path_to_rasterization
}
processing.run("gdal:rasterize", parameters)
rasterized_layer = QgsRasterLayer(path_to_rasterization, "rasterization")
if not rasterized_layer.isValid():
    print("Layer failed to load: rasterized layer")
else:
    QgsProject.instance().addMapLayer(rasterized_layer)
# Convert tif to png
import numpy as np
from osgeo import gdal
import cv2
ds = gdal.Open(path_to_rasterization)
myarray = np.array(ds.GetRasterBand(1).ReadAsArray(), dtype=np.uint8)
# Promote burned pixels (value 1) to full intensity.
myarray[myarray==1] = 255
# White background, roads painted red (G and B channels zeroed).
rgb = np.full(myarray.shape + (3,), 255, dtype=np.uint8)
rgb[myarray==255, 1:3] = 0
path_to_png = path_to_rasterization.replace('.tif', '.png')
cv2.imwrite(path_to_png, cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)) | [
"osgeo.gdal.Open",
"qgis.analysis.QgsNativeAlgorithms",
"processing.run",
"qgis.core.QgsRasterLayer",
"argparse.ArgumentParser",
"pathlib.Path",
"processing.core.Processing.Processing.initialize",
"qgis.core.QgsApplication.processingRegistry",
"qgis.core.QgsApplication.setPrefixPath",
"qgis.core.Q... | [((262, 340), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Prepare the houston dataset for roadnet"""'}), "(description='Prepare the houston dataset for roadnet')\n", (285, 340), False, 'import argparse\n'), ((584, 626), 'qgis.core.QgsApplication.setPrefixPath', 'QgsApplication.setPrefixPath', (['"""/usr"""', '(True)'], {}), "('/usr', True)\n", (612, 626), False, 'from qgis.core import QgsApplication, QgsProcessingFeedback, QgsVectorLayer, QgsRasterLayer, QgsProject\n'), ((633, 658), 'qgis.core.QgsApplication', 'QgsApplication', (['[]', '(False)'], {}), '([], False)\n', (647, 658), False, 'from qgis.core import QgsApplication, QgsProcessingFeedback, QgsVectorLayer, QgsRasterLayer, QgsProject\n'), ((730, 779), 'sys.path.append', 'sys.path.append', (['"""/usr/share/qgis/python/plugins"""'], {}), "('/usr/share/qgis/python/plugins')\n", (745, 779), False, 'import sys\n'), ((849, 872), 'processing.core.Processing.Processing.initialize', 'Processing.initialize', ([], {}), '()\n', (870, 872), False, 'from processing.core.Processing import Processing\n'), ((956, 975), 'pathlib.Path', 'Path', (['args.data_dir'], {}), '(args.data_dir)\n', (960, 975), False, 'from pathlib import Path\n'), ((1731, 1779), 'processing.run', 'processing.run', (['"""gdal:warpreproject"""', 'parameters'], {}), "('gdal:warpreproject', parameters)\n", (1745, 1779), False, 'import processing\n'), ((1799, 1849), 'qgis.core.QgsRasterLayer', 'QgsRasterLayer', (['path_to_resampled', '"""resampled_tif"""'], {}), "(path_to_resampled, 'resampled_tif')\n", (1813, 1849), False, 'from qgis.core import QgsApplication, QgsProcessingFeedback, QgsVectorLayer, QgsRasterLayer, QgsProject\n'), ((2890, 2934), 'processing.run', 'processing.run', (['"""gdal:rasterize"""', 'parameters'], {}), "('gdal:rasterize', parameters)\n", (2904, 2934), False, 'import processing\n'), ((2954, 3008), 'qgis.core.QgsRasterLayer', 'QgsRasterLayer', (['path_to_rasterization', 
'"""rasterization"""'], {}), "(path_to_rasterization, 'rasterization')\n", (2968, 3008), False, 'from qgis.core import QgsApplication, QgsProcessingFeedback, QgsVectorLayer, QgsRasterLayer, QgsProject\n'), ((3239, 3271), 'osgeo.gdal.Open', 'gdal.Open', (['path_to_rasterization'], {}), '(path_to_rasterization)\n', (3248, 3271), False, 'from osgeo import gdal\n'), ((3375, 3425), 'numpy.full', 'np.full', (['(myarray.shape + (3,))', '(255)'], {'dtype': 'np.uint8'}), '(myarray.shape + (3,), 255, dtype=np.uint8)\n', (3382, 3425), True, 'import numpy as np\n'), ((921, 942), 'qgis.analysis.QgsNativeAlgorithms', 'QgsNativeAlgorithms', ([], {}), '()\n', (940, 942), False, 'from qgis.analysis import QgsNativeAlgorithms\n'), ((2382, 2433), 'processing.run', 'processing.run', (['"""native:reprojectlayer"""', 'parameters'], {}), "('native:reprojectlayer', parameters)\n", (2396, 2433), False, 'import processing\n'), ((3539, 3575), 'cv2.cvtColor', 'cv2.cvtColor', (['rgb', 'cv2.COLOR_RGB2BGR'], {}), '(rgb, cv2.COLOR_RGB2BGR)\n', (3551, 3575), False, 'import cv2\n'), ((873, 908), 'qgis.core.QgsApplication.processingRegistry', 'QgsApplication.processingRegistry', ([], {}), '()\n', (906, 908), False, 'from qgis.core import QgsApplication, QgsProcessingFeedback, QgsVectorLayer, QgsRasterLayer, QgsProject\n'), ((1360, 1381), 'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (1379, 1381), False, 'from qgis.core import QgsApplication, QgsProcessingFeedback, QgsVectorLayer, QgsRasterLayer, QgsProject\n'), ((1929, 1950), 'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (1948, 1950), False, 'from qgis.core import QgsApplication, QgsProcessingFeedback, QgsVectorLayer, QgsRasterLayer, QgsProject\n'), ((2144, 2165), 'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (2163, 2165), False, 'from qgis.core import QgsApplication, QgsProcessingFeedback, QgsVectorLayer, QgsRasterLayer, QgsProject\n'), ((2533, 2554), 
'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (2552, 2554), False, 'from qgis.core import QgsApplication, QgsProcessingFeedback, QgsVectorLayer, QgsRasterLayer, QgsProject\n'), ((3106, 3127), 'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (3125, 3127), False, 'from qgis.core import QgsApplication, QgsProcessingFeedback, QgsVectorLayer, QgsRasterLayer, QgsProject\n')] |
import math
import cv2
import numpy as np
from PIL import Image
from skimage import exposure
from DataToCloud import dataToCloud
def calc_projection_image(ptCloud, pcloud, messyValidRGB):
    """Project the finite points of a cloud back onto an image grid.

    Each point index is unravelled (column-major) into a pixel of a grid
    shaped like ``pcloud``; pixels of valid (all-finite) points are painted
    with the corresponding row of ``messyValidRGB``.

    Returns:
        uint8 image of shape (max_row+1, max_col+1, 3).
    """
    pts = np.asarray(ptCloud.points)
    finite_mask = np.isfinite(pts[:, 0]) & np.isfinite(pts[:, 1]) & np.isfinite(pts[:, 2])
    valid_idx = np.argwhere(finite_mask)
    n_points = np.asarray(ptCloud.points).shape[0]
    # Fortran-order unravel: point k -> (row, col) on the pcloud grid.
    rows, cols = np.unravel_index(np.arange(0, n_points),
                                  (pcloud.shape[0], pcloud.shape[1]), order='F')
    pixel_coords = np.column_stack((rows[valid_idx], cols[valid_idx]))
    out = np.zeros((np.max(pixel_coords[:, 0]) + 1, np.max(pixel_coords[:, 1]) + 1, 3))
    for k in range(pixel_coords.shape[0]):
        out[pixel_coords[k, 0], pixel_coords[k, 1], :] = messyValidRGB[k, :]
    return out.astype(np.uint8)
def dn_to_rad(RGB, MS480, MS520, MS550, MS670, MS700, MS730, MS780):
    """Convert raw multispectral DN bands to radiance and project each band
    onto the RGB-aligned point cloud.

    FIX: the original repeated the same three-line pipeline seven times;
    collapsed into a single loop with identical behaviour and ordering.

    Args:
        RGB: reference RGB image used by dataToCloud for geometry.
        MS480..MS780: raw DN arrays for the seven spectral bands.

    Returns:
        List of 7 radiance maps (the z-channel of each band's cloud),
        ordered 480, 520, 550, 670, 700, 730, 780 nm.
    """
    # Ratio calculated from old Banana MS image and new banana MS image for
    # each band, bringing the raw DN data to RADIANCE (coefficients for
    # Banana in Rahan).
    DN2RAD = [0.059057, 0.192245, 0.594233, 1.198960, 1.871885, 2.034510, 2.075143]
    bands = [MS480, MS520, MS550, MS670, MS700, MS730, MS780]
    MSradlist = []
    for band, gain in zip(bands, DN2RAD):
        rad = np.multiply(band, gain)
        # dataToCloud projects the band onto the RGB geometry; the value
        # lives in channel 2 of the returned cloud.
        cloud = dataToCloud(RGB, rad, 0)
        MSradlist.append(cloud[:, :, 2])
    return MSradlist
def enhance(MS670rad, MS550rad, MS480rad):
    """Stack the three radiance bands side by side (axis=1) and apply a
    2nd-98th percentile contrast stretch."""
    stacked = np.concatenate((MS670rad, MS550rad, MS480rad), axis=1)  # AXIS CHANGED FROM 2 TO 1
    stacked = stacked.astype(np.uint8)
    # Contrast stretching between the 2% and 98% intensity percentiles.
    lo, hi = np.percentile(stacked, (2, 98))
    return exposure.rescale_intensity(stacked, in_range=(lo, hi))
def bdrf_correction():
    """Placeholder for BRDF correction (not implemented yet)."""
    pass
def calc_3d_correction():
    """Placeholder for the 3D correction step (not implemented yet)."""
    pass
def radians_to_reflectance(Rad3d_corr480, Rad3d_corr520, Rad3d_corr550, Rad3d_corr670, Rad3d_corr700, Rad3d_corr730, Rad3d_corr780):
    """Convert per-band radiance to reflectance via per-band gain factors.

    FIX: the original indexed the gain list with call syntax (``Gain(0)``,
    a MATLAB leftover) which raised TypeError at runtime, and it discarded
    all results (ended in ``pass``). The values are now computed with list
    indexing and returned.

    Returns:
        List of 7 reflectance values/arrays ordered 480, 520, 550, 670,
        700, 730, 780 nm. (Previously the function returned None, so
        returning the results is backward-compatible.)
    """
    # Radiance -> reflectance gains, one per band (480..780 nm).
    Gain = [0.0428, 0.0301, 0.0179, 0.0056, 0.0089, 0.0083, 0.0074]
    rads = [Rad3d_corr480, Rad3d_corr520, Rad3d_corr550, Rad3d_corr670,
            Rad3d_corr700, Rad3d_corr730, Rad3d_corr780]
    return [rad * g for rad, g in zip(rads, Gain)]
def ver_3d_corrected_brdf():
    """Placeholder: verification of the 3D-corrected BRDF (not implemented)."""
    # Verification of 3D corrected based BRDF
    pass
def rotate_bound(image, angle):
    """Rotate ``image`` by ``angle`` degrees (sign flipped for cv2's
    convention) while enlarging the canvas so no pixels are clipped."""
    height, width = image.shape[:2]
    center_x, center_y = width // 2, height // 2
    rot = cv2.getRotationMatrix2D((center_x, center_y), -angle, 1.0)
    # |cos| and |sin| of the rotation, read off the matrix.
    abs_cos = np.abs(rot[0, 0])
    abs_sin = np.abs(rot[0, 1])
    # Bounding box big enough to hold the whole rotated image.
    new_w = int(height * abs_sin + width * abs_cos)
    new_h = int(height * abs_cos + width * abs_sin)
    # Translate so the rotation centre maps to the new canvas centre.
    rot[0, 2] += new_w / 2 - center_x
    rot[1, 2] += new_h / 2 - center_y
    return cv2.warpAffine(image, rot, (new_w, new_h))
def add_image(img1, img2, x_center, y_center, x_scale, y_scale, angle):
    """Paste PIL image `img2` onto `img1`, scaled by (x_scale, y_scale) and
    rotated by `angle` degrees, centred at (x_center, y_center).

    img2 is used as its own paste mask — presumably it carries an alpha
    channel (RGBA); confirm against callers. Returns img1, modified in place.
    """
    img2 = img2.resize((int(x_scale * img2.size[0]), int(y_scale * img2.size[1])), resample=Image.BICUBIC) # Image.ANTIALIAS
    # expand=True grows the canvas so the rotated image is not clipped.
    img2 = img2.rotate(angle, resample=Image.BICUBIC, expand=True)
    rows, cols, channels = np.asarray(img2).shape
    # Top-left corner so that img2 ends up centred on (x_center, y_center).
    x_from = x_center - math.floor(cols / 2.)
    y_from = y_center - math.floor(rows / 2.)
    img1.paste(img2, (x_from, y_from), img2)
    # tmp_mask = image_to_mask(img2)
    # tmp_mask = Image.fromarray(tmp_mask)
    # img1.paste(img2, (x_from, y_from), tmp_mask)
    return img1
def image_to_mask(image):
    """Return a boolean mask that is True wherever a pixel has any non-zero
    channel value.

    NOTE(review): summing over axis=-1 assumes a channel-last image; for a
    2D grayscale input this would sum over columns instead — the original
    TODO ("check if OK when image is 2D") still applies.
    """
    channel_total = np.sum(image, axis=-1)
    #se = scipy.ndimage.generate_binary_structure(2, 1)
    #mask = scipy.ndimage.binary_erosion(mask, structure=se, iterations = 2)
    return channel_total > 0
def mask_to_image(mask):
    """Flatten an (H, W, Z) layer mask into a single uint8 image where
    layer i is painted with intensity ``int(((i+1)/Z) * 255)``.

    FIX: ``np.cast[np.uint8](...)`` was deprecated and removed in NumPy 2.0;
    replaced with the direct ``np.uint8(...)`` constructor (same value).

    Note: overlapping layers accumulate with uint8 wrap-around, matching
    the original behaviour.
    """
    height, width, layers = mask.shape
    image = np.zeros((height, width), dtype=np.uint8)
    for i in range(layers):
        # Intensity encodes the layer index.
        layer_value = int((i + 1) / layers * 255)
        image += mask[:, :, i] * np.uint8(layer_value)
    return image
def add_image_without_transparency(img1, img2, x_center, y_center, x_scale, y_scale, angle):
    """Overlay OpenCV image `img2` onto `img1`, scaled, rotated and centred
    at (x_center, y_center), using a binary mask built from img2's
    non-black pixels (no alpha channel involved). Returns img1, modified
    in place.
    """
    img2 = cv2.resize(img2, None, fx=x_scale, fy=y_scale, interpolation=cv2.INTER_CUBIC)
    # 360 - angle converts to rotate_bound's angle convention.
    img2 = rotate_bound(img2, 360 - angle)
    rows, cols, channels = img2.shape
    # Destination window centred on (x_center, y_center).
    x_from = x_center - math.floor(cols / 2.)
    x_to = x_center + math.ceil(cols / 2.)
    y_from = y_center - math.floor(rows / 2.)
    y_to = y_center + math.ceil(rows / 2.)
    y_max, x_max, _ = img1.shape
    # Clip the window (and crop img2 accordingly) to img1's bounds.
    if x_from < 0:
        img2 = img2[:, -x_from:]
        x_from = 0
    if x_to >= x_max:
        img2 = img2[:, :-(x_to - x_max + 1)]
        x_to = x_max - 1
    if y_from < 0:
        img2 = img2[-y_from:, :]
        y_from = 0
    if y_to >= y_max:
        img2 = img2[:-(y_to - y_max + 1), :]
        y_to = y_max - 1
    roi = img1[y_from:y_to, x_from:x_to]
    # Binary mask: non-black img2 pixels overwrite img1; black keeps img1.
    img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img2gray, 1, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    img2_fg = cv2.bitwise_and(img2, img2, mask=mask)
    #dst = cv2.add(img1_bg, img2_fg[:, :, :]) # AZ (remove alpha)
    dst = cv2.add(img1_bg, img2_fg[:, :, 0:3])
    img1[y_from:y_to, x_from:x_to] = dst
    return img1
| [
"math.floor",
"numpy.column_stack",
"numpy.isfinite",
"numpy.arange",
"numpy.multiply",
"cv2.threshold",
"numpy.asarray",
"numpy.max",
"numpy.unravel_index",
"numpy.concatenate",
"cv2.add",
"numpy.abs",
"cv2.warpAffine",
"skimage.exposure.rescale_intensity",
"cv2.cvtColor",
"cv2.getRot... | [((214, 240), 'numpy.asarray', 'np.asarray', (['ptCloud.points'], {}), '(ptCloud.points)\n', (224, 240), True, 'import numpy as np\n'), ((447, 466), 'numpy.arange', 'np.arange', (['(0)', 'count'], {}), '(0, count)\n', (456, 466), True, 'import numpy as np\n'), ((480, 552), 'numpy.unravel_index', 'np.unravel_index', (['indices', '(pcloud.shape[0], pcloud.shape[1])'], {'order': '"""F"""'}), "(indices, (pcloud.shape[0], pcloud.shape[1]), order='F')\n", (496, 552), True, 'import numpy as np\n'), ((571, 622), 'numpy.column_stack', 'np.column_stack', (['(u[validIndices], v[validIndices])'], {}), '((u[validIndices], v[validIndices]))\n', (586, 622), True, 'import numpy as np\n'), ((1256, 1285), 'numpy.multiply', 'np.multiply', (['MS480', 'DN2RAD[0]'], {}), '(MS480, DN2RAD[0])\n', (1267, 1285), True, 'import numpy as np\n'), ((1298, 1327), 'numpy.multiply', 'np.multiply', (['MS520', 'DN2RAD[1]'], {}), '(MS520, DN2RAD[1])\n', (1309, 1327), True, 'import numpy as np\n'), ((1340, 1369), 'numpy.multiply', 'np.multiply', (['MS550', 'DN2RAD[2]'], {}), '(MS550, DN2RAD[2])\n', (1351, 1369), True, 'import numpy as np\n'), ((1382, 1411), 'numpy.multiply', 'np.multiply', (['MS670', 'DN2RAD[3]'], {}), '(MS670, DN2RAD[3])\n', (1393, 1411), True, 'import numpy as np\n'), ((1424, 1453), 'numpy.multiply', 'np.multiply', (['MS700', 'DN2RAD[4]'], {}), '(MS700, DN2RAD[4])\n', (1435, 1453), True, 'import numpy as np\n'), ((1466, 1495), 'numpy.multiply', 'np.multiply', (['MS730', 'DN2RAD[5]'], {}), '(MS730, DN2RAD[5])\n', (1477, 1495), True, 'import numpy as np\n'), ((1508, 1537), 'numpy.multiply', 'np.multiply', (['MS780', 'DN2RAD[6]'], {}), '(MS780, DN2RAD[6])\n', (1519, 1537), True, 'import numpy as np\n'), ((1682, 1708), 'DataToCloud.dataToCloud', 'dataToCloud', (['RGB', 'MS480', '(0)'], {}), '(RGB, MS480, 0)\n', (1693, 1708), False, 'from DataToCloud import dataToCloud\n'), ((1764, 1790), 'DataToCloud.dataToCloud', 'dataToCloud', (['RGB', 'MS520', '(0)'], {}), '(RGB, 
MS520, 0)\n', (1775, 1790), False, 'from DataToCloud import dataToCloud\n'), ((1846, 1872), 'DataToCloud.dataToCloud', 'dataToCloud', (['RGB', 'MS550', '(0)'], {}), '(RGB, MS550, 0)\n', (1857, 1872), False, 'from DataToCloud import dataToCloud\n'), ((1928, 1954), 'DataToCloud.dataToCloud', 'dataToCloud', (['RGB', 'MS670', '(0)'], {}), '(RGB, MS670, 0)\n', (1939, 1954), False, 'from DataToCloud import dataToCloud\n'), ((2010, 2036), 'DataToCloud.dataToCloud', 'dataToCloud', (['RGB', 'MS700', '(0)'], {}), '(RGB, MS700, 0)\n', (2021, 2036), False, 'from DataToCloud import dataToCloud\n'), ((2092, 2118), 'DataToCloud.dataToCloud', 'dataToCloud', (['RGB', 'MS730', '(0)'], {}), '(RGB, MS730, 0)\n', (2103, 2118), False, 'from DataToCloud import dataToCloud\n'), ((2174, 2200), 'DataToCloud.dataToCloud', 'dataToCloud', (['RGB', 'MS780', '(0)'], {}), '(RGB, MS780, 0)\n', (2185, 2200), False, 'from DataToCloud import dataToCloud\n'), ((2401, 2455), 'numpy.concatenate', 'np.concatenate', (['(MS670rad, MS550rad, MS480rad)'], {'axis': '(1)'}), '((MS670rad, MS550rad, MS480rad), axis=1)\n', (2415, 2455), True, 'import numpy as np\n'), ((2553, 2580), 'numpy.percentile', 'np.percentile', (['img', '(2, 98)'], {}), '(img, (2, 98))\n', (2566, 2580), True, 'import numpy as np\n'), ((2593, 2644), 'skimage.exposure.rescale_intensity', 'exposure.rescale_intensity', (['img'], {'in_range': '(p2, p98)'}), '(img, in_range=(p2, p98))\n', (2619, 2644), False, 'from skimage import exposure\n'), ((3431, 3477), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cX, cY)', '(-angle)', '(1.0)'], {}), '((cX, cY), -angle, 1.0)\n', (3454, 3477), False, 'import cv2\n'), ((3488, 3503), 'numpy.abs', 'np.abs', (['M[0, 0]'], {}), '(M[0, 0])\n', (3494, 3503), True, 'import numpy as np\n'), ((3514, 3529), 'numpy.abs', 'np.abs', (['M[0, 1]'], {}), '(M[0, 1])\n', (3520, 3529), True, 'import numpy as np\n'), ((3674, 3708), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(nW, nH)'], {}), '(image, M, 
(nW, nH))\n', (3688, 3708), False, 'import cv2\n'), ((4409, 4431), 'numpy.sum', 'np.sum', (['image'], {'axis': '(-1)'}), '(image, axis=-1)\n', (4415, 4431), True, 'import numpy as np\n'), ((4667, 4699), 'numpy.zeros', 'np.zeros', (['(x, y)'], {'dtype': 'np.uint8'}), '((x, y), dtype=np.uint8)\n', (4675, 4699), True, 'import numpy as np\n'), ((4957, 5034), 'cv2.resize', 'cv2.resize', (['img2', 'None'], {'fx': 'x_scale', 'fy': 'y_scale', 'interpolation': 'cv2.INTER_CUBIC'}), '(img2, None, fx=x_scale, fy=y_scale, interpolation=cv2.INTER_CUBIC)\n', (4967, 5034), False, 'import cv2\n'), ((5715, 5753), 'cv2.cvtColor', 'cv2.cvtColor', (['img2', 'cv2.COLOR_BGR2GRAY'], {}), '(img2, cv2.COLOR_BGR2GRAY)\n', (5727, 5753), False, 'import cv2\n'), ((5770, 5820), 'cv2.threshold', 'cv2.threshold', (['img2gray', '(1)', '(255)', 'cv2.THRESH_BINARY'], {}), '(img2gray, 1, 255, cv2.THRESH_BINARY)\n', (5783, 5820), False, 'import cv2\n'), ((5836, 5857), 'cv2.bitwise_not', 'cv2.bitwise_not', (['mask'], {}), '(mask)\n', (5851, 5857), False, 'import cv2\n'), ((5872, 5912), 'cv2.bitwise_and', 'cv2.bitwise_and', (['roi', 'roi'], {'mask': 'mask_inv'}), '(roi, roi, mask=mask_inv)\n', (5887, 5912), False, 'import cv2\n'), ((5927, 5965), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img2', 'img2'], {'mask': 'mask'}), '(img2, img2, mask=mask)\n', (5942, 5965), False, 'import cv2\n'), ((6043, 6079), 'cv2.add', 'cv2.add', (['img1_bg', 'img2_fg[:, :, 0:3]'], {}), '(img1_bg, img2_fg[:, :, 0:3])\n', (6050, 6079), False, 'import cv2\n'), ((4003, 4019), 'numpy.asarray', 'np.asarray', (['img2'], {}), '(img2)\n', (4013, 4019), True, 'import numpy as np\n'), ((4050, 4072), 'math.floor', 'math.floor', (['(cols / 2.0)'], {}), '(cols / 2.0)\n', (4060, 4072), False, 'import math\n'), ((4096, 4118), 'math.floor', 'math.floor', (['(rows / 2.0)'], {}), '(rows / 2.0)\n', (4106, 4118), False, 'import math\n'), ((5142, 5164), 'math.floor', 'math.floor', (['(cols / 2.0)'], {}), '(cols / 2.0)\n', (5152, 5164), False, 
'import math\n'), ((5186, 5207), 'math.ceil', 'math.ceil', (['(cols / 2.0)'], {}), '(cols / 2.0)\n', (5195, 5207), False, 'import math\n'), ((5231, 5253), 'math.floor', 'math.floor', (['(rows / 2.0)'], {}), '(rows / 2.0)\n', (5241, 5253), False, 'import math\n'), ((5275, 5296), 'math.ceil', 'math.ceil', (['(rows / 2.0)'], {}), '(rows / 2.0)\n', (5284, 5296), False, 'import math\n'), ((351, 383), 'numpy.isfinite', 'np.isfinite', (['ptCloudPoints[:, 2]'], {}), '(ptCloudPoints[:, 2])\n', (362, 383), True, 'import numpy as np\n'), ((397, 423), 'numpy.asarray', 'np.asarray', (['ptCloud.points'], {}), '(ptCloud.points)\n', (407, 423), True, 'import numpy as np\n'), ((281, 313), 'numpy.isfinite', 'np.isfinite', (['ptCloudPoints[:, 0]'], {}), '(ptCloudPoints[:, 0])\n', (292, 313), True, 'import numpy as np\n'), ((316, 348), 'numpy.isfinite', 'np.isfinite', (['ptCloudPoints[:, 1]'], {}), '(ptCloudPoints[:, 1])\n', (327, 348), True, 'import numpy as np\n'), ((650, 675), 'numpy.max', 'np.max', (['imagePoints[:, 0]'], {}), '(imagePoints[:, 0])\n', (656, 675), True, 'import numpy as np\n'), ((681, 706), 'numpy.max', 'np.max', (['imagePoints[:, 1]'], {}), '(imagePoints[:, 1])\n', (687, 706), True, 'import numpy as np\n')] |
import numpy as np
from scipy.interpolate import RectBivariateSpline
def TemplateCorrection(T, It1, rect, p0=None):
    """Lucas-Kanade style template correction for a tracked window.

    Estimates the 2D translation ``p0`` that registers the template ``T``
    onto the current frame ``It1`` inside ``rect``, then shifts ``rect`` by
    that translation (in place).

    Parameters
    ----------
    T : ndarray, shape (rows_rect, cols_rect)
        Template patch (intensity values).
    It1 : ndarray, shape (rows_img, cols_img)
        Current frame.
    rect : array-like of 4 floats, [x1, y1, x2, y2]
        Corners of the tracked window; updated in place.
    p0 : ndarray of shape (2,), optional
        Initial translation estimate. Defaults to (0, 0).

    Notes
    -----
    Bug fix: the original signature used the mutable default
    ``p0=np.zeros(2)``.  Default arguments are evaluated once, and since
    ``p0`` is updated in place below, the correction from one call leaked
    into every subsequent call.  A fresh array is now allocated per call.
    """
    if p0 is None:
        p0 = np.zeros(2)  # fresh estimate per call (was a shared mutable default)
    threshold = 0.1
    x1_t, y1_t, x2_t, y2_t = rect[0], rect[1], rect[2], rect[3]
    Iy, Ix = np.gradient(It1)
    rows_img, cols_img = It1.shape
    rows_rect, cols_rect = T.shape
    dp = [[cols_img], [rows_img]]  # start large so the loop runs at least once
    # precompute spline interpolators for the frame and its gradients
    y = np.arange(0, rows_img, 1)
    x = np.arange(0, cols_img, 1)
    spline1 = RectBivariateSpline(y, x, It1)
    spline_gx = RectBivariateSpline(y, x, Ix)
    spline_gy = RectBivariateSpline(y, x, Iy)
    jac = np.array([[1, 0], [0, 1]])  # Jacobian of a pure translation
    while np.square(dp).sum() > threshold:
        # warp the window corners by the current translation estimate
        x1_w, y1_w = x1_t + p0[0], y1_t + p0[1]
        x2_w, y2_w = x2_t + p0[0], y2_t + p0[1]
        cw = np.linspace(x1_w, x2_w, cols_rect)
        rw = np.linspace(y1_w, y2_w, rows_rect)
        ccw, rrw = np.meshgrid(cw, rw)
        warpImg = spline1.ev(rrw, ccw)
        # error image between the template and the warped frame
        err = T - warpImg
        errImg = err.reshape(-1, 1)
        # image gradient sampled at the warped locations; I has shape (n, 2)
        Ix_w = spline_gx.ev(rrw, ccw)
        Iy_w = spline_gy.ev(rrw, ccw)
        I = np.vstack((Ix_w.ravel(), Iy_w.ravel())).T
        # Gauss-Newton step: dp = H^-1 @ J^T @ err with H = J^T J, a 2x2 matrix
        delta = I @ jac
        H = delta.T @ delta
        dp = np.linalg.inv(H) @ delta.T @ errImg
        # update the translation estimate
        p0[0] += dp[0, 0]
        p0[1] += dp[1, 0]
    rect[0] += p0[0]
    rect[1] += p0[1]
    rect[2] += p0[0]
    rect[3] += p0[1]
| [
"scipy.interpolate.RectBivariateSpline",
"numpy.square",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.linalg.inv",
"numpy.meshgrid",
"numpy.gradient",
"numpy.arange"
] | [((115, 126), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (123, 126), True, 'import numpy as np\n'), ((229, 245), 'numpy.gradient', 'np.gradient', (['It1'], {}), '(It1)\n', (240, 245), True, 'import numpy as np\n'), ((401, 426), 'numpy.arange', 'np.arange', (['(0)', 'rows_img', '(1)'], {}), '(0, rows_img, 1)\n', (410, 426), True, 'import numpy as np\n'), ((436, 461), 'numpy.arange', 'np.arange', (['(0)', 'cols_img', '(1)'], {}), '(0, cols_img, 1)\n', (445, 461), True, 'import numpy as np\n'), ((477, 507), 'scipy.interpolate.RectBivariateSpline', 'RectBivariateSpline', (['y', 'x', 'It1'], {}), '(y, x, It1)\n', (496, 507), False, 'from scipy.interpolate import RectBivariateSpline\n'), ((525, 554), 'scipy.interpolate.RectBivariateSpline', 'RectBivariateSpline', (['y', 'x', 'Ix'], {}), '(y, x, Ix)\n', (544, 554), False, 'from scipy.interpolate import RectBivariateSpline\n'), ((572, 601), 'scipy.interpolate.RectBivariateSpline', 'RectBivariateSpline', (['y', 'x', 'Iy'], {}), '(y, x, Iy)\n', (591, 601), False, 'from scipy.interpolate import RectBivariateSpline\n'), ((613, 639), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (621, 639), True, 'import numpy as np\n'), ((806, 840), 'numpy.linspace', 'np.linspace', (['x1_w', 'x2_w', 'cols_rect'], {}), '(x1_w, x2_w, cols_rect)\n', (817, 840), True, 'import numpy as np\n'), ((855, 889), 'numpy.linspace', 'np.linspace', (['y1_w', 'y2_w', 'rows_rect'], {}), '(y1_w, y2_w, rows_rect)\n', (866, 889), True, 'import numpy as np\n'), ((910, 929), 'numpy.meshgrid', 'np.meshgrid', (['cw', 'rw'], {}), '(cw, rw)\n', (921, 929), True, 'import numpy as np\n'), ((654, 667), 'numpy.square', 'np.square', (['dp'], {}), '(dp)\n', (663, 667), True, 'import numpy as np\n'), ((1475, 1491), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (1488, 1491), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Analyze how important a Unicode block is for the different languages."""
# core modules
import logging
# 3rd party modules
import click
import numpy as np
# internal modules
from lidtk.data import wili
@click.command(name='analyze-unicode-block', help=__doc__)
@click.option('--start', default=123, show_default=True)
@click.option('--end',
              default=456,
              show_default=True,
              help='End of Unicode range')
def main(start, end):
    """Run."""
    # Load the WiLI corpus and measure, per language label, which share of
    # each paragraph's characters falls inside [start, end].
    data = wili.load_data()
    logging.info("Finished loading data")
    lang_amounts = {}
    for text, lbl in zip(data['x_train'], data['y_train']):
        in_range = sum(1 for ch in text if start <= ord(ch) <= end)
        lang_amounts.setdefault(lbl, []).append(float(in_range) / len(text))
    # average per language, expressed as a percentage
    for key in lang_amounts.keys():
        lang_amounts[key] = np.array(lang_amounts[key]).mean() * 100
    print('Label      Chars in range [{} - {}]'.format(start, end))
    print('-' * 80)
    ranked = sorted(lang_amounts.items(), key=lambda n: n[1], reverse=True)
    for rank, (label, chars_in_range) in enumerate(ranked, start=1):
        print('{:>3}. {:<10} {:>5.2f}%'
              .format(rank, label, chars_in_range))
| [
"click.option",
"numpy.array",
"lidtk.data.wili.load_data",
"click.command",
"logging.info"
] | [((235, 292), 'click.command', 'click.command', ([], {'name': '"""analyze-unicode-block"""', 'help': '__doc__'}), "(name='analyze-unicode-block', help=__doc__)\n", (248, 292), False, 'import click\n'), ((294, 349), 'click.option', 'click.option', (['"""--start"""'], {'default': '(123)', 'show_default': '(True)'}), "('--start', default=123, show_default=True)\n", (306, 349), False, 'import click\n'), ((351, 438), 'click.option', 'click.option', (['"""--end"""'], {'default': '(456)', 'show_default': '(True)', 'help': '"""End of Unicode range"""'}), "('--end', default=456, show_default=True, help=\n 'End of Unicode range')\n", (363, 438), False, 'import click\n'), ((540, 556), 'lidtk.data.wili.load_data', 'wili.load_data', ([], {}), '()\n', (554, 556), False, 'from lidtk.data import wili\n'), ((561, 598), 'logging.info', 'logging.info', (['"""Finished loading data"""'], {}), "('Finished loading data')\n", (573, 598), False, 'import logging\n'), ((1064, 1091), 'numpy.array', 'np.array', (['lang_amounts[key]'], {}), '(lang_amounts[key])\n', (1072, 1091), True, 'import numpy as np\n')] |
"""Main module."""
import pandas as pd
import numpy as np
import re
import os
from shipflowmotionshelpers import errors
def _load_time_series(file_path: str) -> pd.DataFrame:
    """Load a Motions time series (.csv or legacy .ts) as a DataFrame.

    Parameters
    ----------
    file_path : str
        Path to the time-series file; the extension selects the parser.

    Returns
    -------
    pd.DataFrame
        Time-indexed frame with 'phi1d' and 'phi2d' (V4/A4 in radians) added.

    Raises
    ------
    ValueError
        If the file extension is neither '.csv' nor '.ts'.
    """
    extension = os.path.splitext(file_path)[1]
    if extension == '.csv':
        df = _load_motions_csv(file_path=file_path)
    elif extension == '.ts':
        df = _load_motions_old(file_path=file_path)
    else:
        raise ValueError('Unknown time series file extension:%s' % extension)
    df['phi1d'] = np.deg2rad(df['V4'])
    df['phi2d'] = np.deg2rad(df['A4'])
    return df
def _load_motions_old(file_path:str):
"""
Load time series data from ShipFlow Motions file (old format).
"""
df = pd.read_csv(file_path, sep=' +', index_col=1)
df['phi'] = np.deg2rad(df['P4'])
df['dX'] = df['V1'] # Speed in global X-direction
return df
def _load_motions_csv(file_path:str):
"""
Load time series data from ShipFlow Motions file.
"""
df = pd.read_csv(file_path, sep=',', index_col=1)
df['phi'] = np.deg2rad(df['P4'])
df['dX'] = df['V1'] # Speed in global X-direction
df['ts'] = df['Time_step']
#print(df['ts'])
return df
def _extract_parameters(s:str)->dict:
"""
The functions parses all parameters from a ShipFlow Motions indata file.
The function searches for:
x = ...
and saves all those occurences as a key value pair in a dict.
Parameters
----------
s : str
Motions indata file content as string.
Returns
----------
parameters : dict
"""
# matching: x = ....
key_value_pairs = re.findall(pattern='(\w+) *= *"*([^ ^, ^" ^ ^\n ^)]+)', string=s)
# matching x (jadadajada...) : ....
key_value_pairs_2 = re.findall(pattern='(\w+) *\([^\)]+\) *: *([^\n]+)', string=s)
# adding the key_value_pairs_2 to the list:
key_value_pairs+=key_value_pairs_2 # (This list may contain duplicates so that certain value are overwritten below)
parameters = {}
for key_value_pair in key_value_pairs:
key = key_value_pair[0]
value = key_value_pair[1]
try:
value=float(value)
except:
pass
else:
if value%1 == 0: # if no decimals...
value=int(value)
pass
parameters[key]=value
return parameters
def extract_parameters_from_file(file_path: str) -> pd.Series:
    """Parse all ``x = ...`` parameters from a Motions indata file.

    Commented lines (starting with '/') are stripped before parsing.

    Parameters
    ----------
    file_path : str
        Path to the Motions indata file.

    Returns
    -------
    pd.Series
        Parameters keyed by name; the series is named after *file_path*.
    """
    with open(file_path, mode='r') as fh:
        contents = fh.read()
    # drop commented lines before extracting the key/value pairs
    uncommented = re.sub(pattern='\/.*\n', repl='', string=contents)
    parameters = _extract_parameters(s=uncommented)
    return pd.Series(data=parameters, name=file_path)
def load_parameters(file_path: str) -> pd.DataFrame:
    """Load input and output parameters of one or more Motions simulations.

    The files should follow this naming convention:
    input file name: {file_path}
    output file name: {file_path}_OUTPUT
    output time series: {file_path}_TS.csv or {file_path}.ts

    Parameters
    ----------
    file_path : str or list of str
        File path(s) to the input file name; the other files are assumed to
        follow the naming convention above.

    Returns
    -------
    parameters : pd.DataFrame
        All parameters from input file(s) and output file(s), one row per run.
    """
    if isinstance(file_path, str):
        file_paths = [file_path]
    else:
        file_paths = file_path
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # collect one row (Series) per run and concatenate once instead.
    rows = [_load_parameters(file_path=path) for path in file_paths]
    if not rows:
        return pd.DataFrame()
    return pd.concat(rows, axis=1).T
def _load_parameters(file_path: str) -> pd.Series:
    """Collect input and output parameters for a single Motions run.

    The files should follow this naming convention:
    input file name: {file_path}
    output file name: {file_path}_OUTPUT
    output time series: {file_path}_TS.csv

    Parameters
    ----------
    file_path : str
        File path to the input file (the other file names are derived).

    Returns
    -------
    pd.Series
        All parameters from the input and output file, plus the absolute
        path to the time series under 'file_path_ts'.  (Return annotation
        corrected from pd.DataFrame: a single run yields one Series/row.)
    """
    ## Input parameter file:
    parameters_in = extract_parameters_from_file(file_path=file_path)
    ## Output parameter file:
    file_path_output = '%s_OUTPUT' % file_path
    parameters_out = extract_parameters_from_file(file_path=file_path_output)
    ## Joining input and output parameters (output values win on duplicates):
    data = dict(parameters_in)
    data.update(dict(parameters_out))
    _, name = os.path.split(file_path)
    parameters = pd.Series(data=data, name=name)
    parameters['file_path_ts'] = os.path.abspath('%s_TS.csv' % file_path)
    return parameters
def load_time_series(df_parameters: pd.DataFrame):
    """Load every time series referenced by *df_parameters*.

    Parameters
    ----------
    df_parameters : pd.DataFrame
        Frame with input/output parameters; must contain the column
        'file_path_ts' pointing at each run's time-series file.

    Returns
    -------
    dict
        Mapping from run name to its time-series DataFrame.

    Raises
    ------
    errors.TimeSeriesFilePathError
        If the 'file_path_ts' column is missing.
    """
    if 'file_path_ts' not in df_parameters:
        raise errors.TimeSeriesFilePathError('df_parameters must contain the column "file_path_ts"')
    return {name: _load_time_series(file_path=parameters['file_path_ts'])
            for name, parameters in df_parameters.iterrows()}
| [
"pandas.Series",
"pandas.read_csv",
"shipflowmotionshelpers.errors.TimeSeriesFilePathError",
"os.path.splitext",
"os.path.split",
"numpy.deg2rad",
"pandas.DataFrame",
"re.sub",
"re.findall",
"os.path.abspath"
] | [((437, 464), 'os.path.splitext', 'os.path.splitext', (['file_path'], {}), '(file_path)\n', (453, 464), False, 'import os\n'), ((715, 735), 'numpy.deg2rad', 'np.deg2rad', (["df['V4']"], {}), "(df['V4'])\n", (725, 735), True, 'import numpy as np\n'), ((754, 774), 'numpy.deg2rad', 'np.deg2rad', (["df['A4']"], {}), "(df['A4'])\n", (764, 774), True, 'import numpy as np\n'), ((930, 975), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'sep': '""" +"""', 'index_col': '(1)'}), "(file_path, sep=' +', index_col=1)\n", (941, 975), True, 'import pandas as pd\n'), ((992, 1012), 'numpy.deg2rad', 'np.deg2rad', (["df['P4']"], {}), "(df['P4'])\n", (1002, 1012), True, 'import numpy as np\n'), ((1205, 1249), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'sep': '""","""', 'index_col': '(1)'}), "(file_path, sep=',', index_col=1)\n", (1216, 1249), True, 'import pandas as pd\n'), ((1266, 1286), 'numpy.deg2rad', 'np.deg2rad', (["df['P4']"], {}), "(df['P4'])\n", (1276, 1286), True, 'import numpy as np\n'), ((1849, 1918), 're.findall', 're.findall', ([], {'pattern': '"""(\\\\w+) *= *"*([^ ^, ^" ^ ^\n ^)]+)"""', 'string': 's'}), '(pattern="""(\\\\w+) *= *"*([^ ^, ^" ^ ^\n ^)]+)""", string=s)\n', (1859, 1918), False, 'import re\n'), ((1985, 2054), 're.findall', 're.findall', ([], {'pattern': '"""(\\\\w+) *\\\\([^\\\\)]+\\\\) *: *([^\n]+)"""', 'string': 's'}), '(pattern="""(\\\\w+) *\\\\([^\\\\)]+\\\\) *: *([^\n]+)""", string=s)\n', (1995, 2054), False, 'import re\n'), ((3168, 3212), 're.sub', 're.sub', ([], {'pattern': '"""\\\\/.*\n"""', 'repl': '""""""', 'string': 's'}), "(pattern='\\\\/.*\\n', repl='', string=s)\n", (3174, 3212), False, 'import re\n'), ((3307, 3349), 'pandas.Series', 'pd.Series', ([], {'data': 'parameters', 'name': 'file_path'}), '(data=parameters, name=file_path)\n', (3316, 3349), True, 'import pandas as pd\n'), ((4197, 4211), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4209, 4211), True, 'import pandas as pd\n'), ((5487, 5511), 'os.path.split', 
'os.path.split', (['file_path'], {}), '(file_path)\n', (5500, 5511), False, 'import os\n'), ((5534, 5565), 'pandas.Series', 'pd.Series', ([], {'data': 'data', 'name': 'name'}), '(data=data, name=name)\n', (5543, 5565), True, 'import pandas as pd\n'), ((5601, 5641), 'os.path.abspath', 'os.path.abspath', (["('%s_TS.csv' % file_path)"], {}), "('%s_TS.csv' % file_path)\n", (5616, 5641), False, 'import os\n'), ((6054, 6145), 'shipflowmotionshelpers.errors.TimeSeriesFilePathError', 'errors.TimeSeriesFilePathError', (['"""df_parameters must contain the column "file_path_ts\\""""'], {}), '(\n \'df_parameters must contain the column "file_path_ts"\')\n', (6084, 6145), False, 'from shipflowmotionshelpers import errors\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import numpy as np
import common
def load_json(filename):
    """Read *filename* and return the parsed JSON object."""
    with open(filename, "r") as fh:
        return json.load(fh)
def load_data(filename):
    """Return (h_max, L2Error, H1Error) arrays from a convergence JSON file."""
    record = load_json(filename)
    keys = ("h_max", "L2Error", "H1Error")
    return tuple(np.array(record[key]) for key in keys)
def convg(dir_sol, dir_fig, show_plot, save_plot):
    """Plot total (L2+H1) error convergence of the singular interpolant."""
    fn_qu = dir_sol+'convergence_lShaped_singular_interpolant_qu.json'
    fn_br = dir_sol+'convergence_lShaped_singular_interpolant_br.json'
    hMax, L2_qu, H1_qu = load_data(fn_qu)
    hMax, L2_br, H1_br = load_data(fn_br)
    err_qu = np.sqrt(L2_qu**2 + H1_qu**2)
    err_br = np.sqrt(L2_br**2 + H1_br**2)
    print('Error Qu: ', err_qu)
    print('Error Br: ', err_br)
    # fit the observed rate on the last three points in log-log space
    m = hMax.size
    fit_qu = np.polyfit(np.log(hMax[m-3:m]), np.log(err_qu[m-3:m]), 1)
    fit_br = np.polyfit(np.log(hMax[m-3:m]), np.log(err_br[m-3:m]), 1)
    # reference lines shifted off the data for readability
    ref_qu = np.exp(np.polyval(fit_qu, np.log(hMax))+0.5)
    ref_br = np.exp(np.polyval(fit_br, np.log(hMax))-0.5)
    slope_qu = fit_qu[0]
    slope_br = fit_br[0]
    # plot error vs mesh size (log-log)
    myPlotDict = {
        'show_plot': show_plot,
        'save_plot': save_plot,
        'xlabel': r'Meshsize $h$ [log]',
        'legend_loc': 'upper left',
        'data_markers': ['bs-', 'ro-'],
        'data_labels': ['qu', 'br'],
        'ylim': [],
        'ref_data_markers': ['b--', 'r-.'],
        'title': '',
        'ylabel': r'Error in $H^{1}(\mathcal{D})$-norm [log]',
        'out_filename': dir_fig+'convg_lShaped_singular_interpolant.pdf',
        'xlim': [8E-3, 5E-1],
        'ref_data_labels': ['$O(h^{%2.2f})$'%slope_qu, '$O(h^{%2.2f})$'%slope_br],
    }
    common.plotLogLogData([hMax, hMax], [err_qu, err_br], [ref_qu, ref_br], myPlotDict)
if __name__ == "__main__":
    # Script entry point: render and save the convergence plot using the
    # default solution/figure directories.
    show_plot = True
    save_plot = True
    dir_sol = '../../output/lShaped_interpolant/'   # where the JSON results live
    dir_fig = '../../figures/lShaped_interpolant/'  # where the figure is written
    convg(dir_sol, dir_fig, show_plot, save_plot)
# End of file
| [
"numpy.sqrt",
"numpy.log",
"numpy.array",
"common.plotLogLogData",
"json.load"
] | [((609, 643), 'numpy.sqrt', 'np.sqrt', (['(L2err1 ** 2 + H1err1 ** 2)'], {}), '(L2err1 ** 2 + H1err1 ** 2)\n', (616, 643), True, 'import numpy as np\n'), ((651, 685), 'numpy.sqrt', 'np.sqrt', (['(L2err2 ** 2 + H1err2 ** 2)'], {}), '(L2err2 ** 2 + H1err2 ** 2)\n', (658, 685), True, 'import numpy as np\n'), ((1847, 1922), 'common.plotLogLogData', 'common.plotLogLogData', (['[hMax, hMax]', '[err1, err2]', '[ref1, ref2]', 'myPlotDict'], {}), '([hMax, hMax], [err1, err2], [ref1, ref2], myPlotDict)\n', (1868, 1922), False, 'import common\n'), ((170, 182), 'json.load', 'json.load', (['f'], {}), '(f)\n', (179, 182), False, 'import json\n'), ((248, 268), 'numpy.array', 'np.array', (["j['h_max']"], {}), "(j['h_max'])\n", (256, 268), True, 'import numpy as np\n'), ((270, 292), 'numpy.array', 'np.array', (["j['L2Error']"], {}), "(j['L2Error'])\n", (278, 292), True, 'import numpy as np\n'), ((294, 316), 'numpy.array', 'np.array', (["j['H1Error']"], {}), "(j['H1Error'])\n", (302, 316), True, 'import numpy as np\n'), ((811, 832), 'numpy.log', 'np.log', (['hMax[m - 3:m]'], {}), '(hMax[m - 3:m])\n', (817, 832), True, 'import numpy as np\n'), ((832, 853), 'numpy.log', 'np.log', (['err1[m - 3:m]'], {}), '(err1[m - 3:m])\n', (838, 853), True, 'import numpy as np\n'), ((881, 902), 'numpy.log', 'np.log', (['hMax[m - 3:m]'], {}), '(hMax[m - 3:m])\n', (887, 902), True, 'import numpy as np\n'), ((902, 923), 'numpy.log', 'np.log', (['err2[m - 3:m]'], {}), '(err2[m - 3:m])\n', (908, 923), True, 'import numpy as np\n'), ((964, 976), 'numpy.log', 'np.log', (['hMax'], {}), '(hMax)\n', (970, 976), True, 'import numpy as np\n'), ((1021, 1033), 'numpy.log', 'np.log', (['hMax'], {}), '(hMax)\n', (1027, 1033), True, 'import numpy as np\n')] |
import gym
import pybullet_envs
import numpy as np
from ppo.agent import Agent
if __name__ == '__main__':
    # PPO training loop for the PyBullet Ant task.
    env = gym.make('AntBulletEnv-v0')
    learn_interval = 100  # environment steps between policy updates
    batch_size = 5000
    n_epochs = 1000
    learning_rate = 0.0003
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    agent = Agent(n_actions=act_dim, batch_size=batch_size,
                  learning_rate=learning_rate, n_epochs=n_epochs, input_dims=obs_dim)
    n_games = 300
    best_score = env.reward_range[0]
    score_history = []
    learn_iters = 0
    avg_score = 0
    n_steps = 0
    for i in range(n_games):
        obs = env.reset()
        done = False
        score = 0
        while not done:
            # sample an action, step the environment and store the transition
            action, prob, val = agent.choose_action(obs)
            next_obs, reward, done, info = env.step(action)
            n_steps += 1
            score += reward
            agent.push(obs, action, prob, val, reward, done)
            if n_steps % learn_interval == 0:
                agent.learn()
            obs = next_obs
        score_history.append(score)
        avg_score = np.mean(score_history[-100:])
        # track the best rolling average over the last 100 episodes
        best_score = max(best_score, avg_score)
        print(f'Episode: {i} / Score: {score} / AVG Score (100): {avg_score}')
"numpy.mean",
"ppo.agent.Agent",
"gym.make"
] | [((119, 146), 'gym.make', 'gym.make', (['"""AntBulletEnv-v0"""'], {}), "('AntBulletEnv-v0')\n", (127, 146), False, 'import gym\n'), ((354, 489), 'ppo.agent.Agent', 'Agent', ([], {'n_actions': 'action_space', 'batch_size': 'batch_size', 'learning_rate': 'learning_rate', 'n_epochs': 'n_epochs', 'input_dims': 'observation_space'}), '(n_actions=action_space, batch_size=batch_size, learning_rate=\n learning_rate, n_epochs=n_epochs, input_dims=observation_space)\n', (359, 489), False, 'from ppo.agent import Agent\n'), ((1187, 1216), 'numpy.mean', 'np.mean', (['score_history[-100:]'], {}), '(score_history[-100:])\n', (1194, 1216), True, 'import numpy as np\n')] |
from kid_readout.analysis.timeseries import fftfilt, decimating_fir
import numpy as np
def test_decimating_fir():
    """Compare DecimatingFIR.process against an fftfilt reference."""
    np.random.seed(123)
    signal = np.random.randn(2 ** 16) + 1j * np.random.randn(2 ** 16)
    dfir = decimating_fir.DecimatingFIR(downsample_factor=16, num_taps=1024)
    expected = fftfilt.fftfilt(dfir.coefficients.ravel(), signal)[15::16]
    produced = dfir.process(signal)
    assert np.allclose(expected, produced)

# disabled for now until we have a chance to debug
test_decimating_fir.__test__ = False
def test_fir1d_history():
    """Filtering in two chunks must match filtering in one pass."""
    np.random.seed(123)
    taps = np.random.randn(16)
    samples = np.random.randn(2 ** 10)
    whole_fir = decimating_fir.FIR1D(taps)
    chunked_fir = decimating_fir.FIR1D(taps)
    whole = whole_fir.apply(samples)
    half = len(samples) // 2
    first = chunked_fir.apply(samples[:half])
    second = chunked_fir.apply(samples[half:])
    stitched = np.concatenate([first, second])
    assert np.allclose(whole, stitched)
def test_fir1d_nstream():
    """A single stream must match the corresponding row of a 2-stream run."""
    np.random.seed(123)
    taps = np.random.randn(16)
    samples = np.random.randn(2, 2 ** 10)
    multi_fir = decimating_fir.FIR1D(taps)
    single_fir = decimating_fir.FIR1D(taps)
    multi = multi_fir.apply(samples)
    multi = multi_fir.apply(samples)  # apply twice to exercise the history buffer
    single = single_fir.apply(samples[0, :])
    single = single_fir.apply(samples[0, :])
    assert np.allclose(single, multi[0, :])
"kid_readout.analysis.timeseries.decimating_fir.FIR1D",
"numpy.allclose",
"kid_readout.analysis.timeseries.decimating_fir.DecimatingFIR",
"numpy.empty_like",
"numpy.random.seed",
"numpy.random.randn"
] | [((119, 138), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (133, 138), True, 'import numpy as np\n'), ((209, 274), 'kid_readout.analysis.timeseries.decimating_fir.DecimatingFIR', 'decimating_fir.DecimatingFIR', ([], {'downsample_factor': '(16)', 'num_taps': '(1024)'}), '(downsample_factor=16, num_taps=1024)\n', (237, 274), False, 'from kid_readout.analysis.timeseries import fftfilt, decimating_fir\n'), ((378, 403), 'numpy.allclose', 'np.allclose', (['gold', 'result'], {}), '(gold, result)\n', (389, 403), True, 'import numpy as np\n'), ((521, 540), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (535, 540), True, 'import numpy as np\n'), ((553, 572), 'numpy.random.randn', 'np.random.randn', (['(16)'], {}), '(16)\n', (568, 572), True, 'import numpy as np\n'), ((584, 608), 'numpy.random.randn', 'np.random.randn', (['(2 ** 10)'], {}), '(2 ** 10)\n', (599, 608), True, 'import numpy as np\n'), ((618, 645), 'kid_readout.analysis.timeseries.decimating_fir.FIR1D', 'decimating_fir.FIR1D', (['coeff'], {}), '(coeff)\n', (638, 645), False, 'from kid_readout.analysis.timeseries import fftfilt, decimating_fir\n'), ((657, 684), 'kid_readout.analysis.timeseries.decimating_fir.FIR1D', 'decimating_fir.FIR1D', (['coeff'], {}), '(coeff)\n', (677, 684), False, 'from kid_readout.analysis.timeseries import fftfilt, decimating_fir\n'), ((727, 746), 'numpy.empty_like', 'np.empty_like', (['full'], {}), '(full)\n', (740, 746), True, 'import numpy as np\n'), ((912, 938), 'numpy.allclose', 'np.allclose', (['full', 'compare'], {}), '(full, compare)\n', (923, 938), True, 'import numpy as np\n'), ((969, 988), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (983, 988), True, 'import numpy as np\n'), ((1001, 1020), 'numpy.random.randn', 'np.random.randn', (['(16)'], {}), '(16)\n', (1016, 1020), True, 'import numpy as np\n'), ((1032, 1059), 'numpy.random.randn', 'np.random.randn', (['(2)', '(2 ** 10)'], {}), '(2, 2 ** 10)\n', (1047, 
1059), True, 'import numpy as np\n'), ((1068, 1095), 'kid_readout.analysis.timeseries.decimating_fir.FIR1D', 'decimating_fir.FIR1D', (['coeff'], {}), '(coeff)\n', (1088, 1095), False, 'from kid_readout.analysis.timeseries import fftfilt, decimating_fir\n'), ((1107, 1134), 'kid_readout.analysis.timeseries.decimating_fir.FIR1D', 'decimating_fir.FIR1D', (['coeff'], {}), '(coeff)\n', (1127, 1134), False, 'from kid_readout.analysis.timeseries import fftfilt, decimating_fir\n'), ((1310, 1342), 'numpy.allclose', 'np.allclose', (['stream1', 'full[0, :]'], {}), '(stream1, full[0, :])\n', (1321, 1342), True, 'import numpy as np\n'), ((147, 171), 'numpy.random.randn', 'np.random.randn', (['(2 ** 16)'], {}), '(2 ** 16)\n', (162, 171), True, 'import numpy as np\n'), ((175, 199), 'numpy.random.randn', 'np.random.randn', (['(2 ** 16)'], {}), '(2 ** 16)\n', (190, 199), True, 'import numpy as np\n')] |
#coding:utf-8
# 感知器 y = f(Wn * x + b)
# 代码实现的是一个逻辑AND操作,输入最后一项一直为1,代表我们可以理解偏置项b的特征值输入一直为1
# 这样就是 y = f(Wn+1*[x,1]), Wn+1就是b
# https://www.zybuluo.com/hanbingtao/note/433855
from numpy import array, dot, random
from random import choice
def fun_1_or_0(x): return 0 if x < 0 else 1
# Training set for logical AND: the constant 1 in the last position is the
# bias feature, so the third weight plays the role of the bias b.
training_data = [(array([0, 0, 1]), 0), (array([0, 1, 1]), 0),
                 (array([1, 0, 1]), 0), (array([1, 1, 1]), 1)]
weights = random.random(3)
print("before training, weights:", weights)  # fixed typo: was "traning"
learning_rate = 0.2
num_iterations = 100  # fixed typo: was "num_iteratios"
for i in range(num_iterations):
    # pick a random sample; `sample` renamed from `input` (shadowed a builtin)
    sample, truth = choice(training_data)
    result = dot(weights, sample)
    error = truth - fun_1_or_0(result)
    # perceptron update rule: w <- w + eta * error * x
    weights += learning_rate * error * sample
print("after training, weights:", weights)
# show the learned response on all four AND inputs
for x, _ in training_data:
    result = dot(x, weights)
    print("{}:{}->{}".format(x[:2], result, fun_1_or_0(result)))
| [
"numpy.random.random",
"numpy.array",
"numpy.dot",
"random.choice"
] | [((425, 441), 'numpy.random.random', 'random.random', (['(3)'], {}), '(3)\n', (438, 441), False, 'from numpy import array, dot, random\n'), ((575, 596), 'random.choice', 'choice', (['training_data'], {}), '(training_data)\n', (581, 596), False, 'from random import choice\n'), ((610, 629), 'numpy.dot', 'dot', (['weights', 'input'], {}), '(weights, input)\n', (613, 629), False, 'from numpy import array, dot, random\n'), ((802, 817), 'numpy.dot', 'dot', (['x', 'weights'], {}), '(x, weights)\n', (805, 817), False, 'from numpy import array, dot, random\n'), ((306, 322), 'numpy.array', 'array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (311, 322), False, 'from numpy import array, dot, random\n'), ((329, 345), 'numpy.array', 'array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (334, 345), False, 'from numpy import array, dot, random\n'), ((369, 385), 'numpy.array', 'array', (['[1, 0, 1]'], {}), '([1, 0, 1])\n', (374, 385), False, 'from numpy import array, dot, random\n'), ((392, 408), 'numpy.array', 'array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (397, 408), False, 'from numpy import array, dot, random\n')] |
import numpy as np
import torch
import torch.nn as nn
import torchvision
import pandas as pd
import matplotlib.pyplot as plt
import torch.nn.functional as F
from sklearn import metrics
import torchvision.transforms as transforms
def get_target_label_idx(labels, targets):
    """Return the flat positions in *labels* whose value occurs in *targets*."""
    membership = np.isin(labels, targets)
    return np.argwhere(membership).flatten().tolist()
def global_contrast_normalization(x: torch.tensor, scale='l2'):
    """Center *x* and divide by an l1- or l2-based scale.

    NOTE: operates in place on the given tensor and also returns it.
    With 'l2' the scale is sqrt(sum(x^2)) divided by the element count.
    """
    n_features = int(np.prod(x.shape))
    x -= torch.mean(x)
    if scale == 'l1':
        x_scale = torch.mean(torch.abs(x))
    elif scale == 'l2':
        x_scale = torch.sqrt(torch.sum(x ** 2)) / n_features
    x /= x_scale
    return x
def OneClass(train_dataset, test_dataset, Class):
    """Build a one-class split from labeled datasets.

    Training samples are restricted to the target class; every test sample is
    kept and labeled 0 (in-class / normal) or 1 (out-of-class / anomalous).

    Parameters
    ----------
    train_dataset : iterable of (x, y) pairs
    test_dataset : iterable of (x, y) pairs
    Class : label value treated as the "normal" class

    Returns
    -------
    (Samples, test_points, labels) : tuple of lists
        Training inputs of the target class, all test inputs, and the 0/1
        anomaly labels aligned with ``test_points``.

    Notes
    -----
    Cleaned up from the original: removed a no-op ``len(Samples)`` statement
    and unused counters; the redundant ``elif y != Class`` branch collapses
    into a single pass over the test set.
    """
    Samples = [x for x, y in train_dataset if y == Class]
    test_points = []
    labels = []
    for x, y in test_dataset:  # single pass: datasets may be one-shot iterables
        test_points.append(x)
        labels.append(0 if y == Class else 1)
    return Samples, test_points, labels
| [
"numpy.prod",
"torch.abs",
"torch.mean",
"numpy.isin",
"torch.sum"
] | [((461, 474), 'torch.mean', 'torch.mean', (['x'], {}), '(x)\n', (471, 474), False, 'import torch\n'), ((431, 447), 'numpy.prod', 'np.prod', (['x.shape'], {}), '(x.shape)\n', (438, 447), True, 'import numpy as np\n'), ((541, 553), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (550, 553), False, 'import torch\n'), ((607, 624), 'torch.sum', 'torch.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (616, 624), False, 'import torch\n'), ((298, 322), 'numpy.isin', 'np.isin', (['labels', 'targets'], {}), '(labels, targets)\n', (305, 322), True, 'import numpy as np\n')] |
"""
===================================
Pose Coupling of Dual Cartesian DMP
===================================
A dual Cartesian DMP is learned from an artificially generated demonstration
and replayed with and without a coupling of the pose of the two end effectors.
The red line indicates the DMP without coupling term and the orange line marks
the DMP with coupling term.
"""
print(__doc__)
import warnings
import numpy as np
import matplotlib.pyplot as plt
from movement_primitives.dmp import DualCartesianDMP, CouplingTermDualCartesianPose
import pytransform3d.rotations as pr
import pytransform3d.transformations as pt
import pytransform3d.trajectories as ptr
from pytransform3d.urdf import UrdfTransformManager
from movement_primitives.testing.simulation import SimulationMockup
# --- Timing parameters for DMP integration and the mockup simulation ---
dt = 0.01
int_dt = 0.001
execution_time = 1.0
# Desired relative pose between the two end effectors (homogeneous matrix).
desired_distance = np.array([  # right arm to left arm
    [1.0, 0.0, 0.0, 0.0],
    [0.0, 1.0, 0.0, 0.0],
    [0.0, 0.0, 1.0, -1.2],
    [0.0, 0.0, 0.0, 1.0]
])
desired_distance[:3, :3] = pr.matrix_from_compact_axis_angle([np.deg2rad(180), 0, 0])
ct = CouplingTermDualCartesianPose(
    desired_distance=desired_distance, couple_position=True, couple_orientation=True,
    lf=(1.0, 0.0), k=1, c1=0.1, c2=1000)  # c2=10000 in simulation
rh5 = SimulationMockup(dt=dt)
# --- Generate an artificial demonstration: 14 dims = 2 x (position + quaternion) ---
Y = np.zeros((1001, 14))
T = np.linspace(0, 1, len(Y))
sigmoid = 0.5 * (np.tanh(1.5 * np.pi * (T - 0.5)) + 1.0)
radius = 0.5
# Left end effector: quarter-circle arc in the x-z plane at fixed y.
circle1 = radius * np.cos(np.deg2rad(90) + np.deg2rad(90) * sigmoid)
circle2 = radius * np.sin(np.deg2rad(90) + np.deg2rad(90) * sigmoid)
Y[:, 0] = circle1
Y[:, 1] = 0.55
Y[:, 2] = circle2
R_three_fingers_front = pr.matrix_from_axis_angle([0, 0, 1, 0.5 * np.pi])
R_to_center_start = pr.matrix_from_axis_angle([1, 0, 0, np.deg2rad(0)])
# introduces coupling error (default goal: -90; error at: -110)
R_to_center_end = pr.matrix_from_axis_angle([1, 0, 0, np.deg2rad(-110)])
q_start = pr.quaternion_from_matrix(R_three_fingers_front.dot(R_to_center_start))
# NOTE: negated quaternion (same rotation, opposite sign) — affects slerp path
q_end = -pr.quaternion_from_matrix(R_three_fingers_front.dot(R_to_center_end))
for i, t in enumerate(T):
    Y[i, 3:7] = pr.quaternion_slerp(q_start, q_end, t)
# Right end effector: mirrored quarter-circle arc, components 7..13.
circle1 = radius * np.cos(np.deg2rad(270) + np.deg2rad(90) * sigmoid)
circle2 = radius * np.sin(np.deg2rad(270) + np.deg2rad(90) * sigmoid)
Y[:, 7] = circle1
Y[:, 8] = 0.55
Y[:, 9] = circle2
R_three_fingers_front = pr.matrix_from_axis_angle([0, 0, 1, 0.5 * np.pi])
R_to_center_start = pr.matrix_from_axis_angle([1, 0, 0, np.deg2rad(-180)])
R_to_center_end = pr.matrix_from_axis_angle([1, 0, 0, np.deg2rad(-270)])
q_start = pr.quaternion_from_matrix(R_three_fingers_front.dot(R_to_center_start))
q_end = pr.quaternion_from_matrix(R_three_fingers_front.dot(R_to_center_end))
for i, t in enumerate(T):
    Y[i, 10:] = pr.quaternion_slerp(q_start, q_end, t)
# --- Load gripper meshes for visualization (optional; skipped when missing) ---
tm = UrdfTransformManager()
try:
    with open("abstract-urdf-gripper/urdf/GripperLeft.urdf", "r") as f:
        tm.load_urdf(f, mesh_path="abstract-urdf-gripper/urdf/")
    with open("abstract-urdf-gripper/urdf/GripperRight.urdf", "r") as f:
        tm.load_urdf(f, mesh_path="abstract-urdf-gripper/urdf/")
except FileNotFoundError:
    warnings.warn("URDF not found")
tm.add_transform("ALWristPitch_Link", "base", np.eye(4))
tm.add_transform("ARWristPitch_Link", "base", np.eye(4))
# --- Learn the DMP from the demonstration ---
dmp = DualCartesianDMP(
    execution_time=execution_time, dt=dt,
    n_weights_per_dim=10, int_dt=int_dt, p_gain=0.0)
dmp.imitate(T, Y)
# --- Replay with (orange) and without (red) the pose coupling term ---
ax = pr.plot_basis(ax_s=0.8, s=0.1)
for coupling_term, color in [(ct, "orange"), (None, "red")]:
    dmp.reset()
    rh5.goto_ee_state(Y[0])
    desired_positions, positions, desired_velocities, velocities = \
        rh5.step_through_cartesian(dmp, Y[0], np.zeros(12), execution_time,
                                   coupling_term=coupling_term)
    P = np.asarray(positions)
    V = np.asarray(velocities)
    ptr.plot_trajectory(ax=ax, P=P[:, :7], s=0.05, color=color, show_direction=False)
    ptr.plot_trajectory(ax=ax, P=P[:, 7:], s=0.05, color=color, show_direction=False)
    # draw the grippers at every 50th step along the executed trajectory
    for t in range(0, len(P), 50):
        gripper_left2base = pt.transform_from_pq(P[t, :7])
        gripper_right2base = pt.transform_from_pq(P[t, 7:])
        tm.add_transform("ALWristPitch_Link", "base", gripper_left2base)
        tm.add_transform("ARWristPitch_Link", "base", gripper_right2base)
        ax = tm.plot_visuals(frame="base", ax=ax)
ax.view_init(elev=0, azim=90)
plt.show()
| [
"pytransform3d.rotations.quaternion_slerp",
"numpy.eye",
"pytransform3d.rotations.plot_basis",
"movement_primitives.dmp.DualCartesianDMP",
"numpy.asarray",
"numpy.tanh",
"pytransform3d.transformations.transform_from_pq",
"numpy.array",
"numpy.zeros",
"numpy.deg2rad",
"movement_primitives.dmp.Cou... | [((858, 961), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, -1.2], [0.0, \n 0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, -1.2],\n [0.0, 0.0, 0.0, 1.0]])\n', (866, 961), True, 'import numpy as np\n'), ((1092, 1249), 'movement_primitives.dmp.CouplingTermDualCartesianPose', 'CouplingTermDualCartesianPose', ([], {'desired_distance': 'desired_distance', 'couple_position': '(True)', 'couple_orientation': '(True)', 'lf': '(1.0, 0.0)', 'k': '(1)', 'c1': '(0.1)', 'c2': '(1000)'}), '(desired_distance=desired_distance,\n couple_position=True, couple_orientation=True, lf=(1.0, 0.0), k=1, c1=\n 0.1, c2=1000)\n', (1121, 1249), False, 'from movement_primitives.dmp import DualCartesianDMP, CouplingTermDualCartesianPose\n'), ((1283, 1306), 'movement_primitives.testing.simulation.SimulationMockup', 'SimulationMockup', ([], {'dt': 'dt'}), '(dt=dt)\n', (1299, 1306), False, 'from movement_primitives.testing.simulation import SimulationMockup\n'), ((1312, 1332), 'numpy.zeros', 'np.zeros', (['(1001, 14)'], {}), '((1001, 14))\n', (1320, 1332), True, 'import numpy as np\n'), ((1647, 1696), 'pytransform3d.rotations.matrix_from_axis_angle', 'pr.matrix_from_axis_angle', (['[0, 0, 1, 0.5 * np.pi]'], {}), '([0, 0, 1, 0.5 * np.pi])\n', (1672, 1696), True, 'import pytransform3d.rotations as pr\n'), ((2364, 2413), 'pytransform3d.rotations.matrix_from_axis_angle', 'pr.matrix_from_axis_angle', (['[0, 0, 1, 0.5 * np.pi]'], {}), '([0, 0, 1, 0.5 * np.pi])\n', (2389, 2413), True, 'import pytransform3d.rotations as pr\n'), ((2809, 2831), 'pytransform3d.urdf.UrdfTransformManager', 'UrdfTransformManager', ([], {}), '()\n', (2829, 2831), False, 'from pytransform3d.urdf import UrdfTransformManager\n'), ((3296, 3403), 'movement_primitives.dmp.DualCartesianDMP', 'DualCartesianDMP', ([], {'execution_time': 'execution_time', 'dt': 'dt', 'n_weights_per_dim': '(10)', 'int_dt': 'int_dt', 
'p_gain': '(0.0)'}), '(execution_time=execution_time, dt=dt, n_weights_per_dim=10,\n int_dt=int_dt, p_gain=0.0)\n', (3312, 3403), False, 'from movement_primitives.dmp import DualCartesianDMP, CouplingTermDualCartesianPose\n'), ((3432, 3462), 'pytransform3d.rotations.plot_basis', 'pr.plot_basis', ([], {'ax_s': '(0.8)', 's': '(0.1)'}), '(ax_s=0.8, s=0.1)\n', (3445, 3462), True, 'import pytransform3d.rotations as pr\n'), ((4397, 4407), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4405, 4407), True, 'import matplotlib.pyplot as plt\n'), ((2109, 2147), 'pytransform3d.rotations.quaternion_slerp', 'pr.quaternion_slerp', (['q_start', 'q_end', 't'], {}), '(q_start, q_end, t)\n', (2128, 2147), True, 'import pytransform3d.rotations as pr\n'), ((2764, 2802), 'pytransform3d.rotations.quaternion_slerp', 'pr.quaternion_slerp', (['q_start', 'q_end', 't'], {}), '(q_start, q_end, t)\n', (2783, 2802), True, 'import pytransform3d.rotations as pr\n'), ((3221, 3230), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3227, 3230), True, 'import numpy as np\n'), ((3278, 3287), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3284, 3287), True, 'import numpy as np\n'), ((3786, 3807), 'numpy.asarray', 'np.asarray', (['positions'], {}), '(positions)\n', (3796, 3807), True, 'import numpy as np\n'), ((3816, 3838), 'numpy.asarray', 'np.asarray', (['velocities'], {}), '(velocities)\n', (3826, 3838), True, 'import numpy as np\n'), ((3844, 3930), 'pytransform3d.trajectories.plot_trajectory', 'ptr.plot_trajectory', ([], {'ax': 'ax', 'P': 'P[:, :7]', 's': '(0.05)', 'color': 'color', 'show_direction': '(False)'}), '(ax=ax, P=P[:, :7], s=0.05, color=color, show_direction=\n False)\n', (3863, 3930), True, 'import pytransform3d.trajectories as ptr\n'), ((3930, 4016), 'pytransform3d.trajectories.plot_trajectory', 'ptr.plot_trajectory', ([], {'ax': 'ax', 'P': 'P[:, 7:]', 's': '(0.05)', 'color': 'color', 'show_direction': '(False)'}), '(ax=ax, P=P[:, 7:], s=0.05, color=color, show_direction=\n 
False)\n', (3949, 4016), True, 'import pytransform3d.trajectories as ptr\n'), ((1063, 1078), 'numpy.deg2rad', 'np.deg2rad', (['(180)'], {}), '(180)\n', (1073, 1078), True, 'import numpy as np\n'), ((1380, 1412), 'numpy.tanh', 'np.tanh', (['(1.5 * np.pi * (T - 0.5))'], {}), '(1.5 * np.pi * (T - 0.5))\n', (1387, 1412), True, 'import numpy as np\n'), ((1753, 1766), 'numpy.deg2rad', 'np.deg2rad', (['(0)'], {}), '(0)\n', (1763, 1766), True, 'import numpy as np\n'), ((1887, 1903), 'numpy.deg2rad', 'np.deg2rad', (['(-110)'], {}), '(-110)\n', (1897, 1903), True, 'import numpy as np\n'), ((2470, 2486), 'numpy.deg2rad', 'np.deg2rad', (['(-180)'], {}), '(-180)\n', (2480, 2486), True, 'import numpy as np\n'), ((2543, 2559), 'numpy.deg2rad', 'np.deg2rad', (['(-270)'], {}), '(-270)\n', (2553, 2559), True, 'import numpy as np\n'), ((3142, 3173), 'warnings.warn', 'warnings.warn', (['"""URDF not found"""'], {}), "('URDF not found')\n", (3155, 3173), False, 'import warnings\n'), ((3684, 3696), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (3692, 3696), True, 'import numpy as np\n'), ((4075, 4105), 'pytransform3d.transformations.transform_from_pq', 'pt.transform_from_pq', (['P[t, :7]'], {}), '(P[t, :7])\n', (4095, 4105), True, 'import pytransform3d.transformations as pt\n'), ((4135, 4165), 'pytransform3d.transformations.transform_from_pq', 'pt.transform_from_pq', (['P[t, 7:]'], {}), '(P[t, 7:])\n', (4155, 4165), True, 'import pytransform3d.transformations as pt\n'), ((1460, 1474), 'numpy.deg2rad', 'np.deg2rad', (['(90)'], {}), '(90)\n', (1470, 1474), True, 'import numpy as np\n'), ((1529, 1543), 'numpy.deg2rad', 'np.deg2rad', (['(90)'], {}), '(90)\n', (1539, 1543), True, 'import numpy as np\n'), ((2175, 2190), 'numpy.deg2rad', 'np.deg2rad', (['(270)'], {}), '(270)\n', (2185, 2190), True, 'import numpy as np\n'), ((2245, 2260), 'numpy.deg2rad', 'np.deg2rad', (['(270)'], {}), '(270)\n', (2255, 2260), True, 'import numpy as np\n'), ((1477, 1491), 'numpy.deg2rad', 'np.deg2rad', 
(['(90)'], {}), '(90)\n', (1487, 1491), True, 'import numpy as np\n'), ((1546, 1560), 'numpy.deg2rad', 'np.deg2rad', (['(90)'], {}), '(90)\n', (1556, 1560), True, 'import numpy as np\n'), ((2193, 2207), 'numpy.deg2rad', 'np.deg2rad', (['(90)'], {}), '(90)\n', (2203, 2207), True, 'import numpy as np\n'), ((2263, 2277), 'numpy.deg2rad', 'np.deg2rad', (['(90)'], {}), '(90)\n', (2273, 2277), True, 'import numpy as np\n')] |
import numpy as np

# Scale degrees (semitone offsets from the tonic) for the base scale types.
C_BASE_SCALES = {
    "cromatic": np.arange(12),
    "diatonic": (0, 2, 4, 5, 7, 9, 11),
    "melodic minor": (0, 2, 3, 5, 7, 9, 11),
    "harmonic minor": (0, 2, 3, 5, 7, 8, 11),
}

# Pitch-class spellings for the twelve chromatic notes starting at C.
NOTES = ("C", "Db", "D", "Eb", "E", "F", "F#", "G", "Ab", "A", "Bb", "B")

# Roman-numeral labels for the seven scale degrees.
DEGREES = ("I", "II", "III", "IV", "V", "VI", "VII")

# fmt: off
# Interval names from unison (P1) up to the octave (P8); TT = tritone.
INTERVALS = [
    "P1", "m2", "M2", "m3", "M3", "P4", "TT", "P5", "m6", "M6", "m7", "M7", "P8"
]
# fmt: on

# Successive semitone steps for each pitch set.  Scale entries carry a
# leading space in their key, which keeps them distinct from chord suffixes.
PITCHSET_INTERVALS = {
    # Triads
    "": (4, 3),
    "m": (3, 4),
    # 7th chords
    "maj7": (4, 3, 4),
    "m7": (3, 4, 3),
    "7": (4, 3, 3),
    "m7b5": (3, 3, 4),
    "m7(maj7)": (3, 4, 4),
    "maj7(#5)": (4, 4, 3),
    "dim": (3, 3, 3),
    # Scales
    " cromatic": tuple(np.full(11, 1)),
    " ionian": (2, 2, 1, 2, 2, 2),
    " harmonic minor": (2, 1, 2, 2, 1, 3),
    " melodic minor": (2, 1, 2, 2, 2, 2),
}

# Reverse lookup: interval pattern -> pitch-set name.
PITCHSET_NAMES = {pattern: name for name, pattern in PITCHSET_INTERVALS.items()}
| [
"numpy.full",
"numpy.arange"
] | [((54, 67), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (63, 67), True, 'import numpy as np\n'), ((747, 761), 'numpy.full', 'np.full', (['(11)', '(1)'], {}), '(11, 1)\n', (754, 761), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, PowerNorm, Normalize
from scipy import fftpack
def pix_intensity_hist(vals, generator, noise_vector_length, inv_transf, channel_axis, fname=None, Xterm=True, window=None, multichannel=False):
    """Compare pixel-intensity histograms of validation and generated samples.

    Draws one generated sample per validation image, histograms both sets on
    a shared binning, plots them with Poisson (sqrt-count) error bars, and
    returns a chi-square-style distance between the two histograms with the
    last 5 bins dropped.

    vals:                validation images.
    generator:           trained generator exposing ``predict``.
    noise_vector_length: latent noise vector length.
    inv_transf:          maps generated samples back to the data scale.
    channel_axis:        axis of the scaled channel (``multichannel`` only).
    fname:               output path used when ``Xterm`` is False.
    Xterm:               draw on screen if True, else save PNG and close.
    window:              optional axis limits forwarded to ``plt.axis``.
    multichannel:        whether the generator output has a channel axis.
    """
    noise = np.random.normal(size=(len(vals), 1, noise_vector_length))
    generated = generator.predict(noise)
    if multichannel:
        # Keep only the scaled channel.
        generated = np.take(generated, 0, axis=channel_axis)
    generated = inv_transf(generated)  # back to the original data scale
    # Shared binning: derive edges from the validation data, reuse for samples.
    val_counts, edges = np.histogram(vals.flatten(), bins=25)
    gen_counts, _ = np.histogram(generated.flatten(), bins=edges)
    bin_centers = (edges[:-1] + edges[1:]) / 2
    plt.figure()
    plt.errorbar(bin_centers, val_counts, yerr=np.sqrt(val_counts), fmt='o-', label='validation')
    plt.errorbar(bin_centers, gen_counts, yerr=np.sqrt(gen_counts), fmt='o-', label='generated')
    plt.yscale('log')
    plt.legend(loc='upper right')
    plt.xlabel('Pixel value')
    plt.ylabel('Counts')
    plt.title('Pixel Intensity Histogram')
    if window:
        plt.axis(window)
    if Xterm:
        plt.draw()
    else:
        plt.savefig(fname, format='png')
        plt.close()
    # Chi-square-like distance, ignoring the (sparsely populated) top 5 bins.
    val_counts = val_counts[:-5]
    gen_counts = gen_counts[:-5]
    return np.sum(np.divide(np.power(val_counts - gen_counts, 2.0), val_counts))
def save_img_grid(generator, noise_vector_length, inv_transf, channel_axis, fname=None, Xterm=True, scale='lin', multichannel=False):
    """Render a 2x2 grid of generated images.

    Samples four latent vectors, tiles the generated images into one square
    composite, and either draws it on screen (``Xterm=True``) or saves it to
    ``fname`` as a PNG.  ``scale='pwr'`` uses a power-law color normalization;
    any other value uses a linear scale on [-1, 1].
    """
    side = 2  # images per grid edge -> side**2 samples total
    latents = np.random.normal(size=(side**2, 1, noise_vector_length))
    samples = generator.predict(latents)
    if multichannel:
        samples = np.take(samples, 0, axis=channel_axis)  # take the scaled channel
    else:
        samples = np.squeeze(samples)
    img_len = samples.shape[2]
    tot_len = side * img_len
    gridimg = np.zeros((tot_len, tot_len))
    # Tile the samples row by row into the composite image.
    cnt = 0
    for row in range(side):
        for col in range(side):
            gridimg[row * img_len:(row + 1) * img_len, col * img_len:(col + 1) * img_len] = samples[cnt, :, :]
            cnt += 1
    plt.figure(figsize=(5, 4))
    if scale == 'pwr':
        imgmap = plt.pcolormesh(gridimg, norm=PowerNorm(gamma=0.2, vmin=0., vmax=2000),
                               cmap='Blues')  # Power normalized color scale
    else:
        imgmap = plt.imshow(gridimg, cmap='Blues', norm=Normalize(vmin=-1., vmax=1.))  # Linear color scale
    plt.colorbar(imgmap)
    # Divider lines between the four tiles.
    plt.plot([tot_len // 2, tot_len // 2], [0, tot_len], 'k-', linewidth='0.6')
    plt.plot([0, tot_len], [tot_len // 2, tot_len // 2], 'k-', linewidth='0.6')
    plt.axis([0, tot_len, 0, tot_len])
    plt.tick_params(axis='both', which='both', bottom=False, top=False, left=False, right=False,
                    labelbottom=False, labelleft=False)
    plt.title('Generated Images')
    if Xterm:
        plt.draw()
    else:
        plt.savefig(fname, format='png')
        plt.close()
def azimuthalAverage(image, center=None):
    """
    Calculate the azimuthally averaged radial profile.

    image - The 2D image
    center - The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fractional pixels).

    Returns a 1D array: the mean pixel value in each integer-radius annulus
    around ``center``.
    """
    # Pixel index grids; np.indices returns (row, col) == (y, x).
    y, x = np.indices(image.shape)

    if center is None:
        # Bug fix: the y-coordinate of the default center was previously
        # derived from the x extent, which is wrong for non-square images.
        # `is None` (instead of `not center`) also avoids the ambiguous
        # truth test when callers pass an array-like center.
        center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])

    r = np.hypot(x - center[0], y - center[1])

    # Sort all pixels by radius.
    ind = np.argsort(r.flat)
    r_sorted = r.flat[ind]
    i_sorted = image.flat[ind]

    # Integer part of the radii (bin size = 1).
    r_int = r_sorted.astype(int)

    # Find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]  # nonzero where the integer radius changes
    rind = np.where(deltar)[0]       # location of changed radius
    nr = rind[1:] - rind[:-1]        # number of pixels per radius bin

    # Cumulative sum lets each bin's total be computed as a difference.
    csim = np.cumsum(i_sorted, dtype=float)
    tbin = csim[rind[1:]] - csim[rind[:-1]]

    radial_prof = tbin / nr
    return radial_prof
def power_spectrum(image):
    """Azimuthally averaged 2D power spectrum of a numpy array image.

    The image is contrast-normalized by GLOBAL_MEAN, Fourier transformed,
    and the squared magnitude is reduced to a 1D radial profile.

    Returns (k, P_k): wavenumber indices and the corresponding power.
    """
    GLOBAL_MEAN = 0.9998563  # this should be the mean pixel value of the training+validation datasets
    contrast = (image - GLOBAL_MEAN) / GLOBAL_MEAN
    shifted = fftpack.fftshift(fftpack.fft2(contrast))
    pspec2d = np.abs(shifted) ** 2
    P_k = azimuthalAverage(pspec2d)
    k = np.arange(len(P_k))
    return k, P_k
def batch_Pk(arr):
    """Power spectra for a batch of images.

    Returns (k, Pk_arr): the wavenumber axis (shared by equally sized
    images) and one spectrum per image, stacked into a 2D array.
    """
    spectra = []
    for img in arr:
        k, P_k = power_spectrum(np.squeeze(img))
        spectra.append(P_k)
    return k, np.array(spectra)
def pspect(val_imgs, generator, invtransform, noise_vect_len, channel_axis, fname=None, Xterm=True, multichannel=False):
    """Plot mean +/- std of the power spectrum for validation vs generated images.

    One sample is generated per validation image; azimuthally averaged power
    spectra are computed for both sets, the generated band is drawn over the
    validation band, and a chi-square-style distance between the two mean
    spectra is returned.
    """
    batch = val_imgs.shape[0]
    fakes = generator.predict(np.random.normal(size=(batch, 1, noise_vect_len)))
    if multichannel:
        fakes = np.take(fakes, 0, axis=channel_axis)  # take the scaled channel
    else:
        fakes = np.squeeze(fakes)
    fakes = invtransform(fakes)
    k, Pk_val = batch_Pk(val_imgs)
    k, Pk_gen = batch_Pk(fakes)
    val_mean = np.mean(Pk_val, axis=0)
    gen_mean = np.mean(Pk_gen, axis=0)
    val_std = np.std(Pk_val, axis=0)
    gen_std = np.std(Pk_gen, axis=0)
    plt.figure()
    # Generated band (red) over the validation band (black).
    plt.fill_between(k, gen_mean - gen_std, gen_mean + gen_std, color='red', alpha=0.4)
    plt.plot(k, gen_mean, 'r--')
    plt.plot(k, val_mean, 'k:')
    plt.plot(k, val_mean + val_std, 'k-')
    plt.plot(k, val_mean - val_std, 'k-')
    plt.xscale('log')
    plt.yscale('log')
    plt.ylabel(r'$P(k)$')
    plt.xlabel(r'$k$')
    plt.title('Power Spectrum')
    if Xterm:
        plt.draw()
    else:
        plt.savefig(fname, format='png')
        plt.close()
    return np.sum(np.divide(np.power(gen_mean - val_mean, 2.0), val_mean))
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"numpy.argsort",
"numpy.array",
"numpy.mean",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"scipy.fftpack.fft2",
"numpy.take",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.hyp... | [((860, 872), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (870, 872), True, 'import matplotlib.pyplot as plt\n'), ((1054, 1071), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1064, 1071), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1105), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (1086, 1105), True, 'import matplotlib.pyplot as plt\n'), ((1110, 1135), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pixel value"""'], {}), "('Pixel value')\n", (1120, 1135), True, 'import matplotlib.pyplot as plt\n'), ((1140, 1160), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (1150, 1160), True, 'import matplotlib.pyplot as plt\n'), ((1165, 1203), 'matplotlib.pyplot.title', 'plt.title', (['"""Pixel Intensity Histogram"""'], {}), "('Pixel Intensity Histogram')\n", (1174, 1203), True, 'import matplotlib.pyplot as plt\n'), ((2020, 2048), 'numpy.zeros', 'np.zeros', (['(tot_len, tot_len)'], {}), '((tot_len, tot_len))\n', (2028, 2048), True, 'import numpy as np\n'), ((2295, 2321), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (2305, 2321), True, 'import matplotlib.pyplot as plt\n'), ((2630, 2650), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imgmap'], {}), '(imgmap)\n', (2642, 2650), True, 'import matplotlib.pyplot as plt\n'), ((2655, 2730), 'matplotlib.pyplot.plot', 'plt.plot', (['[tot_len // 2, tot_len // 2]', '[0, tot_len]', '"""k-"""'], {'linewidth': '"""0.6"""'}), "([tot_len // 2, tot_len // 2], [0, tot_len], 'k-', linewidth='0.6')\n", (2663, 2730), True, 'import matplotlib.pyplot as plt\n'), ((2732, 2807), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, tot_len]', '[tot_len // 2, tot_len // 2]', '"""k-"""'], {'linewidth': '"""0.6"""'}), "([0, tot_len], [tot_len // 2, tot_len // 2], 'k-', linewidth='0.6')\n", (2740, 2807), True, 'import matplotlib.pyplot as plt\n'), 
((2809, 2843), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, tot_len, 0, tot_len]'], {}), '([0, tot_len, 0, tot_len])\n', (2817, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2849, 2982), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)', 'left': '(False)', 'right': '(False)', 'labelbottom': '(False)', 'labelleft': '(False)'}), "(axis='both', which='both', bottom=False, top=False, left=\n False, right=False, labelbottom=False, labelleft=False)\n", (2864, 2982), True, 'import matplotlib.pyplot as plt\n'), ((2998, 3027), 'matplotlib.pyplot.title', 'plt.title', (['"""Generated Images"""'], {}), "('Generated Images')\n", (3007, 3027), True, 'import matplotlib.pyplot as plt\n'), ((3513, 3536), 'numpy.indices', 'np.indices', (['image.shape'], {}), '(image.shape)\n', (3523, 3536), True, 'import numpy as np\n'), ((3640, 3678), 'numpy.hypot', 'np.hypot', (['(x - center[0])', '(y - center[1])'], {}), '(x - center[0], y - center[1])\n', (3648, 3678), True, 'import numpy as np\n'), ((3713, 3731), 'numpy.argsort', 'np.argsort', (['r.flat'], {}), '(r.flat)\n', (3723, 3731), True, 'import numpy as np\n'), ((4203, 4235), 'numpy.cumsum', 'np.cumsum', (['i_sorted'], {'dtype': 'float'}), '(i_sorted, dtype=float)\n', (4212, 4235), True, 'import numpy as np\n'), ((4552, 4601), 'scipy.fftpack.fft2', 'fftpack.fft2', (['((image - GLOBAL_MEAN) / GLOBAL_MEAN)'], {}), '((image - GLOBAL_MEAN) / GLOBAL_MEAN)\n', (4564, 4601), False, 'from scipy import fftpack\n'), ((4609, 4629), 'scipy.fftpack.fftshift', 'fftpack.fftshift', (['F1'], {}), '(F1)\n', (4625, 4629), False, 'from scipy import fftpack\n'), ((5588, 5611), 'numpy.mean', 'np.mean', (['Pk_val'], {'axis': '(0)'}), '(Pk_val, axis=0)\n', (5595, 5611), True, 'import numpy as np\n'), ((5627, 5650), 'numpy.mean', 'np.mean', (['Pk_gen'], {'axis': '(0)'}), '(Pk_gen, axis=0)\n', (5634, 5650), True, 'import numpy as np\n'), ((5665, 5687), 'numpy.std', 
'np.std', (['Pk_val'], {'axis': '(0)'}), '(Pk_val, axis=0)\n', (5671, 5687), True, 'import numpy as np\n'), ((5702, 5724), 'numpy.std', 'np.std', (['Pk_gen'], {'axis': '(0)'}), '(Pk_gen, axis=0)\n', (5708, 5724), True, 'import numpy as np\n'), ((5730, 5742), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5740, 5742), True, 'import matplotlib.pyplot as plt\n'), ((5747, 5834), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['k', '(gen_mean - gen_std)', '(gen_mean + gen_std)'], {'color': '"""red"""', 'alpha': '(0.4)'}), "(k, gen_mean - gen_std, gen_mean + gen_std, color='red',\n alpha=0.4)\n", (5763, 5834), True, 'import matplotlib.pyplot as plt\n'), ((5835, 5863), 'matplotlib.pyplot.plot', 'plt.plot', (['k', 'gen_mean', '"""r--"""'], {}), "(k, gen_mean, 'r--')\n", (5843, 5863), True, 'import matplotlib.pyplot as plt\n'), ((5868, 5895), 'matplotlib.pyplot.plot', 'plt.plot', (['k', 'val_mean', '"""k:"""'], {}), "(k, val_mean, 'k:')\n", (5876, 5895), True, 'import matplotlib.pyplot as plt\n'), ((5900, 5937), 'matplotlib.pyplot.plot', 'plt.plot', (['k', '(val_mean + val_std)', '"""k-"""'], {}), "(k, val_mean + val_std, 'k-')\n", (5908, 5937), True, 'import matplotlib.pyplot as plt\n'), ((5942, 5979), 'matplotlib.pyplot.plot', 'plt.plot', (['k', '(val_mean - val_std)', '"""k-"""'], {}), "(k, val_mean - val_std, 'k-')\n", (5950, 5979), True, 'import matplotlib.pyplot as plt\n'), ((5984, 6001), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (5994, 6001), True, 'import matplotlib.pyplot as plt\n'), ((6006, 6023), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (6016, 6023), True, 'import matplotlib.pyplot as plt\n'), ((6028, 6048), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P(k)$"""'], {}), "('$P(k)$')\n", (6038, 6048), True, 'import matplotlib.pyplot as plt\n'), ((6054, 6071), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k$"""'], {}), "('$k$')\n", (6064, 6071), True, 'import 
matplotlib.pyplot as plt\n'), ((6077, 6104), 'matplotlib.pyplot.title', 'plt.title', (['"""Power Spectrum"""'], {}), "('Power Spectrum')\n", (6086, 6104), True, 'import matplotlib.pyplot as plt\n'), ((443, 495), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num, 1, noise_vector_length)'}), '(size=(num, 1, noise_vector_length))\n', (459, 495), True, 'import numpy as np\n'), ((534, 572), 'numpy.take', 'np.take', (['samples', '(0)'], {'axis': 'channel_axis'}), '(samples, 0, axis=channel_axis)\n', (541, 572), True, 'import numpy as np\n'), ((1227, 1243), 'matplotlib.pyplot.axis', 'plt.axis', (['window'], {}), '(window)\n', (1235, 1243), True, 'import matplotlib.pyplot as plt\n'), ((1266, 1276), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1274, 1276), True, 'import matplotlib.pyplot as plt\n'), ((1295, 1327), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'format': '"""png"""'}), "(fname, format='png')\n", (1306, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1347), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1345, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1711, 1778), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(imgs_per_side ** 2, 1, noise_vector_length)'}), '(size=(imgs_per_side ** 2, 1, noise_vector_length))\n', (1727, 1778), True, 'import numpy as np\n'), ((1815, 1853), 'numpy.take', 'np.take', (['samples', '(0)'], {'axis': 'channel_axis'}), '(samples, 0, axis=channel_axis)\n', (1822, 1853), True, 'import numpy as np\n'), ((1907, 1926), 'numpy.squeeze', 'np.squeeze', (['samples'], {}), '(samples)\n', (1917, 1926), True, 'import numpy as np\n'), ((3050, 3060), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3058, 3060), True, 'import matplotlib.pyplot as plt\n'), ((3079, 3111), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'format': '"""png"""'}), "(fname, format='png')\n", (3090, 3111), True, 'import matplotlib.pyplot as plt\n'), ((3120, 3131), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3129, 3131), True, 'import matplotlib.pyplot as plt\n'), ((4016, 4032), 'numpy.where', 'np.where', (['deltar'], {}), '(deltar)\n', (4024, 4032), True, 'import numpy as np\n'), ((4644, 4654), 'numpy.abs', 'np.abs', (['F2'], {}), '(F2)\n', (4650, 4654), True, 'import numpy as np\n'), ((4964, 4980), 'numpy.array', 'np.array', (['Pk_arr'], {}), '(Pk_arr)\n', (4972, 4980), True, 'import numpy as np\n'), ((5262, 5309), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num, 1, noise_vect_len)'}), '(size=(num, 1, noise_vect_len))\n', (5278, 5309), True, 'import numpy as np\n'), ((5349, 5388), 'numpy.take', 'np.take', (['gen_imgs', '(0)'], {'axis': 'channel_axis'}), '(gen_imgs, 0, axis=channel_axis)\n', (5356, 5388), True, 'import numpy as np\n'), ((5443, 5463), 'numpy.squeeze', 'np.squeeze', (['gen_imgs'], {}), '(gen_imgs)\n', (5453, 5463), True, 'import numpy as np\n'), ((6127, 6137), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (6135, 6137), True, 'import matplotlib.pyplot as plt\n'), ((6156, 6188), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'format': '"""png"""'}), "(fname, format='png')\n", (6167, 6188), True, 'import matplotlib.pyplot as plt\n'), ((6197, 6208), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6206, 6208), True, 'import matplotlib.pyplot as plt\n'), ((913, 929), 'numpy.sqrt', 'np.sqrt', (['valhist'], {}), '(valhist)\n', (920, 929), True, 'import numpy as np\n'), ((1002, 1019), 'numpy.sqrt', 'np.sqrt', (['samphist'], {}), '(samphist)\n', (1009, 1019), True, 'import numpy as np\n'), ((1432, 1465), 'numpy.power', 'np.power', (['(valhist - samphist)', '(2.0)'], {}), '(valhist - samphist, 2.0)\n', (1440, 1465), True, 'import numpy as np\n'), ((4901, 4921), 'numpy.squeeze', 'np.squeeze', (['arr[idx]'], {}), '(arr[idx])\n', (4911, 4921), True, 'import numpy as np\n'), ((6237, 6271), 'numpy.power', 'np.power', (['(gen_mean - val_mean)', '(2.0)'], {}), 
'(gen_mean - val_mean, 2.0)\n', (6245, 6271), True, 'import numpy as np\n'), ((2390, 2431), 'matplotlib.colors.PowerNorm', 'PowerNorm', ([], {'gamma': '(0.2)', 'vmin': '(0.0)', 'vmax': '(2000)'}), '(gamma=0.2, vmin=0.0, vmax=2000)\n', (2399, 2431), False, 'from matplotlib.colors import LogNorm, PowerNorm, Normalize\n'), ((2575, 2605), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(-1.0)', 'vmax': '(1.0)'}), '(vmin=-1.0, vmax=1.0)\n', (2584, 2605), False, 'from matplotlib.colors import LogNorm, PowerNorm, Normalize\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed May 17 16:45:30 2017
@author: RunNing
"""
import random
import numpy as np
import matplotlib.pyplot as plt
import abc
class Algorithm(metaclass=abc.ABCMeta):
    """Abstract interface every bandit algorithm must implement."""

    @abc.abstractmethod
    def reset(self):
        """Clear all learned state before a fresh simulation run."""
        return

    @abc.abstractmethod
    def select(self, t):
        """Return the index of the arm to pull at round ``t``."""
        return

    @abc.abstractmethod
    def receive(self, i, r):
        """Record reward ``r`` obtained from pulling arm ``i``."""
        return
class Simulator:
    """Bernoulli-bandit simulator.

    Arms are Bernoulli with success probabilities ``params``; regret is
    measured against always pulling the best arm.
    """

    def __init__(self, n, params):
        self.n = n            # number of arms
        self.params = params  # success probability of each arm

    def simulate(self, algorithm, play_rounds, repeat_times):
        """Run ``algorithm`` for ``play_rounds`` rounds, averaged over
        ``repeat_times`` independent repetitions.

        Returns an array of length ``play_rounds + 1`` holding the average
        cumulative regret after each round (index 0 is always 0).
        """
        regret = np.zeros(play_rounds + 1)
        # Best achievable expected cumulative reward after t rounds.
        max_reward = np.array(range(play_rounds + 1)) * max(self.params)
        for _ in range(repeat_times):
            algorithm.reset()
            reward = 0
            for t in range(1, play_rounds + 1):
                arm = algorithm.select(t)
                # Bernoulli draw for the chosen arm.
                r = 1 if self.params[arm] >= random.uniform(0, 1) else 0
                algorithm.receive(arm, r)
                reward += r
                regret[t] += max_reward[t] - reward
        return regret / repeat_times
class NaiveAlgorithm:
    """Explore-then-commit: pull each arm a fixed number of times, then
    commit forever to the arm with the highest total observed reward."""

    def __init__(self, n, explore_rounds):
        self.n = n                            # number of arms
        self.explore_rounds = explore_rounds  # pulls per arm while exploring
        self.sum_reward = None
        self.fixed_selection = None
        self.idx_max_param = None

    def reset(self):
        """Rebuild the exploration schedule and clear reward totals."""
        self.sum_reward = np.zeros(self.n)
        # fixed_selection[t] is the arm to pull in exploration round t
        # (index 0 is a placeholder; rounds are 1-based).
        self.fixed_selection = np.zeros(self.n * self.explore_rounds + 1, dtype=int)
        round_idx = 1
        for arm in range(self.n):
            for _ in range(self.explore_rounds):
                self.fixed_selection[round_idx] = arm
                round_idx += 1

    def select(self, t):
        """Return the scheduled arm while exploring, then the committed arm."""
        if t <= self.n * self.explore_rounds:
            return self.fixed_selection[t]
        if t == self.n * self.explore_rounds + 1:
            # First round after exploration: commit to the empirical best arm.
            self.idx_max_param = np.argmax(self.sum_reward)
        return self.idx_max_param

    def receive(self, i, r):
        """Accumulate reward ``r`` for arm ``i``."""
        self.sum_reward[i] += r
class EpsilonGreedy:
    """Epsilon-greedy: with probability ``epsilon`` explore a uniformly
    random arm, otherwise exploit the best empirical-mean arm."""

    def __init__(self, n, epsilon):
        self.n = n
        self.epsilon = epsilon  # exploration probability
        self.sum_reward = None
        self.mean_reward = None
        self.counts = None

    def reset(self):
        """Zero out all per-arm statistics."""
        self.sum_reward = np.zeros(self.n)
        self.mean_reward = np.zeros(self.n)
        self.counts = np.zeros(self.n)

    def select(self, t):
        """Pick an arm: random with probability epsilon, else greedy."""
        # One uniform draw per call, matching the exploration coin flip.
        if random.uniform(0, 1) < self.epsilon:
            return random.randint(0, self.n - 1)
        return np.argmax(self.mean_reward)

    def receive(self, i, r):
        """Update count, total and empirical mean of arm ``i``."""
        self.counts[i] += 1
        self.sum_reward[i] += r
        self.mean_reward[i] = self.sum_reward[i] / self.counts[i]
class UCB:
    """Upper-confidence-bound algorithm.

    Each arm is pulled once during rounds 1..n; afterwards the arm
    maximizing ``mean + sqrt(mean * c) + c`` with ``c = log(t) / count``
    is selected.
    """

    def __init__(self, n):
        self.n = n
        self.fixed_selection = None
        self.sum_reward = None
        self.mean_reward = None
        self.counts = None

    def reset(self):
        """Zero the statistics and schedule one initial pull per arm."""
        self.sum_reward = np.zeros(self.n)
        self.mean_reward = np.zeros(self.n)
        self.counts = np.zeros(self.n)
        # fixed_selection[t] == t - 1, so rounds 1..n pull each arm once.
        self.fixed_selection = np.arange(-1, self.n)

    def select(self, t):
        """Return the arm with the largest upper confidence bound."""
        if t <= self.n:
            return self.fixed_selection[t]
        delta = np.sqrt(1 / t)
        bonus = 2 * np.log(1 / delta) / self.counts  # == log(t) / counts
        bounds = self.mean_reward + np.sqrt(self.mean_reward * bonus) + bonus
        return np.argmax(bounds)

    def receive(self, i, r):
        """Update count, total and empirical mean of arm ``i``."""
        self.counts[i] += 1
        self.sum_reward[i] += r
        self.mean_reward[i] = self.sum_reward[i] / self.counts[i]
class TS:
    """Thompson sampling with Beta(1, 1) priors on Bernoulli arms."""

    def __init__(self, n):
        self.n = n
        self.beta_S = None  # per-arm successes + 1 (Beta alpha)
        self.beta_F = None  # per-arm failures + 1 (Beta beta)

    def reset(self):
        """Restore the uniform Beta(1, 1) prior on every arm."""
        self.beta_S = np.ones(self.n)
        self.beta_F = np.ones(self.n)

    def select(self, t):
        """Sample a success rate per arm from its posterior; pick the best."""
        theta = np.zeros(self.n)
        # One betavariate draw per arm, in arm order.
        for arm in range(self.n):
            theta[arm] = random.betavariate(self.beta_S[arm], self.beta_F[arm])
        return np.argmax(theta)

    def receive(self, i, r):
        """Posterior update: reward 1 counts as a success, else a failure."""
        if r == 1:
            self.beta_S[i] += 1
        else:
            self.beta_F[i] += 1
# Register the concrete strategies as virtual subclasses of Algorithm so
# isinstance/issubclass checks succeed without inheriting from the ABC.
Algorithm.register(NaiveAlgorithm)
Algorithm.register(EpsilonGreedy)
Algorithm.register(UCB)
Algorithm.register(TS)
def cpu_lower_bound(params, play_rounds):
    """Lai-Robbins asymptotic lower bound on cumulative regret.

    For Bernoulli arms with success probabilities ``params`` the regret of
    any consistent policy grows at least like
    ``sum_i (p* - p_i) / KL(p_i, p*) * log(t)``.

    Returns an array of length ``play_rounds + 1`` with the bound after each
    round (entry 0 is 0).
    """
    def kl_divergence(p, q):
        # Bernoulli KL divergence; a tiny positive value stands in for the
        # p == q case to avoid division by zero (that term's numerator is 0).
        if p == q:
            return 1e-9
        return p*np.log(p/q) + (1-p)*np.log((1-p)/(1-q))

    p_best = max(params)
    coef = np.sum([(p_best-p)/kl_divergence(p, p_best) for p in params])
    bound = np.arange(play_rounds+1, dtype=float)
    bound[1:] = coef*np.log(bound[1:])
    return bound
def run_experiment(setting):
    """Simulate all four bandit algorithms on one arm configuration and save
    a two-panel regret plot (linear and log-x axis) as '<notation>.png'.

    setting: [n, notation, params] -- number of arms, plot label prefix, and
    the per-arm Bernoulli success probabilities.

    NOTE: relies on pyplot's implicit current-figure/axes state, so the
    order of the plotting calls below is significant.
    """
    play_rounds = 100000
    repeat_times = 1000
    n = setting[0]
    notation = setting[1]
    params = setting[2]
    simulator = Simulator(n, params)
    # Asymptotic lower bound, shown for reference in the log-axis panel.
    lower_bound = cpu_lower_bound(params, play_rounds)
    algorithms = [[NaiveAlgorithm(n, 100), 'Naive'],
                  [EpsilonGreedy(n, 0.1), 'Greedy'],
                  [UCB(n), 'UCB'],
                  [TS(n), 'TS']]
    # Average cumulative regret curve for each algorithm.
    result = [[simulator.simulate(algorithm, play_rounds, repeat_times), name]
              for algorithm, name in algorithms]
    # Left panel: regret on a linear axis.
    plt.figure(figsize=(10, 5))
    plt.subplot(121)
    for regret, name in result:
        plt.plot(regret, label=name)
    plt.legend(loc='upper left', frameon=False)
    plt.xlabel('t')
    plt.ylabel('regret')
    plt.title(notation+' normal axis')
    # Right panel: same curves plus the lower bound, log-scaled x axis.
    plt.subplot(122)
    for regret, name in result:
        plt.plot(regret, label=name)
    plt.plot(lower_bound, label='lower_bound')
    plt.legend(loc='upper left', frameon=False)
    plt.xscale('log')
    plt.xlabel('t')
    plt.ylabel('regret')
    plt.title(notation+' log axis')
    plt.savefig(notation+'.png', dpi=600)
if __name__ == '__main__':
    from multiprocessing import Pool
    # Arm configurations: [number of arms, plot label, per-arm success
    # probabilities].  A* = 2 arms, B* = 5 arms, C* = 15 arms.
    settings = [[2, 'A1', [0.9, 0.8]],
                [2, 'A2', [0.6, 0.5]],
                [2, 'A3', [0.9, 0.2]],
                [5, 'B1', [0.9, 0.88, 0.86, 0.84, 0.82]],
                [5, 'B2', [0.6, 0.58, 0.56, 0.54, 0.52]],
                [5, 'B3', [0.9, 0.7, 0.5, 0.3, 0.1]],
                [15, 'C1', [0.9, 0.88, 0.86, 0.84, 0.82, 0.8, 0.78,
                            0.76, 0.74, 0.72, 0.7, 0.68, 0.66, 0.64, 0.62]],
                [15, 'C2', [0.65, 0.63, 0.61, 0.59, 0.57, 0.55, 0.53,
                            0.51, 0.49, 0.47, 0.45, 0.43, 0.41, 0.39, 0.37]],
                [15, 'C3', [0.89, 0.83, 0.77, 0.71, 0.65, 0.59, 0.53,
                            0.47, 0.41, 0.35, 0.29, 0.23, 0.17, 0.11, 0.05]]]
    # One worker per configuration; each run_experiment call saves its own PNG.
    pool = Pool()
    pool_outputs = pool.map(run_experiment, settings)
    pool.close()
    pool.join()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"random.randint",
"random.uniform",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.argmax",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyp... | [((5032, 5071), 'numpy.arange', 'np.arange', (['(play_rounds + 1)'], {'dtype': 'float'}), '(play_rounds + 1, dtype=float)\n', (5041, 5071), True, 'import numpy as np\n'), ((5737, 5764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (5747, 5764), True, 'import matplotlib.pyplot as plt\n'), ((5770, 5786), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (5781, 5786), True, 'import matplotlib.pyplot as plt\n'), ((5863, 5906), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'frameon': '(False)'}), "(loc='upper left', frameon=False)\n", (5873, 5906), True, 'import matplotlib.pyplot as plt\n'), ((5912, 5927), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (5922, 5927), True, 'import matplotlib.pyplot as plt\n'), ((5933, 5953), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""regret"""'], {}), "('regret')\n", (5943, 5953), True, 'import matplotlib.pyplot as plt\n'), ((5959, 5995), 'matplotlib.pyplot.title', 'plt.title', (["(notation + ' normal axis')"], {}), "(notation + ' normal axis')\n", (5968, 5995), True, 'import matplotlib.pyplot as plt\n'), ((5999, 6015), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (6010, 6015), True, 'import matplotlib.pyplot as plt\n'), ((6092, 6134), 'matplotlib.pyplot.plot', 'plt.plot', (['lower_bound'], {'label': '"""lower_bound"""'}), "(lower_bound, label='lower_bound')\n", (6100, 6134), True, 'import matplotlib.pyplot as plt\n'), ((6140, 6183), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'frameon': '(False)'}), "(loc='upper left', frameon=False)\n", (6150, 6183), True, 'import matplotlib.pyplot as plt\n'), ((6189, 6206), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (6199, 6206), True, 'import matplotlib.pyplot as plt\n'), ((6212, 6227), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", 
(6222, 6227), True, 'import matplotlib.pyplot as plt\n'), ((6233, 6253), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""regret"""'], {}), "('regret')\n", (6243, 6253), True, 'import matplotlib.pyplot as plt\n'), ((6259, 6292), 'matplotlib.pyplot.title', 'plt.title', (["(notation + ' log axis')"], {}), "(notation + ' log axis')\n", (6268, 6292), True, 'import matplotlib.pyplot as plt\n'), ((6296, 6335), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(notation + '.png')"], {'dpi': '(600)'}), "(notation + '.png', dpi=600)\n", (6307, 6335), True, 'import matplotlib.pyplot as plt\n'), ((7194, 7200), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (7198, 7200), False, 'from multiprocessing import Pool\n'), ((630, 655), 'numpy.zeros', 'np.zeros', (['(play_rounds + 1)'], {}), '(play_rounds + 1)\n', (638, 655), True, 'import numpy as np\n'), ((1604, 1620), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (1612, 1620), True, 'import numpy as np\n'), ((1653, 1706), 'numpy.zeros', 'np.zeros', (['(self.n * self.explore_rounds + 1)'], {'dtype': 'int'}), '(self.n * self.explore_rounds + 1, dtype=int)\n', (1661, 1706), True, 'import numpy as np\n'), ((2504, 2520), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (2512, 2520), True, 'import numpy as np\n'), ((2549, 2565), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (2557, 2565), True, 'import numpy as np\n'), ((2589, 2605), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (2597, 2605), True, 'import numpy as np\n'), ((2652, 2672), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2666, 2672), False, 'import random\n'), ((3329, 3345), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (3337, 3345), True, 'import numpy as np\n'), ((3374, 3390), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (3382, 3390), True, 'import numpy as np\n'), ((3414, 3430), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (3422, 3430), True, 'import numpy as 
np\n'), ((3463, 3484), 'numpy.arange', 'np.arange', (['(-1)', 'self.n'], {}), '(-1, self.n)\n', (3472, 3484), True, 'import numpy as np\n'), ((4175, 4190), 'numpy.ones', 'np.ones', (['self.n'], {}), '(self.n)\n', (4182, 4190), True, 'import numpy as np\n'), ((4214, 4229), 'numpy.ones', 'np.ones', (['self.n'], {}), '(self.n)\n', (4221, 4229), True, 'import numpy as np\n'), ((4275, 4291), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (4283, 4291), True, 'import numpy as np\n'), ((4416, 4432), 'numpy.argmax', 'np.argmax', (['theta'], {}), '(theta)\n', (4425, 4432), True, 'import numpy as np\n'), ((5097, 5115), 'numpy.log', 'np.log', (['regret[1:]'], {}), '(regret[1:])\n', (5103, 5115), True, 'import numpy as np\n'), ((5829, 5857), 'matplotlib.pyplot.plot', 'plt.plot', (['regret'], {'label': 'name'}), '(regret, label=name)\n', (5837, 5857), True, 'import matplotlib.pyplot as plt\n'), ((6058, 6086), 'matplotlib.pyplot.plot', 'plt.plot', (['regret'], {'label': 'name'}), '(regret, label=name)\n', (6066, 6086), True, 'import matplotlib.pyplot as plt\n'), ((2817, 2846), 'random.randint', 'random.randint', (['(0)', '(self.n - 1)'], {}), '(0, self.n - 1)\n', (2831, 2846), False, 'import random\n'), ((2877, 2904), 'numpy.argmax', 'np.argmax', (['self.mean_reward'], {}), '(self.mean_reward)\n', (2886, 2904), True, 'import numpy as np\n'), ((3618, 3632), 'numpy.sqrt', 'np.sqrt', (['(1 / t)'], {}), '(1 / t)\n', (3625, 3632), True, 'import numpy as np\n'), ((3791, 3825), 'numpy.argmax', 'np.argmax', (['upper_confidence_bounds'], {}), '(upper_confidence_bounds)\n', (3800, 3825), True, 'import numpy as np\n'), ((4349, 4399), 'random.betavariate', 'random.betavariate', (['self.beta_S[i]', 'self.beta_F[i]'], {}), '(self.beta_S[i], self.beta_F[i])\n', (4367, 4399), False, 'import random\n'), ((983, 1003), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (997, 1003), False, 'import random\n'), ((2102, 2128), 'numpy.argmax', 'np.argmax', 
(['self.sum_reward'], {}), '(self.sum_reward)\n', (2111, 2128), True, 'import numpy as np\n'), ((3652, 3669), 'numpy.log', 'np.log', (['(1 / delta)'], {}), '(1 / delta)\n', (3658, 3669), True, 'import numpy as np\n'), ((3738, 3769), 'numpy.sqrt', 'np.sqrt', (['(self.mean_reward * tmp)'], {}), '(self.mean_reward * tmp)\n', (3745, 3769), True, 'import numpy as np\n'), ((4869, 4882), 'numpy.log', 'np.log', (['(p / q)'], {}), '(p / q)\n', (4875, 4882), True, 'import numpy as np\n'), ((4889, 4914), 'numpy.log', 'np.log', (['((1 - p) / (1 - q))'], {}), '((1 - p) / (1 - q))\n', (4895, 4914), True, 'import numpy as np\n')] |
from numpy.lib.twodim_base import mask_indices
from libs.MyType import *
import numpy as np
def getVecLength3D(v:Vec3D):
length=np.sqrt(v.x**2+v.y**2+v.z**2)
return length
def vectDot(v1:Vec3D,v2:Vec3D,default=True):#!这里改写了原来的方法,将default参数改为TRUE来使用原来的方法
if default:
l3=np.sqrt((v1.x-v2.x)**2+(v1.y-v2.y)**2+(v1.z-v2.z)**2)
l1=getVecLength3D(v1)
l2=getVecLength3D(v2)
costheta=(l1*l1+l2*l2-l3*l3)/(2*l1*l2)
result=l1*l2*costheta
return result
else:
return v1.x*v2.x+v1.y*v2.y+v1.z*v2.z
def vectCross(vec1:Vec3D,vec2:Vec3D):
    """Return the cross product ``vec1 x vec2`` as a new Vec3D."""
    product = Vec3D()
    product.x = vec1.y * vec2.z - vec2.y * vec1.z
    product.y = vec1.z * vec2.x - vec1.x * vec2.z
    product.z = vec1.x * vec2.y - vec2.x * vec1.y
    return product
def scaleVect(vec:Vec3D,sclfactor):
    """Return a new Vec3D equal to ``vec`` scaled by ``sclfactor``."""
    scaled = Vec3D(sclfactor * vec.x, sclfactor * vec.y, sclfactor * vec.z)
    return scaled
def addVect(vec1:Vec3D,vec2:Vec3D):
    """Return the component-wise sum of two Vec3D values."""
    return Vec3D(vec1.x + vec2.x, vec1.y + vec2.y, vec1.z + vec2.z)
def subtractVect(vec1:Vec3D,vec2:Vec3D):
    """Return the component-wise difference ``vec1 - vec2`` as a new Vec3D."""
    return Vec3D(vec1.x - vec2.x, vec1.y - vec2.y, vec1.z - vec2.z)
def cal_eigenvalue():
    """Return the eigenvalues of a fixed 3x3 complex test matrix."""
    mat = np.array([[1, 2 * np.sqrt(3) + 3j, 3],
                    [12, 24, 5],
                    [-2, 5, 98]])
    # np.linalg.eig returns (eigenvalues, eigenvectors); only values are used
    values, _vectors = np.linalg.eig(mat)
    return values
def getDist2Point3D(point1:Vec3D,point2:Vec3D):
return np.sqrt(np.power(point1.x-point2.x,2)+np.power(point1.y-point2.y,2)+np.power(point1.z-point2.z,2))
def linearInterpolation3dMoreFrac(xfrac,yfrac,zfrac,leftbottom,lefttop,rightbottom,righttop,leftbottomNextZ,lefttopNextZ,rightbottomNextZ,righttopNextZ,units):
    """Trilinear interpolation between the eight corners of a cell.

    Blends the bilinear interpolation of the near-Z face with that of the
    next-Z face using ``zfrac``, then divides each component by ``units``.
    Returns the interpolated point as a Vec3D.
    """
    def _component(axis):
        # bilinear blend on the near-Z face (same term order as the original)
        near = ((1-xfrac)*(1-yfrac)*getattr(leftbottom, axis)
                + xfrac*(1-yfrac)*getattr(rightbottom, axis)
                + (1-xfrac)*yfrac*getattr(lefttop, axis)
                + xfrac*yfrac*getattr(righttop, axis))
        # bilinear blend on the next-Z face
        far = ((1-xfrac)*(1-yfrac)*getattr(leftbottomNextZ, axis)
               + xfrac*(1-yfrac)*getattr(rightbottomNextZ, axis)
               + (1-xfrac)*yfrac*getattr(lefttopNextZ, axis)
               + xfrac*yfrac*getattr(righttopNextZ, axis))
        return ((1-zfrac)*near + zfrac*far) / units
    return Vec3D(_component('x'), _component('y'), _component('z'))
if __name__ == "__main__":
    # Quick manual check: print the imaginary part of the second eigenvalue.
    print(cal_eigenvalue()[1].imag)
| [
"numpy.sqrt",
"numpy.linalg.eig",
"numpy.power"
] | [((133, 172), 'numpy.sqrt', 'np.sqrt', (['(v.x ** 2 + v.y ** 2 + v.z ** 2)'], {}), '(v.x ** 2 + v.y ** 2 + v.z ** 2)\n', (140, 172), True, 'import numpy as np\n'), ((1212, 1230), 'numpy.linalg.eig', 'np.linalg.eig', (['mat'], {}), '(mat)\n', (1225, 1230), True, 'import numpy as np\n'), ((291, 360), 'numpy.sqrt', 'np.sqrt', (['((v1.x - v2.x) ** 2 + (v1.y - v2.y) ** 2 + (v1.z - v2.z) ** 2)'], {}), '((v1.x - v2.x) ** 2 + (v1.y - v2.y) ** 2 + (v1.z - v2.z) ** 2)\n', (298, 360), True, 'import numpy as np\n'), ((1376, 1408), 'numpy.power', 'np.power', (['(point1.z - point2.z)', '(2)'], {}), '(point1.z - point2.z, 2)\n', (1384, 1408), True, 'import numpy as np\n'), ((1316, 1348), 'numpy.power', 'np.power', (['(point1.x - point2.x)', '(2)'], {}), '(point1.x - point2.x, 2)\n', (1324, 1348), True, 'import numpy as np\n'), ((1346, 1378), 'numpy.power', 'np.power', (['(point1.y - point2.y)', '(2)'], {}), '(point1.y - point2.y, 2)\n', (1354, 1378), True, 'import numpy as np\n'), ((1154, 1164), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1161, 1164), True, 'import numpy as np\n')] |
import fastscapelib_fortran as fs
import numpy as np
import xsimlab as xs
from .grid import UniformRectilinearGrid2D
@xs.process
class TotalVerticalMotion:
    """Sum up all vertical motions of bedrock and topographic surface,
    respectively.
    Vertical motions may result from external forcing, erosion and/or
    feedback of erosion on tectonics (isostasy).
    """
    # contributions collected from other processes via xsimlab variable groups
    bedrock_upward_vars = xs.group('bedrock_upward')
    surface_upward_vars = xs.group('surface_upward')
    surface_downward_vars = xs.group('surface_downward')
    bedrock_upward = xs.variable(
        dims=('y', 'x'),
        intent='out',
        description='bedrock motion in upward direction'
    )
    surface_upward = xs.variable(
        dims=('y', 'x'),
        intent='out',
        description='topographic surface motion in upward direction'
    )
    def run_step(self):
        # net surface motion = all upward terms minus all downward terms
        self.bedrock_upward = sum(self.bedrock_upward_vars)
        self.surface_upward = (sum(self.surface_upward_vars) -
                               sum(self.surface_downward_vars))
@xs.process
class SurfaceTopography:
    """Update the elevation of the (land and/or submarine) surface
    topography.
    """
    elevation = xs.variable(
        dims=('y', 'x'),
        intent='inout',
        description='surface topography elevation'
    )
    # net upward motion computed by TotalVerticalMotion
    motion_upward = xs.foreign(TotalVerticalMotion, 'surface_upward')
    def finalize_step(self):
        # apply the accumulated vertical motion at the end of each step
        self.elevation += self.motion_upward
@xs.process
class SurfaceToErode:
    """Defines the topographic surface used for the computation of erosion
    processes.
    In this process class, it simply corresponds to the topographic
    surface, unchanged, at the current time step.
    Sometimes it would make sense to compute erosion processes after
    having applied other processes such as tectonic forcing. This
    could be achieved by subclassing.
    """
    topo_elevation = xs.foreign(SurfaceTopography, 'elevation')
    elevation = xs.variable(
        dims=('y', 'x'),
        intent='out',
        description='surface elevation before erosion'
    )
    def run_step(self):
        # pass-through: erosion sees the current topographic surface as-is
        self.elevation = self.topo_elevation
@xs.process
class Bedrock:
    """Update the elevation of bedrock (i.e., land and/or submarine
    basement).
    """
    elevation = xs.variable(
        dims=('y', 'x'),
        intent='inout',
        description='bedrock elevation'
    )
    depth = xs.on_demand(
        dims=('y', 'x'),
        description='bedrock depth below topographic surface'
    )
    bedrock_motion_up = xs.foreign(TotalVerticalMotion, 'bedrock_upward')
    surface_motion_up = xs.foreign(TotalVerticalMotion, 'surface_upward')
    surface_elevation = xs.foreign(SurfaceTopography, 'elevation')
    @depth.compute
    def _depth(self):
        # depth is always relative to the current topographic surface
        return self.surface_elevation - self.elevation
    def initialize(self):
        # sanity check: bedrock can never start above the topographic surface
        if np.any(self.elevation > self.surface_elevation):
            raise ValueError("Encountered bedrock elevation higher than "
                             "topographic surface elevation.")
    def run_step(self):
        # clip the moved bedrock to the moved surface so it never ends up above it
        self._elevation_next = np.minimum(
            self.elevation + self.bedrock_motion_up,
            self.surface_elevation + self.surface_motion_up
        )
    def finalize_step(self):
        # commit the elevation computed in run_step
        self.elevation = self._elevation_next
@xs.process
class UniformSedimentLayer:
    """Uniform sediment (or regolith, or soil) layer.
    This layer has uniform properties (undefined in this class) and
    generally undergo under active erosion, transport and deposition
    processes.
    """
    surf_elevation = xs.foreign(SurfaceTopography, 'elevation')
    bedrock_elevation = xs.foreign(Bedrock, 'elevation')
    thickness = xs.variable(
        dims=('y', 'x'),
        intent='out',
        description='sediment layer thickness'
    )
    @thickness.compute
    def _get_thickness(self):
        # the layer is simply everything between bedrock and the surface
        return self.surf_elevation - self.bedrock_elevation
    def initialize(self):
        self.thickness = self._get_thickness()
    def run_step(self):
        # recompute every step: both bounding surfaces may have moved
        self.thickness = self._get_thickness()
@xs.process
class TerrainDerivatives:
    """Compute, on demand, terrain derivatives such as slope or
    curvature.
    """
    shape = xs.foreign(UniformRectilinearGrid2D, 'shape')
    spacing = xs.foreign(UniformRectilinearGrid2D, 'spacing')
    elevation = xs.foreign(SurfaceTopography, 'elevation')
    slope = xs.on_demand(
        dims=('y', 'x'),
        description='terrain local slope'
    )
    curvature = xs.on_demand(
        dims=('y', 'x'),
        description='terrain local curvature'
    )
    @slope.compute
    def _slope(self):
        # fastscapelib-fortran fills the flat output buffer in place
        slope = np.empty_like(self.elevation)
        ny, nx = self.shape
        dy, dx = self.spacing
        fs.slope(self.elevation.ravel(), slope.ravel(), nx, ny, dx, dy)
        return slope
    @curvature.compute
    def _curvature(self):
        # same pattern as _slope, using the fortran curvature routine
        curv = np.empty_like(self.elevation)
        ny, nx = self.shape
        dy, dx = self.spacing
        fs.curvature(self.elevation.ravel(), curv.ravel(), nx, ny, dx, dy)
        return curv
@xs.process
class StratigraphicHorizons:
    """Generate a fixed number of stratigraphic horizons.
    A horizon is active, i.e., it tracks the evolution of the
    land/submarine topographic surface until it is "frozen" at a given
    time. Beyond this freezing (or deactivation) time, the horizon
    will only be affected by tectonic deformation and/or erosion.
    To compute diagnostics on those horizons, you can create a
    subclass where you can add "on_demand" variables.
    """
    freeze_time = xs.variable(
        dims='horizon',
        description='horizon freezing (deactivation) time',
        static=True
    )
    horizon = xs.index(dims='horizon', description='horizon number')
    active = xs.variable(
        dims='horizon',
        intent='out',
        description='whether the horizon is active or not'
    )
    surf_elevation = xs.foreign(SurfaceTopography, 'elevation')
    elevation_motion = xs.foreign(TotalVerticalMotion, 'surface_upward')
    bedrock_motion = xs.foreign(TotalVerticalMotion, 'bedrock_upward')
    elevation = xs.variable(
        dims=('horizon', 'y', 'x'),
        intent='out',
        description='elevation of horizon surfaces'
    )
    @xs.runtime(args='sim_start')
    def initialize(self, start_time):
        # a horizon frozen before the simulation starts would never be tracked
        if np.any(self.freeze_time < start_time):
            raise ValueError("'freeze_time' value must be greater than the "
                             "time of the beginning of the simulation")
        # all horizons start on the current topographic surface
        self.elevation = np.repeat(self.surf_elevation[None, :, :],
                                   self.freeze_time.size,
                                   axis=0)
        self.horizon = np.arange(0, len(self.freeze_time))
        self.active = np.full_like(self.freeze_time, True, dtype=bool)
    @xs.runtime(args='step_start')
    def run_step(self, current_time):
        # a horizon stays active until its freeze time is reached
        self.active = current_time < self.freeze_time
    def finalize_step(self):
        elevation_next = self.surf_elevation + self.elevation_motion
        # active horizons follow the topographic surface ...
        self.elevation[self.active] = elevation_next
        # ... frozen ones only deform with bedrock, clipped to the surface
        self.elevation[~self.active] = np.minimum(
            self.elevation[~self.active] + self.bedrock_motion,
            elevation_next
        )
| [
"xsimlab.foreign",
"numpy.repeat",
"xsimlab.runtime",
"numpy.minimum",
"numpy.full_like",
"numpy.any",
"xsimlab.variable",
"numpy.empty_like",
"xsimlab.group",
"xsimlab.on_demand",
"xsimlab.index"
] | [((403, 429), 'xsimlab.group', 'xs.group', (['"""bedrock_upward"""'], {}), "('bedrock_upward')\n", (411, 429), True, 'import xsimlab as xs\n'), ((456, 482), 'xsimlab.group', 'xs.group', (['"""surface_upward"""'], {}), "('surface_upward')\n", (464, 482), True, 'import xsimlab as xs\n'), ((511, 539), 'xsimlab.group', 'xs.group', (['"""surface_downward"""'], {}), "('surface_downward')\n", (519, 539), True, 'import xsimlab as xs\n'), ((562, 659), 'xsimlab.variable', 'xs.variable', ([], {'dims': "('y', 'x')", 'intent': '"""out"""', 'description': '"""bedrock motion in upward direction"""'}), "(dims=('y', 'x'), intent='out', description=\n 'bedrock motion in upward direction')\n", (573, 659), True, 'import xsimlab as xs\n'), ((706, 815), 'xsimlab.variable', 'xs.variable', ([], {'dims': "('y', 'x')", 'intent': '"""out"""', 'description': '"""topographic surface motion in upward direction"""'}), "(dims=('y', 'x'), intent='out', description=\n 'topographic surface motion in upward direction')\n", (717, 815), True, 'import xsimlab as xs\n'), ((1201, 1294), 'xsimlab.variable', 'xs.variable', ([], {'dims': "('y', 'x')", 'intent': '"""inout"""', 'description': '"""surface topography elevation"""'}), "(dims=('y', 'x'), intent='inout', description=\n 'surface topography elevation')\n", (1212, 1294), True, 'import xsimlab as xs\n'), ((1341, 1390), 'xsimlab.foreign', 'xs.foreign', (['TotalVerticalMotion', '"""surface_upward"""'], {}), "(TotalVerticalMotion, 'surface_upward')\n", (1351, 1390), True, 'import xsimlab as xs\n'), ((1915, 1957), 'xsimlab.foreign', 'xs.foreign', (['SurfaceTopography', '"""elevation"""'], {}), "(SurfaceTopography, 'elevation')\n", (1925, 1957), True, 'import xsimlab as xs\n'), ((1975, 2070), 'xsimlab.variable', 'xs.variable', ([], {'dims': "('y', 'x')", 'intent': '"""out"""', 'description': '"""surface elevation before erosion"""'}), "(dims=('y', 'x'), intent='out', description=\n 'surface elevation before erosion')\n", (1986, 2070), True, 'import 
xsimlab as xs\n'), ((2303, 2380), 'xsimlab.variable', 'xs.variable', ([], {'dims': "('y', 'x')", 'intent': '"""inout"""', 'description': '"""bedrock elevation"""'}), "(dims=('y', 'x'), intent='inout', description='bedrock elevation')\n", (2314, 2380), True, 'import xsimlab as xs\n'), ((2424, 2513), 'xsimlab.on_demand', 'xs.on_demand', ([], {'dims': "('y', 'x')", 'description': '"""bedrock depth below topographic surface"""'}), "(dims=('y', 'x'), description=\n 'bedrock depth below topographic surface')\n", (2436, 2513), True, 'import xsimlab as xs\n'), ((2556, 2605), 'xsimlab.foreign', 'xs.foreign', (['TotalVerticalMotion', '"""bedrock_upward"""'], {}), "(TotalVerticalMotion, 'bedrock_upward')\n", (2566, 2605), True, 'import xsimlab as xs\n'), ((2630, 2679), 'xsimlab.foreign', 'xs.foreign', (['TotalVerticalMotion', '"""surface_upward"""'], {}), "(TotalVerticalMotion, 'surface_upward')\n", (2640, 2679), True, 'import xsimlab as xs\n'), ((2705, 2747), 'xsimlab.foreign', 'xs.foreign', (['SurfaceTopography', '"""elevation"""'], {}), "(SurfaceTopography, 'elevation')\n", (2715, 2747), True, 'import xsimlab as xs\n'), ((3616, 3658), 'xsimlab.foreign', 'xs.foreign', (['SurfaceTopography', '"""elevation"""'], {}), "(SurfaceTopography, 'elevation')\n", (3626, 3658), True, 'import xsimlab as xs\n'), ((3683, 3715), 'xsimlab.foreign', 'xs.foreign', (['Bedrock', '"""elevation"""'], {}), "(Bedrock, 'elevation')\n", (3693, 3715), True, 'import xsimlab as xs\n'), ((3733, 3820), 'xsimlab.variable', 'xs.variable', ([], {'dims': "('y', 'x')", 'intent': '"""out"""', 'description': '"""sediment layer thickness"""'}), "(dims=('y', 'x'), intent='out', description=\n 'sediment layer thickness')\n", (3744, 3820), True, 'import xsimlab as xs\n'), ((4246, 4291), 'xsimlab.foreign', 'xs.foreign', (['UniformRectilinearGrid2D', '"""shape"""'], {}), "(UniformRectilinearGrid2D, 'shape')\n", (4256, 4291), True, 'import xsimlab as xs\n'), ((4306, 4353), 'xsimlab.foreign', 'xs.foreign', 
(['UniformRectilinearGrid2D', '"""spacing"""'], {}), "(UniformRectilinearGrid2D, 'spacing')\n", (4316, 4353), True, 'import xsimlab as xs\n'), ((4370, 4412), 'xsimlab.foreign', 'xs.foreign', (['SurfaceTopography', '"""elevation"""'], {}), "(SurfaceTopography, 'elevation')\n", (4380, 4412), True, 'import xsimlab as xs\n'), ((4426, 4490), 'xsimlab.on_demand', 'xs.on_demand', ([], {'dims': "('y', 'x')", 'description': '"""terrain local slope"""'}), "(dims=('y', 'x'), description='terrain local slope')\n", (4438, 4490), True, 'import xsimlab as xs\n'), ((4529, 4597), 'xsimlab.on_demand', 'xs.on_demand', ([], {'dims': "('y', 'x')", 'description': '"""terrain local curvature"""'}), "(dims=('y', 'x'), description='terrain local curvature')\n", (4541, 4597), True, 'import xsimlab as xs\n'), ((5624, 5721), 'xsimlab.variable', 'xs.variable', ([], {'dims': '"""horizon"""', 'description': '"""horizon freezing (deactivation) time"""', 'static': '(True)'}), "(dims='horizon', description=\n 'horizon freezing (deactivation) time', static=True)\n", (5635, 5721), True, 'import xsimlab as xs\n'), ((5762, 5816), 'xsimlab.index', 'xs.index', ([], {'dims': '"""horizon"""', 'description': '"""horizon number"""'}), "(dims='horizon', description='horizon number')\n", (5770, 5816), True, 'import xsimlab as xs\n'), ((5831, 5929), 'xsimlab.variable', 'xs.variable', ([], {'dims': '"""horizon"""', 'intent': '"""out"""', 'description': '"""whether the horizon is active or not"""'}), "(dims='horizon', intent='out', description=\n 'whether the horizon is active or not')\n", (5842, 5929), True, 'import xsimlab as xs\n'), ((5977, 6019), 'xsimlab.foreign', 'xs.foreign', (['SurfaceTopography', '"""elevation"""'], {}), "(SurfaceTopography, 'elevation')\n", (5987, 6019), True, 'import xsimlab as xs\n'), ((6043, 6092), 'xsimlab.foreign', 'xs.foreign', (['TotalVerticalMotion', '"""surface_upward"""'], {}), "(TotalVerticalMotion, 'surface_upward')\n", (6053, 6092), True, 'import xsimlab as xs\n'), ((6114, 
6163), 'xsimlab.foreign', 'xs.foreign', (['TotalVerticalMotion', '"""bedrock_upward"""'], {}), "(TotalVerticalMotion, 'bedrock_upward')\n", (6124, 6163), True, 'import xsimlab as xs\n'), ((6181, 6284), 'xsimlab.variable', 'xs.variable', ([], {'dims': "('horizon', 'y', 'x')", 'intent': '"""out"""', 'description': '"""elevation of horizon surfaces"""'}), "(dims=('horizon', 'y', 'x'), intent='out', description=\n 'elevation of horizon surfaces')\n", (6192, 6284), True, 'import xsimlab as xs\n'), ((6316, 6344), 'xsimlab.runtime', 'xs.runtime', ([], {'args': '"""sim_start"""'}), "(args='sim_start')\n", (6326, 6344), True, 'import xsimlab as xs\n'), ((6890, 6919), 'xsimlab.runtime', 'xs.runtime', ([], {'args': '"""step_start"""'}), "(args='step_start')\n", (6900, 6919), True, 'import xsimlab as xs\n'), ((2883, 2930), 'numpy.any', 'np.any', (['(self.elevation > self.surface_elevation)'], {}), '(self.elevation > self.surface_elevation)\n', (2889, 2930), True, 'import numpy as np\n'), ((3125, 3229), 'numpy.minimum', 'np.minimum', (['(self.elevation + self.bedrock_motion_up)', '(self.surface_elevation + self.surface_motion_up)'], {}), '(self.elevation + self.bedrock_motion_up, self.surface_elevation +\n self.surface_motion_up)\n', (3135, 3229), True, 'import numpy as np\n'), ((4678, 4707), 'numpy.empty_like', 'np.empty_like', (['self.elevation'], {}), '(self.elevation)\n', (4691, 4707), True, 'import numpy as np\n'), ((4926, 4955), 'numpy.empty_like', 'np.empty_like', (['self.elevation'], {}), '(self.elevation)\n', (4939, 4955), True, 'import numpy as np\n'), ((6394, 6431), 'numpy.any', 'np.any', (['(self.freeze_time < start_time)'], {}), '(self.freeze_time < start_time)\n', (6400, 6431), True, 'import numpy as np\n'), ((6608, 6681), 'numpy.repeat', 'np.repeat', (['self.surf_elevation[None, :, :]', 'self.freeze_time.size'], {'axis': '(0)'}), '(self.surf_elevation[None, :, :], self.freeze_time.size, axis=0)\n', (6617, 6681), True, 'import numpy as np\n'), ((6835, 6883), 
'numpy.full_like', 'np.full_like', (['self.freeze_time', '(True)'], {'dtype': 'bool'}), '(self.freeze_time, True, dtype=bool)\n', (6847, 6883), True, 'import numpy as np\n'), ((7205, 7283), 'numpy.minimum', 'np.minimum', (['(self.elevation[~self.active] + self.bedrock_motion)', 'elevation_next'], {}), '(self.elevation[~self.active] + self.bedrock_motion, elevation_next)\n', (7215, 7283), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
FLAGS = tf.app.flags.FLAGS
def create_graph(model_file=None):
    """Load a frozen GraphDef (.pb) into TensorFlow's default graph.

    Falls back to ``FLAGS.model_file`` when no path is given.
    """
    path = model_file if model_file else FLAGS.model_file
    with open(path, 'rb') as fh:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(fh.read())
        tf.import_graph_def(graph_def, name='')
#
#
# File paths.
# NOTE(review): model_file mixes '/' and '\' separators; works on Windows
# but confirm — os.path.join would be safer.
image_path = "c:/Users/vvpen/Desktop/素材/dog.png"
model_file = "d:/temp/ai\pb/frozen_graph.pb"
# Initialize the model (loads the frozen graph into the default graph).
create_graph(model_file)
with tf.Session() as sess:
    # Read the image file as raw bytes.
    image_data = open(image_path, 'rb').read()
    # Feed the raw bytes in and decode them into an RGB image tensor.
    imgIn = tf.placeholder(name="input", dtype=tf.string)
    image = tf.image.decode_jpeg(imgIn, channels=3)
    # Add a batch dimension.
    image = tf.expand_dims(image, 0)
    # Evaluate to get the image array (expected 1 * 32 * 32 * 3 for CIFAR).
    image_v = sess.run(image, feed_dict={imgIn: image_data})
    print(image_v.shape)
    print(type(image_v))
    # With the image array in hand, run the classification model directly.
    softmax_tensor = sess.graph.get_tensor_by_name("CifarNet/Predictions/Softmax:0")
    perdictions = sess.run(softmax_tensor, {"input:0": image_v})
    # perdictions = sess.run(softmax_tensor, {"CifarNet:0": image_v})
    perdictions = np.squeeze(perdictions)
    print(perdictions)
    print(np.argmax(perdictions))
#
| [
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.argmax",
"tensorflow.GraphDef",
"numpy.squeeze",
"tensorflow.import_graph_def",
"tensorflow.expand_dims",
"tensorflow.image.decode_jpeg"
] | [((495, 507), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (505, 507), True, 'import tensorflow as tf\n'), ((611, 656), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""input"""', 'dtype': 'tf.string'}), "(name='input', dtype=tf.string)\n", (625, 656), True, 'import tensorflow as tf\n'), ((669, 708), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['imgIn'], {'channels': '(3)'}), '(imgIn, channels=3)\n', (689, 708), True, 'import tensorflow as tf\n'), ((734, 758), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (748, 758), True, 'import tensorflow as tf\n'), ((1161, 1184), 'numpy.squeeze', 'np.squeeze', (['perdictions'], {}), '(perdictions)\n', (1171, 1184), True, 'import numpy as np\n'), ((234, 247), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (245, 247), True, 'import tensorflow as tf\n'), ((304, 343), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (323, 343), True, 'import tensorflow as tf\n'), ((1218, 1240), 'numpy.argmax', 'np.argmax', (['perdictions'], {}), '(perdictions)\n', (1227, 1240), True, 'import numpy as np\n')] |
import numpy as np
from deepscratch.models.layers.activations.activation import Activation
class Softmax(Activation):
    """Softmax over the last axis, using the standard max-shift for
    numerical stability."""

    def __call__(self, data):
        # subtracting the per-row max keeps exp() from overflowing
        shifted = np.exp(data - np.max(data, axis=-1, keepdims=True))
        return shifted / np.sum(shifted, axis=-1, keepdims=True)

    def backward(self, data):
        # element-wise derivative s * (1 - s), i.e. the Jacobian diagonal
        s = self.__call__(data)
        return s * (1 - s)
"numpy.sum",
"numpy.max"
] | [((237, 272), 'numpy.sum', 'np.sum', (['exp'], {'axis': '(-1)', 'keepdims': '(True)'}), '(exp, axis=-1, keepdims=True)\n', (243, 272), True, 'import numpy as np\n'), ((178, 214), 'numpy.max', 'np.max', (['data'], {'axis': '(-1)', 'keepdims': '(True)'}), '(data, axis=-1, keepdims=True)\n', (184, 214), True, 'import numpy as np\n')] |
import argparse
import os, os.path
import torch
import numpy as np
import soundfile as sf
def load_waveglow(args, parser):
    """Dispatch to the repo checkpoint loader or the torch.hub loader.

    Returns a ``(waveglow, denoiser)`` pair; the hub path has no denoiser.
    """
    if args.from_repo:
        return load_waveglow_from_repo(args, parser)
    return load_waveglow_from_hub()
def load_waveglow_from_hub():
    """Fetch NVIDIA's pretrained WaveGlow via torch.hub and move it to CUDA.

    Returns ``(model, None)`` — no denoiser is built for the hub model.
    """
    model = torch.hub.load('nvidia/DeepLearningExamples:torchhub', 'nvidia_waveglow')
    model = model.remove_weightnorm(model)
    model = model.to('cuda')
    model.eval()
    return model, None
def load_waveglow_from_repo(args, parser):
    """Load a WaveGlow checkpoint from the local repo.

    Returns ``(waveglow, denoiser)``; ``denoiser`` is ``None`` when
    ``args.denoiser`` is falsy.

    Bug fix: the condition was inverted — ``args.denoiser`` being truthy
    (the default) set ``denoiser = None``, and the unconditional
    ``denoiser.cuda()`` then raised AttributeError on non-CPU runs.
    """
    from inference import load_and_setup_model
    from waveglow.denoiser import Denoiser
    waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
                                     args.fp16, args.cpu, forward_is_infer=True)
    if args.denoiser:
        denoiser = Denoiser(waveglow)
        # only move the denoiser to GPU when one exists and GPU is requested
        if not args.cpu:
            denoiser.cuda()
    else:
        denoiser = None
    return waveglow, denoiser
def mel2wav(waveglow, mel, chunk_size, device='cuda'):
    """Run WaveGlow inference on a mel spectrogram, chunk by chunk.

    Args:
        waveglow: model exposing ``infer(mel_batch) -> audio`` (batch first).
        mel: 2D mel tensor; a batch dim is added before chunking on dim 2.
        chunk_size: frames per inference chunk (limits GPU memory use).
        device: device to run inference on; defaults to ``'cuda'`` to keep
            the previous hard-coded behavior.

    Returns:
        1D numpy array of concatenated audio samples.
    """
    # split into chunks to avoid overloading GPU memory
    mel = mel.unsqueeze(0)
    chunks = torch.split(mel, chunk_size, dim=2)
    audio = np.zeros([0])
    print('Generating chunks...')
    for i, chunk in enumerate(chunks):
        print('  {} / {}'.format(i, len(chunks)))
        with torch.no_grad():
            generated = waveglow.infer(chunk.to(device))
        audio = np.concatenate((audio, generated[0].data.cpu().numpy()), axis=0)
    return audio
def mels2wavs(parser):
    """Convert every saved mel spectrogram under ``input_dir`` to a wav file.

    Walks ``args.input_dir`` recursively, vocodes each file with WaveGlow
    and writes a same-named ``.wav`` into ``args.output_dir``.
    """
    args, _ = parser.parse_known_args()
    waveglow, denoiser = load_waveglow(args, parser)
    for root, _, fnames in sorted(os.walk(args.input_dir)):
        for fname in fnames:
            mel = torch.tensor(torch.load(os.path.join(root, fname)))
            if args.fp16:
                mel = mel.half()
            audio = mel2wav(waveglow, mel, args.chunk_size)
            stem, _ext = os.path.splitext(fname)
            out_path = os.path.join(args.output_dir, stem + '.wav')
            print('Writing audio to', out_path)
            save_soundfile(audio, out_path)
def save_soundfile(audio, filename, samplerate=22050):
    """Write ``audio`` samples to ``filename`` (default rate 22050 Hz)."""
    sf.write(filename, audio, samplerate=samplerate)
# add 2 spectrograms together, see if the result makes sense
# DEPRECIATED
def test_combine_mels(mel_long, mel_short, out_file):
    """DEPRECATED experiment: blend two log-mel spectrograms 50/50 and
    vocode the mix with WaveGlow, writing the audio to ``out_file``.

    The shorter spectrogram is zero-padded (in linear space) to the length
    of the longer one before blending.
    """
    waveglow = torch.hub.load('nvidia/DeepLearningExamples:torchhub', 'nvidia_waveglow')
    waveglow = waveglow.remove_weightnorm(waveglow)
    waveglow = waveglow.to('cuda')
    waveglow.eval()
    # mel_long = torch.load('/home/devin/data/ml/test_ljspeech/wavs/darker-excerpt.mel')
    # mel_short = torch.load('/home/devin/data/ml/test_ljspeech/wavs/03-01-01-01-01-01-01.mel')
    mel_2 = torch.zeros(mel_long.shape)
    # need to exp since these are log-mel spectrograms
    mel_2[:, :mel_short.shape[1]] = torch.exp(mel_short)
    # 50/50 blend in linear space, then back to log and add a batch dim
    mel = torch.exp(mel_long) * 0.5 + mel_2 * 0.5
    mel = torch.log(mel).unsqueeze(0)
    with torch.no_grad():
        audio = waveglow.infer(mel.to('cuda'))
    audio_out = audio[0].data.cpu().numpy()
    sf.write(out_file, audio_out, samplerate=22050)
"""main"""
if __name__ == '__main__':
    # parse arguments
    parser = argparse.ArgumentParser()
    # required input/output directories for the mel -> wav conversion
    parser.add_argument('-i', '--input_dir', type=str, required=True)
    parser.add_argument('-o', '--output_dir', type=str, required=True)
    # inference options; --waveglow points at a repo checkpoint
    parser.add_argument('--chunk_size', type=int, default=80)
    parser.add_argument('--waveglow', type=str, default='voclone/checkpoints/checkpoint_WaveGlow_last.pt')
    parser.add_argument('--fp16', type=bool, default=False)
    parser.add_argument('--cpu', type=bool, default=False)
    parser.add_argument('--denoiser', type=bool, default=True)
    parser.add_argument('--from_repo', type=bool, default=False)
    mels2wavs(parser)
| [
"torch.split",
"torch.hub.load",
"torch.log",
"argparse.ArgumentParser",
"torch.load",
"os.path.join",
"os.path.splitext",
"torch.exp",
"soundfile.write",
"numpy.zeros",
"torch.no_grad",
"inference.load_and_setup_model",
"waveglow.denoiser.Denoiser",
"torch.zeros",
"os.walk"
] | [((288, 361), 'torch.hub.load', 'torch.hub.load', (['"""nvidia/DeepLearningExamples:torchhub"""', '"""nvidia_waveglow"""'], {}), "('nvidia/DeepLearningExamples:torchhub', 'nvidia_waveglow')\n", (302, 361), False, 'import torch\n'), ((645, 748), 'inference.load_and_setup_model', 'load_and_setup_model', (['"""WaveGlow"""', 'parser', 'args.waveglow', 'args.fp16', 'args.cpu'], {'forward_is_infer': '(True)'}), "('WaveGlow', parser, args.waveglow, args.fp16, args.cpu,\n forward_is_infer=True)\n", (665, 748), False, 'from inference import load_and_setup_model\n'), ((1095, 1130), 'torch.split', 'torch.split', (['mel', 'chunk_size'], {'dim': '(2)'}), '(mel, chunk_size, dim=2)\n', (1106, 1130), False, 'import torch\n'), ((1143, 1156), 'numpy.zeros', 'np.zeros', (['[0]'], {}), '([0])\n', (1151, 1156), True, 'import numpy as np\n'), ((2212, 2260), 'soundfile.write', 'sf.write', (['filename', 'audio'], {'samplerate': 'samplerate'}), '(filename, audio, samplerate=samplerate)\n', (2220, 2260), True, 'import soundfile as sf\n'), ((2407, 2480), 'torch.hub.load', 'torch.hub.load', (['"""nvidia/DeepLearningExamples:torchhub"""', '"""nvidia_waveglow"""'], {}), "('nvidia/DeepLearningExamples:torchhub', 'nvidia_waveglow')\n", (2421, 2480), False, 'import torch\n'), ((2785, 2812), 'torch.zeros', 'torch.zeros', (['mel_long.shape'], {}), '(mel_long.shape)\n', (2796, 2812), False, 'import torch\n'), ((2904, 2924), 'torch.exp', 'torch.exp', (['mel_short'], {}), '(mel_short)\n', (2913, 2924), False, 'import torch\n'), ((3134, 3181), 'soundfile.write', 'sf.write', (['out_file', 'audio_out'], {'samplerate': '(22050)'}), '(out_file, audio_out, samplerate=22050)\n', (3142, 3181), True, 'import soundfile as sf\n'), ((3257, 3282), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3280, 3282), False, 'import argparse\n'), ((856, 874), 'waveglow.denoiser.Denoiser', 'Denoiser', (['waveglow'], {}), '(waveglow)\n', (864, 874), False, 'from waveglow.denoiser import 
Denoiser\n'), ((1716, 1734), 'os.walk', 'os.walk', (['input_dir'], {}), '(input_dir)\n', (1723, 1734), False, 'import os, os.path\n'), ((3022, 3037), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3035, 3037), False, 'import torch\n'), ((1295, 1310), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1308, 1310), False, 'import torch\n'), ((1785, 1810), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (1797, 1810), False, 'import os, os.path\n'), ((2935, 2954), 'torch.exp', 'torch.exp', (['mel_long'], {}), '(mel_long)\n', (2944, 2954), False, 'import torch\n'), ((2985, 2999), 'torch.log', 'torch.log', (['mel'], {}), '(mel)\n', (2994, 2999), False, 'import torch\n'), ((1842, 1858), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (1852, 1858), False, 'import torch\n'), ((2022, 2045), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (2038, 2045), False, 'import os, os.path\n')] |
import numpy as np
import cv2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
class EmotionDetector:
    """Detect faces with an OpenCV Haar cascade and classify each face's
    emotion with a small Keras CNN (7 classes, 48x48 grayscale input)."""

    def __init__(self, ):
        # CNN architecture must match the weights file loaded below
        self.model = Sequential()
        self.model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,1)))
        self.model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))
        self.model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))
        self.model.add(Flatten())
        self.model.add(Dense(1024, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(7, activation='softmax'))
        self.model.load_weights('./src/emotion_detection/model.h5')
        # bundled OpenCV frontal-face Haar cascade
        self.face_detection = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        # detectMultiScale parameters
        self.settings = {
            'scaleFactor': 1.3,
            'minNeighbors': 5,
            'minSize': (50, 50)
        }
        # class index -> emotion label (order must match the trained model)
        self.emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
        # model input size (height, width)
        self.face_scale = (48, 48)

    def run_detection_bytes(self, imgdata):
        # Classify the first detected face in an encoded image byte string.
        # Returns (label, confidence) or None when no face is found.
        as_array = np.frombuffer(imgdata, dtype=np.uint8)
        img = cv2.imdecode(as_array, flags=cv2.IMREAD_GRAYSCALE)
        # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        detected = self.face_detection.detectMultiScale(
            img, **self.settings)
        for x, y, w, h in detected:
            cv2.rectangle(img, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
            roi_gray = img[y:y + h, x:x + w]
            # resize to 48x48 and add channel + batch dims for the CNN
            # NOTE(review): unlike run_detection_loop, the pixels are not
            # divided by 255 here — confirm the weights expect raw 0-255 input
            face = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
            prediction = self.model.predict(face)
            maxindex = int(np.argmax(prediction))
            # test
            cv2.putText(img, self.emotion_dict[maxindex], (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
            # save output for testing
            cv2.imwrite('test.jpg', img)
            # only the first detected face is classified
            return self.emotion_dict[maxindex], prediction[0][maxindex].item()
        return None

    def run_detection_loop(self):
        # Generator: stream normalized 48x48 face crops from the webcam,
        # drawing detection boxes in a preview window until a key is pressed.
        camera = cv2.VideoCapture(0)
        camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
        camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 768)
        while True:
            _, img = camera.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            detected = self.face_detection.detectMultiScale(
                gray, **self.settings)
            for x, y, w, h in detected:
                cv2.rectangle(img, (x, y), (x+w, y+h), (245, 135, 66), 2)
                cv2.rectangle(img, (x, y), (x+w//3, y+20), (245, 135, 66), -1)
                # crop slightly inside the detection box
                face = gray[y+5:y+h-5, x+20:x+w-20]
                face = cv2.resize(face, self.face_scale)
                # normalize to [0, 1] before yielding
                face = face/255.0
                yield face
            cv2.imshow('Facial Expression', img)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            # any key press stops the loop
            if cv2.waitKey(5) != -1:
                break
        camera.release()
        cv2.destroyAllWindows()
# some tests
if __name__ == "__main__":
    recognizer = EmotionDetector()
    # `run_detection_loop` is a generator, so it must be iterated for any
    # frame to be processed.  (The previous code called a nonexistent
    # `run_loop`, which raised AttributeError.)
    for _face in recognizer.run_detection_loop():
        pass
| [
"cv2.rectangle",
"cv2.imshow",
"tensorflow.keras.layers.Dense",
"cv2.destroyAllWindows",
"cv2.imdecode",
"cv2.CascadeClassifier",
"tensorflow.keras.layers.Conv2D",
"numpy.frombuffer",
"cv2.waitKey",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Dropout",
"numpy.argmax",
"cv2... | [((230, 242), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (240, 242), False, 'from tensorflow.keras.models import Sequential\n'), ((1088, 1176), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (["(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')"], {}), "(cv2.data.haarcascades +\n 'haarcascade_frontalface_default.xml')\n", (1109, 1176), False, 'import cv2\n'), ((1547, 1585), 'numpy.frombuffer', 'np.frombuffer', (['imgdata'], {'dtype': 'np.uint8'}), '(imgdata, dtype=np.uint8)\n', (1560, 1585), True, 'import numpy as np\n'), ((1601, 1651), 'cv2.imdecode', 'cv2.imdecode', (['as_array'], {'flags': 'cv2.IMREAD_GRAYSCALE'}), '(as_array, flags=cv2.IMREAD_GRAYSCALE)\n', (1613, 1651), False, 'import cv2\n'), ((2551, 2570), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2567, 2570), False, 'import cv2\n'), ((3450, 3473), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3471, 3473), False, 'import cv2\n'), ((267, 341), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': '(48, 48, 1)'}), "(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1))\n", (273, 341), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((364, 413), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(64, kernel_size=(3, 3), activation='relu')\n", (370, 413), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((438, 468), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (450, 468), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((493, 506), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (500, 506), False, 'from tensorflow.keras.layers import Dense, 
Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((532, 582), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(128, kernel_size=(3, 3), activation='relu')\n", (538, 582), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((607, 637), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (619, 637), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((662, 712), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(128, kernel_size=(3, 3), activation='relu')\n", (668, 712), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((737, 767), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (749, 767), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((792, 805), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (799, 805), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((831, 840), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (838, 840), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((865, 895), 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (870, 895), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((920, 932), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (927, 932), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((957, 987), 'tensorflow.keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""softmax"""'}), "(7, 
activation='softmax')\n", (962, 987), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1847, 1915), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y - 50)', '(x + w, y + h + 10)', '(255, 0, 0)', '(2)'], {}), '(img, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)\n', (1860, 1915), False, 'import cv2\n'), ((2187, 2317), 'cv2.putText', 'cv2.putText', (['img', 'self.emotion_dict[maxindex]', '(x + 20, y - 60)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 255, 255)', '(2)', 'cv2.LINE_AA'], {}), '(img, self.emotion_dict[maxindex], (x + 20, y - 60), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n', (2198, 2317), False, 'import cv2\n'), ((2360, 2388), 'cv2.imwrite', 'cv2.imwrite', (['"""test.jpg"""', 'img'], {}), "('test.jpg', img)\n", (2371, 2388), False, 'import cv2\n'), ((2748, 2785), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2760, 2785), False, 'import cv2\n'), ((3264, 3300), 'cv2.imshow', 'cv2.imshow', (['"""Facial Expression"""', 'img'], {}), "('Facial Expression', img)\n", (3274, 3300), False, 'import cv2\n'), ((3319, 3355), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3331, 3355), False, 'import cv2\n'), ((2132, 2153), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (2141, 2153), True, 'import numpy as np\n'), ((2943, 3004), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(245, 135, 66)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (245, 135, 66), 2)\n', (2956, 3004), False, 'import cv2\n'), ((3017, 3085), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w // 3, y + 20)', '(245, 135, 66)', '(-1)'], {}), '(img, (x, y), (x + w // 3, y + 20), (245, 135, 66), -1)\n', (3030, 3085), False, 'import cv2\n'), ((3155, 3188), 'cv2.resize', 'cv2.resize', (['face', 'self.face_scale'], {}), '(face, self.face_scale)\n', (3165, 3188), False, 'import 
cv2\n'), ((3372, 3386), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (3383, 3386), False, 'import cv2\n'), ((2002, 2032), 'cv2.resize', 'cv2.resize', (['roi_gray', '(48, 48)'], {}), '(roi_gray, (48, 48))\n', (2012, 2032), False, 'import cv2\n')] |
import numpy as np # numerical tools
from scipy import integrate
from scipy import interpolate
c_light=299792.458#in km/s
#Find nearest value
def find_nearest(array,value):
    """Return the element of *array* closest to *value* (smallest absolute difference)."""
    return array[np.abs(array - value).argmin()]
#### DATA SN
def get_SN_info(targetname):
    """Look up one supernova's metadata row in Info_SNe_KAIT.txt.

    Returns (A_V, z_hel, err_z_hel, JD_explo, err_JD_explo, z_cmb, err_z_cmb).
    Velocity columns in the table (km/s) are converted to redshifts by
    dividing by the speed of light.
    """
    table = np.loadtxt('Info_SNe_KAIT.txt', usecols=[1, 2, 3, 4, 5, 6, 7]).transpose()
    names = np.array(np.genfromtxt('Info_SNe_KAIT.txt', usecols=[0], dtype='str'))
    row = np.where(np.array(names) == targetname)[0][0]
    A_V = table[0][row]
    z_hel = table[1][row] * 1.0 / c_light
    err_z_hel = table[2][row] * 1.0 / c_light
    JD_explo = table[3][row]
    err_JD_explo = table[4][row]
    z_cmb = table[5][row] * 1.0 / c_light
    err_z_cmb = table[6][row] * 1.0 / c_light
    return A_V, z_hel, err_z_hel, JD_explo, err_JD_explo, z_cmb, err_z_cmb
#Get SN photometry
def get_sn(targetname):
    """Read KAIT BVRI photometry of *targetname* from Nat_KAIT/<name>.txt.

    The first line of the file is a header naming the columns; for each band
    the magnitude error sits in the column right after the magnitude.  A point
    is kept when its error is < 0.8 mag, is not 'NaN', and the line is not
    commented out with '#'.

    Returns four dicts keyed by filter ('B','V','R','I'): MJD, mags, emags
    and telescope id, each sorted by MJD and converted to numpy arrays.
    A band with no surviving points yields empty arrays (previously the
    sort step crashed on an empty band).
    """
    # Close the file deterministically (the original leaked the handle).
    with open('Nat_KAIT/%s.txt' % targetname, 'r') as data_sn:
        lines = data_sn.readlines()
    fields = lines[0].split()
    filters = ['B', 'V', 'R', 'I']
    # Column index of each magnitude; the error is at index + 1.
    col = {f: np.where(np.array(fields) == f)[0][0] for f in filters}
    MJD = {f: [] for f in filters}
    mags = {f: [] for f in filters}
    emags = {f: [] for f in filters}
    tel = {f: [] for f in filters}
    for line in lines[1:]:
        parts = line.split()
        for f in filters:
            err = parts[col[f] + 1]
            # NOTE(review): '< 0.8' is a *string* comparison, kept as-is to
            # preserve the original selection behaviour exactly.
            if err < '0.8' and err != 'NaN' and parts[0][0] != '#':
                mags[f].append(float(parts[col[f]]))
                emags[f].append(float(err))
                MJD[f].append(float(parts[1]))
                tel[f].append(parts[3])
    for f in filters:
        if MJD[f]:  # guard: zip(*...) unpacking fails on an empty band
            MJD[f], mags[f], emags[f], tel[f] = zip(*sorted(zip(MJD[f], mags[f], emags[f], tel[f])))
        MJD[f] = np.array(MJD[f])
        mags[f] = np.array(mags[f])
        emags[f] = np.array(emags[f])
        tel[f] = np.array(tel[f])
    return MJD, mags, emags, tel
#Linear interpolation of the magnitude
def inter_mag(MJD,mags,emags):
    """Build per-band linear interpolators of magnitude versus MJD.

    For each of B, V, R, I two interpolators are returned: one over the
    magnitudes and one over magnitude + error.  When the R band has no
    data points, empty lists are returned in its place.
    """
    def band_pair(f):
        # magnitude curve and its +1-sigma counterpart
        return (interpolate.interp1d(MJD[f], mags[f]),
                interpolate.interp1d(MJD[f], mags[f] + emags[f]))

    B_band, B_band_plus = band_pair('B')
    V_band, V_band_plus = band_pair('V')
    if np.size(MJD['R']) > 0:
        R_band, R_band_plus = band_pair('R')
    else:
        R_band, R_band_plus = [], []
    I_band, I_band_plus = band_pair('I')
    return B_band, B_band_plus, V_band, V_band_plus, R_band, R_band_plus, I_band, I_band_plus
#Derive for each CSP filter the effective wavelength
def effective_wavelength_csp(lam_spec,flux_spec,filter_name):
    """Effective wavelength of a CSP (Swope) filter for a given spectrum.

    Computes the transmission-weighted mean wavelength
    int(f * S * lam^2) / int(f * S * lam) over the filter band.

    Parameters
    ----------
    lam_spec, flux_spec : array-like
        Spectrum wavelengths (Angstrom) and fluxes; must cover the band.
    filter_name : str
        One of 'u', 'B', 'g', 'V', 'r', 'i'.

    Raises
    ------
    ValueError
        For an unknown filter name (previously this raised NameError).

    Notes
    -----
    The original implementation loaded all six transmission files and
    duplicated the interpolation block per filter; only the requested
    filter is loaded now, with identical numerics.
    """
    files = {
        'u': 'Filters/CSP/u_swope.txt',
        'g': 'Filters/CSP/g_swope.txt',
        'r': 'Filters/CSP/r_swope.txt',
        'i': 'Filters/CSP/i_swope.txt',
        'V': 'Filters/CSP/V_swope.txt',
        'B': 'Filters/CSP/B_swope.txt',
    }
    if filter_name not in files:
        raise ValueError('unknown CSP filter: %s' % filter_name)
    trans = np.loadtxt(files[filter_name])
    lam_raw, s_raw = trans[:, 0], trans[:, 1]
    # Resample the transmission curve on a fine regular grid, as the
    # original code did for every filter.
    N_pt = 3000
    lam_filter = np.linspace(min(lam_raw), max(lam_raw), N_pt)
    F_filter_func = interpolate.interp1d(lam_filter,
                                          interpolate.interp1d(lam_raw, s_raw)(lam_filter))
    # Interpolate the spectrum itself.
    F_spec = interpolate.interp1d(lam_spec, flux_spec)
    # Merge spectrum and filter grids, keep points strictly inside the band.
    wl = np.concatenate([lam_spec, lam_filter])
    wl.sort()
    wl = wl[(wl > min(lam_filter)) & (wl < max(lam_filter))]
    resp = F_filter_func(wl)
    sed = F_spec(wl)
    num = sed * resp * wl * wl   # f * S * lambda^2
    dem = sed * resp * wl        # f * S * lambda
    return np.trapz(num) * 1.0 / np.trapz(dem)
def effective_wavelength_KAIT(lam_spec,flux_spec,filter_name):
    """Effective wavelength of a KAIT/Nickel filter for a given spectrum.

    *filter_name* is a band letter followed by an instrument id, e.g.
    'Bkait2', 'Rkait3', 'Inickel2'; the matching transmission file is
    Filters/KAIT_NICKEL/<band>_<instrument>.txt.

    Returns int(f * S * lam^2) / int(f * S * lam) over the filter band.

    Raises
    ------
    ValueError
        For an unknown filter name (previously this raised NameError).

    Notes
    -----
    The original implementation loaded all twenty transmission files and
    duplicated the interpolation block per filter; only the requested
    filter is loaded now, with identical numerics.
    """
    valid = {'%s%s' % (band, inst)
             for band in 'BVRI'
             for inst in ('kait2', 'kait3', 'kait4', 'nickel1', 'nickel2')}
    if filter_name not in valid:
        raise ValueError('unknown KAIT/Nickel filter: %s' % filter_name)
    path = 'Filters/KAIT_NICKEL/%s_%s.txt' % (filter_name[0], filter_name[1:])
    trans = np.loadtxt(path)
    lam_raw, s_raw = trans[:, 0], trans[:, 1]
    # Resample the transmission curve on a fine regular grid, as the
    # original code did for every filter.
    N_pt = 5000
    lam_filter = np.linspace(min(lam_raw), max(lam_raw), N_pt)
    F_filter_func = interpolate.interp1d(lam_filter,
                                          interpolate.interp1d(lam_raw, s_raw)(lam_filter))
    # Interpolate the spectrum itself.
    F_spec = interpolate.interp1d(lam_spec, flux_spec)
    # Merge spectrum and filter grids, keep points strictly inside the band.
    wl = np.concatenate([lam_spec, lam_filter])
    wl.sort()
    wl = wl[(wl > min(lam_filter)) & (wl < max(lam_filter))]
    resp = F_filter_func(wl)
    sed = F_spec(wl)
    return np.trapz(sed * resp * wl * wl) * 1.0 / np.trapz(sed * resp * wl)
def ccm_unred(wave, flux, av, **kwargs):
    """Deredden (or, for negative *av*, redden) a flux vector with the CCM law.

    Applies the Cardelli, Clayton & Mathis (1989, ApJ 345, 245) extinction
    parameterization with the O'Donnell (1994, ApJ 422, 158) near-UV update,
    valid from 3.5 microns down to 0.1 microns.

    Parameters
    ----------
    wave : scalar or array-like
        Wavelength(s) in Angstroms.
    flux : scalar or array-like
        Calibrated flux, same number of elements as *wave*.
    av : float
        Total V-band extinction A(V); a negative value reddens the flux.
    r_v : float, optional keyword (any capitalization, e.g. R_V=)
        Ratio of total to selective extinction A(V)/E(B-V); default 3.1.

    Returns
    -------
    Unreddened flux, same units as *flux*.
    """
    import numpy as n
    # Pick up R(V) from kwargs, case-insensitively; default 3.1 (diffuse ISM).
    r_v = 3.1
    for key in kwargs:
        if key.lower() == 'r_v':
            r_v = kwargs[key]
    # Work in inverse microns; promote a bare scalar to a 1-element array.
    if isinstance(wave, int) or isinstance(wave, float):
        inv_mu = 10000. / n.array([wave])
    else:
        inv_mu = 10000. / n.array(wave)
    n_pts = len(inv_mu)
    coef_a = n.zeros(n_pts)
    coef_b = n.zeros(n_pts)
    # --- Infrared: 0.3 < x < 1.1 ---------------------------------------
    sel = n.where((inv_mu > 0.3) & (inv_mu < 1.1))
    if len(inv_mu[sel]) > 0:
        coef_a[sel] = 0.574 * inv_mu[sel] ** 1.61
        coef_b[sel] = -0.527 * inv_mu[sel] ** 1.61
    # --- Optical/NIR: 1.1 <= x < 3.3 (O'Donnell 1994 coefficients) ------
    sel = n.where((inv_mu >= 1.1) & (inv_mu < 3.3))
    if len(sel[0]) > 0:
        y = inv_mu[sel] - 1.82
        poly_a = n.array([-0.505, 1.647, -0.827, -1.718,
                          1.137, 0.701, -0.609, 0.104, 1.])
        poly_b = n.array([3.347, -10.805, 5.491, 11.102,
                          -7.985, -3.989, 2.908, 1.952, 0.])
        coef_a[sel] = n.polyval(poly_a, y)
        coef_b[sel] = n.polyval(poly_b, y)
    # --- Mid-UV: 3.3 <= x < 8 -------------------------------------------
    sel = n.where((inv_mu >= 3.3) & (inv_mu < 8))
    n_sel = len(inv_mu[sel])
    if n_sel > 0:
        y = inv_mu[sel]
        F_a = n.zeros(n_sel)
        F_b = n.zeros(n_sel)
        # Cubic correction terms switch on beyond the 2175 A bump (x > 5.9).
        bump = n.where(y > 5.9)
        if len(y[bump]) > 0:
            y1 = y[bump] - 5.9
            F_a[bump] = -0.04473 * y1 ** 2 - 0.009779 * y1 ** 3
            F_b[bump] = 0.2130 * y1 ** 2 + 0.1207 * y1 ** 3
        coef_a[sel] = 1.752 - 0.316 * y - (0.104 / ((y - 4.67) ** 2 + 0.341)) + F_a
        coef_b[sel] = -3.090 + 1.825 * y + (1.206 / ((y - 4.62) ** 2 + 0.263)) + F_b
    # --- Far-UV: 8 <= x < 11 ---------------------------------------------
    sel = n.where((inv_mu >= 8) & (inv_mu < 11))
    if len(inv_mu[sel]) > 0:
        y = inv_mu[sel] - 8.
        coef_a[sel] = n.polyval([-0.070, 0.137, -0.628, -1.073], y)
        coef_b[sel] = n.polyval([0.374, -0.420, 4.257, 13.670], y)
    # A(lambda) = A(V) * (a + b / R(V)); positive av brightens (dereddens).
    A_lambda = av * (coef_a + coef_b / r_v)
    return flux * 10. ** (0.4 * A_lambda)
| [
"numpy.abs",
"numpy.trapz",
"numpy.where",
"numpy.size",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.zeros",
"numpy.polyval",
"numpy.concatenate",
"numpy.loadtxt",
"numpy.genfromtxt"
] | [((2997, 3038), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["MJD['B']", "mags['B']"], {}), "(MJD['B'], mags['B'])\n", (3017, 3038), False, 'from scipy import interpolate\n'), ((3051, 3105), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["MJD['B']", "(mags['B'] + emags['B'])"], {}), "(MJD['B'], mags['B'] + emags['B'])\n", (3071, 3105), False, 'from scipy import interpolate\n'), ((3111, 3152), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["MJD['V']", "mags['V']"], {}), "(MJD['V'], mags['V'])\n", (3131, 3152), False, 'from scipy import interpolate\n'), ((3165, 3219), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["MJD['V']", "(mags['V'] + emags['V'])"], {}), "(MJD['V'], mags['V'] + emags['V'])\n", (3185, 3219), False, 'from scipy import interpolate\n'), ((3402, 3443), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["MJD['I']", "mags['I']"], {}), "(MJD['I'], mags['I'])\n", (3422, 3443), False, 'from scipy import interpolate\n'), ((3456, 3510), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["MJD['I']", "(mags['I'] + emags['I'])"], {}), "(MJD['I'], mags['I'] + emags['I'])\n", (3476, 3510), False, 'from scipy import interpolate\n'), ((3763, 3800), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/CSP/u_swope.txt"""'], {}), "('Filters/CSP/u_swope.txt')\n", (3773, 3800), True, 'import numpy as np\n'), ((3851, 3888), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/CSP/g_swope.txt"""'], {}), "('Filters/CSP/g_swope.txt')\n", (3861, 3888), True, 'import numpy as np\n'), ((3939, 3976), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/CSP/r_swope.txt"""'], {}), "('Filters/CSP/r_swope.txt')\n", (3949, 3976), True, 'import numpy as np\n'), ((4027, 4064), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/CSP/i_swope.txt"""'], {}), "('Filters/CSP/i_swope.txt')\n", (4037, 4064), True, 'import numpy as np\n'), ((4115, 4152), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/CSP/V_swope.txt"""'], {}), "('Filters/CSP/V_swope.txt')\n", (4125, 4152), 
True, 'import numpy as np\n'), ((4203, 4240), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/CSP/B_swope.txt"""'], {}), "('Filters/CSP/B_swope.txt')\n", (4213, 4240), True, 'import numpy as np\n'), ((4293, 4328), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_u', 's_u'], {}), '(lambda_u, s_u)\n', (4313, 4328), False, 'from scipy import interpolate\n'), ((4362, 4397), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_B', 's_B'], {}), '(lambda_B, s_B)\n', (4382, 4397), False, 'from scipy import interpolate\n'), ((4431, 4466), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_V', 's_V'], {}), '(lambda_V, s_V)\n', (4451, 4466), False, 'from scipy import interpolate\n'), ((4500, 4535), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_g', 's_g'], {}), '(lambda_g, s_g)\n', (4520, 4535), False, 'from scipy import interpolate\n'), ((4546, 4581), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_r', 's_r'], {}), '(lambda_r, s_r)\n', (4566, 4581), False, 'from scipy import interpolate\n'), ((4615, 4650), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_i', 's_i'], {}), '(lambda_i, s_i)\n', (4635, 4650), False, 'from scipy import interpolate\n'), ((5874, 5915), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lam_spec', 'flux_spec'], {}), '(lam_spec, flux_spec)\n', (5894, 5915), False, 'from scipy import interpolate\n'), ((6004, 6042), 'numpy.concatenate', 'np.concatenate', (['[lam_spec, lam_filter]'], {}), '([lam_spec, lam_filter])\n', (6018, 6042), True, 'import numpy as np\n'), ((6912, 6957), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/B_kait2.txt"""'], {}), "('Filters/KAIT_NICKEL/B_kait2.txt')\n", (6922, 6957), True, 'import numpy as np\n'), ((7038, 7083), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/V_kait2.txt"""'], {}), "('Filters/KAIT_NICKEL/V_kait2.txt')\n", (7048, 7083), True, 'import numpy as np\n'), ((7164, 7209), 'numpy.loadtxt', 'np.loadtxt', 
(['"""Filters/KAIT_NICKEL/R_kait2.txt"""'], {}), "('Filters/KAIT_NICKEL/R_kait2.txt')\n", (7174, 7209), True, 'import numpy as np\n'), ((7290, 7335), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/I_kait2.txt"""'], {}), "('Filters/KAIT_NICKEL/I_kait2.txt')\n", (7300, 7335), True, 'import numpy as np\n'), ((7653, 7698), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/B_kait3.txt"""'], {}), "('Filters/KAIT_NICKEL/B_kait3.txt')\n", (7663, 7698), True, 'import numpy as np\n'), ((7779, 7824), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/V_kait3.txt"""'], {}), "('Filters/KAIT_NICKEL/V_kait3.txt')\n", (7789, 7824), True, 'import numpy as np\n'), ((7905, 7950), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/R_kait3.txt"""'], {}), "('Filters/KAIT_NICKEL/R_kait3.txt')\n", (7915, 7950), True, 'import numpy as np\n'), ((8031, 8076), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/I_kait3.txt"""'], {}), "('Filters/KAIT_NICKEL/I_kait3.txt')\n", (8041, 8076), True, 'import numpy as np\n'), ((8395, 8440), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/B_kait4.txt"""'], {}), "('Filters/KAIT_NICKEL/B_kait4.txt')\n", (8405, 8440), True, 'import numpy as np\n'), ((8521, 8566), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/V_kait4.txt"""'], {}), "('Filters/KAIT_NICKEL/V_kait4.txt')\n", (8531, 8566), True, 'import numpy as np\n'), ((8647, 8692), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/R_kait4.txt"""'], {}), "('Filters/KAIT_NICKEL/R_kait4.txt')\n", (8657, 8692), True, 'import numpy as np\n'), ((8773, 8818), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/I_kait4.txt"""'], {}), "('Filters/KAIT_NICKEL/I_kait4.txt')\n", (8783, 8818), True, 'import numpy as np\n'), ((9142, 9189), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/B_nickel1.txt"""'], {}), "('Filters/KAIT_NICKEL/B_nickel1.txt')\n", (9152, 9189), True, 'import numpy as np\n'), ((9280, 9327), 'numpy.loadtxt', 'np.loadtxt', 
(['"""Filters/KAIT_NICKEL/V_nickel1.txt"""'], {}), "('Filters/KAIT_NICKEL/V_nickel1.txt')\n", (9290, 9327), True, 'import numpy as np\n'), ((9418, 9465), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/R_nickel1.txt"""'], {}), "('Filters/KAIT_NICKEL/R_nickel1.txt')\n", (9428, 9465), True, 'import numpy as np\n'), ((9556, 9603), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/I_nickel1.txt"""'], {}), "('Filters/KAIT_NICKEL/I_nickel1.txt')\n", (9566, 9603), True, 'import numpy as np\n'), ((9958, 10005), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/B_nickel2.txt"""'], {}), "('Filters/KAIT_NICKEL/B_nickel2.txt')\n", (9968, 10005), True, 'import numpy as np\n'), ((10096, 10143), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/V_nickel2.txt"""'], {}), "('Filters/KAIT_NICKEL/V_nickel2.txt')\n", (10106, 10143), True, 'import numpy as np\n'), ((10234, 10281), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/R_nickel2.txt"""'], {}), "('Filters/KAIT_NICKEL/R_nickel2.txt')\n", (10244, 10281), True, 'import numpy as np\n'), ((10372, 10419), 'numpy.loadtxt', 'np.loadtxt', (['"""Filters/KAIT_NICKEL/I_nickel2.txt"""'], {}), "('Filters/KAIT_NICKEL/I_nickel2.txt')\n", (10382, 10419), True, 'import numpy as np\n'), ((10747, 10794), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_B_kait2', 's_B_kait2'], {}), '(lambda_B_kait2, s_B_kait2)\n', (10767, 10794), False, 'from scipy import interpolate\n'), ((10811, 10858), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_V_kait2', 's_V_kait2'], {}), '(lambda_V_kait2, s_V_kait2)\n', (10831, 10858), False, 'from scipy import interpolate\n'), ((10875, 10922), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_R_kait2', 's_R_kait2'], {}), '(lambda_R_kait2, s_R_kait2)\n', (10895, 10922), False, 'from scipy import interpolate\n'), ((10939, 10986), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_I_kait2', 's_I_kait2'], {}), '(lambda_I_kait2, 
s_I_kait2)\n', (10959, 10986), False, 'from scipy import interpolate\n'), ((11003, 11050), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_B_kait3', 's_B_kait3'], {}), '(lambda_B_kait3, s_B_kait3)\n', (11023, 11050), False, 'from scipy import interpolate\n'), ((11067, 11114), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_V_kait3', 's_V_kait3'], {}), '(lambda_V_kait3, s_V_kait3)\n', (11087, 11114), False, 'from scipy import interpolate\n'), ((11131, 11178), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_R_kait3', 's_R_kait3'], {}), '(lambda_R_kait3, s_R_kait3)\n', (11151, 11178), False, 'from scipy import interpolate\n'), ((11195, 11242), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_I_kait3', 's_I_kait3'], {}), '(lambda_I_kait3, s_I_kait3)\n', (11215, 11242), False, 'from scipy import interpolate\n'), ((11259, 11306), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_B_kait4', 's_B_kait4'], {}), '(lambda_B_kait4, s_B_kait4)\n', (11279, 11306), False, 'from scipy import interpolate\n'), ((11323, 11370), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_V_kait4', 's_V_kait4'], {}), '(lambda_V_kait4, s_V_kait4)\n', (11343, 11370), False, 'from scipy import interpolate\n'), ((11387, 11434), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_R_kait4', 's_R_kait4'], {}), '(lambda_R_kait4, s_R_kait4)\n', (11407, 11434), False, 'from scipy import interpolate\n'), ((11451, 11498), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_I_kait4', 's_I_kait4'], {}), '(lambda_I_kait4, s_I_kait4)\n', (11471, 11498), False, 'from scipy import interpolate\n'), ((11517, 11568), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_B_nickel1', 's_B_nickel1'], {}), '(lambda_B_nickel1, s_B_nickel1)\n', (11537, 11568), False, 'from scipy import interpolate\n'), ((11587, 11638), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_V_nickel1', 
's_V_nickel1'], {}), '(lambda_V_nickel1, s_V_nickel1)\n', (11607, 11638), False, 'from scipy import interpolate\n'), ((11657, 11708), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_R_nickel1', 's_R_nickel1'], {}), '(lambda_R_nickel1, s_R_nickel1)\n', (11677, 11708), False, 'from scipy import interpolate\n'), ((11727, 11778), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_I_nickel1', 's_I_nickel1'], {}), '(lambda_I_nickel1, s_I_nickel1)\n', (11747, 11778), False, 'from scipy import interpolate\n'), ((11797, 11848), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_B_nickel2', 's_B_nickel2'], {}), '(lambda_B_nickel2, s_B_nickel2)\n', (11817, 11848), False, 'from scipy import interpolate\n'), ((11867, 11918), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_V_nickel2', 's_V_nickel2'], {}), '(lambda_V_nickel2, s_V_nickel2)\n', (11887, 11918), False, 'from scipy import interpolate\n'), ((11937, 11988), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_R_nickel2', 's_R_nickel2'], {}), '(lambda_R_nickel2, s_R_nickel2)\n', (11957, 11988), False, 'from scipy import interpolate\n'), ((12007, 12058), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lambda_I_nickel2', 's_I_nickel2'], {}), '(lambda_I_nickel2, s_I_nickel2)\n', (12027, 12058), False, 'from scipy import interpolate\n'), ((16659, 16700), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['lam_spec', 'flux_spec'], {}), '(lam_spec, flux_spec)\n', (16679, 16700), False, 'from scipy import interpolate\n'), ((16789, 16827), 'numpy.concatenate', 'np.concatenate', (['[lam_spec, lam_filter]'], {}), '([lam_spec, lam_filter])\n', (16803, 16827), True, 'import numpy as np\n'), ((21223, 21236), 'numpy.zeros', 'n.zeros', (['npts'], {}), '(npts)\n', (21230, 21236), True, 'import numpy as n\n'), ((21249, 21262), 'numpy.zeros', 'n.zeros', (['npts'], {}), '(npts)\n', (21256, 21262), True, 'import numpy as n\n'), ((21318, 21348), 'numpy.where', 
'n.where', (['((x > 0.3) & (x < 1.1))'], {}), '((x > 0.3) & (x < 1.1))\n', (21325, 21348), True, 'import numpy as n\n'), ((21546, 21577), 'numpy.where', 'n.where', (['((x >= 1.1) & (x < 3.3))'], {}), '((x >= 1.1) & (x < 3.3))\n', (21553, 21577), True, 'import numpy as n\n'), ((22485, 22514), 'numpy.where', 'n.where', (['((x >= 3.3) & (x < 8))'], {}), '((x >= 3.3) & (x < 8))\n', (22492, 22514), True, 'import numpy as n\n'), ((23126, 23154), 'numpy.where', 'n.where', (['((x >= 8) & (x < 11))'], {}), '((x >= 8) & (x < 11))\n', (23133, 23154), True, 'import numpy as n\n'), ((379, 439), 'numpy.genfromtxt', 'np.genfromtxt', (['"""Info_SNe_KAIT.txt"""'], {'usecols': '[0]', 'dtype': '"""str"""'}), "('Info_SNe_KAIT.txt', usecols=[0], dtype='str')\n", (392, 439), True, 'import numpy as np\n'), ((1365, 1379), 'numpy.size', 'np.size', (['lines'], {}), '(lines)\n', (1372, 1379), True, 'import numpy as np\n'), ((2782, 2798), 'numpy.array', 'np.array', (['MJD[f]'], {}), '(MJD[f])\n', (2790, 2798), True, 'import numpy as np\n'), ((2811, 2828), 'numpy.array', 'np.array', (['mags[f]'], {}), '(mags[f])\n', (2819, 2828), True, 'import numpy as np\n'), ((2842, 2860), 'numpy.array', 'np.array', (['emags[f]'], {}), '(emags[f])\n', (2850, 2860), True, 'import numpy as np\n'), ((2872, 2888), 'numpy.array', 'np.array', (['tel[f]'], {}), '(tel[f])\n', (2880, 2888), True, 'import numpy as np\n'), ((3221, 3238), 'numpy.size', 'np.size', (["MJD['R']"], {}), "(MJD['R'])\n", (3228, 3238), True, 'import numpy as np\n'), ((3251, 3292), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["MJD['R']", "mags['R']"], {}), "(MJD['R'], mags['R'])\n", (3271, 3292), False, 'from scipy import interpolate\n'), ((3306, 3360), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["MJD['R']", "(mags['R'] + emags['R'])"], {}), "(MJD['R'], mags['R'] + emags['R'])\n", (3326, 3360), False, 'from scipy import interpolate\n'), ((6767, 6780), 'numpy.trapz', 'np.trapz', (['dem'], {}), '(dem)\n', (6775, 6780), 
True, 'import numpy as np\n'), ((17552, 17565), 'numpy.trapz', 'np.trapz', (['dem'], {}), '(dem)\n', (17560, 17565), True, 'import numpy as np\n'), ((22070, 22144), 'numpy.array', 'n.array', (['[-0.505, 1.647, -0.827, -1.718, 1.137, 0.701, -0.609, 0.104, 1.0]'], {}), '([-0.505, 1.647, -0.827, -1.718, 1.137, 0.701, -0.609, 0.104, 1.0])\n', (22077, 22144), True, 'import numpy as n\n'), ((22242, 22317), 'numpy.array', 'n.array', (['[3.347, -10.805, 5.491, 11.102, -7.985, -3.989, 2.908, 1.952, 0.0]'], {}), '([3.347, -10.805, 5.491, 11.102, -7.985, -3.989, 2.908, 1.952, 0.0])\n', (22249, 22317), True, 'import numpy as n\n'), ((22384, 22400), 'numpy.polyval', 'n.polyval', (['c1', 'y'], {}), '(c1, y)\n', (22393, 22400), True, 'import numpy as n\n'), ((22419, 22435), 'numpy.polyval', 'n.polyval', (['c2', 'y'], {}), '(c2, y)\n', (22428, 22435), True, 'import numpy as n\n'), ((22619, 22633), 'numpy.zeros', 'n.zeros', (['Ngood'], {}), '(Ngood)\n', (22626, 22633), True, 'import numpy as n\n'), ((22650, 22664), 'numpy.zeros', 'n.zeros', (['Ngood'], {}), '(Ngood)\n', (22657, 22664), True, 'import numpy as n\n'), ((22683, 22699), 'numpy.where', 'n.where', (['(y > 5.9)'], {}), '(y > 5.9)\n', (22690, 22699), True, 'import numpy as n\n'), ((23367, 23383), 'numpy.polyval', 'n.polyval', (['c1', 'y'], {}), '(c1, y)\n', (23376, 23383), True, 'import numpy as n\n'), ((23402, 23418), 'numpy.polyval', 'n.polyval', (['c2', 'y'], {}), '(c2, y)\n', (23411, 23418), True, 'import numpy as n\n'), ((185, 206), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (191, 206), True, 'import numpy as np\n'), ((288, 350), 'numpy.loadtxt', 'np.loadtxt', (['"""Info_SNe_KAIT.txt"""'], {'usecols': '[1, 2, 3, 4, 5, 6, 7]'}), "('Info_SNe_KAIT.txt', usecols=[1, 2, 3, 4, 5, 6, 7])\n", (298, 350), True, 'import numpy as np\n'), ((6749, 6762), 'numpy.trapz', 'np.trapz', (['num'], {}), '(num)\n', (6757, 6762), True, 'import numpy as np\n'), ((17534, 17547), 'numpy.trapz', 'np.trapz', (['num'], 
{}), '(num)\n', (17542, 17547), True, 'import numpy as np\n'), ((21045, 21060), 'numpy.array', 'n.array', (['[wave]'], {}), '([wave])\n', (21052, 21060), True, 'import numpy as n\n'), ((21134, 21147), 'numpy.array', 'n.array', (['wave'], {}), '(wave)\n', (21141, 21147), True, 'import numpy as n\n'), ((457, 479), 'numpy.array', 'np.array', (['name_SN_kait'], {}), '(name_SN_kait)\n', (465, 479), True, 'import numpy as np\n'), ((977, 993), 'numpy.array', 'np.array', (['fields'], {}), '(fields)\n', (985, 993), True, 'import numpy as np\n'), ((1022, 1038), 'numpy.array', 'np.array', (['fields'], {}), '(fields)\n', (1030, 1038), True, 'import numpy as np\n'), ((1067, 1083), 'numpy.array', 'np.array', (['fields'], {}), '(fields)\n', (1075, 1083), True, 'import numpy as np\n'), ((1112, 1128), 'numpy.array', 'np.array', (['fields'], {}), '(fields)\n', (1120, 1128), True, 'import numpy as np\n')] |
"""
Gradcam visualization ref modified from implementation by fchollet (https://keras.io/examples/vision/grad_cam)
"""
import cv2
import numpy as np
import os
import sys
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Display
import matplotlib.cm as cm
from model_modified import efficientdet_mod
from model import efficientdet
from utils import preprocess_image
def parse_args(args):
    """Parse the command line arguments.

    Args:
        args: list of argument strings (e.g. ``sys.argv[1:]``).

    Returns:
        ``argparse.Namespace`` with ``model_path``, ``phi``, ``viz_cls``
        and ``img_path`` attributes.
    """
    parser = argparse.ArgumentParser(description='Gradcam visualization script for Efficientdet.')
    parser.add_argument('--model_path', help='Path to trained model.', default='efficientdet-d1.h5')
    parser.add_argument('--phi', help='Hyper parameter phi', default=1, type=int, choices=(0, 1, 2, 3, 4, 5, 6))
    parser.add_argument('--viz_cls', help='coco class to visualize', type=int, default=0)
    parser.add_argument('--img_path', help='image to visualize', default='sample\\person.jpg')
    # Parse once. The original parsed twice: once for the debug print and
    # once more for the return value, duplicating the work.
    parsed = parser.parse_args(args)
    print(vars(parsed))
    return parsed
def main(args=None):
    """Render guided Grad-CAM heatmaps for one image and one COCO class.

    Builds the modified EfficientDet (one sub-model per feature level),
    loads the trained weights into every sub-model, computes a guided
    Grad-CAM heatmap per level, and tiles the source image plus all
    heatmap overlays into a single 2x3 mosaic written to ``out.jpg``.
    """
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    image_path = args.img_path
    top_pred_index = args.viz_cls
    model_path = args.model_path
    phi = args.phi
    weighted_bifpn = True
    # input resolution for each EfficientDet phi (d0..d6)
    image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
    image_size = image_sizes[phi]
    num_classes = 90
    score_threshold = 0.3
    # load modified efficientdet:
    # get the last conv layer before inference and prediction models
    conv_layer_out,pred_models = efficientdet_mod(phi=phi,
                            weighted_bifpn=weighted_bifpn,
                            num_classes=num_classes,
                            score_threshold=score_threshold)
    num_layers = len(conv_layer_out)
    # load the same checkpoint into every per-level sub-model
    for i in range(num_layers):
        conv_layer_out[i].load_weights(model_path, by_name=True)
        pred_models[i].load_weights(model_path, by_name=True)
    image = cv2.imread(image_path)
    src_image = image.copy()
    # BGR -> RGB
    image = image[:, :, ::-1]
    h, w = image.shape[:2]
    image, scale = preprocess_image(image, image_size=image_size)
    # to be used for display
    # NOTE(review): `image` is already preprocessed here, so `img` holds the
    # normalized (not raw-pixel) image that gets blended below — confirm this
    # is the intended background for the overlay.
    img = keras.preprocessing.image.img_to_array(image)
    image = [np.expand_dims(image, axis=0)]
    # Create a combined canvas holding all gradcams from the different layers
    # (2 rows x 3 tiles; NOTE(review): assumes num_layers <= 5 — confirm).
    out_image = np.zeros((image_size*2,image_size*3,3), np.uint8)
    # top-left tile shows the original image
    # NOTE(review): assumes h <= 2*image_size and w <= 3*image_size.
    out_image[0:h,0:w,:] = src_image
    display_row = 0
    display_col = 0
    for i in range(0,num_layers):
        # relative position of this level's tile in the merged display image;
        # col 0 of row 0 is taken by the source image, so the first heatmap
        # lands in col 1
        display_col += 1
        if display_col == 3:
            display_row = 1
            display_col = 0
        with tf.GradientTape() as tape:
            # forward pass through this level's feature extractor and head
            last_conv_layer_output = conv_layer_out[i](image)
            preds = pred_models[i](last_conv_layer_output)
            # scores of the class being visualized (index args.viz_cls)
            top_class_channel = preds[:, :, top_pred_index]
        # use automatic differentiation to compute the gradients
        grads = tape.gradient(top_class_channel, last_conv_layer_output)
        # compute the guided gradients: keep only positive-gradient locations
        castConvOutputs = tf.math.abs(last_conv_layer_output)
        castGrads = tf.cast(grads > 0, "float32")
        guidedGrads = castConvOutputs * castGrads * grads
        # drop the batch dimension (the batch holds a single image)
        convOutputs = last_conv_layer_output[0]
        guidedGrads = guidedGrads[0]
        # compute the average of the gradient values, and using them
        # as weights, compute the ponderation of the filters with
        # respect to the weights
        weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
        cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)
        heatmap = cv2.resize(cam.numpy(), (image_sizes[phi], image_sizes[phi]))
        # normalize the heatmap such that all values lie in the range
        # [0, 1], scale the resulting values to the range [0, 255],
        # and then convert to an unsigned 8-bit integer
        eps = 0.000001  # avoids division by zero for a constant heatmap
        numer = heatmap - np.min(heatmap)
        denom = (heatmap.max() - heatmap.min()) + eps
        heatmap = numer / denom
        heatmap = (heatmap * 255).astype("uint8")
        # We use jet colormap to colorize heatmap
        jet = cm.get_cmap("jet")
        # We use RGB values of the colormap
        jet_colors = jet(np.arange(256))[:, :3]
        jet_heatmap = jet_colors[heatmap]
        # We create an image with RGB colorized heatmap
        jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)
        jet_heatmap = jet_heatmap.resize((image_sizes[phi], image_sizes[phi]))
        jet_heatmap = keras.preprocessing.image.img_to_array(jet_heatmap)
        # exchange b and r channels
        jet_heatmap = jet_heatmap[:, :, ::-1]
        # Superimpose the heatmap on original image
        superimposed_img = jet_heatmap * 0.3 + img
        superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)
        # paste this level's overlay into its tile of the mosaic
        out_image[display_row*image_size:(display_row+1)*image_size,
            display_col*image_size:(display_col+1)*image_size,:] = superimposed_img
    cv2.imwrite("out.jpg",out_image)


if __name__ == '__main__':
    main()
| [
"utils.preprocess_image",
"cv2.imwrite",
"tensorflow.math.abs",
"argparse.ArgumentParser",
"tensorflow.multiply",
"model_modified.efficientdet_mod",
"tensorflow.keras.preprocessing.image.array_to_img",
"numpy.zeros",
"tensorflow.GradientTape",
"numpy.expand_dims",
"numpy.min",
"tensorflow.redu... | [((548, 638), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Gradcam visualization script for Efficientdet."""'}), "(description=\n 'Gradcam visualization script for Efficientdet.')\n", (571, 638), False, 'import argparse\n'), ((1693, 1812), 'model_modified.efficientdet_mod', 'efficientdet_mod', ([], {'phi': 'phi', 'weighted_bifpn': 'weighted_bifpn', 'num_classes': 'num_classes', 'score_threshold': 'score_threshold'}), '(phi=phi, weighted_bifpn=weighted_bifpn, num_classes=\n num_classes, score_threshold=score_threshold)\n', (1709, 1812), False, 'from model_modified import efficientdet_mod\n'), ((2108, 2130), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2118, 2130), False, 'import cv2\n'), ((2258, 2304), 'utils.preprocess_image', 'preprocess_image', (['image'], {'image_size': 'image_size'}), '(image, image_size=image_size)\n', (2274, 2304), False, 'from utils import preprocess_image\n'), ((2347, 2392), 'tensorflow.keras.preprocessing.image.img_to_array', 'keras.preprocessing.image.img_to_array', (['image'], {}), '(image)\n', (2385, 2392), False, 'from tensorflow import keras\n'), ((2528, 2583), 'numpy.zeros', 'np.zeros', (['(image_size * 2, image_size * 3, 3)', 'np.uint8'], {}), '((image_size * 2, image_size * 3, 3), np.uint8)\n', (2536, 2583), True, 'import numpy as np\n'), ((5334, 5367), 'cv2.imwrite', 'cv2.imwrite', (['"""out.jpg"""', 'out_image'], {}), "('out.jpg', out_image)\n", (5345, 5367), False, 'import cv2\n'), ((2407, 2436), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2421, 2436), True, 'import numpy as np\n'), ((3329, 3364), 'tensorflow.math.abs', 'tf.math.abs', (['last_conv_layer_output'], {}), '(last_conv_layer_output)\n', (3340, 3364), True, 'import tensorflow as tf\n'), ((3386, 3415), 'tensorflow.cast', 'tf.cast', (['(grads > 0)', '"""float32"""'], {}), "(grads > 0, 'float32')\n", (3393, 3415), True, 'import tensorflow as 
tf\n'), ((3762, 3802), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['guidedGrads'], {'axis': '(0, 1)'}), '(guidedGrads, axis=(0, 1))\n', (3776, 3802), True, 'import tensorflow as tf\n'), ((4446, 4464), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (4457, 4464), True, 'import matplotlib.cm as cm\n'), ((4686, 4737), 'tensorflow.keras.preprocessing.image.array_to_img', 'keras.preprocessing.image.array_to_img', (['jet_heatmap'], {}), '(jet_heatmap)\n', (4724, 4737), False, 'from tensorflow import keras\n'), ((4841, 4892), 'tensorflow.keras.preprocessing.image.img_to_array', 'keras.preprocessing.image.img_to_array', (['jet_heatmap'], {}), '(jet_heatmap)\n', (4879, 4892), False, 'from tensorflow import keras\n'), ((5113, 5169), 'tensorflow.keras.preprocessing.image.array_to_img', 'keras.preprocessing.image.array_to_img', (['superimposed_img'], {}), '(superimposed_img)\n', (5151, 5169), False, 'from tensorflow import keras\n'), ((2909, 2926), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2924, 2926), True, 'import tensorflow as tf\n'), ((3832, 3865), 'tensorflow.multiply', 'tf.multiply', (['weights', 'convOutputs'], {}), '(weights, convOutputs)\n', (3843, 3865), True, 'import tensorflow as tf\n'), ((4215, 4230), 'numpy.min', 'np.min', (['heatmap'], {}), '(heatmap)\n', (4221, 4230), True, 'import numpy as np\n'), ((4538, 4552), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (4547, 4552), True, 'import numpy as np\n')] |
import os
import subprocess
import argparse
import torch
import json
# import h5py
import gzip, csv
import numpy as np
from tqdm import tqdm
from torch.nn.utils.rnn import pad_sequence
from transformers import *
def get_sentence_features(batches, tokenizer, model, device, maxlen=500):
    """Mean-pool token embeddings into one vector per input sentence.

    Tokenizes ``batches`` (a list of strings) with padding/truncation,
    runs the encoder, and averages each sequence's token embeddings over
    the positions whose attention mask is 1.

    Args:
        batches: list of sentences to embed.
        tokenizer: tokenizer providing ``batch_encode_plus``.
        model: encoder whose first output is (batch, seq_len, hidden).
        device: torch device for the input tensors.
        maxlen: truncation length for tokenization.

    Returns:
        Tensor of shape ``(len(batches), hidden_size)``.
    """
    encoded = tokenizer.batch_encode_plus(
        batches,
        padding=True,
        return_attention_mask=True,
        return_token_type_ids=True,
        truncation=True,
        max_length=maxlen,
    )
    tensors = {
        key: torch.tensor(encoded[key], device=device)
        for key in ('input_ids', 'attention_mask', 'token_type_ids')
    }
    # First element of the model output is (batch, seq_len, hidden_size).
    token_states = model(**tensors)[0]
    # Zero out padded positions, then divide by the real token count.
    mask = tensors['attention_mask'].unsqueeze(-1)
    summed = (mask * token_states).sum(1)
    counts = tensors['attention_mask'].sum(1).unsqueeze(-1)
    return summed / counts
def hdf5_create_dataset(group, input_file, fp16=False):
    """Embed every line of ``input_file`` and store each sentence embedding
    as an individual dataset (named by its 0-based line index) in the h5py
    ``group``.

    Relies on the module-level ``tokenizer``, ``model``, ``device`` and
    ``batch_size`` globals set up in the ``__main__`` section.

    Args:
        group: open h5py group to write datasets into.
        input_file: path to a text file, one sentence per line.
        fp16: if True, store embeddings as float16 instead of float32.
    """
    global tokenizer, model, device
    print(f'precompute embeddings for {input_file}')
    pbar = tqdm()
    dtype = 'float32' if not fp16 else 'float16'
    cur = 0

    def _flush(batch):
        # Embed one batch and append every row as its own dataset.
        # (The original duplicated this body for the final partial batch
        # and never advanced the progress bar for it.)
        nonlocal cur
        with torch.no_grad():
            embeddings = get_sentence_features(batch, tokenizer, model, device)
        for embed in embeddings:
            embed = embed.cpu().numpy()
            if fp16:
                embed = embed.astype('float16')
            group.create_dataset(f'{cur}', embed.shape, dtype=dtype, data=embed)
            cur += 1
        pbar.update(len(batch))

    with open(input_file, 'r') as fin:
        batch = []
        for line in fin:
            batch.append(line.strip())
            if len(batch) == batch_size:
                _flush(batch)
                batch = []
        if batch:  # leftover lines that did not fill a full batch
            _flush(batch)
def jsonl_create_dataset(output_file, input_file, fp16=False):
    """Embed every line of ``input_file`` and write one JSON object per
    sentence ({line_index: embedding_list}) to ``output_file`` (JSONL).

    Relies on the module-level ``tokenizer``, ``model``, ``device`` and
    ``batch_size`` globals set up in the ``__main__`` section.

    Args:
        output_file: path of the JSONL file to create.
        input_file: path to a text file, one sentence per line.
        fp16: if True, cast embeddings to float16 before serializing.
    """
    global tokenizer, model, device
    print(f'precompute embeddings for {input_file}')
    pbar = tqdm()
    cur = 0
    # `with` guarantees the output handle is closed even on error
    # (the original leaked it if an exception was raised mid-run).
    with open(output_file, 'w') as fout, open(input_file, 'r') as fin:

        def _flush(batch):
            # Embed one batch and append one JSON line per sentence.
            # (The original duplicated this body for the final partial batch
            # and never advanced the progress bar for it.)
            nonlocal cur
            with torch.no_grad():
                embeddings = get_sentence_features(batch, tokenizer, model, device)
            for embed in embeddings:
                embed = embed.cpu().numpy()
                if fp16:
                    embed = embed.astype('float16')
                fout.write(json.dumps({cur: embed.tolist()}))
                fout.write('\n')
                cur += 1
            pbar.update(len(batch))

        batch = []
        for line in fin:
            batch.append(line.strip())
            if len(batch) == batch_size:
                _flush(batch)
                batch = []
        if batch:  # leftover lines that did not fill a full batch
            _flush(batch)
def csv_create_dataset(output_file, input_file, fp16=False):
    """Embed every line of ``input_file`` and write the embeddings to a
    gzipped CSV with a single ``embedding`` column, one row per sentence.

    Relies on the module-level ``tokenizer``, ``model``, ``device`` and
    ``batch_size`` globals set up in the ``__main__`` section.

    Args:
        output_file: path of the ``.csv.gz`` file to create.
        input_file: path to a text file, one sentence per line.
        fp16: if True, cast embeddings to float16 before serializing.
    """
    global tokenizer, model, device
    print(f'precompute embeddings for {input_file}')
    pbar = tqdm()
    # `with` guarantees the gzip handle is closed even on error
    # (the original leaked it if an exception was raised mid-run).
    with gzip.open(output_file, 'wt') as fout, open(input_file, 'r') as fin:
        writer = csv.DictWriter(fout, fieldnames=['embedding'])
        writer.writeheader()

        def _flush(batch):
            # Embed one batch and append one CSV row per sentence.
            # (The original duplicated this body for the final partial batch
            # and never advanced the progress bar for it.)
            with torch.no_grad():
                embeddings = get_sentence_features(batch, tokenizer, model, device)
            for embed in embeddings:
                embed = embed.cpu().numpy()
                if fp16:
                    embed = embed.astype('float16')
                writer.writerow({'embedding': embed.tolist()})
            pbar.update(len(batch))

        batch = []
        for line in fin:
            batch.append(line.strip())
            if len(batch) == batch_size:
                _flush(batch)
                batch = []
        if batch:  # leftover lines that did not fill a full batch
            _flush(batch)
def np_create_dataset(output_file, input_file, fp16=False):
    """Embed every line of ``input_file`` into a numpy memmap of shape
    ``(num_lines, hidden_size)`` stored at ``output_file``.

    Relies on the module-level ``tokenizer``, ``model``, ``device`` and
    ``batch_size`` globals set up in the ``__main__`` section.

    Args:
        output_file: path of the ``.npy`` memmap file to create.
        input_file: path to a text file, one sentence per line.
        fp16: if True, store embeddings as float16 instead of float32.
    """
    global tokenizer, model, device
    print(f'precompute embeddings for {input_file}')
    pbar = tqdm()
    # Count lines in pure Python instead of shelling out to `wc -l`
    # (portable, no subprocess dependency).
    with open(input_file, 'r') as fin:
        dstore_size = sum(1 for _ in fin)
    dtype = 'float16' if fp16 else 'float32'
    print(f'{dstore_size} examples')
    dstore = np.memmap(output_file,
        dtype=dtype,
        mode='w+',
        shape=(dstore_size, model.config.hidden_size),
        )
    cur = 0

    def _flush(batch):
        # Embed one batch and copy it into the memmap slice.
        # (The original duplicated this body for the final partial batch,
        # skipping both the progress update and the hidden-size check.)
        nonlocal cur
        with torch.no_grad():
            embeddings = get_sentence_features(batch, tokenizer, model, device)
        assert model.config.hidden_size == embeddings.size(1)
        dstore[cur:cur + embeddings.size(0)] = embeddings.cpu().numpy().astype(dtype)
        cur += embeddings.size(0)
        pbar.update(len(batch))

    with open(input_file, 'r') as fin:
        batch = []
        for line in fin:
            batch.append(line.strip())
            if len(batch) == batch_size:
                _flush(batch)
                batch = []
        if batch:  # leftover lines that did not fill a full batch
            _flush(batch)
    dstore.flush()  # make sure the memmap contents hit disk
# Script entry point: embed every split of a dataset with (sentence-)BERT
# and store the embeddings as one .npy memmap per split.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='pre-compute the Bert embeddings')
    parser.add_argument('dataset', type=str, help='the path to the dataset name')
    parser.add_argument('--split', type=str, default=None,
        help='if specified, only compute for this split')
    parser.add_argument('--fp32', action='store_true', default=False,
        help='whether to use half float point. It uses half float by default')
    parser.add_argument('--sent-bert', action='store_true', default=False,
        help='whether to use sentence-BERT')
    args = parser.parse_args()
    args.cuda = torch.cuda.is_available()
    save_dir = f"precompute_embedding_datasets/{args.dataset}"
    os.makedirs(save_dir, exist_ok=True)
    device = "cuda" if args.cuda else "cpu"
    # plain BERT by default; --sent-bert selects the mean-pooled NLI model
    model_name = 'bert-base-uncased' if not args.sent_bert else 'sentence-transformers/bert-base-nli-mean-tokens'
    model_short = 'bert' if not args.sent_bert else 'sentbert'
    # NOTE: model/tokenizer/device/batch_size are module-level globals that
    # the *_create_dataset helpers above read directly.
    model = AutoModel.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model.to(device)
    model.eval()
    # process one named split, or all standard splits by default
    gname_list = [args.split] if args.split is not None else ['valid', 'test', 'template', 'train']
    batch_size = 128
    for gname in gname_list:
        if os.path.isfile(f'datasets/{args.dataset}/{gname}.txt'):
            np_create_dataset(os.path.join(save_dir, f'{args.dataset}.{model_short}.{gname}.npy'),
                    os.path.join(f'datasets/{args.dataset}/{gname}.txt'), not args.fp32)
    # Alternative output formats (gzipped CSV / HDF5), kept for reference:
    # for gname in gname_list:
    #     if os.path.isfile(f'datasets/{args.dataset}/{gname}.txt'):
    #         csv_create_dataset(os.path.join(save_dir, f'{args.dataset}.{model_short}.{gname}.csv.gz'),
    #                 os.path.join(f'datasets/{args.dataset}/{gname}.txt'), args.fp16)
    # for gname in gname_list:
    #     if os.path.isfile(f'datasets/{args.dataset}/{gname}.txt'):
    #         with h5py.File(os.path.join(save_dir, f'{args.dataset}.{model_short}.{gname}.hdf5'), 'w') as fout:
    #             hdf5_create_dataset(fout, os.path.join(f'datasets/{args.dataset}/{gname}.txt'))
| [
"csv.DictWriter",
"argparse.ArgumentParser",
"gzip.open",
"os.makedirs",
"tqdm.tqdm",
"subprocess.run",
"numpy.memmap",
"os.path.join",
"os.path.isfile",
"torch.tensor",
"torch.cuda.is_available",
"torch.no_grad"
] | [((486, 541), 'torch.tensor', 'torch.tensor', (["features['attention_mask']"], {'device': 'device'}), "(features['attention_mask'], device=device)\n", (498, 541), False, 'import torch\n'), ((558, 608), 'torch.tensor', 'torch.tensor', (["features['input_ids']"], {'device': 'device'}), "(features['input_ids'], device=device)\n", (570, 608), False, 'import torch\n'), ((628, 683), 'torch.tensor', 'torch.tensor', (["features['token_type_ids']"], {'device': 'device'}), "(features['token_type_ids'], device=device)\n", (640, 683), False, 'import torch\n'), ((1192, 1198), 'tqdm.tqdm', 'tqdm', ([], {}), '()\n', (1196, 1198), False, 'from tqdm import tqdm\n'), ((2606, 2612), 'tqdm.tqdm', 'tqdm', ([], {}), '()\n', (2610, 2612), False, 'from tqdm import tqdm\n'), ((3988, 3994), 'tqdm.tqdm', 'tqdm', ([], {}), '()\n', (3992, 3994), False, 'from tqdm import tqdm\n'), ((4006, 4034), 'gzip.open', 'gzip.open', (['output_file', '"""wt"""'], {}), "(output_file, 'wt')\n", (4015, 4034), False, 'import gzip, csv\n'), ((4116, 4159), 'csv.DictWriter', 'csv.DictWriter', (['fout'], {'fieldnames': 'fieldnames'}), '(fout, fieldnames=fieldnames)\n', (4130, 4159), False, 'import gzip, csv\n'), ((5458, 5464), 'tqdm.tqdm', 'tqdm', ([], {}), '()\n', (5462, 5464), False, 'from tqdm import tqdm\n'), ((5513, 5574), 'subprocess.run', 'subprocess.run', (["['wc', '-l', input_file]"], {'capture_output': '(True)'}), "(['wc', '-l', input_file], capture_output=True)\n", (5527, 5574), False, 'import subprocess\n'), ((5733, 5831), 'numpy.memmap', 'np.memmap', (['output_file'], {'dtype': 'dtype', 'mode': '"""w+"""', 'shape': '(dstore_size, model.config.hidden_size)'}), "(output_file, dtype=dtype, mode='w+', shape=(dstore_size, model.\n config.hidden_size))\n", (5742, 5831), True, 'import numpy as np\n'), ((6842, 6912), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""pre-compute the Bert embeddings"""'}), "(description='pre-compute the Bert embeddings')\n", (6865, 6912), False, 
'import argparse\n'), ((7429, 7454), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7452, 7454), False, 'import torch\n'), ((7524, 7560), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (7535, 7560), False, 'import os\n'), ((8095, 8149), 'os.path.isfile', 'os.path.isfile', (['f"""datasets/{args.dataset}/{gname}.txt"""'], {}), "(f'datasets/{args.dataset}/{gname}.txt')\n", (8109, 8149), False, 'import os\n'), ((2004, 2019), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2017, 2019), False, 'import torch\n'), ((3412, 3427), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3425, 3427), False, 'import torch\n'), ((4914, 4929), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4927, 4929), False, 'import torch\n'), ((6571, 6586), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6584, 6586), False, 'import torch\n'), ((8181, 8248), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{args.dataset}.{model_short}.{gname}.npy"""'], {}), "(save_dir, f'{args.dataset}.{model_short}.{gname}.npy')\n", (8193, 8248), False, 'import os\n'), ((8266, 8318), 'os.path.join', 'os.path.join', (['f"""datasets/{args.dataset}/{gname}.txt"""'], {}), "(f'datasets/{args.dataset}/{gname}.txt')\n", (8278, 8318), False, 'import os\n'), ((1416, 1431), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1429, 1431), False, 'import torch\n'), ((2865, 2880), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2878, 2880), False, 'import torch\n'), ((4403, 4418), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4416, 4418), False, 'import torch\n'), ((6139, 6154), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6152, 6154), False, 'import torch\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba, et. al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
- rnn_mode - the low level implementation of lstm cell: one of CUDNN,
BASIC, or BLOCK, representing cudnn_lstm, basic_lstm, and
lstm_block_cell classes.
The data required for this example is in the data/ dir of the
PTB dataset from <NAME>'s webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
To run:
$ python ptb_word_lm.py --data_path=simple-examples/data/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
import reader
import util
from tensorflow.python.client import device_lib
from tensorflow.python import debug as tf_debug
from ptb_input import PTBInput
import config as cf
import flags
from ptb_model import PTBModel
def run_epoch(session, model, eval_op=None, verbose=False):
  """Runs the model on the given data.

  Args:
    session: active tf.Session in which the model's graph lives.
    model: model object exposing cost, initial_state, final_state and
      input (with epoch_size, num_steps, batch_size attributes).
    eval_op: optional extra op to run each step (e.g. the training op);
      only its side effect is used, its result is discarded.
    verbose: when True, print perplexity and word/sec roughly ten times
      per epoch.

  Returns:
    Perplexity over the epoch: exp(total cost / total unrolled steps).
  """
  start_time = time.time()
  costs = 0.0
  iters = 0
  state = session.run(model.initial_state)
  fetches = {
      "cost": model.cost,
      "final_state": model.final_state,
  }
  if eval_op is not None:
    fetches["eval_op"] = eval_op
  for step in range(model.input.epoch_size):
    # Feed the LSTM state produced by the previous batch back in, so the
    # recurrent state is carried across the whole epoch (truncated BPTT).
    feed_dict = {}
    for i, (c, h) in enumerate(model.initial_state):
      feed_dict[c] = state[i].c
      feed_dict[h] = state[i].h
    vals = session.run(fetches, feed_dict)
    cost = vals["cost"]
    state = vals["final_state"]
    costs += cost
    iters += model.input.num_steps
    # Report progress ~10 times per epoch (the "== 10" offset skips step 0).
    if verbose and step % (model.input.epoch_size // 10) == 10:
      print("%.3f perplexity: %.3f speed: %.0f wps" %
            (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
             iters * model.input.batch_size * max(1, flags.FLAGS.num_gpus) /
             (time.time() - start_time)))
  return np.exp(costs / iters)
def main(_):
  """Builds the PTB train/valid/test graphs, then trains and evaluates.

  The three models share variables (variable_scope "Model", reuse for
  valid/test). The graph is exported as a MetaGraph and re-imported into a
  fresh graph — required so the multi-GPU auto-parallel rewrite can be
  applied before the session is created.
  """
  if not flags.FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")
  gpus = [
      x.name for x in device_lib.list_local_devices() if x.device_type == "GPU"
  ]
  if flags.FLAGS.num_gpus > len(gpus):
    raise ValueError(
        "Your machine has only %d gpus "
        "which is less than the requested --num_gpus=%d."
        % (len(gpus), flags.FLAGS.num_gpus))
  raw_data = reader.ptb_raw_data(flags.FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data
  config = cf.get_config()
  # Evaluation runs one word at a time on a single sequence.
  eval_config = cf.get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1
  with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
    with tf.name_scope("Train"):
      train_input = PTBInput(config=config, data=train_data, name="TrainInput")
      with tf.variable_scope("Model", reuse=None, initializer=initializer):
        m = PTBModel(is_training=True, config=config, input_=train_input)
      tf.summary.scalar("Training Loss", m.cost)
      tf.summary.scalar("Learning Rate", m.lr)
    with tf.name_scope("Valid"):
      valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
      # reuse=True: validation shares the training model's weights.
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
      tf.summary.scalar("Validation Loss", mvalid.cost)
    with tf.name_scope("Test"):
      test_input = PTBInput(
          config=eval_config, data=test_data, name="TestInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mtest = PTBModel(is_training=False, config=eval_config, input_=test_input)
    models = {"Train": m, "Valid": mvalid, "Test": mtest}
    # Name the ops so they can be recovered after the MetaGraph round-trip.
    for name, model in models.items():
      model.export_ops(name)
    metagraph = tf.train.export_meta_graph()
    if tf.__version__ < "1.1.0" and flags.FLAGS.num_gpus > 1:
      raise ValueError("num_gpus > 1 is not supported for TensorFlow versions "
                       "below 1.1.0")
    soft_placement = False
    if flags.FLAGS.num_gpus > 1:
      soft_placement = True
      # Rewrite the exported graph to replicate the model across GPUs.
      util.auto_parallel(metagraph, m)
  with tf.Graph().as_default():
    tf.train.import_meta_graph(metagraph)
    for model in models.values():
      model.import_ops()
    sv = tf.train.Supervisor(logdir=flags.FLAGS.save_path)
    config_proto = tf.ConfigProto(allow_soft_placement=soft_placement)
    with sv.managed_session(config=config_proto) as session:
      for i in range(config.max_max_epoch):
        # Keep the initial learning rate for max_epoch epochs, then decay.
        lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)
        print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
        train_perplexity = run_epoch(session, m, eval_op=m.train_op, verbose=True)
        print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
        valid_perplexity = run_epoch(session, mvalid)
        print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
      test_perplexity = run_epoch(session, mtest)
      print("Test Perplexity: %.3f" % test_perplexity)
      if flags.FLAGS.save_path:
        print("Saving model to %s." % flags.FLAGS.save_path)
        sv.saver.save(session, flags.FLAGS.save_path, global_step=sv.global_step)
# BUG FIX: the previous `main(_)` call referenced `_`, which is undefined at
# module scope and raised a NameError on import. Guard the entry point and
# pass None explicitly (main ignores its positional argument).
if __name__ == "__main__":
  main(None)
| [
"tensorflow.Graph",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.variable_scope",
"reader.ptb_raw_data",
"numpy.exp",
"config.get_config",
"tensorflow.train.import_meta_graph",
"tensorflow.name_scope",
"tensorflow.train.Supervisor",
"ptb_input.PTBInput",
"tensorflow.Conf... | [((2802, 2813), 'time.time', 'time.time', ([], {}), '()\n', (2811, 2813), False, 'import time\n'), ((3684, 3705), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (3690, 3705), True, 'import numpy as np\n'), ((4137, 4179), 'reader.ptb_raw_data', 'reader.ptb_raw_data', (['flags.FLAGS.data_path'], {}), '(flags.FLAGS.data_path)\n', (4156, 4179), False, 'import reader\n'), ((4242, 4257), 'config.get_config', 'cf.get_config', ([], {}), '()\n', (4255, 4257), True, 'import config as cf\n'), ((4274, 4289), 'config.get_config', 'cf.get_config', ([], {}), '()\n', (4287, 4289), True, 'import config as cf\n'), ((4398, 4466), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-config.init_scale)', 'config.init_scale'], {}), '(-config.init_scale, config.init_scale)\n', (4427, 4466), True, 'import tensorflow as tf\n'), ((5581, 5609), 'tensorflow.train.export_meta_graph', 'tf.train.export_meta_graph', ([], {}), '()\n', (5607, 5609), True, 'import tensorflow as tf\n'), ((5954, 5991), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['metagraph'], {}), '(metagraph)\n', (5980, 5991), True, 'import tensorflow as tf\n'), ((6060, 6109), 'tensorflow.train.Supervisor', 'tf.train.Supervisor', ([], {'logdir': 'flags.FLAGS.save_path'}), '(logdir=flags.FLAGS.save_path)\n', (6079, 6109), True, 'import tensorflow as tf\n'), ((6129, 6180), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'soft_placement'}), '(allow_soft_placement=soft_placement)\n', (6143, 6180), True, 'import tensorflow as tf\n'), ((3856, 3887), 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), '()\n', (3885, 3887), False, 'from tensorflow.python.client import device_lib\n'), ((4477, 4499), 'tensorflow.name_scope', 'tf.name_scope', (['"""Train"""'], {}), "('Train')\n", (4490, 4499), True, 'import tensorflow as tf\n'), ((4521, 4580), 'ptb_input.PTBInput', 
'PTBInput', ([], {'config': 'config', 'data': 'train_data', 'name': '"""TrainInput"""'}), "(config=config, data=train_data, name='TrainInput')\n", (4529, 4580), False, 'from ptb_input import PTBInput\n'), ((4737, 4779), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Training Loss"""', 'm.cost'], {}), "('Training Loss', m.cost)\n", (4754, 4779), True, 'import tensorflow as tf\n'), ((4786, 4826), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Learning Rate"""', 'm.lr'], {}), "('Learning Rate', m.lr)\n", (4803, 4826), True, 'import tensorflow as tf\n'), ((4837, 4859), 'tensorflow.name_scope', 'tf.name_scope', (['"""Valid"""'], {}), "('Valid')\n", (4850, 4859), True, 'import tensorflow as tf\n'), ((4881, 4940), 'ptb_input.PTBInput', 'PTBInput', ([], {'config': 'config', 'data': 'valid_data', 'name': '"""ValidInput"""'}), "(config=config, data=valid_data, name='ValidInput')\n", (4889, 4940), False, 'from ptb_input import PTBInput\n'), ((5103, 5152), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Validation Loss"""', 'mvalid.cost'], {}), "('Validation Loss', mvalid.cost)\n", (5120, 5152), True, 'import tensorflow as tf\n'), ((5163, 5184), 'tensorflow.name_scope', 'tf.name_scope', (['"""Test"""'], {}), "('Test')\n", (5176, 5184), True, 'import tensorflow as tf\n'), ((5205, 5267), 'ptb_input.PTBInput', 'PTBInput', ([], {'config': 'eval_config', 'data': 'test_data', 'name': '"""TestInput"""'}), "(config=eval_config, data=test_data, name='TestInput')\n", (5213, 5267), False, 'from ptb_input import PTBInput\n'), ((5884, 5916), 'util.auto_parallel', 'util.auto_parallel', (['metagraph', 'm'], {}), '(metagraph, m)\n', (5902, 5916), False, 'import util\n'), ((4355, 4365), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4363, 4365), True, 'import tensorflow as tf\n'), ((4592, 4655), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'None', 'initializer': 'initializer'}), "('Model', reuse=None, initializer=initializer)\n", 
(4609, 4655), True, 'import tensorflow as tf\n'), ((4669, 4730), 'ptb_model.PTBModel', 'PTBModel', ([], {'is_training': '(True)', 'config': 'config', 'input_': 'train_input'}), '(is_training=True, config=config, input_=train_input)\n', (4677, 4730), False, 'from ptb_model import PTBModel\n'), ((4952, 5015), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), "('Model', reuse=True, initializer=initializer)\n", (4969, 5015), True, 'import tensorflow as tf\n'), ((5034, 5096), 'ptb_model.PTBModel', 'PTBModel', ([], {'is_training': '(False)', 'config': 'config', 'input_': 'valid_input'}), '(is_training=False, config=config, input_=valid_input)\n', (5042, 5096), False, 'from ptb_model import PTBModel\n'), ((5290, 5353), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), "('Model', reuse=True, initializer=initializer)\n", (5307, 5353), True, 'import tensorflow as tf\n'), ((5371, 5437), 'ptb_model.PTBModel', 'PTBModel', ([], {'is_training': '(False)', 'config': 'eval_config', 'input_': 'test_input'}), '(is_training=False, config=eval_config, input_=test_input)\n', (5379, 5437), False, 'from ptb_model import PTBModel\n'), ((5925, 5935), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5933, 5935), True, 'import tensorflow as tf\n'), ((3532, 3553), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (3538, 3553), True, 'import numpy as np\n'), ((3646, 3657), 'time.time', 'time.time', ([], {}), '()\n', (3655, 3657), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.colors
from pycocotools.coco import COCO
import numpy as np
import skimage.io as sio
from tqdm import tqdm
from PIL import Image
# Path to the COCO-format annotation file (expected in the working directory).
filecoco = "annotations_coco.json"
coco = COCO(filecoco)
# Load every category; the class names double as the label alphabet — a
# pixel's label is index-in-filterClasses + 1 (see mask_generator).
catIDs = coco.getCatIds()
cats = coco.loadCats(catIDs)
nms = [cat["name"] for cat in cats]
filterClasses = nms
# NOTE(review): getCatIds(1) passes 1 positionally as the first parameter
# (category *names* in the pycocotools API), while the apparent intent is to
# filter by category id 1 — confirm against the pycocotools signature.
catIds = coco.getCatIds(1)
imgIds = coco.getImgIds(catIds=catIds)
print("No of images: ", len(imgIds))
# One distinct color per label value 1..7 for the RGB preview images.
cmp = matplotlib.colors.ListedColormap(
    ["tan", "cyan", "pink", "forestgreen", "blue", "purple", "crimson"]
)
def getClassName(classID, cats):
    """Return the category name matching *classID* from a COCO category list.

    Parameters
    ----------
    classID : the COCO category id to look up
    cats : list of category dicts, each with at least "id" and "name" keys

    Returns
    -------
    The matching "name" string, or None if no category has that id.
    """
    # Iterate the dicts directly instead of indexing via range(len(...)).
    for cat in cats:
        if cat["id"] == classID:
            return cat["name"]
    return None
def mask_generator(img_id):
    """Build the per-pixel class-label mask for one COCO image and save it.

    Writes two PNGs named after the image file: an integer label mask under
    labels/ and a color preview (colormap `cmp`) under rgb_labels/.
    Background pixels (no annotation) are mapped to label 1.
    """
    img_info = coco.loadImgs(img_id)[0]
    label_mask = np.zeros((img_info["height"], img_info["width"]))
    ann_ids = coco.getAnnIds(imgIds=img_info["id"], catIds=catIds)
    annotations = coco.loadAnns(ann_ids)
    # Paint each annotation with its class value; np.maximum lets later
    # (higher-valued) classes win where annotations overlap.
    for ann in annotations:
        class_name = getClassName(ann["category_id"], cats)
        pixel_value = filterClasses.index(class_name) + 1
        label_mask = np.maximum(coco.annToMask(ann) * pixel_value, label_mask)
    int_mask = label_mask.astype(np.uint8)
    int_mask[int_mask == 0] = 1  # fold background into label 1
    stem = img_info["file_name"].split("/")[-1][:-5]
    rgb_filename = "rgb_labels/" + stem + ".png"
    label_filename = "labels/" + stem + ".png"
    Image.fromarray(int_mask).save(label_filename)
    plt.imsave(rgb_filename, int_mask, vmin=1, vmax=7, cmap=cmp)
# Drive mask generation for every selected image, with a tqdm progress bar.
print(f"\nCreating RGB and Ground truth labels for {len(imgIds)} images")
for img_id in tqdm(imgIds):
    mask_generator(img_id)
print("Done")
"PIL.Image.fromarray",
"matplotlib.pyplot.imsave",
"tqdm.tqdm",
"pycocotools.coco.COCO",
"numpy.zeros"
] | [((268, 282), 'pycocotools.coco.COCO', 'COCO', (['filecoco'], {}), '(filecoco)\n', (272, 282), False, 'from pycocotools.coco import COCO\n'), ((1723, 1735), 'tqdm.tqdm', 'tqdm', (['imgIds'], {}), '(imgIds)\n', (1727, 1735), False, 'from tqdm import tqdm\n'), ((844, 883), 'numpy.zeros', 'np.zeros', (["(img['height'], img['width'])"], {}), "((img['height'], img['width']))\n", (852, 883), True, 'import numpy as np\n'), ((1572, 1632), 'matplotlib.pyplot.imsave', 'plt.imsave', (['rgb_filename', 'int_mask'], {'vmin': '(1)', 'vmax': '(7)', 'cmap': 'cmp'}), '(rgb_filename, int_mask, vmin=1, vmax=7, cmap=cmp)\n', (1582, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1521, 1546), 'PIL.Image.fromarray', 'Image.fromarray', (['int_mask'], {}), '(int_mask)\n', (1536, 1546), False, 'from PIL import Image\n')] |
from dataclasses import dataclass
import numpy as np
@dataclass(unsafe_hash=True)
class Fraction:
    """An exact rational number, stored in lowest terms with a positive
    denominator.

    Normalizing the sign into the numerator fixes the hash/equality
    contract: previously Fraction(1, -2) == Fraction(-1, 2) compared equal
    while their dataclass-generated hashes (built from the raw fields)
    differed. Comparisons now use exact cross-multiplication instead of
    lossy float conversion.
    """

    _numerator: int    # reduced numerator (carries the sign)
    _denominator: int  # reduced denominator, always > 0

    def __init__(self, numerator, denominator):
        if denominator == 0:
            # Fail fast instead of constructing an invalid fraction.
            raise ZeroDivisionError("denominator must be non-zero")
        if denominator < 0:
            # Canonical form: the denominator is positive, so equal values
            # share one field representation (and therefore one hash).
            numerator, denominator = -numerator, -denominator
        gcd = int(np.gcd(numerator, denominator))  # gcd >= 1 here
        self._numerator = int(numerator // gcd)
        self._denominator = int(denominator // gcd)

    def __add__(self, f):
        """Return self + f as a new reduced Fraction."""
        # Cross-multiply; the constructor reduces the result.
        return Fraction(
            self._numerator * f._denominator + f._numerator * self._denominator,
            self._denominator * f._denominator)

    def __sub__(self, f):
        """Return self - f as a new reduced Fraction."""
        return Fraction(
            self._numerator * f._denominator - f._numerator * self._denominator,
            self._denominator * f._denominator)

    def __mul__(self, f):
        """Return self * f as a new reduced Fraction."""
        return Fraction(self._numerator * f._numerator,
                        self._denominator * f._denominator)

    def __truediv__(self, f):
        """Return self / f; raises ZeroDivisionError if f is zero."""
        if f._numerator == 0:
            raise ZeroDivisionError
        return Fraction(self._numerator * f._denominator,
                        self._denominator * f._numerator)

    # Exact comparisons: with positive denominators, a/b (op) c/d is
    # equivalent to a*d (op) c*b — no float rounding involved.
    def __eq__(self, f):
        return self._numerator * f._denominator == f._numerator * self._denominator

    def __gt__(self, f):
        return self._numerator * f._denominator > f._numerator * self._denominator

    def __ge__(self, f):
        return self._numerator * f._denominator >= f._numerator * self._denominator

    def __lt__(self, f):
        return self._numerator * f._denominator < f._numerator * self._denominator

    def __le__(self, f):
        return self._numerator * f._denominator <= f._numerator * self._denominator

    def __pos__(self):
        return Fraction(self._numerator, self._denominator)

    def __neg__(self):
        return Fraction(-self._numerator, self._denominator)

    def __int__(self):
        # Floor division: rounds toward -inf, matching the original behavior.
        return int(self._numerator // self._denominator)

    def __float__(self):
        return float(self._numerator / self._denominator)
def main():
    """Tiny smoke demo: construct two fractions and print them."""
    first = Fraction(1, 2)
    second = Fraction(3, 4)
    print(first, second)


if __name__ == "__main__":
    main()
| [
"numpy.gcd",
"dataclasses.dataclass"
] | [((56, 83), 'dataclasses.dataclass', 'dataclass', ([], {'unsafe_hash': '(True)'}), '(unsafe_hash=True)\n', (65, 83), False, 'from dataclasses import dataclass\n'), ((316, 346), 'numpy.gcd', 'np.gcd', (['numerator', 'denominator'], {}), '(numerator, denominator)\n', (322, 346), True, 'import numpy as np\n'), ((707, 748), 'numpy.gcd', 'np.gcd', (['self._denominator', 'f._denominator'], {}), '(self._denominator, f._denominator)\n', (713, 748), True, 'import numpy as np\n'), ((1181, 1222), 'numpy.gcd', 'np.gcd', (['self._denominator', 'f._denominator'], {}), '(self._denominator, f._denominator)\n', (1187, 1222), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Generate data for examples"""
# author: <NAME>, <NAME>, Duke University; <NAME>, <NAME>
# Copyright Duke University 2020
# License: MIT
import pandas as pd
import numpy as np
def generate_uniform_given_importance(num_control=1000, num_treated=1000,
                                      num_cov=4, min_val=0,
                                      max_val=3, covar_importance=(4, 3, 2, 1),
                                      bi_mean=2, bi_stdev=1):
    """
    Generate synthetic treatment/control data with discrete-uniform covariates.

    Covariates are drawn i.i.d. from the integers [min_val, max_val)
    (``np.random.randint`` excludes ``max_val``). Control outcomes are linear
    in the covariates with fixed weights ``covar_importance``; treated
    outcomes add a heterogeneous treatment effect x_t @ beta with
    beta ~ N(bi_mean, bi_stdev).

    Parameters
    ----------
    num_control : int, number of control samples
    num_treated : int, number of treated samples
    num_cov : int, number of covariates (must equal len(covar_importance))
    min_val, max_val : int, inclusive lower / exclusive upper covariate bound
    covar_importance : sequence, fixed outcome weight per covariate (a tuple
        default replaces the original mutable list default)
    bi_mean, bi_stdev : float, mean / stdev of the treatment-effect betas

    Returns
    -------
    data_frame : pandas.DataFrame with covariate columns 0..num_cov-1 plus
        'outcome' and 'treated' columns; treated rows come first
    true_catt : ndarray of shape (num_treated,), the true treatment effects
    """
    x_c = np.random.randint(min_val, max_val, size=(num_control, num_cov))
    x_t = np.random.randint(min_val, max_val, size=(num_treated, num_cov))
    weights = np.array(covar_importance)
    y_c = np.dot(x_c, weights)  # control outcome: purely linear in covariates
    # Heterogeneous treatment effect: one random coefficient per covariate.
    treatment_eff_coef = np.random.normal(bi_mean, bi_stdev, size=num_cov)
    treatment_effect = np.dot(x_t, treatment_eff_coef)
    # Treated outcome = baseline linear term + treatment effect (the CATT).
    y_t = np.dot(x_t, weights) + treatment_effect
    true_catt = treatment_effect
    df1 = pd.DataFrame(x_c, columns=range(num_cov))
    df1['outcome'] = y_c
    df1['treated'] = 0
    df2 = pd.DataFrame(x_t, columns=range(num_cov))
    df2['outcome'] = y_t
    df2['treated'] = 1
    # Treated rows first, then renumber the index 0..n-1.
    data_frame = pd.concat([df2, df1])
    data_frame = data_frame.reset_index()
    data_frame = data_frame.drop(['index'], axis=1)
    return data_frame, true_catt
def generate_binomial_given_importance(num_control=1000, num_treated=1000,
                                       num_cov=5, bernoulli_param=0.5,
                                       bi_mean=2, bi_stdev=1,
                                       covar_importance=[4, 3, 2, 1, 0.01]):
    '''
    Generate binary-covariate treatment/control data with fixed covariate
    importances. Covariates are i.i.d. Bernoulli(bernoulli_param); control
    outcomes are linear in the covariates, treated outcomes additionally
    receive a heterogeneous effect x_t @ beta, beta ~ N(bi_mean, bi_stdev).
    Returns (DataFrame with covariates, 'outcome', 'treated'; treated rows
    first) and the true per-unit treatment effects.
    '''
    # Draw the two covariate matrices (control first, matching RNG order).
    controls = np.random.binomial(1, bernoulli_param, size=(num_control, num_cov))
    treated = np.random.binomial(1, bernoulli_param, size=(num_treated, num_cov))
    weights = np.array(covar_importance)
    control_outcome = controls @ weights
    # Random per-covariate treatment coefficients.
    effect_coef = np.random.normal(bi_mean, bi_stdev, size=num_cov)
    true_catt = treated @ effect_coef
    treated_outcome = treated @ weights + true_catt
    frame_t = pd.DataFrame(treated, columns=range(num_cov))
    frame_t['outcome'] = treated_outcome
    frame_t['treated'] = 1
    frame_c = pd.DataFrame(controls, columns=range(num_cov))
    frame_c['outcome'] = control_outcome
    frame_c['treated'] = 0
    combined = pd.concat([frame_t, frame_c]).reset_index().drop(['index'], axis=1)
    return combined, true_catt
def generate_binomial_decay_importance(num_control=1000, num_treated=1000,
                                       num_cov=5, bernoulli_param=0.5,
                                       bi_mean=2, bi_stdev=1):
    '''
    Generate binary-covariate treatment/control data whose covariate
    importances decay geometrically: weight_i = 64 * (1/4)**(i+1), i.e.
    16, 4, 1, 0.25, ... Covariates are i.i.d. Bernoulli(bernoulli_param).
    Returns (DataFrame with covariates, 'outcome', 'treated'; treated rows
    first) and the true per-unit treatment effects.
    '''
    # Draw the two covariate matrices (control first, matching RNG order).
    controls = np.random.binomial(1, bernoulli_param, size=(num_control, num_cov))
    treated = np.random.binomial(1, bernoulli_param, size=(num_treated, num_cov))
    # Geometric decay weights; 0.25**k is an exact power of two, so this
    # matches the original 64 * (1/4)**(i+1) values bit-for-bit.
    decay_weights = 64 * np.power(0.25, np.arange(1, num_cov + 1))
    control_outcome = controls @ decay_weights
    effect_coef = np.random.normal(bi_mean, bi_stdev, size=num_cov)
    true_catt = treated @ effect_coef
    treated_outcome = treated @ decay_weights + true_catt
    frame_t = pd.DataFrame(treated, columns=range(num_cov))
    frame_t['outcome'] = treated_outcome
    frame_t['treated'] = 1
    frame_c = pd.DataFrame(controls, columns=range(num_cov))
    frame_c['outcome'] = control_outcome
    frame_c['treated'] = 0
    combined = pd.concat([frame_t, frame_c]).reset_index().drop(['index'], axis=1)
    return combined, true_catt
| [
"numpy.random.normal",
"numpy.array",
"numpy.dot",
"numpy.random.randint",
"pandas.concat",
"numpy.random.binomial"
] | [((578, 642), 'numpy.random.randint', 'np.random.randint', (['min_val', 'max_val'], {'size': '(num_control, num_cov)'}), '(min_val, max_val, size=(num_control, num_cov))\n', (595, 642), True, 'import numpy as np\n'), ((653, 717), 'numpy.random.randint', 'np.random.randint', (['min_val', 'max_val'], {'size': '(num_treated, num_cov)'}), '(min_val, max_val, size=(num_treated, num_cov))\n', (670, 717), True, 'import numpy as np\n'), ((836, 885), 'numpy.random.normal', 'np.random.normal', (['bi_mean', 'bi_stdev'], {'size': 'num_cov'}), '(bi_mean, bi_stdev, size=num_cov)\n', (852, 885), True, 'import numpy as np\n'), ((909, 940), 'numpy.dot', 'np.dot', (['x_t', 'treatment_eff_coef'], {}), '(x_t, treatment_eff_coef)\n', (915, 940), True, 'import numpy as np\n'), ((1361, 1382), 'pandas.concat', 'pd.concat', (['[df2, df1]'], {}), '([df2, df1])\n', (1370, 1382), True, 'import pandas as pd\n'), ((1973, 2040), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'bernoulli_param'], {'size': '(num_control, num_cov)'}), '(1, bernoulli_param, size=(num_control, num_cov))\n', (1991, 2040), True, 'import numpy as np\n'), ((2081, 2148), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'bernoulli_param'], {'size': '(num_treated, num_cov)'}), '(1, bernoulli_param, size=(num_treated, num_cov))\n', (2099, 2148), True, 'import numpy as np\n'), ((2267, 2316), 'numpy.random.normal', 'np.random.normal', (['bi_mean', 'bi_stdev'], {'size': 'num_cov'}), '(bi_mean, bi_stdev, size=num_cov)\n', (2283, 2316), True, 'import numpy as np\n'), ((2340, 2371), 'numpy.dot', 'np.dot', (['x_t', 'treatment_eff_coef'], {}), '(x_t, treatment_eff_coef)\n', (2346, 2371), True, 'import numpy as np\n'), ((2792, 2813), 'pandas.concat', 'pd.concat', (['[df2, df1]'], {}), '([df2, df1])\n', (2801, 2813), True, 'import pandas as pd\n'), ((3328, 3395), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'bernoulli_param'], {'size': '(num_control, num_cov)'}), '(1, bernoulli_param, size=(num_control, 
num_cov))\n', (3346, 3395), True, 'import numpy as np\n'), ((3436, 3503), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'bernoulli_param'], {'size': '(num_treated, num_cov)'}), '(1, bernoulli_param, size=(num_treated, num_cov))\n', (3454, 3503), True, 'import numpy as np\n'), ((3674, 3723), 'numpy.random.normal', 'np.random.normal', (['bi_mean', 'bi_stdev'], {'size': 'num_cov'}), '(bi_mean, bi_stdev, size=num_cov)\n', (3690, 3723), True, 'import numpy as np\n'), ((3747, 3778), 'numpy.dot', 'np.dot', (['x_t', 'treatment_eff_coef'], {}), '(x_t, treatment_eff_coef)\n', (3753, 3778), True, 'import numpy as np\n'), ((4191, 4212), 'pandas.concat', 'pd.concat', (['[df2, df1]'], {}), '([df2, df1])\n', (4200, 4212), True, 'import pandas as pd\n'), ((741, 767), 'numpy.array', 'np.array', (['covar_importance'], {}), '(covar_importance)\n', (749, 767), True, 'import numpy as np\n'), ((2172, 2198), 'numpy.array', 'np.array', (['covar_importance'], {}), '(covar_importance)\n', (2180, 2198), True, 'import numpy as np\n'), ((3587, 3605), 'numpy.array', 'np.array', (['dense_bs'], {}), '(dense_bs)\n', (3595, 3605), True, 'import numpy as np\n'), ((1062, 1088), 'numpy.array', 'np.array', (['covar_importance'], {}), '(covar_importance)\n', (1070, 1088), True, 'import numpy as np\n'), ((2493, 2519), 'numpy.array', 'np.array', (['covar_importance'], {}), '(covar_importance)\n', (2501, 2519), True, 'import numpy as np\n'), ((3900, 3918), 'numpy.array', 'np.array', (['dense_bs'], {}), '(dense_bs)\n', (3908, 3918), True, 'import numpy as np\n')] |
import numpy
from scipy import integrate
def create_state_mtx(state, nx, ny, nz, dof):
    """Reshape a flat state vector into an (nx, ny, nz, dof) array.

    The flat layout is dof-fastest, then x, then y, then z: element
    (i, j, k, d) lives at flat index d + i*dof + j*dof*nx + k*dof*nx*ny.

    Parameters
    ----------
    state : array_like with at least nx*ny*nz*dof elements
    nx, ny, nz : int, grid dimensions
    dof : int, degrees of freedom per grid point

    Returns
    -------
    numpy.ndarray of shape (nx, ny, nz, dof) and dtype float64,
    independent of the input (safe to mutate).
    """
    # The flat index order above is exactly a C-ordered (nz, ny, nx, dof)
    # array, so a reshape plus an axis transpose replaces the original
    # quadruple Python loop. astype(float64) both matches the original
    # np.zeros output dtype and forces an independent copy.
    flat = numpy.asarray(state).ravel()[:nx * ny * nz * dof]
    return flat.reshape(nz, ny, nx, dof).transpose(2, 1, 0, 3).astype(numpy.float64)
def create_state_vec(state_mtx, nx, ny, nz, dof):
    """Flatten an (nx, ny, nz, dof) state array into a vector.

    Inverse layout of create_state_mtx: element (i, j, k, d) is written to
    flat index d + i*dof + j*dof*nx + k*dof*nx*ny (d fastest, then x, y, z).

    Parameters
    ----------
    state_mtx : array_like of shape at least (nx, ny, nz, dof)
    nx, ny, nz : int, grid dimensions
    dof : int, degrees of freedom per grid point

    Returns
    -------
    numpy.ndarray of shape (nx*ny*nz*dof,) and dtype float64.
    """
    # Transposing to (nz, ny, nx, dof) and taking a C-order ravel reproduces
    # the original k/j/i/d loop order without the quadruple Python loop.
    # astype(float64) matches the original np.zeros output dtype.
    mtx = numpy.asarray(state_mtx)[:nx, :ny, :nz, :dof]
    return mtx.transpose(2, 1, 0, 3).ravel().astype(numpy.float64)
def create_uniform_coordinate_vector(start, end, nx):
    # Uniform grid of spacing dx spanning [start - dx, end + dx]; np.roll(-2)
    # moves the two leading entries to the back. NOTE(review): this relies on
    # float `numpy.arange` endpoint behavior, whose element count can vary by
    # one for some start/end/nx combinations — confirm downstream assumptions
    # about the vector length.
    dx = (end - start) / nx
    return numpy.roll(numpy.arange(start - dx, end + 2 * dx, dx), -2)
def create_stretched_coordinate_vector(start, end, nx, sigma):
    """Tanh-stretched grid on [0, 1]: points cluster near the domain ends
    for larger sigma. Built by remapping a uniform grid through
    0.5 * (1 + tanh(2*sigma*(x - 0.5)) / tanh(sigma)).
    """
    if start < 0 or end > 1:
        raise ValueError('Grid stretching currently only works for a [0, 1] domain')
    uniform_grid = create_uniform_coordinate_vector(start, end, nx)
    return 0.5 * (1 + numpy.tanh(2 * sigma * (uniform_grid - 0.5)) / numpy.tanh(sigma))
def compute_streamfunction(u, v, x, y):
    """Compute the streamfunction psi from a 2-D velocity field.

    Integrates v along x and u along y with the cumulative trapezoid rule and
    averages the two estimates, anchored so psi = 0 at the first grid point.

    Parameters
    ----------
    u, v : 2-D arrays of shape (len(x), len(y)), velocity components
    x, y : 1-D coordinate vectors

    Returns
    -------
    2-D ndarray of shape (len(y), len(x)) with the streamfunction values.
    """
    # `cumtrapz` was renamed `cumulative_trapezoid` in SciPy 1.6 and removed
    # in SciPy 1.14; fall back to the old name on old SciPy installs.
    try:
        from scipy.integrate import cumulative_trapezoid
    except ImportError:  # pragma: no cover - SciPy < 1.6
        from scipy.integrate import cumtrapz as cumulative_trapezoid
    x, y = numpy.meshgrid(x, y)
    psiv = cumulative_trapezoid(v.T, x, axis=1, initial=0)
    psiu = cumulative_trapezoid(u.T, y, axis=0, initial=0)
    # Average the two independent estimates of psi.
    return ((-psiu + psiv[0]) + (psiv - psiu[:, 0][:, None])) / 2
| [
"scipy.integrate.cumtrapz",
"numpy.tanh",
"numpy.zeros",
"numpy.meshgrid",
"numpy.arange"
] | [((105, 135), 'numpy.zeros', 'numpy.zeros', (['[nx, ny, nz, dof]'], {}), '([nx, ny, nz, dof])\n', (116, 135), False, 'import numpy\n'), ((439, 470), 'numpy.zeros', 'numpy.zeros', (['(nx * ny * nz * dof)'], {}), '(nx * ny * nz * dof)\n', (450, 470), False, 'import numpy\n'), ((1224, 1244), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1238, 1244), False, 'import numpy\n'), ((1257, 1302), 'scipy.integrate.cumtrapz', 'integrate.cumtrapz', (['v.T', 'x'], {'axis': '(1)', 'initial': '(0)'}), '(v.T, x, axis=1, initial=0)\n', (1275, 1302), False, 'from scipy import integrate\n'), ((1314, 1359), 'scipy.integrate.cumtrapz', 'integrate.cumtrapz', (['u.T', 'y'], {'axis': '(0)', 'initial': '(0)'}), '(u.T, y, axis=0, initial=0)\n', (1332, 1359), False, 'from scipy import integrate\n'), ((811, 853), 'numpy.arange', 'numpy.arange', (['(start - dx)', '(end + 2 * dx)', 'dx'], {}), '(start - dx, end + 2 * dx, dx)\n', (823, 853), False, 'import numpy\n'), ((1117, 1150), 'numpy.tanh', 'numpy.tanh', (['(2 * sigma * (x - 0.5))'], {}), '(2 * sigma * (x - 0.5))\n', (1127, 1150), False, 'import numpy\n'), ((1153, 1170), 'numpy.tanh', 'numpy.tanh', (['sigma'], {}), '(sigma)\n', (1163, 1170), False, 'import numpy\n')] |
#!/usr/bin/env python3
import itertools as it
import random
import sys
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
import numpy as np
import seaborn as sns
# Matplotlib rcParams applied by plot_cost_function. NOTE(review):
# "text.usetex": True requires a working LaTeX installation at plot time.
params = {
    "axes.labelsize" : 16,
    "xtick.labelsize" : 12,
    "ytick.labelsize" : 12,
    "text.usetex" : True,
    "font.family" : "sans-serif"
}
def map_polynomial_features(x, degree = 2):
    """
    Map features into all degree-`degree` polynomial terms (with cross terms).

    A bias column of ones is prepended, and all multisets of size `degree`
    drawn from [1, x_1, ..., x_n] are multiplied together; the leading
    all-bias term (1 * ... * 1) is dropped. For degree=2 this yields the
    original features plus all pairwise products and squares.

    Parameters
    --------------------------------------------------
    x : ndarray of shape (n_samples, n_features)
    degree: int the polynomial degree to map features to (default: 2)

    Returns
    --------------------------------------------------
    feature_map: ndarray of shape (n_samples, n_polynomial_terms)
    """
    m = x.shape[0]
    x = np.hstack((np.ones((m, 1)), x))  # prepend the bias column
    feature_map = []
    for row in x:
        combos = it.combinations_with_replacement(row, degree)
        next(combos)  # skip the constant 1*1*...*1 term
        # np.prod replaces np.product, which was removed in NumPy 2.0.
        feature_map.extend(np.prod(term) for term in combos)
    return np.array(feature_map).reshape(m, -1)
def split_train_test(x, y, prop_train = 80, validation = False, seed = None):
    """
    Randomly split a dataset into training/testing (and optionally validation) sets.

    Parameters
    --------------------------------------------------
    x : ndarray of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,) or (n_samples, 1)
    prop_train : int percentage of samples assigned to the training set
    validation : bool when True, split the remaining samples evenly into
        test and validation sets
    seed : int or None seed for NumPy's RNG (None leaves it unseeded)

    Returns
    --------------------------------------------------
    (x_train, x_test, y_train, y_test) when validation is False, otherwise
    (x_train, x_test, x_validation, y_train, y_test, y_validation).
    y outputs always have shape (k, 1).

    Raises
    --------------------------------------------------
    Exception when x and y do not have the same number of samples.
    """
    np.random.seed(seed = seed)
    if x.shape[0] != y.shape[0]:
        raise Exception(f"both x: {x.shape} and y: {y.shape} should be of length n_samples.")
    m = x.shape[0]
    y = y.reshape(-1, 1)
    index = np.arange(m)
    cut_one = int((prop_train * m) / 100)
    np.random.shuffle(index)
    if validation:
        cut_two = int((m - cut_one) / 2)
        # BUG FIX: np.array_split expects *increasing split positions*; the
        # second boundary must be offset from the first (cut_one + cut_two),
        # not the absolute size cut_two — the old call produced an empty
        # test set and an overlapping validation set.
        in_train, in_test, in_validation = np.array_split(
            index,
            indices_or_sections = [cut_one, cut_one + cut_two]
        )
        return x[in_train], x[in_test], x[in_validation], y[in_train], y[in_test], y[in_validation]
    in_train, in_test = np.array_split(
        index,
        indices_or_sections = [cut_one]
    )
    return x[in_train], x[in_test], y[in_train], y[in_test]
def plot_cost_function(cost = None, width = 10.0, height = 6.5):
    """
    Display the cost history of an optimization algorithm (e.g. gradient
    descent) as a line plot, with a dashed line marking the minimum cost.

    Parameters
    --------------------------------------------------
    cost  : ndarray of shape (iterations, 1)
    width : float plot width in inches (default: 10.0)
    height: float plot height in inches (default: 6.5)

    Returns
    --------------------------------------------------
    figure: None displays the cost function plot and returns
    """
    plt.rcParams.update(params)
    figure, axes = plt.subplots(figsize = (width, height))
    iterations = range(len(cost))
    axes = sns.lineplot(x = iterations, y = cost, scalex = True, scaley = True)
    # horizontal marker at the smallest cost reached
    axes.axhline(y = min(cost), color = "r", linewidth = 0.5, linestyle = "--")
    axes.set_ylabel("Cost $J(\\theta)$")
    axes.set_xlabel("Number of Iterations $(t)$")
    # force integer ticks on the iteration axis
    axes.xaxis.set_major_locator(tkr.MaxNLocator(integer = True))
    axes.margins(0.05)
    axes.axis("tight")
    axes.grid(True)
    figure.tight_layout()
    plt.show()
def mean_squared_error(y_prime, y_test):
    """
    Function to calculate the The Mean Squared Error (MSE)

    Parameters
    --------------------------------------------------
    y_prime: ndarray of shape (n_samples, 1)
    y_test : ndarray of shape (n_samples, 1)

    Returns
    --------------------------------------------------
    mse: The Mean Squared Error (MSE)
    """
    m = y_test.shape[0]
    # FIX: the original also read y_test.shape[1] into an unused local `n`,
    # which both cluttered the code and raised IndexError for 1-d targets.
    mse = (1 / m) * np.sum(np.square(y_prime - y_test))
    return mse
def root_mean_squared_error(y_prime, y_test):
    """
    Function to calculate the The Root Mean Squared Error (RMSE)

    Parameters
    --------------------------------------------------
    y_prime: ndarray of shape (n_samples, 1)
    y_test : ndarray of shape (n_samples, 1)

    Returns
    --------------------------------------------------
    rmse: The Root Mean Squared Error (RMSE)
    """
    # RMSE is simply the square root of the MSE
    return np.sqrt(mean_squared_error(y_prime, y_test))
def accuracy(y_prime, y_test):
    """
    Function to calculate the accuracy of a model

    Parameters
    --------------------------------------------------
    y_prime: ndarray of shape (n_samples, 1)
    y_test : ndarray of shape (n_samples, 1)

    Returns
    --------------------------------------------------
    accuracy: The fraction of predictions the model got right
    """
    # flatten both arrays so shapes like (n, 1) and (n,) compare elementwise
    matches = y_test.flatten() == y_prime.flatten()
    return np.mean(matches)
def missing_var_pct(df = None):
    """
    Function that return variables that have missing values and the percentage
    of total observations that are missing

    Parameters:
    --------------------------------------------------
    df: DataFrame

    Returns:
    --------------------------------------------------
    missing : Pandas Series mapping each variable with missing values to its
              missing percentage (rounded to 2 decimals), highest first.
              Empty when the dataframe has no missing values.
    """
    pct_missing = df.isnull().mean().sort_values(ascending = False) * 100
    pct_missing = pct_missing.loc[pct_missing > 0].round(2)
    if len(pct_missing) > 0:
        print(f"{pct_missing}")
    else:
        print("The dataframe has no missing values in any column.")
    # BUG FIX: the docstring promised a Series but the function returned
    # None; return the computed percentages so callers can use them
    # (printing is kept for backward compatibility).
    return pct_missing
def drop_missing_var(df = None, threshold = 0.8):
    """
    Function that removes variables that have missing percentages above a threshold.

    Parameters:
    --------------------------------------------------
    df       : DataFrame
    threshold: float, the threshold for missing percentage value in decimals

    Returns:
    --------------------------------------------------
    df: Pandas DataFrame with variables removed
    """
    # fraction of missing observations per column
    missing_fraction = df.isnull().mean()
    to_drop = df.columns[missing_fraction > threshold].to_list()
    return df.drop(to_drop, axis = 1)
def change_vars_to_categorical(df = None, vars_to_change = None):
    """
    Function that changes all non-numeric variables to categorical datatype.

    Parameters:
    --------------------------------------------------
    df : DataFrame
    vars_to_change: list, optional; when given and non-empty, only these
                    variables are converted instead of every non-numeric one.

    Returns:
    --------------------------------------------------
    df: DataFrame with categorical datatypes converted

    Note: columns are converted on the passed dataframe itself, which is
    also returned.
    """
    # BUG FIX: the default was the mutable literal [] (shared across calls);
    # use None as the sentinel instead. An explicit empty list still falls
    # back to all non-numeric columns, matching the original behavior.
    if vars_to_change:
        cat_vars = vars_to_change
    else:
        cat_vars = df.select_dtypes(exclude = "number").columns.to_list()
    for var in cat_vars:
        df[var] = df[var].astype("category")
    return df
def split_numerical_categorical(df = None):
    """
    Function that creates a list for numerical and categorical variables respectively

    Parameters:
    --------------------------------------------------
    df: DataFrame

    Returns:
    --------------------------------------------------
    num_df: Dataframe of numerical variables only
    cat_df: Dataframe of categorical variables only
    """
    numeric_cols = df.select_dtypes(include = "number").columns.to_list()
    other_cols = df.select_dtypes(exclude = "number").columns.to_list()
    return df[numeric_cols], df[other_cols]
| [
"numpy.product",
"numpy.random.shuffle",
"numpy.ones",
"numpy.square",
"numpy.array_split",
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"matplotlib.ticker.MaxNLocator",
"numpy.random.seed",
"itertools.combinations_with_replacement",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show... | [((1780, 1805), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (1794, 1805), True, 'import numpy as np\n'), ((2088, 2112), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (2105, 2112), True, 'import numpy as np\n'), ((3182, 3209), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (3201, 3209), True, 'import matplotlib.pyplot as plt\n'), ((3224, 3261), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (3236, 3261), True, 'import matplotlib.pyplot as plt\n'), ((3665, 3675), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3673, 3675), True, 'import matplotlib.pyplot as plt\n'), ((2218, 2283), 'numpy.array_split', 'np.array_split', ([], {'ary': 'index', 'indices_or_sections': '[cut_one, cut_two]'}), '(ary=index, indices_or_sections=[cut_one, cut_two])\n', (2232, 2283), True, 'import numpy as np\n'), ((2472, 2528), 'numpy.array_split', 'np.array_split', ([], {'ary': 'index', 'indices_or_sections': '[cut_one]'}), '(ary=index, indices_or_sections=[cut_one])\n', (2486, 2528), True, 'import numpy as np\n'), ((3544, 3573), 'matplotlib.ticker.MaxNLocator', 'tkr.MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (3559, 3573), True, 'import matplotlib.ticker as tkr\n'), ((864, 879), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (871, 879), True, 'import numpy as np\n'), ((964, 1010), 'itertools.combinations_with_replacement', 'it.combinations_with_replacement', (['x[i]', 'degree'], {}), '(x[i], degree)\n', (996, 1010), True, 'import itertools as it\n'), ((1137, 1158), 'numpy.array', 'np.array', (['feature_map'], {}), '(feature_map)\n', (1145, 1158), True, 'import numpy as np\n'), ((4148, 4175), 'numpy.square', 'np.square', (['(y_prime - y_test)'], {}), '(y_prime - y_test)\n', (4157, 4175), True, 'import numpy as np\n'), ((1110, 1123), 'numpy.product', 
'np.product', (['v'], {}), '(v)\n', (1120, 1123), True, 'import numpy as np\n')] |
# Copyright (C) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
#pylint: disable=C0103,W1202,C0302
"""
The module contains the class RoiDetector that allows to detect ROI on frames received
from a video sequence using motion detection + background subtraction approaches.
"""
from collections import namedtuple
import math
import datetime
import logging as log
import cv2
import numpy as np
# stores rectangle as tuple of left_x, top_y, width, and height
Rect = namedtuple("Rect", ["tl_x", "tl_y", "w", "h"])
Point = namedtuple("Point", ["x", "y"])
IncreaseRectParams = namedtuple("IncreaseRectParams", ["increase_cell_coeff", "shift_x", "shift_y"])
CellParams = namedtuple("CellParams", ["cell_height", "cell_aspect_ratio", "cell_overlap",
"num_cells_x", "num_cells_y", "list_v_len"])
CellData = namedtuple("CellData", ["rect", "increased_rect", "list_v", "calculated"])
ConnectedComponent = namedtuple("ConnectedComponent",
["label_id", "mask", "centroid", "rect", "area", "num"])
SHOULD_SHOW = False
def _my_imshow(name, img):
    """Show `img` in window `name`, but only when SHOULD_SHOW is enabled."""
    if not SHOULD_SHOW:
        return
    cv2.imshow(name, img)
SHOULD_SHOW_DEBUG = False
def _dbg_imshow(name, img):
    """Show a debug image; gated by SHOULD_SHOW_DEBUG (and SHOULD_SHOW via _my_imshow)."""
    if not SHOULD_SHOW_DEBUG:
        return
    _my_imshow(name, img)
def _log_work_time(prefix, name_point, begin_work_time):
work_dt = datetime.datetime.now() - begin_work_time
work_dt_ms = int(1000*work_dt.total_seconds())
log.debug("{}: {} = {} ms".format(prefix, name_point, work_dt_ms))
def _get_rect_in_center(image_shape, size):
    """Return a Rect of the given (w, h) size centered in an image of image_shape."""
    frame_h, frame_w = image_shape[:2]
    rect_w, rect_h = size
    return Rect(int(frame_w / 2. - rect_w / 2.),
                int(frame_h / 2. - rect_h / 2.),
                rect_w, rect_h)
def _get_center_fp(rect):
    """Return the floating-point center Point of a rect."""
    tl_x, tl_y, w, h = rect
    return Point(tl_x + w/2., tl_y + h/2.)
def _get_center(rect):
    """Return the center of a rect as an integer Point (coordinates truncated)."""
    center = _get_center_fp(rect)
    return Point(int(center.x), int(center.y))
def _increase_rect(rect, increase_rect_params):
    """
    Return a rect with the same center, scaled by increase_cell_coeff and
    padded by shift_x / shift_y on each side (new sizes rounded up).
    """
    cfg = increase_rect_params
    _, _, w, h = rect
    new_w = math.ceil(w * cfg.increase_cell_coeff + 2*cfg.shift_x)
    new_h = math.ceil(h * cfg.increase_cell_coeff + 2*cfg.shift_y)
    c_x, c_y = _get_center_fp(rect)
    return Rect(math.floor(c_x - new_w / 2.),
                math.floor(c_y - new_h / 2.),
                new_w, new_h)
def get_rect_tl(rect):
    """
    Returns namedtuple Point that is top-left corner of the given
    namedtuple Rect
    """
    tl_x, tl_y = rect[0], rect[1]
    return Point(tl_x, tl_y)
def get_rect_br(rect):
    """
    Returns namedtuple Point that is bottom-right corner of the given
    namedtuple Rect
    """
    # bottom-right is inclusive: the last pixel covered by the rect
    return Point(rect[0] + rect[2] - 1, rect[1] + rect[3] - 1)
#pylint: disable=R0914
def _intersect_rects(rect1, rect2):
    """
    Intersect two rects. Returns None when either input is None, an empty
    Rect(0, 0, 0, 0) when they do not overlap, otherwise the overlap Rect.
    """
    if rect1 is None or rect2 is None:
        return None
    tl1 = get_rect_tl(rect1)
    tl2 = get_rect_tl(rect2)
    br1 = get_rect_br(rect1)
    br2 = get_rect_br(rect2)
    left = max(tl1.x, tl2.x)
    top = max(tl1.y, tl2.y)
    right = min(br1.x, br2.x)
    bottom = min(br1.y, br2.y)
    if right < left or bottom < top:
        return Rect(0, 0, 0, 0)
    return Rect(left, top, right - left + 1, bottom - top + 1)
#pylint: disable=R0914
def _get_union_rects(rect1, rect2):
    """
    Return the smallest Rect containing both rects, or None when either
    input is None.
    """
    if rect1 is None or rect2 is None:
        return None
    tl1 = get_rect_tl(rect1)
    tl2 = get_rect_tl(rect2)
    br1 = get_rect_br(rect1)
    br2 = get_rect_br(rect2)
    left = min(tl1.x, tl2.x)
    top = min(tl1.y, tl2.y)
    right = max(br1.x, br2.x)
    bottom = max(br1.y, br2.y)
    # kept from the original for symmetry with _intersect_rects
    if right < left or bottom < top:
        return Rect(0, 0, 0, 0)
    return Rect(left, top, right - left + 1, bottom - top + 1)
def _get_area_rect(rect):
if rect is None:
return None
assert rect.w >= 0 and rect.h >= 0
return rect.w * rect.h
def _get_iou_rects(rect1, rect2):
    """Intersection-over-union of two rects; None when either input is None."""
    if rect1 is None or rect2 is None:
        return None
    overlap = _intersect_rects(rect1, rect2)
    area1 = _get_area_rect(rect1)
    area2 = _get_area_rect(rect2)
    overlap_area = _get_area_rect(overlap)
    # IoU = intersection / union, union = a1 + a2 - intersection
    return float(overlap_area) / (area1 + area2 - overlap_area)
def _scale_rect(rect, scale):
    """Scale every coordinate of a rect by `scale`, truncating to int."""
    return Rect(*(int(scale * coord) for coord in rect))
def _get_subimage(image, rect):
tl_x, tl_y, w, h = rect
subimage = image[tl_y : tl_y + h, tl_x : tl_x + w, :]
assert subimage.shape[0] == h
assert subimage.shape[1] == w
return subimage.copy()
def _get_median_of_rects(rects):
    """Return the coordinate-wise median Rect of a sequence of rects."""
    top_lefts = [get_rect_tl(r) for r in rects]
    bottom_rights = [get_rect_br(r) for r in rects]
    tl_x = np.median(np.array([p.x for p in top_lefts]))
    tl_y = np.median(np.array([p.y for p in top_lefts]))
    br_x = np.median(np.array([p.x for p in bottom_rights]))
    br_y = np.median(np.array([p.y for p in bottom_rights]))
    # convert inclusive corners back to width/height
    return Rect(tl_x, tl_y, br_x - tl_x + 1, br_y - tl_y + 1)
def _draw_match(match, min_val, max_val, show_size_coeff=10):
    """Normalize a template-match response map, upscale it, and show it (debug only)."""
    if not SHOULD_SHOW:
        return
    # guard against a flat response map (max == min)
    value_range = (max_val - min_val) if max_val > min_val else 1.
    normalized = (match - min_val) / value_range
    rows, cols = normalized.shape[:2]
    normalized = cv2.resize(normalized, (show_size_coeff * cols, show_size_coeff * rows))
    _dbg_imshow("match", normalized)
def _run_match_template_on_rect(image, prev_image, rect, increased_rect):
    """
    Estimate the displacement of cell `rect` between prev_image and image.

    The previous frame's content of `rect` is used as a template and matched
    (squared difference) inside the larger `increased_rect` of the current
    frame; the offset of the best match from the search-area center gives
    the motion vector, returned as a Point of pixel displacements.
    """
    subimage = _get_subimage(image, increased_rect)
    prev_template = _get_subimage(prev_image, rect)
    match = cv2.matchTemplate(subimage, prev_template, cv2.TM_SQDIFF)
    # with TM_SQDIFF the best match is the *minimum* of the response map
    min_val, max_val, min_loc, _ = cv2.minMaxLoc(match)
    dx, dy = min_loc
    template_h, template_w = prev_template.shape[:2]
    subimage_h, subimage_w = subimage.shape[:2]
    # displacement of the matched template center from the search-area center
    v_x = -(subimage_w / 2.) + dx + template_w / 2.
    v_y = -(subimage_h / 2.) + dy + template_h / 2.
    v = Point(v_x, v_y)
    _draw_match(match, min_val, max_val)
    return v
def _draw_arrow(image, pt, v, color1=(0, 255, 0), color2=None):
    """
    Draw the motion vector v anchored at pt: a thick half-length segment in
    color2 over a thin full-length segment in color1 (debug only).
    """
    if not SHOULD_SHOW:
        return
    if color2 is None:
        color2 = color1
    start = np.array(pt)
    vec = np.array(v)
    end_full = (start + vec).astype(np.int32)
    end_half = (start + (vec / 2.)).astype(np.int32)
    cv2.line(image, tuple(start), tuple(end_half), color=color2, thickness=3)
    cv2.line(image, tuple(start), tuple(end_full), color=color1, thickness=1)
def _draw_rect(image, rect, color=(255, 0, 0), thickness=3):
    """Draw a rectangle on the image; no-op unless SHOULD_SHOW, or when rect is None."""
    if not SHOULD_SHOW or rect is None:
        return
    tl_x, tl_y, w, h = rect
    cv2.rectangle(image, (tl_x, tl_y), (tl_x + w, tl_y + h), color, thickness)
def _decrease_image_to_min_side(image, target_min_side):
    """Resize the image so its smaller side becomes target_min_side (dims rounded up)."""
    h, w = image.shape[:2]
    scale = float(target_min_side) / min(h, w)
    new_size = (math.ceil(scale * w), math.ceil(scale * h))
    return cv2.resize(image, new_size)
def _get_median_from_list(list_v, N_median):
    """Return the coordinate-wise median Point of the last N_median vectors in list_v."""
    recent = list_v[-N_median:]
    median_x = np.median(np.array([x for x, y in recent]))
    median_y = np.median(np.array([y for x, y in recent]))
    return Point(median_x, median_y)
def _check_is_rect_valid(rect, frame_shape):
tl_x, tl_y, w, h = rect
frame_h, frame_w = frame_shape[:2]
if tl_x < 0 or tl_y < 0:
return False
br_x = tl_x + w - 1
br_y = tl_y + h - 1
if br_x > frame_w or br_y > frame_h:
return False
return True
#pylint: disable=R0903
class RoiMotionDetector:
    """
    The class estimates regular motion in central region of a frame.

    A grid of num_cells_y x num_cells_x cells is laid out around the frame
    center. For every frame pair, each cell's displacement is measured by
    template matching (_run_match_template_on_rect) and appended to the
    cell's history (list_v). Per-cell medians of the recent vectors are
    then combined into a single overall motion vector, stored in
    self.total_v (None when fewer than half the cells saw motion).
    """
    @staticmethod
    #pylint: disable=R0914
    def _init_cell_data(i, j, frame_shape, cell_params, increase_rect_params):
        """
        Build the CellData for grid cell (i, j): its rect placed relative
        to the frame center plus the enlarged search rect used for template
        matching. Returns None when either rect does not fit the frame.
        """
        frame_h, frame_w = frame_shape[:2]
        frame_cx = frame_w / 2.
        frame_cy = frame_h / 2.
        cell_h = cell_params.cell_height
        cell_w = int(cell_params.cell_aspect_ratio * cell_h)
        # odd cell counts keep the grid symmetric around the frame center
        assert cell_params.num_cells_x % 2 == 1 and cell_params.num_cells_y % 2 == 1
        rel_i = i - cell_params.num_cells_y // 2
        rel_j = j - cell_params.num_cells_x // 2
        cell_cx = frame_cx + rel_j * cell_w
        cell_cy = frame_cy + rel_i * cell_h
        tl_x = math.floor(cell_cx - cell_w / 2.)
        tl_y = math.floor(cell_cy - cell_h / 2.)
        cell_rect = Rect(tl_x, tl_y, cell_w, cell_h)
        cell_increased_rect = _increase_rect(cell_rect, increase_rect_params)
        is_valid = _check_is_rect_valid(cell_rect, frame_shape)
        is_increased_valid = _check_is_rect_valid(cell_increased_rect, frame_shape)
        log.debug("_init_cell_data: (i,j) = {}, (rel_i, rel_j) = {}, cell_rect = {}, "
                "cell_increased_rect = {}, is_valid={}, is_increased_valid={}".format(
                (i, j), (rel_i, rel_j), cell_rect, cell_increased_rect,
                is_valid, is_increased_valid))
        if not is_valid or not is_increased_valid:
            return None
        cell_data = CellData(rect=cell_rect, increased_rect=cell_increased_rect,
                             list_v=[], calculated={"median": None})
        return cell_data
    #pylint: disable=R0913
    def __init__(self, frame_shape, cell_params, increase_rect_params, N_median, min_motion=6):
        """
        frame_shape: (h, w, ...) of the frames to be handled.
        cell_params: CellParams describing the cell grid.
        increase_rect_params: IncreaseRectParams for the search rects.
        N_median: number of latest motion vectors used for the median.
        min_motion: minimal vector norm (pixels) counted as real motion.
        """
        self.frame_shape = frame_shape
        self.cell_params = cell_params
        self.increase_rect_params = increase_rect_params
        self.N_median = N_median
        self.min_motion = min_motion
        # overall motion estimate; Point or None (no reliable motion yet)
        self.total_v = Point(0, 0)
        # maps (i, j) grid coordinates to CellData; cells that do not fit
        # the frame are simply absent from the dict
        self.cells_data = {}
        for i in range(self.cell_params.num_cells_y):
            for j in range(self.cell_params.num_cells_x):
                cell_data = self._init_cell_data(i, j, frame_shape=frame_shape,
                                                 cell_params=cell_params,
                                                 increase_rect_params=increase_rect_params)
                if cell_data is None:
                    continue
                self.cells_data[(i, j)] = cell_data
    def _handle_cell(self, cell_data, frame, prev_frame):
        """Measure this cell's displacement and append it to the bounded history."""
        rect = cell_data.rect
        increased_rect = cell_data.increased_rect
        assert _check_is_rect_valid(rect, frame.shape)
        assert _check_is_rect_valid(increased_rect, frame.shape)
        v = _run_match_template_on_rect(frame, prev_frame, rect, increased_rect)
        log.debug("    v = {}".format(v))
        cell_data.list_v.append(v)
        # keep at most list_v_len vectors per cell
        while len(cell_data.list_v) > self.cell_params.list_v_len:
            del cell_data.list_v[0]
    def _recalculate_median_in_cell(self, cell_data):
        """Cache the median of the cell's last N_median motion vectors."""
        cell_data.calculated["median"] = _get_median_from_list(cell_data.list_v, self.N_median)
        log.debug("_recalculate_median_in_cell: rect = {} median = {}".format(
            cell_data.rect, cell_data.calculated["median"]))
    def _recalculate_medians(self):
        """Recompute the cached median vector for every cell."""
        for i, j in sorted(self.cells_data.keys()):
            log.debug("_recalculate_medians: (i,j)={}".format((i, j)))
            cell_data = self.cells_data[(i, j)]
            self._recalculate_median_in_cell(cell_data)
    def _drop_last_motion_in_cells(self):
        """Discard the newest vector of every cell (used when motion is deemed spurious)."""
        for i, j in sorted(self.cells_data.keys()):
            log.debug("_drop_last_motion_in_cells: (i,j)={}".format((i, j)))
            cell_data = self.cells_data[(i, j)]
            del cell_data.list_v[-1]
            log.debug("now len(list_v) = {}".format(len(cell_data.list_v)))
    def handle_image(self, frame, prev_frame):
        """
        The method receives the current frame and the previous frame
        and updates the field total_v that stores the estimates regular motion
        on the last frames of the video sequence.
        """
        begin_work_time = datetime.datetime.now()
        assert frame.shape == prev_frame.shape
        img_to_show = frame.copy()
        prev_img_to_show = prev_frame.copy()
        num_cells_with_motions = 0
        for i, j in sorted(self.cells_data.keys()):
            log.debug("handle_image: (i,j)={}".format((i, j)))
            cell_data = self.cells_data[(i, j)]
            self._handle_cell(cell_data, frame, prev_frame)
            rect = cell_data.rect
            v = cell_data.list_v[-1]
            if np.linalg.norm(v) >= self.min_motion:
                num_cells_with_motions += 1
            _draw_arrow(prev_img_to_show, _get_center(rect), v, (0, 255, 0))
            _draw_rect(prev_img_to_show, rect, color=(255, 0, 0), thickness=1)
        log.debug("num_cells_with_motions = {}".format(num_cells_with_motions))
        # if fewer than half of the cells moved, treat the measurements as
        # noise: undo them and report no overall motion
        if num_cells_with_motions < len(self.cells_data) // 2:
            self._drop_last_motion_in_cells()
            self.total_v = None
            log.debug("total_v = {}".format(self.total_v))
            return img_to_show, prev_img_to_show
        self._recalculate_medians()
        list_medians = [np.array(cell_data.calculated["median"])
                        for cell_data in self.cells_data.values()
                        if cell_data.calculated.get("median")]
        log.debug("len(list_medians) = {}".format(len(list_medians)))
        # drop per-cell medians whose magnitude is below the motion threshold
        list_medians = [v for v in list_medians if np.linalg.norm(v) >= self.min_motion]
        #
        # Idea for future development:
        # add check that most of the motions are directed like the median of them
        #
        log.debug("after filtering len(list_medians) = {}".format(len(list_medians)))
        if list_medians:
            list_medians = np.array(list_medians)
            log.debug("list_medians =\n%s", str(list_medians))
            # coordinate-wise median over the surviving cell medians
            total_v = np.median(list_medians, axis=0)
            total_v = Point(*total_v.tolist())
        else:
            total_v = Point(0, 0)
        self.total_v = total_v
        log.debug("total_v = {}".format(self.total_v))
        work_time = datetime.datetime.now() - begin_work_time
        work_time_ms = int(1000*work_time.total_seconds())
        log.debug("RoiMotionDetector.handle_image: work_time = {} ms".format(work_time_ms))
        return img_to_show, prev_img_to_show
def _get_subframe_for_motion(frame, vx, vy):
def _get_subframe_from_tl(frame, vx, vy):
assert vx >= 0 and vy >= 0
h, w = frame.shape[:2]
assert vx < w and vy < h
return frame[: h - vy, : w - vx]
def _get_subframe_from_br(frame, vx, vy):
assert vx <= 0 and vy <= 0
return frame[-vy:, -vx:]
vx_p = int((vx + abs(vx)) / 2)
vx_n = int((vx - abs(vx)) / 2)
vy_p = int((vy + abs(vy)) / 2)
vy_n = int((vy - abs(vy)) / 2)
assert vx_p >= 0 and vy_p >= 0
assert vx_n <= 0 and vy_n <= 0
assert vx == vx_p + vx_n
assert vy == vy_p + vy_n
assert abs(vx_p * vx_n) < 1e-8 and abs(vy_p * vy_n) < 1e-8
subframe = _get_subframe_from_tl(frame, vx_p, vy_p)
subframe = _get_subframe_from_br(subframe, vx_n, vy_n)
return subframe
def _move_mask_back_to_frame_size(mask, vx, vy, frame_shape):
assert vx == int(vx)
assert vy == int(vy)
assert len(mask.shape) == 2
mask_h, mask_w = mask.shape[:2]
frame_h, frame_w = frame_shape[:2]
assert frame_h == mask_h + abs(vy)
assert frame_w == mask_w + abs(vx)
if vx != 0:
mask_horiz_shift = np.zeros((mask_h, abs(vx)), mask.dtype)
if vx > 0:
mask = np.hstack((mask_horiz_shift, mask))
else:
mask = np.hstack((mask, mask_horiz_shift))
assert mask.shape[0] == mask_h
assert mask.shape[1] == frame_w
if vy != 0:
mask_vert_shift = np.zeros((abs(vy), frame_w), mask.dtype)
if vy > 0:
mask = np.vstack((mask_vert_shift, mask))
else:
mask = np.vstack((mask, mask_vert_shift))
assert mask.shape[:2] == frame_shape[:2]
return mask
#pylint: disable=R0915
def _get_diff_as_mask(frame, prev_frame, total_v, blur_kernel_size=5, max_diff_to_be_same=10):
    """
    Build a float32 mask (frame-sized, values 0/1) of pixels that stay the
    same under the estimated global motion total_v but differ without it --
    i.e. pixels that actually moved with the scene's regular motion.

    Both frames are blurred before differencing to suppress pixel noise;
    a pixel is "the same" when the summed absolute channel difference is
    <= max_diff_to_be_same.
    """
    begin_work_time = datetime.datetime.now()
    assert total_v is not None
    vx, vy = total_v
    vx = int(vx)
    vy = int(vy)
    # overlap crops: prev shifted by +v vs current shifted by -v, plus the
    # same-position ("nomotion") crop of the previous frame for comparison
    prev_subframe = _get_subframe_for_motion(prev_frame, vx, vy)
    subframe = _get_subframe_for_motion(frame, -vx, -vy)
    prev_subframe_nomotion = _get_subframe_for_motion(prev_frame, -vx, -vy)
    assert subframe.shape == prev_subframe.shape
    subframe = subframe.astype(np.float32)
    prev_subframe = prev_subframe.astype(np.float32)
    prev_subframe_nomotion = prev_subframe_nomotion.astype(np.float32)
    _log_work_time("_get_diff_as_mask", "dt after subframes", begin_work_time)
    subframe = cv2.blur(subframe, (blur_kernel_size, blur_kernel_size))
    prev_subframe = cv2.blur(prev_subframe, (blur_kernel_size, blur_kernel_size))
    prev_subframe_nomotion = cv2.blur(prev_subframe_nomotion, (blur_kernel_size, blur_kernel_size))
    _log_work_time("_get_diff_as_mask", "dt after blur", begin_work_time)
    diff = (subframe - prev_subframe)
    diff_nomotion = (subframe - prev_subframe_nomotion)
    diff_min = np.amin(diff)
    diff_max = np.amax(diff)
    diff_to_show = (diff - diff_min) / (diff_max - diff_min)
    _log_work_time("_get_diff_as_mask", "dt after diff", begin_work_time)
    _dbg_imshow("prev_subframe", prev_subframe/255.)
    _dbg_imshow("subframe", subframe/255.)
    _dbg_imshow("diff", diff_to_show)
    min_diff_nomotion = np.amin(diff_nomotion)
    max_diff_nomotion = np.amax(diff_nomotion)
    _dbg_imshow("diff_nomotion",
                (diff_nomotion-min_diff_nomotion)/(max_diff_nomotion-min_diff_nomotion))
    absdiff = np.abs(diff)
    absdiff_nomotion = np.abs(diff_nomotion)
    # expects 3-channel (BGR) frames; channel differences are summed
    assert absdiff.shape[2] == 3
    absdiff1 = absdiff[:, :, 0] + absdiff[:, :, 1] + absdiff[:, :, 2]
    absdiff_nomotion1 = (absdiff_nomotion[:, :, 0] + absdiff_nomotion[:, :, 1]
                         + absdiff_nomotion[:, :, 2])
    _log_work_time("_get_diff_as_mask", "dt after absdiff", begin_work_time)
    _dbg_imshow("absdiff1 from max", absdiff1 / np.amax(absdiff1))
    _dbg_imshow("absdiff_nomotion1 from max", absdiff_nomotion1 / np.amax(absdiff_nomotion1))
    assert absdiff_nomotion1.shape == absdiff1.shape
    # mask1: pixels matching under the motion; mask2: pixels matching anyway
    mask1 = np.zeros(absdiff1.shape, np.float32)
    mask1[absdiff1 <= max_diff_to_be_same] = 1
    mask2 = np.zeros(absdiff1.shape, np.float32)
    mask2[absdiff_nomotion1 <= max_diff_to_be_same] = 1
    _dbg_imshow("mask1", mask1)
    # NOTE(review): this debug window labelled "mask2" actually displays
    # mask1 -- looks like a copy-paste slip; debug-only, behavior unaffected
    _dbg_imshow("mask2", mask1)
    # keep only pixels explained by the motion and not by a static scene
    mask1[mask2 == 1] = 0
    _dbg_imshow("mask1-mask2", mask1)
    _log_work_time("_get_diff_as_mask", "dt after masking", begin_work_time)
    mask1 = _move_mask_back_to_frame_size(mask1, vx, vy, frame.shape)
    work_time = datetime.datetime.now() - begin_work_time
    work_time_ms = int(1000*work_time.total_seconds())
    log.debug("_get_diff_as_mask: work_time = {} ms".format(work_time_ms))
    return mask1
def _convert_connection_components(retval, labels, stats, centroids, original_mask):
    """
    Convert the raw outputs of cv2.connectedComponentsWithStats into a list
    of ConnectedComponent namedtuples, one per label id (label 0 included).
    """
    assert np.amax(labels) == retval - 1
    components = [None] * retval
    for label_id in range(retval):
        component_mask = np.array(labels == label_id, dtype=np.uint8)
        stats_row = stats[label_id]
        rect = Rect(stats_row[cv2.CC_STAT_LEFT],
                    stats_row[cv2.CC_STAT_TOP],
                    stats_row[cv2.CC_STAT_WIDTH],
                    stats_row[cv2.CC_STAT_HEIGHT])
        centroid = centroids[label_id]
        area = _get_area_rect(rect)
        # count of original-mask foreground pixels inside this component
        num = int(np.sum(original_mask[component_mask == 1]))
        if area > labels.shape[0] * labels.shape[1] / 16:
            # log only components whose bbox covers a noticeable frame fraction
            log.debug("_convert_connection_components: i = {}".format(label_id))
            log.debug("_convert_connection_components: rect = {}".format(rect))
            log.debug("_convert_connection_components: centr = {}".format(centroid))
            log.debug("_convert_connection_components: area = {}".format(area))
            log.debug("_convert_connection_components: num = {}".format(num))
        components[label_id] = ConnectedComponent(label_id=label_id, mask=component_mask,
                                                  centroid=centroid, rect=rect,
                                                  area=area, num=num)
    return components
def _find_threshold(average_mask, min_quantile=0.6):
assert 0 < min_quantile < 0.99
cur_min = np.amin(average_mask)
cur_max = np.amax(average_mask)
if cur_min == cur_max:
return cur_min
hist, bin_edges = np.histogram(average_mask, 20)
assert bin_edges[0] >= 0, "bin_edges={}, min={}, max={}".format(
bin_edges, np.amin(average_mask), np.amax(average_mask))
total_num_el = average_mask.shape[0] * average_mask.shape[1]
num_vals_lt_cur_bin_edge = 0
for i in range(1, len(bin_edges)):
cur_bin_edge = bin_edges[i]
assert cur_bin_edge > 0
num_vals_lt_cur_bin_edge += hist[i-1]
if num_vals_lt_cur_bin_edge >= min_quantile * total_num_el:
return cur_bin_edge
raise RuntimeError("Error of the algorithm -- wrong work with histogram")
def _find_best_bbox_from_motion_mask(average_motion_mask,
                                     quantile=0.6,
                                     max_num_of_best_masks_to_unite=10,
                                     desired_rel_num_pixels_in_united_mask=0.3):
    """
    Extract the ROI bbox from the accumulated motion mask.

    The mask is thresholded at its `quantile` level, split into connected
    components, and the components with the most motion pixels are united
    (at most max_num_of_best_masks_to_unite of them, stopping once they
    cover desired_rel_num_pixels_in_united_mask of the frame). Returns the
    union Rect of the selected components, or None when there are none.
    """
    begin_work_time = datetime.datetime.now()
    quantile_edge = _find_threshold(average_motion_mask, quantile)
    thresholded_mask = np.array(average_motion_mask >= quantile_edge).astype(np.uint8)
    thresholded_mask_to_show = cv2.cvtColor(thresholded_mask*255, cv2.COLOR_GRAY2BGR)
    _dbg_imshow("thresholded mask", thresholded_mask_to_show)
    log.debug("total_el_in mask = {}".format(
        average_motion_mask.shape[0] * average_motion_mask.shape[1]))
    log.debug("num_el gt quantile_edge in mask = {}".format(
        np.transpose(np.nonzero(average_motion_mask >= quantile_edge)).shape))
    retval, labels, stats, centroids = cv2.connectedComponentsWithStats(thresholded_mask)
    connected_components = _convert_connection_components(retval, labels, stats, centroids,
                                                          thresholded_mask)
    # sort by descending number of motion pixels inside the component
    connected_components_sorted_by_num = sorted(connected_components, key=lambda c: -int(c.num))
    for ii in range(min(max_num_of_best_masks_to_unite+2, len(connected_components_sorted_by_num))):
        # this cycle is for debugging only
        cur_component = connected_components_sorted_by_num[ii]
        log.debug("connected_components_sorted_by_num[{}] = {}".format(ii, cur_component))
        _dbg_imshow("conn component ii="+str(ii), cur_component.mask * 255)
    desired_num = int(average_motion_mask.shape[0]
                      * average_motion_mask.shape[1]
                      * desired_rel_num_pixels_in_united_mask)
    best_components = []
    sum_best_components_num = 0
    log.debug("scanning connected components: desired_num = {}".format(desired_num))
    # greedily take the strongest components until their combined motion
    # pixel count reaches desired_num (or the component limit is hit)
    for ii in range(min(max_num_of_best_masks_to_unite, len(connected_components_sorted_by_num))):
        log.debug("scanning connected components: ii = {}".format(ii))
        cur_component = connected_components_sorted_by_num[ii]
        best_components.append(cur_component)
        log.debug("scanning connected components: cur_component.num = {}".format(cur_component.num))
        sum_best_components_num += cur_component.num
        log.debug("scanning connected components: sum_best_components_num = {}".format(
            sum_best_components_num))
        if sum_best_components_num >= desired_num:
            break
    if not best_components:
        return None
    # union of the bounding rects of all selected components
    res_bbox = best_components[0].rect
    for c in best_components[1:]:
        res_bbox = _get_union_rects(res_bbox, c.rect)
    best_component_to_show = cv2.cvtColor(best_components[0].mask*255, cv2.COLOR_GRAY2BGR)
    for c in best_components[1:]:
        best_component_to_show += cv2.cvtColor(c.mask*255, cv2.COLOR_GRAY2BGR)
    for c in best_components:
        _draw_rect(best_component_to_show, c.rect, (255, 0, 0), 3)
    _my_imshow("best_component", best_component_to_show)
    _log_work_time("_find_best_bbox_from_motion_mask", "work_time", begin_work_time)
    return res_bbox
#pylint: disable=R0902,R0913,C0111
class RoiDetectorImpl:
"""
Implementation class -- allows to detect ROI on frames received from a video sequence
using motion detection + background subtraction approaches.
"""
def __init__(self, cell_params, increase_rect_params, N_median, min_motion_to_work,
max_num_of_best_masks_to_unite=5,
desired_rel_num_pixels_in_united_mask=0.3,
required_num_motions_in_last_frames=5,
num_last_frames_to_count_motions=1000):
self.frame_size = None # (w, h)
self.motion_detector = None
self.cell_params = cell_params
self.increase_rect_params = increase_rect_params
self.N_median = N_median
self.min_motion_to_work = min_motion_to_work
self.sum_motion_masks = None
self.num_summed_masks = 0
self.res_bbox = None
self.res_bbox_confidence = 0
self.work_times_in_sec = []
self.max_num_work_times = 10000
self.rel_threshold_for_center = 0.9
self.quantile_for_best_bbox = 0.5
self.result_img_to_show = None
self.max_num_of_best_masks_to_unite = max_num_of_best_masks_to_unite
self.desired_rel_num_pixels_in_united_mask = desired_rel_num_pixels_in_united_mask
self.required_num_motions_in_last_frames = required_num_motions_in_last_frames
self.num_last_frames_to_count_motions = num_last_frames_to_count_motions
self.last_frame_ids_with_motions = []
#pylint: disable=R0914
def handle_frame(self, frame, prev_frame, frame_id):
"""
The method receives frame, the previous frame, and the frame number, and returns
roi where there is some regular motion on several last frames of the video sequence.
"""
if self.frame_size is None:
h, w = frame.shape[:2]
self.frame_size = (w, h)
self.motion_detector = RoiMotionDetector(self.frame_size[::-1], self.cell_params,
self.increase_rect_params, self.N_median,
self.min_motion_to_work)
assert self.frame_size is not None
assert self.motion_detector is not None
begin_work_time = datetime.datetime.now()
assert frame.shape == prev_frame.shape
assert tuple(frame.shape[:2]) == tuple(self.frame_size[::-1]), (
"frame.shape[:2]={}, self.frame_size[::-1]={}".format(frame.shape[:2],
self.frame_size[::-1]))
assert isinstance(frame_id, int)
assert (not self.last_frame_ids_with_motions
or (self.last_frame_ids_with_motions[-1] < frame_id))
self.res_bbox = None
img_to_show, prev_img_to_show = self.motion_detector.handle_image(frame, prev_frame)
self.result_img_to_show = prev_img_to_show
total_v = self.motion_detector.total_v
log.debug("main: total_v = {}".format(total_v))
if total_v is not None:
motion_mask = _get_diff_as_mask(frame, prev_frame, total_v)
self.last_frame_ids_with_motions.append(frame_id)
if self.num_summed_masks > 0:
self.sum_motion_masks = self.sum_motion_masks + motion_mask
self.num_summed_masks += 1
else:
self.sum_motion_masks = motion_mask.copy().astype(np.float32)
self.num_summed_masks = 1
# required to calculate res_bbox_confidence
while (self.last_frame_ids_with_motions and
(frame_id - self.last_frame_ids_with_motions[0]
>
self.num_last_frames_to_count_motions)):
del self.last_frame_ids_with_motions[0]
# simple approach to calculate res_bbox_confidence
if len(self.last_frame_ids_with_motions) >= self.required_num_motions_in_last_frames:
self.res_bbox_confidence = 1
else:
self.res_bbox_confidence = 0
_dbg_imshow("frame", img_to_show)
_dbg_imshow("prev frame", self.result_img_to_show)
if self.num_summed_masks > 0:
average_motion_mask = self.sum_motion_masks / self.num_summed_masks
_dbg_imshow("average motion mask", average_motion_mask)
self.res_bbox = _find_best_bbox_from_motion_mask(
average_motion_mask=average_motion_mask,
quantile=self.quantile_for_best_bbox,
max_num_of_best_masks_to_unite=self.max_num_of_best_masks_to_unite,
desired_rel_num_pixels_in_united_mask=self.desired_rel_num_pixels_in_united_mask)
if self.res_bbox:
conf_color = (200, 155, 0) if self.res_bbox_confidence == 1 else (255, 255, 55)
_draw_rect(self.result_img_to_show, self.res_bbox, conf_color, 2)
_my_imshow("result", self.result_img_to_show)
work_time = datetime.datetime.now() - begin_work_time
self.work_times_in_sec.append(work_time.total_seconds())
while len(self.work_times_in_sec) > self.max_num_work_times:
del self.work_times_in_sec[0]
work_time_ms = int(1000*work_time.total_seconds())
log.debug("work_time = {} ms".format(work_time_ms))
avg_work_time_ms = int(1000*np.average(self.work_times_in_sec))
log.debug("avg work_time = {} ms".format(avg_work_time_ms))
return self.res_bbox
    def get_res_bbox(self):
        """Return the most recently computed result bounding box (``self.res_bbox``)."""
        return self.res_bbox
    def get_res_bbox_confidence(self):
        """Return the confidence of the result bounding box; at the moment it is either 0 or 1."""
        return self.res_bbox_confidence
    def get_num_summed_masks(self):
        """Return the number of motion masks accumulated so far in ``self.sum_motion_masks``."""
        return self.num_summed_masks
    def get_result_img_to_show(self):
        """Return the visualization image on which the result bbox is drawn (``self.result_img_to_show``)."""
        return self.result_img_to_show
#pylint: disable=R0903
class RoiDetector:
    """
    The class allows to detect ROI on frames received from a video sequence
    using motion detection + background subtraction approaches.
    """
    @staticmethod
    def _create_default_roi_detector_impl(desired_min_side):
        """Build a RoiDetectorImpl with default grid/cell parameters scaled to desired_min_side."""
        increase_cell_coeff = 1.4
        shift_x = 1
        shift_y = 1
        increase_rect_params = IncreaseRectParams(increase_cell_coeff, shift_x, shift_y)
        # cell size is defined relative to a reference frame side of 160 pixels
        grid_cell_size = int(25.0 / 160.0 * desired_min_side)
        cell_aspect_ratio = 1
        num_cells_x = 3
        num_cells_y = 3
        cell_params = CellParams(cell_height=grid_cell_size,
                                 cell_aspect_ratio=cell_aspect_ratio,
                                 cell_overlap=0, num_cells_x=num_cells_x,
                                 num_cells_y=num_cells_y, list_v_len=100)
        N_median = 20
        # minimal motion magnitude, also scaled relative to the 160-pixel reference side
        min_motion_to_work = 1.5 / 160.0 * desired_min_side
        roi_detector_impl = RoiDetectorImpl(cell_params, increase_rect_params,
                                            N_median, min_motion_to_work)
        return roi_detector_impl
    def __init__(self, frame_step):
        """
        Constructor.
        The only parameter of the constructor is frame step that should be used during detection.
        The value depends on the frame rate of the input video.
        The recommended value for video stream with frame rate 30 frames per second is frame_step=5.
        """
        self.frame_step = frame_step
        # this is the most important metric parameter
        # it depends on the quality of the video
        self.desired_min_side = 160
        # a previously detected bbox is kept (returned) for at most this many frames
        self.max_frames_keep_bbox = 50
        # cap on the history used to compute the median bbox
        self.max_len_bboxes_list = 100
        self.impl = self._create_default_roi_detector_impl(self.desired_min_side)
        self.frame_idx = -1
        self.prev_frame = None
        self.last_frame_detected_bbox = None
        self.detected_bboxes_for_avg = []
        self.scale = None
    @staticmethod
    def _prepare_frame_for_default_roi_detector(frame, desired_min_side):
        """Downscale frame so its smaller side equals desired_min_side; return (scaled_frame, scale)."""
        h, w = frame.shape[:2]
        min_sz = min(h, w)
        scale = float(desired_min_side) / min_sz
        target_size = (int(w * scale), int(h * scale))
        scaled_frame = cv2.resize(frame, target_size)
        return scaled_frame, scale
    def handle_frame(self, frame):
        """
        The main method of the class.
        The frames should be passed to the method with a constant frame rate
        (~30 frames per second).
        The method returns
        * either bounding box of detected ROI,
          (in this case it returns bounding box as namedtuple Rect),
        * or None if it cannot make detection with sufficient confidence.
        """
        self.frame_idx += 1
        log.debug("frame_idx = {}".format(self.frame_idx))
        cur_frame, scale = self._prepare_frame_for_default_roi_detector(
            frame, self.desired_min_side)
        assert cur_frame is not None
        assert scale > 0
        if self.scale is None:
            self.scale = scale
        # all frames are expected to have the same size, hence a constant scale
        assert self.scale == scale
        if self.prev_frame is None:
            # first frame ever: nothing to compare against, just store it
            assert self.frame_idx == 0
            self.prev_frame = cur_frame.copy()
            log.debug("return None as self.prev_frame is None")
            return None
        if self.frame_idx % self.frame_step == 0:
            # run the (expensive) detection only every frame_step-th frame
            detected_bbox = self.impl.handle_frame(cur_frame, self.prev_frame, self.frame_idx)
            detected_bbox_confidence = self.impl.get_res_bbox_confidence()
            if detected_bbox_confidence > 0:
                self.last_frame_detected_bbox = self.frame_idx
                self.detected_bboxes_for_avg.append(detected_bbox)
                while len(self.detected_bboxes_for_avg) > self.max_len_bboxes_list:
                    del self.detected_bboxes_for_avg[0]
            # prev_frame is only advanced on processed frames, so the impl always
            # compares frames that are frame_step apart
            self.prev_frame = cur_frame.copy()
        else:
            log.debug("skipping frame")
        # keep returning a bbox only for a limited number of frames after the last confident detection
        should_return_bbox = (
            (self.last_frame_detected_bbox is not None) and
            (self.frame_idx - self.last_frame_detected_bbox) < self.max_frames_keep_bbox)
        if not should_return_bbox:
            return None
        # median over the recent detections smooths the result; rescale back to the original frame size
        avg_bbox = _get_median_of_rects(self.detected_bboxes_for_avg)
        rescaled_bbox = _scale_rect(avg_bbox, 1.0 / self.scale)
        return rescaled_bbox
| [
"cv2.rectangle",
"logging.debug",
"math.floor",
"numpy.hstack",
"cv2.imshow",
"numpy.array",
"numpy.linalg.norm",
"numpy.histogram",
"cv2.minMaxLoc",
"numpy.vstack",
"cv2.matchTemplate",
"cv2.blur",
"numpy.abs",
"collections.namedtuple",
"numpy.amin",
"numpy.average",
"cv2.cvtColor",... | [((982, 1028), 'collections.namedtuple', 'namedtuple', (['"""Rect"""', "['tl_x', 'tl_y', 'w', 'h']"], {}), "('Rect', ['tl_x', 'tl_y', 'w', 'h'])\n", (992, 1028), False, 'from collections import namedtuple\n'), ((1037, 1068), 'collections.namedtuple', 'namedtuple', (['"""Point"""', "['x', 'y']"], {}), "('Point', ['x', 'y'])\n", (1047, 1068), False, 'from collections import namedtuple\n'), ((1091, 1170), 'collections.namedtuple', 'namedtuple', (['"""IncreaseRectParams"""', "['increase_cell_coeff', 'shift_x', 'shift_y']"], {}), "('IncreaseRectParams', ['increase_cell_coeff', 'shift_x', 'shift_y'])\n", (1101, 1170), False, 'from collections import namedtuple\n'), ((1184, 1310), 'collections.namedtuple', 'namedtuple', (['"""CellParams"""', "['cell_height', 'cell_aspect_ratio', 'cell_overlap', 'num_cells_x',\n 'num_cells_y', 'list_v_len']"], {}), "('CellParams', ['cell_height', 'cell_aspect_ratio',\n 'cell_overlap', 'num_cells_x', 'num_cells_y', 'list_v_len'])\n", (1194, 1310), False, 'from collections import namedtuple\n'), ((1358, 1432), 'collections.namedtuple', 'namedtuple', (['"""CellData"""', "['rect', 'increased_rect', 'list_v', 'calculated']"], {}), "('CellData', ['rect', 'increased_rect', 'list_v', 'calculated'])\n", (1368, 1432), False, 'from collections import namedtuple\n'), ((1455, 1548), 'collections.namedtuple', 'namedtuple', (['"""ConnectedComponent"""', "['label_id', 'mask', 'centroid', 'rect', 'area', 'num']"], {}), "('ConnectedComponent', ['label_id', 'mask', 'centroid', 'rect',\n 'area', 'num'])\n", (1465, 1548), False, 'from collections import namedtuple\n'), ((2690, 2724), 'math.ceil', 'math.ceil', (['(w * coeff + 2 * shift_x)'], {}), '(w * coeff + 2 * shift_x)\n', (2699, 2724), False, 'import math\n'), ((2735, 2769), 'math.ceil', 'math.ceil', (['(h * coeff + 2 * shift_y)'], {}), '(h * coeff + 2 * shift_y)\n', (2744, 2769), False, 'import math\n'), ((2820, 2849), 'math.floor', 'math.floor', (['(c_x - new_w / 2.0)'], {}), '(c_x - 
new_w / 2.0)\n', (2830, 2849), False, 'import math\n'), ((2864, 2893), 'math.floor', 'math.floor', (['(c_y - new_h / 2.0)'], {}), '(c_y - new_h / 2.0)\n', (2874, 2893), False, 'import math\n'), ((5504, 5523), 'numpy.array', 'np.array', (['list_tl_x'], {}), '(list_tl_x)\n', (5512, 5523), True, 'import numpy as np\n'), ((5540, 5559), 'numpy.array', 'np.array', (['list_tl_y'], {}), '(list_tl_y)\n', (5548, 5559), True, 'import numpy as np\n'), ((5576, 5595), 'numpy.array', 'np.array', (['list_br_x'], {}), '(list_br_x)\n', (5584, 5595), True, 'import numpy as np\n'), ((5612, 5631), 'numpy.array', 'np.array', (['list_br_y'], {}), '(list_br_y)\n', (5620, 5631), True, 'import numpy as np\n'), ((5644, 5664), 'numpy.median', 'np.median', (['list_tl_x'], {}), '(list_tl_x)\n', (5653, 5664), True, 'import numpy as np\n'), ((5676, 5696), 'numpy.median', 'np.median', (['list_tl_y'], {}), '(list_tl_y)\n', (5685, 5696), True, 'import numpy as np\n'), ((5708, 5728), 'numpy.median', 'np.median', (['list_br_x'], {}), '(list_br_x)\n', (5717, 5728), True, 'import numpy as np\n'), ((5740, 5760), 'numpy.median', 'np.median', (['list_br_y'], {}), '(list_br_y)\n', (5749, 5760), True, 'import numpy as np\n'), ((6114, 6183), 'cv2.resize', 'cv2.resize', (['match_to_draw', '(show_size_coeff * w, show_size_coeff * h)'], {}), '(match_to_draw, (show_size_coeff * w, show_size_coeff * h))\n', (6124, 6183), False, 'import cv2\n'), ((6417, 6474), 'cv2.matchTemplate', 'cv2.matchTemplate', (['subimage', 'prev_template', 'cv2.TM_SQDIFF'], {}), '(subimage, prev_template, cv2.TM_SQDIFF)\n', (6434, 6474), False, 'import cv2\n'), ((6511, 6531), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['match'], {}), '(match)\n', (6524, 6531), False, 'import cv2\n'), ((7002, 7014), 'numpy.array', 'np.array', (['pt'], {}), '(pt)\n', (7010, 7014), True, 'import numpy as np\n'), ((7023, 7034), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (7031, 7034), True, 'import numpy as np\n'), ((7506, 7572), 'cv2.rectangle', 'cv2.rectangle', 
(['image', '(tl_x, tl_y)', '(br_x, br_y)', 'color', 'thickness'], {}), '(image, (tl_x, tl_y), (br_x, br_y), color, thickness)\n', (7519, 7572), False, 'import cv2\n'), ((7744, 7764), 'math.ceil', 'math.ceil', (['(scale * w)'], {}), '(scale * w)\n', (7753, 7764), False, 'import math\n'), ((7777, 7797), 'math.ceil', 'math.ceil', (['(scale * h)'], {}), '(scale * h)\n', (7786, 7797), False, 'import math\n'), ((7811, 7844), 'cv2.resize', 'cv2.resize', (['image', '(new_w, new_h)'], {}), '(image, (new_w, new_h))\n', (7821, 7844), False, 'import cv2\n'), ((7918, 7962), 'numpy.array', 'np.array', (['[x for x, y in list_v[-N_median:]]'], {}), '([x for x, y in list_v[-N_median:]])\n', (7926, 7962), True, 'import numpy as np\n'), ((7978, 7991), 'numpy.median', 'np.median', (['xs'], {}), '(xs)\n', (7987, 7991), True, 'import numpy as np\n'), ((8002, 8046), 'numpy.array', 'np.array', (['[y for x, y in list_v[-N_median:]]'], {}), '([y for x, y in list_v[-N_median:]])\n', (8010, 8046), True, 'import numpy as np\n'), ((8062, 8075), 'numpy.median', 'np.median', (['ys'], {}), '(ys)\n', (8071, 8075), True, 'import numpy as np\n'), ((16826, 16849), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16847, 16849), False, 'import datetime\n'), ((17448, 17504), 'cv2.blur', 'cv2.blur', (['subframe', '(blur_kernel_size, blur_kernel_size)'], {}), '(subframe, (blur_kernel_size, blur_kernel_size))\n', (17456, 17504), False, 'import cv2\n'), ((17525, 17586), 'cv2.blur', 'cv2.blur', (['prev_subframe', '(blur_kernel_size, blur_kernel_size)'], {}), '(prev_subframe, (blur_kernel_size, blur_kernel_size))\n', (17533, 17586), False, 'import cv2\n'), ((17616, 17686), 'cv2.blur', 'cv2.blur', (['prev_subframe_nomotion', '(blur_kernel_size, blur_kernel_size)'], {}), '(prev_subframe_nomotion, (blur_kernel_size, blur_kernel_size))\n', (17624, 17686), False, 'import cv2\n'), ((17872, 17885), 'numpy.amin', 'np.amin', (['diff'], {}), '(diff)\n', (17879, 17885), True, 'import numpy as np\n'), 
((17901, 17914), 'numpy.amax', 'np.amax', (['diff'], {}), '(diff)\n', (17908, 17914), True, 'import numpy as np\n'), ((18209, 18231), 'numpy.amin', 'np.amin', (['diff_nomotion'], {}), '(diff_nomotion)\n', (18216, 18231), True, 'import numpy as np\n'), ((18256, 18278), 'numpy.amax', 'np.amax', (['diff_nomotion'], {}), '(diff_nomotion)\n', (18263, 18278), True, 'import numpy as np\n'), ((18416, 18428), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (18422, 18428), True, 'import numpy as np\n'), ((18452, 18473), 'numpy.abs', 'np.abs', (['diff_nomotion'], {}), '(diff_nomotion)\n', (18458, 18473), True, 'import numpy as np\n'), ((19015, 19051), 'numpy.zeros', 'np.zeros', (['absdiff1.shape', 'np.float32'], {}), '(absdiff1.shape, np.float32)\n', (19023, 19051), True, 'import numpy as np\n'), ((19111, 19147), 'numpy.zeros', 'np.zeros', (['absdiff1.shape', 'np.float32'], {}), '(absdiff1.shape, np.float32)\n', (19119, 19147), True, 'import numpy as np\n'), ((21153, 21174), 'numpy.amin', 'np.amin', (['average_mask'], {}), '(average_mask)\n', (21160, 21174), True, 'import numpy as np\n'), ((21189, 21210), 'numpy.amax', 'np.amax', (['average_mask'], {}), '(average_mask)\n', (21196, 21210), True, 'import numpy as np\n'), ((21284, 21314), 'numpy.histogram', 'np.histogram', (['average_mask', '(20)'], {}), '(average_mask, 20)\n', (21296, 21314), True, 'import numpy as np\n'), ((22167, 22190), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (22188, 22190), False, 'import datetime\n'), ((22377, 22433), 'cv2.cvtColor', 'cv2.cvtColor', (['(thresholded_mask * 255)', 'cv2.COLOR_GRAY2BGR'], {}), '(thresholded_mask * 255, cv2.COLOR_GRAY2BGR)\n', (22389, 22433), False, 'import cv2\n'), ((22789, 22839), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['thresholded_mask'], {}), '(thresholded_mask)\n', (22821, 22839), False, 'import cv2\n'), ((24628, 24691), 'cv2.cvtColor', 'cv2.cvtColor', (['(best_components[0].mask * 255)', 
'cv2.COLOR_GRAY2BGR'], {}), '(best_components[0].mask * 255, cv2.COLOR_GRAY2BGR)\n', (24640, 24691), False, 'import cv2\n'), ((1654, 1675), 'cv2.imshow', 'cv2.imshow', (['name', 'img'], {}), '(name, img)\n', (1664, 1675), False, 'import cv2\n'), ((1861, 1884), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1882, 1884), False, 'import datetime\n'), ((9161, 9195), 'math.floor', 'math.floor', (['(cell_cx - cell_w / 2.0)'], {}), '(cell_cx - cell_w / 2.0)\n', (9171, 9195), False, 'import math\n'), ((9210, 9244), 'math.floor', 'math.floor', (['(cell_cy - cell_h / 2.0)'], {}), '(cell_cy - cell_h / 2.0)\n', (9220, 9244), False, 'import math\n'), ((12662, 12685), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12683, 12685), False, 'import datetime\n'), ((19497, 19520), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19518, 19520), False, 'import datetime\n'), ((19783, 19798), 'numpy.amax', 'np.amax', (['labels'], {}), '(labels)\n', (19790, 19798), True, 'import numpy as np\n'), ((19899, 19936), 'numpy.array', 'np.array', (['(labels == i)'], {'dtype': 'np.uint8'}), '(labels == i, dtype=np.uint8)\n', (19907, 19936), True, 'import numpy as np\n'), ((21404, 21425), 'numpy.amin', 'np.amin', (['average_mask'], {}), '(average_mask)\n', (21411, 21425), True, 'import numpy as np\n'), ((21427, 21448), 'numpy.amax', 'np.amax', (['average_mask'], {}), '(average_mask)\n', (21434, 21448), True, 'import numpy as np\n'), ((24758, 24804), 'cv2.cvtColor', 'cv2.cvtColor', (['(c.mask * 255)', 'cv2.COLOR_GRAY2BGR'], {}), '(c.mask * 255, cv2.COLOR_GRAY2BGR)\n', (24770, 24804), False, 'import cv2\n'), ((27376, 27399), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (27397, 27399), False, 'import datetime\n'), ((33173, 33203), 'cv2.resize', 'cv2.resize', (['frame', 'target_size'], {}), '(frame, target_size)\n', (33183, 33203), False, 'import cv2\n'), ((13790, 13830), 'numpy.array', 'np.array', 
(["cell_data.calculated['median']"], {}), "(cell_data.calculated['median'])\n", (13798, 13830), True, 'import numpy as np\n'), ((14400, 14422), 'numpy.array', 'np.array', (['list_medians'], {}), '(list_medians)\n', (14408, 14422), True, 'import numpy as np\n'), ((14508, 14539), 'numpy.median', 'np.median', (['list_medians'], {'axis': '(0)'}), '(list_medians, axis=0)\n', (14517, 14539), True, 'import numpy as np\n'), ((14743, 14766), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14764, 14766), False, 'import datetime\n'), ((16220, 16255), 'numpy.hstack', 'np.hstack', (['(mask_horiz_shift, mask)'], {}), '((mask_horiz_shift, mask))\n', (16229, 16255), True, 'import numpy as np\n'), ((16289, 16324), 'numpy.hstack', 'np.hstack', (['(mask, mask_horiz_shift)'], {}), '((mask, mask_horiz_shift))\n', (16298, 16324), True, 'import numpy as np\n'), ((16519, 16553), 'numpy.vstack', 'np.vstack', (['(mask_vert_shift, mask)'], {}), '((mask_vert_shift, mask))\n', (16528, 16553), True, 'import numpy as np\n'), ((16587, 16621), 'numpy.vstack', 'np.vstack', (['(mask, mask_vert_shift)'], {}), '((mask, mask_vert_shift))\n', (16596, 16621), True, 'import numpy as np\n'), ((18835, 18852), 'numpy.amax', 'np.amax', (['absdiff1'], {}), '(absdiff1)\n', (18842, 18852), True, 'import numpy as np\n'), ((18920, 18946), 'numpy.amax', 'np.amax', (['absdiff_nomotion1'], {}), '(absdiff_nomotion1)\n', (18927, 18946), True, 'import numpy as np\n'), ((20336, 20368), 'numpy.sum', 'np.sum', (['original_mask[mask == 1]'], {}), '(original_mask[mask == 1])\n', (20342, 20368), True, 'import numpy as np\n'), ((22282, 22328), 'numpy.array', 'np.array', (['(average_motion_mask >= quantile_edge)'], {}), '(average_motion_mask >= quantile_edge)\n', (22290, 22328), True, 'import numpy as np\n'), ((30067, 30090), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (30088, 30090), False, 'import datetime\n'), ((34164, 34215), 'logging.debug', 'log.debug', (['"""return None as 
self.prev_frame is None"""'], {}), "('return None as self.prev_frame is None')\n", (34173, 34215), True, 'import logging as log\n'), ((34852, 34879), 'logging.debug', 'log.debug', (['"""skipping frame"""'], {}), "('skipping frame')\n", (34861, 34879), True, 'import logging as log\n'), ((13160, 13177), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (13174, 13177), True, 'import numpy as np\n'), ((30440, 30474), 'numpy.average', 'np.average', (['self.work_times_in_sec'], {}), '(self.work_times_in_sec)\n', (30450, 30474), True, 'import numpy as np\n'), ((14081, 14098), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (14095, 14098), True, 'import numpy as np\n'), ((22692, 22740), 'numpy.nonzero', 'np.nonzero', (['(average_motion_mask >= quantile_edge)'], {}), '(average_motion_mask >= quantile_edge)\n', (22702, 22740), True, 'import numpy as np\n')] |
'''
@author: <NAME>
@author: <NAME>
@maintainer: <NAME>
@contact: <EMAIL>, <EMAIL>
@date: 14.08.2015
@version: 1.2+
@copyright: Copyright (c) 2015-2017, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
@license : BSD-2-Clause
'''
import numpy as np
from .module import Module
# -------------------------------
# Sum Pooling layer
# -------------------------------
class AveragePool(Module):

    def __init__(self, pool=(2, 2), stride=(2, 2)):
        '''
        Constructor for the average pooling layer object

        Parameters
        ----------
        pool : tuple (h,w)
            the size of the pooling mask in vertical (h) and horizontal (w) direction

        stride : tuple (h,w)
            the vertical (h) and horizontal (w) step sizes between filter applications.
        '''
        Module.__init__(self)
        self.pool = pool
        self.stride = stride

    def forward(self, X):
        '''
        Realizes the forward pass of an input through the average pooling layer.

        Parameters
        ----------
        X : numpy.ndarray
            a network input, shaped (N,H,W,D), with
            N = batch size
            H, W, D = input size in heigth, width, depth

        Returns
        -------
        Y : numpy.ndarray
            the average-pooled outputs, reduced in size due to given stride and pooling size
        '''
        self.X = X
        N, H, W, D = X.shape

        hpool, wpool = self.pool
        hstride, wstride = self.stride

        # assume the given pooling and stride parameters are carefully chosen
        # (the pooling windows tile the input exactly), so integer division is exact.
        Hout = (H - hpool) // hstride + 1
        Wout = (W - wpool) // wstride + 1

        normalizer = 1. / np.sqrt(hpool * wpool)  # keeps the output well conditioned

        # initialize pooled output
        self.Y = np.zeros((N, Hout, Wout, D))
        for i in range(Hout):
            for j in range(Wout):
                self.Y[:, i, j, :] = X[:, i*hstride:i*hstride+hpool, j*wstride:j*wstride+wpool, :].mean(axis=(1, 2)) * normalizer
        return self.Y

    def backward(self, DY):
        '''
        Backward-passes an input error gradient DY towards the input neurons of this average pooling layer.

        Parameters
        ----------
        DY : numpy.ndarray
            an error gradient shaped same as the output array of forward, i.e. (N,Hy,Wy,Dy) with
            N = number of samples in the batch
            Hy = heigth of the output
            Wy = width of the output
            Dy = output depth = input depth

        Returns
        -------
        DX : numpy.ndarray
            the error gradient propagated towards the input
        '''
        # DY is of shape N, Hout, Wout, nfilters
        N, H, W, D = self.X.shape

        hpool, wpool = self.pool
        hstride, wstride = self.stride

        # bug fix: under Python 3 the original float division produced float
        # Hout/Wout which were passed to the removed builtin xrange;
        # use integer division and range, as forward() already does.
        Hout = (H - hpool) // hstride + 1
        Wout = (W - wpool) // wstride + 1

        normalizer = 1. / np.sqrt(hpool * wpool)  # produces well-conditioned gradients

        # distribute the gradient (1 * DY) across all contributing inputs evenly
        DX = np.zeros_like(self.X)
        for i in range(Hout):
            for j in range(Wout):
                DX[:, i*hstride:i*hstride+hpool, j*wstride:j*wstride+wpool, :] += DY[:, i:i+1, j:j+1, :] * normalizer
        return DX

    def clean(self):
        '''Drop cached forward-pass data to free memory.'''
        self.X = None
        self.Y = None

    def _simple_lrp(self, R):
        '''
        LRP according to Eq(56) in DOI: 10.1371/journal.pone.0130140
        '''
        N, H, W, D = self.X.shape
        hpool, wpool = self.pool
        hstride, wstride = self.stride

        Hout = (H - hpool) // hstride + 1
        Wout = (W - wpool) // wstride + 1

        Rx = np.zeros(self.X.shape)
        for i in range(Hout):
            for j in range(Wout):
                Z = self.X[:, i*hstride:i*hstride+hpool, j*wstride:j*wstride+wpool, :]  # input activations
                Zs = Z.sum(axis=(1, 2), keepdims=True)
                Zs += 1e-12 * ((Zs >= 0) * 2 - 1)  # weak numerical stabilizer to cushion an all-zero input
                # distribute relevance proportional to input activations per layer
                Rx[:, i*hstride:i*hstride+hpool, j*wstride:j*wstride+wpool, :] += (Z / Zs) * R[:, i:i+1, j:j+1, :]
        return Rx

    def _flat_lrp(self, R):
        '''
        distribute relevance for each output evenly to the output neurons' receptive fields.
        '''
        N, H, W, D = self.X.shape
        hpool, wpool = self.pool
        hstride, wstride = self.stride

        Hout = (H - hpool) // hstride + 1
        Wout = (W - wpool) // wstride + 1

        # bug fix: np.float is a removed alias of the builtin float (NumPy >= 1.24)
        Rx = np.zeros_like(self.X, dtype=float)
        for i in range(Hout):
            for j in range(Wout):
                Z = np.ones([N, hpool, wpool, D])
                Zs = Z.sum(axis=(1, 2), keepdims=True)
                Rx[:, i*hstride:i*hstride+hpool, j*wstride:j*wstride+wpool, :] += (Z / Zs) * R[:, i:i+1, j:j+1, :]
        return Rx

    def _ww_lrp(self, R):
        '''
        due to uniform weights used for sum pooling (1), this method defaults to _flat_lrp(R)
        '''
        return self._flat_lrp(R)

    def _epsilon_lrp(self, R, epsilon):
        '''
        LRP according to Eq(58) in DOI: 10.1371/journal.pone.0130140
        '''
        N, H, W, D = self.X.shape
        hpool, wpool = self.pool
        hstride, wstride = self.stride

        Hout = (H - hpool) // hstride + 1
        Wout = (W - wpool) // wstride + 1

        Rx = np.zeros(self.X.shape)
        for i in range(Hout):
            for j in range(Wout):
                Z = self.X[:, i*hstride:i*hstride+hpool, j*wstride:j*wstride+wpool, :]  # input activations
                Zs = Z.sum(axis=(1, 2), keepdims=True)
                Zs += epsilon * ((Zs >= 0) * 2 - 1)  # epsilon stabilizer to cushion an all-zero input
                Rx[:, i*hstride:i*hstride+hpool, j*wstride:j*wstride+wpool, :] += (Z / Zs) * R[:, i:i+1, j:j+1, :]
        return Rx

    # yes, we can do this. no, it will not make sense most of the time.
    # by default, _lrp_simple will be called.
    def _alphabeta_lrp(self, R, alpha):
        '''
        LRP according to Eq(60) in DOI: 10.1371/journal.pone.0130140
        '''
        beta = 1 - alpha

        N, H, W, D = self.X.shape
        hpool, wpool = self.pool
        hstride, wstride = self.stride

        Hout = (H - hpool) // hstride + 1
        Wout = (W - wpool) // wstride + 1

        # distribute the relevance across all contributing inputs
        Rx = np.zeros(self.X.shape)
        for i in range(Hout):
            for j in range(Wout):
                Z = self.X[:, i*hstride:i*hstride+hpool, j*wstride:j*wstride+wpool, :]  # input activations
                if not alpha == 0:
                    Zp = Z * (Z > 0)
                    # zero division is quite likely in sum pooling layers when using the alpha-variant
                    Zsp = Zp.sum(axis=(1, 2), keepdims=True) + 1e-16
                    Ralpha = (Zp / Zsp) * R[:, i:i+1, j:j+1, :]
                else:
                    Ralpha = 0

                if not beta == 0:
                    Zn = Z * (Z < 0)
                    Zsn = Zn.sum(axis=(1, 2), keepdims=True) - 1e-16
                    Rbeta = (Zn / Zsn) * R[:, i:i+1, j:j+1, :]
                else:
                    Rbeta = 0

                Rx[:, i*hstride:i*hstride+hpool, j*wstride:j*wstride+wpool, :] += Ralpha + Rbeta
        return Rx
| [
"numpy.sqrt",
"numpy.zeros",
"numpy.zeros_like",
"numpy.ones"
] | [((3131, 3152), 'numpy.zeros_like', 'np.zeros_like', (['self.X'], {}), '(self.X)\n', (3144, 3152), True, 'import numpy as np\n'), ((3881, 3903), 'numpy.zeros', 'np.zeros', (['self.X.shape'], {}), '(self.X.shape)\n', (3889, 3903), True, 'import numpy as np\n'), ((4871, 4908), 'numpy.zeros_like', 'np.zeros_like', (['self.X'], {'dtype': 'np.float'}), '(self.X, dtype=np.float)\n', (4884, 4908), True, 'import numpy as np\n'), ((5796, 5818), 'numpy.zeros', 'np.zeros', (['self.X.shape'], {}), '(self.X.shape)\n', (5804, 5818), True, 'import numpy as np\n'), ((6993, 7015), 'numpy.zeros', 'np.zeros', (['self.X.shape'], {}), '(self.X.shape)\n', (7001, 7015), True, 'import numpy as np\n'), ((1665, 1687), 'numpy.sqrt', 'np.sqrt', (['(hpool * wpool)'], {}), '(hpool * wpool)\n', (1672, 1687), True, 'import numpy as np\n'), ((3006, 3028), 'numpy.sqrt', 'np.sqrt', (['(hpool * wpool)'], {}), '(hpool * wpool)\n', (3013, 3028), True, 'import numpy as np\n'), ((4995, 5024), 'numpy.ones', 'np.ones', (['[N, hpool, wpool, D]'], {}), '([N, hpool, wpool, D])\n', (5002, 5024), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the pyxir Decent quantizer simulation runtime"""
import unittest
import numpy as np
import pyxir as px
from pyxir.runtime import base
from pyxir.runtime.decentq_sim.runtime_decentq_sim import RuntimeDecentQSim
from pyxir.graph.xgraph_factory import XGraphFactory
from pyxir.graph.layer import xlayer
class TestDecentQSimRuntime(unittest.TestCase):

    def test_init(self):
        """Smoke test: a RuntimeDecentQSim can be built from a minimal conv graph."""
        weights = np.reshape(
            np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32),
            (2, 1, 2, 2))

        in_layer = px.ops.input('input', [1, 1, 4, 4])
        kernel_layer = px.ops.constant('kernel', weights)
        conv_layer = px.ops.conv2d(
            op_name='conv1',
            input_layer=in_layer,
            weights_layer=kernel_layer,
            kernel_size=[2, 2],
            strides=[1, 1],
            padding_hw=[0, 0],
            dilation=[1, 1],
            groups=1,
            channels=2,
            data_layout='NCHW',
            kernel_layout='OIHW',
        )
        conv_layer.target = 'cpu'
        conv_layer.subgraph = 'xp0'

        xgraph = XGraphFactory().build_from_xlayer(
            [in_layer, kernel_layer, conv_layer])
        xgraph.meta_attrs['quant_keys'] = ['xp0']
        xgraph.meta_attrs['xp0'] = {'q_eval': '/path/to/q_eval'}

        # Successful construction is the assertion here.
        RuntimeDecentQSim('test', xgraph)
# Run the tests in this module when executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"pyxir.ops.input",
"pyxir.ops.conv2d",
"pyxir.ops.constant",
"numpy.array",
"pyxir.graph.xgraph_factory.XGraphFactory",
"unittest.main",
"pyxir.runtime.decentq_sim.runtime_decentq_sim.RuntimeDecentQSim"
] | [((1976, 1991), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1989, 1991), False, 'import unittest\n'), ((1139, 1174), 'pyxir.ops.input', 'px.ops.input', (['"""input"""', '[1, 1, 4, 4]'], {}), "('input', [1, 1, 4, 4])\n", (1151, 1174), True, 'import pyxir as px\n'), ((1187, 1215), 'pyxir.ops.constant', 'px.ops.constant', (['"""kernel"""', 'K'], {}), "('kernel', K)\n", (1202, 1215), True, 'import pyxir as px\n'), ((1228, 1435), 'pyxir.ops.conv2d', 'px.ops.conv2d', ([], {'op_name': '"""conv1"""', 'input_layer': 'i', 'weights_layer': 'k', 'kernel_size': '[2, 2]', 'strides': '[1, 1]', 'padding_hw': '[0, 0]', 'dilation': '[1, 1]', 'groups': '(1)', 'channels': '(2)', 'data_layout': '"""NCHW"""', 'kernel_layout': '"""OIHW"""'}), "(op_name='conv1', input_layer=i, weights_layer=k, kernel_size=\n [2, 2], strides=[1, 1], padding_hw=[0, 0], dilation=[1, 1], groups=1,\n channels=2, data_layout='NCHW', kernel_layout='OIHW')\n", (1241, 1435), True, 'import pyxir as px\n'), ((1843, 1876), 'pyxir.runtime.decentq_sim.runtime_decentq_sim.RuntimeDecentQSim', 'RuntimeDecentQSim', (['"""test"""', 'xgraph'], {}), "('test', xgraph)\n", (1860, 1876), False, 'from pyxir.runtime.decentq_sim.runtime_decentq_sim import RuntimeDecentQSim\n'), ((1023, 1087), 'numpy.array', 'np.array', (['[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]'], {'dtype': 'np.float32'}), '([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32)\n', (1031, 1087), True, 'import numpy as np\n'), ((1663, 1678), 'pyxir.graph.xgraph_factory.XGraphFactory', 'XGraphFactory', ([], {}), '()\n', (1676, 1678), False, 'from pyxir.graph.xgraph_factory import XGraphFactory\n')] |
import math
import numpy as np
def relu(pixel_vals, bias=0):
    '''ReLU transformation of pixel values (scalar or array, applied elementwise).

    For use within each individual node in a dense layer before being passed
    onto the next layer.

    bias (default 0) is added to the activated output, consistent with tanh.

    Bug fix: the original returned a 1-tuple because of a stray trailing comma,
    which was inconsistent with sigmoid and tanh; the bare value is returned now.
    '''
    return pixel_vals * (pixel_vals > 0) + bias
def sigmoid(pixel_vals, bias=0):
    '''returns sigmoid activation of a pixel
    bias 0'ed out by default

    Bug fix: the bias was previously added inside the denominator
    (1/(1+exp(-x)+bias)); it is now added to the activated output, which is
    consistent with relu and tanh. Behavior is unchanged for the default bias=0.
    '''
    return 1 / (1 + np.exp(-pixel_vals)) + bias
def tanh(pixel_vals, bias=0):
    '''Hyperbolic-tangent activation of pixel values (tuple/array, elementwise).

    bias (default 0) is added to the activated output.
    '''
    activated = np.tanh(pixel_vals)
    return activated + bias
| [
"numpy.exp",
"numpy.tanh"
] | [((598, 617), 'numpy.tanh', 'np.tanh', (['pixel_vals'], {}), '(pixel_vals)\n', (605, 617), True, 'import numpy as np\n'), ((439, 458), 'numpy.exp', 'np.exp', (['(-pixel_vals)'], {}), '(-pixel_vals)\n', (445, 458), True, 'import numpy as np\n')] |
import os
import pandas as pd
from numpy.random import default_rng
def create_sample(
    input_file="../../classes_input/test_input.csv",
    output_file=None,
    percentage_sample=25,
    exclude_samples=None,
):
    """Write a CSV with the rows of a random subset of the classes in input_file.

    Parameters
    ----------
    input_file : str
        CSV with at least a ``class_id`` column.
    output_file : str or None
        Destination path; when None a name is derived from percentage_sample
        and the excluded sample files.
    percentage_sample : int
        Percentage of the distinct class ids to sample.
    exclude_samples : list of str or None
        Optional CSV files whose class ids must not be sampled again.

    Bug fix: the original iterated over ``exclude_samples`` unconditionally,
    so calling the function with the default ``None`` raised a TypeError.
    """
    if not output_file:
        exclude = ""
        if exclude_samples:
            excluded_names = [
                os.path.splitext(os.path.basename(x))[0].replace(
                    "test_input_sampled_", ""
                )
                for x in exclude_samples
            ]
            exclude = f"_exclude_{'_'.join(excluded_names)}"
        output_file = (
            f"../../classes_input/test_input_sampled_{percentage_sample}{exclude}.csv"
        )

    rng = default_rng()
    input_df = pd.read_csv(input_file)
    all_classes = pd.unique(input_df["class_id"])

    # collect class ids already used by previous samples (guard against None)
    excluded_classes = set()
    for f in exclude_samples or []:
        df = pd.read_csv(f)
        excluded_classes = excluded_classes.union(pd.unique(df["class_id"]))

    classes_to_sample = list(set(all_classes) - excluded_classes)
    # sample size is a percentage of ALL classes, drawn from the not-yet-used ones
    class_sample_size = round(len(all_classes) * percentage_sample / 100)
    sampled_classes = rng.choice(classes_to_sample, class_sample_size)

    sampled_df = input_df[input_df.class_id.isin(sampled_classes)]
    sampled_df.to_csv(
        output_file, sep=",", encoding="utf-8", float_format="%g", index=False
    )
| [
"pandas.unique",
"numpy.random.default_rng",
"pandas.read_csv",
"os.path.basename"
] | [((700, 713), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (711, 713), False, 'from numpy.random import default_rng\n'), ((730, 753), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (741, 753), True, 'import pandas as pd\n'), ((772, 803), 'pandas.unique', 'pd.unique', (["input_df['class_id']"], {}), "(input_df['class_id'])\n", (781, 803), True, 'import pandas as pd\n'), ((877, 891), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (888, 891), True, 'import pandas as pd\n'), ((942, 967), 'pandas.unique', 'pd.unique', (["df['class_id']"], {}), "(df['class_id'])\n", (951, 967), True, 'import pandas as pd\n'), ((355, 374), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (371, 374), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 16:32:08 2020
@author: LionelMassoulard
"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, is_classifier, is_regressor
from sklearn.preprocessing import OrdinalEncoder, KBinsDiscretizer
from aikit.tools.data_structure_helper import make2dimensions, convert_generic
from aikit.enums import DataTypes
from aikit.transformers.categories import _OrdinalOneHotEncoder
class _BaseOrdinalClassifier(BaseEstimator, ClassifierMixin):
    """ this class is the base class for Ordinal classifier,
    it contains methods to convert the target into another format that will be used to fit the underlying classifier

    Subclasses are expected to define a ``classes`` attribute ("auto" or an
    explicit list of categories) before calling :meth:`_prepare_target`.
    """

    def _prepare_target(self, y, klass, conversion_type):
        """ prepare the target so that it can be given to the underlying model to use

        Parameters
        ----------
        y : array
            the original target

        klass : type
            the encoder to use for the target

        conversion_type : DataType
            the output type desired by the target

        Set
        ---
        self._mono_target : bool
            does the original problem as one target or not

        self._target_encoded : the encoder used on the target

        Returns
        --------
        y_encoded : array
            the modified target
        """
        self._mono_target = y.ndim == 1
        self._target_dtype = y.dtype

        if isinstance(self.classes, str) and self.classes == "auto":
            categories = "auto"
        else:
            if self._mono_target:
                categories = [self.classes]  # because OrdinalEncoder expect a list
            else:
                if not isinstance(self.classes, list):
                    raise TypeError("For multi-target classes should be a list, instead I got %s" % str(type(self.classes)))
                categories = self.classes

        self._target_encoder = klass(categories=categories, dtype=np.int32)

        yd2 = convert_generic(make2dimensions(y), output_type=conversion_type)
        if conversion_type == DataTypes.NumpyArray and yd2.dtype.kind == 'U':
            # Bug fix: np.object was removed in NumPy 1.24; the builtin
            # `object` is the documented equivalent.
            yd2 = yd2.astype(object, copy=False)

        y_encoded = self._target_encoder.fit_transform(yd2)
        return y_encoded

    @property
    def classes_(self):
        # sklearn-style fitted attribute: single array for mono-target,
        # list of arrays for multi-target
        if self._mono_target:
            return self._target_encoder.categories_[0]
        else:
            return self._target_encoder.categories_
class ClassifierFromRegressor(_BaseOrdinalClassifier):
    """ this class transform a regressor into a classifier
    it can be used for ordinal classification
    This model will transform the target into an increasing list of integer
    and then fit a regression on that
    """
    def __init__(self,
                 regressor,
                 kernel_windows=0.2,
                 classes="auto"
                 ):
        self.regressor=regressor
        self.classes=classes
        self.kernel_windows=kernel_windows

    def fit(self, X, y):
        """ encode the target as consecutive integers and fit the regressor on them """
        if not is_regressor(self.regressor):
            raise TypeError("regressor should be a sklearn regressor")
        y_encoded = self._prepare_target(y, klass=OrdinalEncoder, conversion_type=DataTypes.NumpyArray)
        if self._mono_target:
            y_int = y_encoded[:,0]
            assert len(self._target_encoder.categories_) == 1
        else:
            y_int = y_encoded  # I keep the multi-dimension
            assert len(self._target_encoder.categories_) == y.shape[1]
        self.regressor.fit(X, y_int)
        return self

    def predict(self, X):
        """ predict by rounding the regression output to the closest integer code
        and mapping it back to the original classes """
        y_hat = self.regressor.predict(X)           # call regressor
        y_int_hat = (y_hat + 0.5).astype(np.int32)  # conversion to closest int
        y_int_2d = make2dimensions(y_int_hat)
        # The regressor is unconstrained, so the rounded code can fall outside
        # [0, n_categories - 1]: a negative code would silently wrap to the last
        # category through fancy indexing in inverse_transform, and a too-large
        # code would raise. Clip each target column into its valid code range.
        for j, category in enumerate(self._target_encoder.categories_):
            y_int_2d[:, j] = np.clip(y_int_2d[:, j], 0, len(category) - 1)
        y_hat = self._target_encoder.inverse_transform(y_int_2d)
        if self._mono_target:
            y_hat = y_hat[:, 0]
        return y_hat.astype(self._target_dtype)

    def predict_proba(self, X):
        """ build pseudo-probabilities from the distance between the regression
        output and each integer class code """
        y_hat = self.regressor.predict(X)  # call regressor
        if self._mono_target:
            y_hat_2d = y_hat[:, np.newaxis]
            assert y_hat.ndim == 1
        else:
            y_hat_2d = y_hat
            assert y_hat.ndim == 2
        probas = []
        for j, category in enumerate(self._target_encoder.categories_):
            pivot_integer = np.arange(len(category))[np.newaxis, :]
            distance_to_pivot = np.abs(y_hat_2d[:, j:(j+1)] - pivot_integer)
            proba = self.distance_to_proba(distance_to_pivot)
            probas.append(proba)
        if self._mono_target:
            return probas[0]
        else:
            return probas

    def distance_to_proba(self, d):
        """ convert a distance to a probability """
        e = np.exp(-d/self.kernel_windows)  # TODO : find a good heuristic for that kernel_windows
        return e / e.sum(axis=1, keepdims=True)
class OrdinalClassifier(_BaseOrdinalClassifier):
    """ This class transform a classifier to make it more suited to ordinal classification.
    It does so by changing the Target using the OrdinalOneHotEncoder transformer
    Concretely if we have 4 ordered classes 'y=A', 'y=B', 'y=C', 'y=D' with ('A' < 'B' < 'C' < 'D')
    It creates 3 targets :
    'y > A' , 'y>B' and 'y>C'
    The classifier is then fitted on those target.
    At the end to make a prediction we call the underlying classifier and recreates the proba
    See the paper
    https://www.cs.waikato.ac.nz/~eibe/pubs/ordinal_tech_report.pdf
    for more detailed explanation
    """
    def __init__(self, classifier, classes="auto"):
        self.classifier = classifier
        self.classes=classes

    def fit(self, X , y):
        """ encode the target into 'y > class_j' binary columns and fit the classifier on them """
        if not is_classifier(self.classifier):
            raise TypeError("classifier should be a sklearn classifier")
        # one binary column per (class boundary) per original target
        y_int = self._prepare_target(y, klass = _OrdinalOneHotEncoder, conversion_type=DataTypes.DataFrame)
        y_int = convert_generic(y_int, output_type=DataTypes.NumpyArray)
        self.classifier.fit(X, y_int)
        return self

    @staticmethod
    def _aggregate_probas_over(probas_over):
        """ helper method to go from the probabilities that the target is stricly above something to the proba of each class """
        # For example, if we have 4 ordered classes 'A', 'B', 'C' and 'D'
        #
        # probas_over = [ proba(y > A), proba(y > B), proba(y > C)] . So a list of 3 (= 4 -1 ) elements
        #
        # This corresponds to the probas of target above something
        # probas_over[j]  := P( Y > classes[j] ) #
        # To go back to P( Y == classes[j] ) I need to do
        # P( Y == classes[0] )   = 1- P(Y > classes[0] )                       # smallest target
        # P( Y == classes[j] )   = P( Y > classes[j-1] ) - P( Y > classes[j] ) # intermediate target
        # P( Y == classes[J-1] ) = P( Y > classes[J-2] )                       # highest target
        J = len(probas_over)
        classes_proba = []
        classes_proba.append( 1- probas_over[0] )
        for j in range(1, J):
            classes_proba.append( probas_over[j-1] - probas_over[j] )
        classes_proba.append(probas_over[J-1])
        # each element is a column vector (n, 1) -> (n, J+1) matrix of class probabilities
        probas = np.concatenate(classes_proba, axis=1)
        return probas

    def predict_proba(self, X):
        """ probability of each original class, rebuilt from the 'y > class_j' probabilities.
        Returns one (n, n_classes) array for a mono target, a list of them otherwise. """
        # Call classifier
        y_hat_proba = self.classifier.predict_proba(X)
        # Retrive the proba of 1 from proba matrix
        # (for a multilabel classifier, predict_proba returns one (n, 2) array per column)
        probas_over = []
        for proba, cl in zip(y_hat_proba, self.classifier.classes_):
            if cl[0] == 1:
                # degenerate column where class 1 comes first: proba of 1 is column 0
                p = proba[:,0]
            else:
                p = proba[:,1] # should always be the case
                assert len(cl) == 2
                assert cl.tolist() == [0,1]
                assert proba.shape[1] == 2
            probas_over.append(p[:, np.newaxis])
        # Now let's re-aggregate the proba of each class
        # each original target owns (len(categories) - 1) consecutive binary columns
        probas = []
        start_index = 0
        for target_index, categories in enumerate(self._target_encoder.categories_):
            end_index = start_index + len(categories) - 1 # start and end of current target
            probas_over_for_target = probas_over[start_index:end_index]
            p = self._aggregate_probas_over(probas_over_for_target)
            probas.append(p)
            start_index = end_index
        if self._mono_target:
            return probas[0]
        else:
            return probas

    def predict(self, X):
        """ predict the class with the highest re-aggregated probability """
        probas = self.predict_proba(X)
        classes = self.classes_
        if self._mono_target:
            y_hat = classes[probas.argmax(axis=1)]
        else:
            # one argmax per target, then stack the predicted columns side by side
            all_pred = [ c[p.argmax(axis=1)][:, np.newaxis] for p, c in zip(probas, classes)]
            y_hat = np.concatenate(all_pred, axis=1)
        return y_hat.astype(self._target_dtype)
class RegressorFromClassifier(BaseEstimator, RegressorMixin):
    """ Transform a Classifier into a regressor
    does it by clustering the target and fit a classification
    """
    def __init__(self,
                 classifier,
                 strategy="kmeans",
                 n_bins=10,
                 y_clusterer=None
                 ):
        self.classifier=classifier
        self.strategy=strategy
        self.n_bins=n_bins
        self.y_clusterer=y_clusterer

    def get_default_y_cluster(self, y=None):
        """ this methods returns the default clusterer to use, if y_clusterer is None """
        return KBinsDiscretizer(n_bins=self.n_bins, strategy=self.strategy, encode="ordinal")

    def fit(self, X, y):
        """ cluster the target, fit the classifier on the cluster ids, and store
        the per-cluster target means used later by predict """
        self._mono_target = y.ndim == 1
        if self.y_clusterer is None:
            y_clusterer = self.get_default_y_cluster(y)
        else:
            y_clusterer = self.y_clusterer
        # TODO : check that it is a clusterer
        if not is_classifier(self.classifier):
            raise TypeError("classifier should be a classifer")
        yd2 = make2dimensions(y)
        # some clusterers expose fit_predict, others (e.g. KBinsDiscretizer) only fit_transform
        if hasattr(y_clusterer, "fit_predict"):
            y_cl = y_clusterer.fit_predict(yd2)
        else:
            y_cl = y_clusterer.fit_transform(yd2).astype('int32')
        if y_cl.ndim == 1:
            y_cl = y_cl[:, np.newaxis]
        if self._mono_target and y_cl.shape[1] > 1:
            raise ValueError("The cluster should return only 1 dimensional clusters")
        self._mono_cluster = y_cl.shape[1] == 1
        self.classifier.fit(X, y_cl) # fit classifier on result of cluster
        # normalise classes_ to a list of per-column class arrays
        if self._mono_cluster:
            classes = [self.classifier.classes_]
        else:
            classes = self.classifier.classes_
        all_mean_mapping = self._compute_y_mean(yd2, y_cl)
        # stack the per-cluster means in class order so that predict can use a dot product
        all_y_mean_mapping_matrix = []
        for classe, y_mean_mapping in zip(classes, all_mean_mapping):
            mat = np.concatenate([y_mean_mapping[cl] for cl in classe], axis=0)
            all_y_mean_mapping_matrix.append(mat)
        self._all_y_mean_matrix = all_y_mean_mapping_matrix
        return self

    def _compute_y_mean(self, yd2, y_cl):
        """ compute the mean of each target within each cluster
        Those value will be needed in order to make the final predictions
        """
        assert y_cl.ndim == 2
        all_mean_mapping = []
        for j in range(y_cl.shape[1]):
            # map each cluster id to the row indices belonging to that cluster
            index_dico = {cl : g.index for cl, g in pd.DataFrame({"y":y_cl[:, j]}).groupby("y")}
            if self._mono_cluster and not self._mono_target:
                # it means that
                # 1. I have more than one target ...
                # 2. ... but the cluster returns one dimension only
                mean_mapping = {cl:yd2[index.values,:].mean(axis=0, keepdims=True) for cl, index in index_dico.items()}
            else:
                mean_mapping = {cl:yd2[index.values,j:(j+1)].mean(axis=0, keepdims=True) for cl, index in index_dico.items()}
            all_mean_mapping.append(mean_mapping)
        return all_mean_mapping # for each cluster, mean of each target
​
    def predict(self, X):
        """ prediction = probability-weighted average of the per-cluster target means """
        y_hat_probas = self.classifier.predict_proba(X)
        if self._mono_cluster:
            y_hat_probas = [y_hat_probas]
        y_hats = [ np.dot(y_hat_proba, y_mean_mapping_matrix) for y_hat_proba, y_mean_mapping_matrix in zip(y_hat_probas, self._all_y_mean_matrix) ]
        if len(y_hats) > 1:
            y_hat = np.concatenate(y_hats, axis=1)
        else:
            y_hat = y_hats[0]
        if self._mono_target:
            return y_hat[:, 0]
        else:
            return y_hat
| [
"numpy.abs",
"sklearn.preprocessing.KBinsDiscretizer",
"sklearn.base.is_classifier",
"pandas.DataFrame",
"numpy.exp",
"aikit.tools.data_structure_helper.make2dimensions",
"numpy.dot",
"numpy.concatenate",
"sklearn.base.is_regressor",
"aikit.tools.data_structure_helper.convert_generic"
] | [((4998, 5030), 'numpy.exp', 'np.exp', (['(-d / self.kernel_windows)'], {}), '(-d / self.kernel_windows)\n', (5004, 5030), True, 'import numpy as np\n'), ((6238, 6294), 'aikit.tools.data_structure_helper.convert_generic', 'convert_generic', (['y_int'], {'output_type': 'DataTypes.NumpyArray'}), '(y_int, output_type=DataTypes.NumpyArray)\n', (6253, 6294), False, 'from aikit.tools.data_structure_helper import make2dimensions, convert_generic\n'), ((7533, 7570), 'numpy.concatenate', 'np.concatenate', (['classes_proba'], {'axis': '(1)'}), '(classes_proba, axis=1)\n', (7547, 7570), True, 'import numpy as np\n'), ((9932, 10010), 'sklearn.preprocessing.KBinsDiscretizer', 'KBinsDiscretizer', ([], {'n_bins': 'self.n_bins', 'strategy': 'self.strategy', 'encode': '"""ordinal"""'}), "(n_bins=self.n_bins, strategy=self.strategy, encode='ordinal')\n", (9948, 10010), False, 'from sklearn.preprocessing import OrdinalEncoder, KBinsDiscretizer\n'), ((10453, 10471), 'aikit.tools.data_structure_helper.make2dimensions', 'make2dimensions', (['y'], {}), '(y)\n', (10468, 10471), False, 'from aikit.tools.data_structure_helper import make2dimensions, convert_generic\n'), ((2163, 2181), 'aikit.tools.data_structure_helper.make2dimensions', 'make2dimensions', (['y'], {}), '(y)\n', (2178, 2181), False, 'from aikit.tools.data_structure_helper import make2dimensions, convert_generic\n'), ((3224, 3252), 'sklearn.base.is_regressor', 'is_regressor', (['self.regressor'], {}), '(self.regressor)\n', (3236, 3252), False, 'from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, is_classifier, is_regressor\n'), ((4016, 4042), 'aikit.tools.data_structure_helper.make2dimensions', 'make2dimensions', (['y_int_hat'], {}), '(y_int_hat)\n', (4031, 4042), False, 'from aikit.tools.data_structure_helper import make2dimensions, convert_generic\n'), ((4649, 4693), 'numpy.abs', 'np.abs', (['(y_hat_2d[:, j:j + 1] - pivot_integer)'], {}), '(y_hat_2d[:, j:j + 1] - pivot_integer)\n', (4655, 4693), 
True, 'import numpy as np\n'), ((6008, 6038), 'sklearn.base.is_classifier', 'is_classifier', (['self.classifier'], {}), '(self.classifier)\n', (6021, 6038), False, 'from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, is_classifier, is_regressor\n'), ((9177, 9209), 'numpy.concatenate', 'np.concatenate', (['all_pred'], {'axis': '(1)'}), '(all_pred, axis=1)\n', (9191, 9209), True, 'import numpy as np\n'), ((10330, 10360), 'sklearn.base.is_classifier', 'is_classifier', (['self.classifier'], {}), '(self.classifier)\n', (10343, 10360), False, 'from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, is_classifier, is_regressor\n'), ((11356, 11417), 'numpy.concatenate', 'np.concatenate', (['[y_mean_mapping[cl] for cl in classe]'], {'axis': '(0)'}), '([y_mean_mapping[cl] for cl in classe], axis=0)\n', (11370, 11417), True, 'import numpy as np\n'), ((12809, 12851), 'numpy.dot', 'np.dot', (['y_hat_proba', 'y_mean_mapping_matrix'], {}), '(y_hat_proba, y_mean_mapping_matrix)\n', (12815, 12851), True, 'import numpy as np\n'), ((12996, 13026), 'numpy.concatenate', 'np.concatenate', (['y_hats'], {'axis': '(1)'}), '(y_hats, axis=1)\n', (13010, 13026), True, 'import numpy as np\n'), ((11938, 11969), 'pandas.DataFrame', 'pd.DataFrame', (["{'y': y_cl[:, j]}"], {}), "({'y': y_cl[:, j]})\n", (11950, 11969), True, 'import pandas as pd\n')] |
###################################################################################################
# Project : Global Challenges Research Fund (GCRF) African SWIFT (Science for Weather
# Information and Forecasting Techniques.
#
# Program name : dewpoint_HL.py
#
# Author : <NAME>, University of Leeds, NCAS
#
# Date created : Jan 2019
#
# Purpose : Plot dewpoint images as part of SWIFT_GFSplotting.
#
# Revision History :
#
# Usage : Can be used as part of wider plotting repository or independently e.g.
# "python3 dewpoint.py time lev lat lon lat lon"
# where time is in the initialisation time in the form YYYYMMDDHH
###################################################################################################
import numpy as np
import Nio as nio
import Ngl as ngl
import glob
import datetime as dt
import sys
import os
import datetime
GFS_dir = os.environ['SWIFT_GFS']
#####################################################################################################
# 9-point smoother function, required to make the geopotential contours look better.
#
def smth9(x, p, q):
    """Run a 9-point smoother on the 2D array ``x`` using weights ``p`` and ``q``.

    Interior point (i, j) becomes::

        x[i,j] + p/4 * (sum of the 4 edge neighbours   - 4*x[i,j])
               + q/4 * (sum of the 4 corner neighbours - 4*x[i,j])

    Parameters
    ----------
    x : 2D array-like, at least 3x3 in both dimensions
    p, q : float
        weights for the edge-neighbour and corner-neighbour terms

    Returns
    -------
    numpy.ndarray (float32)
        smoothed array; the perimeter values are copied unchanged from ``x``.
    """
    x = np.asarray(x)
    #
    # Get array dimensions and check on sizes.
    #
    ni = x.shape[0]
    nj = x.shape[1]
    if (ni < 3 or nj < 3):
        print("smth9: both array dimensions must be at least three.")
        sys.exit()
    #
    # Smooth (vectorised: identical arithmetic to the original i/j double loop,
    # but computed in whole-array slices instead of Python-level iteration).
    #
    po4 = p/4.
    qo4 = q/4.
    output = np.zeros([ni, nj], 'f')
    centre = x[1:-1, 1:-1]
    # edge neighbours: up, left, down, right
    term1 = po4*(x[:-2, 1:-1] + x[1:-1, :-2] + x[2:, 1:-1] + x[1:-1, 2:] - 4.*centre)
    # corner neighbours: up-right, up-left, down-left, down-right
    term2 = qo4*(x[:-2, 2:] + x[:-2, :-2] + x[2:, :-2] + x[2:, 2:] - 4.*centre)
    output[1:-1, 1:-1] = centre + term1 + term2
    #
    # Set the perimeter values to the original x values.
    #
    output[0, :] = x[0, :]
    output[ni-1, :] = x[ni-1, :]
    output[:, 0] = x[:, 0]
    output[:, nj-1] = x[:, nj-1]
    #
    # Return smoothed array.
    #
    return output
###################################################################################################
# Main script to plot dewpoint
# define directory
diri = (os.getcwd())+"/"
# forecast times (currently set to plot 0 to 48 hours), read from the namelist control file
fore = (os.popen("cat %s/controls/namelist | grep 'fore:' | awk -F: '{print $2}' | tr ',' ' '"%(GFS_dir))).read().split()
# np.int was removed in NumPy 1.24; the builtin int is the correct replacement
fore = [int(f) for f in fore]
# accept initialisation time and dates and pressure level as arguments
init_dt = (sys.argv[1])
lev_hPa = (sys.argv[2])
# read in domains and accept lat and lon limits as arguments;
# use a context manager so the file handle is closed deterministically
key_list = []
latlon_list = []
with open(GFS_dir+"/controls/domains") as b:
    domains_content = b.readlines()
for domain in domains_content:
    # each line looks like "<name>: lat,lon,lat,lon"
    key_list.append(domain.split(":")[0])
    latlon_str = (domain.split(":")[1]).strip().split(",")
    latlon_flt = [float(ll) for ll in latlon_str]
    latlon_list.append(latlon_flt)
domains_dict = dict(zip(key_list,latlon_list))
latbl = float(sys.argv[3])
lonbl = float(sys.argv[4])
lattr = float(sys.argv[5])
lontr = float(sys.argv[6])
# try to recognise the requested box as one of the named domains
region = "unnamedregion"
for domain in domains_dict.keys():
    # NOTE(review): the second latitude group uses "or" where the symmetric longitude
    # check suggests "and" was intended -- behaviour kept as-is, confirm before changing
    if ((latbl == domains_dict[domain][0] and lattr == domains_dict[domain][2]) or (latbl == domains_dict[domain][2] or lattr == domains_dict[domain][0])) and ((lonbl == domains_dict[domain][1] and lontr == domains_dict[domain][3]) or (lonbl == domains_dict[domain][3] and lontr == domains_dict[domain][1])):
        region = domain.strip()
# arrange lat and lon values to get bottom left and top right lat lon values
if latbl == lattr or lonbl == lontr:
    sys.exit('lat and lon values must be different')
else:
    if latbl < lattr:
        latbl, lattr = lattr, latbl
    if lonbl > lontr:
        lonbl, lontr = lontr, lonbl
# read in analysis files
a_fili = "analysis_gfs_4_%s_%s00_000.nc" % (init_dt[:8], init_dt[8:10])
# read pressure levels from analysis file
analysis = nio.open_file(diri+a_fili)
level_dim = analysis.variables["TMP_P0_L100_GLL0"].dimensions[0]
levs_p1 = analysis.variables[level_dim]
# format the level values as whole numbers (divided by 100 -- presumably Pa -> hPa; confirm with file metadata)
levs_p = ['{:.0f}'.format(x) for x in levs_p1[:]/100.0]
del levs_p1
# identify level index
lev_index = levs_p.index(lev_hPa)
# read in lat
lat1 = analysis.variables["lat_0"]
lat_temp = lat1[:]
# nearest grid points to the requested corner latitudes
latbl_idx = (np.abs(lat_temp-latbl)).argmin()
lattr_idx = (np.abs(lat_temp-lattr)).argmin()
if latbl_idx == lattr_idx:
    sys.exit('lat values are not different enough, they must have relate to different grid points')
elif latbl_idx > 1 and lattr_idx < len(lat_temp)-2:
    # pad the box by two grid points on each side when there is room
    lat_box1 = latbl_idx-2
    lat_box2 = lattr_idx+2
    lat = lat_temp[lat_box1:lat_box2]
else:
    lat_box1 = latbl_idx
    lat_box2 = lattr_idx
    lat = lat_temp[lat_box1:lat_box2]
del(latbl_idx)
del(lattr_idx)
del(lat1)
del(lat_temp)
# read in lon
lon1 = analysis.variables["lon_0"]
# check to see if box crosses Greenwich Meridian. If so then the lon values must be modified for plot to work.
if (np.sign(lonbl) + np.sign(lontr)) >= -1 and (np.sign(lonbl) + np.sign(lontr)) <= 1:
    # box straddles 0 degrees: remap longitudes from [0, 360) to (-180, 180]
    # and swap the corners so the slicing below is consistent
    lonbl, lontr = lontr, lonbl
    lon_temp = np.where(lon1[:]>=180.0, lon1[:]-360.0, lon1[:])
    lonbl_idx = (np.abs(lon_temp-lonbl)).argmin()
    lontr_idx = (np.abs(lon_temp-lontr)).argmin()
    if lonbl_idx == lontr_idx:
        sys.exit('lon values are not different enough, they must have relate to different grid points')
    elif lontr_idx > len(lon_temp)/2 and lonbl_idx <= len(lon_temp)/2:
        # keep two slices, one on each side of the meridian; they are
        # re-joined (eastern part first) below
        lon_box1 = lonbl_idx+2
        lon_box2 = lontr_idx-2
        lon_box3 = len(lon_temp)-1
        lon_temp1 = lon_temp[0:lon_box1]
        lon_temp2 = lon_temp[lon_box2:lon_box3]
    else:
        lon_box1 = lonbl_idx
        lon_box2 = lontr_idx
        lon_box3 = len(lon_temp)-1
        lon_temp1 = lon_temp[0:lon_box1]
        lon_temp2 = lon_temp[lon_box2:lon_box3]
    lon = np.append(lon_temp2, lon_temp1)
    del(lon_temp1)
    del(lon_temp2)
    del(lonbl_idx)
    del(lontr_idx)
    del(lon_temp)
else:
    # box entirely on one side of the meridian: a single contiguous slice is enough
    lon_temp = lon1[:]
    lonbl_idx = (np.abs(lon_temp-lonbl)).argmin()
    lontr_idx = (np.abs(lon_temp-lontr)).argmin()
    if lonbl_idx == lontr_idx:
        sys.exit('lon values are not different enough, they must have relate to different grid points')
    elif lonbl_idx > 1 and lontr_idx < len(lon_temp)-2:
        lon_box1 = lonbl_idx-2
        lon_box2 = lontr_idx+2
        lon = lon_temp[lon_box1:lon_box2]
    else:
        lon_box1 = lonbl_idx
        lon_box2 = lontr_idx
        lon = lon_temp[lon_box1:lon_box2]
# read in temperature and relative humidity, checking whether box crosses Greenwich Meridian.
# When it does, the two longitude slices either side of the meridian are stitched together
# (eastern slice first) so the data matches the reordered lon axis built above.
if (np.sign(lonbl) + np.sign(lontr)) >= -1 and (np.sign(lonbl) + np.sign(lontr)) <= 1:
    temp1 = analysis.variables["TMP_P0_L100_GLL0"][lev_index,:,:]
    temp_temp1 = temp1[lat_box1:lat_box2,0:lon_box1]
    temp_temp2 = temp1[lat_box1:lat_box2,lon_box2:lon_box3]
    temp = np.concatenate((temp_temp2,temp_temp1),axis=1)
    del temp1
    del temp_temp1
    del temp_temp2
    # relative humidity as a fraction (variable is stored in percent)
    rh1 = analysis.variables["RH_P0_L100_GLL0"][lev_index,:,:]/100.0
    rh_temp1 = rh1[lat_box1:lat_box2,0:lon_box1]
    rh_temp2 = rh1[lat_box1:lat_box2,lon_box2:lon_box3]
    rh = np.concatenate((rh_temp2,rh_temp1),axis=1)
    # avoid log(0) in the dewpoint formula below
    rh = np.where(rh == 0.0, 0.0001, rh)
    del rh1
    del rh_temp1
    del rh_temp2
else:
    temp1 = analysis.variables["TMP_P0_L100_GLL0"][lev_index,:,:]
    temp = temp1[lat_box1:lat_box2,lon_box1:lon_box2]
    del temp1
    rh1 = analysis.variables["RH_P0_L100_GLL0"][lev_index,:,:]/100.0
    rh = rh1[lat_box1:lat_box2,lon_box1:lon_box2]
    rh = np.where(rh == 0.0, 0.0001, rh)
    del rh1
# calculate dewpoint temperature for water
# Magnus-type saturation vapour pressure: ps = c1 * exp(c2*T / (c3+T)) with T in Celsius;
# the coefficient pair switches at 0 C (above/below freezing)
c1 = 6.10780
c2 = np.where(temp > 273.15, 17.08085, 17.84362)
c3 = np.where(temp > 273.15, 234.175, 245.425)
ps = c1*np.exp((c2*(temp-273.15))/(c3+(temp-273.15)))
# actual vapour pressure (NOTE: "pd" is a local array here, not pandas -- pandas is not imported in this script)
pd = ps*rh
# invert the formula for dewpoint: Td = c3*ln(pd/c1) / (c2 - ln(pd/c1)),
# written here as (-c3*ln) / (ln - c2), which is algebraically identical
dewpoint = ((np.log(pd/c1))*c3*-1.0)/((np.log(pd/c1))-c2)
dewpoint = smth9(dewpoint, 0.5, 0.25)
del temp
del rh
del c1
del c2
del c3
del ps
del pd
# create 2d lat and lon coordinate arrays for the plot's sfXArray/sfYArray
lat2d = np.zeros((len(lat),len(lon)))
lon2d = np.zeros((len(lat),len(lon)))
for i in range(0, len(lon)):
    lat2d[:,i] = lat
for i in range(0, len(lat)):
    lon2d[i,:] = lon
# open workspace for forecast plots
wks_type = "png"
wks = ngl.open_wks(wks_type, "GFSanalysis_%s_%s_dewpoint_%shPa" % (region, init_dt[0:10], lev_hPa))
# define resources for forecast plots
res = ngl.Resources()
res.nglDraw = False
res.nglFrame = False
res.vpWidthF = 0.9
res.vpHeightF = 0.6
# reversed upper part of the palette so low dewpoints map to the light end
cmap = ngl.read_colormap_file("WhiteBlueGreenYellowRed")
res.mpGridAndLimbOn = False
res.cnFillPalette = cmap[:30:-1]
res.pmTickMarkDisplayMode = "Never"
res.cnInfoLabelOn = False
res.cnFillOn = True
res.cnLineLabelsOn = False
res.cnLinesOn = False
res.cnMonoLineLabelFontColor = True
# label bar layout
res.lbAutoManage = False
res.lbLabelFontHeightF = 0.005
res.lbOrientation = "horizontal"
res.lbLabelAngleF = 45
res.pmLabelBarOrthogonalPosF = -1.
res.pmLabelBarParallelPosF = 0.25
res.pmLabelBarWidthF = 0.3
res.pmLabelBarHeightF = 0.1
res.lbTitleString = "%shPa dewpoint" % (lev_hPa)
res.lbTitleFontHeightF = 0.0125
res.sfXArray = lon2d
res.sfYArray = lat2d
# map projection and view limits (note lonbl/lontr may have been swapped above)
res.mpProjection = "CylindricalEquidistant"
res.mpLimitMode = "LatLon"    # Limit the map view.
res.mpMinLonF = lontr
res.mpMaxLonF = lonbl
res.mpMinLatF = lattr
res.mpMaxLatF = latbl
res.mpPerimOn = True
res.mpOutlineBoundarySets = "AllBoundaries"
res.mpNationalLineColor = "gray40"
res.mpNationalLineThicknessF = 1.5
res.mpGeophysicalLineColor = "gray40"
res.mpGeophysicalLineThicknessF = 1.5
res.cnMonoLineColor = True
# filled contour levels in degrees C
res.cnLevelSelectionMode = "ManualLevels"
res.cnMinLevelValF = -37.5
res.cnMaxLevelValF = 22.0
res.cnLevelSpacingF = 2.5
res.cnLineThicknessF = 2.5
# create dewpoint plot for analysis data
dp_plot = ngl.contour_map(wks,dewpoint,res)
# drop map/grid resources before reusing res for the overlay-only contour below
del res.mpProjection
del res.mpLimitMode
del res.mpMinLonF
del res.mpMaxLonF
del res.mpMinLatF
del res.mpMaxLatF
del res.mpPerimOn
del res.mpOutlineBoundarySets
del res.mpNationalLineColor
del res.mpNationalLineThicknessF
del res.mpGeophysicalLineColor
del res.mpGeophysicalLineThicknessF
del res.mpGridAndLimbOn
# if pressure levels are 1000 or 925 hPa mark on 15 degree C contour in black (ITD)
if (lev_hPa == "925") or (lev_hPa == "1000"):
    res.cnFillOn = False
    res.cnLineLabelBackgroundColor = -1
    res.cnLineLabelDensityF = 0.8
    res.cnLineLabelFontColor = "Black"
    res.cnLineLabelFontHeightF = 0.015
    res.cnLineLabelPerimOn = False
    res.cnLineLabelsOn = True
    res.cnLinesOn = True
    res.cnMonoLineLabelFontColor = True
    res.lbLabelFontHeightF = 0.01
    res.cnLineDashPattern = 11
    res.cnLineThicknessF = 5.0
    res.cnLineColor = "purple"
    # wide level spacing so effectively a single contour line is drawn
    res.cnLevelSelectionMode = "ManualLevels"
    res.cnMinLevelValF = -85.0
    res.cnMaxLevelValF = 115.0
    res.cnLevelSpacingF = 100.0
    # plot ITD and overlay on colour contours
    dp_plot2 = ngl.contour(wks,dewpoint,res)
    ngl.overlay(dp_plot,dp_plot2)
ngl.maximize_plot(wks, dp_plot)
ngl.draw(dp_plot)
ngl.frame(wks)
ngl.destroy(wks)
del res
del dewpoint
# open forecast file
f_fili = "GFS_forecast_%s_%s.nc" % (init_dt[:8], init_dt[8:10])
forecast = nio.open_file(diri+f_fili)
# loop through forecast times
for i in range(0, len(fore)):
# create string for valid time
valid_date = (datetime.datetime(int(init_dt[:4]), int(init_dt[4:6]), int(init_dt[6:8]), int(init_dt[8:10])) + datetime.timedelta(hours=int(fore[i]))).strftime("%Y%m%d%H")
# read in temperature and relative humidity, checking whether box crosses Greenwich Meridian.
if (np.sign(lonbl) + np.sign(lontr)) >= -1 and (np.sign(lonbl) + np.sign(lontr)) <= 1:
temp1 = forecast.variables["TMP_P0_L100_GLL0"][i,lev_index,:,:]
temp_temp1 = temp1[lat_box1:lat_box2,0:lon_box1]
temp_temp2 = temp1[lat_box1:lat_box2,lon_box2:lon_box3]
temp = np.concatenate((temp_temp2,temp_temp1),axis=1)
del temp1
del temp_temp1
del temp_temp2
rh1 = forecast.variables["RH_P0_L100_GLL0"][i,lev_index,:,:]/100.0
rh_temp1 = rh1[lat_box1:lat_box2,0:lon_box1]
rh_temp2 = rh1[lat_box1:lat_box2,lon_box2:lon_box3]
rh = np.concatenate((rh_temp2,rh_temp1),axis=1)
rh = np.where(rh == 0.0, 0.0001, rh)
del rh1
del rh_temp1
del rh_temp2
else:
temp1 = forecast.variables["TMP_P0_L100_GLL0"][i,lev_index,:,:]
temp = temp1[lat_box1:lat_box2,lon_box1:lon_box2]
del temp1
rh1 = forecast.variables["RH_P0_L100_GLL0"][i,lev_index,:,:]/100.0
rh = rh1[lat_box1:lat_box2,lon_box1:lon_box2]
rh = np.where(rh == 0.0, 0.0001, rh)
del rh1
# calculate dewpoint temperature
c1 = np.zeros_like(temp)
c1 = 6.10780
c2 = np.zeros_like(temp)
c2 = np.where(temp > 273.15, 17.08085, 17.84362)
c3 = np.zeros_like(temp)
c3 = np.where(temp > 273.15, 234.175, 245.425)
ps = c1*np.exp((c2*(temp-273.15))/(c3+(temp-273.15)))
pd = ps*rh
dewpoint = ((np.log(pd/c1))*c3*-1.0)/((np.log(pd/c1))-c2)
dewpoint = smth9(dewpoint, 0.5, 0.25)
del temp
del rh
del c1
del c2
del c3
del ps
del pd
# open workspace for forecast plots
wks_type = "png"
wks = ngl.open_wks(wks_type, "GFSforecast_%s_%s_dewpoint_%shPa_%s_%03d" % (region, valid_date, lev_hPa, init_dt[0:10], fore[i]))
# define resources for forecast plots
res = ngl.Resources()
res.nglDraw = False
res.nglFrame = False
res.vpWidthF = 0.9
res.vpHeightF = 0.6
cmap = ngl.read_colormap_file("WhiteBlueGreenYellowRed")
res.mpGridAndLimbOn = False
res.cnFillPalette = cmap[:30:-1]
res.pmTickMarkDisplayMode = "Never"
# res.tiMainString = "%s hPa dewpoint with respect to water forecast %s +%03d" % (lev_hPa, init_dt[0:10], fore[i])
# res.tiMainFontHeightF = 0.015
res.cnInfoLabelOn = False
res.cnFillOn = True
res.cnInfoLabelOn = False
res.cnLineLabelsOn = False
res.cnLinesOn = False
res.cnMonoLineLabelFontColor = True
res.lbAutoManage = False
res.lbLabelFontHeightF = 0.005
res.lbOrientation = "horizontal"
res.lbLabelAngleF = 45
res.pmLabelBarOrthogonalPosF = -1.
res.pmLabelBarParallelPosF = 0.25
res.pmLabelBarWidthF = 0.3
res.pmLabelBarHeightF = 0.1
res.lbTitleString = "%shPa dewpoint" % (lev_hPa)
res.lbTitleFontHeightF = 0.0125
res.sfXArray = lon2d
res.sfYArray = lat2d
res.mpProjection = "CylindricalEquidistant"
res.mpLimitMode = "LatLon" # Limit the map view.
res.mpMinLonF = lontr
res.mpMaxLonF = lonbl
res.mpMinLatF = lattr
res.mpMaxLatF = latbl
res.mpPerimOn = True
res.mpOutlineBoundarySets = "AllBoundaries"
res.mpNationalLineColor = "gray40"
res.mpNationalLineThicknessF = 1.5
res.mpGeophysicalLineColor = "gray40"
res.mpGeophysicalLineThicknessF = 1.5
res.cnMonoLineColor = True
res.cnLineDashPattern = 11
res.cnLineThicknessF = 5.0
res.cnLineColor = "purple"
res.cnLevelSelectionMode = "ManualLevels"
res.cnMinLevelValF = -38.5
res.cnMaxLevelValF = 21.0
res.cnLevelSpacingF = 2.5
res.cnLineThicknessF = 2.5
# create dewpoint plots for forecast times
dp_plot = ngl.contour_map(wks,dewpoint,res)
del res.mpProjection
del res.mpLimitMode
del res.mpMinLonF
del res.mpMaxLonF
del res.mpMinLatF
del res.mpMaxLatF
del res.mpPerimOn
del res.mpOutlineBoundarySets
del res.mpNationalLineColor
del res.mpNationalLineThicknessF
del res.mpGeophysicalLineColor
del res.mpGeophysicalLineThicknessF
del res.mpGridAndLimbOn
# if pressure levels are 1000 or 925 hPa mark on 14 degree C contour in black (ITD)
if (lev_hPa == "925") or (lev_hPa == "1000"):
res.cnFillOn = False
res.cnLineLabelBackgroundColor = -1
res.cnLineLabelDensityF = 0.8
res.cnLineLabelFontColor = "Black"
res.cnLineLabelFontHeightF = 0.015
res.cnLineLabelPerimOn = False
res.cnLineLabelsOn = True
res.cnLinesOn = True
res.cnMonoLineLabelFontColor = True
res.lbLabelFontHeightF = 0.01
res.cnLevelSelectionMode = "ManualLevels"
res.cnMinLevelValF = -86.0
res.cnMaxLevelValF = 114.0
res.cnLevelSpacingF = 100.0
res.cnLineThicknessF = 2.5
# plot ITD and overlay on colour contours
dp_plot2 = ngl.contour(wks,dewpoint,res)
ngl.overlay(dp_plot,dp_plot2)
ngl.maximize_plot(wks, dp_plot)
ngl.draw(dp_plot)
ngl.frame(wks)
ngl.destroy(wks)
del res
del dewpoint
os.system('mogrify -trim *_'+region+'_'+init_dt[0:10]+'_dewpoint_'+lev_hPa+'hPa.png')
#if region == "WA" or region == "unknownWA":
# os.system('mogrify -resize 886x600 *_'+region+'_'+init_dt[0:10]+'_dewpoint_'+lev_hPa+'hPa.png')
#elif region == "EA" or region == "unknownEA":
# os.system('mogrify -resize 600x733 *_'+region+'_'+init_dt[0:10]+'_dewpoint_'+lev_hPa+'hPa.png')
os.system('mv *_'+region+'_'+init_dt[0:10]+'_dewpoint_'+lev_hPa+'hPa.png %s/MARTIN/GFS/'%(GFS_dir)+region+'/'+init_dt[0:10]+'/dewpoint_'+lev_hPa)
os.system('mogrify -trim *'+region+'_*dewpoint_'+lev_hPa+'hPa_'+init_dt[0:10]+'*.png')
#if region == "WA" or region == "unknownWA":
# os.system('mogrify -resize 886x600 *'+region+'_*dewpoint_'+lev_hPa+'hPa_'+init_dt[0:10]+'*.png')
#elif region == "EA" or region == "unknownEA":
# os.system('mogrify -resize 600x733 *'+region+'_*dewpoint_'+lev_hPa+'hPa_'+init_dt[0:10]+'*.png')
os.system('mv *'+region+'_*dewpoint_'+lev_hPa+'hPa_'+init_dt[0:10]+'*.png %s/MARTIN/GFS/'%(GFS_dir)+region+'/'+init_dt[0:10]+'/dewpoint_'+lev_hPa)
| [
"Ngl.open_wks",
"numpy.log",
"Ngl.frame",
"Ngl.contour",
"sys.exit",
"Ngl.destroy",
"Ngl.read_colormap_file",
"numpy.where",
"numpy.exp",
"os.popen",
"Ngl.draw",
"numpy.concatenate",
"numpy.abs",
"Ngl.contour_map",
"Ngl.Resources",
"numpy.sign",
"Ngl.maximize_plot",
"numpy.int",
... | [((4047, 4075), 'Nio.open_file', 'nio.open_file', (['(diri + a_fili)'], {}), '(diri + a_fili)\n', (4060, 4075), True, 'import Nio as nio\n'), ((7688, 7731), 'numpy.where', 'np.where', (['(temp > 273.15)', '(17.08085)', '(17.84362)'], {}), '(temp > 273.15, 17.08085, 17.84362)\n', (7696, 7731), True, 'import numpy as np\n'), ((7738, 7779), 'numpy.where', 'np.where', (['(temp > 273.15)', '(234.175)', '(245.425)'], {}), '(temp > 273.15, 234.175, 245.425)\n', (7746, 7779), True, 'import numpy as np\n'), ((8262, 8359), 'Ngl.open_wks', 'ngl.open_wks', (['wks_type', "('GFSanalysis_%s_%s_dewpoint_%shPa' % (region, init_dt[0:10], lev_hPa))"], {}), "(wks_type, 'GFSanalysis_%s_%s_dewpoint_%shPa' % (region,\n init_dt[0:10], lev_hPa))\n", (8274, 8359), True, 'import Ngl as ngl\n'), ((8402, 8417), 'Ngl.Resources', 'ngl.Resources', ([], {}), '()\n', (8415, 8417), True, 'import Ngl as ngl\n'), ((8509, 8558), 'Ngl.read_colormap_file', 'ngl.read_colormap_file', (['"""WhiteBlueGreenYellowRed"""'], {}), "('WhiteBlueGreenYellowRed')\n", (8531, 8558), True, 'import Ngl as ngl\n'), ((9990, 10025), 'Ngl.contour_map', 'ngl.contour_map', (['wks', 'dewpoint', 'res'], {}), '(wks, dewpoint, res)\n', (10005, 10025), True, 'import Ngl as ngl\n'), ((11265, 11296), 'Ngl.maximize_plot', 'ngl.maximize_plot', (['wks', 'dp_plot'], {}), '(wks, dp_plot)\n', (11282, 11296), True, 'import Ngl as ngl\n'), ((11297, 11314), 'Ngl.draw', 'ngl.draw', (['dp_plot'], {}), '(dp_plot)\n', (11305, 11314), True, 'import Ngl as ngl\n'), ((11315, 11329), 'Ngl.frame', 'ngl.frame', (['wks'], {}), '(wks)\n', (11324, 11329), True, 'import Ngl as ngl\n'), ((11331, 11347), 'Ngl.destroy', 'ngl.destroy', (['wks'], {}), '(wks)\n', (11342, 11347), True, 'import Ngl as ngl\n'), ((11568, 11596), 'Nio.open_file', 'nio.open_file', (['(diri + f_fili)'], {}), '(diri + f_fili)\n', (11581, 11596), True, 'import Nio as nio\n'), ((17208, 17309), 'os.system', 'os.system', (["('mogrify -trim *_' + region + '_' + init_dt[0:10] + 
'_dewpoint_' + lev_hPa +\n 'hPa.png')"], {}), "('mogrify -trim *_' + region + '_' + init_dt[0:10] + '_dewpoint_' +\n lev_hPa + 'hPa.png')\n", (17217, 17309), False, 'import os\n'), ((17587, 17762), 'os.system', 'os.system', (["('mv *_' + region + '_' + init_dt[0:10] + '_dewpoint_' + lev_hPa + \n 'hPa.png %s/MARTIN/GFS/' % GFS_dir + region + '/' + init_dt[0:10] +\n '/dewpoint_' + lev_hPa)"], {}), "('mv *_' + region + '_' + init_dt[0:10] + '_dewpoint_' + lev_hPa +\n 'hPa.png %s/MARTIN/GFS/' % GFS_dir + region + '/' + init_dt[0:10] +\n '/dewpoint_' + lev_hPa)\n", (17596, 17762), False, 'import os\n'), ((17734, 17836), 'os.system', 'os.system', (["('mogrify -trim *' + region + '_*dewpoint_' + lev_hPa + 'hPa_' + init_dt[0:\n 10] + '*.png')"], {}), "('mogrify -trim *' + region + '_*dewpoint_' + lev_hPa + 'hPa_' +\n init_dt[0:10] + '*.png')\n", (17743, 17836), False, 'import os\n'), ((18116, 18293), 'os.system', 'os.system', (["('mv *' + region + '_*dewpoint_' + lev_hPa + 'hPa_' + init_dt[0:10] + \n '*.png %s/MARTIN/GFS/' % GFS_dir + region + '/' + init_dt[0:10] +\n '/dewpoint_' + lev_hPa)"], {}), "('mv *' + region + '_*dewpoint_' + lev_hPa + 'hPa_' + init_dt[0:10\n ] + '*.png %s/MARTIN/GFS/' % GFS_dir + region + '/' + init_dt[0:10] +\n '/dewpoint_' + lev_hPa)\n", (18125, 18293), False, 'import os\n'), ((1565, 1588), 'numpy.zeros', 'np.zeros', (['[ni, nj]', '"""f"""'], {}), "([ni, nj], 'f')\n", (1573, 1588), True, 'import numpy as np\n'), ((2273, 2284), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2282, 2284), False, 'import os\n'), ((2477, 2486), 'numpy.int', 'np.int', (['f'], {}), '(f)\n', (2483, 2486), True, 'import numpy as np\n'), ((3727, 3775), 'sys.exit', 'sys.exit', (['"""lat and lon values must be different"""'], {}), "('lat and lon values must be different')\n", (3735, 3775), False, 'import sys\n'), ((4502, 4607), 'sys.exit', 'sys.exit', (['"""lat values are not different enough, they must have relate to different grid points"""'], {}), "(\n 'lat values are not 
different enough, they must have relate to different grid points'\n )\n", (4510, 4607), False, 'import sys\n'), ((5183, 5235), 'numpy.where', 'np.where', (['(lon1[:] >= 180.0)', '(lon1[:] - 360.0)', 'lon1[:]'], {}), '(lon1[:] >= 180.0, lon1[:] - 360.0, lon1[:])\n', (5191, 5235), True, 'import numpy as np\n'), ((5904, 5935), 'numpy.append', 'np.append', (['lon_temp2', 'lon_temp1'], {}), '(lon_temp2, lon_temp1)\n', (5913, 5935), True, 'import numpy as np\n'), ((6914, 6962), 'numpy.concatenate', 'np.concatenate', (['(temp_temp2, temp_temp1)'], {'axis': '(1)'}), '((temp_temp2, temp_temp1), axis=1)\n', (6928, 6962), True, 'import numpy as np\n'), ((7190, 7234), 'numpy.concatenate', 'np.concatenate', (['(rh_temp2, rh_temp1)'], {'axis': '(1)'}), '((rh_temp2, rh_temp1), axis=1)\n', (7204, 7234), True, 'import numpy as np\n'), ((7241, 7272), 'numpy.where', 'np.where', (['(rh == 0.0)', '(0.0001)', 'rh'], {}), '(rh == 0.0, 0.0001, rh)\n', (7249, 7272), True, 'import numpy as np\n'), ((7581, 7612), 'numpy.where', 'np.where', (['(rh == 0.0)', '(0.0001)', 'rh'], {}), '(rh == 0.0, 0.0001, rh)\n', (7589, 7612), True, 'import numpy as np\n'), ((7789, 7842), 'numpy.exp', 'np.exp', (['(c2 * (temp - 273.15) / (c3 + (temp - 273.15)))'], {}), '(c2 * (temp - 273.15) / (c3 + (temp - 273.15)))\n', (7795, 7842), True, 'import numpy as np\n'), ((11200, 11231), 'Ngl.contour', 'ngl.contour', (['wks', 'dewpoint', 'res'], {}), '(wks, dewpoint, res)\n', (11211, 11231), True, 'import Ngl as ngl\n'), ((11234, 11264), 'Ngl.overlay', 'ngl.overlay', (['dp_plot', 'dp_plot2'], {}), '(dp_plot, dp_plot2)\n', (11245, 11264), True, 'import Ngl as ngl\n'), ((13067, 13086), 'numpy.zeros_like', 'np.zeros_like', (['temp'], {}), '(temp)\n', (13080, 13086), True, 'import numpy as np\n'), ((13112, 13131), 'numpy.zeros_like', 'np.zeros_like', (['temp'], {}), '(temp)\n', (13125, 13131), True, 'import numpy as np\n'), ((13140, 13183), 'numpy.where', 'np.where', (['(temp > 273.15)', '(17.08085)', '(17.84362)'], {}), 
'(temp > 273.15, 17.08085, 17.84362)\n', (13148, 13183), True, 'import numpy as np\n'), ((13193, 13212), 'numpy.zeros_like', 'np.zeros_like', (['temp'], {}), '(temp)\n', (13206, 13212), True, 'import numpy as np\n'), ((13221, 13262), 'numpy.where', 'np.where', (['(temp > 273.15)', '(234.175)', '(245.425)'], {}), '(temp > 273.15, 234.175, 245.425)\n', (13229, 13262), True, 'import numpy as np\n'), ((13579, 13705), 'Ngl.open_wks', 'ngl.open_wks', (['wks_type', "('GFSforecast_%s_%s_dewpoint_%shPa_%s_%03d' % (region, valid_date, lev_hPa,\n init_dt[0:10], fore[i]))"], {}), "(wks_type, 'GFSforecast_%s_%s_dewpoint_%shPa_%s_%03d' % (region,\n valid_date, lev_hPa, init_dt[0:10], fore[i]))\n", (13591, 13705), True, 'import Ngl as ngl\n'), ((13751, 13766), 'Ngl.Resources', 'ngl.Resources', ([], {}), '()\n', (13764, 13766), True, 'import Ngl as ngl\n'), ((13873, 13922), 'Ngl.read_colormap_file', 'ngl.read_colormap_file', (['"""WhiteBlueGreenYellowRed"""'], {}), "('WhiteBlueGreenYellowRed')\n", (13895, 13922), True, 'import Ngl as ngl\n'), ((15782, 15817), 'Ngl.contour_map', 'ngl.contour_map', (['wks', 'dewpoint', 'res'], {}), '(wks, dewpoint, res)\n', (15797, 15817), True, 'import Ngl as ngl\n'), ((17088, 17119), 'Ngl.maximize_plot', 'ngl.maximize_plot', (['wks', 'dp_plot'], {}), '(wks, dp_plot)\n', (17105, 17119), True, 'import Ngl as ngl\n'), ((17123, 17140), 'Ngl.draw', 'ngl.draw', (['dp_plot'], {}), '(dp_plot)\n', (17131, 17140), True, 'import Ngl as ngl\n'), ((17144, 17158), 'Ngl.frame', 'ngl.frame', (['wks'], {}), '(wks)\n', (17153, 17158), True, 'import Ngl as ngl\n'), ((17163, 17179), 'Ngl.destroy', 'ngl.destroy', (['wks'], {}), '(wks)\n', (17174, 17179), True, 'import Ngl as ngl\n'), ((1500, 1510), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1508, 1510), False, 'import sys\n'), ((4392, 4416), 'numpy.abs', 'np.abs', (['(lat_temp - latbl)'], {}), '(lat_temp - latbl)\n', (4398, 4416), True, 'import numpy as np\n'), ((4438, 4462), 'numpy.abs', 'np.abs', (['(lat_temp - 
lattr)'], {}), '(lat_temp - lattr)\n', (4444, 4462), True, 'import numpy as np\n'), ((5368, 5473), 'sys.exit', 'sys.exit', (['"""lon values are not different enough, they must have relate to different grid points"""'], {}), "(\n 'lon values are not different enough, they must have relate to different grid points'\n )\n", (5376, 5473), False, 'import sys\n'), ((6192, 6297), 'sys.exit', 'sys.exit', (['"""lon values are not different enough, they must have relate to different grid points"""'], {}), "(\n 'lon values are not different enough, they must have relate to different grid points'\n )\n", (6200, 6297), False, 'import sys\n'), ((7887, 7902), 'numpy.log', 'np.log', (['(pd / c1)'], {}), '(pd / c1)\n', (7893, 7902), True, 'import numpy as np\n'), ((12251, 12299), 'numpy.concatenate', 'np.concatenate', (['(temp_temp2, temp_temp1)'], {'axis': '(1)'}), '((temp_temp2, temp_temp1), axis=1)\n', (12265, 12299), True, 'import numpy as np\n'), ((12550, 12594), 'numpy.concatenate', 'np.concatenate', (['(rh_temp2, rh_temp1)'], {'axis': '(1)'}), '((rh_temp2, rh_temp1), axis=1)\n', (12564, 12594), True, 'import numpy as np\n'), ((12604, 12635), 'numpy.where', 'np.where', (['(rh == 0.0)', '(0.0001)', 'rh'], {}), '(rh == 0.0, 0.0001, rh)\n', (12612, 12635), True, 'import numpy as np\n'), ((12978, 13009), 'numpy.where', 'np.where', (['(rh == 0.0)', '(0.0001)', 'rh'], {}), '(rh == 0.0, 0.0001, rh)\n', (12986, 13009), True, 'import numpy as np\n'), ((13275, 13328), 'numpy.exp', 'np.exp', (['(c2 * (temp - 273.15) / (c3 + (temp - 273.15)))'], {}), '(c2 * (temp - 273.15) / (c3 + (temp - 273.15)))\n', (13281, 13328), True, 'import numpy as np\n'), ((17017, 17048), 'Ngl.contour', 'ngl.contour', (['wks', 'dewpoint', 'res'], {}), '(wks, dewpoint, res)\n', (17028, 17048), True, 'import Ngl as ngl\n'), ((17054, 17084), 'Ngl.overlay', 'ngl.overlay', (['dp_plot', 'dp_plot2'], {}), '(dp_plot, dp_plot2)\n', (17065, 17084), True, 'import Ngl as ngl\n'), ((5053, 5067), 'numpy.sign', 'np.sign', 
(['lonbl'], {}), '(lonbl)\n', (5060, 5067), True, 'import numpy as np\n'), ((5070, 5084), 'numpy.sign', 'np.sign', (['lontr'], {}), '(lontr)\n', (5077, 5084), True, 'import numpy as np\n'), ((5097, 5111), 'numpy.sign', 'np.sign', (['lonbl'], {}), '(lonbl)\n', (5104, 5111), True, 'import numpy as np\n'), ((5114, 5128), 'numpy.sign', 'np.sign', (['lontr'], {}), '(lontr)\n', (5121, 5128), True, 'import numpy as np\n'), ((5249, 5273), 'numpy.abs', 'np.abs', (['(lon_temp - lonbl)'], {}), '(lon_temp - lonbl)\n', (5255, 5273), True, 'import numpy as np\n'), ((5298, 5322), 'numpy.abs', 'np.abs', (['(lon_temp - lontr)'], {}), '(lon_temp - lontr)\n', (5304, 5322), True, 'import numpy as np\n'), ((6073, 6097), 'numpy.abs', 'np.abs', (['(lon_temp - lonbl)'], {}), '(lon_temp - lonbl)\n', (6079, 6097), True, 'import numpy as np\n'), ((6122, 6146), 'numpy.abs', 'np.abs', (['(lon_temp - lontr)'], {}), '(lon_temp - lontr)\n', (6128, 6146), True, 'import numpy as np\n'), ((6644, 6658), 'numpy.sign', 'np.sign', (['lonbl'], {}), '(lonbl)\n', (6651, 6658), True, 'import numpy as np\n'), ((6661, 6675), 'numpy.sign', 'np.sign', (['lontr'], {}), '(lontr)\n', (6668, 6675), True, 'import numpy as np\n'), ((6688, 6702), 'numpy.sign', 'np.sign', (['lonbl'], {}), '(lonbl)\n', (6695, 6702), True, 'import numpy as np\n'), ((6705, 6719), 'numpy.sign', 'np.sign', (['lontr'], {}), '(lontr)\n', (6712, 6719), True, 'import numpy as np\n'), ((7861, 7876), 'numpy.log', 'np.log', (['(pd / c1)'], {}), '(pd / c1)\n', (7867, 7876), True, 'import numpy as np\n'), ((13379, 13394), 'numpy.log', 'np.log', (['(pd / c1)'], {}), '(pd / c1)\n', (13385, 13394), True, 'import numpy as np\n'), ((2355, 2462), 'os.popen', 'os.popen', (['("cat %s/controls/namelist | grep \'fore:\' | awk -F: \'{print $2}\' | tr \',\' \' \'"\n % GFS_dir)'], {}), '(\n "cat %s/controls/namelist | grep \'fore:\' | awk -F: \'{print $2}\' | tr \',\' \' \'"\n % GFS_dir)\n', (2363, 2462), False, 'import os\n'), ((11967, 11981), 'numpy.sign', 
'np.sign', (['lonbl'], {}), '(lonbl)\n', (11974, 11981), True, 'import numpy as np\n'), ((11984, 11998), 'numpy.sign', 'np.sign', (['lontr'], {}), '(lontr)\n', (11991, 11998), True, 'import numpy as np\n'), ((12011, 12025), 'numpy.sign', 'np.sign', (['lonbl'], {}), '(lonbl)\n', (12018, 12025), True, 'import numpy as np\n'), ((12028, 12042), 'numpy.sign', 'np.sign', (['lontr'], {}), '(lontr)\n', (12035, 12042), True, 'import numpy as np\n'), ((13353, 13368), 'numpy.log', 'np.log', (['(pd / c1)'], {}), '(pd / c1)\n', (13359, 13368), True, 'import numpy as np\n')] |
import numpy as np
from gurobipy import *
# Author: <NAME>
# Date: 2020-04-01
def get_approx_planes(P0, B, D, p_min, p_max, Relaxed=False):
    """Compute an approximation plane of the quadratic demand surface.

    Solves max n'(p - P0) subject to p' B p + b' p + D = 0 (or <= 0 when
    Relaxed=True), where b = -1 so the linear term is minus the total
    production, and n is the surface normal at P0.

    :param P0: feasible operating point the plane is anchored at
    :param B: quadratic-loss matrix (n_gen x n_gen)
    :param D: constant term of the demand/balance constraint
    :param p_min: per-generator lower production bounds
    :param p_max: per-generator upper production bounds
    :param Relaxed: if True, solve the convex relaxation (inequality constraint)
    :return: (n, k_lower, k_upper) -- normal vector and the two offsets such
             that n'p + k_lower = 0 passes through P0 and n'p + k_upper = 0
             passes through the optimal boundary point
    """
    mod = Model()
    eps_p = pow(10, -8)  # numeric tolerance for the final sanity check
    n_gen = len(B)
    # Linear term corresponds to (minus) the sum of the production
    b = -np.ones(n_gen)
    # Problem is non convex because of the quadratic equality constraint
    # -> optionally solve the convex relaxation instead
    mod.setParam('OutputFlag', False)
    mod.Params.timeLimit = 100
    p = mod.addVars(range(n_gen), lb=p_min, ub=p_max, name='p')
    if not Relaxed:
        # exact (non-convex) quadratic equality; requires Gurobi >= 9
        mod.Params.NonConvex = 2
        mod.addConstr(0 ==
                      quicksum(
                          B[i, j]*p[i]*p[j] for i in range(n_gen) for j in range(n_gen))
                      + quicksum(b[i]*p[i] for i in range(n_gen))
                      + D, name='Demand')
    else:
        # convex relaxation: equality weakened to an inequality
        mod.addConstr(0 >=
                      quicksum(
                          B[i, j]*p[i]*p[j] for i in range(n_gen) for j in range(n_gen))
                      + quicksum(b[i]*p[i] for i in range(n_gen))
                      + D, name='Demand')
    # Gradient of the quadratic surface at P0 (outward normal direction)
    n = -B @ P0 - b*0.5
    n_normed = n / np.linalg.norm(n)
    # Push as far as possible from P0 along the normal direction
    mod.setObjective(quicksum(n_normed[g]*(p[g]-P0[g]) for g in range(n_gen)), GRB.MAXIMIZE)
    mod.Params.mipgap = 0.00001
    mod.update()
    mod.optimize()
    x = mod.getAttr('x', p)
    x = np.array(list(x.values()))
    scalar_product = mod.getAttr('ObjVal')
    k_lower = -n @ P0
    # Could be simplified since n.T @ n_normed = ||n||
    k_upper = k_lower - n.T @ (n_normed * scalar_product)
    # the optimum x must lie on the upper plane (up to numeric tolerance)
    assert abs(np.dot(n, x) + k_upper) < eps_p
    return (n, k_lower, k_upper)
| [
"numpy.dot",
"numpy.ones",
"numpy.linalg.norm"
] | [((485, 499), 'numpy.ones', 'np.ones', (['n_gen'], {}), '(n_gen)\n', (492, 499), True, 'import numpy as np\n'), ((1308, 1325), 'numpy.linalg.norm', 'np.linalg.norm', (['n'], {}), '(n)\n', (1322, 1325), True, 'import numpy as np\n'), ((1754, 1766), 'numpy.dot', 'np.dot', (['n', 'x'], {}), '(n, x)\n', (1760, 1766), True, 'import numpy as np\n')] |
"""
eval auc curve
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import roc_auc_score,roc_curve,auc,average_precision_score
from net.utils.parser import load_config,parse_args
import net.utils.logging_tool as logging
from sklearn import metrics
import os
import scipy.io as scio
import math
logger=logging.get_logger(__name__)
def show_line_one_video(y_score):
    """Plot one video's per-frame scores as a simple line chart."""
    frame_index = np.arange(len(y_score))
    plt.plot(frame_index, y_score)
    plt.show()
def show_score_ground_true(y_score, y_label, title_name, norm_mode, cfg):
    """Draw predicted scores against ground truth and save the figure.

    The figure is written to
    <cfg.TEST.SAVE_NPY_PATH>/Temporal_plt/<norm_mode>/<title_name>.png.
    """
    plt.cla()
    plt.title(title_name)
    plt.ylim((0, 1))
    frame_index = np.arange(len(y_score))
    plt.plot(frame_index, y_score, "r-", label="pred_score")
    plt.plot(frame_index, y_label, "g-", label="ground_true")
    plt.legend()  # add the legend
    out_dir = os.path.join(cfg.TEST.SAVE_NPY_PATH, "Temporal_plt", norm_mode)
    out_dir = out_dir  # keep name for clarity below
    os.makedirs(out_dir, exist_ok=True)
    plt.savefig(os.path.join(out_dir, title_name + ".png"))
def roc_draw(y_pred_score, y_label):
    """Compute the ROC curve points for the given scores and labels.

    NOTE(review): the computed curve is neither returned nor plotted
    (the plotting code is commented out), so this is currently a no-op
    for callers.
    """
    false_pos, true_pos, cut_points = roc_curve(
        y_label, y_pred_score, pos_label=None, sample_weight=None, drop_intermediate=True
    )
    # plt.title("roc curve")
    # plt.plot(fpr, tpr, marker='o')
    # plt.show()
def save_fpr_tpr(fpr, tpr, mat_name, roc_value,
                 save_root=r"F:\SPL_Save_Folder\SRF\UCF_Crime\roc_mat/"):
    """Persist an ROC curve to a .mat file and display it.

    :param fpr: false-positive rates (1-D array)
    :param tpr: true-positive rates (1-D array)
    :param mat_name: identifier (a path; only its last '/'-component is kept)
    :param roc_value: AUC value; 4 of its decimal digits are embedded in the
        output file name (str(roc_value)[2:6], e.g. 0.8123 -> "8123")
    :param save_root: directory prefix for the .mat file; defaults to the
        original hard-coded location for backward compatibility
    """
    # savemat expects column vectors
    fpr = np.expand_dims(fpr, axis=1)
    tpr = np.expand_dims(tpr, axis=1)
    mat_name = mat_name.split("/")[-1]
    mat_new = save_root + mat_name + str(roc_value)[2:6] + ".mat"
    scio.savemat(mat_new, {'X': fpr, "Y": tpr, "description ": "UCF Crime ROC Cruve" + mat_name})
    plt.title("roc curve")
    plt.plot(fpr, tpr, )
    plt.show()
def cal_auc(y_pred, y_label, cfg):
    """Compute the frame-level AUC, save the ROC curve and plot it.

    :param y_pred: frame-level anomaly scores
    :param y_label: frame-level binary ground truth
    :param cfg: config; cfg.OUTPUT_DIR names the saved .mat file
    :return: the area under the ROC curve
    """
    assert len(y_pred) == len(y_label)
    false_pos, true_pos, cut_points = metrics.roc_curve(y_label, y_pred)
    rec_auc = auc(false_pos, true_pos)
    # persist the curve points alongside the AUC value
    save_fpr_tpr(false_pos, true_pos, cfg.OUTPUT_DIR, rec_auc)
    plt.title("UCF-Crime SRF ")
    plt.plot(false_pos, true_pos)
    plt.show()
    return rec_auc
def UCF_GROUND_TRUE(anao_txt):
    """Load the UCF-Crime temporal anomaly annotation file.

    Each line looks like
    "Abuse028_x264.mp4 Abuse 165 240 -1 -1 1412"
    (video, class, 1st anomaly start/end, 2nd anomaly start/end, frame count).

    :param anao_txt: path to Temporal_Anomaly_Annotation_Time.txt
    :return: list of stripped annotation lines
    """
    # NOTE: the original version also accumulated a 16-frame-aligned total
    # length here but never used it; that dead computation was removed.
    with open(anao_txt, "r") as f:
        return [line.strip() for line in f.readlines()]
def ucf_label_pred_score(label_line, pred_array):
    """Expand per-feature anomaly scores to frame level.

    Each score in *pred_array* covers 16 non-overlapping frames. The ground
    truth spans feature_num * 16 frames, with 1 inside the annotated anomaly
    interval(s) and 0 elsewhere.

    :param label_line: "name class F_L F_R S_L S_R length" annotation line
    :param pred_array: one anomaly score per 16-frame feature
    :return: (frame_scores, frame_ground_truth), lists of equal length
    """
    video_name, abnormal_class, f_l, f_r, s_l, s_r, t_len = label_line.split(" ")
    f_l, f_r = int(f_l), int(f_r)
    s_l, s_r = int(s_l), int(s_r)
    t_len = int(t_len)

    feature_num = t_len // 16
    # repeat every feature score over its 16 frames
    frame_scores = [score for score in pred_array for _ in range(16)]

    ground_truth = [0] * (feature_num * 16)
    if f_l != -1 and f_r != -1:
        ground_truth[f_l:f_r + 1] = [v + 1 for v in ground_truth[f_l:f_r + 1]]
    if s_l != -1 and s_r != -1:
        ground_truth[s_l:s_r + 1] = [v + 1 for v in ground_truth[s_l:s_r + 1]]

    assert len(frame_scores) == len(ground_truth), "miss match in length of pred score and ground true "
    return frame_scores, ground_truth
def ucf_label_pred_score_unmerged(label_line, pred_array, cfg):
    """Expand per-feature anomaly scores to frame level (unmerged variant).

    Each entry of *pred_array* covers 16 non-overlapping frames, and the
    video length must yield exactly len(pred_array) features. The ground
    truth is built at full frame length, then cut to the covered frames.

    :param label_line: "name class F_L F_R S_L S_R length" annotation line
    :param pred_array: one anomaly score per 16-frame feature
    :param cfg: experiment config (kept for interface compatibility)
    :return: (frame_scores, frame_ground_truth), lists of equal length
    """
    video_name, abnormal_class, f_l, f_r, s_l, s_r, t_len = label_line.split(" ")
    f_l, f_r = int(f_l), int(f_r)
    s_l, s_r = int(s_l), int(s_r)
    t_len = int(t_len)

    feature_num = t_len // 16
    assert int(feature_num) == len(pred_array), "miss match in feature num"

    # repeat every feature score over its 16 frames
    frame_scores = [score for score in pred_array for _ in range(16)]

    ground_truth = [0] * t_len
    if f_l != -1 and f_r != -1:
        ground_truth[f_l:f_r + 1] = [v + 1 for v in ground_truth[f_l:f_r + 1]]
    if s_l != -1 and s_r != -1:
        ground_truth[s_l:s_r + 1] = [v + 1 for v in ground_truth[s_l:s_r + 1]]
    # drop the trailing frames (at most 15) that no feature covers
    ground_truth = ground_truth[:feature_num * 16]

    assert len(frame_scores) == len(ground_truth), "miss match in length of pred score and ground true "
    return frame_scores, ground_truth
def ucf_label_pred_score_merged(label_line, pred_array, cfg):
    """Expand 32 segment scores to frame level (merged variant).

    The video is divided into 32 equal segments: every frame of segment i
    receives pred_array[i], and any remainder frames after the 32nd segment
    inherit the last segment's score. Ground truth marks anomalous frames
    with 1 and normal ones with 0.

    :param label_line: "name class F_L F_R S_L S_R length" annotation line
    :param pred_array: 32 segment-level anomaly scores
    :param cfg: experiment config (kept for interface compatibility)
    :return: (frame_scores, frame_ground_truth), lists of equal length
    """
    video_name, abnormal_class, f_l, f_r, s_l, s_r, t_len = label_line.split(" ")
    f_l, f_r = int(f_l), int(f_r)
    s_l, s_r = int(s_l), int(s_r)
    t_len = int(t_len)

    frame_scores = [0] * t_len
    ground_truth = [0] * t_len
    if f_l != -1:
        ground_truth[f_l:f_r + 1] = [v + 1 for v in ground_truth[f_l:f_r + 1]]
    if s_l != -1:
        ground_truth[s_l:s_r + 1] = [v + 1 for v in ground_truth[s_l:s_r + 1]]

    segment_len = t_len // 32
    for seg in range(32):
        seg_start = int(seg * segment_len)
        seg_end = int((seg + 1) * segment_len)
        frame_scores[seg_start:seg_end] = [pred_array[seg]] * (seg_end - seg_start)
    # frames past the 32nd segment inherit the last score
    tail = len(frame_scores[int(32 * segment_len):])
    frame_scores[int(32 * segment_len):] = [pred_array[-1]] * tail

    assert len(frame_scores) == len(ground_truth), "miss match in length of pred score and ground true "
    return frame_scores, ground_truth
def get_label_and_score(ano_line, save_folder, cfg):
    """Collect frame-level predictions and ground truth over all videos.

    For each annotation line, loads <save_folder>/<video>.npy and expands
    it to frame level via ucf_label_pred_score_unmerged.

    :param ano_line: list of annotation lines (see UCF_GROUND_TRUE)
    :param save_folder: directory containing one score .npy per video
    :param cfg: experiment config, forwarded to the expansion helper
    :return: (all_frame_scores, all_frame_labels)
    """
    all_preds = []
    all_labels = []
    for line in ano_line:
        video_name = line.split(" ")[0]
        score_array = np.load(
            os.path.join(save_folder, video_name.split(".")[0] + ".npy")
        )
        frame_pred, frame_label = ucf_label_pred_score_unmerged(line, score_array, cfg)
        all_preds += frame_pred
        all_labels += frame_label
    return all_preds, all_labels
def eval_auc_roc(cfg):
    """Evaluate frame-level AUC on the UCF-Crime temporal annotations.

    Loads the temporal annotation file, gathers frame-level predictions and
    labels from cfg.TEST.SAVE_NPY_PATH/PRED_TEST_SCORE, and prints the AUC.
    """
    # NOTE(review): annotation path is hard-coded to a local drive
    annotation_lines = UCF_GROUND_TRUE(
        r"E:\datasets\UCFCrime/Temporal_Anomaly_Annotation_Time.txt"
    )
    score_folder = os.path.join(cfg.TEST.SAVE_NPY_PATH, "PRED_TEST_SCORE")
    y_pred_score, y_label = get_label_and_score(annotation_lines, score_folder, cfg)
    auc_values = []
    assert len(y_pred_score) == len(y_label), "len{} and len{}not match".format("y_pred_score", "y_label")
    auc_value = cal_auc(y_pred_score, y_label, cfg)
    print("auc_value:", auc_value)
def show_all_npy(save_score_npy_folder):
    """Print, for each score .npy in the folder, its score range (max - min)."""
    for npy_name in os.listdir(save_score_npy_folder):
        scores = np.load(os.path.join(save_score_npy_folder, npy_name))
        print("video name", npy_name)
        print(max(scores) - min(scores))
if __name__=="__main__":
    """
    load pred score
    score close to 0 mean anomaly
    load ground true
    cal auc value
    draw roc
    """
    # Entry point: parse CLI args, load the experiment config, run evaluation.
    args=parse_args()
    cfg=load_config(args)
    eval_auc_roc(cfg)
| [
"os.listdir",
"scipy.io.savemat",
"os.makedirs",
"net.utils.parser.load_config",
"sklearn.metrics.auc",
"matplotlib.pyplot.plot",
"os.path.join",
"net.utils.logging_tool.get_logger",
"net.utils.parser.parse_args",
"sklearn.metrics.roc_curve",
"numpy.expand_dims",
"matplotlib.pyplot.title",
"... | [((330, 358), 'net.utils.logging_tool.get_logger', 'logging.get_logger', (['__name__'], {}), '(__name__)\n', (348, 358), True, 'import net.utils.logging_tool as logging\n'), ((429, 449), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_score'], {}), '(x, y_score)\n', (437, 449), True, 'import matplotlib.pyplot as plt\n'), ((453, 463), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (461, 463), True, 'import matplotlib.pyplot as plt\n'), ((540, 549), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (547, 549), True, 'import matplotlib.pyplot as plt\n'), ((554, 575), 'matplotlib.pyplot.title', 'plt.title', (['title_name'], {}), '(title_name)\n', (563, 575), True, 'import matplotlib.pyplot as plt\n'), ((580, 596), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (588, 596), True, 'import matplotlib.pyplot as plt\n'), ((633, 679), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_score', '"""r-"""'], {'label': '"""pred_score"""'}), "(x, y_score, 'r-', label='pred_score')\n", (641, 679), True, 'import matplotlib.pyplot as plt\n'), ((682, 729), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_label', '"""g-"""'], {'label': '"""ground_true"""'}), "(x, y_label, 'g-', label='ground_true')\n", (690, 729), True, 'import matplotlib.pyplot as plt\n'), ((731, 743), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (741, 743), True, 'import matplotlib.pyplot as plt\n'), ((770, 833), 'os.path.join', 'os.path.join', (['cfg.TEST.SAVE_NPY_PATH', '"""Temporal_plt"""', 'norm_mode'], {}), "(cfg.TEST.SAVE_NPY_PATH, 'Temporal_plt', norm_mode)\n", (782, 833), False, 'import os\n'), ((851, 890), 'os.makedirs', 'os.makedirs', (['save_folder'], {'exist_ok': '(True)'}), '(save_folder, exist_ok=True)\n', (862, 890), False, 'import os\n'), ((1208, 1304), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_label', 'y_pred_score'], {'pos_label': 'None', 'sample_weight': 'None', 'drop_intermediate': '(True)'}), '(y_label, y_pred_score, pos_label=None, 
sample_weight=None,\n drop_intermediate=True)\n', (1217, 1304), False, 'from sklearn.metrics import roc_auc_score, roc_curve, auc, average_precision_score\n'), ((1535, 1562), 'numpy.expand_dims', 'np.expand_dims', (['fpr'], {'axis': '(1)'}), '(fpr, axis=1)\n', (1549, 1562), True, 'import numpy as np\n'), ((1570, 1597), 'numpy.expand_dims', 'np.expand_dims', (['tpr'], {'axis': '(1)'}), '(tpr, axis=1)\n', (1584, 1597), True, 'import numpy as np\n'), ((1733, 1831), 'scipy.io.savemat', 'scio.savemat', (['mat_new', "{'X': fpr, 'Y': tpr, 'description ': 'UCF Crime ROC Cruve' + mat_name}"], {}), "(mat_new, {'X': fpr, 'Y': tpr, 'description ': \n 'UCF Crime ROC Cruve' + mat_name})\n", (1745, 1831), True, 'import scipy.io as scio\n'), ((1831, 1853), 'matplotlib.pyplot.title', 'plt.title', (['"""roc curve"""'], {}), "('roc curve')\n", (1840, 1853), True, 'import matplotlib.pyplot as plt\n'), ((1858, 1876), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (1866, 1876), True, 'import matplotlib.pyplot as plt\n'), ((1883, 1893), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1891, 1893), True, 'import matplotlib.pyplot as plt\n'), ((2080, 2114), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y_label', 'y_pred'], {}), '(y_label, y_pred)\n', (2097, 2114), False, 'from sklearn import metrics\n'), ((2230, 2243), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (2233, 2243), False, 'from sklearn.metrics import roc_auc_score, roc_curve, auc, average_precision_score\n'), ((2298, 2326), 'matplotlib.pyplot.title', 'plt.title', (['"""UCF-Crime SRF """'], {}), "('UCF-Crime SRF ')\n", (2307, 2326), True, 'import matplotlib.pyplot as plt\n'), ((2331, 2349), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (2339, 2349), True, 'import matplotlib.pyplot as plt\n'), ((2353, 2363), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2361, 2363), True, 'import matplotlib.pyplot as plt\n'), ((8990, 9023), 
'os.listdir', 'os.listdir', (['save_score_npy_folder'], {}), '(save_score_npy_folder)\n', (9000, 9023), False, 'import os\n'), ((9392, 9404), 'net.utils.parser.parse_args', 'parse_args', ([], {}), '()\n', (9402, 9404), False, 'from net.utils.parser import load_config, parse_args\n'), ((9413, 9430), 'net.utils.parser.load_config', 'load_config', (['args'], {}), '(args)\n', (9424, 9430), False, 'from net.utils.parser import load_config, parse_args\n'), ((984, 1030), 'os.path.join', 'os.path.join', (['save_folder', "(title_name + '.png')"], {}), "(save_folder, title_name + '.png')\n", (996, 1030), False, 'import os\n'), ((8369, 8424), 'os.path.join', 'os.path.join', (['cfg.TEST.SAVE_NPY_PATH', '"""PRED_TEST_SCORE"""'], {}), "(cfg.TEST.SAVE_NPY_PATH, 'PRED_TEST_SCORE')\n", (8381, 8424), False, 'import os\n'), ((9082, 9120), 'os.path.join', 'os.path.join', (['save_score_npy_folder', 'n'], {}), '(save_score_npy_folder, n)\n', (9094, 9120), False, 'import os\n')] |
from pathlib import Path
import numpy as np
import skimage.io
from matplotlib import pyplot as plt
from segmentation_models import get_preprocessing
from tools.model import configs
from tools.model import input_preprocessing
from utils import image as image_utils
# Input normalisation function resolved from the configured backbone name.
BACKBONE_INPUT_PREPROCESSING = get_preprocessing(configs.BACKBONE)
# Shared preprocessor driven entirely by tools.model.configs: input size,
# channel count, class count, augmentations and backbone normalisation.
INPUT_PREPROCESSOR = input_preprocessing.InputPreprocessor(
    configs.INPUT_SIZE, configs.INPUT_CHANNELS, configs.CLASSES_NUMBER,
    configs.AUGMENTATIONS, BACKBONE_INPUT_PREPROCESSING)
def show_augmented_image(path: Path):
    """Show a grid of 64 random augmentations of the image at *path*."""
    image = skimage.io.imread(str(path), as_gray=True)
    image = INPUT_PREPROCESSOR.preprocess_image(image)[0].astype(np.float32)
    print('Image:', image.dtype, image.min(), image.max(), image.shape)
    n = 64
    grid = np.zeros((n, *INPUT_PREPROCESSOR.image_input_size, INPUT_PREPROCESSOR.image_input_channels))
    for idx in range(n):
        aug_image, mask = INPUT_PREPROCESSOR.augmentate_image_mask(image, None)
        # Normalize once again to [0, 1] after augmentation, then scale to 0-255
        aug_image = image_utils.normalized_image(aug_image)
        aug_image = aug_image * 255
        aug_image = np.stack((aug_image,) * INPUT_PREPROCESSOR.image_input_channels, axis=-1)
        grid[idx] = aug_image
    plot_n_images(grid, n)
    skimage.io.show()
def plot_n_images(image_array, n):
    """Display the first *n* images from *image_array* in a square grid.

    *n* must be a perfect square.
    """
    first_n_images = image_array[:n, :]
    grid_size = int(np.sqrt(n))
    fig, axes = plt.subplots(nrows=grid_size, ncols=grid_size, sharex=True, sharey=True, figsize=(8, 8))
    for row in range(grid_size):
        for col in range(grid_size):
            axes[row, col].imshow(first_n_images[grid_size * row + col].astype(np.uint8))
    # hide both axes' tick marks
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
if __name__ == '__main__':
    # Quick manual check: visualise augmentations for one local sample image.
    show_augmented_image(Path(r'D:\Temp\model_rotate_test\9.png'))
| [
"numpy.sqrt",
"pathlib.Path",
"utils.image.normalized_image",
"tools.model.input_preprocessing.InputPreprocessor",
"segmentation_models.get_preprocessing",
"numpy.zeros",
"numpy.stack",
"numpy.array",
"matplotlib.pyplot.subplots"
] | [((299, 334), 'segmentation_models.get_preprocessing', 'get_preprocessing', (['configs.BACKBONE'], {}), '(configs.BACKBONE)\n', (316, 334), False, 'from segmentation_models import get_preprocessing\n'), ((357, 524), 'tools.model.input_preprocessing.InputPreprocessor', 'input_preprocessing.InputPreprocessor', (['configs.INPUT_SIZE', 'configs.INPUT_CHANNELS', 'configs.CLASSES_NUMBER', 'configs.AUGMENTATIONS', 'BACKBONE_INPUT_PREPROCESSING'], {}), '(configs.INPUT_SIZE, configs.\n INPUT_CHANNELS, configs.CLASSES_NUMBER, configs.AUGMENTATIONS,\n BACKBONE_INPUT_PREPROCESSING)\n', (394, 524), False, 'from tools.model import input_preprocessing\n'), ((799, 896), 'numpy.zeros', 'np.zeros', (['(n, *INPUT_PREPROCESSOR.image_input_size, INPUT_PREPROCESSOR.\n image_input_channels)'], {}), '((n, *INPUT_PREPROCESSOR.image_input_size, INPUT_PREPROCESSOR.\n image_input_channels))\n', (807, 896), True, 'import numpy as np\n'), ((1629, 1721), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'grid_size', 'ncols': 'grid_size', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(8, 8)'}), '(nrows=grid_size, ncols=grid_size, sharex=True, sharey=True,\n figsize=(8, 8))\n', (1641, 1721), True, 'from matplotlib import pyplot as plt\n'), ((1082, 1121), 'utils.image.normalized_image', 'image_utils.normalized_image', (['aug_image'], {}), '(aug_image)\n', (1110, 1121), True, 'from utils import image as image_utils\n'), ((1179, 1252), 'numpy.stack', 'np.stack', (['((aug_image,) * INPUT_PREPROCESSOR.image_input_channels)'], {'axis': '(-1)'}), '((aug_image,) * INPUT_PREPROCESSOR.image_input_channels, axis=-1)\n', (1187, 1252), True, 'import numpy as np\n'), ((1597, 1607), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1604, 1607), True, 'import numpy as np\n'), ((1998, 2040), 'pathlib.Path', 'Path', (['"""D:\\\\Temp\\\\model_rotate_test\\\\9.png"""'], {}), "('D:\\\\Temp\\\\model_rotate_test\\\\9.png')\n", (2002, 2040), False, 'from pathlib import Path\n'), ((1893, 1905), 
'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1901, 1905), True, 'import numpy as np\n'), ((1930, 1942), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1938, 1942), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Author : tyty
# Date : 2018-6-21
from __future__ import division
import numpy as np
import pandas as pd
import tools as tl
class AritificialNeuralNetworks(object):
def __init__(self, layers, learningRate, trainX, trainY, testX, testY, epoch):
# input params
self.layers = layers
self.lr = learningRate
self.epoch = epoch
# cal the mean and std
self.mean = [np.mean(i) for i in trainX.T]
self.stdVar = [np.std(i) for i in trainX.T]
# for epoch show
self.trainXPrediction = trainX
self.trainYPrediction = trainY
self.testXPrediction = testX
self.testYPrediction = testY
#process the trainX data and trainY data
self.trainX = self.Normalization(trainX)
self.trainY = self.oneHotDataProcessing(trainY)
# self.trainY = trainY
self.weights = [np.random.uniform(-0.5, 0.5, [y, x]) for x, y in zip(layers[:-1], layers[1:])]
self.biases = [np.zeros([y, 1]) for y in layers[1:]]
# self.biases = [np.random.uniform(-1, 1, [y, 1]) for y in layers[1:]]
# print self.weights[0]
self.cntLayer = len(self.layers) - 1
self.error = None
def fitTransform(self):
for i in range(self.epoch):
for trainX, trainY in zip(self.trainX, self.trainY):
# 2 step to train the network
# 1.forwardUpdate the network params
netLayerInput, netLayerOuput = self.forwardUpdate(trainX)
# 2. backForwardUpdata the network params
self.backForwardUpdate(netLayerInput, netLayerOuput, trainY)
# * print every epoch TrainSet OR TestSet Accuracy *
print ("Epoch {0} : testSet accuracy: {1} / {2} | trainSet accuracy: {3} / {4}".\
format(i, self.prediction(testX=self.testXPrediction, testY=self.testYPrediction)[1], len(self.testXPrediction),\
self.prediction(testX=self.trainXPrediction, testY=self.trainYPrediction)[1], len(self.trainXPrediction)))
def forwardUpdate(self, trainX):
tmpTrain = trainX
layerOutput = []
layerInput = []
# forward update
for layer in range(self.cntLayer):
layerInput.append(tmpTrain)
# cal the train value
tmpTrain = np.dot(tmpTrain, self.weights[layer].T) + self.biases[layer].T
# activate the train value - sigmoid
tmpTrain = [self.sigmoid(i) for i in tmpTrain]
# tmpTrain = [self.ReLU(i) for i in tmpTrain]
# tmpTrain = [self.tanh(i) for i in tmpTrain]
layerOutput.append(tmpTrain)
return layerInput, layerOutput
def backForwardUpdate(self, netLayerInput, netLayerOutput, trainY):
trainY = np.array(trainY)
#reverse the order to cal the gradient
for layerIndex, netInput, netOutput in zip(range(self.cntLayer)[ : : -1],\
netLayerInput[ : : -1], netLayerOutput[ : : -1]):
netIn = np.array(netInput)
netOut = np.array(netOutput)
# cal the error of y - last layer
if layerIndex == (self.cntLayer - 1):
self.error = self.sigmoidPrime(netOut) * self.costFunction(realY=trainY,\
outputY=netOut)
# self.error = self.ReLUPrime(netOut) * self.costFunction(realY=trainY,\
# outputY=netOut)
# self.error = self.tanhPrime(netOut) * self.costFunction(realY=trainY,\
# outputY=netOut)
else:
# update the error of hidden layer
# "layerIndex + 1" index the behind layer
self.error = np.dot(self.error, self.weights[layerIndex + 1])
self.error = self.sigmoidPrime(netOut) * self.error
# self.error = self.ReLUPrime(netOut) * self.error
# self.error = self.tanhPrime(netOut) * self.error
# !! extract the No.2 Axis of error -- Error of each layer !!
self.error = self.error[0]
#update the weights and biases
for n in range(len(self.weights[layerIndex])):
self.weights[layerIndex][n] = self.weights[layerIndex][n] + netIn * self.lr * self.error[n]
self.biases[layerIndex] = (self.biases[layerIndex].T + self.lr * self.error).T
#-----------------------------------------------
#----- Activation function and its prime format
def sigmoid(self, inputX):
return [1 / (1 + np.math.exp(-i)) for i in inputX]
def sigmoidPrime(self, outputY):
return outputY * (1 - outputY)
def tanh(self, inputX):
return np.tanh(inputX)
def tanhPrime(self, outputY):
return 1.0 - outputY ** 2
def ReLU(self, inputX):
inputXReLU = inputX
inputXReLU[inputXReLU < 0] = 0
return inputXReLU
def ReLUPrime(self, outputY):
outputYReLU = outputY
outputYReLU[outputYReLU >= 0] = 1
outputYReLU[outputYReLU < 0] = 0
return outputYReLU
#---End of the activation function
#-----------------------------------------------
# cost function / loss function
def costFunction(self, realY, outputY):
return (realY - outputY)
# input params : trainSet X
# output params : (X - mean(X)) / std(X)
def Normalization(self, trainX):
# reverse the trainX [40,4]->[4->40]
# print data.shape
data = trainX.T
for i in range(len(data)):
data[i] = (data[i] - self.mean[i]) / self.stdVar[i]
return data.T
# input params : trainSet Y
# output params : ont hot Y e.g.[3] = [0, 0, 1, 0], [4] = [0, 0, 0, 1]
def oneHotDataProcessing(self, trainY):
res = []
# lenght of onehot data is self.layers[-1]
# print self.layers[-1]
for i in trainY:
# initial the temp list -> 0
temp = [0] * self.layers[-1]
# cal the idx of '1'
idx = int(i - 1)
# mark the '1'
temp[idx] = 1
res.append(temp)
# print res
return res
    def prediction(self, testX, testY):
        """Run the trained network over a test set and score it.

        testX: samples as rows (features as columns).
        testY: 1-based class labels, one per sample.
        Returns (accuracy, number_of_correct_predictions).
        """
        res = 0                      # count of correct predictions
        result = []                  # predicted 1-based labels
        testX = np.array(testX).T
        # print (self.mean)
        # cal the test data and stdValue
        # NOTE(review): the test set is standardized with its OWN mean/std,
        # not the training statistics (see the commented line in the loop
        # below) -- confirm this is intended, as it differs from
        # self.Normalization().
        mean = [np.mean(i) for i in testX]
        stdVar = [np.std(i) for i in testX]
        for i in range(len(testX)):
            # use trainSet mean std or testSet mean std
            # testX[i] = (testX[i]- self.mean[i]) / self.stdVar[i]
            testX[i] = (testX[i] - mean[i]) / stdVar[i]
        testX = testX.T
        for tX in testX:
            tmp = tX
            # forward pass (mirrors the training-time forward loop)
            for layer in range(self.cntLayer):
                tmp = np.dot(tmp, self.weights[layer].T) + self.biases[layer].T
                tmp = [self.sigmoid(i) for i in tmp]
                # tmp = [self.ReLU(i) for i in tmp]
                # tmp = [self.tanh(i) for i in tmp]
            # argmax index is 0-based; labels are 1-based, hence the + 1
            result.append(np.argmax(tmp) + 1)
        for realY, predY in zip(testY, result):
            if realY == predY:
                res += 1
        accuracy = res / len(testY)
        return accuracy, res
def AritificialNeuralNetworksModelMain():
    """Train the ANN on the dataset from tools.createDataSet() and print
    training wall-clock time and test-set accuracy."""
    train, trainy, test, testy = tl.createDataSet()
    # layers, learningRate, trainX, trainY, testX, testY, epoch
    ANNModel = AritificialNeuralNetworks(layers=[4, 150, 4], learningRate=0.1, trainX=train,\
                                        trainY=trainy, testX=test, testY=testy, epoch = 600)
    # clock time
    import time
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended monotonic replacement for measuring elapsed intervals.
    start = time.perf_counter()
    # fit the model with training data
    ANNModel.fitTransform()
    end = time.perf_counter()
    print ("Training time : " + str(end - start) + "s")
    # cal the accuracy
    accuracy = ANNModel.prediction(testX=test, testY=testy)[0]
    print ("The accuracy of the test dataSet : " + str(accuracy))
if __name__ == '__main__':
    AritificialNeuralNetworksModelMain()
| [
"numpy.mean",
"time.clock",
"numpy.tanh",
"numpy.argmax",
"tools.createDataSet",
"numpy.array",
"numpy.random.uniform",
"numpy.zeros",
"numpy.dot",
"numpy.std",
"numpy.math.exp"
] | [((7558, 7576), 'tools.createDataSet', 'tl.createDataSet', ([], {}), '()\n', (7574, 7576), True, 'import tools as tl\n'), ((7875, 7887), 'time.clock', 'time.clock', ([], {}), '()\n', (7885, 7887), False, 'import time\n'), ((7967, 7979), 'time.clock', 'time.clock', ([], {}), '()\n', (7977, 7979), False, 'import time\n'), ((2857, 2873), 'numpy.array', 'np.array', (['trainY'], {}), '(trainY)\n', (2865, 2873), True, 'import numpy as np\n'), ((4960, 4975), 'numpy.tanh', 'np.tanh', (['inputX'], {}), '(inputX)\n', (4967, 4975), True, 'import numpy as np\n'), ((455, 465), 'numpy.mean', 'np.mean', (['i'], {}), '(i)\n', (462, 465), True, 'import numpy as np\n'), ((510, 519), 'numpy.std', 'np.std', (['i'], {}), '(i)\n', (516, 519), True, 'import numpy as np\n'), ((933, 969), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)', '[y, x]'], {}), '(-0.5, 0.5, [y, x])\n', (950, 969), True, 'import numpy as np\n'), ((1037, 1053), 'numpy.zeros', 'np.zeros', (['[y, 1]'], {}), '([y, 1])\n', (1045, 1053), True, 'import numpy as np\n'), ((3112, 3130), 'numpy.array', 'np.array', (['netInput'], {}), '(netInput)\n', (3120, 3130), True, 'import numpy as np\n'), ((3152, 3171), 'numpy.array', 'np.array', (['netOutput'], {}), '(netOutput)\n', (3160, 3171), True, 'import numpy as np\n'), ((6520, 6535), 'numpy.array', 'np.array', (['testX'], {}), '(testX)\n', (6528, 6535), True, 'import numpy as np\n'), ((6625, 6635), 'numpy.mean', 'np.mean', (['i'], {}), '(i)\n', (6632, 6635), True, 'import numpy as np\n'), ((6670, 6679), 'numpy.std', 'np.std', (['i'], {}), '(i)\n', (6676, 6679), True, 'import numpy as np\n'), ((2400, 2439), 'numpy.dot', 'np.dot', (['tmpTrain', 'self.weights[layer].T'], {}), '(tmpTrain, self.weights[layer].T)\n', (2406, 2439), True, 'import numpy as np\n'), ((3973, 4021), 'numpy.dot', 'np.dot', (['self.error', 'self.weights[layerIndex + 1]'], {}), '(self.error, self.weights[layerIndex + 1])\n', (3979, 4021), True, 'import numpy as np\n'), ((4805, 4820), 
'numpy.math.exp', 'np.math.exp', (['(-i)'], {}), '(-i)\n', (4816, 4820), True, 'import numpy as np\n'), ((7051, 7085), 'numpy.dot', 'np.dot', (['tmp', 'self.weights[layer].T'], {}), '(tmp, self.weights[layer].T)\n', (7057, 7085), True, 'import numpy as np\n'), ((7292, 7306), 'numpy.argmax', 'np.argmax', (['tmp'], {}), '(tmp)\n', (7301, 7306), True, 'import numpy as np\n')] |
import pytest
import numpy as np
import mymath.bindings
def test_dot():
    """dot() must agree with NumPy's reference dot product."""
    lhs = [1., 2, 3, -5.5, 42]
    rhs = [-3.2, 0, 13, 6, -3.14]
    computed = mymath.bindings.dot(vector1=lhs, vector2=rhs)
    expected = np.dot(lhs, rhs)
    assert pytest.approx(computed) == expected
def test_normalize():
    """normalize() must return v scaled to unit Euclidean length."""
    vec = [1., 2, 3, -5.5, 42]
    computed = mymath.bindings.normalize(input=vec)
    expected = np.array(vec) / np.linalg.norm(vec)
    assert pytest.approx(computed) == expected
def test_dot_numpy():
    """dot_numpy() must accept ndarrays and agree with np.dot."""
    lhs = np.array([1., 2, 3, -5.5, 42])
    rhs = np.array([-3.2, 0, 13, 6, -3.14])
    computed = mymath.bindings.dot_numpy(in_1=lhs, in_2=rhs)
    expected = np.dot(lhs, rhs)
    assert pytest.approx(computed) == expected
def test_normalize_numpy():
    """normalize_numpy() must return an ndarray equal to v / ||v||."""
    vec = np.array([1., 2, 3, -5.5, 42])
    computed = mymath.bindings.normalize_numpy(in_1=vec)
    assert isinstance(computed, np.ndarray)
    expected = np.array(vec) / np.linalg.norm(vec)
    assert pytest.approx(computed) == expected
def test_assertion():
    """Mismatched vector lengths must raise RuntimeError."""
    lhs = np.array([1., 2, 3, -5.5])
    rhs = np.array([42.])
    with pytest.raises(RuntimeError):
        _ = mymath.bindings.dot_numpy(in_1=lhs, in_2=rhs)
| [
"pytest.approx",
"numpy.array",
"numpy.dot",
"pytest.raises",
"numpy.linalg.norm"
] | [((452, 483), 'numpy.array', 'np.array', (['[1.0, 2, 3, -5.5, 42]'], {}), '([1.0, 2, 3, -5.5, 42])\n', (460, 483), True, 'import numpy as np\n'), ((492, 525), 'numpy.array', 'np.array', (['[-3.2, 0, 13, 6, -3.14]'], {}), '([-3.2, 0, 13, 6, -3.14])\n', (500, 525), True, 'import numpy as np\n'), ((674, 705), 'numpy.array', 'np.array', (['[1.0, 2, 3, -5.5, 42]'], {}), '([1.0, 2, 3, -5.5, 42])\n', (682, 705), True, 'import numpy as np\n'), ((904, 931), 'numpy.array', 'np.array', (['[1.0, 2, 3, -5.5]'], {}), '([1.0, 2, 3, -5.5])\n', (912, 931), True, 'import numpy as np\n'), ((940, 956), 'numpy.array', 'np.array', (['[42.0]'], {}), '([42.0])\n', (948, 956), True, 'import numpy as np\n'), ((207, 228), 'pytest.approx', 'pytest.approx', (['result'], {}), '(result)\n', (220, 228), False, 'import pytest\n'), ((232, 246), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (238, 246), True, 'import numpy as np\n'), ((361, 382), 'pytest.approx', 'pytest.approx', (['result'], {}), '(result)\n', (374, 382), False, 'import pytest\n'), ((595, 616), 'pytest.approx', 'pytest.approx', (['result'], {}), '(result)\n', (608, 616), False, 'import pytest\n'), ((620, 634), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (626, 634), True, 'import numpy as np\n'), ((813, 834), 'pytest.approx', 'pytest.approx', (['result'], {}), '(result)\n', (826, 834), False, 'import pytest\n'), ((966, 993), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (979, 993), False, 'import pytest\n'), ((386, 397), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (394, 397), True, 'import numpy as np\n'), ((400, 417), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (414, 417), True, 'import numpy as np\n'), ((838, 849), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (846, 849), True, 'import numpy as np\n'), ((852, 869), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (866, 869), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.