code
stringlengths
31
1.05M
apis
list
extract_api
stringlengths
97
1.91M
import numpy as np import pyspark.sql.functions as F import pyspark.sql.types as T from pyspark.ml.feature import IDF, Tokenizer, CountVectorizer def isin(element, test_elements, assume_unique=False, invert=False): """ Impliments the numpy function isin() for legacy versions of the library """ element = np.asarray(element) return np.in1d(element, test_elements, assume_unique=assume_unique, invert=invert).reshape(element.shape) udf_template = """ def bm25(query, tf, idf): mean_dl = {} k = {} b = {} idf_values, tf_indices, tf_values, query_indices = idf.values, tf.indices, tf.values, query.indices freq_nidx = tf_indices[-1]+1 freq_indices = np.concatenate((tf_indices, np.array([freq_nidx]))) term_frequencies = np.concatenate((tf_values, np.array([0.0]))) #get idf vector idf_ = idf_values #get term frequencies intersect = np.intersect1d(query_indices, freq_indices) idx = np.where(isin(query_indices, intersect), query_indices, freq_nidx) freq_idx = np.searchsorted(freq_indices, idx) tf_ = term_frequencies[freq_idx].reshape(-1) #get doc length dl_ = tf_values.sum() #get scores used to compute bm25 ntf_ = tf_ / dl_ ntf_score = ntf_.sum() tf_score = tf_.sum() tfidf_score = np.dot(ntf_, idf_) #get bm25 n_term = k * (1 - b + b * dl_/mean_dl) bm25 = np.dot(idf_, (tf_ * (k + 1)) / ((tf_) + n_term)) #return all scores return T.Row('tf', 'ntf', 'tfidf', 'bm25')(float(tf_score), float(ntf_score), float(tfidf_score), float(bm25)) schema = T.StructType([ T.StructField("tf", T.FloatType(), False), T.StructField("ntf", T.FloatType(), False), T.StructField("tfidf", T.FloatType(), False), T.StructField("bm25", T.FloatType(), False)]) self.udf = F.udf(bm25, returnType=schema) """ class BM25Model(object): """ Computes BM25 score. 
""" def __init__(self, k=1.2, b=.75): self.k = k self.b = b self.tok = Tokenizer(inputCol='__input', outputCol='__tokens') self.vec = CountVectorizer(inputCol='__tokens', outputCol='__counts') self.idf = IDF(inputCol='__counts', outputCol='__idf') self.train_col = None self.udf = None self.is_fit = False def fit(self, df, train_col): """ Does fitting on input df. df: a pyspark dataframe. train_col (string): The name of the column containing training documents. Returns: self, a """ self.train_col = train_col df_ = self.tok.transform(df.withColumnRenamed(train_col, '__input')) mean_dl = df_.select(F.mean(F.size(F.col('__tokens')))).collect()[0][0] self.vec = self.vec.fit(df_) df_ = self.vec.transform(df_) self.idf = self.idf.fit(df_) #this will reset value of self.udf to be a working udf function. exec(udf_template.format(mean_dl, self.k, self.b)) self.is_fit = True return self def transform(self, df, score_col, bm25_output_name='bm25', tf_output_name=None, ntf_output_name=None, tfidf_output_name=None): """ Computes BM25 score, along with normalized term frequency (ntf) and tfidf. These three additional scores come "for free" with bm25 but are only returned optionally. 
""" if not self.is_fit: raise Exception("You must fit the BM25 model with a call to .fit() first.") columns = df.columns df_ = self.tok.transform(df.withColumnRenamed(score_col, '__input')) df_ = self.vec.transform(df_) df_ = self.idf.transform(df_) df_ = (df_.withColumnRenamed('__counts', '__query_counts') .withColumnRenamed('__input', score_col) ).select(columns + [score_col, '__query_counts', '__idf']) df_ = self.tok.transform(df_.withColumnRenamed(self.train_col, '__input')) df_ = self.vec.transform(df_) df_ = df_.withColumnRenamed('__counts', '__item_counts') df_ = df_.withColumn('bm25', self.udf(F.col('__query_counts'), F.col('__item_counts'), F.col('__idf'))) df_ = df_.withColumnRenamed('__input', self.train_col) computed_values = df_.withColumn('more', F.explode(F.array(F.col('bm25')))).select(columns + ['bm25.*']) #this is logic for naming output column(s) final_selection = columns if bm25_output_name is not None: computed_values = computed_values.withColumnRenamed('bm25', bm25_output_name) final_selection.append(bm25_output_name) if tf_output_name is not None: computed_values = computed_values.withColumnRenamed('tf', tf_output_name) final_selection.append(tf_output_name) if ntf_output_name is not None: computed_values = computed_values.withColumnRenamed('ntf', ntf_output_name) final_selection.append(ntf_output_name) if tfidf_output_name is not None: computed_values = computed_values.withColumnRenamed('tfidf', tfidf_output_name) final_selection.append(tfidf_output_name) return computed_values.select(final_selection)
[ "pyspark.ml.feature.IDF", "pyspark.ml.feature.Tokenizer", "numpy.in1d", "numpy.asarray", "pyspark.ml.feature.CountVectorizer", "pyspark.sql.functions.col" ]
[((327, 346), 'numpy.asarray', 'np.asarray', (['element'], {}), '(element)\n', (337, 346), True, 'import numpy as np\n'), ((2063, 2114), 'pyspark.ml.feature.Tokenizer', 'Tokenizer', ([], {'inputCol': '"""__input"""', 'outputCol': '"""__tokens"""'}), "(inputCol='__input', outputCol='__tokens')\n", (2072, 2114), False, 'from pyspark.ml.feature import IDF, Tokenizer, CountVectorizer\n'), ((2134, 2192), 'pyspark.ml.feature.CountVectorizer', 'CountVectorizer', ([], {'inputCol': '"""__tokens"""', 'outputCol': '"""__counts"""'}), "(inputCol='__tokens', outputCol='__counts')\n", (2149, 2192), False, 'from pyspark.ml.feature import IDF, Tokenizer, CountVectorizer\n'), ((2212, 2255), 'pyspark.ml.feature.IDF', 'IDF', ([], {'inputCol': '"""__counts"""', 'outputCol': '"""__idf"""'}), "(inputCol='__counts', outputCol='__idf')\n", (2215, 2255), False, 'from pyspark.ml.feature import IDF, Tokenizer, CountVectorizer\n'), ((358, 433), 'numpy.in1d', 'np.in1d', (['element', 'test_elements'], {'assume_unique': 'assume_unique', 'invert': 'invert'}), '(element, test_elements, assume_unique=assume_unique, invert=invert)\n', (365, 433), True, 'import numpy as np\n'), ((4185, 4208), 'pyspark.sql.functions.col', 'F.col', (['"""__query_counts"""'], {}), "('__query_counts')\n", (4190, 4208), True, 'import pyspark.sql.functions as F\n'), ((4210, 4232), 'pyspark.sql.functions.col', 'F.col', (['"""__item_counts"""'], {}), "('__item_counts')\n", (4215, 4232), True, 'import pyspark.sql.functions as F\n'), ((4234, 4248), 'pyspark.sql.functions.col', 'F.col', (['"""__idf"""'], {}), "('__idf')\n", (4239, 4248), True, 'import pyspark.sql.functions as F\n'), ((4381, 4394), 'pyspark.sql.functions.col', 'F.col', (['"""bm25"""'], {}), "('bm25')\n", (4386, 4394), True, 'import pyspark.sql.functions as F\n'), ((2756, 2773), 'pyspark.sql.functions.col', 'F.col', (['"""__tokens"""'], {}), "('__tokens')\n", (2761, 2773), True, 'import pyspark.sql.functions as F\n')]
import math, pickle from copy import deepcopy from itertools import count from functools import reduce import numpy as np # from qiskit import QuantumCircuit, QuantumRegister, execute # # from qiskit.extensions import Initialize # from qiskit.circuit.library import Diagonal, GroverOperator #from tqdm.notebook import tqdm from tqdm import tqdm from circuit_builder import CircuitBuilder from utils import prob_to_angles from stats import EpisodeStats class QRQLAgent: def __init__(self, i, alpha, gamma, R, exploration='6x6', mode='classical'): self.i = i # self.backend = backend self.alpha, self.gamma, self.R, self.exploration, self.mode = alpha, gamma, R, exploration, mode self.memory = dict() def save(self, model): with open(model + '.pkl', 'wb') as f: pickle.dump(self.memory, f, pickle.HIGHEST_PROTOCOL) f.close() def load(self, model): with open(model + '.pkl', 'rb') as f: self.memory = pickle.load(f) f.close() def train(self, env, num_episodes): stats = EpisodeStats( episode_results=np.empty(num_episodes, dtype=str), episode_steps=np.empty(num_episodes, dtype=int), episode_rewards=np.empty(num_episodes, dtype=float), explored_states=np.empty(num_episodes, dtype=int) ) for i_episode in tqdm(range(num_episodes), position=self.i, desc=f'QRQL-{self.i}', disable=None): env.reset(i_episode + 1) total_rewards = 0.0 state = env.state() actions = env.actions() if self.exploration == '6x6': tau = 0.2 + (20 - 0.2) / (1 + math.e**(0.5 * (i_episode / 1000))) elif self.exploration == '8x8': tau = 0.2 + (20 - 0.2) / (1 + math.e**(0.35 * (i_episode / 1000))) elif self.exploration == '6x6_changing': if i_episode < 20000: tau = 0.2 + (20 - 0.2) / (1 + math.e**(0.5*(i_episode / 1000))) else: tau = 0.2 + (20 - 0.2) / (1 + math.e**(0.5*((i_episode - 20000) / 1000))) elif self.exploration == '8x8_changing': if i_episode < 25000: tau = 0.2 + (20 - 0.2) / (1 + math.e**(0.35*(i_episode / 1000))) else: tau = 0.2 + (20 - 0.2) / (1 + math.e**(0.35*((i_episode - 12500) / 1000))) for t in 
count(): if state not in self.memory: self.memory[state] = np.zeros(len(actions)), np.arange(len(actions)) q_values, flags = self.memory[state] # Exploration policy using softmax shift = q_values - np.max(q_values) prob = np.exp((1 / tau) * shift) / np.sum(np.exp((1 / tau) * shift)) # Quantum deliberation deliberation = self.classical_deliberation if self.mode == 'classical' else self.quantum_deliberation action = 0 if prob.size == 1 else deliberation(prob, flags) next_state, next_actions, reward = env.step(actions[action]) reward = reward[0] + self.gamma * reward[2] - reward[1] # Update estimate if next_state in self.memory: max_next_q = np.max(self.memory[next_state][0]) else: max_next_q = 0.0 td_target = reward + self.gamma * max_next_q td_error = td_target - q_values[action] q_values[action] += self.alpha * td_error if q_values.size > 1: if q_values[action] < 0.0: flags = np.delete(flags, np.where(flags == action)) else: flags = np.append(flags, action) if action not in flags else flags if flags.size == 0: f = np.arange(q_values.size) flags = np.delete(f, np.where(f == action)) self.memory[state] = q_values, flags total_rewards += reward if env.is_over: stats.episode_results[i_episode] = env.winner stats.episode_steps[i_episode] = t + 1 stats.episode_rewards[i_episode] = total_rewards stats.explored_states[i_episode] = len(self.memory.keys()) break state, actions = next_state, next_actions return stats def classical_deliberation(self, prob, flags): if self.R == 0: return np.random.choice(prob.size, p=prob) action = None for i_reflection in count(): action = np.random.choice(prob.size, p=prob) if action in flags or i_reflection + 1 >= self.R: break return action def quantum_deliberation(self, prob, flags): if self.R == 0: return np.random.choice(prob.size, p=prob) epsilon = reduce(lambda e, i: e + prob[i], flags, 0.0) epsilon = 1.0 if epsilon >= 1.0 else epsilon theta = math.asin(math.sqrt(epsilon)) k = math.ceil(1 / math.sqrt(epsilon)) action = None for 
i_reflection in count(): m = np.random.randint(0, k + 1) final_prob = np.array([ (math.sqrt(p) / math.sqrt(epsilon)) * math.sin((2 * m + 1) * theta) if i in flags else (math.sqrt(p) / math.sqrt(1 - epsilon)) * math.cos((2 * m + 1) * theta) for i,p in enumerate(prob) ])**2 # Fix sum is not 1.0 final_prob /= np.sum(final_prob) action = np.random.choice(final_prob.size, p=final_prob) if action in flags or i_reflection + 1 >= self.R: break return action # def quantum_deliberation(self, prob, flags): # num_qubits = 1 if prob.size == 1 else math.ceil(math.log2(prob.size)) # if prob.size != 2**num_qubits: # prob = np.append(prob, [0] * (2**num_qubits - prob.size)) # epsilon = reduce(lambda e, i: e + prob[i], flags, 0.0) # epsilon = 1.0 if epsilon >= 1.0 else epsilon # k = math.ceil(1 / math.sqrt(epsilon)) # U = CircuitBuilder(self.backend).get_U(num_qubits, prob_to_angles(prob)) # for i_reflection in count(): # qreg = QuantumRegister(num_qubits, name='q') # circ = QuantumCircuit(qreg) # circ.append(U.to_instruction(), qreg) # m = np.random.randint(0, k) # if m > 25: # print("ERROR! Big number ", m) # m = 25 # if m > 0: # grover = GroverOperator( # oracle=Diagonal([-1 if i in flags else 1 for i in range(2**num_qubits)]), # state_preparation=U # ).repeat(m) # circ.append(grover.to_instruction(), qreg) # circ.measure_all() # result = execute(circ, backend=self.backend, shots=1).result() # counts = result.get_counts(circ) # action = int(max(counts, key=counts.get), 2) # if action in flags or i_reflection >= self.R: # break # return action
[ "pickle.dump", "functools.reduce", "numpy.random.choice", "numpy.where", "pickle.load", "math.sqrt", "numpy.max", "numpy.exp", "numpy.sum", "numpy.random.randint", "itertools.count", "numpy.empty", "numpy.append", "math.cos", "math.sin", "numpy.arange" ]
[((4782, 4789), 'itertools.count', 'count', ([], {}), '()\n', (4787, 4789), False, 'from itertools import count\n'), ((5104, 5148), 'functools.reduce', 'reduce', (['(lambda e, i: e + prob[i])', 'flags', '(0.0)'], {}), '(lambda e, i: e + prob[i], flags, 0.0)\n', (5110, 5148), False, 'from functools import reduce\n'), ((5347, 5354), 'itertools.count', 'count', ([], {}), '()\n', (5352, 5354), False, 'from itertools import count\n'), ((827, 879), 'pickle.dump', 'pickle.dump', (['self.memory', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(self.memory, f, pickle.HIGHEST_PROTOCOL)\n', (838, 879), False, 'import math, pickle\n'), ((1002, 1016), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1013, 1016), False, 'import math, pickle\n'), ((2476, 2483), 'itertools.count', 'count', ([], {}), '()\n', (2481, 2483), False, 'from itertools import count\n'), ((4694, 4729), 'numpy.random.choice', 'np.random.choice', (['prob.size'], {'p': 'prob'}), '(prob.size, p=prob)\n', (4710, 4729), True, 'import numpy as np\n'), ((4812, 4847), 'numpy.random.choice', 'np.random.choice', (['prob.size'], {'p': 'prob'}), '(prob.size, p=prob)\n', (4828, 4847), True, 'import numpy as np\n'), ((5049, 5084), 'numpy.random.choice', 'np.random.choice', (['prob.size'], {'p': 'prob'}), '(prob.size, p=prob)\n', (5065, 5084), True, 'import numpy as np\n'), ((5228, 5246), 'math.sqrt', 'math.sqrt', (['epsilon'], {}), '(epsilon)\n', (5237, 5246), False, 'import math, pickle\n'), ((5372, 5399), 'numpy.random.randint', 'np.random.randint', (['(0)', '(k + 1)'], {}), '(0, k + 1)\n', (5389, 5399), True, 'import numpy as np\n'), ((5737, 5755), 'numpy.sum', 'np.sum', (['final_prob'], {}), '(final_prob)\n', (5743, 5755), True, 'import numpy as np\n'), ((5778, 5825), 'numpy.random.choice', 'np.random.choice', (['final_prob.size'], {'p': 'final_prob'}), '(final_prob.size, p=final_prob)\n', (5794, 5825), True, 'import numpy as np\n'), ((1139, 1172), 'numpy.empty', 'np.empty', (['num_episodes'], {'dtype': 'str'}), 
'(num_episodes, dtype=str)\n', (1147, 1172), True, 'import numpy as np\n'), ((1200, 1233), 'numpy.empty', 'np.empty', (['num_episodes'], {'dtype': 'int'}), '(num_episodes, dtype=int)\n', (1208, 1233), True, 'import numpy as np\n'), ((1263, 1298), 'numpy.empty', 'np.empty', (['num_episodes'], {'dtype': 'float'}), '(num_episodes, dtype=float)\n', (1271, 1298), True, 'import numpy as np\n'), ((1328, 1361), 'numpy.empty', 'np.empty', (['num_episodes'], {'dtype': 'int'}), '(num_episodes, dtype=int)\n', (1336, 1361), True, 'import numpy as np\n'), ((5275, 5293), 'math.sqrt', 'math.sqrt', (['epsilon'], {}), '(epsilon)\n', (5284, 5293), False, 'import math, pickle\n'), ((2760, 2776), 'numpy.max', 'np.max', (['q_values'], {}), '(q_values)\n', (2766, 2776), True, 'import numpy as np\n'), ((2800, 2823), 'numpy.exp', 'np.exp', (['(1 / tau * shift)'], {}), '(1 / tau * shift)\n', (2806, 2823), True, 'import numpy as np\n'), ((3360, 3394), 'numpy.max', 'np.max', (['self.memory[next_state][0]'], {}), '(self.memory[next_state][0])\n', (3366, 3394), True, 'import numpy as np\n'), ((2835, 2858), 'numpy.exp', 'np.exp', (['(1 / tau * shift)'], {}), '(1 / tau * shift)\n', (2841, 2858), True, 'import numpy as np\n'), ((3982, 4006), 'numpy.arange', 'np.arange', (['q_values.size'], {}), '(q_values.size)\n', (3991, 4006), True, 'import numpy as np\n'), ((3765, 3790), 'numpy.where', 'np.where', (['(flags == action)'], {}), '(flags == action)\n', (3773, 3790), True, 'import numpy as np\n'), ((3854, 3878), 'numpy.append', 'np.append', (['flags', 'action'], {}), '(flags, action)\n', (3863, 3878), True, 'import numpy as np\n'), ((4052, 4073), 'numpy.where', 'np.where', (['(f == action)'], {}), '(f == action)\n', (4060, 4073), True, 'import numpy as np\n'), ((5491, 5520), 'math.sin', 'math.sin', (['((2 * m + 1) * theta)'], {}), '((2 * m + 1) * theta)\n', (5499, 5520), False, 'import math, pickle\n'), ((5602, 5631), 'math.cos', 'math.cos', (['((2 * m + 1) * theta)'], {}), '((2 * m + 1) * 
theta)\n', (5610, 5631), False, 'import math, pickle\n'), ((5454, 5466), 'math.sqrt', 'math.sqrt', (['p'], {}), '(p)\n', (5463, 5466), False, 'import math, pickle\n'), ((5469, 5487), 'math.sqrt', 'math.sqrt', (['epsilon'], {}), '(epsilon)\n', (5478, 5487), False, 'import math, pickle\n'), ((5561, 5573), 'math.sqrt', 'math.sqrt', (['p'], {}), '(p)\n', (5570, 5573), False, 'import math, pickle\n'), ((5576, 5598), 'math.sqrt', 'math.sqrt', (['(1 - epsilon)'], {}), '(1 - epsilon)\n', (5585, 5598), False, 'import math, pickle\n')]
from raytracing import * import numpy import matplotlib.pyplot as plt from matplotlib import pyplot as plt import signal import os def sig(a, b): print("got sigint, exitting!") os._exit(0) def timercb(e): print("timer") signal.signal(signal.SIGINT, sig) f = 50 f_obj = 5 f_tube = 100 f_galvo = 100 f_poly2 = 75 f_poly1 = 45 x = numpy.linspace(-20,20,21) y = [] for d in x: nRays = 1000 # You can change this inputRays = RandomUniformRays(yMax=0.25e-3, maxCount=nRays) # at center outputRays = Rays() # output histogram path = OpticalPath() path.append(Space(d=d*1e-3)) path.append(System2f(f=f_obj)) path.append(System4f(f1=f_tube,f2=f_galvo)) path.append(System4f(f1=f_poly2,f2=f_poly1)) path.append(Space(d=300)) path.append(System2f(f=40)) path.append(Aperture(diameter=40e-3)) outputRays = path.traceManyThrough(inputRays,progress=False) y.append(outputRays.count/inputRays.count) print('.') plt.plot(x,y) plt.show(block=True)
[ "signal.signal", "matplotlib.pyplot.plot", "numpy.linspace", "os._exit", "matplotlib.pyplot.show" ]
[((235, 268), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'sig'], {}), '(signal.SIGINT, sig)\n', (248, 268), False, 'import signal\n'), ((348, 375), 'numpy.linspace', 'numpy.linspace', (['(-20)', '(20)', '(21)'], {}), '(-20, 20, 21)\n', (362, 375), False, 'import numpy\n'), ((978, 992), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (986, 992), True, 'from matplotlib import pyplot as plt\n'), ((992, 1012), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (1000, 1012), True, 'from matplotlib import pyplot as plt\n'), ((187, 198), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (195, 198), False, 'import os\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2019/9/21 14:06 # @Author : ganliang # @File : ndcopy.py # @Desc : 数据复制 import numpy as np a = np.arange(10) b = np.copy(a) print ("修改之前:") print (a) print (id(a)) print (b) print (id(b)) a[0] = 100 print ("修改之后:") print (a) print (id(a)) print (b) print (id(b)) c=a.reshape(5, 2) print (c)
[ "numpy.copy", "numpy.arange" ]
[((163, 176), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (172, 176), True, 'import numpy as np\n'), ((181, 191), 'numpy.copy', 'np.copy', (['a'], {}), '(a)\n', (188, 191), True, 'import numpy as np\n')]
import numpy as np import matplotlib.pyplot as plt from matplotlib import cm import math """ ############# GENERAL GAS CLASS ########### """ class Gas(): def __init__(self,T,P,R_u=8.31447): self.T = T self.P = P self.R_u=R_u self.normalshock=self.Shock(self) def gas_list(self): print(" Code\t","Gas","\n", "----\t","---","\n", "Air\t","Air" ,"\n", "Ar\t\t","Argon" ,"\n" , "CO2\t","Carbon dioxide" ,"\n", "CO\t\t","Carbon monoxide" ,"\n", "N2\t\t","Nitrogen" ,"\n" ) def area(self, diameter): return (np.pi*(diameter**2))/4 def critical_area(self,massflowrate): return massflowrate/(self.P*1000*(self.k**(1/2))*(2/(self.k+1))**((self.k+1)/(2*self.k-2))/((self.R*1000*self.T)**(1/2))) def critical_m_dot(self, Ma, diameter=1): return self.critical_density()*self.area(diameter)*self.critical_speed_of_sound(Ma) def critical_temperature(self, Ma): return self.stagnation_temp(Ma)*2/(self.k+1) def critical_pressure(self): return self.P*(2/(self.k+1))**(self.k/(self.k-1)) def critical_density(self): return self.rho*(2/(self.k+1))**(1/(self.k-1)) def critical_speed_of_sound(self, Ma): return np.sqrt(self.k*self.R*self.critical_temperature(Ma)*1000) def density(self): return self.P/(self.R*self.T) def diameter(self, area): return np.sqrt(4/np.pi*area) def enthalpy(self): return self.cp*self.T def exit_temperature(self,Mach): return self.T/(1+(self.k-1)/2*Mach**2) def exit_pressure(self,Mach): return self.P/(1+(self.k-1)/2*Mach**2)**(self.k/(self.k-1)) def exit_density(self, Mach): return self.rho/(1+(self.k-1)/2*Mach**2)**(1/(self.k-1)) def exit_speed(self, Mach): return Mach*np.sqrt(self.k*self.R*self.exit_temperature(Mach)*1000) def exit_area(self, Throat_Area, Mach): return Throat_Area*(1/Mach)*((2/(self.k+1))*(1+(self.k-1)/2*Mach**2))**((self.k+1)/(2*self.k-2)) def mach_number(self, velocity): return velocity/self.speed_of_sound() def m_dot(self, velocity, diameter=1): return self.density()*self.area(diameter)*velocity def mfr(self,velocity, diameter): return 
self.critical_pressure()*self.area(diameter)*self.mach_number(velocity)*np.sqrt(self.k/(self.R*self.critical_temperature())) def mass_flowrate(self, velocity, diameter=1): return (self.area(diameter)*self.mach_number(velocity)*self.stagnation_pressure(velocity)*np.sqrt(self.k/(self.R*self.stagnation_temp(velocity))))\ /((1+(self.k-1)*(self.mach_number(velocity)**2)/2)**((self.k+1)/(2*(self.k-1)))) def ma_finder(self, section, area_ratio, show_iterations=False, tolerance=10e-6, method="bisection"): try: if section !="upward" and section !="downward": raise NameError("Please specify the flow by using these keywords: \"upward\" or \"downward\"") def finder(Ma): value = (1/Ma*((1+0.5*(self.k-1)*Ma**2)/(0.5*(self.k+1)))**(0.5*(self.k+1)/(self.k-1))) if method=='golden' or method=='secant': target = abs(value - area_ratio) elif method=='bisection': target = value - area_ratio return target # def check_boundaries(Ma_0, Ma_1): # if section=="upward": # if Ma_0>1 or Ma_1>1: # Ma_0 = 1/Ma_0 # Ma_1 = Ma_0+0.001 # # print("ma kucuk 1 den calisti") # elif section=="downward": # if Ma_0<1 or Ma_1<1: # Ma_0 = 1+Ma_0 # Ma_1 = Ma_0+0.1 # # print("ma buyuk 1 den calisti") if section=="upward": if method=='bisection': Ma=bisection_method( finder,0, 1, tolerance = 10e-6,show_iterations=show_iterations) elif method=='secant': Ma=secant_method( finder,0, 1, tolerance = 10e-6,show_iterations=show_iterations) elif method=='golden': Ma=golden_section(finder,0, 1, tolerance = 10e-6,show_iterations=show_iterations) elif section=="downward": if method=='bisection': Ma=bisection_method( finder,1, 5, tolerance = 10e-6,show_iterations=show_iterations) elif method=='secant': Ma=secant_method( finder,1, 5, tolerance = 10e-6,show_iterations=show_iterations) elif method=='golden': Ma=golden_section(finder,1, 5, tolerance = 10e-6,show_iterations=show_iterations) return Ma except NameError: raise NameError("Please specify the flow by using these keywords: \"upward\" or \"downward\"") from None except 
ValueError: raise ValueError("Given area is smaller than throat area. Program has terminated.\n Hint: You could change the division number.") from None def plot(self,area_start, area_end, Mach_start, y_axis='T', color_bar='Ma', division=250 ,x_axis='A', method="bisection"): area_upward = np.linspace(area_start, self.throat_area(area_start,Mach_start), division) area_downward = np.linspace(self.throat_area(area_start,Mach_start), area_end, division) area_total = np.concatenate((area_upward,area_downward)) ST = self.stagnation_temp(Mach_start) temp_upward = [] Ma_upward = [] for i in range(division): ratio = self.throat_area_ratio(area_upward[i], area_start, Mach_start) Ma=self.ma_finder("upward",ratio,method=method) Ma_upward.append(Ma) temp_upward.append(self.temperature(Ma, ST)) temp_downward = [] Ma_downward = [] for i in range(division): ratio = self.throat_area_ratio(area_downward[i], area_start, Mach_start) Ma=self.ma_finder("downward",ratio,method=method) Ma_downward.append(Ma) temp_downward.append(self.temperature(Ma, ST)) temp_total = temp_upward +temp_downward Ma_total = Ma_upward +Ma_downward fig = plt.figure(figsize=(10,7.5)) ax = fig.add_subplot(111) xs = np.linspace(0,1,2*division) if y_axis == 'T': y_lbl='Temperature (K)' if color_bar=='Ma': color = Ma_total mp = ax.scatter((xs),(temp_total),c=color,cmap=plt.cm.get_cmap('jet')) c_lbl = 'Mach Number' elif color_bar=='T': mp = ax.scatter((xs),(temp_total),c=temp_total,cmap=plt.cm.get_cmap('jet')) c_lbl = 'T (K)' elif y_axis == 'Ma': y_lbl='Mach Number' if color_bar=='Ma': color = Ma_total mp = ax.scatter((xs),(Ma_total),c=color,cmap=plt.cm.get_cmap('jet')) c_lbl = 'Mach Number' elif color_bar=='T': mp = ax.scatter((xs),(Ma_total),c=temp_total,cmap=plt.cm.get_cmap('jet')) c_lbl = 'T (K)' cb = plt.colorbar(mp) cb.set_label(c_lbl) ax.set(title=r'Converging- Diverging Nozzle', xlabel='Area $m^2$', ylabel=y_lbl) tick_labels=[] for j in np.linspace(0,(2*division),7): if j==2*division: 
tick_labels.append(round(area_total[-1],4)) else: tick_labels.append(round(area_total[int(j)],4)) plt.xticks(np.linspace(0,1,7),tick_labels) plt.show() def pressure(self, Mach, Stagnation_Pressure): return Stagnation_Pressure/((1+0.5*(self.k-1)*Mach**2)**(self.k/(self.k-1))) def speed_of_sound(self): return np.sqrt(self.k*self.R*self.T*1000) def stagnation_temp(self,Mach): return self.T*(1+(self.k-1)/2*Mach**2) def stagnation_pressure(self,Mach): return self.P*(1+0.5*(self.k-1)*Mach**2)**(self.k/(self.k-1)) def temperature(self, Mach, Stagnation_Temperature): return Stagnation_Temperature/(1+(self.k-1)/2*Mach**2) def throat_area(self,known_area,Mach): return known_area/((1/Mach)*((2/(self.k+1))*(1+(self.k-1)/2*Mach**2))**((self.k+1)/(2*self.k-2))) def throat_area_ratio(self,wanted_area, known_area,known_Mach): return wanted_area/self.throat_area(known_area, known_Mach) class Shock(): def __init__(self, gas): self.gas = gas def P2(self, Ma1, P1): return P1*(1/(self.gas.k+1)*(2*self.gas.k*Ma1**2-(self.gas.k-1))) def Ma2(self,Ma1): return np.sqrt(((self.gas.k-1)*Ma1**2+2)/(2*self.gas.k*Ma1**2-(self.gas.k-1))) def P0_2(self,Stagnation_Pressure, Ma1): return Stagnation_Pressure*((((self.gas.k+1)*Ma1**2)/(2+(self.gas.k-1)*Ma1**2))**(self.gas.k/(self.gas.k-1))\ *((self.gas.k+1)/(2*self.gas.k*Ma1**2-(self.gas.k-1)))**(1/(self.gas.k-1))) def area_shock_star(self, area1_star, Ma1): return area1_star*(self.Ma2(Ma1)/Ma1)*((2+(self.gas.k-1)*Ma1**2)/(2+(self.gas.k-1)*self.Ma2(Ma1)**2))**((self.gas.k+1)/(2*self.gas.k-2)) def Ma_beforeshock(self, P2_P1): return np.sqrt((P2_P1*(self.gas.k+1)+(self.gas.k-1))/(2*self.gas.k)) def T2(self,T1,Ma1): return T1*(2+(self.gas.k-1)*Ma1**2)*(2*self.gas.k*Ma1**2-(self.gas.k-1))/(((self.gas.k+1)**2)*(Ma1**2)) def V2(self, T1, V1): return np.sqrt(2*self.gas.cp*(T1-self.T2(T1, V1/(self.gas.speed_of_sound())))+V1**2) class Air(Gas): def __init__(self,T=298.15,P=101.325): super().__init__(T, P) self.M=28.97 self.k=1.4 self.R=self.R_u/self.M 
self.cp=1.9327E-10*self.T**4 - 7.9999E-07*self.T**3 + 1.1407E-03*self.T**2 - 4.4890E-01*self.T + 1.0575E+03 self.rho = self.P/(self.R*self.T) class CO2(Gas): def __init__(self,T=298.15,P=101.325): super().__init__(T, P) self.M=44.01 self.k=1.289 self.R=self.R_u/self.M self.cp=0.849 self.rho = self.P/(self.R*self.T) class CO(Gas): def __init__(self,T=298.15,P=101.325): super().__init__(T, P) self.M=28.01 self.k=1.4 self.R=self.R_u/self.M self.cp=1.039 self.rho = self.P/(self.R*self.T) class N2(Gas): def __init__(self,T=298.15,P=101.325): super().__init__(T, P) self.M=28.01 self.k=1.4 self.R=self.R_u/self.M self.cp=1.040 self.rho = self.P/(self.R*self.T) class Ar(Gas): def __init__(self,T=298.15,P=101.325): super().__init__(T, P) self.M=39.95 self.k=1.667 self.R=self.R_u/self.M self.cp=0.5203 self.rho = self.P/(self.R*self.T) """ ############# NUMERICAL METHODS ########### """ def golden_section(func,starting, ending, show_iterations=False, tolerance = 10e-6): gr=(np.sqrt(5)+1)/2-1 dm=tolerance a0 = starting+dm b0 = ending-dm count=0 while True: count+=1 # print(finder(Ma_0)) # print(finder(Ma_1)) d=gr*(b0-a0) a1=a0+d b1=b0-d if abs((a1-b1)/a1)<=tolerance: if 1>=ending: print("The Mach number below unity is: ",a1,"\n") elif starting>=1: print("The Mach number above unity is: ",a1,"\n") break else: if func(a0)>func(b0): a0=a1 b1=b1 else: a0=a0 b0=b1 if show_iterations ==True: print("Iteration ", count, " :",a1) return (a1+b1)/2 def secant_method(func, lower_bound, upper_bound, show_iterations=False,tolerance=10e-6): Ma_0 = (upper_bound+lower_bound)/2 dMa = 0.01 Ma_1 = Ma_0+dMa count=0 while True: count+=1 Ma_2 = Ma_1 - func(Ma_1)*(Ma_1-Ma_0)/(func(Ma_1)-func(Ma_0)) if show_iterations ==True: print("Iteration ", count, " :",Ma_2) if func(Ma_2)<=tolerance: if show_iterations ==True: print("The Mach number below unity is: ",Ma_2,"\n") break else: Ma_0 = Ma_1 Ma_1 = Ma_2 return Ma_2 def bisection_method(func, lower_bound, upper_bound, 
show_iterations=False,tolerance=10e-6): if lower_bound==0 : lower_bound+=tolerance a=lower_bound b= upper_bound count = 0 while True: count+=1 c = (a+b)/2 if abs(func(c))<=tolerance: if show_iterations ==True: print("The root is: ",c,"\n") break else: if func(a)*func(c)>func(b)*func(c): b=b a=c else: a=a b=c if show_iterations ==True: print("Iteration ", count, " :",c) return c """ ############# ROCKET NOZZLE CLASS ########### """ class Nozzle(Gas): def __init__(self, class_gas): self.T=class_gas.T self.P=class_gas.P self.k=class_gas.k self.M=class_gas.M self.k=class_gas.k self.R=class_gas.R_u/class_gas.M self.cp=class_gas.cp self.rho = class_gas.P/(class_gas.R*class_gas.T) def critical_throat_pressure(self): return self.P*(2/(self.k+1))**(self.k/(self.k-1)) def exit_mach(self,backflow_pressure): if self.ischoked(backflow_pressure): Ma = 1 else: Ma = np.sqrt(5*((self.P/backflow_pressure)**(2/7)-1)) return Ma def ischoked(self, backflow_pressure ): if backflow_pressure < self.critical_pressure(): condition=True else: condition = False return condition def massflowrate(self, backflow_pressure, area): if self.ischoked(backflow_pressure): mdot = (area*self.P*1000)/(np.sqrt(self.R*self.T*1000))*np.sqrt((2*self.k/(self.k-1))*((self.critical_pressure()/self.P)**(2/self.k))*(1-(self.critical_pressure()/self.P)**(1-1/self.k))) else: mdot = (area*self.P*1000)/(np.sqrt(self.R*self.T*1000))*np.sqrt((2*self.k/(self.k-1))*((backflow_pressure/self.P)**(2/self.k))*(1-(backflow_pressure/self.P)**(1-1/self.k))) return mdot class RocketNozzle(Gas): def __init__(self, class_gas): self.T=class_gas.T self.P=class_gas.P self.k=class_gas.k self.M=class_gas.M self.k=class_gas.k self.R=class_gas.R_u/class_gas.M self.cp=class_gas.cp self.rho = class_gas.P/(class_gas.R*class_gas.T) self.normalshock=self.Shock(self) def geometry(self, area_start, area_throat, area_end, division=250, color = 'black'): A_start = area_start A1_star = area_throat A_exit = area_end division = 250 
r1=int((A_start/A1_star)/(A_start/A1_star+A_exit/A1_star)*division) r2=int((A_exit/A1_star)/(A_start/A1_star+A_exit/A1_star)*division) area_upward = np.linspace((A_start), (A1_star), r1) area_downward = np.linspace((A1_star), (A_exit), r2) area_total = np.concatenate((area_upward,area_downward)) diameter_total = self.diameter(area_total) # plt.style.use('dark_background') fig = plt.figure(figsize=(12,6)) ax = fig.add_subplot(111) xs = np.linspace(0,1,r1+r2) tick_labels=[] for j in np.linspace(0,(r1+r2),11): if j==r1+r2: tick_labels.append(round(area_total[-1],4)) else: tick_labels.append(round(area_total[int(j)],4)) plt.xticks(np.linspace(0,1,11),tick_labels) plt.plot(xs,diameter_total/2,color=color,linewidth=3) plt.plot(xs,-diameter_total/2,color=color,linewidth=3) centerline,=plt.plot(xs, 0*xs,linewidth=1,color=color) dashes=[30,5,5,5] centerline.set_dashes(dashes) plt.xlabel("Area (m2)") plt.ylabel("Radius (m)") plt.title("Rocket Nozzle Geometry") plt.show() plt.style.use('default') def shock(self, exit_pressure, throat_area, exit_area, start_area, plot=True,division = 250): def shock_finder(A_shock): ratio = A_shock/throat_area M1 = self.ma_finder('downward', ratio) P1 = self.pressure(M1, self.P) T1 = self.temperature(M1, self.T) M2 = self.normalshock.Ma2(M1) P2 = self.normalshock.P2(M1,P1) T2 = self.normalshock.T2(T1, M1) P02 = self.normalshock.P0_2(self.P, M1) A2_star = self.normalshock.area_shock_star(throat_area, M1) ratio2 = exit_area/A2_star Me = self.ma_finder('upward', ratio2) Pe = self.pressure(Me,P02) target = Pe-exit_pressure return target if shock_finder(exit_area)>0: print("There is no shock wave in the rocket nozzle") A_shock = None else: A_shock=bisection_method( shock_finder,throat_area, exit_area, tolerance = 10e-3,show_iterations=True) def shock_plot(start_area): A_start = start_area A1_star = throat_area A_exit = exit_area r1=int((A_start/A1_star)/(A_start/A1_star+A_exit/A1_star)*division) 
r2=int((A_exit/A1_star)/(A_start/A1_star+A_exit/A1_star)*division) area_upward = np.linspace((start_area), (throat_area), r1) area_downward = np.linspace((throat_area), (exit_area), r2) area_total = np.concatenate((area_upward,area_downward)) def find_closest(A, target): #A must be sorted idx = A.searchsorted(target) idx = np.clip(idx, 1, len(A)-1) left = A[idx-1] right = A[idx] idx -= target - left < right - target return idx idx=find_closest(area_total,A_shock) r=self.diameter(A_shock)/2 plt.style.use('dark_background') self.geometry(start_area, throat_area, exit_area,color='white') y=np.linspace(r,-r) # correction = ((A_shock/throat_area)+(start_area/throat_area))/((exit_area/throat_area)+(start_area/throat_area)) x=A_shock*np.sin(5000*y)+idx/division plt.plot(x,y,color='gold') plt.show() plt.style.use('default') if plot==True: shock_plot(start_area) return A_shock """ ############# RELATIONS CLASS ########### """ class relations: def change_in_entropy(T2,T1,P2,P1,cp,R): return cp*np.log(T2/T1)-R*np.log(P2/P1)
[ "matplotlib.pyplot.cm.get_cmap", "numpy.sqrt", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.plot", "matplotlib.pyplot.style.use", "numpy.log", "matplotlib.pyplot.figure", "numpy.linspace", "numpy.concatenate", "numpy.sin", "matplotl...
[((1554, 1579), 'numpy.sqrt', 'np.sqrt', (['(4 / np.pi * area)'], {}), '(4 / np.pi * area)\n', (1561, 1579), True, 'import numpy as np\n'), ((5698, 5742), 'numpy.concatenate', 'np.concatenate', (['(area_upward, area_downward)'], {}), '((area_upward, area_downward))\n', (5712, 5742), True, 'import numpy as np\n'), ((6639, 6668), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7.5)'}), '(figsize=(10, 7.5))\n', (6649, 6668), True, 'import matplotlib.pyplot as plt\n'), ((6715, 6746), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(2 * division)'], {}), '(0, 1, 2 * division)\n', (6726, 6746), True, 'import numpy as np\n'), ((7596, 7612), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mp'], {}), '(mp)\n', (7608, 7612), True, 'import matplotlib.pyplot as plt\n'), ((7820, 7851), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * division)', '(7)'], {}), '(0, 2 * division, 7)\n', (7831, 7851), True, 'import numpy as np\n'), ((8104, 8114), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8112, 8114), True, 'import matplotlib.pyplot as plt\n'), ((8298, 8338), 'numpy.sqrt', 'np.sqrt', (['(self.k * self.R * self.T * 1000)'], {}), '(self.k * self.R * self.T * 1000)\n', (8305, 8338), True, 'import numpy as np\n'), ((15803, 15836), 'numpy.linspace', 'np.linspace', (['A_start', 'A1_star', 'r1'], {}), '(A_start, A1_star, r1)\n', (15814, 15836), True, 'import numpy as np\n'), ((15865, 15897), 'numpy.linspace', 'np.linspace', (['A1_star', 'A_exit', 'r2'], {}), '(A1_star, A_exit, r2)\n', (15876, 15897), True, 'import numpy as np\n'), ((15923, 15967), 'numpy.concatenate', 'np.concatenate', (['(area_upward, area_downward)'], {}), '((area_upward, area_downward))\n', (15937, 15967), True, 'import numpy as np\n'), ((16084, 16111), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (16094, 16111), True, 'import matplotlib.pyplot as plt\n'), ((16158, 16184), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(r1 + r2)'], 
{}), '(0, 1, r1 + r2)\n', (16169, 16184), True, 'import numpy as np\n'), ((16235, 16262), 'numpy.linspace', 'np.linspace', (['(0)', '(r1 + r2)', '(11)'], {}), '(0, r1 + r2, 11)\n', (16246, 16262), True, 'import numpy as np\n'), ((16502, 16560), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', '(diameter_total / 2)'], {'color': 'color', 'linewidth': '(3)'}), '(xs, diameter_total / 2, color=color, linewidth=3)\n', (16510, 16560), True, 'import matplotlib.pyplot as plt\n'), ((16564, 16623), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', '(-diameter_total / 2)'], {'color': 'color', 'linewidth': '(3)'}), '(xs, -diameter_total / 2, color=color, linewidth=3)\n', (16572, 16623), True, 'import matplotlib.pyplot as plt\n'), ((16639, 16685), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', '(0 * xs)'], {'linewidth': '(1)', 'color': 'color'}), '(xs, 0 * xs, linewidth=1, color=color)\n', (16647, 16685), True, 'import matplotlib.pyplot as plt\n'), ((16763, 16786), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Area (m2)"""'], {}), "('Area (m2)')\n", (16773, 16786), True, 'import matplotlib.pyplot as plt\n'), ((16795, 16819), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Radius (m)"""'], {}), "('Radius (m)')\n", (16805, 16819), True, 'import matplotlib.pyplot as plt\n'), ((16828, 16863), 'matplotlib.pyplot.title', 'plt.title', (['"""Rocket Nozzle Geometry"""'], {}), "('Rocket Nozzle Geometry')\n", (16837, 16863), True, 'import matplotlib.pyplot as plt\n'), ((16881, 16891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16889, 16891), True, 'import matplotlib.pyplot as plt\n'), ((16900, 16924), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (16913, 16924), True, 'import matplotlib.pyplot as plt\n'), ((8046, 8066), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(7)'], {}), '(0, 1, 7)\n', (8057, 8066), True, 'import numpy as np\n'), ((9200, 9296), 'numpy.sqrt', 'np.sqrt', (['(((self.gas.k - 1) * Ma1 ** 2 + 2) / (2 * self.gas.k * 
Ma1 ** 2 - (self.gas\n .k - 1)))'], {}), '(((self.gas.k - 1) * Ma1 ** 2 + 2) / (2 * self.gas.k * Ma1 ** 2 - (\n self.gas.k - 1)))\n', (9207, 9296), True, 'import numpy as np\n'), ((9829, 9902), 'numpy.sqrt', 'np.sqrt', (['((P2_P1 * (self.gas.k + 1) + (self.gas.k - 1)) / (2 * self.gas.k))'], {}), '((P2_P1 * (self.gas.k + 1) + (self.gas.k - 1)) / (2 * self.gas.k))\n', (9836, 9902), True, 'import numpy as np\n'), ((14265, 14323), 'numpy.sqrt', 'np.sqrt', (['(5 * ((self.P / backflow_pressure) ** (2 / 7) - 1))'], {}), '(5 * ((self.P / backflow_pressure) ** (2 / 7) - 1))\n', (14272, 14323), True, 'import numpy as np\n'), ((16452, 16473), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (16463, 16473), True, 'import numpy as np\n'), ((18404, 18444), 'numpy.linspace', 'np.linspace', (['start_area', 'throat_area', 'r1'], {}), '(start_area, throat_area, r1)\n', (18415, 18444), True, 'import numpy as np\n'), ((18477, 18516), 'numpy.linspace', 'np.linspace', (['throat_area', 'exit_area', 'r2'], {}), '(throat_area, exit_area, r2)\n', (18488, 18516), True, 'import numpy as np\n'), ((18546, 18590), 'numpy.concatenate', 'np.concatenate', (['(area_upward, area_downward)'], {}), '((area_upward, area_downward))\n', (18560, 18590), True, 'import numpy as np\n'), ((19005, 19037), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (19018, 19037), True, 'import matplotlib.pyplot as plt\n'), ((19129, 19147), 'numpy.linspace', 'np.linspace', (['r', '(-r)'], {}), '(r, -r)\n', (19140, 19147), True, 'import numpy as np\n'), ((19336, 19364), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""gold"""'}), "(x, y, color='gold')\n", (19344, 19364), True, 'import matplotlib.pyplot as plt\n'), ((19375, 19385), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19383, 19385), True, 'import matplotlib.pyplot as plt\n'), ((19398, 19422), 'matplotlib.pyplot.style.use', 'plt.style.use', 
(['"""default"""'], {}), "('default')\n", (19411, 19422), True, 'import matplotlib.pyplot as plt\n'), ((11602, 11612), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (11609, 11612), True, 'import numpy as np\n'), ((14908, 15050), 'numpy.sqrt', 'np.sqrt', (['(2 * self.k / (self.k - 1) * (backflow_pressure / self.P) ** (2 / self.k) *\n (1 - (backflow_pressure / self.P) ** (1 - 1 / self.k)))'], {}), '(2 * self.k / (self.k - 1) * (backflow_pressure / self.P) ** (2 /\n self.k) * (1 - (backflow_pressure / self.P) ** (1 - 1 / self.k)))\n', (14915, 15050), True, 'import numpy as np\n'), ((19681, 19696), 'numpy.log', 'np.log', (['(T2 / T1)'], {}), '(T2 / T1)\n', (19687, 19696), True, 'import numpy as np\n'), ((19697, 19712), 'numpy.log', 'np.log', (['(P2 / P1)'], {}), '(P2 / P1)\n', (19703, 19712), True, 'import numpy as np\n'), ((14666, 14697), 'numpy.sqrt', 'np.sqrt', (['(self.R * self.T * 1000)'], {}), '(self.R * self.T * 1000)\n', (14673, 14697), True, 'import numpy as np\n'), ((14879, 14910), 'numpy.sqrt', 'np.sqrt', (['(self.R * self.T * 1000)'], {}), '(self.R * self.T * 1000)\n', (14886, 14910), True, 'import numpy as np\n'), ((19296, 19312), 'numpy.sin', 'np.sin', (['(5000 * y)'], {}), '(5000 * y)\n', (19302, 19312), True, 'import numpy as np\n'), ((6943, 6965), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (6958, 6965), True, 'import matplotlib.pyplot as plt\n'), ((7106, 7128), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (7121, 7128), True, 'import matplotlib.pyplot as plt\n'), ((7349, 7371), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (7364, 7371), True, 'import matplotlib.pyplot as plt\n'), ((7510, 7532), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (7525, 7532), True, 'import matplotlib.pyplot as plt\n')]
import numpy as np import numpy.random as rnd import simple_optimise as mlopt vec = rnd.randn(10) mat = rnd.randn(10, 10) mat += mat.T # Single output, single input def f1_1(x): return x**2.0 def fD_1(x): return vec * x def f1_D(x): return x.dot(mat.dot(x)) def f1_DD(x): return vec.dot(x.dot(vec)) def fDD_DD(x): return x * 3 fd1_1 = mlopt.finite_difference(f1_1, 3.0) fdD_1 = mlopt.finite_difference(fD_1, 3.0) fd1_D = mlopt.finite_difference(f1_D, rnd.randn(10)) fd1_DD = mlopt.finite_difference(f1_DD, rnd.randn(10, 10)) fdDD_DD = mlopt.finite_difference(fDD_DD, rnd.randn(10, 10))
[ "simple_optimise.finite_difference", "numpy.random.randn" ]
[((86, 99), 'numpy.random.randn', 'rnd.randn', (['(10)'], {}), '(10)\n', (95, 99), True, 'import numpy.random as rnd\n'), ((106, 123), 'numpy.random.randn', 'rnd.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (115, 123), True, 'import numpy.random as rnd\n'), ((368, 402), 'simple_optimise.finite_difference', 'mlopt.finite_difference', (['f1_1', '(3.0)'], {}), '(f1_1, 3.0)\n', (391, 402), True, 'import simple_optimise as mlopt\n'), ((411, 445), 'simple_optimise.finite_difference', 'mlopt.finite_difference', (['fD_1', '(3.0)'], {}), '(fD_1, 3.0)\n', (434, 445), True, 'import simple_optimise as mlopt\n'), ((484, 497), 'numpy.random.randn', 'rnd.randn', (['(10)'], {}), '(10)\n', (493, 497), True, 'import numpy.random as rnd\n'), ((539, 556), 'numpy.random.randn', 'rnd.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (548, 556), True, 'import numpy.random as rnd\n'), ((600, 617), 'numpy.random.randn', 'rnd.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (609, 617), True, 'import numpy.random as rnd\n')]
import natsort import numpy as np import pandas as pd import plotly.io as pio import plotly.express as px import plotly.graph_objects as go import plotly.figure_factory as ff import re import traceback from io import BytesIO from sklearn.decomposition import PCA from sklearn.metrics import pairwise as pw import json import statistics import matplotlib.pyplot as plt import matplotlib_venn as venn from matplotlib_venn import venn2, venn3, venn3_circles from PIL import Image from upsetplot import from_memberships from upsetplot import plot as upplot import pkg_resources def natsort_index_keys(x): order = natsort.natsorted(np.unique(x.values)) return pd.Index([order.index(el) for el in x], name=x.name) def natsort_list_keys(x): order = natsort.natsorted(np.unique(x)) return [order.index(el) for el in x] class SpatialDataSet: regex = { "imported_columns": "^[Rr]atio H/L (?!normalized|type|is.*|variability|count)[^ ]+|^Ratio H/L variability.... .+|^Ratio H/L count .+|id$|[Mm][Ss].*[cC]ount.+$|[Ll][Ff][Qq].*|.*[nN]ames.*|.*[Pp][rR]otein.[Ii][Dd]s.*|[Pp]otential.[cC]ontaminant|[Oo]nly.[iI]dentified.[bB]y.[sS]ite|[Rr]everse|[Ss]core|[Qq]-[Vv]alue|R.Condition|PG.Genes|PG.ProteinGroups|PG.Cscore|PG.Qvalue|PG.RunEvidenceCount|PG.Quantity|^Proteins$|^Sequence$" } acquisition_set_dict = { "LFQ6 - Spectronaut" : ["LFQ intensity", "MS/MS count"], "LFQ5 - Spectronaut" : ["LFQ intensity", "MS/MS count"], "LFQ5 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"], "LFQ6 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"], "SILAC - MQ" : [ "[Rr]atio.[Hh]/[Ll](?!.[Vv]aria|.[Cc]ount)","[Rr]atio.[Hh]/[Ll].[Vv]ariability.\[%\]", "[Rr]atio.[Hh]/[Ll].[cC]ount"], "Custom": ["(?!Protein IDs|Gene names)"] } Spectronaut_columnRenaming = { "R.Condition": "Map", "PG.Genes" : "Gene names", "PG.Qvalue": "Q-value", "PG.Cscore":"C-Score", "PG.ProteinGroups" : "Protein IDs", "PG.RunEvidenceCount" : "MS/MS count", "PG.Quantity" : "LFQ 
intensity" } css_color = ["#b2df8a", "#6a3d9a", "#e31a1c", "#b15928", "#fdbf6f", "#ff7f00", "#cab2d6", "#fb9a99", "#1f78b4", "#ffff99", "#a6cee3", "#33a02c", "blue", "orange", "goldenrod", "lightcoral", "magenta", "brown", "lightpink", "red", "turquoise", "khaki", "darkgoldenrod","darkturquoise", "darkviolet", "greenyellow", "darksalmon", "hotpink", "indianred", "indigo","darkolivegreen", "coral", "aqua", "beige", "bisque", "black", "blanchedalmond", "blueviolet", "burlywood", "cadetblue", "yellowgreen", "chartreuse", "chocolate", "cornflowerblue", "cornsilk", "darkblue", "darkcyan", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta", "darkorange", "darkorchid", "darkred", "darkseagreen", "darkslateblue", "snow", "springgreen", "darkslategrey", "mediumpurple", "oldlace", "olive", "lightseagreen", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen", "fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan", "fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan", "lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightsalmon", "lightskyblue", "lightslategray", "lightslategrey", "lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "maroon", "mediumaquamarine", "mediumblue", "mediumseagreen", "mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin", "olivedrab", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru", "pink", "plum", "powderblue", "rosybrown", "royalblue", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver", "skyblue", "slateblue", "steelblue", "teal", "thistle", "tomato", "violet", "wheat", "white", "whitesmoke", "slategray", "slategrey", 
"aquamarine", "azure","crimson", "cyan", "darkslategray", "grey","mediumorchid","navajowhite", "navy"] analysed_datasets_dict = {} df_organellarMarkerSet = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/organellemarkers/{}.csv'.format("Homo sapiens - Uniprot")), usecols=lambda x: bool(re.match("Gene name|Compartment", x))) df_organellarMarkerSet = df_organellarMarkerSet.rename(columns={"Gene name":"Gene names"}) df_organellarMarkerSet = df_organellarMarkerSet.astype({"Gene names": "str"}) def __init__(self, filename, expname, acquisition, comment, name_pattern="e.g.:.* (?P<cond>.*)_(?P<rep>.*)_(?P<frac>.*)", reannotate_genes=False, **kwargs): self.filename = filename self.expname = expname self.acquisition = acquisition self.name_pattern = name_pattern self.comment = comment self.imported_columns = self.regex["imported_columns"] self.fractions, self.map_names = [], [] self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame() if acquisition == "SILAC - MQ": if "RatioHLcount" not in kwargs.keys(): self.RatioHLcount = 2 else: self.RatioHLcount = kwargs["RatioHLcount"] del kwargs["RatioHLcount"] if "RatioVariability" not in kwargs.keys(): self.RatioVariability = 30 else: self.RatioVariability = kwargs["RatioVariability"] del kwargs["RatioVariability"] elif acquisition == "Custom": self.custom_columns = kwargs["custom_columns"] self.custom_normalized = kwargs["custom_normalized"] self.imported_columns = "^"+"$|^".join(["$|^".join(el) if type(el) == list else el for el in self.custom_columns.values() if el not in [[], None, ""]])+"$" #elif acquisition == "LFQ5 - MQ" or acquisition == "LFQ6 - MQ" or acquisition == "LFQ6 - Spectronaut" or acquisition == "LFQ5 - Spectronaut": else: if "summed_MSMS_counts" not in kwargs.keys(): self.summed_MSMS_counts = 2 else: self.summed_MSMS_counts = kwargs["summed_MSMS_counts"] del kwargs["summed_MSMS_counts"] if "consecutiveLFQi" not in kwargs.keys(): self.consecutiveLFQi = 4 else: self.consecutiveLFQi = 
kwargs["consecutiveLFQi"] del kwargs["consecutiveLFQi"] #self.markerset_or_cluster = False if "markerset_or_cluster" not in kwargs.keys() else kwargs["markerset_or_cluster"] if "organism" not in kwargs.keys(): marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format("Homo sapiens - Uniprot"))) self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])} else: assert kwargs["organism"]+".csv" in pkg_resources.resource_listdir(__name__, "annotations/complexes") marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(kwargs["organism"]))) self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])} self.organism = kwargs["organism"] del kwargs["organism"] self.analysed_datasets_dict = {} self.analysis_summary_dict = {} def data_reading(self, filename=None, content=None): """ Data import. Can read the df_original from a file or buffer. df_original contains all information of the raw file; tab separated file is imported, Args: self: filename: string imported_columns : dictionry; columns that correspond to this regular expression will be imported filename: default None, to use the class attribute. Otherwise overwrites the class attribute upon success. content: default None, to use the filename. Any valid input to pd.read_csv can be provided, e.g. a StringIO buffer. 
Returns: self.df_orginal: raw, unprocessed dataframe, single level column index """ # use instance attribute if no filename is provided if filename is None: filename = self.filename # if no buffer is provided for the content read straight from the file if content is None: content = filename if filename.endswith("xls") or filename.endswith("txt"): self.df_original = pd.read_csv(content, sep="\t", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True) else: #assuming csv file self.df_original = pd.read_csv(content, sep=",", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True) assert self.df_original.shape[0]>10 and self.df_original.shape[1]>5 self.filename = filename return self.df_original def processingdf(self, name_pattern=None, summed_MSMS_counts=None, consecutiveLFQi=None, RatioHLcount=None, RatioVariability=None, custom_columns=None, custom_normalized=None): """ Analysis of the SILAC/LFQ-MQ/LFQ-Spectronaut data will be performed. The dataframe will be filtered, normalized, and converted into a dataframe, characterized by a flat column index. 
These tasks is performed by following functions: indexingdf(df_original, acquisition_set_dict, acquisition, fraction_dict, name_pattern) spectronaut_LFQ_indexingdf(df_original, Spectronaut_columnRenaming, acquisition_set_dict, acquisition, fraction_dict, name_pattern) stringency_silac(df_index) normalization_01_silac(df_stringency_mapfracstacked): logarithmization_silac(df_stringency_mapfracstacked): stringency_lfq(df_index): normalization_01_lfq(df_stringency_mapfracstacked): logarithmization_lfq(df_stringency_mapfracstacked): Args: self.acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ" additional arguments can be used to override the value set by the class init function Returns: self: map_names: list of Map names df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name df_log_stacked: df; log transformed data analysis_summary_dict["0/1 normalized data - mean"] : 0/1 normalized data across all maps by calculating the mean ["changes in shape after filtering"] ["Unique Proteins"] : unique proteins, derived from the first entry of Protein IDs, seperated by a ";" ["Analysis parameters"] : {"acquisition" : ..., "filename" : ..., #SILAC# "Ratio H/L count 1 (>=X)" : ..., "Ratio H/L count 2 (>=Y, var<Z)" : ..., "Ratio variability (<Z, count>=Y)" : ... #LFQ# "consecutive data points" : ..., "summed MS/MS counts" : ... 
} """ if name_pattern is None: name_pattern = self.name_pattern if self.acquisition == "SILAC - MQ": if RatioHLcount is None: RatioHLcount = self.RatioHLcount if RatioVariability is None: RatioVariability = self.RatioVariability elif self.acquisition == "Custom": if custom_columns is None: custom_columns = self.custom_columns if custom_normalized is None: custom_normalized = self.custom_normalized else: if summed_MSMS_counts is None: summed_MSMS_counts = self.summed_MSMS_counts if consecutiveLFQi is None: consecutiveLFQi = self.consecutiveLFQi shape_dict = {} def indexingdf(): """ For data output from MaxQuant, all columns - except of "MS/MS count" and "LFQ intensity" (LFQ) | "Ratio H/L count", "Ratio H/L variability [%]" (SILAC) - will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count", "LFQ intensity"| "Ratio H/L count", "Ratio H/L variability [%]"), "Fraction" (= defined via "name_pattern") and "Map" (= defined via "name_pattern") as level names, allowing the stacking and unstacking of the dataframe. The dataframe will be filtered by removing matches to the reverse database, matches only identified by site, and potential contaminants. Args: self: df_original: dataframe, columns defined through self.imported_columns acquisition_set_dict: dictionary, all columns will be set as index, except of those that are listed in acquisition_set_dict acquisition: string, one of "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ" fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K name_pattern: regular expression, to identify Map-Fraction-(Replicate) Returns: self: df_index: mutliindex dataframe, which contains 3 level labels: Map, Fraction, Type shape_dict["Original size"] of df_original shape_dict["Shape after categorical filtering"] of df_index fractions: list of fractions e.g. ["01K", "03K", ...] 
""" df_original = self.df_original.copy() df_original.rename({"Proteins": "Protein IDs"}, axis=1, inplace=True) df_original = df_original.set_index([col for col in df_original.columns if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False]) # multindex will be generated, by extracting the information about the Map, Fraction and Type from each individual column name multiindex = pd.MultiIndex.from_arrays( arrays=[ [[re.findall(s, col)[0] for s in self.acquisition_set_dict[self.acquisition] if re.match(s,col)][0] for col in df_original.columns], [re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns], [re.match(self.name_pattern, col).group("frac") for col in df_original.columns], ], names=["Set", "Map", "Fraction"] ) df_original.columns = multiindex df_original.sort_index(1, inplace=True) shape_dict["Original size"] = df_original.shape try: df_index = df_original.xs( np.nan, 0, "Reverse") except: pass try: df_index = df_index.xs( np.nan, 0, "Potential contaminant") except: pass try: df_index = df_index.xs( np.nan, 0, "Only identified by site") except: pass df_index.replace(0, np.nan, inplace=True) shape_dict["Shape after categorical filtering"] = df_index.shape df_index.rename(columns={"MS/MS Count":"MS/MS count"}, inplace=True) fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique()) ##############Cyt should get only be removed if it is not an NMC split if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4: df_index.drop("Cyt", axis=1, level="Fraction", inplace=True) try: if self.acquisition == "LFQ5 - MQ": df_index.drop("01K", axis=1, level="Fraction", inplace=True) except: pass self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique())) self.df_index = df_index return df_index def 
custom_indexing_and_normalization(): df_original = self.df_original.copy() df_original.rename({custom_columns["ids"]: "Protein IDs", custom_columns["genes"]: "Gene names"}, axis=1, inplace=True) df_original = df_original.set_index([col for col in df_original.columns if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False]) # multindex will be generated, by extracting the information about the Map, Fraction and Type from each individual column name multiindex = pd.MultiIndex.from_arrays( arrays=[ ["normalized profile" for col in df_original.columns], [re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns], [re.match(self.name_pattern, col).group("frac") for col in df_original.columns], ], names=["Set", "Map", "Fraction"] ) df_original.columns = multiindex df_original.sort_index(1, inplace=True) shape_dict["Original size"] = df_original.shape # for custom upload assume full normalization for now. this should be extended to valid value filtering and 0-1 normalization later df_index = df_original.copy() self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique())) self.df_index = df_index return df_index def spectronaut_LFQ_indexingdf(): """ For data generated from the Spectronaut software, columns will be renamed, such it fits in the scheme of MaxQuant output data. Subsequently, all columns - except of "MS/MS count" and "LFQ intensity" will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count" and "LFQ intensity"), Fraction" and "Map" (= defined via "name_pattern"; both based on the column name R.condition - equivalent to the column name "Map" in df_renamed["Map"]) as level labels. !!! !!!It is very important to define R.Fraction, R.condition already during the setup of Spectronaut!!! !!! 
Args: self: df_original: dataframe, columns defined through self.imported_columns Spectronaut_columnRenaming acquisition_set_dict: dictionary, all columns will be set as index, except of those that are listed in acquisition_set_dict acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut" fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K name_pattern: regular expression, to identify Map-Fraction-(Replicate) Returns: self: df_index: mutliindex dataframe, which contains 3 level labels: Map, Fraction, Type shape_dict["Original size"] of df_index fractions: list of fractions e.g. ["01K", "03K", ...] """ df_original = self.df_original.copy() df_renamed = df_original.rename(columns=self.Spectronaut_columnRenaming) df_renamed["Fraction"] = [re.match(self.name_pattern, i).group("frac") for i in df_renamed["Map"]] df_renamed["Map"] = [re.match(self.name_pattern, i).group("rep") for i in df_renamed["Map"]] if not "<cond>" in self.name_pattern else ["_".join( re.match(self.name_pattern, i).group("cond", "rep")) for i in df_renamed["Map"]] df_index = df_renamed.set_index([col for col in df_renamed.columns if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]])==False]) df_index.columns.names = ["Set"] # In case fractionated data was used this needs to be catched and aggregated try: df_index = df_index.unstack(["Map", "Fraction"]) except ValueError: df_index = df_index.groupby(by=df_index.index.names).agg(np.nansum, axis=0) df_index = df_index.unstack(["Map", "Fraction"]) df_index.replace(0, np.nan, inplace=True) shape_dict["Original size"]=df_index.shape fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique()) #Cyt is removed only if it is not an NMC split if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4: df_index.drop("Cyt", axis=1, level="Fraction", inplace=True) try: if self.acquisition == "LFQ5 - Spectronaut": df_index.drop("01K", axis=1, 
level="Fraction", inplace=True) except: pass self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique())) self.df_index = df_index return df_index def stringency_silac(df_index): """ The multiindex dataframe is subjected to stringency filtering. Only Proteins with complete profiles are considered (a set of f.e. 5 SILAC ratios in case you have 5 fractions / any proteins with missing values were rejected). Proteins were retained with 3 or more quantifications in each subfraction (=count). Furthermore, proteins with only 2 quantification events in one or more subfraction were retained, if their ratio variability for ratios obtained with 2 quantification events was below 30% (=var). SILAC ratios were linearly normalized by division through the fraction median. Subsequently normalization to SILAC loading was performed.Data is annotated based on specified marker set e.g. eLife. Args: df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type RatioHLcount: int, 2 RatioVariability: int, 30 df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index fractions: list of fractions e.g. ["01K", "03K", ...] 
Returns: df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked; columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices shape_dict["Shape after Ratio H/L count (>=3)/var (count>=2, var<30) filtering"] of df_countvarfiltered_stacked shape_dict["Shape after filtering for complete profiles"] of df_stringency_mapfracstacked """ # Fraction and Map will be stacked df_stack = df_index.stack(["Fraction", "Map"]) # filtering for sufficient number of quantifications (count in "Ratio H/L count"), taken variability (var in Ratio H/L variability [%]) into account # zip: allows direct comparison of count and var # only if the filtering parameters are fulfilled the data will be introduced into df_countvarfiltered_stacked #default setting: RatioHLcount = 2 ; RatioVariability = 30 df_countvarfiltered_stacked = df_stack.loc[[count>RatioHLcount or (count==RatioHLcount and var<RatioVariability) for var, count in zip(df_stack["Ratio H/L variability [%]"], df_stack["Ratio H/L count"])]] shape_dict["Shape after Ratio H/L count (>=3)/var (count==2, var<30) filtering"] = df_countvarfiltered_stacked.unstack(["Fraction", "Map"]).shape # "Ratio H/L":normalization to SILAC loading, each individual experiment (FractionXMap) will be divided by its median # np.median([...]): only entries, that are not NANs are considered df_normsilac_stacked = df_countvarfiltered_stacked["Ratio H/L"]\ .unstack(["Fraction", "Map"])\ .apply(lambda x: x/np.nanmedian(x), axis=0)\ .stack(["Map", "Fraction"]) df_stringency_mapfracstacked = df_countvarfiltered_stacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join( pd.DataFrame(df_normsilac_stacked, columns=["Ratio H/L"])) # dataframe is grouped (Map, id), that allows the filtering for complete profiles df_stringency_mapfracstacked = df_stringency_mapfracstacked.groupby(["Map", "id"]).filter(lambda x: len(x)>=len(self.fractions)) shape_dict["Shape after filtering for complete 
profiles"]=df_stringency_mapfracstacked.unstack(["Fraction", "Map"]).shape # Ratio H/L is converted into Ratio L/H df_stringency_mapfracstacked["Ratio H/L"] = df_stringency_mapfracstacked["Ratio H/L"].transform(lambda x: 1/x) #Annotation with marker genes df_organellarMarkerSet = self.df_organellarMarkerSet df_stringency_mapfracstacked.reset_index(inplace=True) df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names") df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns if c not in ["Ratio H/L count","Ratio H/L variability [%]","Ratio H/L"]], inplace=True) df_stringency_mapfracstacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True) return df_stringency_mapfracstacked def normalization_01_silac(df_stringency_mapfracstacked): """ The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("Ratio H/L"). Args: df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" are stacked; columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices self: fractions: list of fractions e.g. ["01K", "03K", ...] 
                    data_completeness: series, for each individual map, as well as combined maps: 1 - (percentage of NANs)

            Returns:
                df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "Ratio H/L" is 0-1 normalized
                    and renamed to "normalized profile"; the columns "Ratio H/L count", "Ratio H/L variability [%]", and
                    "normalized profile" stored as single level indices; plotting is possible now
                self:
                    analysis_summary_dict["Data/Profile Completeness"] : df, with information about Data/Profile Completeness
                        column: "Experiment", "Map", "Data completeness", "Profile completeness"
                        no row index
            """

            df_01norm_unstacked = df_stringency_mapfracstacked["Ratio H/L"].unstack("Fraction")

            # 0-1 normalization: each profile (row over the fractions) is divided by its sum, i.e. it sums to 1.
            df_01norm_unstacked = df_01norm_unstacked.div(df_01norm_unstacked.sum(axis=1), axis=0)

            # Re-attach the count/variability columns to the normalized values.
            df_01_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(pd.DataFrame
                (df_01norm_unstacked.stack("Fraction"),columns=["Ratio H/L"]))

            # "Ratio H/L" will be renamed to "normalized profile"
            df_01_stacked.columns = [col if col!="Ratio H/L" else "normalized profile" for col in df_01_stacked.columns]

            return df_01_stacked


        def logarithmization_silac(df_stringency_mapfracstacked):
            """
            The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("Ratio H/L").

            Args:
                df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked; the columns
                    "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices

            Returns:
                df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates
                    from logarithmized "Ratio H/L" data; the columns "Ratio H/L count", "Ratio H/L variability [%]" and
                    "log profile" are stored as single level indices; PCA is possible now
            """

            # logarithmizing, basis of 2
            df_lognorm_ratio_stacked = df_stringency_mapfracstacked["Ratio H/L"].transform(np.log2)
            df_log_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
                pd.DataFrame(df_lognorm_ratio_stacked, columns=["Ratio H/L"]))

            # "Ratio H/L" will be renamed to "log profile"
            df_log_stacked.columns = [col if col !="Ratio H/L" else "log profile" for col in df_log_stacked.columns]

            return df_log_stacked


        def stringency_lfq(df_index):
            """
            The multiindex dataframe is subjected to stringency filtering. Only proteins which were identified with
            at least [4] consecutive data points regarding the "LFQ intensity", and if summed MS/MS counts >= n(fractions)*[2]
            (LFQ5: min 10 and LFQ6: min 12, respectively; coverage filtering) were included.
            Data is annotated based on specified marker set e.g. eLife.

            Args:
                df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Typ
                self:
                    df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
                    fractions: list of fractions e.g. ["01K", "03K", ...]
                    summed_MSMS_counts: int, 2
                    consecutiveLFQi: int, 4

            Returns:
                df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity"
                    and "MS/MS count" define a single-level column index
                self:
                    shape_dict["Shape after MS/MS value filtering"] of df_mscount_mapstacked
                    shape_dict["Shape after consecutive value filtering"] of df_stringency_mapfracstacked
            """

            df_index = df_index.stack("Map")

            # sorting the level 0, in order to have LFQ intensity - MS/MS count instead of continuous alternation
            df_index.sort_index(axis=1, level=0, inplace=True)

            # "MS/MS count"-column: take the sum over the fractions; if the sum is larger than n[fraction]*2,
            # it will be stored in the new dataframe
            minms = (len(self.fractions) * self.summed_MSMS_counts)

            if minms > 0:
                df_mscount_mapstacked = df_index.loc[df_index[("MS/MS count")].apply(np.sum, axis=1) >= minms]

                shape_dict["Shape after MS/MS value filtering"]=df_mscount_mapstacked.unstack("Map").shape
                df_stringency_mapfracstacked = df_mscount_mapstacked.copy()
            else:
                # minms == 0 disables the MS/MS-count filter entirely
                df_stringency_mapfracstacked = df_index.copy()

            # series no dataframe is generated; if there are at least i.e. 4 consecutive non-NANs, data will be retained
            # (rolling window of length consecutiveLFQi over the finite-value mask must sum to consecutiveLFQi somewhere)
            df_stringency_mapfracstacked.sort_index(level="Fraction", axis=1, key=natsort_index_keys, inplace=True)
            df_stringency_mapfracstacked = df_stringency_mapfracstacked.loc[
                df_stringency_mapfracstacked[("LFQ intensity")]\
                .apply(lambda x: np.isfinite(x), axis=0)\
                .apply(lambda x: sum(x) >= self.consecutiveLFQi and any(x.rolling(window=self.consecutiveLFQi).sum() >= self.consecutiveLFQi), axis=1)]

            shape_dict["Shape after consecutive value filtering"]=df_stringency_mapfracstacked.unstack("Map").shape

            df_stringency_mapfracstacked = df_stringency_mapfracstacked.copy().stack("Fraction")

            # Annotation with marker genes (left merge keeps all measured proteins)
            df_organellarMarkerSet = self.df_organellarMarkerSet

            df_stringency_mapfracstacked.reset_index(inplace=True)
            df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")

            # Everything except the two data columns goes back into the row index.
            df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
                                                    if c!="MS/MS count" and c!="LFQ intensity"], inplace=True)
            # Proteins without a marker annotation get the compartment label "undefined".
            df_stringency_mapfracstacked.rename(index={np.nan : "undefined"}, level="Compartment", inplace=True)

            return df_stringency_mapfracstacked


        def normalization_01_lfq(df_stringency_mapfracstacked):
            """
            The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("LFQ intensity").

            Args:
                df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked, "LFQ intensity"
                    and "MS/MS count" define a single-level column index
                self:
                    fractions: list of fractions e.g. ["01K", "03K", ...]
            Returns:
                df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity"
                    is 0-1 normalized and renamed to "normalized profile"; the columns "normalized profile" and
                    "MS/MS count" are stored as single level indices; plotting is possible now
            """

            df_01norm_mapstacked = df_stringency_mapfracstacked["LFQ intensity"].unstack("Fraction")

            # 0-1 normalization: each profile (row over the fractions) is divided by its sum, i.e. it sums to 1.
            df_01norm_unstacked = df_01norm_mapstacked.div(df_01norm_mapstacked.sum(axis=1), axis=0)

            # Keep the non-intensity columns (e.g. "MS/MS count") and re-attach the normalized values.
            df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
            df_01_stacked = df_rest.join(pd.DataFrame(df_01norm_unstacked.stack(
                "Fraction"),columns=["LFQ intensity"]))

            # rename columns: "LFQ intensity" into "normalized profile"
            df_01_stacked.columns = [col if col!="LFQ intensity" else "normalized profile" for col in df_01_stacked.columns]

            # imputation: missing fraction values are set to 0
            # NOTE(review): np.NaN was removed in numpy 2.0 — np.nan is the forward-compatible spelling.
            df_01_stacked = df_01_stacked.unstack("Fraction").replace(np.NaN, 0).stack("Fraction")

            df_01_stacked = df_01_stacked.sort_index()

            return df_01_stacked


        def logarithmization_lfq(df_stringency_mapfracstacked):
            """The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("LFQ intensity").

            Args:
                df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity"
                    and "MS/MS count" define a single-level column index

            Returns:
                df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile"
                    originates from logarithmized "LFQ intensity"; the columns "log profile" and "MS/MS count" are
                    stored as single level indices; PCA is possible now
            """

            # logarithmizing, basis of 2
            df_lognorm_ratio_stacked = df_stringency_mapfracstacked["LFQ intensity"].transform(np.log2)
            df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
            df_log_stacked = df_rest.join(pd.DataFrame(df_lognorm_ratio_stacked, columns=["LFQ intensity"]))

            # "LFQ intensity" will be renamed to "log profile"
            df_log_stacked.columns = [col if col!="LFQ intensity" else "log profile" for col in df_log_stacked.columns]

            return df_log_stacked


        def split_ids_uniprot(el):
            """
            This finds the primary canonical protein ID in the protein group.
            If no canonical ID is present it selects the first isoform ID.
            """
            # First entry of the semicolon-separated protein group.
            p1 = el.split(";")[0]
            if "-" not in p1:
                return p1
            else:
                # p1 is an isoform accession (e.g. "P12345-2"); prefer the canonical accession
                # if it occurs anywhere in the group, otherwise fall back to the first isoform ID.
                p = p1.split("-")[0]
                if p in el.split(";"):
                    return p
                else:
                    return p1

        if self.acquisition == "SILAC - MQ":

            # Index data
            df_index = indexingdf()
            map_names = df_index.columns.get_level_values("Map").unique()
            self.map_names = map_names

            # Run stringency filtering and normalization
            df_stringency_mapfracstacked = stringency_silac(df_index)
            self.df_stringencyFiltered = df_stringency_mapfracstacked
            self.df_01_stacked = normalization_01_silac(df_stringency_mapfracstacked)
            self.df_log_stacked = logarithmization_silac(df_stringency_mapfracstacked)

            # format and reduce 0-1 normalized data for comparison with other experiments
            df_01_comparison = self.df_01_stacked.copy()
            comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
            df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
            df_01_comparison.set_index(comp_ids, append=True, inplace=True)
            df_01_comparison.drop(["Ratio H/L count", "Ratio H/L variability [%]"], inplace=True, axis=1)
            # Flatten (Map, Fraction) columns into single "Map?Fraction" labels for JSON export.
            df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
            df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
            df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")

            # populate analysis summary dictionary with (meta)data
            unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
            unique_proteins.sort()
            self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()
            self.analysis_summary_dict["Unique Proteins"] = unique_proteins
            self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
            analysis_parameters = {"acquisition" : self.acquisition,
                                   "filename" : self.filename,
                                   "comment" : self.comment,
                                   "Ratio H/L count" : self.RatioHLcount,
                                   "Ratio variability" :
                                   self.RatioVariability,
                                   "organism" : self.organism,
                                   }
            self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()

            # TODO this line needs to be removed.
            self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()


        elif self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ" or self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":

            #if not summed_MS_counts:
            #    summed_MS_counts = self.summed_MS_counts
            #if not consecutiveLFQi:
            #    consecutiveLFQi = self.consecutiveLFQi

            # Index data with the reader matching the acquisition source (MaxQuant vs Spectronaut).
            if self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ":
                df_index = indexingdf()
            elif self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
                df_index = spectronaut_LFQ_indexingdf()

            map_names = df_index.columns.get_level_values("Map").unique()
            self.map_names = map_names

            # Run stringency filtering and normalization
            df_stringency_mapfracstacked = stringency_lfq(df_index)
            self.df_stringencyFiltered = df_stringency_mapfracstacked
            self.df_log_stacked = logarithmization_lfq(df_stringency_mapfracstacked)
            self.df_01_stacked = normalization_01_lfq(df_stringency_mapfracstacked)

            # format and reduce 0-1 normalized data for comparison with other experiments
            df_01_comparison = self.df_01_stacked.copy()
            comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
            df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
            df_01_comparison.set_index(comp_ids, append=True, inplace=True)
            df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
            # Flatten (Map, Fraction) columns into single "Map?Fraction" labels for JSON export.
            df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
            df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
            df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")

            # populate analysis summary dictionary with (meta)data
            self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
            unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
            unique_proteins.sort()
            self.analysis_summary_dict["Unique Proteins"] = unique_proteins
            self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
            analysis_parameters = {"acquisition" : self.acquisition,
                                   "filename" : self.filename,
                                   "comment" : self.comment,
                                   "consecutive data points" : self.consecutiveLFQi,
                                   "summed MS/MS counts" : self.summed_MSMS_counts,
                                   "organism" : self.organism,
                                   }
            self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
            self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
            #return self.df_01_stacked


        elif self.acquisition == "Custom":
            # Custom data is assumed to arrive already normalized; only annotate and reformat.
            df_index = custom_indexing_and_normalization()
            map_names = df_index.columns.get_level_values("Map").unique()
            self.map_names = map_names
            df_01_stacked = df_index.stack(["Map", "Fraction"])
            # Annotate with the organellar marker set; unannotated proteins become "undefined".
            df_01_stacked = df_01_stacked.reset_index().merge(self.df_organellarMarkerSet, how="left", on="Gene names")
            df_01_stacked.set_index([c for c in df_01_stacked.columns if c not in ["normalized profile"]], inplace=True)
            df_01_stacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
            self.df_01_stacked = df_01_stacked

            # format and reduce 0-1 normalized data for comparison with other experiments
            df_01_comparison = self.df_01_stacked.copy()
            comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
            df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
            df_01_comparison.set_index(comp_ids, append=True, inplace=True)
            df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
            df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
            df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
            df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")

            # populate analysis summary dictionary with (meta)data
            self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
            unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
            unique_proteins.sort()
            self.analysis_summary_dict["Unique Proteins"] = unique_proteins
            self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
            analysis_parameters = {"acquisition" : self.acquisition,
                                   "filename" : self.filename,
                                   "comment" : self.comment,
                                   "organism" : self.organism,
                                   }
            self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
            self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()

        else:
            return "I do not know this"


    def plot_log_data(self):
        """
        Plot a histogram of the log-transformed data, faceted by Fraction (columns) and Map (rows).

        Args:
            self.df_log_stacked

        Returns:
            log_histogram: Histogram of log transformed data
        """

        log_histogram = px.histogram(self.df_log_stacked.reset_index().sort_values(["Map", "Fraction"], key=natsort_list_keys),
                                     x="log profile",
                                     facet_col="Fraction",
                                     facet_row="Map",
                                     template="simple_white",
                                     labels={"log profile": "log tranformed data ({})".format("LFQ intenisty" if self.acquisition != "SILAC - MQ" else "Ratio H/L")}
                                     )

        # Strip per-facet axis titles; shared labels are added once as paper-anchored annotations below.
        log_histogram.for_each_xaxis(lambda axis: axis.update(title={"text":""}))
        log_histogram.for_each_yaxis(lambda axis: axis.update(title={"text":""}))
        # NOTE(review): the shared x label reads "log2(LFQ intensity)" even for SILAC data — confirm wording.
        log_histogram.add_annotation(x=0.5, y=0, yshift=-50, xref="paper",showarrow=False, yref="paper",
                                     text="log2(LFQ intensity)")
        log_histogram.add_annotation(x=0, y=0.5, textangle=270, xref="paper",showarrow=False, yref="paper", xshift=-50,
                                     text="count")
        # Shorten facet annotations from e.g. "Fraction=01K" to "01K".
        log_histogram.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))

        return log_histogram


    def quantity_profiles_proteinGroups(self):
        """
        Number of profiles, protein groups per experiment, and the data completness of profiles (total quantity, intersection) is calculated.
        Args:
            self:
                acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
                df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Typ
                df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name

        Returns:
            self:
                df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; containign following information:
                    npg_t: protein groups per experiment total quantity
                    npgf_t = groups with valid profiles per experiment total quanitity
                    npr_t: profiles with any valid values
                    nprf_t = total number of valid profiles
                    npg_i: protein groups per experiment intersection
                    npgf_i = groups with valid profiles per experiment intersection
                    npr_i: profiles with any valid values in the intersection
                    nprf_i = total number of valid profiles in the intersection
                    npr_t_dc: profiles, % values != nan
                    nprf_t_dc = profiles, total, filtered, % values != nan
                    npr_i_dc: profiles, intersection, % values != nan
                    nprf_i_dc = profiles, intersection, filtered, % values != nan
                df_npg | df_npgf: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
                    npg_f = protein groups, per fraction
                    or npgf_f = protein groups, filtered, per fraction
                df_npg_dc | df_npgf_dc: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
                    npg_f_dc = protein groups, per fraction, % values != nan
                    or npgf_f_dc = protein groups, filtered, per fraction, % values != nan
        """

        # Select raw and filtered data depending on acquisition mode; for LFQ/Custom the
        # imputed zeros are converted back to NaN so completeness statistics are comparable.
        if self.acquisition == "SILAC - MQ":
            df_index = self.df_index["Ratio H/L"]
            df_01_stacked = self.df_01_stacked["normalized profile"]
        elif self.acquisition.startswith("LFQ"):
            df_index = self.df_index["LFQ intensity"]
            df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
        elif self.acquisition == "Custom":
            df_index = self.df_index["normalized profile"]
            df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)

        #unfiltered
        npg_t = df_index.shape[0]
        df_index_MapStacked = df_index.stack("Map")
        npr_t = df_index_MapStacked.shape[0]/len(self.map_names)
        npr_t_dc = 1-df_index_MapStacked.isna().sum().sum()/np.prod(df_index_MapStacked.shape)

        #filtered
        npgf_t = df_01_stacked.unstack(["Map", "Fraction"]).shape[0]
        df_01_MapStacked = df_01_stacked.unstack("Fraction")
        nprf_t = df_01_MapStacked.shape[0]/len(self.map_names)
        nprf_t_dc = 1-df_01_MapStacked.isna().sum().sum()/np.prod(df_01_MapStacked.shape)

        #unfiltered intersection
        # NOTE(review): bare except — falls back to "Protein IDs" grouping when the index has no
        # "Sequence" level; catching KeyError explicitly would be more precise.
        try:
            df_index_intersection = df_index_MapStacked.groupby(level="Sequence").filter(lambda x : len(x)==len(self.map_names))
        except:
            df_index_intersection = df_index_MapStacked.groupby(level="Protein IDs").filter(lambda x : len(x)==len(self.map_names))
        npr_i = df_index_intersection.shape[0]/len(self.map_names)
        npr_i_dc = 1-df_index_intersection.isna().sum().sum()/np.prod(df_index_intersection.shape)
        npg_i = df_index_intersection.unstack("Map").shape[0]

        #filtered intersection
        try:
            df_01_intersection = df_01_MapStacked.groupby(level = "Sequence").filter(lambda x : len(x)==len(self.map_names))
        except:
            df_01_intersection = df_01_MapStacked.groupby(level = "Protein IDs").filter(lambda x : len(x)==len(self.map_names))
        nprf_i = df_01_intersection.shape[0]/len(self.map_names)
        nprf_i_dc = 1-df_01_intersection.isna().sum().sum()/np.prod(df_01_intersection.shape)
        npgf_i = df_01_intersection.unstack("Map").shape[0]

        # summarize in dataframe and save to attribute
        df_quantity_pr_pg = pd.DataFrame(
            {
            "filtering": pd.Series(["before filtering", "before filtering", "after filtering", "after filtering"], dtype=np.dtype("O")),
            "type": pd.Series(["total", "intersection", "total", "intersection"], dtype=np.dtype("O")),
            "number of protein groups": pd.Series([npg_t, npg_i, npgf_t, npgf_i], dtype=np.dtype("float")),
            "number of profiles": pd.Series([npr_t, npr_i, nprf_t, nprf_i], dtype=np.dtype("float")),
            "data completeness of profiles": pd.Series([npr_t_dc, npr_i_dc, nprf_t_dc, nprf_i_dc], dtype=np.dtype("float"))})

        self.df_quantity_pr_pg = df_quantity_pr_pg.reset_index()
        self.analysis_summary_dict["quantity: profiles/protein groups"] = self.df_quantity_pr_pg.to_json()

        #additional depth assessment per fraction
        dict_npgf = {}
        dict_npg = {}
        list_npg_dc = []
        list_npgf_dc = []

        # First pass (unfiltered intersection) fills dict_npg/list_npg_dc; second pass
        # (filtered intersection) fills dict_npgf/list_npgf_dc — the branch below relies
        # on each fraction already being a key of dict_npg on the second iteration.
        for df_intersection in [df_index_intersection, df_01_intersection]:
            for fraction in self.fractions:
                df_intersection_frac = df_intersection[fraction]
                npgF_f_dc = 1-df_intersection_frac.isna().sum()/len(df_intersection_frac)
                # Count protein groups by how many maps lack a value for this fraction.
                npgF_f = df_intersection_frac.unstack("Map").isnull().sum(axis=1).value_counts()
                if fraction not in dict_npg.keys():
                    dict_npg[fraction] = npgF_f
                    list_npg_dc.append(npgF_f_dc)
                else:
                    dict_npgf[fraction] = npgF_f
                    list_npgf_dc.append(npgF_f_dc)

        df_npg = pd.DataFrame(dict_npg)
        df_npg.index.name = "Protein Groups present in:"
        df_npg.rename_axis("Fraction", axis=1, inplace=True)
        df_npg = df_npg.stack("Fraction").reset_index()
        df_npg = df_npg.rename({0: "Protein Groups"}, axis=1)
        df_npg.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)

        df_npgf = pd.DataFrame(dict_npgf)
        df_npgf.index.name = "Protein Groups present in:"
        df_npgf.rename_axis("Fraction", axis=1, inplace=True)
        df_npgf = df_npgf.stack("Fraction").reset_index()
        df_npgf = df_npgf.rename({0: "Protein Groups"}, axis=1)
        df_npgf.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)

        max_df_npg = df_npg["Protein Groups present in:"].max()
        min_df_npg = df_npg["Protein Groups present in:"].min()
        rename_numOFnans = {}
        # Map "number of missing maps" (x, descending) to a human-readable "present in N maps" label (y, ascending).
        for x, y in zip(range(max_df_npg,min_df_npg-1, -1), range(max_df_npg+1)):
            if y == 1:
                rename_numOFnans[x] = "{} Map".format(y)
            elif y == 0:
                # NOTE(review): .format(y) is a no-op on a string without placeholders.
                rename_numOFnans[x] = "PG not identified".format(y)
            else:
                rename_numOFnans[x] = "{} Maps".format(y)
        for keys in rename_numOFnans.keys():
            df_npg.loc[df_npg["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
            df_npgf.loc[df_npgf["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]

        # summarize in dataframe and save to attributes
        self.df_npg_dc = pd.DataFrame(
            {
            "Fraction" : pd.Series(self.fractions),
            "Data completeness before filtering": pd.Series(list_npg_dc),
            "Data completeness after filtering": pd.Series(list_npgf_dc),
            })

        self.df_npg = df_npg
        self.df_npgf = df_npgf


    def plot_quantity_profiles_proteinGroups(self):
        """
        Build bar charts summarizing protein-group / profile counts and data completeness.

        Args:
            self:
                df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; further information: see above

        Returns:
            fig_npg, fig_npr, fig_npr_dc, fig_npg_F, fig_npgf_F, fig_npg_F_dc: plotly figures
        """
        df_quantity_pr_pg = self.df_quantity_pr_pg

        # Shared layout for the bar charts below.
        layout = go.Layout(barmode="overlay",
                           xaxis_tickangle=90,
                           autosize=False,
                           width=300,
                           height=500,
                           xaxis=go.layout.XAxis(linecolor="black",
                                                 linewidth=1,
                                                 #title="Map",
                                                 mirror=True),
                           yaxis=go.layout.YAxis(linecolor="black",
                                                 linewidth=1,
                                                 mirror=True),
                           template="simple_white")

        # Number of protein groups, one trace per "type" (total / intersection).
        fig_npg = go.Figure()
        for t in df_quantity_pr_pg["type"].unique():
            plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
            fig_npg.add_trace(go.Bar(
                x=plot_df["filtering"],
                y=plot_df["number of protein groups"],
                name=t))
        fig_npg.update_layout(layout, title="Number of Protein Groups", yaxis=go.layout.YAxis(title="Protein Groups"))

        # Number of profiles, one trace per "type".
        fig_npr = go.Figure()
        for t in df_quantity_pr_pg["type"].unique():
            plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
            fig_npr.add_trace(go.Bar(
x=plot_df["filtering"], y=plot_df["number of profiles"], name=t)) fig_npr.update_layout(layout, title="Number of Profiles") df_quantity_pr_pg = df_quantity_pr_pg.sort_values("filtering") fig_npr_dc = go.Figure() for t in df_quantity_pr_pg["filtering"].unique(): plot_df = df_quantity_pr_pg[df_quantity_pr_pg["filtering"] == t] fig_npr_dc.add_trace(go.Bar( x=plot_df["type"], y=plot_df["data completeness of profiles"], name=t)) fig_npr_dc.update_layout(layout, title="Coverage", yaxis=go.layout.YAxis(title="Data completness")) #fig_npr_dc.update_xaxes(tickangle=30) fig_npg_F = px.bar(self.df_npg, x="Fraction", y="Protein Groups", color="Protein Groups present in:", template="simple_white", title = "Protein groups per fraction - before filtering", width=500) fig_npgf_F = px.bar(self.df_npgf, x="Fraction", y="Protein Groups", color="Protein Groups present in:", template="simple_white", title = "Protein groups per fraction - after filtering", width=500) fig_npg_F_dc = go.Figure() for data_type in ["Data completeness after filtering", "Data completeness before filtering"]: fig_npg_F_dc.add_trace(go.Bar( x=self.df_npg_dc["Fraction"], y=self.df_npg_dc[data_type], name=data_type)) fig_npg_F_dc.update_layout(layout, barmode="overlay", title="Data completeness per fraction", yaxis=go.layout.YAxis(title=""), height=450, width=600) return fig_npg, fig_npr, fig_npr_dc, fig_npg_F, fig_npgf_F, fig_npg_F_dc def perform_pca(self): """ PCA will be performed, using logarithmized data. Args: self: markerproteins: dictionary, key: cluster name, value: gene names (e.g. 
{"Proteasome" : ["PSMA1", "PSMA2",...], ...} "V-type proton ATP df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "LFQ intensity" and "Ratio H/L", respectively; additionally the columns "MS/MS count" and "Ratio H/L count|Ratio H/L variability [%]" are stored as single level indices df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to "normalized profile"; the columns "normalized profile"" and "MS/MS count" are stored as single level indices; plotting is possible now Returns: self: df_pca: df, PCA was performed, while keeping the information of the Maps columns: "PC1", "PC2", "PC3" index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Map" "Compartment" df_pca_combined: df, PCA was performed across the Maps columns: "PC1", "PC2", "PC3" index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Compartment" df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are consistent throughout all maps / coverage filtering. 
""" markerproteins = self.markerproteins if self.acquisition == "SILAC - MQ": df_01orlog_fracunstacked = self.df_log_stacked["log profile"].unstack("Fraction").dropna() df_01orlog_MapFracUnstacked = self.df_log_stacked["log profile"].unstack(["Fraction", "Map"]).dropna() elif self.acquisition.startswith("LFQ") or self.acquisition == "Custom": df_01orlog_fracunstacked = self.df_01_stacked["normalized profile"].unstack("Fraction").dropna() df_01orlog_MapFracUnstacked = self.df_01_stacked["normalized profile"].unstack(["Fraction", "Map"]).dropna() pca = PCA(n_components=3) # df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3" df_pca = pd.DataFrame(pca.fit_transform(df_01orlog_fracunstacked)) df_pca.columns = ["PC1", "PC2", "PC3"] df_pca.index = df_01orlog_fracunstacked.index self.df_pca = df_pca.sort_index(level=["Gene names", "Compartment"]) # df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3" df_pca_combined = pd.DataFrame(pca.fit_transform(df_01orlog_MapFracUnstacked)) df_pca_combined.columns = ["PC1", "PC2", "PC3"] df_pca_combined.index = df_01orlog_MapFracUnstacked.index self.df_pca_combined = df_pca_combined.sort_index(level=["Gene names", "Compartment"]) map_names = self.map_names df_pca_all_marker_cluster_maps = pd.DataFrame() df_pca_filtered = df_pca.unstack("Map").dropna() for clusters in markerproteins: for marker in markerproteins[clusters]: try: plot_try_pca = df_pca_filtered.xs(marker, level="Gene names", drop_level=False) except KeyError: continue df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.append( plot_try_pca) if len(df_pca_all_marker_cluster_maps) == 0: df_pca_all_marker_cluster_maps = df_pca_filtered.stack("Map") else: df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.stack("Map") self.df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.sort_index(level=["Gene names", "Compartment"]) def plot_global_pca(self, map_of_interest="Map1", 
cluster_of_interest="Proteasome", x_PCA="PC1", y_PCA="PC3", collapse_maps=False): """" PCA plot will be generated Args: self: df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Map", "Compartment", Returns: pca_figure: global PCA plot """ if collapse_maps == False: df_global_pca = self.df_pca.unstack("Map").swaplevel(0,1, axis=1)[map_of_interest].reset_index() else: df_global_pca = self.df_pca_combined.reset_index() for i in self.markerproteins[cluster_of_interest]: df_global_pca.loc[df_global_pca["Gene names"] == i, "Compartment"] = "Selection" compartments = self.df_organellarMarkerSet["Compartment"].unique() compartment_color = dict(zip(compartments, self.css_color)) compartment_color["Selection"] = "black" compartment_color["undefined"] = "lightgrey" fig_global_pca = px.scatter(data_frame=df_global_pca, x=x_PCA, y=y_PCA, color="Compartment", color_discrete_map=compartment_color, title= "Protein subcellular localization by PCA for {}".format(map_of_interest) if collapse_maps == False else "Protein subcellular localization by PCA of combined maps", hover_data=["Protein IDs", "Gene names", "Compartment"], template="simple_white", opacity=0.9 ) return fig_global_pca def plot_cluster_pca(self, cluster_of_interest="Proteasome"): """ PCA plot will be generated Args: self: markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...} df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are consistent throughout all maps / coverage filtering. 
Returns: pca_figure: PCA plot, for one protein cluster all maps are plotted """ df_pca_all_marker_cluster_maps = self.df_pca_all_marker_cluster_maps map_names = self.map_names markerproteins = self.markerproteins try: for maps in map_names: df_setofproteins_PCA = pd.DataFrame() for marker in markerproteins[cluster_of_interest]: try: plot_try_pca = df_pca_all_marker_cluster_maps.xs((marker, maps), level=["Gene names", "Map"], drop_level=False) except KeyError: continue df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca) df_setofproteins_PCA.reset_index(inplace=True) if maps == map_names[0]: pca_figure = go.Figure( data=[go.Scatter3d(x=df_setofproteins_PCA.PC1, y=df_setofproteins_PCA.PC2, z=df_setofproteins_PCA.PC3, hovertext=df_setofproteins_PCA["Gene names"], mode="markers", name=maps )]) else: pca_figure.add_trace(go.Scatter3d(x=df_setofproteins_PCA.PC1, y=df_setofproteins_PCA.PC2, z=df_setofproteins_PCA.PC3, hovertext=df_setofproteins_PCA["Gene names"], mode="markers", name=maps )) pca_figure.update_layout(autosize=False, width=500, height=500, title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest), template="simple_white") return pca_figure except: return "This protein cluster was not quantified" def calc_biological_precision(self): """ This function calculates the biological precision of all quantified protein clusters. It provides access to the data slice for all marker proteins, the distance profiles and the aggregated distances. It repeatedly applies the methods get_marker_proteins_unfiltered and calc_cluster_distances. TODO: integrate optional arguments for calc_cluster_distances: complex_profile, distance_measure. TODO: replace compatibiliy attributes with function return values and adjust attribute usage in downstream plotting functions. 
        Args:
            self attributes:
                markerproteins: dict, contains marker protein assignments
                df_01_stacked: df, contains 0-1 normalized data, required for execution of get_marker_proteins_unfiltered

        Returns:
            df_alldistances_individual_mapfracunstacked: df, distance profiles, fully unstacked
            df_alldistances_aggregated_mapunstacked: df, profile distances (manhattan distance by default), fully unstacked
            df_allclusters_01_unfiltered_mapfracunstacked: df, collected marker protein data
            self attributes:
                df_distance_noindex: compatibility version of df_alldistances_aggregated_mapunstacked
                df_allclusters_01_unfiltered_mapfracunstacked
                df_allclusters_clusterdist_fracunstacked_unfiltered: compatibility version of df_allclusters_01_unfiltered_mapfracunstacked
                    (only used by quantification_overview)
                df_allclusters_clusterdist_fracunstacked: compatibility version of df_alldistances_individual_mapfracunstacked
                genenames_sortedout_list = list of gene names with incomplete coverage
                analysis_summary_dict entries:
                    "Manhattan distances" = df_distance_noindex
                    "Distances to the median profile": df_allclusters_clusterdist_fracunstacked, sorted and melted
        """

        df_alldistances_individual_mapfracunstacked = pd.DataFrame()
        df_alldistances_aggregated_mapunstacked = pd.DataFrame()
        df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame()

        for cluster in self.markerproteins.keys():
            # collect data irrespective of coverage
            df_cluster_unfiltered = self.get_marker_proteins_unfiltered(cluster)
            df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked.append(df_cluster_unfiltered)

            # filter for coverage and calculate distances
            df_cluster = df_cluster_unfiltered.dropna()
            if len(df_cluster) == 0:
                continue
            df_distances_aggregated, df_distances_individual = self.calc_cluster_distances(df_cluster)
            df_alldistances_individual_mapfracunstacked = df_alldistances_individual_mapfracunstacked.append(df_distances_individual)
            df_alldistances_aggregated_mapunstacked = df_alldistances_aggregated_mapunstacked.append(df_distances_aggregated)

        if len(df_alldistances_individual_mapfracunstacked) == 0:
            # No cluster had full coverage: set empty compatibility attributes and bail out.
            self.df_distance_noindex = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
            self.df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
            self.df_allclusters_clusterdist_fracunstacked_unfiltered = pd.DataFrame(columns = ["Fraction"])
            self.df_allclusters_clusterdist_fracunstacked = pd.DataFrame(columns = ["Fraction"])
            self.genenames_sortedout_list = "No clusters found"
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()

        else:
            df_alldistances_aggregated_mapunstacked.columns.name = "Map"
            ## Get compatibility with plotting functions, by mimicking assignment of old functions:
            # old output of distance_calculation
            self.df_distance_noindex = df_alldistances_aggregated_mapunstacked.stack("Map").reset_index().rename({0: "distance"}, axis=1)
            self.analysis_summary_dict["Manhattan distances"] = self.df_distance_noindex.to_json()
            # old output of multiple_iterations
            # self.df_allclusters_clusterdist_fracunstacked_unfiltered --> this won't exist anymore, replaced by:
            self.df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked
            # kept for testing of quantification table:
            self.df_allclusters_clusterdist_fracunstacked_unfiltered = df_allclusters_01_unfiltered_mapfracunstacked.stack("Map")
            # same as before, but now already abs
            self.df_allclusters_clusterdist_fracunstacked = df_alldistances_individual_mapfracunstacked.stack("Map")

            df_dist_to_median = self.df_allclusters_clusterdist_fracunstacked.stack("Fraction")
            df_dist_to_median.name = "distance"
            df_dist_to_median = df_dist_to_median.reindex(index=natsort.natsorted(df_dist_to_median.index))
            self.analysis_summary_dict["Distances to the median profile"] = df_dist_to_median.reset_index().to_json()

            # Genes collected for clusters but dropped by the coverage filter.
            self.genenames_sortedout_list = [el for el in df_allclusters_01_unfiltered_mapfracunstacked.index.get_level_values("Gene names")
                                             if el not in df_alldistances_individual_mapfracunstacked.index.get_level_values("Gene names")]

            return df_alldistances_individual_mapfracunstacked, df_alldistances_aggregated_mapunstacked, df_allclusters_01_unfiltered_mapfracunstacked


    def get_marker_proteins_unfiltered(self, cluster):
        """
        This function retrieves the 0-1 normalized data for any given protein cluster, unfiltered for coverage.

        Args:
            cluster: str, cluster name, should be one of self.markerproteins.keys()
            self attributes:
                df_01_stacked: df, contains the fully stacked 0-1 normalized data
                markerproteins: dict, contains marker protein assignments

        Returns:
            df_cluster_unfiltered: df, unfiltered data for the selected cluster, maps and fractions are unstacked.
            self attributes:
                None
        """

        df_in = self.df_01_stacked["normalized profile"].unstack("Fraction")
        markers = self.markerproteins[cluster]

        # retrieve marker proteins
        # NOTE(review): bare except — a gene missing from the data silently skips that marker;
        # catching KeyError explicitly would be more precise.
        df_cluster_unfiltered = pd.DataFrame()
        for marker in markers:
            try:
                df_p = df_in.xs(marker, level="Gene names", axis=0, drop_level=False)
            except:
                continue
            df_cluster_unfiltered = df_cluster_unfiltered.append(df_p)
        if len(df_cluster_unfiltered) == 0:
            return df_cluster_unfiltered

        # Unstack maps and add Cluster to index
        df_cluster_unfiltered = df_cluster_unfiltered.unstack("Map")
        df_cluster_unfiltered.set_index(pd.Index(np.repeat(cluster, len(df_cluster_unfiltered)), name="Cluster"), append=True, inplace=True)

        return df_cluster_unfiltered


    def calc_cluster_distances(self, df_cluster, complex_profile=np.median, distance_measure="manhattan"):
        """
        Calculates the absolute differences in each fraction and the profile distances relative to the center of a cluster.
        Per default this is the manhattan distance to the median profile.

        Args:
            df_cluster: df, 0-1 normalized profiles of cluster members, should already be filtered
                for full coverage and be in full wide format.
complex_profile: fun, function provided to apply for calculating the reference profile, default: np.median. distance_measure: str, selected distance measure to calculate. Currently only 'manhattan' is supported, everything else raises a ValueError. self attributes: None Returns: df_distances_aggregated: df, proteins x maps, if stacked distance column is currently named 0 but contains manhattan distances. df_distances_individual: df, same shape as df_cluster, but now with absolute differences to the reference. self attribtues: None """ df_distances_aggregated = pd.DataFrame() ref_profile = pd.DataFrame(df_cluster.apply(complex_profile, axis=0, result_type="expand")).T df_distances_individual = df_cluster.apply(lambda x: np.abs(x-ref_profile.iloc[0,:]), axis=1) # loop over maps maps = set(df_cluster.columns.get_level_values("Map")) for m in maps: if distance_measure == "manhattan": d_m = pw.manhattan_distances(df_cluster.xs(m, level="Map", axis=1), ref_profile.xs(m, level="Map", axis=1)) else: raise ValueError(distance_measure) d_m = pd.DataFrame(d_m, columns=[m], index=df_cluster.index) df_distances_aggregated = pd.concat([df_distances_aggregated, d_m], axis=1) df_distances_aggregated.columns.set_names(names="Map", inplace=True) return df_distances_aggregated, df_distances_individual def profiles_plot(self, map_of_interest="Map1", cluster_of_interest="Proteasome"): """ The function allows the plotting of filtered and normalized spatial proteomic data using plotly.express. The median profile is also calculated based on the overlapping proteins. Profiles of proteins that are not quantified in all maps are dashed. 
Args: map_of_interest: str, must be in self.map_names cluster_of_interest: str, must be in self.markerproteins.keys() self attribtues: df_allclusters_01_unfiltered_mapfracunstacked: df, contains 0-1 normalized profiles for all markerproteins detected in any map Returns: abundance_profiles_and_median_figure: plotly line plot, displaying the relative abundance profiles. """ try: df_setofproteins = self.df_allclusters_01_unfiltered_mapfracunstacked.xs(cluster_of_interest, level="Cluster", axis=0) df_setofproteins_median = df_setofproteins.dropna().xs(map_of_interest, level="Map", axis=1).median(axis=0) # fractions get sorted df_setofproteins = df_setofproteins.xs(map_of_interest, level="Map", axis=1).stack("Fraction") df_setofproteins = df_setofproteins.reindex(index=natsort.natsorted(df_setofproteins.index)) df_setofproteins.name = "normalized profile" # make it available for plotting df_setofproteins = df_setofproteins.reindex(index=natsort.natsorted(df_setofproteins.index)) df_setofproteins = df_setofproteins.reset_index() abundance_profiles_figure = px.line(df_setofproteins, x="Fraction", y="normalized profile", color="Gene names", line_group="Sequence" if "Sequence" in df_setofproteins.columns else "Gene names", template="simple_white", title="Relative abundance profile for {} of <br>the protein cluster: {}".format(map_of_interest, cluster_of_interest) ) df_setofproteins_median.name = "normalized profile" #fractions get sorted df_setofproteins_median = df_setofproteins_median.reindex(index=natsort.natsorted(df_setofproteins_median.index)) # make it available for plotting df_setofproteins_median = df_setofproteins_median.reset_index() df_setofproteins_median.insert(0, "Gene names", np.repeat("Median profile", len(df_setofproteins_median))) abundance_profiles_and_median_figure = abundance_profiles_figure.add_scatter(x=df_setofproteins_median["Fraction"], y=df_setofproteins_median["normalized profile"], name="Median profile" ) # dash lines for proteins that have 
insufficient coverage across maps abundance_profiles_and_median_figure.for_each_trace(lambda x: x.update(line={"dash":"dash"}), selector=lambda x: x.name in self.genenames_sortedout_list) return abundance_profiles_and_median_figure except: return "This protein cluster was not quantified" def quantification_overview(self, cluster_of_interest="Proteasome"): """ Args: self.df_allclusters_clusterdist_fracunstacked_unfiltered columns: 01K, 03K, 06K, 12K, 24K, 80K index: Gene names, Protein IDs, C-Score, Q-value, Map, Compartment, Cluster Returns: df """ df_quantification_overview = self.df_allclusters_clusterdist_fracunstacked_unfiltered.xs(cluster_of_interest, level="Cluster", axis=0)\ [self.fractions[0]].unstack("Map") if "Sequence" in df_quantification_overview.index.names: df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i in ["Sequence","Gene names"]]) else: df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i=="Gene names"]) df_quantification_overview = df_quantification_overview.notnull().replace({True: "x", False: "-"}) return df_quantification_overview def distance_boxplot(self, cluster_of_interest="Proteasome"): """ A box plot for 1 desired cluster, and across all maps is generated displaying the distribution of the e.g. Manhattan distance. Args: self: df_distance_noindex: stored as attribute (self.df_distance_noindex),index is reset. It contains the column name "distance", in which the e.g. Manhattan distances for each individual protein of the specified clusters (see self.markerproteins) are stored map_names: individual map names are stored as an index Returns: distance_boxplot_figure: boxplot. 
Along the x-axis the maps, along the y-axis the distances are shown """ map_names = self.map_names df_distance_noindex = self.df_distance_noindex # "Gene names", "Map", "Cluster" and transferred into the index df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"]) if "Sequence" in df_distance_map_cluster_gene_in_index.columns: df_distance_map_cluster_gene_in_index.set_index("Sequence", append=True, inplace=True) df_cluster_xmaps_distance_with_index = pd.DataFrame() try: # for each individual map and a defined cluster data will be extracted from the dataframe # "df_distance_map_cluster_gene_in_index" and appended to the new dataframe df_cluster_xmaps_distance_with_index for maps in map_names: plot_try = df_distance_map_cluster_gene_in_index.xs((cluster_of_interest, maps), level=["Cluster", "Map"], drop_level=False) df_cluster_xmaps_distance_with_index = df_cluster_xmaps_distance_with_index.append(plot_try) df_cluster_xmaps_distance_with_index["Combined Maps"] = "Combined Maps" #number of proteins within one cluster self.proteins_quantified_across_all_maps = df_cluster_xmaps_distance_with_index.unstack("Map").shape[0] # index will be reset, required by px.box df_cluster_xmaps_distance = df_cluster_xmaps_distance_with_index.reset_index() distance_boxplot_figure = go.Figure() distance_boxplot_figure.add_trace(go.Box( x=df_cluster_xmaps_distance["Map"], y=df_cluster_xmaps_distance["distance"], boxpoints="all", whiskerwidth=0.2, marker_size=2, hovertext=df_cluster_xmaps_distance["Gene names"] )) distance_boxplot_figure.add_trace(go.Box( x=df_cluster_xmaps_distance["Combined Maps"], y=df_cluster_xmaps_distance["distance"], boxpoints="all", whiskerwidth=0.2, marker_size=2, hovertext=df_cluster_xmaps_distance["Gene names"] )) distance_boxplot_figure.update_layout( title="Manhattan distance distribution for <br>the protein cluster: {}".format(cluster_of_interest), autosize=False, showlegend=False, width=500, height=500, # 
black box around the graph xaxis=go.layout.XAxis(linecolor="black", linewidth=1, title="Map", mirror=True), yaxis=go.layout.YAxis(linecolor="black", linewidth=1, title="distance", mirror=True), template="simple_white" ) return distance_boxplot_figure except: self.cache_cluster_quantified = False def distance_to_median_boxplot(self, cluster_of_interest="Proteasome"): """ A box plot for 1 desired cluster, across all maps and fractions is generated displaying the distribution of the distance to the median. For each fraction, one box plot will be displayed. Args: self: df_allclusters_clusterdist_fracunstacked, dataframe with single level column, stored as attribute (self.allclusters_clusterdist_fracunstacked), in which "Fraction" is unstacked. It contains only the normalized data of individual protein clusters substracted by the median of the respective protein cluster for each fraction. map_names: individual map names are stored as an index Returns: distance_to_median_boxplot_figure: Box plot. 
Along the x-axis, the maps are shown, along the y-axis the distances is plotted """ df_boxplot_manymaps = pd.DataFrame() try: # for each individual map and a defined cluster data will be extracted from the dataframe # "df_allclusters_clusterdist_fracunstacked" and appended to the new dataframe df_boxplot_manymaps for maps in self.map_names: plot_try = self.df_allclusters_clusterdist_fracunstacked.xs((cluster_of_interest, maps), level=["Cluster", "Map"], drop_level=False) df_boxplot_manymaps = df_boxplot_manymaps.append(plot_try) self.df_boxplot_manymaps = df_boxplot_manymaps # index will be reset, required by px.violin df_boxplot_manymaps = abs(df_boxplot_manymaps.stack("Fraction")) df_boxplot_manymaps.name = "distance" df_boxplot_manymaps = df_boxplot_manymaps.reindex(index=natsort.natsorted(df_boxplot_manymaps.index)) df_boxplot_manymaps = df_boxplot_manymaps.reset_index() # box plot will be generated, every fraction will be displayed in a single plot distance_to_median_boxplot_figure = px.box(df_boxplot_manymaps, x="Map", y="distance", facet_col="Fraction", facet_col_wrap=2, boxmode="overlay", height=900, width=700, points="all", hover_name="Gene names", template="simple_white", title="Distribution of the distance to the median for <br>the protein cluster: {}".format(cluster_of_interest)) return distance_to_median_boxplot_figure except: return "This protein cluster was not quantified" def dynamic_range(self): """ Dynamic range of each individual protein clusters (of the median profile) across all maps is calculated" Args: self: markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...} df_01_stacked: "MAP" and "Fraction" are stacked; the data in the column "normalized profile" is used for plotting. 
Additionally the columns "MS/MS count" and "Ratio H/L count | Ratio H/L variability [%] | Ratio H/L" are found in LFQ and SILAC data respectively Returns: fig_dynamicRange: Bar plot, displaying the dynamic range for each protein cluster self.df_dynamicRange: df, no index, columns: "Max", "Min", "Dynamic Range", "Cluster" """ df_setofproteins_allMaps = pd.DataFrame() df_dynamicRange = pd.DataFrame() df_01_stacked = self.df_01_stacked for clusters in self.markerproteins: try: df_setofproteins_allMaps = pd.DataFrame() for marker in self.markerproteins[clusters]: try: df_marker_allMaps = df_01_stacked.xs(marker, level="Gene names", drop_level=False) except KeyError: continue df_setofproteins_allMaps = df_setofproteins_allMaps.append(df_marker_allMaps) df_setofproteins_allMaps_median = df_setofproteins_allMaps["normalized profile"].unstack("Fraction").median() df_dynamicRange = df_dynamicRange.append(pd.DataFrame(np.array([[max(df_setofproteins_allMaps_median), min(df_setofproteins_allMaps_median), max(df_setofproteins_allMaps_median)-min(df_setofproteins_allMaps_median), clusters]]), columns=["Max", "Min", "Dynamic Range", "Cluster"]), ignore_index=True) except: continue self.analysis_summary_dict["Dynamic Range"] = df_dynamicRange.to_json() def plot_dynamic_range(self): """ Dynamic range of each individual protein clusters (of the median profile) across all maps is displayed" Args: self: markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...} df_01_stacked: "MAP" and "Fraction" are stacked; the data in the column "normalized profile" is used for plotting. 
Additionally the columns "MS/MS count" and "Ratio H/L count | Ratio H/L variability [%] | Ratio H/L" are found in LFQ and SILAC data respectively Returns: fig_dynamicRange: Bar plot, displaying the dynamic range for each protein cluster self.df_dynamicRange: df, no index, columns: "Max", "Min", "Dynamic Range", "Cluster" """ fig_dynamicRange = px.bar(pd.read_json(self.analysis_summary_dict["Dynamic Range"]), x="Cluster", y="Dynamic Range", base="Min", template="simple_white", width=1000, height=500).update_xaxes(categoryorder="total ascending") return fig_dynamicRange def results_overview_table(self): """ Dataframe will be created, that provides information about "range", "mean" and "standardeviation", given as the column names, based on the data given in df_distance_noindex Args: self: df_distance_noindex: stored as attribute (self.df_distance_noindex),index is reset. It contains the column name "distance", in which the e.g. Manhattan distances for each individual protein of the specified clusters (see self.markerproteins) are stored markerproteins: dictionary, key: cluster name, value: gene names (e.g. 
{"Proteasome" : ["PSMA1", "PSMA2",...], ...} """ df_distance_noindex = self.df_distance_noindex df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"]) map_names = self.map_names df_overview = pd.DataFrame() for clusters in self.markerproteins: #if a certain cluster is not available in the dataset at all try: for maps in map_names: df_dist_map_cluster = df_distance_map_cluster_gene_in_index.xs((clusters, maps), level=["Cluster", "Map"], drop_level=False) statistic_table = {"range": (df_dist_map_cluster["distance"].max(axis=0)) - (df_dist_map_cluster["distance"].min(axis=0)), "median": df_dist_map_cluster["distance"].median(axis=0), "standardeviation": df_dist_map_cluster["distance"].std(axis=0), "Cluster": clusters, "Map": maps } statistic_series = pd.Series(data=statistic_table) df_statistic_table_individual_cluster = pd.DataFrame(statistic_series).T df_overview = df_overview.append(df_statistic_table_individual_cluster) df_dist_cluster = df_distance_map_cluster_gene_in_index.xs(clusters, level="Cluster") statistic_table_combined = { "range": (df_dist_cluster["distance"].max(axis=0)) - (df_dist_cluster["distance"].min(axis=0)), "median": df_dist_cluster["distance"].median(axis=0), "standardeviation": df_dist_cluster["distance"].std(axis=0), "Cluster": clusters, "Map": "combined maps" } statistic_series_combined = pd.Series(data=statistic_table_combined) df_statistic_table_individual_cluster = pd.DataFrame(statistic_series_combined).T df_overview = df_overview.append(df_statistic_table_individual_cluster) except: continue try: df_overview.set_index(["Cluster", "Map"], inplace=True) df_overview.sort_index(axis=0, level=0, inplace=True) except: df_overview = pd.DataFrame() self.analysis_summary_dict["Overview table"] = df_overview.reset_index().to_json() self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy() #self.analysis_summary_dict.clear() return df_overview def 
reframe_df_01ORlog_for_Perseus(self, df_01ORlog): """" To be available for Perseus df_01_stacked needs to be reframed. Args: df_01ORlog: df_distance_noindex: stored as attribute (self.df_distance_noindex),index is reset. It contains the column name "distance", in which the e.g. Manhattan distances for each individual protein of the specified clusters (see self.markerproteins) are stored map_names: individual map names are stored as an index Returns: df_01ORlog_svm: LFQ: columns: "MS/MS count_Map1_01K", "normalized profile_Map1_01K" index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Compartment" SILAC: columns: e.g. "Ratio H/L count_MAP2_80K", "Ratio H/L variability [%]_MAP1_03K", "normalized profile_MAP5_03K" index: "Q-value", "Score", "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "id", "Compartment" """ df_01ORlog_svm = df_01ORlog.copy() #df_01_filtered_combined = df_01_filtered_combined.stack(["Experiment", "Map"]).swaplevel(0,1, axis=0).dropna(axis=1) index_ExpMap = df_01ORlog_svm.index.get_level_values("Map")+"_"+df_01ORlog_svm.index.get_level_values("Fraction") index_ExpMap.name = "Map_Frac" df_01ORlog_svm.set_index(index_ExpMap, append=True, inplace=True) df_01ORlog_svm.index = df_01ORlog_svm.index.droplevel(["Map", "Fraction"]) df_01ORlog_svm = df_01ORlog_svm.unstack("Map_Frac") #df_01ORlog_svm = df_01ORlog_svm.dropna(axis=0, subset=df_01ORlog_svm.loc[[], ["normalized profile"]].columns) df_01ORlog_svm.columns = ["_".join(col) for col in df_01ORlog_svm.columns.values] df_01ORlog_svm.rename(index={"undefined" : np.nan}, level="Compartment", inplace=True) return df_01ORlog_svm class SpatialDataSetComparison: analysed_datasets_dict = SpatialDataSet.analysed_datasets_dict css_color = SpatialDataSet.css_color cache_stored_SVM = True def __init__(self, ref_exp="Exp2", **kwargs): #clusters_for_ranking=["Proteasome", "Lysosome"] #self.clusters_for_ranking = clusters_for_ranking self.ref_exp = ref_exp self.json_dict = {} 
#self.fractions, self.map_names = [], [] #self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame() #collapse_maps,collapse_cluster, cluster_of_interest_comparison, multi_choice, multi_choice_venn, x_PCA_comp, y_PCA_comp #if "organism" not in kwargs.keys(): # self.markerproteins = self.markerproteins_set["Human - Swissprot"] #else: # assert kwargs["organism"] in self.markerproteins_set.keys() # self.markerproteins = self.markerproteins_set[kwargs["organism"]] # del kwargs["organism"] #self.unique_proteins_total = unique_proteins_total self.exp_names, self.exp_map_names = [], [] self.df_01_filtered_combined, self.df_distance_comp = pd.DataFrame(), pd.DataFrame() self.df_quantity_pr_pg_combined, self.df_dynamicRange_combined = pd.DataFrame(), pd.DataFrame() def read_jsonFile(self): #, content=None """ Read-out of the JSON-file and currently analysed dataset, stored in "analysed_datasets_dict". It wil create df_distances_combined ("Gene names", "Cluster" are stacked; "Map" and Experiment names (are not stored in an additional level name) are unstacked. Layout will be adjusted for distance-plotting. 
    def read_jsonFile(self):
        """
        Read out the JSON dictionary of analysed datasets (self.json_dict, the
        content of AnalysedDatasets.json) and combine the per-experiment
        entries into cross-experiment dataframes.

        Per experiment the following entry types are merged:
            "0/1 normalized data"                  -> df_01_filtered_combined
            "quantity: profiles/protein groups"    -> df_quantity_pr_pg_combined
            "Manhattan distances"                  -> df_distance_comp
            "Dynamic Range"                        -> df_dynamicRange_combined
            "Unique Proteins"                      -> unique_proteins_total
            "Analysis parameters"                  -> analysis_parameters_total

        Returns:
            None. Side effects (self attributes):
                df_01_filtered_combined: df, "Fraction" unstacked;
                    "Experiment", "Gene names", "Map", "Exp_Map" stacked
                df_distance_comp: df, no index; columns "Gene names",
                    "Cluster", "Protein IDs", "Compartment", "Experiment",
                    "Map", "Exp_Map", "distance"
                df_quantity_pr_pg_combined, df_dynamicRange_combined: dfs with
                    an added "Experiment" column
                unique_proteins_total: dict, experiment name -> unique proteins
                exp_names, exp_map_names: lists of experiment / Exp_Map names
                markerproteins, clusters_for_ranking: marker annotation loaded
                    from the packaged complexes table for the organism
        """
        json_dict = self.json_dict

        self.analysis_parameters_total = {}
        unique_proteins_total = {}

        df_01_combined = pd.DataFrame()
        for exp_name in json_dict.keys():
            for data_type in json_dict[exp_name].keys():
                if data_type == "0/1 normalized data":
                    df_01_toadd = pd.read_json(json_dict[exp_name][data_type])
                    df_01_toadd.set_index(["Gene names", "Protein IDs", "Compartment"], inplace=True)
                    if "Sequence" in df_01_toadd.columns:
                        df_01_toadd.set_index(["Sequence"], inplace=True, append=True)
                    # NOTE(review): drop() is called without axis=1 although the
                    # list contains column labels; this only works when the list
                    # is empty (all columns start with "normalized profile") -
                    # confirm and consider adding axis=1.
                    df_01_toadd.drop([col for col in df_01_toadd.columns if not col.startswith("normalized profile")], inplace=True)
                    # column labels are "?"-separated triples: Set?Map?Fraction
                    df_01_toadd.columns = pd.MultiIndex.from_tuples([el.split("?") for el in df_01_toadd.columns], names=["Set", "Map", "Fraction"])
                    df_01_toadd.rename(columns={"normalized profile": exp_name}, inplace=True)
                    # fuse the index into a single "join" key so experiments can be aligned
                    df_01_toadd.set_index(pd.Series(["?".join([str(i) for i in el]) for el in df_01_toadd.index.values], name="join"), append=True, inplace=True)
                    if len(df_01_combined) == 0:
                        df_01_combined = df_01_toadd.copy()
                    else:
                        df_01_combined = pd.concat([df_01_combined, df_01_toadd], sort=False, axis=1)

                elif data_type == "quantity: profiles/protein groups" and exp_name == list(json_dict.keys())[0]:
                    # first experiment initializes the combined df
                    df_quantity_pr_pg_combined = pd.read_json(json_dict[exp_name][data_type])
                    df_quantity_pr_pg_combined["Experiment"] = exp_name

                elif data_type == "quantity: profiles/protein groups" and exp_name != list(json_dict.keys())[0]:
                    df_quantity_pr_pg_toadd = pd.read_json(json_dict[exp_name][data_type])
                    df_quantity_pr_pg_toadd["Experiment"] = exp_name
                    df_quantity_pr_pg_combined = pd.concat([df_quantity_pr_pg_combined, df_quantity_pr_pg_toadd])

                elif data_type == "Manhattan distances" and exp_name == list(json_dict.keys())[0]:
                    df_distances_combined = pd.read_json(json_dict[exp_name][data_type])
                    df_distances_combined = df_distances_combined.set_index(["Map", "Gene names", "Cluster", "Protein IDs", "Compartment"]).copy()
                    if "Sequence" in df_distances_combined.columns:
                        df_distances_combined.set_index(["Sequence"], inplace=True, append=True)
                    # one column per map, named after the experiment
                    df_distances_combined = df_distances_combined[["distance"]].unstack(["Map"])
                    df_distances_combined.rename(columns={"distance": exp_name}, inplace=True)

                elif data_type == "Manhattan distances" and exp_name != list(json_dict.keys())[0]:
                    df_distances_toadd = pd.read_json(json_dict[exp_name][data_type])
                    df_distances_toadd = df_distances_toadd.set_index(["Map", "Gene names", "Cluster", "Protein IDs", "Compartment"]).copy()
                    if "Sequence" in df_distances_toadd.columns:
                        df_distances_toadd.set_index(["Sequence"], inplace=True, append=True)
                    df_distances_toadd = df_distances_toadd[["distance"]].unstack(["Map"])
                    df_distances_toadd.rename(columns={"distance": exp_name}, inplace=True)
                    df_distances_combined = pd.concat([df_distances_combined, df_distances_toadd], axis=1)

                elif data_type == "Dynamic Range" and exp_name == list(json_dict.keys())[0]:
                    df_dynamicRange_combined = pd.read_json(json_dict[exp_name][data_type])
                    df_dynamicRange_combined["Experiment"] = exp_name

                elif data_type == "Dynamic Range" and exp_name != list(json_dict.keys())[0]:
                    df_dynamicRange_toadd = pd.read_json(json_dict[exp_name][data_type])
                    df_dynamicRange_toadd["Experiment"] = exp_name
                    df_dynamicRange_combined = pd.concat([df_dynamicRange_combined, df_dynamicRange_toadd])

                elif data_type == "Unique Proteins":
                    unique_proteins_total[exp_name] = json_dict[exp_name][data_type]

                elif data_type == "Analysis parameters":
                    self.analysis_parameters_total[exp_name] = json_dict[exp_name][data_type]

        df_01_combined.columns.names = ["Experiment", "Map", "Fraction"]
        # reframe it to make it ready for PCA; dropna keeps only profiles
        # complete in all experiments/maps
        df_01_filtered_combined = df_01_combined.stack(["Experiment", "Map"]).dropna(axis=0)
        # re-normalize each profile to sum 1
        df_01_filtered_combined = df_01_filtered_combined.div(df_01_filtered_combined.sum(axis=1), axis=0)

        # fuse Experiment and Map into one index level "Exp_Map"
        index_ExpMap = df_01_filtered_combined.index.get_level_values("Experiment") + "_" + df_01_filtered_combined.index.get_level_values("Map")
        index_ExpMap.name = "Exp_Map"
        df_01_filtered_combined.set_index(index_ExpMap, append=True, inplace=True)

        df_distances_combined.columns.names = ["Experiment", "Map"]
        series = df_distances_combined.stack(["Experiment", "Map"])
        series.name = "distance"
        df_distance_comp = series.to_frame()

        # fuse Experiment and Map into one column = "Exp_Map"
        index_dist_ExpMap = df_distance_comp.index.get_level_values("Experiment") + "_" + df_distance_comp.index.get_level_values("Map")
        index_dist_ExpMap.name = "Exp_Map"
        df_distance_comp.set_index(index_dist_ExpMap, append=True, inplace=True)

        # reduce protein groups to their first ID, then keep only entries
        # present in every experiment/map (unstack -> dropna -> stack)
        df_distance_comp.reset_index(level=['Protein IDs'], inplace=True)
        df_distance_comp["Protein IDs"] = df_distance_comp["Protein IDs"].str.split(";", expand=True)[0]
        df_distance_comp = df_distance_comp.set_index("Protein IDs", append=True).unstack(["Experiment", "Exp_Map", "Map"]).dropna().stack(["Experiment", "Exp_Map", "Map"]).reset_index()

        self.unique_proteins_total = unique_proteins_total
        self.exp_names = list(df_01_filtered_combined.index.get_level_values("Experiment").unique())
        self.exp_map_names = list(index_dist_ExpMap.unique())
        self.df_01_filtered_combined = df_01_filtered_combined
        self.df_quantity_pr_pg_combined = df_quantity_pr_pg_combined
        self.df_dynamicRange_combined = df_dynamicRange_combined
        self.df_distance_comp = df_distance_comp

        # organism of the first experiment decides which marker table is loaded
        try:
            organism = json_dict[list(json_dict.keys())[0]]["Analysis parameters"]['organism']
        except:
            organism = "Homo sapiens - Uniprot"

        marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(organism)))
        self.markerproteins = {k: v.replace(" ", "").split(",") for k, v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}

        self.clusters_for_ranking = self.markerproteins.keys()
Uniprot" marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(organism))) self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])} self.clusters_for_ranking = self.markerproteins.keys() def perform_pca_comparison(self): """ PCA will be performed, using logarithmized data. Args: self: df_01_filtered_combined: df, which contains 0/1 normalized data for each map - for all experiments columns: Fractions, e.g. "03K", "06K", "12K", "24K", "80K" index: "Protein IDs", "Gene names", "Compartment", "Experiment", "Map", "Exp_Map" df_01_mean_filtered_combined: df, which contains (global) 0/1 normalized data across all maps (mean) - for all experiments and for all protein IDs, that are consistent throughout all experiments columns: Fractions, e.g. "03K", "06K", "12K", "24K", "80K" index: "Gene names", "Protein IDs", "Compartment", "Experiment" Returns: self: df_pca_for_plotting: PCA processed dataframe index: "Experiment", "Gene names", "Map", "Exp_Map" columns: "PC1", "PC2", "PC3" contains only marker genes, that are consistent throughout all maps / experiments df_global_pca: PCA processed dataframe index: "Gene names", "Protein IDs", "Compartment", "Experiment", columns: "PC1", "PC2", "PC3" contains all protein IDs, that are consistent throughout all experiments """ markerproteins = self.markerproteins.copy() #df_01_filtered_combined = self.df_01_filtered_combined #df_01_filtered_combined = self.df_01_filtered_combined df_mean = pd.DataFrame() for exp in self.exp_names: df_exp = self.df_01_filtered_combined.stack("Fraction").unstack(["Experiment", "Map","Exp_Map"])[exp].mean(axis=1).to_frame(name=exp) df_mean = pd.concat([df_mean, df_exp], axis=1) df_mean = df_mean.rename_axis("Experiment", axis="columns").stack("Experiment").unstack("Fraction") pca = PCA(n_components=3) df_pca = pd.DataFrame(pca.fit_transform(df_mean)) df_pca.columns = ["PC1", 
"PC2", "PC3"] df_pca.index = df_mean.index try: markerproteins["PSMA subunits"] = [item for sublist in [re.findall("PSMA.*",p) for p in markerproteins["Proteasome"]] for item in sublist] markerproteins["PSMB subunits"] = [item for sublist in [re.findall("PSMB.*",p) for p in markerproteins["Proteasome"]] for item in sublist] del markerproteins["Proteasome"] except: pass ###only one df, make annotation at that time df_cluster = pd.DataFrame([(k, i) for k, l in markerproteins.items() for i in l], columns=["Cluster", "Gene names"]) df_global_pca = df_pca.reset_index().merge(df_cluster, how="left", on="Gene names") df_global_pca.Cluster.replace(np.NaN, "Undefined", inplace=True) self.markerproteins_splitProteasome = markerproteins self.df_pca = df_pca self.df_global_pca = df_global_pca def plot_pca_comparison(self, cluster_of_interest_comparison="Proteasome", multi_choice=["Exp1", "Exp2"]): """ A PCA plot for desired experiments (multi_choice) and 1 desired cluster is generated. Either the maps for every single experiment are displayed individually or in a combined manner Args: self: markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...} multi_choice: list of experiment names cluster_of_interest_comparison: string, protein cluster (key in markerproteins, e.g. "Proteasome") df_pca: PCA processed dataframe index: "Experiment", "Gene names", "Map", "Exp_Map" columns: "PC1", "PC2", "PC3" contains only marker genes, that are consistent throughout all maps / experiments Returns: pca_figure: PCA plot for a specified protein cluster. 
""" df_pca = self.df_pca.copy() markerproteins = self.markerproteins try: df_setofproteins_PCA = pd.DataFrame() for map_or_exp in multi_choice: for marker in markerproteins[cluster_of_interest_comparison]: try: plot_try_pca = df_pca.xs((marker, map_or_exp), level=["Gene names", "Experiment"], drop_level=False) except KeyError: continue df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca) df_setofproteins_PCA.reset_index(inplace=True) df_setofproteins_PCA = df_setofproteins_PCA.assign(Experiment_lexicographic_sort=pd.Categorical(df_setofproteins_PCA["Experiment"], categories=multi_choice, ordered=True)) df_setofproteins_PCA.sort_values("Experiment_lexicographic_sort", inplace=True) pca_figure = px.scatter_3d(df_setofproteins_PCA, x="PC1", y="PC2", z="PC3", color="Experiment", template="simple_white", hover_data=["Gene names"] ) pca_figure.update_layout(autosize=False, width=700, height=500, title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest_comparison), template="simple_white" ) return pca_figure except: return "This protein cluster was not identified in all experiments" def plot_global_pca_comparison(self, cluster_of_interest_comparison="Proteasome", x_PCA="PC1", y_PCA="PC3", markerset_or_cluster=False, multi_choice=["Exp1", "Exp2"]): """" PCA plot will be generated Args: self: df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index multi_choice: list of experiment names css_color: list of colors df_global_pca: PCA processed dataframe index: "Gene names", "Protein IDs", "Compartment", "Experiment", columns: "PC1", "PC2", "PC3" contains all protein IDs, that are consistent throughout all experiments Returns: pca_figure: global PCA plot, clusters based on the markerset based (df_organellarMarkerSet) are color coded. 
""" df_global_pca_exp = self.df_global_pca.loc[self.df_global_pca["Experiment"].isin(multi_choice)] df_global_pca_exp.reset_index(inplace=True) compartments = list(SpatialDataSet.df_organellarMarkerSet["Compartment"].unique()) compartment_color = dict(zip(compartments, self.css_color)) compartment_color["Selection"] = "black" compartment_color["undefined"] = "lightgrey" compartments.insert(0, "undefined") compartments.insert(len(compartments), "Selection") cluster = self.markerproteins_splitProteasome.keys() cluster_color = dict(zip(cluster, self.css_color)) cluster_color["Undefined"] = "lightgrey" if markerset_or_cluster == True: df_global_pca = df_global_pca_exp[df_global_pca_exp.Cluster!="Undefined"].sort_values(by="Cluster") df_global_pca = df_global_pca_exp[df_global_pca_exp.Cluster=="Undefined"].append(df_global_pca) else: for i in self.markerproteins[cluster_of_interest_comparison]: df_global_pca_exp.loc[df_global_pca_exp["Gene names"] == i, "Compartment"] = "Selection" df_global_pca = df_global_pca_exp.assign(Compartment_lexicographic_sort = pd.Categorical(df_global_pca_exp["Compartment"], categories=[x for x in compartments], ordered=True)) df_global_pca.sort_values(["Compartment_lexicographic_sort", "Experiment"], inplace=True) fig_global_pca = px.scatter(data_frame=df_global_pca, x=x_PCA, y=y_PCA, color="Compartment" if markerset_or_cluster == False else "Cluster", color_discrete_map=compartment_color if markerset_or_cluster == False else cluster_color, title="Protein subcellular localization by PCA", hover_data=["Protein IDs", "Gene names", "Compartment"], facet_col="Experiment", facet_col_wrap=2, opacity=0.9, template="simple_white" ) fig_global_pca.update_layout(autosize=False, width=1800 if markerset_or_cluster == False else 1600, height=400*(int(len(multi_choice) / 2) + (len(multi_choice) % 2 > 0)), template="simple_white" ) return fig_global_pca def get_marker_proteins(self, experiments, cluster): df_in = self.df_01_filtered_combined.copy() markers 
= self.markerproteins[cluster] # retrieve marker proteins df_cluster = pd.DataFrame() for marker in markers: try: df_p = df_in.xs(marker, level="Gene names", axis=0, drop_level=False) except: continue df_cluster = df_cluster.append(df_p) if len(df_cluster) == 0: return df_cluster # filter for all selected experiments df_cluster = df_cluster.droplevel("Exp_Map", axis=0) df_cluster = df_cluster.unstack(["Experiment", "Map"]) if any([el not in df_cluster.columns.get_level_values("Experiment") for el in experiments]): return pd.DataFrame() drop_experiments = [el for el in df_cluster.columns.get_level_values("Experiment") if el not in experiments] if len(drop_experiments) > 0: df_cluster.drop([el for el in df_cluster.columns.get_level_values("Experiment") if el not in experiments], level="Experiment", axis=1, inplace=True) df_cluster.dropna(inplace=True) if len(df_cluster) == 0: return df_cluster df_cluster.set_index(pd.Index(np.repeat(cluster, len(df_cluster)), name="Cluster"), append=True, inplace=True) return df_cluster def calc_cluster_distances(self, df_cluster, complex_profile=np.median, distance_measure="manhattan"): df_distances = pd.DataFrame() # loop over experiments experiments = set(df_cluster.columns.get_level_values("Experiment")) for exp in experiments: df_exp = df_cluster.xs(exp, level="Experiment", axis=1) ref_profile = pd.DataFrame(df_exp.apply(complex_profile, axis=0, result_type="expand")).T # loop over maps maps = set(df_exp.columns.get_level_values("Map")) for m in maps: if distance_measure == "manhattan": d_m = pw.manhattan_distances(df_exp.xs(m, level="Map", axis=1), ref_profile.xs(m, level="Map", axis=1)) else: raise ValueError(distance_measure) d_m = pd.DataFrame(d_m, columns=[(exp, m)], index=df_exp.index) df_distances = pd.concat([df_distances, d_m], axis=1) df_distances.columns = pd.MultiIndex.from_tuples(df_distances.columns, names=["Experiment", "Map"]) return df_distances def calc_biological_precision(self, experiments=None, 
clusters=None): """ Method to calculate the distance table for assessing biological precision """ df_distances = pd.DataFrame() if experiments is None: experiments = self.exp_names if clusters is None: clusters = self.markerproteins.keys() for cluster in clusters: df_cluster = self.get_marker_proteins(experiments, cluster) if len(df_cluster) == 0: continue dists_cluster = self.calc_cluster_distances(df_cluster) df_distances = df_distances.append(dists_cluster) df_distances = df_distances.stack(["Experiment", "Map"]).reset_index()\ .sort_values(["Experiment","Gene names"]).rename({0: "distance"}, axis=1) df_distances.insert(0, "Exp_Map", ["_".join([e,m]) for e,m in zip(df_distances["Experiment"], df_distances["Map"])]) self.df_distance_comp = df_distances return df_distances def get_complex_coverage(self, min_n=5): full_coverage = {} for complx in self.markerproteins.keys(): df = self.get_marker_proteins(self.exp_names, complx) if len(df) >= min_n: full_coverage[complx] = len(df) partial_coverage = {} for exp in self.exp_names: for complx in self.markerproteins.keys(): if complx in full_coverage.keys(): continue df = self.get_marker_proteins([exp], complx) #print(df) if complx in partial_coverage.keys(): partial_coverage[complx].append(len(df)) else: partial_coverage[complx] = [len(df)] no_coverage = {} for k in partial_coverage.keys(): if all([el < min_n for el in partial_coverage[k]]): no_coverage[k] = partial_coverage[k] for k in no_coverage.keys(): del partial_coverage[k] self.coverage_lists = [full_coverage, partial_coverage, no_coverage] return full_coverage, partial_coverage, no_coverage def distance_boxplot_comparison(self, cluster_of_interest_comparison="Proteasome", collapse_maps=False, multi_choice=["Exp1", "Exp2"]): """ A box plot for desired experiments (multi_choice) and 1 desired cluster is generated displaying the distribution of the e.g. Manhattan distance. 
Either the maps for every single experiment are displayed individually or in a combined manner. Args: self: multi_choice: list of experiment names collapse_maps: boolean cluster_of_interest_comparison: string, protein cluster (key in markerproteins, e.g. "Proteasome") map_names: individual map names are stored as an index df_distance_comp: df_distance_comp: no index, column names: "Gene names", "Cluster", "Protein IDs", "Compartment", "Experiment", "Map", "Exp_Map", "distance" "distance": Manhattan distances for each individual protein of the specified clusters (see self.markerproteins) are stored Returns: distance_boxplot_figure: boxplot. Along the x-axis the maps, along the y-axis the distances are shown """ #an error massage, if no Experiments are selected, will be displayed already, that is why: return "" if len(multi_choice)>=1: pass else: return ("") df_distance_comp = self.df_distance_comp.copy() #set categroical column, allowing lexicographic sorting df_distance_comp["Experiment_lexicographic_sort"] = pd.Categorical(df_distance_comp["Experiment"], categories=multi_choice, ordered=True) df_distance_comp.sort_values(["Experiment_lexicographic_sort", "Map"], inplace=True) if collapse_maps == False: #get only values form experiment of interest df_distance_selectedExp = df_distance_comp.loc[df_distance_comp["Experiment"].isin(multi_choice)] #get only values form cluster of interest df_distance_selectedExp = df_distance_selectedExp.loc[df_distance_selectedExp["Cluster"]==cluster_of_interest_comparison] if df_distance_selectedExp.shape[0] == 0: self.cache_cluster_quantified = False else: individual_distance_boxplot_figure=go.Figure() for i, exp in enumerate(multi_choice): df_plot=df_distance_selectedExp[df_distance_selectedExp["Experiment"]==exp] individual_distance_boxplot_figure.add_trace(go.Box( x=[df_plot["Experiment"], df_plot["Map"]], y=df_plot["distance"], #line=dict(color=pio.templates["simple_white"].layout["colorway"][i]), boxpoints="all", 
whiskerwidth=0.2, marker_size=2, name=exp, hovertext=df_plot["Gene names"] )) individual_distance_boxplot_figure.update_layout(boxmode="group", xaxis_tickangle=90, title="Manhattan distance distribution for <br>the protein cluster: {}".format(cluster_of_interest_comparison), autosize=False, width=350*len(multi_choice), height=500, xaxis=go.layout.XAxis(linecolor="black", linewidth=1, title="Experiment", mirror=True), yaxis=go.layout.YAxis(linecolor="black", linewidth=1, title="Distance", mirror=True), template="simple_white") return individual_distance_boxplot_figure else: map_or_exp_names = multi_choice level_of_interest = "Experiment" boxplot_color = "Experiment" df_distance_selectedExp_global = df_distance_comp # "Gene names", "Map", "Cluster" and transferred into the index df_distance_selectedExp_global.set_index(["Gene names", level_of_interest, "Cluster"], inplace=True) df_cluster_xmaps_distance_global = pd.DataFrame() # for each individual map and a defined cluster data will be extracted from the dataframe # "df_distance_selectedExp_global" and appended to the new dataframe df_cluster_xmaps_distance_global for map_or_exp in map_or_exp_names: plot_try = df_distance_selectedExp_global.xs((cluster_of_interest_comparison, map_or_exp), level=["Cluster", level_of_interest], drop_level=False) df_cluster_xmaps_distance_global = df_cluster_xmaps_distance_global.append(plot_try) df_cluster_xmaps_distance_global.sort_values("Experiment_lexicographic_sort", inplace=True) df_cluster_xmaps_distance_global.reset_index(inplace=True) distance_boxplot_figure = px.box(df_cluster_xmaps_distance_global, x=level_of_interest, y="distance", points="all", hover_name="Gene names", color=boxplot_color, template="simple_white", title="Global Manhattan distance distribution for the protein cluster: {}".format(cluster_of_interest_comparison) ) distance_boxplot_figure.update_layout(autosize=False, width=250*len(multi_choice), height=500, xaxis=go.layout.XAxis(linecolor="black", 
linewidth=1, title="Map", mirror=True), yaxis=go.layout.YAxis(linecolor="black", linewidth=1, title="distance", mirror=True), template="simple_white" ) return distance_boxplot_figure def plot_biological_precision(self, multi_choice=None, clusters_for_ranking=None, min_members=5, reference=""): if multi_choice is None: multi_choice = self.exp_names if clusters_for_ranking is None: clusters_for_ranking = self.clusters_for_ranking if len(multi_choice) == 0 or len(clusters_for_ranking) == 0: return("Please provide at least one experiment and one cluster for ranking") df = self.df_distance_comp.copy() df = df[df["Experiment"].isin(multi_choice)] df = df[df["Cluster"].isin(clusters_for_ranking)] df_m = df.groupby(["Cluster", "Experiment", "Map"]).filter(lambda x: len(x)>=min_members) df_c = df_m.groupby(["Cluster", "Experiment"]).median().reset_index() df_m = df_m.groupby(["Cluster", "Experiment", "Map"]).median().reset_index() df_m = df_m.assign(Experiment_lexicographic_sort = pd.Categorical(df_m["Experiment"], categories=multi_choice, ordered=True)) df_m = df_m.sort_values("Experiment_lexicographic_sort").drop("Experiment_lexicographic_sort", axis=1)\ .groupby("Experiment", as_index=False, group_keys=False, sort=False).apply(lambda x: x.sort_values("distance", ascending=False)) df_c = df_c.assign(Experiment_lexicographic_sort = pd.Categorical(df_c["Experiment"], categories=multi_choice, ordered=True)) df_c = df_c.sort_values("Experiment_lexicographic_sort").drop("Experiment_lexicographic_sort", axis=1)\ .groupby("Experiment", as_index=False, group_keys=False, sort=False).apply(lambda x: x.sort_values("distance", ascending=False)) bp_stacked_bar = px.bar(df_m, x="Experiment", y="distance", color="Cluster", hover_data=["Map"], width=400+80*len(multi_choice), template="simple_white", height=100+30*len(clusters_for_ranking)).update_layout(legend_traceorder="reversed") bp_box_minus_min = px.box(df_m.set_index(["Experiment", "Cluster", "Map"]).unstack(["Experiment", "Map"])\ 
.apply(lambda x: x-x.min(), axis=1).stack(["Experiment", "Map"]).reset_index()\ .sort_values(["Experiment"], key=lambda x: [multi_choice.index(el) for el in x]), x="Experiment", y="distance", color="Experiment", hover_data=["Cluster", "Map"], width=200+100*len(multi_choice), template="simple_white", height=400, points="all")\ .update_yaxes(title="distance - cluster offset (minimum)") bp_box_minus_ref = px.box(df_c.set_index(["Experiment", "Cluster"]).unstack(["Experiment"])\ .apply(lambda x: x/x[("distance", reference)], axis=1).stack(["Experiment"]).reset_index()\ .sort_values(["Experiment"], key=lambda x: [multi_choice.index(el) for el in x])\ .loc[lambda x: x.Experiment != reference], x="Experiment", y="distance", color="Experiment", hover_data=["Cluster"], color_discrete_sequence=[px.colors.qualitative.D3[multi_choice.index(el)] for el in multi_choice if el != reference], width=200+100*len(multi_choice), template="simple_white", height=400, points="all")\ .update_yaxes(title="distance relative to {}".format(reference)) return bp_stacked_bar, bp_box_minus_min, bp_box_minus_ref def distance_ranking_barplot_comparison(self, collapse_cluster=False, multi_choice=["Exp1", "Exp2"], clusters_for_ranking=None, ranking_boxPlot="Box plot"):#, toggle_sumORmedian=False): #ref_exp="Exp1", if clusters_for_ranking is None: clusters_for_ranking = self.clusters_for_ranking #an error massage, if no Experiments are selected, will be displayed already, that is why: return "" if len(multi_choice)>=1: pass else: return ("") #dict_cluster_normalizedMedian = {} #multi_choice = i_multi_choice.value #clusters_for_ranking = i_clusters_for_ranking.value df_distance_comp = self.df_distance_comp.copy() df_distance_comp = df_distance_comp[df_distance_comp["Experiment"].isin(multi_choice)] df_distance_comp = df_distance_comp[df_distance_comp["Cluster"].isin(clusters_for_ranking)] df_quantified_cluster = df_distance_comp.reset_index() df_quantified_cluster = 
df_distance_comp.drop_duplicates(subset=["Cluster", "Experiment"]).set_index(["Cluster", "Experiment"])["distance"].unstack("Cluster") self.df_quantified_cluster = df_quantified_cluster.notnull().replace({True: "x", False: "-"}) dict_quantified_cluster = {} dict_cluster_normalizedMedian_ref = {} dict_median_distance_ranking = {} for cluster in clusters_for_ranking: try: df_cluster = df_distance_comp[df_distance_comp["Cluster"]==cluster] cluster_quantitity = df_cluster["Gene names"].unique().size if cluster_quantitity>= 5: dict_quantified_cluster[cluster] = cluster_quantitity all_median_one_cluster_several_exp = {} #ref = df_cluster["distance"].median() for exp in multi_choice: median = df_cluster[df_cluster["Experiment"]==exp]["distance"].median() all_median_one_cluster_several_exp[exp] = float(median) #new #if exp == ref_exp: # ref = median ref = np.median(list(all_median_one_cluster_several_exp.values())) dict_median_distance_ranking[cluster] = all_median_one_cluster_several_exp median_ranking_ref = {exp: median/ref for exp, median in all_median_one_cluster_several_exp.items()} dict_cluster_normalizedMedian_ref[cluster] = median_ranking_ref else: continue except: continue self.cluster_above_treshold = dict_quantified_cluster.keys() self.df_quantified_cluster2 = pd.DataFrame.from_dict({"Number of PG per Cluster":dict_quantified_cluster}).T df_cluster_normalizedMedian_ref = pd.DataFrame(dict_cluster_normalizedMedian_ref) df_cluster_normalizedMedian_ref.index.name="Experiment" df_cluster_normalizedMedian_ref.rename_axis("Cluster", axis=1, inplace=True) #median makes a huge differnece, improves result of DIA, MQ, libary df_RelDistanceRanking = pd.concat([df_cluster_normalizedMedian_ref.median(axis=1), df_cluster_normalizedMedian_ref.sem(axis=1)], axis=1, keys=["Distance Ranking (rel, median)", "SEM"]).reset_index().sort_values("Distance Ranking (rel, median)") ranking_sum = df_cluster_normalizedMedian_ref.sum(axis=1).round(2) ranking_sum.name = "Normalized Median - 
Sum" df_ranking_sum = ranking_sum.reset_index() #ranking_product = df_cluster_normalizedMedian.product(axis=1).round(2) #ranking_product.name = "Normalized Median - Product" #df_globalRanking = pd.concat([pd.DataFrame(ranking_sum), pd.DataFrame(ranking_product)], axis=1).reset_index() df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref.stack("Cluster") df_cluster_normalizedMedian_ref.name="Normalized Median" df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref.reset_index() self.df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref.assign(Experiment_lexicographic_sort = pd.Categorical(df_cluster_normalizedMedian_ref["Experiment"], categories=multi_choice, ordered=True)) df_cluster_normalizedMedian_ref.sort_values("Experiment_lexicographic_sort", inplace=True) if collapse_cluster == False: fig_ranking = px.bar(df_cluster_normalizedMedian_ref, x="Cluster", y="Normalized Median", color="Experiment", barmode="group", title="Ranking - normalization to reference experiments the median across all experiments for each cluster", template="simple_white" ) fig_ranking.update_xaxes(categoryorder="total ascending") fig_ranking.update_layout(autosize=False, width=1200 if len(multi_choice)<=3 else 300*len(multi_choice), height=500, template="simple_white" ) return fig_ranking else: if ranking_boxPlot == "Bar plot - median": fig_globalRanking = px.bar(df_RelDistanceRanking.sort_values("Distance Ranking (rel, median)"), x="Experiment", y="Distance Ranking (rel, median)", title="Median manhattan distance distribution for <br>all protein clusters (n>=5 per cluster)",# - median of all individual normalized medians - reference experiment is the median across all experiments for each cluster", error_x="SEM", error_y="SEM", color="Experiment", template="simple_white") if ranking_boxPlot == "Box plot": fig_globalRanking = px.box(df_cluster_normalizedMedian_ref, x="Experiment", 
y="Normalized Median", title="Median manhattan distance distribution for <br>all protein clusters (n>=5 per cluster)",# "Ranking - median of all individual normalized medians - reference is the median across all experiments for each cluster", color="Experiment", points="all", template="simple_white", hover_name="Cluster") #return pn.Column(pn.Row(fig_globalRanking), pn.Row(fig_globalRanking2)) else: fig_globalRanking = px.bar(df_ranking_sum.sort_values("Normalized Median - Sum"), x="Experiment", template="simple_white", y="Normalized Median - Sum", title="Ranking - median of all individual normalized medians - reference is the median across all experiments for each cluster", color="Experiment") fig_globalRanking.update_layout(autosize=False, width=250*len(multi_choice), height=500, template="simple_white" ) return fig_globalRanking def quantity_pr_pg_barplot_comparison(self, multi_choice=["Exp1", "Exp2"]): """ Barplot, showing number of protein groups/profiles. Args: self: df_quantity_pr_pg_combined: df, no index, column names: "filtering", "type", "number of protein groups", "number of profiles", "data completeness of profiles", "Experiment" multi_choice: list of experiment names Returns: fig_quantity_pr_pg: barplot, number of protein groups/profiles before/after filtering of the intersection/total quantity """ df_quantity_pr_pg_combined = self.df_quantity_pr_pg_combined.copy() df_quantity_pr_pg_combined = df_quantity_pr_pg_combined[df_quantity_pr_pg_combined["Experiment"].isin(multi_choice)] df_quantity_pr_pg_combined.insert(0,"Expxfiltering",[" ".join([e,f]) for e,f in zip( df_quantity_pr_pg_combined.Experiment, df_quantity_pr_pg_combined.filtering)]) df_quantity_pr_pg_combined = df_quantity_pr_pg_combined.assign( Experiment_lexicographic_sort = pd.Categorical(df_quantity_pr_pg_combined["Experiment"], categories=multi_choice, ordered=True)) df_quantity_pr_pg_combined.sort_values(["Experiment_lexicographic_sort", "type"], ascending=[True, False], inplace=True) 
layout = go.Layout(barmode="overlay", #xaxis_tickangle=90, autosize=False, width=100*len(multi_choice)+150, height=400, template="simple_white") filtered = list(np.tile(["id","profile"],len(multi_choice))) fig_quantity_pg = px.bar(df_quantity_pr_pg_combined, x="Expxfiltering", y="number of protein groups", color="Experiment", barmode="overlay", hover_data=["type"], opacity=0.8, color_discrete_sequence=px.colors.qualitative.D3) fig_quantity_pg.update_layout(layout, title="Number of Protein Groups", xaxis={"tickmode":"array", "tickvals":[el for el in range(len(multi_choice)*2)], "ticktext":filtered, "title": {"text": None}}) fig_quantity_pr = px.bar(df_quantity_pr_pg_combined, x="filtering", y="number of profiles", color="type", barmode="overlay", labels={"Experiment":"", "filtering":""}, facet_col="Experiment",template="simple_white", opacity=1)\ .for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1])) fig_quantity_pr.update_layout(layout, title="Number of Profiles" ) return fig_quantity_pg, fig_quantity_pr def coverage_comparison(self, multi_choice=["Exp1", "Exp2"]): """ Barplot, showing data completeness of profiles. 
Args: self: df_quantity_pr_pg_combined: df, no index, column names: "filtering", "type", "number of protein groups", "number of profiles", "data completeness of profiles", "Experiment" multi_choice: list of experiment names Returns: fig_pr_dc: barplot, data completeness of profiles before/after filtering of intersection/total qunatity """ df_quantity_pr_pg_combined = self.df_quantity_pr_pg_combined.copy() df_quantity_pr_pg_combined = df_quantity_pr_pg_combined[df_quantity_pr_pg_combined["Experiment"].isin(multi_choice)].sort_values("filtering") df_quantity_pr_pg_combined = df_quantity_pr_pg_combined.assign( Experiment_lexicographic_sort = pd.Categorical(df_quantity_pr_pg_combined["Experiment"], categories=multi_choice, ordered=True)) #df_quantity_pr_pg_combined.sort_values("Experiment_lexicographic_sort", inplace=True) df_quantity_pr_pg_combined.sort_values(["Experiment_lexicographic_sort", "filtering"], inplace=True) fig_pr_dc = px.bar(df_quantity_pr_pg_combined.loc[df_quantity_pr_pg_combined.type=="total"], x="Experiment", y="data completeness of profiles", color="Experiment", barmode="overlay", hover_data=["filtering"], template="simple_white", opacity=0.8) fig_pr_dc.update_layout(#barmode="overlay", #xaxis_tickangle=90, title="Profile completeness of all<br>identified protein groups", autosize=False, width=100*len(multi_choice)+150, height=400, template="simple_white") return fig_pr_dc def venn_sections(self, multi_choice_venn=["Exp1"]): """ UpsetPlot is created based on list of experiments. If 2/3 experiments are given, the Upsetlot displays all possible mutually exclusive overlapping combinations of these experiments. Additionally a Venn Diagram is created using matplotlib. Latter figure has to be transformed from matplotlib object to jpg, to make it available for the webinterface via panel/holoviz. If more than 3 experiments are given, the UpsetPlot will be calculated only for those combinations of these experiments with at least 300 entries. 
Another way to think of this is the mutually exclusive sections of a venn diagram of the sets. If the original list has N sets, the returned list will have (2**N)-1 sets. Args: multi_choice_venn: list of experiment names self: unique_proteins_total: dict, key: Experiment name, value: unique protein (groups) Returns: im: Venn diagram, made availabe flor plotly/webinterface figure_UpSetPlot: Upsetplot figure combinations : list of tuple tag : str Binary string representing which sets are included / excluded in the combination. set : set The set formed by the overlapping input sets. """ def create_upsetplot(sets, multi_choice): num_combinations = 2 ** len(sets) bit_flags = [2 ** n for n in range(len(sets))] flags_zip_sets = [z for z in zip(bit_flags, sets)] combo_sets = [] overlapping_ids = [] experiments = [] #dictio = {} for bits in range(num_combinations - 1, 0, -1): include_sets = [s for flag, s in flags_zip_sets if bits & flag] exclude_sets = [s for flag, s in flags_zip_sets if not bits & flag] combo = set.intersection(*include_sets) combo = set.difference(combo, *exclude_sets) tag = "".join([str(int((bits & flag) > 0)) for flag in bit_flags]) experiment_decoded = [] for digit, exp in zip(list(tag), multi_choice): if digit=="0": continue else: experiment_decoded.append(exp) #dictio[len(combo)] = experiment_decoded if len(multi_choice)>3: if len(combo)>300: overlapping_ids.append(len(combo)) experiments.append(experiment_decoded) else: if len(combo)>0: overlapping_ids.append(len(combo)) experiments.append(experiment_decoded) #combo_sets.append((tag, len(combo))) fig_UpSetPlot = plt.Figure() series_UpSetPlot = from_memberships(experiments, data=overlapping_ids) upplot(series_UpSetPlot, fig=fig_UpSetPlot, show_counts="%d") return fig_UpSetPlot if "Sequence" not in self.df_01_filtered_combined.index.names: sets_proteins_total = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").index.get_level_values("Protein IDs")) for i in multi_choice_venn] 
sets_proteins_intersection = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").unstack(["Map", "Exp_Map"]).dropna()\ .index.get_level_values("Protein IDs")) for i in multi_choice_venn] else: sets_proteins_total = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").index.get_level_values("Sequence")) for i in multi_choice_venn] sets_proteins_intersection = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").unstack(["Map", "Exp_Map"]).dropna()\ .index.get_level_values("Sequence")) for i in multi_choice_venn] figure_UpSetPlot_total = create_upsetplot(sets_proteins_total, multi_choice_venn) figure_UpSetPlot_int = create_upsetplot(sets_proteins_intersection, multi_choice_venn) #make matplot figure available for plotly def convert_venn_jpg(vd): vd = vd.figure out_img = BytesIO() plt.savefig(out_img, bbox_inches="tight",format="jpg", dpi=72) out_img.seek(0) # rewind file im = Image.open(out_img) plt.clf() return im if len(multi_choice_venn) == 2: vd_t = venn2(sets_proteins_total, set_labels=([i for i in multi_choice_venn]), set_colors=px.colors.qualitative.D3[0:2], alpha=0.8) vd_t = plt.title("in at least one map") im_t = convert_venn_jpg(vd_t) vd_i = venn2(sets_proteins_intersection, set_labels=([i for i in multi_choice_venn]), set_colors=px.colors.qualitative.D3[0:2], alpha=0.8) vd_i = plt.title("in all maps") im_i = convert_venn_jpg(vd_i) elif len(multi_choice_venn) == 3: vd_t = venn3(sets_proteins_total, set_labels=([i for i in multi_choice_venn]), set_colors=px.colors.qualitative.D3[0:3], alpha=0.8) vd_t = plt.title("in at least one map") im_t = convert_venn_jpg(vd_t) vd_i = venn3(sets_proteins_intersection, set_labels=([i for i in multi_choice_venn]), set_colors=px.colors.qualitative.D3[0:3], alpha=0.8) vd_i = plt.title("in all maps") im_i = convert_venn_jpg(vd_i) else: im = "Venn diagram can be displayed for 3 Experiments or less" return im,im, figure_UpSetPlot_total, figure_UpSetPlot_int return im_t, im_i, 
        figure_UpSetPlot_total, figure_UpSetPlot_int

    def dynamic_range_comparison(self, collapse_cluster=False, multi_choice=["Exp1", "Exp2"], ref_exp="Exp1"):
        """
        A box plot for desired experiments (multi_choice) and all protein clusters is generated displaying the dynamic range.

        Args:
            self:
                multi_choice: list of experiment names
                df_dynamicRange_combined: df, no index, column names: "Max", "Min", "Dynamic Range", "Cluster", "Experiment"
            collapse_cluster: bool; if False, one bar per cluster per experiment is shown; if True, clusters are collapsed
                into a single median value per experiment.
            ref_exp: str, name of the experiment used as normalization reference.

        Returns:
            fig_dynamic_range: bar plot, dynamic range of each protein cluster for desired experiments is displayed.
        """
        df_dynamicRange_combined = self.df_dynamicRange_combined.copy()
        # restrict to the selected experiments only
        df_dynamicRange_combined = df_dynamicRange_combined[df_dynamicRange_combined["Experiment"].isin(multi_choice)]
        # enforce the user-given experiment order for facet/legend ordering
        df_dynamicRange_combined = df_dynamicRange_combined.assign(Experiment_lexicographic_sort = pd.Categorical(df_dynamicRange_combined["Experiment"], categories=multi_choice, ordered=True))
        df_dynamicRange_combined.sort_values(["Experiment_lexicographic_sort", "Dynamic Range"], inplace=True)

        fig_dynamic_range = px.bar(df_dynamicRange_combined,
                                   x="Cluster",
                                   y="Dynamic Range",
                                   base="Min",
                                   facet_row="Experiment",
                                   template="simple_white",
                                   height=400*len(multi_choice),
                                   width=1200)

        df_dynamicRange_combined_ref = df_dynamicRange_combined.drop(["Experiment_lexicographic_sort"], axis=1)
        df_dynamicRange_combined_ref = df_dynamicRange_combined.set_index(["Cluster", "Experiment"], drop=False).unstack("Cluster")["Dynamic Range"]
        # divide every experiment's per-cluster dynamic range by the reference experiment's values
        df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.div(df_dynamicRange_combined_ref.xs(ref_exp))
        df_RelDynamicRange = pd.concat([df_dynamicRange_combined_ref.median(axis=1), df_dynamicRange_combined_ref.sem(axis=1)],
                                       axis=1, keys=["Dynamic Range (rel, median)", "SEM"]).reset_index()

        if collapse_cluster == False:
            # per-cluster view: long format with one normalized value per cluster/experiment
            df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.stack("Cluster")
            df_dynamicRange_combined_ref.name="Normalized Dynamic Range"
            df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.reset_index()

            fig_RelDynamicRange = px.bar(df_dynamicRange_combined_ref,
                                         x="Cluster",
                                         y="Normalized Dynamic Range",
                                         title="Dynamic Range - normalization to reference experiment: {}".format(ref_exp),
                                         barmode="group",
                                         template="simple_white",
                                         color="Experiment")
            fig_RelDynamicRange.update_xaxes(categoryorder="total ascending")
            fig_RelDynamicRange.update_layout(autosize=False,
                                              width=1200 if len(multi_choice)<=3 else 300*len(multi_choice),
                                              height=500,
                                              template="simple_white"
                                              )
        else:
            # collapsed view: one bar per experiment (median across clusters) with SEM error bars
            fig_RelDynamicRange = px.bar(df_RelDynamicRange.sort_values("Dynamic Range (rel, median)"),
                                         x="Experiment",
                                         y="Dynamic Range (rel, median)",
                                         error_x="SEM", error_y="SEM",
                                         template="simple_white",
                                         title="Dynamic Range - median of all individual normalized medians - reference experiment: {}".format(ref_exp),
                                         color="Experiment")
            fig_RelDynamicRange.update_layout(autosize=False,
                                              width=250*len(multi_choice),
                                              height=500,
                                              template="simple_white"
                                              )

        return pn.Column(pn.Row(fig_dynamic_range), pn.Row(fig_RelDynamicRange))

    def calculate_global_scatter(self, multi_choice, metric, consolidation):
        """
        A distribution plot of the profile scatter in each experiment is generated, with variable distance metric
        and consolidation of replicates.

        Args:
            self:
                df_01_filtered_combined: df, indexed
            multi_choice: list of experiment names
            metric: distance metric, one of 'euclidean distance', 'manhattan distance', '1 - cosine correlation',
                    '1 - pearson correlation', 'manhattan distance to average profile',
                    'manhattan distance to median profile'
            consolidation: method to consolidate replicate distances, one of 'median', 'average', 'sum'

        Returns:
            plot: plotly.figure_factory.displot, shows kernel density estimation in the main pane and a rug plot
                  underneath. Traces are sorted by ascending median of the distribution.
        """

        # Option dictionaries: map user-facing option strings to the callables/metric specs used below.
        cons_functions = {
            "median": np.median,
            "average": np.mean,
            "sum": np.sum
        }
        # For the "... to ... profile" metrics the value is a pair:
        # [reference-profile aggregator, paired-distance function].
        # NOTE(review): `pw` is presumably sklearn.metrics.pairwise (imported above this chunk) — confirm.
        metrics = {
            "euclidean distance": "euclidean",
            "manhattan distance": "manhattan",
            "1 - cosine correlation": "cosine",
            "1 - pearson correlation": lambda x,y: 1-np.corrcoef(x,y)[0][1],
            "manhattan distance to average profile": [np.mean, pw.paired_manhattan_distances],
            "manhattan distance to median profile": [np.median, pw.paired_manhattan_distances]
        }

        # Option assertion
        assert consolidation in cons_functions.keys()
        assert metric in metrics.keys()

        # Filter experiments and intersection of proteins
        df = self.df_01_filtered_combined.loc[
            self.df_01_filtered_combined.index.get_level_values("Experiment").isin(multi_choice)].copy()
        df.index = df.index.droplevel(["Exp_Map", "Gene names", "Compartment"])
        if "Sequence" in df.index.names:
            df.index = df.index.droplevel(["Protein IDs"])
        # keep only proteins quantified in every experiment/map combination
        df_across = df.unstack(["Experiment", "Map"]).dropna().stack(["Experiment", "Map"])
        nPG = df_across.unstack(["Experiment", "Map"]).shape[0]

        # Calculate and consolidate distances
        distances = pd.DataFrame()
        for exp in multi_choice:
            df_m = df_across.xs(exp, level="Experiment", axis=0)
            maps = list(set(df_m.index.get_level_values("Map")))

            # this if clause switches between pairwise comparisons of profiles (else)
            # and comparisons to an average/median profile
            if " to " in metric:
                df_m = df_m.unstack("Map")
                # calculate reference profiles
                df_profiles = df_m.stack("Fraction").apply(metrics[metric][0], axis=1).unstack("Fraction")
                # calculate the distance for every map
                distances_m = pd.DataFrame()
                for m in maps:
                    dist_m = pd.DataFrame(metrics[metric][1](df_m.xs(m, level="Map", axis=1), df_profiles), columns = [m])
                    distances_m = pd.concat([distances_m, dist_m], axis=1)
                distances_m.index = df_m.index
            else:
                distances_m = pd.DataFrame()
                # loop over pairs of maps
                for i,mapi in enumerate(maps):
                    for j,mapj in enumerate(maps):
                        # only look at each comparison once
                        if j <= i:
                            continue
                        dist = pw.paired_distances(df_m.xs(mapi, level="Map", axis=0).values,
                                                   df_m.xs(mapj, level="Map", axis=0).values,
                                                   metric = metrics[metric])
                        dist = pd.Series(dist, name="_".join([mapi,mapj]))
                        distances_m = pd.concat([distances_m, dist], axis=1)
                distances_m.index = df_m.xs(maps[0], level="Map", axis=0).index
            # consolidate the per-map/per-pair distances into one value per protein
            distances = pd.concat([distances, pd.Series(distances_m.apply(cons_functions[consolidation], axis=1), name=exp)], axis=1)
        distances.index = distances_m.index
        self.distances = distances

        # Create and return plot
        plot = ff.create_distplot(distances.T.values, distances.columns, show_hist=False)
        plot.update_layout(title="Distribution of {} {}s, n = {}".format(metric, consolidation, nPG),
                           width=1500, height=600, template="simple_white",
                           xaxis={"rangemode": "nonnegative"})
        return plot

    def svm_processing(self):
        """
        The misclassification matrix, generated by Perseus, will be used for Recall/Precision calculation of each
        individual cluster and on a global level. Data will be stored in a local dictionary that will be assigned
        to the global dictionary.

        Args:
            self.df_SVM: dataframe, provided by Perseus, no index;
                         Column names: e.g. "Predicted: ER", "Predicted: NPC"
                         Rows: e.g. "True: ER", "True: NPC"

        Returns:
            self.analysed_datasets_dict: local dictionary (SVM_dict) will be assigned to the global dictionary
                self.analysed_datasets_dict, that is available for downloading
                {"Experiment name" : {see def read_jsonFile(self) [below]}
                 {"Misclassification Analysis": {
                     "True: ER" : {
                         "Recall": int,
                         "FDR": int,
                         "Precision": int,
                         "F1": int
                     }
                     "True: NPC" : {...}
                     ...
                     "Summary": {...}
                 }
                }
               }
        """
        global_SVM_dict_total = {}
        global_SVM_dict = {}
        for exp in self.json_dict.keys():
            try:
                df_SVM = pd.read_json(self.json_dict[exp]["Misclassification Matrix"])
                # strip the "True: " prefix so row labels match the predicted-column organelle names
                df_SVM["T: True group"] = df_SVM["T: True group"].str.replace(r'True: ', '')
            except KeyError:
                # experiment has no misclassification matrix stored — skip it
                continue
            SVM_dict = {}

            # diagonal = correctly classified counts per cluster
            all_correct = np.diag(df_SVM)
            members = df_SVM.sum(axis=1)
            total_members = 0
            # NOTE(review): "membrame" is a long-standing typo for "membrane"; kept as-is (local name only).
            membrame_members = 0
            membrane_correct = 0
            all_organelle_recall = []
            all_organelle_precision = []
            all_organelle_f1 = []
            F1_all_cluster = []
            no_of_membrane_clusters = 0
            total_correct = sum(all_correct)
            predicted_one_organelle = df_SVM.sum(axis=0)

            for i in range(len(df_SVM)):
                total_members = total_members + members[i]
                recall = all_correct[i]/members[i]
                fdr = (predicted_one_organelle[i]-all_correct[i])/predicted_one_organelle[i]
                precision = 1-fdr
                F1 = statistics.harmonic_mean([recall, precision])
                F1_all_cluster.append(F1)
                SVM_dict[df_SVM["T: True group"][i]] = {"Recall": recall, "FDR": fdr, "Precision": precision, "F1": F1}
                # the three named clusters are non-membrane complexes and are excluded from membrane/organelle stats
                if df_SVM["T: True group"][i]!="Nuclear pore complex" and df_SVM["T: True group"][i]!="Large Protein Complex" and df_SVM["T: True group"][i]!="Actin binding proteins" :
                    no_of_membrane_clusters = no_of_membrane_clusters+1
                    membrame_members = membrame_members + members[i]
                    membrane_correct = membrane_correct + all_correct[i]
                    all_organelle_f1.append(F1)
                    all_organelle_recall.append(recall)
                    all_organelle_precision.append(precision)

            total_recall = total_correct/total_members
            membrane_recall = membrane_correct/membrame_members
            av_per_organelle_recall = statistics.mean(all_organelle_recall)
            median_per_organelle_recall = statistics.median(all_organelle_recall)
            av_per_organelle_precision = statistics.mean(all_organelle_precision)
            avg_organelle_f1 = statistics.mean(all_organelle_f1)
            avg_F1_all_cluster = statistics.mean(F1_all_cluster)

            SVM_dict_total = {}
            SVM_dict_total["Avg. all clusters"] = {"Recall": total_recall, "F1": avg_F1_all_cluster} #total recall = marker prediction accuracy
            SVM_dict_total["Avg. all organelles"] = {"Recall": av_per_organelle_recall, "F1": avg_organelle_f1, "Precision": av_per_organelle_precision}
            SVM_dict_total["Membrane"] = {"Recall": membrane_recall}
            SVM_dict_total["Median. per organelle"] = {"Recall": median_per_organelle_recall}

            global_SVM_dict[exp] = SVM_dict
            global_SVM_dict_total[exp] = SVM_dict_total
            self.global_SVM_dict = global_SVM_dict
            self.global_SVM_dict_total = global_SVM_dict_total

        if global_SVM_dict=={}:
            # no experiment carried a misclassification matrix — nothing to cache
            self.cache_stored_SVM = False
            return
        else:
            # flatten the nested dicts into (Experiment, Type)-indexed frames, transposed for display
            df_clusterPerformance_global = pd.DataFrame.from_dict({(i,j): global_SVM_dict[i][j]
                                                                   for i in global_SVM_dict.keys()
                                                                   for j in global_SVM_dict[i].keys()},
                                                                  orient='index')
            df_clusterPerformance_global.index.names = ["Experiment", "Type"]
            self.df_clusterPerformance_global = df_clusterPerformance_global.T

            df_AvgClusterPerformance_global = pd.DataFrame.from_dict({(i,j): global_SVM_dict_total[i][j]
                                                                      for i in global_SVM_dict_total.keys()
                                                                      for j in global_SVM_dict_total[i].keys()},
                                                                     orient='index')
            df_AvgClusterPerformance_global.index.names = ["Experiment", "Type"]
            self.df_AvgClusterPerformance_global = df_AvgClusterPerformance_global.T
            self.cache_stored_SVM = True
            return

    def svm_plotting(self, multi_choice):
        """
        The marker performance (line/scatter plot) as well as marker prediction accuracy (bar plot) is visualized.

        Args:
            self: df_AvgClusterPerformance_global
                  df_clusterPerformance_global
            multi_choice: list of experiment names
        """
        df_clusterPerformance_global = self.df_clusterPerformance_global
        df_AvgClusterPerformance_global = self.df_AvgClusterPerformance_global

        df_AvgAllCluster = df_AvgClusterPerformance_global.xs("Avg. all clusters", level='Type', axis=1)
        fig_markerPredictionAccuracy = go.Figure()#data=[go.Bar(x=df_test.columns, y=df_test.loc["Recall"])])
        # one bar per experiment: overall recall = marker prediction accuracy
        for exp in multi_choice:
            fig_markerPredictionAccuracy.add_trace(go.Bar(x=[exp], y=[df_AvgAllCluster[exp].loc["Recall"]], name=exp))
        fig_markerPredictionAccuracy.update_layout(template="simple_white", #showlegend=False,
                        title="Marker prediction accuracy - Overall recall",
                        xaxis=go.layout.XAxis(linecolor="black",
                                              linewidth=1,
                                              mirror=True),
                        yaxis=go.layout.YAxis(linecolor="black",
                                              linewidth=1,
                                              title="Marker prediction accuracy [%]",
                                              mirror=True),
                        )

        fig_clusterPerformance = go.Figure()
        list_data_type = ["Avg. all clusters", "Avg. all organelles"]
        for i,exp in enumerate(multi_choice):
            df_clusterPerformance = df_clusterPerformance_global.xs(exp, level='Experiment', axis=1)
            df_AvgClusterPerformance = df_AvgClusterPerformance_global.xs(exp, level='Experiment', axis=1)
            # per-cluster F1 curve for this experiment, colored consistently with the summary markers below
            fig_clusterPerformance.add_trace(go.Scatter(x=df_clusterPerformance.columns, y=df_clusterPerformance.loc["F1"],
                                                        marker=dict(color=pio.templates["simple_white"].layout["colorway"][i]), name=exp))
            for data_type in list_data_type:
                fig_clusterPerformance.add_trace(go.Scatter(x=[data_type], y=[df_AvgClusterPerformance[data_type].loc["F1"]],
                                                            mode="markers",
                                                            showlegend=False,
                                                            marker=dict(color=pio.templates["simple_white"].layout["colorway"][i])
                                                            ))
        fig_clusterPerformance.update_layout(template="simple_white", #showlegend=False,
                        title="Cluster wise SVM analysis",
                        xaxis=go.layout.XAxis(linecolor="black",
                                              linewidth=1,
                                              mirror=True),
                        yaxis=go.layout.YAxis(linecolor="black",
                                              linewidth=1,
                                              title="F1 score", #- harmonic mean of recall and precision
                                              mirror=True),
                        )

        return fig_markerPredictionAccuracy, fig_clusterPerformance

    def __repr__(self):
        # full attribute dump; intentionally verbose for debugging sessions
        return str(self.__dict__)
        #return "This is a spatial dataset with {} lines.".format(len(self.df_original))


def svm_heatmap(df_SVM):
    """
    The misclassification matrix, generated by Perseus, will be displayed as a heatmap.

    Args:
        df_SVM: dataframe, provided by Perseus, no index;
                Column names: e.g. "Predicted: ER", "Predicted: NPC"
                Rows: e.g. "True: ER", "True: NPC"

    Returns:
        fig_SVMheatmap: heatmap of the misclassification matrix
    """
    #df_SVM = self.df_SVM.copy()
    #if hasattr(df_SVM, "keys") == True:
    # NOTE(review): bare except deliberately falls back to treating df_SVM as an already-parsed
    # dataframe when it is not a JSON-holding dict; consider narrowing to (TypeError, KeyError).
    try:
        df_SVM = pd.read_json(df_SVM["Misclassification Matrix"])
        df_SVM = df_SVM.set_index("T: True group")[::-1]
    except:
        df_SVM = df_SVM.set_index("T: True group")[::-1]
    y_axis_label = df_SVM.index
    x_axis_label = df_SVM.columns
    data_svm = df_SVM.values
    fig_SVMheatmap = go.Figure()

    fig_SVMheatmap.add_trace(go.Heatmap(
                             z=data_svm,
                             x = x_axis_label,
                             y = y_axis_label,
                             colorscale=[
                                 [0.0, "green"],
                                 [0.01, "white"],
                                 [1.0, "red"]
                             ],
                             ))

    return fig_SVMheatmap


def reframe_df_01_fromJson_for_Perseus(json_dict):
    """
    Make 0-1 normalized data from all experiments available for Perseus.

    Args:
        json_dict: dictionary, json file uploaded in manage dataset tab.

    Return:
        df: 0-1 normalized data (globally normalized), with Gene names, Protein IDs, Compartment as columns.
            Pattern for Column data: Exp_Map_Fraction
    """
    for exp_name in json_dict.keys():
        for data_type in json_dict[exp_name].keys():
            # the first experiment seeds df_01_combined; subsequent experiments are concatenated onto it
            if data_type == "0/1 normalized data" and exp_name == list(json_dict.keys())[0]:
                df_01_combined = pd.read_json(json_dict[exp_name][data_type])
                df_01_combined = df_01_combined.set_index(["Gene names", "Protein IDs", "Compartment"]).copy()
                # NOTE(review): drop() here is not assigned and has no inplace=True, so it is a no-op as written.
                df_01_combined.drop([col for col in df_01_combined.columns if not col.startswith("normalized profile")])
                # column labels are "Set?Map?Fraction" strings — split into a 3-level MultiIndex
                df_01_combined.columns = pd.MultiIndex.from_tuples([el.split("?") for el in df_01_combined.columns], names=["Set", "Map", "Fraction"])
                df_01_combined.rename(columns = {"normalized profile":exp_name}, inplace=True)
            elif data_type == "0/1 normalized data" and exp_name != list(json_dict.keys())[0]:
                df_01_toadd = pd.read_json(json_dict[exp_name][data_type])
                df_01_toadd = df_01_toadd.set_index(["Gene names", "Protein IDs", "Compartment"]).copy()
                # NOTE(review): same no-op drop() as above.
                df_01_toadd.drop([col for col in df_01_toadd.columns if not col.startswith("normalized profile")])
                df_01_toadd.columns = pd.MultiIndex.from_tuples([el.split("?") for el in df_01_toadd.columns], names=["Set", "Map", "Fraction"])
                df_01_toadd.rename(columns = {"normalized profile":exp_name}, inplace=True)
                df_01_combined = pd.concat([df_01_combined, df_01_toadd], axis=1)

    df_01_combined.columns.names = ["Experiment", "Map", "Fraction"]
    df = df_01_combined.stack(["Experiment", "Map"]).dropna(axis=0)
    # re-normalize each profile so fractions sum to 1 after dropping incomplete rows
    df = df.div(df.sum(axis=1), axis=0)
    index_ExpMap = df.index.get_level_values("Experiment")+"_"+df.index.get_level_values("Map")
    index_ExpMap.name = "Exp_Map"
    df.set_index(index_ExpMap, append=True, inplace=True)
    df.index = df.index.droplevel(["Map", "Experiment"])
    df = df.stack("Fraction").unstack(["Exp_Map", "Fraction"])
    # flatten the (Exp_Map, Fraction) column MultiIndex into "Exp_Map_Fraction" strings for Perseus
    df.columns = ["_".join(col) for col in df.columns.values]

    return df
[ "numpy.prod", "plotly.graph_objects.layout.YAxis", "matplotlib.pyplot.Figure", "io.BytesIO", "numpy.isfinite", "pandas.MultiIndex.from_tuples", "statistics.harmonic_mean", "plotly.express.box", "plotly.graph_objects.Bar", "plotly.graph_objects.Heatmap", "plotly.express.scatter", "sklearn.decom...
[((182063, 182074), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (182072, 182074), True, 'import plotly.graph_objects as go\n'), ((633, 652), 'numpy.unique', 'np.unique', (['x.values'], {}), '(x.values)\n', (642, 652), True, 'import numpy as np\n'), ((779, 791), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (788, 791), True, 'import numpy as np\n'), ((54853, 54875), 'pandas.DataFrame', 'pd.DataFrame', (['dict_npg'], {}), '(dict_npg)\n', (54865, 54875), True, 'import pandas as pd\n'), ((55253, 55276), 'pandas.DataFrame', 'pd.DataFrame', (['dict_npgf'], {}), '(dict_npgf)\n', (55265, 55276), True, 'import pandas as pd\n'), ((57943, 57954), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (57952, 57954), True, 'import plotly.graph_objects as go\n'), ((58401, 58412), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (58410, 58412), True, 'import plotly.graph_objects as go\n'), ((58857, 58868), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (58866, 58868), True, 'import plotly.graph_objects as go\n'), ((59349, 59540), 'plotly.express.bar', 'px.bar', (['self.df_npg'], {'x': '"""Fraction"""', 'y': '"""Protein Groups"""', 'color': '"""Protein Groups present in:"""', 'template': '"""simple_white"""', 'title': '"""Protein groups per fraction - before filtering"""', 'width': '(500)'}), "(self.df_npg, x='Fraction', y='Protein Groups', color=\n 'Protein Groups present in:', template='simple_white', title=\n 'Protein groups per fraction - before filtering', width=500)\n", (59355, 59540), True, 'import plotly.express as px\n'), ((59721, 59912), 'plotly.express.bar', 'px.bar', (['self.df_npgf'], {'x': '"""Fraction"""', 'y': '"""Protein Groups"""', 'color': '"""Protein Groups present in:"""', 'template': '"""simple_white"""', 'title': '"""Protein groups per fraction - after filtering"""', 'width': '(500)'}), "(self.df_npgf, x='Fraction', y='Protein Groups', color=\n 'Protein Groups present in:', template='simple_white', 
title=\n 'Protein groups per fraction - after filtering', width=500)\n", (59727, 59912), True, 'import plotly.express as px\n'), ((60047, 60058), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (60056, 60058), True, 'import plotly.graph_objects as go\n'), ((63276, 63295), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (63279, 63295), False, 'from sklearn.decomposition import PCA\n'), ((64120, 64134), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (64132, 64134), True, 'import pandas as pd\n'), ((72011, 72025), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (72023, 72025), True, 'import pandas as pd\n'), ((72076, 72090), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (72088, 72090), True, 'import pandas as pd\n'), ((72147, 72161), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (72159, 72161), True, 'import pandas as pd\n'), ((76516, 76530), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (76528, 76530), True, 'import pandas as pd\n'), ((78369, 78383), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (78381, 78383), True, 'import pandas as pd\n'), ((85488, 85502), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (85500, 85502), True, 'import pandas as pd\n'), ((89245, 89259), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (89257, 89259), True, 'import pandas as pd\n'), ((92155, 92169), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (92167, 92169), True, 'import pandas as pd\n'), ((92196, 92210), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (92208, 92210), True, 'import pandas as pd\n'), ((96078, 96092), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (96090, 96092), True, 'import pandas as pd\n'), ((106696, 106710), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (106708, 106710), True, 'import pandas as pd\n'), ((118331, 118345), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (118343, 118345), True, 'import pandas as pd\n'), 
((118717, 118736), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (118720, 118736), False, 'from sklearn.decomposition import PCA\n'), ((125579, 126000), 'plotly.express.scatter', 'px.scatter', ([], {'data_frame': 'df_global_pca', 'x': 'x_PCA', 'y': 'y_PCA', 'color': "('Compartment' if markerset_or_cluster == False else 'Cluster')", 'color_discrete_map': '(compartment_color if markerset_or_cluster == False else cluster_color)', 'title': '"""Protein subcellular localization by PCA"""', 'hover_data': "['Protein IDs', 'Gene names', 'Compartment']", 'facet_col': '"""Experiment"""', 'facet_col_wrap': '(2)', 'opacity': '(0.9)', 'template': '"""simple_white"""'}), "(data_frame=df_global_pca, x=x_PCA, y=y_PCA, color='Compartment' if\n markerset_or_cluster == False else 'Cluster', color_discrete_map=\n compartment_color if markerset_or_cluster == False else cluster_color,\n title='Protein subcellular localization by PCA', hover_data=[\n 'Protein IDs', 'Gene names', 'Compartment'], facet_col='Experiment',\n facet_col_wrap=2, opacity=0.9, template='simple_white')\n", (125589, 126000), True, 'import plotly.express as px\n'), ((127010, 127024), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (127022, 127024), True, 'import pandas as pd\n'), ((128371, 128385), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (128383, 128385), True, 'import pandas as pd\n'), ((129281, 129357), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['df_distances.columns'], {'names': "['Experiment', 'Map']"}), "(df_distances.columns, names=['Experiment', 'Map'])\n", (129306, 129357), True, 'import pandas as pd\n'), ((129608, 129622), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (129620, 129622), True, 'import pandas as pd\n'), ((133252, 133341), 'pandas.Categorical', 'pd.Categorical', (["df_distance_comp['Experiment']"], {'categories': 'multi_choice', 'ordered': '(True)'}), "(df_distance_comp['Experiment'], 
categories=multi_choice,\n ordered=True)\n", (133266, 133341), True, 'import pandas as pd\n'), ((146210, 146257), 'pandas.DataFrame', 'pd.DataFrame', (['dict_cluster_normalizedMedian_ref'], {}), '(dict_cluster_normalizedMedian_ref)\n', (146222, 146257), True, 'import pandas as pd\n'), ((153153, 153373), 'plotly.express.bar', 'px.bar', (['df_quantity_pr_pg_combined'], {'x': '"""Expxfiltering"""', 'y': '"""number of protein groups"""', 'color': '"""Experiment"""', 'barmode': '"""overlay"""', 'hover_data': "['type']", 'opacity': '(0.8)', 'color_discrete_sequence': 'px.colors.qualitative.D3'}), "(df_quantity_pr_pg_combined, x='Expxfiltering', y=\n 'number of protein groups', color='Experiment', barmode='overlay',\n hover_data=['type'], opacity=0.8, color_discrete_sequence=px.colors.\n qualitative.D3)\n", (153159, 153373), True, 'import plotly.express as px\n'), ((155610, 155860), 'plotly.express.bar', 'px.bar', (["df_quantity_pr_pg_combined.loc[df_quantity_pr_pg_combined.type == 'total']"], {'x': '"""Experiment"""', 'y': '"""data completeness of profiles"""', 'color': '"""Experiment"""', 'barmode': '"""overlay"""', 'hover_data': "['filtering']", 'template': '"""simple_white"""', 'opacity': '(0.8)'}), "(df_quantity_pr_pg_combined.loc[df_quantity_pr_pg_combined.type ==\n 'total'], x='Experiment', y='data completeness of profiles', color=\n 'Experiment', barmode='overlay', hover_data=['filtering'], template=\n 'simple_white', opacity=0.8)\n", (155616, 155860), True, 'import plotly.express as px\n'), ((169408, 169422), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (169420, 169422), True, 'import pandas as pd\n'), ((171539, 171613), 'plotly.figure_factory.create_distplot', 'ff.create_distplot', (['distances.T.values', 'distances.columns'], {'show_hist': '(False)'}), '(distances.T.values, distances.columns, show_hist=False)\n', (171557, 171613), True, 'import plotly.figure_factory as ff\n'), ((178450, 178461), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), 
'()\n', (178459, 178461), True, 'import plotly.graph_objects as go\n'), ((179322, 179333), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (179331, 179333), True, 'import plotly.graph_objects as go\n'), ((181762, 181810), 'pandas.read_json', 'pd.read_json', (["df_SVM['Misclassification Matrix']"], {}), "(df_SVM['Misclassification Matrix'])\n", (181774, 181810), True, 'import pandas as pd\n'), ((182109, 182227), 'plotly.graph_objects.Heatmap', 'go.Heatmap', ([], {'z': 'data_svm', 'x': 'x_axis_label', 'y': 'y_axis_label', 'colorscale': "[[0.0, 'green'], [0.01, 'white'], [1.0, 'red']]"}), "(z=data_svm, x=x_axis_label, y=y_axis_label, colorscale=[[0.0,\n 'green'], [0.01, 'white'], [1.0, 'red']])\n", (182119, 182227), True, 'import plotly.graph_objects as go\n'), ((5561, 5575), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5573, 5575), True, 'import pandas as pd\n'), ((5577, 5591), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5589, 5591), True, 'import pandas as pd\n'), ((73148, 73214), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Gene names', 'Map', 'Cluster', 'distance']"}), "(columns=['Gene names', 'Map', 'Cluster', 'distance'])\n", (73160, 73214), True, 'import pandas as pd\n'), ((73282, 73348), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Gene names', 'Map', 'Cluster', 'distance']"}), "(columns=['Gene names', 'Map', 'Cluster', 'distance'])\n", (73294, 73348), True, 'import pandas as pd\n'), ((73422, 73456), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Fraction']"}), "(columns=['Fraction'])\n", (73434, 73456), True, 'import pandas as pd\n'), ((73519, 73553), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Fraction']"}), "(columns=['Fraction'])\n", (73531, 73553), True, 'import pandas as pd\n'), ((78980, 79034), 'pandas.DataFrame', 'pd.DataFrame', (['d_m'], {'columns': '[m]', 'index': 'df_cluster.index'}), '(d_m, columns=[m], index=df_cluster.index)\n', (78992, 79034), True, 'import pandas as 
pd\n'), ((79073, 79122), 'pandas.concat', 'pd.concat', (['[df_distances_aggregated, d_m]'], {'axis': '(1)'}), '([df_distances_aggregated, d_m], axis=1)\n', (79082, 79122), True, 'import pandas as pd\n'), ((86575, 86586), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (86584, 86586), True, 'import plotly.graph_objects as go\n'), ((101732, 101746), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (101744, 101746), True, 'import pandas as pd\n'), ((101748, 101762), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (101760, 101762), True, 'import pandas as pd\n'), ((101836, 101850), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (101848, 101850), True, 'import pandas as pd\n'), ((101852, 101866), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (101864, 101866), True, 'import pandas as pd\n'), ((118549, 118585), 'pandas.concat', 'pd.concat', (['[df_mean, df_exp]'], {'axis': '(1)'}), '([df_mean, df_exp], axis=1)\n', (118558, 118585), True, 'import pandas as pd\n'), ((121007, 121021), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (121019, 121021), True, 'import pandas as pd\n'), ((121977, 122116), 'plotly.express.scatter_3d', 'px.scatter_3d', (['df_setofproteins_PCA'], {'x': '"""PC1"""', 'y': '"""PC2"""', 'z': '"""PC3"""', 'color': '"""Experiment"""', 'template': '"""simple_white"""', 'hover_data': "['Gene names']"}), "(df_setofproteins_PCA, x='PC1', y='PC2', z='PC3', color=\n 'Experiment', template='simple_white', hover_data=['Gene names'])\n", (121990, 122116), True, 'import plotly.express as px\n'), ((127615, 127629), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (127627, 127629), True, 'import pandas as pd\n'), ((136791, 136805), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (136803, 136805), True, 'import pandas as pd\n'), ((146080, 146157), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'Number of PG per Cluster': dict_quantified_cluster}"], {}), "({'Number of PG per Cluster': 
dict_quantified_cluster})\n", (146102, 146157), True, 'import pandas as pd\n'), ((147986, 148246), 'plotly.express.bar', 'px.bar', (['df_cluster_normalizedMedian_ref'], {'x': '"""Cluster"""', 'y': '"""Normalized Median"""', 'color': '"""Experiment"""', 'barmode': '"""group"""', 'title': '"""Ranking - normalization to reference experiments the median across all experiments for each cluster"""', 'template': '"""simple_white"""'}), "(df_cluster_normalizedMedian_ref, x='Cluster', y='Normalized Median',\n color='Experiment', barmode='group', title=\n 'Ranking - normalization to reference experiments the median across all experiments for each cluster'\n , template='simple_white')\n", (147992, 148246), True, 'import plotly.express as px\n'), ((159433, 159445), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (159443, 159445), True, 'import matplotlib.pyplot as plt\n'), ((159477, 159528), 'upsetplot.from_memberships', 'from_memberships', (['experiments'], {'data': 'overlapping_ids'}), '(experiments, data=overlapping_ids)\n', (159493, 159528), False, 'from upsetplot import from_memberships\n'), ((159541, 159602), 'upsetplot.plot', 'upplot', (['series_UpSetPlot'], {'fig': 'fig_UpSetPlot', 'show_counts': '"""%d"""'}), "(series_UpSetPlot, fig=fig_UpSetPlot, show_counts='%d')\n", (159547, 159602), True, 'from upsetplot import plot as upplot\n'), ((160976, 160985), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (160983, 160985), False, 'from io import BytesIO\n'), ((160998, 161061), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_img'], {'bbox_inches': '"""tight"""', 'format': '"""jpg"""', 'dpi': '(72)'}), "(out_img, bbox_inches='tight', format='jpg', dpi=72)\n", (161009, 161061), True, 'import matplotlib.pyplot as plt\n'), ((161121, 161140), 'PIL.Image.open', 'Image.open', (['out_img'], {}), '(out_img)\n', (161131, 161140), False, 'from PIL import Image\n'), ((161153, 161162), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (161160, 161162), True, 'import 
matplotlib.pyplot as plt\n'), ((161253, 161379), 'matplotlib_venn.venn2', 'venn2', (['sets_proteins_total'], {'set_labels': '[i for i in multi_choice_venn]', 'set_colors': 'px.colors.qualitative.D3[0:2]', 'alpha': '(0.8)'}), '(sets_proteins_total, set_labels=[i for i in multi_choice_venn],\n set_colors=px.colors.qualitative.D3[0:2], alpha=0.8)\n', (161258, 161379), False, 'from matplotlib_venn import venn2, venn3, venn3_circles\n'), ((161422, 161454), 'matplotlib.pyplot.title', 'plt.title', (['"""in at least one map"""'], {}), "('in at least one map')\n", (161431, 161454), True, 'import matplotlib.pyplot as plt\n'), ((161516, 161649), 'matplotlib_venn.venn2', 'venn2', (['sets_proteins_intersection'], {'set_labels': '[i for i in multi_choice_venn]', 'set_colors': 'px.colors.qualitative.D3[0:2]', 'alpha': '(0.8)'}), '(sets_proteins_intersection, set_labels=[i for i in multi_choice_venn],\n set_colors=px.colors.qualitative.D3[0:2], alpha=0.8)\n', (161521, 161649), False, 'from matplotlib_venn import venn2, venn3, venn3_circles\n'), ((161692, 161716), 'matplotlib.pyplot.title', 'plt.title', (['"""in all maps"""'], {}), "('in all maps')\n", (161701, 161716), True, 'import matplotlib.pyplot as plt\n'), ((173891, 173906), 'numpy.diag', 'np.diag', (['df_SVM'], {}), '(df_SVM)\n', (173898, 173906), True, 'import numpy as np\n'), ((175591, 175628), 'statistics.mean', 'statistics.mean', (['all_organelle_recall'], {}), '(all_organelle_recall)\n', (175606, 175628), False, 'import statistics\n'), ((175671, 175710), 'statistics.median', 'statistics.median', (['all_organelle_recall'], {}), '(all_organelle_recall)\n', (175688, 175710), False, 'import statistics\n'), ((175752, 175792), 'statistics.mean', 'statistics.mean', (['all_organelle_precision'], {}), '(all_organelle_precision)\n', (175767, 175792), False, 'import statistics\n'), ((175824, 175857), 'statistics.mean', 'statistics.mean', (['all_organelle_f1'], {}), '(all_organelle_f1)\n', (175839, 175857), False, 'import 
statistics\n'), ((175891, 175922), 'statistics.mean', 'statistics.mean', (['F1_all_cluster'], {}), '(F1_all_cluster)\n', (175906, 175922), False, 'import statistics\n'), ((7596, 7661), 'pkg_resources.resource_listdir', 'pkg_resources.resource_listdir', (['__name__', '"""annotations/complexes"""'], {}), "(__name__, 'annotations/complexes')\n", (7626, 7661), False, 'import pkg_resources\n'), ((26605, 26662), 'pandas.DataFrame', 'pd.DataFrame', (['df_normsilac_stacked'], {'columns': "['Ratio H/L']"}), "(df_normsilac_stacked, columns=['Ratio H/L'])\n", (26617, 26662), True, 'import pandas as pd\n'), ((31211, 31272), 'pandas.DataFrame', 'pd.DataFrame', (['df_lognorm_ratio_stacked'], {'columns': "['Ratio H/L']"}), "(df_lognorm_ratio_stacked, columns=['Ratio H/L'])\n", (31223, 31272), True, 'import pandas as pd\n'), ((38142, 38207), 'pandas.DataFrame', 'pd.DataFrame', (['df_lognorm_ratio_stacked'], {'columns': "['LFQ intensity']"}), "(df_lognorm_ratio_stacked, columns=['LFQ intensity'])\n", (38154, 38207), True, 'import pandas as pd\n'), ((51694, 51728), 'numpy.prod', 'np.prod', (['df_index_MapStacked.shape'], {}), '(df_index_MapStacked.shape)\n', (51701, 51728), True, 'import numpy as np\n'), ((52007, 52038), 'numpy.prod', 'np.prod', (['df_01_MapStacked.shape'], {}), '(df_01_MapStacked.shape)\n', (52014, 52038), True, 'import numpy as np\n'), ((52500, 52536), 'numpy.prod', 'np.prod', (['df_index_intersection.shape'], {}), '(df_index_intersection.shape)\n', (52507, 52536), True, 'import numpy as np\n'), ((53046, 53079), 'numpy.prod', 'np.prod', (['df_01_intersection.shape'], {}), '(df_01_intersection.shape)\n', (53053, 53079), True, 'import numpy as np\n'), ((56564, 56589), 'pandas.Series', 'pd.Series', (['self.fractions'], {}), '(self.fractions)\n', (56573, 56589), True, 'import pandas as pd\n'), ((56641, 56663), 'pandas.Series', 'pd.Series', (['list_npg_dc'], {}), '(list_npg_dc)\n', (56650, 56663), True, 'import pandas as pd\n'), ((56714, 56737), 'pandas.Series', 
'pd.Series', (['list_npgf_dc'], {}), '(list_npgf_dc)\n', (56723, 56737), True, 'import pandas as pd\n'), ((57448, 57508), 'plotly.graph_objects.layout.XAxis', 'go.layout.XAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, mirror=True)\n", (57463, 57508), True, 'import plotly.graph_objects as go\n'), ((57704, 57764), 'plotly.graph_objects.layout.YAxis', 'go.layout.YAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, mirror=True)\n", (57719, 57764), True, 'import plotly.graph_objects as go\n'), ((58110, 58187), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': "plot_df['filtering']", 'y': "plot_df['number of protein groups']", 'name': 't'}), "(x=plot_df['filtering'], y=plot_df['number of protein groups'], name=t)\n", (58116, 58187), True, 'import plotly.graph_objects as go\n'), ((58316, 58355), 'plotly.graph_objects.layout.YAxis', 'go.layout.YAxis', ([], {'title': '"""Protein Groups"""'}), "(title='Protein Groups')\n", (58331, 58355), True, 'import plotly.graph_objects as go\n'), ((58568, 58639), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': "plot_df['filtering']", 'y': "plot_df['number of profiles']", 'name': 't'}), "(x=plot_df['filtering'], y=plot_df['number of profiles'], name=t)\n", (58574, 58639), True, 'import plotly.graph_objects as go\n'), ((59037, 59114), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': "plot_df['type']", 'y': "plot_df['data completeness of profiles']", 'name': 't'}), "(x=plot_df['type'], y=plot_df['data completeness of profiles'], name=t)\n", (59043, 59114), True, 'import plotly.graph_objects as go\n'), ((59230, 59271), 'plotly.graph_objects.layout.YAxis', 'go.layout.YAxis', ([], {'title': '"""Data completness"""'}), "(title='Data completness')\n", (59245, 59271), True, 'import plotly.graph_objects as go\n'), ((60196, 60282), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': "self.df_npg_dc['Fraction']", 'y': 
'self.df_npg_dc[data_type]', 'name': 'data_type'}), "(x=self.df_npg_dc['Fraction'], y=self.df_npg_dc[data_type], name=\n data_type)\n", (60202, 60282), True, 'import plotly.graph_objects as go\n'), ((60448, 60473), 'plotly.graph_objects.layout.YAxis', 'go.layout.YAxis', ([], {'title': '""""""'}), "(title='')\n", (60463, 60473), True, 'import plotly.graph_objects as go\n'), ((67925, 67939), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (67937, 67939), True, 'import pandas as pd\n'), ((73639, 73653), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (73651, 73653), True, 'import pandas as pd\n'), ((73655, 73669), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (73667, 73669), True, 'import pandas as pd\n'), ((73671, 73685), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (73683, 73685), True, 'import pandas as pd\n'), ((78556, 78590), 'numpy.abs', 'np.abs', (['(x - ref_profile.iloc[0, :])'], {}), '(x - ref_profile.iloc[0, :])\n', (78562, 78590), True, 'import numpy as np\n'), ((86633, 86826), 'plotly.graph_objects.Box', 'go.Box', ([], {'x': "df_cluster_xmaps_distance['Map']", 'y': "df_cluster_xmaps_distance['distance']", 'boxpoints': '"""all"""', 'whiskerwidth': '(0.2)', 'marker_size': '(2)', 'hovertext': "df_cluster_xmaps_distance['Gene names']"}), "(x=df_cluster_xmaps_distance['Map'], y=df_cluster_xmaps_distance[\n 'distance'], boxpoints='all', whiskerwidth=0.2, marker_size=2,\n hovertext=df_cluster_xmaps_distance['Gene names'])\n", (86639, 86826), True, 'import plotly.graph_objects as go\n'), ((86988, 87192), 'plotly.graph_objects.Box', 'go.Box', ([], {'x': "df_cluster_xmaps_distance['Combined Maps']", 'y': "df_cluster_xmaps_distance['distance']", 'boxpoints': '"""all"""', 'whiskerwidth': '(0.2)', 'marker_size': '(2)', 'hovertext': "df_cluster_xmaps_distance['Gene names']"}), "(x=df_cluster_xmaps_distance['Combined Maps'], y=\n df_cluster_xmaps_distance['distance'], boxpoints='all', whiskerwidth=\n 0.2, marker_size=2, 
hovertext=df_cluster_xmaps_distance['Gene names'])\n", (86994, 87192), True, 'import plotly.graph_objects as go\n'), ((92360, 92374), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (92372, 92374), True, 'import pandas as pd\n'), ((97749, 97789), 'pandas.Series', 'pd.Series', ([], {'data': 'statistic_table_combined'}), '(data=statistic_table_combined)\n', (97758, 97789), True, 'import pandas as pd\n'), ((98227, 98241), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (98239, 98241), True, 'import pandas as pd\n'), ((129113, 129170), 'pandas.DataFrame', 'pd.DataFrame', (['d_m'], {'columns': '[(exp, m)]', 'index': 'df_exp.index'}), '(d_m, columns=[(exp, m)], index=df_exp.index)\n', (129125, 129170), True, 'import pandas as pd\n'), ((129202, 129240), 'pandas.concat', 'pd.concat', (['[df_distances, d_m]'], {'axis': '(1)'}), '([df_distances, d_m], axis=1)\n', (129211, 129240), True, 'import pandas as pd\n'), ((134037, 134048), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (134046, 134048), True, 'import plotly.graph_objects as go\n'), ((140388, 140461), 'pandas.Categorical', 'pd.Categorical', (["df_m['Experiment']"], {'categories': 'multi_choice', 'ordered': '(True)'}), "(df_m['Experiment'], categories=multi_choice, ordered=True)\n", (140402, 140461), True, 'import pandas as pd\n'), ((140778, 140851), 'pandas.Categorical', 'pd.Categorical', (["df_c['Experiment']"], {'categories': 'multi_choice', 'ordered': '(True)'}), "(df_c['Experiment'], categories=multi_choice, ordered=True)\n", (140792, 140851), True, 'import pandas as pd\n'), ((147690, 147795), 'pandas.Categorical', 'pd.Categorical', (["df_cluster_normalizedMedian_ref['Experiment']"], {'categories': 'multi_choice', 'ordered': '(True)'}), "(df_cluster_normalizedMedian_ref['Experiment'], categories=\n multi_choice, ordered=True)\n", (147704, 147795), True, 'import pandas as pd\n'), ((149855, 150130), 'plotly.express.box', 'px.box', (['df_cluster_normalizedMedian_ref'], {'x': 
'"""Experiment"""', 'y': '"""Normalized Median"""', 'title': '"""Median manhattan distance distribution for <br>all protein clusters (n>=5 per cluster)"""', 'color': '"""Experiment"""', 'points': '"""all"""', 'template': '"""simple_white"""', 'hover_name': '"""Cluster"""'}), "(df_cluster_normalizedMedian_ref, x='Experiment', y=\n 'Normalized Median', title=\n 'Median manhattan distance distribution for <br>all protein clusters (n>=5 per cluster)'\n , color='Experiment', points='all', template='simple_white', hover_name\n ='Cluster')\n", (149861, 150130), True, 'import plotly.express as px\n'), ((152608, 152708), 'pandas.Categorical', 'pd.Categorical', (["df_quantity_pr_pg_combined['Experiment']"], {'categories': 'multi_choice', 'ordered': '(True)'}), "(df_quantity_pr_pg_combined['Experiment'], categories=\n multi_choice, ordered=True)\n", (152622, 152708), True, 'import pandas as pd\n'), ((153772, 153990), 'plotly.express.bar', 'px.bar', (['df_quantity_pr_pg_combined'], {'x': '"""filtering"""', 'y': '"""number of profiles"""', 'color': '"""type"""', 'barmode': '"""overlay"""', 'labels': "{'Experiment': '', 'filtering': ''}", 'facet_col': '"""Experiment"""', 'template': '"""simple_white"""', 'opacity': '(1)'}), "(df_quantity_pr_pg_combined, x='filtering', y='number of profiles',\n color='type', barmode='overlay', labels={'Experiment': '', 'filtering':\n ''}, facet_col='Experiment', template='simple_white', opacity=1)\n", (153778, 153990), True, 'import plotly.express as px\n'), ((155271, 155371), 'pandas.Categorical', 'pd.Categorical', (["df_quantity_pr_pg_combined['Experiment']"], {'categories': 'multi_choice', 'ordered': '(True)'}), "(df_quantity_pr_pg_combined['Experiment'], categories=\n multi_choice, ordered=True)\n", (155285, 155371), True, 'import pandas as pd\n'), ((161820, 161946), 'matplotlib_venn.venn3', 'venn3', (['sets_proteins_total'], {'set_labels': '[i for i in multi_choice_venn]', 'set_colors': 'px.colors.qualitative.D3[0:3]', 'alpha': '(0.8)'}), 
'(sets_proteins_total, set_labels=[i for i in multi_choice_venn],\n set_colors=px.colors.qualitative.D3[0:3], alpha=0.8)\n', (161825, 161946), False, 'from matplotlib_venn import venn2, venn3, venn3_circles\n'), ((161989, 162021), 'matplotlib.pyplot.title', 'plt.title', (['"""in at least one map"""'], {}), "('in at least one map')\n", (161998, 162021), True, 'import matplotlib.pyplot as plt\n'), ((162083, 162216), 'matplotlib_venn.venn3', 'venn3', (['sets_proteins_intersection'], {'set_labels': '[i for i in multi_choice_venn]', 'set_colors': 'px.colors.qualitative.D3[0:3]', 'alpha': '(0.8)'}), '(sets_proteins_intersection, set_labels=[i for i in multi_choice_venn],\n set_colors=px.colors.qualitative.D3[0:3], alpha=0.8)\n', (162088, 162216), False, 'from matplotlib_venn import venn2, venn3, venn3_circles\n'), ((162259, 162283), 'matplotlib.pyplot.title', 'plt.title', (['"""in all maps"""'], {}), "('in all maps')\n", (162268, 162283), True, 'import matplotlib.pyplot as plt\n'), ((163501, 163599), 'pandas.Categorical', 'pd.Categorical', (["df_dynamicRange_combined['Experiment']"], {'categories': 'multi_choice', 'ordered': '(True)'}), "(df_dynamicRange_combined['Experiment'], categories=\n multi_choice, ordered=True)\n", (163515, 163599), True, 'import pandas as pd\n'), ((170079, 170093), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (170091, 170093), True, 'import pandas as pd\n'), ((170452, 170466), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (170464, 170466), True, 'import pandas as pd\n'), ((173630, 173691), 'pandas.read_json', 'pd.read_json', (["self.json_dict[exp]['Misclassification Matrix']"], {}), "(self.json_dict[exp]['Misclassification Matrix'])\n", (173642, 173691), True, 'import pandas as pd\n'), ((174643, 174688), 'statistics.harmonic_mean', 'statistics.harmonic_mean', (['[recall, precision]'], {}), '([recall, precision])\n', (174667, 174688), False, 'import statistics\n'), ((178605, 178671), 'plotly.graph_objects.Bar', 'go.Bar', ([], 
{'x': '[exp]', 'y': "[df_AvgAllCluster[exp].loc['Recall']]", 'name': 'exp'}), "(x=[exp], y=[df_AvgAllCluster[exp].loc['Recall']], name=exp)\n", (178611, 178671), True, 'import plotly.graph_objects as go\n'), ((178868, 178928), 'plotly.graph_objects.layout.XAxis', 'go.layout.XAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, mirror=True)\n", (178883, 178928), True, 'import plotly.graph_objects as go\n'), ((179036, 179141), 'plotly.graph_objects.layout.YAxis', 'go.layout.YAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'title': '"""Marker prediction accuracy [%]"""', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, title=\n 'Marker prediction accuracy [%]', mirror=True)\n", (179051, 179141), True, 'import plotly.graph_objects as go\n'), ((183193, 183237), 'pandas.read_json', 'pd.read_json', (['json_dict[exp_name][data_type]'], {}), '(json_dict[exp_name][data_type])\n', (183205, 183237), True, 'import pandas as pd\n'), ((4829, 4865), 're.match', 're.match', (['"""Gene name|Compartment"""', 'x'], {}), "('Gene name|Compartment', x)\n", (4837, 4865), False, 'import re\n'), ((74994, 75036), 'natsort.natsorted', 'natsort.natsorted', (['df_dist_to_median.index'], {}), '(df_dist_to_median.index)\n', (75011, 75036), False, 'import natsort\n'), ((80592, 80633), 'natsort.natsorted', 'natsort.natsorted', (['df_setofproteins.index'], {}), '(df_setofproteins.index)\n', (80609, 80633), False, 'import natsort\n'), ((80804, 80845), 'natsort.natsorted', 'natsort.natsorted', (['df_setofproteins.index'], {}), '(df_setofproteins.index)\n', (80821, 80845), False, 'import natsort\n'), ((81782, 81830), 'natsort.natsorted', 'natsort.natsorted', (['df_setofproteins_median.index'], {}), '(df_setofproteins_median.index)\n', (81799, 81830), False, 'import natsort\n'), ((87662, 87735), 'plotly.graph_objects.layout.XAxis', 'go.layout.XAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'title': 
'"""Map"""', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, title='Map', mirror=True)\n", (87677, 87735), True, 'import plotly.graph_objects as go\n'), ((87878, 87956), 'plotly.graph_objects.layout.YAxis', 'go.layout.YAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'title': '"""distance"""', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, title='distance', mirror=True)\n", (87893, 87956), True, 'import plotly.graph_objects as go\n'), ((90086, 90130), 'natsort.natsorted', 'natsort.natsorted', (['df_boxplot_manymaps.index'], {}), '(df_boxplot_manymaps.index)\n', (90103, 90130), False, 'import natsort\n'), ((94613, 94670), 'pandas.read_json', 'pd.read_json', (["self.analysis_summary_dict['Dynamic Range']"], {}), "(self.analysis_summary_dict['Dynamic Range'])\n", (94625, 94670), True, 'import pandas as pd\n'), ((96947, 96978), 'pandas.Series', 'pd.Series', ([], {'data': 'statistic_table'}), '(data=statistic_table)\n', (96956, 96978), True, 'import pandas as pd\n'), ((97846, 97885), 'pandas.DataFrame', 'pd.DataFrame', (['statistic_series_combined'], {}), '(statistic_series_combined)\n', (97858, 97885), True, 'import pandas as pd\n'), ((106899, 106943), 'pandas.read_json', 'pd.read_json', (['json_dict[exp_name][data_type]'], {}), '(json_dict[exp_name][data_type])\n', (106911, 106943), True, 'import pandas as pd\n'), ((121638, 121731), 'pandas.Categorical', 'pd.Categorical', (["df_setofproteins_PCA['Experiment']"], {'categories': 'multi_choice', 'ordered': '(True)'}), "(df_setofproteins_PCA['Experiment'], categories=multi_choice,\n ordered=True)\n", (121652, 121731), True, 'import pandas as pd\n'), ((125133, 125237), 'pandas.Categorical', 'pd.Categorical', (["df_global_pca_exp['Compartment']"], {'categories': '[x for x in compartments]', 'ordered': '(True)'}), "(df_global_pca_exp['Compartment'], categories=[x for x in\n compartments], ordered=True)\n", (125147, 125237), True, 'import pandas as pd\n'), ((138603, 138676), 
'plotly.graph_objects.layout.XAxis', 'go.layout.XAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'title': '"""Map"""', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, title='Map', mirror=True)\n", (138618, 138676), True, 'import plotly.graph_objects as go\n'), ((138950, 139028), 'plotly.graph_objects.layout.YAxis', 'go.layout.YAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'title': '"""distance"""', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, title='distance', mirror=True)\n", (138965, 139028), True, 'import plotly.graph_objects as go\n'), ((170282, 170322), 'pandas.concat', 'pd.concat', (['[distances_m, dist_m]'], {'axis': '(1)'}), '([distances_m, dist_m], axis=1)\n', (170291, 170322), True, 'import pandas as pd\n'), ((180505, 180565), 'plotly.graph_objects.layout.XAxis', 'go.layout.XAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, mirror=True)\n", (180520, 180565), True, 'import plotly.graph_objects as go\n'), ((180697, 180775), 'plotly.graph_objects.layout.YAxis', 'go.layout.YAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'title': '"""F1 score"""', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, title='F1 score', mirror=True)\n", (180712, 180775), True, 'import plotly.graph_objects as go\n'), ((183846, 183890), 'pandas.read_json', 'pd.read_json', (['json_dict[exp_name][data_type]'], {}), '(json_dict[exp_name][data_type])\n', (183858, 183890), True, 'import pandas as pd\n'), ((184381, 184429), 'pandas.concat', 'pd.concat', (['[df_01_combined, df_01_toadd]'], {'axis': '(1)'}), '([df_01_combined, df_01_toadd], axis=1)\n', (184390, 184429), True, 'import pandas as pd\n'), ((21545, 21575), 're.match', 're.match', (['self.name_pattern', 'i'], {}), '(self.name_pattern, i)\n', (21553, 21575), False, 'import re\n'), ((53381, 53394), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (53389, 53394), True, 'import numpy as np\n'), 
((53485, 53498), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (53493, 53498), True, 'import numpy as np\n'), ((53589, 53606), 'numpy.dtype', 'np.dtype', (['"""float"""'], {}), "('float')\n", (53597, 53606), True, 'import numpy as np\n'), ((53691, 53708), 'numpy.dtype', 'np.dtype', (['"""float"""'], {}), "('float')\n", (53699, 53708), True, 'import numpy as np\n'), ((53816, 53833), 'numpy.dtype', 'np.dtype', (['"""float"""'], {}), "('float')\n", (53824, 53833), True, 'import numpy as np\n'), ((69077, 69255), 'plotly.graph_objects.Scatter3d', 'go.Scatter3d', ([], {'x': 'df_setofproteins_PCA.PC1', 'y': 'df_setofproteins_PCA.PC2', 'z': 'df_setofproteins_PCA.PC3', 'hovertext': "df_setofproteins_PCA['Gene names']", 'mode': '"""markers"""', 'name': 'maps'}), "(x=df_setofproteins_PCA.PC1, y=df_setofproteins_PCA.PC2, z=\n df_setofproteins_PCA.PC3, hovertext=df_setofproteins_PCA['Gene names'],\n mode='markers', name=maps)\n", (69089, 69255), True, 'import plotly.graph_objects as go\n'), ((97039, 97069), 'pandas.DataFrame', 'pd.DataFrame', (['statistic_series'], {}), '(statistic_series)\n', (97051, 97069), True, 'import pandas as pd\n'), ((107907, 107967), 'pandas.concat', 'pd.concat', (['[df_01_combined, df_01_toadd]'], {'sort': '(False)', 'axis': '(1)'}), '([df_01_combined, df_01_toadd], sort=False, axis=1)\n', (107916, 107967), True, 'import pandas as pd\n'), ((108154, 108198), 'pandas.read_json', 'pd.read_json', (['json_dict[exp_name][data_type]'], {}), '(json_dict[exp_name][data_type])\n', (108166, 108198), True, 'import pandas as pd\n'), ((118987, 119010), 're.findall', 're.findall', (['"""PSMA.*"""', 'p'], {}), "('PSMA.*', p)\n", (118997, 119010), False, 'import re\n'), ((119138, 119161), 're.findall', 're.findall', (['"""PSMB.*"""', 'p'], {}), "('PSMB.*', p)\n", (119148, 119161), False, 'import re\n'), ((134265, 134439), 'plotly.graph_objects.Box', 'go.Box', ([], {'x': "[df_plot['Experiment'], df_plot['Map']]", 'y': "df_plot['distance']", 'boxpoints': 
'"""all"""', 'whiskerwidth': '(0.2)', 'marker_size': '(2)', 'name': 'exp', 'hovertext': "df_plot['Gene names']"}), "(x=[df_plot['Experiment'], df_plot['Map']], y=df_plot['distance'],\n boxpoints='all', whiskerwidth=0.2, marker_size=2, name=exp, hovertext=\n df_plot['Gene names'])\n", (134271, 134439), True, 'import plotly.graph_objects as go\n'), ((135408, 135493), 'plotly.graph_objects.layout.XAxis', 'go.layout.XAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'title': '"""Experiment"""', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, title='Experiment', mirror=True\n )\n", (135423, 135493), True, 'import plotly.graph_objects as go\n'), ((135822, 135900), 'plotly.graph_objects.layout.YAxis', 'go.layout.YAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'title': '"""Distance"""', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, title='Distance', mirror=True)\n", (135837, 135900), True, 'import plotly.graph_objects as go\n'), ((171128, 171166), 'pandas.concat', 'pd.concat', (['[distances_m, dist]'], {'axis': '(1)'}), '([distances_m, dist], axis=1)\n', (171137, 171166), True, 'import pandas as pd\n'), ((9340, 9374), 're.match', 're.match', (['self.imported_columns', 'x'], {}), '(self.imported_columns, x)\n', (9348, 9374), False, 'import re\n'), ((9526, 9560), 're.match', 're.match', (['self.imported_columns', 'x'], {}), '(self.imported_columns, x)\n', (9534, 9560), False, 'import re\n'), ((21651, 21681), 're.match', 're.match', (['self.name_pattern', 'i'], {}), '(self.name_pattern, i)\n', (21659, 21681), False, 'import re\n'), ((108451, 108495), 'pandas.read_json', 'pd.read_json', (['json_dict[exp_name][data_type]'], {}), '(json_dict[exp_name][data_type])\n', (108463, 108495), True, 'import pandas as pd\n'), ((108614, 108678), 'pandas.concat', 'pd.concat', (['[df_quantity_pr_pg_combined, df_quantity_pr_pg_toadd]'], {}), '([df_quantity_pr_pg_combined, df_quantity_pr_pg_toadd])\n', (108623, 108678), True, 'import pandas as pd\n'), 
((168415, 168432), 'numpy.corrcoef', 'np.corrcoef', (['x', 'y'], {}), '(x, y)\n', (168426, 168432), True, 'import numpy as np\n'), ((21800, 21830), 're.match', 're.match', (['self.name_pattern', 'i'], {}), '(self.name_pattern, i)\n', (21808, 21830), False, 'import re\n'), ((26391, 26406), 'numpy.nanmedian', 'np.nanmedian', (['x'], {}), '(x)\n', (26403, 26406), True, 'import numpy as np\n'), ((34141, 34155), 'numpy.isfinite', 'np.isfinite', (['x'], {}), '(x)\n', (34152, 34155), True, 'import numpy as np\n'), ((68579, 68757), 'plotly.graph_objects.Scatter3d', 'go.Scatter3d', ([], {'x': 'df_setofproteins_PCA.PC1', 'y': 'df_setofproteins_PCA.PC2', 'z': 'df_setofproteins_PCA.PC3', 'hovertext': "df_setofproteins_PCA['Gene names']", 'mode': '"""markers"""', 'name': 'maps'}), "(x=df_setofproteins_PCA.PC1, y=df_setofproteins_PCA.PC2, z=\n df_setofproteins_PCA.PC3, hovertext=df_setofproteins_PCA['Gene names'],\n mode='markers', name=maps)\n", (68591, 68757), True, 'import plotly.graph_objects as go\n'), ((108845, 108889), 'pandas.read_json', 'pd.read_json', (['json_dict[exp_name][data_type]'], {}), '(json_dict[exp_name][data_type])\n', (108857, 108889), True, 'import pandas as pd\n'), ((15296, 15312), 're.match', 're.match', (['s', 'col'], {}), '(s, col)\n', (15304, 15312), False, 'import re\n'), ((16054, 16086), 're.match', 're.match', (['self.name_pattern', 'col'], {}), '(self.name_pattern, col)\n', (16062, 16086), False, 'import re\n'), ((18232, 18248), 're.match', 're.match', (['s', 'col'], {}), '(s, col)\n', (18240, 18248), False, 'import re\n'), ((18892, 18924), 're.match', 're.match', (['self.name_pattern', 'col'], {}), '(self.name_pattern, col)\n', (18900, 18924), False, 'import re\n'), ((21981, 21997), 're.match', 're.match', (['s', 'col'], {}), '(s, col)\n', (21989, 21997), False, 'import re\n'), ((109544, 109588), 'pandas.read_json', 'pd.read_json', (['json_dict[exp_name][data_type]'], {}), '(json_dict[exp_name][data_type])\n', (109556, 109588), True, 'import 
pandas as pd\n'), ((110117, 110179), 'pandas.concat', 'pd.concat', (['[df_distances_combined, df_distances_toadd]'], {'axis': '(1)'}), '([df_distances_combined, df_distances_toadd], axis=1)\n', (110126, 110179), True, 'import pandas as pd\n'), ((15622, 15640), 're.findall', 're.findall', (['s', 'col'], {}), '(s, col)\n', (15632, 15640), False, 'import re\n'), ((15700, 15716), 're.match', 're.match', (['s', 'col'], {}), '(s, col)\n', (15708, 15716), False, 'import re\n'), ((15794, 15826), 're.match', 're.match', (['self.name_pattern', 'col'], {}), '(self.name_pattern, col)\n', (15802, 15826), False, 'import re\n'), ((18632, 18664), 're.match', 're.match', (['self.name_pattern', 'col'], {}), '(self.name_pattern, col)\n', (18640, 18664), False, 'import re\n'), ((110353, 110397), 'pandas.read_json', 'pd.read_json', (['json_dict[exp_name][data_type]'], {}), '(json_dict[exp_name][data_type])\n', (110365, 110397), True, 'import pandas as pd\n'), ((15945, 15977), 're.match', 're.match', (['self.name_pattern', 'col'], {}), '(self.name_pattern, col)\n', (15953, 15977), False, 'import re\n'), ((18783, 18815), 're.match', 're.match', (['self.name_pattern', 'col'], {}), '(self.name_pattern, col)\n', (18791, 18815), False, 'import re\n'), ((110626, 110670), 'pandas.read_json', 'pd.read_json', (['json_dict[exp_name][data_type]'], {}), '(json_dict[exp_name][data_type])\n', (110638, 110670), True, 'import pandas as pd\n'), ((110785, 110845), 'pandas.concat', 'pd.concat', (['[df_dynamicRange_combined, df_dynamicRange_toadd]'], {}), '([df_dynamicRange_combined, df_dynamicRange_toadd])\n', (110794, 110845), True, 'import pandas as pd\n')]
import matplotlib.pyplot as plt
import numpy as np

# Generate sample data: 100 points over [0, 10] plus their sine and cosine.
xs = np.linspace(0, 10, 100)
sin_vals = np.sin(xs)
cos_vals = np.cos(xs)

# Initialise the figure and three side-by-side axes (1 row x 3 columns).
plt.figure(figsize=(12, 8))
ax_bar = plt.subplot(131)
ax_barh = plt.subplot(132)
ax_scatter = plt.subplot(133)

# Vertical bar chart on the first axes.
ax_bar.bar([1, 2, 3], [3, 4, 5])
ax_bar.set_xlabel("x")
ax_bar.set_ylabel("y")

# Horizontal bar chart on the second axes, with fixed view limits.
ax_barh.barh([0.5, 1.5, 2.5], [0.5, 1, 2])
ax_barh.set_xlabel("xbar")
ax_barh.set_ylabel("ybar")
ax_barh.set_xlim(0, 3)
ax_barh.set_ylim(0, 3)

# Scatter of sin(x) against cos(x); pyplot-level labels/limits apply to the
# current (third) axes, matching the original script's behaviour.
ax_scatter.scatter(sin_vals, cos_vals)
plt.xlabel("sin(x)")
plt.ylabel("cos(x)")
plt.xlim(-1.2, 1.2)
plt.ylim(-1.5, 1.5)

plt.show()
[ "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "numpy.linspace", "matplotlib.pyplot.figure", "numpy.cos", "numpy.sin", "matplotlib.pyplot.ylim", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show" ]
[((64, 87), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (75, 87), True, 'import numpy as np\n'), ((93, 102), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (99, 102), True, 'import numpy as np\n'), ((108, 117), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (114, 117), True, 'import numpy as np\n'), ((146, 173), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (156, 173), True, 'import matplotlib.pyplot as plt\n'), ((179, 195), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (190, 195), True, 'import matplotlib.pyplot as plt\n'), ((201, 217), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (212, 217), True, 'import matplotlib.pyplot as plt\n'), ((223, 239), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (234, 239), True, 'import matplotlib.pyplot as plt\n'), ((461, 481), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""sin(x)"""'], {}), "('sin(x)')\n", (471, 481), True, 'import matplotlib.pyplot as plt\n'), ((482, 502), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cos(x)"""'], {}), "('cos(x)')\n", (492, 502), True, 'import matplotlib.pyplot as plt\n'), ((503, 522), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.2)', '(1.2)'], {}), '(-1.2, 1.2)\n', (511, 522), True, 'import matplotlib.pyplot as plt\n'), ((522, 541), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (530, 541), True, 'import matplotlib.pyplot as plt\n'), ((596, 606), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (604, 606), True, 'import matplotlib.pyplot as plt\n')]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Interactive image cropper: drag a rectangle, press 'c' to crop, Esc to quit."""
import argparse
import cv2, numpy as np

parser = argparse.ArgumentParser()
parser.add_argument('--path', default='../data/Lena.png', help='Image path.')
params = parser.parse_args()

image = cv2.imread(params.path)
image_to_show = np.copy(image)

# Drag-rectangle state shared with the mouse callback.
mouse_pressed = False
s_x = s_y = e_x = e_y = -1


def mouse_callback(event, x, y, flags, param):
    """Record the drag's start corner, preview the rectangle, record the end corner."""
    global image_to_show, s_x, s_y, e_x, e_y, mouse_pressed

    if event == cv2.EVENT_LBUTTONDOWN:
        # Drag started: remember the anchor corner and clear any old preview.
        mouse_pressed = True
        s_x, s_y = x, y
        image_to_show = np.copy(image)
    elif event == cv2.EVENT_MOUSEMOVE and mouse_pressed:
        # While the button is held, redraw the live selection rectangle.
        image_to_show = np.copy(image)
        cv2.rectangle(image_to_show, (s_x, s_y), (x, y), (0, 255, 0), 1)
    elif event == cv2.EVENT_LBUTTONUP:
        # Drag finished: remember the opposite corner.
        mouse_pressed = False
        e_x, e_y = x, y


cv2.namedWindow('image')
cv2.setMouseCallback('image', mouse_callback)

while True:
    cv2.imshow('image', image_to_show)
    key = cv2.waitKey(1)
    if key == ord('c'):
        # Normalise the corner order, then crop if the rectangle is non-degenerate.
        if s_y > e_y:
            s_y, e_y = e_y, s_y
        if s_x > e_x:
            s_x, e_x = e_x, s_x
        if e_y - s_y > 1 and e_x - s_x > 0:
            image = image[s_y:e_y, s_x:e_x]
            image_to_show = np.copy(image)
    elif key == 27:
        # Esc terminates the loop.
        break

cv2.destroyAllWindows()
[ "cv2.setMouseCallback", "numpy.copy", "cv2.rectangle", "argparse.ArgumentParser", "cv2.imshow", "cv2.waitKey", "cv2.destroyAllWindows", "cv2.imread", "cv2.namedWindow" ]
[((98, 123), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (121, 123), False, 'import argparse\n'), ((239, 262), 'cv2.imread', 'cv2.imread', (['params.path'], {}), '(params.path)\n', (249, 262), False, 'import cv2, numpy as np\n'), ((279, 293), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (286, 293), True, 'import cv2, numpy as np\n'), ((893, 917), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (908, 917), False, 'import cv2, numpy as np\n'), ((918, 963), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'mouse_callback'], {}), "('image', mouse_callback)\n", (938, 963), False, 'import cv2, numpy as np\n'), ((1335, 1358), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1356, 1358), False, 'import cv2, numpy as np\n'), ((981, 1015), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image_to_show'], {}), "('image', image_to_show)\n", (991, 1015), False, 'import cv2, numpy as np\n'), ((1024, 1038), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1035, 1038), False, 'import cv2, numpy as np\n'), ((570, 584), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (577, 584), True, 'import cv2, numpy as np\n'), ((1287, 1301), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (1294, 1301), True, 'import cv2, numpy as np\n'), ((679, 693), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (686, 693), True, 'import cv2, numpy as np\n'), ((706, 770), 'cv2.rectangle', 'cv2.rectangle', (['image_to_show', '(s_x, s_y)', '(x, y)', '(0, 255, 0)', '(1)'], {}), '(image_to_show, (s_x, s_y), (x, y), (0, 255, 0), 1)\n', (719, 770), False, 'import cv2, numpy as np\n')]
#!/usr/bin/env /usr/bin/python3 import sys import numpy as np import pymesh import matplotlib matplotlib.use('Qt5Agg') from matplotlib import cm from matplotlib import pyplot as plt from matplotlib.backends.backend_qt5agg import ( FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar ) from matplotlib.figure import Figure from matplotlib import collections as mc from matplotlib.animation import FuncAnimation from PyQt5.QtCore import Qt, QPoint, QRect, QSize from PyQt5.QtGui import QDoubleValidator, QMouseEvent from PyQt5.QtWidgets import ( QApplication, QLabel, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QComboBox, QCheckBox, QSlider, QProgressBar, QFormLayout, QLineEdit, QTabWidget, QSizePolicy, QFileDialog, QMessageBox ) from numbers import Number import pickle import warnings warnings.filterwarnings('ignore') ################################################################################ #def dNdx (nodes, faces): # N_e = faces.shape[0] # edges = np.vstack([faces[:,[1,2]], # faces[:,[2,0]], # faces[:,[0,1]]]) # vects = nodes[edges[:,1]] - nodes[edges[:,0]] # norms = np.flip(vects,axis=-1)*np.array([1,-1]) # areas = np.cross(vects[:N_e,:], vects[N_e:2*N_e,:])/2 # print(np.any(areas < 0)) # dN = np.zeros((N_e, 2, 3)) # dN[:,:,0] = norms[0:N_e,:]/areas[:,np.newaxis]/2 # dN[:,:,1] = norms[N_e:2*N_e,:]/areas[:,np.newaxis]/2 # dN[:,:,2] = norms[2*N_e:3*N_e,:]/areas[:,np.newaxis]/2 # return dN, areas #def B_matrix (nodes, faces): # N_e = faces.shape[0] # N = nodes.shape[0] # dN, areas = dNdx(nodes, faces) # B = np.zeros((N_e, 3, 6)) # B[:,0,0::2] = dN[:,0,:] # dN_i/dx # B[:,1,1::2] = dN[:,1,:] # dN_i/dy # B[:,2,0::2] = dN[:,1,:] # dN_i/dy # B[:,2,1::2] = dN[:,0,:] # dN_i/dx # return B, areas ################################################################################ # The above works but is slightly slower than the below simplified definitions. 
def B_matrix (nodes, faces):
    """Build per-element strain-displacement matrices for linear triangles.

    Parameters
    ----------
    nodes : (N_n, 2) float array
        2-D nodal coordinates.
    faces : (N_e, 3) int array
        Triangle connectivity (indices into ``nodes``).

    Returns
    -------
    B : (N_e, 3, 6) array
        Maps the 6 nodal displacements (x0, y0, x1, y1, x2, y2) of each
        element to its 3 engineering strains (xx, yy, xy).
    areas : (N_e,) array
        Signed triangle areas (cross product / 2); elements are assumed
        to be consistently oriented so these are positive.
    """
    # Edge vectors of each triangle.
    v_12 = nodes[faces[:,1]] - nodes[faces[:,0]]
    v_23 = nodes[faces[:,2]] - nodes[faces[:,1]]
    v_31 = nodes[faces[:,0]] - nodes[faces[:,2]]
    # 2-D cross product = twice the signed element area; used as the
    # denominator of the shape-function gradients below.
    areas = (np.cross(v_12, v_23))
    N_e = faces.shape[0]
    B = np.zeros((N_e, 3, 6))
    # Row 0: strain_xx = dN_i/dx * u_x  (opposite-edge y components).
    B[:,0,0] = v_23[:,1]/areas
    B[:,0,2] = v_31[:,1]/areas
    B[:,0,4] = v_12[:,1]/areas
    # Row 1: strain_yy = dN_i/dy * u_y  (opposite-edge x components).
    B[:,1,1] = -v_23[:,0]/areas
    B[:,1,3] = -v_31[:,0]/areas
    B[:,1,5] = -v_12[:,0]/areas
    # Row 2: strain_xy = dN_i/dy * u_x + dN_i/dx * u_y.
    B[:,2,0] = -v_23[:,0]/areas
    B[:,2,2] = -v_31[:,0]/areas
    B[:,2,4] = -v_12[:,0]/areas
    B[:,2,1] = v_23[:,1]/areas
    B[:,2,3] = v_31[:,1]/areas
    B[:,2,5] = v_12[:,1]/areas
    # Halve only now: the gradients above needed 2*area.
    areas = areas/2
    return B, areas

################################################################################
def solve_FEM (nodes, faces, forces, boundary, E = 1.6, nu = 0.4):
    """Solve a static plane-stress linear-elasticity problem.

    Parameters
    ----------
    nodes : (N_n, 2) array of nodal coordinates.
    faces : (N_e, 3) int array of triangle connectivity.
    forces : (N_n, 2) array of nodal forces.
    boundary : (N_n,) bool array
        True where the node is clamped (Dirichlet BC, zero displacement).
    E : float or (N_e,) array
        Young's modulus, either uniform or per element.
    nu : float
        Poisson ratio.

    Returns
    -------
    u : (N_n, 2) array of nodal displacements.
    """
    # Plane-stress constitutive matrix.
    D = 1/(1-nu**2) * np.array([[1, nu, 0],
                                [nu, 1, 0],
                                [0, 0, (1-nu)/2]])
    N_e = faces.shape[0]
    N_n = nodes.shape[0]
    N_dof = 2*N_n
    # Expand the per-node boundary mask to one entry per degree of freedom.
    reshaped_boundary = np.vstack([boundary,boundary]).T.reshape(N_dof)
    forces = forces.reshape(N_dof)
    # Augment the RHS with zeros, one per constrained dof (Dirichlet BC).
    forces = np.append(forces, np.zeros(np.count_nonzero(reshaped_boundary))) # Dirichlet BC
    B, areas = B_matrix(nodes, faces)
    K = np.zeros((N_dof, N_dof))
    if isinstance(E, Number):
        E = E * np.ones(faces.shape[0])
    # Assemble the global stiffness matrix element by element.
    for i in range(N_e):
        Ke = areas[i]*E[i]*(B[i,:,:].T.dot(D).dot(B[i,:,:]))
        # x-dof indices of this element's nodes (y-dofs are idx+1).
        idx = np.array(faces[i]*2)[np.newaxis]
        K[idx.T, idx] += Ke[0::2,0::2] # XX
        K[(idx+1).T, idx+1] += Ke[1::2,1::2]# YY
        K[(idx+1).T, idx] += Ke[1::2,0::2] # YX
        K[idx.T, idx+1] += Ke[0::2,1::2] # XY
    # Append identity rows for the constrained dofs, enforcing u = 0 there
    # in a least-squares sense (system becomes rectangular).
    K = np.append(K, np.identity(N_dof)[reshaped_boundary,:], axis = 0) # Dirichlet BC
    u = np.linalg.lstsq(K,forces, # np.linalg.solve requires square matrix
                        rcond=None)[0].reshape((N_n,2))
    return u

################################################################################
def smooth_k_step (x,k=2): # k should be >= 1
    """Smooth step function: 0 for x <= 0, 1 for x >= 1, C-infinity ramp
    between.  ``k`` controls the steepness of the ramp.

    NOTE: the tanh/sqrt branch is evaluated even where the np.where masks
    select 0 or 1, which emits divide/invalid warnings at x = 0, 1 --
    presumably why the module suppresses warnings globally.
    """
    return np.where(x > 0,
                    np.where(x<1,
                             0.5*(np.tanh(k*(2*x-1)/2/np.sqrt(x*(1-x)))+1),
                             1),
                    0)

################################################################################
class FEM_system:
    """Time-stepped 2-D FEM model of a growing gut tube cross-section.

    Nodes near y = 0 are clamped; faces on the left/right flanks get
    time-dependent stiffness and target-area changes, and the resulting
    area-mismatch forces drive the deformation, recorded in ``history``.
    """

    def __init__ (self, nodes,
                  faces):
        self.nodes = nodes
        self.faces = faces
        self.E_0 = 1.6e1 # Young's modulus
        self.nu_0 = 0.4 # Poisson ratio
        self.time_index = 0
        self.final_time = 8.
        self.delta_time = 0.1
        # Node positions at every time step, history[0] = initial state.
        self.history = np.zeros(
            (int(np.floor(self.final_time / self.delta_time)) + 1,
             self.nodes.shape[0], 2))
        self.history[0] = self.nodes.copy()
        # Per-element Young's modulus (modulated over time by recalculate).
        self.E = self.E_0*np.ones(self.faces.shape[0])
        self.initial_nodes = self.nodes.copy()
        # Unique undirected edges, used only for plotting the wireframe.
        self.edges = np.unique(np.sort(
            np.vstack([np.stack((self.faces[:,0], self.faces[:,1]), axis=1),
                       np.stack((self.faces[:,1], self.faces[:,2]), axis=1),
                       np.stack((self.faces[:,2], self.faces[:,0]), axis=1)]),
            axis=1), axis=0)
        # Node classification: 0 = clamped boundary, 2 = left flank,
        # 3 = right flank, 1 = everything else.
        self.node_types = np.ones(self.nodes.shape[0], dtype = int)
        self.node_types[self.nodes[:,1] < 0.1] = 0 # boundary
        mask = np.logical_and(np.logical_not(self.nodes[:,1] < 0.1),
                              np.logical_and(self.nodes[:,1] < 1.75,
                                             self.nodes[:,0] < 0))
        self.node_types[mask] = 2 # left
        mask = np.logical_and(np.logical_not(self.nodes[:,1] < 0.1),
                              np.logical_and(self.nodes[:,1] < 1.75,
                                             self.nodes[:,0] > 0))
        self.node_types[mask] = 3 # right
        # Faces whose three nodes all lie in the left/right regions.
        self.left_faces = np.logical_and(np.logical_and(
            self.node_types[self.faces[:,0]] == 2,
            self.node_types[self.faces[:,1]] == 2),
            self.node_types[self.faces[:,2]] == 2)
        self.right_faces = np.logical_and(np.logical_and(
            self.node_types[self.faces[:,0]] == 3,
            self.node_types[self.faces[:,1]] == 3),
            self.node_types[self.faces[:,2]] == 3)
        self.areas = np.abs(np.cross(self.nodes[self.faces[:,1],:] - \
                                     self.nodes[self.faces[:,0],:],
                                     self.nodes[self.faces[:,2],:] - \
                                     self.nodes[self.faces[:,0],:])) * 0.5
        self.initial_areas = self.areas.copy()
        self.target_areas = self.areas.copy()
        self.forces = np.zeros_like(self.nodes)

    def reset(self):
        """Restore the model to its initial configuration.

        FIX: take copies of the stored initial arrays.  The original code
        aliased ``self.nodes``/``self.areas``/``self.target_areas`` directly
        to ``initial_nodes``/``initial_areas``; the in-place updates in
        ``take_step`` (``self.nodes += u``) and ``recalculate`` (item
        assignment into ``target_areas``) would then silently corrupt the
        saved initial state, so reset only worked once.
        """
        self.nodes = self.initial_nodes.copy()
        self.areas = self.initial_areas.copy()
        self.target_areas = self.initial_areas.copy()
        self.forces = np.zeros_like(self.nodes)
        self.E = self.E_0*np.ones(self.faces.shape[0])
        # NOTE(review): self.time_index is left untouched here; if a full
        # restart of the simulation is intended it should also go to 0 --
        # confirm against callers.

    def recalculate(self, time):
        """Update stiffness, target areas and nodal forces at ``time``.

        Left faces stiffen and shrink (late, from t ~ 4); right faces
        soften and grow (early, from t = 0).  Forces are proportional to
        log(target_area / current_area), distributed along each element
        edge normal.
        """
        later_change = smooth_k_step((time-4)/2)
        early_change = smooth_k_step(time/4)
        self.E[self.left_faces] = self.E_0*(1 + 0.3*later_change)
        self.E[self.right_faces] = self.E_0*(1 - 0.2*early_change)
        self.target_areas[self.left_faces] = self.initial_areas[
                                                 self.left_faces] * \
                                             (1 - 0.2*later_change)
        self.target_areas[self.right_faces] = self.initial_areas[
                                                  self.right_faces] * \
                                              (1 + 0.3*early_change)
        # Current element areas from the deformed node positions.
        self.areas = np.abs(np.cross(self.nodes[self.faces[:,1],:] - \
                                     self.nodes[self.faces[:,0],:],
                                     self.nodes[self.faces[:,2],:] - \
                                     self.nodes[self.faces[:,0],:])) * 0.5
        self.forces = np.zeros_like(self.nodes)
        force_multiplier = np.log(self.target_areas / self.areas)
        # For each node of each face: force along the (rotated) opposite
        # edge, scaled by the area mismatch, accumulated with bincount.
        vectors = self.nodes[self.faces[:,2],:] - \
                  self.nodes[self.faces[:,1],:]
        vectors = np.flip(vectors,axis=-1)*np.array([-1,1]) * \
                  force_multiplier[:,np.newaxis]
        self.forces[:,0] += np.bincount(self.faces[:,0], vectors[:,0],
                                        minlength = self.nodes.shape[0])
        self.forces[:,1] += np.bincount(self.faces[:,0], vectors[:,1],
                                        minlength = self.nodes.shape[0])
        vectors = self.nodes[self.faces[:,0],:] - \
                  self.nodes[self.faces[:,2],:]
        vectors = np.flip(vectors,axis=-1)*np.array([-1,1]) * \
                  force_multiplier[:,np.newaxis]
        self.forces[:,0] += np.bincount(self.faces[:,1], vectors[:,0],
                                        minlength = self.nodes.shape[0])
        self.forces[:,1] += np.bincount(self.faces[:,1], vectors[:,1],
                                        minlength = self.nodes.shape[0])
        vectors = self.nodes[self.faces[:,1],:] - \
                  self.nodes[self.faces[:,0],:]
        vectors = np.flip(vectors,axis=-1)*np.array([-1,1]) * \
                  force_multiplier[:,np.newaxis]
        self.forces[:,0] += np.bincount(self.faces[:,2], vectors[:,0],
                                        minlength = self.nodes.shape[0])
        self.forces[:,1] += np.bincount(self.faces[:,2], vectors[:,1],
                                        minlength = self.nodes.shape[0])
        self.forces[self.node_types == 0, :] *= 0 # no forces on boundary

    def take_step(self):
        """Advance one time step: recompute forces, solve, move nodes."""
        self.time_index += 1
        self.recalculate(self.time_index * self.delta_time)
        u = solve_FEM(self.nodes, self.faces, self.forces,
                      self.node_types == 0, self.E, self.nu_0)
        u[self.node_types == 0, :] = 0 # make sure Dirichlet BC enforced
        self.nodes += u
        self.history[self.time_index] = self.nodes.copy()

    def run_sim(self):
        """Run the full simulation from t = 0 to final_time."""
        for index in np.arange(int(np.floor(self.final_time /
                                            self.delta_time))):
            self.take_step()

    def plot_system(self, ax, time = 0.):
        """Draw the wireframe and colored node classes at ``time`` on ``ax``."""
        if time > self.final_time:
            time = self.final_time
        time_index = int(np.floor(time / self.delta_time))
        lines = self.history[time_index, self.edges]
        lc = mc.LineCollection(lines, colors = 'black')
        ax.add_collection(lc)
        ax.plot(self.history[time_index, self.node_types == 0,0],
                self.history[time_index, self.node_types == 0,1],
                linestyle = '', marker = 'o', markersize = 3, color = 'red')
        ax.plot(self.history[time_index, self.node_types == 1,0],
                self.history[time_index, self.node_types == 1,1],
                linestyle = '', marker = 'o', markersize = 3, color = 'green')
        ax.plot(self.history[time_index, self.node_types == 2,0],
                self.history[time_index, self.node_types == 2,1],
                linestyle = '', marker = 'o', markersize = 3, color = 'orange')
        ax.plot(self.history[time_index, self.node_types == 3,0],
                self.history[time_index, self.node_types == 3,1],
                linestyle = '', marker = 'o', markersize = 3, color = 'blue')
        ax.set_xlim([-3.0,3.0])
        ax.set_ylim([-0.2,4.2])
        ax.invert_yaxis()

################################################################################
class MPLCanvas(FigureCanvas):
    """Matplotlib canvas widget embedded in the Qt window."""

    def __init__ (self, parent=None, width=8, height=8, dpi=100):
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.ax = self.fig.add_subplot(111)
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self,
                                   QSizePolicy.Expanding,
                                   QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        self.fig.tight_layout()

################################################################################
class Window (QWidget):
    """Main application window: runs the simulation once, pickles the
    result, and lets the user scrub through time with a slider/textbox."""

    def __init__ (self):
        super().__init__()
        self.title = 'Gut Tube 2D FEM'
        meshfile = '2d_tube_model_mesh.stl'
        mesh = pymesh.load_mesh(meshfile)
        nodes = mesh.nodes[:,:2].copy()
        faces = mesh.faces.copy()
        self.model = FEM_system(nodes, faces)
        self.model.run_sim()
        # Cache the solved model so reruns can load it instead (toggle below).
        with open('model.pkl','wb') as pickle_file:
            pickle.dump(self.model, pickle_file)
#        with open('model.pkl','rb') as pickle_file:
#            self.model = pickle.load(pickle_file)
        self.time = 0.
        self.canvas = MPLCanvas()
        self.toolbar = NavigationToolbar(self.canvas, self)
        self.setup_GUI()

    def setup_GUI (self):
        """Lay out canvas, toolbar, time slider and time textbox."""
        self.setWindowTitle(self.title)
        main_layout = QVBoxLayout()
        main_layout.addWidget(self.canvas)
        main_layout.addWidget(self.toolbar)
        slider_layout = QHBoxLayout()
        # One slider step per simulation time step.
        self.slider = QSlider(Qt.Horizontal)
        self.slider.setMinimum(0)
        self.slider.setMaximum(int(np.floor(self.model.final_time / \
                                            self.model.delta_time)))
        self.slider.setValue(0)
        self.slider.setSingleStep(1)
        self.slider.valueChanged.connect(self.slider_select)
        slider_layout.addWidget(self.slider)
        slider_layout.addWidget(QLabel('t:'))
        self.textbox = QLineEdit()
        self.textbox.setMaxLength(4)
        self.textbox.setFixedWidth(40)
        self.textbox.setText(f'{self.time:.1f}')
        self.textbox.setValidator(QDoubleValidator())
        self.textbox.editingFinished.connect(self.textbox_select)
        slider_layout.addWidget(self.textbox)
        main_layout.addLayout(slider_layout)
        self.setLayout(main_layout)
        self.plot()

    def slider_select (self):
        """Slider moved: sync the textbox and redraw."""
        self.time = self.slider.value() * self.model.delta_time
        self.textbox.setText(f'{self.time:.1f}')
        self.plot()

    def textbox_select (self):
        """Textbox edited: clamp to [0, final_time], sync slider, redraw."""
        input_time = float(self.textbox.text())
        if input_time < 0.:
            input_time = 0.
        if input_time > self.model.final_time:
            input_time = self.model.final_time
        self.time = input_time
        self.textbox.setText(f'{self.time:.1f}')
        self.slider.setValue(int(np.floor(input_time / \
                                          self.model.delta_time)))
        self.plot()

    def plot (self, time = 0):
        """Clear the axes (removing stale colorbars) and redraw at self.time.

        NOTE(review): the ``time`` parameter is unused -- the method always
        plots ``self.time``; kept for interface compatibility.
        """
        for image in self.canvas.ax.images:
            if image.colorbar is not None:
                image.colorbar.remove()
        for collection in self.canvas.ax.collections:
            if collection.colorbar is not None:
                collection.colorbar.remove()
        self.canvas.ax.clear()
        self.model.plot_system(self.canvas.ax, self.time)
        self.canvas.draw()

################################################################################
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())

################################################################################
# EOF
[ "matplotlib.backends.backend_qt5agg.NavigationToolbar2QT", "numpy.sqrt", "numpy.log", "numpy.logical_not", "matplotlib.collections.LineCollection", "numpy.count_nonzero", "numpy.array", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry", "PyQt5.QtWidgets.QApplication", "PyQt5.Qt...
[((95, 119), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (109, 119), False, 'import matplotlib\n'), ((881, 914), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (904, 914), False, 'import warnings\n'), ((2165, 2185), 'numpy.cross', 'np.cross', (['v_12', 'v_23'], {}), '(v_12, v_23)\n', (2173, 2185), True, 'import numpy as np\n'), ((2214, 2235), 'numpy.zeros', 'np.zeros', (['(N_e, 3, 6)'], {}), '((N_e, 3, 6))\n', (2222, 2235), True, 'import numpy as np\n'), ((3144, 3168), 'numpy.zeros', 'np.zeros', (['(N_dof, N_dof)'], {}), '((N_dof, N_dof))\n', (3152, 3168), True, 'import numpy as np\n'), ((12793, 12815), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (12805, 12815), False, 'from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QComboBox, QCheckBox, QSlider, QProgressBar, QFormLayout, QLineEdit, QTabWidget, QSizePolicy, QFileDialog, QMessageBox\n'), ((2781, 2837), 'numpy.array', 'np.array', (['[[1, nu, 0], [nu, 1, 0], [0, 0, (1 - nu) / 2]]'], {}), '([[1, nu, 0], [nu, 1, 0], [0, 0, (1 - nu) / 2]])\n', (2789, 2837), True, 'import numpy as np\n'), ((4786, 4825), 'numpy.ones', 'np.ones', (['self.nodes.shape[0]'], {'dtype': 'int'}), '(self.nodes.shape[0], dtype=int)\n', (4793, 4825), True, 'import numpy as np\n'), ((5921, 5946), 'numpy.zeros_like', 'np.zeros_like', (['self.nodes'], {}), '(self.nodes)\n', (5934, 5946), True, 'import numpy as np\n'), ((6092, 6117), 'numpy.zeros_like', 'np.zeros_like', (['self.nodes'], {}), '(self.nodes)\n', (6105, 6117), True, 'import numpy as np\n'), ((6870, 6895), 'numpy.zeros_like', 'np.zeros_like', (['self.nodes'], {}), '(self.nodes)\n', (6883, 6895), True, 'import numpy as np\n'), ((6917, 6955), 'numpy.log', 'np.log', (['(self.target_areas / self.areas)'], {}), '(self.target_areas / self.areas)\n', (6923, 6955), True, 'import numpy as np\n'), ((7155, 7230), 'numpy.bincount', 
'np.bincount', (['self.faces[:, 0]', 'vectors[:, 0]'], {'minlength': 'self.nodes.shape[0]'}), '(self.faces[:, 0], vectors[:, 0], minlength=self.nodes.shape[0])\n', (7166, 7230), True, 'import numpy as np\n'), ((7262, 7337), 'numpy.bincount', 'np.bincount', (['self.faces[:, 0]', 'vectors[:, 1]'], {'minlength': 'self.nodes.shape[0]'}), '(self.faces[:, 0], vectors[:, 1], minlength=self.nodes.shape[0])\n', (7273, 7337), True, 'import numpy as np\n'), ((7546, 7621), 'numpy.bincount', 'np.bincount', (['self.faces[:, 1]', 'vectors[:, 0]'], {'minlength': 'self.nodes.shape[0]'}), '(self.faces[:, 1], vectors[:, 0], minlength=self.nodes.shape[0])\n', (7557, 7621), True, 'import numpy as np\n'), ((7653, 7728), 'numpy.bincount', 'np.bincount', (['self.faces[:, 1]', 'vectors[:, 1]'], {'minlength': 'self.nodes.shape[0]'}), '(self.faces[:, 1], vectors[:, 1], minlength=self.nodes.shape[0])\n', (7664, 7728), True, 'import numpy as np\n'), ((7937, 8012), 'numpy.bincount', 'np.bincount', (['self.faces[:, 2]', 'vectors[:, 0]'], {'minlength': 'self.nodes.shape[0]'}), '(self.faces[:, 2], vectors[:, 0], minlength=self.nodes.shape[0])\n', (7948, 8012), True, 'import numpy as np\n'), ((8044, 8119), 'numpy.bincount', 'np.bincount', (['self.faces[:, 2]', 'vectors[:, 1]'], {'minlength': 'self.nodes.shape[0]'}), '(self.faces[:, 2], vectors[:, 1], minlength=self.nodes.shape[0])\n', (8055, 8119), True, 'import numpy as np\n'), ((8857, 8897), 'matplotlib.collections.LineCollection', 'mc.LineCollection', (['lines'], {'colors': '"""black"""'}), "(lines, colors='black')\n", (8874, 8897), True, 'from matplotlib import collections as mc\n'), ((9932, 9972), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(width, height)', 'dpi': 'dpi'}), '(figsize=(width, height), dpi=dpi)\n', (9938, 9972), False, 'from matplotlib.figure import Figure\n'), ((10013, 10050), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__', 'FigureCanvas.__init__', (['self', 'self.fig'], {}), '(self, self.fig)\n', 
(10034, 10050), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((10078, 10156), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy', 'FigureCanvas.setSizePolicy', (['self', 'QSizePolicy.Expanding', 'QSizePolicy.Expanding'], {}), '(self, QSizePolicy.Expanding, QSizePolicy.Expanding)\n', (10104, 10156), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((10167, 10200), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry', 'FigureCanvas.updateGeometry', (['self'], {}), '(self)\n', (10194, 10200), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((10457, 10483), 'pymesh.load_mesh', 'pymesh.load_mesh', (['meshfile'], {}), '(meshfile)\n', (10473, 10483), False, 'import pymesh\n'), ((10846, 10882), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.canvas', 'self'], {}), '(self.canvas, self)\n', (10863, 10882), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((10977, 10990), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (10988, 10990), False, 'from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QComboBox, QCheckBox, QSlider, QProgressBar, QFormLayout, QLineEdit, QTabWidget, QSizePolicy, QFileDialog, QMessageBox\n'), ((11084, 11097), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (11095, 11097), False, 'from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QComboBox, QCheckBox, QSlider, QProgressBar, QFormLayout, QLineEdit, QTabWidget, QSizePolicy, QFileDialog, QMessageBox\n'), ((11114, 11136), 'PyQt5.QtWidgets.QSlider', 'QSlider', 
(['Qt.Horizontal'], {}), '(Qt.Horizontal)\n', (11121, 11136), False, 'from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QComboBox, QCheckBox, QSlider, QProgressBar, QFormLayout, QLineEdit, QTabWidget, QSizePolicy, QFileDialog, QMessageBox\n'), ((11473, 11484), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (11482, 11484), False, 'from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QComboBox, QCheckBox, QSlider, QProgressBar, QFormLayout, QLineEdit, QTabWidget, QSizePolicy, QFileDialog, QMessageBox\n'), ((3051, 3086), 'numpy.count_nonzero', 'np.count_nonzero', (['reshaped_boundary'], {}), '(reshaped_boundary)\n', (3067, 3086), True, 'import numpy as np\n'), ((3206, 3229), 'numpy.ones', 'np.ones', (['faces.shape[0]'], {}), '(faces.shape[0])\n', (3213, 3229), True, 'import numpy as np\n'), ((3315, 3337), 'numpy.array', 'np.array', (['(faces[i] * 2)'], {}), '(faces[i] * 2)\n', (3323, 3337), True, 'import numpy as np\n'), ((3530, 3548), 'numpy.identity', 'np.identity', (['N_dof'], {}), '(N_dof)\n', (3541, 3548), True, 'import numpy as np\n'), ((4444, 4472), 'numpy.ones', 'np.ones', (['self.faces.shape[0]'], {}), '(self.faces.shape[0])\n', (4451, 4472), True, 'import numpy as np\n'), ((4908, 4946), 'numpy.logical_not', 'np.logical_not', (['(self.nodes[:, 1] < 0.1)'], {}), '(self.nodes[:, 1] < 0.1)\n', (4922, 4946), True, 'import numpy as np\n'), ((4956, 5017), 'numpy.logical_and', 'np.logical_and', (['(self.nodes[:, 1] < 1.75)', '(self.nodes[:, 0] < 0)'], {}), '(self.nodes[:, 1] < 1.75, self.nodes[:, 0] < 0)\n', (4970, 5017), True, 'import numpy as np\n'), ((5088, 5126), 'numpy.logical_not', 'np.logical_not', (['(self.nodes[:, 1] < 0.1)'], {}), '(self.nodes[:, 1] < 0.1)\n', (5102, 5126), True, 'import numpy as np\n'), ((5136, 5197), 'numpy.logical_and', 'np.logical_and', (['(self.nodes[:, 1] < 1.75)', '(self.nodes[:, 0] > 0)'], {}), '(self.nodes[:, 1] < 1.75, 
self.nodes[:, 0] > 0)\n', (5150, 5197), True, 'import numpy as np\n'), ((5280, 5379), 'numpy.logical_and', 'np.logical_and', (['(self.node_types[self.faces[:, 0]] == 2)', '(self.node_types[self.faces[:, 1]] == 2)'], {}), '(self.node_types[self.faces[:, 0]] == 2, self.node_types[self\n .faces[:, 1]] == 2)\n', (5294, 5379), True, 'import numpy as np\n'), ((5471, 5570), 'numpy.logical_and', 'np.logical_and', (['(self.node_types[self.faces[:, 0]] == 3)', '(self.node_types[self.faces[:, 1]] == 3)'], {}), '(self.node_types[self.faces[:, 0]] == 3, self.node_types[self\n .faces[:, 1]] == 3)\n', (5485, 5570), True, 'import numpy as np\n'), ((6138, 6166), 'numpy.ones', 'np.ones', (['self.faces.shape[0]'], {}), '(self.faces.shape[0])\n', (6145, 6166), True, 'import numpy as np\n'), ((8769, 8801), 'numpy.floor', 'np.floor', (['(time / self.delta_time)'], {}), '(time / self.delta_time)\n', (8777, 8801), True, 'import numpy as np\n'), ((10658, 10694), 'pickle.dump', 'pickle.dump', (['self.model', 'pickle_file'], {}), '(self.model, pickle_file)\n', (10669, 10694), False, 'import pickle\n'), ((11442, 11454), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""t:"""'], {}), "('t:')\n", (11448, 11454), False, 'from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QComboBox, QCheckBox, QSlider, QProgressBar, QFormLayout, QLineEdit, QTabWidget, QSizePolicy, QFileDialog, QMessageBox\n'), ((11620, 11638), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (11636, 11638), False, 'from PyQt5.QtGui import QDoubleValidator, QMouseEvent\n'), ((2930, 2961), 'numpy.vstack', 'np.vstack', (['[boundary, boundary]'], {}), '([boundary, boundary])\n', (2939, 2961), True, 'import numpy as np\n'), ((3606, 3644), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['K', 'forces'], {'rcond': 'None'}), '(K, forces, rcond=None)\n', (3621, 3644), True, 'import numpy as np\n'), ((5648, 5794), 'numpy.cross', 'np.cross', (['(self.nodes[self.faces[:, 1], :] - 
self.nodes[self.faces[:, 0], :])', '(self.nodes[self.faces[:, 2], :] - self.nodes[self.faces[:, 0], :])'], {}), '(self.nodes[self.faces[:, 1], :] - self.nodes[self.faces[:, 0], :],\n self.nodes[self.faces[:, 2], :] - self.nodes[self.faces[:, 0], :])\n', (5656, 5794), True, 'import numpy as np\n'), ((6678, 6824), 'numpy.cross', 'np.cross', (['(self.nodes[self.faces[:, 1], :] - self.nodes[self.faces[:, 0], :])', '(self.nodes[self.faces[:, 2], :] - self.nodes[self.faces[:, 0], :])'], {}), '(self.nodes[self.faces[:, 1], :] - self.nodes[self.faces[:, 0], :],\n self.nodes[self.faces[:, 2], :] - self.nodes[self.faces[:, 0], :])\n', (6686, 6824), True, 'import numpy as np\n'), ((7050, 7075), 'numpy.flip', 'np.flip', (['vectors'], {'axis': '(-1)'}), '(vectors, axis=-1)\n', (7057, 7075), True, 'import numpy as np\n'), ((7075, 7092), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (7083, 7092), True, 'import numpy as np\n'), ((7441, 7466), 'numpy.flip', 'np.flip', (['vectors'], {'axis': '(-1)'}), '(vectors, axis=-1)\n', (7448, 7466), True, 'import numpy as np\n'), ((7466, 7483), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (7474, 7483), True, 'import numpy as np\n'), ((7832, 7857), 'numpy.flip', 'np.flip', (['vectors'], {'axis': '(-1)'}), '(vectors, axis=-1)\n', (7839, 7857), True, 'import numpy as np\n'), ((7857, 7874), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (7865, 7874), True, 'import numpy as np\n'), ((8587, 8630), 'numpy.floor', 'np.floor', (['(self.final_time / self.delta_time)'], {}), '(self.final_time / self.delta_time)\n', (8595, 8630), True, 'import numpy as np\n'), ((11194, 11249), 'numpy.floor', 'np.floor', (['(self.model.final_time / self.model.delta_time)'], {}), '(self.model.final_time / self.model.delta_time)\n', (11202, 11249), True, 'import numpy as np\n'), ((12254, 12298), 'numpy.floor', 'np.floor', (['(input_time / self.model.delta_time)'], {}), '(input_time / self.model.delta_time)\n', (12262, 12298), 
True, 'import numpy as np\n'), ((4305, 4348), 'numpy.floor', 'np.floor', (['(self.final_time / self.delta_time)'], {}), '(self.final_time / self.delta_time)\n', (4313, 4348), True, 'import numpy as np\n'), ((4562, 4616), 'numpy.stack', 'np.stack', (['(self.faces[:, 0], self.faces[:, 1])'], {'axis': '(1)'}), '((self.faces[:, 0], self.faces[:, 1]), axis=1)\n', (4570, 4616), True, 'import numpy as np\n'), ((4624, 4678), 'numpy.stack', 'np.stack', (['(self.faces[:, 1], self.faces[:, 2])'], {'axis': '(1)'}), '((self.faces[:, 1], self.faces[:, 2]), axis=1)\n', (4632, 4678), True, 'import numpy as np\n'), ((4686, 4740), 'numpy.stack', 'np.stack', (['(self.faces[:, 2], self.faces[:, 0])'], {'axis': '(1)'}), '((self.faces[:, 2], self.faces[:, 0]), axis=1)\n', (4694, 4740), True, 'import numpy as np\n'), ((3915, 3935), 'numpy.sqrt', 'np.sqrt', (['(x * (1 - x))'], {}), '(x * (1 - x))\n', (3922, 3935), True, 'import numpy as np\n')]
"""Bayesian coin-fairness estimation: Metropolis MCMC via PyMC3 versus the
analytic Beta-Binomial conjugate posterior, plotted side by side."""
import matplotlib.pyplot as plt
import numpy as np
import pymc3
import scipy.stats as stats


plt.style.use("ggplot")

# Parameter values for prior and analytic posterior
n = 50          # number of coin flips observed
z = 10          # number of heads observed
alpha = 12      # Beta prior parameters
beta = 12
alpha_post = 22  # conjugate posterior: alpha + z
beta_post = 52   # conjugate posterior: beta + n - z

# How many samples to carry out for MCMC
iterations = 100000

# Use PyMC3 to construct a model context
basic_model = pymc3.Model()
with basic_model:
    # Define our prior belief about the fairness
    # of the coin using a Beta distribution
    theta = pymc3.Beta("theta", alpha=alpha, beta=beta)

    # Define the Bernoulli likelihood function
    y = pymc3.Binomial("y", n=n, p=theta, observed=z)

    # Carry out the MCMC analysis using the Metropolis algorithm
    # Use Maximum A Posteriori (MAP) optimisation as initial value for MCMC
    start = pymc3.find_MAP()

    # Use the Metropolis algorithm (as opposed to NUTS or HMC, etc.)
    step = pymc3.Metropolis()

    # Calculate the trace.
    # FIX: pass step/start by keyword -- in pymc3 >= 3.2 the third
    # positional parameter of sample() is `init` (a string), so passing
    # `start` positionally is misinterpreted there.
    trace = pymc3.sample(iterations, step=step, start=start,
                         random_seed=1, progressbar=True)

# Plot the posterior histogram from MCMC analysis
bins = 50
# FIX: `normed` was removed in matplotlib 3.1; `density` (matplotlib >= 2.1)
# is the equivalent replacement.
plt.hist(
    trace["theta"], bins,
    histtype="step", density=True,
    label="Posterior (MCMC)", color="red"
)

# Plot the analytic prior and posterior beta distributions
x = np.linspace(0, 1, 100)
plt.plot(
    x, stats.beta.pdf(x, alpha, beta),
    "--", label="Prior", color="blue"
)
plt.plot(
    x, stats.beta.pdf(x, alpha_post, beta_post),
    label='Posterior (Analytic)', color="green"
)

# Update the graph labels
plt.legend(title="Parameters", loc="best")
plt.xlabel("$\\theta$, Fairness")
plt.ylabel("Density")
plt.show()

# Show the trace plot
pymc3.traceplot(trace)
plt.show()
[ "pymc3.Metropolis", "matplotlib.pyplot.hist", "pymc3.find_MAP", "matplotlib.pyplot.ylabel", "pymc3.traceplot", "matplotlib.pyplot.xlabel", "pymc3.Beta", "matplotlib.pyplot.style.use", "pymc3.Binomial", "numpy.linspace", "pymc3.sample", "scipy.stats.beta.pdf", "pymc3.Model", "matplotlib.pyp...
[((93, 116), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (106, 116), True, 'import matplotlib.pyplot as plt\n'), ((354, 367), 'pymc3.Model', 'pymc3.Model', ([], {}), '()\n', (365, 367), False, 'import pymc3\n'), ((1078, 1182), 'matplotlib.pyplot.hist', 'plt.hist', (["trace['theta']", 'bins'], {'histtype': '"""step"""', 'normed': '(True)', 'label': '"""Posterior (MCMC)"""', 'color': '"""red"""'}), "(trace['theta'], bins, histtype='step', normed=True, label=\n 'Posterior (MCMC)', color='red')\n", (1086, 1182), True, 'import matplotlib.pyplot as plt\n'), ((1258, 1280), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (1269, 1280), True, 'import numpy as np\n'), ((1508, 1550), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': '"""Parameters"""', 'loc': '"""best"""'}), "(title='Parameters', loc='best')\n", (1518, 1550), True, 'import matplotlib.pyplot as plt\n'), ((1551, 1584), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$, Fairness"""'], {}), "('$\\\\theta$, Fairness')\n", (1561, 1584), True, 'import matplotlib.pyplot as plt\n'), ((1585, 1606), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (1595, 1606), True, 'import matplotlib.pyplot as plt\n'), ((1607, 1617), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1615, 1617), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1663), 'pymc3.traceplot', 'pymc3.traceplot', (['trace'], {}), '(trace)\n', (1656, 1663), False, 'import pymc3\n'), ((1664, 1674), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1672, 1674), True, 'import matplotlib.pyplot as plt\n'), ((491, 534), 'pymc3.Beta', 'pymc3.Beta', (['"""theta"""'], {'alpha': 'alpha', 'beta': 'beta'}), "('theta', alpha=alpha, beta=beta)\n", (501, 534), False, 'import pymc3\n'), ((591, 636), 'pymc3.Binomial', 'pymc3.Binomial', (['"""y"""'], {'n': 'n', 'p': 'theta', 'observed': 'z'}), "('y', n=n, p=theta, observed=z)\n", (605, 
636), False, 'import pymc3\n'), ((791, 807), 'pymc3.find_MAP', 'pymc3.find_MAP', ([], {}), '()\n', (805, 807), False, 'import pymc3\n'), ((890, 908), 'pymc3.Metropolis', 'pymc3.Metropolis', ([], {}), '()\n', (906, 908), False, 'import pymc3\n'), ((948, 1018), 'pymc3.sample', 'pymc3.sample', (['iterations', 'step', 'start'], {'random_seed': '(1)', 'progressbar': '(True)'}), '(iterations, step, start, random_seed=1, progressbar=True)\n', (960, 1018), False, 'import pymc3\n'), ((1298, 1328), 'scipy.stats.beta.pdf', 'stats.beta.pdf', (['x', 'alpha', 'beta'], {}), '(x, alpha, beta)\n', (1312, 1328), True, 'import scipy.stats as stats\n'), ((1388, 1428), 'scipy.stats.beta.pdf', 'stats.beta.pdf', (['x', 'alpha_post', 'beta_post'], {}), '(x, alpha_post, beta_post)\n', (1402, 1428), True, 'import scipy.stats as stats\n')]
import os import unittest import numpy as np from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1 from pyNastran.bdf.field_writer_8 import print_card_8 from pyNastran.bdf.mesh_utils.mass_properties import ( mass_properties, mass_properties_nsm) #mass_properties_breakdown from pyNastran.bdf.cards.test.utils import save_load_deck from pyNastran.bdf.mesh_utils.loads import sum_forces_moments, sum_forces_moments_elements class TestBars(unittest.TestCase): """test CBAR/PBAR/PBARL classes""" def test_pbar_1(self): """tests the PBAR BDF add""" area = 0.0 i11 = 4.9e-2 i22 = 5.5e-2 i12 = 6.6e-2 j = 7.7e-2 nsm = 1.0 fields = [ u'PBAR', 1510998, 1520998, area, i11, i22, j, nsm, None, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, None, None, i12, ] card = print_card_8(fields) #print(card) card = print_card_8(fields) lines = card.split('\n') model = BDF(debug=False) card = model._process_card(lines) cardi = BDFCard(card) pbar = PBAR.add_card(cardi) pbar.raw_fields() self.assertEqual(pbar.A, area) self.assertEqual(pbar.i1, i11) self.assertEqual(pbar.i2, i22) self.assertEqual(pbar.i12, i12) self.assertEqual(pbar.j, j) self.assertEqual(pbar.k1, None) self.assertEqual(pbar.k2, None) self.assertEqual(pbar.nsm, nsm) assert np.allclose(pbar.Area(), area) assert np.allclose(pbar.I11(), i11) assert np.allclose(pbar.I22(), i22) assert np.allclose(pbar.I12(), i12) assert np.allclose(pbar.J(), j) assert np.allclose(pbar.Nsm(), nsm) def test_pbar_2(self): """tests the PBAR BDF add""" pid = 1 mid = 2 A = None I1 = I2 = None J = None nsm = None c1 = c2 = d1 = d2 = e1 = e2 = f1 = f2 = None k1 = k2 = None i12 = 3. 
fields = [ 'PBAR', pid, mid, A, I1, I2, J, nsm, None, c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, i12 ] card = print_card_8(fields) lines = card.split('\n') model = BDF(debug=False) card = model._process_card(lines) cardi = BDFCard(card) pbar = PBAR.add_card(cardi) self.assertEqual(pbar.pid, 1) self.assertEqual(pbar.mid, 2) self.assertEqual(pbar.A, 0.0) self.assertEqual(pbar.i1, 0.0) self.assertEqual(pbar.i2, 0.0) self.assertEqual(pbar.j, 0.0) self.assertEqual(pbar.nsm, 0.0) self.assertEqual(pbar.i12, 3.0) self.assertEqual(pbar.c1, 0.0) self.assertEqual(pbar.c2, 0.0) self.assertEqual(pbar.d1, 0.0) self.assertEqual(pbar.d2, 0.0) self.assertEqual(pbar.e1, 0.0) self.assertEqual(pbar.e2, 0.0) self.assertEqual(pbar.k1, None) self.assertEqual(pbar.k2, None) #-------------------------------------------------------- A = 6. I1 = 5. I2 = 4. J = 3. nsm = 2. c1 = c2 = d1 = d2 = e1 = e2 = f1 = f2 = None k1 = k2 = 1e2 i12 = 0. fields = [ 'PBAR', pid, mid, A, I1, I2, J, nsm, None, c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, i12] card = print_card_8(fields) lines = card.split('\n') model = BDF(debug=False) card = model._process_card(lines) cardi = BDFCard(card) pbar = PBAR.add_card(cardi) self.assertEqual(pbar.pid, 1) self.assertEqual(pbar.mid, 2) self.assertEqual(pbar.A, 6.0) self.assertEqual(pbar.i1, 5.0) self.assertEqual(pbar.i2, 4.0) self.assertEqual(pbar.j, 3.0) self.assertEqual(pbar.nsm, 2.0) self.assertEqual(pbar.i12, 0.0) self.assertEqual(pbar.c1, 0.0) self.assertEqual(pbar.c2, 0.0) self.assertEqual(pbar.d1, 0.0) self.assertEqual(pbar.d2, 0.0) self.assertEqual(pbar.e1, 0.0) self.assertEqual(pbar.e2, 0.0) self.assertEqual(pbar.k1, 1e2) self.assertEqual(pbar.k2, 1e2) def test_pbar_3(self): """tests the PBAR validate""" pid = 42 mid = 10 i1 = -1. i2 = -2. i12 = -3. j = -4. 
pbar = PBAR(pid, mid, A=0., i1=i1, i2=i2, i12=i12, j=j, nsm=0., c1=0., c2=0., d1=0., d2=0., e1=0., e2=0., f1=0., f2=0., k1=1.e8, k2=1.e8, comment='pbar') with self.assertRaises(ValueError): pbar.validate() pbar.i1 = 1. with self.assertRaises(ValueError): pbar.validate() pbar.i2 = 2. with self.assertRaises(ValueError): pbar.validate() pbar.j = 4. pbar.validate() model = BDF(debug=False) pbar = model.add_pbar(pid, mid, A=0., i1=2., i2=2., i12=1., j=4., nsm=0., c1=0., c2=0., d1=0., d2=0., e1=0., e2=0., f1=0., f2=0., k1=1.e8, k2=1.e8, comment='pbar') pbar.validate() nids = [100, 101] eid = 1000 x = [0., 0., 1.] g0 = None model.add_cbar(eid, pid, nids, x, g0, comment='cbar') model.add_grid(100, [0., 0., 0.]) model.add_grid(101, [1., 0., 0.]) E = 3.0e7 G = None nu = 0.3 model.add_mat1(mid, E, G, nu) save_load_deck(model) def test_cbar_g0(self): """modification of test_cbeam_01""" model = BDF(debug=False) pid = 200 mid = 6 model.add_pbar(pid, mid, A=0., i1=2., i2=2., i12=1., j=4., nsm=0., c1=0., c2=0., d1=0., d2=0., e1=0., e2=0., f1=0., f2=0., k1=1.e8, k2=1.e8, comment='pbar') eid = 100 nids = [10, 20] x = None g0 = 30 cbar = model.add_cbar(eid, pid, nids, x, g0, comment='cbar') cbar.write_card_16(is_double=False) E = 1.0e7 G = None nu = 0.3 model.add_mat1(mid, E, G, nu) model.add_grid(10, [0., 0., 0.]) model.add_grid(20, [0., 1., 0.]) model.add_grid(30, [0., 2., 0.]) model.cross_reference() save_load_deck(model) def test_pbarl_1(self): """tests the PBARL""" model = BDF(log=None, debug=False) pid = 4 mid = 40 group = 'group' Type = 'bad_type' dim = 42 nsm = 0.5 pbarl = PBARL(pid, mid, Type, dim, group=group, nsm=nsm, comment='comment') with self.assertRaises(ValueError): # Type pbarl.validate() pbarl.Type = 'TUBE' with self.assertRaises(TypeError): # dim pbarl.validate() pbarl.dim = [20.] with self.assertRaises(RuntimeError): pbarl.validate() pbarl.dim = [2., 1.] 
#with self.assertRaises(ValueError): #pbarl.validate() #pbarl.group = 'MSCBML0' pbarl.validate() str(pbarl) pbarl.write_card(size=8, is_double=False) pbarl.write_card(size=16, is_double=False) pbarl.write_card(size=16, is_double=True) model.properties[pid] = pbarl nid1 = 52 xyz1 = [0., 0., 0.] model.nodes[nid1] = GRID(nid1, cp=0, xyz=xyz1) nid2 = 53 xyz2 = [1., 0., 0.] model.nodes[nid2] = GRID(nid2, cp=0, xyz=xyz2) E = 30.0e7 G = None nu = 0.3 mat = MAT1(mid, E, G, nu, rho=1.0) model.materials[mid] = mat eid = 42 x = None g0 = None cbar = CBAR(eid, pid, [nid1, nid2], x, g0, offt='GGG', pa=0, pb=0, wa=None, wb=None, comment='') with self.assertRaises(ValueError): cbar.validate() cbar.x = [0., 1., 2.] cbar.validate() model.elements[eid] = cbar pbarl._verify(xref=False) model.validate() model.cross_reference() pbarl._verify(xref=True) assert np.allclose(cbar.Mass(), 9.9247779608), cbar.Mass() mat.rho = 0. assert np.allclose(cbar.Mass(), 0.5), cbar.Mass() scale = 'FR' x = [0.2, 0.4, 0.6, 0.8] model.add_cbarao(eid, scale, x, comment='cbarao') model.add_card(['CBARAO', eid+1, 'RF', 6, 0.1, 0.2], 'CBARAO') save_load_deck(model, run_quality=False, run_test_bdf=False) def test_bar_mass_1(self): """tests CBAR/PBAR mass""" model = BDF(debug=False) #model.case_control_deck = CaseControlDeck(case_control_lines) spc = ['SPC1', 123456, 123456, 1] grid1 = ['GRID', 1, None, 0., 0., 0.] grid2 = ['GRID', 2, None, 1., 0., 0.] #grid3 = ['GRID', 3, None, 1., 0., 0.] force = ['FORCE', 100, 1, 0, 2., 3., 4.] pid = 11 mid = 12 cbar = [ 'CBAR', 10, pid, 1, 2, 0., 1., 0., None, ] k1 = k2 = None area = 2.0 rho = 3. 
nu = 0.3 i1 = 2.1 i2 = 1.2 i12 = 0.1 j = None nsm = 0.1 pbar = [ 'PBAR', pid, mid, area, i1, i2, j, nsm, None, None, None, None, None, None, None, None, k1, k2, i12 ] mat1 = ['MAT1', mid, 3.0e7, None, nu, rho] model.add_card(grid1, 'GRID') model.add_card(grid2, 'GRID') model.add_card(cbar, 'CBAR') model.add_card(pbar, 'PBAR') model.add_card(mat1, 'MAT1') model.add_card(spc, 'SPC1') model.add_card(force, 'FORCE') model.validate() model.cross_reference() mass, unused_cg, unused_I = mass_properties( model, element_ids=None, mass_ids=None, reference_point=None, sym_axis=None, scale=None) #print('cg* =', cg) L = 1.0 mass_per_length = area * rho + nsm mass = L * mass_per_length #xcg = (0.0 * mass_a + 1.0 * mass_b) / (mass_a + mass_b) #print(mass_a, mass_b, xcg, mass_a + mass_b) #print('mass =', mass) #cbar = CBEAM() cbar = model.elements[10] pbar = model.properties[11] assert pbar.Nu() == nu, 'pbar.Nu()=%s nu=%s' % (pbar.Nu(), nu) assert pbar.Rho() == rho, 'pbar.Rho()=%s rho=%s' % (pbar.Rho(), rho) assert np.allclose(cbar.Length(), 1.0), cbar.Length() #assert np.allclose(cbar.Mass(), 10.25), cbar.Mass() #assert np.allclose(cbar.MassPerLength(), 10.25), cbar.MassPerLength() #assert np.allclose(mass, 10.25), mass case_control_lines = ( 'SOL 101\n' 'CEND\n' 'SUBCASE 1\n' ' STRESS(PLOT,SORT1,REAL) = ALL\n' ' SPC = 123456\n' ' LOAD = 100\n' 'BEGIN BULK\n' 'PARAM,GRDPNT,0\n' 'PARAM,POST,-1\n' 'PARAM POSTEXT YES\n' ) with open('cbar.bdf', 'w') as bdf_file: bdf_file.write(case_control_lines) model.write_bdf(bdf_file, enddata=True) model2 = BDF(debug=False) model2.read_bdf('cbar.bdf') model2._verify_bdf(xref=True) if not os.path.exists('cbar.op2') and 0: os.system('nastran scr=yes bat=no old=no cbar.bdf') os.remove('cbar.bdf') if 0: # pragma: no cover from pyNastran.op2.op2 import OP2 op2 = OP2() op2.read_op2('cbar.op2') #os.remove('cbar.op2') gpw = op2.grid_point_weight op2_mass = gpw.mass.max() assert np.allclose(op2_mass, mass), 'op2_mass=%s mass=%s' % (op2_mass, mass) 
#print('op2_mass=%s mass=%s' % (op2_mass, mass)) unused_op2_cg = gpw.cg unused_cg = np.array([0.5, 0., 0.], dtype='float32') #print('cg =', op2_cg) def test_bar_mass_2(self): """CBAR/PBARL""" model = BDF(debug=False) model.add_grid(1, [0., 0., 0.]) model.add_grid(2, [1., 0., 0.]) model.add_grid(3, [0., 1., 0.]) mid = 1 E = 3.0e7 G = None nu = 0.3 model.add_mat1(mid, E, G, nu, rho=1.) #--------------------------------------------------------------- eid = 1 pid = 101 nids = [1, 2] x = [0., 0., 1.] g0 = None unused_cbar = model.add_cbar(eid, pid, nids, x, g0, offt='GGG', pa=0, pb=0, wa=None, wb=None, comment='CBAR') Type = 'BOX' dim = [1., 2., 0.1, 0.1] #pbeaml = model.add_pbeaml(pid, mid, Type, xxb, dims, nsm=None, #so=None, comment='PBEAML') unused_pbarl = model.add_pbarl(pid, mid, Type, dim, group='MSCBML0', nsm=0., comment='PBARL') #--------------------------------------------------------------- eid = 2 pid = 102 x = None g0 = 3 unused_cbar = model.add_cbar(eid, pid, nids, x, g0, offt='GGG', pa=0, pb=0, wa=None, wb=None, comment='CBAR') Type = 'BOX' dim = [1., 2., 0.1, 0.1] unused_pbarl = model.add_pbarl(pid, mid, Type, dim, group='MSCBML0', nsm=0., comment='PBARL') #--------------------------------------------------------------- eid = 3 pid = 103 #cbar = model.add_cbar(eid, pid, nids, x, g0, offt='GGG', #pa=42, pb=5, wa=None, wb=None, #comment='CBAR') unused_pbar = model.add_pbar(pid, mid, A=1., i1=0., i2=0., i12=0., j=0., nsm=0.1, c1=0., c2=0., d1=0., d2=0., e1=0., e2=0., f1=0., f2=0., k1=1.e8, k2=1.e8, comment='pbar') #G = 3.0e7 #E = None #nu = 0.3 #model.add_mat1(mid, E, G, nu, rho=0.0, a=0.0, tref=0.0, ge=0.0, #St=0.0, Sc=0.0, Ss=0.0, mcsid=0, #comment='') #--------------------------------------------------------------- model.validate() model.pop_parse_errors() model._verify_bdf(xref=False) model.cross_reference() model.pop_xref_errors() model._verify_bdf(xref=True) model.uncross_reference() def test_pbar_nsm(self): model = BDF(debug=False) pid = 1 mid = 1 
nsm = 1. area = 2.0 pbar = model.add_pbar(pid, mid, A=area, i1=0., i2=0., i12=0., j=0., nsm=nsm, c1=0., c2=0., d1=0., d2=0., e1=0., e2=0., f1=0., f2=0., k1=1.e8, k2=1.e8, comment='') E = 1.0 G = None nu = 0.3 mat1 = model.add_mat1(mid, E, G, nu) #---------------- card_lines = [ 'PBAR 2 1 2. 1.', ] model.add_card(card_lines, 'PBAR', comment='', is_list=False, has_none=True) pbar2 = model.properties[2] #------------------ model.cross_reference() assert pbar.Nsm() == 1.0 assert pbar.Area() == 2.0 # mass/L = area*rho + nsm assert pbar.MassPerLength() == 1.0 # area = 2.0 mat1.rho = 10.0 assert pbar.MassPerLength() == 21.0, pbar.MassPerLength() assert pbar2.MassPerLength() == 21.0, pbar2.MassPerLength() def test_pbarl_nsm(self): model = BDF(debug=False) pid = 1 mid = 1 bar_type = 'BAR' dim = [1., 2.] # area = 2.0 pbarl = model.add_pbarl(pid, mid, bar_type, dim, group='MSCBML0', nsm=1., comment='') E = 1.0 G = None nu = 0.3 mat1 = model.add_mat1(mid, E, G, nu) #---------------- card_lines = [ 'PBARL 2 1 BAR', ' 1.0 2.0 1.0', ] model.add_card(card_lines, 'PBARL', comment='', is_list=False, has_none=True) pbarl2 = model.properties[2] #------------------ model.cross_reference() assert pbarl.Nsm() == 1.0 assert pbarl.Area() == 2.0 # mass/L = area*rho + nsm assert pbarl.MassPerLength() == 1.0 # area = 2.0 mat1.rho = 10.0 assert pbarl.MassPerLength() == 21.0, pbarl.MassPerLength() assert pbarl2.MassPerLength() == 21.0, pbarl2.MassPerLength() loadcase_id = 10 eid = 11 load_type = 'FZ' x1 = 0. x2 = None p1 = 10. 
scale = 'FR' model.add_pload1(loadcase_id, eid, load_type, scale, x1, p1, x2=x2, p2=None, comment='pload1') scale = 'LE' model.add_pload1(loadcase_id, eid, load_type, scale, x1, p1, x2=x2, p2=None, comment='') model.add_grid(1, [0., 0., 0.]) model.add_grid(2, [1., 0., 0.]) model.add_grid(3, [0., 1., 0.]) x = None g0 = 3 model.add_cbar(eid, pid, [1, 2], x, g0) model.cross_reference() p0 = 1 eids = None nids = None force1, moment1 = sum_forces_moments(model, p0, loadcase_id, include_grav=False, xyz_cid0=None) force2, moment2 = sum_forces_moments_elements(model, p0, loadcase_id, eids, nids, include_grav=False, xyz_cid0=None) #print(force1, force2) assert np.allclose(force1, force2), force1 assert np.allclose(moment1, moment2), moment1 save_load_deck(model, xref='standard', punch=True) def test_baror(self): """tests a BAROR""" model = BDF(debug=False) n1 = 10 n2 = 20 model.add_grid(n1, [0., 0., 0.]) model.add_grid(n2, [1., 0., 0.]) pid = 2 mid = 1 bar_type = 'BAR' dim = [1., 2.] # area = 2.0 unused_pbarl = model.add_pbarl(pid, mid, bar_type, dim, group='MSCBML0', nsm=1., comment='') E = 3.0e7 G = None nu = 0.3 model.add_mat1(mid, E, G, nu, rho=1.) 
card_lines = ['BAROR', None, pid, None, None, 0.6, 2.9, -5.87, 'GOG'] model.add_card(card_lines, 'BAROR', comment='BAROR', is_list=True, has_none=True) eid = 1 card_lines = ['CBAR', eid, pid, n1, n2] model.add_card(card_lines, 'CBAR', comment='', is_list=True, has_none=True) model.pop_parse_errors() save_load_deck(model) def test_baror_2(self): model = BDF(debug=False) pid = 12 is_g0 = True g0 = 42 x = None baror = model.add_baror(pid, is_g0, g0, x, offt='GGG', comment='baror') baror.raw_fields() baror.write_card(size=8) baror.write_card(size=16) save_load_deck(model) def test_cbend(self): """tests a CBEND""" model = BDF(debug=False) eid = 7 pid = 10 nids = [2, 3] g0 = 5 x = None geom = 1 cbend = model.add_cbend(eid, pid, nids, g0, x, geom, comment='cbend') model.add_grid(2, [0., 0., 0.]) model.add_grid(3, [0., 0., 0.]) model.add_grid(5, [0., 0., 0.]) #pbend = model.add_pbend(pid, mid, beam_type, A, i1, i2, j, #c1, c2, d1, d2, e1, e2, f1, f2, #k1, k2, nsm, rc, zc, delta_n, fsi, #rm, t, p, rb, theta_b, comment='') cbend.validate() cbend.raw_fields() cbend.write_card() cbend.write_card(size=16) model.validate() model._verify_bdf(xref=False) model.pop_parse_errors() #model.cross_reference() #model.pop_xref_errors() #model._verify_bdf(xref=True) #model.uncross_reference() def test_cbeam3(self): """tests a CBEAM3""" model = BDF(debug=False) model.add_grid(1, [0., 0., 0.]) model.add_grid(2, [0., 0., 0.]) model.add_grid(3, [0., 0., 0.]) model.add_grid(4, [0., 0., 0.]) eid = 1 pid = 2 nids = [1, 2, 3] x = None g0 = 4 cbeam3 = model.add_cbeam3(eid, pid, nids, x, g0, wa=None, wb=None, wc=None, tw=None, s=None, comment='cbeam3') cbeam3.raw_fields() A = 1. iz = 2. iy = 3. 
mid = 4 pbeam3 = model.add_pbeam3(pid, mid, A, iz, iy, iyz=0., j=None, nsm=0., cy=0., cz=0., dy=0., dz=0., ey=0., ez=0., fy=0., fz=0., comment='') E = 3.0e7 G = None nu = 0.3 model.add_mat1(mid, E, G, nu, rho=0.1) str(cbeam3) pbeam3s = str(pbeam3) #print(pbeam3s) str(pbeam3s) card_lines = pbeam3s.split('\n') cbeam3._verify(xref=False) model.cross_reference() model.uncross_reference() del model.properties[pid] model.cards_to_read.add('PBEAM3') model.add_card(card_lines, 'PBEAM3', comment='', ifile=None, is_list=False, has_none=True) model.pop_parse_errors() model.pop_xref_errors() assert pbeam3 == model.properties[pid] def test_bar_area(self): """tests the PBARL""" model = BDF(log=None, debug=False) mid = 40 group = 'group' nsm = 0.0 shape_dims_area = [ # name, dims, area, i1 ('ROD', [2.], 4. * np.pi, 0.), ('TUBE', [5., 1.], 24. * np.pi, 0.), ('BAR', [2., 3.], 6., 0.), ('BOX', [2., 3., 0.5, 0.5], 4., 0.), ('L', [2., 3., 1., 1.], 4., 0.), ('CHAN', [10., 10., 1., 1.], 28., None), ('CHAN1', [9., 0.1, 8., 10.], 19., None), ('CHAN2', [1, 1., 9., 10.], 26., None), # new ('I', [1., 1., 1., 0.1, 0.1, 0.1], 0.28, None), ('I1', [0.1, 1., 0.5, 1.], 1.05, None), ('H', [1.0, 0.1, 1.0, 0.1], 0.2, None), ('Z', [0.5, 0.5, 0.5, 1.], 0.75, None), ('Z', [0.8, 0.5, 0.5, 1.], 0.90, None), ('Z', [0.5, 0.8, 0.5, 1.], 1.05, None), ('Z', [0.5, 0.5, 0.8, 1.], 0.60, None), ('Z', [0.5, 0.5, 0.5, 2.], 1.75, None), ('CHAN', [1., 1., 0.1, 0.1], 0.28, None), ('CHAN1', [0.5, 0.5, 0.5, 1.], 0.75, None), ('CHAN2', [0.1, 0.1, 1., 1.], 0.28, None), ('CROSS', [0.1, 0.1, 1., 0.1], 0.11, None), ('HEXA', [0.1, 1., 1.], 0.90, None), ('HEXA', [0.2, 1., 1.], 0.80, None), ('HEXA', [0.1, 2., 1.], 1.90, None), ('HEXA', [0.1, 1., 2.], 1.80, None), ('HAT', [1., 0.1, 1., 0.1], 0.30, None), ('HAT', [2., 0.1, 1., 0.1], 0.50, None), ('HAT', [1., 0.2, 1., 0.1], 0.56, None), ('HAT', [1., 0.1, 2., 0.1], 0.40, None), ('HAT', [1., 0.1, 1., 0.2], 0.32, None), ('HAT1', [3., 1., 1., 0.1, 0.1], 0.76, None), ('HAT1', [3., 2., 
1., 0.1, 0.1], 0.96, None), ('HAT1', [3., 1., 2., 0.1, 0.1], 0.76, None), ('HAT1', [3., 1., 1., 0.2, 0.1], 1.18, None), ('HAT1', [3., 1., 1., 0.1, 0.2], 1.04, None), ('T', [10., 10., 3., 0.5], 33.5, None), ('T2', [10., 5., 0.5, 2.0], 14., None), # ball,hall,tflange,tweb ('T', [1., 1., 0.1, 0.1], 0.19, None), ('T1', [1., 1., 0.1, 0.1], 0.20, None), ('T2', [1., 1., 0.1, 0.1], 0.19, None), ('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.64, None), ('DBOX', [2., 2., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.94, None), ('DBOX', [2., 1., 2., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.64, None), ('DBOX', [2., 1., 1., 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.72, None), ('DBOX', [2., 1., 1., 0.1, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.72, None), ('DBOX', [2., 1., 1., 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, 0.1, ], 0.72, None), ('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, ], 0.725, None), ('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, ], 0.725, None), ('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, 0.1, ], 0.725, None), ('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, ], 0.725, None), ] pid = 1 for bar_type, dims, areai, i1 in shape_dims_area: pbarl = PBARL(pid, mid, bar_type, dims, group=group, nsm=nsm, comment='comment') pbarl.validate() area2 = pbarl.Area() if i1 is not None: pbarl.I1() pbarl.I2() pbarl.I12() assert np.allclose(areai, area2), 'bar_type=%r dims=%s area=%s area_expected=%s' % (bar_type, dims, area2, areai) pid += 1 if __name__ == '__main__': # pragma: no cover unittest.main()
[ "pyNastran.op2.op2.OP2", "pyNastran.bdf.bdf.GRID", "numpy.array", "unittest.main", "os.remove", "os.path.exists", "pyNastran.bdf.bdf.MAT1", "pyNastran.bdf.field_writer_8.print_card_8", "pyNastran.bdf.mesh_utils.loads.sum_forces_moments", "numpy.allclose", "pyNastran.bdf.cards.test.utils.save_loa...
[((25633, 25648), 'unittest.main', 'unittest.main', ([], {}), '()\n', (25646, 25648), False, 'import unittest\n'), ((902, 922), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['fields'], {}), '(fields)\n', (914, 922), False, 'from pyNastran.bdf.field_writer_8 import print_card_8\n'), ((959, 979), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['fields'], {}), '(fields)\n', (971, 979), False, 'from pyNastran.bdf.field_writer_8 import print_card_8\n'), ((1029, 1045), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (1032, 1045), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((1104, 1117), 'pyNastran.bdf.bdf.BDFCard', 'BDFCard', (['card'], {}), '(card)\n', (1111, 1117), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((1133, 1153), 'pyNastran.bdf.bdf.PBAR.add_card', 'PBAR.add_card', (['cardi'], {}), '(cardi)\n', (1146, 1153), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((2188, 2208), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['fields'], {}), '(fields)\n', (2200, 2208), False, 'from pyNastran.bdf.field_writer_8 import print_card_8\n'), ((2258, 2274), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (2261, 2274), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((2333, 2346), 'pyNastran.bdf.bdf.BDFCard', 'BDFCard', (['card'], {}), '(card)\n', (2340, 2346), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((2363, 2383), 'pyNastran.bdf.bdf.PBAR.add_card', 'PBAR.add_card', (['cardi'], {}), '(cardi)\n', (2376, 2383), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((3404, 3424), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['fields'], {}), '(fields)\n', (3416, 3424), False, 'from pyNastran.bdf.field_writer_8 import 
print_card_8\n'), ((3474, 3490), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (3477, 3490), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((3550, 3563), 'pyNastran.bdf.bdf.BDFCard', 'BDFCard', (['card'], {}), '(card)\n', (3557, 3563), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((3579, 3599), 'pyNastran.bdf.bdf.PBAR.add_card', 'PBAR.add_card', (['cardi'], {}), '(cardi)\n', (3592, 3599), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((4406, 4585), 'pyNastran.bdf.bdf.PBAR', 'PBAR', (['pid', 'mid'], {'A': '(0.0)', 'i1': 'i1', 'i2': 'i2', 'i12': 'i12', 'j': 'j', 'nsm': '(0.0)', 'c1': '(0.0)', 'c2': '(0.0)', 'd1': '(0.0)', 'd2': '(0.0)', 'e1': '(0.0)', 'e2': '(0.0)', 'f1': '(0.0)', 'f2': '(0.0)', 'k1': '(100000000.0)', 'k2': '(100000000.0)', 'comment': '"""pbar"""'}), "(pid, mid, A=0.0, i1=i1, i2=i2, i12=i12, j=j, nsm=0.0, c1=0.0, c2=0.0,\n d1=0.0, d2=0.0, e1=0.0, e2=0.0, f1=0.0, f2=0.0, k1=100000000.0, k2=\n 100000000.0, comment='pbar')\n", (4410, 4585), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((4915, 4931), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (4918, 4931), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((5520, 5541), 'pyNastran.bdf.cards.test.utils.save_load_deck', 'save_load_deck', (['model'], {}), '(model)\n', (5534, 5541), False, 'from pyNastran.bdf.cards.test.utils import save_load_deck\n'), ((5631, 5647), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (5634, 5647), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((6337, 6358), 'pyNastran.bdf.cards.test.utils.save_load_deck', 'save_load_deck', (['model'], {}), '(model)\n', (6351, 6358), False, 'from pyNastran.bdf.cards.test.utils import save_load_deck\n'), 
((6434, 6460), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'log': 'None', 'debug': '(False)'}), '(log=None, debug=False)\n', (6437, 6460), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((6595, 6662), 'pyNastran.bdf.bdf.PBARL', 'PBARL', (['pid', 'mid', 'Type', 'dim'], {'group': 'group', 'nsm': 'nsm', 'comment': '"""comment"""'}), "(pid, mid, Type, dim, group=group, nsm=nsm, comment='comment')\n", (6600, 6662), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((7399, 7425), 'pyNastran.bdf.bdf.GRID', 'GRID', (['nid1'], {'cp': '(0)', 'xyz': 'xyz1'}), '(nid1, cp=0, xyz=xyz1)\n', (7403, 7425), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((7501, 7527), 'pyNastran.bdf.bdf.GRID', 'GRID', (['nid2'], {'cp': '(0)', 'xyz': 'xyz2'}), '(nid2, cp=0, xyz=xyz2)\n', (7505, 7527), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((7596, 7624), 'pyNastran.bdf.bdf.MAT1', 'MAT1', (['mid', 'E', 'G', 'nu'], {'rho': '(1.0)'}), '(mid, E, G, nu, rho=1.0)\n', (7600, 7624), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((7728, 7822), 'pyNastran.bdf.bdf.CBAR', 'CBAR', (['eid', 'pid', '[nid1, nid2]', 'x', 'g0'], {'offt': '"""GGG"""', 'pa': '(0)', 'pb': '(0)', 'wa': 'None', 'wb': 'None', 'comment': '""""""'}), "(eid, pid, [nid1, nid2], x, g0, offt='GGG', pa=0, pb=0, wa=None, wb=\n None, comment='')\n", (7732, 7822), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((8463, 8523), 'pyNastran.bdf.cards.test.utils.save_load_deck', 'save_load_deck', (['model'], {'run_quality': '(False)', 'run_test_bdf': '(False)'}), '(model, run_quality=False, run_test_bdf=False)\n', (8477, 8523), False, 'from pyNastran.bdf.cards.test.utils import save_load_deck\n'), ((8607, 8623), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (8610, 8623), False, 'from 
pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((9773, 9882), 'pyNastran.bdf.mesh_utils.mass_properties.mass_properties', 'mass_properties', (['model'], {'element_ids': 'None', 'mass_ids': 'None', 'reference_point': 'None', 'sym_axis': 'None', 'scale': 'None'}), '(model, element_ids=None, mass_ids=None, reference_point=\n None, sym_axis=None, scale=None)\n', (9788, 9882), False, 'from pyNastran.bdf.mesh_utils.mass_properties import mass_properties, mass_properties_nsm\n'), ((11217, 11233), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (11220, 11233), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((11430, 11451), 'os.remove', 'os.remove', (['"""cbar.bdf"""'], {}), "('cbar.bdf')\n", (11439, 11451), False, 'import os\n'), ((12066, 12082), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (12069, 12082), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((14796, 14812), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (14799, 14812), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((16015, 16031), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (16018, 16031), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((17768, 17845), 'pyNastran.bdf.mesh_utils.loads.sum_forces_moments', 'sum_forces_moments', (['model', 'p0', 'loadcase_id'], {'include_grav': '(False)', 'xyz_cid0': 'None'}), '(model, p0, loadcase_id, include_grav=False, xyz_cid0=None)\n', (17786, 17845), False, 'from pyNastran.bdf.mesh_utils.loads import sum_forces_moments, sum_forces_moments_elements\n'), ((17917, 18019), 'pyNastran.bdf.mesh_utils.loads.sum_forces_moments_elements', 'sum_forces_moments_elements', (['model', 'p0', 'loadcase_id', 'eids', 'nids'], {'include_grav': '(False)', 'xyz_cid0': 'None'}), 
'(model, p0, loadcase_id, eids, nids,\n include_grav=False, xyz_cid0=None)\n', (17944, 18019), False, 'from pyNastran.bdf.mesh_utils.loads import sum_forces_moments, sum_forces_moments_elements\n'), ((18116, 18143), 'numpy.allclose', 'np.allclose', (['force1', 'force2'], {}), '(force1, force2)\n', (18127, 18143), True, 'import numpy as np\n'), ((18167, 18196), 'numpy.allclose', 'np.allclose', (['moment1', 'moment2'], {}), '(moment1, moment2)\n', (18178, 18196), True, 'import numpy as np\n'), ((18214, 18264), 'pyNastran.bdf.cards.test.utils.save_load_deck', 'save_load_deck', (['model'], {'xref': '"""standard"""', 'punch': '(True)'}), "(model, xref='standard', punch=True)\n", (18228, 18264), False, 'from pyNastran.bdf.cards.test.utils import save_load_deck\n'), ((18336, 18352), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (18339, 18352), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((19183, 19204), 'pyNastran.bdf.cards.test.utils.save_load_deck', 'save_load_deck', (['model'], {}), '(model)\n', (19197, 19204), False, 'from pyNastran.bdf.cards.test.utils import save_load_deck\n'), ((19250, 19266), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (19253, 19266), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((19520, 19541), 'pyNastran.bdf.cards.test.utils.save_load_deck', 'save_load_deck', (['model'], {}), '(model)\n', (19534, 19541), False, 'from pyNastran.bdf.cards.test.utils import save_load_deck\n'), ((19613, 19629), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (19616, 19629), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((20626, 20642), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (20629, 20642), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((21985, 22011), 
'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'log': 'None', 'debug': '(False)'}), '(log=None, debug=False)\n', (21988, 22011), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((11370, 11421), 'os.system', 'os.system', (['"""nastran scr=yes bat=no old=no cbar.bdf"""'], {}), "('nastran scr=yes bat=no old=no cbar.bdf')\n", (11379, 11421), False, 'import os\n'), ((11551, 11556), 'pyNastran.op2.op2.OP2', 'OP2', ([], {}), '()\n', (11554, 11556), False, 'from pyNastran.op2.op2 import OP2\n'), ((11726, 11753), 'numpy.allclose', 'np.allclose', (['op2_mass', 'mass'], {}), '(op2_mass, mass)\n', (11737, 11753), True, 'import numpy as np\n'), ((11917, 11959), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {'dtype': '"""float32"""'}), "([0.5, 0.0, 0.0], dtype='float32')\n", (11925, 11959), True, 'import numpy as np\n'), ((25186, 25258), 'pyNastran.bdf.bdf.PBARL', 'PBARL', (['pid', 'mid', 'bar_type', 'dims'], {'group': 'group', 'nsm': 'nsm', 'comment': '"""comment"""'}), "(pid, mid, bar_type, dims, group=group, nsm=nsm, comment='comment')\n", (25191, 25258), False, 'from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1\n'), ((25453, 25478), 'numpy.allclose', 'np.allclose', (['areai', 'area2'], {}), '(areai, area2)\n', (25464, 25478), True, 'import numpy as np\n'), ((11324, 11350), 'os.path.exists', 'os.path.exists', (['"""cbar.op2"""'], {}), "('cbar.op2')\n", (11338, 11350), False, 'import os\n')]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Jan 24 10:45:47 2020 @author: ben05 """ import ATL11 import numpy as np from scipy import stats import sys, os, h5py, glob, csv import io import pointCollection as pc import matplotlib.pyplot as plt import matplotlib as mpl from matplotlib.colors import ListedColormap, LogNorm from matplotlib.backends.backend_pdf import PdfPages from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib.ticker as ticker import cartopy.crs as ccrs #from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER #import cartopy.io.img_tiles as cimgt import cartopy.feature as cfeature import osgeo.gdal import datetime as dt from ATL11.h5util import create_attribute def ATL15_write(args): def make_dataset(field,data,field_attrs,file_obj,group_obj,scale_dict,dimScale=False): dimensions = field_attrs[field]['dimensions'].split(',') if field_attrs[field]['datatype'].startswith('int'): data = np.nan_to_num(data,nan=np.iinfo(np.dtype(field_attrs[field]['datatype'])).max) fillvalue = np.iinfo(np.dtype(field_attrs[field]['datatype'])).max elif field_attrs[field]['datatype'].startswith('float'): data = np.nan_to_num(data,nan=np.finfo(np.dtype(field_attrs[field]['datatype'])).max) fillvalue = np.finfo(np.dtype(field_attrs[field]['datatype'])).max dset = group_obj.create_dataset(field.encode('ASCII'),data=data,fillvalue=fillvalue,chunks=True,compression=6,dtype=field_attrs[field]['datatype']) for ii,dim in enumerate(dimensions): dset.dims[ii].label = scale[dim.strip()] if dimScale: dset.make_scale(field) else: if dim.strip().startswith('Nt'): dset.dims[ii].attach_scale(file_obj[scale[dim.strip()]]) else: dset.dims[ii].attach_scale(group_obj[scale[dim.strip()]]) for attr in attr_names: if 'dimensions' not in attr and 'datatype' not in attr: create_attribute(dset.id, attr, [], str(field_attrs[field][attr])) if field_attrs[field]['datatype'].startswith('int'): dset.attrs['_FillValue'.encode('ASCII')] = 
np.iinfo(np.dtype(field_attrs[field]['datatype'])).max elif field_attrs[field]['datatype'].startswith('float'): dset.attrs['_FillValue'.encode('ASCII')] = np.finfo(np.dtype(field_attrs[field]['datatype'])).max return file_obj dz_dict ={'year':'t', 'year_lag1':'t', 'year_lag4':'t', 'delta_time':'t', 'delta_time_lag1':'t', 'delta_time_lag4':'t', 'x':'x', 'y':'y', # 'cell_area':'area', 'data_count':'count', 'misfit_rms':'misfit_rms', 'misfit_scaled_rms':'misfit_scaled_rms', 'delta_h':'dz', 'delta_h_sigma':'sigma_dz', 'dhdt':'dzdt', # 'dhdt':'avg_dzdt' # 'dhdt_lag1_sigma':'sigma_avg_dzdt_lag1', # 'dhdt_lag4':'dhdt_lag4', # 'dhdt_lag4_sigma':'dhdt_lag4_sigma', # 'dhdt_mission':'dhdt_mission', # 'dhdt_mission_sigma':'dhdt_mission_sigma', # 'ice_mask':'ice_mask', # 'mask_fraction':'mask_fraction', } scale = {'Nt':'year', 'Nt_lag1':'year_lag1', 'Nt_lag4':'year_lag4', 'Nx':'x', 'Ny':'y', 'Nx_10km':'x_10km', 'Ny_10km':'y_10km', 'Nx_20km':'x_20km', 'Ny_20km':'y_20km', 'Nx_40km':'x_40km', 'Ny_40km':'y_40km', } lags = { 'file' : ['FH','FH_lag1','FH_lag4'], 'vari' : ['','_lag1','_lag4'] } avgs = ['','_10km','_20km','_40km'] # establish output file kk=0 fileout = 'ATL15_yyyymmdd.h5' if os.path.isfile(fileout): os.remove(fileout) with h5py.File(fileout.encode('ASCII'),'w') as fo: # open data attributes file with open('ATL15_output_attrs_sd.csv','r', encoding='utf-8-sig') as attrfile: reader=list(csv.DictReader(attrfile)) attr_names=[x for x in reader[0].keys() if x != 'field' and x != 'group'] for kk,ave in enumerate(avgs): field_names = [row['field'] for row in reader if row['group'] == 'height_change'+ave] # loop over dz*.h5 files for one ave for jj in range(len(lags['file'])): filein = args.directory+'/dz'+ave+lags['vari'][jj]+'.h5' #print('file in ',filein) if not os.path.isfile(filein): print('No file:',args.directory+'/'+os.path.basename(filein)) continue else: print('Reading file:',args.directory+'/'+os.path.basename(filein)) lags['file'][jj] = h5py.File(filein,'r') 
dzg=list(lags['file'][jj].keys())[0] if kk==0: #establish variables in ROOT for fieldroot in ['year','delta_time']: field=fieldroot+lags['vari'][jj] data = np.array(lags['file'][jj][dzg][dz_dict[field]]) field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if field in row['field']} if fieldroot == 'year': make_dataset(field,data,field_attrs,fo,fo,scale,dimScale=True) else: make_dataset(field,data,field_attrs,fo,fo,scale,dimScale=False) if jj==0: gh = fo.create_group('height_change'+ave) # spatial dimension scales for the gh for field in ['x','y']: data = np.array(lags['file'][jj][dzg][dz_dict[field]]) field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if field in row['field']} make_dataset(field+ave,data,field_attrs,fo,gh,scale,dimScale=True) for fld in ['delta_h','delta_h_sigma']: field = fld+ave if fld.endswith('sigma'): data = np.array(lags['file'][jj][dzg]['sigma_'+dzg]) else: data = np.array(lags['file'][jj][dzg][dzg]) field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if field in row['field']} make_dataset(field,data,field_attrs,fo,gh,scale,dimScale=False) for field in field_names: if not field.startswith('x') and not field.startswith('y') \ and not field.startswith('delta_h') and 'lag' not in field: field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if field in row['field']} if dz_dict.get(field)!=None: data = np.array(lags['file'][jj][dzg][dz_dict[field]+ave]) else: # place holder data set for now dimensions = field_attrs[field]['dimensions'].split(',') data = np.ndarray(shape=tuple([ii+1 for ii in range(len(dimensions))]),dtype=field_attrs[field]['datatype']) make_dataset(field,data,field_attrs,fo,gh,scale,dimScale=False) else: # one of the lags for fld in ['','_sigma']: field = 
'dhdt'+lags['vari'][jj]+fld+ave data = np.array(lags['file'][jj][dzg][dzg]) field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if field in row['field']} make_dataset(field,data,field_attrs,fo,gh,scale,dimScale=False) for jj in range(len(lags['file'])): lags['file'][jj].close() return fileout if __name__=='__main__': import argparse parser=argparse.ArgumentParser() parser.add_argument('--directory','-d', type=str, default=os.getcwd(), help='directory to run') # parser.add_argument('ATL11_file', type=str) # parser.add_argument('--Hemisphere','-H', type=int, default=1, help='1 for Norhtern, -1 for Southern') # parser.add_argument('--mosaic', '-m', type=str) # parser.add_argument('--out_path', '-o', type=str, help='default is ATL11_file path') # parser.add_argument('--pdf', action='store_true', default=False, help='write images to .pdf file') # parser.add_argument('--nolog', action='store_true', default=False, help='no writing errors to .log file') args=parser.parse_args() print('args',args) fileout = ATL15_write(args)
[ "csv.DictReader", "argparse.ArgumentParser", "os.getcwd", "os.path.isfile", "h5py.File", "numpy.array", "os.path.basename", "numpy.dtype", "os.remove" ]
[((4048, 4071), 'os.path.isfile', 'os.path.isfile', (['fileout'], {}), '(fileout)\n', (4062, 4071), False, 'import sys, os, h5py, glob, csv\n'), ((8659, 8684), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8682, 8684), False, 'import argparse\n'), ((4081, 4099), 'os.remove', 'os.remove', (['fileout'], {}), '(fileout)\n', (4090, 4099), False, 'import sys, os, h5py, glob, csv\n'), ((8747, 8758), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8756, 8758), False, 'import sys, os, h5py, glob, csv\n'), ((4301, 4325), 'csv.DictReader', 'csv.DictReader', (['attrfile'], {}), '(attrfile)\n', (4315, 4325), False, 'import sys, os, h5py, glob, csv\n'), ((5075, 5097), 'h5py.File', 'h5py.File', (['filein', '"""r"""'], {}), "(filein, 'r')\n", (5084, 5097), False, 'import sys, os, h5py, glob, csv\n'), ((1125, 1165), 'numpy.dtype', 'np.dtype', (["field_attrs[field]['datatype']"], {}), "(field_attrs[field]['datatype'])\n", (1133, 1165), True, 'import numpy as np\n'), ((2286, 2326), 'numpy.dtype', 'np.dtype', (["field_attrs[field]['datatype']"], {}), "(field_attrs[field]['datatype'])\n", (2294, 2326), True, 'import numpy as np\n'), ((4796, 4818), 'os.path.isfile', 'os.path.isfile', (['filein'], {}), '(filein)\n', (4810, 4818), False, 'import sys, os, h5py, glob, csv\n'), ((1367, 1407), 'numpy.dtype', 'np.dtype', (["field_attrs[field]['datatype']"], {}), "(field_attrs[field]['datatype'])\n", (1375, 1407), True, 'import numpy as np\n'), ((2461, 2501), 'numpy.dtype', 'np.dtype', (["field_attrs[field]['datatype']"], {}), "(field_attrs[field]['datatype'])\n", (2469, 2501), True, 'import numpy as np\n'), ((5371, 5418), 'numpy.array', 'np.array', (["lags['file'][jj][dzg][dz_dict[field]]"], {}), "(lags['file'][jj][dzg][dz_dict[field]])\n", (5379, 5418), True, 'import numpy as np\n'), ((6073, 6120), 'numpy.array', 'np.array', (["lags['file'][jj][dzg][dz_dict[field]]"], {}), "(lags['file'][jj][dzg][dz_dict[field]])\n", (6081, 6120), True, 'import numpy as np\n'), 
((8171, 8207), 'numpy.array', 'np.array', (["lags['file'][jj][dzg][dzg]"], {}), "(lags['file'][jj][dzg][dzg])\n", (8179, 8207), True, 'import numpy as np\n'), ((1045, 1085), 'numpy.dtype', 'np.dtype', (["field_attrs[field]['datatype']"], {}), "(field_attrs[field]['datatype'])\n", (1053, 1085), True, 'import numpy as np\n'), ((4876, 4900), 'os.path.basename', 'os.path.basename', (['filein'], {}), '(filein)\n', (4892, 4900), False, 'import sys, os, h5py, glob, csv\n'), ((5014, 5038), 'os.path.basename', 'os.path.basename', (['filein'], {}), '(filein)\n', (5030, 5038), False, 'import sys, os, h5py, glob, csv\n'), ((6585, 6632), 'numpy.array', 'np.array', (["lags['file'][jj][dzg]['sigma_' + dzg]"], {}), "(lags['file'][jj][dzg]['sigma_' + dzg])\n", (6593, 6632), True, 'import numpy as np\n'), ((6696, 6732), 'numpy.array', 'np.array', (["lags['file'][jj][dzg][dzg]"], {}), "(lags['file'][jj][dzg][dzg])\n", (6704, 6732), True, 'import numpy as np\n'), ((1287, 1327), 'numpy.dtype', 'np.dtype', (["field_attrs[field]['datatype']"], {}), "(field_attrs[field]['datatype'])\n", (1295, 1327), True, 'import numpy as np\n'), ((7491, 7544), 'numpy.array', 'np.array', (["lags['file'][jj][dzg][dz_dict[field] + ave]"], {}), "(lags['file'][jj][dzg][dz_dict[field] + ave])\n", (7499, 7544), True, 'import numpy as np\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# The MIT License (MIT)
# Copyright (c) 2018 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.

'''
@author: <NAME> <<EMAIL>>
@note: Created on 04.07.2018
'''
import os
import sys
import cv2
import time
import tqdm
import argparse
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn import decomposition
from sklearn.svm import LinearSVC
from sklearn.manifold import TSNE

from utils import data_generator as gen
from utils.models import MLPClassifier

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FEAT_DIR = os.path.join(BASE_DIR, 'features')


def hparam_search(features_train, labels_train, features_test, labels_test):
    """ Search the best C param for an SVM classifier on PCA-reduced features.

    The features are first reduced to the number of principal components
    explaining ``VARIANCE`` of the variance, then a LinearSVC is fitted for a
    range of C values, printing train/test accuracy for each.

    Args:
        features_train (ndarray of size [images, features_no]): Features of the train dataset.
        labels_train (ndarray of size [images]): Labels of the train dataset.
        features_test (ndarray of size [images, features_no]): Features of the test dataset.
        labels_test (ndarray of size [images]): Labels of the test dataset.
    """
    VARIANCE = 0.60
    pca_hparam = decomposition.PCA(VARIANCE)
    pca_hparam.fit(features_train)
    features_hparam_train = pca_hparam.transform(features_train)
    print("Components with ", VARIANCE * 100, "% of variance: ",
          pca_hparam.n_components_)

    # Hoisted out of the loop below: the PCA projection of the test set does
    # not depend on C, so there is no need to recompute it per iteration.
    features_hparam_test = pca_hparam.transform(features_test)

    for C in [0.001, 0.01, 0.1, 1.0, 1.2, 1.5, 2.0, 10.0]:
        classifier_svm = LinearSVC(C=C, verbose=False)
        classifier_svm.fit(features_hparam_train, labels_train)
        print("======= C:", C, "=======")
        print("TRAIN SCORE = ",
              classifier_svm.score(features_hparam_train, labels_train))
        print("TEST SCORE = ",
              classifier_svm.score(features_hparam_test, labels_test))


def classify_svm(features_train, labels_train, features_test, labels_test,
                 C=1.0, verbose=False):
    """ Train an SVM classifier and evaluate train and test scores.

    Args:
        features_train (ndarray of size [images, features_no]): Features of the train dataset.
        labels_train (ndarray of size [images]): Labels of the train dataset.
        features_test (ndarray of size [images, features_no]): Features of the test dataset.
        labels_test (ndarray of size [images]): Labels of the test dataset.
        C (float): C parameter of SVM classifier.
        verbose (bool): Should I print some additional info?
    """
    print("Learning started, please wait...")
    svm_time_start = time.time()
    classifier_svm = LinearSVC(C=C, verbose=verbose, dual=True, max_iter=1000)
    classifier_svm.fit(features_train, labels_train)
    svm_time_fit = time.time()
    print("SVM fit in ", svm_time_fit - svm_time_start, " seconds\n\n")
    print("TRAIN SCORE = ", classifier_svm.score(features_train, labels_train))
    print("TEST SCORE = ", classifier_svm.score(features_test, labels_test))


def classify_mlp(features_train, labels_train, features_test, labels_test,
                 epochs, learning_rate):
    """ Train the MLP classifier and report batch-averaged test accuracy.

    Args:
        features_train (ndarray of size [images, features_no]): Features of the train dataset.
        labels_train (ndarray of size [images]): Integer labels of the train dataset.
        features_test (ndarray of size [images, features_no]): Features of the test dataset.
        labels_test (ndarray of size [images]): Integer labels of the test dataset.
        epochs (int): Number of passes over the training set.
        learning_rate (float): Optimizer learning rate.
    """
    BATCH_SIZE = 200
    # assumes labels are contiguous integers starting at 0 — TODO confirm
    CLASSES_COUNT = np.max(labels_train) + 1

    # Reset
    tf.reset_default_graph()

    # Define model
    with tf.device("/device:GPU:0"):
        model_classifier = MLPClassifier(
            [features_train.shape[-1], 1024, 512, 256, 128, 64, CLASSES_COUNT],
            BATCH_SIZE, learning_rate)

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:
        # Run the initialization
        sess.run(tf.global_variables_initializer())

        # Logs
        log_model_dir = os.path.join("logs", model_classifier.get_model_name())
        writer = tf.summary.FileWriter(
            os.path.join(log_model_dir,
                         time.strftime('%Y-%m-%d_%H:%M:%S', time.localtime())))

        # Fetch the prediction tensor once instead of per-iteration.
        prediction_op = model_classifier.get_classification_prediction()

        # BUGFIX: the original ran sess.run(tf.one_hot(...)) inside the
        # training loop, which adds new nodes to the graph on every batch and
        # progressively slows training; one-hot encode on the host instead.
        eye = np.eye(CLASSES_COUNT, dtype=np.float32)

        # Do the training loop
        global_batch_idx = 1
        print("Learning started, please wait...")
        for epoch in range(epochs):
            # BUGFIX: the original built `indices = np.arange(...)` but never
            # permuted it, so the "shuffled" arrays kept the original order.
            indices = np.arange(features_train.shape[0])
            np.random.shuffle(indices)
            # Fancy indexing already returns copies.
            features_shuffled = features_train[indices]
            labels_shuffled = labels_train[indices]

            for index in range(int(len(features_train) / BATCH_SIZE)):
                features = features_shuffled[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
                labels = labels_shuffled[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]

                # run optimizer
                labels_one_hot = eye[labels]
                _, loss, pred, summary = sess.run(
                    [model_classifier.optimizer,
                     model_classifier.loss,
                     prediction_op,
                     model_classifier.summary],
                    feed_dict={model_classifier.placeholder_embed: features,
                               model_classifier.placeholder_label: labels_one_hot})

                # train acc
                acc = float(sum(np.argmax(pred, axis=-1) == labels)) / labels.shape[0]

                # summ
                writer.add_summary(summary, global_batch_idx)
                global_batch_idx += 1

                # Info
                print("Epoch: %06d batch: %03d loss: %06f train acc: %03f"
                      % (epoch + 1, index, loss, acc))

        # Evaluate on the test set, batch by batch, and average accuracies.
        accs = []
        for index in range(int(len(features_test) / BATCH_SIZE)):
            features = features_test[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
            labels = labels_test[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
            pred = sess.run(prediction_op,
                            feed_dict={model_classifier.placeholder_embed: features})
            acc = float(sum(np.argmax(pred, axis=-1) == labels)) / labels.shape[0]
            accs.append(acc)
        print("TEST ACC = ", np.mean(accs))


def main(argv):
    """ Parse CLI args, load precomputed features and run a classifier.

    Args:
        argv (list of str): command-line arguments (without the program name).
    """
    # Parser
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--features",
                        help="features to decompose (hog, cnn-0, cnn-1, ...)",
                        type=str, default='hog')
    parser.add_argument("-c", "--classifier",
                        help="supported image features: svm, mlp",
                        type=str, default='svm')
    parser.add_argument("-e", "--epochs",
                        help="training epochs for mlp classifier",
                        type=int, default=1)
    parser.add_argument("-l", "--learning_rate",
                        help="training learning_rate for mlp classifier",
                        type=float, default=0.0005)
    parser.add_argument('-s', '--hparam_search',
                        help='search for best C param for SVM classifier',
                        action='store_true')
    # BUGFIX: parse the argv actually passed to main() (the original ignored
    # it and argparse fell back to sys.argv — equivalent for the __main__
    # call site, but wrong for programmatic callers).
    args = vars(parser.parse_args(argv))

    # Load features ===========================================================
    data_dir = os.path.join(FEAT_DIR, args['features'])
    if not os.path.exists(data_dir):
        # BUGFIX: the original only printed here and then crashed inside
        # np.load with a confusing traceback; abort cleanly instead.
        sys.exit("There is no such features calculated...")

    features_train = np.load(os.path.join(data_dir, "features_train.npy"))
    labels_train = np.load(os.path.join(data_dir, "labels_train.npy"))
    features_test = np.load(os.path.join(data_dir, "features_test.npy"))
    labels_test = np.load(os.path.join(data_dir, "labels_test.npy"))

    ######################################################################################
    #################################### HPARAM SEARCH ###################################
    ######################################################################################
    if args['hparam_search']:
        hparam_search(features_train, labels_train, features_test, labels_test)

    ######################################################################################
    ################################ FINAL CLASSIFICATION ################################
    ######################################################################################
    if args['classifier'] == 'svm':
        classify_svm(features_train, labels_train, features_test, labels_test)
    elif args['classifier'] == 'mlp':
        classify_mlp(features_train, labels_train, features_test, labels_test,
                     args['epochs'], args['learning_rate'])


if __name__ == "__main__":
    main(sys.argv[1:])
[ "numpy.arange", "os.path.exists", "numpy.mean", "argparse.ArgumentParser", "sklearn.decomposition.PCA", "tensorflow.Session", "numpy.max", "tensorflow.ConfigProto", "time.localtime", "tensorflow.one_hot", "tensorflow.device", "sklearn.svm.LinearSVC", "numpy.argmax", "time.time", "utils.m...
[((1630, 1664), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""features"""'], {}), "(BASE_DIR, 'features')\n", (1642, 1664), False, 'import os\n'), ((1592, 1617), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1607, 1617), False, 'import os\n'), ((2238, 2265), 'sklearn.decomposition.PCA', 'decomposition.PCA', (['VARIANCE'], {}), '(VARIANCE)\n', (2255, 2265), False, 'from sklearn import decomposition\n'), ((3628, 3639), 'time.time', 'time.time', ([], {}), '()\n', (3637, 3639), False, 'import time\n'), ((3661, 3718), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': 'C', 'verbose': 'verbose', 'dual': '(True)', 'max_iter': '(1000)'}), '(C=C, verbose=verbose, dual=True, max_iter=1000)\n', (3670, 3718), False, 'from sklearn.svm import LinearSVC\n'), ((3791, 3802), 'time.time', 'time.time', ([], {}), '()\n', (3800, 3802), False, 'import time\n'), ((4220, 4244), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4242, 4244), True, 'import tensorflow as tf\n'), ((4456, 4497), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (4470, 4497), True, 'import tensorflow as tf\n'), ((7288, 7313), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7311, 7313), False, 'import argparse\n'), ((8043, 8083), 'os.path.join', 'os.path.join', (['FEAT_DIR', "args['features']"], {}), "(FEAT_DIR, args['features'])\n", (8055, 8083), False, 'import os\n'), ((2542, 2571), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': 'C', 'verbose': '(False)'}), '(C=C, verbose=False)\n', (2551, 2571), False, 'from sklearn.svm import LinearSVC\n'), ((4175, 4195), 'numpy.max', 'np.max', (['labels_train'], {}), '(labels_train)\n', (4181, 4195), True, 'import numpy as np\n'), ((4278, 4304), 'tensorflow.device', 'tf.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (4287, 4304), True, 'import tensorflow as tf\n'), ((4333, 4445), 
'utils.models.MLPClassifier', 'MLPClassifier', (['[features_train.shape[-1], 1024, 512, 256, 128, 64, CLASSES_COUNT]', 'BATCH_SIZE', 'learning_rate'], {}), '([features_train.shape[-1], 1024, 512, 256, 128, 64,\n CLASSES_COUNT], BATCH_SIZE, learning_rate)\n', (4346, 4445), False, 'from utils.models import MLPClassifier\n'), ((4539, 4564), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4549, 4564), True, 'import tensorflow as tf\n'), ((8095, 8119), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (8109, 8119), False, 'import os\n'), ((8210, 8254), 'os.path.join', 'os.path.join', (['data_dir', '"""features_train.npy"""'], {}), "(data_dir, 'features_train.npy')\n", (8222, 8254), False, 'import os\n'), ((8283, 8325), 'os.path.join', 'os.path.join', (['data_dir', '"""labels_train.npy"""'], {}), "(data_dir, 'labels_train.npy')\n", (8295, 8325), False, 'import os\n'), ((8355, 8398), 'os.path.join', 'os.path.join', (['data_dir', '"""features_test.npy"""'], {}), "(data_dir, 'features_test.npy')\n", (8367, 8398), False, 'import os\n'), ((8426, 8467), 'os.path.join', 'os.path.join', (['data_dir', '"""labels_test.npy"""'], {}), "(data_dir, 'labels_test.npy')\n", (8438, 8467), False, 'import os\n'), ((4633, 4666), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4664, 4666), True, 'import tensorflow as tf\n'), ((5095, 5129), 'numpy.arange', 'np.arange', (['features_train.shape[0]'], {}), '(features_train.shape[0])\n', (5104, 5129), True, 'import numpy as np\n'), ((7228, 7241), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (7235, 7241), True, 'import numpy as np\n'), ((4866, 4882), 'time.localtime', 'time.localtime', ([], {}), '()\n', (4880, 4882), False, 'import time\n'), ((5692, 5725), 'tensorflow.one_hot', 'tf.one_hot', (['labels', 'CLASSES_COUNT'], {}), '(labels, CLASSES_COUNT)\n', (5702, 5725), True, 'import tensorflow as tf\n'), ((7115, 7139), 'numpy.argmax', 
'np.argmax', (['pred'], {'axis': '(-1)'}), '(pred, axis=-1)\n', (7124, 7139), True, 'import numpy as np\n'), ((6257, 6281), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(-1)'}), '(pred, axis=-1)\n', (6266, 6281), True, 'import numpy as np\n')]
'''
Forecast time series with an LSTM trained on quantile (pinball) loss
'''
import random
import sys
import argparse

import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error


def build_lstm_graph(n_features, n_targets, quantiles, burn_in, num_units,
                     input_keep_prob=1.0, output_keep_prob=1.0,
                     variable_scope='ts', dtype=tf.float32):
    '''
    Build the symbolic graph for modeling the time series.

    Args:
        n_features: number of input features per time step.
        n_targets: number of target variables per time step.
        quantiles: sequence of quantile levels to predict (one output
            group per level).
        burn_in: number of leading time steps excluded from the loss.
        num_units: LSTM state size.
        input_keep_prob, output_keep_prob: dropout keep probabilities.
        variable_scope: TF variable scope name for all created variables.
        dtype: tensor dtype for placeholders.

    Returns:
        (dict of named tensors/ops, the LSTM cell).
    '''
    # x, y are indexed by batch, time_step and feature
    with tf.variable_scope(variable_scope):
        x = tf.placeholder(dtype, [None, None, n_features], name='x')
        y = tf.placeholder(dtype, [None, None, n_targets], name='y')
        cell = tf.contrib.rnn.LSTMCell(num_units, use_peepholes=True)
        dropout_cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob,
                                                      output_keep_prob)
        outputs, state = tf.nn.dynamic_rnn(dropout_cell, x, dtype=dtype)
        w_fcst = tf.get_variable('w_fcst',
                                 [n_features + num_units, len(quantiles) * n_targets])
        b_fcst = tf.get_variable('b_fcst', [len(quantiles) * n_targets])

        # Use the last n_targets elements in each output vector at
        # each time step to match against y
        # Features for linear forecast: raw inputs concatenated with the
        # LSTM outputs, flattened over (batch, time).
        features_ = tf.concat([tf.reshape(x, [-1, n_features]),
                                tf.reshape(outputs, [-1, num_units])], axis=1)
        # Predicted quantiles
        pred = tf.nn.xw_plus_b(features_, w_fcst, b_fcst)
        # Transform into shape [n_samples, n_steps, n_quantiles * n_targets]
        y_tiled = tf.tile(y, [1, 1, len(quantiles)])
        pred = tf.reshape(pred, tf.shape(y_tiled))
        # TODO: add penalty on LSTM weight matrices and w_fcst
        # Pinball (quantile) loss: q*theta for theta >= 0, (q-1)*theta
        # otherwise, written as q*theta - min(theta, 0).
        theta = y_tiled[:, burn_in:, :] - pred[:, burn_in:, :]
        err = theta * np.repeat(quantiles, n_targets) - tf.minimum(theta, 0)
        cost = tf.reduce_mean(tf.reshape(err, [-1, len(quantiles) * n_targets]),
                              axis=0)
        cost = tf.reduce_mean(cost)
    return {'x': x, 'y': y, 'pred': pred, 'cost': cost,
            'lstm_state': state, 'lstm_outputs': outputs,
            'lstm_weights': cell.weights,
            'w_fcst': w_fcst, 'b_fcst': b_fcst}, cell


def train_lstm(sess, ts, y=None, features_func=None,
               targets_func=None, quantiles=(.5,), burn_in=50,
               batch_size=50, lr0=1e-5, lr_decay=(50, .99),
               n_iter=500, valid_every=5, print_every=5,
               variable_scope='ts', **kwargs):
    '''
    Train LSTM for given features and targets functions.

    Either pass precomputed targets `y`, or both `features_func` and
    `targets_func` which are applied row-wise to `ts`.

    Args:
        sess: an open tf.Session.
        ts: <num samples>-by-<length of every sample> array.
        y: optional precomputed targets aligned with ts.
        features_func, targets_func: per-row feature/target extractors.
        quantiles: quantile levels for the pinball loss.
            BUGFIX: default changed from the mutable list [.5] to the
            tuple (.5,) — same value, no shared-mutable-default hazard.
        burn_in: leading steps excluded from the loss.
        batch_size, lr0, lr_decay, n_iter, valid_every, print_every:
            SGD hyperparameters; lr_decay is (decay_steps, decay_rate).
        variable_scope: TF variable scope name.
        **kwargs: forwarded to build_lstm_graph (e.g. num_units).

    Returns:
        (lstm tensor dict, LSTM cell) as from build_lstm_graph.
    '''
    assert (y is not None or ((features_func is not None) and
                              (targets_func is not None)))
    # ts <num samples>-by-<length of every sample>
    # Split ts into train, dev set; we'll only use ts_test once at the end
    test_size = .1
    if y is not None:
        features, dev_features, targets, dev_targets = (
            train_test_split(ts, y, test_size=test_size))
    else:
        ts_train, ts_dev = train_test_split(ts, test_size=test_size)
        # Make features, targets for LSTM training
        features = np.apply_along_axis(features_func, axis=1, arr=ts_train)
        targets = np.apply_along_axis(targets_func, axis=1, arr=ts_train)
        dev_features = np.apply_along_axis(features_func, axis=1, arr=ts_dev)
        dev_targets = np.apply_along_axis(targets_func, axis=1, arr=ts_dev)
    # Promote 2-D arrays to the [batch, time, feature] layout dynamic_rnn needs.
    if features.ndim == 2:
        features = features[:, :, None]
        dev_features = dev_features[:, :, None]
    if targets.ndim == 2:
        targets = targets[:, :, None]
        dev_targets = dev_targets[:, :, None]
    n_features = features.shape[2]
    n_targets = targets.shape[2]

    # The burn-in period would be excluded from cost calculation
    if np.isscalar(quantiles):
        quantiles = [quantiles]
    lstm, cell = build_lstm_graph(n_features, n_targets, quantiles, burn_in,
                                  variable_scope=variable_scope, **kwargs)

    # Initialise optimiser
    with tf.variable_scope(variable_scope):
        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(lr0, global_step,
                                                   lr_decay[0], lr_decay[1])
        optimizer = (tf.train.MomentumOptimizer(learning_rate, momentum=.5)
                     .minimize(lstm['cost'], global_step=global_step))

    # Begin training
    var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, variable_scope)
    sess.run(tf.variables_initializer(var_list))

    # Run minibatch SGD
    # Break when Ctrl-C is pressed
    try:
        for i in range(n_iter):
            msg = f'Iter {i}'
            # Run SGD
            batch = random.sample(range(features.shape[0]), batch_size)
            _, cost = sess.run([optimizer, lstm['cost']],
                               feed_dict={lstm['x']: features[batch],
                                          lstm['y']: targets[batch]})
            msg += f' Train loss {cost:.4f}'
            if i % valid_every == 0:
                dict_ = {lstm['x']: dev_features, lstm['y']: dev_targets}
                dev_cost = sess.run(lstm['cost'], feed_dict=dict_)
                msg += f' Dev loss {dev_cost:.4f}'
            if i % print_every == 0:
                print(msg, file=sys.stderr)
    except KeyboardInterrupt:
        pass
    return lstm, cell


def eval_ar(sess, lstm, ts_test, features_func, targets_func, burn_in):
    '''
    Evaluate the AR model.

    Args:
        sess: an open tf.Session with trained variables.
        lstm: tensor dict returned by build_lstm_graph/train_lstm.
        ts_test: <num samples>-by-<num variables>-by-<series length> array;
            variable 0 is the noisy series, variable 1 the noise-free one.
        features_func, targets_func: per-row feature/target extractors.
        burn_in: leading steps excluded from the error metrics.

    Returns:
        (pinball cost on noisy targets,
         RMSE against the noise-free targets,
         predictions array).
    '''
    # ts_test <num samples>-by-<num variables>
    # -by-<length of every sample/series>
    TS_WITH_NOISE = 0
    TS_WITH_NO_NOISE = 1
    x = ts_test[:, TS_WITH_NOISE, :].squeeze()
    x_no_noise = ts_test[:, TS_WITH_NO_NOISE, :].squeeze()
    features = np.apply_along_axis(features_func, axis=1, arr=x)
    targets = np.apply_along_axis(targets_func, axis=1, arr=x)
    targets_no_noise = np.apply_along_axis(targets_func, axis=1, arr=x_no_noise)
    if features.ndim == 2:
        features = features[:, :, None]
    if targets.ndim == 2:
        targets = targets[:, :, None]
        targets_no_noise = targets_no_noise[:, :, None]
    dict_ = {lstm['x']: features, lstm['y']: targets}
    cost, pred = sess.run([lstm['cost'], lstm['pred']], feed_dict=dict_)
    # For simple feature and median quantile
    cost_no_noise = mean_squared_error(targets_no_noise[:, burn_in:, 0],
                                       pred[:, burn_in:, 0])
    return cost, np.sqrt(cost_no_noise), pred


if __name__ == '__main__':
    '''
    Command line interface

    Usage:
    seq 1 50 | xargs -I {} -P 3 python3 tspred_qtl.py simulation.npz simulation_test.npz >> out.csv
    '''
    # Parse command line
    parser = argparse.ArgumentParser()
    parser.add_argument('train_file')
    parser.add_argument('test_file')
    args = parser.parse_args()

    # Read data
    data = np.load(args.train_file)['data']
    data_test = np.load(args.test_file)['data']

    # Train
    simple_features = lambda x: x[:-1]
    # Alternative feature map kept for experimentation (currently unused).
    moments_features = lambda x: np.column_stack([x[:-1], x[:-1] ** 2])

    sess = tf.Session()
    burn_in = 50
    features_func = simple_features
    res = train_lstm(sess, data[:, 0, :].squeeze() * 10,
                     features_func, lambda x: x[1:],
                     quantiles=[.5], burn_in=burn_in, batch_size=50,
                     lr0=3e-3, lr_decay=(50, .99), n_iter=300,
                     num_units=10)

    # Test
    cost, cost_no_noise, pred = eval_ar(sess, res[0], data_test * 10,
                                        features_func, lambda x: x[1:],
                                        burn_in)
    pred_error = data_test[:, 1, 1:].squeeze() - pred.squeeze() / 10
    print(' '.join([str(w) for w in pred_error.flat]))
[ "numpy.sqrt", "tensorflow.shape", "numpy.column_stack", "tensorflow.contrib.rnn.LSTMCell", "tensorflow.reduce_mean", "tensorflow.variables_initializer", "numpy.repeat", "numpy.isscalar", "argparse.ArgumentParser", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.nn.dynamic_rnn", "...
[((3917, 3939), 'numpy.isscalar', 'np.isscalar', (['quantiles'], {}), '(quantiles)\n', (3928, 3939), True, 'import numpy as np\n'), ((4581, 4645), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES', 'variable_scope'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES, variable_scope)\n', (4598, 4645), True, 'import tensorflow as tf\n'), ((5955, 6004), 'numpy.apply_along_axis', 'np.apply_along_axis', (['features_func'], {'axis': '(1)', 'arr': 'x'}), '(features_func, axis=1, arr=x)\n', (5974, 6004), True, 'import numpy as np\n'), ((6019, 6067), 'numpy.apply_along_axis', 'np.apply_along_axis', (['targets_func'], {'axis': '(1)', 'arr': 'x'}), '(targets_func, axis=1, arr=x)\n', (6038, 6067), True, 'import numpy as np\n'), ((6091, 6148), 'numpy.apply_along_axis', 'np.apply_along_axis', (['targets_func'], {'axis': '(1)', 'arr': 'x_no_noise'}), '(targets_func, axis=1, arr=x_no_noise)\n', (6110, 6148), True, 'import numpy as np\n'), ((6577, 6651), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['targets_no_noise[:, burn_in:, 0]', 'pred[:, burn_in:, 0]'], {}), '(targets_no_noise[:, burn_in:, 0], pred[:, burn_in:, 0])\n', (6595, 6651), False, 'from sklearn.metrics import mean_squared_error\n'), ((6964, 6989), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6987, 6989), False, 'import argparse\n'), ((7349, 7361), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7359, 7361), True, 'import tensorflow as tf\n'), ((547, 580), 'tensorflow.variable_scope', 'tf.variable_scope', (['variable_scope'], {}), '(variable_scope)\n', (564, 580), True, 'import tensorflow as tf\n'), ((594, 651), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[None, None, n_features]'], {'name': '"""x"""'}), "(dtype, [None, None, n_features], name='x')\n", (608, 651), True, 'import tensorflow as tf\n'), ((664, 720), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[None, None, n_targets]'], {'name': '"""y"""'}), "(dtype, [None, 
None, n_targets], name='y')\n", (678, 720), True, 'import tensorflow as tf\n'), ((737, 791), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['num_units'], {'use_peepholes': '(True)'}), '(num_units, use_peepholes=True)\n', (760, 791), True, 'import tensorflow as tf\n'), ((815, 885), 'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', (['cell', 'input_keep_prob', 'output_keep_prob'], {}), '(cell, input_keep_prob, output_keep_prob)\n', (844, 885), True, 'import tensorflow as tf\n'), ((964, 1011), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['dropout_cell', 'x'], {'dtype': 'dtype'}), '(dropout_cell, x, dtype=dtype)\n', (981, 1011), True, 'import tensorflow as tf\n'), ((1568, 1610), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['features_', 'w_fcst', 'b_fcst'], {}), '(features_, w_fcst, b_fcst)\n', (1583, 1610), True, 'import tensorflow as tf\n'), ((2133, 2153), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost'], {}), '(cost)\n', (2147, 2153), True, 'import tensorflow as tf\n'), ((3069, 3113), 'sklearn.model_selection.train_test_split', 'train_test_split', (['ts', 'y'], {'test_size': 'test_size'}), '(ts, y, test_size=test_size)\n', (3085, 3113), False, 'from sklearn.model_selection import train_test_split\n'), ((3152, 3193), 'sklearn.model_selection.train_test_split', 'train_test_split', (['ts'], {'test_size': 'test_size'}), '(ts, test_size=test_size)\n', (3168, 3193), False, 'from sklearn.model_selection import train_test_split\n'), ((3265, 3321), 'numpy.apply_along_axis', 'np.apply_along_axis', (['features_func'], {'axis': '(1)', 'arr': 'ts_train'}), '(features_func, axis=1, arr=ts_train)\n', (3284, 3321), True, 'import numpy as np\n'), ((3340, 3395), 'numpy.apply_along_axis', 'np.apply_along_axis', (['targets_func'], {'axis': '(1)', 'arr': 'ts_train'}), '(targets_func, axis=1, arr=ts_train)\n', (3359, 3395), True, 'import numpy as np\n'), ((3419, 3473), 'numpy.apply_along_axis', 'np.apply_along_axis', 
(['features_func'], {'axis': '(1)', 'arr': 'ts_dev'}), '(features_func, axis=1, arr=ts_dev)\n', (3438, 3473), True, 'import numpy as np\n'), ((3496, 3549), 'numpy.apply_along_axis', 'np.apply_along_axis', (['targets_func'], {'axis': '(1)', 'arr': 'ts_dev'}), '(targets_func, axis=1, arr=ts_dev)\n', (3515, 3549), True, 'import numpy as np\n'), ((4162, 4195), 'tensorflow.variable_scope', 'tf.variable_scope', (['variable_scope'], {}), '(variable_scope)\n', (4179, 4195), True, 'import tensorflow as tf\n'), ((4219, 4250), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (4230, 4250), True, 'import tensorflow as tf\n'), ((4275, 4345), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['lr0', 'global_step', 'lr_decay[0]', 'lr_decay[1]'], {}), '(lr0, global_step, lr_decay[0], lr_decay[1])\n', (4301, 4345), True, 'import tensorflow as tf\n'), ((4692, 4726), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['var_list'], {}), '(var_list)\n', (4716, 4726), True, 'import tensorflow as tf\n'), ((6709, 6731), 'numpy.sqrt', 'np.sqrt', (['cost_no_noise'], {}), '(cost_no_noise)\n', (6716, 6731), True, 'import numpy as np\n'), ((7128, 7152), 'numpy.load', 'np.load', (['args.train_file'], {}), '(args.train_file)\n', (7135, 7152), True, 'import numpy as np\n'), ((7177, 7200), 'numpy.load', 'np.load', (['args.test_file'], {}), '(args.test_file)\n', (7184, 7200), True, 'import numpy as np\n'), ((7294, 7332), 'numpy.column_stack', 'np.column_stack', (['[x[:-1], x[:-1] ** 2]'], {}), '([x[:-1], x[:-1] ** 2])\n', (7309, 7332), True, 'import numpy as np\n'), ((1775, 1792), 'tensorflow.shape', 'tf.shape', (['y_tiled'], {}), '(y_tiled)\n', (1783, 1792), True, 'import tensorflow as tf\n'), ((1978, 1998), 'tensorflow.minimum', 'tf.minimum', (['theta', '(0)'], {}), '(theta, 0)\n', (1988, 1998), True, 'import tensorflow as tf\n'), ((1411, 1442), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, n_features]'], {}), 
'(x, [-1, n_features])\n', (1421, 1442), True, 'import tensorflow as tf\n'), ((1475, 1511), 'tensorflow.reshape', 'tf.reshape', (['outputs', '[-1, num_units]'], {}), '(outputs, [-1, num_units])\n', (1485, 1511), True, 'import tensorflow as tf\n'), ((1944, 1975), 'numpy.repeat', 'np.repeat', (['quantiles', 'n_targets'], {}), '(quantiles, n_targets)\n', (1953, 1975), True, 'import numpy as np\n'), ((4418, 4473), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate'], {'momentum': '(0.5)'}), '(learning_rate, momentum=0.5)\n', (4444, 4473), True, 'import tensorflow as tf\n')]
from math import *

import os

import numpy as np


def f(x, a=1):
    """Return sin(exp(a * x)) for scalar x and frequency-like parameter a."""
    return sin(exp(a * x))


def generate(fileName, start=0, end=2, n=20):
    """Sample f on an n-point grid and dump the grid and values as CSV.

    Writes two files:
      * test/X.csv — one line with the n grid points, comma-separated.
      * test/Y.csv — n lines; line k holds f(x, a=k+1) for every grid point.

    Args:
        fileName: accepted for backward compatibility but currently unused;
            output paths are hardcoded.
            NOTE(review): confirm the hardcoded 'test/' paths are intended.
        start, end: range sampled with numpy.linspace (end inclusive).
        n: number of grid points and number of parameter values a = 1..n.
    """
    X = np.linspace(start, end, n)
    # One row per parameter value a = 1..n, one column per grid point.
    Y = [[f(x, a) for x in X] for a in range(1, n + 1)]

    # BUGFIX: the original called X.join(',') and y.join(','), which raise
    # AttributeError (join is a str method, not an ndarray/list method), and
    # wrote Y rows without newline separators.  Build the CSV text explicitly
    # and make sure the output directory exists.
    os.makedirs('test', exist_ok=True)
    with open('test/X.csv', 'w') as fs:
        fs.write(','.join(str(x) for x in X))
    with open('test/Y.csv', 'w') as fs:
        for row in Y:
            fs.write(','.join(str(v) for v in row) + '\n')
[ "numpy.linspace" ]
[((136, 162), 'numpy.linspace', 'np.linspace', (['start', 'end', 'n'], {}), '(start, end, n)\n', (147, 162), True, 'import numpy as np\n')]
from time import time from os import path, listdir from datetime import timedelta from datetime import date as dt_date from datetime import datetime as dt from numpy import cumprod from pandas import DataFrame, read_sql_query, read_csv, concat from functions import psqlEngine class Investments(): def __init__(self, path = '../investments/', name = 'get_investments', **kwargs): self.kwargs = kwargs self.path = path self.hyperparameters() self.get_engine() self.get_dollar() self.get_all_assets() self.domestic_bond_returns() self.get_benchmarks() self.portfolio_domestic_stocks = self.get_quotas('domestic_stocks') self.portfolio_international_stocks = self.get_quotas('international_stocks') self.portfolio_crypto = self.get_quotas('crypto') # self.portfolio_domestic_options = self.get_quotas('domestic_options') self.portfolio_domestic_funds = self.get_quotas('domestic_funds') self.get_portfolio() self.get_aggregate() self.get_time_series() self.dispose_engine() def __call__(self, flag = 'assets'): if flag == 'dollar': return self.dollar if flag == 'bonds': return self.domestic_bonds, self.interests if flag == 'stocks': return self.domestic_tickers, self.international_tickers if flag == 'crypto': return self.crypto, self.fractions if flag == 'portfolio': return self.portfolio, self.portfolio_aggregate.round(2) if flag == 'save': rounded = self.portfolio.round(2) rounded2 = self.portfolio_aggregate.round(2) engine = psqlEngine(self.database) connection = engine.connect() rounded.to_sql('portfolio', connection, if_exists = 'replace', index = False) rounded2.to_sql('aggregate', connection, if_exists = 'replace', index = False) connection.close() engine.dispose() if flag == 'time_series': return self.portfolio_time_series.round(2) def hyperparameters(self): self.database = self.kwargs.get('database', 'database.ini') self.benchmark_database = self.kwargs.get('benchmarks_database', 'benchmarks') self.domestic_stocks_database = self.kwargs.get('domestic_database', 
'brazil_stocks') self.domestic_options_database = self.kwargs.get('domestic_database', 'brazil_options') self.international_database = self.kwargs.get('international_database', 'usa_stocks') self.currency_database = self.kwargs.get('currency_database', 'currencies') self.domestic_bonds_path = '{}bonds/'.format(self.path) self.crypto_path = '{}crypto/'.format(self.path) self.domestic_stocks_path = '{}stocks/domestic/'.format(self.path) self.international_stocks_path = '{}stocks/international/'.format(self.path) self.domestic_options_path = '{}options/domestic/'.format(self.path) self.domestic_funds_path = '{}funds/domestic/'.format(self.path) self.list_paths = [ self.domestic_bonds_path, self.crypto_path, self.domestic_stocks_path, self.international_stocks_path, self.domestic_options_path, self.domestic_funds_path, ] self.dates_min = DataFrame() def get_engine(self): self.engine = psqlEngine(self.database) self.connection = self.engine.connect() def dispose_engine(self): self.connection.close() self.engine.dispose() def get_dollar(self): currency = 'BRLUSD' self.dollar = float(read_sql_query("SELECT * FROM {} WHERE ticker = '{}'".format(self.benchmark_database, currency), self.connection).iloc[0].close) self.dollar_full = read_sql_query("SELECT date, close FROM {} WHERE ticker = '{}' ORDER BY date".format(self.benchmark_database, currency), self.connection) self.dollar_full.drop_duplicates('date', inplace = True) self.dollar_full = self.insert_weekends(self.dollar_full) self.dollar_full.rename(columns = {'close': 'dollar_close'}, inplace = True) self.dollar_full['dollar_close'] = self.dollar_full.dollar_close.astype('float') def get_benchmarks(self): self.spy = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = 'SPY' ORDER BY date".format(self.benchmark_database), self.connection) self.bova = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = 'BOVA11' ORDER BY date".format(self.benchmark_database), self.connection) 
self.spy.drop_duplicates('date', inplace = True) self.bova.drop_duplicates('date', inplace = True) self.spy = self.insert_weekends(self.spy) self.spy['close'] = self.spy.close.astype('float') self.bova = self.insert_weekends(self.bova) self.bova = self.bova.merge(self.dollar_full, on = 'date') self.bova['close'] = self.bova.close.astype('float') self.bova['close_dollar'] = (self.bova.close * self.bova.dollar_close).to_list() def get_all_assets(self): self.interests, self.fractions = list(), list() self.domestic_tickers, self.international_tickers = list(), list() self.domestic_options_tickers = list() self.domestic_funds_tickers = list() for directory in self.list_paths: list_files = list() for filename in listdir(directory): if filename.endswith('.csv'): list_files.append(path.join(directory, filename)) if directory == self.domestic_bonds_path: self.interests.append(filename.replace('.csv', '').upper()) if directory == self.crypto_path: self.fractions.append(filename.replace('.csv', '').upper()) if directory == self.domestic_stocks_path: self.domestic_tickers.append(filename.replace('.csv', '').upper()) if directory == self.international_stocks_path: self.international_tickers.append(filename.replace('.csv', '').upper()) if directory == self.domestic_options_path: self.domestic_options_tickers.append(filename.replace('.csv', '').upper()) if directory == self.domestic_funds_path: self.domestic_funds_tickers.append(filename.replace('.csv', '').upper()) dictionary = dict() if directory == self.domestic_bonds_path: for filename, interest in zip(list_files, self.interests): df = read_csv(filename) dictionary[interest] = df if dictionary: self.domestic_bonds = concat(dictionary) self.domestic_bonds = self.domestic_bonds.rename(columns = {'pct_cdi': 'share'}) self.domestic_bonds = self.domestic_bonds.merge(self.dollar_full, on = 'date') self.domestic_bonds['purchase_price_dollar'] = (self.domestic_bonds.purchase_price.astype('float') * 
self.domestic_bonds.dollar_close.astype('float')).to_list() else: if directory == self.crypto_path: symbols = self.fractions if directory == self.domestic_stocks_path: symbols = self.domestic_tickers if directory == self.international_stocks_path: symbols = self.international_tickers if directory == self.domestic_options_path: symbols = self.domestic_options_tickers if directory == self.domestic_funds_path: symbols = self.domestic_funds_tickers for filename, ticker in zip(list_files, symbols): df = read_csv(filename) if ticker in self.domestic_funds_tickers: df.set_index('date', inplace = True) df['purchase_price'] = df.purchase_price.diff() df = df.dropna() df.reset_index(inplace = True) if (ticker in self.domestic_tickers) or (ticker in self.domestic_options_tickers) or (ticker in self.domestic_funds_tickers): df = df.merge(self.dollar_full, on = 'date') df['purchase_price'] = df.purchase_price.astype('float') * df.dollar_close.astype('float') dictionary[ticker] = df df['cum_share'] = df.share.cumsum() df['price_share'] = (df.purchase_price / df.share) df['cum_price_share'] = df.price_share.expanding().mean() dictionary[ticker] = df if dictionary: self.stocks = concat(dictionary) if directory == self.crypto_path: self.crypto = concat(dictionary) if directory == self.domestic_stocks_path: self.domestic_stocks = concat(dictionary) if directory == self.international_stocks_path: self.international_stocks = concat(dictionary) if directory == self.domestic_options_path: self.domestic_options = concat(dictionary) if directory == self.domestic_funds_path: self.domestic_funds = concat(dictionary) def get_quotas(self, asset): quotas = dict() domestic = False if asset == 'crypto': list_tickers = self.fractions db = self.currency_database if asset == 'domestic_stocks': list_tickers = self.domestic_tickers db = self.domestic_stocks_database domestic = True if asset == 'international_stocks': list_tickers = self.international_tickers db = self.international_database if asset 
== 'domestic_options': list_tickers = self.domestic_options_tickers db = self.domestic_options_database domestic = True if asset == 'domestic_funds': list_tickers = self.domestic_funds_tickers domestic = True for ticker in list_tickers: key = ticker.upper() if asset == 'crypto': quotas[key] = self.crypto.loc[ticker].cum_share.iloc[-1] if asset == 'domestic_stocks': quotas[key] = self.domestic_stocks.loc[ticker].cum_share.iloc[-1] if asset == 'international_stocks': quotas[key] = self.international_stocks.loc[ticker].cum_share.iloc[-1] if asset == 'domestic_options': quotas[key] = self.domestic_options.loc[ticker].cum_share.iloc[-1] if asset == 'domestic_funds': quotas[key] = 1. portfolio = DataFrame({ 'asset': list(quotas.keys()), 'quotas': list(quotas.values()) }) portfolio.sort_values(by = ['asset'], inplace = True) if asset == 'domestic_funds': value_usd, value_brl = list(), list() for asset in list_tickers: close_price = read_csv(self.domestic_funds_path + '{}.csv'.format(asset.lower())).share.iloc[-1] value_usd.append(close_price * quotas.get(asset) * self.dollar) value_brl.append(close_price * quotas.get(asset)) portfolio['value_usd'] = value_usd portfolio['value_brl'] = value_brl else: if domestic == False: close_price = read_sql_query("SELECT date, ticker, close FROM (SELECT date, ticker, close, MAX(date) OVER (PARTITION BY ticker) AS max_date FROM {}) x WHERE date = max_date".format(db), self.connection) else: close_price = read_sql_query("SELECT date, ticker, close FROM (SELECT date, ticker, adjusted_close as close, MAX(date) OVER (PARTITION BY ticker) AS max_date FROM {}) x WHERE date = max_date".format(db), self.connection) close_price['close'] = close_price.close.astype('float') close_price = close_price.loc[close_price.ticker.isin(portfolio.asset.to_list())] self.dates_min = self.dates_min.append(close_price[['date', 'ticker']]) close_price['quota'] = close_price.ticker.apply(lambda x: quotas.get(x)) if domestic == False: portfolio['value_usd'] = 
(close_price.close * close_price.quota).to_list() portfolio['value_brl'] = (close_price.close * close_price.quota / self.dollar).to_list() else: portfolio['value_usd'] = (close_price.close * close_price.quota * self.dollar).to_list() portfolio['value_brl'] = (close_price.close * close_price.quota).to_list() portfolio.sort_values(by = ['value_usd'], ascending = False, inplace = True) return portfolio def get_portfolio(self): self.portfolio = dict() self.portfolio['domestic bonds'] = self.portfolio_bonds self.portfolio['domestic stocks'] = self.portfolio_domestic_stocks self.portfolio['international stocks'] = self.portfolio_international_stocks self.portfolio['crypto'] = self.portfolio_crypto # self.portfolio['domestic options'] = self.portfolio_domestic_options self.portfolio['domestic funds'] = self.portfolio_domestic_funds self.portfolio = concat(self.portfolio) self.portfolio = self.portfolio.loc[self.portfolio.quotas >= 1e-10] def get_aggregate(self): assets = list(self.portfolio.index.unique(level = 0)) value_brl, value_usd = list(), list() for asset in assets: value_brl.append(self.portfolio.loc[asset].sum().value_brl) value_usd.append(self.portfolio.loc[asset].sum().value_usd) self.portfolio_aggregate = DataFrame({ 'asset': assets, 'value_brl': value_brl, 'value_usd': value_usd, }) def insert_weekends(self, df, asset = 'stock'): df.set_index('date', inplace = True) start, end = df.index[0], df.index[-1] start = dt.strptime(start, '%Y-%m-%d').date() end = dt.strptime(end, '%Y-%m-%d').date() dates = [str(start + timedelta(days = x)) for x in range(0, (end - start).days + 1, 1)] df = df.reindex(dates, fill_value = 0) df.reset_index(inplace = True) close = list() if asset == '6040': for value in df.interest: if value != 0: close.append(value) if value == 0: close.append(1.) 
df['interest'] = close if asset == 'bond': for value in df.portfolio: if value != 0: close.append(value) if value == 0: close.append(close[-1]) df['portfolio'] = close if asset == 'crypto': for value in df.close: if value != 0: close.append(value) if value == 0: close.append(close[-1]) df['close'] = close if asset == 'stock': for value in df.close: if value != 0: close.append(value) if value == 0: close.append(close[-1]) df['close'] = close return df def get_concat_dataframe(self, columns, options = True): columns_bonds = list() for elem in columns: if elem == 'share': columns_bonds.append('purchase_price') elif elem == 'purchase_price': columns_bonds.append('purchase_price_dollar') else: columns_bonds.append(elem) domestic_bonds = dict() domestic_bonds['CDB'] = self.domestic_bonds[columns_bonds].rename(columns = {'purchase_price_dollar': 'purchase_price'}) domestic_bonds = concat(domestic_bonds) if options == True: df = concat([domestic_bonds, self.domestic_stocks[columns], self.international_stocks[columns], self.crypto[columns], self.domestic_funds[columns], self.domestic_options[columns]]) else: df = concat([domestic_bonds, self.domestic_stocks[columns], self.international_stocks[columns], self.crypto[columns], self.domestic_funds[columns]]) return df def get_portfolio_invested(self, df): if 'date' in df.columns.to_list(): df.set_index('date', inplace = True) start, end = df.index[0], df.index[-1] start = dt.strptime(start, '%Y-%m-%d').date() end = dt.strptime(end, '%Y-%m-%d').date() reference = self.get_concat_dataframe(['date', 'purchase_price'], False) # reference['purchase_price'] = reference.purchase_price.astype('float') reference = reference.groupby(by = 'date')['purchase_price'].sum() reference = DataFrame(reference).reset_index() reference['close'] = reference.purchase_price.cumsum() reference = reference.loc[(reference.date >= start.strftime('%Y-%m-%d')) & (reference.date <= end.strftime('%Y-%m-%d'))] reference = self.insert_weekends(reference) 
reference = reference.drop(columns = {'purchase_price'}).rename(columns = {'close': 'invested'}) ref_start = dt.strptime(reference.date.iloc[0], '%Y-%m-%d').date() ref_end = dt.strptime(reference.date.iloc[-1], '%Y-%m-%d').date() dates_beginning = [str(start + timedelta(days = x)) for x in range(0, (ref_start - start).days, 1)] dates_end = [str(ref_end + timedelta(days = x)) for x in range(1, (end - ref_end).days + 1, 1)] aux = [reference.invested.iloc[0] for _ in range(len(dates_beginning))] aux2 = [reference.invested.iloc[-1] for _ in range(len(dates_end))] reference = DataFrame({ 'date': dates_beginning + reference.date.to_list() + dates_end, 'invested': aux + reference.invested.to_list() + aux2, }) return reference.invested.to_list() def get_return_benchmark_portfolio(self): value_bond, value_bova = 400, 600 value = list() dates = self.bova.loc[(self.bova.date >= self.start_date) & (self.bova.date <= self.end_date), 'date'].to_list() bova_dollar = self.bova.loc[(self.bova.date >= self.start_date) & (self.bova.date <= self.end_date), 'close_dollar'] interests = self.insert_weekends(self.cdi[['date', 'interest']], asset = '6040').interest for interest, return_bova in zip(interests, bova_dollar.pct_change().fillna(0)): value_bond = value_bond * interest value_bova = value_bova * (1 + return_bova) value.append(value_bond + value_bova) self.benchmark_portfolio = DataFrame({ 'date': dates, 'portfolio': value, }) def domestic_bond_returns(self): end = dt_date.today().strftime('%Y-%m-%d') self.cdi = read_csv('../interests/cdi.csv') self.cdi['date'] = [dt.strptime(date, '%d/%m/%Y').strftime('%Y-%m-%d') for date in self.cdi.date] self.cdi['interest'] = [1 + interest / 100 for interest in self.cdi.cdi] total_returns_brl = 0 for date, purchase_price, share in zip(self.domestic_bonds.date, self.domestic_bonds.purchase_price, self.domestic_bonds.share): cdi = self.cdi.loc[(self.cdi.date >= date) & (self.cdi.date <= end)] value = purchase_price for interest in cdi.interest: 
value = value * (interest * share) total_returns_brl += value total_returns_usd = total_returns_brl * self.dollar self.portfolio_bonds = DataFrame({ 'asset': ['domestic bonds'], 'quotas': [1], 'value_usd': [total_returns_usd], 'value_brl': [total_returns_brl], }) def get_returns(self, df, flag = 'cumulative'): reference = self.get_concat_dataframe(['date', 'purchase_price'], False) reference = reference.groupby(by = 'date')['purchase_price'].sum() reference = DataFrame(reference).reset_index() df.reset_index(inplace = True) returns = list() if flag == 'cumulative': for date in df['date'].iloc[1:]: end = df.loc[df.date == date, 'portfolio'].index[0] start = end - 1 if date not in reference['date'].to_list(): retorno = (df.portfolio.iloc[end] - df.portfolio.iloc[start]) / df.portfolio.iloc[start] returns.append(retorno) if date in reference['date'].to_list(): cash_flow = reference.loc[reference.date == date, 'purchase_price'].iloc[0] retorno = (df.portfolio.iloc[end] - (df.portfolio.iloc[start] + cash_flow)) / (df.portfolio.iloc[start] + cash_flow) returns.append(retorno) returns = [0] + returns returns = list(map(lambda x: x + 1, returns)) returns = 100 * (cumprod(returns) - 1) if flag == 'cagr': for date in df['date'].iloc[1:]: end = df.loc[df.date == date, 'portfolio'].index[0] start = df.index[0] exponent = 365 / (end - start) cash_flow = reference.loc[(reference.date >= self.start_date) & (reference.date <= date), 'purchase_price'].sum() retorno = 100 * (((df.portfolio.iloc[end] / (df.portfolio.iloc[start] + cash_flow)) ** exponent) - 1) returns.append(retorno) returns = [0] + returns return returns def get_min_date(self, db, tickers): print(self.dates_min.loc[self.dates_min.date < (dt_date.today() - timedelta(days = 1)).strftime('%Y-%m-%d')]) dates = self.dates_min.date.min() return dates def get_start_date(self): start_domestic = self.domestic_stocks[['date']].sort_values(by = 'date').iloc[0].values[0] start_international = 
self.international_stocks[['date']].sort_values(by = 'date').iloc[0].values[0] start_crypto = self.crypto[['date']].sort_values(by = 'date').iloc[0].values[0] # start_domestic_options = self.domestic_options[['date']].sort_values(by = 'date').iloc[0].values[0] start_domestic_funds = self.domestic_funds[['date']].sort_values(by = 'date').iloc[0].values[0] start_date = min(start_domestic, start_international, start_crypto, start_domestic_funds)#, start_domestic_options) self.start_date = self.kwargs.get('start_date', start_date) self.start_date = dt.strptime(self.start_date, '%Y-%m-%d').strftime('%Y-%m-%d') def get_end_date(self): domestic_date = self.get_min_date(self.domestic_stocks_database, self.domestic_tickers) international_date = self.get_min_date(self.international_database, self.international_tickers) crypto_date = self.get_min_date(self.currency_database, self.fractions) dates = [domestic_date, international_date, crypto_date] end_date = min(dates) self.end_date = self.kwargs.get('end_date', end_date) self.end_date = dt.strptime(self.end_date, '%Y-%m-%d').strftime('%Y-%m-%d') def get_time_series(self): start_time = time() self.get_start_date() self.get_end_date() dates = [(dt.strptime(self.start_date, '%Y-%m-%d').date() + timedelta(days = k)).strftime('%Y-%m-%d') \ for k in range((dt.strptime(self.end_date, '%Y-%m-%d').date() - dt.strptime(self.start_date, '%Y-%m-%d').date()).days + 1)] dataframe = DataFrame() df = self.get_concat_dataframe(['date', 'share'], False) quotes = df.index.unique(level = 0).to_list() for quote in quotes: if quote in self.interests: for data, value in zip(df.loc[quote].date, df.loc[quote].purchase_price): interests = self.cdi.loc[self.cdi.date >= data, ['date', 'interest']] lista = [value * interests.interest.iloc[0]] for interest in interests.interest.iloc[1:]: lista.append(lista[-1] * interest) interests['portfolio'] = lista lista = list() interests = interests.merge(self.dollar_full, on = 'date') interests['portfolio'] = 
(interests.portfolio.astype('float') * interests.dollar_close.astype('float')).to_list() interests.drop(columns = {'interest'}, inplace = True) interests = self.insert_weekends(interests, asset = 'bond') dataframe = concat([dataframe, interests]) elif quote in self.domestic_funds_tickers: prices = self.domestic_funds[['date', 'share']].sort_values(by = 'date') prices.rename(columns = {'share': 'close'}, inplace = True) prices = prices.merge(self.dollar_full, on = 'date') prices['portfolio'] = (prices.close.astype('float') * prices.dollar_close.astype('float')).to_list() prices.drop(columns = {'close'}, inplace = True) dataframe = concat([dataframe, prices]) else: start_time = time() if quote in self.fractions: prices = read_sql_query("SELECT date, close FROM {} WHERE ticker = '{}' ORDER BY date".format(self.currency_database, quote), self.connection) prices.drop_duplicates('date', inplace = True) prices = self.insert_weekends(prices, asset = 'crypto') elif quote in self.international_tickers: prices = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = '{}' ORDER BY date".format(self.international_database, quote), self.connection) prices.drop_duplicates('date', inplace = True) prices = self.insert_weekends(prices) elif quote in self.domestic_options_tickers: prices = read_sql_query("SELECT date, close FROM {} WHERE ticker = '{}' ORDER BY date".format(self.domestic_options_database, quote), self.connection).drop_duplicates('date') prices.drop_duplicates('date', inplace = True) prices = self.insert_weekends(prices) elif quote in self.domestic_tickers: prices = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = '{}' ORDER BY date".format(self.domestic_stocks_database, quote), self.connection).drop_duplicates('date') prices.drop_duplicates('date', inplace = True) prices = self.insert_weekends(prices) prices = prices.merge(self.dollar_full, on = 'date') prices['close'] = (prices.close.astype('float') * 
prices.dollar_close.astype('float')).to_list() for data, share in zip(df.loc[quote].date, df.loc[quote].share): close_price = prices.loc[prices.date >= data].copy() close_price['portfolio'] = [price * share for price in close_price.close] dataframe = concat([dataframe, close_price]) dataframe = dataframe.groupby(by = ['date']).sum().drop(columns = {'close'}) dataframe = DataFrame(dataframe).loc[(dataframe.index >= self.start_date) & (dataframe.index <= self.end_date)] self.portfolio_time_series = DataFrame() self.portfolio_time_series['date'] = dates self.portfolio_time_series['portfolio'] = dataframe.portfolio.to_list() self.portfolio_time_series['portfolio_invested'] = self.get_portfolio_invested(self.portfolio_time_series) self.portfolio_time_series['SPY'] = self.spy.loc[(self.spy.date >= self.start_date) & (self.spy.date <= self.end_date), 'close'].to_list() self.portfolio_time_series['BOVA11'] = self.bova.loc[(self.bova.date >= self.start_date) & (self.bova.date <= self.end_date)].drop_duplicates('date')['close_dollar'].to_list() self.portfolio_time_series.sort_values(by = 'date', inplace = True) self.portfolio_time_series['return_portfolio'] = self.get_returns(self.portfolio_time_series) self.portfolio_time_series['return_SPY'] = 100 * ((self.portfolio_time_series.SPY.pct_change() + 1).fillna(1).cumprod() - 1) self.portfolio_time_series['return_BOVA11'] = 100 * ((self.portfolio_time_series.BOVA11.pct_change() + 1).fillna(1).cumprod() - 1) self.get_return_benchmark_portfolio() self.portfolio_time_series['port_bench'] = self.benchmark_portfolio.portfolio.to_list() self.portfolio_time_series['return_port_bench'] = [0] + 100 *((self.benchmark_portfolio.portfolio.pct_change() + 1).fillna(1).cumprod() - 1) self.portfolio_time_series['cagr_portfolio'] = self.get_returns(self.portfolio_time_series, flag = 'cagr') self.portfolio_time_series['cagr_SPY'] = [0] + [100 * ((cagr / self.portfolio_time_series.SPY.iloc[0]) ** (250 / k) - 1) for k, cagr in 
enumerate(self.portfolio_time_series.SPY.iloc[1:], 1)] self.portfolio_time_series['cagr_BOVA11'] = [0] + [100 * ((cagr / self.portfolio_time_series.BOVA11.iloc[0]) ** (250 / k) - 1) for k, cagr in enumerate(self.portfolio_time_series.BOVA11.iloc[1:], 1)] self.portfolio_time_series['cagr_port_bench'] = [0] + [100 * ((cagr / self.benchmark_portfolio.portfolio.iloc[0]) ** (250 / k) - 1) for k, cagr in enumerate(self.benchmark_portfolio.portfolio.iloc[1:], 1)] self.portfolio_time_series.drop(columns = {'index'}, inplace = True) self.portfolio_time_series.set_index('date', inplace = True)
[ "os.listdir", "pandas.read_csv", "datetime.datetime.strptime", "os.path.join", "datetime.timedelta", "functions.psqlEngine", "time.time", "pandas.DataFrame", "datetime.date.today", "pandas.concat", "numpy.cumprod" ]
[((3407, 3418), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (3416, 3418), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((3469, 3494), 'functions.psqlEngine', 'psqlEngine', (['self.database'], {}), '(self.database)\n', (3479, 3494), False, 'from functions import psqlEngine\n'), ((13693, 13715), 'pandas.concat', 'concat', (['self.portfolio'], {}), '(self.portfolio)\n', (13699, 13715), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((14139, 14215), 'pandas.DataFrame', 'DataFrame', (["{'asset': assets, 'value_brl': value_brl, 'value_usd': value_usd}"], {}), "({'asset': assets, 'value_brl': value_brl, 'value_usd': value_usd})\n", (14148, 14215), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((16272, 16294), 'pandas.concat', 'concat', (['domestic_bonds'], {}), '(domestic_bonds)\n', (16278, 16294), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((19121, 19167), 'pandas.DataFrame', 'DataFrame', (["{'date': dates, 'portfolio': value}"], {}), "({'date': dates, 'portfolio': value})\n", (19130, 19167), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((19312, 19344), 'pandas.read_csv', 'read_csv', (['"""../interests/cdi.csv"""'], {}), "('../interests/cdi.csv')\n", (19320, 19344), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((20038, 20166), 'pandas.DataFrame', 'DataFrame', (["{'asset': ['domestic bonds'], 'quotas': [1], 'value_usd': [\n total_returns_usd], 'value_brl': [total_returns_brl]}"], {}), "({'asset': ['domestic bonds'], 'quotas': [1], 'value_usd': [\n total_returns_usd], 'value_brl': [total_returns_brl]})\n", (20047, 20166), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((23640, 23646), 'time.time', 'time', ([], {}), '()\n', (23644, 23646), False, 'from time import time\n'), ((23973, 23984), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (23982, 23984), 
False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((27876, 27887), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (27885, 27887), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((1726, 1751), 'functions.psqlEngine', 'psqlEngine', (['self.database'], {}), '(self.database)\n', (1736, 1751), False, 'from functions import psqlEngine\n'), ((5545, 5563), 'os.listdir', 'listdir', (['directory'], {}), '(directory)\n', (5552, 5563), False, 'from os import path, listdir\n'), ((16340, 16525), 'pandas.concat', 'concat', (['[domestic_bonds, self.domestic_stocks[columns], self.international_stocks[\n columns], self.crypto[columns], self.domestic_funds[columns], self.\n domestic_options[columns]]'], {}), '([domestic_bonds, self.domestic_stocks[columns], self.\n international_stocks[columns], self.crypto[columns], self.\n domestic_funds[columns], self.domestic_options[columns]])\n', (16346, 16525), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((16547, 16700), 'pandas.concat', 'concat', (['[domestic_bonds, self.domestic_stocks[columns], self.international_stocks[\n columns], self.crypto[columns], self.domestic_funds[columns]]'], {}), '([domestic_bonds, self.domestic_stocks[columns], self.\n international_stocks[columns], self.crypto[columns], self.\n domestic_funds[columns]])\n', (16553, 16700), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((14425, 14455), 'datetime.datetime.strptime', 'dt.strptime', (['start', '"""%Y-%m-%d"""'], {}), "(start, '%Y-%m-%d')\n", (14436, 14455), True, 'from datetime import datetime as dt\n'), ((14477, 14505), 'datetime.datetime.strptime', 'dt.strptime', (['end', '"""%Y-%m-%d"""'], {}), "(end, '%Y-%m-%d')\n", (14488, 14505), True, 'from datetime import datetime as dt\n'), ((16908, 16938), 'datetime.datetime.strptime', 'dt.strptime', (['start', '"""%Y-%m-%d"""'], {}), "(start, '%Y-%m-%d')\n", (16919, 16938), True, 'from datetime import 
datetime as dt\n'), ((16960, 16988), 'datetime.datetime.strptime', 'dt.strptime', (['end', '"""%Y-%m-%d"""'], {}), "(end, '%Y-%m-%d')\n", (16971, 16988), True, 'from datetime import datetime as dt\n'), ((17254, 17274), 'pandas.DataFrame', 'DataFrame', (['reference'], {}), '(reference)\n', (17263, 17274), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((17659, 17706), 'datetime.datetime.strptime', 'dt.strptime', (['reference.date.iloc[0]', '"""%Y-%m-%d"""'], {}), "(reference.date.iloc[0], '%Y-%m-%d')\n", (17670, 17706), True, 'from datetime import datetime as dt\n'), ((17732, 17780), 'datetime.datetime.strptime', 'dt.strptime', (['reference.date.iloc[-1]', '"""%Y-%m-%d"""'], {}), "(reference.date.iloc[-1], '%Y-%m-%d')\n", (17743, 17780), True, 'from datetime import datetime as dt\n'), ((19256, 19271), 'datetime.date.today', 'dt_date.today', ([], {}), '()\n', (19269, 19271), True, 'from datetime import date as dt_date\n'), ((20451, 20471), 'pandas.DataFrame', 'DataFrame', (['reference'], {}), '(reference)\n', (20460, 20471), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((22973, 23013), 'datetime.datetime.strptime', 'dt.strptime', (['self.start_date', '"""%Y-%m-%d"""'], {}), "(self.start_date, '%Y-%m-%d')\n", (22984, 23013), True, 'from datetime import datetime as dt\n'), ((23526, 23564), 'datetime.datetime.strptime', 'dt.strptime', (['self.end_date', '"""%Y-%m-%d"""'], {}), "(self.end_date, '%Y-%m-%d')\n", (23537, 23564), True, 'from datetime import datetime as dt\n'), ((27739, 27759), 'pandas.DataFrame', 'DataFrame', (['dataframe'], {}), '(dataframe)\n', (27748, 27759), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((6791, 6809), 'pandas.read_csv', 'read_csv', (['filename'], {}), '(filename)\n', (6799, 6809), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((6929, 6947), 'pandas.concat', 'concat', (['dictionary'], {}), '(dictionary)\n', (6935, 6947), 
False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((8001, 8019), 'pandas.read_csv', 'read_csv', (['filename'], {}), '(filename)\n', (8009, 8019), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((9003, 9021), 'pandas.concat', 'concat', (['dictionary'], {}), '(dictionary)\n', (9009, 9021), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((14542, 14559), 'datetime.timedelta', 'timedelta', ([], {'days': 'x'}), '(days=x)\n', (14551, 14559), False, 'from datetime import timedelta\n'), ((17827, 17844), 'datetime.timedelta', 'timedelta', ([], {'days': 'x'}), '(days=x)\n', (17836, 17844), False, 'from datetime import timedelta\n'), ((17931, 17948), 'datetime.timedelta', 'timedelta', ([], {'days': 'x'}), '(days=x)\n', (17940, 17948), False, 'from datetime import timedelta\n'), ((19373, 19402), 'datetime.datetime.strptime', 'dt.strptime', (['date', '"""%d/%m/%Y"""'], {}), "(date, '%d/%m/%Y')\n", (19384, 19402), True, 'from datetime import datetime as dt\n'), ((21397, 21413), 'numpy.cumprod', 'cumprod', (['returns'], {}), '(returns)\n', (21404, 21413), False, 'from numpy import cumprod\n'), ((25028, 25058), 'pandas.concat', 'concat', (['[dataframe, interests]'], {}), '([dataframe, interests])\n', (25034, 25058), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((25558, 25585), 'pandas.concat', 'concat', (['[dataframe, prices]'], {}), '([dataframe, prices])\n', (25564, 25585), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((25633, 25639), 'time.time', 'time', ([], {}), '()\n', (25637, 25639), False, 'from time import time\n'), ((5649, 5679), 'os.path.join', 'path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (5658, 5679), False, 'from os import path, listdir\n'), ((9114, 9132), 'pandas.concat', 'concat', (['dictionary'], {}), '(dictionary)\n', (9120, 9132), False, 'from pandas import DataFrame, read_sql_query, read_csv, 
concat\n'), ((9243, 9261), 'pandas.concat', 'concat', (['dictionary'], {}), '(dictionary)\n', (9249, 9261), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((9382, 9400), 'pandas.concat', 'concat', (['dictionary'], {}), '(dictionary)\n', (9388, 9400), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((9513, 9531), 'pandas.concat', 'concat', (['dictionary'], {}), '(dictionary)\n', (9519, 9531), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((9640, 9658), 'pandas.concat', 'concat', (['dictionary'], {}), '(dictionary)\n', (9646, 9658), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((23773, 23790), 'datetime.timedelta', 'timedelta', ([], {'days': 'k'}), '(days=k)\n', (23782, 23790), False, 'from datetime import timedelta\n'), ((27601, 27633), 'pandas.concat', 'concat', (['[dataframe, close_price]'], {}), '([dataframe, close_price])\n', (27607, 27633), False, 'from pandas import DataFrame, read_sql_query, read_csv, concat\n'), ((22088, 22103), 'datetime.date.today', 'dt_date.today', ([], {}), '()\n', (22101, 22103), True, 'from datetime import date as dt_date\n'), ((22106, 22123), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (22115, 22123), False, 'from datetime import timedelta\n'), ((23723, 23763), 'datetime.datetime.strptime', 'dt.strptime', (['self.start_date', '"""%Y-%m-%d"""'], {}), "(self.start_date, '%Y-%m-%d')\n", (23734, 23763), True, 'from datetime import datetime as dt\n'), ((23845, 23883), 'datetime.datetime.strptime', 'dt.strptime', (['self.end_date', '"""%Y-%m-%d"""'], {}), "(self.end_date, '%Y-%m-%d')\n", (23856, 23883), True, 'from datetime import datetime as dt\n'), ((23893, 23933), 'datetime.datetime.strptime', 'dt.strptime', (['self.start_date', '"""%Y-%m-%d"""'], {}), "(self.start_date, '%Y-%m-%d')\n", (23904, 23933), True, 'from datetime import datetime as dt\n')]
import torch import shutil import numpy as np import matplotlib matplotlib.use('pdf') import matplotlib.pyplot as plt # import cv2 from skimage.transform import resize import torchvision.transforms as transforms from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def save_checkpoint(state, filename='checkpoint.pth.tar'): torch.save(state, filename) def adjust_learning_rate(optimizer, epoch, args, interval): """Sets the learning rate to the initial LR decayed by 10 every 100 epochs""" lr = args.lr if epoch < interval[0]: lr = args.lr elif epoch >= interval[0] and epoch < interval[1]: lr = args.lr * 0.1 else: lr = args.lr * 0.01 #lr = args.lr * (0.1 ** (epoch // 100)) for param_group in optimizer.param_groups: param_group['lr'] = lr return lr def accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res def multi_class_auc(all_target, all_output, num_c = None): from sklearn.preprocessing import label_binarize # all_output = np.stack(all_output) all_target = label_binarize(all_target, classes=list(range(0, num_c))) all_output = label_binarize(all_output, classes=list(range(0, num_c))) auc_sum = [] for num_class in range(0, num_c): try: auc = roc_auc_score(all_target[:, num_class], all_output[:, num_class]) auc_sum.append(auc) except ValueError: pass auc = sum(auc_sum) / (float(len(auc_sum))+1e-8) return auc 
def evaluation_metrics(label, pred, C): if C==2: auc = roc_auc_score(label, pred) else: auc = multi_class_auc(label, pred, num_c=C) corrects = np.equal(np.array(label), np.array(pred)) acc = float(sum(corrects)) / len(corrects) # mean class precision = precision_score(label, pred, average='macro') recall = recall_score(label, pred, average='macro') f1score = f1_score(label, pred, average='macro') return round(auc, 4), round(acc, 4), round(precision, 4), round(recall, 4), round(f1score, 4) def showfeature(x, savename): # trun to numpy x = x.data.cpu().numpy() print (x.shape) box = [] for item in range(0, x.shape[0]): x_patch = x[item, :, :] box.append(x_patch) x_patch = np.stack(box) x_patch = np.max(x_patch, axis=0) x_patch = resize(x_patch, (224, 224), order=3, mode='constant', cval=0, clip=True, preserve_range=True) x_patch = (x_patch - np.min(x_patch)) / (np.max(x_patch) - np.min(x_patch) + 1e-11) x_patch = x_patch * 255 x_patch = np.array(x_patch, dtype="uint8") plt.plot(1), plt.imshow(x_patch, cmap='jet') plt.axis('off') plt.savefig(savename, bbox_inches='tight', pad_inches=0) def showimage(x, savename): import torchvision.transforms as transforms mean=[0.485, 0.456, 0.406] std=[0.229, 0.224, 0.225] z = x * torch.tensor(std).view(3, 1, 1).cuda() z = z + torch.tensor(mean).view(3, 1, 1).cuda() z = z.cpu() z = z[[2,1,0], : ,:] img2 = transforms.ToPILImage()(z) img2.save(savename) def get_color_distortion(s=1.0): color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s) rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8) rnd_gray = transforms.RandomGrayscale(p=0.2) color_distort = transforms.Compose([rnd_color_jitter, rnd_gray]) return color_distort def gaussian_blur(x): from PIL.ImageFilter import GaussianBlur if np.random.randint(0, 2) == 1: x = x.filter(GaussianBlur(radius=np.random.uniform(0.1, 2.0))) return x
[ "torchvision.transforms.ToPILImage", "sklearn.metrics.precision_score", "sklearn.metrics.recall_score", "numpy.array", "torchvision.transforms.ColorJitter", "sklearn.metrics.roc_auc_score", "matplotlib.pyplot.imshow", "matplotlib.pyplot.plot", "numpy.max", "numpy.stack", "numpy.min", "matplotl...
[((64, 85), 'matplotlib.use', 'matplotlib.use', (['"""pdf"""'], {}), "('pdf')\n", (78, 85), False, 'import matplotlib\n'), ((773, 800), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (783, 800), False, 'import torch\n'), ((2629, 2674), 'sklearn.metrics.precision_score', 'precision_score', (['label', 'pred'], {'average': '"""macro"""'}), "(label, pred, average='macro')\n", (2644, 2674), False, 'from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score\n'), ((2688, 2730), 'sklearn.metrics.recall_score', 'recall_score', (['label', 'pred'], {'average': '"""macro"""'}), "(label, pred, average='macro')\n", (2700, 2730), False, 'from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score\n'), ((2745, 2783), 'sklearn.metrics.f1_score', 'f1_score', (['label', 'pred'], {'average': '"""macro"""'}), "(label, pred, average='macro')\n", (2753, 2783), False, 'from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score\n'), ((3110, 3123), 'numpy.stack', 'np.stack', (['box'], {}), '(box)\n', (3118, 3123), True, 'import numpy as np\n'), ((3138, 3161), 'numpy.max', 'np.max', (['x_patch'], {'axis': '(0)'}), '(x_patch, axis=0)\n', (3144, 3161), True, 'import numpy as np\n'), ((3176, 3273), 'skimage.transform.resize', 'resize', (['x_patch', '(224, 224)'], {'order': '(3)', 'mode': '"""constant"""', 'cval': '(0)', 'clip': '(True)', 'preserve_range': '(True)'}), "(x_patch, (224, 224), order=3, mode='constant', cval=0, clip=True,\n preserve_range=True)\n", (3182, 3273), False, 'from skimage.transform import resize\n'), ((3421, 3453), 'numpy.array', 'np.array', (['x_patch'], {'dtype': '"""uint8"""'}), "(x_patch, dtype='uint8')\n", (3429, 3453), True, 'import numpy as np\n'), ((3507, 3522), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3515, 3522), True, 'import matplotlib.pyplot as plt\n'), ((3527, 3583), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savename'], 
{'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "(savename, bbox_inches='tight', pad_inches=0)\n", (3538, 3583), True, 'import matplotlib.pyplot as plt\n'), ((3982, 4040), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.8 * s)', '(0.8 * s)', '(0.8 * s)', '(0.2 * s)'], {}), '(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)\n', (4004, 4040), True, 'import torchvision.transforms as transforms\n'), ((4064, 4109), 'torchvision.transforms.RandomApply', 'transforms.RandomApply', (['[color_jitter]'], {'p': '(0.8)'}), '([color_jitter], p=0.8)\n', (4086, 4109), True, 'import torchvision.transforms as transforms\n'), ((4125, 4158), 'torchvision.transforms.RandomGrayscale', 'transforms.RandomGrayscale', ([], {'p': '(0.2)'}), '(p=0.2)\n', (4151, 4158), True, 'import torchvision.transforms as transforms\n'), ((4179, 4227), 'torchvision.transforms.Compose', 'transforms.Compose', (['[rnd_color_jitter, rnd_gray]'], {}), '([rnd_color_jitter, rnd_gray])\n', (4197, 4227), True, 'import torchvision.transforms as transforms\n'), ((2396, 2422), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['label', 'pred'], {}), '(label, pred)\n', (2409, 2422), False, 'from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score\n'), ((2514, 2529), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (2522, 2529), True, 'import numpy as np\n'), ((2531, 2545), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (2539, 2545), True, 'import numpy as np\n'), ((3458, 3469), 'matplotlib.pyplot.plot', 'plt.plot', (['(1)'], {}), '(1)\n', (3466, 3469), True, 'import matplotlib.pyplot as plt\n'), ((3471, 3502), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x_patch'], {'cmap': '"""jet"""'}), "(x_patch, cmap='jet')\n", (3481, 3502), True, 'import matplotlib.pyplot as plt\n'), ((3878, 3901), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (3899, 3901), True, 'import torchvision.transforms as transforms\n'), ((4330, 4353), 
'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (4347, 4353), True, 'import numpy as np\n'), ((2116, 2181), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['all_target[:, num_class]', 'all_output[:, num_class]'], {}), '(all_target[:, num_class], all_output[:, num_class])\n', (2129, 2181), False, 'from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score\n'), ((3316, 3331), 'numpy.min', 'np.min', (['x_patch'], {}), '(x_patch)\n', (3322, 3331), True, 'import numpy as np\n'), ((3336, 3351), 'numpy.max', 'np.max', (['x_patch'], {}), '(x_patch)\n', (3342, 3351), True, 'import numpy as np\n'), ((3354, 3369), 'numpy.min', 'np.min', (['x_patch'], {}), '(x_patch)\n', (3360, 3369), True, 'import numpy as np\n'), ((4401, 4428), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(2.0)'], {}), '(0.1, 2.0)\n', (4418, 4428), True, 'import numpy as np\n'), ((3735, 3752), 'torch.tensor', 'torch.tensor', (['std'], {}), '(std)\n', (3747, 3752), False, 'import torch\n'), ((3786, 3804), 'torch.tensor', 'torch.tensor', (['mean'], {}), '(mean)\n', (3798, 3804), False, 'import torch\n')]
# coding: utf-8 __author__ = 'ZFTurbo: https://kaggle.com/zfturbo' import os import time import pickle import numpy as np import pandas as pd from multiprocessing import Pool, cpu_count from itertools import repeat from ensemble_boxes import * from map_boxes import * def save_in_file_fast(arr, file_name): pickle.dump(arr, open(file_name, 'wb')) def load_from_file_fast(file_name): return pickle.load(open(file_name, 'rb')) def get_detections(path): preds = pd.read_csv(path) ids = preds['ImageId'].values preds_strings = preds['PredictionString'].values ImageID = [] LabelName = [] Conf = [] XMin = [] XMax = [] YMin = [] YMax = [] for j in range(len(ids)): # print('Go for {}'.format(ids[j])) id = ids[j] if str(preds_strings[j]) == 'nan': continue arr = preds_strings[j].strip().split(' ') if len(arr) % 6 != 0: print('Some problem here! {}'.format(id)) exit() for i in range(0, len(arr), 6): ImageID.append(id) LabelName.append(arr[i]) Conf.append(float(arr[i + 1])) XMin.append(float(arr[i + 2])) XMax.append(float(arr[i + 4])) YMin.append(float(arr[i + 3])) YMax.append(float(arr[i + 5])) res = pd.DataFrame(ImageID, columns=['ImageId']) res['LabelName'] = LabelName res['Conf'] = Conf res['XMin'] = XMin res['XMax'] = XMax res['YMin'] = YMin res['YMax'] = YMax return res def process_single_id(id, res, weights, params): run_type = params['run_type'] verbose = params['verbose'] if verbose: print('Go for ID: {}'.format(id)) boxes_list = [] scores_list = [] labels_list = [] labels_to_use_forward = dict() labels_to_use_backward = dict() for i in range(len(res[id])): boxes = [] scores = [] labels = [] dt = res[id][i] if str(dt) == 'nan': boxes = np.zeros((0, 4), dtype=np.float32) scores = np.zeros((0, ), dtype=np.float32) labels = np.zeros((0, ), dtype=np.int32) boxes_list.append(boxes) scores_list.append(scores) labels_list.append(labels) continue pred = dt.strip().split(' ') # Empty preds if len(pred) <= 1: boxes = np.zeros((0, 4), dtype=np.float32) scores = np.zeros((0,), 
dtype=np.float32) labels = np.zeros((0,), dtype=np.int32) boxes_list.append(boxes) scores_list.append(scores) labels_list.append(labels) continue # Check correctness if len(pred) % 6 != 0: print('Erorr % 6 {}'.format(len(pred))) print(dt) exit() for j in range(0, len(pred), 6): lbl = pred[j] scr = float(pred[j + 1]) box_x1 = float(pred[j + 2]) box_y1 = float(pred[j + 3]) box_x2 = float(pred[j + 4]) box_y2 = float(pred[j + 5]) if box_x1 >= box_x2: if verbose: print('Problem with box x1 and x2: {}. Skip it'.format(pred[j:j+6])) continue if box_y1 >= box_y2: if verbose: print('Problem with box y1 and y2: {}. Skip it'.format(pred[j:j+6])) continue if scr <= 0: if verbose: print('Problem with box score: {}. Skip it'.format(pred[j:j+6])) continue boxes.append([box_x1, box_y1, box_x2, box_y2]) scores.append(scr) if lbl not in labels_to_use_forward: cur_point = len(labels_to_use_forward) labels_to_use_forward[lbl] = cur_point labels_to_use_backward[cur_point] = lbl labels.append(labels_to_use_forward[lbl]) boxes = np.array(boxes, dtype=np.float32) scores = np.array(scores, dtype=np.float32) labels = np.array(labels, dtype=np.int32) boxes_list.append(boxes) scores_list.append(scores) labels_list.append(labels) # Empty predictions for all models if len(boxes_list) == 0: return np.array([]), np.array([]), np.array([]) if run_type == 'wbf': merged_boxes, merged_scores, merged_labels = weighted_boxes_fusion(boxes_list, scores_list, labels_list, weights=weights, iou_thr=params['intersection_thr'], skip_box_thr=params['skip_box_thr'], conf_type=params['conf_type']) elif run_type == 'nms': iou_thr = params['iou_thr'] merged_boxes, merged_scores, merged_labels = nms(boxes_list, scores_list, labels_list, weights=weights, iou_thr=iou_thr) elif run_type == 'soft-nms': iou_thr = params['iou_thr'] sigma = params['sigma'] thresh = params['thresh'] merged_boxes, merged_scores, merged_labels = soft_nms(boxes_list, scores_list, labels_list, weights=weights, iou_thr=iou_thr, sigma=sigma, 
thresh=thresh) elif run_type == 'nmw': merged_boxes, merged_scores, merged_labels = non_maximum_weighted(boxes_list, scores_list, labels_list, weights=weights, iou_thr=params['intersection_thr'], skip_box_thr=params['skip_box_thr']) if verbose: print(len(boxes_list), len(merged_boxes)) if 'limit_boxes' in params: limit_boxes = params['limit_boxes'] if len(merged_boxes) > limit_boxes: merged_boxes = merged_boxes[:limit_boxes] merged_scores = merged_scores[:limit_boxes] merged_labels = merged_labels[:limit_boxes] # Rename labels back merged_labels_string = [] for m in merged_labels: merged_labels_string.append(labels_to_use_backward[m]) merged_labels = np.array(merged_labels_string, dtype=np.str) # Create IDs array ids_list = [id] * len(merged_labels) return merged_boxes, merged_scores, merged_labels, ids_list def ensemble_predictions(pred_filenames, weights, params): verbose = False if 'verbose' in params: verbose = params['verbose'] start_time = time.time() procs_to_use = max(cpu_count() // 2, 1) # procs_to_use = 1 if verbose: print('Use processes: {}'.format(procs_to_use)) res = dict() ref_ids = None for j in range(len(pred_filenames)): s = pd.read_csv(pred_filenames[j]) try: s.sort_values('ImageId', inplace=True) except: s.sort_values('ImageID', inplace=True) s.reset_index(drop=True, inplace=True) try: ids = s['ImageId'].values except: ids = s['ImageID'].values preds = s['PredictionString'].values if ref_ids is None: ref_ids = tuple(ids) else: if ref_ids != tuple(ids): print('Different IDs in ensembled CSVs!') exit() for i in range(len(ids)): id = ids[i] if id not in res: res[id] = [] res[id].append(preds[i]) p = Pool(processes=procs_to_use) ids_to_use = sorted(list(res.keys())) results = p.starmap(process_single_id, zip(ids_to_use, repeat(res), repeat(weights), repeat(params))) all_ids = [] all_boxes = [] all_scores = [] all_labels = [] for boxes, scores, labels, ids_list in results: if boxes is None: continue all_boxes.append(boxes) all_scores.append(scores) 
all_labels.append(labels) all_ids.append(ids_list) all_ids = np.concatenate(all_ids) all_boxes = np.concatenate(all_boxes) all_scores = np.concatenate(all_scores) all_labels = np.concatenate(all_labels) if verbose: print(all_ids.shape, all_boxes.shape, all_scores.shape, all_labels.shape) res = pd.DataFrame(all_ids, columns=['ImageId']) res['LabelName'] = all_labels res['Conf'] = all_scores res['XMin'] = all_boxes[:, 0] res['XMax'] = all_boxes[:, 2] res['YMin'] = all_boxes[:, 1] res['YMax'] = all_boxes[:, 3] if verbose: print('Run time: {:.2f}'.format(time.time() - start_time)) return res if __name__ == '__main__': if 1: params = { 'run_type': 'nms', 'iou_thr': 0.5, 'verbose': True, } if 1: params = { 'run_type': 'soft-nms', 'iou_thr': 0.5, 'thresh': 0.0001, 'sigma': 0.1, 'verbose': True, } if 1: params = { 'run_type': 'nmw', 'skip_box_thr': 0.000000001, 'intersection_thr': 0.5, 'limit_boxes': 30000, 'verbose': True, } if 1: params = { 'run_type': 'wbf', 'skip_box_thr': 0.0000001, 'intersection_thr': 0.6, 'conf_type': 'avg', 'limit_boxes': 30000, 'verbose': True, } # Files available here: https://github.com/ZFTurbo/Weighted-Boxes-Fusion/releases/download/v1.0/test_data.zip annotations_path = 'test_data/challenge-2019-validation-detection-bbox-expand_3520.csv' pred_list = [ 'test_data/0.46450_TF_IRV2_atrous_3520.csv', 'test_data/0.52319_mmdet_3520.csv', 'test_data/0.52918_tensorpack1_3520.csv', 'test_data/0.53775_tensorpack2_3520.csv', 'test_data/0.51145_retinanet_3520.csv', ] weights = [1, 1, 1, 1, 1] ann = pd.read_csv(annotations_path) ann = ann[['ImageId', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax']].values # Find initial scores for i in range(len(pred_list)): det = get_detections(pred_list[i]) det = det[['ImageId', 'LabelName', 'Conf', 'XMin', 'XMax', 'YMin', 'YMax']].values mean_ap, average_precisions = mean_average_precision_for_boxes(ann, det, verbose=False) print("File: {} mAP: {:.6f}".format(os.path.basename(pred_list[i]), mean_ap)) start_time = time.time() 
ensemble_preds = ensemble_predictions(pred_list, weights, params) print("Overall ensemble time for method: {}: {:.2f} sec".format(params['run_type'], time.time() - start_time)) ensemble_preds.to_csv("test_data/debug_{}.csv".format(params['run_type']), index=False) ensemble_preds = ensemble_preds[['ImageId', 'LabelName', 'Conf', 'XMin', 'XMax', 'YMin', 'YMax']].values mean_ap, average_precisions = mean_average_precision_for_boxes(ann, ensemble_preds, verbose=True) print("Ensemble [{}] Weights: {} Params: {} mAP: {:.6f}".format(len(weights), weights, params, mean_ap))
[ "pandas.read_csv", "multiprocessing.cpu_count", "numpy.array", "numpy.zeros", "multiprocessing.Pool", "numpy.concatenate", "os.path.basename", "pandas.DataFrame", "time.time", "itertools.repeat" ]
[((479, 496), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (490, 496), True, 'import pandas as pd\n'), ((1336, 1378), 'pandas.DataFrame', 'pd.DataFrame', (['ImageID'], {'columns': "['ImageId']"}), "(ImageID, columns=['ImageId'])\n", (1348, 1378), True, 'import pandas as pd\n'), ((6280, 6324), 'numpy.array', 'np.array', (['merged_labels_string'], {'dtype': 'np.str'}), '(merged_labels_string, dtype=np.str)\n', (6288, 6324), True, 'import numpy as np\n'), ((6618, 6629), 'time.time', 'time.time', ([], {}), '()\n', (6627, 6629), False, 'import time\n'), ((7575, 7603), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'procs_to_use'}), '(processes=procs_to_use)\n', (7579, 7603), False, 'from multiprocessing import Pool, cpu_count\n'), ((8076, 8099), 'numpy.concatenate', 'np.concatenate', (['all_ids'], {}), '(all_ids)\n', (8090, 8099), True, 'import numpy as np\n'), ((8116, 8141), 'numpy.concatenate', 'np.concatenate', (['all_boxes'], {}), '(all_boxes)\n', (8130, 8141), True, 'import numpy as np\n'), ((8159, 8185), 'numpy.concatenate', 'np.concatenate', (['all_scores'], {}), '(all_scores)\n', (8173, 8185), True, 'import numpy as np\n'), ((8203, 8229), 'numpy.concatenate', 'np.concatenate', (['all_labels'], {}), '(all_labels)\n', (8217, 8229), True, 'import numpy as np\n'), ((8339, 8381), 'pandas.DataFrame', 'pd.DataFrame', (['all_ids'], {'columns': "['ImageId']"}), "(all_ids, columns=['ImageId'])\n", (8351, 8381), True, 'import pandas as pd\n'), ((9992, 10021), 'pandas.read_csv', 'pd.read_csv', (['annotations_path'], {}), '(annotations_path)\n', (10003, 10021), True, 'import pandas as pd\n'), ((10498, 10509), 'time.time', 'time.time', ([], {}), '()\n', (10507, 10509), False, 'import time\n'), ((3993, 4026), 'numpy.array', 'np.array', (['boxes'], {'dtype': 'np.float32'}), '(boxes, dtype=np.float32)\n', (4001, 4026), True, 'import numpy as np\n'), ((4044, 4078), 'numpy.array', 'np.array', (['scores'], {'dtype': 'np.float32'}), '(scores, 
dtype=np.float32)\n', (4052, 4078), True, 'import numpy as np\n'), ((4096, 4128), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.int32'}), '(labels, dtype=np.int32)\n', (4104, 4128), True, 'import numpy as np\n'), ((6859, 6889), 'pandas.read_csv', 'pd.read_csv', (['pred_filenames[j]'], {}), '(pred_filenames[j])\n', (6870, 6889), True, 'import pandas as pd\n'), ((2020, 2054), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (2028, 2054), True, 'import numpy as np\n'), ((2076, 2108), 'numpy.zeros', 'np.zeros', (['(0,)'], {'dtype': 'np.float32'}), '((0,), dtype=np.float32)\n', (2084, 2108), True, 'import numpy as np\n'), ((2131, 2161), 'numpy.zeros', 'np.zeros', (['(0,)'], {'dtype': 'np.int32'}), '((0,), dtype=np.int32)\n', (2139, 2161), True, 'import numpy as np\n'), ((2407, 2441), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (2415, 2441), True, 'import numpy as np\n'), ((2463, 2495), 'numpy.zeros', 'np.zeros', (['(0,)'], {'dtype': 'np.float32'}), '((0,), dtype=np.float32)\n', (2471, 2495), True, 'import numpy as np\n'), ((2517, 2547), 'numpy.zeros', 'np.zeros', (['(0,)'], {'dtype': 'np.int32'}), '((0,), dtype=np.int32)\n', (2525, 2547), True, 'import numpy as np\n'), ((4317, 4329), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4325, 4329), True, 'import numpy as np\n'), ((4331, 4343), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4339, 4343), True, 'import numpy as np\n'), ((4345, 4357), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4353, 4357), True, 'import numpy as np\n'), ((6653, 6664), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (6662, 6664), False, 'from multiprocessing import Pool, cpu_count\n'), ((7705, 7716), 'itertools.repeat', 'repeat', (['res'], {}), '(res)\n', (7711, 7716), False, 'from itertools import repeat\n'), ((7718, 7733), 'itertools.repeat', 'repeat', (['weights'], {}), '(weights)\n', (7724, 7733), False, 
'from itertools import repeat\n'), ((7735, 7749), 'itertools.repeat', 'repeat', (['params'], {}), '(params)\n', (7741, 7749), False, 'from itertools import repeat\n'), ((10438, 10468), 'os.path.basename', 'os.path.basename', (['pred_list[i]'], {}), '(pred_list[i])\n', (10454, 10468), False, 'import os\n'), ((10668, 10679), 'time.time', 'time.time', ([], {}), '()\n', (10677, 10679), False, 'import time\n'), ((8637, 8648), 'time.time', 'time.time', ([], {}), '()\n', (8646, 8648), False, 'import time\n')]
#!/usr/local/bin/python import torch import torch.nn as nn from torch.autograd import Variable import numpy as np from PIL import Image import os, glob, argparse, tqdm from data_loader import get_data from model import get_model #==========================================================================# def create_folder(folder): if not os.path.isdir(folder): os.makedirs(folder) #==========================================================================# def load_pretrained_model(model, name): try: dir_ = sorted(glob.glob(os.path.join('snapshot', name, '*.pth')))[-1] except: return 0 model.load_state_dict(torch.load(dir_)) print('Loaded trained model: {}!'.format(dir_)) return int(os.path.basename(dir_).split('.')[0]) #==========================================================================# def save_model(model, name, epoch): dir_ = os.path.join('snapshot', name, '%s.pth'%(str(epoch).zfill(4))) create_folder(os.path.dirname(dir_)) torch.save(model.state_dict(), dir_) print('!!Saving model: {}!'.format(dir_)) #==========================================================================# def update_lr(lr, optimizer): for param_group in optimizer.param_groups: param_group['lr'] = lr #==========================================================================# def to_cuda(data): if torch.cuda.is_available(): data = data.cuda() return data #==========================================================================# def solver(name, data_loader, model, epoch, optimizer=None, mode='train'): if optimizer is None: model.eval() else: model.train() loss_cum = [] Acc = 0 count_test = 0 test_out = [] Loss = nn.CrossEntropyLoss() for batch_idx, (data,target) in tqdm.tqdm(enumerate(data_loader), total=len(data_loader), desc="!{} -> [{}] Epoch: {}".format(name.upper(), mode.upper(),epoch)): volatile = True if optimizer is None else False data = Variable(to_cuda(data), volatile=volatile) target = Variable(to_cuda(target), volatile=volatile) output = model(data) loss = 
Loss(output,target) if optimizer is not None: optimizer.zero_grad() loss.backward() optimizer.step() loss_cum.append(loss.data.cpu()[0]) _, arg_max_out = torch.max(output.data.cpu(), 1) if mode=='test': for oo in arg_max_out: test_out.append('%s,%d\n'%(str(count_test).zfill(4), oo)) count_test+=1 Acc += arg_max_out.long().eq(target.data.cpu().long()).sum() ACC = float(Acc*100)/len(data_loader.dataset) LOSS = np.array(loss_cum).mean() if mode=='test': f=open(os.path.join('snapshot', name, 'test.txt'),'w') for line in test_out: f.writelines(line) f.close() else: print("LOSS %s: %0.3f || ACC %s: %0.2f"%(mode.upper(), LOSS, mode.upper(), ACC)) return ACC #==========================================================================# def train(config, train_loader, val_loader, model): val_before = 0 for epoch in range(config.start_epoch, config.num_epochs): _, solver(config.model, train_loader, model, epoch, optimizer=config.optimizer, mode='train') val_acc = solver(config.model, val_loader, model, epoch, mode='val') if val_acc>val_before: save_model(model, config.model, epoch+1) val_before=val_acc flag_stop=0 else: flag_stop+=1 if flag_stop==config.stop_training: return # Decay learning rate if (epoch+1) > (config.num_epochs - config.num_epochs_decay): config.lr -= (config.lr / float(config.num_epochs_decay)) update_lr(config.lr, config.optimizer) print ('Decay learning rate to: {}.'.format(config.lr)) #==========================================================================# def test(config, val_loader, test_loader, model): assert start_epoch>0, "you must first TRAIN" solver(config.model, val_loader, model, config.start_epoch, mode='val') solver(config.model, test_loader, model, config.start_epoch, mode='test') #==========================================================================# if __name__=='__main__': parser = argparse.ArgumentParser() parser.add_argument('--image_size', type=int, default=224) parser.add_argument('--lr', type=float, default=0.001) # Training 
settings parser.add_argument('--batch_size', type=int, default=128) parser.add_argument('--num_epochs', type=int, default=59) parser.add_argument('--num_epochs_decay', type=int, default=60) parser.add_argument('--stop_training', type=int, default=3, help='Stops after N epochs if acc_val is not increasing') parser.add_argument('--num_workers', type=int, default=4) parser.add_argument('--model', type=str, default='densenet201') parser.add_argument('--TEST', action='store_true', default=False) config = parser.parse_args() config.model = config.model.lower() train_loader, val_loader, test_loader = get_data(config) #Train, Val, Test loaders that are foun in './data' num_classes=len(train_loader.dataset.classes) #Numbet of classes model = get_model(config.model, num_classes) #Returns the model and the batch size that fits in a 4GB GPU if torch.cuda.is_available(): model.cuda() #============================Optimizer==================================# config.optimizer = torch.optim.Adam(model.parameters(), config.lr, [0.5, 0.999]) #================= Look if there is a previous snapshot ================# config.start_epoch = load_pretrained_model(model, config.model) if config.TEST: test(config, val_loader, test_loader, model) else: train(config, train_loader, val_loader, model) #Train until VALIDATION convergence, i.e., stops after -confign.stop_training- plateau region test(config, val_loader, test_loader, model)
[ "torch.nn.CrossEntropyLoss", "argparse.ArgumentParser", "os.makedirs", "torch.load", "os.path.join", "os.path.dirname", "numpy.array", "torch.cuda.is_available", "os.path.isdir", "os.path.basename", "data_loader.get_data", "model.get_model" ]
[((1364, 1389), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1387, 1389), False, 'import torch\n'), ((1722, 1743), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1741, 1743), True, 'import torch.nn as nn\n'), ((4232, 4257), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4255, 4257), False, 'import os, glob, argparse, tqdm\n'), ((5017, 5033), 'data_loader.get_data', 'get_data', (['config'], {}), '(config)\n', (5025, 5033), False, 'from data_loader import get_data\n'), ((5169, 5205), 'model.get_model', 'get_model', (['config.model', 'num_classes'], {}), '(config.model, num_classes)\n', (5178, 5205), False, 'from model import get_model\n'), ((5278, 5303), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5301, 5303), False, 'import torch\n'), ((342, 363), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (355, 363), False, 'import os, glob, argparse, tqdm\n'), ((365, 384), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (376, 384), False, 'import os, glob, argparse, tqdm\n'), ((631, 647), 'torch.load', 'torch.load', (['dir_'], {}), '(dir_)\n', (641, 647), False, 'import torch\n'), ((952, 973), 'os.path.dirname', 'os.path.dirname', (['dir_'], {}), '(dir_)\n', (967, 973), False, 'import os, glob, argparse, tqdm\n'), ((2660, 2678), 'numpy.array', 'np.array', (['loss_cum'], {}), '(loss_cum)\n', (2668, 2678), True, 'import numpy as np\n'), ((2720, 2762), 'os.path.join', 'os.path.join', (['"""snapshot"""', 'name', '"""test.txt"""'], {}), "('snapshot', name, 'test.txt')\n", (2732, 2762), False, 'import os, glob, argparse, tqdm\n'), ((538, 577), 'os.path.join', 'os.path.join', (['"""snapshot"""', 'name', '"""*.pth"""'], {}), "('snapshot', name, '*.pth')\n", (550, 577), False, 'import os, glob, argparse, tqdm\n'), ((712, 734), 'os.path.basename', 'os.path.basename', (['dir_'], {}), '(dir_)\n', (728, 734), False, 'import os, glob, argparse, tqdm\n')]
""" eep_functions.py (C) <NAME> Institute for Astronomy University of Hawaiʻi 2019 July 1 Python utilities to convert stellar evolution tracks to downsampled tracks based on Equivalent Evolutionary Phases (EEPs) according to the method of Dotter (2016). """ from multiprocessing import Pool import numpy as np from scipy.interpolate import interp1d import pandas as pd import matplotlib.pyplot as plt from tqdm import tqdm from .config import eep_intervals, primary_eep_indices, eep_path, eep_log_path from .model_grid_utils import get_full_track, get_eep_track def eep_interpolate(track, special=False): """ Given a raw evolutionary track, returns a downsampled track based on Equivalent Evolutionary Phases (EEPs). The primary EEPs are defined in the function `PrimaryEEPs`, and the secondary EEPs are computed based on the number of secondary EEPs between each pair of primary EEPs as specified in the list `EEP_intervals`. If one of the EEP_intervals is 200, then for that pair of primary EEPs, the metric distance between those primary EEPs is divided into 200 equally spaced points, and the relevant stellar parameters are linearly interpolated at those points. """ i_eep = _primary_eeps(track, special) # get primary EEP indices in raw track num_intervals = len(i_eep) - 1 # In some cases, the raw models do not hit the ZAMS. In these cases, # return -1 for now and make a note. 
if num_intervals == 0: return -1 dist = _Metric_Function(track) # compute metric distance along track primary_eep_dist = dist[i_eep] secondary_eep_dist = np.zeros(sum(eep_intervals[:num_intervals]) + len(i_eep)) # Determine appropriate distance to each secondary EEP j0 = 0 for i in range(num_intervals): my_dist = primary_eep_dist[i+1] - primary_eep_dist[i] delta = my_dist/(eep_intervals[i] + 1) new_dist = np.array([primary_eep_dist[i] + delta*j \ for j in range(eep_intervals[i]+1)]) secondary_eep_dist[j0:j0+len(new_dist)] = new_dist j0 += len(new_dist) secondary_eep_dist[-1] = primary_eep_dist[-1] # Create list of interpolator functions interp_fs = [interp1d(dist, track[col]) for col in track.columns] # Interpolate stellar parameters along evolutionary track for # desired EEP distances eep_track = np.array([f(secondary_eep_dist) for f in interp_fs]).T eep_track = pd.DataFrame(eep_track, columns=track.columns) return eep_track def _primary_eeps(track, special=False): """ Given a track, returns a list containing indices of Equivalent Evolutionary Phases (EEPs) """ # define a list of functions to iterate over if special: functions = [_PreMS, _ZAMS, _EAMS, _IAMS, _TAMS, _RGBump_special] else: functions = [_PreMS, _ZAMS, _EAMS, _IAMS, _TAMS, _RGBump] # get indices of EEPs i_eep = np.zeros(len(functions)+1, dtype=int) for i in range(1,len(i_eep)): i_eep[i] = functions[i-1](track, i0=i_eep[i-1]) if i_eep[i] == -1: return i_eep[1:i] return i_eep[1:] def _PreMS(track, logTc=5.0, i0=0): """ The pre-main sequence EEP is the point where central temperature rises above a certain value (which must be lower than necessary for sustained fusion). The default value is log10(T_c) = 5.0, but may be chosen to be a different value. An optional argument i0 can be supplied, which is the index to start with. This relies on the behavior of pandas.Series.argmax() for a Series of bools. If no temperature is greater than or equal to logTc, the natural return value is i0. 
So we don't mistake this failed search, we must check the value at i0 to make sure it satisfies our criterion. RETURNS ------- `i_PreMS`: (int) the index of the first element in track[i0: "logT(cen)"] greater than logTc. """ logTc_tr = track.loc[i0:, "logT(cen)"] i_PreMS = _first_true_index(logTc_tr >= logTc) return i_PreMS def _ZAMS(track, ZAMS_pref=3, Xc_burned=0.001, Hlum_frac_max=0.999, i0=10): """ The Zero-Age Main Sequence EEP has three different implementations in Dotter's code: ZAMS1) the point where the core hydrogen mass fraction has been depleted by some fraction (0.001 by default: Xc <= Xmax - 0.001) ZAMS2) the point *before ZAMS1* where the hydrogen-burning luminosity achieves some fraction of the total luminosity (0.999 by default: Hlum/lum = 0.999) ZAMS3) the point *before ZAMS1* with the highest surface gravity ZAMS3 is implemented by default. """ Xc_init = track.loc[0, "Xcen"] Xc_tr = track.loc[i0:, "Xcen"] ZAMS1 = _first_true_index(Xc_tr <= Xc_init-Xc_burned) if ZAMS1 == -1: return -1 if ZAMS_pref == 1: return ZAMS1 if ZAMS_pref == 2: Hlum_tr = track.loc[i0:ZAMS1, 'H lum (Lsun)'] lum_tr = track.loc[i0:ZAMS1, 'L/Lsun'] Hlum_frac = Hlum_tr/lum_tr ZAMS2 = _first_true_index(Hlum_frac >= Hlum_frac_max) if ZAMS2 == -1: return ZAMS1 return ZAMS2 logg_tr = track.loc[0:ZAMS1, "logg"] ZAMS3 = logg_tr.idxmax() return ZAMS3 def _IorT_AMS(track, Xmin, i0): """ The Intermediate- and Terminal-Age Main Sequence (IAMS, TAMS) EEPs both use the core hydrogen mass fraction dropping below some critical amount. This function encapsulates the main part of the code, with the difference between IAMS and TAMS being the value of Xmin. """ Xc_tr = track.loc[i0:, 'Xcen'] i_eep = _first_true_index(Xc_tr <= Xmin) return i_eep def _EAMS(track, Xmin=0.55, i0=12): """ Early-Age Main Sequence. Without this, the low-mass tracks do not reach an EEP past the ZAMS before 15 Gyr. 
""" i_EAMS = _IorT_AMS(track, Xmin, i0) return i_EAMS def _IAMS(track, Xmin=0.3, i0=12): """ Intermediate-Age Main Sequence exists solely to ensure the convective hook is sufficiently sampled. Defined to be when the core hydrogen mass fraction drops below some critical value. Default: Xc <= 0.3 """ i_IAMS = _IorT_AMS(track, Xmin, i0) return i_IAMS def _TAMS(track, Xmin=1e-12, i0=14): """ Terminal-Age Main Sequence, defined to be when the core hydrogen mass fraction drops below some critical value. Default: Xc <= 1e-12 """ i_TAMS = _IorT_AMS(track, Xmin, i0) return i_TAMS def _RGBump(track, i0=None): """ The Red Giant Bump is an interruption in the increase in luminosity on the Red Giant Branch. It occurs when the hydrogen-burning shell reaches the composition discontinuity left from the first convective dredge-up. Dotter skips the Red Giant Bump and proceeds to the Tip of the Red Giant Branch, but since the YREC models, at the time of this writing, terminate at the helium flash, I choose to use the Red Giant Bump as my final EEP. I identify the RGBump as the first local minimum in Teff after the TAMS. To avoid weird end-of-track artifacts, if the minimum is within 1 step from the end of the raw track, the track is treated as if it doesn't reach the RGBump. Added 2018/07/22: Some tracks have weird, jumpy behavior before the RGBump which gets mistakenly identified as the RGBump. To avoid this, I force the RGBump to be the first local minimum in Teff after the TAMS *and* with a luminosity above 10 Lsun. Added 2019/05/28: The default grid has two tracks that *just barely* do not reach the RGBump. These tracks will use _RGBump_special. In this function, I manually set the final point in these tracks as the RGBump to extend their EEPs. This will only affect calculations pas the TAMS for stars adjacent to these tracks in the grid, and the errors should be negligible (but I have not quantified them). 
""" N = len(track) lum_tr = track.loc[i0:, "L/Lsun"] logT_tr = track.loc[i0:, "Log Teff(K)"] RGBump = _first_true_index(lum_tr > 10) + 1 if RGBump == 0: return -1 while logT_tr[RGBump] < logT_tr[RGBump-1] and RGBump < N-1: RGBump += 1 # Two cases: 1) We didn't reach an extremum, in which case RGBump gets # set as the final index of the track. In this case, return -1. # 2) We found the extremum, in which case RGBump gets set # as the index corresponding to the extremum. if RGBump >= N-1: return -1 return RGBump-1 def _RGBump_special(track, i0=None): """ The Red Giant Bump is an interruption in the increase in luminosity on the Red Giant Branch. It occurs when the hydrogen-burning shell reaches the composition discontinuity left from the first convective dredge-up. Added 2019/05/28: The default grid has two tracks that *just barely* do not reach the RGBump. These tracks will use _RGBump_special. In this function, I manually set the final point in these tracks as the RGBump to extend their EEPs. This will only affect calculations pas the TAMS for stars adjacent to these tracks in the grid, and the errors should be negligible (but I have not quantified them). """ N = len(track) return N-1 def _RGBTip(track, i0=None): """ Red Giant Branch Tip Dotter describes the tip of the red giant branch (RGBTip) EEP as "the point at which stellar luminosity reaches a maximum---or the stellar Teff reaches a minimum---after core H burning is complete but before core He burning has progressed significantly." Note that the YREC models at the time of this writing nominally end at the helium flash, so the RGBTip is unadvisable to use as an EEP. 
""" Ymin = track.loc[i0, "Ycen"] - 1e-2 Yc_tr = track.loc[i0:, "Ycen"] before_He_burned = (Yc_tr > Ymin) if not before_He_burned.any(): return -1 lum_tr = track.loc[i0:, "L/Lsun"] RGBTip1 = (lum_tr[before_He_burned]).idxmax() logT_tr = track.loc[i0:, "Log Teff(K)"] RGBTip2 = (logT_tr[before_He_burned]).idxmin() RGBTip = min(RGBTip1, RGBTip2) return RGBTip def _Metric_Function(track): """ The Metric Function is used to calculate the distance along the evolution track. Traditionally, the Euclidean distance along the track on the H-R diagram has been used, but any positive-definite function will work. """ return _HRD_distance(track) def _HRD_distance(track): """ Distance along the H-R diagram, to be used in the Metric Function. Returns an array containing the distance from the beginning of the evolution track for each step along the track, in logarithmic effective temperature and logarithmic luminosity space. """ # Allow for scaling to make changes in Teff and L comparable Tscale = 20 Lscale = 1 logTeff = track["Log Teff(K)"] logLum = np.log10(track["L/Lsun"]) N = len(track) dist = np.zeros(N) for i in range(1, N): temp_dist = (((logTeff.iloc[i] - logTeff.iloc[i-1])*Tscale)**2 + ((logLum.iloc[i] - logLum.iloc[i-1])*Lscale)**2) dist[i] = dist[i-1] + np.sqrt(temp_dist) return dist def _first_true_index(bools): """ Given a pandas Series of bools, returns the index of the first occurrence of `True`. **Index-based, NOT location-based** I.e., say x = pd.Series({0: False, 2: False, 4: True}), then _first_true_index(x) will return index 4, not the positional index 2. If no value in `bools` is True, returns -1. """ if not bools.any(): return -1 i_true = bools.idxmax() return i_true def convert_track(mass, met, alpha, log_file=None): """ Given mass, metallicity, and alpha in the grid, converts the corresponding evolution track into an EEP-based track. 
""" # Define special cases special1 = (mass == 0.84) & (met == 0.0) & (alpha == 0.0) special2 = (mass == 0.99) & (met == 0.5) & (alpha == 0.4) special = special1 | special2 track, fname = get_full_track(mass, met, alpha, return_fname=True) out_fname = "eep_" + fname print("Constructing %s..." % out_fname, end="\r") try: eep_track = eep_interpolate(track, special) # If the track doesn't reach the ZAMS, returns -1. # In this case, skip and make a note. if eep_track is -1: if log_file is None: print("Track does not reach ZAMS: %s" %fname) else: print(fname, file=log_file) return eep_track.to_pickle(eep_path+out_fname) except (ValueError, KeyboardInterrupt) as e: print("") raise e def _convert_track_pool(fstring): """ Given mass, metallicity, and alpha in the grid, converts the corresponding evolution track into an EEP-based track. """ fsplit = fstring.split("_") met = int(fsplit[1])/100 alpha = int(fsplit[3])/100 mass = int(fsplit[5])/100 # Define special cases special1 = (mass == 0.84) & (met == 0.0) & (alpha == 0.0) special2 = (mass == 0.99) & (met == 0.5) & (alpha == 0.4) special = special1 | special2 track = get_full_track(mass, met, alpha) out_fname = "eep_" + fstring + ".pkl" try: eep_track = eep_interpolate(track, special) # If the track doesn't reach the ZAMS, returns -1. # In this case, skip and make a note. if eep_track is -1: with open(eep_log_path, "a+") as f: print(out_fname, file=f) return eep_track.to_pickle(eep_path+out_fname) except (ValueError, KeyboardInterrupt) as e: print("") raise e def _convert_series(): """ Takes all model grid points, looks for raw track file at each point, and produces EEP-based track for each point. If any models fail to reach ZAMS, the corresponding filename is written in a log file. 
""" from .config import mass_grid, met_grid, alpha_grid failed_fname = eep_path + "failed_eep.txt" print("Converting tracks to EEP basis...") n_total = len(mass_grid) * len(met_grid) * len(alpha_grid) with open(failed_fname, "a+") as log_file: with tqdm(total=n_total) as pbar: for z in met_grid: for alpha in alpha_grid: for m in mass_grid: convert_track(m, z, alpha, log_file=log_file) pbar.update() print("Tracks successfully converted to EEPs. See %s for failed tracks." % failed_fname) def _convert_pool(): """ Takes all model grid points, looks for raw track file at each point, and produces EEP-based track for each point. If any models fail to reach ZAMS, the corresponding filename is written in a log file. """ from .config import mass_grid, met_grid, alpha_grid from .model_grid_utils import _to_string failed_fname = eep_path + "failed_eep.txt" string_list = [] for z in met_grid: for alpha in alpha_grid: for m in mass_grid: string = "met_%s_alpha_%s_mass_%s" % \ (_to_string(z), _to_string(alpha), _to_string(m)) string_list.append(string) print("Converting tracks to EEP basis...") with Pool() as pool: with tqdm(total=len(string_list)) as pbar: for i, _ in enumerate(pool.imap_unordered(_convert_track_pool, string_list)): pbar.update() print("Tracks successfully converted to EEPs. See %s for failed tracks." % failed_fname) def convert_all_tracks(use_pool=False): """ Wrapper for functions to convert evolution tracks to EEP basis Allows user to convert in series or in parallel using multiprocessing.Pool. """ if use_pool: _convert_pool() else: _convert_series() def HRD(mass=None, met=None, alpha=None, df_track=None, df_eep=None, verbose=True): """Checking out a particular model on the HRD. 
""" if df_track is None: labels = ["Log Teff(K)", "L/Lsun", "Xcen", "Age(Gyr)"] teff, lum, xcen, age = labels track = get_full_track(mass, met, alpha, labels) eep = get_eep_track(mass, met, alpha, labels) else: track = df_track eep = df_eep teff, lum = "Log Teff(K)", "L/Lsun" if verbose: i_ZAMS = 201 print("Number of EEPs in this track: ", len(eep)) if len(eep) == 202: final_eep = "ZAMS" elif len(eep) == 253: final_eep = "EAMS" elif len(eep) == 354: final_eep = "IAMS" elif len(eep) == 455: final_eep = "TAMS" elif len(eep) == 606: final_eep = "RGBump" else: final_eep = "ERROR" print(" Final EEP reached: ", final_eep) print(" Final model Xcen: ", track.loc[len(track)-1, xcen]) ## print(" Final model age: ", track.loc[len(track)-1, age]) ## print(" Final EEP age: ", eep.loc[len(eep)-1, age]) ## print(" ZAMS age: ", eep.loc[i_ZAMS, age]) ## fig = plt.figure() ax = fig.add_subplot(111) ax.plot(track[teff], np.log10(track[lum])) eep = eep.reindex(range(primary_eep_indices[-1]+1)) ax.plot(eep.loc[::10, teff], np.log10(eep.loc[::10, lum]), "ko", ms=2) ax.plot(eep.loc[primary_eep_indices, teff], np.log10(eep.loc[primary_eep_indices, lum]), "ko", ms=4) ax.set_xlabel(r"log($T_\mathrm{eff}$/K)") ax.set_ylabel(r"log($L$/$L_\odot$)") ax.set_title(r"$M$ = %.02f; [M/H] = %.01f; [$\alpha$/M] = %.01f" %(mass, met, alpha)) ax.invert_xaxis() fig.show() def _test_failed_eeps(): """Opens log file `failed_eep.txt` and plots tracks """ with open(eep_path+"failed_eep.txt", "r") as f: for line in f: track = pd.read_pickle(model_path + line.strip()) plt.plot(track["Log Teff(K)"], np.log10(track["L/Lsun"])) plt.gca().invert_xaxis() plt.show() def _test_models(masses, mets, alphas): """ Plots multiple tracks and their EEP-based tracks, given lists of mass, metallicity, and alpha. """ fig = plt.figure() ax = fig.add_subplot(111) for i in range(len(masses)): ax = HRD(ax, masses[i], mets[i], alphas[i]) ax.invert_xaxis() plt.show() if __name__ == "__main__": convert_all_tracks(use_pool=True)
[ "numpy.log10", "numpy.sqrt", "matplotlib.pyplot.gca", "tqdm.tqdm", "scipy.interpolate.interp1d", "numpy.zeros", "matplotlib.pyplot.figure", "multiprocessing.Pool", "pandas.DataFrame", "matplotlib.pyplot.show" ]
[((2461, 2507), 'pandas.DataFrame', 'pd.DataFrame', (['eep_track'], {'columns': 'track.columns'}), '(eep_track, columns=track.columns)\n', (2473, 2507), True, 'import pandas as pd\n'), ((10987, 11012), 'numpy.log10', 'np.log10', (["track['L/Lsun']"], {}), "(track['L/Lsun'])\n", (10995, 11012), True, 'import numpy as np\n'), ((11044, 11055), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (11052, 11055), True, 'import numpy as np\n'), ((17262, 17274), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17272, 17274), True, 'import matplotlib.pyplot as plt\n'), ((18171, 18181), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18179, 18181), True, 'import matplotlib.pyplot as plt\n'), ((18353, 18365), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18363, 18365), True, 'import matplotlib.pyplot as plt\n'), ((18508, 18518), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18516, 18518), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2252), 'scipy.interpolate.interp1d', 'interp1d', (['dist', 'track[col]'], {}), '(dist, track[col])\n', (2234, 2252), False, 'from scipy.interpolate import interp1d\n'), ((15417, 15423), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (15421, 15423), False, 'from multiprocessing import Pool\n'), ((17331, 17351), 'numpy.log10', 'np.log10', (['track[lum]'], {}), '(track[lum])\n', (17339, 17351), True, 'import numpy as np\n'), ((17442, 17470), 'numpy.log10', 'np.log10', (['eep.loc[::10, lum]'], {}), '(eep.loc[::10, lum])\n', (17450, 17470), True, 'import numpy as np\n'), ((17556, 17599), 'numpy.log10', 'np.log10', (['eep.loc[primary_eep_indices, lum]'], {}), '(eep.loc[primary_eep_indices, lum])\n', (17564, 17599), True, 'import numpy as np\n'), ((11257, 11275), 'numpy.sqrt', 'np.sqrt', (['temp_dist'], {}), '(temp_dist)\n', (11264, 11275), True, 'import numpy as np\n'), ((14333, 14352), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_total'}), '(total=n_total)\n', (14337, 14352), False, 'from tqdm import 
tqdm\n'), ((18142, 18151), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (18149, 18151), True, 'import matplotlib.pyplot as plt\n'), ((18111, 18136), 'numpy.log10', 'np.log10', (["track['L/Lsun']"], {}), "(track['L/Lsun'])\n", (18119, 18136), True, 'import numpy as np\n')]
import math import numpy as np def quaternion_to_rotation_matrix(q): # Original C++ Method defined in pba/src/pba/DataInterface.h qq = math.sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]) qw = qx = qy = qz = 0 if qq > 0: # NORMALIZE THE QUATERNION qw = q[0] / qq qx = q[1] / qq qy = q[2] / qq qz = q[3] / qq else: qw = 1 qx = qy = qz = 0 m = np.zeros((3, 3), dtype=float) m[0][0] = float(qw * qw + qx * qx - qz * qz - qy * qy) m[0][1] = float(2 * qx * qy - 2 * qz * qw) m[0][2] = float(2 * qy * qw + 2 * qz * qx) m[1][0] = float(2 * qx * qy + 2 * qw * qz) m[1][1] = float(qy * qy + qw * qw - qz * qz - qx * qx) m[1][2] = float(2 * qz * qy - 2 * qx * qw) m[2][0] = float(2 * qx * qz - 2 * qy * qw) m[2][1] = float(2 * qy * qz + 2 * qw * qx) m[2][2] = float(qz * qz + qw * qw - qy * qy - qx * qx) return m def rotation_matrix_to_quaternion(m): # Original C++ Method defined in pba/src/pba/DataInterface.h q = np.array([0, 0, 0, 0], dtype=float) q[0] = 1 + m[0][0] + m[1][1] + m[2][2] if q[0] > 0.000000001: q[0] = math.sqrt(q[0]) / 2.0 q[1] = (m[2][1] - m[1][2]) / (4.0 * q[0]) q[2] = (m[0][2] - m[2][0]) / (4.0 * q[0]) q[3] = (m[1][0] - m[0][1]) / (4.0 * q[0]) else: if m[0][0] > m[1][1] and m[0][0] > m[2][2]: s = 2.0 * math.sqrt(1.0 + m[0][0] - m[1][1] - m[2][2]) q[1] = 0.25 * s q[2] = (m[0][1] + m[1][0]) / s q[3] = (m[0][2] + m[2][0]) / s q[0] = (m[1][2] - m[2][1]) / s elif m[1][1] > m[2][2]: s = 2.0 * math.sqrt(1.0 + m[1][1] - m[0][0] - m[2][2]) q[1] = (m[0][1] + m[1][0]) / s q[2] = 0.25 * s q[3] = (m[1][2] + m[2][1]) / s q[0] = (m[0][2] - m[2][0]) / s else: s = 2.0 * math.sqrt(1.0 + m[2][2] - m[0][0] - m[1][1]) q[1] = (m[0][2] + m[2][0]) / s q[2] = (m[1][2] + m[2][1]) / s q[3] = 0.25 * s q[0] = (m[0][1] - m[1][0]) / s return q class Extrinsics: def __init__(self): # center is the coordinate of the camera center with respect to the # world coordinate frame (t = -R C) self._center = np.array([0, 0, 0], dtype=float) # the translation vector is the vector used to transform 
points in # world coordinates to camera coordinates (C = -R^T t) self._translation_vec = np.array([0, 0, 0], dtype=float) # use for these attributes the getter and setter methods self._quaternion = np.array([0, 0, 0, 0], dtype=float) # for rotations the inverse is equal to the transpose # self._rotation_inv_mat = np.linalg.transpose(self._rotation_mat) self._rotation_mat = np.zeros((3, 3), dtype=float) @staticmethod def invert_transformation_mat(trans_mat): # Exploit that the inverse of the rotation part is equal to the # transposed of the rotation part. This should be more robust than # trans_mat_inv = np.linalg.inv(trans_mat) trans_mat_inv = np.zeros_like(trans_mat) rotation_part_inv = trans_mat[0:3, 0:3].T trans_mat_inv[0:3, 0:3] = rotation_part_inv trans_mat_inv[0:3, 3] = -np.dot(rotation_part_inv, trans_mat[0:3, 3]) trans_mat_inv[3, 3] = 1 return trans_mat_inv def is_rotation_mat_valid(self, some_mat): # TEST if rotation_mat is really a rotation matrix # (i.e. det = -1 or det = 1) det = np.linalg.det(some_mat) is_close = np.isclose(det, 1) or np.isclose(det, -1) # if not is_close: # logger.vinfo('some_mat', some_mat) # logger.vinfo('determinante', det) return is_close def set_quaternion(self, quaternion): self._quaternion = quaternion # we must change the rotation matrixes as well self._rotation_mat = quaternion_to_rotation_matrix(quaternion) def set_rotation_mat(self, rotation_mat): assert self.is_rotation_mat_valid(rotation_mat) self._rotation_mat = rotation_mat # we must change the quaternion as well self._quaternion = rotation_matrix_to_quaternion(rotation_mat) def set_camera_center_after_rotation(self, center): assert self.is_rotation_mat_valid(self._rotation_mat) self._center = center self._translation_vec = -np.dot(self._rotation_mat, center) def set_camera_translation_vector_after_rotation(self, translation_vector): # translation_vector: trans_vec = -Rc assert self.is_rotation_mat_valid(self._rotation_mat) self._translation_vec = translation_vector 
self._center = -np.dot( self._rotation_mat.transpose(), translation_vector ) def get_quaternion(self): return self._quaternion def get_rotation_mat(self): # Note: # self._rotation_mat.T or self._rotation_mat.transpose() # DO NOT CHANGE THE MATRIX return self._rotation_mat def get_translation_vec(self): return self._translation_vec def get_camera_center(self): return self._center def get_4x4_world_to_cam_mat(self): # This matrix can be used to convert points given in world coordinates # into points given in camera coordinates # M = [R -Rc] # [0 1], # https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg homogeneous_mat = np.identity(4, dtype=float) homogeneous_mat[0:3, 0:3] = self.get_rotation_mat() homogeneous_mat[0:3, 3] = -self.get_rotation_mat().dot( self.get_camera_center() ) return homogeneous_mat def set_4x4_cam_to_world_mat(self, cam_to_world_mat): # This matrix can be used to convert points given in camera coordinates # into points given in world coordinates # M = [R^T c] # [0 1] # # https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg rotation_part = cam_to_world_mat[0:3, 0:3] translation_part = cam_to_world_mat[0:3, 3] self.set_rotation_mat(rotation_part.transpose()) self.set_camera_center_after_rotation(translation_part) def get_4x4_cam_to_world_mat(self): # This matrix can be used to convert points given in camera coordinates # into points given in world coordinates # M = [R^T c] # [0 1] # :return: # # https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg homogeneous_mat = np.identity(4, dtype=float) homogeneous_mat[0:3, 0:3] = self.get_rotation_mat().transpose() homogeneous_mat[0:3, 3] = self.get_camera_center() return homogeneous_mat def cam_to_world_coord_multiple_coords(self, cam_coords): num_coords = cam_coords.shape[0] hom_entries = np.ones(num_coords).reshape((num_coords, 1)) cam_coords_hom = 
np.hstack((cam_coords, hom_entries)) world_coords_hom = ( self.get_4x4_cam_to_world_mat().dot(cam_coords_hom.T).T ) world_coords = np.delete(world_coords_hom, 3, 1) return world_coords
[ "numpy.identity", "numpy.isclose", "numpy.ones", "numpy.hstack", "numpy.delete", "math.sqrt", "numpy.linalg.det", "numpy.array", "numpy.zeros", "numpy.dot", "numpy.zeros_like" ]
[((146, 210), 'math.sqrt', 'math.sqrt', (['(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3])'], {}), '(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3])\n', (155, 210), False, 'import math\n'), ((431, 460), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'float'}), '((3, 3), dtype=float)\n', (439, 460), True, 'import numpy as np\n'), ((1047, 1082), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {'dtype': 'float'}), '([0, 0, 0, 0], dtype=float)\n', (1055, 1082), True, 'import numpy as np\n'), ((2322, 2354), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'float'}), '([0, 0, 0], dtype=float)\n', (2330, 2354), True, 'import numpy as np\n'), ((2525, 2557), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'float'}), '([0, 0, 0], dtype=float)\n', (2533, 2557), True, 'import numpy as np\n'), ((2651, 2686), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {'dtype': 'float'}), '([0, 0, 0, 0], dtype=float)\n', (2659, 2686), True, 'import numpy as np\n'), ((2853, 2882), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'float'}), '((3, 3), dtype=float)\n', (2861, 2882), True, 'import numpy as np\n'), ((3170, 3194), 'numpy.zeros_like', 'np.zeros_like', (['trans_mat'], {}), '(trans_mat)\n', (3183, 3194), True, 'import numpy as np\n'), ((3594, 3617), 'numpy.linalg.det', 'np.linalg.det', (['some_mat'], {}), '(some_mat)\n', (3607, 3617), True, 'import numpy as np\n'), ((5602, 5629), 'numpy.identity', 'np.identity', (['(4)'], {'dtype': 'float'}), '(4, dtype=float)\n', (5613, 5629), True, 'import numpy as np\n'), ((6801, 6828), 'numpy.identity', 'np.identity', (['(4)'], {'dtype': 'float'}), '(4, dtype=float)\n', (6812, 6828), True, 'import numpy as np\n'), ((7188, 7224), 'numpy.hstack', 'np.hstack', (['(cam_coords, hom_entries)'], {}), '((cam_coords, hom_entries))\n', (7197, 7224), True, 'import numpy as np\n'), ((7355, 7388), 'numpy.delete', 'np.delete', (['world_coords_hom', '(3)', '(1)'], {}), '(world_coords_hom, 3, 1)\n', (7364, 7388), True, 'import numpy as 
np\n'), ((1168, 1183), 'math.sqrt', 'math.sqrt', (['q[0]'], {}), '(q[0])\n', (1177, 1183), False, 'import math\n'), ((3330, 3374), 'numpy.dot', 'np.dot', (['rotation_part_inv', 'trans_mat[0:3, 3]'], {}), '(rotation_part_inv, trans_mat[0:3, 3])\n', (3336, 3374), True, 'import numpy as np\n'), ((3637, 3655), 'numpy.isclose', 'np.isclose', (['det', '(1)'], {}), '(det, 1)\n', (3647, 3655), True, 'import numpy as np\n'), ((3659, 3678), 'numpy.isclose', 'np.isclose', (['det', '(-1)'], {}), '(det, -1)\n', (3669, 3678), True, 'import numpy as np\n'), ((4480, 4514), 'numpy.dot', 'np.dot', (['self._rotation_mat', 'center'], {}), '(self._rotation_mat, center)\n', (4486, 4514), True, 'import numpy as np\n'), ((1424, 1468), 'math.sqrt', 'math.sqrt', (['(1.0 + m[0][0] - m[1][1] - m[2][2])'], {}), '(1.0 + m[0][0] - m[1][1] - m[2][2])\n', (1433, 1468), False, 'import math\n'), ((7118, 7137), 'numpy.ones', 'np.ones', (['num_coords'], {}), '(num_coords)\n', (7125, 7137), True, 'import numpy as np\n'), ((1681, 1725), 'math.sqrt', 'math.sqrt', (['(1.0 + m[1][1] - m[0][0] - m[2][2])'], {}), '(1.0 + m[1][1] - m[0][0] - m[2][2])\n', (1690, 1725), False, 'import math\n'), ((1919, 1963), 'math.sqrt', 'math.sqrt', (['(1.0 + m[2][2] - m[0][0] - m[1][1])'], {}), '(1.0 + m[2][2] - m[0][0] - m[1][1])\n', (1928, 1963), False, 'import math\n')]
import numpy as np import matplotlib.pyplot as plt fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(8,12)) # Plot pow with different settings t = 1e-2 rcut = 5 settings = [ [2, rcut/(1/t*(1-t)) ** (1 / 2), 1], [4, rcut/(1/t*(1-t)) ** (1 / 4), 1], [8, rcut/(1/t*(1-t)) ** (1 / 8), 1], ] rmin = 0 rmax = 5.2 for setting in settings: m = setting[0] r0 = setting[1] c = setting[2] d = c r = np.arange(rmin, rmax, 0.01) polym = c / (d + (r / r0) ** m) ax2.plot(r, polym, label="m = {}, r0 = {:.3}, c = d = {}".format(m, r0, c)) ax2.axvline(rcut, color='k', linestyle='--') ax2.text( rcut*0.99, 0.5, "rcut inferred from threshold", verticalalignment='center', horizontalalignment='right', rotation="vertical", ) ax2.set_title("pow") ax2.set_xlabel("r") ax2.set_ylabel("w(r)") # Plot poly with different settings settings = [ [rcut, 3], [rcut, 2], [rcut, 1], ] for setting in settings: r0 = setting[0] m = setting[1] c = 1 poly3m = [] for ri in r: if ri < r0: poly3m.append(c*(1 + 2 * (ri / r0) ** 3 - 3 * (ri / r0) ** 2) ** m) else: poly3m.append(0) ax1.plot(r, poly3m, label="m = {}, r0 = {}, c={}".format(m, r0, c)) ax1.axvline(rcut, color='k', linestyle='--') ax1.text( rcut*0.99, 0.5, "rcut inferred from r0".format(t), verticalalignment='center', horizontalalignment='right', rotation="vertical", ) ax1.set_title("poly") ax1.set_xlabel("r") ax1.set_ylabel("w(r)") # Plot exp with different settings settings = [ [rcut/np.log(1/t - 0), 1, 0], [rcut/np.log(10/t - 9), 10, 9], [rcut/np.log(100/t - 99), 100, 99], ] for setting in settings: r = np.arange(rmin, rmax, 0.01) r0 = setting[0] c = setting[1] d = setting[2] exp = c/(d + np.exp(r/r0)) ax3.plot(r, exp, label="r0={:.3}, c={}, d={}".format(r0, c, d)) ax3.axvline(rcut, color='k', linestyle='--') ax3.text( rcut*0.99, 0.5, "rcut inferred from threshold", verticalalignment='center', horizontalalignment='right', rotation="vertical", ) ax3.set_title("exp") ax3.set_xlabel("r") ax3.set_ylabel("w(r)") l = "upper right" anchor = (0.9, 
1) ax1.set_xlim(rmin, rmax) ax1.set_ylim(0, 1) ax2.set_ylim(0, 1) ax3.set_ylim(0, 1) ax1.legend(loc=l, bbox_to_anchor=anchor) ax2.legend(loc=l, bbox_to_anchor=anchor) ax3.legend(loc=l, bbox_to_anchor=anchor) plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.05) plt.show()
[ "matplotlib.pyplot.subplots_adjust", "numpy.log", "numpy.exp", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.show" ]
[((76, 124), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'figsize': '(8, 12)'}), '(3, 1, sharex=True, figsize=(8, 12))\n', (88, 124), True, 'import matplotlib.pyplot as plt\n'), ((2438, 2502), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'right': '(0.95)', 'top': '(0.95)', 'bottom': '(0.05)'}), '(left=0.1, right=0.95, top=0.95, bottom=0.05)\n', (2457, 2502), True, 'import matplotlib.pyplot as plt\n'), ((2503, 2513), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2511, 2513), True, 'import matplotlib.pyplot as plt\n'), ((437, 464), 'numpy.arange', 'np.arange', (['rmin', 'rmax', '(0.01)'], {}), '(rmin, rmax, 0.01)\n', (446, 464), True, 'import numpy as np\n'), ((1740, 1767), 'numpy.arange', 'np.arange', (['rmin', 'rmax', '(0.01)'], {}), '(rmin, rmax, 0.01)\n', (1749, 1767), True, 'import numpy as np\n'), ((1605, 1622), 'numpy.log', 'np.log', (['(1 / t - 0)'], {}), '(1 / t - 0)\n', (1611, 1622), True, 'import numpy as np\n'), ((1639, 1657), 'numpy.log', 'np.log', (['(10 / t - 9)'], {}), '(10 / t - 9)\n', (1645, 1657), True, 'import numpy as np\n'), ((1675, 1695), 'numpy.log', 'np.log', (['(100 / t - 99)'], {}), '(100 / t - 99)\n', (1681, 1695), True, 'import numpy as np\n'), ((1843, 1857), 'numpy.exp', 'np.exp', (['(r / r0)'], {}), '(r / r0)\n', (1849, 1857), True, 'import numpy as np\n')]
import numpy as np import keras from keras.models import Model from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU from keras.optimizers import Adam from keras import backend as K # 今更きけないGAN # https://qiita.com/triwave33/items/1890ccc71fab6cbca87e # https://qiita.com/pacifinapacific/items/6811b711eee1a5ebbb03 class GAN(): def __init__(self, latent_dim=2, data_dim=2): # 潜在変数の次元数 self.latent_dim = latent_dim # データの次元 self.data_dim = data_dim return def make_model(self, gene_hidden_neurons, disc_hidden_neurons): # discriminator model self.disc_model = self.__build_discriminator(disc_hidden_neurons) #self.disc_model.compile(optimizer=Adam(lr=1e-5, beta_1=0.1), loss='binary_crossentropy', metrics=['accuracy']) self.disc_model.compile(optimizer=Adam(lr=5e-6, beta_1=0.1), loss='binary_crossentropy', metrics=['accuracy']) # generator model self.gene_model = self.__build_generator(gene_hidden_neurons) # combined model of generator and discriminator self.combined_model = self.__build_combined_gene_and_disc() #self.combined_model.compile(optimizer=Adam(lr=2e-4, beta_1=0.5), loss='binary_crossentropy') self.combined_model.compile(optimizer=Adam(lr=2e-4, beta_1=0.5), loss='binary_crossentropy') return def __build_generator(self, hidden_neurons): ''' build generator keras model the last activation is tanh. 
''' # input latent_inputs = Input(shape=(self.latent_dim,)) # hidden layer x = latent_inputs for hidden_n in hidden_neurons: x = Dense(hidden_n)(x) #x = Activation('relu')(x) x = LeakyReLU()(x) x = BatchNormalization()(x) # output x = Dense(self.data_dim)(x) datas = Activation('tanh')(x) #datas = Activation('linear')(x) model = Model(input=latent_inputs, output=datas) model.summary() return model def __build_discriminator(self, hidden_neurons): ''' build discriminator keras model ''' # input datas = Input(shape=(self.data_dim,)) # hidden layer x = datas for hidden_n in hidden_neurons: x = Dense(hidden_n)(x) x = Activation('relu')(x) #x = LeakyReLU()(x) #x = BatchNormalization()(x) # output x = Dense(1)(x) real_or_fake = Activation('sigmoid')(x) # model = Model(input=datas, output=real_or_fake) model.summary() return model def __build_combined_gene_and_disc(self): ''' build combined keras model of generator and discriminator ''' # input latent_inputs = Input(shape=(self.latent_dim,)) # data data = self.gene_model(latent_inputs) # true or false self.disc_model.trainable = False real_or_fake = self.disc_model(data) # model = Model(input=latent_inputs, output=real_or_fake) model.summary() return model def train(self, real_datas, epoch, batch_size=32): ''' training gan model ''' print('start training gan model') for iep in range(epoch): #self.train_step(real_datas, batch_size, iep) self.train_step_test1(real_datas, batch_size, iep) print('end training') return def train_step(self, real_datas, batch_size=32, now_epoch=None, print_on_batch=False): ''' training gan model on one epoch discriminatorの学習時にrealとfakeを別々に学習 ''' # sample_num = real_datas.shape[0] half_batch_size = int(batch_size / 2) batch_num = int(sample_num / half_batch_size) + 1 # index for minibatch training shuffled_idx = np.random.permutation(sample_num) # roop of batch for i_batch in range(batch_num): if half_batch_size*i_batch < sample_num: # --------------------------- # training of discriminator # 
--------------------------- # real data real_x = real_datas[shuffled_idx[half_batch_size*i_batch : half_batch_size*(i_batch+1)]] x_num = real_x.shape[0] y = np.ones((x_num, 1)) # label = 1 # disc_loss_real = self.disc_model.train_on_batch(x=real_x, y=y) # fake data latents = np.random.normal(0, 1, (x_num, self.latent_dim)) fake_x = self.gene_model.predict(latents) y = np.zeros((x_num, 1)) # label = 0 # disc_loss_fake = self.disc_model.train_on_batch(x=fake_x, y=y) # loss disc_loss = 0.5 * np.add(disc_loss_real, disc_loss_fake) # --------------------------- # training of generator # --------------------------- # generated data # batch size = x_num * 2 (= real + fake in disc training) latents = np.random.normal(0, 1, (x_num * 2, self.latent_dim)) # x_num * 2 y = np.ones((x_num * 2, 1)) # label = 1 # gene_loss = self.combined_model.train_on_batch(x=latents, y=y) # training progress if print_on_batch: print_epoch = now_epoch if now_epoch is not None else 0 print ("epoch: %d, batch: %d, [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (print_epoch, i_batch+1, disc_loss[0], 100*disc_loss[1], gene_loss)) # training progress print_epoch = now_epoch if now_epoch is not None else 0 print ("epoch: %d, [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (print_epoch, disc_loss[0], 100*disc_loss[1], gene_loss)) #print ("epoch: %d, [D loss: %f, acc.: %.2f%%]" % (print_epoch, disc_loss[0], 100*disc_loss[1])) return def train_step_test1(self, real_datas, batch_size=32, now_epoch=None, print_on_batch=False): ''' training gan model on one epoch discriminatorの学習時にrealとfakeを一緒に学習 ''' # sample_num = real_datas.shape[0] half_batch_size = int(batch_size / 2) batch_num = int(sample_num / half_batch_size) + 1 # index for minibatch training shuffled_idx = np.random.permutation(sample_num) # roop of batch for i_batch in range(batch_num): if half_batch_size*i_batch < sample_num: # --------------------------- # training of discriminator # --------------------------- # real data real_x = 
real_datas[shuffled_idx[half_batch_size*i_batch : half_batch_size*(i_batch+1)]] x_num = real_x.shape[0] real_y = np.ones((x_num, 1)) # label = 1 # fake data latents = np.random.normal(0, 1, (x_num, self.latent_dim)) fake_x = self.gene_model.predict(latents) fake_y = np.zeros((x_num, 1)) # label = 0 # x = np.append(real_x, fake_x, axis=0) y = np.append(real_y, fake_y, axis=0) disc_loss = self.disc_model.train_on_batch(x=x, y=y) # --------------------------- # training of generator # --------------------------- # generated data # batch size = x_num * 2 (= real + fake in disc training) latents = np.random.normal(0, 1, (x_num * 2, self.latent_dim)) # x_num * 2 y = np.ones((x_num * 2, 1)) # label = 1 # gene_loss = self.combined_model.train_on_batch(x=latents, y=y) # training progress if print_on_batch: print_epoch = now_epoch if now_epoch is not None else 0 print ("epoch: %d, batch: %d, [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (print_epoch, i_batch+1, disc_loss[0], 100*disc_loss[1], gene_loss)) # training progress print_epoch = now_epoch if now_epoch is not None else 0 print ("epoch: %d, [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (print_epoch, disc_loss[0], 100*disc_loss[1], gene_loss)) #print ("epoch: %d, [D loss: %f, acc.: %.2f%%]" % (print_epoch, disc_loss[0], 100*disc_loss[1])) return def train_step_only_disc_with_random_noise(self, real_datas, batch_size=32, now_epoch=None, print_on_batch=False): ''' training gan model on one epoch discriminatorのみ学習 ''' # sample_num = real_datas.shape[0] half_batch_size = int(batch_size / 2) batch_num = int(sample_num / half_batch_size) + 1 # index for minibatch training shuffled_idx = np.random.permutation(sample_num) # roop of batch for i_batch in range(batch_num): if half_batch_size*i_batch < sample_num: # --------------------------- # training of discriminator # --------------------------- # real data real_x = real_datas[shuffled_idx[half_batch_size*i_batch : half_batch_size*(i_batch+1)]] x_num = real_x.shape[0] real_y = 
np.ones((x_num, 1)) # label = 1 # fake data fake_x = np.random.rand(x_num, 2) * 2 - 1 fake_y = np.zeros((x_num, 1)) # label = 0 # x = np.append(real_x, fake_x, axis=0) y = np.append(real_y, fake_y, axis=0) disc_loss = self.disc_model.train_on_batch(x=x, y=y) # training progress if print_on_batch: print_epoch = now_epoch if now_epoch is not None else 0 print ("epoch: %d, batch: %d, [D loss: %f, acc.: %.2f%%]" % (print_epoch, i_batch+1, disc_loss[0], 100*disc_loss[1])) # training progress print_epoch = now_epoch if now_epoch is not None else 0 print ("epoch: %d, [D loss: %f, acc.: %.2f%%]" % (print_epoch, disc_loss[0], 100*disc_loss[1])) return def train_step_only_gene(self, real_datas, batch_size=32, now_epoch=None, print_on_batch=False): ''' training gan model on one epoch generatorのみ学習 ''' # sample_num = real_datas.shape[0] half_batch_size = int(batch_size / 2) batch_num = int(sample_num / half_batch_size) + 1 # index for minibatch training shuffled_idx = np.random.permutation(sample_num) # roop of batch for i_batch in range(batch_num): if half_batch_size*i_batch < sample_num: # real data real_x = real_datas[shuffled_idx[half_batch_size*i_batch : half_batch_size*(i_batch+1)]] x_num = real_x.shape[0] # --------------------------- # training of generator # --------------------------- # generated data # batch size = x_num * 2 (= real + fake in disc training) latents = np.random.normal(0, 1, (x_num * 2, self.latent_dim)) # x_num * 2 y = np.ones((x_num * 2, 1)) # label = 1 # gene_loss = self.combined_model.train_on_batch(x=latents, y=y) # training progress if print_on_batch: print_epoch = now_epoch if now_epoch is not None else 0 #print ("epoch: %d, batch: %d, [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (print_epoch, i_batch+1, disc_loss[0], 100*disc_loss[1], gene_loss)) #print ("epoch: %d, batch: %d, [D loss: %f, acc.: %.2f%%]" % (print_epoch, i_batch+1, disc_loss[0], 100*disc_loss[1])) print ("epoch: %d, batch: %d, [G loss: %f]" % (print_epoch, i_batch+1, gene_loss)) 
# training progress print_epoch = now_epoch if now_epoch is not None else 0 #print ("epoch: %d, [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (print_epoch, disc_loss[0], 100*disc_loss[1], gene_loss)) #print ("epoch: %d, [D loss: %f, acc.: %.2f%%]" % (print_epoch, disc_loss[0], 100*disc_loss[1])) print ("epoch: %d, [G loss: %f]" % (print_epoch, gene_loss)) return class WGAN_GP(): def __init__(self, latent_dim=2, data_dim=2): # 潜在変数の次元数 self.latent_dim = latent_dim # データの次元 self.data_dim = data_dim return def make_model(self, gene_hidden_neurons, disc_hidden_neurons, batch_size, gradient_penalty_weight): # discriminator model self.disc_model = self.__build_discriminator(disc_hidden_neurons) # generator model self.gene_model = self.__build_generator(gene_hidden_neurons) # combinedモデルの学習時はdiscriminatorの学習をFalseにする for layer in self.disc_model.layers: layer.trainable = False self.disc_model.trainable = False self.netG_model, self.netG_train = self.__build_combined_gene_and_disc() # for layer in self.disc_model.layers: layer.trainable = True for layer in self.gene_model.layers: layer.trainable = False self.disc_model.trainable = True self.gene_model.trainable = False self.netD_train = self.__build_discriminator_with_own_loss(batch_size, gradient_penalty_weight) return def __build_generator(self, hidden_neurons): ''' build generator keras model the last activation is tanh. 
''' # input latent_inputs = Input(shape=(self.latent_dim,)) # hidden layer x = latent_inputs for hidden_n in hidden_neurons: x = Dense(hidden_n)(x) #x = Activation('relu')(x) x = LeakyReLU()(x) x = BatchNormalization()(x) # output x = Dense(self.data_dim)(x) datas = Activation('tanh')(x) #datas = Activation('linear')(x) model = Model(input=latent_inputs, output=datas) model.summary() return model def __build_discriminator(self, hidden_neurons): ''' build discriminator keras model ''' # input datas = Input(shape=(self.data_dim,)) # hidden layer x = datas for hidden_n in hidden_neurons: x = Dense(hidden_n)(x) #x = Activation('relu')(x) x = LeakyReLU()(x) #x = BatchNormalization()(x) # output x = Dense(1)(x) #real_or_fake = Activation('sigmoid')(x) # sigmoid is not used in wgan # model = Model(input=datas, output=x) model.summary() return model def __build_combined_gene_and_disc(self): ''' build combined keras model of generator and discriminator ''' # input latent_inputs = Input(shape=(self.latent_dim,)) # generated data data = self.gene_model(latent_inputs) # valid = self.disc_model(data) # model = Model(input=latent_inputs, output=valid) model.summary() # loss = -1 * K.mean(valid) # training_updates = Adam(lr=1e-4, beta_1=0.5, beta_2=0.9).get_updates(self.gene_model.trainable_weights,[],loss) g_train = K.function([latent_inputs], [loss], training_updates) return model, g_train def __build_discriminator_with_own_loss(self, batch_size, gradient_penalty_weight): ##モデルの定義 # generatorの入力 latent_inputs = Input(shape=(self.latent_dim,)) # discriimnatorの入力 gene_data = self.gene_model(latent_inputs) real_data = Input(shape=(self.data_dim,)) # ave_rate = K.placeholder(shape=(None, 1)) ave_data = Input(shape=(self.data_dim,), tensor=ave_rate * real_data + (1-ave_rate) * gene_data) #ave_rate = Input(shape=(1,)) #ave_data = ave_rate * real_data + (1-ave_rate) * gene_data # discriminatorの出力 gene_out = self.disc_model(gene_data) real_out = self.disc_model(real_data) ave_out = 
self.disc_model(ave_data) ##モデルの定義終了 # 損失関数を定義する # original critic loss loss_real = K.mean(real_out) / batch_size loss_fake = K.mean(gene_out) / batch_size # gradient penalty grad_mixed = K.gradients(ave_out, [ave_data])[0] #norm_grad_mixed = K.sqrt(K.sum(K.square(grad_mixed), axis=[1,2,3])) norm_grad_mixed = K.sqrt(K.sum(K.square(grad_mixed), axis=1)) grad_penalty = K.mean(K.square(norm_grad_mixed -1)) # 最終的な損失関数 loss = loss_fake - loss_real + gradient_penalty_weight * grad_penalty # オプティマイザーと損失関数、学習する重みを指定する training_updates = Adam(lr=1e-4, beta_1=0.5, beta_2=0.9)\ .get_updates(self.disc_model.trainable_weights,[],loss) # 入出力とtraining_updatesをfunction化 d_train = K.function([real_data, latent_inputs, ave_rate], [loss_real, loss_fake], training_updates) return d_train def train(self, real_datas, epoch, batch_size=32, train_ratio=5): ''' train wgan-gp model ''' for epoch in range(epochs): self.train_on_epoch(real_datas, batch_size, train_ratio) return def train_step(self, real_datas, batch_size=32, train_ratio=5): ''' train wgan-gp model ''' sample_num = real_datas.shape[0] batch_num = int(sample_num / batch_size) + 1 # index for minibatch training shuffled_idx = np.array([np.random.permutation(sample_num) for i in range(train_ratio)]) # roop of batch for i_batch in range(batch_num): if batch_size*i_batch < sample_num: # --------------------- # Discriminatorの学習 # --------------------- for itr in range(train_ratio): # バッチサイズを教師データからピックアップ real_x = real_datas[shuffled_idx[itr, batch_size*i_batch : batch_size*(i_batch+1)]] real_x_num = real_x.shape[0] # ノイズ noise = np.random.normal(0, 1, (real_x_num, self.latent_dim)) # epsilon = np.random.uniform(size = (real_x_num, 1)) errD_real, errD_fake = self.netD_train([real_x, noise, epsilon]) d_loss = errD_real - errD_fake # --------------------- # Generatorの学習 # --------------------- noise = np.random.normal(0, 1, (batch_size, self.latent_dim)) # 生成データの正解ラベルは本物(1) valid_y = np.array([1] * batch_size) # Train the generator 
g_loss = self.netG_train([noise]) # 進捗の表示 print ("[D loss: %f] [G loss: %f]" % (d_loss, g_loss[0])) return
[ "numpy.random.rand", "keras.backend.gradients", "numpy.array", "keras.layers.Activation", "keras.layers.Dense", "keras.backend.square", "keras.backend.placeholder", "keras.models.Model", "numpy.random.permutation", "keras.optimizers.Adam", "numpy.random.normal", "numpy.ones", "numpy.add", ...
[((1635, 1666), 'keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (1640, 1666), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((2075, 2115), 'keras.models.Model', 'Model', ([], {'input': 'latent_inputs', 'output': 'datas'}), '(input=latent_inputs, output=datas)\n', (2080, 2115), False, 'from keras.models import Model\n'), ((2324, 2353), 'keras.layers.Input', 'Input', ([], {'shape': '(self.data_dim,)'}), '(shape=(self.data_dim,))\n', (2329, 2353), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((2722, 2761), 'keras.models.Model', 'Model', ([], {'input': 'datas', 'output': 'real_or_fake'}), '(input=datas, output=real_or_fake)\n', (2727, 2761), False, 'from keras.models import Model\n'), ((3005, 3036), 'keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (3010, 3036), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((3248, 3295), 'keras.models.Model', 'Model', ([], {'input': 'latent_inputs', 'output': 'real_or_fake'}), '(input=latent_inputs, output=real_or_fake)\n', (3253, 3295), False, 'from keras.models import Model\n'), ((4153, 4186), 'numpy.random.permutation', 'np.random.permutation', (['sample_num'], {}), '(sample_num)\n', (4174, 4186), True, 'import numpy as np\n'), ((6839, 6872), 'numpy.random.permutation', 'np.random.permutation', (['sample_num'], {}), '(sample_num)\n', (6860, 6872), True, 'import numpy as np\n'), ((9458, 9491), 'numpy.random.permutation', 'np.random.permutation', (['sample_num'], {}), '(sample_num)\n', (9479, 9491), True, 'import numpy as np\n'), ((11343, 11376), 'numpy.random.permutation', 'np.random.permutation', (['sample_num'], {}), '(sample_num)\n', (11364, 11376), True, 'import numpy as np\n'), ((14569, 14600), 'keras.layers.Input', 'Input', ([], {'shape': 
'(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (14574, 14600), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((15009, 15049), 'keras.models.Model', 'Model', ([], {'input': 'latent_inputs', 'output': 'datas'}), '(input=latent_inputs, output=datas)\n', (15014, 15049), False, 'from keras.models import Model\n'), ((15258, 15287), 'keras.layers.Input', 'Input', ([], {'shape': '(self.data_dim,)'}), '(shape=(self.data_dim,))\n', (15263, 15287), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((15687, 15715), 'keras.models.Model', 'Model', ([], {'input': 'datas', 'output': 'x'}), '(input=datas, output=x)\n', (15692, 15715), False, 'from keras.models import Model\n'), ((15959, 15990), 'keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (15964, 15990), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((16148, 16188), 'keras.models.Model', 'Model', ([], {'input': 'latent_inputs', 'output': 'valid'}), '(input=latent_inputs, output=valid)\n', (16153, 16188), False, 'from keras.models import Model\n'), ((16425, 16478), 'keras.backend.function', 'K.function', (['[latent_inputs]', '[loss]', 'training_updates'], {}), '([latent_inputs], [loss], training_updates)\n', (16435, 16478), True, 'from keras import backend as K\n'), ((16672, 16703), 'keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (16677, 16703), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((16807, 16836), 'keras.layers.Input', 'Input', ([], {'shape': '(self.data_dim,)'}), '(shape=(self.data_dim,))\n', (16812, 16836), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((16868, 16898), 'keras.backend.placeholder', 'K.placeholder', ([], 
{'shape': '(None, 1)'}), '(shape=(None, 1))\n', (16881, 16898), True, 'from keras import backend as K\n'), ((16919, 17010), 'keras.layers.Input', 'Input', ([], {'shape': '(self.data_dim,)', 'tensor': '(ave_rate * real_data + (1 - ave_rate) * gene_data)'}), '(shape=(self.data_dim,), tensor=ave_rate * real_data + (1 - ave_rate) *\n gene_data)\n', (16924, 17010), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((18112, 18206), 'keras.backend.function', 'K.function', (['[real_data, latent_inputs, ave_rate]', '[loss_real, loss_fake]', 'training_updates'], {}), '([real_data, latent_inputs, ave_rate], [loss_real, loss_fake],\n training_updates)\n', (18122, 18206), True, 'from keras import backend as K\n'), ((1951, 1971), 'keras.layers.Dense', 'Dense', (['self.data_dim'], {}), '(self.data_dim)\n', (1956, 1971), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((1992, 2010), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (2002, 2010), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((2631, 2639), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (2636, 2639), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((2667, 2688), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (2677, 2688), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((14885, 14905), 'keras.layers.Dense', 'Dense', (['self.data_dim'], {}), '(self.data_dim)\n', (14890, 14905), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((14926, 14944), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (14936, 14944), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, 
LeakyReLU\n'), ((15565, 15573), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (15570, 15573), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((16248, 16261), 'keras.backend.mean', 'K.mean', (['valid'], {}), '(valid)\n', (16254, 16261), True, 'from keras import backend as K\n'), ((17378, 17394), 'keras.backend.mean', 'K.mean', (['real_out'], {}), '(real_out)\n', (17384, 17394), True, 'from keras import backend as K\n'), ((17429, 17445), 'keras.backend.mean', 'K.mean', (['gene_out'], {}), '(gene_out)\n', (17435, 17445), True, 'from keras import backend as K\n'), ((17511, 17543), 'keras.backend.gradients', 'K.gradients', (['ave_out', '[ave_data]'], {}), '(ave_out, [ave_data])\n', (17522, 17543), True, 'from keras import backend as K\n'), ((17727, 17756), 'keras.backend.square', 'K.square', (['(norm_grad_mixed - 1)'], {}), '(norm_grad_mixed - 1)\n', (17735, 17756), True, 'from keras import backend as K\n'), ((910, 936), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(5e-06)', 'beta_1': '(0.1)'}), '(lr=5e-06, beta_1=0.1)\n', (914, 936), False, 'from keras.optimizers import Adam\n'), ((1365, 1392), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), '(lr=0.0002, beta_1=0.5)\n', (1369, 1392), False, 'from keras.optimizers import Adam\n'), ((1778, 1793), 'keras.layers.Dense', 'Dense', (['hidden_n'], {}), '(hidden_n)\n', (1783, 1793), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((1854, 1865), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (1863, 1865), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((1886, 1906), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1904, 1906), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((2457, 2472), 'keras.layers.Dense', 'Dense', (['hidden_n'], 
{}), '(hidden_n)\n', (2462, 2472), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((2493, 2511), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2503, 2511), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((4648, 4667), 'numpy.ones', 'np.ones', (['(x_num, 1)'], {}), '((x_num, 1))\n', (4655, 4667), True, 'import numpy as np\n'), ((4849, 4897), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(x_num, self.latent_dim)'], {}), '(0, 1, (x_num, self.latent_dim))\n', (4865, 4897), True, 'import numpy as np\n'), ((4978, 4998), 'numpy.zeros', 'np.zeros', (['(x_num, 1)'], {}), '((x_num, 1))\n', (4986, 4998), True, 'import numpy as np\n'), ((5501, 5553), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(x_num * 2, self.latent_dim)'], {}), '(0, 1, (x_num * 2, self.latent_dim))\n', (5517, 5553), True, 'import numpy as np\n'), ((5587, 5610), 'numpy.ones', 'np.ones', (['(x_num * 2, 1)'], {}), '((x_num * 2, 1))\n', (5594, 5610), True, 'import numpy as np\n'), ((7339, 7358), 'numpy.ones', 'np.ones', (['(x_num, 1)'], {}), '((x_num, 1))\n', (7346, 7358), True, 'import numpy as np\n'), ((7445, 7493), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(x_num, self.latent_dim)'], {}), '(0, 1, (x_num, self.latent_dim))\n', (7461, 7493), True, 'import numpy as np\n'), ((7579, 7599), 'numpy.zeros', 'np.zeros', (['(x_num, 1)'], {}), '((x_num, 1))\n', (7587, 7599), True, 'import numpy as np\n'), ((7670, 7703), 'numpy.append', 'np.append', (['real_x', 'fake_x'], {'axis': '(0)'}), '(real_x, fake_x, axis=0)\n', (7679, 7703), True, 'import numpy as np\n'), ((7725, 7758), 'numpy.append', 'np.append', (['real_y', 'fake_y'], {'axis': '(0)'}), '(real_y, fake_y, axis=0)\n', (7734, 7758), True, 'import numpy as np\n'), ((8106, 8158), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(x_num * 2, self.latent_dim)'], {}), 
'(0, 1, (x_num * 2, self.latent_dim))\n', (8122, 8158), True, 'import numpy as np\n'), ((8192, 8215), 'numpy.ones', 'np.ones', (['(x_num * 2, 1)'], {}), '((x_num * 2, 1))\n', (8199, 8215), True, 'import numpy as np\n'), ((9958, 9977), 'numpy.ones', 'np.ones', (['(x_num, 1)'], {}), '((x_num, 1))\n', (9965, 9977), True, 'import numpy as np\n'), ((10122, 10142), 'numpy.zeros', 'np.zeros', (['(x_num, 1)'], {}), '((x_num, 1))\n', (10130, 10142), True, 'import numpy as np\n'), ((10213, 10246), 'numpy.append', 'np.append', (['real_x', 'fake_x'], {'axis': '(0)'}), '(real_x, fake_x, axis=0)\n', (10222, 10246), True, 'import numpy as np\n'), ((10268, 10301), 'numpy.append', 'np.append', (['real_y', 'fake_y'], {'axis': '(0)'}), '(real_y, fake_y, axis=0)\n', (10277, 10301), True, 'import numpy as np\n'), ((11967, 12019), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(x_num * 2, self.latent_dim)'], {}), '(0, 1, (x_num * 2, self.latent_dim))\n', (11983, 12019), True, 'import numpy as np\n'), ((12053, 12076), 'numpy.ones', 'np.ones', (['(x_num * 2, 1)'], {}), '((x_num * 2, 1))\n', (12060, 12076), True, 'import numpy as np\n'), ((14712, 14727), 'keras.layers.Dense', 'Dense', (['hidden_n'], {}), '(hidden_n)\n', (14717, 14727), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((14788, 14799), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (14797, 14799), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((14820, 14840), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (14838, 14840), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((15391, 15406), 'keras.layers.Dense', 'Dense', (['hidden_n'], {}), '(hidden_n)\n', (15396, 15406), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((15467, 15478), 'keras.layers.LeakyReLU', 
'LeakyReLU', ([], {}), '()\n', (15476, 15478), False, 'from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LeakyReLU\n'), ((16303, 16342), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)', 'beta_1': '(0.5)', 'beta_2': '(0.9)'}), '(lr=0.0001, beta_1=0.5, beta_2=0.9)\n', (16307, 16342), False, 'from keras.optimizers import Adam\n'), ((17665, 17685), 'keras.backend.square', 'K.square', (['grad_mixed'], {}), '(grad_mixed)\n', (17673, 17685), True, 'from keras import backend as K\n'), ((17925, 17964), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)', 'beta_1': '(0.5)', 'beta_2': '(0.9)'}), '(lr=0.0001, beta_1=0.5, beta_2=0.9)\n', (17929, 17964), False, 'from keras.optimizers import Adam\n'), ((18782, 18815), 'numpy.random.permutation', 'np.random.permutation', (['sample_num'], {}), '(sample_num)\n', (18803, 18815), True, 'import numpy as np\n'), ((19828, 19881), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batch_size, self.latent_dim)'], {}), '(0, 1, (batch_size, self.latent_dim))\n', (19844, 19881), True, 'import numpy as np\n'), ((19949, 19975), 'numpy.array', 'np.array', (['([1] * batch_size)'], {}), '([1] * batch_size)\n', (19957, 19975), True, 'import numpy as np\n'), ((5171, 5209), 'numpy.add', 'np.add', (['disc_loss_real', 'disc_loss_fake'], {}), '(disc_loss_real, disc_loss_fake)\n', (5177, 5209), True, 'import numpy as np\n'), ((19396, 19449), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(real_x_num, self.latent_dim)'], {}), '(0, 1, (real_x_num, self.latent_dim))\n', (19412, 19449), True, 'import numpy as np\n'), ((19506, 19545), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(real_x_num, 1)'}), '(size=(real_x_num, 1))\n', (19523, 19545), True, 'import numpy as np\n'), ((10063, 10087), 'numpy.random.rand', 'np.random.rand', (['x_num', '(2)'], {}), '(x_num, 2)\n', (10077, 10087), True, 'import numpy as np\n')]
__author__ = '<NAME>' __copyright__ = 'Copyright 2017, Profactor GmbH' __license__ = 'BSD' import numpy as np import pandas as pd import os import matplotlib.pyplot as plt import cv2 def crop(img, border): return img[border[1]:-border[1], border[0]:-border[0]] if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Create mean depth images for RGB/IR by averaging over redundant captures.') parser.add_argument('index', type=str, help='Index CSV') parser.add_argument('--crop', type=int, help='Crop images by this size', default=100) parser.add_argument('--unitscale', type=float, help='Scale depth by this value', default=0.001) parser.add_argument('--output', type=str, help='Result file', default='input_depths.npz') args = parser.parse_args() df = pd.DataFrame.from_csv(args.index, sep=' ') depth_ir = {} depth_rgb = {} temps = [] poses = [] groups = df.groupby(df.Temp) first = True for t, tgroup in groups: temps.append(t) print('Processing temperature {}'.format(t)) for p, pgroup in tgroup.groupby(tgroup.Axis): if first: poses.append(p) print(' Processing position {}'.format(p)) # Read IR Depth d = [] for name in pgroup[pgroup.Type == 'depth.png']['Name']: fname = os.path.join(os.path.dirname(args.index), name) dm = cv2.imread(fname, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH).astype(np.float32) dm[d==0] = np.nan d.append(dm) d = np.stack(d, axis=0) d = np.mean(d, axis=0) depth_ir[(p, t)] = d * args.unitscale d = [] for name in pgroup[pgroup.Type == 'sdepth.exr']['Name']: fname = os.path.join(os.path.dirname(args.index), name) dm = cv2.imread(fname, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH).astype(np.float32) dm[d==0] = np.nan d.append(dm) d = np.stack(d, axis=0) d = np.mean(d, axis=0) depth_rgb[(p, t)] = d * args.unitscale first = False depth_ir = {k: crop(img, (args.crop, args.crop)) for k, img in depth_ir.items()} depth_rgb = {k: crop(img, (args.crop, args.crop)) for k, img in depth_rgb.items()} np.savez(args.output, 
depth_ir=depth_ir, depth_rgb=depth_rgb, temps=temps, poses=poses)
[ "numpy.mean", "numpy.savez", "argparse.ArgumentParser", "pandas.DataFrame.from_csv", "numpy.stack", "os.path.dirname", "cv2.imread" ]
[((329, 451), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create mean depth images for RGB/IR by averaging over redundant captures."""'}), "(description=\n 'Create mean depth images for RGB/IR by averaging over redundant captures.'\n )\n", (352, 451), False, 'import argparse\n'), ((835, 877), 'pandas.DataFrame.from_csv', 'pd.DataFrame.from_csv', (['args.index'], {'sep': '""" """'}), "(args.index, sep=' ')\n", (856, 877), True, 'import pandas as pd\n'), ((2398, 2489), 'numpy.savez', 'np.savez', (['args.output'], {'depth_ir': 'depth_ir', 'depth_rgb': 'depth_rgb', 'temps': 'temps', 'poses': 'poses'}), '(args.output, depth_ir=depth_ir, depth_rgb=depth_rgb, temps=temps,\n poses=poses)\n', (2406, 2489), True, 'import numpy as np\n'), ((1646, 1665), 'numpy.stack', 'np.stack', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (1654, 1665), True, 'import numpy as np\n'), ((1682, 1700), 'numpy.mean', 'np.mean', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (1689, 1700), True, 'import numpy as np\n'), ((2092, 2111), 'numpy.stack', 'np.stack', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (2100, 2111), True, 'import numpy as np\n'), ((2128, 2146), 'numpy.mean', 'np.mean', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (2135, 2146), True, 'import numpy as np\n'), ((1431, 1458), 'os.path.dirname', 'os.path.dirname', (['args.index'], {}), '(args.index)\n', (1446, 1458), False, 'import os\n'), ((1877, 1904), 'os.path.dirname', 'os.path.dirname', (['args.index'], {}), '(args.index)\n', (1892, 1904), False, 'import os\n'), ((1487, 1547), 'cv2.imread', 'cv2.imread', (['fname', '(cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)'], {}), '(fname, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)\n', (1497, 1547), False, 'import cv2\n'), ((1933, 1993), 'cv2.imread', 'cv2.imread', (['fname', '(cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)'], {}), '(fname, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)\n', (1943, 1993), False, 'import cv2\n')]
import abc import numpy as np from sklearn.metrics import accuracy_score class BaseMetric(): def __init__(self): self.preds = [] self.targets = [] def append(self, preds, targets): self.preds.extend(preds.data.cpu().numpy()) self.targets.extend(targets.data.cpu().numpy()) def reset(self): self.preds = [] self.targets = [] def show(self): return '{}: {:.4f}'.format(self.name, self.compute()) @abc.abstractmethod def compute(self): raise NotImplementedError class AccuracyScore(BaseMetric): def __init__(self): super().__init__() self.name = 'accuracy' def compute(self): preds = np.argmax(self.preds, axis=1) return accuracy_score(preds, self.targets)
[ "sklearn.metrics.accuracy_score", "numpy.argmax" ]
[((713, 742), 'numpy.argmax', 'np.argmax', (['self.preds'], {'axis': '(1)'}), '(self.preds, axis=1)\n', (722, 742), True, 'import numpy as np\n'), ((758, 793), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['preds', 'self.targets'], {}), '(preds, self.targets)\n', (772, 793), False, 'from sklearn.metrics import accuracy_score\n')]
import os import tensorflow as tf from object_detection.utils import label_map_util from object_detection.utils import visualization_utils as viz_utils from object_detection.builders import model_builder from object_detection.utils import config_util import cv2 import numpy as np from matplotlib import pyplot as plt from move import * import math from imutils.video import VideoStream from imutils.video import FPS # CONSTANTS (at 25.5 inches shooting distance) webcamXOffset = 12 # in Pixels webcamYOffset = 150 # in Pixels X_ACC_THRESHOLD = 4 # in Pixels Y_ACC_THRESHOLD = 7 # in Pixels impatience = 70 aimTime = 3 # CUSTOM OFFSETS FOR SOLDIERS TO AIM AT BODY radiomanX = 18 # 18 pixels to the right multiGunnerX = -1 machineGunnerX = 1 # HELPER FUNCTIONS def nextTarget(coords, crosshairX, crosshairY): """Find the closest target to the crosshair using euclidean distance""" nearestTgtId = -1 minDist = 1e9 i = 0 for x in coords: ed = math.sqrt((crosshairX - (int(x[1]) + (int(x[3]) - int(x[1])) / 2)) ** 2 + (crosshairY - (int(x[2]) + (int(x[4]) - int(x[2])) / 2)) ** 2) if ed < minDist: minDist = ed nearestTgtId = i i += 1 if nearestTgtId != -1: #print("TARGETTING:", coords[nearestTgtId][0]) return [int(coords[nearestTgtId][1]) + (int(coords[nearestTgtId][3]) - int(coords[nearestTgtId][1])) // 2, int(coords[nearestTgtId][2]) + (int(coords[nearestTgtId][4]) - int(coords[nearestTgtId][2])) // 2, coords[nearestTgtId][0][2:]] # Returns (x, y, id) coords of closest target return [-1, -1, -1]# No targets found # Load pipeline config and build a detection model configs = config_util.get_configs_from_pipeline_file("detect_soldiers_v5/pipeline.config") detection_model = model_builder.build(model_config=configs['model'], is_training=False) # Restore checkpoint ckpt = tf.compat.v2.train.Checkpoint(model=detection_model) ckpt.restore(os.path.join("detect_soldiers_v5", 'ckpt-3')).expect_partial() @tf.function def detect_fn(image): image, shapes = detection_model.preprocess(image) 
prediction_dict = detection_model.predict(image, shapes) detections = detection_model.postprocess(prediction_dict, shapes) return detections category_index = label_map_util.create_category_index_from_labelmap("detect_soldiers_v5/label_map.pbtxt") print("Starting video stream...") cap = VideoStream(src = 0).start() sleep(2) fps = FPS().start() aimed = 0 # Premove tilter a few degrees to account for slop moveTilter(5, 40) while True: frame = cap.read() (h, w) = frame.shape[:2] width, height = w, h image_np = np.array(frame) input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32) detections = detect_fn(input_tensor) num_detections = int(detections.pop('num_detections')) detections = {key: value[0, :num_detections].numpy() for key, value in detections.items()} detections['num_detections'] = num_detections # detection_classes should be ints. detections['detection_classes'] = detections['detection_classes'].astype(np.int64) label_id_offset = 1 image_np_with_detections = image_np.copy() coords = viz_utils.visualize_boxes_and_labels_on_image_array( image_np_with_detections, detections['detection_boxes'], detections['detection_classes']+label_id_offset, detections['detection_scores'], category_index, use_normalized_coordinates=True, max_boxes_to_draw=10, min_score_thresh=.49, agnostic_mode=False) #print(coords) # Draw crosshair crosshairX = w // 2 + int(webcamXOffset) crosshairY = h // 2 + int(webcamYOffset) cv2.circle(image_np_with_detections, (crosshairX, crosshairY), 5, (0xFF, 0xFF, 0xFF), 1) cv2.circle(image_np_with_detections, (crosshairX, crosshairY), 10, (0xFF, 0xFF, 0xFF), 2) cv2.imshow('object detection', cv2.resize(image_np_with_detections, (800, 600))) # Find the closest target tgt = nextTarget(coords, crosshairX, crosshairY) #print("TARGET:", tgt) # Aim at closest target if (tgt[0] != -1): k = 1 if abs(tgt[0] - crosshairX) > 75: k = 0 # Make minor adjustments based on what type of soldier it is aiming at if (tgt[2] == "GreenRadioman"): 
tgt[0] += radiomanX elif (tgt[2] == "GreenMultiGunner"): tgt[0] += multiGunnerX elif (tgt[2] == "GreenMachineGunner"): tgt[0] += machineGunnerX print("Targetting:", tgt[2]) turnAndTilt(tgt[0] - crosshairX, k * -(tgt[1] - crosshairY)) # Shoot if the target is aimed if (aimed >= aimTime and abs(tgt[0] - crosshairX) <= X_ACC_THRESHOLD and abs(tgt[1] - crosshairY) <= Y_ACC_THRESHOLD): # Wait for wobbling to stop moveShooter(1) print("SHOOT") aimed = 0 elif abs(tgt[0] - crosshairX) <= X_ACC_THRESHOLD and abs(tgt[1] - crosshairY) <= Y_ACC_THRESHOLD: aimed += 1 print("AIMING:", aimed, "/", aimTime) else: aimed = 0 else: impatience -= 1 if impatience < 0: # move tilter back down to check if we missed any soldiers impatience = 70 moveTilter(5, 10) setLastAng(tilterMotor.position) if cv2.waitKey(10) & 0xFF == ord('q'): cv2.destroyAllWindows() cleanup_motors() break # update fps counter fps.update() fps.stop() print ('[INFO] elapsed time: {:.2f}'.format(fps.elapsed())) print ('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
[ "imutils.video.VideoStream", "object_detection.builders.model_builder.build", "object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array", "os.path.join", "imutils.video.FPS", "numpy.array", "cv2.circle", "cv2.destroyAllWindows", "object_detection.utils.label_map_util.crea...
[((1681, 1766), 'object_detection.utils.config_util.get_configs_from_pipeline_file', 'config_util.get_configs_from_pipeline_file', (['"""detect_soldiers_v5/pipeline.config"""'], {}), "('detect_soldiers_v5/pipeline.config'\n )\n", (1723, 1766), False, 'from object_detection.utils import config_util\n'), ((1780, 1849), 'object_detection.builders.model_builder.build', 'model_builder.build', ([], {'model_config': "configs['model']", 'is_training': '(False)'}), "(model_config=configs['model'], is_training=False)\n", (1799, 1849), False, 'from object_detection.builders import model_builder\n'), ((1879, 1931), 'tensorflow.compat.v2.train.Checkpoint', 'tf.compat.v2.train.Checkpoint', ([], {'model': 'detection_model'}), '(model=detection_model)\n', (1908, 1931), True, 'import tensorflow as tf\n'), ((2269, 2362), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', (['"""detect_soldiers_v5/label_map.pbtxt"""'], {}), "(\n 'detect_soldiers_v5/label_map.pbtxt')\n", (2319, 2362), False, 'from object_detection.utils import label_map_util\n'), ((2644, 2659), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (2652, 2659), True, 'import numpy as np\n'), ((3234, 3557), 'object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array', 'viz_utils.visualize_boxes_and_labels_on_image_array', (['image_np_with_detections', "detections['detection_boxes']", "(detections['detection_classes'] + label_id_offset)", "detections['detection_scores']", 'category_index'], {'use_normalized_coordinates': '(True)', 'max_boxes_to_draw': '(10)', 'min_score_thresh': '(0.49)', 'agnostic_mode': '(False)'}), "(image_np_with_detections,\n detections['detection_boxes'], detections['detection_classes'] +\n label_id_offset, detections['detection_scores'], category_index,\n use_normalized_coordinates=True, max_boxes_to_draw=10, min_score_thresh\n =0.49, agnostic_mode=False)\n", (3285, 3557), True, 'from 
object_detection.utils import visualization_utils as viz_utils\n'), ((3823, 3912), 'cv2.circle', 'cv2.circle', (['image_np_with_detections', '(crosshairX, crosshairY)', '(5)', '(255, 255, 255)', '(1)'], {}), '(image_np_with_detections, (crosshairX, crosshairY), 5, (255, 255,\n 255), 1)\n', (3833, 3912), False, 'import cv2\n'), ((3916, 4007), 'cv2.circle', 'cv2.circle', (['image_np_with_detections', '(crosshairX, crosshairY)', '(10)', '(255, 255, 255)', '(2)'], {}), '(image_np_with_detections, (crosshairX, crosshairY), 10, (255, \n 255, 255), 2)\n', (3926, 4007), False, 'import cv2\n'), ((2399, 2417), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)'}), '(src=0)\n', (2410, 2417), False, 'from imutils.video import VideoStream\n'), ((2443, 2448), 'imutils.video.FPS', 'FPS', ([], {}), '()\n', (2446, 2448), False, 'from imutils.video import FPS\n'), ((2705, 2732), 'numpy.expand_dims', 'np.expand_dims', (['image_np', '(0)'], {}), '(image_np, 0)\n', (2719, 2732), True, 'import numpy as np\n'), ((4043, 4091), 'cv2.resize', 'cv2.resize', (['image_np_with_detections', '(800, 600)'], {}), '(image_np_with_detections, (800, 600))\n', (4053, 4091), False, 'import cv2\n'), ((5550, 5573), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5571, 5573), False, 'import cv2\n'), ((1945, 1989), 'os.path.join', 'os.path.join', (['"""detect_soldiers_v5"""', '"""ckpt-3"""'], {}), "('detect_soldiers_v5', 'ckpt-3')\n", (1957, 1989), False, 'import os\n'), ((5506, 5521), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (5517, 5521), False, 'import cv2\n')]
# coding: utf8 from skimage.io import imread from skimage.measure import label from openslide import OpenSlide import numpy import sys from skimage.morphology import dilation import os import pickle slidename = str(sys.argv[1]) probapath = str(sys.argv[2]) offsetx = 0 offsety = 0 slide = OpenSlide(slidename) probaimage = imread(probapath) if probaimage.ndim == 3: probaimage = probaimage[:, :, 1] posy, posx = numpy.where(probaimage > 84) posx *= 500 posx += (offsetx * 4) posy *= 500 posy += (offsety * 4) dico = dict() # brown contours version # for k in range(len(posx)): # image = numpy.array(slide.read_region(location=(posx[k], posy[k]), level=0, size=(500, 500)))[:, :, 0:3] # brown = image[:, :, 0].astype(float) / (1. + image[:, :, 2].astype(float)) # brown = brown > 1.3 # d = dilation(brown) # inv = numpy.logical_not(brown) # c = numpy.logical_and(inv, d) # c[0, :] = 1 # c[:, 0] = 1 # c[-1, :] = 1 # c[:, -1] = 1 # y, x = numpy.where(c > 0) # y += posy[k] # x += posx[k] # dico["brown_" + str(k + 1)] = {'coords': [(x[c], y[c]) for c in range(len(x))], # 'color': "magenta", # 'id': k + 1, # 'class': 'spe'} # only box version for k in range(len(posx)): image = numpy.zeros((500, 500), numpy.uint8) c[0, :] = 1 c[:, 0] = 1 c[-1, :] = 1 c[:, -1] = 1 y, x = numpy.where(c > 0) y += posy[k] x += posx[k] dico["brown_" + str(k + 1)] = {'coords': [(x[c], y[c]) for c in range(len(x))], 'color': 'magenta', 'id': k + 1, 'class': 'spe'} name = slidename.split('.')[0] n = 1 if os.path.exists(name + ".annot"): while os.path.exists(name + "_" + str(n) + ".annot"): n += 1 with open(name + "_" + str(n) + ".annot", "wb") as f: pickle.dump(dico, f) else: with open(name + ".annot", "wb") as f: pickle.dump(dico, f)
[ "os.path.exists", "pickle.dump", "numpy.where", "skimage.io.imread", "numpy.zeros", "openslide.OpenSlide" ]
[((294, 314), 'openslide.OpenSlide', 'OpenSlide', (['slidename'], {}), '(slidename)\n', (303, 314), False, 'from openslide import OpenSlide\n'), ((329, 346), 'skimage.io.imread', 'imread', (['probapath'], {}), '(probapath)\n', (335, 346), False, 'from skimage.io import imread\n'), ((424, 452), 'numpy.where', 'numpy.where', (['(probaimage > 84)'], {}), '(probaimage > 84)\n', (435, 452), False, 'import numpy\n'), ((1810, 1841), 'os.path.exists', 'os.path.exists', (["(name + '.annot')"], {}), "(name + '.annot')\n", (1824, 1841), False, 'import os\n'), ((1361, 1397), 'numpy.zeros', 'numpy.zeros', (['(500, 500)', 'numpy.uint8'], {}), '((500, 500), numpy.uint8)\n', (1372, 1397), False, 'import numpy\n'), ((1475, 1493), 'numpy.where', 'numpy.where', (['(c > 0)'], {}), '(c > 0)\n', (1486, 1493), False, 'import numpy\n'), ((1983, 2003), 'pickle.dump', 'pickle.dump', (['dico', 'f'], {}), '(dico, f)\n', (1994, 2003), False, 'import pickle\n'), ((2062, 2082), 'pickle.dump', 'pickle.dump', (['dico', 'f'], {}), '(dico, f)\n', (2073, 2082), False, 'import pickle\n')]
""" Author: <NAME> Construct paper sequence for papers/authors predict_paper. """ import sys import os BASE_PATH = os.path.abspath(os.path.join(os.getcwd())) sys.path.append(BASE_PATH) import pickle import random import numpy as np import config dataset_ids = dict() min_citations = config.min_citations seed = 1234 # load with open(config.p2_cited_citing_lst, 'r') as f: num_data = 0 for line in f: num_data += 1 p_id = line.split(':')[0] p_citing = line.split(':')[1].split(',') if len(p_citing) < min_citations: # filter those papers/authors which # citations are less than 'min_citations' continue p_citing = [int(xovee) for xovee in p_citing] dataset_ids[int(p_id)] = p_citing l = list(dataset_ids.items()) random.seed(seed) random.shuffle(l) dataset_ids = dict(l) with open(config.p2_cited_dict, 'wb') as f: print('Number of valid cascades: {}/{}'.format(len(dataset_ids), num_data)) pickle.dump(dataset_ids, f) def p2a(input, output): paper2authors = dict() with open(input, 'r') as f: for line in f: p_id = line.split(':')[0] p_authors = line.split(':')[1].split(',') p_authors = [int(xovee) for xovee in p_authors] paper2authors[int(p_id)] = p_authors max_authors = 0 for author in paper2authors.values(): if len(author) > max_authors: max_authors = len(author) # with open(output, 'wb') as f: # pickle.dump(paper2authors, f) return paper2authors def p2v(input, output): paper2venue = dict() with open(input, 'r') as f: for line in f: p_id = int(line.split(',')[0]) p_v = int(line.split(',')[1]) paper2venue[p_id] = p_v # with open(output, 'wb') as f: # pickle.dump(paper2venue, f) return paper2venue with open(config.p2_cited_dict, 'rb') as f: cited = pickle.load(f) paper2authors = p2a(config.p_a_lst, config.p_p2a) paper2venue = p2v(config.p_v_lst, config.p_p2v) x_ids = dict() err = 0 for p_id, p_citing in cited.items(): try: x_ids[p_id] = [(p_c, paper2venue[p_c], paper2authors[p_c]) for p_c in p_citing] except KeyError: err += 1 print('# KeyErrors:', err) max_seq = 0 for v in 
x_ids.values(): if len(v) > max_seq: max_seq = len(v) print('Max # sequence:', max_seq) print('# samples:', len(x_ids)) x = list() x_authors = list() x_idx = list() with open(config.p_emb_dict, 'rb') as f: a_emb, p_emb, v_emb = pickle.load(f) for p_id, p_ids in x_ids.items(): x_idx.append(p_id) temp_x = list() # embeddings of the original paper temp_x.append(np.concatenate([p_emb[p_id], v_emb[paper2venue[p_id]]])) # embeddings of papers who cite original paper for p_citing in p_ids[:config.seq_length - 1]: temp_x.append(np.concatenate([p_emb[p_citing[0]], v_emb[p_citing[1]]])) x.append(temp_x) for p_id, p_ids in x_ids.items(): temp_x_authors = list() # embeddings of the original paper temp_x_authors.append([a_emb[author] for author in paper2authors[p_id][:config.author_length]]) # embeddings of papers who cite original paper for p_citing in p_ids[:config.seq_length - 1]: temp_x_authors.append([a_emb[author] for author in p_citing[2][:config.author_length]]) x_authors.append(temp_x_authors) with open(config.p_x, 'wb') as f: pickle.dump(x, f) with open(config.p_x_authors, 'wb') as f: pickle.dump(x_authors, f) temp_y = dict() y = list() with open(config.p20_cited_citing_lst, 'r') as f: for line in f: line = line.strip() p_id = int(line.split(':')[0]) temp_y[p_id] = int(line.split(':')[1]) for x_id in x_idx: y.append(temp_y[x_id]) with open(config.p_y, 'wb') as f: print('# samples:', len(y)) pickle.dump(y, f)
[ "pickle.dump", "random.shuffle", "pickle.load", "random.seed", "os.getcwd", "numpy.concatenate", "sys.path.append" ]
[((158, 184), 'sys.path.append', 'sys.path.append', (['BASE_PATH'], {}), '(BASE_PATH)\n', (173, 184), False, 'import sys\n'), ((785, 802), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (796, 802), False, 'import random\n'), ((803, 820), 'random.shuffle', 'random.shuffle', (['l'], {}), '(l)\n', (817, 820), False, 'import random\n'), ((973, 1000), 'pickle.dump', 'pickle.dump', (['dataset_ids', 'f'], {}), '(dataset_ids, f)\n', (984, 1000), False, 'import pickle\n'), ((1937, 1951), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1948, 1951), False, 'import pickle\n'), ((2548, 2562), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2559, 2562), False, 'import pickle\n'), ((3437, 3454), 'pickle.dump', 'pickle.dump', (['x', 'f'], {}), '(x, f)\n', (3448, 3454), False, 'import pickle\n'), ((3502, 3527), 'pickle.dump', 'pickle.dump', (['x_authors', 'f'], {}), '(x_authors, f)\n', (3513, 3527), False, 'import pickle\n'), ((3860, 3877), 'pickle.dump', 'pickle.dump', (['y', 'f'], {}), '(y, f)\n', (3871, 3877), False, 'import pickle\n'), ((144, 155), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (153, 155), False, 'import os\n'), ((2699, 2754), 'numpy.concatenate', 'np.concatenate', (['[p_emb[p_id], v_emb[paper2venue[p_id]]]'], {}), '([p_emb[p_id], v_emb[paper2venue[p_id]]])\n', (2713, 2754), True, 'import numpy as np\n'), ((2880, 2936), 'numpy.concatenate', 'np.concatenate', (['[p_emb[p_citing[0]], v_emb[p_citing[1]]]'], {}), '([p_emb[p_citing[0]], v_emb[p_citing[1]]])\n', (2894, 2936), True, 'import numpy as np\n')]
import numpy as np import pandas as pd from collections import Counter from skmultilearn.model_selection.measures import get_combination_wise_output_matrix from skmultilearn.model_selection import iterative_train_test_split from tqdm import tqdm def get_section(report, section_name, section_status = False): if report.find(section_name+':')!=-1: words = report.split(section_name+':')[1] uppercasecounter = 0 for id, char in enumerate(words): if char.isupper(): uppercasecounter+=1 else: uppercasecounter=0 if uppercasecounter>5: words = words[:(id-uppercasecounter)] section = words.replace('\n', ' ') section_status = True else: section = '' return section, section_status def replace_all(text, dic): for i, j in dic.items(): text = text.replace(i, j) return text def process_sections(sections_text, sections_names): impression = '' indication = '' findings = '' d = {' ': '', '_': '', '\n': '', '\t': ''} if replace_all(sections_text[0],d) != '': impression = 'IMPRESSION: {}'.format(sections_text[0]) for s_id in range(len(sections_text[4:])): if replace_all(sections_text[s_id+4],d)!='': indication += '{}: {} \n'.format(sections_names[s_id+4], sections_text[4+s_id]) findings = 'FINDINGS: {}'.format(sections_text[3]) if impression == '' or indication == '': impression = '' indication = '' return impression, indication, findings def extract_txt_file(path): with open(path, 'r') as file: report = file.read() sections = ['IMPRESSION', 'CONCLUSION','PROVISIONAL FINDINGS IMPRESSION (PFI)', 'FINDINGS','INDICATION', 'HISTORY','TECHNIQUE','STUDY','EXAM'] section_texts = [] section_exists = [] # indication finder for s in sections: text, text_exist = get_section(report, s) section_texts.append(text) section_exists.append(text_exist) # impression postprocessing if not section_exists[0] and section_exists[1]: section_exists[0] = True section_texts[0] = section_texts[1] elif not section_exists[0] and section_exists[2]: section_exists[0] = True section_texts[0] = section_texts[2] # OPTINAL: 
if no impression present: take findings as impression # if not section_exists[0] and section_exists[3]: # section_exists[0] = True # section_texts[0] = section_texts[3] impression, indication, findings = process_sections(section_texts, sections) return impression, indication, findings def mergeMIMIC(): result = pd.read_csv('physionet.org/files/mimic-cxr-jpg/2.0.0/mimic-cxr-2.0.0-chexpert.csv') df = pd.read_csv('physionet.org/files/mimic-cxr-jpg/2.0.0/mimic-cxr-2.0.0-metadata.csv') df = df.loc[(df['ViewPosition'] == 'PA') | (df['ViewPosition'] == 'AP')] new_result = pd.DataFrame(columns=np.append(result.columns.values, np.array(['Path', 'Path_compr', 'Indication', 'Impression', 'Findings'])), index=range(df["dicom_id"].values.shape[0])) print(new_result) paths = df["dicom_id"].values.copy() empty = 0 c_nf = 0 for i in tqdm(range(paths.shape[0])): p_compr = 'physionet.org/files/mimic-cxr-jpg/compressed_images224/files/' + 'p{}/p{}/s{}/'.format( str(df['subject_id'].values[i])[:2], df['subject_id'].values[i], df['study_id'].values[i]) + paths[ i] + '.jpg' p_txt = 'physionet.org/files/mimic-cxr/2.0.0/files/' + 'p{}/p{}/s{}.txt'.format( str(df['subject_id'].values[i])[:2], df['subject_id'].values[i], df['study_id'].values[i]) p = 'physionet.org/files/mimic-cxr-jpg/2.0.0/files/'+'p{}/p{}/s{}/'.format(str(df['subject_id'].values[i])[:2], df['subject_id'].values[i],df['study_id'].values[i])+paths[i]+'.jpg' result_index = result.index[(result['subject_id'] == df['subject_id'].values[i]) & (result['study_id'] == df['study_id'].values[i])] impression, indication, findings = extract_txt_file(p_txt) try: if impression != '': class_values = result.loc[result_index].values[0] class_values = np.nan_to_num(class_values) class_values = np.where(class_values==-1.0, 0.0, class_values) class_values = np.where(class_values == -9.0, 0.0, class_values) if np.count_nonzero(class_values[2:])==0: class_values[10] = 1.0 input = list(class_values) + [p, p_compr, indication, impression, 
findings] new_result.iloc[i] = input c_nf+=1 except: print(input) print("SHITSHIT") empty+=1 print("empty: {}".format(empty)) print(c_nf) new_result.to_csv("physionet.org/files/mimic-cxr-jpg/2.0.0/total_multi_mimic_0706_textgen.csv") def stratify(): df = pd.read_csv("physionet.org/files/mimic-cxr-jpg/2.0.0/improved_multi_mimic_0709_text_gen.csv", usecols=['Path_compr','Indication', 'Impression', 'Findings', 'No Finding', 'Enlarged ' 'Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity','Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture', 'Support Devices']) totalX = df[['Path_compr','Indication', 'Impression', 'Findings']].values totalY = df[['No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis','Pneumothorax', 'Pleural Effusion', 'Pleural Other','Fracture', 'Support Devices']].values print(totalX.shape) print(totalY.shape) totalX = np.expand_dims(totalX, axis=1) print("PRE ITERATIVE") X_train, y_train, X_test, y_test = iterative_train_test_split(totalX, totalY, 0.2) print("COMBINATION") df = pd.DataFrame({ 'train': Counter( str(combination) for row in get_combination_wise_output_matrix(y_train, order=2) for combination in row), 'test': Counter( str(combination) for row in get_combination_wise_output_matrix(y_test, order=2) for combination in row) }).T.fillna(0.0) print(df.to_string()) X_train = np.squeeze(X_train, axis=1) X_test = np.squeeze(X_test, axis=1) print(X_train.shape) print(y_train.shape) print(X_test.shape) print(y_test.shape) print("WRITING Train") dfTotal2 = pd.DataFrame(columns=['Path_compr','Indication', 'Impression', 'Findings', 'No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis','Pneumothorax', 'Pleural Effusion', 'Pleural Other','Fracture', 'Support Devices']) print(dfTotal2.shape) 
dfTotal2[['Path_compr','Indication', 'Impression', 'Findings']] = pd.DataFrame(X_train) dfTotal2[['No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis','Pneumothorax', 'Pleural Effusion', 'Pleural Other','Fracture', 'Support Devices']] = y_train with open("physionet.org/files/mimic-cxr-jpg/2.0.0/train_multi2_v3.csv", mode='w', newline='\n') as f: dfTotal2.to_csv(f, sep=",", float_format='%.2f', index=False, line_terminator='\n', encoding='utf-8') print("WRITING Test") dfTotal2 = pd.DataFrame(columns=['Path_compr','Indication', 'Impression', 'Findings', 'No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis','Pneumothorax', 'Pleural Effusion', 'Pleural Other','Fracture', 'Support Devices']) dfTotal2[['Path_compr','Indication', 'Impression', 'Findings']] = pd.DataFrame(X_test) dfTotal2[['No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis','Pneumothorax', 'Pleural Effusion', 'Pleural Other','Fracture', 'Support Devices']] = y_test with open("physionet.org/files/mimic-cxr-jpg/2.0.0/test_multi_v3.csv", mode='w', newline='\n') as f: dfTotal2.to_csv(f, sep=",", float_format='%.2f', index=False, line_terminator='\n', encoding='utf-8') def stratify_val(): df = pd.read_csv("physionet.org/files/mimic-cxr-jpg/2.0.0/train_multi2_v3.csv", usecols=['Path_compr', 'Indication', 'Impression', 'Findings', 'No Finding', 'Enlarged ' 'Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture', 'Support Devices']) totalX = df[['Path_compr', 'Indication', 'Impression', 'Findings']].values totalY = df[['No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity', 'Lung Lesion', 'Edema', 
'Consolidation', 'Pneumonia', 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture', 'Support Devices']].values print(totalX.shape) print(totalY.shape) totalX = np.expand_dims(totalX, axis=1) print("PRE ITERATIVE") X_train, y_train, X_test, y_test = iterative_train_test_split(totalX, totalY, 0.2) print("COMBINATION") df = pd.DataFrame({ 'train': Counter( str(combination) for row in get_combination_wise_output_matrix(y_train, order=2) for combination in row), 'test': Counter( str(combination) for row in get_combination_wise_output_matrix(y_test, order=2) for combination in row) }).T.fillna(0.0) print(df.to_string()) X_train = np.squeeze(X_train, axis=1) X_test = np.squeeze(X_test, axis=1) print(X_train.shape) print(y_train.shape) print(X_test.shape) print(y_test.shape) print("WRITING Train") dfTotal2 = pd.DataFrame( columns=['Path_compr', 'Indication', 'Impression', 'Findings', 'No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture', 'Support Devices']) print(dfTotal2.shape) dfTotal2[['Path_compr', 'Indication', 'Impression', 'Findings']] = pd.DataFrame(X_train) dfTotal2[['No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture', 'Support Devices']] = y_train with open("physionet.org/files/mimic-cxr-jpg/2.0.0/train_multi_v3.csv", mode='w', newline='\n') as f: dfTotal2.to_csv(f, sep=",", float_format='%.2f', index=False, line_terminator='\n', encoding='utf-8') print("WRITING Test") dfTotal2 = pd.DataFrame(columns=['Path_compr', 'Indication', 'Impression', 'Findings', 'No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 
'Pleural Other', 'Fracture', 'Support Devices']) dfTotal2[['Path_compr', 'Indication', 'Impression', 'Findings']] = pd.DataFrame(X_test) dfTotal2[['No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture', 'Support Devices']] = y_test with open("physionet.org/files/mimic-cxr-jpg/2.0.0/val_multi_v3.csv", mode='w', newline='\n') as f: dfTotal2.to_csv(f, sep=",", float_format='%.2f', index=False, line_terminator='\n', encoding='utf-8') if __name__ == '__main__': mergeMIMIC() stratify() stratify_val()
[ "pandas.read_csv", "numpy.where", "numpy.squeeze", "numpy.count_nonzero", "numpy.array", "skmultilearn.model_selection.measures.get_combination_wise_output_matrix", "skmultilearn.model_selection.iterative_train_test_split", "numpy.expand_dims", "pandas.DataFrame", "numpy.nan_to_num" ]
[((2700, 2788), 'pandas.read_csv', 'pd.read_csv', (['"""physionet.org/files/mimic-cxr-jpg/2.0.0/mimic-cxr-2.0.0-chexpert.csv"""'], {}), "(\n 'physionet.org/files/mimic-cxr-jpg/2.0.0/mimic-cxr-2.0.0-chexpert.csv')\n", (2711, 2788), True, 'import pandas as pd\n'), ((2793, 2881), 'pandas.read_csv', 'pd.read_csv', (['"""physionet.org/files/mimic-cxr-jpg/2.0.0/mimic-cxr-2.0.0-metadata.csv"""'], {}), "(\n 'physionet.org/files/mimic-cxr-jpg/2.0.0/mimic-cxr-2.0.0-metadata.csv')\n", (2804, 2881), True, 'import pandas as pd\n'), ((4997, 5409), 'pandas.read_csv', 'pd.read_csv', (['"""physionet.org/files/mimic-cxr-jpg/2.0.0/improved_multi_mimic_0709_text_gen.csv"""'], {'usecols': "['Path_compr', 'Indication', 'Impression', 'Findings', 'No Finding',\n 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity',\n 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis',\n 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture',\n 'Support Devices']"}), "(\n 'physionet.org/files/mimic-cxr-jpg/2.0.0/improved_multi_mimic_0709_text_gen.csv'\n , usecols=['Path_compr', 'Indication', 'Impression', 'Findings',\n 'No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly',\n 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia',\n 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 'Pleural Other',\n 'Fracture', 'Support Devices'])\n", (5008, 5409), True, 'import pandas as pd\n'), ((6062, 6092), 'numpy.expand_dims', 'np.expand_dims', (['totalX'], {'axis': '(1)'}), '(totalX, axis=1)\n', (6076, 6092), True, 'import numpy as np\n'), ((6160, 6207), 'skmultilearn.model_selection.iterative_train_test_split', 'iterative_train_test_split', (['totalX', 'totalY', '(0.2)'], {}), '(totalX, totalY, 0.2)\n', (6186, 6207), False, 'from skmultilearn.model_selection import iterative_train_test_split\n'), ((6641, 6668), 'numpy.squeeze', 'np.squeeze', (['X_train'], {'axis': '(1)'}), '(X_train, axis=1)\n', (6651, 6668), True, 'import numpy as np\n'), ((6682, 6708), 
'numpy.squeeze', 'np.squeeze', (['X_test'], {'axis': '(1)'}), '(X_test, axis=1)\n', (6692, 6708), True, 'import numpy as np\n'), ((6853, 7174), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Path_compr', 'Indication', 'Impression', 'Findings', 'No Finding',\n 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity',\n 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis',\n 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture',\n 'Support Devices']"}), "(columns=['Path_compr', 'Indication', 'Impression', 'Findings',\n 'No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly',\n 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia',\n 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 'Pleural Other',\n 'Fracture', 'Support Devices'])\n", (6865, 7174), True, 'import pandas as pd\n'), ((7286, 7307), 'pandas.DataFrame', 'pd.DataFrame', (['X_train'], {}), '(X_train)\n', (7298, 7307), True, 'import pandas as pd\n'), ((7876, 8197), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Path_compr', 'Indication', 'Impression', 'Findings', 'No Finding',\n 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity',\n 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis',\n 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture',\n 'Support Devices']"}), "(columns=['Path_compr', 'Indication', 'Impression', 'Findings',\n 'No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly',\n 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia',\n 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 'Pleural Other',\n 'Fracture', 'Support Devices'])\n", (7888, 8197), True, 'import pandas as pd\n'), ((8320, 8340), 'pandas.DataFrame', 'pd.DataFrame', (['X_test'], {}), '(X_test)\n', (8332, 8340), True, 'import pandas as pd\n'), ((8890, 9277), 'pandas.read_csv', 'pd.read_csv', (['"""physionet.org/files/mimic-cxr-jpg/2.0.0/train_multi2_v3.csv"""'], {'usecols': "['Path_compr', 'Indication', 
'Impression', 'Findings', 'No Finding',\n 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity',\n 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis',\n 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture',\n 'Support Devices']"}), "('physionet.org/files/mimic-cxr-jpg/2.0.0/train_multi2_v3.csv',\n usecols=['Path_compr', 'Indication', 'Impression', 'Findings',\n 'No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly',\n 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia',\n 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 'Pleural Other',\n 'Fracture', 'Support Devices'])\n", (8901, 9277), True, 'import pandas as pd\n'), ((10340, 10370), 'numpy.expand_dims', 'np.expand_dims', (['totalX'], {'axis': '(1)'}), '(totalX, axis=1)\n', (10354, 10370), True, 'import numpy as np\n'), ((10438, 10485), 'skmultilearn.model_selection.iterative_train_test_split', 'iterative_train_test_split', (['totalX', 'totalY', '(0.2)'], {}), '(totalX, totalY, 0.2)\n', (10464, 10485), False, 'from skmultilearn.model_selection import iterative_train_test_split\n'), ((10919, 10946), 'numpy.squeeze', 'np.squeeze', (['X_train'], {'axis': '(1)'}), '(X_train, axis=1)\n', (10929, 10946), True, 'import numpy as np\n'), ((10960, 10986), 'numpy.squeeze', 'np.squeeze', (['X_test'], {'axis': '(1)'}), '(X_test, axis=1)\n', (10970, 10986), True, 'import numpy as np\n'), ((11130, 11451), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Path_compr', 'Indication', 'Impression', 'Findings', 'No Finding',\n 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity',\n 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis',\n 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture',\n 'Support Devices']"}), "(columns=['Path_compr', 'Indication', 'Impression', 'Findings',\n 'No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly',\n 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia',\n 'Atelectasis', 
'Pneumothorax', 'Pleural Effusion', 'Pleural Other',\n 'Fracture', 'Support Devices'])\n", (11142, 11451), True, 'import pandas as pd\n'), ((11576, 11597), 'pandas.DataFrame', 'pd.DataFrame', (['X_train'], {}), '(X_train)\n', (11588, 11597), True, 'import pandas as pd\n'), ((12164, 12485), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Path_compr', 'Indication', 'Impression', 'Findings', 'No Finding',\n 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity',\n 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis',\n 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture',\n 'Support Devices']"}), "(columns=['Path_compr', 'Indication', 'Impression', 'Findings',\n 'No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly',\n 'Lung Opacity', 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia',\n 'Atelectasis', 'Pneumothorax', 'Pleural Effusion', 'Pleural Other',\n 'Fracture', 'Support Devices'])\n", (12176, 12485), True, 'import pandas as pd\n'), ((12689, 12709), 'pandas.DataFrame', 'pd.DataFrame', (['X_test'], {}), '(X_test)\n', (12701, 12709), True, 'import pandas as pd\n'), ((3025, 3097), 'numpy.array', 'np.array', (["['Path', 'Path_compr', 'Indication', 'Impression', 'Findings']"], {}), "(['Path', 'Path_compr', 'Indication', 'Impression', 'Findings'])\n", (3033, 3097), True, 'import numpy as np\n'), ((4264, 4291), 'numpy.nan_to_num', 'np.nan_to_num', (['class_values'], {}), '(class_values)\n', (4277, 4291), True, 'import numpy as np\n'), ((4323, 4372), 'numpy.where', 'np.where', (['(class_values == -1.0)', '(0.0)', 'class_values'], {}), '(class_values == -1.0, 0.0, class_values)\n', (4331, 4372), True, 'import numpy as np\n'), ((4402, 4451), 'numpy.where', 'np.where', (['(class_values == -9.0)', '(0.0)', 'class_values'], {}), '(class_values == -9.0, 0.0, class_values)\n', (4410, 4451), True, 'import numpy as np\n'), ((4472, 4506), 'numpy.count_nonzero', 'np.count_nonzero', (['class_values[2:]'], {}), '(class_values[2:])\n', 
(4488, 4506), True, 'import numpy as np\n'), ((6324, 6376), 'skmultilearn.model_selection.measures.get_combination_wise_output_matrix', 'get_combination_wise_output_matrix', (['y_train'], {'order': '(2)'}), '(y_train, order=2)\n', (6358, 6376), False, 'from skmultilearn.model_selection.measures import get_combination_wise_output_matrix\n'), ((6491, 6542), 'skmultilearn.model_selection.measures.get_combination_wise_output_matrix', 'get_combination_wise_output_matrix', (['y_test'], {'order': '(2)'}), '(y_test, order=2)\n', (6525, 6542), False, 'from skmultilearn.model_selection.measures import get_combination_wise_output_matrix\n'), ((10602, 10654), 'skmultilearn.model_selection.measures.get_combination_wise_output_matrix', 'get_combination_wise_output_matrix', (['y_train'], {'order': '(2)'}), '(y_train, order=2)\n', (10636, 10654), False, 'from skmultilearn.model_selection.measures import get_combination_wise_output_matrix\n'), ((10769, 10820), 'skmultilearn.model_selection.measures.get_combination_wise_output_matrix', 'get_combination_wise_output_matrix', (['y_test'], {'order': '(2)'}), '(y_test, order=2)\n', (10803, 10820), False, 'from skmultilearn.model_selection.measures import get_combination_wise_output_matrix\n')]
import torch import torch.nn as nn import torch.sparse from torch.autograd import Variable import numpy as np import sys from torch.autograd import Function import math import h5py import json # from . import resnet1 import matplotlib.pyplot as plt from skimage.transform import resize ############################################################################### # Functions ############################################################################### def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.02) elif classname.find('BatchNorm2d') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) def get_norm_layer(norm_type): if norm_type == 'batch': norm_layer = nn.BatchNorm2d elif norm_type == 'instance': norm_layer = nn.InstanceNorm2d else: print('normalization layer [%s] is not found' % norm) return norm_layer def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, gpu_ids=[]): netG = None use_gpu = len(gpu_ids) > 0 norm_layer = get_norm_layer(norm_type=norm) if use_gpu: assert(torch.cuda.is_available()) if which_model_netG == 'resnet_9blocks': netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids) elif which_model_netG == 'resnet_6blocks': netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids) elif which_model_netG == 'unet_128': netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids) elif which_model_netG == 'unet_256': # netG = SingleUnetGenerator_S(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids) # netG = SingleUnetGenerator_R(input_nc, output_nc, 7, ngf, norm_layer=nn.BatchNorm2d, use_dropout=use_dropout, gpu_ids=gpu_ids, ) output_nc = 3 # netG2 = SingleUnetGenerator_R2(input_nc, output_nc, 7, ngf, 
norm_layer=nn.BatchNorm2d, use_dropout=use_dropout, gpu_ids=gpu_ids) # netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids) netG = MultiUnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids) # output_nc_R = 3 # netR = SingleUnetGenerator_R(input_nc, output_nc_R, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids) # output_nc_L = 3 # netL = SingleUnetGenerator_R(input_nc, output_nc_L, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids) else: print('Generator model name [%s] is not recognized' % which_model_netG) if len(gpu_ids) > 0: netG.cuda(gpu_ids[0]) # netR.cuda(gpu_ids[0]) # netL.cuda(gpu_ids[0]) netG.apply(weights_init) # netR.apply(weights_init) # netL.apply(weights_init) return netG def define_D(input_nc, ndf, which_model_netD, n_layers_D=3, norm='batch', use_sigmoid=False, gpu_ids=[]): netD = None use_gpu = len(gpu_ids) > 0 norm_layer = get_norm_layer(norm_type=norm) if use_gpu: assert(torch.cuda.is_available()) if which_model_netD == 'basic': netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids) elif which_model_netD == 'n_layers': netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids) else: print('Discriminator model name [%s] is not recognized' % which_model_netD) if use_gpu: netD.cuda(device_id=gpu_ids[0]) netD.apply(weights_init) return netD def print_network(net): num_params = 0 for param in net.parameters(): num_params += param.numel() print(net) print('Total number of parameters: %d' % num_params) ############################################################################## # Classes ############################################################################## class Sparse(Function): # Sparse matrix for S def forward(self, input, S): self.save_for_backward(S) output 
= torch.mm(S, input) # output = output.cuda() return output # This function has only a single output, so it gets only one gradient def backward(self, grad_output): S, = self.saved_tensors grad_weight = None grad_input = torch.mm(S.t(), grad_output) # grad_input = grad_input.cuda() return grad_input, grad_weight class JointLoss(nn.Module): def __init__(self): super(JointLoss, self).__init__() self.w_ss_local = 2.0 self.w_SAW = 1.0 self.w_rs_local = 1.0 self.w_reconstr = 2.0 self.w_reconstr_real = 2.0 self.w_rs_dense = 2.0 self.w_ls = 2.0 self.w_ss_dense = 4.0 self.w_sp = 0.25 self.w_IIW = 4.0 self.w_feature = 0.75 self.w_grad = 0.25 self.local_s_w = np.array([[0.5, 0.5, 0.5, 0.5, 0.5], \ [0.5, 1 , 1 , 1, 0.5],\ [0.5, 1, 1, 1, 0.5],\ [0.5, 1, 1, 1, 0.5],\ [0.5, 0.5, 0.5, 0.5, 0.5]]) x = np.arange(-1, 2) y = np.arange(-1, 2) self.X, self.Y = np.meshgrid(x, y) # self.h_offset = [0,0,0,1,1,2,2,2,1] # self.w_offset = [0,1,2,0,2,0,1,2,1] self.total_loss = None self.running_stage = 0 def BilateralRefSmoothnessLoss(self, pred_R, targets, att, num_features): # pred_R = pred_R.cpu() total_loss = Variable(torch.cuda.FloatTensor(1)) total_loss[0] = 0 N = pred_R.size(2) * pred_R.size(3) Z = (pred_R.size(1) * N ) # grad_input = torch.FloatTensor(pred_R.size()) # grad_input = grad_input.zero_() for i in range(pred_R.size(0)): # for each image B_mat = targets[att+'B_list'][i] # still list of blur sparse matrices S_mat = Variable(targets[att + 'S'][i].cuda(), requires_grad = False) # Splat and Slicing matrix n_vec = Variable(targets[att + 'N'][i].cuda(), requires_grad = False) # bi-stochatistic vector, which is diagonal matrix p = pred_R[i,:,:,:].view(pred_R.size(1),-1).t() # NX3 # p'p # p_norm = torch.mm(p.t(), p) # p_norm_sum = torch.trace(p_norm) p_norm_sum = torch.sum(torch.mul(p,p)) # S * N * p Snp = torch.mul(n_vec.repeat(1,pred_R.size(1)), p) sp_mm = Sparse() Snp = sp_mm(Snp, S_mat) Snp_1 = Snp.clone() Snp_2 = Snp.clone() # # blur for f in range(num_features+1): B_var1 = 
Variable(B_mat[f].cuda(), requires_grad = False) sp_mm1 = Sparse() Snp_1 = sp_mm1(Snp_1, B_var1) B_var2 = Variable(B_mat[num_features-f].cuda(), requires_grad = False) sp_mm2 = Sparse() Snp_2 = sp_mm2(Snp_2, B_var2) Snp_12 = Snp_1 + Snp_2 pAp = torch.sum(torch.mul(Snp, Snp_12)) total_loss = total_loss + ((p_norm_sum - pAp)/Z) total_loss = total_loss/pred_R.size(0) # average over all images return total_loss def SUNCGReconstLoss(self, R, S, mask, targets): rgb_img = Variable(targets['rgb_img'].cuda(), requires_grad = False) S = S.repeat(1,3,1,1) chromaticity = Variable(targets['chromaticity'].cuda(), requires_grad = False) R = torch.mul(chromaticity, R.repeat(1,3,1,1)) return torch.mean( torch.pow(torch.mul(mask, rgb_img - torch.mul(R, S)), 2) ) def IIWReconstLoss(self, R, S, targets): S = S.repeat(1,3,1,1) rgb_img = Variable(targets['rgb_img'].cuda(), requires_grad = False) # 1 channel chromaticity = Variable(targets['chromaticity'].cuda(), requires_grad = False) p_R = torch.mul(chromaticity, R.repeat(1,3,1,1)) # return torch.mean( torch.mul(L, torch.pow( torch.log(rgb_img) - torch.log(p_R) - torch.log(S), 2))) return torch.mean( torch.pow( rgb_img - torch.mul(p_R, S), 2)) def Ranking_Loss(self, prediction_R, judgements, is_flip): #ranking loss for each prediction feature tau = 0.25 #abs(I1 - I2)) ) #1.2 * (1 + math.fabs(math.log(I1) - math.log(I2) ) ) points = judgements['intrinsic_points'] comparisons = judgements['intrinsic_comparisons'] id_to_points = {p['id']: p for p in points} rows = prediction_R.size(1) cols = prediction_R.size(2) num_valid_comparisons = 0 num_valid_comparisons_ineq =0 num_valid_comparisons_eq = 0 total_loss_eq = Variable(torch.cuda.FloatTensor(1)) total_loss_eq[0] = 0 total_loss_ineq = Variable(torch.cuda.FloatTensor(1)) total_loss_ineq[0] = 0 for c in comparisons: # "darker" is "J_i" in our paper darker = c['darker'] if darker not in ('1', '2', 'E'): continue # "darker_score" is "w_i" in our paper # remove unconfident point weight = 
c['darker_score'] if weight < 0.5 or weight is None: continue point1 = id_to_points[c['point1']] point2 = id_to_points[c['point2']] if not point1['opaque'] or not point2['opaque']: continue # if is_flip: # l1 = prediction_R[:, int(point1['y'] * rows), cols - 1 - int( point1['x'] * cols)] # l2 = prediction_R[:, int(point2['y'] * rows), cols - 1 - int( point2['x'] * cols)] # else: l1 = prediction_R[:, int(point1['y'] * rows), int(point1['x'] * cols)] l2 = prediction_R[:, int(point2['y'] * rows), int(point2['x'] * cols)] l1_m = l1 #torch.mean(l1) l2_m = l2 #torch.mean(l2) # print(int(point1['y'] * rows), int(point1['x'] * cols), int(point2['y'] * rows), int(point2['x'] * cols), darker) # print(point1['y'], point1['x'], point2['y'], point2['x'], c['point1'], c['point2']) # print("===============================================================") # l2 > l1, l2 is brighter # if darker == '1' and ((l1_m.data[0] / l2_m.data[0]) > 1.0/tau): # # loss =0 # loss = weight * torch.mean((tau - (l2_m / l1_m))) # num_valid_comparisons += 1 # # l1 > l2, l1 is brighter # elif darker == '2' and ((l2_m.data[0] / l1_m.data[0]) > 1.0/tau): # # loss =0 # loss = weight * torch.mean((tau - (l1_m / l2_m))) # num_valid_comparisons += 1 # # is equal # elif darker == 'E': # loss = weight * torch.mean(torch.abs(l2 - l1)) # num_valid_comparisons += 1 # else: # loss = 0.0 # l2 is brighter if darker == '1' and ((l1_m.data[0] - l2_m.data[0]) > - tau): # print("dark 1", l1_m.data[0] - l2_m.data[0]) total_loss_ineq += weight * torch.mean( torch.pow( tau - (l2_m - l1_m), 2) ) num_valid_comparisons_ineq += 1. # print("darker 1 loss", l2_m.data[0], l1_m.data[0], loss.data[0]) # l1 > l2, l1 is brighter elif darker == '2' and ((l2_m.data[0] - l1_m.data[0]) > - tau): # print("dark 2", l2_m.data[0] - l1_m.data[0]) total_loss_ineq += weight * torch.mean( torch.pow( tau - (l1_m - l2_m),2) ) num_valid_comparisons_ineq += 1. 
# print("darker 2 loss", l2_m.data[0], l1_m.data[0], loss.data[0]) elif darker == 'E': total_loss_eq += weight * torch.mean( torch.pow(l2 - l1,2) ) num_valid_comparisons_eq += 1. else: loss = 0.0 total_loss = total_loss_ineq + total_loss_eq num_valid_comparisons = num_valid_comparisons_eq + num_valid_comparisons_ineq # print("average eq loss", total_loss_eq.data[0]/(num_valid_comparisons_eq + 1e-6)) # print("average ineq loss", total_loss_ineq.data[0]/(num_valid_comparisons_ineq + 1e-6)) return total_loss/(num_valid_comparisons + 1e-6) def BatchRankingLoss(self, prediction_R, judgements_eq, judgements_ineq, random_filp): eq_loss, ineq_loss = 0, 0 num_valid_eq = 0 num_valid_ineq = 0 tau = 0.425 rows = prediction_R.size(1) cols = prediction_R.size(2) num_channel = prediction_R.size(0) # evaluate equality annotations densely if judgements_eq.size(1) > 2: judgements_eq = judgements_eq.cuda() R_vec = prediction_R.view(num_channel, -1) # R_vec = torch.exp(R_vec) # I_vec = I.view(1, -1) y_1 = torch.floor(judgements_eq[:,0] * rows).long() y_2 = torch.floor(judgements_eq[:,2] * rows).long() if random_filp: x_1 = cols - 1 - torch.floor(judgements_eq[:,1] * cols).long() x_2 = cols - 1 - torch.floor(judgements_eq[:,3] * cols).long() else: x_1 = torch.floor(judgements_eq[:,1] * cols).long() x_2 = torch.floor(judgements_eq[:,3] * cols).long() # compute linear index for point 1 # y_1 = torch.floor(judgements_eq[:,0] * rows).long() # x_1 = torch.floor(judgements_eq[:,1] * cols).long() point_1_idx_linaer = y_1 * cols + x_1 # compute linear index for point 2 # y_2 = torch.floor(judgements_eq[:,2] * rows).long() # x_2 = torch.floor(judgements_eq[:,3] * cols).long() point_2_idx_linear = y_2 * cols + x_2 # extract all pairs of comparisions points_1_vec = torch.index_select(R_vec, 1, Variable(point_1_idx_linaer, requires_grad = False)) points_2_vec = torch.index_select(R_vec, 1, Variable(point_2_idx_linear, requires_grad = False)) # I1_vec = torch.index_select(I_vec, 1, 
point_1_idx_linaer) # I2_vec = torch.index_select(I_vec, 1, point_2_idx_linear) weight = Variable(judgements_eq[:,4], requires_grad = False) # weight = confidence#* torch.exp(4.0 * torch.abs(I1_vec - I2_vec) ) # compute loss # eq_loss = torch.sum(torch.mul(weight, torch.mean(torch.abs(points_1_vec - points_2_vec),0) )) eq_loss = torch.sum(torch.mul(weight, torch.mean(torch.pow(points_1_vec - points_2_vec,2),0) )) num_valid_eq += judgements_eq.size(0) # compute inequality annotations if judgements_ineq.size(1) > 2: judgements_ineq = judgements_ineq.cuda() R_intensity = torch.mean(prediction_R, 0) # R_intensity = torch.log(R_intensity) R_vec_mean = R_intensity.view(1, -1) y_1 = torch.floor(judgements_ineq[:,0] * rows).long() y_2 = torch.floor(judgements_ineq[:,2] * rows).long() # x_1 = torch.floor(judgements_ineq[:,1] * cols).long() # x_2 = torch.floor(judgements_ineq[:,3] * cols).long() if random_filp: x_1 = cols - 1 - torch.floor(judgements_ineq[:,1] * cols).long() x_2 = cols - 1 - torch.floor(judgements_ineq[:,3] * cols).long() else: x_1 = torch.floor(judgements_ineq[:,1] * cols).long() x_2 = torch.floor(judgements_ineq[:,3] * cols).long() # y_1 = torch.floor(judgements_ineq[:,0] * rows).long() # x_1 = torch.floor(judgements_ineq[:,1] * cols).long() point_1_idx_linaer = y_1 * cols + x_1 # y_2 = torch.floor(judgements_ineq[:,2] * rows).long() # x_2 = torch.floor(judgements_ineq[:,3] * cols).long() point_2_idx_linear = y_2 * cols + x_2 # extract all pairs of comparisions points_1_vec = torch.index_select(R_vec_mean, 1, Variable(point_1_idx_linaer, requires_grad = False)).squeeze(0) points_2_vec = torch.index_select(R_vec_mean, 1, Variable(point_2_idx_linear, requires_grad = False)).squeeze(0) weight = Variable(judgements_ineq[:,4], requires_grad = False) # point 2 should be always darker than (<) point 1 # compute loss relu_layer = nn.ReLU(True) # ineq_loss = torch.sum(torch.mul(weight, relu_layer(points_2_vec - points_1_vec + tau) ) ) ineq_loss = 
torch.sum(torch.mul(weight, torch.pow( relu_layer(points_2_vec - points_1_vec + tau),2) ) ) # ineq_loss = torch.sum(torch.mul(weight, torch.pow(relu_layer(tau - points_1_vec/points_2_vec),2))) num_included = torch.sum( torch.ge(points_2_vec.data - points_1_vec.data, -tau).float().cuda() ) # num_included = torch.sum(torch.ge(points_2_vec.data/points_1_vec.data, 1./tau).float().cuda()) num_valid_ineq += num_included # avoid divide by zero return eq_loss/(num_valid_eq + 1e-8) + ineq_loss/(num_valid_ineq + 1e-8) def ShadingPenaltyLoss(self, S): return torch.mean(torch.pow(S - 0.5,2) ) # return torch.sum( torch.mul(sky_mask, torch.abs(S - np.log(0.5))/num_val_pixels )) def AngleLoss(self, prediction_n, targets): mask = Variable(targets['mask'].cuda(), requires_grad = False) normal = Variable(targets['normal'].cuda(), requires_grad = False) num_valid = torch.sum(mask[:,0,:,:]) # compute dot product angle_loss = - torch.sum( torch.mul(mask, torch.mul(prediction_n, normal)), 1) return 1 + torch.sum(angle_loss)/num_valid def GradientLoss(self, prediction_n, mask, gt_n): N = torch.sum(mask) # horizontal angle difference h_mask = torch.mul(mask[:,:,:,0:-2], mask[:,:,:,2:]) h_gradient = prediction_n[:,:,:,0:-2] - prediction_n[:,:,:,2:] h_gradient_gt = gt_n[:,:,:,0:-2] - gt_n[:,:,:,2:] h_gradient_loss = torch.mul(h_mask, torch.abs(h_gradient - h_gradient_gt)) # Vertical angle difference v_mask = torch.mul(mask[:,:,0:-2,:], mask[:,:,2:,:]) v_gradient = prediction_n[:,:,0:-2,:] - prediction_n[:,:,2:,:] v_gradient_gt = gt_n[:,:,0:-2,:] - gt_n[:,:,2:,:] v_gradient_loss = torch.mul(v_mask, torch.abs(v_gradient - v_gradient_gt)) gradient_loss = torch.sum(h_gradient_loss) + torch.sum(v_gradient_loss) gradient_loss = gradient_loss/(N*2.0) return gradient_loss def SmoothLoss(self, prediction_n, mask): N = torch.sum(mask[:,0,:,:]) # horizontal angle difference h_mask = torch.mul(mask[:,:,:,0:-2], mask[:,:,:,2:]) h_gradient = torch.sum( torch.mul(h_mask, 
torch.mul(prediction_n[:,:,:,0:-2], prediction_n[:,:,:,2:])), 1) h_gradient_loss = 1 - torch.sum(h_gradient)/N # Vertical angle difference v_mask = torch.mul(mask[:,:,0:-2,:], mask[:,:,2:,:]) v_gradient = torch.sum( torch.mul(v_mask, torch.mul(prediction_n[:,:,0:-2,:], prediction_n[:,:,2:,:])), 1) v_gradient_loss = 1 - torch.sum(v_gradient)/N gradient_loss = h_gradient_loss + v_gradient_loss return gradient_loss def UncertaintyLoss(self, prediction_n, uncertainty, targets): uncertainty = torch.squeeze(uncertainty, 1) mask = Variable(targets['mask'].cuda(), requires_grad = False) normal = Variable(targets['normal'].cuda(), requires_grad = False) num_valid = torch.sum(mask[:,0,:,:]) angle_diff = ( torch.sum( torch.mul(prediction_n, normal), 1) + 1.0) * 0.5 uncertainty_loss = torch.sum( torch.mul(mask[:,0,:,:], torch.pow(uncertainty - angle_diff, 2) ) ) return uncertainty_loss/num_valid def MaskLocalSmoothenessLoss(self, R, M, targets): h = R.size(2) w = R.size(3) num_c = R.size(1) half_window_size = 1 total_loss = Variable(torch.cuda.FloatTensor(1)) total_loss[0] = 0 mask_center = M[:,:,half_window_size + self.Y[half_window_size,half_window_size]:h-half_window_size + self.Y[half_window_size,half_window_size], \ half_window_size + self.X[half_window_size,half_window_size]:w-half_window_size + self.X[half_window_size,half_window_size]] R_center = R[:,:,half_window_size + self.Y[half_window_size,half_window_size]:h-half_window_size + self.Y[half_window_size,half_window_size], \ half_window_size + self.X[half_window_size,half_window_size]:w-half_window_size + self.X[half_window_size,half_window_size] ] c_idx = 0 for k in range(0,half_window_size*2+1): for l in range(0,half_window_size*2+1): # albedo_weights = Variable(targets["r_w_s"+str(scale_idx)][:,c_idx,:,:].unsqueeze(1).repeat(1,num_c,1,1).float().cuda(), requires_grad = False) R_N = R[:,:,half_window_size + self.Y[k,l]:h- half_window_size + self.Y[k,l], half_window_size + self.X[k,l]: w-half_window_size + 
self.X[k,l] ] mask_N = M[:,:,half_window_size + self.Y[k,l]:h- half_window_size + self.Y[k,l], half_window_size + self.X[k,l]: w-half_window_size + self.X[k,l] ] composed_M = torch.mul(mask_N, mask_center) # albedo_weights = torch.mul(albedo_weights, composed_M) r_diff = torch.mul( composed_M, torch.pow(R_center - R_N,2) ) total_loss = total_loss + torch.mean(r_diff) c_idx = c_idx + 1 return total_loss/(8.0 * num_c) def LocalAlebdoSmoothenessLoss(self, R, targets, scale_idx): h = R.size(2) w = R.size(3) num_c = R.size(1) half_window_size = 1 total_loss = Variable(torch.cuda.FloatTensor(1)) total_loss[0] = 0 R_center = R[:,:,half_window_size + self.Y[half_window_size,half_window_size]:h-half_window_size + self.Y[half_window_size,half_window_size], \ half_window_size + self.X[half_window_size,half_window_size]:w-half_window_size + self.X[half_window_size,half_window_size] ] c_idx = 0 for k in range(0,half_window_size*2+1): for l in range(0,half_window_size*2+1): albedo_weights = targets["r_w_s"+str(scale_idx)][:,c_idx,:,:].unsqueeze(1).repeat(1,num_c,1,1).float().cuda() R_N = R[:,:,half_window_size + self.Y[k,l]:h- half_window_size + self.Y[k,l], half_window_size + self.X[k,l]: w-half_window_size + self.X[k,l] ] # mask_N = M[:,:,half_window_size + self.Y[k,l]:h- half_window_size + self.Y[k,l], half_window_size + self.X[k,l]: w-half_window_size + self.X[k,l] ] # composed_M = torch.mul(mask_N, mask_center) # albedo_weights = torch.mul(albedo_weights, composed_M) r_diff = torch.mul( Variable(albedo_weights, requires_grad = False), torch.abs(R_center - R_N) ) total_loss = total_loss + torch.mean(r_diff) c_idx = c_idx + 1 return total_loss/(8.0 * num_c) def Data_Loss(self, log_prediction, mask, log_gt): N = torch.sum(mask) log_diff = log_prediction - log_gt log_diff = torch.mul(log_diff, mask) s1 = torch.sum( torch.pow(log_diff,2) )/N s2 = torch.pow(torch.sum(log_diff),2)/(N*N) data_loss = s1 - s2 return data_loss def L2GradientMatchingLoss(self, log_prediction, mask, 
log_gt): N = torch.sum(mask) log_diff = log_prediction - log_gt log_diff = torch.mul(log_diff, mask) v_gradient = torch.pow(log_diff[:,:,0:-2,:] - log_diff[:,:,2:,:],2) v_mask = torch.mul(mask[:,:,0:-2,:], mask[:,:,2:,:]) v_gradient = torch.mul(v_gradient, v_mask) h_gradient = torch.pow(log_diff[:,:,:,0:-2] - log_diff[:,:,:,2:],2) h_mask = torch.mul(mask[:,:,:,0:-2], mask[:,:,:,2:]) h_gradient = torch.mul(h_gradient, h_mask) gradient_loss = (torch.sum(h_gradient) + torch.sum(v_gradient)) gradient_loss = gradient_loss/N return gradient_loss def L1GradientMatchingLoss(self, log_prediction, mask, log_gt): N = torch.sum( mask ) log_diff = log_prediction - log_gt log_diff = torch.mul(log_diff, mask) v_gradient = torch.abs(log_diff[:,:,0:-2,:] - log_diff[:,:,2:,:]) v_mask = torch.mul(mask[:,:,0:-2,:], mask[:,:,2:,:]) v_gradient = torch.mul(v_gradient, v_mask) h_gradient = torch.abs(log_diff[:,:,:,0:-2] - log_diff[:,:,:,2:]) h_mask = torch.mul(mask[:,:,:,0:-2], mask[:,:,:,2:]) h_gradient = torch.mul(h_gradient, h_mask) gradient_loss = (torch.sum(h_gradient) + torch.sum(v_gradient))/2.0 gradient_loss = gradient_loss/N return gradient_loss def L1Loss(self, prediction_n, mask, gt): num_valid = torch.sum( mask ) diff = torch.mul(mask, torch.abs(prediction_n - gt)) return torch.sum(diff)/num_valid def L2Loss(self, prediction_n, mask, gt): num_valid = torch.sum( mask ) diff = torch.mul(mask, torch.pow(prediction_n - gt,2)) return torch.sum(diff)/num_valid def HuberLoss(self, prediction, mask, gt): tau = 1.0 num_valid = torch.sum(mask) diff_L1 = torch.abs(prediction - gt) diff_L2 = torch.pow(prediction - gt ,2) mask_L2 = torch.le(diff_L1, tau).float().cuda() mask_L1 = 1.0 - mask_L2 L2_loss = 0.5 * torch.sum(torch.mul(mask, torch.mul(mask_L2, diff_L2))) L1_loss = torch.sum(torch.mul(mask, torch.mul(mask_L1, diff_L1))) - 0.5 final_loss = (L2_loss + L1_loss)/num_valid return final_loss # def DirectFramework(self, input_images, prediction_R, prediction_S, targets, epoch): # # 
downsample all the images # prediction_R_1 = prediction_R[:,:,::2,::2] # prediction_R_2 = prediction_R_1[:,:,::2,::2] # prediction_R_3 = prediction_R_2[:,:,::2,::2] # mask_0 = Variable(targets['mask'].cuda(), requires_grad = False) # mask_0 = mask_0[:,0,:,:].unsqueeze(1) # mask_1 = mask_0[:,:,::2,::2] # mask_2 = mask_1[:,:,::2,::2] # mask_3 = mask_2[:,:,::2,::2] # R_gt_0 = Variable(targets['gt_R'].cuda(), requires_grad = False) # R_gt_1 = R_gt_0[:,:,::2,::2] # R_gt_2 = R_gt_1[:,:,::2,::2] # R_gt_3 = R_gt_2[:,:,::2,::2] # S_gt_0 = Variable(targets['gt_S'].cuda(), requires_grad = False) # S_gt_1 = S_gt_0[:,:,::2,::2] # S_gt_2 = S_gt_1[:,:,::2,::2] # S_gt_3 = S_gt_2[:,:,::2,::2] # # gt_normal = Variable(targets['normal'].cuda(), requires_grad = False) # prediction_S_1 = prediction_S[:,:,::2,::2] # prediction_S_2 = prediction_S_1[:,:,::2,::2] # prediction_S_3 = prediction_S_2[:,:,::2,::2] # # R L2 loss # w_data = 1.0 # w_grad = 0.5 # R_loss = w_data * self.L2Loss(prediction_R, mask_0, R_gt_0) # R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R , mask_0, R_gt_0) # R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_1, mask_1, R_gt_1) # R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_2, mask_2, R_gt_2) # R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_3, mask_3, R_gt_3) # S_mask_0 = mask_0[:,0,:,:].unsqueeze(1) # S_mask_1 = mask_1[:,0,:,:].unsqueeze(1) # S_mask_2 = mask_2[:,0,:,:].unsqueeze(1) # S_mask_3 = mask_3[:,0,:,:].unsqueeze(1) # # S Huber Loss # S_loss = w_data * self.HuberLoss(prediction_S, S_mask_0, S_gt_0) # S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S , S_mask_0, S_gt_0) # S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_1, S_mask_1, S_gt_1) # S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_2, S_mask_2, S_gt_2) # S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_3, S_mask_3, S_gt_3) # Reconstr_loss = 2.0 * self.SUNCGReconstLoss(input_images, prediction_R, prediction_S, 
mask_0, targets) # # Ls_loss = 8.0 * self.BilateralRefSmoothnessLoss(prediction_L, targets, 'S', 2) # print("R_loss", R_loss.data[0]) # print("S_loss", S_loss.data[0]) # # print("Reconstr_loss", Reconstr_loss.data[0]) # # print("Lighting Loss", Ls_loss.data[0]) # total_loss = R_loss + S_loss + Reconstr_loss # return total_loss # def ScaleInvarianceFramework(self, input_images, prediction_R, prediction_S, targets, epoch): # prediction_R_1 = prediction_R[:,:,::2,::2] # prediction_R_2 = prediction_R_1[:,:,::2,::2] # prediction_R_3 = prediction_R_2[:,:,::2,::2] # # downsample all the images # mask_0 = Variable(targets['mask'].cuda(), requires_grad = False) # mask_0 = mask_0[:,0,:,:].unsqueeze(1) # mask_1 = mask_0[:,:,::2,::2] # mask_2 = mask_1[:,:,::2,::2] # mask_3 = mask_2[:,:,::2,::2] # R_gt_0 = torch.log(Variable(targets['gt_R'].cuda(), requires_grad = False)) # R_gt_1 = R_gt_0[:,:,::2,::2] # R_gt_2 = R_gt_1[:,:,::2,::2] # R_gt_3 = R_gt_2[:,:,::2,::2] # S_gt_0 = torch.log(Variable(targets['gt_S'].cuda(), requires_grad = False)) # S_gt_1 = S_gt_0[:,:,::2,::2] # S_gt_2 = S_gt_1[:,:,::2,::2] # S_gt_3 = S_gt_2[:,:,::2,::2] # # end of downsample # w_data = 1.0 # w_grad = 0.5 # R_loss = w_data * self.Data_Loss(prediction_R, mask_0, R_gt_0) # R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R , mask_0, R_gt_0) # R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_1, mask_1, R_gt_1) # R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_2, mask_2, R_gt_2) # R_loss += w_grad * self.L1GradientMatchingLoss(prediction_R_3, mask_3, R_gt_3) # S_mask_0 = mask_0[:,0,:,:].unsqueeze(1) # S_mask_1 = mask_1[:,0,:,:].unsqueeze(1) # S_mask_2 = mask_2[:,0,:,:].unsqueeze(1) # S_mask_3 = mask_3[:,0,:,:].unsqueeze(1) # prediction_S_1 = prediction_S[:,:,::2,::2] # prediction_S_2 = prediction_S_1[:,:,::2,::2] # prediction_S_3 = prediction_S_2[:,:,::2,::2] # S_loss = w_data * self.Data_Loss(prediction_S, S_mask_0, S_gt_0) # S_loss += w_grad * 
self.L1GradientMatchingLoss(prediction_S , S_mask_0, S_gt_0) # S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_1, S_mask_1, S_gt_1) # S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_2, S_mask_2, S_gt_2) # S_loss += w_grad * self.L1GradientMatchingLoss(prediction_S_3, S_mask_3, S_gt_3) # # S_loss += 2.0 * self. (prediction_S, targets, 'S', 2) # Reconstr_loss = self.SUNCGReconstLoss(input_images, torch.exp(prediction_R), torch.exp(prediction_S), mask_0, targets) # # # lighting smoothness loss # # Ls_loss = 32.0 * self.LocalLightingSmoothenessLoss(prediction_L, targets) # # Ls_loss = 8.0 * self.BilateralRefSmoothnessLoss(prediction_L, targets, 'S', 2) # print("Reconstr_loss", Reconstr_loss.data[0]) # print("R_loss", R_loss.data[0]) # print("S_loss", S_loss.data[0]) # # print("Lighting Loss", Ls_loss.data[0]) # total_loss = R_loss + S_loss + Reconstr_loss #+ Ls_loss # return total_loss # def NormalShadingSmoothnessLoss(self, S, targets, scale_idx): # h = S.size(2) # w = S.size(3) # num_c = S.size(1) # half_window_size = 1 # total_loss = Variable(torch.cuda.FloatTensor(1)) # total_loss[0] = 0 # # mask_center = M[:,:,half_window_size + self.Y[half_window_size,half_window_size]:h-half_window_size + self.Y[half_window_size,half_window_size], \ # # half_window_size + self.X[half_window_size,half_window_size]:w-half_window_size + self.X[half_window_size,half_window_size]] # S_center = S[:,:,half_window_size + self.Y[half_window_size,half_window_size]:h-half_window_size + self.Y[half_window_size,half_window_size], \ # half_window_size + self.X[half_window_size,half_window_size]:w-half_window_size + self.X[half_window_size,half_window_size] ] # c_idx = 0 # for k in range(0,half_window_size*2+1): # for l in range(0,half_window_size*2+1): # normal_weights = targets["s_w_"+str(scale_idx)][:,c_idx,:,:].unsqueeze(1).repeat(1,num_c,1,1).float().cuda() # S_N = S[:,:,half_window_size + self.Y[k,l]:h- half_window_size + self.Y[k,l], half_window_size + 
def CCLoss(self, prediction_S, saw_mask, gts, num_cc):
    """Connected-component shading loss (scale-invariant MSE per region).

    For every labelled connected component k in `saw_mask`, penalizes the
    variance of (prediction_S - gts) inside that component:
    sum(d^2)/N - (sum(d)/N)^2, averaged over all regions in the batch.

    prediction_S -- predicted log-shading, NCHW tensor (assumed — confirm).
    saw_mask     -- integer region labels per image, 0 = background.
    gts          -- ground-truth value(s) to compare against (tensor or scalar 0).
    num_cc       -- per-image count of connected components.
    """
    diff = prediction_S - gts
    total_loss = Variable(torch.cuda.FloatTensor(1))
    total_loss[0] = 0
    num_regions = 0
    # walk every image in the batch, then every labelled region in that image
    for b in range(prediction_S.size(0)):
        log_diff = diff[b, :, :, :]
        mask = saw_mask[b, :, :, :].int()
        for region_id in range(1, num_cc[b] + 1):
            region = (mask == region_id).float().cuda()
            masked_log_diff = torch.mul(region, log_diff)
            N = torch.sum(region)
            # E[d^2] - E[d]^2 over the region (variance of the residual)
            s1 = torch.sum(torch.pow(masked_log_diff, 2)) / N
            s2 = torch.pow(torch.sum(masked_log_diff), 2) / (N * N)
            total_loss += (s1 - s2)
            num_regions += 1
    # epsilon guards against images with zero labelled regions
    return total_loss / (num_regions + 1e-6)

def SAWLoss(self, prediction_S, targets):
    """SAW (Shading Annotations in the Wild) supervision.

    Combines two connected-component terms:
      * shadow-boundary regions (saw_mask_1): shading should match the
        log of the grayscale input intensity, weight lambda_1;
      * smooth regions (saw_mask_2): shading should be constant (target 0),
        weight lambda_2.
    """
    lambda_1, lambda_2 = 0.1, 1.
    # shadow-boundary term
    saw_mask_1 = Variable(targets['saw_mask_1'].cuda(), requires_grad = False)
    linear_I = torch.mean(Variable(targets['rgb_img'].cuda(), requires_grad = False), 1)
    linear_I = linear_I.unsqueeze(1)
    # clamp so the log below stays finite
    linear_I[linear_I < 1e-4] = 1e-4
    saw_loss_1 = lambda_1 * self.CCLoss(prediction_S, saw_mask_1,
                                        torch.log(linear_I), targets['num_mask_1'])
    # smooth-region term: constant shading within each component
    saw_mask_2 = Variable(targets['saw_mask_2'].cuda(), requires_grad = False)
    saw_loss_2 = lambda_2 * self.CCLoss(prediction_S, saw_mask_2, 0, targets['num_mask_2'])
    return saw_loss_2 + saw_loss_1
def DirectFramework(self, prediction, gt, mask):
    """Direct supervision: masked L2 data term plus multi-scale L1 gradient
    matching over a 4-level stride-2 pyramid."""
    w_data = 1.0
    w_grad = 0.5
    final_loss = w_data * self.L2Loss(prediction, mask, gt)
    # stride-2 downsampled pyramid (levels 1..3)
    prediction_1 = prediction[:, :, ::2, ::2]
    prediction_2 = prediction_1[:, :, ::2, ::2]
    prediction_3 = prediction_2[:, :, ::2, ::2]
    mask_1 = mask[:, :, ::2, ::2]
    mask_2 = mask_1[:, :, ::2, ::2]
    mask_3 = mask_2[:, :, ::2, ::2]
    gt_1 = gt[:, :, ::2, ::2]
    gt_2 = gt_1[:, :, ::2, ::2]
    gt_3 = gt_2[:, :, ::2, ::2]
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction, mask, gt)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_1, mask_1, gt_1)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_2, mask_2, gt_2)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_3, mask_3, gt_3)
    return final_loss

# all parameters in log space (presumption carried over from the original)
def ScaleInvarianceFramework(self, prediction, gt, mask, w_grad):
    """Log-space scale-invariant data term plus multi-scale gradient matching."""
    assert(prediction.size(1) == gt.size(1))
    assert(prediction.size(1) == mask.size(1))
    w_data = 1.0
    final_loss = w_data * self.Data_Loss(prediction, mask, gt)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction, mask, gt)
    prediction_1 = prediction[:, :, ::2, ::2]
    prediction_2 = prediction_1[:, :, ::2, ::2]
    prediction_3 = prediction_2[:, :, ::2, ::2]
    mask_1 = mask[:, :, ::2, ::2]
    mask_2 = mask_1[:, :, ::2, ::2]
    mask_3 = mask_2[:, :, ::2, ::2]
    gt_1 = gt[:, :, ::2, ::2]
    gt_2 = gt_1[:, :, ::2, ::2]
    gt_3 = gt_2[:, :, ::2, ::2]
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_1, mask_1, gt_1)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_2, mask_2, gt_2)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_3, mask_3, gt_3)
    return final_loss

def LinearScaleInvarianceFramework(self, prediction, gt, mask, w_grad):
    """Linear-space scale-invariant loss: least-squares fits a single scalar
    scale between prediction and gt (over mask > 0.1), then applies a masked
    L2 data term plus multi-scale gradient matching."""
    assert(prediction.size(1) == gt.size(1))
    assert(prediction.size(1) == mask.size(1))
    w_data = 1.0
    # FIX: guard against a (near-)empty mask, mirroring
    # WeightedLinearScaleInvarianceFramework — torch.gels on an empty
    # selection would crash.
    if torch.sum(mask.data) < 10:
        return 0
    gt_vec = gt[mask > 0.1]
    pred_vec = prediction[mask > 0.1]
    gt_vec = gt_vec.unsqueeze(1).float().cpu()
    pred_vec = pred_vec.unsqueeze(1).float().cpu()
    # least-squares scalar fit: gt ≈ scale * prediction
    # NOTE(review): torch.gels is deprecated in modern PyTorch
    # (torch.lstsq / torch.linalg.lstsq); kept for compatibility with the
    # torch version this file targets.
    scale, _ = torch.gels(gt_vec.data, pred_vec.data)
    scale = scale[0, 0]
    prediction_scaled = prediction * scale
    final_loss = w_data * self.L2Loss(prediction_scaled, mask, gt)
    prediction_1 = prediction_scaled[:, :, ::2, ::2]
    prediction_2 = prediction_1[:, :, ::2, ::2]
    prediction_3 = prediction_2[:, :, ::2, ::2]
    mask_1 = mask[:, :, ::2, ::2]
    mask_2 = mask_1[:, :, ::2, ::2]
    mask_3 = mask_2[:, :, ::2, ::2]
    gt_1 = gt[:, :, ::2, ::2]
    gt_2 = gt_1[:, :, ::2, ::2]
    gt_3 = gt_2[:, :, ::2, ::2]
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_scaled, mask, gt)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_1, mask_1, gt_1)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_2, mask_2, gt_2)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_3, mask_3, gt_3)
    return final_loss

def WeightedLinearScaleInvarianceFramework(self, prediction, gt, mask, w_grad):
    """Like LinearScaleInvarianceFramework, but the data/gradient terms are
    weighted by min(1/gt, 1) so bright regions do not dominate."""
    w_data = 1.0
    assert(prediction.size(1) == gt.size(1))
    assert(prediction.size(1) == mask.size(1))
    # too few valid pixels -> no reliable scale fit
    if torch.sum(mask.data) < 10:
        return 0
    gt_vec = gt[mask > 0.1]
    pred_vec = prediction[mask > 0.1]
    gt_vec = gt_vec.unsqueeze(1).float().cpu()
    pred_vec = pred_vec.unsqueeze(1).float().cpu()
    scale, _ = torch.gels(gt_vec.data, pred_vec.data)
    scale = scale[0, 0]
    prediction_scaled = prediction * scale
    # per-pixel weight: min(1/gt, 1)
    ones_matrix = Variable(torch.zeros(gt.size(0), gt.size(1), gt.size(2), gt.size(3)) + 1,
                           requires_grad = False)
    weight = torch.min(1 / gt, ones_matrix.float().cuda())
    weight_mask = torch.mul(weight, mask)
    final_loss = w_data * self.L2Loss(prediction_scaled, weight_mask, gt)
    prediction_1 = prediction_scaled[:, :, ::2, ::2]
    prediction_2 = prediction_1[:, :, ::2, ::2]
    prediction_3 = prediction_2[:, :, ::2, ::2]
    mask_1 = weight_mask[:, :, ::2, ::2]
    mask_2 = mask_1[:, :, ::2, ::2]
    mask_3 = mask_2[:, :, ::2, ::2]
    gt_1 = gt[:, :, ::2, ::2]
    gt_2 = gt_1[:, :, ::2, ::2]
    gt_3 = gt_2[:, :, ::2, ::2]
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_scaled, weight_mask, gt)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_1, mask_1, gt_1)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_2, mask_2, gt_2)
    final_loss += w_grad * self.L1GradientMatchingLoss(prediction_3, mask_3, gt_3)
    return final_loss
def SUNCGBatchRankingLoss(self, prediction_R, judgements_eq, judgements_ineq):
    """Pairwise reflectance ranking loss for one image.

    judgements_eq / judgements_ineq hold rows of (y1, x1, y2, x2, ...) pixel
    pairs (assumed layout — confirm against the data loader). Equal pairs get
    a squared-difference penalty; unequal pairs get a squared hinge that fires
    when point 2 is not darker than point 1 by margin tau.
    Returns the sum of both terms, each normalized by its pair count.
    """
    eq_loss, ineq_loss = 0, 0
    num_valid_eq = 0
    num_valid_ineq = 0
    tau = 0.4
    rows = prediction_R.size(1)
    cols = prediction_R.size(2)
    num_channel = prediction_R.size(0)

    # ---- equality pairs: reflectance should match ----
    if judgements_eq.size(1) > 2:
        judgements_eq = judgements_eq.cuda()
        R_vec = prediction_R.view(num_channel, -1)
        y_1 = judgements_eq[:, 0].long()
        y_2 = judgements_eq[:, 2].long()
        x_1 = judgements_eq[:, 1].long()
        x_2 = judgements_eq[:, 3].long()
        # flatten (y, x) into linear indices over the H*W plane
        point_1_idx_linear = y_1 * cols + x_1
        point_2_idx_linear = y_2 * cols + x_2
        points_1_vec = torch.index_select(R_vec, 1, Variable(point_1_idx_linear, requires_grad = False))
        points_2_vec = torch.index_select(R_vec, 1, Variable(point_2_idx_linear, requires_grad = False))
        # mean over channels, squared difference per pair
        eq_loss = torch.sum(torch.mean(torch.pow(points_1_vec - points_2_vec, 2), 0))
        num_valid_eq += judgements_eq.size(0)

    # ---- inequality pairs: point 2 should be darker than point 1 ----
    if judgements_ineq.size(1) > 2:
        judgements_ineq = judgements_ineq.cuda()
        R_intensity = torch.mean(prediction_R, 0)
        R_vec_mean = R_intensity.view(1, -1)
        y_1 = judgements_ineq[:, 0].long()
        y_2 = judgements_ineq[:, 2].long()
        x_1 = judgements_ineq[:, 1].long()
        x_2 = judgements_ineq[:, 3].long()
        point_1_idx_linear = y_1 * cols + x_1
        point_2_idx_linear = y_2 * cols + x_2
        points_1_vec = torch.index_select(R_vec_mean, 1, Variable(point_1_idx_linear, requires_grad = False)).squeeze(0)
        points_2_vec = torch.index_select(R_vec_mean, 1, Variable(point_2_idx_linear, requires_grad = False)).squeeze(0)
        relu_layer = nn.ReLU(True)
        # squared hinge: active while p2 - p1 > -tau
        ineq_loss = torch.sum(torch.pow(relu_layer(points_2_vec - points_1_vec + tau), 2))
        # normalize only by pairs still inside the margin
        num_included = torch.sum(torch.ge(points_2_vec.data - points_1_vec.data, -tau).float().cuda())
        num_valid_ineq += num_included

    # epsilons avoid divide-by-zero when a pair set is empty
    return (eq_loss) / (num_valid_eq + 1e-8) + ineq_loss / (num_valid_ineq + 1e-8)
def __call__(self, input_images, prediction_R, prediction_S, targets, data_set_name, epoch):
    """Dispatch the appropriate loss for the dataset being trained on.

    Supported data_set_name values: "IIW", "Render", "CGIntrinsics", "SAW".
    Stores the combined loss in self.total_loss and returns its .data.
    Raises SystemExit on an unknown dataset name (original behavior).
    """
    lambda_CG = 0.5

    if data_set_name == "IIW":
        print("IIW Loss")
        num_images = prediction_R.size(0)
        # multi-scale local albedo smoothness over a stride-2 pyramid
        prediction_R_1 = prediction_R[:, :, ::2, ::2]
        prediction_R_2 = prediction_R_1[:, :, ::2, ::2]
        prediction_R_3 = prediction_R_2[:, :, ::2, ::2]
        rs_loss = self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R, targets, 0)
        rs_loss = rs_loss + 0.5 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_1, targets, 1)
        rs_loss = rs_loss + 0.3333 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_2, targets, 2)
        rs_loss = rs_loss + 0.25 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_3, targets, 3)
        # dense shading smoothness
        ss_loss = self.w_ss_dense * self.BilateralRefSmoothnessLoss(prediction_S, targets, 'S', 2)
        # reconstruction: exp() because R and S are predicted in log space
        reconstr_loss = self.w_reconstr_real * self.IIWReconstLoss(torch.exp(prediction_R),
                                                                   torch.exp(prediction_S), targets)
        # human-judgement ranking loss, averaged over the batch
        total_iiw_loss = Variable(torch.cuda.FloatTensor(1))
        total_iiw_loss[0] = 0
        for i in range(0, num_images):
            judgements_eq = targets["eq_mat"][i]
            judgements_ineq = targets["ineq_mat"][i]
            random_filp = targets["random_filp"][i]
            total_iiw_loss += self.w_IIW * self.BatchRankingLoss(prediction_R[i, :, :, :],
                                                                 judgements_eq, judgements_ineq,
                                                                 random_filp)
        total_iiw_loss = (total_iiw_loss) / num_images
        total_loss = total_iiw_loss + reconstr_loss + rs_loss + ss_loss

    elif data_set_name == "Render":
        print("Render LOSS")
        mask = Variable(targets['mask'].cuda(), requires_grad = False)
        mask_R = mask[:, 0, :, :].unsqueeze(1).repeat(1, prediction_R.size(1), 1, 1)
        mask_S = mask[:, 0, :, :].unsqueeze(1).repeat(1, prediction_S.size(1), 1, 1)
        mask_img = mask[:, 0, :, :].unsqueeze(1).repeat(1, input_images.size(1), 1, 1)
        gt_R = Variable(targets['gt_R'].cuda(), requires_grad = False)
        gt_S = Variable(targets['gt_S'].cuda(), requires_grad = False)
        # LinearScaleInvarianceFramework works best when training on
        # CGIntrinsics only; ScaleInvarianceFramework may do better when
        # mixing IIW and SAW (note carried over from the original).
        R_loss = lambda_CG * self.LinearScaleInvarianceFramework(torch.exp(prediction_R), gt_R, mask_R, 0.5)
        S_loss = lambda_CG * self.LinearScaleInvarianceFramework(torch.exp(prediction_S), gt_S, mask_S, 0.5)
        reconstr_loss = lambda_CG * self.w_reconstr * self.SUNCGReconstLoss(torch.exp(prediction_R),
                                                                            torch.exp(prediction_S),
                                                                            mask_img, targets)
        total_loss = R_loss + S_loss + reconstr_loss

    elif data_set_name == "CGIntrinsics":
        # scale-invariance supervision on synthetic ground truth
        print("CGIntrinsics LOSS")
        mask = Variable(targets['mask'].cuda(), requires_grad = False)
        mask_R = mask[:, 0, :, :].unsqueeze(1).repeat(1, prediction_R.size(1), 1, 1)
        mask_S = mask[:, 0, :, :].unsqueeze(1).repeat(1, prediction_S.size(1), 1, 1)
        mask_img = mask[:, 0, :, :].unsqueeze(1).repeat(1, input_images.size(1), 1, 1)
        gt_R = Variable(targets['gt_R'].cuda(), requires_grad = False)
        gt_S = Variable(targets['gt_S'].cuda(), requires_grad = False)
        R_loss = lambda_CG * self.LinearScaleInvarianceFramework(torch.exp(prediction_R), gt_R, mask_R, 0.5)
        S_loss = lambda_CG * self.LinearScaleInvarianceFramework(torch.exp(prediction_S), gt_S, mask_S, 0.5)
        reconstr_loss = lambda_CG * self.w_reconstr * self.SUNCGReconstLoss(torch.exp(prediction_R),
                                                                            torch.exp(prediction_S),
                                                                            mask_img, targets)
        # extra smoothness because some ground-truth shadings are noisy
        Ss_loss = lambda_CG * self.w_ss_dense * self.BilateralRefSmoothnessLoss(prediction_S, targets, 'S', 2)
        total_iiw_loss = 0
        for i in range(0, prediction_R.size(0)):
            judgements_eq = targets["eq_mat"][i]
            judgements_ineq = targets["ineq_mat"][i]
            random_filp = targets["random_filp"][i]
            total_iiw_loss += lambda_CG * self.SUNCGBatchRankingLoss(prediction_R[i, :, :, :],
                                                                     judgements_eq, judgements_ineq)
        total_iiw_loss = total_iiw_loss / prediction_R.size(0)
        total_loss = R_loss + S_loss + reconstr_loss + Ss_loss + total_iiw_loss

    elif data_set_name == "SAW":
        print("SAW Loss")
        prediction_R_1 = prediction_R[:, :, ::2, ::2]
        prediction_R_2 = prediction_R_1[:, :, ::2, ::2]
        prediction_R_3 = prediction_R_2[:, :, ::2, ::2]
        rs_loss = self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R, targets, 0)
        rs_loss = rs_loss + 0.5 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_1, targets, 1)
        rs_loss = rs_loss + 0.3333 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_2, targets, 2)
        rs_loss = rs_loss + 0.25 * self.w_rs_local * self.LocalAlebdoSmoothenessLoss(prediction_R_3, targets, 3)
        reconstr_loss = self.w_reconstr_real * self.IIWReconstLoss(torch.exp(prediction_R),
                                                                   torch.exp(prediction_S), targets)
        ss_loss = self.w_ss_dense * self.BilateralRefSmoothnessLoss(prediction_S, targets, 'S', 2)
        SAW_loss = self.w_SAW * self.SAWLoss(prediction_S, targets)
        total_loss = rs_loss + SAW_loss + reconstr_loss + ss_loss

    else:
        print("NORMAL Loss")
        sys.exit()

    self.total_loss = total_loss
    return total_loss.data

def compute_whdr(self, reflectance, judgements, delta=0.1):
    """Weighted Human Disagreement Rate against IIW judgements.

    reflectance -- HxW(xC) numpy array of predicted reflectance.
    judgements  -- parsed IIW JSON with 'intrinsic_points' and
                   'intrinsic_comparisons'.
    delta       -- relative-difference threshold for "equal" (default 0.1).
    Returns (whdr, whdr_equal, whdr_inequal), or None if no comparison
    carried weight.
    """
    points = judgements['intrinsic_points']
    comparisons = judgements['intrinsic_comparisons']
    id_to_points = {p['id']: p for p in points}
    rows, cols = reflectance.shape[0:2]
    error_sum = 0.0
    error_equal_sum = 0.0
    error_inequal_sum = 0.0
    weight_sum = 0.0
    weight_equal_sum = 0.0
    weight_inequal_sum = 0.0
    for c in comparisons:
        # "darker" is "J_i" in the paper
        darker = c['darker']
        if darker not in ('1', '2', 'E'):
            continue
        # "darker_score" is "w_i" in the paper
        weight = c['darker_score']
        # FIX: test None before the numeric comparison — the original
        # `weight <= 0.0 or weight is None` raises TypeError on None.
        if weight is None or weight <= 0.0:
            continue
        point1 = id_to_points[c['point1']]
        point2 = id_to_points[c['point2']]
        if not point1['opaque'] or not point2['opaque']:
            continue
        # convert to grayscale and threshold
        l1 = max(1e-10, np.mean(reflectance[
            int(point1['y'] * rows), int(point1['x'] * cols), ...]))
        l2 = max(1e-10, np.mean(reflectance[
            int(point2['y'] * rows), int(point2['x'] * cols), ...]))
        # convert algorithm value to the same units as human judgements
        if l2 / l1 > 1.0 + delta:
            alg_darker = '1'
        elif l1 / l2 > 1.0 + delta:
            alg_darker = '2'
        else:
            alg_darker = 'E'
        if darker == 'E':
            if darker != alg_darker:
                error_equal_sum += weight
            weight_equal_sum += weight
        else:
            if darker != alg_darker:
                error_inequal_sum += weight
            weight_inequal_sum += weight
        if darker != alg_darker:
            error_sum += weight
        weight_sum += weight
    if weight_sum:
        return (error_sum / weight_sum), \
               error_equal_sum / (weight_equal_sum + 1e-10), \
               error_inequal_sum / (weight_inequal_sum + 1e-10)
    else:
        return None
targets['oringinal_shape'][1].numpy() # prediction_R_srgb_np = prediction_R_srgb.data[i,:,:,:].cpu().numpy() # prediction_R_srgb_np = np.transpose(prediction_R_srgb_np, (1,2,0)) o_h = targets['oringinal_shape'][0].numpy() o_w = targets['oringinal_shape'][1].numpy() # resize to original resolution prediction_R_np = resize(prediction_R_np, (o_h[i],o_w[i]), order=1, preserve_range=True) # print(targets["judgements_path"][i]) # load Json judgement judgements = json.load(open(targets["judgements_path"][i])) whdr, whdr_eq, whdr_ineq = self.compute_whdr(prediction_R_np, judgements, 0.1) total_whdr += whdr total_whdr_eq += whdr_eq total_whdr_ineq += whdr_ineq count += 1. return total_whdr, total_whdr_eq, total_whdr_ineq, count def evaluate_RC_loss(self, prediction_n, targets): normal_norm = torch.sqrt( torch.sum(torch.pow(prediction_n , 2) , 1) ) normal_norm = normal_norm.unsqueeze(1).repeat(1,3,1,1) prediction_n = torch.div(prediction_n , normal_norm) # mask_0 = Variable(targets['mask'].cuda(), requires_grad = False) # n_gt_0 = Variable(targets['normal'].cuda(), requires_grad = False) total_loss = self.AngleLoss(prediction_n, targets) # return total_loss.data[0] return total_loss.data def evaluate_L0_loss(self, prediction_R, targets): # num_images = prediction_S.size(0) # must be even number total_whdr = float(0) count = float(0) for i in range(0, 1): prediction_R_np = prediction_R # prediction_R_np = prediction_R.data[i,:,:,:].cpu().numpy() # prediction_R_np = np.transpose(prediction_R_np, (1,2,0)) # load Json judgement judgements = json.load(open(targets["judgements_path"][i])) whdr = self.compute_whdr(prediction_R_np, judgements, 0.1) total_whdr += whdr count += 1 return total_whdr, count def get_loss_var(self): return self.total_loss # Defines the Unet generator. # |num_downs|: number of downsamplings in UNet. 
For example, # if |num_downs| == 7, image of size 128x128 will become of size 1x1 # at the bottleneck class UnetGenerator(nn.Module): def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]): super(UnetGenerator, self).__init__() self.gpu_ids = gpu_ids # currently support only input_nc == output_nc # assert(input_nc == output_nc) # construct unet structure unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, innermost=True) for i in range(num_downs - 5): unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, unet_block, norm_layer=norm_layer, use_dropout=use_dropout) unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, unet_block, norm_layer=norm_layer) unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, unet_block, norm_layer=norm_layer) unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, unet_block, norm_layer=norm_layer) unet_block = UnetSkipConnectionBlock(output_nc, ngf, unet_block, outermost=True, norm_layer=norm_layer) self.model = unet_block def forward(self, input): if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor): return nn.parallel.data_parallel(self.model, input, self.gpu_ids) else: return self.model(input) # Defines the submodule with skip connection. 
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
#   |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level. The outermost level has TWO decoder heads:
    up_1 produces a 1-channel map, up_2 a 3-channel map, and forward
    returns (y_1, y_2). Inner levels return cat([model(x), x], 1)."""

    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False,
                 norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, False)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(False)
        upnorm = norm_layer(outer_nc, affine=True)

        if outermost:
            n_output_dim = 3
            uprelu1 = nn.ReLU(False)
            uprelu2 = nn.ReLU(False)
            upconv_1 = nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                          kernel_size=4, stride=2, padding=1)
            upconv_2 = nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                          kernel_size=4, stride=2, padding=1)
            conv_1 = nn.Conv2d(inner_nc, inner_nc, kernel_size=3,
                               stride=1, padding=1)
            conv_2 = nn.Conv2d(inner_nc, inner_nc, kernel_size=3,
                               stride=1, padding=1)
            # FIX: conv_1_o was commented out in the original but is used in
            # up_1 below, causing a NameError at construction. Reinstated as
            # the 1-channel output head the commented line described.
            conv_1_o = nn.Conv2d(inner_nc, 1, kernel_size=3,
                                 stride=1, padding=1)
            conv_2_o = nn.Conv2d(inner_nc, n_output_dim, kernel_size=3,
                                 stride=1, padding=1)
            upnorm_1 = norm_layer(inner_nc, affine=True)
            upnorm_2 = norm_layer(inner_nc, affine=True)
            down = [downconv]
            up_1 = [uprelu1, upconv_1, upnorm_1, nn.ReLU(False), conv_1,
                    nn.ReLU(False), conv_1_o]
            up_2 = [uprelu2, upconv_2, upnorm_2, nn.ReLU(False), conv_2,
                    nn.ReLU(False), conv_2_o]
            self.downconv_model = nn.Sequential(*down)
            self.upconv_model_1 = nn.Sequential(*up_1)
            self.upconv_model_2 = nn.Sequential(*up_2)
            self.submodule = submodule
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
            self.model = nn.Sequential(*model)
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
            self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            down_x = self.downconv_model(x)
            y = self.submodule.forward(down_x)
            y_1 = self.upconv_model_1(y)
            y_2 = self.upconv_model_2(y)
            return y_1, y_2
        else:
            # skip connection: concatenate the block input with its output
            return torch.cat([self.model(x), x], 1)


class SingleUnetGenerator_S(nn.Module):
    """U-Net generator for shading; its blocks also thread a global color
    vector up from the bottleneck."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(SingleUnetGenerator_S, self).__init__()
        self.gpu_ids = gpu_ids
        # build from the bottleneck outward
        unet_block = SingleUnetSkipConnectionBlock_S(ngf * 8, ngf * 8, innermost=True)
        for _ in range(num_downs - 5):
            unet_block = SingleUnetSkipConnectionBlock_S(ngf * 8, ngf * 8, unet_block,
                                                         norm_layer=norm_layer,
                                                         use_dropout=use_dropout)
        unet_block = SingleUnetSkipConnectionBlock_S(ngf * 4, ngf * 8, unet_block, norm_layer=norm_layer)
        unet_block = SingleUnetSkipConnectionBlock_S(ngf * 2, ngf * 4, unet_block, norm_layer=norm_layer)
        unet_block = SingleUnetSkipConnectionBlock_S(ngf, ngf * 2, unet_block, norm_layer=norm_layer)
        unet_block = SingleUnetSkipConnectionBlock_S(output_nc, ngf, unet_block,
                                                     outermost=True, norm_layer=norm_layer)
        self.model = unet_block

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class SingleUnetSkipConnectionBlock_S(nn.Module):
    """U-Net level that, at the innermost block, also predicts a 3-vector
    `color_s` from pooled bottleneck features and passes it outward;
    every forward returns (features, color_s)."""

    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False,
                 norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(SingleUnetSkipConnectionBlock_S, self).__init__()
        self.outermost = outermost
        self.innermost = innermost
        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, False)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(False)
        upnorm = norm_layer(outer_nc, affine=True)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, 1,
                                        kernel_size=4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv]
            model = down + [submodule]
            self.model = nn.Sequential(*model)
            self.up_model = nn.Sequential(*up)
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            # global color head: pool to 2x2, then a strided conv to 1x1.
            # FIX: inner_nc/2 is a float under Python 3 and nn.Conv2d rejects
            # non-integer channel counts — use floor division.
            int_conv = [nn.AdaptiveAvgPool2d((2, 2)),
                        nn.Conv2d(inner_nc, inner_nc // 2, kernel_size=3,
                                  stride=2, padding=1),
                        nn.ReLU(False)]
            # NOTE(review): Linear(256, 3) assumes inner_nc // 2 == 256,
            # i.e. inner_nc == 512 (ngf * 8) — confirm if ngf changes.
            fc = [nn.Linear(256, 3)]
            self.int_conv = nn.Sequential(*int_conv)
            self.fc = nn.Sequential(*fc)
            self.down_model = nn.Sequential(*down)
            self.up_model = nn.Sequential(*up)
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            # encoder+submodule; the decoder half lives in up_model so the
            # color vector can be threaded through between them
            model = down + [submodule]
            if use_dropout:
                upconv_model = up + [nn.Dropout(0.5)]
            else:
                upconv_model = up
            self.model = nn.Sequential(*model)
            self.up_model = nn.Sequential(*upconv_model)

    def forward(self, x):
        if self.outermost:
            y_1, color_s = self.model(x)
            y_1 = self.up_model(y_1)
            return y_1, color_s
        elif self.innermost:
            y_1 = self.down_model(x)
            # predict the global color vector from pooled bottleneck features
            color_s = self.int_conv(y_1)
            color_s = color_s.view(color_s.size(0), -1)
            color_s = self.fc(color_s)
            y_1 = self.up_model(y_1)
            y_1 = torch.cat([y_1, x], 1)
            return y_1, color_s
        else:
            y_1, color_s = self.model(x)
            y_1 = self.up_model(y_1)
            return torch.cat([y_1, x], 1), color_s


class SingleUnetGenerator_R(nn.Module):
    """U-Net generator for reflectance."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(SingleUnetGenerator_R, self).__init__()
        self.gpu_ids = gpu_ids
        # build from the bottleneck outward
        unet_block = SingleUnetSkipConnectionBlock_R(ngf * 8, ngf * 8, innermost=True)
        for _ in range(num_downs - 5):
            unet_block = SingleUnetSkipConnectionBlock_R(ngf * 8, ngf * 8, unet_block,
                                                         norm_layer=norm_layer,
                                                         use_dropout=use_dropout)
        unet_block = SingleUnetSkipConnectionBlock_R(ngf * 4, ngf * 8, unet_block, norm_layer=norm_layer)
        unet_block = SingleUnetSkipConnectionBlock_R(ngf * 2, ngf * 4, unet_block, norm_layer=norm_layer)
        unet_block = SingleUnetSkipConnectionBlock_R(ngf, ngf * 2, unet_block, norm_layer=norm_layer)
        unet_block = SingleUnetSkipConnectionBlock_R(output_nc, ngf, unet_block,
                                                     outermost=True, norm_layer=norm_layer)
        self.model = unet_block

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class SingleUnetSkipConnectionBlock_R(nn.Module):
    """Standard U-Net level for the reflectance decoder; the outermost level
    refines the upsampled features with a 1x1 projection to num_output."""

    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False,
                 norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(SingleUnetSkipConnectionBlock_R, self).__init__()
        self.outermost = outermost
        if outermost:
            # outermost always consumes a 3-channel image
            downconv = nn.Conv2d(3, inner_nc, kernel_size=4,
                                 stride=2, padding=1)
        else:
            downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                                 stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc, affine=True)
        num_output = outer_nc

        if outermost:
            upconv = [uprelu,
                      nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                         kernel_size=4, stride=2, padding=1),
                      nn.ReLU(False),
                      nn.Conv2d(inner_nc, num_output, kernel_size=1)]
            down = [downconv]
            up = upconv
            model = down + [submodule] + up
            self.model = nn.Sequential(*model)
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
            self.model = nn.Sequential(*model)
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
            self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            return torch.cat([self.model(x), x], 1)


class SingleUnetGenerator_L(nn.Module):
    """U-Net generator for lighting; one mid level is built with gird=True so
    it emits a coarse upsampled grid prediction alongside the features."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(SingleUnetGenerator_L, self).__init__()
        self.gpu_ids = gpu_ids
        unet_block = SingleUnetSkipConnectionBlock_L(ngf * 8, ngf * 8, innermost=True)
        for _ in range(num_downs - 5):
            unet_block = SingleUnetSkipConnectionBlock_L(ngf * 8, ngf * 8, unet_block,
                                                         norm_layer=norm_layer,
                                                         use_dropout=use_dropout)
        unet_block = SingleUnetSkipConnectionBlock_L(ngf * 4, ngf * 8, unet_block,
                                                     gird=True, norm_layer=norm_layer)
        unet_block = SingleUnetSkipConnectionBlock_L(ngf * 2, ngf * 4, unet_block, norm_layer=norm_layer)
        unet_block = SingleUnetSkipConnectionBlock_L(ngf, ngf * 2, unet_block, norm_layer=norm_layer)
        unet_block = SingleUnetSkipConnectionBlock_L(output_nc, ngf, unet_block,
                                                     outermost=True, norm_layer=norm_layer)
        self.model = unet_block

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)


class SingleUnetSkipConnectionBlock_L(nn.Module):
    """U-Net level for the lighting decoder; a gird=True mid level predicts a
    grid output (self.grid_y) as a side effect of forward."""

    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, gird=False, innermost=False,
                 norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(SingleUnetSkipConnectionBlock_L, self).__init__()
        self.outermost = outermost
        # FIX: the original assigned `self.gird = grid` — `grid` is undefined
        # (the parameter is spelled `gird`), raising NameError on every
        # construction — and then read `self.gird` in __init__ but `self.grid`
        # in forward(). Unified on self.grid; the (misspelled) parameter name
        # `gird` is kept so existing callers are unaffected.
        self.grid = gird
        if outermost:
            downconv = nn.Conv2d(3, inner_nc, kernel_size=4,
                                 stride=2, padding=1)
        else:
            downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                                 stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc, affine=True)
        num_output = outer_nc

        if outermost:
            upconv = [uprelu,
                      nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                         kernel_size=4, stride=2, padding=1),
                      nn.ReLU(False),
                      nn.Conv2d(inner_nc, 1, kernel_size=1),
                      nn.Sigmoid()]
            down = [downconv]
            up = upconv
            model = down + [submodule] + up
            self.model = nn.Sequential(*model)
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            model = down + up
            self.model = nn.Sequential(*model)
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
            if self.grid:
                # side head predicting a coarse grid from the level output.
                # FIX: inner_nc/4 -> inner_nc // 4 (Python 3 true division
                # gives a float channel count, which nn.Conv2d rejects).
                grid_layer = [nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                                 kernel_size=4, stride=2, padding=1),
                              norm_layer(inner_nc, affine=True),
                              nn.ReLU(False),
                              nn.Conv2d(inner_nc, inner_nc // 4, kernel_size=3, padding=1),
                              nn.ReLU(False),
                              nn.Conv2d(inner_nc // 4, num_output, kernel_size=1)]
                self.grid_layer = nn.Sequential(*grid_layer)
            self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            y = self.model(x)
            # NOTE(review): self.grid_y is set on the gird=True INNER block
            # during its forward, not on this outermost instance — reading it
            # here looks like a latent bug in the original; confirm before use.
            return y, self.grid_y
        else:
            y = self.model(x)
            if self.grid:
                upsample_layer = nn.Upsample(scale_factor=8, mode='bilinear')
                self.grid_y = upsample_layer(self.grid_layer(y))
            return torch.cat([y, x], 1)


class MultiUnetGenerator(nn.Module):
    """Multi-headed U-Net generator built from MultiUnetSkipConnectionBlock."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(MultiUnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        unet_block = MultiUnetSkipConnectionBlock(ngf * 8, ngf * 8, innermost=True)
        for _ in range(num_downs - 5):
            unet_block = MultiUnetSkipConnectionBlock(ngf * 8, ngf * 8, unet_block,
                                                      norm_layer=norm_layer,
                                                      use_dropout=use_dropout)
        unet_block = MultiUnetSkipConnectionBlock(ngf * 4, ngf * 8, unet_block, norm_layer=norm_layer)
        unet_block = MultiUnetSkipConnectionBlock(ngf * 2, ngf * 4, unet_block, norm_layer=norm_layer)
        unet_block = MultiUnetSkipConnectionBlock(ngf, ngf * 2, unet_block, norm_layer=norm_layer)
        unet_block = MultiUnetSkipConnectionBlock(output_nc, ngf, unet_block,
                                                  outermost=True, norm_layer=norm_layer)
        self.model = unet_block

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)

# Defines the submodule with skip connection.
# X -------------------identity---------------------- X # |-- downsampling -- |submodule| -- upsampling --| class MultiUnetSkipConnectionBlock(nn.Module): def __init__(self, outer_nc, inner_nc, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): super(MultiUnetSkipConnectionBlock, self).__init__() self.outermost = outermost self.innermost = innermost # print("we are in mutilUnet") downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1) downrelu = nn.LeakyReLU(0.2, False) downnorm = norm_layer(inner_nc, affine=True) uprelu = nn.ReLU(False) upnorm = norm_layer(outer_nc, affine=True) if outermost: n_output_dim = 3 # upconv = nn.ConvTranspose2d(inner_nc * 2, n_output_dim, # kernel_size=4, stride=2, # padding=1) # downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4, # stride=2, padding=1) # conv1 = nn.Conv2d(inner_nc, 1, kernel_size=5, # stride=1, padding=2) # conv2 = nn.Conv2d(inner_nc, 3, kernel_size=5, # stride=1, padding=2) down = [downconv] # upconv_model_1 = [nn.ReLU(False), nn.ConvTranspose2d(inner_nc * 2, 1, # kernel_size=4, stride=2, padding=1)] # upconv_model_2 = [nn.ReLU(False), nn.ConvTranspose2d(inner_nc * 2, 1, # kernel_size=4, stride=2, padding=1)] # upconv_model_u = [nn.ReLU(False), nn.ConvTranspose2d(inner_nc * 2, inner_nc, # kernel_size=4, stride=2, padding=1), nn.ReLU(False), # nn.Conv2d(inner_nc, 1, kernel_size=1) , nn.Sigmoid()] # self.upconv_model_u = nn.Sequential(*upconv_model_u) upconv_model_1 = [nn.ReLU(False), nn.ConvTranspose2d(inner_nc * 2, inner_nc, kernel_size=4, stride=2, padding=1), norm_layer(inner_nc, affine=True), nn.ReLU(False), nn.Conv2d(inner_nc, 1, kernel_size= 1, bias=True)] upconv_model_2 = [nn.ReLU(False), nn.ConvTranspose2d(inner_nc * 2, inner_nc, kernel_size=4, stride=2, padding=1) , norm_layer(inner_nc, affine=True), nn.ReLU(False), nn.Conv2d(inner_nc, 1, kernel_size= 1, bias=True)] # model = down + [submodule] + up # upconv_model_1 = up_1 # upconv_model_2 
= up_2 elif innermost: # upconv = nn.ConvTranspose2d(inner_nc, outer_nc, # kernel_size=4, stride=2, # padding=1) down = [downrelu, downconv] upconv_model_1 = [nn.ReLU(False), nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1), norm_layer(outer_nc, affine=True)] upconv_model_2 = [nn.ReLU(False), nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1), norm_layer(outer_nc, affine=True)] # for rgb shading # int_conv = [nn.ReLU(False), nn.Conv2d(inner_nc, inner_nc/2, kernel_size=3, # stride=1, padding=1)] # int_conv = [nn.AdaptiveAvgPool2d((2,2)) , nn.ReLU(False), nn.Conv2d(inner_nc, inner_nc/2, kernel_size=3, stride=2, padding=1), nn.ReLU(False)] # int_conv = [nn.AdaptiveAvgPool2d((2,2)) , nn.ReLU(False), nn.Conv2d(inner_nc, inner_nc/2, kernel_size=3, stride=2, padding=1), nn.ReLU(False) \ # nn.Conv2d(inner_nc/2, inner_nc/4, kernel_size=3, stride=1, padding=1), nn.ReLU(False)] # fc = [nn.Linear(256, 3)] # self.int_conv = nn.Sequential(* int_conv) # self.fc = nn.Sequential(* fc) else: # upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, # kernel_size=4, stride=2, # padding=1) down = [downrelu, downconv, downnorm] up_1 = [nn.ReLU(False), nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1), norm_layer(outer_nc, affine=True)] up_2 = [nn.ReLU(False), nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1), norm_layer(outer_nc, affine=True)] if use_dropout: upconv_model_1 = up_1 + [nn.Dropout(0.5)] upconv_model_2 = up_2 + [nn.Dropout(0.5)] # model = down + [submodule] + up + [nn.Dropout(0.5)] else: upconv_model_1 = up_1 upconv_model_2 = up_2 # model = down + [submodule] self.downconv_model = nn.Sequential(*down) self.submodule = submodule self.upconv_model_1 = nn.Sequential(*upconv_model_1) self.upconv_model_2 = nn.Sequential(*upconv_model_2) def forward(self, x): if self.outermost: down_x = self.downconv_model(x) y_1, y_2 = self.submodule.forward(down_x) # y_u = 
self.upconv_model_u(y_1) y_1 = self.upconv_model_1(y_1) y_2 = self.upconv_model_2(y_2) return y_1, y_2 # return self.model(x) elif self.innermost: down_output = self.downconv_model(x) y_1 = self.upconv_model_1(down_output) y_2 = self.upconv_model_2(down_output) y_1 = torch.cat([y_1, x], 1) y_2 = torch.cat([y_2, x], 1) return y_1, y_2 else: down_x = self.downconv_model(x) y_1, y_2 = self.submodule.forward(down_x) y_1 = self.upconv_model_1(y_1) y_2 = self.upconv_model_2(y_2) y_1 = torch.cat([y_1, x], 1) y_2 = torch.cat([y_2, x], 1) return y_1, y_2
[ "torch.mul", "torch.nn.ReLU", "torch.nn.Dropout", "torch.nn.Sequential", "torch.pow", "torch.exp", "numpy.array", "torch.cuda.is_available", "torch.sum", "torch.squeeze", "sys.exit", "numpy.arange", "torch.nn.Sigmoid", "torch.nn.parallel.data_parallel", "torch.mean", "torch.floor", "...
[((1208, 1233), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1231, 1233), False, 'import torch\n'), ((3377, 3402), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3400, 3402), False, 'import torch\n'), ((4437, 4455), 'torch.mm', 'torch.mm', (['S', 'input'], {}), '(S, input)\n', (4445, 4455), False, 'import torch\n'), ((5276, 5408), 'numpy.array', 'np.array', (['[[0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 1, 1, 1, 0.5], [0.5, 1, 1, 1, 0.5], [0.5,\n 1, 1, 1, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5]]'], {}), '([[0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 1, 1, 1, 0.5], [0.5, 1, 1, 1, \n 0.5], [0.5, 1, 1, 1, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5]])\n', (5284, 5408), True, 'import numpy as np\n'), ((5634, 5650), 'numpy.arange', 'np.arange', (['(-1)', '(2)'], {}), '(-1, 2)\n', (5643, 5650), True, 'import numpy as np\n'), ((5663, 5679), 'numpy.arange', 'np.arange', (['(-1)', '(2)'], {}), '(-1, 2)\n', (5672, 5679), True, 'import numpy as np\n'), ((5705, 5722), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5716, 5722), True, 'import numpy as np\n'), ((18155, 18182), 'torch.sum', 'torch.sum', (['mask[:, 0, :, :]'], {}), '(mask[:, 0, :, :])\n', (18164, 18182), False, 'import torch\n'), ((18417, 18432), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (18426, 18432), False, 'import torch\n'), ((18490, 18539), 'torch.mul', 'torch.mul', (['mask[:, :, :, 0:-2]', 'mask[:, :, :, 2:]'], {}), '(mask[:, :, :, 0:-2], mask[:, :, :, 2:])\n', (18499, 18539), False, 'import torch\n'), ((18803, 18852), 'torch.mul', 'torch.mul', (['mask[:, :, 0:-2, :]', 'mask[:, :, 2:, :]'], {}), '(mask[:, :, 0:-2, :], mask[:, :, 2:, :])\n', (18812, 18852), False, 'import torch\n'), ((19281, 19308), 'torch.sum', 'torch.sum', (['mask[:, 0, :, :]'], {}), '(mask[:, 0, :, :])\n', (19290, 19308), False, 'import torch\n'), ((19363, 19412), 'torch.mul', 'torch.mul', (['mask[:, :, :, 0:-2]', 'mask[:, :, :, 2:]'], {}), '(mask[:, :, :, 0:-2], mask[:, :, :, 2:])\n', (19372, 
19412), False, 'import torch\n'), ((19631, 19680), 'torch.mul', 'torch.mul', (['mask[:, :, 0:-2, :]', 'mask[:, :, 2:, :]'], {}), '(mask[:, :, 0:-2, :], mask[:, :, 2:, :])\n', (19640, 19680), False, 'import torch\n'), ((20023, 20052), 'torch.squeeze', 'torch.squeeze', (['uncertainty', '(1)'], {}), '(uncertainty, 1)\n', (20036, 20052), False, 'import torch\n'), ((20220, 20247), 'torch.sum', 'torch.sum', (['mask[:, 0, :, :]'], {}), '(mask[:, 0, :, :])\n', (20229, 20247), False, 'import torch\n'), ((23953, 23968), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (23962, 23968), False, 'import torch\n'), ((24031, 24056), 'torch.mul', 'torch.mul', (['log_diff', 'mask'], {}), '(log_diff, mask)\n', (24040, 24056), False, 'import torch\n'), ((24296, 24311), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (24305, 24311), False, 'import torch\n'), ((24374, 24399), 'torch.mul', 'torch.mul', (['log_diff', 'mask'], {}), '(log_diff, mask)\n', (24383, 24399), False, 'import torch\n'), ((24422, 24483), 'torch.pow', 'torch.pow', (['(log_diff[:, :, 0:-2, :] - log_diff[:, :, 2:, :])', '(2)'], {}), '(log_diff[:, :, 0:-2, :] - log_diff[:, :, 2:, :], 2)\n', (24431, 24483), False, 'import torch\n'), ((24494, 24543), 'torch.mul', 'torch.mul', (['mask[:, :, 0:-2, :]', 'mask[:, :, 2:, :]'], {}), '(mask[:, :, 0:-2, :], mask[:, :, 2:, :])\n', (24503, 24543), False, 'import torch\n'), ((24559, 24588), 'torch.mul', 'torch.mul', (['v_gradient', 'v_mask'], {}), '(v_gradient, v_mask)\n', (24568, 24588), False, 'import torch\n'), ((24611, 24672), 'torch.pow', 'torch.pow', (['(log_diff[:, :, :, 0:-2] - log_diff[:, :, :, 2:])', '(2)'], {}), '(log_diff[:, :, :, 0:-2] - log_diff[:, :, :, 2:], 2)\n', (24620, 24672), False, 'import torch\n'), ((24683, 24732), 'torch.mul', 'torch.mul', (['mask[:, :, :, 0:-2]', 'mask[:, :, :, 2:]'], {}), '(mask[:, :, :, 0:-2], mask[:, :, :, 2:])\n', (24692, 24732), False, 'import torch\n'), ((24748, 24777), 'torch.mul', 'torch.mul', (['h_gradient', 'h_mask'], 
{}), '(h_gradient, h_mask)\n', (24757, 24777), False, 'import torch\n'), ((25002, 25017), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (25011, 25017), False, 'import torch\n'), ((25082, 25107), 'torch.mul', 'torch.mul', (['log_diff', 'mask'], {}), '(log_diff, mask)\n', (25091, 25107), False, 'import torch\n'), ((25130, 25188), 'torch.abs', 'torch.abs', (['(log_diff[:, :, 0:-2, :] - log_diff[:, :, 2:, :])'], {}), '(log_diff[:, :, 0:-2, :] - log_diff[:, :, 2:, :])\n', (25139, 25188), False, 'import torch\n'), ((25200, 25249), 'torch.mul', 'torch.mul', (['mask[:, :, 0:-2, :]', 'mask[:, :, 2:, :]'], {}), '(mask[:, :, 0:-2, :], mask[:, :, 2:, :])\n', (25209, 25249), False, 'import torch\n'), ((25265, 25294), 'torch.mul', 'torch.mul', (['v_gradient', 'v_mask'], {}), '(v_gradient, v_mask)\n', (25274, 25294), False, 'import torch\n'), ((25317, 25375), 'torch.abs', 'torch.abs', (['(log_diff[:, :, :, 0:-2] - log_diff[:, :, :, 2:])'], {}), '(log_diff[:, :, :, 0:-2] - log_diff[:, :, :, 2:])\n', (25326, 25375), False, 'import torch\n'), ((25387, 25436), 'torch.mul', 'torch.mul', (['mask[:, :, :, 0:-2]', 'mask[:, :, :, 2:]'], {}), '(mask[:, :, :, 0:-2], mask[:, :, :, 2:])\n', (25396, 25436), False, 'import torch\n'), ((25452, 25481), 'torch.mul', 'torch.mul', (['h_gradient', 'h_mask'], {}), '(h_gradient, h_mask)\n', (25461, 25481), False, 'import torch\n'), ((25696, 25711), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (25705, 25711), False, 'import torch\n'), ((25883, 25898), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (25892, 25898), False, 'import torch\n'), ((26092, 26107), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (26101, 26107), False, 'import torch\n'), ((26127, 26153), 'torch.abs', 'torch.abs', (['(prediction - gt)'], {}), '(prediction - gt)\n', (26136, 26153), False, 'import torch\n'), ((26172, 26201), 'torch.pow', 'torch.pow', (['(prediction - gt)', '(2)'], {}), '(prediction - gt, 2)\n', (26181, 26201), False, 'import torch\n'), 
((39671, 39709), 'torch.gels', 'torch.gels', (['gt_vec.data', 'pred_vec.data'], {}), '(gt_vec.data, pred_vec.data)\n', (39681, 39709), False, 'import torch\n'), ((41204, 41242), 'torch.gels', 'torch.gels', (['gt_vec.data', 'pred_vec.data'], {}), '(gt_vec.data, pred_vec.data)\n', (41214, 41242), False, 'import torch\n'), ((41591, 41614), 'torch.mul', 'torch.mul', (['weight', 'mask'], {}), '(weight, mask)\n', (41600, 41614), False, 'import torch\n'), ((58539, 58575), 'torch.div', 'torch.div', (['prediction_n', 'normal_norm'], {}), '(prediction_n, normal_norm)\n', (58548, 58575), False, 'import torch\n'), ((61539, 61604), 'torch.nn.Conv2d', 'nn.Conv2d', (['outer_nc', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1)\n', (61548, 61604), True, 'import torch.nn as nn\n'), ((61653, 61677), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(False)'], {}), '(0.2, False)\n', (61665, 61677), True, 'import torch.nn as nn\n'), ((61748, 61762), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (61755, 61762), True, 'import torch.nn as nn\n'), ((66388, 66453), 'torch.nn.Conv2d', 'nn.Conv2d', (['outer_nc', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1)\n', (66397, 66453), True, 'import torch.nn as nn\n'), ((66502, 66526), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(False)'], {}), '(0.2, False)\n', (66514, 66526), True, 'import torch.nn as nn\n'), ((66597, 66611), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (66604, 66611), True, 'import torch.nn as nn\n'), ((71021, 71044), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (71033, 71044), True, 'import torch.nn as nn\n'), ((71115, 71128), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (71122, 71128), True, 'import torch.nn as nn\n'), ((74898, 74921), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', 
(['(0.2)', '(True)'], {}), '(0.2, True)\n', (74910, 74921), True, 'import torch.nn as nn\n'), ((74992, 75005), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (74999, 75005), True, 'import torch.nn as nn\n'), ((79461, 79526), 'torch.nn.Conv2d', 'nn.Conv2d', (['outer_nc', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1)\n', (79470, 79526), True, 'import torch.nn as nn\n'), ((79575, 79599), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(False)'], {}), '(0.2, False)\n', (79587, 79599), True, 'import torch.nn as nn\n'), ((79670, 79684), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (79677, 79684), True, 'import torch.nn as nn\n'), ((84332, 84352), 'torch.nn.Sequential', 'nn.Sequential', (['*down'], {}), '(*down)\n', (84345, 84352), True, 'import torch.nn as nn\n'), ((84418, 84448), 'torch.nn.Sequential', 'nn.Sequential', (['*upconv_model_1'], {}), '(*upconv_model_1)\n', (84431, 84448), True, 'import torch.nn as nn\n'), ((84479, 84509), 'torch.nn.Sequential', 'nn.Sequential', (['*upconv_model_2'], {}), '(*upconv_model_2)\n', (84492, 84509), True, 'import torch.nn as nn\n'), ((6018, 6043), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(1)'], {}), '(1)\n', (6040, 6043), False, 'import torch\n'), ((9229, 9254), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(1)'], {}), '(1)\n', (9251, 9254), False, 'import torch\n'), ((9320, 9345), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(1)'], {}), '(1)\n', (9342, 9345), False, 'import torch\n'), ((14828, 14878), 'torch.autograd.Variable', 'Variable', (['judgements_eq[:, 4]'], {'requires_grad': '(False)'}), '(judgements_eq[:, 4], requires_grad=False)\n', (14836, 14878), False, 'from torch.autograd import Variable\n'), ((15417, 15444), 'torch.mean', 'torch.mean', (['prediction_R', '(0)'], {}), '(prediction_R, 0)\n', (15427, 15444), False, 'import torch\n'), ((16859, 16911), 'torch.autograd.Variable', 
'Variable', (['judgements_ineq[:, 4]'], {'requires_grad': '(False)'}), '(judgements_ineq[:, 4], requires_grad=False)\n', (16867, 16911), False, 'from torch.autograd import Variable\n'), ((17030, 17043), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (17037, 17043), True, 'import torch.nn as nn\n'), ((17821, 17842), 'torch.pow', 'torch.pow', (['(S - 0.5)', '(2)'], {}), '(S - 0.5, 2)\n', (17830, 17842), False, 'import torch\n'), ((18708, 18745), 'torch.abs', 'torch.abs', (['(h_gradient - h_gradient_gt)'], {}), '(h_gradient - h_gradient_gt)\n', (18717, 18745), False, 'import torch\n'), ((19021, 19058), 'torch.abs', 'torch.abs', (['(v_gradient - v_gradient_gt)'], {}), '(v_gradient - v_gradient_gt)\n', (19030, 19058), False, 'import torch\n'), ((19086, 19112), 'torch.sum', 'torch.sum', (['h_gradient_loss'], {}), '(h_gradient_loss)\n', (19095, 19112), False, 'import torch\n'), ((19115, 19141), 'torch.sum', 'torch.sum', (['v_gradient_loss'], {}), '(v_gradient_loss)\n', (19124, 19141), False, 'import torch\n'), ((20663, 20688), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(1)'], {}), '(1)\n', (20685, 20688), False, 'import torch\n'), ((22545, 22570), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(1)'], {}), '(1)\n', (22567, 22570), False, 'import torch\n'), ((24804, 24825), 'torch.sum', 'torch.sum', (['h_gradient'], {}), '(h_gradient)\n', (24813, 24825), False, 'import torch\n'), ((24828, 24849), 'torch.sum', 'torch.sum', (['v_gradient'], {}), '(v_gradient)\n', (24837, 24849), False, 'import torch\n'), ((25745, 25773), 'torch.abs', 'torch.abs', (['(prediction_n - gt)'], {}), '(prediction_n - gt)\n', (25754, 25773), False, 'import torch\n'), ((25790, 25805), 'torch.sum', 'torch.sum', (['diff'], {}), '(diff)\n', (25799, 25805), False, 'import torch\n'), ((25933, 25964), 'torch.pow', 'torch.pow', (['(prediction_n - gt)', '(2)'], {}), '(prediction_n - gt, 2)\n', (25942, 25964), False, 'import torch\n'), ((25980, 25995), 'torch.sum', 'torch.sum', 
(['diff'], {}), '(diff)\n', (25989, 25995), False, 'import torch\n'), ((34367, 34392), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(1)'], {}), '(1)\n', (34389, 34392), False, 'import torch\n'), ((40932, 40952), 'torch.sum', 'torch.sum', (['mask.data'], {}), '(mask.data)\n', (40941, 40952), False, 'import torch\n'), ((44813, 44840), 'torch.mean', 'torch.mean', (['prediction_R', '(0)'], {}), '(prediction_R, 0)\n', (44823, 44840), False, 'import torch\n'), ((46055, 46068), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (46062, 46068), True, 'import torch.nn as nn\n'), ((57794, 57865), 'skimage.transform.resize', 'resize', (['prediction_R_np', '(o_h[i], o_w[i])'], {'order': '(1)', 'preserve_range': '(True)'}), '(prediction_R_np, (o_h[i], o_w[i]), order=1, preserve_range=True)\n', (57800, 57865), False, 'from skimage.transform import resize\n'), ((60961, 61019), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, input, self.gpu_ids)\n', (60986, 61019), True, 'import torch.nn as nn\n'), ((61888, 61902), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (61895, 61902), True, 'import torch.nn as nn\n'), ((61925, 61939), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (61932, 61939), True, 'import torch.nn as nn\n'), ((61963, 62041), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, inner_nc, kernel_size=4, stride=2, padding=1)\n', (61981, 62041), True, 'import torch.nn as nn\n'), ((62145, 62223), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, inner_nc, kernel_size=4, stride=2, padding=1)\n', (62163, 62223), True, 'import torch.nn as nn\n'), ((62326, 62391), 'torch.nn.Conv2d', 'nn.Conv2d', (['inner_nc', 'inner_nc'], {'kernel_size': '(3)', 
'stride': '(1)', 'padding': '(1)'}), '(inner_nc, inner_nc, kernel_size=3, stride=1, padding=1)\n', (62335, 62391), True, 'import torch.nn as nn\n'), ((62454, 62519), 'torch.nn.Conv2d', 'nn.Conv2d', (['inner_nc', 'inner_nc'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(inner_nc, inner_nc, kernel_size=3, stride=1, padding=1)\n', (62463, 62519), True, 'import torch.nn as nn\n'), ((62700, 62769), 'torch.nn.Conv2d', 'nn.Conv2d', (['inner_nc', 'n_output_dim'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(inner_nc, n_output_dim, kernel_size=3, stride=1, padding=1)\n', (62709, 62769), True, 'import torch.nn as nn\n'), ((63231, 63251), 'torch.nn.Sequential', 'nn.Sequential', (['*down'], {}), '(*down)\n', (63244, 63251), True, 'import torch.nn as nn\n'), ((63286, 63306), 'torch.nn.Sequential', 'nn.Sequential', (['*up_1'], {}), '(*up_1)\n', (63299, 63306), True, 'import torch.nn as nn\n'), ((63341, 63361), 'torch.nn.Sequential', 'nn.Sequential', (['*up_2'], {}), '(*up_2)\n', (63354, 63361), True, 'import torch.nn as nn\n'), ((65915, 65973), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, input, self.gpu_ids)\n', (65940, 65973), True, 'import torch.nn as nn\n'), ((66707, 66778), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', '(1)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, 1, kernel_size=4, stride=2, padding=1)\n', (66725, 66778), True, 'import torch.nn as nn\n'), ((66989, 67010), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (67002, 67010), True, 'import torch.nn as nn\n'), ((67039, 67057), 'torch.nn.Sequential', 'nn.Sequential', (['*up'], {}), '(*up)\n', (67052, 67057), True, 'import torch.nn as nn\n'), ((70318, 70376), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, input, self.gpu_ids)\n', (70343, 70376), 
True, 'import torch.nn as nn\n'), ((70781, 70839), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(3, inner_nc, kernel_size=4, stride=2, padding=1)\n', (70790, 70839), True, 'import torch.nn as nn\n'), ((70906, 70971), 'torch.nn.Conv2d', 'nn.Conv2d', (['outer_nc', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1)\n', (70915, 70971), True, 'import torch.nn as nn\n'), ((71806, 71827), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (71819, 71827), True, 'import torch.nn as nn\n'), ((74158, 74216), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, input, self.gpu_ids)\n', (74183, 74216), True, 'import torch.nn as nn\n'), ((74658, 74716), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(3, inner_nc, kernel_size=4, stride=2, padding=1)\n', (74667, 74716), True, 'import torch.nn as nn\n'), ((74783, 74848), 'torch.nn.Conv2d', 'nn.Conv2d', (['outer_nc', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1)\n', (74792, 74848), True, 'import torch.nn as nn\n'), ((75688, 75709), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (75701, 75709), True, 'import torch.nn as nn\n'), ((77518, 77538), 'torch.cat', 'torch.cat', (['[y, x]', '(1)'], {}), '([y, x], 1)\n', (77527, 77538), False, 'import torch\n'), ((78769, 78827), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, input, self.gpu_ids)\n', (78794, 78827), True, 'import torch.nn as nn\n'), ((6841, 6856), 'torch.mul', 'torch.mul', (['p', 'p'], {}), '(p, p)\n', (6850, 6856), False, 'import torch\n'), ((7558, 7580), 'torch.mul', 'torch.mul', 
(['Snp', 'Snp_12'], {}), '(Snp, Snp_12)\n', (7567, 7580), False, 'import torch\n'), ((14499, 14548), 'torch.autograd.Variable', 'Variable', (['point_1_idx_linaer'], {'requires_grad': '(False)'}), '(point_1_idx_linaer, requires_grad=False)\n', (14507, 14548), False, 'from torch.autograd import Variable\n'), ((14608, 14657), 'torch.autograd.Variable', 'Variable', (['point_2_idx_linear'], {'requires_grad': '(False)'}), '(point_2_idx_linear, requires_grad=False)\n', (14616, 14657), False, 'from torch.autograd import Variable\n'), ((18317, 18338), 'torch.sum', 'torch.sum', (['angle_loss'], {}), '(angle_loss)\n', (18326, 18338), False, 'import torch\n'), ((19457, 19522), 'torch.mul', 'torch.mul', (['prediction_n[:, :, :, 0:-2]', 'prediction_n[:, :, :, 2:]'], {}), '(prediction_n[:, :, :, 0:-2], prediction_n[:, :, :, 2:])\n', (19466, 19522), False, 'import torch\n'), ((19552, 19573), 'torch.sum', 'torch.sum', (['h_gradient'], {}), '(h_gradient)\n', (19561, 19573), False, 'import torch\n'), ((19725, 19790), 'torch.mul', 'torch.mul', (['prediction_n[:, :, 0:-2, :]', 'prediction_n[:, :, 2:, :]'], {}), '(prediction_n[:, :, 0:-2, :], prediction_n[:, :, 2:, :])\n', (19734, 19790), False, 'import torch\n'), ((19820, 19841), 'torch.sum', 'torch.sum', (['v_gradient'], {}), '(v_gradient)\n', (19829, 19841), False, 'import torch\n'), ((20392, 20430), 'torch.pow', 'torch.pow', (['(uncertainty - angle_diff)', '(2)'], {}), '(uncertainty - angle_diff, 2)\n', (20401, 20430), False, 'import torch\n'), ((22023, 22053), 'torch.mul', 'torch.mul', (['mask_N', 'mask_center'], {}), '(mask_N, mask_center)\n', (22032, 22053), False, 'import torch\n'), ((24081, 24103), 'torch.pow', 'torch.pow', (['log_diff', '(2)'], {}), '(log_diff, 2)\n', (24090, 24103), False, 'import torch\n'), ((24131, 24150), 'torch.sum', 'torch.sum', (['log_diff'], {}), '(log_diff)\n', (24140, 24150), False, 'import torch\n'), ((25508, 25529), 'torch.sum', 'torch.sum', (['h_gradient'], {}), '(h_gradient)\n', (25517, 25529), 
False, 'import torch\n'), ((25532, 25553), 'torch.sum', 'torch.sum', (['v_gradient'], {}), '(v_gradient)\n', (25541, 25553), False, 'import torch\n'), ((34735, 34764), 'torch.mul', 'torch.mul', (['new_mask', 'log_diff'], {}), '(new_mask, log_diff)\n', (34744, 34764), False, 'import torch\n'), ((34785, 34804), 'torch.sum', 'torch.sum', (['new_mask'], {}), '(new_mask)\n', (34794, 34804), False, 'import torch\n'), ((36890, 36909), 'torch.log', 'torch.log', (['linear_I'], {}), '(linear_I)\n', (36899, 36909), False, 'import torch\n'), ((43907, 43956), 'torch.autograd.Variable', 'Variable', (['point_1_idx_linear'], {'requires_grad': '(False)'}), '(point_1_idx_linear, requires_grad=False)\n', (43915, 43956), False, 'from torch.autograd import Variable\n'), ((44016, 44065), 'torch.autograd.Variable', 'Variable', (['point_2_idx_linear'], {'requires_grad': '(False)'}), '(point_2_idx_linear, requires_grad=False)\n', (44024, 44065), False, 'from torch.autograd import Variable\n'), ((48232, 48257), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(1)'], {}), '(1)\n', (48254, 48257), False, 'import torch\n'), ((57281, 57313), 'numpy.exp', 'np.exp', (['(prediction_R_np * 0.4545)'], {}), '(prediction_R_np * 0.4545)\n', (57287, 57313), True, 'import numpy as np\n'), ((58418, 58444), 'torch.pow', 'torch.pow', (['prediction_n', '(2)'], {}), '(prediction_n, 2)\n', (58427, 58444), False, 'import torch\n'), ((63047, 63061), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (63054, 63061), True, 'import torch.nn as nn\n'), ((63071, 63085), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (63078, 63085), True, 'import torch.nn as nn\n'), ((63146, 63160), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (63153, 63160), True, 'import torch.nn as nn\n'), ((63170, 63184), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (63177, 63184), True, 'import torch.nn as nn\n'), ((63447, 63521), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', 
(['inner_nc', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1)\n', (63465, 63521), True, 'import torch.nn as nn\n'), ((63739, 63760), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (63752, 63760), True, 'import torch.nn as nn\n'), ((63797, 63875), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)\n', (63815, 63875), True, 'import torch.nn as nn\n'), ((64236, 64257), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (64249, 64257), True, 'import torch.nn as nn\n'), ((67104, 67178), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['inner_nc', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1)\n', (67122, 67178), True, 'import torch.nn as nn\n'), ((67578, 67602), 'torch.nn.Sequential', 'nn.Sequential', (['*int_conv'], {}), '(*int_conv)\n', (67591, 67602), True, 'import torch.nn as nn\n'), ((67627, 67645), 'torch.nn.Sequential', 'nn.Sequential', (['*fc'], {}), '(*fc)\n', (67640, 67645), True, 'import torch.nn as nn\n'), ((67678, 67698), 'torch.nn.Sequential', 'nn.Sequential', (['*down'], {}), '(*down)\n', (67691, 67698), True, 'import torch.nn as nn\n'), ((67727, 67745), 'torch.nn.Sequential', 'nn.Sequential', (['*up'], {}), '(*up)\n', (67740, 67745), True, 'import torch.nn as nn\n'), ((67782, 67860), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)\n', (67800, 67860), True, 'import torch.nn as nn\n'), ((68360, 68381), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (68373, 68381), True, 'import torch.nn as nn\n'), ((68410, 68438), 
'torch.nn.Sequential', 'nn.Sequential', (['*upconv_model'], {}), '(*upconv_model)\n', (68423, 68438), True, 'import torch.nn as nn\n'), ((68864, 68886), 'torch.cat', 'torch.cat', (['[y_1, x]', '(1)'], {}), '([y_1, x], 1)\n', (68873, 68886), False, 'import torch\n'), ((71453, 71531), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, inner_nc, kernel_size=4, stride=2, padding=1)\n', (71471, 71531), True, 'import torch.nn as nn\n'), ((71577, 71591), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (71584, 71591), True, 'import torch.nn as nn\n'), ((71634, 71680), 'torch.nn.Conv2d', 'nn.Conv2d', (['inner_nc', 'num_output'], {'kernel_size': '(1)'}), '(inner_nc, num_output, kernel_size=1)\n', (71643, 71680), True, 'import torch.nn as nn\n'), ((71873, 71947), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['inner_nc', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1)\n', (71891, 71947), True, 'import torch.nn as nn\n'), ((72165, 72186), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (72178, 72186), True, 'import torch.nn as nn\n'), ((72223, 72301), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)\n', (72241, 72301), True, 'import torch.nn as nn\n'), ((72662, 72683), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (72675, 72683), True, 'import torch.nn as nn\n'), ((75330, 75408), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, inner_nc, kernel_size=4, stride=2, padding=1)\n', (75348, 75408), True, 'import torch.nn as nn\n'), ((75454, 75468), 'torch.nn.ReLU', 
'nn.ReLU', (['(False)'], {}), '(False)\n', (75461, 75468), True, 'import torch.nn as nn\n'), ((75511, 75548), 'torch.nn.Conv2d', 'nn.Conv2d', (['inner_nc', '(1)'], {'kernel_size': '(1)'}), '(inner_nc, 1, kernel_size=1)\n', (75520, 75548), True, 'import torch.nn as nn\n'), ((75550, 75562), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (75560, 75562), True, 'import torch.nn as nn\n'), ((75755, 75829), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['inner_nc', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1)\n', (75773, 75829), True, 'import torch.nn as nn\n'), ((76057, 76078), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (76070, 76078), True, 'import torch.nn as nn\n'), ((76115, 76193), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)\n', (76133, 76193), True, 'import torch.nn as nn\n'), ((77073, 77094), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (77086, 77094), True, 'import torch.nn as nn\n'), ((77387, 77431), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(8)', 'mode': '"""bilinear"""'}), "(scale_factor=8, mode='bilinear')\n", (77398, 77431), True, 'import torch.nn as nn\n'), ((81068, 81082), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (81075, 81082), True, 'import torch.nn as nn\n'), ((81084, 81162), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, inner_nc, kernel_size=4, stride=2, padding=1)\n', (81102, 81162), True, 'import torch.nn as nn\n'), ((81239, 81253), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (81246, 81253), True, 'import torch.nn as nn\n'), ((81288, 81336), 'torch.nn.Conv2d', 'nn.Conv2d', 
(['inner_nc', '(1)'], {'kernel_size': '(1)', 'bias': '(True)'}), '(inner_nc, 1, kernel_size=1, bias=True)\n', (81297, 81336), True, 'import torch.nn as nn\n'), ((81369, 81383), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (81376, 81383), True, 'import torch.nn as nn\n'), ((81385, 81463), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, inner_nc, kernel_size=4, stride=2, padding=1)\n', (81403, 81463), True, 'import torch.nn as nn\n'), ((81541, 81555), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (81548, 81555), True, 'import torch.nn as nn\n'), ((81590, 81638), 'torch.nn.Conv2d', 'nn.Conv2d', (['inner_nc', '(1)'], {'kernel_size': '(1)', 'bias': '(True)'}), '(inner_nc, 1, kernel_size=1, bias=True)\n', (81599, 81638), True, 'import torch.nn as nn\n'), ((85072, 85094), 'torch.cat', 'torch.cat', (['[y_1, x]', '(1)'], {}), '([y_1, x], 1)\n', (85081, 85094), False, 'import torch\n'), ((85113, 85135), 'torch.cat', 'torch.cat', (['[y_2, x]', '(1)'], {}), '([y_2, x], 1)\n', (85122, 85135), False, 'import torch\n'), ((85381, 85403), 'torch.cat', 'torch.cat', (['[y_1, x]', '(1)'], {}), '([y_1, x], 1)\n', (85390, 85403), False, 'import torch\n'), ((85422, 85444), 'torch.cat', 'torch.cat', (['[y_2, x]', '(1)'], {}), '([y_2, x], 1)\n', (85431, 85444), False, 'import torch\n'), ((8623, 8640), 'torch.mul', 'torch.mul', (['p_R', 'S'], {}), '(p_R, S)\n', (8632, 8640), False, 'import torch\n'), ((13484, 13523), 'torch.floor', 'torch.floor', (['(judgements_eq[:, 0] * rows)'], {}), '(judgements_eq[:, 0] * rows)\n', (13495, 13523), False, 'import torch\n'), ((13548, 13587), 'torch.floor', 'torch.floor', (['(judgements_eq[:, 2] * rows)'], {}), '(judgements_eq[:, 2] * rows)\n', (13559, 13587), False, 'import torch\n'), ((15567, 15608), 'torch.floor', 'torch.floor', (['(judgements_ineq[:, 0] * rows)'], {}), '(judgements_ineq[:, 0] * rows)\n', (15578, 
15608), False, 'import torch\n'), ((15633, 15674), 'torch.floor', 'torch.floor', (['(judgements_ineq[:, 2] * rows)'], {}), '(judgements_ineq[:, 2] * rows)\n', (15644, 15674), False, 'import torch\n'), ((18261, 18292), 'torch.mul', 'torch.mul', (['prediction_n', 'normal'], {}), '(prediction_n, normal)\n', (18270, 18292), False, 'import torch\n'), ((20280, 20311), 'torch.mul', 'torch.mul', (['prediction_n', 'normal'], {}), '(prediction_n, normal)\n', (20289, 20311), False, 'import torch\n'), ((22177, 22205), 'torch.pow', 'torch.pow', (['(R_center - R_N)', '(2)'], {}), '(R_center - R_N, 2)\n', (22186, 22205), False, 'import torch\n'), ((22253, 22271), 'torch.mean', 'torch.mean', (['r_diff'], {}), '(r_diff)\n', (22263, 22271), False, 'import torch\n'), ((23646, 23691), 'torch.autograd.Variable', 'Variable', (['albedo_weights'], {'requires_grad': '(False)'}), '(albedo_weights, requires_grad=False)\n', (23654, 23691), False, 'from torch.autograd import Variable\n'), ((23695, 23720), 'torch.abs', 'torch.abs', (['(R_center - R_N)'], {}), '(R_center - R_N)\n', (23704, 23720), False, 'import torch\n'), ((23786, 23804), 'torch.mean', 'torch.mean', (['r_diff'], {}), '(r_diff)\n', (23796, 23804), False, 'import torch\n'), ((26342, 26369), 'torch.mul', 'torch.mul', (['mask_L2', 'diff_L2'], {}), '(mask_L2, diff_L2)\n', (26351, 26369), False, 'import torch\n'), ((26416, 26443), 'torch.mul', 'torch.mul', (['mask_L1', 'diff_L1'], {}), '(mask_L1, diff_L1)\n', (26425, 26443), False, 'import torch\n'), ((44552, 44593), 'torch.pow', 'torch.pow', (['(points_1_vec - points_2_vec)', '(2)'], {}), '(points_1_vec - points_2_vec, 2)\n', (44561, 44593), False, 'import torch\n'), ((48057, 48080), 'torch.exp', 'torch.exp', (['prediction_R'], {}), '(prediction_R)\n', (48066, 48080), False, 'import torch\n'), ((48136, 48159), 'torch.exp', 'torch.exp', (['prediction_S'], {}), '(prediction_S)\n', (48145, 48159), False, 'import torch\n'), ((67398, 67426), 'torch.nn.AdaptiveAvgPool2d', 
'nn.AdaptiveAvgPool2d', (['(2, 2)'], {}), '((2, 2))\n', (67418, 67426), True, 'import torch.nn as nn\n'), ((67427, 67496), 'torch.nn.Conv2d', 'nn.Conv2d', (['inner_nc', '(inner_nc / 2)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc, inner_nc / 2, kernel_size=3, stride=2, padding=1)\n', (67436, 67496), True, 'import torch.nn as nn\n'), ((67496, 67510), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (67503, 67510), True, 'import torch.nn as nn\n'), ((67531, 67548), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(3)'], {}), '(256, 3)\n', (67540, 67548), True, 'import torch.nn as nn\n'), ((69032, 69054), 'torch.cat', 'torch.cat', (['[y_1, x]', '(1)'], {}), '([y_1, x], 1)\n', (69041, 69054), False, 'import torch\n'), ((77020, 77046), 'torch.nn.Sequential', 'nn.Sequential', (['*grid_layer'], {}), '(*grid_layer)\n', (77033, 77046), True, 'import torch.nn as nn\n'), ((82037, 82051), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (82044, 82051), True, 'import torch.nn as nn\n'), ((82053, 82127), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['inner_nc', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1)\n', (82071, 82127), True, 'import torch.nn as nn\n'), ((82274, 82288), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (82281, 82288), True, 'import torch.nn as nn\n'), ((82290, 82364), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['inner_nc', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1)\n', (82308, 82364), True, 'import torch.nn as nn\n'), ((83496, 83510), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (83503, 83510), True, 'import torch.nn as nn\n'), ((83512, 83590), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, 
outer_nc, kernel_size=4, stride=2, padding=1)\n', (83530, 83590), True, 'import torch.nn as nn\n'), ((83727, 83741), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (83734, 83741), True, 'import torch.nn as nn\n'), ((83743, 83821), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)\n', (83761, 83821), True, 'import torch.nn as nn\n'), ((8122, 8137), 'torch.mul', 'torch.mul', (['R', 'S'], {}), '(R, S)\n', (8131, 8137), False, 'import torch\n'), ((11717, 11750), 'torch.pow', 'torch.pow', (['(tau - (l2_m - l1_m))', '(2)'], {}), '(tau - (l2_m - l1_m), 2)\n', (11726, 11750), False, 'import torch\n'), ((13821, 13860), 'torch.floor', 'torch.floor', (['(judgements_eq[:, 1] * cols)'], {}), '(judgements_eq[:, 1] * cols)\n', (13832, 13860), False, 'import torch\n'), ((13889, 13928), 'torch.floor', 'torch.floor', (['(judgements_eq[:, 3] * cols)'], {}), '(judgements_eq[:, 3] * cols)\n', (13900, 13928), False, 'import torch\n'), ((15158, 15199), 'torch.pow', 'torch.pow', (['(points_1_vec - points_2_vec)', '(2)'], {}), '(points_1_vec - points_2_vec, 2)\n', (15167, 15199), False, 'import torch\n'), ((16048, 16089), 'torch.floor', 'torch.floor', (['(judgements_ineq[:, 1] * cols)'], {}), '(judgements_ineq[:, 1] * cols)\n', (16059, 16089), False, 'import torch\n'), ((16118, 16159), 'torch.floor', 'torch.floor', (['(judgements_ineq[:, 3] * cols)'], {}), '(judgements_ineq[:, 3] * cols)\n', (16129, 16159), False, 'import torch\n'), ((16649, 16698), 'torch.autograd.Variable', 'Variable', (['point_1_idx_linaer'], {'requires_grad': '(False)'}), '(point_1_idx_linaer, requires_grad=False)\n', (16657, 16698), False, 'from torch.autograd import Variable\n'), ((16774, 16823), 'torch.autograd.Variable', 'Variable', (['point_2_idx_linear'], {'requires_grad': '(False)'}), '(point_2_idx_linear, requires_grad=False)\n', (16782, 16823), 
False, 'from torch.autograd import Variable\n'), ((26221, 26243), 'torch.le', 'torch.le', (['diff_L1', 'tau'], {}), '(diff_L1, tau)\n', (26229, 26243), False, 'import torch\n'), ((34838, 34867), 'torch.pow', 'torch.pow', (['masked_log_diff', '(2)'], {}), '(masked_log_diff, 2)\n', (34847, 34867), False, 'import torch\n'), ((34903, 34929), 'torch.sum', 'torch.sum', (['masked_log_diff'], {}), '(masked_log_diff)\n', (34912, 34929), False, 'import torch\n'), ((45749, 45798), 'torch.autograd.Variable', 'Variable', (['point_1_idx_linear'], {'requires_grad': '(False)'}), '(point_1_idx_linear, requires_grad=False)\n', (45757, 45798), False, 'from torch.autograd import Variable\n'), ((45874, 45923), 'torch.autograd.Variable', 'Variable', (['point_2_idx_linear'], {'requires_grad': '(False)'}), '(point_2_idx_linear, requires_grad=False)\n', (45882, 45923), False, 'from torch.autograd import Variable\n'), ((49815, 49838), 'torch.exp', 'torch.exp', (['prediction_R'], {}), '(prediction_R)\n', (49824, 49838), False, 'import torch\n'), ((50179, 50202), 'torch.exp', 'torch.exp', (['prediction_S'], {}), '(prediction_S)\n', (50188, 50202), False, 'import torch\n'), ((50416, 50439), 'torch.exp', 'torch.exp', (['prediction_R'], {}), '(prediction_R)\n', (50425, 50439), False, 'import torch\n'), ((50441, 50464), 'torch.exp', 'torch.exp', (['prediction_S'], {}), '(prediction_S)\n', (50450, 50464), False, 'import torch\n'), ((54576, 54586), 'sys.exit', 'sys.exit', ([], {}), '()\n', (54584, 54586), False, 'import sys\n'), ((76586, 76664), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, inner_nc, kernel_size=4, stride=2, padding=1)\n', (76604, 76664), True, 'import torch.nn as nn\n'), ((76781, 76795), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (76788, 76795), True, 'import torch.nn as nn\n'), ((76829, 76888), 'torch.nn.Conv2d', 'nn.Conv2d', (['inner_nc', '(inner_nc / 
4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(inner_nc, inner_nc / 4, kernel_size=3, padding=1)\n', (76838, 76888), True, 'import torch.nn as nn\n'), ((76888, 76902), 'torch.nn.ReLU', 'nn.ReLU', (['(False)'], {}), '(False)\n', (76895, 76902), True, 'import torch.nn as nn\n'), ((76936, 76986), 'torch.nn.Conv2d', 'nn.Conv2d', (['(inner_nc / 4)', 'num_output'], {'kernel_size': '(1)'}), '(inner_nc / 4, num_output, kernel_size=1)\n', (76945, 76986), True, 'import torch.nn as nn\n'), ((12122, 12155), 'torch.pow', 'torch.pow', (['(tau - (l1_m - l2_m))', '(2)'], {}), '(tau - (l1_m - l2_m), 2)\n', (12131, 12155), False, 'import torch\n'), ((13656, 13695), 'torch.floor', 'torch.floor', (['(judgements_eq[:, 1] * cols)'], {}), '(judgements_eq[:, 1] * cols)\n', (13667, 13695), False, 'import torch\n'), ((13735, 13774), 'torch.floor', 'torch.floor', (['(judgements_eq[:, 3] * cols)'], {}), '(judgements_eq[:, 3] * cols)\n', (13746, 13774), False, 'import torch\n'), ((15879, 15920), 'torch.floor', 'torch.floor', (['(judgements_ineq[:, 1] * cols)'], {}), '(judgements_ineq[:, 1] * cols)\n', (15890, 15920), False, 'import torch\n'), ((15960, 16001), 'torch.floor', 'torch.floor', (['(judgements_ineq[:, 3] * cols)'], {}), '(judgements_ineq[:, 3] * cols)\n', (15971, 16001), False, 'import torch\n'), ((51475, 51498), 'torch.exp', 'torch.exp', (['prediction_R'], {}), '(prediction_R)\n', (51484, 51498), False, 'import torch\n'), ((51826, 51849), 'torch.exp', 'torch.exp', (['prediction_S'], {}), '(prediction_S)\n', (51835, 51849), False, 'import torch\n'), ((52063, 52086), 'torch.exp', 'torch.exp', (['prediction_R'], {}), '(prediction_R)\n', (52072, 52086), False, 'import torch\n'), ((52088, 52111), 'torch.exp', 'torch.exp', (['prediction_S'], {}), '(prediction_S)\n', (52097, 52111), False, 'import torch\n'), ((64128, 64143), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (64138, 64143), True, 'import torch.nn as nn\n'), ((68265, 68280), 'torch.nn.Dropout', 'nn.Dropout', 
(['(0.5)'], {}), '(0.5)\n', (68275, 68280), True, 'import torch.nn as nn\n'), ((72554, 72569), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (72564, 72569), True, 'import torch.nn as nn\n'), ((76446, 76461), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (76456, 76461), True, 'import torch.nn as nn\n'), ((84008, 84023), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (84018, 84023), True, 'import torch.nn as nn\n'), ((84066, 84081), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (84076, 84081), True, 'import torch.nn as nn\n'), ((12380, 12401), 'torch.pow', 'torch.pow', (['(l2 - l1)', '(2)'], {}), '(l2 - l1, 2)\n', (12389, 12401), False, 'import torch\n'), ((17417, 17470), 'torch.ge', 'torch.ge', (['(points_2_vec.data - points_1_vec.data)', '(-tau)'], {}), '(points_2_vec.data - points_1_vec.data, -tau)\n', (17425, 17470), False, 'import torch\n'), ((46420, 46473), 'torch.ge', 'torch.ge', (['(points_2_vec.data - points_1_vec.data)', '(-tau)'], {}), '(points_2_vec.data - points_1_vec.data, -tau)\n', (46428, 46473), False, 'import torch\n'), ((53944, 53967), 'torch.exp', 'torch.exp', (['prediction_R'], {}), '(prediction_R)\n', (53953, 53967), False, 'import torch\n'), ((54023, 54046), 'torch.exp', 'torch.exp', (['prediction_S'], {}), '(prediction_S)\n', (54032, 54046), False, 'import torch\n')]
"""Load an exported SavedModel policy and roll it out in a Gym environment."""
import time

import gym
import numpy as np
import tensorflow as tf
from tensorflow import saved_model as sm

from easy_rl.utils.window_stat import WindowStat
from easy_rl.utils.gym_wrapper.atari_wrapper import make_atari, wrap_deepmind


def main():
    # Build both candidate environments; the Atari one is constructed up
    # front so switching evaluation targets is a one-line change below.
    cartpole = gym.make("CartPole-v0")
    pong = wrap_deepmind(
        env=make_atari("PongNoFrameskip-v4"),
        frame_stack=True,
        clip_rewards=False,
        episode_life=True,
        wrap_frame=True,
        frame_resize=42)

    # replace the following env according to your saved_model
    # env = pong
    env = cartpole

    with tf.Session() as sess:
        export_dir = 'dump_dir'
        meta_graph = tf.saved_model.loader.load(
            sess, tags=[sm.tag_constants.SERVING], export_dir=export_dir)

        # Resolve the "predict_results" signature into live graph tensors:
        # one dict of input placeholders, one dict of output tensors.
        signature = meta_graph.signature_def["predict_results"]

        ph_inputs = {
            key: sm.utils.get_tensor_from_tensor_info(ts_info, sess.graph)
            for key, ts_info in signature.inputs.items()
        }
        outputs = {
            key: sm.utils.get_tensor_from_tensor_info(ts_info, sess.graph)
            for key, ts_info in signature.outputs.items()
        }

        for key, tensor in ph_inputs.items():
            print(key, tensor)
        for key, tensor in outputs.items():
            print(key, tensor)

        # Sliding-window statistics over the most recent 50 episodes.
        len_window = WindowStat("length", 50)
        reward_window = WindowStat("reward", 50)

        for _ in range(100):
            ob = env.reset()
            env.render()
            time.sleep(0.2)

            episode_len = 0
            episode_reward = .0
            done = False
            while not done:
                # Deterministic action from the restored policy graph.
                action = sess.run(
                    outputs["output_actions"],
                    feed_dict={
                        ph_inputs["obs_ph"]: [np.asarray(ob)],
                        ph_inputs["deterministic_ph"]: True
                    })
                ob, reward, done, _ = env.step(action[0])
                env.render()
                time.sleep(0.1)
                episode_reward += reward
                episode_len += 1

            len_window.push(episode_len)
            reward_window.push(episode_reward)
            print(reward_window)
            print(len_window)


if __name__ == '__main__':
    main()
[ "tensorflow.Session", "numpy.asarray", "easy_rl.utils.gym_wrapper.atari_wrapper.wrap_deepmind", "tensorflow.saved_model.loader.load", "tensorflow.saved_model.utils.get_tensor_from_tensor_info", "time.sleep", "easy_rl.utils.gym_wrapper.atari_wrapper.make_atari", "easy_rl.utils.window_stat.WindowStat", ...
[((262, 285), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (270, 285), False, 'import gym\n'), ((303, 335), 'easy_rl.utils.gym_wrapper.atari_wrapper.make_atari', 'make_atari', (['"""PongNoFrameskip-v4"""'], {}), "('PongNoFrameskip-v4')\n", (313, 335), False, 'from easy_rl.utils.gym_wrapper.atari_wrapper import make_atari, wrap_deepmind\n'), ((352, 475), 'easy_rl.utils.gym_wrapper.atari_wrapper.wrap_deepmind', 'wrap_deepmind', ([], {'env': 'atari_env', 'frame_stack': '(True)', 'clip_rewards': '(False)', 'episode_life': '(True)', 'wrap_frame': '(True)', 'frame_resize': '(42)'}), '(env=atari_env, frame_stack=True, clip_rewards=False,\n episode_life=True, wrap_frame=True, frame_resize=42)\n', (365, 475), False, 'from easy_rl.utils.gym_wrapper.atari_wrapper import make_atari, wrap_deepmind\n'), ((634, 646), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (644, 646), True, 'import tensorflow as tf\n'), ((705, 791), 'tensorflow.saved_model.loader.load', 'tf.saved_model.loader.load', (['sess'], {'tags': '[sm.tag_constants.SERVING]', 'export_dir': 'path'}), '(sess, tags=[sm.tag_constants.SERVING],\n export_dir=path)\n', (731, 791), True, 'import tensorflow as tf\n'), ((1531, 1555), 'easy_rl.utils.window_stat.WindowStat', 'WindowStat', (['"""length"""', '(50)'], {}), "('length', 50)\n", (1541, 1555), False, 'from easy_rl.utils.window_stat import WindowStat\n'), ((1580, 1604), 'easy_rl.utils.window_stat.WindowStat', 'WindowStat', (['"""reward"""', '(50)'], {}), "('reward', 50)\n", (1590, 1604), False, 'from easy_rl.utils.window_stat import WindowStat\n'), ((1108, 1165), 'tensorflow.saved_model.utils.get_tensor_from_tensor_info', 'sm.utils.get_tensor_from_tensor_info', (['ts_info', 'sess.graph'], {}), '(ts_info, sess.graph)\n', (1144, 1165), True, 'from tensorflow import saved_model as sm\n'), ((1292, 1349), 'tensorflow.saved_model.utils.get_tensor_from_tensor_info', 'sm.utils.get_tensor_from_tensor_info', (['ts_info', 'sess.graph'], {}), 
'(ts_info, sess.graph)\n', (1328, 1349), True, 'from tensorflow import saved_model as sm\n'), ((1700, 1715), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1710, 1715), False, 'import time\n'), ((2201, 2216), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2211, 2216), False, 'import time\n'), ((1990, 2004), 'numpy.asarray', 'np.asarray', (['ob'], {}), '(ob)\n', (2000, 2004), True, 'import numpy as np\n')]
""" Optimal binning algorithm for continuous target. """ # <NAME> <<EMAIL>> # Copyright (C) 2019 import numbers import time from sklearn.utils import check_array import numpy as np from ..information import solver_statistics from ..logging import Logger from .auto_monotonic import auto_monotonic_continuous from .auto_monotonic import peak_valley_trend_change_heuristic from .binning import OptimalBinning from .binning_statistics import continuous_bin_info from .binning_statistics import ContinuousBinningTable from .binning_statistics import target_info_special_continuous from .continuous_cp import ContinuousBinningCP from .preprocessing import preprocessing_user_splits_categorical from .preprocessing import split_data from .transformations import transform_continuous_target logger = Logger(__name__).logger def _check_parameters(name, dtype, prebinning_method, max_n_prebins, min_prebin_size, min_n_bins, max_n_bins, min_bin_size, max_bin_size, monotonic_trend, min_mean_diff, max_pvalue, max_pvalue_policy, outlier_detector, outlier_params, cat_cutoff, user_splits, user_splits_fixed, special_codes, split_digits, time_limit, verbose): if not isinstance(name, str): raise TypeError("name must be a string.") if dtype not in ("categorical", "numerical"): raise ValueError('Invalid value for dtype. Allowed string ' 'values are "categorical" and "numerical".') if prebinning_method not in ("cart", "quantile", "uniform"): raise ValueError('Invalid value for prebinning_method. Allowed string ' 'values are "cart", "quantile" and "uniform".') if not isinstance(max_n_prebins, numbers.Integral) or max_n_prebins <= 1: raise ValueError("max_prebins must be an integer greater than 1; " "got {}.".format(max_n_prebins)) if not 0. < min_prebin_size <= 0.5: raise ValueError("min_prebin_size must be in (0, 0.5]; got {}." 
.format(min_prebin_size)) if min_n_bins is not None: if not isinstance(min_n_bins, numbers.Integral) or min_n_bins <= 0: raise ValueError("min_n_bins must be a positive integer; got {}." .format(min_n_bins)) if max_n_bins is not None: if not isinstance(max_n_bins, numbers.Integral) or max_n_bins <= 0: raise ValueError("max_n_bins must be a positive integer; got {}." .format(max_n_bins)) if min_n_bins is not None and max_n_bins is not None: if min_n_bins > max_n_bins: raise ValueError("min_n_bins must be <= max_n_bins; got {} <= {}." .format(min_n_bins, max_n_bins)) if min_bin_size is not None: if (not isinstance(min_bin_size, numbers.Number) or not 0. < min_bin_size <= 0.5): raise ValueError("min_bin_size must be in (0, 0.5]; got {}." .format(min_bin_size)) if max_bin_size is not None: if (not isinstance(max_bin_size, numbers.Number) or not 0. < max_bin_size <= 1.0): raise ValueError("max_bin_size must be in (0, 1.0]; got {}." .format(max_bin_size)) if min_bin_size is not None and max_bin_size is not None: if min_bin_size > max_bin_size: raise ValueError("min_bin_size must be <= max_bin_size; " "got {} <= {}.".format(min_bin_size, max_bin_size)) if monotonic_trend is not None: if monotonic_trend not in ("auto", "auto_heuristic", "auto_asc_desc", "ascending", "descending", "convex", "concave", "peak", "valley", "peak_heuristic", "valley_heuristic"): raise ValueError('Invalid value for monotonic trend. Allowed ' 'string values are "auto", "auto_heuristic", ' '"auto_asc_desc", "ascending", "descending", ' '"concave", "convex", "peak", "valley", ' '"peak_heuristic" and "valley_heuristic".') if (not isinstance(min_mean_diff, numbers.Number) or min_mean_diff < 0): raise ValueError("min_mean_diff must be >= 0; got {}." .format(min_mean_diff)) if max_pvalue is not None: if (not isinstance(max_pvalue, numbers.Number) or not 0. < max_pvalue <= 1.0): raise ValueError("max_pvalue must be in (0, 1.0]; got {}." 
.format(max_pvalue)) if max_pvalue_policy not in ("all", "consecutive"): raise ValueError('Invalid value for max_pvalue_policy. Allowed string ' 'values are "all" and "consecutive".') if outlier_detector is not None: if outlier_detector not in ("range", "zscore"): raise ValueError('Invalid value for outlier_detector. Allowed ' 'string values are "range" and "zscore".') if outlier_params is not None: if not isinstance(outlier_params, dict): raise TypeError("outlier_params must be a dict or None; " "got {}.".format(outlier_params)) if cat_cutoff is not None: if (not isinstance(cat_cutoff, numbers.Number) or not 0. < cat_cutoff <= 1.0): raise ValueError("cat_cutoff must be in (0, 1.0]; got {}." .format(cat_cutoff)) if user_splits is not None: if not isinstance(user_splits, (np.ndarray, list)): raise TypeError("user_splits must be a list or numpy.ndarray.") if user_splits_fixed is not None: if user_splits is None: raise ValueError("user_splits must be provided.") else: if not isinstance(user_splits_fixed, (np.ndarray, list)): raise TypeError("user_splits_fixed must be a list or " "numpy.ndarray.") elif not all(isinstance(s, bool) for s in user_splits_fixed): raise ValueError("user_splits_fixed must be list of boolean.") elif len(user_splits) != len(user_splits_fixed): raise ValueError("Inconsistent length of user_splits and " "user_splits_fixed: {} != {}. Lengths must " "be equal".format(len(user_splits), len(user_splits_fixed))) if special_codes is not None: if not isinstance(special_codes, (np.ndarray, list, dict)): raise TypeError("special_codes must be a dit, list or " "numpy.ndarray.") if isinstance(special_codes, dict) and not len(special_codes): raise ValueError("special_codes empty. 
special_codes dict must " "contain at least one special.") if split_digits is not None: if (not isinstance(split_digits, numbers.Integral) or not 0 <= split_digits <= 8): raise ValueError("split_digist must be an integer in [0, 8]; " "got {}.".format(split_digits)) if not isinstance(time_limit, numbers.Number) or time_limit < 0: raise ValueError("time_limit must be a positive value in seconds; " "got {}.".format(time_limit)) if not isinstance(verbose, bool): raise TypeError("verbose must be a boolean; got {}.".format(verbose)) class ContinuousOptimalBinning(OptimalBinning): """Optimal binning of a numerical or categorical variable with respect to a continuous target. Parameters ---------- name : str, optional (default="") The variable name. dtype : str, optional (default="numerical") The variable data type. Supported data types are "numerical" for continuous and ordinal variables and "categorical" for categorical and nominal variables. prebinning_method : str, optional (default="cart") The pre-binning method. Supported methods are "cart" for a CART decision tree, "quantile" to generate prebins with approximately same frequency and "uniform" to generate prebins with equal width. Method "cart" uses `sklearn.tree.DecisionTreeRegressor <https://scikit-learn.org/stable/modules/generated/sklearn.tree. DecisionTreeRegressor.html>`_. max_n_prebins : int (default=20) The maximum number of bins after pre-binning (prebins). min_prebin_size : float (default=0.05) The fraction of mininum number of records for each prebin. min_n_bins : int or None, optional (default=None) The minimum number of bins. If None, then ``min_n_bins`` is a value in ``[0, max_n_prebins]``. max_n_bins : int or None, optional (default=None) The maximum number of bins. If None, then ``max_n_bins`` is a value in ``[0, max_n_prebins]``. min_bin_size : float or None, optional (default=None) The fraction of minimum number of records for each bin. If None, ``min_bin_size = min_prebin_size``. 
max_bin_size : float or None, optional (default=None) The fraction of maximum number of records for each bin. If None, ``max_bin_size = 1.0``. monotonic_trend : str or None, optional (default="auto") The **mean** monotonic trend. Supported trends are “auto”, "auto_heuristic" and "auto_asc_desc" to automatically determine the trend minimize the L1-norm using a machine learning classifier, "ascending", "descending", "concave", "convex", "peak" and "peak_heuristic" to allow a peak change point, and "valley" and "valley_heuristic" to allow a valley change point. Trends "auto_heuristic", "peak_heuristic" and "valley_heuristic" use a heuristic to determine the change point, and are significantly faster for large size instances (``max_n_prebins> 20``). Trend "auto_asc_desc" is used to automatically select the best monotonic trend between "ascending" and "descending". If None, then the monotonic constraint is disabled. min_mean_diff : float, optional (default=0) The minimum mean difference between consecutives bins. This option currently only applies when ``monotonic_trend`` is "ascending" or "descending". max_pvalue : float or None, optional (default=0.05) The maximum p-value among bins. The T-test is used to detect bins not satisfying the p-value constraint. max_pvalue_policy : str, optional (default="consecutive") The method to determine bins not satisfying the p-value constraint. Supported methods are "consecutive" to compare consecutive bins and "all" to compare all bins. outlier_detector : str or None, optional (default=None) The outlier detection method. Supported methods are "range" to use the interquartile range based method or "zcore" to use the modified Z-score method. outlier_params : dict or None, optional (default=None) Dictionary of parameters to pass to the outlier detection method. cat_cutoff : float or None, optional (default=None) Generate bin others with categories in which the fraction of occurrences is below the ``cat_cutoff`` value. 
This option is available when ``dtype`` is "categorical". user_splits : array-like or None, optional (default=None) The list of pre-binning split points when ``dtype`` is "numerical" or the list of prebins when ``dtype`` is "categorical". user_splits_fixed : array-like or None (default=None) The list of pre-binning split points that must be fixed. special_codes : array-like, dict or None, optional (default=None) List of special codes. Use special codes to specify the data values that must be treated separately. split_digits : int or None, optional (default=None) The significant digits of the split points. If ``split_digits`` is set to 0, the split points are integers. If None, then all significant digits in the split points are considered. time_limit : int (default=100) The maximum time in seconds to run the optimization solver. verbose : bool (default=False) Enable verbose output. **prebinning_kwargs : keyword arguments The pre-binning keywrord arguments. .. versionadded:: 0.6.1 Notes ----- The parameter values ``max_n_prebins`` and ``min_prebin_size`` control complexity and memory usage. The default values generally produce quality results, however, some improvement can be achieved by increasing ``max_n_prebins`` and/or decreasing ``min_prebin_size``. The T-test uses an estimate of the standard deviation of the contingency table to speed up the model generation and reduce memory usage. Therefore, it is not guaranteed to obtain bins satisfying the p-value constraint, although it may work reasonably well in most cases. To avoid having bins with similar bins the parameter ``min_mean_diff`` is recommended. 
""" def __init__(self, name="", dtype="numerical", prebinning_method="cart", max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend="auto", min_mean_diff=0, max_pvalue=None, max_pvalue_policy="consecutive", outlier_detector=None, outlier_params=None, cat_cutoff=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, time_limit=100, verbose=False, **prebinning_kwargs): self.name = name self.dtype = dtype self.prebinning_method = prebinning_method self.solver = "cp" self.max_n_prebins = max_n_prebins self.min_prebin_size = min_prebin_size self.min_n_bins = min_n_bins self.max_n_bins = max_n_bins self.min_bin_size = min_bin_size self.max_bin_size = max_bin_size self.monotonic_trend = monotonic_trend self.min_mean_diff = min_mean_diff self.max_pvalue = max_pvalue self.max_pvalue_policy = max_pvalue_policy self.outlier_detector = outlier_detector self.outlier_params = outlier_params self.cat_cutoff = cat_cutoff self.user_splits = user_splits self.user_splits_fixed = user_splits_fixed self.special_codes = special_codes self.split_digits = split_digits self.time_limit = time_limit self.verbose = verbose self.prebinning_kwargs = prebinning_kwargs # auxiliary self._categories = None self._cat_others = None self._n_records = None self._sums = None self._stds = None self._min_target = None self._max_target = None self._n_zeros = None self._n_records_cat_others = None self._n_records_missing = None self._n_records_special = None self._sum_cat_others = None self._sum_special = None self._sum_missing = None self._std_cat_others = None self._std_special = None self._std_missing = None self._min_target_missing = None self._min_target_special = None self._min_target_others = None self._max_target_missing = None self._max_target_special = None self._max_target_others = None self._n_zeros_missing = None self._n_zeros_special = None self._n_zeros_others = None self._problem_type = 
"regression" # info self._binning_table = None self._n_prebins = None self._n_refinements = 0 self._n_samples = None self._optimizer = None self._splits_optimal = None self._status = None # timing self._time_total = None self._time_preprocessing = None self._time_prebinning = None self._time_solver = None self._time_optimizer = None self._time_postprocessing = None self._is_fitted = False def fit(self, x, y, check_input=False): """Fit the optimal binning according to the given training data. Parameters ---------- x : array-like, shape = (n_samples,) Training vector, where n_samples is the number of samples. y : array-like, shape = (n_samples,) Target vector relative to x. check_input : bool (default=False) Whether to check input arrays. Returns ------- self : ContinuousOptimalBinning Fitted optimal binning. """ return self._fit(x, y, check_input) def fit_transform(self, x, y, metric="mean", metric_special=0, metric_missing=0, show_digits=2, check_input=False): """Fit the optimal binning according to the given training data, then transform it. Parameters ---------- x : array-like, shape = (n_samples,) Training vector, where n_samples is the number of samples. y : array-like, shape = (n_samples,) Target vector relative to x. metric : str (default="mean"): The metric used to transform the input vector. Supported metrics are "mean" to choose the mean, "indices" to assign the corresponding indices of the bins and "bins" to assign the corresponding bin interval. metric_special : float or str (default=0) The metric value to transform special codes in the input vector. Supported metrics are "empirical" to use the empirical mean, and any numerical value. metric_missing : float or str (default=0) The metric value to transform missing values in the input vector. Supported metrics are "empirical" to use the empirical mean, and any numerical value. show_digits : int, optional (default=2) The number of significant digits of the bin column. Applies when ``metric="bins"``. 
check_input : bool (default=False) Whether to check input arrays. Returns ------- x_new : numpy array, shape = (n_samples,) Transformed array. """ return self.fit(x, y, check_input).transform( x, metric, metric_special, metric_missing, show_digits, check_input) def transform(self, x, metric="mean", metric_special=0, metric_missing=0, show_digits=2, check_input=False): """Transform given data to mean using bins from the fitted optimal binning. Parameters ---------- x : array-like, shape = (n_samples,) Training vector, where n_samples is the number of samples. metric : str (default="mean"): The metric used to transform the input vector. Supported metrics are "mean" to choose the mean, "indices" to assign the corresponding indices of the bins and "bins" to assign the corresponding bin interval. metric_special : float or str (default=0) The metric value to transform special codes in the input vector. Supported metrics are "empirical" to use the empirical mean, and any numerical value. metric_missing : float or str (default=0) The metric value to transform missing values in the input vector. Supported metrics are "empirical" to use the empirical mean, and any numerical value. show_digits : int, optional (default=2) The number of significant digits of the bin column. Applies when ``metric="bins"``. check_input : bool (default=False) Whether to check input arrays. Returns ------- x_new : numpy array, shape = (n_samples,) Transformed array. Notes ----- Transformation of data including categories not present during training return zero mean. 
""" self._check_is_fitted() return transform_continuous_target(self._splits_optimal, self.dtype, x, self._n_records, self._sums, self.special_codes, self._categories, self._cat_others, metric, metric_special, metric_missing, self.user_splits, show_digits, check_input) def _fit(self, x, y, check_input): time_init = time.perf_counter() if self.verbose: logger.info("Optimal binning started.") logger.info("Options: check parameters.") _check_parameters(**self.get_params()) # Pre-processing if self.verbose: logger.info("Pre-processing started.") self._n_samples = len(x) if self.verbose: logger.info("Pre-processing: number of samples: {}" .format(self._n_samples)) time_preprocessing = time.perf_counter() [x_clean, y_clean, x_missing, y_missing, x_special, y_special, y_others, categories, cat_others, _, _, _, _] = split_data( self.dtype, x, y, self.special_codes, self.cat_cutoff, self.user_splits, check_input, self.outlier_detector, self.outlier_params) self._time_preprocessing = time.perf_counter() - time_preprocessing if self.verbose: n_clean = len(x_clean) n_missing = len(x_missing) n_special = len(x_special) logger.info("Pre-processing: number of clean samples: {}" .format(n_clean)) logger.info("Pre-processing: number of missing samples: {}" .format(n_missing)) logger.info("Pre-processing: number of special samples: {}" .format(n_special)) if self.outlier_detector is not None: n_outlier = self._n_samples-(n_clean + n_missing + n_special) logger.info("Pre-processing: number of outlier samples: {}" .format(n_outlier)) if self.dtype == "categorical": n_categories = len(categories) n_categories_others = len(cat_others) n_others = len(y_others) logger.info("Pre-processing: number of others samples: {}" .format(n_others)) logger.info("Pre-processing: number of categories: {}" .format(n_categories)) logger.info("Pre-processing: number of categories others: {}" .format(n_categories_others)) logger.info("Pre-processing terminated. 
Time: {:.4f}s" .format(self._time_preprocessing)) # Pre-binning if self.verbose: logger.info("Pre-binning started.") time_prebinning = time.perf_counter() if self.user_splits is not None: n_splits = len(self.user_splits) if self.verbose: logger.info("Pre-binning: user splits supplied: {}" .format(n_splits)) if not n_splits: splits = self.user_splits n_records = np.array([]) sums = np.array([]) stds = np.array([]) else: if self.dtype == "numerical": user_splits = check_array( self.user_splits, ensure_2d=False, dtype=None, force_all_finite=True) if len(set(user_splits)) != len(user_splits): raise ValueError("User splits are not unique.") sorted_idx = np.argsort(user_splits) user_splits = user_splits[sorted_idx] else: [categories, user_splits, x_clean, y_clean, y_others, cat_others, _, _, sorted_idx ] = preprocessing_user_splits_categorical( self.user_splits, x_clean, y_clean, None) if self.user_splits_fixed is not None: self.user_splits_fixed = np.asarray( self.user_splits_fixed)[sorted_idx] [splits, n_records, sums, ssums, stds, min_t, max_t, n_zeros] = self._prebinning_refinement( user_splits, x_clean, y_clean, y_missing, x_special, y_special, y_others) else: [splits, n_records, sums, ssums, stds, min_t, max_t, n_zeros] = self._fit_prebinning( x_clean, y_clean, y_missing, x_special, y_special, y_others) self._n_prebins = len(n_records) self._categories = categories self._cat_others = cat_others self._time_prebinning = time.perf_counter() - time_prebinning if self.verbose: logger.info("Pre-binning: number of prebins: {}" .format(self._n_prebins)) logger.info("Pre-binning terminated. 
Time: {:.4f}s" .format(self._time_prebinning)) # Optimization self._fit_optimizer(splits, n_records, sums, ssums, stds) # Post-processing if self.verbose: logger.info("Post-processing started.") logger.info("Post-processing: compute binning information.") time_postprocessing = time.perf_counter() if not len(splits): n_records = n_records.sum() sums = sums.sum() [self._n_records, self._sums, self._stds, self._min_target, self._max_target, self._n_zeros] = continuous_bin_info( self._solution, n_records, sums, ssums, stds, min_t, max_t, n_zeros, self._n_records_missing, self._sum_missing, self._std_missing, self._min_target_missing, self._max_target_missing, self._n_zeros_missing, self._n_records_special, self._sum_special, self._std_special, self._min_target_special, self._max_target_special, self._n_zeros_special, self._n_records_cat_others, self._sum_cat_others, self._std_cat_others, self._min_target_others, self._max_target_others, self._n_zeros_others, self._cat_others) if self.dtype == "numerical": min_x = x_clean.min() max_x = x_clean.max() else: min_x = None max_x = None self._binning_table = ContinuousBinningTable( self.name, self.dtype, self.special_codes, self._splits_optimal, self._n_records, self._sums, self._stds, self._min_target, self._max_target, self._n_zeros, min_x, max_x, self._categories, self._cat_others, self.user_splits) self._time_postprocessing = time.perf_counter() - time_postprocessing if self.verbose: logger.info("Post-processing terminated. Time: {:.4f}s" .format(self._time_postprocessing)) self._time_total = time.perf_counter() - time_init if self.verbose: logger.info("Optimal binning terminated. Status: {}. 
Time: {:.4f}s" .format(self._status, self._time_total)) # Completed successfully self._is_fitted = True return self def _fit_optimizer(self, splits, n_records, sums, ssums, stds): if self.verbose: logger.info("Optimizer started.") time_init = time.perf_counter() if len(n_records) <= 1: self._status = "OPTIMAL" self._splits_optimal = splits self._solution = np.zeros(len(splits)).astype(bool) if self.verbose: logger.warning("Optimizer: {} bins after pre-binning." .format(len(n_records))) logger.warning("Optimizer: solver not run.") logger.info("Optimizer terminated. Time: 0s") return if self.min_bin_size is not None: min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples)) else: min_bin_size = self.min_bin_size if self.max_bin_size is not None: max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples)) else: max_bin_size = self.max_bin_size # Monotonic trend trend_change = None if self.dtype == "numerical": auto_monotonic_modes = ("auto", "auto_heuristic", "auto_asc_desc") if self.monotonic_trend in auto_monotonic_modes: monotonic = auto_monotonic_continuous( n_records, sums, self.monotonic_trend) if self.monotonic_trend == "auto_heuristic": if monotonic in ("peak", "valley"): if monotonic == "peak": monotonic = "peak_heuristic" else: monotonic = "valley_heuristic" mean = sums / n_records trend_change = peak_valley_trend_change_heuristic( mean, monotonic) if self.verbose: logger.info("Optimizer: classifier predicts {} " "monotonic trend.".format(monotonic)) else: monotonic = self.monotonic_trend if monotonic in ("peak_heuristic", "valley_heuristic"): mean = sums / n_records trend_change = peak_valley_trend_change_heuristic( mean, monotonic) else: monotonic = self.monotonic_trend if monotonic is not None: monotonic = "ascending" if self.verbose: if monotonic is None: logger.info( "Optimizer: monotonic trend not set.") else: logger.info("Optimizer: monotonic trend set to {}." 
.format(monotonic)) optimizer = ContinuousBinningCP(monotonic, self.min_n_bins, self.max_n_bins, min_bin_size, max_bin_size, self.min_mean_diff, self.max_pvalue, self.max_pvalue_policy, self.user_splits_fixed, self.time_limit) if self.verbose: logger.info("Optimizer: build model...") optimizer.build_model(n_records, sums, ssums, trend_change) if self.verbose: logger.info("Optimizer: solve...") status, solution = optimizer.solve() self._solution = solution self._optimizer, self._time_optimizer = solver_statistics( self.solver, optimizer.solver_) self._status = status if self.dtype == "categorical" and self.user_splits is not None: self._splits_optimal = splits[solution] else: self._splits_optimal = splits[solution[:-1]] self._time_solver = time.perf_counter() - time_init if self.verbose: logger.info("Optimizer terminated. Time: {:.4f}s" .format(self._time_solver)) def _prebinning_refinement(self, splits_prebinning, x, y, y_missing, x_special, y_special, y_others, sw_clean=None, sw_missing=None, sw_special=None, sw_others=None): n_splits = len(splits_prebinning) if not n_splits: return splits_prebinning, np.array([]), np.array([]) if self.split_digits is not None: splits_prebinning = np.round(splits_prebinning, self.split_digits) if self.dtype == "categorical" and self.user_splits is not None: indices = np.digitize(x, splits_prebinning, right=True) n_bins = n_splits else: indices = np.digitize(x, splits_prebinning, right=False) n_bins = n_splits + 1 # Compute n_records, sum and std for special, missing and others # self._n_records_special = len(y_special) # self._sum_special = np.sum(y_special) # self._n_zeros_special = np.count_nonzero(y_special == 0) # if len(y_special): # self._std_special = np.std(y_special) # self._min_target_special = np.min(y_special) # self._max_target_special = np.max(y_special) [self._n_records_special, self._sum_special, self._n_zeros_special, self._std_special, self._min_target_special, self._max_target_special] = 
target_info_special_continuous( self.special_codes, x_special, y_special) self._n_records_missing = len(y_missing) self._sum_missing = np.sum(y_missing) self._n_zeros_missing = np.count_nonzero(y_missing == 0) if len(y_missing): self._std_missing = np.std(y_missing) self._min_target_missing = np.min(y_missing) self._max_target_missing = np.max(y_missing) if len(y_others): self._n_records_cat_others = len(y_others) self._sum_cat_others = np.sum(y_others) self._std_cat_others = np.std(y_others) self._min_target_others = np.min(y_others) self._max_target_others = np.max(y_others) self._n_zeros_others = np.count_nonzero(y_others == 0) n_records = np.empty(n_bins).astype(np.int64) sums = np.empty(n_bins) ssums = np.empty(n_bins) stds = np.empty(n_bins) n_zeros = np.empty(n_bins).astype(np.int64) min_t = np.full(n_bins, -np.inf) max_t = np.full(n_bins, np.inf) # Compute prebin information for i in range(n_bins): mask = (indices == i) n_records[i] = np.count_nonzero(mask) ymask = y[mask] sums[i] = np.sum(ymask) ssums[i] = np.sum(ymask ** 2) stds[i] = np.std(ymask) n_zeros[i] = np.count_nonzero(ymask == 0) if len(ymask): min_t[i] = np.min(ymask) max_t[i] = np.max(ymask) return (splits_prebinning, n_records, sums, ssums, stds, min_t, max_t, n_zeros) @property def binning_table(self): """Return an instantiated binning table. Please refer to :ref:`Binning table: continuous target`. Returns ------- binning_table : ContinuousBinningTable. """ self._check_is_fitted() return self._binning_table
[ "numpy.ceil", "numpy.digitize", "numpy.asarray", "time.perf_counter", "numpy.min", "numpy.max", "numpy.count_nonzero", "numpy.sum", "numpy.array", "numpy.argsort", "numpy.empty", "sklearn.utils.check_array", "numpy.std", "numpy.full", "numpy.round" ]
[((21227, 21246), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (21244, 21246), False, 'import time\n'), ((21733, 21752), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (21750, 21752), False, 'import time\n'), ((23696, 23715), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (23713, 23715), False, 'import time\n'), ((26255, 26274), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (26272, 26274), False, 'import time\n'), ((28345, 28364), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (28362, 28364), False, 'import time\n'), ((33794, 33811), 'numpy.sum', 'np.sum', (['y_missing'], {}), '(y_missing)\n', (33800, 33811), True, 'import numpy as np\n'), ((33844, 33876), 'numpy.count_nonzero', 'np.count_nonzero', (['(y_missing == 0)'], {}), '(y_missing == 0)\n', (33860, 33876), True, 'import numpy as np\n'), ((34501, 34517), 'numpy.empty', 'np.empty', (['n_bins'], {}), '(n_bins)\n', (34509, 34517), True, 'import numpy as np\n'), ((34534, 34550), 'numpy.empty', 'np.empty', (['n_bins'], {}), '(n_bins)\n', (34542, 34550), True, 'import numpy as np\n'), ((34566, 34582), 'numpy.empty', 'np.empty', (['n_bins'], {}), '(n_bins)\n', (34574, 34582), True, 'import numpy as np\n'), ((34651, 34675), 'numpy.full', 'np.full', (['n_bins', '(-np.inf)'], {}), '(n_bins, -np.inf)\n', (34658, 34675), True, 'import numpy as np\n'), ((34692, 34715), 'numpy.full', 'np.full', (['n_bins', 'np.inf'], {}), '(n_bins, np.inf)\n', (34699, 34715), True, 'import numpy as np\n'), ((22096, 22115), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (22113, 22115), False, 'import time\n'), ((25661, 25680), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (25678, 25680), False, 'import time\n'), ((27671, 27690), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (27688, 27690), False, 'import time\n'), ((27895, 27914), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (27912, 27914), False, 'import time\n'), 
((32042, 32061), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (32059, 32061), False, 'import time\n'), ((32687, 32733), 'numpy.round', 'np.round', (['splits_prebinning', 'self.split_digits'], {}), '(splits_prebinning, self.split_digits)\n', (32695, 32733), True, 'import numpy as np\n'), ((32830, 32875), 'numpy.digitize', 'np.digitize', (['x', 'splits_prebinning'], {'right': '(True)'}), '(x, splits_prebinning, right=True)\n', (32841, 32875), True, 'import numpy as np\n'), ((32942, 32988), 'numpy.digitize', 'np.digitize', (['x', 'splits_prebinning'], {'right': '(False)'}), '(x, splits_prebinning, right=False)\n', (32953, 32988), True, 'import numpy as np\n'), ((33936, 33953), 'numpy.std', 'np.std', (['y_missing'], {}), '(y_missing)\n', (33942, 33953), True, 'import numpy as np\n'), ((33993, 34010), 'numpy.min', 'np.min', (['y_missing'], {}), '(y_missing)\n', (33999, 34010), True, 'import numpy as np\n'), ((34050, 34067), 'numpy.max', 'np.max', (['y_missing'], {}), '(y_missing)\n', (34056, 34067), True, 'import numpy as np\n'), ((34185, 34201), 'numpy.sum', 'np.sum', (['y_others'], {}), '(y_others)\n', (34191, 34201), True, 'import numpy as np\n'), ((34237, 34253), 'numpy.std', 'np.std', (['y_others'], {}), '(y_others)\n', (34243, 34253), True, 'import numpy as np\n'), ((34292, 34308), 'numpy.min', 'np.min', (['y_others'], {}), '(y_others)\n', (34298, 34308), True, 'import numpy as np\n'), ((34347, 34363), 'numpy.max', 'np.max', (['y_others'], {}), '(y_others)\n', (34353, 34363), True, 'import numpy as np\n'), ((34399, 34430), 'numpy.count_nonzero', 'np.count_nonzero', (['(y_others == 0)'], {}), '(y_others == 0)\n', (34415, 34430), True, 'import numpy as np\n'), ((34847, 34869), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (34863, 34869), True, 'import numpy as np\n'), ((34920, 34933), 'numpy.sum', 'np.sum', (['ymask'], {}), '(ymask)\n', (34926, 34933), True, 'import numpy as np\n'), ((34957, 34975), 'numpy.sum', 'np.sum', 
(['(ymask ** 2)'], {}), '(ymask ** 2)\n', (34963, 34975), True, 'import numpy as np\n'), ((34998, 35011), 'numpy.std', 'np.std', (['ymask'], {}), '(ymask)\n', (35004, 35011), True, 'import numpy as np\n'), ((35037, 35065), 'numpy.count_nonzero', 'np.count_nonzero', (['(ymask == 0)'], {}), '(ymask == 0)\n', (35053, 35065), True, 'import numpy as np\n'), ((24048, 24060), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (24056, 24060), True, 'import numpy as np\n'), ((24084, 24096), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (24092, 24096), True, 'import numpy as np\n'), ((24120, 24132), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (24128, 24132), True, 'import numpy as np\n'), ((28914, 28958), 'numpy.ceil', 'np.ceil', (['(self.min_bin_size * self._n_samples)'], {}), '(self.min_bin_size * self._n_samples)\n', (28921, 28958), True, 'import numpy as np\n'), ((29093, 29137), 'numpy.ceil', 'np.ceil', (['(self.max_bin_size * self._n_samples)'], {}), '(self.max_bin_size * self._n_samples)\n', (29100, 29137), True, 'import numpy as np\n'), ((32585, 32597), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (32593, 32597), True, 'import numpy as np\n'), ((32599, 32611), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (32607, 32611), True, 'import numpy as np\n'), ((34452, 34468), 'numpy.empty', 'np.empty', (['n_bins'], {}), '(n_bins)\n', (34460, 34468), True, 'import numpy as np\n'), ((34601, 34617), 'numpy.empty', 'np.empty', (['n_bins'], {}), '(n_bins)\n', (34609, 34617), True, 'import numpy as np\n'), ((35120, 35133), 'numpy.min', 'np.min', (['ymask'], {}), '(ymask)\n', (35126, 35133), True, 'import numpy as np\n'), ((35161, 35174), 'numpy.max', 'np.max', (['ymask'], {}), '(ymask)\n', (35167, 35174), True, 'import numpy as np\n'), ((24231, 24317), 'sklearn.utils.check_array', 'check_array', (['self.user_splits'], {'ensure_2d': '(False)', 'dtype': 'None', 'force_all_finite': '(True)'}), '(self.user_splits, ensure_2d=False, dtype=None, force_all_finite\n 
=True)\n', (24242, 24317), False, 'from sklearn.utils import check_array\n'), ((24535, 24558), 'numpy.argsort', 'np.argsort', (['user_splits'], {}), '(user_splits)\n', (24545, 24558), True, 'import numpy as np\n'), ((24994, 25028), 'numpy.asarray', 'np.asarray', (['self.user_splits_fixed'], {}), '(self.user_splits_fixed)\n', (25004, 25028), True, 'import numpy as np\n')]
from numpy import exp, pi

from ....Classes.Arc1 import Arc1
from ....Classes.Arc3 import Arc3
from ....Functions.Geometry.merge_notch_list import merge_notch_list


def get_yoke_desc(self, sym=1, is_reversed=False, prop_dict=None):
    """This method returns an ordered description of the elements
    that defines the yoke radius of the lamination.

    The description is a list of dicts ordered counter-clockwise along the
    yoke circle; each dict holds the angular span of the element and either
    an Arc object (plain yoke line) or a notch/slot object to be built.

    Parameters
    ----------
    self : Lamination
        A Lamination object
    sym : int
        Symetry factor (2=half the lamination)
    is_reversed : bool
        True to return the line in clockwise oder
    prop_dict : dict
        Property dictionary to apply on the lines

    Returns
    -------
    yoke_desc : list
        list of dictionary with key: "begin_angle", "end_angle", "obj"
    yoke_lines : list
        List of Lines to draw the yoke
    """

    Ryoke = self.get_Ryoke()

    if self.yoke_notch is None or len(self.yoke_notch) == 0:
        # No notches: the yoke is a plain circle (or circle arc under symmetry)
        if sym == 1:
            # Full lamination: two half-circle Arc3 (a single Arc3 cannot
            # describe a full circle)
            yoke_desc = list()
            yoke_desc.append(
                {
                    "obj": Arc3(begin=Ryoke, end=-Ryoke, is_trigo_direction=True),
                    "begin_angle": 0,
                    "end_angle": pi,
                }
            )
            yoke_desc.append(
                {
                    "obj": Arc3(begin=-Ryoke, end=Ryoke, is_trigo_direction=True),
                    "begin_angle": 0,
                    "end_angle": pi,
                }
            )
        else:
            # Symmetry: a single arc spanning 2*pi/sym
            yoke_desc = [
                {
                    "obj": Arc1(
                        begin=Ryoke,
                        end=Ryoke * exp(1j * 2 * pi / sym),
                        radius=Ryoke,
                        is_trigo_direction=True,
                    ),
                    "begin_angle": 0,
                    "end_angle": 2 * pi / sym,
                }
            ]
    else:
        # Get the notches (already sorted by angle, restricted to the symmetry span)
        notch_list = self.get_notch_list(sym=sym, is_yoke=True)

        # Add all the yoke lines: interleave each notch with the plain arc
        # that joins it to the next notch
        yoke_desc = list()
        for ii, desc in enumerate(notch_list):
            yoke_desc.append(desc)
            if ii != len(notch_list) - 1:
                yoke_dict = dict()
                yoke_dict["begin_angle"] = notch_list[ii]["end_angle"]
                yoke_dict["end_angle"] = notch_list[ii + 1]["begin_angle"]
                yoke_dict["obj"] = Arc1(
                    begin=Ryoke * exp(1j * yoke_dict["begin_angle"]),
                    end=Ryoke * exp(1j * yoke_dict["end_angle"]),
                    radius=Ryoke,
                    is_trigo_direction=True,
                )
                yoke_desc.append(yoke_dict)

        # Add last yoke line
        if sym == 1 and len(notch_list) > 0:
            # Full lamination: close the circle between last and first notch
            yoke_dict = dict()
            yoke_dict["begin_angle"] = notch_list[-1]["end_angle"]
            yoke_dict["end_angle"] = notch_list[0]["begin_angle"]
            yoke_dict["obj"] = Arc1(
                begin=Ryoke * exp(1j * yoke_dict["begin_angle"]),
                end=Ryoke * exp(1j * yoke_dict["end_angle"]),
                radius=Ryoke,
                is_trigo_direction=True,
            )
            if notch_list[0]["begin_angle"] < 0:
                # First element is an slot or notch (straddles angle 0)
                yoke_desc.append(yoke_dict)
            else:
                # First element is a yoke line
                yoke_desc.insert(0, yoke_dict)
        elif sym != 1:
            # With symmetry: the span is open, so both ends may need an arc
            # Add last yoke line (from last notch up to the symmetry boundary)
            yoke_dict = dict()
            yoke_dict["begin_angle"] = notch_list[-1]["end_angle"]
            yoke_dict["end_angle"] = 2 * pi / sym
            yoke_dict["obj"] = Arc1(
                begin=Ryoke * exp(1j * yoke_dict["begin_angle"]),
                end=Ryoke * exp(1j * yoke_dict["end_angle"]),
                radius=Ryoke,
                is_trigo_direction=True,
            )
            yoke_desc.append(yoke_dict)
            # Add first yoke line (from angle 0 up to the first notch)
            yoke_dict = dict()
            yoke_dict["begin_angle"] = 0
            yoke_dict["end_angle"] = notch_list[0]["begin_angle"]
            yoke_dict["obj"] = Arc1(
                begin=Ryoke * exp(1j * yoke_dict["begin_angle"]),
                end=Ryoke * exp(1j * yoke_dict["end_angle"]),
                radius=Ryoke,
                is_trigo_direction=True,
            )
            yoke_desc.insert(0, yoke_dict)

    # Convert the description to lines
    yoke_lines = list()
    for yoke in yoke_desc:
        if isinstance(yoke["obj"], (Arc1, Arc3)):
            # Plain yoke arc: already a drawable line
            yoke_lines.append(yoke["obj"])
        elif "lines" in yoke:  # Duplicated slot: lines were pre-built, copy and rotate
            for line in yoke["lines"]:
                yoke_lines.append(line.copy())
                yoke_lines[-1].rotate((yoke["begin_angle"] + yoke["end_angle"]) / 2)
        else:  # Notches
            self.is_internal = not self.is_internal  # To draw slot on yoke
            lines = yoke["obj"].build_geometry()
            self.is_internal = not self.is_internal  # To draw slot on yoke
            for line in lines:
                line.rotate((yoke["begin_angle"] + yoke["end_angle"]) / 2)
            yoke_lines.extend(lines)

    # Reverse the lines (clockwise order)
    if is_reversed:
        yoke_lines = yoke_lines[::-1]
        for line in yoke_lines:
            line.reverse()

    # Set line properties
    if prop_dict is not None:
        for line in yoke_lines:
            if line.prop_dict is None:
                line.prop_dict = prop_dict
            else:
                line.prop_dict.update(prop_dict)

    return yoke_desc, yoke_lines
[ "numpy.exp" ]
[((2967, 3003), 'numpy.exp', 'exp', (["(1.0j * yoke_dict['begin_angle'])"], {}), "(1.0j * yoke_dict['begin_angle'])\n", (2970, 3003), False, 'from numpy import exp, pi\n'), ((3031, 3065), 'numpy.exp', 'exp', (["(1.0j * yoke_dict['end_angle'])"], {}), "(1.0j * yoke_dict['end_angle'])\n", (3034, 3065), False, 'from numpy import exp, pi\n'), ((2418, 2454), 'numpy.exp', 'exp', (["(1.0j * yoke_dict['begin_angle'])"], {}), "(1.0j * yoke_dict['begin_angle'])\n", (2421, 2454), False, 'from numpy import exp, pi\n'), ((2486, 2520), 'numpy.exp', 'exp', (["(1.0j * yoke_dict['end_angle'])"], {}), "(1.0j * yoke_dict['end_angle'])\n", (2489, 2520), False, 'from numpy import exp, pi\n'), ((3695, 3731), 'numpy.exp', 'exp', (["(1.0j * yoke_dict['begin_angle'])"], {}), "(1.0j * yoke_dict['begin_angle'])\n", (3698, 3731), False, 'from numpy import exp, pi\n'), ((3759, 3793), 'numpy.exp', 'exp', (["(1.0j * yoke_dict['end_angle'])"], {}), "(1.0j * yoke_dict['end_angle'])\n", (3762, 3793), False, 'from numpy import exp, pi\n'), ((4158, 4194), 'numpy.exp', 'exp', (["(1.0j * yoke_dict['begin_angle'])"], {}), "(1.0j * yoke_dict['begin_angle'])\n", (4161, 4194), False, 'from numpy import exp, pi\n'), ((4222, 4256), 'numpy.exp', 'exp', (["(1.0j * yoke_dict['end_angle'])"], {}), "(1.0j * yoke_dict['end_angle'])\n", (4225, 4256), False, 'from numpy import exp, pi\n'), ((1626, 1650), 'numpy.exp', 'exp', (['(1.0j * 2 * pi / sym)'], {}), '(1.0j * 2 * pi / sym)\n', (1629, 1650), False, 'from numpy import exp, pi\n')]
# Naive Bayes and gradient-boosting benchmarks with bag-of-words / Tfidf vectorization
from utils import *

import re
from time import time

import numpy as np
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB


def get_wordnet_pos(word):
    """Map POS tag to first character lemmatize() accepts.

    :param word: a single token (str)
    :return: one of wordnet.ADJ / NOUN / VERB / ADV (NOUN when the tag is unknown)
    """
    tag = nltk.pos_tag([word])[0][1][0].upper()
    tag_dict = {"J": wordnet.ADJ,
                "N": wordnet.NOUN,
                "V": wordnet.VERB,
                "R": wordnet.ADV}
    return tag_dict.get(tag, wordnet.NOUN)


def text_preprocess(sentence):
    """Normalize a raw review sentence for vectorization.

    :param sentence: String containing a sentence to be preprocessed
    :return: the lowercased sentence with HTML line breaks removed and
        alphabetic / numeric runs separated by spaces
    """
    # 1st step: lowercase
    sentence = sentence.lower()
    # 2nd step: <br /><br /> is how IMDB reviews encode paragraph breaks
    sentence = sentence.replace('<br /><br />', ' ')
    # 3rd step: insert spaces around alphabetic and numeric runs so that
    # punctuation never sticks to a token
    sentence = re.sub("[A-Za-z]+", lambda ele: " " + ele[0] + " ", sentence)
    sentence = re.sub("[0-9]+", lambda ele: " " + ele[0] + " ", sentence)
    return sentence


def _fit_and_report(model, X_train, y_train, X_test, y_test):
    """Fit ``model`` and print training/prediction times and accuracies."""
    t0 = time()
    model.fit(X_train, y_train)
    print(f'\nTraining time: {round(time() - t0, 3)} s')

    t0 = time()
    score_train = model.score(X_train, y_train)
    print(f'Prediction time(train): {round(time() - t0, 3)} s')

    t0 = time()
    score_test = model.score(X_test, y_test)
    print(f'Prediction time(test): {round(time() - t0, 3)} s')

    print('\nTrain set score: ', score_train)
    print('Test set score: ', score_test)


if __name__ == '__main__':
    print('Loading train, dev and test data...')
    # loading train, dev and test data
    ls = read_IMDB('IMDB Dataset.csv')
    train_ls, dev_ls, test_ls = train_test_split(ls)
    print('Done.\n')

    # Extract features and labels
    print('Extracting features and labels...')
    X_train = np.array([text_preprocess(item[0]) for item in train_ls])
    X_dev = np.array([text_preprocess(item[0]) for item in dev_ls])
    X_test = np.array([text_preprocess(item[0]) for item in test_ls])
    # np.long was removed in NumPy 1.24; np.int64 is the explicit equivalent
    y_train = np.array([item[1] for item in train_ls], dtype=np.int64)
    y_dev = np.array([item[1] for item in dev_ls], dtype=np.int64)
    y_test = np.array([item[1] for item in test_ls], dtype=np.int64)
    print('Done.\n')

    # CountVectorizer performs better than Tfidf on this task:
    # vectorizer = TfidfVectorizer(sublinear_tf=True, use_idf=True, max_df=0.5, min_df=0.00, stop_words='english')
    print('Vectorizing features with CountVectorizer...')
    vectorizer = CountVectorizer(ngram_range=(1, 3), stop_words='english')
    # Learn the vocabulary on the training set only, then transform all splits
    X_train = vectorizer.fit_transform(X_train)
    X_dev = vectorizer.transform(X_dev)
    X_test = vectorizer.transform(X_test)
    print('Done.\n')

    # MultinomialNB accepts sparse input directly (GaussianNB would need dense)
    print('Fitting Multinomial Naive Bayes model...')
    _fit_and_report(MultinomialNB(), X_train, y_train, X_test, y_test)

    print('Fitting Gradient Boosting model...')
    _fit_and_report(GradientBoostingClassifier(), X_train, y_train, X_test, y_test)
[ "sklearn.feature_extraction.text.CountVectorizer", "numpy.array", "sklearn.naive_bayes.MultinomialNB", "sklearn.ensemble.GradientBoostingClassifier", "time.time" ]
[((1950, 2005), 'numpy.array', 'np.array', (['[item[1] for item in train_ls]'], {'dtype': 'np.long'}), '([item[1] for item in train_ls], dtype=np.long)\n', (1958, 2005), True, 'import numpy as np\n'), ((2018, 2071), 'numpy.array', 'np.array', (['[item[1] for item in dev_ls]'], {'dtype': 'np.long'}), '([item[1] for item in dev_ls], dtype=np.long)\n', (2026, 2071), True, 'import numpy as np\n'), ((2085, 2139), 'numpy.array', 'np.array', (['[item[1] for item in test_ls]'], {'dtype': 'np.long'}), '([item[1] for item in test_ls], dtype=np.long)\n', (2093, 2139), True, 'import numpy as np\n'), ((2465, 2522), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'ngram_range': '(1, 3)', 'stop_words': '"""english"""'}), "(ngram_range=(1, 3), stop_words='english')\n", (2480, 2522), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((2863, 2869), 'time.time', 'time', ([], {}), '()\n', (2867, 2869), False, 'from time import time\n'), ((2884, 2899), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (2897, 2899), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((3094, 3100), 'time.time', 'time', ([], {}), '()\n', (3098, 3100), False, 'from time import time\n'), ((3225, 3231), 'time.time', 'time', ([], {}), '()\n', (3229, 3231), False, 'from time import time\n'), ((3440, 3446), 'time.time', 'time', ([], {}), '()\n', (3444, 3446), False, 'from time import time\n'), ((3461, 3489), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (3487, 3489), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((3662, 3668), 'time.time', 'time', ([], {}), '()\n', (3666, 3668), False, 'from time import time\n'), ((3793, 3799), 'time.time', 'time', ([], {}), '()\n', (3797, 3799), False, 'from time import time\n'), ((3040, 3046), 'time.time', 'time', ([], {}), '()\n', (3044, 3046), False, 'from time import time\n'), ((3194, 3200), 'time.time', 'time', ([], {}), 
'()\n', (3198, 3200), False, 'from time import time\n'), ((3321, 3327), 'time.time', 'time', ([], {}), '()\n', (3325, 3327), False, 'from time import time\n'), ((3608, 3614), 'time.time', 'time', ([], {}), '()\n', (3612, 3614), False, 'from time import time\n'), ((3762, 3768), 'time.time', 'time', ([], {}), '()\n', (3766, 3768), False, 'from time import time\n'), ((3889, 3895), 'time.time', 'time', ([], {}), '()\n', (3893, 3895), False, 'from time import time\n')]
''' prep_dev_notebook: pred_newshapes_dev: Runs against new_shapes ''' import os import sys import random import math import re import gc import time import numpy as np import cv2 import matplotlib import matplotlib.pyplot as plt import tensorflow as tf import keras import keras.backend as KB import mrcnn.model_mod as modellib import mrcnn.visualize as visualize from mrcnn.config import Config from mrcnn.dataset import Dataset from mrcnn.utils import stack_tensors, stack_tensors_3d, log from mrcnn.datagen import data_generator, load_image_gt import platform syst = platform.system() if syst == 'Windows': # Root directory of the project print(' windows ' , syst) # WINDOWS MACHINE ------------------------------------------------------------------ ROOT_DIR = "E:\\" MODEL_PATH = os.path.join(ROOT_DIR, "models") DATASET_PATH = os.path.join(ROOT_DIR, 'MLDatasets') #### MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_logs") COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5") DEFAULT_LOGS_DIR = os.path.join(MODEL_PATH, "mrcnn_coco_logs") COCO_DATASET_PATH = os.path.join(DATASET_PATH,"coco2014") RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5") elif syst == 'Linux': print(' Linx ' , syst) # LINUX MACHINE ------------------------------------------------------------------ ROOT_DIR = os.getcwd() MODEL_PATH = os.path.expanduser('~/models') DATASET_PATH = os.path.expanduser('~/MLDatasets') #### MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_development_logs") COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5") COCO_DATASET_PATH = os.path.join(DATASET_PATH,"coco2014") DEFAULT_LOGS_DIR = os.path.join(MODEL_PATH, "mrcnn_coco_logs") RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5") else : raise Error('unreconized system ' ) print("Tensorflow Version: {} Keras Version : {} ".format(tf.__version__,keras.__version__)) import pprint pp = pprint.PrettyPrinter(indent=2, 
width=100) np.set_printoptions(linewidth=100,precision=4,threshold=1000, suppress = True) ##------------------------------------------------------------------------------------ ## Old Shapes TRAINING ##------------------------------------------------------------------------------------ def prep_oldshapes_train(init_with = None, FCN_layers = False, batch_sz = 5, epoch_steps = 4, folder_name= "mrcnn_oldshape_training_logs"): import mrcnn.shapes as shapes MODEL_DIR = os.path.join(MODEL_PATH, folder_name) # Build configuration object ----------------------------------------------- config = shapes.ShapesConfig() config.BATCH_SIZE = batch_sz # Batch size is 2 (# GPUs * images/GPU). config.IMAGES_PER_GPU = batch_sz # Must match BATCH_SIZE config.STEPS_PER_EPOCH = epoch_steps config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2] # Build shape dataset ----------------------------------------------- dataset_train = shapes.ShapesDataset() dataset_train.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1]) dataset_train.prepare() # Validation dataset dataset_val = shapes.ShapesDataset() dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1]) dataset_val.prepare() try : del model print('delete model is successful') gc.collect() except: pass KB.clear_session() model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR, FCN_layers = FCN_layers) print(' COCO Model Path : ', COCO_MODEL_PATH) print(' Checkpoint folder Path: ', MODEL_DIR) print(' Model Parent Path : ', MODEL_PATH) print(' Resent Model Path : ', RESNET_MODEL_PATH) load_model(model, init_with = init_with) train_generator = data_generator(dataset_train, model.config, shuffle=True, batch_size=model.config.BATCH_SIZE, augment = False) val_generator = data_generator(dataset_val, model.config, shuffle=True, batch_size=model.config.BATCH_SIZE, augment=False) model.config.display() return [model, dataset_train, dataset_val, train_generator, val_generator, config] 
##------------------------------------------------------------------------------------ ## Old Shapes TESTING ##------------------------------------------------------------------------------------ def prep_oldshapes_test(init_with = None, FCN_layers = False, batch_sz = 5, epoch_steps = 4, folder_name= "mrcnn_oldshape_test_logs"): import mrcnn.shapes as shapes MODEL_DIR = os.path.join(MODEL_PATH, folder_name) # MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_development_logs") # Build configuration object ----------------------------------------------- config = shapes.ShapesConfig() config.BATCH_SIZE = batch_sz # Batch size is 2 (# GPUs * images/GPU). config.IMAGES_PER_GPU = batch_sz # Must match BATCH_SIZE config.STEPS_PER_EPOCH = epoch_steps config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2] # Build shape dataset ----------------------------------------------- dataset_test = shapes.ShapesDataset() dataset_test.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1]) dataset_test.prepare() # Recreate the model in inference mode try : del model print('delete model is successful') gc.collect() except: pass KB.clear_session() model = modellib.MaskRCNN(mode="inference", config=config, model_dir=MODEL_DIR, FCN_layers = FCN_layers ) print(' COCO Model Path : ', COCO_MODEL_PATH) print(' Checkpoint folder Path: ', MODEL_DIR) print(' Model Parent Path : ', MODEL_PATH) print(' Resent Model Path : ', RESNET_MODEL_PATH) load_model(model, init_with = init_with) test_generator = data_generator(dataset_test, model.config, shuffle=True, batch_size=model.config.BATCH_SIZE, augment = False) model.config.display() return [model, dataset_test, test_generator, config] ##------------------------------------------------------------------------------------ ## New Shapes TESTING ##------------------------------------------------------------------------------------ def prep_newshapes_test(init_with = 'last', FCN_layers = False, batch_sz = 5, epoch_steps = 4,folder_name= 
"mrcnn_newshape_test_logs"): import mrcnn.new_shapes as new_shapes MODEL_DIR = os.path.join(MODEL_PATH, folder_name) # Build configuration object ----------------------------------------------- config = new_shapes.NewShapesConfig() config.BATCH_SIZE = batch_sz # Batch size is 2 (# GPUs * images/GPU). config.IMAGES_PER_GPU = batch_sz # Must match BATCH_SIZE config.STEPS_PER_EPOCH = epoch_steps config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2] # Build shape dataset ----------------------------------------------- # Training dataset dataset_test = new_shapes.NewShapesDataset() dataset_test.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1]) dataset_test.prepare() # Recreate the model in inference mode try : del model print('delete model is successful') gc.collect() except: pass KB.clear_session() model = modellib.MaskRCNN(mode="inference", config=config, model_dir=MODEL_DIR, FCN_layers = FCN_layers ) print(' COCO Model Path : ', COCO_MODEL_PATH) print(' Checkpoint folder Path: ', MODEL_DIR) print(' Model Parent Path : ', MODEL_PATH) print(' Resent Model Path : ', RESNET_MODEL_PATH) load_model(model, init_with = init_with) test_generator = data_generator(dataset_test, model.config, shuffle=True, batch_size=model.config.BATCH_SIZE, augment = False) model.config.display() return [model, dataset_test, test_generator, config] ##------------------------------------------------------------------------------------ ## New Shapes TRAINING ##------------------------------------------------------------------------------------ def prep_newshapes_train(init_with = "last", FCN_layers= False, batch_sz =5, epoch_steps = 4, folder_name= "mrcnn_newshape_training_logs"): import mrcnn.new_shapes as new_shapes MODEL_DIR = os.path.join(MODEL_PATH, folder_name) # Build configuration object ----------------------------------------------- config = new_shapes.NewShapesConfig() config.BATCH_SIZE = batch_sz # Batch size is 2 (# GPUs * images/GPU). 
config.IMAGES_PER_GPU = batch_sz # Must match BATCH_SIZE config.STEPS_PER_EPOCH = epoch_steps config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2] # Build shape dataset ----------------------------------------------- # Training dataset dataset_train = new_shapes.NewShapesDataset() dataset_train.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1]) dataset_train.prepare() # Validation dataset dataset_val = new_shapes.NewShapesDataset() dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1]) dataset_val.prepare() try : del model print('delete model is successful') gc.collect() except: pass KB.clear_session() model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR,FCN_layers = FCN_layers) print('MODEL_PATH : ', MODEL_PATH) print('COCO_MODEL_PATH : ', COCO_MODEL_PATH) print('RESNET_MODEL_PATH : ', RESNET_MODEL_PATH) print('MODEL_DIR : ', MODEL_DIR) print('Last Saved Model : ', model.find_last()) load_model(model, init_with = 'last') train_generator = data_generator(dataset_train, model.config, shuffle=True, batch_size=model.config.BATCH_SIZE, augment = False) config.display() return [model, dataset_train, train_generator, config] ##------------------------------------------------------------------------------------ ## LOAD MODEL ##------------------------------------------------------------------------------------ def load_model(model, init_with = None): ''' methods to load weights 1 - load a specific file 2 - find a last checkpoint in a specific folder 3 - use init_with keyword ''' # Which weights to start with? 
print('-----------------------------------------------') print(' Load model with init parm: ', init_with) # print(' find last chkpt :', model.find_last()) # print(' n) print('-----------------------------------------------') ## 1- look for a specific weights file ## Load trained weights (fill in path to trained weights here) # model_path = 'E:\\Models\\mrcnn_logs\\shapes20180428T1819\\mask_rcnn_shapes_5784.h5' # print(' model_path : ', model_path ) # print("Loading weights from ", model_path) # model.load_weights(model_path, by_name=True) # print('Load weights complete') # ## 2- look for last checkpoint file in a specific folder (not working correctly) # model.config.LAST_EPOCH_RAN = 5784 # model.model_dir = 'E:\\Models\\mrcnn_logs\\shapes20180428T1819' # last_model_found = model.find_last() # print(' last model in MODEL_DIR: ', last_model_found) # # loc= model.load_weights(model.find_last()[1], by_name=True) # # print('Load weights complete :', loc) ## 3- Use init_with keyword ## Which weights to start with? 
# init_with = "last" # imagenet, coco, or last if init_with == "imagenet": # loc=model.load_weights(model.get_imagenet_weights(), by_name=True) loc=model.load_weights(RESNET_MODEL_PATH, by_name=True) elif init_with == "coco": # Load weights trained on MS COCO, but skip layers that # are different due to the different number of classes # See README for instructions to download the COCO weights loc=model.load_weights(COCO_MODEL_PATH, by_name=True, exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"]) elif init_with == "last": # Load the last model you trained and continue training, placing checkpouints in same folder loc= model.load_weights(model.find_last()[1], by_name=True) else: assert init_with != "", "Provide path to trained weights" print("Loading weights from ", init_with) loc = model.load_weights(init_with, by_name=True) print('Load weights complete', loc) """ ##------------------------------------------------------------------------------------ ## Old Shapes DEVELOPMENT ##------------------------------------------------------------------------------------ def prep_oldshapes_dev(init_with = None, FCN_layers = False, batch_sz = 5): import mrcnn.shapes as shapes MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_oldshape_dev_logs") config = build_config(batch_sz = batch_sz) dataset_train = shapes.ShapesDataset() dataset_train.load_shapes(150, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1]) dataset_train.prepare() try : del model print('delete model is successful') gc.collect() except: pass KB.clear_session() model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR, FCN_layers = FCN_layers) print(' COCO Model Path : ', COCO_MODEL_PATH) print(' Checkpoint folder Path: ', MODEL_DIR) print(' Model Parent Path : ', MODEL_PATH) print(' Resent Model Path : ', RESNET_MODEL_PATH) load_model(model, init_with = init_with) train_generator = data_generator(dataset_train, model.config, shuffle=True, batch_size=model.config.BATCH_SIZE, 
augment = False) model.config.display() return [model, dataset_train, train_generator, config] ##------------------------------------------------------------------------------------ ## New Shapes DEVELOPMENT ##------------------------------------------------------------------------------------ def prep_newshapes_dev(init_with = "last", FCN_layers= False, batch_sz = 5): import mrcnn.new_shapes as new_shapes MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_newshape_dev_logs") config = build_config(batch_sz = batch_sz, newshapes=True) # Build shape dataset ----------------------------------------------- # Training dataset dataset_train = new_shapes.NewShapesDataset() dataset_train.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1]) dataset_train.prepare() # Validation dataset dataset_val = new_shapes.NewShapesDataset() dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1]) dataset_val.prepare() try : del model, train_generator, val_generator, mm gc.collect() except: pass KB.clear_session() model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR,FCN_layers = FCN_layers) print('MODEL_PATH : ', MODEL_PATH) print('COCO_MODEL_PATH : ', COCO_MODEL_PATH) print('RESNET_MODEL_PATH : ', RESNET_MODEL_PATH) print('MODEL_DIR : ', MODEL_DIR) print('Last Saved Model : ', model.find_last()) load_model(model, init_with = 'last') train_generator = data_generator(dataset_train, model.config, shuffle=True, batch_size=model.config.BATCH_SIZE, augment = False) config.display() return [model, dataset_train, train_generator, config] """
[ "mrcnn.new_shapes.NewShapesDataset", "mrcnn.datagen.data_generator", "mrcnn.new_shapes.NewShapesConfig", "os.path.join", "mrcnn.model_mod.MaskRCNN", "os.getcwd", "platform.system", "mrcnn.shapes.ShapesConfig", "mrcnn.shapes.ShapesDataset", "pprint.PrettyPrinter", "keras.backend.clear_session", ...
[((597, 614), 'platform.system', 'platform.system', ([], {}), '()\n', (612, 614), False, 'import platform\n'), ((2144, 2185), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(2)', 'width': '(100)'}), '(indent=2, width=100)\n', (2164, 2185), False, 'import pprint\n'), ((2186, 2264), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(100)', 'precision': '(4)', 'threshold': '(1000)', 'suppress': '(True)'}), '(linewidth=100, precision=4, threshold=1000, suppress=True)\n', (2205, 2264), True, 'import numpy as np\n'), ((847, 879), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""models"""'], {}), "(ROOT_DIR, 'models')\n", (859, 879), False, 'import os\n'), ((904, 940), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""MLDatasets"""'], {}), "(ROOT_DIR, 'MLDatasets')\n", (916, 940), False, 'import os\n'), ((1028, 1073), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mask_rcnn_coco.h5"""'], {}), "(MODEL_PATH, 'mask_rcnn_coco.h5')\n", (1040, 1073), False, 'import os\n'), ((1098, 1141), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mrcnn_coco_logs"""'], {}), "(MODEL_PATH, 'mrcnn_coco_logs')\n", (1110, 1141), False, 'import os\n'), ((1166, 1204), 'os.path.join', 'os.path.join', (['DATASET_PATH', '"""coco2014"""'], {}), "(DATASET_PATH, 'coco2014')\n", (1178, 1204), False, 'import os\n'), ((1228, 1313), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5"""'], {}), "(MODEL_PATH, 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n )\n", (1240, 1313), False, 'import os\n'), ((2669, 2706), 'os.path.join', 'os.path.join', (['MODEL_PATH', 'folder_name'], {}), '(MODEL_PATH, folder_name)\n', (2681, 2706), False, 'import os\n'), ((2802, 2823), 'mrcnn.shapes.ShapesConfig', 'shapes.ShapesConfig', ([], {}), '()\n', (2821, 2823), True, 'import mrcnn.shapes as shapes\n'), ((3195, 3217), 'mrcnn.shapes.ShapesDataset', 'shapes.ShapesDataset', ([], {}), '()\n', (3215, 3217), True, 'import 
mrcnn.shapes as shapes\n'), ((3373, 3395), 'mrcnn.shapes.ShapesDataset', 'shapes.ShapesDataset', ([], {}), '()\n', (3393, 3395), True, 'import mrcnn.shapes as shapes\n'), ((3629, 3647), 'keras.backend.clear_session', 'KB.clear_session', ([], {}), '()\n', (3645, 3647), True, 'import keras.backend as KB\n'), ((3660, 3757), 'mrcnn.model_mod.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""training"""', 'config': 'config', 'model_dir': 'MODEL_DIR', 'FCN_layers': 'FCN_layers'}), "(mode='training', config=config, model_dir=MODEL_DIR,\n FCN_layers=FCN_layers)\n", (3677, 3757), True, 'import mrcnn.model_mod as modellib\n'), ((4041, 4154), 'mrcnn.datagen.data_generator', 'data_generator', (['dataset_train', 'model.config'], {'shuffle': '(True)', 'batch_size': 'model.config.BATCH_SIZE', 'augment': '(False)'}), '(dataset_train, model.config, shuffle=True, batch_size=model.\n config.BATCH_SIZE, augment=False)\n', (4055, 4154), False, 'from mrcnn.datagen import data_generator, load_image_gt\n'), ((4246, 4357), 'mrcnn.datagen.data_generator', 'data_generator', (['dataset_val', 'model.config'], {'shuffle': '(True)', 'batch_size': 'model.config.BATCH_SIZE', 'augment': '(False)'}), '(dataset_val, model.config, shuffle=True, batch_size=model.\n config.BATCH_SIZE, augment=False)\n', (4260, 4357), False, 'from mrcnn.datagen import data_generator, load_image_gt\n'), ((5006, 5043), 'os.path.join', 'os.path.join', (['MODEL_PATH', 'folder_name'], {}), '(MODEL_PATH, folder_name)\n', (5018, 5043), False, 'import os\n'), ((5208, 5229), 'mrcnn.shapes.ShapesConfig', 'shapes.ShapesConfig', ([], {}), '()\n', (5227, 5229), True, 'import mrcnn.shapes as shapes\n'), ((5600, 5622), 'mrcnn.shapes.ShapesDataset', 'shapes.ShapesDataset', ([], {}), '()\n', (5620, 5622), True, 'import mrcnn.shapes as shapes\n'), ((5897, 5915), 'keras.backend.clear_session', 'KB.clear_session', ([], {}), '()\n', (5913, 5915), True, 'import keras.backend as KB\n'), ((5928, 6026), 'mrcnn.model_mod.MaskRCNN', 
'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'config', 'model_dir': 'MODEL_DIR', 'FCN_layers': 'FCN_layers'}), "(mode='inference', config=config, model_dir=MODEL_DIR,\n FCN_layers=FCN_layers)\n", (5945, 6026), True, 'import mrcnn.model_mod as modellib\n'), ((6410, 6522), 'mrcnn.datagen.data_generator', 'data_generator', (['dataset_test', 'model.config'], {'shuffle': '(True)', 'batch_size': 'model.config.BATCH_SIZE', 'augment': '(False)'}), '(dataset_test, model.config, shuffle=True, batch_size=model.\n config.BATCH_SIZE, augment=False)\n', (6424, 6522), False, 'from mrcnn.datagen import data_generator, load_image_gt\n'), ((7115, 7152), 'os.path.join', 'os.path.join', (['MODEL_PATH', 'folder_name'], {}), '(MODEL_PATH, folder_name)\n', (7127, 7152), False, 'import os\n'), ((7248, 7276), 'mrcnn.new_shapes.NewShapesConfig', 'new_shapes.NewShapesConfig', ([], {}), '()\n', (7274, 7276), True, 'import mrcnn.new_shapes as new_shapes\n'), ((7671, 7700), 'mrcnn.new_shapes.NewShapesDataset', 'new_shapes.NewShapesDataset', ([], {}), '()\n', (7698, 7700), True, 'import mrcnn.new_shapes as new_shapes\n'), ((7977, 7995), 'keras.backend.clear_session', 'KB.clear_session', ([], {}), '()\n', (7993, 7995), True, 'import keras.backend as KB\n'), ((8008, 8106), 'mrcnn.model_mod.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'config', 'model_dir': 'MODEL_DIR', 'FCN_layers': 'FCN_layers'}), "(mode='inference', config=config, model_dir=MODEL_DIR,\n FCN_layers=FCN_layers)\n", (8025, 8106), True, 'import mrcnn.model_mod as modellib\n'), ((8490, 8602), 'mrcnn.datagen.data_generator', 'data_generator', (['dataset_test', 'model.config'], {'shuffle': '(True)', 'batch_size': 'model.config.BATCH_SIZE', 'augment': '(False)'}), '(dataset_test, model.config, shuffle=True, batch_size=model.\n config.BATCH_SIZE, augment=False)\n', (8504, 8602), False, 'from mrcnn.datagen import data_generator, load_image_gt\n'), ((9223, 9260), 'os.path.join', 
'os.path.join', (['MODEL_PATH', 'folder_name'], {}), '(MODEL_PATH, folder_name)\n', (9235, 9260), False, 'import os\n'), ((9356, 9384), 'mrcnn.new_shapes.NewShapesConfig', 'new_shapes.NewShapesConfig', ([], {}), '()\n', (9382, 9384), True, 'import mrcnn.new_shapes as new_shapes\n'), ((9779, 9808), 'mrcnn.new_shapes.NewShapesDataset', 'new_shapes.NewShapesDataset', ([], {}), '()\n', (9806, 9808), True, 'import mrcnn.new_shapes as new_shapes\n'), ((9963, 9992), 'mrcnn.new_shapes.NewShapesDataset', 'new_shapes.NewShapesDataset', ([], {}), '()\n', (9990, 9992), True, 'import mrcnn.new_shapes as new_shapes\n'), ((10222, 10240), 'keras.backend.clear_session', 'KB.clear_session', ([], {}), '()\n', (10238, 10240), True, 'import keras.backend as KB\n'), ((10253, 10350), 'mrcnn.model_mod.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""training"""', 'config': 'config', 'model_dir': 'MODEL_DIR', 'FCN_layers': 'FCN_layers'}), "(mode='training', config=config, model_dir=MODEL_DIR,\n FCN_layers=FCN_layers)\n", (10270, 10350), True, 'import mrcnn.model_mod as modellib\n'), ((10663, 10776), 'mrcnn.datagen.data_generator', 'data_generator', (['dataset_train', 'model.config'], {'shuffle': '(True)', 'batch_size': 'model.config.BATCH_SIZE', 'augment': '(False)'}), '(dataset_train, model.config, shuffle=True, batch_size=model.\n config.BATCH_SIZE, augment=False)\n', (10677, 10776), False, 'from mrcnn.datagen import data_generator, load_image_gt\n'), ((1469, 1480), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1478, 1480), False, 'import os\n'), ((1505, 1535), 'os.path.expanduser', 'os.path.expanduser', (['"""~/models"""'], {}), "('~/models')\n", (1523, 1535), False, 'import os\n'), ((1560, 1594), 'os.path.expanduser', 'os.path.expanduser', (['"""~/MLDatasets"""'], {}), "('~/MLDatasets')\n", (1578, 1594), False, 'import os\n'), ((1694, 1739), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mask_rcnn_coco.h5"""'], {}), "(MODEL_PATH, 'mask_rcnn_coco.h5')\n", (1706, 1739), False, 
'import os\n'), ((1764, 1802), 'os.path.join', 'os.path.join', (['DATASET_PATH', '"""coco2014"""'], {}), "(DATASET_PATH, 'coco2014')\n", (1776, 1802), False, 'import os\n'), ((1826, 1869), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mrcnn_coco_logs"""'], {}), "(MODEL_PATH, 'mrcnn_coco_logs')\n", (1838, 1869), False, 'import os\n'), ((1894, 1979), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5"""'], {}), "(MODEL_PATH, 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n )\n", (1906, 1979), False, 'import os\n'), ((3586, 3598), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3596, 3598), False, 'import gc\n'), ((5854, 5866), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5864, 5866), False, 'import gc\n'), ((7934, 7946), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7944, 7946), False, 'import gc\n'), ((10179, 10191), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10189, 10191), False, 'import gc\n')]
import numpy as np import datetime from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.neural_network import MLPClassifier import sklearn.metrics from ops import add_features, augment_data from sklearn.model_selection import train_test_split from capture_data import DataObserver from sklearn.preprocessing import StandardScaler import warnings import time import pickle warnings.filterwarnings("ignore") np.set_printoptions(suppress=True) ##CONFIG root_path = './data/' timestamp = datetime.datetime.now().isoformat().split('.')[0].replace(':', '_') model_dir = './experiments/model-' + timestamp + '/' # Parameters vocabulary = 'PEAWSB' n_classes = len(vocabulary)+1 # number of classes data_scaler = StandardScaler() n_features = 12 def prepare_data(augment_iter=0): X = [] y = [] for i in range(n_classes): if i == n_classes - 1: char = 'None' else: char = vocabulary[i] res_x = pickle.load(open(root_path + char + ".pkl", 'rb')) res_y = np.tile(i, (len(res_x), 1)).tolist() X += res_x y += res_y X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=42, stratify=y) X_train, y_train = augment_data(X_train, y_train, iterations=augment_iter) # add features and normalize data to 0 mean and unit variance pen_up = [] for i in range(len(X_train)): sequence = np.asarray(X_train[i]) pen_up.append(sequence[:, 2]) sequence = sequence[:, 0:2] sequence = add_features(sequence) X_train[i] = sequence data_scaler.fit(np.vstack(X_train)) for i in range(len(X_train)): sequence = np.asarray(X_train[i]) sequence = data_scaler.transform(sequence) X_train[i] = np.column_stack((sequence, pen_up[i])).tolist() for i in range(len(X_test)): sequence = np.asarray(X_test[i]) pen_up = sequence[:, 2] sequence = sequence[:, 0:2] sequence = add_features(sequence) sequence = data_scaler.transform(sequence) X_test[i] = np.column_stack((sequence, pen_up)).tolist() max_seqLen = 
max(len(max(X_train, key=len)), len(max(X_test, key=len))) # Pad sequences for dimension consistency padding_mask = np.zeros(n_features).tolist() for i in range(len(X_train)): X_train[i] += [padding_mask for _ in range(max_seqLen - len(X_train[i]))] for i in range(len(X_test)): X_test[i] += [padding_mask for _ in range(max_seqLen - len(X_test[i]))] # flat sequence X_train = np.asarray(X_train) shape = np.shape(X_train) X_train = np.reshape(X_train, (shape[0], shape[1] * shape[2])) X_test = np.asarray(X_test) shape = np.shape(X_test) X_test = np.reshape(X_test, (shape[0], shape[1] * shape[2])) return X_train, X_test, y_train, y_test def train(model, X, y): # Perform training print("Start training..") #model_training model.fit(X, y) #saver.save(sess, model_dir + 'model.cptk') print("Training done, final model saved") def test(model, X, y): # prediction sample for every entry of test set prediction = model.predict(X) test_confusion_matrix = sklearn.metrics.confusion_matrix(y, prediction, labels=range(n_classes)) test_accuracy = np.sum(np.diagonal(test_confusion_matrix)) / np.sum(test_confusion_matrix) print("Test Accuracy: ", test_accuracy) print("Test Confusion Matrix:") print(test_confusion_matrix) if __name__ == '__main__': X_train, X_test, y_train, y_test = prepare_data(augment_iter=4) #model = RandomForestClassifier(n_estimators=20, max_depth=12) # Test Accuracy: 0.7238 #model = SVC() # linear: Test Accuracy: 0.7428, rbf: 0.7142 model = MLPClassifier(hidden_layer_sizes=(300,), learning_rate='adaptive', random_state=1) # Test Accuracy: 0.8 # train models train(model, X_train, y_train) # evaluate models test(model, X_test, y_test)
[ "numpy.diagonal", "numpy.reshape", "ops.augment_data", "sklearn.neural_network.MLPClassifier", "sklearn.model_selection.train_test_split", "numpy.asarray", "numpy.column_stack", "ops.add_features", "sklearn.preprocessing.StandardScaler", "numpy.sum", "numpy.zeros", "datetime.datetime.now", "...
[((463, 496), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (486, 496), False, 'import warnings\n'), ((497, 531), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (516, 531), True, 'import numpy as np\n'), ((798, 814), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (812, 814), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1223, 1290), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.15)', 'random_state': '(42)', 'stratify': 'y'}), '(X, y, test_size=0.15, random_state=42, stratify=y)\n', (1239, 1290), False, 'from sklearn.model_selection import train_test_split\n'), ((1315, 1370), 'ops.augment_data', 'augment_data', (['X_train', 'y_train'], {'iterations': 'augment_iter'}), '(X_train, y_train, iterations=augment_iter)\n', (1327, 1370), False, 'from ops import add_features, augment_data\n'), ((2651, 2670), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (2661, 2670), True, 'import numpy as np\n'), ((2683, 2700), 'numpy.shape', 'np.shape', (['X_train'], {}), '(X_train)\n', (2691, 2700), True, 'import numpy as np\n'), ((2715, 2767), 'numpy.reshape', 'np.reshape', (['X_train', '(shape[0], shape[1] * shape[2])'], {}), '(X_train, (shape[0], shape[1] * shape[2]))\n', (2725, 2767), True, 'import numpy as np\n'), ((2782, 2800), 'numpy.asarray', 'np.asarray', (['X_test'], {}), '(X_test)\n', (2792, 2800), True, 'import numpy as np\n'), ((2813, 2829), 'numpy.shape', 'np.shape', (['X_test'], {}), '(X_test)\n', (2821, 2829), True, 'import numpy as np\n'), ((2843, 2894), 'numpy.reshape', 'np.reshape', (['X_test', '(shape[0], shape[1] * shape[2])'], {}), '(X_test, (shape[0], shape[1] * shape[2]))\n', (2853, 2894), True, 'import numpy as np\n'), ((3845, 3931), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '(300,)', 'learning_rate': 
'"""adaptive"""', 'random_state': '(1)'}), "(hidden_layer_sizes=(300,), learning_rate='adaptive',\n random_state=1)\n", (3858, 3931), False, 'from sklearn.neural_network import MLPClassifier\n'), ((1507, 1529), 'numpy.asarray', 'np.asarray', (['X_train[i]'], {}), '(X_train[i])\n', (1517, 1529), True, 'import numpy as np\n'), ((1623, 1645), 'ops.add_features', 'add_features', (['sequence'], {}), '(sequence)\n', (1635, 1645), False, 'from ops import add_features, augment_data\n'), ((1697, 1715), 'numpy.vstack', 'np.vstack', (['X_train'], {}), '(X_train)\n', (1706, 1715), True, 'import numpy as np\n'), ((1770, 1792), 'numpy.asarray', 'np.asarray', (['X_train[i]'], {}), '(X_train[i])\n', (1780, 1792), True, 'import numpy as np\n'), ((1966, 1987), 'numpy.asarray', 'np.asarray', (['X_test[i]'], {}), '(X_test[i])\n', (1976, 1987), True, 'import numpy as np\n'), ((2075, 2097), 'ops.add_features', 'add_features', (['sequence'], {}), '(sequence)\n', (2087, 2097), False, 'from ops import add_features, augment_data\n'), ((3433, 3462), 'numpy.sum', 'np.sum', (['test_confusion_matrix'], {}), '(test_confusion_matrix)\n', (3439, 3462), True, 'import numpy as np\n'), ((2356, 2376), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (2364, 2376), True, 'import numpy as np\n'), ((3395, 3429), 'numpy.diagonal', 'np.diagonal', (['test_confusion_matrix'], {}), '(test_confusion_matrix)\n', (3406, 3429), True, 'import numpy as np\n'), ((1865, 1903), 'numpy.column_stack', 'np.column_stack', (['(sequence, pen_up[i])'], {}), '((sequence, pen_up[i]))\n', (1880, 1903), True, 'import numpy as np\n'), ((2169, 2204), 'numpy.column_stack', 'np.column_stack', (['(sequence, pen_up)'], {}), '((sequence, pen_up))\n', (2184, 2204), True, 'import numpy as np\n'), ((576, 599), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (597, 599), False, 'import datetime\n')]
import numpy as np import matplotlib.pyplot as plt from matplotlib import patches #################### # 8 custom methods # #################### def plot_custom_bar_chart_with_error(input_data, input_names=None, fig_tag=1, input_fig_size=(9, 7), titles=('bar plot', 'field'), window_title_input='bar plot', color_bar='b', kind=None, additional_field=None, input_parameters=None, log_scale=False, add_extra_numbers=None): fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k') fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08) fig.canvas.set_window_title(window_title_input) ax_bar = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3) if additional_field is not None: ax_field = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1) index = np.arange(len(input_data)) bar_width = 0.35 # bar plot ax_bar.bar(index, list(input_data), bar_width, color=color_bar) ax_bar.set_title(titles[0]) ax_bar.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_bar.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_bar.set_axisbelow(True) ax_bar.set_xlabel('Methods') ax_bar.set_ylabel('Error (pixel)') if log_scale: ax_bar.set_yscale('log') ax_bar.set_ylabel('Error - log scale - (pixel)') ax_bar.set_xlim(0 - bar_width, len(input_data) - bar_width) if input_names is not None: ax_bar.set_xticks(index, minor=False) xtick_names = plt.setp(ax_bar, xticklabels=input_names) plt.setp(xtick_names, rotation=45, fontsize=12) ax_bar.grid(True) # fig.text(0.5, 0.04, 'Methods', ha='center') # fig.text(0.01, 0.5, 'Errors', va='center', rotation='vertical') # right side of the figure: # Quiver if additional_field is not None: ax_field.set_title(titles[1]) X, Y = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1])) ax_field.quiver(Y, X, additional_field[:, :, 0, 0, 0], additional_field[:, :, 0, 0, 1], color='r', linewidths=0.2, units='xy', angles='xy', scale=1, 
scale_units='xy') # Annotate computational time: if add_extra_numbers is not None: y_val_a, y_val_b = ax_bar.get_ylim() for i in range(len(input_data)): ax_bar.text(i + bar_width/2, 0.85*(y_val_b - y_val_a), str(np.around(add_extra_numbers[i], decimals=9)), horizontalalignment='center', size='small', color='k', rotation=90) # Text on the figure customise this part for the need! # 6 options 'one_SE2', 'multi_SE2', 'one_GAUSS', 'multi_GAUSS', 'one_REALI', 'multi_REALI' if kind is not None: dom = tuple([int(j) for j in input_parameters[:3]]) fig.text(.78, .80, r'Domain = ' + str(dom)) if kind == 'one_SE2': fig.text(.765, .85, r'SE(2) generated SVF: ') fig.text(.78, .75, r'$\theta = $ ' + str(input_parameters[3])) fig.text(.78, .70, r'$t_x = $ ' + str(input_parameters[4])) fig.text(.78, .65, r'$t_y = $ ' + str(input_parameters[5])) elif kind == 'one_HOM': fig.text(.765, .85, r'HOM generated SVF: ') fig.text(.78, .75, r'center: ' + str(input_parameters[3])) fig.text(.78, .70, r'kind: ' + str(input_parameters[4])) fig.text(.78, .65, r'scale_factor: ' + str(input_parameters[5])) fig.text(.78, .60, r'sigma: ' + str(input_parameters[6])) fig.text(.78, .55, r'in_psl: ' + str(input_parameters[7])) elif kind == 'one_GAUSS': fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .75, r'$\sigma_i = $ ' + str(input_parameters[3])) fig.text(.78, .70, r'$\sigma_g = $ ' + str(input_parameters[4])) fig.text(.78, .60, r'Ground truth, steps ') fig.text(.78, .55, str(input_parameters[5]) + ' ' + str(input_parameters[6])) elif kind == 'one_REAL': fig.text(.78, .85, r'id element: ' + str(input_parameters[3])) fig.text(.78, .60, r'Ground truth method ') fig.text(.78, .55, str(input_parameters[4])) else: raise Warning('Kind not recognized.') fig.set_tight_layout(True) return fig def plot_custom_boxplot(input_data, input_names=None, fig_tag=1, input_fig_size=(11, 7.5), x_axis_label='Methods', y_axis_label='Error (pixel)', input_titles=('Error', 'field'), window_title_input='boxplot 
plot', kind=None, additional_field=None, input_parameters=None, log_scale=False, annotate_mean=True, add_extra_annotation=None): """ :param input_data: list of lists, one for each block! :param input_names: :param fig_tag: :param x_axis_label: :param y_axis_label: :param input_fig_size: :param input_titles: :param window_title_input: :param kind: :param additional_field: :param input_parameters: :param log_scale: :param annotate_mean: :param add_extra_annotation: :return: """ fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k') fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08) font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14} fig.canvas.set_window_title(window_title_input) if input_parameters is None: ax_box = plt.subplot(111) else: ax_box = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3) if additional_field is not None: ax_field = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1) num_boxes = len(input_data) index_boxes = np.arange(1, num_boxes+1) bp = ax_box.boxplot(input_data, notch=False, patch_artist=False, sym='+', vert=1, whis=1.5) # set the colors: plt.setp(bp['boxes'], color='blue') plt.setp(bp['whiskers'], color='blue') plt.setp(bp['fliers'], color='red', marker='+') ax_box.set_title(input_titles[0]) ax_box.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_box.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_box.set_axisbelow(True) ax_box.set_xlabel(x_axis_label, fontdict=font, labelpad=18) ax_box.set_ylabel(y_axis_label, fontdict=font, labelpad=10) if log_scale: ax_box.set_yscale('log') ax_box.set_ylabel(y_axis_label + ' log-scale') # ax_box.set_xlim(0 - 0.5, num_boxes + 0.5) if input_names is not None: ax_box.set_xticks(index_boxes, minor=False) xtick_names = plt.setp(ax_box, xticklabels=input_names) plt.setp(xtick_names, rotation=45, fontsize=12) #ax_box.grid(True) # right side of the figure: # Quiver 
if additional_field is not None: ax_field.set_title(input_titles[1]) xx, yy = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1])) ax_field.quiver(yy, xx, additional_field[:, :, 0, 0, 0], additional_field[:, :, 0, 0, 1], color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy') # Annotate mean mu = [np.mean(input_data[i]) for i in range(len(input_data))] colors_num = ['green', 'green', 'green', 'green', 'green'] if annotate_mean: y_val = ax_box.get_ylim()[1] for i in range(len(mu)): ax_box.text(i + 0.775, y_val - y_val * 0.1, str(np.around(mu[i], decimals=9)), horizontalalignment='center', size='small', color=colors_num[i % 5], rotation=90) if add_extra_annotation is not None: y_val = ax_box.get_ylim()[1] for i in range(len(add_extra_annotation)): ax_box.text(i + 1.225, y_val - y_val * 0.1, str(np.around(add_extra_annotation[i], decimals=9)), horizontalalignment='center', size='small', color='k', rotation=90) # Text on the figure customise this part for the need! 
# 6 options 'one_SE2', 'multi_SE2', 'one_GAUSS', 'multi_GAUSS', 'one_REALI', 'multi_REALI' if kind is not None and input_parameters is not None: dom = tuple([int(j) for j in input_parameters[:3]]) fig.text(.78, .80, r'Domain = ' + str(dom)) if kind == 'multiple_SE2': fig.text(.765, .85, r'SE(2) generated SVF: ') fig.text(.78, .75, r'number of samples: ' + str(int(input_parameters[3]))) fig.text(.78, .70, str(np.round(input_parameters[4], 3)) + r'$ \leq \theta \leq $ ' + str(np.round(input_parameters[5], 3))) fig.text(.78, .65, str(np.round(input_parameters[3], 3)) + r'$ \leq t_x \leq $ ' + str(np.round(input_parameters[7], 3))) fig.text(.78, .60, str(np.round(input_parameters[5], 3)) + r'$ \leq t_y \leq $ ' + str(np.round(input_parameters[9], 3))) elif kind == 'multiple_HOM': fig.text(.765, .85, r'HOM generated SVF: ') fig.text(.78, .75, r'center: ' + str(input_parameters[3])) fig.text(.78, .70, r'kind: ' + str(input_parameters[4])) fig.text(.78, .65, r'scale_factor: ' + str(input_parameters[5])) fig.text(.78, .60, r'sigma: ' + str(input_parameters[6])) fig.text(.78, .55, r'in_psl: ' + str(input_parameters[7])) fig.text(.78, .50, r'number of samples: ' + str(int(input_parameters[8]))) elif kind == 'multiple_GAUSS': fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .75, r'number of samples = ' + str(input_parameters[3])) fig.text(.78, .70, r'$\sigma_i$ = ' + str(input_parameters[4])) fig.text(.78, .65, r'$\sigma_g$ = ' + str(input_parameters[5])) fig.text(.78, .60, r'Ground truth, steps: ') fig.text(.78, .57, str(input_parameters[6]) + ' ' + str(input_parameters[7])) elif kind == 'multiple_REAL': fig.text(.765, .85, r'Real Data: ') fig.text(.78, .70, r'SFVs id string:') fig.text(.78, .65, str(input_parameters[3])) fig.text(.78, .60, r'Ground truth method ') fig.text(.78, .55, str(input_parameters[4])) else: raise Warning('Kind not recognized.') fig.set_tight_layout(True) return fig def plot_custom_step_versus_error_single(list_steps, matrix_of_lines, # 
errors ordered row-major label_lines, fig_tag=2, input_parameters=None, additional_field=None, window_title_input='errors', titles=('iterations vs. error', 'Field'), x_axis_label='number of steps', y_axis_label='Error', kind=None, input_fig_size=(9, 7), input_colors=None, input_line_style=None, input_marker=None, log_scale=False, additional_vertical_line=None, legend_location='upper right', ): assert len(list_steps) == matrix_of_lines.shape[1] fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k') fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08) font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14} fig.canvas.set_window_title(window_title_input) ax_graph = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3) if additional_field is not None: ax_svf = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1) if input_colors is None: colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k'] input_colors = [colors[j % len(colors)] for j in range(len(list_steps))] if input_marker is None: input_marker = ['.', ] * len(list_steps) if input_line_style is None: input_line_style = ['-', ] * len(list_steps) for j in range(matrix_of_lines.shape[0]): ax_graph.plot(list_steps, matrix_of_lines[j, :], color=input_colors[j], linestyle=input_line_style[j], marker=input_marker[j], label=label_lines[j]) ax_graph.set_title(titles[0]) ax_graph.legend(loc=legend_location, shadow=False) ax_graph.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.set_axisbelow(True) ax_graph.set_xlabel(x_axis_label, fontdict=font, labelpad=18) ax_graph.set_ylabel(y_axis_label, fontdict=font, labelpad=10) if log_scale: ax_graph.set_yscale('log') ax_graph.set_ylabel(y_axis_label + ' log-scale') if additional_vertical_line is not None: # print vertical lines: xa, xb, ya, yb = list(ax_graph.axis()) 
ax_graph.plot([additional_vertical_line, additional_vertical_line], [ya, yb], 'k--', lw=0.5, color='0.3') ax_graph.text(additional_vertical_line + 0.2, (yb - ya)/2., r'automatic = '+str(additional_vertical_line)) # ax_graph.set_xlim(0 - 0.5, num_boxes + 0.5) # right side of the figure: # Quiver if additional_field is not None: ax_svf.set_title(titles[1]) xx, yy = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1])) ax_svf.quiver(yy, xx, additional_field[:, :, 0, 0, 0], additional_field[:, :, 0, 0, 1], color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy') # Text on the figure customise this part for the need! # 6 options 'one_SE2', 'multi_SE2', 'one_GAUSS', 'multi_GAUSS', 'one_REALI', 'multi_REALI' if kind is not None and input_parameters is not None: dom = tuple([int(j) for j in input_parameters[:3]]) fig.text(.78, .80, r'Domain = ' + str(dom)) if kind == 'one_SE2': fig.text(.765, .85, r'SE(2) generated SVF: ') fig.text(.78, .75, r'$\theta = $ ' + str(input_parameters[3])) fig.text(.78, .70, r'$t_x = $ ' + str(input_parameters[4])) fig.text(.78, .65, r'$t_y = $ ' + str(input_parameters[5])) if kind == 'one_HOM': fig.text(.765, .85, r'HOM generated SVF: ') fig.text(.78, .75, r'center: ' + str(input_parameters[3])) fig.text(.78, .70, r'kind: ' + str(input_parameters[4])) fig.text(.78, .65, r'scale_factor: ' + str(input_parameters[5])) fig.text(.78, .60, r'sigma: ' + str(input_parameters[6])) fig.text(.78, .55, r'in_psl: ' + str(input_parameters[7])) elif kind == 'one_GAUSS': fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .75, r'$\sigma_i = $ ' + str(input_parameters[3])) fig.text(.78, .70, r'$\sigma_g = $ ' + str(input_parameters[4])) if len(input_parameters) > 5: fig.text(.745, .65, r'Ground truth method, steps: ') fig.text(.78, .60, str(input_parameters[5]) + ' ' + str(input_parameters[6])) elif kind == 'one_REAL': fig.text(.765, .85, r'Real data: ') fig.text(.78, .75, r'id svf:') 
fig.text(.78, .70, str(input_parameters[3])) if len(input_parameters) > 5: fig.text(.745, .65, r'Ground truth method, steps: ') fig.text(.78, .60, str(input_parameters[4]) + ' ' + str(input_parameters[5])) else: raise Warning('Kind not recognized.') fig.set_tight_layout(True) return fig def plot_custom_step_versus_error_multiple(list_steps, matrix_of_lines_means, # errors ordered row-major label_lines, y_error=None, fig_tag=2, input_parameters=None, additional_field=None, window_title_input='errors', titles=('iterations vs. error', 'Field'), x_axis_label='number of steps', y_axis_label='Error', kind=None, input_fig_size=(9, 7), input_colors=None, input_line_style=None, input_marker=None, log_scale=False, additional_vertical_line=None, legend_location='upper right', ): fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k') fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08) font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14} fig.canvas.set_window_title(window_title_input) if input_parameters is None: ax_graph = plt.subplot(111) else: ax_graph = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3) if additional_field is not None: ax_svf = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1) if input_colors is None: colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k'] input_colors = [colors[j % len(colors)] for j in range(len(list_steps))] if input_marker is None: input_marker = ['.', ] * len(list_steps) if input_line_style is None: input_line_style = ['-', ] * len(list_steps) for j in range(matrix_of_lines_means.shape[0]): if y_error is None: ax_graph.errorbar(list_steps, matrix_of_lines_means[j, :], color=input_colors[j], linestyle=input_line_style[j], marker=input_marker[j], label=label_lines[j]) else: if len(y_error) == 2: ax_graph.errorbar(list_steps, matrix_of_lines_means[j, :], yerr=[y_error[0][j], y_error[1][j]], color=input_colors[j], linestyle=input_line_style[j], marker=input_marker[j], 
label=label_lines[j]) else: ax_graph.errorbar(list_steps, matrix_of_lines_means[j, :], yerr=y_error[j], color=input_colors[j], linestyle=input_line_style[j], marker=input_marker[j], label=label_lines[j]) ax_graph.set_title(titles[0]) ax_graph.legend(loc=legend_location, shadow=False) ax_graph.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.set_axisbelow(True) ax_graph.set_xlabel(x_axis_label, fontdict=font, labelpad=18) ax_graph.set_ylabel(y_axis_label, fontdict=font, labelpad=10) if log_scale: ax_graph.set_yscale('log', nonposy='mask') ax_graph.set_ylabel(y_axis_label + ' log-scale') if additional_vertical_line is not None: # print vertical lines: xa, xb, ya, yb = list(ax_graph.axis()) ax_graph.plot([additional_vertical_line, additional_vertical_line], [ya, yb], 'k--', lw=0.5, color='0.3') ax_graph.text(additional_vertical_line + 0.2, (yb - ya)/2., r'automatic = '+str(additional_vertical_line)) # ax_graph.set_xlim(0 - 0.5, num_boxes + 0.5) # right side of the figure: # Quiver if additional_field is not None and input_parameters is not None: ax_svf.set_title(titles[1]) xx, yy = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1])) ax_svf.quiver(yy, xx, additional_field[:, :, 0, 0, 0], additional_field[:, :, 0, 0, 1], color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy') # Text on the figure customise this part for the need! 
# 6 options 'one_SE2', 'multi_SE2', 'one_GAUSS', 'multi_GAUSS', 'one_REALI', 'multi_REALI' if kind is not None and input_parameters is not None: dom = tuple([int(j) for j in input_parameters[:3]]) fig.text(.78, .80, r'Domain = ' + str(dom)) if kind == 'multiple_SE2': fig.text(.765, .85, r'SE(2) generated SVF: ') fig.text(.78, .75, r'$N = $ ' + str(int(input_parameters[3]))) fig.text(.78, .70, str(np.round(input_parameters[4], 3)) + r'$ \leq \theta \leq $ ' + str(np.round(input_parameters[5], 3))) fig.text(.78, .65, str(np.round(input_parameters[3], 3)) + r'$ \leq t_x \leq $ ' + str(np.round(input_parameters[6], 3))) fig.text(.78, .60, str(np.round(input_parameters[5], 3)) + r'$ \leq t_y \leq $ ' + str(np.round(input_parameters[7], 3))) if kind == 'multiple_HOM': fig.text(.765, .85, r'HOM generated SVF: ') fig.text(.78, .75, r'center: ' + str(input_parameters[3])) fig.text(.78, .70, r'kind: ' + str(input_parameters[4])) fig.text(.78, .65, r'scale_factor: ' + str(input_parameters[5])) fig.text(.78, .60, r'sigma: ' + str(input_parameters[6])) fig.text(.78, .55, r'in_psl: ' + str(input_parameters[7])) fig.text(.78, .75, r'$N = $ ' + str(int(input_parameters[8]))) elif kind == 'multiple_GAUSS': fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .75, r'N =' + str(input_parameters[3])) fig.text(.78, .60, r'$\sigma_i$ = ' + str(input_parameters[4])) fig.text(.78, .55, r'$\sigma_g$ = ' + str(input_parameters[5])) print(len(input_parameters)) fig.text(.78, .50, r'Ground truth method, steps: ') fig.text(.78, .45, str(input_parameters[6]) + ' ' + str(input_parameters[7])) elif kind == 'multiple_REAL': fig.text(.765, .85, r'Real Data: ') fig.text(.78, .70, r'SFVs id string:') fig.text(.78, .65, str(input_parameters[3])) fig.text(.78, .60, r'Ground truth method ') fig.text(.78, .55, str(input_parameters[4])) elif kind == 'multiple_GAUSS_ic': fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .75, r'N =' + str(input_parameters[3])) fig.text(.78, .60, 
r'$\sigma_i$ = ' + str(input_parameters[4])) fig.text(.78, .55, r'$\sigma_g$ = ' + str(input_parameters[5])) print(len(input_parameters)) elif kind == 'multiple_REAL_ic': fig.text(.765, .85, r'Real Data: ') fig.text(.78, .70, r'SFVs id string:') fig.text(.78, .65, str(input_parameters[4])) else: raise Warning('Kind not recognized.') fig.set_tight_layout(True) return fig def plot_custom_cluster(x_in, y_in, fig_tag=11, window_title_input='scatter plot', input_titles=('Main window', 'secondary window'), x_axis_label='time (s)', y_axis_label='error (pixel)', log_scale_x=False, log_scale_y=False, legend_location='upper right', threshold=10, input_fig_size=(12, 7), clusters_labels=None, clusters_colors=None, clusters_markers=None, kind=None, input_parameters=None, additional_field=None, additional_passepartout_values=None): # adapt input if they are not lists. if not isinstance(x_in, list): x_in = [x_in] if not isinstance(y_in, list): y_in = [y_in] elements_per_clusters = [len(x_array) for x_array in x_in] number_of_clusters = len(elements_per_clusters) if clusters_colors is None: clusters_colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '0.8', 'b', 'g', 'r', 'c'] if clusters_labels is None: clusters_labels = ['cluster' + str(i) for i in range(number_of_clusters)] if clusters_markers is None: clusters_markers = ['+' for _ in range(number_of_clusters)] font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14} # Sanity check: assert len(x_in) == len(y_in) assert len(y_in) == number_of_clusters # Initialize figure: fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k') fig.subplots_adjust(left=0.1, right=0.95, top=0.92, bottom=0.1) fig.canvas.set_window_title(window_title_input) if input_parameters is None: ax_scatter = plt.subplot(111) else: ax_scatter = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3) if additional_field is not None: ax_field = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1) # Plot figure: 
for j in range(number_of_clusters): ax_scatter.scatter(x_in[j], y_in[j], c=clusters_colors[j], marker=clusters_markers[j], label=clusters_labels[j]) # Title and axis labels ax_scatter.set_title(input_titles[0]) ax_scatter.legend(loc=legend_location, shadow=False) ax_scatter.set_xlabel(x_axis_label, fontdict=font, labelpad=18) ax_scatter.set_ylabel(y_axis_label, fontdict=font, labelpad=10) if log_scale_x: ax_scatter.set_xscale('log') ax_scatter.set_xlabel(x_axis_label + ' log-scale') if log_scale_y: ax_scatter.set_yscale('log') ax_scatter.set_ylabel(y_axis_label + ' log-scale') # Grid: ax_scatter.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_scatter.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_scatter.set_axisbelow(True) # right side of the figure: # Quiver if additional_field is not None: ax_field.set_title(input_titles[1]) X, Y = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1])) ax_field.quiver(Y, X, additional_field[:, :, 0, 0, 0], additional_field[:, :, 0, 0, 1], color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy') # Additional errorbar boxplot. 
if max(elements_per_clusters) > threshold: pass # kind options if kind is not None and input_parameters is not None: dom = tuple([int(j) for j in input_parameters[:3]]) fig.text(.78, .80, r'Domain = ' + str(dom)) if kind == 'multiple_SE2': fig.text(.765, .85, r'SE(2) generated SVF: ') fig.text(.78, .75, r'$N = $ ' + str(int(input_parameters[3]))) fig.text(.78, .70, str(np.round(input_parameters[4], 3)) + r'$ \leq \theta \leq $ ' + str(np.round(input_parameters[5], 3))) fig.text(.78, .65, str(np.round(input_parameters[3], 3)) + r'$ \leq t_x \leq $ ' + str(np.round(input_parameters[7], 3))) fig.text(.78, .60, str(np.round(input_parameters[5], 3)) + r'$ \leq t_y \leq $ ' + str(np.round(input_parameters[9], 3))) elif kind == 'multiple_GAUSS': fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .75, r'$\sigma_i = $ ' + str(input_parameters[3])) fig.text(.78, .70, r'$\sigma_g = $ ' + str(input_parameters[4])) elif kind == 'multiple_REAL': pass else: raise Warning('Kind not recognized.') def plot_custom_step_error(list_steps_number, matrix_of_lines, # errors ordered column-major label_lines, stdev=None, additional_field=None, window_title_input='errors', titles=('iterations vs. 
error', 'Field'), input_parameters=None, fig_tag=2, kind=None, input_fig_size=(10, 6), log_scale=False, input_colors=None, legend_location='upper right', input_line_style='-', input_marker='o'): fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k') fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08) font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14} fig.canvas.set_window_title(window_title_input) if input_parameters is None: ax_graph = plt.subplot(111) else: ax_graph = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3) if additional_field is not None: ax_svf = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1) fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100) fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08) fig.canvas.set_window_title(window_title_input) if input_colors is None: input_colors = ['r'] * matrix_of_lines.shape[0] # Graph for num_line in range(matrix_of_lines.shape[0]): # number of methods if stdev is None: ax_graph.plot(list_steps_number, matrix_of_lines[num_line, :], linestyle=input_line_style[num_line], marker=input_marker[num_line], label=label_lines[num_line], color=input_colors[num_line]) else: ax_graph.errorbar(list_steps_number, matrix_of_lines[num_line, :], yerr=stdev[num_line, :], color=input_colors[num_line], linestyle=input_line_style[num_line], marker=input_marker[num_line], label=label_lines[num_line] ) ax_graph.set_title(titles[0]) ax_graph.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.set_axisbelow(True) ax_graph.set_xlabel('steps number', fontdict=font) ax_graph.set_ylabel('error', fontdict=font) if log_scale: ax_graph.set_yscale('log') ax_graph.set_ylabel('error - log scale') xa, xb = ax_graph.get_xlim() ya, yb = ax_graph.get_ylim() ax_graph.set_xlim([xa, xb + 0.05*xb]) ax_graph.set_ylim([ya-0.05*ya, 
yb]) ax_graph.legend(loc=legend_location, shadow=False) # Quiver if additional_field is not None: ax_svf.set_title(titles[1]) X, Y = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1])) ax_svf.quiver(Y, X, additional_field[:, :, 0, 0, 0], additional_field[:, :, 0, 0, 1], color='r', linewidths=0.01, width=0.03, scale=1, scale_units='xy', units='xy', angles='xy') # Text on the figure customise this part for the need! if input_parameters is not None and kind is not None: dom = tuple([int(j) for j in input_parameters[:3]]) fig.text(.78, .80, r'Domain = ' + str(dom)) if kind == 'one_SE2': fig.text(.765, .85, r'SE(2) generated SVF: ') fig.text(.78, .75, r'$\theta = $ ' + str(input_parameters[3])) fig.text(.78, .70, r'$t_x = $ ' + str(input_parameters[4])) fig.text(.78, .65, r'$t_y = $ ' + str(input_parameters[5])) fig.text(.78, .60, r'Number of steps considered: ') fig.text(.78, .55, str(int(input_parameters[6]))) elif kind == 'one_HOM': fig.text(.765, .85, r'HOM generated SVF: ') fig.text(.78, .75, r'center: ' + str(input_parameters[3])) fig.text(.78, .70, r'kind: ' + str(input_parameters[4])) fig.text(.78, .65, r'scale_factor: ' + str(input_parameters[5])) fig.text(.78, .60, r'sigma: ' + str(input_parameters[6])) fig.text(.78, .55, r'in_psl: ' + str(input_parameters[7])) elif kind == 'one_GAUSS': fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .75, r'$\sigma_i = $ ' + str(input_parameters[3])) fig.text(.78, .70, r'$\sigma_g = $ ' + str(input_parameters[4])) fig.text(.78, .65, r'Number of steps considered: ') fig.text(.78, .60, str(input_parameters[5])) elif kind == 'one_REAL': fig.text(.765, .85, r'Real Data: ') fig.text(.78, .70, r'id svf:') fig.text(.78, .65, str(input_parameters[3])) fig.text(.78, .60, r'Number of steps considered: ') fig.text(.78, .55, str(input_parameters[4])) elif kind == 'multiple_SE2': fig.text(.765, .85, r'SE(2) generated SVF: ') fig.text(.78, .75, r'$N = $ ' + str(int(input_parameters[3]))) 
fig.text(.78, .70, str(np.round(input_parameters[4], 3)) + r'$ \leq \theta \leq $ ' + str(np.round(input_parameters[5], 3))) fig.text(.78, .65, str(np.round(input_parameters[6], 3)) + r'$ \leq t_x \leq $ ' + str(np.round(input_parameters[7], 3))) fig.text(.78, .60, str(np.round(input_parameters[8], 3)) + r'$ \leq t_y \leq $ ' + str(np.round(input_parameters[9], 3))) fig.text(.78, .55, r'Steps considered: ' + str(input_parameters[7])) elif kind == 'multiple_HOM': fig.text(.765, .85, r'HOM generated SVF: ') fig.text(.78, .75, r'center: ' + str(input_parameters[3])) fig.text(.78, .70, r'kind: ' + str(input_parameters[4])) fig.text(.78, .65, r'scale_factor: ' + str(input_parameters[5])) fig.text(.78, .60, r'sigma: ' + str(input_parameters[6])) fig.text(.78, .55, r'in_psl: ' + str(input_parameters[7])) fig.text(.78, .50, 'N = ' + str(input_parameters[8])) fig.text(.78, .45, 'max steps = ' + str(input_parameters[9])) elif kind == 'multiple_GAUSS': fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .75, r'$\sigma_i = $ ' + str(input_parameters[1])) fig.text(.78, .70, r'$\sigma_g = $ ' + str(input_parameters[2])) fig.text(.78, .65, r'Number of samples: ' + str(input_parameters[0])) fig.text(.78, .55, r'Steps considered: ' + str(input_parameters[3])) elif kind == 'multiple_REAL': fig.text(.78, .85, r'SFVs id string:') fig.text(.78, .80, str(input_parameters[3])) fig.text(.78, .60, r'Max number of steps: ') fig.text(.78, .55, str(input_parameters[4])) else: raise Warning('Kind not recognized.') fig.set_tight_layout(True) return fig # ------------- Bossa figures: -------------- def plot_custom_bossa_figures_like_fov(time_matrix, error_matrix, # errors ordered row-major label_lines, fig_tag=2, input_parameters=None, additional_field=None, window_title_input='errors', titles=('time vs. 
error (increasing field of views)', 'Field'), x_axis_label='Time(s)', y_axis_label='Error', kind=None, input_fig_size=(9, 7), input_colors=None, input_line_style=None, input_marker=None, log_scale=False, additional_vertical_line=None, legend_location='lower right', ): assert time_matrix.shape[0] == error_matrix.shape[0] assert time_matrix.shape[1] == error_matrix.shape[1] num_methods = time_matrix.shape[0] if kind == 'one_SE2': fov_list = [int(f) for f in input_parameters[3:]] elif kind == 'one_GAUSS': fov_list = [int(f) for f in input_parameters[4:]] else: raise Warning('Kind not recognized.') fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k') fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08) font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14} fig.canvas.set_window_title(window_title_input) ax_graph = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3) if additional_field is not None: ax_svf = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1) if input_colors is None: colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k'] input_colors = [colors[c % len(colors)] for c in range(num_methods)] if input_marker is None: input_marker = ['.', ] * num_methods if input_line_style is None: input_line_style = ['-', ] * num_methods for met in range(num_methods): # cycle over the method. 
ax_graph.plot(time_matrix[met, :], error_matrix[met, :], color=input_colors[met], linestyle=input_line_style[met], marker=input_marker[met], label=label_lines[met]) ax_graph.set_title(titles[0]) ax_graph.legend(loc=legend_location, shadow=False) ax_graph.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.set_axisbelow(True) ax_graph.set_xlabel(x_axis_label, fontdict=font, labelpad=18) ax_graph.set_ylabel(y_axis_label, fontdict=font, labelpad=10) if log_scale: ax_graph.set_yscale('log') ax_graph.set_ylabel(y_axis_label + ' log-scale') # right side of the figure: # Quiver if additional_field is not None: ax_svf.set_title(titles[1]) xx, yy = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1])) ax_svf.quiver(yy, xx, additional_field[:, :, 0, 0, 0], additional_field[:, :, 0, 0, 1], color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy') x_size, y_size = additional_field.shape[0:2] for fov in fov_list: val_fov = int(fov) ax_svf.add_patch(patches.Rectangle( (x_size/2 - val_fov/2, y_size/2 - val_fov/2), # (x,y) val_fov, # width val_fov, # height fill=False # remove background )) # Text on the figure customise this part for the need! 
# 6 options 'one_SE2', 'multi_SE2', 'one_GAUSS', 'multi_GAUSS', 'one_REALI', 'multi_REALI' if kind is not None and input_parameters is not None: dom = tuple([int(j) for j in input_parameters[:3]]) fig.text(.78, .80, r'Domain = ' + str(dom)) if kind == 'one_SE2': fig.text(.765, .85, r'SE(2) generated SVF: ') fig.text(.78, .75, r'$\theta = $ ' + str(input_parameters[3])) fig.text(.78, .70, r'$t_x = $ ' + str(input_parameters[4])) fig.text(.78, .65, r'$t_y = $ ' + str(input_parameters[5])) fig.text(.10, .88, r'FOVs: ' + str(input_parameters[6:])) elif kind == 'one_GAUSS': fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .75, r'$\sigma_i = $ ' + str(input_parameters[5])) fig.text(.78, .70, r'$\sigma_g = $ ' + str(input_parameters[6])) fig.text(.75, .65, r'Ground truth method, steps: ') fig.text(.78, .60, str(input_parameters[3]) + ', ' + str(input_parameters[4])) fig.text(.18, .88, r'FOVs: ' + str(input_parameters[7:])) elif kind == 'one_REAL': fig.text(.765, .85, r'Real data: ') fig.text(.78, .75, r'id svf:') fig.text(.78, .70, str(input_parameters[3])) fig.text(.78, .65, r'Ground truth method, steps: ') fig.text(.78, .60, str(input_parameters[3])) else: raise Warning('Kind not recognized.') fig.set_tight_layout(True) return fig def plot_custom_bossa_figures_like_3(time_matrix, error_matrix, # errors ordered row-major label_lines, fig_tag=2, input_parameters=None, additional_field=None, window_title_input='errors', titles=('time vs. 
error (increasing parameter)', 'Last Field'), x_axis_label='Time(s)', y_axis_label='Error', kind=None, input_fig_size=(9, 7), input_colors=None, input_line_style=None, input_marker=None, log_scale=False, additional_vertical_line=None, legend_location='lower right', ): assert time_matrix.shape[0] == error_matrix.shape[0] assert time_matrix.shape[1] == error_matrix.shape[1] num_methods = time_matrix.shape[0] fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k') fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08) font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14} fig.canvas.set_window_title(window_title_input) ax_graph = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3) if additional_field is not None: ax_svf = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1) if input_colors is None: colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k'] input_colors = [colors[c % len(colors)] for c in range(num_methods)] if input_marker is None: input_marker = ['.', ] * num_methods if input_line_style is None: input_line_style = ['-', ] * num_methods for met in range(num_methods): # cycle over the method. 
ax_graph.plot(time_matrix[met, :], error_matrix[met, :], color=input_colors[met], linestyle=input_line_style[met], marker=input_marker[met], label=label_lines[met]) ax_graph.set_title(titles[0]) ax_graph.legend(loc=legend_location, shadow=False) ax_graph.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.set_axisbelow(True) ax_graph.set_xlabel(x_axis_label, fontdict=font, labelpad=18) ax_graph.set_ylabel(y_axis_label, fontdict=font, labelpad=10) if log_scale: ax_graph.set_yscale('log') ax_graph.set_ylabel(y_axis_label + ' log-scale') # right side of the figure: # Quiver if additional_field is not None: ax_svf.set_title(titles[1]) xx, yy = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1])) ax_svf.quiver(yy, xx, additional_field[:, :, 0, 0, 0], additional_field[:, :, 0, 0, 1], color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy') # Text on the figure customise this part for the need! # 6 options 'one_SE2', 'multi_SE2', 'one_GAUSS', 'multi_GAUSS', 'one_REALI', 'multi_REALI' if kind is not None and input_parameters is not None: dom = tuple([int(j) for j in input_parameters[:3]]) fig.text(.78, .80, r'Domain = ' + str(dom)) if kind == 'one_SE2': ax_graph.set_title('time vs. error (increasing rotation angle)') fig.text(.765, .85, r'SE(2) generated SVF: ') fig.text(.10, .88, r'$\theta$ = ' + str(input_parameters[2:])) elif kind == 'one_GAUSS': ax_graph.set_title('time vs. 
error (increasing sigma Gaussian filter)') fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .72, r'$\sigma_i = $' + str(input_parameters[5])) fig.text(.75, .65, r'Ground truth, steps: ') fig.text(.78, .60, str(input_parameters[3]) + ', ' + str(input_parameters[4])) fig.text(.15, .88, r'$\sigma_g = $' + str(input_parameters[6:])) elif kind == 'one_REAL': fig.text(.765, .85, r'Real Data: ') fig.text(.78, .75, r'id svf:') fig.text(.78, .70, str(input_parameters[0])) fig.text(.78, .65, r'Ground truth method: ') fig.text(.78, .60, str(input_parameters[0])) else: raise Warning('Kind not recognized.') fig.set_tight_layout(True) return fig def plot_custom_time_error_steps(time_matrix, error_matrix, # errors ordered row-major label_lines, y_error=None, fig_tag=2, input_parameters=None, additional_field=None, window_title_input='errors', titles=('mean time vs. mean error (increasing steps)', 'Field sample'), x_axis_label='Time(s)', y_axis_label='Error', kind=None, input_fig_size=(9, 7), input_colors=None, input_line_style=None, input_marker=None, x_log_scale=False, y_log_scale=False, legend_location='lower right', additional_data=None): assert time_matrix.shape[0] == error_matrix.shape[0] assert time_matrix.shape[1] == error_matrix.shape[1] num_methods = time_matrix.shape[0] fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k') fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08) font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14} fig.canvas.set_window_title(window_title_input) # Set the axis according to the inputs: (GOOD version!) 
ax_graph = plt.subplot2grid((3, 4), (0, 0), colspan=4, rowspan=3) if input_parameters is not None: ax_graph = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3) if additional_field is not None: ax_graph = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3) ax_svf = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1) if input_colors is None: colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k'] input_colors = [colors[c % len(colors)] for c in range(num_methods)] if input_marker is None: input_marker = ['.', ] * num_methods if input_line_style is None: input_line_style = ['-', ] * num_methods for met in range(num_methods): # cycle over the method. if y_error is None: ax_graph.plot(time_matrix[met, :], error_matrix[met, :], color=input_colors[met], linestyle=input_line_style[met], marker=input_marker[met], label=label_lines[met]) else: if len(y_error) == 2: ax_graph.errorbar(time_matrix[met, :], error_matrix[met, :], yerr=[y_error[0][met, :], y_error[1][met, :]], color=input_colors[met], linestyle=input_line_style[met], marker=input_marker[met], label=label_lines[met]) else: ax_graph.errorbar(time_matrix[met, :], error_matrix[met, :], yerr=y_error[met, :], color=input_colors[met], linestyle=input_line_style[met], marker=input_marker[met], label=label_lines[met]) ax_graph.set_title(titles[0]) ax_graph.legend(loc=legend_location, shadow=False) ax_graph.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.set_axisbelow(True) ax_graph.set_xlabel(x_axis_label, fontdict=font, labelpad=18) ax_graph.set_ylabel(y_axis_label, fontdict=font, labelpad=10) if x_log_scale: ax_graph.set_xscale('log') ax_graph.set_xlabel(x_axis_label + ' log-scale') if y_log_scale: ax_graph.set_yscale('log') ax_graph.set_ylabel(y_axis_label + ' log-scale') # right side of the figure: # Quiver if additional_field is not None: ax_svf.set_title(titles[1]) xx, yy = 
np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1])) ax_svf.quiver(yy, xx, additional_field[:, :, additional_field.shape[2]/2, 0, 0], additional_field[:, :, additional_field.shape[2]/2, 0, 1], color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy') ax_svf.set_aspect('equal') if kind is not None and input_parameters is not None: dom = tuple([int(j) for j in input_parameters[:3]]) fig.text(.78, .80, r'Domain = ' + str(dom)) if additional_data is not None: fig.text(.75, .50, r'Max norm: ' + str(additional_data[0])) fig.text(.75, .45, r'Mean norm: ' + str(additional_data[1])) if kind == 'one_SE2': fig.text(.765, .85, r'SE(2) generated SVF: ') if isinstance(input_parameters[3], float): # is the angle fig.text(.78, .75, r'$\theta = $ ' + str(input_parameters[3])) else: # is the denominator of pi fig.text(.78, .75, r'$\theta = \pi /$ ' + str(input_parameters[3])) fig.text(.78, .70, r'$t_x = $ ' + str(input_parameters[4])) fig.text(.78, .65, r'$t_y = $ ' + str(input_parameters[5])) fig.text(.11, .18, r'Steps: ' + str(input_parameters[6:])) elif kind == 'one_HOM': fig.text(.765, .85, r'HOM generated SVF: ') fig.text(.78, .75, r'scale_factor: ' + str(input_parameters[3])) fig.text(.78, .70, r'sigma: ' + str(input_parameters[4])) fig.text(.78, .65, r'in_psl: ' + str(input_parameters[5])) fig.text(.11, .18, r'Steps: ' + str(input_parameters[6:])) elif kind == 'one_GAUSS': fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .75, r'$\sigma_i = $ ' + str(input_parameters[3])) fig.text(.78, .70, r'$\sigma_g = $ ' + str(input_parameters[4])) fig.text(.75, .65, r'Ground method, steps: ') fig.text(.78, .60, str(input_parameters[5]) + ', ' + str(input_parameters[6])) fig.text(.15, .88, r'$Steps = $' + str(input_parameters[7:])) elif kind == 'one_REAL': fig.text(.78, .85, r'Real data') fig.text(.78, .75, 'id data:') fig.text(.78, .70, str(input_parameters[3])) fig.text(.78, .65, r'Ground method, steps: ') fig.text(.78, 
.60, str(input_parameters[4]) + ' ' + str(input_parameters[5])) fig.text(.15, .88, r'$Steps = $' + str(input_parameters[6:])) elif kind == 'multiple_SE2': fig.text(.765, .85, r'SE(2) generated SVF: ') fig.text(.78, .75, r'$N = $ ' + str(int(input_parameters[3]))) fig.text(.78, .70, str(np.round(input_parameters[4], 3)) + r'$ \leq \theta \leq $ ' + str(np.round(input_parameters[5], 3))) fig.text(.78, .65, str(np.round(input_parameters[6], 3)) + r'$ \leq t_x \leq $ ' + str(np.round(input_parameters[7], 3))) fig.text(.78, .60, str(np.round(input_parameters[8], 3)) + r'$ \leq t_y \leq $ ' + str(np.round(input_parameters[9], 3))) fig.text(.11, .15, r'$Steps = $' + str(input_parameters[10:])) elif kind == 'multiple_HOM': fig.text(.765, .85, r'HOM generated SVF: ') fig.text(.78, .65, r'scale_factor: ' + str(input_parameters[3])) fig.text(.78, .60, r'sigma: ' + str(input_parameters[4])) fig.text(.78, .55, r'in_psl: ' + str(input_parameters[5])) fig.text(.78, .50, r'$N = $ ' + str(int(input_parameters[6]))) fig.text(.11, .18, r'Steps: ' + str(input_parameters[7:])) elif kind == 'multiple_GAUSS': fig.text(.765, .85, r'Gauss generated SVF: ') fig.text(.78, .75, r'$\sigma_i = $ ' + str(input_parameters[4])) fig.text(.78, .70, r'$\sigma_g = $ ' + str(input_parameters[5])) fig.text(.78, .65, r'Number of samples: ' + str(input_parameters[3])) fig.text(.78, .55, r'Ground method, steps: ') fig.text(.78, .50, str(input_parameters[6]) + ' ' + str(input_parameters[7])) fig.text(.15, .88, r'$Steps = $' + str(input_parameters[8:])) elif kind == 'multiple_REAL': fig.text(.765, .85, r'Real data') fig.text(.78, .75, r'SFVs id string:') fig.text(.78, .70, str(input_parameters[3])) fig.text(.78, .65, r'Ground method, steps: ') fig.text(.78, .60, str(input_parameters[4]) + ' ' + str(input_parameters[5])) fig.text(.11, .15, r'$Steps = $' + str(input_parameters[6:])) elif kind == 'multiple_REAL_ic': fig.text(.765, .85, r'Real data') fig.text(.78, .75, r'SFVs id string:') fig.text(.78, .70, 
str(input_parameters[3])) fig.text(.11, .15, r'$Steps = $' + str(input_parameters[4:])) else: raise Warning('Kind not recognized.') fig.set_tight_layout(True) return fig """ To see errors at each level of the vector field. Created for homographies, searching for a computationally well-defined homographies. """ def plot_error_linewise(error_lines, quotes_lines, # errors ordered row-major label_lines, fig_tag=2, input_parameters=None, additional_field=None, # must be with the axial quote if 3d axial_quote=0, log_scale=False, window_title_input='errors', titles=('time vs. error (increasing field of views)', 'Field'), legend_location='lower right', ): num_lines = len(error_lines) # list of error vectors fig = plt.figure(fig_tag, figsize=(8, 5), dpi=100, facecolor='w', edgecolor='k') fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08) font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14} fig.canvas.set_window_title(window_title_input) ax_graph = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3) if additional_field is not None: ax_svf = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1) colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k'] markers = ['.', ] * len(colors) line_styles = ['-', ] * len(colors) for line_num in range(num_lines): # cycle over the method. 
ax_graph.plot(range(len(error_lines[line_num])), error_lines[line_num], color=colors[line_num], linestyle=line_styles[line_num], marker=markers[line_num], label=label_lines[line_num]) ax_graph.set_title('Error per position') ax_graph.legend(loc=legend_location, shadow=False) ax_graph.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax_graph.set_axisbelow(True) ax_graph.set_xlabel('Position (pixel/voxel)', fontdict=font, labelpad=18) ax_graph.set_ylabel('Error', fontdict=font, labelpad=10) if log_scale: ax_graph.set_yscale('log') ax_graph.set_ylabel('Error' + ' log-scale') # right side of the figure: # Quiver if additional_field is not None: ax_svf.set_title(titles[1]) xx, yy = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1])) ax_svf.quiver(yy, xx, additional_field[:, :, axial_quote, 0, 0], additional_field[:, :, axial_quote, 0, 1], color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy') x_size, y_size = additional_field.shape[0:2] for h_lines in range(len(quotes_lines)): ax_svf.axhline(y=quotes_lines[h_lines], xmin=0, xmax=x_size, c='b', linewidth=1, zorder=2) # Text on the figure customise this part for the need! # to show the content of the input_parameters: # if input_parameters is not None: dom = tuple([int(j) for j in input_parameters[:3]]) fig.text(.78, .80, r'Domain = ' + str(dom)) fig.text(.765, .85, r'pgl(n) generated SVF: ') fig.text(.78, .75, r'center: ' + str(input_parameters[3])) fig.text(.78, .65, r'scale_factor: ' + str(input_parameters[4])) fig.text(.78, .60, r'sigma: ' + str(input_parameters[5])) fig.text(.78, .55, r'in_psl: ' + str(input_parameters[6])) fig.set_tight_layout(True) return fig
[ "matplotlib.pyplot.setp", "numpy.mean", "matplotlib.patches.Rectangle", "numpy.round", "matplotlib.pyplot.figure", "numpy.around", "matplotlib.pyplot.subplot", "matplotlib.pyplot.subplot2grid", "numpy.arange" ]
[((842, 928), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_tag'], {'figsize': 'input_fig_size', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w',\n edgecolor='k')\n", (852, 928), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1116), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], {'colspan': '(3)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=3, rowspan=3)\n', (1078, 1116), True, 'import matplotlib.pyplot as plt\n'), ((5973, 6059), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_tag'], {'figsize': 'input_fig_size', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w',\n edgecolor='k')\n", (5983, 6059), True, 'import matplotlib.pyplot as plt\n'), ((6587, 6614), 'numpy.arange', 'np.arange', (['(1)', '(num_boxes + 1)'], {}), '(1, num_boxes + 1)\n', (6596, 6614), True, 'import numpy as np\n'), ((6738, 6773), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['boxes']"], {'color': '"""blue"""'}), "(bp['boxes'], color='blue')\n", (6746, 6773), True, 'import matplotlib.pyplot as plt\n'), ((6778, 6816), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['whiskers']"], {'color': '"""blue"""'}), "(bp['whiskers'], color='blue')\n", (6786, 6816), True, 'import matplotlib.pyplot as plt\n'), ((6821, 6868), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['fliers']"], {'color': '"""red"""', 'marker': '"""+"""'}), "(bp['fliers'], color='red', marker='+')\n", (6829, 6868), True, 'import matplotlib.pyplot as plt\n'), ((12712, 12798), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_tag'], {'figsize': 'input_fig_size', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w',\n edgecolor='k')\n", (12722, 12798), True, 'import matplotlib.pyplot as plt\n'), ((13019, 13073), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], 
{'colspan': '(3)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=3, rowspan=3)\n', (13035, 13073), True, 'import matplotlib.pyplot as plt\n'), ((18638, 18724), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_tag'], {'figsize': 'input_fig_size', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w',\n edgecolor='k')\n", (18648, 18724), True, 'import matplotlib.pyplot as plt\n'), ((26968, 27054), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_tag'], {'figsize': 'input_fig_size', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w',\n edgecolor='k')\n", (26978, 27054), True, 'import matplotlib.pyplot as plt\n'), ((31054, 31140), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_tag'], {'figsize': 'input_fig_size', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w',\n edgecolor='k')\n", (31064, 31140), True, 'import matplotlib.pyplot as plt\n'), ((31629, 31681), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_tag'], {'figsize': 'input_fig_size', 'dpi': '(100)'}), '(fig_tag, figsize=input_fig_size, dpi=100)\n', (31639, 31681), True, 'import matplotlib.pyplot as plt\n'), ((39466, 39552), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_tag'], {'figsize': 'input_fig_size', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w',\n edgecolor='k')\n", (39476, 39552), True, 'import matplotlib.pyplot as plt\n'), ((39773, 39827), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], {'colspan': '(3)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=3, rowspan=3)\n', (39789, 39827), True, 'import matplotlib.pyplot as plt\n'), ((45009, 45095), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_tag'], {'figsize': 'input_fig_size', 'dpi': '(100)', 'facecolor': '"""w"""', 
'edgecolor': '"""k"""'}), "(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w',\n edgecolor='k')\n", (45019, 45095), True, 'import matplotlib.pyplot as plt\n'), ((45316, 45370), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], {'colspan': '(3)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=3, rowspan=3)\n', (45332, 45370), True, 'import matplotlib.pyplot as plt\n'), ((49918, 50004), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_tag'], {'figsize': 'input_fig_size', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w',\n edgecolor='k')\n", (49928, 50004), True, 'import matplotlib.pyplot as plt\n'), ((50285, 50339), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], {'colspan': '(4)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=4, rowspan=3)\n', (50301, 50339), True, 'import matplotlib.pyplot as plt\n'), ((59031, 59105), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_tag'], {'figsize': '(8, 5)', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(fig_tag, figsize=(8, 5), dpi=100, facecolor='w', edgecolor='k')\n", (59041, 59105), True, 'import matplotlib.pyplot as plt\n'), ((59330, 59384), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], {'colspan': '(3)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=3, rowspan=3)\n', (59346, 59384), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1229), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(2, 3)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((3, 4), (2, 3), colspan=1, rowspan=1)\n', (1191, 1229), True, 'import matplotlib.pyplot as plt\n'), ((1984, 2025), 'matplotlib.pyplot.setp', 'plt.setp', (['ax_bar'], {'xticklabels': 'input_names'}), '(ax_bar, xticklabels=input_names)\n', (1992, 2025), True, 'import matplotlib.pyplot as plt\n'), ((2034, 2081), 'matplotlib.pyplot.setp', 'plt.setp', (['xtick_names'], {'rotation': '(45)', 
'fontsize': '(12)'}), '(xtick_names, rotation=45, fontsize=12)\n', (2042, 2081), True, 'import matplotlib.pyplot as plt\n'), ((6315, 6331), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (6326, 6331), True, 'import matplotlib.pyplot as plt\n'), ((6359, 6413), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], {'colspan': '(3)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=3, rowspan=3)\n', (6375, 6413), True, 'import matplotlib.pyplot as plt\n'), ((7507, 7548), 'matplotlib.pyplot.setp', 'plt.setp', (['ax_box'], {'xticklabels': 'input_names'}), '(ax_box, xticklabels=input_names)\n', (7515, 7548), True, 'import matplotlib.pyplot as plt\n'), ((7557, 7604), 'matplotlib.pyplot.setp', 'plt.setp', (['xtick_names'], {'rotation': '(45)', 'fontsize': '(12)'}), '(xtick_names, rotation=45, fontsize=12)\n', (7565, 7604), True, 'import matplotlib.pyplot as plt\n'), ((8165, 8187), 'numpy.mean', 'np.mean', (['input_data[i]'], {}), '(input_data[i])\n', (8172, 8187), True, 'import numpy as np\n'), ((13130, 13184), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(2, 3)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((3, 4), (2, 3), colspan=1, rowspan=1)\n', (13146, 13184), True, 'import matplotlib.pyplot as plt\n'), ((18982, 18998), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (18993, 18998), True, 'import matplotlib.pyplot as plt\n'), ((19028, 19082), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], {'colspan': '(3)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=3, rowspan=3)\n', (19044, 19082), True, 'import matplotlib.pyplot as plt\n'), ((27227, 27243), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (27238, 27243), True, 'import matplotlib.pyplot as plt\n'), ((27275, 27329), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], {'colspan': '(3)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=3, rowspan=3)\n', 
(27291, 27329), True, 'import matplotlib.pyplot as plt\n'), ((27389, 27443), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(2, 3)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((3, 4), (2, 3), colspan=1, rowspan=1)\n', (27405, 27443), True, 'import matplotlib.pyplot as plt\n'), ((31398, 31414), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (31409, 31414), True, 'import matplotlib.pyplot as plt\n'), ((31444, 31498), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], {'colspan': '(3)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=3, rowspan=3)\n', (31460, 31498), True, 'import matplotlib.pyplot as plt\n'), ((39884, 39938), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(2, 3)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((3, 4), (2, 3), colspan=1, rowspan=1)\n', (39900, 39938), True, 'import matplotlib.pyplot as plt\n'), ((45427, 45481), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(2, 3)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((3, 4), (2, 3), colspan=1, rowspan=1)\n', (45443, 45481), True, 'import matplotlib.pyplot as plt\n'), ((50396, 50450), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], {'colspan': '(3)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=3, rowspan=3)\n', (50412, 50450), True, 'import matplotlib.pyplot as plt\n'), ((50507, 50561), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(0, 0)'], {'colspan': '(3)', 'rowspan': '(3)'}), '((3, 4), (0, 0), colspan=3, rowspan=3)\n', (50523, 50561), True, 'import matplotlib.pyplot as plt\n'), ((50579, 50633), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(2, 3)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((3, 4), (2, 3), colspan=1, rowspan=1)\n', (50595, 50633), True, 'import matplotlib.pyplot as plt\n'), ((59441, 59495), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(2, 3)'], {'colspan': '(1)', 'rowspan': 
'(1)'}), '((3, 4), (2, 3), colspan=1, rowspan=1)\n', (59457, 59495), True, 'import matplotlib.pyplot as plt\n'), ((2374, 2410), 'numpy.arange', 'np.arange', (['additional_field.shape[0]'], {}), '(additional_field.shape[0])\n', (2383, 2410), True, 'import numpy as np\n'), ((2412, 2448), 'numpy.arange', 'np.arange', (['additional_field.shape[1]'], {}), '(additional_field.shape[1])\n', (2421, 2448), True, 'import numpy as np\n'), ((6480, 6534), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(2, 3)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((3, 4), (2, 3), colspan=1, rowspan=1)\n', (6496, 6534), True, 'import matplotlib.pyplot as plt\n'), ((7785, 7821), 'numpy.arange', 'np.arange', (['additional_field.shape[0]'], {}), '(additional_field.shape[0])\n', (7794, 7821), True, 'import numpy as np\n'), ((7823, 7859), 'numpy.arange', 'np.arange', (['additional_field.shape[1]'], {}), '(additional_field.shape[1])\n', (7832, 7859), True, 'import numpy as np\n'), ((14905, 14941), 'numpy.arange', 'np.arange', (['additional_field.shape[0]'], {}), '(additional_field.shape[0])\n', (14914, 14941), True, 'import numpy as np\n'), ((14943, 14979), 'numpy.arange', 'np.arange', (['additional_field.shape[1]'], {}), '(additional_field.shape[1])\n', (14952, 14979), True, 'import numpy as np\n'), ((19147, 19201), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(2, 3)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((3, 4), (2, 3), colspan=1, rowspan=1)\n', (19163, 19201), True, 'import matplotlib.pyplot as plt\n'), ((21882, 21918), 'numpy.arange', 'np.arange', (['additional_field.shape[0]'], {}), '(additional_field.shape[0])\n', (21891, 21918), True, 'import numpy as np\n'), ((21920, 21956), 'numpy.arange', 'np.arange', (['additional_field.shape[1]'], {}), '(additional_field.shape[1])\n', (21929, 21956), True, 'import numpy as np\n'), ((28537, 28573), 'numpy.arange', 'np.arange', (['additional_field.shape[0]'], {}), '(additional_field.shape[0])\n', (28546, 
28573), True, 'import numpy as np\n'), ((28575, 28611), 'numpy.arange', 'np.arange', (['additional_field.shape[1]'], {}), '(additional_field.shape[1])\n', (28584, 28611), True, 'import numpy as np\n'), ((31563, 31617), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 4)', '(2, 3)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((3, 4), (2, 3), colspan=1, rowspan=1)\n', (31579, 31617), True, 'import matplotlib.pyplot as plt\n'), ((33513, 33549), 'numpy.arange', 'np.arange', (['additional_field.shape[0]'], {}), '(additional_field.shape[0])\n', (33522, 33549), True, 'import numpy as np\n'), ((33551, 33587), 'numpy.arange', 'np.arange', (['additional_field.shape[1]'], {}), '(additional_field.shape[1])\n', (33560, 33587), True, 'import numpy as np\n'), ((41273, 41309), 'numpy.arange', 'np.arange', (['additional_field.shape[0]'], {}), '(additional_field.shape[0])\n', (41282, 41309), True, 'import numpy as np\n'), ((41311, 41347), 'numpy.arange', 'np.arange', (['additional_field.shape[1]'], {}), '(additional_field.shape[1])\n', (41320, 41347), True, 'import numpy as np\n'), ((46816, 46852), 'numpy.arange', 'np.arange', (['additional_field.shape[0]'], {}), '(additional_field.shape[0])\n', (46825, 46852), True, 'import numpy as np\n'), ((46854, 46890), 'numpy.arange', 'np.arange', (['additional_field.shape[1]'], {}), '(additional_field.shape[1])\n', (46863, 46890), True, 'import numpy as np\n'), ((52977, 53013), 'numpy.arange', 'np.arange', (['additional_field.shape[0]'], {}), '(additional_field.shape[0])\n', (52986, 53013), True, 'import numpy as np\n'), ((53015, 53051), 'numpy.arange', 'np.arange', (['additional_field.shape[1]'], {}), '(additional_field.shape[1])\n', (53024, 53051), True, 'import numpy as np\n'), ((60675, 60711), 'numpy.arange', 'np.arange', (['additional_field.shape[0]'], {}), '(additional_field.shape[0])\n', (60684, 60711), True, 'import numpy as np\n'), ((60713, 60749), 'numpy.arange', 'np.arange', (['additional_field.shape[1]'], {}), 
'(additional_field.shape[1])\n', (60722, 60749), True, 'import numpy as np\n'), ((41756, 41861), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x_size / 2 - val_fov / 2, y_size / 2 - val_fov / 2)', 'val_fov', 'val_fov'], {'fill': '(False)'}), '((x_size / 2 - val_fov / 2, y_size / 2 - val_fov / 2),\n val_fov, val_fov, fill=False)\n', (41773, 41861), False, 'from matplotlib import patches\n'), ((2952, 2995), 'numpy.around', 'np.around', (['add_extra_numbers[i]'], {'decimals': '(9)'}), '(add_extra_numbers[i], decimals=9)\n', (2961, 2995), True, 'import numpy as np\n'), ((8437, 8465), 'numpy.around', 'np.around', (['mu[i]'], {'decimals': '(9)'}), '(mu[i], decimals=9)\n', (8446, 8465), True, 'import numpy as np\n'), ((8788, 8834), 'numpy.around', 'np.around', (['add_extra_annotation[i]'], {'decimals': '(9)'}), '(add_extra_annotation[i], decimals=9)\n', (8797, 8834), True, 'import numpy as np\n'), ((9605, 9637), 'numpy.round', 'np.round', (['input_parameters[5]', '(3)'], {}), '(input_parameters[5], 3)\n', (9613, 9637), True, 'import numpy as np\n'), ((9781, 9813), 'numpy.round', 'np.round', (['input_parameters[7]', '(3)'], {}), '(input_parameters[7], 3)\n', (9789, 9813), True, 'import numpy as np\n'), ((9957, 9989), 'numpy.round', 'np.round', (['input_parameters[9]', '(3)'], {}), '(input_parameters[9], 3)\n', (9965, 9989), True, 'import numpy as np\n'), ((22861, 22893), 'numpy.round', 'np.round', (['input_parameters[5]', '(3)'], {}), '(input_parameters[5], 3)\n', (22869, 22893), True, 'import numpy as np\n'), ((23037, 23069), 'numpy.round', 'np.round', (['input_parameters[6]', '(3)'], {}), '(input_parameters[6], 3)\n', (23045, 23069), True, 'import numpy as np\n'), ((23213, 23245), 'numpy.round', 'np.round', (['input_parameters[7]', '(3)'], {}), '(input_parameters[7], 3)\n', (23221, 23245), True, 'import numpy as np\n'), ((29485, 29517), 'numpy.round', 'np.round', (['input_parameters[5]', '(3)'], {}), '(input_parameters[5], 3)\n', (29493, 29517), True, 'import 
numpy as np\n'), ((29661, 29693), 'numpy.round', 'np.round', (['input_parameters[7]', '(3)'], {}), '(input_parameters[7], 3)\n', (29669, 29693), True, 'import numpy as np\n'), ((29837, 29869), 'numpy.round', 'np.round', (['input_parameters[9]', '(3)'], {}), '(input_parameters[9], 3)\n', (29845, 29869), True, 'import numpy as np\n'), ((9496, 9528), 'numpy.round', 'np.round', (['input_parameters[4]', '(3)'], {}), '(input_parameters[4], 3)\n', (9504, 9528), True, 'import numpy as np\n'), ((9675, 9707), 'numpy.round', 'np.round', (['input_parameters[3]', '(3)'], {}), '(input_parameters[3], 3)\n', (9683, 9707), True, 'import numpy as np\n'), ((9851, 9883), 'numpy.round', 'np.round', (['input_parameters[5]', '(3)'], {}), '(input_parameters[5], 3)\n', (9859, 9883), True, 'import numpy as np\n'), ((22752, 22784), 'numpy.round', 'np.round', (['input_parameters[4]', '(3)'], {}), '(input_parameters[4], 3)\n', (22760, 22784), True, 'import numpy as np\n'), ((22931, 22963), 'numpy.round', 'np.round', (['input_parameters[3]', '(3)'], {}), '(input_parameters[3], 3)\n', (22939, 22963), True, 'import numpy as np\n'), ((23107, 23139), 'numpy.round', 'np.round', (['input_parameters[5]', '(3)'], {}), '(input_parameters[5], 3)\n', (23115, 23139), True, 'import numpy as np\n'), ((29376, 29408), 'numpy.round', 'np.round', (['input_parameters[4]', '(3)'], {}), '(input_parameters[4], 3)\n', (29384, 29408), True, 'import numpy as np\n'), ((29555, 29587), 'numpy.round', 'np.round', (['input_parameters[3]', '(3)'], {}), '(input_parameters[3], 3)\n', (29563, 29587), True, 'import numpy as np\n'), ((29731, 29763), 'numpy.round', 'np.round', (['input_parameters[5]', '(3)'], {}), '(input_parameters[5], 3)\n', (29739, 29763), True, 'import numpy as np\n'), ((35988, 36020), 'numpy.round', 'np.round', (['input_parameters[5]', '(3)'], {}), '(input_parameters[5], 3)\n', (35996, 36020), True, 'import numpy as np\n'), ((36164, 36196), 'numpy.round', 'np.round', (['input_parameters[7]', '(3)'], {}), 
'(input_parameters[7], 3)\n', (36172, 36196), True, 'import numpy as np\n'), ((36340, 36372), 'numpy.round', 'np.round', (['input_parameters[9]', '(3)'], {}), '(input_parameters[9], 3)\n', (36348, 36372), True, 'import numpy as np\n'), ((55923, 55955), 'numpy.round', 'np.round', (['input_parameters[5]', '(3)'], {}), '(input_parameters[5], 3)\n', (55931, 55955), True, 'import numpy as np\n'), ((56099, 56131), 'numpy.round', 'np.round', (['input_parameters[7]', '(3)'], {}), '(input_parameters[7], 3)\n', (56107, 56131), True, 'import numpy as np\n'), ((56275, 56307), 'numpy.round', 'np.round', (['input_parameters[9]', '(3)'], {}), '(input_parameters[9], 3)\n', (56283, 56307), True, 'import numpy as np\n'), ((35879, 35911), 'numpy.round', 'np.round', (['input_parameters[4]', '(3)'], {}), '(input_parameters[4], 3)\n', (35887, 35911), True, 'import numpy as np\n'), ((36058, 36090), 'numpy.round', 'np.round', (['input_parameters[6]', '(3)'], {}), '(input_parameters[6], 3)\n', (36066, 36090), True, 'import numpy as np\n'), ((36234, 36266), 'numpy.round', 'np.round', (['input_parameters[8]', '(3)'], {}), '(input_parameters[8], 3)\n', (36242, 36266), True, 'import numpy as np\n'), ((55814, 55846), 'numpy.round', 'np.round', (['input_parameters[4]', '(3)'], {}), '(input_parameters[4], 3)\n', (55822, 55846), True, 'import numpy as np\n'), ((55993, 56025), 'numpy.round', 'np.round', (['input_parameters[6]', '(3)'], {}), '(input_parameters[6], 3)\n', (56001, 56025), True, 'import numpy as np\n'), ((56169, 56201), 'numpy.round', 'np.round', (['input_parameters[8]', '(3)'], {}), '(input_parameters[8], 3)\n', (56177, 56201), True, 'import numpy as np\n')]
# <NAME>, Nov 2018 # # This script analyzes the data and makes predictions. # It creates a decision tree classifier with ideal hyperparameters # (maximum depth and minimum samples split). These hyperparameters # are calculated over the 80% training data using 3-fold cross validation. # The predictions are made on the 20% test data. Argument variables of # the script are an input data file, a features importance data file, and # a detailed data file containing all the data along with predictions and # train/test label. # # Dependencies: argparse, pandas, numpy, sklearn.tree, # sklearn.model_selection # # Usage: python analysis.py data_file importance_file detailed_file # package import for parsing arguments import argparse import pandas as pd import numpy as np from sklearn import tree from sklearn.model_selection import train_test_split, cross_val_score # variable for parsing parser = argparse.ArgumentParser() # adding argument for input file parser.add_argument('input_file') parser.add_argument('importance_file') parser.add_argument('output_file') args = parser.parse_args() def main(): # read the data df = pd.read_csv(args.input_file) # specify feature and target columns feature_cols = df.columns.values[:-1] target = df.columns.values[-1] # create X and y X = df.loc[:, feature_cols] y = df.loc[:, target] # split the training data 80/20 Xtrain, Xtest, ytrain, ytest = train_test_split(X,y,test_size=0.2) depth = best_depth(Xtrain, ytrain) split = best_samples_split(Xtrain, ytrain) model = tree.DecisionTreeClassifier(max_depth = depth, min_samples_split = split, random_state=42) model.fit(Xtrain,ytrain) results = pd.DataFrame(np.vstack((feature_cols, model.feature_importances_))) results = results.T results.columns = ['Features', 'Importance'] results = results.sort_values(by='Importance', ascending=False) results = results[results['Importance']>0] results.to_csv(args.importance_file, index=False) # make predictions on test and training data predictions_test = 
model.predict(Xtest) predictions_train = model.predict(Xtrain) # combine all the data to save for output Xtr = Xtrain.copy() Xtr['Classification'] = ytrain Xte = Xtest.copy() Xte['Classification'] = ytest Xte['Predictions'] = predictions_test Xtr['Predictions'] = predictions_train Xtr['Type'] = "train" Xte['Type'] = "test" output = Xtr.append(other=Xte) output.to_csv(args.output_file, index=False) def best_depth(X, y): ''' ideitifies the best value for maximum depth in a decision tree by iterating over values 1 to 40 and returns value that gave the best average accuracy over 3-fold cross-validation ''' depths = np.linspace(1, 40, 40, endpoint=True) train_accuracy = [] for i in depths: model = tree.DecisionTreeClassifier(max_depth=i, random_state=42) avg_score = np.mean(cross_val_score(model, X, y, cv=3)) train_accuracy.append(avg_score) return depths[np.argmax(train_accuracy)] def best_samples_split(X, y): ''' ideitifies the best value for minimum samples split in a decision tree by iterating over values 0.1 to 1.0 and returns value that gave the best average accuracy over 3-fold cross-validation ''' splits = range(2,21,1) train_accuracy = [] for i in splits: model = tree.DecisionTreeClassifier(min_samples_split=i, random_state=42) avg_score = np.mean(cross_val_score(model, X, y, cv=3)) train_accuracy.append(avg_score) return splits[np.argmax(train_accuracy)] # call main function if __name__ == "__main__": main()
[ "argparse.ArgumentParser", "pandas.read_csv", "sklearn.model_selection.train_test_split", "sklearn.tree.DecisionTreeClassifier", "numpy.argmax", "numpy.linspace", "numpy.vstack", "sklearn.model_selection.cross_val_score" ]
[((900, 925), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (923, 925), False, 'import argparse\n'), ((1137, 1165), 'pandas.read_csv', 'pd.read_csv', (['args.input_file'], {}), '(args.input_file)\n', (1148, 1165), True, 'import pandas as pd\n'), ((1437, 1474), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (1453, 1474), False, 'from sklearn.model_selection import train_test_split, cross_val_score\n'), ((1573, 1663), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'max_depth': 'depth', 'min_samples_split': 'split', 'random_state': '(42)'}), '(max_depth=depth, min_samples_split=split,\n random_state=42)\n', (1600, 1663), False, 'from sklearn import tree\n'), ((2787, 2824), 'numpy.linspace', 'np.linspace', (['(1)', '(40)', '(40)'], {'endpoint': '(True)'}), '(1, 40, 40, endpoint=True)\n', (2798, 2824), True, 'import numpy as np\n'), ((1721, 1774), 'numpy.vstack', 'np.vstack', (['(feature_cols, model.feature_importances_)'], {}), '((feature_cols, model.feature_importances_))\n', (1730, 1774), True, 'import numpy as np\n'), ((2887, 2944), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'max_depth': 'i', 'random_state': '(42)'}), '(max_depth=i, random_state=42)\n', (2914, 2944), False, 'from sklearn import tree\n'), ((3069, 3094), 'numpy.argmax', 'np.argmax', (['train_accuracy'], {}), '(train_accuracy)\n', (3078, 3094), True, 'import numpy as np\n'), ((3434, 3499), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'min_samples_split': 'i', 'random_state': '(42)'}), '(min_samples_split=i, random_state=42)\n', (3461, 3499), False, 'from sklearn import tree\n'), ((3624, 3649), 'numpy.argmax', 'np.argmax', (['train_accuracy'], {}), '(train_accuracy)\n', (3633, 3649), True, 'import numpy as np\n'), ((2973, 3007), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X', 
'y'], {'cv': '(3)'}), '(model, X, y, cv=3)\n', (2988, 3007), False, 'from sklearn.model_selection import train_test_split, cross_val_score\n'), ((3528, 3562), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X', 'y'], {'cv': '(3)'}), '(model, X, y, cv=3)\n', (3543, 3562), False, 'from sklearn.model_selection import train_test_split, cross_val_score\n')]
import numpy as np def _sum_by_group_sorted(indices, *values): """ Auxiliary function to sum up values by some given indices (both as numpy arrays). Expects the indices and values to already be sorted. :param indices: :type indices: :param values: :type values: :return: :rtype: """ # Index defines whether a specific index has already appeared in the index array before. index = np.ones(len(indices), 'bool') index[:-1] = indices[1:] != indices[:-1] # make indices unique for output indices = indices[index] val = list(values) for i in range(len(val)): # sum up values, chose only those with unique indices and then subtract the previous sums # --> this way for each index the sum of all values belonging to this index is returned np.cumsum(val[i], out=val[i]) val[i] = val[i][index] val[i][1:] = val[i][1:] - val[i][:-1] return [indices] + val def _sum_by_group(indices, *values): """ Auxiliary function to sum up values by some given indices (both as numpy arrays). :param indices: :type indices: :param values: :type values: :return: :rtype: """ # sort indices and values by indices order = np.argsort(indices) indices = indices[order] val = list(values) for i in range(len(val)): val[i] = val[i][order] return _sum_by_group_sorted(indices, *val)
[ "numpy.argsort", "numpy.cumsum" ]
[((1261, 1280), 'numpy.argsort', 'np.argsort', (['indices'], {}), '(indices)\n', (1271, 1280), True, 'import numpy as np\n'), ((830, 859), 'numpy.cumsum', 'np.cumsum', (['val[i]'], {'out': 'val[i]'}), '(val[i], out=val[i])\n', (839, 859), True, 'import numpy as np\n')]
import argparse import pdb import laspy import os import shutil import numpy as np from tqdm import tqdm from semantics_recovery import all_path, mkdir def config_parser(): parser = argparse.ArgumentParser( description='Semantic label recovery script.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--input_path', type=str, default=None, required=True, help='Directory where the raw data is.') parser.add_argument('--las_path', type=str, default=None, required=True, help='Directory where the las files are.') parser.add_argument('--output_path', type=str, default=None, help='Directory to store cleaned npy labels. Default is the raw data path.') parser.add_argument('--save_backup', action="store_true", help='Flag to backup the raw data before cleaning.') parser.add_argument('--semantics', action="store_true", help='To clean semantics data in-place based on the scene coord results.') parser.add_argument('--start_idx', type=int, default=0, help='start index of the file list') parser.add_argument('--end_idx', type=int, default=None, help='stop index of the file list') opt = parser.parse_args() if opt.output_path is None: opt.output_path = opt.input_path print("Point cloud label output path is not specified! 
The raw data path is used, " "i.e., the cleaned npy labels are generated in place.") return opt def main(): args = config_parser() print(args) start_idx = args.start_idx end_idx = args.end_idx # set the input and output path las_file_path = os.path.abspath(args.las_path) input_path = os.path.abspath(args.input_path) output_path = os.path.abspath(args.output_path) file_ls, folder_ls = all_path(input_path, filter_list=['.npy']) print("To process {:d} npy labels at {:s}".format(len(file_ls), input_path)) # load raw las and get the boundary in ECEF las_ls = [las for las in os.listdir(las_file_path) if las.endswith('.las')] bound_xyz_min, bound_xyz_max = np.array([float('inf')] * 3), np.zeros(3) for idx, las_name in enumerate(las_ls): las = laspy.read(os.path.join(las_file_path, las_name)) this_min_xyz = np.min(np.stack([las.x, las.y, las.z]), axis=1) this_max_xyz = np.max(np.stack([las.x, las.y, las.z]), axis=1) bound_xyz_min = np.minimum(bound_xyz_min, this_min_xyz) bound_xyz_max = np.maximum(bound_xyz_max, this_max_xyz) las = None print("XYZ boundary min: {}, max: {}".format(bound_xyz_min, bound_xyz_max)) # create output folder structure input_path_len = len(input_path.split('/')) folder_ls = ['/'.join(folder.split('/')[input_path_len:]) for folder in folder_ls] folder_ls = [folder for folder in folder_ls if 'outlier-removal-backup' not in folder] folder_ls = np.unique(folder_ls).tolist() mkdir(output_path, folder_ls) if args.save_backup: output_backup_path = os.path.abspath(os.path.join(args.output_path, 'outlier-removal-backup')) mkdir(output_backup_path, folder_ls) # process the labels for idx_dp, file_name in tqdm(enumerate(file_ls[start_idx:end_idx])): """Load ray-traced point cloud""" sc_path = '{:s}_pc.npy'.format(file_name) sm_path = '{:s}_semantics.npy'.format(file_name) out_sc_path = os.path.join(args.output_path, '{:s}_pc.npy'.format( '/'.join(file_name.split('/')[input_path_len:]))) out_sm_path = os.path.join(args.output_path, '{:s}_semantics.npy'.format( 
'/'.join(file_name.split('/')[input_path_len:]))) _sc = np.load(sc_path) # [480, 720, 3] _sc_shape = _sc.shape sc = _sc.reshape(-1, 3).copy() # [N, 3] mask_has_data = sc[:, 0] != -1 mask_outlier = np.logical_or(sc > bound_xyz_max, sc < bound_xyz_min).sum(axis=1) > 0 # [N] mask_outlier = np.logical_and(mask_outlier, mask_has_data) if mask_outlier.sum(): print("{:d} / {:d} outliers to remove, percentage: {:.2f}%, file: {:s}".format( mask_outlier.sum(), len(sc), mask_outlier.sum() / len(sc) * 100, os.path.abspath(sc_path))) sc[mask_outlier] = -1.0 sc = sc.reshape(_sc_shape) if args.semantics and os.path.exists(sm_path): _sm = np.load(sm_path) # [480, 720] _sm_shape = _sm.shape sm = _sm.reshape(-1, 1).copy() sm[mask_outlier] = 0 # set to 0 for semantics instead of -1 sm = sm.reshape(_sm_shape).astype(np.uint8) # backup only the point cloud has potential outliers if args.save_backup: # backup point cloud out_backup_path = os.path.join(output_backup_path, '{:s}_pc.npy'.format( '/'.join(file_name.split('/')[input_path_len:]))) shutil.copy(sc_path, out_backup_path) if args.semantics and os.path.exists(sm_path): # backup semantics map out_backup_path = os.path.join(output_backup_path, '{:s}_semantics.npy'.format( '/'.join(file_name.split('/')[input_path_len:]))) shutil.copy(sm_path, out_backup_path) # overwrite the old files np.save(out_sc_path, sc) if args.semantics and os.path.exists(sm_path): np.save(out_sm_path, sm) if __name__ == '__main__': main()
[ "os.path.exists", "os.listdir", "numpy.minimum", "argparse.ArgumentParser", "numpy.logical_and", "numpy.unique", "os.path.join", "semantics_recovery.mkdir", "numpy.logical_or", "numpy.stack", "numpy.zeros", "shutil.copy", "os.path.abspath", "numpy.maximum", "semantics_recovery.all_path",...
[((188, 318), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Semantic label recovery script."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Semantic label recovery script.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (211, 318), False, 'import argparse\n'), ((1787, 1817), 'os.path.abspath', 'os.path.abspath', (['args.las_path'], {}), '(args.las_path)\n', (1802, 1817), False, 'import os\n'), ((1835, 1867), 'os.path.abspath', 'os.path.abspath', (['args.input_path'], {}), '(args.input_path)\n', (1850, 1867), False, 'import os\n'), ((1886, 1919), 'os.path.abspath', 'os.path.abspath', (['args.output_path'], {}), '(args.output_path)\n', (1901, 1919), False, 'import os\n'), ((1945, 1987), 'semantics_recovery.all_path', 'all_path', (['input_path'], {'filter_list': "['.npy']"}), "(input_path, filter_list=['.npy'])\n", (1953, 1987), False, 'from semantics_recovery import all_path, mkdir\n'), ((3066, 3095), 'semantics_recovery.mkdir', 'mkdir', (['output_path', 'folder_ls'], {}), '(output_path, folder_ls)\n', (3071, 3095), False, 'from semantics_recovery import all_path, mkdir\n'), ((2263, 2274), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2271, 2274), True, 'import numpy as np\n'), ((2549, 2588), 'numpy.minimum', 'np.minimum', (['bound_xyz_min', 'this_min_xyz'], {}), '(bound_xyz_min, this_min_xyz)\n', (2559, 2588), True, 'import numpy as np\n'), ((2613, 2652), 'numpy.maximum', 'np.maximum', (['bound_xyz_max', 'this_max_xyz'], {}), '(bound_xyz_max, this_max_xyz)\n', (2623, 2652), True, 'import numpy as np\n'), ((3232, 3268), 'semantics_recovery.mkdir', 'mkdir', (['output_backup_path', 'folder_ls'], {}), '(output_backup_path, folder_ls)\n', (3237, 3268), False, 'from semantics_recovery import all_path, mkdir\n'), ((3831, 3847), 'numpy.load', 'np.load', (['sc_path'], {}), '(sc_path)\n', (3838, 3847), True, 'import numpy as np\n'), ((4106, 4149), 'numpy.logical_and', 'np.logical_and', 
(['mask_outlier', 'mask_has_data'], {}), '(mask_outlier, mask_has_data)\n', (4120, 4149), True, 'import numpy as np\n'), ((2147, 2172), 'os.listdir', 'os.listdir', (['las_file_path'], {}), '(las_file_path)\n', (2157, 2172), False, 'import os\n'), ((2344, 2381), 'os.path.join', 'os.path.join', (['las_file_path', 'las_name'], {}), '(las_file_path, las_name)\n', (2356, 2381), False, 'import os\n'), ((2413, 2444), 'numpy.stack', 'np.stack', (['[las.x, las.y, las.z]'], {}), '([las.x, las.y, las.z])\n', (2421, 2444), True, 'import numpy as np\n'), ((2484, 2515), 'numpy.stack', 'np.stack', (['[las.x, las.y, las.z]'], {}), '([las.x, las.y, las.z])\n', (2492, 2515), True, 'import numpy as np\n'), ((3032, 3052), 'numpy.unique', 'np.unique', (['folder_ls'], {}), '(folder_ls)\n', (3041, 3052), True, 'import numpy as np\n'), ((3166, 3222), 'os.path.join', 'os.path.join', (['args.output_path', '"""outlier-removal-backup"""'], {}), "(args.output_path, 'outlier-removal-backup')\n", (3178, 3222), False, 'import os\n'), ((5531, 5555), 'numpy.save', 'np.save', (['out_sc_path', 'sc'], {}), '(out_sc_path, sc)\n', (5538, 5555), True, 'import numpy as np\n'), ((4492, 4515), 'os.path.exists', 'os.path.exists', (['sm_path'], {}), '(sm_path)\n', (4506, 4515), False, 'import os\n'), ((4539, 4555), 'numpy.load', 'np.load', (['sm_path'], {}), '(sm_path)\n', (4546, 4555), True, 'import numpy as np\n'), ((5103, 5140), 'shutil.copy', 'shutil.copy', (['sc_path', 'out_backup_path'], {}), '(sc_path, out_backup_path)\n', (5114, 5140), False, 'import shutil\n'), ((5590, 5613), 'os.path.exists', 'os.path.exists', (['sm_path'], {}), '(sm_path)\n', (5604, 5613), False, 'import os\n'), ((5631, 5655), 'numpy.save', 'np.save', (['out_sm_path', 'sm'], {}), '(out_sm_path, sm)\n', (5638, 5655), True, 'import numpy as np\n'), ((4006, 4059), 'numpy.logical_or', 'np.logical_or', (['(sc > bound_xyz_max)', '(sc < bound_xyz_min)'], {}), '(sc > bound_xyz_max, sc < bound_xyz_min)\n', (4019, 4059), True, 'import numpy 
as np\n'), ((4355, 4379), 'os.path.abspath', 'os.path.abspath', (['sc_path'], {}), '(sc_path)\n', (4370, 4379), False, 'import os\n'), ((5180, 5203), 'os.path.exists', 'os.path.exists', (['sm_path'], {}), '(sm_path)\n', (5194, 5203), False, 'import os\n'), ((5442, 5479), 'shutil.copy', 'shutil.copy', (['sm_path', 'out_backup_path'], {}), '(sm_path, out_backup_path)\n', (5453, 5479), False, 'import shutil\n')]
"""util functions # many old functions, need to clean up # homography --> homography # warping # loss --> delete if useless """ import numpy as np import torch from pathlib import Path import datetime import datetime from collections import OrderedDict import torch.nn.functional as F import torch.nn as nn ###### check # from utils.nms_pytorch import box_nms as box_nms_retinaNet from utils.d2s import DepthToSpace, SpaceToDepth def img_overlap(img_r, img_g, img_gray): # img_b repeat def to_3d(img): if len(img.shape) == 2: img = img[np.newaxis, ...] return img img_r, img_g, img_gray = to_3d(img_r), to_3d(img_g), to_3d(img_gray) img = np.concatenate((img_gray, img_gray, img_gray), axis=0) img[0, :, :] += img_r[0, :, :] img[1, :, :] += img_g[0, :, :] img[img > 1] = 1 img[img < 0] = 0 return img def thd_img(img, thd=0.015): img[img < thd] = 0 img[img >= thd] = 1 return img def toNumpy(tensor): return tensor.detach().cpu().numpy() def save_path_formatter(args, parser): print("todo: save path") return Path('.') pass ''' def save_path_formatter(args, parser): def is_default(key, value): return value == parser.get_default(key) args_dict = vars(args) # data_folder_name = str(Path(args_dict['data']).normpath().name) data_folder_name = str(Path(args_dict['data'])) folder_string = [data_folder_name] if not is_default('epochs', args_dict['epochs']): folder_string.append('{}epochs'.format(args_dict['epochs'])) keys_with_prefix = OrderedDict() keys_with_prefix['epoch_size'] = 'epoch_size' keys_with_prefix['sequence_length'] = 'seq' keys_with_prefix['rotation_mode'] = 'rot_' keys_with_prefix['padding_mode'] = 'padding_' keys_with_prefix['batch_size'] = 'b' keys_with_prefix['lr'] = 'lr' keys_with_prefix['photo_loss_weight'] = 'p' keys_with_prefix['mask_loss_weight'] = 'm' keys_with_prefix['smooth_loss_weight'] = 's' for key, prefix in keys_with_prefix.items(): value = args_dict[key] if not is_default(key, value): folder_string.append('{}{}'.format(prefix, value)) save_path = 
Path(','.join(folder_string)) timestamp = datetime.datetime.now().strftime("%m-%d-%H:%M") return save_path/timestamp # return '' ''' def tensor2array(tensor, max_value=255, colormap='rainbow', channel_first=True): tensor = tensor.detach().cpu() if max_value is None: max_value = tensor.max().item() if tensor.ndimension() == 2 or tensor.size(0) == 1: try: import cv2 if int(cv2.__version__[0]) >= 3: color_cvt = cv2.COLOR_BGR2RGB else: # 2.4 color_cvt = cv2.cv.CV_BGR2RGB if colormap == 'rainbow': colormap = cv2.COLORMAP_RAINBOW elif colormap == 'bone': colormap = cv2.COLORMAP_BONE array = (255*tensor.squeeze().numpy()/max_value).clip(0, 255).astype(np.uint8) colored_array = cv2.applyColorMap(array, colormap) array = cv2.cvtColor(colored_array, color_cvt).astype(np.float32)/255 except ImportError: if tensor.ndimension() == 2: tensor.unsqueeze_(2) array = (tensor.expand(tensor.size(0), tensor.size(1), 3).numpy()/max_value).clip(0,1) if channel_first: array = array.transpose(2, 0, 1) elif tensor.ndimension() == 3: assert(tensor.size(0) == 3) array = 0.5 + tensor.numpy()*0.5 if not channel_first: array = array.transpose(1, 2, 0) return array # from utils.utils import find_files_with_ext def find_files_with_ext(directory, extension='.npz'): # print(os.listdir(directory)) list_of_files = [] import os if extension == ".npz": for l in os.listdir(directory): if l.endswith(extension): list_of_files.append(l) # print(l) return list_of_files def save_checkpoint(save_path, net_state, epoch, filename='checkpoint.pth.tar'): file_prefix = ['superPointNet'] # torch.save(net_state, save_path) filename = '{}_{}_{}'.format(file_prefix[0], str(epoch), filename) torch.save(net_state, save_path/filename) print("save checkpoint to ", filename) pass def load_checkpoint(load_path, filename='checkpoint.pth.tar'): file_prefix = ['superPointNet'] filename = '{}__{}'.format(file_prefix[0], filename) # torch.save(net_state, save_path) checkpoint = torch.load(load_path/filename) print("load 
checkpoint from ", filename) return checkpoint pass def saveLoss(filename, iter, loss, task='train', **options): # save_file = save_output / "export.txt" with open(filename, "a") as myfile: myfile.write(task + " iter: " + str(iter) + ", ") myfile.write("loss: " + str(loss) + ", ") myfile.write(str(options)) myfile.write("\n") # myfile.write("iter: " + str(iter) + '\n') # myfile.write("output pairs: " + str(count) + '\n') def saveImg(img, filename): import cv2 cv2.imwrite(filename, img) def pltImshow(img): from matplotlib import pyplot as plt plt.imshow(img) plt.show() def loadConfig(filename): import yaml with open(filename, 'r') as f: config = yaml.load(f) return config def append_csv(file='foo.csv', arr=[]): import csv # fields=['first','second','third'] # pre = lambda i: ['{0:.3f}'.format(x) for x in i] with open(file, 'a') as f: writer = csv.writer(f) if type(arr[0]) is list: for a in arr: writer.writerow(a) # writer.writerow(pre(a)) # print(pre(a)) else: writer.writerow(arr) ''' def save_checkpoint(save_path, dispnet_state, exp_pose_state, is_best, filename='checkpoint.pth.tar'): file_prefixes = ['dispnet', 'exp_pose'] states = [dispnet_state, exp_pose_state] for (prefix, state) in zip(file_prefixes, states): torch.save(state, save_path/'{}_{}'.format(prefix,filename)) if is_best: for prefix in file_prefixes: shutil.copyfile(save_path/'{}_{}'.format(prefix,filename), save_path/'{}_model_best.pth.tar'.format(prefix)) ''' import cv2 def sample_homography(inv_scale=3): corner_img = np.array([(-1, -1), (-1, 1), (1, -1), (1, 1)]) # offset_r = 1 - 1/inv_scale # img_offset = np.array([(-1, -1), (-1, offset_r), (offset_r, -1), (offset_r, offset_r)]) img_offset = corner_img corner_map = (np.random.rand(4,2)-0.5)*2/(inv_scale + 0.01) + img_offset matrix = cv2.getPerspectiveTransform(np.float32(corner_img), np.float32(corner_map)) return matrix def sample_homographies(batch_size=1, scale=10, device='cpu'): ## sample homography matrix # mat_H = 
[sample_homography(inv_scale=scale) for i in range(batch_size)] mat_H = [sample_homography(inv_scale=scale) for i in range(batch_size)] ##### debug # from utils.utils import sample_homo # mat_H = [sample_homo(image=np.zeros((1,1))) for i in range(batch_size)] # mat_H = [np.identity(3) for i in range(batch_size)] mat_H = np.stack(mat_H, axis=0) mat_H = torch.tensor(mat_H, dtype=torch.float32) mat_H = mat_H.to(device) mat_H_inv = torch.stack([torch.inverse(mat_H[i, :, :]) for i in range(batch_size)]) mat_H_inv = torch.tensor(mat_H_inv, dtype=torch.float32) mat_H_inv = mat_H_inv.to(device) return mat_H, mat_H_inv def warpLabels(pnts, homography, H, W): import torch """ input: pnts: numpy homography: numpy output: warped_pnts: numpy """ from utils.utils import warp_points from utils.utils import filter_points pnts = torch.tensor(pnts).long() homography = torch.tensor(homography, dtype=torch.float32) warped_pnts = warp_points(torch.stack((pnts[:, 0], pnts[:, 1]), dim=1), homography) # check the (x, y) warped_pnts = filter_points(warped_pnts, torch.tensor([W, H])).round().long() return warped_pnts.numpy() def warp_points_np(points, homographies, device='cpu'): """ Warp a list of points with the given homography. Arguments: points: list of N points, shape (N, 2). homography: batched or not (shapes (B, 3, 3) and (...) respectively). Returns: a Tensor of shape (N, 2) or (B, N, 2) (depending on whether the homography is batched) containing the new coordinates of the warped points. 
""" # expand points len to (x, y, 1) batch_size = homographies.shape[0] points = np.concatenate((points, np.ones((points.shape[0], 1))), axis=1) # points = points.to(device) # homographies = homographies.(batch_size*3,3) # warped_points = homographies*points # warped_points = homographies@points.transpose(0,1) warped_points = np.tensordot(homographies, points.transpose(), axes=([2], [0])) # normalize the points warped_points = warped_points.reshape([batch_size, 3, -1]) warped_points = warped_points.transpose([0, 2, 1]) warped_points = warped_points[:, :, :2] / warped_points[:, :, 2:] return warped_points def homography_scaling(homography, H, W): trans = np.array([[2./W, 0., -1], [0., 2./H, -1], [0., 0., 1.]]) homography = np.linalg.inv(trans) @ homography @ trans return homography def homography_scaling_torch(homography, H, W): trans = torch.tensor([[2./W, 0., -1], [0., 2./H, -1], [0., 0., 1.]]) homography = (trans.inverse() @ homography @ trans) return homography def filter_points(points, shape, return_mask=False): ### check! points = points.float() shape = shape.float() mask = (points >= 0) * (points <= shape-1) mask = (torch.prod(mask, dim=-1) == 1) if return_mask: return points[mask], mask return points [mask] # return points [torch.prod(mask, dim=-1) == 1] def warp_points(points, homographies, device='cpu'): """ Warp a list of points with the given homography. Arguments: points: list of N points, shape (N, 2(x, y))). homography: batched or not (shapes (B, 3, 3) and (...) respectively). Returns: a Tensor of shape (N, 2) or (B, N, 2(x, y)) (depending on whether the homography is batched) containing the new coordinates of the warped points. 
""" # expand points len to (x, y, 1) no_batches = len(homographies.shape) == 2 homographies = homographies.unsqueeze(0) if no_batches else homographies # homographies = homographies.unsqueeze(0) if len(homographies.shape) == 2 else homographies batch_size = homographies.shape[0] points = torch.cat((points.float(), torch.ones((points.shape[0], 1)).to(device)), dim=1) points = points.to(device) homographies = homographies.view(batch_size*3,3) # warped_points = homographies*points # points = points.double() warped_points = homographies@points.transpose(0,1) # warped_points = np.tensordot(homographies, points.transpose(), axes=([2], [0])) # normalize the points warped_points = warped_points.view([batch_size, 3, -1]) warped_points = warped_points.transpose(2, 1) warped_points = warped_points[:, :, :2] / warped_points[:, :, 2:] return warped_points[0,:,:] if no_batches else warped_points # from utils.utils import inv_warp_image_batch def inv_warp_image_batch(img, mat_homo_inv, device='cpu', mode='bilinear'): ''' Inverse warp images in batch :param img: batch of images tensor [batch_size, 1, H, W] :param mat_homo_inv: batch of homography matrices tensor [batch_size, 3, 3] :param device: GPU device or CPU :return: batch of warped images tensor [batch_size, 1, H, W] ''' # compute inverse warped points if len(img.shape) == 2 or len(img.shape) == 3: img = img.view(1,1,img.shape[0], img.shape[1]) if len(mat_homo_inv.shape) == 2: mat_homo_inv = mat_homo_inv.view(1,3,3) Batch, channel, H, W = img.shape coor_cells = torch.stack(torch.meshgrid(torch.linspace(-1, 1, W), torch.linspace(-1, 1, H)), dim=2) coor_cells = coor_cells.transpose(0, 1) coor_cells = coor_cells.to(device) coor_cells = coor_cells.contiguous() src_pixel_coords = warp_points(coor_cells.view([-1, 2]), mat_homo_inv, device) src_pixel_coords = src_pixel_coords.view([Batch, H, W, 2]) src_pixel_coords = src_pixel_coords.float() warped_img = F.grid_sample(img, src_pixel_coords, mode=mode, align_corners=True) return 
warped_img def inv_warp_image(img, mat_homo_inv, device='cpu', mode='bilinear'): ''' Inverse warp images in batch :param img: batch of images tensor [H, W] :param mat_homo_inv: batch of homography matrices tensor [3, 3] :param device: GPU device or CPU :return: batch of warped images tensor [H, W] ''' warped_img = inv_warp_image_batch(img, mat_homo_inv, device, mode) return warped_img.squeeze() def labels2Dto3D(labels, cell_size, add_dustbin=True): ''' Change the shape of labels into 3D. Batch of labels. :param labels: tensor [batch_size, 1, H, W] keypoint map. :param cell_size: 8 :return: labels: tensors[batch_size, 65, Hc, Wc] ''' batch_size, channel, H, W = labels.shape Hc, Wc = H // cell_size, W // cell_size space2depth = SpaceToDepth(8) # labels = space2depth(labels).squeeze(0) labels = space2depth(labels) # labels = labels.view(batch_size, H, 1, W, 1) # labels = labels.view(batch_size, Hc, cell_size, Wc, cell_size) # labels = labels.transpose(1, 2).transpose(3, 4).transpose(2, 3) # labels = labels.reshape(batch_size, 1, cell_size ** 2, Hc, Wc) # labels = labels.view(batch_size, cell_size ** 2, Hc, Wc) if add_dustbin: dustbin = labels.sum(dim=1) dustbin = 1 - dustbin dustbin[dustbin < 1.] = 0 # print('dust: ', dustbin.shape) # labels = torch.cat((labels, dustbin.view(batch_size, 1, Hc, Wc)), dim=1) labels = torch.cat((labels, dustbin.view(batch_size, 1, Hc, Wc)), dim=1) ## norm dn = labels.sum(dim=1) labels = labels.div(torch.unsqueeze(dn, 1)) return labels def labels2Dto3D_flattened(labels, cell_size): ''' Change the shape of labels into 3D. Batch of labels. :param labels: tensor [batch_size, 1, H, W] keypoint map. 
:param cell_size: 8 :return: labels: tensors[batch_size, 65, Hc, Wc] ''' batch_size, channel, H, W = labels.shape Hc, Wc = H // cell_size, W // cell_size space2depth = SpaceToDepth(8) # labels = space2depth(labels).squeeze(0) labels = space2depth(labels) # print("labels in 2Dto3D: ", labels.shape) # labels = labels.view(batch_size, H, 1, W, 1) # labels = labels.view(batch_size, Hc, cell_size, Wc, cell_size) # labels = labels.transpose(1, 2).transpose(3, 4).transpose(2, 3) # labels = labels.reshape(batch_size, 1, cell_size ** 2, Hc, Wc) # labels = labels.view(batch_size, cell_size ** 2, Hc, Wc) dustbin = torch.ones((batch_size, 1, Hc, Wc)).cuda() # labels = torch.cat((labels, dustbin.view(batch_size, 1, Hc, Wc)), dim=1) labels = torch.cat((labels*2, dustbin.view(batch_size, 1, Hc, Wc)), dim=1) labels = torch.argmax(labels, dim=1) return labels def old_flatten64to1(semi, tensor=False): ''' Flatten 3D np array to 2D :param semi: np [64 x Hc x Wc] or tensor (batch_size, 65, Hc, Wc) :return: flattened map np [1 x Hc*8 x Wc*8] or tensor (batch_size, 1, Hc*8, Wc*8) ''' if tensor: is_batch = len(semi.size()) == 4 if not is_batch: semi = semi.unsqueeze_(0) Hc, Wc = semi.size()[2], semi.size()[3] cell = 8 semi.transpose_(1, 2) semi.transpose_(2, 3) semi = semi.view(-1, Hc, Wc, cell, cell) semi.transpose_(2, 3) semi = semi.contiguous() semi = semi.view(-1, 1, Hc * cell, Wc * cell) heatmap = semi if not is_batch: heatmap = heatmap.squeeze_(0) else: Hc, Wc = semi.shape[1], semi.shape[2] cell = 8 semi = semi.transpose(1, 2, 0) heatmap = np.reshape(semi, [Hc, Wc, cell, cell]) heatmap = np.transpose(heatmap, [0, 2, 1, 3]) # heatmap = np.transpose(heatmap, [2, 0, 3, 1]) heatmap = np.reshape(heatmap, [Hc * cell, Wc * cell]) heatmap = heatmap[np.newaxis, :, :] return heatmap def flattenDetection(semi, tensor=False): ''' Flatten detection output :param semi: output from detector head tensor [65, Hc, Wc] :or tensor (batch_size, 65, Hc, Wc) :return: 3D heatmap np (1, H, C) :or tensor 
(batch_size, 65, Hc, Wc) ''' batch = False if len(semi.shape) == 4: batch = True batch_size = semi.shape[0] # if tensor: # semi.exp_() # d = semi.sum(dim=1) + 0.00001 # d = d.view(d.shape[0], 1, d.shape[1], d.shape[2]) # semi = semi / d # how to /(64,15,20) # nodust = semi[:, :-1, :, :] # heatmap = flatten64to1(nodust, tensor=tensor) # else: # Convert pytorch -> numpy. # --- Process points. # dense = nn.functional.softmax(semi, dim=0) # [65, Hc, Wc] if batch: dense = nn.functional.softmax(semi, dim=1) # [batch, 65, Hc, Wc] # Remove dustbin. nodust = dense[:, :-1, :, :] else: dense = nn.functional.softmax(semi, dim=0) # [65, Hc, Wc] nodust = dense[:-1, :, :].unsqueeze(0) # Reshape to get full resolution heatmap. # heatmap = flatten64to1(nodust, tensor=True) # [1, H, W] depth2space = DepthToSpace(8) heatmap = depth2space(nodust) heatmap = heatmap.squeeze(0) if not batch else heatmap return heatmap def sample_homo(image): import tensorflow as tf from utils.homographies import sample_homography H = sample_homography(tf.shape(image)[:2]) with tf.Session(): H_ = H.eval() H_ = np.concatenate((H_, np.array([1])[:, np.newaxis]), axis=1) # warped_im = tf.contrib.image.transform(image, H, interpolation="BILINEAR") mat = np.reshape(H_, (3, 3)) # for i in range(batch): # np.stack() return mat import cv2 def getPtsFromHeatmap(heatmap, conf_thresh, nms_dist): ''' :param self: :param heatmap: np (H, W) :return: ''' border_remove = 4 H, W = heatmap.shape[0], heatmap.shape[1] xs, ys = np.where(heatmap >= conf_thresh) # Confidence threshold. sparsemap = (heatmap >= conf_thresh) if len(xs) == 0: return np.zeros((3, 0)) pts = np.zeros((3, len(xs))) # Populate point data sized 3xN. pts[0, :] = ys pts[1, :] = xs pts[2, :] = heatmap[xs, ys] pts, _ = nms_fast(pts, H, W, dist_thresh=nms_dist) # Apply NMS. inds = np.argsort(pts[2, :]) pts = pts[:, inds[::-1]] # Sort by confidence. # Remove points along border. 
    bord = border_remove
    # Drop detections within `bord` pixels of the image boundary.
    toremoveW = np.logical_or(pts[0, :] < bord, pts[0, :] >= (W - bord))
    toremoveH = np.logical_or(pts[1, :] < bord, pts[1, :] >= (H - bord))
    toremove = np.logical_or(toremoveW, toremoveH)
    pts = pts[:, ~toremove]
    return pts

def box_nms(prob, size, iou=0.1, min_prob=0.01, keep_top_k=0):
    # requires https://github.com/open-mmlab/mmdetection.
    # Warning : BUILD FROM SOURCE using command MMCV_WITH_OPS=1 pip install -e
    # from mmcv.ops import nms as nms_mmdet
    from torchvision.ops import nms
    # NOTE(review): this string follows a statement, so it is NOT the
    # function docstring (it is a no-op expression); kept as documentation.
    """Performs non maximum suppression on the heatmap by considering hypothetical
    bounding boxes centered at each pixel's location (e.g. corresponding to the receptive
    field). Optionally only keeps the top k detections.
    Arguments:
        prob: the probability heatmap, with shape `[H, W]`.
        size: a scalar, the size of the bouding boxes.
        iou: a scalar, the IoU overlap threshold.
        min_prob: a threshold under which all probabilities are discarded before NMS.
        keep_top_k: an integer, the number of top scores to keep.
""" pts = torch.nonzero(prob > min_prob).float() # [N, 2] prob_nms = torch.zeros_like(prob) if pts.nelement() == 0: return prob_nms size = torch.tensor(size/2.).cuda() boxes = torch.cat([pts-size, pts+size], dim=1) # [N, 4] scores = prob[pts[:, 0].long(), pts[:, 1].long()] if keep_top_k != 0: indices = nms(boxes, scores, iou) else: raise NotImplementedError # indices, _ = nms(boxes, scores, iou, boxes.size()[0]) # print("boxes: ", boxes.shape) # print("scores: ", scores.shape) # proposals = torch.cat([boxes, scores.unsqueeze(-1)], dim=-1) # dets, indices = nms_mmdet(proposals, iou) # indices = indices.long() # indices = box_nms_retinaNet(boxes, scores, iou) pts = torch.index_select(pts, 0, indices) scores = torch.index_select(scores, 0, indices) prob_nms[pts[:, 0].long(), pts[:, 1].long()] = scores return prob_nms def nms_fast(in_corners, H, W, dist_thresh): """ Run a faster approximate Non-Max-Suppression on numpy corners shaped: 3xN [x_i,y_i,conf_i]^T Algo summary: Create a grid sized HxW. Assign each corner location a 1, rest are zeros. Iterate through all the 1's and convert them either to -1 or 0. Suppress points by setting nearby values to 0. Grid Value Legend: -1 : Kept. 0 : Empty or suppressed. 1 : To be processed (converted to either kept or supressed). NOTE: The NMS first rounds points to integers, so NMS distance might not be exactly dist_thresh. It also assumes points are within image boundaries. Inputs in_corners - 3xN numpy array with corners [x_i, y_i, confidence_i]^T. H - Image height. W - Image width. dist_thresh - Distance to suppress, measured as an infinty norm distance. Returns nmsed_corners - 3xN numpy matrix with surviving corners. nmsed_inds - N length numpy vector with surviving corner indices. """ grid = np.zeros((H, W)).astype(int) # Track NMS data. inds = np.zeros((H, W)).astype(int) # Store indices of points. # Sort by confidence and round to nearest int. 
    inds1 = np.argsort(-in_corners[2, :])
    corners = in_corners[:, inds1]
    rcorners = corners[:2, :].round().astype(int) # Rounded corners.
    # Check for edge case of 0 or 1 corners.
    if rcorners.shape[1] == 0:
        return np.zeros((3, 0)).astype(int), np.zeros(0).astype(int)
    if rcorners.shape[1] == 1:
        out = np.vstack((rcorners, in_corners[2])).reshape(3, 1)
        return out, np.zeros((1)).astype(int)
    # Initialize the grid.
    for i, rc in enumerate(rcorners.T):
        grid[rcorners[1, i], rcorners[0, i]] = 1
        inds[rcorners[1, i], rcorners[0, i]] = i
    # Pad the border of the grid, so that we can NMS points near the border.
    pad = dist_thresh
    grid = np.pad(grid, ((pad, pad), (pad, pad)), mode='constant')
    # Iterate through points, highest to lowest conf, suppress neighborhood.
    count = 0
    for i, rc in enumerate(rcorners.T):
        # Account for top and left padding.
        pt = (rc[0] + pad, rc[1] + pad)
        if grid[pt[1], pt[0]] == 1: # If not yet suppressed.
            # Zero out the (2*pad+1)^2 neighborhood, then mark this corner kept.
            grid[pt[1] - pad:pt[1] + pad + 1, pt[0] - pad:pt[0] + pad + 1] = 0
            grid[pt[1], pt[0]] = -1
            count += 1
    # Get all surviving -1's and return sorted array of remaining corners.
    keepy, keepx = np.where(grid == -1)
    keepy, keepx = keepy - pad, keepx - pad
    inds_keep = inds[keepy, keepx]
    out = corners[:, inds_keep]
    values = out[-1, :]
    inds2 = np.argsort(-values)
    out = out[:, inds2]
    out_inds = inds1[inds_keep[inds2]]
    return out, out_inds

def compute_valid_mask(image_shape, inv_homography, device='cpu', erosion_radius=0):
    """
    Compute a boolean mask of the valid pixels resulting from an homography applied to
    an image of a given shape. Pixels that are False correspond to bordering artifacts.
    A margin can be discarded using erosion.

    Arguments:
        input_shape: Tensor of rank 2 representing the image shape, i.e. `[H, W]`.
        homography: Tensor of shape (B, 8) or (8,), where B is the batch size.
        `erosion_radius: radius of the margin to be discarded.

    Returns: a Tensor of type `tf.int32` and shape (H, W).
""" # mask = H_transform(tf.ones(image_shape), homography, interpolation='NEAREST') # mask = H_transform(tf.ones(image_shape), homography, interpolation='NEAREST') if inv_homography.dim() == 2: inv_homography = inv_homography.view(-1, 3, 3) batch_size = inv_homography.shape[0] mask = torch.ones(batch_size, 1, image_shape[0], image_shape[1]).to(device) mask = inv_warp_image_batch(mask, inv_homography, device=device, mode='nearest') mask = mask.view(batch_size, image_shape[0], image_shape[1]) mask = mask.cpu().numpy() if erosion_radius > 0: kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (erosion_radius*2,)*2) for i in range(batch_size): mask[i, :, :] = cv2.erode(mask[i, :, :], kernel, iterations=1) return torch.tensor(mask).to(device) def normPts(pts, shape): """ normalize pts to [-1, 1] :param pts: tensor (y, x) :param shape: tensor shape (y, x) :return: """ pts = pts/shape*2 - 1 return pts def denormPts(pts, shape): """ denormalize pts back to H, W :param pts: tensor (y, x) :param shape: numpy (y, x) :return: """ pts = (pts+1)*shape/2 return pts # def subpixel_loss(image, labels, dense_desc, patch_size=8): # # concat image and dense_desc # # extract patches # # # pass def descriptor_loss(descriptors, descriptors_warped, homographies, mask_valid=None, cell_size=8, lamda_d=250, device='cpu', descriptor_dist=4, **config): ''' Compute descriptor loss from descriptors_warped and given homographies :param descriptors: Output from descriptor head tensor [batch_size, descriptors, Hc, Wc] :param descriptors_warped: Output from descriptor head of warped image tensor [batch_size, descriptors, Hc, Wc] :param homographies: known homographies :param cell_size: 8 :param device: gpu or cpu :param config: :return: loss, and other tensors for visualization ''' # put to gpu homographies = homographies.to(device) # config from utils.utils import warp_points lamda_d = lamda_d # 250 margin_pos = 1 margin_neg = 0.2 batch_size, Hc, Wc = descriptors.shape[0], descriptors.shape[2], 
descriptors.shape[3] ##### # H, W = Hc.numpy().astype(int) * cell_size, Wc.numpy().astype(int) * cell_size H, W = Hc * cell_size, Wc * cell_size ##### with torch.no_grad(): # shape = torch.tensor(list(descriptors.shape[2:]))*torch.tensor([cell_size, cell_size]).type(torch.FloatTensor).to(device) shape = torch.tensor([H, W]).type(torch.FloatTensor).to(device) # compute the center pixel of every cell in the image coor_cells = torch.stack(torch.meshgrid(torch.arange(Hc), torch.arange(Wc)), dim=2) coor_cells = coor_cells.type(torch.FloatTensor).to(device) coor_cells = coor_cells * cell_size + cell_size // 2 ## coord_cells is now a grid containing the coordinates of the Hc x Wc ## center pixels of the 8x8 cells of the image # coor_cells = coor_cells.view([-1, Hc, Wc, 1, 1, 2]) coor_cells = coor_cells.view([-1, 1, 1, Hc, Wc, 2]) # be careful of the order # warped_coor_cells = warp_points(coor_cells.view([-1, 2]), homographies, device) warped_coor_cells = normPts(coor_cells.view([-1, 2]), shape) warped_coor_cells = torch.stack((warped_coor_cells[:,1], warped_coor_cells[:,0]), dim=1) # (y, x) to (x, y) warped_coor_cells = warp_points(warped_coor_cells, homographies, device) warped_coor_cells = torch.stack((warped_coor_cells[:, :, 1], warped_coor_cells[:, :, 0]), dim=2) # (batch, x, y) to (batch, y, x) shape_cell = torch.tensor([H//cell_size, W//cell_size]).type(torch.FloatTensor).to(device) # warped_coor_mask = denormPts(warped_coor_cells, shape_cell) warped_coor_cells = denormPts(warped_coor_cells, shape) # warped_coor_cells = warped_coor_cells.view([-1, 1, 1, Hc, Wc, 2]) warped_coor_cells = warped_coor_cells.view([-1, Hc, Wc, 1, 1, 2]) # print("warped_coor_cells: ", warped_coor_cells.shape) # compute the pairwise distance cell_distances = coor_cells - warped_coor_cells cell_distances = torch.norm(cell_distances, dim=-1) ##### check # print("descriptor_dist: ", descriptor_dist) mask = cell_distances <= descriptor_dist # 0.5 # trick mask = 
mask.type(torch.FloatTensor).to(device) # compute the pairwise dot product between descriptors: d^t * d descriptors = descriptors.transpose(1, 2).transpose(2, 3) descriptors = descriptors.view((batch_size, Hc, Wc, 1, 1, -1)) descriptors_warped = descriptors_warped.transpose(1, 2).transpose(2, 3) descriptors_warped = descriptors_warped.view((batch_size, 1, 1, Hc, Wc, -1)) dot_product_desc = descriptors * descriptors_warped dot_product_desc = dot_product_desc.sum(dim=-1) ## dot_product_desc.shape = [batch_size, Hc, Wc, Hc, Wc, desc_len] # hinge loss positive_dist = torch.max(margin_pos - dot_product_desc, torch.tensor(0.).to(device)) # positive_dist[positive_dist < 0] = 0 negative_dist = torch.max(dot_product_desc - margin_neg, torch.tensor(0.).to(device)) # negative_dist[neative_dist < 0] = 0 # sum of the dimension if mask_valid is None: # mask_valid = torch.ones_like(mask) mask_valid = torch.ones(batch_size, 1, Hc*cell_size, Wc*cell_size) mask_valid = mask_valid.view(batch_size, 1, 1, mask_valid.shape[2], mask_valid.shape[3]) loss_desc = lamda_d * mask * positive_dist + (1 - mask) * negative_dist loss_desc = loss_desc * mask_valid # mask_validg = torch.ones_like(mask) ##### bug in normalization normalization = (batch_size * (mask_valid.sum()+1) * Hc * Wc) pos_sum = (lamda_d * mask * positive_dist/normalization).sum() neg_sum = ((1 - mask) * negative_dist/normalization).sum() loss_desc = loss_desc.sum() / normalization # loss_desc = loss_desc.sum() / (batch_size * Hc * Wc) # return loss_desc, mask, mask_valid, positive_dist, negative_dist return loss_desc, mask, pos_sum, neg_sum """ pos_pairs = mask * positive_dist pos_pairs = pos_pairs[pos_pairs != 0] print("pos_pairs mean: ", pos_pairs.mean()) print("pos_pairs max: ", pos_pairs.max()) print("pos_pairs min: ", pos_pairs.min()) === print("pos_pairs mean: ", pos_pairs.mean()) pos_pairs mean: tensor(0.6237, device='cuda:0', grad_fn=<MeanBackward1>) print("pos_pairs max: ", pos_pairs.max()) pos_pairs max: 
tensor(1.3984, device='cuda:0', grad_fn=<MaxBackward1>) print("pos_pairs min: ", pos_pairs.min()) pos_pairs min: tensor(0.1569, device='cuda:0', grad_fn=<MinBackward1>) (pos_pairs < 0.3).sum() Out[9]: tensor(88, device='cuda:0') (pos_pairs < 0.5).sum() Out[10]: tensor(393, device='cuda:0') (pos_pairs < 0.7).sum() Out[11]: tensor(703, device='cuda:0') """ def sumto2D(ndtensor): # input tensor: [batch_size, Hc, Wc, Hc, Wc] # output tensor: [batch_size, Hc, Wc] return ndtensor.sum(dim=1).sum(dim=1) def mAP(pred_batch, labels_batch): pass def precisionRecall_torch(pred, labels): offset = 10**-6 assert pred.size() == labels.size(), 'Sizes of pred, labels should match when you get the precision/recall!' precision = torch.sum(pred*labels) / (torch.sum(pred)+ offset) recall = torch.sum(pred*labels) / (torch.sum(labels) + offset) if precision.item() > 1.: print(pred) print(labels) import scipy.io.savemat as savemat savemat('pre_recall.mat', {'pred': pred, 'labels': labels}) assert precision.item() <=1. and precision.item() >= 0. return {'precision': precision, 'recall': recall} def precisionRecall(pred, labels, thd=None): offset = 10**-6 if thd is None: precision = np.sum(pred*labels) / (np.sum(pred)+ offset) recall = np.sum(pred*labels) / (np.sum(labels) + offset) return {'precision': precision, 'recall': recall} def getWriterPath(task='train', exper_name='', date=True): import datetime prefix = 'runs/' str_date_time = '' if exper_name != '': exper_name += '_' if date: str_date_time = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") return prefix + task + '/' + exper_name + str_date_time def crop_or_pad_choice(in_num_points, out_num_points, shuffle=False): # Adapted from https://github.com/haosulab/frustum_pointnet/blob/635c938f18b9ec1de2de717491fb217df84d2d93/fpointnet/data/datasets/utils.py """Crop or pad point cloud to a fixed number; return the indexes Args: points (np.ndarray): point cloud. 
        (n, d)
        num_points (int): the number of output points
        shuffle (bool): whether to shuffle the order
    Returns:
        np.ndarray: output point cloud
        np.ndarray: index to choose input points
    """
    if shuffle:
        choice = np.random.permutation(in_num_points)
    else:
        choice = np.arange(in_num_points)
    assert out_num_points > 0, 'out_num_points = %d must be positive int!'%out_num_points
    if in_num_points >= out_num_points:
        # Crop: keep only the first out_num_points indices.
        choice = choice[:out_num_points]
    else:
        # Pad: re-sample existing indices with replacement to reach the target.
        num_pad = out_num_points - in_num_points
        pad = np.random.choice(choice, num_pad, replace=True)
        choice = np.concatenate([choice, pad])
    return choice
[ "scipy.io.savemat", "tensorflow.shape", "numpy.random.rand", "yaml.load", "numpy.argsort", "numpy.array", "utils.utils.warp_points", "torch.sum", "torch.nn.functional.softmax", "numpy.arange", "torch.arange", "matplotlib.pyplot.imshow", "torch.nn.functional.grid_sample", "os.listdir", "n...
[((682, 736), 'numpy.concatenate', 'np.concatenate', (['(img_gray, img_gray, img_gray)'], {'axis': '(0)'}), '((img_gray, img_gray, img_gray), axis=0)\n', (696, 736), True, 'import numpy as np\n'), ((1100, 1109), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (1104, 1109), False, 'from pathlib import Path\n'), ((4278, 4321), 'torch.save', 'torch.save', (['net_state', '(save_path / filename)'], {}), '(net_state, save_path / filename)\n', (4288, 4321), False, 'import torch\n'), ((4585, 4617), 'torch.load', 'torch.load', (['(load_path / filename)'], {}), '(load_path / filename)\n', (4595, 4617), False, 'import torch\n'), ((5172, 5198), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'img'], {}), '(filename, img)\n', (5183, 5198), False, 'import cv2\n'), ((5265, 5280), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (5275, 5280), True, 'from matplotlib import pyplot as plt\n'), ((5285, 5295), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5293, 5295), True, 'from matplotlib import pyplot as plt\n'), ((6416, 6462), 'numpy.array', 'np.array', (['[(-1, -1), (-1, 1), (1, -1), (1, 1)]'], {}), '([(-1, -1), (-1, 1), (1, -1), (1, 1)])\n', (6424, 6462), True, 'import numpy as np\n'), ((7248, 7271), 'numpy.stack', 'np.stack', (['mat_H'], {'axis': '(0)'}), '(mat_H, axis=0)\n', (7256, 7271), True, 'import numpy as np\n'), ((7284, 7324), 'torch.tensor', 'torch.tensor', (['mat_H'], {'dtype': 'torch.float32'}), '(mat_H, dtype=torch.float32)\n', (7296, 7324), False, 'import torch\n'), ((7459, 7503), 'torch.tensor', 'torch.tensor', (['mat_H_inv'], {'dtype': 'torch.float32'}), '(mat_H_inv, dtype=torch.float32)\n', (7471, 7503), False, 'import torch\n'), ((7875, 7920), 'torch.tensor', 'torch.tensor', (['homography'], {'dtype': 'torch.float32'}), '(homography, dtype=torch.float32)\n', (7887, 7920), False, 'import torch\n'), ((9322, 9389), 'numpy.array', 'np.array', (['[[2.0 / W, 0.0, -1], [0.0, 2.0 / H, -1], [0.0, 0.0, 1.0]]'], {}), '([[2.0 / W, 0.0, -1], 
[0.0, 2.0 / H, -1], [0.0, 0.0, 1.0]])\n', (9330, 9389), True, 'import numpy as np\n'), ((9521, 9592), 'torch.tensor', 'torch.tensor', (['[[2.0 / W, 0.0, -1], [0.0, 2.0 / H, -1], [0.0, 0.0, 1.0]]'], {}), '([[2.0 / W, 0.0, -1], [0.0, 2.0 / H, -1], [0.0, 0.0, 1.0]])\n', (9533, 9592), False, 'import torch\n'), ((12588, 12655), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['img', 'src_pixel_coords'], {'mode': 'mode', 'align_corners': '(True)'}), '(img, src_pixel_coords, mode=mode, align_corners=True)\n', (12601, 12655), True, 'import torch.nn.functional as F\n'), ((13569, 13584), 'utils.d2s.SpaceToDepth', 'SpaceToDepth', (['(8)'], {}), '(8)\n', (13581, 13584), False, 'from utils.d2s import DepthToSpace, SpaceToDepth\n'), ((14829, 14844), 'utils.d2s.SpaceToDepth', 'SpaceToDepth', (['(8)'], {}), '(8)\n', (14841, 14844), False, 'from utils.d2s import DepthToSpace, SpaceToDepth\n'), ((15523, 15550), 'torch.argmax', 'torch.argmax', (['labels'], {'dim': '(1)'}), '(labels, dim=1)\n', (15535, 15550), False, 'import torch\n'), ((18023, 18038), 'utils.d2s.DepthToSpace', 'DepthToSpace', (['(8)'], {}), '(8)\n', (18035, 18038), False, 'from utils.d2s import DepthToSpace, SpaceToDepth\n'), ((18510, 18532), 'numpy.reshape', 'np.reshape', (['H_', '(3, 3)'], {}), '(H_, (3, 3))\n', (18520, 18532), True, 'import numpy as np\n'), ((18834, 18866), 'numpy.where', 'np.where', (['(heatmap >= conf_thresh)'], {}), '(heatmap >= conf_thresh)\n', (18842, 18866), True, 'import numpy as np\n'), ((19203, 19224), 'numpy.argsort', 'np.argsort', (['pts[2, :]'], {}), '(pts[2, :])\n', (19213, 19224), True, 'import numpy as np\n'), ((19352, 19406), 'numpy.logical_or', 'np.logical_or', (['(pts[0, :] < bord)', '(pts[0, :] >= W - bord)'], {}), '(pts[0, :] < bord, pts[0, :] >= W - bord)\n', (19365, 19406), True, 'import numpy as np\n'), ((19425, 19479), 'numpy.logical_or', 'np.logical_or', (['(pts[1, :] < bord)', '(pts[1, :] >= H - bord)'], {}), '(pts[1, :] < bord, pts[1, :] >= H - bord)\n', (19438, 
19479), True, 'import numpy as np\n'), ((19497, 19532), 'numpy.logical_or', 'np.logical_or', (['toremoveW', 'toremoveH'], {}), '(toremoveW, toremoveH)\n', (19510, 19532), True, 'import numpy as np\n'), ((20482, 20504), 'torch.zeros_like', 'torch.zeros_like', (['prob'], {}), '(prob)\n', (20498, 20504), False, 'import torch\n'), ((20609, 20651), 'torch.cat', 'torch.cat', (['[pts - size, pts + size]'], {'dim': '(1)'}), '([pts - size, pts + size], dim=1)\n', (20618, 20651), False, 'import torch\n'), ((21194, 21229), 'torch.index_select', 'torch.index_select', (['pts', '(0)', 'indices'], {}), '(pts, 0, indices)\n', (21212, 21229), False, 'import torch\n'), ((21243, 21281), 'torch.index_select', 'torch.index_select', (['scores', '(0)', 'indices'], {}), '(scores, 0, indices)\n', (21261, 21281), False, 'import torch\n'), ((22578, 22607), 'numpy.argsort', 'np.argsort', (['(-in_corners[2, :])'], {}), '(-in_corners[2, :])\n', (22588, 22607), True, 'import numpy as np\n'), ((23275, 23330), 'numpy.pad', 'np.pad', (['grid', '((pad, pad), (pad, pad))'], {'mode': '"""constant"""'}), "(grid, ((pad, pad), (pad, pad)), mode='constant')\n", (23281, 23330), True, 'import numpy as np\n'), ((23840, 23860), 'numpy.where', 'np.where', (['(grid == -1)'], {}), '(grid == -1)\n', (23848, 23860), True, 'import numpy as np\n'), ((24008, 24027), 'numpy.argsort', 'np.argsort', (['(-values)'], {}), '(-values)\n', (24018, 24027), True, 'import numpy as np\n'), ((3889, 3910), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (3899, 3910), False, 'import os\n'), ((5391, 5403), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (5400, 5403), False, 'import yaml\n'), ((5624, 5637), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (5634, 5637), False, 'import csv\n'), ((6726, 6748), 'numpy.float32', 'np.float32', (['corner_img'], {}), '(corner_img)\n', (6736, 6748), True, 'import numpy as np\n'), ((6750, 6772), 'numpy.float32', 'np.float32', (['corner_map'], {}), '(corner_map)\n', (6760, 
6772), True, 'import numpy as np\n'), ((6978, 7012), 'utils.homographies.sample_homography', 'sample_homography', ([], {'inv_scale': 'scale'}), '(inv_scale=scale)\n', (6995, 7012), False, 'from utils.homographies import sample_homography\n'), ((7951, 7995), 'torch.stack', 'torch.stack', (['(pnts[:, 0], pnts[:, 1])'], {'dim': '(1)'}), '((pnts[:, 0], pnts[:, 1]), dim=1)\n', (7962, 7995), False, 'import torch\n'), ((9842, 9866), 'torch.prod', 'torch.prod', (['mask'], {'dim': '(-1)'}), '(mask, dim=-1)\n', (9852, 9866), False, 'import torch\n'), ((16503, 16541), 'numpy.reshape', 'np.reshape', (['semi', '[Hc, Wc, cell, cell]'], {}), '(semi, [Hc, Wc, cell, cell])\n', (16513, 16541), True, 'import numpy as np\n'), ((16560, 16595), 'numpy.transpose', 'np.transpose', (['heatmap', '[0, 2, 1, 3]'], {}), '(heatmap, [0, 2, 1, 3])\n', (16572, 16595), True, 'import numpy as np\n'), ((16670, 16713), 'numpy.reshape', 'np.reshape', (['heatmap', '[Hc * cell, Wc * cell]'], {}), '(heatmap, [Hc * cell, Wc * cell])\n', (16680, 16713), True, 'import numpy as np\n'), ((17654, 17688), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['semi'], {'dim': '(1)'}), '(semi, dim=1)\n', (17675, 17688), True, 'import torch.nn as nn\n'), ((17800, 17834), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['semi'], {'dim': '(0)'}), '(semi, dim=0)\n', (17821, 17834), True, 'import torch.nn as nn\n'), ((18315, 18327), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (18325, 18327), True, 'import tensorflow as tf\n'), ((18969, 18985), 'numpy.zeros', 'np.zeros', (['(3, 0)'], {}), '((3, 0))\n', (18977, 18985), True, 'import numpy as np\n'), ((20753, 20776), 'torchvision.ops.nms', 'nms', (['boxes', 'scores', 'iou'], {}), '(boxes, scores, iou)\n', (20756, 20776), False, 'from torchvision.ops import nms\n'), ((25342, 25413), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '((erosion_radius * 2,) * 2)'], {}), '(cv2.MORPH_ELLIPSE, (erosion_radius * 2,) * 2)\n', 
(25367, 25413), False, 'import cv2\n'), ((27267, 27282), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27280, 27282), False, 'import torch\n'), ((28242, 28312), 'torch.stack', 'torch.stack', (['(warped_coor_cells[:, 1], warped_coor_cells[:, 0])'], {'dim': '(1)'}), '((warped_coor_cells[:, 1], warped_coor_cells[:, 0]), dim=1)\n', (28253, 28312), False, 'import torch\n'), ((28358, 28410), 'utils.utils.warp_points', 'warp_points', (['warped_coor_cells', 'homographies', 'device'], {}), '(warped_coor_cells, homographies, device)\n', (28369, 28410), False, 'from utils.utils import warp_points\n'), ((28440, 28516), 'torch.stack', 'torch.stack', (['(warped_coor_cells[:, :, 1], warped_coor_cells[:, :, 0])'], {'dim': '(2)'}), '((warped_coor_cells[:, :, 1], warped_coor_cells[:, :, 0]), dim=2)\n', (28451, 28516), False, 'import torch\n'), ((29121, 29155), 'torch.norm', 'torch.norm', (['cell_distances'], {'dim': '(-1)'}), '(cell_distances, dim=-1)\n', (29131, 29155), False, 'import torch\n'), ((30287, 30344), 'torch.ones', 'torch.ones', (['batch_size', '(1)', '(Hc * cell_size)', '(Wc * cell_size)'], {}), '(batch_size, 1, Hc * cell_size, Wc * cell_size)\n', (30297, 30344), False, 'import torch\n'), ((32183, 32207), 'torch.sum', 'torch.sum', (['(pred * labels)'], {}), '(pred * labels)\n', (32192, 32207), False, 'import torch\n'), ((32247, 32271), 'torch.sum', 'torch.sum', (['(pred * labels)'], {}), '(pred * labels)\n', (32256, 32271), False, 'import torch\n'), ((32424, 32483), 'scipy.io.savemat', 'savemat', (['"""pre_recall.mat"""', "{'pred': pred, 'labels': labels}"], {}), "('pre_recall.mat', {'pred': pred, 'labels': labels})\n", (32431, 32483), True, 'import scipy.io.savemat as savemat\n'), ((33785, 33821), 'numpy.random.permutation', 'np.random.permutation', (['in_num_points'], {}), '(in_num_points)\n', (33806, 33821), True, 'import numpy as np\n'), ((33849, 33873), 'numpy.arange', 'np.arange', (['in_num_points'], {}), '(in_num_points)\n', (33858, 33873), True, 'import 
numpy as np\n'), ((34118, 34165), 'numpy.random.choice', 'np.random.choice', (['choice', 'num_pad'], {'replace': '(True)'}), '(choice, num_pad, replace=True)\n', (34134, 34165), True, 'import numpy as np\n'), ((34183, 34212), 'numpy.concatenate', 'np.concatenate', (['[choice, pad]'], {}), '([choice, pad])\n', (34197, 34212), True, 'import numpy as np\n'), ((3073, 3107), 'cv2.applyColorMap', 'cv2.applyColorMap', (['array', 'colormap'], {}), '(array, colormap)\n', (3090, 3107), False, 'import cv2\n'), ((7384, 7413), 'torch.inverse', 'torch.inverse', (['mat_H[i, :, :]'], {}), '(mat_H[i, :, :])\n', (7397, 7413), False, 'import torch\n'), ((7832, 7850), 'torch.tensor', 'torch.tensor', (['pnts'], {}), '(pnts)\n', (7844, 7850), False, 'import torch\n'), ((8720, 8749), 'numpy.ones', 'np.ones', (['(points.shape[0], 1)'], {}), '((points.shape[0], 1))\n', (8727, 8749), True, 'import numpy as np\n'), ((9396, 9416), 'numpy.linalg.inv', 'np.linalg.inv', (['trans'], {}), '(trans)\n', (9409, 9416), True, 'import numpy as np\n'), ((12191, 12215), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)', 'W'], {}), '(-1, 1, W)\n', (12205, 12215), False, 'import torch\n'), ((12217, 12241), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)', 'H'], {}), '(-1, 1, H)\n', (12231, 12241), False, 'import torch\n'), ((14386, 14408), 'torch.unsqueeze', 'torch.unsqueeze', (['dn', '(1)'], {}), '(dn, 1)\n', (14401, 14408), False, 'import torch\n'), ((15309, 15344), 'torch.ones', 'torch.ones', (['(batch_size, 1, Hc, Wc)'], {}), '((batch_size, 1, Hc, Wc))\n', (15319, 15344), False, 'import torch\n'), ((18285, 18300), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (18293, 18300), True, 'import tensorflow as tf\n'), ((20419, 20449), 'torch.nonzero', 'torch.nonzero', (['(prob > min_prob)'], {}), '(prob > min_prob)\n', (20432, 20449), False, 'import torch\n'), ((20568, 20592), 'torch.tensor', 'torch.tensor', (['(size / 2.0)'], {}), '(size / 2.0)\n', (20580, 20592), False, 'import torch\n'), 
((22399, 22415), 'numpy.zeros', 'np.zeros', (['(H, W)'], {}), '((H, W))\n', (22407, 22415), True, 'import numpy as np\n'), ((22458, 22474), 'numpy.zeros', 'np.zeros', (['(H, W)'], {}), '((H, W))\n', (22466, 22474), True, 'import numpy as np\n'), ((25049, 25106), 'torch.ones', 'torch.ones', (['batch_size', '(1)', 'image_shape[0]', 'image_shape[1]'], {}), '(batch_size, 1, image_shape[0], image_shape[1])\n', (25059, 25106), False, 'import torch\n'), ((25474, 25520), 'cv2.erode', 'cv2.erode', (['mask[i, :, :]', 'kernel'], {'iterations': '(1)'}), '(mask[i, :, :], kernel, iterations=1)\n', (25483, 25520), False, 'import cv2\n'), ((25533, 25551), 'torch.tensor', 'torch.tensor', (['mask'], {}), '(mask)\n', (25545, 25551), False, 'import torch\n'), ((32209, 32224), 'torch.sum', 'torch.sum', (['pred'], {}), '(pred)\n', (32218, 32224), False, 'import torch\n'), ((32273, 32290), 'torch.sum', 'torch.sum', (['labels'], {}), '(labels)\n', (32282, 32290), False, 'import torch\n'), ((32704, 32725), 'numpy.sum', 'np.sum', (['(pred * labels)'], {}), '(pred * labels)\n', (32710, 32725), True, 'import numpy as np\n'), ((32766, 32787), 'numpy.sum', 'np.sum', (['(pred * labels)'], {}), '(pred * labels)\n', (32772, 32787), True, 'import numpy as np\n'), ((18380, 18393), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (18388, 18393), True, 'import numpy as np\n'), ((22903, 22939), 'numpy.vstack', 'np.vstack', (['(rcorners, in_corners[2])'], {}), '((rcorners, in_corners[2]))\n', (22912, 22939), True, 'import numpy as np\n'), ((27599, 27615), 'torch.arange', 'torch.arange', (['Hc'], {}), '(Hc)\n', (27611, 27615), False, 'import torch\n'), ((27617, 27633), 'torch.arange', 'torch.arange', (['Wc'], {}), '(Wc)\n', (27629, 27633), False, 'import torch\n'), ((29962, 29979), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (29974, 29979), False, 'import torch\n'), ((30095, 30112), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (30107, 30112), False, 'import torch\n'), 
((32727, 32739), 'numpy.sum', 'np.sum', (['pred'], {}), '(pred)\n', (32733, 32739), True, 'import numpy as np\n'), ((32789, 32803), 'numpy.sum', 'np.sum', (['labels'], {}), '(labels)\n', (32795, 32803), True, 'import numpy as np\n'), ((33080, 33103), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (33101, 33103), False, 'import datetime\n'), ((6628, 6648), 'numpy.random.rand', 'np.random.rand', (['(4)', '(2)'], {}), '(4, 2)\n', (6642, 6648), True, 'import numpy as np\n'), ((10785, 10817), 'torch.ones', 'torch.ones', (['(points.shape[0], 1)'], {}), '((points.shape[0], 1))\n', (10795, 10817), False, 'import torch\n'), ((22804, 22820), 'numpy.zeros', 'np.zeros', (['(3, 0)'], {}), '((3, 0))\n', (22812, 22820), True, 'import numpy as np\n'), ((22834, 22845), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (22842, 22845), True, 'import numpy as np\n'), ((22974, 22985), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (22982, 22985), True, 'import numpy as np\n'), ((3128, 3166), 'cv2.cvtColor', 'cv2.cvtColor', (['colored_array', 'color_cvt'], {}), '(colored_array, color_cvt)\n', (3140, 3166), False, 'import cv2\n'), ((8104, 8124), 'torch.tensor', 'torch.tensor', (['[W, H]'], {}), '([W, H])\n', (8116, 8124), False, 'import torch\n'), ((27432, 27452), 'torch.tensor', 'torch.tensor', (['[H, W]'], {}), '([H, W])\n', (27444, 27452), False, 'import torch\n'), ((28573, 28619), 'torch.tensor', 'torch.tensor', (['[H // cell_size, W // cell_size]'], {}), '([H // cell_size, W // cell_size])\n', (28585, 28619), False, 'import torch\n')]
# Author: <NAME> (<EMAIL>) 05/31/2018 # Modified by <NAME> 07/12/2020 import bpy import mathutils import numpy as np import os import sys import time import pdb import argparse def setup_blender(width, height, focal_length, output_dir): # camera #bpy.ops.object.delete() for m in bpy.data.meshes: bpy.data.meshes.remove(m) for m in bpy.data.materials: m.user_clear() bpy.data.materials.remove(m) # camera_data = bpy.data.cameras.new(name='Camera') # camera_object = bpy.data.objects.new('Camera', camera_data) # bpy.context.scene.collection.objects.link(camera_object) camera = bpy.data.objects['Camera'] camera.data.angle = np.arctan(width / 2 / focal_length) * 2 # camera.data.clip_end = 1.2 # camera.data.clip_start = 0.2 # render layer scene = bpy.context.scene scene.render.filepath = 'buffer' scene.render.image_settings.color_depth = '16' scene.render.resolution_percentage = 100 scene.render.resolution_x = width scene.render.resolution_y = height # compositor nodes scene.use_nodes = True tree = scene.node_tree rl = tree.nodes.new('CompositorNodeRLayers') output = tree.nodes.new('CompositorNodeOutputFile') output.base_path = '' output.format.file_format = 'OPEN_EXR' #tree.links.new(rl.outputs['Z'], output.inputs[0]) tree.links.new(rl.outputs['Depth'], output.inputs[0]) #-Original # remove default cube # bpy.data.objects['Camera'].select=False # bpy.data.objects['Cube'].select = True # bpy.ops.object.delete() # bpy.data.objects['Camera'].select=True return scene, camera, output if __name__ == '__main__': viewspace_path = '/home/alex-pop/Desktop/Doctorat/Program_blender/viewspace_shapenet_33.txt' test_predicted_path ='/home/alex-pop/Desktop/Doctorat/Program_blender/Test_viewstate.txt' output_dir = '/home/alex-pop/Desktop/Doctorat/Program_blender/' data_type = 'test' class_list_path = '/home/alex-pop/Desktop/Doctorat/Backups/Trial_Test_Valid_mat/Train_Test/' + data_type + '/_class.txt' ShapeNetv1_dir = 
'/home/alex-pop/Desktop/Doctorat/Backups/Trial_Test_Valid_mat/Train_Test/'

    with open(os.path.join(class_list_path)) as file:
        class_list = [line.strip() for line in file]
    #print(class_list)

    # viewspace: candidate camera positions; test_viewstate: per-model
    # (initial, predicted) view indices into that viewspace.
    viewspace = np.loadtxt(viewspace_path)
    test_viewstate=np.loadtxt(test_predicted_path)

    width = 640
    height = 480
    focal = 238 * 2
    scene, camera, output = setup_blender(width, height, focal, output_dir)
    intrinsics = np.array([[focal, 0, width / 2], [0, focal, height / 2], [0, 0, 1]])
    np.savetxt(os.path.join(output_dir, 'intrinsics.txt'), intrinsics, '%f')

    # Small on-disk state files drive the restart/step state machine below.
    viewspace_start_filepath='/home/alex-pop/Desktop/Doctorat/Program_blender/Viewspace_start.txt'
    nr_pozitie_filepath='/home/alex-pop/Desktop/Doctorat/Program_blender/nr_pozitie.txt'
    tip_view_path='/home/alex-pop/Desktop/Doctorat/Program_blender/tip_view.txt'
    model_view_path='/home/alex-pop/Desktop/Doctorat/Program_blender/Test_viewstate_model.txt'

    viewsapace_start=np.loadtxt(viewspace_start_filepath)
    nr_pozitie=np.loadtxt(nr_pozitie_filepath)
    tip_view=np.loadtxt(tip_view_path)

    with open(os.path.join(model_view_path)) as file:
        model_id_list = [line.strip() for line in file]

    # viewspace_start=
    # nr_pozitie=open('nr_pozitie.txt', 'w+')
    # tip_view=open('nr_pozitie.txt', 'w+')

    exr_dir = os.path.join(output_dir, 'exr', "02654")
    pose_dir = os.path.join(output_dir, 'pose', "02654")
    os.makedirs(exr_dir, exist_ok=True)
    os.makedirs(pose_dir, exist_ok=True)

    bpy.data.objects['Camera'].select=False
    bpy.data.objects['Camera'].select=True
    #bpy.context.object.scan_type='kinect'
    #bpy.context.object.save_scan=True

    # Redirect output to log file
    # Rotate model by 90 degrees around x-axis (z-up => y-up) to match ShapeNet's coordinates
    #bpy.ops.transform.rotate(value=-np.pi / 2, axis=(1, 0, 0)) # ->original code
    #bpy.ops.transform.rotate(value=-np.pi / 2, orient_axis=(1, 0, 0))
    #bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
    #bpy.ops.transform.rotate(value=-np.pi / 2, orient_axis='Z')

    # Render
    # for i in range(viewspace.shape[0]):
range(viewspace.shape[0]): t_view=open('/home/alex-pop/Desktop/Doctorat/Program_blender/tip_view.txt', 'w+') if(int(viewsapace_start)==0): print('restart') vs_start=open('Viewspace_start.txt', 'w+') nr_poz=open('nr_pozitie.txt', 'w+') vs_start.write("1") vs_start.close() nr_poz.write('0') nr_poz.close() t_view.write('0') t_view.close() else: i=int(nr_pozitie) print("Nr_pozitie:"+str(i)) working_model=model_id_list[i] print(model_id_list[i]) for class_id in class_list: model_list = os.listdir(os.path.join(ShapeNetv1_dir, data_type, class_id)) if(working_model in model_list): model_path = os.path.join(ShapeNetv1_dir, data_type, class_id, working_model, 'model.obj') bpy.ops.import_scene.obj(filepath=model_path) bpy.ops.transform.rotate(value=-np.pi / 2, axis=(1, 0, 0)) break if(int(tip_view)==0): pozitie_actuala=int(test_viewstate[i][0]) cam_pose = mathutils.Vector((viewspace[pozitie_actuala][0], viewspace[pozitie_actuala][1], viewspace[pozitie_actuala][2])) center_pose = mathutils.Vector((0, 0, 0)) direct = center_pose - cam_pose rot_quat = direct.to_track_quat('-Z', 'Y') camera.rotation_euler = rot_quat.to_euler() camera.location = cam_pose t_view.write('1') t_view.close() print('Nr:'+str(i)+" Initial position nr:"+str(pozitie_actuala)) else: nr_poz=open('nr_pozitie.txt', 'w+') pozitie_prezisa=int(test_viewstate[i][1]) cam_pose = mathutils.Vector((viewspace[pozitie_prezisa][0], viewspace[pozitie_prezisa][1], viewspace[pozitie_prezisa][2])) center_pose = mathutils.Vector((0, 0, 0)) direct = center_pose - cam_pose rot_quat = direct.to_track_quat('-Z', 'Y') camera.rotation_euler = rot_quat.to_euler() camera.location = cam_pose t_view.write('0') t_view.close() print('Nr:'+str(i)+" Predicted position nr:"+str(pozitie_prezisa)) i=i+1 nr_poz.write(str(i)) nr_poz.close() # Clean up # i=0 # scene.frame_set(i) # np.savetxt(os.path.join(pose_dir, '%d.txt' % i), pose_matrix, '%f')
[ "bpy.data.meshes.remove", "mathutils.Vector", "os.makedirs", "bpy.ops.transform.rotate", "os.path.join", "numpy.array", "bpy.data.materials.remove", "bpy.ops.import_scene.obj", "numpy.loadtxt", "numpy.arctan" ]
[((2388, 2414), 'numpy.loadtxt', 'np.loadtxt', (['viewspace_path'], {}), '(viewspace_path)\n', (2398, 2414), True, 'import numpy as np\n'), ((2434, 2465), 'numpy.loadtxt', 'np.loadtxt', (['test_predicted_path'], {}), '(test_predicted_path)\n', (2444, 2465), True, 'import numpy as np\n'), ((2614, 2682), 'numpy.array', 'np.array', (['[[focal, 0, width / 2], [0, focal, height / 2], [0, 0, 1]]'], {}), '([[focal, 0, width / 2], [0, focal, height / 2], [0, 0, 1]])\n', (2622, 2682), True, 'import numpy as np\n'), ((3149, 3185), 'numpy.loadtxt', 'np.loadtxt', (['viewspace_start_filepath'], {}), '(viewspace_start_filepath)\n', (3159, 3185), True, 'import numpy as np\n'), ((3201, 3232), 'numpy.loadtxt', 'np.loadtxt', (['nr_pozitie_filepath'], {}), '(nr_pozitie_filepath)\n', (3211, 3232), True, 'import numpy as np\n'), ((3246, 3271), 'numpy.loadtxt', 'np.loadtxt', (['tip_view_path'], {}), '(tip_view_path)\n', (3256, 3271), True, 'import numpy as np\n'), ((3540, 3580), 'os.path.join', 'os.path.join', (['output_dir', '"""exr"""', '"""02654"""'], {}), "(output_dir, 'exr', '02654')\n", (3552, 3580), False, 'import os\n'), ((3596, 3637), 'os.path.join', 'os.path.join', (['output_dir', '"""pose"""', '"""02654"""'], {}), "(output_dir, 'pose', '02654')\n", (3608, 3637), False, 'import os\n'), ((3642, 3677), 'os.makedirs', 'os.makedirs', (['exr_dir'], {'exist_ok': '(True)'}), '(exr_dir, exist_ok=True)\n', (3653, 3677), False, 'import os\n'), ((3682, 3718), 'os.makedirs', 'os.makedirs', (['pose_dir'], {'exist_ok': '(True)'}), '(pose_dir, exist_ok=True)\n', (3693, 3718), False, 'import os\n'), ((322, 347), 'bpy.data.meshes.remove', 'bpy.data.meshes.remove', (['m'], {}), '(m)\n', (344, 347), False, 'import bpy\n'), ((412, 440), 'bpy.data.materials.remove', 'bpy.data.materials.remove', (['m'], {}), '(m)\n', (437, 440), False, 'import bpy\n'), ((694, 729), 'numpy.arctan', 'np.arctan', (['(width / 2 / focal_length)'], {}), '(width / 2 / focal_length)\n', (703, 729), True, 'import numpy as 
np\n'), ((2700, 2742), 'os.path.join', 'os.path.join', (['output_dir', '"""intrinsics.txt"""'], {}), "(output_dir, 'intrinsics.txt')\n", (2712, 2742), False, 'import os\n'), ((2241, 2270), 'os.path.join', 'os.path.join', (['class_list_path'], {}), '(class_list_path)\n', (2253, 2270), False, 'import os\n'), ((3294, 3323), 'os.path.join', 'os.path.join', (['model_view_path'], {}), '(model_view_path)\n', (3306, 3323), False, 'import os\n'), ((5508, 5624), 'mathutils.Vector', 'mathutils.Vector', (['(viewspace[pozitie_actuala][0], viewspace[pozitie_actuala][1], viewspace[\n pozitie_actuala][2])'], {}), '((viewspace[pozitie_actuala][0], viewspace[pozitie_actuala]\n [1], viewspace[pozitie_actuala][2]))\n', (5524, 5624), False, 'import mathutils\n'), ((5647, 5674), 'mathutils.Vector', 'mathutils.Vector', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (5663, 5674), False, 'import mathutils\n'), ((6149, 6265), 'mathutils.Vector', 'mathutils.Vector', (['(viewspace[pozitie_prezisa][0], viewspace[pozitie_prezisa][1], viewspace[\n pozitie_prezisa][2])'], {}), '((viewspace[pozitie_prezisa][0], viewspace[pozitie_prezisa]\n [1], viewspace[pozitie_prezisa][2]))\n', (6165, 6265), False, 'import mathutils\n'), ((6288, 6315), 'mathutils.Vector', 'mathutils.Vector', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (6304, 6315), False, 'import mathutils\n'), ((5011, 5060), 'os.path.join', 'os.path.join', (['ShapeNetv1_dir', 'data_type', 'class_id'], {}), '(ShapeNetv1_dir, data_type, class_id)\n', (5023, 5060), False, 'import os\n'), ((5139, 5216), 'os.path.join', 'os.path.join', (['ShapeNetv1_dir', 'data_type', 'class_id', 'working_model', '"""model.obj"""'], {}), "(ShapeNetv1_dir, data_type, class_id, working_model, 'model.obj')\n", (5151, 5216), False, 'import os\n'), ((5233, 5278), 'bpy.ops.import_scene.obj', 'bpy.ops.import_scene.obj', ([], {'filepath': 'model_path'}), '(filepath=model_path)\n', (5257, 5278), False, 'import bpy\n'), ((5296, 5354), 'bpy.ops.transform.rotate', 'bpy.ops.transform.rotate', 
([], {'value': '(-np.pi / 2)', 'axis': '(1, 0, 0)'}), '(value=-np.pi / 2, axis=(1, 0, 0))\n', (5320, 5354), False, 'import bpy\n')]
import pytest import os import numpy as np import dxx @pytest.fixture(scope="module") def mock_data_file() -> str: mock_file_name = "mock.DSB" sampling_freq = 48000 mock_data = np.arange(5 * sampling_freq, dtype=np.int16) dxx.write(mock_file_name, mock_data) yield mock_file_name os.remove(mock_file_name)
[ "pytest.fixture", "dxx.write", "numpy.arange", "os.remove" ]
[((59, 89), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (73, 89), False, 'import pytest\n'), ((193, 237), 'numpy.arange', 'np.arange', (['(5 * sampling_freq)'], {'dtype': 'np.int16'}), '(5 * sampling_freq, dtype=np.int16)\n', (202, 237), True, 'import numpy as np\n'), ((242, 278), 'dxx.write', 'dxx.write', (['mock_file_name', 'mock_data'], {}), '(mock_file_name, mock_data)\n', (251, 278), False, 'import dxx\n'), ((308, 333), 'os.remove', 'os.remove', (['mock_file_name'], {}), '(mock_file_name)\n', (317, 333), False, 'import os\n')]
import numpy as np import argparse import os import random from tensorflow.python.training.tracking.util import Checkpoint os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import time import json import tensorflow as tf from tensorflow.keras import optimizers from tensorflow.keras.layers import * from tensorflow.keras.models import Sequential, Model, load_model from tensorflow.keras.optimizers import Adam, RMSprop from tensorflow.keras.callbacks import Callback, ReduceLROnPlateau from tensorflow.keras.models import load_model from train_tf import RnnParameterData from json import encoder from model_tf import TrajPreSimple, TrajPreAttnAvgLongUser, TrajPreLocalAttnLong from train_tf import train_model, RnnParameterData, generate_input_history, markov, \ generate_input_long_history, generate_input_long_history2, generate_geolife_data, \ preprocess_data encoder.FLOAT_REPR = lambda o: format(o, '.3f') def run(args): parameters = RnnParameterData(loc_emb_size=args.loc_emb_size, uid_emb_size=args.uid_emb_size, voc_emb_size=args.voc_emb_size, tim_emb_size=args.tim_emb_size, hidden_size=args.hidden_size, dropout_p=args.dropout_p, data_name=args.data_name, lr=args.learning_rate, lr_step=args.lr_step, lr_decay=args.lr_decay, L2=args.L2, rnn_type=args.rnn_type, optim=args.optim, attn_type=args.attn_type, clip=args.clip, epoch_max=args.epoch_max, history_mode=args.history_mode, model_mode=args.model_mode, data_path=args.data_path, save_path=args.save_path, plot_user_traj=args.plot_user_traj, use_geolife_data=args.use_geolife_data) argv = {'loc_emb_size': args.loc_emb_size, 'uid_emb_size': args.uid_emb_size, 'voc_emb_size': args.voc_emb_size, 'tim_emb_size': args.tim_emb_size, 'hidden_size': args.hidden_size, 'dropout_p': args.dropout_p, 'data_name': args.data_name, 'learning_rate': args.learning_rate, 'lr_step': args.lr_step, 'lr_decay': args.lr_decay, 'L2': args.L2, 'act_type': 'selu', 'optim': args.optim, 'attn_type': args.attn_type, 'clip': args.clip, 'rnn_type': args.rnn_type, 
'epoch_max': args.epoch_max, 'history_mode': args.history_mode, 'model_mode': args.model_mode, 'plot_user_traj': args.plot_user_traj,'use_geolife_data': args.use_geolife_data} print('*' * 15 + 'start training' + '*' * 15) print('model_mode:{} history_mode:{} users:{}'.format( parameters.model_mode, parameters.history_mode, parameters.uid_size)) candidate = parameters.data_neural.keys() if parameters.use_geolife_data: data_np_raw = np.load(parameters.data_path + '1000_0_05_00.npz', allow_pickle=True)['array1'] whole_candidate = list(range(42)) candidate = random.sample(whole_candidate, 42) data_np, parameters.loc_size = preprocess_data(data_np_raw, candidate) data_train, train_idx = generate_geolife_data(data_np,'train', candidate) data_test, test_idx = generate_geolife_data(data_np,'test', candidate) # train_idx = else: if 'long' in parameters.model_mode: long_history = True else: long_history = False if long_history is False: data_train, train_idx = generate_input_history(parameters.data_neural, 'train', mode2=parameters.history_mode, candidate=candidate) data_test, test_idx = generate_input_history(parameters.data_neural, 'test', mode2=parameters.history_mode, candidate=candidate) if -1 < parameters.plot_user_traj < 886: user_trained, user_idx = generate_input_history(parameters.data_neural, 'train', mode2=parameters.history_mode, candidate=[parameters.plot_user_traj]) elif long_history is True: if parameters.model_mode == 'simple_long': data_train, train_idx = generate_input_long_history2( parameters.data_neural, 'train', candidate=candidate) data_test, test_idx = generate_input_long_history2( parameters.data_neural, 'test', candidate=candidate) if -1 < parameters.plot_user_traj < 886: user_trained, user_idx = generate_input_long_history2(parameters.data_neural, 'train', candidate=[parameters.plot_user_traj]) else: data_train, train_idx = generate_input_long_history( parameters.data_neural, 'train', candidate=candidate) data_test, test_idx = 
generate_input_long_history( parameters.data_neural, 'test', candidate=candidate) if -1 < parameters.plot_user_traj < 886: user_trained, user_idx = generate_input_long_history(parameters.data_neural, 'train', candidate=[parameters.plot_user_traj]) if parameters.model_mode in ['simple', 'simple_long']: model = TrajPreSimple(parameters=parameters) elif parameters.model_mode == 'attn_avg_long_user': model = TrajPreAttnAvgLongUser(parameters=parameters) elif parameters.model_mode == 'attn_local_long': model = TrajPreLocalAttnLong(parameters=parameters) if args.pretrain == 1: pass model.compile( optimizer = Adam( learning_rate=parameters.lr, clipnorm=parameters.clip ), run_eagerly=True, loss = tf.keras.losses.SparseCategoricalCrossentropy(), metrics=[ tf.keras.losses.SparseCategoricalCrossentropy(), tf.keras.metrics.SparseCategoricalAccuracy(), ]) reduce_lr = ReduceLROnPlateau(monitor = 'sparse_categorical_crossentropy', factor=parameters.lr_decay, patience=parameters.lr_step, min_lr=parameters.min_lr) avg_acc_markov, users_acc_markov = markov(parameters, candidate) metrics = {'train_loss': [], 'valid_loss': [], 'accuracy': [], 'valid_acc': {}, 'lr': []} metrics['markov_acc'] = users_acc_markov print('users:{} markov:{} train:{} test:{}'.format(len(candidate), avg_acc_markov, len([ y for x in train_idx for y in train_idx[x]]), len([y for x in test_idx for y in test_idx[x]]))) show_per_epoch = 1 lr_last = lr = np.float32(parameters.lr) checkpoint_path = "training/cp-{epoch:04d}.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) temp_model_path = "training/" + parameters.model_mode + "/tp-{epoch:04d}" # continue to train the model # model.load_weights(temp_model_path.format(epoch=47)).expect_partial() model.compile( optimizer = Adam( learning_rate=parameters.lr, clipnorm=parameters.clip ), run_eagerly=True, loss = tf.keras.losses.SparseCategoricalCrossentropy(), metrics=[ tf.keras.losses.SparseCategoricalCrossentropy(), tf.keras.metrics.SparseCategoricalAccuracy(), 
]) for epoch in range(parameters.epoch): if args.pretrain == 0: if lr < lr_last: model.load_weights(temp_model_path.format(epoch=np.argmax(metrics['accuracy']))) model, history = train_model(model, data_train, train_idx, parameters.model_mode, reduce_lr, -1, parameters.use_geolife_data, Train=True) model.save_weights(temp_model_path.format(epoch=epoch)) # loss', 'sparse_categorical_crossentropy', 'sparse_categorical_accuracy', 'lr lr_last, lr = lr, (history['lr'][0]) if not (epoch % show_per_epoch): if not parameters.use_geolife_data and parameters.plot_user_traj != -1: train_model(model, user_trained, user_idx, parameters.model_mode, reduce_lr, parameters.plot_user_traj, parameters.use_geolife_data, Train=False) result = train_model(model, data_test, test_idx, parameters.model_mode, reduce_lr, -1, parameters.use_geolife_data, Train=False) print(result) metrics['lr'].append(lr) metrics['train_loss'].extend(history['sparse_categorical_crossentropy']) metrics['valid_loss'].append(result[0]) metrics['accuracy'].append(result[1]) model.save_weights('my_model/' + parameters.model_mode + '/final-{epoch:04d}'.format(epoch=np.argmax(metrics['accuracy']))) save_name = '_res' json.dump({'args': eval(str(argv)), 'metrics': eval(str(metrics))}, fp=open( args.save_path + parameters.model_mode + save_name + '.rs', 'w'), indent=4) metrics_view = {'train_loss': [], 'valid_loss': [], 'accuracy': []} for key in metrics_view: metrics_view[key] = metrics[key] json.dump({'args': eval(str(argv)), 'metrics': eval(str(metrics_view))}, fp=open( args.save_path + parameters.model_mode + save_name + '.txt', 'w'), indent=4) # model.save_weights(temp_model_path.format(epoch=np.argmax(metrics['accuracy']))) # for rt, dirs, files in os.walk(checkpoint_dir): # for name in files: # remove_path = os.path.join(rt, name) # os.remove(remove_path) # model.save_weights(temp_model_path.format(epoch=np.argmax(metrics['accuracy']))) def load_pretrained_model(args): pass if __name__ == '__main__': 
np.random.seed(1) parser = argparse.ArgumentParser() parser.add_argument('--loc_emb_size', type=int, default=100, help="location embeddings size") parser.add_argument('--uid_emb_size', type=int, default=40, help="user id embeddings size") parser.add_argument('--voc_emb_size', type=int, default=25, help="words embeddings size") parser.add_argument('--tim_emb_size', type=int, default=10, help="time embeddings size") parser.add_argument('--hidden_size', type=int, default=300) parser.add_argument('--dropout_p', type=float, default=0.6) parser.add_argument('--data_name', type=str, default='foursquare') parser.add_argument('--learning_rate', type=float, default=0.001) parser.add_argument('--lr_step', type=int, default=1) parser.add_argument('--lr_decay', type=float, default=0.1) parser.add_argument('--optim', type=str, default='Adam', choices=['Adam', 'SGD']) parser.add_argument('--L2', type=float, default=1 * 1e-5, help=" weight decay (L2 penalty)") parser.add_argument('--clip', type=float, default=2) parser.add_argument('--epoch_max', type=int, default=50) parser.add_argument('--history_mode', type=str, default='avg', choices=['max', 'avg', 'whole']) parser.add_argument('--rnn_type', type=str, default='LSTM', choices=['LSTM', 'GRU', 'RNN']) parser.add_argument('--attn_type', type=str, default='dot', choices=['general', 'concat', 'dot']) parser.add_argument('--data_path', type=str, default='../data/') parser.add_argument('--save_path', type=str, default='../results/') parser.add_argument('--model_mode', type=str, default='simple', choices=['simple', 'simple_long', 'attn_avg_long_user', 'attn_local_long']) parser.add_argument('--pretrain', type=int, default=0) parser.add_argument('--min-lr', type=float, default=1e-5) parser.add_argument('--plot_user_traj', type=int, default=4) parser.add_argument('--use_geolife_data', type=bool, default=False) args = parser.parse_args() if args.pretrain == 1: args = load_pretrained_model(args) ours_acc = run(args)
[ "train_tf.train_model", "model_tf.TrajPreLocalAttnLong", "train_tf.generate_input_history", "model_tf.TrajPreAttnAvgLongUser", "argparse.ArgumentParser", "tensorflow.keras.callbacks.ReduceLROnPlateau", "train_tf.markov", "numpy.random.seed", "random.sample", "tensorflow.keras.losses.SparseCategori...
[((946, 1588), 'train_tf.RnnParameterData', 'RnnParameterData', ([], {'loc_emb_size': 'args.loc_emb_size', 'uid_emb_size': 'args.uid_emb_size', 'voc_emb_size': 'args.voc_emb_size', 'tim_emb_size': 'args.tim_emb_size', 'hidden_size': 'args.hidden_size', 'dropout_p': 'args.dropout_p', 'data_name': 'args.data_name', 'lr': 'args.learning_rate', 'lr_step': 'args.lr_step', 'lr_decay': 'args.lr_decay', 'L2': 'args.L2', 'rnn_type': 'args.rnn_type', 'optim': 'args.optim', 'attn_type': 'args.attn_type', 'clip': 'args.clip', 'epoch_max': 'args.epoch_max', 'history_mode': 'args.history_mode', 'model_mode': 'args.model_mode', 'data_path': 'args.data_path', 'save_path': 'args.save_path', 'plot_user_traj': 'args.plot_user_traj', 'use_geolife_data': 'args.use_geolife_data'}), '(loc_emb_size=args.loc_emb_size, uid_emb_size=args.\n uid_emb_size, voc_emb_size=args.voc_emb_size, tim_emb_size=args.\n tim_emb_size, hidden_size=args.hidden_size, dropout_p=args.dropout_p,\n data_name=args.data_name, lr=args.learning_rate, lr_step=args.lr_step,\n lr_decay=args.lr_decay, L2=args.L2, rnn_type=args.rnn_type, optim=args.\n optim, attn_type=args.attn_type, clip=args.clip, epoch_max=args.\n epoch_max, history_mode=args.history_mode, model_mode=args.model_mode,\n data_path=args.data_path, save_path=args.save_path, plot_user_traj=args\n .plot_user_traj, use_geolife_data=args.use_geolife_data)\n', (962, 1588), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((6134, 6282), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""sparse_categorical_crossentropy"""', 'factor': 'parameters.lr_decay', 'patience': 'parameters.lr_step', 'min_lr': 'parameters.min_lr'}), "(monitor='sparse_categorical_crossentropy', factor=\n parameters.lr_decay, patience=parameters.lr_step, min_lr=parameters.min_lr)\n", (6151, 6282), False, 'from 
tensorflow.keras.callbacks import Callback, ReduceLROnPlateau\n'), ((6354, 6383), 'train_tf.markov', 'markov', (['parameters', 'candidate'], {}), '(parameters, candidate)\n', (6360, 6383), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((6943, 6968), 'numpy.float32', 'np.float32', (['parameters.lr'], {}), '(parameters.lr)\n', (6953, 6968), True, 'import numpy as np\n'), ((7043, 7075), 'os.path.dirname', 'os.path.dirname', (['checkpoint_path'], {}), '(checkpoint_path)\n', (7058, 7075), False, 'import os\n'), ((10006, 10023), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (10020, 10023), True, 'import numpy as np\n'), ((10038, 10063), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10061, 10063), False, 'import argparse\n'), ((2973, 3007), 'random.sample', 'random.sample', (['whole_candidate', '(42)'], {}), '(whole_candidate, 42)\n', (2986, 3007), False, 'import random\n'), ((3047, 3086), 'train_tf.preprocess_data', 'preprocess_data', (['data_np_raw', 'candidate'], {}), '(data_np_raw, candidate)\n', (3062, 3086), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((3119, 3169), 'train_tf.generate_geolife_data', 'generate_geolife_data', (['data_np', '"""train"""', 'candidate'], {}), "(data_np, 'train', candidate)\n", (3140, 3169), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((3199, 3248), 'train_tf.generate_geolife_data', 'generate_geolife_data', (['data_np', '"""test"""', 'candidate'], {}), "(data_np, 'test', candidate)\n", (3220, 3248), False, 'from train_tf import train_model, 
RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((5406, 5442), 'model_tf.TrajPreSimple', 'TrajPreSimple', ([], {'parameters': 'parameters'}), '(parameters=parameters)\n', (5419, 5442), False, 'from model_tf import TrajPreSimple, TrajPreAttnAvgLongUser, TrajPreLocalAttnLong\n'), ((2831, 2900), 'numpy.load', 'np.load', (["(parameters.data_path + '1000_0_05_00.npz')"], {'allow_pickle': '(True)'}), "(parameters.data_path + '1000_0_05_00.npz', allow_pickle=True)\n", (2838, 2900), True, 'import numpy as np\n'), ((3484, 3596), 'train_tf.generate_input_history', 'generate_input_history', (['parameters.data_neural', '"""train"""'], {'mode2': 'parameters.history_mode', 'candidate': 'candidate'}), "(parameters.data_neural, 'train', mode2=parameters.\n history_mode, candidate=candidate)\n", (3506, 3596), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((3682, 3793), 'train_tf.generate_input_history', 'generate_input_history', (['parameters.data_neural', '"""test"""'], {'mode2': 'parameters.history_mode', 'candidate': 'candidate'}), "(parameters.data_neural, 'test', mode2=parameters.\n history_mode, candidate=candidate)\n", (3704, 3793), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((5515, 5560), 'model_tf.TrajPreAttnAvgLongUser', 'TrajPreAttnAvgLongUser', ([], {'parameters': 'parameters'}), '(parameters=parameters)\n', (5537, 5560), False, 'from model_tf import TrajPreSimple, TrajPreAttnAvgLongUser, TrajPreLocalAttnLong\n'), ((5757, 5816), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'parameters.lr', 'clipnorm': 'parameters.clip'}), 
'(learning_rate=parameters.lr, clipnorm=parameters.clip)\n', (5761, 5816), False, 'from tensorflow.keras.optimizers import Adam, RMSprop\n'), ((5913, 5960), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), '()\n', (5958, 5960), True, 'import tensorflow as tf\n'), ((7307, 7366), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'parameters.lr', 'clipnorm': 'parameters.clip'}), '(learning_rate=parameters.lr, clipnorm=parameters.clip)\n', (7311, 7366), False, 'from tensorflow.keras.optimizers import Adam, RMSprop\n'), ((7463, 7510), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), '()\n', (7508, 7510), True, 'import tensorflow as tf\n'), ((7927, 8051), 'train_tf.train_model', 'train_model', (['model', 'data_train', 'train_idx', 'parameters.model_mode', 'reduce_lr', '(-1)', 'parameters.use_geolife_data'], {'Train': '(True)'}), '(model, data_train, train_idx, parameters.model_mode, reduce_lr,\n -1, parameters.use_geolife_data, Train=True)\n', (7938, 8051), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((3939, 4069), 'train_tf.generate_input_history', 'generate_input_history', (['parameters.data_neural', '"""train"""'], {'mode2': 'parameters.history_mode', 'candidate': '[parameters.plot_user_traj]'}), "(parameters.data_neural, 'train', mode2=parameters.\n history_mode, candidate=[parameters.plot_user_traj])\n", (3961, 4069), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((5630, 5673), 'model_tf.TrajPreLocalAttnLong', 'TrajPreLocalAttnLong', ([], {'parameters': 'parameters'}), '(parameters=parameters)\n', (5650, 5673), False, 'from 
model_tf import TrajPreSimple, TrajPreAttnAvgLongUser, TrajPreLocalAttnLong\n'), ((6000, 6047), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), '()\n', (6045, 6047), True, 'import tensorflow as tf\n'), ((6065, 6109), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {}), '()\n', (6107, 6109), True, 'import tensorflow as tf\n'), ((7550, 7597), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), '()\n', (7595, 7597), True, 'import tensorflow as tf\n'), ((7615, 7659), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {}), '()\n', (7657, 7659), True, 'import tensorflow as tf\n'), ((8593, 8717), 'train_tf.train_model', 'train_model', (['model', 'data_test', 'test_idx', 'parameters.model_mode', 'reduce_lr', '(-1)', 'parameters.use_geolife_data'], {'Train': '(False)'}), '(model, data_test, test_idx, parameters.model_mode, reduce_lr, -\n 1, parameters.use_geolife_data, Train=False)\n', (8604, 8717), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((4251, 4338), 'train_tf.generate_input_long_history2', 'generate_input_long_history2', (['parameters.data_neural', '"""train"""'], {'candidate': 'candidate'}), "(parameters.data_neural, 'train', candidate=\n candidate)\n", (4279, 4338), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((4393, 4479), 'train_tf.generate_input_long_history2', 'generate_input_long_history2', (['parameters.data_neural', '"""test"""'], {'candidate': 'candidate'}), "(parameters.data_neural, 'test', candidate=\n candidate)\n", 
(4421, 4479), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((4817, 4903), 'train_tf.generate_input_long_history', 'generate_input_long_history', (['parameters.data_neural', '"""train"""'], {'candidate': 'candidate'}), "(parameters.data_neural, 'train', candidate=\n candidate)\n", (4844, 4903), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((4958, 5043), 'train_tf.generate_input_long_history', 'generate_input_long_history', (['parameters.data_neural', '"""test"""'], {'candidate': 'candidate'}), "(parameters.data_neural, 'test', candidate=candidate\n )\n", (4985, 5043), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((8422, 8571), 'train_tf.train_model', 'train_model', (['model', 'user_trained', 'user_idx', 'parameters.model_mode', 'reduce_lr', 'parameters.plot_user_traj', 'parameters.use_geolife_data'], {'Train': '(False)'}), '(model, user_trained, user_idx, parameters.model_mode, reduce_lr,\n parameters.plot_user_traj, parameters.use_geolife_data, Train=False)\n', (8433, 8571), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((9046, 9076), 'numpy.argmax', 'np.argmax', (["metrics['accuracy']"], {}), "(metrics['accuracy'])\n", (9055, 9076), True, 'import numpy as np\n'), ((4598, 4703), 'train_tf.generate_input_long_history2', 'generate_input_long_history2', (['parameters.data_neural', '"""train"""'], {'candidate': '[parameters.plot_user_traj]'}), "(parameters.data_neural, 'train', 
candidate=[\n parameters.plot_user_traj])\n", (4626, 4703), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((5162, 5266), 'train_tf.generate_input_long_history', 'generate_input_long_history', (['parameters.data_neural', '"""train"""'], {'candidate': '[parameters.plot_user_traj]'}), "(parameters.data_neural, 'train', candidate=[\n parameters.plot_user_traj])\n", (5189, 5266), False, 'from train_tf import train_model, RnnParameterData, generate_input_history, markov, generate_input_long_history, generate_input_long_history2, generate_geolife_data, preprocess_data\n'), ((7835, 7865), 'numpy.argmax', 'np.argmax', (["metrics['accuracy']"], {}), "(metrics['accuracy'])\n", (7844, 7865), True, 'import numpy as np\n')]
import os import numpy as np import json from detectron2.structures import BoxMode import itertools from detectron2.data import DatasetCatalog, MetadataCatalog from detectron2.data.datasets import register_coco_instances, load_coco_json from detectron2.data import detection_utils as utils import detectron2.data.transforms as T from detectron2.config import get_cfg import copy import torch import cv2 from skimage import io from detectron2.engine import DefaultTrainer from detectron2.data import build_detection_test_loader, build_detection_train_loader from detectron2.utils.visualizer import ColorMode, Visualizer from detectron2.evaluation import COCOEvaluator from detectron2.data import DatasetMapper from LossEvalHook import LossEvalHook from detectron2.modeling import build_model from detectron2.checkpoint import DetectionCheckpointer class CustomTrainer(DefaultTrainer): @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") return COCOEvaluator(dataset_name, cfg, True, output_folder) def build_hooks(self): hooks = super().build_hooks() hooks.insert(-1,LossEvalHook( self.cfg.TEST.EVAL_PERIOD, self.model, build_detection_test_loader( self.cfg, self.cfg.DATASETS.TEST[0], DatasetMapper(self.cfg,True) ) )) return hooks def custom_mapper(dataset_list): dataset_list = copy.deepcopy(dataset_list) # it will be modified by code below l = len(dataset_list) image = utils.read_image(dataset_list["file_name"], format=None) # transform_list = [ # T.Resize((800,800)) # # T.Resize((800,800)), # # T.RandomBrightness(0.8, 1.8), # # T.RandomContrast(0.6, 1.3), # # T.RandomSaturation(0.8, 1.4), # # T.RandomRotation(angle=[90, 90]), # # T.RandomLighting(0.7), # # T.RandomFlip(prob=0.4, horizontal=False, vertical=True), # ] # image, transforms = T.apply_transform_gens(transform_list, image) # dataset_list["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32")) 
dataset_list["image"] = torch.as_tensor(image.astype("float32")) annos = [ utils.transform_instance_annotations(obj, [], image.shape[:2]) for obj in dataset_list.pop("annotations") if obj.get("iscrowd", 0) == 0 ] instances = utils.annotations_to_instances(annos, image.shape[:2]) dataset_list["instances"] = utils.filter_empty_instances(instances) return dataset_list def create_cfg(weights_root, name_of_dataset_train, name_of_dataset_test): cfg = get_cfg() cfg.OUTPUT_DIR = weights_root cfg.DATASETS.TRAIN = (name_of_dataset_train,) if name_of_dataset_test == "": cfg.DATASETS.TEST = () else: cfg.DATASETS.TEST = (name_of_dataset_test,) cfg.TEST.EVAL_PERIOD = 25 # 50 cfg.MODEL.DEVICE = "cpu" # cpu or cuda # cfg.INPUT.RANDOM_FLIP = "horizontal" # cfg.DATALOADER.NUM_WORKERS = 8 # cfg.SOLVER.IMS_PER_BATCH = 1 # ?????????? cfg.MODEL.PIXEL_MEAN = [0.0] # len(PIXEL_MEAN) -> input_shape C:\Users\savchenko.bs\Desktop\new_placement\detectron2\detectron2\layers\shape_spec.py cfg.MODEL.PIXEL_STD = [1.0] cfg.DATALOADER.NUM_WORKERS = 0 # Total number of RoIs per training minibatch = # ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH # E.g., a common configuration is: 512 * 16 = 8192 cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 512 cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 # 512 cfg.SOLVER.IMS_PER_BATCH = 5 # 16 cfg.SOLVER.CHECKPOINT_PERIOD = 250 cfg.SOLVER.BASE_LR = 0.01 # 0.003 # 0.0025 cfg.SOLVER.MAX_ITER = 15000 # 2000 cfg.MODEL.ROI_HEADS.NUM_CLASSES = 4 cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 2000 cfg.MODEL.RPN.PRE_NMS_TOPK_TEST = 10000 cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000 cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 10000 cfg.TEST.DETECTIONS_PER_IMAGE = 200 # cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[122.48, 158.97, 86.08, 71.7]] # # P3_C, KC_135, C_5, B_52 # cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [ # [1.5, 2.17, 2.28, 1.83] # ] # cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [ # [0.5, 1.0, 1.8, 2.17, 2.3] # ] # cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [ # [1.0, 1.8], [1.0, 2.3], [1.0, 
2.17] # ] return cfg def write_cfg(cfg, full_cfg_path): with open(full_cfg_path, "w") as f: f.write(cfg.dump()) return full_cfg_path def write_weights_from_cfg(cfg, saving_dir, weights_name): cfg.OUTPUT_DIR = saving_dir model = build_model(cfg) checkpointer = DetectionCheckpointer(model, save_dir=saving_dir) checkpointer.save(weights_name) return saving_dir + "/" + weights_name def visualize_img(name_of_dataset, ind_img): res = DatasetCatalog.get(name_of_dataset) if ind_img > len(res): return if res[ind_img]["file_name"].lower().endswith(('.tiff', '.tif')): img = io.imread(res[ind_img]["file_name"]) else: img = cv2.imread(res[ind_img]["file_name"]) print(img.shape) if len(img.shape) == 2: img = np.stack((img,) * 3, axis=-1) visualizer = Visualizer( img[:, :, ::-1], metadata=MetadataCatalog.get(name_of_dataset), scale=0.8 ) vis = visualizer.draw_dataset_dict(res[ind_img]) cv2.imshow(f"Image #{ind_img}", vis.get_image()[:, :, ::-1]) cv2.waitKey(0) return def main(): # registration dataset name_of_train_dataset = "coco_Planes_detection_Train" name_of_test_dataset = "coco_Planes_detection_Test" imgs_root = "C:/Users/savchenko.bs/Desktop/new_placement/detectron2/Dataset_10k" train_path_annotation = "C:/Users/savchenko.bs/Desktop/new_placement/detectron2/Dataset_10k/Train_Data.json" test_path_annotation = "C:/Users/savchenko.bs/Desktop/new_placement/detectron2/weights/RDP_1000_1000/TIFF/Test_Data_REAL.json" # test_path_annotation = "C:/Users/savchenko.bs/Desktop/new_placement/detectron2/Dataset_10k/test_dataset_tiff/Train_Data.json" list_of_dicts_train = load_coco_json(train_path_annotation, imgs_root, dataset_name=name_of_train_dataset) DatasetCatalog.register(name_of_train_dataset, lambda: load_coco_json(train_path_annotation, imgs_root, name_of_train_dataset)) list_of_dicts_test = load_coco_json(test_path_annotation, imgs_root, dataset_name=name_of_test_dataset) DatasetCatalog.register(name_of_test_dataset, lambda: load_coco_json(test_path_annotation, imgs_root, 
name_of_test_dataset)) # visualize_img(name_of_test_dataset, 5) # visualize_img(name_of_test_dataset, 6) # planes_metadata = MetadataCatalog.get(name_of_train_dataset) # print(planes_metadata) # print(MetadataCatalog.get(name_of_test_dataset)) # custom_mapper(list_of_dicts_train) ###################### # config params function weights_root = "C:/Users/savchenko.bs/Desktop/new_placement/detectron2/weights/res_learning_08_06" cfg = create_cfg(weights_root, name_of_train_dataset, name_of_test_dataset) # write weights from specific config # name_model = write_weights_from_cfg(cfg, weights_root,"detectron2_model") ######################## # write config cfg_name = "detectron2_config.yaml" cfg_name = write_cfg(cfg, weights_root + "/" + cfg_name) ############## # open cfg from file cfg_from_file = get_cfg() cfg_from_file.merge_from_file(cfg_name) #################### trainer = CustomTrainer(cfg) trainer.resume_or_load(resume=True) trainer.train() if __name__ == "__main__": main()
[ "detectron2.data.detection_utils.filter_empty_instances", "detectron2.modeling.build_model", "detectron2.config.get_cfg", "cv2.imread", "os.path.join", "detectron2.data.DatasetMapper", "detectron2.data.detection_utils.annotations_to_instances", "detectron2.data.detection_utils.transform_instance_annot...
[((1570, 1597), 'copy.deepcopy', 'copy.deepcopy', (['dataset_list'], {}), '(dataset_list)\n', (1583, 1597), False, 'import copy\n'), ((1674, 1730), 'detectron2.data.detection_utils.read_image', 'utils.read_image', (["dataset_list['file_name']"], {'format': 'None'}), "(dataset_list['file_name'], format=None)\n", (1690, 1730), True, 'from detectron2.data import detection_utils as utils\n'), ((2537, 2591), 'detectron2.data.detection_utils.annotations_to_instances', 'utils.annotations_to_instances', (['annos', 'image.shape[:2]'], {}), '(annos, image.shape[:2])\n', (2567, 2591), True, 'from detectron2.data import detection_utils as utils\n'), ((2624, 2663), 'detectron2.data.detection_utils.filter_empty_instances', 'utils.filter_empty_instances', (['instances'], {}), '(instances)\n', (2652, 2663), True, 'from detectron2.data import detection_utils as utils\n'), ((2775, 2784), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (2782, 2784), False, 'from detectron2.config import get_cfg\n'), ((4799, 4815), 'detectron2.modeling.build_model', 'build_model', (['cfg'], {}), '(cfg)\n', (4810, 4815), False, 'from detectron2.modeling import build_model\n'), ((4835, 4884), 'detectron2.checkpoint.DetectionCheckpointer', 'DetectionCheckpointer', (['model'], {'save_dir': 'saving_dir'}), '(model, save_dir=saving_dir)\n', (4856, 4884), False, 'from detectron2.checkpoint import DetectionCheckpointer\n'), ((5021, 5056), 'detectron2.data.DatasetCatalog.get', 'DatasetCatalog.get', (['name_of_dataset'], {}), '(name_of_dataset)\n', (5039, 5056), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((5634, 5648), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5645, 5648), False, 'import cv2\n'), ((6303, 6392), 'detectron2.data.datasets.load_coco_json', 'load_coco_json', (['train_path_annotation', 'imgs_root'], {'dataset_name': 'name_of_train_dataset'}), '(train_path_annotation, imgs_root, dataset_name=\n name_of_train_dataset)\n', (6317, 6392), False, 'from 
detectron2.data.datasets import register_coco_instances, load_coco_json\n'), ((6545, 6632), 'detectron2.data.datasets.load_coco_json', 'load_coco_json', (['test_path_annotation', 'imgs_root'], {'dataset_name': 'name_of_test_dataset'}), '(test_path_annotation, imgs_root, dataset_name=\n name_of_test_dataset)\n', (6559, 6632), False, 'from detectron2.data.datasets import register_coco_instances, load_coco_json\n'), ((7623, 7632), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (7630, 7632), False, 'from detectron2.config import get_cfg\n'), ((1094, 1147), 'detectron2.evaluation.COCOEvaluator', 'COCOEvaluator', (['dataset_name', 'cfg', '(True)', 'output_folder'], {}), '(dataset_name, cfg, True, output_folder)\n', (1107, 1147), False, 'from detectron2.evaluation import COCOEvaluator\n'), ((2359, 2421), 'detectron2.data.detection_utils.transform_instance_annotations', 'utils.transform_instance_annotations', (['obj', '[]', 'image.shape[:2]'], {}), '(obj, [], image.shape[:2])\n', (2395, 2421), True, 'from detectron2.data import detection_utils as utils\n'), ((5183, 5219), 'skimage.io.imread', 'io.imread', (["res[ind_img]['file_name']"], {}), "(res[ind_img]['file_name'])\n", (5192, 5219), False, 'from skimage import io\n'), ((5244, 5281), 'cv2.imread', 'cv2.imread', (["res[ind_img]['file_name']"], {}), "(res[ind_img]['file_name'])\n", (5254, 5281), False, 'import cv2\n'), ((5345, 5374), 'numpy.stack', 'np.stack', (['((img,) * 3)'], {'axis': '(-1)'}), '((img,) * 3, axis=-1)\n', (5353, 5374), True, 'import numpy as np\n'), ((1037, 1078), 'os.path.join', 'os.path.join', (['cfg.OUTPUT_DIR', '"""inference"""'], {}), "(cfg.OUTPUT_DIR, 'inference')\n", (1049, 1078), False, 'import os\n'), ((5446, 5482), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['name_of_dataset'], {}), '(name_of_dataset)\n', (5465, 5482), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((6447, 6518), 'detectron2.data.datasets.load_coco_json', 
'load_coco_json', (['train_path_annotation', 'imgs_root', 'name_of_train_dataset'], {}), '(train_path_annotation, imgs_root, name_of_train_dataset)\n', (6461, 6518), False, 'from detectron2.data.datasets import register_coco_instances, load_coco_json\n'), ((6686, 6755), 'detectron2.data.datasets.load_coco_json', 'load_coco_json', (['test_path_annotation', 'imgs_root', 'name_of_test_dataset'], {}), '(test_path_annotation, imgs_root, name_of_test_dataset)\n', (6700, 6755), False, 'from detectron2.data.datasets import register_coco_instances, load_coco_json\n'), ((1441, 1470), 'detectron2.data.DatasetMapper', 'DatasetMapper', (['self.cfg', '(True)'], {}), '(self.cfg, True)\n', (1454, 1470), False, 'from detectron2.data import DatasetMapper\n')]
from tkinter import * from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk) from matplotlib.backend_bases import key_press_handler import numpy as np #commandline import argparse #visualization import matplotlib.pyplot as plt #font import matplotlib.font_manager as fm # import pyglet # pyglet.font.add_file('AponaLohit.ttf') #plugin architecture import importlib #euclideandistance from sklearn.neighbors import DistanceMetric dist = DistanceMetric.get_metric('euclidean') PLUGIN_NAME = "plugins.core_3" CLUSTER_PLUGIN = "plugins.core2_cluster" SECOND_WIND = "plugins.core_4" plugin_module = importlib.import_module(PLUGIN_NAME, '.') plugin_module_2 = importlib.import_module(CLUSTER_PLUGIN, '.') plugin_module_3 = importlib.import_module(SECOND_WIND, '.') #SECONDARY WINDOW STARTS here def second_window(tuplex,tupley,labels,sent_dic,text_input): plugin3 = plugin_module_3.Plugin() font_name = fm.FontProperties(fname='kalpurush.ttf') def getboundary(event): # print(slider.get()) boundary = slider.get() # therestx, theresty, thecoordinates = boundary_word_plot(dictionary, distance_list, boundary) therestx, theresty, thecoordinates = plugin3.boundary_word_plot(dictionary, distance_list, boundary) # sentences = source_of_words(thecoordinates, sent_dic) sentences = plugin3.source_of_words(thecoordinates, sent_dic) axes1.clear() axes1.scatter(therestx,theresty, cmap='Paired') word_canvas.draw() def window_click(event): # print('you pressed', event.button, event.xdata, event.ydata) xpos, ypos = event.xdata, event.ydata for key in sent_dic: for values in sent_dic[key]: if abs(xpos - values[0]) < 5 and abs(ypos - values[1]) < 5: text_show = plt.text(event.xdata, event.ydata, key, fontsize=5, fontproperties=font_name) word_canvas.draw() # mapx,mapy = word_plot(labels, tuplex, tupley, text_input.get()) mapx, mapy = plugin3.word_plot(labels, tuplex, tupley, text_input.get()) # dictionary, distance_list = distance_x_y(labels, tuplex, tupley, 
text_input.get()) dictionary, distance_list = plugin3.distance_x_y(labels, tuplex, tupley, text_input.get()) maximumdistance = max(distance_list) window = Toplevel() window.minsize(width=1080, height=900) fig, axes1 = plt.subplots() axes1.scatter(mapx, mapy, cmap='Paired', label=text_input.get()) axes1.legend(prop=font_name,title="Given word: ", borderpad=0.5 ) word_canvas = FigureCanvasTkAgg(fig, window) # word_canvas.bind("<Button-1>", window_click) word_canvas.mpl_connect('button_press_event', window_click) plot_widget = word_canvas.get_tk_widget() plot_widget.pack(side = TOP, fill = BOTH, expand = True) toolbar = NavigationToolbar2Tk(word_canvas, window) toolbar.update() toolbar.pack() help = Label(window, text="Slide to set boundary",font=("Helvetica", 16)) help.pack() slider = Scale(window,from_=0, to=maximumdistance, orient=HORIZONTAL,command=getboundary) slider.pack(fill = BOTH) # print(var.get()) word_canvas.draw() #SENCONDARY WINDOW ENDS HERE class Root(Tk): def __init__(self): super(Root,self).__init__() self.title("BERTENDER") self.minsize(800,600) self.main_exec() def main_exec(self): plugin = plugin_module.Plugin(args_dict) plugin2 = plugin_module_2.Plugin() # print(plugin_module) label, values, sent_dic = plugin.initial() def on_click(event): # print('you pressed', event.button, event.xdata, event.ydata) self.check = 5 axes = plt.gca() left, right = axes.get_xlim() if(left< 0 and right<=0): if(abs(left)-abs(right)) < 500 and (abs(left)-abs(right)) > 200: self.check = 4 elif(abs(left)-abs(right)) < 200 and (abs(left)-abs(right)) > 100: self.check = 2 elif(abs(left)-abs(right)) < 100: self.check = 1 elif(left >= 0 and right >0): if(abs(left)-abs(right)) < 500 and (abs(left)-abs(right)) > 200: self.check = 4 elif(abs(left)-abs(right)) < 200 and (abs(left)-abs(right)) > 100: self.check = 2 elif(abs(left)-abs(right)) < 100: self.check = 1 elif(left < 0 and right >=0): # if(300 < (abs(left)- right) < 400): # self.check = 4 if(200 < (abs(left)-right) < 
300): self.check = 3 elif(100 < (abs(left)-right) < 200): self.check = 2 elif((abs(left)-right) < 100): self.check = 0.5 xpos, ypos = event.xdata, event.ydata for key in sent_dic: for values in sent_dic[key]: if abs(xpos - values[0]) < 5 and abs(ypos - values[1]) < 5: # print(key) self.text_show = plt.text(event.xdata, event.ydata, key, fontsize=5, fontproperties=prop) canvas.draw() # key_press_handler(event, canvas, toolbar) def off_click(event): self.text_show.remove() canvas.draw() # mouse click event ends here """plotting starts here""" prop = fm.FontProperties(fname='kalpurush.ttf') x = [] y = [] for token in values: for temp in token: x.append(temp[0]) y.append(temp[1]) flat_list = [item for sublist in values for item in sublist] np_flat_list = np.array(flat_list) f, axes = plt.subplots(nrows = 2, ncols=1) y_pred = plugin2.initial(np_flat_list,label) axes[0].scatter(np_flat_list[:, 0], np_flat_list[:, 1],c=y_pred, cmap='Paired') for i in range(len(label)): p = axes[1].scatter(np_flat_list[:, 0], np_flat_list[:, 1],c=y_pred, cmap='Paired') plt.annotate(label[i], xy=(x[i], y[i]), xytext=(0, 0), textcoords='offset points', ha='right', fontsize=19, fontproperties=prop) """UI work in tkinter and integration of tkinter with matplotlib""" canvas = FigureCanvasTkAgg(f, self) f.canvas.mpl_connect('button_press_event', on_click) f.canvas.mpl_connect('button_release_event', off_click) canvas.draw() toolbar = NavigationToolbar2Tk(canvas, self) toolbar.update() toolbar.pack() canvas.get_tk_widget().pack(side = BOTTOM, fill = BOTH, expand = True) text_input = Entry(self) text_input.pack(side = LEFT) input_button=Button(self, height=1, width=10, text="Find", command=lambda: second_window(x,y,label,sent_dic,text_input)) input_button.pack(side = LEFT) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("text",type=str,help="corpus name") parser.add_argument("--start_layer",type=int,default=8,help="starting layer number to plot") 
parser.add_argument("--end_layer",type=int,default=12,help="ending layer number to plot") parser.add_argument("--perplexity",type=int,default=3,help="number of nearest neighbour") args = parser.parse_args() args_dict = vars(args) # print(args) root = Root() root.mainloop()
[ "matplotlib.pyplot.text", "matplotlib.backends.backend_tkagg.NavigationToolbar2Tk", "importlib.import_module", "argparse.ArgumentParser", "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.gca", "numpy.array", "matplotlib.pyplot.annotate", "sklearn.neighbors.DistanceMetric.get_metric", "...
[((469, 507), 'sklearn.neighbors.DistanceMetric.get_metric', 'DistanceMetric.get_metric', (['"""euclidean"""'], {}), "('euclidean')\n", (494, 507), False, 'from sklearn.neighbors import DistanceMetric\n'), ((628, 669), 'importlib.import_module', 'importlib.import_module', (['PLUGIN_NAME', '"""."""'], {}), "(PLUGIN_NAME, '.')\n", (651, 669), False, 'import importlib\n'), ((688, 732), 'importlib.import_module', 'importlib.import_module', (['CLUSTER_PLUGIN', '"""."""'], {}), "(CLUSTER_PLUGIN, '.')\n", (711, 732), False, 'import importlib\n'), ((751, 792), 'importlib.import_module', 'importlib.import_module', (['SECOND_WIND', '"""."""'], {}), "(SECOND_WIND, '.')\n", (774, 792), False, 'import importlib\n'), ((940, 980), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'fname': '"""kalpurush.ttf"""'}), "(fname='kalpurush.ttf')\n", (957, 980), True, 'import matplotlib.font_manager as fm\n'), ((2421, 2435), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2433, 2435), True, 'import matplotlib.pyplot as plt\n'), ((2595, 2625), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig', 'window'], {}), '(fig, window)\n', (2612, 2625), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((2862, 2903), 'matplotlib.backends.backend_tkagg.NavigationToolbar2Tk', 'NavigationToolbar2Tk', (['word_canvas', 'window'], {}), '(word_canvas, window)\n', (2882, 2903), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((7228, 7253), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7251, 7253), False, 'import argparse\n'), ((5565, 5605), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'fname': '"""kalpurush.ttf"""'}), "(fname='kalpurush.ttf')\n", (5582, 5605), True, 'import matplotlib.font_manager as fm\n'), ((5859, 5878), 'numpy.array', 'np.array', (['flat_list'], {}), '(flat_list)\n', 
(5867, 5878), True, 'import numpy as np\n'), ((5898, 5928), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(1)'}), '(nrows=2, ncols=1)\n', (5910, 5928), True, 'import matplotlib.pyplot as plt\n'), ((6592, 6618), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['f', 'self'], {}), '(f, self)\n', (6609, 6618), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((6784, 6818), 'matplotlib.backends.backend_tkagg.NavigationToolbar2Tk', 'NavigationToolbar2Tk', (['canvas', 'self'], {}), '(canvas, self)\n', (6804, 6818), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((3764, 3773), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3771, 3773), True, 'import matplotlib.pyplot as plt\n'), ((6220, 6353), 'matplotlib.pyplot.annotate', 'plt.annotate', (['label[i]'], {'xy': '(x[i], y[i])', 'xytext': '(0, 0)', 'textcoords': '"""offset points"""', 'ha': '"""right"""', 'fontsize': '(19)', 'fontproperties': 'prop'}), "(label[i], xy=(x[i], y[i]), xytext=(0, 0), textcoords=\n 'offset points', ha='right', fontsize=19, fontproperties=prop)\n", (6232, 6353), True, 'import matplotlib.pyplot as plt\n'), ((1846, 1923), 'matplotlib.pyplot.text', 'plt.text', (['event.xdata', 'event.ydata', 'key'], {'fontsize': '(5)', 'fontproperties': 'font_name'}), '(event.xdata, event.ydata, key, fontsize=5, fontproperties=font_name)\n', (1854, 1923), True, 'import matplotlib.pyplot as plt\n'), ((5211, 5283), 'matplotlib.pyplot.text', 'plt.text', (['event.xdata', 'event.ydata', 'key'], {'fontsize': '(5)', 'fontproperties': 'prop'}), '(event.xdata, event.ydata, key, fontsize=5, fontproperties=prop)\n', (5219, 5283), True, 'import matplotlib.pyplot as plt\n')]
import numpy as np a = np.arange(10) * 10 print(a) # [ 0 10 20 30 40 50 60 70 80 90] print(a[5]) # 50 print(a[8]) # 80 print(a[[5, 8]]) # [50 80] print(a[[5, 4, 8, 0]]) # [50 40 80 0] print(a[[5, 5, 5, 5]]) # [50 50 50 50] idx = np.array([[5, 4], [8, 0]]) print(idx) # [[5 4] # [8 0]] print(a[idx]) # [[50 40] # [80 0]] # print(a[[[5, 4], [8, 0]]]) # IndexError: too many indices for array print(a[[[[5, 4], [8, 0]]]]) # [[50 40] # [80 0]] a_2d = np.arange(12).reshape((3, 4)) print(a_2d) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] print(a_2d[0]) # [0 1 2 3] print(a_2d[2]) # [ 8 9 10 11] print(a_2d[[2, 0]]) # [[ 8 9 10 11] # [ 0 1 2 3]] print(a_2d[[2, 2, 2]]) # [[ 8 9 10 11] # [ 8 9 10 11] # [ 8 9 10 11]] print(a_2d[:, 1]) # [1 5 9] print(a_2d[:, 3]) # [ 3 7 11] print(a_2d[:, 1:2]) # [[1] # [5] # [9]] print(a_2d[:, [3, 1]]) # [[ 3 1] # [ 7 5] # [11 9]] print(a_2d[:, [3, 3, 3]]) # [[ 3 3 3] # [ 7 7 7] # [11 11 11]] print(a_2d[0, 1]) # 1 print(a_2d[2, 3]) # 11 print(a_2d[[0, 2], [1, 3]]) # [ 1 11] # index # [[0, 1] [2, 3]] # print(a_2d[[0, 2, 1], [1, 3]]) # IndexError: shape mismatch: indexing arrays could not be broadcast together with shapes (3,) (2,) print(a_2d[[[0, 0], [2, 2]], [[1, 3], [1, 3]]]) # [[ 1 3] # [ 9 11]] # index # [[0, 1] [0, 3] # [2, 1] [2, 3]] print(a_2d[[[0], [2]], [1, 3]]) # [[ 1 3] # [ 9 11]] idxs = np.ix_([0, 2], [1, 3]) print(idxs) # (array([[0], # [2]]), array([[1, 3]])) print(type(idxs)) # <class 'tuple'> print(type(idxs[0])) # <class 'numpy.ndarray'> print(idxs[0]) # [[0] # [2]] print(idxs[1]) # [[1 3]] print(a_2d[np.ix_([0, 2], [1, 3])]) # [[ 1 3] # [ 9 11]] print(a_2d[np.ix_([2, 0], [3, 3, 3])]) # [[11 11 11] # [ 3 3 3]] print(a_2d[[0, 2]][:, [1, 3]]) # [[ 1 3] # [ 9 11]] a_2d = np.arange(12).reshape((3, 4)) print(a_2d) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] a_2d[np.ix_([0, 2], [1, 3])] = 100 print(a_2d) # [[ 0 100 2 100] # [ 4 5 6 7] # [ 8 100 10 100]] a_2d[np.ix_([0, 2], [1, 3])] = [100, 200] print(a_2d) # [[ 0 100 2 200] # [ 4 5 6 7] # [ 
8 100 10 200]] a_2d[np.ix_([0, 2], [1, 3])] = [[100, 200], [300, 400]] print(a_2d) # [[ 0 100 2 200] # [ 4 5 6 7] # [ 8 300 10 400]] print(a_2d[[0, 2]][:, [1, 3]]) # [[100 200] # [300 400]] a_2d[[0, 2]][:, [1, 3]] = 0 print(a_2d) # [[ 0 100 2 200] # [ 4 5 6 7] # [ 8 300 10 400]] a_2d = np.arange(12).reshape((3, 4)) print(a_2d) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] a_2d[[2, 0]] = [[100, 200, 300, 400], [500, 600, 700, 800]] print(a_2d) # [[500 600 700 800] # [ 4 5 6 7] # [100 200 300 400]] a_2d[[2, 2]] = [[-1, -2, -3, -4], [-5, -6, -7, -8]] print(a_2d) # [[500 600 700 800] # [ 4 5 6 7] # [ -5 -6 -7 -8]] a_2d = np.arange(12).reshape((3, 4)) print(a_2d) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] a_fancy = a_2d[np.ix_([0, 2], [1, 3])] print(a_fancy) # [[ 1 3] # [ 9 11]] a_fancy[0, 0] = 100 print(a_fancy) # [[100 3] # [ 9 11]] print(a_2d) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] a_2d = np.arange(12).reshape((3, 4)) print(a_2d) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] print(a_2d[[2, 0], ::-1]) # [[11 10 9 8] # [ 3 2 1 0]] print(a_2d[::2, [3, 0, 1]]) # [[ 3 0 1] # [11 8 9]]
[ "numpy.array", "numpy.ix_", "numpy.arange" ]
[((237, 263), 'numpy.array', 'np.array', (['[[5, 4], [8, 0]]'], {}), '([[5, 4], [8, 0]])\n', (245, 263), True, 'import numpy as np\n'), ((1410, 1432), 'numpy.ix_', 'np.ix_', (['[0, 2]', '[1, 3]'], {}), '([0, 2], [1, 3])\n', (1416, 1432), True, 'import numpy as np\n'), ((24, 37), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (33, 37), True, 'import numpy as np\n'), ((1927, 1949), 'numpy.ix_', 'np.ix_', (['[0, 2]', '[1, 3]'], {}), '([0, 2], [1, 3])\n', (1933, 1949), True, 'import numpy as np\n'), ((2039, 2061), 'numpy.ix_', 'np.ix_', (['[0, 2]', '[1, 3]'], {}), '([0, 2], [1, 3])\n', (2045, 2061), True, 'import numpy as np\n'), ((2158, 2180), 'numpy.ix_', 'np.ix_', (['[0, 2]', '[1, 3]'], {}), '([0, 2], [1, 3])\n', (2164, 2180), True, 'import numpy as np\n'), ((2935, 2957), 'numpy.ix_', 'np.ix_', (['[0, 2]', '[1, 3]'], {}), '([0, 2], [1, 3])\n', (2941, 2957), True, 'import numpy as np\n'), ((464, 477), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (473, 477), True, 'import numpy as np\n'), ((1647, 1669), 'numpy.ix_', 'np.ix_', (['[0, 2]', '[1, 3]'], {}), '([0, 2], [1, 3])\n', (1653, 1669), True, 'import numpy as np\n'), ((1707, 1732), 'numpy.ix_', 'np.ix_', (['[2, 0]', '[3, 3, 3]'], {}), '([2, 0], [3, 3, 3])\n', (1713, 1732), True, 'import numpy as np\n'), ((1827, 1840), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (1836, 1840), True, 'import numpy as np\n'), ((2457, 2470), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (2466, 2470), True, 'import numpy as np\n'), ((2825, 2838), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (2834, 2838), True, 'import numpy as np\n'), ((3133, 3146), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (3142, 3146), True, 'import numpy as np\n')]
import os import numpy as np from PIL import Image from eratosthenes.generic.mapping_io import read_geo_image from eratosthenes.preprocessing.shadow_transforms import mat_to_gray, gamma_adjustment, log_adjustment rgi_id = 'RGI60-01.19773' # Red Glacier bbox = (4353, 5279, 9427, 10980) # 1000 m buffer f2018 = "T05VMG_20180913T214531_B0" f2020 = "T05VMG_20200912T214531_B0" # read data I_18_b,_,_,_ = read_geo_image(os.path.join('header', f2018+'2.jp2')) I_18_g,_,_,_ = read_geo_image(os.path.join('header', f2018+'3.jp2')) I_18_r,_,_,_ = read_geo_image(os.path.join('header', f2018+'4.jp2')) I_20_b,_,_,_ = read_geo_image(os.path.join('header', f2020+'2.jp2')) I_20_g,_,_,_ = read_geo_image(os.path.join('header', f2020+'3.jp2')) I_20_r,_,_,_ = read_geo_image(os.path.join('header', f2020+'4.jp2')) # make sub-set I_18_r = I_18_r[bbox[0]:bbox[1],bbox[2]:bbox[3]] I_18_g = I_18_g[bbox[0]:bbox[1],bbox[2]:bbox[3]] I_18_b = I_18_b[bbox[0]:bbox[1],bbox[2]:bbox[3]] I_20_r = I_20_r[bbox[0]:bbox[1],bbox[2]:bbox[3]] I_20_g = I_20_g[bbox[0]:bbox[1],bbox[2]:bbox[3]] I_20_b = I_20_b[bbox[0]:bbox[1],bbox[2]:bbox[3]] # I_18_r = mat_to_gray(I_18_r) I_18_g = mat_to_gray(I_18_g) I_18_b = mat_to_gray(I_18_b) I_20_r = mat_to_gray(I_20_r) I_20_g = mat_to_gray(I_20_g) I_20_b = mat_to_gray(I_20_b) I_18_rgb = np.dstack((I_18_r, I_18_g, I_18_b)) I_18_rgb = np.uint8(255*I_18_rgb) I_18_rgb_l = log_adjustment(I_18_rgb) img = Image.fromarray(I_18_rgb_l) img.save("Red-sen2-13-09-2018.jpg", quality=95) I_20_rgb = np.dstack((I_20_r, I_20_g, I_20_b)) I_20_rgb = np.uint8(255*I_20_rgb) I_20_rgb_l = log_adjustment(I_20_rgb) img = Image.fromarray(I_20_rgb_l) img.save("Red-sen2-12-09-2020.jpg", quality=95)
[ "numpy.uint8", "numpy.dstack", "PIL.Image.fromarray", "os.path.join", "eratosthenes.preprocessing.shadow_transforms.log_adjustment", "eratosthenes.preprocessing.shadow_transforms.mat_to_gray" ]
[((1160, 1179), 'eratosthenes.preprocessing.shadow_transforms.mat_to_gray', 'mat_to_gray', (['I_18_r'], {}), '(I_18_r)\n', (1171, 1179), False, 'from eratosthenes.preprocessing.shadow_transforms import mat_to_gray, gamma_adjustment, log_adjustment\n'), ((1189, 1208), 'eratosthenes.preprocessing.shadow_transforms.mat_to_gray', 'mat_to_gray', (['I_18_g'], {}), '(I_18_g)\n', (1200, 1208), False, 'from eratosthenes.preprocessing.shadow_transforms import mat_to_gray, gamma_adjustment, log_adjustment\n'), ((1218, 1237), 'eratosthenes.preprocessing.shadow_transforms.mat_to_gray', 'mat_to_gray', (['I_18_b'], {}), '(I_18_b)\n', (1229, 1237), False, 'from eratosthenes.preprocessing.shadow_transforms import mat_to_gray, gamma_adjustment, log_adjustment\n'), ((1248, 1267), 'eratosthenes.preprocessing.shadow_transforms.mat_to_gray', 'mat_to_gray', (['I_20_r'], {}), '(I_20_r)\n', (1259, 1267), False, 'from eratosthenes.preprocessing.shadow_transforms import mat_to_gray, gamma_adjustment, log_adjustment\n'), ((1277, 1296), 'eratosthenes.preprocessing.shadow_transforms.mat_to_gray', 'mat_to_gray', (['I_20_g'], {}), '(I_20_g)\n', (1288, 1296), False, 'from eratosthenes.preprocessing.shadow_transforms import mat_to_gray, gamma_adjustment, log_adjustment\n'), ((1306, 1325), 'eratosthenes.preprocessing.shadow_transforms.mat_to_gray', 'mat_to_gray', (['I_20_b'], {}), '(I_20_b)\n', (1317, 1325), False, 'from eratosthenes.preprocessing.shadow_transforms import mat_to_gray, gamma_adjustment, log_adjustment\n'), ((1338, 1373), 'numpy.dstack', 'np.dstack', (['(I_18_r, I_18_g, I_18_b)'], {}), '((I_18_r, I_18_g, I_18_b))\n', (1347, 1373), True, 'import numpy as np\n'), ((1385, 1409), 'numpy.uint8', 'np.uint8', (['(255 * I_18_rgb)'], {}), '(255 * I_18_rgb)\n', (1393, 1409), True, 'import numpy as np\n'), ((1422, 1446), 'eratosthenes.preprocessing.shadow_transforms.log_adjustment', 'log_adjustment', (['I_18_rgb'], {}), '(I_18_rgb)\n', (1436, 1446), False, 'from 
eratosthenes.preprocessing.shadow_transforms import mat_to_gray, gamma_adjustment, log_adjustment\n'), ((1454, 1481), 'PIL.Image.fromarray', 'Image.fromarray', (['I_18_rgb_l'], {}), '(I_18_rgb_l)\n', (1469, 1481), False, 'from PIL import Image\n'), ((1542, 1577), 'numpy.dstack', 'np.dstack', (['(I_20_r, I_20_g, I_20_b)'], {}), '((I_20_r, I_20_g, I_20_b))\n', (1551, 1577), True, 'import numpy as np\n'), ((1589, 1613), 'numpy.uint8', 'np.uint8', (['(255 * I_20_rgb)'], {}), '(255 * I_20_rgb)\n', (1597, 1613), True, 'import numpy as np\n'), ((1626, 1650), 'eratosthenes.preprocessing.shadow_transforms.log_adjustment', 'log_adjustment', (['I_20_rgb'], {}), '(I_20_rgb)\n', (1640, 1650), False, 'from eratosthenes.preprocessing.shadow_transforms import mat_to_gray, gamma_adjustment, log_adjustment\n'), ((1658, 1685), 'PIL.Image.fromarray', 'Image.fromarray', (['I_20_rgb_l'], {}), '(I_20_rgb_l)\n', (1673, 1685), False, 'from PIL import Image\n'), ((421, 460), 'os.path.join', 'os.path.join', (['"""header"""', "(f2018 + '2.jp2')"], {}), "('header', f2018 + '2.jp2')\n", (433, 460), False, 'import os\n'), ((490, 529), 'os.path.join', 'os.path.join', (['"""header"""', "(f2018 + '3.jp2')"], {}), "('header', f2018 + '3.jp2')\n", (502, 529), False, 'import os\n'), ((559, 598), 'os.path.join', 'os.path.join', (['"""header"""', "(f2018 + '4.jp2')"], {}), "('header', f2018 + '4.jp2')\n", (571, 598), False, 'import os\n'), ((629, 668), 'os.path.join', 'os.path.join', (['"""header"""', "(f2020 + '2.jp2')"], {}), "('header', f2020 + '2.jp2')\n", (641, 668), False, 'import os\n'), ((698, 737), 'os.path.join', 'os.path.join', (['"""header"""', "(f2020 + '3.jp2')"], {}), "('header', f2020 + '3.jp2')\n", (710, 737), False, 'import os\n'), ((767, 806), 'os.path.join', 'os.path.join', (['"""header"""', "(f2020 + '4.jp2')"], {}), "('header', f2020 + '4.jp2')\n", (779, 806), False, 'import os\n')]
# program to extract data from FITS files into files to # represent a box of the stellar atmosphere import numpy as np import os.path import os import sys import time import glob from astropy.io import fits from tqdm import tqdm import importlib import phys importlib.reload(phys) def getdata(f): hdulist = fits.open(f) hdu = hdulist[0] a = hdu.data return a def swap_axes(a): a = np.swapaxes(a, 0, 2) a = np.swapaxes(a, 1, 2) return a def get_abund(f): awh = np.loadtxt(f) a = np.zeros(30) a[0] = awh[0] a[1] = 1.0 - sum(awh) a[2 : len(a)] = awh[1 : len(awh)] return a """ dz = height resolution: 1.25 km for F3V 10 km for G2V 6 km for K0V 4 km for M0V 3.2 km for M2V for the width and length, dx=dy, the resolution is 58.5938 km for F3V, 17.5781 km for G2V, 11.7188 km for K0V, 4.8828 km for M0V, 3.0469 km for M2V. apn - added points number (number of points added when extending an extracted atmospheric structure) """ num = str(int(np.loadtxt('snapshot.inp'))) dims = np.loadtxt('dims.inp', comments = '!') dz = dims[1, 0] Nz = int(dims[0, 0]) apn = Nz - 324 apm = phys.average_particle_mass(get_abund('abund.inp')) T = getdata('eosT.' + num + '.fits') p = getdata('eosP.' + num + '.fits') d = getdata('result_0.' + num + '.fits') T = swap_axes(T) p = swap_axes(p) d = swap_axes(d) top = len(T[0, 0, :]) - 1 T_top = T[:, :, top] p_top = p[:, :, top] h1d = -np.arange(1, apn + 1) * dz h = np.broadcast_to(h1d, T_top.shape + h1d.shape) T_add = np.broadcast_to(T_top[...,None], T_top.shape + (apn,)) p_top = np.broadcast_to(p_top[...,None], p_top.shape + (apn,)) H = phys.boltz * T_add / apm / phys.grav_sun p_add = p_top * np.exp(h / H) d_add = apm * p_add / phys.boltz / T_add T = np.concatenate((T, T_add), axis = 2) p = np.concatenate((p, p_add), axis = 2) d = np.concatenate((d, d_add), axis = 2) T = T.flatten().astype(np.float32) p = p.flatten().astype(np.float32) d = d.flatten().astype(np.float32) T.tofile('eosT.' + num + '.bin') p.tofile('eosP.' + num + '.bin') d.tofile('result_0.' 
+ num + '.bin')
[ "numpy.swapaxes", "numpy.exp", "numpy.zeros", "numpy.concatenate", "importlib.reload", "astropy.io.fits.open", "numpy.loadtxt", "numpy.broadcast_to", "numpy.arange" ]
[((262, 284), 'importlib.reload', 'importlib.reload', (['phys'], {}), '(phys)\n', (278, 284), False, 'import importlib\n'), ((1051, 1087), 'numpy.loadtxt', 'np.loadtxt', (['"""dims.inp"""'], {'comments': '"""!"""'}), "('dims.inp', comments='!')\n", (1061, 1087), True, 'import numpy as np\n'), ((1488, 1533), 'numpy.broadcast_to', 'np.broadcast_to', (['h1d', '(T_top.shape + h1d.shape)'], {}), '(h1d, T_top.shape + h1d.shape)\n', (1503, 1533), True, 'import numpy as np\n'), ((1543, 1598), 'numpy.broadcast_to', 'np.broadcast_to', (['T_top[..., None]', '(T_top.shape + (apn,))'], {}), '(T_top[..., None], T_top.shape + (apn,))\n', (1558, 1598), True, 'import numpy as np\n'), ((1606, 1661), 'numpy.broadcast_to', 'np.broadcast_to', (['p_top[..., None]', '(p_top.shape + (apn,))'], {}), '(p_top[..., None], p_top.shape + (apn,))\n', (1621, 1661), True, 'import numpy as np\n'), ((1785, 1819), 'numpy.concatenate', 'np.concatenate', (['(T, T_add)'], {'axis': '(2)'}), '((T, T_add), axis=2)\n', (1799, 1819), True, 'import numpy as np\n'), ((1826, 1860), 'numpy.concatenate', 'np.concatenate', (['(p, p_add)'], {'axis': '(2)'}), '((p, p_add), axis=2)\n', (1840, 1860), True, 'import numpy as np\n'), ((1867, 1901), 'numpy.concatenate', 'np.concatenate', (['(d, d_add)'], {'axis': '(2)'}), '((d, d_add), axis=2)\n', (1881, 1901), True, 'import numpy as np\n'), ((317, 329), 'astropy.io.fits.open', 'fits.open', (['f'], {}), '(f)\n', (326, 329), False, 'from astropy.io import fits\n'), ((412, 432), 'numpy.swapaxes', 'np.swapaxes', (['a', '(0)', '(2)'], {}), '(a, 0, 2)\n', (423, 432), True, 'import numpy as np\n'), ((441, 461), 'numpy.swapaxes', 'np.swapaxes', (['a', '(1)', '(2)'], {}), '(a, 1, 2)\n', (452, 461), True, 'import numpy as np\n'), ((506, 519), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (516, 519), True, 'import numpy as np\n'), ((529, 541), 'numpy.zeros', 'np.zeros', (['(30)'], {}), '(30)\n', (537, 541), True, 'import numpy as np\n'), ((1724, 1737), 'numpy.exp', 'np.exp', 
(['(h / H)'], {}), '(h / H)\n', (1730, 1737), True, 'import numpy as np\n'), ((1014, 1040), 'numpy.loadtxt', 'np.loadtxt', (['"""snapshot.inp"""'], {}), "('snapshot.inp')\n", (1024, 1040), True, 'import numpy as np\n'), ((1456, 1477), 'numpy.arange', 'np.arange', (['(1)', '(apn + 1)'], {}), '(1, apn + 1)\n', (1465, 1477), True, 'import numpy as np\n')]
from __future__ import print_function, division from argparse import ArgumentParser import yaml import logging import os import sys import time from subprocess import call from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class from marmot.experiment.preprocessing_utils import tags_from_contexts, contexts_to_features, flatten, fit_binarizers, binarize from marmot.experiment.context_utils import create_contexts_ngram, get_contexts_words_number from marmot.experiment.learning_utils import map_classifiers, predict_all from marmot.evaluation.evaluation_utils import compare_vocabulary from marmot.util.persist_features import persist_features from marmot.util.generate_crf_template import generate_crf_template logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) logger = logging.getLogger('experiment_logger') def label_test(flat_labels, new_test_name, text_file, method_name): tag_map = {0: 'BAD', 1: 'OK'} new_test_plain = open(new_test_name+'.'+method_name+'.plain', 'w') new_test_ext = open(new_test_name+'.'+method_name+'.ext', 'w') start_idx = 0 for s_idx, txt in enumerate(open(text_file)): words = txt[:-1].decode('utf-8').strip().split() tag_seq = [tag_map[flat_labels[i]] for i in range(start_idx, len(words))] new_test_plain.write('%s\n' % ' '.join(tag_seq)) for t_idx, (tag, word) in enumerate(zip(tag_seq, words)): new_test_ext.write('%s\t%d\t%d\t%s\t%s\n' % (method_name, s_idx, t_idx, word.encode('utf-8'), tag)) # write both hypothesis and reference def label_test_hyp_ref(flat_labels, flat_true_labels, new_test_name, text_file): tag_map = {0: 'BAD', 1: 'OK'} new_test = open(new_test_name, 'w') new_test_plain = open(new_test_name+'.plain', 'w') start = 0 for s_idx, txt in enumerate(open(text_file)): words = txt[:-1].decode('utf-8').strip().split() tag_seq = [tag_map[flat_labels[i]] for i in range(start, start+len(words))] true_tag_seq = [tag_map[flat_true_labels[i]] for i in 
range(start, start+len(words))] new_test_plain.write('%s\n' % ' '.join(tag_seq)) start += len(words) for t_idx, (tag, true_tag, word) in enumerate(zip(tag_seq, true_tag_seq, words)): new_test.write('%d\t%d\t%s\t%s\t%s\n' % (s_idx, t_idx, word.encode('utf-8'), true_tag, tag)) # check that everything in a data_obj matches: # - all source and target sentences exist # - alignments don't hit out of bounds # - target tokens really exist and are in their places def main(config, stamp): # the data_type is the format corresponding to the model of the data that the user wishes to learn data_type = config['data_type'] if 'data_type' in config else (config['contexts'] if 'contexts' in config else 'plain') bad_tagging = config['bad_tagging'] if 'bad_tagging' in config else 'pessimistic' logger.info("data_type -- {}, bad_tagging -- {}".format(data_type, bad_tagging)) # time_stamp = str(time.time()) time_stamp = stamp workers = config['workers'] tmp_dir = config['tmp_dir'] # one generator train_data_generator = build_object(config['datasets']['training'][0]) train_data = train_data_generator.generate() # test test_data_generator = build_object(config['datasets']['test'][0]) test_data = test_data_generator.generate() logger.info("Train data keys: {}".format(train_data.keys())) logger.info("Train data sequences: {}".format(len(train_data['target']))) logger.info("Sample sequence: {}".format([w.encode('utf-8') for w in train_data['target'][0]])) # additional representations if 'representations' in config: representation_generators = build_objects(config['representations']) else: representation_generators = [] for r in representation_generators: train_data = r.generate(train_data) test_data = r.generate(test_data) borders = config['borders'] if 'borders' in config else False logger.info('here are the keys in your representations: {}'.format(train_data.keys())) bad_tagging = config['bad_tagging'] if 'bad_tagging' in config else 'pessimistic' # test_contexts = 
create_contexts_ngram(test_data, data_type=data_type, test=True, bad_tagging=bad_tagging) test_contexts = create_contexts_ngram(test_data, data_type=data_type, test=True, bad_tagging=bad_tagging, tags_format=config['tags_format']) print("Objects in the train data: {}".format(len(train_data['target']))) print("UNAMBIGUOUS: ", config['unambiguous']) # train_contexts = create_contexts_ngram(train_data, data_type=data_type, bad_tagging=bad_tagging, unambiguous=config['unambiguous']) train_contexts = create_contexts_ngram(train_data, data_type=data_type, bad_tagging=bad_tagging, unambiguous=config['unambiguous'], tags_format=config['tags_format']) #print("Train contexts: {}".format(len(train_contexts))) #print("1st context:", train_contexts[0]) # the list of context objects' 'target' field lengths # to restore the word-level tags from the phrase-level #test_context_correspondence = get_contexts_words_number(test_contexts) if data_type == 'sequential': test_context_correspondence = flatten([get_contexts_words_number(cont) for cont in test_contexts]) #print(test_context_correspondence) for idx, cont in enumerate(test_contexts): get_cont = get_contexts_words_number(cont) count_cont = [len(c['token']) for c in cont] assert(all([get_cont[i] == count_cont[i] for i in range(len(cont))])), "Sum doesn't match at line {}:\n{}\n{}".format(idx, ' '.join([str(c) for c in get_cont]), ' '.join([str(c) for c in count_cont])) assert(sum(test_context_correspondence) == sum([len(c['token']) for cont in test_contexts for c in cont])), "Sums don't match: {} and {}".format(sum(test_context_correspondence) == sum([len(c['token']) for cont in test_contexts for c in cont])) else: test_context_correspondence = get_contexts_words_number(test_contexts) assert(sum(test_context_correspondence) == sum([len(c['token']) for c in test_contexts])), "Sums don't match: {} and {}".format(sum(test_context_correspondence), sum([len(c['token']) for c in test_contexts])) # print("Token lengths:", 
sum([len(c['token']) for c in test_contexts])) # assert(sum(test_context_correspondence) == 9613), "GOLAKTEKO OPASNOSTE!!!, {}".format(sum(test_context_correspondence)) # sys.exit() # if data_type == 'sequential': # test_context_correspondence = flatten(test_context_correspondence) logger.info('Vocabulary comparison -- coverage for each dataset: ') logger.info(compare_vocabulary([train_data['target'], test_data['target']])) # END REPRESENTATION GENERATION # FEATURE EXTRACTION train_tags = call_for_each_element(train_contexts, tags_from_contexts, data_type=data_type) test_tags = call_for_each_element(test_contexts, tags_from_contexts, data_type=data_type) test_tags_true = test_data['tags'] tag_idx = 0 seg_idx = 0 # test_context_correspondence_seq = [get_contexts_words_number(cont) for cont in test_contexts] # for idx, (tag_seq, phr_seq) in enumerate(zip(test_data['tags'], test_context_correspondence_seq)): # assert(len(tag_seq) == sum(phr_seq)),"Something wrong in line {}:\n{}\n{}".format(idx, ' '.join(tag_seq), ' '.join([str(p) for p in phr_seq])) # tag_idx = 0 # for d in phr_seq: # first_tag = tag_seq[tag_idx] # assert(all([t == first_tag for t in tag_seq[tag_idx:tag_idx+d]])), "Something wrong in line {}:\n{}\n{}".format(idx, ' '.join(tag_seq), ' '.join([str(p) for p in phr_seq])) # try: # indicator = [t == first_tag for t in test_data['tags'][seg_idx][tag_idx:tag_idx+d]] # assert(all(indicator)) # tags_cnt += d # if tags_cnt == len(test_data['tags'][seg_idx]): # tags_cnt = 0 # seg_idx += 1 # elif tags_cnt > len(test_data['tags'][seg_idx]): # raise # except: # print("No correspondence in line {}, tag {}: \n{}\n{}".format(seg_idx, tag_idx, ' '.join(test_data['tags'][seg_idx]), d)) # sys.exit() #assert(sum(test_context_correspondence) == len(flatten(test_data['tags']))), "Sums don't match for phrase contexts and test data object: {} and {}".format(sum(test_context_correspondence), len(flatten(test_data['tags']))) # flat_cont = flatten(test_contexts) # flat_tags = 
flatten(test_data['tags']) # for ii in range(len(flat_cont)): if data_type == 'plain': assert(len(test_context_correspondence) == len(test_tags)), "Lengths don't match for phrase contexts and test tags: {} and {}".format(len(test_context_correspondence), len(test_tags)) # test_tags_seq = call_for_each_element(test_contexts_seq, tags_from_contexts, data_type='sequential') logger.info('creating feature extractors...') feature_extractors = build_objects(config['feature_extractors']) logger.info('mapping the feature extractors over the contexts for test...') test_features = call_for_each_element(test_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type) logger.info('mapping the feature extractors over the contexts for train...') train_features = call_for_each_element(train_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type) logger.info('number of training instances: {}'.format(len(train_features))) logger.info('number of testing instances: {}'.format(len(test_features))) logger.info('All of your features now exist in their raw representation, but they may not be numbers yet') # END FEATURE EXTRACTION from sklearn.metrics import f1_score, precision_score, recall_score from sklearn.cross_validation import permutation_test_score import numpy as np tag_map = {u'OK': 1, u'BAD': 0} if data_type == 'sequential': # TODO: save features for CRFSuite, call it logger.info('training sequential model...') experiment_datasets = [{'name': 'test', 'features': test_features, 'tags': test_tags}, {'name': 'train', 'features': train_features, 'tags': train_tags}] feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()] print("FEATURE NAMES: ", feature_names) persist_dir = tmp_dir logger.info('persisting your features to: {}'.format(persist_dir)) # for each dataset, write a file and persist the features if 'persist_format' not in config: config['persist_format'] = 'crf_suite' for dataset_obj 
in experiment_datasets: persist_features(dataset_obj['name']+time_stamp, dataset_obj['features'], persist_dir, feature_names=feature_names, tags=dataset_obj['tags'], file_format=config['persist_format']) feature_num = len(train_features[0][0]) train_file = os.path.join(tmp_dir, 'train'+time_stamp+'.crf') test_file = os.path.join(tmp_dir, 'test'+time_stamp+'.crf') if config['persist_format'] == 'crf++': # generate a template for CRF++ feature extractor generate_crf_template(feature_num, 'template', tmp_dir) # train a CRF++ model call(['crf_learn', os.path.join(tmp_dir, 'template'), train_file, os.path.join(tmp_dir, 'crfpp_model_file'+time_stamp)]) # tag a test set call(['crf_test', '-m', os.path.join(tmp_dir, 'crfpp_model_file'+time_stamp), '-o', test_file+'.tagged', test_file]) elif config['persist_format'] == 'crf_suite': crfsuite_algorithm = config['crfsuite_algorithm'] if 'crfsuite_algorithm' in config else 'arow' call(['crfsuite', 'learn', '-a', crfsuite_algorithm, '-m', os.path.join(tmp_dir, 'crfsuite_model_file'+time_stamp), train_file]) test_out = open(test_file+'.tagged', 'w') call(['crfsuite', 'tag', '-tr', '-m', os.path.join(tmp_dir, 'crfsuite_model_file'+time_stamp), test_file], stdout=test_out) test_out.close() else: print("Unknown persist format: {}".format(config['persist_format'])) sys.exit() sequential_true = [[]] sequential_predictions = [[]] flat_true = [] flat_predictions = [] for line in open(test_file+'.tagged'): # end of tagging, statistics reported if line.startswith('Performance'): break if line == '\n': sequential_predictions.append([]) continue chunks = line[:-1].decode('utf-8').split() flat_true.append(chunks[-2]) sequential_true[-1].append(chunks[-2]) flat_predictions.append(chunks[-1]) sequential_predictions[-1].append(chunks[-1]) # restoring the word-level tags test_predictions_word, test_tags_word = [], [] for idx, n in enumerate(test_context_correspondence): for i in range(n): test_predictions_word.append(flat_predictions[idx]) 
test_tags_word.append(flat_true[idx]) print(f1_score(test_predictions_word, test_tags_word, average=None)) print(f1_score(test_predictions_word, test_tags_word, average='weighted', pos_label=None)) print("Precision: {}, recall: {}".format(precision_score(test_predictions_word, test_tags_word, average=None), recall_score(test_predictions_word, test_tags_word, average=None))) else: train_tags = [tag_map[tag] for tag in train_tags] #print(test_tags) test_tags = [tag_map[tag] for tag in test_tags] #print(test_tags) #sys.exit() # data_type is 'token' or 'plain' logger.info('start training...') classifier_type = import_class(config['learning']['classifier']['module']) # train the classifier(s) classifier_map = map_classifiers(train_features, train_tags, classifier_type, data_type=data_type) logger.info('classifying the test instances') test_predictions = predict_all(test_features, classifier_map, data_type=data_type) # assert(len(test_predictions) == len(flatten(test_tags_seq))), "long predictions: {}, sequential: {}".format(len(test_predictions), len(flatten(test_tags_seq))) cnt = 0 test_predictions_seq = [] test_tags_seq_num = [] tag_map = {'OK': 1, 'BAD': 0, 1: 1, 0: 0} long_test = True if 'multiply_data_test' in config and (config['multiply_data_test'] == 'ngrams' or config['multiply_data_test'] == '1ton') else False # restoring the word-level tags test_predictions_word, test_tags_word = [], [] logger.info("Test predictions lenght: {}".format(len(test_predictions))) for idx, n in enumerate(test_context_correspondence): for i in range(n): test_predictions_word.append(test_predictions[idx]) test_tags_word.append(test_tags[idx]) test_tags_true_flat = flatten(test_tags_true) test_tags_true_flat = [tag_map[t] for t in test_tags_true_flat] # print(f1_score(test_tags_word, test_predictions_word, average=None)) # print(f1_score(test_tags_word, test_predictions_word, average='weighted', pos_label=None)) print(f1_score(test_tags_true_flat, test_predictions_word, average=None)) 
print(f1_score(test_tags_true_flat, test_predictions_word, average='weighted', pos_label=None)) print("Precision: {}, recall: {}".format(precision_score(test_tags_true_flat, test_predictions_word, average=None), recall_score(test_tags_true_flat, test_predictions_word, average=None))) # TODO: remove the hard coding of the tags here bad_count = sum(1 for t in test_tags if t == u'BAD' or t == 0) good_count = sum(1 for t in test_tags if t == u'OK' or t == 1) total = len(test_tags) assert (total == bad_count+good_count), 'tag counts should be correct' percent_good = good_count / total logger.info('percent good in test set: {}'.format(percent_good)) logger.info('percent bad in test set: {}'.format(1 - percent_good)) random_class_results = [] random_weighted_results = [] for i in range(20): random_tags_phrase = list(np.random.choice([1, 0], total, [percent_good, 1-percent_good])) random_tags = [] for idx, n in enumerate(test_context_correspondence): for i in range(n): random_tags.append(random_tags_phrase[idx]) # random_tags = [u'GOOD' for i in range(total)] random_class_f1 = f1_score(test_tags_true_flat, random_tags, average=None) random_class_results.append(random_class_f1) logger.info('two class f1 random score ({}): {}'.format(i, random_class_f1)) # random_average_f1 = f1_score(random_tags, test_tags, average='weighted') random_average_f1 = f1_score(test_tags_true_flat, random_tags, average='weighted', pos_label=None) random_weighted_results.append(random_average_f1) # logger.info('average f1 random score ({}): {}'.format(i, random_average_f1)) avg_random_class = np.average(random_class_results, axis=0) avg_weighted = np.average(random_weighted_results) logger.info('two class f1 random average score: {}'.format(avg_random_class)) logger.info('weighted f1 random average score: {}'.format(avg_weighted)) # print("Cross-validation:") # print(permutation_test_score()) # logger.info("Sequence correlation: ") # print(sequence_correlation_weighted(test_tags_seq_num, 
test_predictions_seq, verbose=True)[1]) label_test_hyp_ref(test_predictions_word, test_tags_true_flat, os.path.join(tmp_dir, config['output_name']), config["output_test"]) # label_test(test_predictions, '/export/data/varvara/marmot/marmot/experiment/final_submissions/baseline', '/export/data/varvara/corpora/wmt15_corrected/test.target', 'BASELINE') if __name__ == '__main__': parser = ArgumentParser() parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).") parser.add_argument("--data_type", help="data type - sequential or plain") parser.add_argument("--bad_tagging", help="tagging -- optimistic, pessimistic or super-pessimistic") parser.add_argument("--unambiguous", default=0, help="make the tagging unambiguous -- no segmentation for spans of BAD tag (values - 0 or 1, default 0)") parser.add_argument("--output_name", default="output", help="file to store the test set tagging") args = parser.parse_args() experiment_config = {} # Experiment hyperparams cfg_path = args.configuration_file # read configuration file with open(cfg_path, "r") as cfg_file: experiment_config = yaml.load(cfg_file.read()) if args.data_type is not None: experiment_config['data_type'] = args.data_type if args.bad_tagging is not None: experiment_config['bad_tagging'] = args.bad_tagging experiment_config['unambiguous'] = True if int(args.unambiguous) == 1 else False experiment_config['output_name'] = args.output_name stamp = os.path.basename(cfg_path).replace('config', '').replace('.yaml', '') + '_' + experiment_config['bad_tagging'] + '_' + experiment_config['data_type'] if experiment_config['unambiguous']: stamp += '_un' main(experiment_config, stamp)
[ "logging.getLogger", "sklearn.metrics.precision_score", "marmot.experiment.import_utils.build_object", "sklearn.metrics.recall_score", "marmot.experiment.context_utils.create_contexts_ngram", "sys.exit", "argparse.ArgumentParser", "marmot.experiment.learning_utils.map_classifiers", "marmot.experimen...
[((768, 863), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (787, 863), False, 'import logging\n'), ((869, 907), 'logging.getLogger', 'logging.getLogger', (['"""experiment_logger"""'], {}), "('experiment_logger')\n", (886, 907), False, 'import logging\n'), ((3205, 3252), 'marmot.experiment.import_utils.build_object', 'build_object', (["config['datasets']['training'][0]"], {}), "(config['datasets']['training'][0])\n", (3217, 3252), False, 'from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class\n'), ((3340, 3383), 'marmot.experiment.import_utils.build_object', 'build_object', (["config['datasets']['test'][0]"], {}), "(config['datasets']['test'][0])\n", (3352, 3383), False, 'from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class\n'), ((4373, 4501), 'marmot.experiment.context_utils.create_contexts_ngram', 'create_contexts_ngram', (['test_data'], {'data_type': 'data_type', 'test': '(True)', 'bad_tagging': 'bad_tagging', 'tags_format': "config['tags_format']"}), "(test_data, data_type=data_type, test=True,\n bad_tagging=bad_tagging, tags_format=config['tags_format'])\n", (4394, 4501), False, 'from marmot.experiment.context_utils import create_contexts_ngram, get_contexts_words_number\n'), ((4784, 4943), 'marmot.experiment.context_utils.create_contexts_ngram', 'create_contexts_ngram', (['train_data'], {'data_type': 'data_type', 'bad_tagging': 'bad_tagging', 'unambiguous': "config['unambiguous']", 'tags_format': "config['tags_format']"}), "(train_data, data_type=data_type, bad_tagging=\n bad_tagging, unambiguous=config['unambiguous'], tags_format=config[\n 'tags_format'])\n", (4805, 4943), False, 'from marmot.experiment.context_utils import create_contexts_ngram, get_contexts_words_number\n'), 
((6919, 6997), 'marmot.experiment.import_utils.call_for_each_element', 'call_for_each_element', (['train_contexts', 'tags_from_contexts'], {'data_type': 'data_type'}), '(train_contexts, tags_from_contexts, data_type=data_type)\n', (6940, 6997), False, 'from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class\n'), ((7014, 7091), 'marmot.experiment.import_utils.call_for_each_element', 'call_for_each_element', (['test_contexts', 'tags_from_contexts'], {'data_type': 'data_type'}), '(test_contexts, tags_from_contexts, data_type=data_type)\n', (7035, 7091), False, 'from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class\n'), ((9133, 9176), 'marmot.experiment.import_utils.build_objects', 'build_objects', (["config['feature_extractors']"], {}), "(config['feature_extractors'])\n", (9146, 9176), False, 'from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class\n'), ((9277, 9392), 'marmot.experiment.import_utils.call_for_each_element', 'call_for_each_element', (['test_contexts', 'contexts_to_features', '[feature_extractors, workers]'], {'data_type': 'data_type'}), '(test_contexts, contexts_to_features, [\n feature_extractors, workers], data_type=data_type)\n', (9298, 9392), False, 'from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class\n'), ((9490, 9606), 'marmot.experiment.import_utils.call_for_each_element', 'call_for_each_element', (['train_contexts', 'contexts_to_features', '[feature_extractors, workers]'], {'data_type': 'data_type'}), '(train_contexts, contexts_to_features, [\n feature_extractors, workers], data_type=data_type)\n', (9511, 9606), False, 'from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class\n'), ((18348, 18364), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (18362, 18364), False, 
'from argparse import ArgumentParser\n'), ((3781, 3821), 'marmot.experiment.import_utils.build_objects', 'build_objects', (["config['representations']"], {}), "(config['representations'])\n", (3794, 3821), False, 'from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class\n'), ((6098, 6138), 'marmot.experiment.context_utils.get_contexts_words_number', 'get_contexts_words_number', (['test_contexts'], {}), '(test_contexts)\n', (6123, 6138), False, 'from marmot.experiment.context_utils import create_contexts_ngram, get_contexts_words_number\n'), ((6773, 6836), 'marmot.evaluation.evaluation_utils.compare_vocabulary', 'compare_vocabulary', (["[train_data['target'], test_data['target']]"], {}), "([train_data['target'], test_data['target']])\n", (6791, 6836), False, 'from marmot.evaluation.evaluation_utils import compare_vocabulary\n'), ((11126, 11178), 'os.path.join', 'os.path.join', (['tmp_dir', "('train' + time_stamp + '.crf')"], {}), "(tmp_dir, 'train' + time_stamp + '.crf')\n", (11138, 11178), False, 'import os\n'), ((11195, 11246), 'os.path.join', 'os.path.join', (['tmp_dir', "('test' + time_stamp + '.crf')"], {}), "(tmp_dir, 'test' + time_stamp + '.crf')\n", (11207, 11246), False, 'import os\n'), ((12372, 12382), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12380, 12382), False, 'import sys\n'), ((14010, 14066), 'marmot.experiment.import_utils.import_class', 'import_class', (["config['learning']['classifier']['module']"], {}), "(config['learning']['classifier']['module'])\n", (14022, 14066), False, 'from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class\n'), ((14126, 14212), 'marmot.experiment.learning_utils.map_classifiers', 'map_classifiers', (['train_features', 'train_tags', 'classifier_type'], {'data_type': 'data_type'}), '(train_features, train_tags, classifier_type, data_type=\n data_type)\n', (14141, 14212), False, 'from marmot.experiment.learning_utils import 
map_classifiers, predict_all\n'), ((14289, 14352), 'marmot.experiment.learning_utils.predict_all', 'predict_all', (['test_features', 'classifier_map'], {'data_type': 'data_type'}), '(test_features, classifier_map, data_type=data_type)\n', (14300, 14352), False, 'from marmot.experiment.learning_utils import map_classifiers, predict_all\n'), ((15235, 15258), 'marmot.experiment.preprocessing_utils.flatten', 'flatten', (['test_tags_true'], {}), '(test_tags_true)\n', (15242, 15258), False, 'from marmot.experiment.preprocessing_utils import tags_from_contexts, contexts_to_features, flatten, fit_binarizers, binarize\n'), ((17481, 17521), 'numpy.average', 'np.average', (['random_class_results'], {'axis': '(0)'}), '(random_class_results, axis=0)\n', (17491, 17521), True, 'import numpy as np\n'), ((17545, 17580), 'numpy.average', 'np.average', (['random_weighted_results'], {}), '(random_weighted_results)\n', (17555, 17580), True, 'import numpy as np\n'), ((5494, 5525), 'marmot.experiment.context_utils.get_contexts_words_number', 'get_contexts_words_number', (['cont'], {}), '(cont)\n', (5519, 5525), False, 'from marmot.experiment.context_utils import create_contexts_ngram, get_contexts_words_number\n'), ((10876, 11065), 'marmot.util.persist_features.persist_features', 'persist_features', (["(dataset_obj['name'] + time_stamp)", "dataset_obj['features']", 'persist_dir'], {'feature_names': 'feature_names', 'tags': "dataset_obj['tags']", 'file_format': "config['persist_format']"}), "(dataset_obj['name'] + time_stamp, dataset_obj['features'],\n persist_dir, feature_names=feature_names, tags=dataset_obj['tags'],\n file_format=config['persist_format'])\n", (10892, 11065), False, 'from marmot.util.persist_features import persist_features\n'), ((11366, 11421), 'marmot.util.generate_crf_template.generate_crf_template', 'generate_crf_template', (['feature_num', '"""template"""', 'tmp_dir'], {}), "(feature_num, 'template', tmp_dir)\n", (11387, 11421), False, 'from 
marmot.util.generate_crf_template import generate_crf_template\n'), ((13355, 13416), 'sklearn.metrics.f1_score', 'f1_score', (['test_predictions_word', 'test_tags_word'], {'average': 'None'}), '(test_predictions_word, test_tags_word, average=None)\n', (13363, 13416), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((13432, 13519), 'sklearn.metrics.f1_score', 'f1_score', (['test_predictions_word', 'test_tags_word'], {'average': '"""weighted"""', 'pos_label': 'None'}), "(test_predictions_word, test_tags_word, average='weighted',\n pos_label=None)\n", (13440, 13519), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((15523, 15589), 'sklearn.metrics.f1_score', 'f1_score', (['test_tags_true_flat', 'test_predictions_word'], {'average': 'None'}), '(test_tags_true_flat, test_predictions_word, average=None)\n', (15531, 15589), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((15605, 15697), 'sklearn.metrics.f1_score', 'f1_score', (['test_tags_true_flat', 'test_predictions_word'], {'average': '"""weighted"""', 'pos_label': 'None'}), "(test_tags_true_flat, test_predictions_word, average='weighted',\n pos_label=None)\n", (15613, 15697), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((16887, 16943), 'sklearn.metrics.f1_score', 'f1_score', (['test_tags_true_flat', 'random_tags'], {'average': 'None'}), '(test_tags_true_flat, random_tags, average=None)\n', (16895, 16943), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((17209, 17287), 'sklearn.metrics.f1_score', 'f1_score', (['test_tags_true_flat', 'random_tags'], {'average': '"""weighted"""', 'pos_label': 'None'}), "(test_tags_true_flat, random_tags, average='weighted', pos_label=None)\n", (17217, 17287), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((18050, 18094), 'os.path.join', 'os.path.join', (['tmp_dir', 
"config['output_name']"], {}), "(tmp_dir, config['output_name'])\n", (18062, 18094), False, 'import os\n'), ((5316, 5347), 'marmot.experiment.context_utils.get_contexts_words_number', 'get_contexts_words_number', (['cont'], {}), '(cont)\n', (5341, 5347), False, 'from marmot.experiment.context_utils import create_contexts_ngram, get_contexts_words_number\n'), ((13566, 13634), 'sklearn.metrics.precision_score', 'precision_score', (['test_predictions_word', 'test_tags_word'], {'average': 'None'}), '(test_predictions_word, test_tags_word, average=None)\n', (13581, 13634), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((13636, 13701), 'sklearn.metrics.recall_score', 'recall_score', (['test_predictions_word', 'test_tags_word'], {'average': 'None'}), '(test_predictions_word, test_tags_word, average=None)\n', (13648, 13701), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((15744, 15817), 'sklearn.metrics.precision_score', 'precision_score', (['test_tags_true_flat', 'test_predictions_word'], {'average': 'None'}), '(test_tags_true_flat, test_predictions_word, average=None)\n', (15759, 15817), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((15819, 15889), 'sklearn.metrics.recall_score', 'recall_score', (['test_tags_true_flat', 'test_predictions_word'], {'average': 'None'}), '(test_tags_true_flat, test_predictions_word, average=None)\n', (15831, 15889), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((16538, 16603), 'numpy.random.choice', 'np.random.choice', (['[1, 0]', 'total', '[percent_good, 1 - percent_good]'], {}), '([1, 0], total, [percent_good, 1 - percent_good])\n', (16554, 16603), True, 'import numpy as np\n'), ((11487, 11520), 'os.path.join', 'os.path.join', (['tmp_dir', '"""template"""'], {}), "(tmp_dir, 'template')\n", (11499, 11520), False, 'import os\n'), ((11534, 11588), 'os.path.join', 'os.path.join', (['tmp_dir', 
"('crfpp_model_file' + time_stamp)"], {}), "(tmp_dir, 'crfpp_model_file' + time_stamp)\n", (11546, 11588), False, 'import os\n'), ((11654, 11708), 'os.path.join', 'os.path.join', (['tmp_dir', "('crfpp_model_file' + time_stamp)"], {}), "(tmp_dir, 'crfpp_model_file' + time_stamp)\n", (11666, 11708), False, 'import os\n'), ((11980, 12037), 'os.path.join', 'os.path.join', (['tmp_dir', "('crfsuite_model_file' + time_stamp)"], {}), "(tmp_dir, 'crfsuite_model_file' + time_stamp)\n", (11992, 12037), False, 'import os\n'), ((12154, 12211), 'os.path.join', 'os.path.join', (['tmp_dir', "('crfsuite_model_file' + time_stamp)"], {}), "(tmp_dir, 'crfsuite_model_file' + time_stamp)\n", (12166, 12211), False, 'import os\n'), ((19516, 19542), 'os.path.basename', 'os.path.basename', (['cfg_path'], {}), '(cfg_path)\n', (19532, 19542), False, 'import os\n')]
import math import random import numpy from dataclasses import dataclass @dataclass class NetworkParams: """ Parameters to tweak the network """ A: int B: int C: int D: int sigma: float alpha: int = 50 class MatcherNetwork: """ The mathcer network """ SIZE = 10 def __init__(self, ranks: numpy.ndarray, params: NetworkParams): """ :param ranks: the rankings females gave to males :param params: params to control network behaviour """ self.ranks = ranks self.params = params self.neurons = numpy.random.rand(self.SIZE, self.SIZE) def _in_to_out(self, u: float) -> float: """ transforms neuron input potential to neuron output potential :param u: input :return: next value """ return (1 / 2) * (1 + numpy.tanh(self.params.alpha * u)) def _input_potential(self, i: int, j: int) -> float: """ calculate neuron input potential based on index """ row_sum = numpy.sum(self.neurons, axis=1) col_sum = numpy.sum(self.neurons, axis=0) all_sum = numpy.sum(row_sum) cell_value = self.neurons[i][j] result = ( -(self.params.A * (row_sum[i] - cell_value)) - (self.params.B * (col_sum[j] - cell_value)) - (self.params.C * (all_sum - (self.SIZE + self.params.sigma))) - (self.params.D * self.ranks[i][j]) ) return result def _step(self, i: int, j: int) -> bool: """ advances a neuron at a given index :return: True if the value changed, False otherwise """ next_value = self._in_to_out(self._input_potential(i, j)) if self.neurons[i][j] != next_value: self.neurons[i][j] = next_value return True return False def step(self) -> bool: """ makes a single step on a random neuron in the network :return: True if the value changed, False otherwise """ i = math.floor(random.random() * self.SIZE) j = math.floor(random.random() * self.SIZE) return self._step(i, j) def epoch(self) -> bool: """ makes a step for all the neurons in the network, in an ordered fashion :return: True if any neuron value changed, False otherwise """ changed = False for i in range(self.SIZE): for j in range(self.SIZE): changed |= 
self._step(i, j) return changed def random_epoch(self) -> bool: """ makes a step for all the neurons in the network, in a random fashion :return: True if any neuron value changed, False otherwise """ changed = False rows = list(range(self.SIZE)) cols = list(range(self.SIZE)) random.shuffle(rows) random.shuffle(cols) for i in rows: for j in cols: changed |= self._step(i, j) return changed
[ "random.shuffle", "numpy.random.rand", "numpy.tanh", "numpy.sum", "random.random" ]
[((609, 648), 'numpy.random.rand', 'numpy.random.rand', (['self.SIZE', 'self.SIZE'], {}), '(self.SIZE, self.SIZE)\n', (626, 648), False, 'import numpy\n'), ((1061, 1092), 'numpy.sum', 'numpy.sum', (['self.neurons'], {'axis': '(1)'}), '(self.neurons, axis=1)\n', (1070, 1092), False, 'import numpy\n'), ((1111, 1142), 'numpy.sum', 'numpy.sum', (['self.neurons'], {'axis': '(0)'}), '(self.neurons, axis=0)\n', (1120, 1142), False, 'import numpy\n'), ((1161, 1179), 'numpy.sum', 'numpy.sum', (['row_sum'], {}), '(row_sum)\n', (1170, 1179), False, 'import numpy\n'), ((2876, 2896), 'random.shuffle', 'random.shuffle', (['rows'], {}), '(rows)\n', (2890, 2896), False, 'import random\n'), ((2905, 2925), 'random.shuffle', 'random.shuffle', (['cols'], {}), '(cols)\n', (2919, 2925), False, 'import random\n'), ((870, 903), 'numpy.tanh', 'numpy.tanh', (['(self.params.alpha * u)'], {}), '(self.params.alpha * u)\n', (880, 903), False, 'import numpy\n'), ((2084, 2099), 'random.random', 'random.random', ([], {}), '()\n', (2097, 2099), False, 'import random\n'), ((2136, 2151), 'random.random', 'random.random', ([], {}), '()\n', (2149, 2151), False, 'import random\n')]
""" Test the infrastructure for building a state by projection onto the +1 eigenspace of a set of generators or stabilizers """ import numpy as np from referenceqvm.stabilizer_utils import (compute_action, project_stabilized_state, binary_stabilizer_to_pauli_stabilizer, pauli_stabilizer_to_binary_stabilizer) from referenceqvm.tests.test_stabilizer_qvm import (five_qubit_code_generators, bell_stabilizer) from pyquil.paulis import sX, sZ, sY, sI, PauliSum import pytest def test_compute_action_type_checks(): """ Make sure type checks are consistent and working """ with pytest.raises(TypeError): compute_action([0, 0, 0, 0, 0], PauliSum([sX(0)]), 5) with pytest.raises(TypeError): compute_action([0, 0, 0, 0, 0], sX(0), 4) with pytest.raises(TypeError): compute_action(3, 'a', 4) with pytest.raises(TypeError): compute_action(-3, sX(0), 4) with pytest.raises(TypeError): compute_action('0001', sX(0), 4) def test_stabilizer_to_matrix_conversion(): # bitflip code stabilizer_matrix = pauli_stabilizer_to_binary_stabilizer(bell_stabilizer) true_stabilizer_matrix = np.array([[0, 0, 1, 1, 0], [1, 1, 0, 0, 0]]) assert np.allclose(true_stabilizer_matrix, stabilizer_matrix) test_stabilizer_list = binary_stabilizer_to_pauli_stabilizer(true_stabilizer_matrix) for idx, term in enumerate(test_stabilizer_list): assert term == bell_stabilizer[idx] # given some codes convert them to code matrices stabilizer_matrix = pauli_stabilizer_to_binary_stabilizer(five_qubit_code_generators) true_stabilizer_matrix = np.array([[1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0], [1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0]]) assert np.allclose(true_stabilizer_matrix, stabilizer_matrix) test_stabilizer_list = binary_stabilizer_to_pauli_stabilizer(true_stabilizer_matrix) for idx, term in enumerate(test_stabilizer_list): assert term == five_qubit_code_generators[idx] def test_compute_action_identity(): """ Action of Pauli operators on state """ comp_basis_state = 
[0, 0, 0, 0] for ii in range(4): pauli_term = sI(ii) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python assert new_basis_state == comp_basis_state assert np.isclose(coeff, 1) def test_compute_action_X(): """ Action of Pauli operators on state """ comp_basis_state = [0, 0, 0, 0] for ii in range(4): pauli_term = sX(ii) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] = 1 assert new_basis_state == true_basis_state assert np.isclose(coeff, 1) comp_basis_state = [1, 1, 1, 1] for ii in range(4): pauli_term = sX(ii) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] = true_basis_state[ii] ^ 1 assert new_basis_state == true_basis_state assert np.isclose(coeff, 1) def test_compute_action_XX(): """ Action of Pauli operators on state """ comp_basis_state = [0, 0, 0, 0] for ii in range(3): pauli_term = sX(ii) * sX(ii + 1) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] = true_basis_state[ii + 1] = 1 assert new_basis_state == true_basis_state assert np.isclose(coeff, 1) comp_basis_state = [1, 1, 1, 1] for ii in range(3): pauli_term = sX(ii) * sX(ii + 1) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] = true_basis_state[ii] ^ 1 true_basis_state[ii + 1] = true_basis_state[ii + 1] ^ 1 assert new_basis_state == true_basis_state assert np.isclose(coeff, 1) def test_compute_action_Y(): """ Action of Pauli operators on state """ comp_basis_state = [0, 0, 0, 
0] for ii in range(4): pauli_term = sY(ii) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] = true_basis_state[ii] ^ 1 assert new_basis_state == true_basis_state assert np.isclose(coeff, 1j) comp_basis_state = [1, 1, 1, 1] for ii in range(4): pauli_term = sY(ii) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] = true_basis_state[ii] ^ 1 assert new_basis_state == true_basis_state assert np.isclose(coeff, -1j) def test_compute_action_YY(): """ Action of Pauli operators on state """ comp_basis_state = [0, 0, 0, 0] for ii in range(3): pauli_term = sY(ii) * sY(ii + 1) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] = true_basis_state[ii + 1] = 1 assert new_basis_state == true_basis_state assert np.isclose(coeff, -1) comp_basis_state = [1, 1, 1, 1] for ii in range(3): pauli_term = sY(ii) * sY(ii + 1) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] = true_basis_state[ii] ^ 1 true_basis_state[ii + 1] = true_basis_state[ii + 1] ^ 1 assert new_basis_state == true_basis_state assert np.isclose(coeff, -1) def test_compute_action_Z(): """ Action of Pauli operators on state """ comp_basis_state = [0, 0, 0, 0] for ii in range(4): pauli_term = sZ(ii) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] = true_basis_state[ii] assert new_basis_state == true_basis_state assert np.isclose(coeff, 
1) comp_basis_state = [1, 1, 1, 1] for ii in range(4): pauli_term = sZ(ii) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] = true_basis_state[ii] assert new_basis_state == true_basis_state assert np.isclose(coeff, -1) def test_compute_action_ZZ(): """ Action of Pauli operators on state """ comp_basis_state = [0, 0, 0, 0] for ii in range(3): pauli_term = sZ(ii) * sZ(ii + 1) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() assert new_basis_state == true_basis_state assert np.isclose(coeff, 1) comp_basis_state = [1, 1, 1, 1] for ii in range(3): pauli_term = sZ(ii) * sZ(ii + 1) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() assert new_basis_state == true_basis_state assert np.isclose(coeff, 1) def test_compute_action_XY(): """ Action of Pauli operators on state """ comp_basis_state = [0, 0, 0, 0] for ii in range(3): pauli_term = sX(ii) * sY(ii + 1) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] ^= 1 true_basis_state[ii + 1] ^= 1 assert new_basis_state == true_basis_state assert np.isclose(coeff, 1j) comp_basis_state = [1, 1, 1, 1] for ii in range(3): pauli_term = sX(ii) * sY(ii + 1) new_basis_state, coeff = compute_action(comp_basis_state, pauli_term, len(comp_basis_state)) # abuse of comparisons in python true_basis_state = comp_basis_state.copy() true_basis_state[ii] ^= 1 true_basis_state[ii + 1] ^= 1 assert new_basis_state == true_basis_state assert np.isclose(coeff, -1j) def test_stabilizer_projection_Z(): """ test if we project out the correct 
state """ stabilizer_state = project_stabilized_state([sZ(0)]) true_state = np.zeros((2, 1)) true_state[0, 0] = 1 assert np.allclose(true_state, stabilizer_state.todense()) def test_stabilizer_projection_ZZ(): """ test if we project out the correct state """ stabilizer_state = project_stabilized_state([sZ(0) * sZ(1), sX(0) * sX(1)]) true_state = np.zeros((4, 1)) true_state[0, 0] = true_state[3, 0] = 1 true_state /= np.sqrt(2) assert np.allclose(true_state, stabilizer_state.todense()) def test_stabilizer_projection_ZZZ(): """ test if we project out the correct state """ stabilizer_state = project_stabilized_state([sZ(0) * sZ(1), sZ(1) * sZ(2), sX(0) * sX(1) * sX(2)]) true_state = np.zeros((8, 1)) true_state[0, 0] = true_state[7, 0] = 1 true_state /= np.sqrt(2) assert np.allclose(true_state, np.array(stabilizer_state.todense()))
[ "numpy.allclose", "numpy.sqrt", "numpy.isclose", "pyquil.paulis.sY", "pyquil.paulis.sZ", "referenceqvm.stabilizer_utils.pauli_stabilizer_to_binary_stabilizer", "referenceqvm.stabilizer_utils.compute_action", "pyquil.paulis.sI", "numpy.array", "numpy.zeros", "pytest.raises", "pyquil.paulis.sX",...
[((1212, 1266), 'referenceqvm.stabilizer_utils.pauli_stabilizer_to_binary_stabilizer', 'pauli_stabilizer_to_binary_stabilizer', (['bell_stabilizer'], {}), '(bell_stabilizer)\n', (1249, 1266), False, 'from referenceqvm.stabilizer_utils import compute_action, project_stabilized_state, binary_stabilizer_to_pauli_stabilizer, pauli_stabilizer_to_binary_stabilizer\n'), ((1296, 1340), 'numpy.array', 'np.array', (['[[0, 0, 1, 1, 0], [1, 1, 0, 0, 0]]'], {}), '([[0, 0, 1, 1, 0], [1, 1, 0, 0, 0]])\n', (1304, 1340), True, 'import numpy as np\n'), ((1391, 1445), 'numpy.allclose', 'np.allclose', (['true_stabilizer_matrix', 'stabilizer_matrix'], {}), '(true_stabilizer_matrix, stabilizer_matrix)\n', (1402, 1445), True, 'import numpy as np\n'), ((1474, 1535), 'referenceqvm.stabilizer_utils.binary_stabilizer_to_pauli_stabilizer', 'binary_stabilizer_to_pauli_stabilizer', (['true_stabilizer_matrix'], {}), '(true_stabilizer_matrix)\n', (1511, 1535), False, 'from referenceqvm.stabilizer_utils import compute_action, project_stabilized_state, binary_stabilizer_to_pauli_stabilizer, pauli_stabilizer_to_binary_stabilizer\n'), ((1713, 1778), 'referenceqvm.stabilizer_utils.pauli_stabilizer_to_binary_stabilizer', 'pauli_stabilizer_to_binary_stabilizer', (['five_qubit_code_generators'], {}), '(five_qubit_code_generators)\n', (1750, 1778), False, 'from referenceqvm.stabilizer_utils import compute_action, project_stabilized_state, binary_stabilizer_to_pauli_stabilizer, pauli_stabilizer_to_binary_stabilizer\n'), ((1808, 1962), 'numpy.array', 'np.array', (['[[1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0], [1, \n 0, 1, 0, 0, 0, 0, 0, 1, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0]]'], {}), '([[1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 1, 1, 0,\n 0], [1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0]])\n', (1816, 1962), True, 'import numpy as np\n'), ((2084, 2138), 'numpy.allclose', 'np.allclose', (['true_stabilizer_matrix', 'stabilizer_matrix'], {}), 
'(true_stabilizer_matrix, stabilizer_matrix)\n', (2095, 2138), True, 'import numpy as np\n'), ((2167, 2228), 'referenceqvm.stabilizer_utils.binary_stabilizer_to_pauli_stabilizer', 'binary_stabilizer_to_pauli_stabilizer', (['true_stabilizer_matrix'], {}), '(true_stabilizer_matrix)\n', (2204, 2228), False, 'from referenceqvm.stabilizer_utils import compute_action, project_stabilized_state, binary_stabilizer_to_pauli_stabilizer, pauli_stabilizer_to_binary_stabilizer\n'), ((10268, 10284), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (10276, 10284), True, 'import numpy as np\n'), ((10570, 10586), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (10578, 10586), True, 'import numpy as np\n'), ((10649, 10659), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10656, 10659), True, 'import numpy as np\n'), ((10997, 11013), 'numpy.zeros', 'np.zeros', (['(8, 1)'], {}), '((8, 1))\n', (11005, 11013), True, 'import numpy as np\n'), ((11076, 11086), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11083, 11086), True, 'import numpy as np\n'), ((729, 753), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (742, 753), False, 'import pytest\n'), ((827, 851), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (840, 851), False, 'import pytest\n'), ((913, 937), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (926, 937), False, 'import pytest\n'), ((947, 972), 'referenceqvm.stabilizer_utils.compute_action', 'compute_action', (['(3)', '"""a"""', '(4)'], {}), "(3, 'a', 4)\n", (961, 972), False, 'from referenceqvm.stabilizer_utils import compute_action, project_stabilized_state, binary_stabilizer_to_pauli_stabilizer, pauli_stabilizer_to_binary_stabilizer\n'), ((983, 1007), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (996, 1007), False, 'import pytest\n'), ((1056, 1080), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1069, 1080), False, 'import 
pytest\n'), ((2512, 2518), 'pyquil.paulis.sI', 'sI', (['ii'], {}), '(ii)\n', (2514, 2518), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((2771, 2791), 'numpy.isclose', 'np.isclose', (['coeff', '(1)'], {}), '(coeff, 1)\n', (2781, 2791), True, 'import numpy as np\n'), ((2959, 2965), 'pyquil.paulis.sX', 'sX', (['ii'], {}), '(ii)\n', (2961, 2965), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((3302, 3322), 'numpy.isclose', 'np.isclose', (['coeff', '(1)'], {}), '(coeff, 1)\n', (3312, 3322), True, 'import numpy as np\n'), ((3405, 3411), 'pyquil.paulis.sX', 'sX', (['ii'], {}), '(ii)\n', (3407, 3411), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((3771, 3791), 'numpy.isclose', 'np.isclose', (['coeff', '(1)'], {}), '(coeff, 1)\n', (3781, 3791), True, 'import numpy as np\n'), ((4343, 4363), 'numpy.isclose', 'np.isclose', (['coeff', '(1)'], {}), '(coeff, 1)\n', (4353, 4363), True, 'import numpy as np\n'), ((4889, 4909), 'numpy.isclose', 'np.isclose', (['coeff', '(1)'], {}), '(coeff, 1)\n', (4899, 4909), True, 'import numpy as np\n'), ((5077, 5083), 'pyquil.paulis.sY', 'sY', (['ii'], {}), '(ii)\n', (5079, 5083), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((5443, 5466), 'numpy.isclose', 'np.isclose', (['coeff', '(1.0j)'], {}), '(coeff, 1.0j)\n', (5453, 5466), True, 'import numpy as np\n'), ((5547, 5553), 'pyquil.paulis.sY', 'sY', (['ii'], {}), '(ii)\n', (5549, 5553), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((5913, 5937), 'numpy.isclose', 'np.isclose', (['coeff', '(-1.0j)'], {}), '(coeff, -1.0j)\n', (5923, 5937), True, 'import numpy as np\n'), ((6487, 6508), 'numpy.isclose', 'np.isclose', (['coeff', '(-1)'], {}), '(coeff, -1)\n', (6497, 6508), True, 'import numpy as np\n'), ((7034, 7055), 'numpy.isclose', 'np.isclose', (['coeff', '(-1)'], {}), '(coeff, -1)\n', (7044, 7055), True, 'import numpy as np\n'), ((7223, 7229), 'pyquil.paulis.sZ', 'sZ', (['ii'], {}), '(ii)\n', (7225, 
7229), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((7585, 7605), 'numpy.isclose', 'np.isclose', (['coeff', '(1)'], {}), '(coeff, 1)\n', (7595, 7605), True, 'import numpy as np\n'), ((7688, 7694), 'pyquil.paulis.sZ', 'sZ', (['ii'], {}), '(ii)\n', (7690, 7694), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((8050, 8071), 'numpy.isclose', 'np.isclose', (['coeff', '(-1)'], {}), '(coeff, -1)\n', (8060, 8071), True, 'import numpy as np\n'), ((8563, 8583), 'numpy.isclose', 'np.isclose', (['coeff', '(1)'], {}), '(coeff, 1)\n', (8573, 8583), True, 'import numpy as np\n'), ((8989, 9009), 'numpy.isclose', 'np.isclose', (['coeff', '(1)'], {}), '(coeff, 1)\n', (8999, 9009), True, 'import numpy as np\n'), ((9573, 9596), 'numpy.isclose', 'np.isclose', (['coeff', '(1.0j)'], {}), '(coeff, 1.0j)\n', (9583, 9596), True, 'import numpy as np\n'), ((10072, 10096), 'numpy.isclose', 'np.isclose', (['coeff', '(-1.0j)'], {}), '(coeff, -1.0j)\n', (10082, 10096), True, 'import numpy as np\n'), ((893, 898), 'pyquil.paulis.sX', 'sX', (['(0)'], {}), '(0)\n', (895, 898), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((1036, 1041), 'pyquil.paulis.sX', 'sX', (['(0)'], {}), '(0)\n', (1038, 1041), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((1113, 1118), 'pyquil.paulis.sX', 'sX', (['(0)'], {}), '(0)\n', (1115, 1118), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((3960, 3966), 'pyquil.paulis.sX', 'sX', (['ii'], {}), '(ii)\n', (3962, 3966), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((3969, 3979), 'pyquil.paulis.sX', 'sX', (['(ii + 1)'], {}), '(ii + 1)\n', (3971, 3979), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((4446, 4452), 'pyquil.paulis.sX', 'sX', (['ii'], {}), '(ii)\n', (4448, 4452), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((4455, 4465), 'pyquil.paulis.sX', 'sX', (['(ii + 1)'], {}), '(ii + 1)\n', (4457, 4465), False, 'from pyquil.paulis 
import sX, sZ, sY, sI, PauliSum\n'), ((6104, 6110), 'pyquil.paulis.sY', 'sY', (['ii'], {}), '(ii)\n', (6106, 6110), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((6113, 6123), 'pyquil.paulis.sY', 'sY', (['(ii + 1)'], {}), '(ii + 1)\n', (6115, 6123), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((6591, 6597), 'pyquil.paulis.sY', 'sY', (['ii'], {}), '(ii)\n', (6593, 6597), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((6600, 6610), 'pyquil.paulis.sY', 'sY', (['(ii + 1)'], {}), '(ii + 1)\n', (6602, 6610), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((8240, 8246), 'pyquil.paulis.sZ', 'sZ', (['ii'], {}), '(ii)\n', (8242, 8246), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((8249, 8259), 'pyquil.paulis.sZ', 'sZ', (['(ii + 1)'], {}), '(ii + 1)\n', (8251, 8259), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((8666, 8672), 'pyquil.paulis.sZ', 'sZ', (['ii'], {}), '(ii)\n', (8668, 8672), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((8675, 8685), 'pyquil.paulis.sZ', 'sZ', (['(ii + 1)'], {}), '(ii + 1)\n', (8677, 8685), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((9178, 9184), 'pyquil.paulis.sX', 'sX', (['ii'], {}), '(ii)\n', (9180, 9184), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((9187, 9197), 'pyquil.paulis.sY', 'sY', (['(ii + 1)'], {}), '(ii + 1)\n', (9189, 9197), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((9677, 9683), 'pyquil.paulis.sX', 'sX', (['ii'], {}), '(ii)\n', (9679, 9683), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((9686, 9696), 'pyquil.paulis.sY', 'sY', (['(ii + 1)'], {}), '(ii + 1)\n', (9688, 9696), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10243, 10248), 'pyquil.paulis.sZ', 'sZ', (['(0)'], {}), '(0)\n', (10245, 10248), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10522, 10527), 'pyquil.paulis.sZ', 
'sZ', (['(0)'], {}), '(0)\n', (10524, 10527), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10530, 10535), 'pyquil.paulis.sZ', 'sZ', (['(1)'], {}), '(1)\n', (10532, 10535), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10537, 10542), 'pyquil.paulis.sX', 'sX', (['(0)'], {}), '(0)\n', (10539, 10542), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10545, 10550), 'pyquil.paulis.sX', 'sX', (['(1)'], {}), '(1)\n', (10547, 10550), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10873, 10878), 'pyquil.paulis.sZ', 'sZ', (['(0)'], {}), '(0)\n', (10875, 10878), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10881, 10886), 'pyquil.paulis.sZ', 'sZ', (['(1)'], {}), '(1)\n', (10883, 10886), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10888, 10893), 'pyquil.paulis.sZ', 'sZ', (['(1)'], {}), '(1)\n', (10890, 10893), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10896, 10901), 'pyquil.paulis.sZ', 'sZ', (['(2)'], {}), '(2)\n', (10898, 10901), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10972, 10977), 'pyquil.paulis.sX', 'sX', (['(2)'], {}), '(2)\n', (10974, 10977), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((805, 810), 'pyquil.paulis.sX', 'sX', (['(0)'], {}), '(0)\n', (807, 810), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10956, 10961), 'pyquil.paulis.sX', 'sX', (['(0)'], {}), '(0)\n', (10958, 10961), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n'), ((10964, 10969), 'pyquil.paulis.sX', 'sX', (['(1)'], {}), '(1)\n', (10966, 10969), False, 'from pyquil.paulis import sX, sZ, sY, sI, PauliSum\n')]
import numpy as np import scipy as sp import pandas as pd from scipy import stats def get_ecdf(series_): return lambda x: (series_.sort_values() < x).astype(int).mean() def get_sample_acvf(x, h): """ x: series h: shift param return: autocovariance estimator """ n = x.shape[0] shift_x = x.shift(h) mean = x.mean() result = (x - mean) * (shift_x - mean) return result.sum() / n def autocovariance_f(x, nlags): """ x: series nlags: range of lags param return: array of autocovariance estimators """ results = np.array([get_sample_acvf(x, h) for h in range(nlags)]) return results def autocorrelation_f(x, nlags): """ x: series nlags: range of lags param return: array of autocorrelation estimators """ gammas = autocovariance_f(x, nlags) gamma_0 = get_sample_acvf(x, 0) return gammas / gamma_0 def rank_acf(x, nlags): """ x: series nlags: range of lags param return: array of autocorrelation estimators using Spearman rank-order correlation """ results = [sp.stats.spearmanr(x.shift(h), x, nan_policy='omit')[ 0] for h in range(nlags)] return np.array(results) def get_sample_ccvf(x, y, h): """ x: series y: series h: shift param return: cross-covariance estimator """ n = x.shape[0] shift_x = x.shift(h) mean_x = x.mean() mean_y = y.mean() result = (shift_x - mean_x) * (y - mean_y) return result.sum() / n def crosscorrelation_f(x, y, nlags): """ x: series y: series nlags: range of lags param return: array of cross-correlation estimators """ results = np.array([get_sample_ccvf(x, y, h) for h in range(nlags)]) gamma_x_0 = get_sample_acvf(x, 0) gamma_y_0 = get_sample_acvf(y, 0) denominator = np.sqrt(gamma_x_0 * gamma_y_0) return results / denominator def stats_ccf(x, y, nlags): return stats.ccf(y, x, unbiased=False)[:nlags] def rank_sample_ccf(x, y, h): """ x: series that we will perform the lag y: series h: lag param return: cross-correlation estimator using Spearman rank-order correlation """ x_h = x.shift(h) return sp.stats.spearmanr(x_h, y, nan_policy='omit')[0] def rank_ccf(x, y, 
nlags): """ x: series y: series nlags: range of lags param return: array of cross-correlation estimators """ results = [rank_sample_ccf(x, y, h) for h in range(nlags)] return np.array(results) def test_normality_skewness(returns, alpha=0.05): """ Let $\{x_1 ,\dots , x_T \}$ be a random sample of $X$ with $T$ observations. Under the normality assumption, the sample skewness is distributed asymptotically as normal with zero mean and variances $6/T$.Given an asset return series $\{r_1 ,\dots , r_T\}$, to test the skewness of the returns, we consider the null hypothesis $H_0 : S(r) = 0$ versus the alternative hypothesis $H_a : S(r) \not= 0$. The t-ratio statistic of the sample is \begin{equation} t = \frac{\hat{S}(r)}{\sqrt{6/T}} \end{equation} where $\hat{S}(r)$ is the sample skewness. The decision rule is as follows. Reject the null hypothesis at the $\alpha$ significance level, if $|t| > Z_{\alpha/2}$ , where $Z_{\alpha/2}$ is the upper $100(\alpha/2)$th quantile of the standard normal distribution. :param returns: daily returns :type returns: pd.Series :param alpha: significant level :type alpha: float :return: test results :rtype: pd.DataFrame """ size = returns.shape[0] skew = returns.skew() name = returns.name test_statistic = skew / np.sqrt(6 / size) abs_test_statistic = np.abs(test_statistic) z_alpha = stats.norm.ppf(1 - (alpha / 2)) p_value = (1 - stats.norm.cdf(abs_test_statistic)) * 2 if abs_test_statistic > z_alpha: decision = r"Reject $H_0$" else: decision = r"Retain $H_0$" df = pd.DataFrame([(name, skew, test_statistic, p_value, decision)], columns=["name", "sample skewness", "test_statistic", "p_value", "decision"]) return df
[ "numpy.abs", "scipy.stats.ccf", "numpy.sqrt", "scipy.stats.norm.cdf", "scipy.stats.norm.ppf", "numpy.array", "pandas.DataFrame", "scipy.stats.spearmanr" ]
[((1203, 1220), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (1211, 1220), True, 'import numpy as np\n'), ((1849, 1879), 'numpy.sqrt', 'np.sqrt', (['(gamma_x_0 * gamma_y_0)'], {}), '(gamma_x_0 * gamma_y_0)\n', (1856, 1879), True, 'import numpy as np\n'), ((2515, 2532), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (2523, 2532), True, 'import numpy as np\n'), ((3746, 3768), 'numpy.abs', 'np.abs', (['test_statistic'], {}), '(test_statistic)\n', (3752, 3768), True, 'import numpy as np\n'), ((3783, 3812), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['(1 - alpha / 2)'], {}), '(1 - alpha / 2)\n', (3797, 3812), False, 'from scipy import stats\n'), ((4000, 4146), 'pandas.DataFrame', 'pd.DataFrame', (['[(name, skew, test_statistic, p_value, decision)]'], {'columns': "['name', 'sample skewness', 'test_statistic', 'p_value', 'decision']"}), "([(name, skew, test_statistic, p_value, decision)], columns=[\n 'name', 'sample skewness', 'test_statistic', 'p_value', 'decision'])\n", (4012, 4146), True, 'import pandas as pd\n'), ((1954, 1985), 'scipy.stats.ccf', 'stats.ccf', (['y', 'x'], {'unbiased': '(False)'}), '(y, x, unbiased=False)\n', (1963, 1985), False, 'from scipy import stats\n'), ((2238, 2283), 'scipy.stats.spearmanr', 'sp.stats.spearmanr', (['x_h', 'y'], {'nan_policy': '"""omit"""'}), "(x_h, y, nan_policy='omit')\n", (2256, 2283), True, 'import scipy as sp\n'), ((3703, 3720), 'numpy.sqrt', 'np.sqrt', (['(6 / size)'], {}), '(6 / size)\n', (3710, 3720), True, 'import numpy as np\n'), ((3834, 3868), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['abs_test_statistic'], {}), '(abs_test_statistic)\n', (3848, 3868), False, 'from scipy import stats\n')]
# coding=UTF-8 """Tests for Urban Flood Risk Mitigation Model.""" import unittest import tempfile import shutil import os from osgeo import gdal from osgeo import osr from osgeo import ogr import numpy import pygeoprocessing import shapely.geometry class UFRMTests(unittest.TestCase): """Tests for the Urban Flood Risk Mitigation Model.""" def setUp(self): """Override setUp function to create temp workspace directory.""" # this lets us delete the workspace after its done no matter the # the rest result self.workspace_dir = tempfile.mkdtemp(suffix='\U0001f60e') # smiley def tearDown(self): """Override tearDown function to remove temporary directory.""" shutil.rmtree(self.workspace_dir) def _make_args(self): """Create args list for UFRM.""" base_dir = os.path.dirname(__file__) args = { 'aoi_watersheds_path': os.path.join( base_dir, '..', 'data', 'invest-test-data', 'ufrm', 'watersheds.gpkg'), 'built_infrastructure_vector_path': os.path.join( base_dir, '..', 'data', 'invest-test-data', 'ufrm', 'infrastructure.gpkg'), 'curve_number_table_path': os.path.join( base_dir, '..', 'data', 'invest-test-data', 'ufrm', 'Biophysical_water_SF.csv'), 'infrastructure_damage_loss_table_path': os.path.join( base_dir, '..', 'data', 'invest-test-data', 'ufrm', 'Damage.csv'), 'lulc_path': os.path.join( base_dir, '..', 'data', 'invest-test-data', 'ufrm', 'lulc.tif'), 'rainfall_depth': 40, 'results_suffix': 'Test1', 'soils_hydrological_group_raster_path': os.path.join( base_dir, '..', 'data', 'invest-test-data', 'ufrm', 'soilgroup.tif'), 'workspace_dir': self.workspace_dir, } return args def test_ufrm_regression(self): """UFRM: regression test.""" from natcap.invest import urban_flood_risk_mitigation args = self._make_args() urban_flood_risk_mitigation.execute(args) result_vector = gdal.OpenEx(os.path.join( args['workspace_dir'], 'flood_risk_service_Test1.shp'), gdal.OF_VECTOR) result_layer = result_vector.GetLayer() # Check that all four expected fields are there. 
self.assertEqual( set(('aff_bld', 'serv_blt', 'rnf_rt_idx', 'rnf_rt_m3', 'flood_vol')), set(field.GetName() for field in result_layer.schema)) result_feature = result_layer.GetFeature(0) for fieldname, expected_value in ( ('aff_bld', 187010830.32202843), ('serv_blt', 13253546667257.65), ('rnf_rt_idx', 0.70387527942), ('rnf_rt_m3', 70870.4765625), ('flood_vol', 29815.640625)): result_val = result_feature.GetField(fieldname) places_to_round = ( int(round(numpy.log(expected_value)/numpy.log(10)))-6) self.assertAlmostEqual( result_val, expected_value, places=-places_to_round) result_feature = None result_layer = None result_vector = None def test_ufrm_regression_no_infrastructure(self): """UFRM: regression for no infrastructure.""" from natcap.invest import urban_flood_risk_mitigation args = self._make_args() del args['built_infrastructure_vector_path'] urban_flood_risk_mitigation.execute(args) result_raster = gdal.OpenEx(os.path.join( args['workspace_dir'], 'Runoff_retention_m3_Test1.tif'), gdal.OF_RASTER) band = result_raster.GetRasterBand(1) array = band.ReadAsArray() nodata = band.GetNoDataValue() band = None result_raster = None result_sum = numpy.sum(array[~numpy.isclose(array, nodata)]) # expected result observed from regression run. expected_result = 156070.36 self.assertAlmostEqual(result_sum, expected_result, places=0) result_vector = gdal.OpenEx(os.path.join( args['workspace_dir'], 'flood_risk_service_Test1.shp'), gdal.OF_VECTOR) result_layer = result_vector.GetLayer() result_feature = result_layer.GetFeature(0) # Check that only the two expected fields are there. 
self.assertEqual( set(('rnf_rt_idx', 'rnf_rt_m3', 'flood_vol')), set(field.GetName() for field in result_layer.schema)) for fieldname, expected_value in ( ('rnf_rt_idx', 0.70387527942), ('rnf_rt_m3', 70870.4765625), ('flood_vol', 29815.640625)): result_val = result_feature.GetField(fieldname) places_to_round = ( int(round(numpy.log(expected_value)/numpy.log(10)))-6) self.assertAlmostEqual( result_val, expected_value, places=-places_to_round) def test_ufrm_value_error_on_bad_soil(self): """UFRM: assert exception on bad soil raster values.""" from natcap.invest import urban_flood_risk_mitigation args = self._make_args() bad_soil_raster = os.path.join( self.workspace_dir, 'bad_soilgroups.tif') value_map = { 1: 1, 2: 2, 3: 9, # only 1, 2, 3, 4 are valid values for this raster. 4: 4 } pygeoprocessing.reclassify_raster( (args['soils_hydrological_group_raster_path'], 1), value_map, bad_soil_raster, gdal.GDT_Int16, -9) args['soils_hydrological_group_raster_path'] = bad_soil_raster with self.assertRaises(ValueError) as cm: urban_flood_risk_mitigation.execute(args) actual_message = str(cm.exception) expected_message = ( 'Check that the Soil Group raster does not contain') self.assertTrue(expected_message in actual_message) def test_ufrm_value_error_on_bad_lucode(self): """UFRM: assert exception on missing lucodes.""" import pandas from natcap.invest import urban_flood_risk_mitigation args = self._make_args() bad_cn_table_path = os.path.join( self.workspace_dir, 'bad_cn_table.csv') cn_table = pandas.read_csv(args['curve_number_table_path']) # drop a row with an lucode known to exist in lulc raster # This is a code that will successfully index into the # CN table sparse matrix, but will not return valid data. 
bad_cn_table = cn_table[cn_table['lucode'] != 0] bad_cn_table.to_csv(bad_cn_table_path, index=False) args['curve_number_table_path'] = bad_cn_table_path with self.assertRaises(ValueError) as cm: urban_flood_risk_mitigation.execute(args) actual_message = str(cm.exception) expected_message = ( f'The biophysical table is missing a row for lucode(s) {[0]}') self.assertEqual(expected_message, actual_message) # drop rows with lucodes known to exist in lulc raster # These are codes that will raise an IndexError on # indexing into the CN table sparse matrix. The test # LULC raster has values from 0 to 21. bad_cn_table = cn_table[cn_table['lucode'] < 15] bad_cn_table.to_csv(bad_cn_table_path, index=False) args['curve_number_table_path'] = bad_cn_table_path with self.assertRaises(ValueError) as cm: urban_flood_risk_mitigation.execute(args) actual_message = str(cm.exception) expected_message = ( f'The biophysical table is missing a row for lucode(s) ' f'{[16, 17, 18, 21]}') self.assertEqual(expected_message, actual_message) def test_ufrm_string_damage_to_infrastructure(self): """UFRM: handle str(int) structure indices. This came up on the forums, where a user had provided a string column type that contained integer data. OGR returned these ints as strings, leading to a ``KeyError``. See https://github.com/natcap/invest/issues/590. """ from natcap.invest import urban_flood_risk_mitigation srs = osr.SpatialReference() srs.ImportFromEPSG(3157) projection_wkt = srs.ExportToWkt() origin = (443723.127327877911739, 4956546.905980412848294) pos_x = origin[0] pos_y = origin[1] aoi_geometry = [ shapely.geometry.box(pos_x, pos_y, pos_x + 200, pos_y + 200), ] def _infra_geom(xoff, yoff): """Create sample infrastructure geometry at a position offset. The geometry will be centered on (x+xoff, y+yoff). Parameters: xoff (number): The x offset, referenced against ``pos_x`` from the outer scope. yoff (number): The y offset, referenced against ``pos_y`` from the outer scope. 
Returns: A ``shapely.Geometry`` of a point buffered by ``20`` centered on the provided (x+xoff, y+yoff) point. """ return shapely.geometry.Point( pos_x + xoff, pos_y + yoff).buffer(20) infra_geometries = [ _infra_geom(x_offset, 100) for x_offset in range(0, 200, 40)] infra_fields = {'Type': ogr.OFTString} # THIS IS THE THING TESTED infra_attrs = [ {'Type': str(index)} for index in range(len(infra_geometries))] infrastructure_path = os.path.join( self.workspace_dir, 'infra_vector.shp') pygeoprocessing.shapely_geometry_to_vector( infra_geometries, infrastructure_path, projection_wkt, 'ESRI Shapefile', fields=infra_fields, attribute_list=infra_attrs, ogr_geom_type=ogr.wkbPolygon) aoi_path = os.path.join(self.workspace_dir, 'aoi.shp') pygeoprocessing.shapely_geometry_to_vector( aoi_geometry, aoi_path, projection_wkt, 'ESRI Shapefile', ogr_geom_type=ogr.wkbPolygon) structures_damage_table_path = os.path.join( self.workspace_dir, 'damage_table_path.csv') with open(structures_damage_table_path, 'w') as csv_file: csv_file.write('"Type","damage"\n') for attr_dict in infra_attrs: type_index = int(attr_dict['Type']) csv_file.write(f'"{type_index}",1\n') aoi_damage_dict = ( urban_flood_risk_mitigation._calculate_damage_to_infrastructure_in_aoi( aoi_path, infrastructure_path, structures_damage_table_path)) # Total damage is the sum of the area of all infrastructure geometries # that intersect the AOI, with each area multiplied by the damage cost. # For this test, damage is always 1, so it's just the intersecting # area. 
self.assertEqual(len(aoi_damage_dict), 1) numpy.testing.assert_allclose(aoi_damage_dict[0], 5645.787282992962) def test_ufrm_invalid_validation(self): """UFRM: assert validation error on bad args.""" from natcap.invest import urban_flood_risk_mitigation with self.assertRaises(ValueError): urban_flood_risk_mitigation.execute({}) def test_validate(self): """UFRM: test validate function.""" from natcap.invest import urban_flood_risk_mitigation args = self._make_args() self.assertEqual( len(urban_flood_risk_mitigation.validate(args)), 0) del args['workspace_dir'] validation_warnings = urban_flood_risk_mitigation.validate(args) self.assertEqual(len(validation_warnings), 1) args['workspace_dir'] = '' result = urban_flood_risk_mitigation.validate(args) self.assertTrue('has no value' in result[0][1]) args = self._make_args() args['lulc_path'] = 'fake/path/notfound.tif' result = urban_flood_risk_mitigation.validate(args) self.assertTrue('not found' in result[0][1]) args = self._make_args() args['lulc_path'] = args['aoi_watersheds_path'] result = urban_flood_risk_mitigation.validate(args) self.assertTrue('GDAL raster' in result[0][1]) args = self._make_args() args['aoi_watersheds_path'] = args['lulc_path'] result = urban_flood_risk_mitigation.validate(args) self.assertTrue('GDAL vector' in result[0][1]) args = self._make_args() del args['infrastructure_damage_loss_table_path'] result = urban_flood_risk_mitigation.validate(args) self.assertTrue('missing from the args dict' in result[0][1])
[ "pygeoprocessing.reclassify_raster", "pygeoprocessing.shapely_geometry_to_vector", "numpy.isclose", "pandas.read_csv", "osgeo.osr.SpatialReference", "natcap.invest.urban_flood_risk_mitigation._calculate_damage_to_infrastructure_in_aoi", "numpy.testing.assert_allclose", "os.path.join", "natcap.invest...
[((593, 621), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'suffix': '"""😎"""'}), "(suffix='😎')\n", (609, 621), False, 'import tempfile\n'), ((750, 783), 'shutil.rmtree', 'shutil.rmtree', (['self.workspace_dir'], {}), '(self.workspace_dir)\n', (763, 783), False, 'import shutil\n'), ((875, 900), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (890, 900), False, 'import os\n'), ((2236, 2277), 'natcap.invest.urban_flood_risk_mitigation.execute', 'urban_flood_risk_mitigation.execute', (['args'], {}), '(args)\n', (2271, 2277), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((3712, 3753), 'natcap.invest.urban_flood_risk_mitigation.execute', 'urban_flood_risk_mitigation.execute', (['args'], {}), '(args)\n', (3747, 3753), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((5491, 5545), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""bad_soilgroups.tif"""'], {}), "(self.workspace_dir, 'bad_soilgroups.tif')\n", (5503, 5545), False, 'import os\n'), ((5731, 5872), 'pygeoprocessing.reclassify_raster', 'pygeoprocessing.reclassify_raster', (["(args['soils_hydrological_group_raster_path'], 1)", 'value_map', 'bad_soil_raster', 'gdal.GDT_Int16', '(-9)'], {}), "((args[\n 'soils_hydrological_group_raster_path'], 1), value_map, bad_soil_raster,\n gdal.GDT_Int16, -9)\n", (5764, 5872), False, 'import pygeoprocessing\n'), ((6537, 6589), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""bad_cn_table.csv"""'], {}), "(self.workspace_dir, 'bad_cn_table.csv')\n", (6549, 6589), False, 'import os\n'), ((6624, 6672), 'pandas.read_csv', 'pandas.read_csv', (["args['curve_number_table_path']"], {}), "(args['curve_number_table_path'])\n", (6639, 6672), False, 'import pandas\n'), ((8601, 8623), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (8621, 8623), False, 'from osgeo import osr\n'), ((9993, 10045), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""infra_vector.shp"""'], {}), 
"(self.workspace_dir, 'infra_vector.shp')\n", (10005, 10045), False, 'import os\n'), ((10069, 10272), 'pygeoprocessing.shapely_geometry_to_vector', 'pygeoprocessing.shapely_geometry_to_vector', (['infra_geometries', 'infrastructure_path', 'projection_wkt', '"""ESRI Shapefile"""'], {'fields': 'infra_fields', 'attribute_list': 'infra_attrs', 'ogr_geom_type': 'ogr.wkbPolygon'}), "(infra_geometries,\n infrastructure_path, projection_wkt, 'ESRI Shapefile', fields=\n infra_fields, attribute_list=infra_attrs, ogr_geom_type=ogr.wkbPolygon)\n", (10111, 10272), False, 'import pygeoprocessing\n'), ((10326, 10369), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""aoi.shp"""'], {}), "(self.workspace_dir, 'aoi.shp')\n", (10338, 10369), False, 'import os\n'), ((10379, 10513), 'pygeoprocessing.shapely_geometry_to_vector', 'pygeoprocessing.shapely_geometry_to_vector', (['aoi_geometry', 'aoi_path', 'projection_wkt', '"""ESRI Shapefile"""'], {'ogr_geom_type': 'ogr.wkbPolygon'}), "(aoi_geometry, aoi_path,\n projection_wkt, 'ESRI Shapefile', ogr_geom_type=ogr.wkbPolygon)\n", (10421, 10513), False, 'import pygeoprocessing\n'), ((10579, 10636), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""damage_table_path.csv"""'], {}), "(self.workspace_dir, 'damage_table_path.csv')\n", (10591, 10636), False, 'import os\n'), ((10962, 11098), 'natcap.invest.urban_flood_risk_mitigation._calculate_damage_to_infrastructure_in_aoi', 'urban_flood_risk_mitigation._calculate_damage_to_infrastructure_in_aoi', (['aoi_path', 'infrastructure_path', 'structures_damage_table_path'], {}), '(aoi_path\n , infrastructure_path, structures_damage_table_path)\n', (11032, 11098), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((11429, 11497), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['aoi_damage_dict[0]', '(5645.787282992962)'], {}), '(aoi_damage_dict[0], 5645.787282992962)\n', (11458, 11497), False, 'import numpy\n'), ((12100, 12142), 
'natcap.invest.urban_flood_risk_mitigation.validate', 'urban_flood_risk_mitigation.validate', (['args'], {}), '(args)\n', (12136, 12142), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((12254, 12296), 'natcap.invest.urban_flood_risk_mitigation.validate', 'urban_flood_risk_mitigation.validate', (['args'], {}), '(args)\n', (12290, 12296), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((12462, 12504), 'natcap.invest.urban_flood_risk_mitigation.validate', 'urban_flood_risk_mitigation.validate', (['args'], {}), '(args)\n', (12498, 12504), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((12670, 12712), 'natcap.invest.urban_flood_risk_mitigation.validate', 'urban_flood_risk_mitigation.validate', (['args'], {}), '(args)\n', (12706, 12712), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((12880, 12922), 'natcap.invest.urban_flood_risk_mitigation.validate', 'urban_flood_risk_mitigation.validate', (['args'], {}), '(args)\n', (12916, 12922), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((13092, 13134), 'natcap.invest.urban_flood_risk_mitigation.validate', 'urban_flood_risk_mitigation.validate', (['args'], {}), '(args)\n', (13128, 13134), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((955, 1042), 'os.path.join', 'os.path.join', (['base_dir', '""".."""', '"""data"""', '"""invest-test-data"""', '"""ufrm"""', '"""watersheds.gpkg"""'], {}), "(base_dir, '..', 'data', 'invest-test-data', 'ufrm',\n 'watersheds.gpkg')\n", (967, 1042), False, 'import os\n'), ((1124, 1215), 'os.path.join', 'os.path.join', (['base_dir', '""".."""', '"""data"""', '"""invest-test-data"""', '"""ufrm"""', '"""infrastructure.gpkg"""'], {}), "(base_dir, '..', 'data', 'invest-test-data', 'ufrm',\n 'infrastructure.gpkg')\n", (1136, 1215), False, 'import os\n'), ((1288, 1384), 'os.path.join', 'os.path.join', (['base_dir', '""".."""', '"""data"""', '"""invest-test-data"""', '"""ufrm"""', 
'"""Biophysical_water_SF.csv"""'], {}), "(base_dir, '..', 'data', 'invest-test-data', 'ufrm',\n 'Biophysical_water_SF.csv')\n", (1300, 1384), False, 'import os\n'), ((1471, 1549), 'os.path.join', 'os.path.join', (['base_dir', '""".."""', '"""data"""', '"""invest-test-data"""', '"""ufrm"""', '"""Damage.csv"""'], {}), "(base_dir, '..', 'data', 'invest-test-data', 'ufrm', 'Damage.csv')\n", (1483, 1549), False, 'import os\n'), ((1612, 1688), 'os.path.join', 'os.path.join', (['base_dir', '""".."""', '"""data"""', '"""invest-test-data"""', '"""ufrm"""', '"""lulc.tif"""'], {}), "(base_dir, '..', 'data', 'invest-test-data', 'ufrm', 'lulc.tif')\n", (1624, 1688), False, 'import os\n'), ((1853, 1938), 'os.path.join', 'os.path.join', (['base_dir', '""".."""', '"""data"""', '"""invest-test-data"""', '"""ufrm"""', '"""soilgroup.tif"""'], {}), "(base_dir, '..', 'data', 'invest-test-data', 'ufrm',\n 'soilgroup.tif')\n", (1865, 1938), False, 'import os\n'), ((2317, 2384), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""flood_risk_service_Test1.shp"""'], {}), "(args['workspace_dir'], 'flood_risk_service_Test1.shp')\n", (2329, 2384), False, 'import os\n'), ((3793, 3861), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""Runoff_retention_m3_Test1.tif"""'], {}), "(args['workspace_dir'], 'Runoff_retention_m3_Test1.tif')\n", (3805, 3861), False, 'import os\n'), ((4354, 4421), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""flood_risk_service_Test1.shp"""'], {}), "(args['workspace_dir'], 'flood_risk_service_Test1.shp')\n", (4366, 4421), False, 'import os\n'), ((6029, 6070), 'natcap.invest.urban_flood_risk_mitigation.execute', 'urban_flood_risk_mitigation.execute', (['args'], {}), '(args)\n', (6064, 6070), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((7119, 7160), 'natcap.invest.urban_flood_risk_mitigation.execute', 'urban_flood_risk_mitigation.execute', (['args'], {}), '(args)\n', (7154, 7160), False, 'from natcap.invest 
import urban_flood_risk_mitigation\n'), ((7855, 7896), 'natcap.invest.urban_flood_risk_mitigation.execute', 'urban_flood_risk_mitigation.execute', (['args'], {}), '(args)\n', (7890, 7896), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((11726, 11765), 'natcap.invest.urban_flood_risk_mitigation.execute', 'urban_flood_risk_mitigation.execute', (['{}'], {}), '({})\n', (11761, 11765), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((11984, 12026), 'natcap.invest.urban_flood_risk_mitigation.validate', 'urban_flood_risk_mitigation.validate', (['args'], {}), '(args)\n', (12020, 12026), False, 'from natcap.invest import urban_flood_risk_mitigation\n'), ((4119, 4147), 'numpy.isclose', 'numpy.isclose', (['array', 'nodata'], {}), '(array, nodata)\n', (4132, 4147), False, 'import numpy\n'), ((3196, 3221), 'numpy.log', 'numpy.log', (['expected_value'], {}), '(expected_value)\n', (3205, 3221), False, 'import numpy\n'), ((3222, 3235), 'numpy.log', 'numpy.log', (['(10)'], {}), '(10)\n', (3231, 3235), False, 'import numpy\n'), ((5096, 5121), 'numpy.log', 'numpy.log', (['expected_value'], {}), '(expected_value)\n', (5105, 5121), False, 'import numpy\n'), ((5122, 5135), 'numpy.log', 'numpy.log', (['(10)'], {}), '(10)\n', (5131, 5135), False, 'import numpy\n')]
from agents import * from models import * import numpy as np import matplotlib matplotlib.use('tkagg') import matplotlib.pyplot as plt import sys import pickle # end class world def speed_profile(file_names): """ This function is to plot speed profiles for several evaluation results. Args: file_names (array of string): file names to be draw speed profile. """ # instantiate the class robots = [] records = [] dT = 0.05 for i in range(1,len(file_names)): f = open(file_names[i], 'rb') record = pickle.load(f) records.append(record) exec('robots.append(' + record.model + '(' + record.algorithm + '(), dT))'); print(len(records)) fig = plt.figure() ax1=plt.subplot(2, 1, 1) ax2=plt.subplot(2, 1, 2) for i in range(len(records)): d = [] dot_d = [] human = HumanBall3D(MobileAgent(), dT); for t in range(records[0].tot): records[i].robot_moves[:, t] human.update(robots[0]) human.move(records[0].human_moves[:, t]) robots[i].update(human) robots[i].x = records[i].robot_moves[:, t] Mr = robots[i].m Mh = human.m dim = np.shape(Mr)[0] // 2 p_idx = np.arange(dim) v_idx = p_idx + dim d.append(np.linalg.norm(Mr[p_idx] - Mh[p_idx])) sgn = (Mr[p_idx+dim] - Mh[p_idx+dim]).T * (Mr[p_idx] - Mh[p_idx]) sgn = -1 if sgn < 0 else 1 dot_d.append(sgn * np.linalg.norm(Mr[p_idx+dim] - Mh[p_idx+dim])) print(d[:10]) print(dot_d[:10]) ax1.plot(d, c='C'+str(i), label=records[i].algorithm, linestyle='-') ax2.plot(dot_d, c='C'+str(i), label=records[i].algorithm, linestyle='--') ax2.plot(range(-100,800,100), np.linspace(0,0,9),c='black', linestyle='-') ax1.legend() ax1.set_xlim(0,200) ax1.set_ylabel('m', fontsize = 20) # plt.show() # fig.legend() ax2.set_xlim(0,200) ax2.set_xlabel('Frame (0.05s)', fontsize = 20) ax2.set_ylabel('m/s', fontsize = 20) # tikz_save(model+'.tex') fig.savefig('speed_profile.pdf', bbox_inches='tight') if __name__ == '__main__': speed_profile(sys.argv)
[ "matplotlib.use", "pickle.load", "matplotlib.pyplot.figure", "numpy.linspace", "numpy.linalg.norm", "numpy.shape", "matplotlib.pyplot.subplot", "numpy.arange" ]
[((79, 102), 'matplotlib.use', 'matplotlib.use', (['"""tkagg"""'], {}), "('tkagg')\n", (93, 102), False, 'import matplotlib\n'), ((732, 744), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (742, 744), True, 'import matplotlib.pyplot as plt\n'), ((754, 774), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (765, 774), True, 'import matplotlib.pyplot as plt\n'), ((783, 803), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (794, 803), True, 'import matplotlib.pyplot as plt\n'), ((557, 571), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (568, 571), False, 'import pickle\n'), ((1299, 1313), 'numpy.arange', 'np.arange', (['dim'], {}), '(dim)\n', (1308, 1313), True, 'import numpy as np\n'), ((1866, 1886), 'numpy.linspace', 'np.linspace', (['(0)', '(0)', '(9)'], {}), '(0, 0, 9)\n', (1877, 1886), True, 'import numpy as np\n'), ((1368, 1405), 'numpy.linalg.norm', 'np.linalg.norm', (['(Mr[p_idx] - Mh[p_idx])'], {}), '(Mr[p_idx] - Mh[p_idx])\n', (1382, 1405), True, 'import numpy as np\n'), ((1258, 1270), 'numpy.shape', 'np.shape', (['Mr'], {}), '(Mr)\n', (1266, 1270), True, 'import numpy as np\n'), ((1555, 1604), 'numpy.linalg.norm', 'np.linalg.norm', (['(Mr[p_idx + dim] - Mh[p_idx + dim])'], {}), '(Mr[p_idx + dim] - Mh[p_idx + dim])\n', (1569, 1604), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- """Data association solver module. Data association is the key to match a list of observed objects and the list of currently tracked items. It can be reduced to linear assignment problem and solved by Hungarian algorithm (or Kuhn–Munkres algorithm). `scipy` package already implements this algorithm so we can directly use it. """ # package dependency import numpy as np from scipy.optimize import linear_sum_assignment def associate(bbox_list, label_list, tracker_list, metric_thr=0.1): """Association solver method. Given a list of bounding boxes (with their labels) and a list of trackers, this method returns the indices of matched pairs and unmatched items. The metric includes IoU on bounding boxes and label consistency check. Args: bbox_list (list of list): List of bounding boxes (one bounding box is a list of as [xmin, ymin, xmax, ymax]). label_list (list of int): List of label from detector. tracker_list (list fo tracker): List of trackers. metric_thr (float, optional): Metric threshold for matching, default 0.1. Return: match_idx_pair (numpy.array): Matched indices pairs, dim = (n_pairs, 2). unmatched_bbox_idx (numpy.array): Unmatched bounding box indices, dim = (n_ub,). unmatched_tracker_idx (numpy.array): Unmatched tracker indices, dim = (n_ut,). 
""" assert len(bbox_list) == len(label_list) # compute match matrix match_matrix = np.zeros((len(bbox_list),len(tracker_list))).astype(np.float32) for b_idx, bbox in enumerate(bbox_list): for t_idx, tracker in enumerate(tracker_list): if label_list[b_idx] == tracker.get_est_dict()['label']: match_matrix[b_idx,t_idx] = iou(bbox, tracker.get_bbox()) # solve linear assignment # each entry is interpreted as the cost for that assignment # so IoU is taken with minus match_row, match_col = linear_sum_assignment(-match_matrix) match_idx_pair_raw = list(zip(match_row, match_col)) # kick out the match under threshold match_idx_pair = [] for m in match_idx_pair_raw: if match_matrix[m[0],m[1]] > metric_thr: match_idx_pair.append(m) # keep this as np.array with fix dimension for convenience if(len(match_idx_pair)==0): match_idx_pair = np.empty((0,2),dtype=int) else: match_idx_pair = np.array(match_idx_pair) # pick out the unmatched bbox unmatched_bbox_idx = [] for b_idx, bbox in enumerate(bbox_list): if b_idx not in match_idx_pair[:,0]: unmatched_bbox_idx.append(b_idx) unmatched_bbox_idx = np.array(unmatched_bbox_idx) # pick out the unmatched tracker unmatched_tracker_idx = [] for t_idx, tracker in enumerate(tracker_list): if t_idx not in match_idx_pair[:,1]: unmatched_tracker_idx.append(t_idx) unmatched_tracker_idx = np.array(unmatched_tracker_idx) return match_idx_pair, unmatched_bbox_idx, unmatched_tracker_idx def iou(bbox_a, bbox_b): """IoU method. Compute the intersection over union of 2 bounding boxes. Args: bbox_a (list): Bounding box A in a list as [xmin, ymin, xmax, ymax]. bbox_b (list): Bounding box B in a list as [xmin, ymin, xmax, ymax]. Return: iou (float): Value of IoU, between [0, 1]. 
""" # utility functions def dx(bbox): return max(0, (bbox[2] - bbox[0] + 1)) def dy(bbox): return max(0, (bbox[3] - bbox[1] + 1)) # bbox area bbox_area_a = dx(bbox_a) * dy(bbox_a) bbox_area_b = dx(bbox_b) * dy(bbox_b) # intersection coordinates bbox_i = np.array([ max(bbox_a[0], bbox_b[0]), max(bbox_a[1], bbox_b[1]), min(bbox_a[2], bbox_b[2]), min(bbox_a[3], bbox_b[3])]) # intersection area intersec_area = dx(bbox_i) * dy(bbox_i) # union area union_area = bbox_area_a + bbox_area_b - intersec_area # intersection over union return float(intersec_area)/float(union_area) # testing code def test(_): from tracker import tracker # used for generating a random bounding box def gen_bbox(): bbox = np.random.randint(0,99,size=4) bbox[0], bbox[2] = min(bbox[0], bbox[2]), max(bbox[0], bbox[2]) bbox[1], bbox[3] = min(bbox[1], bbox[3]), max(bbox[1], bbox[3]) return bbox bbox_list = [] label_list = [] tracker_list = [] for i in range(flags.FLAGS.num): bbox = gen_bbox() label = i tracker_list.append(tracker(tid=i, bbox=bbox, depth=0, est_dict={'label': i})) bbox_list.append(bbox) label_list.append(label) logging.info('test perfect matching') m, ub, ut = associate(bbox_list, label_list, tracker_list) logging.info('m={}, ub={}, ut={}'.format(m, ub, ut)) logging.info('test empty bbox list') m, ub, ut = associate((), (), tracker_list) logging.info('m={}, ub={}, ut={}'.format(m, ub, ut)) logging.info('test empty tracker list') m, ub, ut = associate(bbox_list, label_list, ()) logging.info('m={}, ub={}, ut={}'.format(m, ub, ut)) bbox_list = [] for i in range(flags.FLAGS.num): bbox_list.append(gen_bbox()) logging.info('test random matching') m, ub, ut = associate(bbox_list, label_list, tracker_list) logging.info('m={}, ub={}, ut={}'.format(m, ub, ut)) if __name__ == "__main__": from absl import app, flags, logging flags.DEFINE_integer('num', 10, 'Number of runs.') logging.set_verbosity(logging.INFO) app.run(test)
[ "tracker.tracker.get_est_dict", "scipy.optimize.linear_sum_assignment", "tracker.tracker", "absl.flags.DEFINE_integer", "tracker.tracker.get_bbox", "absl.logging.info", "absl.app.run", "numpy.array", "numpy.random.randint", "numpy.empty", "absl.logging.set_verbosity" ]
[((1987, 2023), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['(-match_matrix)'], {}), '(-match_matrix)\n', (2008, 2023), False, 'from scipy.optimize import linear_sum_assignment\n'), ((2646, 2674), 'numpy.array', 'np.array', (['unmatched_bbox_idx'], {}), '(unmatched_bbox_idx)\n', (2654, 2674), True, 'import numpy as np\n'), ((2900, 2931), 'numpy.array', 'np.array', (['unmatched_tracker_idx'], {}), '(unmatched_tracker_idx)\n', (2908, 2931), True, 'import numpy as np\n'), ((4557, 4594), 'absl.logging.info', 'logging.info', (['"""test perfect matching"""'], {}), "('test perfect matching')\n", (4569, 4594), False, 'from absl import app, flags, logging\n'), ((4716, 4752), 'absl.logging.info', 'logging.info', (['"""test empty bbox list"""'], {}), "('test empty bbox list')\n", (4728, 4752), False, 'from absl import app, flags, logging\n'), ((4859, 4898), 'absl.logging.info', 'logging.info', (['"""test empty tracker list"""'], {}), "('test empty tracker list')\n", (4871, 4898), False, 'from absl import app, flags, logging\n'), ((5098, 5134), 'absl.logging.info', 'logging.info', (['"""test random matching"""'], {}), "('test random matching')\n", (5110, 5134), False, 'from absl import app, flags, logging\n'), ((5320, 5370), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num"""', '(10)', '"""Number of runs."""'], {}), "('num', 10, 'Number of runs.')\n", (5340, 5370), False, 'from absl import app, flags, logging\n'), ((5373, 5408), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.INFO'], {}), '(logging.INFO)\n', (5394, 5408), False, 'from absl import app, flags, logging\n'), ((5411, 5424), 'absl.app.run', 'app.run', (['test'], {}), '(test)\n', (5418, 5424), False, 'from absl import app, flags, logging\n'), ((2361, 2388), 'numpy.empty', 'np.empty', (['(0, 2)'], {'dtype': 'int'}), '((0, 2), dtype=int)\n', (2369, 2388), True, 'import numpy as np\n'), ((2416, 2440), 'numpy.array', 'np.array', (['match_idx_pair'], {}), 
'(match_idx_pair)\n', (2424, 2440), True, 'import numpy as np\n'), ((4101, 4133), 'numpy.random.randint', 'np.random.randint', (['(0)', '(99)'], {'size': '(4)'}), '(0, 99, size=4)\n', (4118, 4133), True, 'import numpy as np\n'), ((4437, 4494), 'tracker.tracker', 'tracker', ([], {'tid': 'i', 'bbox': 'bbox', 'depth': '(0)', 'est_dict': "{'label': i}"}), "(tid=i, bbox=bbox, depth=0, est_dict={'label': i})\n", (4444, 4494), False, 'from tracker import tracker\n'), ((1742, 1764), 'tracker.tracker.get_est_dict', 'tracker.get_est_dict', ([], {}), '()\n', (1762, 1764), False, 'from tracker import tracker\n'), ((1821, 1839), 'tracker.tracker.get_bbox', 'tracker.get_bbox', ([], {}), '()\n', (1837, 1839), False, 'from tracker import tracker\n')]
from sklearn.neural_network import MLPRegressor import numpy as np import matplotlib.pyplot as plt x = np.arange(0.0, 1, 0.01).reshape(-1, 1) y = np.sinc(x).ravel() nn = MLPRegressor(hidden_layer_sizes=(3), activation='tanh', solver='lbfgs') n = nn.fit(x, y) test_x = np.arange(-0.1, 1.1, 0.01).reshape(-1, 1) test_y = nn.predict(test_x) fig = plt.figure() ax1 = fig.add_subplot(111) ax1.scatter(x, y, s=5, c='b', marker="o", label='real') ax1.plot(test_x,test_y, c='r', label='NN Prediction') plt.legend() plt.show()
[ "sklearn.neural_network.MLPRegressor", "numpy.arange", "numpy.sinc", "matplotlib.pyplot.figure", "matplotlib.pyplot.legend", "matplotlib.pyplot.show" ]
[((172, 241), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': '(3)', 'activation': '"""tanh"""', 'solver': '"""lbfgs"""'}), "(hidden_layer_sizes=3, activation='tanh', solver='lbfgs')\n", (184, 241), False, 'from sklearn.neural_network import MLPRegressor\n'), ((368, 380), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (378, 380), True, 'import matplotlib.pyplot as plt\n'), ((520, 532), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (530, 532), True, 'import matplotlib.pyplot as plt\n'), ((533, 543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (541, 543), True, 'import matplotlib.pyplot as plt\n'), ((104, 127), 'numpy.arange', 'np.arange', (['(0.0)', '(1)', '(0.01)'], {}), '(0.0, 1, 0.01)\n', (113, 127), True, 'import numpy as np\n'), ((147, 157), 'numpy.sinc', 'np.sinc', (['x'], {}), '(x)\n', (154, 157), True, 'import numpy as np\n'), ((291, 317), 'numpy.arange', 'np.arange', (['(-0.1)', '(1.1)', '(0.01)'], {}), '(-0.1, 1.1, 0.01)\n', (300, 317), True, 'import numpy as np\n')]
import numpy as np import cv2 import socketserver from threading import Thread cap = cv2.VideoCapture(0) cv2.namedWindow('frame') def nothing(x): pass lowh = 0 lowl = 0 lows = 0 highh = 255 highl = 255 highs = 255 cv2.createTrackbar('lowh', 'frame', 0, 255, nothing) cv2.createTrackbar('lowl', 'frame', 0, 255, nothing) cv2.createTrackbar('lows', 'frame', 0, 255, nothing) cv2.createTrackbar('highh', 'frame', 0, 255, nothing) cv2.createTrackbar('highl', 'frame', 0, 255, nothing) cv2.createTrackbar('highs', 'frame', 0, 255, nothing) while(1): # Capture frame-by-frame ret, frame = cap.read() h = cv2.getTrackbarPos('lowh', 'frame') l = cv2.getTrackbarPos('lowl', 'frame') s = cv2.getTrackbarPos('lows', 'frame') hh = cv2.getTrackbarPos('highh', 'frame') ll = cv2.getTrackbarPos('highl', 'frame') ss = cv2.getTrackbarPos('highs', 'frame') # Our operations on the frame come here hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) low = np.array([h, l, s]) high = np.array([hh, ll, ss]) cvt = cv2.inRange(hsv, low, high) #res = cv2.bitwise_and(frame,frame, mask= cvt) # Display the resulting frame im2, contours, hierarchy = cv2.findContours(cvt, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) minArea = 300 cnt = contours[0] moms = cv2.moments(cnt) print(moms['m00']) print(moms['m01']) if moms["m00"] != 0: cx = int(moms['m10']/moms['m00']) cy = int(moms['m01']/moms['m00']) else: cx, cy = 0, 0 area = cv2.contourArea(cnt) for i in range(len(contours)): if area < minArea: continue rect = cv2.boundingRect(contours[i]) width = rect.width height = rect.height aspectRatio = height / width perfectAS = 2.5 AStol = .5 if aspectRatio < perfectAS - AStol or aspectRatio > perfectAS + AStol: continue rectang = area / (width * height) if rectang < .7: continue center = (moms.m10 / moms.m00, moms.m01 / moms.m00) print(len(cnt)) cv2.imshow('frame', cvt) #cv2.imshow('dab', res) if cv2.waitKey(1) & 0xFF == ord('q'): break # When everything done, release the capture cap.release() cv2.destroyAllWindows()
[ "cv2.inRange", "cv2.contourArea", "cv2.imshow", "numpy.array", "cv2.waitKey", "cv2.destroyAllWindows", "cv2.VideoCapture", "cv2.getTrackbarPos", "cv2.cvtColor", "cv2.findContours", "cv2.moments", "cv2.createTrackbar", "cv2.namedWindow", "cv2.boundingRect" ]
[((86, 105), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (102, 105), False, 'import cv2\n'), ((106, 130), 'cv2.namedWindow', 'cv2.namedWindow', (['"""frame"""'], {}), "('frame')\n", (121, 130), False, 'import cv2\n'), ((220, 272), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""lowh"""', '"""frame"""', '(0)', '(255)', 'nothing'], {}), "('lowh', 'frame', 0, 255, nothing)\n", (238, 272), False, 'import cv2\n'), ((273, 325), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""lowl"""', '"""frame"""', '(0)', '(255)', 'nothing'], {}), "('lowl', 'frame', 0, 255, nothing)\n", (291, 325), False, 'import cv2\n'), ((326, 378), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""lows"""', '"""frame"""', '(0)', '(255)', 'nothing'], {}), "('lows', 'frame', 0, 255, nothing)\n", (344, 378), False, 'import cv2\n'), ((379, 432), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""highh"""', '"""frame"""', '(0)', '(255)', 'nothing'], {}), "('highh', 'frame', 0, 255, nothing)\n", (397, 432), False, 'import cv2\n'), ((433, 486), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""highl"""', '"""frame"""', '(0)', '(255)', 'nothing'], {}), "('highl', 'frame', 0, 255, nothing)\n", (451, 486), False, 'import cv2\n'), ((487, 540), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""highs"""', '"""frame"""', '(0)', '(255)', 'nothing'], {}), "('highs', 'frame', 0, 255, nothing)\n", (505, 540), False, 'import cv2\n'), ((2251, 2274), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2272, 2274), False, 'import cv2\n'), ((617, 652), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""lowh"""', '"""frame"""'], {}), "('lowh', 'frame')\n", (635, 652), False, 'import cv2\n'), ((661, 696), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""lowl"""', '"""frame"""'], {}), "('lowl', 'frame')\n", (679, 696), False, 'import cv2\n'), ((705, 740), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""lows"""', '"""frame"""'], {}), "('lows', 'frame')\n", (723, 740), False, 'import 
cv2\n'), ((750, 786), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""highh"""', '"""frame"""'], {}), "('highh', 'frame')\n", (768, 786), False, 'import cv2\n'), ((796, 832), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""highl"""', '"""frame"""'], {}), "('highl', 'frame')\n", (814, 832), False, 'import cv2\n'), ((842, 878), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""highs"""', '"""frame"""'], {}), "('highs', 'frame')\n", (860, 878), False, 'import cv2\n'), ((933, 971), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (945, 971), False, 'import cv2\n'), ((982, 1001), 'numpy.array', 'np.array', (['[h, l, s]'], {}), '([h, l, s])\n', (990, 1001), True, 'import numpy as np\n'), ((1013, 1035), 'numpy.array', 'np.array', (['[hh, ll, ss]'], {}), '([hh, ll, ss])\n', (1021, 1035), True, 'import numpy as np\n'), ((1046, 1073), 'cv2.inRange', 'cv2.inRange', (['hsv', 'low', 'high'], {}), '(hsv, low, high)\n', (1057, 1073), False, 'import cv2\n'), ((1190, 1251), 'cv2.findContours', 'cv2.findContours', (['cvt', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(cvt, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (1206, 1251), False, 'import cv2\n'), ((1304, 1320), 'cv2.moments', 'cv2.moments', (['cnt'], {}), '(cnt)\n', (1315, 1320), False, 'import cv2\n'), ((1519, 1539), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (1534, 1539), False, 'import cv2\n'), ((2083, 2107), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'cvt'], {}), "('frame', cvt)\n", (2093, 2107), False, 'import cv2\n'), ((1639, 1668), 'cv2.boundingRect', 'cv2.boundingRect', (['contours[i]'], {}), '(contours[i])\n', (1655, 1668), False, 'import cv2\n'), ((2143, 2157), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2154, 2157), False, 'import cv2\n')]
r"""This module implements the RichardGrowth class. Inheriting from the base Growth class, this class implements a Richard (1959) growth model. """ from growth_modeling import Growth import numpy as np class RichardGrowth(Growth): r"""Implement the Richard's equation growth model. Attributes ---------- params_signature : array_like An array containing the name of each parameter sorted by how they are called in compute_t and compute_y. params : dict A dictionary of parameter fit and used by the model to predict. The params dictionary should be ordered and have the following keys: "a", "b", "d", "K" corresponding to the parameters of self.compute_y method. bounds : array_like Bounds for each parameter similar to the bounds parameter of scipy.curve_fit function. """ def __init__(self, params, bounds): r"""Initialize a Richard Growth Model. Parameters ---------- params: dict dict with the keys corresponding to the params_signature attribute. bounds : array_like Bounds for each parameter similar to the bounds parameter of scipy.curve_fit function should be order as params_signature. """ super().__init__(params, bounds) self.params_signature = ("a", "b", "d", "K") self._check_params() def compute_y(self, t, *args): r"""Compute growth cumulated values using Richard's equation. If the parameters in \*args are not specified the values from self.params are used. If one value from \*args is specified then all other values must be specified. Parameters ---------- t : array_like time values for which to compute the response values. a : float The maximum intrinsic rate of increase (RGR) of the response. (a > 0) b : float An additional parameter in the Richards equation introduced as a power law so that it can define asymmetric curves.(b > 0) d : float A parameter in the Richards equation which allows the time at which y = K/2 to be varied. K : int The upper asymptote of the response y. Returns ------- array_like the response values corresponding to the growth of t. 
Notes ----- The computation of the response values corresponds to the solution to the following differential equation introduced by [1]_: .. math:: \frac{\partial y}{\partial t} = ay[1 - (\frac{y}{K})^b] which as a solution for y when a > 0 and b > 0 [2]_: .. math:: y = K(1 + e^{(d − abt)})^{−1/b} References ---------- .. [1] <NAME>. 1959. "A flexible growth function for empirical use." Journal of Experimental Botany 10: 290–300. .. [2] <NAME>, <NAME>. 1981. "The biometry of plant growth." London: <NAME>. """ a, b, d, K = self._get_compute_parameters(args) return K * (1 + np.exp(d - a * b * t)) ** (- 1 / b)
[ "numpy.exp" ]
[((3190, 3211), 'numpy.exp', 'np.exp', (['(d - a * b * t)'], {}), '(d - a * b * t)\n', (3196, 3211), True, 'import numpy as np\n')]
import tkinter as tk #possibly nessesary unsure import cv2 import numpy as np import matplotlib.pyplot as plt import time import sys import os import re SCALEDOWNFACTOR = 0.2 #This is a magic number but what can ya do. I dont have a good way of getting image resolution. SCALEUPFACTOR = 2.0 GAIN = 13.2 DISTANCE_BETWEEN_TEETH = 10 PREDEFINED_LENGTH = 5.738 ONLYINCREASING = 0 USING_EXCEL_DATA_CHANGE = 1 TEST = 1 SWITCH_POINT = 20 def imclearborders(imgBW, radius): # Given a black and white image, first find all of its contours imgBWcopy = imgBW.copy() contours,hierarchy = cv2.findContours(imgBWcopy.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # Get dimensions of image imgRows = imgBW.shape[0] imgCols = imgBW.shape[1] contourList = [] # ID list of contours that touch the border # For each contour... for idx in np.arange(len(contours)): # Get the i'th contour cnt = contours[idx] # Look at each point in the contour for pt in cnt: rowCnt = pt[0][1] colCnt = pt[0][0] # If this is within the radius of the border # this contour goes bye bye! 
check1 = (rowCnt >= 0 and rowCnt < radius) or (rowCnt >= imgRows-1-radius and rowCnt < imgRows) check2 = (colCnt >= 0 and colCnt < radius) or (colCnt >= imgCols-1-radius and colCnt < imgCols) if check1 or check2: contourList.append(idx) break for idx in contourList: cv2.drawContours(imgBWcopy, contours, idx, (0,0,0), -1) return imgBWcopy def electricFieldToForce(voltageArray, stressArray): i = 0 x = 0 xPowerTwo = 0 xPowerThree = 0 xPowerFour = 0 if(USING_EXCEL_DATA_CHANGE == 1): A = 6*(pow(10, -11)) B = 6*(pow(10, -12)) C = 9*(pow(10, -12)) while(i < len(voltageArray)): x = voltageArray[i] xPowerTwo = pow(x, 2) stressArray[i] = (A*xPowerTwo)+(B*x)+C i = i + 1 elif(USING_EXCEL_DATA_CHANGE != 1): A = 4.9*(pow(10, -14)) B = 1.2*(pow(10, -12)) C = 6.5*(pow(10, -11)) D = -2.4*(pow(10, -11)) while(i<len(voltageArray)): x = voltageArray[i] xPowerTwo = pow(x, 2) xPowerThree = pow(x, 3) xPowerFour = pow(x, 4) stressArray[i] = (A*xPowerFour)+(B*xPowerThree)+(C*xPowerTwo)+(D*x) i = i + 1 else: i == i return 0 def hysteresis (strainArray, stressArray, switchPoint, TEST, BUG_TESTING_TEXT_OUTPUT_FILE): #const variable initialization SIZEOFGUASSDATATWENTYFOUR = 24 #initialization of iterators i = 0 j = 0 #intialize int variables indexWithMaximumValueInStrainArray = -1 midpoint = 0 leftbound = 0 rightbound = 0 GLOuter = 0 GLInner = 0 integral = 0 root = 0 weight = 0 #initialize double variables maxValueInStrainArray = 0.0 #intialize list variables splitStrainArrayIncreasing = [] splitStressArrayIncreasing = [] splitStrainArrayDecreasing = [] splitStressArrayDecreasing = [] #test initialization if(TEST == 1): strainArray = [0] stressArray = [0] x = np.linspace(0.0001, 1, 101) y = 0.1*np.exp(0.3*x) + 0.1*np.random.random(len(x)) x2 = np.linspace(0, 10, 101) y2 = np.log(9*x) + 0.1*np.random.random(len(x)) #end of test initialization x = np.linspace(-2, 2, 101) data = np.genfromtxt('GAUSS-24.dat', skip_header=1, skip_footer=1, names=True, dtype=None, delimiter=' ') 
#maxValueIndexOfStressArray = np.argmax(strainArray) do not trust use of numpy as it is a different color. Maybe its better lengthOfStrainArray = len(strainArray) lengthOfStressArray = len(stressArray)#Bug checking value #bug checking value if(lengthOfStrainArray != lengthOfStressArray): print('mismatched strain and stress arrays inputed within hysteresis function') return 0 else: #else do nothing i=i print('past bug checking') sys.stdout.flush() #while loop finds maximum value in the strain array #unchecked for off by one errors while(i < lengthOfStrainArray): if(maxValueInStrainArray < strainArray[i]): maxValueInStrainArray = strainArray[i] indexWithMaximumValueInStrainArray = i else: #else do nothing i=i i = i + 1 print('past strain value check') sys.stdout.flush() #bug checkin value if(indexWithMaximumValueInStrainArray == -1): print('no value in strain array over -1') return 0 else: #else do nothing i=i i = 0 #Creates stress/strain array for increasing values #unchecked for off by one errors while(i <= switchPoint): splitStrainArrayIncreasing.append(strainArray[i]) splitStressArrayIncreasing.append(stressArray[i]) if(TEST == 1): #overwrite with testing splitStrainArrayIncreasing[i] = x splitStressArrayIncreasing[i] = y else: i=i #else do nothing i = i + 1 print('past switch point check') sys.stdout.flush() #creates stress/strain array for decreasing values #unchecked for off by one errors while(i < lengthOfStrainArray): splitStrainArrayDecreasing.append(strainArray[i]) splitStressArrayDecreasing.append(stressArray[i]) if(TEST == 1): #overwrite with testing splitStrainArrayIncreasing[i] = x2 splitStressArrayIncreasing[i] = y2 else: i=i #else do nothing i = i + 1 j = j + 1 print('past arraySplitCheck') sys.stdout.flush() #should obtain a decreasing function of the form y=Ae^(Bx) i = 0 j = 0 stressArrayArr = np.asarray(stressArray) strainArrayArr = np.asarray(strainArray) stressArrayDecreasing = np.asarray(splitStressArrayDecreasing) strainArrayDecreasing = 
np.asarray(splitStrainArrayDecreasing) strainArrayDecreasingAbs = np.absolute(splitStrainArrayDecreasing) stressArrayIncreasing = np.asarray(splitStressArrayIncreasing) strainArrayIncreasing = np.asarray(splitStrainArrayIncreasing) strainArrayIncreasingAbs = np.absolute(splitStrainArrayIncreasing) if(ONLYINCREASING == 0): while(i<len(stressArrayIncreasing)): print(stressArrayIncreasing[i]) i = i + 1 i = 0 while(i<len(stressArrayDecreasing)): print(stressArrayDecreasing[i]) i = i + 1 i = 0 while(i < len(strainArrayIncreasing)): print(strainArrayIncreasing[i]) i = i + 1 i = 0 while(i << len(strainArrayDecreasing)): print(strainArrayDecreasing[i]) i = i + 1 print('past exponential equation') sys.stdout.flush() else: i == i #beta = 9999 #Delta = -999999 stressArrayDecreasingArr = np.asarray(stressArrayDecreasing) stressArrayIncreasingArr = np.asarray(stressArrayIncreasing) strainArrayDecreasingArr = np.asarray(strainArrayDecreasing) strainArrayIncreasingArr = np.asarray(strainArrayIncreasing) strainArrayDecreasingSquared = np.square(strainArrayDecreasingArr) strainArrayIncreasingSquared = np.square(strainArrayIncreasingArr) print('strain decreasing') sys.stdout.flush() a = plt.figure(figsize = (10,8)) axes= a.add_axes([0.1,0.1,0.8,0.8]) #plt.plot(strainArrayIncreasingAbs, splitStressArrayIncreasing, 'b.')#ln #plt.plot(strainArrayDecreasingAbs, splitStressArrayDecreasing, 'b.')#e X = np.arange(0, 20) if(ONLYINCREASING == 0): axes.plot(strainArrayDecreasing, stressArrayDecreasing, 'o') #axes.plot(stressArrayDecreasing, np.polyval(yEst, X)) axes.plot(strainArrayIncreasing, stressArrayIncreasing, 'o') #axes.plot(stressArrayIncreasing, np.polyval(yEst2, X)) #plt.plot(x, alpha*np.exp(beta*x), 'r') #plt.plot(x, (Cappa*np.log(np.absolute(Delta*x))+2), 'r') else: i == i A = np.vstack([strainArrayArr, np.ones(len(strainArrayArr))]).T stressArrayArr = stressArrayArr[:, np.newaxis] linearSlope = np.dot((np.dot(np.linalg.inv(np.dot(A.T,A)),A.T)),stressArrayArr) print(linearSlope) B 
= np.vstack([strainArrayDecreasingArr, np.ones(len(strainArrayDecreasingArr))]) B = np.vstack([strainArrayDecreasingSquared, B]).T stressArrayDecreasingArr = stressArrayDecreasingArr[:, np.newaxis] polyValuesDecreasing = np.dot((np.dot(np.linalg.inv(np.dot(B.T,B)),B.T)),stressArrayDecreasingArr) print(polyValuesDecreasing) C = np.vstack([strainArrayIncreasingSquared, strainArrayIncreasingArr, np.ones(len(strainArrayIncreasingArr))]).T stressArrayIncreasingArr = stressArrayIncreasingArr[:, np.newaxis] polyValuesIncreasing = np.dot((np.dot(np.linalg.inv(np.dot(C.T,C)),C.T)),stressArrayIncreasingArr) print(linearSlope) axes.plot(x, linearSlope[0]*x+linearSlope[1], 'r') axes.plot(x, (polyValuesDecreasing[0]*x*x)+polyValuesDecreasing[1]*x+polyValuesDecreasing[2], 'r') axes.plot(x, (polyValuesIncreasing[0]*x*x)+polyValuesIncreasing[1]*x+polyValuesIncreasing[2], 'r') plt.ylim([0,0.5]) plt.xlim([-0.2,0.2]) plt.xlabel('strain') plt.ylabel('stress (Pa)') plt.title('Stiffness Curve') plt.show() plt.savefig('hystersis_curve.png') GLOuter = (leftbound - rightbound)/2 GLInner = (leftbound + rightbound)/2 if(ONLYINCREASING == 0): i = 0 while(i < SIZEOFGUASSDATATWENTYFOUR): combineRootWeightValues = data[i] root = combineRootWeightValues[0] weight = combineRootWeightValues[1] integral = (GLOuter) * (weight) *(alpha * np.exp(beta * (GLOuter) * root * (GLInner))) + integral i = i + 1 print(integral) return 0 def bwareaopen(img, min_size, connectivity=8): """Remove small objects from binary image (approximation of bwareaopen in Matlab for 2D images). Args: img: a binary image (dtype=uint8) to remove small objects from min_size: minimum size (in pixels) for an object to remain in the image connectivity: Pixel connectivity; either 4 (connected via edges) or 8 (connected via edges and corners). 
Returns: the binary image with small objects removed """ # Find all connected components (called here "labels") num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats( img, connectivity=connectivity) # check size of all connected components (area in pixels) for i in range(num_labels): label_size = stats[i, cv2.CC_STAT_AREA] # remove connected components smaller than min_size if label_size < min_size: img[labels == i] = 0 return img def fillInBlanks(strainList): #I despise this function as it invariably looses data but its the only way to get around limitations with pixels and resolution i = 0 j = 1 k = -1 lengthOfStrainList = len(strainList) nextInLineForStrainList = 0 while(i < lengthOfStrainList): if(strainList[i] == 0 & (i != 0)): while(strainList[i] == 0): j = j + 1 nextInLineForStrainList = strainList[j] if(nextInLineForStrainList == 0): i = i #repeat elif(nextInLineForStrainList != 0): if(strainList[j] == 0): i == i elif(strainList[j] != 0): strainList[i] = i else: exit() i = i + 1 j = i + 2 k = i return 0 def ImageAnalysis(voltageList, imageList, Gain, distanceBetweenTeeth, predefiniedLength, switchPoint): #test is on at 1 TEST = 0 SKIP_MANUAL_IMAGE_CROP = 0 ALLOW_PRINTS_FOR_TESTING = 1 JUST_OVER_TWO_THIRDS_A_PLATELET = 0.65 #iterators i = 0 j = 0 k = 0 numFrames = len(imageList) #these all seem like old values pixelCheckGate = 0 numberOfWhitePixels = 0 lengthOfImageArrayWhitePixels = 0 longestLengthOfImageArrayWhitePixels = -1 lengthOfPixel = -1 #need to change designation to list as array is a sperate numpy class applicable to lists in math and functions lengthArray = [] strainArray = [] stressArray = [] forceArray = [] stressArrayToPascals = [] #these values exist but will be deleted in final code amplifiedVoltageArray = [] electricFieldArray = [] forceArray = [] cropImageSidesListTest = [319, 156, 194, 154] BUG_TESTING_TEXT_OUTPUT_FILE = open("bugReport.txt", "w+") print("data sent through") while(i < numFrames): lengthArray.append(0) i 
= i + 1 i = 0 while(i < (len(voltageList))): strainArray.append(0) stressArray.append(0) amplifiedVoltageArray.append(0) electricFieldArray.append(0) forceArray.append(0) stressArrayToPascals.append(0) i = i + 1 i = 0 print("lists set up") PI = np.pi tempStressArray = np.empty(numFrames, dtype=object) #needs to be changed to consider new data while ((i < numFrames) and (TEST != 1)): frame = imageList[i] frameScaled = cv2.resize(frame, None, fx= SCALEDOWNFACTOR, fy= SCALEDOWNFACTOR, interpolation= cv2.INTER_LINEAR)#daniels images are in 4k ULTRA resolution. Opencv HATES this so this will scale it down hopefully with little data loss frameNormalized = cv2.normalize(frameScaled, dst=None, alpha=0, beta=500, norm_type=cv2.NORM_MINMAX)#beta and alpha are magic numbers. I dont really understand why .tiff files are like this height, width, channels = frameNormalized.shape #will obtain a cropped image named pletelet grayPlatelet = cv2.cvtColor(frameScaled, cv2.COLOR_BGR2GRAY) thresholdValue, plateletImgThresholdApplied = cv2.threshold(grayPlatelet, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)#attepmt at otsus thresholding techniques. 
print("initial opencv finished") if(SKIP_MANUAL_IMAGE_CROP == 1): imCrop = plateletImgThresholdApplied[int(cropImageSidesListTest[1]):int(cropImageSidesListTest[1]+cropImageSidesListTest[3]), int(cropImageSidesListTest[0]):int(cropImageSidesListTest[0]+cropImageSidesListTest[2])] #imCrop = plateletImgThresholdApplied[156:310, 319:513] #non manual image crop for testing for specific test case elif(i == 0): # Select ROI fromCenter = False #Designates ROI not auto defined from center allowing user input in opencv function cropImageSidesList = cv2.selectROI("Crop Stage user input required", plateletImgThresholdApplied, fromCenter) #function for selection of region of interest # Crop image imCrop = plateletImgThresholdApplied[int(cropImageSidesList[1]):int(cropImageSidesList[1]+cropImageSidesList[3]), int(cropImageSidesList[0]):int(cropImageSidesList[0]+cropImageSidesList[2])] #using obtained regions of interest crop is preformed # Display cropped image cv2.imshow("cropped image", imCrop) resizedCrop = cv2.resize(imCrop, None, fx= SCALEUPFACTOR, fy= SCALEUPFACTOR, interpolation= cv2.INTER_LINEAR) cv2.imshow("cropped image resized", resizedCrop) elif(i != 0): imCrop = plateletImgThresholdApplied[int(cropImageSidesList[1]):int(cropImageSidesList[1]+cropImageSidesList[3]), int(cropImageSidesList[0]):int(cropImageSidesList[0]+cropImageSidesList[2])] else: BUG_TESTING_TEXT_OUTPUT_FILE.write("value of i is unexpected error") BUG_TESTING_TEXT_OUTPUT_FILE.close() sys.exit() whitePixelsOnScreen = np.sum(imCrop == 255) MINUMUMSIZEOFPXIELS = whitePixelsOnScreen*(JUST_OVER_TWO_THIRDS_A_PLATELET) print("white pixels finished") #Filling platelet holes. 
plateletFloodFilled = plateletImgThresholdApplied.copy()#preinitialization of platelet flood filled height, width = plateletFloodFilled.shape[:2]#I am unable to understand what .shape() and cannot find what this is online but seems to give width and height with difference of n pixels mask = np.zeros((height+2, width+2), np.uint8)#creates a mask or a zeros array of same size and shape of given platelet matrix array. Values within this array are set to unsigned int lengths of 8 cv2.floodFill(plateletFloodFilled, mask, (0,0), 255)#holes within the space selected are filled here plateletFloodFilledInverse = cv2.bitwise_not(plateletFloodFilled) plateletBinarizedHoleFilter = plateletImgThresholdApplied | plateletFloodFilledInverse plateletBinarizedHoleFilterClearedBorders = imclearborders(plateletBinarizedHoleFilter, 4)#put this definition into the function no nessecity for includes plateletBinarizedHoleFilterClearedBordersWSmallObjectsFilter = bwareaopen(plateletBinarizedHoleFilterClearedBorders, MINUMUMSIZEOFPXIELS, 4) print("mask found") if(i == 0): cv2.imshow("post processing image", plateletBinarizedHoleFilterClearedBordersWSmallObjectsFilter) else: i = i #j = 0 #k = 0 #XLim = cropImageSidesList[3] - cropImageSidesList[1] #YLim = cropImageSidesList[2] - cropImageSidesList[0] #these values present data but are incorrect j = int(cropImageSidesList[1]) k = int(cropImageSidesList[0]) XLim = int(cropImageSidesList[3] + cropImageSidesList[1]) YLim = int(cropImageSidesList[2] + cropImageSidesList[0]) longestLengthOfImageArrayWhitePixels = 0 #finding longest length of pixel in given image while(j < XLim): while(k < YLim): if(plateletBinarizedHoleFilterClearedBordersWSmallObjectsFilter[j, k] == 255): pixelCheckGate = 1 lengthOfImageArrayWhitePixels = lengthOfImageArrayWhitePixels + 1 else: if(pixelCheckGate == 0): i = i else: break k = k + 1 k = int(cropImageSidesList[0]) pixelCheckGate = 0 j = j + 1 if(lengthOfImageArrayWhitePixels > longestLengthOfImageArrayWhitePixels): 
longestLengthOfImageArrayWhitePixels = lengthOfImageArrayWhitePixels lengthOfImageArrayWhitePixels = 0 else: lengthOfImageArrayWhitePixels = 0 print("longest length found") #defining pixel length and area with given pixel length with outside parameters with first run barring that some other value if((i == 0) or (lengthOfPixel == -1)): numberOfWhitePixels = np.sum(plateletBinarizedHoleFilterClearedBordersWSmallObjectsFilter == 255) lengthOfPixel = predefiniedLength/longestLengthOfImageArrayWhitePixels #platelet area will not change but this is possibly a area of bug checking, large area change implies bad data or occlusion areaOfPlateletInitial = numberOfWhitePixels * lengthOfPixel if((i == 0) and (lengthOfPixel == -1)): print("initial loop did not find pixel length") elif(i != 0): i = i else: exit() print("pixel length definied") #pixel length bug check if(longestLengthOfImageArrayWhitePixels == -1): print('there is either a bug with the length check or there is no vision object within the given parameters') elif(longestLengthOfImageArrayWhitePixels > -1): #print(longestLengthOfImageArrayWhitePixels) i = i else: print('value of longestLengthOfImageArrayWhitePixels is not positive and below thought to be possible values') lengthArray[i] = longestLengthOfImageArrayWhitePixels * lengthOfPixel #testing function will be deleted in final product if(TEST == 1): hysteresis() else: i = i + 1; #do nothing #while loop split i = 0 while(i < (len(lengthArray) - 1)): j = i + 1 if(lengthArray[i] == 0): print('radius array zero') #not nessesarily an error but will tell user that there is no value here elif(lengthArray[i] != 0): strainArray[i] = (lengthArray[j] - lengthArray[i])/lengthArray[i] else: print('length array error') exit() i = i + 1 print("strain array discovered") i = 0 while(i < (len(voltageList))): amplifiedVoltageArray[i] = voltageList[i]*Gain electricFieldArray[i] = amplifiedVoltageArray[i]/distanceBetweenTeeth i = i + 1 electricFieldToForce(electricFieldArray, 
forceArray) i = 0 while(i < len(stressArray)): stressArray[i] = forceArray[i]/areaOfPlateletInitial stressArrayToPascals[i] = stressArray[i] * pow(10,12) i = i + 1 print("stress discovered") print("strain values") print(strainArray) print("stress values pascals") print(stressArrayToPascals) hysteresis(strainArray, stressArrayToPascals, switchPoint, TEST, BUG_TESTING_TEXT_OUTPUT_FILE) # release the video capture object plateletVideoData.release() # Closes all the windows currently opened. cv2.destroyAllWindows() ImageAnalysis(voltageList, imageList, GAIN, DISTANCE_BETWEEN_TEETH, PREDEFINED_LENGTH, SWITCH_POINT)
[ "cv2.normalize", "matplotlib.pyplot.ylabel", "numpy.log", "cv2.imshow", "cv2.destroyAllWindows", "sys.exit", "numpy.genfromtxt", "numpy.arange", "cv2.threshold", "matplotlib.pyplot.xlabel", "numpy.asarray", "numpy.exp", "numpy.linspace", "numpy.dot", "numpy.empty", "numpy.vstack", "m...
[((3702, 3725), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (3713, 3725), True, 'import numpy as np\n'), ((3738, 3840), 'numpy.genfromtxt', 'np.genfromtxt', (['"""GAUSS-24.dat"""'], {'skip_header': '(1)', 'skip_footer': '(1)', 'names': '(True)', 'dtype': 'None', 'delimiter': '""" """'}), "('GAUSS-24.dat', skip_header=1, skip_footer=1, names=True,\n dtype=None, delimiter=' ')\n", (3751, 3840), True, 'import numpy as np\n'), ((4465, 4483), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4481, 4483), False, 'import sys\n'), ((4909, 4927), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4925, 4927), False, 'import sys\n'), ((5668, 5686), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5684, 5686), False, 'import sys\n'), ((6245, 6263), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6261, 6263), False, 'import sys\n'), ((6378, 6401), 'numpy.asarray', 'np.asarray', (['stressArray'], {}), '(stressArray)\n', (6388, 6401), True, 'import numpy as np\n'), ((6424, 6447), 'numpy.asarray', 'np.asarray', (['strainArray'], {}), '(strainArray)\n', (6434, 6447), True, 'import numpy as np\n'), ((6479, 6517), 'numpy.asarray', 'np.asarray', (['splitStressArrayDecreasing'], {}), '(splitStressArrayDecreasing)\n', (6489, 6517), True, 'import numpy as np\n'), ((6547, 6585), 'numpy.asarray', 'np.asarray', (['splitStrainArrayDecreasing'], {}), '(splitStrainArrayDecreasing)\n', (6557, 6585), True, 'import numpy as np\n'), ((6618, 6657), 'numpy.absolute', 'np.absolute', (['splitStrainArrayDecreasing'], {}), '(splitStrainArrayDecreasing)\n', (6629, 6657), True, 'import numpy as np\n'), ((6689, 6727), 'numpy.asarray', 'np.asarray', (['splitStressArrayIncreasing'], {}), '(splitStressArrayIncreasing)\n', (6699, 6727), True, 'import numpy as np\n'), ((6757, 6795), 'numpy.asarray', 'np.asarray', (['splitStrainArrayIncreasing'], {}), '(splitStrainArrayIncreasing)\n', (6767, 6795), True, 'import numpy as np\n'), 
((6828, 6867), 'numpy.absolute', 'np.absolute', (['splitStrainArrayIncreasing'], {}), '(splitStrainArrayIncreasing)\n', (6839, 6867), True, 'import numpy as np\n'), ((7607, 7640), 'numpy.asarray', 'np.asarray', (['stressArrayDecreasing'], {}), '(stressArrayDecreasing)\n', (7617, 7640), True, 'import numpy as np\n'), ((7673, 7706), 'numpy.asarray', 'np.asarray', (['stressArrayIncreasing'], {}), '(stressArrayIncreasing)\n', (7683, 7706), True, 'import numpy as np\n'), ((7739, 7772), 'numpy.asarray', 'np.asarray', (['strainArrayDecreasing'], {}), '(strainArrayDecreasing)\n', (7749, 7772), True, 'import numpy as np\n'), ((7805, 7838), 'numpy.asarray', 'np.asarray', (['strainArrayIncreasing'], {}), '(strainArrayIncreasing)\n', (7815, 7838), True, 'import numpy as np\n'), ((7877, 7912), 'numpy.square', 'np.square', (['strainArrayDecreasingArr'], {}), '(strainArrayDecreasingArr)\n', (7886, 7912), True, 'import numpy as np\n'), ((7949, 7984), 'numpy.square', 'np.square', (['strainArrayIncreasingArr'], {}), '(strainArrayIncreasingArr)\n', (7958, 7984), True, 'import numpy as np\n'), ((8026, 8044), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8042, 8044), False, 'import sys\n'), ((8054, 8081), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (8064, 8081), True, 'import matplotlib.pyplot as plt\n'), ((8288, 8304), 'numpy.arange', 'np.arange', (['(0)', '(20)'], {}), '(0, 20)\n', (8297, 8304), True, 'import numpy as np\n'), ((9943, 9961), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 0.5]'], {}), '([0, 0.5])\n', (9951, 9961), True, 'import matplotlib.pyplot as plt\n'), ((9966, 9987), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.2, 0.2]'], {}), '([-0.2, 0.2])\n', (9974, 9987), True, 'import matplotlib.pyplot as plt\n'), ((9992, 10012), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""strain"""'], {}), "('strain')\n", (10002, 10012), True, 'import matplotlib.pyplot as plt\n'), ((10018, 10043), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""stress (Pa)"""'], {}), "('stress (Pa)')\n", (10028, 10043), True, 'import matplotlib.pyplot as plt\n'), ((10049, 10077), 'matplotlib.pyplot.title', 'plt.title', (['"""Stiffness Curve"""'], {}), "('Stiffness Curve')\n", (10058, 10077), True, 'import matplotlib.pyplot as plt\n'), ((10083, 10093), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10091, 10093), True, 'import matplotlib.pyplot as plt\n'), ((10099, 10133), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hystersis_curve.png"""'], {}), "('hystersis_curve.png')\n", (10110, 10133), True, 'import matplotlib.pyplot as plt\n'), ((11319, 11383), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['img'], {'connectivity': 'connectivity'}), '(img, connectivity=connectivity)\n', (11351, 11383), False, 'import cv2\n'), ((14247, 14280), 'numpy.empty', 'np.empty', (['numFrames'], {'dtype': 'object'}), '(numFrames, dtype=object)\n', (14255, 14280), True, 'import numpy as np\n'), ((22876, 22899), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (22897, 22899), False, 'import cv2\n'), ((1589, 1646), 'cv2.drawContours', 'cv2.drawContours', (['imgBWcopy', 'contours', 'idx', '(0, 0, 0)', '(-1)'], {}), '(imgBWcopy, contours, idx, (0, 0, 0), -1)\n', (1605, 1646), False, 'import cv2\n'), ((3471, 3498), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(1)', '(101)'], {}), '(0.0001, 1, 101)\n', (3482, 3498), True, 'import numpy as np\n'), ((3576, 3599), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(101)'], {}), '(0, 10, 101)\n', (3587, 3599), True, 'import numpy as np\n'), ((7483, 7501), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7499, 7501), False, 'import sys\n'), ((9087, 9131), 'numpy.vstack', 'np.vstack', (['[strainArrayDecreasingSquared, B]'], {}), '([strainArrayDecreasingSquared, B])\n', (9096, 9131), True, 'import numpy as np\n'), ((14433, 14532), 'cv2.resize', 'cv2.resize', (['frame', 'None'], {'fx': 'SCALEDOWNFACTOR', 'fy': 
'SCALEDOWNFACTOR', 'interpolation': 'cv2.INTER_LINEAR'}), '(frame, None, fx=SCALEDOWNFACTOR, fy=SCALEDOWNFACTOR,\n interpolation=cv2.INTER_LINEAR)\n', (14443, 14532), False, 'import cv2\n'), ((14679, 14766), 'cv2.normalize', 'cv2.normalize', (['frameScaled'], {'dst': 'None', 'alpha': '(0)', 'beta': '(500)', 'norm_type': 'cv2.NORM_MINMAX'}), '(frameScaled, dst=None, alpha=0, beta=500, norm_type=cv2.\n NORM_MINMAX)\n', (14692, 14766), False, 'import cv2\n'), ((14987, 15032), 'cv2.cvtColor', 'cv2.cvtColor', (['frameScaled', 'cv2.COLOR_BGR2GRAY'], {}), '(frameScaled, cv2.COLOR_BGR2GRAY)\n', (14999, 15032), False, 'import cv2\n'), ((15090, 15162), 'cv2.threshold', 'cv2.threshold', (['grayPlatelet', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(grayPlatelet, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (15103, 15162), False, 'import cv2\n'), ((16988, 17009), 'numpy.sum', 'np.sum', (['(imCrop == 255)'], {}), '(imCrop == 255)\n', (16994, 17009), True, 'import numpy as np\n'), ((17497, 17540), 'numpy.zeros', 'np.zeros', (['(height + 2, width + 2)', 'np.uint8'], {}), '((height + 2, width + 2), np.uint8)\n', (17505, 17540), True, 'import numpy as np\n'), ((17697, 17750), 'cv2.floodFill', 'cv2.floodFill', (['plateletFloodFilled', 'mask', '(0, 0)', '(255)'], {}), '(plateletFloodFilled, mask, (0, 0), 255)\n', (17710, 17750), False, 'import cv2\n'), ((17838, 17874), 'cv2.bitwise_not', 'cv2.bitwise_not', (['plateletFloodFilled'], {}), '(plateletFloodFilled)\n', (17853, 17874), False, 'import cv2\n'), ((3614, 3627), 'numpy.log', 'np.log', (['(9 * x)'], {}), '(9 * x)\n', (3620, 3627), True, 'import numpy as np\n'), ((18358, 18459), 'cv2.imshow', 'cv2.imshow', (['"""post processing image"""', 'plateletBinarizedHoleFilterClearedBordersWSmallObjectsFilter'], {}), "('post processing image',\n plateletBinarizedHoleFilterClearedBordersWSmallObjectsFilter)\n", (18368, 18459), False, 'import cv2\n'), ((20160, 20235), 'numpy.sum', 'np.sum', 
(['(plateletBinarizedHoleFilterClearedBordersWSmallObjectsFilter == 255)'], {}), '(plateletBinarizedHoleFilterClearedBordersWSmallObjectsFilter == 255)\n', (20166, 20235), True, 'import numpy as np\n'), ((3517, 3532), 'numpy.exp', 'np.exp', (['(0.3 * x)'], {}), '(0.3 * x)\n', (3523, 3532), True, 'import numpy as np\n'), ((8926, 8940), 'numpy.dot', 'np.dot', (['A.T', 'A'], {}), '(A.T, A)\n', (8932, 8940), True, 'import numpy as np\n'), ((9267, 9281), 'numpy.dot', 'np.dot', (['B.T', 'B'], {}), '(B.T, B)\n', (9273, 9281), True, 'import numpy as np\n'), ((9601, 9615), 'numpy.dot', 'np.dot', (['C.T', 'C'], {}), '(C.T, C)\n', (9607, 9615), True, 'import numpy as np\n'), ((15858, 15950), 'cv2.selectROI', 'cv2.selectROI', (['"""Crop Stage user input required"""', 'plateletImgThresholdApplied', 'fromCenter'], {}), "('Crop Stage user input required', plateletImgThresholdApplied,\n fromCenter)\n", (15871, 15950), False, 'import cv2\n'), ((16331, 16366), 'cv2.imshow', 'cv2.imshow', (['"""cropped image"""', 'imCrop'], {}), "('cropped image', imCrop)\n", (16341, 16366), False, 'import cv2\n'), ((16394, 16491), 'cv2.resize', 'cv2.resize', (['imCrop', 'None'], {'fx': 'SCALEUPFACTOR', 'fy': 'SCALEUPFACTOR', 'interpolation': 'cv2.INTER_LINEAR'}), '(imCrop, None, fx=SCALEUPFACTOR, fy=SCALEUPFACTOR, interpolation=\n cv2.INTER_LINEAR)\n', (16404, 16491), False, 'import cv2\n'), ((16503, 16551), 'cv2.imshow', 'cv2.imshow', (['"""cropped image resized"""', 'resizedCrop'], {}), "('cropped image resized', resizedCrop)\n", (16513, 16551), False, 'import cv2\n'), ((16941, 16951), 'sys.exit', 'sys.exit', ([], {}), '()\n', (16949, 16951), False, 'import sys\n'), ((10522, 10561), 'numpy.exp', 'np.exp', (['(beta * GLOuter * root * GLInner)'], {}), '(beta * GLOuter * root * GLInner)\n', (10528, 10561), True, 'import numpy as np\n')]
#!/usr/bin/env python
# coding: utf-8
"""Waypoint-prediction training pipeline, part 1: dataset helpers.

Each training example is a camera frame (PNG) paired with a JSON annotation
holding a 7-waypoint spline (relative distance + relative angle per waypoint),
a high-level driving command, and auxiliary labels (steering angle,
intersection flag).
"""

# In[24]:

import json
import math
import random
from pathlib import Path
from typing import List, Tuple

import numpy as np
import matplotlib.pyplot as plt
#get_ipython().run_line_magic('matplotlib', 'inline')

from tensorflow.keras.layers import (Input, Dense, Conv2D, MaxPooling2D, Flatten,
                                     LSTM, concatenate, GlobalMaxPooling2D,
                                     RepeatVector, GlobalAveragePooling2D,
                                     Dropout, TimeDistributed)
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.applications import vgg16, mobilenet_v2
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.callbacks import (ModelCheckpoint, TensorBoard,
                                        ReduceLROnPlateau, EarlyStopping)
from tensorflow.keras.utils import plot_model, Sequence, to_categorical, multi_gpu_model
from livelossplot.tf_keras import PlotLossesCallback
import tensorflow as tf

# Grow GPU memory on demand instead of reserving it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
tf.keras.backend.set_session(session)

import platform
print('Python', platform.python_version())

# In[25]:

import tensorflow
tensorflow.__version__

# In[26]:

# Closed set of driving commands; list position == one-hot index.
COMMANDS = ['left', 'get_left_lane', 'keep_lane', 'straight', 'get_right_lane', 'right']


def command_to_onehot(command: str) -> np.ndarray:
    """keep_lane -> [0. 0. 1. 0. 0. 0.]

    Raises ValueError when `command` is not one of COMMANDS.
    """
    onehot = np.zeros(len(COMMANDS))
    onehot[COMMANDS.index(command)] = 1
    return onehot


def onehot_to_command(onehot: np.ndarray) -> str:
    """[0. 0. 1. 0. 0. 0.] -> keep_lane"""
    return COMMANDS[np.argmax(onehot)]


def load_image(path: Path):
    """Load an image resized to the 224x224x3 network input size (PIL image)."""
    height, width, channels = 224, 224, 3
    return load_img(str(path), target_size=(height, width, channels))


def load_preprocessed_image(path: Path, preprocessor: str = 'mobilenet_v2') -> np.ndarray:
    """Load an image and apply the backbone-specific input preprocessing.

    preprocessor: 'mobilenet_v2' (default) or 'vgg16'.
    """
    image_arr = img_to_array(load_image(path))
    # NOTE: the previous no-op reshape to (height, width, channels) was removed;
    # img_to_array already returns that shape.
    if preprocessor == 'mobilenet_v2':
        return mobilenet_v2.preprocess_input(image_arr)
    return vgg16.preprocess_input(image_arr)


def load_json(path: Path) -> dict:
    """Read one annotation JSON file and return the parsed dict.

    (Fixed: the old annotation claimed Tuple[np.ndarray, str], but the
    function returns the raw JSON object.)
    """
    with path.open() as f:
        return json.load(f)


def get_spline(data: dict) -> np.ndarray:
    """Flatten the (7, 2) waypoint spline into a (14,) vector:
    first the 7 relative distances, then the 7 relative angles."""
    waypoints = data['spline']
    distances = [waypoint[0] for waypoint in waypoints]
    angles = [waypoint[1] for waypoint in waypoints]
    return np.array(distances + angles)


def get_command_input(data: dict) -> np.ndarray:
    """One-hot encoding of the example's driving command."""
    return command_to_onehot(data['command'])


#------------------------------------------------------------------------
# Visualization

def rel_point(point, angle, length):
    """Endpoint of a segment of `length` starting at `point` under `angle`.

    The angle is measured against the OY axis, not OX.
    """
    x, y = point
    endx = length * math.sin(math.radians(angle)) + x
    endy = length * math.cos(math.radians(angle)) + y
    return endx, endy


def plottable_spline(spline: np.ndarray,
                     shift_by: Tuple[float, float] = (200, 112),
                     scale: float = 5) -> Tuple[np.ndarray, np.ndarray]:
    """Turn a flat (14,) spline into image-space points for plt.plot(xs, ys).

    De-normalizes with true_dist/true_angle, chains the relative waypoints
    into absolute points, then scales and shifts them into image coordinates.
    """
    vertical_shift, horizontal_shift = shift_by
    xs, ys = [], []
    last_point = (0, 0)
    distances_arr = true_dist(spline[:7])
    angles_arr = true_angle(spline[7:14])
    for rel_distance, rel_angle in zip(distances_arr, angles_arr):
        x, y = rel_point(last_point, rel_angle, rel_distance)
        xs.append(x)
        ys.append(y)
        last_point = (x, y)
    xs = np.array(xs) * scale + horizontal_shift
    ys = vertical_shift - np.array(ys) * scale
    return xs, ys


# In[27]:

TRAIN_DATASET_DIRS = [
    Path('/home/bwroblew/Datasets/waypoint_predition_combined/train_dataset1+2'),
    Path('/home/bwroblew/Datasets/waypoint_predition_combined/train_dataset3')
]
TEST_DATASET_DIRS = [
    Path('/home/bwroblew/Datasets/waypoint_predition_combined/test_dataset1')
]

# In[28]:

def paths(dirs, pattern):
    """All paths matching `pattern` across every directory in `dirs`."""
    together = []
    for directory in dirs:
        together += list(directory.glob(pattern))
    return together

img_train = paths(TRAIN_DATASET_DIRS, '*.png')
json_train = paths(TRAIN_DATASET_DIRS, '*.json')
assert len(img_train) == len(json_train)
print(f'{len(img_train)} images found in {TRAIN_DATASET_DIRS}')

img_test = paths(TEST_DATASET_DIRS, '*.png')
json_test = paths(TEST_DATASET_DIRS, '*.json')
assert len(img_test) == len(json_test)
print(f'{len(img_test)} images found in {TEST_DATASET_DIRS}')

print(f'{len(img_train) / (len(img_train)+len(img_test)) * 100}% is training data')

# In[29]:

def calculate_spline_stats(json_paths):
    """Histogram and summarize every spline distance/angle in `json_paths`.

    Returns a dict with mean/min/max of distances and angles.
    Raises ValueError on an empty dataset (previously this crashed with a
    ZeroDivisionError / min() on empty sequence).
    """
    all_distances = []
    all_angles = []
    for idx, json_path in enumerate(json_paths):
        data = load_json(json_path)
        for dist, angle in data['spline']:
            all_distances.append(dist)
            all_angles.append(angle)
        # print(f'{idx}/{len(json_paths)}')
    assert len(all_distances) == len(all_angles)
    if not all_distances:
        raise ValueError('no spline data found in json_paths')
    plt.hist(all_distances, bins=100)
    plt.show()
    plt.hist(all_angles, bins=200)
    plt.show()
    return dict(
        mean_dist=sum(all_distances) / len(all_distances),
        mean_angle=sum(all_angles) / len(all_angles),
        min_dist=min(all_distances),
        min_angle=min(all_angles),
        max_dist=max(all_distances),
        max_angle=max(all_angles),
    )

STATS = calculate_spline_stats(json_train + json_test)

# Manual overrides of the measured ranges ("normalization cheat").
print('OSZUSTWO NORMALIZACJI')
STATS['min_angle'] = -100
STATS['max_angle'] = 100
STATS['max_dist'] = 10
print(json.dumps(STATS, indent=4))

# In[30]:

def true_dist(normalized_distances: np.ndarray):
    """Map normalized distances back to the real range recorded in STATS."""
    denominator = STATS['max_dist'] - STATS['min_dist']
    return STATS['min_dist'] + (normalized_distances * denominator)

def true_angle(normalized_angles: np.ndarray):
    """Map normalized angles back to the real range recorded in STATS."""
    denominator = STATS['max_angle'] - STATS['min_angle']
    return STATS['min_angle'] + (normalized_angles * denominator)

# In[31]:

def visualize_dataset_sample(img_path: Path, predicted_spline=None, last_row_only: bool=False):
    """Show one dataset image with its ground-truth spline (red), optional
    predicted spline (blue), intersection flag and steering-angle bar.

    `last_row_only` is kept for interface compatibility; it is unused here.
    """
    json_path = img_path.parent / f'{img_path.stem}.json'
    # Load original image (without preprocessing)
    original_image = load_image(img_path)

    # Load spline, command and auxiliary labels
    data = load_json(json_path)
    flatten_spline = get_spline(data)
    command = data.get('command')
    steering_angle = data.get('angle')
    intersection_ahead = data.get('intersection_ahead')

    # Display image with command
    plt.title(f'Original [{command}]')
    plt.imshow(original_image)

    # Overlay with ground-truth (and optionally predicted) spline
    xs, ys = plottable_spline(flatten_spline)
    plt.plot(xs, ys, '.-r')
    if predicted_spline is not None:
        xs, ys = plottable_spline(predicted_spline)
        plt.plot(xs, ys, '.-b')

    # Overlay intersection flag and steering angle
    plt.text(224/2, 220, "Intersection: YES" if intersection_ahead else "Intersection: NO", color='w')
    plt.barh(200, steering_angle*100)
    plt.show()

# In[32]:

def examples_per_command(path: Path):
    """Count annotation files per driving command under `path` (class balance)."""
    count = {command: 0 for command in COMMANDS}
    for json_path in path.glob('*.json'):
        with json_path.open() as f:
            data = json.load(f)
        count[data['command']] += 1
    return count

# In[46]:

def visualize_batch(batch):
    """Visualize the first 10 samples of a batch from ImageDatasetGenerator."""
    show_last = 10
    fig, axes = plt.subplots(2, 5, figsize=(45, 20), sharex='all', sharey='all')
    fig.tight_layout()
    axes = axes.ravel()
    X, Y = batch
    imgs = X[0]     # Shape: (bs, 224, 224, 3)
    cmds = X[1]     # Shape: (bs, 6)
    splines = Y     # Shape: (bs, 14)
    for idx_in_batch in range(show_last):
        ax = axes[idx_in_batch]
        preprocessed_image = imgs[idx_in_batch]
        command = onehot_to_command(cmds[idx_in_batch])
        ax.title.set_text(f'Original [{idx_in_batch}][{command}]')
        # Rescale/clip so imshow accepts the preprocessed float image; see
        # https://stackoverflow.com/questions/49643907
        preprocessed_image = (preprocessed_image * 100).astype(np.uint8)
        ax.imshow(preprocessed_image)
        # Overlay with spline
        xs, ys = plottable_spline(splines[idx_in_batch])
        ax.plot(xs, ys, '.-w')
    plt.show()


class ImageDatasetGenerator(Sequence):
    """Keras Sequence yielding ([images, command one-hots], splines) batches."""

    def __init__(self, dataset_dirs: List[Path], batch_size: int):
        """Collect every PNG path under the given dataset directories."""
        self.batch_size = batch_size
        self.img_paths = []
        for dataset_dir in dataset_dirs:
            self.img_paths += [Path(img) for img in dataset_dir.glob('**/*.png')]

    def __len__(self):
        """Number of full batches (the trailing remainder is dropped)."""
        return len(self.img_paths) // self.batch_size

    def on_epoch_end(self):
        """Reshuffle the dataset between epochs.

        BUG FIX: the original only shuffled *within* each batch, so every
        epoch saw identical batch compositions.
        """
        random.shuffle(self.img_paths)

    def __getitem__(self, idx):
        """Prepare and return shuffled mini-batch number `idx`."""
        batch_range = range(idx * self.batch_size, (idx + 1) * self.batch_size)
        batch_of_imgs = []
        batch_of_commands = []
        batch_of_splines = []
        for img_id in batch_range:
            img_path = self.img_paths[img_id]
            json_path = img_path.parent / f'{img_path.stem}.json'
            data = load_json(json_path)
            # X1 => Image
            batch_of_imgs.append(load_preprocessed_image(img_path))
            # X2 => Command
            batch_of_commands.append(get_command_input(data))
            # Y => Expected spline
            batch_of_splines.append(get_spline(data))
        # Shuffle all three arrays with one shared permutation so
        # (image, command, spline) triples stay aligned.
        indices = np.arange(self.batch_size)
        np.random.shuffle(indices)
        batch_of_imgs = np.array(batch_of_imgs)[indices]
        batch_of_commands = np.array(batch_of_commands)[indices]
        batch_of_splines = np.array(batch_of_splines)[indices]
        # Shape: [(bs, 224, 224, 3), (bs, 6)], (bs, 14)
        return [batch_of_imgs, batch_of_commands], batch_of_splines

#bs = 32
#print('Calculated with bs =', 32)
#ds = ImageDatasetGenerator(TRAIN_DATASET_DIRS, batch_size=bs)
#print('Train batches:', len(ds))
#ds = ImageDatasetGenerator(TEST_DATASET_DIRS, batch_size=bs)
#print('Test batches:', len(ds))

# In[47]:

#random_batch_idx = random.randint(0, len(ds) - 1)
#visualize_batch(ds[random_batch_idx])

# In[48]:

import os
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.python.eager import context


class TrainValTensorBoard(TensorBoard):
    """TensorBoard callback that writes training and validation scalars to two
    sibling log directories so both curves overlay in one TensorBoard chart."""

    def __init__(self, log_dir, **kwargs):
        # Validation summaries get their own writer/subdirectory; the base
        # class handles the training subdirectory.
        self.val_log_dir = os.path.join(log_dir, 'validation')
        training_log_dir = os.path.join(log_dir, 'training')
        super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)

    def set_model(self, model):
        # Writer type depends on whether TF runs eagerly or in graph mode.
        if context.executing_eagerly():
            self.val_writer = tf.contrib.summary.create_file_writer(self.val_log_dir)
        else:
            self.val_writer = tf.summary.FileWriter(self.val_log_dir)
        super(TrainValTensorBoard, self).set_model(model)

    def _write_custom_summaries(self, step, logs=None):
        logs = logs or {}
        # Route val_* metrics (prefix stripped) to the validation writer,
        # then hand only the training metrics to the base implementation.
        val_logs = {k.replace('val_', ''): v for k, v in logs.items() if 'val_' in k}
        if context.executing_eagerly():
            with self.val_writer.as_default(), tf.contrib.summary.always_record_summaries():
                for name, value in val_logs.items():
                    tf.contrib.summary.scalar(name, value.item(), step=step)
        else:
            for name, value in val_logs.items():
                summary = tf.Summary()
                summary_value = summary.value.add()
                summary_value.simple_value = value.item()
                summary_value.tag = name
                self.val_writer.add_summary(summary, step)
        self.val_writer.flush()
        logs = {k: v for k, v in logs.items() if not 'val_' in k}
        super(TrainValTensorBoard, self)._write_custom_summaries(step, logs)

    def on_train_end(self, logs=None):
        super(TrainValTensorBoard, self).on_train_end(logs)
        self.val_writer.close()


# In[49]:

LOSS_FUNCTION_NAME = 'mean_squared_error'


def freeze_layers(model):
    """Mark every layer of `model` as non-trainable (frozen backbone)."""
    for layer in model.layers:
        layer.trainable = False


def define_corrected_model():
    """Frozen VGG16 backbone + command one-hot -> 14-value spline regressor.

    Returns (compiled model, model name).
    """
    model_name = 'miczi_v10'
    # Inputs
    input_img_seq = Input(shape=(224, 224, 3))
    input_command = Input(shape=(6,))
    # Image branch
    base_model = VGG16(input_shape=(224, 224, 3), weights='imagenet', include_top=False)
    freeze_layers(base_model)
    _ = Flatten()(base_model.output)
    _ = Dense(1024)(_)
    _ = Dropout(0.5)(_)
    cnn_output = Dense(256, activation='relu')(_)
    # Merge CNN features with the command input
    _ = concatenate([cnn_output, input_command])
    _ = Dense(128, activation='relu')(_)
    _ = Dropout(0.4)(_)
    outputs = Dense(14, activation='linear')(_)
    # Combine inputs, outputs
    model = Model(inputs=[base_model.input, input_command], outputs=outputs)
    model.compile(loss=LOSS_FUNCTION_NAME, optimizer='adam', metrics=['accuracy', 'mse', 'mae'])
    print(model_name, model.summary())
    # plot_model(model, show_shapes=True, to_file=model_name + '_plot.png')
    return model, model_name


def define_lstm_model():
    """Frozen VGG16 backbone + command input + LSTM head -> spline regressor.

    Returns (compiled model, model name).
    """
    model_name = 'miczi_v11'
    # Inputs
    input_img_seq = Input(shape=(224, 224, 3))
    input_command = Input(shape=(6,))
    # Image branch
    base_model = VGG16(input_shape=(224, 224, 3), weights='imagenet', include_top=False)
    freeze_layers(base_model)
    _ = Flatten()(base_model.output)
    cnn_output = Dense(256, activation='relu')(_)
    _ = concatenate([cnn_output, input_command])
    _ = Dense(128, activation='relu')(_)
    _ = Dropout(0.2)(_)
    _ = Dense(64, activation='relu')(_)
    _ = LSTM(32, dropout=0.2, input_shape=(64, 1), return_sequences=True)(_)
    outputs = Dense(14, activation='linear')(_)
    # Combine inputs, outputs
    model = Model(inputs=[base_model.input, input_command], outputs=outputs)
    model.compile(loss=LOSS_FUNCTION_NAME, optimizer='adam', metrics=['accuracy', 'mse', 'mae'])
    print(model_name, model.summary())
    # plot_model(model, show_shapes=True, to_file=model_name + '_plot.png')
    return model, model_name


def define_model():
    """Frozen MobileNetV2 backbone + command input -> spline regressor.

    This is the variant actually used for training below.
    Returns (compiled model, model name).
    """
    model_name = 'miczi_v12'
    # Inputs
    input_img_seq = Input(shape=(224, 224, 3))
    input_command = Input(shape=(6,))
    # Image branch (global-average-pooled MobileNetV2 features)
    cnn_model = MobileNetV2(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    freeze_layers(cnn_model)
    _ = Dense(512)(cnn_model.output)
    _ = Dropout(0.4)(_)
    cnn_output = Dense(128)(_)
    # Merge CNN features with the command input
    _ = concatenate([cnn_output, input_command])
    _ = Dense(128, activation='relu')(_)
    _ = Dense(128, activation='relu')(_)
    outputs = Dense(14, activation='linear')(_)
    # Combine inputs, outputs
    model = Model(inputs=[cnn_model.input, input_command], outputs=outputs)
    model.compile(loss=LOSS_FUNCTION_NAME, optimizer='adam', metrics=['accuracy', 'mae'])
    print(model_name, model.summary())
    # plot_model(model, show_shapes=True, to_file=model_name + '_plot.png')
    return model, model_name


# In[50]:

# Training setup
model, model_name = define_model()

# In[51]:

BATCH_SIZE = 512  # the original Adam paper used 128

train_set_gen = ImageDatasetGenerator(TRAIN_DATASET_DIRS, batch_size=BATCH_SIZE)
test_set_gen = ImageDatasetGenerator(TEST_DATASET_DIRS, batch_size=BATCH_SIZE)

# In[52]:

DESCRIPTION = 'train_ds1+2+3_test_ds1__bs_{}'.format(BATCH_SIZE)
checkpoint_path = f'training_{model_name}__{DESCRIPTION}/checkpoint.ckpt'

# BUG FIX: the callback previously monitored LOSS_FUNCTION_NAME
# ('mean_squared_error'), which is not a key in the Keras training logs
# (the loss is logged as 'loss'/'val_loss'), so it monitored a missing metric.
checkpointing_callback = ModelCheckpoint(filepath=checkpoint_path,
                                          save_weights_only=True,
                                          monitor='val_loss',
                                          verbose=1,
                                          save_freq='epoch')
plot_callback = PlotLossesCallback()
tensorboard_callback = TrainValTensorBoard(log_dir=f'./tensorboard_logs_{model_name}__{DESCRIPTION}',
                                            batch_size=BATCH_SIZE,
                                            write_graph=False,
                                            write_images=False)
reduce_lr_callback = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.001)
early_stopping_callback = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=5,
                                        verbose=1, mode='auto', restore_best_weights=True)
callbacks = [checkpointing_callback,
             plot_callback,
             tensorboard_callback,
             reduce_lr_callback,
             early_stopping_callback]

# In[ ]:

# Resume from a checkpoint when one exists; a missing checkpoint is expected
# on the first run. (Fixed: no bare `except:` - it also swallowed
# KeyboardInterrupt/SystemExit.)
try:
    model.load_weights(checkpoint_path)
except Exception:
    print('No checkpoint to load!!!')

model.fit_generator(train_set_gen,
                    steps_per_epoch=len(train_set_gen),
                    validation_data=test_set_gen,
                    validation_steps=len(test_set_gen),
                    shuffle=True,
                    epochs=60,
                    callbacks=callbacks,
                    verbose=1,
                    workers=20,
                    use_multiprocessing=True,
                    max_queue_size=80,
                    initial_epoch=0
                    )

# In[30]:

# Evaluate model
print('Hello Im here!!!')
exit()  # NOTE: everything below is dead code until this exit() is removed

TEST_DATASET_DIR = Path('/home/bwroblew/Datasets/waypoint_prediction_reworked/test')
img_paths = [path for path in TEST_DATASET_DIR.glob('*.png')]
model.load_weights(checkpoint_path)


def visualize_evaluation(img_paths):
    """Plot a 10x3 grid of random test images with ground-truth (red) and
    predicted (blue) splines overlaid."""
    nrows, ncols = 10, 3
    img_paths_sample = random.sample(img_paths, nrows * ncols)
    fig, axes = plt.subplots(nrows, ncols, figsize=(4*ncols, 4*nrows), sharex='all', sharey='all')
    fig.tight_layout()
    for row in range(nrows):
        for col in range(ncols):
            # Load image (raw for display, preprocessed for the network)
            img_path = img_paths_sample[row * ncols + col]
            img = load_preprocessed_image(img_path)
            orig_img = load_image(img_path)
            input_img = np.expand_dims(img, axis=0)
            # Load target spline and ground-truth command
            data = load_json(img_path.parent / f'{img_path.stem}.json')
            target_flatten_spline = get_spline(data)
            command_onehot = get_command_input(data)
            command = onehot_to_command(command_onehot)
            input_command = np.expand_dims(command_onehot, axis=0)
            # Predict
            predicted_spline = model.predict([input_img, input_command], verbose=1)
            ax = axes[row, col]
            ax.set_title(f'Original [{command}]')
            ax.imshow(orig_img)
            # Overlay ground-truth (red) and predicted (blue) splines
            xs, ys = plottable_spline(target_flatten_spline)
            ax.plot(xs, ys, '.-r')
            predicted_flatten_spline = np.squeeze(predicted_spline)
            xs, ys = plottable_spline(predicted_flatten_spline)
            ax.plot(xs, ys, '.-b')
    fig.show()


visualize_evaluation(img_paths)

# In[ ]:
[ "matplotlib.pyplot.hist", "tensorflow.keras.callbacks.EarlyStopping", "numpy.array", "tensorflow.keras.layers.Dense", "numpy.arange", "matplotlib.pyplot.imshow", "tensorflow.keras.layers.Input", "pathlib.Path", "tensorflow.keras.callbacks.ReduceLROnPlateau", "tensorflow.Session", "json.dumps", ...
[((1099, 1115), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1113, 1115), True, 'import tensorflow as tf\n'), ((1165, 1190), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1175, 1190), True, 'import tensorflow as tf\n'), ((1191, 1228), 'tensorflow.keras.backend.set_session', 'tf.keras.backend.set_session', (['session'], {}), '(session)\n', (1219, 1228), True, 'import tensorflow as tf\n'), ((18892, 19020), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'checkpoint_path', 'save_weights_only': '(True)', 'monitor': 'LOSS_FUNCTION_NAME', 'verbose': '(1)', 'save_freq': '"""epoch"""'}), "(filepath=checkpoint_path, save_weights_only=True, monitor=\n LOSS_FUNCTION_NAME, verbose=1, save_freq='epoch')\n", (18907, 19020), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, EarlyStopping\n'), ((19196, 19216), 'livelossplot.tf_keras.PlotLossesCallback', 'PlotLossesCallback', ([], {}), '()\n', (19214, 19216), False, 'from livelossplot.tf_keras import PlotLossesCallback\n'), ((20023, 20098), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.2)', 'patience': '(3)', 'min_lr': '(0.001)'}), "(monitor='val_loss', factor=0.2, patience=3, min_lr=0.001)\n", (20040, 20098), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, EarlyStopping\n'), ((20125, 20243), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.0001)', 'patience': '(5)', 'verbose': '(1)', 'mode': '"""auto"""', 'restore_best_weights': '(True)'}), "(monitor='val_loss', min_delta=0.0001, patience=5, verbose=1,\n mode='auto', restore_best_weights=True)\n", (20138, 20243), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, EarlyStopping\n'), ((21249, 21314), 'pathlib.Path', 
'Path', (['"""/home/bwroblew/Datasets/waypoint_prediction_reworked/test"""'], {}), "('/home/bwroblew/Datasets/waypoint_prediction_reworked/test')\n", (21253, 21314), False, 'from pathlib import Path\n'), ((1262, 1287), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (1285, 1287), False, 'import platform\n'), ((2111, 2130), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (2123, 2130), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((3388, 3416), 'numpy.array', 'np.array', (['(distances + angles)'], {}), '(distances + angles)\n', (3396, 3416), True, 'import numpy as np\n'), ((4801, 4813), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (4809, 4813), True, 'import numpy as np\n'), ((4823, 4835), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (4831, 4835), True, 'import numpy as np\n'), ((4976, 5052), 'pathlib.Path', 'Path', (['"""/home/bwroblew/Datasets/waypoint_predition_combined/train_dataset1+2"""'], {}), "('/home/bwroblew/Datasets/waypoint_predition_combined/train_dataset1+2')\n", (4980, 5052), False, 'from pathlib import Path\n'), ((5058, 5132), 'pathlib.Path', 'Path', (['"""/home/bwroblew/Datasets/waypoint_predition_combined/train_dataset3"""'], {}), "('/home/bwroblew/Datasets/waypoint_predition_combined/train_dataset3')\n", (5062, 5132), False, 'from pathlib import Path\n'), ((5161, 5234), 'pathlib.Path', 'Path', (['"""/home/bwroblew/Datasets/waypoint_predition_combined/test_dataset1"""'], {}), "('/home/bwroblew/Datasets/waypoint_predition_combined/test_dataset1')\n", (5165, 5234), False, 'from pathlib import Path\n'), ((6400, 6433), 'matplotlib.pyplot.hist', 'plt.hist', (['all_distances'], {'bins': '(100)'}), '(all_distances, bins=100)\n', (6408, 6433), True, 'import matplotlib.pyplot as plt\n'), ((6438, 6448), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6446, 6448), True, 'import matplotlib.pyplot as plt\n'), ((6453, 6483), 
'matplotlib.pyplot.hist', 'plt.hist', (['all_angles'], {'bins': '(200)'}), '(all_angles, bins=200)\n', (6461, 6483), True, 'import matplotlib.pyplot as plt\n'), ((6488, 6498), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6496, 6498), True, 'import matplotlib.pyplot as plt\n'), ((6962, 6989), 'json.dumps', 'json.dumps', (['STATS'], {'indent': '(4)'}), '(STATS, indent=4)\n', (6972, 6989), False, 'import json\n'), ((7926, 7960), 'matplotlib.pyplot.title', 'plt.title', (['f"""Original [{command}]"""'], {}), "(f'Original [{command}]')\n", (7935, 7960), True, 'import matplotlib.pyplot as plt\n'), ((7965, 7991), 'matplotlib.pyplot.imshow', 'plt.imshow', (['original_image'], {}), '(original_image)\n', (7975, 7991), True, 'import matplotlib.pyplot as plt\n'), ((8069, 8092), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys', '""".-r"""'], {}), "(xs, ys, '.-r')\n", (8077, 8092), True, 'import matplotlib.pyplot as plt\n'), ((8258, 8362), 'matplotlib.pyplot.text', 'plt.text', (['(224 / 2)', '(220)', "('Intersection: YES' if intersection_ahead else 'Intersection: NO')"], {'color': '"""w"""'}), "(224 / 2, 220, 'Intersection: YES' if intersection_ahead else\n 'Intersection: NO', color='w')\n", (8266, 8362), True, 'import matplotlib.pyplot as plt\n'), ((8400, 8435), 'matplotlib.pyplot.barh', 'plt.barh', (['(200)', '(steering_angle * 100)'], {}), '(200, steering_angle * 100)\n', (8408, 8435), True, 'import matplotlib.pyplot as plt\n'), ((8611, 8621), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8619, 8621), True, 'import matplotlib.pyplot as plt\n'), ((10007, 10071), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(5)'], {'figsize': '(45, 20)', 'sharex': '"""all"""', 'sharey': '"""all"""'}), "(2, 5, figsize=(45, 20), sharex='all', sharey='all')\n", (10019, 10071), True, 'import matplotlib.pyplot as plt\n'), ((11107, 11117), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11115, 11117), True, 'import matplotlib.pyplot as plt\n'), ((15329, 
15355), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (15334, 15355), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((15376, 15393), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(6,)'}), '(shape=(6,))\n', (15381, 15393), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((15435, 15506), 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {'input_shape': '(224, 224, 3)', 'weights': '"""imagenet"""', 'include_top': '(False)'}), "(input_shape=(224, 224, 3), weights='imagenet', include_top=False)\n", (15440, 15506), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), ((15682, 15722), 'tensorflow.keras.layers.concatenate', 'concatenate', (['[cnn_output, input_command]'], {}), '([cnn_output, input_command])\n', (15693, 15722), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((15887, 15951), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[base_model.input, input_command]', 'outputs': 'outputs'}), '(inputs=[base_model.input, input_command], outputs=outputs)\n', (15892, 15951), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((16290, 16316), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (16295, 16316), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((16337, 16354), 'tensorflow.keras.layers.Input', 'Input', ([], 
{'shape': '(6,)'}), '(shape=(6,))\n', (16342, 16354), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((16396, 16467), 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {'input_shape': '(224, 224, 3)', 'weights': '"""imagenet"""', 'include_top': '(False)'}), "(input_shape=(224, 224, 3), weights='imagenet', include_top=False)\n", (16401, 16467), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), ((16596, 16636), 'tensorflow.keras.layers.concatenate', 'concatenate', (['[cnn_output, input_command]'], {}), '([cnn_output, input_command])\n', (16607, 16636), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((16918, 16982), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[base_model.input, input_command]', 'outputs': 'outputs'}), '(inputs=[base_model.input, input_command], outputs=outputs)\n', (16923, 16982), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((17316, 17342), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (17321, 17342), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((17363, 17380), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(6,)'}), '(shape=(6,))\n', (17368, 17380), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((17472, 17568), 'tensorflow.keras.applications.mobilenet_v2.MobileNetV2', 'MobileNetV2', ([], {'input_shape': '(224, 224, 3)', 
'include_top': '(False)', 'pooling': '"""avg"""', 'weights': '"""imagenet"""'}), "(input_shape=(224, 224, 3), include_top=False, pooling='avg',\n weights='imagenet')\n", (17483, 17568), False, 'from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2\n'), ((17762, 17802), 'tensorflow.keras.layers.concatenate', 'concatenate', (['[cnn_output, input_command]'], {}), '([cnn_output, input_command])\n', (17773, 17802), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((17984, 18047), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[cnn_model.input, input_command]', 'outputs': 'outputs'}), '(inputs=[cnn_model.input, input_command], outputs=outputs)\n', (17989, 18047), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((21508, 21547), 'random.sample', 'random.sample', (['img_paths', '(nrows * ncols)'], {}), '(img_paths, nrows * ncols)\n', (21521, 21547), False, 'import random\n'), ((21569, 21659), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {'figsize': '(4 * ncols, 4 * nrows)', 'sharex': '"""all"""', 'sharey': '"""all"""'}), "(nrows, ncols, figsize=(4 * ncols, 4 * nrows), sharex='all',\n sharey='all')\n", (21581, 21659), True, 'import matplotlib.pyplot as plt\n'), ((1751, 1768), 'numpy.argmax', 'np.argmax', (['onehot'], {}), '(onehot)\n', (1760, 1768), True, 'import numpy as np\n'), ((2319, 2364), 'tensorflow.keras.applications.mobilenet_v2.preprocess_input', 'mobilenet_v2.preprocess_input', (['image_reshaped'], {}), '(image_reshaped)\n', (2348, 2364), False, 'from tensorflow.keras.applications import mobilenet_v2\n'), ((2404, 2442), 'tensorflow.keras.applications.vgg16.preprocess_input', 'vgg16.preprocess_input', (['image_reshaped'], {}), '(image_reshaped)\n', (2426, 2442), False, 'from tensorflow.keras.applications import vgg16\n'), ((2570, 2582), 'json.load', 
'json.load', (['f'], {}), '(f)\n', (2579, 2582), False, 'import json\n'), ((8195, 8218), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys', '""".-b"""'], {}), "(xs, ys, '.-b')\n", (8203, 8218), True, 'import matplotlib.pyplot as plt\n'), ((12474, 12500), 'numpy.arange', 'np.arange', (['self.batch_size'], {}), '(self.batch_size)\n', (12483, 12500), True, 'import numpy as np\n'), ((12509, 12535), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (12526, 12535), True, 'import numpy as np\n'), ((13557, 13592), 'os.path.join', 'os.path.join', (['log_dir', '"""validation"""'], {}), "(log_dir, 'validation')\n", (13569, 13592), False, 'import os\n'), ((13620, 13653), 'os.path.join', 'os.path.join', (['log_dir', '"""training"""'], {}), "(log_dir, 'training')\n", (13632, 13653), False, 'import os\n'), ((13776, 13803), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (13801, 13803), False, 'from tensorflow.python.eager import context\n'), ((14213, 14240), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (14238, 14240), False, 'from tensorflow.python.eager import context\n'), ((15543, 15552), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (15550, 15552), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((15580, 15591), 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {}), '(1024)\n', (15585, 15591), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((15603, 15615), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (15610, 15615), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, 
Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((15636, 15665), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (15641, 15665), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((15731, 15760), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (15736, 15760), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((15772, 15784), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (15779, 15784), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((15802, 15832), 'tensorflow.keras.layers.Dense', 'Dense', (['(14)'], {'activation': '"""linear"""'}), "(14, activation='linear')\n", (15807, 15832), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((16504, 16513), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (16511, 16513), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((16550, 16579), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (16555, 16579), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, 
concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((16645, 16674), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (16650, 16674), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((16686, 16698), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (16693, 16698), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((16710, 16738), 'tensorflow.keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (16715, 16738), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((16750, 16815), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(32)'], {'dropout': '(0.2)', 'input_shape': '(64, 1)', 'return_sequences': '(True)'}), '(32, dropout=0.2, input_shape=(64, 1), return_sequences=True)\n', (16754, 16815), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((16833, 16863), 'tensorflow.keras.layers.Dense', 'Dense', (['(14)'], {'activation': '"""linear"""'}), "(14, activation='linear')\n", (16838, 16863), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((17600, 17610), 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (17605, 17610), False, 'from 
tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((17637, 17649), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (17644, 17649), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((17670, 17680), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {}), '(128)\n', (17675, 17680), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((17811, 17840), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (17816, 17840), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((17852, 17881), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (17857, 17881), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((17899, 17929), 'tensorflow.keras.layers.Dense', 'Dense', (['(14)'], {'activation': '"""linear"""'}), "(14, activation='linear')\n", (17904, 17929), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed\n'), ((9552, 9564), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9561, 9564), False, 'import json\n'), ((12633, 12656), 'numpy.array', 'np.array', (['batch_of_imgs'], {}), 
'(batch_of_imgs)\n', (12641, 12656), True, 'import numpy as np\n'), ((12694, 12721), 'numpy.array', 'np.array', (['batch_of_commands'], {}), '(batch_of_commands)\n', (12702, 12721), True, 'import numpy as np\n'), ((12758, 12784), 'numpy.array', 'np.array', (['batch_of_splines'], {}), '(batch_of_splines)\n', (12766, 12784), True, 'import numpy as np\n'), ((13835, 13890), 'tensorflow.contrib.summary.create_file_writer', 'tf.contrib.summary.create_file_writer', (['self.val_log_dir'], {}), '(self.val_log_dir)\n', (13872, 13890), True, 'import tensorflow as tf\n'), ((13935, 13974), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.val_log_dir'], {}), '(self.val_log_dir)\n', (13956, 13974), True, 'import tensorflow as tf\n'), ((21940, 21967), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (21954, 21967), True, 'import numpy as np\n'), ((22319, 22357), 'numpy.expand_dims', 'np.expand_dims', (['command_onehot'], {'axis': '(0)'}), '(command_onehot, axis=0)\n', (22333, 22357), True, 'import numpy as np\n'), ((22884, 22912), 'numpy.squeeze', 'np.squeeze', (['predicted_spline'], {}), '(predicted_spline)\n', (22894, 22912), True, 'import numpy as np\n'), ((4070, 4089), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (4082, 4089), False, 'import math\n'), ((4124, 4143), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (4136, 4143), False, 'import math\n'), ((11388, 11397), 'pathlib.Path', 'Path', (['img'], {}), '(img)\n', (11392, 11397), False, 'from pathlib import Path\n'), ((14289, 14333), 'tensorflow.contrib.summary.always_record_summaries', 'tf.contrib.summary.always_record_summaries', ([], {}), '()\n', (14331, 14333), True, 'import tensorflow as tf\n'), ((14554, 14566), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (14564, 14566), True, 'import tensorflow as tf\n')]
import numpy as np import pp def test_connect_bundle_optical2(): """FIXME. Actual length of the route = 499 for some reason the route length is 10um shorter than the layout. b = 15.708 route_length = 10+35+95.05+35+b+35+208+35+b+15 print(route_length) = 499.46 route_length = 10+t+89.55+t+b+t+9.44+t+b+20.5 print(route_length) = 300.906 """ c = pp.Component() w = c << pp.c.waveguide_array(n_waveguides=4, spacing=200) d = c << pp.c.nxn(west=4, east=1) d.y = w.y d.xmin = w.xmax + 200 ports1 = [ w.ports["E1"], w.ports["E0"], ] ports2 = [ d.ports["W1"], d.ports["W0"], ] r = pp.routing.link_optical_ports(ports1, ports2, sort_ports=True, bend_radius=10) print(r[0].parent.length) assert np.isclose(r[0].parent.length, 489.46592653589795, atol=0.1) print(r[1].parent.length) assert np.isclose(r[1].parent.length, 290.798926535898, atol=0.1) c.add(r) return c if __name__ == "__main__": c = test_connect_bundle_optical2() pp.show(c)
[ "pp.Component", "numpy.isclose", "pp.routing.link_optical_ports", "pp.c.waveguide_array", "pp.show", "pp.c.nxn" ]
[((386, 400), 'pp.Component', 'pp.Component', ([], {}), '()\n', (398, 400), False, 'import pp\n'), ((687, 765), 'pp.routing.link_optical_ports', 'pp.routing.link_optical_ports', (['ports1', 'ports2'], {'sort_ports': '(True)', 'bend_radius': '(10)'}), '(ports1, ports2, sort_ports=True, bend_radius=10)\n', (716, 765), False, 'import pp\n'), ((808, 868), 'numpy.isclose', 'np.isclose', (['r[0].parent.length', '(489.46592653589795)'], {'atol': '(0.1)'}), '(r[0].parent.length, 489.46592653589795, atol=0.1)\n', (818, 868), True, 'import numpy as np\n'), ((911, 969), 'numpy.isclose', 'np.isclose', (['r[1].parent.length', '(290.798926535898)'], {'atol': '(0.1)'}), '(r[1].parent.length, 290.798926535898, atol=0.1)\n', (921, 969), True, 'import numpy as np\n'), ((1069, 1079), 'pp.show', 'pp.show', (['c'], {}), '(c)\n', (1076, 1079), False, 'import pp\n'), ((415, 464), 'pp.c.waveguide_array', 'pp.c.waveguide_array', ([], {'n_waveguides': '(4)', 'spacing': '(200)'}), '(n_waveguides=4, spacing=200)\n', (435, 464), False, 'import pp\n'), ((478, 502), 'pp.c.nxn', 'pp.c.nxn', ([], {'west': '(4)', 'east': '(1)'}), '(west=4, east=1)\n', (486, 502), False, 'import pp\n')]
import os import tensorflow_datasets as tfds import tensorflow as tf import numpy as np from common.inputs.data_input import DataInfo bxs_m2 = [[1, 1], [1, -1], [-1, 1], [-1, -1]] def parse_multi_mnist1(serialized_example): """ Data parsing function. """ features = tf.io.parse_single_example(serialized_example, features={ 'height': tf.io.FixedLenFeature([], tf.int64), 'width': tf.io.FixedLenFeature([], tf.int64), 'depth': tf.io.FixedLenFeature([], tf.int64), 'label_1': tf.io.FixedLenFeature([], tf.int64), 'label_2': tf.io.FixedLenFeature([], tf.int64), 'image_raw_1': tf.io.FixedLenFeature([], tf.string), 'image_raw_2': tf.io.FixedLenFeature([], tf.string), 'merged_raw': tf.io.FixedLenFeature([], tf.string), }) # Decode 3 images image_raw_1 = tf.io.decode_raw(features['image_raw_1'], tf.uint8) image_raw_1 = tf.reshape(image_raw_1, shape=[36, 36, 1]) image_raw_2 = tf.io.decode_raw(features['image_raw_2'], tf.uint8) image_raw_2 = tf.reshape(image_raw_2, shape=[36, 36, 1]) merged_raw = tf.io.decode_raw(features['merged_raw'], tf.uint8) merged_raw = tf.reshape(merged_raw, shape=[36, 36, 1]) # Convert from [0, 255] -> [-0.5, 0.5] floats. image_raw_1 = tf.cast(image_raw_1, tf.float32) * (1. / 255) image_raw_2 = tf.cast(image_raw_2, tf.float32) * (1. / 255) merged_raw = tf.cast(merged_raw, tf.float32) * (1. / 255) # Convert label from a scalar uint8 tensor to an int32 scalar. label_1 = tf.one_hot(tf.cast(features['label_1'], tf.int32), 10) label_2 = tf.one_hot(tf.cast(features['label_2'], tf.int32), 10) label = label_1 + label_2 features = {'images': merged_raw, 'labels': label, 'recons_label': label_1, 'recons_image': image_raw_1, 'spare_label': label_2, 'spare_image': image_raw_2} return features def parse_multi_mnist(serialized_example): """ Data parsing function. 
""" features = tf.io.parse_single_example(serialized_example, features={ 'height': tf.io.FixedLenFeature([], tf.int64), 'width': tf.io.FixedLenFeature([], tf.int64), 'depth': tf.io.FixedLenFeature([], tf.int64), 'label_1': tf.io.FixedLenFeature([], tf.int64), 'label_2': tf.io.FixedLenFeature([], tf.int64), 'image_raw_1': tf.io.FixedLenFeature([], tf.string), 'image_raw_2': tf.io.FixedLenFeature([], tf.string), # 'merged_raw': tf.io.FixedLenFeature([], tf.string), }) # Decode 3 images image_raw_1 = tf.io.decode_raw(features['image_raw_1'], tf.uint8) image_raw_1 = tf.reshape(image_raw_1, shape=[36, 36, 1]) image_raw_2 = tf.io.decode_raw(features['image_raw_2'], tf.uint8) image_raw_2 = tf.reshape(image_raw_2, shape=[36, 36, 1]) merged_raw = tf.add(tf.cast(image_raw_1, tf.int32), tf.cast(image_raw_2, tf.int32)) merged_raw = tf.minimum(merged_raw, 255) # Convert from [0, 255] -> [-0.5, 0.5] floats. image_raw_1 = tf.cast(image_raw_1, tf.float32) * (1. / 255) image_raw_2 = tf.cast(image_raw_2, tf.float32) * (1. / 255) merged_raw = tf.cast(merged_raw, tf.float32) * (1. / 255) # Convert label from a scalar uint8 tensor to an int32 scalar. label_1 = tf.one_hot(tf.cast(features['label_1'], tf.int32), 10) label_2 = tf.one_hot(tf.cast(features['label_2'], tf.int32), 10) label = label_1 + label_2 features = {'images': merged_raw, 'labels': label, 'recons_label': label_1, 'recons_image': image_raw_1, 'spare_label': label_2, 'spare_image': image_raw_2} return features def parse_aff_mnist(serialized_example): """ Data parsing function. """ features = tf.io.parse_single_example(serialized_example, features={'image': tf.io.FixedLenFeature([], tf.string), 'label': tf.io.FixedLenFeature([], tf.int64), 'height': tf.io.FixedLenFeature([], tf.int64), 'width': tf.io.FixedLenFeature([], tf.int64), 'depth': tf.io.FixedLenFeature([], tf.int64)}) image = tf.io.decode_raw(features['image'], tf.uint8) image = tf.reshape(image, shape=[40, 40, 1]) image = tf.cast(image, tf.float32) * (1. 
/ 255) label = tf.cast(features['label'], tf.int32) label = tf.one_hot(label, 10) return image, label def build_parse(dataset): if dataset == 'aff_mnist': return parse_aff_mnist elif dataset == 'shift_mnist': return parse_aff_mnist elif dataset == 'multi_mnist': return parse_multi_mnist elif dataset == 'multi_mnist1': return parse_multi_mnist1 def get_dataset(name, data_path): if name == 'aff_mnist':# 1920000/320000 train_files = os.path.join(data_path, "train_affnist.tfrecord") test_files = os.path.join(data_path, "test_affnist.tfrecord") train_parse_fun = build_parse('aff_mnist') test_parse_fun = build_parse('aff_mnist') info = DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=(40, 40, 1)), 'label': tfds.features.ClassLabel(num_classes=10)}), {'train_examples': 1920000, 'test_examples': 320000}) elif name == 'shift_mnist':# 10140000/1690000 train_files = os.path.join(data_path, "train_6shifted_mnist.tfrecord") test_files = os.path.join(data_path, "test_6shifted_mnist.tfrecord") train_parse_fun = build_parse('shift_mnist') test_parse_fun = build_parse('shift_mnist') info = DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=(40, 40, 1)), 'label': tfds.features.ClassLabel(num_classes=10)}), {'train_examples': 10140000, 'test_examples': 1690000}) # elif name == 'multi_mnist':# 6000000/1000000 # train_files = [os.path.join(data_path, 'train', "multitrain_6shifted_mnist.tfrecords-0000{}-of-00060".format(i)) for i # in range(10)] + [os.path.join(data_path, 'train', "multitrain_6shifted_mnist.tfrecords-000{}-of-00060".format(i)) for i # in range(10, 60)] # test_files = [os.path.join(data_path, 'test', "multitest_6shifted_mnist.tfrecords-0000{}-of-00010".format(i)) for i in range(10)] # train_parse_fun = build_parse('multi_mnist') # test_parse_fun = build_parse('multi_mnist') # info = DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=(36, 36, 1)), # 'label': tfds.features.ClassLabel(num_classes=10)}), # 
{'train_examples': 6000000, # 'test_examples': 1000000}) elif name == 'multi_mnist':# 599999/100000 train_files = [os.path.join(data_path, "multitrain_6shifted_mnist.tfrecords-0000{}-of-00006".format(i)) for i in range(6)] test_files = [os.path.join(data_path, "multitest_6shifted_mnist.tfrecords-00000-of-00001")] train_parse_fun = build_parse('multi_mnist1') test_parse_fun = build_parse('multi_mnist1') info = DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=(36, 36, 1)), 'label': tfds.features.ClassLabel(num_classes=10)}), {'train_examples': 599999, 'test_examples': 100000}) else: raise Exception('dataset note support!') train = tf.data.TFRecordDataset(train_files) test = tf.data.TFRecordDataset(test_files) return train, test, train_parse_fun, test_parse_fun, info def build_dataset(name, data_dir='data', batch_size=128, buffer_size=50000): data_path = os.path.join(data_dir, name) train, test, train_parse_fun, test_parse_fun, info = get_dataset(name, data_path) if buffer_size > 0: train = train.shuffle(buffer_size=buffer_size) train = train.map(train_parse_fun, num_parallel_calls=tf.data.experimental.AUTOTUNE)\ .batch(batch_size)\ .prefetch(tf.data.experimental.AUTOTUNE) test = test.map(test_parse_fun, num_parallel_calls=tf.data.experimental.AUTOTUNE)\ .batch(batch_size)\ .prefetch(tf.data.experimental.AUTOTUNE) return train, test, info def count_data(name): train, test, _ = build_dataset(name, '') train_num = 0 for image, label in train: train_num += image.shape[0] test_num = 0 for image, label in test: test_num += image.shape[0] print('train num:', train_num) print('test num:', test_num) def count_multi_mnist(): train, test, _ = build_dataset('multi_mnist', '') train_num = 0 for feature in train: train_num += feature['images'].shape[0] test_num = 0 for feature in test: test_num += feature['images'].shape[0] print('train num:', train_num) print('test num:', test_num) def view_data(name, img_stand=False): train, test, _ = build_dataset(name, 
'') for image, label in train: if not img_stand: image /= 255. out_image(image, label) break for image, label in test: if not img_stand: image /= 255. out_image(image, label) break def view_multi_mnist(img_stand=False): train, test, _ = build_dataset('multi_mnist', '') for features in train: image = features['images'] label = features['labels'] recons_label = features['recons_label'] recons_image = features['recons_image'] spare_label = features['spare_label'] spare_image = features['spare_image'] image_final = tf.concat([recons_image, spare_image, image], axis=2) if not img_stand: image_final /= 255. out_image(image_final, label) break def out_image(images, labels): import matplotlib.pyplot as plt plt.figure() for i in range(16): plt.subplot(4, 4, i+1) plt.title(tf.argmax(labels[i]).numpy()) image = images[i, :, :, :] if image.shape[-1] == 1: image = np.squeeze(image, -1) plt.imshow(image, cmap='gray') else: plt.imshow(image) plt.subplots_adjust(hspace=0.5) plt.show() if __name__ == "__main__": # view_data('aff_mnist') # count_data('shift_mnist') # view_multi_mnist() count_multi_mnist()
[ "tensorflow.one_hot", "tensorflow.data.TFRecordDataset", "matplotlib.pyplot.imshow", "os.path.join", "numpy.squeeze", "tensorflow.concat", "matplotlib.pyplot.figure", "tensorflow.io.FixedLenFeature", "tensorflow_datasets.features.ClassLabel", "tensorflow.argmax", "tensorflow.io.decode_raw", "t...
[((1229, 1280), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["features['image_raw_1']", 'tf.uint8'], {}), "(features['image_raw_1'], tf.uint8)\n", (1245, 1280), True, 'import tensorflow as tf\n'), ((1299, 1341), 'tensorflow.reshape', 'tf.reshape', (['image_raw_1'], {'shape': '[36, 36, 1]'}), '(image_raw_1, shape=[36, 36, 1])\n', (1309, 1341), True, 'import tensorflow as tf\n'), ((1360, 1411), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["features['image_raw_2']", 'tf.uint8'], {}), "(features['image_raw_2'], tf.uint8)\n", (1376, 1411), True, 'import tensorflow as tf\n'), ((1430, 1472), 'tensorflow.reshape', 'tf.reshape', (['image_raw_2'], {'shape': '[36, 36, 1]'}), '(image_raw_2, shape=[36, 36, 1])\n', (1440, 1472), True, 'import tensorflow as tf\n'), ((1490, 1540), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["features['merged_raw']", 'tf.uint8'], {}), "(features['merged_raw'], tf.uint8)\n", (1506, 1540), True, 'import tensorflow as tf\n'), ((1558, 1599), 'tensorflow.reshape', 'tf.reshape', (['merged_raw'], {'shape': '[36, 36, 1]'}), '(merged_raw, shape=[36, 36, 1])\n', (1568, 1599), True, 'import tensorflow as tf\n'), ((3387, 3438), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["features['image_raw_1']", 'tf.uint8'], {}), "(features['image_raw_1'], tf.uint8)\n", (3403, 3438), True, 'import tensorflow as tf\n'), ((3457, 3499), 'tensorflow.reshape', 'tf.reshape', (['image_raw_1'], {'shape': '[36, 36, 1]'}), '(image_raw_1, shape=[36, 36, 1])\n', (3467, 3499), True, 'import tensorflow as tf\n'), ((3518, 3569), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["features['image_raw_2']", 'tf.uint8'], {}), "(features['image_raw_2'], tf.uint8)\n", (3534, 3569), True, 'import tensorflow as tf\n'), ((3588, 3630), 'tensorflow.reshape', 'tf.reshape', (['image_raw_2'], {'shape': '[36, 36, 1]'}), '(image_raw_2, shape=[36, 36, 1])\n', (3598, 3630), True, 'import tensorflow as tf\n'), ((3736, 3763), 'tensorflow.minimum', 'tf.minimum', (['merged_raw', '(255)'], 
{}), '(merged_raw, 255)\n', (3746, 3763), True, 'import tensorflow as tf\n'), ((5153, 5198), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["features['image']", 'tf.uint8'], {}), "(features['image'], tf.uint8)\n", (5169, 5198), True, 'import tensorflow as tf\n'), ((5211, 5247), 'tensorflow.reshape', 'tf.reshape', (['image'], {'shape': '[40, 40, 1]'}), '(image, shape=[40, 40, 1])\n', (5221, 5247), True, 'import tensorflow as tf\n'), ((5312, 5348), 'tensorflow.cast', 'tf.cast', (["features['label']", 'tf.int32'], {}), "(features['label'], tf.int32)\n", (5319, 5348), True, 'import tensorflow as tf\n'), ((5361, 5382), 'tensorflow.one_hot', 'tf.one_hot', (['label', '(10)'], {}), '(label, 10)\n', (5371, 5382), True, 'import tensorflow as tf\n'), ((8678, 8714), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['train_files'], {}), '(train_files)\n', (8701, 8714), True, 'import tensorflow as tf\n'), ((8726, 8761), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['test_files'], {}), '(test_files)\n', (8749, 8761), True, 'import tensorflow as tf\n'), ((8920, 8948), 'os.path.join', 'os.path.join', (['data_dir', 'name'], {}), '(data_dir, name)\n', (8932, 8948), False, 'import os\n'), ((11140, 11152), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11150, 11152), True, 'import matplotlib.pyplot as plt\n'), ((11457, 11488), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (11476, 11488), True, 'import matplotlib.pyplot as plt\n'), ((11493, 11503), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11501, 11503), True, 'import matplotlib.pyplot as plt\n'), ((1670, 1702), 'tensorflow.cast', 'tf.cast', (['image_raw_1', 'tf.float32'], {}), '(image_raw_1, tf.float32)\n', (1677, 1702), True, 'import tensorflow as tf\n'), ((1734, 1766), 'tensorflow.cast', 'tf.cast', (['image_raw_2', 'tf.float32'], {}), '(image_raw_2, tf.float32)\n', (1741, 1766), True, 'import tensorflow as 
tf\n'), ((1797, 1828), 'tensorflow.cast', 'tf.cast', (['merged_raw', 'tf.float32'], {}), '(merged_raw, tf.float32)\n', (1804, 1828), True, 'import tensorflow as tf\n'), ((1935, 1973), 'tensorflow.cast', 'tf.cast', (["features['label_1']", 'tf.int32'], {}), "(features['label_1'], tf.int32)\n", (1942, 1973), True, 'import tensorflow as tf\n'), ((2004, 2042), 'tensorflow.cast', 'tf.cast', (["features['label_2']", 'tf.int32'], {}), "(features['label_2'], tf.int32)\n", (2011, 2042), True, 'import tensorflow as tf\n'), ((3655, 3685), 'tensorflow.cast', 'tf.cast', (['image_raw_1', 'tf.int32'], {}), '(image_raw_1, tf.int32)\n', (3662, 3685), True, 'import tensorflow as tf\n'), ((3687, 3717), 'tensorflow.cast', 'tf.cast', (['image_raw_2', 'tf.int32'], {}), '(image_raw_2, tf.int32)\n', (3694, 3717), True, 'import tensorflow as tf\n'), ((3834, 3866), 'tensorflow.cast', 'tf.cast', (['image_raw_1', 'tf.float32'], {}), '(image_raw_1, tf.float32)\n', (3841, 3866), True, 'import tensorflow as tf\n'), ((3898, 3930), 'tensorflow.cast', 'tf.cast', (['image_raw_2', 'tf.float32'], {}), '(image_raw_2, tf.float32)\n', (3905, 3930), True, 'import tensorflow as tf\n'), ((3961, 3992), 'tensorflow.cast', 'tf.cast', (['merged_raw', 'tf.float32'], {}), '(merged_raw, tf.float32)\n', (3968, 3992), True, 'import tensorflow as tf\n'), ((4099, 4137), 'tensorflow.cast', 'tf.cast', (["features['label_1']", 'tf.int32'], {}), "(features['label_1'], tf.int32)\n", (4106, 4137), True, 'import tensorflow as tf\n'), ((4168, 4206), 'tensorflow.cast', 'tf.cast', (["features['label_2']", 'tf.int32'], {}), "(features['label_2'], tf.int32)\n", (4175, 4206), True, 'import tensorflow as tf\n'), ((5260, 5286), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (5267, 5286), True, 'import tensorflow as tf\n'), ((5803, 5852), 'os.path.join', 'os.path.join', (['data_path', '"""train_affnist.tfrecord"""'], {}), "(data_path, 'train_affnist.tfrecord')\n", (5815, 5852), False, 'import 
os\n'), ((5874, 5922), 'os.path.join', 'os.path.join', (['data_path', '"""test_affnist.tfrecord"""'], {}), "(data_path, 'test_affnist.tfrecord')\n", (5886, 5922), False, 'import os\n'), ((10903, 10956), 'tensorflow.concat', 'tf.concat', (['[recons_image, spare_image, image]'], {'axis': '(2)'}), '([recons_image, spare_image, image], axis=2)\n', (10912, 10956), True, 'import tensorflow as tf\n'), ((11185, 11209), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(4)', '(i + 1)'], {}), '(4, 4, i + 1)\n', (11196, 11209), True, 'import matplotlib.pyplot as plt\n'), ((6405, 6461), 'os.path.join', 'os.path.join', (['data_path', '"""train_6shifted_mnist.tfrecord"""'], {}), "(data_path, 'train_6shifted_mnist.tfrecord')\n", (6417, 6461), False, 'import os\n'), ((6483, 6538), 'os.path.join', 'os.path.join', (['data_path', '"""test_6shifted_mnist.tfrecord"""'], {}), "(data_path, 'test_6shifted_mnist.tfrecord')\n", (6495, 6538), False, 'import os\n'), ((11344, 11365), 'numpy.squeeze', 'np.squeeze', (['image', '(-1)'], {}), '(image, -1)\n', (11354, 11365), True, 'import numpy as np\n'), ((11378, 11408), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (11388, 11408), True, 'import matplotlib.pyplot as plt\n'), ((11435, 11452), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (11445, 11452), True, 'import matplotlib.pyplot as plt\n'), ((439, 474), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (460, 474), True, 'import tensorflow as tf\n'), ((531, 566), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (552, 566), True, 'import tensorflow as tf\n'), ((623, 658), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (644, 658), True, 'import tensorflow as tf\n'), ((717, 752), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 
'tf.int64'], {}), '([], tf.int64)\n', (738, 752), True, 'import tensorflow as tf\n'), ((811, 846), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (832, 846), True, 'import tensorflow as tf\n'), ((909, 945), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (930, 945), True, 'import tensorflow as tf\n'), ((1008, 1044), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (1029, 1044), True, 'import tensorflow as tf\n'), ((1106, 1142), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (1127, 1142), True, 'import tensorflow as tf\n'), ((2595, 2630), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (2616, 2630), True, 'import tensorflow as tf\n'), ((2687, 2722), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (2708, 2722), True, 'import tensorflow as tf\n'), ((2779, 2814), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (2800, 2814), True, 'import tensorflow as tf\n'), ((2873, 2908), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (2894, 2908), True, 'import tensorflow as tf\n'), ((2967, 3002), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (2988, 3002), True, 'import tensorflow as tf\n'), ((3065, 3101), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (3086, 3101), True, 'import tensorflow as tf\n'), ((3164, 3200), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (3185, 3200), True, 'import tensorflow as tf\n'), ((4709, 4745), 'tensorflow.io.FixedLenFeature', 
'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (4730, 4745), True, 'import tensorflow as tf\n'), ((4808, 4843), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (4829, 4843), True, 'import tensorflow as tf\n'), ((4907, 4942), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (4928, 4942), True, 'import tensorflow as tf\n'), ((5005, 5040), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (5026, 5040), True, 'import tensorflow as tf\n'), ((5103, 5138), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (5124, 5138), True, 'import tensorflow as tf\n'), ((6085, 6123), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': '(40, 40, 1)'}), '(shape=(40, 40, 1))\n', (6104, 6123), True, 'import tensorflow_datasets as tfds\n'), ((6186, 6226), 'tensorflow_datasets.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'num_classes': '(10)'}), '(num_classes=10)\n', (6210, 6226), True, 'import tensorflow_datasets as tfds\n'), ((8113, 8189), 'os.path.join', 'os.path.join', (['data_path', '"""multitest_6shifted_mnist.tfrecords-00000-of-00001"""'], {}), "(data_path, 'multitest_6shifted_mnist.tfrecords-00000-of-00001')\n", (8125, 8189), False, 'import os\n'), ((11226, 11246), 'tensorflow.argmax', 'tf.argmax', (['labels[i]'], {}), '(labels[i])\n', (11235, 11246), True, 'import tensorflow as tf\n'), ((6705, 6743), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': '(40, 40, 1)'}), '(shape=(40, 40, 1))\n', (6724, 6743), True, 'import tensorflow_datasets as tfds\n'), ((6806, 6846), 'tensorflow_datasets.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'num_classes': '(10)'}), '(num_classes=10)\n', (6830, 6846), True, 'import tensorflow_datasets as tfds\n'), ((8359, 8397), 
'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': '(36, 36, 1)'}), '(shape=(36, 36, 1))\n', (8378, 8397), True, 'import tensorflow_datasets as tfds\n'), ((8460, 8500), 'tensorflow_datasets.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'num_classes': '(10)'}), '(num_classes=10)\n', (8484, 8500), True, 'import tensorflow_datasets as tfds\n')]
#!/usr/bin/env python # coding=utf-8 import numpy as np import os import site site.addsitedir('../lib/') import htools import hdm ########################################################################### class parameters: def __init__(self): self.N = 10 self.d = 4 self.n_del = 0 self.delta = 0.01 self.maxIter = 100 self.n_del_init = 0 self.path = '../results/missing_measurements/' #'../results/sensitivity/' self.experiment = 'missing_measurements' # 'sensitivity' self.cost = 'TRACE' # 'TRACE', 'LOG-DET' self.norm = 'fro' # 'l1', 'l2', 'p1', 'fro' self.solver = 'CVXOPT' # 'CVXOPT', 'SCS' self.error_list = 10**(np.linspace(-2, 0, num=5)) self.delta_list = 10**(np.linspace(0, -3, num=5)) param = parameters() ########################################################################### param.n_del_init = 1 if param.experiment == 'missing_measurements': range_of_N = range(10,20) for N in range_of_N: param.N = N for n_del in range(param.n_del_init, htools.edgeCnt(param)): param.n_del = n_del prob = hdm.FindMaxSprs(param) print('N=', N, 'n_del=', param.n_del,',d=', param.d,',p=',prob) ########################################################################### if param.experiment == 'sensitivity': range_of_N = range(5,20) R = 4 K = 10 M = 50 for N in range_of_N: print('N is ', N) param.N = N hdm.sensitivity(param,K,M,R)
[ "hdm.sensitivity", "numpy.linspace", "htools.edgeCnt", "hdm.FindMaxSprs", "site.addsitedir" ]
[((78, 104), 'site.addsitedir', 'site.addsitedir', (['"""../lib/"""'], {}), "('../lib/')\n", (93, 104), False, 'import site\n'), ((1518, 1549), 'hdm.sensitivity', 'hdm.sensitivity', (['param', 'K', 'M', 'R'], {}), '(param, K, M, R)\n', (1533, 1549), False, 'import hdm\n'), ((720, 745), 'numpy.linspace', 'np.linspace', (['(-2)', '(0)'], {'num': '(5)'}), '(-2, 0, num=5)\n', (731, 745), True, 'import numpy as np\n'), ((778, 803), 'numpy.linspace', 'np.linspace', (['(0)', '(-3)'], {'num': '(5)'}), '(0, -3, num=5)\n', (789, 803), True, 'import numpy as np\n'), ((1090, 1111), 'htools.edgeCnt', 'htools.edgeCnt', (['param'], {}), '(param)\n', (1104, 1111), False, 'import htools\n'), ((1165, 1187), 'hdm.FindMaxSprs', 'hdm.FindMaxSprs', (['param'], {}), '(param)\n', (1180, 1187), False, 'import hdm\n')]
from agent import agent from trade_env import StockTradingEnv import numpy as np from datetime import datetime import matplotlib.pyplot as plt if __name__=='__main__': """ 流通股本1亿,初始价格10元 100个agent,每个agent 20万现金,100万股 """ agent_count = 100 ep = 20 env = StockTradingEnv(1e9,10,0.1,0.01,0.01,6,4) agent_list = [] price_l=[] for i in range(agent_count): agent_list.append(agent(i,2e5,1e6)) #初始化100个agent for t in range(ep): #假设进行100次交易 print('*'*70) print('第{}次交易循环'.format(t)) print('*' * 70) for i in range(100): #每次交易每个agent各自进行操作 now_price = env.market_price() action = agent_list[i].trade(now_price) if action==None: print('agent{}持仓不动'.format(agent_list[i].id)) continue print('agent{}执行交易,买/卖{}手,挂单价格为{:.2f}'.format(action[2], action[0], action[1])) now_price, list_of_sell, list_of_buy=env.step(action) if now_price==None: continue k=0 del_sell=[] del_buy = [] for sell,buy in zip(list_of_sell,list_of_buy): if sell[0]==0: agent_id = int(sell[2]) agent_list[agent_id].update(sell[3]) #更新agent状态 del_sell.append(k) if buy[0]==0: agent_id = int(buy[2]) agent_list[agent_id].update(buy[3]) #更新agent状态 del_buy.append(k) k+=1 list_of_sell = np.delete(list_of_sell, del_sell, axis=0) list_of_buy = np.delete(list_of_buy,del_buy, axis=0) # 删除买单 env._update_list(list_of_sell,list_of_buy) print('{}市场价格为{:.2f}'.format(datetime.now().strftime('%H:%M:%S'),now_price)) price_l.append(now_price) index_ = list(range(len(price_l))) plt.rcParams['font.sans-serif'] = ['SimHei'] plt.plot(index_,price_l) plt.xlabel("序列") plt.ylabel("价格") plt.title('买卖均势') plt.show() plt.savefig('{}agent-{}ep—balance.png'.format(agent_count,ep))
[ "trade_env.StockTradingEnv", "matplotlib.pyplot.ylabel", "numpy.delete", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "datetime.datetime.now", "matplotlib.pyplot.title", "agent.agent", "matplotlib.pyplot.show" ]
[((282, 338), 'trade_env.StockTradingEnv', 'StockTradingEnv', (['(1000000000.0)', '(10)', '(0.1)', '(0.01)', '(0.01)', '(6)', '(4)'], {}), '(1000000000.0, 10, 0.1, 0.01, 0.01, 6, 4)\n', (297, 338), False, 'from trade_env import StockTradingEnv\n'), ((1977, 2002), 'matplotlib.pyplot.plot', 'plt.plot', (['index_', 'price_l'], {}), '(index_, price_l)\n', (1985, 2002), True, 'import matplotlib.pyplot as plt\n'), ((2006, 2022), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""序列"""'], {}), "('序列')\n", (2016, 2022), True, 'import matplotlib.pyplot as plt\n'), ((2027, 2043), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""价格"""'], {}), "('价格')\n", (2037, 2043), True, 'import matplotlib.pyplot as plt\n'), ((2048, 2065), 'matplotlib.pyplot.title', 'plt.title', (['"""买卖均势"""'], {}), "('买卖均势')\n", (2057, 2065), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2080), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2078, 2080), True, 'import matplotlib.pyplot as plt\n'), ((419, 448), 'agent.agent', 'agent', (['i', '(200000.0)', '(1000000.0)'], {}), '(i, 200000.0, 1000000.0)\n', (424, 448), False, 'from agent import agent\n'), ((1577, 1618), 'numpy.delete', 'np.delete', (['list_of_sell', 'del_sell'], {'axis': '(0)'}), '(list_of_sell, del_sell, axis=0)\n', (1586, 1618), True, 'import numpy as np\n'), ((1645, 1684), 'numpy.delete', 'np.delete', (['list_of_buy', 'del_buy'], {'axis': '(0)'}), '(list_of_buy, del_buy, axis=0)\n', (1654, 1684), True, 'import numpy as np\n'), ((1799, 1813), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1811, 1813), False, 'from datetime import datetime\n')]
import copy from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple import numpy import torch import torch.nn as nn import torch.nn.functional as F from allennlp.modules import FeedForward, InputVariationalDropout from allennlp.modules.matrix_attention.bilinear_matrix_attention import \ BilinearMatrixAttention from allennlp.nn import Activation from allennlp.nn.chu_liu_edmonds import decode_mst from allennlp.nn.util import (get_device_of, get_lengths_from_binary_sequence_mask, get_range_vector, masked_log_softmax) from torch import Tensor from .component import BaseModel, BaseModelConfig, BertInput, PoolDecoderConfig, MeanPoolDecoderConfig @dataclass class SeqClassConfig(BaseModelConfig): num_labels: int = -1 def build(self): return SeqClass(self) class SeqClass(BaseModel): def __init__(self, config: SeqClassConfig): assert isinstance(config, SeqClassConfig) assert (isinstance(config.decoder_config, PoolDecoderConfig) or isinstance(config.decoder_config, MeanPoolDecoderConfig)) super().__init__(config) self.config = config self.classifier = nn.Linear(config.decoder_config.output_dim, config.num_labels) def forward(self, token: Tensor, token_type: Tensor, position: Tensor, mask: Tensor, label: Tensor, ex_lang: Tensor, lang_key: Optional[str] = None): inputs = BertInput(token=token, token_type=token_type, position=position, mask=mask, lang_key=lang_key) ctx = self.embed(inputs) #ctx = ctx.cpu() logits = F.log_softmax(self.classifier(ctx), dim=-1) loss = F.nll_loss(logits, label) self.evaluator.add(label, logits) return loss @dataclass class SeqLabelConfig(BaseModelConfig): num_labels: int = -1 label_pad_idx: int = -1 def build(self): return SeqLabel(self) class SeqLabel(BaseModel): def __init__(self, config: SeqLabelConfig): assert isinstance(config, SeqLabelConfig) super().__init__(config) self.config = config self.classifier = nn.Linear(config.decoder_config.output_dim, config.num_labels) def forward(self, token: Tensor, token_type: 
Tensor, position: Tensor, mask: Tensor, label: Tensor, ex_lang: Tensor, lang_key: Optional[str] = None): inputs = BertInput(token=token, token_type=token_type, position=position, mask=mask, lang_key=lang_key) ctx = self.embed(inputs) #ctx = ctx.cpu() logits = F.log_softmax(self.classifier(ctx), dim=-1) loss = F.nll_loss(logits.view(-1, self.config.num_labels), label.view(-1), ignore_index=self.config.label_pad_idx) self.evaluator.add(label, logits) return loss # similar to above, but with separate top-level classification layer @dataclass class SeqSepTopClassConfig(BaseModelConfig): num_labels: int = -1 def build(self): return SeqSepTopClass(self) class SeqSepTopClass(BaseModel): def __init__(self, config: SeqSepTopClassConfig): assert isinstance(config, SeqSepTopClassConfig) assert (isinstance(config.decoder_config, PoolDecoderConfig) or isinstance(config.decoder_config, MeanPoolDecoderConfig)) super().__init__(config) self.config = config self.classifier = {} def forward(self, token: Tensor, token_type: Tensor, position: Tensor, mask: Tensor, label: Tensor, ex_lang: Tensor, lang_key: Optional[str] = None): inputs = BertInput(token=token, token_type=token_type, position=position, mask=mask, lang_key=lang_key) ctx = self.embed(inputs) ctx = ctx.cpu() if ex_lang.item() not in self.classifier.keys(): self.classifier[ex_lang.item()] = nn.Linear(self.config.decoder_config.output_dim, self.config.num_labels) logits = F.log_softmax(self.classifier[ex_lang.item()](ctx), dim=-1) loss = F.nll_loss(logits, label.cpu()) self.evaluator.add(label.cpu(), logits) return loss @dataclass class SeqSepTopLabelConfig(BaseModelConfig): num_labels: int = -1 label_pad_idx: int = -1 def build(self): return SeqSepTopLabel(self) class SeqSepTopLabel(BaseModel): def __init__(self, config: SeqSepTopLabelConfig): assert isinstance(config, SeqSepTopLabelConfig) super().__init__(config) self.config = config self.classifier = {} def forward(self, token: Tensor, token_type: Tensor, position: 
Tensor, mask: Tensor, label: Tensor, ex_lang: Tensor, lang_key: Optional[str] = None): inputs = BertInput(token=token, token_type=token_type, position=position, mask=mask, lang_key=lang_key) ctx = self.embed(inputs) ctx = ctx.cpu() if ex_lang.item() not in self.classifier.keys(): self.classifier[ex_lang.item()] = nn.Linear(self.config.decoder_config.output_dim, self.config.num_labels) logits = F.log_softmax(self.classifier[ex_lang.item()](ctx), dim=-1) loss = F.nll_loss(logits.view(-1, self.config.num_labels), label.view(-1).cpu(), ignore_index=self.config.label_pad_idx) self.evaluator.add(label.cpu(), logits) return loss @dataclass class BaselineSeqClassConfig(BaseModelConfig): num_labels: int = -1 def build(self): return BaselineSeqClass(self) class BaselineSeqClass(BaseModel): def __init__(self, config: BaselineSeqClassConfig): assert isinstance(config, BaselineSeqClassConfig) assert (isinstance(config.decoder_config, PoolDecoderConfig) or isinstance(config.decoder_config, MeanPoolDecoderConfig)) super().__init__(config) self.config = config self.embedding = nn.EmbeddingBag(119547, 768, mode='mean') self.classifier = nn.Linear(config.decoder_config.output_dim, config.num_labels) initrange = 0.1 self.embedding.weight.data.uniform_(-initrange, initrange) def forward(self, token: Tensor, token_type: Tensor, position: Tensor, mask: Tensor, label: Tensor, ex_lang: Tensor, lang_key: Optional[str] = None): ctx = self.embedding(token) #ctx = ctx.cpu() logits = F.log_softmax(self.classifier(ctx), dim=-1) loss = F.nll_loss(logits, label) self.evaluator.add(label, logits) return loss @dataclass class BaselineSeqLabelConfig(BaseModelConfig): num_labels: int = -1 label_pad_idx: int = -1 def build(self): return BaselineSeqLabel(self) class BaselineSeqLabel(BaseModel): def __init__(self, config: BaselineSeqLabelConfig): assert isinstance(config, BaselineSeqLabelConfig) super().__init__(config) self.config = config self.embedding = nn.EmbeddingBag(119547, 768, mode='mean') 
self.classifier = nn.Linear(config.decoder_config.output_dim, config.num_labels) initrange = 0.1 self.embedding.weight.data.uniform_(-initrange, initrange) def forward(self, token: Tensor, token_type: Tensor, position: Tensor, mask: Tensor, label: Tensor, ex_lang: Tensor, lang_key: Optional[str] = None): ctx = self.embedding(inputs) #ctx = ctx.cpu() logits = F.log_softmax(self.classifier(ctx), dim=-1) loss = F.nll_loss(logits.view(-1, self.config.num_labels), label.view(-1), ignore_index=self.config.label_pad_idx) self.evaluator.add(label, logits) return loss @dataclass class ParsingConfig(BaseModelConfig): num_labels: int = -1 num_pos: int = -1 use_pos: bool = False pos_dim: int = 100 tag_dim: int = 128 arc_dim: int = 512 use_mst_decoding_for_validation: bool = True dropout: float = 0.33 def build(self): return BiaffineDependencyParser(self) class BiaffineDependencyParser(BaseModel): """ This dependency parser follows the model of ` Deep Biaffine Attention for Neural Dependency Parsing (<NAME>, 2016) <https://arxiv.org/abs/1611.01734>`_ . 
(Based on AllenNLP) """ def __init__(self, config: ParsingConfig): assert isinstance(config, ParsingConfig) super().__init__(config) self.config = config encoder_dim = config.decoder_config.output_dim if self.config.use_pos: self.pos_embedding = nn.Embedding(config.num_pos, config.pos_dim, padding_idx=0) encoder_dim += config.pos_dim self.head_arc_feedforward = FeedForward(encoder_dim, 1, config.arc_dim, Activation.by_name("elu")()) self.child_arc_feedforward = copy.deepcopy(self.head_arc_feedforward) self.arc_attention = BilinearMatrixAttention(config.arc_dim, config.arc_dim, use_input_biases=True) self.head_tag_feedforward = FeedForward(encoder_dim, 1, config.tag_dim, Activation.by_name("elu")()) self.child_tag_feedforward = copy.deepcopy(self.head_tag_feedforward) self.tag_bilinear = torch.nn.modules.Bilinear(config.tag_dim, config.tag_dim, config.num_labels) self.dropout = InputVariationalDropout(config.dropout) self.use_mst_decoding_for_validation = config.use_mst_decoding_for_validation def forward(self, input_ids: Tensor, pos_ids: Tensor, segment_ids: Tensor, position: Tensor, input_mask: Tensor, nonword_mask: Tensor, head_tags: Tensor, head_indices: Tensor, lang_key: Optional[str] = None): """ Parameters ---------- input_ids: torch.LongTensor, required. Has shape ``(batch_size, sequence_length)``. input_mask: torch.LongTensor, required. Has shape ``(batch_size, sequence_length)``. nonword_mask: torch.LongTensor, required. Has shape ``(batch_size, sequence_length)``. segment_ids: torch.LongTensor, required. Has shape ``(batch_size, sequence_length)``. head_tags : torch.LongTensor, optional (default = None) A torch tensor representing the sequence of integer gold class labels for the arcs in the dependency parse. Has shape ``(batch_size, sequence_length)``. head_indices : torch.LongTensor, optional (default = None) A torch tensor representing the sequence of integer indices denoting the parent of every word in the dependency parse. 
Has shape ``(batch_size, sequence_length)``. """ inputs = BertInput(token=input_ids, token_type=segment_ids, position=position, mask=input_mask, lang_key=lang_key) encoded_text = self.embed(inputs) if self.config.use_pos: encoded_pos = self.decoder.dropout(self.pos_embedding(pos_ids)) encoded_text = torch.cat((encoded_text, encoded_pos), dim=-1) batch_size, _, encoding_dim = encoded_text.size() float_mask = nonword_mask.float() # shape (batch_size, sequence_length, arc_dim) head_arc_representation = self.dropout( self.head_arc_feedforward(encoded_text)) child_arc_representation = self.dropout( self.child_arc_feedforward(encoded_text)) # shape (batch_size, sequence_length, tag_dim) head_tag_representation = self.dropout( self.head_tag_feedforward(encoded_text)) child_tag_representation = self.dropout( self.child_tag_feedforward(encoded_text)) # shape (batch_size, sequence_length, sequence_length) attended_arcs = self.arc_attention(head_arc_representation, child_arc_representation) minus_inf = -1e8 minus_mask = (1 - float_mask) * minus_inf attended_arcs = attended_arcs + minus_mask.unsqueeze( 2) + minus_mask.unsqueeze(1) if self.training or not self.use_mst_decoding_for_validation: predicted_heads, predicted_head_tags = self._greedy_decode( head_tag_representation, child_tag_representation, attended_arcs, nonword_mask) else: lengths = input_mask.data.sum(dim=1).long().cpu().numpy() predicted_heads, predicted_head_tags = self._mst_decode( head_tag_representation, child_tag_representation, attended_arcs, nonword_mask, lengths) arc_nll, tag_nll = self._construct_loss( head_tag_representation=head_tag_representation, child_tag_representation=child_tag_representation, attended_arcs=attended_arcs, head_indices=head_indices, head_tags=head_tags, mask=nonword_mask) loss = arc_nll + tag_nll # We calculate attatchment scores for the whole sentence # but excluding the symbolic ROOT token at the start, # which is why we start from the second element in the sequence. 
self.evaluator.add(head_indices[:, 1:], head_tags[:, 1:], predicted_heads[:, 1:], predicted_head_tags[:, 1:], nonword_mask[:, 1:]) return loss def _construct_loss(self, head_tag_representation: torch.Tensor, child_tag_representation: torch.Tensor, attended_arcs: torch.Tensor, head_indices: torch.Tensor, head_tags: torch.Tensor, mask: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Computes the arc and tag loss for a sequence given gold head indices and tags. Parameters ---------- head_tag_representation : ``torch.Tensor``, required. A tensor of shape (batch_size, sequence_length, tag_dim), which will be used to generate predictions for the dependency tags for the given arcs. child_tag_representation : ``torch.Tensor``, required A tensor of shape (batch_size, sequence_length, tag_dim), which will be used to generate predictions for the dependency tags for the given arcs. attended_arcs : ``torch.Tensor``, required. A tensor of shape (batch_size, sequence_length, sequence_length) used to generate a distribution over attachments of a given word to all other words. head_indices : ``torch.Tensor``, required. A tensor of shape (batch_size, sequence_length). The indices of the heads for every word. head_tags : ``torch.Tensor``, required. A tensor of shape (batch_size, sequence_length). The dependency labels of the heads for every word. mask : ``torch.Tensor``, required. A mask of shape (batch_size, sequence_length), denoting unpadded elements in the sequence. Returns ------- arc_nll : ``torch.Tensor``, required. The negative log likelihood from the arc loss. tag_nll : ``torch.Tensor``, required. The negative log likelihood from the arc tag loss. 
""" float_mask = mask.float() batch_size, sequence_length, _ = attended_arcs.size() # shape (batch_size, 1) range_vector = get_range_vector( batch_size, get_device_of(attended_arcs)).unsqueeze(1) # shape (batch_size, sequence_length, sequence_length) normalised_arc_logits = masked_log_softmax( attended_arcs, mask) * float_mask.unsqueeze(2) * float_mask.unsqueeze(1) # shape (batch_size, sequence_length, num_head_tags) head_tag_logits = self._get_head_tags(head_tag_representation, child_tag_representation, head_indices) normalised_head_tag_logits = masked_log_softmax( head_tag_logits, mask.unsqueeze(-1)) * float_mask.unsqueeze(-1) # index matrix with shape (batch, sequence_length) timestep_index = get_range_vector(sequence_length, get_device_of(attended_arcs)) child_index = timestep_index.view(1, sequence_length).expand( batch_size, sequence_length).long() # shape (batch_size, sequence_length) arc_loss = normalised_arc_logits[range_vector, child_index, head_indices] tag_loss = normalised_head_tag_logits[range_vector, child_index, head_tags] # We don't care about predictions for the symbolic ROOT token's head, # so we remove it from the loss. arc_loss = arc_loss[:, 1:] tag_loss = tag_loss[:, 1:] # The number of valid positions is equal to the number of unmasked elements minus # 1 per sequence in the batch, to account for the symbolic HEAD token. valid_positions = mask.sum() - batch_size arc_nll = -arc_loss.sum() / valid_positions.float() tag_nll = -tag_loss.sum() / valid_positions.float() return arc_nll, tag_nll def _greedy_decode(self, head_tag_representation: torch.Tensor, child_tag_representation: torch.Tensor, attended_arcs: torch.Tensor, mask: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Decodes the head and head tag predictions by decoding the unlabeled arcs independently for each word and then again, predicting the head tags of these greedily chosen arcs independently. Note that this method of decoding is not guaranteed to produce trees (i.e. 
there maybe be multiple roots, or cycles when children are attached to their parents). Parameters ---------- head_tag_representation : ``torch.Tensor``, required. A tensor of shape (batch_size, sequence_length, tag_dim), which will be used to generate predictions for the dependency tags for the given arcs. child_tag_representation : ``torch.Tensor``, required A tensor of shape (batch_size, sequence_length, tag_dim), which will be used to generate predictions for the dependency tags for the given arcs. attended_arcs : ``torch.Tensor``, required. A tensor of shape (batch_size, sequence_length, sequence_length) used to generate a distribution over attachments of a given word to all other words. Returns ------- heads : ``torch.Tensor`` A tensor of shape (batch_size, sequence_length) representing the greedily decoded heads of each word. head_tags : ``torch.Tensor`` A tensor of shape (batch_size, sequence_length) representing the dependency tags of the greedily decoded heads of each word. """ # Mask the diagonal, because the head of a word can't be itself. attended_arcs = attended_arcs + torch.diag( attended_arcs.new(mask.size(1)).fill_(-numpy.inf)) # Mask padded tokens, because we only want to consider actual words as heads. if mask is not None: minus_mask = (1 - mask).byte().unsqueeze(2) attended_arcs.masked_fill_(minus_mask, -numpy.inf) # Compute the heads greedily. # shape (batch_size, sequence_length) _, heads = attended_arcs.max(dim=2) # Given the greedily predicted heads, decode their dependency tags. 
# shape (batch_size, sequence_length, num_head_tags) head_tag_logits = self._get_head_tags(head_tag_representation, child_tag_representation, heads) _, head_tags = head_tag_logits.max(dim=2) return heads, head_tags def _mst_decode(self, head_tag_representation: torch.Tensor, child_tag_representation: torch.Tensor, attended_arcs: torch.Tensor, mask: torch.Tensor, lengths: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Decodes the head and head tag predictions using the Edmonds' Algorithm for finding minimum spanning trees on directed graphs. Nodes in the graph are the words in the sentence, and between each pair of nodes, there is an edge in each direction, where the weight of the edge corresponds to the most likely dependency label probability for that arc. The MST is then generated from this directed graph. Parameters ---------- head_tag_representation : ``torch.Tensor``, required. A tensor of shape (batch_size, sequence_length, tag_dim), which will be used to generate predictions for the dependency tags for the given arcs. child_tag_representation : ``torch.Tensor``, required A tensor of shape (batch_size, sequence_length, tag_dim), which will be used to generate predictions for the dependency tags for the given arcs. attended_arcs : ``torch.Tensor``, required. A tensor of shape (batch_size, sequence_length, sequence_length) used to generate a distribution over attachments of a given word to all other words. Returns ------- heads : ``torch.Tensor`` A tensor of shape (batch_size, sequence_length) representing the greedily decoded heads of each word. head_tags : ``torch.Tensor`` A tensor of shape (batch_size, sequence_length) representing the dependency tags of the optimally decoded heads of each word. 
""" batch_size, sequence_length, tag_dim = head_tag_representation.size() # lengths = mask.data.sum(dim=1).long().cpu().numpy() expanded_shape = [ batch_size, sequence_length, sequence_length, tag_dim ] head_tag_representation = head_tag_representation.unsqueeze(2) head_tag_representation = head_tag_representation.expand( *expanded_shape).contiguous() child_tag_representation = child_tag_representation.unsqueeze(1) child_tag_representation = child_tag_representation.expand( *expanded_shape).contiguous() # Shape (batch_size, sequence_length, sequence_length, num_head_tags) pairwise_head_logits = self.tag_bilinear(head_tag_representation, child_tag_representation) # Note that this log_softmax is over the tag dimension, and we don't consider pairs # of tags which are invalid (e.g are a pair which includes a padded element) anyway below. # Shape (batch, num_labels,sequence_length, sequence_length) normalized_pairwise_head_logits = F.log_softmax(pairwise_head_logits, dim=3).permute( 0, 3, 1, 2) # Mask padded tokens, because we only want to consider actual words as heads. minus_inf = -1e8 minus_mask = (1 - mask.float()) * minus_inf attended_arcs = attended_arcs + minus_mask.unsqueeze( 2) + minus_mask.unsqueeze(1) # Shape (batch_size, sequence_length, sequence_length) normalized_arc_logits = F.log_softmax(attended_arcs, dim=2).transpose(1, 2) # Shape (batch_size, num_head_tags, sequence_length, sequence_length) # This energy tensor expresses the following relation: # energy[i,j] = "Score that i is the head of j". In this # case, we have heads pointing to their children. 
batch_energy = torch.exp( normalized_arc_logits.unsqueeze(1) + normalized_pairwise_head_logits) return self._run_mst_decoding(batch_energy, lengths) @staticmethod def _run_mst_decoding(batch_energy: torch.Tensor, lengths: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: heads = [] head_tags = [] for energy, length in zip(batch_energy.detach().cpu(), lengths): scores, tag_ids = energy.max(dim=0) # Although we need to include the root node so that the MST includes it, # we do not want any word to be the parent of the root node. # Here, we enforce this by setting the scores for all word -> ROOT edges # edges to be 0. scores[0, :] = 0 # Decode the heads. Because we modify the scores to prevent # adding in word -> ROOT edges, we need to find the labels ourselves. instance_heads, _ = decode_mst(scores.numpy(), length, has_labels=False) # Find the labels which correspond to the edges in the max spanning tree. instance_head_tags = [] for child, parent in enumerate(instance_heads): instance_head_tags.append(tag_ids[parent, child].item()) # We don't care what the head or tag is for the root token, but by default it's # not necesarily the same in the batched vs unbatched case, which is annoying. # Here we'll just set them to zero. instance_heads[0] = 0 instance_head_tags[0] = 0 heads.append(instance_heads) head_tags.append(instance_head_tags) return torch.from_numpy(numpy.stack(heads)), torch.from_numpy( numpy.stack(head_tags)) def _get_head_tags(self, head_tag_representation: torch.Tensor, child_tag_representation: torch.Tensor, head_indices: torch.Tensor) -> torch.Tensor: """ Decodes the head tags given the head and child tag representations and a tensor of head indices to compute tags for. Note that these are either gold or predicted heads, depending on whether this function is being called to compute the loss, or if it's being called during inference. Parameters ---------- head_tag_representation : ``torch.Tensor``, required. 
A tensor of shape (batch_size, sequence_length, tag_dim), which will be used to generate predictions for the dependency tags for the given arcs. child_tag_representation : ``torch.Tensor``, required A tensor of shape (batch_size, sequence_length, tag_dim), which will be used to generate predictions for the dependency tags for the given arcs. head_indices : ``torch.Tensor``, required. A tensor of shape (batch_size, sequence_length). The indices of the heads for every word. Returns ------- head_tag_logits : ``torch.Tensor`` A tensor of shape (batch_size, sequence_length, num_head_tags), representing logits for predicting a distribution over tags for each arc. """ batch_size = head_tag_representation.size(0) # shape (batch_size,) range_vector = get_range_vector( batch_size, get_device_of(head_tag_representation)).unsqueeze(1) # This next statement is quite a complex piece of indexing, which you really # need to read the docs to understand. See here: # https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#advanced-indexing # In effect, we are selecting the indices corresponding to the heads of each word from the # sequence length dimension for each element in the batch. # shape (batch_size, sequence_length, tag_dim) selected_head_tag_representations = head_tag_representation[ range_vector, head_indices] selected_head_tag_representations = selected_head_tag_representations.contiguous( ) # shape (batch_size, sequence_length, num_head_tags) head_tag_logits = self.tag_bilinear(selected_head_tag_representations, child_tag_representation) return head_tag_logits
[ "torch.nn.EmbeddingBag", "torch.nn.functional.nll_loss", "allennlp.nn.Activation.by_name", "allennlp.nn.util.get_device_of", "torch.nn.modules.Bilinear", "torch.cat", "numpy.stack", "torch.nn.Linear", "copy.deepcopy", "allennlp.nn.util.masked_log_softmax", "allennlp.modules.InputVariationalDropo...
[((1234, 1296), 'torch.nn.Linear', 'nn.Linear', (['config.decoder_config.output_dim', 'config.num_labels'], {}), '(config.decoder_config.output_dim, config.num_labels)\n', (1243, 1296), True, 'import torch.nn as nn\n'), ((1954, 1979), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['logits', 'label'], {}), '(logits, label)\n', (1964, 1979), True, 'import torch.nn.functional as F\n'), ((2414, 2476), 'torch.nn.Linear', 'nn.Linear', (['config.decoder_config.output_dim', 'config.num_labels'], {}), '(config.decoder_config.output_dim, config.num_labels)\n', (2423, 2476), True, 'import torch.nn as nn\n'), ((6939, 6980), 'torch.nn.EmbeddingBag', 'nn.EmbeddingBag', (['(119547)', '(768)'], {'mode': '"""mean"""'}), "(119547, 768, mode='mean')\n", (6954, 6980), True, 'import torch.nn as nn\n'), ((7007, 7069), 'torch.nn.Linear', 'nn.Linear', (['config.decoder_config.output_dim', 'config.num_labels'], {}), '(config.decoder_config.output_dim, config.num_labels)\n', (7016, 7069), True, 'import torch.nn as nn\n'), ((7601, 7626), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['logits', 'label'], {}), '(logits, label)\n', (7611, 7626), True, 'import torch.nn.functional as F\n'), ((8100, 8141), 'torch.nn.EmbeddingBag', 'nn.EmbeddingBag', (['(119547)', '(768)'], {'mode': '"""mean"""'}), "(119547, 768, mode='mean')\n", (8115, 8141), True, 'import torch.nn as nn\n'), ((8168, 8230), 'torch.nn.Linear', 'nn.Linear', (['config.decoder_config.output_dim', 'config.num_labels'], {}), '(config.decoder_config.output_dim, config.num_labels)\n', (8177, 8230), True, 'import torch.nn as nn\n'), ((10236, 10276), 'copy.deepcopy', 'copy.deepcopy', (['self.head_arc_feedforward'], {}), '(self.head_arc_feedforward)\n', (10249, 10276), False, 'import copy\n'), ((10307, 10385), 'allennlp.modules.matrix_attention.bilinear_matrix_attention.BilinearMatrixAttention', 'BilinearMatrixAttention', (['config.arc_dim', 'config.arc_dim'], {'use_input_biases': '(True)'}), '(config.arc_dim, config.arc_dim, 
use_input_biases=True)\n', (10330, 10385), False, 'from allennlp.modules.matrix_attention.bilinear_matrix_attention import BilinearMatrixAttention\n'), ((10687, 10727), 'copy.deepcopy', 'copy.deepcopy', (['self.head_tag_feedforward'], {}), '(self.head_tag_feedforward)\n', (10700, 10727), False, 'import copy\n'), ((10757, 10833), 'torch.nn.modules.Bilinear', 'torch.nn.modules.Bilinear', (['config.tag_dim', 'config.tag_dim', 'config.num_labels'], {}), '(config.tag_dim, config.tag_dim, config.num_labels)\n', (10782, 10833), False, 'import torch\n'), ((10965, 11004), 'allennlp.modules.InputVariationalDropout', 'InputVariationalDropout', (['config.dropout'], {}), '(config.dropout)\n', (10988, 11004), False, 'from allennlp.modules import FeedForward, InputVariationalDropout\n'), ((4594, 4666), 'torch.nn.Linear', 'nn.Linear', (['self.config.decoder_config.output_dim', 'self.config.num_labels'], {}), '(self.config.decoder_config.output_dim, self.config.num_labels)\n', (4603, 4666), True, 'import torch.nn as nn\n'), ((5961, 6033), 'torch.nn.Linear', 'nn.Linear', (['self.config.decoder_config.output_dim', 'self.config.num_labels'], {}), '(self.config.decoder_config.output_dim, self.config.num_labels)\n', (5970, 6033), True, 'import torch.nn as nn\n'), ((9847, 9906), 'torch.nn.Embedding', 'nn.Embedding', (['config.num_pos', 'config.pos_dim'], {'padding_idx': '(0)'}), '(config.num_pos, config.pos_dim, padding_idx=0)\n', (9859, 9906), True, 'import torch.nn as nn\n'), ((12790, 12836), 'torch.cat', 'torch.cat', (['(encoded_text, encoded_pos)'], {'dim': '(-1)'}), '((encoded_text, encoded_pos), dim=-1)\n', (12799, 12836), False, 'import torch\n'), ((18113, 18141), 'allennlp.nn.util.get_device_of', 'get_device_of', (['attended_arcs'], {}), '(attended_arcs)\n', (18126, 18141), False, 'from allennlp.nn.util import get_device_of, get_lengths_from_binary_sequence_mask, get_range_vector, masked_log_softmax\n'), ((10170, 10195), 'allennlp.nn.Activation.by_name', 'Activation.by_name', 
(['"""elu"""'], {}), "('elu')\n", (10188, 10195), False, 'from allennlp.nn import Activation\n'), ((10621, 10646), 'allennlp.nn.Activation.by_name', 'Activation.by_name', (['"""elu"""'], {}), "('elu')\n", (10639, 10646), False, 'from allennlp.nn import Activation\n'), ((17438, 17477), 'allennlp.nn.util.masked_log_softmax', 'masked_log_softmax', (['attended_arcs', 'mask'], {}), '(attended_arcs, mask)\n', (17456, 17477), False, 'from allennlp.nn.util import get_device_of, get_lengths_from_binary_sequence_mask, get_range_vector, masked_log_softmax\n'), ((24886, 24928), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['pairwise_head_logits'], {'dim': '(3)'}), '(pairwise_head_logits, dim=3)\n', (24899, 24928), True, 'import torch.nn.functional as F\n'), ((25429, 25464), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['attended_arcs'], {'dim': '(2)'}), '(attended_arcs, dim=2)\n', (25442, 25464), True, 'import torch.nn.functional as F\n'), ((27612, 27630), 'numpy.stack', 'numpy.stack', (['heads'], {}), '(heads)\n', (27623, 27630), False, 'import numpy\n'), ((27663, 27685), 'numpy.stack', 'numpy.stack', (['head_tags'], {}), '(head_tags)\n', (27674, 27685), False, 'import numpy\n'), ((17300, 17328), 'allennlp.nn.util.get_device_of', 'get_device_of', (['attended_arcs'], {}), '(attended_arcs)\n', (17313, 17328), False, 'from allennlp.nn.util import get_device_of, get_lengths_from_binary_sequence_mask, get_range_vector, masked_log_softmax\n'), ((29314, 29352), 'allennlp.nn.util.get_device_of', 'get_device_of', (['head_tag_representation'], {}), '(head_tag_representation)\n', (29327, 29352), False, 'from allennlp.nn.util import get_device_of, get_lengths_from_binary_sequence_mask, get_range_vector, masked_log_softmax\n')]
import numpy as np from .layer_base import LayerBase class ReluLayer(LayerBase): def __init__(self): super().__init__() self.cache = {} def id(self): return "Relu" def forward(self, x): y = np.maximum(x, 0) self.cache["is_negative"] = (x < 0) return y def backward(self, dy): is_negative = self.cache["is_negative"] dx = dy dx[is_negative] = 0 return dx class SigmoidLayer(LayerBase): def __init__(self): super().__init__() self.cache = {} def id(self): return "Sigmoid" def forward(self, x): y = 1 / (1 + np.exp(-x)) self.cache["y"] = y return y def backward(self, dy): y = self.cache["y"] dx = y * (1 - y) * dy return dx class TanhLayer(LayerBase): def __init__(self): super().__init__() self.cache = {} def id(self): return "Tanh" def forward(self, x): y = np.tanh(x) self.cache["y"] = y return y def backward(self, dy): y = self.cache["y"] dx = (1 - np.power(y, 2)) * dy return dx class SoftmaxWithLossLayer(LayerBase): def __init__(self): super().__init__() self.cache = {} def id(self): return "SoftmaxWithLoss" def forward(self, x, target): batch_size = target.shape[0] c = np.max(x) y = np.exp(x - c) / np.sum(np.exp(x - c), axis=1, keepdims=True) loss = -np.sum(np.sum(target * np.log(y), axis=1)) / batch_size self.cache["target"] = target.copy() self.cache["y"] = y.copy() return loss def backward(self, dy=1): y = self.cache["y"].copy() target = self.cache["target"].copy() batch_size = target.shape[0] dx = dy * (y - target) / batch_size return dx
[ "numpy.power", "numpy.log", "numpy.tanh", "numpy.max", "numpy.exp", "numpy.maximum" ]
[((240, 256), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (250, 256), True, 'import numpy as np\n'), ((1014, 1024), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (1021, 1024), True, 'import numpy as np\n'), ((1441, 1450), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1447, 1450), True, 'import numpy as np\n'), ((1463, 1476), 'numpy.exp', 'np.exp', (['(x - c)'], {}), '(x - c)\n', (1469, 1476), True, 'import numpy as np\n'), ((662, 672), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (668, 672), True, 'import numpy as np\n'), ((1147, 1161), 'numpy.power', 'np.power', (['y', '(2)'], {}), '(y, 2)\n', (1155, 1161), True, 'import numpy as np\n'), ((1486, 1499), 'numpy.exp', 'np.exp', (['(x - c)'], {}), '(x - c)\n', (1492, 1499), True, 'import numpy as np\n'), ((1563, 1572), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (1569, 1572), True, 'import numpy as np\n')]
# Copyright 2019 The TensorNetwork Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import pytest import numpy as np import tensornetwork as tn from tensornetwork.backends import backend_factory from tensornetwork.matrixproductstates.base_mps import BaseMPS import tensorflow as tf from jax.config import config config.update("jax_enable_x64", True) tf.compat.v1.enable_v2_behavior() @pytest.fixture( name="backend_dtype_values", params=[('numpy', np.float64), ('numpy', np.complex128), ('tensorflow', np.float64), ('tensorflow', np.complex128), ('pytorch', np.float64), ('jax', np.float64)]) def backend_dtype(request): return request.param def get_random_np(shape, dtype, seed=0): np.random.seed(seed) #get the same tensors every time you call this function if dtype is np.complex64: return np.random.randn(*shape).astype( np.float32) + 1j * np.random.randn(*shape).astype(np.float32) if dtype is np.complex128: return np.random.randn(*shape).astype( np.float64) + 1j * np.random.randn(*shape).astype(np.float64) return np.random.randn(*shape).astype(dtype) def test_normalization(backend): D, d, N = 10, 2, 10 tensors = [np.random.randn(1, d, D)] + [ np.random.randn(D, d, D) for _ in range(N - 2) ] + [np.random.randn(D, d, 1)] mps = BaseMPS(tensors, center_position=0, backend=backend) mps.position(len(mps) - 1) Z = mps.position(0, normalize=True) np.testing.assert_allclose(Z, 1.0) 
def test_backend_initialization(backend): be = backend_factory.get_backend(backend) D, d, N = 10, 2, 10 tensors = [np.random.randn(1, d, D)] + [ np.random.randn(D, d, D) for _ in range(N - 2) ] + [np.random.randn(D, d, 1)] mps = BaseMPS(tensors, center_position=0, backend=be) mps.position(len(mps) - 1) Z = mps.position(0, normalize=True) np.testing.assert_allclose(Z, 1.0) def test_left_orthonormalization(backend_dtype_values): backend = backend_dtype_values[0] dtype = backend_dtype_values[1] D, d, N = 10, 2, 10 tensors = [get_random_np((1, d, D), dtype)] + [ get_random_np((D, d, D), dtype) for _ in range(N - 2) ] + [get_random_np((D, d, 1), dtype)] mps = BaseMPS(tensors, center_position=N - 1, backend=backend) mps.position(0) mps.position(len(mps) - 1) assert all([ mps.check_orthonormality('left', site) < 1E-12 for site in range(len(mps)) ]) def test_right_orthonormalization(backend_dtype_values): backend = backend_dtype_values[0] dtype = backend_dtype_values[1] D, d, N = 10, 2, 10 tensors = [get_random_np((1, d, D), dtype)] + [ get_random_np((D, d, D), dtype) for _ in range(N - 2) ] + [get_random_np((D, d, 1), dtype)] mps = BaseMPS(tensors, center_position=0, backend=backend) mps.position(len(mps) - 1) mps.position(0) assert all([ mps.check_orthonormality('right', site) < 1E-12 for site in range(len(mps)) ]) def test_apply_one_site_gate(backend_dtype_values): backend = backend_dtype_values[0] dtype = backend_dtype_values[1] D, d, N = 10, 2, 10 tensors = [get_random_np((1, d, D), dtype)] + [ get_random_np((D, d, D), dtype) for _ in range(N - 2) ] + [get_random_np((D, d, 1), dtype)] mps = BaseMPS(tensors, center_position=0, backend=backend) tensor = mps.tensors[5] gate = get_random_np((2, 2), dtype) mps.apply_one_site_gate(gate, 5) actual = np.transpose(np.tensordot(tensor, gate, ([1], [1])), (0, 2, 1)) np.testing.assert_allclose(mps.tensors[5], actual) def test_apply_two_site_gate(backend_dtype_values): backend = backend_dtype_values[0] dtype = 
backend_dtype_values[1] D, d, N = 10, 2, 10 tensors = [get_random_np((1, d, D), dtype)] + [ get_random_np((D, d, D), dtype) for _ in range(N - 2) ] + [get_random_np((D, d, 1), dtype)] mps = BaseMPS(tensors, center_position=0, backend=backend) gate = get_random_np((2, 2, 2, 2), dtype) tensor1 = mps.tensors[5] tensor2 = mps.tensors[6] mps.apply_two_site_gate(gate, 5, 6) tmp = np.tensordot(tensor1, tensor2, ([2], [0])) actual = np.transpose(np.tensordot(tmp, gate, ([1, 2], [2, 3])), (0, 2, 3, 1)) node1 = tn.Node(mps.tensors[5], backend=backend) node2 = tn.Node(mps.tensors[6], backend=backend) node1[2] ^ node2[0] order = [node1[0], node1[1], node2[1], node2[2]] res = tn.contract_between(node1, node2) res.reorder_edges(order) np.testing.assert_allclose(res.tensor, actual) def test_position_raises_error(backend): D, d, N = 10, 2, 10 tensors = [np.random.randn(1, d, D)] + [ np.random.randn(D, d, D) for _ in range(N - 2) ] + [np.random.randn(D, d, 1)] mps = BaseMPS(tensors, center_position=0, backend=backend) with pytest.raises(ValueError): mps.position(-1) with pytest.raises(ValueError): mps.position(11) def test_position_no_normalization(backend): D, d, N = 4, 2, 6 tensors = [np.ones((1, d, D))] + [np.ones((D, d, D)) for _ in range(N - 2) ] + [np.ones((D, d, 1))] mps = BaseMPS(tensors, center_position=0, backend=backend) Z = mps.position(len(mps) - 1, normalize=False) np.testing.assert_allclose(Z, 8192.0) def test_position_shift_left(backend): D, d, N = 4, 2, 6 tensors = [np.ones((1, d, D))] + [np.ones((D, d, D)) for _ in range(N - 2) ] + [np.ones((D, d, 1))] mps = BaseMPS(tensors, center_position=int(N / 2), backend=backend) Z = mps.position(0, normalize=True) np.testing.assert_allclose(Z, 2.828427) def test_position_shift_right(backend): D, d, N = 4, 2, 6 tensors = [np.ones((1, d, D))] + [np.ones((D, d, D)) for _ in range(N - 2) ] + [np.ones((D, d, 1))] mps = BaseMPS(tensors, center_position=int(N / 2), backend=backend) Z = mps.position(N - 1, normalize=True) 
np.testing.assert_allclose(Z, 2.828427) def test_position_no_shift(backend): D, d, N = 4, 2, 6 tensors = [np.ones((1, d, D))] + [np.ones((D, d, D)) for _ in range(N - 2) ] + [np.ones((D, d, 1))] mps = BaseMPS(tensors, center_position=int(N / 2), backend=backend) Z = mps.position(int(N / 2), normalize=True) np.testing.assert_allclose(Z, 5.656854) def test_position_no_shift_no_normalization(backend): D, d, N = 4, 2, 6 tensors = [np.ones((1, d, D))] + [np.ones((D, d, D)) for _ in range(N - 2) ] + [np.ones((D, d, 1))] mps = BaseMPS(tensors, center_position=int(N / 2), backend=backend) Z = mps.position(int(N / 2), normalize=False) np.testing.assert_allclose(Z, 5.656854) def test_different_dtypes_raises_error(): D, d = 4, 2 tensors = [ np.ones((1, d, D), dtype=np.float64), np.ones((D, d, D), dtype=np.complex64) ] with pytest.raises(TypeError): BaseMPS(tensors, backend='numpy') _tensors = [ np.ones((1, d, D), dtype=np.float64), np.ones((D, d, D), dtype=np.float64) ] mps = BaseMPS(_tensors, backend='numpy') mps.tensors = tensors with pytest.raises(TypeError): mps.dtype def test_not_implemented(): D, d = 4, 2 tensors = [np.ones((1, d, D)), np.ones((D, d, D))] mps = BaseMPS(tensors, backend='numpy') with pytest.raises(NotImplementedError): mps.save('tmp') with pytest.raises(NotImplementedError): mps.right_envs([0]) with pytest.raises(NotImplementedError): mps.left_envs([0]) with pytest.raises(NotImplementedError): mps.canonicalize() def test_physical_dimensions(backend): D = 3 tensors = [np.ones((1, 2, D)), np.ones((D, 3, D)), np.ones((D, 4, 1))] mps = BaseMPS(tensors, backend=backend) assert mps.physical_dimensions == [2, 3, 4] def test_apply_transfer_operator_left(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mat = backend.convert_to_tensor( np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 
dtype=np.float64)) mps = BaseMPS(tensors, backend=backend) expected = np.array([[74., 58., 38.], [78., 146., 102.], [38., 114., 74.]]) actual = mps.apply_transfer_operator(site=3, direction=1, matrix=mat) np.testing.assert_allclose(actual, expected) actual = mps.apply_transfer_operator(site=3, direction="l", matrix=mat) np.testing.assert_allclose(actual, expected) actual = mps.apply_transfer_operator(site=3, direction="left", matrix=mat) np.testing.assert_allclose(actual, expected) def test_apply_transfer_operator_right(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mat = backend.convert_to_tensor( np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)) mps = BaseMPS(tensors, backend=backend) expected = np.array([[80., -20., 128.], [-20., 10., -60.], [144., -60., 360.]]) actual = mps.apply_transfer_operator(site=3, direction=-1, matrix=mat) np.testing.assert_allclose(actual, expected) actual = mps.apply_transfer_operator(site=3, direction="r", matrix=mat) np.testing.assert_allclose(actual, expected) actual = mps.apply_transfer_operator(site=3, direction="right", matrix=mat) np.testing.assert_allclose(actual, expected) def test_apply_transfer_operator_invalid_direction_raises_error(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mat = backend.convert_to_tensor( np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)) mps = BaseMPS(tensors, backend=backend) with pytest.raises(ValueError): mps.apply_transfer_operator(site=3, direction=0, matrix=mat) with pytest.raises(ValueError): mps.apply_transfer_operator(site=3, direction="keft", matrix=mat) def 
test_measure_local_operator_value_error(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] operator = backend.convert_to_tensor( np.array([[1, -1], [-1, 1]], dtype=np.float64)) mps = BaseMPS(tensors, backend=backend) with pytest.raises(ValueError): mps.measure_local_operator(ops=2 * [operator], sites=[1, 2, 3]) def test_measure_two_body_correlator_value_error(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] operator = backend.convert_to_tensor( np.array([[1, -1], [-1, 1]], dtype=np.float64)) mps = BaseMPS(tensors, backend=backend) with pytest.raises(ValueError): mps.measure_two_body_correlator( op1=operator, op2=operator, site1=-1, sites2=[2]) def test_get_tensor(backend): backend = backend_factory.get_backend(backend) tensor1 = np.ones((2, 3, 2), dtype=np.float64) tensor2 = 2 * np.ones((2, 3, 2), dtype=np.float64) tensors = [tensor1, tensor2] mps = BaseMPS(tensors, backend=backend) np.testing.assert_allclose(mps.get_tensor(0), tensor1) np.testing.assert_allclose(mps.get_tensor(1), tensor2) def test_get_tensor_connector_matrix(backend): backend = backend_factory.get_backend(backend) tensor1 = np.ones((2, 3, 2), dtype=np.float64) tensor2 = 2 * np.ones((2, 3, 2), dtype=np.float64) connector = backend.convert_to_tensor(np.ones((2, 2), dtype=np.float64)) tensors = [tensor1, tensor2] mps = BaseMPS(tensors, backend=backend, connector_matrix=connector) np.testing.assert_allclose(mps.get_tensor(0), tensor1) np.testing.assert_allclose(mps.get_tensor(1), 2 * tensor2) def test_get_tensor_raises_error(backend): backend = backend_factory.get_backend(backend) tensor1 = np.ones((2, 3, 2), dtype=np.float64) tensor2 = 
2 * np.ones((2, 3, 2), dtype=np.float64) tensors = [tensor1, tensor2] mps = BaseMPS(tensors, backend=backend) with pytest.raises(ValueError): mps.get_tensor(site=-1) with pytest.raises(IndexError): mps.get_tensor(site=3) def test_check_canonical(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mps = BaseMPS(tensors, backend=backend, center_position=2) np.testing.assert_allclose(mps.check_canonical(), 71.714713) def test_check_normality_raises_value_error(backend): backend = backend_factory.get_backend(backend) tensor = np.ones((2, 3, 2), dtype=np.float64) tensors = [tensor] mps = BaseMPS(tensors, backend=backend) with pytest.raises(ValueError): mps.check_orthonormality(which="keft", site=0) def test_apply_two_site_gate_2(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mps = BaseMPS(tensors, backend=backend, center_position=2) gate = backend.convert_to_tensor( np.array([[[[0., 1.], [0., 0.]], [[1., 0.], [0., 0.]]], [[[0., 0.], [0., 1.]], [[0., 0.], [1., 0.]]]], dtype=np.float64)) actual = mps.apply_two_site_gate( gate=gate, site1=1, site2=2, max_singular_values=1) np.testing.assert_allclose(actual[0], 9.133530) expected = np.array([[5.817886], [9.039142]]) np.testing.assert_allclose(np.abs(mps.tensors[1][0]), expected, rtol=1e-04) expected = np.array([[0.516264, 0.080136, 0.225841], [0.225841, 0.59876, 0.516264]]) np.testing.assert_allclose(np.abs(mps.tensors[2][0]), expected, rtol=1e-04) def test_apply_two_site_wrong_gate_raises_error(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], 
dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mps = BaseMPS(tensors, backend=backend, center_position=2) gate1 = backend.convert_to_tensor(np.ones((2, 2, 2), dtype=np.float64)) gate2 = backend.convert_to_tensor(np.ones((2, 2, 2, 2, 2), dtype=np.float64)) with pytest.raises(ValueError): mps.apply_two_site_gate(gate=gate1, site1=1, site2=2) with pytest.raises(ValueError): mps.apply_two_site_gate(gate=gate2, site1=1, site2=2) def test_apply_two_site_wrong_site1_raises_error(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mps = BaseMPS(tensors, backend=backend, center_position=2) gate = backend.convert_to_tensor(np.ones((2, 2, 2, 2), dtype=np.float64)) with pytest.raises(ValueError): mps.apply_two_site_gate(gate=gate, site1=-1, site2=2) with pytest.raises(ValueError): mps.apply_two_site_gate(gate=gate, site1=6, site2=2) def test_apply_two_site_wrong_site2_raises_error(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mps = BaseMPS(tensors, backend=backend, center_position=2) gate = backend.convert_to_tensor(np.ones((2, 2, 2, 2), dtype=np.float64)) with pytest.raises(ValueError): mps.apply_two_site_gate(gate=gate, site1=0, site2=0) with pytest.raises(ValueError): mps.apply_two_site_gate(gate=gate, site1=0, site2=6) def test_apply_two_site_wrong_site1_site2_raises_error(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mps = BaseMPS(tensors, backend=backend, center_position=2) gate = 
backend.convert_to_tensor(np.ones((2, 2, 2, 2), dtype=np.float64)) with pytest.raises(ValueError): mps.apply_two_site_gate(gate=gate, site1=2, site2=2) with pytest.raises(ValueError): mps.apply_two_site_gate(gate=gate, site1=2, site2=4) def test_apply_two_site_max_singular_value_not_center_raises_error(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mps = BaseMPS(tensors, backend=backend, center_position=2) gate = backend.convert_to_tensor(np.ones((2, 2, 2, 2), dtype=np.float64)) with pytest.raises(ValueError): mps.apply_two_site_gate(gate=gate, site1=3, site2=4, max_singular_values=1) with pytest.raises(ValueError): mps.apply_two_site_gate(gate=gate, site1=3, site2=4, max_truncation_err=.1) def test_apply_one_site_gate_2(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mps = BaseMPS(tensors, backend=backend, center_position=2) gate = backend.convert_to_tensor(np.array([[0, 1], [1, 0]], dtype=np.float64)) mps.apply_one_site_gate(gate=gate, site=1) expected = np.array([[1., -2., 1.], [1., 2., 1.]]) np.testing.assert_allclose(mps.tensors[1][0], expected) def test_apply_one_site_gate_wrong_gate_raises_error(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mps = BaseMPS(tensors, backend=backend, center_position=2) gate1 = backend.convert_to_tensor(np.ones((2, 2, 2), dtype=np.float64)) gate2 = backend.convert_to_tensor(np.ones((2, 2, 2), dtype=np.float64)) with pytest.raises(ValueError): 
mps.apply_one_site_gate(gate=gate1, site=1) with pytest.raises(ValueError): mps.apply_one_site_gate(gate=gate2, site=1) def test_apply_one_site_gate_invalid_site_raises_error(backend): backend = backend_factory.get_backend(backend) tensor = np.array([[[1., 2., 1.], [1., -2., 1.]], [[-1., 1., -1.], [-1., 1., -1.]], [[1., 2, 3], [3, 2, 1]]], dtype=np.float64) tensors = 6 * [backend.convert_to_tensor(tensor)] mps = BaseMPS(tensors, backend=backend, center_position=2) gate = backend.convert_to_tensor(np.ones((2, 2), dtype=np.float64)) with pytest.raises(ValueError): mps.apply_one_site_gate(gate=gate, site=-1) with pytest.raises(ValueError): mps.apply_one_site_gate(gate=gate, site=6)
[ "numpy.abs", "tensornetwork.matrixproductstates.base_mps.BaseMPS", "numpy.ones", "tensornetwork.contract_between", "numpy.testing.assert_allclose", "numpy.tensordot", "tensorflow.compat.v1.enable_v2_behavior", "tensornetwork.backends.backend_factory.get_backend", "pytest.fixture", "numpy.array", ...
[((930, 967), 'jax.config.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (943, 967), False, 'from jax.config import config\n'), ((968, 1001), 'tensorflow.compat.v1.enable_v2_behavior', 'tf.compat.v1.enable_v2_behavior', ([], {}), '()\n', (999, 1001), True, 'import tensorflow as tf\n'), ((1005, 1220), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""backend_dtype_values"""', 'params': "[('numpy', np.float64), ('numpy', np.complex128), ('tensorflow', np.float64\n ), ('tensorflow', np.complex128), ('pytorch', np.float64), ('jax', np.\n float64)]"}), "(name='backend_dtype_values', params=[('numpy', np.float64),\n ('numpy', np.complex128), ('tensorflow', np.float64), ('tensorflow', np\n .complex128), ('pytorch', np.float64), ('jax', np.float64)])\n", (1019, 1220), False, 'import pytest\n'), ((1341, 1361), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1355, 1361), True, 'import numpy as np\n'), ((1943, 1995), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'center_position': '(0)', 'backend': 'backend'}), '(tensors, center_position=0, backend=backend)\n', (1950, 1995), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((2065, 2099), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Z', '(1.0)'], {}), '(Z, 1.0)\n', (2091, 2099), True, 'import numpy as np\n'), ((2151, 2187), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (2178, 2187), False, 'from tensornetwork.backends import backend_factory\n'), ((2347, 2394), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'center_position': '(0)', 'backend': 'be'}), '(tensors, center_position=0, backend=be)\n', (2354, 2394), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((2464, 2498), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', 
(['Z', '(1.0)'], {}), '(Z, 1.0)\n', (2490, 2498), True, 'import numpy as np\n'), ((2808, 2864), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'center_position': '(N - 1)', 'backend': 'backend'}), '(tensors, center_position=N - 1, backend=backend)\n', (2815, 2864), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((3328, 3380), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'center_position': '(0)', 'backend': 'backend'}), '(tensors, center_position=0, backend=backend)\n', (3335, 3380), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((3842, 3894), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'center_position': '(0)', 'backend': 'backend'}), '(tensors, center_position=0, backend=backend)\n', (3849, 3894), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((4072, 4122), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mps.tensors[5]', 'actual'], {}), '(mps.tensors[5], actual)\n', (4098, 4122), True, 'import numpy as np\n'), ((4428, 4480), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'center_position': '(0)', 'backend': 'backend'}), '(tensors, center_position=0, backend=backend)\n', (4435, 4480), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((4626, 4668), 'numpy.tensordot', 'np.tensordot', (['tensor1', 'tensor2', '([2], [0])'], {}), '(tensor1, tensor2, ([2], [0]))\n', (4638, 4668), True, 'import numpy as np\n'), ((4760, 4800), 'tensornetwork.Node', 'tn.Node', (['mps.tensors[5]'], {'backend': 'backend'}), '(mps.tensors[5], backend=backend)\n', (4767, 4800), True, 'import tensornetwork as tn\n'), ((4811, 4851), 'tensornetwork.Node', 'tn.Node', (['mps.tensors[6]'], {'backend': 'backend'}), '(mps.tensors[6], backend=backend)\n', (4818, 4851), True, 'import tensornetwork as tn\n'), ((4934, 4967), 
'tensornetwork.contract_between', 'tn.contract_between', (['node1', 'node2'], {}), '(node1, node2)\n', (4953, 4967), True, 'import tensornetwork as tn\n'), ((4997, 5043), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res.tensor', 'actual'], {}), '(res.tensor, actual)\n', (5023, 5043), True, 'import numpy as np\n'), ((5246, 5298), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'center_position': '(0)', 'backend': 'backend'}), '(tensors, center_position=0, backend=backend)\n', (5253, 5298), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((5621, 5673), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'center_position': '(0)', 'backend': 'backend'}), '(tensors, center_position=0, backend=backend)\n', (5628, 5673), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((5726, 5763), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Z', '(8192.0)'], {}), '(Z, 8192.0)\n', (5752, 5763), True, 'import numpy as np\n'), ((6072, 6111), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Z', '(2.828427)'], {}), '(Z, 2.828427)\n', (6098, 6111), True, 'import numpy as np\n'), ((6425, 6464), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Z', '(2.828427)'], {}), '(Z, 2.828427)\n', (6451, 6464), True, 'import numpy as np\n'), ((6780, 6819), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Z', '(5.656854)'], {}), '(Z, 5.656854)\n', (6806, 6819), True, 'import numpy as np\n'), ((7153, 7192), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Z', '(5.656854)'], {}), '(Z, 5.656854)\n', (7179, 7192), True, 'import numpy as np\n'), ((7545, 7579), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['_tensors'], {'backend': '"""numpy"""'}), "(_tensors, backend='numpy')\n", (7552, 7579), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), 
((7756, 7789), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': '"""numpy"""'}), "(tensors, backend='numpy')\n", (7763, 7789), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((8182, 8215), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend'}), '(tensors, backend=backend)\n', (8189, 8215), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((8324, 8360), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (8351, 8360), False, 'from tensornetwork.backends import backend_factory\n'), ((8372, 8508), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (8380, 8508), True, 'import numpy as np\n'), ((8697, 8730), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend'}), '(tensors, backend=backend)\n', (8704, 8730), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((8745, 8818), 'numpy.array', 'np.array', (['[[74.0, 58.0, 38.0], [78.0, 146.0, 102.0], [38.0, 114.0, 74.0]]'], {}), '([[74.0, 58.0, 38.0], [78.0, 146.0, 102.0], [38.0, 114.0, 74.0]])\n', (8753, 8818), True, 'import numpy as np\n'), ((8884, 8928), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', 'expected'], {}), '(actual, expected)\n', (8910, 8928), True, 'import numpy as np\n'), ((9005, 9049), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', 'expected'], {}), '(actual, expected)\n', (9031, 9049), True, 'import numpy as np\n'), ((9129, 9173), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', 'expected'], {}), 
'(actual, expected)\n', (9155, 9173), True, 'import numpy as np\n'), ((9237, 9273), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (9264, 9273), False, 'from tensornetwork.backends import backend_factory\n'), ((9285, 9421), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (9293, 9421), True, 'import numpy as np\n'), ((9610, 9643), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend'}), '(tensors, backend=backend)\n', (9617, 9643), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((9657, 9734), 'numpy.array', 'np.array', (['[[80.0, -20.0, 128.0], [-20.0, 10.0, -60.0], [144.0, -60.0, 360.0]]'], {}), '([[80.0, -20.0, 128.0], [-20.0, 10.0, -60.0], [144.0, -60.0, 360.0]])\n', (9665, 9734), True, 'import numpy as np\n'), ((9863, 9907), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', 'expected'], {}), '(actual, expected)\n', (9889, 9907), True, 'import numpy as np\n'), ((9984, 10028), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', 'expected'], {}), '(actual, expected)\n', (10010, 10028), True, 'import numpy as np\n'), ((10109, 10153), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', 'expected'], {}), '(actual, expected)\n', (10135, 10153), True, 'import numpy as np\n'), ((10242, 10278), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (10269, 10278), False, 'from tensornetwork.backends import backend_factory\n'), ((10290, 10426), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n 
], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (10298, 10426), True, 'import numpy as np\n'), ((10615, 10648), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend'}), '(tensors, backend=backend)\n', (10622, 10648), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((10920, 10956), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (10947, 10956), False, 'from tensornetwork.backends import backend_factory\n'), ((10968, 11104), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (10976, 11104), True, 'import numpy as np\n'), ((11283, 11316), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend'}), '(tensors, backend=backend)\n', (11290, 11316), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((11492, 11528), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (11519, 11528), False, 'from tensornetwork.backends import backend_factory\n'), ((11540, 11676), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (11548, 11676), True, 'import numpy as np\n'), ((11855, 11888), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 
'backend'}), '(tensors, backend=backend)\n', (11862, 11888), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((12062, 12098), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (12089, 12098), False, 'from tensornetwork.backends import backend_factory\n'), ((12111, 12147), 'numpy.ones', 'np.ones', (['(2, 3, 2)'], {'dtype': 'np.float64'}), '((2, 3, 2), dtype=np.float64)\n', (12118, 12147), True, 'import numpy as np\n'), ((12240, 12273), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend'}), '(tensors, backend=backend)\n', (12247, 12273), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((12449, 12485), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (12476, 12485), False, 'from tensornetwork.backends import backend_factory\n'), ((12498, 12534), 'numpy.ones', 'np.ones', (['(2, 3, 2)'], {'dtype': 'np.float64'}), '((2, 3, 2), dtype=np.float64)\n', (12505, 12534), True, 'import numpy as np\n'), ((12702, 12763), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend', 'connector_matrix': 'connector'}), '(tensors, backend=backend, connector_matrix=connector)\n', (12709, 12763), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((12939, 12975), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (12966, 12975), False, 'from tensornetwork.backends import backend_factory\n'), ((12988, 13024), 'numpy.ones', 'np.ones', (['(2, 3, 2)'], {'dtype': 'np.float64'}), '((2, 3, 2), dtype=np.float64)\n', (12995, 13024), True, 'import numpy as np\n'), ((13117, 13150), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend'}), '(tensors, backend=backend)\n', (13124, 
13150), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((13323, 13359), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (13350, 13359), False, 'from tensornetwork.backends import backend_factory\n'), ((13371, 13507), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (13379, 13507), True, 'import numpy as np\n'), ((13591, 13643), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend', 'center_position': '(2)'}), '(tensors, backend=backend, center_position=2)\n', (13598, 13643), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((13775, 13811), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (13802, 13811), False, 'from tensornetwork.backends import backend_factory\n'), ((13823, 13859), 'numpy.ones', 'np.ones', (['(2, 3, 2)'], {'dtype': 'np.float64'}), '((2, 3, 2), dtype=np.float64)\n', (13830, 13859), True, 'import numpy as np\n'), ((13889, 13922), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend'}), '(tensors, backend=backend)\n', (13896, 13922), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((14063, 14099), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (14090, 14099), False, 'from tensornetwork.backends import backend_factory\n'), ((14111, 14247), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], 
[1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (14119, 14247), True, 'import numpy as np\n'), ((14331, 14383), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend', 'center_position': '(2)'}), '(tensors, backend=backend, center_position=2)\n', (14338, 14383), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((14675, 14721), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual[0]', '(9.13353)'], {}), '(actual[0], 9.13353)\n', (14701, 14721), True, 'import numpy as np\n'), ((14736, 14770), 'numpy.array', 'np.array', (['[[5.817886], [9.039142]]'], {}), '([[5.817886], [9.039142]])\n', (14744, 14770), True, 'import numpy as np\n'), ((14862, 14935), 'numpy.array', 'np.array', (['[[0.516264, 0.080136, 0.225841], [0.225841, 0.59876, 0.516264]]'], {}), '([[0.516264, 0.080136, 0.225841], [0.225841, 0.59876, 0.516264]])\n', (14870, 14935), True, 'import numpy as np\n'), ((15109, 15145), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (15136, 15145), False, 'from tensornetwork.backends import backend_factory\n'), ((15157, 15293), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (15165, 15293), True, 'import numpy as np\n'), ((15377, 15429), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend', 'center_position': '(2)'}), '(tensors, backend=backend, center_position=2)\n', (15384, 15429), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((15841, 15877), 'tensornetwork.backends.backend_factory.get_backend', 
'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (15868, 15877), False, 'from tensornetwork.backends import backend_factory\n'), ((15889, 16025), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (15897, 16025), True, 'import numpy as np\n'), ((16109, 16161), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend', 'center_position': '(2)'}), '(tensors, backend=backend, center_position=2)\n', (16116, 16161), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((16494, 16530), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (16521, 16530), False, 'from tensornetwork.backends import backend_factory\n'), ((16542, 16678), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (16550, 16678), True, 'import numpy as np\n'), ((16762, 16814), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend', 'center_position': '(2)'}), '(tensors, backend=backend, center_position=2)\n', (16769, 16814), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((17152, 17188), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (17179, 17188), False, 'from tensornetwork.backends import backend_factory\n'), ((17200, 17336), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, 
-1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (17208, 17336), True, 'import numpy as np\n'), ((17420, 17472), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend', 'center_position': '(2)'}), '(tensors, backend=backend, center_position=2)\n', (17427, 17472), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((17822, 17858), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (17849, 17858), False, 'from tensornetwork.backends import backend_factory\n'), ((17870, 18006), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (17878, 18006), True, 'import numpy as np\n'), ((18090, 18142), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend', 'center_position': '(2)'}), '(tensors, backend=backend, center_position=2)\n', (18097, 18142), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((18502, 18538), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (18529, 18538), False, 'from tensornetwork.backends import backend_factory\n'), ((18550, 18686), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (18558, 18686), True, 'import numpy as np\n'), ((18770, 18822), 
'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend', 'center_position': '(2)'}), '(tensors, backend=backend, center_position=2)\n', (18777, 18822), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((18962, 19007), 'numpy.array', 'np.array', (['[[1.0, -2.0, 1.0], [1.0, 2.0, 1.0]]'], {}), '([[1.0, -2.0, 1.0], [1.0, 2.0, 1.0]])\n', (18970, 19007), True, 'import numpy as np\n'), ((19004, 19059), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mps.tensors[1][0]', 'expected'], {}), '(mps.tensors[1][0], expected)\n', (19030, 19059), True, 'import numpy as np\n'), ((19137, 19173), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (19164, 19173), False, 'from tensornetwork.backends import backend_factory\n'), ((19185, 19321), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, -1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (19193, 19321), True, 'import numpy as np\n'), ((19405, 19457), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend', 'center_position': '(2)'}), '(tensors, backend=backend, center_position=2)\n', (19412, 19457), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((19849, 19885), 'tensornetwork.backends.backend_factory.get_backend', 'backend_factory.get_backend', (['backend'], {}), '(backend)\n', (19876, 19885), False, 'from tensornetwork.backends import backend_factory\n'), ((19897, 20033), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]\n ], [[1.0, 2, 3], [3, 2, 1]]]'], {'dtype': 'np.float64'}), '([[[1.0, 2.0, 1.0], [1.0, -2.0, 1.0]], [[-1.0, 1.0, -1.0], [-1.0, \n 1.0, 
-1.0]], [[1.0, 2, 3], [3, 2, 1]]], dtype=np.float64)\n', (19905, 20033), True, 'import numpy as np\n'), ((20117, 20169), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': 'backend', 'center_position': '(2)'}), '(tensors, backend=backend, center_position=2)\n', (20124, 20169), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((4019, 4057), 'numpy.tensordot', 'np.tensordot', (['tensor', 'gate', '([1], [1])'], {}), '(tensor, gate, ([1], [1]))\n', (4031, 4057), True, 'import numpy as np\n'), ((4693, 4734), 'numpy.tensordot', 'np.tensordot', (['tmp', 'gate', '([1, 2], [2, 3])'], {}), '(tmp, gate, ([1, 2], [2, 3]))\n', (4705, 4734), True, 'import numpy as np\n'), ((5306, 5331), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5319, 5331), False, 'import pytest\n'), ((5361, 5386), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5374, 5386), False, 'import pytest\n'), ((7271, 7307), 'numpy.ones', 'np.ones', (['(1, d, D)'], {'dtype': 'np.float64'}), '((1, d, D), dtype=np.float64)\n', (7278, 7307), True, 'import numpy as np\n'), ((7315, 7353), 'numpy.ones', 'np.ones', (['(D, d, D)'], {'dtype': 'np.complex64'}), '((D, d, D), dtype=np.complex64)\n', (7322, 7353), True, 'import numpy as np\n'), ((7365, 7389), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7378, 7389), False, 'import pytest\n'), ((7395, 7428), 'tensornetwork.matrixproductstates.base_mps.BaseMPS', 'BaseMPS', (['tensors'], {'backend': '"""numpy"""'}), "(tensors, backend='numpy')\n", (7402, 7428), False, 'from tensornetwork.matrixproductstates.base_mps import BaseMPS\n'), ((7451, 7487), 'numpy.ones', 'np.ones', (['(1, d, D)'], {'dtype': 'np.float64'}), '((1, d, D), dtype=np.float64)\n', (7458, 7487), True, 'import numpy as np\n'), ((7495, 7531), 'numpy.ones', 'np.ones', (['(D, d, D)'], {'dtype': 'np.float64'}), '((D, d, D), dtype=np.float64)\n', (7502, 7531), True, 
'import numpy as np\n'), ((7611, 7635), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7624, 7635), False, 'import pytest\n'), ((7708, 7726), 'numpy.ones', 'np.ones', (['(1, d, D)'], {}), '((1, d, D))\n', (7715, 7726), True, 'import numpy as np\n'), ((7728, 7746), 'numpy.ones', 'np.ones', (['(D, d, D)'], {}), '((D, d, D))\n', (7735, 7746), True, 'import numpy as np\n'), ((7797, 7831), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (7810, 7831), False, 'import pytest\n'), ((7860, 7894), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (7873, 7894), False, 'import pytest\n'), ((7927, 7961), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (7940, 7961), False, 'import pytest\n'), ((7993, 8027), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (8006, 8027), False, 'import pytest\n'), ((8114, 8132), 'numpy.ones', 'np.ones', (['(1, 2, D)'], {}), '((1, 2, D))\n', (8121, 8132), True, 'import numpy as np\n'), ((8134, 8152), 'numpy.ones', 'np.ones', (['(D, 3, D)'], {}), '((D, 3, D))\n', (8141, 8152), True, 'import numpy as np\n'), ((8154, 8172), 'numpy.ones', 'np.ones', (['(D, 4, 1)'], {}), '((D, 4, 1))\n', (8161, 8172), True, 'import numpy as np\n'), ((8626, 8687), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {'dtype': 'np.float64'}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)\n', (8634, 8687), True, 'import numpy as np\n'), ((9539, 9600), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {'dtype': 'np.float64'}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)\n', (9547, 9600), True, 'import numpy as np\n'), ((10544, 10605), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {'dtype': 'np.float64'}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)\n', (10552, 10605), True, 'import numpy as np\n'), 
((10656, 10681), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10669, 10681), False, 'import pytest\n'), ((10755, 10780), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10768, 10780), False, 'import pytest\n'), ((11227, 11273), 'numpy.array', 'np.array', (['[[1, -1], [-1, 1]]'], {'dtype': 'np.float64'}), '([[1, -1], [-1, 1]], dtype=np.float64)\n', (11235, 11273), True, 'import numpy as np\n'), ((11324, 11349), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11337, 11349), False, 'import pytest\n'), ((11799, 11845), 'numpy.array', 'np.array', (['[[1, -1], [-1, 1]]'], {'dtype': 'np.float64'}), '([[1, -1], [-1, 1]], dtype=np.float64)\n', (11807, 11845), True, 'import numpy as np\n'), ((11896, 11921), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11909, 11921), False, 'import pytest\n'), ((12164, 12200), 'numpy.ones', 'np.ones', (['(2, 3, 2)'], {'dtype': 'np.float64'}), '((2, 3, 2), dtype=np.float64)\n', (12171, 12200), True, 'import numpy as np\n'), ((12551, 12587), 'numpy.ones', 'np.ones', (['(2, 3, 2)'], {'dtype': 'np.float64'}), '((2, 3, 2), dtype=np.float64)\n', (12558, 12587), True, 'import numpy as np\n'), ((12628, 12661), 'numpy.ones', 'np.ones', (['(2, 2)'], {'dtype': 'np.float64'}), '((2, 2), dtype=np.float64)\n', (12635, 12661), True, 'import numpy as np\n'), ((13041, 13077), 'numpy.ones', 'np.ones', (['(2, 3, 2)'], {'dtype': 'np.float64'}), '((2, 3, 2), dtype=np.float64)\n', (13048, 13077), True, 'import numpy as np\n'), ((13158, 13183), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13171, 13183), False, 'import pytest\n'), ((13220, 13245), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (13233, 13245), False, 'import pytest\n'), ((13930, 13955), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13943, 13955), False, 'import pytest\n'), ((14426, 14567), 'numpy.array', 
'np.array', (['[[[[0.0, 1.0], [0.0, 0.0]], [[1.0, 0.0], [0.0, 0.0]]], [[[0.0, 0.0], [0.0, \n 1.0]], [[0.0, 0.0], [1.0, 0.0]]]]'], {'dtype': 'np.float64'}), '([[[[0.0, 1.0], [0.0, 0.0]], [[1.0, 0.0], [0.0, 0.0]]], [[[0.0, 0.0\n ], [0.0, 1.0]], [[0.0, 0.0], [1.0, 0.0]]]], dtype=np.float64)\n', (14434, 14567), True, 'import numpy as np\n'), ((14800, 14825), 'numpy.abs', 'np.abs', (['mps.tensors[1][0]'], {}), '(mps.tensors[1][0])\n', (14806, 14825), True, 'import numpy as np\n'), ((14988, 15013), 'numpy.abs', 'np.abs', (['mps.tensors[2][0]'], {}), '(mps.tensors[2][0])\n', (14994, 15013), True, 'import numpy as np\n'), ((15466, 15502), 'numpy.ones', 'np.ones', (['(2, 2, 2)'], {'dtype': 'np.float64'}), '((2, 2, 2), dtype=np.float64)\n', (15473, 15502), True, 'import numpy as np\n'), ((15540, 15582), 'numpy.ones', 'np.ones', (['(2, 2, 2, 2, 2)'], {'dtype': 'np.float64'}), '((2, 2, 2, 2, 2), dtype=np.float64)\n', (15547, 15582), True, 'import numpy as np\n'), ((15591, 15616), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15604, 15616), False, 'import pytest\n'), ((15683, 15708), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15696, 15708), False, 'import pytest\n'), ((16197, 16236), 'numpy.ones', 'np.ones', (['(2, 2, 2, 2)'], {'dtype': 'np.float64'}), '((2, 2, 2, 2), dtype=np.float64)\n', (16204, 16236), True, 'import numpy as np\n'), ((16245, 16270), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16258, 16270), False, 'import pytest\n'), ((16337, 16362), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16350, 16362), False, 'import pytest\n'), ((16850, 16889), 'numpy.ones', 'np.ones', (['(2, 2, 2, 2)'], {'dtype': 'np.float64'}), '((2, 2, 2, 2), dtype=np.float64)\n', (16857, 16889), True, 'import numpy as np\n'), ((16898, 16923), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16911, 16923), False, 'import pytest\n'), ((16989, 17014), 
'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17002, 17014), False, 'import pytest\n'), ((17508, 17547), 'numpy.ones', 'np.ones', (['(2, 2, 2, 2)'], {'dtype': 'np.float64'}), '((2, 2, 2, 2), dtype=np.float64)\n', (17515, 17547), True, 'import numpy as np\n'), ((17556, 17581), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17569, 17581), False, 'import pytest\n'), ((17647, 17672), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17660, 17672), False, 'import pytest\n'), ((18178, 18217), 'numpy.ones', 'np.ones', (['(2, 2, 2, 2)'], {'dtype': 'np.float64'}), '((2, 2, 2, 2), dtype=np.float64)\n', (18185, 18217), True, 'import numpy as np\n'), ((18226, 18251), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18239, 18251), False, 'import pytest\n'), ((18340, 18365), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18353, 18365), False, 'import pytest\n'), ((18858, 18902), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {'dtype': 'np.float64'}), '([[0, 1], [1, 0]], dtype=np.float64)\n', (18866, 18902), True, 'import numpy as np\n'), ((19494, 19530), 'numpy.ones', 'np.ones', (['(2, 2, 2)'], {'dtype': 'np.float64'}), '((2, 2, 2), dtype=np.float64)\n', (19501, 19530), True, 'import numpy as np\n'), ((19568, 19604), 'numpy.ones', 'np.ones', (['(2, 2, 2)'], {'dtype': 'np.float64'}), '((2, 2, 2), dtype=np.float64)\n', (19575, 19604), True, 'import numpy as np\n'), ((19613, 19638), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19626, 19638), False, 'import pytest\n'), ((19695, 19720), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19708, 19720), False, 'import pytest\n'), ((20205, 20238), 'numpy.ones', 'np.ones', (['(2, 2)'], {'dtype': 'np.float64'}), '((2, 2), dtype=np.float64)\n', (20212, 20238), True, 'import numpy as np\n'), ((20247, 20272), 'pytest.raises', 'pytest.raises', 
(['ValueError'], {}), '(ValueError)\n', (20260, 20272), False, 'import pytest\n'), ((20329, 20354), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (20342, 20354), False, 'import pytest\n'), ((1711, 1734), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1726, 1734), True, 'import numpy as np\n'), ((1909, 1933), 'numpy.random.randn', 'np.random.randn', (['D', 'd', '(1)'], {}), '(D, d, 1)\n', (1924, 1933), True, 'import numpy as np\n'), ((2313, 2337), 'numpy.random.randn', 'np.random.randn', (['D', 'd', '(1)'], {}), '(D, d, 1)\n', (2328, 2337), True, 'import numpy as np\n'), ((5212, 5236), 'numpy.random.randn', 'np.random.randn', (['D', 'd', '(1)'], {}), '(D, d, 1)\n', (5227, 5236), True, 'import numpy as np\n'), ((5593, 5611), 'numpy.ones', 'np.ones', (['(D, d, 1)'], {}), '((D, d, 1))\n', (5600, 5611), True, 'import numpy as np\n'), ((5942, 5960), 'numpy.ones', 'np.ones', (['(D, d, 1)'], {}), '((D, d, 1))\n', (5949, 5960), True, 'import numpy as np\n'), ((6291, 6309), 'numpy.ones', 'np.ones', (['(D, d, 1)'], {}), '((D, d, 1))\n', (6298, 6309), True, 'import numpy as np\n'), ((6641, 6659), 'numpy.ones', 'np.ones', (['(D, d, 1)'], {}), '((D, d, 1))\n', (6648, 6659), True, 'import numpy as np\n'), ((7013, 7031), 'numpy.ones', 'np.ones', (['(D, d, 1)'], {}), '((D, d, 1))\n', (7020, 7031), True, 'import numpy as np\n'), ((1819, 1843), 'numpy.random.randn', 'np.random.randn', (['(1)', 'd', 'D'], {}), '(1, d, D)\n', (1834, 1843), True, 'import numpy as np\n'), ((1855, 1879), 'numpy.random.randn', 'np.random.randn', (['D', 'd', 'D'], {}), '(D, d, D)\n', (1870, 1879), True, 'import numpy as np\n'), ((2223, 2247), 'numpy.random.randn', 'np.random.randn', (['(1)', 'd', 'D'], {}), '(1, d, D)\n', (2238, 2247), True, 'import numpy as np\n'), ((2259, 2283), 'numpy.random.randn', 'np.random.randn', (['D', 'd', 'D'], {}), '(D, d, D)\n', (2274, 2283), True, 'import numpy as np\n'), ((5122, 5146), 'numpy.random.randn', 
'np.random.randn', (['(1)', 'd', 'D'], {}), '(1, d, D)\n', (5137, 5146), True, 'import numpy as np\n'), ((5158, 5182), 'numpy.random.randn', 'np.random.randn', (['D', 'd', 'D'], {}), '(D, d, D)\n', (5173, 5182), True, 'import numpy as np\n'), ((5489, 5507), 'numpy.ones', 'np.ones', (['(1, d, D)'], {}), '((1, d, D))\n', (5496, 5507), True, 'import numpy as np\n'), ((5512, 5530), 'numpy.ones', 'np.ones', (['(D, d, D)'], {}), '((D, d, D))\n', (5519, 5530), True, 'import numpy as np\n'), ((5838, 5856), 'numpy.ones', 'np.ones', (['(1, d, D)'], {}), '((1, d, D))\n', (5845, 5856), True, 'import numpy as np\n'), ((5861, 5879), 'numpy.ones', 'np.ones', (['(D, d, D)'], {}), '((D, d, D))\n', (5868, 5879), True, 'import numpy as np\n'), ((6187, 6205), 'numpy.ones', 'np.ones', (['(1, d, D)'], {}), '((1, d, D))\n', (6194, 6205), True, 'import numpy as np\n'), ((6210, 6228), 'numpy.ones', 'np.ones', (['(D, d, D)'], {}), '((D, d, D))\n', (6217, 6228), True, 'import numpy as np\n'), ((6537, 6555), 'numpy.ones', 'np.ones', (['(1, d, D)'], {}), '((1, d, D))\n', (6544, 6555), True, 'import numpy as np\n'), ((6560, 6578), 'numpy.ones', 'np.ones', (['(D, d, D)'], {}), '((D, d, D))\n', (6567, 6578), True, 'import numpy as np\n'), ((6909, 6927), 'numpy.ones', 'np.ones', (['(1, d, D)'], {}), '((1, d, D))\n', (6916, 6927), True, 'import numpy as np\n'), ((6932, 6950), 'numpy.ones', 'np.ones', (['(D, d, D)'], {}), '((D, d, D))\n', (6939, 6950), True, 'import numpy as np\n'), ((1458, 1481), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1473, 1481), True, 'import numpy as np\n'), ((1600, 1623), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1615, 1623), True, 'import numpy as np\n'), ((1517, 1540), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1532, 1540), True, 'import numpy as np\n'), ((1659, 1682), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1674, 1682), True, 'import numpy as 
np\n')]
import hsmm4acc.hsmm as hsmm import numpy as np def test_initialize_model(): Nmax = 2 dim = 3 model = hsmm.initialize_model(Nmax, dim) assert len(model.obs_distns) == Nmax def test_colormap(): num_states = 5 colormap, cmap = hsmm.get_color_map(num_states) assert len(colormap.keys()) == num_states def test_train_hsmm_one(): # np.random.seed(1) maxduration = 5 X_list = create_dataset_many( dim=2, nrstates=5, size=1, maxduration=maxduration) nr_resamples = 10 model = hsmm.train_hsmm(X_list, Nmax=10, nr_resamples=nr_resamples, trunc=maxduration, visualize=False, example_index=0, max_hamming=0.05) assert len(model.stateseqs) == len(X_list) def create_dataset_many(dim, nrstates, size, maxduration): means = 10 * np.random.rand(nrstates, dim) X_list = [] for i in range(size): X = np.zeros((0, dim)) for j in range(nrstates): length = np.random.randint(1, maxduration) X = np.concatenate( (X, np.random.normal(means[j], 0.2, (length, dim)))) X_list.append(X) return X_list def test_train_hsmm_many(): # np.random.seed(1) nrstates = 5 maxduration = 3 X_list = create_dataset_many( dim=2, size=10, maxduration=maxduration, nrstates=nrstates) # print([X for X in X_list]) nr_resamples = 10 model = hsmm.train_hsmm( X_list, Nmax=nrstates, nr_resamples=nr_resamples, trunc=maxduration, visualize=False, example_index=0, max_hamming=0.05) assert len(model.stateseqs) == len(X_list)
[ "hsmm4acc.hsmm.initialize_model", "numpy.random.normal", "numpy.random.rand", "numpy.zeros", "numpy.random.randint", "hsmm4acc.hsmm.get_color_map", "hsmm4acc.hsmm.train_hsmm" ]
[((116, 148), 'hsmm4acc.hsmm.initialize_model', 'hsmm.initialize_model', (['Nmax', 'dim'], {}), '(Nmax, dim)\n', (137, 148), True, 'import hsmm4acc.hsmm as hsmm\n'), ((253, 283), 'hsmm4acc.hsmm.get_color_map', 'hsmm.get_color_map', (['num_states'], {}), '(num_states)\n', (271, 283), True, 'import hsmm4acc.hsmm as hsmm\n'), ((531, 666), 'hsmm4acc.hsmm.train_hsmm', 'hsmm.train_hsmm', (['X_list'], {'Nmax': '(10)', 'nr_resamples': 'nr_resamples', 'trunc': 'maxduration', 'visualize': '(False)', 'example_index': '(0)', 'max_hamming': '(0.05)'}), '(X_list, Nmax=10, nr_resamples=nr_resamples, trunc=\n maxduration, visualize=False, example_index=0, max_hamming=0.05)\n', (546, 666), True, 'import hsmm4acc.hsmm as hsmm\n'), ((1506, 1647), 'hsmm4acc.hsmm.train_hsmm', 'hsmm.train_hsmm', (['X_list'], {'Nmax': 'nrstates', 'nr_resamples': 'nr_resamples', 'trunc': 'maxduration', 'visualize': '(False)', 'example_index': '(0)', 'max_hamming': '(0.05)'}), '(X_list, Nmax=nrstates, nr_resamples=nr_resamples, trunc=\n maxduration, visualize=False, example_index=0, max_hamming=0.05)\n', (1521, 1647), True, 'import hsmm4acc.hsmm as hsmm\n'), ((910, 939), 'numpy.random.rand', 'np.random.rand', (['nrstates', 'dim'], {}), '(nrstates, dim)\n', (924, 939), True, 'import numpy as np\n'), ((994, 1012), 'numpy.zeros', 'np.zeros', (['(0, dim)'], {}), '((0, dim))\n', (1002, 1012), True, 'import numpy as np\n'), ((1068, 1101), 'numpy.random.randint', 'np.random.randint', (['(1)', 'maxduration'], {}), '(1, maxduration)\n', (1085, 1101), True, 'import numpy as np\n'), ((1154, 1200), 'numpy.random.normal', 'np.random.normal', (['means[j]', '(0.2)', '(length, dim)'], {}), '(means[j], 0.2, (length, dim))\n', (1170, 1200), True, 'import numpy as np\n')]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ pandas 学习 字典形式的numpy """ from __future__ import print_function import numpy as np import pandas as pd dates = pd.date_range('20130101', periods=6) df = pd.DataFrame(np.arange(24).reshape((6,4)),index=dates, columns=['A','B','C','D']) """ A B C D 2013-01-01 0 1 2 3 2013-01-02 4 5 6 7 2013-01-03 8 9 10 11 2013-01-04 12 13 14 15 2013-01-05 16 17 18 19 2013-01-06 20 21 22 23 """ # print(df['A']) print(df.A) print(df[0:3]) """ A B C D 2013-01-01 0 1 2 3 2013-01-02 4 5 6 7 2013-01-03 8 9 10 11 """ print(df['20130102':'20130104']) """ A B C D 2013-01-02 4 5 6 7 2013-01-03 8 9 10 11 2013-01-04 12 13 14 15 """ print(df.loc['20130102']) """ A 4 B 5 C 6 D 7 Name: 2013-01-02 00:00:00, dtype: int64 """ print(df.loc[:,['A','B']]) """ A B 2013-01-01 0 1 2013-01-02 4 5 2013-01-03 8 9 2013-01-04 12 13 2013-01-05 16 17 2013-01-06 20 21 """ print(df.loc['20130102',['A','B']]) """ A 4 B 5 Name: 2013-01-02 00:00:00, dtype: int64 """ print(df.iloc[3,1]) # 13 print(df.iloc[3:5,1:3]) """ B C 2013-01-04 13 14 2013-01-05 17 18 """ print(df.iloc[[1,3,5],1:3]) """ B C 2013-01-02 5 6 2013-01-04 13 14 2013-01-06 21 22 """ # 根据混合的这两种 ix print(df.ix[:3,['A','C']]) """ A C 2013-01-01 0 2 2013-01-02 4 6 2013-01-03 8 10 """ print(df[df.A>8]) """ A B C D 2013-01-04 12 13 14 15 2013-01-05 16 17 18 19 2013-01-06 20 21 22 23 """
[ "pandas.date_range", "numpy.arange" ]
[((166, 202), 'pandas.date_range', 'pd.date_range', (['"""20130101"""'], {'periods': '(6)'}), "('20130101', periods=6)\n", (179, 202), True, 'import pandas as pd\n'), ((221, 234), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (230, 234), True, 'import numpy as np\n')]
"""This module contains njitted routines and data structures to: - Find the best possible split of a node. For a given node, a split is characterized by a feature and a bin. - Apply a split to a node, i.e. split the indices of the samples at the node into the newly created left and right childs. """ import numpy as np from numba import njit, jitclass, prange, float32, uint8, uint32, boolean import numba from .histogram import _build_histogram from .histogram import _build_histogram_root from .histogram import HISTOGRAM_DTYPE, sum_histogram import pdb @jitclass([ ('gain', float32), ('feature_idx', uint32), ('bin_idx', uint8), ('left_g_hf', float32), ('left_gx_hfx', float32), ('left_h', float32), ('left_hx', float32), ('left_hx2', float32), ('if_left_linear', boolean), ('right_g_hf', float32), ('right_gx_hfx', float32), ('right_h', float32), ("right_hx", float32), ("right_hx2", float32), ('if_right_linear', boolean), ('n_samples_left', uint32), ('n_samples_right', uint32), ]) class SplitInfo: """Pure data class to store information about a potential split. 
Parameters ---------- gain : float32 The gain of the split feature_idx : int The index of the feature to be split bin_idx : int The index of the bin on which the split is made left_g_hf, left_gx_hfx, left_h, left_hx, left_hx2: float32 Accumulants in the left child right_g_hf, right_gx_hfx, right_h, right_hx, right_hx2: float32 Accumulants in the right child n_samples_left : int The number of samples in the left child n_samples_right : int The number of samples in the right child """ def __init__(self, gain=-1., feature_idx=0, bin_idx=0, left_g_hf=0., left_gx_hfx=0., left_h=0., left_hx=0., left_hx2=0., right_g_hf=0., right_gx_hfx=0., right_h=0, right_hx=0, right_hx2=0, n_samples_left=0, n_samples_right=0): self.gain = gain self.feature_idx = feature_idx self.bin_idx = bin_idx self.left_g_hf = left_g_hf self.left_gx_hfx = left_gx_hfx self.left_h = left_h self.left_hx = left_hx self.left_hx2 = left_hx2 self.right_g_hf = right_g_hf self.right_gx_hfx = right_gx_hfx self.right_h = right_h self.right_hx = right_hx self.right_hx2 = right_hx2 self.n_samples_left = n_samples_left self.n_samples_right = n_samples_right @jitclass([ ('n_features', uint32), ('X_binned', uint8[::1, :]), ('X', float32[::1, :]), ('max_bins', uint32), ('n_bins_per_feature', uint32[::1]), ('min_samples_leaf', uint32), ('min_gain_to_split', float32), ('gradients', float32[::1]), ('hessians', float32[::1]), ('prediction_value', float32[::1]), ('w_l2_reg', float32), ('b_l2_reg', float32), ('min_hessian_to_split', float32), ('partition', uint32[::1]), ('left_indices_buffer', uint32[::1]), ('right_indices_buffer', uint32[::1]), ]) class SplittingContext: """Pure data class defining a splitting context. Ideally it would also have methods but numba does not support annotating jitclasses (so we can't use parallel=True). This structure is instanciated in the grower and stores all the required information to compute the SplitInfo and histograms of each node. 
Parameters ---------- X_binned : array of int The binned input samples. Must be Fortran-aligned. max_bins : int, optional(default=256) The maximum number of bins. Used to define the shape of the histograms. n_bins_per_feature : array-like of int The actual number of bins needed for each feature, which is lower or equal to max_bins. gradients : array-like, shape=(n_samples,) The gradients of each training sample. Those are the gradients of the loss w.r.t the predictions, evaluated at iteration i - 1. hessians : array-like, shape=(n_samples,) The hessians of each training sample. Those are the hessians of the loss w.r.t the predictions, evaluated at iteration i - 1. l2_regularization : float The L2 regularization parameter. min_hessian_to_split : float The minimum sum of hessians needed in each node. Splits that result in at least one child having a sum of hessians less than min_hessian_to_split are discarded. min_samples_leaf : int The minimum number of samples per leaf. min_gain_to_split : float, optional(default=0.) The minimum gain needed to split a node. Splits with lower gain will be ignored. 
""" def __init__(self, X_binned, X, max_bins, n_bins_per_feature, gradients, hessians, w_l2_reg, b_l2_reg, min_hessian_to_split=1e-3, min_samples_leaf=20, min_gain_to_split=0.): self.X_binned = X_binned self.X = X self.n_features = X_binned.shape[1] # Note: all histograms will have <max_bins> bins, but some of the # last bins may be unused if n_bins_per_feature[f] < max_bins self.max_bins = max_bins self.n_bins_per_feature = n_bins_per_feature self.gradients = gradients self.hessians = hessians self.prediction_value = np.zeros_like(self.gradients) # for root node, gradients and hessians are already ordered self.w_l2_reg = w_l2_reg self.b_l2_reg = b_l2_reg self.min_hessian_to_split = min_hessian_to_split self.min_samples_leaf = min_samples_leaf self.min_gain_to_split = min_gain_to_split # The partition array maps each sample index into the leaves of the # tree (a leaf in this context is a node that isn't splitted yet, not # necessarily a 'finalized' leaf). Initially, the root contains all # the indices, e.g.: # partition = [abcdefghijkl] # After a call to split_indices, it may look e.g. like this: # partition = [cef|abdghijkl] # we have 2 leaves, the left one is at position 0 and the second one at # position 3. The order of the samples is irrelevant. self.partition = np.arange(0, X_binned.shape[0], 1, np.uint32) # buffers used in split_indices to support parallel splitting. 
self.left_indices_buffer = np.empty_like(self.partition) self.right_indices_buffer = np.empty_like(self.partition) @njit(parallel=True) def update_prediction_values(context, sample_indices, coefficient, feature_idx): n_samples = sample_indices.shape[0] prediction_value = context.prediction_value for i in prange(n_samples): sample_idx = sample_indices[i] prediction_value[sample_idx] += context.X[sample_idx][feature_idx] * coefficient @njit(parallel=True, locals={'sample_idx': uint32, 'left_count': uint32, 'right_count': uint32}) def split_indices(context, split_info, sample_indices): """Split samples into left and right arrays. Parameters ---------- context : SplittingContext The splitting context split_ingo : SplitInfo The SplitInfo of the node to split sample_indices : array of int The indices of the samples at the node to split. This is a view on context.partition, and it is modified inplace by placing the indices of the left child at the beginning, and the indices of the right child at the end. Returns ------- left_indices : array of int The indices of the samples in the left child. This is a view on context.partition. right_indices : array of int The indices of the samples in the right child. This is a view on context.partition. """ # This is a multi-threaded implementation inspired by lightgbm. # Here is a quick break down. Let's suppose we want to split a node with # 24 samples named from a to x. context.partition looks like this (the * # are indices in other leaves that we don't care about): # partition = [*************abcdefghijklmnopqrstuvwx****************] # ^ ^ # node_position node_position + node.n_samples # Ultimately, we want to reorder the samples inside the boundaries of the # leaf (which becomes a node) to now represent the samples in its left and # right child. 
For example: # partition = [*************abefilmnopqrtuxcdghjksvw*****************] # ^ ^ # left_child_pos right_child_pos # Note that left_child_pos always takes the value of node_position, and # right_child_pos = left_child_pos + left_child.n_samples. The order of # the samples inside a leaf is irrelevant. # 1. samples_indices is a view on this region a..x. We conceptually # divide it into n_threads regions. Each thread will be responsible for # its own region. Here is an example with 4 threads: # samples_indices = [abcdef|ghijkl|mnopqr|stuvwx] # 2. Each thread processes 6 = 24 // 4 entries and maps them into # left_indices_buffer or right_indices_buffer. For example, we could # have the following mapping ('.' denotes an undefined entry): # - left_indices_buffer = [abef..|il....|mnopqr|tux...] # - right_indices_buffer = [cd....|ghjk..|......|svw...] # 3. We keep track of the start positions of the regions (the '|') in # ``offset_in_buffers`` as well as the size of each region. We also keep # track of the number of samples put into the left/right child by each # thread. Concretely: # - left_counts = [4, 2, 6, 3] # - right_counts = [2, 4, 0, 3] # 4. Finally, we put left/right_indices_buffer back into the # samples_indices, without any undefined entries and the partition looks # as expected # partition = [*************abefilmnopqrtuxcdghjksvw*****************] # Note: We here show left/right_indices_buffer as being the same size as # sample_indices for simplicity, but in reality they are of the same size # as partition. X_binned = context.X_binned.T[split_info.feature_idx] n_threads = numba.config.NUMBA_DEFAULT_NUM_THREADS n_samples = sample_indices.shape[0] # Note: we could probably allocate all the arrays of size n_threads in the # splitting context as well, but gains are probably going to be minimal sizes = np.full(n_threads, n_samples // n_threads, dtype=np.int32) if n_samples % n_threads > 0: # array[:0] will cause a bug in numba 0.41 so we need the if. 
Remove # once issue numba 3554 is fixed. sizes[:n_samples % n_threads] += 1 offset_in_buffers = np.zeros(n_threads, dtype=np.int32) offset_in_buffers[1:] = np.cumsum(sizes[:-1]) left_counts = np.empty(n_threads, dtype=np.int32) right_counts = np.empty(n_threads, dtype=np.int32) # Need to declare local variables, else they're not updated :/ # (see numba issue 3459) left_indices_buffer = context.left_indices_buffer right_indices_buffer = context.right_indices_buffer # map indices from samples_indices to left/right_indices_buffer for thread_idx in prange(n_threads): left_count = 0 right_count = 0 start = offset_in_buffers[thread_idx] stop = start + sizes[thread_idx] for i in range(start, stop): sample_idx = sample_indices[i] if X_binned[sample_idx] <= split_info.bin_idx: left_indices_buffer[start + left_count] = sample_idx left_count += 1 else: right_indices_buffer[start + right_count] = sample_idx right_count += 1 left_counts[thread_idx] = left_count right_counts[thread_idx] = right_count # position of right child = just after the left child right_child_position = left_counts.sum() # offset of each thread in samples_indices for left and right child, i.e. # where each thread will start to write. left_offset = np.zeros(n_threads, dtype=np.int32) left_offset[1:] = np.cumsum(left_counts[:-1]) right_offset = np.full(n_threads, right_child_position, dtype=np.int32) right_offset[1:] += np.cumsum(right_counts[:-1]) # map indices in left/right_indices_buffer back into samples_indices. This # also updates context.partition since samples_indice is a view. 
for thread_idx in prange(n_threads): for i in range(left_counts[thread_idx]): sample_indices[left_offset[thread_idx] + i] = \ left_indices_buffer[offset_in_buffers[thread_idx] + i] for i in range(right_counts[thread_idx]): sample_indices[right_offset[thread_idx] + i] = \ right_indices_buffer[offset_in_buffers[thread_idx] + i] return (sample_indices[:right_child_position], sample_indices[right_child_position:]) @njit(parallel=True) def find_node_split(context, sample_indices): """For each feature, find the best bin to split on at a given node. Returns the best split info among all features, and the histograms of all the features. The histograms are computed by scanning the whole data. Parameters ---------- context : SplittingContext The splitting context sample_indices : array of int The indices of the samples at the node to split. Returns ------- best_split_info : SplitInfo The info about the best possible split among all features. histograms : array of HISTOGRAM_DTYPE, shape=(n_features, max_bins) The histograms of each feature. A histogram is an array of HISTOGRAM_DTYPE of size ``max_bins`` (only ``n_bins_per_features[feature]`` entries are relevant). """ n_samples = sample_indices.shape[0] # Pre-allocate the results datastructure to be able to use prange: # numba jitclass do not seem to properly support default values for kwargs. 
split_infos = [SplitInfo(-1., 0, 0, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0, 0) for i in range(context.n_features)] histograms = np.empty( shape=(np.int64(context.n_features), np.int64(context.max_bins)), dtype=HISTOGRAM_DTYPE ) for feature_idx in prange(context.n_features): X_binned = context.X_binned.T[feature_idx] X = context.X.T[feature_idx] root_node = X_binned.shape[0] == n_samples if root_node: histogram = _build_histogram_root( context.max_bins, X_binned, X, context.prediction_value, context.gradients, context.hessians) else: histogram = _build_histogram( context.max_bins, sample_indices, X_binned, X, context.prediction_value, context.gradients, context.hessians) histogram_summ = sum_histogram(histogram) split_info = _find_best_bin_to_split_helper( context, feature_idx, histogram, n_samples, histogram_summ) split_infos[feature_idx] = split_info histograms[feature_idx, :] = histogram split_info = _find_best_feature_to_split_helper(split_infos) return split_info, histograms @njit def _find_best_feature_to_split_helper(split_infos): best_gain = None for i, split_info in enumerate(split_infos): gain = split_info.gain if best_gain is None or gain > best_gain: best_gain = gain best_split_info = split_info return best_split_info @njit(locals={'left_g_hf': float32, 'left_hx_hfx': float32, 'left_h': float32, 'left_hx': float32, 'left_hx2': float32, 'n_samples_left': uint32}, fastmath=True) def _find_best_bin_to_split_helper(context, feature_idx, histogram, n_samples, h_summ): """Find best bin to split on, and return the corresponding SplitInfo. Splits that do not satisfy the splitting constraints (min_gain_to_split, etc.) are discarded here. If no split can satisfy the constraints, a SplitInfo with a gain of -1 is returned. If for a given node the best SplitInfo has a gain of -1, it is finalized into a leaf. """ # Allocate the structure for the best split information. 
It can be # returned as such (with a negative gain) if the min_hessian_to_split # condition is not satisfied. Such invalid splits are later discarded by # the TreeGrower. best_split = SplitInfo(-1., 0, 0, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0, 0) left_g_hf, left_gx_hfx, left_h, left_hx, left_hx2 = 0., 0., 0., 0., 0. n_samples_left = 0 for bin_idx in range(context.n_bins_per_feature[feature_idx]): n_samples_left += histogram[bin_idx]['count'] n_samples_right = n_samples - n_samples_left left_g_hf += histogram[bin_idx]['sum_g_hf'] right_g_hf = h_summ[0]['sum_g_hf'] - left_g_hf left_gx_hfx += histogram[bin_idx]['sum_gx_hfx'] right_gx_hfx = h_summ[0]['sum_gx_hfx'] - left_gx_hfx left_h += histogram[bin_idx]['sum_h'] right_h = h_summ[0]['sum_h'] - left_h left_hx += histogram[bin_idx]['sum_hx'] right_hx = h_summ[0]['sum_hx'] - left_hx left_hx2 += histogram[bin_idx]['sum_hx2'] right_hx2 = h_summ[0]['sum_hx2'] - left_hx2 if n_samples_left < context.min_samples_leaf: continue if n_samples_right < context.min_samples_leaf: # won't get any better break left_denominator = (left_hx2 * left_h - left_hx**2) right_denominator = (right_hx2 * right_h - right_hx**2) if min(left_denominator, right_denominator) < context.min_hessian_to_split: continue gain = _split_gain( left_g_hf, left_gx_hfx, left_h, left_hx, left_hx2, right_g_hf, right_gx_hfx, right_h, right_hx, right_hx2, h_summ[0]['sum_g_hf'], h_summ[0]['sum_gx_hfx'], h_summ[0]['sum_h'], h_summ[0]['sum_hx'], h_summ[0]['sum_hx2'], context.w_l2_reg, context.b_l2_reg) if gain > best_split.gain and gain > context.min_gain_to_split: best_split.gain = gain best_split.feature_idx = feature_idx best_split.bin_idx = bin_idx best_split.n_samples_left = n_samples_left best_split.n_samples_right = n_samples_right best_split.left_g_hf = left_g_hf best_split.left_gx_hfx = left_gx_hfx best_split.left_h = left_h best_split.left_hx = left_hx best_split.left_hx2 = left_hx2 best_split.right_g_hf = right_g_hf best_split.right_gx_hfx = right_gx_hfx 
best_split.right_h = right_h best_split.right_hx = right_hx best_split.right_hx2 = right_hx2 return best_split @njit(fastmath=False) def _split_gain(left_g_hf, left_gx_hfx, left_h, left_hx, left_hx2, right_g_hf, right_gx_hfx, right_h, right_hx, right_hx2, sum_g_hf, sum_gx_hfx, sum_h, sum_hx, sum_hx2, w_l2_reg, b_l2_reg): def negative_loss_constant(g_hf, h): return g_hf ** 2 / (h + b_l2_reg) def negative_loss_linear(g_hf, gx_hfx, h, hx, hx2): hx2_reg = hx2 + w_l2_reg h_reg = h + b_l2_reg return (hx2_reg * g_hf**2 + h_reg * gx_hfx**2 - 2 * g_hf * gx_hfx * hx) / (hx2_reg * h_reg - hx**2) gain = negative_loss_linear(left_g_hf, left_gx_hfx, left_h, left_hx, left_hx2) gain += negative_loss_linear(right_g_hf, right_gx_hfx, right_h, right_hx, right_hx2) gain -= negative_loss_constant(sum_g_hf, sum_h) return gain
[ "numpy.int64", "numpy.full", "numba.njit", "numba.jitclass", "numpy.zeros", "numpy.empty", "numpy.cumsum", "numpy.empty_like", "numba.prange", "numpy.zeros_like", "numpy.arange" ]
[((566, 1016), 'numba.jitclass', 'jitclass', (["[('gain', float32), ('feature_idx', uint32), ('bin_idx', uint8), (\n 'left_g_hf', float32), ('left_gx_hfx', float32), ('left_h', float32), (\n 'left_hx', float32), ('left_hx2', float32), ('if_left_linear', boolean),\n ('right_g_hf', float32), ('right_gx_hfx', float32), ('right_h', float32\n ), ('right_hx', float32), ('right_hx2', float32), ('if_right_linear',\n boolean), ('n_samples_left', uint32), ('n_samples_right', uint32)]"], {}), "([('gain', float32), ('feature_idx', uint32), ('bin_idx', uint8), (\n 'left_g_hf', float32), ('left_gx_hfx', float32), ('left_h', float32), (\n 'left_hx', float32), ('left_hx2', float32), ('if_left_linear', boolean),\n ('right_g_hf', float32), ('right_gx_hfx', float32), ('right_h', float32\n ), ('right_hx', float32), ('right_hx2', float32), ('if_right_linear',\n boolean), ('n_samples_left', uint32), ('n_samples_right', uint32)])\n", (574, 1016), False, 'from numba import njit, jitclass, prange, float32, uint8, uint32, boolean\n'), ((2556, 3075), 'numba.jitclass', 'jitclass', (["[('n_features', uint32), ('X_binned', uint8[::1, :]), ('X', float32[::1, :]\n ), ('max_bins', uint32), ('n_bins_per_feature', uint32[::1]), (\n 'min_samples_leaf', uint32), ('min_gain_to_split', float32), (\n 'gradients', float32[::1]), ('hessians', float32[::1]), (\n 'prediction_value', float32[::1]), ('w_l2_reg', float32), ('b_l2_reg',\n float32), ('min_hessian_to_split', float32), ('partition', uint32[::1]),\n ('left_indices_buffer', uint32[::1]), ('right_indices_buffer', uint32[::1])\n ]"], {}), "([('n_features', uint32), ('X_binned', uint8[::1, :]), ('X',\n float32[::1, :]), ('max_bins', uint32), ('n_bins_per_feature', uint32[:\n :1]), ('min_samples_leaf', uint32), ('min_gain_to_split', float32), (\n 'gradients', float32[::1]), ('hessians', float32[::1]), (\n 'prediction_value', float32[::1]), ('w_l2_reg', float32), ('b_l2_reg',\n float32), ('min_hessian_to_split', float32), ('partition', uint32[::1]),\n 
('left_indices_buffer', uint32[::1]), ('right_indices_buffer', uint32[:\n :1])])\n", (2564, 3075), False, 'from numba import njit, jitclass, prange, float32, uint8, uint32, boolean\n'), ((6555, 6574), 'numba.njit', 'njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (6559, 6574), False, 'from numba import njit, jitclass, prange, float32, uint8, uint32, boolean\n'), ((6907, 7006), 'numba.njit', 'njit', ([], {'parallel': '(True)', 'locals': "{'sample_idx': uint32, 'left_count': uint32, 'right_count': uint32}"}), "(parallel=True, locals={'sample_idx': uint32, 'left_count': uint32,\n 'right_count': uint32})\n", (6911, 7006), False, 'from numba import njit, jitclass, prange, float32, uint8, uint32, boolean\n'), ((13146, 13165), 'numba.njit', 'njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (13150, 13165), False, 'from numba import njit, jitclass, prange, float32, uint8, uint32, boolean\n'), ((15751, 15919), 'numba.njit', 'njit', ([], {'locals': "{'left_g_hf': float32, 'left_hx_hfx': float32, 'left_h': float32, 'left_hx':\n float32, 'left_hx2': float32, 'n_samples_left': uint32}", 'fastmath': '(True)'}), "(locals={'left_g_hf': float32, 'left_hx_hfx': float32, 'left_h':\n float32, 'left_hx': float32, 'left_hx2': float32, 'n_samples_left':\n uint32}, fastmath=True)\n", (15755, 15919), False, 'from numba import njit, jitclass, prange, float32, uint8, uint32, boolean\n'), ((19050, 19070), 'numba.njit', 'njit', ([], {'fastmath': '(False)'}), '(fastmath=False)\n', (19054, 19070), False, 'from numba import njit, jitclass, prange, float32, uint8, uint32, boolean\n'), ((6757, 6774), 'numba.prange', 'prange', (['n_samples'], {}), '(n_samples)\n', (6763, 6774), False, 'from numba import njit, jitclass, prange, float32, uint8, uint32, boolean\n'), ((10643, 10701), 'numpy.full', 'np.full', (['n_threads', '(n_samples // n_threads)'], {'dtype': 'np.int32'}), '(n_threads, n_samples // n_threads, dtype=np.int32)\n', (10650, 10701), True, 'import numpy as np\n'), ((10922, 
10957), 'numpy.zeros', 'np.zeros', (['n_threads'], {'dtype': 'np.int32'}), '(n_threads, dtype=np.int32)\n', (10930, 10957), True, 'import numpy as np\n'), ((10986, 11007), 'numpy.cumsum', 'np.cumsum', (['sizes[:-1]'], {}), '(sizes[:-1])\n', (10995, 11007), True, 'import numpy as np\n'), ((11027, 11062), 'numpy.empty', 'np.empty', (['n_threads'], {'dtype': 'np.int32'}), '(n_threads, dtype=np.int32)\n', (11035, 11062), True, 'import numpy as np\n'), ((11082, 11117), 'numpy.empty', 'np.empty', (['n_threads'], {'dtype': 'np.int32'}), '(n_threads, dtype=np.int32)\n', (11090, 11117), True, 'import numpy as np\n'), ((11416, 11433), 'numba.prange', 'prange', (['n_threads'], {}), '(n_threads)\n', (11422, 11433), False, 'from numba import njit, jitclass, prange, float32, uint8, uint32, boolean\n'), ((12271, 12306), 'numpy.zeros', 'np.zeros', (['n_threads'], {'dtype': 'np.int32'}), '(n_threads, dtype=np.int32)\n', (12279, 12306), True, 'import numpy as np\n'), ((12329, 12356), 'numpy.cumsum', 'np.cumsum', (['left_counts[:-1]'], {}), '(left_counts[:-1])\n', (12338, 12356), True, 'import numpy as np\n'), ((12376, 12432), 'numpy.full', 'np.full', (['n_threads', 'right_child_position'], {'dtype': 'np.int32'}), '(n_threads, right_child_position, dtype=np.int32)\n', (12383, 12432), True, 'import numpy as np\n'), ((12457, 12485), 'numpy.cumsum', 'np.cumsum', (['right_counts[:-1]'], {}), '(right_counts[:-1])\n', (12466, 12485), True, 'import numpy as np\n'), ((12657, 12674), 'numba.prange', 'prange', (['n_threads'], {}), '(n_threads)\n', (12663, 12674), False, 'from numba import njit, jitclass, prange, float32, uint8, uint32, boolean\n'), ((14506, 14532), 'numba.prange', 'prange', (['context.n_features'], {}), '(context.n_features)\n', (14512, 14532), False, 'from numba import njit, jitclass, prange, float32, uint8, uint32, boolean\n'), ((5412, 5441), 'numpy.zeros_like', 'np.zeros_like', (['self.gradients'], {}), '(self.gradients)\n', (5425, 5441), True, 'import numpy as np\n'), 
((6304, 6349), 'numpy.arange', 'np.arange', (['(0)', 'X_binned.shape[0]', '(1)', 'np.uint32'], {}), '(0, X_binned.shape[0], 1, np.uint32)\n', (6313, 6349), True, 'import numpy as np\n'), ((6456, 6485), 'numpy.empty_like', 'np.empty_like', (['self.partition'], {}), '(self.partition)\n', (6469, 6485), True, 'import numpy as np\n'), ((6522, 6551), 'numpy.empty_like', 'np.empty_like', (['self.partition'], {}), '(self.partition)\n', (6535, 6551), True, 'import numpy as np\n'), ((14387, 14415), 'numpy.int64', 'np.int64', (['context.n_features'], {}), '(context.n_features)\n', (14395, 14415), True, 'import numpy as np\n'), ((14417, 14443), 'numpy.int64', 'np.int64', (['context.max_bins'], {}), '(context.max_bins)\n', (14425, 14443), True, 'import numpy as np\n')]
import matplotlib.pyplot as plt import numpy as np def load_values(filename): values = [] with open(filename) as file: lines = file.readlines() values = [list(map(float, line.split(','))) for line in lines] return np.array(values).mean(axis=0) if __name__ == "__main__": fig, axs = plt.subplots(2, 1, figsize=(8,8)) y11 = load_values("data/values_2000-1000-10_0-1_0-1_0.00.txt") y12 = load_values("data/values_2000-1000-10_0-1_0-1_0.01.txt") y13 = load_values("data/values_2000-1000-10_0-1_0-1_0.10.txt") y21 = load_values("data/maxQ_2000-1000-10_0-1_0-1_0.00.txt") y22 = load_values("data/maxQ_2000-1000-10_0-1_0-1_0.01.txt") y23 = load_values("data/maxQ_2000-1000-10_0-1_0-1_0.10.txt") x = np.linspace(1, len(y11), len(y11), endpoint=True) axs[0].set_ylabel('Mean value') axs[0].plot(x, y11, label="\u03B5 = 0.0") axs[0].plot(x, y12, label="\u03B5 = 0.01") axs[0].plot(x, y13, label="\u03B5 = 0.1") axs[0].legend() axs[1].set_ylabel('Mean of best picks [%]') axs[1].set_xlabel('steps') axs[1].plot(x, y21, x, y22, x, y23) plt.show()
[ "numpy.array", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ]
[((329, 363), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(8, 8)'}), '(2, 1, figsize=(8, 8))\n', (341, 363), True, 'import matplotlib.pyplot as plt\n'), ((1143, 1153), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1151, 1153), True, 'import matplotlib.pyplot as plt\n'), ((255, 271), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (263, 271), True, 'import numpy as np\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- """ ================ Network Rewiring ================ :Author: <NAME> :Date: 2011-03-01 :Copyright: Copyright(c) 2011 Jacobs University of Bremen. All rights reserved. :File: randomisation.py """ import numpy import networkx as nx def standard_directed_groups(graph): """ Makes two categories of unidirectional and bidirectional links. Note ---- Self-links are recognised as bidirectional links and produce an error. """ if graph.number_of_selfloops() > 0: raise nx.NetworkXError("the standard setup does not allow self-links") uni = list() bi = list() for edge in graph.edges_iter(): if graph.has_edge(edge[1], edge[0]): bi.append(edge) else: uni.append(edge) return [[uni, 1], [bi, 2]] def metabolic_directed_groups(graph): """ Separates reversible and irreversible reactions and for each of them groups links in substrate-reaction and reaction-product pairs. """ prod_forward = list() subs_forward = list() prod_reversible = list() subs_reversible = list() for rxn in graph.reactions: if rxn.reversible: prod_group = prod_reversible subs_group = subs_reversible else: prod_group = prod_forward subs_group = subs_forward for cmpd in graph.predecessors_iter(rxn): subs_group.append((cmpd, rxn)) for cmpd in graph.successors_iter(rxn): prod_group.append((rxn, cmpd)) return [[prod_forward, 1], [subs_forward, 1], [prod_reversible, 1], [subs_reversible, 1]] def selflinks_directed_groups(graph): """ Normal categorisation of links with the addition of self-links as unidirectional links. """ uni = list() bi = list() for edge in graph.edges_iter(): if edge[0] == edge[1]: uni.append(edge) elif graph.has_edge(edge[1], edge[0]): bi.append(edge) else: uni.append(edge) return [[uni, 1], [bi, 2]] def check_standard(graph, first, second): """ Standard rewiring conditions as in original theory. 
""" # curiously the conditions for switching unidirectional and bidirectional # links are the same for just slightly different reasons if first == second: return False # prevent creation of self-links if first[0] == second[1]: return False if second[0] == first[1]: return False # check if we would create a parallel edge if second[1] in graph[first[0]]: return False if first[1] in graph[second[0]]: return False # check if we would create a bidirectional link # or cover existing reverse link in double link switching if first[0] in graph[second[1]]: return False if second[0] in graph[first[1]]: return False return True class NetworkRewiring(object): """ """ def __init__(self): """ """ object.__init__(self) self.conditions = check_standard self.make_groups = standard_directed_groups self.graph = None def _add_edge(self, src, tar, bunch, i): """ """ self.graph.add_edge(src, tar) # over-write old edge bunch[i, 0] = src bunch[i, 1] = tar def _remove_edge(self, src, tar, bunch): """ """ self.graph.remove_edge(src, tar) def _switch_double(self, first, second, group, u, v): """ """ # find the reverse edges for u and v # indices of target(first) x = numpy.nonzero(group[:, 0] == first[1])[0] # intersection with indices of source(first) to find index x = x[numpy.nonzero(group[x, 1].flatten() == first[0])[0]] # similarly y = numpy.nonzero(group[:, 0] == second[1])[0] y = y[numpy.nonzero(group[y, 1].flatten() == second[0])[0]] if self.conditions(self.graph, first, second): # if all of these conditions are met, switch double edge # add the forward direction self._add_edge(first[0], second[1], group, u) self._add_edge(second[0], first[1], group, v) # add the backward direction self._add_edge(first[1], second[0], group, x) self._add_edge(second[1], first[0], group, y) # remove old edges self._remove_edge(first[0], first[1], group) self._remove_edge(first[1], first[0], group) self._remove_edge(second[0], second[1], group) self._remove_edge(second[1], second[0], group) return 1 else: 
return 0 def _switch_single(self, first, second, group, u, v): """ """ if self.conditions(self.graph, first, second): # all conditions passed self._add_edge(first[0], second[1], group, u) self._add_edge(second[0], first[1], group, v) self._remove_edge(first[0], first[1], group) self._remove_edge(second[0], second[1], group) return 1 else: return 0 def randomise(self, template, flip=100, copy=True): """ This is achieved by switching edges in the graph a number of times equal to the number of edges present times 'flip'. This function is intended as a basis for calculating three-node subgraph statistics. As such only degrees of nodes, bi-directional link properties, etc. are conserved. For larger subgraph statistics also the smaller subgraph statistics would have to be conserved. Notes ----- Nodes need to be integers. References ---------- <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME> Network Motifs: Simple Building Blocks of Complex Networks Science, 298:824-827 (2002) """ if template.is_multigraph(): raise nx.NetworkXError("not defined for multigraphs") if copy: self.graph = template.copy() else: self.graph = template if not self.graph.size(): return (self.graph, 0.0) sets = self.make_groups(self.graph) len_sets = len(sets) # set up progress tracking # the probability of switching a link in a certain group is proportional # to the number of flips left in that group # for each group we record: # expected flips, attempts left, successful flips track = numpy.zeros(shape=(len_sets, 4), dtype=int) for (i, (group, weight)) in enumerate(sets): w_group = len(group) # convert links groups to two dimensional array sets[i][0] = numpy.array(group) if w_group > weight: track[i, 0] = flip * weight * w_group # expected flips track[i, 1] = track[i, 0] # attempts left # store number of links - 1 for inclusive randint range track[i, 3] = w_group - 1 total_left = track[:, 1].sum() try: probs = [float(track[k, 1]) / float(total_left)\ for k in xrange(len_sets)] except 
ZeroDivisionError: return (self.graph, 0.0) # randomly rewire groups depending on their probability uniform = numpy.random.random_sample randint = numpy.random.random_integers for i in xrange(total_left): # loop value is stored, so no problems draw = uniform() for j in xrange(len_sets): if draw < float(sum(probs[:j + 1])): group = sets[j][0] (u, v) = randint(0, track[j, 3], 2) # two random links if sets[j][1] == 1: track[j, 2] += self._switch_single(tuple(group[u]), tuple(group[v]), group, u, v) elif sets[j][1] == 2: track[j, 2] += self._switch_double(tuple(group[u]), tuple(group[v]), group, u, v) else: nx.NetworkXError("unknown category") total_left -= 1 track[j, 1] -= 1 try: probs = [float(track[k, 1]) / float(total_left)\ for k in xrange(len_sets)] except ZeroDivisionError: pass # end of randomisation break # selected right category, continue with outer loop return (self.graph, float(track[:, 2].sum()) / float(track[:, 0].sum()))
[ "numpy.array", "numpy.zeros", "networkx.NetworkXError", "numpy.nonzero" ]
[((570, 634), 'networkx.NetworkXError', 'nx.NetworkXError', (['"""the standard setup does not allow self-links"""'], {}), "('the standard setup does not allow self-links')\n", (586, 634), True, 'import networkx as nx\n'), ((6617, 6660), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(len_sets, 4)', 'dtype': 'int'}), '(shape=(len_sets, 4), dtype=int)\n', (6628, 6660), False, 'import numpy\n'), ((3676, 3714), 'numpy.nonzero', 'numpy.nonzero', (['(group[:, 0] == first[1])'], {}), '(group[:, 0] == first[1])\n', (3689, 3714), False, 'import numpy\n'), ((3884, 3923), 'numpy.nonzero', 'numpy.nonzero', (['(group[:, 0] == second[1])'], {}), '(group[:, 0] == second[1])\n', (3897, 3923), False, 'import numpy\n'), ((6041, 6088), 'networkx.NetworkXError', 'nx.NetworkXError', (['"""not defined for multigraphs"""'], {}), "('not defined for multigraphs')\n", (6057, 6088), True, 'import networkx as nx\n'), ((6832, 6850), 'numpy.array', 'numpy.array', (['group'], {}), '(group)\n', (6843, 6850), False, 'import numpy\n'), ((8233, 8269), 'networkx.NetworkXError', 'nx.NetworkXError', (['"""unknown category"""'], {}), "('unknown category')\n", (8249, 8269), True, 'import networkx as nx\n')]
# # Copyright (c) 2021 The GPflux Contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # r""" This module contains helper functions for constructing :class:`~gpflow.kernels.MultioutputKernel`, :class:`~gpflow.inducing_variables.MultioutputInducingVariables`, :class:`~gpflow.mean_functions.MeanFunction`, and :class:`~gpflux.layers.GPLayer` objects. """ import inspect import warnings from dataclasses import fields from typing import List, Optional, Type, TypeVar, Union import numpy as np import gpflow from gpflow import default_float from gpflow.inducing_variables import ( InducingPoints, SeparateIndependentInducingVariables, SharedIndependentInducingVariables, ) from gpflow.kernels import SeparateIndependent, SharedIndependent from gpflow.utilities import deepcopy from gpflux.layers.gp_layer import GPLayer def construct_basic_kernel( kernels: Union[gpflow.kernels.Kernel, List[gpflow.kernels.Kernel]], output_dim: Optional[int] = None, share_hyperparams: bool = False, ) -> gpflow.kernels.MultioutputKernel: r""" Construct a :class:`~gpflow.kernels.MultioutputKernel` to use in :class:`GPLayer`\ s. :param kernels: A single kernel or list of :class:`~gpflow.kernels.Kernel`\ s. - When a single kernel is passed, the same kernel is used for all outputs. Depending on ``share_hyperparams``, the hyperparameters will be shared across outputs. You must also specify ``output_dim``. 
- When a list of kernels is passed, each kernel in the list is used on a separate output dimension and a :class:`gpflow.kernels.SeparateIndependent` is returned. :param output_dim: The number of outputs. This is equal to the number of latent GPs in the :class:`GPLayer`. When only a single kernel is specified for ``kernels``, you must also specify ``output_dim``. When a list of kernels is specified for ``kernels``, we assume that ``len(kernels) == output_dim``, and ``output_dim`` is not required. :param share_hyperparams: If `True`, use the type of kernel and the same hyperparameters (variance and lengthscales) for the different outputs. Otherwise, the same type of kernel (Squared-Exponential, Matern12, and so on) is used for the different outputs, but the kernel can have different hyperparameter values for each. """ if isinstance(kernels, list): mo_kern = SeparateIndependent(kernels) elif not share_hyperparams: copies = [deepcopy(kernels) for _ in range(output_dim)] mo_kern = SeparateIndependent(copies) else: mo_kern = SharedIndependent(kernels, output_dim) return mo_kern def construct_basic_inducing_variables( num_inducing: Union[int, List[int]], input_dim: int, output_dim: Optional[int] = None, share_variables: bool = False, z_init: Optional[np.ndarray] = None, ) -> gpflow.inducing_variables.MultioutputInducingVariables: r""" Construct a compatible :class:`~gpflow.inducing_variables.MultioutputInducingVariables` to use in :class:`GPLayer`\ s. :param num_inducing: The total number of inducing variables, ``M``. This parameter can be freely chosen by the user. General advice is to set it as high as possible, but smaller than the number of datapoints. The computational complexity of the layer is cubic in ``M``. If a list is passed, each element in the list specifies the number of inducing variables to use for each ``output_dim``. :param input_dim: The dimensionality of the input data (or features) ``X``. Typically, this corresponds to ``X.shape[-1]``. 
For :class:`~gpflow.inducing_variables.InducingPoints`, this specifies the dimensionality of ``Z``. :param output_dim: The dimensionality of the outputs (or targets) ``Y``. Typically, this corresponds to ``Y.shape[-1]`` or the number of latent GPs. The parameter is used to determine the number of inducing variable sets to create when a different set is used for each output. The parameter is redundant when ``num_inducing`` is a list, because the code assumes that ``len(num_inducing) == output_dim``. :param share_variables: If `True`, use the same inducing variables for different outputs. Otherwise, create a different set for each output. Set this parameter to `False` when ``num_inducing`` is a list, because otherwise the two arguments contradict each other. If you set this parameter to `True`, you must also specify ``output_dim``, because that is used to determine the number of inducing variable sets to create. :param z_init: Raw values to use to initialise :class:`gpflow.inducing_variables.InducingPoints`. If `None` (the default), values will be initialised from ``N(0, 1)``. The shape of ``z_init`` depends on the other input arguments. If a single set of inducing points is used for all outputs (that is, if ``share_variables`` is `True`), ``z_init`` should be rank two, with the dimensions ``[M, input_dim]``. If a different set of inducing points is used for the outputs (ithat is, if ``num_inducing`` is a list, or if ``share_variables`` is `False`), ``z_init`` should be a rank three tensor with the dimensions ``[output_dim, M, input_dim]``. """ if z_init is None: warnings.warn( "No `z_init` has been specified in `construct_basic_inducing_variables`. " "Default initialization using random normal N(0, 1) will be used." 
) z_init_is_given = z_init is not None if isinstance(num_inducing, list): if output_dim is not None: # TODO: the following assert may clash with MixedMultiOutputFeatures # where the number of independent GPs can differ from the output # dimension assert output_dim == len(num_inducing) # pragma: no cover assert share_variables is False inducing_variables = [] for i, num_ind_var in enumerate(num_inducing): if z_init_is_given: assert len(z_init[i]) == num_ind_var z_init_i = z_init[i] else: z_init_i = np.random.randn(num_ind_var, input_dim).astype(dtype=default_float()) assert z_init_i.shape == (num_ind_var, input_dim) inducing_variables.append(InducingPoints(z_init_i)) return SeparateIndependentInducingVariables(inducing_variables) elif not share_variables: inducing_variables = [] for o in range(output_dim): if z_init_is_given: if z_init.shape != (output_dim, num_inducing, input_dim): raise ValueError( "When not sharing variables, z_init must have shape" "[output_dim, num_inducing, input_dim]" ) z_init_o = z_init[o] else: z_init_o = np.random.randn(num_inducing, input_dim).astype(dtype=default_float()) inducing_variables.append(InducingPoints(z_init_o)) return SeparateIndependentInducingVariables(inducing_variables) else: # TODO: should we assert output_dim is None ? z_init = ( z_init if z_init_is_given else np.random.randn(num_inducing, input_dim).astype(dtype=default_float()) ) shared_ip = InducingPoints(z_init) return SharedIndependentInducingVariables(shared_ip) def construct_mean_function( X: np.ndarray, D_in: int, D_out: int ) -> gpflow.mean_functions.MeanFunction: """ Return :class:`gpflow.mean_functions.Identity` when ``D_in`` and ``D_out`` are equal. Otherwise, use the principal components of the inputs matrix ``X`` to build a :class:`~gpflow.mean_functions.Linear` mean function. .. note:: The returned mean function is set to be untrainable. To change this, use :meth:`gpflow.set_trainable`. 
:param X: A data array with the shape ``[N, D_in]`` used to determine the principal components to use to create a :class:`~gpflow.mean_functions.Linear` mean function when ``D_in != D_out``. :param D_in: The dimensionality of the input data (or features) ``X``. Typically, this corresponds to ``X.shape[-1]``. :param D_out: The dimensionality of the outputs (or targets) ``Y``. Typically, this corresponds to ``Y.shape[-1]`` or the number of latent GPs in the layer. """ assert X.shape[-1] == D_in if D_in == D_out: mean_function = gpflow.mean_functions.Identity() else: if D_in > D_out: _, _, V = np.linalg.svd(X, full_matrices=False) W = V[:D_out, :].T else: W = np.concatenate([np.eye(D_in), np.zeros((D_in, D_out - D_in))], axis=1) assert W.shape == (D_in, D_out) mean_function = gpflow.mean_functions.Linear(W) gpflow.set_trainable(mean_function, False) return mean_function def construct_gp_layer( num_data: int, num_inducing: int, input_dim: int, output_dim: int, kernel_class: Type[gpflow.kernels.Stationary] = gpflow.kernels.SquaredExponential, z_init: Optional[np.ndarray] = None, name: Optional[str] = None, ) -> GPLayer: """ Builds a vanilla GP layer with a single kernel shared among all outputs, shared inducing point variables and zero mean function. :param num_data: total number of datapoints in the dataset, *N*. Typically corresponds to ``X.shape[0] == len(X)``. :param num_inducing: total number of inducing variables, *M*. This parameter can be freely chosen by the user. General advice is to pick it as high as possible, but smaller than *N*. The computational complexity of the layer is cubic in *M*. :param input_dim: dimensionality of the input data (or features) X. Typically, this corresponds to ``X.shape[-1]``. :param output_dim: The dimensionality of the outputs (or targets) ``Y``. Typically, this corresponds to ``Y.shape[-1]``. :param kernel_class: The kernel class used by the layer. 
This can be as simple as :class:`gpflow.kernels.SquaredExponential`, or more complex, for example, ``lambda **_: gpflow.kernels.Linear() + gpflow.kernels.Periodic()``. It will be passed a ``lengthscales`` keyword argument. :param z_init: The initial value for the inducing variable inputs. :param name: The name for the GP layer. """ lengthscale = float(input_dim) ** 0.5 base_kernel = kernel_class(lengthscales=np.full(input_dim, lengthscale)) kernel = construct_basic_kernel(base_kernel, output_dim=output_dim, share_hyperparams=True) inducing_variable = construct_basic_inducing_variables( num_inducing, input_dim, output_dim=output_dim, share_variables=True, z_init=z_init, ) gp_layer = GPLayer( kernel=kernel, inducing_variable=inducing_variable, num_data=num_data, mean_function=gpflow.mean_functions.Zero(), name=name, ) return gp_layer T = TypeVar("T") def make_dataclass_from_class(dataclass: Type[T], instance: object, **updates: object) -> T: """ Take a regular object ``instance`` with a superset of fields for a :class:`dataclasses.dataclass` (``@dataclass``-decorated class), and return an instance of the dataclass. The ``instance`` has all of the dataclass's fields but might also have more. ``key=value`` keyword arguments supersede the fields in ``instance``. """ dataclass_keys = [f.name for f in fields(dataclass)] field_dict = {k: v for k, v in inspect.getmembers(instance) if k in dataclass_keys} field_dict.update(updates) return dataclass(**field_dict) # type: ignore def xavier_initialization_numpy(input_dim: int, output_dim: int) -> np.ndarray: r""" Generate initial weights for a neural network layer with the given input and output dimensionality using the Xavier Glorot normal initialiser. From: Glorot, Xavier, and <NAME>. "Understanding the difficulty of training deep feedforward neural networks." Proceedings of the thirteenth international conference on artificial intelligence and statistics. JMLR Workshop and Conference Proceedings, 2010. 
Draw samples from a normal distribution centred on :math:`0` with standard deviation :math:`\sqrt(2 / (\text{input_dim} + \text{output_dim}))`. """ return np.random.randn(input_dim, output_dim) * (2.0 / (input_dim + output_dim)) ** 0.5
[ "gpflow.default_float", "gpflow.utilities.deepcopy", "inspect.getmembers", "gpflow.mean_functions.Linear", "gpflow.inducing_variables.SeparateIndependentInducingVariables", "gpflow.inducing_variables.InducingPoints", "warnings.warn", "gpflow.mean_functions.Identity", "gpflow.kernels.SharedIndependen...
[((11766, 11778), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (11773, 11778), False, 'from typing import List, Optional, Type, TypeVar, Union\n'), ((2923, 2951), 'gpflow.kernels.SeparateIndependent', 'SeparateIndependent', (['kernels'], {}), '(kernels)\n', (2942, 2951), False, 'from gpflow.kernels import SeparateIndependent, SharedIndependent\n'), ((5943, 6106), 'warnings.warn', 'warnings.warn', (['"""No `z_init` has been specified in `construct_basic_inducing_variables`. Default initialization using random normal N(0, 1) will be used."""'], {}), "(\n 'No `z_init` has been specified in `construct_basic_inducing_variables`. Default initialization using random normal N(0, 1) will be used.'\n )\n", (5956, 6106), False, 'import warnings\n'), ((7010, 7066), 'gpflow.inducing_variables.SeparateIndependentInducingVariables', 'SeparateIndependentInducingVariables', (['inducing_variables'], {}), '(inducing_variables)\n', (7046, 7066), False, 'from gpflow.inducing_variables import InducingPoints, SeparateIndependentInducingVariables, SharedIndependentInducingVariables\n'), ((9181, 9213), 'gpflow.mean_functions.Identity', 'gpflow.mean_functions.Identity', ([], {}), '()\n', (9211, 9213), False, 'import gpflow\n'), ((9506, 9537), 'gpflow.mean_functions.Linear', 'gpflow.mean_functions.Linear', (['W'], {}), '(W)\n', (9534, 9537), False, 'import gpflow\n'), ((9546, 9588), 'gpflow.set_trainable', 'gpflow.set_trainable', (['mean_function', '(False)'], {}), '(mean_function, False)\n', (9566, 9588), False, 'import gpflow\n'), ((13141, 13179), 'numpy.random.randn', 'np.random.randn', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (13156, 13179), True, 'import numpy as np\n'), ((3066, 3093), 'gpflow.kernels.SeparateIndependent', 'SeparateIndependent', (['copies'], {}), '(copies)\n', (3085, 3093), False, 'from gpflow.kernels import SeparateIndependent, SharedIndependent\n'), ((3122, 3160), 'gpflow.kernels.SharedIndependent', 'SharedIndependent', 
(['kernels', 'output_dim'], {}), '(kernels, output_dim)\n', (3139, 3160), False, 'from gpflow.kernels import SeparateIndependent, SharedIndependent\n'), ((7705, 7761), 'gpflow.inducing_variables.SeparateIndependentInducingVariables', 'SeparateIndependentInducingVariables', (['inducing_variables'], {}), '(inducing_variables)\n', (7741, 7761), False, 'from gpflow.inducing_variables import InducingPoints, SeparateIndependentInducingVariables, SharedIndependentInducingVariables\n'), ((8015, 8037), 'gpflow.inducing_variables.InducingPoints', 'InducingPoints', (['z_init'], {}), '(z_init)\n', (8029, 8037), False, 'from gpflow.inducing_variables import InducingPoints, SeparateIndependentInducingVariables, SharedIndependentInducingVariables\n'), ((8053, 8098), 'gpflow.inducing_variables.SharedIndependentInducingVariables', 'SharedIndependentInducingVariables', (['shared_ip'], {}), '(shared_ip)\n', (8087, 8098), False, 'from gpflow.inducing_variables import InducingPoints, SeparateIndependentInducingVariables, SharedIndependentInducingVariables\n'), ((9271, 9308), 'numpy.linalg.svd', 'np.linalg.svd', (['X'], {'full_matrices': '(False)'}), '(X, full_matrices=False)\n', (9284, 9308), True, 'import numpy as np\n'), ((11224, 11255), 'numpy.full', 'np.full', (['input_dim', 'lengthscale'], {}), '(input_dim, lengthscale)\n', (11231, 11255), True, 'import numpy as np\n'), ((11685, 11713), 'gpflow.mean_functions.Zero', 'gpflow.mean_functions.Zero', ([], {}), '()\n', (11711, 11713), False, 'import gpflow\n'), ((12264, 12281), 'dataclasses.fields', 'fields', (['dataclass'], {}), '(dataclass)\n', (12270, 12281), False, 'from dataclasses import fields\n'), ((12318, 12346), 'inspect.getmembers', 'inspect.getmembers', (['instance'], {}), '(instance)\n', (12336, 12346), False, 'import inspect\n'), ((3002, 3019), 'gpflow.utilities.deepcopy', 'deepcopy', (['kernels'], {}), '(kernels)\n', (3010, 3019), False, 'from gpflow.utilities import deepcopy\n'), ((6969, 6993), 
'gpflow.inducing_variables.InducingPoints', 'InducingPoints', (['z_init_i'], {}), '(z_init_i)\n', (6983, 6993), False, 'from gpflow.inducing_variables import InducingPoints, SeparateIndependentInducingVariables, SharedIndependentInducingVariables\n'), ((7664, 7688), 'gpflow.inducing_variables.InducingPoints', 'InducingPoints', (['z_init_o'], {}), '(z_init_o)\n', (7678, 7688), False, 'from gpflow.inducing_variables import InducingPoints, SeparateIndependentInducingVariables, SharedIndependentInducingVariables\n'), ((9386, 9398), 'numpy.eye', 'np.eye', (['D_in'], {}), '(D_in)\n', (9392, 9398), True, 'import numpy as np\n'), ((9400, 9430), 'numpy.zeros', 'np.zeros', (['(D_in, D_out - D_in)'], {}), '((D_in, D_out - D_in))\n', (9408, 9430), True, 'import numpy as np\n'), ((6799, 6838), 'numpy.random.randn', 'np.random.randn', (['num_ind_var', 'input_dim'], {}), '(num_ind_var, input_dim)\n', (6814, 6838), True, 'import numpy as np\n'), ((6852, 6867), 'gpflow.default_float', 'default_float', ([], {}), '()\n', (6865, 6867), False, 'from gpflow import default_float\n'), ((7914, 7954), 'numpy.random.randn', 'np.random.randn', (['num_inducing', 'input_dim'], {}), '(num_inducing, input_dim)\n', (7929, 7954), True, 'import numpy as np\n'), ((7968, 7983), 'gpflow.default_float', 'default_float', ([], {}), '()\n', (7981, 7983), False, 'from gpflow import default_float\n'), ((7555, 7595), 'numpy.random.randn', 'np.random.randn', (['num_inducing', 'input_dim'], {}), '(num_inducing, input_dim)\n', (7570, 7595), True, 'import numpy as np\n'), ((7609, 7624), 'gpflow.default_float', 'default_float', ([], {}), '()\n', (7622, 7624), False, 'from gpflow import default_float\n')]
import os import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class ReplayBuffer: def __init__(self, state_dim, action_dim,max_size=1e6, device = torch.device('cpu')): self.state_dim = state_dim self.action_dim = action_dim self.max_size = max_size self.device = device self.state_buf = torch.empty((max_size, state_dim), dtype=torch.float32, device=device) self.other_buf = torch.empty((max_size, action_dim + 2), dtype=torch.float32, device=device) self.index = 0 self.total_len = 0 def append(self, state, other): #other: reward, done, action self.index = self.index % self.max_size self.state_buf[self.index] = torch.as_tensor(state, dtype=torch.float32, device=self.device) self.other_buf[self.index] = torch.as_tensor(other, dtype=torch.float32, device=self.device) self.index = self.index+1 self.total_len = min(self.max_size, max(self.index, self.total_len)) def sample_batch(self, batch_size): self.idx = np.random.randint(0, self.total_len-1, batch_size) state = self.state_buf[self.idx] action = self.other_buf[self.idx, 2:].long() reward = self.other_buf[self.idx, 0].unsqueeze(1) mask = self.other_buf[self.idx, 1].unsqueeze(1) next_state = self.state_buf[self.idx + 1] return state, action, reward, mask, next_state class ActorSAC(nn.Module): def __init__(self, state_dim, action_dim, mid_dim=256): super(ActorSAC, self).__init__() self.net = nn.Sequential( nn.Linear(state_dim, mid_dim), nn.ReLU(), nn.Linear(mid_dim, mid_dim), nn.ReLU(), nn.Linear(mid_dim, mid_dim), nn.ReLU(), ) self.avg = nn.Linear(mid_dim, action_dim) self.log_std = nn.Linear(mid_dim, action_dim) self.log_sqrt_2pi = np.log(np.sqrt(2 * np.pi)) def forward(self, state): x = self.net(state) return self.avg(x).tanh() def get_actions(self, state): x = self.net(state) avg = self.avg(x) std = self.log_std(x).clamp(-20, 2).exp() action = avg + torch.rand_like(avg) * std return action.tanh() def get_actions_logprob(self, state): x =self.net(state) avg = self.avg(x) log_std = 
self.log_std(x).clamp(-20, 2) noise = torch.rand_like(avg, requires_grad=True) action_tanh = (avg + noise * log_std.exp()).tanh() log_prob = log_std + self.log_sqrt_2pi + noise.pow(2).__mul__(0.5) log_prob = log_prob + (1. - action_tanh.pow(2)).log() return action_tanh, log_prob.sum(1,keepdim=True) class CriticSAC(nn.Module): def __init__(self, state_dim, action_dim, mid_dim=256): super(CriticSAC, self).__init__() self.net = nn.Sequential( nn.Linear(state_dim+action_dim, mid_dim), nn.ReLU(), nn.Linear(mid_dim, mid_dim), nn.ReLU() ) self.q1 = nn.Linear(mid_dim, 1) # optimal Q value self.q2 = nn.Linear(mid_dim, 1) def forward(self, state, action): x = torch.cat((state, action), dim=1) x = self.net(x) q1 = self.q1(x) q2 = self.q2(x) return q1, q2 class SACAgent: def __init__(self, state_dim, action_dim): self.learning_rate = 1e-4 self.batch_size = 128 self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.alpha = 0.1 self.max_memo = int(1e6) self.target_step = 1024 self.gamma = 0.99 self.mid_dim = 256 self.actor = ActorSAC(state_dim, action_dim, self.mid_dim).to(self.device) self.critic = CriticSAC(state_dim, action_dim,self.mid_dim).to(self.device) self.actor_optim = torch.optim.Adam(self.actor.parameters(), lr=self.learning_rate) self.critic_optim = torch.optim.Adam(self.critic.parameters(), lr=self.learning_rate) self.replay_buffer = ReplayBuffer(state_dim, action_dim, self.max_memo, self.device) self.repect_time = 32 self.state = None def select_action(self, states): states = torch.as_tensor((states, ), dtype=torch.float32, device=self.device) action = self.actor.get_actions(states) return action.cpu().detach().numpy()[0] def explore_env(self, env): state = self.state if self.state is not None else env.reset() for __ in range(self.target_step): action = self.select_action(state) state_, reward, done, _ = env.step(action) other = (reward, (1 - done) * self.gamma, *action) self.replay_buffer.append(state, other) state = state_ if not done 
else env.reset() self.state = state return self.target_step def update(self): for i in range(int(self.target_step * self.repect_time / self.batch_size)): batch_state, batch_action, batch_reward, batch_mask, batch_next_state = self.replay_buffer.sample_batch(self.batch_size) with torch.no_grad(): next_action, next_log_prob = self.actor.get_actions_logprob(batch_next_state) next_q = torch.min(*self.critic(batch_next_state, next_action)) q_label = batch_reward + batch_mask * (next_q + self.alpha * next_log_prob) # critic optim q1, q2 = self.critic(batch_state, batch_action) critic_loss = F.mse_loss(q1, q_label) + F.mse_loss(q2, q_label) self.optim_update(self.critic_optim, critic_loss) # actor optim action_pg, log_prob = self.actor.get_actions_logprob(batch_state) actor_obj = -torch.mean(torch.min(*self.critic(batch_state, action_pg))+ self.alpha * log_prob) self.optim_update(self.actor_optim, actor_obj) @staticmethod def optim_update(optimizer, objective): optimizer.zero_grad() objective.backward() optimizer.step() @torch.no_grad() def evaluate(self, env, render=False): epochs = 20 res = np.zeros((epochs,)) obs = env.reset() index = 0 while index < epochs: if render: env.render() obs = torch.as_tensor((obs,), dtype=torch.float32, device=self.device) action = self.actor(obs) action = action.detach().cpu().numpy()[0] s_, reward, done, _ = env.step(action) res[index] += reward if done: index += 1 obs = env.reset() else: obs = s_ return res.mean(), res.std() def load_and_save_weight(self, path, mode='load'): actor_path = os.path.join(path, 'actor.pth') critic_path = os.path.join(path, 'critic.pth') if mode == 'load': if os.path.exists(actor_path) and os.path.exists(critic_path): self.actor.load_state_dict(torch.load(actor_path)) self.critic.load_state_dict(torch.load(critic_path)) else: if not os.path.exists(path): os.makedirs(path) torch.save(self.actor.state_dict(), actor_path) torch.save(self.critic.state_dict(), critic_path)
[ "os.path.exists", "torch.nn.ReLU", "torch.as_tensor", "numpy.sqrt", "torch.rand_like", "torch.nn.functional.mse_loss", "os.makedirs", "torch.load", "os.path.join", "numpy.random.randint", "numpy.zeros", "torch.cuda.is_available", "torch.nn.Linear", "torch.no_grad", "torch.empty", "torc...
[((6024, 6039), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6037, 6039), False, 'import torch\n'), ((185, 204), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (197, 204), False, 'import torch\n'), ((367, 437), 'torch.empty', 'torch.empty', (['(max_size, state_dim)'], {'dtype': 'torch.float32', 'device': 'device'}), '((max_size, state_dim), dtype=torch.float32, device=device)\n', (378, 437), False, 'import torch\n'), ((463, 538), 'torch.empty', 'torch.empty', (['(max_size, action_dim + 2)'], {'dtype': 'torch.float32', 'device': 'device'}), '((max_size, action_dim + 2), dtype=torch.float32, device=device)\n', (474, 538), False, 'import torch\n'), ((748, 811), 'torch.as_tensor', 'torch.as_tensor', (['state'], {'dtype': 'torch.float32', 'device': 'self.device'}), '(state, dtype=torch.float32, device=self.device)\n', (763, 811), False, 'import torch\n'), ((850, 913), 'torch.as_tensor', 'torch.as_tensor', (['other'], {'dtype': 'torch.float32', 'device': 'self.device'}), '(other, dtype=torch.float32, device=self.device)\n', (865, 913), False, 'import torch\n'), ((1085, 1137), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.total_len - 1)', 'batch_size'], {}), '(0, self.total_len - 1, batch_size)\n', (1102, 1137), True, 'import numpy as np\n'), ((1801, 1831), 'torch.nn.Linear', 'nn.Linear', (['mid_dim', 'action_dim'], {}), '(mid_dim, action_dim)\n', (1810, 1831), True, 'import torch.nn as nn\n'), ((1855, 1885), 'torch.nn.Linear', 'nn.Linear', (['mid_dim', 'action_dim'], {}), '(mid_dim, action_dim)\n', (1864, 1885), True, 'import torch.nn as nn\n'), ((2412, 2452), 'torch.rand_like', 'torch.rand_like', (['avg'], {'requires_grad': '(True)'}), '(avg, requires_grad=True)\n', (2427, 2452), False, 'import torch\n'), ((3017, 3038), 'torch.nn.Linear', 'nn.Linear', (['mid_dim', '(1)'], {}), '(mid_dim, 1)\n', (3026, 3038), True, 'import torch.nn as nn\n'), ((3075, 3096), 'torch.nn.Linear', 'nn.Linear', (['mid_dim', '(1)'], {}), '(mid_dim, 
1)\n', (3084, 3096), True, 'import torch.nn as nn\n'), ((3148, 3181), 'torch.cat', 'torch.cat', (['(state, action)'], {'dim': '(1)'}), '((state, action), dim=1)\n', (3157, 3181), False, 'import torch\n'), ((4189, 4256), 'torch.as_tensor', 'torch.as_tensor', (['(states,)'], {'dtype': 'torch.float32', 'device': 'self.device'}), '((states,), dtype=torch.float32, device=self.device)\n', (4204, 4256), False, 'import torch\n'), ((6117, 6136), 'numpy.zeros', 'np.zeros', (['(epochs,)'], {}), '((epochs,))\n', (6125, 6136), True, 'import numpy as np\n'), ((6744, 6775), 'os.path.join', 'os.path.join', (['path', '"""actor.pth"""'], {}), "(path, 'actor.pth')\n", (6756, 6775), False, 'import os\n'), ((6798, 6830), 'os.path.join', 'os.path.join', (['path', '"""critic.pth"""'], {}), "(path, 'critic.pth')\n", (6810, 6830), False, 'import os\n'), ((1626, 1655), 'torch.nn.Linear', 'nn.Linear', (['state_dim', 'mid_dim'], {}), '(state_dim, mid_dim)\n', (1635, 1655), True, 'import torch.nn as nn\n'), ((1657, 1666), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1664, 1666), True, 'import torch.nn as nn\n'), ((1680, 1707), 'torch.nn.Linear', 'nn.Linear', (['mid_dim', 'mid_dim'], {}), '(mid_dim, mid_dim)\n', (1689, 1707), True, 'import torch.nn as nn\n'), ((1709, 1718), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1716, 1718), True, 'import torch.nn as nn\n'), ((1732, 1759), 'torch.nn.Linear', 'nn.Linear', (['mid_dim', 'mid_dim'], {}), '(mid_dim, mid_dim)\n', (1741, 1759), True, 'import torch.nn as nn\n'), ((1761, 1770), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1768, 1770), True, 'import torch.nn as nn\n'), ((1921, 1939), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1928, 1939), True, 'import numpy as np\n'), ((2885, 2927), 'torch.nn.Linear', 'nn.Linear', (['(state_dim + action_dim)', 'mid_dim'], {}), '(state_dim + action_dim, mid_dim)\n', (2894, 2927), True, 'import torch.nn as nn\n'), ((2927, 2936), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2934, 2936), 
True, 'import torch.nn as nn\n'), ((2950, 2977), 'torch.nn.Linear', 'nn.Linear', (['mid_dim', 'mid_dim'], {}), '(mid_dim, mid_dim)\n', (2959, 2977), True, 'import torch.nn as nn\n'), ((2979, 2988), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2986, 2988), True, 'import torch.nn as nn\n'), ((6265, 6329), 'torch.as_tensor', 'torch.as_tensor', (['(obs,)'], {'dtype': 'torch.float32', 'device': 'self.device'}), '((obs,), dtype=torch.float32, device=self.device)\n', (6280, 6329), False, 'import torch\n'), ((2196, 2216), 'torch.rand_like', 'torch.rand_like', (['avg'], {}), '(avg)\n', (2211, 2216), False, 'import torch\n'), ((3450, 3475), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3473, 3475), False, 'import torch\n'), ((5089, 5104), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5102, 5104), False, 'import torch\n'), ((5486, 5509), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['q1', 'q_label'], {}), '(q1, q_label)\n', (5496, 5509), True, 'import torch.nn.functional as F\n'), ((5512, 5535), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['q2', 'q_label'], {}), '(q2, q_label)\n', (5522, 5535), True, 'import torch.nn.functional as F\n'), ((6873, 6899), 'os.path.exists', 'os.path.exists', (['actor_path'], {}), '(actor_path)\n', (6887, 6899), False, 'import os\n'), ((6904, 6931), 'os.path.exists', 'os.path.exists', (['critic_path'], {}), '(critic_path)\n', (6918, 6931), False, 'import os\n'), ((7102, 7122), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (7116, 7122), False, 'import os\n'), ((7140, 7157), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (7151, 7157), False, 'import os\n'), ((6976, 6998), 'torch.load', 'torch.load', (['actor_path'], {}), '(actor_path)\n', (6986, 6998), False, 'import torch\n'), ((7044, 7067), 'torch.load', 'torch.load', (['critic_path'], {}), '(critic_path)\n', (7054, 7067), False, 'import torch\n')]
import ipopt ipopt.setLoggingLevel(50) import numpy as np from collections import namedtuple from pycalphad.core.constants import MIN_SITE_FRACTION SolverResult = namedtuple('SolverResult', ['converged', 'x', 'chemical_potentials']) class SolverBase(object): """"Base class for solvers.""" ignore_convergence = False def solve(self, prob): """ *Implement this method.* Solve a non-linear problem Parameters ---------- prob : pycalphad.core.problem.Problem Returns ------- pycalphad.core.solver.SolverResult """ raise NotImplementedError("A subclass of Solver must be implemented.") class InteriorPointSolver(SolverBase): """ Standard solver class that uses IPOPT. Attributes ---------- verbose : bool If True, will print solver diagonstics. Defaults to False. infeasibility_threshold : float Dual infeasibility threshold used to tighten constraints and attempt a second solve, if necessary. Defaults to 1e-4. ipopt_options : dict Dictionary of options to pass to IPOPT. Methods ------- solve Solve a pycalphad.core.problem.Problem apply_options Encodes ipopt_options and applies them to problem """ def __init__(self, verbose=False, infeasibility_threshold=1e-4, **ipopt_options): """ Standard solver class that uses IPOPT. Parameters ---------- verbose : bool If True, will print solver diagonstics. Defaults to False. infeasibility_threshold : float Dual infeasibility threshold used to tighten constraints and attempt a second solve, if necessary. Defaults to 1e-4. 
ipopt_options : dict See https://www.coin-or.org/Ipopt/documentation/node40.html for all options """ self.verbose = verbose self.infeasibility_threshold = infeasibility_threshold # set default options self.ipopt_options = { 'max_iter': 200, 'print_level': 0, 'tol': 1e-1, 'constr_viol_tol': 1e-5, 'nlp_scaling_method': 'none', 'hessian_approximation': 'exact' } if not self.verbose: # suppress the "This program contains Ipopt" banner self.ipopt_options['sb'] = ipopt_options.pop('sb', 'yes') # update the default options with the passed options self.ipopt_options.update(ipopt_options) def apply_options(self, problem): """ Apply global options to the solver Parameters ---------- problem : ipopt.problem A problem object that will be solved Notes ----- Strings are encoded to byte strings. """ for option, value in self.ipopt_options.items(): if isinstance(value, str): problem.addOption(option.encode(), value.encode()) else: problem.addOption(option.encode(), value) def solve(self, prob): """ Solve a non-linear problem Parameters ---------- prob : pycalphad.core.problem.Problem Returns ------- SolverResult """ cur_conds = prob.conditions comps = prob.pure_elements nlp = ipopt.problem( n=prob.num_vars, m=prob.num_constraints, problem_obj=prob, lb=prob.xl, ub=prob.xu, cl=prob.cl, cu=prob.cu ) self.apply_options(nlp) # XXX: Hack until exact chemical potential Hessians are implemented if len(prob.fixed_chempot_indices) > 0: nlp.addOption(b'hessian_approximation', b'limited-memory') if self.verbose: print('Turning off exact Hessians due to advanced condition') # Note: Using the ipopt derivative checker can be tricky at the edges of composition space # It will not give valid results for the finite difference approximation x, info = nlp.solve(prob.x0) length_scale = max(np.min(np.abs(x)), 1e-9) if length_scale < 1e-2: if self.verbose: print('Trying to improve poor solution') # Constraints are getting tiny; need to be strict about bounds nlp.addOption(b'compl_inf_tol', 1e-3 * 
float(length_scale)) nlp.addOption(b'bound_relax_factor', MIN_SITE_FRACTION) # This option ensures any bounds failures will fail "loudly" # Otherwise we are liable to have subtle mass balance errors nlp.addOption(b'honor_original_bounds', b'no') accurate_x, accurate_info = nlp.solve(x) if accurate_info['status'] >= 0: x, info = accurate_x, accurate_info chemical_potentials = prob.chemical_potentials(x) if info['status'] == -10: # Not enough degrees of freedom; nothing to do if len(prob.composition_sets) == 1: converged = True chemical_potentials[:] = prob.composition_sets[0].energy else: converged = False elif info['status'] < 0: if self.verbose: print('Calculation Failed: ', cur_conds, info['status_msg']) converged = False else: converged = True if self.verbose: print('Chemical Potentials', chemical_potentials) print(info['mult_x_L']) print(x) print('Status:', info['status'], info['status_msg']) return SolverResult(converged=converged, x=x, chemical_potentials=chemical_potentials)
[ "ipopt.setLoggingLevel", "collections.namedtuple", "numpy.abs", "ipopt.problem" ]
[((13, 38), 'ipopt.setLoggingLevel', 'ipopt.setLoggingLevel', (['(50)'], {}), '(50)\n', (34, 38), False, 'import ipopt\n'), ((164, 233), 'collections.namedtuple', 'namedtuple', (['"""SolverResult"""', "['converged', 'x', 'chemical_potentials']"], {}), "('SolverResult', ['converged', 'x', 'chemical_potentials'])\n", (174, 233), False, 'from collections import namedtuple\n'), ((3395, 3520), 'ipopt.problem', 'ipopt.problem', ([], {'n': 'prob.num_vars', 'm': 'prob.num_constraints', 'problem_obj': 'prob', 'lb': 'prob.xl', 'ub': 'prob.xu', 'cl': 'prob.cl', 'cu': 'prob.cu'}), '(n=prob.num_vars, m=prob.num_constraints, problem_obj=prob, lb\n =prob.xl, ub=prob.xu, cl=prob.cl, cu=prob.cu)\n', (3408, 3520), False, 'import ipopt\n'), ((4195, 4204), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (4201, 4204), True, 'import numpy as np\n')]
import unittest import os import numpy as np from platform import python_implementation from sentinelhub import read_data, write_data, TestSentinelHub class TestIO(TestSentinelHub): class IOTestCase: def __init__(self, filename, mean, shape=(2048, 2048, 3)): self.filename = filename self.mean = mean self.shape = shape @classmethod def setUpClass(cls): super().setUpClass() cls.test_cases = [ cls.IOTestCase('img.tif', 13577.494856), cls.IOTestCase('img.jpg', 52.41194), cls.IOTestCase('img.png', 52.33736), cls.IOTestCase('img-8bit.jp2', 47.09060, (343, 343, 3)), cls.IOTestCase('img-15bit.jp2', 0.3041897, (1830, 1830)), cls.IOTestCase('img-16bit.jp2', 0.3041897, (1830, 1830)), ] def test_img_read(self): for test_case in self.test_cases: with self.subTest(msg=test_case.filename): file_path = os.path.join(self.INPUT_FOLDER, test_case.filename) img = read_data(file_path) self.assertEqual(img.shape, test_case.shape, 'Expected shape {}, got {}'.format(test_case.shape, img.shape)) if test_case.filename != 'img.jpg' or python_implementation() != 'PyPy': self.assertAlmostEqual(np.mean(img), test_case.mean, delta=1e-4, msg='Expected mean {}, got {}'.format(test_case.mean, np.mean(img))) self.assertTrue(img.flags['WRITEABLE'], msg='Obtained numpy array is not writeable') new_file_path = os.path.join(self.OUTPUT_FOLDER, test_case.filename) write_data(new_file_path, img) new_img = read_data(new_file_path) if not test_case.filename.endswith('jpg'): self.assertTrue(np.array_equal(img, new_img), msg="Original and new image are not the same") if __name__ == '__main__': unittest.main()
[ "platform.python_implementation", "numpy.mean", "os.path.join", "numpy.array_equal", "unittest.main", "sentinelhub.write_data", "sentinelhub.read_data" ]
[((2033, 2048), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2046, 2048), False, 'import unittest\n'), ((1000, 1051), 'os.path.join', 'os.path.join', (['self.INPUT_FOLDER', 'test_case.filename'], {}), '(self.INPUT_FOLDER, test_case.filename)\n', (1012, 1051), False, 'import os\n'), ((1074, 1094), 'sentinelhub.read_data', 'read_data', (['file_path'], {}), '(file_path)\n', (1083, 1094), False, 'from sentinelhub import read_data, write_data, TestSentinelHub\n'), ((1676, 1728), 'os.path.join', 'os.path.join', (['self.OUTPUT_FOLDER', 'test_case.filename'], {}), '(self.OUTPUT_FOLDER, test_case.filename)\n', (1688, 1728), False, 'import os\n'), ((1745, 1775), 'sentinelhub.write_data', 'write_data', (['new_file_path', 'img'], {}), '(new_file_path, img)\n', (1755, 1775), False, 'from sentinelhub import read_data, write_data, TestSentinelHub\n'), ((1802, 1826), 'sentinelhub.read_data', 'read_data', (['new_file_path'], {}), '(new_file_path)\n', (1811, 1826), False, 'from sentinelhub import read_data, write_data, TestSentinelHub\n'), ((1309, 1332), 'platform.python_implementation', 'python_implementation', ([], {}), '()\n', (1330, 1332), False, 'from platform import python_implementation\n'), ((1387, 1399), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (1394, 1399), True, 'import numpy as np\n'), ((1923, 1951), 'numpy.array_equal', 'np.array_equal', (['img', 'new_img'], {}), '(img, new_img)\n', (1937, 1951), True, 'import numpy as np\n'), ((1526, 1538), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (1533, 1538), True, 'import numpy as np\n')]
"""Performs vector and matrix operations. 2020, <NAME> <<EMAIL>> """ import numpy as np from mathstuff import root_finding from typing import Callable, Tuple def legendre_polynomial(x: float, n: int) -> float: """Evaluate n-order Legendre polynomial. Args: x: Abscissa to evaluate. n: Polynomial order. Returns: Value of polynomial. """ if n == 0: return 1 elif n == 1: return x else: polynomials = [1, x] for k in range(1, n): new = ((2 * k + 1) * x * polynomials[-1] - k * polynomials[-2]) / (k + 1) polynomials.append(new) return polynomials[-1] def find_bisection_bounds( *, n: int, polynomial: Callable ) -> Tuple[Tuple[float, float], ...]: """Find root bounds for n-order polynomial. Args: n: Polynomial order. polynomial: Function to evaluate polynomial. Returns: Bisection bounds for n roots. """ k = 1 while True: bounds = np.linspace(-1, 1, k * n + 1) values = np.array([polynomial(x, n) for x in bounds]) mask = values[:-1] * values[1:] < 0 if sum(mask) == n: return tuple(x for x in zip(bounds[:-1][mask], bounds[1:][mask])) else: k += 1 def integrate_legendre(func: Callable, n: int, a: float, b: float) -> float: """Integrate with Legendre polynomials of order n. Args: func: Function to integrate. n: Polynomial order. a: Left bound of integration interval. b: Right bound of integration interval. Returns: Value of integral. """ # find bisection bounds of polynomial roots bounds = find_bisection_bounds(n=n, polynomial=legendre_polynomial) # find roots of polynomial roots = [ root_finding.hybrid_secant_bisection( x_left=bound[0], x_right=bound[1], func=legendre_polynomial, func_args=(n,), ) for bound in bounds ] # compute integration weights weights = np.array( [ 2.0 * (1.0 - root ** 2) / ((n * legendre_polynomial(root, n - 1)) ** 2) for root in roots ] ) # compute function values func_values = np.array([func(0.5 * ((b - a) * root + a + b)) for root in roots]) return 0.5 * (b - a) * sum(weights * func_values)
[ "numpy.linspace", "mathstuff.root_finding.hybrid_secant_bisection" ]
[((1020, 1049), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(k * n + 1)'], {}), '(-1, 1, k * n + 1)\n', (1031, 1049), True, 'import numpy as np\n'), ((1822, 1939), 'mathstuff.root_finding.hybrid_secant_bisection', 'root_finding.hybrid_secant_bisection', ([], {'x_left': 'bound[0]', 'x_right': 'bound[1]', 'func': 'legendre_polynomial', 'func_args': '(n,)'}), '(x_left=bound[0], x_right=bound[1],\n func=legendre_polynomial, func_args=(n,))\n', (1858, 1939), False, 'from mathstuff import root_finding\n')]
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function from wbia_cnn import utils from wbia_cnn import ingest_helpers from wbia_cnn import ingest_wbia from wbia_cnn.dataset import DataSet from os.path import join, basename, splitext import utool as ut print, rrr, profile = ut.inject2(__name__) NOCACHE_DATASET = ut.get_argflag(('--nocache-cnn', '--nocache-dataset')) def testdata_dataset(): dataset = get_wbia_patch_siam_dataset(max_examples=5) return dataset def testdata_patchmatch(): """ >>> from wbia_cnn.ingest_data import * # NOQA """ dataset = get_wbia_patch_siam_dataset(max_examples=5) data_fpath = dataset.data_fpath labels_fpath = dataset.labels_fpath data_cv2, labels = utils.load(data_fpath, labels_fpath) data = utils.convert_cv2_images_to_theano_images(data_cv2) return data, labels def testdata_patchmatch2(): """ >>> from wbia_cnn.ingest_data import * # NOQA """ dataset = get_wbia_patch_siam_dataset(max_examples=5) data_fpath = dataset.data_fpath labels_fpath = dataset.labels_fpath data, labels = utils.load(data_fpath, labels_fpath) return data, labels def get_extern_training_dpath(alias_key): return DataSet.from_alias_key(alias_key).training_dpath def view_training_directories(): r""" CommandLine: python -m wbia_cnn.ingest_data --test-view_training_directories Example: >>> # UTILITY_SCRIPT >>> from wbia_cnn.ingest_data import * # NOQA >>> result = view_training_directories() >>> print(result) """ ut.vd(ingest_wbia.get_juction_dpath()) def merge_datasets(dataset_list): """ Merges a list of dataset objects into a single combined dataset. 
""" def consensus_check_factory(): """ Returns a temporary function used to check that all incoming values with the same key are consistent """ from collections import defaultdict past_values = defaultdict(lambda: None) def consensus_check(value, key): assert ( past_values[key] is None or past_values[key] == value ), 'key=%r with value=%r does not agree with past_value=%r' % ( key, value, past_values[key], ) past_values[key] = value return value return consensus_check total_num_labels = 0 total_num_data = 0 input_alias_list = [dataset.alias_key for dataset in dataset_list] alias_key = 'combo_' + ut.hashstr27(repr(input_alias_list), hashlen=8) training_dpath = ut.ensure_app_resource_dir('wbia_cnn', 'training', alias_key) data_fpath = ut.unixjoin(training_dpath, alias_key + '_data.hdf5') labels_fpath = ut.unixjoin(training_dpath, alias_key + '_labels.hdf5') try: # Try and short circut cached loading merged_dataset = DataSet.from_alias_key(alias_key) return merged_dataset except (Exception, AssertionError) as ex: ut.printex( ex, 'alias definitions have changed. 
alias_key=%r' % (alias_key,), iswarning=True, ) # Build the dataset consensus_check = consensus_check_factory() for dataset in dataset_list: print(ut.get_file_nBytes_str(dataset.data_fpath)) print(dataset.data_fpath_dict['full']) print(dataset.num_labels) print(dataset.data_per_label) total_num_labels += dataset.num_labels total_num_data += dataset.data_per_label * dataset.num_labels # check that all data_dims agree data_shape = consensus_check(dataset.data_shape, 'data_shape') data_per_label = consensus_check(dataset.data_per_label, 'data_per_label') # hack record this import numpy as np data_dtype = np.uint8 label_dtype = np.int32 data = np.empty((total_num_data,) + data_shape, dtype=data_dtype) labels = np.empty(total_num_labels, dtype=label_dtype) # def iterable_assignment(): # pass data_left = 0 data_right = None labels_left = 0 labels_right = None for dataset in ut.ProgressIter(dataset_list, lbl='combining datasets', freq=1): X_all, y_all = dataset.subset('full') labels_right = labels_left + y_all.shape[0] data_right = data_left + X_all.shape[0] data[data_left:data_right] = X_all labels[labels_left:labels_right] = y_all data_left = data_right labels_left = labels_right ut.save_data(data_fpath, data) ut.save_data(labels_fpath, labels) labels = ut.load_data(labels_fpath) num_labels = len(labels) merged_dataset = DataSet.new_training_set( alias_key=alias_key, data_fpath=data_fpath, labels_fpath=labels_fpath, metadata_fpath=None, training_dpath=training_dpath, data_shape=data_shape, data_per_label=data_per_label, output_dims=1, num_labels=num_labels, ) return merged_dataset def grab_dataset(ds_tag=None, datatype='siam-patch'): if datatype == 'siam-patch': return grab_siam_dataset(ds_tag=ds_tag) elif datatype == 'siam-part': return get_wbia_part_siam_dataset() elif datatype == 'category': return grab_mnist_category_dataset() def grab_siam_dataset(ds_tag=None): r""" Will build the dataset using the command line if it doesn't exist CommandLine: python -m 
wbia_cnn.ingest_data --test-grab_siam_dataset --db mnist --show python -m wbia_cnn.ingest_data --test-grab_siam_dataset --db liberty --show python -m wbia_cnn.ingest_data --test-grab_siam_dataset --db PZ_MTEST --show python -m wbia_cnn.ingest_data --test-grab_siam_dataset --db PZ_MTEST --show --nohud --nometa python -m wbia_cnn.ingest_data --test-grab_siam_dataset --db liberty --show --nohud --nometa Example: >>> # ENABLE_DOCTEST >>> from wbia_cnn.ingest_data import * # NOQA >>> ds_tag = None >>> dataset = grab_siam_dataset(ds_tag=ds_tag) >>> ut.quit_if_noshow() >>> from wbia_cnn import draw_results >>> dataset.interact(ibs=dataset.getprop('ibs', None), key='test', chunck_sizes=(8, 4)) >>> ut.show_if_requested() """ if ds_tag is not None: try: return DataSet.from_alias_key(ds_tag) except Exception as ex: ut.printex( ex, 'Could not resolve alias. Need to rebuild dataset', keys=['ds_tag'] ) raise dbname = ut.get_argval('--db') if dbname == 'liberty': pairs = 250000 dataset = grab_liberty_siam_dataset(pairs) elif dbname == 'mnist': dataset = grab_mnist_siam_dataset() else: dataset = get_wbia_patch_siam_dataset() return dataset def grab_mnist_category_dataset_float(): r""" CommandLine: python -m wbia_cnn grab_mnist_category_dataset_float python -m wbia_cnn grab_mnist_category_dataset_float --show Example: >>> # DISABLE_DOCTEST >>> from wbia_cnn.ingest_data import * # NOQA >>> dataset = grab_mnist_category_dataset_float() >>> dataset.print_subset_info() >>> dataset.print_dir_tree() >>> ut.quit_if_noshow() >>> inter = dataset.interact() >>> ut.show_if_requested() """ import numpy as np training_dpath = ut.ensure_app_resource_dir('wbia_cnn', 'training') dataset = DataSet( name='mnist_float32', training_dpath=training_dpath, data_shape=(28, 28, 1) ) try: dataset.load() except IOError: data, labels, metadata = ingest_helpers.grab_mnist2() # Get indicies of test / train split splitset = np.array(metadata['splitset']) train_idxs = np.where(splitset == 'train')[0] test_idxs = 
np.where(splitset == 'test')[0] # Give dataset the full data dataset.save(data, labels, metadata, data_per_label=1) # And the split sets dataset.add_split('train', train_idxs) dataset.add_split('test', test_idxs) dataset.clear_cache() dataset.ensure_symlinked() return dataset def grab_mnist_category_dataset(): r""" CommandLine: python -m wbia_cnn grab_mnist_category_dataset python -m wbia_cnn grab_mnist_category_dataset_float python -m wbia_cnn grab_mnist_category_dataset --show Example: >>> # DISABLE_DOCTEST >>> from wbia_cnn.ingest_data import * # NOQA >>> dataset = grab_mnist_category_dataset() >>> dataset.print_subset_info() >>> dataset.print_dir_tree() >>> ut.quit_if_noshow() >>> inter = dataset.interact() >>> ut.show_if_requested() """ import numpy as np training_dpath = ut.ensure_app_resource_dir('wbia_cnn', 'training') dataset = DataSet( name='mnist_uint8', training_dpath=training_dpath, data_shape=(28, 28, 1) ) try: dataset.load() except IOError: data, labels, metadata = ingest_helpers.grab_mnist1() # Get indicies of test / train split train_idxs = np.arange(60000) test_idxs = np.arange(10000) + 60000 # Give dataset the full data dataset.save(data, labels, metadata, data_per_label=1) # And the split sets dataset.add_split('train', train_idxs) dataset.add_split('test', test_idxs) dataset.clear_cache() dataset.ensure_symlinked() return dataset def grab_mnist_siam_dataset(): r""" CommandLine: python -m wbia_cnn.ingest_data --test-grab_mnist_siam_dataset --show Example: >>> # ENABLE_DOCTEST >>> from wbia_cnn.ingest_data import * # NOQA >>> dataset = grab_mnist_siam_dataset() >>> ut.quit_if_noshow() >>> from wbia_cnn import draw_results >>> #ibsplugin.rrr() >>> flat_metadata = {} >>> data, labels = dataset.subset('full') >>> ut.quit_if_noshow() >>> dataset.interact() >>> ut.show_if_requested() """ training_dpath = ut.ensure_app_resource_dir('wbia_cnn', 'training') dataset = DataSet( name='mnist_pairs', training_dpath=training_dpath, data_shape=(28, 28, 1), ) 
try: dataset.load() except IOError: data_, labels_, metadata_ = ingest_helpers.grab_mnist2() data, labels = ingest_helpers.convert_category_to_siam_data(data_, labels_) dataset.save(data, labels, data_per_label=2) return dataset def grab_liberty_siam_dataset(pairs=250000): """ References: http://www.cs.ubc.ca/~mbrown/patchdata/patchdata.html https://github.com/osdf/datasets/blob/master/patchdata/dataset.py Notes: "info.txt" contains the match information Each row of info.txt corresponds corresponds to a separate patch, with the patches ordered from left to right and top to bottom in each bitmap image. 3 types of metadata files info.txt - contains patch ids that correspond with the order of patches in the bmp images In the format: pointid, unused interest.txt - interest points corresponding to patches with patchids has same number of rows as info.txt In the format: reference image id, x, y, orientation, scale (in log2 units) m50_<d>_<d>_0.txt - matches files patchID1 3DpointID1 unused1 patchID2 3DpointID2 unused2 CommandLine: python -m wbia_cnn.ingest_data --test-grab_liberty_siam_dataset --show Example: >>> # ENABLE_DOCTEST >>> from wbia_cnn.ingest_data import * # NOQA >>> pairs = 500 >>> dataset = grab_liberty_siam_dataset(pairs) >>> ut.quit_if_noshow() >>> from wbia_cnn import draw_results >>> #ibsplugin.rrr() >>> flat_metadata = {} >>> data, labels = dataset.subset('full') >>> ut.quit_if_noshow() >>> warped_patch1_list = data[::2] >>> warped_patch2_list = data[1::2] >>> dataset.interact() >>> ut.show_if_requested() """ datakw = { 'detector': 'dog', 'pairs': pairs, } assert datakw['detector'] in ['dog', 'harris'] assert pairs in [500, 50000, 100000, 250000] liberty_urls = { 'dog': 'http://www.cs.ubc.ca/~mbrown/patchdata/liberty.zip', 'harris': 'http://www.cs.ubc.ca/~mbrown/patchdata/liberty_harris.zip', } url = liberty_urls[datakw['detector']] ds_path = ut.grab_zipped_url(url) ds_name = splitext(basename(ds_path))[0] alias_key = 'liberty;' + ut.dict_str(datakw, 
nl=False, explicit=True) cfgstr = ','.join([str(val) for key, val in ut.iteritems_sorted(datakw)]) # TODO: allow a move of the base data prefix training_dpath = ut.ensure_app_resource_dir('wbia_cnn', 'training', ds_name) if ut.get_argflag('--vtd'): ut.vd(training_dpath) ut.ensuredir(training_dpath) data_fpath = join(training_dpath, 'liberty_data_' + cfgstr + '.pkl') labels_fpath = join(training_dpath, 'liberty_labels_' + cfgstr + '.pkl') if not ut.checkpath(data_fpath, verbose=True): data, labels = ingest_helpers.extract_liberty_style_patches(ds_path, pairs) ut.save_data(data_fpath, data) ut.save_data(labels_fpath, labels) # hack for caching num_labels labels = ut.load_data(labels_fpath) num_labels = len(labels) dataset = DataSet.new_training_set( alias_key=alias_key, data_fpath=data_fpath, labels_fpath=labels_fpath, metadata_fpath=None, training_dpath=training_dpath, data_shape=(64, 64, 1), data_per_label=2, output_dims=1, num_labels=num_labels, ) return dataset def get_wbia_patch_siam_dataset(**kwargs): """ CommandLine: python -m wbia_cnn.ingest_data --test-get_wbia_patch_siam_dataset --show python -m wbia_cnn.ingest_data --test-get_wbia_patch_siam_dataset --show --db PZ_Master1 --acfg_name default python -m wbia_cnn.ingest_data --test-get_wbia_patch_siam_dataset --show --db PZ_Master1 --acfg_name timectrl python -m wbia_cnn.ingest_data --test-get_wbia_patch_siam_dataset --show --db PZ_MTEST --acfg_name unctrl --dryrun Example: >>> # ENABLE_DOCTEST >>> from wbia_cnn.ingest_data import * # NOQA >>> from wbia_cnn import draw_results >>> import wbia >>> kwargs = {} # ut.argparse_dict({'max_examples': None, 'num_top': 3}) >>> dataset = get_wbia_patch_siam_dataset(**kwargs) >>> ut.quit_if_noshow() >>> dataset.interact() >>> ut.show_if_requested() """ datakw = ut.argparse_dict( { #'db': 'PZ_MTEST', 'max_examples': None, #'num_top': 3, 'num_top': None, 'min_featweight': 0.8 if not ut.WIN32 else None, 'controlled': True, 'colorspace': 'gray', 'acfg_name': None, }, 
alias_dict={'acfg_name': ['acfg', 'a']}, verbose=True, ) datakw.update(kwargs) # ut.get_func_kwargs(ingest_wbia.get_aidpairs_and_matches) if datakw['acfg_name'] is not None: del datakw['controlled'] if datakw['max_examples'] is None: del datakw['max_examples'] if datakw['num_top'] is None: del datakw['num_top'] with ut.Indenter('[LOAD IBEIS DB]'): import wbia dbname = ut.get_argval('--db', default='PZ_MTEST') ibs = wbia.opendb(dbname=dbname, defaultdb='PZ_MTEST') # Nets dir is the root dir for all training on this data training_dpath = ibs.get_neuralnet_dir() ut.ensuredir(training_dpath) print('\n\n[get_wbia_patch_siam_dataset] START') # log_dir = join(training_dpath, 'logs') # ut.start_logging(log_dir=log_dir) alias_key = ibs.get_dbname() + ';' + ut.dict_str(datakw, nl=False, explicit=True) try: if NOCACHE_DATASET: raise Exception('forced cache off') # Try and short circut cached loading dataset = DataSet.from_alias_key(alias_key) dataset.setprop('ibs', lambda: wbia.opendb(db=dbname)) return dataset except Exception as ex: ut.printex( ex, 'alias definitions have changed. 
alias_key=%r' % (alias_key,), iswarning=True, ) with ut.Indenter('[BuildDS]'): # Get training data pairs colorspace = datakw.pop('colorspace') patchmatch_tup = ingest_wbia.get_aidpairs_and_matches(ibs, **datakw) ( aid1_list, aid2_list, kpts1_m_list, kpts2_m_list, fm_list, metadata_lists, ) = patchmatch_tup # Extract and cache the data # TODO: metadata if ut.get_argflag('--dryrun'): print('exiting due to dry run') import sys sys.exit(0) tup = ingest_wbia.cached_patchmetric_training_data_fpaths( ibs, aid1_list, aid2_list, kpts1_m_list, kpts2_m_list, fm_list, metadata_lists, colorspace=colorspace, ) data_fpath, labels_fpath, metadata_fpath, training_dpath, data_shape = tup print('\n[get_wbia_patch_siam_dataset] FINISH\n\n') # hack for caching num_labels labels = ut.load_data(labels_fpath) num_labels = len(labels) dataset = DataSet.new_training_set( alias_key=alias_key, data_fpath=data_fpath, labels_fpath=labels_fpath, metadata_fpath=metadata_fpath, training_dpath=training_dpath, data_shape=data_shape, data_per_label=2, output_dims=1, num_labels=num_labels, ) dataset.setprop('ibs', ibs) return dataset def get_wbia_part_siam_dataset(**kwargs): """ PARTS based network data CommandLine: python -m wbia_cnn.ingest_data --test-get_wbia_part_siam_dataset --show python -m wbia_cnn.ingest_data --test-get_wbia_part_siam_dataset --show --db PZ_Master1 --acfg_name timectrl python -m wbia_cnn.ingest_data --test-get_wbia_part_siam_dataset --show --db PZ_MTEST --acfg_name unctrl --dryrun Example: >>> # ENABLE_DOCTEST >>> from wbia_cnn.ingest_data import * # NOQA >>> from wbia_cnn import draw_results >>> import wbia >>> kwargs = {} # ut.argparse_dict({'max_examples': None, 'num_top': 3}) >>> dataset = get_wbia_part_siam_dataset(**kwargs) >>> ut.quit_if_noshow() >>> dataset.interact(ibs=dataset.getprop('ibs')) >>> ut.show_if_requested() """ import wbia datakw = ut.argparse_dict( { 'colorspace': 'gray', 'acfg_name': 'ctrl', #'db': None, 'db': 'PZ_MTEST', }, alias_dict={'acfg_name': 
['acfg']}, verbose=True, ) datakw.update(kwargs) print('\n\n[get_wbia_part_siam_dataset] START') alias_key = ut.dict_str(datakw, nl=False, explicit=True) dbname = datakw.pop('db') try: if NOCACHE_DATASET: raise Exception('forced cache off') # Try and short circut cached loading dataset = DataSet.from_alias_key(alias_key) dataset.setprop('ibs', lambda: wbia.opendb(db=dbname)) return dataset except Exception as ex: ut.printex( ex, 'alias definitions have changed. alias_key=%r' % (alias_key,), iswarning=True, ) with ut.Indenter('[LOAD IBEIS DB]'): ibs = wbia.opendb(db=dbname) # Nets dir is the root dir for all training on this data training_dpath = ibs.get_neuralnet_dir() ut.ensuredir(training_dpath) with ut.Indenter('[BuildDS]'): # Get training data pairs colorspace = datakw.pop('colorspace') (aid_pairs, label_list, flat_metadata) = ingest_wbia.get_aidpairs_partmatch( ibs, **datakw ) # Extract and cache the data, labels, and metadata if ut.get_argflag('--dryrun'): print('exiting due to dry run') import sys sys.exit(0) tup = ingest_wbia.cached_part_match_training_data_fpaths( ibs, aid_pairs, label_list, flat_metadata, colorspace=colorspace ) data_fpath, labels_fpath, metadata_fpath, training_dpath, data_shape = tup print('\n[get_wbia_part_siam_dataset] FINISH\n\n') # hack for caching num_labels labels = ut.load_data(labels_fpath) num_labels = len(labels) dataset = DataSet.new_training_set( alias_key=alias_key, data_fpath=data_fpath, labels_fpath=labels_fpath, metadata_fpath=metadata_fpath, training_dpath=training_dpath, data_shape=data_shape, data_per_label=2, output_dims=1, num_labels=num_labels, ) dataset.setprop('ibs', ibs) return dataset def get_numpy_dataset(data_fpath, labels_fpath, training_dpath): """""" import numpy as np # hack for caching num_labels data = np.load(data_fpath) data_shape = data.shape[1:] labels = np.load(labels_fpath) num_labels = len(labels) alias_key = 'temp' ut.ensuredir(training_dpath) dataset = DataSet.new_training_set( 
alias_key=alias_key, data_fpath=data_fpath, labels_fpath=labels_fpath, metadata_fpath=None, training_dpath=training_dpath, data_shape=data_shape, data_per_label=1, output_dims=1, num_labels=num_labels, ) return dataset def get_numpy_dataset2(name, data_fpath, labels_fpath, training_dpath, cache=True): """""" import numpy as np # hack for caching num_labels data = np.load(data_fpath) data_shape = data.shape[1:] labels = np.load(labels_fpath) num_labels = len(labels) metadata = None dataset = DataSet( name=name, training_dpath=training_dpath, data_shape=data_shape, ) error = False try: dataset.load() except IOError: error = True if error or not cache: import random # Get indicies of valid / train split idx_list = list(range(num_labels)) random.shuffle(idx_list) split_idx = int(num_labels * 0.80) train_idxs = np.array(idx_list[:split_idx]) valid_idxs = np.array(idx_list[split_idx:]) # Give dataset the full data dataset.save(data, labels, metadata, data_per_label=1) # And the split sets dataset.add_split('train', train_idxs) dataset.add_split('valid', valid_idxs) dataset.clear_cache() print('LOADING FROM DATASET RAW') dataset.ensure_symlinked() return dataset if __name__ == '__main__': """ CommandLine: python -m wbia_cnn.ingest_data python -m wbia_cnn.ingest_data --allexamples python -m wbia_cnn.ingest_data --allexamples --noface --nosrc """ import multiprocessing multiprocessing.freeze_support() # for win32 import utool as ut # NOQA ut.doctest_funcs()
[ "utool.dict_str", "utool.grab_zipped_url", "wbia_cnn.dataset.DataSet.from_alias_key", "utool.doctest_funcs", "numpy.array", "wbia_cnn.dataset.DataSet.new_training_set", "multiprocessing.freeze_support", "wbia_cnn.utils.convert_cv2_images_to_theano_images", "wbia_cnn.ingest_helpers.extract_liberty_st...
[((309, 329), 'utool.inject2', 'ut.inject2', (['__name__'], {}), '(__name__)\n', (319, 329), True, 'import utool as ut\n'), ((350, 404), 'utool.get_argflag', 'ut.get_argflag', (["('--nocache-cnn', '--nocache-dataset')"], {}), "(('--nocache-cnn', '--nocache-dataset'))\n", (364, 404), True, 'import utool as ut\n'), ((761, 797), 'wbia_cnn.utils.load', 'utils.load', (['data_fpath', 'labels_fpath'], {}), '(data_fpath, labels_fpath)\n', (771, 797), False, 'from wbia_cnn import utils\n'), ((809, 860), 'wbia_cnn.utils.convert_cv2_images_to_theano_images', 'utils.convert_cv2_images_to_theano_images', (['data_cv2'], {}), '(data_cv2)\n', (850, 860), False, 'from wbia_cnn import utils\n'), ((1135, 1171), 'wbia_cnn.utils.load', 'utils.load', (['data_fpath', 'labels_fpath'], {}), '(data_fpath, labels_fpath)\n', (1145, 1171), False, 'from wbia_cnn import utils\n'), ((2661, 2722), 'utool.ensure_app_resource_dir', 'ut.ensure_app_resource_dir', (['"""wbia_cnn"""', '"""training"""', 'alias_key'], {}), "('wbia_cnn', 'training', alias_key)\n", (2687, 2722), True, 'import utool as ut\n'), ((2740, 2793), 'utool.unixjoin', 'ut.unixjoin', (['training_dpath', "(alias_key + '_data.hdf5')"], {}), "(training_dpath, alias_key + '_data.hdf5')\n", (2751, 2793), True, 'import utool as ut\n'), ((2813, 2868), 'utool.unixjoin', 'ut.unixjoin', (['training_dpath', "(alias_key + '_labels.hdf5')"], {}), "(training_dpath, alias_key + '_labels.hdf5')\n", (2824, 2868), True, 'import utool as ut\n'), ((3917, 3975), 'numpy.empty', 'np.empty', (['((total_num_data,) + data_shape)'], {'dtype': 'data_dtype'}), '((total_num_data,) + data_shape, dtype=data_dtype)\n', (3925, 3975), True, 'import numpy as np\n'), ((3989, 4034), 'numpy.empty', 'np.empty', (['total_num_labels'], {'dtype': 'label_dtype'}), '(total_num_labels, dtype=label_dtype)\n', (3997, 4034), True, 'import numpy as np\n'), ((4186, 4249), 'utool.ProgressIter', 'ut.ProgressIter', (['dataset_list'], {'lbl': '"""combining datasets"""', 'freq': '(1)'}), 
"(dataset_list, lbl='combining datasets', freq=1)\n", (4201, 4249), True, 'import utool as ut\n'), ((4560, 4590), 'utool.save_data', 'ut.save_data', (['data_fpath', 'data'], {}), '(data_fpath, data)\n', (4572, 4590), True, 'import utool as ut\n'), ((4595, 4629), 'utool.save_data', 'ut.save_data', (['labels_fpath', 'labels'], {}), '(labels_fpath, labels)\n', (4607, 4629), True, 'import utool as ut\n'), ((4644, 4670), 'utool.load_data', 'ut.load_data', (['labels_fpath'], {}), '(labels_fpath)\n', (4656, 4670), True, 'import utool as ut\n'), ((4722, 4974), 'wbia_cnn.dataset.DataSet.new_training_set', 'DataSet.new_training_set', ([], {'alias_key': 'alias_key', 'data_fpath': 'data_fpath', 'labels_fpath': 'labels_fpath', 'metadata_fpath': 'None', 'training_dpath': 'training_dpath', 'data_shape': 'data_shape', 'data_per_label': 'data_per_label', 'output_dims': '(1)', 'num_labels': 'num_labels'}), '(alias_key=alias_key, data_fpath=data_fpath,\n labels_fpath=labels_fpath, metadata_fpath=None, training_dpath=\n training_dpath, data_shape=data_shape, data_per_label=data_per_label,\n output_dims=1, num_labels=num_labels)\n', (4746, 4974), False, 'from wbia_cnn.dataset import DataSet\n'), ((6627, 6648), 'utool.get_argval', 'ut.get_argval', (['"""--db"""'], {}), "('--db')\n", (6640, 6648), True, 'import utool as ut\n'), ((7491, 7541), 'utool.ensure_app_resource_dir', 'ut.ensure_app_resource_dir', (['"""wbia_cnn"""', '"""training"""'], {}), "('wbia_cnn', 'training')\n", (7517, 7541), True, 'import utool as ut\n'), ((7556, 7644), 'wbia_cnn.dataset.DataSet', 'DataSet', ([], {'name': '"""mnist_float32"""', 'training_dpath': 'training_dpath', 'data_shape': '(28, 28, 1)'}), "(name='mnist_float32', training_dpath=training_dpath, data_shape=(28,\n 28, 1))\n", (7563, 7644), False, 'from wbia_cnn.dataset import DataSet\n'), ((8899, 8949), 'utool.ensure_app_resource_dir', 'ut.ensure_app_resource_dir', (['"""wbia_cnn"""', '"""training"""'], {}), "('wbia_cnn', 'training')\n", (8925, 8949), 
True, 'import utool as ut\n'), ((8964, 9051), 'wbia_cnn.dataset.DataSet', 'DataSet', ([], {'name': '"""mnist_uint8"""', 'training_dpath': 'training_dpath', 'data_shape': '(28, 28, 1)'}), "(name='mnist_uint8', training_dpath=training_dpath, data_shape=(28, \n 28, 1))\n", (8971, 9051), False, 'from wbia_cnn.dataset import DataSet\n'), ((10202, 10252), 'utool.ensure_app_resource_dir', 'ut.ensure_app_resource_dir', (['"""wbia_cnn"""', '"""training"""'], {}), "('wbia_cnn', 'training')\n", (10228, 10252), True, 'import utool as ut\n'), ((10267, 10354), 'wbia_cnn.dataset.DataSet', 'DataSet', ([], {'name': '"""mnist_pairs"""', 'training_dpath': 'training_dpath', 'data_shape': '(28, 28, 1)'}), "(name='mnist_pairs', training_dpath=training_dpath, data_shape=(28, \n 28, 1))\n", (10274, 10354), False, 'from wbia_cnn.dataset import DataSet\n'), ((12722, 12745), 'utool.grab_zipped_url', 'ut.grab_zipped_url', (['url'], {}), '(url)\n', (12740, 12745), True, 'import utool as ut\n'), ((13016, 13075), 'utool.ensure_app_resource_dir', 'ut.ensure_app_resource_dir', (['"""wbia_cnn"""', '"""training"""', 'ds_name'], {}), "('wbia_cnn', 'training', ds_name)\n", (13042, 13075), True, 'import utool as ut\n'), ((13083, 13106), 'utool.get_argflag', 'ut.get_argflag', (['"""--vtd"""'], {}), "('--vtd')\n", (13097, 13106), True, 'import utool as ut\n'), ((13142, 13170), 'utool.ensuredir', 'ut.ensuredir', (['training_dpath'], {}), '(training_dpath)\n', (13154, 13170), True, 'import utool as ut\n'), ((13189, 13244), 'os.path.join', 'join', (['training_dpath', "('liberty_data_' + cfgstr + '.pkl')"], {}), "(training_dpath, 'liberty_data_' + cfgstr + '.pkl')\n", (13193, 13244), False, 'from os.path import join, basename, splitext\n'), ((13264, 13321), 'os.path.join', 'join', (['training_dpath', "('liberty_labels_' + cfgstr + '.pkl')"], {}), "(training_dpath, 'liberty_labels_' + cfgstr + '.pkl')\n", (13268, 13321), False, 'from os.path import join, basename, splitext\n'), ((13588, 13614), 
'utool.load_data', 'ut.load_data', (['labels_fpath'], {}), '(labels_fpath)\n', (13600, 13614), True, 'import utool as ut\n'), ((13659, 13899), 'wbia_cnn.dataset.DataSet.new_training_set', 'DataSet.new_training_set', ([], {'alias_key': 'alias_key', 'data_fpath': 'data_fpath', 'labels_fpath': 'labels_fpath', 'metadata_fpath': 'None', 'training_dpath': 'training_dpath', 'data_shape': '(64, 64, 1)', 'data_per_label': '(2)', 'output_dims': '(1)', 'num_labels': 'num_labels'}), '(alias_key=alias_key, data_fpath=data_fpath,\n labels_fpath=labels_fpath, metadata_fpath=None, training_dpath=\n training_dpath, data_shape=(64, 64, 1), data_per_label=2, output_dims=1,\n num_labels=num_labels)\n', (13683, 13899), False, 'from wbia_cnn.dataset import DataSet\n'), ((14923, 15154), 'utool.argparse_dict', 'ut.argparse_dict', (["{'max_examples': None, 'num_top': None, 'min_featweight': 0.8 if not ut.\n WIN32 else None, 'controlled': True, 'colorspace': 'gray', 'acfg_name':\n None}"], {'alias_dict': "{'acfg_name': ['acfg', 'a']}", 'verbose': '(True)'}), "({'max_examples': None, 'num_top': None, 'min_featweight': \n 0.8 if not ut.WIN32 else None, 'controlled': True, 'colorspace': 'gray',\n 'acfg_name': None}, alias_dict={'acfg_name': ['acfg', 'a']}, verbose=True)\n", (14939, 15154), True, 'import utool as ut\n'), ((15917, 15945), 'utool.ensuredir', 'ut.ensuredir', (['training_dpath'], {}), '(training_dpath)\n', (15929, 15945), True, 'import utool as ut\n'), ((17654, 17680), 'utool.load_data', 'ut.load_data', (['labels_fpath'], {}), '(labels_fpath)\n', (17666, 17680), True, 'import utool as ut\n'), ((17725, 17973), 'wbia_cnn.dataset.DataSet.new_training_set', 'DataSet.new_training_set', ([], {'alias_key': 'alias_key', 'data_fpath': 'data_fpath', 'labels_fpath': 'labels_fpath', 'metadata_fpath': 'metadata_fpath', 'training_dpath': 'training_dpath', 'data_shape': 'data_shape', 'data_per_label': '(2)', 'output_dims': '(1)', 'num_labels': 'num_labels'}), '(alias_key=alias_key, 
data_fpath=data_fpath,\n labels_fpath=labels_fpath, metadata_fpath=metadata_fpath,\n training_dpath=training_dpath, data_shape=data_shape, data_per_label=2,\n output_dims=1, num_labels=num_labels)\n', (17749, 17973), False, 'from wbia_cnn.dataset import DataSet\n'), ((18981, 19114), 'utool.argparse_dict', 'ut.argparse_dict', (["{'colorspace': 'gray', 'acfg_name': 'ctrl', 'db': 'PZ_MTEST'}"], {'alias_dict': "{'acfg_name': ['acfg']}", 'verbose': '(True)'}), "({'colorspace': 'gray', 'acfg_name': 'ctrl', 'db':\n 'PZ_MTEST'}, alias_dict={'acfg_name': ['acfg']}, verbose=True)\n", (18997, 19114), True, 'import utool as ut\n'), ((19310, 19354), 'utool.dict_str', 'ut.dict_str', (['datakw'], {'nl': '(False)', 'explicit': '(True)'}), '(datakw, nl=False, explicit=True)\n', (19321, 19354), True, 'import utool as ut\n'), ((20023, 20051), 'utool.ensuredir', 'ut.ensuredir', (['training_dpath'], {}), '(training_dpath)\n', (20035, 20051), True, 'import utool as ut\n'), ((20822, 20848), 'utool.load_data', 'ut.load_data', (['labels_fpath'], {}), '(labels_fpath)\n', (20834, 20848), True, 'import utool as ut\n'), ((20893, 21141), 'wbia_cnn.dataset.DataSet.new_training_set', 'DataSet.new_training_set', ([], {'alias_key': 'alias_key', 'data_fpath': 'data_fpath', 'labels_fpath': 'labels_fpath', 'metadata_fpath': 'metadata_fpath', 'training_dpath': 'training_dpath', 'data_shape': 'data_shape', 'data_per_label': '(2)', 'output_dims': '(1)', 'num_labels': 'num_labels'}), '(alias_key=alias_key, data_fpath=data_fpath,\n labels_fpath=labels_fpath, metadata_fpath=metadata_fpath,\n training_dpath=training_dpath, data_shape=data_shape, data_per_label=2,\n output_dims=1, num_labels=num_labels)\n', (20917, 21141), False, 'from wbia_cnn.dataset import DataSet\n'), ((21407, 21426), 'numpy.load', 'np.load', (['data_fpath'], {}), '(data_fpath)\n', (21414, 21426), True, 'import numpy as np\n'), ((21472, 21493), 'numpy.load', 'np.load', (['labels_fpath'], {}), '(labels_fpath)\n', (21479, 21493), True, 
'import numpy as np\n'), ((21551, 21579), 'utool.ensuredir', 'ut.ensuredir', (['training_dpath'], {}), '(training_dpath)\n', (21563, 21579), True, 'import utool as ut\n'), ((21595, 21834), 'wbia_cnn.dataset.DataSet.new_training_set', 'DataSet.new_training_set', ([], {'alias_key': 'alias_key', 'data_fpath': 'data_fpath', 'labels_fpath': 'labels_fpath', 'metadata_fpath': 'None', 'training_dpath': 'training_dpath', 'data_shape': 'data_shape', 'data_per_label': '(1)', 'output_dims': '(1)', 'num_labels': 'num_labels'}), '(alias_key=alias_key, data_fpath=data_fpath,\n labels_fpath=labels_fpath, metadata_fpath=None, training_dpath=\n training_dpath, data_shape=data_shape, data_per_label=1, output_dims=1,\n num_labels=num_labels)\n', (21619, 21834), False, 'from wbia_cnn.dataset import DataSet\n'), ((22086, 22105), 'numpy.load', 'np.load', (['data_fpath'], {}), '(data_fpath)\n', (22093, 22105), True, 'import numpy as np\n'), ((22151, 22172), 'numpy.load', 'np.load', (['labels_fpath'], {}), '(labels_fpath)\n', (22158, 22172), True, 'import numpy as np\n'), ((22237, 22309), 'wbia_cnn.dataset.DataSet', 'DataSet', ([], {'name': 'name', 'training_dpath': 'training_dpath', 'data_shape': 'data_shape'}), '(name=name, training_dpath=training_dpath, data_shape=data_shape)\n', (22244, 22309), False, 'from wbia_cnn.dataset import DataSet\n'), ((23355, 23387), 'multiprocessing.freeze_support', 'multiprocessing.freeze_support', ([], {}), '()\n', (23385, 23387), False, 'import multiprocessing\n'), ((23437, 23455), 'utool.doctest_funcs', 'ut.doctest_funcs', ([], {}), '()\n', (23453, 23455), True, 'import utool as ut\n'), ((1251, 1284), 'wbia_cnn.dataset.DataSet.from_alias_key', 'DataSet.from_alias_key', (['alias_key'], {}), '(alias_key)\n', (1273, 1284), False, 'from wbia_cnn.dataset import DataSet\n'), ((1624, 1655), 'wbia_cnn.ingest_wbia.get_juction_dpath', 'ingest_wbia.get_juction_dpath', ([], {}), '()\n', (1653, 1655), False, 'from wbia_cnn import ingest_wbia\n'), ((2022, 2048), 
'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (2033, 2048), False, 'from collections import defaultdict\n'), ((2950, 2983), 'wbia_cnn.dataset.DataSet.from_alias_key', 'DataSet.from_alias_key', (['alias_key'], {}), '(alias_key)\n', (2972, 2983), False, 'from wbia_cnn.dataset import DataSet\n'), ((12821, 12865), 'utool.dict_str', 'ut.dict_str', (['datakw'], {'nl': '(False)', 'explicit': '(True)'}), '(datakw, nl=False, explicit=True)\n', (12832, 12865), True, 'import utool as ut\n'), ((13116, 13137), 'utool.vd', 'ut.vd', (['training_dpath'], {}), '(training_dpath)\n', (13121, 13137), True, 'import utool as ut\n'), ((13334, 13372), 'utool.checkpath', 'ut.checkpath', (['data_fpath'], {'verbose': '(True)'}), '(data_fpath, verbose=True)\n', (13346, 13372), True, 'import utool as ut\n'), ((13397, 13457), 'wbia_cnn.ingest_helpers.extract_liberty_style_patches', 'ingest_helpers.extract_liberty_style_patches', (['ds_path', 'pairs'], {}), '(ds_path, pairs)\n', (13441, 13457), False, 'from wbia_cnn import ingest_helpers\n'), ((13466, 13496), 'utool.save_data', 'ut.save_data', (['data_fpath', 'data'], {}), '(data_fpath, data)\n', (13478, 13496), True, 'import utool as ut\n'), ((13505, 13539), 'utool.save_data', 'ut.save_data', (['labels_fpath', 'labels'], {}), '(labels_fpath, labels)\n', (13517, 13539), True, 'import utool as ut\n'), ((15631, 15661), 'utool.Indenter', 'ut.Indenter', (['"""[LOAD IBEIS DB]"""'], {}), "('[LOAD IBEIS DB]')\n", (15642, 15661), True, 'import utool as ut\n'), ((15701, 15742), 'utool.get_argval', 'ut.get_argval', (['"""--db"""'], {'default': '"""PZ_MTEST"""'}), "('--db', default='PZ_MTEST')\n", (15714, 15742), True, 'import utool as ut\n'), ((15757, 15805), 'wbia.opendb', 'wbia.opendb', ([], {'dbname': 'dbname', 'defaultdb': '"""PZ_MTEST"""'}), "(dbname=dbname, defaultdb='PZ_MTEST')\n", (15768, 15805), False, 'import wbia\n'), ((16126, 16170), 'utool.dict_str', 'ut.dict_str', (['datakw'], {'nl': '(False)', 
'explicit': '(True)'}), '(datakw, nl=False, explicit=True)\n', (16137, 16170), True, 'import utool as ut\n'), ((16320, 16353), 'wbia_cnn.dataset.DataSet.from_alias_key', 'DataSet.from_alias_key', (['alias_key'], {}), '(alias_key)\n', (16342, 16353), False, 'from wbia_cnn.dataset import DataSet\n'), ((16627, 16651), 'utool.Indenter', 'ut.Indenter', (['"""[BuildDS]"""'], {}), "('[BuildDS]')\n", (16638, 16651), True, 'import utool as ut\n'), ((16758, 16809), 'wbia_cnn.ingest_wbia.get_aidpairs_and_matches', 'ingest_wbia.get_aidpairs_and_matches', (['ibs'], {}), '(ibs, **datakw)\n', (16794, 16809), False, 'from wbia_cnn import ingest_wbia\n'), ((17067, 17093), 'utool.get_argflag', 'ut.get_argflag', (['"""--dryrun"""'], {}), "('--dryrun')\n", (17081, 17093), True, 'import utool as ut\n'), ((17201, 17363), 'wbia_cnn.ingest_wbia.cached_patchmetric_training_data_fpaths', 'ingest_wbia.cached_patchmetric_training_data_fpaths', (['ibs', 'aid1_list', 'aid2_list', 'kpts1_m_list', 'kpts2_m_list', 'fm_list', 'metadata_lists'], {'colorspace': 'colorspace'}), '(ibs, aid1_list,\n aid2_list, kpts1_m_list, kpts2_m_list, fm_list, metadata_lists,\n colorspace=colorspace)\n', (17252, 17363), False, 'from wbia_cnn import ingest_wbia\n'), ((19536, 19569), 'wbia_cnn.dataset.DataSet.from_alias_key', 'DataSet.from_alias_key', (['alias_key'], {}), '(alias_key)\n', (19558, 19569), False, 'from wbia_cnn.dataset import DataSet\n'), ((19843, 19873), 'utool.Indenter', 'ut.Indenter', (['"""[LOAD IBEIS DB]"""'], {}), "('[LOAD IBEIS DB]')\n", (19854, 19873), True, 'import utool as ut\n'), ((19889, 19911), 'wbia.opendb', 'wbia.opendb', ([], {'db': 'dbname'}), '(db=dbname)\n', (19900, 19911), False, 'import wbia\n'), ((20062, 20086), 'utool.Indenter', 'ut.Indenter', (['"""[BuildDS]"""'], {}), "('[BuildDS]')\n", (20073, 20086), True, 'import utool as ut\n'), ((20217, 20266), 'wbia_cnn.ingest_wbia.get_aidpairs_partmatch', 'ingest_wbia.get_aidpairs_partmatch', (['ibs'], {}), '(ibs, **datakw)\n', (20251, 
20266), False, 'from wbia_cnn import ingest_wbia\n'), ((20359, 20385), 'utool.get_argflag', 'ut.get_argflag', (['"""--dryrun"""'], {}), "('--dryrun')\n", (20373, 20385), True, 'import utool as ut\n'), ((20493, 20613), 'wbia_cnn.ingest_wbia.cached_part_match_training_data_fpaths', 'ingest_wbia.cached_part_match_training_data_fpaths', (['ibs', 'aid_pairs', 'label_list', 'flat_metadata'], {'colorspace': 'colorspace'}), '(ibs, aid_pairs,\n label_list, flat_metadata, colorspace=colorspace)\n', (20543, 20613), False, 'from wbia_cnn import ingest_wbia\n'), ((22580, 22604), 'random.shuffle', 'random.shuffle', (['idx_list'], {}), '(idx_list)\n', (22594, 22604), False, 'import random\n'), ((22670, 22700), 'numpy.array', 'np.array', (['idx_list[:split_idx]'], {}), '(idx_list[:split_idx])\n', (22678, 22700), True, 'import numpy as np\n'), ((22722, 22752), 'numpy.array', 'np.array', (['idx_list[split_idx:]'], {}), '(idx_list[split_idx:])\n', (22730, 22752), True, 'import numpy as np\n'), ((3068, 3166), 'utool.printex', 'ut.printex', (['ex', "('alias definitions have changed. alias_key=%r' % (alias_key,))"], {'iswarning': '(True)'}), "(ex, 'alias definitions have changed. 
alias_key=%r' % (alias_key,\n ), iswarning=True)\n", (3078, 3166), True, 'import utool as ut\n'), ((3330, 3372), 'utool.get_file_nBytes_str', 'ut.get_file_nBytes_str', (['dataset.data_fpath'], {}), '(dataset.data_fpath)\n', (3352, 3372), True, 'import utool as ut\n'), ((6406, 6436), 'wbia_cnn.dataset.DataSet.from_alias_key', 'DataSet.from_alias_key', (['ds_tag'], {}), '(ds_tag)\n', (6428, 6436), False, 'from wbia_cnn.dataset import DataSet\n'), ((7740, 7768), 'wbia_cnn.ingest_helpers.grab_mnist2', 'ingest_helpers.grab_mnist2', ([], {}), '()\n', (7766, 7768), False, 'from wbia_cnn import ingest_helpers\n'), ((7833, 7863), 'numpy.array', 'np.array', (["metadata['splitset']"], {}), "(metadata['splitset'])\n", (7841, 7863), True, 'import numpy as np\n'), ((9146, 9174), 'wbia_cnn.ingest_helpers.grab_mnist1', 'ingest_helpers.grab_mnist1', ([], {}), '()\n', (9172, 9174), False, 'from wbia_cnn import ingest_helpers\n'), ((9241, 9257), 'numpy.arange', 'np.arange', (['(60000)'], {}), '(60000)\n', (9250, 9257), True, 'import numpy as np\n'), ((10469, 10497), 'wbia_cnn.ingest_helpers.grab_mnist2', 'ingest_helpers.grab_mnist2', ([], {}), '()\n', (10495, 10497), False, 'from wbia_cnn import ingest_helpers\n'), ((10521, 10581), 'wbia_cnn.ingest_helpers.convert_category_to_siam_data', 'ingest_helpers.convert_category_to_siam_data', (['data_', 'labels_'], {}), '(data_, labels_)\n', (10565, 10581), False, 'from wbia_cnn import ingest_helpers\n'), ((12770, 12787), 'os.path.basename', 'basename', (['ds_path'], {}), '(ds_path)\n', (12778, 12787), False, 'from os.path import join, basename, splitext\n'), ((16476, 16574), 'utool.printex', 'ut.printex', (['ex', "('alias definitions have changed. alias_key=%r' % (alias_key,))"], {'iswarning': '(True)'}), "(ex, 'alias definitions have changed. 
alias_key=%r' % (alias_key,\n ), iswarning=True)\n", (16486, 16574), True, 'import utool as ut\n'), ((17175, 17186), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (17183, 17186), False, 'import sys\n'), ((19692, 19790), 'utool.printex', 'ut.printex', (['ex', "('alias definitions have changed. alias_key=%r' % (alias_key,))"], {'iswarning': '(True)'}), "(ex, 'alias definitions have changed. alias_key=%r' % (alias_key,\n ), iswarning=True)\n", (19702, 19790), True, 'import utool as ut\n'), ((20467, 20478), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (20475, 20478), False, 'import sys\n'), ((6481, 6569), 'utool.printex', 'ut.printex', (['ex', '"""Could not resolve alias. Need to rebuild dataset"""'], {'keys': "['ds_tag']"}), "(ex, 'Could not resolve alias. Need to rebuild dataset', keys=[\n 'ds_tag'])\n", (6491, 6569), True, 'import utool as ut\n'), ((7885, 7914), 'numpy.where', 'np.where', (["(splitset == 'train')"], {}), "(splitset == 'train')\n", (7893, 7914), True, 'import numpy as np\n'), ((7938, 7966), 'numpy.where', 'np.where', (["(splitset == 'test')"], {}), "(splitset == 'test')\n", (7946, 7966), True, 'import numpy as np\n'), ((9278, 9294), 'numpy.arange', 'np.arange', (['(10000)'], {}), '(10000)\n', (9287, 9294), True, 'import numpy as np\n'), ((12914, 12941), 'utool.iteritems_sorted', 'ut.iteritems_sorted', (['datakw'], {}), '(datakw)\n', (12933, 12941), True, 'import utool as ut\n'), ((16393, 16415), 'wbia.opendb', 'wbia.opendb', ([], {'db': 'dbname'}), '(db=dbname)\n', (16404, 16415), False, 'import wbia\n'), ((19609, 19631), 'wbia.opendb', 'wbia.opendb', ([], {'db': 'dbname'}), '(db=dbname)\n', (19620, 19631), False, 'import wbia\n')]
from openrec import ModelTrainer from openrec.utils import Dataset from openrec.recommenders import YouTubeRec from openrec.utils.evaluators import AUC, Recall from openrec.utils.samplers import YouTubeSampler, YouTubeEvaluationSampler import numpy as np train_data = np.load('dataset/lastfm/lastfm_train.npy') test_data = np.load('dataset/lastfm/lastfm_test.npy') user_feature = np.load('dataset/lastfm/user_feature.npy') total_users = 992 total_items = 14598 user_dict = {'gender': 3, 'geo': 67} item_dict = {'id': total_items} dim_item_embed = {'total': 50, 'id': 50} dim_user_embed = {'total': 30, 'geo': 20, 'gender': 10} max_seq_len = 20 total_iter = int(1e5) batch_size = 100 eval_iter = 100 save_iter = eval_iter train_dataset = Dataset(train_data, total_users, total_items, sortby='ts', name='Train') test_dataset = Dataset(test_data, total_users, total_items, sortby='ts', name='Test') train_sampler = YouTubeSampler(user_feature=user_feature, batch_size=batch_size, max_seq_len=max_seq_len, dataset=train_dataset, num_process=1) test_sampler = YouTubeEvaluationSampler(user_feature=user_feature, dataset=test_dataset, max_seq_len=max_seq_len) model = YouTubeRec(batch_size=batch_size, user_dict=user_dict, item_dict=item_dict, max_seq_len=max_seq_len, dim_item_embed=dim_item_embed, dim_user_embed=dim_user_embed, save_model_dir='youtube_recommender/', train=True, serve=True) model_trainer = ModelTrainer(model=model) auc_evaluator = AUC() recall_evaluator = Recall(recall_at=[100, 200, 300, 400, 500]) model_trainer.train(total_iter=total_iter, eval_iter=eval_iter, save_iter=save_iter,train_sampler=train_sampler, eval_samplers=[test_sampler], evaluators=[auc_evaluator, recall_evaluator])
[ "openrec.utils.samplers.YouTubeEvaluationSampler", "openrec.utils.evaluators.Recall", "openrec.utils.evaluators.AUC", "openrec.utils.samplers.YouTubeSampler", "openrec.utils.Dataset", "numpy.load", "openrec.ModelTrainer", "openrec.recommenders.YouTubeRec" ]
[((269, 311), 'numpy.load', 'np.load', (['"""dataset/lastfm/lastfm_train.npy"""'], {}), "('dataset/lastfm/lastfm_train.npy')\n", (276, 311), True, 'import numpy as np\n'), ((324, 365), 'numpy.load', 'np.load', (['"""dataset/lastfm/lastfm_test.npy"""'], {}), "('dataset/lastfm/lastfm_test.npy')\n", (331, 365), True, 'import numpy as np\n'), ((381, 423), 'numpy.load', 'np.load', (['"""dataset/lastfm/user_feature.npy"""'], {}), "('dataset/lastfm/user_feature.npy')\n", (388, 423), True, 'import numpy as np\n'), ((744, 816), 'openrec.utils.Dataset', 'Dataset', (['train_data', 'total_users', 'total_items'], {'sortby': '"""ts"""', 'name': '"""Train"""'}), "(train_data, total_users, total_items, sortby='ts', name='Train')\n", (751, 816), False, 'from openrec.utils import Dataset\n'), ((856, 926), 'openrec.utils.Dataset', 'Dataset', (['test_data', 'total_users', 'total_items'], {'sortby': '"""ts"""', 'name': '"""Test"""'}), "(test_data, total_users, total_items, sortby='ts', name='Test')\n", (863, 926), False, 'from openrec.utils import Dataset\n'), ((972, 1103), 'openrec.utils.samplers.YouTubeSampler', 'YouTubeSampler', ([], {'user_feature': 'user_feature', 'batch_size': 'batch_size', 'max_seq_len': 'max_seq_len', 'dataset': 'train_dataset', 'num_process': '(1)'}), '(user_feature=user_feature, batch_size=batch_size,\n max_seq_len=max_seq_len, dataset=train_dataset, num_process=1)\n', (986, 1103), False, 'from openrec.utils.samplers import YouTubeSampler, YouTubeEvaluationSampler\n'), ((1115, 1217), 'openrec.utils.samplers.YouTubeEvaluationSampler', 'YouTubeEvaluationSampler', ([], {'user_feature': 'user_feature', 'dataset': 'test_dataset', 'max_seq_len': 'max_seq_len'}), '(user_feature=user_feature, dataset=test_dataset,\n max_seq_len=max_seq_len)\n', (1139, 1217), False, 'from openrec.utils.samplers import YouTubeSampler, YouTubeEvaluationSampler\n'), ((1224, 1462), 'openrec.recommenders.YouTubeRec', 'YouTubeRec', ([], {'batch_size': 'batch_size', 'user_dict': 'user_dict', 
'item_dict': 'item_dict', 'max_seq_len': 'max_seq_len', 'dim_item_embed': 'dim_item_embed', 'dim_user_embed': 'dim_user_embed', 'save_model_dir': '"""youtube_recommender/"""', 'train': '(True)', 'serve': '(True)'}), "(batch_size=batch_size, user_dict=user_dict, item_dict=item_dict,\n max_seq_len=max_seq_len, dim_item_embed=dim_item_embed, dim_user_embed=\n dim_user_embed, save_model_dir='youtube_recommender/', train=True,\n serve=True)\n", (1234, 1462), False, 'from openrec.recommenders import YouTubeRec\n'), ((1604, 1629), 'openrec.ModelTrainer', 'ModelTrainer', ([], {'model': 'model'}), '(model=model)\n', (1616, 1629), False, 'from openrec import ModelTrainer\n'), ((1647, 1652), 'openrec.utils.evaluators.AUC', 'AUC', ([], {}), '()\n', (1650, 1652), False, 'from openrec.utils.evaluators import AUC, Recall\n'), ((1672, 1715), 'openrec.utils.evaluators.Recall', 'Recall', ([], {'recall_at': '[100, 200, 300, 400, 500]'}), '(recall_at=[100, 200, 300, 400, 500])\n', (1678, 1715), False, 'from openrec.utils.evaluators import AUC, Recall\n')]
import numpy as np import mxnet as mx from mxnet import gluon import gluoncv as gcv from .nets import * from .dataset import * __all__ = ['get_data_loader', 'get_network', 'imagenet_batch_fn', 'default_batch_fn', 'default_val_fn', 'default_train_fn'] def get_data_loader(dataset, input_size, batch_size, num_workers, final_fit): if isinstance(dataset, AutoGluonObject): dataset = dataset.init() if isinstance(dataset, str): train_dataset = get_built_in_dataset(dataset, train=True, input_size=input_size, batch_size=batch_size, num_workers=num_workers).init() val_dataset = get_built_in_dataset(dataset, train=False, input_size=input_size, batch_size=batch_size, num_workers=num_workers).init() else: train_dataset = dataset.train val_dataset = dataset.val if val_dataset is None and not final_fit: train_dataset, val_dataset = _train_val_split(train_dataset) if isinstance(dataset, str) and dataset.lower() == 'imagenet': train_data = train_dataset val_data = val_dataset batch_fn = imagenet_batch_fn imagenet_samples = 1281167 num_batches = imagenet_samples // batch_size else: train_data = gluon.data.DataLoader( train_dataset, batch_size=batch_size, shuffle=True, last_batch="rollover", num_workers=num_workers) val_data = None if not final_fit: val_data = gluon.data.DataLoader( val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers) batch_fn = default_batch_fn num_batches = len(train_data) return train_data, val_data, batch_fn, num_batches def get_network(net, num_classes, ctx): if type(net) == str: net = get_built_in_network(net, num_classes, ctx=ctx) else: net.initialize(ctx=ctx) return net def imagenet_batch_fn(batch, ctx): data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0) label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0) return data, label def default_batch_fn(batch, ctx): data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0) label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, 
batch_axis=0) return data, label def default_val_fn(net, batch, batch_fn, metric, ctx): with mx.autograd.pause(train_mode=False): data, label = batch_fn(batch, ctx) outputs = [net(X) for X in data] metric.update(label, outputs) def default_train_fn(net, batch, batch_size, criterion, trainer, batch_fn, ctx): data, label = batch_fn(batch, ctx) with mx.autograd.record(): outputs = [net(X) for X in data] loss = [criterion(yhat, y) for yhat, y in zip(outputs, label)] for l in loss: l.backward() trainer.step(batch_size, ignore_stale_grad=True) def _train_val_split(train_dataset, split_ratio=0.2): num_samples = len(train_dataset) split_idx = int(num_samples * split_ratio) val_sampler = SplitSampler(0, split_idx) train_sampler = SplitSampler(split_idx, num_samples) return _SampledDataset(train_dataset, train_sampler), _SampledDataset(train_dataset, val_sampler) class SplitSampler(object): """Samples elements from [start, start+length) randomly without replacement. Parameters ---------- length : int Length of the sequence. """ def __init__(self, start, end): self._start = start self._end = end self._length = end - start def __iter__(self): indices = list(range(self._start, self._end)) np.random.shuffle(indices) return iter(indices) def __len__(self): return self._length class _SampledDataset(mx.gluon.data.Dataset): """Dataset with elements chosen by a sampler""" def __init__(self, dataset, sampler): self._dataset = dataset self._sampler = sampler self._indices = list(iter(sampler)) def __len__(self): return len(self._sampler) def __getitem__(self, idx): return self._dataset[self._indices[idx]]
[ "mxnet.autograd.record", "mxnet.gluon.utils.split_and_load", "mxnet.autograd.pause", "mxnet.gluon.data.DataLoader", "numpy.random.shuffle" ]
[((2204, 2273), 'mxnet.gluon.utils.split_and_load', 'gluon.utils.split_and_load', (['batch.data[0]'], {'ctx_list': 'ctx', 'batch_axis': '(0)'}), '(batch.data[0], ctx_list=ctx, batch_axis=0)\n', (2230, 2273), False, 'from mxnet import gluon\n'), ((2286, 2356), 'mxnet.gluon.utils.split_and_load', 'gluon.utils.split_and_load', (['batch.label[0]'], {'ctx_list': 'ctx', 'batch_axis': '(0)'}), '(batch.label[0], ctx_list=ctx, batch_axis=0)\n', (2312, 2356), False, 'from mxnet import gluon\n'), ((2426, 2490), 'mxnet.gluon.utils.split_and_load', 'gluon.utils.split_and_load', (['batch[0]'], {'ctx_list': 'ctx', 'batch_axis': '(0)'}), '(batch[0], ctx_list=ctx, batch_axis=0)\n', (2452, 2490), False, 'from mxnet import gluon\n'), ((2503, 2567), 'mxnet.gluon.utils.split_and_load', 'gluon.utils.split_and_load', (['batch[1]'], {'ctx_list': 'ctx', 'batch_axis': '(0)'}), '(batch[1], ctx_list=ctx, batch_axis=0)\n', (2529, 2567), False, 'from mxnet import gluon\n'), ((1491, 1616), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'last_batch': '"""rollover"""', 'num_workers': 'num_workers'}), "(train_dataset, batch_size=batch_size, shuffle=True,\n last_batch='rollover', num_workers=num_workers)\n", (1512, 1616), False, 'from mxnet import gluon\n'), ((2656, 2691), 'mxnet.autograd.pause', 'mx.autograd.pause', ([], {'train_mode': '(False)'}), '(train_mode=False)\n', (2673, 2691), True, 'import mxnet as mx\n'), ((2941, 2961), 'mxnet.autograd.record', 'mx.autograd.record', ([], {}), '()\n', (2959, 2961), True, 'import mxnet as mx\n'), ((3920, 3946), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (3937, 3946), True, 'import numpy as np\n'), ((1711, 1812), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['val_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(val_dataset, batch_size=batch_size, shuffle=False,\n 
num_workers=num_workers)\n', (1732, 1812), False, 'from mxnet import gluon\n')]
''' ''' # # Adapted from MATLAB code written by <NAME> (see Nishimoto, et al., 2011). # <NAME> (Jan, 2016) # # Updates: # <NAME> (Apr, 2020) # import itertools from PIL import Image import numpy as np from moten.utils import (DotDict, iterator_func, log_compress, sqrt_sum_squares, pointwise_square, ) ############################## # ############################## def raw_project_stimulus(stimulus, filters, vhsize=(), dtype='float32'): '''Obtain responses to the stimuli from all filter quadrature-pairs. Parameters ---------- stimulus : np.ndarray, (nimages, vdim, hdim) or (nimages, npixels) The movie frames. If `stimulus` is two-dimensional with shape (nimages, npixels), then `vhsize=(vdim,hdim)` is required and `npixels == vdim*hdim`. Returns ------- output_sin : np.ndarray, (nimages, nfilters) output_cos : np.ndarray, (nimages, nfilters) ''' # parameters if stimulus.ndim == 3: nimages, vdim, hdim = stimulus.shape stimulus = stimulus.reshape(stimulus.shape[0], -1) vhsize = (vdim, hdim) # checks for 2D stimuli assert stimulus.ndim == 2 # (nimages, pixels) assert isinstance(vhsize, tuple) and len(vhsize) == 2 # (hdim, vdim) assert np.product(vhsize) == stimulus.shape[1] # hdim*vdim == pixels # Compute responses nfilters = len(filters) nimages = stimulus.shape[0] sin_responses = np.zeros((nimages, nfilters), dtype=dtype) cos_responses = np.zeros((nimages, nfilters), dtype=dtype) for gaborid, gabor_parameters in iterator_func(enumerate(filters), 'project_stimulus', total=len(filters)): sgabor0, sgabor90, tgabor0, tgabor90 = mk_3d_gabor(vhsize, **gabor_parameters) channel_sin, channel_cos = dotdelay_frames(sgabor0, sgabor90, tgabor0, tgabor90, stimulus) sin_responses[:, gaborid] = channel_sin cos_responses[:, gaborid] = channel_cos return sin_responses, cos_responses def project_stimulus(stimulus, filters, quadrature_combination=sqrt_sum_squares, output_nonlinearity=log_compress, vhsize=(), dtype='float32'): '''Compute the motion energy filter responses to the stimuli. 
Parameters ---------- stimulus : np.ndarray, (nimages, vdim, hdim) or (nimages, npixels) The movie frames. If `stimulus` is two-dimensional with shape (nimages, npixels), then `vhsize=(vdim,hdim)` is required and `npixels == vdim*hdim`. Returns ------- filter_responses : np.ndarray, (nimages, nfilters) ''' # parameters if stimulus.ndim == 3: nimages, vdim, hdim = stimulus.shape stimulus = stimulus.reshape(stimulus.shape[0], -1) vhsize = (vdim, hdim) # checks for 2D stimuli assert stimulus.ndim == 2 # (nimages, pixels) assert isinstance(vhsize, tuple) and len(vhsize) == 2 # (hdim, vdim) assert np.product(vhsize) == stimulus.shape[1] # hdim*vdim == pixels # Compute responses nfilters = len(filters) nimages = stimulus.shape[0] filter_responses = np.zeros((nimages, nfilters), dtype=dtype) for gaborid, gabor_parameters in iterator_func(enumerate(filters), 'project_stimulus', total=len(filters)): sgabor0, sgabor90, tgabor0, tgabor90 = mk_3d_gabor(vhsize, **gabor_parameters) channel_sin, channel_cos = dotdelay_frames(sgabor0, sgabor90, tgabor0, tgabor90, stimulus) channel_response = quadrature_combination(channel_sin, channel_cos) channel_response = output_nonlinearity(channel_response) filter_responses[:, gaborid] = channel_response return filter_responses ############################## # core functionality ############################## def mk_3d_gabor(vhsize, stimulus_fps, aspect_ratio='auto', filter_temporal_width='auto', centerh=0.5, centerv=0.5, direction=45.0, spatial_freq=16.0, spatial_env=0.3, temporal_freq=2.0, temporal_env=0.3, spatial_phase_offset=0.0, ): '''Make a motion energy filter. A motion energy filter is a 3D gabor with two spatial and one temporal dimension. Each dimension is defined by two sine waves which differ in phase by 90 degrees. The sine waves are then multiplied by a gaussian. 
Parameters ---------- vhsize : tuple of ints, (vdim, hdim) Size of the stimulus in pixels (vdim, hdim) `vdim` : vertical dimension `hdim` : horizontal dimension stimulus_fps : scalar, [Hz] Stimulus playback speed in frames per second. centerv : scalar Vertical filter position from top of frame (min=0, max=1.0). centerh : scalar Horizontal filter position from left of frame (min=0, max=aspect_ratio). direction : scalar, [degrees] Direction of filter motion. Degree position corresponds to standard unit-circle coordinates (i.e. 0=right, 180=left). spatial_freq : float, [cycles-per-image] Spatial frequency of the filter. temporal_freq : float, [Hz] Temporal frequency of the filter filter_temporal_width : int Temporal window of the motion energy filter (e.g. 10). Defaults to approximately 0.666[secs] (`floor(stimulus_fps*(2/3))`). aspect_ratio : optional, 'auto' or float-like, Defaults to stimulus aspect ratio: hdim/vdim Useful for preserving the spatial gabors circular even when images have non-square aspect ratios. For example, a 16:9 image would have `aspect_ratio`=16/9. spatial_env : float Spatial envelope (s.d. of the gaussian) temporal_env : float Temporal envelope (s.d. of gaussian) spatial_phase_offset : float, [degrees Phase offset for the spatial sinusoid Returns ------- spatial_gabor_sin : 2D np.ndarray, (vdim, hdim) spatial_gabor_cos : 2D np.ndarray, (vdim, hdim) Spatial gabor quadrature pair. ``spatial_gabor_cos`` has a 90 degree phase offset relative to ``spatial_gabor_sin`` temporal_gabor_sin : 1D np.ndarray, (`filter_temporal_width`,) temporal_gabor_cos : 1D np.ndarray, (`filter_temporal_width`,) Temporal gabor quadrature pair. ``temporal_gabor_cos`` has a 90 degree phase offset relative to ``temporal_gabor_sin`` Notes ----- Same method as Nishimoto, et al., 2011. 
''' vdim, hdim = vhsize if aspect_ratio == 'auto': aspect_ratio = hdim/float(vdim) if filter_temporal_width == 'auto': filter_temporal_width = int(stimulus_fps*(2/3.)) # cast filter width to integer frames assert np.allclose(filter_temporal_width, int(filter_temporal_width)) filter_temporal_width = int(filter_temporal_width) dh = np.linspace(0, aspect_ratio, hdim, endpoint=True) dv = np.linspace(0, 1, vdim, endpoint=True) dt = np.linspace(0, 1, filter_temporal_width, endpoint=False) # AN: Actually, `dt` should include endpoint. # Currently, the center of the filter width is +(1./fps)/2. # However, this would break backwards compatibility. # TODO: Allow for `dt_endpoint` as an argument # and set default to False. ihs, ivs = np.meshgrid(dh,dv) fh = -spatial_freq*np.cos(direction/180.*np.pi)*2*np.pi fv = spatial_freq*np.sin(direction/180.*np.pi)*2*np.pi # normalize temporal frequency to wavelet size ft = np.real(temporal_freq*(filter_temporal_width/float(stimulus_fps)))*2*np.pi # spatial filters spatial_gaussian = np.exp(-((ihs - centerh)**2 + (ivs - centerv)**2)/(2*spatial_env**2)) spatial_grating_sin = np.sin((ihs - centerh)*fh + (ivs - centerv)*fv + spatial_phase_offset) spatial_grating_cos = np.cos((ihs - centerh)*fh + (ivs - centerv)*fv + spatial_phase_offset) spatial_gabor_sin = spatial_gaussian * spatial_grating_sin spatial_gabor_cos = spatial_gaussian * spatial_grating_cos ############################## temporal_gaussian = np.exp(-(dt - 0.5)**2/(2*temporal_env**2)) temporal_grating_sin = np.sin((dt - 0.5)*ft) temporal_grating_cos = np.cos((dt - 0.5)*ft) temporal_gabor_sin = temporal_gaussian*temporal_grating_sin temporal_gabor_cos = temporal_gaussian*temporal_grating_cos return spatial_gabor_sin, spatial_gabor_cos, temporal_gabor_sin, temporal_gabor_cos def generate_3dgabor_array(vhsize=(576,1024), stimulus_fps=24, aspect_ratio='auto', filter_temporal_width='auto', centerh=0.5, centerv=0.5, direction=45.0, spatial_freq=16.0, spatial_env=0.3, temporal_freq=2.0, 
temporal_env=0.3, phase_offset=0.0): ''' ''' vdim, hdim = vhsize if aspect_ratio == 'auto': aspect_ratio = hdim/float(vdim) if filter_temporal_width == 'auto': filter_temporal_width = int(stimulus_fps*(2/3.)) gabor_components = mk_3d_gabor(vhsize, stimulus_fps=stimulus_fps, aspect_ratio=aspect_ratio, filter_temporal_width=filter_temporal_width, centerh=centerh, centerv=centerv, direction=direction, spatial_freq=spatial_freq, spatial_env=spatial_env, temporal_freq=temporal_freq, temporal_env=temporal_env, phase_offset=phase_offset, ) gabor_video = mk_spatiotemporal_gabor(*gabor_components) return gabor_video def dotspatial_frames(spatial_gabor_sin, spatial_gabor_cos, stimulus, masklimit=0.001): '''Dot the spatial gabor filters filter with the stimulus Parameters ---------- spatial_gabor_sin : np.array, (vdim,hdim) spatial_gabor_cos : np.array, (vdim,hdim) Spatial gabor quadrature pair stimulus : 2D np.array (nimages, vdim*hdim) The movie frames with the spatial dimension collapsed. masklimit : float-like Threshold to find the non-zero filter region Returns ------- channel_sin : np.ndarray, (nimages, ) channel_cos : np.ndarray, (nimages, ) The filter response to each stimulus The quadrature pair can be combined: (x^2 + y^2)^0.5 ''' gabors = np.asarray([spatial_gabor_sin.ravel(), spatial_gabor_cos.ravel()]) # dot the gabors with the stimulus mask = np.abs(gabors).sum(0) > masklimit gabor_prod = (gabors[:,mask].squeeze() @ stimulus.T[mask].squeeze()).T gabor_sin, gabor_cos = gabor_prod[:,0], gabor_prod[:,1] return gabor_sin, gabor_cos def dotdelay_frames(spatial_gabor_sin, spatial_gabor_cos, temporal_gabor_sin, temporal_gabor_cos, stimulus, masklimit=0.001): '''Convolve the motion energy filter with a stimulus Parameters ---------- spatial_gabor_sin : np.array, (vdim,hdim) spatial_gabor_cos : np.array, (vdim,hdim) Spatial gabor quadrature pair temporal_gabor_sin : np.array, (temporal_filter_width,) temporal_gabor_cos : np.array, (temporal_filter_width,) Temporal gabor 
quadrature pair stimulus : 2D np.array (nimages, vdim*hdim) The movie frames with the spatial dimension collapsed. Returns ------- channel_sin : np.ndarray, (nimages, ) channel_cos : np.ndarray, (nimages, ) The filter response to the stimulus at each time point The quadrature pair can be combined: (x^2 + y^2)^0.5 ''' gabor_sin, gabor_cos = dotspatial_frames(spatial_gabor_sin, spatial_gabor_cos, stimulus, masklimit=masklimit) gabor_prod = np.c_[gabor_sin, gabor_cos] temporal_gabors = np.asarray([temporal_gabor_sin, temporal_gabor_cos]) # dot the product with the temporal gabors outs = gabor_prod[:, [0]] @ temporal_gabors[[1]] + gabor_prod[:, [1]] @ temporal_gabors[[0]] outc = -gabor_prod[:, [0]] @ temporal_gabors[[0]] + gabor_prod[:, [1]] @ temporal_gabors[[1]] # sum across delays nouts = np.zeros_like(outs) noutc = np.zeros_like(outc) tdxc = int(np.ceil(outs.shape[1]/2.0)) delays = np.arange(outs.shape[1])-tdxc +1 for ddx, num in enumerate(delays): if num == 0: nouts[:, ddx] = outs[:,ddx] noutc[:, ddx] = outc[:,ddx] elif num > 0: nouts[num:, ddx] = outs[:-num,ddx] noutc[num:, ddx] = outc[:-num,ddx] elif num < 0: nouts[:num, ddx] = outs[abs(num):,ddx] noutc[:num, ddx] = outc[abs(num):,ddx] channel_sin = nouts.sum(-1) channel_cos = noutc.sum(-1) return channel_sin, channel_cos def mk_spatiotemporal_gabor(spatial_gabor_sin, spatial_gabor_cos, temporal_gabor_sin, temporal_gabor_cos): '''Make 3D motion energy filter defined by the spatial and temporal gabors. Takes the output of :func:`mk_3d_gabor` and constructs the 3D filter. This is useful for visualization. 
Parameters ---------- spatial_gabor_sin : np.array, (vdim,hdim) spatial_gabor_cos : np.array, (vdim,hdim) Spatial gabor quadrature pair temporal_gabor_sin : np.array, (filter_temporal_width,) temporal_gabor_cos : np.array, (filter_temporal_width,) Temporal gabor quadrature pair Returns ------- motion_energy_filter : np.array, (vdim, hdim, filter_temporal_width) The motion energy filter ''' a = -spatial_gabor_sin.ravel()[...,None] @ temporal_gabor_sin[...,None].T b = spatial_gabor_cos.ravel()[...,None] @ temporal_gabor_cos[...,None].T x,y = spatial_gabor_sin.shape t = temporal_gabor_sin.shape[0] return (a+b).reshape(x,y,t) def compute_spatial_gabor_responses(stimulus, aspect_ratio='auto', spatial_frequencies=[0,2,4,8,16,32], quadrature_combination=sqrt_sum_squares, output_nonlinearity=log_compress, dtype=np.float64, dozscore=True): """Compute the spatial gabor filters' response to each stimulus. Parameters ---------- stimulus : 3D np.array (n, vdim, hdim) The stimulus frames. spatial_frequencies : array-like The spatial frequencies to compute. The spatial envelope is determined by this. quadrature_combination : function, optional Specifies how to combine the channel reponses quadratures. The function must take the sin and cos as arguments in order. Defaults to: (sin^2 + cos^2)^1/2 output_nonlinearity : function, optional Passes the channels (after `quadrature_combination`) through a non-linearity. The function input is the (`n`,`nfilters`) array. 
Defaults to: ln(x + 1e-05) dozscore : bool, optional Whether to z-score the channel responses in time dtype : np.dtype Defaults to np.float64 Returns ------- filter_responses : np.array, (n, nfilters) """ nimages, vdim, hdim = stimulus.shape vhsize = (vdim, hdim) if aspect_ratio == 'auto': aspect_ratio = hdim/float(vdim) stimulus = stimulus.reshape(stimulus.shape[0], -1) parameter_names, gabor_parameters = mk_moten_pyramid_params( 1., # fps filter_temporal_width=1., aspect_ratio=aspect_ratio, temporal_frequencies=[0.], spatial_directions=[0.], spatial_frequencies=spatial_frequencies, ) ngabors = gabor_parameters.shape[0] filters = [{name : gabor_parameters[idx, pdx] for pdx, name \ in enumerate(parameter_names)} \ for idx in range(ngabors)] info = 'Computing responses for #%i filters across #%i images (aspect_ratio=%0.03f)' print(info%(len(gabor_parameters), nimages, aspect_ratio)) channels = np.zeros((nimages, len(gabor_parameters)), dtype=dtype) for idx, gabor_param_dict in iterator_func(enumerate(filters), '%s.compute_spatial_gabor_responses'%__name__, total=len(gabor_parameters)): sgabor_sin, sgabor_cos, _, _ = mk_3d_gabor(vhsize, **gabor_param_dict) channel_sin, channel_cos = dotspatial_frames(sgabor_sin, sgabor_cos, stimulus) channel = quadrature_combination(channel_sin, channel_cos) channels[:, idx] = channel channels = output_nonlinearity(channels) if dozscore: from scipy.stats import zscore channels = zscore(channels) return channels def compute_filter_responses(stimulus, stimulus_fps, aspect_ratio='auto', filter_temporal_width='auto', quadrature_combination=sqrt_sum_squares, output_nonlinearity=log_compress, dozscore=True, dtype=np.float64, pyramid_parameters={}): """Compute the motion energy filters' response to the stimuli. Parameters ---------- stimulus : 3D np.array (n, vdim, hdim) The movie frames. stimulus_fps : scalar The temporal frequency of the stimulus aspect_ratio : bool, or scalar Defaults to hdim/vdim. 
Otherwise, pass as scalar filter_temporal_width : int, None The number of frames in one filter. Defaults to approximately 0.666[secs] (floor(stimulus_fps*(2/3))). quadrature_combination : function, optional Specifies how to combine the channel reponses quadratures. The function must take the sin and cos as arguments in order. Defaults to: (sin^2 + cos^2)^1/2 output_nonlinearity : function, optional Passes the channels (after `quadrature_combination`) through a non-linearity. The function input is the (`n`,`nfilters`) array. Defaults to: ln(x + 1e-05) dozscore : bool, optional Whether to z-score the channel responses in time dtype : np.dtype Defaults to np.float64 pyramid_parameters: dict See :func:`mk_moten_pyramid_params` for details on parameters specifiying a motion energy pyramid. Returns ------- filter_responses : np.array, (n, nfilters) """ nimages, vdim, hdim = stimulus.shape stimulus = stimulus.reshape(stimulus.shape[0], -1) vhsize = (vdim, hdim) if aspect_ratio == 'auto': aspect_ratio = hdim/float(vdim) if filter_temporal_width == 'auto': filter_temporal_width = int(stimulus_fps*(2./3.)) # pass parameters pkwargs = dict(aspect_ratio=aspect_ratio, filter_temporal_width=filter_temporal_width) pkwargs.update(**pyramid_parameters) parameter_names, gabor_parameters = mk_moten_pyramid_params(stimulus_fps, **pkwargs) ngabors = gabor_parameters.shape[0] filters = [{name : gabor_parameters[idx, pdx] for pdx, name \ in enumerate(parameter_names)} \ for idx in range(ngabors)] info = 'Computing responses for #%i filters across #%i images (aspect_ratio=%0.03f)' print(info%(len(gabor_parameters), nimages, aspect_ratio)) channels = np.zeros((nimages, len(gabor_parameters)), dtype=dtype) for idx, gabor_param_dict in iterator_func(enumerate(filters), '%s.compute_filter_responses'%__name__, total=len(filters)): gabor = mk_3d_gabor(vhsize, **gabor_param_dict) gabor0, gabor90, tgabor0, tgabor90 = gabor channel_sin, channel_cos = dotdelay_frames(gabor0, gabor90, tgabor0, 
tgabor90, stimulus, ) channel = quadrature_combination(channel_sin, channel_cos) channels[:,idx] = channel channels = output_nonlinearity(channels) if dozscore: from scipy.stats import zscore channels = zscore(channels) return channels def mk_moten_pyramid_params(stimulus_fps, filter_temporal_width='auto', aspect_ratio='auto', temporal_frequencies=[0,2,4], spatial_frequencies=[0,2,4,8,16,32], spatial_directions=[0,45,90,135,180,225,270,315], sf_gauss_ratio=0.6, max_spatial_env=0.3, gabor_spacing=3.5, tf_gauss_ratio=10., max_temp_env=0.3, spatial_phase_offset=0.0, include_edges=False, ): """Parametrize a motion energy pyramid that tiles the stimulus. Parameters ---------- stimulus_fps : scalar, [Hz] Stimulus playback speed in frames per second. spatial_frequencies : array-like, [cycles-per-image] Spatial frequencies for the filters spatial_directions : array-like, [degrees] Direction of filter motion. Degree position corresponds to standard unit-circle coordinates (i.e. 0=right, 180=left). temporal_frequencies : array-like, [Hz] Temporal frequencies of the filters filter_temporal_width : int Temporal window of the motion energy filter (e.g. 10). Defaults to approximately 0.666[secs] (`floor(stimulus_fps*(2/3))`). aspect_ratio : optional, 'auto' or float-like, Defaults to stimulus aspect ratio: hdim/vdim Useful for preserving the spatial gabors circular even when images have non-square aspect ratios. For example, a 16:9 image would have `aspect_ratio`=16/9. sf_gauss_ratio : scalar The ratio of spatial frequency to gaussian s.d. This controls the number of cycles in a filter max_spatial_env : scalar Defines the maximum s.d. of the gaussian gabor_spacing : scalar Defines the spacing between spatial gabors (in s.d. units) tf_gauss_ratio : scalar The ratio of temporal frequency to gaussian s.d. This controls the number of temporal cycles max_temp_env : scalar Defines the maximum s.d. 
of the temporal gaussian include_edges : bool Determines whether to include filters at the edge of the image which might be partially outside the stimulus field-of-view Returns ------- parameter_names : list of strings The name of the parameters gabor_parameters : 2D np.ndarray, (nfilters, 11) Parameters that define the motion energy filter Each of the `nfilters` has the following parameters: * centerv,centerh : y:vertical and x:horizontal position ('0,0' is top left) * direction : direction of motion [degrees] * spatial_freq : spatial frequency [cpi] * spatial_env : spatial envelope (gaussian s.d.) * temporal_freq : temporal frequency [Hz] * temporal_env : temporal envelope (gaussian s.d.) * filter_temporal_width : temporal window of filter [frames] * aspect_ratio : width/height * stimulus_fps : stimulus playback speed in frames per second * spatial_phase_offset : filter phase offset in [degrees] Notes ----- Same method as Nishimoto, et al., 2011. """ assert isinstance(aspect_ratio, (int, float, np.ndarray)) def compute_envelope(freq, ratio): return np.inf if freq == 0 else (1.0/freq)*ratio spatial_frequencies = np.asarray(spatial_frequencies) spatial_directions = np.asarray(spatial_directions) temporal_frequencies = np.asarray(temporal_frequencies) include_edges = int(include_edges) # We have to deal with zero frequency spatial filters differently include_local_dc = True if 0 in spatial_frequencies else False spatial_frequencies = np.asarray([t for t in spatial_frequencies if t != 0]) # add temporal envelope max params = list(itertools.product(spatial_frequencies, spatial_directions)) gabor_parameters = [] for spatial_freq, spatial_direction in params: spatial_env = min(compute_envelope(spatial_freq, sf_gauss_ratio), max_spatial_env) # compute the number of gaussians that will fit in the FOV vertical_space = np.floor(((1.0 - spatial_env*gabor_spacing)/(gabor_spacing*spatial_env))/2.0) horizontal_space = np.floor(((aspect_ratio - 
spatial_env*gabor_spacing)/(gabor_spacing*spatial_env))/2.0) # include the edges of screen? vertical_space = max(vertical_space, 0) + include_edges horizontal_space = max(horizontal_space, 0) + include_edges # get the spatial gabor locations ycenters = spatial_env*gabor_spacing*np.arange(-vertical_space, vertical_space+1) + 0.5 xcenters = spatial_env*gabor_spacing*np.arange(-horizontal_space, horizontal_space+1) + aspect_ratio/2. for ii, (cx, cy) in enumerate(itertools.product(xcenters,ycenters)): for temp_freq in temporal_frequencies: temp_env = min(compute_envelope(temp_freq, tf_gauss_ratio), max_temp_env) if temp_freq == 0 and spatial_direction >= 180: # 0Hz temporal filter doesn't have motion, so # 0 and 180 degrees orientations are the same filters continue gabor_parameters.append([cx, cy, spatial_direction, spatial_freq, spatial_env, temp_freq, temp_env, filter_temporal_width, aspect_ratio, stimulus_fps, spatial_phase_offset, ]) if spatial_direction == 0 and include_local_dc: # add local 0 spatial frequency non-directional temporal filter gabor_parameters.append([cx, cy, spatial_direction, 0., # zero spatial freq spatial_env, temp_freq, temp_env, filter_temporal_width, aspect_ratio, stimulus_fps, spatial_phase_offset, ]) parameter_names = ('centerh', 'centerv', 'direction', 'spatial_freq', 'spatial_env', 'temporal_freq', 'temporal_env', 'filter_temporal_width', 'aspect_ratio', 'stimulus_fps', 'spatial_phase_offset', ) gabor_parameters = np.asarray(gabor_parameters) return parameter_names, gabor_parameters
[ "numpy.product", "numpy.abs", "numpy.ceil", "itertools.product", "numpy.asarray", "numpy.floor", "numpy.exp", "numpy.zeros", "numpy.linspace", "scipy.stats.zscore", "numpy.cos", "numpy.sin", "numpy.meshgrid", "numpy.zeros_like", "numpy.arange" ]
[((1666, 1708), 'numpy.zeros', 'np.zeros', (['(nimages, nfilters)'], {'dtype': 'dtype'}), '((nimages, nfilters), dtype=dtype)\n', (1674, 1708), True, 'import numpy as np\n'), ((1729, 1771), 'numpy.zeros', 'np.zeros', (['(nimages, nfilters)'], {'dtype': 'dtype'}), '((nimages, nfilters), dtype=dtype)\n', (1737, 1771), True, 'import numpy as np\n'), ((3640, 3682), 'numpy.zeros', 'np.zeros', (['(nimages, nfilters)'], {'dtype': 'dtype'}), '((nimages, nfilters), dtype=dtype)\n', (3648, 3682), True, 'import numpy as np\n'), ((7665, 7714), 'numpy.linspace', 'np.linspace', (['(0)', 'aspect_ratio', 'hdim'], {'endpoint': '(True)'}), '(0, aspect_ratio, hdim, endpoint=True)\n', (7676, 7714), True, 'import numpy as np\n'), ((7724, 7762), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'vdim'], {'endpoint': '(True)'}), '(0, 1, vdim, endpoint=True)\n', (7735, 7762), True, 'import numpy as np\n'), ((7772, 7828), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'filter_temporal_width'], {'endpoint': '(False)'}), '(0, 1, filter_temporal_width, endpoint=False)\n', (7783, 7828), True, 'import numpy as np\n'), ((8099, 8118), 'numpy.meshgrid', 'np.meshgrid', (['dh', 'dv'], {}), '(dh, dv)\n', (8110, 8118), True, 'import numpy as np\n'), ((8419, 8498), 'numpy.exp', 'np.exp', (['(-((ihs - centerh) ** 2 + (ivs - centerv) ** 2) / (2 * spatial_env ** 2))'], {}), '(-((ihs - centerh) ** 2 + (ivs - centerv) ** 2) / (2 * spatial_env ** 2))\n', (8425, 8498), True, 'import numpy as np\n'), ((8516, 8590), 'numpy.sin', 'np.sin', (['((ihs - centerh) * fh + (ivs - centerv) * fv + spatial_phase_offset)'], {}), '((ihs - centerh) * fh + (ivs - centerv) * fv + spatial_phase_offset)\n', (8522, 8590), True, 'import numpy as np\n'), ((8613, 8687), 'numpy.cos', 'np.cos', (['((ihs - centerh) * fh + (ivs - centerv) * fv + spatial_phase_offset)'], {}), '((ihs - centerh) * fh + (ivs - centerv) * fv + spatial_phase_offset)\n', (8619, 8687), True, 'import numpy as np\n'), ((8871, 8921), 'numpy.exp', 'np.exp', 
(['(-(dt - 0.5) ** 2 / (2 * temporal_env ** 2))'], {}), '(-(dt - 0.5) ** 2 / (2 * temporal_env ** 2))\n', (8877, 8921), True, 'import numpy as np\n'), ((8941, 8964), 'numpy.sin', 'np.sin', (['((dt - 0.5) * ft)'], {}), '((dt - 0.5) * ft)\n', (8947, 8964), True, 'import numpy as np\n'), ((8990, 9013), 'numpy.cos', 'np.cos', (['((dt - 0.5) * ft)'], {}), '((dt - 0.5) * ft)\n', (8996, 9013), True, 'import numpy as np\n'), ((13101, 13153), 'numpy.asarray', 'np.asarray', (['[temporal_gabor_sin, temporal_gabor_cos]'], {}), '([temporal_gabor_sin, temporal_gabor_cos])\n', (13111, 13153), True, 'import numpy as np\n'), ((13469, 13488), 'numpy.zeros_like', 'np.zeros_like', (['outs'], {}), '(outs)\n', (13482, 13488), True, 'import numpy as np\n'), ((13501, 13520), 'numpy.zeros_like', 'np.zeros_like', (['outc'], {}), '(outc)\n', (13514, 13520), True, 'import numpy as np\n'), ((25471, 25502), 'numpy.asarray', 'np.asarray', (['spatial_frequencies'], {}), '(spatial_frequencies)\n', (25481, 25502), True, 'import numpy as np\n'), ((25528, 25558), 'numpy.asarray', 'np.asarray', (['spatial_directions'], {}), '(spatial_directions)\n', (25538, 25558), True, 'import numpy as np\n'), ((25586, 25618), 'numpy.asarray', 'np.asarray', (['temporal_frequencies'], {}), '(temporal_frequencies)\n', (25596, 25618), True, 'import numpy as np\n'), ((25822, 25876), 'numpy.asarray', 'np.asarray', (['[t for t in spatial_frequencies if t != 0]'], {}), '([t for t in spatial_frequencies if t != 0])\n', (25832, 25876), True, 'import numpy as np\n'), ((29296, 29324), 'numpy.asarray', 'np.asarray', (['gabor_parameters'], {}), '(gabor_parameters)\n', (29306, 29324), True, 'import numpy as np\n'), ((1492, 1510), 'numpy.product', 'np.product', (['vhsize'], {}), '(vhsize)\n', (1502, 1510), True, 'import numpy as np\n'), ((3463, 3481), 'numpy.product', 'np.product', (['vhsize'], {}), '(vhsize)\n', (3473, 3481), True, 'import numpy as np\n'), ((13536, 13564), 'numpy.ceil', 'np.ceil', (['(outs.shape[1] / 2.0)'], {}), 
'(outs.shape[1] / 2.0)\n', (13543, 13564), True, 'import numpy as np\n'), ((18123, 18139), 'scipy.stats.zscore', 'zscore', (['channels'], {}), '(channels)\n', (18129, 18139), False, 'from scipy.stats import zscore\n'), ((21800, 21816), 'scipy.stats.zscore', 'zscore', (['channels'], {}), '(channels)\n', (21806, 21816), False, 'from scipy.stats import zscore\n'), ((25928, 25986), 'itertools.product', 'itertools.product', (['spatial_frequencies', 'spatial_directions'], {}), '(spatial_frequencies, spatial_directions)\n', (25945, 25986), False, 'import itertools\n'), ((26251, 26339), 'numpy.floor', 'np.floor', (['((1.0 - spatial_env * gabor_spacing) / (gabor_spacing * spatial_env) / 2.0)'], {}), '((1.0 - spatial_env * gabor_spacing) / (gabor_spacing * spatial_env\n ) / 2.0)\n', (26259, 26339), True, 'import numpy as np\n'), ((26356, 26452), 'numpy.floor', 'np.floor', (['((aspect_ratio - spatial_env * gabor_spacing) / (gabor_spacing *\n spatial_env) / 2.0)'], {}), '((aspect_ratio - spatial_env * gabor_spacing) / (gabor_spacing *\n spatial_env) / 2.0)\n', (26364, 26452), True, 'import numpy as np\n'), ((13577, 13601), 'numpy.arange', 'np.arange', (['outs.shape[1]'], {}), '(outs.shape[1])\n', (13586, 13601), True, 'import numpy as np\n'), ((26905, 26942), 'itertools.product', 'itertools.product', (['xcenters', 'ycenters'], {}), '(xcenters, ycenters)\n', (26922, 26942), False, 'import itertools\n'), ((8142, 8175), 'numpy.cos', 'np.cos', (['(direction / 180.0 * np.pi)'], {}), '(direction / 180.0 * np.pi)\n', (8148, 8175), True, 'import numpy as np\n'), ((8201, 8234), 'numpy.sin', 'np.sin', (['(direction / 180.0 * np.pi)'], {}), '(direction / 180.0 * np.pi)\n', (8207, 8234), True, 'import numpy as np\n'), ((11752, 11766), 'numpy.abs', 'np.abs', (['gabors'], {}), '(gabors)\n', (11758, 11766), True, 'import numpy as np\n'), ((26703, 26749), 'numpy.arange', 'np.arange', (['(-vertical_space)', '(vertical_space + 1)'], {}), '(-vertical_space, vertical_space + 1)\n', (26712, 
26749), True, 'import numpy as np\n'), ((26799, 26849), 'numpy.arange', 'np.arange', (['(-horizontal_space)', '(horizontal_space + 1)'], {}), '(-horizontal_space, horizontal_space + 1)\n', (26808, 26849), True, 'import numpy as np\n')]
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Shared configuration options for observations."""

import collections

import numpy as np

# Option fields common to every generic (non-camera) observable.
_SPEC_FIELDS = ['enabled', 'update_interval', 'buffer_size', 'delay',
                'aggregator', 'corruptor']

# Observable categories used to group specs and names.
_CATEGORY_FIELDS = ['proprio', 'ftt', 'prop_pose', 'camera']


class ObservableSpec(collections.namedtuple('ObservableSpec', _SPEC_FIELDS)):
  """Configuration options for generic observables."""
  __slots__ = ()


class CameraObservableSpec(collections.namedtuple(
    'CameraObservableSpec', ('height', 'width') + ObservableSpec._fields)):
  """Configuration options for camera observables (adds height/width)."""
  __slots__ = ()


class ObservationSettings(collections.namedtuple(
    'ObservationSettings', _CATEGORY_FIELDS)):
  """Container of `ObservableSpec`s grouped by category."""
  __slots__ = ()


class ObservableNames(collections.namedtuple(
    'ObservableNames', _CATEGORY_FIELDS)):
  """Container that groups the names of observables by category."""
  __slots__ = ()

  def __new__(cls, proprio=(), ftt=(), prop_pose=(), camera=()):
    # Every category defaults to an empty tuple so callers only need to
    # supply the categories they actually use.
    return super().__new__(
        cls, proprio=proprio, ftt=ftt, prop_pose=prop_pose, camera=camera)


def _symlog1p(x, random_state):
  """Symmetric logarithmic transform; well-defined for 0 and negatives."""
  del random_state  # Unused: the transform is deterministic.
  return np.sign(x) * np.log1p(abs(x))


# Global defaults for "feature" observables (anything that isn't a camera).
_DISABLED_FEATURE = ObservableSpec(
    enabled=False,
    update_interval=1,
    buffer_size=1,
    delay=0,
    aggregator=None,
    corruptor=None)
_ENABLED_FEATURE = _DISABLED_FEATURE._replace(enabled=True)

# Force, torque and touch-sensor readings are passed through the symmetric
# logarithmic corruptor so large magnitudes are compressed.
_DISABLED_FTT = _DISABLED_FEATURE._replace(corruptor=_symlog1p)
_ENABLED_FTT = _ENABLED_FEATURE._replace(corruptor=_symlog1p)

# Global defaults for camera observables: 84x84, otherwise same options as
# a disabled feature observable.
_DISABLED_CAMERA = CameraObservableSpec(
    height=84,
    width=84,
    **_DISABLED_FEATURE._asdict())
_ENABLED_CAMERA = _DISABLED_CAMERA._replace(enabled=True)

# Predefined sets of configuration options to apply to each category of
# observable.
PERFECT_FEATURES = ObservationSettings(
    proprio=_ENABLED_FEATURE,
    ftt=_ENABLED_FTT,
    prop_pose=_ENABLED_FEATURE,
    camera=_DISABLED_CAMERA)
VISION = ObservationSettings(
    proprio=_ENABLED_FEATURE,
    ftt=_ENABLED_FTT,
    prop_pose=_DISABLED_FEATURE,
    camera=_ENABLED_CAMERA)

JACO_ARM_OBSERVABLES = ObservableNames(
    proprio=['joints_pos', 'joints_vel'], ftt=['joints_torque'])
JACO_HAND_OBSERVABLES = ObservableNames(
    proprio=['joints_pos', 'joints_vel', 'pinch_site_pos', 'pinch_site_rmat'])
FREEPROP_OBSERVABLES = ObservableNames(
    prop_pose=['position', 'orientation', 'linear_velocity',
               'angular_velocity'])


def make_options(obs_settings, obs_names):
  """Constructs a dict of configuration options for named observables.

  Args:
    obs_settings: An `ObservationSettings` instance.
    obs_names: An `ObservableNames` instance.

  Returns:
    A nested dict containing `{observable_name: {option_name: value}}`.
  """
  # `spec._asdict()` is evaluated once per observable so every entry gets
  # its own independent dict that callers may mutate.
  return {
      name: spec._asdict()
      for category, spec in obs_settings._asdict().items()
      for name in getattr(obs_names, category)
  }
[ "collections.namedtuple", "numpy.sign" ]
[((782, 909), 'collections.namedtuple', 'collections.namedtuple', (['"""ObservableSpec"""', "['enabled', 'update_interval', 'buffer_size', 'delay', 'aggregator',\n 'corruptor']"], {}), "('ObservableSpec', ['enabled', 'update_interval',\n 'buffer_size', 'delay', 'aggregator', 'corruptor'])\n", (804, 909), False, 'import collections\n'), ((1023, 1119), 'collections.namedtuple', 'collections.namedtuple', (['"""CameraObservableSpec"""', "(('height', 'width') + ObservableSpec._fields)"], {}), "('CameraObservableSpec', ('height', 'width') +\n ObservableSpec._fields)\n", (1045, 1119), False, 'import collections\n'), ((1222, 1314), 'collections.namedtuple', 'collections.namedtuple', (['"""ObservationSettings"""', "['proprio', 'ftt', 'prop_pose', 'camera']"], {}), "('ObservationSettings', ['proprio', 'ftt',\n 'prop_pose', 'camera'])\n", (1244, 1314), False, 'import collections\n'), ((1419, 1507), 'collections.namedtuple', 'collections.namedtuple', (['"""ObservableNames"""', "['proprio', 'ftt', 'prop_pose', 'camera']"], {}), "('ObservableNames', ['proprio', 'ftt', 'prop_pose',\n 'camera'])\n", (1441, 1507), False, 'import collections\n'), ((2252, 2262), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (2259, 2262), True, 'import numpy as np\n')]
""" Unit tests for scCODA """ import unittest import numpy as np import scanpy as sc import tensorflow as tf import pandas as pd import os import sys sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('..')) from sccoda.util import cell_composition_data as dat from sccoda.util import comp_ana as mod from sccoda.util import data_generation as gen pd.set_option('display.max_columns', 500) pd.set_option('display.max_rows', 500) #%% class TestDataGeneration(unittest.TestCase): """ Testing whether the data generation functions from data_generation work as intended Returns ------- boolean -- all tests were passed or not """ def setUp(self): self.N = 3 self.D = 1 self.K = 2 self.n_total = [1000] * self.N self.noise_std_true = 1 self.covariate_mean = None self.covariate_var = None self.sigma = None self.b_true = None self.w_true = None def test_case_control_gen(self): """ Tests data generation for case/control scenarios Returns ------- boolean -- all tests were passed or not """ np.random.seed(1234) cases = 1 K = 2 n_total = 1000 n_samples = [2, 2] noise_std_true = 0 sigma = None b_true = None w_true = None data = gen.generate_case_control(cases, K, n_total, n_samples, sigma, b_true, w_true) test = True if any(np.abs(data.obs["x_0"] - [0, 0, 1, 1]) > 1e-5): print("obs is not correct!") test = False if not np.array_equal(data.X, np.array([[74., 926.], [58., 942.], [32., 968.], [53., 947.]])): print("X is not correct!") test = False if not np.array_equal(data.uns["b_true"], np.array([-1.8508832, 0.7326526], dtype=np.float64)) & \ np.array_equal(data.uns["w_true"], np.array([[0., 0.]])): print("uns is not correct!") test = False self.assertTrue(test) def test_change_functions(self): """ Tests gen.b_w_from_abs_change and gen.counts_from_first Returns ------- boolean -- all tests were passed or not """ np.random.seed(1234) correct = True counts_before = np.array([600, 400]) abs_change = 100 n_total = 1000 K = 2 b_0 = 600 b, w = 
gen.b_w_from_abs_change(counts_before, abs_change, n_total) if any(np.abs(b - [-0.51082562, -0.91629073]) > 1e-5): print("gen.b_w_from_abs_change: b not correct!") correct = False if any(np.abs(w - [0.44183275, 0.]) > 1e-5): print("gen.b_w_from_abs_change: b not correct!") correct = False b_2 = gen.counts_from_first(b_0, n_total, K) if not np.array_equal(b_2, [600., 400.]): print("gen.counts_from_first not correct!") correct = False self.assertTrue(correct) class TestDataImport(unittest.TestCase): def test_from_pandas(self): # Get Haber Salmonella data data_raw = pd.read_csv(os.path.abspath("../../data/haber_counts.csv")) salm_indices = [0, 1, 2, 3, 8, 9] salm_df = data_raw.iloc[salm_indices, :] data_salm = dat.from_pandas(salm_df, covariate_columns=["Mouse"]) data_salm.obs["Condition"] = data_salm.obs["Mouse"].str.replace(r"_[0-9]", "") # Only check size of x, obs x_shape = (data_salm.X.shape == (6, 8)) obs_shape = (data_salm.obs.shape == (6, 2)) self.assertTrue(x_shape & obs_shape) def test_from_scanpy(self): # Get scanpy example data, add covariates, read in three times adata_ref = sc.datasets.pbmc3k_processed() adata_ref.uns["cov"] = {"x1": 0, "x2": 1} data = dat.from_scanpy_list([adata_ref, adata_ref, adata_ref], cell_type_identifier="louvain", covariate_key="cov") # Only check size of x, obs x_shape = (data.X.shape == (3, 8)) obs_shape = (data.obs.shape == (3, 2)) var_names = (data.var.index.tolist() == ['CD4 T cells', 'CD14+ Monocytes', 'B cells', 'CD8 T cells', 'NK cells', 'FCGR3A+ Monocytes', 'Dendritic cells', 'Megakaryocytes']) self.assertTrue(x_shape & obs_shape & var_names) class TestModels(unittest.TestCase): def setUp(self): # Get Haber count data data_raw = pd.read_csv(os.path.abspath("../../data/haber_counts.csv")) salm_indices = [0, 1, 2, 3, 8, 9] salm_df = data_raw.iloc[salm_indices, :] data_salm = dat.from_pandas(salm_df, covariate_columns=["Mouse"]) data_salm.obs["Condition"] = data_salm.obs["Mouse"].str.replace(r"_[0-9]", "") self.data = 
data_salm def test_no_reference(self): np.random.seed(1234) tf.random.set_seed(5678) model_salm = mod.CompositionalAnalysis(self.data, formula="Condition", reference_cell_type=None) # Run MCMC sim_results = model_salm.sample_hmc(num_results=20000, num_burnin=5000) alpha_df, beta_df = sim_results.summary_prepare() # Mean cell counts for both groups alphas_true = np.round(np.mean(self.data.X[:4], 0), 0) betas_true = np.round(np.mean(self.data.X[4:], 0), 0) # Mean cell counts for simulated data final_alphas = np.round(alpha_df.loc[:, "Expected Sample"].tolist(), 0) final_betas = np.round(beta_df.loc[:, "Expected Sample"].tolist(), 0) # Check if model approximately predicts ground truth differing_alphas = any(np.abs(alphas_true - final_alphas) > 30) differing_betas = any(np.abs(betas_true - final_betas) > 30) self.assertTrue((not differing_alphas) & (not differing_betas)) def test_reference(self): np.random.seed(1234) tf.random.set_seed(5678) model_salm = mod.CompositionalAnalysis(self.data, formula="Condition", reference_cell_type=5) # Run MCMC sim_results = model_salm.sample_hmc(num_results=20000, num_burnin=5000) alpha_df, beta_df = sim_results.summary_prepare() # Mean cell counts for both groups alphas_true = np.round(np.mean(self.data.X[:4], 0), 0) betas_true = np.round(np.mean(self.data.X[4:], 0), 0) # Mean cell counts for simulated data final_alphas = np.round(alpha_df.loc[:, "Expected Sample"].tolist(), 0) final_betas = np.round(beta_df.loc[:, "Expected Sample"].tolist(), 0) # Check if model approximately predicts ground truth differing_alphas = any(np.abs(alphas_true - final_alphas) > 30) differing_betas = any(np.abs(betas_true - final_betas) > 30) self.assertTrue((not differing_alphas) & (not differing_betas)) #%% if __name__ == '__main__': unittest.main()
[ "numpy.mean", "numpy.abs", "sccoda.util.data_generation.b_w_from_abs_change", "tensorflow.random.set_seed", "scanpy.datasets.pbmc3k_processed", "sccoda.util.data_generation.counts_from_first", "sccoda.util.cell_composition_data.from_pandas", "unittest.main", "pandas.set_option", "numpy.array", "...
[((376, 417), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(500)'], {}), "('display.max_columns', 500)\n", (389, 417), True, 'import pandas as pd\n'), ((418, 456), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(500)'], {}), "('display.max_rows', 500)\n", (431, 456), True, 'import pandas as pd\n'), ((170, 190), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (185, 190), False, 'import os\n'), ((211, 232), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (226, 232), False, 'import os\n'), ((7000, 7015), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7013, 7015), False, 'import unittest\n'), ((1188, 1208), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (1202, 1208), True, 'import numpy as np\n'), ((1400, 1478), 'sccoda.util.data_generation.generate_case_control', 'gen.generate_case_control', (['cases', 'K', 'n_total', 'n_samples', 'sigma', 'b_true', 'w_true'], {}), '(cases, K, n_total, n_samples, sigma, b_true, w_true)\n', (1425, 1478), True, 'from sccoda.util import data_generation as gen\n'), ((2284, 2304), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (2298, 2304), True, 'import numpy as np\n'), ((2353, 2373), 'numpy.array', 'np.array', (['[600, 400]'], {}), '([600, 400])\n', (2361, 2373), True, 'import numpy as np\n'), ((2470, 2529), 'sccoda.util.data_generation.b_w_from_abs_change', 'gen.b_w_from_abs_change', (['counts_before', 'abs_change', 'n_total'], {}), '(counts_before, abs_change, n_total)\n', (2493, 2529), True, 'from sccoda.util import data_generation as gen\n'), ((2841, 2879), 'sccoda.util.data_generation.counts_from_first', 'gen.counts_from_first', (['b_0', 'n_total', 'K'], {}), '(b_0, n_total, K)\n', (2862, 2879), True, 'from sccoda.util import data_generation as gen\n'), ((3352, 3405), 'sccoda.util.cell_composition_data.from_pandas', 'dat.from_pandas', (['salm_df'], {'covariate_columns': "['Mouse']"}), "(salm_df, 
covariate_columns=['Mouse'])\n", (3367, 3405), True, 'from sccoda.util import cell_composition_data as dat\n'), ((3800, 3830), 'scanpy.datasets.pbmc3k_processed', 'sc.datasets.pbmc3k_processed', ([], {}), '()\n', (3828, 3830), True, 'import scanpy as sc\n'), ((3897, 4009), 'sccoda.util.cell_composition_data.from_scanpy_list', 'dat.from_scanpy_list', (['[adata_ref, adata_ref, adata_ref]'], {'cell_type_identifier': '"""louvain"""', 'covariate_key': '"""cov"""'}), "([adata_ref, adata_ref, adata_ref],\n cell_type_identifier='louvain', covariate_key='cov')\n", (3917, 4009), True, 'from sccoda.util import cell_composition_data as dat\n'), ((4777, 4830), 'sccoda.util.cell_composition_data.from_pandas', 'dat.from_pandas', (['salm_df'], {'covariate_columns': "['Mouse']"}), "(salm_df, covariate_columns=['Mouse'])\n", (4792, 4830), True, 'from sccoda.util import cell_composition_data as dat\n'), ((4990, 5010), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (5004, 5010), True, 'import numpy as np\n'), ((5019, 5043), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(5678)'], {}), '(5678)\n', (5037, 5043), True, 'import tensorflow as tf\n'), ((5066, 5153), 'sccoda.util.comp_ana.CompositionalAnalysis', 'mod.CompositionalAnalysis', (['self.data'], {'formula': '"""Condition"""', 'reference_cell_type': 'None'}), "(self.data, formula='Condition',\n reference_cell_type=None)\n", (5091, 5153), True, 'from sccoda.util import comp_ana as mod\n'), ((5997, 6017), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (6011, 6017), True, 'import numpy as np\n'), ((6026, 6050), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(5678)'], {}), '(5678)\n', (6044, 6050), True, 'import tensorflow as tf\n'), ((6073, 6158), 'sccoda.util.comp_ana.CompositionalAnalysis', 'mod.CompositionalAnalysis', (['self.data'], {'formula': '"""Condition"""', 'reference_cell_type': '(5)'}), "(self.data, formula='Condition', reference_cell_type=5\n )\n", (6098, 6158), 
True, 'from sccoda.util import comp_ana as mod\n'), ((2895, 2930), 'numpy.array_equal', 'np.array_equal', (['b_2', '[600.0, 400.0]'], {}), '(b_2, [600.0, 400.0])\n', (2909, 2930), True, 'import numpy as np\n'), ((3191, 3237), 'os.path.abspath', 'os.path.abspath', (['"""../../data/haber_counts.csv"""'], {}), "('../../data/haber_counts.csv')\n", (3206, 3237), False, 'import os\n'), ((4616, 4662), 'os.path.abspath', 'os.path.abspath', (['"""../../data/haber_counts.csv"""'], {}), "('../../data/haber_counts.csv')\n", (4631, 4662), False, 'import os\n'), ((5383, 5410), 'numpy.mean', 'np.mean', (['self.data.X[:4]', '(0)'], {}), '(self.data.X[:4], 0)\n', (5390, 5410), True, 'import numpy as np\n'), ((5445, 5472), 'numpy.mean', 'np.mean', (['self.data.X[4:]', '(0)'], {}), '(self.data.X[4:], 0)\n', (5452, 5472), True, 'import numpy as np\n'), ((6387, 6414), 'numpy.mean', 'np.mean', (['self.data.X[:4]', '(0)'], {}), '(self.data.X[:4], 0)\n', (6394, 6414), True, 'import numpy as np\n'), ((6449, 6476), 'numpy.mean', 'np.mean', (['self.data.X[4:]', '(0)'], {}), '(self.data.X[4:], 0)\n', (6456, 6476), True, 'import numpy as np\n'), ((1515, 1553), 'numpy.abs', 'np.abs', (["(data.obs['x_0'] - [0, 0, 1, 1])"], {}), "(data.obs['x_0'] - [0, 0, 1, 1])\n", (1521, 1553), True, 'import numpy as np\n'), ((1667, 1737), 'numpy.array', 'np.array', (['[[74.0, 926.0], [58.0, 942.0], [32.0, 968.0], [53.0, 947.0]]'], {}), '([[74.0, 926.0], [58.0, 942.0], [32.0, 968.0], [53.0, 947.0]])\n', (1675, 1737), True, 'import numpy as np\n'), ((2546, 2584), 'numpy.abs', 'np.abs', (['(b - [-0.51082562, -0.91629073])'], {}), '(b - [-0.51082562, -0.91629073])\n', (2552, 2584), True, 'import numpy as np\n'), ((2699, 2728), 'numpy.abs', 'np.abs', (['(w - [0.44183275, 0.0])'], {}), '(w - [0.44183275, 0.0])\n', (2705, 2728), True, 'import numpy as np\n'), ((5775, 5809), 'numpy.abs', 'np.abs', (['(alphas_true - final_alphas)'], {}), '(alphas_true - final_alphas)\n', (5781, 5809), True, 'import numpy as np\n'), 
((5846, 5878), 'numpy.abs', 'np.abs', (['(betas_true - final_betas)'], {}), '(betas_true - final_betas)\n', (5852, 5878), True, 'import numpy as np\n'), ((6779, 6813), 'numpy.abs', 'np.abs', (['(alphas_true - final_alphas)'], {}), '(alphas_true - final_alphas)\n', (6785, 6813), True, 'import numpy as np\n'), ((6850, 6882), 'numpy.abs', 'np.abs', (['(betas_true - final_betas)'], {}), '(betas_true - final_betas)\n', (6856, 6882), True, 'import numpy as np\n'), ((1846, 1897), 'numpy.array', 'np.array', (['[-1.8508832, 0.7326526]'], {'dtype': 'np.float64'}), '([-1.8508832, 0.7326526], dtype=np.float64)\n', (1854, 1897), True, 'import numpy as np\n'), ((1950, 1972), 'numpy.array', 'np.array', (['[[0.0, 0.0]]'], {}), '([[0.0, 0.0]])\n', (1958, 1972), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*-
"""A minimal k-nearest-neighbours (kNN) movie-genre classifier demo."""

import numpy as np
import operator


def classfy0(inX, dataSet, labels, k):
    """Classify one sample with the kNN majority-vote rule.

    Parameters
    ----------
    inX : sequence of numbers
        The sample to classify (test point).
    dataSet : np.ndarray of shape (n, d)
        The training samples.
    labels : list of str
        Class label of each training sample.
    k : int
        Number of nearest neighbours that get to vote.

    Returns
    -------
    str
        The majority label among the k nearest training samples.
    """
    # Broadcasting subtracts the query point from every training row —
    # equivalent to the classic np.tile(...) formulation.
    deltas = dataSet - np.asarray(inX)
    # Euclidean distance from the query to every training sample.
    dists = np.sqrt((deltas ** 2).sum(axis=1))
    # Indices of the k closest samples, nearest first.
    nearest = dists.argsort()[:k]
    # Tally one vote per neighbour.
    votes = {}
    for idx in nearest:
        label = labels[idx]
        votes[label] = votes.get(label, 0) + 1
    # Ties resolve to the label that reached the maximum count first,
    # matching the stable reverse sort of the original implementation.
    return max(votes.items(), key=operator.itemgetter(1))[0]


def createDataset():
    """Return a toy training set: four 2-D points and their genre labels."""
    group = np.array([[1, 101], [5, 89], [108, 5], [115, 8]])
    labels = ['爱情片', '爱情片', '动作片', '动作片']
    return group, labels


if __name__ == '__main__':
    group, labels = createDataset()
    test = [101, 20]
    test_class = classfy0(test, group, labels, 3)
    print(test_class)

# kNN summary (translated from the original notes):
# Pros: simple, easy to understand, high accuracy, works for both
#       classification and regression, numeric or discrete features,
#       robust to outliers; training is O(n) with no data assumptions.
# Cons: high computational and memory cost at prediction time; sensitive
#       to class imbalance; offers no insight into the data's structure.
[ "numpy.array", "numpy.tile", "operator.itemgetter" ]
[((1286, 1335), 'numpy.array', 'np.array', (['[[1, 101], [5, 89], [108, 5], [115, 8]]'], {}), '([[1, 101], [5, 89], [108, 5], [115, 8]])\n', (1294, 1335), True, 'import numpy as np\n'), ((439, 469), 'numpy.tile', 'np.tile', (['inX', '(dataSetSize, 1)'], {}), '(inX, (dataSetSize, 1))\n', (446, 469), True, 'import numpy as np\n'), ((1133, 1155), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1152, 1155), False, 'import operator\n')]
# coding: utf-8
"""Bengali sentiment classification with a multichannel CNN + LSTM (Keras).

Notebook-exported script.  Pipeline:
  1. Load raw positive/negative documents, clean them, pickle train/test sets.
  2. Fit a tokenizer on the training text and build a BengWord2Vec embedding
     matrix for its vocabulary.
  3. Train a three-channel CNN merged with an LSTM branch; save ``model.h5``.
  4. Reload the model and report test accuracy.

Fix vs. the original export: the Word2Vec embedding-matrix cell referenced
``tokenizer`` before any tokenizer had been created (a NameError when the
script is run top to bottom).  The embedding construction now runs *after*
the tokenizer is fitted on the training lines.
"""

import sklearn
import numpy as np
from glob import glob
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.pipeline import Pipeline
import string
import csv
import os
from os import listdir
from nltk.corpus import stopwords
from pickle import dump, load

categories = ['positive', 'negative']


def save_dataset(dataset, filename):
    """Pickle *dataset* to *filename* and log the path."""
    # Context manager so the file handle is not leaked.
    with open(filename, 'wb') as handle:
        dump(dataset, handle)
    print('Saved: %s' % filename)


def load_doc(filename):
    """Return the full text of *filename* (UTF-8)."""
    with open(filename, 'r', encoding='utf-8') as handle:
        return handle.read()


# Bengali stop-word list: one word per line in stopwords_bn.txt.
stop_words = 'stopwords_bn.txt'
text_data = []
with open(stop_words, 'r', encoding='utf-8') as temp_output_file:
    reader = csv.reader(temp_output_file, delimiter='\n')
    for row in reader:
        text_data.append(row)
stop_word_list = [x[0] for x in text_data]


def clean_doc(doc):
    """Clean *doc*: strip punctuation, keep alphabetic non-stop-words of
    length > 1, and return the surviving tokens re-joined into one string."""
    tokens = doc.split()
    table = str.maketrans('', '', string.punctuation)
    tokens = [w.translate(table) for w in tokens]
    tokens = [word for word in tokens if word.isalpha()]
    tokens = [w for w in tokens if w not in stop_word_list]
    tokens = [word for word in tokens if len(word) > 1]
    return ' '.join(tokens)


def load_data_and_labels(positive_data_file, negative_data_file):
    """Load and clean both polarity files; return ``[text, labels]``.

    NOTE(review): ``clean_doc`` returns a single string, so the label
    comprehensions iterate over *characters*, yielding one label per
    character rather than per document.  Behaviour kept as-is so the
    pickled datasets stay compatible with the rest of the script —
    confirm before reusing this function elsewhere.
    """
    positive_examples = clean_doc(load_doc(positive_data_file))
    negative_examples = clean_doc(load_doc(negative_data_file))
    x_text = positive_examples + negative_examples
    trainy = ([0 for _ in positive_examples] +
              [1 for _ in negative_examples])
    return [x_text, trainy]


print("Loading data...")

# Training dataset
positive_data_file = 'C:/Users/admin-karim/Downloads/tmp/train/bangla.pos'
negative_data_file = 'C:/Users/admin-karim/Downloads/tmp/train/bangla.neg'
trainX, trainY = load_data_and_labels(positive_data_file, negative_data_file)

# Testing dataset
positive_data_file = 'C:/Users/admin-karim/Downloads/tmp/test/bangla.pos'
negative_data_file = 'C:/Users/admin-karim/Downloads/tmp/test/bangla.neg'
testX, testY = load_data_and_labels(positive_data_file, negative_data_file)

save_dataset([trainX, trainY], 'train.pkl')
save_dataset([testX, testY], 'test.pkl')

from numpy import array
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.vis_utils import plot_model
from keras.models import Model, load_model
from keras.layers import Input, Dense, Flatten, Dropout, Embedding, LSTM
from keras.layers.convolutional import Conv1D, MaxPooling1D
from keras.layers.merge import concatenate


def load_dataset(filename):
    """Unpickle a dataset produced by ``save_dataset``."""
    with open(filename, 'rb') as handle:
        return load(handle)


def create_tokenizer(lines):
    """Fit a Keras ``Tokenizer`` on *lines* and return it."""
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(lines)
    return tokenizer


def max_length(lines):
    """Return ``len(lines)``.

    NOTE(review): despite the name, this is the number of items, not the
    longest document; the script uses a fixed length of 300 instead.
    """
    return len(lines)


def encode_text(tokenizer, lines, length):
    """Integer-encode *lines* and pad every sequence to *length*."""
    encoded = tokenizer.texts_to_sequences(lines)
    return pad_sequences(encoded, maxlen=length, padding='post')


# --- Tokenizer must exist BEFORE the embedding matrix is built (bug fix) ---
trainLines, trainLabels = load_dataset('train.pkl')
tokenizer = create_tokenizer(trainLines)
trainLength = 300  # fixed padded document length
vocab_size = len(tokenizer.word_index) + 1
print('Max document length: %d' % trainLength)
print('Vocabulary size: %d' % vocab_size)

# --- Pre-trained BengWord2Vec embeddings -----------------------------------
import gensim
from gensim.models import Word2Vec

NUM_WORDS = 20000
word_vectors = Word2Vec.load('C:/Users/admin-karim/Desktop/BengWord2Vec/posts.bin')

EMBEDDING_DIM = 300
vocabulary_size = len(tokenizer.word_index) + 1
word_index = tokenizer.word_index
embedding_matrix = np.zeros((vocabulary_size, EMBEDDING_DIM))
for word, i in word_index.items():
    if i >= NUM_WORDS:
        continue
    try:
        embedding_matrix[i] = word_vectors[word]
    except KeyError:
        # Out-of-vocabulary words get a small random vector.
        embedding_matrix[i] = np.random.normal(0, np.sqrt(0.25), EMBEDDING_DIM)
del word_vectors  # free the large word-vector model

# One shared, trainable embedding layer feeds every model branch.
embedding_layer = Embedding(vocabulary_size, EMBEDDING_DIM,
                            weights=[embedding_matrix], trainable=True)


def define_model(length, vocab_size):
    """Build the multichannel CNN (+LSTM) classifier.

    Three parallel Conv1D channels (kernel sizes 4, 6 and 8) and one LSTM
    branch all read the shared module-level ``embedding_layer``; their
    features are concatenated and fed to a dense head with a sigmoid output.

    Args:
        length: padded document length (timesteps of each input).
        vocab_size: unused here — the shared embedding layer already fixes
            the vocabulary; kept for interface compatibility.

    Returns:
        A compiled ``keras.models.Model`` taking three identical inputs.
    """
    def conv_channel(inputs, kernel_size):
        # One Conv1D -> Dropout -> MaxPool -> Flatten channel.
        embedded = embedding_layer(inputs)
        conv = Conv1D(filters=32, kernel_size=kernel_size,
                      activation='relu')(embedded)
        drop = Dropout(0.5)(conv)
        pool = MaxPooling1D(pool_size=2)(drop)
        return Flatten()(pool)

    input1 = Input(shape=(length,))
    input2 = Input(shape=(length,))
    input3 = Input(shape=(length,))
    flat1 = conv_channel(input1, 4)
    flat2 = conv_channel(input2, 6)
    flat3 = conv_channel(input3, 8)
    CNN_layer = concatenate([flat1, flat2, flat3])

    # LSTM branch re-uses the third input's embedding.
    LSTM_layer = LSTM(128)(embedding_layer(input3))
    CNN_LSTM_layer = concatenate([LSTM_layer, CNN_layer])

    # Interpretation head.
    dense1 = Dense(10, activation='relu')(CNN_LSTM_layer)
    outputs = Dense(1, activation='sigmoid')(dense1)
    model = Model(inputs=[input1, input2, input3], outputs=outputs)
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    print(model.summary())
    return model


# --- Train -----------------------------------------------------------------
trainX = encode_text(tokenizer, trainLines, trainLength)
print(trainX.shape)

model = define_model(trainLength, vocab_size)
model.fit([trainX, trainX, trainX], array(trainLabels), epochs=1, batch_size=128)
model.save('model.h5')

# Graphviz is needed by plot_model; extend PATH so pydot can find its dot.exe.
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
plot_model(model, show_shapes=True, to_file='multichannel.png')

# --- Evaluate --------------------------------------------------------------
testLines, testLabels = load_dataset('test.pkl')
# NOTE(review): the original refits the tokenizer on the *test* lines before
# encoding (so test indices need not match training indices); kept as-is to
# preserve the exported behaviour.
tokenizer = create_tokenizer(testLines)
length = trainLength
vocab_size = len(tokenizer.word_index) + 1
print('Max document length: %d' % length)
print('Vocabulary size: %d' % vocab_size)
testX = encode_text(tokenizer, testLines, length)
print(testX.shape)

model = load_model('model.h5')
loss, acc = model.evaluate([testX, testX, testX], array(testLabels), verbose=0)
print('Test accuracy: %f' % (acc * 100))
[ "numpy.sqrt", "keras.utils.vis_utils.plot_model", "numpy.array", "keras.layers.Dense", "keras.preprocessing.sequence.pad_sequences", "keras.layers.merge.concatenate", "gensim.models.Word2Vec.load", "keras.layers.convolutional.Conv1D", "keras.layers.LSTM", "keras.models.Model", "numpy.concatenate...
[((4615, 4683), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""C:/Users/admin-karim/Desktop/BengWord2Vec/posts.bin"""'], {}), "('C:/Users/admin-karim/Desktop/BengWord2Vec/posts.bin')\n", (4628, 4683), False, 'from gensim.models import Word2Vec\n'), ((4804, 4846), 'numpy.zeros', 'np.zeros', (['(vocabulary_size, EMBEDDING_DIM)'], {}), '((vocabulary_size, EMBEDDING_DIM))\n', (4812, 4846), True, 'import numpy as np\n'), ((5193, 5282), 'keras.layers.Embedding', 'Embedding', (['vocabulary_size', 'EMBEDDING_DIM'], {'weights': '[embedding_matrix]', 'trainable': '(True)'}), '(vocabulary_size, EMBEDDING_DIM, weights=[embedding_matrix],\n trainable=True)\n', (5202, 5282), False, 'from keras.layers import Embedding\n'), ((7462, 7525), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'show_shapes': '(True)', 'to_file': '"""multichannel.png"""'}), "(model, show_shapes=True, to_file='multichannel.png')\n", (7472, 7525), False, 'from keras.utils.vis_utils import plot_model\n'), ((8158, 8180), 'keras.models.load_model', 'load_model', (['"""model.h5"""'], {}), "('model.h5')\n", (8168, 8180), False, 'from keras.models import load_model\n'), ((1179, 1223), 'csv.reader', 'csv.reader', (['temp_output_file'], {'delimiter': '"""\n"""'}), "(temp_output_file, delimiter='\\n')\n", (1189, 1223), False, 'import csv\n'), ((2586, 2639), 'numpy.concatenate', 'np.concatenate', (['[positive_labels, negative_labels]', '(0)'], {}), '([positive_labels, negative_labels], 0)\n', (2600, 2639), True, 'import numpy as np\n'), ((4036, 4047), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (4045, 4047), False, 'from keras.preprocessing.text import Tokenizer\n'), ((4351, 4404), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['encoded'], {'maxlen': 'length', 'padding': '"""post"""'}), "(encoded, maxlen=length, padding='post')\n", (4364, 4404), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((5373, 5395), 'keras.layers.Input', 
'Input', ([], {'shape': '(length,)'}), '(shape=(length,))\n', (5378, 5395), False, 'from keras.layers import Input\n'), ((5647, 5669), 'keras.layers.Input', 'Input', ([], {'shape': '(length,)'}), '(shape=(length,))\n', (5652, 5669), False, 'from keras.layers import Input\n'), ((5921, 5943), 'keras.layers.Input', 'Input', ([], {'shape': '(length,)'}), '(shape=(length,))\n', (5926, 5943), False, 'from keras.layers import Input\n'), ((6194, 6228), 'keras.layers.merge.concatenate', 'concatenate', (['[flat1, flat2, flat3]'], {}), '([flat1, flat2, flat3])\n', (6205, 6228), False, 'from keras.layers.merge import concatenate\n'), ((6317, 6353), 'keras.layers.merge.concatenate', 'concatenate', (['[LSTM_layer, CNN_layer]'], {}), '([LSTM_layer, CNN_layer])\n', (6328, 6353), False, 'from keras.layers.merge import concatenate\n'), ((6491, 6546), 'keras.models.Model', 'Model', ([], {'inputs': '[input1, input2, input3]', 'outputs': 'outputs'}), '(inputs=[input1, input2, input3], outputs=outputs)\n', (6496, 6546), False, 'from keras.models import Model\n'), ((7272, 7290), 'numpy.array', 'array', (['trainLabels'], {}), '(trainLabels)\n', (7277, 7290), False, 'from numpy import array\n'), ((8271, 8288), 'numpy.array', 'array', (['testLabels'], {}), '(testLabels)\n', (8276, 8288), False, 'from numpy import array\n'), ((5450, 5502), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(4)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=4, activation='relu')\n", (5456, 5502), False, 'from keras.layers.convolutional import Conv1D\n'), ((5531, 5543), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5538, 5543), False, 'from keras.layers import Dropout\n'), ((5560, 5585), 'keras.layers.convolutional.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (5572, 5585), False, 'from keras.layers.convolutional import MaxPooling1D\n'), ((5602, 5611), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5609, 
5611), False, 'from keras.layers import Flatten\n'), ((5724, 5776), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(6)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=6, activation='relu')\n", (5730, 5776), False, 'from keras.layers.convolutional import Conv1D\n'), ((5805, 5817), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5812, 5817), False, 'from keras.layers import Dropout\n'), ((5834, 5859), 'keras.layers.convolutional.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (5846, 5859), False, 'from keras.layers.convolutional import MaxPooling1D\n'), ((5876, 5885), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5883, 5885), False, 'from keras.layers import Flatten\n'), ((5998, 6050), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(8)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=8, activation='relu')\n", (6004, 6050), False, 'from keras.layers.convolutional import Conv1D\n'), ((6079, 6091), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (6086, 6091), False, 'from keras.layers import Dropout\n'), ((6108, 6133), 'keras.layers.convolutional.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (6120, 6133), False, 'from keras.layers.convolutional import MaxPooling1D\n'), ((6150, 6159), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6157, 6159), False, 'from keras.layers import Flatten\n'), ((6285, 6294), 'keras.layers.LSTM', 'LSTM', (['(128)'], {}), '(128)\n', (6289, 6294), False, 'from keras.layers import LSTM\n'), ((6387, 6415), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (6392, 6415), False, 'from keras.layers import Dense\n'), ((6443, 6473), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (6448, 6473), False, 'from keras.layers import Dense\n'), 
((5091, 5104), 'numpy.sqrt', 'np.sqrt', (['(0.25)'], {}), '(0.25)\n', (5098, 5104), True, 'import numpy as np\n')]
import datetime as dt import hydra import logging import numpy as np import os import time import torch import torch.distributed as dist from hydra.utils import instantiate from ignite.contrib.handlers import ProgressBar from ignite.engine import Engine, Events from ignite.handlers import Checkpoint, TerminateOnNan from ignite.metrics import Metric, RunningAverage from omegaconf import DictConfig from timm.scheduler.scheduler import Scheduler from torch.utils.data import DistributedSampler from typing import Dict, List, Optional from data.sampler import CustomSampler from data.utils import create_train_loader, create_val_loader from utils.train import build_optimizer, build_process_batch_func, resume_from_checkpoint, \ setup_checkpoints, setup_ema from utils.typings import Batch, Device, FloatDict from utils.visualize import setup_visualizations Metrics = Dict[str, Metric] def humanize_time(timestamp: float) -> str: return dt.datetime.fromtimestamp(timestamp).strftime('%H:%M:%S') def on_epoch_start(engine: Engine): engine.state.t0 = time.time() engine.state.lr = 0.0 def log_iter(engine, pbar, interval_it=100, name="stage"): # type: (Engine, ProgressBar, Optional[int], Optional[str]) -> None epoch = engine.state.epoch iteration = engine.state.iteration metrics = engine.state.metrics stats = ", ".join(["%s: %.4f" % k_v for k_v in metrics.items()]) stats += ", lr: %.4f" % engine.state.lr t0 = engine.state.t0 t1 = time.time() it_time = (t1 - t0) / interval_it cur_time = humanize_time(t1) pbar.log_message("[{}][{:.3f} s] {} | ep: {:2d}, it: {:3d}, {}".format( cur_time, it_time, name, epoch, iteration, stats)) engine.state.t0 = t1 def log_epoch(engine: Engine, name="stage") -> None: epoch = engine.state.epoch metrics = engine.state.metrics stats = ", ".join(["%s: %.3f" % k_v for k_v in metrics.items()]) logging.info("{} | ep: {}, {}".format(name, epoch, stats)) def build_engine(loop_func, metrics=None): trainer = Engine(loop_func) if metrics: for name, metric in 
metrics.items(): metric.attach(trainer, name) return trainer def create_metrics(keys: List[str], device: Device = None) -> Metrics: def _out_transform(kek: str): return lambda out: out[kek] metrics = {key: RunningAverage(output_transform=_out_transform(key), device=device) for key in keys} return metrics def _upd_pbar_iter_from_cp(engine: Engine, pbar: ProgressBar) -> None: pbar.n = engine.state.iteration def run(conf: DictConfig, local_rank=0, distributed=False): epochs = conf.train.epochs epoch_length = conf.train.epoch_length torch.manual_seed(conf.seed) if distributed: rank = dist.get_rank() num_replicas = dist.get_world_size() torch.cuda.set_device(local_rank) else: rank = 0 num_replicas = 1 torch.cuda.set_device(conf.gpu) device = torch.device('cuda') loader_args = dict(mean=conf.data.mean, std=conf.data.std) master_node = rank == 0 if master_node: print(conf.pretty()) if num_replicas > 1: epoch_length = epoch_length // num_replicas loader_args["rank"] = rank loader_args["num_replicas"] = num_replicas train_dl = create_train_loader(conf.data.train, **loader_args) valid_dl = create_val_loader(conf.data.val, **loader_args) if epoch_length < 1: epoch_length = len(train_dl) model = instantiate(conf.model).to(device) model_ema, update_ema = setup_ema(conf, model, device=device, master_node=master_node) optim = build_optimizer(conf.optim, model) scheduler_kwargs = dict() if "schedule.OneCyclePolicy" in conf.lr_scheduler["class"]: scheduler_kwargs["cycle_steps"] = epoch_length lr_scheduler: Scheduler = instantiate(conf.lr_scheduler, optim, **scheduler_kwargs) use_amp = False if conf.use_apex: import apex from apex import amp logging.debug("Nvidia's Apex package is available") model, optim = amp.initialize(model, optim, **conf.amp) use_amp = True if master_node: logging.info("Using AMP with opt_level={}".format(conf.amp.opt_level)) else: apex, amp = None, None to_save = dict(model=model, optim=optim) if use_amp: to_save["amp"] = amp if model_ema is not None: 
to_save["model_ema"] = model_ema if master_node and conf.logging.model: logging.info(model) if distributed: sync_bn = conf.distributed.sync_bn if apex is not None: if sync_bn: model = apex.parallel.convert_syncbn_model(model) model = apex.parallel.distributed.DistributedDataParallel( model, delay_allreduce=True) else: if sync_bn: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[local_rank, ], output_device=local_rank) upd_interval = conf.optim.step_interval ema_interval = conf.smoothing.interval_it * upd_interval clip_grad = conf.optim.clip_grad _handle_batch_train = build_process_batch_func(conf.data, stage="train", device=device) _handle_batch_val = build_process_batch_func(conf.data, stage="val", device=device) def _update(eng: Engine, batch: Batch) -> FloatDict: model.train() batch = _handle_batch_train(batch) losses: Dict = model(*batch) stats = {k: v.item() for k, v in losses.items()} loss = losses["loss"] del losses if use_amp: with amp.scale_loss(loss, optim) as scaled_loss: scaled_loss.backward() else: loss.backward() it = eng.state.iteration if not it % upd_interval: if clip_grad > 0: params = amp.master_params(optim) if use_amp else model.parameters() torch.nn.utils.clip_grad_norm_(params, clip_grad) optim.step() optim.zero_grad() lr_scheduler.step_update(it) if not it % ema_interval: update_ema() eng.state.lr = optim.param_groups[0]["lr"] return stats calc_map = conf.validate.calc_map min_score = conf.validate.get("min_score", -1) model_val = model if conf.train.skip and model_ema is not None: model_val = model_ema.to(device) def _validate(eng: Engine, batch: Batch) -> FloatDict: model_val.eval() images, targets = _handle_batch_val(batch) with torch.no_grad(): out: Dict = model_val(images, targets) pred_boxes = out.pop("detections") stats = {k: v.item() for k, v in out.items()} if calc_map: pred_boxes = pred_boxes.detach().cpu().numpy() true_boxes = 
targets['bbox'].cpu().numpy() img_scale = targets['img_scale'].cpu().numpy() # yxyx -> xyxy true_boxes = true_boxes[:, :, [1, 0, 3, 2]] # xyxy -> xywh true_boxes[:, :, [2, 3]] -= true_boxes[:, :, [0, 1]] # scale downsized boxes to match predictions on a full-sized image true_boxes *= img_scale[:, None, None] scores = [] for i in range(len(images)): mask = pred_boxes[i, :, 4] >= min_score s = calculate_image_precision(true_boxes[i], pred_boxes[i, mask, :4], thresholds=IOU_THRESHOLDS, form='coco') scores.append(s) stats['map'] = np.mean(scores) return stats train_metric_names = list(conf.logging.out.train) train_metrics = create_metrics(train_metric_names, device if distributed else None) val_metric_names = list(conf.logging.out.val) if calc_map: from utils.metric import calculate_image_precision, IOU_THRESHOLDS val_metric_names.append('map') val_metrics = create_metrics(val_metric_names, device if distributed else None) trainer = build_engine(_update, train_metrics) evaluator = build_engine(_validate, val_metrics) to_save['trainer'] = trainer every_iteration = Events.ITERATION_COMPLETED trainer.add_event_handler(every_iteration, TerminateOnNan()) if distributed: dist_bn = conf.distributed.dist_bn if dist_bn in ["reduce", "broadcast"]: from timm.utils import distribute_bn @trainer.on(Events.EPOCH_COMPLETED) def _distribute_bn_stats(eng: Engine): reduce = dist_bn == "reduce" if master_node: logging.info("Distributing BN stats...") distribute_bn(model, num_replicas, reduce) sampler = train_dl.sampler if isinstance(sampler, (CustomSampler, DistributedSampler)): @trainer.on(Events.EPOCH_STARTED) def _set_epoch(eng: Engine): sampler.set_epoch(eng.state.epoch - 1) @trainer.on(Events.EPOCH_COMPLETED) def _scheduler_step(eng: Engine): # it starts from 1, so we don't need to add 1 here ep = eng.state.epoch lr_scheduler.step(ep) cp = conf.checkpoints pbar, pbar_vis = None, None if master_node: log_interval = conf.logging.interval_it log_event = 
Events.ITERATION_COMPLETED(every=log_interval) pbar = ProgressBar(persist=False) pbar.attach(trainer, metric_names=train_metric_names) pbar.attach(evaluator, metric_names=val_metric_names) for engine, name in zip([trainer, evaluator], ['train', 'val']): engine.add_event_handler(Events.EPOCH_STARTED, on_epoch_start) engine.add_event_handler(log_event, log_iter, pbar, interval_it=log_interval, name=name) engine.add_event_handler(Events.EPOCH_COMPLETED, log_epoch, name=name) setup_checkpoints(trainer, to_save, epoch_length, conf) if 'load' in cp.keys() and cp.load is not None: if master_node: logging.info("Resume from a checkpoint: {}".format(cp.load)) trainer.add_event_handler(Events.STARTED, _upd_pbar_iter_from_cp, pbar) resume_from_checkpoint(to_save, cp, device=device) state = trainer.state # epoch counter start from 1 lr_scheduler.step(state.epoch - 1) state.max_epochs = epochs @trainer.on(Events.EPOCH_COMPLETED(every=conf.validate.interval_ep)) def _run_validation(eng: Engine): if distributed: torch.cuda.synchronize(device) evaluator.run(valid_dl) skip_train = conf.train.skip if master_node and conf.visualize.enabled: vis_eng = evaluator if skip_train else trainer setup_visualizations(vis_eng, model, valid_dl, device, conf, force_run=skip_train) try: if skip_train: evaluator.run(valid_dl) else: trainer.run(train_dl, max_epochs=epochs, epoch_length=epoch_length) except Exception as e: import traceback logging.error(traceback.format_exc()) for pb in [pbar, pbar_vis]: if pb is not None: pbar.close() @hydra.main(config_path="../config/train.yaml") def main(conf: DictConfig): env = os.environ.copy() world_size = int(env.get('WORLD_SIZE', -1)) local_rank = int(env.get('LOCAL_RANK', -1)) dist_conf: DictConfig = conf.distributed distributed = world_size > 1 and local_rank >= 0 if distributed: if not torch.cuda.is_available(): raise RuntimeError("Unable to find any CUDA device") torch.backends.cudnn.benchmark = True dist.init_process_group(dist_conf.backend, 
init_method=dist_conf.url) if local_rank == 0: print("\nDistributed setting:") print("\tbackend: {}".format(dist.get_backend())) print("\tworld size: {}".format(dist.get_world_size())) print("\trank: {}\n".format(dist.get_rank())) try: run(conf, local_rank, distributed) except KeyboardInterrupt: print("Shutting down...") except Exception as e: if distributed: dist.destroy_process_group() raise e if distributed: dist.destroy_process_group() if __name__ == '__main__': main()
[ "utils.train.resume_from_checkpoint", "apex.parallel.distributed.DistributedDataParallel", "apex.amp.scale_loss", "logging.debug", "torch.distributed.destroy_process_group", "torch.nn.utils.clip_grad_norm_", "ignite.engine.Engine", "torch.cuda.synchronize", "apex.amp.initialize", "torch.cuda.is_av...
[((11371, 11417), 'hydra.main', 'hydra.main', ([], {'config_path': '"""../config/train.yaml"""'}), "(config_path='../config/train.yaml')\n", (11381, 11417), False, 'import hydra\n'), ((1068, 1079), 'time.time', 'time.time', ([], {}), '()\n', (1077, 1079), False, 'import time\n'), ((1491, 1502), 'time.time', 'time.time', ([], {}), '()\n', (1500, 1502), False, 'import time\n'), ((2046, 2063), 'ignite.engine.Engine', 'Engine', (['loop_func'], {}), '(loop_func)\n', (2052, 2063), False, 'from ignite.engine import Engine, Events\n'), ((2752, 2780), 'torch.manual_seed', 'torch.manual_seed', (['conf.seed'], {}), '(conf.seed)\n', (2769, 2780), False, 'import torch\n'), ((3025, 3045), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3037, 3045), False, 'import torch\n'), ((3366, 3417), 'data.utils.create_train_loader', 'create_train_loader', (['conf.data.train'], {}), '(conf.data.train, **loader_args)\n', (3385, 3417), False, 'from data.utils import create_train_loader, create_val_loader\n'), ((3433, 3480), 'data.utils.create_val_loader', 'create_val_loader', (['conf.data.val'], {}), '(conf.data.val, **loader_args)\n', (3450, 3480), False, 'from data.utils import create_train_loader, create_val_loader\n'), ((3620, 3682), 'utils.train.setup_ema', 'setup_ema', (['conf', 'model'], {'device': 'device', 'master_node': 'master_node'}), '(conf, model, device=device, master_node=master_node)\n', (3629, 3682), False, 'from utils.train import build_optimizer, build_process_batch_func, resume_from_checkpoint, setup_checkpoints, setup_ema\n'), ((3695, 3729), 'utils.train.build_optimizer', 'build_optimizer', (['conf.optim', 'model'], {}), '(conf.optim, model)\n', (3710, 3729), False, 'from utils.train import build_optimizer, build_process_batch_func, resume_from_checkpoint, setup_checkpoints, setup_ema\n'), ((3910, 3967), 'hydra.utils.instantiate', 'instantiate', (['conf.lr_scheduler', 'optim'], {}), '(conf.lr_scheduler, optim, **scheduler_kwargs)\n', (3921, 3967), 
False, 'from hydra.utils import instantiate\n'), ((5313, 5378), 'utils.train.build_process_batch_func', 'build_process_batch_func', (['conf.data'], {'stage': '"""train"""', 'device': 'device'}), "(conf.data, stage='train', device=device)\n", (5337, 5378), False, 'from utils.train import build_optimizer, build_process_batch_func, resume_from_checkpoint, setup_checkpoints, setup_ema\n'), ((5403, 5466), 'utils.train.build_process_batch_func', 'build_process_batch_func', (['conf.data'], {'stage': '"""val"""', 'device': 'device'}), "(conf.data, stage='val', device=device)\n", (5427, 5466), False, 'from utils.train import build_optimizer, build_process_batch_func, resume_from_checkpoint, setup_checkpoints, setup_ema\n'), ((11456, 11473), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (11471, 11473), False, 'import os\n'), ((2817, 2832), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2830, 2832), True, 'import torch.distributed as dist\n'), ((2856, 2877), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2875, 2877), True, 'import torch.distributed as dist\n'), ((2886, 2919), 'torch.cuda.set_device', 'torch.cuda.set_device', (['local_rank'], {}), '(local_rank)\n', (2907, 2919), False, 'import torch\n'), ((2980, 3011), 'torch.cuda.set_device', 'torch.cuda.set_device', (['conf.gpu'], {}), '(conf.gpu)\n', (3001, 3011), False, 'import torch\n'), ((4068, 4119), 'logging.debug', 'logging.debug', (['"""Nvidia\'s Apex package is available"""'], {}), '("Nvidia\'s Apex package is available")\n', (4081, 4119), False, 'import logging\n'), ((4144, 4184), 'apex.amp.initialize', 'amp.initialize', (['model', 'optim'], {}), '(model, optim, **conf.amp)\n', (4158, 4184), False, 'from apex import amp\n'), ((4570, 4589), 'logging.info', 'logging.info', (['model'], {}), '(model)\n', (4582, 4589), False, 'import logging\n'), ((8489, 8505), 'ignite.handlers.TerminateOnNan', 'TerminateOnNan', ([], {}), '()\n', (8503, 8505), False, 'from 
ignite.handlers import Checkpoint, TerminateOnNan\n'), ((9536, 9582), 'ignite.engine.Events.ITERATION_COMPLETED', 'Events.ITERATION_COMPLETED', ([], {'every': 'log_interval'}), '(every=log_interval)\n', (9562, 9582), False, 'from ignite.engine import Engine, Events\n'), ((9598, 9624), 'ignite.contrib.handlers.ProgressBar', 'ProgressBar', ([], {'persist': '(False)'}), '(persist=False)\n', (9609, 9624), False, 'from ignite.contrib.handlers import ProgressBar\n'), ((10091, 10146), 'utils.train.setup_checkpoints', 'setup_checkpoints', (['trainer', 'to_save', 'epoch_length', 'conf'], {}), '(trainer, to_save, epoch_length, conf)\n', (10108, 10146), False, 'from utils.train import build_optimizer, build_process_batch_func, resume_from_checkpoint, setup_checkpoints, setup_ema\n'), ((10389, 10439), 'utils.train.resume_from_checkpoint', 'resume_from_checkpoint', (['to_save', 'cp'], {'device': 'device'}), '(to_save, cp, device=device)\n', (10411, 10439), False, 'from utils.train import build_optimizer, build_process_batch_func, resume_from_checkpoint, setup_checkpoints, setup_ema\n'), ((10601, 10656), 'ignite.engine.Events.EPOCH_COMPLETED', 'Events.EPOCH_COMPLETED', ([], {'every': 'conf.validate.interval_ep'}), '(every=conf.validate.interval_ep)\n', (10623, 10656), False, 'from ignite.engine import Engine, Events\n'), ((10939, 11026), 'utils.visualize.setup_visualizations', 'setup_visualizations', (['vis_eng', 'model', 'valid_dl', 'device', 'conf'], {'force_run': 'skip_train'}), '(vis_eng, model, valid_dl, device, conf, force_run=\n skip_train)\n', (10959, 11026), False, 'from utils.visualize import setup_visualizations\n'), ((11851, 11920), 'torch.distributed.init_process_group', 'dist.init_process_group', (['dist_conf.backend'], {'init_method': 'dist_conf.url'}), '(dist_conf.backend, init_method=dist_conf.url)\n', (11874, 11920), True, 'import torch.distributed as dist\n'), ((12435, 12463), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), 
'()\n', (12461, 12463), True, 'import torch.distributed as dist\n'), ((950, 986), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (975, 986), True, 'import datetime as dt\n'), ((3557, 3580), 'hydra.utils.instantiate', 'instantiate', (['conf.model'], {}), '(conf.model)\n', (3568, 3580), False, 'from hydra.utils import instantiate\n'), ((4793, 4871), 'apex.parallel.distributed.DistributedDataParallel', 'apex.parallel.distributed.DistributedDataParallel', (['model'], {'delay_allreduce': '(True)'}), '(model, delay_allreduce=True)\n', (4842, 4871), False, 'import apex\n'), ((5024, 5127), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[local_rank]', 'output_device': 'local_rank'}), '(model, device_ids=[local_rank],\n output_device=local_rank)\n', (5065, 5127), False, 'import torch\n'), ((6741, 6756), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6754, 6756), False, 'import torch\n'), ((7807, 7822), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (7814, 7822), True, 'import numpy as np\n'), ((10732, 10762), 'torch.cuda.synchronize', 'torch.cuda.synchronize', (['device'], {}), '(device)\n', (10754, 10762), False, 'import torch\n'), ((11704, 11729), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11727, 11729), False, 'import torch\n'), ((4731, 4772), 'apex.parallel.convert_syncbn_model', 'apex.parallel.convert_syncbn_model', (['model'], {}), '(model)\n', (4765, 4772), False, 'import apex\n'), ((4951, 5003), 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', (['model'], {}), '(model)\n', (4996, 5003), False, 'import torch\n'), ((5771, 5798), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optim'], {}), '(loss, optim)\n', (5785, 5798), False, 'from apex import amp\n'), ((6095, 6144), 'torch.nn.utils.clip_grad_norm_', 'torch.nn.utils.clip_grad_norm_', (['params', 
'clip_grad'], {}), '(params, clip_grad)\n', (6125, 6144), False, 'import torch\n'), ((7549, 7658), 'utils.metric.calculate_image_precision', 'calculate_image_precision', (['true_boxes[i]', 'pred_boxes[i, mask, :4]'], {'thresholds': 'IOU_THRESHOLDS', 'form': '"""coco"""'}), "(true_boxes[i], pred_boxes[i, mask, :4],\n thresholds=IOU_THRESHOLDS, form='coco')\n", (7574, 7658), False, 'from utils.metric import calculate_image_precision, IOU_THRESHOLDS\n'), ((8921, 8963), 'timm.utils.distribute_bn', 'distribute_bn', (['model', 'num_replicas', 'reduce'], {}), '(model, num_replicas, reduce)\n', (8934, 8963), False, 'from timm.utils import distribute_bn\n'), ((11259, 11281), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11279, 11281), False, 'import traceback\n'), ((12361, 12389), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (12387, 12389), True, 'import torch.distributed as dist\n'), ((6019, 6043), 'apex.amp.master_params', 'amp.master_params', (['optim'], {}), '(optim)\n', (6036, 6043), False, 'from apex import amp\n'), ((8864, 8904), 'logging.info', 'logging.info', (['"""Distributing BN stats..."""'], {}), "('Distributing BN stats...')\n", (8876, 8904), False, 'import logging\n'), ((12034, 12052), 'torch.distributed.get_backend', 'dist.get_backend', ([], {}), '()\n', (12050, 12052), True, 'import torch.distributed as dist\n'), ((12099, 12120), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (12118, 12120), True, 'import torch.distributed as dist\n'), ((12163, 12178), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (12176, 12178), True, 'import torch.distributed as dist\n')]
""" evaluation.py ------------- This module provides classes and functions for evaluating a model. By: <NAME>, Ph.D., 2018 """ # Compatibility imports from __future__ import absolute_import, division, print_function # 3rd party imports import numpy as np import itertools import matplotlib.pylab as plt from sklearn.metrics import confusion_matrix from matplotlib.colors import LinearSegmentedColormap def plot_confusion_matrix(y_true, y_pred, classes, figure_size=(8, 8)): """This function plots a confusion matrix.""" # Compute confusion matrix cm = confusion_matrix(y_true, y_pred) cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] * 100 # Build Laussen Labs colormap cmap = LinearSegmentedColormap.from_list('laussen_labs_green', ['w', '#43BB9B'], N=256) # Setup plot plt.figure(figsize=figure_size) # Plot confusion matrix plt.imshow(cm, interpolation='nearest', cmap=cmap) # Modify axes tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) plt.yticks(tick_marks, classes) thresh = cm.max() / 1.5 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, str(np.round(cm[i, j], 2)) + ' %', horizontalalignment="center", color="white" if cm[i, j] > thresh else "black", fontsize=20) plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.tight_layout() plt.ylabel('True Label', fontsize=25) plt.xlabel('Predicted Label', fontsize=25) plt.show()
[ "matplotlib.pylab.xticks", "matplotlib.pylab.ylabel", "matplotlib.pylab.figure", "matplotlib.pylab.tight_layout", "matplotlib.pylab.xlabel", "matplotlib.pylab.imshow", "matplotlib.pylab.show", "matplotlib.pylab.yticks", "matplotlib.colors.LinearSegmentedColormap.from_list", "numpy.round", "sklea...
[((568, 600), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (584, 600), False, 'from sklearn.metrics import confusion_matrix\n'), ((713, 798), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""laussen_labs_green"""', "['w', '#43BB9B']"], {'N': '(256)'}), "('laussen_labs_green', ['w', '#43BB9B'], N=256\n )\n", (746, 798), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((816, 847), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': 'figure_size'}), '(figsize=figure_size)\n', (826, 847), True, 'import matplotlib.pylab as plt\n'), ((881, 931), 'matplotlib.pylab.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (891, 931), True, 'import matplotlib.pylab as plt\n'), ((996, 1040), 'matplotlib.pylab.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(90)'}), '(tick_marks, classes, rotation=90)\n', (1006, 1040), True, 'import matplotlib.pylab as plt\n'), ((1045, 1076), 'matplotlib.pylab.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (1055, 1076), True, 'import matplotlib.pylab as plt\n'), ((1351, 1374), 'matplotlib.pylab.xticks', 'plt.xticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (1361, 1374), True, 'import matplotlib.pylab as plt\n'), ((1379, 1402), 'matplotlib.pylab.yticks', 'plt.yticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (1389, 1402), True, 'import matplotlib.pylab as plt\n'), ((1407, 1425), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1423, 1425), True, 'import matplotlib.pylab as plt\n'), ((1430, 1467), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""True Label"""'], {'fontsize': '(25)'}), "('True Label', fontsize=25)\n", (1440, 1467), True, 'import matplotlib.pylab as plt\n'), ((1472, 1514), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Predicted Label"""'], 
{'fontsize': '(25)'}), "('Predicted Label', fontsize=25)\n", (1482, 1514), True, 'import matplotlib.pylab as plt\n'), ((1520, 1530), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (1528, 1530), True, 'import matplotlib.pylab as plt\n'), ((1207, 1228), 'numpy.round', 'np.round', (['cm[i, j]', '(2)'], {}), '(cm[i, j], 2)\n', (1215, 1228), True, 'import numpy as np\n')]
#!/usr/bin/env python ''' Filename: test_equally_space.py Description: unit tests to test equally_space_old.py ''' __author__ = '<NAME>' __email__ = '<EMAIL>' __status__ = 'prototype' # standard imports import unittest from equally_space import * import matplotlib.pyplot as plt import numpy as np class TestEquallySpace(unittest.TestCase): def setUp(self): pass def _make_test_array(self, N, tot_dist): # generate N test points randomly spaced between 0 to tot_dist test_array = sorted(np.random.random(N) * tot_dist) # set endpoints to be 0 and tot dist test_array[0], test_array[-1] = 0, tot_dist return test_array def assert_arrays_almost_equal(self, a1, a2): # works on two numpy arrays of 1 or 2 dimensions t1 = a1 - a2 if isinstance(sum(t1), float): self.assertAlmostEqual(sum(t1), 0.0) self.assertAlmostEqual(max(t1), 0.0) self.assertAlmostEqual(min(t1), 0.0) else: for i in t1: self.assertAlmostEqual(sum(i), 0.0) self.assertAlmostEqual(max(i), 0.0) self.assertAlmostEqual(min(i), 0.0) def test_matrix_conversion(self): # create numpy arrays x = np.arange(12).reshape(3, 4) y = np.arange(100, 112).reshape(3, 4) # convert back and forth twice spines = spine_matricies_to_points(x, y) x1, y1 = create_spine_matricies(spines) spines2 = spine_matricies_to_points(x1, y1) self.assert_arrays_almost_equal(x1, x) self.assert_arrays_almost_equal(y1, y) for spine1, spine2 in izip(spines, spines2): for pt1, pt2 in izip(spine1, spine2): self.assertAlmostEqual(pt1, pt2) def test_equally_space_N(self, show_plot=False): N = 10 tot_dist = 10 test_points = self._make_test_array(N, tot_dist) sp1, sp2 = equally_space_N_xy_points(test_points, test_points, N=N) solution_points = np.linspace(0, tot_dist, N) if show_plot: plt.plot(test_points, lw=0.5, marker='o', label='random') plt.plot(sp1, lw=0.5, marker='o', label='spaced1') plt.plot(sp2, lw=0.5, marker='o', label='spaced2') plt.plot(solution_points, lw=0.5, label='solution') plt.legend() plt.show() self.assert_arrays_almost_equal(sp1, 
solution_points) self.assert_arrays_almost_equal(sp2, solution_points) # TODO: # 1. pair this with test_equally_space_N. # equally_space_xy_for_stepsize(x, y, step=0.5, kind='linear', n_interp_pts=50) # 2. # def set_matrix_orientation(x_mat, y_mat, verbose=True) # 3. # smooth_matricies_cols(x_mat, y_mat, window, order) # smooth_matricies_rows(x_mat, y_mat, window, order) # 4. # equally_space_matrix_distances(x_mat, y_mat) # equally_space_matricies_times(eq_times, orig_times, x_mat, y_mat) if __name__ == '__main__': unittest.main()
[ "numpy.arange", "numpy.random.random", "matplotlib.pyplot.plot", "numpy.linspace", "unittest.main", "matplotlib.pyplot.legend", "matplotlib.pyplot.show" ]
[((3034, 3049), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3047, 3049), False, 'import unittest\n'), ((2061, 2088), 'numpy.linspace', 'np.linspace', (['(0)', 'tot_dist', 'N'], {}), '(0, tot_dist, N)\n', (2072, 2088), True, 'import numpy as np\n'), ((2123, 2180), 'matplotlib.pyplot.plot', 'plt.plot', (['test_points'], {'lw': '(0.5)', 'marker': '"""o"""', 'label': '"""random"""'}), "(test_points, lw=0.5, marker='o', label='random')\n", (2131, 2180), True, 'import matplotlib.pyplot as plt\n'), ((2193, 2243), 'matplotlib.pyplot.plot', 'plt.plot', (['sp1'], {'lw': '(0.5)', 'marker': '"""o"""', 'label': '"""spaced1"""'}), "(sp1, lw=0.5, marker='o', label='spaced1')\n", (2201, 2243), True, 'import matplotlib.pyplot as plt\n'), ((2256, 2306), 'matplotlib.pyplot.plot', 'plt.plot', (['sp2'], {'lw': '(0.5)', 'marker': '"""o"""', 'label': '"""spaced2"""'}), "(sp2, lw=0.5, marker='o', label='spaced2')\n", (2264, 2306), True, 'import matplotlib.pyplot as plt\n'), ((2319, 2370), 'matplotlib.pyplot.plot', 'plt.plot', (['solution_points'], {'lw': '(0.5)', 'label': '"""solution"""'}), "(solution_points, lw=0.5, label='solution')\n", (2327, 2370), True, 'import matplotlib.pyplot as plt\n'), ((2383, 2395), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2393, 2395), True, 'import matplotlib.pyplot as plt\n'), ((2408, 2418), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2416, 2418), True, 'import matplotlib.pyplot as plt\n'), ((533, 552), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (549, 552), True, 'import numpy as np\n'), ((1295, 1308), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (1304, 1308), True, 'import numpy as np\n'), ((1335, 1354), 'numpy.arange', 'np.arange', (['(100)', '(112)'], {}), '(100, 112)\n', (1344, 1354), True, 'import numpy as np\n')]
"""Module for interfacing with power factory.""" import os import itertools import numpy as np import pandas as pd import powerfactory as pf from sinfactory.line import Line from sinfactory.generator import Generator from sinfactory.load import Load from sinfactory.area import Area from sinfactory.bus import Bus from sinfactory.eigenresults import EigenValueResults from sinfactory.pfresults import PFResults class PFactoryGrid(object): """Class for interfacing with powerfactory.""" def __init__(self, project_name): """Class constructor.""" # Start PowerFactory. self.app = ( pf.GetApplication() ) # powerfactory.application object created and returned if self.app is None: raise RuntimeError("Failed to load powerfactory.") # Activate project. self.project = self.app.ActivateProject(project_name) if self.project is None: raise RuntimeError("No project activated.") # Get the output window self.window = self.app.GetOutputWindow() # Get the load flow object self.ldf = self.app.GetFromStudyCase("ComLdf") self.lines = {line.cDisplayName: Line(line) for line in self.app.GetCalcRelevantObjects("*.ElmLne")} self.gens = {gen.cDisplayName: Generator(gen) for gen in self.app.GetCalcRelevantObjects("*.ElmSym")} self.loads = {load.cDisplayName: Load(load) for load in self.app.GetCalcRelevantObjects("*.ElmLod")} self.areas = {area.GetFullName( ).split("\\")[-1].split(".")[0]: Area(area) for area in self.app.GetCalcRelevantObjects("*.ElmArea")} self.buses = {bus.cDisplayName: Bus(bus) for bus in self.app.GetCalcRelevantObjects("*.ElmTerm")} def activate_study_case(self, study_case_name, folder_name=""): """Activate study case.""" study_case_folder = self.app.GetProjectFolder("study") study_case_file = study_case_name + ".IntCase" self.study_case = study_case_folder.GetContents(study_case_file)[0] self.study_case.Activate() def prepare_dynamic_sim( self, sim_type="rms", variables={}, start_time=0.0, step_size=0.01, end_time=10.0, ): """Method for calculating dynamic simulation initial 
conditions. Method that sets relevant parameters for calculating the initial conditions for the dynamic simulation and calculates them. It also determines which variables to monitor Args: variables (Dict): A dictionary containing the keys are the elements to be monitored and the data is a list of variables to monitor. sim_type (str): The simulation type it can be either rms for rms simulation and ins for EMT the default is rms. start_time (float): The starting time for the simulation the default is 0.0 step_size (float): The time step used for the simulation. The default is 0.01 end_time: The end time for the simulation. The default is 10.0 Returns: True if all initial conditions are verified. False otherwise. """ self.variables = variables # Get result file. self.res = self.app.GetFromStudyCase("*.ElmRes") # Select result variable to monitor. for elm_name, var_names in variables.items(): # Get all elements that match elm_name elements = self.app.GetCalcRelevantObjects(elm_name) # Select variables to monitor for each element for element in elements: self.res.AddVars(element, *var_names) # Retrieve initial conditions and time domain simulation object self.inc = self.app.GetFromStudyCase("ComInc") self.sim = self.app.GetFromStudyCase("ComSim") # Set simulation type self.inc.iopt_sim = sim_type # Set time options self.inc.tstart = start_time self.inc.dtgrid = step_size self.sim.tstop = end_time # Verify initial conditions self.inc.iopt_show = True # Calculate initial conditions self.inc.Execute() return self.inc.ZeroDerivative() def initialize_and_run_dynamic_sim( self, var_machines=("m:u:bus1", "m:P:bus1", "s:outofstep", "s:firel"), var_loads=("m:u:bus1", "m:P:bus1"), var_lines=("m:u:bus1", "c:loading"), var_buses=("m:u", "b:ipat"), sim_time=10.0, sim_type='rms' ): """ Initialize and run dynamic simulation. Saving result file as attribute. Args: var_names: Variables to track. 
sim_type: Type of dynamic simulation - 'rms' or 'emt' """ variables = self.generate_variables( var_machines=var_machines, var_loads=var_loads, var_lines=var_lines, var_buses=var_buses, ) self.prepare_dynamic_sim(sim_type=sim_type, variables=variables, end_time=sim_time) self.run_dynamic_sim() self.result = self.get_results(variables=variables) def run_dynamic_sim(self): """Run dynamic simulation. Returns: bool: False for success, True otherwise. """ return bool(self.sim.Execute()) def write_results_to_file(self, variables, filepath): """ Writes results to csv-file. Args: variables (dict): maps pf-object to list of variables. filepath (string): filename for the temporary csv-file """ self.ComRes = self.app.GetFromStudyCase("ComRes") self.ComRes.head = [] # Header of the file self.ComRes.col_Sep = "," # Column separator self.ComRes.dec_Sep = "." # Decimal separator self.ComRes.iopt_exp = 6 # Export type (csv) self.ComRes.iopt_csel = 1 # Export only user defined vars self.ComRes.ciopt_head = 1 # Use parameter names for variables self.ComRes.iopt_sep = 0 # Don't use system separators self.ComRes.f_name = filepath # Adding time as first column resultobj = [self.res] elements = [self.res] cvariable = ["b:tnow"] self.ComRes.head = [] # Defining all other results for elm_name, var_names in variables.items(): for element in self.app.GetCalcRelevantObjects(elm_name): full_name = element.GetFullName() split_name = full_name.split("\\") full_name_reduced = [] for dir in split_name[:-1]: full_name_reduced.append(dir.split(".")[0]) full_name_reduced.append(split_name[-1]) full_name_reduced = "\\".join(full_name_reduced) if not ((elm_name in full_name) or (elm_name in full_name_reduced)): continue for variable in var_names: self.ComRes.head.append(elm_name + "\\" + variable) elements.append(element) cvariable.append(variable) resultobj.append(self.res) self.ComRes.variable = cvariable self.ComRes.resultobj = resultobj self.ComRes.element = elements self.ComRes.ExportFullRange() 
def get_results(self, variables=None, filepath="results.csv"): """ Writes simulation results to csv-file and re-import to dataframe. Args: variables (dict): maps pf-object to list of variables. filepath (string): filename for the temporary csv-file Returns: dataframe: two-level dataframe with simulation results """ if not variables and hasattr(self, "variables"): variables = self.variables self.write_results_to_file(variables, filepath) res = pd.read_csv(filepath, sep=",", decimal=".", header=[0, 1], index_col=0) # res.dropna(how='any') res = res.apply(pd.to_numeric, errors="coerce").astype(float) res.rename( {i: i.split(":")[1].split(" in ")[0] for i in res.columns.levels[1]}, axis=1, level=1, inplace=True, ) res.columns.rename(("unit", "variable"), level=[0, 1], inplace=True) res.index.name = "time" return res def generate_variables( self, var_machines=("m:u:bus1", "m:P:bus1", "s:outofstep", "s:firel"), var_loads=("m:u:bus1", "m:P:bus1"), var_lines=("m:u:bus1", "c:loading"), var_buses=("m:u", "b:ipat"), ): """ Generate dictionary with variables for all machines Args: var_names Returns: Dictionary with all machines with all input variables """ output = {} for name, gen in self.gens.items(): if gen.in_service: output[name + ".ElmSym"] = list(var_machines) for name in self.loads.keys(): output[name + ".ElmLod"] = list(var_loads) for name in self.lines.keys(): output[name + ".ElmLne"] = list(var_lines) for name in self.buses.keys(): output[name + ".ElmTerm"] = list(var_buses) return output def check_islands(self): """ Check existence of islands. Returns true if there is islands and false if not """ var = "ipat" island_var = [] for bus in self.buses.keys(): isolated_area_result = self.result.loc[1:1000, (bus, var)].values end_val = len(isolated_area_result) - 1 island_var.append(isolated_area_result[end_val]) return max(island_var) def get_island_elements(self, islands): """ Return list of elemnts of the islands. 
Args: islands: number of islands Returns 2-D array where each island corresponds to a row which contains its elements """ var = "ipat" element_list = [] counter = 0 while islands - counter > 0: element_list.append([]) counter += 1 for elm in self.buses.keys(): isolated_area_result = self.result.loc[:, (elm, var)].values end_val = len(isolated_area_result) - 1 element_list[int(isolated_area_result[end_val]) - 1].append(elm) return element_list def get_init_value(self, feature_name, loads, machines, tripped_lines, dynamic=False): """ Generate and return intial value of a feature. Args: feature_name: name of feature loads: loads in an island machines: machines in an island tripped_lines: tripped lines dynamic: dynamic simulation or static (default) Returns: value: value of selected feature """ value = -1 if feature_name == "COI angle": if dynamic: init_ang = self.get_initial_rotor_angles( machine_names=machines) num = 0 denum = 0 for i, m in enumerate(machines): num += ( self.get_inertia(m) * self.get_number_of_parallell(m) * init_ang[i] ) denum += self.get_inertia( m) * self.get_number_of_parallell(m) value = num / denum else: init_ang = self.get_rotor_angles_static(machine_names=machines) num = 0 denum = 0 for i, m in enumerate(machines): num += ( self.get_inertia( m) * self.get_number_of_parallell(m) * init_ang[i] ) denum += self.get_inertia( m) * self.get_number_of_parallell(m) value = num / denum elif feature_name == "Production": value = 0 if dynamic: for machine in machines: production = self.get_active_power(machine) value += production[0] else: for machine in machines: machine_obj = self.app.GetCalcRelevantObjects( machine+".ElmSym") value += machine_obj.pgini elif feature_name == "Net flow": net_flow = 0 for line in tripped_lines: net_flow += self.get_branch_flow(line) value = net_flow elif feature_name == "Max flow": max_flow = 0 for line in tripped_lines: flow = self.get_branch_flow(line) if flow > max_flow: max_flow = flow value = max_flow elif 
feature_name == "Load": value = 0 if dynamic: for load in loads: consumption = self.get_active_power(load) value += consumption[0] else: for load in loads: load_obj = self.app.GetCalcRelevantObjects( machine+".ElmLod") value += load_obj.plini elif feature_name == "Inertia": value = 0 for machine in machines: value += self.get_inertia( machine) * self.get_number_of_parallell(machine) elif feature_name == "Clearing time": print("Clearing time: NotImplementedError") return value def find_connected_element(self, elm_name, elm_type): """ Find connected elements of elm_type connected to an elm_name Args: elm_name: element that is relevant to find its connected element elm_type: type of element which is wanted to find Returns: connected_element: name of connected element of elm_type """ elm = self.app.GetCalcRelevantObjects(elm_name + ".*")[0] cubicles = elm.GetCalcRelevantCubicles() for cubicle in cubicles: connected_element = cubicle.obj_id.loc_name try: load = self.app.GetCalcRelevantObjects( connected_element + elm_type)[0] except: load = None if load is not None: return connected_element def pole_slip(self, machine_name): """ Check if there has been a pole slip at any active machines Args: machine_name: name of machine Returns: true if there has been a pole slip at machine """ var = "outofstep" pole_var = self.result.loc[:, (machine_name, var)].values pole_slip = False if np.count_nonzero(pole_var) > 0: pole_slip = True return pole_slip def get_rotor_angles_static(self, machine_names=None): """ Get relative rotor angles from load flow simulations Returns: Initial relative rotor angles for all machines """ if machine_names is None: machines = self.app.GetCalcRelevantObjects("*.ElmSym") else: machines = [] for machine_name in machine_names: machine_object = self.app.GetCalcRelevantObjects( machine_name + ".ElmSym" ) machines.append(machine_object[0]) rotor_ang = [] phi_ref = 0 for m in machines: if self.check_if_in_service(m.loc_name): u_t = m.GetAttribute("n:u1:bus1") 
i_t = m.GetAttribute("m:i1:bus1") r_stator = m.typ_id.rstr x_q = m.typ_id.xq phi = np.arctan(u_t + i_t*(r_stator+x_q))*180/np.pi - 90 if self.is_ref(m.loc_name): rotor_ang.append(0) phi_ref = phi else: rotor_ang.append(phi-phi_ref-m.GetAttribute( "n:phiurel:bus1")) return rotor_ang def get_initial_rotor_angles(self): """ Get initial relative rotor angles Returns: Initial relative rotor angles for all machines """ var = "firel" initial_ang = [] for name, gen in self.gens.items(): if gen.in_service: pole_slip = self.result.loc[ 0, (name, "outofstep") ] # always float angle = self.result.loc[0, (name, var)] # .values if type(angle) != type(pole_slip): angle = angle.replace(",", ".") angle = float(angle) initial_ang.append(angle) else: initial_ang.append(0) return initial_ang # TODO, this mehtod should be generalised and a test made def get_generator_voltage_angles(self, machine_names=None): """ Get machine voltage angles Returns: Voltage angles for all machines """ if machine_names is None: machines = self.app.GetCalcRelevantObjects("*.ElmSym") else: machines = [] for machine_name in machine_names: machine_object = self.app.GetCalcRelevantObjects( machine_name + ".ElmSym" ) machines.append(machine_object[0]) initial_ang = [] for m in machines: if self.check_if_in_service(m.loc_name): initial_ang.append(m.GetAttribute("n:phiurel:bus1")) else: initial_ang.append(0) return initial_ang def get_machines_inertia_list(self): """ Function to get array of all machines inertias,'M', corresponding to 2HS/omega_0. 
Returns: List with machine name and corresponding inertia """ # generator types (ed up with H array) omega_0 = 50 machine_list = self.app.GetCalcRelevantObjects("*.ElmSym") machine_type = [] machine_name = [] # Identify the machine type # (GENSAL - salient pole, or GENROU - round pole) for machine in machine_list: machine_type.append(machine.typ_id) machine_name.append(machine.loc_name) inertias = [] for machine in machine_type: inertias.append(2 * machine.sgn * machine.h / omega_0) inertia_list = np.column_stack([machine_name, inertias]) return inertia_list def create_short_circuit(self, target, time, name): """Create a three phase short circuit. Args: target: Component to short. time: Start time of the short circuit. name: Name of the event. """ # Get the event folder evt_folder = self.app.GetFromStudyCase("IntEvt") # Get event name of events in folder events = [i.loc_name for i in evt_folder.GetContents("*.EvtShc")] # Delete existing events with the same name if name in events: self.delete_short_circuit(name) # Create an empty short circuit event evt_folder.CreateObject("EvtShc", name) # Get the empty short circuit event sc = evt_folder.GetContents(name + ".EvtShc")[0] # Set time, target and type of short circuit sc.time = time sc.p_target = target.pf_object sc.i_shc = 0 def delete_short_circuit(self, name): """Delete a short circuit event. Args: name: Name of the event. """ # Get the event folder evt_folder = self.app.GetFromStudyCase("IntEvt") # Find the short circuit and clear event to delete sc = evt_folder.GetContents(name + ".EvtShc") scc = evt_folder.GetContents(name + "_clear" + ".EvtShc") if sc: sc[0].Delete() if scc: scc[0].Delete() def create_switch_event(self, target, time, name=None): """Create a switching event. Args: target: Component to switch. time: When to switch name: Name of the event. 
comp: Object to create the event for """ if not name: name = target.name + "_switch" # Get the event folder evt_folder = self.app.GetFromStudyCase("IntEvt") # Get event name of events in folder events = [i.loc_name for i in evt_folder.GetContents("*.EvtSwitch")] # Delete existing events with the same name if name in events: self.delete_switch_event(name) # Create an empty switch event evt_folder.CreateObject("EvtSwitch", name) # Get the empty switch event sw = evt_folder.GetContents(name + ".EvtSwitch")[0] # Set time, target and type of short circuit sw.time = time sw.p_target = target.pf_object def delete_switch_event(self, name): """Delete a switch event. Args: name: Name of the event. """ # Get the event folder evt_folder = self.app.GetFromStudyCase("IntEvt") # Find the switch event and clear event to delete sw = evt_folder.GetContents(name + ".EvtSwitch") sww = evt_folder.GetContents(name + "_clear" + ".EvtSwitch") if sw: sw[0].Delete() if sww: sww[0].Delete() def clear_all_events(self): # Get the event folder evt_folder = self.app.GetFromStudyCase("IntEvt") # Get a list of all events events = evt_folder.GetContents("*") # Loop through all events and use the correct delete function for e in events: evt_name = e.loc_name evt_class = e.GetClassName() if evt_class == "EvtSwitch": self.delete_short_circuit(evt_name) elif evt_class == "EvtShc": if evt_name.split("-")[0] == "trip": self.delete_trip_line_event(evt_name) else: self.delete_switch_event(evt_name) def get_events(self): """ Return a list of events """ evt_folder = self.app.GetFromStudyCase("IntEvt") events = [i.loc_name for i in evt_folder.GetContents()] return events def get_output_window_content(self): """Returns the messages from the power factory output window.""" return self.window.GetContent() def clear_output_window(self): """Clears the output window.""" self.window.Clear() def run_load_flow(self, balanced=0, power_control=0, slack=0): """Method for running a load flow. 
Args: balanced: 0: Three phase balanced load flow. 1: Three phase unbalanced load flow. 2: DC load flow. power_control: 0: As dispatched 1: According to secondary control 2: According to primary control 3: According to inertias slack: This is only relevant if power_control is 0 0: By reference machine 1: By load at reference bus 2: By static generator at reference bus 3: By loads 4: By synchronous generators 5: By synchronous generators and static generators """ self.ldf.ipot_net = balanced self.ldf.iopt_aptdist = power_control self.ldf.iPbalancing = slack return self.ldf.Execute() def set_element_OPF_attr( self, attr, element_type, relative_attr={"Pmin_uc": "P_max", "Pmax_uc": "P_max"} ): """ Set attributes of element in optimal power flow Args: attribute (str) element_type (str) e.g. *.ElmSym for all generators """ for elm in self.app.GetCalcRelevantObjects(element_type): for k, v in attr.items(): if k in relative_attr.keys(): base_val = getattr(elm, relative_attr[k]) v_mod = np.array(v) * base_val setattr(elm, k, v_mod.tolist()) else: setattr(elm, k, v) def set_generator_OPF_cost(self, cost_dict): """ Set generator cost attributes for optimal power flow Args: cost_segments: double dict key 1: generator names, dict 2: ccost: list of segment cost_data cpower: list of segment power iInterPol: int 0: spline 1: piecewiselinear 2: polynomial 3: hermine penaltyCost: float fixedCost: float """ for cf, cost_data in cost_dict.items(): if len(cost_data["ccost"]) != len(cost_data["cpower"]): print("Number of segments for cost and power is not equal!") gen_set = cost_data["generators"] for gen_name in gen_set: relative_attr = ["ccost", "cpower"] gen = self.app.GetCalcRelevantObjects(gen_name + ".ElmSym")[0] for k, v in cost_data.items(): if k == "generators": continue if k in relative_attr: v_mod = np.array(v) * gen.P_max setattr(gen, k, v_mod.tolist()) continue setattr(gen, k, v) def run_OPF(self, power_flow=0, obj_function='cst', attributes={}): """Method for running 
optimal power flow Args: power_flow: 0: AC optimization (interior point method) 1: DC optimization (linear programming (LP)) 2: Contingency constrained DC optimization (LP)) obj_function: los: Minimization of losses (total) slo: Minimization of losses (selection) cst: Minimization of cost shd: Minimization of load shedding rpr: Maximization of reactive power reserve dev: Minimization of control variable deviations Kwargs: Controls (boolean): iopt_pd: Generator active power dispatch iopt_qd: Generator/SVS reactive power dispatch iopt_trf: Transformer tap positions iopt_sht: Switchable shunts iopt_genP: Active power limits of generators iopt_genQ: Reactive power limits of generators/SVS iopt_brnch: Branch flow limits (max. loading) iopt_bus: Voltage limits of busbars/terminals iopt_add: Boundary flow limits Soft constraints (boolean): penaltySoftConstr: Penalty factor for soft constraints (float) isForceSoftPLims: Enforce soft active power limits of generators isForceSoftQLims: Enforce soft reactive power limits of generators/SVS isForceSoftLoadingLims: Enforce soft branch flow limits (max. 
loading) isForceSoftVoltageLims: Enforce soft voltage limits of busbars/terminal """ if not hasattr(self, "opf"): self.opf = self.app.GetFromStudyCase("ComOpf") self.opf.ipopt_ACDC = power_flow self.opf.iopt_obj = obj_function for k, v in attributes.items(): setattr(self.opf, k, v) return self.opf.Execute() def get_OPF_results(self): opf_res = {} gens = self.app.GetCalcRelevantObjects("*.ElmSym") gen_var = ["c:avgCosts", "c:Pdisp", "c:cst_disp"] for gen in gens: gen_name = gen.GetFullName().split("\\")[-1].split(".")[0] opf_res[gen_name] = {i.split(":")[1]: gen.GetAttribute(i) for i in gen_var} loads = self.app.GetCalcRelevantObjects("*.ElmLod") load_var = ["m:P:bus1", "c:Pmism"] for load in loads: load_name = load.GetFullName().split("\\")[-1].split(".")[0] opf_res[load_name] = { i.split(":")[1]: load.GetAttribute(i) for i in load_var } lines = self.app.GetCalcRelevantObjects("*.ElmLne") line_var = ["m:P:bus1", "c:loading"] for line in lines: if not line.outserv: line_name = line.GetFullName().split('\\')[-1].split('.')[0] opf_res[line_name] = { i.split(':')[1]: line.GetAttribute(i) for i in line_var } grid = self.app.GetCalcRelevantObjects('*.ElmNet')[0] sys_var = ['c:cst_disp', 'c:LossP', 'c:LossQ', 'c:GenP', 'c:GenQ'] opf_res['system'] = {i.split(':')[1]: grid.GetAttribute(i) for i in sys_var} opf_res = pd.DataFrame(opf_res).unstack().dropna() return opf_res def calculate_isf(self, lines=None, delta_p=5, balanced=0, power_control=0, slack=0): """Method that calculates the injection shift factors for tie lines This method calculates the injection shift factors for tie lines given all generators. These factors can be used for redispatching generation to alleviate tie line overloads. The method can be extended to also consider changes in loads. The resulting matrix is an (m x n) matrix where m is the number of tie lines in the system and n is the number of generators. Args: lines: The lines to include in the ISF matrix. The default is all. 
delta_p: Amount of power to change on generator balanced: 0: Three phase balanced load flow. 1: Three phase unbalanced load flow. 2: DC load flow. power_control: 0: As dispatched 1: According to secondary control 2: According to primary control 3: According to inertias slack: This is only relevant if power_control is 0 0: By reference machine 1: By load at reference bus 2: By static generator at reference bus 3: By loads 4: By synchronous generators 5: By synchronous generators and static generators """ if not lines: lines = self.lines gens = self.gens.values() isf = np.zeros((len(lines), len(gens))) for idx, gen in enumerate(gens): # Run load flow before changing the power if self.run_load_flow(balanced, power_control, slack): raise RuntimeError("Power flow did not converge") # Get the load flow before changing power y_0 = [line.p for line in lines.values()] # Change flow and calculate ISF p = float(gen.p_set) gen.p_set = delta_p+p self.run_load_flow(balanced, power_control, slack) y_1 = [line.p for line in lines.values()] isf[:, idx] = (np.array(y_1)-np.array(y_0))/delta_p # Change the load back gen.p_set = p return isf def calculate_eigenvalues(self, res_file="Modal_Analysis"): """Method that calulates the eigenvalues of a system. Args: res_file: The name of the file to read the results from. 
""" mode = self.app.GetFromStudyCase("ComMod") # Modal analysis mode.Execute() res = self.app.GetFromStudyCase(res_file+'.ElmRes') res.Load() # load the data for reading # We want to store a, b, frequency, and damping df = pd.DataFrame(np.zeros((res.GetNumberOfRows(), 4)), columns=["a", "b", "damping", "frequency"]) min_damping = np.inf for i in range(0, res.GetNumberOfRows()): a = res.GetValue(i, 0)[1] b = res.GetValue(i, 1)[1] df.iloc[i, 3] = abs(b/2/np.pi) df.iloc[i, 0] = a df.iloc[i, 1] = b df.iloc[i, 2] = -a/np.sqrt(a**2 + b**2) if df.iloc[i, 2] < min_damping: min_damping = df.iloc[i, 2] return EigenValueResults(df, min_damping) def init_system_from_res(self, res): """Initialise system from old results.""" self.init_objs_from_df(res.gen, self.gens) self.init_objs_from_df(res.load, self.loads) def init_objs_from_df(self, df, objs): """Initialise an object type from df.""" for obj in df.index: for prop in df.columns: setattr(objs[obj], prop, df.loc[obj, prop]) def change_os(self, series): """Initialise the grid from a pandas Series The panda series should have multi index, where the first index is the type of component, loads, gens, lines or areas. The second index should be the name of the component, and the third index is the property to set.""" for idx in series.index: obj = getattr(self, idx[0]) setattr(obj[idx[1]], idx[2], series[idx]) def get_total_load(self): """Return the total load of the system.""" return sum(load.p_set for load in self.loads.values()) def get_total_gen(self): """Return the total load of the system.""" return sum(gen.p_set for gen in self.gens.values()) def get_pf_results(self): """Return a PFResults object.""" return PFResults(self)
[ "sinfactory.pfresults.PFResults", "sinfactory.eigenresults.EigenValueResults", "sinfactory.area.Area", "numpy.sqrt", "pandas.read_csv", "powerfactory.GetApplication", "sinfactory.load.Load", "numpy.column_stack", "numpy.count_nonzero", "numpy.array", "sinfactory.line.Line", "pandas.DataFrame",...
[((628, 647), 'powerfactory.GetApplication', 'pf.GetApplication', ([], {}), '()\n', (645, 647), True, 'import powerfactory as pf\n'), ((8173, 8244), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'sep': '""","""', 'decimal': '"""."""', 'header': '[0, 1]', 'index_col': '(0)'}), "(filepath, sep=',', decimal='.', header=[0, 1], index_col=0)\n", (8184, 8244), True, 'import pandas as pd\n'), ((19051, 19092), 'numpy.column_stack', 'np.column_stack', (['[machine_name, inertias]'], {}), '([machine_name, inertias])\n', (19066, 19092), True, 'import numpy as np\n'), ((33218, 33252), 'sinfactory.eigenresults.EigenValueResults', 'EigenValueResults', (['df', 'min_damping'], {}), '(df, min_damping)\n', (33235, 33252), False, 'from sinfactory.eigenresults import EigenValueResults\n'), ((34519, 34534), 'sinfactory.pfresults.PFResults', 'PFResults', (['self'], {}), '(self)\n', (34528, 34534), False, 'from sinfactory.pfresults import PFResults\n'), ((1211, 1221), 'sinfactory.line.Line', 'Line', (['line'], {}), '(line)\n', (1215, 1221), False, 'from sinfactory.line import Line\n'), ((1341, 1355), 'sinfactory.generator.Generator', 'Generator', (['gen'], {}), '(gen)\n', (1350, 1355), False, 'from sinfactory.generator import Generator\n'), ((1483, 1493), 'sinfactory.load.Load', 'Load', (['load'], {}), '(load)\n', (1487, 1493), False, 'from sinfactory.load import Load\n'), ((1663, 1673), 'sinfactory.area.Area', 'Area', (['area'], {}), '(area)\n', (1667, 1673), False, 'from sinfactory.area import Area\n'), ((1803, 1811), 'sinfactory.bus.Bus', 'Bus', (['bus'], {}), '(bus)\n', (1806, 1811), False, 'from sinfactory.bus import Bus\n'), ((15269, 15295), 'numpy.count_nonzero', 'np.count_nonzero', (['pole_var'], {}), '(pole_var)\n', (15285, 15295), True, 'import numpy as np\n'), ((33093, 33117), 'numpy.sqrt', 'np.sqrt', (['(a ** 2 + b ** 2)'], {}), '(a ** 2 + b ** 2)\n', (33100, 33117), True, 'import numpy as np\n'), ((32073, 32086), 'numpy.array', 'np.array', (['y_1'], {}), '(y_1)\n', 
(32081, 32086), True, 'import numpy as np\n'), ((32087, 32100), 'numpy.array', 'np.array', (['y_0'], {}), '(y_0)\n', (32095, 32100), True, 'import numpy as np\n'), ((24746, 24757), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (24754, 24757), True, 'import numpy as np\n'), ((29797, 29818), 'pandas.DataFrame', 'pd.DataFrame', (['opf_res'], {}), '(opf_res)\n', (29809, 29818), True, 'import pandas as pd\n'), ((16273, 16312), 'numpy.arctan', 'np.arctan', (['(u_t + i_t * (r_stator + x_q))'], {}), '(u_t + i_t * (r_stator + x_q))\n', (16282, 16312), True, 'import numpy as np\n'), ((26106, 26117), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (26114, 26117), True, 'import numpy as np\n')]
#!/usr/bin/env python from __future__ import division """@package etddf ROS interface script for delta tiering filter Filter operates in ENU """ from etddf.delta_tier import DeltaTier import rospy import threading from minau.msg import ControlStatus from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity from etddf.srv import GetMeasurementPackage import numpy as np import tf np.set_printoptions(suppress=True) from copy import deepcopy from std_msgs.msg import Header, Float64 from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, PoseWithCovarianceStamped from nav_msgs.msg import Odometry from minau.msg import SonarTargetList, SonarTarget from cuprint.cuprint import CUPrint __author__ = "<NAME>" __copyright__ = "Copyright 2020, COHRINT Lab" __email__ = "<EMAIL>" __status__ = "Development" __license__ = "MIT" __maintainer__ = "<NAME>" __version__ = "3.0" NUM_OWNSHIP_STATES = 6 class ETDDF_Node: def __init__(self, my_name, \ update_rate, \ delta_tiers, \ asset2id, \ delta_codebook_table, \ buffer_size, \ meas_space_table, \ missed_meas_tolerance_table, \ x0,\ P0,\ Q,\ default_meas_variance, use_control_input): self.update_rate = update_rate self.asset2id = asset2id self.Q = Q self.use_control_input = use_control_input self.default_meas_variance = default_meas_variance self.my_name = my_name self.landmark_dict = rospy.get_param("~landmarks", {}) self.cuprint = CUPrint(rospy.get_name()) self.filter = DeltaTier(NUM_OWNSHIP_STATES, \ x0,\ P0,\ buffer_size,\ meas_space_table,\ missed_meas_tolerance_table, \ delta_codebook_table,\ delta_tiers,\ self.asset2id,\ my_name) self.network_pub = rospy.Publisher("etddf/estimate/network", NetworkEstimate, queue_size=10) self.statistics_pub = rospy.Publisher("etddf/statistics", EtddfStatistics, queue_size=10) self.statistics = EtddfStatistics(0, rospy.get_rostime(), 0, 0, delta_tiers, [0 for _ in delta_tiers], 
0.0, [], False) self.asset_pub_dict = {} for asset in self.asset2id.keys(): if "surface" in asset: continue self.asset_pub_dict[asset] = rospy.Publisher("etddf/estimate/" + asset, Odometry, queue_size=10) self.update_seq = 0 self.last_depth_meas = None rospy.sleep(rospy.Duration(1 / self.update_rate)) self.last_update_time = rospy.get_rostime() - rospy.Duration(1 / self.update_rate) self.meas_lock = threading.Lock() self.update_lock = threading.Lock() self.last_orientation = None self.red_asset_found = False self.red_asset_names = rospy.get_param("~red_team_names") # Depth Sensor if rospy.get_param("~measurement_topics/depth") != "None": rospy.Subscriber(rospy.get_param("~measurement_topics/depth"), Float64, self.depth_callback, queue_size=1) # Modem & Measurement Packages rospy.Subscriber("etddf/packages_in", MeasurementPackage, self.meas_pkg_callback, queue_size=1) if self.use_control_input: self.control_input = None rospy.Subscriber("uuv_control/control_status", ControlStatus, self.control_status_callback, queue_size=1) if rospy.get_param("~strapdown"): rospy.Subscriber(rospy.get_param("~measurement_topics/imu_est"), Odometry, self.orientation_estimate_callback, queue_size=1) rospy.wait_for_message(rospy.get_param("~measurement_topics/imu_est"), Odometry) # IMU Covariance Intersection if rospy.get_param("~strapdown") and rospy.get_param("~measurement_topics/imu_ci") != "None": self.cuprint("Intersecting with strapdown") self.intersection_pub = rospy.Publisher("strapdown/intersection_result", PositionVelocity, queue_size=1) rospy.Subscriber(rospy.get_param("~measurement_topics/imu_ci"), PositionVelocity, self.nav_filter_callback, queue_size=1) else: self.cuprint("Not intersecting with strapdown filter") rospy.Timer(rospy.Duration(1 / self.update_rate), self.no_nav_filter_callback) # Sonar Subscription if rospy.get_param("~measurement_topics/sonar") != "None": rospy.Subscriber(rospy.get_param("~measurement_topics/sonar"), SonarTargetList, self.sonar_callback) 
self.data_x, self.data_y = None, None # rospy.Subscriber("pose_gt", Odometry, self.gps_callback, queue_size=1) # Initialize Buffer Service rospy.Service('etddf/get_measurement_package', GetMeasurementPackage, self.get_meas_pkg_callback) self.cuprint("loaded") def gps_callback(self, msg): self.data_x = msg.pose.pose.position.x + np.random.normal(0, scale=0.05) self.data_y = msg.pose.pose.position.y + np.random.normal(0, scale=0.05) def orientation_estimate_callback(self, odom): self.meas_lock.acquire() self.last_orientation = odom.pose.pose.orientation self.last_orientation_cov = np.array(odom.pose.covariance).reshape(6,6) self.last_orientation_dot = odom.twist.twist.angular self.last_orientation_dot_cov = np.array(odom.twist.covariance).reshape(6,6) self.meas_lock.release() def sonar_callback(self, sonar_list): for target in sonar_list.targets: # self.cuprint("Receiving sonar measurements") if self.last_orientation is None: # No orientation, no linearization of the sonar measurement # print("no ori") return if target.id == "detection": continue # self.cuprint("Receiving sonar data") # Convert quaternions to Euler angles. 
self.meas_lock.acquire() (r, p, y) = tf.transformations.euler_from_quaternion([self.last_orientation.x, \ self.last_orientation.y, self.last_orientation.z, self.last_orientation.w]) self.meas_lock.release() # y = (np.pi/180.0) * 8 bearing_world = y + target.bearing_rad z = target.range_m * np.sin(target.elevation_rad) xy_dist = target.range_m * np.cos(target.elevation_rad) x = xy_dist * np.cos(bearing_world) y = xy_dist * np.sin(bearing_world) now = rospy.get_rostime() sonar_x, sonar_y = None, None if "landmark_" in target.id: sonar_x = Measurement("sonar_x", now, self.my_name, "", x, self.default_meas_variance["sonar_x"], self.landmark_dict[target.id[len("landmark_"):]]) sonar_y = Measurement("sonar_y", now, self.my_name, "", y, self.default_meas_variance["sonar_x"], self.landmark_dict[target.id[len("landmark_"):]]) else: sonar_x = Measurement("sonar_x", now, self.my_name, target.id, x, self.default_meas_variance["sonar_x"], []) sonar_y = Measurement("sonar_y", now, self.my_name, target.id, y, self.default_meas_variance["sonar_y"], []) if target.id in self.red_asset_names and not self.red_asset_found: self.cuprint("Red Asset detected!") self.red_asset_found = True # sonar_z = Measurement("sonar_z", now, self.my_name, target.id, z, self.default_meas_variance["sonar_z"], [] self.filter.add_meas(sonar_x) self.filter.add_meas(sonar_y) # self.filter.add_meas(sonar_z) # self.cuprint("meas added") def publish_stats(self, last_update_time): self.statistics.seq = self.update_seq self.statistics.stamp = last_update_time self.statistics.overflown, delta, buffer = self.filter.peek_buffer() self.statistics.current_lowest_multiplier = delta meas_name_list = [x.meas_type for x in buffer] self.statistics.current_lowest_buffer = meas_name_list self.statistics_pub.publish(self.statistics) def no_nav_filter_callback(self, event): t_now = rospy.get_rostime() delta_t_ros = t_now - self.last_update_time self.update_lock.acquire() ### Run Prediction ### ### Run Prediction ### if 
self.use_control_input and self.control_input is not None: self.filter.predict(self.control_input, self.Q, delta_t_ros.to_sec(), False) else: self.filter.predict(np.zeros((3,1)), self.Q, delta_t_ros.to_sec(), False) ### Run Correction ### # Construct depth measurement z_r = self.default_meas_variance["depth"] z_data = self.last_depth_meas if z_data != None: z = Measurement("depth", t_now, self.my_name,"", z_data, z_r, []) self.filter.add_meas(z) self.last_depth_meas = None # correction self.filter.correct(t_now) self.publish_estimates(t_now) self.last_update_time = t_now self.update_seq += 1 self.update_lock.release() self.publish_stats(t_now) def nav_filter_callback(self, pv_msg): # Update at specified rate t_now = rospy.get_rostime() delta_t_ros = t_now - self.last_update_time if delta_t_ros < rospy.Duration(1/self.update_rate): return self.update_lock.acquire() ### Run Prediction ### if self.use_control_input and self.control_input is not None: self.filter.predict(self.control_input, self.Q, delta_t_ros.to_sec(), False) else: self.filter.predict(np.zeros((3,1)), self.Q, delta_t_ros.to_sec(), False) ### Run Correction ### # Construct depth measurement z_r = self.default_meas_variance["depth"] z_data = self.last_depth_meas if z_data != None: z = Measurement("depth", t_now, self.my_name,"", z_data, z_r, []) # Flip z data to transform enu -> NED self.filter.add_meas(z) self.last_depth_meas = None if self.data_x != None: x = Measurement("gps_x", t_now, self.my_name,"", self.data_x, 0.1, []) self.filter.add_meas(x) self.data_x = None if self.data_y != None: y = Measurement("gps_y", t_now, self.my_name,"", self.data_y, 0.1, []) self.filter.add_meas(y) self.data_y = None # correction self.filter.correct(t_now) ### Covariancee Intersect ### # Turn odom estimate into numpy mean = np.array([[pv_msg.position.x, pv_msg.position.y, pv_msg.position.z, \ pv_msg.velocity.x, pv_msg.velocity.y, pv_msg.velocity.z]]).T cov = np.array(pv_msg.covariance).reshape(6,6) # Run covariance 
intersection c_bar, Pcc = self.filter.intersect(mean, cov) position = Vector3(c_bar[0,0], c_bar[1,0], c_bar[2,0]) velocity = Vector3(c_bar[3,0], c_bar[4,0], c_bar[5,0]) covariance = list(Pcc.flatten()) new_pv_msg = PositionVelocity(position, velocity, covariance) self.intersection_pub.publish(new_pv_msg) self.publish_estimates(t_now) self.last_update_time = t_now self.update_seq += 1 self.update_lock.release() self.publish_stats(t_now) def control_status_callback(self, msg): self.update_lock.acquire() if msg.is_setpoint_active and msg.is_heading_velocity_setpoint_active: self.control_input = np.array([[msg.setpoint_velocity.y, msg.setpoint_velocity.z, -msg.setpoint_velocity.z]]).T else: self.control_input = None # GRAB CONTROL INPUT self.update_lock.release() def depth_callback(self, msg): self.meas_lock.acquire() self.last_depth_meas = msg.data self.meas_lock.release() def publish_estimates(self, timestamp): ne = NetworkEstimate() for asset in self.asset2id.keys(): if "surface" in asset: continue if "red" in asset and not self.red_asset_found: continue # else: # print("publishing " + asset + "'s estimate") # Construct Odometry Msg for Asset mean, cov = self.filter.get_asset_estimate(asset) pose_cov = np.zeros((6,6)) pose_cov[:3,:3] = cov[:3,:3] if asset == self.my_name: pose = Pose(Point(mean[0],mean[1],mean[2]), \ self.last_orientation) pose_cov[3:,3:] = self.last_orientation_cov[3:,3:] else: pose = Pose(Point(mean[0],mean[1],mean[2]), \ Quaternion(0,0,0,1)) pose_cov[3:,3:] = np.eye(3) * 3 pwc = PoseWithCovariance(pose, list(pose_cov.flatten())) twist_cov = np.zeros((6,6)) twist_cov[:3,:3] = cov[3:6,3:6] if asset == self.my_name: tw = Twist(Vector3(mean[3],mean[4],mean[5]), self.last_orientation_dot) twist_cov[3:, 3:] = self.last_orientation_dot_cov[3:,3:] else: tw = Twist(Vector3(mean[3],mean[4],mean[5]), Vector3(0,0,0)) twist_cov[3:, 3:] = np.eye(3) * -1 twc = TwistWithCovariance(tw, list(twist_cov.flatten())) h = Header(self.update_seq, timestamp, "map") o = 
Odometry(h, "map", pwc, twc) ae = AssetEstimate(o, asset) ne.assets.append(ae) self.asset_pub_dict[asset].publish(o) self.network_pub.publish(ne) def meas_pkg_callback(self, msg): # Modem Meas taken by surface if msg.src_asset == "surface": self.cuprint("Receiving Surface Modem Measurements") for meas in msg.measurements: # Approximate the fuse on the next update, so we can get other asset's position immediately if meas.meas_type == "modem_elevation": rospy.logerr("Ignoring Modem Elevation Measurement since we have depth measurements") continue elif meas.meas_type == "modem_azimuth": meas.global_pose = list(meas.global_pose) # self.cuprint("azimuth: " + str(meas.data)) meas.data = (meas.data * np.pi) / 180 meas.variance = self.default_meas_variance["modem_azimuth"] elif meas.meas_type == "modem_range": meas.global_pose = list(meas.global_pose) # self.cuprint("range: " + str(meas.data)) meas.variance = self.default_meas_variance["modem_range"] self.filter.add_meas(meas, force_fuse=True) # Modem Meas taken by me elif msg.src_asset == self.my_name: # self.cuprint("Receiving Modem Measurements Taken by Me") for meas in msg.measurements: # Approximate the fuse on the next update, so we can get other asset's position immediately if meas.meas_type == "modem_elevation": rospy.logerr("Ignoring Modem Elevation Measurement since we have depth measurements") continue elif meas.meas_type == "modem_azimuth": meas.global_pose = list(meas.global_pose) meas.data = (meas.data * np.pi) / 180 meas.variance = self.default_meas_variance["modem_azimuth"] elif meas.meas_type == "modem_range": meas.global_pose = list(meas.global_pose) meas.variance = self.default_meas_variance["modem_range"] self.filter.add_meas(meas, force_fuse=True) # Buffer else: self.cuprint("receiving buffer") self.update_lock.acquire() # Loop through buffer and see if we've found the red agent for m in msg.measurements: if m.measured_asset in self.red_asset_names and not self.red_asset_found: self.red_asset_found = 
True self.cuprint("Red asset measurement received!") implicit_cnt, explicit_cnt = self.filter.catch_up(msg.delta_multiplier, msg.measurements) self.cuprint("...caught up") self.update_lock.release() self.statistics.implicit_count += implicit_cnt self.statistics.explicit_count += explicit_cnt def get_meas_pkg_callback(self, req): self.cuprint("pulling buffer") delta, buffer = self.filter.pull_buffer() ind = self.statistics.delta_tiers.index(delta) self.statistics.buffer_counts[ind] += 1 mp = MeasurementPackage(buffer, self.my_name, delta) print(mp) return mp ################################ ### Initialization Functions ### ################################ def get_indices_from_asset_names(blue_team): my_name = rospy.get_param("~my_name") red_team = rospy.get_param("~red_team_names") asset2id = {} asset2id[my_name] = 0 next_index = 1 for asset in blue_team: if asset == my_name: continue else: asset2id[asset] = next_index next_index += 1 for asset in red_team: asset2id[asset] = next_index next_index += 1 if my_name != "surface": asset2id["surface"] = -1 # arbitrary negative number return asset2id def get_delta_codebook_table(): delta_codebook = {} meas_info = rospy.get_param("~measurements") for meas in meas_info.keys(): base_et_delta = meas_info[meas]["base_et_delta"] delta_codebook[meas] = base_et_delta return delta_codebook def get_missed_meas_tolerance_table(): meas_tolerance_table = {} meas_info = rospy.get_param("~measurements") for meas in meas_info.keys(): meas_tolerance_table[meas] = meas_info[meas]["missed_tolerance"] return meas_tolerance_table def get_meas_space_table(): meas_space_table = {} meas_info = rospy.get_param("~measurements") for meas in meas_info.keys(): meas_space_table[meas] = meas_info[meas]["buffer_size"] meas_space_table["bookstart"] = rospy.get_param("~buffer_space/bookstart") meas_space_table["bookend"] = rospy.get_param("~buffer_space/bookend") meas_space_table["final_time"] = rospy.get_param("~buffer_space/final_time") return 
meas_space_table def _dict2arr(d): return np.array([[d["x"]],\ [d["y"]],\ [d["z"]],\ [d["x_vel"]], \ [d["y_vel"]],\ [d["z_vel"]]]) def _list2arr(l): return np.array([l]).reshape(-1,1) def _add_velocity_states(base_states): velocities = np.zeros((base_states.size,1)) return np.concatenate((base_states, velocities), axis=0) def get_initial_estimate(num_states, blue_team_names, blue_team_positions): default_starting_position = _dict2arr(rospy.get_param("~default_starting_position")) uncertainty_known_starting_position = _dict2arr( rospy.get_param("~initial_uncertainty/known_starting_position")) uncertainty_unknown_starting_position = _dict2arr( rospy.get_param("~initial_uncertainty/unknown_starting_position")) my_starting_position = rospy.get_param("~starting_position") if not my_starting_position: my_starting_position = deepcopy(default_starting_position) else: my_starting_position = _add_velocity_states( _list2arr(my_starting_position)) ownship_uncertainty = _dict2arr( rospy.get_param("~initial_uncertainty/ownship") ) uncertainty = np.zeros((num_states,num_states)) uncertainty_vector = np.zeros((num_states,1)) uncertainty_vector[:NUM_OWNSHIP_STATES] = ownship_uncertainty uncertainty += np.eye(num_states) * uncertainty_vector state_vector = my_starting_position my_name = rospy.get_param("~my_name") red_team_names = rospy.get_param("~red_team_names") next_index_unc = 1 next_index_pos = 1 for asset in blue_team_names: if asset == my_name: next_index_pos += 1 continue if len(blue_team_positions) >= next_index_pos: # we were given the positione of this asset in roslaunch next_position = _add_velocity_states( _list2arr( blue_team_positions[next_index_pos-1])) uncertainty_vector = np.zeros((num_states,1)) uncertainty_vector[next_index_unc*NUM_OWNSHIP_STATES:(next_index_unc+1)*NUM_OWNSHIP_STATES] = uncertainty_known_starting_position uncertainty += np.eye(num_states) * uncertainty_vector else: next_position = deepcopy(default_starting_position) uncertainty_vector = 
np.zeros((num_states,1)) uncertainty_vector[next_index_unc*NUM_OWNSHIP_STATES:(next_index_unc+1)*NUM_OWNSHIP_STATES] = uncertainty_unknown_starting_position uncertainty += np.eye(num_states) * uncertainty_vector state_vector = np.concatenate((state_vector, next_position),axis=0) next_index_unc += 1 next_index_pos += 1 for asset in red_team_names: next_position = deepcopy(default_starting_position) state_vector = np.concatenate((state_vector, next_position),axis=0) uncertainty_vector = np.zeros((num_states,1)) uncertainty_vector[next_index_unc*NUM_OWNSHIP_STATES:(next_index_unc+1)*NUM_OWNSHIP_STATES] = uncertainty_unknown_starting_position uncertainty += np.eye(num_states) * uncertainty_vector next_index_unc += 1 return state_vector, uncertainty def get_process_noise(num_states, blue_team_names): Q = np.zeros((num_states, num_states)) ownship_Q = _dict2arr(rospy.get_param("~process_noise/ownship")) blueteam_Q = _dict2arr(rospy.get_param("~process_noise/blueteam")) redteam_Q = _dict2arr(rospy.get_param("~process_noise/redteam")) Q_vec = np.zeros((num_states,1)) Q_vec[:NUM_OWNSHIP_STATES] = ownship_Q Q += np.eye(num_states) * Q_vec my_name = rospy.get_param("~my_name") red_team_names = rospy.get_param("~red_team_names") next_index = 1 for asset in blue_team_names: if asset == my_name: continue Q_vec = np.zeros((num_states,1)) Q_vec[next_index*NUM_OWNSHIP_STATES:(next_index+1)*NUM_OWNSHIP_STATES] = blueteam_Q Q += np.eye(num_states) * Q_vec next_index += 1 for asset in red_team_names: Q_vec = np.zeros((num_states,1)) Q_vec[next_index*NUM_OWNSHIP_STATES:(next_index+1)*NUM_OWNSHIP_STATES] = redteam_Q Q += np.eye(num_states) * Q_vec next_index += 1 return Q def get_default_meas_variance(): meas_vars = {} meas_info = rospy.get_param("~measurements") for meas in meas_info.keys(): sd = meas_info[meas]["default_sd"] meas_vars[meas] = sd ** 2 return meas_vars if __name__ == "__main__": rospy.init_node("etddf_node") my_name = rospy.get_param("~my_name") update_rate = 
rospy.get_param("~update_rate") delta_tiers = rospy.get_param("~delta_tiers") blue_team_names = rospy.get_param("~blue_team_names") blue_team_positions = rospy.get_param("~blue_team_positions") # Don't track surface if it isn't this agent if my_name != "surface": ind = blue_team_names.index("surface") if ind >= 0: blue_team_names.pop(ind) blue_team_positions.pop(ind) asset2id = get_indices_from_asset_names(blue_team_names) delta_codebook_table = get_delta_codebook_table() buffer_size = rospy.get_param("~buffer_space/capacity") meas_space_table = get_meas_space_table() missed_meas_tolerance_table = get_missed_meas_tolerance_table() if my_name != "surface": num_assets = len(asset2id) - 1 # subtract surface else: num_assets = len(asset2id) x0, P0 = get_initial_estimate(num_assets * NUM_OWNSHIP_STATES, blue_team_names, blue_team_positions) Q = get_process_noise(num_assets * NUM_OWNSHIP_STATES, blue_team_names) rospy.logwarn("{}, {}, {}, {}".format(my_name, x0.shape, P0.shape, Q.shape)) default_meas_variance = get_default_meas_variance() use_control_input = rospy.get_param("~use_control_input") et_node = ETDDF_Node(my_name, update_rate, \ delta_tiers, \ asset2id, \ delta_codebook_table, \ buffer_size, \ meas_space_table, \ missed_meas_tolerance_table, \ x0,\ P0,\ Q,\ default_meas_variance,\ use_control_input) rospy.spin()
[ "rospy.logerr", "geometry_msgs.msg.Vector3", "rospy.init_node", "numpy.array", "copy.deepcopy", "numpy.sin", "etddf.delta_tier.DeltaTier", "etddf.msg.PositionVelocity", "nav_msgs.msg.Odometry", "threading.Lock", "rospy.Service", "geometry_msgs.msg.Quaternion", "rospy.spin", "numpy.concaten...
[((448, 482), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (467, 482), True, 'import numpy as np\n'), ((17586, 17613), 'rospy.get_param', 'rospy.get_param', (['"""~my_name"""'], {}), "('~my_name')\n", (17601, 17613), False, 'import rospy\n'), ((17629, 17663), 'rospy.get_param', 'rospy.get_param', (['"""~red_team_names"""'], {}), "('~red_team_names')\n", (17644, 17663), False, 'import rospy\n'), ((18163, 18195), 'rospy.get_param', 'rospy.get_param', (['"""~measurements"""'], {}), "('~measurements')\n", (18178, 18195), False, 'import rospy\n'), ((18445, 18477), 'rospy.get_param', 'rospy.get_param', (['"""~measurements"""'], {}), "('~measurements')\n", (18460, 18477), False, 'import rospy\n'), ((18690, 18722), 'rospy.get_param', 'rospy.get_param', (['"""~measurements"""'], {}), "('~measurements')\n", (18705, 18722), False, 'import rospy\n'), ((18858, 18900), 'rospy.get_param', 'rospy.get_param', (['"""~buffer_space/bookstart"""'], {}), "('~buffer_space/bookstart')\n", (18873, 18900), False, 'import rospy\n'), ((18935, 18975), 'rospy.get_param', 'rospy.get_param', (['"""~buffer_space/bookend"""'], {}), "('~buffer_space/bookend')\n", (18950, 18975), False, 'import rospy\n'), ((19013, 19056), 'rospy.get_param', 'rospy.get_param', (['"""~buffer_space/final_time"""'], {}), "('~buffer_space/final_time')\n", (19028, 19056), False, 'import rospy\n'), ((19116, 19203), 'numpy.array', 'np.array', (["[[d['x']], [d['y']], [d['z']], [d['x_vel']], [d['y_vel']], [d['z_vel']]]"], {}), "([[d['x']], [d['y']], [d['z']], [d['x_vel']], [d['y_vel']], [d[\n 'z_vel']]])\n", (19124, 19203), True, 'import numpy as np\n'), ((19419, 19450), 'numpy.zeros', 'np.zeros', (['(base_states.size, 1)'], {}), '((base_states.size, 1))\n', (19427, 19450), True, 'import numpy as np\n'), ((19461, 19510), 'numpy.concatenate', 'np.concatenate', (['(base_states, velocities)'], {'axis': '(0)'}), '((base_states, velocities), axis=0)\n', (19475, 19510), True, 
'import numpy as np\n'), ((19945, 19982), 'rospy.get_param', 'rospy.get_param', (['"""~starting_position"""'], {}), "('~starting_position')\n", (19960, 19982), False, 'import rospy\n'), ((20285, 20319), 'numpy.zeros', 'np.zeros', (['(num_states, num_states)'], {}), '((num_states, num_states))\n', (20293, 20319), True, 'import numpy as np\n'), ((20344, 20369), 'numpy.zeros', 'np.zeros', (['(num_states, 1)'], {}), '((num_states, 1))\n', (20352, 20369), True, 'import numpy as np\n'), ((20549, 20576), 'rospy.get_param', 'rospy.get_param', (['"""~my_name"""'], {}), "('~my_name')\n", (20564, 20576), False, 'import rospy\n'), ((20598, 20632), 'rospy.get_param', 'rospy.get_param', (['"""~red_team_names"""'], {}), "('~red_team_names')\n", (20613, 20632), False, 'import rospy\n'), ((22315, 22349), 'numpy.zeros', 'np.zeros', (['(num_states, num_states)'], {}), '((num_states, num_states))\n', (22323, 22349), True, 'import numpy as np\n'), ((22572, 22597), 'numpy.zeros', 'np.zeros', (['(num_states, 1)'], {}), '((num_states, 1))\n', (22580, 22597), True, 'import numpy as np\n'), ((22691, 22718), 'rospy.get_param', 'rospy.get_param', (['"""~my_name"""'], {}), "('~my_name')\n", (22706, 22718), False, 'import rospy\n'), ((22740, 22774), 'rospy.get_param', 'rospy.get_param', (['"""~red_team_names"""'], {}), "('~red_team_names')\n", (22755, 22774), False, 'import rospy\n'), ((23387, 23419), 'rospy.get_param', 'rospy.get_param', (['"""~measurements"""'], {}), "('~measurements')\n", (23402, 23419), False, 'import rospy\n'), ((23584, 23613), 'rospy.init_node', 'rospy.init_node', (['"""etddf_node"""'], {}), "('etddf_node')\n", (23599, 23613), False, 'import rospy\n'), ((23628, 23655), 'rospy.get_param', 'rospy.get_param', (['"""~my_name"""'], {}), "('~my_name')\n", (23643, 23655), False, 'import rospy\n'), ((23674, 23705), 'rospy.get_param', 'rospy.get_param', (['"""~update_rate"""'], {}), "('~update_rate')\n", (23689, 23705), False, 'import rospy\n'), ((23724, 23755), 'rospy.get_param', 
'rospy.get_param', (['"""~delta_tiers"""'], {}), "('~delta_tiers')\n", (23739, 23755), False, 'import rospy\n'), ((23778, 23813), 'rospy.get_param', 'rospy.get_param', (['"""~blue_team_names"""'], {}), "('~blue_team_names')\n", (23793, 23813), False, 'import rospy\n'), ((23840, 23879), 'rospy.get_param', 'rospy.get_param', (['"""~blue_team_positions"""'], {}), "('~blue_team_positions')\n", (23855, 23879), False, 'import rospy\n'), ((24240, 24281), 'rospy.get_param', 'rospy.get_param', (['"""~buffer_space/capacity"""'], {}), "('~buffer_space/capacity')\n", (24255, 24281), False, 'import rospy\n'), ((24870, 24907), 'rospy.get_param', 'rospy.get_param', (['"""~use_control_input"""'], {}), "('~use_control_input')\n", (24885, 24907), False, 'import rospy\n'), ((25425, 25437), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (25435, 25437), False, 'import rospy\n'), ((1714, 1747), 'rospy.get_param', 'rospy.get_param', (['"""~landmarks"""', '{}'], {}), "('~landmarks', {})\n", (1729, 1747), False, 'import rospy\n'), ((1829, 1994), 'etddf.delta_tier.DeltaTier', 'DeltaTier', (['NUM_OWNSHIP_STATES', 'x0', 'P0', 'buffer_size', 'meas_space_table', 'missed_meas_tolerance_table', 'delta_codebook_table', 'delta_tiers', 'self.asset2id', 'my_name'], {}), '(NUM_OWNSHIP_STATES, x0, P0, buffer_size, meas_space_table,\n missed_meas_tolerance_table, delta_codebook_table, delta_tiers, self.\n asset2id, my_name)\n', (1838, 1994), False, 'from etddf.delta_tier import DeltaTier\n'), ((2321, 2394), 'rospy.Publisher', 'rospy.Publisher', (['"""etddf/estimate/network"""', 'NetworkEstimate'], {'queue_size': '(10)'}), "('etddf/estimate/network', NetworkEstimate, queue_size=10)\n", (2336, 2394), False, 'import rospy\n'), ((2425, 2492), 'rospy.Publisher', 'rospy.Publisher', (['"""etddf/statistics"""', 'EtddfStatistics'], {'queue_size': '(10)'}), "('etddf/statistics', EtddfStatistics, queue_size=10)\n", (2440, 2492), False, 'import rospy\n'), ((3113, 3129), 'threading.Lock', 'threading.Lock', ([], {}), 
'()\n', (3127, 3129), False, 'import threading\n'), ((3157, 3173), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (3171, 3173), False, 'import threading\n'), ((3279, 3313), 'rospy.get_param', 'rospy.get_param', (['"""~red_team_names"""'], {}), "('~red_team_names')\n", (3294, 3313), False, 'import rospy\n'), ((3572, 3672), 'rospy.Subscriber', 'rospy.Subscriber', (['"""etddf/packages_in"""', 'MeasurementPackage', 'self.meas_pkg_callback'], {'queue_size': '(1)'}), "('etddf/packages_in', MeasurementPackage, self.\n meas_pkg_callback, queue_size=1)\n", (3588, 3672), False, 'import rospy\n'), ((3881, 3910), 'rospy.get_param', 'rospy.get_param', (['"""~strapdown"""'], {}), "('~strapdown')\n", (3896, 3910), False, 'import rospy\n'), ((5157, 5259), 'rospy.Service', 'rospy.Service', (['"""etddf/get_measurement_package"""', 'GetMeasurementPackage', 'self.get_meas_pkg_callback'], {}), "('etddf/get_measurement_package', GetMeasurementPackage, self.\n get_meas_pkg_callback)\n", (5170, 5259), False, 'import rospy\n'), ((8643, 8662), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (8660, 8662), False, 'import rospy\n'), ((9748, 9767), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (9765, 9767), False, 'import rospy\n'), ((11445, 11491), 'geometry_msgs.msg.Vector3', 'Vector3', (['c_bar[0, 0]', 'c_bar[1, 0]', 'c_bar[2, 0]'], {}), '(c_bar[0, 0], c_bar[1, 0], c_bar[2, 0])\n', (11452, 11491), False, 'from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, PoseWithCovarianceStamped\n'), ((11508, 11554), 'geometry_msgs.msg.Vector3', 'Vector3', (['c_bar[3, 0]', 'c_bar[4, 0]', 'c_bar[5, 0]'], {}), '(c_bar[3, 0], c_bar[4, 0], c_bar[5, 0])\n', (11515, 11554), False, 'from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, PoseWithCovarianceStamped\n'), ((11614, 11662), 'etddf.msg.PositionVelocity', 'PositionVelocity', (['position', 'velocity', 
'covariance'], {}), '(position, velocity, covariance)\n', (11630, 11662), False, 'from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity\n'), ((12491, 12508), 'etddf.msg.NetworkEstimate', 'NetworkEstimate', ([], {}), '()\n', (12506, 12508), False, 'from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity\n'), ((17341, 17388), 'etddf.msg.MeasurementPackage', 'MeasurementPackage', (['buffer', 'self.my_name', 'delta'], {}), '(buffer, self.my_name, delta)\n', (17359, 17388), False, 'from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity\n'), ((19630, 19675), 'rospy.get_param', 'rospy.get_param', (['"""~default_starting_position"""'], {}), "('~default_starting_position')\n", (19645, 19675), False, 'import rospy\n'), ((19730, 19793), 'rospy.get_param', 'rospy.get_param', (['"""~initial_uncertainty/known_starting_position"""'], {}), "('~initial_uncertainty/known_starting_position')\n", (19745, 19793), False, 'import rospy\n'), ((19850, 19915), 'rospy.get_param', 'rospy.get_param', (['"""~initial_uncertainty/unknown_starting_position"""'], {}), "('~initial_uncertainty/unknown_starting_position')\n", (19865, 19915), False, 'import rospy\n'), ((20047, 20082), 'copy.deepcopy', 'deepcopy', (['default_starting_position'], {}), '(default_starting_position)\n', (20055, 20082), False, 'from copy import deepcopy\n'), ((20216, 20263), 'rospy.get_param', 'rospy.get_param', (['"""~initial_uncertainty/ownship"""'], {}), "('~initial_uncertainty/ownship')\n", (20231, 20263), False, 'import rospy\n'), ((20454, 20472), 'numpy.eye', 'np.eye', (['num_states'], {}), '(num_states)\n', (20460, 20472), True, 'import numpy as np\n'), ((21647, 21700), 'numpy.concatenate', 'np.concatenate', (['(state_vector, next_position)'], {'axis': '(0)'}), '((state_vector, next_position), axis=0)\n', (21661, 21700), 
True, 'import numpy as np\n'), ((21813, 21848), 'copy.deepcopy', 'deepcopy', (['default_starting_position'], {}), '(default_starting_position)\n', (21821, 21848), False, 'from copy import deepcopy\n'), ((21872, 21925), 'numpy.concatenate', 'np.concatenate', (['(state_vector, next_position)'], {'axis': '(0)'}), '((state_vector, next_position), axis=0)\n', (21886, 21925), True, 'import numpy as np\n'), ((21955, 21980), 'numpy.zeros', 'np.zeros', (['(num_states, 1)'], {}), '((num_states, 1))\n', (21963, 21980), True, 'import numpy as np\n'), ((22376, 22417), 'rospy.get_param', 'rospy.get_param', (['"""~process_noise/ownship"""'], {}), "('~process_noise/ownship')\n", (22391, 22417), False, 'import rospy\n'), ((22446, 22488), 'rospy.get_param', 'rospy.get_param', (['"""~process_noise/blueteam"""'], {}), "('~process_noise/blueteam')\n", (22461, 22488), False, 'import rospy\n'), ((22516, 22557), 'rospy.get_param', 'rospy.get_param', (['"""~process_noise/redteam"""'], {}), "('~process_noise/redteam')\n", (22531, 22557), False, 'import rospy\n'), ((22649, 22667), 'numpy.eye', 'np.eye', (['num_states'], {}), '(num_states)\n', (22655, 22667), True, 'import numpy as np\n'), ((22895, 22920), 'numpy.zeros', 'np.zeros', (['(num_states, 1)'], {}), '((num_states, 1))\n', (22903, 22920), True, 'import numpy as np\n'), ((23125, 23150), 'numpy.zeros', 'np.zeros', (['(num_states, 1)'], {}), '((num_states, 1))\n', (23133, 23150), True, 'import numpy as np\n'), ((1780, 1796), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (1794, 1796), False, 'import rospy\n'), ((2538, 2557), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (2555, 2557), False, 'import rospy\n'), ((2798, 2865), 'rospy.Publisher', 'rospy.Publisher', (["('etddf/estimate/' + asset)", 'Odometry'], {'queue_size': '(10)'}), "('etddf/estimate/' + asset, Odometry, queue_size=10)\n", (2813, 2865), False, 'import rospy\n'), ((2959, 2995), 'rospy.Duration', 'rospy.Duration', (['(1 / self.update_rate)'], {}), '(1 / 
self.update_rate)\n', (2973, 2995), False, 'import rospy\n'), ((3029, 3048), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (3046, 3048), False, 'import rospy\n'), ((3051, 3087), 'rospy.Duration', 'rospy.Duration', (['(1 / self.update_rate)'], {}), '(1 / self.update_rate)\n', (3065, 3087), False, 'import rospy\n'), ((3349, 3393), 'rospy.get_param', 'rospy.get_param', (['"""~measurement_topics/depth"""'], {}), "('~measurement_topics/depth')\n", (3364, 3393), False, 'import rospy\n'), ((3754, 3864), 'rospy.Subscriber', 'rospy.Subscriber', (['"""uuv_control/control_status"""', 'ControlStatus', 'self.control_status_callback'], {'queue_size': '(1)'}), "('uuv_control/control_status', ControlStatus, self.\n control_status_callback, queue_size=1)\n", (3770, 3864), False, 'import rospy\n'), ((4192, 4221), 'rospy.get_param', 'rospy.get_param', (['"""~strapdown"""'], {}), "('~strapdown')\n", (4207, 4221), False, 'import rospy\n'), ((4375, 4460), 'rospy.Publisher', 'rospy.Publisher', (['"""strapdown/intersection_result"""', 'PositionVelocity'], {'queue_size': '(1)'}), "('strapdown/intersection_result', PositionVelocity, queue_size=1\n )\n", (4390, 4460), False, 'import rospy\n'), ((4803, 4847), 'rospy.get_param', 'rospy.get_param', (['"""~measurement_topics/sonar"""'], {}), "('~measurement_topics/sonar')\n", (4818, 4847), False, 'import rospy\n'), ((5369, 5400), 'numpy.random.normal', 'np.random.normal', (['(0)'], {'scale': '(0.05)'}), '(0, scale=0.05)\n', (5385, 5400), True, 'import numpy as np\n'), ((5450, 5481), 'numpy.random.normal', 'np.random.normal', (['(0)'], {'scale': '(0.05)'}), '(0, scale=0.05)\n', (5466, 5481), True, 'import numpy as np\n'), ((6423, 6570), 'tf.transformations.euler_from_quaternion', 'tf.transformations.euler_from_quaternion', (['[self.last_orientation.x, self.last_orientation.y, self.last_orientation.z,\n self.last_orientation.w]'], {}), '([self.last_orientation.x, self.\n last_orientation.y, self.last_orientation.z, 
self.last_orientation.w])\n', (6463, 6570), False, 'import tf\n'), ((6954, 6973), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (6971, 6973), False, 'import rospy\n'), ((9275, 9337), 'etddf.msg.Measurement', 'Measurement', (['"""depth"""', 't_now', 'self.my_name', '""""""', 'z_data', 'z_r', '[]'], {}), "('depth', t_now, self.my_name, '', z_data, z_r, [])\n", (9286, 9337), False, 'from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity\n'), ((9846, 9882), 'rospy.Duration', 'rospy.Duration', (['(1 / self.update_rate)'], {}), '(1 / self.update_rate)\n', (9860, 9882), False, 'import rospy\n'), ((10430, 10492), 'etddf.msg.Measurement', 'Measurement', (['"""depth"""', 't_now', 'self.my_name', '""""""', 'z_data', 'z_r', '[]'], {}), "('depth', t_now, self.my_name, '', z_data, z_r, [])\n", (10441, 10492), False, 'from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity\n'), ((10654, 10721), 'etddf.msg.Measurement', 'Measurement', (['"""gps_x"""', 't_now', 'self.my_name', '""""""', 'self.data_x', '(0.1)', '[]'], {}), "('gps_x', t_now, self.my_name, '', self.data_x, 0.1, [])\n", (10665, 10721), False, 'from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity\n'), ((10836, 10903), 'etddf.msg.Measurement', 'Measurement', (['"""gps_y"""', 't_now', 'self.my_name', '""""""', 'self.data_y', '(0.1)', '[]'], {}), "('gps_y', t_now, self.my_name, '', self.data_y, 0.1, [])\n", (10847, 10903), False, 'from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity\n'), ((11122, 11253), 'numpy.array', 'np.array', (['[[pv_msg.position.x, pv_msg.position.y, pv_msg.position.z, pv_msg.velocity.\n x, pv_msg.velocity.y, pv_msg.velocity.z]]'], {}), '([[pv_msg.position.x, pv_msg.position.y, pv_msg.position.z, pv_msg.\n velocity.x, 
pv_msg.velocity.y, pv_msg.velocity.z]])\n', (11130, 11253), True, 'import numpy as np\n'), ((12914, 12930), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (12922, 12930), True, 'import numpy as np\n'), ((13460, 13476), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (13468, 13476), True, 'import numpy as np\n'), ((13950, 13991), 'std_msgs.msg.Header', 'Header', (['self.update_seq', 'timestamp', '"""map"""'], {}), "(self.update_seq, timestamp, 'map')\n", (13956, 13991), False, 'from std_msgs.msg import Header, Float64\n'), ((14008, 14036), 'nav_msgs.msg.Odometry', 'Odometry', (['h', '"""map"""', 'pwc', 'twc'], {}), "(h, 'map', pwc, twc)\n", (14016, 14036), False, 'from nav_msgs.msg import Odometry\n'), ((14055, 14078), 'etddf.msg.AssetEstimate', 'AssetEstimate', (['o', 'asset'], {}), '(o, asset)\n', (14068, 14078), False, 'from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity\n'), ((19334, 19347), 'numpy.array', 'np.array', (['[l]'], {}), '([l])\n', (19342, 19347), True, 'import numpy as np\n'), ((21042, 21067), 'numpy.zeros', 'np.zeros', (['(num_states, 1)'], {}), '((num_states, 1))\n', (21050, 21067), True, 'import numpy as np\n'), ((21318, 21353), 'copy.deepcopy', 'deepcopy', (['default_starting_position'], {}), '(default_starting_position)\n', (21326, 21353), False, 'from copy import deepcopy\n'), ((21387, 21412), 'numpy.zeros', 'np.zeros', (['(num_states, 1)'], {}), '((num_states, 1))\n', (21395, 21412), True, 'import numpy as np\n'), ((22143, 22161), 'numpy.eye', 'np.eye', (['num_states'], {}), '(num_states)\n', (22149, 22161), True, 'import numpy as np\n'), ((23025, 23043), 'numpy.eye', 'np.eye', (['num_states'], {}), '(num_states)\n', (23031, 23043), True, 'import numpy as np\n'), ((23254, 23272), 'numpy.eye', 'np.eye', (['num_states'], {}), '(num_states)\n', (23260, 23272), True, 'import numpy as np\n'), ((3434, 3478), 'rospy.get_param', 'rospy.get_param', 
(['"""~measurement_topics/depth"""'], {}), "('~measurement_topics/depth')\n", (3449, 3478), False, 'import rospy\n'), ((3941, 3987), 'rospy.get_param', 'rospy.get_param', (['"""~measurement_topics/imu_est"""'], {}), "('~measurement_topics/imu_est')\n", (3956, 3987), False, 'import rospy\n'), ((4084, 4130), 'rospy.get_param', 'rospy.get_param', (['"""~measurement_topics/imu_est"""'], {}), "('~measurement_topics/imu_est')\n", (4099, 4130), False, 'import rospy\n'), ((4226, 4271), 'rospy.get_param', 'rospy.get_param', (['"""~measurement_topics/imu_ci"""'], {}), "('~measurement_topics/imu_ci')\n", (4241, 4271), False, 'import rospy\n'), ((4485, 4530), 'rospy.get_param', 'rospy.get_param', (['"""~measurement_topics/imu_ci"""'], {}), "('~measurement_topics/imu_ci')\n", (4500, 4530), False, 'import rospy\n'), ((4695, 4731), 'rospy.Duration', 'rospy.Duration', (['(1 / self.update_rate)'], {}), '(1 / self.update_rate)\n', (4709, 4731), False, 'import rospy\n'), ((4888, 4932), 'rospy.get_param', 'rospy.get_param', (['"""~measurement_topics/sonar"""'], {}), "('~measurement_topics/sonar')\n", (4903, 4932), False, 'import rospy\n'), ((5662, 5692), 'numpy.array', 'np.array', (['odom.pose.covariance'], {}), '(odom.pose.covariance)\n', (5670, 5692), True, 'import numpy as np\n'), ((5807, 5838), 'numpy.array', 'np.array', (['odom.twist.covariance'], {}), '(odom.twist.covariance)\n', (5815, 5838), True, 'import numpy as np\n'), ((6742, 6770), 'numpy.sin', 'np.sin', (['target.elevation_rad'], {}), '(target.elevation_rad)\n', (6748, 6770), True, 'import numpy as np\n'), ((6810, 6838), 'numpy.cos', 'np.cos', (['target.elevation_rad'], {}), '(target.elevation_rad)\n', (6816, 6838), True, 'import numpy as np\n'), ((6865, 6886), 'numpy.cos', 'np.cos', (['bearing_world'], {}), '(bearing_world)\n', (6871, 6886), True, 'import numpy as np\n'), ((6913, 6934), 'numpy.sin', 'np.sin', (['bearing_world'], {}), '(bearing_world)\n', (6919, 6934), True, 'import numpy as np\n'), ((7429, 7532), 
'etddf.msg.Measurement', 'Measurement', (['"""sonar_x"""', 'now', 'self.my_name', 'target.id', 'x', "self.default_meas_variance['sonar_x']", '[]'], {}), "('sonar_x', now, self.my_name, target.id, x, self.\n default_meas_variance['sonar_x'], [])\n", (7440, 7532), False, 'from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity\n'), ((7554, 7657), 'etddf.msg.Measurement', 'Measurement', (['"""sonar_y"""', 'now', 'self.my_name', 'target.id', 'y', "self.default_meas_variance['sonar_y']", '[]'], {}), "('sonar_y', now, self.my_name, target.id, y, self.\n default_meas_variance['sonar_y'], [])\n", (7565, 7657), False, 'from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity\n'), ((9019, 9035), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (9027, 9035), True, 'import numpy as np\n'), ((10174, 10190), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (10182, 10190), True, 'import numpy as np\n'), ((11291, 11318), 'numpy.array', 'np.array', (['pv_msg.covariance'], {}), '(pv_msg.covariance)\n', (11299, 11318), True, 'import numpy as np\n'), ((12084, 12177), 'numpy.array', 'np.array', (['[[msg.setpoint_velocity.y, msg.setpoint_velocity.z, -msg.setpoint_velocity.z]]'], {}), '([[msg.setpoint_velocity.y, msg.setpoint_velocity.z, -msg.\n setpoint_velocity.z]])\n', (12092, 12177), True, 'import numpy as np\n'), ((21236, 21254), 'numpy.eye', 'np.eye', (['num_states'], {}), '(num_states)\n', (21242, 21254), True, 'import numpy as np\n'), ((21583, 21601), 'numpy.eye', 'np.eye', (['num_states'], {}), '(num_states)\n', (21589, 21601), True, 'import numpy as np\n'), ((13037, 13069), 'geometry_msgs.msg.Point', 'Point', (['mean[0]', 'mean[1]', 'mean[2]'], {}), '(mean[0], mean[1], mean[2])\n', (13042, 13069), False, 'from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, 
PoseWithCovarianceStamped\n'), ((13235, 13267), 'geometry_msgs.msg.Point', 'Point', (['mean[0]', 'mean[1]', 'mean[2]'], {}), '(mean[0], mean[1], mean[2])\n', (13240, 13267), False, 'from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, PoseWithCovarianceStamped\n'), ((13297, 13319), 'geometry_msgs.msg.Quaternion', 'Quaternion', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (13307, 13319), False, 'from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, PoseWithCovarianceStamped\n'), ((13352, 13361), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (13358, 13361), True, 'import numpy as np\n'), ((13585, 13619), 'geometry_msgs.msg.Vector3', 'Vector3', (['mean[3]', 'mean[4]', 'mean[5]'], {}), '(mean[3], mean[4], mean[5])\n', (13592, 13619), False, 'from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, PoseWithCovarianceStamped\n'), ((13764, 13798), 'geometry_msgs.msg.Vector3', 'Vector3', (['mean[3]', 'mean[4]', 'mean[5]'], {}), '(mean[3], mean[4], mean[5])\n', (13771, 13798), False, 'from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, PoseWithCovarianceStamped\n'), ((13798, 13814), 'geometry_msgs.msg.Vector3', 'Vector3', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (13805, 13814), False, 'from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, PoseWithCovarianceStamped\n'), ((13850, 13859), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (13856, 13859), True, 'import numpy as np\n'), ((14607, 14697), 'rospy.logerr', 'rospy.logerr', (['"""Ignoring Modem Elevation Measurement since we have depth measurements"""'], {}), "(\n 'Ignoring Modem Elevation Measurement since we have depth measurements')\n", (14619, 14697), False, 'import rospy\n'), ((15735, 15825), 'rospy.logerr', 
'rospy.logerr', (['"""Ignoring Modem Elevation Measurement since we have depth measurements"""'], {}), "(\n 'Ignoring Modem Elevation Measurement since we have depth measurements')\n", (15747, 15825), False, 'import rospy\n')]
import numpy as np from xwavecal.utils.correlate import correlate2d def test_correlate2d(): arr = np.ones((5, 5)) arr[1:4, 1:4] = 2 sig = correlate2d(arr, 2 * np.ones((3, 3)), max_lag=1) assert sig[2, 2] == np.max(sig)
[ "numpy.ones", "numpy.max" ]
[((105, 120), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (112, 120), True, 'import numpy as np\n'), ((226, 237), 'numpy.max', 'np.max', (['sig'], {}), '(sig)\n', (232, 237), True, 'import numpy as np\n'), ((174, 189), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (181, 189), True, 'import numpy as np\n')]
""" Machine learning layer class definition module """ import numpy as np import tensorflow as tf rng = np.random.RandomState(1000) class Dense(object): """ Dense layer class """ def __init__(self): self.W = 0 self.b = 0 self.name_W = 'hoge' self.name_b = 'hoge' pass def f_prop(self): pass
[ "numpy.random.RandomState" ]
[((106, 133), 'numpy.random.RandomState', 'np.random.RandomState', (['(1000)'], {}), '(1000)\n', (127, 133), True, 'import numpy as np\n')]
"""Custom utilities for interacting with the Materials Project. Mostly for getting and manipulating structures. With all of the function definitions and docstrings, these are more verbose """ import fnmatch import os from pymatgen.ext.matproj import MPRester, Structure from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar from pymatgen.io.vasp.outputs import Vasprun, Outcar from dfttk.analysis.relaxing import get_non_isotropic_strain, get_bond_distance_change from fireworks.fw_config import config_to_dict from fireworks import LaunchPad from ase.build import get_deviation_from_optimal_cell_shape from monty.serialization import loadfn, dumpfn import numpy as np import itertools import scipy import math # TODO: wrap MPRester calls in a try-except block to catch errors and retry automatically eV_per_atom_to_J_per_mol = scipy.constants.eV*scipy.constants.Avogadro J_per_mol_to_eV_per_atom = 1/(scipy.constants.eV*scipy.constants.Avogadro) def mp_structures_from_ids(mp_ids, API_KEY=None): """Returns a list of structures from MP ids Args: mp_ids ([str]): list of Materials Project ids in the form of 'mp-###' API_KEY (str): your Materials Project API_KEY. Will try to use environment key if None. Returns: List of Structure objects """ structs = [] with MPRester(API_KEY) as mpr: for mp_id in mp_ids: structs.append(mpr.get_structure_by_material_id(mp_id)) return structs def mp_structures_from_system(system, API_KEY=None): """Supply a chemical system (e.g. Fe-Cr) and get all of the structures back Args: system (str): system name (e.g. Fe-Cr) API_KEY (str): your Materials Project API_KEY. Will try to use environment key if None. Returns: List of Structure objects """ with MPRester(API_KEY) as mpr: structs = mpr.get_structures(system) return structs def mp_structures_and_energies_from_system(system, API_KEY=None): """Supply a chemical system (e.g. Fe-Cr) and get dicts of the structures and properties back Args: system (str): system name (e.g. 
Fe-Cr), but could also be mp-ids or formula API_KEY (str): your Materials Project API_KEY. Will try to use environment key if None. Returns: List of {"material_id": id, "pretty_formula": formula, "energy_per_atom"} """ with MPRester(API_KEY) as mpr: entries = mpr.get_data(system) return entries def mp_sorted_structures_from_system(system, filter_energy=0.2, API_KEY=None): """Supply a chemical system (e.g. Fe-Cr) and get back Structures sorted by energy above hull Energies too far above the hull can be removed with the filter_energy Args: system (str): system name (e.g. Fe-Cr), but could also be mp-ids or formula filter_energy (float): Maximum energy above hull allowed in eV API_KEY (str): your Materials Project API_KEY. Will try to use environment key if None. Returns: List of Structure objects sorted by energy above hull """ entries = mp_structures_and_energies_from_system(system, API_KEY=API_KEY) # if Structure objects cannot be created from the entries mp_ids = [entry["material_id"] for entry in entries] energies_above_hull = [entry["e_above_hull"] for entry in entries] sorted_mp_ids = [mp_id for energy, mp_id in sorted(zip(energies_above_hull, mp_ids)) if energy <= filter_energy] sorted_structs = mp_structures_from_ids(sorted_mp_ids) return sorted_structs def get_launchpad(launchpad_file=None): """ Returns a LaunchPad object. If the launchpad_file is None, then try to auto load from environment Args: launchpad_file (File-like): A file-like or file path to the LaunchPad file. 
Returns: LaunchPad """ if launchpad_file: if isinstance(launchpad_file, file): # a file object was found ext = launchpad_file.name.split('.')[-1] if ext == 'yaml': launchpad = LaunchPad.from_format(launchpad_file.read(), f_format='yaml') else: # assume json launchpad = LaunchPad.from_format(launchpad_file.read()) else: # assume launchpad_file is a path launchpad = LaunchPad.from_file(launchpad_file) else: launchpad = LaunchPad.auto_load() return launchpad def update_fws_spec(wf, spec_dict, fw_name_constraint=None): """ Update the fireworks matching the name constraint with the passed spec_dict. Can be used for generically updating the spec as long as update can be expressed as a dictionary. Args: wf (Workflow): The original workflow object spec_dict (dict): the keys and values to update in the spec, e.g. {'_queueadapter': {'walltime': '24:00:00'}} fw_name_constraint (str): a constraint on the FW name Returns: Workflow """ for fw in wf.fws: if fw_name_constraint is None or fw_name_constraint in fw.name: fw.spec.update(spec_dict) return wf def recursive_glob(start, pattern): """ Recursively glob for the given pattern from the start directory. Taken from ESPEI. Args: start (str): Path of the directory to walk while for file globbing pattern (str): Filename pattern to match in the glob Returns: [str]: List of matched filenames """ matches = [] for root, dirnames, filenames in os.walk(start): for filename in fnmatch.filter(filenames, pattern): matches.append(os.path.join(root, filename)) return sorted(matches) def sort_x_by_y(x, y): """Sort a list of x in the order of sorting y""" return [xx for _, xx in sorted(zip(y, x), key=lambda pair: pair[0])] def get_norm_cell(cell, target_size, target_shape='sc'): ''' Get the normed cell Parameters ---------- cell: 2D array of floats Metric given as a (3x3 matrix) of the input structure. target_size: integer Size of desired super cell in number of unit cells. target_shape: str Desired supercell shape. 
Can be 'sc' for simple cubic or 'fcc' for face-centered cubic. Returns ------- norm_cell: 2D array of floats The normed cell Note: This function is extracted from ase ''' target_shape = target_shape.lower() target_size = int(target_size) if target_shape in ["sc", "simple-cubic"]: target_metric = np.eye(3) elif target_shape in ["fcc", "face-centered cubic"]: target_metric = 0.5 * np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=float) else: raise ValueError('The target_shape should be sc or fcc (ase supported)') norm = (target_size * np.linalg.det(cell) / np.linalg.det(target_metric)) ** (-1.0 / 3.) norm_cell = norm * cell return norm_cell def find_optimal_cell_shape_in_range(cell, target_size, target_shape, size_range=None, optimize_sc=False, lower_limit=-2, upper_limit=2, sc_tolerance=1e-5, verbose=False,): """ Returns the transformation matrix that produces a supercell corresponding to *target_size* unit cells with metric *cell* that most closely approximates the shape defined by *target_shape*. Parameters: cell: 2D array of floats Metric given as a (3x3 matrix) of the input structure. target_size: integer Size of desired super cell in number of unit cells. target_shape: str Desired supercell shape. Can be 'sc' for simple cubic or 'fcc' for face-centered cubic. size_range: None/int/float/list/tuple The range of the target_size if None, 80%-120% target_size elif int/float < 1 (1-size_range, 1+size_range) * target_size = 1 == target_size > 1 (-int(size_range), int(size_range)) + target_size elif list/tuple (2 elements) [size_range(0), size_range(1)] optimize_sc: bool Optimize the super cell matrix (True) or not (False) If False, then use the closest integer transformation matrix of ideal matrix lower_limit: int Lower limit of search range. upper_limit: int Upper limit of search range. sc_tolerance: float The tolerance for the search. 
If the score is less than sc_tolerance, stop searching verbose: bool Set to True to obtain additional information regarding construction of transformation matrix. Return numpy.ndarray 2d array of a scaling matrix, e.g. [[3,0,0],[0,3,0],[0,0,3]] Note: this function taken from ase(ase.build.find_optimal_cell_shape), and we made some improvements (add sc_tolerance and set the ideal_matrix as the first loop, which will speed up the code for high symmetry add size_range, which will allow search in a range) """ # Set up target metric if target_shape in ["sc", "simple-cubic"]: target_metric = np.eye(3) elif target_shape in ["fcc", "face-centered cubic"]: target_metric = 0.5 * np.array( [[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=float ) if verbose: print("target metric (h_target):") print(target_metric) if size_range is None: min_size = int(target_size * 0.8) max_size = math.ceil(target_size * 1.2) elif isinstance(size_range, (float, int)): if size_range < 1: min_size = int(target_size * (1 - size_range)) max_size = math.ceil(target_size * (1 + size_range)) elif size_range == 1: min_size = target_size max_size = target_size else: min_size = target_size - int(size_range) max_size = target_size + math.ceil(size_range) elif isinstance(size_range, (list, tuple)): min_size = int(size_range[0]) max_size = math.ceil(size_range[1]) else: raise ValueError('Unsupported size range.') # Normalize cell metric to reduce computation time during looping norm = ( target_size * np.linalg.det(cell) / np.linalg.det(target_metric) ) ** (-1.0 / 3) norm_cell = norm * cell if verbose: print("normalization factor (Q): %g" % norm) # Approximate initial P matrix ideal_P = np.dot(target_metric, np.linalg.inv(norm_cell)) if verbose: print("idealized transformation matrix:") print(ideal_P) starting_P = np.array(np.around(ideal_P, 0), dtype=int) if verbose: print("closest integer transformation matrix (P_0):") print(starting_P) if optimize_sc: # Prepare run. 
best_score = 1e6 optimal_P = None #Set the starting_P as the first one dPlist = list(itertools.product(range(lower_limit, upper_limit + 1), repeat=9)) dP0 = (0, 0, 0, 0, 0, 0, 0, 0, 0) dPlist.pop(dPlist.index(dP0)) dPlist = [dP0] + dPlist for dP in dPlist: dP = np.array(dP, dtype=int).reshape(3, 3) P = starting_P + dP New_size = np.around(np.linalg.det(P)) if New_size < min_size or New_size > max_size: continue norm_new = get_norm_cell(cell, New_size, target_shape=target_shape) score = get_deviation_from_optimal_cell_shape(np.dot(P, norm_new), target_shape=target_shape, norm=1.0) if score < best_score: best_score = score optimal_P = P if best_score < sc_tolerance: break if optimal_P is None: optimal_P = starting_P print("Failed to find a transformation matrix, using the ideal one.") # Finalize. if verbose: print("smallest score (|Q P h_p - h_target|_2): %f" % best_score) print("optimal transformation matrix (P_opt):") print(optimal_P) print("supercell metric:") print(np.round(np.dot(optimal_P, cell), 4)) print( "determinant of optimal transformation matrix: %g" % np.linalg.det(optimal_P) ) else: optimal_P = starting_P return optimal_P def supercell_scaling_by_atom_lat_vol(structure, min_obj=60, max_obj=120, scale_object='atom', optimize_sc=False, target_shape='sc', lower_search_limit=-2, upper_search_limit=2, verbose=False, sc_tolerance=1e-5): """ Find a the supercell scaling matrix that gives the most cubic supercell for a structure, where the supercell has between the minimum and maximum nubmer of object(atom/lattice/volume). Parameters ---------- structure : pymatgen.Structure Unitcell of a structure scale_object: str control the scale object, atom or lattice or volume (only first letter matters) min_obj/max_obj : int/float minimum/maximum atoms/lattice/volume(controlled by scale_object) target_shape : str Target shape of supercell. Could choose 'sc' for simple cubic or 'fcc' for face centered cubic. Default is 'sc'. 
lower_search_limit : int How far to search below the 'ideal' cubic scaling. Default is -2. upper_search_limit : int How far to search below the 'ideal' cubic scaling. Default is 2. verbose : bool Whether to print extra details on the cell shapes and scores. Useful for debugging. sc_tolerance: float The tolerance for the search. If the score is less than sc_tolerance, stop searching Returns ------- numpy.ndarray 2d array of a scaling matrix, e.g. [[3,0,0],[0,3,0],[0,0,3]] Notes ----- The motiviation for this is for use in phonon calculations and defect calculations. It is important that defect atoms are far enough apart that they do not interact. Scaling unit cells that are not cubic by even dimensions might result in interacting defects. An example would be a tetragonal cell with 2x8x8 Ang lattice vectors being made into a 2x2x2 supercell. Atoms along the first dimension would not be very far apart. We are using a pure Python implementation from ASE, which is not very fast for a given supercell size. This allows for a variable supercell size, so it's going to be slow for a large range of atoms. (TODO: The performance is improved, but still can be faster) The search limits are passed directloy to ``find_optimal_cell_shape``. They define the search space for each individual supercell based on the "ideal" scaling. For example, a cell with 4 atoms and a target size of 110 atoms might have an ideal scaling of 3x3x3. The search space for a lower and upper limit of -2/+2 would be 1-5. Since the calculations are based on the cartesian product of 3x3 matrices, large search ranges are very expensive. 
""" #from ase.build import get_deviation_from_optimal_cell_shape # range of supercell sizes in number of unitcells scale_object = scale_object.lower() if scale_object.startswith('a'): unit_obj = len(structure) elif scale_object.startswith('l'): unit_obj = structure.volume min_obj = min_obj ** 3 max_obj = max_obj ** 3 elif scale_object.startswith('v'): unit_obj = structure.volume else: raise ValueError('Unsupported scale object, please choose atom or lattice or volume.') size_range = [int(min_obj/unit_obj), math.ceil(max_obj/unit_obj)] optimal_supercell_shapes = [] # numpy arrays of optimal shapes optimal_supercell_scores = [] # will correspond to supercell size supercell_sizes_out = [] # find the target shapes for sc_size in range(size_range[0], size_range[1]): optimal_shape = find_optimal_cell_shape_in_range(structure.lattice.matrix, sc_size, target_shape, size_range=size_range, upper_limit=upper_search_limit, lower_limit=lower_search_limit, verbose=True, sc_tolerance=sc_tolerance, optimize_sc=optimize_sc) optimal_supercell_shapes.append(optimal_shape) norm_cell = get_norm_cell(structure.lattice.matrix, sc_size, target_shape=target_shape) scores = get_deviation_from_optimal_cell_shape(np.dot(optimal_shape, norm_cell), target_shape) optimal_supercell_scores.append(scores) supercell_sizes_out.append(sc_size) if scores < sc_tolerance: break if verbose: for i in range(len(optimal_supercell_shapes)): print('{} {:0.4f} {}'.format(supercell_sizes_out[i], optimal_supercell_scores[i], optimal_supercell_shapes[i].tolist())) # find the most optimal cell shape along the range of sizes optimal_sc_shape = optimal_supercell_shapes[np.argmin(optimal_supercell_scores)] return optimal_sc_shape def recursive_flatten(l): """ Flat list(the elements of which may contain multi-layer list) into a single-layer list recursively Parameter --------- l: multi-layer list, e.g. l = [[2, 3, 4], 5, [[[6, 7, 8]]]] Returns single-layer list, e.g. 
[2, 3, 4, 5, 6, 7, 8] """ if l == []: return l if isinstance(l[0], list): return recursive_flatten(l[0]) + recursive_flatten(l[1:]) return l[:1] + recursive_flatten(l[1:]) def mget(d, path): """Get from a dict using dot notation Parameters ---------- d : dict Nested dictionary structure path : str Dot separated property, e.g. output.structure.lattice.volume Returns ------- Object Value of the dictionary Examples -------- >>> nested_dict = {'top_level': {'second_level': 'my_value'}} >>> mget(nested_dict, 'top_level.second_level') 'my_value' """ keys = path.split('.') current_path = "" curr_dict = d for k in keys: if not isinstance(curr_dict, dict): raise ValueError("Object at path \"{}\" (type: {}) is not a dictionary and \"{}\" in path \"{}\" cannot be looked up.".format(current_path[:-1], type(curr_dict), k, path)) current_path += k try: curr_dict = curr_dict[k] except KeyError: raise KeyError("Cannot access key \"{}\" in path \"{}\". Possible keys are [{}].".format(k, current_path, ", ".join(curr_dict.keys()))) current_path += "." return curr_dict def get_mat_info(struct): """ Get some basic information of the structure, e.g. 
name, configuration Parameters ---------- struct: pymatgen.structure Returns ------- name: string The name of the structure configuration: list The configuration of the structure occupancy: list The occupancy of the structure site_ratio: list the site-ratio of the structure """ name = struct.formula configuration = [] occupancy = [] site_ratio = [] for e, a in struct.composition.items(): configuration.append([str(e)]) occupancy.append([1.0]) site_ratio.append([a]) return name, configuration, occupancy, site_ratio def mark_adopted_TF(tag, db_file, adpoted, phonon=False): from atomate.vasp.database import VaspCalcDb if db_file!=">>db_file<<": vasp_db = VaspCalcDb.from_db_file(db_file, admin=True) else: t_file = loadfn(config_to_dict()["FWORKER_LOC"])["env"]["db_file"] vasp_db = VaspCalcDb.from_db_file(t_file, admin=True) if vasp_db: vasp_db.collection.update({'metadata.tag': tag}, {'$set': {'adopted': adpoted}}, upsert = True, multi = True) if phonon: vasp_db.db['phonon'].update({'metadata.tag': tag}, {'$set': {'adopted': adpoted}}, upsert = True, multi = True) def mark_adopted(tag, db_file, volumes, phonon=False): mark_adopted_TF(tag, db_file, False, phonon=phonon) # Mark all as adpoted from atomate.vasp.database import VaspCalcDb if db_file!=">>db_file<<": vasp_db = VaspCalcDb.from_db_file(db_file, admin=True) else: t_file = loadfn(config_to_dict()["FWORKER_LOC"])["env"]["db_file"] vasp_db = VaspCalcDb.from_db_file(t_file, admin=True) for volume in volumes: vasp_db.collection.update({'$and':[ {'metadata.tag': tag}, {'output.structure.lattice.volume': volume} ]}, {'$set': {'adopted': True}}, upsert = True, multi = False) # Mark only one if phonon: vasp_db.db['phonon'].update({'$and':[ {'metadata.tag': tag}, {'volume': volume} ]}, {'$set': {'adopted': True}}, upsert = True, multi = False) def consistent_check_db(db_file, tag): ''' In the subsequent running(run DFTTK again with the same tag exists in Mongo DB), if phonon method is committed, it'd better to check the 
lengths of "task" and "phonon" collections. ''' from atomate.vasp.database import VaspCalcDb if db_file!=">>db_file<<": vasp_db = VaspCalcDb.from_db_file(db_file, admin=True) else: t_file = loadfn(config_to_dict()["FWORKER_LOC"])["env"]["db_file"] vasp_db = VaspCalcDb.from_db_file(t_file, admin=True) num_task = vasp_db.collection.count_documents({'$and':[ {'metadata.tag': tag}, {'adopted': True} ]}) num_phonon = vasp_db.db['phonon'].count_documents({'$and':[ {'metadata.tag': tag}, {'adopted': True} ]}) if num_task == num_phonon: return(True) else: print('The records length of "task"(%s) differs to the length of "phonon"(%s) in mongodb.' %(num_task, num_phonon)) return(False) def check_relax_path(relax_path, db_file, tag, run_isif2, pass_isif4): from atomate.vasp.database import VaspCalcDb if db_file!=">>db_file<<": vasp_db = VaspCalcDb.from_db_file(db_file, admin=True) else: t_file = loadfn(config_to_dict()["FWORKER_LOC"])["env"]["db_file"] vasp_db = VaspCalcDb.from_db_file(t_file, admin=True) if relax_path != '': if os.path.exists(relax_path): return(relax_path, run_isif2, pass_isif4) if vasp_db.db["relax"].count_documents({'metadata.tag': tag}) > 0: items = vasp_db.db["relax"].find({'metadata.tag': tag}).sort([('_id', -1)]).limit(1) if os.path.exists(items[0]['path']): print('Relax result "%s" with "run_isif2 = %s" and "run_isif4 = %s" has been found, and will be used for new static calculations.' %(items[0]['path'], items[0]['run_isif2'], items[0]['pass_isif4'])) return(items[0]['path'], items[0]['run_isif2'], items[0]['pass_isif4']) else: print('Relax result "%s" has been found but NOT exists. Change tag and try again!' 
%relax_path) return('', run_isif2, pass_isif4) else: print('No relax result found.') return('', run_isif2, pass_isif4) def add_modify_incar_by_FWname(wf, modify_incar_params): from atomate.vasp.powerups import add_modify_incar for keyword in modify_incar_params.keys(): add_modify_incar(wf, modify_incar_params = modify_incar_params[keyword], fw_name_constraint = keyword) def add_modify_kpoints(original_wf, modify_kpoints_params, fw_name_constraint=None): """ Every FireWork that runs VASP has a ModifyIncar task just beforehand. For example, allows you to modify the INCAR based on the Worker using env_chk or using hard-coded changes. Args: original_wf (Workflow) modify_incar_params (dict) - dict of parameters for ModifyIncar. fw_name_constraint (str) - Only apply changes to FWs where fw_name contains this substring. Returns: Workflow """ from atomate.utils.utils import get_fws_and_tasks from dfttk.ftasks import ModifyKpoints for idx_fw, idx_t in get_fws_and_tasks(original_wf, fw_name_constraint=fw_name_constraint, task_name_constraint="RunVasp"): original_wf.fws[idx_fw].tasks.insert(idx_t, ModifyKpoints(modify_kpoints_params = modify_kpoints_params)) return original_wf def add_modify_kpoints_by_FWname(wf, modify_kpoints_params): for keyword in modify_kpoints_params.keys(): add_modify_kpoints(wf, modify_kpoints_params = modify_kpoints_params[keyword], fw_name_constraint = keyword) import re class metadata_in_POSCAR(): ''' First line in POSCAR is like: SIGMA1;[0.5,0.5]16[0.25,0.75]32...;SQS; Occupancies of 0 in [,] could not omitted meta = metadata_in_POSCAR('POSCAR') for config in configs: #configs writen like [['V', 'Ni'], ['Cr']] metadata = meta.get_metadata(config) ''' def __init__(self, filename='POSCAR'): self.poscarfile = filename ss = self.parse_poscar() if len(ss) <= 0: self.phase_name = '' else: self.phase_name = ss[0] if len(ss) < 3: return self.occupancies = [] self.site_ratios = [] digiarrs = ss[1].split('[') for digis in digiarrs: occupancy = [] if digis 
== '': continue digis = re.split(',|]', digis) for i in range(len(digis) - 1): occupancy.append(float(digis[i])) self.site_ratios.append(int(digis[-1])) self.occupancies.append(occupancy) self.method = ss[2] def parse_poscar(self): ''' To parse the first line in POSCAR Each tag word segmented by a semicolon(";") Tag word could be as followings: ''' if not os.path.exists(self.poscarfile): raise FileNotFoundError('No such file ({}), please set the first line of POSCAR properly.'.format(self.poscarfile)) else: file = open(self.poscarfile) firstline = file.readline() file.close firstline = firstline.strip('\n') firstline = firstline.replace(' ', '') firstline = firstline.upper() ss = re.split('[:;~]', firstline) i = len(ss) - 1 while i >= 0: if ss[i] == '': ss.pop(i) i -= 1 return(ss) def get_metadata(self, config): ''' configs writen like [['V', 'Ni'], ['Cr']] ''' m = len(config) n = len(self.occupancies) if m != n: print('Material configuration number(%s) is NOT equal to the occupancy number(%s), please check!' %(m, n)) return() for i in range(m): if len(config[i]) > len(self.occupancies[i]): print('Wrong configuration in %s, please check!' %config) return() if not self.check_POSCAR(config): return() metadata = { 'phase': self.phase_name, 'sublattice_model': { 'configuration': config, 'occupancies': self.occupancies, 'site_ratios': self.site_ratios }, 'method': self.method } return(metadata) def check_POSCAR(self, config): ''' First line must like [1,0]32 to match the elements in POSCAR, 0 could not ignored. ''' # To check the sum of occupancies for m in range(len(self.site_ratios)): sum_occupancy = 0 for n in range(len(self.occupancies[m])): sum_occupancy += self.occupancies[m][n] if abs(sum_occupancy - 1) > 1e-10: print('The sum of occupancies in %s is NOT equal to 1, please check!' 
%self.occupancies[m]) return(False) # To check config and occupancy temp_struct = Structure.from_file(self.poscarfile) namelist_elements = [] numlist_elements = [] for e, a in temp_struct.composition.items(): namelist_elements.append(e) numlist_elements.append(a) # [8.0, 24.0] num_element_firstline = 0 for ocs in self.occupancies: num_element_firstline += len(ocs) if len(numlist_elements) != num_element_firstline: print('The number of element kind(%s) in first line of POASCAR is NOT same one in the structure(%s), maybe "0" occupancy should be added.' %(num_element_firstline, len(numlist_elements))) return(False) index = 0 for m in range(len(self.site_ratios)): if len(config[m]) < len(self.occupancies[m]): num_occupancy = 0 for n in range(len(self.occupancies[m])): num_occupancy += self.occupancies[m][n] * self.site_ratios[m] if abs(num_occupancy - numlist_elements[index]) > 1e-10: print('The sum of sites in %s%s is NOT equal to %s(Element: %s), please check!' %(self.occupancies[m], self.site_ratios[m], numlist_elements[index], namelist_elements[index])) return(False) index += len(self.occupancies[m]) else: for n in range(len(self.occupancies[m])): if abs(numlist_elements[index] - self.occupancies[m][n] * self.site_ratios[m]) > 1e-10: print('The sites in %s * %s is NOT equal to %s(Element: %s), please check!' %(self.occupancies[m][n], self.site_ratios[m], numlist_elements[index], namelist_elements[index])) return(False) index += 1 return(True) def check_symbol(InputSet): """ Check the symbol line in POSCAR and write corresponding POTCAR file Note: there are two ways to write the magmom: site_properties: magmom or other properties? run the after self.write_input, then all the thing is written into INCAR INCAR: parse the magmom or other list-like properties Paramter -------- InputSet: VaspInputSet The input set defined by pymatgen, e.g. 
MPRelaxSet Return ------ symbol: list(str) natom: list(str) """ struc = InputSet.structure syms = [site.specie.symbol for site in struc] incar_dict = InputSet.incar.as_dict() if "MAGMOM" in incar_dict: magmom = incar_dict["MAGMOM"] syms = [syms[i]+str(magmom[i]) for i in range(len(syms))] symbol = [a[0] for a in itertools.groupby(syms)] symbol = ["".join(re.findall(r"[A-Z][a-z]*", symboli)) for symboli in symbol] natom = [str(len(tuple(a[1]))) for a in itertools.groupby(syms)] return symbol, natom def update_pos_by_symbols(InputSet, write_file=True,**kwargs): """ Update POSCAR by symbols considering the MAGMOM difference Parameter --------- InputSet: VaspInputSet The input set defined by pymatgen, e.g. MPRelaxSet write_file: bool Write POSCAR (True) or not (False) kwargs: dict vasp4_compatible: bool Return ------ poscar_str: str The str of the POSCAR """ symbol, natom = check_symbol(InputSet) poscar_str = InputSet.poscar.get_string(**kwargs) poscar_list = poscar_str.split("\n") if "vasp4_compatible" in kwargs and kwargs["vasp4_compatible"]: poscar_list[5] = " ".join(natom) #Replace the natom line else: poscar_list[5] = " ".join(symbol) #Replace the symbol line poscar_list[6] = " ".join(natom) #Replace the natom line poscar_str = "\n".join(poscar_list) if write_file: with open("POSCAR", "w+") as f: f.write(poscar_str) return poscar_str def update_pot_by_symbols(InputSet, write_file=True): """ Update POTCAR by symbols considering the MAGMOM difference Parameter --------- InputSet: VaspInputSet The input set defined by pymatgen, e.g. 
MPRelaxSet write_file: bool Write POSCAR (True) or not (False) Return ------ potcar: Potcar (in pymatgen) The Potcar type defined in pymatgen """ symbol, natom = check_symbol(InputSet) potcar_symbols = [] settings = InputSet._config_dict["POTCAR"] if isinstance(settings[symbol[-1]], dict): for el in symbol: potcar_symbols.append(settings[el]["symbol"] if el in settings else el) else: for el in symbol: potcar_symbols.append(settings.get(el, el)) potcar = Potcar(symbols=potcar_symbols, functional=InputSet.potcar_functional) if write_file: potcar.write_file(filename="POTCAR") return potcar def check_symmetry(tol_energy=0.025, tol_strain=0.05, tol_bond=0.10, site_properties=None): ''' Check symmetry for vasp run. This should be run for each vasp run Parameter --------- tol_energy: float The tolerance of energy tol_strain: float The tolerance of strain tol_bond: float The tolerance of bond Return symm_data: dict It will store the initial structure/final_structure, isif, initial_energy_per_atom, final_energy_per_atom, symmetry_checks_passed, tolerances, failures, number_of_failures ------ ''' # Get relevant files as pmg objects incar = Incar.from_file("INCAR") outcar = Outcar('OUTCAR') vasprun = Vasprun("vasprun.xml") inp_struct = Structure.from_file("POSCAR") out_struct = Structure.from_file("CONTCAR") if site_properties: if 'magmom' in site_properties: in_mag = incar.as_dict()['MAGMOM'] inp_struct.add_site_property('magmom', in_mag) out_mag = [m['tot'] for m in outcar.magnetization] out_struct.add_site_property('magmom', out_mag) site_properties.pop('magmom') for site_property in site_properties: inp_struct.add_site_property(site_property, site_properties[site_property]) out_struct.add_site_property(site_property, site_properties[site_property]) current_isif = incar['ISIF'] initial_energy = float(vasprun.ionic_steps[0]['e_wo_entrp'])/len(inp_struct) final_energy = float(vasprun.final_energy)/len(out_struct) # perform all symmetry breaking checks failures = [] 
energy_difference = np.abs(final_energy - initial_energy) if energy_difference > tol_energy: fail_dict = { 'reason': 'energy', 'tolerance': tol_energy, 'value': energy_difference, } failures.append(fail_dict) strain_norm = get_non_isotropic_strain(inp_struct.lattice.matrix, out_struct.lattice.matrix) if strain_norm > tol_strain: fail_dict = { 'reason': 'strain', 'tolerance': tol_strain, 'value': strain_norm, } failures.append(fail_dict) bond_distance_change = get_bond_distance_change(inp_struct, out_struct) if bond_distance_change > tol_bond: fail_dict = { 'reason': 'bond distance', 'tolerance': tol_bond, 'value': bond_distance_change, } failures.append(fail_dict) symm_data = { "initial_structure": inp_struct.as_dict(), "final_structure": out_struct.as_dict(), "isif": current_isif, "initial_energy_per_atom": initial_energy, "final_energy_per_atom": final_energy, "real_value": { "energy": energy_difference, "strain": strain_norm, "bond": bond_distance_change }, "tolerances": { "energy": tol_energy, "strain": tol_strain, "bond": tol_bond, }, "failures": failures, "number_of_failures": len(failures), "symmetry_checks_passed": len(failures) == 0, } return symm_data
[ "pymatgen.io.vasp.inputs.Incar.from_file", "fireworks.LaunchPad.from_file", "numpy.array", "fireworks.LaunchPad.auto_load", "os.walk", "os.path.exists", "re.split", "pymatgen.io.vasp.outputs.Outcar", "atomate.vasp.database.VaspCalcDb.from_db_file", "dfttk.ftasks.ModifyKpoints", "pymatgen.io.vasp...
[((5504, 5518), 'os.walk', 'os.walk', (['start'], {}), '(start)\n', (5511, 5518), False, 'import os\n'), ((24194, 24299), 'atomate.utils.utils.get_fws_and_tasks', 'get_fws_and_tasks', (['original_wf'], {'fw_name_constraint': 'fw_name_constraint', 'task_name_constraint': '"""RunVasp"""'}), "(original_wf, fw_name_constraint=fw_name_constraint,\n task_name_constraint='RunVasp')\n", (24211, 24299), False, 'from atomate.utils.utils import get_fws_and_tasks\n'), ((33034, 33103), 'pymatgen.io.vasp.inputs.Potcar', 'Potcar', ([], {'symbols': 'potcar_symbols', 'functional': 'InputSet.potcar_functional'}), '(symbols=potcar_symbols, functional=InputSet.potcar_functional)\n', (33040, 33103), False, 'from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar\n'), ((33874, 33898), 'pymatgen.io.vasp.inputs.Incar.from_file', 'Incar.from_file', (['"""INCAR"""'], {}), "('INCAR')\n", (33889, 33898), False, 'from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar\n'), ((33912, 33928), 'pymatgen.io.vasp.outputs.Outcar', 'Outcar', (['"""OUTCAR"""'], {}), "('OUTCAR')\n", (33918, 33928), False, 'from pymatgen.io.vasp.outputs import Vasprun, Outcar\n'), ((33943, 33965), 'pymatgen.io.vasp.outputs.Vasprun', 'Vasprun', (['"""vasprun.xml"""'], {}), "('vasprun.xml')\n", (33950, 33965), False, 'from pymatgen.io.vasp.outputs import Vasprun, Outcar\n'), ((33983, 34012), 'pymatgen.ext.matproj.Structure.from_file', 'Structure.from_file', (['"""POSCAR"""'], {}), "('POSCAR')\n", (34002, 34012), False, 'from pymatgen.ext.matproj import MPRester, Structure\n'), ((34030, 34060), 'pymatgen.ext.matproj.Structure.from_file', 'Structure.from_file', (['"""CONTCAR"""'], {}), "('CONTCAR')\n", (34049, 34060), False, 'from pymatgen.ext.matproj import MPRester, Structure\n'), ((34883, 34920), 'numpy.abs', 'np.abs', (['(final_energy - initial_energy)'], {}), '(final_energy - initial_energy)\n', (34889, 34920), True, 'import numpy as np\n'), ((35154, 35232), 'dfttk.analysis.relaxing.get_non_isotropic_strain', 
'get_non_isotropic_strain', (['inp_struct.lattice.matrix', 'out_struct.lattice.matrix'], {}), '(inp_struct.lattice.matrix, out_struct.lattice.matrix)\n', (35178, 35232), False, 'from dfttk.analysis.relaxing import get_non_isotropic_strain, get_bond_distance_change\n'), ((35463, 35511), 'dfttk.analysis.relaxing.get_bond_distance_change', 'get_bond_distance_change', (['inp_struct', 'out_struct'], {}), '(inp_struct, out_struct)\n', (35487, 35511), False, 'from dfttk.analysis.relaxing import get_non_isotropic_strain, get_bond_distance_change\n'), ((1322, 1339), 'pymatgen.ext.matproj.MPRester', 'MPRester', (['API_KEY'], {}), '(API_KEY)\n', (1330, 1339), False, 'from pymatgen.ext.matproj import MPRester, Structure\n'), ((1818, 1835), 'pymatgen.ext.matproj.MPRester', 'MPRester', (['API_KEY'], {}), '(API_KEY)\n', (1826, 1835), False, 'from pymatgen.ext.matproj import MPRester, Structure\n'), ((2381, 2398), 'pymatgen.ext.matproj.MPRester', 'MPRester', (['API_KEY'], {}), '(API_KEY)\n', (2389, 2398), False, 'from pymatgen.ext.matproj import MPRester, Structure\n'), ((4376, 4397), 'fireworks.LaunchPad.auto_load', 'LaunchPad.auto_load', ([], {}), '()\n', (4395, 4397), False, 'from fireworks import LaunchPad\n'), ((5544, 5578), 'fnmatch.filter', 'fnmatch.filter', (['filenames', 'pattern'], {}), '(filenames, pattern)\n', (5558, 5578), False, 'import fnmatch\n'), ((6557, 6566), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6563, 6566), True, 'import numpy as np\n'), ((9231, 9240), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (9237, 9240), True, 'import numpy as np\n'), ((9584, 9612), 'math.ceil', 'math.ceil', (['(target_size * 1.2)'], {}), '(target_size * 1.2)\n', (9593, 9612), False, 'import math\n'), ((10575, 10599), 'numpy.linalg.inv', 'np.linalg.inv', (['norm_cell'], {}), '(norm_cell)\n', (10588, 10599), True, 'import numpy as np\n'), ((10716, 10737), 'numpy.around', 'np.around', (['ideal_P', '(0)'], {}), '(ideal_P, 0)\n', (10725, 10737), True, 'import numpy as np\n'), 
((15768, 15797), 'math.ceil', 'math.ceil', (['(max_obj / unit_obj)'], {}), '(max_obj / unit_obj)\n', (15777, 15797), False, 'import math\n'), ((17051, 17086), 'numpy.argmin', 'np.argmin', (['optimal_supercell_scores'], {}), '(optimal_supercell_scores)\n', (17060, 17086), True, 'import numpy as np\n'), ((19605, 19649), 'atomate.vasp.database.VaspCalcDb.from_db_file', 'VaspCalcDb.from_db_file', (['db_file'], {'admin': '(True)'}), '(db_file, admin=True)\n', (19628, 19649), False, 'from atomate.vasp.database import VaspCalcDb\n'), ((19760, 19803), 'atomate.vasp.database.VaspCalcDb.from_db_file', 'VaspCalcDb.from_db_file', (['t_file'], {'admin': '(True)'}), '(t_file, admin=True)\n', (19783, 19803), False, 'from atomate.vasp.database import VaspCalcDb\n'), ((20326, 20370), 'atomate.vasp.database.VaspCalcDb.from_db_file', 'VaspCalcDb.from_db_file', (['db_file'], {'admin': '(True)'}), '(db_file, admin=True)\n', (20349, 20370), False, 'from atomate.vasp.database import VaspCalcDb\n'), ((20481, 20524), 'atomate.vasp.database.VaspCalcDb.from_db_file', 'VaspCalcDb.from_db_file', (['t_file'], {'admin': '(True)'}), '(t_file, admin=True)\n', (20504, 20524), False, 'from atomate.vasp.database import VaspCalcDb\n'), ((21351, 21395), 'atomate.vasp.database.VaspCalcDb.from_db_file', 'VaspCalcDb.from_db_file', (['db_file'], {'admin': '(True)'}), '(db_file, admin=True)\n', (21374, 21395), False, 'from atomate.vasp.database import VaspCalcDb\n'), ((21506, 21549), 'atomate.vasp.database.VaspCalcDb.from_db_file', 'VaspCalcDb.from_db_file', (['t_file'], {'admin': '(True)'}), '(t_file, admin=True)\n', (21529, 21549), False, 'from atomate.vasp.database import VaspCalcDb\n'), ((22158, 22202), 'atomate.vasp.database.VaspCalcDb.from_db_file', 'VaspCalcDb.from_db_file', (['db_file'], {'admin': '(True)'}), '(db_file, admin=True)\n', (22181, 22202), False, 'from atomate.vasp.database import VaspCalcDb\n'), ((22313, 22356), 'atomate.vasp.database.VaspCalcDb.from_db_file', 'VaspCalcDb.from_db_file', 
(['t_file'], {'admin': '(True)'}), '(t_file, admin=True)\n', (22336, 22356), False, 'from atomate.vasp.database import VaspCalcDb\n'), ((22393, 22419), 'os.path.exists', 'os.path.exists', (['relax_path'], {}), '(relax_path)\n', (22407, 22419), False, 'import os\n'), ((22655, 22687), 'os.path.exists', 'os.path.exists', (["items[0]['path']"], {}), "(items[0]['path'])\n", (22669, 22687), False, 'import os\n'), ((23436, 23538), 'atomate.vasp.powerups.add_modify_incar', 'add_modify_incar', (['wf'], {'modify_incar_params': 'modify_incar_params[keyword]', 'fw_name_constraint': 'keyword'}), '(wf, modify_incar_params=modify_incar_params[keyword],\n fw_name_constraint=keyword)\n', (23452, 23538), False, 'from atomate.vasp.powerups import add_modify_incar\n'), ((28175, 28211), 'pymatgen.ext.matproj.Structure.from_file', 'Structure.from_file', (['self.poscarfile'], {}), '(self.poscarfile)\n', (28194, 28211), False, 'from pymatgen.ext.matproj import MPRester, Structure\n'), ((4310, 4345), 'fireworks.LaunchPad.from_file', 'LaunchPad.from_file', (['launchpad_file'], {}), '(launchpad_file)\n', (4329, 4345), False, 'from fireworks import LaunchPad\n'), ((6850, 6878), 'numpy.linalg.det', 'np.linalg.det', (['target_metric'], {}), '(target_metric)\n', (6863, 6878), True, 'import numpy as np\n'), ((10357, 10385), 'numpy.linalg.det', 'np.linalg.det', (['target_metric'], {}), '(target_metric)\n', (10370, 10385), True, 'import numpy as np\n'), ((16541, 16573), 'numpy.dot', 'np.dot', (['optimal_shape', 'norm_cell'], {}), '(optimal_shape, norm_cell)\n', (16547, 16573), True, 'import numpy as np\n'), ((24392, 24450), 'dfttk.ftasks.ModifyKpoints', 'ModifyKpoints', ([], {'modify_kpoints_params': 'modify_kpoints_params'}), '(modify_kpoints_params=modify_kpoints_params)\n', (24405, 24450), False, 'from dfttk.ftasks import ModifyKpoints\n'), ((25551, 25573), 're.split', 're.split', (['""",|]"""', 'digis'], {}), "(',|]', digis)\n", (25559, 25573), False, 'import re\n'), ((26016, 26047), 
'os.path.exists', 'os.path.exists', (['self.poscarfile'], {}), '(self.poscarfile)\n', (26030, 26047), False, 'import os\n'), ((26452, 26480), 're.split', 're.split', (['"""[:;~]"""', 'firstline'], {}), "('[:;~]', firstline)\n", (26460, 26480), False, 'import re\n'), ((30978, 31001), 'itertools.groupby', 'itertools.groupby', (['syms'], {}), '(syms)\n', (30995, 31001), False, 'import itertools\n'), ((31025, 31059), 're.findall', 're.findall', (['"""[A-Z][a-z]*"""', 'symboli'], {}), "('[A-Z][a-z]*', symboli)\n", (31035, 31059), False, 'import re\n'), ((31129, 31152), 'itertools.groupby', 'itertools.groupby', (['syms'], {}), '(syms)\n', (31146, 31152), False, 'import itertools\n'), ((5607, 5635), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (5619, 5635), False, 'import os\n'), ((6654, 6710), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {'dtype': 'float'}), '([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=float)\n', (6662, 6710), True, 'import numpy as np\n'), ((6828, 6847), 'numpy.linalg.det', 'np.linalg.det', (['cell'], {}), '(cell)\n', (6841, 6847), True, 'import numpy as np\n'), ((9328, 9384), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {'dtype': 'float'}), '([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=float)\n', (9336, 9384), True, 'import numpy as np\n'), ((9769, 9810), 'math.ceil', 'math.ceil', (['(target_size * (1 + size_range))'], {}), '(target_size * (1 + size_range))\n', (9778, 9810), False, 'import math\n'), ((10142, 10166), 'math.ceil', 'math.ceil', (['size_range[1]'], {}), '(size_range[1])\n', (10151, 10166), False, 'import math\n'), ((10335, 10354), 'numpy.linalg.det', 'np.linalg.det', (['cell'], {}), '(cell)\n', (10348, 10354), True, 'import numpy as np\n'), ((11339, 11355), 'numpy.linalg.det', 'np.linalg.det', (['P'], {}), '(P)\n', (11352, 11355), True, 'import numpy as np\n'), ((11579, 11598), 'numpy.dot', 'np.dot', (['P', 'norm_new'], {}), '(P, norm_new)\n', (11585, 11598), True, 
'import numpy as np\n'), ((11236, 11259), 'numpy.array', 'np.array', (['dP'], {'dtype': 'int'}), '(dP, dtype=int)\n', (11244, 11259), True, 'import numpy as np\n'), ((12224, 12247), 'numpy.dot', 'np.dot', (['optimal_P', 'cell'], {}), '(optimal_P, cell)\n', (12230, 12247), True, 'import numpy as np\n'), ((12357, 12381), 'numpy.linalg.det', 'np.linalg.det', (['optimal_P'], {}), '(optimal_P)\n', (12370, 12381), True, 'import numpy as np\n'), ((10015, 10036), 'math.ceil', 'math.ceil', (['size_range'], {}), '(size_range)\n', (10024, 10036), False, 'import math\n'), ((19691, 19707), 'fireworks.fw_config.config_to_dict', 'config_to_dict', ([], {}), '()\n', (19705, 19707), False, 'from fireworks.fw_config import config_to_dict\n'), ((20412, 20428), 'fireworks.fw_config.config_to_dict', 'config_to_dict', ([], {}), '()\n', (20426, 20428), False, 'from fireworks.fw_config import config_to_dict\n'), ((21437, 21453), 'fireworks.fw_config.config_to_dict', 'config_to_dict', ([], {}), '()\n', (21451, 21453), False, 'from fireworks.fw_config import config_to_dict\n'), ((22244, 22260), 'fireworks.fw_config.config_to_dict', 'config_to_dict', ([], {}), '()\n', (22258, 22260), False, 'from fireworks.fw_config import config_to_dict\n')]
import imageProcess import numpy from skimage.measure import label, regionprops def areaFilter(bImg): labelImg = label(bImg) numLabel = labelImg.max() props = regionprops(labelImg) bImg[:] = False areaList = [] for i in range(numLabel): areaList.append(props[i].area) areaList = sorted(areaList,reverse=True) areaList = areaList[:2] for i in range(numLabel): area = props[i].area if (area==areaList[0] or area==areaList[1]): bImgN = labelImg==i+1 bImg = numpy.logical_or(bImg,bImgN) return bImg
[ "numpy.logical_or", "skimage.measure.label", "skimage.measure.regionprops" ]
[((118, 129), 'skimage.measure.label', 'label', (['bImg'], {}), '(bImg)\n', (123, 129), False, 'from skimage.measure import label, regionprops\n'), ((172, 193), 'skimage.measure.regionprops', 'regionprops', (['labelImg'], {}), '(labelImg)\n', (183, 193), False, 'from skimage.measure import label, regionprops\n'), ((549, 578), 'numpy.logical_or', 'numpy.logical_or', (['bImg', 'bImgN'], {}), '(bImg, bImgN)\n', (565, 578), False, 'import numpy\n')]
#! /usr/bin/env python # coding: utf-8 """ This is Ros node for pwm control rc car """ import RPi.GPIO as GPIO import pigpio import time import numpy as np import math import tf from enum import Enum import rospy from geometry_msgs.msg import Twist, TwistStamped, PoseStamped from ackermann_msgs.msg import AckermannDriveStamped from rc_car_msgs.msg import CarParams, CarPwmContol from std_srvs.srv import SetBool from PID import PID from std_msgs.msg import Float64 from dynamic_reconfigure.server import Server from rc_bringup.cfg import RcVelControllerConfig class RemoteMode(Enum): odometry_vel = 0 pwm = 1 drive = 2 # init params servo_pin = 4 # inut pin of servo motor_pin = 17 # inut pin of motor motor_run = True # enable/disable motor use_odometry_vel = False # use odometry velocity disable_stop = True # clip pwm output true = midle_motor -> 2000; false = 1000 -> 2000 middle_servo = 1550 middle_motor = 1550 # for rc550 offset = 47.0 # offset of servo revers_servo = False # revers of servo direction revers_val = 1.0 max_vel = 2.5 # max speed of the car min_vel = -2.5 # min speed of the car max_angle = 25.0 # in degrees wheelbase = 0.28 # in meters # PID params kP = 1.0 kI = 0.0 kD = 0.2 hz = 50 prev_vel = 0.0 current_course = float() # init PID motor_pid = PID() motor_pid.setWindup(500) #topics cmd_vel_topic = "/cmd_vel" vel_topic = "/mavros/local_position/velocity" goal_topic = "/goal" pose_topic = "/mavros/local_position/pose" # init topics cmd_vel_topic = "/cmd_vel" # remote via velocity pwm_topic = "/pwm" # direct remote PWM drive_topic ="/ackermann_cmd" # remote like-car pwm_output_topic = "/pwm_output" # remote like-car vel_topic = "/mavros/local_position/velocity" param_topic = "/params" setmode_srv_topic = "/car/set_mode" encoder_topic = "/encoder_vel" # PWM init pi = pigpio.pi() pi.set_servo_pulsewidth(servo_pin, middle_servo) # middle servo angle pi.set_servo_pulsewidth(motor_pin, middle_motor) # zero speed for motor (different depending on ESC) # init 
value current_mode = RemoteMode.vel current_velocity = TwistStamped() # vector of velocity norm_velocity = float() # in m/s odometry_vel = float() # velosity from odometry goal_vel_msg = Twist() pwm_msg = CarPwmContol() drive_msg = AckermannDriveStamped() pwm_output_msg = CarPwmContol() time_clb = 0.0 def convert_trans_rot_vel_to_steering_angle(v, omega, wheelbase): global max_angle if omega == 0.0: return 0 if v == 0.0: return math.degrees(omega) radius = v / omega steering_angle = math.degrees(math.atan(wheelbase / radius)) return np.clip(steering_angle, -max_angle, max_angle) ## Callbeck from ROS def cmd_vel_clb(data): """ Get velocity value from topic :param data: velocity value :type data: Twist """ global goal_vel_msg, time_clb, max_vel, min_vel, current_mode goal_vel_msg = data goal_vel_msg.linear.x = np.clip(goal_vel_msg.linear.x, min_vel, max_vel) current_mode = RemoteMode.vel time_clb = 0.0 # print("goal_vel_msg.linear.x", goal_vel_msg.linear.x) def pwm_clb(data): """car_break_topic Get PWM value from topic :param data: velocity value :type data: RcCarControl """ global pwm_msg, time_clb, current_mode pwm_msg = data current_mode = RemoteMode.pwm time_clb = 0.0 def drive_vel_clb(data): """ Get drive value from topic :param data: velocity and steering value :type data: AckermannDriveStamped """ global drive_msg,time_clb, max_vel, current_mode drive_msg = data drive_msg.drive.speed = np.clip(drive_msg.drive.speed, -max_vel, max_vel) current_mode = RemoteMode.drive time_clb = 0.0 def current_pose_clb(data): """ Get current pose from topic :param data: :return: """ global current_course rot = [data.pose.orientation.x, data.pose.orientation.y, data.pose.orientation.z, data.pose.orientation.w] # convert euler from quaternion (roll, pitch, yaw) = tf.transformations.euler_from_quaternion(rot) current_course = yaw def velocity_clb(data): """ Get current velocity from FCU :param data: velocity from NED """ global time_clb, current_mode, current_velocity, 
norm_velocity, current_course rot=current_course current_velocity = data rotate=np.array([[math.cos(rot),-math.sin(rot)], [math.sin(rot),math.cos(rot)]]) velocity=np.array([[data.twist.linear.x], [-data.twist.linear.y]]) vector=np.dot(rotate,velocity) norm_velocity =vector[0] # print("vector:",vector[0],vector[1]) current_mode = RemoteMode.vel time_clb = 0.0 def encoder_clb(data): global odometry_vel odometry_vel=data def SetModeSrv_clb(req): """ Servoce for set mode of regulator :param req: :return: """ global motor_run motor_run = req.data def cfg_callback(config, level): """ Get params from dynamic reconfigure :param config: :param level: :return: """ global max_vel, min_vel, max_angle, kP, kI, kD, offset, use_odometry_vel, motor_run print("config") max_vel = float(config["max_vel"]) min_vel = float(config["min_vel"]) max_angle = math.radians(float(config["max_angle"])) offset = float(config["servo_offset"]) use_odometry_vel = bool(config["use_imu_vel"]) motor_run = bool(config["motor_run"]) kP = float(config["kP"]) kI = float(config["kI"]) kD = float(config["kD"]) return config ## Other function def setPIDk(): """ update PID coefficients :return: """ global motor_pid, kP, kI, kD motor_pid.setKp(kP) motor_pid.setKi(kI) motor_pid.setKd(kD) def set_rc_remote(mode): """ Recalculation velocity data to pulse and set to PWM servo and motor :return: """ global goal_vel_msg, pwm_msg, \ intercept_remote, revers_val, \ max_angle, wheelbase, drive_msg, pwm_output_msg, prev_vel, use_odometry_vel, motor_pid,odometry_vel, disable_stop motor_val = 0.0 if mode == RemoteMode.pwm: pwm_output_msg = pwm_msg if(pwm_msg.ServoPWM > 0): pi.set_servo_pulsewidth(servo_pin, pwm_msg.ServoPWM) if(pwm_msg.MotorPWM > 0): pi.set_servo_pulsewidth(motor_pin, pwm_msg.MotorPWM) elif mode == RemoteMode.vel: # send servo # v = vel_msg.linear.x-vel_msg.linear.y # steering = convert_trans_rot_vel_to_steering_angle(v,vel_msg.angular.z, wheelbase) servo_val = valmap(goal_vel_msg.angular.z, max_angle * 
revers_val, max_angle * -revers_val, 1000 + offset, 2000 + offset) pwm_output_msg.ServoPWM = servo_val try: pi.set_servo_pulsewidth(servo_pin, servo_val) except: print("error:", servo_val) # send motor # # send motor data # ## check input velocity data # if (0.0 <= vel_msg.linear.x < 0.1): # if target velocity a small, breake speed # pi.set_servo_pulsewidth(motor_pin, middle_motor) # pwm_output_msg.MotorPWM = middle_motor # pass ## stop motor correction # if prev_vel > 0 and vel_msg.linear.x <= 0.0: #for forward moving brake # print("stop motor") # motor_val = 1300 # pi.set_servo_pulsewidth(motor_pin, motor_val) # pwm_output_msg.MotorPWM = motor_val # print("val 1:", motor_val) # time.sleep(0.5) #first signal need to repay previous value on engine # pi.set_servo_pulsewidth(motor_pin, middle_motor) # pwm_output_msg.MotorPWM = middle_motor # print("val 2:", motor_val) # time.sleep(0.5) #second to stop the car if use_odometry_vel: # PID controlled # motor_val = valmap(vel_msg.linear.x, -2.4, 2.4, 1200, 1600, False) setPIDk() #set PID coefficients error_vel = odometry_vel - goal_vel_msg.linear.x motor_pid.update(error_vel) motor_val = motor_pid.output + middle_motor else: # use relative velocity if goal_vel_msg.linear.x >= 0.0: motor_val = valmap(goal_vel_msg.linear.x, 0.0, 1.0, middle_motor, 1700, False) if goal_vel_msg.linear.x < 0.0: motor_val = valmap(goal_vel_msg.linear.x, -1.0, 0.0, 1400, middle_motor, False) #1400 # Send to pwm motor if disable_stop: motor_val = np.clip(motor_val, middle_motor, 2000) else: motor_val = np.clip(motor_val, 1000, 2000) pi.set_servo_pulsewidth(motor_pin, motor_val) pwm_output_msg.MotorPWM = motor_val prev_vel = goal_vel_msg.linear.x #read prev velocity value elif mode == RemoteMode.drive: # send servo v = drive_msg.drive.speed steering = convert_trans_rot_vel_to_steering_angle(v, drive_msg.drive.steering_angle, wheelbase) servo_val = valmap(steering, max_angle * revers_val, max_angle * -revers_val, 1000 + offset, 2000 + offset) 
pi.set_servo_pulsewidth(servo_pin, servo_val) pwm_output_msg.ServoPWM = servo_val # send motor data # ## check input velocity data # if (-0.1 <= drive_msg.drive.speed < 0.1): # if target velocity a small, breake speed # pi.set_servo_pulsewidth(motor_pin, middle_motor) # pwm_output_msg.MotorPWM = middle_motor # print("break motor") # pass ## stop motor correction if prev_vel > 0 and drive_msg.drive.speed <= 0.0: #for forward moving brake motor_val = 1300 pi.set_servo_pulsewidth(motor_pin, motor_val) pwm_output_msg.MotorPWM = motor_val time.sleep(0.5) #first signal need to repay previous value on engine pi.set_servo_pulsewidth(motor_pin, middle_motor) pwm_output_msg.MotorPWM = middle_motor time.sleep(0.5) #second to stop the car if use_odometry_vel: # PID controlled # motor_val = valmap(vel_msg.linear.x, -2.4, 2.4, 1200, 1600, False) setPIDk() #set PID coefficients error_vel = norm_velocity + drive_msg.drive.speed motor_pid.update(error_vel) motor_val = motor_pid.output + middle_motor else: # use relative velocity if drive_msg.drive.speed >= 0.0: motor_val = valmap(drive_msg.drive.speed, 0.0, 6.0, middle_motor, 1700, False) if drive_msg.drive.speed < 0.0: motor_val = valmap(drive_msg.drive.speed, -2.0, 0.0,1300, middle_motor, False) # Send to pwm motor motor_val = np.clip(motor_val, 1000, 2000) pi.set_servo_pulsewidth(motor_pin, motor_val) pwm_output_msg.MotorPWM = motor_val prev_vel = drive_msg.drive.speed #read prev velocity value else: print("error") pwm_pub.publish(pwm_output_msg) def valmap(value, istart, istop, ostart, ostop, clip_flag = True): """ Re-maps a number from one range to another. That is, a value of istart would get mapped to ostart, a value of istop to ostop, values in-between to values in-between, etc. 
:param value: value :param istart: the lower bound of the value’s current range :param istop: the upper bound of the value’s current range :param ostart: the lower bound of the value’s target range :param ostop: the upper bound of the value’s target range :return: The mapped value. """ try: val = ostart + (ostop - ostart) * ((value - istart) / (istop - istart)) except: print("map error", value, istart, istop, ostart, ostop) val = 0.0 if clip_flag: return np.clip(val, ostart, ostop) else: return val def get_car_params(): """ Get car params data :return: CarParams """ data = CarParams() data.motor_run = motor_run data.maxSteeringAngle = max_angle data.maxVel = max_vel data.wheelbase = wheelbase return data if __name__ == "__main__": try: rospy.init_node("rc_control") rate = rospy.Rate(hz) # init dynamic reconfigure server cfg_srv = Server(RcVelControllerConfig, cfg_callback) # get args from ros params ## topics cmd_vel_topic = rospy.get_param('~cmd_vel', cmd_vel_topic) pwm_topic = rospy.get_param('~pwm_topic', pwm_topic) pwm_output_topic = rospy.get_param('~pwm_output_topic', pwm_output_topic) drive_topic = rospy.get_param('~drive_topic', drive_topic) param_topic = rospy.get_param('~param_topic', param_topic) vel_topic = rospy.get_param('~vel_topic', vel_topic) ## GPIO servo_pin = rospy.get_param('~servo_pin', servo_pin) motor_pin = rospy.get_param('~motor_pin', motor_pin) middle_servo = rospy.get_param('~middle_servo', middle_servo) middle_motor = rospy.get_param('~middle_motor', middle_motor) revers_servo = rospy.get_param('~revers_servo', revers_servo) offset = rospy.get_param('~servo_offset', offset) ## rover params wheelbase = rospy.get_param('~wheelbase', wheelbase) if rospy.has_param('~max_vel'): max_vel = rospy.get_param('~max_vel', max_vel) cfg_srv.update_configuration({"max_vel": max_vel}) if rospy.has_param('~min_vel'): min_vel = rospy.get_param('~min_vel', min_vel) cfg_srv.update_configuration({"min_vel": min_vel}) if rospy.has_param('~max_angle'): 
max_angle = rospy.get_param('~max_angle', max_angle) cfg_srv.update_configuration({"max_angle": max_angle}) ## PID params if rospy.has_param('~use_imu_vel'): use_odometry_vel = rospy.get_param('~use_imu_vel', use_odometry_vel) cfg_srv.update_configuration({"use_imu_vel": use_odometry_vel}) if rospy.has_param('~kP'): kP = rospy.get_param('~kP', kP) cfg_srv.update_configuration({"kP": kP}) if rospy.has_param('~kI'): kI = rospy.get_param('~kI', kI) cfg_srv.update_configuration({"kI": kI}) if rospy.has_param('~kD'): kD = rospy.get_param('~kD', kD) cfg_srv.update_configuration({"kD": kD}) if revers_servo: revers_val = -1.0 else: revers_val = 1.0 # Subscribe and Publisher to topics rospy.Subscriber(cmd_vel_topic, Twist, cmd_vel_clb) rospy.Subscriber(pwm_topic, CarPwmContol, pwm_clb) rospy.Subscriber(drive_topic, AckermannDriveStamped, drive_vel_clb) rospy.Subscriber(vel_topic, TwistStamped, velocity_clb) rospy.Subscriber(pose_topic, PoseStamped, current_pose_clb) rospy.Subscriber(encoder_topic, Float64, encoder_clb) pwm_pub = rospy.Publisher(pwm_output_topic, CarPwmContol, queue_size=10) param_pub = rospy.Publisher(param_topic, CarParams, queue_size=10) s = rospy.Service(setmode_srv_topic, SetBool, SetModeSrv_clb) print ("RC_control params: \n" "cmd_vel_topic: %s \n" "pwm_toppic: %s \n" "drive_topic: %s \n" "pwm_output_topic: %s \n" "max_vel: %f \n" "min_vel: %f \n" "max_steering_angle: %f \n" "wheelbase: %f \n" "servo_pin: %d \n" "middle_servo: %d \n" "servo_offset: %d \n" "motor_pin: %d \n" "middle_motor: %d \n" "revers servo: %f \n" "===================\n" % (cmd_vel_topic, pwm_topic, drive_topic, pwm_output_topic, max_vel, min_vel, max_angle, wheelbase, servo_pin, middle_servo, offset, motor_pin, middle_motor, revers_servo)) while not rospy.is_shutdown(): try: time_clb += 1.0 / hz if time_clb < 1.0 and motor_run: set_rc_remote(current_mode) # set pwm mode else: # not cld remote data break pwm pi.set_servo_pulsewidth(servo_pin, 0) pi.set_servo_pulsewidth(motor_pin, 0) 
except: pi.set_servo_pulsewidth(servo_pin, 0) pi.set_servo_pulsewidth(motor_pin, 0) print("rc control: error") param_pub.publish(get_car_params()) #publish car params from topic rate.sleep() except KeyboardInterrupt: # if put ctr+c print("ctrl+C exit") pi.set_servo_pulsewidth(servo_pin, 0) pi.set_servo_pulsewidth(motor_pin, 0) pi.stop() GPIO.cleanup() finally: # if exit print("exit") pi.set_servo_pulsewidth(servo_pin, 0) pi.set_servo_pulsewidth(motor_pin, 0) pi.stop() GPIO.cleanup()
[ "numpy.clip", "rospy.init_node", "time.sleep", "math.cos", "numpy.array", "rospy.Rate", "rc_car_msgs.msg.CarPwmContol", "math.atan", "RPi.GPIO.cleanup", "geometry_msgs.msg.TwistStamped", "rospy.Service", "dynamic_reconfigure.server.Server", "numpy.dot", "pigpio.pi", "rospy.Subscriber", ...
[((1335, 1340), 'PID.PID', 'PID', ([], {}), '()\n', (1338, 1340), False, 'from PID import PID\n'), ((1893, 1904), 'pigpio.pi', 'pigpio.pi', ([], {}), '()\n', (1902, 1904), False, 'import pigpio\n'), ((2139, 2153), 'geometry_msgs.msg.TwistStamped', 'TwistStamped', ([], {}), '()\n', (2151, 2153), False, 'from geometry_msgs.msg import Twist, TwistStamped, PoseStamped\n'), ((2299, 2306), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (2304, 2306), False, 'from geometry_msgs.msg import Twist, TwistStamped, PoseStamped\n'), ((2317, 2331), 'rc_car_msgs.msg.CarPwmContol', 'CarPwmContol', ([], {}), '()\n', (2329, 2331), False, 'from rc_car_msgs.msg import CarParams, CarPwmContol\n'), ((2344, 2367), 'ackermann_msgs.msg.AckermannDriveStamped', 'AckermannDriveStamped', ([], {}), '()\n', (2365, 2367), False, 'from ackermann_msgs.msg import AckermannDriveStamped\n'), ((2385, 2399), 'rc_car_msgs.msg.CarPwmContol', 'CarPwmContol', ([], {}), '()\n', (2397, 2399), False, 'from rc_car_msgs.msg import CarParams, CarPwmContol\n'), ((2674, 2720), 'numpy.clip', 'np.clip', (['steering_angle', '(-max_angle)', 'max_angle'], {}), '(steering_angle, -max_angle, max_angle)\n', (2681, 2720), True, 'import numpy as np\n'), ((2991, 3039), 'numpy.clip', 'np.clip', (['goal_vel_msg.linear.x', 'min_vel', 'max_vel'], {}), '(goal_vel_msg.linear.x, min_vel, max_vel)\n', (2998, 3039), True, 'import numpy as np\n'), ((3680, 3729), 'numpy.clip', 'np.clip', (['drive_msg.drive.speed', '(-max_vel)', 'max_vel'], {}), '(drive_msg.drive.speed, -max_vel, max_vel)\n', (3687, 3729), True, 'import numpy as np\n'), ((4090, 4135), 'tf.transformations.euler_from_quaternion', 'tf.transformations.euler_from_quaternion', (['rot'], {}), '(rot)\n', (4130, 4135), False, 'import tf\n'), ((4525, 4582), 'numpy.array', 'np.array', (['[[data.twist.linear.x], [-data.twist.linear.y]]'], {}), '([[data.twist.linear.x], [-data.twist.linear.y]])\n', (4533, 4582), True, 'import numpy as np\n'), ((4617, 4641), 'numpy.dot', 
'np.dot', (['rotate', 'velocity'], {}), '(rotate, velocity)\n', (4623, 4641), True, 'import numpy as np\n'), ((12095, 12106), 'rc_car_msgs.msg.CarParams', 'CarParams', ([], {}), '()\n', (12104, 12106), False, 'from rc_car_msgs.msg import CarParams, CarPwmContol\n'), ((2560, 2579), 'math.degrees', 'math.degrees', (['omega'], {}), '(omega)\n', (2572, 2579), False, 'import math\n'), ((2634, 2663), 'math.atan', 'math.atan', (['(wheelbase / radius)'], {}), '(wheelbase / radius)\n', (2643, 2663), False, 'import math\n'), ((11941, 11968), 'numpy.clip', 'np.clip', (['val', 'ostart', 'ostop'], {}), '(val, ostart, ostop)\n', (11948, 11968), True, 'import numpy as np\n'), ((12295, 12324), 'rospy.init_node', 'rospy.init_node', (['"""rc_control"""'], {}), "('rc_control')\n", (12310, 12324), False, 'import rospy\n'), ((12340, 12354), 'rospy.Rate', 'rospy.Rate', (['hz'], {}), '(hz)\n', (12350, 12354), False, 'import rospy\n'), ((12416, 12459), 'dynamic_reconfigure.server.Server', 'Server', (['RcVelControllerConfig', 'cfg_callback'], {}), '(RcVelControllerConfig, cfg_callback)\n', (12422, 12459), False, 'from dynamic_reconfigure.server import Server\n'), ((12539, 12581), 'rospy.get_param', 'rospy.get_param', (['"""~cmd_vel"""', 'cmd_vel_topic'], {}), "('~cmd_vel', cmd_vel_topic)\n", (12554, 12581), False, 'import rospy\n'), ((12602, 12642), 'rospy.get_param', 'rospy.get_param', (['"""~pwm_topic"""', 'pwm_topic'], {}), "('~pwm_topic', pwm_topic)\n", (12617, 12642), False, 'import rospy\n'), ((12670, 12724), 'rospy.get_param', 'rospy.get_param', (['"""~pwm_output_topic"""', 'pwm_output_topic'], {}), "('~pwm_output_topic', pwm_output_topic)\n", (12685, 12724), False, 'import rospy\n'), ((12747, 12791), 'rospy.get_param', 'rospy.get_param', (['"""~drive_topic"""', 'drive_topic'], {}), "('~drive_topic', drive_topic)\n", (12762, 12791), False, 'import rospy\n'), ((12814, 12858), 'rospy.get_param', 'rospy.get_param', (['"""~param_topic"""', 'param_topic'], {}), "('~param_topic', 
param_topic)\n", (12829, 12858), False, 'import rospy\n'), ((12879, 12919), 'rospy.get_param', 'rospy.get_param', (['"""~vel_topic"""', 'vel_topic'], {}), "('~vel_topic', vel_topic)\n", (12894, 12919), False, 'import rospy\n'), ((12957, 12997), 'rospy.get_param', 'rospy.get_param', (['"""~servo_pin"""', 'servo_pin'], {}), "('~servo_pin', servo_pin)\n", (12972, 12997), False, 'import rospy\n'), ((13018, 13058), 'rospy.get_param', 'rospy.get_param', (['"""~motor_pin"""', 'motor_pin'], {}), "('~motor_pin', motor_pin)\n", (13033, 13058), False, 'import rospy\n'), ((13082, 13128), 'rospy.get_param', 'rospy.get_param', (['"""~middle_servo"""', 'middle_servo'], {}), "('~middle_servo', middle_servo)\n", (13097, 13128), False, 'import rospy\n'), ((13152, 13198), 'rospy.get_param', 'rospy.get_param', (['"""~middle_motor"""', 'middle_motor'], {}), "('~middle_motor', middle_motor)\n", (13167, 13198), False, 'import rospy\n'), ((13222, 13268), 'rospy.get_param', 'rospy.get_param', (['"""~revers_servo"""', 'revers_servo'], {}), "('~revers_servo', revers_servo)\n", (13237, 13268), False, 'import rospy\n'), ((13286, 13326), 'rospy.get_param', 'rospy.get_param', (['"""~servo_offset"""', 'offset'], {}), "('~servo_offset', offset)\n", (13301, 13326), False, 'import rospy\n'), ((13373, 13413), 'rospy.get_param', 'rospy.get_param', (['"""~wheelbase"""', 'wheelbase'], {}), "('~wheelbase', wheelbase)\n", (13388, 13413), False, 'import rospy\n'), ((13425, 13452), 'rospy.has_param', 'rospy.has_param', (['"""~max_vel"""'], {}), "('~max_vel')\n", (13440, 13452), False, 'import rospy\n'), ((13587, 13614), 'rospy.has_param', 'rospy.has_param', (['"""~min_vel"""'], {}), "('~min_vel')\n", (13602, 13614), False, 'import rospy\n'), ((13749, 13778), 'rospy.has_param', 'rospy.has_param', (['"""~max_angle"""'], {}), "('~max_angle')\n", (13764, 13778), False, 'import rospy\n'), ((13946, 13977), 'rospy.has_param', 'rospy.has_param', (['"""~use_imu_vel"""'], {}), "('~use_imu_vel')\n", (13961, 13977), 
False, 'import rospy\n'), ((14147, 14169), 'rospy.has_param', 'rospy.has_param', (['"""~kP"""'], {}), "('~kP')\n", (14162, 14169), False, 'import rospy\n'), ((14279, 14301), 'rospy.has_param', 'rospy.has_param', (['"""~kI"""'], {}), "('~kI')\n", (14294, 14301), False, 'import rospy\n'), ((14411, 14433), 'rospy.has_param', 'rospy.has_param', (['"""~kD"""'], {}), "('~kD')\n", (14426, 14433), False, 'import rospy\n'), ((14684, 14735), 'rospy.Subscriber', 'rospy.Subscriber', (['cmd_vel_topic', 'Twist', 'cmd_vel_clb'], {}), '(cmd_vel_topic, Twist, cmd_vel_clb)\n', (14700, 14735), False, 'import rospy\n'), ((14744, 14794), 'rospy.Subscriber', 'rospy.Subscriber', (['pwm_topic', 'CarPwmContol', 'pwm_clb'], {}), '(pwm_topic, CarPwmContol, pwm_clb)\n', (14760, 14794), False, 'import rospy\n'), ((14803, 14870), 'rospy.Subscriber', 'rospy.Subscriber', (['drive_topic', 'AckermannDriveStamped', 'drive_vel_clb'], {}), '(drive_topic, AckermannDriveStamped, drive_vel_clb)\n', (14819, 14870), False, 'import rospy\n'), ((14880, 14935), 'rospy.Subscriber', 'rospy.Subscriber', (['vel_topic', 'TwistStamped', 'velocity_clb'], {}), '(vel_topic, TwistStamped, velocity_clb)\n', (14896, 14935), False, 'import rospy\n'), ((14944, 15003), 'rospy.Subscriber', 'rospy.Subscriber', (['pose_topic', 'PoseStamped', 'current_pose_clb'], {}), '(pose_topic, PoseStamped, current_pose_clb)\n', (14960, 15003), False, 'import rospy\n'), ((15012, 15065), 'rospy.Subscriber', 'rospy.Subscriber', (['encoder_topic', 'Float64', 'encoder_clb'], {}), '(encoder_topic, Float64, encoder_clb)\n', (15028, 15065), False, 'import rospy\n'), ((15084, 15146), 'rospy.Publisher', 'rospy.Publisher', (['pwm_output_topic', 'CarPwmContol'], {'queue_size': '(10)'}), '(pwm_output_topic, CarPwmContol, queue_size=10)\n', (15099, 15146), False, 'import rospy\n'), ((15167, 15221), 'rospy.Publisher', 'rospy.Publisher', (['param_topic', 'CarParams'], {'queue_size': '(10)'}), '(param_topic, CarParams, queue_size=10)\n', (15182, 15221), 
False, 'import rospy\n'), ((15235, 15292), 'rospy.Service', 'rospy.Service', (['setmode_srv_topic', 'SetBool', 'SetModeSrv_clb'], {}), '(setmode_srv_topic, SetBool, SetModeSrv_clb)\n', (15248, 15292), False, 'import rospy\n'), ((17643, 17657), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (17655, 17657), True, 'import RPi.GPIO as GPIO\n'), ((13476, 13512), 'rospy.get_param', 'rospy.get_param', (['"""~max_vel"""', 'max_vel'], {}), "('~max_vel', max_vel)\n", (13491, 13512), False, 'import rospy\n'), ((13638, 13674), 'rospy.get_param', 'rospy.get_param', (['"""~min_vel"""', 'min_vel'], {}), "('~min_vel', min_vel)\n", (13653, 13674), False, 'import rospy\n'), ((13804, 13844), 'rospy.get_param', 'rospy.get_param', (['"""~max_angle"""', 'max_angle'], {}), "('~max_angle', max_angle)\n", (13819, 13844), False, 'import rospy\n'), ((14010, 14059), 'rospy.get_param', 'rospy.get_param', (['"""~use_imu_vel"""', 'use_odometry_vel'], {}), "('~use_imu_vel', use_odometry_vel)\n", (14025, 14059), False, 'import rospy\n'), ((14188, 14214), 'rospy.get_param', 'rospy.get_param', (['"""~kP"""', 'kP'], {}), "('~kP', kP)\n", (14203, 14214), False, 'import rospy\n'), ((14320, 14346), 'rospy.get_param', 'rospy.get_param', (['"""~kI"""', 'kI'], {}), "('~kI', kI)\n", (14335, 14346), False, 'import rospy\n'), ((14452, 14478), 'rospy.get_param', 'rospy.get_param', (['"""~kD"""', 'kD'], {}), "('~kD', kD)\n", (14467, 14478), False, 'import rospy\n'), ((16617, 16636), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (16634, 16636), False, 'import rospy\n'), ((17465, 17479), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (17477, 17479), True, 'import RPi.GPIO as GPIO\n'), ((4428, 4441), 'math.cos', 'math.cos', (['rot'], {}), '(rot)\n', (4436, 4441), False, 'import math\n'), ((4481, 4494), 'math.sin', 'math.sin', (['rot'], {}), '(rot)\n', (4489, 4494), False, 'import math\n'), ((4495, 4508), 'math.cos', 'math.cos', (['rot'], {}), '(rot)\n', (4503, 4508), False, 'import 
math\n'), ((8617, 8655), 'numpy.clip', 'np.clip', (['motor_val', 'middle_motor', '(2000)'], {}), '(motor_val, middle_motor, 2000)\n', (8624, 8655), True, 'import numpy as np\n'), ((8694, 8724), 'numpy.clip', 'np.clip', (['motor_val', '(1000)', '(2000)'], {}), '(motor_val, 1000, 2000)\n', (8701, 8724), True, 'import numpy as np\n'), ((10886, 10916), 'numpy.clip', 'np.clip', (['motor_val', '(1000)', '(2000)'], {}), '(motor_val, 1000, 2000)\n', (10893, 10916), True, 'import numpy as np\n'), ((4443, 4456), 'math.sin', 'math.sin', (['rot'], {}), '(rot)\n', (4451, 4456), False, 'import math\n'), ((9933, 9948), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (9943, 9948), False, 'import time\n'), ((10126, 10141), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (10136, 10141), False, 'import time\n')]
""" ================================================================================ Utilities for handling data types and transformations ================================================================================ **<NAME>** 4/18/2018 """ ################################################################################ # Introduction # ------------- # The general nature of the **Universal Spectroscopy and Imaging Data (USID)** model facilitates the representation of # any kind of measurement data. # This includes: # # #. Conventional data represented using floating point numbers such as ``1.2345`` # #. Integer data (with or without sign) such as ``137`` # #. Complex-valued data such as ``1.23 + 4.5i`` # #. Multi-valued or compound valued data cells such as (``'Frequency'``: ``301.2``, ``'Amplitude'``: ``1.553E-3``, ``'Phase'``: ``2.14``) # where a single value or measurement is represented by multiple elements, each with their own names, and data types # # While HDF5 datasets are capable of storing all of these kinds of data, many conventional data analysis techniques # such as decomposition, clustering, etc. are either unable to handle complicated data types such as complex-valued # datasets and compound valued datasets, or the results from these techniques do not produce physically meaningful # results. For example, most singular value decomposition algorithms are capable of processing complex-valued datasets. # However, while the eigenvectors can have complex values, the resultant complex-valued abundance maps are meaningless. # These algorithms would not even work if the original data was compound valued! # # To avoid such problems, we need functions that transform the data to and from the necessary type (integer, real-value # etc.) # # The ``pyUSID.dtype_utils`` module facilitates comparisons, validations, and most importantly, transformations of one # data-type to another. 
We will be going over the many useful functions in this module and explaining how, when and why # one would use them. # # Recommended pre-requisite reading # ----------------------------------- # * `Universal Spectroscopic and Imaging Data (USID) model </../../../USID/usid_model.html>`_ # * `Crash course on HDF5 and h5py <../beginner/plot_h5py.html>`_ # # .. tip:: # You can download and run this document as a Jupyter notebook using the link at the bottom of this page. # # Import all necessary packages # ------------------------------- # Before we begin demonstrating the numerous functions in ``pyUSID.dtype_utils``, we need to import the necessary # packages. Here are a list of packages besides pyUSID that will be used in this example: # # * ``h5py`` - to manipulate HDF5 files # * ``numpy`` - for numerical operations on arrays in memory from __future__ import print_function, division, unicode_literals import os import subprocess import sys def install(package): subprocess.call([sys.executable, "-m", "pip", "install", package]) import h5py import numpy as np # Finally import pyUSID. try: import pyUSID as usid except ImportError: # Warning package in case something goes wrong from warnings import warn warn('pyUSID not found. Will install with pip.') import pip install('pyUSID') import pyUSID as usid ################################################################################ # Utilities for validating data types # ===================================== # pyUSID.dtype_utils contains some handy functions that make it easy to write robust and safe code by simplifying # common data type checking and validation. # # contains_integers() # --------------------- # The ``contains_integers()`` function checks to make sure that each item in a list is indeed an integer. Additionally, it # can be configured to ensure that all the values are above a minimum value. 
This is particularly useful when building # indices matrices based on the size of dimensions - specified as a list of integers for example. item = [1, 2, -3, 4] print('{} : contains integers? : {}'.format(item, usid.dtype_utils.contains_integers(item))) item = [1, 4.5, 2.2, -1] print('{} : contains integers? : {}'.format(item, usid.dtype_utils.contains_integers(item))) item = [1, 5, 8, 3] min_val = 2 print('{} : contains integers >= {} ? : {}'.format(item, min_val, usid.dtype_utils.contains_integers(item, min_val=min_val))) ################################################################################ # validate_dtype() # ----------------- # The ``validate_dtype()`` function ensure that a provided object is indeed a valid h5py or numpy data type. When writing # a main dataset along with all ancillary datasets, pyUSID meticulously ensures that all inputs are valid before # writing data to the file. This comes in very handy when we want to follow the 'measure twice, cut once' ethos. for item in [np.float16, np.complex64, np.uint8, np.int16]: print('Is {} a valid dtype? : {}'.format(item, usid.dtype_utils.validate_dtype(item))) # This function is especially useful on compound or structured data types: struct_dtype = np.dtype({'names': ['r', 'g', 'b'], 'formats': [np.float32, np.uint16, np.float64]}) print('Is {} a valid dtype? : {}'.format(struct_dtype, usid.dtype_utils.validate_dtype(struct_dtype))) ################################################################################ # get_compound_sub_dtypes() # -------------------------- # One common hassle when dealing with compound / structured array dtypes is that it can be a little challenging to # quickly get the individual datatypes of each field in such a data type. 
The ``get_compound_sub_dtypes()`` makes this a # lot easier: sub_dtypes = usid.dtype_utils.get_compound_sub_dtypes(struct_dtype) for key, val in sub_dtypes.items(): print('{} : {}'.format(key, val)) ################################################################################ # is_complex_dtype() # ------------------- # Quite often, we need to treat complex datasets different from compound datasets which themselves need to be treated # different from real valued datasets. ``is_complex_dtype()`` makes it easier to check if a numpy or HDF5 dataset has a # complex data type: for dtype in [np.float32, np.float16, np.uint8, np.int16, struct_dtype, bool]: print('Is {} a complex dtype?: {}'.format(dtype, (usid.dtype_utils.is_complex_dtype(dtype)))) for dtype in [np.complex, np.complex64, np.complex128, np.complex256]: print('Is {} a complex dtype?: {}'.format(dtype, (usid.dtype_utils.is_complex_dtype(dtype)))) ################################################################################ # Data transformation # ==================== # Perhaps the biggest benefit of ``dtype_utils`` is the ability to flatten complex, compound datasets to real-valued # datasets and vice versa. As mentioned in the introduction, this is particularly important when attempting to use # machine learning algorithms on complex or compound-valued datasets. In order to enable such pipelines, we need # functions to transform: # # * complex / compound valued datasets to real-valued datasets # * real-valued datasets back to complex / compound valued datasets # # flatten_complex_to_real() # -------------------------- # As the name suggests, this function stacks the imaginary values of a N-dimensional numpy / HDF5 dataset below its # real-values. 
Thus, applying this function to a complex valued dataset of size ``(a, b, c)`` would result in a # real-valued dataset of shape ``(a, b, 2 * c)``: length = 3 complex_array = np.random.randint(-5, high=5, size=length) + 1j * np.random.randint(-5, high=5, size=length) stacked_real_array = usid.dtype_utils.flatten_complex_to_real(complex_array) print('Complex value: {} has shape: {}'.format(complex_array, complex_array.shape)) print('Stacked real value: {} has shape: ' '{}'.format(stacked_real_array, stacked_real_array.shape)) ################################################################################ # flatten_compound_to_real() # ---------------------------- # This function flattens a compound-valued dataset of shape ``(a, b, c)`` into a real-valued dataset of shape # ``(a, b, k * c)`` where ``k`` is the number of fields within the structured array / compound dtype. Here we will # demonstrate this on a 1D array of 5 elements each containing 'r', 'g', 'b' fields: num_elems = 5 structured_array = np.zeros(shape=num_elems, dtype=struct_dtype) structured_array['r'] = np.random.random(size=num_elems) * 1024 structured_array['g'] = np.random.randint(0, high=1024, size=num_elems) structured_array['b'] = np.random.random(size=num_elems) * 1024 real_array = usid.dtype_utils.flatten_compound_to_real(structured_array) print('Structured array is of shape {} and have values:'.format(structured_array.shape)) print(structured_array) print('\nThis array converted to regular scalar matrix has shape: {} and values:'.format(real_array.shape)) print(real_array) ################################################################################ # flatten_to_real() # ----------------- # This function checks the data type of the provided dataset and then uses either of the above functions to # (if necessary) flatten the dataset into a real-valued matrix. 
By checking the data type of the dataset, it obviates # the need to explicitly call the aforementioned functions (that still do the work). Here is an example of the function # being applied to the compound valued numpy array again: real_array = usid.dtype_utils.flatten_to_real(structured_array) print('Structured array is of shape {} and have values:'.format(structured_array.shape)) print(structured_array) print('\nThis array converted to regular scalar matrix has shape: {} and values:'.format(real_array.shape)) print(real_array) ################################################################################ # The next three functions perform the inverse operation of taking real-valued matrices or datasets and converting them # to complex or compound-valued datasets. # # stack_real_to_complex() # ------------------------ # As the name suggests, this function collapses a N dimensional real-valued array of size ``(a, b, 2 * c)`` to a # complex-valued array of shape ``(a, b, c)``. It assumes that the first c values in real-valued dataset are the real # components and the following c values are the imaginary components of the complex value. This will become clearer # with an example: real_val = np.hstack([5 * np.random.rand(6), 7 * np.random.rand(6)]) print('Real valued dataset of shape {}:'.format(real_val.shape)) print(real_val) comp_val = usid.dtype_utils.stack_real_to_complex(real_val) print('\nComplex-valued array of shape: {}'.format(comp_val.shape)) print(comp_val) ################################################################################ # stack_real_to_compound() # -------------------------- # Similar to the above function, this function shrinks the last axis of a real valued dataset to create the desired # compound valued dataset. 
Here we will demonstrate it on the same 3-field ``(r,g,b)`` compound datatype: num_elems = 5 real_val = np.concatenate((np.random.random(size=num_elems) * 1024, np.random.randint(0, high=1024, size=num_elems), np.random.random(size=num_elems) * 1024)) print('Real valued dataset of shape {}:'.format(real_val.shape)) print(real_val) comp_val = usid.dtype_utils.stack_real_to_compound(real_val, struct_dtype) print('\nStructured array of shape: {}'.format(comp_val.shape)) print(comp_val) ################################################################################ # stack_real_to_target_dtype() # ----------------------------- # This function performs the inverse of ``flatten_to_real()`` - stacks the provided real-valued dataset into a complex or # compound valued dataset using the two above functions. Note that unlike ``flatten_to_real()``, the target data type must # be supplied to the function for this to work: print('Real valued dataset of shape {}:'.format(real_val.shape)) print(real_val) comp_val = usid.dtype_utils.stack_real_to_target_dtype(real_val, struct_dtype) print('\nStructured array of shape: {}'.format(comp_val.shape)) print(comp_val) ################################################################################ # check_dtype() # -------------- # ``check_dtype()`` is a master function that figures out the data type, necessary function to transform a HDF5 dataset to # a real-valued array, expected data shape, etc. Before we demonstrate this function, we need to quickly create an # example HDF5 dataset. 
file_path = 'dtype_utils_example.h5' if os.path.exists(file_path): os.remove(file_path) with h5py.File(file_path) as h5_f: num_elems = (5, 7) structured_array = np.zeros(shape=num_elems, dtype=struct_dtype) structured_array['r'] = 450 * np.random.random(size=num_elems) structured_array['g'] = np.random.randint(0, high=1024, size=num_elems) structured_array['b'] = 3178 * np.random.random(size=num_elems) _ = h5_f.create_dataset('compound', data=structured_array) _ = h5_f.create_dataset('real', data=450 * np.random.random(size=num_elems), dtype=np.float16) _ = h5_f.create_dataset('complex', data=np.random.random(size=num_elems) + 1j * np.random.random(size=num_elems), dtype=np.complex64) h5_f.flush() ################################################################################ # Now, lets test the the function on compound-, complex-, and real-valued HDF5 datasets: def check_dataset(h5_dset): print('\tDataset being tested: {}'.format(h5_dset)) func, is_complex, is_compound, n_features, type_mult = usid.dtype_utils.check_dtype(h5_dset) print('\tFunction to transform to real: %s' % func) print('\tis_complex? %s' % is_complex) print('\tis_compound? %s' % is_compound) print('\tShape of dataset in its current form: {}'.format(h5_dset.shape)) print('\tAfter flattening to real, shape is expected to be: ({}, {})'.format(h5_dset.shape[0], n_features)) print('\tByte-size of a single element in its current form: {}'.format(type_mult)) with h5py.File(file_path, mode='r') as h5_f: print('Checking a compound-valued dataset:') check_dataset(h5_f['compound']) print('') print('Checking a complex-valued dataset:') check_dataset(h5_f['complex']) print('') print('Checking a real-valued dataset:') check_dataset(h5_f['real']) os.remove(file_path)
[ "pyUSID.dtype_utils.flatten_complex_to_real", "numpy.random.rand", "pyUSID.dtype_utils.is_complex_dtype", "pyUSID.dtype_utils.contains_integers", "os.remove", "pyUSID.dtype_utils.stack_real_to_complex", "os.path.exists", "numpy.random.random", "subprocess.call", "warnings.warn", "pyUSID.dtype_ut...
[((5144, 5233), 'numpy.dtype', 'np.dtype', (["{'names': ['r', 'g', 'b'], 'formats': [np.float32, np.uint16, np.float64]}"], {}), "({'names': ['r', 'g', 'b'], 'formats': [np.float32, np.uint16, np.\n float64]})\n", (5152, 5233), True, 'import numpy as np\n'), ((5759, 5813), 'pyUSID.dtype_utils.get_compound_sub_dtypes', 'usid.dtype_utils.get_compound_sub_dtypes', (['struct_dtype'], {}), '(struct_dtype)\n', (5799, 5813), True, 'import pyUSID as usid\n'), ((7730, 7785), 'pyUSID.dtype_utils.flatten_complex_to_real', 'usid.dtype_utils.flatten_complex_to_real', (['complex_array'], {}), '(complex_array)\n', (7770, 7785), True, 'import pyUSID as usid\n'), ((8464, 8509), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_elems', 'dtype': 'struct_dtype'}), '(shape=num_elems, dtype=struct_dtype)\n', (8472, 8509), True, 'import numpy as np\n'), ((8598, 8645), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(1024)', 'size': 'num_elems'}), '(0, high=1024, size=num_elems)\n', (8615, 8645), True, 'import numpy as np\n'), ((8723, 8782), 'pyUSID.dtype_utils.flatten_compound_to_real', 'usid.dtype_utils.flatten_compound_to_real', (['structured_array'], {}), '(structured_array)\n', (8764, 8782), True, 'import pyUSID as usid\n'), ((9563, 9613), 'pyUSID.dtype_utils.flatten_to_real', 'usid.dtype_utils.flatten_to_real', (['structured_array'], {}), '(structured_array)\n', (9595, 9613), True, 'import pyUSID as usid\n'), ((10704, 10752), 'pyUSID.dtype_utils.stack_real_to_complex', 'usid.dtype_utils.stack_real_to_complex', (['real_val'], {}), '(real_val)\n', (10742, 10752), True, 'import pyUSID as usid\n'), ((11519, 11582), 'pyUSID.dtype_utils.stack_real_to_compound', 'usid.dtype_utils.stack_real_to_compound', (['real_val', 'struct_dtype'], {}), '(real_val, struct_dtype)\n', (11558, 11582), True, 'import pyUSID as usid\n'), ((12196, 12263), 'pyUSID.dtype_utils.stack_real_to_target_dtype', 'usid.dtype_utils.stack_real_to_target_dtype', (['real_val', 'struct_dtype'], {}), '(real_val, 
struct_dtype)\n', (12239, 12263), True, 'import pyUSID as usid\n'), ((12763, 12788), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (12777, 12788), False, 'import os\n'), ((14593, 14613), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (14602, 14613), False, 'import os\n'), ((2911, 2977), 'subprocess.call', 'subprocess.call', (["[sys.executable, '-m', 'pip', 'install', package]"], {}), "([sys.executable, '-m', 'pip', 'install', package])\n", (2926, 2977), False, 'import subprocess\n'), ((7616, 7658), 'numpy.random.randint', 'np.random.randint', (['(-5)'], {'high': '(5)', 'size': 'length'}), '(-5, high=5, size=length)\n', (7633, 7658), True, 'import numpy as np\n'), ((8534, 8566), 'numpy.random.random', 'np.random.random', ([], {'size': 'num_elems'}), '(size=num_elems)\n', (8550, 8566), True, 'import numpy as np\n'), ((8670, 8702), 'numpy.random.random', 'np.random.random', ([], {'size': 'num_elems'}), '(size=num_elems)\n', (8686, 8702), True, 'import numpy as np\n'), ((12794, 12814), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (12803, 12814), False, 'import os\n'), ((12820, 12840), 'h5py.File', 'h5py.File', (['file_path'], {}), '(file_path)\n', (12829, 12840), False, 'import h5py\n'), ((12896, 12941), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_elems', 'dtype': 'struct_dtype'}), '(shape=num_elems, dtype=struct_dtype)\n', (12904, 12941), True, 'import numpy as np\n'), ((13037, 13084), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(1024)', 'size': 'num_elems'}), '(0, high=1024, size=num_elems)\n', (13054, 13084), True, 'import numpy as np\n'), ((13814, 13851), 'pyUSID.dtype_utils.check_dtype', 'usid.dtype_utils.check_dtype', (['h5_dset'], {}), '(h5_dset)\n', (13842, 13851), True, 'import pyUSID as usid\n'), ((14280, 14310), 'h5py.File', 'h5py.File', (['file_path'], {'mode': '"""r"""'}), "(file_path, mode='r')\n", (14289, 14310), False, 'import h5py\n'), ((3171, 3220), 
'warnings.warn', 'warn', (['"""pyUSID not found. Will install with pip."""'], {}), "('pyUSID not found. Will install with pip.')\n", (3175, 3220), False, 'from warnings import warn\n'), ((4062, 4102), 'pyUSID.dtype_utils.contains_integers', 'usid.dtype_utils.contains_integers', (['item'], {}), '(item)\n', (4096, 4102), True, 'import pyUSID as usid\n'), ((4180, 4220), 'pyUSID.dtype_utils.contains_integers', 'usid.dtype_utils.contains_integers', (['item'], {}), '(item)\n', (4214, 4220), True, 'import pyUSID as usid\n'), ((4370, 4427), 'pyUSID.dtype_utils.contains_integers', 'usid.dtype_utils.contains_integers', (['item'], {'min_val': 'min_val'}), '(item, min_val=min_val)\n', (4404, 4427), True, 'import pyUSID as usid\n'), ((5308, 5353), 'pyUSID.dtype_utils.validate_dtype', 'usid.dtype_utils.validate_dtype', (['struct_dtype'], {}), '(struct_dtype)\n', (5339, 5353), True, 'import pyUSID as usid\n'), ((7666, 7708), 'numpy.random.randint', 'np.random.randint', (['(-5)'], {'high': '(5)', 'size': 'length'}), '(-5, high=5, size=length)\n', (7683, 7708), True, 'import numpy as np\n'), ((11308, 11355), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(1024)', 'size': 'num_elems'}), '(0, high=1024, size=num_elems)\n', (11325, 11355), True, 'import numpy as np\n'), ((12976, 13008), 'numpy.random.random', 'np.random.random', ([], {'size': 'num_elems'}), '(size=num_elems)\n', (12992, 13008), True, 'import numpy as np\n'), ((13120, 13152), 'numpy.random.random', 'np.random.random', ([], {'size': 'num_elems'}), '(size=num_elems)\n', (13136, 13152), True, 'import numpy as np\n'), ((5011, 5048), 'pyUSID.dtype_utils.validate_dtype', 'usid.dtype_utils.validate_dtype', (['item'], {}), '(item)\n', (5042, 5048), True, 'import pyUSID as usid\n'), ((6406, 6446), 'pyUSID.dtype_utils.is_complex_dtype', 'usid.dtype_utils.is_complex_dtype', (['dtype'], {}), '(dtype)\n', (6439, 6446), True, 'import pyUSID as usid\n'), ((6576, 6616), 'pyUSID.dtype_utils.is_complex_dtype', 
'usid.dtype_utils.is_complex_dtype', (['dtype'], {}), '(dtype)\n', (6609, 6616), True, 'import pyUSID as usid\n'), ((10546, 10563), 'numpy.random.rand', 'np.random.rand', (['(6)'], {}), '(6)\n', (10560, 10563), True, 'import numpy as np\n'), ((10591, 10608), 'numpy.random.rand', 'np.random.rand', (['(6)'], {}), '(6)\n', (10605, 10608), True, 'import numpy as np\n'), ((11240, 11272), 'numpy.random.random', 'np.random.random', ([], {'size': 'num_elems'}), '(size=num_elems)\n', (11256, 11272), True, 'import numpy as np\n'), ((11384, 11416), 'numpy.random.random', 'np.random.random', ([], {'size': 'num_elems'}), '(size=num_elems)\n', (11400, 11416), True, 'import numpy as np\n'), ((13263, 13295), 'numpy.random.random', 'np.random.random', ([], {'size': 'num_elems'}), '(size=num_elems)\n', (13279, 13295), True, 'import numpy as np\n'), ((13359, 13391), 'numpy.random.random', 'np.random.random', ([], {'size': 'num_elems'}), '(size=num_elems)\n', (13375, 13391), True, 'import numpy as np\n'), ((13399, 13431), 'numpy.random.random', 'np.random.random', ([], {'size': 'num_elems'}), '(size=num_elems)\n', (13415, 13431), True, 'import numpy as np\n')]
# Copyright 2021 The Distla Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """ Functions to compute error metrics. """ import functools import jax from jax import lax import numpy as np from distla_core.analysis import pmaps from distla_core.utils import pops def comparison_header(name_a, name_b, relative): """ Generates an appropriate header string based on `name_a` of `matrix_a` and `name_b` of `matrix_b`. """ header = f"||{name_a} - {name_b}||_F" if relative: header += f"/||{name_a}||_F" return header def comparison_error(matrix_a, matrix_b, relative, name_a, name_b): """ Measures how nearly `matrix_a == matrix_b`, `||matrix_a - matrix_b||_F`, transparently handling both the distributed and undistributed cases. Also divides the error by `||matrix_a||_F` if `relative` is True. Args: matrix_a: First matrix to be compared. matrix_b: Second matrix to be compared. relative: Flags whether the relative or absolute error is computed. Returns: err: ||matrix_a - matrix-b||_F, divided by ||matrix_a||_F if relative. header: A string "||name_a - name_b||_F", with "/||name_a||_F" appended if relative. 
""" err = pmaps.frobdiff(matrix_a, matrix_b) if relative: err /= pmaps.frobnorm(matrix_a) header = comparison_header(name_a, name_b, relative) return err, header def isometry_error(matrix, p_sz=128, precision=lax.Precision.HIGHEST, relative=False, dagger_left=True, name="matrix"): """ Measures how nearly `matrix` is a left isometry (`||matrix^H matrix - I||`, if `dagger_left` is `True`) or a right isometry (`||matrix matrix^H - I||`, if `dagger_left` is `False`). The serial and distributed cases are handled transparently. Args: matrix: The result to be tested. p_sz: p_sz for the SUMMA multiplications. This is only used in the distributed case. precision: ASIC matmul precision. relative: If `True`, errors are divided by `||I||_F`, and the header string is correspondingly modified. name: String to be used in place of `matrix` in the header strings. Returns: err: The error. header: The header. """ should_name = f"{name} {name}^H" if dagger_left: should_name = f"{name}^H {name}" header = comparison_header("I", should_name, relative) if matrix is None: return -1, header if dagger_left: should_be_eye = pmaps.matmul(matrix, matrix, transpose_a=True, conj_a=True, p_sz=p_sz, precision=precision) else: should_be_eye = pmaps.matmul(matrix, matrix, transpose_b=True, conj_b=True, p_sz=p_sz, precision=precision) should_name = f"{name} {name}^H" eye = pmaps.eye(should_be_eye.shape, should_be_eye.dtype) return comparison_error(eye, should_be_eye, relative, "I", should_name) def hermiticity_error(matrix, relative=False, name="matrix"): """ Measures how nearly `matrix` is Hermitian, `||matrix - matrix^H||_F`. The serial and distributed cases are handled transparently. Args: matrix: The result to be tested. relative: If `True`, errors are divided by `||matrix||_F`, and the header string is correspondingly modified. name: String to be used in place of `matrix` in the header strings. Returns: err: The error. header: The header. 
""" header = comparison_header(name, name + "^H", relative) if matrix is None: return -1, header matrix_t = pmaps.transpose(matrix, conjubuilding_block=True) return comparison_error(matrix, matrix_t, relative, name, name + "^H") def reconstruction_error(matrix, factors, p_sz=128, relative=False, name="matrix", recon_name="prod(factors)", precision=lax.Precision.HIGHEST): """ Measures how nearly `factors = prod(factors)`; `||factors - prod(factors)||_F`. The serial and distributed cases are handled transparently. Args: matrix: The result to be tested. p_sz: p_sz for the SUMMA multiplications. This is only used in the distributed case. relative: If `True`, errors are divided by `||matrix||_F`, and the header string is correspondingly modified. name: String to be used in place of `matrix` in the header string. recon_name: String to be used in place of `prod(factors)` in the header string. precision: ASIC matmul precision. Returns: err: The error. header: The header. """ header = comparison_header(name, recon_name, relative) if matrix is None or factors is None: return -1, header mult_f = functools.partial(pmaps.matmul, p_sz=p_sz, precision=precision) reconstructed = functools.reduce(mult_f, factors) return comparison_error(matrix, reconstructed, relative, name, recon_name) def idempotency_error(matrix, p_sz=128, relative=False, name="matrix", precision=lax.Precision.HIGHEST): """ Measures how nearly `matrix = matrix^2`; `||matrix - matrix^2||_F`. The serial and distributed cases are handled transparently. Args: matrix: The result to be tested. p_sz: p_sz for the SUMMA multiplications. This is only used in the distributed case. relative: If `True`, errors are divided by `||matrix||_F`, and the header string is correspondingly modified. name: String to be used in place of `matrix` in the header string. precision: ASIC matmul precision. Returns: err: The error. header: The header. 
""" matrix_2_name = name + "^2" header = comparison_header(name, matrix_2_name, relative) if matrix is None: return -1, header matrix_2 = pmaps.matmul(matrix, matrix, p_sz=p_sz, precision=precision) return comparison_error(matrix, matrix_2, relative, name, matrix_2_name) def subspace_header(name_a, name_b): return f"max_angle({name_a}, {name_b})" def subspace_angle(subspace_a, subspace_b, p_sz=128, name_a="result", name_b="expected", precision=lax.Precision.HIGHEST): """ Measures how nearly `subspace_a` and `subspace_b` span the same subspace. At present, this function only works for small matrices, since the bulk of the computation must be done on the host. """ max_size = 3600 ** 2 header = subspace_header(name_a, name_b) if subspace_a is None or subspace_b is None: return -1, header if subspace_a.size > max_size or subspace_b.size > max_size: return -1, header product = pmaps.matmul(subspace_a, subspace_b, p_sz=p_sz, precision=precision, conj_a=True, transpose_a=True) if product.ndim == 3: product = pops.undistribute(product) product, _ = np.linalg.qr(product) svs = np.linalg.svd(product, compute_uv=False) result = np.arccos(svs[-1]) return result, header
[ "distla_core.analysis.pmaps.frobdiff", "distla_core.analysis.pmaps.frobnorm", "numpy.linalg.qr", "numpy.arccos", "distla_core.analysis.pmaps.eye", "functools.reduce", "distla_core.utils.pops.undistribute", "distla_core.analysis.pmaps.matmul", "functools.partial", "distla_core.analysis.pmaps.transp...
[((1793, 1827), 'distla_core.analysis.pmaps.frobdiff', 'pmaps.frobdiff', (['matrix_a', 'matrix_b'], {}), '(matrix_a, matrix_b)\n', (1807, 1827), False, 'from distla_core.analysis import pmaps\n'), ((3329, 3380), 'distla_core.analysis.pmaps.eye', 'pmaps.eye', (['should_be_eye.shape', 'should_be_eye.dtype'], {}), '(should_be_eye.shape, should_be_eye.dtype)\n', (3338, 3380), False, 'from distla_core.analysis import pmaps\n'), ((4070, 4119), 'distla_core.analysis.pmaps.transpose', 'pmaps.transpose', (['matrix'], {'conjubuilding_block': '(True)'}), '(matrix, conjubuilding_block=True)\n', (4085, 4119), False, 'from distla_core.analysis import pmaps\n'), ((5190, 5253), 'functools.partial', 'functools.partial', (['pmaps.matmul'], {'p_sz': 'p_sz', 'precision': 'precision'}), '(pmaps.matmul, p_sz=p_sz, precision=precision)\n', (5207, 5253), False, 'import functools\n'), ((5272, 5305), 'functools.reduce', 'functools.reduce', (['mult_f', 'factors'], {}), '(mult_f, factors)\n', (5288, 5305), False, 'import functools\n'), ((6226, 6286), 'distla_core.analysis.pmaps.matmul', 'pmaps.matmul', (['matrix', 'matrix'], {'p_sz': 'p_sz', 'precision': 'precision'}), '(matrix, matrix, p_sz=p_sz, precision=precision)\n', (6238, 6286), False, 'from distla_core.analysis import pmaps\n'), ((7027, 7131), 'distla_core.analysis.pmaps.matmul', 'pmaps.matmul', (['subspace_a', 'subspace_b'], {'p_sz': 'p_sz', 'precision': 'precision', 'conj_a': '(True)', 'transpose_a': '(True)'}), '(subspace_a, subspace_b, p_sz=p_sz, precision=precision, conj_a\n =True, transpose_a=True)\n', (7039, 7131), False, 'from distla_core.analysis import pmaps\n'), ((7232, 7253), 'numpy.linalg.qr', 'np.linalg.qr', (['product'], {}), '(product)\n', (7244, 7253), True, 'import numpy as np\n'), ((7262, 7302), 'numpy.linalg.svd', 'np.linalg.svd', (['product'], {'compute_uv': '(False)'}), '(product, compute_uv=False)\n', (7275, 7302), True, 'import numpy as np\n'), ((7314, 7332), 'numpy.arccos', 'np.arccos', (['svs[-1]'], {}), 
'(svs[-1])\n', (7323, 7332), True, 'import numpy as np\n'), ((1854, 1878), 'distla_core.analysis.pmaps.frobnorm', 'pmaps.frobnorm', (['matrix_a'], {}), '(matrix_a)\n', (1868, 1878), False, 'from distla_core.analysis import pmaps\n'), ((3006, 3101), 'distla_core.analysis.pmaps.matmul', 'pmaps.matmul', (['matrix', 'matrix'], {'transpose_a': '(True)', 'conj_a': '(True)', 'p_sz': 'p_sz', 'precision': 'precision'}), '(matrix, matrix, transpose_a=True, conj_a=True, p_sz=p_sz,\n precision=precision)\n', (3018, 3101), False, 'from distla_core.analysis import pmaps\n'), ((3159, 3254), 'distla_core.analysis.pmaps.matmul', 'pmaps.matmul', (['matrix', 'matrix'], {'transpose_b': '(True)', 'conj_b': '(True)', 'p_sz': 'p_sz', 'precision': 'precision'}), '(matrix, matrix, transpose_b=True, conj_b=True, p_sz=p_sz,\n precision=precision)\n', (3171, 3254), False, 'from distla_core.analysis import pmaps\n'), ((7190, 7216), 'distla_core.utils.pops.undistribute', 'pops.undistribute', (['product'], {}), '(product)\n', (7207, 7216), False, 'from distla_core.utils import pops\n')]
# Copyright (c) 2014, <NAME> (<EMAIL>) # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # - Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" average several results """ import sys import os import csv import cPickle as pickle import numpy as np import gzip from numpy import isnan RESULT_FILE = ["./RUN/avg_res/0.07889.csv", "./RUN/avg_res/0.07895.csv", "./RUN/avg_res/0.07911.csv", "./RUN/avg_res/0.07939.csv"] OUTPUT_FILE = "./RUN/avg_res/final_submission.csv" def refine_result(res): # all values > 1 should be = 1 res[res > 1] = 1 # al values < 0 should = 0 res[res < 0] = 0 def main(): res_total = np.zeros((79975, 37)) for i in xrange(len(RESULT_FILE)): result = np.genfromtxt(RESULT_FILE[i], dtype=np.float32, delimiter=',', skip_header=1) result = result[:, 1:] res_total += result res_total /= len(RESULT_FILE) first_col = np.genfromtxt("./raw_data/kaggle_submission.csv", dtype=np.int32, delimiter=',', skip_header=1, usecols=0) first_col = first_col.reshape(len(first_col), 1) r = csv.reader(open("./raw_data/kaggle_submission.csv", 'rb'), delimiter=",") h = r.next() refine_result(res_total) with open(OUTPUT_FILE, 'wb') as f_out: w = csv.writer(f_out, delimiter=",") w.writerow(h) for i in range(res_total.shape[0]): w.writerow(np.hstack([first_col[i, 0], res_total[i, :]]).astype(np.single)) if __name__ == '__main__': main()
[ "numpy.zeros", "csv.writer", "numpy.genfromtxt", "numpy.hstack" ]
[((1882, 1903), 'numpy.zeros', 'np.zeros', (['(79975, 37)'], {}), '((79975, 37))\n', (1890, 1903), True, 'import numpy as np\n'), ((2149, 2260), 'numpy.genfromtxt', 'np.genfromtxt', (['"""./raw_data/kaggle_submission.csv"""'], {'dtype': 'np.int32', 'delimiter': '""","""', 'skip_header': '(1)', 'usecols': '(0)'}), "('./raw_data/kaggle_submission.csv', dtype=np.int32, delimiter\n =',', skip_header=1, usecols=0)\n", (2162, 2260), True, 'import numpy as np\n'), ((1960, 2037), 'numpy.genfromtxt', 'np.genfromtxt', (['RESULT_FILE[i]'], {'dtype': 'np.float32', 'delimiter': '""","""', 'skip_header': '(1)'}), "(RESULT_FILE[i], dtype=np.float32, delimiter=',', skip_header=1)\n", (1973, 2037), True, 'import numpy as np\n'), ((2525, 2557), 'csv.writer', 'csv.writer', (['f_out'], {'delimiter': '""","""'}), "(f_out, delimiter=',')\n", (2535, 2557), False, 'import csv\n'), ((2647, 2692), 'numpy.hstack', 'np.hstack', (['[first_col[i, 0], res_total[i, :]]'], {}), '([first_col[i, 0], res_total[i, :]])\n', (2656, 2692), True, 'import numpy as np\n')]