"""
This module implements a Behler-Parrinello neural network that is compatible with scikit-learn and can
therefore be used with Osprey hyperparameter optimisation.
This code closely follows the code written by Zachary Ulissi (Department of Chemical Engineering,
Stanford University) in the tflow.py module of the AMP package.
"""
from sklearn.base import BaseEstimator, ClassifierMixin
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
class BPNN(BaseEstimator, ClassifierMixin):
"""
The parameter `labels` specifies which atom each feature in X corresponds to. It is a list of
(atom label, number of features) pairs, one entry per atom in the system.
:labels: list of length n_atoms containing (str, int) tuples
"""
def fit(self, X, y):
"""
X is the training set. It is a numpy array containing all the descriptors for each atom concatenated.
:X: numpy array of shape (n_samples, n_features)
:y: array of shape (n_samples,)
"""
# Some useful data
self.n_samples = X.shape[0]
self.checkBatchSize()
# Modifying shape of y to be compatible with tensorflow and creating a placeholder
y = np.reshape(y, (len(y), 1))
with tf.name_scope('input_y'):
y_tf = tf.placeholder(tf.float32, [None, 1])
# # Splitting the X into the different descriptors
# X_input = self.__split_input(X)
# Making a list of the unique elements and one of all the elements in order
self.unique_ele, self.all_atoms = self.__unique_elements()
# Create a list of tensorflow placeholders, one item per atom in the system
inputs = []
with tf.name_scope('input_x'):
for ii in range(0, len(self.labels)):
inputs.append(tf.placeholder(tf.float32, [None, self.labels[ii][1]]))
# Zipping the placeholders and the labels, so it is easy to know which weights to call
data = list(zip(self.all_atoms, inputs))
# Declaring the weights
with tf.name_scope('weights'):
all_weights = {}
all_biases = {}
for key, value in self.unique_ele.items():
weights, biases = self.__generate_weights(value)
all_weights[key] = weights
all_biases[key] = biases
tf.summary.histogram("weights_in", weights[0])
for ii in range(len(self.hidden_layer_sizes) - 1):
tf.summary.histogram("weights_hidden", weights[ii + 1])
tf.summary.histogram("weights_out", weights[-1])
# Evaluating the model
with tf.name_scope("atom_nn"):
all_atom_ene = []
for ii in range(len(self.all_atoms)):
atom_ene = self.__atom_energy(data[ii], all_weights, all_biases)
all_atom_ene.append(atom_ene)
# Summing the results to get the total energy
with tf.name_scope("tot_ene"):
model_tot = all_atom_ene[0]
for ii in range(len(all_atom_ene)-1):
model_tot = tf.add(all_atom_ene[ii+1], model_tot)
# Calculating the cost function with L2 regularisation term
with tf.name_scope('cost'):
cost = self.__reg_cost(model_tot, y_tf, all_weights)
# tf.summary.scalar("cost", cost)
# Training step
with tf.name_scope('training'):
optimiser = tf.train.AdamOptimizer(learning_rate=self.learning_rate_init).minimize(cost)
# Initialisation of the model
init = tf.global_variables_initializer()
merged_summary = tf.summary.merge_all()
with tf.Session() as sess:
self.cost_list = []
summary_writer = tf.summary.FileWriter(logdir="/Users/walfits/Repositories/trainingNN/tensorboard", graph=sess.graph)
sess.run(init)
for iteration in range(self.max_iter):
# This is the total number of batches into which the training set is divided
n_batches = int(self.n_samples / self.batch_size)
# This will be used to calculate the average cost per iteration
avg_cost = 0
# Learning over the batches of data (the batch index no longer shadows the iteration counter)
for i in range(n_batches):
batch_x = X[i * self.batch_size:(i + 1) * self.batch_size, :]
batch_y = y[i * self.batch_size:(i + 1) * self.batch_size, :]
X_batch = self.__split_input(batch_x)
feeddict = {ph: d for ph, d in zip(inputs, X_batch)}
feeddict[y_tf] = batch_y
opt, c = sess.run([optimiser, cost], feed_dict=feeddict)
avg_cost += c / n_batches
summary = sess.run(merged_summary, feed_dict=feeddict)
summary_writer.add_summary(summary, iteration)
self.cost_list.append(avg_cost)
self.all_weights = {}
self.all_biases = {}
for key, value in self.unique_ele.items():
w = []
b = []
for ii in range(len(all_weights[key])):
w.append(sess.run(all_weights[key][ii]))
b.append(sess.run(all_biases[key][ii]))
self.all_weights[key] = w
self.all_biases[key] = b
# Returning self keeps fit() chainable, as scikit-learn estimators expect
return self
def predict(self, X):
"""
This function uses the X data and plugs it into the model and then returns the predicted y
:X: array of shape (n_samples, n_features)
This contains the input data with samples in the rows and features in the columns.
:return: array of size (n_samples,)
This contains the predictions for the target values corresponding to the samples contained in X.
"""
# Splitting the X into the different descriptors
X_input = self.__split_input(X)
# Making a list of the unique elements and one of all the elements in order
self.unique_ele, self.all_atoms = self.__unique_elements()
# Create a list of tensorflow placeholders, one item per atom in the system
inputs = []
with tf.name_scope('input_x'):
for ii in range(0, len(self.labels)):
inputs.append(tf.placeholder(tf.float32, [None, self.labels[ii][1]]))
# Zipping the placeholders and the labels, so it is easy to know which weights to call
data = list(zip(self.all_atoms, inputs))
# Making the weights into tf.variables
all_weights = {}
all_biases = {}
for key, value in self.unique_ele.items():
w = []
b = []
for ii in range(len(self.all_weights[key])):
w.append(tf.Variable(self.all_weights[key][ii]))
b.append(tf.Variable(self.all_biases[key][ii]))
all_weights[key] = w
all_biases[key] = b
# Evaluating the model
with tf.name_scope("atom_nn"):
all_atom_ene = []
for ii in range(len(self.all_atoms)):
atom_ene = self.__atom_energy(data[ii], all_weights, all_biases)
all_atom_ene.append(atom_ene)
# Summing the results to get the total energy
with tf.name_scope("tot_ene"):
model_tot = all_atom_ene[0]
for ii in range(len(all_atom_ene) - 1):
model_tot = tf.add(all_atom_ene[ii + 1], model_tot)
# Initialising variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
feeddict = {i: d for i, d in zip(inputs, X_input)}
pred = sess.run(model_tot, feed_dict=feeddict)
predictions = np.reshape(pred, (pred.shape[0],))
print(pred)
return predictions
def plot_cost(self):
"""
This function plots the cost as a function of training iterations. It can only be called after the model has
been trained.
:return: None
"""
try:
self.cost_list
except AttributeError:
raise AttributeError("No values for the cost. Make sure that the model has been trained with the function "
"fit().")
fig, ax = plt.subplots(figsize=(6, 6))
ax.plot(self.cost_list, label="Train set", color="b")
ax.set_xlabel('Number of iterations')
ax.set_ylabel('Cost Value')
ax.legend()
# plt.yscale("log")
plt.show()
def correlationPlot(self, X, y):
"""
This function plots a correlation plot of the values that are in the data set and the NN predictions. It expects
the target values to be in Hartrees.
:X: array of shape (n_samples, n_features)
This contains the input data with samples in the rows and features in the columns.
:y: array of shape (n_samples,)
This contains the target values for each sample in the X matrix.
:ylim: tuple of shape (2,) containing doubles
These are the limits of the y values for the plot.
:xlim: tuple of shape (2,) containing doubles
These are the limits of the x values for the plot.
"""
y_pred = self.predict(X)
df = pd.DataFrame()
df['High level calculated energies (Ha)'] = y
df['NN predicted energies (Ha)'] = y_pred
lm = sns.lmplot('High level calculated energies (Ha)', 'NN predicted energies (Ha)', data=df,
scatter_kws={"s": 20, "alpha": 0.6}, line_kws={"alpha": 0.5})
# lm.set(ylim=ylim)
# lm.set(xlim=xlim)
plt.show()
def __unique_elements(self):
"""
This function takes the 'labels' parameter and extracts the unique elements. These are placed into a dictionary
where the value is the number of features that each unique element has in the descriptor.
:return: dictionary of size (n_unique_elements)
"""
feat_dict = {}
all_atoms = []
for ii in range(0,len(self.labels)):
feat_dict[self.labels[ii][0]] = self.labels[ii][1]
all_atoms.append(self.labels[ii][0])
return feat_dict, all_atoms
def __generate_weights(self, n_input_layer):
"""
This function generates the weights and the biases for each element-specific neural network. It does so by
looking at the size of the hidden layers. The weights are initialised randomly.
:n_input_layer: number of features in the descriptor for one atom - int
:return: lists (of length n_hidden_layers + 1) of tensorflow variables
"""
weights = []
biases = []
# Weights from input layer to first hidden layer
weights.append(tf.Variable(tf.truncated_normal([self.hidden_layer_sizes[0], n_input_layer], stddev=0.01), name='weight_in'))
biases.append(tf.Variable(tf.zeros([self.hidden_layer_sizes[0]]), name='bias_in'))
# Weights from one hidden layer to the next
for ii in range(len(self.hidden_layer_sizes)-1):
weights.append(tf.Variable(tf.truncated_normal([self.hidden_layer_sizes[ii+1], self.hidden_layer_sizes[ii]], stddev=0.01), name='weight_hidden'))
biases.append(tf.Variable(tf.zeros([self.hidden_layer_sizes[ii+1]]), name='bias_hidden'))
# Weights from the last hidden layer to the output layer
weights.append(tf.Variable(tf.truncated_normal([1, self.hidden_layer_sizes[-1]], stddev=0.01), name='weight_out'))
biases.append(tf.Variable(tf.zeros([1]), name='bias_out'))
return weights, biases
def __atom_energy(self, zip_data, all_weights, all_biases):
"""
This function calculates the single atom energy with the single atom neural networks. It uses zip_data, which
contains the tf placeholders for an atom and its corresponding atom label. all_weights/all_biases are all
the weights/biases and their label.
:zip_data: a two item list where the first item is the atom label (string) and the second is a tf.placeholder
:all_weights: dictionary where the key is the atom label and the value is a list of weights (of length n_hidden_layers + 1).
:all_biases: dictionary where the key is the atom label and the value is a list of biases (of length n_hidden_layers + 1).
:return: tf.tensor containing the activation of the output layer.
"""
label = zip_data[0]
tf_input = zip_data[1]
# Looking up the weights and biases that correspond to this atom's element
z = tf.add(tf.matmul(tf_input, tf.transpose(all_weights[label][0])), all_biases[label][0])
h = tf.nn.tanh(z)
for ii in range(len(self.hidden_layer_sizes) - 1):
z = tf.add(tf.matmul(h, tf.transpose(all_weights[label][ii + 1])), all_biases[label][ii + 1])
h = tf.nn.tanh(z)
z = tf.add(tf.matmul(h, tf.transpose(all_weights[label][-1])), all_biases[label][-1])
return z
def __split_input(self, X):
"""
This function takes the data where the descriptor of all the atoms are concatenated into one line. It then splits
it into n_atoms different data sets that will all be fed into a different mini-network.
:X: numpy array of shape (n_samples, n_features_tot)
:return: list of numpy array of shape (n_samples, n_features)
"""
split_X = []
counter = 0
for ii in range(0, len(self.labels)):
idx1 = counter
idx2 = counter + self.labels[ii][1]
split_X.append(X[:,idx1:idx2])
counter = counter + self.labels[ii][1]
return split_X
def __reg_cost(self, nn_energy, qm_energy, all_weights):
"""
This function calculates the cost function with L2 regularisation. It requires the energies predicted by the
neural network and the energies calculated through quantum mechanics.
:nn_energy: tf.Variable of shape [n_samples, 1]
:qm_energy: tf.placeholder of shape [n_samples, 1]
:all_weights: dictionary where the key is the atom label and the value is a list of weights (of length n_hidden_layers + 1).
:return: tf.Variable of shape [1]
"""
err = tf.subtract(qm_energy, nn_energy, name="error")
cost = tf.nn.l2_loss(err, name="unreg_cost") # scalar
reg_l2 = tf.Variable(tf.zeros([1]), name="reg_term") # scalar
for key, value in self.unique_ele.items():
for ii in range(len(self.hidden_layer_sizes) + 1):
reg_l2 = tf.add(reg_l2, tf.nn.l2_loss(all_weights[key][ii]))
reg_l2 = tf.scalar_mul(self.alpha, reg_l2)
cost_reg = tf.add(cost, reg_l2, name="reg_cost")
return cost_reg
def checkBatchSize(self):
"""
This function checks whether the batch size should take its default value or a user-set value.
If it is user set, it checks that the value is reasonable.
:return: None (self.batch_size is set in place)
The default is 100, or the total number of samples if that is smaller than 100. A user-set value is
clipped to lie between 1 and the total number of samples.
"""
if self.batch_size == 'auto':
self.batch_size = min(100, self.n_samples)
elif self.batch_size < 1 or self.batch_size > self.n_samples:
print("Warning: Got `batch_size` less than 1 or larger than sample size. It is going to be clipped.")
self.batch_size = np.clip(self.batch_size, 1, self.n_samples)
def score(self, X, y, sample_weight=None):
"""
Returns the coefficient of determination R^2 of the predictions on the given test data and target values.
It is used during the training of the model.
:X: array of shape (n_samples, n_features)
This contains the input data with samples in the rows and features in the columns.
:y: array of shape (n_samples,)
This contains the target values for each sample in the X matrix.
:sample_weight: array of shape (n_samples,)
Sample weights (unused here; the argument is kept for compatibility with the BaseEstimator interface)
:return: double
This is a score between -inf and 1 (best value is 1) that tells how good the correlation plot is.
"""
y_pred = self.predict(X)
r2 = r2_score(y, y_pred)
return r2
def scoreFull(self, X, y):
"""
This scores the predictions more thoroughly than the function 'score'. It calculates the R^2, the root mean
square error, the mean absolute error and the largest positive/negative outliers. They are all in the units of
the data passed.
:X: array of shape (n_samples, n_features)
This contains the input data with samples in the rows and features in the columns.
:y: array of shape (n_samples,)
This contains the target values for each sample in the X matrix.
:return:
:r2: double
This is a score between -inf and 1 (best value is 1) that tells how good the correlation plot is.
:rmse: double
This is the root mean square error
:mae: double
This is the mean absolute error
:lpo: double
This is the largest positive outlier.
:lno: double
This is the largest negative outlier.
"""
y_pred = self.predict(X)
r2 = r2_score(y, y_pred)
rmse = np.sqrt(mean_squared_error(y, y_pred))
mae = mean_absolute_error(y, y_pred)
lpo, lno = self.largestOutliers(y, y_pred)
return r2, rmse, mae, lpo, lno
def largestOutliers(self, y_true, y_pred):
"""
This function calculates the largest positive and negative outliers from the predictions of the neural net.
:y_true: array of shape (n_samples,)
This contains the target values for each sample.
:y_pred: array of shape (n_samples,)
This contains the neural network predictions of the target values for each sample.
:return:
:lpo: double
This is the largest positive outlier.
:lno: double
This is the largest negative outlier.
"""
diff = y_pred - y_true
lpo = np.amax(diff)
lno = - np.amin(diff)
return lpo, lno
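# testMatrix2() is used in the __main__ block below but is neither defined nor imported in this
# module. A minimal toy-data sketch consistent with the labels used there (two atoms, one feature
# each) is given purely for illustration; the real helper presumably lives elsewhere.
def testMatrix2(n_samples=50):
    # Two single-feature descriptors (one per atom) and a target that is a simple function of both
    X = np.random.rand(n_samples, 2)
    y = 2.0 * X[:, 0] - 3.0 * X[:, 1]
    return X, y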
if __name__ == "__main__":
X, y = testMatrix2()
nn = BPNN(hidden_layer_sizes=(5,), labels=[('N', 1), ('C',1)], max_iter=400, alpha=0.0, learning_rate_init=0.01, batch_size=5)
nn.fit(X, y)
nn.plot_cost()
nn.correlationPlot(X, y)
import torch
import torch.nn as nn
from transformers import AutoModel
import torch.nn.functional as F
import pytest
import os
from stackmanager.config import Config
from stackmanager.exceptions import ValidationError
from stackmanager.loader import load_config
# -*- coding: utf-8 -*-
""" chat_view.py - presenter for the chat"""
__author__ = "topseli"
__license__ = "0BSD"
import os
import sys
from PyQt5 import QtWidgets, uic
from PyQt5.QtCore import pyqtSignal, pyqtSlot
if __name__ == '__main__':
run()
# -*- coding: utf-8 -*-
"""
FELPY
__author__ = "Trey Guest"
__credits__ = ["Trey Guest"]
__license__ = "EuXFEL"
__version__ = "0.2.1"
__maintainer__ = "Trey Guest"
__email__ = "trey.guest@xfel.eu"
__status__ = "Developement"
"""
from wpg.srw import srwlpy
from wpg.wavefront import Wavefront as WPG_Wavefront
from matplotlib import pyplot as plt
import imageio
import numpy as np
import seaborn as sns
import os
from datetime import datetime
from scipy.constants import h, c, e
from wpg.wpg_uti_wf import calculate_fwhm, calc_pulse_energy
from felpy.utils.vis_utils import double_colorbar_plot, colorbar_plot
from felpy.utils.np_utils import get_mesh
from felpy.analysis.scalar.enclosed_energy import get_enclosed_energy
from felpy.analysis.scalar.centroid import get_com
from felpy.analysis.complex.coherence import get_coherence_time
from felpy.analysis.energy_spectral_density import power_spectral_density
from felpy.model.tools import radial_profile
from felpy.utils.maths.fit import fit_gaussian
from felpy.utils.maths.constants import sigma_to_fwhm
ls = {"m": 1,
"cm": 1e2,
"mm": 1e3,
"um": 1e6,
"nm": 1e9}
def complex_converter(carr):
"""
convert a complex array to wpg format (note, currently only taking hor.
polarised component).
:param carr: numpy style complex array
"""
if len(carr.shape) == 3:
cwfr = np.ones([carr.shape[0], carr.shape[1], 2, carr.shape[2]])
cwfr[:,:,0,:] = carr.real
cwfr[:,:,1,:] = carr.imag
else:
cwfr = np.ones([carr.shape[0], carr.shape[1], 1,2])
cwfr[:,:,0,0] = carr.real
cwfr[:,:,0,1] = carr.imag
return cwfr
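# Minimal usage sketch (illustrative only): for an (nx, ny, nt) complex field the converter returns
# an (nx, ny, 2, nt) real array, real part in [..., 0, :] and imaginary part in [..., 1, :], e.g.
#   field = np.ones((64, 64, 10)) + 1j * np.ones((64, 64, 10))
#   wpg_arr = complex_converter(field)   # -> shape (64, 64, 2, 10)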
if __name__ == '__main__':
# =============================================================================
# from felpy.model.src.coherent import construct_SA1_pulse
#
# wfr = construct_SA1_pulse(200,200,4,1,.1)
# wfr.metadata['trey'] = True
# wfr.store_hdf5("../data/tmp/test.h5")
# wfr.load_hdf5("../data/tmp/test.h5")
#
# =============================================================================
pass
# Copyright (C) 2008 Valmantas Paliksa <walmis at balticum-tv dot lt>
# Copyright (C) 2008 Tadas Dailyda <tadas at dailyda dot com>
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
from dataclasses import (dataclass,
field)
from pyro.TimeElapsed import TimeElapsed
@dataclass
#Script generated by SupremolecularAnalyser
import sys
sys.path.insert(0, "/net/archive/groups/plggkatksdh/pyplotTest")
import matplotlib.pyplot as plt
from simpleFilters import *
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.colors as mcolors
from os.path import isdir, join, basename
from os import makedirs, remove
from glob import glob
import json
from collections import defaultdict
from itertools import combinations
from math import pi
mpl.rcParams['mathtext.default'] = 'regular'
plt.rcParams.update({'font.size': 16})
logDir = "logs/"
postprocessingDir = "postprocessing/"
resUniqueDir = join(postprocessingDir, "general")
if not isdir(resUniqueDir):
makedirs(resUniqueDir)
# cases2run = { "preprocessing" : True, "a-g" : True, "UniqueSeq" : True, "histogram2d" : True,
# "histogram2d-planar" : True, "histograms-linear" : True , "barplots": True , "resolutionplot":False,
# "occurencesTable" : True, "occurencesPairs" : True,
# "chainNeoghbors" : True }
cases2run = { "preprocessing" : False, "a-g" : False, "UniqueSeq" : False, "histogram2d" : True, "hydrogenBondAnalysis" : False , "ligandsAnalysis" : False ,
"histogram2d-planar" : False, "histograms-linear" : False , "barplots": False , "resolutionplot":False,
"occurencesTable" : False, "occurencesPairs" : False,
"chainNeoghbors" : False }
logAnionPi = join( logDir, "anionPi.log" )
logAnionCation = join(logDir, "anionCation.log")
logCationPi = join( logDir, "cationPi.log" )
logPiPi = join( logDir, "piPi.log")
logMethylPi = join(logDir, "methylPi.log")
logPlanarAnionPi = join(logDir, "planarAnionPi.log")
logLinearAnionPi = join( logDir, "linearAnionPi.log")
if cases2run["preprocessing"] or cases2run["a-g"]:
AnionPi = pd.read_csv( logAnionPi, sep = "\t").fillna("NA").sort_values(by=['Distance'],ascending=True)
AnionPi = AnionPi.drop_duplicates( subset = [ 'PDB Code', 'Model No', 'Anion chain', 'Pi acid chain', 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
AnionPi = AnionPi[ ~AnionPi['Anion code'].astype(str).isin([ "FES", "F3S", "S3F", "9S8", "ER2", "FS4", "MSK", "SF4", "1CL", "CLF"]) ]
# LinearAnionPi = pd.read_csv( logdir+"linearAnionPi.log", sep = "\t").fillna("NA")
AnionCation = pd.read_csv( logAnionCation, sep = "\t").fillna("NA")
AnionCation = AnionCation[ ~AnionCation['Anion code'].astype(str).isin([ "FES", "F3S", "S3F", "9S8", "ER2", "FS4", "MSK", "SF4", "1CL", "CLF"]) ]
HBonds = pd.read_csv( logDir+"hBonds.log", sep = "\t").fillna("NA")
HBonds = HBonds[ ~HBonds['Anion code'].astype(str).isin([ "FES", "F3S", "S3F", "9S8", "ER2", "FS4", "MSK", "SF4", "1CL", "CLF"]) ]
CationPi = pd.read_csv( logCationPi, sep = "\t").fillna("NA")
PiPi = pd.read_csv( logPiPi, sep = "\t").fillna("NA")
# MetalLigand = pd.read_csv( logdir+"metalLigand.log", sep = "\t").fillna("NA")
# PlanarAnionPi = pd.read_csv( logPlanarAnionPi, sep = "\t").fillna("NA")
if cases2run["preprocessing"]:
methylPi = pd.read_csv( logMethylPi, sep = "\t").fillna("NA")
resolution = pd.read_csv("resolu.idx", sep = r"\s+")
resOK = resolution[(resolution["RESOLUTION"]<=2.5) & (resolution["RESOLUTION"]>0)]["IDCODE"]
print("PDB ok: ", len(resOK))
##################################################################################################
logAnionPiRes = join(resUniqueDir, "anionPi_res.log")
logMethylPiRes = join(resUniqueDir, "methylPi_res.log")
logCationPiRes = join(resUniqueDir, "cationPi_res.log")
logPiPiRes = join(resUniqueDir, "piPi_res.log")
if cases2run["preprocessing"]:
keepHighResolution(AnionPi, logAnionPiRes )
keepHighResolution(methylPi, logMethylPiRes )
keepHighResolution(CationPi, logCationPiRes)
keepHighResolution(PiPi, logPiPiRes)
##################################################################################################
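# The blocks below carve the anion-pi contacts into geometric regions defined by the in-plane
# offset x and the height h above the ring plane (distances presumably in angstroms):
# "ring plane" (3 < x < 4.9, h < 1), "cylinder" (x < 1.8, 1.5 < h < 4.5), a loose selection on
# distance and angle (Distance < 5, Angle < 30), and an intermediate region that is neither
# cylinder nor plane (1.8 < x < 3.25, 2.4 < h < 3.8).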
logAnionPiResRingPlane = join(resUniqueDir, "anionPi_res_ring_plane.log")
if cases2run["preprocessing"]:
AnionPi_temp = AnionPi
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "x" ] > 3 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "x" ] < 4.9 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "h" ] < 1 ]
# AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "Distance" ] <= 4.5 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp ['PDB Code'].isin(resOK)]
AnionPi_temp.to_csv( logAnionPiResRingPlane, sep = "\t")
##################################################################################################
logAnionPiResCylinder = join(resUniqueDir, "anionPi_res_cylinder.log")
if cases2run["preprocessing"]:
AnionPi_temp = AnionPi
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "x" ] < 1.8 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "h" ] > 1.5 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "h" ] < 4.5 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp ['PDB Code'].isin(resOK)]
AnionPi_temp.to_csv( logAnionPiResCylinder, sep = "\t")
##################################################################################################
logAnionPiResBullshit = join(resUniqueDir, "anionPi_res_bullshit.log")
if cases2run["preprocessing"]:
AnionPi_temp = AnionPi
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "Distance" ] < 5.0 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "Angle" ] < 30 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp ['PDB Code'].isin(resOK)]
AnionPi_temp.to_csv( logAnionPiResBullshit, sep = "\t")
##################################################################################################
logAnionPiResDiag = join(resUniqueDir, "anionPi_res_norCylinderNorPlane.log")
if cases2run["preprocessing"]:
AnionPi_temp = AnionPi
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "x" ] > 1.8 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "x" ] < 3.25 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "h" ] > 2.4 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "h" ] < 3.8 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp ['PDB Code'].isin(resOK)]
AnionPi_temp.to_csv( logAnionPiResDiag, sep = "\t")
##################################################################################################
logCationPiResCylinder = join(resUniqueDir, "cationPi_res_cylinder.log")
if cases2run["preprocessing"]:
CationPi_temp = CationPi
CationPi_temp = CationPi_temp[ CationPi_temp [ "x" ] < 1.8 ]
CationPi_temp = CationPi_temp[ CationPi_temp [ "h" ] > 1.5 ]
CationPi_temp = CationPi_temp[ CationPi_temp [ "h" ] < 4.5 ]
CationPi_temp = CationPi_temp[ CationPi_temp ['PDB Code'].isin(resOK)]
CationPi_temp.to_csv( logCationPiResCylinder, sep = "\t")
##################################################################################################
logCationPiResRingPlane = join(resUniqueDir, "cationPi_res_ring_plane.log")
if cases2run["preprocessing"]:
CationPi_temp = CationPi
CationPi_temp = CationPi_temp[ CationPi_temp [ "x" ] > 3 ]
CationPi_temp = CationPi_temp[ CationPi_temp [ "h" ] < 1 ]
CationPi_temp = CationPi_temp[ CationPi_temp [ "x" ] < 4.9 ]
CationPi_temp = CationPi_temp[ CationPi_temp ['PDB Code'].isin(resOK)]
CationPi_temp.to_csv( logCationPiResRingPlane, sep = "\t")
##################################################################################################
# logAnionPi45 = join(resUniqueDir, "anionPi_45.log")
# logAnionPiRes45 = join(resUniqueDir, "anionPi_res_45.log")
# if cases2run["preprocessing"]:
# AnionPi_temp = AnionPi
# AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "Distance" ] <= 4.5 ]
# AnionPi_temp.to_csv( logAnionPi45, sep = "\t")
# AnionPi_temp = AnionPi_temp[ AnionPi_temp ['PDB Code'].isin(resOK)]
# AnionPi_temp.to_csv( logAnionPiRes45, sep = "\t")
logCationPiRes45 = join(resUniqueDir, "cationPi_res_45.log")
if cases2run["preprocessing"]:
CationPi_temp = CationPi
CationPi_temp = CationPi_temp[ CationPi_temp [ "Distance" ] <= 5.0 ]
CationPi_temp = CationPi_temp[ CationPi_temp ['PDB Code'].isin(resOK)]
CationPi_temp.to_csv( logCationPiRes45, sep = "\t")
##################################################################################################
distAnionCation = 3.25
for dirname, anionPiSource in zip( [ "cylinder" , "ringPlane", "norCylinderNodPlane" ], [ logAnionPiResCylinder, logAnionPiResRingPlane, logAnionPiResDiag ] ):
print("A-G Analysis")
print(anionPiSource)
AnionPi = pd.read_csv( anionPiSource, sep = "\t").fillna("NA")
analyseDir = join(postprocessingDir, dirname)
##################################################################################################
aDir = join(analyseDir, "a")
if not isdir(aDir):
makedirs(aDir)
logAnionPiA = join(aDir, "anionPi.log" )
logCationPiA = join(aDir, "cationPi.log")
logAnionCationA = join(aDir, "anionCation.log")
if cases2run["a-g"]:
print("lecimy z a")
AnionPi_temp = AnionPi
CationPi_temp = CationPi
CationPi_temp = CationPi_temp[ CationPi_temp[ "RingChain"].astype(str).isin(['1'] )]
dataFrames2Merge = [ AnionPi_temp ]
dataFrameMergeHeaders = [['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
dataFrames2Exclude = [ CationPi_temp ]
dataFrameExcludeHeaders = [['PDB Code', 'Model No', 'Cation code', 'Cation chain', 'Cation id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
[ AnionPi_temp , CationPi_temp ] = simpleMerge(dataFrames2Merge ,dataFrameMergeHeaders, dataFrames2Exclude, dataFrameExcludeHeaders)
CationPi_temp = CationPi
CationPi_temp = CationPi_temp[ CationPi_temp[ "RingChain"].astype(str).isin(['0'] )]
CationPi_temp = CationPi_temp[ ~CationPi_temp[ "Cation code"].astype(str).isin(['ARG', 'LYS'] )]
AnionCation_temp = AnionCation
AnionCation_temp = AnionCation_temp[ AnionCation_temp [ "Distance" ] < distAnionCation ]
if dirname != "ringPlane":
AnionCation_temp = AnionCation_temp[ AnionCation_temp[ "Same semisphere"].astype(str).isin(['True'] )]
AnionCation_temp = AnionCation_temp[ ~AnionCation_temp[ "Cation code"].astype(str).isin(['ARG', 'LYS'] )]
#### check whether there are cations on the opposite side of the ring, and how many #####
dataFrames2Merge = [ AnionPi_temp , CationPi_temp, AnionCation_temp ]
dataFrameMergeHeaders = [['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id','Anion group id' , 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId'], ['PDB Code', 'Model No', 'Cation code', 'Cation chain', 'Cation id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId'], ['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id' ,'Anion group id', 'Cation code', 'Cation chain', 'Cation id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
dataFrames2Exclude = [ ]
dataFrameExcludeHeaders = []
[ AnionPi_temp , CationPi_temp, AnionCation_temp ] = simpleMerge(dataFrames2Merge ,dataFrameMergeHeaders, dataFrames2Exclude, dataFrameExcludeHeaders)
AnionPi_temp.to_csv( logAnionPiA, sep = "\t")
CationPi_temp.to_csv( logCationPiA, sep = "\t")
AnionCation_temp.to_csv( logAnionCationA, sep = "\t")
##################################################################################################
bDir = join(analyseDir, "b")
if not isdir(bDir):
makedirs(bDir)
logAnionPiB = join( bDir, "anionPi.log" )
logCationPiB = join(bDir, "cationPi.log")
logAnionCationB = join( bDir, "anionCation.log")
if cases2run["a-g"]:
print("lecimy z b")
AnionPi_temp = AnionPi
CationPi_temp = CationPi
CationPi_temp = CationPi_temp[ CationPi_temp[ "RingChain"].astype(str).isin(['1'] )]
dataFrames2Merge = [ AnionPi_temp ]
dataFrameMergeHeaders = [['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
dataFrames2Exclude = [ CationPi_temp ]
dataFrameExcludeHeaders = [['PDB Code', 'Model No', 'Cation code', 'Cation chain', 'Cation id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
[ AnionPi_temp , CationPi_temp ] = simpleMerge(dataFrames2Merge ,dataFrameMergeHeaders, dataFrames2Exclude, dataFrameExcludeHeaders)
AnionCation_temp = AnionCation
AnionCation_temp = AnionCation_temp[ AnionCation_temp [ "Distance" ] > distAnionCation ]
if dirname != "ringPlane":
AnionCation_temp = AnionCation_temp[ AnionCation_temp[ "Same semisphere"].astype(str).isin(['False'] )]
CationPi_temp = CationPi
CationPi_temp = CationPi_temp[ ~CationPi_temp[ "RingChain"].astype(str).isin(['1'] )]
CationPi_temp = CationPi_temp[ CationPi_temp [ "Distance" ] < 5.0 ]
CationPi_temp = CationPi_temp[ CationPi_temp [ "Angle" ] < 45.0 ]
dataFrames2Merge = [ AnionPi_temp , CationPi_temp , AnionCation_temp ]
dataFrameMergeHeaders = [['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id', 'Anion group id' ,'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId'], ['PDB Code', 'Model No', 'Cation code', 'Cation chain', 'Cation id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId'], ['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id','Anion group id' , 'Cation code', 'Cation chain', 'Cation id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
dataFrames2Exclude = [ ]
dataFrameExcludeHeaders = []
[ AnionPi_temp , CationPi_temp , AnionCation_temp ] = simpleMerge(dataFrames2Merge ,dataFrameMergeHeaders, dataFrames2Exclude, dataFrameExcludeHeaders)
AnionPi_temp.to_csv( logAnionPiB, sep = "\t")
CationPi_temp.to_csv( logCationPiB, sep = "\t")
AnionCation_temp.to_csv( logAnionCationB, sep = "\t")
##################################################################################################
cDir = join(analyseDir, "c")
if not isdir(cDir):
makedirs(cDir)
logAnionPiC = join(cDir, "anionPi.log")
logCationPiC = join( cDir, "cationPi.log" )
logAnionCationC = join(cDir, "anionCation.log")
if cases2run["a-g"]:
print("lecimy z c")
AnionPi_temp = AnionPi
CationPi_temp = CationPi
CationPi_temp = CationPi_temp[ CationPi_temp[ "RingChain"].astype(str).isin(['1'] )]
AnionCation_temp = AnionCation
AnionCation_temp = AnionCation_temp[ AnionCation_temp [ "Distance" ] < distAnionCation ]
dataFrames2Merge = [ AnionPi_temp , CationPi_temp , AnionCation_temp ]
dataFrameMergeHeaders = [['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id','Anion group id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId'], ['PDB Code', 'Model No', 'Cation code', 'Cation chain', 'Cation id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId'], ['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id','Anion group id', 'Cation code', 'Cation chain', 'Cation id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
dataFrames2Exclude = [ ]
dataFrameExcludeHeaders = []
[ AnionPi_temp , CationPi_temp , AnionCation_temp ] = simpleMerge(dataFrames2Merge ,dataFrameMergeHeaders, dataFrames2Exclude, dataFrameExcludeHeaders)
AnionPi_temp.to_csv( logAnionPiC, sep = "\t")
CationPi_temp.to_csv( logCationPiC, sep = "\t")
AnionCation_temp.to_csv( logAnionCationC, sep = "\t")
##################################################################################################
dDir = join(analyseDir, "d")
if not isdir(dDir):
makedirs(dDir)
logAnionPiD = join(dDir, "anionPi.log")
logPiPiD = join(dDir, "piPi.log")
if cases2run["a-g"]:
print("lecimy z d")
AnionPi_temp = AnionPi
PiPi_temp = PiPi
PiPi_temp = PiPi_temp[ PiPi_temp [ "h" ] > 1.6 ]
PiPi_temp = PiPi_temp[ PiPi_temp [ "x" ] < 2.2 ]
PiPi_temp = PiPi_temp[ PiPi_temp [ "Angle" ] < 15.0 ]
PiPi_temp = PiPi_temp[ PiPi_temp [ "theta" ] < 15.0 ]
dataFrames2Merge = [ AnionPi_temp , PiPi_temp ]
dataFrameMergeHeaders = [['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId'], ['PDB Code', 'Model No', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
dataFrames2Exclude = [ ]
dataFrameExcludeHeaders = []
[ AnionPi_temp , PiPi_temp ] = simpleMerge(dataFrames2Merge ,dataFrameMergeHeaders, dataFrames2Exclude, dataFrameExcludeHeaders)
AnionPi_temp.to_csv( logAnionPiD, sep = "\t")
PiPi_temp.to_csv( logPiPiD, sep = "\t")
##################################################################################################
eDir = join(analyseDir, "e")
if not isdir(eDir):
makedirs(eDir)
logAnionPiE = join( eDir, "anionPi.log")
logPiPiE = join( eDir, "piPi.log" )
if cases2run["a-g"]:
print("lecimy z e")
AnionPi_temp = AnionPi
PiPi_temp = PiPi
PiPi_temp = PiPi_temp[ PiPi_temp [ "Angle" ] > 70.0 ]
PiPi_temp = PiPi_temp[ PiPi_temp [ "theta" ] > 70.0 ]
PiPi_temp = PiPi_temp[ PiPi_temp [ "omega" ] > 70.0 ]
dataFrames2Merge = [ AnionPi_temp , PiPi_temp ]
dataFrameMergeHeaders = [['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId'], ['PDB Code', 'Model No', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
dataFrames2Exclude = [ ]
dataFrameExcludeHeaders = []
[ AnionPi_temp , PiPi_temp ] = simpleMerge(dataFrames2Merge ,dataFrameMergeHeaders, dataFrames2Exclude, dataFrameExcludeHeaders)
AnionPi_temp.to_csv( logAnionPiE, sep = "\t")
PiPi_temp.to_csv( logPiPiE, sep = "\t")
##################################################################################################
fDir = join(analyseDir, "f")
if not isdir(fDir):
makedirs(fDir)
logAnionPiF = join(fDir, "anionPi.log")
logCationPiF = join(fDir, "cationPi.log")
logAnionCationF = join(fDir, "anionCation.log")
if cases2run["a-g"]:
print("lecimy z f")
AnionPi_temp = AnionPi
AnionCation_temp = AnionCation
AnionCation_temp = AnionCation_temp[ AnionCation_temp [ "Distance" ] < 3.5 ]
dataFrames2Merge = [ AnionPi_temp ]
dataFrameMergeHeaders = [['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
dataFrames2Exclude = [ AnionCation_temp ]
dataFrameExcludeHeaders = [['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id', 'Cation code', 'Cation chain', 'Cation id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
[ AnionPi_temp , AnionCation_temp ] = simpleMerge(dataFrames2Merge ,dataFrameMergeHeaders, dataFrames2Exclude, dataFrameExcludeHeaders)
AnionCation_temp = AnionCation
AnionCation_temp = AnionCation_temp[ AnionCation_temp [ "Distance" ] > 3.5 ]
CationPi_temp = CationPi
CationPi_temp = CationPi_temp[ CationPi_temp [ "RingChain" ] > 0.1 ]
dataFrames2Merge = [ AnionPi_temp , CationPi_temp , AnionCation_temp ]
dataFrameMergeHeaders = [['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId'], ['PDB Code', 'Model No', 'Cation code', 'Cation chain', 'Cation id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId'], ['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id', 'Cation code', 'Cation chain', 'Cation id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId']]
dataFrames2Exclude = [ ]
dataFrameExcludeHeaders = []
[ AnionPi_temp , CationPi_temp , AnionCation_temp ] = simpleMerge(dataFrames2Merge ,dataFrameMergeHeaders, dataFrames2Exclude, dataFrameExcludeHeaders)
AnionPi_temp.to_csv( logAnionPiF, sep = "\t")
CationPi_temp.to_csv( logCationPiF, sep = "\t")
AnionCation_temp.to_csv( logAnionCationF, sep = "\t")
##################################################################################################
gDir = join(analyseDir, "g")
if not isdir(gDir):
makedirs(gDir)
logAnionPiG = join(gDir, "anionPi.log")
logHBondsG = join(gDir, "hbond.log")
if cases2run["a-g"]:
print("lecimy z g")
AnionPi_temp = AnionPi
# parameters to tune: angle 170-180, H-A 1.2-1.5, D-A 2.4-3
HBonds_temp = HBonds
HBonds_temp = HBonds_temp[ HBonds_temp [ "Angle" ] > 130.0 ]
HBonds_temp = HBonds_temp[ HBonds_temp [ "Angle" ] < 180.0 ]
# HBonds_temp = HBonds_temp[ HBonds_temp [ "Distance Don Acc" ] > 2.2 ]
HBonds_temp = HBonds_temp[ HBonds_temp [ "Distance Don Acc" ] < 3.2 ]
# HBonds_temp = HBonds_temp[ HBonds_temp [ "Distance H Acc" ] > 1.2 ]
HBonds_temp = HBonds_temp[ HBonds_temp [ "Distance H Acc" ] < 2.2 ]
dataFrames2Merge = [ AnionPi_temp , HBonds_temp ]
dataFrameMergeHeaders = [['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id','Anion group id', 'Pi acid Code', 'Pi acid chain', 'Piacid id', 'CentroidId'], ['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id','Anion group id']]
dataFrames2Exclude = [ ]
dataFrameExcludeHeaders = []
[ AnionPi_temp , HBonds_temp ] = simpleMerge(dataFrames2Merge ,dataFrameMergeHeaders, dataFrames2Exclude, dataFrameExcludeHeaders)
AnionPi_temp.to_csv( logAnionPiG, sep = "\t")
HBonds_temp.to_csv( logHBondsG, sep = "\t")
##################################################################################################
pureAnionPiDir = join(analyseDir, "pureAnionPi")
if not isdir(pureAnionPiDir):
makedirs(pureAnionPiDir)
logAnionPiPure = join(pureAnionPiDir, "anionPi.log")
if cases2run["a-g"]:
cases2exclude = [ logAnionPiA, logAnionPiB, logAnionPiC, logAnionPiD, logAnionPiE, logAnionPiF, logAnionPiG ]
df = AnionPi
mergingHeaders = ['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id','Anion group id', 'Pi acid Code', 'Pi acid chain', 'Piacid id']
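# Anti-join: for each of the cases already assigned (a-g), left-merge with indicator=True and keep
# only the rows marked 'left_only', so df ends up holding the anion-pi contacts belonging to none of them.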
for logPath in cases2exclude:
# print("lol")
df2exclude = pd.read_table( logPath )
subMerged = pd.merge( df, df2exclude[mergingHeaders] , on = mergingHeaders, how='left', indicator=True )
df = subMerged[ subMerged['_merge'] == 'left_only' ]
df = df.drop( ['_merge'], axis = 1 )
df.to_csv( logAnionPiPure, sep = "\t" )
##################################################################################################
if cases2run["a-g"]:
files2process = [ logAnionPiA, logAnionPiB, logAnionPiC, logAnionPiD, logAnionPiE, logAnionPiF, logAnionPiG, logAnionPiPure ]
for f in files2process:
fOut = f[:-4] + "_UniqueSeq.log"
saveUniqueRecordsSeq( f, fOut, 'Anion chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
for f in [ logCationPiA, logCationPiB, logCationPiC, logCationPiF ]:
fOut = f[:-4] + "_UniqueSeq.log"
saveUniqueRecordsSeq( f, fOut,'Cation chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Cation code', 'Cation id'] )
for f in [ logPiPiD, logPiPiE]:
fOut = f[:-4] + "_UniqueSeq.log"
saveUniqueRecordsSeq( f, fOut,'Pi res chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Pi res code', 'Pi res id'])
##################################################################################################
if cases2run["a-g"]:
files2process = [ logAnionPiA, logAnionPiB, logAnionPiC, logAnionPiD, logAnionPiE, logAnionPiF, logAnionPiG, logAnionPiPure ]
for f in files2process:
fUnique = f[:-4] + "_UniqueSeq.log"
pdUnique = pd.read_table( fUnique )
anDict, piAcidDict, pairsDict = getFreqAnionPi( pdUnique )
anionStats = open( fUnique[:-4]+"_anions.csv", 'w' )
anionStats.write("Residue\tOccurences\n")
for res in anDict:
anionStats.write(res+"\t"+str(anDict[res]) + "\n")
anionStats.close()
piStats = open( fUnique[:-4]+"_piAcids.csv", 'w' )
piStats.write("Residue\tOccurences\n")
for res in piAcidDict:
piStats.write(res+"\t"+str(piAcidDict[res]) + "\n")
piStats.close()
pairStats = open( fUnique[:-4]+"_pairs.csv", 'w' )
pairStats.write("Residue:Residue\tOccurences\n")
for res in pairsDict:
pairStats.write(res[0]+":"+res[1]+"\t"+str(pairsDict[res]) + "\n")
pairStats.close()
pngName = f[:-4] + "_UniqueSeq_hHist.png"
distances = pdUnique["h" ].tolist()
plt.figure()
plt.rcParams.update({'font.size': 12})
n, bins, patches = plt.hist(distances, 15, density=False, facecolor='cornflowerblue')
# plt.colorbar()
plt.xlabel(r'$\it{h}$ / $\AA$')
plt.ylabel('Number of occurrences')
# if text != "":
# plt.text(70, 0.7*max(n), text, fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
plt.savefig(pngName, dpi=600, transparent=True)
plt.close()
pngName = f[:-4] + "_UniqueSeq_xHist.png"
x = pdUnique["x" ].tolist()
plt.figure()
plt.rcParams.update({'font.size': 12})
n, bins, patches = plt.hist(x, 15, density=False, facecolor='cornflowerblue')
# plt.colorbar()
plt.xlabel(r'$\it{x}$ / $\AA$')
plt.ylabel('Number of occurrences')
# if text != "":
# plt.text(70, 0.7*max(n), text, fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
plt.savefig(pngName, dpi=600, transparent=True)
plt.close()
for f in [ logCationPiA, logCationPiB, logCationPiC, logCationPiF ]:
fUnique = f[:-4] + "_UniqueSeq.log"
pdUnique = pd.read_table( fUnique )
catDict, piAcidCatDict, catElDict = getFreqCation(pdUnique)
catElStats = open( fUnique[:-4]+"_catEl.csv", 'w' )
catElStats.write("Element\tOccurences\n")
for res in catElDict:
catElStats.write(res+"\t"+str(catElDict[res]) + "\n")
catElStats.close()
##################################################################################################
if cases2run["a-g"]:
files2process = [ logAnionPiA, logAnionPiB, logAnionPiC, logAnionPiD, logAnionPiE, logAnionPiF, logAnionPiG ]
for comb in combinations( files2process, 2 ):
merged = pd.read_table( comb[0][:-4] + "_UniqueSeq.log" )
for f in comb[1:]:
newData = pd.read_table( f[:-4] + "_UniqueSeq.log" )
headers = ['PDB Code', 'Pi acid Code', "Pi acid chain", 'Piacid id', "CentroidId" , 'Anion code', "Anion chain" , 'Anion id', 'Anion group id', "Model No"]
merged = pd.merge( merged , newData, on = headers )
print( "intersection between: " )
print("\n".join(comb))
print( len(merged.index) )
##################################################################################################
logAnionPiUnique = join( resUniqueDir , basename(logAnionPi)[:-4] + "_UniqueSeq.log" )
logAnionPiResUnique = logAnionPiRes[:-4] + "_UniqueSeq.log"
logAnionPiResCylinderUnique = logAnionPiResCylinder[:-4] + "_UniqueSeq.log"
logAnionPiResRingPlaneUnique = logAnionPiResRingPlane[:-4] + "_UniqueSeq.log"
logAnionPiResDiagUnique = logAnionPiResDiag[:-4] + "_UniqueSeq.log"
logAnionPiResBullshitUnique = logAnionPiResBullshit[:-4] + "_UniqueSeq.log"
# logAnionPi45Unique = logAnionPi45[:-4] + "_UniqueSeq.log"
# logAnionPiRes45Unique = logAnionPiRes45[:-4]+ "_UniqueSeq.log"
logMethylPiUnique = join( resUniqueDir , basename(logMethylPi)[:-4] + "_UniqueSeq.log" )
logMethylPiResUnique = logMethylPiRes[:-4] + "_UniqueSeq.log"
logCationPiUnique = join( resUniqueDir , basename(logCationPi)[:-4] + "_UniqueSeq.log" )
logCationPiResUnique = logCationPiRes[:-4] + "_UniqueSeq.log"
logCationPiRes45Unique = logCationPiRes45[:-4] + "_UniqueSeq.log"
logCationPiResCylinderUnique = logCationPiResCylinder[:-4] + "_UniqueSeq.log"
logCationPiResRingPlaneUnique = logCationPiResRingPlane[:-4] + "_UniqueSeq.log"
logPiPiUnique = join( resUniqueDir, basename(logPiPi)[:-4] + "_UniqueSeq.log" )
logPiPiResUnique = logPiPiRes[:-4] + "_UniqueSeq.log"
if cases2run["UniqueSeq"]:
print("unique seq")
saveUniqueRecordsSeq( logAnionPi, logAnionPiUnique, 'Anion chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
saveUniqueRecordsSeq( logAnionPiRes, logAnionPiResUnique, 'Anion chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
saveUniqueRecordsSeq( logAnionPiResCylinder, logAnionPiResCylinderUnique, 'Anion chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
saveUniqueRecordsSeq( logAnionPiResRingPlane, logAnionPiResRingPlaneUnique, 'Anion chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
saveUniqueRecordsSeq( logAnionPiResDiag, logAnionPiResDiagUnique, 'Anion chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
saveUniqueRecordsSeq( logAnionPiResBullshit, logAnionPiResBullshitUnique, 'Anion chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
# saveUniqueRecordsSeq( logAnionPi45, logAnionPi45Unique, 'Anion chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
# saveUniqueRecordsSeq( logAnionPiRes45, logAnionPiRes45Unique, 'Anion chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
saveUniqueRecordsSeq( logMethylPi, logMethylPiUnique, 'Anion chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
saveUniqueRecordsSeq( logMethylPiRes, logMethylPiResUnique, 'Anion chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'] )
saveUniqueRecordsSeq( logCationPi, logCationPiUnique,'Cation chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Cation code', 'Cation id'] )
saveUniqueRecordsSeq( logCationPiRes, logCationPiResUnique,'Cation chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Cation code', 'Cation id'] )
saveUniqueRecordsSeq( logCationPiResCylinder, logCationPiResCylinderUnique,'Cation chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Cation code', 'Cation id'] )
saveUniqueRecordsSeq( logCationPiResRingPlane, logCationPiResRingPlaneUnique,'Cation chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Cation code', 'Cation id'] )
saveUniqueRecordsSeq( logCationPiRes45, logCationPiRes45Unique,'Cation chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Cation code', 'Cation id'] )
saveUniqueRecordsSeq( logPiPi, logPiPiUnique, 'Pi res chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Pi res code', 'Pi res id'] )
saveUniqueRecordsSeq( logPiPiRes, logPiPiResUnique,'Pi res chain', 'Pi acid chain', [ 'Pi acid Code', 'Piacid id', 'Pi res code', 'Pi res id'] )
##################################################################################################
if cases2run["histogram2d"]:
mainHistDir = join(postprocessingDir, "hist2d")
for pngFile in glob( join(mainHistDir, "*/*png") ):
remove(pngFile)
histAnionsDir = join(mainHistDir, "anions")
histPiAcidsDir = join(mainHistDir, "piAcids")
histPiAcidCationDir = join( mainHistDir, "piAcidsCations" )
histCationDir = join( mainHistDir, "cations" )
histOverwievDir = join(mainHistDir, "overwiev")
histNormalizedVolumeDir= join(mainHistDir, "normalizedVolume")
for pngFile in glob( join(histNormalizedVolumeDir, "*/*png") ):
remove(pngFile)
histAnionsDirNormalizedVolumeDir = join(histNormalizedVolumeDir, "anions")
histPiAcidsDirNormalizedVolumeDir = join(histNormalizedVolumeDir, "piAcids")
histPiAcidCationDirNormalizedVolumeDir = join( histNormalizedVolumeDir, "piAcidsCations" )
histCationDirNormalizedVolumeDir = join( histNormalizedVolumeDir, "cations" )
histOverwievDirNormalizedVolumeDir = join(histNormalizedVolumeDir, "overwiev")
for plotsDir in [ histOverwievDir, histAnionsDir, histPiAcidsDir, histPiAcidCationDir, mainHistDir ,
histCationDir, histAnionsDirNormalizedVolumeDir, histPiAcidsDirNormalizedVolumeDir, histCationDirNormalizedVolumeDir, histPiAcidCationDirNormalizedVolumeDir, histOverwievDirNormalizedVolumeDir ]:
if not isdir(plotsDir):
makedirs(plotsDir)
bin = (500,500)
binNorm = (100, 100)
overwievHistograms( logAnionPiUnique, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
overwievHistograms( logAnionPiResUnique, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
# overwievHistograms( logAnionPi45Unique, histOverwievDir, bin, 0, 4.5, 0, 4.5 )
# overwievHistograms( logAnionPiRes45Unique, histOverwievDir, bin, 0, 4.5, 0, 4.5 )
dfCatPiUnique = pd.read_table(logCationPiUnique)
dfCatPiUniqueMetals = dfCatPiUnique[ ~dfCatPiUnique['Cation code'].isin([ "ARG", "LYS" ]) ]
logCationPiUniqueMetals = join( postprocessingDir, "cationPiUniqueMetals.log" )
dfCatPiUniqueMetals.to_csv(logCationPiUniqueMetals,sep='\t')
del dfCatPiUniqueMetals
dfCatPiUniqueArgLys = dfCatPiUnique[ dfCatPiUnique['Cation code'].isin([ "ARG", "LYS" ]) ]
logCatPiUniqueArgLys = join( postprocessingDir, "cationPiUniqueArgLys.log" )
dfCatPiUniqueArgLys.to_csv(logCatPiUniqueArgLys,sep='\t')
del dfCatPiUniqueArgLys
dfCatPiUniqueDist = dfCatPiUnique[ dfCatPiUnique['Distance'] < 5.0 ]
logdfCatPiUniqueDist = join( postprocessingDir, "cationPiUniqueDist.log" )
dfCatPiUniqueDist.to_csv(logdfCatPiUniqueDist,sep='\t')
del dfCatPiUniqueDist
overwievHistograms( logCationPiUniqueMetals, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
overwievHistograms( logCatPiUniqueArgLys, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
overwievHistograms( logdfCatPiUniqueDist, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
dfCatPiUnique = pd.read_table(logCationPiResUnique)
dfCatPiUniqueMetals = dfCatPiUnique[ ~dfCatPiUnique['Cation code'].isin([ "ARG", "LYS" ]) ]
logCationPiUniqueMetals = join( postprocessingDir, "cationPiResUniqueMetals.log" )
logCationPiUniqueMetalsDist = join( postprocessingDir, "cationPiResUniqueMetalsDist.log" )
dfCatPiUniqueMetals.to_csv(logCationPiUniqueMetals,sep='\t')
dfCatPiUniqueMetals = dfCatPiUniqueMetals[ dfCatPiUniqueMetals['Distance'] < 5.0 ]
dfCatPiUniqueMetals.to_csv(logCationPiUniqueMetalsDist,sep='\t')
del dfCatPiUniqueMetals
dfCatPiUniqueArgLys = dfCatPiUnique[ dfCatPiUnique['Cation code'].isin([ "ARG", "LYS" ]) ]
logCatPiUniqueArgLys = join( postprocessingDir, "cationPiResUniqueArgLys.log" )
dfCatPiUniqueArgLys.to_csv(logCatPiUniqueArgLys,sep='\t')
del dfCatPiUniqueArgLys
dfCatPiUniqueDist = dfCatPiUnique[ dfCatPiUnique['Distance'] < 5.0 ]
logdfCatPiUniqueDist = join( postprocessingDir, "cationPiResUniqueDist.log" )
dfCatPiUniqueDist.to_csv(logdfCatPiUniqueDist,sep='\t')
cationFreq = dfCatPiUniqueDist.groupby('Cation code').size().to_dict()
piAcidCationFreq = dfCatPiUniqueDist.groupby("Pi acid Code").size().to_dict()
for cat in cationFreq:
if cationFreq[cat] > 100:
anionPiHist2D(dfCatPiUniqueDist[dfCatPiUniqueDist["Cation code"]==cat], join( histCationDir ,cat+ ".png"), join( histCationDirNormalizedVolumeDir ,cat+ ".png"),(100,100), (20, 20) , cat, 0.0, 5.0, 0.0, 5.0)
for pa in piAcidCationFreq:
if piAcidCationFreq[pa] > 100:
anionPiHist2D(dfCatPiUniqueDist[dfCatPiUniqueDist["Pi acid Code"]==pa], join( histPiAcidCationDir ,pa+ ".png"), join( histPiAcidCationDirNormalizedVolumeDir ,pa+ ".png"), (100,100),(20, 20) , pa, 0.0, 5.0, 0.0, 5.0)
overwievHistograms( logCationPiUniqueMetals, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
overwievHistograms( logCationPiUniqueMetalsDist, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
overwievHistograms( logCatPiUniqueArgLys, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
overwievHistograms( logdfCatPiUniqueDist, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
overwievHistograms( logPiPiUnique, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
overwievHistograms( logPiPiResUnique, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
overwievHistograms( logMethylPiUnique, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
overwievHistograms( logMethylPiResUnique, histOverwievDir, histOverwievDirNormalizedVolumeDir, bin, binNorm )
# df = pd.read_table(logAnionPiRes45Unique)
df = pd.read_table(logAnionPiResUnique)
df_cylinder = pd.read_table( logAnionPiResCylinderUnique)
anionFreq = df_cylinder.groupby("Anion code").size()
allAnions = anionFreq.to_dict()
PiAcidFreq = df_cylinder.groupby("Pi acid Code").size()
allPiAcids = PiAcidFreq.to_dict()
for anion in allAnions:
if allAnions[anion] > 100:
anionPiHist2D(df[df["Anion code"]==anion], join( histAnionsDir ,anion+ ".png"), join( histAnionsDirNormalizedVolumeDir ,anion+ ".png"), (100,100),(20, 20) , anion, 0.0, 5.0, 0.0, 5.0)
for piAcid in allPiAcids:
if allPiAcids[piAcid] > 100:
anionPiHist2D(df[df["Pi acid Code"]==piAcid], join( histPiAcidsDir ,piAcid+ ".png"), join( histPiAcidsDirNormalizedVolumeDir ,piAcid+ ".png"), (100,100),(20, 20) , piAcid, 0.0, 5.0, 0.0, 5.0)
##################################################################################################
if cases2run["hydrogenBondAnalysis"]:
dfAnionPi = pd.read_table(logAnionPiResUnique)
dfHydrogenBonds = pd.read_csv( logDir+"hBonds.log", sep = "\t").fillna("NA")
HBonds_temp = dfHydrogenBonds[ ~dfHydrogenBonds['Anion code'].astype(str).isin([ "FES", "F3S", "S3F", "9S8", "ER2", "FS4", "MSK", "SF4", "1CL", "CLF"]) ]
HBonds_temp = HBonds_temp[ HBonds_temp [ "Angle" ] > 130.0 ]
HBonds_temp = HBonds_temp[ HBonds_temp [ "Angle" ] < 180.0 ]
HBonds_temp = HBonds_temp[ HBonds_temp [ "Distance Don Acc" ] < 3.2 ]
HBonds_temp = HBonds_temp[ HBonds_temp [ "Distance H Acc" ] < 2.2 ]
HBonds_temp = HBonds_temp.rename(columns = { "Donor code" : "Pi acid Code" , "Donor chain" : "Pi acid chain", "Donor id" : "Piacid id" })
mergingHeaders = ['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id','Anion group id', "Pi acid Code", "Pi acid chain", "Piacid id"]
autoHBonds = pd.merge( dfAnionPi[mergingHeaders], HBonds_temp[mergingHeaders] , on = mergingHeaders, how='inner')
noAutoHBonds = pd.merge( dfAnionPi, autoHBonds , on = mergingHeaders, how='left', indicator=True )
noAutoHBonds = noAutoHBonds[ noAutoHBonds['_merge'] == 'left_only' ]
hBondAnalDir = join(postprocessingDir, "AutoHBondsAnalysis")
if not isdir(hBondAnalDir):
makedirs(hBondAnalDir)
anionPiHist2D(noAutoHBonds, join(hBondAnalDir ,"noAutoHBonds.png"), join(hBondAnalDir ,"noAutoHBondsNorm.png"), (500, 500), (100,100), "", 0.0 , 5.0 , 0.0 , 5.0 )
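
# --- Illustrative aside: the "anti-join" pattern used above ------------------
# Contacts explained by a hydrogen bond are removed with a left merge using
# indicator=True and keeping only the 'left_only' rows. A minimal,
# self-contained demonstration of the same pandas pattern:
def _demo_anti_join():
    left = pd.DataFrame({"PDB Code": ["1abc", "2xyz", "3foo"], "Distance": [3.1, 4.0, 4.4]})
    right = pd.DataFrame({"PDB Code": ["2xyz"]})
    merged = pd.merge(left, right, on=["PDB Code"], how="left", indicator=True)
    return merged[merged["_merge"] == "left_only"].drop(columns="_merge")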
##################################################################################################
if cases2run["ligandsAnalysis"]:
dfAnionPi = pd.read_table(logAnionPiResUnique)
anionFreq = dfAnionPi.groupby("Anion code").size().to_dict()
PiAcidFreq = dfAnionPi.groupby("Pi acid Code").size().to_dict()
occurencesThreshold = 100
anionLigands = []
piAcidLigands = []
for anion in anionFreq:
if anionFreq[anion] < occurencesThreshold:
anionLigands.append(anion)
for piAcid in PiAcidFreq:
if PiAcidFreq[piAcid] < occurencesThreshold:
piAcidLigands.append(piAcid)
ligandsAnalDir = join(postprocessingDir, "ligandsAnalysis")
if not isdir(ligandsAnalDir):
makedirs(ligandsAnalDir)
anionPiHist2D(dfAnionPi[ dfAnionPi["Anion code"].astype(str).isin( anionLigands ) ], join(ligandsAnalDir ,"ligandsAsAnions.png"), join(ligandsAnalDir ,"ligandsAsAnionsNorm.png"), (500, 500), (100,100), "", 0.0 , 5.0 , 0.0 , 5.0 )
anionPiHist2D(dfAnionPi[ dfAnionPi["Pi acid Code"].astype(str).isin( piAcidLigands ) ], join(ligandsAnalDir ,"ligandsAsPiAcids.png"), join(ligandsAnalDir ,"ligandsAsPiAcidsNorm.png"), (500, 500), (100,100), "", 0.0 , 5.0 , 0.0 , 5.0 )
##################################################################################################
if cases2run["histogram2d-planar"]:
mainPlanarHistDir = join(postprocessingDir, "hist2d_planarAnions")
for pngFile in glob( join(mainPlanarHistDir, "*png") ):
remove(pngFile)
for plotsDir in [ mainPlanarHistDir ]:
if not isdir(plotsDir):
makedirs(plotsDir)
anionCation2exclude = pd.read_csv( logAnionCation, sep = "\t").fillna("NA")
anionCation2exclude = anionCation2exclude[ anionCation2exclude["Distance"] < 3.0 ]
dfPlanarAnionPi = pd.read_csv( logPlanarAnionPi, sep = "\t").fillna("NA").rename(columns = { "Angle" : "PlanarAngle" }).drop( ['Centroid x coord','Centroid y coord','Centroid z coord'], axis = 1 )
mergingHeaders = ['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id','Anion group id']
subMerged = pd.merge( dfPlanarAnionPi, anionCation2exclude[mergingHeaders] , on = mergingHeaders, how='left', indicator=True )
df = subMerged[ subMerged['_merge'] == 'left_only' ]
dfPlanarAnionPi = df.drop( ['_merge'], axis = 1 )
for logAP, pngBasename in zip([ logAnionPiResCylinderUnique, logAnionPiResDiagUnique, logAnionPiResRingPlaneUnique, logAnionPiResBullshitUnique ], ["_cylinder", "_norCylinderNorPlane", "_ringPlane", "_bullshit"]):
df2merge = pd.read_table( logAP )
headers = ['PDB Code', 'Pi acid Code', "Pi acid chain", 'Piacid id', "CentroidId" , 'Anion code', "Anion chain" , 'Anion id', 'Anion group id', "Model No"]
dfMerged = pd.merge( dfPlanarAnionPi, df2merge, on = headers )
dfMerged.to_csv( join( mainPlanarHistDir, "planarAnionPi"+pngBasename+"_ResUniqueSeq.csv" ) ,sep='\t')
anionFreq = dfMerged.groupby("Anion code").size()
allAnions = anionFreq.to_dict()
for anion in allAnions:
if allAnions[anion] > 40:
anionPiPlanarHist2D(dfMerged[dfMerged["Anion code"]==anion], join( mainPlanarHistDir ,anion+pngBasename+ ".png"), 9, anion)
if cases2run["histograms-linear"]:
mainLinearHistDir = join(postprocessingDir, "hist_linearAnions")
for pngFile in glob( join(mainLinearHistDir, "*png") ):
remove(pngFile)
for plotsDir in [ mainLinearHistDir ]:
if not isdir(plotsDir):
makedirs(plotsDir)
anionCation2exclude = pd.read_csv( logAnionCation, sep = "\t").fillna("NA")
anionCation2exclude = anionCation2exclude[ anionCation2exclude["Distance"] < 3.0 ]
dfLinearAnionPi = pd.read_csv( logLinearAnionPi, sep = "\t").fillna("NA").rename(columns = { "Angle" : "LinearAngle" }).drop( ['Centroid x coord','Centroid y coord','Centroid z coord'], axis = 1 )
mergingHeaders = ['PDB Code', 'Model No', 'Anion code', 'Anion chain', 'Anion id','Anion group id']
subMerged = pd.merge( dfLinearAnionPi, anionCation2exclude[mergingHeaders] , on = mergingHeaders, how='left', indicator=True )
df = subMerged[ subMerged['_merge'] == 'left_only' ]
dfLinearAnionPi = df.drop( ['_merge'], axis = 1 )
for logAP, pngBasename in zip([ logAnionPiResCylinderUnique, logAnionPiResDiagUnique, logAnionPiResRingPlaneUnique ], ["_cylinder", "_norCylinderNorPlane", "_ringPlane"]):
df2merge = pd.read_table( logAP )
headers = ['PDB Code', 'Pi acid Code', "Pi acid chain", 'Piacid id', "CentroidId" , 'Anion code', "Anion chain" , 'Anion id', 'Anion group id', "Model No"]
dfMerged = pd.merge( dfLinearAnionPi, df2merge, on = headers )
dfMerged.to_csv( join( mainLinearHistDir, "linearAnionPi"+pngBasename+"_ResUniqueSeq.csv" ) ,sep='\t')
anionFreq = dfMerged.groupby("Anion code").size()
allAnions = anionFreq.to_dict()
for anion in allAnions:
if allAnions[anion] > 40:
anionPiLinearHist2D(dfMerged[dfMerged["Anion code"]==anion], join( mainLinearHistDir ,anion+pngBasename+ ".png"), 9, anion)
##################################################################################################
if cases2run["barplots"]:
logs = [ logAnionPiResCylinderUnique , logAnionPiResRingPlaneUnique , logAnionPiResDiagUnique , logCationPiResCylinderUnique, logCationPiResRingPlaneUnique ]
plotbarDir = join(postprocessingDir, "barplots")
if not isdir(plotbarDir):
makedirs(plotbarDir)
plotbarDirCationPi = join(plotbarDir, "cationPi")
if not isdir(plotbarDirCationPi):
makedirs(plotbarDirCationPi)
acidicaa = ["ASP","GLU"]
aa = ["ALA", "CYS", "PHE", "GLY", "HIS", "ILE", "LYS",
"LEU", "MET", "ASN", "PRO", "GLN", "ARG", "SER", "THR", "VAL", "TRP", "TYR"]
nu = ["A","G","T","C","U","I","DA", "DC", "DG", "DT", "DI" ]
aCodes = acidicaa + aa + nu
piAcids =["PHE","TYR","HIS","TRP"] + nu
nbar=10
for log, pngBasename, directory, ionHeader in zip(logs, ["cylinder", "ringPlane" , "norCylinderNorPlane" , "cylinder", "ringPlane" ], [ plotbarDir, plotbarDir, plotbarDir, plotbarDirCationPi, plotbarDirCationPi ] , [ "Anion code", "Anion code", "Anion code", "Cation code", "Cation code" ] ):
df = pd.read_table(log)
df2 = df.drop_duplicates(subset = ['PDB Code'])
anionFreq = df.groupby(ionHeader).size().sort_values(ascending=False)
PiAcidFreq = df.groupby("Pi acid Code").size().sort_values(ascending=False)
if ionHeader == "Anion code":
typeFreq = df2.groupby("Structure type").size().sort_values(ascending=False)
typeFreqInter = df.groupby("Structure type").size().sort_values(ascending=False)
anionDict = anionFreq.head(nbar).to_dict()
PiAcidDict = PiAcidFreq.head(nbar).to_dict()
typesDict = typeFreq.head(nbar).to_dict()
anionPiPlotBar( anionDict, join(directory , pngBasename+"_topIons.png"))
anionPiPlotBar( PiAcidDict, join(directory , pngBasename+"_topPiAcids.png"))
print(log)
anionDict = anionFreq.to_dict()
PiAcidDict = PiAcidFreq.to_dict()
if ionHeader == "Anion code":
typesDict = typeFreq.to_dict()
typesInterDict = typeFreqInter.to_dict()
for key in aCodes:
if key in anionDict:
del anionDict[key]
for key in piAcids:
if key in PiAcidDict:
del PiAcidDict[key]
anionDict = {k: v for k, v in sorted(anionDict.items(), key=lambda item: item[1], reverse = True)[:nbar]}
PiAcidDict = {k: v for k, v in sorted(PiAcidDict.items(), key=lambda item: item[1], reverse = True)[:nbar]}
print("top ions")
print(anionDict)
print("top pi acids")
print(PiAcidDict)
anionPiPlotBar( anionDict, join(directory , pngBasename+"_topIons_AA_NU_excluded.png") )
anionPiPlotBar( PiAcidDict, join(directory , pngBasename+"_topPiAcids_AA_NU_excluded.png") )
df3 = df[[ionHeader, 'Pi acid Code']]
pairs = df3.groupby([ionHeader, "Pi acid Code"]).size().sort_values(ascending=False)
pairsDict = pairs.head(10).to_dict()
print(log)
print(pairsDict)
if ionHeader == "Anion code":
print("PDB counts")
print(typesDict)
print("interaction counts")
print(typesInterDict)
labels = []
key2convert = list(pairsDict.keys())
for key in key2convert:
labels.append(str(key))
plt.figure(figsize=(16, 8))
plt.bar(labels, list(pairsDict.values()), color = "gold" )
plt.savefig( join(directory , pngBasename+"_top_pairs_ion_piAcid.png"), dpi=600, transparent=True)
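
# --- Illustrative aside: how the top pairs above are counted -----------------
# Grouping on two columns and taking size() yields a Series keyed by
# (ion, pi acid) tuples; sorting and slicing it gives the top-N pairs.
# The same idea on a toy frame:
def _demo_pair_counts():
    toy = pd.DataFrame({"Anion code": ["GLU", "GLU", "ASP"],
                        "Pi acid Code": ["PHE", "PHE", "TYR"]})
    pairs = toy.groupby(["Anion code", "Pi acid Code"]).size().sort_values(ascending=False)
    return pairs.head(10).to_dict()  # {('GLU', 'PHE'): 2, ('ASP', 'TYR'): 1}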
##################################################################################################
if cases2run["resolutionplot"]:
# AnionPi_temp = pd.read_csv( logAnionPiUnique, sep = "\t").fillna("NA")
AnionPi_temp = pd.read_csv( logCationPiUnique, sep = "\t").fillna("NA")
    resolutionDF = pd.read_csv("resolu.idx", sep = r"\s+")
resolutionDF = resolutionDF.rename( columns = { "IDCODE" : 'PDB Code' })
AnionPi_temp = pd.merge( AnionPi_temp, resolutionDF, on = [ 'PDB Code' ] )
AnionPi_temp = AnionPi_temp[ (AnionPi_temp [ "RESOLUTION" ] < 5) & (AnionPi_temp [ "RESOLUTION" ] > 0) ]
# AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "Method" ] == "X-RAY DIFFRACTION"]
# AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "Structure type" ] == "protein"]
# AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "Anion code" ] == "GLU" ]
# AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "Pi acid Code" ] == "PHE" ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "x" ] < 1.8 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "h" ] > 1.5 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "Cation code" ] == "LYS" ]
# fullSphere = AnionPi_temp.drop_duplicates(subset = ['PDB Code', 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'])
fullSphere = AnionPi_temp
# AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "x" ] > 0.0 ]
# AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "x" ] < 1.8 ]
# AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "Angle" ] < 45.0 ]
AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "h" ] < 3.5 ]
# AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "h" ] < 3.5 ]
# AnionPi_temp = AnionPi_temp[ AnionPi_temp [ "Distance" ] < 4.5 ]
# insideCone = AnionPi_temp.drop_duplicates(subset = ['PDB Code', 'Pi acid Code', 'Piacid id', 'Anion code', 'Anion id', 'Anion group id'])
insideCone = AnionPi_temp
resMin = min(fullSphere["RESOLUTION"])
resMax = max(fullSphere["RESOLUTION"])
nbins = 30
fullSphereCounts, fullSphereBins = np.histogram( fullSphere["RESOLUTION"], bins = nbins, range = ( resMin, resMax ) )
insideConeCounts, insideConeBins = np.histogram( insideCone["RESOLUTION"], bins = nbins, range = ( resMin, resMax ) )
print( fullSphereBins )
print( insideConeBins )
cone2fullSphere = []
i = 0
x = []
fullX = []
countsPerPDBsphere = []
countsPerPDBcone = []
pdbInCone2pdbInSphere = []
for cone, sphere in zip(insideConeCounts,fullSphereCounts):
if sphere > 10 and cone > 10:
cone2fullSphere.append( 100*cone/sphere )
x.append( ( insideConeBins[i] + insideConeBins[i+1])/2 )
pdbInResRangeSphere = len(fullSphere[(fullSphere [ "RESOLUTION" ] > insideConeBins[i]) & (fullSphere [ "RESOLUTION" ] < insideConeBins[i+1])].drop_duplicates(subset=["PDB Code"]).index)
pdbInResRangeCone = len(insideCone[(insideCone [ "RESOLUTION" ] > insideConeBins[i]) & (insideCone [ "RESOLUTION" ] < insideConeBins[i+1])].drop_duplicates(subset=["PDB Code"]).index)
countsPerPDBsphere.append( sphere/pdbInResRangeSphere )
countsPerPDBcone.append( cone/pdbInResRangeSphere )
pdbInCone2pdbInSphere.append( pdbInResRangeCone/pdbInResRangeSphere )
fullX.append( ( insideConeBins[i] + insideConeBins[i+1])/2 )
i += 1
plt.figure()
plt.plot(x, cone2fullSphere )
plt.xlabel('resolution ${\\AA}$')
plt.savefig(join( postprocessingDir, "resolution.png"), dpi=600, transparent=True)
plt.show()
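
# --- Illustrative aside: comparing two distributions bin by bin --------------
# The resolution plot above divides the per-bin counts of one subset by those
# of the full set; np.histogram is called with a shared range so the bin edges
# line up. A minimal, self-contained version of that idea:
def _demo_bin_ratio():
    rng = np.random.default_rng(0)
    full = rng.uniform(1.0, 4.0, 1000)
    subset = full[full < 2.5]
    counts_full, edges = np.histogram(full, bins=10, range=(1.0, 4.0))
    counts_sub, _ = np.histogram(subset, bins=10, range=(1.0, 4.0))
    centres = (edges[:-1] + edges[1:]) / 2
    ratio = 100 * counts_sub / np.maximum(counts_full, 1)
    return centres, ratio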
################################################################################
if cases2run["occurencesTable"]:
dfSphere = pd.read_table(logAnionPiResUnique)
anionSphere, piAcidSphere = getFreq(dfSphere)
print("Number of unique resnames anions in sphere ", len(list(anionSphere.keys())))
print("Number of unique resnames quadrupoles in sphere ", len(list(piAcidSphere.keys())))
for APlog in [ logAnionPiResCylinderUnique, logAnionPiResDiagUnique, logAnionPiResRingPlaneUnique ]:
operatingDir = APlog[:-4]+"_tables"
if not isdir(operatingDir):
makedirs(operatingDir)
dfCylinder = pd.read_table(APlog)
anionCylinder, piAcidCylinder = getFreq(dfCylinder)
selectedResAnions = list( anionCylinder.keys() )
table = open(join(operatingDir, "anionsOccurencesFull.csv" ), "w")
table.write( "\t".join(["Residue", "in sphere", "in cylinder", "cylinder/sphere"]) +"\n" )
for res in selectedResAnions:
table.write(res + "\t")
numbers = [ anionSphere[res], anionCylinder[res], 100*anionCylinder[res]/anionSphere[res] ]
table.write( "\t".join( [ str(num) for num in numbers ] ) )
table.write("\n")
table.close()
selectedResPiAcids = list( piAcidCylinder.keys() )
table = open(join(operatingDir, "piAcidOccurencesFull.csv" ), "w")
table.write( "\t".join(["Residue", "in sphere", "in cylinder", "cylinder/sphere"]) +"\n" )
for res in selectedResPiAcids:
table.write(res + "\t")
numbers = [ piAcidSphere[res], piAcidCylinder[res], 100*piAcidCylinder[res]/piAcidSphere[res] ]
table.write( "\t".join( [ str(num) for num in numbers ] ) )
table.write("\n")
table.close()
table = open(join(operatingDir, "anionsOccurencesSmall.csv" ), "w")
table.write( "\t".join(["Residue", "in sphere", "in cylinder", "cylinder/sphere"]) +"\n" )
for res in selectedResAnions:
if anionCylinder[res] >= 100:
table.write(res + "\t")
numbers = [ anionSphere[res], anionCylinder[res], 100*anionCylinder[res]/anionSphere[res] ]
table.write( "\t".join( [ str(num) for num in numbers ] ) )
table.write("\n")
table.close()
table = open(join(operatingDir, "piAcidOccurencesSmall.csv" ), "w")
table.write( "\t".join(["Residue", "in sphere", "in cylinder", "cylinder/sphere"]) +"\n" )
for res in selectedResPiAcids:
if piAcidCylinder[res] >= 100:
table.write(res + "\t")
numbers = [ piAcidSphere[res], piAcidCylinder[res], 100*piAcidCylinder[res]/piAcidSphere[res] ]
table.write( "\t".join( [ str(num) for num in numbers ] ) )
table.write("\n")
table.close()
dfSphere = pd.read_table(logCationPiRes45Unique)
dfCylinder = pd.read_table(logCationPiResCylinderUnique)
cationSphere, piAcidCationSphere = getFreqCation(dfSphere)
cationCylinder, piAcidCationCylinder = getFreqCation(dfCylinder)
selectedResCations = list( cationCylinder.keys() )
table = open(join(resUniqueDir, "cationsOccurencesFull.csv" ), "w")
table.write( "\t".join(["Residue", "in sphere", "in cylinder", "cylinder/sphere"]) +"\n" )
for res in selectedResCations:
table.write(res + "\t")
numbers = [ cationSphere[res], cationCylinder[res], 100*cationCylinder[res]/cationSphere[res] ]
table.write( "\t".join( [ str(num) for num in numbers ] ) )
table.write("\n")
table.close()
selectedResPiAcids = list( piAcidCationCylinder.keys() )
table = open(join(resUniqueDir, "piAcidCationOccurencesFull.csv" ), "w")
table.write( "\t".join(["Residue", "in sphere", "in cylinder", "cylinder/sphere"]) +"\n" )
for res in selectedResPiAcids:
table.write(res + "\t")
numbers = [ piAcidCationSphere[res], piAcidCationCylinder[res], 100*piAcidCationCylinder[res]/piAcidCationSphere[res] ]
table.write( "\t".join( [ str(num) for num in numbers ] ) )
table.write("\n")
table.close()
################################################################################
if cases2run["occurencesPairs"]:
for logAP in [ logAnionPiResCylinderUnique, logAnionPiResDiagUnique, logAnionPiResRingPlaneUnique ]:
df = pd.read_table(logAP)
df2 = df[['Anion code', 'Pi acid Code']]
pairs = df2.groupby(["Anion code", "Pi acid Code"]).size().sort_values(ascending=False)
pairsDict = pairs.to_dict()
AnionCode = df['Anion code'].drop_duplicates()
allAnionCode = AnionCode.tolist()
PiAcidCode = df['Pi acid Code'].drop_duplicates()
allPiAcidCode = PiAcidCode.tolist()
acidicaa = ["ASP","GLU"]
aa = ["ALA", "CYS", "PHE", "GLY", "HIS", "ILE", "LYS",
"LEU", "MET", "ASN", "PRO", "GLN", "ARG", "SER", "THR", "VAL", "TRP", "TYR"]
nu = ["A","G","T","C","U","I","DA", "DC", "DG", "DT", "DI" ]
aCodes = acidicaa + aa + nu
piAcids =["PHE","TYR","HIS","TRP"] + nu
tabela = open( logAP[:-4] +"_tabela.csv",'w')
tabela.write("piacid\t")
for a in aCodes:
tabela.write(a+"\t")
tabela.write("Others\n")
grandTotal = pairs.sum()
for piacid in piAcids:
tabela.write(piacid+"\t")
total = 0
for key in pairsDict:
if key[1] == piacid:
total += pairsDict[key]
for anion in aCodes:
key = ( anion, piacid)
if key in pairsDict:
anion_piacid_counts = pairsDict[key]
anion_piacid_counts_str = str(anion_piacid_counts)
tabela.write(anion_piacid_counts_str+"\t")
total -= anion_piacid_counts
grandTotal-= anion_piacid_counts
else:
tabela.write("0\t")
tabela.write(str(total)+"\n")
grandTotal-= total
tabela.write("Others\t")
for anion in aCodes:
total = 0
for key in pairsDict:
if key[0] == anion:
total += pairsDict[key]
for piacid in piAcids:
key = ( anion, piacid)
if key in pairsDict:
piacid_counts = pairsDict[key]
total -= piacid_counts
tabela.write(str(total)+"\t")
grandTotal-= total
tabela.write(str(grandTotal)+"\n")
tabela.close()
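
# --- Illustrative aside (sketch): the same table via pandas ------------------
# The hand-written anion x pi-acid table above can also be produced directly
# with pandas.crosstab (without the "Others" aggregation done above):
def _sketch_pair_table(df):
    # rows: pi acid codes, columns: anion codes, cells: pair counts
    return pd.crosstab(df["Pi acid Code"], df["Anion code"])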
################################################################################
if cases2run["chainNeoghbors"]:
cnbarDir = join(postprocessingDir, "chainNeighbors")
if not isdir(cnbarDir):
makedirs(cnbarDir)
for pngFile in glob( join(cnbarDir, "*png") ):
remove(pngFile)
dfCylinder = getResidueIdDiffDataFrame( logAnionPiResCylinderUnique )
    dfPlane = getResidueIdDiffDataFrame( logAnionPiResRingPlaneUnique )
dfNor = getResidueIdDiffDataFrame( logAnionPiResDiagUnique )
# print("Wielkosc zerowego slupka")
# print( len(AnionPi_temp[ AnionPi_temp[ "chainDist" ] == 0 ].index) )
# AnionPi_temp[ AnionPi_temp[ "chainDist" ] == 0 ].to_csv( join(cnbarDir, "zeroDiffChain.csv"), sep = "\t")
for dfAp, csvBasename in zip( [ dfCylinder, dfNor, dfPlane ] , ["_cylinder.csv" , "_norCylinderNorPlane.csv" , "_ringPlane.csv" ] ):
dfCloseAnions = dfAp[ dfAp[ "Pi acid Code"].astype(str).isin(['PHE', 'HIS', 'TRP', 'TYR'] ) ]
dfCloseAnions = dfCloseAnions[ dfCloseAnions[ "Anion code"].astype(str).isin(['ASP', 'GLU'] ) ]
dfCloseAnions = dfCloseAnions[ (dfCloseAnions["chainDist"] < 6 ) & (dfCloseAnions["chainDist"] > -6 ) & (dfCloseAnions["chainDist"] != 0 ) ]
dfCloseAnions.to_csv( join(cnbarDir, "closeAnions"+csvBasename), sep = "\t" )
dfCloseNu = dfAp[ dfAp[ "Pi acid Code"].astype(str).isin([ "A","G","T","C","U","I","DA", "DC", "DG", "DT", "DI"] ) ]
dfCloseNu = dfCloseNu[ dfCloseNu[ "Anion code"].astype(str).isin([ "A","G","T","C","U","I","DA", "DC", "DG", "DT", "DI"] ) ]
dfCloseNu = dfCloseNu[ (dfCloseNu["chainDist"] < 6 ) & (dfCloseNu["chainDist"] > -6 ) & (dfCloseNu["chainDist"] != 0 ) ]
dfCloseNu.to_csv( join(cnbarDir, "closeNUs"+csvBasename), sep = "\t" )
for anion in ['ASP', 'GLU']:
for piAcid in ['PHE', 'HIS', 'TRP', 'TYR'] :
for tempAP, pngBanename in zip( [ dfCylinder, dfNor, dfPlane ] , ["_cylinder.png" , "_norCylinderNorPlane.png" , "_ringPlane.png" ] ):
temp = tempAP[ (tempAP["Anion code"] == anion ) & ( tempAP[ "Pi acid Code"] == piAcid ) ]
freq = temp.groupby("chainDist").size().sort_values(ascending=False)
data = freq.to_dict()
plt.figure()
plt.bar(list(data.keys()), list(data.values()), color = "gold" )
plt.xlabel("$ \\Delta_{rID - aID}$")
plt.ylabel("Pairs")
plt.text(70, 0.7*max(data.values()), anion + "-"+ piAcid, fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
plt.text(-70, 0.7*max(data.values()), piAcid + "-"+ anion , fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
plt.savefig(join(cnbarDir, anion + "_"+ piAcid+pngBanename), dpi=600, format='png', transparent=True, bbox_inches = "tight")
plt.close()
allNu = ["A","G","T","C","U","I","DA", "DC", "DG", "DT", "DI" ]
allNuDNA = ["DA", "DC", "DG", "DT", "DI" ]
allNuRNA = ["A","G","T","C","U","I"]
for tempAP, pngBanename in zip( [ dfCylinder, dfNor, dfPlane ] , ["_cylinder.png" , "_norCylinderNorPlane.png" , "_ringPlane.png" ] ):
temp = tempAP[ (tempAP["Anion code"].astype(str).isin(allNu) ) & ( tempAP[ "Pi acid Code"].astype(str).isin(allNu)) & ( tempAP[ "chainDist"] > -25 ) & ( tempAP[ "chainDist"] < 25 ) ]
freq = temp.groupby("chainDist").size().sort_values(ascending=False)
data = freq.to_dict()
plt.figure()
plt.bar(list(data.keys()), list(data.values()), color = "gold" )
plt.xlabel("$ \\Delta_{rID - aID}$")
plt.ylabel("Pairs")
# plt.text(70, 0.7*max(data.values()), anion + "-"+ piAcid, fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
# plt.text(-70, 0.7*max(data.values()), piAcid + "-"+ anion , fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
plt.savefig(join(cnbarDir, "onlyNU"+pngBanename), dpi=600, format='png', transparent=True, bbox_inches = "tight")
plt.close()
for tempAP, pngBanename in zip( [ dfCylinder, dfNor, dfPlane ] , ["_cylinder.png" , "_norCylinderNorPlane.png" , "_ringPlane.png" ] ):
temp = tempAP[ (tempAP["Anion code"].astype(str).isin(allNuDNA) ) & ( tempAP[ "Pi acid Code"].astype(str).isin(allNuDNA)) & ( tempAP[ "chainDist"] > -25 ) & ( tempAP[ "chainDist"] < 25 ) ]
freq = temp.groupby("chainDist").size().sort_values(ascending=False)
data = freq.to_dict()
plt.figure()
plt.bar(list(data.keys()), list(data.values()), color = "gold" )
plt.xlim([-25, 25])
plt.xlabel("$ \\Delta_{rID - aID}$")
plt.ylabel("Pairs")
# plt.text(70, 0.7*max(data.values()), anion + "-"+ piAcid, fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
# plt.text(-70, 0.7*max(data.values()), piAcid + "-"+ anion , fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
plt.savefig(join(cnbarDir, "onlyDNA"+pngBanename), dpi=600, format='png', transparent=True, bbox_inches = "tight")
plt.close()
for tempAP, pngBanename in zip( [ dfCylinder, dfNor, dfPlane ] , ["_cylinder.png" , "_norCylinderNorPlane.png" , "_ringPlane.png" ] ):
temp = tempAP[ (tempAP["Anion code"].astype(str).isin(allNuRNA) ) & ( tempAP[ "Pi acid Code"].astype(str).isin(allNuRNA)) & ( tempAP[ "chainDist"] > -25 ) & ( tempAP[ "chainDist"] < 25 ) ]
freq = temp.groupby("chainDist").size().sort_values(ascending=False)
data = freq.to_dict()
plt.figure()
plt.bar(list(data.keys()), list(data.values()), color = "gold" )
plt.xlim([-25, 25])
plt.xlabel("$ \\Delta_{rID - aID}$")
plt.ylabel("Pairs")
# plt.text(70, 0.7*max(data.values()), anion + "-"+ piAcid, fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
# plt.text(-70, 0.7*max(data.values()), piAcid + "-"+ anion , fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
plt.savefig(join(cnbarDir, "onlyRNA"+pngBanename), dpi=600, format='png', transparent=True, bbox_inches = "tight")
plt.close()
for tempAP, pngBanename in zip( [ dfCylinder, dfNor, dfPlane ] , ["_cylinder.png" , "_norCylinderNorPlane.png" , "_ringPlane.png" ] ):
temp = tempAP[ (tempAP["Anion code"].astype(str).isin(['ASP', 'GLU']) ) & ( tempAP[ "Pi acid Code"].astype(str).isin(['PHE', 'HIS', 'TRP', 'TYR'])) & ( tempAP[ "chainDist"] > -25 ) & ( tempAP[ "chainDist"] < 25 ) ]
freq = temp.groupby("chainDist").size().sort_values(ascending=False)
data = freq.to_dict()
plt.figure()
plt.bar(list(data.keys()), list(data.values()), color = "gold" )
plt.xlabel("$ \\Delta_{rID - aID}$")
plt.ylabel("Pairs")
# plt.text(70, 0.7*max(data.values()), anion + "-"+ piAcid, fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
# plt.text(-70, 0.7*max(data.values()), piAcid + "-"+ anion , fontsize = 16, color='k',horizontalalignment='center', verticalalignment='center', weight='bold')
plt.savefig(join(cnbarDir, "onlyAA"+pngBanename), dpi=600, format='png', transparent=True, bbox_inches = "tight")
plt.close()
| [
2,
7391,
7560,
416,
5200,
2787,
2305,
10440,
2025,
26266,
263,
201,
198,
11748,
25064,
201,
198,
201,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
12813,
3262,
14,
17474,
14,
24432,
14,
489,
1130,
41826,
591,
34985,
14,
9078,
29487,
... | 2.412902 | 27,050 |
#!/usr/bin/env python
import os, sys, datetime, operator
numArgs = len(sys.argv)
if numArgs == 1:
rootDir = "/var/www/html/apps/opensha"
elif numArgs == 2:
rootDir = sys.argv[1]
else:
print "USAGE: " + sys.argv[0] + " [dir]"
sys.exit(2)
if not os.path.isdir(rootDir):
	print(rootDir + " is not a directory!")
	sys.exit(1)
processDir(rootDir) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
11,
25064,
11,
4818,
8079,
11,
10088,
198,
198,
22510,
42035,
796,
18896,
7,
17597,
13,
853,
85,
8,
198,
198,
361,
997,
42035,
6624,
352,
25,
198,
197,
15763,
3527... | 2.275641 | 156 |
from .rambank import RAMBank
import logging
from enum import Enum
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(name='mbc1')
class MBC1:
"""
Memory Bank Controller 1
    Memory from 0x0-0x7fff is both read from and written
to for control of MBC Registers
...
Attributes
----------
rom : bytearray
the ROM of the entire cartridge
cur_rom : int
the current rom Bank selected
"""
def __init__(self, cartridge):
"""
Initialize MBC1.
...
Parameters
----------
cartridge : bytearray
the ROM to initialize from
"""
self.rom = cartridge
self.ram = RAMBank(cartridge[0x149])
self.cur_rom = 1
self.modes = Enum('BankMode', 'ROM RAM')
self.mode = self.modes.ROM
def read_byte(self, address):
"""
Read a byte from mbc1.
...
Parameters
----------
address : int
to read
"""
if address < 0x4000:
return self.rom[address]
elif address < 0x8000:
address -= 0x4000
return self.rom[address + (self.cur_rom * 0x4000)]
elif address < 0xe000:
return self.ram.read_byte(address)
else:
log.critical('INVALID READ AT: ' + hex(address))
return 1
def write_byte(self, byte, address):
"""
Write a byte to MBC1. This controls/updates registers.
RAM ENABLE: 0x0 - 0x1fff
        ROM BANK #: 0x2000 - 0x3fff
RAM BANK # or upper bits of ROM BANK: 0x4000 - 0x5fff
ROM/RAM Select: 0x6000 - 0x7fff
...
Parameters
----------
byte : int
to write
address : int
to write to
"""
if address < 0x8000:
if address < 0x2000:
#enable/disable ram register
if byte & 0xa == 0xa:
self.ram.set_ext_ram_enable(True)
else:
self.ram.set_ext_ram_enable(False)
elif address < 0x4000:
#rom bank number, lower 5 bits
self.cur_rom &= 0xe0
if byte == 0:
byte |= 0x1 #MBC 1 translates 0 -> 1
self.cur_rom |= byte & 0x1f
elif address < 0x6000:
# ram bank num or upper bits of rom bank #
if self.mode == self.modes.RAM:
self.ram.set_bank_num(byte & 0x3)
else:
self.cur_rom &= 0x1f
self.cur_rom |= (byte & 0x3) << 5
else: # address < 0x8000
#rom/ram mode select
if byte == 0x0:
self.mode = self.modes.ROM
elif byte == 0x1:
self.mode = self.modes.RAM
elif address < 0xe000:
self.ram.write_byte(byte, address)
else:
log.critical('INVALID WRITE TO: ' + hex(address))
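
# --- Illustrative aside (not part of the original file) ----------------------
# The ROM mapping implemented by read_byte(): addresses below 0x4000 hit bank 0
# directly, addresses in 0x4000-0x7fff are offset into the currently selected
# bank. A tiny standalone check of that arithmetic:
def _demo_rom_offset(address, cur_rom=3):
    if address < 0x4000:
        return address                            # fixed bank 0
    return (address - 0x4000) + cur_rom * 0x4000  # switchable bank window

assert _demo_rom_offset(0x0123) == 0x0123
assert _demo_rom_offset(0x4000, cur_rom=1) == 0x4000
assert _demo_rom_offset(0x4010, cur_rom=3) == 0xc010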
| [
6738,
764,
859,
17796,
1330,
13931,
28650,
198,
11748,
18931,
198,
6738,
33829,
1330,
2039,
388,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
30531,
8,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
3672,
11639,
2... | 1.881701 | 1,623 |
import socket
import sys
import time
PORT = 0
buffer = 1024
server = ("127.0.0.1", 12345)
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
try:
clientsocket.bind((socket.gethostbyname(""), PORT))
print(socket.gethostbyname(""))
except:
print("[Binding Error] : Exiting...")
sys.exit(1)
while True:
try:
clientsocket.connect(server)
break
except:
print("[Server Offline] : Server Not Running, retrying")
time.sleep(2)
while True:
try:
print("\"exit\" for exiting the program")
message = input("Message -> ")
if message.encode("utf-8") == b'':
print("[Invalid Input] : Enter something!!")
continue
if message == "exit":
message = message.encode("utf-8")
try:
clientsocket.send(message)
except:
break
print("[Exit Received] Exiting...")
time.sleep(2)
break
print("[Sending]: ", message)
message = message.encode("utf-8")
try:
clientsocket.send(message)
except:
print("[Error] : Probable Server Error, Try to Reconnect")
break
try:
msgFromServer= clientsocket.recv(buffer)
print("[Reply From Server] :", msgFromServer.decode("utf-8"))
except:
print("[Error] : Probable Server Error, Resend")
except KeyboardInterrupt:
sys.exit(2)
clientsocket.close() | [
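
# --- Illustrative aside (assumption): a minimal server this client could talk to.
# The script above expects something on 127.0.0.1:12345 that replies to every
# message; the real server is not shown, so this is only a bare-bones echo sketch.
def _sketch_echo_server():
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(("127.0.0.1", 12345))
    srv.listen(1)
    conn, _addr = srv.accept()
    while True:
        data = conn.recv(1024)
        if not data or data == b"exit":
            break
        conn.send(b"echo: " + data)
    conn.close()
    srv.close()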
11748,
17802,
198,
11748,
25064,
198,
11748,
640,
198,
198,
15490,
796,
657,
198,
22252,
796,
28119,
198,
15388,
796,
5855,
16799,
13,
15,
13,
15,
13,
16,
1600,
17031,
2231,
8,
628,
198,
565,
2334,
5459,
796,
17802,
13,
44971,
7,
44... | 2.555556 | 495 |
import argparse
import editdistance
from num2words import num2words
from dataset import *
from engine import *
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--engine_type', type=str, required=True)
args = parser.parse_args()
print('Engine type is %s' % str(args.engine_type))
dataset = Dataset.create('tudade')
print('loaded %s with %.2f hours of data' % (str(dataset), dataset.size_hours()))
engine = ASREngine.create(ASREngines[args.engine_type])
print('created %s engine' % str(engine))
word_error_count = 0
word_count = 0
composite_errors = list()
composite_error_count = 0
millis = int(round(time.time() * 1000))
for i in range(dataset.size()):
print("sample %s of %s" % (str(i + 1), str(dataset.size())))
path, ref_transcript = dataset.get(i)
transcript = engine.transcribe(path)
ref_words = ref_transcript.strip('\n ').lower().split()
words = transcript.strip('\n ').lower().split()
if isinstance(dataset, TudaDeDataset) and dataset.cleaned:
for word in range(len(words)):
if words[word].isnumeric(): words[word] = num2words(words[word], lang='de')
for ref_word in range(len(ref_words)):
for word in range(len(words)):
if (ref_words[ref_word].startswith(words[word])
and ref_words[ref_word] != words[word] and word + 1 < len(words)):
comp = list()
comp.append(words[word])
for following in range(word + 1, len(words)):
if not ref_words[ref_word].startswith(''.join(comp) + words[following]): break
                        if ''.join(comp) == ref_words[ref_word]: break
comp.append(words[following])
if ''.join(comp) == ref_words[ref_word]:
composite_error_count += len(comp)
composite_errors.append((ref_words[ref_word], comp))
distance = editdistance.eval(ref_words, words)
print("Ref: %s" % ref_words)
print("Got: %s" % words)
print("Distance: %s" % str(distance))
word_error_count += distance
word_count += len(ref_words)
print('word count: %d' % word_count)
print('word error count : %d' % word_error_count)
print('Composite error count : %d' % composite_error_count)
print('word error rate without composite errors : %.2f' % (
100 * float(word_error_count - len(composite_errors)) / word_count))
print('word error rate : %.2f' % (100 * float(word_error_count) / word_count))
end_millis = int(round(time.time() * 1000))
print("Start: %s", str(millis))
print("End: %s", str(end_millis))
| [
11748,
1822,
29572,
198,
198,
11748,
4370,
30246,
198,
6738,
997,
17,
10879,
1330,
997,
17,
10879,
198,
198,
6738,
27039,
1330,
1635,
198,
6738,
3113,
1330,
1635,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
... | 2.253205 | 1,248 |
import codecs
import os
import re
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *parts), "rb", "utf-8") as f:
return f.read()
def find_version(*file_paths):
"""
Build a path from *file_paths* and search for a ``__version__``
string inside.
"""
version_file = read(*file_paths)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M
)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "doc2dash"
author = "Hynek Schlawack"
copyright = "2012, Hynek Schlawack"
# The full version, including alpha/beta/rc tags.
release = find_version("../src/doc2dash/__init__.py")
# The short X.Y version.
version = release.rsplit(".", 1)[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
html_theme = "furo"
html_theme_options = {}
# Output file base name for HTML help builder.
htmlhelp_basename = "doc2dashdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
"index",
"doc2dash.tex",
"doc2dash Documentation",
"Hynek Schlawack",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "doc2dash", "doc2dash Documentation", ["Hynek Schlawack"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"doc2dash",
"doc2dash Documentation",
"Hynek Schlawack",
"doc2dash",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| [
11748,
40481,
82,
198,
11748,
28686,
198,
11748,
302,
628,
198,
4299,
1100,
46491,
42632,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
10934,
281,
4112,
3108,
422,
1635,
42632,
9,
290,
290,
1441,
262,
10154,
286,
262,
198,
220... | 3.119589 | 1,848 |
import lasagne
| [
11748,
39990,
21080,
628
] | 4 | 4 |
import warnings
import pytest
| [
11748,
14601,
198,
198,
11748,
12972,
9288,
628,
628
] | 3.777778 | 9 |
#!/usr/bin/env python
import argparse
import os
import sys
import subprocess
import github3
PROJECT_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
BUILD_DIR = os.path.join(PROJECT_DIR, './build')
GITHUB_API_TOKEN = os.environ.get('HOMEBREW_GITHUB_API_TOKEN')
if GITHUB_API_TOKEN:
gh = github3.login(token=GITHUB_API_TOKEN)
else:
    print("Warning: GITHUB_API_TOKEN not set. Github API calls will be rate-limited.", file=sys.stderr)
gh = github3
def file_diff2htmls(patch_file):
"""Returns a string."""
cmd = os.path.join(PROJECT_DIR, './PrettyPatch/prettify.rb')
return subprocess.check_output(cmd + ' ' + patch_file, shell=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create *.patch and *-patch.html for pull requests.')
parser.add_argument('user', type=str, metavar='GITHUB_USER')
parser.add_argument('repo', type=str, metavar='GITHUB_REPO')
args = parser.parse_args()
main(args.user, args.repo)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
850,
14681,
198,
11748,
33084,
18,
198,
198,
31190,
23680,
62,
34720,
796,
28686,
13,
6978,
13,
22179,
7,
418,
... | 2.539642 | 391 |
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.functional import empty
from .utils.lazymodel import model_cache_key, OBJECT_DOES_NOT_EXIST, get_model_cache
from .utils.modelutils import (
lookup_cache_key,
model_cache_deleted_cache_key,
model_row_cache_enabled,
save_lookup_cache_key,
GET_ARGS_PK_KEY,
)
DOES_NOT_EXIST_CACHE_TIMEOUT = 60 * 5
DELETED_CACHE_TIMEOUT = 60
LOOKUP_CACHE_TIMEOUT = 60 * 60
class CachedGetManager(RelatedFieldManager):
"""
Manager for caching results of the get() method. Uses an ordinary
dictionary by default, but can be overridden to use anything that
supports dictionary-like access, such as a memcache wrapper.
"""
cache_backend = {}
class RowCacheManager(RelatedFieldManager):
"""
Manager for caching single-row queries. To make invalidation easy,
we use an extra layer of indirection. The query arguments are used as a
cache key, whose stored value is the object pk, from which the final pk
cache key can be generated. When a model using RowCacheManager is saved,
this pk cache key should be invalidated. Doing two memcached queries is
still much faster than fetching from the database.
"""
# noinspection PyProtectedMember
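
# --- Illustrative sketch (assumption, not the project's implementation) ------
# RelatedFieldManager and the cache-key helpers imported above are defined
# elsewhere in this project. The two-step lookup described in the docstring
# (query kwargs -> pk, then pk -> row) can be illustrated with plain dicts
# standing in for memcached:
def _sketch_two_step_get(cache, lookup_key, pk_key_fn, fetch_from_db):
    pk = cache.get(lookup_key)
    if pk is None:
        obj = fetch_from_db()            # cache miss on the lookup key
        cache[lookup_key] = obj.pk       # assumes a Django-style .pk attribute
        cache[pk_key_fn(obj.pk)] = obj
        return obj
    obj = cache.get(pk_key_fn(pk))
    if obj is None:                      # row entry was invalidated on save()
        obj = fetch_from_db()
        cache[pk_key_fn(pk)] = obj
    return obj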
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
45124,
1330,
6565,
198,
198,
6738,
764,
26791,
13,
75,
12582,
19849,
1330,
2746,
62,
23... | 3.05036 | 417 |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 01 21:52:18 2018
@author: LCL
"""
from __future__ import division # for / and //
#from collections import Counter # for merge dict
import sys # for exit()
import os # for file path
global CodeUpperPath
CodeUpperPath = os.path.abspath('..')
'''
https://blog.csdn.net/fred1653/article/details/51255530
'''
def addr2dec(addr):
"将点分十进制IP地址转换成十进制整数"
items = [int(x) for x in addr.split(".")]
return sum([items[i] << [24, 16, 8, 0][i] for i in range(4)])
def dec2addr(dec):
"将十进制整数IP转换成点分十进制的字符串IP地址"
return ".".join([str(dec >> x & 0xff) for x in [24, 16, 8, 0]])
#RuleList, linenum = Init_OpenFlow('../data/OF1_50K') | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
4280,
5534,
2310,
25,
4309,
25,
1507,
2864,
198,
198,
31,
9800,
25,
406,
5097,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
... | 1.99711 | 346 |
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
# support
import pyre
# declaration
class Status(pyre.tracker):
"""
A helper that watches over a component's traits and records value changes
"""
# public data
raw = True
# meta-methods
# hooks
def flush(self, observable, **kwds):
"""
Handler of the notification that the value of {observable} has changed
"""
# mark me
self.raw = True
# chain up
return super().flush(observable=observable, **kwds)
# end of file
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
285,
40302,
257,
13,
70,
13,
257,
26884,
85,
6557,
89,
271,
198,
2,
29617,
39795,
198,
2,
357,
66,
8,
7795,
12,
7908,
477,
2489,
10395,
198,
2,
628,
1... | 2.452 | 250 |
from lxml import etree | [
6738,
300,
19875,
1330,
2123,
631
] | 3.666667 | 6 |
import tensorflow as tf
from basic.model import Model
from my.tensorflow import average_gradients
from my.tensorflow.general import zerout_gradients_for_zero_weights
| [
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
4096,
13,
19849,
1330,
9104,
198,
6738,
616,
13,
83,
22854,
11125,
1330,
2811,
62,
9744,
2334,
198,
6738,
616,
13,
83,
22854,
11125,
13,
24622,
1330,
1976,
263,
448,
62,
9744,
2334... | 3.428571 | 49 |
'''
Functionality to get ticker names and data
'''
import os
import random
import pandas as pd
import yfinance as yf
def get_random_tickers(n: int) -> list:
'''
From the available ticker data, randomly select n tickers.
'''
return random.sample(get_all_ticker_names(), n)
def get_tickers_exc_sample(n: int,
exclude: list) -> list:
'''
Get a random selection of tickers from all available, excluding any tickers
that are in the exclude list.
'''
all_tickers = get_all_ticker_names()
ticker_sample = [ticker for ticker in all_tickers if ticker not in exclude]
return random.sample(ticker_sample, n)
def get_all_ticker_names():
'''
Get a list of all ticker names in the data-directory.
'''
# To remove duplicated entries since fundamentals exist in this folder too,
# the list is turned into a set and back to a list again
return list(
set([s.split('.csv')[0].split('_')[0]
for s in os.listdir('data/') if '.csv' in s]
)
)
def check_run(download_type: str):
'''
Before downloading tickers, check the paths and configs are correct
Parameters
----------
download_type : str
A config to describe which type of download is being performed
Returns
-------
None
'''
# First check if the data folder exists, if it doesn't, then create one
if not os.path.isdir('data/'):
os.mkdir('data')
# Now check the download type is of the correct format
if download_type not in ['spy', 'user']:
raise ValueError(str(download_type) + ' is not a valid option, ' +
' please enter spy or user.')
def get_tickers(download_type: str) -> list:
'''
Get the list of tickers based on the config controls
Parameters
----------
download_type : str
Which data source to grab the ticker names from
Returns
-------
tickers: list
A list of tickers ready for downloading
'''
if download_type == 'user':
return user_defined()
elif download_type == 'spy':
return spy_tickers()
def user_defined() -> list:
'''
Get a list of tickers generated from the user file "tickers.csv"
Parameters
----------
None
Returns
-------
tickers: list
A list of tickers to download.
'''
return pd.read_csv('utils/tickers.csv')['ticker'].tolist()
def spy_tickers() -> list:
'''
Get the current s&p500 from wikipedia.
Parameters
----------
None
Returns
-------
tickers : list
List of the current s&p 500 tickers
'''
df = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
return df[0]['Symbol'].tolist()
def get_data(tickers: list):
'''
Obtain historical daily price data for all tickers specified. The outcome
is a csv file of price data for each ticker in the data folder.
Parameters
----------
tickers : list
A list of the tickers to download the data for
Returns
-------
None
'''
print('Downloading the data from yahoo finance')
data = yf.download(tickers = tickers,
interval = '1D',
group_by = 'ticker',
auto_adjust = False,
prepost = False,
threads = True,
proxy = None
)
data = data.T
print('Data downloaded. Saving the csv files in the data directory.')
for ticker in tickers:
# Try statement is required because sometimes a ticker fails to download
try:
df = data.loc[(ticker.upper(),),].T.reset_index().dropna()
df.to_csv(f'data/{ticker}.csv', index = False)
except:
print(f'Ticker {ticker} failed to download.') | [
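
# --- Illustrative usage (assumption about the intended workflow) --------------
# A typical call sequence for the helpers above: validate the config, resolve
# the ticker list, then download. Fetching real data needs network access.
def _sketch_run(download_type="spy"):
    check_run(download_type)
    tickers = get_tickers(download_type)
    get_data(tickers[:5])  # keep the sketch small: only the first five tickers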
7061,
6,
198,
22203,
1483,
284,
651,
4378,
263,
3891,
290,
1366,
198,
7061,
6,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
331,
69,
14149,
355,
331,
69,
198,
198,
4299,
651,
62,
25120,
62,
... | 2.350888 | 1,690 |
# coding=utf-8
from .CCPRestSDK import REST
# Note: main account. After logging in to the Yuntongxun (Cloopen) console, the developer main account ACCOUNT SID is shown under "Console - Applications".
_accountSid = '8a216da86812593601682c07861e1cad'
# Note: main account token. After logging in to the console, the developer main account AUTH TOKEN is shown under "Console - Applications".
_accountToken = '3df1186fece043f3a7c66fd4f91148a8'
# Use the APPID shown on the management console home page, or the APPID of an application you created yourself.
_appId = '8a216da86812593601682c07866b0cb3'
# Note: request address; in production this is configured as app.cloopen.com.
_serverIP = 'sandboxapp.cloopen.com'
# Note: request port; 8883 in production.
_serverPort = "8883"
# Note: REST API version number, keep unchanged.
_softVersion = '2013-12-26'
class CCP(object):
"""发送短信的辅助类,并保证全局单例"""
def send_template_sms(self, to, datas, temp_id):
"""发送短信"""
result = self.rest.sendTemplateSMS(to, datas, temp_id)
# 如果云通讯发送短信成功,返回的字典数据result中statuCode的值为"000000"则成功
if result.get('statusCode') == '000000':
# 返回0,表示发送短信成功
return 0
else:
# 返回-1,表示发送失败
return -1
if __name__ == "__main__":
ccp = CCP()
    # Note: the test SMS template id is 1
    # arg 1: the phone number to send to
    # arg 2: ['content', validity period in minutes]
    # arg 3: template id 1 -- "[Yuntongxun] You are using the Yuntongxun SMS template. Your verification code is {1}; please enter it correctly within {2} minutes."
ccp.send_template_sms('13666', ['666666', 5], 1)
| [
2,
1779,
278,
28,
40477,
12,
23,
198,
198,
6738,
764,
4093,
4805,
395,
10305,
42,
1330,
30617,
628,
198,
2,
5525,
107,
112,
23626,
236,
171,
120,
248,
10310,
119,
164,
112,
99,
20998,
115,
171,
120,
234,
163,
247,
119,
165,
247,
... | 1.048645 | 1,624 |
from .settings_base import *
DEBUG = False
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] = (
'rest_framework.renderers.JSONRenderer',
'utils.api.BrowsableAPIRendererWithoutForms',
)
| [
6738,
764,
33692,
62,
8692,
1330,
1635,
628,
198,
30531,
796,
10352,
198,
198,
49,
6465,
62,
10913,
2390,
6217,
14670,
17816,
7206,
38865,
62,
49,
10619,
1137,
1137,
62,
31631,
1546,
20520,
796,
357,
198,
220,
220,
220,
705,
2118,
62,... | 2.533333 | 75 |
from pyvista import examples
dataset = examples.download_rectilinear_grid() # doctest:+SKIP
| [
6738,
12972,
85,
12523,
1330,
6096,
198,
19608,
292,
316,
796,
6096,
13,
15002,
62,
2554,
346,
259,
451,
62,
25928,
3419,
220,
1303,
10412,
395,
25,
10,
18831,
4061,
198
] | 3 | 31 |
from django.contrib import admin
from reversion.admin import VersionAdmin
from . import models
# ToDoListAdmin / ToDoItemAdmin were referenced but never defined; minimal
# versions (assumed to build on the imported reversion VersionAdmin) follow.
class ToDoListAdmin(VersionAdmin):
    pass

class ToDoItemAdmin(VersionAdmin):
    pass

admin.site.register(models.ToDoList, ToDoListAdmin)
admin.site.register(models.ToDoItem, ToDoItemAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
302,
9641,
13,
28482,
1330,
10628,
46787,
198,
198,
6738,
764,
1330,
4981,
628,
628,
198,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
2514,
5211,
8053,
11,
1675,
5211,
805... | 3.253968 | 63 |
import numpy as np
def OrdenDeConvergencia(xNmenos2, xNmenos1, xN, xNmas1):
"""
    Computes the order of convergence at a given point by applying the ratio used in the definition.
    If any denominator turns out to be zero, no constant is returned; None is returned instead.
"""
a = xNmas1 - xN
if a == 0:
return None
b = xN - xNmenos1
if b == 0:
return None
numerador = np.log(abs(a / b))
c = xN - xNmenos1
if c == 0:
return None
d = xNmenos1 - xNmenos2
if d == 0:
return None
denominador = np.log(abs(c / d))
if denominador == 0:
return None
return numerador / denominador
# original function we had written to compute the alpha constant
def CalcularHistoriaDeOrden2(historia):
"""
    Computes the alpha constant; it needs the history of the root search.
    Returns the history of alpha and the final alpha obtained.
"""
tope = len(historia)
if tope < 5:
return 0, np.array([])
historiaDeOrden = np.zeros((tope - 4, 2))
j = 0
for i in range(2, tope - 2):
ordenActual = OrdenDeConvergencia(historia[i - 2][1], historia[i - 1][1], historia[i][1], historia[i + 1][1])
if not (ordenActual is None):
historiaDeOrden[j][1] = ordenActual
historiaDeOrden[j][0] = i
j = j + 1
historiaDeOrden = historiaDeOrden[:j]
return historiaDeOrden[j - 1][1], historiaDeOrden
def CalcularHistoriaDeOrden(historiaRaices):
"""
    Computes the alpha constant; it needs the history of the root search.
    Returns the history of alpha and the final alpha obtained.
"""
nIteraciones = len(historiaRaices) - 1
if len(historiaRaices) < 5:
return 0, np.array([])
alfa = np.zeros((nIteraciones - 1, 2))
for n in range(3 - 1, nIteraciones - 1):
e_n_mas_1 = historiaRaices[n + 1][1] - historiaRaices[n][1]
e_n = historiaRaices[n][1] - historiaRaices[n - 1][1]
e_n_menos_1 = historiaRaices[n - 1][1] - historiaRaices[n - 2][1]
        # A division-by-zero warning is raised because several numbers are very close to each other; errstate was added to avoid it.
        # If these divisions are skipped instead, the plots come out worse.
with np.errstate(divide='ignore'):
alfa[n] = n, np.log10(np.abs(e_n_mas_1 / e_n)) / np.log10(np.abs(e_n / e_n_menos_1))
return alfa[nIteraciones - 2][1], alfa
def ConstanteAsintotica(xN, xNmas1, alfa, raiz):
"""
    Performs the asymptotic constant calculation for one iteration. The received values are assumed to be valid.
    If the denominator turns out to be 0, None is returned.
"""
numerador = abs((xNmas1 - raiz))
denominador = abs(xN - raiz) ** alfa
if denominador == 0:
return None
return numerador / denominador
def CalcularHistoriaConstanteAsintotica(historia, alfa):
"""
    Computes the lambda constant; it needs the root-search history and the previously obtained alpha.
    Returns the history of lambda and the final lambda obtained.
"""
if alfa <= 0:
return 0, np.array([])
tope = len(historia)
raiz = historia[tope - 1][1]
historiaConstanteAsintotica = np.zeros((tope - 2, 2))
j = 0
for i in range(0, tope - 2):
constanteActual = ConstanteAsintotica(historia[i][1], historia[i + 1][1], alfa, raiz)
if not (constanteActual is None):
historiaConstanteAsintotica[j][1] = constanteActual
historiaConstanteAsintotica[j][0] = i
j = j + 1
historiaConstanteAsintotica = historiaConstanteAsintotica[:j]
return historiaConstanteAsintotica[j - 1][1], historiaConstanteAsintotica
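
# --- Illustrative usage (aside, not part of the original module) --------------
# Newton's method for sqrt(2) converges quadratically, so the estimated order
# should come out close to 2. historiaRaices is a list of (iteration, value)
# pairs, which is the layout the functions above index into.
def _demo_orden_newton():
    x = 1.0
    historiaRaices = [(0, x)]
    for n in range(1, 6):
        x = 0.5 * (x + 2.0 / x)
        historiaRaices.append((n, x))
    alfa, _historia_alfa = CalcularHistoriaDeOrden(historiaRaices)
    return alfa  # ~2.0 for a quadratically convergent sequence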
| [
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
201,
198,
4299,
1471,
6559,
5005,
3103,
332,
5235,
33743,
7,
87,
45,
3653,
418,
17,
11,
2124,
45,
3653,
418,
16,
11,
2124,
45,
11,
2124,
45,
5356,
16,
2599,
201,
198,
220,
220,
... | 2.162909 | 1,774 |
import random
print(play())
| [
11748,
4738,
628,
628,
198,
4798,
7,
1759,
28955,
198
] | 3.2 | 10 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility classes and functions for student-mentor training."""
from absl import logging
import numpy as np
import tensorflow as tf
from student_mentor_dataset_cleaning.training.loss.triplet_loss import triplet_semihard_loss_fn
class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping):
"""An early stopping callback that avoids resetting between calls to fit."""
class CustomReduceLROnPlateau(tf.keras.callbacks.ReduceLROnPlateau):
"""A reduce LR callback that avoids resetting between executions of fit."""
class LearningRateLogger(tf.keras.callbacks.TensorBoard):
"""Logs the learning rate to tensorboard."""
def get_gradients_dataset_from_labelled_data(student, dataset):
"""Computes the gradients of the student when it processes a dataset.
Args:
student: The student model
dataset: The training dataset
Returns:
The student's gradients on the given dataset.
"""
loss_fn = student.loss
return dataset.map(get_gradients)
def get_gradients_dataset_from_triplet_data(student, dataset):
"""Computes the gradients of the student network for a dataset of triplets.
Args:
student: The student model
dataset: The training dataset
Returns:
The student's gradients on the given dataset.
"""
tf.keras.backend.clear_session()
triplet_loss_fn = student.loss
y_true = None
y_pred = None
batch_size = 1996
for img, l in dataset.batch(batch_size).take(1):
y_true = l
y_pred = student(img)
tf.stop_gradient(student)
triplet_loss_fn.call(y_true, y_pred)
return tf.data.Dataset.from_generator(
gradient_generator,
output_types=(tf.float32),
output_shapes=(tf.TensorShape([104000])))
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33160,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 3.206704 | 716 |
import pytest
from pathlib import Path
from eddington import FitData
from mock import Mock
import numpy as np
from pytest_cases import fixture_plus
from eddington_matplotlib import plot_all
from tests.plot import dummy_func
should_print_results = True
should_plot_fitting = True
should_plot_residuals = True
should_plot_data = False
xmin = 0.2
xmax = 9.8
func = dummy_func
data = FitData.random(dummy_func)
a = np.array([1, 2])
output_dir = Path("dir/to/output")
@pytest.fixture
@fixture_plus
| [
11748,
12972,
9288,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
1225,
67,
9557,
1330,
25048,
6601,
198,
6738,
15290,
1330,
44123,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
12972,
9288,
62,
33964,
1330,
29220,
62,
9541,
198,
... | 2.913793 | 174 |
import socket
import threading
PORT = 9090
HOST = socket.gethostbyname(socket.gethostname()) #local host
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
clients = []
def start():
"""
Thread to accept clients
:return: None
"""
server.listen()
print("[SERVER] Server is listening now...")
while True:
try:
client_socket, address = server.accept()
clients.append(client_socket)
print(f"{address} is connected")
thread = threading.Thread(target=client_handle , args=(client_socket,))
thread.start()
except Exception as e:
print("[EXCEPTIO]", e)
def client_handle(client_socket):
"""
Handle messages from the client
:param client_socket: socket
:return: None
"""
try:
name = client_socket.recv(1024).decode('utf-8')
broadcast(f"{name} is connected now! :", "")
while True:
msg = client_socket.recv(1024).decode('utf-8')
if msg == 'exit':
clients.remove(client_socket)
broadcast(f"{name} has left the room! :", "")
break
else:
broadcast(msg, name)
except Exception as e:
print('[EXCEPTION]', e)
client_socket.close()
def broadcast(message, name):
"""
send messages to all clients
:param message: str
:param name: str
:return: None
"""
for client in clients:
try:
client.send(f'{name} : {message}'.encode('utf-8'))
except:
print('[EXCEPTION ON BROADCAST]')
if __name__ == '__main__':
start() | [
11748,
17802,
201,
198,
11748,
4704,
278,
201,
198,
201,
198,
15490,
796,
860,
42534,
201,
198,
39,
10892,
796,
17802,
13,
1136,
4774,
1525,
3672,
7,
44971,
13,
1136,
4774,
3672,
28955,
1303,
12001,
2583,
201,
198,
201,
198,
15388,
79... | 2.066049 | 863 |
import numpy as np
import sys
from scipy.special import erfcinv as erfcinv
from calc_sigmas import calc_sigmas
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt, floor, ceil
from scipy.interpolate import RegularGridInterpolator
from scipy.ndimage import label
import random
import warnings
import sys
import os
import csv
import cv2
from sklearn.cluster import KMeans
from PIL import Image
from matplotlib.colors import colorConverter
import matplotlib as mpl
if __name__ == "__main__":
filePath = sys.argv[1]
# -*- coding: utf-8 -*-
import numpy as np
import numpy.linalg as la
import scipy as sp
# ================ ARRAY ================
# ================ STATS ================
msqrt = lambda M : [np.matmul(u,np.diag(np.sqrt(s))) for u,s,vh in [la.svd(M)]][0]
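# Illustrative check (not part of the original file): for a symmetric positive
# semi-definite M, msqrt(M) @ msqrt(M).T reproduces M, e.g.
#   A = np.random.rand(4, 4); M = A @ A.T
#   np.allclose(msqrt(M) @ msqrt(M).T, M)   # True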
matern = lambda r,r0,nu : 2**(1-nu)/sp.special.gamma(nu)*sp.special.kv(nu,r/r0+1e-10)*(r/r0+1e-10)**nu
gaussian_beam = lambda z, w0, l, n : np.sqrt(1/np.square(z) + np.square(l) / np.square(w0 * np.pi * n))
# ================ POINTING ================
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from pycassa import ConnectionPool, ColumnFamily
from urllib import unquote_plus
from contrail_api_cli.command import Command, Option
from contrail_api_cli.resource import Resource, Collection
from contrail_api_cli.exceptions import ResourceNotFound
from ..utils import server_type
logger = logging.getLogger(__name__)
class OrphanedACL(Command):
"""Removes stale ACLs.
ACL is considered as stale if it has no parent::
contrail-api-cli --ns contrail_api_cli.ns clean-orphaned-acl --cassandra-servers <ip1> <ip2>
.. note::
Because of an API server limitation the ACLs are removed directly from the cassandra cluster.
Thus, the cassandra cluster nodes IPs must be provided.
"""
description = "Clean all ACLs that don't have any parent"
force = Option('-f',
help="Delete orphan ACL (default: %(default)s)",
default=False,
action="store_true")
parent_type = Option(help="Parent type the ACL should have (default: %(default)s)",
choices=['security-group', 'virtual-network'],
default='security-group')
    cassandra_servers = Option(help="Cassandra server list (default: %(default)s)",
nargs='+',
type=server_type,
default=['localhost:9160'])
from datamart.utilities.utils import Utils
from pandas import DataFrame
from datamart.es_managers.json_query_manager import JSONQueryManager
import typing
class Dataset:
"""
Represents a retrieved dataset from datamart, in a search query.
Contains the meta info and the way to materialize the dataset.
Follow the API defined by https://datadrivendiscovery.org/wiki/display/work/Python+API
"""
def download(self, destination: str) -> str:
"""
Materializes the dataset on disk.
Args:
destination: the file path where the user would like to save the concrete dataset
Returns:
"""
data = self.materialize()
try:
data.to_csv(destination, index=False)
return "Success"
except Exception as e:
return "Failed: %s" % str(e)
@property
def id(self):
"""
es _id
Returns:
"""
return self._id
@property
def inner_hits(self):
"""
        es inner_hits
Returns:
"""
return self._inner_hits
@property
def _es_raw_object(self):
"""
        es raw object
Returns:
"""
return self.__es_raw_object
@property
def metadata(self):
"""
Contains the metadata for the dataset, in D3M dataset schema.
Returns:
"""
return self._metadata
@property
def score(self):
"""
Floating-point value measuring how well this dataset matches the query parameters. Higher is better.
Returns:
"""
return self._score
@property
def match(self):
"""
(TODO better name?)
Metadata indicating which column of this dataset matches which requested column from the query. \
This explains why this dataset matches the query, and can be used for joining.
Returns:
"""
return None
@property
def matched_cols(self):
"""
(TODO better name?)
Metadata indicating which column of this dataset matches which requested column from the query. \
This explains why this dataset matches the query, and can be used for joining.
Returns:
"""
return self._matched_cols
@property
@property
# This file holds metadata about the project. It should import only from standard library modules
# (this includes not importing other modules in the package) so that it can be loaded
# by setup.py before dependencies are installed.
source = "https://github.com/neilpquinn/wmfdata"
version = "0.1.0"
from collections import defaultdict
import os
# A disjoint exonic bin. This class holds disjoint exonic bin data prepared
# in the preprocessing step
#######################
#######################
# Transcript class
import configparser
import hashlib
import logging
import os
import sys
import requests
MOJANG_MANIFEST_URL = 'https://launchermeta.mojang.com/mc/game/version_manifest.json'
DOWNLOADS_VERSIONS_FILENAME = 'servers-infos.txt'
class IntegrityCheckError(Exception):
"""Throwed if the file signature doesn't match with signature expected."""
pass
def get_versions(manifest_url: str, types=['snapshot', 'release', 'old_beta', 'old_alpha']):
"""Get manifest of mojang."""
response = requests.request(
method='GET',
url=manifest_url,
)
response_json = response.json()
versions = []
for version in response_json['versions']:
if version['type'] in types:
versions.append(version)
return versions
def download_versions(versions: dict, directory: str, info_filename=DOWNLOADS_VERSIONS_FILENAME):
"""Download specific versions, and verify their integrity."""
if not os.path.exists(directory):
os.makedirs(directory)
infos_file = os.path.join(directory, info_filename)
with open(infos_file, 'w+') as infos:
for version in versions:
# Get informations
version_manifest_response = requests.request(
method='GET',
url=version['url']
)
version_manifest_json = version_manifest_response.json()
try:
server_file_sha1 = version_manifest_json['downloads']['server']['sha1']
server_file_url = version_manifest_json['downloads']['server']['url']
except KeyError:
                logging.warning('Skipping %s (%s) download: no server file available' % (version['id'], version['type']))
continue
local_filename = os.path.join(
directory,
'server-' + version['id'] + '.jar',
)
download_file(
server_file_sha1,
server_file_url,
local_filename,
)
infos.write('%s %s\n' % (version['id'], local_filename))
def download_file(sha1: str, url: str, filepath: str):
"""Download file and check integrity."""
filename = os.path.basename(filepath)
# Check previous if exists
if os.path.exists(filepath):
if check_integrity(filepath, sha1):
return
else:
logging.warning(filename + ' integrity check failed, re-downloading in progress')
os.remove(filepath)
# Download file
with requests.get(url=url, stream=True) as stream:
stream.raise_for_status()
with open(filepath, 'wb') as file:
for chunk in stream.iter_content(chunk_size=8192):
file.write(chunk)
logging.info(filename + ' downloaded successfully')
# Verify sha1
if not check_integrity(filepath, sha1):
raise IntegrityCheckError(filepath + ' : File check integrity failed.')
else:
logging.info(filename + ' integrity check successfully')
def check_integrity(filepath: str, sha1: str):
"""Check file integrity."""
hash_sha1 = hashlib.sha1()
with open(filepath, 'rb') as file:
for chunk in iter(lambda: file.read(8192), b''):
hash_sha1.update(chunk)
return hash_sha1.hexdigest() == sha1
def get_server_file(directory: str, version: str, info_filename=DOWNLOADS_VERSIONS_FILENAME):
"""Give the file path of a server."""
file_info = os.path.join(directory, info_filename)
if os.path.exists(file_info):
with open(file_info, 'r') as file:
content = file.read().split('\n')
for line in content:
server_version, server_filepath = line.split(' ')
if version == server_version:
return server_filepath
return None
def get_local_versions(directory: str, info_filename=DOWNLOADS_VERSIONS_FILENAME):
"""Give the local server versions."""
file_info = os.path.join(directory, info_filename)
versions = []
if os.path.exists(file_info):
with open(file_info, 'r') as file:
content = file.readlines()
for line in content:
server_version, server_filepath = line.split(' ')
versions.append(server_version)
return versions
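# Illustrative usage sketch (not part of the original module): fetch the
# release entries from the manifest and download the most recent one into a
# local "servers" directory; the directory name is an assumption.
#
#   if __name__ == '__main__':
#       releases = get_versions(MOJANG_MANIFEST_URL, types=['release'])
#       download_versions(releases[:1], 'servers')
#       print(get_local_versions('servers'))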
class News_Sources:
    """
    class to define the objects in the News_Sources class
    """
class News_Article:
    """
    class to define the objects in the News_Article class
    """
""" UDP client in python. """
import logging
import socket
import threading
from py_client.players import Player
logging.basicConfig(level=logging.DEBUG)
class UdpClient:
"""UDP client for playing Connect-N"""
# SETTING UP
# LISTENING
# RECEIVING
# SENDING
## Copyright 2020 UT-Battelle, LLC. See LICENSE.txt for more information.
###
# @author Narasinga Rao Miniskar, Frank Liu, Dwaipayan Chakraborty, Jeffrey Vetter
# miniskarnr@ornl.gov
#
# Modification:
# Baseline code
# Date: Apr, 2020
# **************************************************************************
###
import os
from multi_thread_run import *
from deffe_utils import *
import numpy as np
import argparse
import shlex
import pathlib
from read_config import *
""" DeffeExtract class to extract cost metrics for the batch of samples with
through multi-thread execution environment either with/without
the help of slurm
"""
# Read arguments provided in JSON configuration file
# Add command line arguments to parser
# Initialize the class with parameters list and to be extracted cost metrics
# Run the extraction
# -*- coding: utf-8 -*-
# TODO: remove this
from flask.ext.cache import Cache
from settings import CACHE_CONFIG
cache = Cache()
"""
Unit tests for specific models
@author: Mathieu Doucet / UTK
"""
from __future__ import print_function
import unittest, math, time
# Disable "missing docstring" complaint
# pylint: disable-msg=C0111
# Disable "too many methods" complaint
# pylint: disable-msg=R0904
# Disable "could be a function" complaint
# pylint: disable-msg=R0201
try:
import VolumeCanvas
print("Testing local version")
except:
import sys
print(sys.exc_value)
#testing the version that is working on
print("Testing installed version")
import sas.sascalc.realspace.VolumeCanvas as VolumeCanvas
class TestRealSpaceModel(unittest.TestCase):
""" Unit tests for sphere model """
class TestSphere(unittest.TestCase):
""" Unit tests for sphere model """
def testSetDensityTiming(self):
"""Testing change in computation time with density"""
handle = self.canvas.add('sphere')
self.canvas.setParam("%s.radius" % handle, 15.0)
self.canvas.setParam('lores_density', 0.6)
t_0 = time.time()
self.canvas.getIq(0.001)
t_1 = time.time()-t_0
# Change density, the answer should be the same
self.canvas.setParam('lores_density', 0.1)
t_0 = time.time()
self.canvas.getIq(0.001)
t_2 = time.time()-t_0
self.assert_( t_2 < t_1 and (t_1-t_2)/t_2 > 2)
def testGetParamList(self):
""" Test GetParamList on empty canvas"""
self.assert_('lores_density' in self.canvas.getParamList())
handle = self.canvas.add('sphere')
def testGetParamListWithShape(self):
""" Test GetParamList on filled canvas"""
self.canvas.add('sphere')
self.assert_('lores_density' in self.canvas.getParamList())
def testGetIq(self):
""" Test the output of I(q) to the analytical solution
If the normalization is wrong, we will have to fix it.
getIq() should call getPr() behind the scenes so that
        the user doesn't have to do it if he doesn't need to.
"""
from sas.models.SphereModel import SphereModel
sphere = SphereModel()
sphere.setParam('radius', 10.0)
sphere.setParam('contrast', 1.0)
sphere.setParam('background', 0.0)
sphere.setParam('scale', 1.0)
handle = self.canvas.add('sphere')
self.canvas.setParam('%s.radius' % handle, 10.0)
self.canvas.setParam('%s.contrast' % handle, 1.0)
sim_1 = self.canvas.getIq(0.001)
ana_1 = sphere.run(0.001)
sim_2 = self.canvas.getIq(0.01)
ana_2 = sphere.run(0.01)
# test the shape of the curve (calculate relative error
# on the output and it should be compatible with zero
# THIS WILL DEPEND ON THE NUMBER OF SPACE POINTS:
        # that's why we need some error analysis.
self.assert_( (sim_2*ana_1/sim_1 - ana_2)/ana_2 < 0.1)
# test the absolute amplitude
self.assert_( math.fabs(sim_2-ana_2)/ana_2 < 0.1)
def testGetIq2(self):
""" Test two different q values
"""
handle = self.canvas.add('sphere')
self.canvas.setParam('%s.radius' % handle, 10.0)
sim_1 = self.canvas.getIq(0.001)
sim_2 = self.canvas.getIq(0.01)
self.assertNotAlmostEqual(sim_2, sim_1, 3)
def testGetIq_Identical(self):
""" Test for identical model / no param change
"""
handle = self.canvas.add('sphere')
self.canvas.setParam('%s.radius' % handle, 10.0)
sim_1 = self.canvas.getIq(0.01)
sim_2 = self.canvas.getIq(0.01)
self.assertEqual(sim_2, sim_1)
def testGetIq_Identical2(self):
""" Test for identical model after a parameter change
Should be different only of the space points
are regenerated and the random seed is different
"""
handle = self.canvas.add('sphere')
self.canvas.setParam('%s.radius' % handle, 10.0)
self.canvas.setParam('lores_density', 0.1)
sim_1 = self.canvas.getIq(0.01)
# Try to fool the code by changing to a different value
self.canvas.setParam('lores_density', 0.2)
self.canvas.getIq(0.01)
self.canvas.setParam('lores_density', 0.1)
sim_2 = self.canvas.getIq(0.01)
self.assert_((sim_2-sim_1)/sim_1<0.05)
def testGetIq_time(self):
""" Time profile
"""
handle = self.canvas.add('sphere')
self.canvas.setParam('%s.radius' % handle, 15.0)
self.canvas.setParam('lores_density', 0.1)
t_0 = time.time()
sim_1 = self.canvas.getIq(0.01)
delta_1 = time.time()-t_0
self.canvas.setParam('lores_density', 0.1)
t_0 = time.time()
sim_2 = self.canvas.getIq(0.01)
delta_2 = time.time()-t_0
self.assert_((delta_2-delta_1)/delta_1<0.05)
def testGetPr(self):
"""Compare the output of P(r) to the theoretical value"""
#TODO: find a way to compare you P(r) to the known
# analytical value.
pass
def testLogic1(self):
""" Test that the internal logic is set so that the user
get the right output after changing a parameter
"""
handle = self.canvas.add('sphere')
self.canvas.setParam('%s.radius' % handle, 10.0)
result_1 = self.canvas.getIq(0.1)
self.canvas.setParam('%s.radius' % handle, 20.0)
result_2 = self.canvas.getIq(0.1)
self.assertNotAlmostEqual(result_1, result_2, 2)
class TestCanvas(unittest.TestCase):
""" Unit tests for all shapes in canvas model """
class TestOrdering(unittest.TestCase):
""" Unit tests for all shapes in canvas model """
if __name__ == '__main__':
unittest.main()
import asyncio
from right import Right
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Code inside the test is the chemistry sample from the readme.
If this test fails and code changes are needed here to resolve
the issue then ensure changes are made to readme too.
"""
import unittest
from test.chemistry import QiskitChemistryTestCase
from qiskit.chemistry import QiskitChemistryError
class TestReadmeSample(QiskitChemistryTestCase):
"""Test sample code from readme"""
def test_readme_sample(self):
""" readme sample test """
# pylint: disable=import-outside-toplevel,redefined-builtin
def print(*args):
""" overloads print to log values """
if args:
self.log.debug(args[0], *args[1:])
# --- Exact copy of sample code ----------------------------------------
from qiskit.chemistry import FermionicOperator
from qiskit.chemistry.drivers import PySCFDriver, UnitsType
from qiskit.aqua.operators import Z2Symmetries
# Use PySCF, a classical computational chemistry software
# package, to compute the one-body and two-body integrals in
# molecular-orbital basis, necessary to form the Fermionic operator
driver = PySCFDriver(atom='H .0 .0 .0; H .0 .0 0.735',
unit=UnitsType.ANGSTROM,
basis='sto3g')
molecule = driver.run()
num_particles = molecule.num_alpha + molecule.num_beta
num_spin_orbitals = molecule.num_orbitals * 2
# Build the qubit operator, which is the input to the VQE algorithm in Aqua
ferm_op = FermionicOperator(h1=molecule.one_body_integrals, h2=molecule.two_body_integrals)
map_type = 'PARITY'
qubit_op = ferm_op.mapping(map_type)
qubit_op = Z2Symmetries.two_qubit_reduction(qubit_op, num_particles)
num_qubits = qubit_op.num_qubits
# setup a classical optimizer for VQE
from qiskit.aqua.components.optimizers import L_BFGS_B
optimizer = L_BFGS_B()
# setup the initial state for the variational form
from qiskit.chemistry.components.initial_states import HartreeFock
init_state = HartreeFock(num_spin_orbitals, num_particles)
# setup the variational form for VQE
from qiskit.circuit.library import TwoLocal
var_form = TwoLocal(num_qubits, ['ry', 'rz'], 'cz', initial_state=init_state)
# setup and run VQE
from qiskit.aqua.algorithms import VQE
algorithm = VQE(qubit_op, var_form, optimizer)
# set the backend for the quantum computation
from qiskit import Aer
backend = Aer.get_backend('statevector_simulator')
result = algorithm.run(backend)
print(result.eigenvalue.real)
# ----------------------------------------------------------------------
self.assertAlmostEqual(result.eigenvalue.real, -1.8572750301938803, places=6)
if __name__ == '__main__':
unittest.main()
import os, sys
sys.path.insert(0, os.path.abspath('..'))
from WeekX.LinkedList import LinkedList
from WeekX.Stack import Stack
if __name__ == '__main__':
graph = Graph(13)
graph.addEdge(0,1)
graph.addEdge(0,5)
graph.addEdge(0,2)
graph.addEdge(0,6)
graph.addEdge(3,4)
graph.addEdge(5,3)
graph.addEdge(5,4)
graph.addEdge(4,6)
graph.addEdge(7,8)
graph.addEdge(10,9)
graph.addEdge(12,9)
graph.addEdge(11,9)
graph.addEdge(11,12)
print "*"*40
print "{:^40}".format("Adjacency Lists")
print "*"*40
for item in range(13):
print graph.adjVertices(item)
print
print "*"*40
print "{:^40}".format("DFS Paths")
print "*"*40
dfs = DepthFirstSearch(graph,0)
print dfs.dfPaths()
print
print "*"*40
print "Path from source %s to each node." % (dfs._source)
print "*"*40
for item in range(13):
print "Path from 0 to",item,": ",
dfs.ppath(item)
print
# ****************************************
# Adjacency Lists
# ****************************************
# [1, 5, 2, 6]
# [0]
# [0]
# [4, 5]
# [3, 5, 6]
# [0, 3, 4]
# [0, 4]
# [8]
# [7]
# [10, 12, 11]
# [9]
# [9, 12]
# [9, 11]
#
# ****************************************
# DFS Paths
# ****************************************
# [0, 0, 0, 5, 3, 0, 4, 7, 8, 9, 10, 11, 12]
#
# ****************************************
# Path from source 0 to each node.
# ****************************************
# Path from 0 to 0 : 0
# Path from 0 to 1 : 0 1
# Path from 0 to 2 : 0 2
# Path from 0 to 3 : 0 5 3
# Path from 0 to 4 : 0 5 3 4
# Path from 0 to 5 : 0 5
# Path from 0 to 6 : 0 5 3 4 6
# Path from 0 to 7 : No Path
# Path from 0 to 8 : No Path
# Path from 0 to 9 : No Path
# Path from 0 to 10 : No Path
# Path from 0 to 11 : No Path
# Path from 0 to 12 :  No Path
# -*- coding: utf-8 -*-
##############################################
# Export CloudWatch metric data to csv file
# Configuration file
##############################################
METRICS = {
'CPUUtilization': ['AWS/EC2', 'AWS/RDS'],
'CPUCreditUsage': ['AWS/EC2', 'AWS/RDS'],
'CPUCreditBalance': ['AWS/EC2', 'AWS/RDS'],
'DiskReadOps': ['AWS/EC2'],
'DiskWriteOps': ['AWS/EC2'],
'DiskReadBytes': ['AWS/EC2'],
'DiskWriteBytes': ['AWS/EC2'],
'NetworkIn': ['AWS/EC2'],
'NetworkOut': ['AWS/EC2'],
'NetworkPacketsIn': ['AWS/EC2'],
'NetworkPacketsOut': ['AWS/EC2'],
'MetadataNoToken': ['AWS/EC2'],
'CPUSurplusCreditBalance': ['AWS/EC2'],
'CPUSurplusCreditsCharged': ['AWS/EC2'],
'EBSReadOps': ['AWS/EC2'],
'EBSWriteOps': ['AWS/EC2'],
'EBSReadBytes': ['AWS/EC2'],
'EBSWriteBytes': ['AWS/EC2'],
'EBSIOBalance%': ['AWS/EC2'],
'EBSByteBalance%': ['AWS/EC2'],
'StatusCheckFailed': ['AWS/EC2'],
'StatusCheckFailed_Instance': ['AWS/EC2'],
'StatusCheckFailed_System': ['AWS/EC2'],
'BinLogDiskUsage': ['AWS/RDS'],
'BurstBalance': ['AWS/RDS'],
'DatabaseConnections': ['AWS/RDS'],
'DiskQueueDepth': ['AWS/RDS'],
'FailedSQLServerAgentJobsCount': ['AWS/RDS'],
'FreeableMemory': ['AWS/RDS'],
'FreeStorageSpace': ['AWS/RDS'],
'MaximumUsedTransactionIDs': ['AWS/RDS'],
'NetworkReceiveThroughput': ['AWS/RDS'],
'NetworkTransmitThroughput': ['AWS/RDS'],
'OldestReplicationSlotLag': ['AWS/RDS'],
'ReadIOPS': ['AWS/RDS'],
'ReadLatency': ['AWS/RDS'],
'ReadThroughput': ['AWS/RDS'],
'ReplicaLag': ['AWS/RDS'],
'ReplicationSlotDiskUsage': ['AWS/RDS'],
'SwapUsage': ['AWS/RDS'],
'TransactionLogsDiskUsage': ['AWS/RDS'],
'TransactionLogsGeneration': ['AWS/RDS'],
'WriteIOPS': ['AWS/RDS'],
'WriteLatency': ['AWS/RDS'],
'WriteThroughput': ['AWS/RDS'],
'ActiveConnectionCount': ['AWS/ApplicationELB'],
'ClientTLSNegotiationErrorCount': ['AWS/ApplicationELB'],
'ConsumedLCUs': ['AWS/ApplicationELB'],
'DesyncMitigationMode_NonCompliant_Request_Count': ['AWS/ApplicationELB'],
'HTTP_Fixed_Response_Count': ['AWS/ApplicationELB'],
'HTTP_Redirect_Count': ['AWS/ApplicationELB'],
'HTTP_Redirect_Url_Limit_Exceeded_Count': ['AWS/ApplicationELB'],
'HTTPCode_ELB_3XX_Count': ['AWS/ApplicationELB'],
'HTTPCode_ELB_4XX_Count': ['AWS/ApplicationELB'],
'HTTPCode_ELB_5XX_Count': ['AWS/ApplicationELB'],
'HTTPCode_ELB_500_Count': ['AWS/ApplicationELB'],
'HTTPCode_ELB_502_Count': ['AWS/ApplicationELB'],
'HTTPCode_ELB_503_Count': ['AWS/ApplicationELB'],
'HTTPCode_ELB_504_Count': ['AWS/ApplicationELB'],
'IPv6ProcessedBytes': ['AWS/ApplicationELB'],
'IPv6RequestCount': ['AWS/ApplicationELB'],
'NewConnectionCount': ['AWS/ApplicationELB'],
'NonStickyRequestCount': ['AWS/ApplicationELB'],
'ProcessedBytes': ['AWS/ApplicationELB'],
'RejectedConnectionCount': ['AWS/ApplicationELB'],
'RequestCount': ['AWS/ApplicationELB'],
'RuleEvaluations': ['AWS/ApplicationELB'],
'HTTPCode_Target_2XX_Count': ['AWS/ApplicationELB'],
'HTTPCode_Target_3XX_Count': ['AWS/ApplicationELB'],
'HTTPCode_Target_4XX_Count': ['AWS/ApplicationELB'],
'HTTPCode_Target_5XX_Count': ['AWS/ApplicationELB'],
'TargetConnectionErrorCount': ['AWS/ApplicationELB'],
'TargetResponseTime': ['AWS/ApplicationELB'],
'TargetTLSNegotiationErrorCount': ['AWS/ApplicationELB'],
'LambdaTargetProcessedBytes': ['AWS/ApplicationELB'],
'ELBAuthError': ['AWS/ApplicationELB'],
'ELBAuthFailure': ['AWS/ApplicationELB'],
'ELBAuthLatency': ['AWS/ApplicationELB'],
'ELBAuthRefreshTokenSuccess': ['AWS/ApplicationELB'],
'ELBAuthSuccess': ['AWS/ApplicationELB'],
'ELBAuthUserClaimsSizeExceeded': ['AWS/ApplicationELB'],
}
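# Illustrative sketch (hypothetical, not part of this configuration file) of
# how a collector script might consume METRICS with boto3; the client setup,
# period and statistics below are assumptions.
#
#   import boto3
#   from datetime import datetime, timedelta
#   cloudwatch = boto3.client('cloudwatch')
#   for metric_name, namespaces in METRICS.items():
#       for namespace in namespaces:
#           stats = cloudwatch.get_metric_statistics(
#               Namespace=namespace, MetricName=metric_name,
#               StartTime=datetime.utcnow() - timedelta(hours=1),
#               EndTime=datetime.utcnow(), Period=300, Statistics=['Average'])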
# coding: utf-8
from __future__ import unicode_literals
import os
import shutil
import subprocess
from .ffmpeg import FFmpegPostProcessor
from ..utils import (
check_executable,
encodeArgument,
encodeFilename,
PostProcessingError,
prepend_extension,
replace_extension,
shell_quote,
base64
)
import tensorflow as tf
import RecordLoader as rloader
import numpy as np
import time
MAX_EPOCH = 1
REPEAT = 1
BATCH_SIZE = 200
TEST_TFRECORD = ''
MODEL_GRAPH = ''
MODEL_PATH = ''
with tf.Session() as sess:
saver = tf.train.import_meta_graph(MODEL_GRAPH)
saver.restore(sess, tf.train.latest_checkpoint(MODEL_PATH))
filename = tf.placeholder(tf.string, [None], name='filename')
dataset = rloader.create_dataset(filename, REPEAT, 1500, BATCH_SIZE)
iterator = dataset.make_initializable_iterator()
images, labels = iterator.get_next()
sess.run(iterator.initializer, feed_dict={filename: [TEST_TFRECORD]})
graph = tf.get_default_graph()
input = graph.get_tensor_by_name('input_x:0')
target = graph.get_tensor_by_name('target_label:0')
channel_keep_prob = graph.get_tensor_by_name('channel_keep_prob:0')
s0 = graph.get_tensor_by_name('out_s0_accuracy:0')
s1 = graph.get_tensor_by_name('out_s1_accuracy:0')
mean = graph.get_tensor_by_name('out_accuracy:0')
while True:
try:
image_batch, label_batch = sess.run([images, labels])
feedDict = {input: image_batch, target: label_batch, channel_keep_prob: 1.}
s0_accuracy, s1_accuracy, accuracy = sess.run([s0, s1, mean], feed_dict=feedDict)
print('Test: s0_acc:{}, s1_acc:{}, mean_acc:{}'.format(s0_accuracy, s1_accuracy, accuracy))
except tf.errors.OutOfRangeError:
break
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="py_outrider",
version="0.1.0",
author="Stefan Loipfinger, Ines Scheller",
author_email="scheller@in.tum.de",
description="Python backend package for OUTRIDER2 R package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/gagneurlab/py_outrider/",
packages=find_packages(),
install_requires=['tensorflow>=2.3.0',
'tensorflow-probability>=0.10.0',
'scikit-learn>=0.23.1',
'statsmodels>=0.11.1',
'numpy>=1.19.2',
'pandas>=1.1.5',
'anndata>=0.7.0',
'nipals>=0.5.2'
],
entry_points={
"console_scripts": ['py_outrider = py_outrider.__main__:main']
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
import sys
from os import environ
from unittest.mock import patch
from pynamodb.exceptions import DoesNotExist
from pytest import mark, raises
from pytest_subtests import SubTests
from geostore.api_keys import MESSAGE_KEY
from geostore.check_files_checksums.task import main
from geostore.check_files_checksums.utils import ARRAY_INDEX_VARIABLE_NAME
from geostore.error_response_keys import ERROR_KEY
from geostore.logging_keys import LOG_MESSAGE_VALIDATION_COMPLETE
from geostore.models import DATASET_ID_PREFIX, DB_KEY_SEPARATOR, VERSION_ID_PREFIX
from geostore.parameter_store import ParameterName, get_param
from geostore.processing_assets_model import ProcessingAssetType, ProcessingAssetsModelBase
from geostore.step_function import Outcome
from .aws_utils import get_s3_role_arn
from .general_generators import any_program_name
from .stac_generators import any_dataset_id, any_dataset_version_id
@mark.infrastructure
# start_repo_marker_0
import os
from dagster import IOManager, graph, io_manager, op, repository
from pyspark.sql import DataFrame, Row, SparkSession
from pyspark.sql.types import IntegerType, StringType, StructField, StructType
@io_manager
@op
@op
@graph
make_and_filter_data_job = make_and_filter_data.to_job(
resource_defs={"io_manager": local_parquet_store}
)
# end_repo_marker_0
@repository
import numpy as np
# from cobra.flux_analysis import flux_variability_analysis
from pytfa.analysis.variability import variability_analysis
# Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of basic calculations.
These include:
* wind components
* heat index
* windchill
"""
import contextlib
from itertools import product
import warnings
import numpy as np
from scipy.ndimage import gaussian_filter
from .. import constants as mpconsts
from ..package_tools import Exporter
from ..units import check_units, masked_array, units
from ..xarray import preprocess_and_wrap
exporter = Exporter(globals())
# The following variables are constants for a standard atmosphere
t0 = units.Quantity(288., 'kelvin')
p0 = units.Quantity(1013.25, 'hPa')
gamma = units.Quantity(6.5, 'K/km')
@exporter.export
@preprocess_and_wrap(wrap_like='u')
@check_units('[speed]', '[speed]')
def wind_speed(u, v):
r"""Compute the wind speed from u and v-components.
Parameters
----------
u : `pint.Quantity`
Wind component in the X (East-West) direction
v : `pint.Quantity`
Wind component in the Y (North-South) direction
Returns
-------
wind speed: `pint.Quantity`
Speed of the wind
See Also
--------
wind_components
"""
speed = np.sqrt(u * u + v * v)
return speed
@exporter.export
@preprocess_and_wrap(wrap_like='u')
@check_units('[speed]', '[speed]')
def wind_direction(u, v, convention='from'):
r"""Compute the wind direction from u and v-components.
Parameters
----------
u : `pint.Quantity`
Wind component in the X (East-West) direction
v : `pint.Quantity`
Wind component in the Y (North-South) direction
convention : str
Convention to return direction; 'from' returns the direction the wind is coming from
(meteorological convention), 'to' returns the direction the wind is going towards
(oceanographic convention), default is 'from'.
Returns
-------
direction: `pint.Quantity`
The direction of the wind in intervals [0, 360] degrees, with 360 being North,
direction defined by the convention kwarg.
See Also
--------
wind_components
Notes
-----
In the case of calm winds (where `u` and `v` are zero), this function returns a direction
of 0.
"""
wdir = units.Quantity(90., 'deg') - np.arctan2(-v, -u)
origshape = wdir.shape
wdir = np.atleast_1d(wdir)
    # Handle oceanographic convention
if convention == 'to':
wdir -= units.Quantity(180., 'deg')
elif convention not in ('to', 'from'):
raise ValueError('Invalid kwarg for "convention". Valid options are "from" or "to".')
mask = wdir <= 0
if np.any(mask):
wdir[mask] += units.Quantity(360., 'deg')
# avoid unintended modification of `pint.Quantity` by direct use of magnitude
calm_mask = (np.asanyarray(u.magnitude) == 0.) & (np.asanyarray(v.magnitude) == 0.)
# np.any check required for legacy numpy which treats 0-d False boolean index as zero
if np.any(calm_mask):
wdir[calm_mask] = units.Quantity(0., 'deg')
return wdir.reshape(origshape).to('degrees')
@exporter.export
@preprocess_and_wrap(wrap_like=('speed', 'speed'))
@check_units('[speed]')
def wind_components(speed, wind_direction):
r"""Calculate the U, V wind vector components from the speed and direction.
Parameters
----------
speed : `pint.Quantity`
Wind speed (magnitude)
wind_direction : `pint.Quantity`
Wind direction, specified as the direction from which the wind is
blowing (0-2 pi radians or 0-360 degrees), with 360 degrees being North.
Returns
-------
u, v : tuple of `pint.Quantity`
The wind components in the X (East-West) and Y (North-South)
directions, respectively.
See Also
--------
wind_speed
wind_direction
Examples
--------
>>> from metpy.units import units
>>> metpy.calc.wind_components(10. * units('m/s'), 225. * units.deg)
(<Quantity(7.07106781, 'meter / second')>, <Quantity(7.07106781, 'meter / second')>)
.. versionchanged:: 1.0
Renamed ``wdir`` parameter to ``wind_direction``
"""
wind_direction = _check_radians(wind_direction, max_radians=4 * np.pi)
u = -speed * np.sin(wind_direction)
v = -speed * np.cos(wind_direction)
return u, v
@exporter.export
@preprocess_and_wrap(wrap_like='temperature')
@check_units(temperature='[temperature]', speed='[speed]')
def windchill(temperature, speed, face_level_winds=False, mask_undefined=True):
r"""Calculate the Wind Chill Temperature Index (WCTI).
Calculates WCTI from the current temperature and wind speed using the formula
outlined by the FCM [FCMR192003]_.
Specifically, these formulas assume that wind speed is measured at
10m. If, instead, the speeds are measured at face level, the winds
need to be multiplied by a factor of 1.5 (this can be done by specifying
`face_level_winds` as `True`).
Parameters
----------
temperature : `pint.Quantity`
Air temperature
speed : `pint.Quantity`
Wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill is undefined masked. These are values where
the temperature > 50F or wind speed <= 3 miles per hour. Defaults
to `True`.
Returns
-------
`pint.Quantity`
Corresponding Wind Chill Temperature Index value(s)
See Also
--------
heat_index, apparent_temperature
"""
# Correct for lower height measurement of winds if necessary
if face_level_winds:
# No in-place so that we copy
# noinspection PyAugmentAssignment
speed = speed * 1.5
temp_limit, speed_limit = units.Quantity(10., 'degC'), units.Quantity(3, 'mph')
speed_factor = speed.to('km/hr').magnitude ** 0.16
wcti = units.Quantity((0.6215 + 0.3965 * speed_factor) * temperature.to('degC').magnitude
- 11.37 * speed_factor + 13.12, units.degC).to(temperature.units)
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array((temperature > temp_limit) | (speed <= speed_limit))
if mask.any():
wcti = masked_array(wcti, mask=mask)
return wcti
@exporter.export
@preprocess_and_wrap(wrap_like='temperature')
@check_units('[temperature]')
def heat_index(temperature, relative_humidity, mask_undefined=True):
r"""Calculate the Heat Index from the current temperature and relative humidity.
The implementation uses the formula outlined in [Rothfusz1990]_, which is a
multi-variable least-squares regression of the values obtained in [Steadman1979]_.
Additional conditional corrections are applied to match what the National
Weather Service operationally uses. See Figure 3 of [Anderson2013]_ for a
depiction of this algorithm and further discussion.
Parameters
----------
temperature : `pint.Quantity`
Air temperature
relative_humidity : `pint.Quantity`
The relative humidity expressed as a unitless ratio in the range [0, 1].
Can also pass a percentage if proper units are attached.
Returns
-------
`pint.Quantity`
Corresponding Heat Index value(s)
Other Parameters
----------------
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values masked where the temperature < 80F. Defaults to `True`.
.. versionchanged:: 1.0
Renamed ``rh`` parameter to ``relative_humidity``
See Also
--------
windchill, apparent_temperature
"""
temperature = np.atleast_1d(temperature)
relative_humidity = np.atleast_1d(relative_humidity)
# assign units to relative_humidity if they currently are not present
if not hasattr(relative_humidity, 'units'):
relative_humidity = units.Quantity(relative_humidity, 'dimensionless')
delta = temperature.to(units.degF) - units.Quantity(0., 'degF')
rh2 = relative_humidity * relative_humidity
delta2 = delta * delta
    # Simplified Heat Index -- constants converted for relative_humidity in [0, 1]
a = (units.Quantity(-10.3, 'degF') + 1.1 * delta
+ units.Quantity(4.7, 'delta_degF') * relative_humidity)
# More refined Heat Index -- constants converted for relative_humidity in [0, 1]
b = (units.Quantity(-42.379, 'degF')
+ 2.04901523 * delta
+ units.Quantity(1014.333127, 'delta_degF') * relative_humidity
- 22.475541 * delta * relative_humidity
- units.Quantity(6.83783e-3, '1/delta_degF') * delta2
- units.Quantity(5.481717e2, 'delta_degF') * rh2
+ units.Quantity(1.22874e-1, '1/delta_degF') * delta2 * relative_humidity
+ 8.5282 * delta * rh2
- units.Quantity(1.99e-2, '1/delta_degF') * delta2 * rh2)
# Create return heat index
hi = units.Quantity(np.full(np.shape(temperature), np.nan), 'degF')
# Retain masked status of temperature with resulting heat index
if hasattr(temperature, 'mask'):
hi = masked_array(hi)
# If T <= 40F, Heat Index is T
sel = (temperature <= units.Quantity(40., 'degF'))
if np.any(sel):
hi[sel] = temperature[sel].to(units.degF)
# If a < 79F and hi is unset, Heat Index is a
sel = (a < units.Quantity(79., 'degF')) & np.isnan(hi)
if np.any(sel):
hi[sel] = a[sel]
# Use b now for anywhere hi has yet to be set
sel = np.isnan(hi)
if np.any(sel):
hi[sel] = b[sel]
# Adjustment for RH <= 13% and 80F <= T <= 112F
sel = ((relative_humidity <= units.Quantity(13., 'percent'))
& (temperature >= units.Quantity(80., 'degF'))
& (temperature <= units.Quantity(112., 'degF')))
if np.any(sel):
rh15adj = ((13. - relative_humidity[sel] * 100.) / 4.
* np.sqrt((units.Quantity(17., 'delta_degF')
- np.abs(delta[sel] - units.Quantity(95., 'delta_degF')))
/ units.Quantity(17., '1/delta_degF')))
hi[sel] = hi[sel] - rh15adj
# Adjustment for RH > 85% and 80F <= T <= 87F
sel = ((relative_humidity > units.Quantity(85., 'percent'))
& (temperature >= units.Quantity(80., 'degF'))
& (temperature <= units.Quantity(87., 'degF')))
if np.any(sel):
rh85adj = (0.02 * (relative_humidity[sel] * 100. - 85.)
* (units.Quantity(87., 'delta_degF') - delta[sel]))
hi[sel] = hi[sel] + rh85adj
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array(temperature < units.Quantity(80., 'degF'))
if mask.any():
hi = masked_array(hi, mask=mask)
return hi
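# Illustrative check (not from the original module): the classic NWS example of
# 96 degF air temperature at 65% relative humidity gives a heat index of
# roughly 121 degF, e.g.
#   heat_index(units.Quantity(96., 'degF'), units.Quantity(65., 'percent'))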
@exporter.export
@preprocess_and_wrap(wrap_like='temperature')
@check_units(temperature='[temperature]', speed='[speed]')
def apparent_temperature(temperature, relative_humidity, speed, face_level_winds=False,
mask_undefined=True):
r"""Calculate the current apparent temperature.
Calculates the current apparent temperature based on the wind chill or heat index
as appropriate for the current conditions. Follows [NWS10201]_.
Parameters
----------
temperature : `pint.Quantity`
Air temperature
relative_humidity : `pint.Quantity`
Relative humidity expressed as a unitless ratio in the range [0, 1].
Can also pass a percentage if proper units are attached.
speed : `pint.Quantity`
Wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill or heat_index is undefined masked. For wind
chill, these are values where the temperature > 50F or
wind speed <= 3 miles per hour. For heat index, these are values
where the temperature < 80F.
Defaults to `True`.
Returns
-------
`pint.Quantity`
Corresponding apparent temperature value(s)
.. versionchanged:: 1.0
Renamed ``rh`` parameter to ``relative_humidity``
See Also
--------
heat_index, windchill
"""
is_not_scalar = isinstance(temperature.m, (list, tuple, np.ndarray))
temperature = np.atleast_1d(temperature)
relative_humidity = np.atleast_1d(relative_humidity)
speed = np.atleast_1d(speed)
# NB: mask_defined=True is needed to know where computed values exist
wind_chill_temperature = windchill(temperature, speed, face_level_winds=face_level_winds,
mask_undefined=True).to(temperature.units)
heat_index_temperature = heat_index(temperature, relative_humidity,
mask_undefined=True).to(temperature.units)
# Combine the heat index and wind chill arrays (no point has a value in both)
# NB: older numpy.ma.where does not return a masked array
app_temperature = masked_array(
np.ma.where(masked_array(wind_chill_temperature).mask,
heat_index_temperature.to(temperature.units),
wind_chill_temperature.to(temperature.units)
), temperature.units)
# If mask_undefined is False, then set any masked values to the temperature
if not mask_undefined:
app_temperature[app_temperature.mask] = temperature[app_temperature.mask]
# If no values are masked and provided temperature does not have a mask
# we should return a non-masked array
if not np.any(app_temperature.mask) and not hasattr(temperature, 'mask'):
app_temperature = units.Quantity(np.array(app_temperature.m), temperature.units)
if is_not_scalar:
return app_temperature
else:
return np.atleast_1d(app_temperature)[0]
@exporter.export
@preprocess_and_wrap(wrap_like='pressure')
@check_units('[pressure]')
def pressure_to_height_std(pressure):
r"""Convert pressure data to height using the U.S. standard atmosphere [NOAA1976]_.
The implementation uses the formula outlined in [Hobbs1977]_ pg.60-61.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure
Returns
-------
`pint.Quantity`
Corresponding height value(s)
Notes
-----
.. math:: Z = \frac{T_0}{\Gamma}[1-\frac{p}{p_0}^\frac{R\Gamma}{g}]
"""
return (t0 / gamma) * (1 - (pressure / p0).to('dimensionless')**(
mpconsts.Rd * gamma / mpconsts.g))
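# Illustrative check (not from the original module): in the U.S. standard
# atmosphere the 850-hPa level sits near 1.46 km, e.g.
#   pressure_to_height_std(units.Quantity(850., 'hPa'))   # ~1.46 kilometer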
@exporter.export
@preprocess_and_wrap(wrap_like='height')
@check_units('[length]')
def height_to_geopotential(height):
r"""Compute geopotential for a given height above sea level.
Calculates the geopotential from height above mean sea level using the following formula,
which is derived from the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq
3.21, along with an approximation for variation of gravity with altitude:
.. math:: \Phi = \frac{g R_e z}{R_e + z}
(where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth
radius, and :math:`g` is standard gravity).
Parameters
----------
height : `pint.Quantity`
Height above sea level
Returns
-------
`pint.Quantity`
Corresponding geopotential value(s)
Examples
--------
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0, 10000, num=11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9805.11097983 19607.1448853 29406.10316465
39201.98726524 48994.79863351 58784.53871501 68571.20895435
78354.81079527 88135.34568058 97912.81505219], 'meter ** 2 / second ** 2')>
See Also
--------
geopotential_to_height
Notes
-----
This calculation approximates :math:`g(z)` as
.. math:: g(z) = g_0 \left( \frac{R_e}{R_e + z} \right)^2
where :math:`g_0` is standard gravity. It thereby accounts for the average effects of
centrifugal force on apparent gravity, but neglects latitudinal variations due to
centrifugal force and Earth's eccentricity.
(Prior to MetPy v0.11, this formula instead calculated :math:`g(z)` from Newton's Law of
Gravitation assuming a spherical Earth and no centrifugal force effects).
"""
return (mpconsts.g * mpconsts.Re * height) / (mpconsts.Re + height)
@exporter.export
@preprocess_and_wrap(wrap_like='geopotential')
@check_units('[length] ** 2 / [time] ** 2')
def geopotential_to_height(geopotential):
r"""Compute height above sea level from a given geopotential.
Calculates the height above mean sea level from geopotential using the following formula,
which is derived from the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq
3.21, along with an approximation for variation of gravity with altitude:
.. math:: z = \frac{\Phi R_e}{gR_e - \Phi}
(where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth
radius, and :math:`g` is standard gravity).
Parameters
----------
geopotential : `pint.Quantity`
Geopotential
Returns
-------
`pint.Quantity`
Corresponding value(s) of height above sea level
Examples
--------
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0, 10000, num=11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9805.11097983 19607.1448853 29406.10316465
39201.98726524 48994.79863351 58784.53871501 68571.20895435
78354.81079527 88135.34568058 97912.81505219], 'meter ** 2 / second ** 2')>
>>> height = metpy.calc.geopotential_to_height(geopot)
>>> height
<Quantity([ 0. 1000. 2000. 3000. 4000. 5000. 6000. 7000. 8000.
9000. 10000.], 'meter')>
See Also
--------
height_to_geopotential
Notes
-----
This calculation approximates :math:`g(z)` as
.. math:: g(z) = g_0 \left( \frac{R_e}{R_e + z} \right)^2
where :math:`g_0` is standard gravity. It thereby accounts for the average effects of
centrifugal force on apparent gravity, but neglects latitudinal variations due to
centrifugal force and Earth's eccentricity.
(Prior to MetPy v0.11, this formula instead calculated :math:`g(z)` from Newton's Law of
Gravitation assuming a spherical Earth and no centrifugal force effects.)
.. versionchanged:: 1.0
Renamed ``geopot`` parameter to ``geopotential``
"""
return (geopotential * mpconsts.Re) / (mpconsts.g * mpconsts.Re - geopotential)
@exporter.export
@preprocess_and_wrap(wrap_like='height')
@check_units('[length]')
def height_to_pressure_std(height):
r"""Convert height data to pressures using the U.S. standard atmosphere [NOAA1976]_.
The implementation inverts the formula outlined in [Hobbs1977]_ pg.60-61.
Parameters
----------
height : `pint.Quantity`
Atmospheric height
Returns
-------
`pint.Quantity`
Corresponding pressure value(s)
Notes
-----
.. math:: p = p_0 e^{\frac{g}{R \Gamma} \text{ln}(1-\frac{Z \Gamma}{T_0})}
"""
return p0 * (1 - (gamma / t0) * height) ** (mpconsts.g / (mpconsts.Rd * gamma))
@exporter.export
@preprocess_and_wrap(wrap_like='latitude')
def coriolis_parameter(latitude):
r"""Calculate the coriolis parameter at each point.
The implementation uses the formula outlined in [Hobbs1977]_ pg.370-371.
Parameters
----------
latitude : array_like
Latitude at each point
Returns
-------
`pint.Quantity`
Corresponding coriolis force at each point
"""
latitude = _check_radians(latitude, max_radians=np.pi / 2)
return (2. * mpconsts.omega * np.sin(latitude)).to('1/s')
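# Illustrative check (not from the original module): at 45 degrees latitude the
# Coriolis parameter is about 1.03e-4 s^-1, e.g.
#   coriolis_parameter(units.Quantity(45., 'degrees'))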
@exporter.export
@preprocess_and_wrap(wrap_like='pressure')
@check_units('[pressure]', '[length]')
def add_height_to_pressure(pressure, height):
r"""Calculate the pressure at a certain height above another pressure level.
This assumes a standard atmosphere [NOAA1976]_.
Parameters
----------
pressure : `pint.Quantity`
Pressure level
height : `pint.Quantity`
Height above a pressure level
Returns
-------
`pint.Quantity`
Corresponding pressure value for the height above the pressure level
See Also
--------
pressure_to_height_std, height_to_pressure_std, add_pressure_to_height
"""
pressure_level_height = pressure_to_height_std(pressure)
return height_to_pressure_std(pressure_level_height + height)
@exporter.export
@preprocess_and_wrap(wrap_like='height')
@check_units('[length]', '[pressure]')
def add_pressure_to_height(height, pressure):
r"""Calculate the height at a certain pressure above another height.
This assumes a standard atmosphere [NOAA1976]_.
Parameters
----------
height : `pint.Quantity`
Height level
pressure : `pint.Quantity`
Pressure above height level
Returns
-------
`pint.Quantity`
The corresponding height value for the pressure above the height level
See Also
--------
pressure_to_height_std, height_to_pressure_std, add_height_to_pressure
"""
pressure_at_height = height_to_pressure_std(height)
return pressure_to_height_std(pressure_at_height - pressure)
@exporter.export
@preprocess_and_wrap(wrap_like='sigma')
@check_units('[dimensionless]', '[pressure]', '[pressure]')
def sigma_to_pressure(sigma, pressure_sfc, pressure_top):
r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
Sigma levels to be converted to pressure levels
pressure_sfc : `pint.Quantity`
Surface pressure value
pressure_top : `pint.Quantity`
Pressure value at the top of the model domain
Returns
-------
`pint.Quantity`
Pressure values at the given sigma levels
Notes
-----
Sigma definition adapted from [Philips1957]_:
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
* :math:`p` is pressure at a given `\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain
.. versionchanged:: 1.0
Renamed ``psfc``, ``ptop`` parameters to ``pressure_sfc``, ``pressure_top``
"""
if np.any(sigma < 0) or np.any(sigma > 1):
raise ValueError('Sigma values should be bounded by 0 and 1')
if pressure_sfc.magnitude < 0 or pressure_top.magnitude < 0:
raise ValueError('Pressure input should be non-negative')
return sigma * (pressure_sfc - pressure_top) + pressure_top
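# Illustrative check (not from the original module): sigma = 0.5 with a
# 1000-hPa surface and a 100-hPa model top gives 0.5 * (1000 - 100) + 100 = 550 hPa:
#   sigma_to_pressure(np.array([0.5]), units.Quantity(1000., 'hPa'),
#                     units.Quantity(100., 'hPa'))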
@exporter.export
@preprocess_and_wrap(wrap_like='scalar_grid', match_unit=True, to_magnitude=True)
def smooth_gaussian(scalar_grid, n):
"""Filter with normal distribution of weights.
Parameters
----------
scalar_grid : `pint.Quantity`
Some n-dimensional scalar grid. If more than two axes, smoothing
is only done across the last two.
n : int
Degree of filtering
Returns
-------
`pint.Quantity`
The filtered 2D scalar grid
Notes
-----
This function is a close replication of the GEMPAK function ``GWFS``,
but is not identical. The following notes are incorporated from
the GEMPAK source code:
This function smoothes a scalar grid using a moving average
low-pass filter whose weights are determined by the normal
(Gaussian) probability distribution function for two dimensions.
The weight given to any grid point within the area covered by the
moving average for a target grid point is proportional to:
.. math:: e^{-D^2}
where D is the distance from that point to the target point divided
by the standard deviation of the normal distribution. The value of
the standard deviation is determined by the degree of filtering
requested. The degree of filtering is specified by an integer.
This integer is the number of grid increments from crest to crest
of the wave for which the theoretical response is 1/e = .3679. If
the grid increment is called delta_x, and the value of this integer
is represented by N, then the theoretical filter response function
value for the N * delta_x wave will be 1/e. The actual response
function will be greater than the theoretical value.
The larger N is, the more severe the filtering will be, because the
response function for all wavelengths shorter than N * delta_x
will be less than 1/e. Furthermore, as N is increased, the slope
of the filter response function becomes more shallow; so, the
response at all wavelengths decreases, but the amount of decrease
lessens with increasing wavelength. (The theoretical response
function can be obtained easily--it is the Fourier transform of the
weight function described above.)
The area of the patch covered by the moving average varies with N.
As N gets bigger, the smoothing gets stronger, and weight values
farther from the target grid point are larger because the standard
deviation of the normal distribution is bigger. Thus, increasing
N has the effect of expanding the moving average window as well as
changing the values of weights. The patch is a square covering all
points whose weight values are within two standard deviations of the
mean of the two dimensional normal distribution.
The key difference between GEMPAK's GWFS and this function is that,
in GEMPAK, the leftover weight values representing the fringe of the
distribution are applied to the target grid point. In this
function, the leftover weights are not used.
When this function is invoked, the first argument is the grid to be
smoothed, the second is the value of N as described above:
GWFS ( S, N )
where N > 1. If N <= 1, N = 2 is assumed. For example, if N = 4,
then the 4 delta x wave length is passed with approximate response
1/e.
"""
# Compute standard deviation in a manner consistent with GEMPAK
n = int(round(n))
if n < 2:
n = 2
sgma = n / (2 * np.pi)
# Construct sigma sequence so smoothing occurs only in horizontal direction
num_ax = len(scalar_grid.shape)
# Assume the last two axes represent the horizontal directions
sgma_seq = [sgma if i > num_ax - 3 else 0 for i in range(num_ax)]
filter_args = {'sigma': sgma_seq, 'truncate': 2 * np.sqrt(2)}
if hasattr(scalar_grid, 'mask'):
smoothed = gaussian_filter(scalar_grid.data, **filter_args)
return np.ma.array(smoothed, mask=scalar_grid.mask)
else:
return gaussian_filter(scalar_grid, **filter_args)
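# Quick usage sketch (illustrative, not from the original module): n = 4 passes
# the 4-grid-increment wave with a response of about 1/e; the filter's standard
# deviation is then 4 / (2 * pi) ~= 0.64 grid increments, e.g.
#   smoothed = smooth_gaussian(np.random.rand(50, 50), 4)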
@exporter.export
@preprocess_and_wrap(wrap_like='scalar_grid', match_unit=True, to_magnitude=True)
def smooth_window(scalar_grid, window, passes=1, normalize_weights=True):
"""Filter with an arbitrary window smoother.
Parameters
----------
scalar_grid : array-like
N-dimensional scalar grid to be smoothed
window : ndarray
Window to use in smoothing. Can have dimension less than or equal to N. If
dimension less than N, the scalar grid will be smoothed along its trailing dimensions.
Shape along each dimension must be odd.
passes : int
The number of times to apply the filter to the grid. Defaults to 1.
normalize_weights : bool
If true, divide the values in window by the sum of all values in the window to obtain
the normalized smoothing weights. If false, use supplied values directly as the
weights.
Returns
-------
array-like
The filtered scalar grid
See Also
--------
smooth_rectangular, smooth_circular, smooth_n_point, smooth_gaussian
Notes
-----
This function can be applied multiple times to create a more smoothed field and will only
smooth the interior points, leaving the end points with their original values (this
function will leave an unsmoothed edge of size `(n - 1) / 2` for each `n` in the shape of
`window` around the data). If a masked value or NaN values exists in the array, it will
propagate to any point that uses that particular grid point in the smoothing calculation.
Applying the smoothing function multiple times will propagate NaNs further throughout the
domain.
"""
# Verify that shape in all dimensions is odd (need to have a neighborhood around a
# central point)
if any((size % 2 == 0) for size in window.shape):
raise ValueError('The shape of the smoothing window must be odd in all dimensions.')
# Optionally normalize the supplied weighting window
if normalize_weights:
weights = window / np.sum(window)
else:
weights = window
# Set indexes
# Inner index for the centered array elements that are affected by the smoothing
inner_full_index = _trailing_dims(_offset(_pad(n), 0) for n in weights.shape)
# Indexes to iterate over each weight
weight_indexes = tuple(product(*(range(n) for n in weights.shape)))
# Index for full array elements, offset by the weight index
# TODO: this is not lazy-loading/dask compatible, as it "densifies" the data
data = np.array(scalar_grid)
for _ in range(passes):
# Set values corresponding to smoothing weights by summing over each weight and
# applying offsets in needed dimensions
data[inner_full_index] = sum(weights[index] * data[offset_full_index(index)]
for index in weight_indexes)
return data
@exporter.export
def smooth_rectangular(scalar_grid, size, passes=1):
"""Filter with a rectangular window smoother.
Parameters
----------
scalar_grid : array-like
N-dimensional scalar grid to be smoothed
size : int or sequence of ints
Shape of rectangle along the trailing dimension(s) of the scalar grid
passes : int
The number of times to apply the filter to the grid. Defaults to 1.
Returns
-------
array-like
The filtered scalar grid
See Also
--------
smooth_window, smooth_circular, smooth_n_point, smooth_gaussian
Notes
-----
This function can be applied multiple times to create a more smoothed field and will only
smooth the interior points, leaving the end points with their original values (this
function will leave an unsmoothed edge of size `(n - 1) / 2` for each `n` in `size` around
the data). If a masked value or NaN values exists in the array, it will propagate to any
point that uses that particular grid point in the smoothing calculation. Applying the
smoothing function multiple times will propagate NaNs further throughout the domain.
"""
return smooth_window(scalar_grid, np.ones(size), passes=passes)
@exporter.export
def smooth_circular(scalar_grid, radius, passes=1):
"""Filter with a circular window smoother.
Parameters
----------
scalar_grid : array-like
N-dimensional scalar grid to be smoothed. If more than two axes, smoothing is only
done along the last two.
radius : int
Radius of the circular smoothing window. The "diameter" of the circle (width of
smoothing window) is 2 * radius + 1 to provide a smoothing window with odd shape.
passes : int
The number of times to apply the filter to the grid. Defaults to 1.
Returns
-------
array-like
The filtered scalar grid
See Also
--------
smooth_window, smooth_rectangular, smooth_n_point, smooth_gaussian
Notes
-----
This function can be applied multiple times to create a more smoothed field and will only
smooth the interior points, leaving the end points with their original values (this
function will leave an unsmoothed edge of size `radius` around the data). If a masked
value or NaN values exists in the array, it will propagate to any point that uses that
particular grid point in the smoothing calculation. Applying the smoothing function
multiple times will propagate NaNs further throughout the domain.
"""
# Generate the circle
size = 2 * radius + 1
x, y = np.mgrid[:size, :size]
distance = np.sqrt((x - radius) ** 2 + (y - radius) ** 2)
circle = distance <= radius
# Apply smoother
return smooth_window(scalar_grid, circle, passes=passes)
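# --- Illustrative note (not part of the original module) ---
# For radius = 1 the window built above is 3 x 3 and the boolean mask keeps
# only points within one grid length of the centre, i.e. a plus-shaped
# footprint (corner points lie sqrt(2) away and are excluded):
#
#     x, y = np.mgrid[:3, :3]
#     np.sqrt((x - 1) ** 2 + (y - 1) ** 2) <= 1
#     # array([[False,  True, False],
#     #        [ True,  True,  True],
#     #        [False,  True, False]])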
@exporter.export
def smooth_n_point(scalar_grid, n=5, passes=1):
"""Filter with an n-point smoother.
Parameters
----------
scalar_grid : array-like or `pint.Quantity`
N-dimensional scalar grid to be smoothed. If more than two axes, smoothing is only
done along the last two.
n: int
The number of points to use in smoothing, only valid inputs
are 5 and 9. Defaults to 5.
passes : int
The number of times to apply the filter to the grid. Defaults to 1.
Returns
-------
array-like or `pint.Quantity`
The filtered scalar grid
See Also
--------
smooth_window, smooth_rectangular, smooth_circular, smooth_gaussian
Notes
-----
This function is a close replication of the GEMPAK function SM5S and SM9S depending on the
choice of the number of points to use for smoothing. This function can be applied multiple
times to create a more smoothed field and will only smooth the interior points, leaving
the end points with their original values (this function will leave an unsmoothed edge of
size 1 around the data). If a masked value or NaN values exists in the array, it will
propagate to any point that uses that particular grid point in the smoothing calculation.
Applying the smoothing function multiple times will propagate NaNs further throughout the
domain.
"""
if n == 9:
weights = np.array([[0.0625, 0.125, 0.0625],
[0.125, 0.25, 0.125],
[0.0625, 0.125, 0.0625]])
elif n == 5:
weights = np.array([[0., 0.125, 0.],
[0.125, 0.5, 0.125],
[0., 0.125, 0.]])
else:
raise ValueError('The number of points to use in the smoothing '
'calculation must be either 5 or 9.')
return smooth_window(scalar_grid, window=weights, passes=passes, normalize_weights=False)
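# --- Illustrative note (not part of the original module) ---
# The 9-point weights above are the outer product of the 1-D [0.25, 0.5, 0.25]
# kernel with itself, so the 9-point smoother is equivalent to applying that
# 1-2-1 filter along each horizontal axis:
#
#     k = np.array([0.25, 0.5, 0.25])
#     np.outer(k, k)
#     # array([[0.0625, 0.125 , 0.0625],
#     #        [0.125 , 0.25  , 0.125 ],
#     #        [0.0625, 0.125 , 0.0625]])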
@exporter.export
@preprocess_and_wrap(wrap_like='altimeter_value')
@check_units('[pressure]', '[length]')
def altimeter_to_station_pressure(altimeter_value, height):
r"""Convert the altimeter measurement to station pressure.
This function is useful for working with METARs, since they provide
altimeter values but not sea-level pressure or station pressure.
The following definitions of altimeter setting and station pressure
are taken from [Smithsonian1951]_. Altimeter setting is the
pressure value to which an aircraft altimeter scale is set so that it will
indicate the altitude above mean sea-level of an aircraft on the ground at the
location for which the value is determined. It assumes a standard atmosphere [NOAA1976]_.
Station pressure is the atmospheric pressure at the designated station elevation.
Finding the station pressure can be helpful for calculating sea-level pressure
or other parameters.
Parameters
----------
altimeter_value : `pint.Quantity`
The altimeter setting value as defined by the METAR or other observation,
which can be measured in either inches of mercury (in. Hg) or millibars (mb)
height: `pint.Quantity`
Elevation of the station measuring pressure
Returns
-------
`pint.Quantity`
The station pressure in hPa or in. Hg. Can be used to calculate sea-level
pressure.
See Also
--------
altimeter_to_sea_level_pressure
Notes
-----
This function is implemented using the following equations from the
Smithsonian Handbook (1951) p. 269
Equation 1:
.. math:: A_{mb} = (p_{mb} - 0.3)F
Equation 3:
.. math:: F = \left [1 + \left(\frac{p_{0}^n a}{T_{0}} \right)
\frac{H_{b}}{p_{1}^n} \right ] ^ \frac{1}{n}
Where,
:math:`p_{0}` = standard sea-level pressure = 1013.25 mb
:math:`p_{1} = p_{mb} - 0.3` when :math:`p_{0} = 1013.25 mb`
gamma = lapse rate in [NOAA1976]_ standard atmosphere below the isothermal layer
:math:`6.5^{\circ}C. km.^{-1}`
:math:`t_{0}` = standard sea-level temperature 288 K
:math:`H_{b} =` station elevation in meters (elevation for which station pressure is given)
:math:`n = \frac{a R_{d}}{g} = 0.190284` where :math:`R_{d}` is the gas constant for dry
air
And solving for :math:`p_{mb}` results in the equation below, which is used to
calculate station pressure :math:`(p_{mb})`
.. math:: p_{mb} = \left [A_{mb} ^ n - \left (\frac{p_{0}^n a H_{b}}{T_0}
\right) \right] ^ \frac{1}{n} + 0.3
"""
# N-Value
n = (mpconsts.Rd * gamma / mpconsts.g).to_base_units()
return ((altimeter_value ** n
- ((p0.to(altimeter_value.units) ** n * gamma * height) / t0)) ** (1 / n)
+ units.Quantity(0.3, 'hPa'))
@exporter.export
@preprocess_and_wrap(wrap_like='altimeter_value')
@check_units('[pressure]', '[length]', '[temperature]')
def altimeter_to_sea_level_pressure(altimeter_value, height, temperature):
r"""Convert the altimeter setting to sea-level pressure.
This function is useful for working with METARs since most provide
altimeter values, but not sea-level pressure, which is often plotted
on surface maps. The following definitions of altimeter setting, station pressure, and
sea-level pressure are taken from [Smithsonian1951]_.
Altimeter setting is the pressure value to which an aircraft altimeter scale
is set so that it will indicate the altitude above mean sea-level of an aircraft
on the ground at the location for which the value is determined. It assumes a standard
atmosphere. Station pressure is the atmospheric pressure at the designated station
elevation. Sea-level pressure is a pressure value obtained by the theoretical reduction
of barometric pressure to sea level. It is assumed that atmosphere extends to sea level
below the station and that the properties of the atmosphere are related to conditions
observed at the station. This value is recorded by some surface observation stations,
but not all. If the value is recorded, it can be found in the remarks section. Finding
the sea-level pressure is helpful for plotting purposes and different calculations.
Parameters
----------
altimeter_value : `pint.Quantity`
The altimeter setting value is defined by the METAR or other observation,
with units of inches of mercury (in Hg) or millibars (hPa).
height : `pint.Quantity`
Elevation of the station measuring pressure. Often measured in meters
temperature : `pint.Quantity`
Temperature at the station
Returns
-------
`pint.Quantity`
The sea-level pressure in hPa, which makes pressure values easier to compare
between different stations.
See Also
--------
altimeter_to_station_pressure
Notes
-----
This function is implemented using the following equations from Wallace and Hobbs (1977).
Equation 2.29:
.. math::
\Delta z = Z_{2} - Z_{1}
= \frac{R_{d} \bar T_{v}}{g_0}ln\left(\frac{p_{1}}{p_{2}}\right)
= \bar H ln \left (\frac {p_{1}}{p_{2}} \right)
Equation 2.31:
.. math::
p_{0} = p_{g}exp \left(\frac{Z_{g}}{\bar H} \right)
= p_{g}exp \left(\frac{g_{0}Z_{g}}{R_{d}\bar T_{v}} \right)
Then by substituting :math:`\Delta_{Z}` for :math:`Z_{g}` in Equation 2.31:
.. math:: p_{sealevel} = p_{station} exp\left(\frac{\Delta z}{H}\right)
where :math:`\Delta_{Z}` is the elevation in meters and :math:`H = \frac{R_{d}T}{g}`
"""
# Calculate the station pressure using function altimeter_to_station_pressure()
psfc = altimeter_to_station_pressure(altimeter_value, height)
# Calculate the scale height
h = mpconsts.Rd * temperature / mpconsts.g
return psfc * np.exp(height / h)
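# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming the same pint-based `units` registry used above
# and that temperature is supplied in kelvin so the scale-height arithmetic
# avoids offset-unit issues (all values here are hypothetical):
#
#     altimeter = units.Quantity(29.92, 'inHg')
#     elevation = units.Quantity(1000., 'meter')
#     temperature = units.Quantity(288., 'kelvin')
#     p_station = altimeter_to_station_pressure(altimeter, elevation)
#     p_slp = altimeter_to_sea_level_pressure(altimeter, elevation, temperature)
#
# Both results carry pressure units and can be converted with .to('hPa').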
def _check_radians(value, max_radians=2 * np.pi):
"""Input validation of values that could be in degrees instead of radians.
Parameters
----------
value : `pint.Quantity`
Input value to check
max_radians : float
Maximum absolute value of radians before warning
Returns
-------
`pint.Quantity`
Input value
"""
with contextlib.suppress(AttributeError):
value = value.to('radians').m
if np.any(np.greater(np.abs(value), max_radians)):
warnings.warn('Input over {} radians. '
'Ensure proper units are given.'.format(np.nanmax(max_radians)))
return value
| [
2,
15069,
357,
66,
8,
3648,
11,
4626,
11,
5304,
11,
5539,
11,
7908,
11,
23344,
3395,
20519,
34152,
13,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
347,
10305,
513,
12,
2601,
682,
13789,
13,
198,
2,
30628,
55,
12,
34156,
12,
33... | 2.792985 | 15,081 |
"""
Problem description: Given two independent trees whose root nodes are t1 and t2, determine whether tree t1 contains the complete topology of tree t2.
"""
from binarytree.toolcls import Node
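# NOTE: BSTTop is not defined in this snippet. The sketch below is a minimal,
# hypothetical implementation of the containment check described in the
# docstring, assuming Node exposes .value/.left/.right; adjust the attribute
# names if binarytree.toolcls differs.
class BSTTop:
    @staticmethod
    def is_bst_top(t1, t2):
        # t1 contains t2's topology if a match starts at t1 or in some subtree of t1.
        if t2 is None:
            return True
        if t1 is None:
            return False
        return (BSTTop._starts_with(t1, t2)
                or BSTTop.is_bst_top(t1.left, t2)
                or BSTTop.is_bst_top(t1.right, t2))

    @staticmethod
    def _starts_with(head, target):
        # Every node of target must be matched, node for node, starting from head.
        if target is None:
            return True
        if head is None or head.value != target.value:
            return False
        return (BSTTop._starts_with(head.left, target.left)
                and BSTTop._starts_with(head.right, target.right))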
if __name__ == '__main__':
t1 = Node(1)
t1.left = Node(2)
t1.right = Node(3)
t1.left.left = Node(4)
t1.left.right = Node(5)
t1.right.left = Node(6)
t1.right.right = Node(7)
t1.left.left.left = Node(8)
t1.left.left.right = Node(9)
t1.left.right.left = Node(10)
t2 = Node(2)
t2.left = Node(4)
t2.left.left = Node(8)
t2.right = Node(5)
print(BSTTop.is_bst_top(t1, t2)) | [
37811,
198,
29785,
106,
165,
95,
246,
162,
237,
237,
32573,
108,
171,
120,
248,
163,
119,
247,
22522,
248,
37605,
120,
29826,
97,
45379,
105,
44165,
233,
21410,
10310,
97,
162,
96,
113,
43718,
239,
13783,
112,
163,
119,
241,
163,
22... | 1.515942 | 345 |
import pymongo
config = {
# "mongo" : "mongodb://usuario:pass@localhost/admin"
# "mongo" : "mongodb://usuario:pass@localhost:27100/admin"
"mongo" : "mongodb://localhost:27100/admin"
}
myclient = pymongo.MongoClient(config['mongo'])
print('databases', myclient.list_database_names())
db = myclient["test"]
print('collections', db.list_collection_names())
coleccion = db['users']
usuario = { "name": "Juan", "city": "Lima" }
x = coleccion.insert_one(usuario)
print(x.inserted_id)
# users
usuarios = [
{ "name": "Juan", "city": "Lima" },
{ "name": "Jorge", "city": "Callao" }
]
x = coleccion.insert_many(usuarios)
print(x.inserted_ids)
# find one
x = coleccion.find_one()
print(x)
#query
query = { "name": { "$regex": "^Ju" } }
xs = coleccion.find(query)
for x in xs:
print(x)
| [
11748,
279,
4948,
25162,
198,
198,
11250,
796,
1391,
198,
220,
220,
220,
1303,
366,
76,
25162,
1,
1058,
366,
31059,
375,
65,
1378,
385,
84,
4982,
25,
6603,
31,
36750,
14,
28482,
1,
198,
220,
220,
220,
1303,
366,
76,
25162,
1,
1058... | 2.280453 | 353 |
# coding:utf-8
# Produced by Andysin Zhang
# 06_Aug_2019
# Inspired by Google; we appreciate the wonderful work
#
# Copyright 2019 TCL Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""""Basic Seq2Seq model with VAE, no Attention support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import collections
import tensorflow as tf
import model_helper as _mh
from utils.log import log_info as _info
from utils.log import log_error as _error
__all__ = ['BaseModel']
def get_specific_scope_params(scope=''):
"""Used to get specific parameters for training
"""
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
class BaseModel(object):
"""Base Model
"""
def _set_params_initializer(self, hparams, mode, scope):
"""Load the parameters and set the initializer
"""
self.mode = mode
# pre_train flag is used to distinguish between pre-training and fine-tuning
if hparams.enable_vae:
_info('Enable VAE')
self.enable_vae = True
self.pre_train = hparams.pre_train
else:
self.enable_vae = False
self.pre_train = False
self.dtype = tf.float32
self.global_step = tf.Variable(0, trainable=False)
# define the input for the model
self.encoder_input_data = tf.placeholder(
tf.int32, [None, None], name='encoder_input_data')
self.decoder_input_data = tf.placeholder(
tf.int32, [None, None], name='decoder_input_data')
self.decoder_output_data = tf.placeholder(
tf.int32, [None, None], name='decoder_output_data')
self.seq_length_encoder_input_data = tf.placeholder(
tf.int32, [None], name='seq_length_encoder_input_data')
self.seq_length_decoder_input_data = tf.placeholder(
tf.int32, [None], name='seq_length_decoder_input_data')
# load some important hyperparameters
self.unit_type = hparams.unit_type
self.num_units = hparams.num_units
self.num_encoder_layers = hparams.num_encoder_layers
self.num_decoder_layers = hparams.num_decoder_layers
self.num_encoder_residual_layers = self.num_encoder_layers - 1
self.num_decoder_residual_layers = self.num_decoder_layers - 1
self.batch_size = tf.size(self.seq_length_encoder_input_data)
# set initializer
random_seed = hparams.random_seed
initializer = _mh.get_initializer(hparams.init_op, random_seed, hparams.init_weight)
tf.get_variable_scope().set_initializer(initializer)
# embeddings
self.src_vocab_size = hparams.src_vocab_size
self.tgt_vocab_size = hparams.tgt_vocab_size
self.init_embeddings(hparams, scope)
def init_embeddings(self, hparams, scope):
"""Init embeddings
"""
self.embedding_encoder, self.embedding_decoder = \
_mh.create_emb_for_encoder_and_decoder(
share_vocab=hparams.share_vocab,
src_vocab_size=self.src_vocab_size,
tgt_vocab_size=self.tgt_vocab_size,
src_embed_size=self.num_units,
tgt_embed_size=self.num_units,
scope=scope)
def _train_or_inference(self, hparams, res):
"""need to optimize, etc. in train,
used for seperate process in train and infer
"""
if self.mode == 'train':
self.sample_id = res[1]
self.loss = res[2]
self.loss_per_token = res[3]
self.kl_loss = res[4]
elif self.mode == 'eval':
self.loss = res[2]
elif self.mode == 'infer':
self.infer_logtis, self.sample_id = res[0], res[1]
if self.mode != 'infer':
self.predict_count = tf.reduce_sum(self.seq_length_decoder_input_data)
if self.enable_vae and not self.pre_train:
params = get_specific_scope_params('dynamic_seq2seq/transfer')
else:
params = tf.trainable_variables()
# set learning rate
if self.mode == 'train':
self.learning_rate = tf.constant(hparams.learning_rate)
# warm-up or decay
self.learning_rate = self._get_learning_rate_warmup_decay(hparams)
# Optimizer
if hparams.optimizer == 'sgd':
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
elif hparams.optimizer == 'adam':
opt = tf.train.AdamOptimizer(self.learning_rate)
else:
_error('Unknown optimizer type {}'.format(hparams.optimizer))
raise ValueError
# Gradients
gradients = tf.gradients(self.loss, params)
clipped_gradients, _ = tf.clip_by_global_norm(
gradients, 5.0)
self.update = opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step)
# Summary
self.train_summary = tf.summary.merge(
[tf.summary.scalar('lr', self.learning_rate),
tf.summary.scalar('loss', self.loss)])
def _build_encoder(self, hparams):
"""Build the encoder and return the encoding outputs
Args:
hparams: hyperparameters
Returns:
encoder_outputs: 'uni': [batch, seq, hidden] 'bi': [batch, seq, hidden * 2]
encoder_state: 'uni': [batch, hidden] for _ in range(layers)
'bi': Tuple(fw_hidden_layer_i, bw_hidden_layer_i for i in range(layers))
Raises:
ValueError: Unknown encoder_type
"""
num_layers = self.num_encoder_layers
num_residual_layers = self.num_encoder_residual_layers
with tf.variable_scope('encoder') as _:
self.encoder_emb_inp = tf.nn.embedding_lookup(self.embedding_encoder, self.encoder_input_data)
if hparams.encoder_type == 'uni':
_info('num_layers = {} num_residual_layers = {}'.format(num_layers, num_residual_layers))
# 1. build a list of cells
cell = self._build_encoder_cell(hparams, num_layers, num_residual_layers)
# 2. forward
# encoder_outputs: [batch, time, hidden]
# encoder_state: ([batch, hidden] for _ in range(layers))
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
cell,
self.encoder_emb_inp,
dtype=self.dtype,
sequence_length=self.seq_length_encoder_input_data,
swap_memory=True)
elif hparams.encoder_type == 'bi':
if not num_layers % 2 == 0:
_error('Bi-directional requires num_layers={} to be divisible by 2'.format(num_layers))
raise ValueError
num_bi_layers = int(num_layers / 2)
num_bi_residual_layers = num_bi_layers - 1
_info(' num_bi_layers={} num_bi_residual_layers={}'.format(num_bi_layers, num_bi_residual_layers))
cell_fw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)
cell_bw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)
# bi_outputs: (fw, bw): fw: [batch, seq, hidden]
# bi_state: (fw, bw): fw : [[batch, hidden] for _ in range(layers)]
bi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
self.encoder_emb_inp,
dtype=self.dtype,
sequence_length=self.seq_length_encoder_input_data,
swap_memory=True)
if num_bi_layers == 1:
encoder_state = bi_state
else:
encoder_state = []
for layer_id in range(num_bi_layers):
encoder_state.append(bi_state[0][layer_id]) # fw state in layer id
encoder_state.append(bi_state[1][layer_id]) # bw state in layer id
encoder_state = tuple(encoder_state)
encoder_outputs = tf.concat(bi_outputs, -1) # [batch, seq, hidden * 2]
else:
_error('Unknown encoder type: {}'.format(hparams.encoder_type))
raise ValueError
return encoder_outputs, encoder_state
def _build_encoder_cell(self, hparams, num_layers, num_residual_layers):
"""Build a multi-layer RNN cell
"""
return _mh.create_rnn_cell(
unit_type=hparams.unit_type,
num_units=self.num_units,
num_layers=num_layers,
num_residual_layers=num_residual_layers,
forget_bias=hparams.forget_bias,
dropout=hparams.dropout,
mode=self.mode)
@_combine_encoder_state
def _get_infer_maximum_iterations(self, hparams):
"""Maximum decoding steps at inference time
"""
if hparams.tgt_max_len_infer:
maximum_iterations = hparams.tgt_max_len_infer
_info('decoding with maximum iterations {}'.format(maximum_iterations))
else:
if self.mode == 'infer':
_error('For inference, tgt_max_len_infer must be set in hparams')
raise ValueError
decoding_length_factor = 3.0
max_encoder_length = tf.reduce_max(self.seq_length_decoder_input_data)
maximum_iterations = tf.to_int32(tf.round(
tf.to_float(max_encoder_length) * decoding_length_factor))
return maximum_iterations
def _build_decoder(self, encoder_outputs, encoder_state, hparams):
"""Build decoder and return results
Args:
encoder_outputs: the outputs from the encoder, [batch, time, hidden] or [batch, time, hidden * 2]
encoder_state: the final state of the encoder, [b, h]([b, h_f], [b, h_b]) for _ in range(layers)
Returns:
logits: [batch, time, vocab_size]
Raises:
ValueError: Unknown infer mode
"""
tgt_sos_id = tf.cast(tf.constant(hparams.sos_id), tf.int32)
tgt_eos_id = tf.cast(tf.constant(hparams.eos_id), tf.int32)
maximum_iterations = self._get_infer_maximum_iterations(hparams)
# Decoder
with tf.variable_scope('decoder') as decoder_scope:
cell, decoder_initial_state = self._build_decoder_cell(hparams, encoder_state)
logits = tf.no_op()
decoder_outputs = None
# Train or Eval
if self.mode != 'infer':
decoder_emb_input = tf.nn.embedding_lookup(self.embedding_decoder, self.decoder_input_data)
# helper
helper = tf.contrib.seq2seq.TrainingHelper(
decoder_emb_input, self.seq_length_decoder_input_data)
# decoder
my_decoder = tf.contrib.seq2seq.BasicDecoder(
cell,
helper,
decoder_initial_state)
# dynamic decoding
outputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(
my_decoder,
swap_memory=True,
scope=decoder_scope)
sample_id = outputs.sample_id
logits = self.output_layer(outputs.rnn_output)
else:
infer_mode = hparams.infer_mode
start_tokens = tf.fill([self.batch_size], tgt_sos_id)
end_token = tgt_eos_id
_info(' decoder by infer_mode={} beam_width={}'.format(infer_mode, hparams.beam_width))
if infer_mode == 'greedy':
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
self.embedding_decoder, start_tokens, end_token)
elif infer_mode == 'beam_search':
beam_width = hparams.beam_width
length_penalty_weight = hparams.length_penalty_weight
coverage_penalty_weight = hparams.coverage_penalty_weight
# beam search does not require a helper
my_decoder = tf.contrib.seq2seq.BeamSearchDecoder(
cell=cell,
embedding=self.embedding_decoder,
start_tokens=start_tokens,
end_token=end_token,
initial_state=decoder_initial_state,
beam_width=beam_width,
output_layer=self.output_layer,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight)
else:
_error('Unknown infer_mode {}'.format(infer_mode))
raise ValueError
if infer_mode != 'beam_search':
my_decoder = tf.contrib.seq2seq.BasicDecoder(
cell,
helper,
decoder_initial_state,
output_layer=self.output_layer) # apply to the RNN output prior to storing the result or sampling
outputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(
my_decoder,
maximum_iterations=maximum_iterations,
swap_memory=True,
scope=decoder_scope)
if infer_mode == 'beam_search':
sample_id = outputs.predicted_ids
else:
logits = outputs.rnn_output
sample_id = outputs.sample_id
return logits, sample_id, final_context_state
def _build_decoder_cell(self, hparams, encoder_state):
"""build RNN cell
"""
if hparams.attention:
_error('The basic model does not support Attention')
raise ValueError
cell = _mh.create_rnn_cell(
unit_type=self.unit_type,
num_units=self.num_units,
num_layers=self.num_decoder_layers,
num_residual_layers=self.num_decoder_residual_layers,
forget_bias=hparams.forget_bias,
dropout=hparams.dropout,
mode=self.mode)
if self.mode == 'infer' and hparams.infer_mode == 'beam_search':
decoder_initial_state = tf.contrib.seq2seq.tile_batch(
encoder_state, multiplier=hparams.beam_width)
else:
decoder_initial_state = encoder_state
return cell, decoder_initial_state
def _compute_loss(self, logits):
"""Compute loss"""
# compute cross-entropy loss
max_time = tf.shape(self.decoder_output_data)[1]
ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=self.decoder_output_data, logits=logits)
target_weights = tf.sequence_mask(
self.seq_length_decoder_input_data,
max_time,
dtype=tf.float32)
# cast to tf.float32; otherwise the mixed-type arithmetic below will fail
bs = tf.cast(self.batch_size, tf.float32)
ce_loss_clear = tf.reduce_sum(ce_loss * target_weights) / bs
loss_per_token = ce_loss_clear * bs / tf.reduce_sum(target_weights)
# vae loss
if self.enable_vae:
kl_loss = tf.cond(tf.cast(self.pre_train, dtype=tf.bool),
lambda : self._vae_loss(bs), lambda : 0.)
loss = ce_loss_clear + kl_loss
return loss, loss_per_token, kl_loss
else:
loss = ce_loss_clear
return loss, loss_per_token, tf.constant(0.)
def _get_learning_rate_warmup_decay(self, hparams):
"""warmup or decay learning rate"""
warm_steps = hparams.warm_steps
warmup_factor = tf.exp(tf.log(0.01) / warm_steps)
inv_decay = warmup_factor ** (tf.to_float(warm_steps - self.global_step))
return tf.cond(self.global_step < hparams.warm_steps,
lambda : inv_decay * self.learning_rate,
lambda : tf.train.exponential_decay(self.learning_rate,
self.global_step - 200000,
hparams.decay_step,
hparams.decay_rate,
staircase=True),
name='learning_rate_warm_decay_cond')
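# --- Illustrative note (not part of the original class) ---
# A rough numpy sketch of the warm-up factor used above, assuming a
# hypothetical warm_steps = 1000: the multiplier grows from 0.01 at step 0
# to 1.0 at step 1000, after which the exponential-decay branch takes over.
#
#     import numpy as np
#     warm_steps = 1000
#     warmup_factor = np.exp(np.log(0.01) / warm_steps)
#     steps = np.arange(warm_steps + 1)
#     multiplier = warmup_factor ** (warm_steps - steps)   # 0.01 ... 1.0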
def train(self, sess, realv):
"""Build train graph"""
assert self.mode == 'train'
output_tuple = TrainOutputTuple(train_loss=self.loss,
loss_per_token=self.loss_per_token,
kl_loss=self.kl_loss,
predict_count=self.predict_count,
global_step=self.global_step,
batch_size=self.batch_size,
learning_rate=self.learning_rate,
train_summary=self.train_summary)
feed_dict = {self.encoder_input_data: realv.encoder_input_data,
self.decoder_input_data: realv.decoder_input_data,
self.decoder_output_data: realv.decoder_output_data,
self.seq_length_encoder_input_data: realv.seq_length_encoder_data,
self.seq_length_decoder_input_data: realv.seq_length_decoder_data}
return sess.run([self.update, output_tuple], feed_dict=feed_dict)
def eval(self, sess, realv):
"""Build eval graph"""
assert self.mode == 'eval'
output_tuple = EvalOutputTuple(sample_id=self.sample_id,
eval_loss=self.loss,
predict_count=self.predict_count,
batch_size=self.batch_size)
feed_dict = {self.encoder_input_data: realv[0],
self.decoder_input_data: realv[1],
self.decoder_output_data: realv[2],
self.seq_length_encoder_input_data: realv[3],
self.seq_length_decoder_input_data: realv[4]}
return sess.run(output_tuple, feed_dict=feed_dict)
if __name__ == '__main__':
import numpy as np
from hparameters import hyper
input_x = np.array([[10, 120, 30, 0, 0], [20, 30, 0, 0, 0], [15, 20, 30, 50, 100]])
seq_input_x = [3, 2, 5]
output_y_input = np.array([[1, 20, 10, 30, 0, 0, 0], [1, 3, 3, 4, 5, 6, 7], [1, 20, 30, 0, 0, 0, 0]])
output_y_output = np.array([[20, 10, 30, 2, 0, 0, 0], [3, 3, 4, 5, 6, 7, 2], [20, 30, 2, 0, 0, 0, 0]])
seq_output_y = [4, 7, 3]
tf.reset_default_graph()
global_graph = tf.Graph()
with global_graph.as_default():
model = BaseModel(hyper, 'train')
init = tf.global_variables_initializer()
local_init = tf.local_variables_initializer()
table_init = tf.tables_initializer()
with tf.Session(graph=global_graph) as sess:
sess.run([init, local_init, table_init])
for i in range(100):
feed_data = [input_x, output_y_input, output_y_output, seq_input_x, seq_output_y]
res = model.train(sess, feed_data)
print('Step {} Loss : {:.2f}'.format(res[1].global_step, res[1].train_loss)) | [
2,
19617,
25,
40477,
12,
23,
198,
2,
21522,
771,
416,
843,
893,
259,
19439,
198,
2,
9130,
62,
12512,
62,
23344,
198,
2,
45827,
2750,
3012,
11,
2034,
29102,
378,
329,
262,
7932,
670,
198,
2,
198,
2,
15069,
13130,
309,
5097,
3457,
... | 2.372003 | 7,008 |
# from .. import (
# PathManager,
# Path_,
# )
from .base import (
BaseFormat,
BaseFormats,
)
from .df import (
DFFormat,
DFFormats,
)
from .audio import (
AudioFormat,
AudioFormats,
)
from .video import (
VideoFormat,
VideoFormats,
FourCC,
)
from .img import (
ImgFormat,
ImgFormats,
) | [
2,
422,
11485,
1330,
357,
198,
2,
220,
220,
10644,
13511,
11,
198,
2,
220,
220,
10644,
62,
11,
198,
2,
1267,
198,
198,
6738,
764,
8692,
1330,
357,
198,
220,
7308,
26227,
11,
198,
220,
7308,
8479,
1381,
11,
198,
8,
198,
198,
6738... | 2.401515 | 132 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628
] | 3.333333 | 6 |
from nltk import sent_tokenize
import pandas as pd
import numpy as np
import joblib
def createData(abstract):
"""
This function extracts the text of each sentence and
the positional information of the sentences in an unstructured abstract.
Args:
abstract - raw text of unstructured abstract.
Returns:
A tuple containing list of sentences from the abstract and,
a list of one hot encoded positional vector for all sentences.
Example :
(
["Although immune-mediated ther..... promising treatment options.",
"In renal cell carcino..... with metastatic disease",
"In urothelial carcinoma, cp..... for other indications.],
[[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[1, 0, 0, 0, 0]]]
)
"""
data = sent_tokenize(abstract)  # Tokenize into sentences
abstracts = []
# Divide abstract into rough sections.
position = ['#', 'FIRST', 'SECOND', 'THIRD', 'FOURTH', 'FIFTH']
for line_no, abst_lines in enumerate(data):
each_line = {}
each_line["text"] = abst_lines
# Categorize the position of each sentence into one of five equal bins (1 to 5)
scale_line = round(( (line_no + 1 - 1) / (len(data) - 1) ) * (5 - 1) + 1)
each_line['position'] = position[scale_line]
abstracts.append(each_line)
abstract = pd.DataFrame(abstracts)
abs_sent = abstract.text
one_hot = joblib.load('Model/one_hot.joblib')
abs_pos = one_hot.transform(np.expand_dims(abstract.position, axis = 1)).toarray()
return (abs_sent, abs_pos)
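# --- Illustrative note (not part of the original module) ---
# A rough sketch of the position bucketing above, assuming a hypothetical
# four-sentence abstract (len(data) == 4): line_no 0..3 map to scale_line
# 1, 2, 4, 5, i.e. positions 'FIRST', 'SECOND', 'FOURTH', 'FIFTH'.
#
#     for line_no in range(4):
#         print(round((line_no / (4 - 1)) * (5 - 1) + 1))   # 1, 2, 4, 5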
| [
6738,
299,
2528,
74,
1330,
1908,
62,
30001,
1096,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1693,
8019,
198,
198,
4299,
2251,
6601,
7,
397,
8709,
2599,
628,
220,
220,
220,
37227,
198,
220,
... | 2.517134 | 642 |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 22 16:58:07 2018
@author: Madhur Kashyap 2016EEZ8350
"""
import math
import logging
import numpy as np
import pandas as pd
from Utils import gen_bidi_map, is_array_or_list, is_dataframe
from AudioUtils import read_sph, extract_features
from TrainUtils import batch_temporal_categorical
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
2758,
2534,
1467,
25,
3365,
25,
2998,
2864,
201,
198,
201,
198,
31,
9800,
25,
4627,
48349,
20578,
88,
499,
1584,
6500,
57,
5999... | 2.591241 | 137 |
from .env import NEnv
from datetime import datetime
MODEL_NEGOTIATION = [
"DQN",
"PPO1",
"PPO2",
"GAIL",
"A2C",
"ACER",
"DDPG",
] | [
6738,
764,
24330,
1330,
399,
4834,
85,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
33365,
3698,
62,
45,
7156,
2394,
40,
6234,
796,
685,
198,
220,
220,
220,
366,
35,
48,
45,
1600,
198,
220,
220,
220,
366,
10246,
46,
16,
1600... | 1.775281 | 89 |
from bs4 import BeautifulSoup
import requests
from google.cloud import firestore
from google.cloud import secretmanager
if __name__ == "__main__":
hello_pubsub(event="", context="")
| [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
7007,
198,
6738,
23645,
13,
17721,
1330,
2046,
8095,
198,
6738,
23645,
13,
17721,
1330,
3200,
37153,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
2... | 3.462963 | 54 |
import os
os.environ["SYNCGIT_REPOS_DIR_NAME"] = os.path.join("tests", "fixtures")
| [
11748,
28686,
198,
198,
418,
13,
268,
2268,
14692,
23060,
7792,
38,
2043,
62,
35316,
2640,
62,
34720,
62,
20608,
8973,
796,
28686,
13,
6978,
13,
22179,
7203,
41989,
1600,
366,
69,
25506,
4943,
198
] | 2.4 | 35 |
import os
import numpy as np
import re
if __name__ == '__main__':
# path = '~/.dnn/datasets/hcp/processing_odf/HCP_1200_tensor'
# process_all_odf_tensors(path)
path = '~/.dnn/datasets/hcp/processing/HCP_1200_tensor'
process_all_dti_tensors(path)
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
302,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
220,
220,
1303,
3108,
796,
705,
93,
11757,
67,
20471,
14,
19608,
292,
1039,
14,... | 2.195122 | 123 |
""" Tools for ploting individual traces
* :py:class:`TraceDB`: Store a map between individual traces and image stacks
"""
# Imports
import pathlib
from typing import Dict, Optional, Tuple, List
# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt
from skimage.feature import peak_local_max
# Our own imports
from ..plotting import colorwheel, set_plot_style
from ..utils import (
guess_channel_dir, find_tiledirs, parse_tile_name, write_movie, LazyImageDir)
from .utils import load_track_csvfile
# Classes
class TraceDB(object):
""" Map between images and traces
:param Path image_dir:
The image directory to load
:param Path track_file:
The parallel track file to load
:param Path activation_dir:
The directory of activation images to load
:param float time_scale:
Scale factor for time (mins/frame)
:param float space_scale:
Scale factor for space (um/min)
:param int padding:
Padding on all sides by this many pixels
:param float linewidth:
Width of the lines used to draw traces
"""
def load_track_file(self,
min_samples: int = 5,
min_distance: float = 1.0,
min_displacement: float = 1.0):
""" Load the track from a file
:param int min_samples:
Minimum samples to keep a track
:param float min_distance:
Minimum distance along a track to keep
:param float min_displacement:
Minimum displacement along a track to keep
"""
filtered_tracks = []
for track in load_track_csvfile(self.track_file):
if len(track) < min_samples:
continue
distance = track.get_track_length(self.space_scale)
displacement = track.get_track_displacement(self.space_scale)
if distance < min_distance:
continue
if displacement < min_displacement:
continue
filtered_tracks.append(track)
if len(filtered_tracks) < 1:
raise ValueError(f'No sufficiently long tracks under: {self.track_file}')
filtered_tracks = list(sorted(filtered_tracks, key=lambda t: len(t), reverse=True))
print(f'Got {len(filtered_tracks)} tracks')
min_len = min([len(t) for t in filtered_tracks])
max_len = max([len(t) for t in filtered_tracks])
print(f'Min track len: {min_len}')
print(f'Max track len: {max_len}')
self.min_track_len = min_len
self.max_track_len = max_len
self.tracks = filtered_tracks
track_bboxes = []
for track in filtered_tracks:
bbox = track.get_bbox()
track_bboxes.append([bbox.x0, bbox.x1, bbox.y0, bbox.y1])
self.track_bboxes = np.array(track_bboxes)
def load_raw_images(self, scale: float = 1.0, suffix: str = ''):
""" Load the raw image database
:param float scale:
If not 1, the rescaling factor to resize images by
:param str suffix:
The suffix for each image to find
"""
self.raw_imgs = LazyImageDir(self.image_dir,
scale=scale,
suffix=suffix,
transpose=self.transpose)
if self.rows is None or self.cols is None:
_, self.rows, self.cols = self.raw_imgs.shape
def load_activation_images(self, scale: float = 1.0, suffix: str = '_resp'):
""" Load the activation image database
:param float scale:
If not 1, the rescaling factor to resize images by
:param str suffix:
The suffix for each image to find
"""
if self.activation_dir is None:
raise ValueError('Cannot load activation images without a valid activation_dir')
self.act_imgs = LazyImageDir(self.activation_dir,
scale=scale,
suffix=suffix,
transpose=self.transpose)
if self.rows is None or self.cols is None:
_, self.rows, self.cols = self.act_imgs.shape
def check_image_alignment(self):
""" Make sure the images and tracks are concordant """
if self.raw_imgs is not None:
print(f'Raw image shape: {self.raw_imgs.shape}')
if self.raw_imgs.shape[0] < self.max_track_len:
raise ValueError(f'Got images with {self.raw_imgs.shape[0]} frames but tracks with {self.max_track_len} timepoints')
if self.act_imgs is not None:
print(f'Act image shape: {self.act_imgs.shape}')
if self.act_imgs.shape[0] < self.max_track_len:
raise ValueError(f'Got activations with {self.act_imgs.shape[0]} frames but tracks with {self.max_track_len} timepoints')
if self.act_imgs is not None and self.raw_imgs is not None:
if self.act_imgs.shape != self.raw_imgs.shape:
raise ValueError(f'Images have shape {self.raw_imgs.shape} but activations are shape {self.act_imgs.shape}')
def plot_all_single_traces(self,
outdir: pathlib.Path,
image_type: str = 'raw',
track_start: int = 0,
track_end: int = -1,
track_step: int = 1,
min_timepoint: int = 0,
max_timepoint: int = -1,
write_to_movie: bool = False,
frames_per_second: int = 5):
""" Plot all traces over a single track
:param Path outdir:
Directory to write traces under
:param int track_start:
Which track index to start with
:param int track_end:
Which track index to end with
:param int track_step:
Step size for iterating over tracks
:param int min_timepoint:
Minimum timepoint to plot
:param int max_timepoint:
Maximum timepoint to plot
:param bool write_to_movie:
If True, write the frames to a movie
:param int frames_per_second:
Frames per second for the trace plot
"""
if track_start < 0:
track_start = len(self.tracks) + track_start
if track_end < 0:
track_end = len(self.tracks) + track_end
if track_start < 0 or track_start >= len(self.tracks):
raise IndexError(f'Invalid start track index {track_start} for {len(self.tracks)} tracks')
if track_end < 0 or track_end >= len(self.tracks):
raise IndexError(f'Invalid end track index {track_end} for {len(self.tracks)} tracks')
for trackid in range(track_start, track_end, track_step):
if image_type == 'raw':
framefiles = self.plot_raw_single_trace(
outdir, trackid,
min_timepoint=min_timepoint,
max_timepoint=max_timepoint)
elif image_type in ('act', 'activation'):
framefiles = self.plot_act_single_trace(
outdir, trackid,
min_timepoint=min_timepoint,
max_timepoint=max_timepoint)
else:
raise KeyError(f'Unknown image type: "{image_type}"')
# Write out the track to a movie
if write_to_movie and outdir is not None:
moviefile = outdir / f'{image_type}-tr{trackid:03d}.mp4'
print(f'Writing to movie: {moviefile}')
write_movie(framefiles, moviefile,
frames_per_second=frames_per_second,
get_size_from_frames=True)
def plot_roi_traces(self,
outdir: Optional[pathlib.Path],
bbox: List[Tuple],
image_type: str = 'raw',
min_timepoint: int = 0,
max_timepoint: int = -1,
write_to_movie: bool = False,
frames_per_second: float = 5) -> List[pathlib.Path]:
""" Plot all traces in a single ROI
:param Path outdir:
The directory to write the traces out to
:param list[tuple] bbox:
The list of bounding box coordinates to use
:param str image_type:
One of "raw" or "act" to select raw images or activations
:param int min_timepoint:
Minimum timepoint index to use
:param int max_timepoint:
Maximum timepoint index to use
:returns:
The list of files written, if any
"""
if image_type == 'raw':
block_img = self.raw_imgs.crop(bbox)
cmap = self.raw_imgs_cmap
prefix = 'raw'
elif image_type in ('act', 'activation'):
block_img = self.act_imgs.crop(bbox)
cmap = self.act_imgs_cmap
prefix = 'act'
else:
raise KeyError(f'Unknown image type: "{image_type}"')
# Figure out the name of the ROI movie
if outdir is None:
trackdir = None
else:
roi_id = 0
while True:
trackdir = outdir / f'{prefix}-roi{roi_id:03d}'
if not trackdir.is_dir():
break
roi_id += 1
trackdir.mkdir(exist_ok=True, parents=True)
palette = colorwheel(self.track_cmap)
xmin = bbox[0][0]
xmax = bbox[0][1]
ymin = bbox[1][0]
ymax = bbox[1][1]
# Aspect ratio for the tracks
aspect = (ymax - ymin) / (xmax - xmin)
fig_x = self.figsize[0] * aspect
fig_y = self.figsize[0]
track_ids = self.calc_tracks_in_bbox(bbox)
tracks = [self.tracks[i].to_arrays() for i in track_ids]
if len(tracks) < 1:
print(f'No tracks found in {bbox}')
return
print(f'Found {len(tracks)} tracks in ROI')
# Render animated because that runs much faster
framefiles = []
with set_plot_style(self.plot_style) as style:
fig, ax = plt.subplots(1, 1, figsize=(fig_x, fig_y))
art1 = ax.imshow(block_img[0, :, :], cmap=cmap, aspect='equal',
vmin=self.vmin, vmax=self.vmax)
artists = [art1]
for i, (timepoints, ys, xs) in enumerate(tracks):
color = palette[i]
art_line = ax.plot(ys[:1] - ymin, xs[:1] - xmin, '-', color=color,
linewidth=self.linewidth)[0]
art_point = ax.plot(ys[0] - ymin, xs[0] - xmin, 'o', color=color,
markersize=self.markersize)[0]
artists.extend([art_line, art_point])
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim([0, ymax-ymin])
ax.set_ylim([xmax-xmin, 0])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
for art in artists:
art.set_animated(True)
fig.canvas.draw()
bg_cache = fig.canvas.copy_from_bbox(ax.bbox)
if min_timepoint < 0:
min_timepoint = block_img.shape[0] + min_timepoint
if max_timepoint < 0:
max_timepoint = block_img.shape[0] + max_timepoint
for i in range(min_timepoint, max_timepoint):
fig.canvas.restore_region(bg_cache)
if i not in block_img:
continue
art1.set_data(block_img[i, :, :])
for j, (timepoints, ys, xs) in enumerate(tracks):
art_line = artists[j*2 + 1]
art_point = artists[j*2 + 2]
t_ed = i - np.min(timepoints)
t_st = t_ed - self.num_tail_points
if t_ed < 0:
art_line.set_visible(False)
art_point.set_visible(False)
elif t_ed >= timepoints.shape[0]:
art_line.set_visible(True)
art_point.set_visible(False)
t_ed = timepoints.shape[0]
art_line.set_data(ys[t_st:t_ed+1] - ymin, xs[t_st:t_ed+1] - xmin)
elif t_st >= timepoints.shape[0]:
art_line.set_visible(False)
art_point.set_visible(False)
else:
art_line.set_visible(True)
art_point.set_visible(True)
t_st = max([0, t_st])
art_line.set_data(ys[t_st:t_ed+1] - ymin, xs[t_st:t_ed+1] - xmin)
art_point.set_data(ys[t_ed] - ymin, xs[t_ed] - xmin)
for art in artists:
art.axes.draw_artist(art)
fig.canvas.blit(ax.bbox)
if trackdir is None:
plt.pause(0.1)
else:
outfile = trackdir / f'{self.image_dir.name}-roi{roi_id:03d}t{i:03d}.tif'
print(f'Saving frame: {outfile}')
style.savefig(str(outfile), transparent=True,
bbox_inches='tight', pad_inches=0)
framefiles.append(outfile)
plt.close()
if write_to_movie and outdir is not None:
moviefile = trackdir.parent / (trackdir.name + '.mp4')
print(f'Writing frames to {moviefile}')
write_movie(framefiles, moviefile,
frames_per_second=frames_per_second,
get_size_from_frames=True)
return framefiles
def plot_raw_single_trace(self, outdir: Optional[pathlib.Path],
trackid: int,
min_timepoint: int = 0,
max_timepoint: int = -1) -> List[pathlib.Path]:
""" Plot a single trace
:param Path outdir:
The root directory where traces will be stored
:param int trackid:
Index of the track to load
:returns:
The list of files written, if any
"""
if outdir is None:
trackdir = None
else:
trackdir = outdir / f'raw-tr{trackid:03d}'
trackdir.mkdir(exist_ok=True, parents=True)
print(f'Track {trackid}')
track = self.tracks[trackid]
timepoints, ys, xs = track.to_arrays()
distance = np.sum(np.sqrt((xs[1:] - xs[:-1])**2 + (ys[1:] - ys[:-1])**2))
displacement = np.sqrt((xs[-1] - xs[0])**2 + (ys[-1] - ys[0])**2)
print(f'Distance: {distance * self.space_scale}')
print(f'Displacement: {displacement * self.space_scale}')
# Work out bounding box around points
bbox = self.calc_image_bbox(xs, ys)
xmin = bbox[0][0]
xmax = bbox[0][1]
ymin = bbox[1][0]
ymax = bbox[1][1]
# Aspect ratio for the tracks
aspect = (ymax - ymin) / (xmax - xmin)
fig_x = self.figsize[0] * aspect
fig_y = self.figsize[0]
block_raw_img = self.raw_imgs.crop(bbox)
# Render animated because that runs much faster
framefiles = []
with set_plot_style(self.plot_style) as style:
fig, ax = plt.subplots(1, 1, figsize=(fig_x, fig_y))
art1 = ax.imshow(block_raw_img[timepoints[0], :, :],
cmap=self.raw_imgs_cmap,
aspect='equal',
vmin=self.vmin,
vmax=self.vmax)
art2 = ax.plot(ys[:1] - ymin, xs[:1] - xmin, '-r',
linewidth=self.linewidth)[0]
art3 = ax.plot(ys[0] - ymin, xs[0] - xmin, 'ro',
markersize=self.markersize)[0]
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim([0, ymax-ymin])
ax.set_ylim([xmax-xmin, 0])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
artists = [art1, art2, art3]
for art in artists:
art.set_animated(True)
fig.canvas.draw()
bg_cache = fig.canvas.copy_from_bbox(ax.bbox)
if min_timepoint < 0:
min_timepoint = timepoints.shape[0] + min_timepoint
if max_timepoint < 0:
max_timepoint = timepoints.shape[0] + max_timepoint
for i in range(min_timepoint, max_timepoint):
fig.canvas.restore_region(bg_cache)
if timepoints[i] not in block_raw_img:
continue
art1.set_data(block_raw_img[timepoints[i], :, :])
art2.set_data(ys[:i+1] - ymin, xs[:i+1] - xmin)
art3.set_data(ys[i] - ymin, xs[i] - xmin)
for art in artists:
art.axes.draw_artist(art)
fig.canvas.blit(ax.bbox)
if trackdir is None:
plt.pause(0.1)
else:
outfile = trackdir / f'{self.image_dir.name}-tr{trackid:03d}t{i:03d}.tif'
print(f'Saving frame: {outfile}')
framefiles.append(outfile)
style.savefig(str(outfile), transparent=True,
bbox_inches='tight', pad_inches=0)
plt.close()
return framefiles
def plot_act_single_trace(self, outdir: Optional[pathlib.Path],
trackid: int,
min_timepoint: int = 0,
max_timepoint: int = -1) -> List[pathlib.Path]:
""" Plot the traces over the activation images
:returns:
The list of files written, if any
"""
if outdir is None:
trackdir = None
else:
trackdir = outdir / f'act-tr{trackid:03d}'
trackdir.mkdir(exist_ok=True, parents=True)
print(f'Track {trackid}')
track = self.tracks[trackid]
timepoints, ys, xs = track.to_arrays()
bbox = self.calc_image_bbox(xs, ys)
xmin = bbox[0][0]
xmax = bbox[0][1]
ymin = bbox[1][0]
ymax = bbox[1][1]
# Aspect ratio for the tracks
aspect = (ymax - ymin) / (xmax - xmin)
fig_x = self.figsize[0] * aspect
fig_y = self.figsize[0]
block_act_img = self.act_imgs.crop(bbox)
framefiles = []
with set_plot_style(self.plot_style) as style:
fig, ax = plt.subplots(1, 1, figsize=(fig_x, fig_y))
art4 = ax.imshow(block_act_img[timepoints[0], :, :],
cmap=self.act_imgs_cmap,
aspect='equal',
vmin=self.vmin,
vmax=self.vmax)
peaks = peak_local_max(block_act_img[timepoints[0], :, :],
min_distance=3,
threshold_abs=50,
exclude_border=0)
art5 = ax.plot(peaks[:, 1], peaks[:, 0],
'go', markersize=20)[0]
art6 = ax.plot(ys[:1] - ymin, xs[:1] - xmin, '-r',
linewidth=self.linewidth)[0]
art7 = ax.plot(ys[0] - ymin, xs[0] - xmin, 'ro',
markersize=self.markersize)[0]
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim([0, ymax-ymin])
ax.set_ylim([xmax-xmin, 0])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
artists = [art4, art5, art6, art7]
for art in artists:
art.set_animated(True)
fig.canvas.draw()
bg_cache = fig.canvas.copy_from_bbox(ax.bbox)
if min_timepoint < 0:
min_timepoint = timepoints.shape[0] + min_timepoint
if max_timepoint < 0:
max_timepoint = timepoints.shape[0] + max_timepoint
for i in range(min_timepoint, max_timepoint):
fig.canvas.restore_region(bg_cache)
if timepoints[i] not in block_act_img:
continue
peaks = peak_local_max(block_act_img[timepoints[i], :, :],
min_distance=3,
threshold_abs=50,
exclude_border=0)
art4.set_data(block_act_img[timepoints[i], :, :])
art5.set_data(peaks[:, 1], peaks[:, 0])
art6.set_data(ys[:i+1] - ymin, xs[:i+1] - xmin)
art7.set_data(ys[i] - ymin, xs[i] - xmin)
for art in artists:
art.axes.draw_artist(art)
fig.canvas.blit(ax.bbox)
if trackdir is None:
plt.pause(0.1)
else:
outfile = trackdir / f'{self.activation_dir.name}-tr{trackid:03d}t{i:03d}.png'
print(f'Saving frame: {outfile}')
style.savefig(str(outfile), transparent=True,
bbox_inches='tight', pad_inches=0)
framefiles.append(outfile)
plt.close()
return framefiles
def calc_bbox_from_trackid(self, trackid: int) -> List[Tuple[int]]:
""" Calculate the bounding box from a track identifier
:param int trackid:
The track index to use
:returns:
A bounding box around that track in image coordinates
"""
track = self.tracks[trackid]
_, ys, xs = track.to_arrays()
return self.calc_image_bbox(xs, ys)
def calc_image_bbox(self, xs: np.ndarray, ys: np.ndarray) -> List[Tuple[int]]:
""" Calculate the bounding box around a set of tracks
:param ndarray xs:
The x coordinates for the track
:param ndarray ys:
The y coordinates for the track
:returns:
A bounding box around that track in image coordinates
"""
xmin = np.floor(np.min(xs)) - self.padding
xmax = np.ceil(np.max(xs)) + self.padding
xmin = int(max(xmin, 0))
xmax = int(min(xmax, self.rows))
ymin = np.floor(np.min(ys)) - self.padding
ymax = np.ceil(np.max(ys)) + self.padding
ymin = int(max(ymin, 0))
ymax = int(min(ymax, self.cols))
print(f'X Range: {xmin} to {xmax}')
print(f'Y Range: {ymin} to {ymax}')
return [(xmin, xmax), (ymin, ymax)]
def calc_tracks_in_bbox(self, bbox: List[Tuple]) -> np.ndarray:
""" Calculate the tracks that fall within an image bounding box
:returns:
A list of track ids that fall in this bounding box
"""
ymin = bbox[0][0]
ymax = bbox[0][1]
xmin = bbox[1][0]
xmax = bbox[1][1]
track_bboxes = self.track_bboxes
xmask = np.logical_and(np.any(track_bboxes[:, 0:2] >= xmin, axis=1),
np.any(track_bboxes[:, 0:2] <= xmax, axis=1))
ymask = np.logical_and(np.any(track_bboxes[:, 2:4] >= ymin, axis=1),
np.any(track_bboxes[:, 2:4] <= ymax, axis=1))
return np.where(np.logical_and(xmask, ymask))[0]
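# --- Illustrative note (not part of the original class) ---
# A minimal numpy sketch of the overlap test above, assuming track_bboxes
# rows are [x0, x1, y0, y1]: a track is kept when part of its x-extent and
# part of its y-extent fall inside the query bounding box.
#
#     import numpy as np
#     track_bboxes = np.array([[0, 5, 0, 5], [10, 20, 10, 20]])
#     xmin, xmax, ymin, ymax = 0, 8, 0, 8
#     xmask = np.logical_and(np.any(track_bboxes[:, 0:2] >= xmin, axis=1),
#                            np.any(track_bboxes[:, 0:2] <= xmax, axis=1))
#     ymask = np.logical_and(np.any(track_bboxes[:, 2:4] >= ymin, axis=1),
#                            np.any(track_bboxes[:, 2:4] <= ymax, axis=1))
#     np.where(np.logical_and(xmask, ymask))[0]   # -> array([0])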
# Helper methods
@staticmethod
def find_image_dir(rootdir: pathlib.Path, tileno: int, channel: str) -> pathlib.Path:
""" Find the image directory """
channel, channel_dir = guess_channel_dir(rootdir / 'Corrected', channel)
print(f'Following on channel: {channel}')
tiledirs = list(find_tiledirs(channel_dir, tiles=tileno))
if len(tiledirs) == 0:
raise OSError(f'No tile data for tile {tileno}: {rootdir}')
if len(tiledirs) > 1:
raise OSError(f'Multiple tiles match {tileno}: {rootdir}')
return tiledirs[0][1]
@staticmethod
def find_track_file(track_dir: pathlib.Path, tile_data: Dict) -> pathlib.Path:
""" Find the track file for a given set of tile data """
track_filenames = ['s{tile:02d}-{condition}_traces.csv'.format(**tile_data),
's{tile:02d}_traces.csv'.format(**tile_data)]
track_file = None
for track_filename in track_filenames:
if (track_dir / track_filename).is_file():
track_file = track_dir / track_filename
break
if track_file is None or not track_file.is_file():
raise OSError(f'Cannot find track data under {track_dir} matching {tile_data}')
return track_file
@staticmethod
def find_activation_dir(activation_rootdir: pathlib.Path, tile_data: Dict) -> pathlib.Path:
""" Find where the activations are for this track """
act_dirnames = ['s{tile:02d}-{condition}'.format(**tile_data),
's{tile:02d}'.format(**tile_data)]
activation_dir = None
for act_dirname in act_dirnames:
if (activation_rootdir / act_dirname).is_dir():
activation_dir = activation_rootdir / act_dirname
break
if activation_dir is None:
raise OSError(f'Cannot find activations under {activation_rootdir} matching {tile_data}')
return activation_dir
@classmethod
def from_tileno(cls,
rootdir: pathlib.Path,
tileno: int,
channel: pathlib.Path = 'gfp',
detector: pathlib.Path = 'composite',
**kwargs) -> 'TraceDB':
""" Load a database from a rootdir/tileno combination
:param Path rootdir:
The experiment directory to look at
:param int tileno:
The tile number to look at
:param str channel:
The channel to look for
:param str detector:
The detector to look for
"""
image_dir = cls.find_image_dir(rootdir, tileno, channel)
# Go fishing for matching tracks
tile_data = parse_tile_name(image_dir.name)
activation_rootdir = guess_channel_dir(rootdir / f'SingleCell-{detector}' / 'Corrected', channel)[1]
track_dir = rootdir / f'CellTracking-{detector}' / 'Tracks'
track_file = cls.find_track_file(track_dir, tile_data)
try:
activation_dir = cls.find_activation_dir(activation_rootdir, tile_data)
except OSError:
print(f'Activation dir not found under {activation_rootdir}')
activation_dir = None
return cls(image_dir=image_dir,
track_file=track_file,
activation_dir=activation_dir,
**kwargs)
| [
37811,
20003,
329,
7110,
278,
1981,
20675,
198,
198,
9,
1058,
9078,
25,
4871,
25,
63,
2898,
558,
11012,
63,
25,
9363,
257,
3975,
1022,
1981,
20675,
290,
2939,
24285,
198,
198,
37811,
198,
198,
2,
1846,
3742,
198,
11748,
3108,
8019,
... | 1.872631 | 14,509 |
#!/usr/bin/env python3
#-*- encoding: utf-8 -*-
# if(re.findall(r"^>[a-zA-Z0-9]{6}$",line)):
try:
import numpy as np
except ImportError:
raise ImportError("pip3 install numpy")
try:
import pandas as pd
except ImportError:
raise ImportError("pip3 install pandas")
import os, argparse, math, re
from decimal import Decimal
# Create a list with the letters of the dictionary and store the value
global keys
#keys = ['A', 'T', 'C', 'G']
keys = ['A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V','B','Z', '-']
if __name__ == "__main__":
main()
| [
2,
14,
8800,
14,
268,
14259,
21015,
18,
198,
2,
12,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
197,
361,
7,
260,
13,
19796,
439,
7,
81,
1,
61,
36937,
64,
12,
89,
32,
12,
57,
15,
12,
24,
60,
90,
21,
92... | 2.305556 | 252 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BoostExpression(Model):
"""An expression that modifies the ranking score for item search results
satisfying a condition.
:param condition: The condition that items must satisfy to be boosted.
:type condition: ~microsoft.bing.commerce.search.models.ConditionBase
:param boost: The value to add to the ranking score. The range is
-10,000,000 to 10,000,000.
:type boost: float
"""
_attribute_map = {
'condition': {'key': 'condition', 'type': 'ConditionBase'},
'boost': {'key': 'boost', 'type': 'float'},
}
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
201,
198,
2,
49962,
739,
262,
17168,
5964,
13,
201,
198,
201,
198,
2,
19617,
28,
40477,
12,
23,
201,
198,
2,
16529,
35937,
201,
198,
2,
6127,
7560,
416,
5413,
357,
49,
8,
11160,
19452,
6127... | 3.431438 | 299 |
from typing import Dict
from typing import List
import pytest
import pytorch_lightning as pl
import torch
from torch import nn
import torch.nn.functional as F
import optuna
from optuna.integration import PyTorchLightningPruningCallback
from optuna.testing.integration import create_running_trial
from optuna.testing.integration import DeterministicPruner
| [
6738,
19720,
1330,
360,
713,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
12972,
9288,
198,
11748,
12972,
13165,
354,
62,
2971,
768,
355,
458,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124... | 3.71134 | 97 |
top_card = None
player_hand = None
eventID = None
drawn_cards = None
playerTurn = None
winner = None
chosenColour = None
playable_cards = None
currentChoice = None
okPrompt = None
colourChange = None | [
4852,
62,
9517,
796,
6045,
198,
7829,
62,
4993,
220,
796,
6045,
198,
15596,
2389,
796,
6045,
198,
41549,
62,
27761,
796,
6045,
198,
7829,
17278,
796,
6045,
198,
39791,
796,
6045,
198,
354,
5233,
5216,
454,
796,
6045,
198,
1759,
540,
... | 3.225806 | 62 |
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
# -*- coding: utf-8 -*-
"""
Provides utilities for running components
"""
import time
import six
from functools import partial
import click
from dcae_cli.util import docker_util as du
from dcae_cli.util import dmaap, inputs
from dcae_cli.util.cdap_util import run_component as run_cdap_component
from dcae_cli.util.exc import DcaeException
from dcae_cli.util import discovery as dis
from dcae_cli.util.discovery import get_user_instances, config_context, \
replace_dots
import dcae_cli.util.profiles as profiles
from dcae_cli.util.logger import get_logger
from dcae_cli.catalog.mock.catalog import build_config_keys_map, \
get_data_router_subscriber_route
# This seems to be an abstraction leak
from dcae_cli.catalog.mock.schema import apply_defaults_docker_config
log = get_logger('Run')
def _update_delivery_urls(spec, target_host, dmaap_map):
"""Updates the delivery urls for data router subscribers"""
# Try to stick in the more appropriate delivery url which is not realized
# until after deployment because you need the ip, port.
# Realized that this is not actually needed by the component but kept it because
# it might be useful for component developers to **see** this info.
get_route_func = partial(get_data_router_subscriber_route, spec)
target_base_url = "http://{0}".format(target_host)
return dmaap.update_delivery_urls(get_route_func, target_base_url,
dmaap_map)
def _verify_component(name, max_wait, consul_host):
"""Verify that the component is healthy
Args:
-----
max_wait (integer): limit on how many attempts to make, which translates to
seconds because each sleep is one second. 0 means infinite.
Return:
-------
True if component is healthy else returns False
"""
num_attempts = 1
while True:
if dis.is_healthy(consul_host, name):
return True
else:
num_attempts += 1
if max_wait > 0 and max_wait < num_attempts:
return False
time.sleep(1)
def run_component(user, cname, cver, catalog, additional_user, attached, force,
dmaap_map, inputs_map, external_ip=None):
'''Runs a component based on the component type
Args
----
force: (boolean)
Continue to run even when there are no valid downstream components,
when this flag is set to True.
dmaap_map: (dict) config_key to message router or data router connections.
Used as a manual way to make this information available to the component.
inputs_map: (dict) config_key to value that is intended to be provided at
deployment time as an input
'''
cname, cver = catalog.verify_component(cname, cver)
ctype = catalog.get_component_type(cname, cver)
profile = profiles.get_profile()
instance_map = _get_instances(user, additional_user)
neighbors = six.iterkeys(instance_map)
dmaap_config_keys = catalog.get_discovery_for_dmaap(cname, cver)
if not dmaap.validate_dmaap_map_entries(dmaap_map, *dmaap_config_keys):
return
if ctype == 'docker':
params, interface_map = catalog.get_discovery_for_docker(cname, cver, neighbors)
should_wait = attached
spec = catalog.get_component_spec(cname, cver)
config_key_map = build_config_keys_map(spec)
inputs_map = inputs.filter_entries(inputs_map, spec)
dmaap_map = _update_delivery_urls(spec, profile.docker_host.split(":")[0],
dmaap_map)
with config_context(user, cname, cver, params, interface_map,
instance_map, config_key_map, dmaap_map=dmaap_map, inputs_map=inputs_map,
always_cleanup=should_wait, force_config=force) as (instance_name, _):
image = catalog.get_docker_image(cname, cver)
docker_config = catalog.get_docker_config(cname, cver)
docker_logins = dis.get_docker_logins()
if should_wait:
du.deploy_component(profile, image, instance_name, docker_config,
should_wait=True, logins=docker_logins)
else:
result = du.deploy_component(profile, image, instance_name, docker_config,
logins=docker_logins)
log.debug(result)
if result:
log.info("Deployed {0}. Verifying..".format(instance_name))
# TODO: Be smarter here but for now wait longer i.e. 5min
max_wait = 300 # 300s == 5min
if _verify_component(instance_name, max_wait,
dis.default_consul_host()):
log.info("Container is up and healthy")
# This block of code is used to construct the delivery
# urls for data router subscribers and to display it for
# users to help with manually provisioning feeds.
results = dis.lookup_instance(dis.default_consul_host(),
instance_name)
target_host = dis.parse_instance_lookup(results)
dmaap_map = _update_delivery_urls(spec, target_host, dmaap_map)
delivery_urls = dmaap.list_delivery_urls(dmaap_map)
if delivery_urls:
msg = "\n".join(["\t{k}: {url}".format(k=k, url=url)
for k, url in delivery_urls])
msg = "\n\n{0}\n".format(msg)
log.warn("Your component is a data router subscriber. Here are the delivery urls: {0}".format(msg))
else:
log.warn("Container never became healthy")
else:
raise DcaeException("Failed to deploy docker component")
elif ctype == 'cdap':
(jar, config, spec) = catalog.get_cdap(cname, cver)
config_key_map = build_config_keys_map(spec)
inputs_map = inputs.filter_entries(inputs_map, spec)
params, interface_map = catalog.get_discovery_for_cdap(cname, cver, neighbors)
with config_context(user, cname, cver, params, interface_map, instance_map,
config_key_map, dmaap_map=dmaap_map, inputs_map=inputs_map, always_cleanup=False,
force_config=force) as (instance_name, templated_conf):
run_cdap_component(catalog, params, instance_name, profile, jar, config, spec, templated_conf)
else:
raise DcaeException("Unsupported component type for run")
def dev_component(user, catalog, specification, additional_user, force, dmaap_map,
inputs_map):
'''Sets up the discovery layer for an in-development component
The passed-in component specification is:
* Validated
* Generates the corresponding application config
* Pushes the application config and rels key into Consul
This allows developers to play with their spec and the resulting configuration
outside of being in the catalog and in a container.
Args
----
user: (string) user name
catalog: (object) instance of MockCatalog
specification: (dict) experimental component specification
additional_user: (string) another user name used to source additional
component instances
force: (boolean)
Continue to run even when there are no valid downstream components when
this flag is set to True.
dmaap_map: (dict) config_key to message router connections. Used as a
manual way to make this information available to the component.
inputs_map: (dict) config_key to value that is intended to be provided at
deployment time as an input
'''
instance_map = _get_instances(user, additional_user)
neighbors = six.iterkeys(instance_map)
params, interface_map, dmaap_config_keys = catalog.get_discovery_from_spec(
user, specification, neighbors)
if not dmaap.validate_dmaap_map_entries(dmaap_map, *dmaap_config_keys):
return
cname = specification["self"]["name"]
cver = specification["self"]["version"]
config_key_map = build_config_keys_map(specification)
inputs_map = inputs.filter_entries(inputs_map, specification)
dmaap_map = _update_delivery_urls(specification, "localhost", dmaap_map)
with config_context(user, cname, cver, params, interface_map, instance_map,
config_key_map, dmaap_map, inputs_map=inputs_map, always_cleanup=True,
force_config=force) \
as (instance_name, templated_conf):
click.echo("Ready for component development")
if specification["self"]["component_type"] == "docker":
# The env building is only for docker right now
docker_config = apply_defaults_docker_config(specification["auxilary"])
envs = du.build_envs(profiles.get_profile(), docker_config, instance_name)
envs_message = "\n".join(["export {0}={1}".format(k, v) for k,v in envs.items()])
envs_filename = "env_{0}".format(profiles.get_active_name())
with open(envs_filename, "w") as f:
f.write(envs_message)
click.echo()
click.echo("Setup these environment varibles. Run \"source {0}\":".format(envs_filename))
click.echo()
click.echo(envs_message)
click.echo()
else:
click.echo("Set the following as your HOSTNAME:\n {0}".format(instance_name))
input("Press any key to stop and to clean up")
| [input_ids list truncated] | 2.513584 | 4,233 |
from toee import *
import char_class_utils
###################################################
classEnum = stat_level_horizon_walker
###################################################
class_feats = {
}
class_skills = (skill_balance, skill_climb, skill_diplomacy, skill_handle_animal, skill_hide, skill_knowledge_nature, skill_listen, skill_move_silently, skill_profession, skill_ride, skill_spot, skill_wilderness_lore)
| [input_ids list truncated] | 3.605042 | 119 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Unit tests for txt_utils.py.
"""
from pyhandy import remove_multiple_newlines_in_txt, trim_spaces_and_tabs_from_lines_in_txt
| [input_ids list truncated] | 2.405405 | 74 |
import argparse
import sys
import os
import importlib
from parser import parser
parser.parse(sys.argv[1], sys.argv[2])
| [input_ids list truncated] | 3.05 | 40 |
"""
Copyright [2019-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from allocation_service.genomic_features import ProteinCodingGene
"""module for classes handling annotation events. An event is a change to a locus with one or more overlapping genes"""
class CreateGeneModelEvent(AnnotationEvent):
"""A new gene model created at a locus with no reference gene"""
"""A split of one gene model into two or more gene models"""
"""A merge of two or more gene models into one gene model"""
class EditOnlyEvent(AnnotationEvent):
"""A change to the gene model structure"""
| [input_ids list truncated] | 3.898601 | 286 |
pi = 3.14159
r = int(input())
volume = (4/3) * pi * r ** 3
print("VOLUME = %.3f" % volume) | [
14415,
796,
513,
13,
1415,
19707,
198,
198,
81,
796,
493,
7,
15414,
28955,
198,
198,
29048,
796,
357,
19,
14,
18,
8,
1635,
31028,
1635,
374,
12429,
513,
198,
198,
4798,
7203,
44558,
38340,
796,
4064,
13,
18,
69,
1,
4064,
6115,
8
] | 2.113636 | 44 |
"""Change RelativeModifier.value to a Numeric/Decimal
Revision ID: 5795c29b2c7a
Revises: 19506187e7aa
Create Date: 2014-07-23 14:43:45.748696
"""
# revision identifiers, used by Alembic.
revision = '5795c29b2c7a'
down_revision = '19506187e7aa'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import update, select, table, column
from decimal import Decimal
| [input_ids list truncated] | 2.768116 | 138 |
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2018 Fernando Serena.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
from expiringdict import ExpiringDict
from graphql import parse, build_ast_schema, MiddlewareManager, Source, validate, execute
from graphql.execution import ExecutionResult
from agora_graphql.gql.executor import AgoraExecutor
from agora_graphql.gql.middleware import AgoraMiddleware
from agora_graphql.gql.schema import create_gql_schema
__author__ = 'Fernando Serena'
| [input_ids list truncated] | 4.020134 | 298 |
from PySide2.QtWidgets import QDialog, QFileDialog
from .settings_dialog_ui import Ui_SettingsDialogUi | [input_ids list truncated] | 3 | 34 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Yijia Zheng
# @email : yj.zheng@siat.ac.cn
# @Time : 2020/04/11 17:40:28
import logging
import numpy as np
| [input_ids list truncated] | 2.070588 | 85 |
"""/web/app/syzygy/wishlist/__init__.py
Author: Adam Green (adam.green1@maine.edu)
[Description]
Classes:
[ClassesList]
Functions:
[FunctionsList]
"""
import logging
from .model import Wishlist
from .schema import WishlistSchema
BASE_ROUTE = "wishlist"
log = logging.getLogger(__name__)
| [input_ids list truncated] | 2.475806 | 124 |
def _nr():
"""Get the current INPUT_LINE_NUMBER"""
global INPUT_LINE_NUMBER
try:
return fileinput.lineno()
except RuntimeError:
return INPUT_LINE_NUMBER
| [input_ids list truncated] | 2.229885 | 87 |
import tensorflow as tf
__all__ = ["auxillary_loss"]
def auxillary_loss(disc_target, disc_preds, cont_trg, cont_pred):
r"""
Args:
disc_target (tensor): a tensor representing the discriminator target output
disc_preds (tensor): a tensor representing the prediction of discriminator
cont_trg (tensor): a tensor representing the content of the target
cont_pred (tensor): a tensor representing the predicted content
Return:
a tensor representing the auxillary loss
"""
disc_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=disc_target, logits=disc_preds)
)
cont_loss = tf.reduce_mean(tf.reduce_sum(tf.square(cont_trg - cont_pred), axis=1))
aux_loss = disc_loss + cont_loss
return aux_loss
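# In equation form, the auxiliary loss above is
#   L_aux = mean(sigmoid_cross_entropy(disc_target, disc_preds))
#           + mean(sum_i (cont_trg_i - cont_pred_i)**2),
# i.e. a binary cross-entropy term for the discrete code plus a squared-error
# term for the continuous code.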
| [input_ids list truncated] | 2.604575 | 306 |
import dataclasses
import uuid
@dataclasses.dataclass
| [input_ids list truncated] | 2.947368 | 19 |
# -*- coding: utf-8 -*-
from django.conf.urls import include, url
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from rest_framework import routers
from . import views
from . import api
router = routers.DefaultRouter()
router.register(r'pages', api.PageViewSet)
router.register(r'menus', api.MenuViewSet)
urlpatterns = [
url('^accounts/login/$', auth_views.login, {'template_name': 'admin/login.html'}, name='cow_login'),
url('^api/', include(router.urls)),
url('^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(
regex="^menu/create/$",
view=login_required(views.MenuCreateView.as_view()),
name='menu_create',
),
url(
regex="^menu/(?P<pk>\d+)/create/$",
view=login_required(views.MenuChildCreateView.as_view()),
name='menu_child_create',
),
url(
regex="^menu/delete/(?P<pk>\d+)/$",
view=login_required(views.MenuDeleteView.as_view()),
name='menu_delete',
),
url(
regex="^menu/update/(?P<pk>\d+)/$",
view=login_required(views.MenuUpdateView.as_view()),
name='menu_update',
),
url(
regex="^menu/(?P<pk>\d+)/$",
view=login_required(views.MenuDetailView.as_view()),
name='menu_detail',
),
url(
regex="^$",
view=login_required(views.PageListView.as_view()),
name='page_list',
),
url(
regex="^page/create/$",
view=login_required(views.PageCreateView.as_view()),
name='page_create',
),
url(
regex="^page/delete/(?P<pk>\d+)/$",
view=login_required(views.PageDeleteView.as_view()),
name='page_delete',
),
url(
regex="^page/(?P<pk>\d+)/$",
view=login_required(views.PageUpdateView.as_view()),
name='page_update',
),
url(
regex="^page/(?P<pk>\d+)/plugin/create/$",
view=login_required(views.PluginCreateView.as_view()),
name='plugin_create',
),
url(
regex="^plugin/delete/(?P<pk>\d+)/$",
view=login_required(views.PluginDeleteView.as_view()),
name='plugin_delete',
),
url(
regex="^plugin/text/(?P<pk>\d+)/$",
view=login_required(views.TextPluginUpdateView.as_view()),
name='text_plugin_edit',
),
url(
regex="^plugin/address/(?P<pk>\d+)/$",
view=login_required(views.AddressPluginUpdateView.as_view()),
name='address_plugin_edit',
),
url(
regex="^plugin/image/(?P<pk>\d+)/$",
view=login_required(views.ImagePluginUpdateView.as_view()),
name='image_plugin_edit',
),
]
| [input_ids list truncated] | 2.143083 | 1,265 |
import math
big = 600851475143
for i in range (3, 150000000000):
factor = big/i
if (math.floor(factor) * i == big and math.ceil(factor) * i == big):
for j in range (3, i+1):
factor2 = i/j
if (math.floor(factor2) *j == i and math.ceil(factor2) * j == i):
print("Not Perfect", j)
elif(j == i):
print("Perfect", i)
# Requires reentering large factors to check for 'primeness', but it works
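# A minimal alternative sketch (not part of the original script, shown only for
# comparison): repeatedly dividing each found factor out of the number removes
# the need for the separate primality re-check above, because any divisor found
# this way is necessarily prime.
def largest_prime_factor(n):
    factor = 2
    while factor * factor <= n:
        if n % factor == 0:
            n //= factor   # divide the factor out; repeated passes remove all copies
        else:
            factor += 1
    return n               # whatever remains is the largest prime factor

print("Largest prime factor:", largest_prime_factor(600851475143))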
| [input_ids list truncated] | 2.145455 | 220 |
# Initialize the login.py file
# First modification | [input_ids list truncated] | 0.758621 | 29 |
#
# Dimensional and dimensionless parameter values, and scales
#
import pybamm
import pandas as pd
import os
import numbers
import numpy as np
from pprint import pformat
class ParameterValues:
"""
The parameter values for a simulation.
Note that this class does not inherit directly from the python dictionary class as
this causes issues with saving and loading simulations.
Parameters
----------
values : dict or string
Explicit set of parameters, or reference to a file of parameters
If string, gets passed to read_parameters_csv to read a file.
chemistry : dict
Dict of strings for default chemistries. Must be of the form:
{"base chemistry": base_chemistry,
"cell": cell_properties_authorYear,
"anode": anode_chemistry_authorYear,
"separator": separator_chemistry_authorYear,
"cathode": cathode_chemistry_authorYear,
"electrolyte": electrolyte_chemistry_authorYear,
"experiment": experimental_conditions_authorYear}.
Then the anode chemistry is loaded from the file
inputs/parameters/base_chemistry/anodes/anode_chemistry_authorYear, etc.
Parameters in "cell" should include geometry and current collector properties.
Parameters in "experiment" should include parameters relating to experimental
conditions, such as initial conditions and currents.
Examples
--------
>>> import pybamm
>>> values = {"some parameter": 1, "another parameter": 2}
>>> param = pybamm.ParameterValues(values)
>>> param["some parameter"]
1
>>> file = "input/parameters/lithium-ion/cells/kokam_Marquis2019/parameters.csv"
>>> values_path = pybamm.get_parameters_filepath(file)
>>> param = pybamm.ParameterValues(values=values_path)
>>> param["Negative current collector thickness [m]"]
2.5e-05
>>> param = pybamm.ParameterValues(chemistry=pybamm.parameter_sets.Marquis2019)
>>> param["Reference temperature [K]"]
298.15
"""
def __setitem__(self, key, value):
"Call the update functionality when doing a setitem"
self.update({key: value})
def keys(self):
"Get the keys of the dictionary"
return self._dict_items.keys()
def values(self):
"Get the values of the dictionary"
return self._dict_items.values()
def items(self):
"Get the items of the dictionary"
return self._dict_items.items()
def search(self, key, print_values=True):
"""
Search dictionary for keys containing 'key'.
See :meth:`pybamm.FuzzyDict.search()`.
"""
return self._dict_items.search(key, print_values)
def update_from_chemistry(self, chemistry):
"""
Load standard set of components from a 'chemistry' dictionary
"""
base_chemistry = chemistry["chemistry"]
# Create path to file
path = os.path.join(
pybamm.root_dir(), "pybamm", "input", "parameters", base_chemistry
)
# Load each component name
for component_group in [
"cell",
"anode",
"cathode",
"separator",
"electrolyte",
"experiment",
]:
# Make sure component is provided
try:
component = chemistry[component_group]
except KeyError:
raise KeyError(
"must provide '{}' parameters for {} chemistry".format(
component_group, base_chemistry
)
)
# Create path to component and load values
component_path = os.path.join(path, component_group + "s", component)
component_params = self.read_parameters_csv(
pybamm.get_parameters_filepath(
os.path.join(component_path, "parameters.csv")
)
)
# Update parameters, making sure to check any conflicts
self.update(
component_params,
check_conflict=True,
check_already_exists=False,
path=component_path,
)
# register citations
if "citation" in chemistry:
citation = chemistry["citation"]
pybamm.citations.register(citation)
def read_parameters_csv(self, filename):
"""Reads parameters from csv file into dict.
Parameters
----------
filename : str
The name of the csv file containing the parameters.
Returns
-------
dict
{name: value} pairs for the parameters.
"""
df = pd.read_csv(filename, comment="#", skip_blank_lines=True)
# Drop rows that are all NaN (seems to not work with skip_blank_lines)
df.dropna(how="all", inplace=True)
return {k: v for (k, v) in zip(df["Name [units]"], df["Value"])}
def update(self, values, check_conflict=False, check_already_exists=True, path=""):
"""
Update parameter dictionary, while also performing some basic checks.
Parameters
----------
values : dict
Dictionary of parameter values to update parameter dictionary with
check_conflict : bool, optional
Whether to check that a parameter in `values` has not already been defined
in the parameter class when updating it, and if so that its value does not
change. This is set to True during initialisation, when parameters are
combined from different sources, and is False by default otherwise
check_already_exists : bool, optional
Whether to check that a parameter in `values` already exists when trying to
update it. This is to avoid cases where an intended change in the parameters
is ignored due a typo in the parameter name, and is True by default but can
be manually overridden.
path : string, optional
Path from which to load functions
"""
# update
for name, value in values.items():
# check for conflicts
if (
check_conflict is True
and name in self.keys()
and not (self[name] == float(value) or self[name] == value)
):
raise ValueError(
"parameter '{}' already defined with value '{}'".format(
name, self[name]
)
)
# check parameter already exists (for updating parameters)
if check_already_exists is True:
try:
self._dict_items[name]
except KeyError as err:
raise KeyError(
"Cannot update parameter '{}' as it does not ".format(name)
+ "have a default value. ({}). If you are ".format(err.args[0])
+ "sure you want to update this parameter, use "
+ "param.update({{name: value}}, check_already_exists=False)"
)
# if no conflicts, update, loading functions and data if they are specified
# Functions are flagged with the string "[function]"
if isinstance(value, str):
if value.startswith("[function]"):
loaded_value = pybamm.load_function(
os.path.join(path, value[10:] + ".py")
)
self._dict_items[name] = loaded_value
values[name] = loaded_value
# Data is flagged with the string "[data]" or "[current data]"
elif value.startswith("[current data]") or value.startswith("[data]"):
if value.startswith("[current data]"):
data_path = os.path.join(
pybamm.root_dir(), "pybamm", "input", "drive_cycles"
)
filename = os.path.join(data_path, value[14:] + ".csv")
function_name = value[14:]
else:
filename = os.path.join(path, value[6:] + ".csv")
function_name = value[6:]
filename = pybamm.get_parameters_filepath(filename)
data = pd.read_csv(
filename, comment="#", skip_blank_lines=True, header=None
).to_numpy()
# Save name and data
self._dict_items[name] = (function_name, data)
values[name] = (function_name, data)
elif value == "[input]":
self._dict_items[name] = pybamm.InputParameter(name)
# Anything else should be converted to a float
else:
self._dict_items[name] = float(value)
values[name] = float(value)
else:
self._dict_items[name] = value
# check parameter values
self.check_and_update_parameter_values(values)
# reset processed symbols
self._processed_symbols = {}
def process_model(self, unprocessed_model, inplace=True):
"""Assign parameter values to a model.
Currently inplace, could be changed to return a new model.
Parameters
----------
unprocessed_model : :class:`pybamm.BaseModel`
Model to assign parameter values for
inplace: bool, optional
If True, replace the parameters in the model in place. Otherwise, return a
new model with parameter values set. Default is True.
Raises
------
:class:`pybamm.ModelError`
If an empty model is passed (`model.rhs = {}` and `model.algebraic = {}` and
`model.variables = {}`)
"""
pybamm.logger.info(
"Start setting parameters for {}".format(unprocessed_model.name)
)
# set up inplace vs not inplace
if inplace:
# any changes to model_disc attributes will change model attributes
# since they point to the same object
model = unprocessed_model
else:
# create a blank model of the same class
model = unprocessed_model.new_copy()
if (
len(unprocessed_model.rhs) == 0
and len(unprocessed_model.algebraic) == 0
and len(unprocessed_model.variables) == 0
):
raise pybamm.ModelError("Cannot process parameters for empty model")
for variable, equation in model.rhs.items():
pybamm.logger.debug("Processing parameters for {!r} (rhs)".format(variable))
model.rhs[variable] = self.process_symbol(equation)
for variable, equation in model.algebraic.items():
pybamm.logger.debug(
"Processing parameters for {!r} (algebraic)".format(variable)
)
model.algebraic[variable] = self.process_symbol(equation)
for variable, equation in model.initial_conditions.items():
pybamm.logger.debug(
"Processing parameters for {!r} (initial conditions)".format(variable)
)
model.initial_conditions[variable] = self.process_symbol(equation)
model.boundary_conditions = self.process_boundary_conditions(model)
for variable, equation in model.variables.items():
pybamm.logger.debug(
"Processing parameters for {!r} (variables)".format(variable)
)
model.variables[variable] = self.process_symbol(equation)
for event in model.events:
pybamm.logger.debug(
"Processing parameters for event'{}''".format(event.name)
)
event.expression = self.process_symbol(event.expression)
# Process timescale
model.timescale = self.process_symbol(model.timescale)
pybamm.logger.info("Finish setting parameters for {}".format(model.name))
return model
def process_boundary_conditions(self, model):
"""
Process boundary conditions for a model
Boundary conditions are dictionaries {"left": left bc, "right": right bc}
in general, but may be imposed on the tabs (or *not* on the tab) for a
small number of variables, e.g. {"negative tab": neg. tab bc,
"positive tab": pos. tab bc "no tab": no tab bc}.
"""
new_boundary_conditions = {}
sides = ["left", "right", "negative tab", "positive tab", "no tab"]
for variable, bcs in model.boundary_conditions.items():
processed_variable = self.process_symbol(variable)
new_boundary_conditions[processed_variable] = {}
for side in sides:
try:
bc, typ = bcs[side]
pybamm.logger.debug(
"Processing parameters for {!r} ({} bc)".format(variable, side)
)
processed_bc = (self.process_symbol(bc), typ)
new_boundary_conditions[processed_variable][side] = processed_bc
except KeyError as err:
# don't raise error if the key error comes from the side not being
# found
if err.args[0] in side:
pass
# do raise error otherwise (e.g. can't process symbol)
else:
raise KeyError(err)
return new_boundary_conditions
def process_geometry(self, geometry):
"""
Assign parameter values to a geometry (inplace).
Parameters
----------
geometry : :class:`pybamm.Geometry`
Geometry specs to assign parameter values to
"""
for domain in geometry:
for prim_sec_tabs, variables in geometry[domain].items():
# process tab information if using 1 or 2D current collectors
if prim_sec_tabs == "tabs":
for tab, position_size in variables.items():
for position_size, sym in position_size.items():
geometry[domain][prim_sec_tabs][tab][
position_size
] = self.process_symbol(sym)
else:
for spatial_variable, spatial_limits in variables.items():
for lim, sym in spatial_limits.items():
geometry[domain][prim_sec_tabs][spatial_variable][
lim
] = self.process_symbol(sym)
def process_symbol(self, symbol):
"""Walk through the symbol and replace any Parameter with a Value.
If a symbol has already been processed, the stored value is returned.
Parameters
----------
symbol : :class:`pybamm.Symbol`
Symbol or Expression tree to set parameters for
Returns
-------
symbol : :class:`pybamm.Symbol`
Symbol with Parameter instances replaced by Value
"""
try:
return self._processed_symbols[symbol.id]
except KeyError:
processed_symbol = self._process_symbol(symbol)
self._processed_symbols[symbol.id] = processed_symbol
return processed_symbol
def _process_symbol(self, symbol):
""" See :meth:`ParameterValues.process_symbol()`. """
if isinstance(symbol, pybamm.Parameter):
value = self[symbol.name]
if isinstance(value, numbers.Number):
# Scalar inherits name (for updating parameters) and domain (for
# Broadcast)
return pybamm.Scalar(value, name=symbol.name, domain=symbol.domain)
elif isinstance(value, pybamm.InputParameter):
value.domain = symbol.domain
return value
elif isinstance(symbol, pybamm.FunctionParameter):
new_children = [self.process_symbol(child) for child in symbol.children]
function_name = self[symbol.name]
# Create Function or Interpolant or Scalar object
if isinstance(function_name, tuple):
# If function_name is a tuple then it should be (name, data) and we need
# to create an Interpolant
name, data = function_name
function = pybamm.Interpolant(data, *new_children, name=name)
elif isinstance(function_name, numbers.Number):
# If the "function" is provided is actually a scalar, return a Scalar
# object instead of throwing an error.
# Also use ones_like so that we get the right shapes
function = pybamm.Scalar(
function_name, name=symbol.name
) * pybamm.ones_like(*new_children)
elif isinstance(function_name, pybamm.InputParameter):
# Replace the function with an input parameter
function = function_name
else:
# otherwise evaluate the function to create a new PyBaMM object
function = function_name(*new_children)
# Differentiate if necessary
if symbol.diff_variable is None:
function_out = function
else:
# return differentiated function
new_diff_variable = self.process_symbol(symbol.diff_variable)
function_out = function.diff(new_diff_variable)
# Convert possible float output to a pybamm scalar
if isinstance(function_out, numbers.Number):
return pybamm.Scalar(function_out)
# Process again just to be sure
return self.process_symbol(function_out)
elif isinstance(symbol, pybamm.BinaryOperator):
# process children
new_left = self.process_symbol(symbol.left)
new_right = self.process_symbol(symbol.right)
# make new symbol, ensure domain remains the same
new_symbol = symbol._binary_new_copy(new_left, new_right)
new_symbol.domain = symbol.domain
return new_symbol
# Unary operators
elif isinstance(symbol, pybamm.UnaryOperator):
new_child = self.process_symbol(symbol.child)
new_symbol = symbol._unary_new_copy(new_child)
# ensure domain remains the same
new_symbol.domain = symbol.domain
return new_symbol
# Functions
elif isinstance(symbol, pybamm.Function):
new_children = [self.process_symbol(child) for child in symbol.children]
return symbol._function_new_copy(new_children)
# Concatenations
elif isinstance(symbol, pybamm.Concatenation):
new_children = [self.process_symbol(child) for child in symbol.children]
return symbol._concatenation_new_copy(new_children)
else:
# Backup option: return new copy of the object
try:
return symbol.new_copy()
except NotImplementedError:
raise NotImplementedError(
"Cannot process parameters for symbol of type '{}'".format(
type(symbol)
)
)
def evaluate(self, symbol):
"""
Process and evaluate a symbol.
Parameters
----------
symbol : :class:`pybamm.Symbol`
Symbol or Expression tree to evaluate
Returns
-------
number of array
The evaluated symbol
"""
processed_symbol = self.process_symbol(symbol)
if processed_symbol.is_constant() and processed_symbol.evaluates_to_number():
return processed_symbol.evaluate()
else:
raise ValueError("symbol must evaluate to a constant scalar")
class CurrentToCrate:
"Convert a current function to a C-rate function"
class CrateToCurrent:
"Convert a C-rate function to a current function"
| [input_ids list truncated] | 2.186244 | 9,305 |
from sklearn.model_selection import train_test_split
import numpy as np
import os
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.mobilenet_v2 import preprocess_input
# Path to the original images
SOURCE_PATH = "../../image/"
# Image classes
CLASSES = ["mask", "no_mask"]
# Image size
image_w = 128
image_h = 128
# Path for saving the dataset
DATASET_PATH = "../dataset/"
# Convert the mask and no_mask image files to arrays
if __name__ == "__main__":
x, y = [], []
get_data_set(x, y)
X = np.array(x, dtype='float32')
Y = np.array(y)
# Split into training-only and test-only data
print("Splitting into training and test data...")
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, stratify=Y, random_state=42)
xy = (X_train, X_test, y_train, y_test)
print("분류 완료")
# 이미지 데이터 셋 저장
print(xy[0][0][0])
print(xy[2])
print('Saving data ...')
np.save(DATASET_PATH + "images_data.npy", xy)
print("저장 완료")
| [input_ids list truncated] | 1.508716 | 631 |
import io
import os
import zipfile
import numpy as np
from PIL import Image
from chainer.dataset import download
from chainer.dataset import DatasetMixin | [input_ids list truncated] | 3.297872 | 47 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cog.models.dbutils
| [input_ids list truncated] | 2.891304 | 46 |
"""Functions
.. autosummary::
:toctree:
identity
const
match
"""
from haskpy.types.function import function
@function
def identity(x):
"""a -> a"""
return x
@function
def const(x, y):
"""a -> b -> a"""
return x
#
# Pattern matching related functions
#
# NOTE: Currying doesn't work as expected for this function, because this is a
# generic function and we don't know how many arguments are required. We would
# first like to get all the required arguments and only after that the actual
# object on which to pattern match. One solution would be take the patterns as
# a dictionary. Then this function would always take two arguments and it would
# be explicit that all the patterns would be given at the same time. Something
# like:
#
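# A minimal sketch (not part of haskpy) of the dictionary-based alternative
# described above: all patterns arrive in a single dict, so the function always
# takes exactly two arguments and curries cleanly. Dispatching on the type name
# plus a "_" wildcard key is an assumption made purely for illustration.
@function
def match_dict(patterns, x):
    """dict -> a -> b"""
    handler = patterns.get(type(x).__name__, patterns.get("_"))
    return handler(x)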
| [input_ids list truncated] | 3.534247 | 219 |
import asyncio
import aiohttp
import discord
from redbot.core import commands, Config, checks
import discord.errors
from redbot.core.bot import Red
from typing import *
import logging
import datetime
import re
from redbot.core.utils.chat_formatting import pagify, box, quote
import markdownify
log = logging.getLogger("red.goon.mybbnotif")
| [input_ids list truncated] | 3.454545 | 99 |
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Implementation of Soft Actor-Critic (SAC)
# Paper: https://arxiv.org/abs/1801.01290
# Soft Actor-Critic Algorithms and Applications
# https://arxiv.org/abs/1812.05905
# Implementation of Attentive Update of Multi-Critic for Deep Reinforcement Learning (AUMC)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def weight_init(m):
"""Custom weight init for Conv2D and Linear layers."""
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight.data)
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
# delta-orthogonal init from https://arxiv.org/pdf/1806.05393.pdf
assert m.weight.size(2) == m.weight.size(3)
m.weight.data.fill_(0.0)
m.bias.data.fill_(0.0)
mid = m.weight.size(2) // 2
gain = nn.init.calculate_gain("relu")
nn.init.orthogonal_(m.weight.data[:, :, mid, mid], gain)
LOG_STD_MAX = 2
LOG_STD_MIN = -20
def gaussian_logprob(noise, log_std):
"""Compute Gaussian log probability."""
residual = (-0.5 * noise.pow(2) - log_std).sum(-1, keepdim=True)
return residual - 0.5 * np.log(2 * np.pi) * noise.size(-1)
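# In equation form: for x = mu + sigma * noise, the Gaussian log-density summed
# over the D dimensions is
#   sum_i ( -0.5 * noise_i**2 - log_std_i ) - 0.5 * D * log(2 * pi),
# which is exactly the residual - 0.5 * np.log(2 * np.pi) * noise.size(-1)
# returned above.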
# Returns an action for a given state
| [input_ids list truncated] | 2.488 | 500 |
#######################################
# Project: IO_Vehicle_Detection Project
# Authors: Mateusz Smendowski, Piotr Sladowski, Adam Twardosz
# Copyright (C) Mateusz Smendowski, Piotr Sladowski, Adam Twardosz 2020
#######################################
import sys
import os
import platform
import time
from PySide2 import QtCore, QtGui
from PySide2.QtCore import QSize, Qt, QCoreApplication, QUrl
from PySide2.QtGui import QColor
from PySide2.QtWidgets import *
if os.name == 'nt':
os.add_dll_directory(os.path.abspath("./dlls"))
import vlc
from gui.ui_main import Ui_MainWindow
from vehicle_detection import VideoProcessor
import gui.all_icons_rc
# GLOBALS
GLOBAL_STATE = 0
GLOBAL_TITLE_BAR = True
GLOBAL_TABS_ENABLED = True
TITLE = "Vehicle Detector"
VIDEO_PATH = ''
RESULTS = {}
PROCESS_TIME = 0
PROGRESS = 0.0
# ~~~~~ END OF CONSTRUCTOR ~~~~~ #
# AFTER VIDEO PROCESSING METHOD
# PRINT STATISTICS TO STATS_TAB
# TABS TOGGLING
# UPDATE labelTimeLeft EVERY SECOND
# MEDIA CONTROL METHODS
# ALLOW WINDOW TO MOVE ON THE SCREEN
# MAXIMIZE/RESTORE
# RETURN STATUS
# ----------- MAIN_FUNC ----------- #
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainWindow()
sys.exit(app.exec_())
| [input_ids list truncated] | 2.72103 | 466 |
# coding: utf-8
# # Mapping of production and consumption mixes in Europe and their effect on carbon footprint of electric vehicles
# This code plots the following:
# - CF of production vs consumption mixes of each country with visual indication of relative contribution to total European electricity production [Figure 1]
# - imports, exports and net trade [extra figure]
# - trade matrix heatmap [extra figure]
# - change in BEV CF with domestic production [Figure 4, horizontal and vertical formats]
# - CF of BEVs for 2 size segments and production and consumption mixes [Figure 2]
# - mitigation potential of BEVs for 4 size segments [Figure 3]
# - ratio of BEV:ICEV lifecycle impacts [# Extra figure]
# - Requires the following files to run:
# - country-specific indirect_el.pkl (from BEV_footprints_calculation.py)
# - country-specific indirect_BEV.pkl (from BEV_footprints_calculation.py)
# - country-specific indirect_BEV_impacts_consumption.pkl (from BEV_footprints_calculation.py)
# - country-specific indirect_ICEV_impacts.pkl (from BEV_footprints_calculation.py)
# - label_pos.csv
# %% Import packages
import os
import logging
import pickle
from datetime import datetime
import numpy as np
import pandas as pd
import country_converter as coco
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.ticker as ticker
import matplotlib.gridspec as gridspec
import matplotlib.patheffects as pe
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
from matplotlib import cm
from matplotlib.patches import Circle, Rectangle
from matplotlib.ticker import AutoMinorLocator, FixedLocator
import geopandas as gpd
import seaborn as sns
# from palettable.cubehelix import Cubehelix
from cmcrameri import cm
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredDrawingArea
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
# %% Set up functions to run for visualizations for experiments
fp = os.path.curdir
fp_data = os.path.join(fp, 'data')
fp_output = os.path.join(fp, 'output')
fp_results = os.path.join(fp, 'results')
fp_figures = os.path.join(fp_results, 'figures')
# def plot_all(exp, fp_figure, CFEL, results, ICEV_total_impacts, mapping_data, plot_ei=False, include_TD_losses=True, export_figures=False):
def plot_all(exp, fp_figure, results, ICEV_total_impacts, mapping_data, ei_countries, plot_ei=False, export_figures=False):
""" Reproduce figures from article. """
if not plot_ei:
mapping_data.loc[mapping_data['ISO_A2'].isin(ei_countries), 'Total production (TWh)':] = np.nan
ei_countries = None
sns.set_style('white')
plot_fig3(exp, fp_figure, mapping_data, export_figures, ei_countries, 2)
plot_fig4(exp, fp_figure, mapping_data, export_figures, ei_countries, 2)
plot_fig5a(exp, fp_figure, mapping_data, ICEV_total_impacts, export_figures, ei_countries, 2)
plot_fig5b(exp, fp_figure, mapping_data, ICEV_total_impacts, export_figures, ei_countries, 2)
# # maps with all segments for SI
plot_fig3(exp, fp_figure, mapping_data, export_figures, ei_countries, 4)
plot_fig4(exp, fp_figure, mapping_data, export_figures, ei_countries, 4)
plot_fig5a(exp, fp_figure, mapping_data, ICEV_total_impacts, export_figures, ei_countries, 4)
plot_fig5b(exp, fp_figure, mapping_data, ICEV_total_impacts, export_figures, ei_countries, 4)
sns.set_style("whitegrid")
plot_fig7(exp, fp_figure, results, export_figures, orientation='horizontal')
# %% Helper class for asymmetric normalizing colormaps
class MidpointNormalize(colors.Normalize):
"""
Normalise the colorbar so that diverging bars work their way either side from a prescribed midpoint value,
e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100))
"""
#%% Helper function
def cmap_map(function, cmap):
"""Apply function (which should operate on vectors of shape 3: [r, g, b]), on colormap cmap.
This routine will break any discontinuous points in a colormap.
"""
cdict = cmap._segmentdata
step_dict = {}
# First get the list of points where the segments start or end
for key in ('red', 'green', 'blue'):
step_dict[key] = list(map(lambda x: x[0], cdict[key]))
step_list = sum(step_dict.values(), [])
step_list = np.array(list(set(step_list)))
# Then compute the LUT, and apply the function to the LUT
reduced_cmap = lambda step : np.array(cmap(step)[0:3])
old_LUT = np.array(list(map(reduced_cmap, step_list)))
new_LUT = np.array(list(map(function, old_LUT)))
# Now try to make a minimal segment definition of the new LUT
cdict = {}
for i, key in enumerate(['red','green','blue']):
this_cdict = {}
for j, step in enumerate(step_list):
if step in step_dict[key]:
this_cdict[step] = new_LUT[j, i]
elif new_LUT[j,i] != old_LUT[j, i]:
this_cdict[step] = new_LUT[j, i]
colorvector = list(map(lambda x: x + (x[1], ), this_cdict.items()))
colorvector.sort()
cdict[key] = colorvector
return colors.LinearSegmentedColormap('colormap',cdict,1024)
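# Illustrative usage sketch (not called in the original script): cmap_map can
# lighten or darken an existing colormap before plotting, e.g.
#   light_oslo = cmap_map(lambda rgb: 1 - 0.7 * (1 - rgb), cm.oslo_r)  # lighten
#   dark_oslo = cmap_map(lambda rgb: 0.75 * rgb, cm.oslo_r)            # darken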
# %% Plot Figure 2 - Production vs Consumption electricity
def plot_fig2(exp, fp_figure, CFEL, include_TD_losses, export_figures, ei_CFEL=None):
""" Set up different options for production vs consumption electricity figure"""
# Prepare variables for Figure 2
CFEL_sorted = CFEL.sort_values(by='Production mix intensity')
# determine net importers and exporters for using colormap; positive if net importer, negative if net exporter
net_trade = (CFEL_sorted.iloc[:, 1] - CFEL_sorted.iloc[:, 0])
pct_trade = CFEL_sorted['Trade percentage, gross']*100
# for plotting with transmission losses
net_trade_TL = (CFEL_sorted.iloc[:, 1] - CFEL_sorted.iloc[:, 0]).values
print("number of importers: " + str(sum(net_trade > 0)))
print("number of exporters: " + str(sum(net_trade < 0)))
# Finally, plot Figure 2
cmap = cm.oslo_r
mark_color = cmap
# Prep country marker sizes; for now, use same size for all axis types
size_ind = 'production'
if size_ind == 'consumption':
marker_size = (CFEL_sorted.iloc[:, 1] / (CFEL_sorted.iloc[:, 1].sum()) * 1200)**2 # marker size = diameter **2
elif size_ind == 'production':
marker_size = (CFEL_sorted.iloc[:, 0] / (CFEL_sorted.iloc[:, 0].sum()) * 1200)**2 # marker size = diameter **2
else:
print('invalid value for size_ind')
if include_TD_losses:
plot_lim = 1300
else:
plot_lim = 1300
fig2_generator(exp, fp_figure, CFEL_sorted, ei_CFEL, plot_lim, export_figures,
'linear', size_ind, marker_size, mark_color, pct_trade, xlim=500, ylim=500)
# %% Figure 2 generator (carbon footprint of electricity production mix vs consumption mix)
def fig2_generator(exp, fp_figure, CFEL_sorted, ei_CFEL, plot_maxlim, export_figures, axis_type, size_ind, marker_size, marker_clr, net_trade=None, xlim=None, ylim=None):
""" Generate Figure 2; carbon footprint of production vs consumption
mixes. Can be plotted on linear, log or semilog axes
"""
# Sort by marker size to have smallest markers drawn last
marker_size = marker_size.sort_values(ascending=False)
CFEL_markers = CFEL_sorted.copy()
CFEL_markers = CFEL_markers.reindex(marker_size.index)
fig, ax = plt.subplots(1, figsize=(16, 12))
# Control for input axis types linear, semilog, logs
if axis_type == "log":
ax.set_yscale(axis_type)
elif axis_type == "semilog":
axis_type = "log"
ax.set_xscale(axis_type)
# Plot data
norm = colors.Normalize(vmax=100)
plot = ax.scatter(CFEL_markers.iloc[:, 2], CFEL_markers.iloc[:, 3],
s=marker_size, alpha=0.5, norm=norm, c=net_trade, cmap=marker_clr, label='_nolegend_')
ax.scatter(CFEL_markers.iloc[:, 2], CFEL_markers.iloc[:, 3],
s=2, c='k', alpha=0.9, edgecolor='k', label='_nolegend_') # Include midpoint in figure
if ei_CFEL is not None:
ax.scatter(ei_CFEL.iloc[:, 2], ei_CFEL.iloc[:, 3], s=100, marker='*', label='_nolegend_')
# Hack to have darker marker edges
ax.scatter(CFEL_markers.iloc[:, 2], CFEL_markers.iloc[:, 3],
s=marker_size, alpha=0.7, norm=norm, c="None", edgecolor='k', linewidths=0.7,
label='_nolegend_')
### Configure axis ticks and set minimum limits to 0 (was -60)
ax.tick_params(which='minor', direction='out', length=4.0)
ax.tick_params(which='major', direction='out', length=6.0, labelsize=16.5)
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.set_xlim(left=0, right=plot_maxlim)
ax.set_ylim(bottom=0, top=plot_maxlim)
### Size legend
legend_sizes = [(x / 100 * 1200)**2 for x in [15, 10, 5, 1]]
legend_labels = ["15%", "10%", "5%", "1%"]
# The below is for having the legend in the top left position
ada = AnchoredDrawingArea(300, 300, 0, 0, loc=2, frameon=True)
if size_ind == 'consumption':
legend_title = AnchoredText('% of total European consumption',
frameon=False, loc=9, bbox_to_anchor=(0.18, 0.990),
bbox_transform=ax.transAxes,
prop=dict(size=17, weight='medium'))
elif size_ind == 'production':
legend_title = AnchoredText('% of total European production',
frameon=False, loc=9, bbox_to_anchor=(0.174, 0.990),
bbox_transform=ax.transAxes,
prop=dict(size=17, weight='medium'))
add_year = AnchoredText('2020', frameon=False, loc=9, bbox_to_anchor=(0.185, 0.961),
bbox_transform=ax.transAxes, prop=dict(size=17, weight='medium'))
# The below is for having the legend in the bottom right position
# ada = AnchoredDrawingArea(250,200,0,0,loc=4,frameon=True)
# legend_title = AnchoredText('% of total European consumption',frameon=False,loc=9, bbox_to_anchor=(0.87,0.3), bbox_transform=ax.transAxes,prop=dict(size=14))
# add_year= AnchoredText('2011',frameon=False,loc=9, bbox_to_anchor=(0.87,0.27), bbox_transform=ax.transAxes,prop=dict(size=14))
ada.drawing_area.add_artist(legend_title)
ada.drawing_area.add_artist(add_year)
for i, area in enumerate(legend_sizes):
radius_pts = np.sqrt(area / 3.14)
c1 = Circle((150, np.sqrt(area / 3.14) + 10), radius_pts,
fc = marker_clr(0.5), #'#89BEA3',
ec='k', lw=0.6, alpha=0.4)
c2 = Circle((150, np.sqrt(area / 3.14) + 10), radius_pts,
fc="None", ec='k', lw=0.7, alpha=0.7)
ada.drawing_area.add_artist(c1)
ada.drawing_area.add_artist(c2)
leg_ann = plt.annotate(s=legend_labels[i], xy=[0.5, 1], xycoords=c1, xytext=[120, 12],
textcoords="offset points", fontsize=13,
arrowprops=dict(arrowstyle="->", color="k", lw=1,
connectionstyle="arc,angleA=45,angleB=45,armA=0,armB=20,rad=0"))
leg_ann.set_zorder(20)
ax.add_artist(ada)
### 10%, 20%, 50% shading and x=y line
# x=y
ax.plot([0, ax.get_xlim()[1]], [0, ax.get_xlim()[1]], color="grey", alpha=0.6)
# 10%
plt.fill_between([0, ax.get_xlim()[1]], [0, ax.get_xlim()[1] * 1.1],
[0, ax.get_xlim()[1] * 0.9], color="grey", edgecolor='k', alpha=0.13, zorder=10)
# 20%
plt.fill_between([0, ax.get_xlim()[1]], [0, ax.get_xlim()[1] * 1.2],
[0, ax.get_xlim()[1] * 0.8], color="grey",edgecolor='k', alpha=0.1, zorder=9)
# 50%
plt.fill_between([0, ax.get_xlim()[1]], [0, ax.get_xlim()[1] * 1.5],
[0, ax.get_xlim()[1] * 0.5], color='grey', edgecolor='k', alpha=0.07, zorder=8)
ax.annotate(r'$\pm$ 10%', xy=(0, 0), xytext=(0.881, 0.95), xycoords='axes fraction', fontsize=13, rotation=40)
ax.annotate(r'$\pm$ 20%', xy=(0, 0), xytext=(0.808, 0.95), xycoords='axes fraction', fontsize=13, rotation=40)
ax.annotate(r'$\pm$ 50%', xy=(0, 0), xytext=(0.64, 0.95), xycoords='axes fraction', fontsize=13, rotation=40)
ax.annotate(r'x = y', xy=(0, 0), xytext=(0.95, 0.96), xycoords='axes fraction', fontsize=13, rotation=40)
ax.set_xlabel("Carbon footprint of production mix \n (g CO$_2$ kWh$^{-1}$)", fontsize=20, labelpad=14)
ax.set_ylabel("Carbon footprint of consumption mix \n (g CO$_2$ kWh$^{-1}$)", fontsize=20, labelpad=14)
### Make and format inset figure
# ax2 = fig.add_subplot(339) # for inlay in bottom right position inset subplot
# ax2.axis([400, xlim, 400, ylim])
# Set formatting for zoomed inlay figure
# markersize = 40, s = 40*2, pi/4*s = marker area
# Linear factor between main fig and inset: x1/x2 = z*(l1/l2) --> 1100/150 = 33.75/10.85*x --> x=2.357
# marker_size_ratio = (ax.get_xlim()[1] / (ax2.get_xlim()[1] - ax2.get_xlim()[0]))
# ax2.scatter(CFEL_markers.iloc[:, 2], CFEL_markers.iloc[:, 3],
# s= (np.sqrt(marker_size) * np.sqrt(marker_size_ratio))**2, alpha=0.5,
# norm=norm, c=net_trade, cmap=marker_clr, edgecolor='k', label='_nolegend_')
# ax2.scatter(CFEL_markers.iloc[:, 2], CFEL_markers.iloc[:, 3],
# s=(np.sqrt(marker_size) * np.sqrt(marker_size_ratio))**2, alpha=0.9,
# norm=norm, c="None", edgecolor='k', linewidths=0.7, label='_nolegend_') # Hack for darker edge colours
# ax2.scatter(CFEL_markers.iloc[:, 2], CFEL_markers.iloc[:, 3],
# s=2, c='k', alpha=0.9, edgecolor='k', label='_nolegend_') # Include midpoint in figure
# ax2.xaxis.tick_top()
# ax2.yaxis.tick_right()
# ax2.xaxis.set_major_locator(ticker.MultipleLocator(50))
# ax2.yaxis.set_major_locator(ticker.MultipleLocator(50))
# ax2.tick_params(which="major", labelsize=12)
# ax2.plot([0, ax.get_xlim()[1]], [0, ax.get_xlim()[1]], color="grey", alpha=0.6)
# plt.fill_between([0, ax2.get_xlim()[1]], [0, ax2.get_xlim()[1] * 1.1],
# [0, ax2.get_xlim()[1] * 0.9], color="grey", alpha=0.13)
# plt.fill_between([0, ax2.get_xlim()[1]], [0, ax2.get_xlim()[1] * 1.2],
# [0, ax2.get_xlim()[1] * 0.8], color="grey", alpha=0.1)
# plt.fill_between([0, ax2.get_xlim()[1]], [0, ax2.get_xlim()[1] * 1.5],
# [0, ax2.get_xlim()[1] * 0.5], color='grey', alpha=0.07)
### Add country text labels
for (country, country_data) in CFEL_markers.iterrows():
# Inset figure labelling
# if country_data["Production mix intensity"] >= (ax2.get_xlim()[0 ] * 0.9) and country_data["Production mix intensity"] <= (ax2.get_xlim()[1]*1.1) and country_data["Consumption mix intensity"] >= (ax2.get_ylim()[0]*0.9) and country_data["Consumption mix intensity"] <= (ax2.get_ylim()[1]*1.1): #Inset labels
# if country in ['DE', 'IT', 'NL']:
# pass # ignore the largest circles in the inset, as they don't need labelling
# else:
# ax2.annotate(country, xy=(country_data["Production mix intensity"], country_data["Consumption mix intensity"]),
# xytext=(np.sqrt(marker_size[country]*marker_size_ratio*(np.pi/4))/2+6,-7),
# textcoords=("offset points"), size=15,
# path_effects=[pe.withStroke(linewidth=4, foreground="w", alpha=0.8)])
# Left corner of main figure
if country_data["Production mix intensity"] <= 100 and country_data["Consumption mix intensity"] <= 100:
# Adjust for countries in bottom left
if country == "SE":
ax.annotate(country, xy=(country_data["Production mix intensity"],
country_data["Consumption mix intensity"]),
xytext=(np.sqrt(marker_size[country]) / 2 + 2, -2), textcoords=("offset points"),
path_effects=[pe.withStroke(linewidth=4, foreground="w", alpha=0.8)], size=15)
elif country == "NO":
ax.annotate(country, xy=(country_data["Production mix intensity"], country_data["Consumption mix intensity"]),
xytext=(np.sqrt(marker_size[country]) / 2 + 2, -14),
textcoords=("offset points"),
path_effects=[pe.withStroke(linewidth=4, foreground="w", alpha=0.8)], size=15)
elif country == 'FR':
ax.annotate(country, xy=(country_data["Production mix intensity"], country_data["Consumption mix intensity"]),
xytext=(np.sqrt(marker_size[country]) / 2 - 2, -38), textcoords=("offset points"),
path_effects=[pe.withStroke(linewidth=4, foreground="w", alpha=0.8)], size=15)
# Rest of figure; avoid overlapping labels for LU, MT
elif country == 'LU':
ax.annotate(country, xy=(country_data["Production mix intensity"], country_data["Consumption mix intensity"]),
xytext=(np.sqrt(marker_size[country]) / 2 + 5, -4), textcoords=("offset points"),
path_effects=[pe.withStroke(linewidth=4, foreground="w", alpha=0.8)], size=15)
elif country == 'MT':
ax.annotate(country, xy=(country_data["Production mix intensity"], country_data["Consumption mix intensity"]),
xytext=(-25, 2), textcoords=("offset points"),
path_effects=[pe.withStroke(linewidth=4, foreground="w", alpha=0.8)], size=15)
else:
ax.annotate(country, xy=(country_data["Production mix intensity"], country_data["Consumption mix intensity"]),
xytext=(-9.5, -14 - np.sqrt(marker_size[country]) / 2), textcoords=("offset points"),
path_effects=[pe.withStroke(linewidth=4, foreground="w", alpha=0.8)], size=15)
if ei_CFEL is not None:
for (country, country_data) in ei_CFEL.iterrows():
ax.annotate(country, xy=(country_data["Production mix intensity"], country_data["Consumption mix intensity"]),
xytext=(-9.5, -20), textcoords=("offset points"),
path_effects=[pe.withStroke(linewidth=4, foreground="w", alpha=0.8)], size=15)
### Make colorbar
ax_cbar = fig.add_axes([0.925, 0.125, 0.03, 0.755]) # place colorbar on own Ax
    ### Set colorbar minimum and maximum values
cbar_min = 0
cbar_max = 100
# Begin plotting colorbar
cbar = plt.colorbar(plot, cax=ax_cbar, drawedges=False, extend='max')
cbar.set_label('Gross electricity traded, as % of net production', fontsize=18, rotation=90, labelpad=8)
cbar.set_alpha(1)
cbar.ax.tick_params(labelsize=14)
cbar.outline.set_linewidth(5)
cbar.outline.set_color('k')
cbar.draw_all()
# Manually tack on semi-transparent rectangle on colorbar to match alpha of plot; workaround for weird rendering of colorbar with alpha
r1 = Rectangle((9, 0), 85, 500, fc='w', alpha=0.3)
ax_cbar.add_patch(r1)
if export_figures:
keeper = exp + " run {:%d-%m-%y, %H_%M}".format(datetime.now())
plt.savefig(os.path.join(fp_figure, 'Fig_2 - ' + keeper + '.pdf'), format='pdf', bbox_inches='tight')
plt.savefig(os.path.join(fp_figure, 'Fig_2 - ' + keeper + '.png'), bbox_inches='tight')
plt.show()
# %% Figure 7 (differences for domestic production)
# %% map_prep methods
# %% Helper functions for map plotting
# function to round to nearest given multiple
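# NOTE: the body of this helper is not included in this excerpt; the sketch below is a
# minimal stand-in matching the round_up_down(value, multiple, direction) call used in
# plot_el_trade() further down, and may differ from the original implementation.
# (np is assumed to be numpy, imported at the top of the module as in the rest of the code.)
def round_up_down(value, multiple, direction='up'):
    """Round value up or down to the nearest given multiple."""
    if direction == 'up':
        return int(np.ceil(value / multiple) * multiple)
    return int(np.floor(value / multiple) * multiple)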
# function for annotating maps
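# NOTE: sketch only -- the original annotate_map body is not included in this excerpt.
# Assumed behaviour: label each listed country with its (rounded) value at the country
# centroid, skipping values below `threshold`. `max_val` is an assumed name for the
# fourth positional argument (300 at the call site in plot_country_footprint) and is used
# here only to pick a contrasting text colour; the original presumably also uses
# label_pos and lined_countries (set up below) to offset labels with leader lines.
def annotate_map(ax, countries, mapping_data, values, max_val, threshold=0, round_dig=0):
    """Annotate map polygons with their values (illustrative stand-in)."""
    for ind, value in zip(countries, values):
        if abs(value) < threshold:
            continue  # skip values below the display threshold
        centroid = mapping_data.loc[ind].geometry.centroid  # place the label at the country centroid
        colour = 'w' if value >= 0.5 * max_val else 'k'
        ax.annotate(f'{value:.{int(round_dig)}f}', xy=(centroid.x, centroid.y),
                    ha='center', va='center', fontsize=6, color=colour)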
fp_label_pos = os.path.join(fp_data, 'label_pos.csv')
label_pos = pd.read_csv(fp_label_pos, index_col=[0, 1], skiprows=0, header=0) # read in coordinates for label/annotation positions
lined_countries = ['PT', 'BE', 'NL', 'DK', 'SI', 'GR', 'ME', 'MK', 'MD', 'EE', 'LV', 'BA', 'MT', 'LU', 'AL', 'HR'] # countries using leader lines
# %% Figure 3 - BEV footprints by country
# %% Figure 4 - Plot share of production emissions
# %% Figure 5 - Absolute mitigation by electrification
# %%
# %% Figure 6 - sensitivity with vehicle lifetimes
# %% Plot country footprint with specific segment
def plot_country_footprint(exp, fp_figure, country, segment, start, timestamp, mapping_data, export_figures):
"""Produce country footprint based on user query."""
# use same colour coding as for Figure 3 in paper
cmap = colors.ListedColormap(["#c6baca", # light purple
"#83abce",
"#6eb668",
"#9caa41",
"#815137",
"#681e3e" # red
])
    vmin = 50
    vmax = 375
cmap_col = [cmap(i) for i in np.linspace(0, 1, 6)] # retrieve colormap colors
cmap = cmap_col
boundaries = [i for i in np.arange(vmin, vmax, 50)] # define boundaries of colormap transitions
    cmap_BEV, norm = colors.from_levels_and_colors(boundaries, colors=[cmap[0]] + cmap + [cmap_col[-1]], extend='both')
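    # from_levels_and_colors expects len(colors) == len(levels) + 1 when extend='both':
    # here 7 boundaries (50, 100, ..., 350) and the 6 map colours, padded with a repeated
    # colour at each end for the under- and over-range bins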
col_list = ['BEV footprint - Segment A - Consumption mix', 'BEV footprint - Segment C - Consumption mix',
'BEV footprint - Segment JC - Consumption mix', 'BEV footprint - Segment JE - Consumption mix']
    col = [col for col in col_list if 'Segment ' + segment in col]
    if len(col) == 1:
        col = col[0]
    else:
        raise ValueError(f'Unrecognised vehicle segment: {segment}')
country_ind = mapping_data.loc[mapping_data['ISO_A2'] == country]
    if country_ind[col].values[0] >= (4 / 6 * (vmax - vmin)):  # high footprint; use a lighter background for contrast
color = 'grey'
else:
color = '#363737'
fig, ax = plt.subplots(1, 1, figsize=(5, 4), dpi=600)
mapping_data.plot(ax=ax, color=color, edgecolor='darkgrey', linewidth=0.3)
country_ind.plot(ax=ax, column=col, cmap=cmap_BEV, norm=norm, edgecolor='k', linewidth=0.3, alpha=0.8)
plt.xlim((-12, 34))
plt.ylim((32, 75))
plt.yticks([])
plt.xticks([])
# add annotations
timestamp = timestamp.strftime('%Y-%m-%d %H:%M %z')
caption = f'Carbon footprint for segment {segment} BEV in {country} \n at {timestamp} (g CO$_2$e /vkm) \n (User query for {start})'
ax.annotate(caption, xy=(0.5, 1.02), xycoords='axes fraction', fontsize=9, ha='center')
annotate_map(ax,
country_ind[country_ind[col].notna()].index.to_list(),
country_ind,
country_ind[country_ind[col].notna()][col].values,
300,
threshold=0,
round_dig=0,
)
if export_figures:
keeper = exp + " run {:%d-%m-%y, %H_%M}".format(datetime.now())
plt.savefig(os.path.join(fp_figure, 'Fig_1 ' + keeper + '.pdf'), format='pdf', bbox_inches='tight')
        plt.savefig(os.path.join(fp_figure, 'Fig_1 ' + keeper + '.png'), bbox_inches='tight')
plt.show()
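# Hypothetical usage sketch (argument values are illustrative, not taken from the original
# runs): `mapping_df` is assumed to be a geopandas GeoDataFrame of European countries with
# an 'ISO_A2' column and the 'BEV footprint - Segment X - Consumption mix' columns merged in.
# plot_country_footprint(exp='baseline', fp_figure='./figures', country='PL', segment='C',
#                        start='2021-05-01 12:00', timestamp=datetime.now(),
#                        mapping_data=mapping_df, export_figures=False)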
#%% Optional figures for plotting
def plot_el_trade(exp, fp_figure, CFEL, export_figures):
"""Plot import, exports and net trade for each country."""
plot_trades = pd.DataFrame([CFEL['imports'], -CFEL['exports'], CFEL['imports'] - CFEL['exports']])
plot_trades.index = ['Imports', 'Exports', 'Net trade']
plot_trades = plot_trades.T
plot_trades.sort_index(axis=0, inplace=True)
fig, ax = plt.subplots()
# plot imports and exports
plot_trades.iloc[:, 0:2].plot(kind='bar', color=['xkcd:cadet blue','k'],
stacked=True, width=1, rot=45, figsize=(20,8),
grid=True, ax=ax, use_index=True, legend=False)
# plot points for net trade
plot_trades.iloc[:, 2].plot(kind='line', style='o', markersize=8, mfc='#681E3E', mec='w', alpha=0.8, fontsize=15, grid=True, ax=ax)
plt.ylabel('Electricity traded, TWh', fontsize=14)
ax.yaxis.set_minor_locator(ticker.MultipleLocator(10))
ax.tick_params(axis='y', which='major', labelsize=14)
# set up secondary axis for trade as % of production
trade_pct = CFEL['Trade percentage, gross'] * 100
ax2 = ax.twinx()
ax2.set_xlim(ax.get_xlim()) # set up secondary y axis plot
# semi-manually set y-axis extrema to align 0-value with primary y-axis
ax_neg_pct = abs(ax.get_ylim()[0]) / (abs(ax.get_ylim()[0]) + ax.get_ylim()[1])
ax_pos_pct = 1 - ax_neg_pct
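    # ax_neg_pct / ax_pos_pct are the fractions of the primary y-axis lying below / above
    # zero; they are used below so that zero sits at the same relative height on the
    # secondary (%) axis as on the primary (TWh) axis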
# ax2_upper_y = round_up_down(trade_pct.max(), 50, 'up')
ax2_upper_y = round_up_down(trade_pct.max(), 100, 'up')
ax2.yaxis.set_major_formatter(ticker.PercentFormatter())
# ax2.yaxis.set_major_locator(ticker.MultipleLocator(100))
ax2.yaxis.set_minor_locator(ticker.MultipleLocator(25))
# this is just for spacing the 0 in the right place
ax2.set_ylim(top=(ax2_upper_y), bottom=-(ax2_upper_y*ax_neg_pct) / ax_pos_pct)
# remove negative y ticks on secondary axis (trade as % of total prod)
    ticks = [tick for tick in ax2.get_yticks() if tick >= 0]
ax2.set_yticks(ticks)
ax2.tick_params(axis='y', which='major', labelsize=14)
ax2.tick_params(left=False, labelleft=False, bottom=False, labelbottom=False,
right=True, labelright=True)
# yticks = ax2.yaxis.get_major_ticks()
# for t in yticks:
# print(t.label2.get_text())
# if '-' in t.label2.get_text():
# t.label2.set_visible(False)
# t.tick1line.set_visible(False)
# t.tick2line.set_visible(False)
ax2.set_ylabel('Gross trade, as percentage of total production (%)', labelpad=5, fontsize=14)
ax2.set_facecolor('none')
for _, spine in ax2.spines.items():
spine.set_visible(False)
trade_pct.plot(kind='line', style='D', markersize=8, color='#99a63f', mec='k',
label='Gross trade, as % of total production [right axis]', grid=False, ax=ax2, alpha=0.8, fontsize=13)
handles, labels = ax.get_legend_handles_labels()
handles2, labels2 = ax2.get_legend_handles_labels()
order = [1, 2, 0, 3] # manually arrange legend entries
handles = handles + handles2
    labels = labels + labels2
plt.legend([handles[i] for i in order], [labels[i] for i in order], fontsize=13, frameon=True, facecolor='w', borderpad=1, loc=4, framealpha=1)
if export_figures:
keeper = exp + " run {:%d-%m-%y, %H_%M}".format(datetime.now())
plt.savefig(os.path.join(fp_figure,'Fig_eltrade_' + keeper + '.pdf'), format='pdf', bbox_inches='tight')
plt.savefig(os.path.join(fp_figure,'Fig_eltrade_' + keeper + '.png'), bbox_inches='tight')
plt.show()
def trade_heatmap(trades):
""" Extra figure; plot trades as heatmap """
plt.figure(figsize=(15, 12))
    sns.heatmap(trades.replace(0, np.nan), square=True, linecolor='silver', linewidths=0.5, cmap='inferno_r')
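# Hypothetical usage sketch (data are illustrative): a small country-by-country trade
# matrix in TWh, exporters as rows and importers as columns.
# trades_demo = pd.DataFrame([[0, 5.2, 0.6], [1.1, 0, 3.4], [0.0, 2.2, 0]],
#                            index=['NO', 'SE', 'DK'], columns=['NO', 'SE', 'DK'])
# trade_heatmap(trades_demo)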
# Adapted from: https://stackoverflow.com/questions/37935920/quantile-normalization-on-pandas-dataframe
# Citing: https://en.wikipedia.org/wiki/Quantile_normalization
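# A minimal sketch of the recipe described in the links above (samples in columns); this
# follows the widely used pandas approach and is not necessarily the exact implementation
# originally used here.
import pandas as pd

def quantile_normalize(df):
    """Quantile-normalise the columns of df so they all share the same distribution."""
    # mean value of each rank across columns (sort every column, then average row-wise)
    rank_mean = df.stack().groupby(df.rank(method='first').stack().astype(int)).mean()
    # replace every entry by the mean value of its rank
    return df.rank(method='min').stack().astype(int).map(rank_mean).unstack()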
"""
The board definition should define the following variables:
PART (str): The part name of the FPGA according to the tool
"""
PART = "xcku115-flva1517-2-e"
import argparse  # to parse command-line arguments
import cv2  # OpenCV library for image handling
# fetch the command-line arguments and store them in a dictionary
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Enter path to the image")
args = vars(ap.parse_args())
# load the image and convert it into a numpy array,
# then print the corresponding pixel values
image = cv2.imread(args["image"]) # to read the image
# grab the pixel at (0,0) of the image and print the color values
(b,g,r) = image[0,0]
print("pixel at (0,0)- Red: {} , Green: {} , Blue: {}".format(r,g,b))
image[0,0] = (0,0,255)
(b,g,r) = image[0,0]
print("pixel at (0,0)- Red: {} , Green: {} , Blue: {}".format(r,g,b))
# grab a region of the image and show it in the cv2 window
corner = image[0:100, 0:100]  # the top-left 100x100 corner region
# load image into cv2 window
# wait for key press
# write image into another format
# cv2.imshow("Image Title", image) # to show the image
# cv2.waitKey(0) # wait until any key press from keyboard
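# For example (sketch; the output filename is illustrative):
# cv2.imwrite("new_image.jpg", image)  # save the image to disk in another format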
# change color of the area of image
image[0:100, 0:100] = (0, 255, 0)  # set the top-left 100x100 region to green (BGR)
cv2.imshow("color of area changed", image)
cv2.waitKey(0)