hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c2e83de294b08d097eda13bb958e629ecbb541e | 7,059 | py | Python | ParamGenerator/Phoenics/BayesianNeuralNetwork/pymc3_interface.py | Tabor-Research-Group/ChemOS | 50117f572e95e68dc4dccb624cedb28dbfc6e419 | [
"Apache-2.0"
] | 37 | 2018-03-20T21:23:11.000Z | 2022-03-26T08:19:20.000Z | ParamGenerator/Phoenics/BayesianNeuralNetwork/pymc3_interface.py | Tabor-Research-Group/ChemOS | 50117f572e95e68dc4dccb624cedb28dbfc6e419 | [
"Apache-2.0"
] | 1 | 2021-06-29T10:03:22.000Z | 2021-06-29T10:03:22.000Z | ParamGenerator/Phoenics/BayesianNeuralNetwork/pymc3_interface.py | Tabor-Research-Group/ChemOS | 50117f572e95e68dc4dccb624cedb28dbfc6e419 | [
"Apache-2.0"
] | 10 | 2018-05-16T21:04:05.000Z | 2021-10-15T18:14:06.000Z | #!/usr/bin/env python
__author__ = 'Florian Hase'
#========================================================================
import numpy as np
import theano
import theano.tensor as T
import pymc3 as pm
from Utils.utils import VarDictParser
#from BayesianNeuralNetwork.distributions import DiscreteLaplace
#========================================================================
class Pymc3Network(VarDictParser):
    """Bayesian neural network over observed parameter vectors, built with pymc3.

    The network maps the rescaled observed parameters onto themselves
    (an autoencoder-like layout: input width == output width); per-variable
    likelihoods (Normal for floats, DiscreteLaplace for integers,
    Dirichlet/Categorical for categories) are attached in _create_model
    and the posterior is approximated with ADVI in _sample.

    NOTE(review): many attributes used below (complete_size, total_size,
    var_p_types, var_e_names, var_e_highs, ...) are presumably provided by
    VarDictParser — confirm against that class.
    """

    def __init__(self, var_dicts, observed_params, observed_losses, batch_size, model_details):
        """Store observations and copy every model_details entry onto self.

        Args:
            var_dicts: variable descriptions, parsed by VarDictParser.
            observed_params: iterable of observed parameter vectors.
            observed_losses: sequence of losses; only its length is used
                here (number of observations).
            batch_size: kept on the instance for later use.
            model_details (dict): each key/value becomes an instance
                attribute (e.g. num_layers, hidden_shape, weight_loc,
                weight_scale, learning_rate, num_epochs, num_draws).
        """
        VarDictParser.__init__(self, var_dicts)

        self.observed_params = observed_params
        self.observed_losses = observed_losses
        self.num_obs = len(self.observed_losses)
        self.batch_size = batch_size
        self.model_details = model_details

        # expose every model detail as an attribute (num_layers, ...)
        for key, value in self.model_details.items():
            setattr(self, str(key), value)

        self._process_network_inputs()
        self._get_weights_and_bias_shapes()

    def __get_weights(self, index, shape, scale = None):
        """Normal prior for the weights of layer `index` (scale arg unused)."""
        return pm.Normal('w%d' % index, self.weight_loc, self.weight_scale, shape = shape)

    def __get_biases(self, index, shape, scale = None):
        """Normal prior for the biases of layer `index` (scale arg unused)."""
        return pm.Normal('b%d' % index, self.weight_loc, self.weight_scale, shape = shape)

    def weight(self, index):
        """Return the weight variable 'w<index>' created in _create_model."""
        return getattr(self, 'w%d' % index)

    def bias(self, index):
        """Return the bias variable 'b<index>' created in _create_model."""
        return getattr(self, 'b%d' % index)

    def _get_weights_and_bias_shapes(self):
        """Compute per-layer weight/bias shapes: input -> hidden x (num_layers-2) -> input."""
        self.weight_shapes = [[self.network_input.shape[1], self.hidden_shape]]
        self.bias_shapes = [[self.hidden_shape]]
        for index in range(1, self.num_layers - 1):
            self.weight_shapes.append([self.hidden_shape, self.hidden_shape])
            self.bias_shapes.append([self.hidden_shape])
        # output layer is as wide as the (one-hot expanded) input
        self.weight_shapes.append([self.hidden_shape, self.network_input.shape[1]])
        self.bias_shapes.append([self.network_input.shape[1]])

    def _process_network_inputs(self):
        """Build network_input (one-hot expanded) / network_output arrays and rescale.

        network_input has one column per one-hot category option
        (complete_size); network_output keeps one column per raw variable
        (total_size). At the end the input is linearly mapped to [-1, 1]
        per column using the rescaling bounds derived below.
        """
        self.network_input = np.zeros((self.num_obs, self.complete_size)) #+ 10.**-4
        self.network_output = np.zeros((self.num_obs, self.total_size))
        for obs_index, obs in enumerate(self.observed_params):
            current_index = 0
            for var_index, value in enumerate(obs):
                if self.var_p_types[var_index] == 'categorical':
                    # one-hot: the category value selects the column offset
                    self.network_input[obs_index, int(current_index + value)] += 1. #- 2 * 10.**-4
                    self.network_output[obs_index, var_index] = value
                    current_index += len(self.var_p_options[var_index])
                else:
                    self.network_input[obs_index, current_index] = value
                    self.network_output[obs_index, var_index] = value
                    current_index += 1

        # boolean masks over the expanded columns, filled in below
        for att in ['floats', 'ints', 'cats']:
            setattr(self, att, np.array([False for i in range(self.complete_size)]))

        self.upper_rescalings = np.empty(self.complete_size)
        self.lower_rescalings = np.empty(self.complete_size)
        for var_e_index, var_e_name in enumerate(self.var_e_names):
            high = self.var_e_highs[var_e_index]
            low = self.var_e_lows[var_e_index]
            if self.var_e_types[var_e_index] == 'float':
                # pad float ranges by 10% on both sides
                self.upper_rescalings[var_e_index] = high + 0.1 * (high - low)
                self.lower_rescalings[var_e_index] = low - 0.1 * (high - low)
                self.floats[var_e_index] = True
            elif self.var_e_types[var_e_index] == 'integer':
                # integers keep their exact bounds (padding was tried and disabled)
                self.upper_rescalings[var_e_index] = high# + np.ceil(0.1 * (high - low))
                self.lower_rescalings[var_e_index] = low# - np.ceil(0.1 * (high - low))
                self.ints[var_e_index] = True
            elif self.var_e_types[var_e_index] == 'categorical':
                # one-hot columns already live in [0, 1]
                self.upper_rescalings[var_e_index] = 1.
                self.lower_rescalings[var_e_index] = 0.
                self.cats[var_e_index] = True

        # map every column to [-1, 1]
        self.network_input = 2. * (self.network_input - self.lower_rescalings) / (self.upper_rescalings - self.lower_rescalings) - 1.

    def _create_model(self):
        """Assemble the pymc3 model: BNN layers, precisions and per-type likelihoods."""
        with pm.Model() as self.model:

            # --- network layers: tanh hidden layers, sigmoid output in [0, 1]
            for layer_index in range(self.num_layers):
                setattr(self, 'w%d' % layer_index, self.__get_weights(layer_index, self.weight_shapes[layer_index]))
                setattr(self, 'b%d' % layer_index, self.__get_biases(layer_index, self.bias_shapes[layer_index]))

                if layer_index == 0:
                    fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(self.network_input, self.weight(layer_index)) + self.bias(layer_index)))
                    setattr(self, 'fc%d' % layer_index, fc)
                elif 0 < layer_index < self.num_layers - 1:
                    fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)))
                    setattr(self, 'fc%d' % layer_index, fc)
                else:
                    self._loc = pm.Deterministic('bnn_out', pm.math.sigmoid(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)) )

            # --- precision: Gamma prior, rescaled by the squared variable ranges
            self.tau_rescaling = np.zeros((self.num_obs, self.network_input.shape[1]))
            for obs_index in range(self.num_obs):
                self.tau_rescaling[obs_index] += self.var_e_ranges
            self.tau_rescaling = self.tau_rescaling**2

            tau = pm.Gamma('tau', self.num_obs**2, 1., shape = (self.num_obs, self.network_input.shape[1]))
            self.tau = tau / self.tau_rescaling
            self.scale = pm.Deterministic('scale', 1. / pm.math.sqrt(self.tau))

            # --- float variables: Normal likelihood around the rescaled BNN output
            # NOTE(review): observed uses self._floats / self._ints, but this class
            # only sets self.floats / self.ints (setattr above). Confirm VarDictParser
            # defines the underscored names; otherwise this raises AttributeError.
            self.loc = pm.Deterministic('loc', (self.upper_rescalings - self.lower_rescalings) * self._loc + self.lower_rescalings)
            self.out_floats = pm.Normal('out_floats', self.loc[:, self.floats], tau = self.tau[:, self.floats], observed = self.network_output[:, self._floats])

            # --- integer variables: discretized Laplace likelihood
            # NOTE(review): the DiscreteLaplace import is commented out at the top
            # of this file — confirm it is in scope before calling _create_model.
            self.int_scale = pm.Deterministic('int_scale', 1. * self.scale)
            self.out_ints = DiscreteLaplace('out_ints', loc = self.loc[:, self.ints], scale = self.int_scale[:, self.ints], observed = self.network_output[:, self._ints])

            # --- categorical variables: Dirichlet over the one-hot block, then Categorical
            dist_counter, cat_var_index = 0, 0
            self.alpha = pm.Deterministic('alpha', (self.loc + 1.) * self.scale)
            self.num_cats = 0
            for var_e_index, var_e_type in enumerate(self.var_e_types):
                # only fire once per categorical block (at its first column)
                if var_e_type == 'categorical' and self.var_e_begin[var_e_index] == var_e_index:
                    begin, end = self.var_e_begin[var_e_index], self.var_e_end[var_e_index]
                    var_e_name = self.var_e_names[var_e_index]
                    param_index = np.argwhere(self.var_p_names == var_e_name)[0, 0]
                    self.param_index = param_index

                    out_dirichlet = pm.Dirichlet('dirich_%d' % dist_counter, a = self.alpha[:, begin : end], shape = (self.num_obs, int(end - begin)) )
                    out_cats = pm.Categorical('out_cats_%d' % dist_counter, p = out_dirichlet, observed = self.network_output[:, param_index])
                    self.num_cats += 1
                    dist_counter += 1

    def _sample(self, num_epochs = None, num_draws = None):
        """Fit the model with ADVI and draw posterior samples into self.trace.

        Args:
            num_epochs: fit iterations; any falsy value (None, 0) falls
                back to self.num_epochs.
            num_draws: number of posterior draws; falsy falls back to
                self.num_draws.
        """
        if not num_epochs: num_epochs = self.num_epochs
        if not num_draws: num_draws = self.num_draws
        with self.model:
            # SVGD was tried as an alternative:
            # approx = pm.fit(method = 'svgd', n = 1000, obj_optimizer = pm.adam(learning_rate = self.learning_rate))
            approx = pm.fit(n = num_epochs, obj_optimizer = pm.adam(learning_rate = self.learning_rate))
            self.trace = approx.sample(draws = num_draws)
| 41.523529 | 170 | 0.696274 |
__author__ = 'Florian Hase'
import numpy as np
import theano
import theano.tensor as T
import pymc3 as pm
from Utils.utils import VarDictParser
class Pymc3Network(VarDictParser):
def __init__(self, var_dicts, observed_params, observed_losses, batch_size, model_details):
VarDictParser.__init__(self, var_dicts)
self.observed_params = observed_params
self.observed_losses = observed_losses
self.num_obs = len(self.observed_losses)
self.batch_size = batch_size
self.model_details = model_details
for key, value in self.model_details.items():
setattr(self, str(key), value)
self._process_network_inputs()
self._get_weights_and_bias_shapes()
def __get_weights(self, index, shape, scale = None):
return pm.Normal('w%d' % index, self.weight_loc, self.weight_scale, shape = shape)
def __get_biases(self, index, shape, scale = None):
return pm.Normal('b%d' % index, self.weight_loc, self.weight_scale, shape = shape)
def weight(self, index):
return getattr(self, 'w%d' % index)
def bias(self, index):
return getattr(self, 'b%d' % index)
def _get_weights_and_bias_shapes(self):
self.weight_shapes = [[self.network_input.shape[1], self.hidden_shape]]
self.bias_shapes = [[self.hidden_shape]]
for index in range(1, self.num_layers - 1):
self.weight_shapes.append([self.hidden_shape, self.hidden_shape])
self.bias_shapes.append([self.hidden_shape])
self.weight_shapes.append([self.hidden_shape, self.network_input.shape[1]])
self.bias_shapes.append([self.network_input.shape[1]])
def _process_network_inputs(self):
self.network_input = np.zeros((self.num_obs, self.complete_size))
self.network_output = np.zeros((self.num_obs, self.total_size))
for obs_index, obs in enumerate(self.observed_params):
current_index = 0
for var_index, value in enumerate(obs):
if self.var_p_types[var_index] == 'categorical':
self.network_input[obs_index, int(current_index + value)] += 1.
self.network_output[obs_index, var_index] = value
current_index += len(self.var_p_options[var_index])
else:
self.network_input[obs_index, current_index] = value
self.network_output[obs_index, var_index] = value
current_index += 1
for att in ['floats', 'ints', 'cats']:
setattr(self, att, np.array([False for i in range(self.complete_size)]))
self.upper_rescalings = np.empty(self.complete_size)
self.lower_rescalings = np.empty(self.complete_size)
for var_e_index, var_e_name in enumerate(self.var_e_names):
high = self.var_e_highs[var_e_index]
low = self.var_e_lows[var_e_index]
if self.var_e_types[var_e_index] == 'float':
self.upper_rescalings[var_e_index] = high + 0.1 * (high - low)
self.lower_rescalings[var_e_index] = low - 0.1 * (high - low)
self.floats[var_e_index] = True
elif self.var_e_types[var_e_index] == 'integer':
self.upper_rescalings[var_e_index] = high
self.lower_rescalings[var_e_index] = low
self.ints[var_e_index] = True
elif self.var_e_types[var_e_index] == 'categorical':
self.upper_rescalings[var_e_index] = 1.
self.lower_rescalings[var_e_index] = 0.
self.cats[var_e_index] = True
self.network_input = 2. * (self.network_input - self.lower_rescalings) / (self.upper_rescalings - self.lower_rescalings) - 1.
def _create_model(self):
with pm.Model() as self.model:
for layer_index in range(self.num_layers):
setattr(self, 'w%d' % layer_index, self.__get_weights(layer_index, self.weight_shapes[layer_index]))
setattr(self, 'b%d' % layer_index, self.__get_biases(layer_index, self.bias_shapes[layer_index]))
if layer_index == 0:
fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(self.network_input, self.weight(layer_index)) + self.bias(layer_index)))
setattr(self, 'fc%d' % layer_index, fc)
elif 0 < layer_index < self.num_layers - 1:
fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)))
setattr(self, 'fc%d' % layer_index, fc)
else:
self._loc = pm.Deterministic('bnn_out', pm.math.sigmoid(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)) )
self.tau_rescaling = np.zeros((self.num_obs, self.network_input.shape[1]))
for obs_index in range(self.num_obs):
self.tau_rescaling[obs_index] += self.var_e_ranges
self.tau_rescaling = self.tau_rescaling**2
tau = pm.Gamma('tau', self.num_obs**2, 1., shape = (self.num_obs, self.network_input.shape[1]))
self.tau = tau / self.tau_rescaling
self.scale = pm.Deterministic('scale', 1. / pm.math.sqrt(self.tau))
self.loc = pm.Deterministic('loc', (self.upper_rescalings - self.lower_rescalings) * self._loc + self.lower_rescalings)
self.out_floats = pm.Normal('out_floats', self.loc[:, self.floats], tau = self.tau[:, self.floats], observed = self.network_output[:, self._floats])
self.int_scale = pm.Deterministic('int_scale', 1. * self.scale)
self.out_ints = DiscreteLaplace('out_ints', loc = self.loc[:, self.ints], scale = self.int_scale[:, self.ints], observed = self.network_output[:, self._ints])
dist_counter, cat_var_index = 0, 0
self.alpha = pm.Deterministic('alpha', (self.loc + 1.) * self.scale)
self.num_cats = 0
for var_e_index, var_e_type in enumerate(self.var_e_types):
if var_e_type == 'categorical' and self.var_e_begin[var_e_index] == var_e_index:
begin, end = self.var_e_begin[var_e_index], self.var_e_end[var_e_index]
var_e_name = self.var_e_names[var_e_index]
param_index = np.argwhere(self.var_p_names == var_e_name)[0, 0]
self.param_index = param_index
out_dirichlet = pm.Dirichlet('dirich_%d' % dist_counter, a = self.alpha[:, begin : end], shape = (self.num_obs, int(end - begin)) )
out_cats = pm.Categorical('out_cats_%d' % dist_counter, p = out_dirichlet, observed = self.network_output[:, param_index])
self.num_cats += 1
dist_counter += 1
def _sample(self, num_epochs = None, num_draws = None):
if not num_epochs: num_epochs = self.num_epochs
if not num_draws: num_draws = self.num_draws
with self.model:
approx = pm.fit(n = num_epochs, obj_optimizer = pm.adam(learning_rate = self.learning_rate))
self.trace = approx.sample(draws = num_draws)
| true | true |
1c2e84f9242aa5d5ab7410159ed00ce8f79e48be | 460 | py | Python | Matplotlib/Part 4/Part4.py | aritraroy24/Learning_NumPy_SciPy | cd33fbcb7dee11547b7c6e30692866373fb0ea5b | [
"MIT"
] | null | null | null | Matplotlib/Part 4/Part4.py | aritraroy24/Learning_NumPy_SciPy | cd33fbcb7dee11547b7c6e30692866373fb0ea5b | [
"MIT"
] | null | null | null | Matplotlib/Part 4/Part4.py | aritraroy24/Learning_NumPy_SciPy | cd33fbcb7dee11547b7c6e30692866373fb0ea5b | [
"MIT"
] | null | null | null | from matplotlib import pyplot as plt
# Stack plot demo: how three developers split the same block of hours.
plt.style.use("fivethirtyeight")

# x-axis values and one y-series per developer (series are stacked)
hours = [1, 2, 3, 4, 5, 6, 7, 8, 9]
dev1 = [8, 6, 5, 5, 4, 2, 1, 1, 0]
dev2 = [0, 1, 2, 2, 2, 4, 4, 4, 4]
dev3 = [0, 1, 1, 1, 2, 2, 3, 3, 4]

# legend labels and fill colors, index-aligned with the three series above
labels = ['Dev1', 'Dev2', 'Dev3']
colors = ['#6d904f', '#fc4f30', '#008fd5']

plt.stackplot(hours, dev1, dev2, dev3, labels=labels, colors=colors)
plt.legend(loc=(0.075, 0.05))  # axes-fraction coordinates: lower-left corner
plt.title("My Awesome Stack Plot")
plt.tight_layout()
plt.show() | 20.909091 | 68 | 0.591304 | from matplotlib import pyplot as plt
# Stack plot demo: how three developers split the same block of hours.
plt.style.use("fivethirtyeight")

# x-axis values and one y-series per developer (series are stacked)
hours = [1, 2, 3, 4, 5, 6, 7, 8, 9]
dev1 = [8, 6, 5, 5, 4, 2, 1, 1, 0]
dev2 = [0, 1, 2, 2, 2, 4, 4, 4, 4]
dev3 = [0, 1, 1, 1, 2, 2, 3, 3, 4]

# legend labels and fill colors, index-aligned with the three series above
labels = ['Dev1', 'Dev2', 'Dev3']
colors = ['#6d904f', '#fc4f30', '#008fd5']

plt.stackplot(hours, dev1, dev2, dev3, labels=labels, colors=colors)
plt.legend(loc=(0.075, 0.05))  # axes-fraction coordinates: lower-left corner
plt.title("My Awesome Stack Plot")
plt.tight_layout()
plt.show() | true | true |
1c2e85128466df3e710be0f1ad094ceda7965972 | 10,591 | py | Python | PythonExercises.py | jaxalbert/PythonExperiments | e5c658be1969b59be9a85a969db0e298f7e23bd6 | [
"MIT"
] | 2 | 2018-03-05T06:14:35.000Z | 2019-01-08T15:32:30.000Z | PythonExercises.py | jaxalbert/PythonExperiments | e5c658be1969b59be9a85a969db0e298f7e23bd6 | [
"MIT"
] | null | null | null | PythonExercises.py | jaxalbert/PythonExperiments | e5c658be1969b59be9a85a969db0e298f7e23bd6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
def stemWord(w):
    """Render a word into its generic form for NLP preprocessing.

    Truncates at the first apostrophe (dropping possessive "'s" and
    anything after it), removes the punctuation marks ";-?.,!:" and
    lower-cases the result.

    Args:
        w (str): the input word.

    Returns:
        str: the simplified word.
    """
    base = w.split("'", 1)[0]
    cleaned = ''.join(ch for ch in base if ch not in ';-?.,!:')
    return cleaned.lower()
class LanguageModel:
    """Bag-of-words model of one or more texts.

    Tracks, for every stemmed word seen, how often it occurred across all
    texts added to the model, and answers simple statistics: counts,
    per-word probabilities and ranking by frequency.
    """

    @staticmethod
    def getWordFreq(txt):
        """Return a dict mapping each stemmed word of *txt* to its count.

        Example: "The dog likes the cat!" ->
        {"the": 2, "dog": 1, "likes": 1, "cat": 1}

        Args:
            txt (str): a string containing free text.

        Returns:
            dict: stemmed word (str) -> occurrence count (int).
        """
        freq = {}
        for raw in txt.split():
            word = stemWord(raw)
            # dict.get replaces the Python-2-only dict.has_key test
            freq[word] = freq.get(word, 0) + 1
        return freq

    @staticmethod
    def mergeWordFreqDict(frDict1, frDict2):
        """Return the element-wise sum of two word-frequency dicts.

        Neither argument is modified (the previous implementation mutated
        frDict1 in place, surprising callers that kept a reference to it).

        Args:
            frDict1 (dict): stemmed word -> positive int.
            frDict2 (dict): stemmed word -> positive int.

        Returns:
            dict: stemmed word -> summed count.
        """
        merged = dict(frDict1)
        for word, count in frDict2.items():
            merged[word] = merged.get(word, 0) + count
        return merged

    def __init__(self, txtList=[]):
        """Build a model from an optional list of text strings.

        Args:
            txtList (list): list of strings with text. The shared mutable
                default is harmless here: it is only iterated, never mutated.

        Raises:
            Exception: if txtList is not a list (kept for backward
                compatibility with the original interface).
        """
        self.__wordFreq = {}  # stemmed word -> occurrence count
        self.__texts = []     # every raw text string added so far
        if txtList.__class__ != [].__class__:
            raise Exception('txtList must be a list of strings')
        for txt in txtList:
            self.addText(txt)

    def addText(self, txt):
        """Add one string of text to the model and update the word counts.

        Args:
            txt (str): the text to add.
        """
        self.__texts.append(txt)
        newFreq = LanguageModel.getWordFreq(txt)
        self.__wordFreq = LanguageModel.mergeWordFreqDict(self.__wordFreq, newFreq)

    def addTextFile(self, fileName):
        """Add the contents of a text file to the model.

        Args:
            fileName (str): absolute or relative path to a text file.
        """
        # 'with' closes the handle even on error (the old code leaked it)
        with open(fileName) as handle:
            self.addText(handle.read())

    def wordCount(self):
        """Return the total number of word occurrences seen so far."""
        return sum(self.__wordFreq.values())

    def uniqueWordCount(self):
        """Return how many distinct stemmed words were seen."""
        return len(self.__wordFreq)

    def getWordProbabillity(self, word):
        """Return the relative frequency of *word* in the model.

        Args:
            word (str): a word; it is stemmed before the lookup.

        Returns:
            float: count(word) / total word count, or 0 for unseen words.
        """
        stemmed = stemWord(word)
        if stemmed in self.__wordFreq:
            return self.__wordFreq[stemmed] / float(self.wordCount())
        return 0

    def __str__(self):
        """Return a 4-line description of the model.

        Follows the format documented by the original spec, e.g. for
        LanguageModel(['hello world', 'Goodbye World!']) the result is
        the lines "LanguageModel", "#texts:2", "#words:4",
        "#unique words:3" (tab-indented, newline-terminated).

        The previous implementation raised TypeError: it passed self
        twice (self.uniqueWordCount(self)) and tried to call the list
        (self.__texts(self)).
        """
        return ('LanguageModel\n'
                '\t#texts:' + str(len(self.__texts)) + '\n'
                '\t#words:' + str(self.wordCount()) + '\n'
                '\t#unique words:' + str(self.uniqueWordCount()) + '\n')

    def __repr__(self):
        """Return a constructor expression that rebuilds this model."""
        return str(self.__class__) + '(' + self.__texts.__repr__() + ')'

    def getWordsByProbabillity(self):
        """Return all stemmed words sorted from most to least probable.

        Returns:
            list: words (str) in descending order of frequency.
        """
        # probability is monotonic in the raw count, so sorting by the
        # stored counts avoids recomputing each probability
        return sorted(self.__wordFreq, key=self.__wordFreq.get, reverse=True)
def isPalindrome(sentence):
    """Tell whether a string is a palindrome.

    The check is case-insensitive and ignores spaces plus the punctuation
    handled by stemWord.

    Args:
        sentence (str): one or more words, assumed to carry no possessive
            (stemWord truncates at the first apostrophe).

    Returns:
        bool: True if the cleaned sentence reads the same in both
        directions, False otherwise.
    """
    # lower-case, strip punctuation, then drop spaces before comparing
    cleaned = stemWord(sentence).replace(" ", "")
    return cleaned == cleaned[::-1]
if __name__ == '__main__':
#Everything here is ignored by joc-de-proves
#You can debug your program by testing your functions and classes here
pass
| 35.901695 | 147 | 0.589274 |
def stemWord(w):
    """Reduce a word to a generic form: drop everything from the first
    apostrophe on, strip the punctuation ";-?.,!:" and lower-case it."""
    w=w.split("'")[0]#Separates and eliminates all after the "'"
    w=[letter for letter in w if not (letter in ';-?.,!:')]
    return ''.join(w).lower()
class LanguageModel:
@staticmethod
def getWordFreq(txt):
list1 = txt.split()
newlist = []
dic = {}
for word in list1:
newlist.append(stemWord(word))
for word in newlist:
if dic.has_key(word) == True:
dic[word] = dic[word] +1
else:
dic[word] = 1
return dic
@staticmethod
def mergeWordFreqDict(frDict1,frDict2):
new_dict = frDict1
for i in frDict2:
if i in new_dict:
new_dict[i] += frDict2[i]
else:
new_dict[i] = frDict2[i]
return new_dict
def __init__(self,txtList=[]):
self.__wordFreq={}
self.__texts=[]#A list with a text string
if txtList.__class__ != [].__class__:
raise Exception('txtList must be a list of strings')
for txt in txtList:
self.addText(txt)#add text to the list
def addText(self,txt):
self.__texts.append(txt)
newFreq=LanguageModel.getWordFreq(txt) #newFreq is a dictionary with the word's frequencies of the added text
self.__wordFreq=LanguageModel.mergeWordFreqDict(self.__wordFreq,newFreq)
def addTextFile(self,fileName):
self.addText(open(fileName).read())
def wordCount(self):
return sum(self.__wordFreq.values())
def uniqueWordCount(self):
return len(self.__wordFreq.keys())
def getWordProbabillity(self,word):
stemmedWord=stemWord(word)
if stemmedWord in self.__wordFreq.keys():
return self.__wordFreq[stemmedWord]/float(self.wordCount())
else:
return 0
def __str__(self):
uwords = self.uniqueWordCount(self)
nwords = self.wordCount(self)
texts = len(self.__texts(self))
lm = "The lenguage model has:\n\t" + "texts:" + str(texts) + "\n\t"+ "words:" + str(nwords) + "\n\t" + "unique words:" + str(uwords) + "\n"
return lm
def __repr__(self):
res=str(self.__class__)+'('+self.__texts.__repr__()+')'
return res
def getWordsByProbabillity(self):
finalist = {}
newdict = self.__wordFreq.keys()
for i in newdict:
finalist[i] = self.getWordProbabillity(i)
sorted_keys = sorted(finalist, key=finalist.__getitem__)
l = list(reversed(sorted_keys))
return l
def isPalindrome(sentence):
    """Return True if *sentence* reads the same forwards and backwards,
    ignoring case, spaces and the punctuation handled by stemWord."""
    answer = True
    newstring = stemWord(sentence)
    s_no_space = newstring.replace(" ","")  # drop spaces before comparing
    if s_no_space == s_no_space[::-1]:  # compare with its own reverse
        answer = True
    else:
        answer = False
    return answer
if __name__ == '__main__':
pass
| true | true |
1c2e8556fed0cb0df722a9c46df711a1dc400983 | 14,733 | py | Python | yolov3_tf2/models.py | makra89/Quake_AI | 8df69c75b117079f5e40929341c4638e741de11d | [
"BSD-3-Clause"
] | 4 | 2022-02-10T22:13:37.000Z | 2022-03-27T18:44:38.000Z | yolov3_tf2/models.py | makra89/Quake_AI | 8df69c75b117079f5e40929341c4638e741de11d | [
"BSD-3-Clause"
] | 14 | 2021-08-29T13:36:12.000Z | 2021-09-22T05:22:57.000Z | yolov3_tf2/models.py | makra89/Quake_AI | 8df69c75b117079f5e40929341c4638e741de11d | [
"BSD-3-Clause"
] | 1 | 2021-11-21T21:01:31.000Z | 2021-11-21T21:01:31.000Z | import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Add,
Concatenate,
Conv2D,
Input,
Lambda,
LeakyReLU,
MaxPool2D,
UpSampling2D,
ZeroPadding2D,
BatchNormalization,
)
from tensorflow.keras.regularizers import l2
from tensorflow.keras.losses import (
binary_crossentropy,
sparse_categorical_crossentropy
)
from .utils import broadcast_iou
# TODO: Add these values to config?
# Detection post-processing limits — presumably consumed by yolo_nms below
# (its body is defined further down in this file).
YOLO_MAX_BOXES = 100
YOLO_IOU_THRESHOLD = 0.5
YOLO_SCORE_THRESHOLD = 0.5

# Anchor boxes as (width, height) pixel pairs on a 416x416 input,
# normalized here to [0, 1] by the division.
yolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
                         (59, 119), (116, 90), (156, 198), (373, 326)],
                        np.float32) / 416
# Each mask row selects which anchor indices one output scale uses.
yolo_anchor_masks = yolo_tiny_3l_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])

yolo_tiny_3l_anchors = np.array([(11, 20), (15, 31), (16, 56), (24, 44), (27, 70),
                                 (35, 101), (51, 120), (71, 179), (132, 245)],
                                np.float32) / 416

# Determined for custom dataset
yolo_tiny_anchors = np.array([(14, 28), (22, 54), (30, 87),
                              (45, 95), (52, 145), (106, 216)],
                             np.float32) / 416
yolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])
def DarknetConv(x, filters, size, strides=1, batch_norm=True):
    """Darknet conv unit: optional half-padding, Conv2D, then BN + LeakyReLU.

    Strided (downsampling) calls pad only the top/left edge and use
    'valid' padding; stride-1 calls use 'same'. When batch_norm is False
    (detection head) the raw convolution output is returned and the conv
    keeps its bias.
    """
    if strides != 1:
        x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # top/left half-padding
    out = Conv2D(
        filters=filters,
        kernel_size=size,
        strides=strides,
        padding='same' if strides == 1 else 'valid',
        use_bias=not batch_norm,
        kernel_regularizer=l2(0.0005),
    )(x)
    if not batch_norm:
        return out
    out = BatchNormalization()(out)
    return LeakyReLU(alpha=0.1)(out)
def DarknetResidual(x, filters):
    """Residual unit: 1x1 bottleneck to filters//2, 3x3 back up, skip add."""
    shortcut = x
    y = DarknetConv(x, filters // 2, 1)
    y = DarknetConv(y, filters, 3)
    return Add()([shortcut, y])
def DarknetBlock(x, filters, blocks):
    """Downsample once (stride-2 3x3 conv), then stack `blocks` residual units."""
    out = DarknetConv(x, filters, 3, strides=2)
    for _ in range(blocks):
        out = DarknetResidual(out, filters)
    return out
def Darknet(name=None):
    """Darknet-53 backbone; outputs the two skip feature maps and the final map."""
    inputs = Input([None, None, 3])
    x = DarknetConv(inputs, 32, 3)
    x = DarknetBlock(x, 64, 1)
    x = DarknetBlock(x, 128, 2)
    x_36 = DarknetBlock(x, 256, 8)     # skip connection
    x_61 = DarknetBlock(x_36, 512, 8)  # skip connection
    x = DarknetBlock(x_61, 1024, 4)
    return tf.keras.Model(inputs, (x_36, x_61, x), name=name)
def DarknetTiny(name=None):
    """Tiny Darknet backbone; outputs the skip feature map and the final map."""
    inputs = Input([None, None, 3])
    x = inputs
    # four conv+pool stages doubling the channel count each time
    for filters in (16, 32, 64, 128):
        x = DarknetConv(x, filters, 3)
        x = MaxPool2D(2, 2, 'same')(x)
    x_8 = DarknetConv(x, 256, 3)  # skip connection
    x = MaxPool2D(2, 2, 'same')(x_8)
    x = DarknetConv(x, 512, 3)
    x = MaxPool2D(2, 1, 'same')(x)  # stride-1 pool keeps spatial size
    x = DarknetConv(x, 1024, 3)
    return tf.keras.Model(inputs, (x_8, x), name=name)
def DarknetTiny3L(name=None):
    """Tiny Darknet variant exposing three feature maps for a 3-scale head."""
    inputs = Input([None, None, 3])
    x = inputs
    for filters in (16, 32, 64):
        x = DarknetConv(x, filters, 3)
        x = MaxPool2D(2, 2, 'same')(x)
    x_6 = DarknetConv(x, 128, 3)
    x = MaxPool2D(2, 2, 'same')(x_6)
    x_8 = DarknetConv(x, 256, 3)  # skip connection
    x = MaxPool2D(2, 2, 'same')(x_8)
    x = DarknetConv(x, 512, 3)
    x = MaxPool2D(2, 1, 'same')(x)  # stride-1 pool keeps spatial size
    x = DarknetConv(x, 1024, 3)
    return tf.keras.Model(inputs, (x_6, x_8, x), name=name)
def YoloConv(filters, name=None):
    """Factory for the YOLO neck: 5-conv stack, optionally fused with a skip map.

    The returned closure accepts either a single feature map or a
    (feature, skip) tuple; with a tuple, the feature map is reduced,
    upsampled and concatenated with the skip connection first.
    """
    def yolo_conv(x_in):
        if isinstance(x_in, tuple):
            inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
            x, x_skip = inputs
            # merge with the skip connection
            x = DarknetConv(x, filters, 1)
            x = UpSampling2D(2)(x)
            x = Concatenate()([x, x_skip])
        else:
            x = inputs = Input(x_in.shape[1:])
        # alternating 1x1 (filters) and 3x3 (2*filters) convolutions
        for kernel in (1, 3, 1, 3, 1):
            x = DarknetConv(x, filters * 2 if kernel == 3 else filters, kernel)
        return Model(inputs, x, name=name)(x_in)
    return yolo_conv
def YoloConvTiny(filters, name=None):
    """Factory for the tiny-YOLO neck (single 1x1 conv, optional skip fusion).

    The returned closure accepts either a single feature map or a
    (feature, skip) tuple; with a tuple, the feature map is reduced,
    upsampled and concatenated with the skip connection.
    """
    def yolo_conv(x_in):
        if isinstance(x_in, tuple):
            inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
            x, x_skip = inputs
            # merge with the skip connection
            x = DarknetConv(x, filters, 1)
            x = UpSampling2D(2)(x)
            x = Concatenate()([x, x_skip])
        else:
            x = inputs = Input(x_in.shape[1:])
            x = DarknetConv(x, filters, 1)
        return Model(inputs, x, name=name)(x_in)
    return yolo_conv
def YoloOutput(filters, anchors, classes, name=None):
    """Factory for one detection head producing per-anchor raw predictions."""
    def yolo_output(x_in):
        inputs = Input(x_in.shape[1:])
        x = DarknetConv(inputs, filters * 2, 3)
        x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)
        # (batch, gy, gx, anchors*(5+classes)) -> (batch, gy, gx, anchors, 5+classes)
        x = Lambda(lambda t: tf.reshape(
            t, (-1, tf.shape(t)[1], tf.shape(t)[2], anchors, classes + 5)))(x)
        return tf.keras.Model(inputs, x, name=name)(x_in)
    return yolo_output
# As tensorflow lite doesn't support tf.size used in tf.meshgrid,
# we reimplemented a simple meshgrid function that use basic tf function.
def _meshgrid(n_a, n_b):
    """Minimal tf.meshgrid replacement built from TF-Lite-friendly ops.

    Returns [X, Y], each of shape (n_b, n_a), equivalent to
    tf.meshgrid(tf.range(n_a), tf.range(n_b)).
    """
    xs = tf.reshape(tf.tile(tf.range(n_a), [n_b]), (n_b, n_a))
    ys = tf.reshape(tf.repeat(tf.range(n_b), n_a), (n_b, n_a))
    return [xs, ys]
def yolo_boxes(pred, anchors, classes):
    """Decode one scale's raw head output into boxes and scores.

    Returns (bbox, objectness, class_probs, pred_box): bbox is in
    normalized (x1, y1, x2, y2) corner form; pred_box keeps the raw
    (xy, wh) pair that YoloLoss needs.
    """
    # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
    grid_size = tf.shape(pred)[1:3]
    box_xy, box_wh, objectness, class_probs = tf.split(
        pred, (2, 2, 1, classes), axis=-1)
    box_xy = tf.sigmoid(box_xy)
    objectness = tf.sigmoid(objectness)
    class_probs = tf.sigmoid(class_probs)
    pred_box = tf.concat((box_xy, box_wh), axis=-1)  # original xywh for loss
    # !!! grid[x][y] == (y, x)
    grid = _meshgrid(grid_size[1],grid_size[0])
    grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]
    # cell offset + sigmoid(xy), divided by grid size -> coords in [0, 1]
    box_xy = (box_xy + tf.cast(grid, tf.float32)) / \
        tf.cast(grid_size, tf.float32)
    # wh is predicted as a log-scale factor of the anchor priors
    box_wh = tf.exp(box_wh) * anchors
    box_x1y1 = box_xy - box_wh / 2
    box_x2y2 = box_xy + box_wh / 2
    bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
    return bbox, objectness, class_probs, pred_box
def yolo_nms(outputs, anchors, masks, classes):
    """Merge per-scale predictions and run (soft) non-max suppression.

    Returns fixed-size (boxes, scores, classes, valid_detections), each
    with a leading axis of 1; only the first `valid_detections` entries
    are meaningful, the remainder are zero padding.
    NOTE(review): the tf.squeeze on axis 0 assumes batch size 1 — confirm callers.
    """
    # boxes, conf, type
    b, c, t = [], [], []
    for o in outputs:
        # flatten each scale's grid/anchor axes into one detection axis
        b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
        c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))
        t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))
    bbox = tf.concat(b, axis=1)
    confidence = tf.concat(c, axis=1)
    class_probs = tf.concat(t, axis=1)
    # If we only have one class, do not multiply by class_prob (always 0.5)
    if classes == 1:
        scores = confidence
    else:
        scores = confidence * class_probs
    dscores = tf.squeeze(scores, axis=0)
    scores = tf.reduce_max(dscores,[1])   # best class score per box
    bbox = tf.reshape(bbox,(-1,4))
    classes = tf.argmax(dscores,1)        # best class index per box
    selected_indices, selected_scores = tf.image.non_max_suppression_with_scores(
        boxes=bbox,
        scores=scores,
        max_output_size=YOLO_MAX_BOXES,
        iou_threshold=YOLO_IOU_THRESHOLD,
        score_threshold=YOLO_SCORE_THRESHOLD,
        soft_nms_sigma=0.5
    )
    # pad the selection out to YOLO_MAX_BOXES so output shapes are static
    num_valid_nms_boxes = tf.shape(selected_indices)[0]
    selected_indices = tf.concat([selected_indices,tf.zeros(YOLO_MAX_BOXES-num_valid_nms_boxes, tf.int32)], 0)
    selected_scores = tf.concat([selected_scores,tf.zeros(YOLO_MAX_BOXES-num_valid_nms_boxes,tf.float32)], -1)
    boxes=tf.gather(bbox, selected_indices)
    boxes = tf.expand_dims(boxes, axis=0)
    scores=selected_scores
    scores = tf.expand_dims(scores, axis=0)
    classes = tf.gather(classes,selected_indices)
    classes = tf.expand_dims(classes, axis=0)
    valid_detections=num_valid_nms_boxes
    valid_detections = tf.expand_dims(valid_detections, axis=0)
    return boxes, scores, classes, valid_detections
def YoloV3(size=None, channels=3, anchors=yolo_anchors,
           masks=yolo_anchor_masks, classes=80, training=False):
    """Full YOLOv3: Darknet backbone plus three neck/head scales.

    With training=True the model outputs the three raw head tensors
    (inputs to YoloLoss); otherwise boxes are decoded and NMS'd into
    final detections.
    """
    x = inputs = Input([size, size, channels], name='input')
    x_36, x_61, x = Darknet(name='yolo_darknet')(x)
    x = YoloConv(512, name='yolo_conv_0')(x)
    output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)
    x = YoloConv(256, name='yolo_conv_1')((x, x_61))
    output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)
    x = YoloConv(128, name='yolo_conv_2')((x, x_36))
    output_2 = YoloOutput(128, len(masks[2]), classes, name='yolo_output_2')(x)
    if training:
        return Model(inputs, (output_0, output_1, output_2), name='yolov3')
    # [:3] drops pred_box, which only the loss needs
    boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
                     name='yolo_boxes_0')(output_0)
    boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
                     name='yolo_boxes_1')(output_1)
    boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),
                     name='yolo_boxes_2')(output_2)
    outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
                     name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))
    return Model(inputs, outputs, name='yolov3')
def YoloV3Tiny(size=None, channels=3, anchors=yolo_tiny_anchors,
               masks=yolo_tiny_anchor_masks, classes=80, training=False):
    """Two-scale tiny YOLOv3.

    training=True returns the two raw head tensors for YoloLoss;
    otherwise boxes are decoded and NMS'd into final detections.
    """
    x = inputs = Input([size, size, channels], name='input')
    x_8, x = DarknetTiny(name='yolo_darknet')(x)
    x = YoloConvTiny(256, name='yolo_conv_0')(x)
    output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)
    x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))
    output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)
    if training:
        return Model(inputs, (output_0, output_1), name='yolov3')
    # [:3] drops pred_box, which only the loss needs
    boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
                     name='yolo_boxes_0')(output_0)
    boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
                     name='yolo_boxes_1')(output_1)
    outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
                     name='yolo_nms')((boxes_0[:3], boxes_1[:3]))
    return Model(inputs, outputs, name='yolov3_tiny')
def YoloV3Tiny3L(size=None, channels=3, anchors=yolo_tiny_3l_anchors,
                 masks=yolo_tiny_3l_anchor_masks, classes=80, training=False):
    """Tiny YOLOv3 variant with three output scales (tiny-3l).

    training=True returns the three raw head tensors for YoloLoss;
    otherwise boxes are decoded and NMS'd into final detections.
    """
    x = inputs = Input([size, size, channels], name='input')
    x_6, x_8, x = DarknetTiny3L(name='yolo_darknet')(x)
    x = YoloConvTiny(256, name='yolo_conv_0')(x)
    output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)
    x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))
    output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)
    x = YoloConvTiny(128, name='yolo_conv_2')((x, x_6))
    # NOTE(review): head 2 uses 64 filters while its neck uses 128 —
    # confirm this matches the darknet cfg this model loads weights from.
    output_2 = YoloOutput(64, len(masks[2]), classes, name='yolo_output_2')(x)
    if training:
        return Model(inputs, (output_0, output_1, output_2), name='yolov3')
    boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
                     name='yolo_boxes_0')(output_0)
    boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
                     name='yolo_boxes_1')(output_1)
    boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),
                     name='yolo_boxes_2')(output_2)
    outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
                     name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))
    return Model(inputs, outputs, name='yolov3_tiny_3l')
def YoloLoss(anchors, classes=80, ignore_thresh=0.5):
    """Build the Keras-compatible YOLOv3 loss for one output scale.

    anchors are the (w, h) priors for this scale.  Predicted boxes whose
    best IoU with any ground-truth box exceeds ignore_thresh are not
    penalized as false positives.
    """
    def yolo_loss(y_true, y_pred):
        # 1. transform all pred outputs
        # y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))
        pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(
            y_pred, anchors, classes)
        pred_xy = pred_xywh[..., 0:2]
        pred_wh = pred_xywh[..., 2:4]
        # 2. transform all true outputs
        # y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))
        true_box, true_obj, true_class_idx = tf.split(
            y_true, (4, 1, 1), axis=-1)
        true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
        true_wh = true_box[..., 2:4] - true_box[..., 0:2]
        # give higher weights to small boxes
        box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
        # 3. inverting the pred box equations
        grid_size = tf.shape(y_true)[1]
        grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
        true_xy = true_xy * tf.cast(grid_size, tf.float32) - \
            tf.cast(grid, tf.float32)
        true_wh = tf.math.log(true_wh / anchors)
        # log(0) -> -inf for cells with no object; zero those entries
        true_wh = tf.where(tf.math.is_inf(true_wh),
                           tf.zeros_like(true_wh), true_wh)
        # 4. calculate all masks
        obj_mask = tf.squeeze(true_obj, -1)
        # ignore false positive when iou is over threshold
        best_iou = tf.map_fn(
            lambda x: tf.reduce_max(broadcast_iou(x[0], tf.boolean_mask(
                x[1], tf.cast(x[2], tf.bool))), axis=-1),
            (pred_box, true_box, obj_mask),
            tf.float32)
        ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)
        # 5. calculate all losses
        xy_loss = obj_mask * box_loss_scale * \
            tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
        wh_loss = obj_mask * box_loss_scale * \
            tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
        obj_loss = binary_crossentropy(true_obj, pred_obj)
        obj_loss = obj_mask * obj_loss + \
            (1 - obj_mask) * ignore_mask * obj_loss
        class_loss = obj_mask * sparse_categorical_crossentropy(
            true_class_idx, pred_class)
        # 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)
        xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
        wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
        obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
        class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
        return xy_loss + wh_loss + obj_loss + class_loss
    return yolo_loss
| 36.924812 | 110 | 0.602525 | import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Add,
Concatenate,
Conv2D,
Input,
Lambda,
LeakyReLU,
MaxPool2D,
UpSampling2D,
ZeroPadding2D,
BatchNormalization,
)
from tensorflow.keras.regularizers import l2
from tensorflow.keras.losses import (
binary_crossentropy,
sparse_categorical_crossentropy
)
from .utils import broadcast_iou
YOLO_MAX_BOXES = 100
YOLO_IOU_THRESHOLD = 0.5
YOLO_SCORE_THRESHOLD = 0.5
yolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
(59, 119), (116, 90), (156, 198), (373, 326)],
np.float32) / 416
yolo_anchor_masks = yolo_tiny_3l_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
yolo_tiny_3l_anchors = np.array([(11, 20), (15, 31), (16, 56), (24, 44), (27, 70),
(35, 101), (51, 120), (71, 179), (132, 245)],
np.float32) / 416
yolo_tiny_anchors = np.array([(14, 28), (22, 54), (30, 87),
(45, 95), (52, 145), (106, 216)],
np.float32) / 416
yolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])
def DarknetConv(x, filters, size, strides=1, batch_norm=True):
if strides == 1:
padding = 'same'
else:
x = ZeroPadding2D(((1, 0), (1, 0)))(x)
padding = 'valid'
x = Conv2D(filters=filters, kernel_size=size,
strides=strides, padding=padding,
use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)
if batch_norm:
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.1)(x)
return x
def DarknetResidual(x, filters):
prev = x
x = DarknetConv(x, filters // 2, 1)
x = DarknetConv(x, filters, 3)
x = Add()([prev, x])
return x
def DarknetBlock(x, filters, blocks):
x = DarknetConv(x, filters, 3, strides=2)
for _ in range(blocks):
x = DarknetResidual(x, filters)
return x
def Darknet(name=None):
x = inputs = Input([None, None, 3])
x = DarknetConv(x, 32, 3)
x = DarknetBlock(x, 64, 1)
x = DarknetBlock(x, 128, 2)
x = x_36 = DarknetBlock(x, 256, 8)
x = x_61 = DarknetBlock(x, 512, 8)
x = DarknetBlock(x, 1024, 4)
return tf.keras.Model(inputs, (x_36, x_61, x), name=name)
def DarknetTiny(name=None):
x = inputs = Input([None, None, 3])
x = DarknetConv(x, 16, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = DarknetConv(x, 32, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = DarknetConv(x, 64, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = DarknetConv(x, 128, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = x_8 = DarknetConv(x, 256, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = DarknetConv(x, 512, 3)
x = MaxPool2D(2, 1, 'same')(x)
x = DarknetConv(x, 1024, 3)
return tf.keras.Model(inputs, (x_8, x), name=name)
def DarknetTiny3L(name=None):
x = inputs = Input([None, None, 3])
x = DarknetConv(x, 16, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = DarknetConv(x, 32, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = DarknetConv(x, 64, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = x_6 = DarknetConv(x, 128, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = x_8 = DarknetConv(x, 256, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = DarknetConv(x, 512, 3)
x = MaxPool2D(2, 1, 'same')(x)
x = DarknetConv(x, 1024, 3)
return tf.keras.Model(inputs, (x_6, x_8, x), name=name)
def YoloConv(filters, name=None):
def yolo_conv(x_in):
if isinstance(x_in, tuple):
inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
x, x_skip = inputs
x = DarknetConv(x, filters, 1)
x = UpSampling2D(2)(x)
x = Concatenate()([x, x_skip])
else:
x = inputs = Input(x_in.shape[1:])
x = DarknetConv(x, filters, 1)
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, filters, 1)
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, filters, 1)
return Model(inputs, x, name=name)(x_in)
return yolo_conv
def YoloConvTiny(filters, name=None):
def yolo_conv(x_in):
if isinstance(x_in, tuple):
inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
x, x_skip = inputs
x = DarknetConv(x, filters, 1)
x = UpSampling2D(2)(x)
x = Concatenate()([x, x_skip])
else:
x = inputs = Input(x_in.shape[1:])
x = DarknetConv(x, filters, 1)
return Model(inputs, x, name=name)(x_in)
return yolo_conv
def YoloOutput(filters, anchors, classes, name=None):
def yolo_output(x_in):
x = inputs = Input(x_in.shape[1:])
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)
x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],
anchors, classes + 5)))(x)
return tf.keras.Model(inputs, x, name=name)(x_in)
return yolo_output
# we reimplemented a simple meshgrid function that use basic tf function.
def _meshgrid(n_a, n_b):
return [
tf.reshape(tf.tile(tf.range(n_a), [n_b]), (n_b, n_a)),
tf.reshape(tf.repeat(tf.range(n_b), n_a), (n_b, n_a))
]
def yolo_boxes(pred, anchors, classes):
# pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
grid_size = tf.shape(pred)[1:3]
box_xy, box_wh, objectness, class_probs = tf.split(
pred, (2, 2, 1, classes), axis=-1)
box_xy = tf.sigmoid(box_xy)
objectness = tf.sigmoid(objectness)
class_probs = tf.sigmoid(class_probs)
pred_box = tf.concat((box_xy, box_wh), axis=-1) # original xywh for loss
# !!! grid[x][y] == (y, x)
grid = _meshgrid(grid_size[1],grid_size[0])
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, tf.float32)) / \
tf.cast(grid_size, tf.float32)
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2
box_x2y2 = box_xy + box_wh / 2
bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
return bbox, objectness, class_probs, pred_box
def yolo_nms(outputs, anchors, masks, classes):
# boxes, conf, type
b, c, t = [], [], []
for o in outputs:
b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))
t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))
bbox = tf.concat(b, axis=1)
confidence = tf.concat(c, axis=1)
class_probs = tf.concat(t, axis=1)
# If we only have one class, do not multiply by class_prob (always 0.5)
if classes == 1:
scores = confidence
else:
scores = confidence * class_probs
dscores = tf.squeeze(scores, axis=0)
scores = tf.reduce_max(dscores,[1])
bbox = tf.reshape(bbox,(-1,4))
classes = tf.argmax(dscores,1)
selected_indices, selected_scores = tf.image.non_max_suppression_with_scores(
boxes=bbox,
scores=scores,
max_output_size=YOLO_MAX_BOXES,
iou_threshold=YOLO_IOU_THRESHOLD,
score_threshold=YOLO_SCORE_THRESHOLD,
soft_nms_sigma=0.5
)
num_valid_nms_boxes = tf.shape(selected_indices)[0]
selected_indices = tf.concat([selected_indices,tf.zeros(YOLO_MAX_BOXES-num_valid_nms_boxes, tf.int32)], 0)
selected_scores = tf.concat([selected_scores,tf.zeros(YOLO_MAX_BOXES-num_valid_nms_boxes,tf.float32)], -1)
boxes=tf.gather(bbox, selected_indices)
boxes = tf.expand_dims(boxes, axis=0)
scores=selected_scores
scores = tf.expand_dims(scores, axis=0)
classes = tf.gather(classes,selected_indices)
classes = tf.expand_dims(classes, axis=0)
valid_detections=num_valid_nms_boxes
valid_detections = tf.expand_dims(valid_detections, axis=0)
return boxes, scores, classes, valid_detections
def YoloV3(size=None, channels=3, anchors=yolo_anchors,
masks=yolo_anchor_masks, classes=80, training=False):
x = inputs = Input([size, size, channels], name='input')
x_36, x_61, x = Darknet(name='yolo_darknet')(x)
x = YoloConv(512, name='yolo_conv_0')(x)
output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)
x = YoloConv(256, name='yolo_conv_1')((x, x_61))
output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)
x = YoloConv(128, name='yolo_conv_2')((x, x_36))
output_2 = YoloOutput(128, len(masks[2]), classes, name='yolo_output_2')(x)
if training:
return Model(inputs, (output_0, output_1, output_2), name='yolov3')
boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
name='yolo_boxes_0')(output_0)
boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
name='yolo_boxes_1')(output_1)
boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),
name='yolo_boxes_2')(output_2)
outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))
return Model(inputs, outputs, name='yolov3')
def YoloV3Tiny(size=None, channels=3, anchors=yolo_tiny_anchors,
masks=yolo_tiny_anchor_masks, classes=80, training=False):
x = inputs = Input([size, size, channels], name='input')
x_8, x = DarknetTiny(name='yolo_darknet')(x)
x = YoloConvTiny(256, name='yolo_conv_0')(x)
output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)
x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))
output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)
if training:
return Model(inputs, (output_0, output_1), name='yolov3')
boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
name='yolo_boxes_0')(output_0)
boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
name='yolo_boxes_1')(output_1)
outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
name='yolo_nms')((boxes_0[:3], boxes_1[:3]))
return Model(inputs, outputs, name='yolov3_tiny')
def YoloV3Tiny3L(size=None, channels=3, anchors=yolo_tiny_3l_anchors,
masks=yolo_tiny_3l_anchor_masks, classes=80, training=False):
x = inputs = Input([size, size, channels], name='input')
x_6, x_8, x = DarknetTiny3L(name='yolo_darknet')(x)
x = YoloConvTiny(256, name='yolo_conv_0')(x)
output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)
x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))
output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)
x = YoloConvTiny(128, name='yolo_conv_2')((x, x_6))
output_2 = YoloOutput(64, len(masks[2]), classes, name='yolo_output_2')(x)
if training:
return Model(inputs, (output_0, output_1, output_2), name='yolov3')
boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
name='yolo_boxes_0')(output_0)
boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
name='yolo_boxes_1')(output_1)
boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),
name='yolo_boxes_2')(output_2)
outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))
return Model(inputs, outputs, name='yolov3_tiny_3l')
def YoloLoss(anchors, classes=80, ignore_thresh=0.5):
def yolo_loss(y_true, y_pred):
# 1. transform all pred outputs
# y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))
pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(
y_pred, anchors, classes)
pred_xy = pred_xywh[..., 0:2]
pred_wh = pred_xywh[..., 2:4]
# 2. transform all true outputs
# y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))
true_box, true_obj, true_class_idx = tf.split(
y_true, (4, 1, 1), axis=-1)
true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
true_wh = true_box[..., 2:4] - true_box[..., 0:2]
# give higher weights to small boxes
box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
# 3. inverting the pred box equations
grid_size = tf.shape(y_true)[1]
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
true_xy = true_xy * tf.cast(grid_size, tf.float32) - \
tf.cast(grid, tf.float32)
true_wh = tf.math.log(true_wh / anchors)
true_wh = tf.where(tf.math.is_inf(true_wh),
tf.zeros_like(true_wh), true_wh)
# 4. calculate all masks
obj_mask = tf.squeeze(true_obj, -1)
# ignore false positive when iou is over threshold
best_iou = tf.map_fn(
lambda x: tf.reduce_max(broadcast_iou(x[0], tf.boolean_mask(
x[1], tf.cast(x[2], tf.bool))), axis=-1),
(pred_box, true_box, obj_mask),
tf.float32)
ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)
# 5. calculate all losses
xy_loss = obj_mask * box_loss_scale * \
tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
wh_loss = obj_mask * box_loss_scale * \
tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
obj_loss = binary_crossentropy(true_obj, pred_obj)
obj_loss = obj_mask * obj_loss + \
(1 - obj_mask) * ignore_mask * obj_loss
class_loss = obj_mask * sparse_categorical_crossentropy(
true_class_idx, pred_class)
# 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)
xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
return xy_loss + wh_loss + obj_loss + class_loss
return yolo_loss
| true | true |
1c2e86674db23f39db6bd3763d114c2f1fca0050 | 2,508 | py | Python | test_jsondb.py | andreixk/SimpleRecipeManagerAPI | 240d536fc6783005c8fb48f54a75dfb7654b7680 | [
"MIT"
] | null | null | null | test_jsondb.py | andreixk/SimpleRecipeManagerAPI | 240d536fc6783005c8fb48f54a75dfb7654b7680 | [
"MIT"
] | null | null | null | test_jsondb.py | andreixk/SimpleRecipeManagerAPI | 240d536fc6783005c8fb48f54a75dfb7654b7680 | [
"MIT"
] | null | null | null | import pytest
from fastapi.testclient import TestClient
from unittest import mock
from unittest.mock import mock_open
import json
from jsondb import PrimitiveStorage, ObjectStorage
from conftest import Stub
class TestStorage():
    """Unit tests for the jsondb PrimitiveStorage / ObjectStorage classes.

    All file access is replaced with mock_open, so only the storage
    logic and the open()/json.dump call patterns are exercised — no
    real files are touched.
    """
    @pytest.mark.parametrize('Storage,fake_data', [
        (PrimitiveStorage, ['a','b']),
        (ObjectStorage, [{'a':'b'}])
    ])
    def test_gets_list_of_items_from_file(self, Storage, fake_data):
        """read() returns the full JSON list stored in <collection>.json."""
        collection = 'hello'
        mocked_open = mock_open(read_data=json.dumps(fake_data))
        with mock.patch('builtins.open', mocked_open) as open_stub:
            store = Storage(collection)
            rs = store.read()
            assert rs == fake_data
            assert Stub(open_stub).called_with(f'{collection}.json', 'r')
    @pytest.mark.parametrize('Storage,fake_data', [
        (PrimitiveStorage, ['a','b','c','d']),
        (ObjectStorage, [{'a':'b'},{'c':'d'},{'e':'f'},{'g':'h'}])
    ])
    def test_can_limit_number_of_items_returned(self, Storage, fake_data):
        """read(start, end) slices the stored list like fake_data[start:end]."""
        collection = 'hello'
        mocked_open = mock_open(read_data=json.dumps(fake_data))
        start=1
        end=3
        with mock.patch('builtins.open', mocked_open) as open_stub:
            store = Storage(collection)
            rs = store.read(start=start,end=end)
            assert rs == fake_data[start:end]
            assert Stub(open_stub).called_with(f'{collection}.json', 'r')
    @pytest.mark.parametrize('Storage,input_data,expected_data', [
        (PrimitiveStorage, ['a','b', 'c'], ['a', 'b', 'c']),
        (ObjectStorage, {'a':'b'}, [{'a':'b'}])
    ])
    @mock.patch('jsondb.json.dump')
    def test_adds_items_to_file(self, jdump_stub, Storage, input_data, expected_data):
        """create() opens <collection>.json for writing and dumps the data."""
        collection = 'byebye'
        mocked_open = mock_open()
        with mock.patch('builtins.open', mocked_open) as open_stub:
            Storage(collection).create(input_data)
            assert Stub(open_stub).called_with(f'{collection}.json', 'w')
            assert Stub(jdump_stub).called_with(expected_data, mocked_open())
    @mock.patch('jsondb.json.dump')
    def test_primitive_adds_unique_sorted_items_to_file(self, jdump_stub):
        """PrimitiveStorage.create() deduplicates and sorts before dumping."""
        collection = 'byebye'
        input_data = ['b', 'b', 'c', 'a', 'a']
        expected_data = ['a','b','c']
        mocked_open = mock_open()
        with mock.patch('builtins.open', mocked_open) as open_stub:
            PrimitiveStorage(collection).create(input_data)
            assert Stub(jdump_stub).called_with(expected_data, mocked_open())
| 41.8 | 86 | 0.639155 | import pytest
from fastapi.testclient import TestClient
from unittest import mock
from unittest.mock import mock_open
import json
from jsondb import PrimitiveStorage, ObjectStorage
from conftest import Stub
class TestStorage():
@pytest.mark.parametrize('Storage,fake_data', [
(PrimitiveStorage, ['a','b']),
(ObjectStorage, [{'a':'b'}])
])
def test_gets_list_of_items_from_file(self, Storage, fake_data):
collection = 'hello'
mocked_open = mock_open(read_data=json.dumps(fake_data))
with mock.patch('builtins.open', mocked_open) as open_stub:
store = Storage(collection)
rs = store.read()
assert rs == fake_data
assert Stub(open_stub).called_with(f'{collection}.json', 'r')
@pytest.mark.parametrize('Storage,fake_data', [
(PrimitiveStorage, ['a','b','c','d']),
(ObjectStorage, [{'a':'b'},{'c':'d'},{'e':'f'},{'g':'h'}])
])
def test_can_limit_number_of_items_returned(self, Storage, fake_data):
collection = 'hello'
mocked_open = mock_open(read_data=json.dumps(fake_data))
start=1
end=3
with mock.patch('builtins.open', mocked_open) as open_stub:
store = Storage(collection)
rs = store.read(start=start,end=end)
assert rs == fake_data[start:end]
assert Stub(open_stub).called_with(f'{collection}.json', 'r')
@pytest.mark.parametrize('Storage,input_data,expected_data', [
(PrimitiveStorage, ['a','b', 'c'], ['a', 'b', 'c']),
(ObjectStorage, {'a':'b'}, [{'a':'b'}])
])
@mock.patch('jsondb.json.dump')
def test_adds_items_to_file(self, jdump_stub, Storage, input_data, expected_data):
collection = 'byebye'
mocked_open = mock_open()
with mock.patch('builtins.open', mocked_open) as open_stub:
Storage(collection).create(input_data)
assert Stub(open_stub).called_with(f'{collection}.json', 'w')
assert Stub(jdump_stub).called_with(expected_data, mocked_open())
@mock.patch('jsondb.json.dump')
def test_primitive_adds_unique_sorted_items_to_file(self, jdump_stub):
collection = 'byebye'
input_data = ['b', 'b', 'c', 'a', 'a']
expected_data = ['a','b','c']
mocked_open = mock_open()
with mock.patch('builtins.open', mocked_open) as open_stub:
PrimitiveStorage(collection).create(input_data)
assert Stub(jdump_stub).called_with(expected_data, mocked_open())
| true | true |
1c2e8693a0bd5d8977f78731eb94fec0b7bbd19d | 22,241 | py | Python | test/functional/test_framework/util.py | minblock/watacoin | 39b64687af9237feed324ee01e330e153aa5bc36 | [
"MIT"
] | null | null | null | test/functional/test_framework/util.py | minblock/watacoin | 39b64687af9237feed324ee01e330e153aa5bc36 | [
"MIT"
] | null | null | null | test/functional/test_framework/util.py | minblock/watacoin | 39b64687af9237feed324ee01e330e153aa5bc36 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import inspect
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert that *fee* is consistent with *tx_size* at rate *fee_per_kB*.

    The upper bound allows the wallet's size estimation to be off by up
    to two bytes.
    """
    target_fee = round(tx_size * fee_per_kB / 1000, 8)
    upper_bound = (tx_size + 2) * fee_per_kB / 1000
    if fee < target_fee:
        raise AssertionError("Fee of %s WTC too low! (Should be %s WTC)" % (str(fee), str(target_fee)))
    if fee > upper_bound:
        raise AssertionError("Fee of %s WTC too high! (Should be %s WTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
    """Raise AssertionError unless every argument compares equal to thing1."""
    everything = (thing1, thing2) + args
    if any(thing1 != other for other in everything[1:]):
        rendered = " == ".join(str(item) for item in everything)
        raise AssertionError("not(%s)" % rendered)
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 <= thing2:
        message = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_greater_than_or_equal(thing1, thing2):
    """Raise AssertionError unless thing1 >= thing2."""
    if thing1 < thing2:
        message = "%s < %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc* (the message is not checked)."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc*, optionally checking *message*.

    *message*, when not None, must be a substring of the raised
    exception's error['message'].  JSONRPCException failures must be
    tested with assert_raises_rpc_error() instead.
    NOTE: assumes *exc* carries an .error dict (JSONRPCException-style) —
    plain exceptions without .error will fail the message check itself.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:" + e.error['message'])
    except Exception as e:
        # Some other exception type escaped: report it instead of letting it propagate.
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Assert that *fun* raises CalledProcessError with the given code and output.

    Args:
        returncode (int): the expected process return code.
        output (string): [a substring of] the expected process output.
        fun (function): the function to call; should execute a process.
        args*: positional arguments forwarded to *fun*.
        kwds**: named arguments forwarded to *fun*.
    """
    try:
        fun(*args, **kwds)
    except CalledProcessError as e:
        if e.returncode != returncode:
            raise AssertionError("Unexpected returncode %i" % e.returncode)
        if output not in e.output:
            raise AssertionError("Expected substring not found:" + e.output)
    else:
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception code and message is raised.

    Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
    and verifies that the error code and message are as expected. Throws AssertionError if
    no JSONRPCException was raised or if the error code/message are not as expected.

    Args:
        code (int), optional: the error code returned by the RPC call (defined
            in src/rpc/protocol.h). Set to None if checking the error code is not required.
        message (string), optional: [a substring of] the error string returned by the
            RPC call. Set to None if checking the error string is not required.
        fun (function): the function to call. This should be the name of an RPC.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    # try_rpc returns True only when a JSONRPCException matching code/message was raised.
    assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
    """Tries to run an rpc command.

    Test against error code and message if the rpc fails.
    Returns whether a JSONRPCException was raised.

    Args:
        code (int or None): expected JSONRPC error code; None skips the check.
        message (str or None): expected substring of the error message; None skips it.
        fun (function): the RPC callable; *args/**kwds are forwarded to it.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # JSONRPCException was thrown as expected. Check the code and message values are correct.
        if (code is not None) and (code != e.error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if (message is not None) and (message not in e.error['message']):
            raise AssertionError("Expected substring not found:" + e.error['message'])
        return True
    except Exception as e:
        # Any non-JSONRPC exception is a test failure, not an RPC failure.
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
def assert_is_hex_string(string):
    """Raise AssertionError unless *string* parses as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as e:
        message = "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e)
        raise AssertionError(message)
def assert_is_hash_string(string, length=64):
    """Assert *string* looks like a lowercase-hex hash of *length* characters.

    Pass a falsy *length* (None/0) to skip the length check.
    """
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """Search object_array for items whose fields match *to_match*.

    Every matching item must also carry all key/value pairs from
    *expected*.  With should_not_find=True, *expected* must be empty and
    the function instead asserts that nothing in object_array matches
    *to_match*.
    """
    if should_not_find:
        assert_equal(expected, {})
    num_matched = 0
    for item in object_array:
        # Does this item carry every key/value pair from to_match?
        # (All keys are checked so a missing key always raises KeyError.)
        matches = True
        for key, value in to_match.items():
            if item[key] != value:
                matches = False
        if not matches:
            continue
        if should_not_find:
            num_matched += 1
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
            num_matched += 1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    amount = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(amount)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by the hex string *hex_string*."""
    return len(bytes.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    """Return the lowercase hex representation of *byte_str* as a str."""
    hex_bytes = hexlify(byte_str)
    return hex_bytes.decode('ascii')
def hash256(byte_str):
    """Return the double-SHA256 digest of *byte_str*, byte-reversed."""
    inner = hashlib.sha256(byte_str).digest()
    outer = hashlib.sha256(inner).digest()
    return outer[::-1]
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string into raw bytes."""
    return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
    """Base64-encode a UTF-8 string and return the result as ASCII text."""
    encoded = b64encode(string.encode('utf-8'))
    return encoded.decode('ascii')
def satoshi_round(amount):
    """Truncate *amount* down to 8 decimal places (1-satoshi resolution)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
    """
    Poll *predicate* every 50ms until it returns true.

    Fails with AssertionError once *attempts* calls or *timeout* seconds
    are exhausted; when neither bound is supplied, the timeout defaults
    to 60 seconds. If *lock* is given it is held around each predicate
    call.
    """
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    attempt = 0
    time_end = time.time() + timeout
    while attempt < attempts and time.time() < time_end:
        if lock:
            with lock:
                if predicate():
                    return
        else:
            if predicate():
                return
        attempt += 1
        time.sleep(0.05)
    # Print the cause of the timeout, including the predicate's source for
    # easier debugging of which condition never became true.
    predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
    logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
    if attempt >= attempts:
        raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
    elif time.time() >= time_end:
        raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
    raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
    # Must be initialized with a unique integer for each process
    n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
    """
    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to
    Kwargs:
        timeout (int): HTTP timeout in seconds
        coveragedir (str): directory to write RPC coverage logs to (optional)
    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    proxy_kwargs = {}
    if timeout is not None:
        proxy_kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **proxy_kwargs)
    proxy.url = url  # store URL on proxy for info
    # Wrap the proxy so each RPC call can be recorded for coverage reporting.
    coverage_logfile = coverage.get_filename(
        coveragedir, node_number) if coveragedir else None
    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    """Return the deterministic P2P listen port for node *n* of this process."""
    assert(n <= MAX_NODES)
    return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
    """Return the deterministic RPC port for node *n* (p2p port + PORT_RANGE)."""
    return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
    """Build the authenticated RPC URL for node *i*; *rpchost* may override
    the default host or host:port."""
    rpc_u, rpc_p = get_auth_cookie(datadir)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        parts = rpchost.split(':')
        if len(parts) == 2:
            host, port = parts
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
    """Create (if needed) and populate the datadir for node *n* under *dirname*.

    Writes a fresh watacoin.conf with deterministic regtest ports and creates
    stderr/stdout capture directories. Returns the datadir path.
    """
    datadir = get_datadir_path(dirname, n)
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    with open(os.path.join(datadir, "watacoin.conf"), 'w', encoding='utf8') as f:
        f.write("regtest=1\n")
        f.write("[regtest]\n")
        f.write("port=" + str(p2p_port(n)) + "\n")
        f.write("rpcport=" + str(rpc_port(n)) + "\n")
        f.write("server=1\n")
        f.write("keypool=1\n")
        f.write("discover=0\n")
        f.write("listenonion=0\n")
        f.write("printtoconsole=0\n")
    # Directories used to capture the node's stderr/stdout streams.
    os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
    os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
    return datadir
def get_datadir_path(dirname, n):
    """Return the datadir path for node number *n* under *dirname*."""
    return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
    """Append each line in *options* to the watacoin.conf in *datadir*."""
    config_path = os.path.join(datadir, "watacoin.conf")
    with open(config_path, 'a', encoding='utf8') as conf:
        conf.writelines(option + "\n" for option in options)
def get_auth_cookie(datadir):
    """Return (user, password) RPC credentials for the node in *datadir*.

    Reads rpcuser/rpcpassword from watacoin.conf first; a readable
    regtest/.cookie file, when present, takes precedence over both.
    Raises ValueError when no complete set of credentials is found.
    """
    user = None
    password = None
    if os.path.isfile(os.path.join(datadir, "watacoin.conf")):
        with open(os.path.join(datadir, "watacoin.conf"), 'r', encoding='utf8') as f:
            for line in f:
                if line.startswith("rpcuser="):
                    assert user is None  # Ensure that there is only one rpcuser line
                    # Split on the first '=' only, so values containing '='
                    # (e.g. base64-ish passwords) are not truncated.
                    user = line.split("=", 1)[1].strip("\n")
                if line.startswith("rpcpassword="):
                    assert password is None  # Ensure that there is only one rpcpassword line
                    password = line.split("=", 1)[1].strip("\n")
    if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")) and os.access(os.path.join(datadir, "regtest", ".cookie"), os.R_OK):
        with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
            userpass = f.read()
            # Cookie format is "user:secret"; split once in case the secret
            # itself contains ':'.
            user, password = userpass.split(':', 1)
    if user is None or password is None:
        raise ValueError("No RPC credentials")
    return user, password
def delete_cookie_file(datadir):
    """Delete a leftover RPC auth cookie in *datadir*, if one exists."""
    cookie_path = os.path.join(datadir, "regtest", ".cookie")
    if os.path.isfile(cookie_path):
        logger.debug("Deleting leftover cookie file")
        os.remove(cookie_path)
def get_bip9_status(node, key):
    """Return the bip9_softforks entry for *key* from *node*'s chain info."""
    chain_info = node.getblockchaininfo()
    return chain_info['bip9_softforks'][key]
def set_node_times(nodes, t):
    """Set the mock time of every node in *nodes* to *t*."""
    for node in nodes:
        node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
    """Disconnect every peer of *from_connection* whose subver identifies
    test node *node_num*, then wait until the disconnect completes."""
    for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
        try:
            from_connection.disconnectnode(nodeid=peer_id)
        except JSONRPCException as e:
            # If this node is disconnected between calculating the peer id
            # and issuing the disconnect, don't worry about it.
            # This avoids a race condition if we're mass-disconnecting peers.
            if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
                raise
    # wait to disconnect
    wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
    """Ask *from_connection* to open a one-shot P2P connection to the local
    node listening on node *node_num*'s deterministic port."""
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other (both directions)."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has least
    one node already synced to the latest, stable tip, otherwise there's a
    chance it might return before all nodes are stably synced.
    """
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        best_hash = [x.getbestblockhash() for x in rpc_connections]
        if best_hash.count(best_hash[0]) == len(rpc_connections):
            return
        time.sleep(wait)
    raise AssertionError("Block sync timed out:{}".format("".join("\n  {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        pool = [set(r.getrawmempool()) for r in rpc_connections]
        if pool.count(pool[0]) == len(rpc_connections):
            if flush_scheduler:
                for r in rpc_connections:
                    r.syncwithvalidationinterfacequeue()
            return
        time.sleep(wait)
    raise AssertionError("Mempool sync timed out:{}".format("".join("\n  {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
    """
    Return the index of the output of *txid* whose value equals *amount*.
    Raises RuntimeError when no such output exists.
    """
    txdata = node.getrawtransaction(txid, 1, blockhash)
    for index, txout in enumerate(txdata["vout"]):
        if txout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return (total_in, inputs): a random selection of *from_node*'s unspent
    txouts whose amounts sum to at least *amount_needed*.
    Raises RuntimeError when the wallet cannot cover the amount.
    """
    assert confirmations_required >= 0
    candidates = from_node.listunspent(confirmations_required)
    random.shuffle(candidates)
    selected = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and candidates:
        txout = candidates.pop()
        total_in += txout["amount"]
        selected.append({"txid": txout["txid"], "vout": txout["vout"], "address": txout["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
    return (total_in, selected)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Build change output(s) for spending *amount_out* + *fee* from *amount_in*.
    Returns a dict mapping fresh addresses from *from_node* to amounts.
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Large change: split off an extra output to break up big inputs,
        # rounding half the change down to satoshi resolution.
        extra_addr = from_node.getnewaddress()
        outputs[extra_addr] = Decimal(change / 2).quantize(
            Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[extra_addr]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    # Sender and receiver are chosen independently (they may coincide).
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    # Randomize the fee in fee_increment steps above min_fee.
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(from_node, amount + fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransactionwithwallet(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Mine and split coins on *node* until at least *count* confirmed
    utxos exist; returns the resulting listunspent() output."""
    # Mine blocks in batches of 25; the extra 101 presumably covers the
    # coinbase maturity window - TODO confirm against chain params.
    to_generate = int(0.5 * count) + 101
    while to_generate > 0:
        node.generate(min(25, to_generate))
        to_generate -= 25
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    # Each iteration splits one utxo into two outputs, netting +1 utxo.
    for i in range(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value / 2)
        outputs[addr2] = satoshi_round(send_value / 2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)
    # Confirm every splitting transaction before returning.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return hex for 128 large OP_RETURN txouts used to bloat a transaction."""
    # One script_pubkey: OP_RETURN OP_PUSHDATA2 <512> followed by 512 0x01 bytes.
    script_pubkey = "6a4d0200" + "01" * 512
    # One serialized txout: 8-byte zero value, script length fd0402 (516),
    # then the script itself.
    single_txout = "0000000000000000" + "fd0402" + script_pubkey
    # Leading "81" is the txout count byte covering these 128 outputs plus
    # the change txout they are spliced in front of.
    return "81" + single_txout * 128
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Send *num* large transactions on *node*, consuming utxos from the
    given (mutated) list; returns the broadcast txids."""
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = satoshi_round(change)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the pre-built txouts into the serialized tx, replacing the
        # byte at hex offset 92-94 - presumably the original txout count;
        # see gen_return_txouts() (TODO confirm against the serialization).
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # Sign with "NONE" so the modified outputs don't invalidate the input
        # signatures.
        signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def mine_large_block(node, utxos=None):
    """Fill a block with 14 large transactions on *node* and mine it."""
    # generate a 66k transaction,
    # and 14 of them is close to the 1MB block limit
    num = 14
    txouts = gen_return_txouts()
    utxos = utxos if utxos is not None else []
    if len(utxos) < num:
        # Not enough caller-supplied utxos; refill from the wallet.
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    node.generate(1)
def find_vout_for_address(node, txid, addr):
    """
    Locate the vout index of the given transaction sending to the
    given address. Raises runtime error exception if not found.
    """
    tx = node.getrawtransaction(txid, True)
    for i in range(len(tx["vout"])):
        # Outputs without an "addresses" key (e.g. non-standard scripts)
        # are treated as non-matching instead of raising KeyError.
        if addr in tx["vout"][i]["scriptPubKey"].get("addresses", []):
            return i
    raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
| 38.882867 | 140 | 0.652309 |
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import inspect
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
nError("Fee of %s WTC too low! (Should be %s WTC)" % (str(fee), str(target_fee)))
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s WTC too high! (Should be %s WTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
    """Raise AssertionError unless every argument compares equal to *thing1*."""
    everything = (thing1, thing2) + args
    if any(thing1 != other for other in everything[1:]):
        raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in everything))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless *thing1* is strictly greater than *thing2*."""
    if thing1 <= thing2:
        message = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_greater_than_or_equal(thing1, thing2):
    """Raise AssertionError unless *thing1* is at least *thing2*."""
    if thing1 < thing2:
        message = "%s < %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises an exception of type *exc*."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert fun(*args, **kwds) raises *exc*; when *message* is not None it
    must appear as a substring of the exception's error message."""
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        # NOTE(review): assumes *exc* exposes a JSONRPC-style .error dict;
        # a plain exception would hit AttributeError here - confirm callers.
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:" + e.error['message'])
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Assert fun(*args, **kwds) raises CalledProcessError with the given
    *returncode* and an output containing the *output* substring."""
    try:
        fun(*args, **kwds)
    except CalledProcessError as e:
        if returncode != e.returncode:
            raise AssertionError("Unexpected returncode %i" % e.returncode)
        if output not in e.output:
            raise AssertionError("Expected substring not found:" + e.output)
    else:
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Assert fun(...) fails with a JSONRPC error matching *code*/*message*."""
    assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
    """Call fun(...); return True when a matching JSONRPCException was
    raised, False when no exception occurred. A JSONRPCException with the
    wrong code/message, or any other exception, raises AssertionError."""
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # JSONRPCException was thrown as expected. Check the code and message values are correct.
        if (code is not None) and (code != e.error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if (message is not None) and (message not in e.error['message']):
            raise AssertionError("Expected substring not found:" + e.error['message'])
        return True
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
PORT_RANGE = 5000
class PortSeed:
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
os.makedirs(datadir)
with open(os.path.join(datadir, "watacoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("[regtest]\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "watacoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "watacoin.conf")):
with open(os.path.join(datadir, "watacoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")) and os.access(os.path.join(datadir, "regtest", ".cookie"), os.R_OK):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29:
raise
wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
time.sleep(wait)
raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
equired >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
change_address = from_node.getnewaddress()
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransactionwithwallet(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
def gen_return_txouts():
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
txouts = txouts + "0000000000000000"
txouts = txouts + "fd0402"
txouts = txouts + script_pubkey
return txouts
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
| true | true |
1c2e869b0603b84c2ae7af7ba218f65b9f92ec56 | 17,754 | py | Python | wagl/data.py | ASVincent/wagl | cf3a72e53e53f3a7b2f2b5308068069b1b714f2a | [
"Apache-2.0"
] | null | null | null | wagl/data.py | ASVincent/wagl | cf3a72e53e53f3a7b2f2b5308068069b1b714f2a | [
"Apache-2.0"
] | null | null | null | wagl/data.py | ASVincent/wagl | cf3a72e53e53f3a7b2f2b5308068069b1b714f2a | [
"Apache-2.0"
] | 1 | 2019-01-23T00:51:56.000Z | 2019-01-23T00:51:56.000Z | """
Data access functions
---------------------
"""
from __future__ import absolute_import
from os.path import join as pjoin, basename, dirname
import subprocess
import tempfile
import logging
import numpy as np
import h5py
import rasterio
from rasterio.crs import CRS
from rasterio.warp import reproject
from rasterio.enums import Resampling
from wagl.geobox import GriddedGeoBox
from wagl.tiling import generate_tiles
def get_pixel(filename, lonlat, band=1):
    """Return a pixel from `filename` at the longitude and latitude given
    by the tuple `lonlat`. Optionally, the `band` can be specified."""
    with rasterio.open(filename) as src:
        # Invert the affine transform to map world coords to (col, row).
        # NOTE(review): assumes *lonlat* is expressed in the dataset's CRS -
        # confirm for projected rasters.
        x, y = [int(v) for v in ~src.transform * lonlat]
        if isinstance(band, list):
            # Multiple bands: read a 1x1 window from each and flatten to 1D.
            data = src.read(band, window=((y, y + 1), (x, x + 1))).ravel()
        else:
            # Single band: return the scalar pixel value.
            data = src.read(band, window=((y, y + 1), (x, x + 1))).flat[0]
        return data
def select_acquisitions(acqs_list, fn=(lambda acq: True)):
    """
    Return the acquisitions in *acqs_list* for which the predicate *fn*
    is truthy; by default every acquisition is kept.
    """
    return [acquisition for acquisition in acqs_list if fn(acquisition)]
def stack_data(acqs_list, fn=(lambda acq: True), window=None, masked=False):
    """
    Given a list of acquisitions, return the data from each acquisition
    collected in a 3D numpy array (first index is the acquisition number).
    If window is defined, then the subset contained within the window is
    returned along with a GriddedGeoBox instance detailing the
    spatial information associated with that subset.
    :param acqs_list:
        The list of acquisitions from which to generate a stack of data.
    :param fn:
        Unused in the current implementation; retained for interface
        compatibility.
    :param window:
        Defines a subset ((ystart, yend), (xstart, xend)) in array
        co-ordinates. Default is None.
    :param masked:
        Indicates whether or not to return a masked array. Default is False.
    :return:
        A 2-tuple containing:
            * 1. A 3D numpy array (or None) containing the corresponding
                 acquisition data. (None if no data).
            * 2. A GriddedGeoBox instance specifying the spatial context
                 of the 3D numpy array. Note: All Acquisitions share the
                 same GriddedGeoBox.
    """
    # Determine datatype and dimensions by reading the first acquisition.
    # NOTE(review): assumes acqs_list is non-empty - an empty list raises
    # IndexError here.
    acqs = acqs_list
    a, geo_box = acqs[0].data_and_box(window=window, masked=masked)
    # Pre-allocate the stack using the first band's dtype and shape.
    stack_shape = (len(acqs), a.shape[0], a.shape[1])
    stack = np.empty(stack_shape, a.dtype)
    stack[0] = a
    del a
    # Read the remaining acquisitions directly into the stack.
    for i in range(1, stack_shape[0]):
        # Assigning into the pre-shaped slice means acquisitions with a
        # mismatched grid raise an error here rather than being silently
        # resampled.
        stack[i] = acqs[i].data(window=window, masked=masked)
    return stack, geo_box
def write_img(array, filename, driver='GTiff', geobox=None, nodata=None,
              tags=None, options=None, cogtif=False, levels=None,
              resampling=Resampling.nearest):
    """
    Writes a 2D/3D image to disk using rasterio.

    :param array:
        A 2D/3D NumPy array.

    :param filename:
        A string containing the output file name.

    :param driver:
        A string containing a GDAL compliant image driver. Default is
        'GTiff'.

    :param geobox:
        An instance of a GriddedGeoBox object.

    :param nodata:
        A value representing the no data value for the array.

    :param tags:
        A dictionary of dataset-level metadata.

    :param options:
        A dictionary containing other dataset creation options.
        See creation options for the respective GDAL formats.

    :param cogtif:
        If set to True, override the `driver` keyword with `GTiff`
        and create a Cloud Optimised GeoTiff. Default is False.
        See:
        https://trac.osgeo.org/gdal/wiki/CloudOptimizedGeoTIFF

    :param levels:
        If cogtif is set to True, build overviews/pyramids
        according to levels. Default levels are [2, 4, 8, 16, 32].

    :param resampling:
        If cogtif is set to True, build overviews/pyramids using
        a resampling method from `rasterio.enums.Resampling`.
        Default is `Resampling.nearest`.

    :raises TypeError:
        If the array dtype is one of the unsupported types
        (int8, int64, uint64).

    :raises IndexError:
        If the array is not 2 or 3 dimensional.

    :notes:
        If array is an instance of a `h5py.Dataset`, then the output
        file will include blocksizes based on the `h5py.Dataset's`
        chunks. To override the blocksizes, specify them using the
        `options` keyword. Eg {'blockxsize': 512, 'blockysize': 512}.
        If `cogtif` is set to True, the default blocksizes will be
        256x256. To override this behaviour, specify them using the
        `options` keyword. Eg {'blockxsize': 512, 'blockysize': 512}.
    """
    # Get the datatype of the array
    dtype = array.dtype.name

    # Check for excluded datatypes
    excluded_dtypes = ['int64', 'int8', 'uint64']
    if dtype in excluded_dtypes:
        msg = "Datatype not supported: {dt}".format(dt=dtype)
        raise TypeError(msg)

    # convert any bools to uint8 (GDAL has no boolean type)
    if dtype == 'bool':
        array = np.uint8(array)
        dtype = 'uint8'

    ndims = array.ndim
    dims = array.shape

    # Get the (z, y, x) dimensions (assuming BSQ interleave)
    if ndims == 2:
        samples = dims[1]
        lines = dims[0]
        bands = 1
    elif ndims == 3:
        samples = dims[2]
        lines = dims[1]
        bands = dims[0]
    else:
        logging.error('Input array is not of 2 or 3 dimensions!!!')
        err = 'Array dimensions: {dims}'.format(dims=ndims)
        raise IndexError(err)

    # If we have a geobox, then retrieve the geotransform and projection
    if geobox is not None:
        transform = geobox.transform
        projection = geobox.crs.ExportToWkt()
    else:
        transform = None
        projection = None

    # override the driver if we are creating a cogtif
    if cogtif:
        driver = 'GTiff'

    # compression predictor choices (2: horizontal differencing for
    # integer types, 3: floating point predictor)
    predictor = {'int8': 2,
                 'uint8': 2,
                 'int16': 2,
                 'uint16': 2,
                 'int32': 2,
                 'uint32': 2,
                 'int64': 2,
                 'uint64': 2,
                 'float32': 3,
                 'float64': 3}

    kwargs = {'count': bands,
              'width': samples,
              'height': lines,
              'crs': projection,
              'transform': transform,
              'dtype': dtype,
              'driver': driver,
              'nodata': nodata,
              'predictor': predictor[dtype]}

    if isinstance(array, h5py.Dataset):
        # TODO: if array is 3D get x & y chunks
        if array.chunks[1] == array.shape[1]:
            # GDAL doesn't like tiled or blocksize options to be set
            # the same length as the columns (probably true for rows as well)
            # Materialise to a plain ndarray; the tiled write path below
            # is then skipped because the isinstance check fails.
            array = array[:]
        else:
            y_tile, x_tile = array.chunks
            tiles = generate_tiles(samples, lines, x_tile, y_tile)

            # add blocksizes to the creation keywords
            kwargs['tiled'] = 'yes'
            kwargs['blockxsize'] = x_tile
            kwargs['blockysize'] = y_tile

    # the user can override any derived blocksizes by supplying `options`
    if options is not None:
        for key in options:
            kwargs[key] = options[key]

    with tempfile.TemporaryDirectory() as tmpdir:
        # For a COG, write to a temporary location first; the final file
        # is produced by gdal_translate with copied overviews.
        out_fname = pjoin(tmpdir, basename(filename)) if cogtif else filename
        with rasterio.open(out_fname, 'w', **kwargs) as outds:
            if bands == 1:
                if isinstance(array, h5py.Dataset):
                    # stream tile-by-tile to avoid loading the whole dataset
                    for tile in tiles:
                        idx = (slice(tile[0][0], tile[0][1]),
                               slice(tile[1][0], tile[1][1]))
                        outds.write(array[idx], 1, window=tile)
                else:
                    outds.write(array, 1)
            else:
                if isinstance(array, h5py.Dataset):
                    for tile in tiles:
                        idx = (slice(tile[0][0], tile[0][1]),
                               slice(tile[1][0], tile[1][1]))
                        subs = array[:, idx[0], idx[1]]
                        for i in range(bands):
                            outds.write(subs[i], i + 1, window=tile)
                else:
                    for i in range(bands):
                        outds.write(array[i], i + 1)
            if tags is not None:
                outds.update_tags(**tags)

            # overviews/pyramids
            if cogtif:
                if levels is None:
                    levels = [2, 4, 8, 16, 32]
                outds.build_overviews(levels, resampling)

        if cogtif:
            cmd = ['gdal_translate',
                   '-co',
                   'TILED=YES',
                   '-co',
                   'COPY_SRC_OVERVIEWS=YES',
                   '-co',
                   '{}={}'.format('PREDICTOR', predictor[dtype])]

            # BUG FIX: `options` defaults to None; the original called
            # `options.items()` unconditionally, raising AttributeError
            # whenever cogtif=True was used without creation options.
            for key, value in (options or {}).items():
                cmd.extend(['-co', '{}={}'.format(key, value)])

            cmd.extend([out_fname, filename])
            subprocess.check_call(cmd, cwd=dirname(filename))
def read_subset(fname, ul_xy, ur_xy, lr_xy, ll_xy, bands=1):
    """
    Return a 2D or 3D NumPy array subsetted to the given bounding
    extents, together with a GriddedGeoBox describing the subset.

    :param fname:
        Either a string containing the full file pathname to an image
        on disk, or an open `h5py.Dataset` carrying a 'crs_wkt'
        attribute.

    :param ul_xy:
        A tuple containing the Upper Left (x,y) co-ordinate pair
        in real world (map) co-ordinates. Co-ordinate pairs can be
        (longitude, latitude) or (eastings, northings), but they must
        be of the same reference as the image of interest.

    :param ur_xy:
        A tuple containing the Upper Right (x,y) co-ordinate pair
        in real world (map) co-ordinates, same reference as above.

    :param lr_xy:
        A tuple containing the Lower Right (x,y) co-ordinate pair
        in real world (map) co-ordinates, same reference as above.

    :param ll_xy:
        A tuple containing the Lower Left (x,y) co-ordinate pair
        in real world (map) co-ordinates, same reference as above.

    :param bands:
        Can be an integer of list of integers representing the band(s)
        to be read from disk. If bands is a list, then the returned
        subset will be 3D, otherwise the subset will be strictly 2D.
        Ignored when `fname` is a `h5py.Dataset`.

    :return:
        A 2-tuple containing:

            * 1. A 2D or 3D NumPy array containing the image subset.
            * 2. A GriddedGeoBox instance describing the spatial
                 context (origin, pixelsize, crs) of the subset.

    :raises IndexError:
        If the requested subset falls outside the image domain.

    :additional notes:
        The ending array co-ordinates are increased by +1,
        i.e. xend = 270 + 1
        to account for Python's [inclusive, exclusive) index notation.
    """
    # A h5py.Dataset carries its own geobox/CRS metadata; a file path
    # is interrogated via rasterio.
    if isinstance(fname, h5py.Dataset):
        geobox = GriddedGeoBox.from_dataset(fname)
        prj = fname.attrs['crs_wkt']
    else:
        # Open the file
        with rasterio.open(fname) as src:
            # Get the inverse transform of the affine co-ordinate reference
            geobox = GriddedGeoBox.from_dataset(src)
            prj = src.crs.wkt # rasterio returns a unicode
    inv = ~geobox.transform
    rows, cols = geobox.shape

    # Convert each map co-ordinate to image/array co-ordinates
    img_ul_x, img_ul_y = [int(v) for v in inv * ul_xy]
    img_ur_x, img_ur_y = [int(v) for v in inv * ur_xy]
    img_lr_x, img_lr_y = [int(v) for v in inv * lr_xy]
    img_ll_x, img_ll_y = [int(v) for v in inv * ll_xy]

    # Calculate the min and max array extents
    # The ending array extents have +1 to account for Python's
    # [inclusive, exclusive) index notation.
    xstart = min(img_ul_x, img_ll_x)
    ystart = min(img_ul_y, img_ur_y)
    xend = max(img_ur_x, img_lr_x) + 1
    yend = max(img_ll_y, img_lr_y) + 1

    # Check for out of bounds
    if (((xstart < 0) or (ystart < 0)) or
            ((xend -1 > cols) or (yend -1 > rows))):
        # NOTE(review): the two string fragments below concatenate
        # without a space ("the" + "image"); message text left untouched
        # as it is runtime behaviour.
        msg = ("Error! Attempt to read a subset that is outside of the"
               "image domain. Index: ({ys}, {ye}), ({xs}, {xe}))")
        msg = msg.format(ys=ystart, ye=yend, xs=xstart, xe=xend)
        raise IndexError(msg)

    if isinstance(fname, h5py.Dataset):
        subs = fname[ystart:yend, xstart:xend]
    else:
        with rasterio.open(fname) as src:
            subs = src.read(bands, window=((ystart, yend), (xstart, xend)))

    # Get the new UL co-ordinates of the array
    ul_x, ul_y = geobox.transform * (xstart, ystart)

    geobox_subs = GriddedGeoBox(shape=subs.shape, origin=(ul_x, ul_y),
                                pixelsize=geobox.pixelsize, crs=prj)

    return (subs, geobox_subs)
def reproject_file_to_array(src_filename, src_band=1, dst_geobox=None,
                            resampling=Resampling.nearest):
    """
    Reproject a single band of an on-disk image into the co-ordinate
    reference system described by `dst_geobox`.

    :param src_filename:
        A string containing the full file path name to the source
        image on disk.

    :param src_band:
        An integer representing the band number to be reprojected.
        Default is 1, the 1st band.

    :param dst_geobox:
        An instance of a GriddedGeoBox object containing the
        destination parameters such as origin, affine, projection,
        and array dimensions.

    :param resampling:
        A `rasterio.enums.Resampling` member (or equivalent integer).
        Default is nearest neighbour resampling.

    :return:
        A NumPy array containing the reprojected result.

    :raises TypeError:
        If `dst_geobox` is not a GriddedGeoBox instance.
    """
    if not isinstance(dst_geobox, GriddedGeoBox):
        raise TypeError(
            'dst_geobox must be an instance of a GriddedGeoBox! Type: {}'
            .format(type(dst_geobox)))

    with rasterio.open(src_filename) as src:
        # Wrap the requested band so rasterio can reproject it directly.
        source_band = rasterio.band(src, src_band)

        # Output array matches the destination grid, source datatype.
        reprojected = np.zeros(dst_geobox.shape, dtype=src.dtypes[0])

        # Destination CRS expressed in rasterio's proj4-styled form.
        dst_crs = CRS.from_string(dst_geobox.crs.ExportToProj4())

        reproject(source_band, reprojected,
                  dst_transform=dst_geobox.transform,
                  dst_crs=dst_crs, resampling=resampling)

        return reprojected
def reproject_img_to_img(src_img, src_geobox, dst_geobox,
                         resampling=Resampling.nearest):
    """
    Reproject an in-memory image/array from one co-ordinate reference
    system (described by `src_geobox`) into another (`dst_geobox`).

    :param src_img:
        A NumPy array containing the source image.

    :param src_geobox:
        An instance of a GriddedGeoBox object containing the
        source parameters such as origin, affine, projection.

    :param dst_geobox:
        An instance of a GriddedGeoBox object containing the
        destination parameters such as origin, affine, projection,
        and array dimensions.

    :param resampling:
        A `rasterio.enums.Resampling` member (or equivalent integer).
        Default is nearest neighbour resampling.

    :return:
        A NumPy array containing the reprojected result.

    :raises TypeError:
        If either geobox argument is not a GriddedGeoBox instance.
    """
    if not isinstance(dst_geobox, GriddedGeoBox):
        raise TypeError(
            'dst_geobox must be an instance of a GriddedGeoBox! Type: {}'
            .format(type(dst_geobox)))

    if not isinstance(src_geobox, GriddedGeoBox):
        raise TypeError(
            'src_geobox must be an instance of a GriddedGeoBox! Type: {}'
            .format(type(src_geobox)))

    # Both projections expressed in rasterio's proj4-styled form.
    source_crs = CRS.from_string(src_geobox.crs.ExportToProj4())
    target_crs = CRS.from_string(dst_geobox.crs.ExportToProj4())

    # Output array matches the destination grid, source datatype.
    reprojected = np.zeros(dst_geobox.shape, dtype=src_img.dtype)

    reproject(src_img, reprojected,
              src_transform=src_geobox.transform, src_crs=source_crs,
              dst_transform=dst_geobox.transform, dst_crs=target_crs,
              resampling=resampling)

    return reprojected
def as_array(array, dtype, transpose=False):
    """
    Return `array` converted to `dtype`, converting only when the
    dtypes actually differ; optionally transpose the result.

    :param array:
        A NumPy array.

    :param dtype:
        The type to return the array as.

    :type dtype:
        A NumPy data type (e.g. ``numpy.float32``).

    :param transpose:
        If set then array will be transposed before returning.
        Useful for passing arrays into Fortran routines. Default is
        False.

    :type transpose:
        Bool.

    :return:
        A :py:class:`numpy.ndarray` of type ``dtype`` with the same
        dimensions as array.
    """
    # Avoid an unnecessary copy when the dtype already matches.
    converted = array if array.dtype == dtype else array.astype(dtype)
    return converted.transpose() if transpose else converted
| 34.340426 | 77 | 0.611299 |
from __future__ import absolute_import
from os.path import join as pjoin, basename, dirname
import subprocess
import tempfile
import logging
import numpy as np
import h5py
import rasterio
from rasterio.crs import CRS
from rasterio.warp import reproject
from rasterio.enums import Resampling
from wagl.geobox import GriddedGeoBox
from wagl.tiling import generate_tiles
def get_pixel(filename, lonlat, band=1):
with rasterio.open(filename) as src:
x, y = [int(v) for v in ~src.transform * lonlat]
if isinstance(band, list):
data = src.read(band, window=((y, y + 1), (x, x + 1))).ravel()
else:
data = src.read(band, window=((y, y + 1), (x, x + 1))).flat[0]
return data
def select_acquisitions(acqs_list, fn=(lambda acq: True)):
acqs = [acq for acq in acqs_list if fn(acq)]
return acqs
def stack_data(acqs_list, fn=(lambda acq: True), window=None, masked=False):
acqs = acqs_list
a, geo_box = acqs[0].data_and_box(window=window, masked=masked)
stack_shape = (len(acqs), a.shape[0], a.shape[1])
stack = np.empty(stack_shape, a.dtype)
stack[0] = a
del a
for i in range(1, stack_shape[0]):
# resampled. But we want an exception thrown if the user
# tries to stack irreqular aquisitions
stack[i] = acqs[i].data(window=window, masked=masked)
return stack, geo_box
def write_img(array, filename, driver='GTiff', geobox=None, nodata=None,
tags=None, options=None, cogtif=False, levels=None,
resampling=Resampling.nearest):
# Get the datatype of the array
dtype = array.dtype.name
# Check for excluded datatypes
excluded_dtypes = ['int64', 'int8', 'uint64']
if dtype in excluded_dtypes:
msg = "Datatype not supported: {dt}".format(dt=dtype)
raise TypeError(msg)
# convert any bools to uin8
if dtype == 'bool':
array = np.uint8(array)
dtype = 'uint8'
ndims = array.ndim
dims = array.shape
# Get the (z, y, x) dimensions (assuming BSQ interleave)
if ndims == 2:
samples = dims[1]
lines = dims[0]
bands = 1
elif ndims == 3:
samples = dims[2]
lines = dims[1]
bands = dims[0]
else:
logging.error('Input array is not of 2 or 3 dimensions!!!')
err = 'Array dimensions: {dims}'.format(dims=ndims)
raise IndexError(err)
# If we have a geobox, then retrieve the geotransform and projection
if geobox is not None:
transform = geobox.transform
projection = geobox.crs.ExportToWkt()
else:
transform = None
projection = None
# override the driver if we are creating a cogtif
if cogtif:
driver = 'GTiff'
# compression predictor choices
predictor = {'int8': 2,
'uint8': 2,
'int16': 2,
'uint16': 2,
'int32': 2,
'uint32': 2,
'int64': 2,
'uint64': 2,
'float32': 3,
'float64': 3}
kwargs = {'count': bands,
'width': samples,
'height': lines,
'crs': projection,
'transform': transform,
'dtype': dtype,
'driver': driver,
'nodata': nodata,
'predictor': predictor[dtype]}
if isinstance(array, h5py.Dataset):
# TODO: if array is 3D get x & y chunks
if array.chunks[1] == array.shape[1]:
# GDAL doesn't like tiled or blocksize options to be set
array = array[:]
else:
y_tile, x_tile = array.chunks
tiles = generate_tiles(samples, lines, x_tile, y_tile)
kwargs['tiled'] = 'yes'
kwargs['blockxsize'] = x_tile
kwargs['blockysize'] = y_tile
if options is not None:
for key in options:
kwargs[key] = options[key]
with tempfile.TemporaryDirectory() as tmpdir:
out_fname = pjoin(tmpdir, basename(filename)) if cogtif else filename
with rasterio.open(out_fname, 'w', **kwargs) as outds:
if bands == 1:
if isinstance(array, h5py.Dataset):
for tile in tiles:
idx = (slice(tile[0][0], tile[0][1]),
slice(tile[1][0], tile[1][1]))
outds.write(array[idx], 1, window=tile)
else:
outds.write(array, 1)
else:
if isinstance(array, h5py.Dataset):
for tile in tiles:
idx = (slice(tile[0][0], tile[0][1]),
slice(tile[1][0], tile[1][1]))
subs = array[:, idx[0], idx[1]]
for i in range(bands):
outds.write(subs[i], i + 1, window=tile)
else:
for i in range(bands):
outds.write(array[i], i + 1)
if tags is not None:
outds.update_tags(**tags)
if cogtif:
if levels is None:
levels = [2, 4, 8, 16, 32]
outds.build_overviews(levels, resampling)
if cogtif:
cmd = ['gdal_translate',
'-co',
'TILED=YES',
'-co',
'COPY_SRC_OVERVIEWS=YES',
'-co',
'{}={}'.format('PREDICTOR', predictor[dtype])]
for key, value in options.items():
cmd.extend(['-co', '{}={}'.format(key, value)])
cmd.extend([out_fname, filename])
subprocess.check_call(cmd, cwd=dirname(filename))
def read_subset(fname, ul_xy, ur_xy, lr_xy, ll_xy, bands=1):
if isinstance(fname, h5py.Dataset):
geobox = GriddedGeoBox.from_dataset(fname)
prj = fname.attrs['crs_wkt']
else:
with rasterio.open(fname) as src:
geobox = GriddedGeoBox.from_dataset(src)
prj = src.crs.wkt
inv = ~geobox.transform
rows, cols = geobox.shape
img_ul_x, img_ul_y = [int(v) for v in inv * ul_xy]
img_ur_x, img_ur_y = [int(v) for v in inv * ur_xy]
img_lr_x, img_lr_y = [int(v) for v in inv * lr_xy]
img_ll_x, img_ll_y = [int(v) for v in inv * ll_xy]
# [inclusive, exclusive) index notation.
xstart = min(img_ul_x, img_ll_x)
ystart = min(img_ul_y, img_ur_y)
xend = max(img_ur_x, img_lr_x) + 1
yend = max(img_ll_y, img_lr_y) + 1
# Check for out of bounds
if (((xstart < 0) or (ystart < 0)) or
((xend -1 > cols) or (yend -1 > rows))):
msg = ("Error! Attempt to read a subset that is outside of the"
"image domain. Index: ({ys}, {ye}), ({xs}, {xe}))")
msg = msg.format(ys=ystart, ye=yend, xs=xstart, xe=xend)
raise IndexError(msg)
if isinstance(fname, h5py.Dataset):
subs = fname[ystart:yend, xstart:xend]
else:
with rasterio.open(fname) as src:
subs = src.read(bands, window=((ystart, yend), (xstart, xend)))
# Get the new UL co-ordinates of the array
ul_x, ul_y = geobox.transform * (xstart, ystart)
geobox_subs = GriddedGeoBox(shape=subs.shape, origin=(ul_x, ul_y),
pixelsize=geobox.pixelsize, crs=prj)
return (subs, geobox_subs)
def reproject_file_to_array(src_filename, src_band=1, dst_geobox=None,
resampling=Resampling.nearest):
if not isinstance(dst_geobox, GriddedGeoBox):
msg = 'dst_geobox must be an instance of a GriddedGeoBox! Type: {}'
msg = msg.format(type(dst_geobox))
raise TypeError(msg)
with rasterio.open(src_filename) as src:
# Define a rasterio band
rio_band = rasterio.band(src, src_band)
# Define the output NumPy array
dst_arr = np.zeros(dst_geobox.shape, dtype=src.dtypes[0])
# Get the rasterio proj4 styled dict
prj = CRS.from_string(dst_geobox.crs.ExportToProj4())
reproject(rio_band, dst_arr, dst_transform=dst_geobox.transform,
dst_crs=prj, resampling=resampling)
return dst_arr
def reproject_img_to_img(src_img, src_geobox, dst_geobox,
resampling=Resampling.nearest):
if not isinstance(dst_geobox, GriddedGeoBox):
msg = 'dst_geobox must be an instance of a GriddedGeoBox! Type: {}'
msg = msg.format(type(dst_geobox))
raise TypeError(msg)
if not isinstance(src_geobox, GriddedGeoBox):
msg = 'src_geobox must be an instance of a GriddedGeoBox! Type: {}'
msg = msg.format(type(src_geobox))
raise TypeError(msg)
# Get the source and destination projections in Proj4 styled dicts
src_prj = CRS.from_string(src_geobox.crs.ExportToProj4())
dst_prj = CRS.from_string(dst_geobox.crs.ExportToProj4())
# Get the source and destination transforms
src_trans = src_geobox.transform
dst_trans = dst_geobox.transform
# Define the output NumPy array
dst_arr = np.zeros(dst_geobox.shape, dtype=src_img.dtype)
reproject(src_img, dst_arr, src_transform=src_trans,
src_crs=src_prj, dst_transform=dst_trans, dst_crs=dst_prj,
resampling=resampling)
return dst_arr
def as_array(array, dtype, transpose=False):
if array.dtype != dtype:
if transpose:
return array.astype(dtype).transpose()
return array.astype(dtype)
if transpose:
return array.transpose()
return array
| true | true |
1c2e87352e339ee35eb0dfb1569501950c26402b | 388 | py | Python | nudni/wsgi.py | misli/nudni | a0c03cd3104a57d5104dd1f593a777a3571fca4d | [
"BSD-3-Clause"
] | 2 | 2017-11-18T11:12:10.000Z | 2017-11-18T11:12:12.000Z | nudni/wsgi.py | misli/nudni | a0c03cd3104a57d5104dd1f593a777a3571fca4d | [
"BSD-3-Clause"
] | null | null | null | nudni/wsgi.py | misli/nudni | a0c03cd3104a57d5104dd1f593a777a3571fca4d | [
"BSD-3-Clause"
] | null | null | null | """
WSGI config for nudni project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module unless the environment
# already specifies one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nudni.settings")
# Module-level WSGI callable that application servers (gunicorn, uWSGI,
# mod_wsgi, ...) import as the entry point.
application = get_wsgi_application()
| 22.823529 | 78 | 0.783505 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nudni.settings")
application = get_wsgi_application()
| true | true |
1c2e875a17af7e0890ffebcddd9c7877a9fa8af2 | 1,168 | py | Python | setup.py | yucelkilic/ASTRiDE | cca4e3ee0d907c3675d8f2dcf9246acf69027dae | [
"MIT"
] | null | null | null | setup.py | yucelkilic/ASTRiDE | cca4e3ee0d907c3675d8f2dcf9246acf69027dae | [
"MIT"
] | null | null | null | setup.py | yucelkilic/ASTRiDE | cca4e3ee0d907c3675d8f2dcf9246acf69027dae | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
def readme():
    """Return the contents of README.rst (used as the long description)."""
    with open('README.rst') as readme_file:
        contents = readme_file.read()
    return contents
# Package metadata and install configuration for the `astride`
# distribution (streak detection for high-velocity objects).
setup(
    name='astride',
    version='0.3.5',
    description='Automated Streak Detection for High Velocity Objects',
    long_description=readme(),
    platforms=['any'],
    packages=find_packages(),
    include_package_data=True,
    url='https://github.com/dwkim78/astride',
    license='MIT',
    author='Dae-Won Kim',
    author_email='dwkim78@gmail.com',
    # Runtime dependencies with minimum tested versions.
    install_requires=['numpy>=1.14', 'photutils>=0.4', 'astropy>=3.0',
                      'matplotlib>=2.1.1', 'scipy>=1.0.0',
                      'scikit-image>=0.13.1'],
    keywords=['astronomy', 'image', 'streak', 'satellite', 'meteor', 'NEO',
              'fast-moving objects', 'boundary-tracing', 'contour-tracing'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.5',
        'Topic :: Scientific/Engineering :: Astronomy'
    ]
)
| 34.352941 | 76 | 0.598459 | from setuptools import find_packages, setup
def readme():
with open('README.rst') as f:
return f.read()
setup(
name='astride',
version='0.3.5',
description='Automated Streak Detection for High Velocity Objects',
long_description=readme(),
platforms=['any'],
packages=find_packages(),
include_package_data=True,
url='https://github.com/dwkim78/astride',
license='MIT',
author='Dae-Won Kim',
author_email='dwkim78@gmail.com',
install_requires=['numpy>=1.14', 'photutils>=0.4', 'astropy>=3.0',
'matplotlib>=2.1.1', 'scipy>=1.0.0',
'scikit-image>=0.13.1'],
keywords=['astronomy', 'image', 'streak', 'satellite', 'meteor', 'NEO',
'fast-moving objects', 'boundary-tracing', 'contour-tracing'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Astronomy'
]
)
| true | true |
1c2e878fc87fe5d8f0093738d896b1c599ff30cc | 3,195 | py | Python | cobrinha trabalhadeira/lab06/lab6MT.py | ansattz/solving_sto | 4f66a0a78fdbfe101f07e0f7cb8f7c8b595f16b9 | [
"MIT"
] | null | null | null | cobrinha trabalhadeira/lab06/lab6MT.py | ansattz/solving_sto | 4f66a0a78fdbfe101f07e0f7cb8f7c8b595f16b9 | [
"MIT"
] | null | null | null | cobrinha trabalhadeira/lab06/lab6MT.py | ansattz/solving_sto | 4f66a0a78fdbfe101f07e0f7cb8f7c8b595f16b9 | [
"MIT"
] | null | null | null | # Douglas Vieira
# @ansattz
#1 - Não delete nem modifique esta linha
def lettercount(s):
    """Return the number of whitespace-separated words in *s*.

    NOTE(review): despite the name, this counts words, not letters —
    the lab exercise (question 1) expects a word count.
    """
    return len(s.split())
#Casos de teste da questão 1 - Não delete nem modifique esta linha
print(lettercount("Vamos terminar este lab hoje mesmo"))
# 2 - Não delete nem modifique esta linha
def phrasecount(s):
    """Count the sentences in *s*.

    An ellipsis ('...') is collapsed to a single '.' before the full
    stops are counted; '!' and '?' are counted on the original string.
    """
    collapsed = s.replace('...', '.')
    return collapsed.count('.') + s.count('!') + s.count('?')
#Casos de teste da questão 2 - Não delete nem modifique esta linha
print(phrasecount("O produto! Sim... o produto de ponto euclidiano. Fornece uma maneira de definir o tamanho ou a norma de um vetor em Rn."))
#3 - Não delete nem modifique esta linha
def modifyph(s):
    """Return *s* with sentence punctuation replaced by single spaces.

    An ellipsis ('...') and a double dot ('..') each collapse to one
    '.' first, so a trailing ellipsis becomes a single space rather
    than three.
    """
    text = s.replace('...', '.').replace('..', '.')
    # Each punctuation mark becomes one space; order is irrelevant
    # because the targets are disjoint single characters.
    for mark in ('.', '-', ',', ';', '!', '?'):
        text = text.replace(mark, ' ')
    return text
#Casos de teste da questão 3 - Não delete nem modifique esta linha
print(modifyph("Toda base de um espaço vetorial - que pode ter o mesmo número de vetore - pode ser referenciada como a dimensão do espaço vetorial dado o seu número de vetores!"))
#4 - Não delete nem modifique esta linha
def revphr(s):
    """Lowercase *s*, strip punctuation, and reverse the word order.

    Dashes become spaces (so they act as word separators); all other
    punctuation marks are simply removed.
    """
    text = s.replace('...', '.')
    # Remove punctuation outright; characters are disjoint so the
    # order of removal does not matter.
    for mark in '.!?,;:"()':
        text = text.replace(mark, '')
    # A dash separates words rather than joining them.
    text = text.replace('-', ' ')
    words = text.lower().split()
    words.reverse()
    return ' '.join(words)
#Casos de teste da questão 4 - Não delete nem modifique esta linha
print(revphr("A SSI RU."))
#5 - Não delete nem modifique esta linha
def insere(lista_numero, n):
    """Return a new sorted list with the elements of *lista_numero*
    plus *n*.

    The input list is never modified. Uses the built-in ``sorted``
    instead of the original concatenate-then-``list.sort`` dance; the
    behaviour (new list, ascending order) is identical.
    """
    return sorted(lista_numero + [n])
#Casos de teste da questão 5 - Não delete nem modifique esta linha
print(insere([1,2,15,5,0,8,9], 4))
#6 - Não delete nem modifique esta linha
def greatlist(lista, n):
    """Return, in ascending order, the elements of *lista* that are
    strictly greater than *n*.

    Fixes two defects in the original implementation:
      * the caller's list is no longer mutated (the original appended
        *n* and sorted the input in place);
      * values equal to *n* already present in the list are no longer
        leaked into the result (the original sliced after the *first*
        occurrence of *n*, keeping any duplicate).
    """
    return sorted(value for value in lista if value > n)
#Casos de teste da questão 6 - Não delete nem modifique esta linha
print(greatlist([1,2,5,9,15,4],8))
#7 - Não delete nem modifique esta linha
def acima_da_media(lista):
    """Return, in ascending order, the elements of *lista* that are
    strictly greater than the arithmetic mean of the list.

    Fixes two defects in the original implementation:
      * the caller's list is no longer mutated (the original appended
        the mean and sorted the input in place);
      * duplicate values equal to the mean no longer corrupt the index
        arithmetic (e.g. [1, 1] now correctly yields [] instead of
        [1.0]).

    Raises ZeroDivisionError for an empty list, as before.
    """
    media = sum(lista) / len(lista)
    return sorted(valor for valor in lista if valor > media)
#Casos de teste da questão 7 - Não delete nem modifique esta linha
print(acima_da_media([1, 6, 9, 4, 0, 8, 5, 7])) | 28.783784 | 179 | 0.650704 |
def lettercount(s):
phrase = str.split(s)
letters = len(phrase)
return letters
print(lettercount("Vamos terminar este lab hoje mesmo"))
def phrasecount(s):
fpoint = str.replace(s,'...','.')
phtype1 = str.count(fpoint,'.')
phtype2 = str.count(s,'!')
phtype3 = str.count(s,'?')
phrases = phtype1 + phtype2 + phtype3
return phrases
print(phrasecount("O produto! Sim... o produto de ponto euclidiano. Fornece uma maneira de definir o tamanho ou a norma de um vetor em Rn."))
def modifyph(s):
fpoint = str.replace(s,'...','.')
tpoint = str.replace(fpoint,'..','.')
esppoint = str.replace(tpoint,'.',' ')
esptravess = str.replace(esppoint,'-',' ')
espvirg = str.replace(esptravess,',',' ')
esppointvirg = str.replace(espvirg,';',' ')
espexcl = str.replace(esppointvirg,'!',' ')
espint = str.replace(espexcl,'?',' ')
return espint
print(modifyph("Toda base de um espaço vetorial - que pode ter o mesmo número de vetore - pode ser referenciada como a dimensão do espaço vetorial dado o seu número de vetores!"))
def revphr(s):
modfpoint = str.replace(s,'...','.')
fpoint = str.replace(modfpoint,'.','')
reexcl = str.replace(fpoint,'!','')
reint = str.replace(reexcl,'?','')
reevirg = str.replace(reint,',','')
repointvirg = str.replace(reevirg,';','')
redoisp = str.replace(repointvirg,':','')
retr = str.replace(redoisp,'-',' ')
reasp = str.replace(retr,'"','')
repar1 = str.replace(reasp,'(','')
repar2 = str.replace(repar1,')','')
withoutupp = str.lower(repar2)
splitng = withoutupp.split()
revletters = list(reversed(splitng))
return " ".join(revletters)
#Casos de teste da questão 4 - Não delete nem modifique esta linha
print(revphr("A SSI RU."))
#5 - Não delete nem modifique esta linha
def insere(lista_numero,n):
addint = lista_numero + [n,]
list.sort(addint)
return addint
#Casos de teste da questão 5 - Não delete nem modifique esta linha
print(insere([1,2,15,5,0,8,9], 4))
#6 - Não delete nem modifique esta linha
def greatlist(lista,n):
list.append(lista,n)
list.sort(lista)
ind = list.index(lista,n)
return lista[ind+1:]
#Casos de teste da questão 6 - Não delete nem modifique esta linha
print(greatlist([1,2,5,9,15,4],8))
#7 - Não delete nem modifique esta linha
def acima_da_media(lista):
nullist = []
orlist = lista[:]
media = sum(lista)/len(lista)
list.append(lista,media)
list.sort(lista)
ind = list.index(lista,media)
if media in orlist:
return lista[ind+2:]
elif len(lista) > 2:
return lista[ind+1:]
else:
return nullist
#Casos de teste da questão 7 - Não delete nem modifique esta linha
print(acima_da_media([1, 6, 9, 4, 0, 8, 5, 7])) | true | true |
1c2e88c6ed8136dab2b63811698e0b327fcdc54f | 3,758 | py | Python | dl/scan/part_tflite.py | showkeyjar/beauty | 7c944cf896c899d9e23b2e50e293103bb03fe6cd | [
"MulanPSL-1.0"
] | 1 | 2022-01-29T12:32:38.000Z | 2022-01-29T12:32:38.000Z | dl/scan/part_tflite.py | showkeyjar/beauty | 7c944cf896c899d9e23b2e50e293103bb03fe6cd | [
"MulanPSL-1.0"
] | null | null | null | dl/scan/part_tflite.py | showkeyjar/beauty | 7c944cf896c899d9e23b2e50e293103bb03fe6cd | [
"MulanPSL-1.0"
] | null | null | null | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import numpy as np
import cv2
from os.path import dirname, join
import tensorflow as tf
"""
判断人脸部位
"""
# Load TFLite model and allocate tensors.
model_file = join(dirname(__file__), "face_part.tflite")
interpreter = tf.compat.v1.lite.Interpreter(model_path=model_file)
# Get input and output tensors.
input_details = interpreter.get_input_details()
print("input:", input_details)
output_details = interpreter.get_output_details()
print("output:", output_details)
def vis_parsing_maps(im, parsing_anno, stride, save_im=False, save_path='parsing_face.jpg'):
    """Overlay a colour-coded face-parsing class map on the input image.

    Each class id in `parsing_anno` is painted with a fixed colour, a
    "id:name" label is drawn at the first pixel of each class, and the
    colour map is alpha-blended (0.4/0.6) over the image. When
    `save_im` is True the blended result is written to `save_path` as
    a maximum-quality JPEG; nothing is returned.

    :param im: input image; converted with np.array, assumed RGB
        (it is converted RGB->BGR before blending) -- TODO confirm.
    :param parsing_anno: 2D integer array of per-pixel class ids.
    :param stride: scale factor applied to `parsing_anno` (nearest
        neighbour) so it matches the image resolution.
    :param save_im: write the visualisation to disk when True.
    :param save_path: output JPEG path.
    """
    # Colors for all 19 parts
    part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],
                   [255, 0, 85], [255, 0, 170],
                   [0, 255, 0], [85, 255, 0], [170, 255, 0],
                   [0, 255, 85], [0, 255, 170],
                   [0, 0, 255], [85, 0, 255], [170, 0, 255],
                   [0, 85, 255], [0, 170, 255],
                   [255, 255, 0], [255, 255, 85], [255, 255, 170],
                   [255, 0, 255], [255, 85, 255], [255, 170, 255],
                   [0, 255, 255], [85, 255, 255], [170, 255, 255]]
    # Earlier label ordering, kept for reference:
    # part_names = ['hair', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'eye_g', 'l_ear', 'r_ear', 'ear_r', 'nose', 'mouth', 'skin', 'u_lip', 'l_lip', 'neck', 'neck_l', 'cloth', 'bg', 'hat']
    part_names = ['skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'eye_g', 'l_ear', 'r_ear', 'ear_r',
                  'nose', 'mouth', 'u_lip', 'l_lip', 'neck', 'neck_l', 'cloth', 'hair', 'hat', 'bg']
    im = np.array(im)
    vis_im = im.copy().astype(np.uint8)
    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
    # Upscale the class map to image resolution; nearest neighbour
    # preserves the discrete class ids.
    vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)
    # White canvas; painted per class below.
    vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255

    num_of_class = np.max(vis_parsing_anno)
    # First pass: paint each class region with its colour.
    for pi in range(0, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        print("cls ", pi," find shape:", index[0].shape, index[1].shape)
        vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]
    # Second pass so the text labels end up on top of all colours.
    # (translated from the original Chinese comment)
    for pi in range(0, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        try:
            # NOTE(review): np.where yields (rows, cols) but cv2.putText
            # expects an (x, y) = (col, row) origin; the arguments below
            # look swapped — confirm against rendered output.
            # The try/except also silently skips classes with no pixels
            # (index[0][0] raises IndexError).
            cv2.putText(vis_parsing_anno_color, str(pi) + ":" + part_names[pi], (index[0][0] + 1,index[1][0] + 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2, cv2.LINE_AA)
        except Exception as e:
            print(e)
    vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
    # print(vis_parsing_anno_color.shape, vis_im.shape)
    # Blend: 40% original image (converted to BGR for OpenCV), 60% colour map.
    vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)

    # Save result or not
    if save_im:
        # cv2.imwrite(save_path[:-4] +'.png', vis_parsing_anno)
        cv2.imwrite(save_path, vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

    # return vis_im
def evaluate(img_path='./data'):
    """Run the TFLite face-parsing model on one image and visualize the result.

    Reads the image at ``img_path``, normalizes it with ImageNet mean/std,
    feeds it to the module-level ``interpreter`` and hands the per-pixel
    class map to ``vis_parsing_maps`` (which saves the overlay to disk).
    """
    global interpreter
    image = cv2.imread(img_path)
    # Model expects a fixed 512x512 input.
    image = cv2.resize(image, (512, 512), interpolation=cv2.INTER_NEAREST)
    img = image / 255
    # ImageNet normalization constants.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    img = (img - mean) / std
    img = img.astype(np.float32)
    # change to channel first
    img = np.moveaxis(img, 2, 0)
    print(img.shape, img.dtype)
    interpreter.allocate_tensors()
    # Wrap in a list to form a batch of size 1.
    interpreter.set_tensor(interpreter.get_input_details()[0]['index'], [img])
    interpreter.invoke()
    preds = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])
    # Per-pixel argmax over the class dimension gives the parsing map.
    parsing = preds[0].argmax(0)
    print(np.unique(parsing))
    # NOTE(review): `image` comes from cv2.imread (BGR) but vis_parsing_maps
    # converts it RGB->BGR before blending — channels may end up swapped; confirm.
    vis_parsing_maps(image, parsing, stride=1, save_im=True)
# Script entry point: run face parsing on a sample image.
if __name__ == "__main__":
    evaluate(img_path='1.png')
| 40.847826 | 184 | 0.61868 |
import numpy as np
import cv2
from os.path import dirname, join
import tensorflow as tf
# Load the face-parsing TFLite model that ships next to this script and
# print its input/output tensor descriptions for debugging.
model_file = join(dirname(__file__), "face_part.tflite")
interpreter = tf.compat.v1.lite.Interpreter(model_path=model_file)
input_details = interpreter.get_input_details()
print("input:", input_details)
output_details = interpreter.get_output_details()
print("output:", output_details)
def vis_parsing_maps(im, parsing_anno, stride, save_im=False, save_path='parsing_face.jpg'):
    """Colorize a face-parsing label map, overlay it on the image, optionally save.

    Args:
        im: input image (H, W, 3), same spatial size as ``parsing_anno``.
        parsing_anno: per-pixel integer class ids.
        stride: upscale factor applied to the annotation before drawing.
        save_im: when True, write the blended visualization to ``save_path``.
        save_path: destination JPEG path.
    """
    # One color per class id; there are more colors than the 19 classes, which is fine.
    part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],
                   [255, 0, 85], [255, 0, 170],
                   [0, 255, 0], [85, 255, 0], [170, 255, 0],
                   [0, 255, 85], [0, 255, 170],
                   [0, 0, 255], [85, 0, 255], [170, 0, 255],
                   [0, 85, 255], [0, 170, 255],
                   [255, 255, 0], [255, 255, 85], [255, 255, 170],
                   [255, 0, 255], [255, 85, 255], [255, 170, 255],
                   [0, 255, 255], [85, 255, 255], [170, 255, 255]]
    part_names = ['skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'eye_g', 'l_ear', 'r_ear', 'ear_r',
                  'nose', 'mouth', 'u_lip', 'l_lip', 'neck', 'neck_l', 'cloth', 'hair', 'hat', 'bg']
    im = np.array(im)
    vis_im = im.copy().astype(np.uint8)
    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
    vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)
    # Start from a white canvas and paint each class with its color.
    vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255
    num_of_class = np.max(vis_parsing_anno)
    for pi in range(0, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        print("cls ", pi," find shape:", index[0].shape, index[1].shape)
        vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]
    # Draw the class labels in a second pass so they stay on top of the fill.
    for pi in range(0, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        try:
            # BUGFIX: np.where returns (rows, cols) but cv2.putText's org is
            # (x, y) = (col, row), so the column index must come first.
            cv2.putText(vis_parsing_anno_color, str(pi) + ":" + part_names[pi],
                        (index[1][0] + 1, index[0][0] + 1),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2, cv2.LINE_AA)
        except Exception as e:
            # Classes absent from the map have empty index arrays; skip them.
            print(e)
    vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
    # Blend the color map over the (RGB->BGR converted) input image.
    vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)
    if save_im:
        cv2.imwrite(save_path, vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
def evaluate(img_path='./data'):
    """Run the TFLite face-parsing model on one image and visualize it."""
    global interpreter
    image = cv2.imread(img_path)
    image = cv2.resize(image, (512, 512), interpolation=cv2.INTER_NEAREST)
    # ImageNet-style normalization, then HWC -> CHW for the model.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    tensor = ((image / 255 - mean) / std).astype(np.float32)
    tensor = np.moveaxis(tensor, 2, 0)
    print(tensor.shape, tensor.dtype)
    interpreter.allocate_tensors()
    in_index = interpreter.get_input_details()[0]['index']
    out_index = interpreter.get_output_details()[0]['index']
    interpreter.set_tensor(in_index, [tensor])
    interpreter.invoke()
    preds = interpreter.get_tensor(out_index)
    # Per-pixel argmax over the class dimension.
    parsing = preds[0].argmax(0)
    print(np.unique(parsing))
    vis_parsing_maps(image, parsing, stride=1, save_im=True)
# Script entry point: run face parsing on a sample image.
if __name__ == "__main__":
    evaluate(img_path='1.png')
| true | true |
1c2e88e126f2fb3f6cce316611db58129c930a48 | 391 | py | Python | django_projects/chat_app_channels/chatapp/chatapp/asgi.py | phiratio/lpthw | a32240d4355fb331805d515f96e1d009914e5c47 | [
"MIT"
] | 1 | 2021-04-21T09:38:38.000Z | 2021-04-21T09:38:38.000Z | django_projects/chat_app_channels/chatapp/chatapp/asgi.py | phiratio/lpthw | a32240d4355fb331805d515f96e1d009914e5c47 | [
"MIT"
] | 34 | 2019-12-16T16:53:24.000Z | 2022-01-13T02:29:30.000Z | django_projects/chat_app_channels/chatapp/chatapp/asgi.py | phiratio/lpthw | a32240d4355fb331805d515f96e1d009914e5c47 | [
"MIT"
] | null | null | null | """
ASGI config for chatapp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chatapp.settings')
# Module-level ASGI callable picked up by ASGI servers (daphne/uvicorn).
application = get_asgi_application()
| 23 | 78 | 0.785166 |
import os
from django.core.asgi import get_asgi_application
# ASGI config for the chatapp project: configure settings, then expose the
# module-level ``application`` callable that ASGI servers look for.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chatapp.settings')
application = get_asgi_application()
| true | true |
1c2e88eb86dd7141662fd801b624b54caf1cf2ef | 1,370 | py | Python | backend/database/migration.py | GCETTB-HYLAND-Hackathon2012-22/Miracurol-Rebuild | a6748e93b45872efdbaca013388dd6277824a9a5 | [
"Apache-2.0"
] | null | null | null | backend/database/migration.py | GCETTB-HYLAND-Hackathon2012-22/Miracurol-Rebuild | a6748e93b45872efdbaca013388dd6277824a9a5 | [
"Apache-2.0"
] | null | null | null | backend/database/migration.py | GCETTB-HYLAND-Hackathon2012-22/Miracurol-Rebuild | a6748e93b45872efdbaca013388dd6277824a9a5 | [
"Apache-2.0"
] | null | null | null | import pathlib
from alembic import command
from alembic.config import Config as AlembicConfig
from sqlalchemy.engine import make_url
from backend.commons.enums import Config
from backend.utils.config_utils import read_config
def run_migrations(downgrade_first: bool = False) -> None:
    """Run Alembic migrations against the configured database.

    Args:
        downgrade_first: when True, downgrade to ``base`` before upgrading to
            ``head`` (i.e. rebuild the schema from scratch).

    Raises:
        Exception: any Alembic failure is logged and re-raised.
    """
    script_location: str = f'{read_config(Config.APP.HOME)}/db_migrations'
    db_url: str = read_config(Config.DATABASE.URL)
    alembic_cfg = AlembicConfig()
    alembic_cfg.set_main_option('script_location', script_location)
    alembic_cfg.set_main_option('sqlalchemy.url', db_url)
    alembic_cfg.print_stdout('INFO: Running DB Migration on %r', make_url(db_url))
    try:
        # Ensure <script_location>/versions exists: git ignores empty
        # directories, but Alembic requires this path to be present.
        pathlib.Path(f'{script_location}/versions').mkdir(parents=True, exist_ok=True)
        # Optionally wipe the schema first.
        if downgrade_first:
            command.downgrade(alembic_cfg, 'base')
        # Apply all migrations up to head.
        command.upgrade(alembic_cfg, 'head')
        # Display migration history with the current revision marked.
        command.history(alembic_cfg, indicate_current=True)
        alembic_cfg.print_stdout('INFO: DB Migrations Successful')
    except Exception:
        alembic_cfg.print_stdout('INFO: DB Migrations Failed')
        raise
| 35.128205 | 89 | 0.729197 | import pathlib
from alembic import command
from alembic.config import Config as AlembicConfig
from sqlalchemy.engine import make_url
from backend.commons.enums import Config
from backend.utils.config_utils import read_config
def run_migrations(downgrade_first: bool = False) -> None:
    """Run Alembic migrations; optionally downgrade to base first."""
    script_location: str = f'{read_config(Config.APP.HOME)}/db_migrations'
    db_url: str = read_config(Config.DATABASE.URL)
    alembic_cfg = AlembicConfig()
    alembic_cfg.set_main_option('script_location', script_location)
    alembic_cfg.set_main_option('sqlalchemy.url', db_url)
    alembic_cfg.print_stdout('INFO: Running DB Migration on %r', make_url(db_url))
    try:
        # Alembic requires the versions directory to exist (git drops empty dirs).
        pathlib.Path(f'{script_location}/versions').mkdir(parents=True, exist_ok=True)
        if downgrade_first:
            command.downgrade(alembic_cfg, 'base')
        command.upgrade(alembic_cfg, 'head')
        # Show history with the current revision indicated.
        command.history(alembic_cfg, indicate_current=True)
        alembic_cfg.print_stdout('INFO: DB Migrations Successful')
    except Exception:
        alembic_cfg.print_stdout('INFO: DB Migrations Failed')
        raise
| true | true |
1c2e897ad5c9e8578ce18e6f53c03911e852907f | 700 | py | Python | brain/src/sm_main.py | FabianFalck/de_niro | 1f9d05b7232cb9e76eff975e5ef1c8bf3fb5cde6 | [
"MIT"
] | 7 | 2019-06-12T03:36:15.000Z | 2021-06-27T18:39:40.000Z | brain/src/sm_main.py | Hankfirst/de_niro | 1f9d05b7232cb9e76eff975e5ef1c8bf3fb5cde6 | [
"MIT"
] | null | null | null | brain/src/sm_main.py | Hankfirst/de_niro | 1f9d05b7232cb9e76eff975e5ef1c8bf3fb5cde6 | [
"MIT"
] | 3 | 2019-06-12T03:35:36.000Z | 2021-06-27T04:37:31.000Z | #!/usr/bin/env python
"""
Main thread for running the state machine.
Author: Sagar Doshi
Date: 05/2018
"""
import rospy
import smach_ros
import unittest
import threading
import time
# Custom modules import
import sm_mach
if __name__ == "__main__":
    rospy.init_node('fezzik_state_machine')
    # Create the state machine
    sm = sm_mach.init_sm()
    # Create and start introspection server - automatically traverses sm's child
    # containers, so only need to add this to the top-level state machine
    sis = smach_ros.IntrospectionServer("Fezzik_Introspection_Server", sm, "/SM_TOP")
    sis.start()
    # Execute SMACH plan
    outcome = sm.execute()
    # Keep the node alive until shutdown, then stop the introspection server.
    rospy.spin()
    sis.stop()
| 20 | 85 | 0.715714 |
import rospy
import smach_ros
import unittest
import threading
import time
import sm_mach
if __name__ == "__main__":
    rospy.init_node('fezzik_state_machine')
    # Build the top-level SMACH state machine.
    sm = sm_mach.init_sm()
    # The introspection server automatically traverses sm's child
    # containers, so only need to add this to the top-level state machine
    sis = smach_ros.IntrospectionServer("Fezzik_Introspection_Server", sm, "/SM_TOP")
    sis.start()
    # Execute SMACH plan
    outcome = sm.execute()
    # Block until the node shuts down, then stop the introspection server.
    rospy.spin()
    sis.stop()
| true | true |
1c2e8983ec719dce811b9524bf40526bfd2eedb5 | 193 | py | Python | py_tdlib/constructors/set_scope_notification_settings.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/set_scope_notification_settings.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/set_scope_notification_settings.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Method
class setScopeNotificationSettings(Method):
    """TDLib method: change notification settings for a scope of chats."""
    scope = None  # type: "NotificationSettingsScope"
    notification_settings = None  # type: "scopeNotificationSettings"
| 27.571429 | 66 | 0.797927 | from ..factory import Method
class setScopeNotificationSettings(Method):
    """TDLib method: change notification settings for a scope of chats."""
    scope = None  # type: "NotificationSettingsScope"
    notification_settings = None  # type: "scopeNotificationSettings"
| true | true |
1c2e8a2ecb1994da7171f9cf7c06b1e07ef5eb21 | 717 | py | Python | testutils/decorators.py | getmetamapper/metamapper | 0b2f67eec03fbf7ece35ff9f58ea9bb2dde4d85f | [
"BSD-2-Clause"
] | 53 | 2020-07-01T23:11:59.000Z | 2022-03-31T19:10:28.000Z | testutils/decorators.py | getmetamapper/metamapper | 0b2f67eec03fbf7ece35ff9f58ea9bb2dde4d85f | [
"BSD-2-Clause"
] | 5 | 2020-11-25T19:48:57.000Z | 2022-02-27T23:50:18.000Z | testutils/decorators.py | getmetamapper/metamapper | 0b2f67eec03fbf7ece35ff9f58ea9bb2dde4d85f | [
"BSD-2-Clause"
] | 5 | 2020-08-29T16:43:59.000Z | 2022-01-17T19:05:30.000Z | # -*- coding: utf-8 -*-
import functools
import testutils.helpers as helpers
def as_someone(user_types):
    """Run the decorated test once for each user role in ``user_types``.

    For every role the test view's user, user_type and API client are
    switched before invoking the test; afterwards the original user and a
    default client are restored.
    """
    def the_decorator(func):
        @functools.wraps(func)
        def func_wrapper(view, *args, **kwargs):
            original_user = view.user
            for role in user_types:
                # Impersonate the role for this run of the test body.
                view.user_type = role
                view.user = view.users[role]
                view._client = helpers.api_client(view.user, uuid=view.workspace.id)
                func(view, *args, **kwargs)
            # Restore the original user and a default client afterwards.
            view.user = original_user
            view._client = helpers.api_client(view.user)
        return func_wrapper
    return the_decorator
| 32.590909 | 84 | 0.608089 |
import functools
import testutils.helpers as helpers
def as_someone(user_types):
    """Decorator to mimic different user roles: the wrapped test runs once
    per role in ``user_types``, with the view's user/client swapped in."""
    def the_decorator(func):
        @functools.wraps(func)
        def func_wrapper(view, *args, **kwargs):
            # Remember the current user so it can be restored afterwards.
            cached_user = view.user
            for user_type in user_types:
                view.user_type = user_type
                view.user = view.users[user_type]
                view._client = helpers.api_client(view.user, uuid=view.workspace.id)
                func(view, *args, **kwargs)
            # Restore the original user and a default client.
            view.user = cached_user
            view._client = helpers.api_client(view.user)
        return func_wrapper
    return the_decorator
| true | true |
1c2e8be0c07c4c79c57782db9882b7b968cd8d52 | 7,198 | py | Python | models.py | mrdbarros/csgo_analyze | a4d7487bd56a14a99dc762920965060a5a43fa15 | [
"Apache-2.0"
] | null | null | null | models.py | mrdbarros/csgo_analyze | a4d7487bd56a14a99dc762920965060a5a43fa15 | [
"Apache-2.0"
] | 4 | 2020-06-17T20:24:53.000Z | 2021-09-28T03:55:45.000Z | models.py | mrdbarros/csgo_analyze | a4d7487bd56a14a99dc762920965060a5a43fa15 | [
"Apache-2.0"
] | null | null | null | import data_loading
import torch
def emb_sz_rule(n_cat):
    """Rule of thumb to pick an embedding size for a category of cardinality ``n_cat``."""
    suggested = round(1.6 * n_cat ** 0.56)
    # Cap very large cardinalities at 600 dimensions.
    return min(600, suggested)
class LinBnDrop(torch.nn.Sequential):
    """Module grouping `BatchNorm1d`, `Dropout` and `Linear` layers."""
    def __init__(self, n_in, n_out, bn=True, p=0., act=None, lin_first=False):
        # Normalization/regularization part: optional BatchNorm then Dropout.
        norm_layers = []
        if bn:
            norm_layers.append(torch.nn.BatchNorm1d(n_in))
        if p != 0:
            norm_layers.append(torch.nn.Dropout(p))
        # Linear part; the bias is redundant when BatchNorm is present.
        linear_layers = [torch.nn.Linear(n_in, n_out, bias=not bn)]
        if act is not None:
            linear_layers.append(act)
        # NOTE(review): the BatchNorm width stays n_in even when lin_first=True;
        # callers in this file only use the default order, so behavior matches.
        ordered = linear_layers + norm_layers if lin_first else norm_layers + linear_layers
        super().__init__(*ordered)
class TabularModelCustom(torch.nn.Module):
    "Basic model for tabular data."
    def __init__(self, category_list, class_groups_sizes, n_cont, layers, ps=None, embed_p=0.,
                 use_bn=True, bn_final=True, bn_cont=True):
        # category_list: flat list of categorical column names; the suffix after
        # the last '_' in each name selects its class group.
        # class_groups_sizes: mapping group name -> (group index, cardinality);
        # groups with cardinality > 2 get embeddings, the rest pass through as 0/1.
        super().__init__()
        ps = ps  # NOTE(review): no-op; probably meant to default ps when None — confirm
        # Map each class-group index to the column positions belonging to it.
        class_group_map = {}
        for i, cat in enumerate(category_list):
            class_group = cat[cat.rfind("_") + 1:]
            class_group_index, _ = class_groups_sizes[class_group]
            if class_group_index in class_group_map:
                class_group_map[class_group_index].append(i)
            else:
                class_group_map[class_group_index] = [i]
        self.class_group_map = class_group_map
        # One embedding per non-binary group, sized by emb_sz_rule.
        self.embeds = torch.nn.ModuleList(
            [torch.nn.Embedding(index_ni[1], emb_sz_rule(index_ni[1])) for _, index_ni in class_groups_sizes.items() if
             index_ni[1] > 2])
        self.emb_drop = torch.nn.Dropout(embed_p)
        self.bn_cont = torch.nn.BatchNorm1d(n_cont) if bn_cont else None
        # Binary groups (indices past the embedded groups) contribute raw columns.
        binary_size = sum(len(class_group_map[i]) for i in range(len(self.embeds), len(class_group_map)))
        n_emb = sum(e.embedding_dim * len(class_group_map[i]) for i, e in enumerate(self.embeds)) + binary_size
        self.n_emb, self.n_cont = n_emb, n_cont
        # MLP head: BatchNorm on every LinBnDrop except possibly the last.
        sizes = [n_emb + n_cont] + layers
        actns = [torch.nn.ReLU(inplace=True) for _ in range(len(sizes) - 1)]
        _layers = [LinBnDrop(sizes[i], sizes[i + 1], bn=use_bn and (i != len(actns) - 1 or bn_final), p=p, act=a)
                   for i, (p, a) in enumerate(zip(ps, actns))]
        self.layers = torch.nn.Sequential(*_layers)
    def forward(self, x_cat, x_cont=None):
        # x_cat: integer category codes; x_cont: continuous features (or None).
        if self.n_emb != 0:
            # Collect columns of the binary (non-embedded) groups.
            x_cat_binary = []
            for i in range(len(self.embeds), len(self.class_group_map)):
                x_cat_binary += self.class_group_map[i]
            with torch.no_grad():
                x_cat_binary = x_cat[:, x_cat_binary].float()
            # Embed each non-binary group and flatten per sample.
            x_cat_nonbinary = [torch.flatten(e(x_cat[:, self.class_group_map[i]]), start_dim=1) for i, e in
                               enumerate(self.embeds)]
            x = torch.cat(x_cat_nonbinary + [x_cat_binary], 1)
            x = self.emb_drop(x)
        if self.n_cont != 0:
            if self.bn_cont is not None: x_cont = self.bn_cont(x_cont)
            x = torch.cat([x, x_cont], 1) if self.n_emb != 0 else x_cont
        return self.layers(x)
class CustomMixedModel(torch.nn.Module):
    """Sequence model over fused tabular+image embeddings, with a linear head.

    Per time step, tabular and image features are embedded and concatenated;
    the padded sequence is run through ``seq_model`` (a transformer taking
    ``inputs_embeds``/``attention_mask``) and classified to a single logit.
    """
    def __init__(self, image_model, tab_model, seq_model, image_output_size, embeds_size,prepare_and_pad,max_image_batch):
        super(CustomMixedModel, self).__init__()
        self.image_model = image_model
        # embedding types are primaries, secondaries, flashbangs and binaries
        # self.classifier = TabularModel_NoCat(emb_sizes,1536, 30,[400],ps=[0.1],use_bn=False)
        self.tab_model = tab_model
        # n_emb = sum(e.embedding_dim for e in self.embeds)
        self.seq_model = seq_model
        # Head input: last-step embedding (200 tabular + image features +
        # embeds_size) concatenated with the sequence-model output.
        self.classifier = torch.nn.Sequential(LinBnDrop(200 + image_output_size
                                                        + embeds_size
                                                        , 1, act=None, p=0.))
        self.prepare_and_pad = prepare_and_pad
        self.max_image_batch = max_image_batch
    def forward(self, input_cat, input_cont, input_image, attention_mask, train_embeds=True, train_seq_model=True):
        # Number of valid (non-padded) steps per sequence.
        valid_sizes = torch.sum((attention_mask == 1), dim=1)
        # Optionally freeze the embedding path (no_grad) during this pass.
        if train_embeds:
            input_embed = self.forward_embeds(input_cat, input_cont, input_image,valid_sizes)
        else:
            with torch.no_grad():
                input_embed = self.forward_embeds(input_cat, input_cont, input_image,valid_sizes)
        input_embed=self.prepare_and_pad(input_embed,valid_sizes)
        if train_seq_model:
            bert_out = self.forward_seq_model(input_embed, attention_mask)
        else:
            with torch.no_grad():
                bert_out = self.forward_seq_model(input_embed, attention_mask)
        # Classify the last valid step's embedding concatenated with bert_out.
        output = self.classifier(
            torch.cat((input_embed[range(input_embed.shape[0]), valid_sizes-1], bert_out),
                      dim=1))
        # output = self.classifier(input_embed[range(input_embed.shape[0]), (input_embed.shape[1] - mask_size - 1)])
        # output = self.classifier(bert_out)
        return output
    def forward_embeds(self, input_cat, input_cont, input_image,valid_size):
        """Embed tabular and image inputs and fuse them (ReLU-activated)."""
        # Break images into chunks of at most max_image_batch to bound memory.
        n_batches = (input_image.shape[0]//self.max_image_batch)+1*(input_image.shape[0]%self.max_image_batch>0)
        tab_out = self.tab_model(input_cat,input_cont)
        image_out = torch.cat([self.image_model(input_image[i*self.max_image_batch:min((i+1)*self.max_image_batch,input_image.shape[0])])
                               for i in range(n_batches)],dim=0)
        # comprehension to break inputs in 'n_batches' of 'self.max_image_batch' size
        input_embed = torch.cat((tab_out,image_out),dim=1)
        input_embed = torch.nn.ReLU()(input_embed)
        return input_embed
    def forward_seq_model(self, input_embed, attention_mask):
        """Run the sequence model and mean-pool its hidden states over time."""
        # bert_out = self.seq_model(input_embed.permute((1, 0, 2)),
        #                               src_key_padding_mask=attention_mask).permute((1, 0, 2))[:, 0]
        bert_out = torch.mean(self.seq_model(inputs_embeds=input_embed,
                                             attention_mask=attention_mask)[0],dim=1)
        bert_out = torch.nn.ReLU()(bert_out)
        return bert_out
class CustomMixedModelSingleImage(torch.nn.Module):
    """Fuse tabular and single-image features and classify to one logit."""
    def __init__(self, image_model, tab_model, image_output_size, class_p):
        super().__init__()
        self.image_model = image_model
        self.tab_model = tab_model
        # Head input: 50 tabular features concatenated with the image embedding.
        head_in = 50 + image_output_size
        self.classifier = torch.nn.Sequential(
            LinBnDrop(head_in, 40, act=torch.nn.ReLU(), p=class_p),
            LinBnDrop(40, 1, act=None, p=class_p),
        )
    def forward(self, input_cat, input_cont, input_image):
        tab_features = self.tab_model(input_cat, input_cont)
        image_features = self.image_model(input_image)
        fused = torch.cat((tab_features, image_features), dim=1)
        return self.classifier(torch.nn.ReLU()(fused))
| 45.556962 | 137 | 0.634065 | import data_loading
import torch
def emb_sz_rule(n_cat):
    """Rule of thumb to pick embedding size corresponding to ``n_cat`` (capped at 600)."""
    return min(600, round(1.6 * n_cat ** 0.56))
class LinBnDrop(torch.nn.Sequential):
    """Module grouping `BatchNorm1d`, `Dropout` and `Linear` layers."""
    def __init__(self, n_in, n_out, bn=True, p=0., act=None, lin_first=False):
        # BatchNorm/Dropout group; the Linear drops its bias when bn is on.
        layers = [torch.nn.BatchNorm1d(n_in)] if bn else []
        if p != 0: layers.append(torch.nn.Dropout(p))
        lin = [torch.nn.Linear(n_in, n_out, bias=not bn)]
        if act is not None: lin.append(act)
        # lin_first swaps the linear group before the norm group.
        layers = lin + layers if lin_first else layers + lin
        super().__init__(*layers)
class TabularModelCustom(torch.nn.Module):
    """Tabular model: group-shared embeddings for categoricals plus an MLP head."""
    def __init__(self, category_list, class_groups_sizes, n_cont, layers, ps=None, embed_p=0.,
                 use_bn=True, bn_final=True, bn_cont=True):
        super().__init__()
        ps = ps  # NOTE(review): no-op assignment; likely leftover default handling
        # Map each class-group index to the column positions belonging to it;
        # the group is the suffix after the last '_' in the column name.
        class_group_map = {}
        for i, cat in enumerate(category_list):
            class_group = cat[cat.rfind("_") + 1:]
            class_group_index, _ = class_groups_sizes[class_group]
            if class_group_index in class_group_map:
                class_group_map[class_group_index].append(i)
            else:
                class_group_map[class_group_index] = [i]
        self.class_group_map = class_group_map
        # Only groups with cardinality > 2 get embeddings; binaries pass through.
        self.embeds = torch.nn.ModuleList(
            [torch.nn.Embedding(index_ni[1], emb_sz_rule(index_ni[1])) for _, index_ni in class_groups_sizes.items() if
             index_ni[1] > 2])
        self.emb_drop = torch.nn.Dropout(embed_p)
        self.bn_cont = torch.nn.BatchNorm1d(n_cont) if bn_cont else None
        binary_size = sum(len(class_group_map[i]) for i in range(len(self.embeds), len(class_group_map)))
        n_emb = sum(e.embedding_dim * len(class_group_map[i]) for i, e in enumerate(self.embeds)) + binary_size
        self.n_emb, self.n_cont = n_emb, n_cont
        # MLP head built from LinBnDrop blocks.
        sizes = [n_emb + n_cont] + layers
        actns = [torch.nn.ReLU(inplace=True) for _ in range(len(sizes) - 1)]
        _layers = [LinBnDrop(sizes[i], sizes[i + 1], bn=use_bn and (i != len(actns) - 1 or bn_final), p=p, act=a)
                   for i, (p, a) in enumerate(zip(ps, actns))]
        self.layers = torch.nn.Sequential(*_layers)
    def forward(self, x_cat, x_cont=None):
        """Embed categoricals, append binaries and continuous features, run the head."""
        if self.n_emb != 0:
            x_cat_binary = []
            for i in range(len(self.embeds), len(self.class_group_map)):
                x_cat_binary += self.class_group_map[i]
            with torch.no_grad():
                x_cat_binary = x_cat[:, x_cat_binary].float()
            x_cat_nonbinary = [torch.flatten(e(x_cat[:, self.class_group_map[i]]), start_dim=1) for i, e in
                               enumerate(self.embeds)]
            x = torch.cat(x_cat_nonbinary + [x_cat_binary], 1)
            x = self.emb_drop(x)
        if self.n_cont != 0:
            if self.bn_cont is not None: x_cont = self.bn_cont(x_cont)
            x = torch.cat([x, x_cont], 1) if self.n_emb != 0 else x_cont
        return self.layers(x)
class CustomMixedModel(torch.nn.Module):
    """Sequence model over fused tabular+image embeddings with a linear head."""
    def __init__(self, image_model, tab_model, seq_model, image_output_size, embeds_size,prepare_and_pad,max_image_batch):
        super(CustomMixedModel, self).__init__()
        self.image_model = image_model
        self.tab_model = tab_model
        self.seq_model = seq_model
        # Head input: last-step embedding plus the pooled sequence output.
        self.classifier = torch.nn.Sequential(LinBnDrop(200 + image_output_size
                                                        + embeds_size
                                                        , 1, act=None, p=0.))
        self.prepare_and_pad = prepare_and_pad
        self.max_image_batch = max_image_batch
    def forward(self, input_cat, input_cont, input_image, attention_mask, train_embeds=True, train_seq_model=True):
        """Embed inputs, run the sequence model, classify last step + pooled output."""
        valid_sizes = torch.sum((attention_mask == 1), dim=1)
        # train_embeds / train_seq_model toggle gradients for each sub-network.
        if train_embeds:
            input_embed = self.forward_embeds(input_cat, input_cont, input_image,valid_sizes)
        else:
            with torch.no_grad():
                input_embed = self.forward_embeds(input_cat, input_cont, input_image,valid_sizes)
        input_embed=self.prepare_and_pad(input_embed,valid_sizes)
        if train_seq_model:
            bert_out = self.forward_seq_model(input_embed, attention_mask)
        else:
            with torch.no_grad():
                bert_out = self.forward_seq_model(input_embed, attention_mask)
        output = self.classifier(
            torch.cat((input_embed[range(input_embed.shape[0]), valid_sizes-1], bert_out),
                      dim=1))
        return output
    def forward_embeds(self, input_cat, input_cont, input_image,valid_size):
        """Embed tabular and image inputs (images chunked by max_image_batch) and fuse."""
        n_batches = (input_image.shape[0]//self.max_image_batch)+1*(input_image.shape[0]%self.max_image_batch>0)
        tab_out = self.tab_model(input_cat,input_cont)
        image_out = torch.cat([self.image_model(input_image[i*self.max_image_batch:min((i+1)*self.max_image_batch,input_image.shape[0])])
                               for i in range(n_batches)],dim=0)
        input_embed = torch.cat((tab_out,image_out),dim=1)
        input_embed = torch.nn.ReLU()(input_embed)
        return input_embed
    def forward_seq_model(self, input_embed, attention_mask):
        """Run seq_model on the embeddings and mean-pool its hidden states."""
        bert_out = torch.mean(self.seq_model(inputs_embeds=input_embed,
                                             attention_mask=attention_mask)[0],dim=1)
        bert_out = torch.nn.ReLU()(bert_out)
        return bert_out
class CustomMixedModelSingleImage(torch.nn.Module):
    """Fuse tabular and single-image features and classify to one logit."""
    def __init__(self, image_model, tab_model, image_output_size,class_p):
        super(CustomMixedModelSingleImage, self).__init__()
        self.image_model = image_model
        self.tab_model = tab_model
        # Head input: 50 tabular features concatenated with the image embedding.
        self.classifier = torch.nn.Sequential(LinBnDrop(50 + image_output_size, 40, act=torch.nn.ReLU(), p=class_p),
                                              LinBnDrop(40, 1, act=None, p=class_p))
    def forward(self, input_cat, input_cont, input_image):
        output_tabular = self.tab_model(input_cat, input_cont)
        output_image = self.image_model(input_image)
        logits = self.classifier(torch.nn.ReLU()(torch.cat((output_tabular, output_image), dim=1)))
        return logits
| true | true |
1c2e8bf63583af76879137c28d1e1bfd7558d730 | 5,464 | py | Python | quantumGAN/deprecated_files/discriminator_minimax.py | tomiock/qGAN | fb98a2b5286eb479665ade353efa40bd6e55dc36 | [
"Apache-2.0"
] | 1 | 2021-06-14T09:33:32.000Z | 2021-06-14T09:33:32.000Z | quantumGAN/deprecated_files/discriminator_minimax.py | tomiock/gGAN | fb98a2b5286eb479665ade353efa40bd6e55dc36 | [
"Apache-2.0"
] | 7 | 2021-06-14T12:21:40.000Z | 2021-10-01T07:29:01.000Z | quantumGAN/deprecated_files/discriminator_minimax.py | tomiock/qGAN | fb98a2b5286eb479665ade353efa40bd6e55dc36 | [
"Apache-2.0"
] | null | null | null | """DISCRIMINATOR BCE"""
import json
import random
from typing import Dict, List
import numpy as np
from quantumGAN.functions import minimax_derivative_fake, minimax_derivative_real, sigmoid, \
sigmoid_prime
def load(filename):
    """Load a discriminator previously serialized by ``ClassicalDiscriminatorMINIMAX.save``."""
    f = open(filename, "r")
    data = json.load(f)
    f.close()
    # cost = getattr(sys.modules[__name__], data["cost"])
    net = ClassicalDiscriminatorMINIMAX(None, None, data["sizes"], data["loss"])
    net.weights = [np.array(w) for w in data["weights"]]
    net.biases = [np.array(b) for b in data["biases"]]
    return net
class ClassicalDiscriminatorMINIMAX:
    """Classical feed-forward discriminator trained with a minimax loss.

    Weights and biases are plain numpy arrays; gradients are computed by the
    hand-rolled backpropagation in ``backprop``.
    """
    def __init__(self,
                 training_data: List or None,
                 mini_batch_size: int or None,
                 sizes: List[int],
                 loss_BCE: bool) -> None:
        # NOTE(review): `List or None` evaluates to just `List`;
        # Optional[List] was probably intended (same for `int or None`).
        self.training_data = training_data
        self.mini_batch_size: int = mini_batch_size
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.loss_BCE = loss_BCE
        # Per-image loss history, split by real/fake.
        self.data_loss = {"real": [],
                          "fake": []}
        # NOTE(review): `any` here is the builtin, not typing.Any.
        self.ret: Dict[str, any] = {"loss": [],
                                    "label real": [],
                                    "label fake": [],
                                    "label fake time": [],
                                    "label real time": []}
        # Standard-normal initialization: one bias vector per non-input layer,
        # one weight matrix per layer transition.
        self.biases = [np.random.randn(y, ) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]
    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a
    def predict(self, x):
        """Feed ``x`` forward through the network and return the final activation."""
        # feedforward
        activation = x
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
        return activation
    def evaluate(self, test_data):
        """Count test samples whose argmax prediction matches the label ``y``."""
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)
    def save(self, filename):
        """Save the neural network to the file ``filename``."""
        data = {"sizes": self.sizes,
                "weights": [w.tolist() for w in self.weights],
                "biases": [b.tolist() for b in self.biases],
                "loss": self.loss_BCE  # ,
                # "cost": str(self..__name__)
                }
        f = open(filename, "w")
        json.dump(data, f)
        f.close()
    def forwardprop(self, x: np.ndarray):
        """Forward pass keeping intermediates: returns (output, activations, zs)."""
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        return activation, activations, zs
    def backprop(self, real_image, fake_image, is_real):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x. ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``.

        Also returns the final activation of the branch selected by
        ``is_real`` (the discriminator's label for that image).
        """
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward and back error calculation depending on type of image
        activation_real, activations_real, zs_real = self.forwardprop(real_image)
        activation_fake, activations_fake, zs_fake = self.forwardprop(fake_image)
        if is_real:
            delta = minimax_derivative_real(activations_real[-1]) * sigmoid_prime(zs_real[-1])
            activations, zs = activations_real, zs_real
        else:
            delta = minimax_derivative_fake(activations_fake[-1]) * sigmoid_prime(zs_fake[-1])
            activations, zs = activations_fake, zs_fake
        # backward pass
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].reshape(1, activations[-2].shape[0]))
        for l in range(2, self.num_layers):
            z = zs[-l]
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sigmoid_prime(z)
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta.reshape(delta.shape[0], 1),
                                 activations[-l - 1].reshape(1, activations[-l - 1].shape[0]))
        return nabla_b, nabla_w, activations[-1]
    def train_mini_batch(self, mini_batch, learning_rate):
        """One gradient-descent step over ``mini_batch`` of (real, fake) pairs."""
        # NOTE(review): label_real/label_fake are published as globals so other
        # code can read the last labels — confirm this side channel is intended.
        global label_real, label_fake
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for real_image, fake_image in mini_batch:
            # Accumulate gradients from the real branch, then the fake branch.
            delta_nabla_b, delta_nabla_w, label_real = self.backprop(real_image, fake_image, True)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
            delta_nabla_b, delta_nabla_w, label_fake = self.backprop(real_image, fake_image, False)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # gradient descent
        # nabla_w and nabla_b are multiplied by the learning rate
        # and taken the mean of (dividing by the mini batch size)
        self.weights = [w - (learning_rate / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (learning_rate / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]
    def create_mini_batches(self):
        """Shuffle the training data and return a single mini-batch.

        NOTE(review): only the first shuffled mini-batch is returned; confirm
        this is intentional (stochastic single-batch training) and not a bug.
        """
        n = len(self.training_data)
        random.shuffle(self.training_data)
        mini_batches = [
            self.training_data[k:k + self.mini_batch_size]
            for k in range(0, n, self.mini_batch_size)]
        return [mini_batches[0]]
| 35.947368 | 93 | 0.646962 | import json
import random
from typing import Dict, List
import numpy as np
from quantumGAN.functions import minimax_derivative_fake, minimax_derivative_real, sigmoid, \
sigmoid_prime
def load(filename):
    """Load a discriminator previously written by ``ClassicalDiscriminatorMINIMAX.save``."""
    f = open(filename, "r")
    data = json.load(f)
    f.close()
    net = ClassicalDiscriminatorMINIMAX(None, None, data["sizes"], data["loss"])
    net.weights = [np.array(w) for w in data["weights"]]
    net.biases = [np.array(b) for b in data["biases"]]
    return net
class ClassicalDiscriminatorMINIMAX:
    """Classical feed-forward discriminator trained with a minimax loss;
    weights/biases are numpy arrays and gradients come from ``backprop``."""
    def __init__(self,
                 training_data: List or None,
                 mini_batch_size: int or None,
                 sizes: List[int],
                 loss_BCE: bool) -> None:
        # NOTE(review): `List or None` evaluates to `List`; Optional was likely meant.
        self.training_data = training_data
        self.mini_batch_size: int = mini_batch_size
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.loss_BCE = loss_BCE
        # Per-image loss history by branch.
        self.data_loss = {"real": [],
                          "fake": []}
        self.ret: Dict[str, any] = {"loss": [],
                                    "label real": [],
                                    "label fake": [],
                                    "label fake time": [],
                                    "label real time": []}
        # Standard-normal initialization per layer transition.
        self.biases = [np.random.randn(y, ) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]
    def feedforward(self, a):
        """Return the network output for input ``a``."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a
    def predict(self, x):
        """Feed ``x`` forward and return the final activation."""
        activation = x
        zs = []  # z vectors, layer by layer (kept for symmetry with forwardprop)
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
        return activation
    def evaluate(self, test_data):
        """Count samples whose argmax prediction equals the label."""
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)
    def save(self, filename):
        """Serialize sizes, weights, biases and loss flag to JSON."""
        data = {"sizes": self.sizes,
                "weights": [w.tolist() for w in self.weights],
                "biases": [b.tolist() for b in self.biases],
                "loss": self.loss_BCE
                }
        f = open(filename, "w")
        json.dump(data, f)
        f.close()
    def forwardprop(self, x: np.ndarray):
        """Forward pass keeping intermediates: (output, activations, zs)."""
        activation = x
        activations = [x]
        zs = []
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        return activation, activations, zs
    def backprop(self, real_image, fake_image, is_real):
        """Backpropagate the minimax loss for the branch selected by ``is_real``;
        returns (nabla_b, nabla_w, final activation of that branch)."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        activation_real, activations_real, zs_real = self.forwardprop(real_image)
        activation_fake, activations_fake, zs_fake = self.forwardprop(fake_image)
        if is_real:
            delta = minimax_derivative_real(activations_real[-1]) * sigmoid_prime(zs_real[-1])
            activations, zs = activations_real, zs_real
        else:
            delta = minimax_derivative_fake(activations_fake[-1]) * sigmoid_prime(zs_fake[-1])
            activations, zs = activations_fake, zs_fake
        # Backward pass from the output layer down.
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].reshape(1, activations[-2].shape[0]))
        for l in range(2, self.num_layers):
            z = zs[-l]
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sigmoid_prime(z)
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta.reshape(delta.shape[0], 1),
                                 activations[-l - 1].reshape(1, activations[-l - 1].shape[0]))
        return nabla_b, nabla_w, activations[-1]
    def train_mini_batch(self, mini_batch, learning_rate):
        """One averaged gradient-descent step over (real, fake) image pairs."""
        # NOTE(review): last labels are exposed via globals — confirm intended.
        global label_real, label_fake
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for real_image, fake_image in mini_batch:
            delta_nabla_b, delta_nabla_w, label_real = self.backprop(real_image, fake_image, True)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
            delta_nabla_b, delta_nabla_w, label_fake = self.backprop(real_image, fake_image, False)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # Gradient step scaled by learning_rate and averaged over the batch.
        self.weights = [w - (learning_rate / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (learning_rate / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]
    def create_mini_batches(self):
        """Shuffle training data and return only the first mini-batch.

        NOTE(review): returning just ``[mini_batches[0]]`` looks deliberate
        (stochastic single-batch training) but should be confirmed.
        """
        n = len(self.training_data)
        random.shuffle(self.training_data)
        mini_batches = [
            self.training_data[k:k + self.mini_batch_size]
            for k in range(0, n, self.mini_batch_size)]
        return [mini_batches[0]]
| true | true |
1c2e8da7f1a8c7cb5c1ba3839e249911f1c92d46 | 271 | py | Python | 18_Math3/Step01/wowo0709.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | 18_Math3/Step01/wowo0709.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | 3 | 2020-11-04T05:38:53.000Z | 2021-03-02T02:15:19.000Z | 18_Math3/Step01/wowo0709.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | while(1):
a,b = map(int,input().split())
if a==0 and b==0:
exit(0)
elif (a!=0 and b==0) or (a==0 and b!=0):
print('neither')
elif a%b==0:
print('multiple')
elif b%a==0:
print('factor')
else:
print('neither') | 22.583333 | 44 | 0.457565 | while(1):
a,b = map(int,input().split())
if a==0 and b==0:
exit(0)
elif (a!=0 and b==0) or (a==0 and b!=0):
print('neither')
elif a%b==0:
print('multiple')
elif b%a==0:
print('factor')
else:
print('neither') | true | true |
1c2e8db06813e97495b935f3a62d070441bf6d9a | 1,756 | py | Python | code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/synthetics_basic_auth_ntlm_type.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/synthetics_basic_auth_ntlm_type.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/synthetics_basic_auth_ntlm_type.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import (
ModelSimple,
cached_property,
)
class SyntheticsBasicAuthNTLMType(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
allowed_values = {
"value": {
"NTLM": "ntlm",
},
}
validations = {}
@cached_property
def openapi_types():
return {
"value": (str,),
}
def __init__(self, *args, **kwargs):
"""SyntheticsBasicAuthNTLMType - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): The type of authentication to use when performing the test. If omitted defaults to "ntlm". Must be one of ["ntlm"].
Keyword Args:
value (str): The type of authentication to use when performing the test. If omitted defaults to "ntlm". Must be one of ["ntlm"].
"""
super().__init__(kwargs)
if "value" in kwargs:
value = kwargs.pop("value")
elif args:
args = list(args)
value = args.pop(0)
else:
value = "ntlm"
self._check_pos_args(args)
self.value = value
self._check_kw_args(kwargs)
@classmethod
def _from_openapi_data(cls, *args, **kwargs):
"""Helper creating a new instance from a response."""
return cls(*args, **kwargs)
| 27.4375 | 142 | 0.612756 |
from datadog_api_client.v1.model_utils import (
ModelSimple,
cached_property,
)
class SyntheticsBasicAuthNTLMType(ModelSimple):
allowed_values = {
"value": {
"NTLM": "ntlm",
},
}
validations = {}
@cached_property
def openapi_types():
return {
"value": (str,),
}
def __init__(self, *args, **kwargs):
super().__init__(kwargs)
if "value" in kwargs:
value = kwargs.pop("value")
elif args:
args = list(args)
value = args.pop(0)
else:
value = "ntlm"
self._check_pos_args(args)
self.value = value
self._check_kw_args(kwargs)
@classmethod
def _from_openapi_data(cls, *args, **kwargs):
return cls(*args, **kwargs)
| true | true |
1c2e8e6321d95ddbf511a3d098115f8a1538a5ba | 549 | py | Python | yamtbx/dataproc/xds/command_line/xds_integrate_lp_to_xparm.py | 7l2icj/kamo_clone | 5f4a5eed3cd9d91a021d805e46125c19cc2ed1b6 | [
"BSD-3-Clause"
] | 16 | 2016-05-20T11:19:40.000Z | 2021-01-01T19:44:23.000Z | yamtbx/dataproc/xds/command_line/xds_integrate_lp_to_xparm.py | 7l2icj/kamo_clone | 5f4a5eed3cd9d91a021d805e46125c19cc2ed1b6 | [
"BSD-3-Clause"
] | 4 | 2017-03-10T00:51:11.000Z | 2021-02-07T17:18:46.000Z | yamtbx/dataproc/xds/command_line/xds_integrate_lp_to_xparm.py | 7l2icj/kamo_clone | 5f4a5eed3cd9d91a021d805e46125c19cc2ed1b6 | [
"BSD-3-Clause"
] | 9 | 2016-12-15T16:00:06.000Z | 2021-09-10T08:34:14.000Z | #!/usr/bin/env yamtbx.python
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
import os
from yamtbx.dataproc.xds.xparm import get_xparm_from_integrate_lp
def run(integrate_lp, frame_num):
xparm_str = get_xparm_from_integrate_lp(integrate_lp, frame_num)
open("XPARM.XDS.%.4d"%frame_num, "w").write(xparm_str)
# run()
if __name__ == "__main__":
import sys
integrate_lp = sys.argv[1]
frame = int(sys.argv[2])
run(integrate_lp, frame)
| 23.869565 | 68 | 0.724954 |
import os
from yamtbx.dataproc.xds.xparm import get_xparm_from_integrate_lp
def run(integrate_lp, frame_num):
xparm_str = get_xparm_from_integrate_lp(integrate_lp, frame_num)
open("XPARM.XDS.%.4d"%frame_num, "w").write(xparm_str)
if __name__ == "__main__":
import sys
integrate_lp = sys.argv[1]
frame = int(sys.argv[2])
run(integrate_lp, frame)
| true | true |
1c2e8ee4df75228959da73fbbd7d2db0011e6712 | 430 | py | Python | Week2/problem1.py | AustinKladke/6.00.1x-MIT-CS-Python | 0f9e2840fdd5f005bae2a53a3b8a251e0635654e | [
"MIT"
] | null | null | null | Week2/problem1.py | AustinKladke/6.00.1x-MIT-CS-Python | 0f9e2840fdd5f005bae2a53a3b8a251e0635654e | [
"MIT"
] | null | null | null | Week2/problem1.py | AustinKladke/6.00.1x-MIT-CS-Python | 0f9e2840fdd5f005bae2a53a3b8a251e0635654e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 7 11:40:18 2021
@author: akladke
"""
#balance = 42
balance = 484
annualInterestRate = 0.2
monthlyPaymentRate = 0.04
for i in range(0, 12):
min_payment = balance * monthlyPaymentRate
unpaid_balance = balance - min_payment
interest = annualInterestRate/12 * unpaid_balance
balance = round(unpaid_balance + interest, 2)
print("Remaining balance: {}".format(balance)) | 23.888889 | 53 | 0.702326 |
balance = 484
annualInterestRate = 0.2
monthlyPaymentRate = 0.04
for i in range(0, 12):
min_payment = balance * monthlyPaymentRate
unpaid_balance = balance - min_payment
interest = annualInterestRate/12 * unpaid_balance
balance = round(unpaid_balance + interest, 2)
print("Remaining balance: {}".format(balance)) | true | true |
1c2e8f5126310bf5dc00013e478e20cf77af87a8 | 1,858 | py | Python | Hash.py | V3noM-Cyber/Hashjokar | 9d29bbdee22e29488074bc2b16178d91cee2778e | [
"MIT"
] | null | null | null | Hash.py | V3noM-Cyber/Hashjokar | 9d29bbdee22e29488074bc2b16178d91cee2778e | [
"MIT"
] | null | null | null | Hash.py | V3noM-Cyber/Hashjokar | 9d29bbdee22e29488074bc2b16178d91cee2778e | [
"MIT"
] | null | null | null | import hashlib
from colorama import init
from time import *
from termcolor import colored
import sys
import os
import hashlib, binascii
from colorama import Fore, Back, Style
hash = ""
init()
print( Fore.RED+'''
●🔥════════════════════════◄►═══════════════════🔥●
██╗░░░██╗██████╗░███╗░░██╗░█████╗░███╗░░░███╗
██║░░░██║╚════██╗████╗░██║██╔══██╗████╗░████║
╚██╗░██╔╝░█████╔╝██╔██╗██║██║░░██║██╔████╔██║
░╚████╔╝░░╚═══██╗██║╚████║██║░░██║██║╚██╔╝██║
░░╚██╔╝░░██████╔╝██║░╚███║╚█████╔╝██║░╚═╝░██║
░░░╚═╝░░░╚═════╝░╚═╝░░╚══╝░╚════╝░╚═╝░░░░░╚═╝
Telegram:https://t.me/V3n0M_Cyber
YouTube:V3n0m Cyber ✪ vercon 1.0
●🔥════════════════════════◄►═══════════════════🔥●
''')
os.system("xdg-open https://t.me/V3n0M_Cyber")
hash =input("Creating Hash: ")
result = hashlib.md5(hash.encode())
print(Fore.RED +"[+]Hash MD5: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
sleep(1.0)
result = hashlib.sha1(hash.encode())
print(Fore.RED +"[+]Hash SHA1: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
sleep(1.0)
result = hashlib.sha224(hash.encode())
print(Fore.RED +"[+]Hash SHA224: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
sleep(1.0)
result = hashlib.sha256(hash.encode())
print(Fore.RED+"[+]Hash SHA256: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
sleep(1.0)
result = hashlib.sha384(hash.encode())
print(Fore.RED+"[+]Hash SHA384: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
sleep(1.1)
result = hashlib.sha512(hash.encode())
print(Fore.RED+"[+]Hash SHA512: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
print("Hash Crack website ")
print("Bo Ragrtn CTRL+Z Dabgra ")
sleep(3)
sleep(200)
v= os.system("clear")
| 29.967742 | 70 | 0.453714 | import hashlib
from colorama import init
from time import *
from termcolor import colored
import sys
import os
import hashlib, binascii
from colorama import Fore, Back, Style
hash = ""
init()
print( Fore.RED+'''
●🔥════════════════════════◄►═══════════════════🔥●
██╗░░░██╗██████╗░███╗░░██╗░█████╗░███╗░░░███╗
██║░░░██║╚════██╗████╗░██║██╔══██╗████╗░████║
╚██╗░██╔╝░█████╔╝██╔██╗██║██║░░██║██╔████╔██║
░╚████╔╝░░╚═══██╗██║╚████║██║░░██║██║╚██╔╝██║
░░╚██╔╝░░██████╔╝██║░╚███║╚█████╔╝██║░╚═╝░██║
░░░╚═╝░░░╚═════╝░╚═╝░░╚══╝░╚════╝░╚═╝░░░░░╚═╝
Telegram:https://t.me/V3n0M_Cyber
YouTube:V3n0m Cyber ✪ vercon 1.0
●🔥════════════════════════◄►═══════════════════🔥●
''')
os.system("xdg-open https://t.me/V3n0M_Cyber")
hash =input("Creating Hash: ")
result = hashlib.md5(hash.encode())
print(Fore.RED +"[+]Hash MD5: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
sleep(1.0)
result = hashlib.sha1(hash.encode())
print(Fore.RED +"[+]Hash SHA1: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
sleep(1.0)
result = hashlib.sha224(hash.encode())
print(Fore.RED +"[+]Hash SHA224: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
sleep(1.0)
result = hashlib.sha256(hash.encode())
print(Fore.RED+"[+]Hash SHA256: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
sleep(1.0)
result = hashlib.sha384(hash.encode())
print(Fore.RED+"[+]Hash SHA384: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
sleep(1.1)
result = hashlib.sha512(hash.encode())
print(Fore.RED+"[+]Hash SHA512: ",end="")
print(result.hexdigest())
print("●════════════════════════════●")
print("Hash Crack website ")
print("Bo Ragrtn CTRL+Z Dabgra ")
sleep(3)
sleep(200)
v= os.system("clear")
| true | true |
1c2e8f59e079c1df103e60b7754c0feb424f213c | 363 | py | Python | doc/tutorials/data/03_halide_basic.py | Hconk/AutoKernel | edfb409efe37e2aea5350961e3a8160c11856853 | [
"Apache-2.0"
] | 754 | 2020-11-11T06:34:26.000Z | 2022-03-29T03:45:36.000Z | doc/tutorials/data/03_halide_basic.py | Hconk/AutoKernel | edfb409efe37e2aea5350961e3a8160c11856853 | [
"Apache-2.0"
] | 16 | 2020-12-01T03:27:01.000Z | 2021-08-13T06:20:58.000Z | doc/tutorials/data/03_halide_basic.py | Hconk/AutoKernel | edfb409efe37e2aea5350961e3a8160c11856853 | [
"Apache-2.0"
] | 82 | 2020-11-12T00:14:45.000Z | 2022-03-27T09:22:08.000Z |
#!/usr/bin/python3
import halide as hl
x, y = hl.Var("x"), hl.Var("y")
func = hl.Func("func")
func[x,y] = x + 10*y
#func.trace_stores()
out = func.realize(3, 4) # width, height = 3,4
print("=============================")
for j in range(out.height()):
for i in range(out.width()):
print("out[x=%i,y=%i]=%i"%(i,j,out[i,j]))
print("Success!")
| 16.5 | 49 | 0.517906 |
import halide as hl
x, y = hl.Var("x"), hl.Var("y")
func = hl.Func("func")
func[x,y] = x + 10*y
out = func.realize(3, 4)
print("=============================")
for j in range(out.height()):
for i in range(out.width()):
print("out[x=%i,y=%i]=%i"%(i,j,out[i,j]))
print("Success!")
| true | true |
1c2e90a0f9ba63d07086acc94891fb7c5086ab1e | 4,403 | py | Python | usaspending_api/etl/transaction_loaders/data_load_helpers.py | gaybro8777/usaspending-api | fe9d730acd632401bbbefa168e3d86d59560314b | [
"CC0-1.0"
] | null | null | null | usaspending_api/etl/transaction_loaders/data_load_helpers.py | gaybro8777/usaspending-api | fe9d730acd632401bbbefa168e3d86d59560314b | [
"CC0-1.0"
] | null | null | null | usaspending_api/etl/transaction_loaders/data_load_helpers.py | gaybro8777/usaspending-api | fe9d730acd632401bbbefa168e3d86d59560314b | [
"CC0-1.0"
] | null | null | null | from datetime import datetime
import os
import re
import boto3
import csv
import logging
from django.conf import settings
logger = logging.getLogger("console")
def capitalize_if_string(val):
try:
return val.upper()
except AttributeError:
return val
# 10/31/2019: According to PO direction, this functionality is NOT desired, and should be phased out as soon as it's safe
def false_if_null(val):
if val is None:
return False
return val
def truncate_timestamp(val):
if isinstance(val, datetime):
return val.date()
elif isinstance(val, str):
return datetime.strptime(val, "%Y-%m-%d %H:%M:%S").date()
elif val is None:
return None
else:
raise ValueError("{} is not parsable as a date!".format(val.type))
def format_value_for_sql(val, cur):
return str(cur.mogrify("%s", (val,)), "utf-8")
def format_bulk_insert_list_column_sql(cursor, load_objects, type):
"""creates formatted sql text to put into a bulk insert statement"""
keys = load_objects[0][type].keys()
columns = ['"{}"'.format(key) for key in load_objects[0][type].keys()]
values = [[format_value_for_sql(load_object[type][key], cursor) for key in keys] for load_object in load_objects]
col_string = "({})".format(",".join(columns))
val_string = ",".join(["({})".format(",".join(map(str, value))) for value in values])
return col_string, val_string
def format_insert_or_update_column_sql(cursor, load_object, type):
"""creates formatted sql text to put into a single row insert or update statement"""
columns = []
values = []
update_pairs = []
for key in load_object[type].keys():
columns.append('"{}"'.format(key))
val = format_value_for_sql(load_object[type][key], cursor)
values.append(val)
if key not in ["create_date", "created_at"]:
update_pairs.append(" {}={}".format(key, val))
col_string = "({})".format(",".join(map(str, columns)))
val_string = "({})".format(",".join(map(str, values)))
pairs_string = ",".join(update_pairs)
return col_string, val_string, pairs_string
def get_deleted_fpds_data_from_s3(date):
ids_to_delete = []
regex_str = ".*_delete_records_(IDV|award).*"
if settings.IS_LOCAL:
for file in os.listdir(settings.CSV_LOCAL_PATH):
if re.search(regex_str, file) and datetime.strptime(file[: file.find("_")], "%m-%d-%Y").date() >= date:
with open(settings.CSV_LOCAL_PATH + file, "r") as current_file:
# open file, split string to array, skip the header
reader = csv.reader(current_file.read().splitlines())
next(reader)
unique_key_list = [rows[0] for rows in reader]
ids_to_delete += unique_key_list
else:
# Connect to AWS
aws_region = settings.USASPENDING_AWS_REGION
fpds_bucket_name = settings.FPDS_BUCKET_NAME
if not (aws_region and fpds_bucket_name):
raise Exception("Missing required environment variables: USASPENDING_AWS_REGION, FPDS_BUCKET_NAME")
s3client = boto3.client("s3", region_name=aws_region)
s3resource = boto3.resource("s3", region_name=aws_region)
s3_bucket = s3resource.Bucket(fpds_bucket_name)
# make an array of all the keys in the bucket
file_list = [item.key for item in s3_bucket.objects.all()]
# Only use files that match the date we're currently checking
for item in file_list:
# if the date on the file is the same day as we're checking
if (
re.search(regex_str, item)
and "/" not in item
and datetime.strptime(item[: item.find("_")], "%m-%d-%Y").date() >= date
):
s3_item = s3client.get_object(Bucket=fpds_bucket_name, Key=item)
reader = csv.reader(s3_item["Body"].read().decode("utf-8").splitlines())
# skip the header, the reader doesn't ignore it for some reason
next(reader)
# make an array of all the detached_award_procurement_ids
unique_key_list = [rows[0] for rows in reader]
ids_to_delete += unique_key_list
logger.info("Number of records to delete: %s" % str(len(ids_to_delete)))
return ids_to_delete
| 35.796748 | 121 | 0.633205 | from datetime import datetime
import os
import re
import boto3
import csv
import logging
from django.conf import settings
logger = logging.getLogger("console")
def capitalize_if_string(val):
try:
return val.upper()
except AttributeError:
return val
def false_if_null(val):
if val is None:
return False
return val
def truncate_timestamp(val):
if isinstance(val, datetime):
return val.date()
elif isinstance(val, str):
return datetime.strptime(val, "%Y-%m-%d %H:%M:%S").date()
elif val is None:
return None
else:
raise ValueError("{} is not parsable as a date!".format(val.type))
def format_value_for_sql(val, cur):
return str(cur.mogrify("%s", (val,)), "utf-8")
def format_bulk_insert_list_column_sql(cursor, load_objects, type):
keys = load_objects[0][type].keys()
columns = ['"{}"'.format(key) for key in load_objects[0][type].keys()]
values = [[format_value_for_sql(load_object[type][key], cursor) for key in keys] for load_object in load_objects]
col_string = "({})".format(",".join(columns))
val_string = ",".join(["({})".format(",".join(map(str, value))) for value in values])
return col_string, val_string
def format_insert_or_update_column_sql(cursor, load_object, type):
columns = []
values = []
update_pairs = []
for key in load_object[type].keys():
columns.append('"{}"'.format(key))
val = format_value_for_sql(load_object[type][key], cursor)
values.append(val)
if key not in ["create_date", "created_at"]:
update_pairs.append(" {}={}".format(key, val))
col_string = "({})".format(",".join(map(str, columns)))
val_string = "({})".format(",".join(map(str, values)))
pairs_string = ",".join(update_pairs)
return col_string, val_string, pairs_string
def get_deleted_fpds_data_from_s3(date):
ids_to_delete = []
regex_str = ".*_delete_records_(IDV|award).*"
if settings.IS_LOCAL:
for file in os.listdir(settings.CSV_LOCAL_PATH):
if re.search(regex_str, file) and datetime.strptime(file[: file.find("_")], "%m-%d-%Y").date() >= date:
with open(settings.CSV_LOCAL_PATH + file, "r") as current_file:
# open file, split string to array, skip the header
reader = csv.reader(current_file.read().splitlines())
next(reader)
unique_key_list = [rows[0] for rows in reader]
ids_to_delete += unique_key_list
else:
# Connect to AWS
aws_region = settings.USASPENDING_AWS_REGION
fpds_bucket_name = settings.FPDS_BUCKET_NAME
if not (aws_region and fpds_bucket_name):
raise Exception("Missing required environment variables: USASPENDING_AWS_REGION, FPDS_BUCKET_NAME")
s3client = boto3.client("s3", region_name=aws_region)
s3resource = boto3.resource("s3", region_name=aws_region)
s3_bucket = s3resource.Bucket(fpds_bucket_name)
# make an array of all the keys in the bucket
file_list = [item.key for item in s3_bucket.objects.all()]
# Only use files that match the date we're currently checking
for item in file_list:
if (
re.search(regex_str, item)
and "/" not in item
and datetime.strptime(item[: item.find("_")], "%m-%d-%Y").date() >= date
):
s3_item = s3client.get_object(Bucket=fpds_bucket_name, Key=item)
reader = csv.reader(s3_item["Body"].read().decode("utf-8").splitlines())
# skip the header, the reader doesn't ignore it for some reason
next(reader)
unique_key_list = [rows[0] for rows in reader]
ids_to_delete += unique_key_list
logger.info("Number of records to delete: %s" % str(len(ids_to_delete)))
return ids_to_delete
| true | true |
1c2e90c332fe1e210457721c098a7ba2d97cb487 | 7,344 | py | Python | src/external_model.py | alex-calderwood/twine_generation | 8a3ba3779bffeb7a0b18bfd0960685b26bd4e5e8 | [
"BSD-4-Clause-UC"
] | 2 | 2021-11-18T07:28:09.000Z | 2022-03-23T13:56:18.000Z | src/external_model.py | alex-calderwood/twine_generation | 8a3ba3779bffeb7a0b18bfd0960685b26bd4e5e8 | [
"BSD-4-Clause-UC"
] | null | null | null | src/external_model.py | alex-calderwood/twine_generation | 8a3ba3779bffeb7a0b18bfd0960685b26bd4e5e8 | [
"BSD-4-Clause-UC"
] | null | null | null | import openai
import os
import time
import twee_utils as utils
# Load your API key from an environment variable or secret management service
openai.api_key = os.getenv("OPENAI_API_KEY")
ZERO_SHOT = open('src/zero_shot.txt', 'r').read()
class TwineGenerator:
def __init__(self, model='context', verbose=True):
"""
:param model: must be one of: 'context', 'mock', or 'naive'
context: Call GPT-3 with a context description and passage title
naive: Call GPT-3 with only a passage title
mock: Mock the language model call
Each will return a passage body that can be appended to the title to make a complete Twee passage.
"""
# Decide which generator to use (GPT-3 or mock)
self.model = model.lower()
self.verbose = bool(verbose)
if self.model == 'events':
self._call_model = TwineGenerator._call_contextual_event_model
elif self.model == 'context':
self._call_model = TwineGenerator._call_contextual_model
elif self.model == 'mock':
self._call_model = TwineGenerator._mock_generate
elif self.model == 'naive':
self._call_model = TwineGenerator._call_naive_model
else:
raise ValueError(f"TwineGenerator({self.model}) is invalid")
def get_completion(self, prompt):
"""
call the correct language model
"""
if self.verbose:
print("prompt", prompt)
while True:
try:
return self._call_model(prompt)
except openai.error.RateLimitError as e:
print(e)
print('retrying...')
def _call_model(self, prompt):
raise RuntimeError("This should have been defined in the constructor")
@staticmethod
def _call_naive_model(prompt):
"""
Return a generated twee passage from a given title. The code is elsewhere modular to allow this to be re-implemented
with any language model. Here, we call a fine-tuned GPT-3 instance trained to generate scaffolded Twee.
"""
model = 'curie:ft-user-wmco7qacght9seweh8jgp4ib-2021-10-28-04-55-18'
response = openai.Completion.create(
model=model,
prompt=prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=utils.END,
)
return response['choices'][0]['text']
@staticmethod
def _call_contextual_model(prompt):
"""
Return a generated twee passage from a given title. The code is elsewhere modular to allow this to be re-implemented
with any language model. Here, we call a fine-tuned GPT-3 instance trained to generate scaffolded Twee.
"""
model = 'curie:ft-user-wmco7qacght9seweh8jgp4ib-2021-11-29-05-45-10'
response = openai.Completion.create(
model=model,
prompt=prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=utils.END,
)
return response['choices'][0]['text']
@staticmethod
def _call_contextual_event_model(prompt):
"""
Return a generated twee passage from a given title. The code is elsewhere modular to allow this to be re-implemented
with any language model. Here, we call a fine-tuned GPT-3 instance trained to generate scaffolded Twee.
"""
model = 'curie:ft-user-wmco7qacght9seweh8jgp4ib-2021-12-07-08-07-09'
response = openai.Completion.create(
model=model,
prompt=prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=utils.END,
)
return response['choices'][0]['text']
@staticmethod
def summarize(passage, clean_passage=True):
"""
Use GPT-3 as a zero-shot summarization for the given passage.
:param clean_passage: (bool) whether to clean the passage of extraneous twee formatting
"""
if clean_passage:
passage = utils.passage_to_text(passage)
prompt = ZERO_SHOT.format(passage.strip())
model = 'davinci' # It doesn't really work with curie
response = openai.Completion.create(
model=model,
prompt=prompt,
temperature=0.7,
max_tokens=280,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop='.',
)
response = response['choices'][0]['text']
return response.strip() + '.'
@staticmethod
def _mock_generate(prompt):
time.sleep(1)
result = {}
result['completion'] = \
"""\"You don't have to answer, I've seen it a million times." She pulls out a wallet and hands you a business card.<newline>DR. CHAE YEON-SEOK<newline>SPECIALIST<newline>[["Specialist?"|specialist]]<newline>[["..."|dotdotdot]]<|end|>"""
return result
# Usage: python src/external_model.py
if __name__ == '__main__':
mem = "This happened sometime ago now, but I still treasure this memory close to my heart. I've always wanted to execute the perfect date for a girl, and that day would come when I met my Loretta. One night, I really wanted to do something romantic for her. I'm twenty two but my ideas are rather old fashioned. I decided a surprise picnic would be a perfect date for Loretta. I thought she'd love it. I ended up secretly buying all her favorite foods I could think of, creating a playlist of all our favorite love songs, and packing her favorite blankets. Everything was going to plan. Well, until the day I decided to actually put my plan into motion. On the day of the picnic, I finally realized the one thing I forgot to do while planning this picnic, check the weather. It ended up pouring down hard that day. However, that didn't stop my plans. Instead of feeling defeated and calling it a night I decided to execute a quick plan B. I had brought Loretta to my room, and I began to set the stage. With the lights dimmed and a fake fireplace roaring on my tv, I turned on my blue tooth speaker and prepared the playlist I made. I had set up the blankets and cups, along with everything else I had ready for the picnic, and had everything laid out nicely on the clear floor. Loretta was overjoyed that I had gone through all this trouble for her. After we ate she snuggled up with me as we kissed and let the music play until it's end. We're still happily together and of course now I always remember to check the weather, but Loretta still says that night was very special to her, and it is to me as well! "
# ex = "She was gorgeous and I was in love with her. When we made out for the first time, the world came to a halt. It literally blew up. No one survived, except for us."
ex = "Long ago there lived a man in a room with an ipad. The man was obsessed with a game called townscaper. The game was played by desiging towns. There was no winning or losing in the game, it was purely for fun."
sum = TwineGenerator.summarize(ex, False)
print('summary: ', sum)
| 45.614907 | 1,616 | 0.651144 | import openai
import os
import time
import twee_utils as utils
openai.api_key = os.getenv("OPENAI_API_KEY")
ZERO_SHOT = open('src/zero_shot.txt', 'r').read()
class TwineGenerator:
def __init__(self, model='context', verbose=True):
self.model = model.lower()
self.verbose = bool(verbose)
if self.model == 'events':
self._call_model = TwineGenerator._call_contextual_event_model
elif self.model == 'context':
self._call_model = TwineGenerator._call_contextual_model
elif self.model == 'mock':
self._call_model = TwineGenerator._mock_generate
elif self.model == 'naive':
self._call_model = TwineGenerator._call_naive_model
else:
raise ValueError(f"TwineGenerator({self.model}) is invalid")
def get_completion(self, prompt):
if self.verbose:
print("prompt", prompt)
while True:
try:
return self._call_model(prompt)
except openai.error.RateLimitError as e:
print(e)
print('retrying...')
def _call_model(self, prompt):
raise RuntimeError("This should have been defined in the constructor")
@staticmethod
def _call_naive_model(prompt):
model = 'curie:ft-user-wmco7qacght9seweh8jgp4ib-2021-10-28-04-55-18'
response = openai.Completion.create(
model=model,
prompt=prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=utils.END,
)
return response['choices'][0]['text']
@staticmethod
def _call_contextual_model(prompt):
model = 'curie:ft-user-wmco7qacght9seweh8jgp4ib-2021-11-29-05-45-10'
response = openai.Completion.create(
model=model,
prompt=prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=utils.END,
)
return response['choices'][0]['text']
@staticmethod
def _call_contextual_event_model(prompt):
model = 'curie:ft-user-wmco7qacght9seweh8jgp4ib-2021-12-07-08-07-09'
response = openai.Completion.create(
model=model,
prompt=prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=utils.END,
)
return response['choices'][0]['text']
@staticmethod
def summarize(passage, clean_passage=True):
if clean_passage:
passage = utils.passage_to_text(passage)
prompt = ZERO_SHOT.format(passage.strip())
model = 'davinci'
response = openai.Completion.create(
model=model,
prompt=prompt,
temperature=0.7,
max_tokens=280,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop='.',
)
response = response['choices'][0]['text']
return response.strip() + '.'
@staticmethod
def _mock_generate(prompt):
time.sleep(1)
result = {}
result['completion'] = \
"""\"You don't have to answer, I've seen it a million times." She pulls out a wallet and hands you a business card.<newline>DR. CHAE YEON-SEOK<newline>SPECIALIST<newline>[["Specialist?"|specialist]]<newline>[["..."|dotdotdot]]<|end|>"""
return result
# Usage: python src/external_model.py
if __name__ == '__main__':
mem = "This happened sometime ago now, but I still treasure this memory close to my heart. I've always wanted to execute the perfect date for a girl, and that day would come when I met my Loretta. One night, I really wanted to do something romantic for her. I'm twenty two but my ideas are rather old fashioned. I decided a surprise picnic would be a perfect date for Loretta. I thought she'd love it. I ended up secretly buying all her favorite foods I could think of, creating a playlist of all our favorite love songs, and packing her favorite blankets. Everything was going to plan. Well, until the day I decided to actually put my plan into motion. On the day of the picnic, I finally realized the one thing I forgot to do while planning this picnic, check the weather. It ended up pouring down hard that day. However, that didn't stop my plans. Instead of feeling defeated and calling it a night I decided to execute a quick plan B. I had brought Loretta to my room, and I began to set the stage. With the lights dimmed and a fake fireplace roaring on my tv, I turned on my blue tooth speaker and prepared the playlist I made. I had set up the blankets and cups, along with everything else I had ready for the picnic, and had everything laid out nicely on the clear floor. Loretta was overjoyed that I had gone through all this trouble for her. After we ate she snuggled up with me as we kissed and let the music play until it's end. We're still happily together and of course now I always remember to check the weather, but Loretta still says that night was very special to her, and it is to me as well! "
# ex = "She was gorgeous and I was in love with her. When we made out for the first time, the world came to a halt. It literally blew up. No one survived, except for us."
ex = "Long ago there lived a man in a room with an ipad. The man was obsessed with a game called townscaper. The game was played by desiging towns. There was no winning or losing in the game, it was purely for fun."
sum = TwineGenerator.summarize(ex, False)
print('summary: ', sum)
| true | true |
1c2e9132a54de644c28ae19522b0d1693090fb3f | 10,968 | py | Python | models/llc_score.py | litosly/RankingOptimizationApprochtoLLC | b6b79fb1364fcc481aa84093a8e8399966b19d02 | [
"MIT"
] | 5 | 2020-10-10T02:11:16.000Z | 2021-11-08T22:55:34.000Z | models/llc_score.py | litosly/RankingOptimizationApprochtoLLC | b6b79fb1364fcc481aa84093a8e8399966b19d02 | [
"MIT"
] | null | null | null | models/llc_score.py | litosly/RankingOptimizationApprochtoLLC | b6b79fb1364fcc481aa84093a8e8399966b19d02 | [
"MIT"
] | 2 | 2020-10-22T14:50:43.000Z | 2021-08-15T07:00:43.000Z | from prediction.predictor import predict_scores, predict_vector
from sklearn.linear_model import LinearRegression
from utils.critique import LP1SimplifiedOptimize
from utils.keyphrase_selection import *
import numpy as np
from tqdm import tqdm
class LP1Simplified(object):
    """Iterative keyphrase-critiquing experiment driver (simplified LP1 objective).

    For each test user and each wanted (held-out) item, the loop repeatedly
    "critiques" a keyphrase the item lacks, re-optimizes the user's scores
    with ``LP1SimplifiedOptimize`` and records in ``self.df`` whether the
    wanted item climbs into the target rank within
    ``max_iteration_threshold`` rounds.
    """
    def __init__(self, keyphrase_freq, item_keyphrase_freq, row, matrix_Train, matrix_Test, test_users,
                 target_ranks, num_items_sampled, num_keyphrases, df,
                 max_iteration_threshold, keyphrase_popularity, dataset_name,
                 model, parameters_row, keyphrase_selection_method, topk=None, lamb=None, **unused):
        # User x keyphrase frequency matrix.
        self.keyphrase_freq = keyphrase_freq
        # Stored transposed: rows are items, columns are keyphrases.
        self.item_keyphrase_freq = item_keyphrase_freq.T
        # Template dict mutated in place and appended to self.df per event.
        self.row = row
        self.matrix_Train = matrix_Train
        self.num_users, self.num_items = matrix_Train.shape
        self.matrix_Test = matrix_Test
        self.test_users = test_users
        self.target_ranks = target_ranks
        self.num_items_sampled = num_items_sampled
        self.num_keyphrases = num_keyphrases
        self.df = df
        self.max_iteration_threshold = max_iteration_threshold
        self.keyphrase_popularity = keyphrase_popularity
        self.dataset_name = dataset_name
        self.model = model
        self.parameters_row = parameters_row
        # One of "pop", "random" or "diff" (see start_critiquing).
        self.keyphrase_selection_method = keyphrase_selection_method
        # Set diff length to be equal to max_iteration_threshold
        self.max_wanted_keyphrase = max_iteration_threshold
        self.topk = topk

    def start_critiquing(self):
        """Run the critiquing loop over all test users and return self.df."""
        self.get_initial_predictions()
        for user in tqdm(self.test_users):
            # User id starts from 0
            self.row['user_id'] = user
            # The iteration will stop if the wanted item is in top n
            for target_rank in self.target_ranks:
                self.row['target_rank'] = target_rank
                # Pick wanted items in test items
                candidate_items = self.matrix_Test[user].nonzero()[1]
                train_items = self.matrix_Train[user].nonzero()[1]
                wanted_items = np.setdiff1d(candidate_items, train_items)
                for item in wanted_items:
                    # Item id starts from 0
                    self.row['item_id'] = item
                    # Set the wanted item's initial rank as None
                    self.row['item_rank'] = None
                    # Set the wanted item's initial prediction score as None
                    self.row['item_score'] = None
                    # Get the item's existing keyphrases
                    item_keyphrases = self.item_keyphrase_freq[item].nonzero()[0]
                    # Get keyphrases that don't belong to the item (we can critique)
                    remaining_keyphrases = np.setdiff1d(range(self.num_keyphrases), item_keyphrases)
                    # print("The number of remaining_keyphrases is {}. remaining_keyphrases are: {}".format(len(remaining_keyphrases), remaining_keyphrases))
                    self.row['num_existing_keyphrases'] = len(remaining_keyphrases)
                    if self.keyphrase_selection_method == "diff":
                        # For keyphrase selection method 'diff'
                        target_keyphrase_freq = get_item_keyphrase_freq(self.item_keyphrase_freq, item=item)
                        # diff_keyphrase_freq = top_recommended_keyphrase_freq - target_keyphrase_freq
                        # remaining_keyphrases = np.argsort(np.ravel(diff_keyphrase_freq))[::-1][:self.max_wanted_keyphrase]
                        self.row['num_existing_keyphrases'] = self.max_iteration_threshold
                    if len(remaining_keyphrases) == 0:
                        break
                    # Iteration 0 row records the starting state (no critique yet).
                    self.row['iteration'] = 0
                    self.row['critiqued_keyphrase'] = None
                    self.row['result'] = None
                    self.df = self.df.append(self.row, ignore_index=True)
                    query = []
                    affected_items = np.array([])
                    for iteration in range(self.max_iteration_threshold):
                        self.row['iteration'] = iteration + 1
                        if self.keyphrase_selection_method == "pop":
                            # Always critique the most popular keyphrase
                            critiqued_keyphrase = remaining_keyphrases[np.argmax(self.keyphrase_popularity[remaining_keyphrases])]
                        elif self.keyphrase_selection_method == "random":
                            # Randomly critique a remaining keyphrase
                            critiqued_keyphrase = np.random.choice(remaining_keyphrases, size=1, replace=False)[0]
                        elif self.keyphrase_selection_method == "diff":
                            # Critique the keyphrase whose frequency differs most
                            # between the current top recommendation and the target.
                            if iteration == 0:
                                initial_prediction_items = predict_vector(rating_vector=self.prediction_scores[user],
                                                                          train_vector=self.matrix_Train[user],
                                                                          remove_train=True)
                                top_recommended_keyphrase_freq = get_item_keyphrase_freq(self.item_keyphrase_freq, item=initial_prediction_items[0])
                                self.row["Recommended Item"] = initial_prediction_items[0]
                                # self.row['Recommended Item Name'] = get_restaurant_name(self.df_train, self.business_df,initial_prediction_items[0])
                            else:
                                top_recommended_keyphrase_freq = get_item_keyphrase_freq(self.item_keyphrase_freq, item=prediction_items[0])
                                self.row["Recommended Item"] = prediction_items[0]
                                # self.row['Recommended Item Name'] = get_restaurant_name(self.df_train, self.business_df,prediction_items[0])
                            diff_keyphrase_freq = top_recommended_keyphrase_freq - target_keyphrase_freq
                            remaining_keyphrases = np.argsort(np.ravel(diff_keyphrase_freq))[::-1][:self.max_wanted_keyphrase]
                            critiqued_keyphrase = remaining_keyphrases[0]
                        self.row['critiqued_keyphrase'] = critiqued_keyphrase
                        query.append(critiqued_keyphrase)
                        # Get affected items (items have critiqued keyphrase)
                        current_affected_items = self.item_keyphrase_freq[:, critiqued_keyphrase].nonzero()[0]
                        affected_items = np.unique(np.concatenate((affected_items, current_affected_items))).astype(int)
                        unaffected_items = np.setdiff1d(range(self.num_items), affected_items)
                        if iteration == 0:
                            prediction_items = predict_vector(rating_vector=self.prediction_scores[user],
                                                              train_vector=self.matrix_Train[user],
                                                              remove_train=True)
                        # Split the current ranking into affected / unaffected positions.
                        affected_items_mask = np.in1d(prediction_items, affected_items)
                        affected_items_index_rank = np.where(affected_items_mask == True)
                        unaffected_items_index_rank = np.where(affected_items_mask == False)
                        import copy
                        top_k = int(self.topk)
                        # Re-optimize scores; keyphrase_freq is deep-copied because
                        # the optimizer mutates it.
                        prediction_scores_u, lambdas = LP1SimplifiedOptimize(initial_prediction_u=self.prediction_scores[user],
                                                                             keyphrase_freq=copy.deepcopy(self.keyphrase_freq),
                                                                             affected_items=np.intersect1d(affected_items, prediction_items[affected_items_index_rank[0][:top_k]]),
                                                                             unaffected_items=np.intersect1d(unaffected_items, prediction_items[unaffected_items_index_rank[0][:top_k]]),
                                                                             num_keyphrases=self.num_keyphrases,
                                                                             query=query,
                                                                             test_user=user,
                                                                             item_latent=self.Y,
                                                                             reg=self.reg)
                        self.row['lambda'] = lambdas
                        prediction_items = predict_vector(rating_vector=prediction_scores_u,
                                                          train_vector=self.matrix_Train[user],
                                                          remove_train=True)
                        recommended_items = prediction_items
                        # Current item rank
                        item_rank = np.where(recommended_items == item)[0][0]
                        self.row['item_rank'] = item_rank
                        self.row['item_score'] = prediction_scores_u[item]
                        if item_rank + 1 <= target_rank:
                            # Items is ranked within target rank
                            self.row['result'] = 'successful'
                            self.df = self.df.append(self.row, ignore_index=True)
                            break
                        else:
                            remaining_keyphrases = np.setdiff1d(remaining_keyphrases, critiqued_keyphrase)
                            # Continue if more keyphrases and iterations remained
                            if len(remaining_keyphrases) > 0 and self.row['iteration'] < self.max_iteration_threshold:
                                self.row['result'] = None
                                self.df = self.df.append(self.row, ignore_index=True)
                            else:
                                # Otherwise, mark fail
                                self.row['result'] = 'fail'
                                self.df = self.df.append(self.row, ignore_index=True)
                                break
        return self.df

    def get_initial_predictions(self):
        """Train the base model once and cache latent factors and base scores."""
        self.RQ, Yt, Bias = self.model(self.matrix_Train,
                                       iteration=self.parameters_row['iter'].values[0],
                                       lamb=self.parameters_row['lambda'].values[0],
                                       rank=self.parameters_row['rank'].values[0])
        self.Y = Yt.T
        # Linear map from user keyphrase frequencies to user latent factors;
        # reused by the optimizer when scores are re-computed after a critique.
        self.reg = LinearRegression().fit(self.keyphrase_freq, self.RQ)
        self.prediction_scores = predict_scores(matrix_U=self.RQ,
                                                matrix_V=self.Y,
                                                bias=Bias)
| 60.596685 | 185 | 0.535284 | from prediction.predictor import predict_scores, predict_vector
from sklearn.linear_model import LinearRegression
from utils.critique import LP1SimplifiedOptimize
from utils.keyphrase_selection import *
import numpy as np
from tqdm import tqdm
class LP1Simplified(object):
def __init__(self, keyphrase_freq, item_keyphrase_freq, row, matrix_Train, matrix_Test, test_users,
target_ranks, num_items_sampled, num_keyphrases, df,
max_iteration_threshold, keyphrase_popularity, dataset_name,
model, parameters_row, keyphrase_selection_method, topk=None, lamb = None,**unused):
self.keyphrase_freq = keyphrase_freq
self.item_keyphrase_freq = item_keyphrase_freq.T
self.row = row
self.matrix_Train = matrix_Train
self.num_users, self.num_items = matrix_Train.shape
self.matrix_Test = matrix_Test
self.test_users = test_users
self.target_ranks = target_ranks
self.num_items_sampled = num_items_sampled
self.num_keyphrases = num_keyphrases
self.df = df
self.max_iteration_threshold = max_iteration_threshold
self.keyphrase_popularity = keyphrase_popularity
self.dataset_name = dataset_name
self.model = model
self.parameters_row = parameters_row
self.keyphrase_selection_method = keyphrase_selection_method
self.max_wanted_keyphrase = max_iteration_threshold
self.topk = topk
def start_critiquing(self):
self.get_initial_predictions()
for user in tqdm(self.test_users):
self.row['user_id'] = user
for target_rank in self.target_ranks:
self.row['target_rank'] = target_rank
candidate_items = self.matrix_Test[user].nonzero()[1]
train_items = self.matrix_Train[user].nonzero()[1]
wanted_items = np.setdiff1d(candidate_items, train_items)
for item in wanted_items:
self.row['item_id'] = item
self.row['item_rank'] = None
# Set the wanted item's initial prediction score as None
self.row['item_score'] = None
item_keyphrases = self.item_keyphrase_freq[item].nonzero()[0]
# Get keyphrases that don't belong to the item (we can critique)
remaining_keyphrases = np.setdiff1d(range(self.num_keyphrases), item_keyphrases)
self.row['num_existing_keyphrases'] = len(remaining_keyphrases)
if self.keyphrase_selection_method == "diff":
target_keyphrase_freq = get_item_keyphrase_freq(self.item_keyphrase_freq,item = item)
self.row['num_existing_keyphrases'] = self.max_iteration_threshold
if len(remaining_keyphrases) == 0:
break
self.row['iteration'] = 0
self.row['critiqued_keyphrase'] = None
self.row['result'] = None
self.df = self.df.append(self.row, ignore_index=True)
query = []
affected_items = np.array([])
for iteration in range(self.max_iteration_threshold):
self.row['iteration'] = iteration + 1
if self.keyphrase_selection_method == "pop":
critiqued_keyphrase = remaining_keyphrases[np.argmax(self.keyphrase_popularity[remaining_keyphrases])]
elif self.keyphrase_selection_method == "random":
critiqued_keyphrase = np.random.choice(remaining_keyphrases, size=1, replace=False)[0]
elif self.keyphrase_selection_method == "diff":
if iteration == 0:
initial_prediction_items = predict_vector(rating_vector=self.prediction_scores[user],
train_vector=self.matrix_Train[user],
remove_train=True)
top_recommended_keyphrase_freq = get_item_keyphrase_freq(self.item_keyphrase_freq,item = initial_prediction_items[0])
self.row["Recommended Item"] = initial_prediction_items[0]
else:
top_recommended_keyphrase_freq = get_item_keyphrase_freq(self.item_keyphrase_freq,item = prediction_items[0])
self.row["Recommended Item"] = prediction_items[0]
diff_keyphrase_freq = top_recommended_keyphrase_freq - target_keyphrase_freq
remaining_keyphrases = np.argsort(np.ravel(diff_keyphrase_freq))[::-1][:self.max_wanted_keyphrase]
critiqued_keyphrase = remaining_keyphrases[0]
self.row['critiqued_keyphrase'] = critiqued_keyphrase
query.append(critiqued_keyphrase)
current_affected_items = self.item_keyphrase_freq[:, critiqued_keyphrase].nonzero()[0]
affected_items = np.unique(np.concatenate((affected_items, current_affected_items))).astype(int)
unaffected_items = np.setdiff1d(range(self.num_items), affected_items)
if iteration == 0:
prediction_items = predict_vector(rating_vector=self.prediction_scores[user],
train_vector=self.matrix_Train[user],
remove_train=True)
affected_items_mask = np.in1d(prediction_items, affected_items)
affected_items_index_rank = np.where(affected_items_mask == True)
unaffected_items_index_rank = np.where(affected_items_mask == False)
import copy
top_k = int(self.topk)
prediction_scores_u, lambdas = LP1SimplifiedOptimize(initial_prediction_u=self.prediction_scores[user],
keyphrase_freq=copy.deepcopy(self.keyphrase_freq),
affected_items=np.intersect1d(affected_items, prediction_items[affected_items_index_rank[0][:top_k]]),
unaffected_items=np.intersect1d(unaffected_items, prediction_items[unaffected_items_index_rank[0][:top_k]]),
num_keyphrases=self.num_keyphrases,
query=query,
test_user=user,
item_latent=self.Y,
reg=self.reg)
self.row['lambda'] = lambdas
prediction_items = predict_vector(rating_vector=prediction_scores_u,
train_vector=self.matrix_Train[user],
remove_train=True)
recommended_items = prediction_items
item_rank = np.where(recommended_items == item)[0][0]
self.row['item_rank'] = item_rank
self.row['item_score'] = prediction_scores_u[item]
if item_rank + 1 <= target_rank:
self.row['result'] = 'successful'
self.df = self.df.append(self.row, ignore_index=True)
break
else:
remaining_keyphrases = np.setdiff1d(remaining_keyphrases, critiqued_keyphrase)
if len(remaining_keyphrases) > 0 and self.row['iteration'] < self.max_iteration_threshold:
self.row['result'] = None
self.df = self.df.append(self.row, ignore_index=True)
else:
self.row['result'] = 'fail'
self.df = self.df.append(self.row, ignore_index=True)
break
return self.df
def get_initial_predictions(self):
self.RQ, Yt, Bias = self.model(self.matrix_Train,
iteration=self.parameters_row['iter'].values[0],
lamb=self.parameters_row['lambda'].values[0],
rank=self.parameters_row['rank'].values[0])
self.Y = Yt.T
self.reg = LinearRegression().fit(self.keyphrase_freq, self.RQ)
self.prediction_scores = predict_scores(matrix_U=self.RQ,
matrix_V=self.Y,
bias=Bias)
| true | true |
1c2e91ad8c46cbfb50cd64e65f5ea767cbea1915 | 4,982 | py | Python | sld-api-backend/helpers/get_data.py | guorenxi/Stack-Lifecycle-Deployment | 2780441cb692392993116cf94c14a94ae8edbc6c | [
"MIT"
] | null | null | null | sld-api-backend/helpers/get_data.py | guorenxi/Stack-Lifecycle-Deployment | 2780441cb692392993116cf94c14a94ae8edbc6c | [
"MIT"
] | null | null | null | sld-api-backend/helpers/get_data.py | guorenxi/Stack-Lifecycle-Deployment | 2780441cb692392993116cf94c14a94ae8edbc6c | [
"MIT"
] | null | null | null | from fastapi import HTTPException
from celery.result import AsyncResult
from datetime import datetime
from functools import wraps
from croniter import croniter
import redis
from crud import stacks as crud_stacks
from crud import deploys as crud_deploys
from crud import user as crud_users
from crud import activityLogs as crud_activity
from config.api import settings
# Redis connection (db=2) used as a lightweight lock store for pending deploy
# tasks; decode_responses=True so keys/values are plain str.
r = redis.Redis(host=settings.BACKEND_SERVER, port=6379, db=2,
                charset="utf-8", decode_responses=True)
def user_squad_scope(db, user, squad):
    """Check whether *user* (numeric id string or username) belongs to *squad*.

    Args:
        db: database session forwarded to the CRUD layer.
        user: numeric string user id, or a username.
        squad: squad name to compare against.

    Returns:
        True when the user's squad matches; None (falsy) otherwise.

    Raises:
        HTTPException: 400 when the user does not exist or the lookup fails.
    """
    try:
        if user.isdigit():
            user_info = crud_users.get_user_by_id(db=db, id=user)
        else:
            user_info = crud_users.get_user_by_username(db=db, username=user)
        # Fixed `== None` -> `is None`: identity is the correct (PEP 8)
        # way to detect a missing row.
        if user_info is None:
            raise ValueError(f"User {user} no exists")
        if user_info.squad == squad:
            return True
    except Exception as err:
        raise HTTPException(
            status_code=400,
            detail=str(err))
def stack(db, stack_name: str):
    """Fetch a stack by name, aborting with HTTP 404 when it is missing."""
    try:
        found = crud_stacks.get_stack_by_name(
            db=db, stack_name=stack_name)
        if found is None:
            raise Exception("Stack Name Not Found")
    except Exception as err:
        raise HTTPException(
            status_code=404,
            detail=f"{err}")
    return found
def deploy(db, deploy_id: int):
    """Fetch a deploy row by id, aborting with HTTP 404 when it is missing."""
    try:
        found = crud_deploys.get_deploy_by_id(
            db=db, deploy_id=deploy_id)
        if found is None:
            raise Exception("Deploy id Not Found")
    except Exception as err:
        raise HTTPException(
            status_code=404,
            detail=f"{err}")
    return found
def deploy_squad(db, deploy_id: int, squad: str):
    """Fetch a deploy row by id scoped to *squad*, aborting with HTTP 404."""
    try:
        found = crud_deploys.get_deploy_by_id_squad(
            db=db, deploy_id=deploy_id, squad=squad)
        if found is None:
            raise Exception("Deploy id Not Found")
    except Exception as err:
        raise HTTPException(
            status_code=404,
            detail=f"{err}")
    return found
def get_deploy(db, deploy_id: int):
    """Fetch a deploy row by id, aborting with HTTP 404 when it is missing.

    The original body duplicated deploy() line for line (same CRUD call,
    same error message, same status code); delegate instead so the two
    cannot drift apart.
    """
    return deploy(db, deploy_id)
def check_deploy_exist(db, deploy_name: str, squad: str, env: str, stack: str):
    """Abort with HTTP 409 when an identical deploy already exists.

    A deploy counts as a duplicate when name, squad, environment and stack
    all match an existing row.
    """
    requested = f'{deploy_name}-{squad}-{env}-{stack}'
    try:
        existing = crud_deploys.get_deploy_by_name_squad(
            db=db, deploy_name=deploy_name, squad=squad, environment=env)
        if existing is not None:
            stored = f'{existing.name}-{existing.squad}-{existing.environment}-{existing.stack_name}'
            if requested == stored:
                raise Exception(
                    "The name of the deployment already exists in the current squad and with specified environment")
    except Exception as err:
        raise HTTPException(
            status_code=409,
            detail=f"{err}")
def check_deploy_state(task_id: str):
    """Return True when the Celery task is in a terminal (or unknown) state."""
    state = AsyncResult(task_id).state
    return state in ("SUCCESS", "FAILURE", "REVOKED", "PENDING")
def check_deploy_task_pending_state(deploy_name, squad, environment, task_id=None):
    """Guard against queuing the same deploy task twice.

    If the previous task was revoked, return True immediately (no lock is
    taken in that path). Otherwise abort with HTTP 409 while a Redis lock
    for this deploy/squad/environment exists; when free, take the lock with
    a TTL of settings.TASK_LOCKED_EXPIRED seconds.
    """
    if task_id:
        result = AsyncResult(str(task_id))
        list_state = ["REVOKED"]
        if any(result.state == i for i in list_state):
            # A revoked task holds no lock; let the caller re-queue it.
            return True
    try:
        if r.exists(f"{deploy_name}-{squad}-{environment}"):
            raise Exception("Task already exists in pending state waiting to be executed")
    except Exception as err:
        raise HTTPException(
            status_code=409, detail=f"{err}")
    # Acquire the lock; it auto-expires so a crashed worker cannot block
    # future deploys forever.
    r.set(f"{deploy_name}-{squad}-{environment}", "Locked")
    r.expire(f"{deploy_name}-{squad}-{environment}", settings.TASK_LOCKED_EXPIRED)
def check_providers(stack_name):
    """Validate that *stack_name* contains a supported provider prefix.

    Returns True on a match; aborts with HTTP 404 otherwise.
    """
    providers_support = settings.PROVIDERS_SUPPORT
    lowered = stack_name.lower()
    for provider in providers_support:
        if provider in lowered:
            return True
    raise HTTPException(
        status_code=404,
        detail=f"stack name {stack_name.lower()} no content providers support name preffix: {providers_support}")
def activity_log(func):
    """Decorator recording an activity-log entry before awaiting *func*.

    Expects the decorated route to receive `db`, `current_user` and `user`
    as keyword arguments.

    NOTE(review): the action text is hard-coded to 'Delete User ...', so
    this decorator only produces a correct log line on the delete-user
    endpoint — confirm before reusing it elsewhere.
    """
    @wraps(func)
    async def wrapper(*args, **kwargs):
        crud_activity.create_activity_log(
            db=kwargs['db'],
            username=kwargs['current_user'].username,
            squad=kwargs['current_user'].squad,
            action=f'Delete User {kwargs["user"]}'
        )
        return await func(*args, **kwargs)
    return wrapper
def check_cron_schedule(cron_time: str):
    """Validate a cron expression.

    Returns:
        True when *cron_time* is a non-empty, valid cron string;
        None (falsy) when *cron_time* is empty.

    Raises:
        ValueError: when *cron_time* is present but not a valid expression.
    """
    if cron_time:
        if not croniter.is_valid(cron_time):
            # Fixed grammar of the original message ("Cron time its no valid").
            raise ValueError("Cron time is not valid")
        return True
| 32.776316 | 117 | 0.642914 | from fastapi import HTTPException
from celery.result import AsyncResult
from datetime import datetime
from functools import wraps
from croniter import croniter
import redis
from crud import stacks as crud_stacks
from crud import deploys as crud_deploys
from crud import user as crud_users
from crud import activityLogs as crud_activity
from config.api import settings
r = redis.Redis(host=settings.BACKEND_SERVER, port=6379, db=2,
charset="utf-8", decode_responses=True)
def user_squad_scope(db, user, squad):
try:
if user.isdigit():
user_info = crud_users.get_user_by_id(db=db, id=user)
else:
user_info = crud_users.get_user_by_username(db=db, username=user)
if user_info == None:
raise ValueError(f"User {user} no exists")
if user_info.squad == squad:
return True
except Exception as err:
raise HTTPException(
status_code=400,
detail=str(err))
def stack(db, stack_name: str):
try:
stack_data = crud_stacks.get_stack_by_name(
db=db, stack_name=stack_name)
if stack_data is None:
raise Exception("Stack Name Not Found")
return stack_data
except Exception as err:
raise HTTPException(
status_code=404,
detail=f"{err}")
def deploy(db, deploy_id: int):
try:
deploy_data = crud_deploys.get_deploy_by_id(
db=db, deploy_id=deploy_id)
if deploy_data is None:
raise Exception("Deploy id Not Found")
return deploy_data
except Exception as err:
raise HTTPException(
status_code=404,
detail=f"{err}")
def deploy_squad(db, deploy_id: int, squad: str):
try:
deploy_data = crud_deploys.get_deploy_by_id_squad(
db=db, deploy_id=deploy_id, squad=squad)
if deploy_data is None:
raise Exception("Deploy id Not Found")
return deploy_data
except Exception as err:
raise HTTPException(
status_code=404,
detail=f"{err}")
def get_deploy(db, deploy_id: int):
try:
deploy_data = crud_deploys.get_deploy_by_id(
db=db, deploy_id=deploy_id)
if deploy_data is None:
raise Exception("Deploy id Not Found")
return deploy_data
except Exception as err:
raise HTTPException(
status_code=404,
detail=f"{err}")
def check_deploy_exist(db, deploy_name: str, squad: str, env: str, stack: str):
data_source_check = f'{deploy_name}-{squad}-{env}-{stack}'
try:
db_data = crud_deploys.get_deploy_by_name_squad(
db=db, deploy_name=deploy_name, squad=squad, environment=env)
if db_data is not None:
data_db_check = f'{db_data.name}-{db_data.squad}-{db_data.environment}-{db_data.stack_name}'
if data_source_check == data_db_check:
raise Exception(
"The name of the deployment already exists in the current squad and with specified environment")
except Exception as err:
raise HTTPException(
status_code=409,
detail=f"{err}")
def check_deploy_state(task_id: str):
result = AsyncResult(task_id)
list_state = ["SUCCESS", "FAILURE", "REVOKED", "PENDING"]
return any(result.state == i for i in list_state)
def check_deploy_task_pending_state(deploy_name, squad, environment, task_id=None):
if task_id:
result = AsyncResult(str(task_id))
list_state = ["REVOKED"]
if any(result.state == i for i in list_state):
return True
try:
if r.exists(f"{deploy_name}-{squad}-{environment}"):
raise Exception("Task already exists in pending state waiting to be executed")
except Exception as err:
raise HTTPException(
status_code=409, detail=f"{err}")
r.set(f"{deploy_name}-{squad}-{environment}", "Locked")
r.expire(f"{deploy_name}-{squad}-{environment}", settings.TASK_LOCKED_EXPIRED)
def check_providers(stack_name):
providers_support = settings.PROVIDERS_SUPPORT
if any(i in stack_name.lower() for i in providers_support):
return True
else:
raise HTTPException(
status_code=404,
detail=f"stack name {stack_name.lower()} no content providers support name preffix: {providers_support}")
def activity_log(func):
@wraps(func)
async def wrapper(*args, **kwargs):
crud_activity.create_activity_log(
db=kwargs['db'],
username=kwargs['current_user'].username,
squad=kwargs['current_user'].squad,
action=f'Delete User {kwargs["user"]}'
)
return await func(*args, **kwargs)
return wrapper
def check_cron_schedule(cron_time: str):
if cron_time:
if not croniter.is_valid(cron_time):
raise ValueError("Cron time its no valid")
return True
| true | true |
1c2e91e14d21753133705a1872b6a3cc03cf96d4 | 1,675 | py | Python | Autoencoding/save32by32ForAll.py | MiningMyBusiness/SketchyExperiments | 11f3baee63a984501b9707aa58420540b19db59f | [
"MIT"
] | 2 | 2019-01-31T19:56:50.000Z | 2019-03-21T12:26:57.000Z | Autoencoding/save32by32ForAll.py | MiningMyBusiness/SketchyExperiments | 11f3baee63a984501b9707aa58420540b19db59f | [
"MIT"
] | null | null | null | Autoencoding/save32by32ForAll.py | MiningMyBusiness/SketchyExperiments | 11f3baee63a984501b9707aa58420540b19db59f | [
"MIT"
] | null | null | null | ## Name: Save 32 by 32 image vector
## Description: Reads in the matrix representation of the images
## for each class and resizes each image from 256 by 256
## to 32 by 32 and then converts it to a vector with
## 1024 elements (32*32). A stack of these vectors (matrix)
## is then saved in the class directory.
## NOTE: This code uses the output from PNGtoNumpy.py
## Author: Kiran Bhattacharyya
import glob
import numpy as np
import matplotlib.pyplot as plt
import cv2
# location of the data
dataLoc = '256X256/sketch/tx_000100000000'
allFolders = glob.glob(dataLoc + '/*') # get all folders with data
newSize = 32  # target side length: each 256x256 sketch is resized to 32x32
def readImgAndResize(folderName, multi, newSize):
    """Load a class folder's image stack and return flattened resized rows.

    Args:
        folderName: class directory containing 'matStack.npy'
            (an H x W x N stack of sketches).
        multi: fraction (0..1] of the stack to convert; rows beyond
            int(multi * N) are left as zeros, matching the original behavior.
        newSize: side length of the resized square image.

    Returns:
        (N, newSize**2) array; row i is the normalized, flattened resize of
        image i for i < int(multi * N).
    """
    # Bug fix: the original read from the module-level `folder` variable and
    # ignored the `folderName` parameter entirely.
    imgStack_mat = np.load(folderName + '/matStack.npy')
    num_of_images = imgStack_mat.shape[2]
    smallerImgs = np.zeros((num_of_images, newSize**2))
    for i in range(0, int(multi * num_of_images)):
        smallerImgs[i, :] = resizeAndVectorize(imgStack_mat[:, :, i], newSize)
    return smallerImgs
def resizeAndVectorize(thisImg, newSize):
    """Resize a 2-D image to newSize x newSize and return a normalized vector.

    The result is flattened to length newSize**2, shifted so its minimum is
    0 and scaled so its maximum is 1.
    """
    resized = cv2.resize(thisImg, (newSize, newSize),
                         interpolation=cv2.INTER_AREA)
    resized = np.reshape(resized, newSize**2)
    resized = resized - np.min(resized)
    peak = np.max(resized)
    # Robustness fix: a constant image (all pixels equal) previously caused
    # a division by zero and produced NaNs; leave it as all zeros instead.
    if peak > 0:
        resized = resized / peak
    return resized
# Convert every class folder's stack and save the vectorized result in place.
folderCount = 0
iter = 0      # NOTE(review): unused below, and shadows the builtin `iter`
multi = 1.0   # convert 100% of every stack
allData = []  # NOTE(review): never populated below
for folder in allFolders:
    # 1-based progress indicator.
    print('Folder ' + str(folderCount+1) + ' of ' + str(len(allFolders)) + '.')
    smallerImgs = readImgAndResize(folder, multi, newSize)
    # Saved alongside the source matStack.npy inside the class directory.
    np.save(folder + '/32by32vecStack.npy', smallerImgs)
    folderCount += 1
return smallerImgs
def resizeAndVectorize(thisImg, newSize):
resized = cv2.resize(thisImg, (newSize, newSize),
interpolation=cv2.INTER_AREA)
resized = np.reshape(resized, newSize**2)
resized = resized - np.min(resized)
resized = resized/np.max(resized)
return resized
folderCount = 0
iter = 0
multi = 1.0
allData = []
for folder in allFolders:
print('Folder ' + str(folderCount+1) + ' of ' + str(len(allFolders)) + '.')
smallerImgs = readImgAndResize(folder, multi, newSize)
np.save(folder + '/32by32vecStack.npy', smallerImgs)
folderCount += 1
| true | true |
1c2e9290459d49bd1262122f8dd1051edf6b6a31 | 1,516 | py | Python | crypto_data_app/__init__.py | gldeng/crypto-data-app | 4460ffd2417a02605862b66073e172519ea5fbd7 | [
"MIT"
] | null | null | null | crypto_data_app/__init__.py | gldeng/crypto-data-app | 4460ffd2417a02605862b66073e172519ea5fbd7 | [
"MIT"
] | null | null | null | crypto_data_app/__init__.py | gldeng/crypto-data-app | 4460ffd2417a02605862b66073e172519ea5fbd7 | [
"MIT"
] | null | null | null | from flask_redis import FlaskRedis
from flask import Flask
from flask_restful import Resource, Api
from datetime import datetime
def create_app(config_pyfile):
    """Build the Flask app exposing crypto-pair trade data stored in Redis."""
    app = Flask(__name__, instance_relative_config=False)
    app.config.from_pyfile(config_pyfile)
    redis_store = FlaskRedis(app, strict=False)
    api = Api(app)

    class Pairs(Resource):
        """List every distinct trading pair present in Redis."""

        def get(self):
            prefixes = {key.split(':')[0] for key in redis_store.keys()}
            return sorted(prefixes)

    class OnePair(Resource):
        """Aggregate recent trades (last 10 minutes) for a single pair."""

        def get(self, pair):
            exchanges = []
            min_ = None
            max_ = None
            for key in redis_store.keys('%s*' % pair):
                entry = redis_store.hgetall(key)
                if 'trades_date_time' not in entry:
                    continue
                ts = datetime.strptime(entry['trades_date_time'], '%Y%m%d %H:%M:%S.%f')
                if (datetime.utcnow() - ts).total_seconds() >= 600:
                    # exclude if older than 10 minutes
                    continue
                price = float(entry['trade_px'])
                if min_ is None or price < min_:
                    min_ = price
                if max_ is None or price > max_:
                    max_ = price
                exchanges.append(entry)
            return {
                'min': min_,
                'max': max_,
                'exchanges': exchanges
            }

    api.add_resource(Pairs, '/pairs')
    api.add_resource(OnePair, '/pairs/<pair>')
    return app
| 31.583333 | 88 | 0.525066 | from flask_redis import FlaskRedis
from flask import Flask
from flask_restful import Resource, Api
from datetime import datetime
def create_app(config_pyfile):
app = Flask(__name__, instance_relative_config=False)
app.config.from_pyfile(config_pyfile)
redis_store = FlaskRedis(app, strict=False)
api = Api(app)
class Pairs(Resource):
def get(self):
return sorted(list(set(map(lambda x: x.split(':')[0], redis_store.keys()))))
class OnePair(Resource):
def get(self, pair):
keys = redis_store.keys('%s*' % pair)
exchanges = []
min_ = None
max_ = None
for k in keys:
d = redis_store.hgetall(k)
if 'trades_date_time' not in d:
continue
ts = datetime.strptime(d['trades_date_time'], '%Y%m%d %H:%M:%S.%f')
if (datetime.utcnow() - ts).total_seconds() >= 600:
continue
price = float(d['trade_px'])
if min_ is None or price < min_:
min_ = price
if max_ is None or price > max_:
max_ = price
exchanges.append(d)
return {
'min': min_,
'max': max_,
'exchanges': exchanges
}
api.add_resource(Pairs, '/pairs')
api.add_resource(OnePair, '/pairs/<pair>')
return app
| true | true |
1c2e930bbcde5c690c5b9f48f7f7e63d0c74e976 | 2,587 | py | Python | Space Invaders/utilities/deus_ex.py | msutic/python_project_alpha | 5fed153babf294ce5f5714de8f711b1a5943d2d5 | [
"MIT"
] | 1 | 2021-01-08T00:38:02.000Z | 2021-01-08T00:38:02.000Z | Space Invaders/utilities/deus_ex.py | msutic/python_project_alpha | 5fed153babf294ce5f5714de8f711b1a5943d2d5 | [
"MIT"
] | 4 | 2021-01-12T20:07:19.000Z | 2021-03-19T22:21:31.000Z | Space Invaders/utilities/deus_ex.py | msutic/python_project_alpha | 5fed153babf294ce5f5714de8f711b1a5943d2d5 | [
"MIT"
] | null | null | null | from time import time, sleep
from PyQt5.QtCore import pyqtSignal, QThread, pyqtSlot
from PyQt5.QtWidgets import QLabel
from config import cfg
class DeusEx(QThread):
    """Background thread that expires power-ups and detects player collisions.

    Signals:
        empower: a power-up was visible for more than 2 seconds and should be
            handled (removed/applied) by the UI thread.
        collision_occured: a player's sprite rectangle overlaps a power-up's
            rectangle; carries the player, the power-up and the power-up index.
    """
    empower = pyqtSignal(QLabel)
    collision_occured = pyqtSignal(QLabel, QLabel, int)

    def __init__(self):
        super().__init__()
        self.is_not_done = True   # cleared externally to stop run()
        self.powers = []          # currently visible power-up labels
        self.index = 0            # index of the most recently added power-up
        self.players = []         # player labels to test for collisions

    def add_power(self, power: QLabel, index: int):
        """Register a power-up and restart the shared 2-second expiry timer.

        NOTE(review): a single timestamp is shared by all power-ups, so
        adding a new one also postpones expiry of the older ones.
        """
        self.powers.append(power)
        self.index = index
        self.time_added = time()

    def rem_power(self, power: QLabel):
        self.powers.remove(power)

    def add_player(self, player: QLabel):
        self.players.append(player)

    def rem_player(self, player: QLabel):
        self.players.remove(player)

    @pyqtSlot()
    def run(self):
        """Poll every 50 ms: expire old power-ups, then emit at most one collision per tick."""
        while self.is_not_done:
            collided = False
            time_now = time()
            if len(self.powers) > 0:
                if time_now - self.time_added > 2:
                    # Bug fix: the original removed elements from self.powers
                    # while iterating it, which skips every other power-up.
                    # Iterate a snapshot instead.
                    for power in list(self.powers):
                        self.powers.remove(power)
                        self.empower.emit(power)
            for player in self.players:
                if collided:
                    # Matches original behavior: after one collision, no
                    # further collisions are processed this tick.
                    break
                px0, py0 = player.geometry().x(), player.geometry().y()
                px1 = px0 + cfg.SPACESHIP_WIDTH
                py1 = py0 + cfg.SPACESHIP_HEIGHT
                # Snapshot again: rem_power() mutates self.powers mid-loop.
                for power in list(self.powers):
                    qx0, qy0 = power.geometry().x(), power.geometry().y()
                    qx1, qy1 = qx0 + 30, qy0 + 30
                    # Axis-aligned rectangle overlap: equivalent to the former
                    # per-pixel membership scan over the two coordinate ranges,
                    # but O(1) instead of O(width * height).
                    if px0 < qx1 and qx0 < px1 and py0 < qy1 and qy0 < py1:
                        self.rem_power(power)
                        self.collision_occured.emit(player, power, self.index)
                        collided = True
                        break
            sleep(0.05)
| 33.166667 | 90 | 0.526479 | from time import time, sleep
from PyQt5.QtCore import pyqtSignal, QThread, pyqtSlot
from PyQt5.QtWidgets import QLabel
from config import cfg
class DeusEx(QThread):
empower = pyqtSignal(QLabel)
collision_occured = pyqtSignal(QLabel, QLabel, int)
def __init__(self):
super().__init__()
self.is_not_done = True
self.powers = []
self.index = 0
self.players = []
def add_power(self, power: QLabel, index: int):
self.powers.append(power)
self.index = index
self.time_added = time()
def rem_power(self, power: QLabel):
self.powers.remove(power)
def add_player(self, player: QLabel):
self.players.append(player)
def rem_player(self, player: QLabel):
self.players.remove(player)
@pyqtSlot()
def run(self):
while self.is_not_done:
collided = False
time_now = time()
if len(self.powers) > 0:
if time_now - self.time_added > 2:
for power in self.powers:
self.powers.remove(power)
self.empower.emit(power)
for player in self.players:
player_xy_begin = [player.geometry().x(), player.geometry().y()]
player_xy_end = [
player.geometry().x() + cfg.SPACESHIP_WIDTH,
player.geometry().y() + cfg.SPACESHIP_HEIGHT
]
player_x_coordinates = range(player_xy_begin[0], player_xy_end[0])
player_y_coordinates = range(player_xy_begin[1], player_xy_end[1])
for power in self.powers:
power_xy_begin = [power.geometry().x(), power.geometry().y()]
power_xy_end = [power.geometry().x() + 30, power.geometry().y() + 30]
power_x_coords = range(power_xy_begin[0], power_xy_end[0])
power_y_coords = range(power_xy_begin[1], power_xy_end[1])
for player_y in player_y_coordinates:
if collided:
break
if player_y in power_y_coords:
for player_x in player_x_coordinates:
if player_x in power_x_coords:
self.rem_power(power)
self.collision_occured.emit(player, power, self.index)
collided = True
break
sleep(0.05)
| true | true |
1c2e93b8c4720408303fc37f202fa46b4a4ef25f | 5,329 | py | Python | confidnet/loaders/loader.py | luoyan407/predict_trustworthiness_smallscale | b7e1e2a68b0aee9b484228d1b5686f7252919e97 | [
"Apache-2.0"
] | 149 | 2019-10-01T14:04:05.000Z | 2022-03-24T12:25:15.000Z | confidnet/loaders/loader.py | luoyan407/predict_trustworthiness_smallscale | b7e1e2a68b0aee9b484228d1b5686f7252919e97 | [
"Apache-2.0"
] | 10 | 2019-12-12T09:45:50.000Z | 2021-12-27T04:45:22.000Z | confidnet/loaders/loader.py | luoyan407/predict_trustworthiness_smallscale | b7e1e2a68b0aee9b484228d1b5686f7252919e97 | [
"Apache-2.0"
] | 30 | 2019-12-02T16:25:22.000Z | 2022-02-16T10:48:47.000Z | from pathlib import Path
import numpy as np
import torch
from torch.utils.data.sampler import SubsetRandomSampler
from confidnet.augmentations import get_composed_augmentations
from confidnet.utils.logger import get_logger
LOGGER = get_logger(__name__, level="DEBUG")
class AbstractDataLoader:
    """Base class that builds train/val/test ``DataLoader``s from a config dict.

    Subclasses implement :meth:`load_dataset` to populate
    ``train_dataset`` / ``val_dataset`` / ``test_dataset``;
    :meth:`make_loaders` then wraps them in torch DataLoaders, optionally
    carving a validation split out of the training set via index samplers.
    """

    def __init__(self, config_args):
        """Read paths, hyper-parameters and augmentation settings from *config_args*."""
        self.output_folder = config_args['training']['output_folder']
        self.data_dir = config_args['data']['data_dir']
        self.batch_size = config_args['training']['batch_size']
        # (height, width, channels) — NOTE(review): assumes input_size is
        # ordered [H, W]; confirm against the config schema.
        self.img_size = (config_args['data']['input_size'][0],
                         config_args['data']['input_size'][1],
                         config_args['data']['input_channels'])
        self.augmentations = config_args['training'].get('augmentations', None)
        # If set, training runs on the validation split instead (see make_loaders).
        self.ft_on_val = config_args['training'].get('ft_on_val', None)
        # Folder of the run being resumed, used to reuse its train/val split.
        self.resume_folder = config_args['model']['resume'].parent if isinstance(config_args['model']['resume'], Path) else None
        self.valid_size = config_args['data']['valid_size']
        self.perturbed_folder = config_args['data'].get('perturbed_images', None)
        self.pin_memory = config_args['training']['pin_memory']
        self.num_workers = config_args['training']['num_workers']
        self.train_loader, self.val_loader, self.test_loader = None, None, None
        # Set up augmentation pipelines (only when configured).
        self.augmentations_train, self.augmentations_train_lbl = None, None
        self.augmentations_test, self.augmentations_test_lbl = None, None
        if self.augmentations:
            LOGGER.info("--- Augmentations ---")
            self.add_augmentations()
        # Load dataset (populated by the subclass hook).
        self.train_dataset, self.val_dataset, self.test_dataset = None, None, None
        self.load_dataset()

    def add_augmentations(self):
        """Compose the train/test augmentation pipelines from the config.

        Label pipelines drop photometric transforms (``normalize``,
        ``color_jitter``) that must not touch targets; the test pipeline keeps
        only normalization; the test label pipeline applies nothing.
        """
        self.augmentations_train = get_composed_augmentations(
            self.augmentations, training="classif"
        )
        self.augmentations_train_lbl = get_composed_augmentations(
            {
                key: self.augmentations[key]
                for key in self.augmentations
                if key not in ["normalize", "color_jitter"]
            },
            verbose=False,
            training="classif",
        )
        self.augmentations_test = get_composed_augmentations(
            {key: self.augmentations[key] for key in self.augmentations if key == "normalize"},
            verbose=False,
            training="classif",
        )
        self.augmentations_test_lbl = get_composed_augmentations(
            None, verbose=False, training="classif"
        )

    def load_dataset(self):
        """Subclass hook: populate train/val/test datasets. Default is a no-op."""
        pass

    def make_loaders(self):
        """Build the test loader, then either a plain train loader or a
        sampler-based train/val split, reusing saved split indices when present."""
        self.test_loader = torch.utils.data.DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            pin_memory=self.pin_memory,
            num_workers=self.num_workers,
        )
        if self.valid_size == 0:
            LOGGER.warning("Valid size=0, no validation loader")
            self.train_loader = torch.utils.data.DataLoader(
                self.train_dataset,
                batch_size=self.batch_size,
                shuffle=True,
                pin_memory=self.pin_memory,
                num_workers=self.num_workers,
            )
        else:
            num_train = len(self.train_dataset)
            indices = list(range(num_train))
            # Resolve the train/val split indices, in priority order:
            # 1) this run's saved split, 2) the resumed run's split,
            # 3) a fresh random split (persisted for later runs).
            if (self.output_folder / "train_idx.npy").exists():
                LOGGER.warning("Loading existing train-val split indices")
                train_idx = np.load(self.output_folder / "train_idx.npy")
                val_idx = np.load(self.output_folder / "val_idx.npy")
            elif self.resume_folder:
                LOGGER.warning("Loading existing train-val split indices from ORIGINAL training")
                train_idx = np.load(self.resume_folder / "train_idx.npy")
                val_idx = np.load(self.resume_folder / "val_idx.npy")
            else:
                split = int(np.floor(self.valid_size * num_train))
                # NOTE: seeds NumPy's *global* RNG for a reproducible split;
                # this also affects any other np.random consumer afterwards.
                np.random.seed(42)
                np.random.shuffle(indices)
                train_idx, val_idx = indices[split:], indices[:split]
                np.save(self.output_folder / "train_idx.npy", train_idx)
                np.save(self.output_folder / "val_idx.npy", val_idx)
            # Make samplers over the disjoint index sets.
            train_sampler = SubsetRandomSampler(train_idx)
            val_sampler = SubsetRandomSampler(val_idx)
            # Special case where the val set is used for training (fine-tuning).
            if self.ft_on_val:
                LOGGER.warning("Using val set as training")
                train_sampler = val_sampler
            # Make loaders. Both wrap train_dataset on purpose: the samplers,
            # not the dataset object, define the train/val split.
            self.train_loader = torch.utils.data.DataLoader(
                dataset=self.train_dataset,
                batch_size=self.batch_size,
                sampler=train_sampler,
                pin_memory=self.pin_memory,
                num_workers=self.num_workers,
            )
            self.val_loader = torch.utils.data.DataLoader(
                dataset=self.train_dataset,
                batch_size=self.batch_size,
                sampler=val_sampler,
                pin_memory=self.pin_memory,
                num_workers=self.num_workers,
            )
| 42.632 | 128 | 0.605555 | from pathlib import Path
import numpy as np
import torch
from torch.utils.data.sampler import SubsetRandomSampler
from confidnet.augmentations import get_composed_augmentations
from confidnet.utils.logger import get_logger
LOGGER = get_logger(__name__, level="DEBUG")
class AbstractDataLoader:
def __init__(self, config_args):
self.output_folder = config_args['training']['output_folder']
self.data_dir = config_args['data']['data_dir']
self.batch_size = config_args['training']['batch_size']
self.img_size = (config_args['data']['input_size'][0],
config_args['data']['input_size'][1],
config_args['data']['input_channels'])
self.augmentations = config_args['training'].get('augmentations', None)
self.ft_on_val = config_args['training'].get('ft_on_val', None)
self.resume_folder = config_args['model']['resume'].parent if isinstance(config_args['model']['resume'], Path) else None
self.valid_size = config_args['data']['valid_size']
self.perturbed_folder = config_args['data'].get('perturbed_images', None)
self.pin_memory = config_args['training']['pin_memory']
self.num_workers = config_args['training']['num_workers']
self.train_loader, self.val_loader, self.test_loader = None, None, None
self.augmentations_train, self.augmentations_train_lbl = None, None
self.augmentations_test, self.augmentations_test_lbl = None, None
if self.augmentations:
LOGGER.info("--- Augmentations ---")
self.add_augmentations()
self.train_dataset, self.val_dataset, self.test_dataset = None, None, None
self.load_dataset()
def add_augmentations(self):
self.augmentations_train = get_composed_augmentations(
self.augmentations, training="classif"
)
self.augmentations_train_lbl = get_composed_augmentations(
{
key: self.augmentations[key]
for key in self.augmentations
if key not in ["normalize", "color_jitter"]
},
verbose=False,
training="classif",
)
self.augmentations_test = get_composed_augmentations(
{key: self.augmentations[key] for key in self.augmentations if key == "normalize"},
verbose=False,
training="classif",
)
self.augmentations_test_lbl = get_composed_augmentations(
None, verbose=False, training="classif"
)
def load_dataset(self):
pass
def make_loaders(self):
self.test_loader = torch.utils.data.DataLoader(
self.test_dataset,
batch_size=self.batch_size,
shuffle=False,
pin_memory=self.pin_memory,
num_workers=self.num_workers,
)
if self.valid_size == 0:
LOGGER.warning("Valid size=0, no validation loader")
self.train_loader = torch.utils.data.DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
pin_memory=self.pin_memory,
num_workers=self.num_workers,
)
else:
num_train = len(self.train_dataset)
indices = list(range(num_train))
if (self.output_folder / "train_idx.npy").exists():
LOGGER.warning("Loading existing train-val split indices")
train_idx = np.load(self.output_folder / "train_idx.npy")
val_idx = np.load(self.output_folder / "val_idx.npy")
elif self.resume_folder:
LOGGER.warning("Loading existing train-val split indices from ORIGINAL training")
train_idx = np.load(self.resume_folder / "train_idx.npy")
val_idx = np.load(self.resume_folder / "val_idx.npy")
else:
split = int(np.floor(self.valid_size * num_train))
np.random.seed(42)
np.random.shuffle(indices)
train_idx, val_idx = indices[split:], indices[:split]
np.save(self.output_folder / "train_idx.npy", train_idx)
np.save(self.output_folder / "val_idx.npy", val_idx)
train_sampler = SubsetRandomSampler(train_idx)
val_sampler = SubsetRandomSampler(val_idx)
if self.ft_on_val:
LOGGER.warning("Using val set as training")
train_sampler = val_sampler
self.train_loader = torch.utils.data.DataLoader(
dataset=self.train_dataset,
batch_size=self.batch_size,
sampler=train_sampler,
pin_memory=self.pin_memory,
num_workers=self.num_workers,
)
self.val_loader = torch.utils.data.DataLoader(
dataset=self.train_dataset,
batch_size=self.batch_size,
sampler=val_sampler,
pin_memory=self.pin_memory,
num_workers=self.num_workers,
)
| true | true |
1c2e9443cf5eb3a35aeb948b351e7d6cdc5b1396 | 13,537 | py | Python | plugins/modules/oci_analytics_private_access_channel.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 108 | 2020-05-19T20:46:10.000Z | 2022-03-25T14:10:01.000Z | plugins/modules/oci_analytics_private_access_channel.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 90 | 2020-06-14T22:07:11.000Z | 2022-03-07T05:40:29.000Z | plugins/modules/oci_analytics_private_access_channel.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 42 | 2020-08-30T23:09:12.000Z | 2022-03-25T16:58:01.000Z | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_analytics_private_access_channel
short_description: Manage a PrivateAccessChannel resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a PrivateAccessChannel resource in Oracle Cloud Infrastructure
- For I(state=present), create an Private access Channel for the Analytics instance. The operation is long-running
and creates a new WorkRequest.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
analytics_instance_id:
description:
- The OCID of the AnalyticsInstance.
type: str
aliases: ["id"]
required: true
display_name:
description:
- Display Name of the Private Access Channel.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
aliases: ["name"]
vcn_id:
description:
- OCID of the customer VCN peered with private access channel.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
subnet_id:
description:
- OCID of the customer subnet connected to private access channel.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
private_source_dns_zones:
description:
- List of Private Source DNS zones registered with Private Access Channel,
where datasource hostnames from these dns zones / domains will be resolved in the peered VCN for access from Analytics Instance.
Min of 1 is required and Max of 30 Private Source DNS zones can be registered.
- Required for create using I(state=present).
- This parameter is updatable.
type: list
elements: dict
suboptions:
dns_zone:
description:
- "Private Source DNS Zone. Ex: example-vcn.oraclevcn.com, corp.example.com."
type: str
required: true
description:
description:
- Description of private source dns zone.
type: str
private_access_channel_key:
description:
- The unique identifier key of the Private Access Channel.
- Required for update using I(state=present) with analytics_instance_id present.
- Required for delete using I(state=absent).
type: str
state:
description:
- The state of the PrivateAccessChannel.
- Use I(state=present) to create or update a PrivateAccessChannel.
- Use I(state=absent) to delete a PrivateAccessChannel.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create private_access_channel
oci_analytics_private_access_channel:
# required
analytics_instance_id: "ocid1.analyticsinstance.oc1..xxxxxxEXAMPLExxxxxx"
display_name: display_name_example
vcn_id: "ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx"
subnet_id: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
private_source_dns_zones:
- # required
dns_zone: dns_zone_example
# optional
description: description_example
- name: Update private_access_channel
oci_analytics_private_access_channel:
# required
analytics_instance_id: "ocid1.analyticsinstance.oc1..xxxxxxEXAMPLExxxxxx"
private_access_channel_key: private_access_channel_key_example
# optional
display_name: display_name_example
vcn_id: "ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx"
subnet_id: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
private_source_dns_zones:
- # required
dns_zone: dns_zone_example
# optional
description: description_example
- name: Delete private_access_channel
oci_analytics_private_access_channel:
# required
analytics_instance_id: "ocid1.analyticsinstance.oc1..xxxxxxEXAMPLExxxxxx"
private_access_channel_key: private_access_channel_key_example
state: absent
"""
RETURN = """
private_access_channel:
description:
- Details of the PrivateAccessChannel resource acted upon by the current operation
returned: on success
type: complex
contains:
key:
description:
- Private Access Channel unique identifier key.
returned: on success
type: str
sample: key_example
display_name:
description:
- Display Name of the Private Access Channel.
returned: on success
type: str
sample: display_name_example
vcn_id:
description:
- OCID of the customer VCN peered with private access channel.
returned: on success
type: str
sample: "ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx"
subnet_id:
description:
- OCID of the customer subnet connected to private access channel.
returned: on success
type: str
sample: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
ip_address:
description:
- IP Address of the Private Access channel.
returned: on success
type: str
sample: ip_address_example
egress_source_ip_addresses:
description:
- The list of IP addresses from the customer subnet connected to private access channel, used as a source Ip by Private Access Channel
for network traffic from the AnalyticsInstance to Private Sources.
returned: on success
type: list
sample: []
private_source_dns_zones:
description:
- List of Private Source DNS zones registered with Private Access Channel,
where datasource hostnames from these dns zones / domains will be resolved in the peered VCN for access from Analytics Instance.
Min of 1 is required and Max of 30 Private Source DNS zones can be registered.
returned: on success
type: complex
contains:
dns_zone:
description:
- "Private Source DNS Zone. Ex: example-vcn.oraclevcn.com, corp.example.com."
returned: on success
type: str
sample: dns_zone_example
description:
description:
- Description of private source dns zone.
returned: on success
type: str
sample: description_example
sample: {
"key": "key_example",
"display_name": "display_name_example",
"vcn_id": "ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx",
"subnet_id": "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx",
"ip_address": "ip_address_example",
"egress_source_ip_addresses": [],
"private_source_dns_zones": [{
"dns_zone": "dns_zone_example",
"description": "description_example"
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.analytics import AnalyticsClient
from oci.analytics.models import CreatePrivateAccessChannelDetails
from oci.analytics.models import UpdatePrivateAccessChannelDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class PrivateAccessChannelHelperGen(OCIResourceHelperBase):
    """Supported operations: create, update, get and delete"""
    # Generated helper ("GENERATED FILE - DO NOT EDIT"): wires the OCI
    # Analytics client's private-access-channel calls into the base helper.

    def get_module_resource_id_param(self):
        # Module parameter naming the parent Analytics instance.
        return "analytics_instance_id"

    def get_module_resource_id(self):
        return self.module.params.get("analytics_instance_id")

    def get_get_fn(self):
        return self.client.get_private_access_channel

    def get_resource(self):
        # Fetch the channel by (key, instance id), retrying with backoff.
        return oci_common_utils.call_with_backoff(
            self.client.get_private_access_channel,
            private_access_channel_key=self.module.params.get(
                "private_access_channel_key"
            ),
            analytics_instance_id=self.module.params.get("analytics_instance_id"),
        )

    def get_create_model_class(self):
        return CreatePrivateAccessChannelDetails

    def create_resource(self):
        # Long-running create: submit and wait on the resulting work request.
        create_details = self.get_create_model()
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.create_private_access_channel,
            call_fn_args=(),
            call_fn_kwargs=dict(
                analytics_instance_id=self.module.params.get("analytics_instance_id"),
                create_private_access_channel_details=create_details,
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation=oci_common_utils.CREATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )

    def get_update_model_class(self):
        return UpdatePrivateAccessChannelDetails

    def update_resource(self):
        # Long-running update keyed by (channel key, instance id).
        update_details = self.get_update_model()
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.update_private_access_channel,
            call_fn_args=(),
            call_fn_kwargs=dict(
                private_access_channel_key=self.module.params.get(
                    "private_access_channel_key"
                ),
                analytics_instance_id=self.module.params.get("analytics_instance_id"),
                update_private_access_channel_details=update_details,
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation=oci_common_utils.UPDATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )

    def delete_resource(self):
        # Long-running delete; waits for the work request to complete.
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.delete_private_access_channel,
            call_fn_args=(),
            call_fn_kwargs=dict(
                private_access_channel_key=self.module.params.get(
                    "private_access_channel_key"
                ),
                analytics_instance_id=self.module.params.get("analytics_instance_id"),
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation=oci_common_utils.DELETE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )
PrivateAccessChannelHelperCustom = get_custom_class("PrivateAccessChannelHelperCustom")
class ResourceHelper(PrivateAccessChannelHelperCustom, PrivateAccessChannelHelperGen):
    """Concrete helper: custom overrides take MRO precedence over the generated base."""
    pass
def main():
    """Ansible module entry point: parse args and dispatch create/update/delete."""
    # Common arg spec plus this module's own parameters.
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=True, supports_wait=True
    )
    module_args.update(
        dict(
            analytics_instance_id=dict(aliases=["id"], type="str", required=True),
            display_name=dict(aliases=["name"], type="str"),
            vcn_id=dict(type="str"),
            subnet_id=dict(type="str"),
            private_source_dns_zones=dict(
                type="list",
                elements="dict",
                options=dict(
                    dns_zone=dict(type="str", required=True),
                    description=dict(type="str"),
                ),
            ),
            # no_log: the channel key is treated as sensitive.
            private_access_channel_key=dict(type="str", no_log=True),
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    resource_helper = ResourceHelper(
        module=module,
        resource_type="private_access_channel",
        service_client_class=AnalyticsClient,
        namespace="analytics",
    )
    result = dict(changed=False)
    # state=absent -> delete; state=present -> update if the key is given,
    # otherwise create.
    if resource_helper.is_delete():
        result = resource_helper.delete()
    elif resource_helper.is_update():
        result = resource_helper.update()
    elif resource_helper.is_create():
        result = resource_helper.create()
    module.exit_json(**result)
if __name__ == "__main__":
main()
| 36.785326 | 150 | 0.650218 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_analytics_private_access_channel
short_description: Manage a PrivateAccessChannel resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a PrivateAccessChannel resource in Oracle Cloud Infrastructure
- For I(state=present), create an Private access Channel for the Analytics instance. The operation is long-running
and creates a new WorkRequest.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
analytics_instance_id:
description:
- The OCID of the AnalyticsInstance.
type: str
aliases: ["id"]
required: true
display_name:
description:
- Display Name of the Private Access Channel.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
aliases: ["name"]
vcn_id:
description:
- OCID of the customer VCN peered with private access channel.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
subnet_id:
description:
- OCID of the customer subnet connected to private access channel.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
private_source_dns_zones:
description:
- List of Private Source DNS zones registered with Private Access Channel,
where datasource hostnames from these dns zones / domains will be resolved in the peered VCN for access from Analytics Instance.
Min of 1 is required and Max of 30 Private Source DNS zones can be registered.
- Required for create using I(state=present).
- This parameter is updatable.
type: list
elements: dict
suboptions:
dns_zone:
description:
- "Private Source DNS Zone. Ex: example-vcn.oraclevcn.com, corp.example.com."
type: str
required: true
description:
description:
- Description of private source dns zone.
type: str
private_access_channel_key:
description:
- The unique identifier key of the Private Access Channel.
- Required for update using I(state=present) with analytics_instance_id present.
- Required for delete using I(state=absent).
type: str
state:
description:
- The state of the PrivateAccessChannel.
- Use I(state=present) to create or update a PrivateAccessChannel.
- Use I(state=absent) to delete a PrivateAccessChannel.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create private_access_channel
oci_analytics_private_access_channel:
# required
analytics_instance_id: "ocid1.analyticsinstance.oc1..xxxxxxEXAMPLExxxxxx"
display_name: display_name_example
vcn_id: "ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx"
subnet_id: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
private_source_dns_zones:
- # required
dns_zone: dns_zone_example
# optional
description: description_example
- name: Update private_access_channel
oci_analytics_private_access_channel:
# required
analytics_instance_id: "ocid1.analyticsinstance.oc1..xxxxxxEXAMPLExxxxxx"
private_access_channel_key: private_access_channel_key_example
# optional
display_name: display_name_example
vcn_id: "ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx"
subnet_id: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
private_source_dns_zones:
- # required
dns_zone: dns_zone_example
# optional
description: description_example
- name: Delete private_access_channel
oci_analytics_private_access_channel:
# required
analytics_instance_id: "ocid1.analyticsinstance.oc1..xxxxxxEXAMPLExxxxxx"
private_access_channel_key: private_access_channel_key_example
state: absent
"""
RETURN = """
private_access_channel:
description:
- Details of the PrivateAccessChannel resource acted upon by the current operation
returned: on success
type: complex
contains:
key:
description:
- Private Access Channel unique identifier key.
returned: on success
type: str
sample: key_example
display_name:
description:
- Display Name of the Private Access Channel.
returned: on success
type: str
sample: display_name_example
vcn_id:
description:
- OCID of the customer VCN peered with private access channel.
returned: on success
type: str
sample: "ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx"
subnet_id:
description:
- OCID of the customer subnet connected to private access channel.
returned: on success
type: str
sample: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
ip_address:
description:
- IP Address of the Private Access channel.
returned: on success
type: str
sample: ip_address_example
egress_source_ip_addresses:
description:
- The list of IP addresses from the customer subnet connected to private access channel, used as a source Ip by Private Access Channel
for network traffic from the AnalyticsInstance to Private Sources.
returned: on success
type: list
sample: []
private_source_dns_zones:
description:
- List of Private Source DNS zones registered with Private Access Channel,
where datasource hostnames from these dns zones / domains will be resolved in the peered VCN for access from Analytics Instance.
Min of 1 is required and Max of 30 Private Source DNS zones can be registered.
returned: on success
type: complex
contains:
dns_zone:
description:
- "Private Source DNS Zone. Ex: example-vcn.oraclevcn.com, corp.example.com."
returned: on success
type: str
sample: dns_zone_example
description:
description:
- Description of private source dns zone.
returned: on success
type: str
sample: description_example
sample: {
"key": "key_example",
"display_name": "display_name_example",
"vcn_id": "ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx",
"subnet_id": "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx",
"ip_address": "ip_address_example",
"egress_source_ip_addresses": [],
"private_source_dns_zones": [{
"dns_zone": "dns_zone_example",
"description": "description_example"
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.analytics import AnalyticsClient
from oci.analytics.models import CreatePrivateAccessChannelDetails
from oci.analytics.models import UpdatePrivateAccessChannelDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class PrivateAccessChannelHelperGen(OCIResourceHelperBase):
def get_module_resource_id_param(self):
return "analytics_instance_id"
def get_module_resource_id(self):
return self.module.params.get("analytics_instance_id")
def get_get_fn(self):
return self.client.get_private_access_channel
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_private_access_channel,
private_access_channel_key=self.module.params.get(
"private_access_channel_key"
),
analytics_instance_id=self.module.params.get("analytics_instance_id"),
)
def get_create_model_class(self):
return CreatePrivateAccessChannelDetails
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.create_private_access_channel,
call_fn_args=(),
call_fn_kwargs=dict(
analytics_instance_id=self.module.params.get("analytics_instance_id"),
create_private_access_channel_details=create_details,
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def get_update_model_class(self):
return UpdatePrivateAccessChannelDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_private_access_channel,
call_fn_args=(),
call_fn_kwargs=dict(
private_access_channel_key=self.module.params.get(
"private_access_channel_key"
),
analytics_instance_id=self.module.params.get("analytics_instance_id"),
update_private_access_channel_details=update_details,
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.delete_private_access_channel,
call_fn_args=(),
call_fn_kwargs=dict(
private_access_channel_key=self.module.params.get(
"private_access_channel_key"
),
analytics_instance_id=self.module.params.get("analytics_instance_id"),
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
PrivateAccessChannelHelperCustom = get_custom_class("PrivateAccessChannelHelperCustom")
class ResourceHelper(PrivateAccessChannelHelperCustom, PrivateAccessChannelHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
analytics_instance_id=dict(aliases=["id"], type="str", required=True),
display_name=dict(aliases=["name"], type="str"),
vcn_id=dict(type="str"),
subnet_id=dict(type="str"),
private_source_dns_zones=dict(
type="list",
elements="dict",
options=dict(
dns_zone=dict(type="str", required=True),
description=dict(type="str"),
),
),
private_access_channel_key=dict(type="str", no_log=True),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="private_access_channel",
service_client_class=AnalyticsClient,
namespace="analytics",
)
result = dict(changed=False)
if resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
| true | true |
1c2e95f5e12bce802fd887828e3ab4159233eaa3 | 414 | py | Python | reapit/reapit/doctype/item_sync_settings/item_sync_settings.py | rtdany10/reapit | aba62ac13fb1a81da9e2e182506712416b63aa92 | [
"MIT"
] | null | null | null | reapit/reapit/doctype/item_sync_settings/item_sync_settings.py | rtdany10/reapit | aba62ac13fb1a81da9e2e182506712416b63aa92 | [
"MIT"
] | null | null | null | reapit/reapit/doctype/item_sync_settings/item_sync_settings.py | rtdany10/reapit | aba62ac13fb1a81da9e2e182506712416b63aa92 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Wahni Green Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ItemSyncSettings(Document):
    """Singleton settings doctype controlling item synchronisation."""

    def validate(self):
        """Reject enabling sync without both an API endpoint and a price list."""
        missing_config = not (self.api_endpoint and self.price_list)
        if self.enabled and missing_config:
            frappe.throw("Invalid endpoint or price list.")
| 29.571429 | 63 | 0.768116 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ItemSyncSettings(Document):
def validate(self):
if self.enabled:
if not (self.api_endpoint and self.price_list):
frappe.throw("Invalid endpoint or price list.")
| true | true |
1c2e972200daba906a25b9d9ebe3ed09289c0ab6 | 10,914 | py | Python | tests/test_scrape_ew_and_errew.py | mwanakijiji/rrlfe2 | 0637b348b8d3e54ff34c56caa8b4c6fdac1c699e | [
"MIT"
] | null | null | null | tests/test_scrape_ew_and_errew.py | mwanakijiji/rrlfe2 | 0637b348b8d3e54ff34c56caa8b4c6fdac1c699e | [
"MIT"
] | 18 | 2022-01-13T14:43:57.000Z | 2022-03-24T12:52:41.000Z | tests/test_scrape_ew_and_errew.py | mwanakijiji/rrlfe | 4a822bb499bd0af4543f8b34d9322e812a5a3d2c | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('Agg')
import sys, os
from configparser import ConfigParser, ExtendedInterpolation
import pandas as pd
import astropy
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "../"))
sys.path.insert(0, target_dir)
# import more things with changed system path
from modules import *
from modules import scrape_ew_and_errew
from conf import *
import numpy as np
import glob
# configuration data for reduction
config_red = ConfigParser(interpolation=ExtendedInterpolation()) # for parsing values in .init file
# config for reduction to find a, b, c, d
config_red.read(os.path.join(os.path.dirname(__file__), '../conf', 'config_red.ini'))
def test_line_order_check():
    """line_order_check returns 0 for the canonical line centers and a
    nonzero glitch count once centers are perturbed."""
    # Canonical centers for CaIIK, Heps, Hdel, Hgam, Hbet.
    centers = [3933.660, 3970.075, 4101.7100, 4340.472, 4861.290]
    assert scrape_ew_and_errew.line_order_check(centers) == 0
    # Perturb two of the centers: one glitch is reported.
    centers[1] = 3990.
    centers[3] = 4320.
    assert scrape_ew_and_errew.line_order_check(centers) == 1
    # Perturb the remaining centers as well: still a single glitch flag.
    centers[0] = 3913.
    centers[2] = 4121.
    centers[4] = 4881.
    assert scrape_ew_and_errew.line_order_check(centers) == 1
def test_Scraper():
    """Run the Robospect-output Scraper end to end on the test fixtures.

    Checks that a single invocation completes, that each named line is
    found near its laboratory wavelength, and that exactly one of the
    three input spectra was rejected for parsing errors.
    """
    scraper_instance = scrape_ew_and_errew.Scraper(subdir = config_red["data_dirs"]["TEST_DIR_ROBO_OUTPUT"],
                                                   file_scraped_info = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/"+config_red["file_names"]["SCRAPED_EW_ALL_DATA"])
    # A single call must not raise; file side effects are not checked here.
    completed_ok = True
    try:
        test = scraper_instance()
    except Exception:
        completed_ok = False
    assert completed_ok
    # Each line must be identified within 2 Angstroms of its stated center.
    expected_centers = {"CaIIK": 3933.660, "Heps": 3970.075, "Hdel": 4101.710,
                        "Hgam": 4340.472, "Hbet": 4861.290}
    for line_name, center in expected_centers.items():
        found = test.where(test["line_name"] == line_name).dropna()["wavel_found_center"]
        assert np.allclose(found, center, atol=2.)
    # Only 2 of the 3 spectra should have been scraped, because one should
    # have triggered a parsing error.
    assert len(test.where(test["line_name"] == "CaIIK").dropna()) == 2
def test_quality_check():
    """quality_check output columns carry the expected dtypes."""
    data_out_test = scrape_ew_and_errew.quality_check(
        read_in_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/"+config_red["file_names"]["TEST_SCRAPED_EW_ALL_DATA"],
        write_out_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/"+config_red["file_names"]["TEST_SCRAPED_EW_DATA_GOOD_ONLY"])
    # .iloc[0] rather than [0]: bad rows (possibly index 0) may be removed.
    float_cols = ["wavel_stated_center", "wavel_found_center", "gaussianSigma",
                  "gaussianAmp", "uncertaintyMu", "uncertaintySigma",
                  "uncertaintyAmp", "priorMu", "priorSigma", "priorAmp",
                  "EQW", "uncertaintyEQW", "chiSqr"]
    for col in float_cols:
        assert isinstance(data_out_test[col].iloc[0], np.float64)
    str_cols = ["flags", "line_name", "robolines_file_name",
                "realization_spec_file_name", "quality"]
    for col in str_cols:
        assert isinstance(data_out_test[col].iloc[0], str)
    assert isinstance(data_out_test["blendGroup"].iloc[0], np.int64)
def test_stack_spectra():
    """stack_spectra restacks the good-only EW table with typed columns."""
    print("input list")
    print(config_red["data_dirs"]["TEST_DIR_SRC"] + "test_input_file_list.list")
    print("read in file name")
    data_stacked_test = scrape_ew_and_errew.stack_spectra(
        read_in_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/"+config_red["file_names"]["TEST_SCRAPED_EW_DATA_GOOD_ONLY"],
        write_out_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/"+config_red["file_names"]["TEST_RESTACKED_EW_DATA_GOOD_ONLY"],
        input_list = config_red["data_dirs"]["TEST_DIR_SRC"] + "test_input_file_list.list")
    print("data_stacked")
    print(data_stacked_test.keys())
    # .iloc[0] rather than [0]: bad rows (possibly index 0) may be removed.
    for col in ["realization_spec_file_name", "orig_spec_file_name"]:
        assert isinstance(data_stacked_test[col].iloc[0], str)
    # Every line gets an EW column and a Robospect-error column, all float.
    for line in ["Hbeta", "Hdelta", "Hgamma", "Heps", "CaIIK"]:
        assert isinstance(data_stacked_test["EW_" + line].iloc[0], np.float64)
        assert isinstance(data_stacked_test["err_EW_" + line + "_from_robo"].iloc[0], np.float64)
def test_generate_net_balmer():
    # NOTE(review): this test is a stub — it only verifies that
    # generate_net_balmer() runs without raising; the trailing `assert 1<2`
    # is vacuous. The commented-out checks below should be restored once
    # the expected Balmer values are pinned down.
    ## ## CONTINUE HERE; FINISH THIS TEST
    # generate the fake data: H_del =
    params_data, data_net_balmer_test = scrape_ew_and_errew.generate_net_balmer(
        read_in_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/test_stacked_data_pre_net_balmer_calc.csv",
        write_out_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/test_stacked_data_post_net_balmer_calc.csv")
    # is the Balmer line a true element wise average?
    #assert np.array(data_net_balmer_test["EW_Balmer"]) == np.mean([data_net_balmer_test["EW_Hgamma"],data_net_balmer_test["EW_Hdelta"]], axis=0)
    '''
    # check data type of newly-added data
    assert isinstance(data_net_balmer_test["EW_Balmer"].iloc[0],np.float64)
    assert isinstance(data_net_balmer_test["err_EW_Balmer_based_Robo"].iloc[0],np.float64)
    '''
    assert 1<2
def test_generate_addl_ew_errors():
    """Noise-churn EW errors agree within each parent spectrum and match
    known values; ungrouped output keeps one row per noise realization."""
    # Placeholder for now, until more decisions about how to calculate EW errors.
    df_grouped = scrape_ew_and_errew.generate_addl_ew_errors(read_in_filename=config_red["data_dirs"]["TEST_DIR_SRC"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_NET_BALMER"],
                                                write_out_filename=config_red["data_dirs"]["TEST_DIR_BIN"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_NET_BALMER_ERRORS"])
    # For each parent spectrum, all rows share one (nonzero) error value.
    per_parent = {}
    for parent in ["575020m10.smo", "575020m15.smo", "575020m20.smo"]:
        vals = df_grouped["err_EW_Balmer_based_noise_churning"].where(df_grouped["orig_spec_file_name"]==parent).dropna().values
        assert np.all(vals)
        per_parent[parent] = vals
    # Spot-check one value per parent against known fixture results.
    assert round(per_parent["575020m10.smo"][0], 3) == 0.023
    assert round(per_parent["575020m15.smo"][0], 3) == 0.020
    assert round(per_parent["575020m20.smo"][0], 3) == 0.048
    # In the unusual case where collapsing the noise-churned spectra is not
    # desired, each parent keeps its 20 child rows with varying Balmer EWs.
    df_children = scrape_ew_and_errew.generate_addl_ew_errors(read_in_filename=config_red["data_dirs"]["TEST_DIR_SRC"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_NET_BALMER"],
                                                write_out_filename=config_red["data_dirs"]["TEST_DIR_BIN"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_NET_BALMER_ERRORS"],
                                                groupby_parent = False)
    children_m10 = df_children.where(df_grouped["orig_spec_file_name"]=="575020m10.smo").dropna()
    assert len(children_m10) == 20
    assert np.std(children_m10["EW_Balmer"]) > 0
def test_add_synthetic_meta_data():
    """Synthetic-spectrum metadata merges into the EW table with no gaps."""
    combined_data = scrape_ew_and_errew.add_synthetic_meta_data(input_list = config_red["data_dirs"]["TEST_DIR_SRC"] + config_red["file_names"]["TEST_LIST_SPEC_PHASE"],
                       read_in_filename = config_red["data_dirs"]["TEST_DIR_EW_PRODS"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_NET_BALMER_ERRORS"],
                       write_out_filename = config_red["data_dirs"]["TEST_DIR_EW_PRODS"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_METADATA_WRITEOUT"])
    # Columns from the Robospect output and the metadata are all present...
    for col in ["wavel_stated_center", "feh", "teff"]:
        assert col in combined_data.columns
    # ...and the merge introduced no NaNs anywhere in the table.
    assert np.sum(combined_data.isnull().sum()) == 0
| 52.724638 | 208 | 0.723566 | import matplotlib
matplotlib.use('Agg')
import sys, os
from configparser import ConfigParser, ExtendedInterpolation
import pandas as pd
import astropy
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "../"))
# Make the repository root importable so `modules` and `conf` resolve.
sys.path.insert(0, target_dir)
from modules import *
from modules import scrape_ew_and_errew
from conf import *
import numpy as np
import glob
# Reduction config (paths, file names) shared by every test below.
config_red = ConfigParser(interpolation=ExtendedInterpolation())
config_red.read(os.path.join(os.path.dirname(__file__), '../conf', 'config_red.ini'))
def test_line_order_check():
    """line_order_check returns 0 for the canonical line centers and a
    nonzero glitch count once centers are perturbed."""
    # Canonical centers for CaIIK, Heps, Hdel, Hgam, Hbet.
    centers = [3933.660, 3970.075, 4101.7100, 4340.472, 4861.290]
    assert scrape_ew_and_errew.line_order_check(centers) == 0
    # Perturb two of the centers: one glitch is reported.
    centers[1] = 3990.
    centers[3] = 4320.
    assert scrape_ew_and_errew.line_order_check(centers) == 1
    # Perturb the remaining centers as well: still a single glitch flag.
    centers[0] = 3913.
    centers[2] = 4121.
    centers[4] = 4881.
    assert scrape_ew_and_errew.line_order_check(centers) == 1
def test_Scraper():
    """Run the Robospect-output Scraper end to end on the test fixtures.

    Checks that a single invocation completes, that each named line is
    found near its laboratory wavelength, and that exactly one of the
    three input spectra was rejected for parsing errors.
    """
    scraper_instance = scrape_ew_and_errew.Scraper(subdir = config_red["data_dirs"]["TEST_DIR_ROBO_OUTPUT"],
                                                   file_scraped_info = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/"+config_red["file_names"]["SCRAPED_EW_ALL_DATA"])
    # A single call must not raise; file side effects are not checked here.
    completed_ok = True
    try:
        test = scraper_instance()
    except Exception:
        completed_ok = False
    assert completed_ok
    # Each line must be identified within 2 Angstroms of its stated center.
    expected_centers = {"CaIIK": 3933.660, "Heps": 3970.075, "Hdel": 4101.710,
                        "Hgam": 4340.472, "Hbet": 4861.290}
    for line_name, center in expected_centers.items():
        found = test.where(test["line_name"] == line_name).dropna()["wavel_found_center"]
        assert np.allclose(found, center, atol=2.)
    # Only 2 of the 3 spectra should have been scraped; one triggers a
    # parsing error.
    assert len(test.where(test["line_name"] == "CaIIK").dropna()) == 2
def test_quality_check():
    """quality_check output columns carry the expected dtypes."""
    data_out_test = scrape_ew_and_errew.quality_check(
        read_in_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/"+config_red["file_names"]["TEST_SCRAPED_EW_ALL_DATA"],
        write_out_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/"+config_red["file_names"]["TEST_SCRAPED_EW_DATA_GOOD_ONLY"])
    # .iloc[0] rather than [0]: bad rows (possibly index 0) may be removed.
    float_cols = ["wavel_stated_center", "wavel_found_center", "gaussianSigma",
                  "gaussianAmp", "uncertaintyMu", "uncertaintySigma",
                  "uncertaintyAmp", "priorMu", "priorSigma", "priorAmp",
                  "EQW", "uncertaintyEQW", "chiSqr"]
    for col in float_cols:
        assert isinstance(data_out_test[col].iloc[0], np.float64)
    str_cols = ["flags", "line_name", "robolines_file_name",
                "realization_spec_file_name", "quality"]
    for col in str_cols:
        assert isinstance(data_out_test[col].iloc[0], str)
    assert isinstance(data_out_test["blendGroup"].iloc[0], np.int64)
def test_stack_spectra():
    """stack_spectra restacks the good-only EW table with typed columns."""
    print("input list")
    print(config_red["data_dirs"]["TEST_DIR_SRC"] + "test_input_file_list.list")
    print("read in file name")
    data_stacked_test = scrape_ew_and_errew.stack_spectra(
        read_in_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/"+config_red["file_names"]["TEST_SCRAPED_EW_DATA_GOOD_ONLY"],
        write_out_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/"+config_red["file_names"]["TEST_RESTACKED_EW_DATA_GOOD_ONLY"],
        input_list = config_red["data_dirs"]["TEST_DIR_SRC"] + "test_input_file_list.list")
    print("data_stacked")
    print(data_stacked_test.keys())
    # .iloc[0] rather than [0]: bad rows (possibly index 0) may be removed.
    for col in ["realization_spec_file_name", "orig_spec_file_name"]:
        assert isinstance(data_stacked_test[col].iloc[0], str)
    # Every line gets an EW column and a Robospect-error column, all float.
    for line in ["Hbeta", "Hdelta", "Hgamma", "Heps", "CaIIK"]:
        assert isinstance(data_stacked_test["EW_" + line].iloc[0], np.float64)
        assert isinstance(data_stacked_test["err_EW_" + line + "_from_robo"].iloc[0], np.float64)
def test_generate_net_balmer():
    # Fixed: the call's head line was missing here, leaving orphaned keyword
    # arguments and an unmatched parenthesis (a SyntaxError). Restored from
    # the annotated copy of this test. The test is still a stub: it only
    # checks that generate_net_balmer() runs; `assert 1<2` is vacuous.
    params_data, data_net_balmer_test = scrape_ew_and_errew.generate_net_balmer(
        read_in_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/test_stacked_data_pre_net_balmer_calc.csv",
        write_out_filename = config_red["data_dirs"]["TEST_DIR_BIN"]+"scraper_output/test_stacked_data_post_net_balmer_calc.csv")
    assert 1<2
def test_generate_addl_ew_errors():
    """Noise-churn EW errors agree within each parent spectrum and match
    known values; ungrouped output keeps one row per noise realization."""
    df_grouped = scrape_ew_and_errew.generate_addl_ew_errors(read_in_filename=config_red["data_dirs"]["TEST_DIR_SRC"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_NET_BALMER"],
                                                write_out_filename=config_red["data_dirs"]["TEST_DIR_BIN"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_NET_BALMER_ERRORS"])
    # For each parent spectrum, all rows share one (nonzero) error value.
    per_parent = {}
    for parent in ["575020m10.smo", "575020m15.smo", "575020m20.smo"]:
        vals = df_grouped["err_EW_Balmer_based_noise_churning"].where(df_grouped["orig_spec_file_name"]==parent).dropna().values
        assert np.all(vals)
        per_parent[parent] = vals
    # Spot-check one value per parent against known fixture results.
    assert round(per_parent["575020m10.smo"][0], 3) == 0.023
    assert round(per_parent["575020m15.smo"][0], 3) == 0.020
    assert round(per_parent["575020m20.smo"][0], 3) == 0.048
    # Without collapsing the noise-churned children, each parent keeps its
    # 20 rows with varying Balmer EWs.
    df_children = scrape_ew_and_errew.generate_addl_ew_errors(read_in_filename=config_red["data_dirs"]["TEST_DIR_SRC"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_NET_BALMER"],
                                                write_out_filename=config_red["data_dirs"]["TEST_DIR_BIN"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_NET_BALMER_ERRORS"],
                                                groupby_parent = False)
    children_m10 = df_children.where(df_grouped["orig_spec_file_name"]=="575020m10.smo").dropna()
    assert len(children_m10) == 20
    assert np.std(children_m10["EW_Balmer"]) > 0
def test_add_synthetic_meta_data():
    """Synthetic-spectrum metadata merges into the EW table with no gaps."""
    combined_data = scrape_ew_and_errew.add_synthetic_meta_data(input_list = config_red["data_dirs"]["TEST_DIR_SRC"] + config_red["file_names"]["TEST_LIST_SPEC_PHASE"],
                       read_in_filename = config_red["data_dirs"]["TEST_DIR_EW_PRODS"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_NET_BALMER_ERRORS"],
                       write_out_filename = config_red["data_dirs"]["TEST_DIR_EW_PRODS"]+config_red["file_names"]["TEST_RESTACKED_EW_DATA_W_METADATA_WRITEOUT"])
    # Columns from the Robospect output and the metadata are all present...
    for col in ["wavel_stated_center", "feh", "teff"]:
        assert col in combined_data.columns
    # ...and the merge introduced no NaNs anywhere in the table.
    assert np.sum(combined_data.isnull().sum()) == 0
| true | true |
1c2e97e86be84acb21ba351d83b002528e614107 | 23,642 | py | Python | src/translate.py | CloudRenderVR/human-motion-prediction-pytorch-uncertainty | cb96f3b2d8a2b54916b2f3f76c1c9b982af5b46b | [
"MIT"
] | null | null | null | src/translate.py | CloudRenderVR/human-motion-prediction-pytorch-uncertainty | cb96f3b2d8a2b54916b2f3f76c1c9b982af5b46b | [
"MIT"
] | null | null | null | src/translate.py | CloudRenderVR/human-motion-prediction-pytorch-uncertainty | cb96f3b2d8a2b54916b2f3f76c1c9b982af5b46b | [
"MIT"
] | null | null | null |
"""Simple code for training an RNN for motion prediction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import h5py
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import data_utils
import seq2seq_model
import torch
import torch.optim as optim
from torch.autograd import Variable
import argparse
# Learning
# Command-line configuration. Note: argparse does not run `type` on default
# values, so every default below must already have the declared type.
# Learning
parser = argparse.ArgumentParser(description='Train RNN for human pose estimation')
parser.add_argument('--learning_rate', dest='learning_rate',
                  help='Learning rate',
                  default=0.005, type=float)
parser.add_argument('--learning_rate_decay_factor', dest='learning_rate_decay_factor',
                  help='Learning rate is multiplied by this much. 1 means no decay.',
                  default=0.95, type=float)
parser.add_argument('--learning_rate_step', dest='learning_rate_step',
                  help='Every this many steps, do decay.',
                  default=10000, type=int)
parser.add_argument('--batch_size', dest='batch_size',
                  help='Batch size to use during training.',
                  default=16, type=int)
parser.add_argument('--max_gradient_norm', dest='max_gradient_norm',
                  help='Clip gradients to this norm.',
                  default=5, type=float)
# Fixed: default was the float 1e5 (argparse leaves defaults untouched),
# which made `range(args.iterations)` in train() raise a TypeError.
parser.add_argument('--iterations', dest='iterations',
                  help='Iterations to train for.',
                  default=int(1e5), type=int)
parser.add_argument('--test_every', dest='test_every',
                  help='',
                  default=1000, type=int)
# Architecture
parser.add_argument('--architecture', dest='architecture',
                  help='Seq2seq architecture to use: [basic, tied].',
                  default='tied', type=str)
parser.add_argument('--loss_to_use', dest='loss_to_use',
                  help='The type of loss to use, supervised or sampling_based',
                  default='sampling_based', type=str)
parser.add_argument('--residual_velocities', dest='residual_velocities',
                  help='Add a residual connection that effectively models velocities',action='store_true',
                  default=False)
parser.add_argument('--size', dest='size',
                  help='Size of each model layer.',
                  default=1024, type=int)
parser.add_argument('--num_layers', dest='num_layers',
                  help='Number of layers in the model.',
                  default=1, type=int)
parser.add_argument('--seq_length_in', dest='seq_length_in',
                  help='Number of frames to feed into the encoder. 25 fp',
                  default=50, type=int)
parser.add_argument('--seq_length_out', dest='seq_length_out',
                  help='Number of frames that the decoder has to predict. 25fps',
                  default=10, type=int)
parser.add_argument('--omit_one_hot', dest='omit_one_hot',
                  help='', action='store_true',
                  default=False)
parser.add_argument('--taylor', dest='finite_taylor_extrapolate',
                  help='Whether to augment the network with a taylor series extrapolation from a finite difference scheme of the previous frames', action='store_true',
                  default=False)
# Directories
parser.add_argument('--data_dir', dest='data_dir',
                  help='Data directory',
                  default=os.path.normpath("./data/h3.6m/dataset"), type=str)
parser.add_argument('--train_dir', dest='train_dir',
                  help='Training directory',
                  default=os.path.normpath("./experiments/"), type=str)
parser.add_argument('--action', dest='action',
                  help='The action to train on. all means all the actions, all_periodic means walking, eating and smoking',
                  default='all', type=str)
parser.add_argument('--use_cpu', dest='use_cpu',
                  help='', action='store_true',
                  default=False)
parser.add_argument('--load', dest='load',
                  help='Try to load a previous checkpoint.',
                  default=0, type=int)
parser.add_argument('--sample', dest='sample',
                  help='Set to True for sampling.', action='store_true',
                  default=False)
# Fixed: this was declared with no `action`, so any value passed on the
# command line (even the string "False") arrived as a truthy str. Make it a
# boolean flag, consistent with the other switches above.
parser.add_argument('--distribution_output_direct', dest='distribution_output_direct',
                  action='store_true', default=False)
args = parser.parse_args()

# Experiment output directory: encodes every hyperparameter that affects the
# run so that checkpoints from different settings never collide.
train_dir = os.path.normpath(os.path.join( args.train_dir, args.action,
  'out_{0}'.format(args.seq_length_out),
  'iterations_{0}'.format(args.iterations),
  args.architecture,
  args.loss_to_use,
  'omit_one_hot' if args.omit_one_hot else 'one_hot',
  'depth_{0}'.format(args.num_layers),
  'size_{0}'.format(args.size),
  'lr_{0}'.format(args.learning_rate),
  'residual_vel' if args.residual_velocities else 'not_residual_vel'))
print(train_dir)
os.makedirs(train_dir, exist_ok=True)
def create_model(actions, sampling=False):
  """Create translation model and initialize or load parameters in session.

  Args
    actions: list of action names; its length sizes the one-hot input.
    sampling: if True, build with the fixed 50-in / 100-out sequence
      lengths used for sampling instead of the CLI-configured lengths.
  Returns
    A seq2seq_model.Seq2SeqModel, either freshly initialized or (when
    --load > 0) restored from a checkpoint under `train_dir`.
  """
  model = seq2seq_model.Seq2SeqModel(
      args.architecture,
      args.seq_length_in if not sampling else 50,
      args.seq_length_out if not sampling else 100,
      args.size, # hidden layer size
      args.num_layers,
      args.max_gradient_norm,
      args.batch_size,
      args.learning_rate,
      args.learning_rate_decay_factor,
      args.loss_to_use if not sampling else "sampling_based",
      len( actions ),
      not args.omit_one_hot,
      args.residual_velocities,
      args.finite_taylor_extrapolate,
      output_as_normal_distribution = args.distribution_output_direct,
      dtype=torch.float32)
  if args.load <= 0:
    return model
  # --load > 0: discard the fresh model and restore the whole checkpointed
  # module saved by train().
  # NOTE(review): torch.load without map_location assumes the checkpoint's
  # original device is available — confirm for CPU-only restores.
  print("Loading model")
  model = torch.load(train_dir + '/model_' + str(args.load))
  if sampling:
    # Override the saved sequence lengths with the sampling configuration.
    model.source_seq_len = 50
    model.target_seq_len = 100
  return model
def clean_batch(batch):
  """Convert an (encoder, decoder_in, decoder_out) numpy batch to float
  Variables, moved to the GPU unless --use_cpu was given."""
  tensors = [torch.from_numpy(part).float() for part in batch]
  if not args.use_cpu:
    tensors = [t.cuda() for t in tensors]
  encoder_inputs, decoder_inputs, decoder_outputs = [Variable(t) for t in tensors]
  return (encoder_inputs, decoder_inputs, decoder_outputs)
import flags
def get_loss(output, truth):
  """Compute the training loss selected by flags.translate_loss_func.

  Supported values:
    "mse": mean squared error between output and truth.
    "me":  mean absolute error.
    "mle": Gaussian negative log-likelihood, where output packs
           [means, sigmas] along the last axis (its last dimension is
           twice truth's).

  Args
    output: model prediction tensor.
    truth: ground-truth tensor of the same leading shape.
  Returns
    A scalar torch tensor.
  Raises
    ValueError: if flags.translate_loss_func names an unknown loss
      (previously this fell through and silently returned None).
  """
  if flags.translate_loss_func == "mse":
    return ( (output - truth)**2 ).mean()
  if flags.translate_loss_func == "me":
    # torch.abs keeps this a torch op end-to-end instead of relying on
    # numpy's ufunc dispatch over tensors.
    return torch.abs(output - truth).mean()
  if flags.translate_loss_func == "mle":
    # Last dimension packs the per-element means followed by the sigmas.
    assert output.shape[-1] == truth.shape[-1] * 2
    n_channels = int(truth.shape[-1])
    means = output[..., :n_channels]
    sigmas = output[..., n_channels:]
    # NLL = sum(log sigma^2)/2 + N/2 * log(2*pi) + sum(((mu - x)/sigma)^2)/2
    neg_log_likelihood = torch.sum(torch.log(torch.pow(sigmas, 2))) / 2.0
    neg_log_likelihood += torch.numel(means) / 2.0 * np.log(2.0 * np.pi)
    neg_log_likelihood += torch.sum(torch.pow((means - truth) / sigmas, 2)) / 2.0
    return neg_log_likelihood
  raise ValueError("unsupported loss function: " + str(flags.translate_loss_func))
def train():
  """Train a seq2seq model on human motion.

  Loads the H3.6M data, builds (or restores) the model, then runs
  `args.iterations` optimization steps. Every `args.test_every` steps it
  evaluates on held-out data, prints per-action Euler-angle errors at the
  standard 80--1000 ms horizons, appends the validation loss to
  training_out.txt, and checkpoints the model under `train_dir`.
  """
  actions = define_actions( args.action )
  number_of_actions = len( actions )
  #these will all be expangles
  train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use = read_all_data(
    actions, args.seq_length_in, args.seq_length_out, args.data_dir, not args.omit_one_hot )
  # Limit TF to take a fraction of the GPU memory
  # NOTE(review): `if True:` is a leftover of the TensorFlow session scope
  # this code was ported from; it only preserves the original indentation.
  if True:
    model = create_model(actions, args.sample)
    if not args.use_cpu:
      model = model.cuda()
    # === Read and denormalize the gt with srnn's seeds, as we'll need them
    # many times for evaluation in Euler Angles ===
    srnn_gts_euler = get_srnn_gts( actions, model, test_set, data_mean,
                              data_std, dim_to_ignore, not args.omit_one_hot )
    #=== This is the training loop ===
    step_time, loss, val_loss = 0.0, 0.0, 0.0
    current_step = 0 if args.load <= 0 else args.load + 1
    previous_losses = []
    step_time, loss = 0, 0
    # Training starts with plain SGD; after the first decay below the
    # optimiser is replaced with Adam at the decayed learning rate.
    optimiser = optim.SGD(model.parameters(), lr=args.learning_rate)
    #optimiser = optim.Adam(model.parameters(), lr=learning_rate, betas = (0.9, 0.999))
    for _ in range( args.iterations ):
      optimiser.zero_grad()
      model.train()
      start_time = time.time()
      # Actual training
      # === Training step ===
      encoder_inputs, decoder_inputs, decoder_outputs = clean_batch(model.get_batch( train_set, not args.omit_one_hot ))
      preds = model(encoder_inputs, decoder_inputs)
      step_loss = get_loss(preds, decoder_outputs)
      # Actual backpropagation
      step_loss.backward()
      optimiser.step()
      step_loss = step_loss.cpu().data.numpy()
      # TODO:
      # Keep only the first 54 channels (pose); extra channels are
      # presumably the predicted sigmas when the model outputs a
      # distribution — confirm against seq2seq_model.
      preds = preds[..., :54]
      #if current_step % 100 == 0:
      #  print("step {0:04d}; step_loss: {1:.4f}".format(current_step, step_loss ))
      # Running averages over a `test_every`-step window.
      step_time += (time.time() - start_time) / args.test_every
      loss += step_loss / args.test_every
      current_step += 1
      # === step decay ===
      if current_step % args.learning_rate_step == 0:
        args.learning_rate = args.learning_rate*args.learning_rate_decay_factor
        optimiser = optim.Adam(model.parameters(), lr=args.learning_rate, betas = (0.9, 0.999))
        print("Decay learning rate. New value at " + str(args.learning_rate))
      #cuda.empty_cache()
      # Once in a while, we save checkpoint, print statistics, and run evals.
      if current_step % args.test_every == 0:
        model.eval()
        # === Validation with randomly chosen seeds ===
        encoder_inputs, decoder_inputs, decoder_outputs = clean_batch(model.get_batch( test_set, not args.omit_one_hot ))
        preds = model(encoder_inputs, decoder_inputs)
        mse_loss = torch.mean( (preds[..., :54] - decoder_outputs)**2)
        step_loss = get_loss(preds, decoder_outputs)
        val_loss = step_loss # Loss book-keeping
        # TODO:
        preds = preds[..., :54]
        print()
        print("{0: <16} |".format("milliseconds"), end="")
        for ms in [80, 160, 320, 400, 560, 1000]:
          print(" {0:5d} |".format(ms), end="")
        print()
        # === Validation with srnn's seeds ===
        for action in actions:
          # Evaluate the model on the test batches
          #### Evaluate model on action
          encoder_inputs, decoder_inputs, decoder_outputs = clean_batch(model.get_batch(test_set, action))
          srnn_poses = model(encoder_inputs, decoder_inputs)
          srnn_loss = get_loss(srnn_poses, decoder_outputs)
          #TODO:
          srnn_poses = srnn_poses[...,:54]
          srnn_poses = srnn_poses.cpu().data.numpy()
          srnn_poses = srnn_poses.transpose([1,0,2])
          srnn_loss = srnn_loss.cpu().data.numpy()
          # Denormalize the output
          srnn_pred_expmap = data_utils.revert_output_format( srnn_poses,
            data_mean, data_std, dim_to_ignore, actions, not args.omit_one_hot )
          # Save the errors here
          mean_errors = np.zeros( (len(srnn_pred_expmap), srnn_pred_expmap[0].shape[0]) )
          # Training is done in exponential map, but the error is reported in
          # Euler angles, as in previous work.
          # See https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-247769197
          N_SEQUENCE_TEST = 8
          for i in np.arange(N_SEQUENCE_TEST):
            eulerchannels_pred = srnn_pred_expmap[i]
            # Convert from exponential map to Euler angles
            for j in np.arange( eulerchannels_pred.shape[0] ):
              for k in np.arange(3,97,3):
                eulerchannels_pred[j,k:k+3] = data_utils.rotmat2euler(
                  data_utils.expmap2rotmat( eulerchannels_pred[j,k:k+3] ))
            # The global translation (first 3 entries) and global rotation
            # (next 3 entries) are also not considered in the error, so the_key
            # are set to zero.
            # See https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-249404882
            gt_i=np.copy(srnn_gts_euler[action][i])
            gt_i[:,0:6] = 0
            # Now compute the l2 error. The following is numpy port of the error
            # function provided by Ashesh Jain (in matlab), available at
            # https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/dataParser/Utils/motionGenerationError.m#L40-L54
            idx_to_use = np.where( np.std( gt_i, 0 ) > 1e-4 )[0]
            euc_error = np.power( gt_i[:,idx_to_use] - eulerchannels_pred[:,idx_to_use], 2)
            euc_error = np.sum(euc_error, 1)
            euc_error = np.sqrt( euc_error )
            mean_errors[i,:] = euc_error
          # This is simply the mean error over the N_SEQUENCE_TEST examples
          mean_mean_errors = np.mean( mean_errors, 0 )
          # Pretty print of the results for 80, 160, 320, 400, 560 and 1000 ms
          print("{0: <16} |".format(action), end="")
          for ms in [1,3,7,9,13,24]:
            if args.seq_length_out >= ms+1:
              print(" {0:.3f} |".format( mean_mean_errors[ms] ), end="")
            else:
              print(" n/a |", end="")
          print()
        print()
        print("============================\n"
              "Global step: %d\n"
              "Learning rate: %.4f\n"
              "Step-time (ms): %.4f\n"
              "Train loss avg: %.4f\n"
              "--------------------------\n"
              "Val loss: %.4f\n"
              "srnn loss: %.4f\n"
              "MSE loss: %.4f\n"
              "============================" % (current_step,
              args.learning_rate, step_time*1000, loss,
              val_loss, srnn_loss, mse_loss))
        # Append the validation loss (last action's batch) for later plotting.
        with open("training_out.txt", 'a+') as f:
          f.write(action + " " + str(current_step)+": "+str(val_loss)+"\n")
        torch.save(model, train_dir + '/model_' + str(current_step))
        print()
        previous_losses.append(loss)
        # Reset global time and loss
        step_time, loss = 0, 0
        sys.stdout.flush()
def get_srnn_gts( actions, model, test_set, data_mean, data_std, dim_to_ignore, one_hot, to_euler=True ):
  """Collect denormalized ground truth for srnn's seed sequences.

  The evaluation error is always computed in Euler angles, so by default
  each denormalized expmap sequence is converted joint-by-joint to Euler.

  Args
    actions: a list of actions to get ground truths for.
    model: training model (only its "get_batch_srnn" method is used).
    test_set: dictionary with normalized test data.
    data_mean: d-long vector with the mean of the training data.
    data_std: d-long vector with the standard deviation of the training data.
    dim_to_ignore: dimensions that we are not using to train/predict.
    one_hot: whether the data comes with one-hot encoding indicating action.
    to_euler: convert the angles to Euler format (True) or keep expmap.
  Returns
    Dict mapping each action to its list of denormalized ground-truth
    sequences.
  """
  ground_truths = {}
  for action in actions:
    _, _, srnn_expmap = model.get_batch_srnn( test_set, action )
    sequences = []
    # expmap -> rotmat -> euler, one seed sequence at a time
    for seq in range( srnn_expmap.shape[0] ):
      denormed = data_utils.unNormalizeData(srnn_expmap[seq,:,:], data_mean, data_std, dim_to_ignore, actions, one_hot )
      if to_euler:
        for frame in range( denormed.shape[0] ):
          # Joint angles occupy columns 3..96 in groups of three.
          for col in range(3, 97, 3):
            denormed[frame,col:col+3] = data_utils.rotmat2euler( data_utils.expmap2rotmat( denormed[frame,col:col+3] ))
      sequences.append( denormed )
    ground_truths[action] = sequences
  return ground_truths
def sample():
  """Sample predictions for srnn's seeds.

  Loads a trained model, runs it on the standard 8 srnn seed sequences for
  each action, writes conditioning ground truth and predictions to
  samples.h5, and prints the mean per-frame Euler-angle error per action.
  """
  actions = define_actions( args.action )
  if True:
    # === Create the model ===
    print("Creating %d layers of %d units." % (args.num_layers, args.size))
    sampling = True
    model = create_model(actions, sampling)
    if not args.use_cpu:
      model = model.cuda()
    print("Model created")
    # Load all the data
    train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use = read_all_data(
      actions, args.seq_length_in, args.seq_length_out, args.data_dir, not args.omit_one_hot )
    # === Read and denormalize the gt with srnn's seeds, as we'll need them
    # many times for evaluation in Euler Angles ===
    srnn_gts_expmap = get_srnn_gts( actions, model, test_set, data_mean,
                          data_std, dim_to_ignore, not args.omit_one_hot, to_euler=False )
    srnn_gts_euler = get_srnn_gts( actions, model, test_set, data_mean,
                          data_std, dim_to_ignore, not args.omit_one_hot )
    # Clean and create a new h5 file of samples
    SAMPLES_FNAME = 'samples.h5'
    try:
      os.remove( SAMPLES_FNAME )
    except OSError:
      pass
    # Predict and save for each action
    for action in actions:
      # Make prediction with srnn' seeds. These will just be in expangles.
      encoder_inputs, decoder_inputs, decoder_outputs = model.get_batch_srnn( test_set, action )
      # Move the batch to float tensors (and the GPU unless --use_cpu),
      # then wrap for autograd.
      encoder_inputs = torch.from_numpy(encoder_inputs).float()
      decoder_inputs = torch.from_numpy(decoder_inputs).float()
      decoder_outputs = torch.from_numpy(decoder_outputs).float()
      if not args.use_cpu:
        encoder_inputs = encoder_inputs.cuda()
        decoder_inputs = decoder_inputs.cuda()
        decoder_outputs = decoder_outputs.cuda()
      encoder_inputs = Variable(encoder_inputs)
      decoder_inputs = Variable(decoder_inputs)
      decoder_outputs = Variable(decoder_outputs)
      srnn_poses = model(encoder_inputs, decoder_inputs)
      # Squared error on the first 54 output channels (the pose dimensions).
      srnn_loss = (srnn_poses[..., :54] - decoder_outputs)**2
      # NOTE(review): the result of the next call is discarded — the line is
      # a no-op and looks like leftover debugging code. The computed
      # srnn_loss is never printed or saved in this function either.
      srnn_loss.cpu().data.numpy()
      srnn_loss = srnn_loss.mean()
      srnn_poses = srnn_poses.cpu().data.numpy()
      srnn_poses = srnn_poses.transpose([1,0,2])
      srnn_loss = srnn_loss.cpu().data.numpy()
      # denormalizes too
      srnn_pred_expmap = data_utils.revert_output_format(srnn_poses[..., :54], data_mean, data_std, dim_to_ignore, actions, not args.omit_one_hot )
      # Save the samples
      with h5py.File( SAMPLES_FNAME, 'a' ) as hf:
        for i in np.arange(8):
          # Save conditioning ground truth
          node_name = 'expmap/gt/{1}_{0}'.format(i, action)
          hf.create_dataset( node_name, data=srnn_gts_expmap[action][i] )
          # Save prediction
          node_name = 'expmap/preds/{1}_{0}'.format(i, action)
          hf.create_dataset( node_name, data=srnn_pred_expmap[i] )
      # Compute and save the errors here
      mean_errors = np.zeros( (len(srnn_pred_expmap), srnn_pred_expmap[0].shape[0]) )
      for i in np.arange(8):
        eulerchannels_pred = srnn_pred_expmap[i]
        # Per joint: exponential map -> rotation matrix -> Euler angles.
        for j in np.arange( eulerchannels_pred.shape[0] ):
          for k in np.arange(3,97,3):
            eulerchannels_pred[j,k:k+3] = data_utils.rotmat2euler(
              data_utils.expmap2rotmat( eulerchannels_pred[j,k:k+3] ))
        # Global translation and rotation (first 6 channels) are excluded
        # from the error, so zero them out.
        eulerchannels_pred[:,0:6] = 0
        # Pick only the dimensions with sufficient standard deviation. Others are ignored.
        idx_to_use = np.where( np.std( eulerchannels_pred, 0 ) > 1e-4 )[0]
        euc_error = np.power( srnn_gts_euler[action][i][:,idx_to_use] - eulerchannels_pred[:,idx_to_use], 2)
        euc_error = np.sum(euc_error, 1)
        euc_error = np.sqrt( euc_error )
        mean_errors[i,:] = euc_error
      # Average per-frame error over the 8 seed sequences, print and persist.
      mean_mean_errors = np.mean( mean_errors, 0 )
      print( action )
      print( ','.join(map(str, mean_mean_errors.tolist() )) )
      with h5py.File( SAMPLES_FNAME, 'a' ) as hf:
        node_name = 'mean_{0}_error'.format( action )
        hf.create_dataset( node_name, data=mean_mean_errors )
  return
def define_actions( action ):
  """
  Define the list of actions we are using.

  Args
    action: String with the passed action. Could be "all"
  Returns
    actions: List of strings of actions
  Raises
    ValueError if the action is not included in H3.6M
  """
  actions = ["walking", "eating", "smoking", "discussion", "directions",
              "greeting", "phoning", "posing", "purchases", "sitting",
              "sittingdown", "takingphoto", "waiting", "walkingdog",
              "walkingtogether"]
  if action in actions:
    return [action]
  if action == "all":
    return actions
  if action == "all_srnn":
    return ["walking", "eating", "smoking", "discussion"]
  # Fixed: the old `raise( ValueError, "...%d" % action )` raised a
  # TypeError in Python 3 (tuple-raise was removed, and "%d" cannot
  # format a string), hiding the intended ValueError.
  raise ValueError( "Unrecognized action: %s" % action )
def read_all_data( actions, seq_length_in, seq_length_out, data_dir, one_hot ):
  """
  Load train/test splits, compute normalization statistics on the training
  split, and normalize both splits (dropping near-constant dimensions as
  determined by normalization_stats). Rotation format is left untouched.

  Args
    actions: list of strings (actions) to load
    seq_length_in: number of frames to use in the burn-in sequence
    seq_length_out: number of frames to use in the output sequence
    data_dir: directory to load the data from
    one_hot: whether to use one-hot encoding per action
  Returns
    train_set: dictionary with normalized training data
    test_set: dictionary with test data
    data_mean: d-long vector with the mean of the training data
    data_std: d-long vector with the standard dev of the training data
    dim_to_ignore: dimensions that are not used because stdev is too small
    dim_to_use: dimensions that we are actually using in the model
  """
  print("Reading training data (seq_len_in: {0}, seq_len_out {1}).".format(
      seq_length_in, seq_length_out))
  # Standard H3.6M split: subject 5 is held out for testing.
  subjects_train = [1, 6, 7, 8, 9, 11]
  subjects_test = [5]
  train_set, complete_train = data_utils.load_data( data_dir, subjects_train, actions, one_hot )
  test_set, _complete_test = data_utils.load_data( data_dir, subjects_test, actions, one_hot )
  # Normalization statistics come from the training split only.
  data_mean, data_std, dim_to_ignore, dim_to_use = data_utils.normalization_stats(complete_train)
  def _normalize(split):
    # Subtract mean, divide by stdev, keep only dim_to_use.
    return data_utils.normalize_data( split, data_mean, data_std, dim_to_use, actions, one_hot )
  train_set = _normalize(train_set)
  test_set = _normalize(test_set)
  print("done reading data.")
  return train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use
def main():
  """Entry point: run sampling when --sample is given, otherwise train.

  Before training, the full command line is appended to training_out.txt so
  each run's hyper-parameters are recorded next to its validation losses.
  """
  if args.sample:
    sample()
  else:
    # `sys` is imported at module level; the redundant function-local
    # `import sys` that used to live here has been removed.
    with open("training_out.txt", 'a+') as f:
      f.write("============================================================\n"+str(sys.argv)+"\n")
    train()
if __name__ == "__main__":
  main()
| 39.469115 | 169 | 0.641232 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import h5py
import numpy as np
from six.moves import xrange
import data_utils
import seq2seq_model
import torch
import torch.optim as optim
from torch.autograd import Variable
import argparse
parser = argparse.ArgumentParser(description='Train RNN for human pose estimation')
parser.add_argument('--learning_rate', dest='learning_rate',
help='Learning rate',
default=0.005, type=float)
parser.add_argument('--learning_rate_decay_factor', dest='learning_rate_decay_factor',
help='Learning rate is multiplied by this much. 1 means no decay.',
default=0.95, type=float)
parser.add_argument('--learning_rate_step', dest='learning_rate_step',
help='Every this many steps, do decay.',
default=10000, type=int)
parser.add_argument('--batch_size', dest='batch_size',
help='Batch size to use during training.',
default=16, type=int)
parser.add_argument('--max_gradient_norm', dest='max_gradient_norm',
help='Clip gradients to this norm.',
default=5, type=float)
parser.add_argument('--iterations', dest='iterations',
help='Iterations to train for.',
default=1e5, type=int)
parser.add_argument('--test_every', dest='test_every',
help='',
default=1000, type=int)
parser.add_argument('--architecture', dest='architecture',
help='Seq2seq architecture to use: [basic, tied].',
default='tied', type=str)
parser.add_argument('--loss_to_use', dest='loss_to_use',
help='The type of loss to use, supervised or sampling_based',
default='sampling_based', type=str)
parser.add_argument('--residual_velocities', dest='residual_velocities',
help='Add a residual connection that effectively models velocities',action='store_true',
default=False)
parser.add_argument('--size', dest='size',
help='Size of each model layer.',
default=1024, type=int)
parser.add_argument('--num_layers', dest='num_layers',
help='Number of layers in the model.',
default=1, type=int)
parser.add_argument('--seq_length_in', dest='seq_length_in',
help='Number of frames to feed into the encoder. 25 fp',
default=50, type=int)
parser.add_argument('--seq_length_out', dest='seq_length_out',
help='Number of frames that the decoder has to predict. 25fps',
default=10, type=int)
parser.add_argument('--omit_one_hot', dest='omit_one_hot',
help='', action='store_true',
default=False)
parser.add_argument('--taylor', dest='finite_taylor_extrapolate',
help='Whether to augment the network with a taylor series extrapolation from a finite difference scheme of the previous frames', action='store_true',
default=False)
parser.add_argument('--data_dir', dest='data_dir',
help='Data directory',
default=os.path.normpath("./data/h3.6m/dataset"), type=str)
parser.add_argument('--train_dir', dest='train_dir',
help='Training directory',
default=os.path.normpath("./experiments/"), type=str)
parser.add_argument('--action', dest='action',
help='The action to train on. all means all the actions, all_periodic means walking, eating and smoking',
default='all', type=str)
parser.add_argument('--use_cpu', dest='use_cpu',
help='', action='store_true',
default=False)
parser.add_argument('--load', dest='load',
help='Try to load a previous checkpoint.',
default=0, type=int)
parser.add_argument('--sample', dest='sample',
help='Set to True for sampling.', action='store_true',
default=False)
parser.add_argument('--distribution_output_direct', dest='distribution_output_direct',
default=False)
args = parser.parse_args()
train_dir = os.path.normpath(os.path.join( args.train_dir, args.action,
'out_{0}'.format(args.seq_length_out),
'iterations_{0}'.format(args.iterations),
args.architecture,
args.loss_to_use,
'omit_one_hot' if args.omit_one_hot else 'one_hot',
'depth_{0}'.format(args.num_layers),
'size_{0}'.format(args.size),
'lr_{0}'.format(args.learning_rate),
'residual_vel' if args.residual_velocities else 'not_residual_vel'))
print(train_dir)
os.makedirs(train_dir, exist_ok=True)
def create_model(actions, sampling=False):
  """Build a Seq2SeqModel from the global `args`, or restore a checkpoint.

  Args
    actions: list of action names the model handles (sets the one-hot size).
    sampling: when True, configure for inference on the srnn seeds
      (50 input frames, 100 output frames, sampling-based loss).

  Returns
    A freshly constructed model when args.load <= 0, otherwise the pickled
    model restored from train_dir/model_<step> (with its sequence lengths
    overridden when sampling).
  """
  model = seq2seq_model.Seq2SeqModel(
      args.architecture,
      args.seq_length_in if not sampling else 50,
      args.seq_length_out if not sampling else 100,
      args.size,
      args.num_layers,
      args.max_gradient_norm,
      args.batch_size,
      args.learning_rate,
      args.learning_rate_decay_factor,
      args.loss_to_use if not sampling else "sampling_based",
      len( actions ),
      not args.omit_one_hot,
      args.residual_velocities,
      args.finite_taylor_extrapolate,
      output_as_normal_distribution = args.distribution_output_direct,
      dtype=torch.float32)
  # args.load <= 0 means "start from scratch".
  if args.load <= 0:
    return model
  print("Loading model")
  # NOTE(review): the freshly built model above is discarded here;
  # torch.load deserializes the entire checkpointed module.
  model = torch.load(train_dir + '/model_' + str(args.load))
  if sampling:
    # Sampling uses longer sequences than training, so override the
    # lengths stored in the checkpoint.
    model.source_seq_len = 50
    model.target_seq_len = 100
  return model
def clean_batch(batch):
  """Prepare a (encoder_inputs, decoder_inputs, decoder_outputs) numpy batch.

  Each array is converted to a float tensor, moved to the GPU unless
  --use_cpu was given, and wrapped in a Variable. Returns the three
  prepared tensors as a tuple, in the same order as the input.
  """
  prepared = []
  for array in batch:
    tensor = torch.from_numpy(array).float()
    if not args.use_cpu:
      tensor = tensor.cuda()
    prepared.append(Variable(tensor))
  return tuple(prepared)
import flags
def get_loss(output, truth):
  """Compute the training loss selected by flags.translate_loss_func.

  "mse": mean squared error between output and truth.
  "me":  mean absolute error between output and truth.
  "mle": negative log-likelihood of `truth` under per-dimension Gaussians,
         where `output` packs [means, sigmas] along the last axis (so its
         last dimension is twice that of `truth`).

  Raises:
    ValueError: if flags.translate_loss_func is none of the above.
      (Previously the function fell through and silently returned None,
      which only failed later at `.backward()`.)
  """
  loss_name = flags.translate_loss_func
  if loss_name == "mse":
    return ( (output - truth)**2 ).mean()
  if loss_name == "me":
    # torch.abs instead of np.abs keeps the computation inside torch.
    return torch.abs(output - truth).mean()
  if loss_name == "mle":
    # Output must pack a mean and a sigma for every truth dimension.
    assert output.shape[-1] == truth.shape[-1] * 2
    d = int(truth.shape[-1])
    means = output[..., :d]
    sigmas = output[..., d:]
    # -log N(truth | means, sigmas) summed over all elements:
    #   sum(log sigma^2)/2 + n/2 * log(2*pi) + sum(((mean - truth)/sigma)^2)/2
    neg_log_likelihood = torch.sum(torch.log(torch.pow(sigmas, 2))) / 2.0
    neg_log_likelihood += torch.numel(means) / 2.0 * np.log(2.0 * np.pi)
    neg_log_likelihood += torch.sum(torch.pow((means - truth) / sigmas, 2)) / 2.0
    return neg_log_likelihood
  raise ValueError("Unknown translate_loss_func: %s" % loss_name)
def train():
  """Main training loop.

  Builds (or restores) the model, then alternates batched optimization
  steps with periodic evaluation on the held-out subject, printing
  Euler-angle errors per action at the srnn horizons and checkpointing
  the model after every evaluation.
  """
  actions = define_actions( args.action )
  number_of_actions = len( actions )  # NOTE(review): unused below.
  train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use = read_all_data(
    actions, args.seq_length_in, args.seq_length_out, args.data_dir, not args.omit_one_hot )
  # `if True:` is a leftover scoping block (presumably from a TF-session port).
  if True:
    model = create_model(actions, args.sample)
    if not args.use_cpu:
      model = model.cuda()
    # Ground-truth Euler-angle seed sequences, denormalized once up front
    # and reused at every evaluation.
    srnn_gts_euler = get_srnn_gts( actions, model, test_set, data_mean,
                      data_std, dim_to_ignore, not args.omit_one_hot )
    step_time, loss, val_loss = 0.0, 0.0, 0.0
    # When resuming from a checkpoint, continue counting from the loaded step.
    current_step = 0 if args.load <= 0 else args.load + 1
    previous_losses = []
    step_time, loss = 0, 0
    # NOTE(review): training starts with SGD but swaps in a *fresh* Adam
    # optimizer at every decay step below (resetting Adam's moment
    # estimates each time) — confirm this mixed schedule is intentional.
    optimiser = optim.SGD(model.parameters(), lr=args.learning_rate)
    for _ in range( args.iterations ):
      optimiser.zero_grad()
      model.train()
      start_time = time.time()
      # One optimization step on a random training batch.
      encoder_inputs, decoder_inputs, decoder_outputs = clean_batch(model.get_batch( train_set, not args.omit_one_hot ))
      preds = model(encoder_inputs, decoder_inputs)
      step_loss = get_loss(preds, decoder_outputs)
      step_loss.backward()
      optimiser.step()
      step_loss = step_loss.cpu().data.numpy()
      preds = preds[..., :54]
      # Accumulate averages over one reporting window of test_every steps.
      step_time += (time.time() - start_time) / args.test_every
      loss += step_loss / args.test_every
      current_step += 1
      if current_step % args.learning_rate_step == 0:
        args.learning_rate = args.learning_rate*args.learning_rate_decay_factor
        optimiser = optim.Adam(model.parameters(), lr=args.learning_rate, betas = (0.9, 0.999))
        print("Decay learning rate. New value at " + str(args.learning_rate))
      # Periodic evaluation on the held-out set.
      if current_step % args.test_every == 0:
        model.eval()
        encoder_inputs, decoder_inputs, decoder_outputs = clean_batch(model.get_batch( test_set, not args.omit_one_hot ))
        preds = model(encoder_inputs, decoder_inputs)
        mse_loss = torch.mean( (preds[..., :54] - decoder_outputs)**2)
        step_loss = get_loss(preds, decoder_outputs)
        val_loss = step_loss
        preds = preds[..., :54]
        # Header row for the per-action error table at standard horizons.
        print()
        print("{0: <16} |".format("milliseconds"), end="")
        for ms in [80, 160, 320, 400, 560, 1000]:
          print(" {0:5d} |".format(ms), end="")
        print()
        for action in actions:
          # Evaluate the model on the test batches
          #### Evaluate model on action
          encoder_inputs, decoder_inputs, decoder_outputs = clean_batch(model.get_batch(test_set, action))
          srnn_poses = model(encoder_inputs, decoder_inputs)
          srnn_loss = get_loss(srnn_poses, decoder_outputs)
          #TODO:
          srnn_poses = srnn_poses[...,:54]
          srnn_poses = srnn_poses.cpu().data.numpy()
          srnn_poses = srnn_poses.transpose([1,0,2])
          srnn_loss = srnn_loss.cpu().data.numpy()
          # Denormalize the output
          srnn_pred_expmap = data_utils.revert_output_format( srnn_poses,
            data_mean, data_std, dim_to_ignore, actions, not args.omit_one_hot )
          # Save the errors here
          mean_errors = np.zeros( (len(srnn_pred_expmap), srnn_pred_expmap[0].shape[0]) )
          # Training is done in exponential map, but the error is reported in
          # Euler angles, as in previous work.
          # See https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-247769197
          N_SEQUENCE_TEST = 8
          for i in np.arange(N_SEQUENCE_TEST):
            eulerchannels_pred = srnn_pred_expmap[i]
            # Convert from exponential map to Euler angles
            for j in np.arange( eulerchannels_pred.shape[0] ):
              for k in np.arange(3,97,3):
                eulerchannels_pred[j,k:k+3] = data_utils.rotmat2euler(
                  data_utils.expmap2rotmat( eulerchannels_pred[j,k:k+3] ))
            # The global translation (first 3 entries) and global rotation
            # (next 3 entries) are also not considered in the error, so they
            # are set to zero.
            # See https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-249404882
            gt_i=np.copy(srnn_gts_euler[action][i])
            gt_i[:,0:6] = 0
            # Now compute the l2 error. The following is numpy port of the error
            # function provided by Ashesh Jain (in matlab), available at
            # https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/dataParser/Utils/motionGenerationError.m#L40-L54
            idx_to_use = np.where( np.std( gt_i, 0 ) > 1e-4 )[0]
            euc_error = np.power( gt_i[:,idx_to_use] - eulerchannels_pred[:,idx_to_use], 2)
            euc_error = np.sum(euc_error, 1)
            euc_error = np.sqrt( euc_error )
            mean_errors[i,:] = euc_error
          # This is simply the mean error over the N_SEQUENCE_TEST examples
          mean_mean_errors = np.mean( mean_errors, 0 )
          # Pretty print of the results for 80, 160, 320, 400, 560 and 1000 ms
          # (frame indices 1,3,7,9,13,24 at 25 fps).
          print("{0: <16} |".format(action), end="")
          for ms in [1,3,7,9,13,24]:
            if args.seq_length_out >= ms+1:
              print(" {0:.3f} |".format( mean_mean_errors[ms] ), end="")
            else:
              print(" n/a |", end="")
          print()
        print()
        print("============================\n"
              "Global step: %d\n"
              "Learning rate: %.4f\n"
              "Step-time (ms): %.4f\n"
              "Train loss avg: %.4f\n"
              "--------------------------\n"
              "Val loss: %.4f\n"
              "srnn loss: %.4f\n"
              "MSE loss: %.4f\n"
              "============================" % (current_step,
              args.learning_rate, step_time*1000, loss,
              val_loss, srnn_loss, mse_loss))
        # Record validation loss and checkpoint the whole model.
        with open("training_out.txt", 'a+') as f:
          f.write(action + " " + str(current_step)+": "+str(val_loss)+"\n")
        torch.save(model, train_dir + '/model_' + str(current_step))
        print()
        previous_losses.append(loss)
        # Reset global time and loss
        step_time, loss = 0, 0
        sys.stdout.flush()
def get_srnn_gts( actions, model, test_set, data_mean, data_std, dim_to_ignore, one_hot, to_euler=True ):
srnn_gts_euler = {}
for action in actions:
srnn_gt_euler = []
_, _, srnn_expmap = model.get_batch_srnn( test_set, action )
# expmap -> rotmat -> euler
for i in np.arange( srnn_expmap.shape[0] ):
denormed = data_utils.unNormalizeData(srnn_expmap[i,:,:], data_mean, data_std, dim_to_ignore, actions, one_hot )
if to_euler:
for j in np.arange( denormed.shape[0] ):
for k in np.arange(3,97,3):
denormed[j,k:k+3] = data_utils.rotmat2euler( data_utils.expmap2rotmat( denormed[j,k:k+3] ))
srnn_gt_euler.append( denormed );
# Put back in the dictionary
srnn_gts_euler[action] = srnn_gt_euler
return srnn_gts_euler
def sample():
actions = define_actions( args.action )
if True:
# === Create the model ===
print("Creating %d layers of %d units." % (args.num_layers, args.size))
sampling = True
model = create_model(actions, sampling)
if not args.use_cpu:
model = model.cuda()
print("Model created")
# Load all the data
train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use = read_all_data(
actions, args.seq_length_in, args.seq_length_out, args.data_dir, not args.omit_one_hot )
# === Read and denormalize the gt with srnn's seeds, as we'll need them
# many times for evaluation in Euler Angles ===
srnn_gts_expmap = get_srnn_gts( actions, model, test_set, data_mean,
data_std, dim_to_ignore, not args.omit_one_hot, to_euler=False )
srnn_gts_euler = get_srnn_gts( actions, model, test_set, data_mean,
data_std, dim_to_ignore, not args.omit_one_hot )
# Clean and create a new h5 file of samples
SAMPLES_FNAME = 'samples.h5'
try:
os.remove( SAMPLES_FNAME )
except OSError:
pass
# Predict and save for each action
for action in actions:
# Make prediction with srnn' seeds. These will just be in expangles.
encoder_inputs, decoder_inputs, decoder_outputs = model.get_batch_srnn( test_set, action )
encoder_inputs = torch.from_numpy(encoder_inputs).float()
decoder_inputs = torch.from_numpy(decoder_inputs).float()
decoder_outputs = torch.from_numpy(decoder_outputs).float()
if not args.use_cpu:
encoder_inputs = encoder_inputs.cuda()
decoder_inputs = decoder_inputs.cuda()
decoder_outputs = decoder_outputs.cuda()
encoder_inputs = Variable(encoder_inputs)
decoder_inputs = Variable(decoder_inputs)
decoder_outputs = Variable(decoder_outputs)
srnn_poses = model(encoder_inputs, decoder_inputs)
srnn_loss = (srnn_poses[..., :54] - decoder_outputs)**2
srnn_loss.cpu().data.numpy()
srnn_loss = srnn_loss.mean()
srnn_poses = srnn_poses.cpu().data.numpy()
srnn_poses = srnn_poses.transpose([1,0,2])
srnn_loss = srnn_loss.cpu().data.numpy()
srnn_pred_expmap = data_utils.revert_output_format(srnn_poses[..., :54], data_mean, data_std, dim_to_ignore, actions, not args.omit_one_hot )
with h5py.File( SAMPLES_FNAME, 'a' ) as hf:
for i in np.arange(8):
node_name = 'expmap/gt/{1}_{0}'.format(i, action)
hf.create_dataset( node_name, data=srnn_gts_expmap[action][i] )
node_name = 'expmap/preds/{1}_{0}'.format(i, action)
hf.create_dataset( node_name, data=srnn_pred_expmap[i] )
mean_errors = np.zeros( (len(srnn_pred_expmap), srnn_pred_expmap[0].shape[0]) )
for i in np.arange(8):
eulerchannels_pred = srnn_pred_expmap[i]
for j in np.arange( eulerchannels_pred.shape[0] ):
for k in np.arange(3,97,3):
eulerchannels_pred[j,k:k+3] = data_utils.rotmat2euler(
data_utils.expmap2rotmat( eulerchannels_pred[j,k:k+3] ))
eulerchannels_pred[:,0:6] = 0
idx_to_use = np.where( np.std( eulerchannels_pred, 0 ) > 1e-4 )[0]
euc_error = np.power( srnn_gts_euler[action][i][:,idx_to_use] - eulerchannels_pred[:,idx_to_use], 2)
euc_error = np.sum(euc_error, 1)
euc_error = np.sqrt( euc_error )
mean_errors[i,:] = euc_error
mean_mean_errors = np.mean( mean_errors, 0 )
print( action )
print( ','.join(map(str, mean_mean_errors.tolist() )) )
with h5py.File( SAMPLES_FNAME, 'a' ) as hf:
node_name = 'mean_{0}_error'.format( action )
hf.create_dataset( node_name, data=mean_mean_errors )
return
def define_actions( action ):
actions = ["walking", "eating", "smoking", "discussion", "directions",
"greeting", "phoning", "posing", "purchases", "sitting",
"sittingdown", "takingphoto", "waiting", "walkingdog",
"walkingtogether"]
if action in actions:
return [action]
if action == "all":
return actions
if action == "all_srnn":
return ["walking", "eating", "smoking", "discussion"]
raise( ValueError, "Unrecognized action: %d" % action )
def read_all_data( actions, seq_length_in, seq_length_out, data_dir, one_hot ):
print ("Reading training data (seq_len_in: {0}, seq_len_out {1}).".format(
seq_length_in, seq_length_out))
train_subject_ids = [1,6,7,8,9,11]
test_subject_ids = [5]
train_set, complete_train = data_utils.load_data( data_dir, train_subject_ids, actions, one_hot )
test_set, complete_test = data_utils.load_data( data_dir, test_subject_ids, actions, one_hot )
data_mean, data_std, dim_to_ignore, dim_to_use = data_utils.normalization_stats(complete_train)
train_set = data_utils.normalize_data( train_set, data_mean, data_std, dim_to_use, actions, one_hot )
test_set = data_utils.normalize_data( test_set, data_mean, data_std, dim_to_use, actions, one_hot )
print("done reading data.")
return train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use
def main():
if args.sample:
sample()
else:
import sys
with open("training_out.txt", 'a+') as f:
f.write("============================================================\n"+str(sys.argv)+"\n")
train()
if __name__ == "__main__":
main()
| true | true |
1c2e97f80754cf03aef7cce4dbf0e63f4c25eeb5 | 141 | py | Python | solutions/Video_analysis/webservice/service/__init__.py | xiaogaozi/bootcamp | d1a4b69edc3f7e31ce3809043128348aa3597df6 | [
"Apache-2.0"
] | 1 | 2021-04-06T06:13:20.000Z | 2021-04-06T06:13:20.000Z | solutions/Video_analysis/webservice/service/__init__.py | xiaogaozi/bootcamp | d1a4b69edc3f7e31ce3809043128348aa3597df6 | [
"Apache-2.0"
] | null | null | null | solutions/Video_analysis/webservice/service/__init__.py | xiaogaozi/bootcamp | d1a4b69edc3f7e31ce3809043128348aa3597df6 | [
"Apache-2.0"
] | null | null | null | import logging
logging.basicConfig(filename='app.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
| 47 | 125 | 0.723404 | import logging
logging.basicConfig(filename='app.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
| true | true |
1c2e98c5d3a6ae7e454bd0a11c1fd19a668d87f1 | 727 | py | Python | 0-python-tutorial/15-whileLoops01.py | luis2ra/py3-00-w3schools | 6bb851837f8ef9520491d13fa2c909047c9b18cf | [
"MIT"
] | null | null | null | 0-python-tutorial/15-whileLoops01.py | luis2ra/py3-00-w3schools | 6bb851837f8ef9520491d13fa2c909047c9b18cf | [
"MIT"
] | null | null | null | 0-python-tutorial/15-whileLoops01.py | luis2ra/py3-00-w3schools | 6bb851837f8ef9520491d13fa2c909047c9b18cf | [
"MIT"
] | null | null | null | # Demo Python While Loops - The while Loop
'''
Python Loops
Python has two primitive loop commands:
* while loops
* for loops
The while Loop
With the while loop we can execute a set of statements as long as a condition is true.
The break Statement
With the break statement we can stop the loop even if the while condition is true.
The continue Statement
With the continue statement we can stop the current iteration, and continue with the next.
The else Statement
With the else statement we can run a block of code once when the condition no longer is true.
'''
# Print i as long as i is less than 6:
i = 1
while i < 6:
print(i)
i += 1 # Note: remember to increment i, or else the loop will continue forever.
| 24.233333 | 93 | 0.742779 |
i = 1
while i < 6:
print(i)
i += 1
| true | true |
1c2e9b907048749d4a615844f89a74dedb32a1ae | 2,412 | py | Python | autoimpute/analysis/metrics.py | gjdv/autoimpute | aa418102d3b64fc7e0c0dafa6839746f0b9a6545 | [
"MIT"
] | 191 | 2019-03-16T17:00:33.000Z | 2022-03-11T12:14:17.000Z | autoimpute/analysis/metrics.py | MlataIbrahim/autoimpute | d22cd86db6facd4a68746c8c0fcb3fae70071dac | [
"MIT"
] | 57 | 2019-03-09T23:59:38.000Z | 2022-03-01T08:17:33.000Z | autoimpute/analysis/metrics.py | MlataIbrahim/autoimpute | d22cd86db6facd4a68746c8c0fcb3fae70071dac | [
"MIT"
] | 19 | 2019-04-13T19:01:23.000Z | 2021-05-14T08:59:27.000Z | """This module devises metrics to compare estimates from analysis models."""
import numpy as np
import pandas as pd
def raw_bias(Q_bar, Q):
    """Calculate raw bias between coefficients Q and actual Q.

    Q_bar can be one estimate (scalar) or a vector of estimates. This equation
    subtracts the expected Q_bar from Q, element-wise. The result is the bias
    of each coefficient from its true value.

    Args:
        Q_bar (number, array): single estimate or array of estimates.
        Q (number, array): single truth or array of truths.

    Returns:
        scalar, array: element-wise difference between estimates and truths.

    Raises:
        ValueError: Shape mismatch (2-D input, mixed scalar/vector, or
            vectors of different lengths).
    """
    shape_err = "Q_bar & Q must be scalars or vectors of same length."
    # Reject pandas DataFrames outright: a DataFrame is always 2-D, and
    # only scalars and 1-D vectors are supported here.
    for arg in (Q_bar, Q):
        if isinstance(arg, pd.DataFrame) and len(arg.shape) != 1:
            raise ValueError(shape_err)
    # Convert sequences to arrays so subtraction is element-wise.
    if isinstance(Q_bar, (tuple, list)):
        Q_bar = np.array(Q_bar)
    if isinstance(Q, (tuple, list)):
        Q = np.array(Q)
    # Compare lengths only when both arguments are sized. The previous
    # implementation called len() unconditionally, which raised a
    # TypeError for the scalar inputs the docstring promises to support.
    Q_bar_sized = hasattr(Q_bar, "__len__")
    Q_sized = hasattr(Q, "__len__")
    if Q_bar_sized != Q_sized:
        raise ValueError(shape_err)
    if Q_bar_sized and len(Q_bar) != len(Q):
        raise ValueError(shape_err)
    # Element-wise difference (plain subtraction for scalars).
    return Q_bar - Q
def percent_bias(Q_bar, Q):
    """Calculate percent bias between coefficients Q and actual Q.

    Q_bar can be one estimate (scalar) or a vector of estimates. This equation
    subtracts the expected Q_bar from Q, element-wise. The result is the bias
    of each coefficient from its true value. We then divide this number by
    Q itself, again in element-wise fashion, to produce % bias.

    Args:
        Q_bar (number, array): single estimate or array of estimates.
        Q (number, array): single truth or array of truths.

    Returns:
        scalar, array: element-wise difference between estimates and truths.

    Raises:
        ValueError: Shape mismatch
        ValueError: Q_bar and Q not the same length
    """
    # raw_bias performs the shape/length validation for both inputs.
    abs_bias = abs(raw_bias(Q_bar, Q))
    # Q must be an array for element-wise division.
    denominator = np.array(Q) if isinstance(Q, (tuple, list)) else Q
    return 100 * (abs_bias / denominator)
| 30.531646 | 78 | 0.65796 |
import numpy as np
import pandas as pd
def raw_bias(Q_bar, Q):
shape_err = "Q_bar & Q must be scalars or vectors of same length."
if isinstance(Q_bar, pd.DataFrame):
s = len(Q_bar.shape)
if s != 1:
raise ValueError(shape_err)
if isinstance(Q, pd.DataFrame):
s = len(Q.shape)
if s != 1:
raise ValueError(shape_err)
if len(Q_bar) != len(Q):
raise ValueError(shape_err)
if isinstance(Q_bar, (tuple, list)):
Q_bar = np.array(Q_bar)
if isinstance(Q, (tuple, list)):
Q = np.array(Q)
rb = Q_bar - Q
return rb
def percent_bias(Q_bar, Q):
rb = raw_bias(Q_bar, Q)
if isinstance(Q, (tuple, list)):
Q = np.array(Q)
pct_bias = 100 * (abs(rb)/Q)
return pct_bias
| true | true |
1c2e9bcf8eeceddc793d357ac20131bc4c084d2d | 2,020 | py | Python | download_and_clean_data_scripts/pixabay/pixabay_main_custom.py | NMag-ze/Colorization | 1c91fb69d3505de6cd746dab3ddc15a704aa028a | [
"MIT"
] | null | null | null | download_and_clean_data_scripts/pixabay/pixabay_main_custom.py | NMag-ze/Colorization | 1c91fb69d3505de6cd746dab3ddc15a704aa028a | [
"MIT"
] | null | null | null | download_and_clean_data_scripts/pixabay/pixabay_main_custom.py | NMag-ze/Colorization | 1c91fb69d3505de6cd746dab3ddc15a704aa028a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2018/8/16 10:59
# @Author : 陈子昂
import os
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
import sys
from utils import save_img, path_processor, img_name_processor
def pexels(keyword):
    """Scrape pexels.com search results for `keyword` and save every image.

    Walks result pages 1..49 until the site reports that no more pictures
    were found (then exits the process). Each image URL is printed and
    saved under a per-keyword directory via the project helpers
    img_name_processor / save_img. (The unused `img_cnt` counter and the
    commented-out debug prints from the original were removed.)
    """
    if not keyword: sys.exit('程序退出:未输入关键字!')
    # The request headers and the destination directory depend only on the
    # keyword, so build them once instead of per page / per image.
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
    path = rf'D://人脸相关的图片//pexels//{keyword}'
    for page in tqdm(range(1, 50)):
        print(f'\n-----[{keyword}]正在爬取第{page}页-----')
        pexels_url = "https://www.pexels.com/search/%s/?page=%s" % (keyword, page)
        # NOTE: verify=False disables TLS certificate checking; kept for
        # parity with the original behaviour but worth revisiting.
        res = requests.get(pexels_url, headers=headers, verify=False)
        if 'Sorry, no pictures found!' in res.text:
            print('-*--*--*-爬取完毕-*--*--*-')
            sys.exit(0)
        soup = BeautifulSoup(res.text, 'lxml')
        for article in soup.find_all('article'):
            src = article.img.attrs['src']
            print(src)
            # Create the directory lazily, only once an image is found,
            # so keywords with no results leave no empty folders.
            if not os.path.exists(path):
                os.makedirs(path)
            file = os.path.join(path, img_name_processor(src))
            save_img(file=file, src=src)
if __name__ == "__main__":
    categories = ['male', 'old', 'vintage', 'dog', 'cat', 'building', 'nature', 'castle', 'water', 'ocean', 'cities', 'body', 'hands', 'people', 'culture', 'religion', 'color', 'patterns', 'houses', 'vintage', 'river', 'landscape', 'lights', 'animals', 'wallpaper', 'texture', 'current events', 'architecture', 'business', 'work', 'travel', 'fashion', 'food', 'drink', 'spirituality', 'experimental', 'health', 'arts', 'culture', 'children', 'people', 'events', 'trees', 'green', 'yellow', 'pink', 'blue', 'red', 'minimal', 'hands', 'head', 'eyes', 'mouth', 'eating', 'playing', 'sports']
    for category in categories:
        pexels(category)
| 42.978723 | 588 | 0.587624 |
import os
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
import sys
from utils import save_img, path_processor, img_name_processor
def pexels(keyword):
img_cnt = 0
if not keyword: sys.exit('程序退出:未输入关键字!')
for page in tqdm(range(1, 50)):
print(f'\n-----[{keyword}]正在爬取第{page}页-----')
pexels_url = "https://www.pexels.com/search/%s/?page=%s" % (keyword, page)
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
res = requests.get(pexels_url,headers=headers,verify=False)
if 'Sorry, no pictures found!' in res.text:
print('-*--*--*-爬取完毕-*--*--*-')
sys.exit(0)
soup = BeautifulSoup(res.text, 'lxml')
articles = soup.find_all('article')
for article in articles:
src = article.img.attrs['src']
print(src)
path = rf'D://人脸相关的图片//pexels//{keyword}'
if not os.path.exists(path):
os.makedirs(path)
filename = img_name_processor(src)
file = os.path.join(path, filename)
save_img(file=file, src=src)
if __name__ == "__main__":
categories = ['male', 'old', 'vintage', 'dog', 'cat', 'building', 'nature', 'castle', 'water', 'ocean', 'cities', 'body', 'hands', 'people', 'culture', 'religion', 'color', 'patterns', 'houses', 'vintage', 'river', 'landscape', 'lights', 'animals', 'wallpaper', 'texture', 'current events', 'architecture', 'business', 'work', 'travel', 'fashion', 'food', 'drink', 'spirituality', 'experimental', 'health', 'arts', 'culture', 'children', 'people', 'events', 'trees', 'green', 'yellow', 'pink', 'blue', 'red', 'minimal', 'hands', 'head', 'eyes', 'mouth', 'eating', 'playing', 'sports']
for i in categories:
pexels(i)
| true | true |
1c2e9bd62250529bf5b8c71a6f78cdc41dea29c9 | 1,568 | py | Python | stats_scripts/data_management/validate_consistancy.py | LilithHafner/SymbulationEmp | fb53c7fbefa03f6d901cdd49bdb823a637449bba | [
"MIT"
] | null | null | null | stats_scripts/data_management/validate_consistancy.py | LilithHafner/SymbulationEmp | fb53c7fbefa03f6d901cdd49bdb823a637449bba | [
"MIT"
] | null | null | null | stats_scripts/data_management/validate_consistancy.py | LilithHafner/SymbulationEmp | fb53c7fbefa03f6d901cdd49bdb823a637449bba | [
"MIT"
] | null | null | null | lines = open('collated_data.data').readlines()
header = [x.strip('"') for x in lines[0].split()]
independent_variables = 'HRR', 'HRR', 'SLR', 'BS', 'BT', 'SL', 'SYN', 'POP', 'UPS', 'T'
dependant_variables = 'host_count', 'sym_count', 'survival', 'moi'
omit = ('file_name',)
versions = '"Standard-1.1"', '"Duplicate-1.1"'
data = {}
passes, fails = 0, 0
for line in lines:
values = line.split()
if values[0] in versions:
independent_values = []
dependant_values = []
other_values = []
for name, value in zip(header, values):
if name in independent_variables:
independent_values.append(value)
elif name in dependant_variables:
dependant_values.append(value)
elif name not in omit:
other_values.append(value)
independent_values = tuple(independent_values)
dependant_values = tuple(dependant_values)
other_values = tuple(other_values)
if independent_values in data:
if data[independent_values][0] != dependant_values:
print(independent_values)
print(data[independent_values][0])
print(dependant_values)#, other_values)
print()
fails += 1
else:
passes += 1
else:
data[independent_values] = [dependant_values]
data[independent_values].append(other_values)
print('Fail:' if fails else 'Pass:' if passes else 'No Data:', '{}/{}'.format(passes, passes+fails))
| 35.636364 | 100 | 0.58801 | lines = open('collated_data.data').readlines()
header = [x.strip('"') for x in lines[0].split()]
independent_variables = 'HRR', 'HRR', 'SLR', 'BS', 'BT', 'SL', 'SYN', 'POP', 'UPS', 'T'
dependant_variables = 'host_count', 'sym_count', 'survival', 'moi'
omit = ('file_name',)
versions = '"Standard-1.1"', '"Duplicate-1.1"'
data = {}
passes, fails = 0, 0
for line in lines:
values = line.split()
if values[0] in versions:
independent_values = []
dependant_values = []
other_values = []
for name, value in zip(header, values):
if name in independent_variables:
independent_values.append(value)
elif name in dependant_variables:
dependant_values.append(value)
elif name not in omit:
other_values.append(value)
independent_values = tuple(independent_values)
dependant_values = tuple(dependant_values)
other_values = tuple(other_values)
if independent_values in data:
if data[independent_values][0] != dependant_values:
print(independent_values)
print(data[independent_values][0])
print(dependant_values)#, other_values)
print()
fails += 1
else:
passes += 1
else:
data[independent_values] = [dependant_values]
data[independent_values].append(other_values)
print('Fail:' if fails else 'Pass:' if passes else 'No Data:', '{}/{}'.format(passes, passes+fails))
| true | true |
1c2e9ca1193e20f92c9e20e10de63accebbd6262 | 1,642 | py | Python | laikaboss/modules/meta_iqy.py | sandialabs/laikaboss | 3064ac1176911651d61c5176e9bd83eacec36b16 | [
"Apache-2.0"
] | 2 | 2019-11-02T23:40:23.000Z | 2019-12-01T22:24:57.000Z | laikaboss/modules/meta_iqy.py | sandialabs/laikaboss | 3064ac1176911651d61c5176e9bd83eacec36b16 | [
"Apache-2.0"
] | null | null | null | laikaboss/modules/meta_iqy.py | sandialabs/laikaboss | 3064ac1176911651d61c5176e9bd83eacec36b16 | [
"Apache-2.0"
] | 3 | 2017-08-09T23:58:40.000Z | 2019-12-01T22:25:06.000Z | # Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#A module to analyze.iqy files
#import classes and helpers from the Laika framework
from laikaboss.si_module import SI_MODULE
class META_IQY(SI_MODULE):
'''Laika module for collecting metadata from .iqy files'''
def __init__(self):
'''init, module name'''
self.module_name = "META_IQY"
def _run(self, scanObject, result, depth, args):
'''add metadata and return result (empty)'''
s = scanObject.buffer
index_one = s.index(b"1") #urls start after WEB1 or WEB\n1\n
newline = b'\n' #urls end with a newline
url = s[index_one+1:s.index(newline,index_one+2)]
if url[:1] == newline: url=url[1:] #shave off newline if necessary
scanObject.addMetadata(self.module_name,"url",url)
moduleResult = []
return moduleResult
def _close(self):
'''nothing to be done here'''
pass
| 35.695652 | 76 | 0.696711 |
from laikaboss.si_module import SI_MODULE
class META_IQY(SI_MODULE):
def __init__(self):
self.module_name = "META_IQY"
def _run(self, scanObject, result, depth, args):
s = scanObject.buffer
index_one = s.index(b"1")
newline = b'\n'
url = s[index_one+1:s.index(newline,index_one+2)]
if url[:1] == newline: url=url[1:]
scanObject.addMetadata(self.module_name,"url",url)
moduleResult = []
return moduleResult
def _close(self):
pass
| true | true |
1c2e9cc4b9a651b57cc3b5e361842704cab3a988 | 11,924 | py | Python | docs/doxygen/swig_doc.py | hb9fxq/gr-aaronia_rtsa | ac66cc0630668b70fddc393735e6034ccd1cf40a | [
"MIT"
] | null | null | null | docs/doxygen/swig_doc.py | hb9fxq/gr-aaronia_rtsa | ac66cc0630668b70fddc393735e6034ccd1cf40a | [
"MIT"
] | null | null | null | docs/doxygen/swig_doc.py | hb9fxq/gr-aaronia_rtsa | ac66cc0630668b70fddc393735e6034ccd1cf40a | [
"MIT"
] | null | null | null | #
# Copyright 2010-2012 Free Software Foundation, Inc.
#
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-aaronia_rtsa
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Creates the swig_doc.i SWIG interface file.
Execute using: python swig_doc.py xml_path outputfilename
The file instructs SWIG to transfer the doxygen comments into the
python docstrings.
"""
from __future__ import unicode_literals
import sys, time
from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile
from doxyxml import DoxyOther, base
def py_name(name):
bits = name.split('_')
return '_'.join(bits[1:])
def make_name(name):
bits = name.split('_')
return bits[0] + '_make_' + '_'.join(bits[1:])
class Block(object):
"""
Checks if doxyxml produced objects correspond to a gnuradio block.
"""
@classmethod
def includes(cls, item):
if not isinstance(item, DoxyClass):
return False
# Check for a parsing error.
if item.error():
return False
friendname = make_name(item.name())
is_a_block = item.has_member(friendname, DoxyFriend)
# But now sometimes the make function isn't a friend so check again.
if not is_a_block:
is_a_block = di.has_member(friendname, DoxyFunction)
return is_a_block
class Block2(object):
"""
Checks if doxyxml produced objects correspond to a new style
gnuradio block.
"""
@classmethod
def includes(cls, item):
if not isinstance(item, DoxyClass):
return False
# Check for a parsing error.
if item.error():
return False
is_a_block2 = item.has_member('make', DoxyFunction) and item.has_member('sptr', DoxyOther)
return is_a_block2
def utoascii(text):
"""
Convert unicode text into ascii and escape quotes and backslashes.
"""
if text is None:
return ''
out = text.encode('ascii', 'replace')
# swig will require us to replace blackslash with 4 backslashes
out = out.replace(b'\\', b'\\\\\\\\')
out = out.replace(b'"', b'\\"').decode('ascii')
return str(out)
def combine_descriptions(obj):
"""
Combines the brief and detailed descriptions of an object together.
"""
description = []
bd = obj.brief_description.strip()
dd = obj.detailed_description.strip()
if bd:
description.append(bd)
if dd:
description.append(dd)
return utoascii('\n\n'.join(description)).strip()
def format_params(parameteritems):
output = ['Args:']
template = ' {0} : {1}'
for pi in parameteritems:
output.append(template.format(pi.name, pi.description))
return '\n'.join(output)
entry_templ = '%feature("docstring") {name} "{docstring}"'
def make_entry(obj, name=None, templ="{description}", description=None, params=[]):
"""
Create a docstring entry for a swig interface file.
obj - a doxyxml object from which documentation will be extracted.
name - the name of the C object (defaults to obj.name())
templ - an optional template for the docstring containing only one
variable named 'description'.
description - if this optional variable is set then it's value is
used as the description instead of extracting it from obj.
"""
if name is None:
name=obj.name()
if "operator " in name:
return ''
if description is None:
description = combine_descriptions(obj)
if params:
description += '\n\n'
description += utoascii(format_params(params))
docstring = templ.format(description=description)
if not docstring:
return ''
return entry_templ.format(
name=name,
docstring=docstring,
)
def make_func_entry(func, name=None, description=None, params=None):
"""
Create a function docstring entry for a swig interface file.
func - a doxyxml object from which documentation will be extracted.
name - the name of the C object (defaults to func.name())
description - if this optional variable is set then it's value is
used as the description instead of extracting it from func.
params - a parameter list that overrides using func.params.
"""
#if params is None:
# params = func.params
#params = [prm.declname for prm in params]
#if params:
# sig = "Params: (%s)" % ", ".join(params)
#else:
# sig = "Params: (NONE)"
#templ = "{description}\n\n" + sig
#return make_entry(func, name=name, templ=utoascii(templ),
# description=description)
return make_entry(func, name=name, description=description, params=params)
def make_class_entry(klass, description=None, ignored_methods=[], params=None):
"""
Create a class docstring for a swig interface file.
"""
if params is None:
params = klass.params
output = []
output.append(make_entry(klass, description=description, params=params))
for func in klass.in_category(DoxyFunction):
if func.name() not in ignored_methods:
name = klass.name() + '::' + func.name()
output.append(make_func_entry(func, name=name))
return "\n\n".join(output)
def make_block_entry(di, block):
"""
Create class and function docstrings of a gnuradio block for a
swig interface file.
"""
descriptions = []
# Get the documentation associated with the class.
class_desc = combine_descriptions(block)
if class_desc:
descriptions.append(class_desc)
# Get the documentation associated with the make function
make_func = di.get_member(make_name(block.name()), DoxyFunction)
make_func_desc = combine_descriptions(make_func)
if make_func_desc:
descriptions.append(make_func_desc)
# Get the documentation associated with the file
try:
block_file = di.get_member(block.name() + ".h", DoxyFile)
file_desc = combine_descriptions(block_file)
if file_desc:
descriptions.append(file_desc)
except base.Base.NoSuchMember:
# Don't worry if we can't find a matching file.
pass
# And join them all together to make a super duper description.
super_description = "\n\n".join(descriptions)
# Associate the combined description with the class and
# the make function.
output = []
output.append(make_class_entry(block, description=super_description))
output.append(make_func_entry(make_func, description=super_description,
params=block.params))
return "\n\n".join(output)
def make_block2_entry(di, block):
"""
Create class and function docstrings of a new style gnuradio block for a
swig interface file.
"""
descriptions = []
# For new style blocks all the relevant documentation should be
# associated with the 'make' method.
class_description = combine_descriptions(block)
make_func = block.get_member('make', DoxyFunction)
make_description = combine_descriptions(make_func)
description = class_description + "\n\nConstructor Specific Documentation:\n\n" + make_description
# Associate the combined description with the class and
# the make function.
output = []
output.append(make_class_entry(
block, description=description,
ignored_methods=['make'], params=make_func.params))
makename = block.name() + '::make'
output.append(make_func_entry(
make_func, name=makename, description=description,
params=make_func.params))
return "\n\n".join(output)
def make_swig_interface_file(di, swigdocfilename, custom_output=None):
output = ["""
/*
* This file was automatically generated using swig_doc.py.
*
* Any changes to it will be lost next time it is regenerated.
*/
"""]
if custom_output is not None:
output.append(custom_output)
# Create docstrings for the blocks.
blocks = di.in_category(Block)
blocks2 = di.in_category(Block2)
make_funcs = set([])
for block in blocks:
try:
make_func = di.get_member(make_name(block.name()), DoxyFunction)
# Don't want to risk writing to output twice.
if make_func.name() not in make_funcs:
make_funcs.add(make_func.name())
output.append(make_block_entry(di, block))
except block.ParsingError:
sys.stderr.write('Parsing error for block {0}\n'.format(block.name()))
raise
for block in blocks2:
try:
make_func = block.get_member('make', DoxyFunction)
make_func_name = block.name() +'::make'
# Don't want to risk writing to output twice.
if make_func_name not in make_funcs:
make_funcs.add(make_func_name)
output.append(make_block2_entry(di, block))
except block.ParsingError:
sys.stderr.write('Parsing error for block {0}\n'.format(block.name()))
raise
# Create docstrings for functions
# Don't include the make functions since they have already been dealt with.
funcs = [f for f in di.in_category(DoxyFunction)
if f.name() not in make_funcs and not f.name().startswith('std::')]
for f in funcs:
try:
output.append(make_func_entry(f))
except f.ParsingError:
sys.stderr.write('Parsing error for function {0}\n'.format(f.name()))
# Create docstrings for classes
block_names = [block.name() for block in blocks]
block_names += [block.name() for block in blocks2]
klasses = [k for k in di.in_category(DoxyClass)
if k.name() not in block_names and not k.name().startswith('std::')]
for k in klasses:
try:
output.append(make_class_entry(k))
except k.ParsingError:
sys.stderr.write('Parsing error for class {0}\n'.format(k.name()))
# Docstrings are not created for anything that is not a function or a class.
# If this excludes anything important please add it here.
output = "\n\n".join(output)
swig_doc = open(swigdocfilename, 'w')
swig_doc.write(output)
swig_doc.close()
if __name__ == "__main__":
# Parse command line options and set up doxyxml.
err_msg = "Execute using: python swig_doc.py xml_path outputfilename"
if len(sys.argv) != 3:
raise Exception(err_msg)
xml_path = sys.argv[1]
swigdocfilename = sys.argv[2]
di = DoxyIndex(xml_path)
# gnuradio.gr.msq_queue.insert_tail and delete_head create errors unless docstrings are defined!
# This is presumably a bug in SWIG.
#msg_q = di.get_member(u'gr_msg_queue', DoxyClass)
#insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction)
#delete_head = msg_q.get_member(u'delete_head', DoxyFunction)
output = []
#output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail'))
#output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head'))
custom_output = "\n\n".join(output)
# Generate the docstrings interface file.
make_swig_interface_file(di, swigdocfilename, custom_output=custom_output)
| 35.807808 | 102 | 0.666555 |
from __future__ import unicode_literals
import sys, time
from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile
from doxyxml import DoxyOther, base
def py_name(name):
bits = name.split('_')
return '_'.join(bits[1:])
def make_name(name):
bits = name.split('_')
return bits[0] + '_make_' + '_'.join(bits[1:])
class Block(object):
@classmethod
def includes(cls, item):
if not isinstance(item, DoxyClass):
return False
if item.error():
return False
friendname = make_name(item.name())
is_a_block = item.has_member(friendname, DoxyFriend)
if not is_a_block:
is_a_block = di.has_member(friendname, DoxyFunction)
return is_a_block
class Block2(object):
@classmethod
def includes(cls, item):
if not isinstance(item, DoxyClass):
return False
# Check for a parsing error.
if item.error():
return False
is_a_block2 = item.has_member('make', DoxyFunction) and item.has_member('sptr', DoxyOther)
return is_a_block2
def utoascii(text):
if text is None:
return ''
out = text.encode('ascii', 'replace')
# swig will require us to replace blackslash with 4 backslashes
out = out.replace(b'\\', b'\\\\\\\\')
out = out.replace(b'"', b'\\"').decode('ascii')
return str(out)
def combine_descriptions(obj):
description = []
bd = obj.brief_description.strip()
dd = obj.detailed_description.strip()
if bd:
description.append(bd)
if dd:
description.append(dd)
return utoascii('\n\n'.join(description)).strip()
def format_params(parameteritems):
output = ['Args:']
template = ' {0} : {1}'
for pi in parameteritems:
output.append(template.format(pi.name, pi.description))
return '\n'.join(output)
entry_templ = '%feature("docstring") {name} "{docstring}"'
def make_entry(obj, name=None, templ="{description}", description=None, params=[]):
if name is None:
name=obj.name()
if "operator " in name:
return ''
if description is None:
description = combine_descriptions(obj)
if params:
description += '\n\n'
description += utoascii(format_params(params))
docstring = templ.format(description=description)
if not docstring:
return ''
return entry_templ.format(
name=name,
docstring=docstring,
)
def make_func_entry(func, name=None, description=None, params=None):
#if params is None:
# params = func.params
#params = [prm.declname for prm in params]
#if params:
# sig = "Params: (%s)" % ", ".join(params)
#else:
# sig = "Params: (NONE)"
#templ = "{description}\n\n" + sig
#return make_entry(func, name=name, templ=utoascii(templ),
# description=description)
return make_entry(func, name=name, description=description, params=params)
def make_class_entry(klass, description=None, ignored_methods=[], params=None):
if params is None:
params = klass.params
output = []
output.append(make_entry(klass, description=description, params=params))
for func in klass.in_category(DoxyFunction):
if func.name() not in ignored_methods:
name = klass.name() + '::' + func.name()
output.append(make_func_entry(func, name=name))
return "\n\n".join(output)
def make_block_entry(di, block):
descriptions = []
# Get the documentation associated with the class.
class_desc = combine_descriptions(block)
if class_desc:
descriptions.append(class_desc)
# Get the documentation associated with the make function
make_func = di.get_member(make_name(block.name()), DoxyFunction)
make_func_desc = combine_descriptions(make_func)
if make_func_desc:
descriptions.append(make_func_desc)
# Get the documentation associated with the file
try:
block_file = di.get_member(block.name() + ".h", DoxyFile)
file_desc = combine_descriptions(block_file)
if file_desc:
descriptions.append(file_desc)
except base.Base.NoSuchMember:
# Don't worry if we can't find a matching file.
pass
# And join them all together to make a super duper description.
super_description = "\n\n".join(descriptions)
# Associate the combined description with the class and
# the make function.
output = []
output.append(make_class_entry(block, description=super_description))
output.append(make_func_entry(make_func, description=super_description,
params=block.params))
return "\n\n".join(output)
def make_block2_entry(di, block):
descriptions = []
# For new style blocks all the relevant documentation should be
# associated with the 'make' method.
class_description = combine_descriptions(block)
make_func = block.get_member('make', DoxyFunction)
make_description = combine_descriptions(make_func)
description = class_description + "\n\nConstructor Specific Documentation:\n\n" + make_description
# Associate the combined description with the class and
# the make function.
output = []
output.append(make_class_entry(
block, description=description,
ignored_methods=['make'], params=make_func.params))
makename = block.name() + '::make'
output.append(make_func_entry(
make_func, name=makename, description=description,
params=make_func.params))
return "\n\n".join(output)
def make_swig_interface_file(di, swigdocfilename, custom_output=None):
output = ["""
/*
* This file was automatically generated using swig_doc.py.
*
* Any changes to it will be lost next time it is regenerated.
*/
"""]
if custom_output is not None:
output.append(custom_output)
# Create docstrings for the blocks.
blocks = di.in_category(Block)
blocks2 = di.in_category(Block2)
make_funcs = set([])
for block in blocks:
try:
make_func = di.get_member(make_name(block.name()), DoxyFunction)
# Don't want to risk writing to output twice.
if make_func.name() not in make_funcs:
make_funcs.add(make_func.name())
output.append(make_block_entry(di, block))
except block.ParsingError:
sys.stderr.write('Parsing error for block {0}\n'.format(block.name()))
raise
for block in blocks2:
try:
make_func = block.get_member('make', DoxyFunction)
make_func_name = block.name() +'::make'
if make_func_name not in make_funcs:
make_funcs.add(make_func_name)
output.append(make_block2_entry(di, block))
except block.ParsingError:
sys.stderr.write('Parsing error for block {0}\n'.format(block.name()))
raise
# Create docstrings for functions
# Don't include the make functions since they have already been dealt with.
funcs = [f for f in di.in_category(DoxyFunction)
if f.name() not in make_funcs and not f.name().startswith('std::')]
for f in funcs:
try:
output.append(make_func_entry(f))
except f.ParsingError:
sys.stderr.write('Parsing error for function {0}\n'.format(f.name()))
block_names = [block.name() for block in blocks]
block_names += [block.name() for block in blocks2]
klasses = [k for k in di.in_category(DoxyClass)
if k.name() not in block_names and not k.name().startswith('std::')]
for k in klasses:
try:
output.append(make_class_entry(k))
except k.ParsingError:
sys.stderr.write('Parsing error for class {0}\n'.format(k.name()))
output = "\n\n".join(output)
swig_doc = open(swigdocfilename, 'w')
swig_doc.write(output)
swig_doc.close()
if __name__ == "__main__":
err_msg = "Execute using: python swig_doc.py xml_path outputfilename"
if len(sys.argv) != 3:
raise Exception(err_msg)
xml_path = sys.argv[1]
swigdocfilename = sys.argv[2]
di = DoxyIndex(xml_path)
output = []
custom_output = "\n\n".join(output)
make_swig_interface_file(di, swigdocfilename, custom_output=custom_output)
| true | true |
1c2e9da8b1410e0876c772e4e9d83e5a0a64b024 | 436 | py | Python | src/dendrograms/dendrogram.py | Ellon-M/visualizations | 5a42c213ea8fd0597e2035778d9ae6460eb9e821 | [
"MIT"
] | null | null | null | src/dendrograms/dendrogram.py | Ellon-M/visualizations | 5a42c213ea8fd0597e2035778d9ae6460eb9e821 | [
"MIT"
] | null | null | null | src/dendrograms/dendrogram.py | Ellon-M/visualizations | 5a42c213ea8fd0597e2035778d9ae6460eb9e821 | [
"MIT"
] | null | null | null | # dendrogram
import scipy.cluster.hierarchy as shc
# Import Data
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/USArrests.csv')
# Plot
plt.figure(figsize=(16, 10), dpi= 80)
plt.title("USArrests Dendograms", fontsize=22)
dend = shc.dendrogram(shc.linkage(df[['Murder', 'Assault', 'UrbanPop', 'Rape']], method='ward'), labels=df.State.values, color_threshold=100)
plt.xticks(fontsize=12)
plt.show()
| 29.066667 | 143 | 0.729358 |
import scipy.cluster.hierarchy as shc
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/USArrests.csv')
plt.figure(figsize=(16, 10), dpi= 80)
plt.title("USArrests Dendograms", fontsize=22)
dend = shc.dendrogram(shc.linkage(df[['Murder', 'Assault', 'UrbanPop', 'Rape']], method='ward'), labels=df.State.values, color_threshold=100)
plt.xticks(fontsize=12)
plt.show()
| true | true |
1c2e9f75bc7f7bb8f373ef0d4b1ceb3207725ff7 | 1,967 | py | Python | .history/main_20171106224753.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | .history/main_20171106224753.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | .history/main_20171106224753.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | # DADSA - Assignment 1
# Reece Benson
from classes import Menu as Menu
from classes import Handler as Handler
class App():
# Define the variables we will be using
debug = True
handler = None
# Define all of the properties we will need to use
def __init__(self):
# Load our handler
self.handler = Handler.Handler(self)
self.handler.load()
# Generate rounds
self.generate_rounds()
# Hold the program
self.exit()
# Generate our rounds from our player list
def generate_rounds(self):
# Let's generate our random rounds from scratch
round_data = { }
# Write our new data to memory
for seasonId in self.handler.get_seasons():
season = self.handler.get_season(seasonId)
players = season.players()
# Generate our rounds
for gender in players:
# Create our gendered rounds
if(not gender in round_data):
# Default Round Cap
roundCap = 3
# Do we have a Round Cap overrider for this gender?
if(gender + "_cap" in season.settings()):
roundCap = season.settings()[gender + "_cap"]
# Update our round data
round_data.update({ gender: [ { "_roundCap": roundCap } ] })
# Create our round data from players
for i in range(len(players[gender]) - 1):
playerOne = players[gender][i]
playerTwo = players[gender][i + 1]
print("{0} vs {1} ".format(playerOne.name(), playerTwo.name()))
i += 1
print(round_data)
# A method which exits the program after the user has pressed the Return key
def exit(self):
input(">>> Press <Return> to terminate the program")
exit()
App() | 31.725806 | 83 | 0.543467 |
from classes import Menu as Menu
from classes import Handler as Handler
class App():
debug = True
handler = None
def __init__(self):
self.handler = Handler.Handler(self)
self.handler.load()
self.generate_rounds()
self.exit()
def generate_rounds(self):
round_data = { }
# Write our new data to memory
for seasonId in self.handler.get_seasons():
season = self.handler.get_season(seasonId)
players = season.players()
# Generate our rounds
for gender in players:
# Create our gendered rounds
if(not gender in round_data):
# Default Round Cap
roundCap = 3
# Do we have a Round Cap overrider for this gender?
if(gender + "_cap" in season.settings()):
roundCap = season.settings()[gender + "_cap"]
# Update our round data
round_data.update({ gender: [ { "_roundCap": roundCap } ] })
# Create our round data from players
for i in range(len(players[gender]) - 1):
playerOne = players[gender][i]
playerTwo = players[gender][i + 1]
print("{0} vs {1} ".format(playerOne.name(), playerTwo.name()))
i += 1
print(round_data)
# A method which exits the program after the user has pressed the Return key
def exit(self):
input(">>> Press <Return> to terminate the program")
exit()
App() | true | true |
1c2e9fe71d358bad524ca7f4ba082ca2127b186a | 2,471 | py | Python | lib/reda/importers/geotom.py | j-gallistl/reda | 13b1f9e1cda92bbbbafc5c28be2c691d3b722740 | [
"MIT"
] | 12 | 2017-12-11T08:32:46.000Z | 2021-06-09T05:41:57.000Z | lib/reda/importers/geotom.py | j-gallistl/reda | 13b1f9e1cda92bbbbafc5c28be2c691d3b722740 | [
"MIT"
] | 58 | 2017-11-12T11:10:42.000Z | 2021-06-11T13:52:44.000Z | lib/reda/importers/geotom.py | geophysics-ubonn/REDA | 8f0399031121f5a937171231a25f9ab03a3c8873 | [
"MIT"
] | 11 | 2017-11-12T12:02:35.000Z | 2021-02-16T06:54:04.000Z | # -*- coding: utf-8 -*-
from io import StringIO
import pandas as pd
import numpy as np
from reda.containers.ERT import ERT
from reda.importers.utils.decorators import enable_result_transforms
@enable_result_transforms
def _parse_wenner_file(filename, settings):
"""Parse a Geotom .wen (Wenner configuration) file
Parsing problems
----------------
Due to column overflows it is necessary to make sure that spaces are
present around the ; character. Example:
8.000 14.000 10835948.70; 0.001 -123.1853 -1.0 23.10.2014
"""
# read data
with open(filename, 'r') as fid2:
geotom_data_orig = fid2.read()
# replace all ';' by ' ; '
geotom_data = geotom_data_orig.replace(';', ' ; ')
fid = StringIO()
fid.write(geotom_data)
fid.seek(0)
header = [fid.readline() for i in range(0, 16)]
header
df = pd.read_csv(
fid,
delim_whitespace=True,
header=None,
names=(
'elec1_wenner',
'a_w',
'rho_a',
'c4',
'c5',
'c6',
'c6',
'c7',
'c8',
'c9',
),
)
# compute geometric factor using the Wenner formula
df['k'] = 2 * np.pi * df['a_w']
df['r'] = df['rho_a'] / df['k']
Am = df['elec1_wenner']
Bm = df['elec1_wenner'] + df['a_w']
Mm = df['elec1_wenner'] + 3 * df['a_w']
Nm = df['elec1_wenner'] + 2 * df['a_w']
df['a'] = Am / 2.0 + 1
df['b'] = Bm / 2.0 + 1
df['m'] = Mm / 2.0 + 1
df['n'] = Nm / 2.0 + 1
# remove any nan values
df.dropna(axis=0, subset=['a', 'b', 'm', 'n', 'r'], inplace=True)
return df
def add_file(filename, settings, container=None):
"""
Parameters
----------
filename: string
path
settings: dict
::
{
timestep: [int|datetime], timestep relating to this measurement
}
container: :class:`reda.container.ERT.ERT`
ERT container to add dataset to
"""
timestep = settings.get('timestep', 0)
# Wenner
if filename.endswith('.wen'):
data = _parse_wenner_file(filename, settings)
# add timestep column
data['timestep'] = timestep
else:
raise Exception('Not a Wenner file')
if container is None:
container = ERT(data)
else:
container.df = pd.concat((container.df, data))
return container
| 22.463636 | 79 | 0.541076 |
from io import StringIO
import pandas as pd
import numpy as np
from reda.containers.ERT import ERT
from reda.importers.utils.decorators import enable_result_transforms
@enable_result_transforms
def _parse_wenner_file(filename, settings):
with open(filename, 'r') as fid2:
geotom_data_orig = fid2.read()
geotom_data = geotom_data_orig.replace(';', ' ; ')
fid = StringIO()
fid.write(geotom_data)
fid.seek(0)
header = [fid.readline() for i in range(0, 16)]
header
df = pd.read_csv(
fid,
delim_whitespace=True,
header=None,
names=(
'elec1_wenner',
'a_w',
'rho_a',
'c4',
'c5',
'c6',
'c6',
'c7',
'c8',
'c9',
),
)
df['k'] = 2 * np.pi * df['a_w']
df['r'] = df['rho_a'] / df['k']
Am = df['elec1_wenner']
Bm = df['elec1_wenner'] + df['a_w']
Mm = df['elec1_wenner'] + 3 * df['a_w']
Nm = df['elec1_wenner'] + 2 * df['a_w']
df['a'] = Am / 2.0 + 1
df['b'] = Bm / 2.0 + 1
df['m'] = Mm / 2.0 + 1
df['n'] = Nm / 2.0 + 1
df.dropna(axis=0, subset=['a', 'b', 'm', 'n', 'r'], inplace=True)
return df
def add_file(filename, settings, container=None):
timestep = settings.get('timestep', 0)
if filename.endswith('.wen'):
data = _parse_wenner_file(filename, settings)
data['timestep'] = timestep
else:
raise Exception('Not a Wenner file')
if container is None:
container = ERT(data)
else:
container.df = pd.concat((container.df, data))
return container
| true | true |
1c2ea03e03b381cfed1b2a2fb9de2e155e4a83d4 | 3,527 | py | Python | tests/test_unicode_strings.py | alekseyl1992/pyrobuf | dc553f2c407d3ea1ca78c6b58e149d05ff811a23 | [
"Apache-2.0"
] | 578 | 2015-12-17T20:39:31.000Z | 2022-02-15T05:14:03.000Z | tests/test_unicode_strings.py | alekseyl1992/pyrobuf | dc553f2c407d3ea1ca78c6b58e149d05ff811a23 | [
"Apache-2.0"
] | 121 | 2015-12-19T07:37:32.000Z | 2022-02-22T05:22:55.000Z | tests/test_unicode_strings.py | alekseyl1992/pyrobuf | dc553f2c407d3ea1ca78c6b58e149d05ff811a23 | [
"Apache-2.0"
] | 82 | 2015-12-19T00:19:28.000Z | 2022-02-21T09:00:21.000Z | import unittest
# Placeholders for the generated message classes; they are bound in
# TestUnicodeStrings.setUpClass via a late import (presumably because the
# test_message_proto module only exists after the proto build step -- confirm).
Test = None
TestSs1 = None
# Greek capital letters Alpha, Beta, Gamma, Delta -- a fixed non-ASCII sample.
GREEK_LETTERS = u"\u0391\u0392\u0393\u0394"
class TestUnicodeStrings(unittest.TestCase):
    """Round-trip (parse and serialize) tests for unicode string fields."""

    @classmethod
    def setUpClass(cls):
        # Bind the generated message classes lazily, once the proto module
        # has been produced.
        global Test, TestSs1, TestFieldTypes
        from test_message_proto import Test, TestSs1

    def test_unicode_string_parse_from_string(self):
        msg = Test.FromString(b'\x1a\x08\xce\x91\xce\x92\xce\x93\xce\x94')
        self.assertEqual(msg.string_field, GREEK_LETTERS)

    def test_unicode_string_parse_from_dict(self):
        msg = Test()
        msg.ParseFromDict({'string_field': u"\u0391\u0392\u0393\u0394", 'req_field': 1})
        self.assertEqual(msg.string_field, GREEK_LETTERS)

    def test_unicode_string_parse_from_json(self):
        msg = Test()
        msg.ParseFromJson('{"string_field": "\\u0391\\u0392\\u0393\\u0394", "req_field": 1}')
        self.assertEqual(msg.string_field, GREEK_LETTERS)

    def test_unicode_string_serialize_to_string(self):
        msg = Test()
        msg.string_field = GREEK_LETTERS
        self.assertEqual(
            msg.SerializePartialToString(),
            b'\x1a\x08\xce\x91\xce\x92\xce\x93\xce\x94',
        )

    def test_unicode_string_serialize_to_dict(self):
        msg = Test()
        msg.string_field = GREEK_LETTERS
        msg.req_field = 1
        expected = {'string_field': u"\u0391\u0392\u0393\u0394", 'req_field': 1}
        self.assertEqual(msg.SerializeToDict(), expected)

    def test_unicode_string_serialize_to_json(self):
        # TestSs1 has no required field, so full serialization is usable here.
        msg = TestSs1()
        msg.field2 = GREEK_LETTERS
        self.assertEqual(
            msg.SerializeToJson(), '{"field2": "\\u0391\\u0392\\u0393\\u0394"}'
        )

    def test_repeated_unicode_parse_from_string(self):
        msg = TestSs1.FromString(b'2\x04\xce\x91\xce\x922\x04\xce\x93\xce\x94')
        self.assertEqual(msg.list_string, [u"\u0391\u0392", u"\u0393\u0394"])

    def test_repeated_unicode_parse_from_dict(self):
        msg = TestSs1()
        msg.ParseFromDict({'list_string': [u'\u0391\u0392', u'\u0393\u0394']})
        self.assertEqual(msg.list_string, [u"\u0391\u0392", u"\u0393\u0394"])

    def test_repeated_unicode_parse_from_json(self):
        msg = TestSs1()
        msg.ParseFromJson('{"list_string": ["\\u0391\\u0392", "\\u0393\\u0394"]}')
        self.assertEqual(msg.list_string, [u"\u0391\u0392", u"\u0393\u0394"])

    def test_repeated_unicode_string_serialize_to_string(self):
        msg = TestSs1()
        # first two letters (Alpha, Beta), then the last two (Gamma, Delta)
        for half in (GREEK_LETTERS[:2], GREEK_LETTERS[2:]):
            msg.list_string.append(half)
        self.assertEqual(
            msg.SerializePartialToString(),
            b'2\x04\xce\x91\xce\x922\x04\xce\x93\xce\x94',
        )

    def test_repeated_unicode_string_serialize_to_dict(self):
        msg = TestSs1()
        for half in (GREEK_LETTERS[:2], GREEK_LETTERS[2:]):
            msg.list_string.append(half)
        expected = {'list_string': [u'\u0391\u0392', u'\u0393\u0394']}
        self.assertEqual(msg.SerializeToDict(), expected)

    def test_repeated_unicode_string_serialize_to_json(self):
        msg = TestSs1()
        for half in (GREEK_LETTERS[:2], GREEK_LETTERS[2:]):
            msg.list_string.append(half)
        self.assertEqual(
            msg.SerializeToJson(), '{"list_string": ["\\u0391\\u0392", "\\u0393\\u0394"]}'
        )
| 43.012195 | 108 | 0.686703 | import unittest
Test = None
TestSs1 = None
GREEK_LETTERS = u"\u0391\u0392\u0393\u0394"
class TestUnicodeStrings(unittest.TestCase):
@classmethod
def setUpClass(cls):
global Test, TestSs1, TestFieldTypes
from test_message_proto import Test, TestSs1
def test_unicode_string_parse_from_string(self):
message = Test.FromString(b'\x1a\x08\xce\x91\xce\x92\xce\x93\xce\x94')
self.assertEqual(message.string_field, GREEK_LETTERS)
def test_unicode_string_parse_from_dict(self):
message = Test()
message.ParseFromDict({'string_field': u"\u0391\u0392\u0393\u0394", 'req_field': 1})
self.assertEqual(message.string_field, GREEK_LETTERS)
def test_unicode_string_parse_from_json(self):
message = Test()
message.ParseFromJson('{"string_field": "\\u0391\\u0392\\u0393\\u0394", "req_field": 1}')
self.assertEqual(message.string_field, GREEK_LETTERS)
def test_unicode_string_serialize_to_string(self):
message = Test()
message.string_field = GREEK_LETTERS
self.assertEqual(message.SerializePartialToString(), b'\x1a\x08\xce\x91\xce\x92\xce\x93\xce\x94')
def test_unicode_string_serialize_to_dict(self):
message = Test()
message.string_field = GREEK_LETTERS
message.req_field = 1
self.assertEqual(
message.SerializeToDict(), {'string_field': u"\u0391\u0392\u0393\u0394", 'req_field': 1}
)
def test_unicode_string_serialize_to_json(self):
message = TestSs1()
message.field2 = GREEK_LETTERS
self.assertEqual(
message.SerializeToJson(), '{"field2": "\\u0391\\u0392\\u0393\\u0394"}'
)
def test_repeated_unicode_parse_from_string(self):
message = TestSs1.FromString(b'2\x04\xce\x91\xce\x922\x04\xce\x93\xce\x94')
self.assertEqual(message.list_string, [u"\u0391\u0392", u"\u0393\u0394"])
def test_repeated_unicode_parse_from_dict(self):
message = TestSs1()
message.ParseFromDict({'list_string': [u'\u0391\u0392', u'\u0393\u0394']})
self.assertEqual(message.list_string, [u"\u0391\u0392", u"\u0393\u0394"])
def test_repeated_unicode_parse_from_json(self):
message = TestSs1()
message.ParseFromJson('{"list_string": ["\\u0391\\u0392", "\\u0393\\u0394"]}')
self.assertEqual(message.list_string, [u"\u0391\u0392", u"\u0393\u0394"])
def test_repeated_unicode_string_serialize_to_string(self):
message = TestSs1()
message.list_string.append(GREEK_LETTERS[:2])
message.list_string.append(GREEK_LETTERS[2:])
self.assertEqual(message.SerializePartialToString(), b'2\x04\xce\x91\xce\x922\x04\xce\x93\xce\x94')
def test_repeated_unicode_string_serialize_to_dict(self):
message = TestSs1()
message.list_string.append(GREEK_LETTERS[:2])
message.list_string.append(GREEK_LETTERS[2:])
self.assertEqual(message.SerializeToDict(), {'list_string': [u'\u0391\u0392', u'\u0393\u0394']})
def test_repeated_unicode_string_serialize_to_json(self):
message = TestSs1()
message.list_string.append(GREEK_LETTERS[:2])
message.list_string.append(GREEK_LETTERS[2:])
self.assertEqual(message.SerializeToJson(), '{"list_string": ["\\u0391\\u0392", "\\u0393\\u0394"]}')
| true | true |
1c2ea062620b72b16560fe0603e880b6ea79c7f0 | 271 | py | Python | buoi7/bai12.py | Viet7501/viet1 | e0be1da1fb0f4736c8d0457733ccaaed9232c4e9 | [
"Apache-2.0"
] | null | null | null | buoi7/bai12.py | Viet7501/viet1 | e0be1da1fb0f4736c8d0457733ccaaed9232c4e9 | [
"Apache-2.0"
] | null | null | null | buoi7/bai12.py | Viet7501/viet1 | e0be1da1fb0f4736c8d0457733ccaaed9232c4e9 | [
"Apache-2.0"
] | null | null | null | set1 = {0, 1, 2, 3, 4, 5, 6}
set2 = {5, 6, 7, 8, 9, 0, 1}
# set1.difference_update(set2)
# set1.intersection_update(set2)
print(set1.issubset(set2))
print(set1.isdisjoint(set2))
print(set1.issuperset(set2))
set1.symmetric_difference_update(set2)
print(set1)
| 22.583333 | 39 | 0.682657 | set1 = {0, 1, 2, 3, 4, 5, 6}
set2 = {5, 6, 7, 8, 9, 0, 1}
print(set1.issubset(set2))
print(set1.isdisjoint(set2))
print(set1.issuperset(set2))
set1.symmetric_difference_update(set2)
print(set1)
| true | true |
1c2ea0d7071e64a32dcaa6aa1f4226fc529ca0a6 | 9,426 | py | Python | saleor/payment/gateway.py | facundon/Cholitas-Backend | 4d73e0697a6b6fc61b82b72ff1c99b2eabe8f4c6 | [
"CC-BY-4.0"
] | null | null | null | saleor/payment/gateway.py | facundon/Cholitas-Backend | 4d73e0697a6b6fc61b82b72ff1c99b2eabe8f4c6 | [
"CC-BY-4.0"
] | null | null | null | saleor/payment/gateway.py | facundon/Cholitas-Backend | 4d73e0697a6b6fc61b82b72ff1c99b2eabe8f4c6 | [
"CC-BY-4.0"
] | null | null | null | import logging
from decimal import Decimal
from typing import TYPE_CHECKING, Callable, List, Optional
from django.db import transaction
from ..payment.interface import TokenConfig
from ..plugins.manager import get_plugins_manager
from . import GatewayError, PaymentError, TransactionKind
from .models import Payment, Transaction
from .utils import (
clean_authorize,
clean_capture,
create_payment_information,
gateway_postprocess,
get_already_processed_transaction_or_create_new_transaction,
update_payment_method_details,
validate_gateway_response,
)
# Type-only imports, presumably guarded to avoid a runtime import cycle.
if TYPE_CHECKING:
    # flake8: noqa
    from ..payment.interface import CustomerSource, PaymentGateway
logger = logging.getLogger(__name__)
# Generic error text reported when a gateway call or its validation fails.
ERROR_MSG = "Oops! Something went wrong."
# Fallback message when a gateway result carries no error text.
# NOTE(review): this string is Spanish while ERROR_MSG is English -- confirm
# the mixed languages are intended.
GENERIC_TRANSACTION_ERROR = "La transacción no tuvo exito."
def raise_payment_error(fn: Callable) -> Callable:
    """Decorator: raise ``PaymentError`` when the gateway result is not a success.

    The wrapped callable must return an object with ``is_success`` and
    ``error`` attributes; the result is passed through unchanged on success.
    """

    # wraps() preserves the decorated function's name/doc for logging and
    # introspection (the original lost them).
    @wraps(fn)
    def wrapped(*args, **kwargs):
        result = fn(*args, **kwargs)
        if not result.is_success:
            raise PaymentError(result.error or GENERIC_TRANSACTION_ERROR)
        return result

    return wrapped
def payment_postprocess(fn: Callable) -> Callable:
    """Decorator: run ``gateway_postprocess`` on the transaction *fn* returns.

    The wrapped callable must return a transaction object exposing a
    ``payment`` attribute; the transaction is returned unchanged afterwards.
    """

    # wraps() preserves the decorated function's metadata (missing originally).
    @wraps(fn)
    def wrapped(*args, **kwargs):
        txn = fn(*args, **kwargs)
        gateway_postprocess(txn, txn.payment)
        return txn

    return wrapped
def require_active_payment(fn: Callable) -> Callable:
    """Decorator: reject calls whose first argument is a deactivated payment."""

    # wraps() preserves the decorated function's metadata (missing originally).
    @wraps(fn)
    def wrapped(payment: Payment, *args, **kwargs):
        if not payment.is_active:
            raise PaymentError("This payment is no longer active.")
        return fn(payment, *args, **kwargs)

    return wrapped
def with_locked_payment(fn: Callable) -> Callable:
    """Lock payment to protect from asynchronous modification."""

    # wraps() preserves the decorated function's metadata (missing originally).
    @wraps(fn)
    def wrapped(payment: Payment, *args, **kwargs):
        # Re-fetch the row under SELECT ... FOR UPDATE inside a transaction so
        # concurrent gateway operations on the same payment serialize.
        with transaction.atomic():
            payment = Payment.objects.select_for_update().get(id=payment.id)
            return fn(payment, *args, **kwargs)

    return wrapped
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def process_payment(
    payment: Payment,
    token: str,
    store_source: bool = False,
    additional_data: Optional[dict] = None,
) -> Transaction:
    """Run the plugin's process-payment flow and record a CAPTURE transaction."""
    manager = get_plugins_manager()
    payment_info = create_payment_information(
        payment=payment,
        payment_token=token,
        store_source=store_source,
        additional_data=additional_data,
    )
    response, error = _fetch_gateway_response(
        manager.process_payment, payment.gateway, payment_info
    )
    # The gateway may require an extra customer step before completion.
    action_required = response is not None and response.action_required
    if response and response.payment_method_info:
        update_payment_method_details(payment, response)
    return get_already_processed_transaction_or_create_new_transaction(
        payment=payment,
        kind=TransactionKind.CAPTURE,
        action_required=action_required,
        payment_information=payment_info,
        error_msg=error,
        gateway_response=response,
    )
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def authorize(payment: Payment, token: str, store_source: bool = False) -> Transaction:
    """Run the gateway authorization flow and record an AUTH transaction."""
    manager = get_plugins_manager()
    # Refuse to authorize twice / over-authorize.
    clean_authorize(payment)
    payment_info = create_payment_information(
        payment=payment, payment_token=token, store_source=store_source
    )
    response, error = _fetch_gateway_response(
        manager.authorize_payment, payment.gateway, payment_info
    )
    if response and response.payment_method_info:
        update_payment_method_details(payment, response)
    return get_already_processed_transaction_or_create_new_transaction(
        payment=payment,
        kind=TransactionKind.AUTH,
        payment_information=payment_info,
        error_msg=error,
        gateway_response=response,
    )
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def capture(
    payment: Payment, amount: Decimal = None, store_source: bool = False
) -> Transaction:
    """Capture a previously authorized payment and record a CAPTURE transaction.

    FIX: ``@payment_postprocess`` was applied twice (outermost and innermost),
    so ``gateway_postprocess`` ran twice on the same transaction; every sibling
    operation applies it exactly once.
    """
    plugin_manager = get_plugins_manager()
    if amount is None:
        amount = payment.get_charge_amount()
    clean_capture(payment, Decimal(amount))
    # Capture is issued against the original AUTH transaction's token.
    token = _get_past_transaction_token(payment, TransactionKind.AUTH)
    payment_data = create_payment_information(
        payment=payment, payment_token=token, amount=amount, store_source=store_source
    )
    response, error = _fetch_gateway_response(
        plugin_manager.capture_payment, payment.gateway, payment_data
    )
    if response and response.payment_method_info:
        update_payment_method_details(payment, response)
    return get_already_processed_transaction_or_create_new_transaction(
        payment=payment,
        kind=TransactionKind.CAPTURE,
        payment_information=payment_data,
        error_msg=error,
        gateway_response=response,
    )
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def refund(payment: Payment, amount: Decimal = None) -> Transaction:
    """Refund *amount* (default: the full captured amount) to the customer."""
    manager = get_plugins_manager()
    if amount is None:
        amount = payment.captured_amount
    _validate_refund_amount(payment, amount)
    if not payment.can_refund():
        raise PaymentError("This payment cannot be refunded.")
    # Refunds are issued against the original CAPTURE transaction's token.
    capture_token = _get_past_transaction_token(payment, TransactionKind.CAPTURE)
    payment_info = create_payment_information(
        payment=payment, payment_token=capture_token, amount=amount
    )
    response, error = _fetch_gateway_response(
        manager.refund_payment, payment.gateway, payment_info
    )
    return get_already_processed_transaction_or_create_new_transaction(
        payment=payment,
        kind=TransactionKind.REFUND,
        payment_information=payment_info,
        error_msg=error,
        gateway_response=response,
    )
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def void(payment: Payment) -> Transaction:
    """Void (cancel) a previously authorized payment."""
    manager = get_plugins_manager()
    # Voids target the original AUTH transaction's token.
    auth_token = _get_past_transaction_token(payment, TransactionKind.AUTH)
    payment_info = create_payment_information(payment=payment, payment_token=auth_token)
    response, error = _fetch_gateway_response(
        manager.void_payment, payment.gateway, payment_info
    )
    return get_already_processed_transaction_or_create_new_transaction(
        payment=payment,
        kind=TransactionKind.VOID,
        payment_information=payment_info,
        error_msg=error,
        gateway_response=response,
    )
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def confirm(payment: Payment, additional_data: Optional[dict] = None) -> Transaction:
    """Confirm a payment that previously required an extra customer action."""
    manager = get_plugins_manager()
    # Reuse the token of the latest successful ACTION_TO_CONFIRM transaction,
    # falling back to an empty token when none was recorded.
    pending_txn = payment.transactions.filter(
        kind=TransactionKind.ACTION_TO_CONFIRM, is_success=True
    ).last()
    token = pending_txn.token if pending_txn else ""
    payment_info = create_payment_information(
        payment=payment, payment_token=token, additional_data=additional_data
    )
    response, error = _fetch_gateway_response(
        manager.confirm_payment, payment.gateway, payment_info
    )
    action_required = response is not None and response.action_required
    if response and response.payment_method_info:
        update_payment_method_details(payment, response)
    return get_already_processed_transaction_or_create_new_transaction(
        payment=payment,
        kind=TransactionKind.CONFIRM,
        payment_information=payment_info,
        action_required=action_required,
        error_msg=error,
        gateway_response=response,
    )
def list_payment_sources(gateway: str, customer_id: str) -> List["CustomerSource"]:
    """Return the stored payment sources for *customer_id* on *gateway*."""
    return get_plugins_manager().list_payment_sources(gateway, customer_id)
def get_client_token(gateway: str, customer_id: str = None) -> str:
    """Fetch a client token from *gateway*, optionally scoped to a customer."""
    config = TokenConfig(customer_id=customer_id)
    return get_plugins_manager().get_client_token(gateway, config)
def list_gateways() -> List["PaymentGateway"]:
    """Return every payment gateway exposed by the active plugins."""
    manager = get_plugins_manager()
    return manager.list_payment_gateways()
def _fetch_gateway_response(fn, *args, **kwargs):
    """Call *fn* and validate its response.

    Returns a ``(response, error)`` pair: ``(response, None)`` on success,
    ``(None, ERROR_MSG)`` when the call or its validation fails.
    """
    try:
        response = fn(*args, **kwargs)
        validate_gateway_response(response)
        return response, None
    except GatewayError:
        logger.exception("Gateway response validation failed!")
    except PaymentError:
        logger.exception("Error encountered while executing payment gateway.")
    return None, ERROR_MSG
def _get_past_transaction_token(
    payment: Payment, kind: str  # one of the TransactionKind values
) -> Optional[str]:
    """Return the token of the newest successful transaction of *kind*.

    Raises:
        PaymentError: if no successful transaction of that kind exists.
    """
    last_txn = payment.transactions.filter(kind=kind, is_success=True).last()
    if last_txn is None:
        raise PaymentError(f"Cannot find successful {kind} transaction.")
    return last_txn.token
def _validate_refund_amount(payment: Payment, amount: Decimal):
    """Raise PaymentError for non-positive or over-captured refund amounts."""
    if amount <= 0:
        raise PaymentError("Amount should be a positive number.")
    if amount > payment.captured_amount:
        raise PaymentError("Cannot refund more than captured.")
def payment_refund_or_void(payment: Optional[Payment]):
    """Best-effort rollback: refund the payment when possible, otherwise void
    it; a missing payment is silently ignored."""
    if payment is not None:
        if payment.can_refund():
            refund(payment)
        elif payment.can_void():
            void(payment)
| 32.729167 | 87 | 0.736474 | import logging
from decimal import Decimal
from typing import TYPE_CHECKING, Callable, List, Optional
from django.db import transaction
from ..payment.interface import TokenConfig
from ..plugins.manager import get_plugins_manager
from . import GatewayError, PaymentError, TransactionKind
from .models import Payment, Transaction
from .utils import (
clean_authorize,
clean_capture,
create_payment_information,
gateway_postprocess,
get_already_processed_transaction_or_create_new_transaction,
update_payment_method_details,
validate_gateway_response,
)
if TYPE_CHECKING:
from ..payment.interface import CustomerSource, PaymentGateway
logger = logging.getLogger(__name__)
ERROR_MSG = "Oops! Something went wrong."
GENERIC_TRANSACTION_ERROR = "La transacción no tuvo exito."
def raise_payment_error(fn: Callable) -> Callable:
def wrapped(*args, **kwargs):
result = fn(*args, **kwargs)
if not result.is_success:
raise PaymentError(result.error or GENERIC_TRANSACTION_ERROR)
return result
return wrapped
def payment_postprocess(fn: Callable) -> Callable:
def wrapped(*args, **kwargs):
txn = fn(*args, **kwargs)
gateway_postprocess(txn, txn.payment)
return txn
return wrapped
def require_active_payment(fn: Callable) -> Callable:
def wrapped(payment: Payment, *args, **kwargs):
if not payment.is_active:
raise PaymentError("This payment is no longer active.")
return fn(payment, *args, **kwargs)
return wrapped
def with_locked_payment(fn: Callable) -> Callable:
def wrapped(payment: Payment, *args, **kwargs):
with transaction.atomic():
payment = Payment.objects.select_for_update().get(id=payment.id)
return fn(payment, *args, **kwargs)
return wrapped
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def process_payment(
payment: Payment,
token: str,
store_source: bool = False,
additional_data: Optional[dict] = None,
) -> Transaction:
plugin_manager = get_plugins_manager()
payment_data = create_payment_information(
payment=payment,
payment_token=token,
store_source=store_source,
additional_data=additional_data,
)
response, error = _fetch_gateway_response(
plugin_manager.process_payment, payment.gateway, payment_data
)
action_required = response is not None and response.action_required
if response and response.payment_method_info:
update_payment_method_details(payment, response)
return get_already_processed_transaction_or_create_new_transaction(
payment=payment,
kind=TransactionKind.CAPTURE,
action_required=action_required,
payment_information=payment_data,
error_msg=error,
gateway_response=response,
)
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def authorize(payment: Payment, token: str, store_source: bool = False) -> Transaction:
plugin_manager = get_plugins_manager()
clean_authorize(payment)
payment_data = create_payment_information(
payment=payment, payment_token=token, store_source=store_source
)
response, error = _fetch_gateway_response(
plugin_manager.authorize_payment, payment.gateway, payment_data
)
if response and response.payment_method_info:
update_payment_method_details(payment, response)
return get_already_processed_transaction_or_create_new_transaction(
payment=payment,
kind=TransactionKind.AUTH,
payment_information=payment_data,
error_msg=error,
gateway_response=response,
)
@payment_postprocess
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def capture(
payment: Payment, amount: Decimal = None, store_source: bool = False
) -> Transaction:
plugin_manager = get_plugins_manager()
if amount is None:
amount = payment.get_charge_amount()
clean_capture(payment, Decimal(amount))
token = _get_past_transaction_token(payment, TransactionKind.AUTH)
payment_data = create_payment_information(
payment=payment, payment_token=token, amount=amount, store_source=store_source
)
response, error = _fetch_gateway_response(
plugin_manager.capture_payment, payment.gateway, payment_data
)
if response and response.payment_method_info:
update_payment_method_details(payment, response)
return get_already_processed_transaction_or_create_new_transaction(
payment=payment,
kind=TransactionKind.CAPTURE,
payment_information=payment_data,
error_msg=error,
gateway_response=response,
)
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def refund(payment: Payment, amount: Decimal = None) -> Transaction:
plugin_manager = get_plugins_manager()
if amount is None:
amount = payment.captured_amount
_validate_refund_amount(payment, amount)
if not payment.can_refund():
raise PaymentError("This payment cannot be refunded.")
token = _get_past_transaction_token(payment, TransactionKind.CAPTURE)
payment_data = create_payment_information(
payment=payment, payment_token=token, amount=amount
)
response, error = _fetch_gateway_response(
plugin_manager.refund_payment, payment.gateway, payment_data
)
return get_already_processed_transaction_or_create_new_transaction(
payment=payment,
kind=TransactionKind.REFUND,
payment_information=payment_data,
error_msg=error,
gateway_response=response,
)
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def void(payment: Payment) -> Transaction:
plugin_manager = get_plugins_manager()
token = _get_past_transaction_token(payment, TransactionKind.AUTH)
payment_data = create_payment_information(payment=payment, payment_token=token)
response, error = _fetch_gateway_response(
plugin_manager.void_payment, payment.gateway, payment_data
)
return get_already_processed_transaction_or_create_new_transaction(
payment=payment,
kind=TransactionKind.VOID,
payment_information=payment_data,
error_msg=error,
gateway_response=response,
)
@raise_payment_error
@require_active_payment
@with_locked_payment
@payment_postprocess
def confirm(payment: Payment, additional_data: Optional[dict] = None) -> Transaction:
plugin_manager = get_plugins_manager()
txn = payment.transactions.filter(
kind=TransactionKind.ACTION_TO_CONFIRM, is_success=True
).last()
token = txn.token if txn else ""
payment_data = create_payment_information(
payment=payment, payment_token=token, additional_data=additional_data
)
response, error = _fetch_gateway_response(
plugin_manager.confirm_payment, payment.gateway, payment_data
)
action_required = response is not None and response.action_required
if response and response.payment_method_info:
update_payment_method_details(payment, response)
return get_already_processed_transaction_or_create_new_transaction(
payment=payment,
kind=TransactionKind.CONFIRM,
payment_information=payment_data,
action_required=action_required,
error_msg=error,
gateway_response=response,
)
def list_payment_sources(gateway: str, customer_id: str) -> List["CustomerSource"]:
plugin_manager = get_plugins_manager()
return plugin_manager.list_payment_sources(gateway, customer_id)
def get_client_token(gateway: str, customer_id: str = None) -> str:
plugin_manager = get_plugins_manager()
token_config = TokenConfig(customer_id=customer_id)
return plugin_manager.get_client_token(gateway, token_config)
def list_gateways() -> List["PaymentGateway"]:
return get_plugins_manager().list_payment_gateways()
def _fetch_gateway_response(fn, *args, **kwargs):
response, error = None, None
try:
response = fn(*args, **kwargs)
validate_gateway_response(response)
except GatewayError:
logger.exception("Gateway response validation failed!")
response = None
error = ERROR_MSG
except PaymentError:
logger.exception("Error encountered while executing payment gateway.")
error = ERROR_MSG
response = None
return response, error
def _get_past_transaction_token(
payment: Payment, kind: str
) -> Optional[str]:
txn = payment.transactions.filter(kind=kind, is_success=True).last()
if txn is None:
raise PaymentError(f"Cannot find successful {kind} transaction.")
return txn.token
def _validate_refund_amount(payment: Payment, amount: Decimal):
if amount <= 0:
raise PaymentError("Amount should be a positive number.")
if amount > payment.captured_amount:
raise PaymentError("Cannot refund more than captured.")
def payment_refund_or_void(payment: Optional[Payment]):
if payment is None:
return
if payment.can_refund():
refund(payment)
elif payment.can_void():
void(payment)
| true | true |
1c2ea26a0e074c408ad814c5bbffa80acfedad47 | 1,230 | py | Python | tests/unit/objects/test_list_objects.py | dlite-tools/aws-s3-tools | f434ed36c1fc0530f2be6b52808cbc5e59ea8990 | [
"MIT"
] | 6 | 2021-02-18T10:03:17.000Z | 2022-01-14T06:11:54.000Z | tests/unit/objects/test_list.py | FerrariDG/aws-s3-tools | 0ddbb35cebc858bae1a0627aa8726e063a3ef5cd | [
"MIT"
] | 4 | 2021-04-20T18:20:32.000Z | 2022-02-04T08:32:39.000Z | tests/unit/objects/test_list_objects.py | dlite-tools/aws-s3-tools | f434ed36c1fc0530f2be6b52808cbc5e59ea8990 | [
"MIT"
] | 2 | 2021-03-04T00:25:51.000Z | 2021-05-10T13:20:42.000Z | """Unit tests for list.py"""
from botocore.exceptions import ClientError
from s3_tools import list_objects
from tests.unit.conftest import (
create_bucket,
BUCKET_NAME,
FILENAME
)
class TestList:
    """Unit tests for ``s3_tools.list_objects``."""

    def test_list_nonexisting_bucket(self, s3_client):
        # FIX: the original try/except passed silently when no exception was
        # raised; the else-branch now makes the missing error a failure.
        try:
            list_objects(BUCKET_NAME)
        except ClientError as e:
            error = e.response["Error"]["Code"]
            assert error == "NoSuchBucket"
        else:
            raise AssertionError("expected ClientError for a missing bucket")

    def test_list_empty_bucket(self, s3_client):
        with create_bucket(s3_client, BUCKET_NAME):
            keys = list_objects(BUCKET_NAME, "prefix")
        assert len(keys) == 0

    def test_list_bucket(self, s3_client):
        lst = [(f"prefix/mock_{i}.csv", FILENAME) for i in range(1)]
        with create_bucket(s3_client, BUCKET_NAME, keys_paths=lst):
            keys = list_objects(BUCKET_NAME, "prefix")
        assert len(keys) == 1
        assert keys[0] == lst[0][0]

    def test_list_bucket_with_pagination(self, s3_client):
        lst = [(f"prefix/mock_{i}.csv", FILENAME) for i in range(10)]
        with create_bucket(s3_client, BUCKET_NAME, keys_paths=lst):
            # max_keys below the object count forces the paginator path.
            keys = list_objects(BUCKET_NAME, "prefix", max_keys=3)
        assert len(keys) == 10
| 27.333333 | 69 | 0.647154 | from botocore.exceptions import ClientError
from s3_tools import list_objects
from tests.unit.conftest import (
create_bucket,
BUCKET_NAME,
FILENAME
)
class TestList:
def test_list_nonexisting_bucket(self, s3_client):
try:
list_objects(BUCKET_NAME)
except ClientError as e:
error = e.response["Error"]["Code"]
assert error == "NoSuchBucket"
def test_list_empty_bucket(self, s3_client):
with create_bucket(s3_client, BUCKET_NAME):
keys = list_objects(BUCKET_NAME, "prefix")
assert len(keys) == 0
def test_list_bucket(self, s3_client):
lst = [(f"prefix/mock_{i}.csv", FILENAME) for i in range(1)]
with create_bucket(s3_client, BUCKET_NAME, keys_paths=lst):
keys = list_objects(BUCKET_NAME, "prefix")
assert len(keys) == 1
assert keys[0] == lst[0][0]
def test_list_bucket_with_pagination(self, s3_client):
lst = [(f"prefix/mock_{i}.csv", FILENAME) for i in range(10)]
with create_bucket(s3_client, BUCKET_NAME, keys_paths=lst):
keys = list_objects(BUCKET_NAME, "prefix", max_keys=3)
assert len(keys) == 10
| true | true |
1c2ea307dfc71029b6d922a7886d6cfe21a1e7e4 | 5,215 | py | Python | tests/common/test_run/ascend/matmul_addn_transdata_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 286 | 2020-06-23T06:40:44.000Z | 2022-03-30T01:27:49.000Z | tests/common/test_run/ascend/matmul_addn_transdata_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 10 | 2020-07-31T03:26:59.000Z | 2021-12-27T15:00:54.000Z | tests/common/test_run/ascend/matmul_addn_transdata_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 30 | 2020-07-17T01:04:14.000Z | 2021-12-27T14:05:19.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import akg.tvm
import numpy as np
from akg.utils import kernel_exec as utils
from akg.ops.math.ascend import MatMul
from tests.common.test_run.ascend.matmul_run import *
from akg.ops.math import Addn
from akg.ops.math import Add
def matmul_addn_transdata(x, y, adds, b, out_dtype, left_format="zZ", right_format="nZ", out_format="zN", transpose_x=False,
                          transpose_y=False, attrs=None, target='cce'):
    """Fused compute: (x @ y [+ b]) + addn(adds), then TransData back from the
    fractal layout to a flat 2-D (DefaultFormat) tail.

    Returns the TransData result together with the attrs produced by MatMul.
    FIX: ``attrs`` previously defaulted to a shared mutable ``{}``.
    """
    if attrs is None:
        attrs = {}
    matmul_res, attrs_mat = MatMul(x, y, b, out_dtype, left_format, right_format, out_format, transpose_x, transpose_y, attrs=attrs)
    addn_res = Addn(adds, target=target)
    res = Add(matmul_res, addn_res, target=target)
    # Collapse the trailing 4-D fractal dims into (m1*m0, n1*n0).
    # NOTE(review): out_format other than 'zN'/'zZ' leaves new_shape unbound.
    if out_format == 'zN':
        n1, m1, m0, n0 = matmul_res.shape[-4:]
        new_shape = matmul_res.shape[:-4] + [m1 * m0, n1 * n0]
    elif out_format == 'zZ':
        m1, n1, m0, n0 = matmul_res.shape[-4:]
        new_shape = matmul_res.shape[:-4] + [m1 * m0, n1 * n0]
    func = akg.tvm.get_global_func("TransData")
    res = func([res], {"src_format" : "FRACTAL_NZ", "dst_format" : "DefaultFormat", "output_shape": new_shape})
    return res, attrs_mat
def matmul_addn_transdata_compile(shape_x, shape_y, bias, add_n, left_format, right_format, output_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs, tuning=False):
    """Build the fused matmul+addn+transdata kernel for the given shapes.

    ``bias == 1`` adds a bias tensor to the op's inputs; ``add_n`` controls how
    many addend tensors of the output shape are expected.
    """
    batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y)
    # Round every matrix dimension up to a multiple of 16 (fractal tile size).
    m = (m + 15) // 16 * 16
    n = (n + 15) // 16 * 16
    k = (k + 15) // 16 * 16
    shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(
        m, n, k, batch_tuple, adj_x, adj_y, bias, left_format, right_format, output_format)
    # One addend per input of the AddN stage, all with the output shape.
    addn_shapes = [out_shape for _ in range(add_n)]
    if bias == 1:
        input_shapes = [shape_xx, shape_yy, addn_shapes, bias_shape]
        input_types = [dtype, dtype, out_dtype, bias_dtype]
        op_attrs = [out_dtype, left_format, right_format, output_format, adj_x, adj_y, attrs]
    else:
        input_shapes = [shape_xx, shape_yy, addn_shapes]
        input_types = [dtype, dtype, out_dtype]
        op_attrs = [None, out_dtype, left_format, right_format, output_format, adj_x, adj_y, attrs]
    return utils.op_build_test(matmul_addn_transdata, input_shapes, input_types, op_attrs, kernel_name, attrs=attrs, tuning=tuning)
def matmul_addn_transdata_execute(shape_x, shape_y, bias, add_n, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs={}):
    """Compile the fused kernel, run it on random data and compare the
    device output against a NumPy reference computed on the host.

    Returns ((m_x, m_y), device_output, host_benchmark, compare_result).
    """
    batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y)
    # Align each dimension to the 16-element fractal tile.
    m = (m + 15) // 16 * 16
    n = (n + 15) // 16 * 16
    k = (k + 15) // 16 * 16
    shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(m, n, k, batch_tuple, adj_x, adj_y, bias, left_format, right_format, out_format)
    mod = matmul_addn_transdata_compile(shape_x, shape_y, bias, add_n, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs=attrs)
    # Generate data
    m_x, m_y, bench_mark, bias_data = matmul_data(batch_tuple, m, k, n, dtype, bias_dtype, out_dtype, bias, adj_x, adj_y, left_format, right_format, out_format)
    inputs = []
    mod_data = [m_x, m_y]
    for i in range(add_n):
        # One random addend per Addn operand; each is also a launch argument.
        input = random_gaussian(out_shape, miu=1, sigma=0.1).astype(out_dtype)
        inputs.append(input)
        mod_data.append(input)
    # Host reference: sum of all addends plus the matmul benchmark.
    bench_mark = np.add(np.sum(inputs, axis=0), bench_mark)
    # Mirror the device-side TransData on the host: permute the fractal axes
    # and flatten the last four into the DefaultFormat 2-D tail.
    transpose_axis = []
    new_shape = []
    out_shape = list(out_shape)
    if out_format == 'zN':
        n1, m1, m0, n0 = out_shape[-4:]
        new_shape = out_shape[:-4] + [m1 * m0, n1 * n0]
        # NOTE(review): the fixed 5-entry axis list assumes exactly one
        # leading batch dimension — confirm for multi-batch shapes.
        transpose_axis = [0, 1+1, 2+1, 0+1, 3+1]
    elif out_format == 'zZ':
        m1, n1, m0, n0 = out_shape[-4:]
        new_shape = out_shape[:-4] + [m1 * m0, n1 * n0]
        transpose_axis = [0, 0+1, 2+1, 1+1, 3+1]
    bench_mark = bench_mark.transpose(transpose_axis)
    bench_mark = np.reshape(bench_mark,new_shape)
    # mod launch: output buffer is always the last argument; when a bias is
    # present it precedes the output to match the compiled signature.
    output = np.full(bench_mark.shape, np.nan, out_dtype)
    if bias == 0:
        mod_data.append(output)
        output = utils.mod_launch(mod, mod_data, expect=bench_mark)
    elif bias == 1:
        mod_data.append(bias_data)
        mod_data.append(output)
        output = utils.mod_launch(mod, mod_data, expect=bench_mark)
    # compare result
    rtol, atol = get_rtol_atol("matmul", dtype)
    compare_result = compare_tensor(output, bench_mark, rtol=rtol, atol=atol, equal_nan=True)
    return (m_x, m_y), output, bench_mark, compare_result
| 48.287037 | 185 | 0.671333 |
import akg.tvm
import numpy as np
from akg.utils import kernel_exec as utils
from akg.ops.math.ascend import MatMul
from tests.common.test_run.ascend.matmul_run import *
from akg.ops.math import Addn
from akg.ops.math import Add
def matmul_addn_transdata(x, y, adds, b, out_dtype, left_format="zZ", right_format="nZ", out_format="zN", transpose_x=False,
transpose_y=False, attrs={}, target='cce'):
matmul_res, attrs_mat = MatMul(x, y, b, out_dtype, left_format, right_format, out_format, transpose_x, transpose_y, attrs=attrs)
addn_res = Addn(adds, target=target)
res = Add(matmul_res, addn_res, target=target)
if out_format == 'zN':
n1, m1, m0, n0 = matmul_res.shape[-4:]
new_shape = matmul_res.shape[:-4] + [m1 * m0, n1 * n0]
elif out_format == 'zZ':
m1, n1, m0, n0 = matmul_res.shape[-4:]
new_shape = matmul_res.shape[:-4] + [m1 * m0, n1 * n0]
func = akg.tvm.get_global_func("TransData")
res = func([res], {"src_format" : "FRACTAL_NZ", "dst_format" : "DefaultFormat", "output_shape": new_shape})
return res, attrs_mat
def matmul_addn_transdata_compile(shape_x, shape_y, bias, add_n, left_format, right_format, output_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs, tuning=False):
batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y)
m = (m + 15) // 16 * 16
n = (n + 15) // 16 * 16
k = (k + 15) // 16 * 16
shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(m, n, k, batch_tuple, adj_x, adj_y, bias,
left_format, right_format, output_format)
addn_shapes = []
for i in range(add_n):
addn_shapes.append(out_shape)
input_shapes = [shape_xx, shape_yy, addn_shapes, bias_shape]
input_types = [dtype, dtype, out_dtype, bias_dtype]
has_bias = False
if bias == 1:
has_bias = True
op_attrs = [out_dtype, left_format, right_format, output_format, adj_x, adj_y, attrs]
if has_bias == False:
input_shapes = [shape_xx, shape_yy, addn_shapes]
input_types = [dtype, dtype, out_dtype]
op_attrs = [None, out_dtype, left_format, right_format, output_format, adj_x, adj_y, attrs]
return utils.op_build_test(matmul_addn_transdata, input_shapes, input_types, op_attrs, kernel_name, attrs=attrs, tuning=tuning)
def matmul_addn_transdata_execute(shape_x, shape_y, bias, add_n, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs={}):
batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y)
m = (m + 15) // 16 * 16
n = (n + 15) // 16 * 16
k = (k + 15) // 16 * 16
shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(m, n, k, batch_tuple, adj_x, adj_y, bias, left_format, right_format, out_format)
mod = matmul_addn_transdata_compile(shape_x, shape_y, bias, add_n, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs=attrs)
m_x, m_y, bench_mark, bias_data = matmul_data(batch_tuple, m, k, n, dtype, bias_dtype, out_dtype, bias, adj_x, adj_y, left_format, right_format, out_format)
inputs = []
mod_data = [m_x, m_y]
for i in range(add_n):
input = random_gaussian(out_shape, miu=1, sigma=0.1).astype(out_dtype)
inputs.append(input)
mod_data.append(input)
bench_mark = np.add(np.sum(inputs, axis=0), bench_mark)
transpose_axis = []
new_shape = []
out_shape = list(out_shape)
if out_format == 'zN':
n1, m1, m0, n0 = out_shape[-4:]
new_shape = out_shape[:-4] + [m1 * m0, n1 * n0]
transpose_axis = [0, 1+1, 2+1, 0+1, 3+1]
elif out_format == 'zZ':
m1, n1, m0, n0 = out_shape[-4:]
new_shape = out_shape[:-4] + [m1 * m0, n1 * n0]
transpose_axis = [0, 0+1, 2+1, 1+1, 3+1]
bench_mark = bench_mark.transpose(transpose_axis)
bench_mark = np.reshape(bench_mark,new_shape)
output = np.full(bench_mark.shape, np.nan, out_dtype)
if bias == 0:
mod_data.append(output)
output = utils.mod_launch(mod, mod_data, expect=bench_mark)
elif bias == 1:
mod_data.append(bias_data)
mod_data.append(output)
output = utils.mod_launch(mod, mod_data, expect=bench_mark)
rtol, atol = get_rtol_atol("matmul", dtype)
compare_result = compare_tensor(output, bench_mark, rtol=rtol, atol=atol, equal_nan=True)
return (m_x, m_y), output, bench_mark, compare_result
| true | true |
1c2ea46aad078a13c077e1ce5e6d9d9417d6e2d3 | 815 | py | Python | gan/PyTorch-GAN-Mnist/test.py | hadleyhzy34/GANs-practice | 37d1dcf4e4b492e6d070b0ba72f320913af9b17a | [
"MIT"
] | null | null | null | gan/PyTorch-GAN-Mnist/test.py | hadleyhzy34/GANs-practice | 37d1dcf4e4b492e6d070b0ba72f320913af9b17a | [
"MIT"
] | null | null | null | gan/PyTorch-GAN-Mnist/test.py | hadleyhzy34/GANs-practice | 37d1dcf4e4b492e6d070b0ba72f320913af9b17a | [
"MIT"
] | null | null | null | import torch
from torchvision import transforms
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Global matplotlib defaults for the generated-sample grid.
plt.rcParams['figure.figsize'] = (10.0, 8.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
def show_images(images):
    """Lay out a batch of flattened 28x28 images on a square subplot grid."""
    grid = int(np.ceil(np.sqrt(images.shape[0])))
    for pos, flat in enumerate(images, start=1):
        plt.subplot(grid, grid, pos)
        plt.imshow(flat.reshape(28, 28))
# Pick the GPU when one is available, otherwise fall back to the CPU.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print('GPU State:', device)
# Load the trained generator checkpoint and switch to inference mode.
G = torch.load('Generator_epoch_200.pth')
G.eval()
# Sample 16 latent vectors uniformly in [-1, 1) and generate fake digits.
noise = (torch.rand(16, 128)-0.5) / 0.5
noise = noise.to(device)
fake_image = G(noise)
# Map the generator's tanh-range output from [-1, 1] back to [0, 1] for display.
imgs_numpy = (fake_image.data.cpu().numpy()+1.0)/2.0
show_images(imgs_numpy)
plt.show()
| 21.447368 | 57 | 0.70184 | import torch
from torchvision import transforms
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
plt.rcParams['figure.figsize'] = (10.0, 8.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
def show_images(images):
sqrtn = int(np.ceil(np.sqrt(images.shape[0])))
for index, image in enumerate(images):
plt.subplot(sqrtn, sqrtn, index+1)
plt.imshow(image.reshape(28, 28))
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print('GPU State:', device)
G = torch.load('Generator_epoch_200.pth')
G.eval()
noise = (torch.rand(16, 128)-0.5) / 0.5
noise = noise.to(device)
fake_image = G(noise)
imgs_numpy = (fake_image.data.cpu().numpy()+1.0)/2.0
show_images(imgs_numpy)
plt.show()
| true | true |
1c2ea46c2dbae491088472dec33c9b4319f71f78 | 8,389 | py | Python | src/m2_functions.py | ElliotBoutell/ObjectsFunctionsAndMethods | 2f022fc7dbca5a5ad3b90cdb280e59b1ef28dd93 | [
"MIT"
] | null | null | null | src/m2_functions.py | ElliotBoutell/ObjectsFunctionsAndMethods | 2f022fc7dbca5a5ad3b90cdb280e59b1ef28dd93 | [
"MIT"
] | null | null | null | src/m2_functions.py | ElliotBoutell/ObjectsFunctionsAndMethods | 2f022fc7dbca5a5ad3b90cdb280e59b1ef28dd93 | [
"MIT"
] | null | null | null | """
Practice DEFINING and CALLING
FUNCTIONS
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Elliot Boutell.
"""
########################################################################
#
# Done: 1. PUT YOUR NAME IN THE ABOVE LINE and...
#
# Allow this file to use the rosegraphics.py file by marking the src
# directory as a "Sources Root". Do that by right clicking on the src folder,
# then selector Mark Directory As --> Sources Root
#
# Expand the import lines below and watch the red underlines disappear as you do that step.
# You will do that once for every project that uses rosegraphics so get used to it. :)
#
# Then run this module and look for more TO DO's later in the file.
########################################################################
import rosegraphics as rg
import random
def main():
    """
    Makes a TurtleWindow,
    calls the other functions in this module to demo them (turtle2 twice),
    and waits for the user to click anywhere in the window to close it.
    """
    # A TurtleWindow works "behind the scenes" to enable Turtle movement.
    window = rg.TurtleWindow()
    turtle1()
    turtle4()
    turtle5()
    turtle3()
    turtle2()
    turtle2()
    window.close_on_mouse_click()
def turtle1():
    """Construct a square-shaped SimpleTurtle that draws a yellow-filled circle
    with a thick aquamarine pen."""
    painter = rg.SimpleTurtle('square')
    painter.pen = rg.Pen('aquamarine', 30)
    painter.paint_bucket = rg.PaintBucket('yellow')
    painter.begin_fill()
    painter.draw_circle(150)
    painter.end_fill()
def turtle2():
    """Construct a triangle-shaped SimpleTurtle, send it to a RANDOM point,
    draw a line capped by a filled circle, then return it to where it started."""
    artist = rg.SimpleTurtle('triangle')
    artist.pen = rg.Pen('blue', 15)
    artist.paint_bucket = rg.PaintBucket('magenta')

    # Remember the current position so the turtle can come back at the end,
    # then pick a RANDOM point from which to draw.
    origin = rg.Point(artist.x_cor(), artist.y_cor())
    destination = rg.Point(random.randrange(-500, 500),
                           random.randrange(-300, 0))
    artist.pen_up()
    artist.go_to(destination)
    artist.pen_down()

    # The drawing itself: a stroke upward ending in a filled circle.
    artist.left(90)
    artist.forward(200)
    artist.begin_fill()
    artist.draw_circle(25)
    artist.end_fill()

    # Go back to the remembered starting position.
    artist.go_to(origin)
def turtle3():
    """Construct a default SimpleTurtle that walks forward 300 units and then
    draws a filled circle with a black pen."""
    walker = rg.SimpleTurtle()
    walker.pen = rg.Pen('black', 10)
    walker.forward(300)
    walker.begin_fill()
    walker.draw_circle(50)
    walker.end_fill()
def turtle4():
    """Back up 100 units and draw a circle with a thin purple pen."""
    stroller = rg.SimpleTurtle()
    stroller.pen = rg.Pen('purple', 3)
    stroller.backward(100)
    stroller.draw_circle(40)
def turtle5():
    """Demo with TWO turtles: one draws an orange square after turning around,
    the other draws a small blue circle and steps forward."""
    square_drawer = rg.SimpleTurtle()
    square_drawer.pen = rg.Pen('orange', 5)
    square_drawer.right(180)
    square_drawer.forward(100)
    square_drawer.draw_square(20)

    circle_drawer = rg.SimpleTurtle()
    circle_drawer.pen = rg.Pen('blue', 8)
    circle_drawer.left(45)
    circle_drawer.draw_circle(6)
    circle_drawer.forward(6)
########################################################################
#
# Done: 2.
# READ the code above. Be sure you understand:
# -- How many functions are defined above?
# (Answer: 4)
# -- For each function definition:
# -- Where does that function definition begin?
# Where does it end?
# -- How many times does main call the turtle1 function?
# (Answer: 1)
# -- How many times does main call the turtle2 function?
# (Hint: the answer is NOT 1.)
# -- What line of code calls the main function?
# (Answer: look at the LAST line of this module, far below.)
#
# ** ASK QUESTIONS if you are uncertain. **
#
# RELATE what is DRAWN to the CODE above. Be sure you understand:
# -- WHEN does the code in main run?
# -- WHEN does the code in turtle1 run?
# the code in turtle2 run?
# the code in turtle3 run?
# -- For each of the above, WHY does that code run when it does?
#
# ** ASK QUESTIONS if you are uncertain. **
#
# When you believe you understand the answers
# to all of the above questions, change the above TO DO to DONE.
#
########################################################################
########################################################################
#
# Done: 3.
# Define another function,
# immediately below the end of the definition of turtle3 above.
# Name your new function turtle4.
#
# The Python "pep8" coding standard says to leave exactly 2 blank
# lines between function definitions, so be sure to do so.
#
# Your new function should:
# 1. Define a SimpleTurtle (as turtle3 as other functions did).
# 2. Set your SimpleTurtle's
# pen
# to a new rg.Pen with a color and thickness of your own choosing.
# See the COLORS.txt file in this project for a list of legal color-names.
# 3. Make your SimpleTurtle move around a bit.
#
# ----------------------------------------------------------------------
# ** IMPORTANT: **
# ** Nothing fancy is required. **
# ** Save fancy stuff for exercises later today. **
# ----------------------------------------------------------------------
#
# BTW, if you see a RED underline, that means that there is
# a SYNTAX (notation) error at that line or elsewhere.
# Get help as needed to fix any such errors.
#
########################################################################
########################################################################
#
# Done: 4.
# Add a line to main that CALLS your new function immediately
# AFTER main calls turtle1. So:
# -- the SimpleTurtle from turtle1 should move,
# -- then YOUR SimpleTurtle should move,
# -- then the other 3 SimpleTurtles should move.
#
# Run this module. Check that there is another SimpleTurtle (yours)
# that uses the pen you chose and moves around a bit.
# If your code has errors (shows RED in the Console window)
# or does not do what it should, get help as needed to fix it.
#
########################################################################
########################################################################
#
# Done: 5.
# The previous two TODOs IMPLEMENTED a function (TO DO 3)
# and TESTED that function (TO DO 4).
#
# Now implement AND test one more function, defining it immediately
# below the definition of your turtle4 function.
# Name your new function turtle5.
#
# The Python "pep8" coding standard says to leave exactly 2 blank
# lines between function definitions, so be sure to do so.
#
# Your new function should define TWO new SimpleTurtles,
# set their characteristics (i.e., instance variables) as you choose,
# and make each move a bit.
#
# ----------------------------------------------------------------------
# ** IMPORTANT: **
# ** Nothing fancy is required. **
# ** Save fancy stuff for exercises later today. **
# ----------------------------------------------------------------------
#
# Get help as needed on this (and every!) exercise!
#
# As always COMMIT and Push your work as often as you want, but for sure after
# you have tested it and believe that it is correct.
#
# Reminder of those steps...
# COMMIT your work by selecting VCS from the menu bar, then select Commit Changes
# Make sure only the files you want to commit are checked and optionally
# add a quick Commit message to describe your work. Then hover over the
# Commit button and select Commit and Push. Commit saves the work to
# your computer. "and Push" saves a copy of your work up into your Github
# repository (saving to the cloud is a better way to permanently safe work).
#
########################################################################
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 34.101626 | 93 | 0.561092 |
module, far below.)
#
# ** ASK QUESTIONS if you are uncertain. **
#
# RELATE what is DRAWN to the CODE above. Be sure you understand:
# -- WHEN does the code in main run?
# -- WHEN does the code in turtle1 run?
# the code in turtle2 run?
# the code in turtle3 run?
# -- For each of the above, WHY does that code run when it does?
#
# ** ASK QUESTIONS if you are uncertain. **
#
# When you believe you understand the answers
# to all of the above questions, change the above TO DO to DONE.
#
########################################################################
########################################################################
#
# Done: 3.
# Define another function,
# immediately below the end of the definition of turtle3 above.
# Name your new function turtle4.
#
# The Python "pep8" coding standard says to leave exactly 2 blank
# lines between function definitions, so be sure to do so.
#
# Your new function should:
# 1. Define a SimpleTurtle (as turtle3 as other functions did).
# 2. Set your SimpleTurtle's
| true | true |
1c2ea51069614377d2b53c72c7b82fa4c473af18 | 3,559 | py | Python | anuvaad-etl/anuvaad-extractor/aligner/etl-aligner/kafkawrapper/alignmentwflowconsumer.py | ManavTriesStuff/anuvaad | 6993e3ac78818c171c173ccf8acf962ff57856a4 | [
"MIT"
] | 15 | 2021-01-08T08:42:30.000Z | 2022-03-12T17:52:15.000Z | anuvaad-etl/anuvaad-extractor/aligner/etl-aligner/kafkawrapper/alignmentwflowconsumer.py | ManavTriesStuff/anuvaad | 6993e3ac78818c171c173ccf8acf962ff57856a4 | [
"MIT"
] | 16 | 2021-01-21T01:38:51.000Z | 2022-01-20T08:59:52.000Z | anuvaad-etl/anuvaad-extractor/aligner/etl-aligner/kafkawrapper/alignmentwflowconsumer.py | ManavTriesStuff/anuvaad | 6993e3ac78818c171c173ccf8acf962ff57856a4 | [
"MIT"
] | 25 | 2020-08-26T11:25:38.000Z | 2022-03-29T04:40:21.000Z | import json
import logging
import random
import string
import threading
from kafka import KafkaConsumer, TopicPartition
from service.alignmentservice import AlignmentService
from utilities.alignmentutils import AlignmentUtils
from logging.config import dictConfig
from configs.alignerconfig import kafka_bootstrap_server_host
from configs.alignerconfig import align_job_consumer_grp
from configs.alignerconfig import anu_dp_wf_aligner_in_topic
from anuvaad_auditor.loghandler import log_info
from anuvaad_auditor.loghandler import log_exception
log = logging.getLogger('file')
class WflowConsumer:
def __init__(self):
pass
# Method to instantiate the kafka consumer
def instantiate(self, topics):
consumer = KafkaConsumer(*topics,
bootstrap_servers=list(str(kafka_bootstrap_server_host).split(",")),
api_version=(1, 0, 0),
group_id=align_job_consumer_grp,
auto_offset_reset='latest',
enable_auto_commit=True,
value_deserializer=lambda x: self.handle_json(x))
return consumer
# Method to read and process the requests from the kafka queue
def consume(self):
topics = [anu_dp_wf_aligner_in_topic]
consumer = self.instantiate(topics)
service = AlignmentService()
util = AlignmentUtils()
rand_str = ''.join(random.choice(string.ascii_letters) for i in range(4))
prefix = "Align-WFM-Consumer(" + rand_str + ")"
log_info(prefix + " running.......", None)
while True:
#thread_count = 0
for msg in consumer:
data = {}
try:
data = msg.value
if data:
log_info(prefix + " | Received on Topic: " + msg.topic + " | Partition: " + str(msg.partition), data)
service.wf_process(data)
break
except Exception as e:
log_exception("Exception while consuming: " + str(e), data, e)
util.error_handler("ALIGNER_CONSUMER_ERROR", "Exception while consuming", data, True)
break
# Method that provides a deserialiser for the kafka record.
def handle_json(self, x):
try:
return json.loads(x.decode('utf-8'))
except Exception as e:
log_exception("handle_json", "Exception while deserialising: ", None, e)
return {}
# Log config
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {
'info': {
'class': 'logging.FileHandler',
'level': 'DEBUG',
'formatter': 'default',
'filename': 'info.log'
},
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'default',
'stream': 'ext://sys.stdout',
}
},
'loggers': {
'file': {
'level': 'DEBUG',
'handlers': ['info', 'console'],
'propagate': ''
}
},
'root': {
'level': 'DEBUG',
'handlers': ['info', 'console']
}
})
| 35.237624 | 125 | 0.540882 | import json
import logging
import random
import string
import threading
from kafka import KafkaConsumer, TopicPartition
from service.alignmentservice import AlignmentService
from utilities.alignmentutils import AlignmentUtils
from logging.config import dictConfig
from configs.alignerconfig import kafka_bootstrap_server_host
from configs.alignerconfig import align_job_consumer_grp
from configs.alignerconfig import anu_dp_wf_aligner_in_topic
from anuvaad_auditor.loghandler import log_info
from anuvaad_auditor.loghandler import log_exception
log = logging.getLogger('file')
class WflowConsumer:
def __init__(self):
pass
def instantiate(self, topics):
consumer = KafkaConsumer(*topics,
bootstrap_servers=list(str(kafka_bootstrap_server_host).split(",")),
api_version=(1, 0, 0),
group_id=align_job_consumer_grp,
auto_offset_reset='latest',
enable_auto_commit=True,
value_deserializer=lambda x: self.handle_json(x))
return consumer
def consume(self):
topics = [anu_dp_wf_aligner_in_topic]
consumer = self.instantiate(topics)
service = AlignmentService()
util = AlignmentUtils()
rand_str = ''.join(random.choice(string.ascii_letters) for i in range(4))
prefix = "Align-WFM-Consumer(" + rand_str + ")"
log_info(prefix + " running.......", None)
while True:
for msg in consumer:
data = {}
try:
data = msg.value
if data:
log_info(prefix + " | Received on Topic: " + msg.topic + " | Partition: " + str(msg.partition), data)
service.wf_process(data)
break
except Exception as e:
log_exception("Exception while consuming: " + str(e), data, e)
util.error_handler("ALIGNER_CONSUMER_ERROR", "Exception while consuming", data, True)
break
def handle_json(self, x):
try:
return json.loads(x.decode('utf-8'))
except Exception as e:
log_exception("handle_json", "Exception while deserialising: ", None, e)
return {}
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {
'info': {
'class': 'logging.FileHandler',
'level': 'DEBUG',
'formatter': 'default',
'filename': 'info.log'
},
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'default',
'stream': 'ext://sys.stdout',
}
},
'loggers': {
'file': {
'level': 'DEBUG',
'handlers': ['info', 'console'],
'propagate': ''
}
},
'root': {
'level': 'DEBUG',
'handlers': ['info', 'console']
}
})
| true | true |
1c2ea52c32488d1ead6f2d2517e097d076dc1c24 | 13,804 | py | Python | circuit.py | Diimu/circuitsimulator | 3a4aeaaa89b5e9bba1598736140e32b3ca1e0da4 | [
"MIT"
] | null | null | null | circuit.py | Diimu/circuitsimulator | 3a4aeaaa89b5e9bba1598736140e32b3ca1e0da4 | [
"MIT"
] | 3 | 2019-09-08T18:34:27.000Z | 2019-09-08T21:51:18.000Z | circuit.py | Diimu/circuitsimulator | 3a4aeaaa89b5e9bba1598736140e32b3ca1e0da4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Class "Circuit" defines simple one port object/circuit having
frequency (single point or array), impedance, and possibly a list
of components that constitute the circuit.
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import skrf
class Circuit:
    """A one-port circuit with frequency f (scalar or numpy array),
    impedance Z and reference impedance Z0.

    Every matching component added through the ser*/par* methods updates
    self.Z and is recorded in self.components.  At a single (point)
    frequency each step's impedance transition is drawn on a Smith chart
    unless draw_smith is False.
    """

    def __init__(self, f, Z, Z0=50, draw_smith=True):
        self.Z = Z
        self.f = f
        self.Z0 = Z0
        self.draw_smith = draw_smith
        # self.components is a list of tuples containing the history of the
        # circuit, i.e., all components attached to the circuit so far.
        self.components = [('start', Z)]
        if self._single_frequency():
            # Mark the starting impedance on the Smith chart.
            skrf.plotting.plot_smith(self.refl(self.Z), marker='o', color='k',
                                     linestyle=None, x_label='', y_label='',
                                     title='Smith chart, matching network',
                                     label='start')

    def _single_frequency(self):
        """True when operating at a point frequency with Smith plotting enabled."""
        return np.size(self.f) == 1 and self.draw_smith

    def _plot_step(self, Z_path, label):
        """Draw one matching transition (an array of impedances tracing the
        step) on the Smith chart."""
        skrf.plotting.plot_smith(self.refl(Z_path), x_label='', y_label='',
                                 title='Smith chart, matching network',
                                 label=label)

    def __str__(self):
        """Human-readable summary: impedance at frequency plus the component
        list (point frequency only)."""
        if not isinstance(self.f, np.ndarray):
            freq, multi = find_si_prefix(self.f)
            ret = "{:.2f}+j{:2f} ohm at {:.2f} {:s}Hz:\n".format(self.Z.real, self.Z.imag, freq, multi)
        else:
            return "not implemented for freq range"
        for component in self.components:
            prefix = find_si_prefix(component[1], False)
            ct = ''    # component type
            cc = ''    # how the component is connected
            unit = ''
            n = component[0]
            # Classify by substrings of the recorded component tag.
            if n.find('cap') >= 0:
                ct = 'capacitor'
                unit = 'F'
            elif n.find('ind') >= 0:
                ct = 'inductor'
                unit = 'H'
            elif n.find('line') >= 0:
                ct = 'line'
                unit = '\pi'
            if n.find('ser') >= 0:
                cc = 'Series'
            elif n.find('par') >= 0:
                cc = 'Parallel'
            elif n.find('start') >= 0:
                cc = 'Start'
            ret += '{:s} {:s}: {:.2f} {:s}{:s}\n'.format(cc, ct, prefix[0], prefix[1], unit)
        return ret

    def cap(self, C):
        """Impedance of a capacitor C at this circuit's frequency."""
        return cap(self.f, C)

    def ind(self, L):
        """Impedance of an inductor L at this circuit's frequency."""
        return ind(self.f, L)

    def refl(self, Z=None):
        """Reflection coefficient against self.Z0 of Z, or of the present
        circuit when Z is omitted."""
        if Z is None:
            return refl(self.Z, self.Z0)
        else:
            return refl(Z, self.Z0)

    def ser(self, Z):
        """Add a fixed impedance Z in series with the present circuit."""
        self.components.append(('ser', Z))
        if self._single_frequency():
            # Sweep from a negligible series impedance (Z/1000) up to Z so
            # the transition traces a path on the Smith chart.
            path = ser(self.Z, np.logspace(np.log10(Z / 1000), np.log10(Z), 100))
            scaled_value = find_si_prefix(Z)
            self._plot_step(path, 'ser {:2g} {:s}$\Omega$'.format(scaled_value[0], scaled_value[1]))
        self.Z += Z

    def par(self, Z):
        """Add a fixed impedance Z in parallel with the present circuit."""
        self.components.append(('par', Z))
        if self._single_frequency():
            # Sweep from a huge parallel impedance (Z*1000, nearly open)
            # down to Z.
            path = par(self.Z, np.logspace(np.log10(Z * 1000), np.log10(Z), 100))
            scaled_value = find_si_prefix(Z)
            self._plot_step(path, 'par {:2g} {:s}$\Omega$'.format(scaled_value[0], scaled_value[1]))
        self.Z = par(Z, self.Z)

    def sercap(self, C):
        """Set a capacitor of C farads in series with the present circuit."""
        self.components.append(('sercap', C))
        if self._single_frequency():
            # A very large series capacitance barely changes the impedance,
            # so sweep from 1000*C down to C.
            path = ser(self.Z, self.cap(np.logspace(np.log10(C * 1000), np.log10(C), 100)))
            scaled_value = find_si_prefix(C)
            self._plot_step(path, 'ser {:2g} {:s}F'.format(scaled_value[0], scaled_value[1]))
        self.Z = ser(self.Z, self.cap(C))

    def serind(self, L):
        """Set an inductor of L henries in series with the present circuit."""
        self.components.append(('serind', L))
        if self._single_frequency():
            # A very small series inductance barely changes the impedance,
            # so sweep from L/1000 up to L.
            path = ser(self.Z, self.ind(np.logspace(np.log10(L / 1000), np.log10(L), 100)))
            scaled_value = find_si_prefix(L)
            self._plot_step(path, 'ser {:2g} {:s}H'.format(scaled_value[0], scaled_value[1]))
        self.Z = ser(self.Z, self.ind(L))

    def parcap(self, C):
        """Set a capacitor of C farads in parallel with the present circuit."""
        self.components.append(('parcap', C))
        if self._single_frequency():
            # A very small parallel capacitance barely changes the impedance,
            # so sweep from C/1000 up to C.
            path = par(self.Z, self.cap(np.logspace(np.log10(C / 1000), np.log10(C), 100)))
            scaled_value = find_si_prefix(C)
            # BUG FIX: the label previously said 'ser' for this parallel step.
            self._plot_step(path, 'par {:2g} {:s}F'.format(scaled_value[0], scaled_value[1]))
        self.Z = par(self.Z, self.cap(C))

    def parind(self, L):
        """Set an inductor of L henries in parallel with the present circuit."""
        self.components.append(('parind', L))
        if self._single_frequency():
            # A very large parallel inductance barely changes the impedance,
            # so sweep from L*1000 down to L.
            path = par(self.Z, self.ind(np.logspace(np.log10(L * 1000), np.log10(L), 100)))
            scaled_value = find_si_prefix(L)
            self._plot_step(path, 'par {:2g} {:s}H'.format(scaled_value[0], scaled_value[1]))
        self.Z = par(self.Z, self.ind(L))

    def serline(self, kl, Z0=None):
        """Transform the impedance as seen through a series transmission line
        of electrical length kl (radians) and characteristic impedance Z0
        (defaults to the circuit's Z0)."""
        if Z0 is None:
            Z0 = self.Z0
        self.components.append(('serline', kl, Z0))
        if self._single_frequency():
            path = line(self.Z, np.linspace(0, kl, 100), Z0)
            self._plot_step(path, 'line {:2g}'.format(kl))
        self.Z = line(self.Z, kl, Z0)

    def parline(self, kl, Zl=np.inf, Z0=None):
        """Add a transmission-line stub of electrical length kl, terminated in
        Zl, in parallel with the current impedance (defaults to an open-ended
        line with the circuit's Z0)."""
        if Z0 is None:
            Z0 = self.Z0
        self.components.append(('parline', kl, Zl, Z0))
        if self._single_frequency():
            path = par(self.Z, line(Zl, np.linspace(0, kl, 100), Z0))
            self._plot_step(path, 'par line {:2g}'.format(kl))
        # BUG FIX: combine the stub with the current impedance self.Z
        # (the old code used self.Z0, contradicting the plotted path above).
        self.Z = par(self.Z, line(Zl, kl, Z0))

    def db(self, noextrastuff=False):
        """For a point frequency return the reflection coefficient in dB;
        for a frequency range plot the response curve.  noextrastuff
        suppresses the labels and grid."""
        if np.size(self.f) == 1:
            return 20 * np.log10(abs(self.refl()))
        elif noextrastuff:
            plt.plot(self.f, 20 * np.log10(abs(self.refl())))
        else:
            plt.figure()
            plt.plot(self.f, 20 * np.log10(np.abs(self.refl())))
            plt.ylabel('Reflection coefficient, dB')
            plt.xlabel('Frequency, Hz')
            plt.grid()

    def smith(self, annotations=True, smith_r=1, chart_type='z', x_label='',
              y_label='', title='Smith chart, frequency', show_legend=True,
              axis='equal', ax=None, force_chart=False, *args, **kwargs):
        """Plot the current matching network on the Smith chart as a function
        of frequency (only meaningful for a frequency sweep)."""
        if np.size(self.f) > 1:
            plt.figure()
            skrf.plotting.plot_smith(self.refl(), smith_r=smith_r, chart_type=chart_type, x_label=x_label,
                                     y_label=y_label, title=title, show_legend=show_legend,
                                     axis=axis, ax=ax, force_chart=force_chart, *args, **kwargs)
            if annotations:
                # Annotate the first point, the last point, and the
                # best-match (minimum |reflection|) frequency.
                xy = (np.real(self.refl()[0]), np.imag(self.refl()[0]))
                plt.annotate('{:.2e}'.format(self.f[0]), xy=xy, xycoords='data', xytext=(xy[0] / np.abs(xy[0]), xy[1] / np.abs(xy[1])), textcoords='data', arrowprops=dict(arrowstyle="->")).draggable()
                xy = (np.real(self.refl()[-1]), np.imag(self.refl()[-1]))
                plt.annotate('{:.2e}'.format(self.f[-1]), xy=xy, xycoords='data', xytext=(xy[0] / np.abs(xy[0]), xy[1] / np.abs(xy[1])), textcoords='data', arrowprops=dict(arrowstyle="->")).draggable()
                ind = np.argmin(np.abs(self.refl()))
                xy = (np.real(self.refl()[ind]), np.imag(self.refl()[ind]))
                plt.annotate('{:.2e}\n{:.1f} dB'.format(self.f[ind], 20 * np.log10(np.abs(self.refl()[ind]))), xy=xy, xycoords='data', xytext=(xy[0] / np.abs(xy[0]) + 0.2, xy[1] / np.abs(xy[1]) - 0.2), textcoords='data', arrowprops=dict(arrowstyle="->")).draggable()
'''
After this point the file defines basic equations used in Circuit class
'''
def cap(f, C):
    """Impedance of an ideal capacitor, 1/(j*2*pi*f*C).

    Parameters
    ----------
    f : float or np.ndarray
        Frequency in Hz.
    C : float or np.ndarray
        Capacitance in farads.

    Returns
    -------
    complex or np.ndarray

    Raises
    ------
    ValueError
        For scalar inputs that are complex or negative.  Array inputs are
        not validated element-wise (same as the original behaviour).
    """
    scalar_inputs = not (isinstance(C, np.ndarray) or isinstance(f, np.ndarray))
    if scalar_inputs and (np.iscomplex(C) or f < 0 or C < 0):
        raise ValueError('frequency and capacitance must be real and non-negative')
    return 1/(1j*2*np.pi*f*C)
def ind(f, L):
    """Impedance of an ideal inductor at frequency f (Hz): j*omega*L."""
    # TODO: validate inputs (cap() rejects negative scalars; ind() does not).
    angular_frequency = 2*np.pi*f
    return 1j*angular_frequency*L
def ser(Z1, Z2, Z3=0, Z4=0, Z5=0, Z6=0, Z7=0, Z8=0, Z9=0, Z10=0):
    """Total impedance of up to ten series-connected components.

    Unused slots default to 0 ohm (a short, the series identity element).
    """
    return sum((Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8, Z9, Z10))
def par(Z1, Z2, Z3=np.inf, Z4=np.inf, Z5=np.inf, Z6=np.inf, Z7=np.inf, Z8=np.inf, Z9=np.inf, Z10=np.inf):
    """Combined impedance of up to ten parallel components.

    Unused slots default to np.inf (an open, the parallel identity element).
    """
    total_admittance = 1/Z1 + 1/Z2 + 1/Z3 + 1/Z4 + 1/Z5 + 1/Z6 + 1/Z7 + 1/Z8 + 1/Z9 + 1/Z10
    return 1/total_admittance
def refl(Z, Z0=50):
    """Reflection coefficient (Z - Z0)/(Z + Z0) against reference impedance Z0.

    A scalar open circuit (Z == inf with finite Z0) is special-cased to
    return 1, since the raw formula evaluates to nan there.  ndarray inputs
    go straight through the formula (inf entries are NOT special-cased),
    matching the original behaviour.
    """
    if not isinstance(Z, np.ndarray) and Z == np.inf and Z0 < np.inf:
        return 1
    return (Z-Z0)/(Z+Z0)
def find_si_prefix(x, use_latex = True):
    """Split *x* into (scaled value, SI prefix) with the value in (0.1, 100].

    >>> find_si_prefix(5000)
    (5.0, 'k')

    If no prefix between femto and giga brings the value into range (e.g.
    x == 0 or |x| > 100e9), the giga scaling is returned unchanged — no
    error is raised.
    """
    if use_latex:
        # Raw string: '\m' is an invalid escape sequence in a plain literal.
        prefixes = ['f', 'p', 'n', r'$\mu$', 'm', '', 'k', 'M', 'G']
    else:
        prefixes = ['f', 'p', 'n', 'u', 'm', '', 'k', 'M', 'G']
    multipliers = np.asarray([1e-15, 1e-12, 1e-9, 1e-6, 1e-3, 1, 1e3, 1e6, 1e9])
    # The small tolerance keeps values that are exactly 100x a multiplier in
    # range despite floating-point rounding.
    for n, k in enumerate(x/multipliers):
        if 0.1 < k <= (100 + 1e-10):
            break
    return (k, prefixes[n])
def line(Z, kl, Z0=50):
    """Input impedance of a lossless transmission line.

    Parameters
    ----------
    Z : load impedance in ohm; np.inf models an open-circuited stub.
    kl : electrical length in radians (may be an ndarray for a sweep).
    Z0 : characteristic impedance of the line.
    """
    if Z != np.inf:
        Z = Z0 * (Z+1j*Z0*np.tan(kl))/(Z0 + 1j* Z*np.tan(kl))
    else:
        # Open stub: Z_in = -j*Z0*cot(kl).  np.errstate restores the caller's
        # floating-point error settings on exit; the previous seterr() pair
        # unconditionally reset the divide mode to 'warn', clobbering any
        # configuration the caller had made.
        with np.errstate(divide='ignore'):
            # At kl == 0, true_divide yields inf instead of raising.
            Z = -1j*Z0*np.true_divide(1, np.tan(kl))
    return Z
def db(Z, Z0=50):
    """Magnitude of the reflection coefficient of Z against Z0, in dB."""
    if Z == Z0:
        # A perfect match reflects nothing; return -inf directly rather than
        # evaluating log10(0), which would emit a warning.
        return -np.inf
    return 20*np.log10(abs(refl(Z, Z0)))
if __name__ == '__main__':
    # Demo: build a matching network for a 30 ohm load at 64 MHz with
    # interactive plotting enabled (each step is drawn on the Smith chart).
    plt.ion()
    c = Circuit(64e6, 30)
    c.sercap(30e-12)
    c.parind(100e-9)
| 41.083333 | 262 | 0.562808 |
import numpy as np
import scipy
import matplotlib.pyplot as plt
import skrf
class Circuit:
    """Step-by-step impedance-matching network builder.

    Starting from a load impedance ``Z`` at frequency ``f`` (Hz), each
    ``ser*``/``par*`` method appends a component and updates the input
    impedance ``self.Z``.  For a single point frequency with ``draw_smith``
    enabled, every step is traced on a Smith chart via scikit-rf.
    """
    def __init__(self, f, Z, Z0=50, draw_smith=True):
        # f: frequency in Hz (scalar or ndarray); Z: starting load impedance
        # in ohm; Z0: reference impedance for reflection coefficients.
        self.Z = Z
        self.f = f
        self.Z0 = Z0
        self.draw_smith = draw_smith
        # (kind, value, ...) history of everything added; consumed by __str__.
        self.components = [('start', Z)]
        if np.size(self.f)==1 and self.draw_smith:
            skrf.plotting.plot_smith(self.refl(self.Z), marker='o', color='k', linestyle=None, x_label='',y_label='', title='Smith chart, matching network', label='start')
    def __str__(self):
        """Current impedance plus a one-line-per-component summary."""
        if not isinstance(self.f, np.ndarray):
            freq, multi = find_si_prefix(self.f)
            # BUGFIX: the imaginary part used '{:2f}' (width 2, default
            # 6-digit precision); '{:.2f}' matches the real part's format.
            ret = "{:.2f}+j{:.2f} ohm at {:.2f} {:s}Hz:\n".format(self.Z.real, self.Z.imag, freq, multi)
        else:
            return "not implemented for freq range"
        for component in self.components:
            prefix = find_si_prefix(component[1], False)
            ct = ''
            cc = ''
            unit = ''
            n = component[0]
            # Recover the component type from the method name in the history.
            if n.find('cap') >= 0:
                ct = 'capacitor'
                unit = 'F'
            elif n.find('ind') >= 0:
                ct = 'inductor'
                unit = 'H'
            elif n.find('line') >= 0:
                ct = 'line'
                # Raw string: '\p' is an invalid escape in a plain literal.
                unit = r'\pi'
            if n.find('ser') >= 0:
                cc = 'Series'
            elif n.find('par') >= 0:
                cc = 'Parallel'
            elif n.find('start') >= 0:
                cc = 'Start'
            ret += '{:s} {:s}: {:.2f} {:s}{:s}\n'.format(cc, ct, prefix[0], prefix[1], unit)
        return ret
    def cap(self, C):
        """Impedance of a capacitor C (farads) at this circuit's frequency."""
        return cap(self.f, C)
    def ind(self, L):
        """Impedance of an inductor L (henry) at this circuit's frequency."""
        return ind(self.f, L)
    def refl(self, Z=None):
        """Reflection coefficient of Z (default: current self.Z) against self.Z0."""
        if Z is None:
            return refl(self.Z, self.Z0)
        else:
            return refl(Z, self.Z0)
    def ser(self, Z):
        """Add an impedance Z (ohm) in series."""
        self.components.append(('ser', Z))
        if np.size(self.f)==1 and self.draw_smith:
            # Sweep the value from Z/1000 up to Z to trace the path on the chart.
            tmp = np.logspace(np.log10(Z/1000), np.log10(Z), 100)
            tmp = ser(self.Z, tmp)
            scaled_value = find_si_prefix(Z)
            label = r'ser {:2g} {:s}$\Omega$'.format(scaled_value[0], scaled_value[1])
            skrf.plotting.plot_smith(self.refl(tmp), x_label='',y_label='', title='Smith chart, matching network', label=label)
        self.Z += Z
    def par(self, Z):
        """Add an impedance Z (ohm) in parallel."""
        self.components.append(('par', Z))
        if np.size(self.f)==1 and self.draw_smith:
            # Parallel elements sweep downwards (Z*1000 -> Z): larger shunt
            # impedances perturb the network less.
            tmp = np.logspace(np.log10(Z*1000), np.log10(Z), 100)
            tmp = par(self.Z, tmp)
            scaled_value = find_si_prefix(Z)
            label = r'par {:2g} {:s}$\Omega$'.format(scaled_value[0], scaled_value[1])
            skrf.plotting.plot_smith(self.refl(tmp), x_label='',y_label='', title='Smith chart, matching network', label=label)
        self.Z = par(Z, self.Z)
    def sercap(self, C):
        """Add a series capacitor C (farads)."""
        self.components.append(('sercap', C))
        if np.size(self.f)==1 and self.draw_smith:
            tmp = self.cap(np.logspace(np.log10(C*1000), np.log10(C), 100))
            tmp = ser(self.Z, tmp)
            scaled_value = find_si_prefix(C)
            label = 'ser {:2g} {:s}F'.format(scaled_value[0], scaled_value[1])
            skrf.plotting.plot_smith(self.refl(tmp), x_label='',y_label='', title='Smith chart, matching network', label=label)
        self.Z = ser(self.Z, self.cap(C))
    def serind(self, L):
        """Add a series inductor L (henry)."""
        self.components.append(('serind', L))
        if np.size(self.f)==1 and self.draw_smith:
            tmp = self.ind(np.logspace(np.log10(L/1000), np.log10(L), 100))
            tmp = ser(self.Z, tmp)
            scaled_value = find_si_prefix(L)
            label = 'ser {:2g} {:s}H'.format(scaled_value[0], scaled_value[1])
            skrf.plotting.plot_smith(self.refl(tmp), x_label='',y_label='', title='Smith chart, matching network', label=label)
        self.Z = ser(self.Z, self.ind(L))
    def parcap(self, C):
        """Add a parallel (shunt) capacitor C (farads)."""
        self.components.append(('parcap', C))
        if np.size(self.f)==1 and self.draw_smith:
            tmp = self.cap(np.logspace(np.log10(C/1000), np.log10(C), 100))
            tmp = par(self.Z, tmp)
            scaled_value = find_si_prefix(C)
            # BUGFIX: the trace label said 'ser'; this is a parallel capacitor.
            label = 'par {:2g} {:s}F'.format(scaled_value[0], scaled_value[1])
            skrf.plotting.plot_smith(self.refl(tmp), x_label='',y_label='', title='Smith chart, matching network', label=label)
        self.Z = par(self.Z, self.cap(C))
    def parind(self, L):
        """Add a parallel (shunt) inductor L (henry)."""
        self.components.append(('parind', L))
        if np.size(self.f)==1 and self.draw_smith:
            tmp = self.ind(np.logspace(np.log10(L*1000), np.log10(L), 100))
            tmp = par(self.Z, tmp)
            scaled_value = find_si_prefix(L)
            label = 'par {:2g} {:s}H'.format(scaled_value[0], scaled_value[1])
            skrf.plotting.plot_smith(self.refl(tmp), x_label='',y_label='', title='Smith chart, matching network', label=label)
        self.Z = par(self.Z, self.ind(L))
    def serline(self, kl, Z0=None):
        """Insert a series transmission line of electrical length kl (radians).

        The line's characteristic impedance defaults to self.Z0.
        """
        if Z0 is None:
            Z0 = self.Z0
        self.components.append(('serline', kl, Z0))
        if np.size(self.f)==1 and self.draw_smith:
            tmp = line(self.Z, np.linspace(0, kl, 100), Z0)
            label = 'line {:2g}'.format(kl)
            skrf.plotting.plot_smith(self.refl(tmp), x_label='',y_label='', title='Smith chart, matching network', label=label)
        self.Z = line(self.Z, kl, Z0)
    def parline(self, kl, Zl=np.inf, Z0=None):
        """Add a parallel (shunt) stub of electrical length kl (radians),
        terminated in Zl (default: open circuit).  Characteristic impedance
        defaults to self.Z0.
        """
        if Z0 is None:
            Z0 = self.Z0
        self.components.append(('parline', kl, Zl, Z0))
        if np.size(self.f)==1 and self.draw_smith:
            tmp = par(self.Z, line(Zl, np.linspace(0, kl, 100), Z0))
            label = 'par line {:2g}'.format(kl)
            skrf.plotting.plot_smith(self.refl(tmp), x_label='',y_label='', title='Smith chart, matching network', label=label)
        # BUGFIX: previously combined the stub with self.Z0 (the reference
        # impedance) instead of self.Z, discarding the network built so far.
        # The plotted trace above already (correctly) used self.Z.
        self.Z = par(self.Z, line(Zl, kl, Z0))
    def db(self, noextrastuff=False):
        """Reflection coefficient in dB: returned as a number for a point
        frequency, plotted against frequency for a sweep.  noextrastuff
        skips the new figure, labels and grid."""
        if np.size(self.f)==1:
            return 20*np.log10(abs(self.refl()))
        elif noextrastuff:
            plt.plot(self.f, 20*np.log10(abs(self.refl())))
        else:
            plt.figure()
            plt.plot(self.f, 20*np.log10(np.abs(self.refl())))
            plt.ylabel('Reflection coefficient, dB')
            plt.xlabel('Frequency, Hz')
            plt.grid()
    def smith(self, annotations=True, smith_r=1, chart_type='z', x_label='',
              y_label='', title='Smith chart, frequency', show_legend=True,
              axis='equal', ax=None, force_chart=False, *args, **kwargs):
        """Plot the reflection coefficient over the frequency sweep on a
        Smith chart; optionally annotate the first, last and best-match
        points.  Does nothing for a single point frequency.  Extra arguments
        are forwarded to skrf.plotting.plot_smith."""
        if np.size(self.f)>1:
            plt.figure()
            skrf.plotting.plot_smith(self.refl(), smith_r=smith_r, chart_type=chart_type, x_label=x_label,
                        y_label=y_label, title=title, show_legend=show_legend,
                        axis=axis, ax=ax, force_chart=force_chart, *args, **kwargs)
            if annotations:
                # Endpoint labels, pushed out towards the unit circle.
                xy = (np.real(self.refl()[0]), np.imag(self.refl()[0]))
                plt.annotate('{:.2e}'.format(self.f[0]), xy=xy, xycoords='data', xytext=(xy[0]/np.abs(xy[0]), xy[1]/np.abs(xy[1])), textcoords='data', arrowprops=dict(arrowstyle="->")).draggable()
                xy = (np.real(self.refl()[-1]), np.imag(self.refl()[-1]))
                plt.annotate('{:.2e}'.format(self.f[-1]), xy=xy, xycoords='data', xytext=(xy[0]/np.abs(xy[0]), xy[1]/np.abs(xy[1])), textcoords='data', arrowprops=dict(arrowstyle="->")).draggable()
                # Best match: frequency with the smallest |reflection|.
                # (Renamed from 'ind' to avoid shadowing the module helper.)
                idx = np.argmin(np.abs(self.refl()))
                xy = (np.real(self.refl()[idx]), np.imag(self.refl()[idx]))
                plt.annotate('{:.2e}\n{:.1f} dB'.format(self.f[idx], 20*np.log10(np.abs(self.refl()[idx]))), xy=xy, xycoords='data', xytext=(xy[0]/np.abs(xy[0])+0.2, xy[1]/np.abs(xy[1])-0.2), textcoords='data', arrowprops=dict(arrowstyle="->")).draggable()
def cap(f, C):
    """Impedance of an ideal capacitor, 1/(j*2*pi*f*C).

    Parameters
    ----------
    f : float or np.ndarray
        Frequency in Hz.
    C : float or np.ndarray
        Capacitance in farads.

    Returns
    -------
    complex or np.ndarray

    Raises
    ------
    ValueError
        For scalar inputs that are complex or negative.  Array inputs are
        not validated element-wise (same as the original behaviour).
    """
    scalar_inputs = not (isinstance(C, np.ndarray) or isinstance(f, np.ndarray))
    if scalar_inputs and (np.iscomplex(C) or f < 0 or C < 0):
        raise ValueError('frequency and capacitance must be real and non-negative')
    return 1/(1j*2*np.pi*f*C)
def ind(f, L):
    """Impedance of an ideal inductor at frequency f (Hz): j*omega*L."""
    angular_frequency = 2*np.pi*f
    return 1j*angular_frequency*L
def ser(Z1, Z2, Z3=0, Z4=0, Z5=0, Z6=0, Z7=0, Z8=0, Z9=0, Z10=0):
    """Total impedance of up to ten series components (unused slots are 0 ohm)."""
    return sum((Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8, Z9, Z10))
def par(Z1, Z2, Z3=np.inf, Z4=np.inf, Z5=np.inf, Z6=np.inf, Z7=np.inf, Z8=np.inf, Z9=np.inf, Z10=np.inf):
    """Combined impedance of up to ten parallel components (unused slots are open, np.inf)."""
    total_admittance = 1/Z1 + 1/Z2 + 1/Z3 + 1/Z4 + 1/Z5 + 1/Z6 + 1/Z7 + 1/Z8 + 1/Z9 + 1/Z10
    return 1/total_admittance
def refl(Z, Z0=50):
    """Reflection coefficient (Z - Z0)/(Z + Z0) against reference impedance Z0.

    A scalar open circuit (Z == inf with finite Z0) is special-cased to
    return 1, since the raw formula evaluates to nan there.  ndarray inputs
    go straight through the formula (inf entries are NOT special-cased),
    matching the original behaviour.
    """
    if not isinstance(Z, np.ndarray) and Z == np.inf and Z0 < np.inf:
        return 1
    return (Z-Z0)/(Z+Z0)
def find_si_prefix(x, use_latex = True):
    """Split *x* into (scaled value, SI prefix) with the value in (0.1, 100].

    If no prefix between femto and giga brings the value into range (e.g.
    x == 0 or |x| > 100e9), the giga scaling is returned unchanged — no
    error is raised.
    """
    if use_latex:
        # Raw string: '\m' is an invalid escape sequence in a plain literal.
        prefixes = ['f', 'p', 'n', r'$\mu$', 'm', '', 'k', 'M', 'G']
    else:
        prefixes = ['f', 'p', 'n', 'u', 'm', '', 'k', 'M', 'G']
    multipliers = np.asarray([1e-15, 1e-12, 1e-9, 1e-6, 1e-3, 1, 1e3, 1e6, 1e9])
    # The small tolerance keeps values that are exactly 100x a multiplier in
    # range despite floating-point rounding.
    for n, k in enumerate(x/multipliers):
        if 0.1 < k <= (100 + 1e-10):
            break
    return (k, prefixes[n])
def line(Z, kl, Z0=50):
    """Input impedance of a lossless transmission line.

    Parameters
    ----------
    Z : load impedance in ohm; np.inf models an open-circuited stub.
    kl : electrical length in radians (may be an ndarray for a sweep).
    Z0 : characteristic impedance of the line.
    """
    if Z != np.inf:
        Z = Z0 * (Z+1j*Z0*np.tan(kl))/(Z0 + 1j* Z*np.tan(kl))
    else:
        # Open stub: Z_in = -j*Z0*cot(kl).  np.errstate restores the caller's
        # floating-point error settings on exit; the previous seterr() pair
        # unconditionally reset the divide mode to 'warn', clobbering any
        # configuration the caller had made.
        with np.errstate(divide='ignore'):
            # At kl == 0, true_divide yields inf instead of raising.
            Z = -1j*Z0*np.true_divide(1, np.tan(kl))
    return Z
def db(Z, Z0=50):
    """Magnitude of the reflection coefficient of Z against Z0, in dB."""
    if Z == Z0:
        # A perfect match reflects nothing; return -inf directly rather than
        # evaluating log10(0), which would emit a warning.
        return -np.inf
    return 20*np.log10(abs(refl(Z, Z0)))
if __name__ == '__main__':
    # Demo: build a matching network for a 30 ohm load at 64 MHz with
    # interactive plotting enabled (each step is drawn on the Smith chart).
    plt.ion()
    c = Circuit(64e6, 30)
    c.sercap(30e-12)
    c.parind(100e-9)
| true | true |
1c2ea563c1498911db332ccc6f834146e3a96988 | 105,906 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/aio/operations/_network_watchers_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/aio/operations/_network_watchers_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/aio/operations/_network_watchers_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkWatchersOperations:
"""NetworkWatchersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to send all HTTP requests.
        self._client = client
        # Serializer/deserializer pair for request bodies and responses.
        self._serialize = serializer
        self._deserialize = deserializer
        # Client configuration (subscription id, polling interval, ...).
        self._config = config
    async def create_or_update(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.NetworkWatcher",
        **kwargs: Any
    ) -> "_models.NetworkWatcher":
        """Creates or updates a network watcher in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param parameters: Parameters that define the network watcher resource.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.NetworkWatcher
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkWatcher, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_04_01.models.NetworkWatcher
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcher"]
        # Default mapping of HTTP status codes to azure-core exception types;
        # callers may extend/override entries via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.create_or_update.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'NetworkWatcher')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both 200 (updated) and 201 (created) carry a NetworkWatcher body.
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkWatcher', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('NetworkWatcher', pipeline_response)
        if cls:
            # Hand the raw response to the caller-supplied transform.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
    async def get(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        **kwargs: Any
    ) -> "_models.NetworkWatcher":
        """Gets the specified network watcher by resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkWatcher, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_04_01.models.NetworkWatcher
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcher"]
        # Status-code -> exception mapping; extendable via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkWatcher', pipeline_response)
        if cls:
            # Hand the raw response to the caller-supplied transform.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial DELETE request for the long-running delete;
        polling to completion is handled by :meth:`begin_delete`."""
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 202 (accepted) and 204 (no content) are the expected success codes.
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified network watcher resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial DELETE.  The identity 'cls'
            # passthrough keeps the raw PipelineResponse for the poller.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; remove them so they
        # are not passed on to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete returns no body; only invoke the custom 'cls' transform.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Final state of the LRO is read from the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
    async def update_tags(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.NetworkWatcher":
        """Updates a network watcher tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param parameters: Parameters supplied to update network watcher tags.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkWatcher, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_04_01.models.NetworkWatcher
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcher"]
        # Status-code -> exception mapping; extendable via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update_tags.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        # Tags-only update is sent as PATCH (create_or_update uses PUT).
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkWatcher', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.NetworkWatcherListResult"]:
        """Gets all network watchers by resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkWatcherListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.NetworkWatcherListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcherListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for either the first page or a next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # A service-provided next link already carries its query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # Continuation token is always None: the result comes back in a
            # single page.
            return None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers'} # type: ignore
    def list_all(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.NetworkWatcherListResult"]:
        """Gets all network watchers by subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkWatcherListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.NetworkWatcherListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcherListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for either the first page or a next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # A service-provided next link already carries its query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # Continuation token is always None: the result comes back in a
            # single page.
            return None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkWatchers'} # type: ignore
async def get_topology(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    parameters: "_models.TopologyParameters",
    **kwargs: Any
) -> "_models.Topology":
    """Gets the current network topology by resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param parameters: Parameters that define the representation of topology.
    :type parameters: ~azure.mgmt.network.v2019_04_01.models.TopologyParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Topology, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_04_01.models.Topology
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Topology"]
    # Map auth/not-found/conflict status codes to typed exceptions; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-04-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.get_topology.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the request body and send the POST through the pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'TopologyParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        # map_error raises the mapped exception type if the status code is in
        # error_map; otherwise fall through to a generic HttpResponseError
        # carrying the (best-effort) deserialized ErrorResponse model.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Topology', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_topology.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/topology'}  # type: ignore
async def _verify_ip_flow_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    parameters: "_models.VerificationIPFlowParameters",
    **kwargs: Any
) -> "_models.VerificationIPFlowResult":
    """Make the initial request of the verify-IP-flow long-running operation.

    Serializes ``parameters`` as the POST body and deserializes the response
    body for both the 200 and 202 status codes; any other status raises
    :class:`~azure.core.exceptions.HttpResponseError`. Used internally by
    :meth:`begin_verify_ip_flow`, which drives the operation via a poller.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VerificationIPFlowResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-04-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._verify_ip_flow_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'VerificationIPFlowParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200: result available immediately; 202: operation accepted, body still
    # deserialized so the poller has an initial value to work from.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)

    if response.status_code == 202:
        deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_verify_ip_flow_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'}  # type: ignore
async def begin_verify_ip_flow(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    parameters: "_models.VerificationIPFlowParameters",
    **kwargs: Any
) -> AsyncLROPoller["_models.VerificationIPFlowResult"]:
    """Verify IP flow from the specified VM to a location given the currently configured NSG rules.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param parameters: Parameters that define the IP flow to be verified.
    :type parameters: ~azure.mgmt.network.v2019_04_01.models.VerificationIPFlowParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VerificationIPFlowResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.VerificationIPFlowResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VerificationIPFlowResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation. ``cls=lambda x,y,z: x`` makes
        # the initial call return the raw pipeline response, which the poller
        # needs to track the operation's progress.
        raw_result = await self._verify_ip_flow_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs apply only to the initial request; drop them so they are
    # not forwarded to the polling requests below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response once the operation completes.
        deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # 'final-state-via: location' — the final result is fetched from the
    # URL in the Location header of the initial 202 response.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_verify_ip_flow.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'}  # type: ignore
async def _get_next_hop_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    parameters: "_models.NextHopParameters",
    **kwargs: Any
) -> "_models.NextHopResult":
    """Make the initial request of the get-next-hop long-running operation.

    Serializes ``parameters`` as the POST body and deserializes the response
    body for both the 200 and 202 status codes; any other status raises
    :class:`~azure.core.exceptions.HttpResponseError`. Used internally by
    :meth:`begin_get_next_hop`, which drives the operation via a poller.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NextHopResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-04-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._get_next_hop_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'NextHopParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200: result available immediately; 202: operation accepted, body still
    # deserialized so the poller has an initial value to work from.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('NextHopResult', pipeline_response)

    if response.status_code == 202:
        deserialized = self._deserialize('NextHopResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_get_next_hop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'}  # type: ignore
async def begin_get_next_hop(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    parameters: "_models.NextHopParameters",
    **kwargs: Any
) -> AsyncLROPoller["_models.NextHopResult"]:
    """Gets the next hop from the specified VM.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param parameters: Parameters that define the source and destination endpoint.
    :type parameters: ~azure.mgmt.network.v2019_04_01.models.NextHopParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either NextHopResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.NextHopResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NextHopResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation. ``cls=lambda x,y,z: x`` makes
        # the initial call return the raw pipeline response, which the poller
        # needs to track the operation's progress.
        raw_result = await self._get_next_hop_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs apply only to the initial request; drop them so they are
    # not forwarded to the polling requests below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response once the operation completes.
        deserialized = self._deserialize('NextHopResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # 'final-state-via: location' — the final result is fetched from the
    # URL in the Location header of the initial 202 response.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_next_hop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'}  # type: ignore
async def _get_vm_security_rules_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    parameters: "_models.SecurityGroupViewParameters",
    **kwargs: Any
) -> "_models.SecurityGroupViewResult":
    """Make the initial request of the get-VM-security-rules long-running operation.

    Serializes ``parameters`` as the POST body and deserializes the response
    body for both the 200 and 202 status codes; any other status raises
    :class:`~azure.core.exceptions.HttpResponseError`. Used internally by
    :meth:`begin_get_vm_security_rules`, which drives the operation via a poller.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityGroupViewResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-04-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._get_vm_security_rules_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'SecurityGroupViewParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200: result available immediately; 202: operation accepted, body still
    # deserialized so the poller has an initial value to work from.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)

    if response.status_code == 202:
        deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_get_vm_security_rules_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'}  # type: ignore
async def begin_get_vm_security_rules(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    parameters: "_models.SecurityGroupViewParameters",
    **kwargs: Any
) -> AsyncLROPoller["_models.SecurityGroupViewResult"]:
    """Gets the configured and effective security group rules on the specified VM.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param parameters: Parameters that define the VM to check security groups for.
    :type parameters: ~azure.mgmt.network.v2019_04_01.models.SecurityGroupViewParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either SecurityGroupViewResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.SecurityGroupViewResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityGroupViewResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation. ``cls=lambda x,y,z: x`` makes
        # the initial call return the raw pipeline response, which the poller
        # needs to track the operation's progress.
        raw_result = await self._get_vm_security_rules_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs apply only to the initial request; drop them so they are
    # not forwarded to the polling requests below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response once the operation completes.
        deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # 'final-state-via: location' — the final result is fetched from the
    # URL in the Location header of the initial 202 response.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vm_security_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'}  # type: ignore
async def _get_troubleshooting_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    parameters: "_models.TroubleshootingParameters",
    **kwargs: Any
) -> "_models.TroubleshootingResult":
    """Make the initial request of the get-troubleshooting long-running operation.

    Serializes ``parameters`` as the POST body and deserializes the response
    body for both the 200 and 202 status codes; any other status raises
    :class:`~azure.core.exceptions.HttpResponseError`. Used internally by
    :meth:`begin_get_troubleshooting`, which drives the operation via a poller.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.TroubleshootingResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-04-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._get_troubleshooting_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'TroubleshootingParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200: result available immediately; 202: operation accepted, body still
    # deserialized so the poller has an initial value to work from.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('TroubleshootingResult', pipeline_response)

    if response.status_code == 202:
        deserialized = self._deserialize('TroubleshootingResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_get_troubleshooting_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'}  # type: ignore
async def begin_get_troubleshooting(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    parameters: "_models.TroubleshootingParameters",
    **kwargs: Any
) -> AsyncLROPoller["_models.TroubleshootingResult"]:
    """Initiate troubleshooting on a specified resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher resource.
    :type network_watcher_name: str
    :param parameters: Parameters that define the resource to troubleshoot.
    :type parameters: ~azure.mgmt.network.v2019_04_01.models.TroubleshootingParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either TroubleshootingResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.TroubleshootingResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.TroubleshootingResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation. ``cls=lambda x,y,z: x`` makes
        # the initial call return the raw pipeline response, which the poller
        # needs to track the operation's progress.
        raw_result = await self._get_troubleshooting_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs apply only to the initial request; drop them so they are
    # not forwarded to the polling requests below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response once the operation completes.
        deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # 'final-state-via: location' — the final result is fetched from the
    # URL in the Location header of the initial 202 response.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_troubleshooting.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'}  # type: ignore
async def _get_troubleshooting_result_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    parameters: "_models.QueryTroubleshootingParameters",
    **kwargs: Any
) -> "_models.TroubleshootingResult":
    """Make the initial request of the get-troubleshooting-result long-running operation.

    Serializes ``parameters`` as the POST body and deserializes the response
    body for both the 200 and 202 status codes; any other status raises
    :class:`~azure.core.exceptions.HttpResponseError`. Used internally by
    :meth:`begin_get_troubleshooting_result`, which drives the operation via a poller.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.TroubleshootingResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-04-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._get_troubleshooting_result_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'QueryTroubleshootingParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200: result available immediately; 202: operation accepted, body still
    # deserialized so the poller has an initial value to work from.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('TroubleshootingResult', pipeline_response)

    if response.status_code == 202:
        deserialized = self._deserialize('TroubleshootingResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_get_troubleshooting_result_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'}  # type: ignore
async def begin_get_troubleshooting_result(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    parameters: "_models.QueryTroubleshootingParameters",
    **kwargs: Any
) -> AsyncLROPoller["_models.TroubleshootingResult"]:
    """Get the last completed troubleshooting result on a specified resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher resource.
    :type network_watcher_name: str
    :param parameters: Parameters that define the resource to query the troubleshooting result.
    :type parameters: ~azure.mgmt.network.v2019_04_01.models.QueryTroubleshootingParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either TroubleshootingResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.TroubleshootingResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.TroubleshootingResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation. ``cls=lambda x,y,z: x`` makes
        # the initial call return the raw pipeline response, which the poller
        # needs to track the operation's progress.
        raw_result = await self._get_troubleshooting_result_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs apply only to the initial request; drop them so they are
    # not forwarded to the polling requests below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response once the operation completes.
        deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # 'final-state-via: location' — the final result is fetched from the
    # URL in the Location header of the initial 202 response.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_troubleshooting_result.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'}  # type: ignore
    async def _set_flow_log_configuration_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.FlowLogInformation",
        **kwargs: Any
    ) -> "_models.FlowLogInformation":
        """Send the initial POST of the configure-flow-log long-running operation.

        Serializes ``parameters`` as ``FlowLogInformation`` and POSTs it to the
        network watcher's ``configureFlowLog`` endpoint.  A 200 or 202 response
        body is deserialized back into ``FlowLogInformation``; any other status
        raises ``HttpResponseError`` (subject to ``error_map`` overrides).
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
        # Default mapping of error status codes to exception types; callers may
        # extend/override it via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._set_flow_log_configuration_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'FlowLogInformation')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both the immediate (200) and accepted (202) responses carry the same
        # payload shape, so each branch deserializes the same model.
        if response.status_code == 200:
            deserialized = self._deserialize('FlowLogInformation', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('FlowLogInformation', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _set_flow_log_configuration_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'} # type: ignore
    async def begin_set_flow_log_configuration(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.FlowLogInformation",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.FlowLogInformation"]:
        """Configures flow log and traffic analytics (optional) on a specified resource.
        :param resource_group_name: The name of the network watcher resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher resource.
        :type network_watcher_name: str
        :param parameters: Parameters that define the configuration of flow log.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.FlowLogInformation
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either FlowLogInformation or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.FlowLogInformation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request.  cls=lambda returns the
            # raw pipeline response unchanged so the poller can drive the LRO.
            raw_result = await self._set_flow_log_configuration_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs applied only to the initial request; drop them so they are
        # not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO payload, honoring a caller-supplied cls.
            deserialized = self._deserialize('FlowLogInformation', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> default ARM poller (configured with
        # 'final-state-via': 'location'); polling=False -> no polling;
        # otherwise the caller supplied their own AsyncPollingMethod.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new LRO.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_set_flow_log_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'} # type: ignore
    async def _get_flow_log_status_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.FlowLogStatusParameters",
        **kwargs: Any
    ) -> "_models.FlowLogInformation":
        """Send the initial POST of the query-flow-log-status long-running operation.

        Serializes ``parameters`` as ``FlowLogStatusParameters`` and POSTs it to
        the network watcher's ``queryFlowLogStatus`` endpoint.  A 200 or 202
        response body is deserialized into ``FlowLogInformation``; any other
        status raises ``HttpResponseError`` (subject to ``error_map`` overrides).
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
        # Default mapping of error status codes to exception types; callers may
        # extend/override it via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._get_flow_log_status_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'FlowLogStatusParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both the immediate (200) and accepted (202) responses carry the same
        # payload shape, so each branch deserializes the same model.
        if response.status_code == 200:
            deserialized = self._deserialize('FlowLogInformation', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('FlowLogInformation', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_flow_log_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'} # type: ignore
    async def begin_get_flow_log_status(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.FlowLogStatusParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.FlowLogInformation"]:
        """Queries status of flow log and traffic analytics (optional) on a specified resource.
        :param resource_group_name: The name of the network watcher resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher resource.
        :type network_watcher_name: str
        :param parameters: Parameters that define a resource to query flow log and traffic analytics
         (optional) status.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.FlowLogStatusParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either FlowLogInformation or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.FlowLogInformation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request.  cls=lambda returns the
            # raw pipeline response unchanged so the poller can drive the LRO.
            raw_result = await self._get_flow_log_status_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs applied only to the initial request; drop them so they are
        # not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO payload, honoring a caller-supplied cls.
            deserialized = self._deserialize('FlowLogInformation', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> default ARM poller (configured with
        # 'final-state-via': 'location'); polling=False -> no polling;
        # otherwise the caller supplied their own AsyncPollingMethod.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new LRO.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_flow_log_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'} # type: ignore
    async def _check_connectivity_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.ConnectivityParameters",
        **kwargs: Any
    ) -> "_models.ConnectivityInformation":
        """Send the initial POST of the connectivity-check long-running operation.

        Serializes ``parameters`` as ``ConnectivityParameters`` and POSTs it to
        the network watcher's ``connectivityCheck`` endpoint.  A 200 or 202
        response body is deserialized into ``ConnectivityInformation``; any other
        status raises ``HttpResponseError`` (subject to ``error_map`` overrides).
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectivityInformation"]
        # Default mapping of error status codes to exception types; callers may
        # extend/override it via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._check_connectivity_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ConnectivityParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both the immediate (200) and accepted (202) responses carry the same
        # payload shape, so each branch deserializes the same model.
        if response.status_code == 200:
            deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _check_connectivity_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'} # type: ignore
    async def begin_check_connectivity(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.ConnectivityParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ConnectivityInformation"]:
        """Verifies the possibility of establishing a direct TCP connection from a virtual machine to a
        given endpoint including another VM or an arbitrary remote server.
        :param resource_group_name: The name of the network watcher resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher resource.
        :type network_watcher_name: str
        :param parameters: Parameters that determine how the connectivity check will be performed.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.ConnectivityParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ConnectivityInformation or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.ConnectivityInformation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectivityInformation"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request.  cls=lambda returns the
            # raw pipeline response unchanged so the poller can drive the LRO.
            raw_result = await self._check_connectivity_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs applied only to the initial request; drop them so they are
        # not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO payload, honoring a caller-supplied cls.
            deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> default ARM poller (configured with
        # 'final-state-via': 'location'); polling=False -> no polling;
        # otherwise the caller supplied their own AsyncPollingMethod.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new LRO.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_check_connectivity.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'} # type: ignore
    async def _get_azure_reachability_report_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.AzureReachabilityReportParameters",
        **kwargs: Any
    ) -> "_models.AzureReachabilityReport":
        """Send the initial POST of the Azure-reachability-report long-running operation.

        Serializes ``parameters`` as ``AzureReachabilityReportParameters`` and
        POSTs it to the network watcher's ``azureReachabilityReport`` endpoint.
        A 200 or 202 response body is deserialized into
        ``AzureReachabilityReport``; any other status raises
        ``HttpResponseError`` (subject to ``error_map`` overrides).
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureReachabilityReport"]
        # Default mapping of error status codes to exception types; callers may
        # extend/override it via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._get_azure_reachability_report_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'AzureReachabilityReportParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both the immediate (200) and accepted (202) responses carry the same
        # payload shape, so each branch deserializes the same model.
        if response.status_code == 200:
            deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_azure_reachability_report_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'} # type: ignore
    async def begin_get_azure_reachability_report(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.AzureReachabilityReportParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.AzureReachabilityReport"]:
        """Gets the relative latency score for internet service providers from a specified location to
        Azure regions.
        :param resource_group_name: The name of the network watcher resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher resource.
        :type network_watcher_name: str
        :param parameters: Parameters that determine Azure reachability report configuration.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.AzureReachabilityReportParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either AzureReachabilityReport or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.AzureReachabilityReport]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureReachabilityReport"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request.  cls=lambda returns the
            # raw pipeline response unchanged so the poller can drive the LRO.
            raw_result = await self._get_azure_reachability_report_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs applied only to the initial request; drop them so they are
        # not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO payload, honoring a caller-supplied cls.
            deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> default ARM poller (configured with
        # 'final-state-via': 'location'); polling=False -> no polling;
        # otherwise the caller supplied their own AsyncPollingMethod.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new LRO.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_azure_reachability_report.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'} # type: ignore
    async def _list_available_providers_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.AvailableProvidersListParameters",
        **kwargs: Any
    ) -> "_models.AvailableProvidersList":
        """Send the initial POST of the list-available-providers long-running operation.

        Serializes ``parameters`` as ``AvailableProvidersListParameters`` and
        POSTs it to the network watcher's ``availableProvidersList`` endpoint.
        A 200 or 202 response body is deserialized into
        ``AvailableProvidersList``; any other status raises
        ``HttpResponseError`` (subject to ``error_map`` overrides).
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableProvidersList"]
        # Default mapping of error status codes to exception types; callers may
        # extend/override it via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._list_available_providers_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'AvailableProvidersListParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both the immediate (200) and accepted (202) responses carry the same
        # payload shape, so each branch deserializes the same model.
        if response.status_code == 200:
            deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _list_available_providers_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'} # type: ignore
    async def begin_list_available_providers(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.AvailableProvidersListParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.AvailableProvidersList"]:
        """Lists all available internet service providers for a specified Azure region.
        :param resource_group_name: The name of the network watcher resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher resource.
        :type network_watcher_name: str
        :param parameters: Parameters that scope the list of available providers.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.AvailableProvidersListParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either AvailableProvidersList or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.AvailableProvidersList]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableProvidersList"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request.  cls=lambda returns the
            # raw pipeline response unchanged so the poller can drive the LRO.
            raw_result = await self._list_available_providers_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs applied only to the initial request; drop them so they are
        # not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO payload, honoring a caller-supplied cls.
            deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> default ARM poller (configured with
        # 'final-state-via': 'location'); polling=False -> no polling;
        # otherwise the caller supplied their own AsyncPollingMethod.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new LRO.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_available_providers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'} # type: ignore
    async def _get_network_configuration_diagnostic_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.NetworkConfigurationDiagnosticParameters",
        **kwargs: Any
    ) -> "_models.NetworkConfigurationDiagnosticResponse":
        """Send the initial POST of the get-network-configuration-diagnostic long-running operation.

        Private helper used by :meth:`begin_get_network_configuration_diagnostic`:
        performs a single round-trip (no polling) and deserializes the 200/202 body.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher.
        :param parameters: Parameters to get network configuration diagnostic.
        :return: Deserialized NetworkConfigurationDiagnosticResponse, or ``cls(...)`` if a
         custom ``cls`` callback was passed in ``kwargs``.
        :raises ~azure.core.exceptions.HttpResponseError: for any status other than 200/202.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkConfigurationDiagnosticResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied status-code -> exception overrides take precedence.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._get_network_configuration_diagnostic_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'NetworkConfigurationDiagnosticParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # 200 = operation completed synchronously, 202 = accepted for async processing;
        # both carry the same response model.
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _get_network_configuration_diagnostic_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'}  # type: ignore
    async def begin_get_network_configuration_diagnostic(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.NetworkConfigurationDiagnosticParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.NetworkConfigurationDiagnosticResponse"]:
        """Get network configuration diagnostic.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param parameters: Parameters to get network configuration diagnostic.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.NetworkConfigurationDiagnosticParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either NetworkConfigurationDiagnosticResponse or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.NetworkConfigurationDiagnosticResponse]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkConfigurationDiagnosticResponse"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial request; cls=lambda keeps the raw
            # pipeline response so the poller can drive the LRO itself.
            raw_result = await self._get_network_configuration_diagnostic_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; drop them so they are
        # not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into the result model.
            deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # polling=True -> default ARM poller (final state read from Location header);
        # polling=False -> no polling; otherwise a user-provided polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_network_configuration_diagnostic.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'}  # type: ignore
| 53.541962 | 250 | 0.686288 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` response callback accepted by every operation:
# (pipeline_response, deserialized_model, response_headers) -> Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkWatchersOperations:
    """Async operations for Network Watcher resources (API version 2019-04-01).

    You should not instantiate this class directly; access it as an attribute
    of the client, which supplies the pipeline client, configuration,
    serializer and deserializer.
    """

    # Alias so callers can reach the models module via the operations class.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers used by all operations."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def create_or_update(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.NetworkWatcher",
        **kwargs: Any
    ) -> "_models.NetworkWatcher":
        """Creates or updates a network watcher in the specified resource group.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher.
        :param parameters: Parameters that define the network watcher resource.
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: The created or updated NetworkWatcher (200 or 201), or cls(response).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters and headers
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and send the PUT through the pipeline.
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'NetworkWatcher')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200 = updated, 201 = created; both carry a NetworkWatcher body.
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkWatcher', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('NetworkWatcher', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
    async def get(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        **kwargs: Any
    ) -> "_models.NetworkWatcher":
        """Gets the specified network watcher by resource group.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher.
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: The NetworkWatcher, or cls(response).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters and headers
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkWatcher', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
    async def _delete_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE of the delete long-running operation.

        Private helper used by :meth:`begin_delete`; performs a single
        round-trip (no polling). Accepts 202 (accepted) and 204 (no content).

        :raises ~azure.core.exceptions.HttpResponseError: for any other status.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters and headers
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
    async def begin_delete(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified network watcher resource.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher.
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True (default AsyncARMPolling), False (no polling), or a custom polling method.
        :return: An AsyncLROPoller that returns None.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No saved state: issue the initial DELETE; cls=lambda keeps the raw
            # pipeline response so the poller can drive the LRO itself.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; do not forward to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # DELETE has no response body; only invoke the custom callback, if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
    async def update_tags(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.NetworkWatcher":
        """Updates a network watcher tags via PATCH.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher.
        :param parameters: Parameters supplied to update network watcher tags.
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: The updated NetworkWatcher, or cls(response).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update_tags.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters and headers
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the TagsObject body and send the PATCH through the pipeline.
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkWatcher', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.NetworkWatcherListResult"]:
        """Gets all network watchers by resource group.

        Note this is a sync method returning an async paged iterable; no request
        is sent until the result is iterated.

        :param resource_group_name: The name of the resource group.
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: An AsyncItemPaged iterator of NetworkWatcher items.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the templated URL,
            # subsequent pages use the service-provided next_link verbatim.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; this result model exposes no next-link, so
            # None is returned as the continuation token.
            deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page and validate the HTTP status.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers'}
    def list_all(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.NetworkWatcherListResult"]:
        """Gets all network watchers in the subscription.

        Note this is a sync method returning an async paged iterable; no request
        is sent until the result is iterated.

        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: An AsyncItemPaged iterator of NetworkWatcher items.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the templated URL,
            # subsequent pages use the service-provided next_link verbatim.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_all.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; this result model exposes no next-link, so
            # None is returned as the continuation token.
            deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page and validate the HTTP status.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkWatchers'}
    async def get_topology(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.TopologyParameters",
        **kwargs: Any
    ) -> "_models.Topology":
        """Gets the current network topology by resource group.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher.
        :param parameters: Parameters that define the representation of topology.
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: The Topology, or cls(response).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.get_topology.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters and headers
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and send the POST through the pipeline.
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'TopologyParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Topology', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_topology.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/topology'}
    async def _verify_ip_flow_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.VerificationIPFlowParameters",
        **kwargs: Any
    ) -> "_models.VerificationIPFlowResult":
        """Send the initial POST of the verify-IP-flow long-running operation.

        Private helper used by :meth:`begin_verify_ip_flow`; performs a single
        round-trip (no polling) and deserializes the 200/202 body.

        :raises ~azure.core.exceptions.HttpResponseError: for any status other than 200/202.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._verify_ip_flow_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters and headers
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and send the POST through the pipeline.
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'VerificationIPFlowParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200 = completed synchronously, 202 = accepted; same response model either way.
        if response.status_code == 200:
            deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _verify_ip_flow_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'}
    async def begin_verify_ip_flow(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.VerificationIPFlowParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VerificationIPFlowResult"]:
        """Verify IP flow from the specified VM to a location given the currently configured NSG rules.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher.
        :param parameters: Parameters that define the IP flow to be verified.
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True (default AsyncARMPolling), False (no polling), or a custom polling method.
        :return: An AsyncLROPoller that returns VerificationIPFlowResult, or cls(response).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No saved state: fire the initial request; cls=lambda keeps the raw
            # pipeline response so the poller can drive the LRO itself.
            raw_result = await self._verify_ip_flow_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; do not forward to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into the result model.
            deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_verify_ip_flow.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'}
    async def _get_next_hop_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.NextHopParameters",
        **kwargs: Any
    ) -> "_models.NextHopResult":
        """Send the initial POST of the get-next-hop long-running operation.

        Private helper used by :meth:`begin_get_next_hop`; performs a single
        round-trip (no polling) and deserializes the 200/202 body.

        :raises ~azure.core.exceptions.HttpResponseError: for any status other than 200/202.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._get_next_hop_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters and headers
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and send the POST through the pipeline.
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'NextHopParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200 = completed synchronously, 202 = accepted; same response model either way.
        if response.status_code == 200:
            deserialized = self._deserialize('NextHopResult', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('NextHopResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_next_hop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'}
    async def begin_get_next_hop(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.NextHopParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.NextHopResult"]:
        """Gets the next hop from the specified VM.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher.
        :param parameters: Parameters that define the source and destination endpoint.
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True (default AsyncARMPolling), False (no polling), or a custom polling method.
        :return: An AsyncLROPoller that returns NextHopResult, or cls(response).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No saved state: fire the initial request; cls=lambda keeps the raw
            # pipeline response so the poller can drive the LRO itself.
            raw_result = await self._get_next_hop_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; do not forward to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into the result model.
            deserialized = self._deserialize('NextHopResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_next_hop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'}
async def _get_vm_security_rules_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.SecurityGroupViewParameters",
**kwargs: Any
) -> "_models.SecurityGroupViewResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._get_vm_security_rules_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'SecurityGroupViewParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vm_security_rules_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'}
    async def begin_get_vm_security_rules(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.SecurityGroupViewParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.SecurityGroupViewResult"]:
        """Start the long-running security-group-view operation on the given Network Watcher.

        Issues the initial POST via :meth:`_get_vm_security_rules_initial`
        (unless a continuation token is supplied) and returns a poller whose
        final result is deserialized as ``SecurityGroupViewResult``.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher resource.
        :param parameters: Request body for the security-group-view operation.
        :keyword polling: True (default) for ``AsyncARMPolling``, False for no
         polling, or a custom polling object.
        :keyword continuation_token: Rebuild the poller from saved state
         instead of issuing the initial request.
        :keyword polling_interval: Polling interval; defaults to the client
         configuration's ``polling_interval``.
        :keyword cls: Optional callable ``(pipeline_response, deserialized, headers)``
         applied to the final result.
        :return: An AsyncLROPoller yielding a SecurityGroupViewResult.
        :raises ~azure.core.exceptions.HttpResponseError: If the initial call fails.
        """
        # 'polling' selects the polling strategy below; 'cls' customizes the final result.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # cls=lambda keeps the raw pipeline response so the poller can inspect it.
            raw_result = await self._get_vm_security_rules_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Remove request-only kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # NOTE: raw_result is only assigned when cont_token is None; the branches are mutually exclusive.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_vm_security_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'}
async def _get_troubleshooting_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.TroubleshootingParameters",
**kwargs: Any
) -> "_models.TroubleshootingResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._get_troubleshooting_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'TroubleshootingParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_troubleshooting_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'}
    async def begin_get_troubleshooting(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.TroubleshootingParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.TroubleshootingResult"]:
        """Start the long-running troubleshoot operation on the given Network Watcher.

        Issues the initial POST via :meth:`_get_troubleshooting_initial`
        (unless a continuation token is supplied) and returns a poller whose
        final result is deserialized as ``TroubleshootingResult``.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher resource.
        :param parameters: Request body for the troubleshoot operation.
        :keyword polling: True (default) for ``AsyncARMPolling``, False for no
         polling, or a custom polling object.
        :keyword continuation_token: Rebuild the poller from saved state
         instead of issuing the initial request.
        :keyword polling_interval: Polling interval; defaults to the client
         configuration's ``polling_interval``.
        :keyword cls: Optional callable ``(pipeline_response, deserialized, headers)``
         applied to the final result.
        :return: An AsyncLROPoller yielding a TroubleshootingResult.
        :raises ~azure.core.exceptions.HttpResponseError: If the initial call fails.
        """
        # 'polling' selects the polling strategy below; 'cls' customizes the final result.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # cls=lambda keeps the raw pipeline response so the poller can inspect it.
            raw_result = await self._get_troubleshooting_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Remove request-only kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # NOTE: raw_result is only assigned when cont_token is None; the branches are mutually exclusive.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_troubleshooting.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'}
async def _get_troubleshooting_result_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.QueryTroubleshootingParameters",
**kwargs: Any
) -> "_models.TroubleshootingResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._get_troubleshooting_result_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'QueryTroubleshootingParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_troubleshooting_result_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'}
    async def begin_get_troubleshooting_result(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.QueryTroubleshootingParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.TroubleshootingResult"]:
        """Start the long-running query-troubleshoot-result operation on the given Network Watcher.

        Issues the initial POST via :meth:`_get_troubleshooting_result_initial`
        (unless a continuation token is supplied) and returns a poller whose
        final result is deserialized as ``TroubleshootingResult``.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher resource.
        :param parameters: Request body for the query-troubleshoot-result operation.
        :keyword polling: True (default) for ``AsyncARMPolling``, False for no
         polling, or a custom polling object.
        :keyword continuation_token: Rebuild the poller from saved state
         instead of issuing the initial request.
        :keyword polling_interval: Polling interval; defaults to the client
         configuration's ``polling_interval``.
        :keyword cls: Optional callable ``(pipeline_response, deserialized, headers)``
         applied to the final result.
        :return: An AsyncLROPoller yielding a TroubleshootingResult.
        :raises ~azure.core.exceptions.HttpResponseError: If the initial call fails.
        """
        # 'polling' selects the polling strategy below; 'cls' customizes the final result.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # cls=lambda keeps the raw pipeline response so the poller can inspect it.
            raw_result = await self._get_troubleshooting_result_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Remove request-only kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # NOTE: raw_result is only assigned when cont_token is None; the branches are mutually exclusive.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_troubleshooting_result.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'}
async def _set_flow_log_configuration_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.FlowLogInformation",
**kwargs: Any
) -> "_models.FlowLogInformation":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._set_flow_log_configuration_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'FlowLogInformation')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_set_flow_log_configuration_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'}
    async def begin_set_flow_log_configuration(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.FlowLogInformation",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.FlowLogInformation"]:
        """Start the long-running configure-flow-log operation on the given Network Watcher.

        Issues the initial POST via :meth:`_set_flow_log_configuration_initial`
        (unless a continuation token is supplied) and returns a poller whose
        final result is deserialized as ``FlowLogInformation``.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher resource.
        :param parameters: Request body for the configure-flow-log operation.
        :keyword polling: True (default) for ``AsyncARMPolling``, False for no
         polling, or a custom polling object.
        :keyword continuation_token: Rebuild the poller from saved state
         instead of issuing the initial request.
        :keyword polling_interval: Polling interval; defaults to the client
         configuration's ``polling_interval``.
        :keyword cls: Optional callable ``(pipeline_response, deserialized, headers)``
         applied to the final result.
        :return: An AsyncLROPoller yielding a FlowLogInformation.
        :raises ~azure.core.exceptions.HttpResponseError: If the initial call fails.
        """
        # 'polling' selects the polling strategy below; 'cls' customizes the final result.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # cls=lambda keeps the raw pipeline response so the poller can inspect it.
            raw_result = await self._set_flow_log_configuration_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Remove request-only kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            deserialized = self._deserialize('FlowLogInformation', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # NOTE: raw_result is only assigned when cont_token is None; the branches are mutually exclusive.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_set_flow_log_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'}
async def _get_flow_log_status_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.FlowLogStatusParameters",
**kwargs: Any
) -> "_models.FlowLogInformation":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._get_flow_log_status_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'FlowLogStatusParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_flow_log_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'}
    async def begin_get_flow_log_status(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.FlowLogStatusParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.FlowLogInformation"]:
        """Start the long-running query-flow-log-status operation on the given Network Watcher.

        Issues the initial POST via :meth:`_get_flow_log_status_initial`
        (unless a continuation token is supplied) and returns a poller whose
        final result is deserialized as ``FlowLogInformation``.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher resource.
        :param parameters: Request body for the query-flow-log-status operation.
        :keyword polling: True (default) for ``AsyncARMPolling``, False for no
         polling, or a custom polling object.
        :keyword continuation_token: Rebuild the poller from saved state
         instead of issuing the initial request.
        :keyword polling_interval: Polling interval; defaults to the client
         configuration's ``polling_interval``.
        :keyword cls: Optional callable ``(pipeline_response, deserialized, headers)``
         applied to the final result.
        :return: An AsyncLROPoller yielding a FlowLogInformation.
        :raises ~azure.core.exceptions.HttpResponseError: If the initial call fails.
        """
        # 'polling' selects the polling strategy below; 'cls' customizes the final result.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # cls=lambda keeps the raw pipeline response so the poller can inspect it.
            raw_result = await self._get_flow_log_status_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Remove request-only kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            deserialized = self._deserialize('FlowLogInformation', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # NOTE: raw_result is only assigned when cont_token is None; the branches are mutually exclusive.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_flow_log_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'}
async def _check_connectivity_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.ConnectivityParameters",
**kwargs: Any
) -> "_models.ConnectivityInformation":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._check_connectivity_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'ConnectivityParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_check_connectivity_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'}
    async def begin_check_connectivity(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.ConnectivityParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ConnectivityInformation"]:
        """Start the long-running connectivity-check operation on the given Network Watcher.

        Issues the initial POST via :meth:`_check_connectivity_initial`
        (unless a continuation token is supplied) and returns a poller whose
        final result is deserialized as ``ConnectivityInformation``.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher resource.
        :param parameters: Request body for the connectivity-check operation.
        :keyword polling: True (default) for ``AsyncARMPolling``, False for no
         polling, or a custom polling object.
        :keyword continuation_token: Rebuild the poller from saved state
         instead of issuing the initial request.
        :keyword polling_interval: Polling interval; defaults to the client
         configuration's ``polling_interval``.
        :keyword cls: Optional callable ``(pipeline_response, deserialized, headers)``
         applied to the final result.
        :return: An AsyncLROPoller yielding a ConnectivityInformation.
        :raises ~azure.core.exceptions.HttpResponseError: If the initial call fails.
        """
        # 'polling' selects the polling strategy below; 'cls' customizes the final result.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # cls=lambda keeps the raw pipeline response so the poller can inspect it.
            raw_result = await self._check_connectivity_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Remove request-only kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # NOTE: raw_result is only assigned when cont_token is None; the branches are mutually exclusive.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_check_connectivity.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'}
async def _get_azure_reachability_report_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.AzureReachabilityReportParameters",
**kwargs: Any
) -> "_models.AzureReachabilityReport":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._get_azure_reachability_report_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'AzureReachabilityReportParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_azure_reachability_report_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'}
    async def begin_get_azure_reachability_report(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.AzureReachabilityReportParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.AzureReachabilityReport"]:
        """Start the long-running azure-reachability-report operation on the given Network Watcher.

        Issues the initial POST via :meth:`_get_azure_reachability_report_initial`
        (unless a continuation token is supplied) and returns a poller whose
        final result is deserialized as ``AzureReachabilityReport``.

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher resource.
        :param parameters: Request body for the azure-reachability-report operation.
        :keyword polling: True (default) for ``AsyncARMPolling``, False for no
         polling, or a custom polling object.
        :keyword continuation_token: Rebuild the poller from saved state
         instead of issuing the initial request.
        :keyword polling_interval: Polling interval; defaults to the client
         configuration's ``polling_interval``.
        :keyword cls: Optional callable ``(pipeline_response, deserialized, headers)``
         applied to the final result.
        :return: An AsyncLROPoller yielding an AzureReachabilityReport.
        :raises ~azure.core.exceptions.HttpResponseError: If the initial call fails.
        """
        # 'polling' selects the polling strategy below; 'cls' customizes the final result.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # cls=lambda keeps the raw pipeline response so the poller can inspect it.
            raw_result = await self._get_azure_reachability_report_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Remove request-only kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # NOTE: raw_result is only assigned when cont_token is None; the branches are mutually exclusive.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_azure_reachability_report.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'}
async def _list_available_providers_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.AvailableProvidersListParameters",
**kwargs: Any
) -> "_models.AvailableProvidersList":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._list_available_providers_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'AvailableProvidersListParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_available_providers_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'}
    async def begin_list_available_providers(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.AvailableProvidersListParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.AvailableProvidersList"]:
        """Start the long-running ``availableProvidersList`` operation.

        POSTs *parameters* to the network watcher's ``availableProvidersList``
        endpoint (via :meth:`_list_available_providers_initial`) and wraps the
        result in a poller whose final value is deserialized as
        ``AvailableProvidersList``.

        :param resource_group_name: Resource group name (URL path segment).
        :param network_watcher_name: Network watcher name (URL path segment).
        :param parameters: Request body, serialized as
            ``AvailableProvidersListParameters``.
        :keyword polling: ``True`` (default) to poll with ``AsyncARMPolling``,
            ``False`` for no polling, or a custom polling object.
        :keyword continuation_token: Token to resume a previously started
            operation without re-issuing the initial request.
        :keyword cls: Custom callback ``(pipeline_response, deserialized, headers)``
            applied to the final result.
        :return: An :class:`AsyncLROPoller` yielding ``AvailableProvidersList``.
        """
        # Pull the LRO-control keywords out of **kwargs before the remaining
        # kwargs are forwarded to the initial request / polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No continuation token: issue the initial request. The lambda
            # makes the call return the raw pipeline response for the poller.
            raw_result = await self._list_available_providers_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These keywords only apply to the initial request; drop them so they
        # are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the long-running operation.
            deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Per lro_options, the final state is retrieved from the 'location' header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_available_providers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'}
    async def _get_network_configuration_diagnostic_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.NetworkConfigurationDiagnosticParameters",
        **kwargs: Any
    ) -> "_models.NetworkConfigurationDiagnosticResponse":
        """Issue the initial POST for the network-configuration-diagnostic LRO.

        Serializes *parameters* as ``NetworkConfigurationDiagnosticParameters``,
        POSTs it to the watcher's ``networkConfigurationDiagnostic`` endpoint
        and deserializes a 200/202 reply as
        ``NetworkConfigurationDiagnosticResponse``.

        :raises HttpResponseError: for any status other than 200 or 202.
        """
        cls = kwargs.pop('cls', None)
        # Default ARM error mapping; callers may extend/override it via the
        # 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct the URL from the template stored on this method's metadata.
        url = self._get_network_configuration_diagnostic_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body.
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'NetworkConfigurationDiagnosticParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both the synchronous 200 and the accepted 202 replies are
        # deserialized with the same model.
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_network_configuration_diagnostic_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'}
    async def begin_get_network_configuration_diagnostic(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        parameters: "_models.NetworkConfigurationDiagnosticParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.NetworkConfigurationDiagnosticResponse"]:
        """Start the long-running ``networkConfigurationDiagnostic`` operation.

        POSTs *parameters* to the network watcher's
        ``networkConfigurationDiagnostic`` endpoint (via
        :meth:`_get_network_configuration_diagnostic_initial`) and wraps the
        result in a poller whose final value is deserialized as
        ``NetworkConfigurationDiagnosticResponse``.

        :param resource_group_name: Resource group name (URL path segment).
        :param network_watcher_name: Network watcher name (URL path segment).
        :param parameters: Request body, serialized as
            ``NetworkConfigurationDiagnosticParameters``.
        :keyword polling: ``True`` (default) to poll with ``AsyncARMPolling``,
            ``False`` for no polling, or a custom polling object.
        :keyword continuation_token: Token to resume a previously started
            operation without re-issuing the initial request.
        :keyword cls: Custom callback ``(pipeline_response, deserialized, headers)``
            applied to the final result.
        :return: An :class:`AsyncLROPoller` yielding
            ``NetworkConfigurationDiagnosticResponse``.
        """
        # Pull the LRO-control keywords out of **kwargs before the remaining
        # kwargs are forwarded to the initial request / polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No continuation token: issue the initial request. The lambda
            # makes the call return the raw pipeline response for the poller.
            raw_result = await self._get_network_configuration_diagnostic_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These keywords only apply to the initial request; drop them so they
        # are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the long-running operation.
            deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Per lro_options, the final state is retrieved from the 'location' header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_network_configuration_diagnostic.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'}
| true | true |
1c2ea56f126d1754020d9b2394105da6f3ce0700 | 7,580 | py | Python | sleepy/http.py | HitSyr/Sleepy | 70ec9479fb947a624e3c658ea39c886c30bf794d | [
"MIT"
] | 4 | 2021-12-08T21:38:14.000Z | 2022-01-30T04:16:38.000Z | sleepy/http.py | HitSyr/Sleepy | 70ec9479fb947a624e3c658ea39c886c30bf794d | [
"MIT"
] | 2 | 2021-09-29T13:33:26.000Z | 2021-12-12T12:52:16.000Z | sleepy/http.py | HitSyr/Sleepy | 70ec9479fb947a624e3c658ea39c886c30bf794d | [
"MIT"
] | null | null | null | """
Copyright (c) 2018-present HitchedSyringe
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""
from __future__ import annotations
__all__ = (
"HTTPRequester",
"HTTPRequestFailed",
)
import asyncio
import logging
from collections.abc import MutableMapping
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
import aiohttp
from discord.ext import commands
_LOG = logging.getLogger(__name__)
if TYPE_CHECKING:
from multidict import CIMultiDictProxy
from yarl import URL
HTTPResponseData = Union[str, bytes, Dict[str, Any]]
RequestUrl = Union[str, URL]
class HTTPRequestFailed(commands.CommandError):
    """Exception raised when an HTTP request fails.

    Subclasses :exc:`commands.CommandError` so command error handlers can
    catch it directly.

    .. versionadded:: 1.10
    .. versionchanged:: 2.0
        Renamed to ``HTTPError``; now subclasses :exc:`commands.CommandError`.
    .. versionchanged:: 3.0
        Renamed to ``HTTPRequestFailed``; added ``reason`` and ``headers``.

    Attributes
    ----------
    response: :class:`aiohttp.ClientResponse`
        The response of the failed request.
    status: :class:`int`
        The HTTP status code.
    reason: :class:`str`
        The HTTP status reason.
    headers: multidict.CIMultiDictProxy[:class:`str`]
        The response headers.
    data: Union[:class:`dict`, :class:`str`, :class:`bytes`]
        The data returned from the failed request.
    """

    def __init__(self, response: aiohttp.ClientResponse, data: HTTPResponseData) -> None:
        status = response.status
        self.response: aiohttp.ClientResponse = response
        self.status: int = status
        self.reason: str = response.reason
        self.headers: CIMultiDictProxy[str] = response.headers
        self.data: HTTPResponseData = data
        message = f"{response.method} {response.url} failed with HTTP status code {status}."
        super().__init__(message)
class HTTPRequester:
    """An HTTP requests handler that optionally implements caching.
    .. versionadded:: 1.10
    .. versionchanged:: 2.0
        * Renamed to ``CachedHTTPRequester``.
        * Classes can now be manually constructed.
    .. versionchanged:: 3.0
        Renamed to ``HTTPRequester``.
    .. versionchanged:: 3.2
        Removed the `loop` kwarg and property.
    Parameters
    ----------
    cache: Optional[:class:`MutableMapping`]
        The mapping to use for caching the received data.
        ``None`` (the default) denotes disabling caching
        HTTP requests entirely.
        .. versionadded:: 3.0
    """
    __slots__: Tuple[str, ...] = ("_cache", "_request_lock", "__session")
    def __init__(
        self,
        *,
        cache: Optional[MutableMapping[str, Any]] = None,
        **kwargs: Any
    ) -> None:
        if cache is not None and not isinstance(cache, MutableMapping):
            raise TypeError(f"cache must be MutableMapping or NoneType, not {type(cache)!r}.")
        self._cache: Optional[MutableMapping[str, Any]] = cache
        # Serializes cache lookups/inserts in :meth:`request`.
        self._request_lock: asyncio.Lock = asyncio.Lock()
        # Remaining kwargs are forwarded verbatim to aiohttp.ClientSession.
        self.__session: aiohttp.ClientSession = aiohttp.ClientSession(**kwargs)
        _LOG.info("Started a new session.")
    @property
    def cache(self) -> Optional[MutableMapping[str, Any]]:
        """Optional[:class:`MutableMapping`]: The mapping used for caching received data.
        .. versionadded:: 3.0
        """
        return self._cache
    @cache.setter
    def cache(self, value: Optional[MutableMapping[str, Any]]) -> None:
        # Same validation as __init__: anything mapping-like (or None) works.
        if value is not None and not isinstance(value, MutableMapping):
            raise TypeError(f"cache must be MutableMapping or NoneType, not {type(value)!r}.")
        self._cache = value
    @property
    def session(self) -> aiohttp.ClientSession:
        """:class:`aiohttp.ClientSession`: The client session used for handling requests."""
        return self.__session
    async def close(self) -> None:
        """|coro|
        Closes the session.
        """
        await self.__session.close()
        _LOG.info("Session closed.")
    async def __request(
        self,
        method: str,
        url: RequestUrl,
        /,
        **kwargs: Any
    ) -> HTTPResponseData:
        # Kwargs suffixed with a dunder (e.g. ``headers__``) are aiohttp
        # request options; everything else becomes a query parameter. The
        # shuffle below dumps all kwargs into ``params`` first, then moves
        # the dunder-suffixed ones back out with the suffix stripped. The
        # ``params__`` escape hatch exists in case an API requires a query
        # parameter whose name collides with a reserved keyword.
        params = kwargs.pop("params__", {})
        params.update(kwargs.copy())
        kwargs = {k[:-2]: params.pop(k) for k in kwargs if k.endswith("__")}
        async with self.__session.request(method, url, params=params, **kwargs) as resp:
            # Decode the body based on the response's content type.
            if "application/json" in resp.content_type:
                data = await resp.json()
            elif "text/" in resp.content_type:
                data = await resp.text("utf-8")
            else:
                data = await resp.read()
            # aiohttp takes care of HTTP 1xx and 3xx internally, so
            # it's probably safe to exclude these from the range of
            # successful status codes.
            if not 200 <= resp.status < 300:
                _LOG.warning("%s %s failed with HTTP status %s.", method, url, resp.status)
                raise HTTPRequestFailed(resp, data)
            _LOG.info("%s %s succeeded with HTTP status %s.", method, url, resp.status)
            return data
    async def request(
        self,
        method: str,
        url: RequestUrl,
        /,
        *,
        cache__: bool = False,
        **kwargs: Any
    ) -> HTTPResponseData:
        """|coro|
        Performs an HTTP request and optionally caches the response.
        .. note::
            Any kwargs that :meth:`aiohttp.ClientSession.request`
            takes must be suffixed with a dunder.
        .. versionchanged:: 3.0
            Renamed ``cache`` argument to ``cache__``.
        Parameters
        ----------
        method: :class:`str`
            The HTTP method.
            .. versionchanged:: 3.0
                This is now a positional-only argument.
        url: Union[:class:`str`, :class:`yarl.URL`]
            The URL to make a request to.
            .. versionchanged:: 3.0
                This is now a positional-only argument.
        cache__: :class:`bool`
            Whether or not to cache the response data.
            If :attr:`cache` is ``None``, then caching
            the data will be disabled regardless of
            this setting.
            Defaults to ``False``.
        Returns
        -------
        Union[:class:`dict`, :class:`str`, :class:`bytes`]
            The raw response data.
        Raises
        ------
        :exc:`.HTTPRequestFailed`
            The request returned a status code of either 4xx or 5xx.
        """
        if not cache__ or self._cache is None:
            # Caching disabled: fire the request directly.
            return await self.__request(method, url, **kwargs)
        # The lock makes check-then-insert atomic so concurrent identical
        # requests hit the cache instead of racing to populate it.
        async with self._request_lock:
            # Cache key encodes method, URL, and every remaining kwarg.
            key = f"{method}:{url}:<{' '.join(f'{k}={v}' for k, v in kwargs.items())}>"
            if (cached := self._cache.get(key)) is not None:
                _LOG.debug("%s %s got %s from the cache.", method, url, cached)
                return cached
            data = await self.__request(method, url, **kwargs)
            self._cache[key] = data
            _LOG.debug("Inserted %s into the cache.", data)
            return data
| 29.84252 | 94 | 0.596042 |
from __future__ import annotations
__all__ = (
"HTTPRequester",
"HTTPRequestFailed",
)
import asyncio
import logging
from collections.abc import MutableMapping
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
import aiohttp
from discord.ext import commands
_LOG = logging.getLogger(__name__)
if TYPE_CHECKING:
from multidict import CIMultiDictProxy
from yarl import URL
HTTPResponseData = Union[str, bytes, Dict[str, Any]]
RequestUrl = Union[str, URL]
class HTTPRequestFailed(commands.CommandError):
    """Exception raised when an HTTP request fails (non-2xx status).

    Subclasses :exc:`commands.CommandError` so command error handlers can
    catch it. Exposes ``response``, ``status``, ``reason``, ``headers`` and
    the decoded body ``data`` of the failed request.
    """
    def __init__(self, response: aiohttp.ClientResponse, data: HTTPResponseData) -> None:
        self.response: aiohttp.ClientResponse = response
        # Chained assignment: ``status`` is reused in the message below.
        self.status = status = response.status
        self.reason: str = response.reason
        self.headers: CIMultiDictProxy[str] = response.headers
        self.data: HTTPResponseData = data
        super().__init__(
            f"{response.method} {response.url} failed with HTTP status code {status}."
        )
class HTTPRequester:
    """An HTTP requests handler that optionally caches responses.

    Extra constructor kwargs are forwarded verbatim to
    :class:`aiohttp.ClientSession`. ``cache`` may be any mutable mapping;
    ``None`` disables caching entirely.
    """
    __slots__: Tuple[str, ...] = ("_cache", "_request_lock", "__session")
    def __init__(
        self,
        *,
        cache: Optional[MutableMapping[str, Any]] = None,
        **kwargs: Any
    ) -> None:
        if cache is not None and not isinstance(cache, MutableMapping):
            raise TypeError(f"cache must be MutableMapping or NoneType, not {type(cache)!r}.")
        self._cache: Optional[MutableMapping[str, Any]] = cache
        # Serializes cache lookups/inserts in :meth:`request`.
        self._request_lock: asyncio.Lock = asyncio.Lock()
        self.__session: aiohttp.ClientSession = aiohttp.ClientSession(**kwargs)
        _LOG.info("Started a new session.")
    @property
    def cache(self) -> Optional[MutableMapping[str, Any]]:
        """The mapping used for caching received data, or ``None``."""
        return self._cache
    @cache.setter
    def cache(self, value: Optional[MutableMapping[str, Any]]) -> None:
        if value is not None and not isinstance(value, MutableMapping):
            raise TypeError(f"cache must be MutableMapping or NoneType, not {type(value)!r}.")
        self._cache = value
    @property
    def session(self) -> aiohttp.ClientSession:
        """The client session used for handling requests."""
        return self.__session
    async def close(self) -> None:
        """Closes the underlying client session."""
        await self.__session.close()
        _LOG.info("Session closed.")
    async def __request(
        self,
        method: str,
        url: RequestUrl,
        /,
        **kwargs: Any
    ) -> HTTPResponseData:
        # Kwargs suffixed with a dunder (e.g. ``headers__``) are aiohttp
        # request options; everything else becomes a query parameter. The
        # shuffle below dumps all kwargs into ``params`` first, then moves
        # the dunder-suffixed ones back out with the suffix stripped.
        params = kwargs.pop("params__", {})
        params.update(kwargs.copy())
        kwargs = {k[:-2]: params.pop(k) for k in kwargs if k.endswith("__")}
        async with self.__session.request(method, url, params=params, **kwargs) as resp:
            # Decode the body based on the response's content type.
            if "application/json" in resp.content_type:
                data = await resp.json()
            elif "text/" in resp.content_type:
                data = await resp.text("utf-8")
            else:
                data = await resp.read()
            # aiohttp handles 1xx/3xx internally, so only 2xx counts as a
            # successful status here.
            if not 200 <= resp.status < 300:
                _LOG.warning("%s %s failed with HTTP status %s.", method, url, resp.status)
                raise HTTPRequestFailed(resp, data)
            _LOG.info("%s %s succeeded with HTTP status %s.", method, url, resp.status)
            return data
    async def request(
        self,
        method: str,
        url: RequestUrl,
        /,
        *,
        cache__: bool = False,
        **kwargs: Any
    ) -> HTTPResponseData:
        """Performs an HTTP request and optionally caches the response.

        aiohttp request options must be passed with a dunder suffix
        (e.g. ``headers__``); all other kwargs become query parameters.
        Raises :exc:`HTTPRequestFailed` on a 4xx/5xx status.
        """
        if not cache__ or self._cache is None:
            return await self.__request(method, url, **kwargs)
        # The lock makes check-then-insert atomic so concurrent identical
        # requests hit the cache instead of racing to populate it.
        async with self._request_lock:
            key = f"{method}:{url}:<{' '.join(f'{k}={v}' for k, v in kwargs.items())}>"
            if (cached := self._cache.get(key)) is not None:
                _LOG.debug("%s %s got %s from the cache.", method, url, cached)
                return cached
            data = await self.__request(method, url, **kwargs)
            self._cache[key] = data
            _LOG.debug("Inserted %s into the cache.", data)
            return data
| true | true |
1c2ea76b9cf021143d713f6a07b53b3b5319c5e6 | 3,425 | py | Python | server.py | umran23/2_Threaded_server | eed32916ae645419c9634f7f6f4994300fe3feef | [
"Apache-2.0"
] | null | null | null | server.py | umran23/2_Threaded_server | eed32916ae645419c9634f7f6f4994300fe3feef | [
"Apache-2.0"
] | null | null | null | server.py | umran23/2_Threaded_server | eed32916ae645419c9634f7f6f4994300fe3feef | [
"Apache-2.0"
] | null | null | null | import socket
from threading import Thread
class Server:
    """A small threaded TCP echo server.

    Constructing an instance prompts the user for a port and IP address
    (falling back to the class defaults on invalid input), binds, listens,
    and then serves each accepted client on its own thread, echoing every
    message back until the client disconnects or sends ``exit``.
    All activity is appended to ``log_server.txt``.
    """

    addr = ''          # default bind address ('' = all interfaces)
    port = 3030        # default port, used when user input is invalid
    connections = 4    # listen() backlog

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.logging('Socket has created')
        self.port, self.addr = self.ask_for_port_addr()
        self.bind()
        self.logging(f'Socket binded to port {self.port}')
        self.sock.listen(self.connections)
        self.logging(f'Socket is listening {self.connections} connections')
        # Blocks in the accept loop for the lifetime of the server.
        self.main()

    @staticmethod
    def logging(data):
        """Append one line to the server log file."""
        with open("log_server.txt", "a") as f:
            f.write(f'{data}\n')

    def cleaning(self):
        """Append the current address/port pair to the log file."""
        with open("log_server.txt", "a") as f:
            f.write(f'\n{self.addr}, {self.port}')

    def main(self):
        """Accept clients forever, serving each one on its own thread."""
        try:
            while 1:
                conn, addr = self.sock.accept()
                self.logging(f'Client {addr} was connected')
                Thread(target=self.for_client, args=(conn,)).start()
        finally:
            self.logging('Server is closing')
            self.sock.close()

    def send(self, conn, message):
        """Send ``message`` (at most 1024 characters) to ``conn``."""
        assert len(message) <= 1024
        conn.send(f'{message}'.encode())
        self.logging(f'Sending this message {message}')

    def recv(self, conn):
        """Receive and decode up to 1024 bytes from ``conn``."""
        data = conn.recv(1024).decode()
        self.logging(f'Receiving a message => {data}')
        return data

    @staticmethod
    def for_port(port):
        """Return True if ``port`` parses as a non-privileged port (1024-65535)."""
        try:
            return 1023 < int(port) < 65536
        except ValueError:
            return False

    @staticmethod
    def for_ip(ip):
        """Return True for 'localhost' or a valid dotted-quad IPv4 address.

        Unlike the previous implementation, a valid address now yields an
        explicit ``True`` instead of an implicit ``None`` (callers only
        compare against ``False``, so this stays backward-compatible).
        """
        if ip == 'localhost':
            return True
        parts = ip.split(".")
        if len(parts) != 4:
            return False
        try:
            return all(0 <= int(part) <= 255 for part in parts)
        except ValueError:
            # A non-numeric octet makes the address invalid.
            return False

    def ask_for_port_addr(self):
        """Prompt for a port and IP address, substituting defaults on bad input."""
        user_port = input("Enter Port:")
        if self.for_port(user_port) is False:
            print(f'Wrong input, Port by Default is : - {self.port}')
            user_port = str(self.port)
        user_ip = input("Enter IP Address:")
        if self.for_ip(user_ip) is False:
            print(f'Wrong Input, IP Address by Default is : - {self.addr}')
            user_ip = self.addr
        return int(user_port), user_ip

    def bind(self):
        """Bind the socket; on failure fall back to an OS-assigned free port."""
        try:
            self.sock.bind((self.addr, self.port))
        except OSError:
            self.sock.bind((self.addr, 0))
            self.port = self.sock.getsockname()[1]
            print(f'New PORT is {self.port}')

    def for_client(self, conn):
        """Echo loop for a single client connection (runs on its own thread)."""
        # Log the client's address rather than the server's own bind address.
        peer = conn.getpeername()
        self.logging(f'Client {peer} was connected')
        try:
            while 1:
                data = self.recv(conn)
                if not data:
                    self.logging(f'Client {peer} was disconnected\n')
                    break
                if data == 'exit':
                    self.logging('Connection is closing\n')
                    break
                self.logging(f'Received message:{data}')
                self.logging(f'Sending message: {data}')
                self.send(conn, data)
        finally:
            # Close the client socket so it is not leaked per connection.
            conn.close()
| 29.525862 | 76 | 0.52 | import socket
from threading import Thread
class Server:
    """A small threaded TCP echo server.

    Constructing an instance prompts the user for a port and IP address
    (falling back to the class defaults on invalid input), binds, listens,
    and serves each accepted client on its own thread. All activity is
    appended to ``log_server.txt``.
    """
    addr = ''          # default bind address ('' = all interfaces)
    port = 3030        # default port, used when user input is invalid
    connections = 4    # listen() backlog
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.logging('Socket has created')
        self.port, self.addr = self.ask_for_port_addr()
        self.bind()
        self.logging(f'Socket binded to port {self.port}')
        self.sock.listen(self.connections)
        self.logging(f'Socket is listening {self.connections} connections')
        # Blocks in the accept loop for the lifetime of the server.
        self.main()
    @staticmethod
    def logging(data):
        # Append one line to the server log file.
        with open("log_server.txt", "a") as f:
            f.write(f'{data}\n')
    def cleaning(self):
        # Append the current address/port pair to the log file.
        with open("log_server.txt", "a") as f:
            f.write(f'\n{self.addr}, {self.port}')
    def main(self):
        """Accept clients forever, serving each one on its own thread."""
        try:
            while 1:
                conn, addr = self.sock.accept()
                self.logging(f'Client {addr} was connected')
                Thread(target=self.for_client, args=(conn,)).start()
        finally:
            self.logging('Server is closing')
            self.sock.close()
    def send(self, conn, message):
        # Send message (at most 1024 characters) to the client socket.
        assert len(message) <= 1024
        conn.send(f'{message}'.encode())
        self.logging(f'Sending this message {message}')
    def recv(self, conn):
        # Receive and decode up to 1024 bytes from the client socket.
        data = conn.recv(1024).decode()
        self.logging(f'Receiving a message => {data}')
        return data
    @staticmethod
    def for_port(port):
        """Return True if ``port`` parses as a non-privileged port (1024-65535)."""
        try:
            if 1023 < int(port) < 65536:
                return True
            else:
                return False
        except ValueError:
            return False
    @staticmethod
    def for_ip(ip):
        """Validate 'localhost' or a dotted-quad IPv4 address.

        NOTE(review): a *valid* address falls through and returns an implicit
        ``None`` (not ``True``); callers only test ``is False``, so it still
        works. ``sum`` also shadows the built-in of the same name.
        """
        try:
            sum = 0
            if ip == 'localhost':
                return True
            parts = ip.split(".", 4)
            if len(parts) == 4:
                for part in parts:
                    part = int(part)
                    if -1 < part < 256:
                        sum += 1
                    else:
                        return False
            if sum != 4:
                return False
        except ValueError:
            return False
    def ask_for_port_addr(self):
        """Prompt for a port and IP address, substituting defaults on bad input."""
        user_port = input("Enter Port:")
        if self.for_port(user_port) is False:
            print(f'Wrong input, Port by Default is : - {self.port}')
            user_port = str(self.port)
        user_ip = input("Enter IP Address:")
        if self.for_ip(user_ip) is False:
            print(f'Wrong Input, IP Address by Default is : - {self.addr}')
            user_ip = self.addr
        return int(user_port), user_ip
    def bind(self):
        """Bind the socket; on failure fall back to an OS-assigned free port."""
        try:
            self.sock.bind((self.addr, self.port))
        except OSError:
            self.sock.bind((self.addr, 0))
            self.port = self.sock.getsockname()[1]
            print(f'New PORT is {self.port}')
    def for_client(self, conn):
        """Echo loop for a single client connection (runs on its own thread)."""
        # NOTE(review): this logs the server's own bind address, not the
        # client's peer address, and the 'С' below is a Cyrillic letter.
        # The client socket is also never closed here.
        self.logging(f'Client {self.addr} was connected')
        while 1:
            data = self.recv(conn)
            if not data:
                self.logging(f'Client {self.addr} was disconnected\n')
                break
            if data == 'exit':
                self.logging('Сonnection is closing\n')
                break
            self.logging(f'Received message:{data}')
            self.logging(f'Sending message: {data}')
            self.send(conn, data)
| true | true |
1c2ea7d11faf357ec52b98e2b099ef65b110baa3 | 4,548 | py | Python | temboo/core/Library/Twitter/Trends/Available.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/Twitter/Trends/Available.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/Twitter/Trends/Available.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | # -*- coding: utf-8 -*-
###############################################################################
#
# Available
# Retrieves the locations that Twitter has trending topic information for.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Available(Choreography):
    """Retrieves the locations that Twitter has trending topic information for."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the Available Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(Available, self).__init__(temboo_session, '/Library/Twitter/Trends/Available')
    def new_input_set(self):
        """Return a fresh AvailableInputSet for supplying inputs to this Choreo."""
        return AvailableInputSet()
    def _make_result_set(self, result, path):
        # Wrap a raw execution result in this Choreo's typed result set.
        return AvailableResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Wrap a running execution in this Choreo's typed execution handle.
        return AvailableChoreographyExecution(session, exec_id, path)
class AvailableInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Available
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.

    All four inputs are required Twitter OAuth credentials.
    """
    def set_AccessTokenSecret(self, value):
        """
        Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret provided by Twitter or retrieved during the OAuth process.)
        """
        super(AvailableInputSet, self)._set_input('AccessTokenSecret', value)
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token provided by Twitter or retrieved during the OAuth process.)
        """
        super(AvailableInputSet, self)._set_input('AccessToken', value)
    def set_ConsumerKey(self, value):
        """
        Set the value of the ConsumerKey input for this Choreo. ((required, string) The API Key (or Consumer Key) provided by Twitter.)
        """
        super(AvailableInputSet, self)._set_input('ConsumerKey', value)
    def set_ConsumerSecret(self, value):
        """
        Set the value of the ConsumerSecret input for this Choreo. ((required, string) The API Secret (or Consumer Secret) provided by Twitter.)
        """
        super(AvailableInputSet, self)._set_input('ConsumerSecret', value)
class AvailableResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Available Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the built-in ``str`` within
        # this method; left unchanged to preserve the public signature.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Twitter.)
        """
        return self._output.get('Response', None)
    def get_Limit(self):
        """
        Retrieve the value for the "Limit" output from this Choreo execution. ((integer) The rate limit ceiling for this particular request.)
        """
        return self._output.get('Limit', None)
    def get_Remaining(self):
        """
        Retrieve the value for the "Remaining" output from this Choreo execution. ((integer) The number of requests left for the 15 minute window.)
        """
        return self._output.get('Remaining', None)
    def get_Reset(self):
        """
        Retrieve the value for the "Reset" output from this Choreo execution. ((date) The remaining window before the rate limit resets in UTC epoch seconds.)
        """
        return self._output.get('Reset', None)
class AvailableChoreographyExecution(ChoreographyExecution):
    """A ChoreographyExecution that produces AvailableResultSet objects for this Choreo."""
    def _make_result_set(self, response, path):
        return AvailableResultSet(response, path)
| 40.247788 | 173 | 0.677661 | true | true | |
1c2ea7fb4024eb02de28a8b3543f21c6f2b00b6c | 67 | py | Python | tuiuiu/tuiuiutenant/__init__.py | caputomarcos/tuiuiu.io | d8fb57cf95487e7fe1454b2130ef18acc916da46 | [
"BSD-3-Clause"
] | 3 | 2019-08-08T09:09:35.000Z | 2020-12-15T18:04:17.000Z | tuiuiu/tuiuiutenant/__init__.py | caputomarcos/tuiuiu.io | d8fb57cf95487e7fe1454b2130ef18acc916da46 | [
"BSD-3-Clause"
] | null | null | null | tuiuiu/tuiuiutenant/__init__.py | caputomarcos/tuiuiu.io | d8fb57cf95487e7fe1454b2130ef18acc916da46 | [
"BSD-3-Clause"
] | 1 | 2017-09-09T20:10:40.000Z | 2017-09-09T20:10:40.000Z | default_app_config = 'tuiuiu.tuiuiutenant.apps.TuiuiuTenantConfig'
| 33.5 | 66 | 0.865672 | default_app_config = 'tuiuiu.tuiuiutenant.apps.TuiuiuTenantConfig'
| true | true |
1c2ea8d8d7ef40216683485021e310b67b3e922f | 1,963 | py | Python | genanki/deck.py | jahzielv/genanki | 6d173fcd8a50b29164d733fce750276bc9a9f39d | [
"MIT"
] | null | null | null | genanki/deck.py | jahzielv/genanki | 6d173fcd8a50b29164d733fce750276bc9a9f39d | [
"MIT"
] | null | null | null | genanki/deck.py | jahzielv/genanki | 6d173fcd8a50b29164d733fce750276bc9a9f39d | [
"MIT"
] | null | null | null | import json
from .apkg_col import APKG_COL
class Deck:
    """An Anki deck: a named collection of notes plus the models they use."""

    def __init__(self, deck_id=None, name=None):
        self.deck_id = deck_id
        self.name = name
        self.notes = []
        # Maps model_id -> model for every model referenced by this deck.
        self.models = {}

    def add_note(self, note):
        """Queue ``note`` for inclusion in this deck."""
        self.notes.append(note)

    def add_model(self, model):
        """Register ``model`` under its ``model_id`` (replacing any previous one)."""
        self.models[model.model_id] = model

    def write_to_db(self, cursor, now_ts):
        """Write the deck, its models, and all of its notes into the collection DB."""
        if not isinstance(self.deck_id, int):
            raise TypeError(f'Deck .deck_id must be an integer, not {self.deck_id}.')
        if not isinstance(self.name, str):
            raise TypeError(f'Deck .name must be a string, not {self.name}.')
        # Make sure every queued note's model is registered before serializing.
        for queued_note in self.notes:
            self.add_model(queued_note.model)
        serialized_models = {}
        for model in self.models.values():
            serialized_models[model.model_id] = model.to_json(now_ts, self.deck_id)
        cursor.execute(APKG_COL, [self.name, self.deck_id, json.dumps(serialized_models)])
        for queued_note in self.notes:
            queued_note.write_to_db(cursor, now_ts, self.deck_id)

    def write_to_file(self, file):
        """Write this deck to a .apkg file."""
        from .package import Package
        Package(self).write_to_file(file)

    def write_to_collection_from_addon(self):
        """Write to the local collection. *Only usable when running inside an Anki addon!*

        Only tested on Anki 2.1. This writes to a temporary file and then calls
        the code that Anki uses to import packages.

        The caller may want to use mw.checkpoint and mw.reset, e.g.::

            mw.checkpoint('Add Notes From MyAddon')
            my_package.write_to_collection_from_addon()
            mw.reset()

        Tip: if your deck has the same name and ID as an existing deck, the notes
        are placed in that deck rather than a new deck being created.
        """
        from .package import Package
        Package(self).write_to_collection_from_addon()
| 32.716667 | 118 | 0.691798 | import json
from .apkg_col import APKG_COL
class Deck:
def __init__(self, deck_id=None, name=None):
self.deck_id = deck_id
self.name = name
self.notes = []
self.models = {}
def add_note(self, note):
self.notes.append(note)
def add_model(self, model):
self.models[model.model_id] = model
def write_to_db(self, cursor, now_ts):
if not isinstance(self.deck_id, int):
raise TypeError('Deck .deck_id must be an integer, not {}.'.format(self.deck_id))
if not isinstance(self.name, str):
raise TypeError('Deck .name must be a string, not {}.'.format(self.name))
for note in self.notes:
self.add_model(note.model)
models = {model.model_id: model.to_json(now_ts, self.deck_id) for model in self.models.values()}
cursor.execute(APKG_COL, [self.name, self.deck_id, json.dumps(models)])
for note in self.notes:
note.write_to_db(cursor, now_ts, self.deck_id)
def write_to_file(self, file):
from .package import Package
Package(self).write_to_file(file)
def write_to_collection_from_addon(self):
from .package import Package
Package(self).write_to_collection_from_addon()
| true | true |
1c2ea92a4a85689ac010f07469d5822e50872e06 | 9,408 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/aaa/aaauser_authorizationpolicy_binding.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/aaa/aaauser_authorizationpolicy_binding.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/aaa/aaauser_authorizationpolicy_binding.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class aaauser_authorizationpolicy_binding(base_resource) :
	""" Binding class showing the authorizationpolicy that can be bound to aaauser.

	Auto-generated NITRO SDK binding resource: attribute names and the class
	name itself are used reflectively by the NITRO payload formatter, so they
	must not be renamed.
	"""
	def __init__(self) :
		self._policy = None
		self._priority = None
		self._acttype = None
		self._type = None
		self._gotopriorityexpression = None
		self._username = None
		# Populated by the NITRO service when a count operation is requested.
		self.___count = None

	@property
	def priority(self) :
		r"""Integer specifying the priority of the policy. A lower number indicates a higher priority. Policies are evaluated in the order of their priority numbers. Maximum value for default syntax policies is 2147483647 and for classic policies max priority is 64000. .<br/>Minimum value =  0<br/>Maximum value =  2147483647.
		"""
		try :
			return self._priority
		except Exception as e:
			raise e

	@priority.setter
	def priority(self, priority) :
		r"""Integer specifying the priority of the policy. A lower number indicates a higher priority. Policies are evaluated in the order of their priority numbers. Maximum value for default syntax policies is 2147483647 and for classic policies max priority is 64000. .<br/>Minimum value =  0<br/>Maximum value =  2147483647
		"""
		try :
			self._priority = priority
		except Exception as e:
			raise e

	@property
	def username(self) :
		r"""User account to which to bind the policy.<br/>Minimum length =  1.
		"""
		try :
			return self._username
		except Exception as e:
			raise e

	@username.setter
	def username(self, username) :
		r"""User account to which to bind the policy.<br/>Minimum length =  1
		"""
		try :
			self._username = username
		except Exception as e:
			raise e

	@property
	def gotopriorityexpression(self) :
		r"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
		"""
		try :
			return self._gotopriorityexpression
		except Exception as e:
			raise e

	@gotopriorityexpression.setter
	def gotopriorityexpression(self, gotopriorityexpression) :
		r"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
		"""
		try :
			self._gotopriorityexpression = gotopriorityexpression
		except Exception as e:
			raise e

	@property
	def policy(self) :
		r"""The policy Name.
		"""
		try :
			return self._policy
		except Exception as e:
			raise e

	@policy.setter
	def policy(self, policy) :
		r"""The policy Name.
		"""
		try :
			self._policy = policy
		except Exception as e:
			raise e

	@property
	def type(self) :
		r"""Bindpoint to which the policy is bound.<br/>Default value: REQUEST<br/>Possible values = REQUEST, UDP_REQUEST, DNS_REQUEST, ICMP_REQUEST.
		"""
		try :
			return self._type
		except Exception as e:
			raise e

	@type.setter
	def type(self, type) :
		r"""Bindpoint to which the policy is bound.<br/>Default value: REQUEST<br/>Possible values = REQUEST, UDP_REQUEST, DNS_REQUEST, ICMP_REQUEST
		"""
		try :
			self._type = type
		except Exception as e:
			raise e

	@property
	def acttype(self) :
		# Read-only attribute reported by the appliance; no setter is generated.
		try :
			return self._acttype
		except Exception as e:
			raise e

	def _get_nitro_response(self, service, response) :
		r""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(aaauser_authorizationpolicy_binding_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# errorcode 444 means the session expired; drop it before raising.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.aaauser_authorizationpolicy_binding
		except Exception as e :
			raise e

	def _get_object_name(self) :
		r""" Returns the value of object identifier argument
		"""
		try :
			if self.username is not None :
				return str(self.username)
			return None
		except Exception as e :
			raise e

	@classmethod
	def filter_add_parameters(cls, resource) :
		r""" Use this function to create a resource with only add operation specific parameters.
		"""
		addresource = aaauser_authorizationpolicy_binding()
		addresource.username = resource.username
		addresource.policy = resource.policy
		addresource.priority = resource.priority
		addresource.type = resource.type
		addresource.gotopriorityexpression = resource.gotopriorityexpression
		return addresource

	@classmethod
	def add(cls, client, resource) :
		r""" Use this API to add (bind) a single resource or a list of resources.
		"""
		try :
			if resource and type(resource) is not list :
				updateresource = cls.filter_add_parameters(resource)
				return updateresource.update_resource(client)
			else :
				if resource and len(resource) > 0 :
					updateresources = [aaauser_authorizationpolicy_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						updateresources[i] = cls.filter_add_parameters(resource[i])
				return cls.update_bulk_request(client, updateresources)
		except Exception as e :
			raise e

	@classmethod
	def filter_delete_parameters(cls, resource) :
		r""" Use this function to create a resource with only delete operation specific parameters.
		"""
		deleteresource = aaauser_authorizationpolicy_binding()
		deleteresource.username = resource.username
		deleteresource.policy = resource.policy
		deleteresource.type = resource.type
		return deleteresource

	@classmethod
	def delete(cls, client, resource) :
		r""" Use this API to delete (unbind) a single resource or a list of resources.
		"""
		try :
			if resource and type(resource) is not list :
				deleteresource = cls.filter_delete_parameters(resource)
				return deleteresource.delete_resource(client)
			else :
				if resource and len(resource) > 0 :
					deleteresources = [aaauser_authorizationpolicy_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						deleteresources[i] = cls.filter_delete_parameters(resource[i])
				return cls.delete_bulk_request(client, deleteresources)
		except Exception as e :
			raise e

	@classmethod
	def get(cls, service, username="", option_="") :
		r""" Use this API to fetch aaauser_authorizationpolicy_binding resources.
		"""
		try :
			if not username :
				obj = aaauser_authorizationpolicy_binding()
				response = obj.get_resources(service, option_)
			else :
				obj = aaauser_authorizationpolicy_binding()
				obj.username = username
				response = obj.get_resources(service)
			return response
		except Exception as e:
			raise e

	@classmethod
	def get_filtered(cls, service, username, filter_) :
		r""" Use this API to fetch filtered set of aaauser_authorizationpolicy_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
		"""
		try :
			obj = aaauser_authorizationpolicy_binding()
			obj.username = username
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			return response
		except Exception as e:
			raise e

	@classmethod
	def count(cls, service, username) :
		r""" Use this API to count aaauser_authorizationpolicy_binding resources configured on NetScaler.
		"""
		try :
			obj = aaauser_authorizationpolicy_binding()
			obj.username = username
			option_ = options()
			option_.count = True
			response = obj.get_resources(service, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e

	@classmethod
	def count_filtered(cls, service, username, filter_) :
		r""" Use this API to count the filtered set of aaauser_authorizationpolicy_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
		"""
		try :
			obj = aaauser_authorizationpolicy_binding()
			obj.username = username
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e

	class Type:
		# Enumeration of valid values for the `type` bindpoint attribute.
		REQUEST = "REQUEST"
		UDP_REQUEST = "UDP_REQUEST"
		DNS_REQUEST = "DNS_REQUEST"
		ICMP_REQUEST = "ICMP_REQUEST"
class aaauser_authorizationpolicy_binding_response(base_response) :
	""" Response envelope deserialized by the NITRO payload formatter for
	aaauser_authorizationpolicy_binding requests.
	"""
	def __init__(self, length=1) :
		self.aaauser_authorizationpolicy_binding = []
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate `length` empty binding objects for the formatter to fill.
		self.aaauser_authorizationpolicy_binding = [aaauser_authorizationpolicy_binding() for _ in range(length)]
| 32 | 322 | 0.737883 |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class aaauser_authorizationpolicy_binding(base_resource) :
def __init__(self) :
self._policy = None
self._priority = None
self._acttype = None
self._type = None
self._gotopriorityexpression = None
self._username = None
self.___count = None
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
try :
self._priority = priority
except Exception as e:
raise e
@property
def username(self) :
try :
return self._username
except Exception as e:
raise e
@username.setter
def username(self, username) :
try :
self._username = username
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policy(self) :
try :
return self._policy
except Exception as e:
raise e
@policy.setter
def policy(self, policy) :
try :
self._policy = policy
except Exception as e:
raise e
@property
def type(self) :
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
try :
self._type = type
except Exception as e:
raise e
@property
def acttype(self) :
try :
return self._acttype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
try :
result = service.payload_formatter.string_to_resource(aaauser_authorizationpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.aaauser_authorizationpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
try :
if self.username is not None :
return str(self.username)
return None
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
addresource = aaauser_authorizationpolicy_binding()
addresource.username = resource.username
addresource.policy = resource.policy
addresource.priority = resource.priority
addresource.type = resource.type
addresource.gotopriorityexpression = resource.gotopriorityexpression
return addresource
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = cls.filter_add_parameters(resource)
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [aaauser_authorizationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_add_parameters(resource[i])
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def filter_delete_parameters(cls, resource) :
deleteresource = aaauser_authorizationpolicy_binding()
deleteresource.username = resource.username
deleteresource.policy = resource.policy
deleteresource.type = resource.type
return deleteresource
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = cls.filter_delete_parameters(resource)
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [aaauser_authorizationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i] = cls.filter_delete_parameters(resource[i])
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, username="", option_="") :
try :
if not username :
obj = aaauser_authorizationpolicy_binding()
response = obj.get_resources(service, option_)
else :
obj = aaauser_authorizationpolicy_binding()
obj.username = username
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, username, filter_) :
try :
obj = aaauser_authorizationpolicy_binding()
obj.username = username
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, username) :
try :
obj = aaauser_authorizationpolicy_binding()
obj.username = username
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, username, filter_) :
try :
obj = aaauser_authorizationpolicy_binding()
obj.username = username
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Type:
REQUEST = "REQUEST"
UDP_REQUEST = "UDP_REQUEST"
DNS_REQUEST = "DNS_REQUEST"
ICMP_REQUEST = "ICMP_REQUEST"
class aaauser_authorizationpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.aaauser_authorizationpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.aaauser_authorizationpolicy_binding = [aaauser_authorizationpolicy_binding() for _ in range(length)]
| true | true |
1c2ea92c956ad3a9d2609c38a046d8ef976c4efa | 4,139 | py | Python | benchmark/startQiskit_noisy954.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy954.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy954.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=45
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Construct the phase oracle Z_f for the boolean function *f*.

    For every n-bit string s with f(s) == "1", the oracle flips the phase of
    the basis state |s>: qubits that are 0 in s are conjugated with X gates
    around a multi-controlled U1(pi) (i.e. a multi-controlled Z) so that the
    phase flip lands on exactly that state.

    Args:
        n: number of oracle qubits.
        f: callable mapping an n-character bit string to "0" or "1".

    Returns:
        A QuantumCircuit named "Zf" acting on an n-qubit register "ofc".
    """
    qubits = QuantumRegister(n, "ofc")
    circuit = QuantumCircuit(qubits, name="Zf")
    for state in range(2 ** n):
        bits = np.binary_repr(state, n)
        if f(bits) != "1":
            continue
        # Positions that must be X-conjugated so the control pattern matches.
        zero_positions = [idx for idx, bit in enumerate(bits) if bit == "0"]
        for idx in zero_positions:
            circuit.x(qubits[idx])
        if n >= 2:
            # multi-controlled U1(pi) == multi-controlled Z (up to global phase)
            circuit.mcu1(pi, qubits[1:], qubits[0])
        for idx in zero_positions:
            circuit.x(qubits[idx])
    return circuit
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the (auto-generated) Grover-style search circuit for oracle *f*.

    The `# number=NN` tags are generation bookkeeping from the benchmark
    tooling; gate order is significant and must not be rearranged.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Initial uniform superposition over all qubits.
    prog.h(input_qubit[0])  # number=3
    prog.h(input_qubit[1])  # number=4
    prog.h(input_qubit[2])  # number=5
    prog.h(input_qubit[3])  # number=6
    prog.h(input_qubit[4])  # number=21
    Zf = build_oracle(n, f)

    # Optimal Grover iteration count; computed but unused — the loop below is
    # fixed to a single iteration by the generator.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(1):
        # Oracle application followed by the (mutated) diffusion operator.
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0])  # number=1
        prog.h(input_qubit[1])  # number=2
        prog.h(input_qubit[2])  # number=7
        prog.cx(input_qubit[1],input_qubit[2]) # number=38
        prog.h(input_qubit[3])  # number=8
        prog.h(input_qubit[0])  # number=39
        prog.cz(input_qubit[1],input_qubit[0]) # number=40
        prog.h(input_qubit[0])  # number=41
        prog.h(input_qubit[0])  # number=31
        prog.cz(input_qubit[1],input_qubit[0]) # number=32
        prog.h(input_qubit[0])  # number=33
        prog.x(input_qubit[0])  # number=29
        prog.cx(input_qubit[1],input_qubit[0]) # number=30
        prog.h(input_qubit[0])  # number=34
        prog.cz(input_qubit[1],input_qubit[0]) # number=35
        prog.h(input_qubit[0])  # number=36
        prog.x(input_qubit[1])  # number=10
        prog.cx(input_qubit[0],input_qubit[2]) # number=25
        prog.x(input_qubit[2])  # number=26
        prog.cx(input_qubit[0],input_qubit[2]) # number=27
        prog.x(input_qubit[3])  # number=12

        if n>=2:
            # Multi-controlled phase flip at the heart of the diffuser.
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])

        prog.x(input_qubit[0])  # number=13
        prog.x(input_qubit[1])  # number=14
        prog.x(input_qubit[2])  # number=15
        prog.x(input_qubit[3])  # number=16
        prog.h(input_qubit[0])  # number=17
        prog.h(input_qubit[1])  # number=18
        prog.h(input_qubit[2])  # number=19
        prog.h(input_qubit[4])  # number=42
        prog.cz(input_qubit[2],input_qubit[4]) # number=43
        prog.h(input_qubit[4])  # number=44
        prog.h(input_qubit[3])  # number=20

        prog.h(input_qubit[0])
        prog.h(input_qubit[1])
        prog.h(input_qubit[2])
        prog.h(input_qubit[3])

    # circuit end
    # Measure every qubit into its classical register bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Grover search for the single marked 5-bit string "00000".
    key = "00000"
    # Oracle predicate: "1" only for the marked string, "0" otherwise.
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5, f)

    # Execute on the noisy FakeVigo mock backend and collect counts.
    # (The original constructed FakeVigo twice; one instance suffices.)
    backend = FakeVigo()
    sample_shot = 7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    circuit1 = transpile(prog, backend, optimization_level=2)

    # Write counts, an end marker, circuit depth and the transpiled circuit.
    # The context manager guarantees the file is closed even if a print fails
    # (the original used a bare open()/close() pair).
    with open("../data/startQiskit_noisy954.csv", "w") as writefile:
        print(info, file=writefile)
        print("results end", file=writefile)
        print(circuit1.depth(), file=writefile)
        print(circuit1, file=writefile)
| 30.88806 | 82 | 0.60691 |
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[4])
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(1):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.cx(input_qubit[1],input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.x(input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.x(input_qubit[1])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[3])
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0])
prog.x(input_qubit[1])
prog.x(input_qubit[2])
prog.x(input_qubit[3])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[4])
prog.cz(input_qubit[2],input_qubit[4])
prog.h(input_qubit[4])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = FakeVigo()
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy954.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
1c2ea9c9fc370bfa92466b9a39561738eb589c91 | 1,175 | py | Python | calories_screen.py | RossinesP/pygamer_bike_game | 2b852126f1d04d6fc5e1053a3217862d86c7856d | [
"MIT"
] | null | null | null | calories_screen.py | RossinesP/pygamer_bike_game | 2b852126f1d04d6fc5e1053a3217862d86c7856d | [
"MIT"
] | null | null | null | calories_screen.py | RossinesP/pygamer_bike_game | 2b852126f1d04d6fc5e1053a3217862d86c7856d | [
"MIT"
] | null | null | null | from screen import Screen
from tracker import Tracker
from adafruit_display_text import label
import neopixel
import terminalio
class CaloriesScreen(Screen):
def __init__(self, screen_manager, pixels: neopixel.NeoPixel, tracker: Tracker):
super().__init__(screen_manager, pixels, tracker)
font = terminalio.FONT
color = 0xFFFFFF
self.cal_label = label.Label(font, text="0.00", color=color, scale= 5)
self.cal_label.x = 25
self.cal_label.y = 50
self.cal_label.anchor_point = (1,0)
self.cal_label.text = self.get_calories_text(tracker.calories)
self.cal_unit_label = label.Label(font, text="km", color=color, scale=2)
self.cal_unit_label.x = 70
self.cal_unit_label.y = 100
self.cal_unit_label.text = "cal"
self.group.append(self.cal_label)
self.group.append(self.cal_unit_label)
def get_calories_text(self, calories: float):
if calories >= 10000:
return "LOTS"
else:
return f"{calories:.0f}"
def on_tracker_update(self, now: int):
self.cal_label.text = self.get_calories_text(self.tracker.calories) | 33.571429 | 84 | 0.670638 | from screen import Screen
from tracker import Tracker
from adafruit_display_text import label
import neopixel
import terminalio
class CaloriesScreen(Screen):
def __init__(self, screen_manager, pixels: neopixel.NeoPixel, tracker: Tracker):
super().__init__(screen_manager, pixels, tracker)
font = terminalio.FONT
color = 0xFFFFFF
self.cal_label = label.Label(font, text="0.00", color=color, scale= 5)
self.cal_label.x = 25
self.cal_label.y = 50
self.cal_label.anchor_point = (1,0)
self.cal_label.text = self.get_calories_text(tracker.calories)
self.cal_unit_label = label.Label(font, text="km", color=color, scale=2)
self.cal_unit_label.x = 70
self.cal_unit_label.y = 100
self.cal_unit_label.text = "cal"
self.group.append(self.cal_label)
self.group.append(self.cal_unit_label)
def get_calories_text(self, calories: float):
if calories >= 10000:
return "LOTS"
else:
return f"{calories:.0f}"
def on_tracker_update(self, now: int):
self.cal_label.text = self.get_calories_text(self.tracker.calories) | true | true |
1c2eaa0ab91564a8c0e4fbb3f53ce5a5e680221a | 10,973 | py | Python | deeppavlov/tasks/insults/build.py | deepmipt/kpi2017 | 0f6b13c6ea76e544804ce66ba372c66d5ef9ee30 | [
"Apache-2.0"
] | 3 | 2018-02-19T15:34:44.000Z | 2018-06-05T10:02:00.000Z | deeppavlov/tasks/insults/build.py | deepmipt/kpi2017 | 0f6b13c6ea76e544804ce66ba372c66d5ef9ee30 | [
"Apache-2.0"
] | null | null | null | deeppavlov/tasks/insults/build.py | deepmipt/kpi2017 | 0f6b13c6ea76e544804ce66ba372c66d5ef9ee30 | [
"Apache-2.0"
] | 1 | 2021-03-22T09:06:52.000Z | 2021-03-22T09:06:52.000Z | # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import parlai.core.build_data as build_data
import os
import re
import string
import numpy as np
import pandas as pd
import urllib
def data_preprocessing(f):
    """Preprocess the data.

    Normalizes a list of raw comment strings: lowercases, unescapes literal
    whitespace sequences, spaces out punctuation, expands slang and
    contractions, collapses repeated letters, substitutes known obfuscated
    bad words (from badwords.txt next to this module), applies light
    stemming, and masks numbers/expletive symbol runs.

    Args:
        f: list of text samples

    Returns:
        preprocessed list of text samples
    """
    f = [x.lower() for x in f]
    f = [re.sub(r'^"|"$', '', x) for x in f]
    # Unescape literal whitespace escape sequences left by the CSV export.
    f = [x.replace("\\n", " ") for x in f]
    f = [x.replace("\\t", " ") for x in f]
    f = [x.replace("\\xa0", " ") for x in f]
    f = [x.replace("\\xc2", " ") for x in f]
    # Tokenize punctuation by surrounding it with spaces, keeping '!!', '??',
    # '?!' and '..' as single tokens.
    f = [re.sub('!!+', ' !! ', x) for x in f]
    f = [re.sub('!', ' ! ', x) for x in f]
    f = [re.sub('! !', '!!', x) for x in f]
    f = [re.sub('\?\?+', ' ?? ', x) for x in f]
    f = [re.sub('\?', ' ? ', x) for x in f]
    f = [re.sub('\? \?', '??', x) for x in f]
    f = [re.sub('\?!+', ' ?! ', x) for x in f]
    f = [re.sub('\.\.+', '..', x) for x in f]
    f = [re.sub('\.', ' . ', x) for x in f]
    f = [re.sub('\. \.', '..', x) for x in f]
    f = [re.sub(',', ' , ', x) for x in f]
    f = [re.sub(':', ' : ', x) for x in f]
    f = [re.sub(';', ' ; ', x) for x in f]
    f = [re.sub('\%', ' % ', x) for x in f]
    # '$' is commonly used to obfuscate the letter 's'.
    f = [x.replace("$", "s") for x in f]
    # Expand chat slang and contractions.
    f = [x.replace(" u ", " you ") for x in f]
    f = [x.replace(" em ", " them ") for x in f]
    f = [x.replace(" da ", " the ") for x in f]
    f = [x.replace(" yo ", " you ") for x in f]
    f = [x.replace(" ur ", " your ") for x in f]
    f = [x.replace("you\'re", "you are") for x in f]
    f = [x.replace(" u r ", " you are ") for x in f]
    f = [x.replace("yo\'re", " you are ") for x in f]
    f = [x.replace("yu\'re", " you are ") for x in f]
    f = [x.replace("u\'re", " you are ") for x in f]
    f = [x.replace(" urs ", " yours ") for x in f]
    f = [x.replace("y'all", "you all") for x in f]
    f = [x.replace(" r u ", " are you ") for x in f]
    f = [x.replace(" r you", " are you") for x in f]
    f = [x.replace(" are u ", " are you ") for x in f]
    # Normalize all "mother" variants to a single token.
    f = [x.replace(" mom ", " mother ") for x in f]
    f = [x.replace(" momm ", " mother ") for x in f]
    f = [x.replace(" mommy ", " mother ") for x in f]
    f = [x.replace(" momma ", " mother ") for x in f]
    f = [x.replace(" mama ", " mother ") for x in f]
    f = [x.replace(" mamma ", " mother ") for x in f]
    f = [x.replace(" mum ", " mother ") for x in f]
    f = [x.replace(" mummy ", " mother ") for x in f]
    f = [x.replace("won't", "will not") for x in f]
    f = [x.replace("can't", "cannot") for x in f]
    f = [x.replace("i'm", "i am") for x in f]
    f = [x.replace(" im ", " i am ") for x in f]
    f = [x.replace("ain't", "is not") for x in f]
    f = [x.replace("'ll", " will") for x in f]
    f = [x.replace("'t", " not") for x in f]
    f = [x.replace("'ve", " have") for x in f]
    f = [x.replace("'s", " is") for x in f]
    f = [x.replace("'re", " are") for x in f]
    f = [x.replace("'d", " would") for x in f]

    # Collapse runs of 3+ identical letters down to a single letter
    # (the original comment claimed "2 letters" but the substitution is one).
    for letter in string.ascii_lowercase:
        f = [re.sub(letter * 3 + '+', letter, x).strip() for x in f]

    # Load the obfuscated-bad-word map shipped next to this module.
    # Using a context manager fixes the original's leaked file handle.
    bwMap = dict()
    badwords_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "badwords.txt")
    with open(badwords_path, "r") as bad_words_file:
        for line in bad_words_file:
            sp = line.strip().lower().split(",")
            if len(sp) == 2:
                bwMap[sp[0].strip()] = sp[1].strip()

    for key, value in bwMap.items():
        kpad = " " + key + " "
        vpad = " " + value + " "
        f = [x.replace(kpad, vpad) for x in f]

    # Light stemming of plural/gerund suffixes.
    f = [re.sub("ies( |$)", "y ", x) for x in f]
    f = [re.sub("s( |$)", " ", x) for x in f]
    f = [re.sub("ing( |$)", " ", x) for x in f]
    f = [x.replace("tard ", " ") for x in f]

    # Mask symbol-run expletives, standalone numbers, and HTML-ish tags.
    f = [re.sub(" [*$%&#@][*$%&#@]+", " xexp ", x) for x in f]
    f = [re.sub(" [0-9]+ ", " DD ", x) for x in f]
    f = [re.sub("<\S*>", "", x) for x in f]
    f = [re.sub('\s+', ' ', x) for x in f]
    return f
def write_input_fasttext_cls(data, path, data_name):
    """Write down input files for fasttext classificator.

    Produces ``<path>_fasttext_cls.txt``. In "train" mode each line is
    ``__label__<label> <text>`` (fasttext supervised format); in "test"
    mode each line is the raw text only.

    Args:
        data: DataFrame whose column 0 holds labels and column 1 holds text
        path: path prefix for the output file
        data_name: mode of writing files "train" or "test"

    Returns:
        nothing
    """
    # Context manager guarantees the file is closed even if a row raises
    # (the original left the handle open on error).
    with open(path + '_fasttext_cls.txt', 'w') as f:
        for i in range(data.shape[0]):
            if data_name == 'train':
                f.write('__label__' + str(data.iloc[i, 0]) + ' ' + data.iloc[i, 1] + '\n')
            elif data_name == 'test':
                f.write(data.iloc[i, 1] + '\n')
            else:
                print('Incorrect data name')
def write_input_fasttext_emb(data, path, data_name):
    """Write down input files for fasttext embedding.

    Produces ``<path>_fasttext_emb.txt`` containing one raw text sample per
    line (no labels — fasttext unsupervised/embedding format).

    Args:
        data: DataFrame whose column 1 holds text samples
        path: path prefix for the output file
        data_name: mode of writing files "train" or "test"

    Returns:
        nothing
    """
    # Context manager guarantees the file is closed even if a row raises
    # (the original left the handle open on error).
    with open(path + '_fasttext_emb.txt', 'w') as f:
        for i in range(data.shape[0]):
            if data_name == 'train' or data_name == 'test':
                f.write(data.iloc[i, 1] + '\n')
            else:
                print('Incorrect data name')
def balance_dataset(dataset_0, labels_0, dataset_1, labels_1, ratio=1):
    """Balance the dataset_0 with samples from dataset_1 up to given ratio.

    Resamples (with replacement) insult rows of ``dataset_1`` and appends
    them to ``dataset_0`` so the insult count approaches ``ratio`` times
    the non-insult count. Uses ``np.random`` without seeding, so output is
    nondeterministic unless the caller seeds.

    Args:
        dataset_0: pandas Series of text samples to be balanced
        labels_0: pandas Series of 0/1 labels for dataset_0
        dataset_1: pandas Series of text samples to draw insults from
        labels_1: pandas Series of 0/1 labels for dataset_1
        ratio: ratio of samples of class 1 to samples of class 0 (default 1)

    Returns:
        balanced Series of text samples, corresponding Series of labels
    """
    initial_train_size = dataset_0.shape[0]
    insult_inds = np.nonzero(labels_1)[0]
    num_insults_0 = len(np.nonzero(labels_0)[0])
    num_insults_1 = len(np.nonzero(labels_1)[0])
    insult_inds_to_add = insult_inds[np.random.randint(
        low=0, high=num_insults_1,
        size=(ratio * (initial_train_size - num_insults_0) - num_insults_0))]
    # Series.append was removed in pandas 2.0; pd.concat is the equivalent,
    # version-safe way to stack the resampled rows (same default behavior:
    # original indices are preserved).
    result = pd.concat([dataset_0, dataset_1.iloc[insult_inds_to_add]])
    result_labels = pd.concat([labels_0, labels_1.iloc[insult_inds_to_add]])
    return result, result_labels
def build(opt):
    """Read and preprocess data, save preprocessed data, balance data,
    create input files for fasttext classifier and embeddings.

    If the raw Kaggle CSVs are not present at ``opt['raw_dataset_path']``,
    attempts to download them from the ``DATASETS_URL`` environment
    location; raises RuntimeError if neither is available.

    Args:
        opt: given parameters (needs 'datapath', 'raw_dataset_path',
             optionally 'balance_train_dataset')

    Returns:
        nothing
    """
    # get path to data directory
    dpath = os.path.join(opt['datapath'], 'insults')
    # define version if any
    version = '1.0'

    # check if data had been previously built
    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')

        # make a clean directory if needed
        if build_data.built(dpath):
            # an older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)

        raw_path = os.path.abspath(opt['raw_dataset_path'] or ".")
        train_file = os.path.join(raw_path, 'train.csv')
        valid_file = os.path.join(raw_path, 'test_with_solutions.csv')
        test_file = os.path.join(raw_path, 'impermium_verification_labels.csv')

        if not os.path.isfile(train_file) or not os.path.isfile(valid_file) or not os.path.isfile(test_file):
            ds_path = os.environ.get('DATASETS_URL')
            file_name = 'insults.tar.gz'
            if not ds_path:
                raise RuntimeError('Please download dataset files from'
                                   ' https://www.kaggle.com/c/detecting-insults-in-social-commentary/data'
                                   ' and set path to their directory in raw-dataset-path parameter')
            print('Trying to download a insults dataset from the repository')
            url = urllib.parse.urljoin(ds_path, file_name)
            print(repr(url))
            build_data.download(url, dpath, file_name)
            build_data.untar(dpath, file_name)
            opt['raw_dataset_path'] = dpath
            print('Downloaded a insults dataset')
            # Re-resolve the file locations inside the download directory.
            raw_path = os.path.abspath(opt['raw_dataset_path'])
            train_file = os.path.join(raw_path, 'train.csv')
            valid_file = os.path.join(raw_path, 'test_with_solutions.csv')
            test_file = os.path.join(raw_path, 'impermium_verification_labels.csv')

        train_data = pd.read_csv(train_file)
        train_data = train_data.drop('Date', axis=1)

        test_data = pd.read_csv(test_file)
        test_data = test_data.drop('id', axis=1)
        test_data = test_data.drop('Usage', axis=1)
        test_data = test_data.drop('Date', axis=1)

        valid_data = pd.read_csv(valid_file)
        valid_data = valid_data.drop('Date', axis=1)
        valid_data = valid_data.drop('Usage', axis=1)

        # merge train and valid due to use of cross validation.
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # version-safe equivalent with the same default behavior.
        train_data = pd.concat([train_data, valid_data])

        if opt.get('balance_train_dataset'):
            if opt['balance_train_dataset']:
                train_data['Comment'], train_data['Insult'] = balance_dataset(train_data['Comment'],
                                                                              train_data['Insult'],
                                                                              train_data['Comment'],
                                                                              train_data['Insult'], ratio=1)
        print('Preprocessing train')
        train_data['Comment'] = data_preprocessing(train_data['Comment'])
        print('Preprocessing test')
        test_data['Comment'] = data_preprocessing(test_data['Comment'])

        print('Writing input files for fasttext')
        write_input_fasttext_cls(train_data, os.path.join(dpath, 'train'), 'train')
        write_input_fasttext_cls(test_data, os.path.join(dpath, 'test'), 'test')
        write_input_fasttext_emb(train_data, os.path.join(dpath, 'train'), 'train')
        write_input_fasttext_emb(test_data, os.path.join(dpath, 'test'), 'test')

        print('Writing input normalized input files')
        train_data.to_csv(os.path.join(dpath, 'train.csv'), index=False)
        test_data.to_csv(os.path.join(dpath, 'test.csv'), index=False)

        # mark the data as built
        build_data.mark_done(dpath, version_string=version)
| 38.773852 | 124 | 0.564385 |
import parlai.core.build_data as build_data
import os
import re
import string
import numpy as np
import pandas as pd
import urllib
def data_preprocessing(f):
    """Normalize a sequence of comment strings for insult classification.

    Lowercases, strips wrapping quotes and escape artifacts, pads
    punctuation with spaces, expands common slang and contractions, maps
    obfuscated profanity via the adjacent ``badwords.txt`` file, applies a
    crude suffix stemming pass and collapses whitespace.

    :param f: iterable of raw comment strings
    :return: list of normalized strings (a new list; input is not modified)
    """
    f = [x.lower() for x in f]
    f = [re.sub(r'^"|"$', '', x) for x in f]
    f = [x.replace("\\n", " ") for x in f]
    f = [x.replace("\\t", " ") for x in f]
    f = [x.replace("\\xa0", " ") for x in f]
    f = [x.replace("\\xc2", " ") for x in f]
    f = [re.sub('!!+', ' !! ', x) for x in f]
    f = [re.sub('!', ' ! ', x) for x in f]
    f = [re.sub('! !', '!!', x) for x in f]
    f = [re.sub('\?\?+', ' ?? ', x) for x in f]
    f = [re.sub('\?', ' ? ', x) for x in f]
    f = [re.sub('\? \?', '??', x) for x in f]
    f = [re.sub('\?!+', ' ?! ', x) for x in f]
    f = [re.sub('\.\.+', '..', x) for x in f]
    f = [re.sub('\.', ' . ', x) for x in f]
    f = [re.sub('\. \.', '..', x) for x in f]
    f = [re.sub(',', ' , ', x) for x in f]
    f = [re.sub(':', ' : ', x) for x in f]
    f = [re.sub(';', ' ; ', x) for x in f]
    f = [re.sub('\%', ' % ', x) for x in f]
    f = [x.replace("$", "s") for x in f]
    f = [x.replace(" u ", " you ") for x in f]
    f = [x.replace(" em ", " them ") for x in f]
    f = [x.replace(" da ", " the ") for x in f]
    f = [x.replace(" yo ", " you ") for x in f]
    f = [x.replace(" ur ", " your ") for x in f]
    f = [x.replace("you\'re", "you are") for x in f]
    f = [x.replace(" u r ", " you are ") for x in f]
    f = [x.replace("yo\'re", " you are ") for x in f]
    f = [x.replace("yu\'re", " you are ") for x in f]
    f = [x.replace("u\'re", " you are ") for x in f]
    f = [x.replace(" urs ", " yours ") for x in f]
    f = [x.replace("y'all", "you all") for x in f]
    f = [x.replace(" r u ", " are you ") for x in f]
    f = [x.replace(" r you", " are you") for x in f]
    f = [x.replace(" are u ", " are you ") for x in f]
    f = [x.replace(" mom ", " mother ") for x in f]
    f = [x.replace(" momm ", " mother ") for x in f]
    f = [x.replace(" mommy ", " mother ") for x in f]
    f = [x.replace(" momma ", " mother ") for x in f]
    f = [x.replace(" mama ", " mother ") for x in f]
    f = [x.replace(" mamma ", " mother ") for x in f]
    f = [x.replace(" mum ", " mother ") for x in f]
    f = [x.replace(" mummy ", " mother ") for x in f]
    f = [x.replace("won't", "will not") for x in f]
    f = [x.replace("can't", "cannot") for x in f]
    f = [x.replace("i'm", "i am") for x in f]
    f = [x.replace(" im ", " i am ") for x in f]
    f = [x.replace("ain't", "is not") for x in f]
    f = [x.replace("'ll", " will") for x in f]
    f = [x.replace("'t", " not") for x in f]
    f = [x.replace("'ve", " have") for x in f]
    f = [x.replace("'s", " is") for x in f]
    f = [x.replace("'re", " are") for x in f]
    f = [x.replace("'d", " would") for x in f]
    # Collapse runs of 3+ identical letters.
    # NOTE(review): the original comment said "by 2 letters", but the code
    # collapses to a single letter -- confirm which behaviour is intended.
    for letter in string.ascii_lowercase:
        f = [re.sub(letter * 3 + '+', letter, x).strip() for x in f]
    # Map obfuscated bad words to canonical forms.  ``with`` fixes the
    # file-handle leak in the original (the file was never closed).
    bwMap = dict()
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           "badwords.txt"), "r") as bad_words_file:
        for line in bad_words_file:
            sp = line.strip().lower().split(",")
            if len(sp) == 2:
                bwMap[sp[0].strip()] = sp[1].strip()
    for key, value in bwMap.items():
        kpad = " " + key + " "
        vpad = " " + value + " "
        f = [x.replace(kpad, vpad) for x in f]
    # Crude suffix stemming (plural / gerund endings).
    f = [re.sub("ies( |$)", "y ", x) for x in f]
    f = [re.sub("s( |$)", " ", x) for x in f]
    f = [re.sub("ing( |$)", " ", x) for x in f]
    f = [x.replace("tard ", " ") for x in f]
    f = [re.sub(" [*$%&#@][*$%&#@]+", " xexp ", x) for x in f]
    f = [re.sub(" [0-9]+ ", " DD ", x) for x in f]
    f = [re.sub("<\S*>", "", x) for x in f]
    f = [re.sub('\s+', ' ', x) for x in f]
    return f
def write_input_fasttext_cls(data, path, data_name):
    """Write a fastText classification input file to ``<path>_fasttext_cls.txt``.

    For ``data_name == 'train'`` each line is ``__label__<label> <comment>``;
    for ``data_name == 'test'`` only the raw comment is written.  Any other
    name prints a warning per row (original behaviour preserved).

    :param data: DataFrame with the label in column 0 and the comment in column 1
    :param path: output file path prefix
    :param data_name: ``'train'`` or ``'test'``
    """
    # ``with`` guarantees the handle is closed even if a row raises
    # (the original leaked the handle on any exception).
    with open(path + '_fasttext_cls.txt', 'w') as f:
        for i in range(data.shape[0]):
            if data_name == 'train':
                f.write('__label__' + str(data.iloc[i, 0]) + ' ' + data.iloc[i, 1] + '\n')
            elif data_name == 'test':
                f.write(data.iloc[i, 1] + '\n')
            else:
                print('Incorrect data name')
def write_input_fasttext_emb(data, path, data_name):
    """Write a fastText embedding input file to ``<path>_fasttext_emb.txt``.

    One raw comment per line (no labels) for both train and test splits;
    any other name prints a warning per row (original behaviour preserved).

    :param data: DataFrame with the comment in column 1
    :param path: output file path prefix
    :param data_name: ``'train'`` or ``'test'``
    """
    # ``with`` fixes the handle leak of the original on exceptions.
    with open(path + '_fasttext_emb.txt', 'w') as f:
        for i in range(data.shape[0]):
            if data_name == 'train' or data_name == 'test':
                f.write(data.iloc[i, 1] + '\n')
            else:
                print('Incorrect data name')
def balance_dataset(dataset_0, labels_0, dataset_1, labels_1, ratio=1):
    """Oversample insult rows from ``(dataset_1, labels_1)`` onto ``dataset_0``.

    Randomly draws (with replacement) insult examples from the second pair
    and appends them to the first, so that the number of insults approaches
    ``ratio`` times the number of non-insults.

    :param dataset_0: base comments (pandas Series)
    :param labels_0: base labels, nonzero == insult (pandas Series)
    :param dataset_1: pool of comments to draw insults from (pandas Series)
    :param labels_1: labels for the pool (pandas Series)
    :param ratio: target insults-per-non-insult ratio
    :return: ``(comments, labels)`` with the sampled rows appended
    """
    initial_train_size = dataset_0.shape[0]
    insult_inds = np.nonzero(labels_1)[0]
    num_insults_0 = len(np.nonzero(labels_0)[0])
    num_insults_1 = len(np.nonzero(labels_1)[0])
    # Indices are drawn with replacement from the insult pool.
    insult_inds_to_add = insult_inds[np.random.randint(
        low=0, high=num_insults_1,
        size=(ratio * (initial_train_size - num_insults_0) - num_insults_0))]
    # pd.concat replaces Series.append, which was removed in pandas 2.0.
    result = pd.concat([dataset_0, dataset_1.iloc[insult_inds_to_add]])
    result_labels = pd.concat([labels_0, labels_1.iloc[insult_inds_to_add]])
    return result, result_labels
def build(opt):
    """Download (if needed) and preprocess the Kaggle insults dataset.

    Looks for the raw CSV files under ``opt['raw_dataset_path']``, falling
    back to downloading ``insults.tar.gz`` from ``$DATASETS_URL``; then
    normalizes the text, writes fastText input files and the cleaned
    train/test CSVs into ``<datapath>/insults`` and marks the build done.

    :param opt: option dict with ``datapath``, ``raw_dataset_path`` and the
        optional ``balance_train_dataset`` flag
    :raises RuntimeError: if the raw files are missing and ``DATASETS_URL``
        is not set
    """
    # get path to data directory
    dpath = os.path.join(opt['datapath'], 'insults')
    # define version if any
    version = '1.0'
    # check if data had been previously built
    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        # make a clean directory if needed
        if build_data.built(dpath):
            # an older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        raw_path = os.path.abspath(opt['raw_dataset_path'] or ".")
        train_file = os.path.join(raw_path, 'train.csv')
        valid_file = os.path.join(raw_path, 'test_with_solutions.csv')
        test_file = os.path.join(raw_path, 'impermium_verification_labels.csv')
        if not os.path.isfile(train_file) or not os.path.isfile(valid_file) or not os.path.isfile(test_file):
            ds_path = os.environ.get('DATASETS_URL')
            file_name = 'insults.tar.gz'
            if not ds_path:
                raise RuntimeError('Please download dataset files from'
                                   ' https://www.kaggle.com/c/detecting-insults-in-social-commentary/data'
                                   ' and set path to their directory in raw-dataset-path parameter')
            print('Trying to download a insults dataset from the repository')
            url = urllib.parse.urljoin(ds_path, file_name)
            print(repr(url))
            build_data.download(url, dpath, file_name)
            build_data.untar(dpath, file_name)
            opt['raw_dataset_path'] = dpath
            print('Downloaded a insults dataset')
        raw_path = os.path.abspath(opt['raw_dataset_path'])
        train_file = os.path.join(raw_path, 'train.csv')
        valid_file = os.path.join(raw_path, 'test_with_solutions.csv')
        test_file = os.path.join(raw_path, 'impermium_verification_labels.csv')
        train_data = pd.read_csv(train_file)
        train_data = train_data.drop('Date', axis=1)
        test_data = pd.read_csv(test_file)
        test_data = test_data.drop('id', axis=1)
        test_data = test_data.drop('Usage', axis=1)
        test_data = test_data.drop('Date', axis=1)
        valid_data = pd.read_csv(valid_file)
        valid_data = valid_data.drop('Date', axis=1)
        valid_data = valid_data.drop('Usage', axis=1)
        # merge train and valid due to use of cross validation;
        # pd.concat replaces DataFrame.append (removed in pandas 2.0)
        train_data = pd.concat([train_data, valid_data])
        # collapsed the redundant nested 'if' of the original
        if opt.get('balance_train_dataset'):
            train_data['Comment'], train_data['Insult'] = balance_dataset(
                train_data['Comment'],
                train_data['Insult'],
                train_data['Comment'],
                train_data['Insult'], ratio=1)
        print('Preprocessing train')
        train_data['Comment'] = data_preprocessing(train_data['Comment'])
        print('Preprocessing test')
        test_data['Comment'] = data_preprocessing(test_data['Comment'])
        print('Writing input files for fasttext')
        write_input_fasttext_cls(train_data, os.path.join(dpath, 'train'), 'train')
        write_input_fasttext_cls(test_data, os.path.join(dpath, 'test'), 'test')
        write_input_fasttext_emb(train_data, os.path.join(dpath, 'train'), 'train')
        write_input_fasttext_emb(test_data, os.path.join(dpath, 'test'), 'test')
        print('Writing input normalized input files')
        train_data.to_csv(os.path.join(dpath, 'train.csv'), index=False)
        test_data.to_csv(os.path.join(dpath, 'test.csv'), index=False)
        # mark the data as built
        build_data.mark_done(dpath, version_string=version)
1c2eab47e8c5b4f21800af9cd5ed1e8c95cb0209 | 4,176 | py | Python | src/compas_assembly/datastructures/block.py | BlockResearchGroup/compas_assembly | 6a257e1afaf304f9ddad02baed2396e5bacf91f8 | [
"MIT"
] | 8 | 2019-01-30T18:08:07.000Z | 2021-06-25T09:35:01.000Z | src/compas_assembly/datastructures/block.py | BlockResearchGroup/compas_assembly | 6a257e1afaf304f9ddad02baed2396e5bacf91f8 | [
"MIT"
] | 6 | 2019-07-17T11:29:45.000Z | 2020-03-20T13:32:38.000Z | src/compas_assembly/datastructures/block.py | BlockResearchGroup/compas_assembly | 6a257e1afaf304f9ddad02baed2396e5bacf91f8 | [
"MIT"
] | 18 | 2019-01-29T09:02:40.000Z | 2021-12-09T09:52:25.000Z | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import centroid_points
from compas.geometry import cross_vectors
from compas.geometry import dot_vectors
from compas.geometry import normalize_vector
from compas.geometry import centroid_polyhedron
from compas.geometry import volume_polyhedron
from compas.datastructures import Mesh
class Block(Mesh):
    """A data structure for the individual blocks of a discrete element assembly.

    Examples
    --------
    >>>
    """

    def __init__(self):
        super(Block, self).__init__()

    @classmethod
    def from_polysurface(cls, guid):
        """Class method for constructing a block from a Rhino poly-surface.

        Parameters
        ----------
        guid : str
            The GUID of the poly-surface.

        Returns
        -------
        Block
            The block corresponding to the poly-surface.

        Notes
        -----
        In Rhino, poly-surfaces are organised such that the cycle directions of
        the individual sub-surfaces produce normal vectors that point out of the
        enclosed volume. The normal vectors of the faces of the mesh, therefore
        also point "out" of the enclosed volume.
        """
        from compas_rhino.geometry import RhinoSurface
        surface = RhinoSurface.from_guid(guid)
        return surface.to_compas(cls)

    @classmethod
    def from_rhinomesh(cls, guid):
        """Class method for constructing a block from a Rhino mesh.

        Parameters
        ----------
        guid : str
            The GUID of the mesh.

        Returns
        -------
        Block
            The block corresponding to the Rhino mesh.
        """
        from compas_rhino.geometry import RhinoMesh
        mesh = RhinoMesh.from_guid(guid)
        return mesh.to_compas(cls)

    def centroid(self):
        """Compute the centroid of the block.

        Returns
        -------
        point
            The XYZ location of the centroid.
        """
        return centroid_points(
            [self.vertex_coordinates(key) for key in self.vertices()])

    def frames(self):
        """Compute the local frame of each face of the block.

        Returns
        -------
        dict
            A dictionary mapping face identifiers to face frames.
        """
        return {fkey: self.frame(fkey) for fkey in self.faces()}

    def frame(self, fkey):
        """Compute the frame of a specific face.

        Parameters
        ----------
        fkey : hashable
            The identifier of the frame.

        Returns
        -------
        frame
            The frame of the specified face.
        """
        xyz = self.face_coordinates(fkey)
        o = self.face_center(fkey)
        w = self.face_normal(fkey)
        u = [xyz[1][i] - xyz[0][i] for i in range(3)]  # align with longest edge instead?
        v = cross_vectors(w, u)
        uvw = normalize_vector(u), normalize_vector(v), normalize_vector(w)
        return o, uvw

    def top(self):
        """Identify the *top* face of the block.

        Returns
        -------
        int
            The identifier of the face.
        """
        z = [0, 0, 1]
        faces = list(self.faces())
        # Fix: the original called the non-existent ``face_norma`` and would
        # raise AttributeError; the Mesh API method is ``face_normal``.
        normals = [self.face_normal(face) for face in faces]
        return sorted(zip(faces, normals), key=lambda x: dot_vectors(x[1], z))[-1][0]

    def center(self):
        """Compute the center of mass of the block.

        Returns
        -------
        point
            The center of mass of the block.
        """
        vertices = [self.vertex_coordinates(key) for key in self.vertices()]
        faces = [self.face_vertices(fkey) for fkey in self.faces()]
        return centroid_polyhedron((vertices, faces))

    def volume(self):
        """Compute the volume of the block.

        Returns
        -------
        float
            The volume of the block.
        """
        vertices = [self.vertex_coordinates(key) for key in self.vertices()]
        faces = [self.face_vertices(fkey) for fkey in self.faces()]
        v = volume_polyhedron((vertices, faces))
        return v
| 28.216216 | 89 | 0.588362 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import centroid_points
from compas.geometry import cross_vectors
from compas.geometry import dot_vectors
from compas.geometry import normalize_vector
from compas.geometry import centroid_polyhedron
from compas.geometry import volume_polyhedron
from compas.datastructures import Mesh
class Block(Mesh):
    """A data structure for the individual blocks of a discrete element assembly."""

    def __init__(self):
        super(Block, self).__init__()

    @classmethod
    def from_polysurface(cls, guid):
        """Construct a block from the Rhino poly-surface with the given GUID."""
        from compas_rhino.geometry import RhinoSurface
        surface = RhinoSurface.from_guid(guid)
        return surface.to_compas(cls)

    @classmethod
    def from_rhinomesh(cls, guid):
        """Construct a block from the Rhino mesh with the given GUID."""
        from compas_rhino.geometry import RhinoMesh
        mesh = RhinoMesh.from_guid(guid)
        return mesh.to_compas(cls)

    def centroid(self):
        """Return the centroid (average of vertex positions) of the block."""
        return centroid_points(
            [self.vertex_coordinates(key) for key in self.vertices()])

    def frames(self):
        """Return a dict mapping each face key to its local frame."""
        return {fkey: self.frame(fkey) for fkey in self.faces()}

    def frame(self, fkey):
        """Return the ``(origin, (u, v, w))`` frame of face ``fkey``."""
        xyz = self.face_coordinates(fkey)
        o = self.face_center(fkey)
        w = self.face_normal(fkey)
        u = [xyz[1][i] - xyz[0][i] for i in range(3)]
        v = cross_vectors(w, u)
        uvw = normalize_vector(u), normalize_vector(v), normalize_vector(w)
        return o, uvw

    def top(self):
        """Return the key of the face whose normal points most towards +Z."""
        z = [0, 0, 1]
        faces = list(self.faces())
        # Fix: the original called the non-existent ``face_norma``.
        normals = [self.face_normal(face) for face in faces]
        return sorted(zip(faces, normals), key=lambda x: dot_vectors(x[1], z))[-1][0]

    def center(self):
        """Return the center of mass (polyhedron centroid) of the block."""
        vertices = [self.vertex_coordinates(key) for key in self.vertices()]
        faces = [self.face_vertices(fkey) for fkey in self.faces()]
        return centroid_polyhedron((vertices, faces))

    def volume(self):
        """Return the volume of the block polyhedron."""
        vertices = [self.vertex_coordinates(key) for key in self.vertices()]
        faces = [self.face_vertices(fkey) for fkey in self.faces()]
        v = volume_polyhedron((vertices, faces))
        return v
| true | true |
1c2eabd376142cf807c1e8d6a65fe520113a747a | 2,247 | py | Python | rbapi/apps/api/models.py | Yariquezz/check_gov_ua | 89d9a4d66ad5a675f64c8172de9d343b1024b102 | [
"Apache-2.0"
] | null | null | null | rbapi/apps/api/models.py | Yariquezz/check_gov_ua | 89d9a4d66ad5a675f64c8172de9d343b1024b102 | [
"Apache-2.0"
] | 9 | 2020-04-04T12:38:15.000Z | 2021-10-02T09:56:42.000Z | rbapi/apps/api/models.py | Yariquezz/check_gov_ua | 89d9a4d66ad5a675f64c8172de9d343b1024b102 | [
"Apache-2.0"
] | null | null | null | from django.db import models
import uuid
def get_default_uuid():
    """Return a fresh random UUID4 as a 32-character lowercase hex string."""
    generated = uuid.uuid4()
    return generated.hex
class BankInfo(models.Model):
    """Reference data for a bank, keyed by its tax code."""
    tax_code = models.IntegerField(primary_key=True)
    bank_name = models.CharField(max_length=200, default=None, null=True)
    # Up to three support phone numbers (13 chars fits e.g. "+380XXXXXXXXX").
    support_number_1 = models.CharField(max_length=13, default=None, null=True)
    support_number_2 = models.CharField(max_length=13, default=None, null=True)
    support_number_3 = models.CharField(max_length=13, default=None, null=True)
    # NOTE(review): max_length=20 is short for real email addresses and
    # URLs -- confirm whether these limits are intentional (raising them
    # would require a migration).
    email = models.CharField(max_length=20, default=None, null=True)
    website = models.CharField(max_length=20, default=None, null=True)
    info = models.TextField(max_length=500, default=None, null=True)
    signature_info = models.TextField(max_length=100, default=None, null=True)
    signature_person = models.TextField(max_length=100, default=None, null=True)
    sign = models.ImageField(null=True, blank=True)
    logo = models.ImageField(null=True, blank=True)

    class Meta:
        # Stable default ordering for querysets and the admin.
        ordering = [
            'tax_code',
            'bank_name'
        ]

    def __str__(self):
        return '{} {}'.format(self.bank_name, self.tax_code)
class RBAResponse(models.Model):
    """A single payment receipt record returned by the RBA service."""
    receipt_id = models.IntegerField(primary_key=True, unique=True)
    sender = models.CharField(max_length=200, default=None, null=True)
    recipient = models.CharField(max_length=200, default=None, null=True)
    amount = models.IntegerField(null=True, blank=False)
    date = models.DateTimeField()
    description = models.CharField(max_length=500, default=None, null=True)
    # ISO 4217 numeric codes for the supported currencies.
    CURRENCY_CODES = [
        (980, 'UAH'),
        (840, 'USD'),
        (978, 'EUR'),
    ]
    currencyCode = models.IntegerField(default=None, null=True, choices=CURRENCY_CODES)
    commissionRate = models.IntegerField(null=True, blank=False)
    # Fix: pass the callable itself, not its result.  With
    # ``default=get_default_uuid()`` the default was computed once at import
    # time, so every new row would share the same UUID.  Django calls a
    # callable default once per new instance.
    link_code = models.UUIDField(default=get_default_uuid, editable=False)
    sender_bank_tax_code = models.ForeignKey(
        BankInfo,
        default=None,
        null=True,
        related_name='sender_bank_tax_code',
        on_delete=models.CASCADE
    )

    class Meta:
        # Stable default ordering by receipt then date.
        ordering = [
            'receipt_id',
            'date'
        ]

    def __str__(self):
        return 'receipt {}'.format(self.receipt_id)
| 34.569231 | 87 | 0.683133 | from django.db import models
import uuid
def get_default_uuid():
    """Return a fresh random UUID4 as a 32-character lowercase hex string."""
    generated = uuid.uuid4()
    return generated.hex
class BankInfo(models.Model):
    """Reference data for a bank, keyed by its tax code."""
    tax_code = models.IntegerField(primary_key=True)
    bank_name = models.CharField(max_length=200, default=None, null=True)
    # Up to three support phone numbers.
    support_number_1 = models.CharField(max_length=13, default=None, null=True)
    support_number_2 = models.CharField(max_length=13, default=None, null=True)
    support_number_3 = models.CharField(max_length=13, default=None, null=True)
    # NOTE(review): max_length=20 is short for real emails/URLs -- confirm.
    email = models.CharField(max_length=20, default=None, null=True)
    website = models.CharField(max_length=20, default=None, null=True)
    info = models.TextField(max_length=500, default=None, null=True)
    signature_info = models.TextField(max_length=100, default=None, null=True)
    signature_person = models.TextField(max_length=100, default=None, null=True)
    sign = models.ImageField(null=True, blank=True)
    logo = models.ImageField(null=True, blank=True)

    class Meta:
        # Stable default ordering for querysets.
        ordering = [
            'tax_code',
            'bank_name'
        ]

    def __str__(self):
        return '{} {}'.format(self.bank_name, self.tax_code)
class RBAResponse(models.Model):
    """A single payment receipt record returned by the RBA service."""
    receipt_id = models.IntegerField(primary_key=True, unique=True)
    sender = models.CharField(max_length=200, default=None, null=True)
    recipient = models.CharField(max_length=200, default=None, null=True)
    amount = models.IntegerField(null=True, blank=False)
    date = models.DateTimeField()
    description = models.CharField(max_length=500, default=None, null=True)
    # ISO 4217 numeric codes for the supported currencies.
    CURRENCY_CODES = [
        (980, 'UAH'),
        (840, 'USD'),
        (978, 'EUR'),
    ]
    currencyCode = models.IntegerField(default=None, null=True, choices=CURRENCY_CODES)
    commissionRate = models.IntegerField(null=True, blank=False)
    # Fix: pass the callable, not its result -- ``get_default_uuid()`` was
    # evaluated once at import time, giving every row the same default UUID.
    link_code = models.UUIDField(default=get_default_uuid, editable=False)
    sender_bank_tax_code = models.ForeignKey(
        BankInfo,
        default=None,
        null=True,
        related_name='sender_bank_tax_code',
        on_delete=models.CASCADE
    )

    class Meta:
        ordering = [
            'receipt_id',
            'date'
        ]

    def __str__(self):
        return 'receipt {}'.format(self.receipt_id)
| true | true |
1c2eabda09f4342028ede80cfd64ae89c9792eb2 | 2,844 | py | Python | apps/backend/users/views.py | 12roshan12/Hotel-website | 81c9ca74ea1f080c7fffb7cc350a39ccb2f2596f | [
"MIT"
] | null | null | null | apps/backend/users/views.py | 12roshan12/Hotel-website | 81c9ca74ea1f080c7fffb7cc350a39ccb2f2596f | [
"MIT"
] | null | null | null | apps/backend/users/views.py | 12roshan12/Hotel-website | 81c9ca74ea1f080c7fffb7cc350a39ccb2f2596f | [
"MIT"
] | null | null | null | from django.urls import reverse
from django.shortcuts import render, redirect, HttpResponseRedirect
from django.contrib.messages.views import SuccessMessageMixin
from django.utils.translation import templatize
from django.views.generic import CreateView, ListView, DeleteView, UpdateView
from .forms import EnquiryForm, UserRegistrationForm, UserUpdateForm
from django.contrib.auth.views import LoginView, LogoutView
from .models import Events, User
from django.contrib import messages
from django.urls import reverse_lazy
# Create your views here.
class UserRegistrationField(SuccessMessageMixin, CreateView):
    """Sign-up view: creates a User and flags it as buyer or seller."""
    template_name = 'backend/user/register.html'
    form_class = UserRegistrationForm
    success_message = 'User Successfully Created'
    success_url = reverse_lazy('backend.users:index')
    def form_valid(self, form):
        # Which boolean flag is set depends on the 'user_types' choice field.
        user = form.save(commit=False)
        user_type = form.cleaned_data['user_types']
        if user_type == 'is_buyer':
            user.is_buyer = True
        elif user_type == 'is_seller':
            user.is_seller = True
        user.save()
        # Message is emitted manually because super().form_valid() is bypassed.
        messages.success(self.request, self.success_message)
        return redirect(self.success_url)
class UserLoginField(LoginView):
    """Login view using the shared backend login template."""
    template_name = 'backend/user/login.html'
class UserLogoutField(LogoutView):
    """Logout view; renders the login template after logging out."""
    template_name = 'backend/user/login.html'
class IndexUserView(ListView):
    """Listing of all users for the backend screens."""
    template_name = 'backend/user/index.html'
    model = User
    context_object_name = 'users'
    paginate_by = 20

    def get_context_data(self, *, object_list=None, **kwargs):
        """Build the template context.

        Fix: the original used ``User.object.all()`` (AttributeError at
        request time); the default manager is ``User.objects``.
        """
        context = super(IndexUserView, self).get_context_data(**kwargs)
        # NOTE(review): overriding 'users' with the full queryset defeats
        # ``paginate_by`` above -- confirm whether pagination is wanted.
        context['users'] = User.objects.all()
        return context
class UpdateUserView(SuccessMessageMixin, UpdateView):
    """Edit an existing user and redirect to the user index on success."""
    model = User
    template_name = 'backend/user/register.html'
    success_message = 'User Successfully Updated'
    # Fix: the original ``reverse_lazy()`` had no view name and would fail
    # whenever evaluated; point it at the same target as get_success_url.
    success_url = reverse_lazy('backend.users:index')
    form_class = UserUpdateForm

    # The original ``get`` override was removed: UpdateView.get already
    # assigns ``self.object``, and the override passed ``self`` twice to
    # ``super().get(self, request, ...)``, shifting the request argument.

    def get_success_url(self):
        """Redirect back to the user index after a successful update."""
        return reverse('backend.users:index')
class DeleteUserView(SuccessMessageMixin, DeleteView):
    """Delete a user after a confirmation page, then redirect to the index."""
    model = User
    template_name = 'backend/layouts/partial/delete.html'
    success_message = 'User Successfully deleted'
    success_url = reverse_lazy('backend.users:index')

    def delete(self, request, *args, **kwargs):
        """Delete the object, flash the success message, then redirect.

        The message is emitted manually because DeleteView does not route
        deletion through SuccessMessageMixin.form_valid.
        """
        self.object = self.get_object()
        self.object.delete()
        messages.success(self.request, self.success_message)
        return HttpResponseRedirect(self.success_url)

    # The original ``get`` override was removed: DeleteView.get already
    # assigns ``self.object``, and the override passed ``self`` twice to
    # ``super().get(self, request, ...)``, shifting the request argument.
| 32.689655 | 78 | 0.716245 | from django.urls import reverse
from django.shortcuts import render, redirect, HttpResponseRedirect
from django.contrib.messages.views import SuccessMessageMixin
from django.utils.translation import templatize
from django.views.generic import CreateView, ListView, DeleteView, UpdateView
from .forms import EnquiryForm, UserRegistrationForm, UserUpdateForm
from django.contrib.auth.views import LoginView, LogoutView
from .models import Events, User
from django.contrib import messages
from django.urls import reverse_lazy
class UserRegistrationField(SuccessMessageMixin, CreateView):
    """Sign-up view: creates a User flagged as buyer or seller."""
    template_name = 'backend/user/register.html'
    form_class = UserRegistrationForm
    success_message = 'User Successfully Created'
    success_url = reverse_lazy('backend.users:index')
    def form_valid(self, form):
        # Flag chosen via the 'user_types' form field.
        user = form.save(commit=False)
        user_type = form.cleaned_data['user_types']
        if user_type == 'is_buyer':
            user.is_buyer = True
        elif user_type == 'is_seller':
            user.is_seller = True
        user.save()
        # Manual message: super().form_valid() is bypassed here.
        messages.success(self.request, self.success_message)
        return redirect(self.success_url)
class UserLoginField(LoginView):
    """Login view using the shared backend login template."""
    template_name = 'backend/user/login.html'
class UserLogoutField(LogoutView):
    """Logout view; renders the login template after logging out."""
    template_name = 'backend/user/login.html'
class IndexUserView(ListView):
    """Listing of all users for the backend screens."""
    template_name = 'backend/user/index.html'
    model = User
    context_object_name = 'users'
    paginate_by = 20

    def get_context_data(self, *, object_list=None, **kwargs):
        """Build the template context (fixes the ``User.object`` typo)."""
        context = super(IndexUserView, self).get_context_data(**kwargs)
        # NOTE(review): this override defeats ``paginate_by`` -- confirm.
        context['users'] = User.objects.all()
        return context
class UpdateUserView(SuccessMessageMixin, UpdateView):
    """Edit an existing user and redirect to the user index on success."""
    model = User
    template_name = 'backend/user/register.html'
    success_message = 'User Successfully Updated'
    # Fix: ``reverse_lazy()`` without a view name fails when evaluated.
    success_url = reverse_lazy('backend.users:index')
    form_class = UserUpdateForm

    # Removed the broken ``get`` override that passed ``self`` twice to
    # ``super().get(self, request, ...)``; UpdateView.get already sets
    # ``self.object``.

    def get_success_url(self):
        """Redirect back to the user index after a successful update."""
        return reverse('backend.users:index')
class DeleteUserView(SuccessMessageMixin, DeleteView):
    """Delete a user after a confirmation page, then redirect to the index."""
    model = User
    template_name = 'backend/layouts/partial/delete.html'
    success_message = 'User Successfully deleted'
    success_url = reverse_lazy('backend.users:index')

    def delete(self, request, *args, **kwargs):
        """Delete the object, flash the success message, then redirect."""
        self.object = self.get_object()
        self.object.delete()
        messages.success(self.request, self.success_message)
        return HttpResponseRedirect(self.success_url)

    # Removed the broken ``get`` override that passed ``self`` twice to
    # ``super().get(self, request, ...)``; DeleteView.get already sets
    # ``self.object``.
| true | true |
1c2eabe68bf4698868e90a9f2d54a2c3d20e6bd8 | 12,464 | py | Python | nonebot/matcher.py | Lancercmd/nonebot2 | 59d49becf49d108442c7ca05e4f3e2fb98c3a972 | [
"MIT"
] | null | null | null | nonebot/matcher.py | Lancercmd/nonebot2 | 59d49becf49d108442c7ca05e4f3e2fb98c3a972 | [
"MIT"
] | null | null | null | nonebot/matcher.py | Lancercmd/nonebot2 | 59d49becf49d108442c7ca05e4f3e2fb98c3a972 | [
"MIT"
] | null | null | null | """
事件响应器
==========
该模块实现事件响应器的创建与运行,并提供一些快捷方法来帮助用户更好的与机器人进行 对话 。
"""
from nonebot.log import logger
import typing
import inspect
from functools import wraps
from datetime import datetime
from contextvars import ContextVar
from collections import defaultdict
from nonebot.rule import Rule
from nonebot.permission import Permission, USER
from nonebot.typing import Type, List, Dict, Union, Callable, Optional, NoReturn
from nonebot.typing import Bot, Event, Handler, Message, ArgsParser, MessageSegment
from nonebot.exception import PausedException, RejectedException, FinishedException
matchers: Dict[int, List[Type["Matcher"]]] = defaultdict(list)
"""
:类型: ``Dict[int, List[Type[Matcher]]]``
:说明: 用于存储当前所有的事件响应器
"""
current_bot: ContextVar = ContextVar("current_bot")
current_event: ContextVar = ContextVar("current_event")
class MatcherMeta(type):
    """Metaclass giving Matcher *classes* a readable repr.

    Matchers are one-off subclasses created on the fly, so the class object
    itself is what shows up in logs.
    """

    def __repr__(self) -> str:
        source = self.module or 'unknow'  # type: ignore
        details = (f"type={self.type}, "  # type: ignore
                   f"priority={self.priority}, "  # type: ignore
                   f"temp={self.temp}")  # type: ignore
        return f"<Matcher from {source}, {details}>"

    def __str__(self) -> str:
        return repr(self)
class Matcher(metaclass=MatcherMeta):
    """Event matcher (responder) class.

    Concrete matchers are created as one-off subclasses via :meth:`new`;
    an instance is created per event just before :meth:`run`.
    """
    module: Optional[str] = None
    """
    :type: ``Optional[str]``
    :description: name of the module the matcher was defined in
    """
    type: str = ""
    """
    :type: ``str``
    :description: event type the matcher responds to; "" matches any type
    """
    rule: Rule = Rule()
    """
    :type: ``Rule``
    :description: matching rule of the matcher
    """
    permission: Permission = Permission()
    """
    :type: ``Permission``
    :description: permission required to trigger the matcher
    """
    handlers: List[Handler] = []
    """
    :type: ``List[Handler]``
    :description: list of event handler functions owned by the matcher
    """
    priority: int = 1
    """
    :type: ``int``
    :description: priority of the matcher
    """
    block: bool = False
    """
    :type: ``bool``
    :description: whether the matcher stops event propagation
    """
    temp: bool = False
    """
    :type: ``bool``
    :description: whether the matcher is temporary (removed after one run)
    """
    expire_time: Optional[datetime] = None
    """
    :type: ``Optional[datetime]``
    :description: point in time at which the matcher expires
    """
    _default_state: dict = {}
    """
    :type: ``dict``
    :description: default state of the matcher
    """
    _default_parser: Optional[ArgsParser] = None
    """
    :type: ``Optional[ArgsParser]``
    :description: default argument parser function of the matcher
    """

    def __init__(self):
        """Instantiate the matcher so it can be run."""
        self.handlers = self.handlers.copy()
        self.state = self._default_state.copy()

    def __repr__(self) -> str:
        return (f"<Matcher from {self.module or 'unknow'}, type={self.type}, "
                f"priority={self.priority}, temp={self.temp}>")

    def __str__(self) -> str:
        return self.__repr__()

    @classmethod
    def new(cls,
            type_: str = "",
            rule: Optional[Rule] = None,
            permission: Optional[Permission] = None,
            handlers: Optional[List[Handler]] = None,
            temp: bool = False,
            priority: int = 1,
            block: bool = False,
            *,
            module: Optional[str] = None,
            default_state: Optional[dict] = None,
            expire_time: Optional[datetime] = None) -> Type["Matcher"]:
        """
        :description:

          Create a new matcher class and store it in `matchers <#matchers>`_.

        :parameters:

          * ``type_: str``: event type; triggers when equal to ``event.type``,
            empty string matches any type
          * ``rule: Optional[Rule]``: matching rule
          * ``permission: Optional[Permission]``: permission
          * ``handlers: Optional[List[Handler]]``: list of event handlers
          * ``temp: bool``: whether the matcher is temporary, i.e. removed
            after being triggered once
          * ``priority: int``: response priority
          * ``block: bool``: whether to stop propagation to lower-priority
            matchers
          * ``module: Optional[str]``: name of the module the matcher is
            defined in
          * ``default_state: Optional[dict]``: default ``state``
          * ``expire_time: Optional[datetime]``: point in time after which the
            matcher expires and is removed

        :returns:

          - ``Type[Matcher]``: the new matcher class
        """
        NewMatcher = type(
            "Matcher", (Matcher,), {
                "module": module,
                "type": type_,
                "rule": rule or Rule(),
                "permission": permission or Permission(),
                "handlers": handlers or [],
                "temp": temp,
                "expire_time": expire_time,
                "priority": priority,
                "block": block,
                "_default_state": default_state or {}
            })
        matchers[priority].append(NewMatcher)
        return NewMatcher

    @classmethod
    async def check_perm(cls, bot: Bot, event: Event) -> bool:
        """
        :description:

          Check whether the trigger permission is satisfied.

        :parameters:

          * ``bot: Bot``: the Bot object
          * ``event: Event``: the reported event

        :returns:

          - ``bool``: whether the permission is satisfied
        """
        return await cls.permission(bot, event)

    @classmethod
    async def check_rule(cls, bot: Bot, event: Event, state: dict) -> bool:
        """
        :description:

          Check whether the matching rule is satisfied.

        :parameters:

          * ``bot: Bot``: the Bot object
          * ``event: Event``: the reported event
          * ``state: dict``: the current state

        :returns:

          - ``bool``: whether the matching rule is satisfied
        """
        return (event.type == (cls.type or event.type) and
                await cls.rule(bot, event, state))

    @classmethod
    def args_parser(cls, func: ArgsParser) -> ArgsParser:
        """
        :description:

          Decorate a function to replace the matcher's default argument
          parser.

        :parameters:

          * ``func: ArgsParser``: the argument parser function
        """
        cls._default_parser = func
        return func

    @classmethod
    def handle(cls) -> Callable[[Handler], Handler]:
        """
        :description:

          Decorate a function to append it directly to the matcher's
          handler list.

        :parameters:

          * none
        """

        def _decorator(func: Handler) -> Handler:
            cls.handlers.append(func)
            return func

        return _decorator

    @classmethod
    def receive(cls) -> Callable[[Handler], Handler]:
        """
        :description:

          Decorate a function so that NoneBot runs it only after receiving
          a new message from the user.

        :parameters:

          * none
        """

        async def _receive(bot: Bot, event: Event, state: dict) -> NoReturn:
            raise PausedException

        if cls.handlers:
            # If there are preceding handlers, wait for a new message;
            # otherwise treat this as receiving the initial message.
            cls.handlers.append(_receive)

        def _decorator(func: Handler) -> Handler:
            if not cls.handlers or cls.handlers[-1] is not func:
                cls.handlers.append(func)
            return func

        return _decorator

    @classmethod
    def got(
            cls,
            key: str,
            prompt: Optional[Union[str, Message, MessageSegment]] = None,
            args_parser: Optional[ArgsParser] = None
    ) -> Callable[[Handler], Handler]:
        """
        :description:

          Decorate a function so that when ``key`` is missing from the state,
          NoneBot waits for a new user message, runs it through the
          ``ArgsParser`` and then executes the function; if ``key`` already
          exists, execution continues directly.

        :parameters:

          * ``key: str``: the argument name
          * ``prompt: Optional[Union[str, Message, MessageSegment]]``: message
            sent to the user when the argument is missing
          * ``args_parser: Optional[ArgsParser]``: optional argument parser;
            the default parser is used when empty
        """

        async def _key_getter(bot: Bot, event: Event, state: dict):
            state["_current_key"] = key
            if key not in state:
                if prompt:
                    await bot.send(event=event,
                                   message=str(prompt).format(**state))
                raise PausedException
            else:
                state["_skip_key"] = True

        async def _key_parser(bot: Bot, event: Event, state: dict):
            if key in state and state.get("_skip_key"):
                del state["_skip_key"]
                return
            parser = args_parser or cls._default_parser
            if parser:
                await parser(bot, event, state)
            else:
                state[state["_current_key"]] = str(event.message)

        cls.handlers.append(_key_getter)
        cls.handlers.append(_key_parser)

        def _decorator(func: Handler) -> Handler:
            if not hasattr(cls.handlers[-1], "__wrapped__"):
                parser = cls.handlers.pop()

                @wraps(func)
                async def wrapper(bot: Bot, event: Event, state: dict):
                    await parser(bot, event, state)
                    await func(bot, event, state)
                    if "_current_key" in state:
                        del state["_current_key"]

                cls.handlers.append(wrapper)

            return func

        return _decorator

    @classmethod
    async def send(cls, message: Union[str, Message, MessageSegment], **kwargs):
        """
        :description:

          Send a message to the user currently interacting.

        :parameters:

          * ``message: Union[str, Message, MessageSegment]``: message content
          * ``**kwargs``: extra parameters passed to ``bot.send``; see the
            bot API of the corresponding adapter
        """
        bot = current_bot.get()
        event = current_event.get()
        await bot.send(event=event, message=message, **kwargs)

    @classmethod
    async def finish(cls,
                     message: Optional[Union[str, Message,
                                             MessageSegment]] = None,
                     **kwargs) -> NoReturn:
        """
        :description:

          Send a message to the user currently interacting and finish the
          matcher.

        :parameters:

          * ``message: Union[str, Message, MessageSegment]``: message content
          * ``**kwargs``: extra parameters passed to ``bot.send``; see the
            bot API of the corresponding adapter
        """
        bot = current_bot.get()
        event = current_event.get()
        if message:
            await bot.send(event=event, message=message, **kwargs)
        raise FinishedException

    @classmethod
    async def pause(cls,
                    prompt: Optional[Union[str, Message,
                                           MessageSegment]] = None,
                    **kwargs) -> NoReturn:
        """
        :description:

          Send a message to the user currently interacting and pause the
          matcher; the next handler runs after a new user message arrives.

        :parameters:

          * ``prompt: Union[str, Message, MessageSegment]``: message content
          * ``**kwargs``: extra parameters passed to ``bot.send``; see the
            bot API of the corresponding adapter
        """
        bot = current_bot.get()
        event = current_event.get()
        if prompt:
            await bot.send(event=event, message=prompt, **kwargs)
        raise PausedException

    @classmethod
    async def reject(cls,
                     prompt: Optional[Union[str, Message,
                                            MessageSegment]] = None,
                     **kwargs) -> NoReturn:
        """
        :description:

          Send a message to the user currently interacting and pause the
          matcher; the *current* handler runs again after a new user message
          arrives.

        :parameters:

          * ``prompt: Union[str, Message, MessageSegment]``: message content
          * ``**kwargs``: extra parameters passed to ``bot.send``; see the
            bot API of the corresponding adapter
        """
        bot = current_bot.get()
        event = current_event.get()
        if prompt:
            await bot.send(event=event, message=prompt, **kwargs)
        raise RejectedException

    # run the handlers
    async def run(self, bot: Bot, event: Event, state: dict):
        b_t = current_bot.set(bot)
        e_t = current_event.set(event)
        try:
            # Refresh preprocess state
            self.state.update(state)

            for _ in range(len(self.handlers)):
                handler = self.handlers.pop(0)
                annotation = typing.get_type_hints(handler)
                BotType = annotation.get("bot")
                # Skip handlers whose annotated bot type does not match the
                # concrete bot delivering this event.
                if BotType and inspect.isclass(BotType) and not isinstance(
                        bot, BotType):
                    continue
                await handler(bot, event, self.state)

        except RejectedException:
            # Re-queue the rejected handler and register a temporary matcher
            # bound to this user so the conversation can continue.
            self.handlers.insert(0, handler)  # type: ignore
            Matcher.new(
                self.type,
                Rule(),
                USER(event.user_id, perm=self.permission),  # type:ignore
                self.handlers,
                temp=True,
                priority=0,
                block=True,
                module=self.module,
                default_state=self.state,
                expire_time=datetime.now() + bot.config.session_expire_timeout)
        except PausedException:
            # Register a temporary matcher bound to this user that resumes
            # with the remaining handlers.
            Matcher.new(
                self.type,
                Rule(),
                USER(event.user_id, perm=self.permission),  # type:ignore
                self.handlers,
                temp=True,
                priority=0,
                block=True,
                module=self.module,
                default_state=self.state,
                expire_time=datetime.now() + bot.config.session_expire_timeout)
        except FinishedException:
            pass
        finally:
            logger.info(f"Matcher {self} running complete")
            current_bot.reset(b_t)
            current_event.reset(e_t)
| 27.575221 | 106 | 0.522866 |
from nonebot.log import logger
import typing
import inspect
from functools import wraps
from datetime import datetime
from contextvars import ContextVar
from collections import defaultdict
from nonebot.rule import Rule
from nonebot.permission import Permission, USER
from nonebot.typing import Type, List, Dict, Union, Callable, Optional, NoReturn
from nonebot.typing import Bot, Event, Handler, Message, ArgsParser, MessageSegment
from nonebot.exception import PausedException, RejectedException, FinishedException
matchers: Dict[int, List[Type["Matcher"]]] = defaultdict(list)
current_bot: ContextVar = ContextVar("current_bot")
current_event: ContextVar = ContextVar("current_event")
class MatcherMeta(type):
def __repr__(self) -> str:
return (f"<Matcher from {self.module or 'unknow'}, "
f"type={self.type}, priority={self.priority}, "
f"temp={self.temp}>")
def __str__(self) -> str:
return repr(self)
class Matcher(metaclass=MatcherMeta):
module: Optional[str] = None
type: str = ""
rule: Rule = Rule()
permission: Permission = Permission()
handlers: List[Handler] = []
priority: int = 1
block: bool = False
temp: bool = False
expire_time: Optional[datetime] = None
_default_state: dict = {}
_default_parser: Optional[ArgsParser] = None
def __init__(self):
self.handlers = self.handlers.copy()
self.state = self._default_state.copy()
def __repr__(self) -> str:
return (f"<Matcher from {self.module or 'unknow'}, type={self.type}, "
f"priority={self.priority}, temp={self.temp}>")
def __str__(self) -> str:
return self.__repr__()
@classmethod
def new(cls,
type_: str = "",
rule: Optional[Rule] = None,
permission: Optional[Permission] = None,
handlers: Optional[List[Handler]] = None,
temp: bool = False,
priority: int = 1,
block: bool = False,
*,
module: Optional[str] = None,
default_state: Optional[dict] = None,
expire_time: Optional[datetime] = None) -> Type["Matcher"]:
NewMatcher = type(
"Matcher", (Matcher,), {
"module": module,
"type": type_,
"rule": rule or Rule(),
"permission": permission or Permission(),
"handlers": handlers or [],
"temp": temp,
"expire_time": expire_time,
"priority": priority,
"block": block,
"_default_state": default_state or {}
})
matchers[priority].append(NewMatcher)
return NewMatcher
@classmethod
async def check_perm(cls, bot: Bot, event: Event) -> bool:
return await cls.permission(bot, event)
@classmethod
async def check_rule(cls, bot: Bot, event: Event, state: dict) -> bool:
return (event.type == (cls.type or event.type) and
await cls.rule(bot, event, state))
@classmethod
def args_parser(cls, func: ArgsParser) -> ArgsParser:
cls._default_parser = func
return func
@classmethod
def handle(cls) -> Callable[[Handler], Handler]:
def _decorator(func: Handler) -> Handler:
cls.handlers.append(func)
return func
return _decorator
@classmethod
def receive(cls) -> Callable[[Handler], Handler]:
async def _receive(bot: Bot, event: Event, state: dict) -> NoReturn:
raise PausedException
if cls.handlers:
cls.handlers.append(_receive)
def _decorator(func: Handler) -> Handler:
if not cls.handlers or cls.handlers[-1] is not func:
cls.handlers.append(func)
return func
return _decorator
@classmethod
def got(
cls,
key: str,
prompt: Optional[Union[str, Message, MessageSegment]] = None,
args_parser: Optional[ArgsParser] = None
) -> Callable[[Handler], Handler]:
async def _key_getter(bot: Bot, event: Event, state: dict):
state["_current_key"] = key
if key not in state:
if prompt:
await bot.send(event=event,
message=str(prompt).format(**state))
raise PausedException
else:
state["_skip_key"] = True
async def _key_parser(bot: Bot, event: Event, state: dict):
if key in state and state.get("_skip_key"):
del state["_skip_key"]
return
parser = args_parser or cls._default_parser
if parser:
await parser(bot, event, state)
else:
state[state["_current_key"]] = str(event.message)
cls.handlers.append(_key_getter)
cls.handlers.append(_key_parser)
def _decorator(func: Handler) -> Handler:
if not hasattr(cls.handlers[-1], "__wrapped__"):
parser = cls.handlers.pop()
@wraps(func)
async def wrapper(bot: Bot, event: Event, state: dict):
await parser(bot, event, state)
await func(bot, event, state)
if "_current_key" in state:
del state["_current_key"]
cls.handlers.append(wrapper)
return func
return _decorator
@classmethod
async def send(cls, message: Union[str, Message, MessageSegment], **kwargs):
bot = current_bot.get()
event = current_event.get()
await bot.send(event=event, message=message, **kwargs)
@classmethod
async def finish(cls,
message: Optional[Union[str, Message,
MessageSegment]] = None,
**kwargs) -> NoReturn:
bot = current_bot.get()
event = current_event.get()
if message:
await bot.send(event=event, message=message, **kwargs)
raise FinishedException
@classmethod
async def pause(cls,
prompt: Optional[Union[str, Message,
MessageSegment]] = None,
**kwargs) -> NoReturn:
bot = current_bot.get()
event = current_event.get()
if prompt:
await bot.send(event=event, message=prompt, **kwargs)
raise PausedException
@classmethod
async def reject(cls,
prompt: Optional[Union[str, Message,
MessageSegment]] = None,
**kwargs) -> NoReturn:
bot = current_bot.get()
event = current_event.get()
if prompt:
await bot.send(event=event, message=prompt, **kwargs)
raise RejectedException
async def run(self, bot: Bot, event: Event, state: dict):
b_t = current_bot.set(bot)
e_t = current_event.set(event)
try:
self.state.update(state)
for _ in range(len(self.handlers)):
handler = self.handlers.pop(0)
annotation = typing.get_type_hints(handler)
BotType = annotation.get("bot")
if BotType and inspect.isclass(BotType) and not isinstance(
bot, BotType):
continue
await handler(bot, event, self.state)
except RejectedException:
self.handlers.insert(0, handler)
Matcher.new(
self.type,
Rule(),
USER(event.user_id, perm=self.permission),
self.handlers,
temp=True,
priority=0,
block=True,
module=self.module,
default_state=self.state,
expire_time=datetime.now() + bot.config.session_expire_timeout)
except PausedException:
Matcher.new(
self.type,
Rule(),
USER(event.user_id, perm=self.permission),
self.handlers,
temp=True,
priority=0,
block=True,
module=self.module,
default_state=self.state,
expire_time=datetime.now() + bot.config.session_expire_timeout)
except FinishedException:
pass
finally:
logger.info(f"Matcher {self} running complete")
current_bot.reset(b_t)
current_event.reset(e_t)
| true | true |
1c2eacfafb6429adc60a79646324136bfdb1e1d0 | 786 | py | Python | var/spack/repos/builtin/packages/py-lit/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/py-lit/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/py-lit/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLit(PythonPackage):
"""lit is a portable tool for executing LLVM and Clang style test suites,
summarizing their results, and providing indication of failures. lit is
designed to be a lightweight testing tool with as simple a user
interface as possible."""
pypi = "lit/lit-0.5.0.tar.gz"
version('0.7.1', sha256='ecef2833aef7f411cb923dac109c7c9dcc7dbe7cafce0650c1e8d19c243d955f')
version('0.5.0', sha256='3ea4251e78ebeb2e07be2feb33243d1f8931d956efc96ccc2b0846ced212b58c')
depends_on('py-setuptools', type='build')
| 37.428571 | 96 | 0.749364 |
from spack import *
class PyLit(PythonPackage):
pypi = "lit/lit-0.5.0.tar.gz"
version('0.7.1', sha256='ecef2833aef7f411cb923dac109c7c9dcc7dbe7cafce0650c1e8d19c243d955f')
version('0.5.0', sha256='3ea4251e78ebeb2e07be2feb33243d1f8931d956efc96ccc2b0846ced212b58c')
depends_on('py-setuptools', type='build')
| true | true |
1c2ead19859c5bebfcacdc7ae5358b7e31a8fe09 | 2,097 | py | Python | is_ros_pkg/examples/client.py | viniciusbaltoe/is-ros-pkg | e1ec6d5aa40d82b9259e406a1d6d196402e7a246 | [
"MIT"
] | null | null | null | is_ros_pkg/examples/client.py | viniciusbaltoe/is-ros-pkg | e1ec6d5aa40d82b9259e406a1d6d196402e7a246 | [
"MIT"
] | null | null | null | is_ros_pkg/examples/client.py | viniciusbaltoe/is-ros-pkg | e1ec6d5aa40d82b9259e406a1d6d196402e7a246 | [
"MIT"
] | null | null | null | from is_wire.core import Channel, Subscription, Message
from google.protobuf.empty_pb2 import Empty
from is_msgs.common_pb2 import Position
import json
import socket
import time
if __name__ == "__main__":
# -------------------------- Options -------------------------
cont = True
config_file = '../etc/conf/config.json'
config = json.load(open(config_file, 'r'))
channel = Channel(config["broker_uri"])
robot_config = config["robot"]
subscription = Subscription(channel)
# ---------------------- Get Position ------------------------
topic = "ROSRobot.{}.GetPosition".format(robot_config["robot_id"])
print("Publishing to topic: {}".format(topic))
request = Empty()
channel.publish(
Message(content=request, reply_to=subscription),
topic=topic)
try:
reply = channel.consume(timeout=3.0)
unpacked_msg = reply.unpack(Position)
print('Position:\n', unpacked_msg)
except socket.timeout:
print('No reply to Get 1 :(')
# ---------------------- Set Goal Position -------------------
topic = "ROSRobot.{}.GoalPosition".format(robot_config["robot_id"])
print("Publishing to topic: {}".format(topic))
request = Position()
request.x = 1.5
request.y = -0.5
request.z = 0.0
channel.publish(
Message(content=request, reply_to=subscription),
topic=topic)
try:
reply = channel.consume()
unpacked_msg = reply.unpack(Empty)
print('RPC Status:', reply.status)
except socket.timeout:
print('No reply to Goal_Position :(')
# ---------------------- Get Position ------------------------
topic = "ROSRobot.{}.GetPosition".format(robot_config["robot_id"])
print("Publishing to topic: {}".format(topic))
request = Empty()
channel.publish(
Message(content=request, reply_to=subscription),
topic=topic)
try:
reply = channel.consume(timeout=3.0)
unpacked_msg = reply.unpack(Position)
print('Position:\n', unpacked_msg)
except socket.timeout:
print('No reply :(') | 33.822581 | 71 | 0.592752 | from is_wire.core import Channel, Subscription, Message
from google.protobuf.empty_pb2 import Empty
from is_msgs.common_pb2 import Position
import json
import socket
import time
if __name__ == "__main__":
cont = True
config_file = '../etc/conf/config.json'
config = json.load(open(config_file, 'r'))
channel = Channel(config["broker_uri"])
robot_config = config["robot"]
subscription = Subscription(channel)
topic = "ROSRobot.{}.GetPosition".format(robot_config["robot_id"])
print("Publishing to topic: {}".format(topic))
request = Empty()
channel.publish(
Message(content=request, reply_to=subscription),
topic=topic)
try:
reply = channel.consume(timeout=3.0)
unpacked_msg = reply.unpack(Position)
print('Position:\n', unpacked_msg)
except socket.timeout:
print('No reply to Get 1 :(')
topic = "ROSRobot.{}.GoalPosition".format(robot_config["robot_id"])
print("Publishing to topic: {}".format(topic))
request = Position()
request.x = 1.5
request.y = -0.5
request.z = 0.0
channel.publish(
Message(content=request, reply_to=subscription),
topic=topic)
try:
reply = channel.consume()
unpacked_msg = reply.unpack(Empty)
print('RPC Status:', reply.status)
except socket.timeout:
print('No reply to Goal_Position :(')
topic = "ROSRobot.{}.GetPosition".format(robot_config["robot_id"])
print("Publishing to topic: {}".format(topic))
request = Empty()
channel.publish(
Message(content=request, reply_to=subscription),
topic=topic)
try:
reply = channel.consume(timeout=3.0)
unpacked_msg = reply.unpack(Position)
print('Position:\n', unpacked_msg)
except socket.timeout:
print('No reply :(') | true | true |
1c2eadfcae187ab3eede7b128082e792f9694294 | 95 | py | Python | tests/__init__.py | rebornix/jedi-language-server | 13297ba1cc109c5f8c2e3b72a82ea79cc9711473 | [
"MIT"
] | 300 | 2019-08-20T14:00:37.000Z | 2022-03-31T00:10:25.000Z | tests/__init__.py | rebornix/jedi-language-server | 13297ba1cc109c5f8c2e3b72a82ea79cc9711473 | [
"MIT"
] | 136 | 2019-08-19T16:04:16.000Z | 2022-03-17T21:31:18.000Z | tests/__init__.py | rebornix/jedi-language-server | 13297ba1cc109c5f8c2e3b72a82ea79cc9711473 | [
"MIT"
] | 37 | 2020-05-03T14:20:51.000Z | 2022-03-23T06:12:22.000Z | """Testing entrypoint."""
import py
TEST_DATA = py.path.local(__file__) / ".." / "test_data"
| 15.833333 | 56 | 0.652632 |
import py
TEST_DATA = py.path.local(__file__) / ".." / "test_data"
| true | true |
1c2eaf20ba4549dae9aed8b51f3a802ab0cf1569 | 2,506 | py | Python | bigml/tests/inspect_model_steps.py | deven96/python | 46be8622fe58f004bdbd636a08a8904ef4134bcd | [
"Apache-2.0"
] | 1 | 2021-08-30T20:18:38.000Z | 2021-08-30T20:18:38.000Z | bigml/tests/inspect_model_steps.py | deven96/python | 46be8622fe58f004bdbd636a08a8904ef4134bcd | [
"Apache-2.0"
] | null | null | null | bigml/tests/inspect_model_steps.py | deven96/python | 46be8622fe58f004bdbd636a08a8904ef4134bcd | [
"Apache-2.0"
] | 1 | 2021-08-30T20:18:40.000Z | 2021-08-30T20:18:40.000Z | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012, 2015-2019 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import os
from bigml.tests.world import res_filename
from world import world
from nose.tools import eq_
#@step(r'I translate the tree into IF-THEN rules$')
def i_translate_the_tree_into_IF_THEN_rules(step):
output = io.BytesIO()
world.local_model.rules(out=output)
world.output = output.getvalue()
#@step(r'I check data distribution with "(.*)" file$')
def i_check_the_data_distribution(step, file):
distribution = world.local_model.get_data_distribution()
distribution_str = ''
for bin_value, bin_instances in distribution:
distribution_str += "[%s,%s]\n" % (bin_value, bin_instances)
world.output = distribution_str.encode('utf-8')
i_check_if_the_output_is_like_expected_file(step, file)
#@step(r'I check the predictions distribution with "(.*)" file$')
def i_check_the_predictions_distribution(step, file):
predictions = world.local_model.get_prediction_distribution()
distribution_str = ''
for group, instances in predictions:
distribution_str += "[%s,%s]\n" % (group, instances)
world.output = distribution_str.encode('utf-8')
i_check_if_the_output_is_like_expected_file(step, file)
#@step(r'I check the model summary with "(.*)" file$')
def i_check_the_model_summary_with(step, file):
output = io.BytesIO()
world.local_model.summarize(out=output)
world.output = output.getvalue()
i_check_if_the_output_is_like_expected_file(step, file)
#@step(r'I check the output is like "(.*)" expected file')
def i_check_if_the_output_is_like_expected_file(step, expected_file):
file = open(res_filename(expected_file), "rb")
expected_content = file.read()
file.close()
eq_(world.output.strip(), expected_content.strip())
def update_content(filename, content):
with open(res_filename(filename), "w") as file_handler:
file_handler.write(content)
| 32.545455 | 75 | 0.735834 |
import io
import os
from bigml.tests.world import res_filename
from world import world
from nose.tools import eq_
def i_translate_the_tree_into_IF_THEN_rules(step):
output = io.BytesIO()
world.local_model.rules(out=output)
world.output = output.getvalue()
def i_check_the_data_distribution(step, file):
distribution = world.local_model.get_data_distribution()
distribution_str = ''
for bin_value, bin_instances in distribution:
distribution_str += "[%s,%s]\n" % (bin_value, bin_instances)
world.output = distribution_str.encode('utf-8')
i_check_if_the_output_is_like_expected_file(step, file)
def i_check_the_predictions_distribution(step, file):
predictions = world.local_model.get_prediction_distribution()
distribution_str = ''
for group, instances in predictions:
distribution_str += "[%s,%s]\n" % (group, instances)
world.output = distribution_str.encode('utf-8')
i_check_if_the_output_is_like_expected_file(step, file)
def i_check_the_model_summary_with(step, file):
output = io.BytesIO()
world.local_model.summarize(out=output)
world.output = output.getvalue()
i_check_if_the_output_is_like_expected_file(step, file)
def i_check_if_the_output_is_like_expected_file(step, expected_file):
file = open(res_filename(expected_file), "rb")
expected_content = file.read()
file.close()
eq_(world.output.strip(), expected_content.strip())
def update_content(filename, content):
with open(res_filename(filename), "w") as file_handler:
file_handler.write(content)
| true | true |
1c2eaf3facbffed299455ef3e1efd26f63e89301 | 591 | py | Python | runners/fastwsgi_server.py | timb-machine-mirrors/pcf | d697a531da8c4206a6d874e689312a359446f8da | [
"MIT"
] | null | null | null | runners/fastwsgi_server.py | timb-machine-mirrors/pcf | d697a531da8c4206a6d874e689312a359446f8da | [
"MIT"
] | null | null | null | runners/fastwsgi_server.py | timb-machine-mirrors/pcf | d697a531da8c4206a6d874e689312a359446f8da | [
"MIT"
] | null | null | null | import fastwsgi
import app
def start_server():
host, port, debug, ssl_context = app.config_prepare()
def requires_authorization(f):
@wraps(f)
def decorated(*args, **kwargs):
if config['security']['basic_auth'] == '0':
return f(*args, **kwargs)
auth = request.authorization
if not auth or not ok_user_and_password(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
fastwsgi.run(wsgi_app=app.application, host=host, port=port)
| 28.142857 | 82 | 0.605753 | import fastwsgi
import app
def start_server():
host, port, debug, ssl_context = app.config_prepare()
def requires_authorization(f):
@wraps(f)
def decorated(*args, **kwargs):
if config['security']['basic_auth'] == '0':
return f(*args, **kwargs)
auth = request.authorization
if not auth or not ok_user_and_password(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
fastwsgi.run(wsgi_app=app.application, host=host, port=port)
| true | true |
1c2eaf46a4ed2fe9166ba288f3f111543133d5b3 | 10,128 | py | Python | workbench/deals/models.py | yoshson/workbench | 701558cac3357cd82e4dc99f0fefed12ee81ddc5 | [
"MIT"
] | 15 | 2020-09-02T22:17:34.000Z | 2022-02-01T20:09:10.000Z | workbench/deals/models.py | yoshson/workbench | 701558cac3357cd82e4dc99f0fefed12ee81ddc5 | [
"MIT"
] | 18 | 2020-01-08T15:28:26.000Z | 2022-02-28T02:46:41.000Z | workbench/deals/models.py | yoshson/workbench | 701558cac3357cd82e4dc99f0fefed12ee81ddc5 | [
"MIT"
] | 8 | 2020-09-29T08:00:24.000Z | 2022-01-16T11:58:19.000Z | import datetime as dt
from functools import total_ordering
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
from workbench.accounts.models import User
from workbench.contacts.models import Organization, Person
from workbench.tools.formats import Z2, currency, local_date_format
from workbench.tools.models import Model, MoneyField, SearchQuerySet
from workbench.tools.urls import model_urls
class NotArchivedQuerySet(models.QuerySet):
    """QuerySet offering an ``active()`` filter for non-archived rows."""

    def active(self, include=None):
        """Return rows with ``is_archived=False``, plus the row whose primary
        key equals ``include`` (keeps an archived-but-currently-selected
        choice visible, e.g. in form dropdowns)."""
        not_archived = Q(is_archived=False)
        explicitly_included = Q(id=include)
        return self.filter(not_archived | explicitly_included)
class AttributeGroup(models.Model):
    """A named group of deal attributes.

    ``is_required`` presumably means each deal must select at least one
    attribute from this group; the enforcement is not visible here (likely
    in forms) -- TODO confirm.
    """

    title = models.CharField(_("title"), max_length=200)
    # Manual sort order; lower values come first (see Meta.ordering).
    position = models.PositiveIntegerField(_("position"), default=0)
    # Archived groups are kept for historical records but presumably hidden
    # from new input -- confirm against the consuming views/forms.
    is_archived = models.BooleanField(_("is archived"), default=False)
    is_required = models.BooleanField(_("is required"), default=True)

    class Meta:
        ordering = ("position", "id")
        verbose_name = _("attribute group")
        verbose_name_plural = _("attribute groups")

    def __str__(self):
        return self.title
class Attribute(models.Model):
    """A single selectable attribute belonging to an ``AttributeGroup``."""

    group = models.ForeignKey(
        AttributeGroup,
        on_delete=models.CASCADE,
        related_name="attributes",
        verbose_name=_("attribute group"),
    )
    title = models.CharField(_("title"), max_length=200)
    # Manual sort order inside the group (see Meta.ordering).
    position = models.PositiveIntegerField(_("position"), default=0)
    # Archived attributes can be filtered out via ``objects.active()``
    # while remaining attached to existing deals.
    is_archived = models.BooleanField(_("is archived"), default=False)
    objects = NotArchivedQuerySet.as_manager()

    class Meta:
        ordering = ("position", "id")
        verbose_name = _("attribute")
        verbose_name_plural = _("attributes")

    def __str__(self):
        return self.title
class ClosingType(models.Model):
    """Reason a deal was closed.

    ``represents_a_win`` separates winning closing reasons (deal accepted)
    from losing ones.
    """

    title = models.CharField(_("title"), max_length=200)
    represents_a_win = models.BooleanField(_("represents a win"), default=False)
    # Manual sort order (see Meta.ordering).
    position = models.PositiveIntegerField(_("position"), default=0)

    class Meta:
        ordering = ("position", "id")
        verbose_name = _("closing type")
        verbose_name_plural = _("closing types")

    def __str__(self):
        return self.title
class DealQuerySet(SearchQuerySet):
    """Deal-specific queries on top of the search-enabled base queryset."""

    def maybe_actionable(self, *, user):
        """Open deals owned by ``user`` -- or by any deactivated account."""
        needs_attention = Q(owned_by=user) | Q(owned_by__is_active=False)
        open_deals = self.filter(Q(status=Deal.OPEN), needs_attention)
        return open_deals.select_related("owned_by")
@model_urls
class Deal(Model):
    """A sales opportunity ("deal") with a customer organization.

    ``value`` is denormalized: ``save()`` recomputes it as the sum of the
    related ``Value`` rows.  Closing a deal sets ``closed_on`` and a
    ``ClosingType`` whose ``represents_a_win`` flag records whether the
    deal was won or lost.
    """

    # Values for the ``status`` field.
    OPEN = 10
    ACCEPTED = 20
    DECLINED = 30
    STATUS_CHOICES = (
        (OPEN, _("Open")),
        (ACCEPTED, _("Accepted")),
        (DECLINED, _("Declined")),
    )
    # Subjective win probability; also selects the staleness threshold
    # used by ``status_badge`` (90/45/20 days).
    UNKNOWN = 10
    NORMAL = 20
    HIGH = 30
    PROBABILITY_CHOICES = [
        (UNKNOWN, _("unknown")),
        (NORMAL, _("normal")),
        (HIGH, _("high")),
    ]
    customer = models.ForeignKey(
        Organization, on_delete=models.PROTECT, verbose_name=_("customer")
    )
    contact = models.ForeignKey(
        Person,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
        verbose_name=_("contact"),
    )
    title = models.CharField(_("title"), max_length=200)
    description = models.TextField(_("description"), blank=True)
    owned_by = models.ForeignKey(
        User, on_delete=models.PROTECT, verbose_name=_("contact person")
    )
    # Denormalized sum of the related ``Value`` rows; refreshed in save().
    value = MoneyField(_("value"))
    status = models.PositiveIntegerField(
        _("status"), choices=STATUS_CHOICES, default=OPEN
    )
    probability = models.IntegerField(
        _("probability"), choices=PROBABILITY_CHOICES, default=UNKNOWN
    )
    decision_expected_on = models.DateField(
        _("decision expected on"), blank=True, null=True
    )
    created_at = models.DateTimeField(_("created at"), default=timezone.now)
    attributes = models.ManyToManyField(
        Attribute,
        verbose_name=_("attributes"),
        through="DealAttribute",
    )
    # Closing metadata; only meaningful once ``status != OPEN``.
    closed_on = models.DateField(_("closed on"), blank=True, null=True)
    closing_type = models.ForeignKey(
        ClosingType,
        on_delete=models.PROTECT,
        blank=True,
        null=True,
        verbose_name=_("closing type"),
    )
    closing_notice = models.TextField(_("closing notice"), blank=True)
    # Denormalized search text (code, customer name, contact name), rebuilt
    # in save() -- presumably consumed by SearchQuerySet; confirm there.
    _fts = models.TextField(editable=False, blank=True)
    related_offers = models.ManyToManyField(
        "offers.Offer",
        blank=True,
        related_name="deals",
        verbose_name=_("related offers"),
    )
    contributors = models.ManyToManyField(
        User,
        verbose_name=_("contributors"),
        related_name="+",
        through="Contribution",
        help_text=_(
            "The value of the deal will be distributed among all"
            " contributors in the accepted deals report."
        ),
    )
    objects = DealQuerySet.as_manager()

    class Meta:
        ordering = ["-pk"]
        verbose_name = _("deal")
        verbose_name_plural = _("deals")

    def __str__(self):
        # ``code`` is provided by the ``Model`` base class (not visible here).
        return f"{self.code} {self.title} - {self.owned_by.get_short_name()}"

    def __html__(self):
        # HTML variant of __str__ with the code visually de-emphasized.
        return format_html(
            "<small>{}</small> {} - {}",
            self.code,
            self.title,
            self.owned_by.get_short_name(),
        )

    def get_related_offers(self):
        """Related offers with owner and project pre-joined (avoids N+1)."""
        return self.related_offers.select_related("owned_by", "project")

    def save(self, *args, **kwargs):
        """Persist the deal, refreshing denormalized fields.

        Recomputes ``value`` from the related ``Value`` rows unless the
        caller passes ``skip_value_calculation=True``; always rebuilds the
        ``_fts`` search text from code, customer and contact.
        """
        skip_value_calculation = kwargs.pop("skip_value_calculation", False)
        if not skip_value_calculation:
            # Z2 is the zero start value, so the sum stays the money type
            # even when there are no Value rows.
            self.value = sum((v.value for v in self.values.all()), Z2)
        self._fts = " ".join(
            str(part)
            for part in [
                self.code,
                self.customer.name,
                self.contact.full_name if self.contact else "",
            ]
        )
        super().save(*args, **kwargs)

    # Django convention: keeps templates from invoking this mutator.
    save.alters_data = True

    @property
    def pretty_status(self):
        """Human-readable one-line status; also embedded in status_badge."""
        d = {
            "created_at": local_date_format(self.created_at.date()),
            "closed_on": self.closed_on and local_date_format(self.closed_on),
            "decision_expected_on": self.decision_expected_on
            and local_date_format(self.decision_expected_on),
            "status": self.get_status_display(),
        }
        if self.status != self.OPEN:
            return _("%(status)s on %(closed_on)s") % d
        if self.decision_expected_on:
            return _("Decision expected on %(decision_expected_on)s") % d
        return _("Open since %(created_at)s") % d

    @property
    def status_badge(self):
        """Bootstrap badge markup for the deal's current state.

        Closed deals render green (accepted) or red (declined).  Open deals
        with an overdue expected decision turn yellow; otherwise deals open
        longer than the probability-dependent threshold (unknown: 90 days,
        normal: 45, high: 20) get the "caveat" style.
        """
        if self.status != self.OPEN:
            css = {self.ACCEPTED: "success", self.DECLINED: "danger"}[self.status]
        elif self.decision_expected_on:
            css = "warning" if self.decision_expected_on < dt.date.today() else "info"
        else:
            open_since = (dt.date.today() - self.created_at.date()).days
            if (
                open_since
                > {self.UNKNOWN: 90, self.NORMAL: 45, self.HIGH: 20}[self.probability]
            ):
                css = "caveat"
            else:
                css = "info"
        return format_html(
            '<span class="badge badge-{}">{}</span>', css, self.pretty_status
        )

    @property
    def pretty_closing_type(self):
        # Placeholder text when a closed deal has no closing type yet.
        return self.closing_type or _("<closing type missing>")

    @property
    def all_contributions(self):
        """Split ``value`` among contributors proportionally to their weight.

        Returns a list of ``{"user": ..., "value": ...}`` dicts sorted by
        descending share.  Empty when the deal has no contributions.
        """
        contributors = {}
        for contribution in self.contributions.all():
            contributors[contribution.user] = contribution.weight
        total = sum(contributors.values(), 0)
        return sorted(
            (
                {"user": user, "value": self.value * weight / total}
                for user, weight in contributors.items()
            ),
            key=lambda row: row["value"],
            reverse=True,
        )
class DealAttribute(models.Model):
    """Through model for the ``Deal.attributes`` many-to-many relation."""

    deal = models.ForeignKey(Deal, on_delete=models.CASCADE, verbose_name=_("deal"))
    attribute = models.ForeignKey(
        Attribute, on_delete=models.PROTECT, verbose_name=_("attribute")
    )

    class Meta:
        verbose_name = _("deal attribute")
        verbose_name_plural = _("deal attributes")

    def __str__(self):
        return f"{self.deal} - {self.attribute}"
@total_ordering
class ValueType(models.Model):
    """A category of deal value with an optional weekly revenue target.

    Instances are ordered by ``position`` both at the database level
    (``Meta.ordering``) and in Python (``__lt__`` + ``total_ordering``),
    which lets ``Value.Meta.ordering = ["type"]`` and plain ``sorted()``
    agree on the primary sort key.
    """

    title = models.CharField(_("title"), max_length=200)
    # Manual sort order; lower values come first.
    position = models.PositiveIntegerField(_("position"), default=0)
    is_archived = models.BooleanField(_("is archived"), default=False)
    weekly_target = MoneyField(_("weekly target"), blank=True, null=True)

    class Meta:
        ordering = ("position", "id")
        verbose_name = _("value type")
        verbose_name_plural = _("value types")

    def __str__(self):
        return self.title

    def __lt__(self, other):
        # Bug fix: this used to return the truthy constant ``1`` for
        # non-ValueType operands, silently claiming every ValueType sorts
        # before anything else.  Returning NotImplemented lets Python try
        # the reflected operation and raise TypeError for unsupported
        # comparisons, as the data model requires.
        if not isinstance(other, self.__class__):
            return NotImplemented
        # NOTE(review): ties on ``position`` break by *higher* pk first
        # here (``-self.pk``), which disagrees with ``Meta.ordering``'s
        # ascending ``id`` -- confirm which order is intended.
        return (self.position, -self.pk) < (other.position, -other.pk)
class Value(models.Model):
    """One monetary component of a deal, categorized by ``ValueType``.

    ``Deal.save()`` sums these rows into the denormalized ``Deal.value``.
    """

    deal = models.ForeignKey(
        Deal, on_delete=models.CASCADE, related_name="values", verbose_name=_("deal")
    )
    type = models.ForeignKey(
        ValueType, on_delete=models.PROTECT, verbose_name=_("type")
    )
    value = MoneyField(_("value"))

    class Meta:
        ordering = ["type"]
        # At most one value row per (deal, type) pair.
        unique_together = [("deal", "type")]
        verbose_name = _("value")
        verbose_name_plural = _("values")

    def __str__(self):
        return currency(self.value)
class Contribution(models.Model):
    """Through model for ``Deal.contributors``.

    ``weight`` is the relative share used by ``Deal.all_contributions``
    to split the deal value among contributing users.
    """

    deal = models.ForeignKey(
        Deal,
        on_delete=models.CASCADE,
        verbose_name=_("deal"),
        related_name="contributions",
    )
    user = models.ForeignKey(User, on_delete=models.PROTECT, verbose_name=_("user"))
    # Relative weight; the default of 100 makes equal-weight splits trivial.
    weight = models.SmallIntegerField(_("weight"), default=100)

    class Meta:
        ordering = ["-weight"]
        unique_together = [("deal", "user")]
        verbose_name = _("contribution")
        verbose_name_plural = _("contributions")

    def __str__(self):
        return f"{self.user}: {self.weight}"
| 30.053412 | 86 | 0.61957 | import datetime as dt
from functools import total_ordering
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
from workbench.accounts.models import User
from workbench.contacts.models import Organization, Person
from workbench.tools.formats import Z2, currency, local_date_format
from workbench.tools.models import Model, MoneyField, SearchQuerySet
from workbench.tools.urls import model_urls
class NotArchivedQuerySet(models.QuerySet):
    """QuerySet with a filter for rows still in active use."""

    def active(self, include=None):
        # Non-archived rows, plus the row with ``id=include`` so an
        # already-selected archived choice can stay visible in forms.
        return self.filter(Q(is_archived=False) | Q(id=include))
class AttributeGroup(models.Model):
    """A named group of deal attributes.

    ``is_required`` presumably obliges each deal to pick an attribute from
    this group; enforcement is not visible in this module -- TODO confirm.
    """

    title = models.CharField(_("title"), max_length=200)
    # Manual sort order (see Meta.ordering).
    position = models.PositiveIntegerField(_("position"), default=0)
    is_archived = models.BooleanField(_("is archived"), default=False)
    is_required = models.BooleanField(_("is required"), default=True)

    class Meta:
        ordering = ("position", "id")
        verbose_name = _("attribute group")
        verbose_name_plural = _("attribute groups")

    def __str__(self):
        return self.title
class Attribute(models.Model):
    """A single selectable attribute within an ``AttributeGroup``."""

    group = models.ForeignKey(
        AttributeGroup,
        on_delete=models.CASCADE,
        related_name="attributes",
        verbose_name=_("attribute group"),
    )
    title = models.CharField(_("title"), max_length=200)
    position = models.PositiveIntegerField(_("position"), default=0)
    # Filter archived entries out with ``objects.active()``.
    is_archived = models.BooleanField(_("is archived"), default=False)
    objects = NotArchivedQuerySet.as_manager()

    class Meta:
        ordering = ("position", "id")
        verbose_name = _("attribute")
        verbose_name_plural = _("attributes")

    def __str__(self):
        return self.title
class ClosingType(models.Model):
    """Why a deal was closed; ``represents_a_win`` marks winning reasons."""

    title = models.CharField(_("title"), max_length=200)
    represents_a_win = models.BooleanField(_("represents a win"), default=False)
    position = models.PositiveIntegerField(_("position"), default=0)

    class Meta:
        ordering = ("position", "id")
        verbose_name = _("closing type")
        verbose_name_plural = _("closing types")

    def __str__(self):
        return self.title
class DealQuerySet(SearchQuerySet):
    """Deal-specific queries on top of full-text search support."""

    def maybe_actionable(self, *, user):
        # Open deals the user owns -- plus open deals whose owner account
        # has been deactivated (they likely need reassignment).
        return self.filter(
            Q(status=Deal.OPEN), Q(owned_by=user) | Q(owned_by__is_active=False)
        ).select_related("owned_by")
@model_urls
class Deal(Model):
    """A sales deal with a customer, tracked from OPEN until it is closed as
    ACCEPTED or DECLINED.

    The monetary ``value`` is denormalized from the related ``Value`` rows on
    every ``save()``, and ``_fts`` holds a denormalized string used for
    full-text search.
    """

    # Lifecycle states; a deal starts OPEN and is closed as ACCEPTED/DECLINED.
    OPEN = 10
    ACCEPTED = 20
    DECLINED = 30
    STATUS_CHOICES = (
        (OPEN, _("Open")),
        (ACCEPTED, _("Accepted")),
        (DECLINED, _("Declined")),
    )
    # Estimated win probability; drives the "stale deal" thresholds in
    # ``status_badge`` (UNKNOWN: 90 days, NORMAL: 45, HIGH: 20).
    UNKNOWN = 10
    NORMAL = 20
    HIGH = 30
    PROBABILITY_CHOICES = [
        (UNKNOWN, _("unknown")),
        (NORMAL, _("normal")),
        (HIGH, _("high")),
    ]
    customer = models.ForeignKey(
        Organization, on_delete=models.PROTECT, verbose_name=_("customer")
    )
    # Optional contact person on the customer side; kept (as NULL) if the
    # person record is deleted.
    contact = models.ForeignKey(
        Person,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
        verbose_name=_("contact"),
    )
    title = models.CharField(_("title"), max_length=200)
    description = models.TextField(_("description"), blank=True)
    owned_by = models.ForeignKey(
        User, on_delete=models.PROTECT, verbose_name=_("contact person")
    )
    # Denormalized sum of the related Value rows; recomputed in save().
    value = MoneyField(_("value"))
    status = models.PositiveIntegerField(
        _("status"), choices=STATUS_CHOICES, default=OPEN
    )
    probability = models.IntegerField(
        _("probability"), choices=PROBABILITY_CHOICES, default=UNKNOWN
    )
    decision_expected_on = models.DateField(
        _("decision expected on"), blank=True, null=True
    )
    created_at = models.DateTimeField(_("created at"), default=timezone.now)
    attributes = models.ManyToManyField(
        Attribute,
        verbose_name=_("attributes"),
        through="DealAttribute",
    )
    closed_on = models.DateField(_("closed on"), blank=True, null=True)
    closing_type = models.ForeignKey(
        ClosingType,
        on_delete=models.PROTECT,
        blank=True,
        null=True,
        verbose_name=_("closing type"),
    )
    closing_notice = models.TextField(_("closing notice"), blank=True)
    # Denormalized full-text-search blob (code, customer name, contact name).
    _fts = models.TextField(editable=False, blank=True)
    related_offers = models.ManyToManyField(
        "offers.Offer",
        blank=True,
        related_name="deals",
        verbose_name=_("related offers"),
    )
    contributors = models.ManyToManyField(
        User,
        verbose_name=_("contributors"),
        related_name="+",
        through="Contribution",
        help_text=_(
            "The value of the deal will be distributed among all"
            " contributors in the accepted deals report."
        ),
    )
    objects = DealQuerySet.as_manager()
    class Meta:
        ordering = ["-pk"]
        verbose_name = _("deal")
        verbose_name_plural = _("deals")
    def __str__(self):
        # NOTE: ``self.code`` is presumably provided by the ``Model`` base
        # class — confirm; it is not declared on this class.
        return f"{self.code} {self.title} - {self.owned_by.get_short_name()}"
    def __html__(self):
        """HTML representation used by templates; code rendered small."""
        return format_html(
            "<small>{}</small> {} - {}",
            self.code,
            self.title,
            self.owned_by.get_short_name(),
        )
    def get_related_offers(self):
        """Related offers with owner and project prefetched via JOIN."""
        return self.related_offers.select_related("owned_by", "project")
    def save(self, *args, **kwargs):
        """Recompute the denormalized ``value`` and ``_fts`` fields, then save.

        Pass ``skip_value_calculation=True`` to keep the stored ``value``
        (e.g. when the related Value rows are updated afterwards).
        """
        skip_value_calculation = kwargs.pop("skip_value_calculation", False)
        if not skip_value_calculation:
            # Z2 is presumably the zero-money constant defined elsewhere in
            # the project (sum() start value) — confirm.
            self.value = sum((v.value for v in self.values.all()), Z2)
        self._fts = " ".join(
            str(part)
            for part in [
                self.code,
                self.customer.name,
                self.contact.full_name if self.contact else "",
            ]
        )
        super().save(*args, **kwargs)
    save.alters_data = True
    @property
    def pretty_status(self):
        """Human-readable status line, localized, with relevant date."""
        d = {
            "created_at": local_date_format(self.created_at.date()),
            "closed_on": self.closed_on and local_date_format(self.closed_on),
            "decision_expected_on": self.decision_expected_on
            and local_date_format(self.decision_expected_on),
            "status": self.get_status_display(),
        }
        if self.status != self.OPEN:
            return _("%(status)s on %(closed_on)s") % d
        if self.decision_expected_on:
            return _("Decision expected on %(decision_expected_on)s") % d
        return _("Open since %(created_at)s") % d
    @property
    def status_badge(self):
        """Bootstrap badge HTML for the deal status.

        Closed deals map to success/danger; overdue decisions to warning;
        deals open longer than the probability-dependent threshold to caveat.
        """
        if self.status != self.OPEN:
            css = {self.ACCEPTED: "success", self.DECLINED: "danger"}[self.status]
        elif self.decision_expected_on:
            css = "warning" if self.decision_expected_on < dt.date.today() else "info"
        else:
            open_since = (dt.date.today() - self.created_at.date()).days
            if (
                open_since
                > {self.UNKNOWN: 90, self.NORMAL: 45, self.HIGH: 20}[self.probability]
            ):
                css = "caveat"
            else:
                css = "info"
        return format_html(
            '<span class="badge badge-{}">{}</span>', css, self.pretty_status
        )
    @property
    def pretty_closing_type(self):
        """The closing type, or a localized placeholder when missing."""
        return self.closing_type or _("<closing type missing>")
    @property
    def all_contributions(self):
        """Deal value split across contributors by weight, largest share first.

        NOTE(review): divides by the summed weights — a contribution set whose
        weights sum to 0 would raise ZeroDivisionError; confirm weights are
        always positive.
        """
        contributors = {}
        for contribution in self.contributions.all():
            contributors[contribution.user] = contribution.weight
        total = sum(contributors.values(), 0)
        return sorted(
            (
                {"user": user, "value": self.value * weight / total}
                for user, weight in contributors.items()
            ),
            key=lambda row: row["value"],
            reverse=True,
        )
class DealAttribute(models.Model):
    """Through model linking a Deal to one of its Attributes."""

    deal = models.ForeignKey(Deal, on_delete=models.CASCADE, verbose_name=_("deal"))
    attribute = models.ForeignKey(
        Attribute, on_delete=models.PROTECT, verbose_name=_("attribute")
    )
    class Meta:
        verbose_name = _("deal attribute")
        verbose_name_plural = _("deal attributes")
    def __str__(self):
        return f"{self.deal} - {self.attribute}"
@total_ordering
class ValueType(models.Model):
    """A category for deal values, ordered by explicit position.

    Instances are orderable (``functools.total_ordering`` derives the full
    set of comparisons from ``__lt__`` and the default ``__eq__``).
    """

    title = models.CharField(_("title"), max_length=200)
    position = models.PositiveIntegerField(_("position"), default=0)
    is_archived = models.BooleanField(_("is archived"), default=False)
    weekly_target = MoneyField(_("weekly target"), blank=True, null=True)
    class Meta:
        ordering = ("position", "id")
        verbose_name = _("value type")
        verbose_name_plural = _("value types")
    def __str__(self):
        return self.title
    def __lt__(self, other):
        # Fix: the previous code returned the truthy int ``1`` when ``other``
        # was not a ValueType, silently ordering foreign objects instead of
        # returning NotImplemented so Python can try the reflected comparison
        # or raise TypeError (the documented rich-comparison contract).
        if not isinstance(other, self.__class__):
            return NotImplemented
        # Lower position sorts first; ties break on *descending* primary key
        # (newer rows first), matching the original behavior.
        return (self.position, -self.pk) < (other.position, -other.pk)
class Value(models.Model):
    """One monetary component of a deal, unique per (deal, type).

    The parent deal's ``value`` field is the sum of these rows
    (recomputed in ``Deal.save``).
    """

    deal = models.ForeignKey(
        Deal, on_delete=models.CASCADE, related_name="values", verbose_name=_("deal")
    )
    type = models.ForeignKey(
        ValueType, on_delete=models.PROTECT, verbose_name=_("type")
    )
    value = MoneyField(_("value"))
    class Meta:
        ordering = ["type"]
        unique_together = [("deal", "type")]
        verbose_name = _("value")
        verbose_name_plural = _("values")
    def __str__(self):
        # Rendered as a formatted currency amount.
        return currency(self.value)
class Contribution(models.Model):
    """Through model assigning a weighted share of a deal to a user.

    Weights are relative: ``Deal.all_contributions`` divides each weight by
    the sum of all weights on the deal.
    """

    deal = models.ForeignKey(
        Deal,
        on_delete=models.CASCADE,
        verbose_name=_("deal"),
        related_name="contributions",
    )
    user = models.ForeignKey(User, on_delete=models.PROTECT, verbose_name=_("user"))
    # Relative share; 100 means an equal full share.
    weight = models.SmallIntegerField(_("weight"), default=100)
    class Meta:
        ordering = ["-weight"]
        unique_together = [("deal", "user")]
        verbose_name = _("contribution")
        verbose_name_plural = _("contributions")
    def __str__(self):
        return f"{self.user}: {self.weight}"
| true | true |
1c2eb04a1a9b3067e99eeca4aaa9170032411906 | 6,912 | py | Python | pyastar/astar.py | julesy89/pyastar | 8f063d55c9f88e1d1f3c6a08b652038abf33b90c | [
"Apache-2.0"
] | 3 | 2020-02-10T16:33:31.000Z | 2022-01-30T06:09:46.000Z | pyastar/astar.py | julesy89/pyastar | 8f063d55c9f88e1d1f3c6a08b652038abf33b90c | [
"Apache-2.0"
] | null | null | null | pyastar/astar.py | julesy89/pyastar | 8f063d55c9f88e1d1f3c6a08b652038abf33b90c | [
"Apache-2.0"
] | null | null | null | from pyastar.problem import Problem
from pyastar.util.node import NodeFactory
from pyastar.util.pq import PriorityQueueSet
class AStar:
    """A* best-first search over a :class:`Problem` / node factory.

    The open set is a priority queue keyed by f = g + h; the closed set maps
    node keys to the best node seen for that key.  When the heuristic is
    declared inconsistent, ``pop``/``skip`` are swapped for variants that
    allow re-expanding closed nodes with a better g.
    """

    def __init__(self,
                 problem,
                 heuristic_is_inconsistent=False,
                 open_set_max_size=None,
                 open_set_truncate_size=None,
                 verbose=False,
                 ):
        """Set up the search state.

        ``problem`` may be a :class:`Problem` (wrapped in a NodeFactory) or a
        factory-like object directly.  ``open_set_max_size`` enables beam-style
        truncation down to ``open_set_truncate_size`` entries.
        """
        # either the user provides the problem or directly a node factory (allows more customization)
        if isinstance(problem, Problem):
            self.factory = NodeFactory(problem)
        else:
            self.factory = problem
        # whether printout in each iteration is desired
        self.verbose = verbose
        # the optimal path to be returned
        self.opt = None
        # the current node that is processed
        self.node = None
        # initialize the open set as custom priority queue (implemented as a set)
        self.open_set = PriorityQueueSet()
        # the maximum size of the open set - if it is not None what is the size it should be truncated to
        self.open_set_max_size = open_set_max_size
        self.open_set_truncate_size = open_set_truncate_size
        # keep track of all nodes that are already closed
        self.closed_set = {}
        # assuming the heuristic is inconsistent we change the behavior slightly
        self.heuristic_is_inconsistent = heuristic_is_inconsistent
        if self.heuristic_is_inconsistent:
            # swap in the inconsistent-heuristic variants as bound methods
            self.pop = self.pop_if_inconsistent
            self.skip = self.skip_if_inconsistent
    def find(self, **kwargs):
        """Run the search to completion and return the extracted result.

        ``kwargs`` are forwarded to :func:`extract` (return_path/costs/node).
        """
        self.initialize()
        while self.has_next():
            self.next()
        return self.result(**kwargs)
    def initialize(self):
        """Seed the open set with the start node; returns self for chaining."""
        self.add(self.factory.create())
        return self
    def next(self):
        """Perform one expansion step: pop, process, then truncate if enabled."""
        # retrieve the first node and remove it
        self.pop()
        # if pop was successful
        if self.node is not None:
            # if the verbose flag is set to true, print some information about the current status
            # NOTE(review): process() prints the same info again, so verbose
            # mode reports each node twice — confirm whether intended.
            if self.verbose:
                self.info()
            # actually process the node
            self.process()
            # does the truncation of the open queue if it is enabled - otherwise nothing happens
            self.truncate_if_necessary()
    def process(self):
        """Close the current node; record the goal or push its neighbors."""
        # get access to the current node directly
        node = self.node
        # if the verbose flag is set to true, print some information about the current status
        if self.verbose:
            self.info()
        # mark this key to be visited by adding it to the closed set
        self.closed_set[node.key] = node
        # if the goal has been found - we know it is optimal if the heuristic is admissible
        if node.is_goal():
            self.goal_found()
        else:
            for neighbor in node.get_neighbors():
                neighbor = self.factory.create(neighbor)
                neighbor.set_previous(node)
                # if the node is not supposed to be skipped
                if not self.skip(neighbor):
                    self.add(neighbor)
    def add(self, node):
        """Insert ``node`` into the open set, replacing any entry with its key."""
        # if we have been there - remove it from the priority queue because we have found a better one
        self.open_set.remove(node.key)
        # create a new entry containing the necessary information
        node.calc_g_h_f()
        # finally add to the open set
        self.open_set.push(node.key, node.f, node)
        return True
    def has_next(self):
        """True while the open set is non-empty and no goal has been found."""
        # if there are no nodes to process the algorithm always terminates
        if self.open_set.empty():
            return False
        else:
            return self.opt is None
    def goal_found(self):
        """Store the solution (path, goal node, and its cost g)."""
        self.opt = {"path": reconstruct_path(self.node), "node": self.node, "costs": self.node.g}
    def pop(self):
        """Take the lowest-f node from the open set into ``self.node``."""
        self.node = self.open_set.pop()
    def truncate_if_necessary(self):
        """Truncate the open set when it exceeds ``open_set_max_size``."""
        if self.open_set_max_size is not None and self.open_set.size() > self.open_set_max_size:
            if self.open_set_truncate_size is None:
                raise Exception("Please set open_set_truncate_size if you have enabled a maximum size!")
            else:
                return self.truncate()
    def truncate(self):
        """Keep only the best ``open_set_truncate_size`` entries (beam search)."""
        if self.open_set_max_size is not None and self.open_set.size() > self.open_set_max_size:
            # preserve the node currently being processed across the pops below
            _node = self.node
            pq = PriorityQueueSet()
            while self.open_set.size() > 0 and pq.size() < self.open_set_truncate_size:
                self.pop()
                n = self.node
                pq.push(n.key, n.f, n)
            self.open_set = pq
            self.node = _node
    def skip(self, node):
        """True if ``node`` is already closed or the open set has a better g.

        Used with consistent heuristics: once closed, a key is never revisited.
        """
        if node.key in self.closed_set:
            return True
        else:
            if not self.open_set.contains(node.key):
                return False
            else:
                node.calc_g()
                node_in_open_set = self.open_set.get(node.key)
                return node_in_open_set.g <= node.g
    def skip_if_inconsistent(self, node):
        """Like :meth:`skip`, but closed nodes may be re-opened with a better g."""
        if node.key in self.closed_set:
            node.calc_g()
            node_in_closed_set = self.closed_set[node.key]
            return node_in_closed_set.g <= node.g
        else:
            if not self.open_set.contains(node.key):
                return False
            else:
                node.calc_g()
                node_in_open_set = self.open_set.get(node.key)
                return node_in_open_set.g <= node.g
    def pop_if_inconsistent(self):
        """Pop entries until one passes the skip check; ``self.node`` is None
        when the open set is exhausted."""
        self.node = None
        while not self.open_set.empty():
            node = self.open_set.pop()
            if not self.skip_if_inconsistent(node):
                self.node = node
                break
    def result(self, **kwargs):
        """Extract the stored solution.

        NOTE(review): if no goal was found ``self.opt`` is still None and
        :func:`extract` will fail subscripting it — confirm callers check.
        """
        return extract(self.opt, **kwargs)
    def info(self):
        """Debug printout of the current node and the top of the open set."""
        from copy import deepcopy
        # deep-copy so popping for display does not disturb the real queue
        _node, _open = self.node, deepcopy(self.open_set)
        print("CURRENT")
        print(_node.key, "->", _node.__dict__)
        print()
        print("OPEN SET")
        for k in range(min(10, _open.size())):
            entry = _open.pop()
            print(entry.key, "->", entry.__dict__)
        print()
        print("-----------------------------------------------------------------")
def extract(opt, return_path=True, return_costs=True, return_node=False):
    """Select entries from a solution dict ``{"path": ..., "costs": ..., "node": ...}``.

    Returns a tuple containing, in this fixed order, each requested entry:
    path, costs, node.
    """
    requested = (
        ("path", return_path),
        ("costs", return_costs),
        ("node", return_node),
    )
    return tuple(opt[key] for key, wanted in requested if wanted)
def reconstruct_path(node):
    """Walk the ``prev`` links from ``node`` back to the root and return the
    node keys as a tuple ordered start -> goal."""
    trail = []
    current = node
    while current is not None:
        trail.append(current.key)
        current = current.prev
    trail.reverse()
    return tuple(trail)
def evaluate_path(problem, path):
    """Sum the edge costs of consecutive node pairs along ``path`` using
    ``problem.get_costs``; a path with fewer than two nodes costs 0."""
    return sum(
        problem.get_costs(src, dst) for src, dst in zip(path, path[1:])
    )
| 31.135135 | 105 | 0.584346 | from pyastar.problem import Problem
from pyastar.util.node import NodeFactory
from pyastar.util.pq import PriorityQueueSet
class AStar:
def __init__(self,
problem,
heuristic_is_inconsistent=False,
open_set_max_size=None,
open_set_truncate_size=None,
verbose=False,
):
if isinstance(problem, Problem):
self.factory = NodeFactory(problem)
else:
self.factory = problem
self.verbose = verbose
self.opt = None
self.node = None
self.open_set = PriorityQueueSet()
self.open_set_max_size = open_set_max_size
self.open_set_truncate_size = open_set_truncate_size
self.closed_set = {}
self.heuristic_is_inconsistent = heuristic_is_inconsistent
if self.heuristic_is_inconsistent:
self.pop = self.pop_if_inconsistent
self.skip = self.skip_if_inconsistent
def find(self, **kwargs):
self.initialize()
while self.has_next():
self.next()
return self.result(**kwargs)
def initialize(self):
self.add(self.factory.create())
return self
def next(self):
self.pop()
if self.node is not None:
if self.verbose:
self.info()
self.process()
self.truncate_if_necessary()
def process(self):
node = self.node
if self.verbose:
self.info()
self.closed_set[node.key] = node
if node.is_goal():
self.goal_found()
else:
for neighbor in node.get_neighbors():
neighbor = self.factory.create(neighbor)
neighbor.set_previous(node)
if not self.skip(neighbor):
self.add(neighbor)
def add(self, node):
self.open_set.remove(node.key)
node.calc_g_h_f()
self.open_set.push(node.key, node.f, node)
return True
def has_next(self):
if self.open_set.empty():
return False
else:
return self.opt is None
def goal_found(self):
self.opt = {"path": reconstruct_path(self.node), "node": self.node, "costs": self.node.g}
def pop(self):
self.node = self.open_set.pop()
def truncate_if_necessary(self):
if self.open_set_max_size is not None and self.open_set.size() > self.open_set_max_size:
if self.open_set_truncate_size is None:
raise Exception("Please set open_set_truncate_size if you have enabled a maximum size!")
else:
return self.truncate()
def truncate(self):
if self.open_set_max_size is not None and self.open_set.size() > self.open_set_max_size:
_node = self.node
pq = PriorityQueueSet()
while self.open_set.size() > 0 and pq.size() < self.open_set_truncate_size:
self.pop()
n = self.node
pq.push(n.key, n.f, n)
self.open_set = pq
self.node = _node
def skip(self, node):
if node.key in self.closed_set:
return True
else:
if not self.open_set.contains(node.key):
return False
else:
node.calc_g()
node_in_open_set = self.open_set.get(node.key)
return node_in_open_set.g <= node.g
def skip_if_inconsistent(self, node):
if node.key in self.closed_set:
node.calc_g()
node_in_closed_set = self.closed_set[node.key]
return node_in_closed_set.g <= node.g
else:
if not self.open_set.contains(node.key):
return False
else:
node.calc_g()
node_in_open_set = self.open_set.get(node.key)
return node_in_open_set.g <= node.g
def pop_if_inconsistent(self):
self.node = None
while not self.open_set.empty():
node = self.open_set.pop()
if not self.skip_if_inconsistent(node):
self.node = node
break
def result(self, **kwargs):
return extract(self.opt, **kwargs)
def info(self):
from copy import deepcopy
_node, _open = self.node, deepcopy(self.open_set)
print("CURRENT")
print(_node.key, "->", _node.__dict__)
print()
print("OPEN SET")
for k in range(min(10, _open.size())):
entry = _open.pop()
print(entry.key, "->", entry.__dict__)
print()
print("-----------------------------------------------------------------")
def extract(opt, return_path=True, return_costs=True, return_node=False):
ret = []
if return_path:
ret.append(opt["path"])
if return_costs:
ret.append(opt["costs"])
if return_node:
ret.append(opt["node"])
return tuple(ret)
def reconstruct_path(node):
path = []
while node.prev is not None:
path.append(node.key)
node = node.prev
path.append(node.key)
return tuple(path[::-1])
def evaluate_path(problem, path):
costs = 0
for k in range(len(path) - 1):
costs += problem.get_costs(path[k], path[k + 1])
return costs
| true | true |
1c2eb1241d77c82f23ac9ba30d8c63a85cb425c1 | 4,431 | py | Python | source/runbooks/ssm-py-scripts/start_secondary_ssm_automation_aurora_cluster.py | elduds/aws-instance-scheduler | 4b4c8a628fd0ab34e7d5c17af215f0bf10e48bd8 | [
"Apache-2.0"
] | 338 | 2018-02-09T17:24:26.000Z | 2021-10-06T01:33:52.000Z | source/runbooks/ssm-py-scripts/start_secondary_ssm_automation_aurora_cluster.py | elduds/aws-instance-scheduler | 4b4c8a628fd0ab34e7d5c17af215f0bf10e48bd8 | [
"Apache-2.0"
] | 267 | 2018-02-09T10:45:25.000Z | 2021-10-15T14:47:34.000Z | source/runbooks/ssm-py-scripts/start_secondary_ssm_automation_aurora_cluster.py | elduds/aws-instance-scheduler | 4b4c8a628fd0ab34e7d5c17af215f0bf10e48bd8 | [
"Apache-2.0"
] | 193 | 2018-02-14T08:25:30.000Z | 2021-10-06T14:59:14.000Z | ######################################################################################################################
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import boto3
import logging
from botocore.config import Config
def start_rds_secondary_ssm_automation(event, context) -> list:
    """
    Start one secondary SSM automation execution per RDS cluster to start or
    stop the clusters named in the event.
    Parameters:
        event (dict): event from SSM automation
        example:
            {
                "RdsResourceArns": [
                    "arn:aws:rds:us-east-1:account:cluster:cluster-3d3r",
                    "arn:aws:rds:us-east-1:account:cluster:cluster-1",
                    "arn:aws:rds:us-east-1:account:cluster:cluster-2",
                    "arn:aws:rds:us-east-1:account:cluster:cluster-a1df"
                ],
                "Action": "start"|"stop"
                "Region": "us-east-1",
                "SecondaryAutomationName": "secondary-automation-name",
                "SolutionId": "SO0030",
                "SolutionVersion": "v2.0.0",
                "ExecutionRoleArn": "arn:aws:iam::account:role/role-name"
            }
        context: Lambda context object (only logged at debug level)
    Returns:
        list : list of secondary execution ids (one per cluster started)
    """
    logging.debug(context)
    execution_ids = []
    # handle empty arn list
    if not event.get('RdsResourceArns'):
        return execution_ids
    ssm_client = get_client(event)
    # strip ARNs down to cluster identifiers, dropping "cluster-*" ids
    resource_list = get_resource_list(event.get('RdsResourceArns'))
    for rds_cluster_name in resource_list:
        execution_id = ssm_client.start_automation_execution(
            # Launch SSM Doc via Automation
            DocumentName=event.get('SecondaryAutomationName'),
            Parameters={
                "ClusterName": [
                    rds_cluster_name
                ],
                "Action": [
                    event.get('Action')
                ],
                "AutomationAssumeRole": [
                    event.get('ExecutionRoleArn')
                ]
            }
        )['AutomationExecutionId']
        execution_ids.append(execution_id)
    return execution_ids
def get_client(event):
    """Build a boto3 SSM client for the event's region, with standard-mode
    retries and a user agent tagged with the solution id/version."""
    user_agent = (
        f'AwsSolution/{event.get("SolutionId")}/{event.get("SolutionVersion")}'
    )
    boto_config = Config(
        user_agent_extra=user_agent,
        retries={'mode': 'standard'},
    )
    return boto3.client('ssm', region_name=event.get('Region'), config=boto_config)
def get_resource_list(arn_list: list) -> list:
    """Extract RDS cluster identifiers from cluster ARNs.

    Identifiers beginning with ``cluster-`` are dropped (the original code
    treated them as duplicate entries to be filtered out).
    """
    logging.info("Found RDS Cluster list.")
    identifiers = split_arn_list(arn_list, ":cluster:")
    return [ident for ident in identifiers if not ident.startswith("cluster-")]


def split_arn_list(arn_list, separator):
    """Return the part of each ARN after ``separator``.

    An ARN that lacks the separator raises IndexError, as before.
    """
    return [arn.split(separator)[1] for arn in arn_list]
| 41.027778 | 118 | 0.488152 | true | true | |
1c2eb12c6402438db405004fbe1816c60fc14302 | 969 | py | Python | MSKHackMAG/api.py | kelbin/MCHProject2021Mosru | 3acf7bef228da7a42a1f2ae42562d4ac98a53acb | [
"MIT"
] | null | null | null | MSKHackMAG/api.py | kelbin/MCHProject2021Mosru | 3acf7bef228da7a42a1f2ae42562d4ac98a53acb | [
"MIT"
] | null | null | null | MSKHackMAG/api.py | kelbin/MCHProject2021Mosru | 3acf7bef228da7a42a1f2ae42562d4ac98a53acb | [
"MIT"
] | 1 | 2021-06-13T23:33:51.000Z | 2021-06-13T23:33:51.000Z | from afisha import get_afisha
from analytic import write_like, calculate_recomendation
from result import Result
import json
def get_feed():
    """Return the raw afisha feed as produced by ``get_afisha()``."""
    return get_afisha()
def send_like(data):
    """Record a single like.

    ``data`` must contain ``userId`` and ``afishaId``; the like value is 1.
    Returns whatever ``write_like`` returns.
    """
    user_id = data["userId"]
    afisha_id = data["afishaId"]
    return write_like(user_id, afisha_id, 1)
def send_likes(data):
    """Record a batch of likes for one user.

    ``data`` is ``{"userId": ..., "stats": [{"id": ..., "value": ...}, ...]}``;
    each stat entry is written via ``write_like``.
    """
    user_id = data["userId"]
    for entry in data["stats"]:
        write_like(user_id, entry["id"], entry["value"])
    return Result.void_success()
def get_recomendation_feed(userId):
    """Return the afisha feed re-ordered by the user's recommendation score.

    Each feed entry is scored with ``calculate_recomendation`` and the entries
    are returned (JSON-encoded in a ``Result``) in descending score order.

    Fixes: the feed payload is read from ``.result`` — the attribute name used
    when constructing ``Result(result=...)`` below; the previous ``.resut``
    raised AttributeError.  The stray debug ``print`` was also removed.
    """
    afishas = json.loads(get_feed().result)
    scored = [
        (calculate_recomendation(userId, afisha["id"]), afisha)
        for afisha in afishas
    ]
    # Descending by score; list.sort is stable, so equal scores keep feed order.
    scored.sort(key=lambda pair: pair[0], reverse=True)
    resulted_feed = [afisha for _, afisha in scored]
    return Result(result=json.dumps(resulted_feed))
| 22.534884 | 61 | 0.668731 | from afisha import get_afisha
from analytic import write_like, calculate_recomendation
from result import Result
import json
def get_feed():
r = get_afisha()
return r
def send_like(data):
userId = data["userId"]
afishaId = data["afishaId"]
result = write_like(userId, afishaId, 1)
return result
def send_likes(data):
userId = data["userId"]
stats = data["stats"]
for stat in stats:
afishaId = stat["id"]
like = stat["value"]
write_like(userId, afishaId, like)
return Result.void_success()
def get_recomendation_feed(userId):
afishas = json.loads(get_feed().resut)
values = []
for afisha in afishas:
value = calculate_recomendation(userId, afisha["id"])
values.append((value, afisha))
values.sort(key=lambda x: x[0], reverse=True)
resulted_feed = list(map(lambda x: x[1], values))
print(resulted_feed)
return Result(result=json.dumps(resulted_feed))
| true | true |
1c2eb33b28bbe6a77f7479bf5efcdd305388856e | 539 | py | Python | flame/core/model/model_wrapper.py | v-tronglh/flamev2 | 7f376e8fbcfb592e0ad6f72fdaad0af5f2cf9231 | [
"MIT"
] | 3 | 2021-08-05T15:42:05.000Z | 2022-03-17T08:56:58.000Z | flame/core/model/model_wrapper.py | v-tronglh/flamev2 | 7f376e8fbcfb592e0ad6f72fdaad0af5f2cf9231 | [
"MIT"
] | null | null | null | flame/core/model/model_wrapper.py | v-tronglh/flamev2 | 7f376e8fbcfb592e0ad6f72fdaad0af5f2cf9231 | [
"MIT"
] | null | null | null | from typing import Any, Callable
from torch.nn import Module
class ModelWrapper:
    """Callable wrapper around a model.

    Positional call arguments are passed (as a tuple) through
    ``input_transform``, whose result is unpacked into the wrapped model.
    Attribute lookups that miss on the wrapper fall through to the model.
    """

    def __init__(
        self,
        model: Module,
        input_transform: Callable = lambda x: x,
    ):
        super().__init__()
        self.model = model
        self.input_transform = input_transform

    def __call__(self, *args) -> Any:
        # input_transform receives the whole args tuple and must return an
        # iterable that is unpacked into the model call.
        transformed = self.input_transform(args)
        return self.model(*transformed)

    def __getattr__(self, name: str) -> Any:
        # Only invoked when normal lookup fails: delegate to the wrapped model.
        return getattr(self.model, name)
| 23.434783 | 48 | 0.621521 | from typing import Any, Callable
from torch.nn import Module
class ModelWrapper:
def __init__(
self,
model: Module,
input_transform: Callable = lambda x: x,
):
super(ModelWrapper, self).__init__()
self.model = model
self.input_transform = input_transform
def __call__(self, *args) -> Any:
args = self.input_transform(args)
output = self.model(*args)
return output
def __getattr__(self, name: str) -> Any:
return getattr(self.model, name)
| true | true |
1c2eb363382ba5f5dcee9f8740c9c3dcf0ae5183 | 362 | py | Python | idact/detail/log/set_paramiko_log_level.py | intdata-bsc/idact | 54cb65a711c145351e205970c27c83e6393cccf5 | [
"MIT"
] | 5 | 2018-12-06T15:40:34.000Z | 2019-06-19T11:22:58.000Z | idact/detail/log/set_paramiko_log_level.py | garstka/idact | b9c8405c94db362c4a51d6bfdf418b14f06f0da1 | [
"MIT"
] | 9 | 2018-12-06T16:35:26.000Z | 2019-04-28T19:01:40.000Z | idact/detail/log/set_paramiko_log_level.py | intdata-bsc/idact | 54cb65a711c145351e205970c27c83e6393cccf5 | [
"MIT"
] | 2 | 2019-04-28T19:18:58.000Z | 2019-06-17T06:56:28.000Z | """This module contains a function for setting the log level for paramiko."""
import logging
from idact.detail.log.get_logger import get_debug_logger
PARAMIKO_LOG_LEVEL = logging.WARNING


def set_paramiko_log_level():
    """Raise paramiko's transport logger threshold to WARNING, silencing its
    routine debug/info chatter."""
    get_debug_logger('paramiko.transport').setLevel(PARAMIKO_LOG_LEVEL)
| 27.846154 | 77 | 0.779006 | import logging
from idact.detail.log.get_logger import get_debug_logger
PARAMIKO_LOG_LEVEL = logging.WARNING
def set_paramiko_log_level():
transport = get_debug_logger('paramiko.transport')
transport.setLevel(PARAMIKO_LOG_LEVEL)
| true | true |
1c2eb66c68f8848733b17a3cf4239ec78a433d0d | 7,298 | py | Python | configs/trainval/daotad_eccv2022/2.a.iii.1.py | klauscc/vedatad | c59f5ddc8fb227ef08baccbb513948bb1bb23857 | [
"Apache-2.0"
] | null | null | null | configs/trainval/daotad_eccv2022/2.a.iii.1.py | klauscc/vedatad | c59f5ddc8fb227ef08baccbb513948bb1bb23857 | [
"Apache-2.0"
] | null | null | null | configs/trainval/daotad_eccv2022/2.a.iii.1.py | klauscc/vedatad | c59f5ddc8fb227ef08baccbb513948bb1bb23857 | [
"Apache-2.0"
] | null | null | null | # 1. data
# Plain-variable training/eval config (DAOTAD, ECCV 2022 experiment 2.a.iii.1)
# for temporal action detection on THUMOS14.  The numbered section comments
# follow the repo's config convention; dicts with "typename" are resolved by
# the framework's registry builder.
dataset_type = "Thumos14Dataset"
data_root = "data/thumos14/"
# Standard ImageNet normalization values (0-255 scale), applied to RGB frames.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
)
# Frames per clip; with the 15 fps frame dir below this is presumably ~32 s of
# video per sample — derived from the directory name, confirm.
num_frames = 480
img_shape = (224, 224)
overlap_ratio = 0.25
img_dir = "frames_15fps_256x256"
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    # THUMOS14 convention: the "val" split is used for training and "test"
    # for evaluation.
    train=dict(
        typename=dataset_type,
        ann_file=data_root + "annotations/val.json",
        video_prefix=data_root + f"{img_dir}/val",
        pipeline=[
            dict(typename="LoadMetaInfo"),
            dict(typename="LoadAnnotations"),
            dict(typename="Time2Frame"),
            dict(typename="TemporalRandomCrop", num_frames=num_frames, iof_th=0.75),
            dict(typename="LoadFrames", to_float32=True),
            dict(typename="SpatialRandomCrop", crop_size=img_shape),
            dict(
                typename="PhotoMetricDistortion",
                brightness_delta=32,
                contrast_range=(0.5, 1.5),
                saturation_range=(0.5, 1.5),
                hue_delta=18,
                p=0.5,
            ),
            dict(typename="Rotate", limit=(-45, 45), border_mode="reflect101", p=0.5),
            dict(typename="SpatialRandomFlip", flip_ratio=0.5),
            dict(typename="Normalize", **img_norm_cfg),
            dict(typename="Pad", size=(num_frames, *img_shape)),
            dict(typename="DefaultFormatBundle"),
            dict(
                typename="Collect",
                keys=["imgs", "gt_segments", "gt_labels", "gt_segments_ignore"],
            ),
        ],
    ),
    val=dict(
        typename=dataset_type,
        ann_file=data_root + "annotations/test.json",
        video_prefix=data_root + f"{img_dir}/test",
        pipeline=[
            dict(typename="LoadMetaInfo"),
            dict(typename="Time2Frame"),
            # Sliding-window evaluation: overlapping num_frames crops with the
            # overlap_ratio defined above.
            dict(
                typename="OverlapCropAug",
                num_frames=num_frames,
                overlap_ratio=overlap_ratio,
                transforms=[
                    dict(typename="TemporalCrop"),
                    dict(typename="LoadFrames", to_float32=True),
                    dict(typename="SpatialCenterCrop", crop_size=img_shape),
                    dict(typename="Normalize", **img_norm_cfg),
                    dict(typename="Pad", size=(num_frames, *img_shape)),
                    dict(typename="DefaultFormatBundle"),
                    dict(typename="Collect", keys=["imgs"]),
                ],
            ),
        ],
    ),
)
# 2. model
num_classes = 20
# Temporal strides of the five pyramid levels fed to the head.
strides = [8, 16, 32, 64, 128]
use_sigmoid = True
scales_per_octave = 5
octave_base_scale = 2
num_anchors = scales_per_octave
model = dict(
    typename="SingleStageDetector",
    backbone=dict(
        typename="GradDropChunkVideoSwinV2",
        keep_ratio=0.4,
        chunk_size=32,
        bp_idx_mode="uniform_jitter",
        forward_mode="split",
        shift_inp=False,
        frozen_stages=2,
        use_checkpoint=True,
        patch_size=(2, 4, 4),
        in_chans=3,
        embed_dim=128,
        drop_path_rate=0.2,
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32],
        window_size=(8, 7, 7),
        patch_norm=True,
    ),
    neck=[
        dict(
            typename="SRMSwin",
            srm_cfg=dict(
                in_channels=1024,
                out_channels=512,
            ),
        ),
        dict(
            typename="TDM",
            in_channels=512,
            stage_layers=(1, 1, 1, 1),
            out_channels=512,
            conv_cfg=dict(typename="Conv1d"),
            norm_cfg=dict(typename="SyncBN"),
            act_cfg=dict(typename="ReLU"),
            out_indices=(0, 1, 2, 3, 4),
        ),
        dict(
            typename="FPN",
            in_channels=[512, 512, 512, 512, 512],
            out_channels=256,
            num_outs=5,
            start_level=0,
            conv_cfg=dict(typename="Conv1d"),
            norm_cfg=dict(typename="SyncBN"),
        ),
    ],
    head=dict(
        typename="RetinaHead",
        num_classes=num_classes,
        num_anchors=num_anchors,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        use_sigmoid=use_sigmoid,
        conv_cfg=dict(typename="Conv1d"),
        norm_cfg=dict(typename="SyncBN"),
    ),
)
# 3. engines
meshgrid = dict(
    typename="SegmentAnchorMeshGrid",
    strides=strides,
    base_anchor=dict(
        typename="SegmentBaseAnchor",
        base_sizes=strides,
        octave_base_scale=octave_base_scale,
        scales_per_octave=scales_per_octave,
    ),
)
segment_coder = dict(
    typename="DeltaSegmentCoder", target_means=[0.0, 0.0], target_stds=[1.0, 1.0]
)
train_engine = dict(
    typename="TrainEngine",
    model=model,
    criterion=dict(
        typename="SegmentAnchorCriterion",
        num_classes=num_classes,
        meshgrid=meshgrid,
        segment_coder=segment_coder,
        reg_decoded_segment=True,
        loss_cls=dict(
            typename="FocalLoss",
            use_sigmoid=use_sigmoid,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0,
        ),
        loss_segment=dict(typename="DIoULoss", loss_weight=1.0),
        train_cfg=dict(
            assigner=dict(
                typename="MaxIoUAssigner",
                pos_iou_thr=0.6,
                neg_iou_thr=0.4,
                min_pos_iou=0,
                ignore_iof_thr=-1,
                ignore_wrt_candidates=True,
                iou_calculator=dict(typename="SegmentOverlaps"),
            ),
            allowed_border=-1,
            pos_weight=-1,
            debug=False,
        ),
    ),
    optimizer=dict(
        typename="SGD",
        lr=0.01,
        momentum=0.9,
        weight_decay=0.0001,
        # Backbone trains with a reduced learning rate (0.4x).
        paramwise_cfg=dict(custom_keys=dict(backbone={"lr_mult": 0.4})),
    ),
)
# 3.2 val engine
val_engine = dict(
    typename="ValEngine",
    model=model,
    meshgrid=meshgrid,
    converter=dict(
        typename="SegmentAnchorConverter",
        num_classes=num_classes,
        segment_coder=segment_coder,
        nms_pre=1000,
        use_sigmoid=use_sigmoid,
    ),
    num_classes=num_classes,
    test_cfg=dict(
        score_thr=0.005, nms=dict(typename="nmw", iou_thr=0.5), max_per_video=1200
    ),
    use_sigmoid=use_sigmoid,
)
# 4. hooks
hooks = [
    dict(typename="OptimizerHook"),
    # Cosine LR with 12 restarts of 100 epochs each (matches max_epochs=1200).
    dict(
        typename="CosineRestartLrSchedulerHook",
        periods=[100] * 12,
        restart_weights=[1] * 12,
        warmup="linear",
        warmup_iters=500,
        warmup_ratio=1e-1,
        min_lr_ratio=1e-2,
    ),
    dict(typename="EvalHook", eval_cfg=dict(mode="anet")),
    dict(typename="SnapshotHook", interval=100),
    dict(typename="LoggerHook", interval=10),
]
# 5. work modes
modes = ["train"]
max_epochs = 1200
# 6. checkpoint
# weights = dict(filepath='open-mmlab://i3d_r50_256p_32x2x1_100e_kinetics400_rgb')
weights = dict(
    filepath="data/pretrained_models/vswin/swin_base_patch244_window877_kinetics400_22k_keysfrom_backbone.pth"
)
# optimizer = dict(filepath='epoch_900_optim.pth')
# meta = dict(filepath='epoch_900_meta.pth')
# 7. misc
seed = 10
dist_params = dict(backend="nccl")
log_level = "INFO"
find_unused_parameters = False
deterministic = True
# gpu_mem_fraction = 0.65
| 28.960317 | 110 | 0.571252 |
dataset_type = "Thumos14Dataset"
data_root = "data/thumos14/"
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
)
num_frames = 480
img_shape = (224, 224)
overlap_ratio = 0.25
img_dir = "frames_15fps_256x256"
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
typename=dataset_type,
ann_file=data_root + "annotations/val.json",
video_prefix=data_root + f"{img_dir}/val",
pipeline=[
dict(typename="LoadMetaInfo"),
dict(typename="LoadAnnotations"),
dict(typename="Time2Frame"),
dict(typename="TemporalRandomCrop", num_frames=num_frames, iof_th=0.75),
dict(typename="LoadFrames", to_float32=True),
dict(typename="SpatialRandomCrop", crop_size=img_shape),
dict(
typename="PhotoMetricDistortion",
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18,
p=0.5,
),
dict(typename="Rotate", limit=(-45, 45), border_mode="reflect101", p=0.5),
dict(typename="SpatialRandomFlip", flip_ratio=0.5),
dict(typename="Normalize", **img_norm_cfg),
dict(typename="Pad", size=(num_frames, *img_shape)),
dict(typename="DefaultFormatBundle"),
dict(
typename="Collect",
keys=["imgs", "gt_segments", "gt_labels", "gt_segments_ignore"],
),
],
),
val=dict(
typename=dataset_type,
ann_file=data_root + "annotations/test.json",
video_prefix=data_root + f"{img_dir}/test",
pipeline=[
dict(typename="LoadMetaInfo"),
dict(typename="Time2Frame"),
dict(
typename="OverlapCropAug",
num_frames=num_frames,
overlap_ratio=overlap_ratio,
transforms=[
dict(typename="TemporalCrop"),
dict(typename="LoadFrames", to_float32=True),
dict(typename="SpatialCenterCrop", crop_size=img_shape),
dict(typename="Normalize", **img_norm_cfg),
dict(typename="Pad", size=(num_frames, *img_shape)),
dict(typename="DefaultFormatBundle"),
dict(typename="Collect", keys=["imgs"]),
],
),
],
),
)
num_classes = 20
strides = [8, 16, 32, 64, 128]
use_sigmoid = True
scales_per_octave = 5
octave_base_scale = 2
num_anchors = scales_per_octave
model = dict(
typename="SingleStageDetector",
backbone=dict(
typename="GradDropChunkVideoSwinV2",
keep_ratio=0.4,
chunk_size=32,
bp_idx_mode="uniform_jitter",
forward_mode="split",
shift_inp=False,
frozen_stages=2,
use_checkpoint=True,
patch_size=(2, 4, 4),
in_chans=3,
embed_dim=128,
drop_path_rate=0.2,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=(8, 7, 7),
patch_norm=True,
),
neck=[
dict(
typename="SRMSwin",
srm_cfg=dict(
in_channels=1024,
out_channels=512,
),
),
dict(
typename="TDM",
in_channels=512,
stage_layers=(1, 1, 1, 1),
out_channels=512,
conv_cfg=dict(typename="Conv1d"),
norm_cfg=dict(typename="SyncBN"),
act_cfg=dict(typename="ReLU"),
out_indices=(0, 1, 2, 3, 4),
),
dict(
typename="FPN",
in_channels=[512, 512, 512, 512, 512],
out_channels=256,
num_outs=5,
start_level=0,
conv_cfg=dict(typename="Conv1d"),
norm_cfg=dict(typename="SyncBN"),
),
],
head=dict(
typename="RetinaHead",
num_classes=num_classes,
num_anchors=num_anchors,
in_channels=256,
stacked_convs=4,
feat_channels=256,
use_sigmoid=use_sigmoid,
conv_cfg=dict(typename="Conv1d"),
norm_cfg=dict(typename="SyncBN"),
),
)
meshgrid = dict(
typename="SegmentAnchorMeshGrid",
strides=strides,
base_anchor=dict(
typename="SegmentBaseAnchor",
base_sizes=strides,
octave_base_scale=octave_base_scale,
scales_per_octave=scales_per_octave,
),
)
segment_coder = dict(
typename="DeltaSegmentCoder", target_means=[0.0, 0.0], target_stds=[1.0, 1.0]
)
train_engine = dict(
typename="TrainEngine",
model=model,
criterion=dict(
typename="SegmentAnchorCriterion",
num_classes=num_classes,
meshgrid=meshgrid,
segment_coder=segment_coder,
reg_decoded_segment=True,
loss_cls=dict(
typename="FocalLoss",
use_sigmoid=use_sigmoid,
gamma=2.0,
alpha=0.25,
loss_weight=1.0,
),
loss_segment=dict(typename="DIoULoss", loss_weight=1.0),
train_cfg=dict(
assigner=dict(
typename="MaxIoUAssigner",
pos_iou_thr=0.6,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1,
ignore_wrt_candidates=True,
iou_calculator=dict(typename="SegmentOverlaps"),
),
allowed_border=-1,
pos_weight=-1,
debug=False,
),
),
optimizer=dict(
typename="SGD",
lr=0.01,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(custom_keys=dict(backbone={"lr_mult": 0.4})),
),
)
val_engine = dict(
typename="ValEngine",
model=model,
meshgrid=meshgrid,
converter=dict(
typename="SegmentAnchorConverter",
num_classes=num_classes,
segment_coder=segment_coder,
nms_pre=1000,
use_sigmoid=use_sigmoid,
),
num_classes=num_classes,
test_cfg=dict(
score_thr=0.005, nms=dict(typename="nmw", iou_thr=0.5), max_per_video=1200
),
use_sigmoid=use_sigmoid,
)
hooks = [
dict(typename="OptimizerHook"),
dict(
typename="CosineRestartLrSchedulerHook",
periods=[100] * 12,
restart_weights=[1] * 12,
warmup="linear",
warmup_iters=500,
warmup_ratio=1e-1,
min_lr_ratio=1e-2,
),
dict(typename="EvalHook", eval_cfg=dict(mode="anet")),
dict(typename="SnapshotHook", interval=100),
dict(typename="LoggerHook", interval=10),
]
modes = ["train"]
max_epochs = 1200
weights = dict(
filepath="data/pretrained_models/vswin/swin_base_patch244_window877_kinetics400_22k_keysfrom_backbone.pth"
)
seed = 10
dist_params = dict(backend="nccl")
log_level = "INFO"
find_unused_parameters = False
deterministic = True
| true | true |
1c2eb69e1e3be5add0d649df44930718da7344ef | 797 | py | Python | miniurl_generic/url/migrations/0001_initial.py | yannisVentura/Django_MiniUrl_Bootstrap | 89a7cdb8c12ba75cf0cd2cb6206961506aad9287 | [
"BSD-3-Clause"
] | null | null | null | miniurl_generic/url/migrations/0001_initial.py | yannisVentura/Django_MiniUrl_Bootstrap | 89a7cdb8c12ba75cf0cd2cb6206961506aad9287 | [
"BSD-3-Clause"
] | 8 | 2017-04-05T13:15:39.000Z | 2017-10-17T09:39:04.000Z | miniurl_generic/url/migrations/0001_initial.py | yannisVentura/Django_MiniUrl_Bootstrap | 89a7cdb8c12ba75cf0cd2cb6206961506aad9287 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-04 08:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MiniUrl',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url_complete', models.CharField(max_length=255)),
('date_creation', models.DateTimeField(auto_now_add=True, verbose_name='Date de parution')),
('pseudo', models.IntegerField(max_length=100)),
('nb_acces', models.CharField(max_length=10)),
],
),
]
| 29.518519 | 114 | 0.603513 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MiniUrl',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url_complete', models.CharField(max_length=255)),
('date_creation', models.DateTimeField(auto_now_add=True, verbose_name='Date de parution')),
('pseudo', models.IntegerField(max_length=100)),
('nb_acces', models.CharField(max_length=10)),
],
),
]
| true | true |
1c2eb6b0bb8c9e52dc65101ee61fd393746c6d9b | 1,205 | py | Python | airflow/ti_deps/deps/dag_unpaused_dep.py | wileeam/airflow | f46be8152a4d89c57db4ca46f5b3339e4876b723 | [
"Apache-2.0"
] | 8 | 2017-04-20T16:15:44.000Z | 2020-10-11T13:44:10.000Z | airflow/ti_deps/deps/dag_unpaused_dep.py | devlocalca/airflow | 58c3542ed25061320ce61dbe0adf451a44c738dd | [
"Apache-2.0"
] | 219 | 2017-03-15T18:40:16.000Z | 2022-02-28T22:52:43.000Z | airflow/ti_deps/deps/dag_unpaused_dep.py | devlocalca/airflow | 58c3542ed25061320ce61dbe0adf451a44c738dd | [
"Apache-2.0"
] | 3 | 2016-07-14T21:51:10.000Z | 2020-10-12T13:26:36.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class DagUnpausedDep(BaseTIDep):
NAME = "Dag Not Paused"
IGNOREABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.task.dag.is_paused:
yield self._failing_status(
reason="Task's DAG '{0}' is paused.".format(ti.dag_id))
| 37.65625 | 71 | 0.744398 |
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class DagUnpausedDep(BaseTIDep):
NAME = "Dag Not Paused"
IGNOREABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.task.dag.is_paused:
yield self._failing_status(
reason="Task's DAG '{0}' is paused.".format(ti.dag_id))
| true | true |
1c2eb6fa73a233d255344a1bb49e88539d1b2bb5 | 616 | py | Python | leetcode/easy/intersection_of_two_arrays_ii/py/solution.py | lilsweetcaligula/Online-Judges | 48454a8e6b5b86f80e89eca1b396480df8960cfd | [
"MIT"
] | null | null | null | leetcode/easy/intersection_of_two_arrays_ii/py/solution.py | lilsweetcaligula/Online-Judges | 48454a8e6b5b86f80e89eca1b396480df8960cfd | [
"MIT"
] | null | null | null | leetcode/easy/intersection_of_two_arrays_ii/py/solution.py | lilsweetcaligula/Online-Judges | 48454a8e6b5b86f80e89eca1b396480df8960cfd | [
"MIT"
] | null | null | null | class Solution(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
result = []
nums1 = sorted(nums1)
nums2 = sorted(nums2)
i = 0
j = 0
while i < len(nums1) and j < len(nums2):
if nums1[i] == nums2[j]:
result.append(nums1[i])
i += 1
j += 1
elif nums1[i] > nums2[j]:
j += 1
else:
i += 1
return result
| 23.692308 | 48 | 0.371753 | class Solution(object):
def intersect(self, nums1, nums2):
result = []
nums1 = sorted(nums1)
nums2 = sorted(nums2)
i = 0
j = 0
while i < len(nums1) and j < len(nums2):
if nums1[i] == nums2[j]:
result.append(nums1[i])
i += 1
j += 1
elif nums1[i] > nums2[j]:
j += 1
else:
i += 1
return result
| true | true |
1c2eb71ead2363009a56bf89e169532913118a38 | 160 | py | Python | cards/apps.py | neosergio/WisdomBox | f41bd828f5e264c7ad05262b29c8f02cf904b54a | [
"MIT"
] | null | null | null | cards/apps.py | neosergio/WisdomBox | f41bd828f5e264c7ad05262b29c8f02cf904b54a | [
"MIT"
] | 2 | 2017-02-09T14:52:43.000Z | 2017-02-10T19:31:38.000Z | cards/apps.py | neosergio/WisdomBox | f41bd828f5e264c7ad05262b29c8f02cf904b54a | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.apps import AppConfig
class CardsConfig(AppConfig):
name = 'cards'
verbose_name = 'Wisdom Cards'
| 17.777778 | 39 | 0.7625 | from __future__ import unicode_literals
from django.apps import AppConfig
class CardsConfig(AppConfig):
name = 'cards'
verbose_name = 'Wisdom Cards'
| true | true |
1c2eb726a627a42117538c0779f66d6db0987c03 | 218 | py | Python | stupidArtnet/__init__.py | verycollective/stupidArtnet | 80e7722598d06e1c533ffd28a92e26c784e23389 | [
"MIT"
] | 13 | 2018-11-07T20:19:57.000Z | 2020-07-31T23:15:00.000Z | stupidArtnet/__init__.py | digitalanimalscollective/stupidArtnet | 80e7722598d06e1c533ffd28a92e26c784e23389 | [
"MIT"
] | 2 | 2019-04-10T17:22:23.000Z | 2020-02-11T14:55:44.000Z | stupidArtnet/__init__.py | digitalanimalscollective/stupidArtnet | 80e7722598d06e1c533ffd28a92e26c784e23389 | [
"MIT"
] | 9 | 2019-03-01T12:00:27.000Z | 2020-07-30T19:34:15.000Z | """Facilitates library imports."""
from stupidArtnet.StupidArtnetServer import StupidArtnetServer
from stupidArtnet.ArtnetUtils import shift_this, put_in_range, make_address_mask
from .StupidArtnet import StupidArtnet
| 43.6 | 80 | 0.866972 | from stupidArtnet.StupidArtnetServer import StupidArtnetServer
from stupidArtnet.ArtnetUtils import shift_this, put_in_range, make_address_mask
from .StupidArtnet import StupidArtnet
| true | true |
1c2eb746dbf03c9401fcbe7364388d36d331e74a | 72 | py | Python | straph/paths/__init__.py | GiulioRossetti/Straph | edc021d25243bcca619c62dca1f28cf05b73a92c | [
"Apache-2.0"
] | 3 | 2021-05-24T16:23:51.000Z | 2021-08-07T20:14:53.000Z | straph/paths/__init__.py | GiulioRossetti/Straph | edc021d25243bcca619c62dca1f28cf05b73a92c | [
"Apache-2.0"
] | 1 | 2021-05-25T12:30:36.000Z | 2021-05-25T12:30:36.000Z | straph/paths/__init__.py | GiulioRossetti/Straph | edc021d25243bcca619c62dca1f28cf05b73a92c | [
"Apache-2.0"
] | 3 | 2021-05-25T09:04:43.000Z | 2021-11-02T16:27:23.000Z | from straph.paths.path_object import *
from straph.paths.paths import *
| 24 | 38 | 0.805556 | from straph.paths.path_object import *
from straph.paths.paths import *
| true | true |
1c2eb7cfed9158485e706c5f38abfc820fbb10d4 | 3,169 | py | Python | src/ef/field/__init__.py | fizmat/ef_python | 1fccdc90b1dd628c9adb5f8713bee4e4df633daf | [
"MIT"
] | 1 | 2018-12-13T08:32:46.000Z | 2018-12-13T08:32:46.000Z | src/ef/field/__init__.py | dumbman/ef_python | d26e48d3afd81c3b24a5ec207fce674cdc9609d3 | [
"MIT"
] | 3 | 2018-05-21T11:26:57.000Z | 2020-03-12T12:28:44.000Z | src/ef/field/__init__.py | dumbman/ef_python | d26e48d3afd81c3b24a5ec207fce674cdc9609d3 | [
"MIT"
] | null | null | null | import inject
import numpy as np
from ef.util.serializable_h5 import SerializableH5
class Field(SerializableH5):
def __init__(self, name, electric_or_magnetic):
self.name = name
self.electric_or_magnetic = electric_or_magnetic
def __add__(self, b):
return FieldSum.factory((self, b))
def get_at_points(self, positions, time):
raise NotImplementedError()
@staticmethod
def import_h5(g):
from ef.field.expression import FieldExpression
from ef.field.from_csv import FieldFromCSVFile
from ef.field.uniform import FieldUniform
ga = g.attrs
ft = ga['field_type']
name = g.name.split('/')[-1]
if ft == b'electric_uniform':
return FieldUniform(name, 'electric',
np.array([ga['electric_uniform_field_{}'.format(c)] for c in 'xyz']).reshape(3))
elif ft == b'electric_tinyexpr':
return FieldExpression(name, 'electric',
*[ga['electric_tinyexpr_field_{}'.format(c)].decode('utf8') for c in 'xyz'])
elif ft == b'electric_on_regular_grid':
return FieldFromCSVFile(name, 'electric', ga['electric_h5filename'].decode('utf8'))
elif ft == b'magnetic_uniform':
return FieldUniform(name, 'magnetic',
np.array([ga['magnetic_uniform_field_{}'.format(c)] for c in 'xyz']).reshape(3))
elif ft == b'magnetic_tinyexpr':
return FieldExpression(name, 'magnetic',
*[ga['magnetic_tinyexpr_field_{}'.format(c)].decode('utf8') for c in 'xyz'])
elif ft == b'magnetic_on_regular_grid':
return FieldFromCSVFile(name, 'magnetic', ga['magnetic_h5filename'].decode('utf8'))
class FieldZero(Field):
xp = inject.attr(np)
def get_at_points(self, positions, time):
positions = self.xp.array(positions)
return self.xp.zeros_like(positions)
class FieldSum(Field):
def __init__(self, electric_or_magnetic, fields):
super().__init__('FieldSum', electric_or_magnetic)
self.fields = fields
@classmethod
def factory(cls, fields, electric_or_magnetic=None):
try:
fields = [f for f in fields if f is not None]
except TypeError:
fields = [] if fields is None else [fields]
em = (set(f.electric_or_magnetic for f in fields) | {electric_or_magnetic}) - {None}
if len(em) > 1:
raise ValueError('Trying to combine inconsistent fields')
elif em:
em = em.pop()
else:
raise ValueError('FieldSum type unknown')
sums = [f for f in fields if type(f) is FieldSum]
fields = [f for f in fields if type(f) not in (FieldZero, FieldSum)]
for f in sums:
fields += f.fields
if len(fields) > 1:
return cls(em, fields)
elif len(fields) == 1:
return fields[0]
else:
return FieldZero('ZeroSum', em)
def get_at_points(self, positions, time):
return sum(f.get_at_points(positions, time) for f in self.fields)
| 38.180723 | 112 | 0.603976 | import inject
import numpy as np
from ef.util.serializable_h5 import SerializableH5
class Field(SerializableH5):
def __init__(self, name, electric_or_magnetic):
self.name = name
self.electric_or_magnetic = electric_or_magnetic
def __add__(self, b):
return FieldSum.factory((self, b))
def get_at_points(self, positions, time):
raise NotImplementedError()
@staticmethod
def import_h5(g):
from ef.field.expression import FieldExpression
from ef.field.from_csv import FieldFromCSVFile
from ef.field.uniform import FieldUniform
ga = g.attrs
ft = ga['field_type']
name = g.name.split('/')[-1]
if ft == b'electric_uniform':
return FieldUniform(name, 'electric',
np.array([ga['electric_uniform_field_{}'.format(c)] for c in 'xyz']).reshape(3))
elif ft == b'electric_tinyexpr':
return FieldExpression(name, 'electric',
*[ga['electric_tinyexpr_field_{}'.format(c)].decode('utf8') for c in 'xyz'])
elif ft == b'electric_on_regular_grid':
return FieldFromCSVFile(name, 'electric', ga['electric_h5filename'].decode('utf8'))
elif ft == b'magnetic_uniform':
return FieldUniform(name, 'magnetic',
np.array([ga['magnetic_uniform_field_{}'.format(c)] for c in 'xyz']).reshape(3))
elif ft == b'magnetic_tinyexpr':
return FieldExpression(name, 'magnetic',
*[ga['magnetic_tinyexpr_field_{}'.format(c)].decode('utf8') for c in 'xyz'])
elif ft == b'magnetic_on_regular_grid':
return FieldFromCSVFile(name, 'magnetic', ga['magnetic_h5filename'].decode('utf8'))
class FieldZero(Field):
xp = inject.attr(np)
def get_at_points(self, positions, time):
positions = self.xp.array(positions)
return self.xp.zeros_like(positions)
class FieldSum(Field):
def __init__(self, electric_or_magnetic, fields):
super().__init__('FieldSum', electric_or_magnetic)
self.fields = fields
@classmethod
def factory(cls, fields, electric_or_magnetic=None):
try:
fields = [f for f in fields if f is not None]
except TypeError:
fields = [] if fields is None else [fields]
em = (set(f.electric_or_magnetic for f in fields) | {electric_or_magnetic}) - {None}
if len(em) > 1:
raise ValueError('Trying to combine inconsistent fields')
elif em:
em = em.pop()
else:
raise ValueError('FieldSum type unknown')
sums = [f for f in fields if type(f) is FieldSum]
fields = [f for f in fields if type(f) not in (FieldZero, FieldSum)]
for f in sums:
fields += f.fields
if len(fields) > 1:
return cls(em, fields)
elif len(fields) == 1:
return fields[0]
else:
return FieldZero('ZeroSum', em)
def get_at_points(self, positions, time):
return sum(f.get_at_points(positions, time) for f in self.fields)
| true | true |
1c2eb7eb9834caf7eb2dfffa95fda486d9a9f4e3 | 35,676 | py | Python | pyrex/generation.py | abigailbishop/pyrex | 10ba2e9f4c8820f4fcf5f00bd866927dacb0b2b5 | [
"MIT"
] | 6 | 2018-06-19T16:01:35.000Z | 2020-05-21T20:02:53.000Z | pyrex/generation.py | bhokansonfasig/pyrex | 8b2abc954f2cf4945424042f33847b783c72dcfa | [
"MIT"
] | 4 | 2018-01-19T14:52:33.000Z | 2021-09-02T17:20:07.000Z | pyrex/generation.py | abigailbishop/pyrex | 10ba2e9f4c8820f4fcf5f00bd866927dacb0b2b5 | [
"MIT"
] | 5 | 2018-01-19T14:49:16.000Z | 2021-01-21T12:34:59.000Z | """
Module for particle (neutrino) generators.
Generators are responsible for the input of events into the simulation.
"""
from collections.abc import Iterable
from enum import Enum
import logging
import numpy as np
from pyrex.internal_functions import get_from_enum
from pyrex.earth_model import earth
from pyrex.particle import Event, Particle, NeutrinoInteraction
from pyrex.io import File
logger = logging.getLogger(__name__)
class Generator:
    """
    Base class for neutrino generators.

    Provides methods for generating neutrino attributes except for neutrino
    vertex, which should be provided by child classes to generate neutrinos
    in specific volumes.

    Parameters
    ----------
    energy : float or function
        Energy (GeV) of the neutrinos. If ``float``, all neutrinos have the
        same constant energy. If ``function``, neutrinos are generated with the
        energy returned by successive function calls.
    shadow : bool, optional
        Whether Earth shadowing effects should be used to reject events. If
        ``True`` then neutrinos which don't survive transit through the Earth
        will be skipped when creating events. If ``False`` then all events are
        allowed and assigned a weight to scale their probability of occurrence.
    flavor_ratio : array_like, optional
        Flavor ratio of neutrinos to be generated. Of the form [electron, muon,
        tau] neutrino fractions.
    source : optional
        Source type of neutrinos to be generated. Used in the determination of
        per-flavor neutrino/antineutrino fractions.
    interaction_model : optional
        Class to use to describe interactions of the generated particles.
        Should inherit from (or behave like) the base ``Interaction`` class.

    Attributes
    ----------
    count : int
        Number of neutrinos produced by the generator, including those not
        returned due to Earth shadowing or other effects.
    get_energy : function
        Function returning energy (GeV) of the neutrinos by successive function
        calls.
    shadow : bool
        Whether Earth shadowing effects will be used to reject events.
    ratio : ndarray
        (Normalized) flavor ratio of neutrinos to be generated. Of the form
        [electron, muon, tau] neutrino fractions.
    source : Generator.SourceType
        Source type of neutrinos to be generated. Used in the determination of
        per-flavor neutrino/antineutrino fractions.
    interaction_model : Interaction
        Class to use to describe interactions of the generated particles.
    volume
    solid_angle

    See Also
    --------
    pyrex.particle.Interaction : Base class for describing neutrino interaction
                                 attributes.

    """
    class SourceType(Enum):
        """
        Enum containing possible sources for neutrinos.

        Attributes
        ----------
        pgamma, cosmogenic
        pp, astrophysical
        unknown, undefined

        """
        undefined = 0
        unknown = 0
        cosmogenic = 1
        pgamma = 1
        astrophysical = 2
        pp = 2

    def __init__(self, energy, shadow=False, flavor_ratio=(1,1,1),
                 source="cosmogenic", interaction_model=NeutrinoInteraction,
                 earth_model=earth):
        if not callable(energy):
            # Allow a constant energy to be given; wrap it in a trivial
            # function so get_energy always has a callable interface
            try:
                e = float(energy)
            except TypeError:
                raise ValueError("energy argument must be a function "+
                                 "or a number")
            else:
                energy = lambda: e
        self.get_energy = energy
        self.shadow = shadow
        self.ratio = np.array(flavor_ratio)/np.sum(flavor_ratio)
        self.source = source
        self.interaction_model = interaction_model
        self.earth_model = earth_model
        self.count = 0

    @property
    def source(self):
        """
        Value of the source type.

        Should always be a value from the ``Interaction.Type`` enum. Setting
        with integer or string values may work if carefully chosen.

        """
        return self._source

    @source.setter
    def source(self, src_type):
        if src_type is None:
            self._source = self.SourceType.undefined
        else:
            self._source = get_from_enum(src_type, self.SourceType)

    @property
    def volume(self):
        """
        Generation volume (m^3) in which event vertices are produced.

        """
        raise NotImplementedError("volume property must be implemented by "+
                                  "inheriting class")

    @property
    def solid_angle(self):
        """
        Generation solid angle (sr) in which event directions are produced.

        """
        logger.debug("Using default solid_angle from "+
                     "pyrex.generation.Generator")
        return 4 * np.pi

    def get_vertex(self):
        """
        Get the vertex of the next particle to be generated.

        For the `Generator` class, this method is not implemented.
        Subclasses should override this method with their own procedure for
        generating neutrino vertices in some volume.

        Raises
        ------
        NotImplementedError
            Always, unless a subclass overrides the function.

        """
        logger.debug("Using default get_vertex from "+
                     "pyrex.generation.Generator")
        raise NotImplementedError("get_vertex method must be implemented by "
                                  +"inheriting class")

    def get_direction(self):
        """
        Get the direction of the next particle to be generated.

        Randomly generates a cartesian unit vector uniformly distributed over
        the unit sphere.

        Returns
        -------
        ndarray
            (Unit) vector direction.

        Notes
        -----
        Generates random vector direction by pulling from uniform distributions
        for -1<cos(theta)<1 and 0<phi<2*pi.

        """
        cos_theta = np.random.random_sample()*2-1
        sin_theta = np.sqrt(1 - cos_theta**2)
        phi = np.random.random_sample() * 2*np.pi
        return np.array([sin_theta * np.cos(phi),
                         sin_theta * np.sin(phi),
                         cos_theta])

    def get_particle_type(self):
        """
        Get the particle type of the next particle to be generated.

        Randomly generates a neutrino flavor according to the flavor ratio of
        the generator, and chooses neutrino or antineutrino based on ratios
        derived from the source type.

        Returns
        -------
        Particle.Type
            Enum value for the type of the particle.

        See Also
        --------
        pyrex.Particle : Class for storing particle attributes.

        Notes
        -----
        The neutrino/antineutrino choice is based on Section 3 of [1]_.

        References
        ----------
        .. [1] A. Bhattacharya et al, "The Glashow resonance at IceCube."
            JCAP **1110**, 017 (2011). :arxiv:`1108.3163`
            :doi:`10.1088/1475-7516/2011/10/017`

        """
        rand_flavor = np.random.rand()
        rand_nunubar = np.random.rand()
        # Per-flavor nu/nubar fractions depend on the neutrino source type
        if self.source==self.SourceType.cosmogenic:
            nunubar_ratios = [0.78, 0.61, 0.61]
        elif self.source==self.SourceType.astrophysical:
            nunubar_ratios = [0.5, 0.5, 0.5]
        else:
            raise ValueError("Source type not supported")
        # Electron neutrinos
        if rand_flavor<self.ratio[0]:
            if rand_nunubar<nunubar_ratios[0]:
                return Particle.Type.electron_neutrino
            else:
                return Particle.Type.electron_antineutrino
        # Muon neutrinos
        elif rand_flavor<self.ratio[0]+self.ratio[1]:
            if rand_nunubar<nunubar_ratios[1]:
                return Particle.Type.muon_neutrino
            else:
                return Particle.Type.muon_antineutrino
        # Tau neutrinos
        else:
            if rand_nunubar<nunubar_ratios[2]:
                return Particle.Type.tau_neutrino
            else:
                return Particle.Type.tau_antineutrino

    def get_exit_points(self, particle):
        """
        Get the intersections of the particle path with the ice volume edges.

        For the `Generator` class, this method is not implemented.
        Subclasses should override this method with their own procedure for
        calculating exit points given the generation volume.

        Parameters
        ----------
        particle : Particle
            Particle traveling through the ice.

        Raises
        ------
        NotImplementedError
            Always, unless a subclass overrides the function.

        See Also
        --------
        pyrex.Particle : Class for storing particle attributes.

        """
        logger.debug("Using default get_exit_points from "+
                     "pyrex.generation.Generator")
        raise NotImplementedError("get_exit_points method must be implemented "
                                  +"by inheriting class")

    def get_weights(self, particle):
        """
        Get the weighting factors to be applied to the particle.

        Calculates both the survival and interaction weights of `particle`.
        The survival weight is based on the probability of interaction along
        the path through the Earth. The interaction weight of `particle` based
        on the probability of interaction at its given vertex in the ice
        volume.

        Parameters
        ----------
        particle : Particle
            Particle to be weighted.

        Returns
        -------
        survival_weight : float
            Survival weight of the given `particle`.
        interaction_weight : float
            Interaction weight of the given `particle`.

        See Also
        --------
        pyrex.Particle : Class for storing particle attributes.

        """
        t = self.earth_model.slant_depth(particle.vertex, -particle.direction)
        x = t / particle.interaction.total_interaction_length
        survival_weight = np.exp(-x)

        entry_point, exit_point = self.get_exit_points(particle)
        in_ice_vector = np.array(exit_point) - np.array(entry_point)
        in_ice_length = np.sqrt(np.sum(in_ice_vector**2))
        vertex_vector = particle.vertex - np.array(entry_point)
        travel_length = np.sqrt(np.sum(vertex_vector**2))
        # Convert cm water equivalent interaction length to meters in ice
        interaction_length = (particle.interaction.total_interaction_length
                              / 0.92 / 100)
        interaction_weight = (in_ice_length/interaction_length *
                              np.exp(-travel_length/interaction_length))

        return survival_weight, interaction_weight

    def create_event(self):
        """
        Generate a neutrino event in the ice volume.

        Creates a neutrino with a random vertex in the volume, a random
        direction, and an energy based on ``get_energy``. Particle type is
        randomly chosen, and its interaction type is also randomly chosen based
        on the branching ratio. Weights the particles according to their
        survival probability through the Earth and their probability of
        interacting in the ice at their vertex. If Earth shadowing has been
        turned on then particles which don't survive transit through the Earth
        are skipped (new particles are generated until one survives), and
        surviving particles are given a survival weight of 1. Currently each
        `Event` returned consists of only a single `Particle`.

        Returns
        -------
        Event
            Random neutrino event not shadowed by the Earth.

        See Also
        --------
        pyrex.Event : Class for storing a tree of `Particle` objects
                      representing an event.
        pyrex.Particle : Class for storing particle attributes.

        """
        # Loop (rather than recurse) over shadowed particles so that highly
        # improbable survival doesn't hit the interpreter recursion limit
        while True:
            self.count += 1
            vtx = self.get_vertex()
            u = self.get_direction()
            E = self.get_energy()
            particle_id = self.get_particle_type()

            particle = Particle(particle_id=particle_id, vertex=vtx,
                                direction=u, energy=E,
                                interaction_model=self.interaction_model)

            weights = self.get_weights(particle)
            if not self.shadow:
                particle.survival_weight = weights[0]
                particle.interaction_weight = weights[1]
                # Weights are floats in (0, 1]; log with %s so they aren't
                # truncated to zero (as they would be with %d)
                logger.debug("Successfully created %s with survival weight %s "
                             +"and interaction weight %s", particle,
                             weights[0], weights[1])
                return Event(particle)
            elif np.random.rand() < weights[0]:
                particle.survival_weight = 1
                particle.interaction_weight = weights[1]
                logger.debug("Successfully created %s with survival weight %s "
                             +"and interaction weight %s", particle,
                             weights[0], weights[1])
                return Event(particle)
            else:
                # Particle was shadowed by the earth. Try again
                logger.debug("Particle creation shadowed by the Earth")
class CylindricalGenerator(Generator):
    """
    Class to generate neutrino vertices in a cylindrical ice volume.

    Generates neutrinos in a cylinder with given radius and height.

    Parameters
    ----------
    dr : float
        Radius of the ice volume. Neutrinos generated within (0, `dr`).
    dz : float
        Height of the ice volume in the z-direction. Neutrinos generated within
        (-`dz`, 0).
    energy : float or function
        Energy (GeV) of the neutrinos. If ``float``, all neutrinos have the
        same constant energy. If ``function``, neutrinos are generated with the
        energy returned by successive function calls.
    shadow : bool, optional
        Whether Earth shadowing effects should be used to reject events. If
        ``True`` then neutrinos which don't survive transit through the Earth
        will be skipped when creating events. If ``False`` then all events are
        allowed and assigned a weight to scale their probability of occurrence.
    flavor_ratio : array_like, optional
        Flavor ratio of neutrinos to be generated. Of the form [electron, muon,
        tau] neutrino fractions.
    source : optional
        Source type of neutrinos to be generated. Used in the determination of
        per-flavor neutrino/antineutrino fractions.
    interaction_model : optional
        Class to use to describe interactions of the generated particles.
        Should inherit from (or behave like) the base ``Interaction`` class.

    Attributes
    ----------
    count : int
        Number of neutrinos produced by the generator, including those not
        returned due to Earth shadowing or other effects.
    dr : float
        Radius of the ice volume. Neutrinos generated within (0, `dr`).
    dz : float
        Height of the ice volume in the z-direction. Neutrinos generated within
        (-`dz`, 0).
    get_energy : function
        Function returning energy (GeV) of the neutrinos by successive function
        calls.
    shadow : bool
        Whether Earth shadowing effects will be used to reject events.
    ratio : ndarray
        (Normalized) flavor ratio of neutrinos to be generated. Of the form
        [electron, muon, tau] neutrino fractions.
    source : Generator.SourceType
        Source type of neutrinos to be generated. Used in the determination of
        per-flavor neutrino/antineutrino fractions.
    interaction_model : Interaction
        Class to use to describe interactions of the generated particles.
    volume
    solid_angle

    See Also
    --------
    pyrex.particle.Interaction : Base class for describing neutrino interaction
                                 attributes.

    """
    def __init__(self, dr, dz, energy, shadow=False, flavor_ratio=(1,1,1),
                 source="cosmogenic", interaction_model=NeutrinoInteraction,
                 earth_model=earth):
        self.dr = dr
        self.dz = dz
        super().__init__(energy=energy, shadow=shadow,
                         flavor_ratio=flavor_ratio, source=source,
                         interaction_model=interaction_model,
                         earth_model=earth_model)

    @property
    def volume(self):
        """
        Generation volume (m^3) in which event vertices are produced.

        """
        return np.pi * self.dr**2 * self.dz

    def get_vertex(self):
        """
        Get the vertex of the next particle to be generated.

        Randomly generates a vertex uniformly distributed within the specified
        ice volume.

        Returns
        -------
        ndarray
            Vector vertex in the ice volume.

        """
        # sqrt of the uniform sample gives a radial distribution which is
        # uniform in area
        r = self.dr * np.sqrt(np.random.random_sample())
        theta = 2*np.pi * np.random.random_sample()
        z = -self.dz * np.random.random_sample()
        return np.array([r*np.cos(theta), r*np.sin(theta), z])

    def get_exit_points(self, particle):
        """
        Get the intersections of the particle path with the ice volume edges.

        For the given `particle`, calculates where its travel path intersects
        with the edges of the ice volume.

        Parameters
        ----------
        particle : Particle
            Particle traveling through the ice.

        Returns
        -------
        enter_point, exit_point : ndarray
            Vector points where the particle's path intersects with the edges
            of the ice volume.

        See Also
        --------
        pyrex.Particle : Class for storing particle attributes.

        """
        enter_point = None
        exit_point = None
        # Find the intersection points of the circle, assuming infinite z
        if particle.direction[0]==0 and particle.direction[1]==0:
            # Purely vertical path: it never crosses the cylinder's side, only
            # the top and bottom faces. Use +/-inf placeholder z-values so the
            # clamping step below maps the points onto z=0 and z=-dz.
            # (Handled explicitly to avoid dividing by a zero direction
            # component.)
            x0 = particle.vertex[0]
            y0 = particle.vertex[1]
            z0 = -np.inf
            x1 = particle.vertex[0]
            y1 = particle.vertex[1]
            z1 = np.inf
        elif particle.direction[0]==0:
            # Path is parallel to the y-axis in the x-y plane
            x0 = particle.vertex[0]
            y0 = -np.sqrt(self.dr**2 - x0**2)
            z0 = (particle.vertex[2] + (y0-particle.vertex[1])
                  * particle.direction[2]/particle.direction[1])
            x1 = particle.vertex[0]
            y1 = np.sqrt(self.dr**2 - x1**2)
            z1 = (particle.vertex[2] + (y1-particle.vertex[1])
                  * particle.direction[2]/particle.direction[1])
        else:
            # General case: solve the quadratic for the line-circle
            # intersection in the x-y plane
            slope = particle.direction[1]/particle.direction[0]
            a = 1 + slope**2
            b = particle.vertex[1] - slope*particle.vertex[0]
            x0 = - (slope*b + np.sqrt(-b**2 + a*self.dr**2)) / a
            y0 = (particle.vertex[1] - slope *
                  (particle.vertex[0] + np.sqrt(-b**2 + a*self.dr**2))) / a
            z0 = (particle.vertex[2] + (x0-particle.vertex[0])
                  * particle.direction[2]/particle.direction[0])
            x1 = (-slope*b + np.sqrt(-b**2 + a*self.dr**2)) / a
            y1 = (particle.vertex[1] + slope *
                  (-particle.vertex[0] + np.sqrt(-b**2 + a*self.dr**2))) / a
            z1 = (particle.vertex[2] + (x1-particle.vertex[0])
                  * particle.direction[2]/particle.direction[0])
        for pt in ([x0, y0, z0], [x1, y1, z1]):
            # Check for intersections at the top & bottom that supersede the
            # intersections at the sides
            z = None
            if pt[2]>0:
                z = 0
            elif pt[2]<-self.dz:
                z = -self.dz
            if z is not None:
                pt[0] = (particle.vertex[0] + (z-particle.vertex[2])
                         * particle.direction[0]/particle.direction[2])
                pt[1] = (particle.vertex[1] + (z-particle.vertex[2])
                         * particle.direction[1]/particle.direction[2])
                pt[2] = z
            pt = np.array(pt)
            # Sort into enter and exit points based on particle direction
            nonzero = particle.direction!=0
            direction = ((pt[nonzero]-particle.vertex[nonzero])
                         /particle.direction[nonzero])
            if np.all(direction<0):
                enter_point = pt
            elif np.all(direction>0):
                exit_point = pt
            elif np.all(direction==0):
                # Vertex lies exactly on the volume edge
                if enter_point is None:
                    enter_point = pt
                if exit_point is None:
                    exit_point = pt
        if enter_point is not None and exit_point is not None:
            return enter_point, exit_point
        else:
            raise ValueError("Could not determine exit points")
class RectangularGenerator(Generator):
"""
Class to generate neutrino vertices in a rectangular ice volume.
Generates neutrinos in a box with given width, length, and height.
Parameters
----------
dx : float
Width of the ice volume in the x-direction. Neutrinos generated within
(-`dx` / 2, `dx` / 2).
dy : float
Length of the ice volume in the y-direction. Neutrinos generated within
(-`dy` / 2, `dy` / 2).
dz : float
Height of the ice volume in the z-direction. Neutrinos generated within
(-`dz`, 0).
energy : float or function
Energy (GeV) of the neutrinos. If ``float``, all neutrinos have the
same constant energy. If ``function``, neutrinos are generated with the
energy returned by successive function calls.
shadow : bool, optional
Whether Earth shadowing effects should be used to reject events. If
``True`` then neutrinos which don't survive transit through the Earth
will be skipped when creating events. If ``False`` then all events are
allowed and assigned a weight to scale their probability of occurrence.
flavor_ratio : array_like, optional
Flavor ratio of neutrinos to be generated. Of the form [electron, muon,
tau] neutrino fractions.
source : optional
Source type of neutrinos to be generated. Used in the determination of
per-flavor neutrino/antineutrino fractions.
interaction_model : optional
Class to use to describe interactions of the generated particles.
Should inherit from (or behave like) the base ``Interaction`` class.
Attributes
----------
count : int
Number of neutrinos produced by the generator, including those not
returned due to Earth shadowing or other effects.
dx : float
Width of the ice volume in the x-direction. Neutrinos generated within
(-`dx` / 2, `dx` / 2).
dy : float
Length of the ice volume in the y-direction. Neutrinos generated within
(-`dy` / 2, `dy` / 2).
dz : float
Height of the ice volume in the z-direction. Neutrinos generated within
(-`dz`, 0).
get_energy : function
Function returning energy (GeV) of the neutrinos by successive function
calls.
shadow : bool
Whether Earth shadowing effects will be used to reject events.
ratio : ndarray
(Normalized) flavor ratio of neutrinos to be generated. Of the form
[electron, muon, tau] neutrino fractions.
source : Generator.SourceType
Source type of neutrinos to be generated. Used in the determination of
per-flavor neutrino/antineutrino fractions.
interaction_model : Interaction
Class to use to describe interactions of the generated particles.
volume
solid_angle
See Also
--------
pyrex.particle.Interaction : Base class for describing neutrino interaction
attributes.
"""
def __init__(self, dx, dy, dz, energy, shadow=False, flavor_ratio=(1,1,1),
source="cosmogenic", interaction_model=NeutrinoInteraction,
earth_model=earth):
self.dx = dx
self.dy = dy
self.dz = dz
super().__init__(energy=energy, shadow=shadow,
flavor_ratio=flavor_ratio, source=source,
interaction_model=interaction_model,
earth_model=earth_model)
@property
def volume(self):
"""
Generation volume (m^3) in which event vertices are produced.
"""
return self.dx * self.dy * self.dz
def get_vertex(self):
"""
Get the vertex of the next particle to be generated.
Randomly generates a vertex uniformly distributed within the specified
ice volume.
Returns
-------
ndarray
Vector vertex in the ice volume.
"""
return np.random.uniform(low=(-self.dx/2, -self.dy/2, -self.dz),
high=(self.dx/2, self.dy/2, 0))
    def get_exit_points(self, particle):
        """
        Get the intersections of the particle path with the ice volume edges.
        For the given `particle`, calculates where its travel path intersects
        with the edges of the ice volume.
        Parameters
        ----------
        particle : Particle
            Particle traveling through the ice.
        Returns
        -------
        enter_point, exit_point : ndarray
            Vector points where the particle's path intersects with the edges
            of the ice volume.
        See Also
        --------
        pyrex.Particle : Class for storing particle attributes.
        """
        enter_point = None
        exit_point = None
        # Min/max bounds of the box along each axis (x, y, z).
        sides = ((-self.dx/2, self.dx/2),
                 (-self.dy/2, self.dy/2),
                 (-self.dz, 0))
        # Test the particle's line against each of the 6 faces:
        # count//2 selects the axis, count%2 selects the min (0) or max (1) face.
        for count in range(6):
            coord = int(count/2)
            min_max = count%2
            # A ray parallel to this face pair can never cross it.
            if particle.direction[coord]==0:
                continue
            # Parametric distance along the direction to reach this face plane.
            scale = ((sides[coord][min_max] - particle.vertex[coord]) /
                     particle.direction[coord])
            intersection = particle.vertex + particle.direction * scale
            # The plane intersection only counts if it lies within the box
            # bounds of the two remaining axes.
            valid = True
            for i, pair in enumerate(sides):
                if i==coord:
                    continue
                if intersection[i]<pair[0] or intersection[i]>pair[1]:
                    valid = False
            if valid:
                # Moving toward a max face (sign=+1) with negative direction
                # component means the particle entered through it, and vice
                # versa for min faces.
                sign = 1 if min_max==1 else -1
                if sign*particle.direction[coord]<0:
                    enter_point = intersection
                else:
                    exit_point = intersection
                if enter_point is not None and exit_point is not None:
                    return enter_point, exit_point
        # Fewer than two valid face crossings found (e.g. vertex outside box).
        raise ValueError("Could not determine exit points")
class ListGenerator:
    """
    Generator that replays neutrino events from a predefined list.

    Events are returned sequentially from ``events``. When ``loop`` is true
    the sequence restarts after the last entry; otherwise ``StopIteration``
    is raised once the list has been exhausted.

    Attributes
    ----------
    count : int
        Number of events produced so far.
    events : list of Event
        Events to draw from, in order.
    loop : bool
        Whether to cycle through the list more than once.
    """
    def __init__(self, events, loop=True):
        # Accept a single Event (or Particle) as well as an iterable of them.
        if isinstance(events, Iterable) and not isinstance(events, Event):
            self.events = events
        else:
            self.events = [events]
        # Promote any bare Particle entries to single-particle Events.
        for position, entry in enumerate(self.events):
            if isinstance(entry, Particle):
                self.events[position] = Event(entry)
        self.loop = loop
        self._thrown = 0
        self._count_offset = 0

    @property
    def count(self):
        """Total number of events produced (throws plus any manual offset)."""
        return self._thrown + self._count_offset

    @count.setter
    def count(self, custom_count):
        # Remember only the difference, so later throws keep accumulating.
        self._count_offset = custom_count - self._thrown

    def create_event(self):
        """
        Return the next `Event` in the list.

        Raises
        ------
        StopIteration
            If ``loop`` is false and the list is exhausted.
        """
        exhausted = self._thrown >= len(self.events)
        if exhausted and not self.loop:
            raise StopIteration("No more events to be generated")
        self._thrown += 1
        return self.events[(self._thrown - 1) % len(self.events)]
class FileGenerator:
    """
    Class to generate neutrino events from simulation file(s).
    Generates neutrinos by pulling their attributes from a (list of) simulation
    output file(s). Designed to make reproducing simulations easier.
    Parameters
    ----------
    files : str or list of str
        List of file names containing neutrino event information. If only a
        single file name is provided, creates a list with that file alone.
    slice_range : int, optional
        Number of events to load into memory at a time from the files.
        Increasing this value should result in an improvement in speed, while
        decreasing this value should result in an improvement in memory
        consumption.
    interaction_model : optional
        Class used to describe the interactions of the stored particles.
    Attributes
    ----------
    count : int
        Number of neutrinos produced by the generator, including those not
        returned due to Earth shadowing or other effects.
    files : list of str
        List of file names containing neutrino information.
    Warnings
    --------
    This generator only supports `Event` objects containing a single level of
    `Particle` objects. Any dependencies among `Particle` objects will be
    ignored and they will all appear in the root level.
    See Also
    --------
    pyrex.particle.Interaction : Base class for describing neutrino interaction
                                 attributes.
    pyrex.Event : Class for storing a tree of `Particle` objects
                  representing an event.
    pyrex.Particle : Class for storing particle attributes.
    """
    def __init__(self, files, slice_range=100,
                 interaction_model=NeutrinoInteraction):
        if isinstance(files, str):
            self.files = [files]
        else:
            self.files = files
        self.slice_range = slice_range
        self.interaction_model = interaction_model
        # -1 marks "no file opened yet"; _next_file increments before use.
        self._file_index = -1
        # Slot 0 holds a manual count offset; slot i+1 holds the count from
        # file i, so the `count` property is the sum over all slots.
        self._file_counts = [0] * (len(self.files)+1)
        self._load_events()
    @property
    def count(self):
        """
        Number of neutrinos produced by the generator.
        Count includes events which were not returned due to Earth shadowing
        or other effects.
        """
        return sum(self._file_counts)
    @count.setter
    def count(self, custom_count):
        # Adjust the offset slot so the total matches the requested count.
        self._file_counts[0] = custom_count - sum(self._file_counts[1:])
    def _load_events(self):
        """
        Pulls the next chunk of events into memory.
        Reads events up to the ``slice_range`` into memory from the current
        file. If the current file is exhausted, loads the next file.
        Returns
        -------
        list
            List of `Event` objects read from the current file.
        Raises
        ------
        StopIteration
            If the end of the last file in the file list has been reached.
        """
        # Advance to the next file when none is open or the current one is
        # fully consumed.
        if self._file_index<0 or self._event_index>=len(self._file):
            self._next_file()
        start = self._event_index
        stop = self._event_index + self.slice_range
        self._event_index += self.slice_range
        # Clamp the slice to the end of the file.
        if stop>len(self._file):
            stop = len(self._file)
        self._events = []
        self._event_counts = []
        for file_event in self._file[start:stop]:
            info = file_event.get_particle_info()
            particles = []
            # Rebuild each stored particle, flattening any hierarchy into a
            # single root-level list (see class Warnings).
            for p in info:
                part = Particle(
                    particle_id=p['particle_id'],
                    vertex=(p['vertex_x'],
                            p['vertex_y'],
                            p['vertex_z']),
                    direction=(p['direction_x'],
                               p['direction_y'],
                               p['direction_z']),
                    energy=p['energy'],
                    interaction_model=self.interaction_model,
                    interaction_type=p['interaction_kind']
                )
                # Restore per-particle interaction attributes and weights
                # exactly as they were recorded.
                part.interaction.inelasticity = p['interaction_inelasticity']
                part.interaction.em_frac = p['interaction_em_frac']
                part.interaction.had_frac = p['interaction_had_frac']
                part.survival_weight = p['survival_weight']
                part.interaction_weight = p['interaction_weight']
                particles.append(part)
            self._events.append(Event(particles))
            self._event_counts.append(file_event.total_events_thrown)
    def _next_file(self):
        """
        Pulls the next file into memory.
        Reads in the next file from the ``files`` list and stores its `Event`
        objects in memory.
        Raises
        ------
        StopIteration
            If the end of the last file in the file list has been reached.
        """
        self._file_index += 1
        self._event_index = 0
        # Close the previously opened file, if any.
        if self._file_index>0:
            self._file.close()
        if self._file_index>=len(self.files):
            raise StopIteration("No more events to be generated")
        # Try to open the next file with the appropriate slice range,
        # otherwise just settle for opening it at all
        try:
            self._file = File(self.files[self._file_index], 'r',
                              slice_range=self.slice_range)
        except TypeError:
            self._file = File(self.files[self._file_index], 'r')
        self._file.open()
    def create_event(self):
        """
        Generate a neutrino.
        Pulls the next `Event` object from the file(s).
        Returns
        -------
        Event
            Next neutrino `Event` object from the file(s).
        Raises
        ------
        StopIteration
            If the end of the last file in the file list has been reached.
        See Also
        --------
        pyrex.Event : Class for storing a tree of `Particle` objects
                      representing an event.
        pyrex.Particle : Class for storing particle attributes.
        """
        # Refill the in-memory slice when it runs dry.
        if len(self._events)==0:
            self._load_events()
        # Track the cumulative thrown-event count reported by the file.
        self._file_counts[self._file_index+1] = self._event_counts.pop(0)
        return self._events.pop(0)
| 35.927492 | 79 | 0.59746 |
from collections.abc import Iterable
from enum import Enum
import logging
import numpy as np
from pyrex.internal_functions import get_from_enum
from pyrex.earth_model import earth
from pyrex.particle import Event, Particle, NeutrinoInteraction
from pyrex.io import File
logger = logging.getLogger(__name__)
class Generator:
    """
    Base class for neutrino generators: draws vertex, direction, energy and
    flavor, and weights (or rejects) events for Earth shadowing.
    Subclasses must implement ``volume``, ``get_vertex`` and
    ``get_exit_points`` for their specific geometry.
    """
    class SourceType(Enum):
        # Aliased enum values: each source type has two accepted names.
        undefined = 0
        unknown = 0
        cosmogenic = 1
        pgamma = 1
        astrophysical = 2
        pp = 2
    def __init__(self, energy, shadow=False, flavor_ratio=(1,1,1),
                 source="cosmogenic", interaction_model=NeutrinoInteraction,
                 earth_model=earth):
        """Set up the generator; ``energy`` may be a number or a callable."""
        if not callable(energy):
            try:
                e = float(energy)
            except TypeError:
                raise ValueError("energy argument must be a function "+
                                 "or a number")
            else:
                # Wrap a constant energy in a zero-argument callable so
                # get_energy always has the same interface.
                energy = lambda: e
        self.get_energy = energy
        self.shadow = shadow
        # Normalize the flavor ratio so the fractions sum to 1.
        self.ratio = np.array(flavor_ratio)/np.sum(flavor_ratio)
        self.source = source
        self.interaction_model = interaction_model
        self.earth_model = earth_model
        self.count = 0
    @property
    def source(self):
        """Source type of the generated neutrinos (a ``SourceType`` value)."""
        return self._source
    @source.setter
    def source(self, src_type):
        if src_type is None:
            self._source = self.SourceType.undefined
        else:
            # Accept enum members, names, or values.
            self._source = get_from_enum(src_type, self.SourceType)
    @property
    def volume(self):
        """Generation volume (m^3); must be provided by subclasses."""
        raise NotImplementedError("volume property must be implemented by "+
                                  "inheriting class")
    @property
    def solid_angle(self):
        """Solid angle (sr) of generated directions; full sphere by default."""
        logger.debug("Using default solid_angle from "+
                     "pyrex.generation.Generator")
        return 4 * np.pi
    def get_vertex(self):
        """Return a random vertex; must be provided by subclasses."""
        logger.debug("Using default get_vertex from "+
                     "pyrex.generation.Generator")
        raise NotImplementedError("get_vertex method must be implemented by "
                                  +"inheriting class")
    def get_direction(self):
        """Return an isotropically distributed unit direction vector."""
        # Uniform in cos(theta) and phi gives an isotropic distribution.
        cos_theta = np.random.random_sample()*2-1
        sin_theta = np.sqrt(1 - cos_theta**2)
        phi = np.random.random_sample() * 2*np.pi
        return np.array([sin_theta * np.cos(phi),
                         sin_theta * np.sin(phi),
                         cos_theta])
    def get_particle_type(self):
        """
        Draw a random particle type according to the flavor ratio and the
        per-source neutrino/antineutrino fractions.
        """
        rand_flavor = np.random.rand()
        rand_nunubar = np.random.rand()
        # Neutrino (vs antineutrino) fractions per flavor depend on source.
        if self.source==self.SourceType.cosmogenic:
            nunubar_ratios = [0.78, 0.61, 0.61]
        elif self.source==self.SourceType.astrophysical:
            nunubar_ratios = [0.5, 0.5, 0.5]
        else:
            raise ValueError("Source type not supported")
        if rand_flavor<self.ratio[0]:
            if rand_nunubar<nunubar_ratios[0]:
                return Particle.Type.electron_neutrino
            else:
                return Particle.Type.electron_antineutrino
        elif rand_flavor<self.ratio[0]+self.ratio[1]:
            if rand_nunubar<nunubar_ratios[1]:
                return Particle.Type.muon_neutrino
            else:
                return Particle.Type.muon_antineutrino
        else:
            if rand_nunubar<nunubar_ratios[2]:
                return Particle.Type.tau_neutrino
            else:
                return Particle.Type.tau_antineutrino
    def get_exit_points(self, particle):
        """Return volume entry/exit points; must be provided by subclasses."""
        logger.debug("Using default get_exit_points from "+
                     "pyrex.generation.Generator")
        raise NotImplementedError("get_exit_points method must be implemented "
                                  +"by inheriting class")
    def get_weights(self, particle):
        """
        Return (survival_weight, interaction_weight) for `particle`.
        Survival weight is the probability of surviving Earth transit;
        interaction weight is the probability of interacting at the vertex.
        """
        # Slant depth through the Earth along the arrival direction.
        t = self.earth_model.slant_depth(particle.vertex, -particle.direction)
        x = t / particle.interaction.total_interaction_length
        survival_weight = np.exp(-x)
        entry_point, exit_point = self.get_exit_points(particle)
        in_ice_vector = np.array(exit_point) - np.array(entry_point)
        in_ice_length = np.sqrt(np.sum(in_ice_vector**2))
        vertex_vector = particle.vertex - np.array(entry_point)
        travel_length = np.sqrt(np.sum(vertex_vector**2))
        # NOTE(review): the 0.92 and 100 factors presumably convert the
        # interaction length (in column depth) to meters of ice -- confirm
        # against the interaction model's units.
        interaction_length = (particle.interaction.total_interaction_length
                              / 0.92 / 100)
        interaction_weight = (in_ice_length/interaction_length *
                              np.exp(-travel_length/interaction_length))
        return survival_weight, interaction_weight
    def create_event(self):
        """
        Generate one neutrino `Event`.
        If shadowing is off, the event carries its survival weight; if on,
        shadowed candidates are rejected and regenerated recursively.
        """
        self.count += 1
        vtx = self.get_vertex()
        u = self.get_direction()
        E = self.get_energy()
        particle_id = self.get_particle_type()
        particle = Particle(particle_id=particle_id, vertex=vtx, direction=u,
                            energy=E, interaction_model=self.interaction_model)
        weights = self.get_weights(particle)
        if not self.shadow:
            # Keep every event; record the survival probability as a weight.
            particle.survival_weight = weights[0]
            particle.interaction_weight = weights[1]
            logger.debug("Successfully created %s with survival weight %d and "
                         +"interaction weight %d", particle, weights[0],
                         weights[1])
            return Event(particle)
        elif np.random.rand() < weights[0]:
            # Event survived the Earth-shadowing Monte Carlo draw.
            particle.survival_weight = 1
            particle.interaction_weight = weights[1]
            logger.debug("Successfully created %s with survival weight %d and "
                         +"interaction weight %d", particle, weights[0],
                         weights[1])
            return Event(particle)
        else:
            # Shadowed: try again (count is still incremented above).
            logger.debug("Particle creation shadowed by the Earth")
            return self.create_event()
class CylindricalGenerator(Generator):
    """
    Generator producing neutrinos uniformly in a cylindrical ice volume of
    radius ``dr`` and depth ``dz`` (z in (-dz, 0)).
    """
    def __init__(self, dr, dz, energy, shadow=False, flavor_ratio=(1,1,1),
                 source="cosmogenic", interaction_model=NeutrinoInteraction,
                 earth_model=earth):
        """Store the cylinder dimensions and delegate to ``Generator``."""
        self.dr = dr
        self.dz = dz
        super().__init__(energy=energy, shadow=shadow,
                         flavor_ratio=flavor_ratio, source=source,
                         interaction_model=interaction_model,
                         earth_model=earth_model)
    @property
    def volume(self):
        """Generation volume (m^3) of the cylinder: pi * r^2 * h."""
        return np.pi * self.dr**2 * self.dz
    def get_vertex(self):
        """Return a vertex uniformly distributed inside the cylinder."""
        # sqrt of a uniform sample makes the radial density uniform in area.
        r = self.dr * np.sqrt(np.random.random_sample())
        theta = 2*np.pi * np.random.random_sample()
        z = -self.dz * np.random.random_sample()
        return np.array([r*np.cos(theta), r*np.sin(theta), z])
    def get_exit_points(self, particle):
        """
        Return the (enter_point, exit_point) where the particle's straight
        path crosses the cylinder boundary (side wall or end caps).
        Raises ``ValueError`` if the crossings cannot be determined.
        """
        enter_point = None
        exit_point = None
        # First intersect the path with the infinite side wall, handling the
        # vertical-in-x case separately to avoid dividing by direction[0].
        if particle.direction[0]==0:
            x0 = particle.vertex[0]
            y0 = -np.sqrt(self.dr**2 - x0**2)
            z0 = (particle.vertex[2] + (y0-particle.vertex[1])
                  * particle.direction[2]/particle.direction[1])
            x1 = particle.vertex[0]
            y1 = np.sqrt(self.dr**2 - x1**2)
            z1 = (particle.vertex[2] + (y1-particle.vertex[1])
                  * particle.direction[2]/particle.direction[1])
        else:
            # Solve the quadratic for the line y = slope*x + b meeting the
            # circle x^2 + y^2 = dr^2; the two roots are the wall crossings.
            slope = particle.direction[1]/particle.direction[0]
            a = 1 + slope**2
            b = particle.vertex[1] - slope*particle.vertex[0]
            x0 = - (slope*b + np.sqrt(-b**2 + a*self.dr**2)) / a
            y0 = (particle.vertex[1] - slope *
                  (particle.vertex[0] + np.sqrt(-b**2 + a*self.dr**2))) / a
            z0 = (particle.vertex[2] + (x0-particle.vertex[0])
                  * particle.direction[2]/particle.direction[0])
            x1 = (-slope*b + np.sqrt(-b**2 + a*self.dr**2)) / a
            y1 = (particle.vertex[1] + slope *
                  (-particle.vertex[0] + np.sqrt(-b**2 + a*self.dr**2))) / a
            z1 = (particle.vertex[2] + (x1-particle.vertex[0])
                  * particle.direction[2]/particle.direction[0])
        for pt in ([x0, y0, z0], [x1, y1, z1]):
            # If a wall crossing lies above/below the cylinder, replace it by
            # the crossing with the corresponding end cap instead.
            z = None
            if pt[2]>0:
                z = 0
            elif pt[2]<-self.dz:
                z = -self.dz
            if z is not None:
                pt[0] = (particle.vertex[0] + (z-particle.vertex[2])
                         * particle.direction[0]/particle.direction[2])
                pt[1] = (particle.vertex[1] + (z-particle.vertex[2])
                         * particle.direction[1]/particle.direction[2])
                pt[2] = z
            pt = np.array(pt)
            # Classify the point as entry (behind the vertex) or exit (ahead
            # of it) by the sign of the parametric distance along direction.
            nonzero = particle.direction!=0
            direction = ((pt[nonzero]-particle.vertex[nonzero])
                         /particle.direction[nonzero])
            if np.all(direction<0):
                enter_point = pt
            elif np.all(direction>0):
                exit_point = pt
            elif np.all(direction==0):
                # Vertex lies exactly on the boundary: use it for whichever
                # role is still unfilled.
                if enter_point is None:
                    enter_point = pt
                if exit_point is None:
                    exit_point = pt
        if enter_point is not None and exit_point is not None:
            return enter_point, exit_point
        else:
            raise ValueError("Could not determine exit points")
class RectangularGenerator(Generator):
    """
    Generator producing neutrinos uniformly in a rectangular ice volume of
    dimensions ``dx`` x ``dy`` x ``dz`` (z in (-dz, 0)).
    """
    def __init__(self, dx, dy, dz, energy, shadow=False, flavor_ratio=(1,1,1),
                 source="cosmogenic", interaction_model=NeutrinoInteraction,
                 earth_model=earth):
        """Store the box dimensions and delegate to ``Generator``."""
        self.dx = dx
        self.dy = dy
        self.dz = dz
        super().__init__(energy=energy, shadow=shadow,
                         flavor_ratio=flavor_ratio, source=source,
                         interaction_model=interaction_model,
                         earth_model=earth_model)
    @property
    def volume(self):
        """Generation volume (m^3) of the box: dx * dy * dz."""
        return self.dx * self.dy * self.dz
    def get_vertex(self):
        """Return a vertex uniformly distributed inside the box."""
        return np.random.uniform(low=(-self.dx/2, -self.dy/2, -self.dz),
                                 high=(self.dx/2, self.dy/2, 0))
    def get_exit_points(self, particle):
        """
        Return the (enter_point, exit_point) where the particle's straight
        path crosses the box boundary, or raise ``ValueError``.
        """
        enter_point = None
        exit_point = None
        # Min/max bounds along each axis (x, y, z).
        sides = ((-self.dx/2, self.dx/2),
                 (-self.dy/2, self.dy/2),
                 (-self.dz, 0))
        # Test all 6 faces: count//2 is the axis, count%2 picks min/max face.
        for count in range(6):
            coord = int(count/2)
            min_max = count%2
            if particle.direction[coord]==0:
                continue
            # Parametric distance along the direction to the face plane.
            scale = ((sides[coord][min_max] - particle.vertex[coord]) /
                     particle.direction[coord])
            intersection = particle.vertex + particle.direction * scale
            # Only accept intersections inside the bounds of the other axes.
            valid = True
            for i, pair in enumerate(sides):
                if i==coord:
                    continue
                if intersection[i]<pair[0] or intersection[i]>pair[1]:
                    valid = False
            if valid:
                # Direction sign relative to the face determines whether the
                # particle enters or exits through it.
                sign = 1 if min_max==1 else -1
                if sign*particle.direction[coord]<0:
                    enter_point = intersection
                else:
                    exit_point = intersection
                if enter_point is not None and exit_point is not None:
                    return enter_point, exit_point
        raise ValueError("Could not determine exit points")
class ListGenerator:
    """
    Generator that replays neutrino events from a predefined list, optionally
    looping back to the start once the list is exhausted.
    """
    def __init__(self, events, loop=True):
        # Accept a single Event (or Particle) as well as an iterable of them.
        if (isinstance(events, Iterable) and
                not isinstance(events, Event)):
            self.events = events
        else:
            self.events = [events]
        # Promote bare Particle entries to single-particle Events.
        for i, event in enumerate(self.events):
            if isinstance(event, Particle):
                self.events[i] = Event(event)
        self.loop = loop
        self._index = 0
        self._additional_counts = 0
    @property
    def count(self):
        """Total number of events produced (throws plus manual offset)."""
        return self._index + self._additional_counts
    @count.setter
    def count(self, custom_count):
        # Store the difference so future throws keep incrementing the total.
        self._additional_counts = custom_count - self._index
    def create_event(self):
        """
        Return the next `Event` in the list.
        Raises ``StopIteration`` if ``loop`` is false and the list is done.
        """
        if not self.loop and self._index>=len(self.events):
            raise StopIteration("No more events to be generated")
        self._index += 1
        return self.events[(self._index-1)%len(self.events)]
class FileGenerator:
    """
    Generator that replays neutrino events from simulation output file(s),
    reading them lazily in chunks of ``slice_range`` events.
    """
    def __init__(self, files, slice_range=100,
                 interaction_model=NeutrinoInteraction):
        if isinstance(files, str):
            self.files = [files]
        else:
            self.files = files
        self.slice_range = slice_range
        self.interaction_model = interaction_model
        # -1 marks "no file opened yet"; _next_file increments before use.
        self._file_index = -1
        # Slot 0 is a manual count offset; slot i+1 is the count from file i.
        self._file_counts = [0] * (len(self.files)+1)
        self._load_events()
    @property
    def count(self):
        """Total number of events thrown across all files (plus offset)."""
        return sum(self._file_counts)
    @count.setter
    def count(self, custom_count):
        # Adjust the offset slot so the total matches the requested count.
        self._file_counts[0] = custom_count - sum(self._file_counts[1:])
    def _load_events(self):
        """Read the next chunk of up to ``slice_range`` events into memory."""
        # Move on to the next file when none is open or the current one is
        # fully consumed.
        if self._file_index<0 or self._event_index>=len(self._file):
            self._next_file()
        start = self._event_index
        stop = self._event_index + self.slice_range
        self._event_index += self.slice_range
        # Clamp the slice to the end of the file.
        if stop>len(self._file):
            stop = len(self._file)
        self._events = []
        self._event_counts = []
        for file_event in self._file[start:stop]:
            info = file_event.get_particle_info()
            particles = []
            # Rebuild each stored particle with its recorded attributes.
            for p in info:
                part = Particle(
                    particle_id=p['particle_id'],
                    vertex=(p['vertex_x'],
                            p['vertex_y'],
                            p['vertex_z']),
                    direction=(p['direction_x'],
                               p['direction_y'],
                               p['direction_z']),
                    energy=p['energy'],
                    interaction_model=self.interaction_model,
                    interaction_type=p['interaction_kind']
                )
                part.interaction.inelasticity = p['interaction_inelasticity']
                part.interaction.em_frac = p['interaction_em_frac']
                part.interaction.had_frac = p['interaction_had_frac']
                part.survival_weight = p['survival_weight']
                part.interaction_weight = p['interaction_weight']
                particles.append(part)
            self._events.append(Event(particles))
            self._event_counts.append(file_event.total_events_thrown)
    def _next_file(self):
        """Open the next file in the list; raise StopIteration when done."""
        self._file_index += 1
        self._event_index = 0
        # Close the previously opened file, if any.
        if self._file_index>0:
            self._file.close()
        if self._file_index>=len(self.files):
            raise StopIteration("No more events to be generated")
        # Prefer opening with a slice range; fall back if unsupported.
        try:
            self._file = File(self.files[self._file_index], 'r',
                              slice_range=self.slice_range)
        except TypeError:
            self._file = File(self.files[self._file_index], 'r')
        self._file.open()
    def create_event(self):
        """Return the next `Event` from the file(s)."""
        # Refill the in-memory slice when it runs dry.
        if len(self._events)==0:
            self._load_events()
        # Track the cumulative thrown-event count reported by the file.
        self._file_counts[self._file_index+1] = self._event_counts.pop(0)
        return self._events.pop(0)
| true | true |
1c2eb94162bd8c245bbcdc3f3cc42fa660d07460 | 6,069 | py | Python | routes.py | kuldeep24680/RESTAPI-for-analyzing-data-to-excel-uploaded-file-and-downloading-resultant-csv-files-on-flask | 5e3f35de2599f71078974c4e5a46bb94da426200 | [
"MIT"
] | null | null | null | routes.py | kuldeep24680/RESTAPI-for-analyzing-data-to-excel-uploaded-file-and-downloading-resultant-csv-files-on-flask | 5e3f35de2599f71078974c4e5a46bb94da426200 | [
"MIT"
] | null | null | null | routes.py | kuldeep24680/RESTAPI-for-analyzing-data-to-excel-uploaded-file-and-downloading-resultant-csv-files-on-flask | 5e3f35de2599f71078974c4e5a46bb94da426200 | [
"MIT"
] | null | null | null | from app import app
from flask import render_template, request,send_file
import pandas as pd
import csv
import xlrd
# to route the user the upload the .xlsx file
@app.route('/')
def upload_file():
    """Serve the page with the .xlsx upload form."""
    template_name = 'upload.html'
    return render_template(template_name)
# function that converts the .xlsx file into .csv file and saves it into input folder as test.csv which is used for further analytics
@app.route('/Fileupload', methods = ['POST'])
def upload():
    """
    Accept an .xlsx upload and convert its 'Raw Data' sheet to input/test.csv.

    The uploaded workbook is saved to input/sample.xlsx, then every row of
    its 'Raw Data' sheet is written to input/test.csv for later analysis.
    """
    if request.method == 'POST':
        f = request.files['file']
        f.save("input/sample.xlsx")
        wb = xlrd.open_workbook('input/sample.xlsx')
        sh = wb.sheet_by_name('Raw Data')
        # newline='' is required when handing a file to csv.writer (otherwise
        # Windows gets doubled line endings); the context manager guarantees
        # the handle is closed even if a write fails.
        with open('input/test.csv', 'w', newline='') as out_file:
            wr = csv.writer(out_file, quoting=csv.QUOTE_ALL)
            for rownum in range(sh.nrows):
                wr.writerow(sh.row_values(rownum))
        return 'file uploaded successfully'
# function that reads the test.csv and creates dataframe from which a filtered dataframe is generated which contains all the metabolics with suffix as PC and later write it a file
@app.route('/metabolic_suffix_PC')
def metabolic_suffix_PC():
    """
    Send a CSV of the rows whose metabolite name (column 3) ends with 'PC'.

    Reads input/test.csv, restores the real column names from the header row,
    filters by suffix, writes outputs/output_PC.csv, and returns it as a
    download.
    """
    # Read the header row once to recover the real column names, then map
    # positional indices -> names (avoids shadowing the builtin `dict`).
    header = pd.read_csv('input/test.csv', engine='python')
    name_map = {i: name for i, name in enumerate(header.columns)}
    data = pd.read_csv('input/test.csv', header=None, skiprows=1,
                       engine='python')
    # NOTE(review): endswith('PC') also matches 'LPC' names -- confirm
    # whether those should be excluded here.
    filtered = data[data[2].str.endswith('PC', na=False)].rename(
        columns=name_map)
    # Write real CSV; the original wrote to_string() fixed-width text into a
    # .csv file served with a text/csv mimetype.
    filtered.to_csv('outputs/output_PC.csv', index=False)
    return send_file('outputs/output_PC.csv',
                     mimetype='text/csv',
                     attachment_filename='output_PC.csv',
                     as_attachment=True)
# function that reads the test.csv and creates dataframe from which a filtered dataframe is generated which contains all the metabolics with suffix as LPC and later write it a file
@app.route('/metabolic_suffix_LPC')
def metabolic_suffix_LPC():
    """
    Send a CSV of the rows whose metabolite name (column 3) ends with 'LPC'.

    Reads input/test.csv, restores the real column names from the header row,
    filters by suffix, writes outputs/output_LPC.csv, and returns it as a
    download.
    """
    # Recover real column names from the header row; mapping indices -> names
    # (avoids shadowing the builtin `dict`).
    header = pd.read_csv('input/test.csv', engine='python')
    name_map = {i: name for i, name in enumerate(header.columns)}
    data = pd.read_csv('input/test.csv', header=None, skiprows=1,
                       engine='python')
    filtered = data[data[2].str.endswith('LPC', na=False)].rename(
        columns=name_map)
    # Write real CSV; the original wrote to_string() fixed-width text into a
    # .csv file served with a text/csv mimetype.
    filtered.to_csv('outputs/output_LPC.csv', index=False)
    return send_file('outputs/output_LPC.csv',
                     mimetype='text/csv',
                     attachment_filename='output_LPC.csv',
                     as_attachment=True)
# function that reads the test.csv and creates dataframe from which a filtered dataframe is generated which contains all the metabolics with suffix as plasmalogen and later write it a file
@app.route('/metabolic_suffix_plasmalogen')
def metabolic_suffix_plasmalogen():
    """
    Send a CSV of the rows whose metabolite name (column 3) ends with
    'plasmalogen'.

    Reads input/test.csv, restores the real column names from the header row,
    filters by suffix, writes outputs/output_plasmalogen.csv, and returns it
    as a download.
    """
    # Recover real column names from the header row; mapping indices -> names
    # (avoids shadowing the builtin `dict`).
    header = pd.read_csv('input/test.csv', engine='python')
    name_map = {i: name for i, name in enumerate(header.columns)}
    data = pd.read_csv('input/test.csv', header=None, skiprows=1,
                       engine='python')
    filtered = data[data[2].str.endswith('plasmalogen', na=False)].rename(
        columns=name_map)
    # Write real CSV; the original wrote to_string() fixed-width text into a
    # .csv file served with a text/csv mimetype.
    filtered.to_csv('outputs/output_plasmalogen.csv', index=False)
    return send_file('outputs/output_plasmalogen.csv',
                     mimetype='text/csv',
                     attachment_filename='output_plasmalogen.csv',
                     as_attachment=True)
# function that appends the Retention Time Roundoff column in the main dataframe and later write it a file
@app.route('/parentfile_with_RTR')
def parentfile_with_RTR():
    """
    Send the full data table with an added rounded retention-time column.

    Appends 'Retention Time Roundoff(in mins)' (column 1 rounded) to the data
    read from input/test.csv, writes outputs/outputfile_with_RTR.csv, and
    returns it as a download.
    """
    # Recover real column names from the header row; mapping indices -> names
    # (avoids shadowing the builtin `dict`).
    header = pd.read_csv('input/test.csv', engine='python')
    name_map = {i: name for i, name in enumerate(header.columns)}
    data = pd.read_csv('input/test.csv', header=None, skiprows=1,
                       engine='python')
    # 1050 is a temporary column id chosen not to clash with the positional
    # data columns; it is renamed immediately below.
    data[1050] = round(data[1])
    data.rename(columns={1050: 'Retention Time Roundoff(in mins)'},
                inplace=True)
    data.rename(columns=name_map, inplace=True)
    # Write real CSV; the original wrote to_string() fixed-width text into a
    # .csv file served with a text/csv mimetype.
    data.to_csv('outputs/outputfile_with_RTR.csv', index=False)
    return send_file('outputs/outputfile_with_RTR.csv',
                     mimetype='text/csv',
                     attachment_filename='outputfile_with_RTR.csv',
                     as_attachment=True)
# function that creates a dataframe that has mean of all metabolics readings grouped by Retention Time Roundoff(in mins) and these dataframe doesnot include unnecessary columns and finally it is written to the file
@app.route('/metabolicmean_with_similarRTR')
def metabolicmean_with_similarRTR():
    """
    Send a CSV of per-metabolite means grouped by rounded retention time.

    Drops the first three descriptor columns, groups the readings by the
    rounded retention time (column 1), averages each group, writes the
    result to outputs/outputfile_metabolicmean_with_similarRTR.csv, and
    returns it as a download.
    """
    # Recover real names only for the reading columns (index 3 onward); the
    # first three descriptor columns are dropped below. Using a dedicated
    # mapping avoids shadowing the builtin `dict`.
    header = pd.read_csv('input/test.csv', engine='python')
    name_map = {i: name for i, name in enumerate(header.columns) if i >= 3}
    data = pd.read_csv('input/test.csv', header=None, skiprows=1,
                       engine='python')
    # 1050 is a temporary column id chosen not to clash with the positional
    # data columns; it is renamed immediately below.
    data[1050] = round(data[1])
    data.drop([0, 1, 2], axis=1, inplace=True)
    data.rename(columns=name_map, inplace=True)
    data.rename(columns={1050: 'Retention Time Roundoff(in mins)'},
                inplace=True)
    final = data.groupby('Retention Time Roundoff(in mins)').agg('mean')
    # Write real CSV (the group key becomes the index column); the original
    # wrote to_string() fixed-width text into a .csv file.
    final.to_csv('outputs/outputfile_metabolicmean_with_similarRTR.csv')
    return send_file('outputs/outputfile_metabolicmean_with_similarRTR.csv',
                     mimetype='text/csv',
                     attachment_filename='outputfile_metabolicmean_with_similarRTR.csv',
                     as_attachment=True)
| 42.145833 | 215 | 0.672928 | from app import app
from flask import render_template, request,send_file
import pandas as pd
import csv
import xlrd
@app.route('/')
def upload_file():
    """Serve the page with the .xlsx upload form."""
    return render_template('upload.html')
@app.route('/Fileupload', methods = ['POST'])
def upload():
    """Accept an .xlsx upload and convert its 'Raw Data' sheet to input/test.csv."""
    if request.method == 'POST':
        f = request.files['file']
        f.save("input/sample.xlsx")
        wb = xlrd.open_workbook('input/sample.xlsx')
        sh = wb.sheet_by_name('Raw Data')
        # NOTE(review): the csv output should be opened with newline='' and
        # ideally inside a `with` block so the handle is closed on error.
        your_csv_file = open('input/test.csv', 'w')
        wr = csv.writer(your_csv_file, quoting=csv.QUOTE_ALL)
        for rownum in range(sh.nrows):
            wr.writerow(sh.row_values(rownum))
        your_csv_file.close()
        return 'file uploaded successfully'
@app.route('/metabolic_suffix_PC')
def metabolic_suffix_PC():
    """Send a CSV of the rows whose metabolite name (column 3) ends with 'PC'."""
    column_names = pd.read_csv('input/test.csv', engine='python')
    columns=list(column_names.columns.values)
    # NOTE(review): `dict` shadows the builtin of the same name.
    dict={}
    for i in range(len(columns)):
        dict[i]=columns[i]
    csv_df = pd.read_csv('input/test.csv', header=None, skiprows=1, engine='python')
    child_dataset1 = csv_df[csv_df[2].str.endswith('PC', na=False)]
    child_dataset1.rename(columns=dict,inplace=True)
    # NOTE(review): to_string() writes fixed-width text, not CSV, into a
    # file served as text/csv.
    tfile = open('outputs/output_PC.csv', 'w')
    tfile.write(child_dataset1.to_string())
    tfile.close()
    return send_file('outputs/output_PC.csv',
                     mimetype='text/csv',
                     attachment_filename='output_PC.csv',
                     as_attachment=True)
@app.route('/metabolic_suffix_LPC')
def metabolic_suffix_LPC():
    """Send a CSV of the rows whose metabolite name (column 3) ends with 'LPC'."""
    column_names = pd.read_csv('input/test.csv', engine='python')
    columns=list(column_names.columns.values)
    # NOTE(review): `dict` shadows the builtin of the same name.
    dict={}
    for i in range(len(columns)):
        dict[i]=columns[i]
    csv_df = pd.read_csv('input/test.csv', header=None, skiprows=1, engine='python')
    child_dataset2 = csv_df[csv_df[2].str.endswith('LPC', na=False)]
    child_dataset2.rename(columns=dict,inplace=True)
    # NOTE(review): to_string() writes fixed-width text, not CSV, into a
    # file served as text/csv.
    tfile = open('outputs/output_LPC.csv', 'w')
    tfile.write(child_dataset2.to_string())
    tfile.close()
    return send_file('outputs/output_LPC.csv',
                     mimetype='text/csv',
                     attachment_filename='output_LPC.csv',
                     as_attachment=True)
@app.route('/metabolic_suffix_plasmalogen')
def metabolic_suffix_plasmalogen():
    """Send a CSV of the rows whose metabolite name ends with 'plasmalogen'."""
    column_names = pd.read_csv('input/test.csv', engine='python')
    columns=list(column_names.columns.values)
    # NOTE(review): `dict` shadows the builtin of the same name.
    dict={}
    for i in range(len(columns)):
        dict[i]=columns[i]
    csv_df = pd.read_csv('input/test.csv', header=None, skiprows=1, engine='python')
    child_dataset3 = csv_df[csv_df[2].str.endswith('plasmalogen', na=False)]
    child_dataset3.rename(columns=dict,inplace=True)
    # NOTE(review): to_string() writes fixed-width text, not CSV, into a
    # file served as text/csv.
    tfile = open('outputs/output_plasmalogen.csv', 'w')
    tfile.write(child_dataset3.to_string())
    tfile.close()
    return send_file('outputs/output_plasmalogen.csv',
                     mimetype='text/csv',
                     attachment_filename='output_plasmalogen.csv',
                     as_attachment=True)
@app.route('/parentfile_with_RTR')
def parentfile_with_RTR():
    """Send the full table with an added rounded retention-time column."""
    column_names = pd.read_csv('input/test.csv', engine='python')
    columns=list(column_names.columns.values)
    # NOTE(review): `dict` shadows the builtin of the same name.
    dict={}
    for i in range(len(columns)):
        dict[i]=columns[i]
    csv_df = pd.read_csv('input/test.csv', header=None, skiprows=1, engine='python')
    # 1050 is a temporary column id that cannot clash with positional columns.
    csv_df[1050] = round(csv_df[1])
    csv_df.rename(columns={1050:'Retention Time Roundoff(in mins)'}, inplace=True)
    csv_df.rename(columns=dict,inplace=True)
    # NOTE(review): to_string() writes fixed-width text, not CSV, into a
    # file served as text/csv.
    tfile = open('outputs/outputfile_with_RTR.csv', 'w')
    tfile.write(csv_df.to_string())
    tfile.close()
    return send_file('outputs/outputfile_with_RTR.csv',
                     mimetype='text/csv',
                     attachment_filename='outputfile_with_RTR.csv',
                     as_attachment=True)
@app.route('/metabolicmean_with_similarRTR')
def metabolicmean_with_similarRTR():
    """Send a CSV of per-metabolite means grouped by rounded retention time."""
    column_names = pd.read_csv('input/test.csv', engine='python')
    columns=list(column_names.columns.values)
    # Only the reading columns (index 3 onward) are kept, so only those names
    # are mapped. NOTE(review): `dict` shadows the builtin of the same name.
    dict={}
    for i in range(3,len(columns)):
        dict[i]=columns[i]
    csv_df = pd.read_csv('input/test.csv', header=None, skiprows=1, engine='python')
    # 1050 is a temporary column id that cannot clash with positional columns.
    csv_df[1050] = round(csv_df[1])
    csv_df.drop([0,1,2], axis = 1, inplace = True)
    csv_df.rename(columns=dict,inplace=True)
    csv_df.rename(columns={1050:'Retention Time Roundoff(in mins)'}, inplace=True)
    final =csv_df.groupby('Retention Time Roundoff(in mins)').agg('mean')
    # NOTE(review): to_string() writes fixed-width text, not CSV, into a
    # file served as text/csv.
    tfile = open('outputs/outputfile_metabolicmean_with_similarRTR.csv', 'w')
    tfile.write(final.to_string())
    tfile.close()
    return send_file('outputs/outputfile_metabolicmean_with_similarRTR.csv',
                     mimetype='text/csv',
                     attachment_filename='outputfile_metabolicmean_with_similarRTR.csv',
                     as_attachment=True)
| true | true |
1c2eb95134eb8fd3953659373a21b5dc998d5027 | 1,365 | py | Python | run_results.py | phylatechnologies/ibd_classification_benchmark | 667e0b42e70bd56c6675062b10dae38407e785b0 | [
"MIT"
] | null | null | null | run_results.py | phylatechnologies/ibd_classification_benchmark | 667e0b42e70bd56c6675062b10dae38407e785b0 | [
"MIT"
] | null | null | null | run_results.py | phylatechnologies/ibd_classification_benchmark | 667e0b42e70bd56c6675062b10dae38407e785b0 | [
"MIT"
] | null | null | null | import os
import pandas as pd
import numpy as np
from metadata.getters import get_pwd
from analysis.evaluation_methods import get_performance
import random
np.random.seed(26)
random.seed(26)
def get_sample_info(dataset):
    '''
    Load the diagnosis and study-ID metadata for a pickled data set.

    :param dataset: base name of the data set file (without the .pkl extension)
    :return: DataFrame with the 'diagnosis' and 'studyID' columns
    '''
    data_file = '{}.pkl'.format(dataset)
    data_path = '{}/datasets/{}'.format(get_pwd(), data_file)
    df = pd.read_pickle(data_path)
    info = df[['diagnosis','studyID']]
    return info
if (__name__=="__main__"):
res_path = get_pwd() + '/results'
res = os.listdir(res_path)
for e in res:
output = '{}/metrics'.format(get_pwd())
if not os.path.isdir(output):
os.mkdir(output)
exp_name = e.split('.')[0]
dataset = exp_name.split('-')[0]
if not os.path.isfile('{}/{}.csv'.format(output, exp_name)):
info = get_sample_info(dataset)
file = '{}/{}'.format(res_path, e)
y_pred = pd.read_csv(file, index_col=0)
y_pred = np.round(y_pred)
y = pd.concat([info, y_pred], axis = 1, sort=False)
y.columns = ['true', 'studyID', 'pred']
metrics = get_performance(y_df=y, index_name=exp_name)
print(metrics)
metrics.to_csv('{}/{}.csv'.format(output, exp_name))
| 26.25 | 68 | 0.589744 | import os
import pandas as pd
import numpy as np
from metadata.getters import get_pwd
from analysis.evaluation_methods import get_performance
import random
np.random.seed(26)
random.seed(26)
def get_sample_info(dataset):
    """Return the ``diagnosis`` and ``studyID`` columns of a pickled data set.

    :param dataset: data set name (file stem of ``<pwd>/datasets/<dataset>.pkl``)
    :return: DataFrame holding only the label and study-grouping columns
    """
    data_file = '{}.pkl'.format(dataset)
    data_path = '{}/datasets/{}'.format(get_pwd(), data_file)
    df = pd.read_pickle(data_path)
    info = df[['diagnosis','studyID']]
    return info
if (__name__=="__main__"):
    # Score every prediction file under <pwd>/results and write one
    # metrics CSV per experiment under <pwd>/metrics.
    res_path = get_pwd() + '/results'
    res = os.listdir(res_path)
    for e in res:
        # Metrics output directory, created lazily on first use.
        output = '{}/metrics'.format(get_pwd())
        if not os.path.isdir(output):
            os.mkdir(output)
        # Result files are named '<dataset>-<...>.<ext>'.
        exp_name = e.split('.')[0]
        dataset = exp_name.split('-')[0]
        # Only evaluate experiments that have not been scored yet.
        if not os.path.isfile('{}/{}.csv'.format(output, exp_name)):
            info = get_sample_info(dataset)
            file = '{}/{}'.format(res_path, e)
            y_pred = pd.read_csv(file, index_col=0)
            # Round predicted probabilities to hard 0/1 labels.
            y_pred = np.round(y_pred)
            y = pd.concat([info, y_pred], axis = 1, sort=False)
            y.columns = ['true', 'studyID', 'pred']
            metrics = get_performance(y_df=y, index_name=exp_name)
            print(metrics)
            metrics.to_csv('{}/{}.csv'.format(output, exp_name))
| true | true |
1c2eb97d2acd29ed9d533618df715e94812ce8e5 | 27,136 | py | Python | macOS/Xcode/Maestral/Maestral/app_packages/survey/tools.py | productinfo/maestral-cocoa | d1626c68ace1939bd53cda53c53cbb43c9fb7a8e | [
"MIT"
] | 8 | 2020-11-13T08:48:01.000Z | 2021-12-16T06:30:27.000Z | macOS/Xcode/Maestral/Maestral/app_packages/survey/tools.py | productinfo/maestral-cocoa | d1626c68ace1939bd53cda53c53cbb43c9fb7a8e | [
"MIT"
] | 4 | 2022-01-05T09:16:30.000Z | 2022-03-29T09:32:44.000Z | macOS/Xcode/Maestral/Maestral/app_packages/survey/tools.py | productinfo/maestral-cocoa | d1626c68ace1939bd53cda53c53cbb43c9fb7a8e | [
"MIT"
] | 1 | 2022-01-05T08:56:59.000Z | 2022-01-05T08:56:59.000Z | import enum
import types
import wrapio
import os
import string
import itertools
from . import helpers
__all__ = ('Source', 'Translator', 'LineEditor', 'MultiLineEditor', 'Select',
'MultiSelect')
_blocks = string.whitespace + string.punctuation
class Source(helpers.Handle):

    """
    Turns stdin reads into events.

    Reads keys one at a time from ``io`` and dispatches a :class:`Event`
    member (with the raw key) for each.
    """

    Event = enum.Enum(
        'Event',
        'move_left move_right jump_left jump_right move_up move_down '
        'delete_left delete_right escape indent enter insert'
    )

    # Key -> Event lookup tables, grouped by parser state: ``arrows`` after
    # an ESC-[ prefix, ``special`` after a bare ESC, ``normal`` otherwise.
    _events = types.SimpleNamespace(
        arrows = {
            'D': Event.move_left,
            'C': Event.move_right,
            'A': Event.move_up,
            'B': Event.move_down
        },
        normal = {
            '\x0d': Event.enter,
            '\x0a': Event.enter,
            '\x7f': Event.delete_left,
            '\x08': Event.delete_right,
            '\x09': Event.indent
        },
        special = {
            '': Event.escape,
            'b': Event.jump_left,
            'f': Event.jump_right
        }
    )

    __slots__ = ('_io', '_done')

    def __init__(self, io, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._io = io
        self._done = False

    def _escape(self):
        # Called after an ESC byte: ESC-[ introduces an arrow sequence,
        # anything else is looked up in the "special" (meta-key) table.
        key = self._io.recv()
        if key == '[':
            key = self._io.recv()
            events = self._events.arrows
        else:
            events = self._events.special
        return (events, key)

    def _advance(self):
        # Read one key and dispatch its event; unrecognised keys fall back
        # to a generic ``insert`` event carrying the raw key.
        key = self._io.recv()
        if key == '\x1b':
            (events, key) = self._escape()
        else:
            events = self._events.normal
        event = events.get(key, self.Event.insert)
        self._dispatch(event, key)

    def done(self):
        # Ask :meth:`stream` to stop after the current key is handled.
        self._done = True

    def stream(self):
        # Pump keys until :meth:`done` is called; the flag is then reset
        # so the source can be streamed again.
        with self._io.atomic:
            while not self._done:
                self._advance()
        self._done = False
class Abort(Exception):

    """
    Raise when something's wrong.

    Raised by tools to reject an input event; :meth:`Translator.invoke`
    catches it and rings the terminal bell instead of propagating.
    """

    __slots__ = ()
class Translator(helpers.Handle):

    """
    Combines related io events into single events with relevant info.

    .. code-block: python

        translator = Translator(callback = ...)
        source = Source(io, callback = translator.invoke)
    """

    Event = enum.Enum(
        'Event',
        'move_x jump_x move_y delete insert enter'
    )

    __slots__ = ('_io',)

    def __init__(self, io, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._io = io

    # NOTE: every handler below is deliberately named ``_nnc``; wrapio
    # registers handlers by the decorated event, so the attribute name
    # is irrelevant and each definition simply shadows the previous one.

    def _move_x(self, left):
        self._dispatch(self.Event.move_x, left)

    @wrapio.event(Source.Event.move_left)
    def _nnc(self, key):
        self._move_x(True)

    @wrapio.event(Source.Event.move_right)
    def _nnc(self, key):
        self._move_x(False)

    def _jump_x(self, left):
        self._dispatch(self.Event.jump_x, left)

    @wrapio.event(Source.Event.jump_left)
    def _nnc(self, key):
        self._jump_x(True)

    @wrapio.event(Source.Event.jump_right)
    def _nnc(self, key):
        self._jump_x(False)

    def _move_y(self, up):
        self._dispatch(self.Event.move_y, up)

    @wrapio.event(Source.Event.move_up)
    def _nnc(self, key):
        self._move_y(True)

    @wrapio.event(Source.Event.move_down)
    def _nnc(self, key):
        self._move_y(False)

    def _delete(self, left):
        self._dispatch(self.Event.delete, left)

    @wrapio.event(Source.Event.delete_left)
    def _nnc(self, key):
        self._delete(True)

    @wrapio.event(Source.Event.delete_right)
    def _nnc(self, key):
        self._delete(False)

    def _insert(self, key):
        self._dispatch(self.Event.insert, key)

    @wrapio.event(Source.Event.insert)
    def _nnc(self, key):
        self._insert(key)

    @wrapio.event(Source.Event.indent)
    def _nnc(self, key):
        # Indent keys are normalised to a tab-character insertion.
        self._insert('\t')

    def _enter(self, key):
        self._dispatch(self.Event.enter, key)

    @wrapio.event(Source.Event.enter)
    def _nnc(self, key):
        self._enter(key)

    def invoke(self, *args, **kwargs):
        # Only an :class:`Abort` raised by a handler rings the bell; a
        # truthy return value from the underlying invoke returns early
        # (silently), and a falsy one counts as success.
        try:
            fail = super().invoke(*args, **kwargs)
        except Abort:
            fail = True
        else:
            if fail:
                return
            fail = False
        if fail:
            self._io.ring()
        return fail
class WindowView:

    """
    ABC for anything exposing a movable window over an indexable range.

    Tracks a cursor ``_index`` plus the ``_lower`` edge of a window of
    size ``_bound``; concrete subclasses supply the storage slots.
    """

    __slots__ = ()  # subclasses declare ('_index', '_lower', '_bound')

    def __init__(self, bound):
        self._index = 0
        self._lower = 0
        self._bound = bound

    @property
    def _upper(self):
        # Inclusive top edge of the visible window.
        return self._bound + self._lower

    @property
    def _among(self):
        # Cursor position relative to the window's lower edge.
        return self._index - self._lower

    @property
    def among(self):
        return self._among

    @property
    def index(self):
        return self._index

    def _calibrate(self):
        # Slide the window so the cursor stays inside it; report whether
        # any sliding was needed.
        if self._index < self._lower:
            # cursor escaped below - drag the window down to it
            self._lower = self._index
            return True
        if self._index > self._upper:
            # cursor escaped above - drag the window up to it
            self._lower = self._index - self._bound
            return True
        return False

    def _resize(self, size):
        # Grow (positive size) or shrink (negative size) the window.
        bound = self._bound + size
        if bound < 0:
            raise ValueError('bound would be negative')
        self._bound = bound
        if size > 0:
            # keep the same content visible where possible
            self._lower = max(0, self._lower - size)
        self._calibrate()

    def _reset(self):
        self._index = 0
        self._lower = 0
class Tool(WindowView, helpers.Handle):

    """
    ABC for partially-viewable handlers.

    Wires :class:`Translator` events to overridable hooks.  Each public
    action follows the same pattern: ``_x`` is the subclass hook,
    ``_e_x`` calls it and then dispatches the named event to listeners.
    """

    __slots__ = ('_index', '_lower', '_bound', '_io', '_cursor')

    def __init__(self, io, cursor, bound, *args, **kwargs):
        WindowView.__init__(self, bound)
        helpers.Handle.__init__(self, *args, **kwargs)
        self._io = io
        self._cursor = cursor

    def _clear(self):
        # Subclasses erase their rendered output here.
        raise NotImplementedError()

    def clear(self):
        self._clear()

    def _draw(self, lower):
        # Subclasses render the window starting at ``lower`` here.
        raise NotImplementedError()

    def draw(self):
        self._draw(self._lower)

    def _focus(self):
        # Subclasses move the terminal cursor back onto the logical
        # cursor position here.
        raise NotImplementedError()

    def focus(self):
        self._focus()

    def _redraw(self, skip = False):
        # Full refresh; ``skip`` avoids clearing when the caller already did.
        if not skip:
            self._clear()
        self._draw(self._lower)
        self._focus()

    def resize(self, size, full = True):
        if full:
            self._clear()
        self._resize(size)
        if full:
            self._redraw(skip = True)

    # NOTE: like Translator, all handlers are intentionally named ``_nnc``;
    # wrapio registers them by event, not by attribute name.

    def _move_y(self, up, size):
        pass

    def _e_move_y(self, up, size):
        self._move_y(up, size)
        self._dispatch('move_y', up, size)

    @wrapio.event(Translator.Event.move_y)
    def _nnc(self, up):
        self._e_move_y(up, 1)

    def _move_x(self, left, size):
        pass

    def _e_move_x(self, left, size):
        self._move_x(left, size)
        self._dispatch('move_x', left, size)

    @wrapio.event(Translator.Event.move_x)
    def _nnc(self, left):
        self._e_move_x(left, 1)

    def _jump_x(self, left):
        pass

    def _e_jump_x(self, left):
        self._jump_x(left)
        self._dispatch('jump_x', left)

    @wrapio.event(Translator.Event.jump_x)
    def _nnc(self, left):
        self._e_jump_x(left)

    def _tab(self):
        pass

    def _e_tab(self):
        self._tab()
        self._dispatch('tab')

    def _insert(self, runes):
        pass

    def _e_insert(self, runes):
        # A tab anywhere in the batch turns the whole event into a tab.
        if '\t' in runes:
            self._e_tab()
            return
        runes = self._insert(runes)
        self._dispatch('insert', runes)
        return runes

    def insert(self, runes):
        runes = self._e_insert(runes)
        return runes

    @wrapio.event(Translator.Event.insert)
    def _nnc(self, rune):
        runes = (rune,)
        self._e_insert(runes)

    def _delete(self, left, size):
        pass

    def _e_delete(self, left, size):
        self._delete(left, size)
        self._dispatch('delete', left, size)

    def delete(self, left, size):
        self._e_delete(left, size)

    @wrapio.event(Translator.Event.delete)
    def _nnc(self, left):
        self._e_delete(left, 1)

    def _submit(self):
        self._dispatch('submit')

    def _enter(self):
        raise NotImplementedError()

    @wrapio.event(Translator.Event.enter)
    def _nnc(self, rune):
        self._enter()
def _clean(value):
    """Normalise *value* with the sequence cleaner, then the generic
    cleaner from :mod:`helpers`."""
    return helpers.clean(helpers.seq.clean(value))
class LineEditor(Tool):

    """
    Use for editing a single line of text.

    Does not support line breaks or moving vertically.
    """

    __slots__ = ('_limit', '_funnel', '_buffer')

    def __init__(self,
                 io,
                 cursor,
                 width,
                 limit,
                 funnel,
                 *args,
                 **kwargs):
        # NOTE(review): *args is accepted but not forwarded to super();
        # looks intentional (callers here pass none) - confirm.
        super().__init__(io, cursor, width, **kwargs)
        self._limit = limit
        self._funnel = funnel
        self._buffer = []  # one rune (character) per element

    @property
    def buffer(self):
        return self._buffer

    def _place(self):
        # Move the terminal cursor back to the window's left edge.
        self._cursor.left(self._among)

    def _clear(self):
        self._place()
        self._cursor.erase()

    def _transform(self, rune):
        # Apply the display funnel and validate its output.
        rune = self._funnel(rune)
        if not len(rune) == 1:
            raise RuntimeError('rune must be of size 1')
        if not rune.isprintable():
            raise RuntimeError('rune must be printable')
        return rune

    def _show(self, runes):
        if self._funnel:
            runes = map(self._transform, runes)
        runes = tuple(runes)
        value = ''.join(runes)
        self._io.send(value)

    def _chunk(self, lower):
        # Visible slice of the buffer.
        runes = self._buffer[lower:self._upper]
        return runes

    def _draw(self, lower):
        runes = self._chunk(lower)
        self._show(runes)

    @property
    def _shown(self):
        # Number of runes currently rendered.
        return len(self._chunk(self._lower))

    def _focus(self):
        size = self._shown - self._among
        self._cursor.left(size)

    def _move_x(self, left, size):
        # Move the cursor ``size`` positions; Abort carries the overshoot.
        if left:
            limit = self._index
        else:
            limit = len(self._buffer) - self._index
        excess = size - limit
        if excess > 0:
            raise Abort(excess)
        if left:
            index = self._index - size
            limit = self._among
            self._cursor.left(min(limit, size))
        else:
            index = self._index + size
            limit = self._shown - self._among
            self._cursor.right(min(limit, size))
        self._index = index
        change = self._calibrate()
        if change:
            self._redraw()
        return change

    def move(self, left, size):
        self._move_x(left, size)

    def _jump_x_left(self):
        # Jump to just after the nearest "block" (whitespace/punctuation)
        # on the left, or to the start of the buffer.
        limit = 0
        stop = self._index - 1
        if stop < limit:
            raise Abort()
        indexes = []
        for block in _blocks:
            try:
                index = helpers.rindex(self._buffer, block, 0, stop)
            except ValueError:
                continue
            indexes.append(index + 1)
        else:
            # loop never breaks, so the buffer start is always a candidate
            indexes.append(limit)
        size = min(self._index - index for index in indexes)
        self._move_x(True, size)

    def _jump_x_right(self):
        # Jump to the nearest "block" on the right, or to the buffer end.
        limit = len(self._buffer)
        start = self._index + 1
        if start > limit:
            raise Abort()
        indexes = []
        for block in _blocks:
            try:
                index = self._buffer.index(block, start)
            except ValueError:
                continue
            indexes.append(index)
        else:
            indexes.append(limit)
        size = min(index - self._index for index in indexes)
        self._move_x(False, size)

    def _jump_x(self, left):
        if left:
            self._jump_x_left()
        else:
            self._jump_x_right()

    def jump(self, left):
        self._jump_x(left)

    def _ensure(self, runes):
        # Normalise incoming runes to a clean string.
        value = ''.join(runes)
        value = _clean(value)
        return value

    def _insert(self, runes):
        runes = self._ensure(runes)
        runes = tuple(runes)
        esize = len(runes)
        osize = len(self._buffer)
        nsize = osize + esize
        if not self._limit is None and nsize > self._limit:
            raise Abort()
        start = self._index
        for (index, rune) in enumerate(runes):
            self._buffer.insert(start + index, rune)
        # ``among`` is True for a mid-buffer insert (needs tail redraw).
        among = not start == osize
        self._index = start + esize
        change = self._calibrate()
        if change:
            self._redraw()
        elif among:
            self._draw(start)
            self._focus()
        else:
            # appending at the end: just echo the new runes
            self._show(runes)
        return runes

    def _delete(self, left, size):
        # Left deletion is a move followed by a rightward delete.
        if left:
            self._move_x(True, size)
        limit = len(self._buffer) - self._index
        excess = size - limit
        if excess > 0:
            raise Abort(excess)
        for _ in range(size):
            del self._buffer[self._index]
        self._cursor.erase()
        self._draw(self._index)
        self._focus()

    def _enter(self):
        self._submit()
class Originful:

    """
    Mixin recording the zero-based column the cursor currently sits at.
    """

    __slots__ = ()  # subclasses declare ('_origin',)

    def _originate(self):
        # ``locate`` reports a one-based (row, column) pair; keep the
        # column as a zero-based origin offset.
        position = self._cursor.locate()
        self._origin = position[1] - 1
class MultiLineEditor(Tool, Originful):

    """
    Use for editing multiple lines of text.

    Supports line breaks or moving vertically.  Each line is a child
    :class:`LineEditor` held in ``_subs``; ``_index`` selects the
    current line.
    """

    __slots__ = ('_origin', '_finchk', '_subs', '_make', '_limit', '_indent')

    def __init__(self,
                 io,
                 cursor,
                 finchk,
                 height,
                 width,
                 limit,
                 funnel,
                 indent,
                 *args,
                 **kwargs):
        Tool.__init__(self, io, cursor, height - 1, *args, **kwargs)
        self._finchk = finchk  # callable deciding whether enter submits
        make = lambda: LineEditor(io, cursor, width, None, funnel)
        self._subs = [make()]
        self._make = make
        self._limit = limit
        self._indent = indent
        self._originate()

    @property
    def _sub(self):
        # The line editor currently holding the cursor.
        return self._subs[self._index]

    @property
    def subs(self):
        return self._subs

    def _place(self):
        self._cursor.last(self._among)
        self._cursor.right(self._origin)

    def _clear(self):
        self._place()
        self._cursor.clear()

    def _chunk(self, lower):
        upper = self._upper + 1
        runes = self._subs[lower:upper]
        return runes

    def _draw(self, lower):
        self._originate()
        subs = self._chunk(lower)
        last = len(subs) - 1
        for (index, sub) in enumerate(subs):
            sub.draw()
            if index == last:
                break
            self._io.send(os.linesep)

    @property
    def _shown(self):
        return len(self._chunk(self._lower))

    def _focus(self):
        # if 1 shown and among 0, then move 0
        ysize = self._shown - self._among - 1
        self._cursor.last(ysize)
        xsize = self._sub.among
        if not self._among:
            # first shown line starts after the prompt origin
            xsize += self._origin
        self._cursor.right(xsize)

    # How to position the cursor inside the line landed on after a
    # vertical move: keep the same column, or snap to either end.
    _SpotType = enum.Enum('SpotType', 'match left right')

    def _spot(self, old, new, type):
        to_left = - new.index
        to_right = len(new.buffer) + to_left
        if type is self._SpotType.match:
            difference = old.index - new.index
            size = max(to_left, min(to_right, difference))
        elif type is self._SpotType.left:
            size = to_left
        elif type is self._SpotType.right:
            size = to_right
        else:
            raise ValueError('unknown move type')
        new.move(size < 0, abs(size))

    def _move_y(self, up, size, type = _SpotType.match):
        if up:
            limit = self._index
        else:
            # if 1 sub and index 0, then limit is 0
            limit = len(self._subs) - self._index - 1
        excess = size - limit
        if excess > 0:
            raise Abort(excess)
        if up:
            index = self._index - size
            limit = self._among
            self._cursor.last(min(limit, size))
        else:
            index = self._index + size
            limit = self._shown - self._among - 1
            self._cursor.next(min(limit, size))
        old = self._sub
        self._index = index
        new = self._sub
        xsize = new.among
        if not self._among:
            xsize += self._origin
        self._cursor.right(xsize)
        change = self._calibrate()
        if change:
            self._redraw()
        if not type is None:
            self._spot(old, new, type)

    def _rcut(self, left):
        # Buffers from the cursor outward in the given direction, starting
        # with the (partial) current line.
        if left:
            (*subs, sub) = self._subs[:self._index + 1]
            buffer = sub.buffer[:sub.index]
            subs = reversed(subs)
        else:
            (sub, *subs) = self._subs[self._index:]
            buffer = sub.buffer[sub.index:]
        buffers = (buffer, *(sub.buffer for sub in subs))
        return buffers

    def _rmsr(self, buffers, xsize):
        # Convert a flat rune distance into (full lines, leftover runes);
        # the +1 accounts for each line separator.
        ysize = 0
        nsize = xsize
        for buffer in buffers:
            nsize -= len(buffer) + 1
            if nsize < 0:
                break
            xsize = nsize
            ysize += 1
        return (ysize, xsize)

    def _rclc(self, left, xsize):
        buffers = self._rcut(left)
        # remove one to account for current line
        limit = sum(map(len, buffers)) + len(buffers) - 1
        excess = xsize - limit
        if excess > 0:
            raise Abort(excess)
        (ysize, xsize) = self._rmsr(buffers, xsize)
        return (ysize, xsize)

    def _move_x(self, left, xsize):
        # Horizontal move that may spill across line boundaries.
        (ysize, xsize) = self._rclc(left, xsize)
        if ysize:
            type = self._SpotType.right if left else self._SpotType.left
            self._move_y(left, ysize, type)
        self._sub.move(left, xsize)
        return (ysize, xsize)

    def move(self, left, size):
        self._move_x(left, size)

    def _jump_x(self, left):
        # Word-jump within the line; at a line edge, step over the break.
        try:
            self._sub.jump(left)
        except Abort:
            self._move_x(left, 1)

    def _ensure(self, runes):
        # Enforce the total size limit across all lines (+1 per break).
        esize = len(runes)
        buffers = tuple(sub.buffer for sub in self._subs)
        osize = sum(map(len, buffers)) + len(buffers) - 1
        nsize = osize + esize
        if not self._limit is None and nsize > self._limit:
            raise Abort()

    def _tab(self):
        # Tabs become ``_indent`` spaces.
        self._e_insert((' ',) * self._indent)

    def _insert(self, runes):
        # Split on line separators; each chunk goes to a (possibly new) line.
        values = helpers.split(runes, os.linesep)
        values = tuple(values)
        runes = tuple(itertools.chain.from_iterable(values))
        self._ensure(runes)
        last = len(values) - 1
        buffer = []
        for (index, runes) in enumerate(values):
            runes = self._sub.insert(runes)
            buffer.extend(runes)
            if index == last:
                break
            self._newsub()
            buffer.append(os.linesep)
        return buffer

    def _delete(self, left, size):
        if left:
            self._move_x(True, size)
        (ysize, xsize) = self._rclc(False, size)
        # merge the following ``ysize`` lines into the current one
        kli = self._index + 1
        sub = self._sub
        for index in range(ysize):
            nsub = self._subs.pop(kli)
            sub.buffer.extend(nsub.buffer)
        if ysize:
            self._redraw()
        sub.delete(False, size - ysize)

    def _newsub(self):
        # Break the current line at the cursor, moving the tail into a
        # fresh LineEditor inserted right after it.
        old = self._sub
        new = self._make()
        while True:
            try:
                rune = old.buffer.pop(old.index)
            except IndexError:
                break
            new.buffer.append(rune)
        # A cheap partial redraw is only possible when appending a new
        # last line that still fits in the window.
        last = self._index == len(self._subs) - 1 and self._among < self._bound
        full = not last
        if full:
            self._clear()
        else:
            self._cursor.erase()
        index = self._index + 1
        self._subs.insert(index, new)
        self._index = index
        runes = (os.linesep,)
        if full:
            self._calibrate()
            self._redraw(skip = True)
        else:
            self._io.send(*runes)
            self._draw(self._index)
            self._focus()
        self._dispatch('insert', runes)

    def newsub(self):
        self._newsub()

    def _enter(self):
        # Submit when the finish-check approves; otherwise open a new line.
        done = self._finchk()
        (self._submit if done else self._newsub)()
class Select(Tool, Originful):

    """
    Use for cycling through and selecting options.

    ``_visible`` holds the indexes of the currently shown options (it
    shrinks while the user types a filter into ``_buffer``); ``_index``
    points into ``_visible``.
    """

    __slots__ = ('_origin', '_options', '_visible', '_changed', '_buffer',
                 '_width', '_prefix', '_indent', '_funnel', '_filter')

    def __init__(self,
                 io,
                 cursor,
                 height,
                 width,
                 options,
                 prefix,
                 indent,
                 funnel,
                 filter,
                 *args,
                 **kwargs):
        # NOTE: ``filter`` shadows the builtin but is part of the public
        # keyword interface, so it stays.
        Tool.__init__(self, io, cursor, height - 1, *args, **kwargs)
        self._options = options
        self._visible = tuple(range(len(options)))
        self._changed = {}  # cache of funnel-transformed option texts
        self._buffer = []   # the filter argument typed so far
        self._width = width
        self._prefix = prefix
        self._indent = indent
        self._funnel = funnel
        self._filter = filter
        self._originate()

    @property
    def buffer(self):
        return self._buffer

    def _place(self):
        self._cursor.last(self._among)
        self._cursor.right(self._origin)

    def _clear(self):
        self._place()
        self._cursor.clear()

    def _tran(self, index, current, option):
        # Hook for subclasses to decorate the option text.
        return option

    def _chunk(self, lower):
        return self._visible[lower:self._upper + 1]

    def _fetch(self, index, current):
        # Build the display text for one option, funnelled and cached
        # when it is the current one, and prefixed accordingly.
        option = self._options[index][:self._width]
        if current:
            try:
                option = self._changed[index]
            except KeyError:
                if self._funnel:
                    option = self._funnel(index, option)
                self._changed[index] = option
        prefix = self._prefix if current else ' ' * self._indent
        option = prefix + self._tran(index, current, option)
        return option

    def _show(self, index, current):
        # Re-render a single option line in place.
        self._cursor.erase()
        option = self._fetch(index, current)
        self._io.send(option)
        self._cursor.goto(0)

    def _draw(self, lower):
        indexes = self._chunk(lower)
        options = []
        for (cindex, oindex) in enumerate(indexes, start = lower):
            current = cindex == self._index
            option = self._fetch(oindex, current)
            options.append(option)
        result = os.linesep.join(options)
        self._io.send(result)

    @property
    def _shown(self):
        return len(self._chunk(self._lower))

    def _focus(self):
        # if 1 shown and among 0, then move 0
        ysize = self._shown - self._among - 1
        self._cursor.last(ysize)
        xsize = 0 # doesn't matter
        if not self._among:
            xsize += self._origin
        self._cursor.right(xsize)

    def _slide(self, up, size):
        # Normalise a vertical step into a wrapped target index and the
        # effective (direction, distance) to get there.
        # NOTE(review): moving above index 0 always wraps to the *last*
        # option regardless of distance - appears deliberate; confirm.
        limit = len(self._visible)
        size = size % limit
        index = self._index + (- size if up else size)
        if index < 0:
            index = limit - 1
        else:
            extra = index - limit
            if not extra < 0:
                index = extra
        size = index - self._index
        up = size < 0
        size = abs(size)
        return (up, size, index)

    def _move_y(self, up, size):
        (up, size, index) = self._slide(up, size)
        if up:
            limit = self._index
        else:
            # if 1 sub and index 0, then limit is 0
            limit = len(self._visible) - self._index - 1
        # no need to check excess, ``_slide`` ensures
        self._show(self._visible[self._index], False)
        if up:
            limit = self._among
            self._cursor.last(min(limit, size))
        else:
            limit = self._shown - self._among - 1
            self._cursor.next(min(limit, size))
        self._index = index
        change = self._calibrate()
        if change:
            self._redraw()
        else:
            self._show(self._visible[index], True)

    def move(self, up, size):
        self._move_y(up, size)

    def _specify(self, new):
        # Re-filter the visible options: narrow with ``_filter`` when
        # ``new``, otherwise restore the full list.
        argument = ''.join(self._buffer)
        if new:
            indexes = self._visible
            options = (self._options[index] for index in indexes)
            pairs = zip(indexes, options)
            pairs = self._filter(pairs, argument)
            (indexes, options) = zip(*pairs)
        else:
            indexes = range(len(self._options))
        self._clear()
        self._visible = indexes
        self._index = 0
        self._calibrate()
        self._redraw(skip = True)
        self._dispatch('filter', argument)

    def _insert(self, runes):
        # Extend the filter; roll back (and Abort) if it matches nothing
        # (zip(*()) raises ValueError on an empty filter result).
        save = self._buffer.copy()
        value = ''.join(runes)
        value = _clean(value)
        self._buffer.extend(value)
        try:
            self._specify(True)
        except ValueError:
            self._buffer.clear()
            self._buffer.extend(save)
            raise Abort()

    def _delete(self, left, size):
        # Any delete clears the whole filter.
        if not self._buffer:
            raise Abort()
        self._buffer.clear()
        self._specify(False)

    def _enter(self):
        self._submit()
class MultiSelect(Select):

    """
    A :class:`Select` allowing multiple options to be chosen.

    Chosen option indexes are kept in ``_chosen``; each rendered option
    is prefixed with ``pin`` when chosen and ``unpin`` otherwise.
    """

    __slots__ = ('_unpin', '_pin', '_chosen')

    def __init__(self, unpin, pin, indexes, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._unpin = unpin
        self._pin = pin
        self._chosen = set(indexes)

    @property
    def indexes(self):
        return self._chosen

    def _tran(self, index, current, option):
        # Prepend the pin/unpin marker to the option text.
        signal = self._pin if index in self._chosen else self._unpin
        return signal + super()._tran(index, current, option)

    def _add(self, index, full):
        # ``full`` selects everything; Abort when already all-chosen.
        if full:
            limit = len(self._options)
            if len(self._chosen) == limit:
                raise Abort()
            self._chosen.update(range(limit))
        else:
            self._chosen.add(index)

    def _pop(self, index, full):
        # ``full`` deselects everything; Abort when nothing is chosen.
        if full:
            if not self._chosen:
                raise Abort()
            self._chosen.clear()
        else:
            self._chosen.remove(index)

    def _inform(self, new):
        # Toggle the current option; repeating the same action on it
        # escalates to a select-all / deselect-all.
        index = self._visible[self._index]
        exists = index in self._chosen
        full = exists if new else not exists
        (self._add if new else self._pop)(index, full)
        self._redraw()
        self._dispatch('inform', new, full)

    def _move_x(self, left, size):
        # Horizontal movement is repurposed: right selects, left deselects.
        new = not left
        self._inform(new)
| 20.026568 | 79 | 0.532098 | import enum
import types
import wrapio
import os
import string
import itertools
from . import helpers
__all__ = ('Source', 'Translator', 'LineEditor', 'MultiLineEditor', 'Select',
'MultiSelect')
_blocks = string.whitespace + string.punctuation
class Source(helpers.Handle):

    """
    Turns stdin reads into events: each key read from ``io`` is mapped
    through the state-dependent tables below and dispatched.
    """

    Event = enum.Enum(
        'Event',
        'move_left move_right jump_left jump_right move_up move_down '
        'delete_left delete_right escape indent enter insert'
    )

    _events = types.SimpleNamespace(
        arrows = {
            'D': Event.move_left,
            'C': Event.move_right,
            'A': Event.move_up,
            'B': Event.move_down
        },
        normal = {
            '\x0d': Event.enter,
            '\x0a': Event.enter,
            '\x7f': Event.delete_left,
            '\x08': Event.delete_right,
            '\x09': Event.indent
        },
        special = {
            '': Event.escape,
            'b': Event.jump_left,
            'f': Event.jump_right
        }
    )

    __slots__ = ('_io', '_done')

    def __init__(self, io, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._io = io
        self._done = False

    def _escape(self):
        # ESC-[ selects the arrow table, bare ESC the special table.
        key = self._io.recv()
        if key == '[':
            key = self._io.recv()
            events = self._events.arrows
        else:
            events = self._events.special
        return (events, key)

    def _advance(self):
        # Unknown keys default to a generic ``insert`` event.
        key = self._io.recv()
        if key == '\x1b':
            (events, key) = self._escape()
        else:
            events = self._events.normal
        event = events.get(key, self.Event.insert)
        self._dispatch(event, key)

    def done(self):
        self._done = True

    def stream(self):
        # Pump keys until :meth:`done`; reset the flag for reuse.
        with self._io.atomic:
            while not self._done:
                self._advance()
        self._done = False
class Abort(Exception):

    """Raised by tools to reject an input event; ``Translator.invoke``
    catches it and rings the terminal bell."""

    __slots__ = ()
class Translator(helpers.Handle):

    """
    Combines related io events into single events with relevant info.

    All handlers are intentionally named ``_nnc``: wrapio registers them
    by event, so the attribute name is irrelevant.
    """

    Event = enum.Enum(
        'Event',
        'move_x jump_x move_y delete insert enter'
    )

    __slots__ = ('_io',)

    def __init__(self, io, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._io = io

    def _move_x(self, left):
        self._dispatch(self.Event.move_x, left)

    @wrapio.event(Source.Event.move_left)
    def _nnc(self, key):
        self._move_x(True)

    @wrapio.event(Source.Event.move_right)
    def _nnc(self, key):
        self._move_x(False)

    def _jump_x(self, left):
        self._dispatch(self.Event.jump_x, left)

    @wrapio.event(Source.Event.jump_left)
    def _nnc(self, key):
        self._jump_x(True)

    @wrapio.event(Source.Event.jump_right)
    def _nnc(self, key):
        self._jump_x(False)

    def _move_y(self, up):
        self._dispatch(self.Event.move_y, up)

    @wrapio.event(Source.Event.move_up)
    def _nnc(self, key):
        self._move_y(True)

    @wrapio.event(Source.Event.move_down)
    def _nnc(self, key):
        self._move_y(False)

    def _delete(self, left):
        self._dispatch(self.Event.delete, left)

    @wrapio.event(Source.Event.delete_left)
    def _nnc(self, key):
        self._delete(True)

    @wrapio.event(Source.Event.delete_right)
    def _nnc(self, key):
        self._delete(False)

    def _insert(self, key):
        self._dispatch(self.Event.insert, key)

    @wrapio.event(Source.Event.insert)
    def _nnc(self, key):
        self._insert(key)

    @wrapio.event(Source.Event.indent)
    def _nnc(self, key):
        self._insert('\t')

    def _enter(self, key):
        self._dispatch(self.Event.enter, key)

    @wrapio.event(Source.Event.enter)
    def _nnc(self, key):
        self._enter(key)

    def invoke(self, *args, **kwargs):
        # Only an Abort from a handler rings the bell; a truthy return
        # from the underlying invoke returns early (silently).
        try:
            fail = super().invoke(*args, **kwargs)
        except Abort:
            fail = True
        else:
            if fail:
                return
            fail = False
        if fail:
            self._io.ring()
        return fail
class WindowView:

    """
    ABC for anything exposing a movable window over an indexable range.
    """

    __slots__ = ()  # subclasses declare ('_index', '_lower', '_bound')

    def __init__(self, bound):
        self._index = 0
        self._lower = 0
        self._bound = bound

    @property
    def _upper(self):
        # Inclusive top edge of the visible window.
        return self._bound + self._lower

    @property
    def _among(self):
        # Cursor position relative to the window's lower edge.
        return self._index - self._lower

    @property
    def among(self):
        return self._among

    @property
    def index(self):
        return self._index

    def _calibrate(self):
        # Slide the window so the cursor stays inside; report any change.
        if self._index < self._lower:
            self._lower = self._index
            return True
        if self._index > self._upper:
            self._lower = self._index - self._bound
            return True
        return False

    def _resize(self, size):
        bound = self._bound + size
        if bound < 0:
            raise ValueError('bound would be negative')
        self._bound = bound
        if size > 0:
            self._lower = max(0, self._lower - size)
        self._calibrate()

    def _reset(self):
        self._index = 0
        self._lower = 0
class Tool(WindowView, helpers.Handle):

    """
    ABC for partially-viewable handlers.  Each action follows the same
    pattern: ``_x`` is the subclass hook, ``_e_x`` calls it and then
    dispatches the named event; wrapio handlers are all named ``_nnc``.
    """

    __slots__ = ('_index', '_lower', '_bound', '_io', '_cursor')

    def __init__(self, io, cursor, bound, *args, **kwargs):
        WindowView.__init__(self, bound)
        helpers.Handle.__init__(self, *args, **kwargs)
        self._io = io
        self._cursor = cursor

    def _clear(self):
        raise NotImplementedError()

    def clear(self):
        self._clear()

    def _draw(self, lower):
        raise NotImplementedError()

    def draw(self):
        self._draw(self._lower)

    def _focus(self):
        raise NotImplementedError()

    def focus(self):
        self._focus()

    def _redraw(self, skip = False):
        # ``skip`` avoids clearing when the caller already did.
        if not skip:
            self._clear()
        self._draw(self._lower)
        self._focus()

    def resize(self, size, full = True):
        if full:
            self._clear()
        self._resize(size)
        if full:
            self._redraw(skip = True)

    def _move_y(self, up, size):
        pass

    def _e_move_y(self, up, size):
        self._move_y(up, size)
        self._dispatch('move_y', up, size)

    @wrapio.event(Translator.Event.move_y)
    def _nnc(self, up):
        self._e_move_y(up, 1)

    def _move_x(self, left, size):
        pass

    def _e_move_x(self, left, size):
        self._move_x(left, size)
        self._dispatch('move_x', left, size)

    @wrapio.event(Translator.Event.move_x)
    def _nnc(self, left):
        self._e_move_x(left, 1)

    def _jump_x(self, left):
        pass

    def _e_jump_x(self, left):
        self._jump_x(left)
        self._dispatch('jump_x', left)

    @wrapio.event(Translator.Event.jump_x)
    def _nnc(self, left):
        self._e_jump_x(left)

    def _tab(self):
        pass

    def _e_tab(self):
        self._tab()
        self._dispatch('tab')

    def _insert(self, runes):
        pass

    def _e_insert(self, runes):
        # A tab anywhere in the batch turns the whole event into a tab.
        if '\t' in runes:
            self._e_tab()
            return
        runes = self._insert(runes)
        self._dispatch('insert', runes)
        return runes

    def insert(self, runes):
        runes = self._e_insert(runes)
        return runes

    @wrapio.event(Translator.Event.insert)
    def _nnc(self, rune):
        runes = (rune,)
        self._e_insert(runes)

    def _delete(self, left, size):
        pass

    def _e_delete(self, left, size):
        self._delete(left, size)
        self._dispatch('delete', left, size)

    def delete(self, left, size):
        self._e_delete(left, size)

    @wrapio.event(Translator.Event.delete)
    def _nnc(self, left):
        self._e_delete(left, 1)

    def _submit(self):
        self._dispatch('submit')

    def _enter(self):
        raise NotImplementedError()

    @wrapio.event(Translator.Event.enter)
    def _nnc(self, rune):
        self._enter()
def _clean(value):
    """Normalise *value* with the sequence cleaner, then the generic
    cleaner from :mod:`helpers`."""
    return helpers.clean(helpers.seq.clean(value))
class LineEditor(Tool):

    """
    Editor for a single line of text (no line breaks, no vertical moves).
    The line is kept as a list of runes in ``_buffer``.
    """

    __slots__ = ('_limit', '_funnel', '_buffer')

    def __init__(self,
                 io,
                 cursor,
                 width,
                 limit,
                 funnel,
                 *args,
                 **kwargs):
        # NOTE(review): *args is accepted but not forwarded - confirm.
        super().__init__(io, cursor, width, **kwargs)
        self._limit = limit
        self._funnel = funnel
        self._buffer = []

    @property
    def buffer(self):
        return self._buffer

    def _place(self):
        self._cursor.left(self._among)

    def _clear(self):
        self._place()
        self._cursor.erase()

    def _transform(self, rune):
        # Apply the display funnel and validate its output.
        rune = self._funnel(rune)
        if not len(rune) == 1:
            raise RuntimeError('rune must be of size 1')
        if not rune.isprintable():
            raise RuntimeError('rune must be printable')
        return rune

    def _show(self, runes):
        if self._funnel:
            runes = map(self._transform, runes)
        runes = tuple(runes)
        value = ''.join(runes)
        self._io.send(value)

    def _chunk(self, lower):
        runes = self._buffer[lower:self._upper]
        return runes

    def _draw(self, lower):
        runes = self._chunk(lower)
        self._show(runes)

    @property
    def _shown(self):
        return len(self._chunk(self._lower))

    def _focus(self):
        size = self._shown - self._among
        self._cursor.left(size)

    def _move_x(self, left, size):
        # Abort (carrying the overshoot) when the move exceeds the buffer.
        if left:
            limit = self._index
        else:
            limit = len(self._buffer) - self._index
        excess = size - limit
        if excess > 0:
            raise Abort(excess)
        if left:
            index = self._index - size
            limit = self._among
            self._cursor.left(min(limit, size))
        else:
            index = self._index + size
            limit = self._shown - self._among
            self._cursor.right(min(limit, size))
        self._index = index
        change = self._calibrate()
        if change:
            self._redraw()
        return change

    def move(self, left, size):
        self._move_x(left, size)

    def _jump_x_left(self):
        # Jump past the nearest block character on the left, or to start.
        limit = 0
        stop = self._index - 1
        if stop < limit:
            raise Abort()
        indexes = []
        for block in _blocks:
            try:
                index = helpers.rindex(self._buffer, block, 0, stop)
            except ValueError:
                continue
            indexes.append(index + 1)
        else:
            indexes.append(limit)
        size = min(self._index - index for index in indexes)
        self._move_x(True, size)

    def _jump_x_right(self):
        # Jump to the nearest block character on the right, or to end.
        limit = len(self._buffer)
        start = self._index + 1
        if start > limit:
            raise Abort()
        indexes = []
        for block in _blocks:
            try:
                index = self._buffer.index(block, start)
            except ValueError:
                continue
            indexes.append(index)
        else:
            indexes.append(limit)
        size = min(index - self._index for index in indexes)
        self._move_x(False, size)

    def _jump_x(self, left):
        if left:
            self._jump_x_left()
        else:
            self._jump_x_right()

    def jump(self, left):
        self._jump_x(left)

    def _ensure(self, runes):
        value = ''.join(runes)
        value = _clean(value)
        return value

    def _insert(self, runes):
        runes = self._ensure(runes)
        runes = tuple(runes)
        esize = len(runes)
        osize = len(self._buffer)
        nsize = osize + esize
        if not self._limit is None and nsize > self._limit:
            raise Abort()
        start = self._index
        for (index, rune) in enumerate(runes):
            self._buffer.insert(start + index, rune)
        # mid-buffer insert needs a tail redraw; appending just echoes
        among = not start == osize
        self._index = start + esize
        change = self._calibrate()
        if change:
            self._redraw()
        elif among:
            self._draw(start)
            self._focus()
        else:
            self._show(runes)
        return runes

    def _delete(self, left, size):
        # Left deletion is a move followed by a rightward delete.
        if left:
            self._move_x(True, size)
        limit = len(self._buffer) - self._index
        excess = size - limit
        if excess > 0:
            raise Abort(excess)
        for _ in range(size):
            del self._buffer[self._index]
        self._cursor.erase()
        self._draw(self._index)
        self._focus()

    def _enter(self):
        self._submit()
class Originful:

    """
    Mixin recording the zero-based column the cursor currently sits at.
    """

    __slots__ = ()  # subclasses declare ('_origin',)

    def _originate(self):
        # ``locate`` reports a one-based (row, column) pair.
        position = self._cursor.locate()
        self._origin = position[1] - 1
class MultiLineEditor(Tool, Originful):
__slots__ = ('_origin', '_finchk', '_subs', '_make', '_limit', '_indent')
def __init__(self,
io,
cursor,
finchk,
height,
width,
limit,
funnel,
indent,
*args,
**kwargs):
Tool.__init__(self, io, cursor, height - 1, *args, **kwargs)
self._finchk = finchk
make = lambda: LineEditor(io, cursor, width, None, funnel)
self._subs = [make()]
self._make = make
self._limit = limit
self._indent = indent
self._originate()
@property
def _sub(self):
return self._subs[self._index]
@property
def subs(self):
return self._subs
def _place(self):
self._cursor.last(self._among)
self._cursor.right(self._origin)
def _clear(self):
self._place()
self._cursor.clear()
def _chunk(self, lower):
upper = self._upper + 1
runes = self._subs[lower:upper]
return runes
def _draw(self, lower):
self._originate()
subs = self._chunk(lower)
last = len(subs) - 1
for (index, sub) in enumerate(subs):
sub.draw()
if index == last:
break
self._io.send(os.linesep)
@property
def _shown(self):
return len(self._chunk(self._lower))
def _focus(self):
ysize = self._shown - self._among - 1
self._cursor.last(ysize)
xsize = self._sub.among
if not self._among:
xsize += self._origin
self._cursor.right(xsize)
_SpotType = enum.Enum('SpotType', 'match left right')
def _spot(self, old, new, type):
to_left = - new.index
to_right = len(new.buffer) + to_left
if type is self._SpotType.match:
difference = old.index - new.index
size = max(to_left, min(to_right, difference))
elif type is self._SpotType.left:
size = to_left
elif type is self._SpotType.right:
size = to_right
else:
raise ValueError('unknown move type')
new.move(size < 0, abs(size))
def _move_y(self, up, size, type = _SpotType.match):
if up:
limit = self._index
else:
limit = len(self._subs) - self._index - 1
excess = size - limit
if excess > 0:
raise Abort(excess)
if up:
index = self._index - size
limit = self._among
self._cursor.last(min(limit, size))
else:
index = self._index + size
limit = self._shown - self._among - 1
self._cursor.next(min(limit, size))
old = self._sub
self._index = index
new = self._sub
xsize = new.among
if not self._among:
xsize += self._origin
self._cursor.right(xsize)
change = self._calibrate()
if change:
self._redraw()
if not type is None:
self._spot(old, new, type)
def _rcut(self, left):
if left:
(*subs, sub) = self._subs[:self._index + 1]
buffer = sub.buffer[:sub.index]
subs = reversed(subs)
else:
(sub, *subs) = self._subs[self._index:]
buffer = sub.buffer[sub.index:]
buffers = (buffer, *(sub.buffer for sub in subs))
return buffers
def _rmsr(self, buffers, xsize):
ysize = 0
nsize = xsize
for buffer in buffers:
nsize -= len(buffer) + 1
if nsize < 0:
break
xsize = nsize
ysize += 1
return (ysize, xsize)
def _rclc(self, left, xsize):
buffers = self._rcut(left)
limit = sum(map(len, buffers)) + len(buffers) - 1
excess = xsize - limit
if excess > 0:
raise Abort(excess)
(ysize, xsize) = self._rmsr(buffers, xsize)
return (ysize, xsize)
def _move_x(self, left, xsize):
(ysize, xsize) = self._rclc(left, xsize)
if ysize:
type = self._SpotType.right if left else self._SpotType.left
self._move_y(left, ysize, type)
self._sub.move(left, xsize)
return (ysize, xsize)
def move(self, left, size):
self._move_x(left, size)
def _jump_x(self, left):
try:
self._sub.jump(left)
except Abort:
self._move_x(left, 1)
def _ensure(self, runes):
esize = len(runes)
buffers = tuple(sub.buffer for sub in self._subs)
osize = sum(map(len, buffers)) + len(buffers) - 1
nsize = osize + esize
if not self._limit is None and nsize > self._limit:
raise Abort()
def _tab(self):
self._e_insert((' ',) * self._indent)
def _insert(self, runes):
values = helpers.split(runes, os.linesep)
values = tuple(values)
runes = tuple(itertools.chain.from_iterable(values))
self._ensure(runes)
last = len(values) - 1
buffer = []
for (index, runes) in enumerate(values):
runes = self._sub.insert(runes)
buffer.extend(runes)
if index == last:
break
self._newsub()
buffer.append(os.linesep)
return buffer
def _delete(self, left, size):
if left:
self._move_x(True, size)
(ysize, xsize) = self._rclc(False, size)
kli = self._index + 1
sub = self._sub
for index in range(ysize):
nsub = self._subs.pop(kli)
sub.buffer.extend(nsub.buffer)
if ysize:
self._redraw()
sub.delete(False, size - ysize)
def _newsub(self):
old = self._sub
new = self._make()
while True:
try:
rune = old.buffer.pop(old.index)
except IndexError:
break
new.buffer.append(rune)
last = self._index == len(self._subs) - 1 and self._among < self._bound
full = not last
if full:
self._clear()
else:
self._cursor.erase()
index = self._index + 1
self._subs.insert(index, new)
self._index = index
runes = (os.linesep,)
if full:
self._calibrate()
self._redraw(skip = True)
else:
self._io.send(*runes)
self._draw(self._index)
self._focus()
self._dispatch('insert', runes)
def newsub(self):
self._newsub()
def _enter(self):
done = self._finchk()
(self._submit if done else self._newsub)()
class Select(Tool, Originful):
__slots__ = ('_origin', '_options', '_visible', '_changed', '_buffer',
'_width', '_prefix', '_indent', '_funnel', '_filter')
def __init__(self,
io,
cursor,
height,
width,
options,
prefix,
indent,
funnel,
filter,
*args,
**kwargs):
Tool.__init__(self, io, cursor, height - 1, *args, **kwargs)
self._options = options
self._visible = tuple(range(len(options)))
self._changed = {}
self._buffer = []
self._width = width
self._prefix = prefix
self._indent = indent
self._funnel = funnel
self._filter = filter
self._originate()
@property
def buffer(self):
return self._buffer
def _place(self):
self._cursor.last(self._among)
self._cursor.right(self._origin)
def _clear(self):
self._place()
self._cursor.clear()
def _tran(self, index, current, option):
return option
def _chunk(self, lower):
return self._visible[lower:self._upper + 1]
def _fetch(self, index, current):
option = self._options[index][:self._width]
if current:
try:
option = self._changed[index]
except KeyError:
if self._funnel:
option = self._funnel(index, option)
self._changed[index] = option
prefix = self._prefix if current else ' ' * self._indent
option = prefix + self._tran(index, current, option)
return option
def _show(self, index, current):
self._cursor.erase()
option = self._fetch(index, current)
self._io.send(option)
self._cursor.goto(0)
def _draw(self, lower):
indexes = self._chunk(lower)
options = []
for (cindex, oindex) in enumerate(indexes, start = lower):
current = cindex == self._index
option = self._fetch(oindex, current)
options.append(option)
result = os.linesep.join(options)
self._io.send(result)
@property
def _shown(self):
return len(self._chunk(self._lower))
def _focus(self):
ysize = self._shown - self._among - 1
self._cursor.last(ysize)
xsize = 0
if not self._among:
xsize += self._origin
self._cursor.right(xsize)
def _slide(self, up, size):
limit = len(self._visible)
size = size % limit
index = self._index + (- size if up else size)
if index < 0:
index = limit - 1
else:
extra = index - limit
if not extra < 0:
index = extra
size = index - self._index
up = size < 0
size = abs(size)
return (up, size, index)
def _move_y(self, up, size):
(up, size, index) = self._slide(up, size)
if up:
limit = self._index
else:
# if 1 sub and index 0, then limit is 0
limit = len(self._visible) - self._index - 1
# no need to check excess, ``_slide`` ensures
self._show(self._visible[self._index], False)
if up:
limit = self._among
self._cursor.last(min(limit, size))
else:
limit = self._shown - self._among - 1
self._cursor.next(min(limit, size))
self._index = index
change = self._calibrate()
if change:
self._redraw()
else:
self._show(self._visible[index], True)
def move(self, up, size):
self._move_y(up, size)
def _specify(self, new):
argument = ''.join(self._buffer)
if new:
indexes = self._visible
options = (self._options[index] for index in indexes)
pairs = zip(indexes, options)
pairs = self._filter(pairs, argument)
(indexes, options) = zip(*pairs)
else:
indexes = range(len(self._options))
self._clear()
self._visible = indexes
self._index = 0
self._calibrate()
self._redraw(skip = True)
self._dispatch('filter', argument)
def _insert(self, runes):
save = self._buffer.copy()
value = ''.join(runes)
value = _clean(value)
self._buffer.extend(value)
try:
self._specify(True)
except ValueError:
self._buffer.clear()
self._buffer.extend(save)
raise Abort()
def _delete(self, left, size):
if not self._buffer:
raise Abort()
self._buffer.clear()
self._specify(False)
def _enter(self):
self._submit()
class MultiSelect(Select):
    """A Select variant that lets multiple options be pinned (chosen) at once."""

    __slots__ = ('_unpin', '_pin', '_chosen')

    def __init__(self, unpin, pin, indexes, *args, **kwargs):
        """``unpin``/``pin`` are the row marker values; ``indexes`` pre-selects options."""
        super().__init__(*args, **kwargs)
        self._unpin = unpin
        self._pin = pin
        self._chosen = set(indexes)

    @property
    def indexes(self):
        """The set of currently chosen option indexes."""
        return self._chosen

    def _tran(self, index, current, option):
        # Prefix each row with the pin/unpin marker before normal rendering.
        signal = self._pin if index in self._chosen else self._unpin
        return signal + super()._tran(index, current, option)

    def _add(self, index, full):
        """Choose one option, or every option when ``full`` is true."""
        if full:
            limit = len(self._options)
            if len(self._chosen) == limit:
                # Everything is already chosen; nothing to do.
                raise Abort()
            self._chosen.update(range(limit))
        else:
            self._chosen.add(index)

    def _pop(self, index, full):
        """Unchoose one option, or clear the whole selection when ``full`` is true."""
        if full:
            if not self._chosen:
                # Nothing chosen; nothing to clear.
                raise Abort()
            self._chosen.clear()
        else:
            self._chosen.remove(index)

    def _inform(self, new):
        """Apply a choose (``new`` true) or unchoose toggle to the highlighted option.

        ``full`` escalates to select-all / clear-all when the plain toggle
        would be a no-op for the highlighted option.
        """
        index = self._visible[self._index]
        exists = index in self._chosen
        full = exists if new else not exists
        (self._add if new else self._pop)(index, full)
        self._redraw()
        self._dispatch('inform', new, full)

    def _move_x(self, left, size):
        # Horizontal movement is repurposed: right chooses, left unchooses.
        new = not left
        self._inform(new)
| true | true |
1c2eba00650c8b64ca478183c3d227a97692f36e | 1,439 | py | Python | nonebot/adapters/feishu/utils.py | nonebot/adapter-feishu | 392d734bbf09b88c2e7557102f2562111b84fce8 | [
"MIT"
] | 2 | 2021-11-29T16:05:17.000Z | 2022-03-21T07:45:50.000Z | nonebot/adapters/feishu/utils.py | nonebot/adapter-feishu | 392d734bbf09b88c2e7557102f2562111b84fce8 | [
"MIT"
] | 1 | 2022-03-16T07:26:10.000Z | 2022-03-16T07:26:10.000Z | nonebot/adapters/feishu/utils.py | nonebot/adapter-feishu | 392d734bbf09b88c2e7557102f2562111b84fce8 | [
"MIT"
] | null | null | null | import base64
import hashlib
from typing import Any, Dict, Optional
from cashews import cache
from Crypto.Cipher import AES
from nonebot.utils import logger_wrapper
from .exception import ActionFailed
log = logger_wrapper("FEISHU")
cache.setup("mem://")
def _handle_api_result(result: Optional[Dict[str, Any]]) -> Any:
"""
:说明:
处理 API 请求返回值。
:参数:
* ``result: Optional[Dict[str, Any]]``: API 返回数据
:返回:
- ``Any``: API 调用返回数据
:异常:
- ``ActionFailed``: API 调用失败
"""
if isinstance(result, dict):
if result.get("code") != 0:
raise ActionFailed(**result)
return result.get("data")
else:
return result
class AESCipher(object):
    """AES-CBC decryption helper keyed by the SHA-256 digest of a secret."""

    def __init__(self, key):
        self.block_size = AES.block_size
        # Derive a fixed-length 256-bit key from the shared secret.
        self.key = hashlib.sha256(AESCipher.str_to_bytes(key)).digest()

    @staticmethod
    def str_to_bytes(data):
        """Return *data* UTF-8 encoded when it is text, unchanged otherwise."""
        if isinstance(data, str):
            return data.encode("utf8")
        return data

    @staticmethod
    def _unpad(s):
        """Strip PKCS#7-style padding: the final byte encodes the pad length."""
        pad = ord(s[-1:])
        return s[:-pad]

    def decrypt(self, enc):
        """Decrypt raw ciphertext whose first block is the IV."""
        iv, body = enc[: AES.block_size], enc[AES.block_size :]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return self._unpad(cipher.decrypt(body))

    def decrypt_string(self, enc):
        """Decrypt a base64-encoded ciphertext and return UTF-8 text."""
        raw = base64.b64decode(enc)
        return self.decrypt(raw).decode("utf8")
| 21.80303 | 71 | 0.607366 | import base64
import hashlib
from typing import Any, Dict, Optional
from cashews import cache
from Crypto.Cipher import AES
from nonebot.utils import logger_wrapper
from .exception import ActionFailed
log = logger_wrapper("FEISHU")
cache.setup("mem://")
def _handle_api_result(result: Optional[Dict[str, Any]]) -> Any:
if isinstance(result, dict):
if result.get("code") != 0:
raise ActionFailed(**result)
return result.get("data")
else:
return result
class AESCipher(object):
def __init__(self, key):
self.block_size = AES.block_size
self.key = hashlib.sha256(AESCipher.str_to_bytes(key)).digest()
@staticmethod
def str_to_bytes(data):
u_type = type(b"".decode("utf8"))
if isinstance(data, u_type):
return data.encode("utf8")
return data
@staticmethod
def _unpad(s):
return s[: -ord(s[len(s) - 1 :])]
def decrypt(self, enc):
iv = enc[: AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size :]))
def decrypt_string(self, enc):
enc = base64.b64decode(enc)
return self.decrypt(enc).decode("utf8")
| true | true |
1c2ebcb9f1c7e30c527922e1b531e8cc615065de | 1,998 | py | Python | src/oidcop/session/info.py | MdreW/oidc-op | 684355981ea12516e1b5ef3ed72a4ecf572109bf | [
"Apache-2.0"
] | null | null | null | src/oidcop/session/info.py | MdreW/oidc-op | 684355981ea12516e1b5ef3ed72a4ecf572109bf | [
"Apache-2.0"
] | null | null | null | src/oidcop/session/info.py | MdreW/oidc-op | 684355981ea12516e1b5ef3ed72a4ecf572109bf | [
"Apache-2.0"
] | null | null | null | from typing import List
from typing import Optional
from oidcmsg.impexp import ImpExp
class SessionInfo(ImpExp):
    """Base import/export record for one node in a session tree."""

    # Attribute specification consumed by the ImpExp (de)serialisation layer.
    parameter = {"subordinate": [], "revoked": bool, "type": "", "extra_args": {}}

    def __init__(
        self,
        subordinate: Optional[List[str]] = None,
        revoked: Optional[bool] = False,
        type: Optional[str] = "",
        **kwargs
    ):
        ImpExp.__init__(self)
        self.subordinate = subordinate or []
        self.revoked = revoked
        self.type = type
        self.extra_args = {}

    def add_subordinate(self, value: str) -> "SessionInfo":
        """Append *value* to the subordinates unless it is already present."""
        if value in self.subordinate:
            return self
        self.subordinate.append(value)
        return self

    def remove_subordinate(self, value: str) -> "SessionInfo":
        """Drop *value* from the subordinates (ValueError when absent)."""
        self.subordinate.remove(value)
        return self

    def revoke(self) -> "SessionInfo":
        """Mark this node as revoked."""
        self.revoked = True
        return self

    def is_revoked(self) -> bool:
        """Return whether this node has been revoked."""
        return self.revoked

    def keys(self):
        """Expose the declared parameter names, mirroring a mapping interface."""
        return self.parameter.keys()
class UserSessionInfo(SessionInfo):
    """Session node keyed by an end-user identifier."""

    parameter = {**SessionInfo.parameter, "user_id": ""}

    def __init__(self, **kwargs):
        SessionInfo.__init__(self, **kwargs)
        self.type = "UserSessionInfo"
        self.user_id = kwargs.get("user_id", "")
        # Keep any constructor arguments that are not declared parameters.
        self.extra_args = {
            key: value for key, value in kwargs.items() if key not in self.parameter
        }
class ClientSessionInfo(SessionInfo):
    """Session node keyed by an OAuth2/OIDC client identifier."""

    parameter = {**SessionInfo.parameter, "client_id": ""}

    def __init__(self, **kwargs):
        SessionInfo.__init__(self, **kwargs)
        self.type = "ClientSessionInfo"
        self.client_id = kwargs.get("client_id", "")
        # Keep any constructor arguments that are not declared parameters.
        self.extra_args = {
            key: value for key, value in kwargs.items() if key not in self.parameter
        }

    def find_grant_and_token(self, val: str):
        """Return the ``(grant, token)`` pair whose token matches *val*.

        Implicitly returns ``None`` when no subordinate grant knows the value.
        """
        for grant in self.subordinate:
            candidate = grant.get_token(val)
            if candidate:
                return grant, candidate
| 28.140845 | 86 | 0.610611 | from typing import List
from typing import Optional
from oidcmsg.impexp import ImpExp
class SessionInfo(ImpExp):
parameter = {"subordinate": [], "revoked": bool, "type": "", "extra_args": {}}
def __init__(
self,
subordinate: Optional[List[str]] = None,
revoked: Optional[bool] = False,
type: Optional[str] = "",
**kwargs
):
ImpExp.__init__(self)
self.subordinate = subordinate or []
self.revoked = revoked
self.type = type
self.extra_args = {}
def add_subordinate(self, value: str) -> "SessionInfo":
if value not in self.subordinate:
self.subordinate.append(value)
return self
def remove_subordinate(self, value: str) -> "SessionInfo":
self.subordinate.remove(value)
return self
def revoke(self) -> "SessionInfo":
self.revoked = True
return self
def is_revoked(self) -> bool:
return self.revoked
def keys(self):
return self.parameter.keys()
class UserSessionInfo(SessionInfo):
parameter = SessionInfo.parameter.copy()
parameter.update(
{"user_id": "",}
)
def __init__(self, **kwargs):
SessionInfo.__init__(self, **kwargs)
self.type = "UserSessionInfo"
self.user_id = kwargs.get("user_id", "")
self.extra_args = {k: v for k, v in kwargs.items() if k not in self.parameter}
class ClientSessionInfo(SessionInfo):
parameter = SessionInfo.parameter.copy()
parameter.update({"client_id": ""})
def __init__(self, **kwargs):
SessionInfo.__init__(self, **kwargs)
self.type = "ClientSessionInfo"
self.client_id = kwargs.get("client_id", "")
self.extra_args = {k: v for k, v in kwargs.items() if k not in self.parameter}
def find_grant_and_token(self, val: str):
for grant in self.subordinate:
token = grant.get_token(val)
if token:
return grant, token
| true | true |
1c2ebce233d748bc3dbd55e3e7f871b51601686e | 21,795 | py | Python | ModernWarfare/modernwarfare.py | EthanC/Hyde | 0325eb12c5849dbe4e4b317bd8d3f026c989e7f9 | [
"MIT"
] | 14 | 2020-05-01T13:54:48.000Z | 2022-02-14T21:58:35.000Z | ModernWarfare/modernwarfare.py | EthanC/Hyde | 0325eb12c5849dbe4e4b317bd8d3f026c989e7f9 | [
"MIT"
] | 3 | 2020-06-09T19:24:48.000Z | 2021-03-06T11:34:04.000Z | ModernWarfare/modernwarfare.py | EthanC/Hyde | 0325eb12c5849dbe4e4b317bd8d3f026c989e7f9 | [
"MIT"
] | 4 | 2020-05-24T19:15:08.000Z | 2022-02-04T21:20:29.000Z | import logging
from typing import Any, Dict, List, Optional, TypedDict
from utility import Utility
from .database import Database
from .XAssets import (
Accessories,
BattlePasses,
BattlePassItems,
Bundles,
CallingCards,
Camos,
Charms,
Consumables,
Emblems,
Equipment,
Executions,
Features,
GameTypes,
Gestures,
ItemSources,
Killstreaks,
KioskBR,
KioskBRTruck,
Maps,
MasteryChallenges,
MiscellaneousChallenges,
MissionItems,
Missions,
OfficerChallenges,
Operators,
PlaylistEvents,
ProgressionRewards,
Quips,
Reticles,
SeasonalChallenges,
SeasonalEvents,
Skins,
SpecialItems,
Splashes,
Sprays,
Stickers,
TurboChallenges,
UnlockItemsT9,
VehicleCamos,
VehicleHorns,
Vehicles,
VehicleTracks,
Weapons,
WeaponUnlockChallenges,
WeeklyChallengesBR,
WeeklyChallengesMP,
)
log: logging.Logger = logging.getLogger(__name__)
class LootMaster(TypedDict):
"""Structure of loot/loot_master.csv"""
rangeStart: int
rangeEnd: int
typeName: str
typeValue: str
hidden: int
typeNameLoc: str
typeDesc: str
typeImg: str
breadcrumb: str
baseWeaponRef: str
class ItemSourceTable(TypedDict):
    """Structure of mp/itemsourcetable.csv"""

    marketPlaceID: int
    refType: str
    refName: str
    gameSourceID: str
    equippableIW8MP: int  # bool flag: equippable in Modern Warfare (IW8) MP
    equippableWZ: int  # bool flag: equippable in Warzone
    equippableT9: int  # bool flag: equippable in Black Ops Cold War (T9)
    equippableS4: int  # bool flag: equippable in Vanguard (S4)
    lookupType: str
class OperatorIDs(TypedDict):
"""Structure of loot/operator_ids.csv"""
id: int
ref: str
rarity: int
price: int
salvage: int
license: int
premium: int # bool
class WeaponClassTable(TypedDict):
"""Structure of mp/weaponClassTable.csv"""
index: int
ref: str
slot: int
name: str
pluralName: str
showInMenus: int # bool
unlockTablePrefix: str
showInCP: int # bool
image: str
showInArmory: int # bool
previewScene: str
attachScenePrefix: str
unknown1: str # Not defined in luashared/csvutils.csv
unknown2: str # Not defined in luashared/csvutils.csv
classImage: str
canBeGunsmithed: int # bool
attachCategoryWhitelist: str # Array of strings
hasVariants: int # bool
isWZOnly: int # bool
extraAttachCategoryWhitelist: str # Array of strings
class AttachmentCategoryTable(TypedDict):
"""Structure of mp/attachmentcategorytable.csv"""
index: int
ref: str
name: str
buttonIndex: int
displayOrder: int
categoryScene: str
smallCategoryScene: str
largeCategoryScene: str
bone: str
defaultLineOffsetX: int
defaultLineOffsetY: int
defaultLineOffsetZ: int
enableBigGunPreviewCamera: int # bool
enableSmallGunPreviewCamera: int # bool
enableBigShotgunPreviewCamera: int # bool
class CamoCategoryTable(TypedDict):
"""Structure of mp/camocategorytable.csv"""
index: int
ref: str
name: str
class ModernWarfare:
"""Call of Duty: Modern Warfare (IW8)"""
def __init__(self: Any, config: dict) -> None:
self.ModernWarfare = self
self.config: Dict[str, Any] = config.get("ModernWarfare")
self.iXAssets: str = self.config["import"]["xassets"]
self.iImages: str = self.config["import"]["images"]
self.eXAssets: str = self.config["export"]["xassets"]
self.eImages: str = self.config["export"]["images"]
self.eVideos: str = self.config["export"]["videos"]
self.eDatabase: str = self.config["export"]["database"]
def Compile(self: Any) -> None:
"""Compile and export all supported XAsset types for Modern Warfare."""
log.info("Compiling XAssets for Call of Duty: Modern Warfare...")
# Global and reused XAssets
self.localize: Dict[str, Optional[str]] = ModernWarfare.LoadLocalize(self)
self.lootTypes: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/loot/loot_master.csv", LootMaster, 1
)
self.itemSources: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/mp/itemsourcetable.csv", ItemSourceTable
)
self.operatorIds: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/loot/operator_ids.csv", OperatorIDs
)
self.weaponClasses: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/mp/weaponClassTable.csv", WeaponClassTable
)
self.attachCategories: List[Dict[str, Any]] = Utility.ReadCSV(
self,
f"{self.iXAssets}/mp/attachmentcategorytable.csv",
AttachmentCategoryTable,
)
self.camoCategories: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/mp/camocategorytable.csv", CamoCategoryTable
)
Accessories.Compile(self)
BattlePasses.Compile(self)
BattlePassItems.Compile(self)
Bundles.Compile(self)
CallingCards.Compile(self)
Camos.Compile(self)
Charms.Compile(self)
Consumables.Compile(self)
Emblems.Compile(self)
Equipment.Compile(self)
Executions.Compile(self)
Features.Compile(self)
GameTypes.Compile(self)
Gestures.Compile(self)
ItemSources.Compile(self)
Killstreaks.Compile(self)
KioskBR.Compile(self)
KioskBRTruck.Compile(self)
Maps.Compile(self)
MasteryChallenges.Compile(self)
MiscellaneousChallenges.Compile(self)
MissionItems.Compile(self)
Missions.Compile(self)
OfficerChallenges.Compile(self)
Operators.Compile(self)
PlaylistEvents.Compile(self)
ProgressionRewards.Compile(self)
Quips.Compile(self)
Reticles.Compile(self)
SeasonalChallenges.Compile(self)
SeasonalEvents.Compile(self)
Skins.Compile(self)
SpecialItems.Compile(self)
Splashes.Compile(self)
Sprays.Compile(self)
Stickers.Compile(self)
TurboChallenges.Compile(self)
UnlockItemsT9.Compile(self)
VehicleCamos.Compile(self)
VehicleHorns.Compile(self)
Vehicles.Compile(self)
VehicleTracks.Compile(self)
Weapons.Compile(self)
WeaponUnlockChallenges.Compile(self)
WeeklyChallengesBR.Compile(self)
WeeklyChallengesMP.Compile(self)
if self.config.get("compileDatabase") is True:
Database.Compile(self)
def LoadLocalize(self: Any) -> Dict[str, Optional[str]]:
"""Load and filter the localized string entries for Modern Warfare."""
localize: Dict[str, Optional[str]] = Utility.ReadFile(
self, f"{self.iXAssets}/localize.json"
)
placeholders: dict = Utility.ReadFile(self, "ModernWarfare/placeholders.json")
for key in localize:
value: Optional[str] = localize.get(key)
if value is None:
continue
for placeholder in placeholders.get("whole"):
if value.lower() == placeholder.lower():
localize[key] = None
break
for placeholder in placeholders.get("begins"):
if value.lower().startswith(placeholder.lower()) is True:
localize[key] = None
break
for placeholder in placeholders.get("ends"):
if value.lower().endswith(placeholder.lower()) is True:
localize[key] = None
break
if (value := localize.get(key)) is not None:
value = Utility.StripColorCodes(self, value)
value = Utility.StripButtonCodes(self, value)
localize[key] = value
return localize
def GetLootRarity(self: Any, value: int) -> Optional[str]:
"""Get the loot rarity for the provided value."""
return self.localize.get(f"LOOT_MP/QUALITY_{value}")
def GetLootType(self: Any, id: int) -> Optional[str]:
"""Get the loot type for the provided ID."""
if id is None:
return
for loot in self.lootTypes:
start: int = loot.get("rangeStart")
end: int = loot.get("rangeEnd")
if (id >= start) and (id <= end):
typeNameLoc: Optional[str] = loot.get("typeNameLoc")
if typeNameLoc == "LOOT_MP/PLACEHOLDER":
continue
return self.localize.get(typeNameLoc)
for source in self.itemSources:
if source.get("marketPlaceID") == id:
refType: Optional[str] = source.get("refType")
# Partially defined in ui/utils/lootutils.lua
if refType == "weapon":
return self.localize.get("LOOT_MP/ITEM_TYPE_WEAPON")
elif refType == "operator":
return self.localize.get("LOOT_MP/OPERATOR")
elif refType == "operator_skin":
return self.localize.get("LOOT_MP/OPERATOR_SKIN")
elif refType == "executions":
return self.localize.get("LOOT_MP/OPERATOR_EXECUTION")
elif refType == "equipment":
return self.localize.get("LOOT_MP/EQUIPMENT")
elif refType == "accessory":
return self.localize.get("LOOT_MP/WATCH")
elif refType == "playercards":
return self.localize.get("LOOT_MP/CALLING_CARD")
elif refType == "weapon_charm":
return self.localize.get("LOOT_MP/CHARM")
elif refType == "quip":
return self.localize.get("LOOT_MP/OPERATOR_QUIP")
elif refType == "camo":
return self.localize.get("LOOT_MP/CAMO")
elif refType == "emblems":
return self.localize.get("LOOT_MP/EMBLEM")
elif refType == "attachment":
return self.localize.get("LOOT_MP/ATTACHMENT")
elif refType == "sticker":
return self.localize.get("LOOT_MP/STICKER")
elif refType == "xp_token":
return self.localize.get("LOOT_MP/CONSUMABLE")
elif refType == "markeritem":
return self.localize.get("LOOT_MP/CONSUMABLE")
elif refType == "reticle":
return self.localize.get("LOOT_MP/RETICLE")
elif refType == "blueprint":
return self.localize.get("LOOT_MP/ITEM_TYPE_WEAPON")
elif refType == "battlepass":
return self.localize.get("LOOT_MP/BATTLE_PASS")
elif refType == "vehicle_track":
return self.localize.get("LOOT_MP/VEHICLE_TRACK")
elif refType == "vehicle_horn":
return self.localize.get("LOOT_MP/VEHICLE_HORN")
elif refType == "feature":
return self.localize.get("LOOT_MP/FEATURE")
elif refType == "gestures":
return self.localize.get("LOOT_MP/GESTURES")
elif refType == "mission":
return self.localize.get("LOOT_MP/MISSION")
elif refType == "weapon_attachment":
return self.localize.get("LOOT_MP/ATTACHMENT")
elif refType == "perk":
return self.localize.get("LOOT_MP/PERK")
elif refType == "t9_equipment":
return self.localize.get("LOOT_MP/EQUIPMENT")
elif refType == "killstreak":
return self.localize.get("LOOT_MP/STREAK")
elif refType == "class":
return self.localize.get("LOOT_MP/FEATURE")
elif refType == "zm_unlockable":
return self.localize.get("LOOT_MP/FEATURE")
elif refType == "weapon_skill":
return self.localize.get("LOOT_MP/FEATURE")
elif refType == "bonuscard":
return self.localize.get("LOOT_MP/FEATURE")
elif refType == "vehicleskin":
return self.localize.get("LOOT_MP/VEHICLE_SKIN")
elif refType == "bundle":
return self.localize.get("MENU/BUNDLE_TYPE_VARIETY")
elif refType == "placeholder":
return
elif refType == "arcadegame":
return self.localize.get("LOOT_MP/ARCADE_GAME")
elif refType == "sprays":
return self.localize.get("LOOT_MP/SPRAYS")
else:
log.warning(f"Unknown loot refType {refType} for ID {id}")
def GetLootSeason(self: Any, license: int) -> Optional[str]:
"""Get the loot season for the provided value."""
if license == 0:
return
elif license == 99:
# Defined in ui/utils/lootutils.lua
return "Unreleased"
elif ((license - 1) % 1000) == 0:
# For instances such as the Season 4: Reloaded update.
license -= 1
elif ((license - 2) % 1000) == 0:
# For instances such as the Season 6 extension.
license -= 2
elif (license % 1000) != 0:
# Seasonal licenses are multiples of 1,000.
return
return self.localize.get(f"SEASONS/SEASON_{round(license / 1000)}")
def GetOperatorID(self: Any, reference: str) -> Optional[int]:
"""Get the ID for the specified Operator."""
if reference == "universal_ref":
# Universal Operator items do not have an ID, so we'll just
# set one ourselves.
return 29999
elif reference == "universal_base_ref":
# Same reason as universal_ref, however, this is only intended
# for use with Operators where isLaunchOperator is True.
return 29998
elif reference == "t9_exclusive_ref":
# Same reason as universal_ref, however, this is only intended
# for use with Black Ops Cold War Operators.
return 29997
for operator in self.operatorIds:
if reference == operator.get("ref"):
return operator.get("id")
def GetWeaponClass(self: Any, reference: str) -> Optional[str]:
"""Get the name of the specified Weapon Class."""
for weaponClass in self.weaponClasses:
if reference == weaponClass.get("ref"):
return self.localize.get(weaponClass.get("name"))
def GetAttachmentCategory(self: Any, reference: str) -> Optional[str]:
"""Get the name of the specified attachment category."""
for category in self.attachCategories:
if category.get("ref") == reference:
return self.localize.get(category.get("name"))
def GetCamoCategory(self: Any, reference: str) -> Optional[str]:
"""Get the name of the specified camo category."""
for category in self.camoCategories:
if category.get("ref") == reference:
return self.localize.get(category.get("name"))
def GetAttribute(self: Any, reference: str) -> Optional[str]:
"""
Get the name of the specified attribute.
Defined in ui/utils/weaponutils.lua and ui/utils/vehicleutils.lua
"""
if reference is None:
return None
attributes: Dict[str, str] = {
"red": "WEAPON/TRACER_RED",
"blue": "WEAPON/TRACER_BLUE",
"pink": "WEAPON/TRACER_PINK",
"green": "WEAPON/TRACER_GREEN",
"purple": "WEAPON/TRACER_PURPLE",
"freedom": "WEAPON/TRACER_FREEDOM",
"shadow": "WEAPON/TRACER_SHADOW",
"gold": "WEAPON/TRACER_GOLD",
"morte": "WEAPON/TRACER_MORTE",
"tesla": "WEAPON/TRACER_TESLA",
"sixteenBit": "WEAPON/TRACER_16BIT",
"dark": "WEAPON/TRACER_DARK",
"light": "WEAPON/TRACER_LIGHT",
"orange": "WEAPON/TRACER_ORANGE",
"yellow": "WEAPON/TRACER_YELLOW",
"soul": "WEAPON/TRACER_SOUL",
"purpleGreen": "WEAPON/TRACER_PURPLE_GREEN",
"goldPurple": "WEAPON/TRACER_GOLD_PURPLE",
"bluePurple": "WEAPON/TRACER_BLUE_PURPLE",
"yellowGreen": "WEAPON/TRACER_YELLOW_GREEN",
"orangeGreen": "WEAPON/TRACER_ORANGE_GREEN",
"whitepink": "WEAPON/TRACER_WHITE_PINK",
"paintball": "WEAPON/TRACER_PAINTBALL",
"cyan": "WEAPON/TRACER_CYAN",
"purplecyan": "WEAPON/TRACER_PURPLE_CYAN",
"bluered": "WEAPON/TRACER_BLUE_RED",
"greengold": "WEAPON/TRACER_GREEN_GOLD",
"numbers": "WEAPON/TRACER_NUMBERS",
"paintballalt01": "WEAPON/TRACER_PAINTBALL_ALT_01",
"paintballalt02": "WEAPON/TRACER_PAINTBALL_ALT_02",
"electricgold": "WEAPON/TRACER_ELECTRIC_GOLD",
"lazer": "WEAPON/TRACER_LAZER",
"redwhite": "WEAPON/TRACER_RED_WHITE",
"orangepurple": "WEAPON/TRACER_ORANGE_PURPLE",
"standardDis": "WEAPON/DISMEMBERMENT",
"cryoDis": "WEAPON/CRYO_DISMEMBERMENT",
"goldDis": "WEAPON/DISMEMBERMENT_GOLD",
"electricDis": "WEAPON/DISMEMBERMENT_ELECTRIC",
"acidDis": "WEAPON/DISMEMBERMENT_ACID",
"runicDis": "WEAPON/DISMEMBERMENT_RUNIC",
"shatterBlast": "WEAPON/DISMEMBERMENT_SHATTERBLAST",
"radioactive": "WEAPON/DISMEMBERMENT_RADIOACTIVE",
"fire": "WEAPON/DISMEMBERMENT_FIRE",
"fireworkDis": "WEAPON/DISMEMBERMENT_FIREWORK",
"sunburstDis": "WEAPON/DISMEMBERMENT_SUNBURST",
"beerDis": "WEAPON/DISMEMBERMENT_BEER",
"electricgoldDis": "WEAPON/DISMEMBERMENT_ELECTRIC_GOLD",
"numbersdDis": "WEAPON/DISMEMBERMENT_NUMBERS",
"purpleelectric": "WEAPON/DISMEMBERMENT_PURPLE_ELECTRIC",
"purpleelectricDis": "WEAPON/DISMEMBERMENT_PURPLE_ELECTRIC",
"radioactiveblue": "WEAPON/DISMEMBERMENT_RADIOACTIVE_BLUE",
"tailLightTracerRed": "VEHICLES/ATTRIBUTE_TAIL_LIGHT_TRACER_RED",
"flightTrailStandard": "VEHICLES/ATTRIBUTE_FLIGHT_TRAIL_STANDARD",
"flightTrailShadow": "VEHICLES/ATTRIBUTE_FLIGHT_TRAIL_SHADOW",
"tireTrailFlame": "VEHICLES/ATTRIBUTE_TIRE_TRAIL_FLAME",
"smoke": "VEHICLES/ATTRIBUTE_SMOKE",
"tireTrailTesla": "VEHICLES/ATTRIBUTE_TIRE_TRAIL_TESLA",
"crimsonGold": "WEAPON/TRACER_CRIMSON_GOLD",
"emerald": "WEAPON/TRACER_EMERALD",
"amethyst": "WEAPON/TRACER_AMETHYST",
"cherryBlossom": "WEAPON/TRACER_CHERRY_BLOSSOM",
"ice": "WEAPON/TRACER_ICE",
"rainbow": "WEAPON/TRACER_RAINBOW",
"black": "WEAPON/TRACER_BLACK",
"crimsonRonin": "WEAPON/TRACER_CRIMSON_RONIN",
"acid": "WEAPON/TRACER_ACID",
"tailLightTracerAkira": "VEHICLES/ATTRIBUTE_TAIL_LIGHT_TRACER_AKIRA",
"flightTrailRainbow": "VEHICLES/ATTRIBUTE_FLIGHT_TRAIL_RAINBOW",
}
if attributes.get(reference) is None:
log.warning(f"Unknown attribute for ref {reference}")
return self.localize.get(attributes.get(reference))
def GetGameTypeCategory(self: Any, reference: str) -> Optional[str]:
"""
Get the name of the specified game type category.
Defined in ui/utils/mplobbyutils.lua and ui/frontend/mp/gamemodes.lua
"""
if reference is None:
return None
categories: Dict[str, str] = {
"PrivateTournament": "LUA_MENU/TOURNAMENT",
"Plunder": "LUA_MENU/GAMEMODE_PLUNDER",
"BattleRoyale": "LUA_MENU/GAMEMODE_BATTLE_ROYALE",
"WarzoneAlternate": "LUA_MENU/GAMEMODE_WARZONE_ALTERNATE",
"MyModes": "LUA_MENU/MY_MODES",
"Cwl": "LUA_MENU/CWL_MODES",
"Standard": "LUA_MENU/STANDARD_MODES",
"Alternate": "LUA_MENU/ALTERNATE_MODES",
"Rebirth": "LUA_MENU_CANTEEN/GAMEMODE_REBIRTH",
}
if categories.get(reference) is None:
log.warning(f"Unknown game type category for ref {reference}")
return self.localize.get(categories.get(reference))
def GetPlatformExclusivity(self: Any, reference: str) -> str:
"""
Get the name of the specified platform.
Defined in ui/utils/lui.lua
"""
if reference is None:
return
elif reference == "pc":
return "battlenet"
elif reference == "sy":
return "playstation"
elif reference == "ms":
return "xbox"
else:
log.warning(f"Unknown exclusivity platform for ref {reference}")
def GetTitleAvailability(self: Any, id: int) -> Dict[str, bool]:
"""Get the title availability for the specified item."""
for item in self.itemSources:
if id == item.get("marketPlaceID"):
return {
"vanguard": bool(item.get("equippableS4")),
"coldWar": bool(item.get("equippableT9")),
"warzone": bool(item.get("equippableWZ")),
"modernWarfare": bool(item.get("equippableIW8MP")),
}
return {
"vanguard": False,
"coldWar": False,
"warzone": True,
"modernWarfare": True,
}
| 36.446488 | 86 | 0.590411 | import logging
from typing import Any, Dict, List, Optional, TypedDict
from utility import Utility
from .database import Database
from .XAssets import (
Accessories,
BattlePasses,
BattlePassItems,
Bundles,
CallingCards,
Camos,
Charms,
Consumables,
Emblems,
Equipment,
Executions,
Features,
GameTypes,
Gestures,
ItemSources,
Killstreaks,
KioskBR,
KioskBRTruck,
Maps,
MasteryChallenges,
MiscellaneousChallenges,
MissionItems,
Missions,
OfficerChallenges,
Operators,
PlaylistEvents,
ProgressionRewards,
Quips,
Reticles,
SeasonalChallenges,
SeasonalEvents,
Skins,
SpecialItems,
Splashes,
Sprays,
Stickers,
TurboChallenges,
UnlockItemsT9,
VehicleCamos,
VehicleHorns,
Vehicles,
VehicleTracks,
Weapons,
WeaponUnlockChallenges,
WeeklyChallengesBR,
WeeklyChallengesMP,
)
log: logging.Logger = logging.getLogger(__name__)
class LootMaster(TypedDict):
rangeStart: int
rangeEnd: int
typeName: str
typeValue: str
hidden: int
typeNameLoc: str
typeDesc: str
typeImg: str
breadcrumb: str
baseWeaponRef: str
class ItemSourceTable(TypedDict):
marketPlaceID: int
refType: str
refName: str
gameSourceID: str
equippableIW8MP: int
equippableWZ: int
equippableT9: int
equippableS4: int
lookupType: str
class OperatorIDs(TypedDict):
id: int
ref: str
rarity: int
price: int
salvage: int
license: int
premium: int
class WeaponClassTable(TypedDict):
index: int
ref: str
slot: int
name: str
pluralName: str
showInMenus: int
unlockTablePrefix: str
showInCP: int
image: str
showInArmory: int
previewScene: str
attachScenePrefix: str
unknown1: str
unknown2: str
classImage: str
canBeGunsmithed: int
attachCategoryWhitelist: str
hasVariants: int
isWZOnly: int
extraAttachCategoryWhitelist: str
class AttachmentCategoryTable(TypedDict):
index: int
ref: str
name: str
buttonIndex: int
displayOrder: int
categoryScene: str
smallCategoryScene: str
largeCategoryScene: str
bone: str
defaultLineOffsetX: int
defaultLineOffsetY: int
defaultLineOffsetZ: int
enableBigGunPreviewCamera: int
enableSmallGunPreviewCamera: int
enableBigShotgunPreviewCamera: int
class CamoCategoryTable(TypedDict):
index: int
ref: str
name: str
class ModernWarfare:
def __init__(self: Any, config: dict) -> None:
self.ModernWarfare = self
self.config: Dict[str, Any] = config.get("ModernWarfare")
self.iXAssets: str = self.config["import"]["xassets"]
self.iImages: str = self.config["import"]["images"]
self.eXAssets: str = self.config["export"]["xassets"]
self.eImages: str = self.config["export"]["images"]
self.eVideos: str = self.config["export"]["videos"]
self.eDatabase: str = self.config["export"]["database"]
def Compile(self: Any) -> None:
log.info("Compiling XAssets for Call of Duty: Modern Warfare...")
self.localize: Dict[str, Optional[str]] = ModernWarfare.LoadLocalize(self)
self.lootTypes: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/loot/loot_master.csv", LootMaster, 1
)
self.itemSources: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/mp/itemsourcetable.csv", ItemSourceTable
)
self.operatorIds: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/loot/operator_ids.csv", OperatorIDs
)
self.weaponClasses: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/mp/weaponClassTable.csv", WeaponClassTable
)
self.attachCategories: List[Dict[str, Any]] = Utility.ReadCSV(
self,
f"{self.iXAssets}/mp/attachmentcategorytable.csv",
AttachmentCategoryTable,
)
self.camoCategories: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/mp/camocategorytable.csv", CamoCategoryTable
)
Accessories.Compile(self)
BattlePasses.Compile(self)
BattlePassItems.Compile(self)
Bundles.Compile(self)
CallingCards.Compile(self)
Camos.Compile(self)
Charms.Compile(self)
Consumables.Compile(self)
Emblems.Compile(self)
Equipment.Compile(self)
Executions.Compile(self)
Features.Compile(self)
GameTypes.Compile(self)
Gestures.Compile(self)
ItemSources.Compile(self)
Killstreaks.Compile(self)
KioskBR.Compile(self)
KioskBRTruck.Compile(self)
Maps.Compile(self)
MasteryChallenges.Compile(self)
MiscellaneousChallenges.Compile(self)
MissionItems.Compile(self)
Missions.Compile(self)
OfficerChallenges.Compile(self)
Operators.Compile(self)
PlaylistEvents.Compile(self)
ProgressionRewards.Compile(self)
Quips.Compile(self)
Reticles.Compile(self)
SeasonalChallenges.Compile(self)
SeasonalEvents.Compile(self)
Skins.Compile(self)
SpecialItems.Compile(self)
Splashes.Compile(self)
Sprays.Compile(self)
Stickers.Compile(self)
TurboChallenges.Compile(self)
UnlockItemsT9.Compile(self)
VehicleCamos.Compile(self)
VehicleHorns.Compile(self)
Vehicles.Compile(self)
VehicleTracks.Compile(self)
Weapons.Compile(self)
WeaponUnlockChallenges.Compile(self)
WeeklyChallengesBR.Compile(self)
WeeklyChallengesMP.Compile(self)
if self.config.get("compileDatabase") is True:
Database.Compile(self)
def LoadLocalize(self: Any) -> Dict[str, Optional[str]]:
localize: Dict[str, Optional[str]] = Utility.ReadFile(
self, f"{self.iXAssets}/localize.json"
)
placeholders: dict = Utility.ReadFile(self, "ModernWarfare/placeholders.json")
for key in localize:
value: Optional[str] = localize.get(key)
if value is None:
continue
for placeholder in placeholders.get("whole"):
if value.lower() == placeholder.lower():
localize[key] = None
break
for placeholder in placeholders.get("begins"):
if value.lower().startswith(placeholder.lower()) is True:
localize[key] = None
break
for placeholder in placeholders.get("ends"):
if value.lower().endswith(placeholder.lower()) is True:
localize[key] = None
break
if (value := localize.get(key)) is not None:
value = Utility.StripColorCodes(self, value)
value = Utility.StripButtonCodes(self, value)
localize[key] = value
return localize
def GetLootRarity(self: Any, value: int) -> Optional[str]:
return self.localize.get(f"LOOT_MP/QUALITY_{value}")
def GetLootType(self: Any, id: int) -> Optional[str]:
if id is None:
return
for loot in self.lootTypes:
start: int = loot.get("rangeStart")
end: int = loot.get("rangeEnd")
if (id >= start) and (id <= end):
typeNameLoc: Optional[str] = loot.get("typeNameLoc")
if typeNameLoc == "LOOT_MP/PLACEHOLDER":
continue
return self.localize.get(typeNameLoc)
for source in self.itemSources:
if source.get("marketPlaceID") == id:
refType: Optional[str] = source.get("refType")
if refType == "weapon":
return self.localize.get("LOOT_MP/ITEM_TYPE_WEAPON")
elif refType == "operator":
return self.localize.get("LOOT_MP/OPERATOR")
elif refType == "operator_skin":
return self.localize.get("LOOT_MP/OPERATOR_SKIN")
elif refType == "executions":
return self.localize.get("LOOT_MP/OPERATOR_EXECUTION")
elif refType == "equipment":
return self.localize.get("LOOT_MP/EQUIPMENT")
elif refType == "accessory":
return self.localize.get("LOOT_MP/WATCH")
elif refType == "playercards":
return self.localize.get("LOOT_MP/CALLING_CARD")
elif refType == "weapon_charm":
return self.localize.get("LOOT_MP/CHARM")
elif refType == "quip":
return self.localize.get("LOOT_MP/OPERATOR_QUIP")
elif refType == "camo":
return self.localize.get("LOOT_MP/CAMO")
elif refType == "emblems":
return self.localize.get("LOOT_MP/EMBLEM")
elif refType == "attachment":
return self.localize.get("LOOT_MP/ATTACHMENT")
elif refType == "sticker":
return self.localize.get("LOOT_MP/STICKER")
elif refType == "xp_token":
return self.localize.get("LOOT_MP/CONSUMABLE")
elif refType == "markeritem":
return self.localize.get("LOOT_MP/CONSUMABLE")
elif refType == "reticle":
return self.localize.get("LOOT_MP/RETICLE")
elif refType == "blueprint":
return self.localize.get("LOOT_MP/ITEM_TYPE_WEAPON")
elif refType == "battlepass":
return self.localize.get("LOOT_MP/BATTLE_PASS")
elif refType == "vehicle_track":
return self.localize.get("LOOT_MP/VEHICLE_TRACK")
elif refType == "vehicle_horn":
return self.localize.get("LOOT_MP/VEHICLE_HORN")
elif refType == "feature":
return self.localize.get("LOOT_MP/FEATURE")
elif refType == "gestures":
return self.localize.get("LOOT_MP/GESTURES")
elif refType == "mission":
return self.localize.get("LOOT_MP/MISSION")
elif refType == "weapon_attachment":
return self.localize.get("LOOT_MP/ATTACHMENT")
elif refType == "perk":
return self.localize.get("LOOT_MP/PERK")
elif refType == "t9_equipment":
return self.localize.get("LOOT_MP/EQUIPMENT")
elif refType == "killstreak":
return self.localize.get("LOOT_MP/STREAK")
elif refType == "class":
return self.localize.get("LOOT_MP/FEATURE")
elif refType == "zm_unlockable":
return self.localize.get("LOOT_MP/FEATURE")
elif refType == "weapon_skill":
return self.localize.get("LOOT_MP/FEATURE")
elif refType == "bonuscard":
return self.localize.get("LOOT_MP/FEATURE")
elif refType == "vehicleskin":
return self.localize.get("LOOT_MP/VEHICLE_SKIN")
elif refType == "bundle":
return self.localize.get("MENU/BUNDLE_TYPE_VARIETY")
elif refType == "placeholder":
return
elif refType == "arcadegame":
return self.localize.get("LOOT_MP/ARCADE_GAME")
elif refType == "sprays":
return self.localize.get("LOOT_MP/SPRAYS")
else:
log.warning(f"Unknown loot refType {refType} for ID {id}")
def GetLootSeason(self: Any, license: int) -> Optional[str]:
if license == 0:
return
elif license == 99:
return "Unreleased"
elif ((license - 1) % 1000) == 0:
license -= 1
elif ((license - 2) % 1000) == 0:
license -= 2
elif (license % 1000) != 0:
return
return self.localize.get(f"SEASONS/SEASON_{round(license / 1000)}")
def GetOperatorID(self: Any, reference: str) -> Optional[int]:
if reference == "universal_ref":
# set one ourselves.
return 29999
elif reference == "universal_base_ref":
# Same reason as universal_ref, however, this is only intended
# for use with Operators where isLaunchOperator is True.
return 29998
elif reference == "t9_exclusive_ref":
# Same reason as universal_ref, however, this is only intended
# for use with Black Ops Cold War Operators.
return 29997
for operator in self.operatorIds:
if reference == operator.get("ref"):
return operator.get("id")
def GetWeaponClass(self: Any, reference: str) -> Optional[str]:
for weaponClass in self.weaponClasses:
if reference == weaponClass.get("ref"):
return self.localize.get(weaponClass.get("name"))
def GetAttachmentCategory(self: Any, reference: str) -> Optional[str]:
for category in self.attachCategories:
if category.get("ref") == reference:
return self.localize.get(category.get("name"))
def GetCamoCategory(self: Any, reference: str) -> Optional[str]:
for category in self.camoCategories:
if category.get("ref") == reference:
return self.localize.get(category.get("name"))
def GetAttribute(self: Any, reference: str) -> Optional[str]:
if reference is None:
return None
attributes: Dict[str, str] = {
"red": "WEAPON/TRACER_RED",
"blue": "WEAPON/TRACER_BLUE",
"pink": "WEAPON/TRACER_PINK",
"green": "WEAPON/TRACER_GREEN",
"purple": "WEAPON/TRACER_PURPLE",
"freedom": "WEAPON/TRACER_FREEDOM",
"shadow": "WEAPON/TRACER_SHADOW",
"gold": "WEAPON/TRACER_GOLD",
"morte": "WEAPON/TRACER_MORTE",
"tesla": "WEAPON/TRACER_TESLA",
"sixteenBit": "WEAPON/TRACER_16BIT",
"dark": "WEAPON/TRACER_DARK",
"light": "WEAPON/TRACER_LIGHT",
"orange": "WEAPON/TRACER_ORANGE",
"yellow": "WEAPON/TRACER_YELLOW",
"soul": "WEAPON/TRACER_SOUL",
"purpleGreen": "WEAPON/TRACER_PURPLE_GREEN",
"goldPurple": "WEAPON/TRACER_GOLD_PURPLE",
"bluePurple": "WEAPON/TRACER_BLUE_PURPLE",
"yellowGreen": "WEAPON/TRACER_YELLOW_GREEN",
"orangeGreen": "WEAPON/TRACER_ORANGE_GREEN",
"whitepink": "WEAPON/TRACER_WHITE_PINK",
"paintball": "WEAPON/TRACER_PAINTBALL",
"cyan": "WEAPON/TRACER_CYAN",
"purplecyan": "WEAPON/TRACER_PURPLE_CYAN",
"bluered": "WEAPON/TRACER_BLUE_RED",
"greengold": "WEAPON/TRACER_GREEN_GOLD",
"numbers": "WEAPON/TRACER_NUMBERS",
"paintballalt01": "WEAPON/TRACER_PAINTBALL_ALT_01",
"paintballalt02": "WEAPON/TRACER_PAINTBALL_ALT_02",
"electricgold": "WEAPON/TRACER_ELECTRIC_GOLD",
"lazer": "WEAPON/TRACER_LAZER",
"redwhite": "WEAPON/TRACER_RED_WHITE",
"orangepurple": "WEAPON/TRACER_ORANGE_PURPLE",
"standardDis": "WEAPON/DISMEMBERMENT",
"cryoDis": "WEAPON/CRYO_DISMEMBERMENT",
"goldDis": "WEAPON/DISMEMBERMENT_GOLD",
"electricDis": "WEAPON/DISMEMBERMENT_ELECTRIC",
"acidDis": "WEAPON/DISMEMBERMENT_ACID",
"runicDis": "WEAPON/DISMEMBERMENT_RUNIC",
"shatterBlast": "WEAPON/DISMEMBERMENT_SHATTERBLAST",
"radioactive": "WEAPON/DISMEMBERMENT_RADIOACTIVE",
"fire": "WEAPON/DISMEMBERMENT_FIRE",
"fireworkDis": "WEAPON/DISMEMBERMENT_FIREWORK",
"sunburstDis": "WEAPON/DISMEMBERMENT_SUNBURST",
"beerDis": "WEAPON/DISMEMBERMENT_BEER",
"electricgoldDis": "WEAPON/DISMEMBERMENT_ELECTRIC_GOLD",
"numbersdDis": "WEAPON/DISMEMBERMENT_NUMBERS",
"purpleelectric": "WEAPON/DISMEMBERMENT_PURPLE_ELECTRIC",
"purpleelectricDis": "WEAPON/DISMEMBERMENT_PURPLE_ELECTRIC",
"radioactiveblue": "WEAPON/DISMEMBERMENT_RADIOACTIVE_BLUE",
"tailLightTracerRed": "VEHICLES/ATTRIBUTE_TAIL_LIGHT_TRACER_RED",
"flightTrailStandard": "VEHICLES/ATTRIBUTE_FLIGHT_TRAIL_STANDARD",
"flightTrailShadow": "VEHICLES/ATTRIBUTE_FLIGHT_TRAIL_SHADOW",
"tireTrailFlame": "VEHICLES/ATTRIBUTE_TIRE_TRAIL_FLAME",
"smoke": "VEHICLES/ATTRIBUTE_SMOKE",
"tireTrailTesla": "VEHICLES/ATTRIBUTE_TIRE_TRAIL_TESLA",
"crimsonGold": "WEAPON/TRACER_CRIMSON_GOLD",
"emerald": "WEAPON/TRACER_EMERALD",
"amethyst": "WEAPON/TRACER_AMETHYST",
"cherryBlossom": "WEAPON/TRACER_CHERRY_BLOSSOM",
"ice": "WEAPON/TRACER_ICE",
"rainbow": "WEAPON/TRACER_RAINBOW",
"black": "WEAPON/TRACER_BLACK",
"crimsonRonin": "WEAPON/TRACER_CRIMSON_RONIN",
"acid": "WEAPON/TRACER_ACID",
"tailLightTracerAkira": "VEHICLES/ATTRIBUTE_TAIL_LIGHT_TRACER_AKIRA",
"flightTrailRainbow": "VEHICLES/ATTRIBUTE_FLIGHT_TRAIL_RAINBOW",
}
if attributes.get(reference) is None:
log.warning(f"Unknown attribute for ref {reference}")
return self.localize.get(attributes.get(reference))
def GetGameTypeCategory(self: Any, reference: str) -> Optional[str]:
if reference is None:
return None
categories: Dict[str, str] = {
"PrivateTournament": "LUA_MENU/TOURNAMENT",
"Plunder": "LUA_MENU/GAMEMODE_PLUNDER",
"BattleRoyale": "LUA_MENU/GAMEMODE_BATTLE_ROYALE",
"WarzoneAlternate": "LUA_MENU/GAMEMODE_WARZONE_ALTERNATE",
"MyModes": "LUA_MENU/MY_MODES",
"Cwl": "LUA_MENU/CWL_MODES",
"Standard": "LUA_MENU/STANDARD_MODES",
"Alternate": "LUA_MENU/ALTERNATE_MODES",
"Rebirth": "LUA_MENU_CANTEEN/GAMEMODE_REBIRTH",
}
if categories.get(reference) is None:
log.warning(f"Unknown game type category for ref {reference}")
return self.localize.get(categories.get(reference))
def GetPlatformExclusivity(self: Any, reference: str) -> str:
if reference is None:
return
elif reference == "pc":
return "battlenet"
elif reference == "sy":
return "playstation"
elif reference == "ms":
return "xbox"
else:
log.warning(f"Unknown exclusivity platform for ref {reference}")
def GetTitleAvailability(self: Any, id: int) -> Dict[str, bool]:
for item in self.itemSources:
if id == item.get("marketPlaceID"):
return {
"vanguard": bool(item.get("equippableS4")),
"coldWar": bool(item.get("equippableT9")),
"warzone": bool(item.get("equippableWZ")),
"modernWarfare": bool(item.get("equippableIW8MP")),
}
return {
"vanguard": False,
"coldWar": False,
"warzone": True,
"modernWarfare": True,
}
| true | true |
1c2ebfbba2889019f3daeaf8c2c41e1faae71ede | 896 | py | Python | examples/core/produce_c_file.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 1,160 | 2015-05-02T15:13:20.000Z | 2022-03-31T20:04:28.000Z | examples/core/produce_c_file.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 19 | 2015-04-20T13:47:00.000Z | 2021-07-07T13:00:42.000Z | examples/core/produce_c_file.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 257 | 2015-04-01T21:42:33.000Z | 2022-03-10T11:57:51.000Z | """
summary: decompile entire file
description:
automate IDA to perform auto-analysis on a file and,
once that is done, produce a .c file containing the
decompilation of all the functions in that file.
Run like so:
ida -A "-S...path/to/produce_c_file.py" <binary-file>
where:
* -A instructs IDA to run in non-interactive mode
* -S holds a path to the script to run (note this is a single token;
there is no space between '-S' and its path.)
"""
import ida_pro
import ida_auto
import ida_loader
import ida_hexrays
# The output .c file sits next to the IDB, with a ".c" suffix appended.
database_path = ida_loader.get_path(ida_loader.PATH_TYPE_IDB)
output_path = f"{database_path}.c"

# Auto-analysis must finish before decompilation results are meaningful.
ida_auto.auto_wait()

# Decompile every function into a single fresh .c file, silently, while
# still letting the user cancel.
decompile_flags = (
    ida_hexrays.VDRUN_NEWFILE
    | ida_hexrays.VDRUN_SILENT
    | ida_hexrays.VDRUN_MAYSTOP
)
ida_hexrays.decompile_many(output_path, None, decompile_flags)

ida_pro.qexit(0)
| 23.578947 | 72 | 0.722098 |
import ida_pro
import ida_auto
import ida_loader
import ida_hexrays
idb_path = ida_loader.get_path(ida_loader.PATH_TYPE_IDB)
c_path = "%s.c" % idb_path
ida_auto.auto_wait()
ida_hexrays.decompile_many(
c_path,
None,
ida_hexrays.VDRUN_NEWFILE
|ida_hexrays.VDRUN_SILENT
|ida_hexrays.VDRUN_MAYSTOP)
ida_pro.qexit(0)
| true | true |
1c2ebfd75fe3944d8065afbbfce8e4cb94d66e50 | 13,075 | py | Python | src/saml2/httpbase.py | skanct/pysaml2 | 0c1e26a6dd8759962857a30ebd67f63fe9e881ee | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/pysaml2-4.4.0/src/saml2/httpbase.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/pysaml2-4.4.0/src/saml2/httpbase.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | import calendar
import six
from six.moves import http_cookiejar
import copy
import re
from six.moves.urllib.parse import urlparse
from six.moves.urllib.parse import urlencode
import requests
import time
from six.moves.http_cookies import SimpleCookie
from saml2.time_util import utc_now
from saml2 import class_name, SAMLError
from saml2.pack import http_form_post_message
from saml2.pack import http_post_message
from saml2.pack import make_soap_enveloped_saml_thingy
from saml2.pack import http_redirect_message
import logging
logger = logging.getLogger(__name__)
# requests 2.x expects headers as a dict, while 1.x also accepted a list of
# tuples; send() consults this flag before converting header lists.
# NOTE(review): lexicographic version comparison misorders e.g. "10.0.0";
# fine for distinguishing 1.x from 2.x, but confirm if later majors matter.
if requests.__version__ < "2.0.0":
    DICT_HEADERS = False
else:
    DICT_HEADERS = True
__author__ = 'rolandh'
# Template attribute set for http_cookiejar.Cookie: set_cookie() copies this
# dict and overrides individual fields from each Set-Cookie morsel before
# constructing the Cookie object.
ATTRS = {"version": None,
         "name": "",
         "value": None,
         "port": None,
         "port_specified": False,
         "domain": "",
         "domain_specified": False,
         "domain_initial_dot": False,
         "path": "",
         "path_specified": False,
         "secure": False,
         "expires": None,
         "discard": True,
         "comment": None,
         "comment_url": None,
         "rest": "",
         "rfc2109": True}
# Maps each value-carrying attribute to the companion "*_specified" flag that
# http_cookiejar.Cookie expects to be set when the value is present.
PAIRS = {
    "port": "port_specified",
    "domain": "domain_specified",
    "path": "path_specified"
}
class ConnectionError(SAMLError):
    """Raised by send() when the HTTP connection attempt fails."""
    # NOTE(review): shadows the Python 3 builtin ConnectionError.
    pass
class HTTPError(SAMLError):
    """Raised by send_using_soap() when the response status is not 200."""
    pass
TIME_FORMAT = ["%d-%b-%Y %H:%M:%S %Z", "%d-%b-%y %H:%M:%S %Z",
"%d %b %Y %H:%M:%S %Z"]
def _since_epoch(cdate):
"""
:param cdate: date format 'Wed, 06-Jun-2012 01:34:34 GMT'
:return: UTC time
"""
if len(cdate) < 29: # somethings broken
if len(cdate) < 5:
return utc_now()
cdate = cdate[5:] # assume short weekday, i.e. do not support obsolete RFC 1036 date format
t = -1
for time_format in TIME_FORMAT :
try:
t = time.strptime(cdate, time_format) # e.g. 18-Apr-2014 12:30:51 GMT
except ValueError:
pass
else:
break
if t == -1:
raise (Exception,
'ValueError: Date "{0}" does not match any of: {1}'.format(
cdate,TIME_FORMAT))
return calendar.timegm(t)
def set_list2dict(sl):
    """Convert a list of (key, value) pairs into a dict."""
    return {key: value for key, value in sl}
def dict2set_list(dic):
    """Convert a dict into a list of (key, value) pairs."""
    return list(dic.items())
class HTTPBase(object):
    def __init__(self, verify=True, ca_bundle=None, key_file=None,
                 cert_file=None):
        """
        :param verify: Whether to verify TLS server certificates
        :param ca_bundle: Optional CA bundle used for verification when
            verify is true
        :param key_file: Private key for TLS client authentication
        :param cert_file: Certificate for TLS client authentication
        """
        # Default keyword arguments passed to requests.request() by send().
        self.request_args = {"allow_redirects": False}
        #self.cookies = {}
        self.cookiejar = http_cookiejar.CookieJar()
        self.request_args["verify"] = verify
        if verify:
            if ca_bundle:
                self.request_args["verify"] = ca_bundle
            if key_file:
                self.request_args["cert"] = (cert_file, key_file)
        # Security context and basic-auth credentials; filled in later by
        # the owner (sec) or via add_credentials().
        self.sec = None
        self.user = None
        self.passwd = None
    def cookies(self, url):
        """
        Return cookies that are matching the path and are still valid

        :param url: The URL the cookies would be sent to; its host and path
            are matched against each stored cookie
        :return: dict mapping cookie name to cookie value
        """
        part = urlparse(url)
        #if part.port:
        #    _domain = "%s:%s" % (part.hostname, part.port)
        #else:
        _domain = part.hostname
        cookie_dict = {}
        now = utc_now()
        # Walk the jar's internal domain -> path -> name mapping.
        for _, a in list(self.cookiejar._cookies.items()):
            for _, b in a.items():
                for cookie in list(b.values()):
                    # print(cookie)
                    if cookie.expires and cookie.expires <= now:
                        continue
                    # NOTE(review): the cookie domain and path are used as
                    # regex patterns without escaping; metacharacters in
                    # either could mismatch -- confirm this is intended.
                    if not re.search("%s$" % cookie.domain, _domain):
                        continue
                    if not re.match(cookie.path, part.path):
                        continue
                    cookie_dict[cookie.name] = cookie.value
        return cookie_dict
    def set_cookie(self, kaka, request):
        """Store cookies from a Set-Cookie header in self.cookiejar.

        :param kaka: A SimpleCookie parsed from the response's
            "set-cookie" header
        :param request: The response object whose URL supplies the default
            cookie domain
        """
        if not kaka:
            return
        part = urlparse(request.url)
        _domain = part.hostname
        logger.debug("%s: '%s'", _domain, kaka)
        for cookie_name, morsel in kaka.items():
            # Start from the template attribute set so http_cookiejar.Cookie
            # receives every constructor argument it needs.
            std_attr = ATTRS.copy()
            std_attr["name"] = cookie_name
            _tmp = morsel.coded_value
            if _tmp.startswith('"') and _tmp.endswith('"'):
                # Strip surrounding quotes from a quoted cookie value.
                std_attr["value"] = _tmp[1:-1]
            else:
                std_attr["value"] = _tmp
            std_attr["version"] = 0
            # copy attributes that have values
            for attr in morsel.keys():
                if attr in ATTRS:
                    if morsel[attr]:
                        if attr == "expires":
                            # Convert the date string to seconds since epoch.
                            std_attr[attr] = _since_epoch(morsel[attr])
                        elif attr == "path":
                            # Drop a stray trailing comma from the path.
                            if morsel[attr].endswith(","):
                                std_attr[attr] = morsel[attr][:-1]
                            else:
                                std_attr[attr] = morsel[attr]
                        else:
                            std_attr[attr] = morsel[attr]
                elif attr == "max-age":
                    if morsel["max-age"]:
                        # max-age is relative; convert to an absolute expiry.
                        std_attr["expires"] = time.time() + int(morsel["max-age"])
            # Set the companion "*_specified" flags for present values.
            for att, item in PAIRS.items():
                if std_attr[att]:
                    std_attr[item] = True
            if std_attr["domain"]:
                if std_attr["domain"].startswith("."):
                    std_attr["domain_initial_dot"] = True
            else:
                # No explicit domain: default to the request's host.
                std_attr["domain"] = _domain
                std_attr["domain_specified"] = True
            # NOTE(review): morsel["max-age"] holds a string, so `is 0` can
            # never be true -- this "delete on max-age=0" branch looks dead;
            # confirm, and compare int(morsel["max-age"]) == 0 if intended.
            if morsel["max-age"] is 0:
                try:
                    self.cookiejar.clear(domain=std_attr["domain"],
                                         path=std_attr["path"],
                                         name=std_attr["name"])
                except ValueError:
                    pass
            elif std_attr["expires"] and std_attr["expires"] < utc_now():
                # Already expired: remove any stored copy instead of adding.
                try:
                    self.cookiejar.clear(domain=std_attr["domain"],
                                         path=std_attr["path"],
                                         name=std_attr["name"])
                except ValueError:
                    pass
            else:
                new_cookie = http_cookiejar.Cookie(**std_attr)
                self.cookiejar.set_cookie(new_cookie)
    def send(self, url, method="GET", **kwargs):
        """Send an HTTP request and record any cookies the response sets.

        The request arguments are self.request_args overlaid with **kwargs;
        matching stored cookies and any configured basic-auth credentials
        are added before the request is made.

        :param url: Where to send the request
        :param method: The HTTP method to use, "GET" by default
        :return: a requests Response object
        :raises ConnectionError: if the connection attempt fails
        """
        _kwargs = copy.copy(self.request_args)
        if kwargs:
            _kwargs.update(kwargs)
        if self.cookiejar:
            _cd = self.cookies(url)
            if _cd:
                _kwargs["cookies"] = _cd
        if self.user and self.passwd:
            _kwargs["auth"] = (self.user, self.passwd)
        if "headers" in _kwargs and isinstance(_kwargs["headers"], list):
            if DICT_HEADERS:
                # requests.request wants a dict of headers, not a list of tuples
                _kwargs["headers"] = dict(_kwargs["headers"])
        try:
            logger.debug("%s to %s", method, url)
            for arg in ["cookies", "data", "auth"]:
                try:
                    logger.debug("%s: %s", arg.upper(), _kwargs[arg])
                except KeyError:
                    pass
            r = requests.request(method, url, **_kwargs)
            logger.debug("Response status: %s", r.status_code)
        except requests.ConnectionError as exc:
            # Re-raise as this module's ConnectionError.
            raise ConnectionError("%s" % exc)
        try:
            # Remember any cookies the server handed back.
            self.set_cookie(SimpleCookie(r.headers["set-cookie"]), r)
        except AttributeError:
            pass
        except KeyError:
            # No "set-cookie" header present.
            pass
        return r
@staticmethod
def use_http_post(message, destination, relay_state,
typ="SAMLRequest"):
"""
Return a urlencoded message that should be POSTed to the recipient.
:param message: The response
:param destination: Where the response should be sent
:param relay_state: The relay_state received in the request
:param typ: Whether a Request, Response or Artifact
:return: dictionary
"""
if not isinstance(message, six.string_types):
message = "%s" % (message,)
return http_post_message(message, relay_state, typ)
@staticmethod
def use_http_form_post(message, destination, relay_state,
typ="SAMLRequest"):
"""
Return a form that will automagically execute and POST the message
to the recipient.
:param message:
:param destination:
:param relay_state:
:param typ: Whether a Request, Response or Artifact
:return: dictionary
"""
if not isinstance(message, six.string_types):
message = "%s" % (message,)
return http_form_post_message(message, destination, relay_state, typ)
@staticmethod
def use_http_artifact(message, destination="", relay_state=""):
if relay_state:
query = urlencode({"SAMLart": message,
"RelayState": relay_state})
else:
query = urlencode({"SAMLart": message})
info = {
"data": "",
"url": "%s?%s" % (destination, query)
}
return info
@staticmethod
def use_http_uri(message, typ, destination="", relay_state=""):
if "\n" in message:
data = message.split("\n")[1]
else:
data = message.strip()
if typ == "SAMLResponse":
info = {
"data": data,
"headers": [
("Content-Type", "application/samlassertion+xml"),
("Cache-Control", "no-cache, no-store"),
("Pragma", "no-cache")
]
}
elif typ == "SAMLRequest":
# msg should be an identifier
if relay_state:
query = urlencode({"ID": message,
"RelayState": relay_state})
else:
query = urlencode({"ID": message})
info = {
"data": "",
"url": "%s?%s" % (destination, query)
}
else:
raise NotImplemented
return info
    def use_soap(self, request, destination="", soap_headers=None, sign=False,
                 **kwargs):
        """
        Construct the necessary information for using SOAP+POST

        :param request: The SAML request to wrap in a SOAP envelope
        :param destination: Where the message should be sent
        :param soap_headers: Headers to include in the SOAP envelope
        :param sign: Whether the message should be signed
        :return: dictionary with url/method/data/headers, ready for send()
        """
        headers = [("content-type", "application/soap+xml")]
        soap_message = make_soap_enveloped_saml_thingy(request, soap_headers)
        logger.debug("SOAP message: %s", soap_message)
        if sign and self.sec:
            # Sign the statement identified by the request's id.
            _signed = self.sec.sign_statement(soap_message,
                                              class_name=class_name(request),
                                              node_id=request.id)
            soap_message = _signed
        return {"url": destination, "method": "POST",
                "data": soap_message, "headers": headers}
    def send_using_soap(self, request, destination, headers=None, sign=False):
        """
        Send a message using SOAP+POST

        :param request: The SAML request to send
        :param destination: Where the message should be sent
        :param headers: SOAP headers to include
        :param sign: Whether the message should be signed
        :return: the requests Response object when the status is 200
        :raises HTTPError: on any non-200 response status
        """
        # _response = self.server.post(soap_message, headers, path=path)
        try:
            args = self.use_soap(request, destination, headers, sign)
            args["headers"] = dict(args["headers"])
            response = self.send(**args)
        except Exception as exc:
            logger.info("HTTPClient exception: %s", exc)
            raise
        if response.status_code == 200:
            logger.info("SOAP response: %s", response.text)
            return response
        else:
            raise HTTPError("%d:%s" % (response.status_code, response.content))
    def add_credentials(self, user, passwd):
        """Set the HTTP basic-auth credentials that send() will use."""
        self.user = user
        self.passwd = passwd
@staticmethod
def use_http_get(message, destination, relay_state,
typ="SAMLRequest", sigalg="", signer=None, **kwargs):
"""
Send a message using GET, this is the HTTP-Redirect case so
no direct response is expected to this request.
:param message:
:param destination:
:param relay_state:
:param typ: Whether a Request, Response or Artifact
:param sigalg: Which algorithm the signature function will use to sign
the message
:param signer: A signing function that can be used to sign the message
:return: dictionary
"""
if not isinstance(message, six.string_types):
message = "%s" % (message,)
return http_redirect_message(message, destination, relay_state, typ,
sigalg, signer)
| 31.735437 | 95 | 0.524512 | import calendar
import six
from six.moves import http_cookiejar
import copy
import re
from six.moves.urllib.parse import urlparse
from six.moves.urllib.parse import urlencode
import requests
import time
from six.moves.http_cookies import SimpleCookie
from saml2.time_util import utc_now
from saml2 import class_name, SAMLError
from saml2.pack import http_form_post_message
from saml2.pack import http_post_message
from saml2.pack import make_soap_enveloped_saml_thingy
from saml2.pack import http_redirect_message
import logging
logger = logging.getLogger(__name__)
if requests.__version__ < "2.0.0":
DICT_HEADERS = False
else:
DICT_HEADERS = True
__author__ = 'rolandh'
ATTRS = {"version": None,
"name": "",
"value": None,
"port": None,
"port_specified": False,
"domain": "",
"domain_specified": False,
"domain_initial_dot": False,
"path": "",
"path_specified": False,
"secure": False,
"expires": None,
"discard": True,
"comment": None,
"comment_url": None,
"rest": "",
"rfc2109": True}
PAIRS = {
"port": "port_specified",
"domain": "domain_specified",
"path": "path_specified"
}
class ConnectionError(SAMLError):
pass
class HTTPError(SAMLError):
pass
TIME_FORMAT = ["%d-%b-%Y %H:%M:%S %Z", "%d-%b-%y %H:%M:%S %Z",
"%d %b %Y %H:%M:%S %Z"]
def _since_epoch(cdate):
if len(cdate) < 29:
if len(cdate) < 5:
return utc_now()
cdate = cdate[5:]
t = -1
for time_format in TIME_FORMAT :
try:
t = time.strptime(cdate, time_format)
except ValueError:
pass
else:
break
if t == -1:
raise (Exception,
'ValueError: Date "{0}" does not match any of: {1}'.format(
cdate,TIME_FORMAT))
return calendar.timegm(t)
def set_list2dict(sl):
return dict(sl)
def dict2set_list(dic):
return [(k, v) for k, v in dic.items()]
class HTTPBase(object):
def __init__(self, verify=True, ca_bundle=None, key_file=None,
cert_file=None):
self.request_args = {"allow_redirects": False}
self.cookiejar = http_cookiejar.CookieJar()
self.request_args["verify"] = verify
if verify:
if ca_bundle:
self.request_args["verify"] = ca_bundle
if key_file:
self.request_args["cert"] = (cert_file, key_file)
self.sec = None
self.user = None
self.passwd = None
def cookies(self, url):
part = urlparse(url)
_domain = part.hostname
cookie_dict = {}
now = utc_now()
for _, a in list(self.cookiejar._cookies.items()):
for _, b in a.items():
for cookie in list(b.values()):
if cookie.expires and cookie.expires <= now:
continue
if not re.search("%s$" % cookie.domain, _domain):
continue
if not re.match(cookie.path, part.path):
continue
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def set_cookie(self, kaka, request):
if not kaka:
return
part = urlparse(request.url)
_domain = part.hostname
logger.debug("%s: '%s'", _domain, kaka)
for cookie_name, morsel in kaka.items():
std_attr = ATTRS.copy()
std_attr["name"] = cookie_name
_tmp = morsel.coded_value
if _tmp.startswith('"') and _tmp.endswith('"'):
std_attr["value"] = _tmp[1:-1]
else:
std_attr["value"] = _tmp
std_attr["version"] = 0
for attr in morsel.keys():
if attr in ATTRS:
if morsel[attr]:
if attr == "expires":
std_attr[attr] = _since_epoch(morsel[attr])
elif attr == "path":
if morsel[attr].endswith(","):
std_attr[attr] = morsel[attr][:-1]
else:
std_attr[attr] = morsel[attr]
else:
std_attr[attr] = morsel[attr]
elif attr == "max-age":
if morsel["max-age"]:
std_attr["expires"] = time.time() + int(morsel["max-age"])
for att, item in PAIRS.items():
if std_attr[att]:
std_attr[item] = True
if std_attr["domain"]:
if std_attr["domain"].startswith("."):
std_attr["domain_initial_dot"] = True
else:
std_attr["domain"] = _domain
std_attr["domain_specified"] = True
if morsel["max-age"] is 0:
try:
self.cookiejar.clear(domain=std_attr["domain"],
path=std_attr["path"],
name=std_attr["name"])
except ValueError:
pass
elif std_attr["expires"] and std_attr["expires"] < utc_now():
try:
self.cookiejar.clear(domain=std_attr["domain"],
path=std_attr["path"],
name=std_attr["name"])
except ValueError:
pass
else:
new_cookie = http_cookiejar.Cookie(**std_attr)
self.cookiejar.set_cookie(new_cookie)
def send(self, url, method="GET", **kwargs):
_kwargs = copy.copy(self.request_args)
if kwargs:
_kwargs.update(kwargs)
if self.cookiejar:
_cd = self.cookies(url)
if _cd:
_kwargs["cookies"] = _cd
if self.user and self.passwd:
_kwargs["auth"] = (self.user, self.passwd)
if "headers" in _kwargs and isinstance(_kwargs["headers"], list):
if DICT_HEADERS:
_kwargs["headers"] = dict(_kwargs["headers"])
try:
logger.debug("%s to %s", method, url)
for arg in ["cookies", "data", "auth"]:
try:
logger.debug("%s: %s", arg.upper(), _kwargs[arg])
except KeyError:
pass
r = requests.request(method, url, **_kwargs)
logger.debug("Response status: %s", r.status_code)
except requests.ConnectionError as exc:
raise ConnectionError("%s" % exc)
try:
self.set_cookie(SimpleCookie(r.headers["set-cookie"]), r)
except AttributeError:
pass
except KeyError:
pass
return r
@staticmethod
def use_http_post(message, destination, relay_state,
typ="SAMLRequest"):
if not isinstance(message, six.string_types):
message = "%s" % (message,)
return http_post_message(message, relay_state, typ)
@staticmethod
def use_http_form_post(message, destination, relay_state,
typ="SAMLRequest"):
if not isinstance(message, six.string_types):
message = "%s" % (message,)
return http_form_post_message(message, destination, relay_state, typ)
@staticmethod
def use_http_artifact(message, destination="", relay_state=""):
if relay_state:
query = urlencode({"SAMLart": message,
"RelayState": relay_state})
else:
query = urlencode({"SAMLart": message})
info = {
"data": "",
"url": "%s?%s" % (destination, query)
}
return info
@staticmethod
def use_http_uri(message, typ, destination="", relay_state=""):
if "\n" in message:
data = message.split("\n")[1]
else:
data = message.strip()
if typ == "SAMLResponse":
info = {
"data": data,
"headers": [
("Content-Type", "application/samlassertion+xml"),
("Cache-Control", "no-cache, no-store"),
("Pragma", "no-cache")
]
}
elif typ == "SAMLRequest":
if relay_state:
query = urlencode({"ID": message,
"RelayState": relay_state})
else:
query = urlencode({"ID": message})
info = {
"data": "",
"url": "%s?%s" % (destination, query)
}
else:
raise NotImplemented
return info
def use_soap(self, request, destination="", soap_headers=None, sign=False,
**kwargs):
headers = [("content-type", "application/soap+xml")]
soap_message = make_soap_enveloped_saml_thingy(request, soap_headers)
logger.debug("SOAP message: %s", soap_message)
if sign and self.sec:
_signed = self.sec.sign_statement(soap_message,
class_name=class_name(request),
node_id=request.id)
soap_message = _signed
return {"url": destination, "method": "POST",
"data": soap_message, "headers": headers}
def send_using_soap(self, request, destination, headers=None, sign=False):
try:
args = self.use_soap(request, destination, headers, sign)
args["headers"] = dict(args["headers"])
response = self.send(**args)
except Exception as exc:
logger.info("HTTPClient exception: %s", exc)
raise
if response.status_code == 200:
logger.info("SOAP response: %s", response.text)
return response
else:
raise HTTPError("%d:%s" % (response.status_code, response.content))
def add_credentials(self, user, passwd):
self.user = user
self.passwd = passwd
@staticmethod
def use_http_get(message, destination, relay_state,
typ="SAMLRequest", sigalg="", signer=None, **kwargs):
if not isinstance(message, six.string_types):
message = "%s" % (message,)
return http_redirect_message(message, destination, relay_state, typ,
sigalg, signer)
| true | true |
1c2ec000659f19143f2f3a883502db0eb1d71d33 | 6,398 | py | Python | Chapter_Clustering_GMM/ProcessMonitoring_GMM.py | ML-PSE/Machine_Learning_for_PSE | b53578d7cc0e0eca4907527b188a60de06d6710e | [
"Apache-2.0"
] | 2 | 2022-02-20T18:57:46.000Z | 2022-03-03T07:07:12.000Z | Chapter_Clustering_GMM/ProcessMonitoring_GMM.py | ML-PSE/Machine_Learning_for_PSE | b53578d7cc0e0eca4907527b188a60de06d6710e | [
"Apache-2.0"
] | null | null | null | Chapter_Clustering_GMM/ProcessMonitoring_GMM.py | ML-PSE/Machine_Learning_for_PSE | b53578d7cc0e0eca4907527b188a60de06d6710e | [
"Apache-2.0"
] | null | null | null | ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Process Monitoring of Etch data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

#%% import required packages
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import scipy.stats  # FIX: explicit import; scipy.stats.f.ppf below previously
                    # worked only because sklearn imports scipy.stats itself
from sklearn.mixture import GaussianMixture

#%% fetch data
matlab_data = scipy.io.loadmat('MACHINE_Data.mat', struct_as_record=False)
Etch_data = matlab_data['LAMDATA']
calibration_dataAll = Etch_data[0,0].calibration  # calibration_dataAll[i,0] is a 2D array for batch i; columns are variables
variable_names = Etch_data[0,0].variables

##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##                          perform Multiway PCA
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

#%% generate unfolded data matrix
n_vars = variable_names.size - 2  # first 2 columns are not process variables
n_samples = 85                    # following the work of He et al.

def unfold_batches(batch_array):
    """Batch-wise unfold 3D batch data into a 2D matrix.

    Each sufficiently long batch (>= n_samples retained measurements,
    after dropping the first 5 as done in He et al.) becomes one row of
    length n_vars*n_samples; shorter batches are skipped.
    """
    rows = []
    for expt in range(batch_array.size):
        data_expt = batch_array[expt, 0][5:90, 2:]
        if data_expt.shape[0] < n_samples:
            continue  # incomplete batch
        rows.append(np.ravel(data_expt, order='F'))
    # Building a list and converting once avoids the quadratic cost of
    # repeated np.vstack on a growing array.
    return np.array(rows)

unfolded_dataMatrix = unfold_batches(calibration_dataAll)

#%% scale data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
data_train_normal = scaler.fit_transform(unfolded_dataMatrix)

#%% fit PCA model to calibration data
from sklearn.decomposition import PCA
n_PCs = 3  # following the work of He et al.
pca = PCA(n_components=n_PCs)
score_train = pca.fit_transform(data_train_normal)

#%% visualize in 2D
plt.figure()
plt.scatter(score_train[:,0], score_train[:,1])
plt.xlabel('PC1 scores')
plt.ylabel('PC2 scores')

##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##                          GMM on PCA scores
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

#%% finding # of components via BIC
BICs = []
lowestBIC = np.inf
for n_cluster in range(1, 10):
    gmm = GaussianMixture(n_components=n_cluster, random_state=100)
    gmm.fit(score_train)
    BIC = gmm.bic(score_train)
    BICs.append(BIC)
    if BIC < lowestBIC:
        optimal_n_cluster = n_cluster
        lowestBIC = BIC

plt.figure()
plt.plot(range(1,10), BICs, marker='o')
plt.xlabel('Number of components')
plt.ylabel('BIC')
plt.show()

#%% fit GMM model to metal-etch data
gmm = GaussianMixture(n_components=optimal_n_cluster, random_state=100)
cluster_label = gmm.fit_predict(score_train)

plt.figure()
plt.scatter(score_train[:, 0], score_train[:, 1], c=cluster_label, s=20, cmap='viridis')
plt.xlabel('PC1 scores')
plt.ylabel('PC2 scores')

cluster_centers = gmm.means_
cluster_plot_labels = ['Cluster ' + str(i+1) for i in range(optimal_n_cluster)]
for i in range(optimal_n_cluster):
    plt.scatter(cluster_centers[i, 0], cluster_centers[i, 1], c='red', s=20, marker='*', alpha=0.5)
    plt.annotate(cluster_plot_labels[i], (cluster_centers[i,0], cluster_centers[i,1]))

##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##                 Fault detection metric for training data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

#%% global mahalonobis distance metric
def compute_Dglobal(scores, gmm_model):
    """Posterior-probability-weighted Mahalanobis distance of each PCA
    score vector to the GMM component centers (the global metric of
    He et al.)."""
    Dglobal = np.zeros((scores.shape[0],))
    for i in range(scores.shape[0]):
        x = scores[i, :, np.newaxis]
        probs = gmm_model.predict_proba(x.T)
        # FIX: iterate over the fitted number of components rather than a
        # hard-coded 3, which was only correct when optimal_n_cluster == 3.
        for component in range(gmm_model.n_components):
            mean = gmm_model.means_[component, :, np.newaxis]
            inv_cov = np.linalg.inv(gmm_model.covariances_[component, :])
            Dlocal = np.dot(np.dot((x - mean).T, inv_cov), (x - mean))
            # Dlocal is a 1x1 array; .item() extracts the scalar cleanly.
            Dglobal[i] += probs[0, component] * Dlocal.item()
    return Dglobal

Dglobal_train = compute_Dglobal(score_train, gmm)

#%% Dglobal control limit
N = score_train.shape[0]  # number of calibration batches
r = n_PCs                 # dimension of the PCA score space
alpha = 0.05              # 95% control limit
Dglobal_CL = r*(N**2-1)*scipy.stats.f.ppf(1-alpha, r, N-r)/(N*(N-r))

#%% Dglobal plot with CL
plt.figure()
plt.plot(Dglobal_train)
plt.plot([1,len(Dglobal_train)],[Dglobal_CL, Dglobal_CL], color='red')
plt.xlabel('Sample #')
plt.ylabel('D_global for training data')
plt.show()

##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##                               test data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

#%% fetch test data and unfold
test_dataAll = Etch_data[0,0].test
unfolded_TestdataMatrix = unfold_batches(test_dataAll)

#%% PCA on fault data
data_test_normal = scaler.transform(unfolded_TestdataMatrix)
score_test = pca.transform(data_test_normal)

#%% visualize in 2D (both test and calibration data)
plt.figure()
plt.scatter(score_train[:,0], score_train[:,1], c='blue', alpha=0.1)
plt.scatter(score_test[:,0], score_test[:,1], c='red', marker='*')
plt.xlabel('PC1 scores')
plt.ylabel('PC2 scores')

#%% compute Dglobal_test
Dglobal_test = compute_Dglobal(score_test, gmm)

#%% Dglobal plot with CL
plt.figure()
plt.plot(Dglobal_test, marker='*')
plt.plot([1,len(Dglobal_test)],[Dglobal_CL,Dglobal_CL], color='red')
plt.xlabel('Sample #')
plt.ylabel('D_global for test data')
plt.show()

print('Number of faults identified: ', np.sum(Dglobal_test > Dglobal_CL), ' out of ', len(Dglobal_test))
| 36.770115 | 165 | 0.595186 | atlab_data['LAMDATA']
calibration_dataAll = Etch_data[0,0].calibration
variable_names = Etch_data[0,0].variables
0][5:90,2:]
if calibration_expt.shape[0] < 85:
continue
unfolded_row = np.ravel(calibration_expt, order='F')[np.newaxis,:]
unfolded_dataMatrix = np.vstack((unfolded_dataMatrix, unfolded_row))
unfolded_dataMatrix = unfolded_dataMatrix[1:,:]
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
data_train_normal = scaler.fit_transform(unfolded_dataMatrix)
from sklearn.decomposition import PCA
pca = PCA(n_components = 3)
score_train = pca.fit_transform(data_train_normal)
plt.figure()
plt.scatter(score_train[:,0],score_train[:,1])
plt.xlabel('PC1 scores')
plt.ylabel('PC2 scores')
if BIC < lowestBIC:
optimal_n_cluster = n_cluster
lowestBIC = BIC
plt.figure()
plt.plot(range(1,10), BICs, marker='o')
plt.xlabel('Number of components')
plt.ylabel('BIC')
plt.show()
gmm = GaussianMixture(n_components = optimal_n_cluster, random_state = 100)
cluster_label = gmm.fit_predict(score_train)
plt.figure()
plt.scatter(score_train[:, 0], score_train[:, 1], c = cluster_label, s=20, cmap='viridis')
plt.xlabel('PC1 scores')
plt.ylabel('PC2 scores')
cluster_centers = gmm.means_
cluster_plot_labels = ['Cluster ' + str(i+1) for i in range(optimal_n_cluster)]
for i in range(optimal_n_cluster):
plt.scatter(cluster_centers[i, 0], cluster_centers[i, 1], c='red', s=20, marker = '*', alpha=0.5)
plt.annotate(cluster_plot_labels[i], (cluster_centers[i,0], cluster_centers[i,1]))
np.dot(np.dot((x-gmm.means_[component,:,np.newaxis]).T,np.linalg.inv(gmm.covariances_[component,:])),(x-gmm.means_[component,:,np.newaxis]))
Dglobal_train[i] = Dglobal_train[i] + probs[0,component]*Dlocal
N = score_train.shape[0]
r = 3
alpha = 0.05
Dglobal_CL = r*(N**2-1)*scipy.stats.f.ppf(1-alpha,r,N-r)/(N*(N-r))
plt.figure()
plt.plot(Dglobal_train)
plt.plot([1,len(Dglobal_train)],[Dglobal_CL, Dglobal_CL], color='red')
plt.xlabel('Sample #')
plt.ylabel('D_global for training data')
plt.show()
if test_expt.shape[0] < 85:
continue
unfolded_row = np.ravel(test_expt, order='F')[np.newaxis,:]
unfolded_TestdataMatrix = np.vstack((unfolded_TestdataMatrix, unfolded_row))
unfolded_TestdataMatrix = unfolded_TestdataMatrix[1:,:]
data_test_normal = scaler.transform(unfolded_TestdataMatrix)
score_test = pca.transform(data_test_normal)
plt.figure()
plt.scatter(score_train[:,0],score_train[:,1], c='blue', alpha=0.1)
plt.scatter(score_test[:,0],score_test[:,1], c='red', marker = '*')
plt.xlabel('PC1 scores')
plt.ylabel('PC2 scores')
Dglobal_test = np.zeros((score_test.shape[0],))
for i in range(score_test.shape[0]):
x = score_test[i,:,np.newaxis]
probs = gmm.predict_proba(x.T)
for component in range(3):
Dlocal = np.dot(np.dot((x-gmm.means_[component,:,np.newaxis]).T,np.linalg.inv(gmm.covariances_[component,:])),(x-gmm.means_[component,:,np.newaxis]))
Dglobal_test[i] = Dglobal_test[i] + probs[0,component]*Dlocal
plt.figure()
plt.plot(Dglobal_test, marker = '*')
plt.plot([1,len(Dglobal_test)],[Dglobal_CL,Dglobal_CL], color='red')
plt.xlabel('Sample #')
plt.ylabel('D_global for test data')
plt.show()
print('Number of faults identified: ', np.sum(Dglobal_test > Dglobal_CL), ' out of ', len(Dglobal_test))
| true | true |
1c2ec0204f13deadf34e31a40a561e0198ba50a8 | 665 | py | Python | src/genie/libs/parser/bigip/get_net_sfcchain.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/bigip/get_net_sfcchain.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/bigip/get_net_sfcchain.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | # Global Imports
import json
from collections import defaultdict
# Metaparser
from genie.metaparser import MetaParser
# =============================================
# Collection for '/mgmt/tm/net/sfc/chain' resources
# =============================================
class NetSfcChainSchema(MetaParser):
    """Schema for the /mgmt/tm/net/sfc/chain REST resource."""

    # Empty schema: no keys are validated; the raw payload passes through.
    schema = {}
class NetSfcChain(NetSfcChainSchema):
    """F5 resource collection for /mgmt/tm/net/sfc/chain."""

    cli_command = "/mgmt/tm/net/sfc/chain"

    def rest(self):
        """GET the resource from the device and return the decoded JSON
        body, or an empty dict when the reply carries no payload."""
        reply = self.device.get(self.cli_command)
        payload = reply.json()
        return payload or {}
| 19.558824 | 52 | 0.578947 |
import json
from collections import defaultdict
from genie.metaparser import MetaParser
class NetSfcChainSchema(MetaParser):
schema = {}
class NetSfcChain(NetSfcChainSchema):
cli_command = "/mgmt/tm/net/sfc/chain"
def rest(self):
response = self.device.get(self.cli_command)
response_json = response.json()
if not response_json:
return {}
return response_json
| true | true |
1c2ec05a5b77d971e797ce93d09ecd74e83f2274 | 585 | py | Python | tests/common/server_sent_events/sse4python/test_web_request_factory.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | tests/common/server_sent_events/sse4python/test_web_request_factory.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | tests/common/server_sent_events/sse4python/test_web_request_factory.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | from yellowdog_client.common.server_sent_events.sse4python import WebRequestFactory
class TestCreate(object):
    """Tests for WebRequestFactory.create."""

    def test__expect_requester_returned_with_headers_passed(self, mocker):
        # Patch the WebRequester class so create() returns the mock instance.
        requester_cls = mocker.patch(
            "yellowdog_client.common.server_sent_events.sse4python.web_request_factory.WebRequester"
        )
        auth = mocker.MagicMock()

        created = WebRequestFactory(auth_base=auth).create()

        assert created == requester_cls.return_value
        requester_cls.assert_called_once_with(auth_base=auth)
| 34.411765 | 100 | 0.752137 | from yellowdog_client.common.server_sent_events.sse4python import WebRequestFactory
class TestCreate(object):
def test__expect_requester_returned_with_headers_passed(self, mocker):
mock_requester = mocker.patch(
"yellowdog_client.common.server_sent_events.sse4python.web_request_factory.WebRequester"
)
auth_base = mocker.MagicMock()
factory = WebRequestFactory(auth_base=auth_base)
res = factory.create()
assert res == mock_requester.return_value
mock_requester.assert_called_once_with(auth_base=auth_base)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.