code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import os, re
from lxml import etree
from file_paths import etl_source, etl_dest
from Section import Section
class EchoTarget:
    """SAX-style lxml parser target that streams WordNet LMF events into CSV sections.

    Tracks the current element depth and the ids of the enclosing
    ``LexicalEntry``/``Sense`` so that each ``Lemma``, ``Sense`` and
    ``SenseRelation`` element can be written out as one line.
    """

    def __init__(self, out_path=etl_dest):
        """Initialise parse state and open the three output sections."""
        self.level = 0          # current XML nesting depth
        self.ent_id = None      # id of the enclosing LexicalEntry
        self.form = None        # writtenForm of the current Lemma
        self.pos = None         # partOfSpeech of the current Lemma
        self.ss_id = None       # id of the current Sense
        self.synset = None      # synset of the current Sense
        wn_dir = out_path + '/WordNet'
        self.tx_pos = Section('WordNet', 'tx-pos', wn_dir, num_rows=10000)
        self.tx_syn = Section('WordNet', 'tx-syn', wn_dir, num_rows=10000)
        self.tx_rel = Section('WordNet', 'tx-rel', wn_dir, num_rows=10000)
        self.t_ll = 0           # running count of Lemma elements seen

    def start(self, tag, attrib):
        """Handle an element-open event; record state / emit output rows."""
        self.level += 1
        if tag == 'LexicalEntry':
            assert self.level == 3 and self.ent_id is None
            self.ent_id = attrib['id']
        elif tag == 'Lemma':
            assert self.level == 4 and self.ent_id is not None and self.form is None and self.pos is None
            self.form = attrib['writtenForm']
            self.pos = attrib['partOfSpeech']
            self.t_ll += 1
            # progress indicator every 10k lemmas
            if self.t_ll % 10000 == 0:
                print('%0.2fM,' % (self.t_ll/1000000), end=' ', flush=True)
            self.tx_pos.write_line('%s, %s, %s' % (self.form, self.pos, self.ent_id))
        elif tag == 'Sense':
            # Lemma's end event has already cleared form/pos by the time Sense opens
            assert self.level == 4 and self.ent_id is not None and self.form is None and self.pos is None
            self.ss_id = attrib['id']
            self.synset = attrib['synset']
            self.tx_syn.write_line('%s, %s, %s' % (self.ent_id, self.ss_id, self.synset))
        elif tag == 'SenseRelation':
            assert (self.level == 5 and self.ent_id is not None and self.form is None and self.pos is None
                    and self.ss_id is not None and self.synset is not None)
            self.relation = attrib['relType']
            self.target = attrib['target']
            self.tx_rel.write_line('%s, %s, %s, %s' % (self.ent_id, self.ss_id, self.relation, self.target))

    def end(self, tag):
        """Handle an element-close event; clear the state the element owned."""
        self.level -= 1
        if tag == 'LexicalEntry':
            self.ent_id = None
        elif tag == 'Lemma':
            self.form = None
            self.pos = None
        elif tag == 'Sense':
            self.ss_id = None
            self.synset = None

    def data(self, data):
        """Text content is not used; ignore it."""
        pass

    def close(self):
        """Flush and close all output sections; called once parsing finishes."""
        self.tx_pos.close()
        self.tx_syn.close()
        self.tx_rel.close()
        return "closed!"
class WordNet:
    """Driver that stream-parses the english-wordnet XML dump via EchoTarget."""

    def __init__(self,
                 source_fn=etl_source + '/english-wordnet/wn.xml',
                 out_path=etl_dest):
        """Remember where to read the XML from and where to write outputs."""
        self.source_fn = source_fn
        self.out_path = out_path

    def build(self):
        """Run the parse; EchoTarget handles all output as a side effect."""
        target = EchoTarget(self.out_path)
        etree.parse(self.source_fn, etree.XMLParser(target=target))
# Build the WordNet extraction with default paths.
c = WordNet()
c.build()
print('\n\nDone.') | [
"lxml.etree.parse",
"Section.Section"
] | [((308, 375), 'Section.Section', 'Section', (['"""WordNet"""', '"""tx-pos"""', "(out_path + '/WordNet')"], {'num_rows': '(10000)'}), "('WordNet', 'tx-pos', out_path + '/WordNet', num_rows=10000)\n", (315, 375), False, 'from Section import Section\n'), ((394, 461), 'Section.Section', 'Section', (['"""WordNet"""', '"""tx-syn"""', "(out_path + '/WordNet')"], {'num_rows': '(10000)'}), "('WordNet', 'tx-syn', out_path + '/WordNet', num_rows=10000)\n", (401, 461), False, 'from Section import Section\n'), ((480, 547), 'Section.Section', 'Section', (['"""WordNet"""', '"""tx-rel"""', "(out_path + '/WordNet')"], {'num_rows': '(10000)'}), "('WordNet', 'tx-rel', out_path + '/WordNet', num_rows=10000)\n", (487, 547), False, 'from Section import Section\n'), ((2474, 2509), 'lxml.etree.parse', 'etree.parse', (['self.source_fn', 'parser'], {}), '(self.source_fn, parser)\n', (2485, 2509), False, 'from lxml import etree\n')] |
from __future__ import annotations
from typing import Union, Tuple, List
import warnings
import numpy as np
class Question:
    """A split test for a decision-tree node: numeric threshold or categorical match.

    Args:
        column_index (int): index of the feature column examined at match time.
        value (Union[int, str, float, np.int64, np.float64]): threshold
            (numeric features) or match target (categorical features).
        header (str): human-readable column/header name.
    """

    # numeric types compared by threshold; everything else compared as strings
    _NUMERIC_TYPES = (int, float, np.int64, np.float64)

    def __init__(self, column_index: int, value: Union[int, str, float, np.int64, np.float64], header: str):
        """Store the split parameters."""
        self.column_index = column_index
        self.value = value
        self.header = header

    def match(self, example: Union[list, np.ndarray]) -> bool:
        """Decide whether *example* satisfies this question.

        Args:
            example (Union[list, np.ndarray]): one sample row.

        Returns:
            bool: True when the numeric value meets the threshold or the
                categorical value matches, else False.
        """
        if isinstance(example, list):
            example = np.array(example, dtype="O")
        val = example[self.column_index]
        if isinstance(val, self._NUMERIC_TYPES):
            # numeric feature: threshold comparison
            return float(val) >= float(self.value)
        # categorical feature: string equality
        return str(val) == str(self.value)

    def __repr__(self):
        op = ">=" if isinstance(self.value, self._NUMERIC_TYPES) else "=="
        return f"Is {self.header} {op} {self.value} ?"
class Node:
    """A tree node: either an internal decision node or a leaf.

    Args:
        question (Question, optional): the split test held by a decision node. Defaults to None.
        true_branch (Node, optional): child followed when the question matches. Defaults to None.
        false_branch (Node, optional): child followed when it does not. Defaults to None.
        uncertainty (float, optional): impurity value (gini/entropy/variance). Defaults to None.
        leaf_value (Union[dict, int, float], optional): terminal prediction payload;
            keyword-only. Defaults to None.
    """

    def __init__(self, question: Question = None, true_branch: Node = None, false_branch: Node = None, uncertainty: float = None, *, leaf_value: Union[dict, int, float] = None):
        """Store the node's links and payload."""
        self.question = question
        self.true_branch = true_branch
        self.false_branch = false_branch
        self.uncertainty = uncertainty
        self.leaf_value = leaf_value

    @property
    def _is_leaf_node(self) -> bool:
        """Whether this node terminates a branch.

        Returns:
            bool: True when a leaf value is present, else False.
        """
        return self.leaf_value is not None
class DecisionTreeClassifier:
    """Decision Tree Based Classification Model

    Args:
        max_depth (int, optional): max depth of the tree. Defaults to 100.
        min_samples_split (int, optional): min size of the sample at the time of split. Defaults to 2.
        criteria (str, optional): what criteria to use for information. Defaults to 'gini'. available 'gini','entropy'.
    """

    def __init__(self, max_depth: int = 100, min_samples_split: int = 2, criteria: str = 'gini'):
        """Constructor
        """
        self._X = None
        self._y = None
        self._feature_names = None
        self._target_name = None
        self._tree = None
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.criteria = criteria

    def _count_dict(self, a: np.ndarray) -> dict:
        """Count class frequencies and get a dictionary from it

        Args:
            a (np.ndarray): input array. shape should be (m,1) for m samples.

        Returns:
            dict: categories/classes freq dictionary.
        """
        unique_values = np.unique(a, return_counts=True)
        return dict(zip(*unique_values))

    def _gini_impurity(self, arr: np.ndarray) -> float:
        """Calculate Gini Impurity

        Args:
            arr (np.ndarray): input array.

        Returns:
            float: gini impurity value.
        """
        _, counts = np.unique(arr, return_counts=True)
        gini_score = 1 - np.square(counts / arr.shape[0]).sum(axis=0)
        return gini_score

    def _entropy(self, arr: np.ndarray) -> float:
        """Calculate Entropy

        Args:
            arr (np.ndarray): input array.

        Returns:
            float: entropy result.
        """
        _, counts = np.unique(arr, return_counts=True)
        p = counts / arr.shape[0]
        entropy_score = (-p * np.log2(p)).sum(axis=0)
        return entropy_score

    def _uncertainty(self, a: np.ndarray) -> float:
        """Calculate uncertainty using the configured criteria.

        Args:
            a (np.ndarray): input array

        Returns:
            float: uncertainty value
        """
        if self.criteria == "entropy":
            value = self._entropy(a)
        elif self.criteria == "gini":
            value = self._gini_impurity(a)
        else:
            # unknown criteria: warn and fall back to gini
            warnings.warn(f"{self.criteria} is not coded yet. returning to gini.")
            value = self._gini_impurity(a)
        return value

    def _partition(self, rows: np.ndarray, question: Union[Question, None]) -> Tuple[list, list]:
        """Partition the rows based on the question.

        Args:
            rows (np.ndarray): input array to split.
            question (Question): question object containing splitting concept.

        Returns:
            Tuple[list,list]: true idxs and false idxs.
        """
        true_idx, false_idx = [], []
        for idx, row in enumerate(rows):
            if question.match(row):
                true_idx.append(idx)
            else:
                false_idx.append(idx)
        return true_idx, false_idx

    def _info_gain(self, left: np.ndarray, right: np.ndarray, parent_uncertainty: float) -> float:
        """Calculate information gain after splitting.

        Args:
            left (np.ndarray): left side array.
            right (np.ndarray): right side array.
            parent_uncertainty (float): parent node uncertainty.

        Returns:
            float: information gain value.
        """
        # weight of the left child (portion of samples)
        pr = left.shape[0] / (left.shape[0] + right.shape[0])
        # BUG FIX: child uncertainty is the *weighted sum* of both children's
        # uncertainties. The original code subtracted the right-hand term,
        # which inflates the gain and corrupts split selection.
        child_uncertainty = pr * \
            self._uncertainty(left) + (1 - pr) * self._uncertainty(right)
        # information gain = parent impurity minus weighted child impurity
        info_gain_value = parent_uncertainty - child_uncertainty
        return info_gain_value

    def _find_best_split(self, X: np.ndarray, y: np.ndarray) -> Tuple[float, Union[Question, None], float]:
        """Find the best split possible for the sample.

        Args:
            X (np.ndarray): Feature matrix.
            y (np.ndarray): target matrix.

        Returns:
            Tuple[float,Union[Question,None],float]: maximum gain from the split, best question of it, and parent node uncertainty.
        """
        max_gain = -1
        best_split_question = None
        parent_uncertainty = self._uncertainty(y)
        _, n_labels = X.shape
        for col_index in range(n_labels):  # iterate over feature columns
            # get unique values from the feature
            unique_values = np.unique(X[:, col_index])
            for val in unique_values:  # check for every value and find maximum info gain
                ques = Question(
                    column_index=col_index,
                    value=val,
                    header=self._feature_names[col_index]
                )
                t_idx, f_idx = self._partition(X, ques)
                # if it does not split the data, skip it
                if len(t_idx) == 0 or len(f_idx) == 0:
                    continue
                true_y = y[t_idx, :]
                false_y = y[f_idx, :]
                # get information gain
                gain = self._info_gain(true_y, false_y, parent_uncertainty)
                if gain > max_gain:
                    max_gain, best_split_question = gain, ques
        return max_gain, best_split_question, parent_uncertainty

    def _build_tree(self, X: np.ndarray, y: np.ndarray, depth: int = 0) -> Node:
        """Recursive function to build tree.

        Args:
            X (np.ndarray): input features matrix.
            y (np.ndarray): target matrix.
            depth (int, optional): depth count of the recursion. Defaults to 0.

        Returns:
            Node: either leaf node or decision node
        """
        m_samples, n_labels = X.shape
        # if depth is greater than max depth defined or labels/features are left to 1
        # or number of samples are less than the minimum size of samples to split then
        # stop recursion and return a node
        if (depth > self.max_depth or n_labels == 1 or m_samples < self.min_samples_split):
            return Node(leaf_value=self._count_dict(y))
        gain, ques, uncertainty = self._find_best_split(X, y)
        # if no useful split was found, stop here with a leaf
        if gain < 0:
            return Node(leaf_value=self._count_dict(y))
        t_idx, f_idx = self._partition(X, ques)  # get partition idxs
        true_branch = self._build_tree(
            X[t_idx, :], y[t_idx, :], depth + 1)  # recurse into true-branch samples
        false_branch = self._build_tree(
            X[f_idx, :], y[f_idx, :], depth + 1)  # recurse into false-branch samples
        return Node(
            question=ques,
            true_branch=true_branch,
            false_branch=false_branch,
            uncertainty=uncertainty
        )

    def train(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list], feature_name: list = None, target_name: list = None) -> None:
        """Train the model

        Args:
            X (Union[np.ndarray,list]): feature matrix.
            y (Union[np.ndarray,list]): target matrix.
            feature_name (list, optional): feature names list. Defaults to None.
            target_name (list, optional): target name list. Defaults to None.
        """
        X = np.array(X, dtype='O') if not isinstance(
            X, (np.ndarray)) else X  # converting to numpy array
        y = np.array(y, dtype='O') if not isinstance(
            y, (np.ndarray)) else y  # converting to numpy array
        # reshaping to vectors
        self._X = X.reshape(-1, 1) if len(X.shape) == 1 else X
        self._y = y.reshape(-1, 1) if len(y.shape) == 1 else y
        # creating feature names if not mentioned
        self._feature_names = feature_name or [
            f"C_{i}" for i in range(self._X.shape[1])]
        # creating target name if not mentioned
        self._target_name = target_name or ['target']
        # building the tree
        self._tree = self._build_tree(
            X=self._X,
            y=self._y
        )

    def print_tree(self, node: Union[Node, None] = None, spacing: str = "|-") -> None:
        """Print the tree.

        Args:
            node (Union[Node,None], optional): starting node. Defaults to None. then it will go to the root node of the tree.
            spacing (str, optional): printing separater. Defaults to "|-".
        """
        node = node or self._tree
        if node._is_leaf_node:
            print(spacing, " Predict :", node.leaf_value)
            return
        # Print the question at this node
        print(spacing + str(node.question) +
              " | " + self.criteria + " :" + str(node.uncertainty))
        # Call this function recursively on the true branch
        print(spacing + '--> True:')
        self.print_tree(node.true_branch, "  " + spacing + "-")
        # Call this function recursively on the false branch
        print(spacing + '--> False:')
        self.print_tree(node.false_branch, "  " + spacing + "-")

    def _classification(self, row: np.ndarray, node: Union[Node, None]) -> Union[dict]:
        """Classification recursive function.

        Args:
            row (np.ndarray): input matrix.
            node (Union[Node,None]): node to start with. mostly root node. rest will be handled by recursion.

        Returns:
            Union[dict]: leaf value. classification result.
        """
        if node._is_leaf_node:
            return node.leaf_value
        if node.question.match(row):
            return self._classification(row, node.true_branch)
        else:
            return self._classification(row, node.false_branch)

    def _leaf_probabilities(self, results: dict) -> dict:
        """Get probabilities from leaf class counts.

        Args:
            results (dict): results from _classification.

        Returns:
            dict: dictionary with categorical probabilities.
        """
        total = sum(results.values())
        probs = {}
        for key in results:
            probs[key] = (results[key] / total) * 100
        return probs

    def predict(self, X: Union[np.ndarray, list]) -> np.ndarray:
        """Predict classification results.

        Args:
            X (Union[np.ndarray,list]): testing matrix.

        Raises:
            ValueError: input X can only be a list or numpy array.

        Returns:
            np.ndarray: results of classification.
        """
        if isinstance(X, (np.ndarray, list)):
            X = np.array(X, dtype='O') if not isinstance(X, (np.ndarray)) else X
            if len(X.shape) == 1:
                max_result = 0
                result_dict = self._classification(row=X, node=self._tree)
                result = None
                # BUG FIX: track the running maximum so the *majority* class is
                # returned; the original never updated max_result, returning the
                # last key with a positive count instead.
                for key in result_dict:
                    if result_dict[key] > max_result:
                        max_result = result_dict[key]
                        result = key
                return np.array([[result]], dtype='O')
            else:
                leaf_value = []
                # get the majority class for every row
                for row in X:
                    max_result = 0
                    result_dict = self._classification(row=row, node=self._tree)
                    result = None
                    for key in result_dict:
                        if result_dict[key] > max_result:
                            max_result = result_dict[key]
                            result = key
                    leaf_value.append([result])
                return np.array(leaf_value, dtype='O')
        else:
            raise ValueError("X should be list or numpy array")

    def predict_probability(self, X: Union[np.ndarray, list]) -> Union[np.ndarray, dict]:
        """Predict classification probabilities.

        Args:
            X (Union[np.ndarray,list]): testing matrix.

        Raises:
            ValueError: input X can only be a list or numpy array.

        Returns:
            Union[np.ndarray, dict]: probability results of classification.
        """
        if isinstance(X, (np.ndarray, list)):
            X = np.array(X, dtype='O') if not isinstance(X, (np.ndarray)) else X
            if len(X.shape) == 1:
                return self._leaf_probabilities(self._classification(row=X, node=self._tree))
            else:
                leaf_value = []
                for row in X:
                    leaf_value.append([self._leaf_probabilities(
                        self._classification(row=row, node=self._tree))])
                return np.array(leaf_value, dtype='O')
        else:
            raise ValueError("X should be list or numpy array")
class DecisionTreeRegressor:
    """Decision Tree Based Regression Model

    Args:
        max_depth (int, optional): maximum depth of the tree. Defaults to 10.
        min_samples_split (int, optional): minimum number of samples while splitting. Defaults to 3.
        criteria (str, optional): criteria for best info gain. Defaults to 'variance'.
    """

    def __init__(self, max_depth: int = 10, min_samples_split: int = 3, criteria: str = 'variance'):
        """Constructor
        """
        self._X = None
        self._y = None
        self._feature_names = None
        self._target_name = None
        self._tree = None
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.criteria = criteria

    def _mean_leaf_value(self, a: np.ndarray) -> float:
        """Mean of the leaf values.

        Args:
            a (np.ndarray): input array.

        Returns:
            float: mean value
        """
        return float(np.mean(a))

    def _partition(self, rows: np.ndarray, question: Union[Question, None]) -> Tuple[list, list]:
        """Partition the rows based on the question.

        Args:
            rows (np.ndarray): input array to split.
            question (Question): question object containing splitting concept.

        Returns:
            Tuple[list,list]: true idxs and false idxs.
        """
        true_idx, false_idx = [], []
        for idx, row in enumerate(rows):
            if question.match(row):
                true_idx.append(idx)
            else:
                false_idx.append(idx)
        return true_idx, false_idx

    def _uncertainty(self, a: np.ndarray) -> float:
        """Calculate uncertainty (variance).

        Args:
            a (np.ndarray): input array

        Returns:
            float: uncertainty value
        """
        if self.criteria == "variance":
            value = np.var(a)
        else:
            # unknown criteria: warn and fall back to variance
            warnings.warn(f"{self.criteria} is not coded yet. returning to variance.")
            value = np.var(a)
        return float(value)

    def _info_gain(self, left: np.ndarray, right: np.ndarray, parent_uncertainty: float) -> float:
        """Calculate information gain after splitting.

        Args:
            left (np.ndarray): left side array.
            right (np.ndarray): right side array.
            parent_uncertainty (float): parent node uncertainty.

        Returns:
            float: information gain value.
        """
        # weight of the left child (portion of samples)
        pr = left.shape[0] / (left.shape[0] + right.shape[0])
        # BUG FIX: child uncertainty is the *weighted sum* of both children's
        # variances. The original code subtracted the right-hand term, which
        # inflates the gain and corrupts split selection.
        child_uncertainty = pr * \
            self._uncertainty(left) + (1 - pr) * self._uncertainty(right)
        info_gain_value = parent_uncertainty - child_uncertainty
        return info_gain_value

    def _find_best_split(self, X: np.ndarray, y: np.ndarray) -> Tuple[float, Union[Question, None], float]:
        """Find the best split possible for the sample.

        Args:
            X (np.ndarray): Feature matrix.
            y (np.ndarray): target matrix.

        Returns:
            Tuple[float,Union[Question,None],float]: maximum gain from the split, best question of it, and parent node uncertainty
        """
        max_gain = -1
        best_split_question = None
        parent_uncertainty = self._uncertainty(y)
        _, n_labels = X.shape
        for col_index in range(n_labels):  # iterate over feature columns
            # get unique values from the feature
            unique_values = np.unique(X[:, col_index])
            for val in unique_values:  # check for every value and find maximum info gain
                ques = Question(
                    column_index=col_index,
                    value=val,
                    header=self._feature_names[col_index]
                )
                t_idx, f_idx = self._partition(X, ques)
                # if it does not split the data, skip it
                if len(t_idx) == 0 or len(f_idx) == 0:
                    continue
                true_y = y[t_idx, :]
                false_y = y[f_idx, :]
                gain = self._info_gain(true_y, false_y, parent_uncertainty)
                if gain > max_gain:
                    max_gain, best_split_question = gain, ques
        return max_gain, best_split_question, parent_uncertainty

    def _build_tree(self, X: np.ndarray, y: np.ndarray, depth: int = 0) -> Node:
        """Recursive function to build tree.

        Args:
            X (np.ndarray): input features matrix.
            y (np.ndarray): target matrix.
            depth (int, optional): depth count of the recursion. Defaults to 0.

        Returns:
            Node: either leaf node or decision node
        """
        m_samples, n_labels = X.shape
        # if depth is greater than max depth defined or labels/features are left to 1
        # or number of samples are less than the minimum size of samples to split then
        # stop recursion and return a node
        if (depth > self.max_depth or n_labels == 1 or m_samples < self.min_samples_split):
            return Node(leaf_value=y)
        gain, ques, uncertainty = self._find_best_split(X, y)
        # if no useful split was found, stop here with a leaf
        if gain < 0:
            return Node(leaf_value=y)
        t_idx, f_idx = self._partition(X, ques)
        true_branch = self._build_tree(
            X[t_idx, :], y[t_idx, :], depth + 1)  # recurse into true-branch samples
        false_branch = self._build_tree(
            X[f_idx, :], y[f_idx, :], depth + 1)  # recurse into false-branch samples
        return Node(
            question=ques,
            true_branch=true_branch,
            false_branch=false_branch,
            uncertainty=uncertainty
        )

    def train(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list], feature_name: list = None, target_name: list = None) -> None:
        """Train the model

        Args:
            X (Union[np.ndarray,list]): feature matrix.
            y (Union[np.ndarray,list]): target matrix.
            feature_name (list, optional): feature names list. Defaults to None.
            target_name (list, optional): target name list. Defaults to None.
        """
        X = np.array(X, dtype='O') if not isinstance(
            X, (np.ndarray)) else X  # converting to numpy array
        y = np.array(y, dtype='O') if not isinstance(
            y, (np.ndarray)) else y  # converting to numpy array
        # reshaping to vectors
        self._X = X.reshape(-1, 1) if len(X.shape) == 1 else X
        self._y = y.reshape(-1, 1) if len(y.shape) == 1 else y
        # creating feature names if not mentioned
        self._feature_names = feature_name or [
            f"C_{i}" for i in range(self._X.shape[1])]
        # creating target name if not mentioned
        self._target_name = target_name or ['target']
        # building the tree
        self._tree = self._build_tree(
            X=self._X,
            y=self._y
        )

    def print_tree(self, node: Union[Node, None] = None, spacing: str = "|-", mean_preds: bool = True) -> None:
        """Print the tree.

        Args:
            node (Union[Node,None], optional): starting node. Defaults to None. then it will go to the root node of the tree.
            spacing (str, optional): printing separater. Defaults to "|-".
            mean_preds (bool): do the mean of prediction values. Defaults to True.
        """
        node = node or self._tree
        if node._is_leaf_node:
            if mean_preds:
                print(spacing, " Predict :", self._mean_leaf_value(node.leaf_value))
            else:
                print(spacing, " Predict :", node.leaf_value[..., -1])
            return
        # Print the question at this node
        print(spacing + str(node.question) +
              " | " + self.criteria + " :" + str(node.uncertainty))
        # Call this function recursively on the true branch
        print(spacing + '--> True:')
        self.print_tree(node.true_branch, "  " + spacing + "-", mean_preds)
        # Call this function recursively on the false branch
        print(spacing + '--> False:')
        self.print_tree(node.false_branch, "  " + spacing + "-", mean_preds)

    def _regression(self, row: np.ndarray, node: Union[Node, None], mean_preds: bool) -> float:
        """Regression recursive method.

        Args:
            row (np.ndarray): input matrix.
            node (Union[Node,None]): node to start with. mostly root node. rest will be handled by recursion.
            mean_preds (bool): do the mean of prediction values.

        Returns:
            float: regression result.
        """
        if node._is_leaf_node:
            if mean_preds:
                return self._mean_leaf_value(node.leaf_value)
            else:
                return node.leaf_value[..., -1]
        if node.question.match(row):
            return self._regression(row, node.true_branch, mean_preds)
        else:
            return self._regression(row, node.false_branch, mean_preds)

    def predict(self, X: np.ndarray, mean_preds: bool = True) -> np.ndarray:
        """Predict regression values.

        Args:
            X (np.ndarray): testing matrix.
            mean_preds (bool): do the mean of prediction values. Defaults to True.

        Raises:
            ValueError: X should be list or numpy array

        Returns:
            np.ndarray: regression prediction.
        """
        if isinstance(X, (np.ndarray, list)):
            X = np.array(X, dtype='O') if not isinstance(X, (np.ndarray)) else X
            if len(X.shape) == 1:
                result = self._regression(row=X, node=self._tree, mean_preds=mean_preds)
                return np.array([[result]], dtype='O')
            else:
                leaf_value = []
                for row in X:
                    result = self._regression(row=row, node=self._tree, mean_preds=mean_preds)
                    leaf_value.append([result])
                return np.array(leaf_value, dtype='O')
        else:
            raise ValueError("X should be list or numpy array")
| [
"numpy.mean",
"numpy.unique",
"numpy.square",
"numpy.array",
"warnings.warn",
"numpy.log2",
"numpy.var"
] | [((4092, 4124), 'numpy.unique', 'np.unique', (['a'], {'return_counts': '(True)'}), '(a, return_counts=True)\n', (4101, 4124), True, 'import numpy as np\n'), ((4464, 4498), 'numpy.unique', 'np.unique', (['arr'], {'return_counts': '(True)'}), '(arr, return_counts=True)\n', (4473, 4498), True, 'import numpy as np\n'), ((4824, 4858), 'numpy.unique', 'np.unique', (['arr'], {'return_counts': '(True)'}), '(arr, return_counts=True)\n', (4833, 4858), True, 'import numpy as np\n'), ((1157, 1185), 'numpy.array', 'np.array', (['example'], {'dtype': '"""O"""'}), "(example, dtype='O')\n", (1165, 1185), True, 'import numpy as np\n'), ((7681, 7707), 'numpy.unique', 'np.unique', (['X[:, col_index]'], {}), '(X[:, col_index])\n', (7690, 7707), True, 'import numpy as np\n'), ((10526, 10548), 'numpy.array', 'np.array', (['X'], {'dtype': '"""O"""'}), "(X, dtype='O')\n", (10534, 10548), True, 'import numpy as np\n'), ((10645, 10667), 'numpy.array', 'np.array', (['y'], {'dtype': '"""O"""'}), "(y, dtype='O')\n", (10653, 10667), True, 'import numpy as np\n'), ((16761, 16771), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (16768, 16771), True, 'import numpy as np\n'), ((17666, 17675), 'numpy.var', 'np.var', (['a'], {}), '(a)\n', (17672, 17675), True, 'import numpy as np\n'), ((17702, 17776), 'warnings.warn', 'warnings.warn', (['f"""{self.criteria} is not coded yet. returning to variance."""'], {}), "(f'{self.criteria} is not coded yet. 
returning to variance.')\n", (17715, 17776), False, 'import warnings\n'), ((17797, 17806), 'numpy.var', 'np.var', (['a'], {}), '(a)\n', (17803, 17806), True, 'import numpy as np\n'), ((19243, 19269), 'numpy.unique', 'np.unique', (['X[:, col_index]'], {}), '(X[:, col_index])\n', (19252, 19269), True, 'import numpy as np\n'), ((21952, 21974), 'numpy.array', 'np.array', (['X'], {'dtype': '"""O"""'}), "(X, dtype='O')\n", (21960, 21974), True, 'import numpy as np\n'), ((22071, 22093), 'numpy.array', 'np.array', (['y'], {'dtype': '"""O"""'}), "(y, dtype='O')\n", (22079, 22093), True, 'import numpy as np\n'), ((5368, 5438), 'warnings.warn', 'warnings.warn', (['f"""{self.criteria} is not coded yet. returning to gini."""'], {}), "(f'{self.criteria} is not coded yet. returning to gini.')\n", (5381, 5438), False, 'import warnings\n'), ((13730, 13752), 'numpy.array', 'np.array', (['X'], {'dtype': '"""O"""'}), "(X, dtype='O')\n", (13738, 13752), True, 'import numpy as np\n'), ((14120, 14151), 'numpy.array', 'np.array', (['[[result]]'], {'dtype': '"""O"""'}), "([[result]], dtype='O')\n", (14128, 14151), True, 'import numpy as np\n'), ((14667, 14698), 'numpy.array', 'np.array', (['leaf_value'], {'dtype': '"""O"""'}), "(leaf_value, dtype='O')\n", (14675, 14698), True, 'import numpy as np\n'), ((15236, 15258), 'numpy.array', 'np.array', (['X'], {'dtype': '"""O"""'}), "(X, dtype='O')\n", (15244, 15258), True, 'import numpy as np\n'), ((15672, 15703), 'numpy.array', 'np.array', (['leaf_value'], {'dtype': '"""O"""'}), "(leaf_value, dtype='O')\n", (15680, 15703), True, 'import numpy as np\n'), ((25266, 25288), 'numpy.array', 'np.array', (['X'], {'dtype': '"""O"""'}), "(X, dtype='O')\n", (25274, 25288), True, 'import numpy as np\n'), ((25478, 25509), 'numpy.array', 'np.array', (['[[result]]'], {'dtype': '"""O"""'}), "([[result]], dtype='O')\n", (25486, 25509), True, 'import numpy as np\n'), ((25756, 25787), 'numpy.array', 'np.array', (['leaf_value'], {'dtype': '"""O"""'}), "(leaf_value, 
dtype='O')\n", (25764, 25787), True, 'import numpy as np\n'), ((4524, 4556), 'numpy.square', 'np.square', (['(counts / arr.shape[0])'], {}), '(counts / arr.shape[0])\n', (4533, 4556), True, 'import numpy as np\n'), ((4923, 4933), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (4930, 4933), True, 'import numpy as np\n')] |
"""
Utilities for creating simulated data sets.
"""
from typing import Optional, Sequence
import numpy as np
import pandas as pd
from scipy.linalg import toeplitz
from ..api import AllTracker
__all__ = ["sim_data"]
__tracker = AllTracker(globals())
def sim_data(
n: int = 100,
intercept: float = -5,
two_way_coef: Optional[float] = None,
linear_vars: int = 10,
linear_var_coef: Optional[Sequence[float]] = None,
noise_vars: int = 0,
corr_vars: int = 0,
corr_type: str = "AR1",
corr_value: float = 0,
surg_err: float = 0.05,
bin_var_p: float = 0,
bin_coef: float = 0,
outcome: str = "classification",
regression_err: Optional[float] = None,
seed_val: int = 4763546,
) -> pd.DataFrame:
"""
Simulate data for classification or regression that includes an interaction between
two linear features, and some non-linear and linear features.
Noise variables, correlated variables that are not predictive and surrogate features
which are just derived from features that are predictive are also added.
This function is for the most part a direct translation of the ``twoClassSim``
function from the R package caret -- the option for an ordinal outcome and binary
outcome mis-labelling were omitted. Full credit for the approach used for simulating
binary classification data goes to the authors and contributors of caret
[`<NAME>. (2008). Caret package. Journal of Statistical Software, 28(5).
<https://rdrr.io/cran/caret/man/twoClassSim.html>`_]
Key modifications compared to the *R* implementation:
1. The ordinal outcome option has not been translated
2. Mis-labelling of the binary outcome has not been translated
3. The addition of a linear feature that is a copy of another used in the linear
predictor with a small amount of noise has been added to allow for the study
of variable surrogacy
4. Option for a binary predictor and surrogate has been added
5. Toggle option for regression versus classification has been added
6. Arguments for the coefficients of primary predictors of interest has been added
:param n: number of observations
:param intercept: value for the intercept which can be modified to generate class
imbalance
:param two_way_coef: list of three coefficients: two linear terms and an
interaction effect
:param linear_vars: number of linear features
:param linear_var_coef: an optional list of coefficients for linear features if
the default is not desired
:param noise_vars: number of unrelated independent noise features (do not
contribute to the linear predictor)
:param corr_vars: number of unrelated correlated noise features (do not contribute
to the linear predictor)
:param corr_type: type of correlation (exchangeable or auto-regressive) for
correlated noise features
:param corr_value: correlation for correlated noise features
:param surg_err: degree of noise added to first linear predictor
:param bin_var_p: prevalence for a binary feature to include in linear predictor
:param bin_coef: coefficient for the impact of binary feature on linear predictor
:param outcome: can be either classification for a binary outcome or regression
for a continuous outcome
:param regression_err: the error to be used in simulating a regression outcome
:param seed_val: set a seed for reproducibility
:return: data frame containing the simulated features and target for classification
"""
# set seed
np.random.seed(seed=seed_val)
# add two correlated normal features for use in creating an interaction term in the
# linear predictor
sigma = np.array([[2, 1.3], [1.3, 2]])
mu = [0, 0]
tmp_data = pd.DataFrame(
np.random.multivariate_normal(mu, sigma, size=n),
columns=["TwoFactor1", "TwoFactor2"],
)
# add independent linear features that contribute to the linear predictor
if linear_vars > 0:
lin_cols = ["Linear" + str(x) for x in range(1, linear_vars + 1)]
tmp_data = pd.concat(
[
tmp_data,
pd.DataFrame(np.random.normal(size=(n, linear_vars)), columns=lin_cols),
],
axis=1,
)
else:
lin_cols = None
# add non-linear features that contribute to the linear predictor
tmp_data["Nonlinear1"] = pd.Series(np.random.uniform(low=-1.0, high=1.0, size=n))
tmp_data = pd.concat(
[
tmp_data,
pd.DataFrame(
np.random.uniform(size=(n, 2)), columns=["Nonlinear2", "Nonlinear3"]
),
],
axis=1,
)
# add independent noise features that do not contribute to the linear predictor
if noise_vars > 0:
noise_cols = ["Noise" + str(x) for x in range(1, noise_vars + 1)]
tmp_data = pd.concat(
[
tmp_data,
pd.DataFrame(
np.random.normal(size=(n, noise_vars)), columns=noise_cols
),
],
axis=1,
)
# add correlated noise features that do not contribute to the linear predictor
if corr_vars > 0:
if corr_type == "exch":
vc = corr_value * np.ones((corr_vars, corr_vars))
np.fill_diagonal(vc, 1)
elif corr_type == "AR1":
vc_values = corr_value ** np.arange(corr_vars)
vc = toeplitz(vc_values)
else:
raise ValueError(
f'arg corr_type must be "exch" or "AR1", but got {repr(corr_type)}'
)
corr_cols = ["Corr" + str(x) for x in range(1, corr_vars + 1)]
tmp_data = pd.concat(
[
tmp_data,
pd.DataFrame(
np.random.multivariate_normal(np.zeros(corr_vars), vc, size=n),
columns=corr_cols,
),
],
axis=1,
)
# add a surrogate linear feature that does not contribute to the linear predictor
if linear_vars > 0:
tmp_data["Linear1_prime"] = tmp_data["Linear1"] + np.random.normal(
0, surg_err, size=n
)
# add a binary feature that contributes to the linear predictor
if bin_var_p > 0:
tmp_data["Binary1"] = np.where(np.random.uniform(size=n) <= bin_var_p, 0, 1)
# generate linear predictor
if two_way_coef is None:
two_way_coef = [4, 4, 2]
lp = (
intercept
- two_way_coef[0] * tmp_data.TwoFactor1
+ two_way_coef[1] * tmp_data.TwoFactor2
+ two_way_coef[2] * tmp_data.TwoFactor1 * tmp_data.TwoFactor2
+ tmp_data.Nonlinear1 ** 3
+ 2 * np.exp(-6 * (tmp_data.Nonlinear1 - 0.3) ** 2)
+ 2 * np.sin(np.pi * tmp_data.Nonlinear2 * tmp_data.Nonlinear3)
)
# add independent linear features to the linear predictor if required
if linear_vars > 0:
if linear_var_coef is None:
lin_coef = np.linspace(linear_vars, 1, num=linear_vars) / 4
neg_idx = list(range(1, linear_vars, 2))
lin_coef[neg_idx] *= -1
lp += tmp_data[lin_cols].dot(lin_coef)
elif linear_var_coef is not None:
if linear_vars != len(linear_var_coef):
raise ValueError(
"User defined linear feature coefficient list must be of length "
f"{linear_vars}"
)
lp += tmp_data[lin_cols].dot(linear_var_coef)
# add binary feature to the linear predictor if required
if bin_var_p > 0:
lp += bin_coef * tmp_data["Binary1"]
tmp_data["Binary1_prime"] = 1 - tmp_data["Binary1"]
# create classification outcome from linear predictor
if outcome == "classification":
# convert to a probability
prob = 1 / (1 + np.exp(-lp))
# generate target
tmp_data["target"] = np.where(prob <= np.random.uniform(size=n), 0, 1)
# create regression outcome
elif outcome == "regression":
# continuous outcome based on linear predictor
tmp_data["target"] = np.random.normal(lp, regression_err, size=n)
return tmp_data
__tracker.validate()
| [
"numpy.random.normal",
"numpy.ones",
"numpy.random.multivariate_normal",
"numpy.fill_diagonal",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"scipy.linalg.toeplitz",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.sin",
"numpy.arange"
] | [((3611, 3640), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed_val'}), '(seed=seed_val)\n', (3625, 3640), True, 'import numpy as np\n'), ((3765, 3795), 'numpy.array', 'np.array', (['[[2, 1.3], [1.3, 2]]'], {}), '([[2, 1.3], [1.3, 2]])\n', (3773, 3795), True, 'import numpy as np\n'), ((3849, 3897), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'sigma'], {'size': 'n'}), '(mu, sigma, size=n)\n', (3878, 3897), True, 'import numpy as np\n'), ((4476, 4521), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1.0)', 'high': '(1.0)', 'size': 'n'}), '(low=-1.0, high=1.0, size=n)\n', (4493, 4521), True, 'import numpy as np\n'), ((5377, 5400), 'numpy.fill_diagonal', 'np.fill_diagonal', (['vc', '(1)'], {}), '(vc, 1)\n', (5393, 5400), True, 'import numpy as np\n'), ((6202, 6239), 'numpy.random.normal', 'np.random.normal', (['(0)', 'surg_err'], {'size': 'n'}), '(0, surg_err, size=n)\n', (6218, 6239), True, 'import numpy as np\n'), ((6838, 6895), 'numpy.sin', 'np.sin', (['(np.pi * tmp_data.Nonlinear2 * tmp_data.Nonlinear3)'], {}), '(np.pi * tmp_data.Nonlinear2 * tmp_data.Nonlinear3)\n', (6844, 6895), True, 'import numpy as np\n'), ((8192, 8236), 'numpy.random.normal', 'np.random.normal', (['lp', 'regression_err'], {'size': 'n'}), '(lp, regression_err, size=n)\n', (8208, 8236), True, 'import numpy as np\n'), ((4623, 4653), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, 2)'}), '(size=(n, 2))\n', (4640, 4653), True, 'import numpy as np\n'), ((5333, 5364), 'numpy.ones', 'np.ones', (['(corr_vars, corr_vars)'], {}), '((corr_vars, corr_vars))\n', (5340, 5364), True, 'import numpy as np\n'), ((5511, 5530), 'scipy.linalg.toeplitz', 'toeplitz', (['vc_values'], {}), '(vc_values)\n', (5519, 5530), False, 'from scipy.linalg import toeplitz\n'), ((6392, 6417), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (6409, 6417), True, 'import numpy as np\n'), ((6778, 6823), 'numpy.exp', 'np.exp', 
(['(-6 * (tmp_data.Nonlinear1 - 0.3) ** 2)'], {}), '(-6 * (tmp_data.Nonlinear1 - 0.3) ** 2)\n', (6784, 6823), True, 'import numpy as np\n'), ((7060, 7104), 'numpy.linspace', 'np.linspace', (['linear_vars', '(1)'], {'num': 'linear_vars'}), '(linear_vars, 1, num=linear_vars)\n', (7071, 7104), True, 'import numpy as np\n'), ((7921, 7932), 'numpy.exp', 'np.exp', (['(-lp)'], {}), '(-lp)\n', (7927, 7932), True, 'import numpy as np\n'), ((8007, 8032), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (8024, 8032), True, 'import numpy as np\n'), ((4227, 4266), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, linear_vars)'}), '(size=(n, linear_vars))\n', (4243, 4266), True, 'import numpy as np\n'), ((5042, 5080), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, noise_vars)'}), '(size=(n, noise_vars))\n', (5058, 5080), True, 'import numpy as np\n'), ((5473, 5493), 'numpy.arange', 'np.arange', (['corr_vars'], {}), '(corr_vars)\n', (5482, 5493), True, 'import numpy as np\n'), ((5896, 5915), 'numpy.zeros', 'np.zeros', (['corr_vars'], {}), '(corr_vars)\n', (5904, 5915), True, 'import numpy as np\n')] |
import math
import brownie
from brownie import chain
def test_only_distributor_allowed(alice, stream):
    """A non-distributor account must be rejected by notify_reward_amount."""
    amount = 10 ** 18
    with brownie.reverts("dev: only distributor"):
        stream.notify_reward_amount(amount, {"from": alice})
def test_retrieves_reward_token(bob, stream, reward_token):
    """Notifying pulls the full reward amount into the stream contract."""
    amount = 10 ** 18
    stream.notify_reward_amount(amount, {"from": bob})
    assert reward_token.balanceOf(stream) == amount
def test_reward_rate_updates(bob, stream):
    """After a fresh notify the rate equals amount / (10-day duration)."""
    amount = 10 ** 18
    duration = 86400 * 10
    stream.notify_reward_amount(amount, {"from": bob})
    rate = stream.reward_rate()
    assert rate > 0
    assert rate == amount / duration
def test_reward_rate_updates_mid_duration(bob, stream):
    """Topping up half-way through keeps the effective rate ~constant."""
    amount = 10 ** 18
    duration = 86400 * 10
    stream.notify_reward_amount(amount, {"from": bob})
    chain.sleep(86400 * 5)  # advance half of the reward duration
    # top up so the remaining balance is a full amount again
    stream.notify_reward_amount(amount / 2, {"from": bob})
    # should be relatively close; .00001 seems about good of a heuristic
    assert math.isclose(stream.reward_rate(), amount / duration, rel_tol=0.00001)
def test_period_finish_updates(bob, stream):
    """period_finish is stamped at notify-time plus the 10-day duration."""
    tx = stream.notify_reward_amount(10 ** 18, {"from": bob})
    expected_finish = tx.timestamp + 86400 * 10
    assert stream.period_finish() == expected_finish
def test_update_last_update_time(bob, stream):
    """last_update_time is stamped with the notify transaction time."""
    tx = stream.notify_reward_amount(10 ** 18, {"from": bob})
    assert stream.last_update_time() == tx.timestamp
| [
"brownie.reverts",
"math.isclose",
"brownie.chain.sleep"
] | [((762, 784), 'brownie.chain.sleep', 'chain.sleep', (['(86400 * 5)'], {}), '(86400 * 5)\n', (773, 784), False, 'from brownie import chain\n'), ((1037, 1102), 'math.isclose', 'math.isclose', (['post_notify', '(10 ** 18 / (86400 * 10))'], {'rel_tol': '(1e-05)'}), '(post_notify, 10 ** 18 / (86400 * 10), rel_tol=1e-05)\n', (1049, 1102), False, 'import math\n'), ((115, 155), 'brownie.reverts', 'brownie.reverts', (['"""dev: only distributor"""'], {}), "('dev: only distributor')\n", (130, 155), False, 'import brownie\n')] |
import wavefront_dispatch
import random
@wavefront_dispatch.wrapper
def handle(ctx, payload):
    """Burn some CPU on a Fibonacci loop, then report custom Wavefront metrics."""
    # Fibonacci with a randomized iteration count
    prev, curr = 0, 1
    for _ in range(random.randint(800, 900)):
        prev, curr = curr, prev + curr
    # Customized metrics
    registry = wavefront_dispatch.get_registry()
    # Report Gauge
    registry.gauge("dispatch.function.wf.testgauge").set_value(200)
    # Report Counter
    registry.counter("dispatch.function.wf.testcounter").inc()
if __name__ == "__main__":
ctx = {
"secrets": {
"wavefront_server_url":"https://<INSTANCE>.wavefront.com",
"wavefront_auth_token":"<<PASSWORD>_<PASSWORD>>"}
}
payload = {}
handle(ctx, payload) | [
"wavefront_dispatch.get_registry",
"random.randint"
] | [((267, 300), 'wavefront_dispatch.get_registry', 'wavefront_dispatch.get_registry', ([], {}), '()\n', (298, 300), False, 'import wavefront_dispatch\n'), ((151, 175), 'random.randint', 'random.randint', (['(800)', '(900)'], {}), '(800, 900)\n', (165, 175), False, 'import random\n')] |
import itertools
import logging
from typing import Any, Dict, Set, Tuple
from pycoin.coins.bitcoin import Tx as pycoin_tx
from electrum_gui.common.basic.functional.require import require
from electrum_gui.common.coin import data as coin_data
from electrum_gui.common.conf import settings
from electrum_gui.common.provider import data, interfaces
from electrum_gui.common.provider.chains.btc import hardware_mixin, message_mixin
from electrum_gui.common.provider.chains.btc.clients import blockbook
from electrum_gui.common.provider.chains.btc.sdk import network, transaction
from electrum_gui.common.secret import interfaces as secret_interfaces
logger = logging.getLogger("app.chain")
class BTCProvider(interfaces.ProviderInterface, hardware_mixin.BTCHardwareMixin, message_mixin.BTCMessageMixin):
    """Provider for BTC-family chains.

    Covers address validation/derivation (P2PKH, P2WPKH, P2WPKH-P2SH),
    fee estimation, and transaction building/signing via pycoin. The pycoin
    network object and transaction constants are resolved lazily on first use.
    """
    def __init__(self, chain_info: coin_data.ChainInfo, *args, **kwargs):
        super(BTCProvider, self).__init__(chain_info, *args, **kwargs)
        # Lazily-initialized caches, populated by the properties below.
        self._network = None
        self._tx_version = None
        self._tx_op_return_size_limit = None
        self._supported_encodings = None
    @property
    def network(self) -> Any:
        """pycoin network object for this chain (cached on first access)."""
        if self._network is None:
            self._network = network.get_network_by_chain_code(self.chain_info.chain_code)
        return self._network
    @property
    def tx_version(self) -> int:
        """Transaction version used when building transactions (cached)."""
        if self._tx_version is None:
            self._tx_version = transaction.TX_VERSION
        return self._tx_version
    @property
    def tx_op_return_size_limit(self) -> int:
        """Maximum OP_RETURN payload size accepted when building txs (cached)."""
        if self._tx_op_return_size_limit is None:
            self._tx_op_return_size_limit = transaction.TX_OP_RETURN_SIZE_LIMIT
        return self._tx_op_return_size_limit
    @property
    def supported_encodings(self) -> Set[str]:
        """Address encodings this chain accepts: the chain's BIP44 purpose
        options plus its default address encoding (cached)."""
        if self._supported_encodings is None:
            self._supported_encodings = {
                *self.chain_info.bip44_purpose_options.keys(),
                self.chain_info.default_address_encoding,
            }
        return self._supported_encodings
    @property
    def client(self) -> blockbook.BlockBook:
        """BlockBook client selected for this chain."""
        return self.client_selector(instance_required=blockbook.BlockBook)
    def verify_address(self, address: str) -> data.AddressValidation:
        """Validate *address* and detect its encoding.

        :param address: address string to parse with the chain's network rules
        :return: AddressValidation; on any parse failure or unsupported
            encoding the result is marked invalid with an empty address
            and a ``None`` encoding.
        """
        is_valid, encoding = False, None
        try:
            parsed_address = self.network.parse.address(address)
            address_info = parsed_address.info() if parsed_address else {}
            address_type = address_info.get("type")
            if address_type == "p2pkh":
                encoding = "P2PKH"
            elif address_type == "p2pkh_wit":
                encoding = "P2WPKH"
            elif address_type == "p2sh":
                encoding = "P2WPKH-P2SH"  # Cannot distinguish between legacy P2SH and P2WPKH-P2SH
            # The detected encoding must also be one this chain supports.
            is_valid = encoding is not None and encoding in self.supported_encodings
            encoding = encoding if is_valid else None
        except Exception as e:
            logger.exception(f"Illegal address: {address}, error: {e}")
        address = address if is_valid else ""
        return data.AddressValidation(
            normalized_address=address,
            display_address=address,
            is_valid=is_valid,
            encoding=encoding,
        )
    def pubkey_to_address(self, verifier: secret_interfaces.VerifierInterface, encoding: str = None) -> str:
        """Derive the address for the verifier's compressed pubkey.

        :param verifier: supplies the public key via ``get_pubkey``
        :param encoding: one of ``supported_encodings``; rejected otherwise
        :return: the encoded address string
        """
        require(encoding in self.supported_encodings, f"Invalid address encoding: {encoding}")
        pubkey = verifier.get_pubkey(compressed=True)
        pubkey_hash = self.network.keys.public(pubkey).hash160(is_compressed=True)
        if encoding == "P2PKH":  # Pay To Public Key Hash
            address = self.network.address.for_p2pkh(pubkey_hash)
        elif encoding == "P2WPKH":  # Pay To Witness Public Key Hash
            address = self.network.address.for_p2pkh_wit(pubkey_hash)
        elif encoding == "P2WPKH-P2SH":  # P2WPKH nested in BIP16 P2SH
            witness_script = self.network.contract.for_p2pkh_wit(pubkey_hash)
            address = self.network.address.for_p2s(witness_script)
        else:
            raise Exception("Should not be here")
        return address
    def fill_unsigned_tx(self, unsigned_tx: data.UnsignedTx) -> data.UnsignedTx:
        """Fill in missing fee fields on *unsigned_tx*.

        Fee price falls back to the client's "normal" estimate. The fee limit
        becomes the estimated vsize when every input/output address validates;
        otherwise a placeholder vsize is used.
        """
        fee_price_per_unit = unsigned_tx.fee_price_per_unit or int(
            self.client.get_prices_per_unit_of_fee().normal.price
        )
        fee_limit = unsigned_tx.fee_limit or 0
        if unsigned_tx.inputs and unsigned_tx.outputs:
            input_validations = [self.verify_address(i.address) for i in unsigned_tx.inputs]
            output_validations = [self.verify_address(i.address) for i in unsigned_tx.outputs]
            if all(i.is_valid for i in itertools.chain(input_validations, output_validations)):
                vsize = transaction.calculate_vsize(
                    input_encodings=[i.encoding for i in input_validations],
                    output_encodings=[i.encoding for i in output_validations],
                    op_return=unsigned_tx.payload.get("op_return"),
                    op_return_size_limit=self.tx_op_return_size_limit,
                )
                # Never shrink a caller-provided limit below the estimate.
                fee_limit = max(fee_limit, vsize)
        fee_limit = fee_limit or transaction.PLACEHOLDER_VSIZE
        return unsigned_tx.clone(
            fee_limit=fee_limit,
            fee_price_per_unit=fee_price_per_unit,
        )
    def sign_transaction(
        self, unsigned_tx: data.UnsignedTx, signers: Dict[str, secret_interfaces.SignerInterface]
    ) -> data.SignedTx:
        """Build, sign and verify a transaction with the given signers.

        :param unsigned_tx: fully-populated unsigned transaction
        :param signers: address -> signer mapping used for the pycoin lookups
        :return: SignedTx with txid and raw hex
        :raises Exception: if any input remains unsigned after signing
        """
        tx = transaction.create_pycoin_tx(
            self.network,
            unsigned_tx,
            version=self.tx_version,
            op_return_size_limit=self.tx_op_return_size_limit,
        )
        tx.check()
        tx.sign(
            hash160_lookup=transaction.build_hash160_lookup(self.network, signers.values()),
            p2sh_lookup=transaction.build_p2sh_lookup(self.network, signers.values()),
        )
        self._check_tx_after_signed(tx)
        return data.SignedTx(
            txid=tx.id(),
            raw_tx=tx.as_hex(),
        )
    def _check_tx_after_signed(self, tx: pycoin_tx.Tx):
        """Raise if *tx* still has unsigned inputs; dumps debug info in dev mode."""
        unsigned_after = tx.bad_solution_count()
        if unsigned_after > 0:
            not_fully_signed_message = (
                f"{unsigned_after} TxIn items still unsigned, tx: {tx.as_hex(include_unspents=True)}"
            )
            if settings.IS_DEV:
                dump_message = transaction.debug_dump_tx(self.network, tx)
                logger.error("\n".join((not_fully_signed_message, dump_message)))
            raise Exception(not_fully_signed_message)
    def get_token_info_by_address(self, token_address: str) -> Tuple[str, str, int]:
        """Not applicable for BTC-family chains (no token contracts)."""
        raise NotImplementedError()
| [
"logging.getLogger",
"itertools.chain",
"electrum_gui.common.provider.chains.btc.sdk.transaction.debug_dump_tx",
"electrum_gui.common.basic.functional.require.require",
"electrum_gui.common.provider.data.AddressValidation",
"electrum_gui.common.provider.chains.btc.sdk.network.get_network_by_chain_code",
... | [((658, 688), 'logging.getLogger', 'logging.getLogger', (['"""app.chain"""'], {}), "('app.chain')\n", (675, 688), False, 'import logging\n'), ((3087, 3204), 'electrum_gui.common.provider.data.AddressValidation', 'data.AddressValidation', ([], {'normalized_address': 'address', 'display_address': 'address', 'is_valid': 'is_valid', 'encoding': 'encoding'}), '(normalized_address=address, display_address=address,\n is_valid=is_valid, encoding=encoding)\n', (3109, 3204), False, 'from electrum_gui.common.provider import data, interfaces\n'), ((3378, 3468), 'electrum_gui.common.basic.functional.require.require', 'require', (['(encoding in self.supported_encodings)', 'f"""Invalid address encoding: {encoding}"""'], {}), "(encoding in self.supported_encodings,\n f'Invalid address encoding: {encoding}')\n", (3385, 3468), False, 'from electrum_gui.common.basic.functional.require import require\n'), ((5555, 5691), 'electrum_gui.common.provider.chains.btc.sdk.transaction.create_pycoin_tx', 'transaction.create_pycoin_tx', (['self.network', 'unsigned_tx'], {'version': 'self.tx_version', 'op_return_size_limit': 'self.tx_op_return_size_limit'}), '(self.network, unsigned_tx, version=self.\n tx_version, op_return_size_limit=self.tx_op_return_size_limit)\n', (5583, 5691), False, 'from electrum_gui.common.provider.chains.btc.sdk import network, transaction\n'), ((1203, 1264), 'electrum_gui.common.provider.chains.btc.sdk.network.get_network_by_chain_code', 'network.get_network_by_chain_code', (['self.chain_info.chain_code'], {}), '(self.chain_info.chain_code)\n', (1236, 1264), False, 'from electrum_gui.common.provider.chains.btc.sdk import network, transaction\n'), ((6469, 6512), 'electrum_gui.common.provider.chains.btc.sdk.transaction.debug_dump_tx', 'transaction.debug_dump_tx', (['self.network', 'tx'], {}), '(self.network, tx)\n', (6494, 6512), False, 'from electrum_gui.common.provider.chains.btc.sdk import network, transaction\n'), ((4727, 4781), 'itertools.chain', 
'itertools.chain', (['input_validations', 'output_validations'], {}), '(input_validations, output_validations)\n', (4742, 4781), False, 'import itertools\n')] |
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import os, sys
from tensorboardX import SummaryWriter
import time
import numpy as np
import pprint
import socket
import pickle
from resnet import *
from kwng import *
from gaussian import *
from data_loader import *
class Trainer(object):
    """End-to-end training driver.

    Builds the model/optimizer stack from ``args``, runs train/val/test
    epochs, logs to TensorBoard, and checkpoints whenever the validation
    loss improves.
    """
    def __init__(self, args):
        torch.manual_seed(args.seed)
        self.args = args
        self.device = assign_device(args.device)
        self.log_dir = make_log_dir(args)
        if args.log_in_file:
            # Redirect stdout/stderr into the run's log file (line-buffered).
            self.log_file = open(os.path.join(self.log_dir, 'log.txt'), 'w', buffering=1)
            sys.stdout = self.log_file
            sys.stderr = self.log_file
        print("Process id: " + str(os.getpid()) + " | hostname: " + socket.gethostname())
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(vars(args))
        print('Creating writer')
        self.writer = SummaryWriter(self.log_dir)
        print('Loading data')
        if not os.path.isdir(args.data_dir):
            os.makedirs(args.data_dir, exist_ok=True)
        self.data_loaders = get_data_loader(args)
        self.total_epochs = self.args.total_epochs
        print('==> Building model..')
        self.build_model()

    def build_model(self):
        """Instantiate network, criterion, optimizer, scheduler and wrapper."""
        self.net = get_network(self.args)
        self.net = self.net.to(self.device)
        if self.args.dtype == '64':
            self.net = self.net.double()
        if self.device == 'cuda':
            self.net = torch.nn.DataParallel(self.net)
            cudnn.benchmark = True
        self.init_train_values()
        self.criterion = get_criterion(self.args)
        self.optimizer = get_optimizer(self.args, self.net.parameters(), self.net)
        self.scheduler = get_scheduler(self.args, self.optimizer)
        self.wrapped_optimizer = get_wrapped_optimizer(
            self.args, self.optimizer, self.criterion, self.net, device=self.device)

    def train(self):
        """Run train+val for ``total_epochs`` epochs; return final accuracies."""
        print(' Starting training')
        self.init_train_values()
        for epoch in range(self.start_epoch, self.start_epoch + self.total_epochs):
            train_acc = self.epoch_pass(epoch, 'train')
            val_acc = self.epoch_pass(epoch, 'val')
            if self.args.use_scheduler:
                self.scheduler.step()
        return train_acc, val_acc

    def test(self):
        """Run a single evaluation pass over the test set."""
        print('Starting test')
        test_acc = self.epoch_pass(0, 'test')
        return test_acc

    def init_train_values(self):
        """Initialize (or restore from checkpoint) the training counters."""
        if self.args.resume:
            # Load checkpoint.
            print('==> Resuming from checkpoint..')
            assert os.path.isdir(self.log_dir + '/checkpoint'), 'Error: no checkpoint directory found!'
            checkpoint = torch.load(self.log_dir + '/checkpoint/ckpt.t7')
            self.net.load_state_dict(checkpoint['net'])
            self.best_acc = checkpoint['acc']
            self.best_loss = checkpoint['loss']
            self.start_epoch = checkpoint['epoch']
            self.total_iters = checkpoint['total_iters']
        else:
            self.best_acc = 0  # best test accuracy
            self.start_epoch = 0  # start from epoch 0 or last checkpoint epoch
            self.total_iters = 0
            self.best_loss = torch.tensor(np.inf)

    def epoch_pass(self, epoch, phase):
        """Run one pass over ``self.data_loaders[phase]``.

        During 'train' an optimizer step is taken per batch; during 'val' a
        checkpoint is saved when the average loss improves.
        Returns the accuracy in percent.
        """
        print('Epoch: ' + str(epoch) + ' | ' + phase + ' phase')
        # BUG FIX: the original called self.net.train() unconditionally right
        # after selecting the mode, silently re-enabling dropout/batch-norm
        # training behavior during evaluation. Set the mode once, from phase.
        self.net.train(phase == 'train')
        loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(self.data_loaders[phase]):
            tic = time.time()
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            if self.args.dtype == '64':
                inputs = inputs.double()
            if phase == "train":
                self.total_iters += 1
                loss_step, predicted = self.wrapped_optimizer.step(inputs, targets)
            loss_step, predicted = self.wrapped_optimizer.eval(inputs, targets)
            loss += loss_step
            running_loss = loss / (batch_idx + 1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            acc = 100. * correct / total
            if phase == "train":
                self.writer.add_scalars('data/train_loss_step', {"loss_step": loss_step, "loss_averaged": running_loss}, self.total_iters)
            toc = time.time()
            print(' Loss: ' + str(round(running_loss, 3)) + ' | Acc: ' + str(acc) + ' ' + '(' + str(correct) + '/' + str(total) + ')' + ' time: ' + str(toc - tic) + ' iter: ' + str(batch_idx))
        self.writer.add_scalars('data/total_stats_' + phase, {"loss": loss / (batch_idx + 1), "correct": acc}, epoch)
        # Save checkpoint when the validation loss improves.
        if phase == 'val':
            avg_loss = loss / (batch_idx + 1)
            if avg_loss < self.best_loss:
                save_checkpoint(self.writer.logdir, acc, avg_loss, epoch, self.total_iters, self.wrapped_optimizer.net)
                self.best_loss = avg_loss
        return acc
return acc
def save_checkpoint(checkpoint_dir, acc, loss, epoch, total_iters, net):
    """Persist training progress to ``<checkpoint_dir>/checkpoint/ckpt.t7``.

    :param checkpoint_dir: base directory for the checkpoint subfolder
    :param acc: accuracy at checkpoint time
    :param loss: loss at checkpoint time
    :param epoch: epoch index
    :param total_iters: total optimizer iterations so far
    :param net: model whose state_dict is saved
    """
    print('Saving..')
    state = {
        'net': net.state_dict(),
        'acc': acc,
        'loss': loss,
        'epoch': epoch,
        'total_iters': total_iters,
    }
    # makedirs with exist_ok avoids the isdir/mkdir race and creates parents.
    os.makedirs(checkpoint_dir + '/checkpoint', exist_ok=True)
    torch.save(state, checkpoint_dir + '/checkpoint/ckpt.t7')
def assign_device(device):
    """Map an integer device id to a torch device string.

    device > -1  -> 'cuda:<device>' when CUDA is available, else 'cpu'
    device == -1 -> 'cuda' (default CUDA device)
    device == -2 -> 'cpu'
    Any other value is passed through unchanged (original behavior).
    """
    if device > -1:
        # Original re-checked ``device > -1`` here — redundant, removed.
        return 'cuda:' + str(device) if torch.cuda.is_available() else 'cpu'
    if device == -1:
        return 'cuda'
    if device == -2:
        return 'cpu'
    return device
def make_log_dir(args):
    """Resolve (and create if missing) the log directory for this run.

    With ``args.with_sacred`` the flat ``<log_dir>_<log_name>`` layout is
    used; otherwise ``<log_dir>/<log_name>``.
    """
    if args.with_sacred:
        log_dir = args.log_dir + '_' + args.log_name
    else:
        log_dir = os.path.join(args.log_dir, args.log_name)
    # makedirs creates missing parents and tolerates an existing directory
    # (os.mkdir raced and failed when the parent was absent).
    os.makedirs(log_dir, exist_ok=True)
    return log_dir
def get_dtype(args):
    """Translate ``args.dtype`` ('32' or '64') into a torch dtype.

    :raises ValueError: for any other dtype string (the original silently
        returned None, which surfaced later as an obscure error).
    """
    if args.dtype == '32':
        return torch.float32
    if args.dtype == '64':
        return torch.float64
    raise ValueError("Unsupported dtype: %r (expected '32' or '64')" % args.dtype)
def get_network(args):
    """Instantiate the model named by ``args.network``.

    :raises ValueError: for an unknown network name (the original silently
        returned None; consistent with get_dtype's explicit failure).
    """
    if args.network == 'ResNet18':
        return ResNet18(num_classes=args.num_classes)
    if args.network == 'ResNet18IllCond':
        return ResNet18IllCond(num_classes=args.num_classes)
    raise ValueError('Unknown network: ' + str(args.network))
def get_kernel(args, device='cuda'):
    """Build the kernel requested by ``args.kernel`` on the given device."""
    torch_dtype = get_dtype(args)
    if args.kernel == 'gaussian':
        return Gaussian(1, args.log_bandwidth, dtype=torch_dtype, device=device)
def get_wrapped_optimizer(args, optimizer, criterion, net, device='cuda'):
    """Wrap ``optimizer`` according to the gradient estimator named in args."""
    if args.estimator == 'EuclideanGradient':
        return OptimizerWrapper(optimizer, criterion, net, args.clip_grad)
    if args.estimator == 'KWNG':
        base_kernel = get_kernel(args, device=device)
        kwng_estimator = KWNG(base_kernel, eps=args.epsilon,
                              num_basis=args.num_basis,
                              with_diag_mat=args.with_diag_mat)
        return KWNGWrapper(optimizer, criterion, net, args.clip_grad,
                           kwng_estimator, args.dumping_freq,
                           args.reduction_coeff, args.min_red, args.max_red)
def get_data_loader(args):
    """Build the dataset loader for ``args.dataset``.

    Side effect: sets ``args.num_classes`` to match the chosen dataset.
    """
    if args.dataset == 'cifar10':
        args.num_classes = 10
        return CIFARLoader(args.data_dir, args.b_size)
    if args.dataset == 'cifar100':
        args.num_classes = 100
        return CIFAR100Loader(args.data_dir, args.b_size)
def get_optimizer(args, params, net):
    """Build the optimizer named by ``args.optimizer`` over ``params``.

    ``net`` is kept in the signature for interface compatibility.
    :raises ValueError: for an unknown optimizer name (was a silent None).
    """
    if args.optimizer == 'sgd':
        return optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    raise ValueError('Unknown optimizer: ' + str(args.optimizer))
def get_scheduler(args, optimizer):
    """Build a MultiStepLR scheduler, or None for any other scheduler name.

    Without an explicit ``args.milestone`` list, the LR decays at 50% and
    75% of the total epochs.
    """
    if args.scheduler != 'MultiStepLR':
        return None
    if args.milestone is None:
        milestones = [int(args.total_epochs * 0.5), int(args.total_epochs * 0.75)]
    else:
        milestones = [int(m) for m in args.milestone.split(',')]
    return torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=args.lr_decay)
def get_criterion(args):
    """Return the loss function named by ``args.criterion`` (None if unknown)."""
    factories = {'cross_entropy': nn.CrossEntropyLoss}
    factory = factories.get(args.criterion)
    return factory() if factory is not None else None
| [
"torch.manual_seed",
"torch.optim.SGD",
"tensorboardX.SummaryWriter",
"torch.nn.CrossEntropyLoss",
"os.makedirs",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.load",
"os.path.join",
"torch.nn.DataParallel",
"torch.tensor",
"os.path.isdir",
"torch.cuda.is_available",
"pprint.PrettyPrinter",... | [((4759, 4816), 'torch.save', 'torch.save', (['state', "(checkpoint_dir + '/checkpoint/ckpt.t7')"], {}), "(state, checkpoint_dir + '/checkpoint/ckpt.t7')\n", (4769, 4816), False, 'import torch\n'), ((410, 438), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (427, 438), False, 'import torch\n'), ((796, 826), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (816, 826), False, 'import pprint\n'), ((895, 922), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['self.log_dir'], {}), '(self.log_dir)\n', (908, 922), False, 'from tensorboardX import SummaryWriter\n'), ((4669, 4714), 'os.path.isdir', 'os.path.isdir', (["(checkpoint_dir + '/checkpoint')"], {}), "(checkpoint_dir + '/checkpoint')\n", (4682, 4714), False, 'import os, sys\n'), ((4717, 4757), 'os.mkdir', 'os.mkdir', (["(checkpoint_dir + '/checkpoint')"], {}), "(checkpoint_dir + '/checkpoint')\n", (4725, 4757), False, 'import os, sys\n'), ((5142, 5183), 'os.path.join', 'os.path.join', (['args.log_dir', 'args.log_name'], {}), '(args.log_dir, args.log_name)\n', (5154, 5183), False, 'import os, sys\n'), ((5191, 5213), 'os.path.isdir', 'os.path.isdir', (['log_dir'], {}), '(log_dir)\n', (5204, 5213), False, 'import os, sys\n'), ((5217, 5234), 'os.mkdir', 'os.mkdir', (['log_dir'], {}), '(log_dir)\n', (5225, 5234), False, 'import os, sys\n'), ((6540, 6630), 'torch.optim.SGD', 'optim.SGD', (['params'], {'lr': 'args.lr', 'momentum': 'args.momentum', 'weight_decay': 'args.weight_decay'}), '(params, lr=args.lr, momentum=args.momentum, weight_decay=args.\n weight_decay)\n', (6549, 6630), True, 'import torch.optim as optim\n'), ((7152, 7173), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7171, 7173), True, 'import torch.nn as nn\n'), ((957, 985), 'os.path.isdir', 'os.path.isdir', (['args.data_dir'], {}), '(args.data_dir)\n', (970, 985), False, 'import os, sys\n'), ((990, 1031), 'os.makedirs', 
'os.makedirs', (['args.data_dir'], {'exist_ok': '(True)'}), '(args.data_dir, exist_ok=True)\n', (1001, 1031), False, 'import os, sys\n'), ((1376, 1407), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.net'], {}), '(self.net)\n', (1397, 1407), False, 'import torch\n'), ((2323, 2366), 'os.path.isdir', 'os.path.isdir', (["(self.log_dir + '/checkpoint')"], {}), "(self.log_dir + '/checkpoint')\n", (2336, 2366), False, 'import os, sys\n'), ((2422, 2470), 'torch.load', 'torch.load', (["(self.log_dir + '/checkpoint/ckpt.t7')"], {}), "(self.log_dir + '/checkpoint/ckpt.t7')\n", (2432, 2470), False, 'import torch\n'), ((2848, 2868), 'torch.tensor', 'torch.tensor', (['np.inf'], {}), '(np.inf)\n', (2860, 2868), False, 'import torch\n'), ((3257, 3268), 'time.time', 'time.time', ([], {}), '()\n', (3266, 3268), False, 'import time\n'), ((3897, 3908), 'time.time', 'time.time', ([], {}), '()\n', (3906, 3908), False, 'import time\n'), ((6968, 7063), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': 'milestone', 'gamma': 'args.lr_decay'}), '(optimizer, milestones=milestone, gamma\n =args.lr_decay)\n', (7004, 7063), False, 'import torch\n'), ((588, 625), 'os.path.join', 'os.path.join', (['self.log_dir', '"""log.txt"""'], {}), "(self.log_dir, 'log.txt')\n", (600, 625), False, 'import os, sys\n'), ((767, 787), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (785, 787), False, 'import socket\n'), ((4893, 4918), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4916, 4918), False, 'import torch\n'), ((734, 745), 'os.getpid', 'os.getpid', ([], {}), '()\n', (743, 745), False, 'import os, sys\n')] |
"""
Copyright 2021 <NAME>
"""
# built-in
import argparse
from typing import List, Generator, Optional, Tuple, Union
# dual_tape
import dual_tape as dt
from . import assembler
from . import error
from . import vm
from .log import enable_log
class DualTapeAPI(error.DualTapeError):
    """Error type raised by the console/API entry points in this module."""
    @classmethod
    def hit_timeout(cls):
        # Factory for the error raised when the --timeout budget is exhausted.
        return cls("Hit timeout!")
def dual_tape() -> None:
    """
    info: Console Interface into dual_tape.
    :return: None
    """
    try:
        arg_parser = argparse.ArgumentParser(description="dual_tape")
        arg_parser.add_argument(
            "file", type=str, action="store", help="path to dual_tape script")
        arg_parser.add_argument(
            "-a", "--author", default=False, action="store_true",
            help="get author of dual_tape")
        arg_parser.add_argument(
            "-l", "--log", default=False, action="store_true",
            help="enables debug log")
        arg_parser.add_argument(
            "-v", "--version", default=False, action="store_true",
            help="get version of dual_tape")
        arg_parser.add_argument(
            "--timeout", default=-1, type=int,
            help="max number of instructions that can run")
        args = arg_parser.parse_args()
        if args.author:
            print(dt.AUTHOR)
        if args.version:
            print(f"v{dt.MAJOR}.{dt.MINOR}.{dt.MAINTENANCE}")
        # Step the VM one state at a time so the timeout can interrupt it.
        for step, _ in enumerate(dual_tape_api(file=args.file, log=args.log)):
            if step == args.timeout:
                raise DualTapeAPI.hit_timeout()
    except error.DualTapeError as e:
        print(f"\nERROR: {e}", flush=True)
    except KeyboardInterrupt:
        print("\nKeyboard Interrupt!", flush=True)
def dual_tape_api(file: str,
                  inputs: Optional[Union[Tuple[str, ...], List[str]]] = None,
                  sys_output: bool = True,
                  catch_output: bool = False,
                  log: bool = False) -> Generator[vm.VMState, None, None]:
    """
    info: Programmatic entry point into dual_tape.
    :param: inputs: Optional[Union[Tuple[str, ...], List[str]]]
    :param: sys_output: bool
    :param: catch_output: bool
    :param: log: bool
    :return: Generator[vm.VMState, None, None]
    """
    if log:
        enable_log()
    # Assemble the script, then run it on the VM as a lazy state generator.
    entry_point, instructions, data = assembler.assembler(file=file)
    vm_states = vm.vm(entry_point, instructions, data, inputs, sys_output, catch_output)
    return vm_states
| [
"argparse.ArgumentParser"
] | [((493, 541), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""dual_tape"""'}), "(description='dual_tape')\n", (516, 541), False, 'import argparse\n')] |
"""
https://stackoverflow.com/questions/4827207/how-do-i-filter-the-pyqt-qcombobox-items-based-on-the-text-input
"""
import sys
from Qt import QtCore
from Qt import QtWidgets
class ExtendedCombo(QtWidgets.QComboBox):
    """Editable combo box whose popup filters its items as the user types.

    An editable QComboBox backed by a QSortFilterProxyModel feeding a
    QCompleter (case-insensitive substring filtering).
    """

    def __init__(self, parent=None):
        super(ExtendedCombo, self).__init__(parent)
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.setEditable(True)
        self.completer = QtWidgets.QCompleter(self)
        # always show all completions
        self.completer.setCompletionMode(QtWidgets.QCompleter.UnfilteredPopupCompletion)
        self.pFilterModel = QtWidgets.QSortFilterProxyModel(self)
        self.pFilterModel.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.completer.setPopup(self.view())
        self.setCompleter(self.completer)
        # BUG FIX: the original used textEdited[unicode], a Python 2 idiom;
        # `unicode` does not exist on Python 3 and raises NameError here.
        self.lineEdit().textEdited[str].connect(self.pFilterModel.setFilterFixedString)
        self.completer.activated.connect(self.setTextIfCompleterIsClicked)

    def setModel(self, model):
        """Install *model* on the combo, the filter proxy, and the completer."""
        super(ExtendedCombo, self).setModel(model)
        self.pFilterModel.setSourceModel(model)
        self.completer.setModel(self.pFilterModel)

    def setModelColumn(self, column):
        """Point the completer and the filter proxy at *column*."""
        self.completer.setCompletionColumn(column)
        self.pFilterModel.setFilterKeyColumn(column)
        super(ExtendedCombo, self).setModelColumn(column)

    def view(self):
        """Return the completer popup used as this combo's view."""
        return self.completer.popup()

    def index(self):
        """Return the currently selected row index."""
        return self.currentIndex()

    def setTextIfCompleterIsClicked(self, text):
        """When a completion entry is clicked, select the matching combo item."""
        if text:
            index = self.findText(text)
            self.setCurrentIndex(index)
def main(argv):
    """Demo: show a filterable combo box populated with a few sample words."""
    app = QtWidgets.QApplication(argv)
    model = QtWidgets.QStandardItemModel()
    for row, word in enumerate(['hola', 'adios', 'hello', 'good bye']):
        model.setItem(row, 0, QtWidgets.QStandardItem(word))
    combo = ExtendedCombo()
    combo.setModel(model)
    combo.setModelColumn(0)
    combo.show()
    sys.exit(app.exec_())
if __name__ == "__main__":
main(sys.argv)
| [
"Qt.QtWidgets.QCompleter",
"Qt.QtWidgets.QApplication",
"Qt.QtWidgets.QSortFilterProxyModel",
"Qt.QtWidgets.QStandardItem",
"Qt.QtWidgets.QStandardItemModel"
] | [((1646, 1674), 'Qt.QtWidgets.QApplication', 'QtWidgets.QApplication', (['argv'], {}), '(argv)\n', (1668, 1674), False, 'from Qt import QtWidgets\n'), ((1688, 1718), 'Qt.QtWidgets.QStandardItemModel', 'QtWidgets.QStandardItemModel', ([], {}), '()\n', (1716, 1718), False, 'from Qt import QtWidgets\n'), ((416, 442), 'Qt.QtWidgets.QCompleter', 'QtWidgets.QCompleter', (['self'], {}), '(self)\n', (436, 442), False, 'from Qt import QtWidgets\n'), ((599, 636), 'Qt.QtWidgets.QSortFilterProxyModel', 'QtWidgets.QSortFilterProxyModel', (['self'], {}), '(self)\n', (630, 636), False, 'from Qt import QtWidgets\n'), ((1823, 1852), 'Qt.QtWidgets.QStandardItem', 'QtWidgets.QStandardItem', (['word'], {}), '(word)\n', (1846, 1852), False, 'from Qt import QtWidgets\n')] |
from distutils.core import setup, Extension

# Build definition for the cGeo C extension module.
c_geo_extension = Extension("cGeo", sources=["cGeo.c"])

setup(name="cGeo",
      version="0.1",
      author="<NAME>",
      description="Fast geometric functionality.",
      ext_modules=[c_geo_extension])
"distutils.core.Extension",
"distutils.core.setup"
] | [((52, 89), 'distutils.core.Extension', 'Extension', (['"""cGeo"""'], {'sources': "['cGeo.c']"}), "('cGeo', sources=['cGeo.c'])\n", (61, 89), False, 'from distutils.core import setup, Extension\n'), ((93, 213), 'distutils.core.setup', 'setup', ([], {'name': '"""cGeo"""', 'version': '"""0.1"""', 'author': '"""<NAME>"""', 'description': '"""Fast geometric functionality."""', 'ext_modules': '[cGeo]'}), "(name='cGeo', version='0.1', author='<NAME>', description=\n 'Fast geometric functionality.', ext_modules=[cGeo])\n", (98, 213), False, 'from distutils.core import setup, Extension\n')] |
import json
from codegen import json_definitions as jd
from codegen import json_writer as jw
from codegen import fblas_routine
from codegen import fblas_types
import codegen.generator_definitions as gd
from codegen.fblas_helper import FBLASHelper
import logging
import os
import jinja2
from typing import List
class HostAPICodegen:
    """Generates host-side OpenCL code and the companion JSON description
    for a set of FBLAS routines.

    For every routine the matching ``_codegen_<blas_name>`` method is
    dispatched via ``getattr``; each one renders the kernel template plus
    the read/write helper kernels into ``<output_path>/<user_name>.cl``
    and returns the JSON metadata describing the generated helpers.
    """

    # Destination directory for all generated files.
    _output_path = ""

    def __init__(self, output_path: str):
        self._output_path = output_path

    def generateRoutines(self, routines: List[fblas_routine.FBLASRoutine]):
        """
        Generates the code for the given routines and writes a JSON summary
        (generated_routines.json) into the output path.

        :param routines: routines to generate code for
        :return: None
        """
        json_routines = []
        for routine_id, routine in enumerate(routines):
            print("Generating: " + routine.user_name)
            # Dispatch on the BLAS routine name (e.g. _codegen_dot).
            codegen = getattr(self, "_codegen_" + routine.blas_name)
            json_routines.append(codegen(routine, routine_id))
        # os.path.join keeps this consistent with the per-routine .cl paths;
        # the previous plain concatenation dropped the path separator when
        # output_path had no trailing slash.
        jw.write_to_file(os.path.join(self._output_path, "generated_routines.json"),
                         {"routine": json_routines})

    def _write_file(self, path, content, append=False):
        """Writes (or appends) *content* to *path*, announcing the file."""
        print("Generating file: " + path)
        with open(path, "a" if append else "w") as f:
            if append:
                f.write("\n")
            f.write(content)

    def _read_template_file(self, path):
        """Loads a Jinja2 template from the package's templates directory."""
        templates = os.path.join(os.path.dirname(__file__), "../../templates")
        loader = jinja2.FileSystemLoader(searchpath=templates)
        logging.basicConfig()
        logger = logging.getLogger('logger')
        # Undefined template variables are logged instead of failing silently.
        logger = jinja2.make_logging_undefined(logger=logger, base=jinja2.Undefined)
        env = jinja2.Environment(loader=loader, undefined=logger)
        env.lstrip_blocks = True
        env.trim_blocks = True
        return env.get_template(path)

    def _codegen_level1(self, routine, id, kernel_template, out_channel_base,
                        out_channel_key, write_template, write_channel_key,
                        write_helper_base, generated_write_key):
        """Shared code generation for streaming Level-1 routines (dot, axpy).

        Renders the routine kernel plus the x/y read helpers and the output
        write helper into a single .cl file, and returns the JSON entries
        describing the generated helpers.
        """
        chan_in_x = gd.CHANNEL_IN_VECTOR_X_BASE_NAME + str(id)
        chan_in_y = gd.CHANNEL_IN_VECTOR_Y_BASE_NAME + str(id)
        chan_out = out_channel_base + str(id)
        output_path = os.path.join(self._output_path, routine.user_name + ".cl")

        # Routine kernel.
        template = self._read_template_file(kernel_template)
        channels = {"channel_in_vector_x": chan_in_x,
                    "channel_in_vector_y": chan_in_y,
                    out_channel_key: chan_out}
        self._write_file(output_path, template.render(routine=routine, channels=channels))

        # Helper that streams vector x into the kernel.
        template = self._read_template_file("helpers/" + gd.TEMPLATE_READ_VECTOR_X)
        helper_read_x = gd.HELPER_READ_VECTOR_X_BASE_NAME + str(id)
        self._write_file(output_path,
                         template.render(helper_name=helper_read_x, helper=routine,
                                         channels={"channel_out_vector": chan_in_x}),
                         append=True)

        # Helper that streams vector y into the kernel.
        template = self._read_template_file("helpers/" + gd.TEMPLATE_READ_VECTOR_Y)
        helper_read_y = gd.HELPER_READ_VECTOR_Y_BASE_NAME + str(id)
        self._write_file(output_path,
                         template.render(helper_name=helper_read_y, helper=routine,
                                         channels={"channel_out_vector": chan_in_y}),
                         append=True)

        # Helper that drains the kernel output (scalar for dot, vector for axpy).
        template = self._read_template_file("helpers/" + write_template)
        helper_write = write_helper_base + str(id)
        self._write_file(output_path,
                         template.render(helper_name=helper_write, helper=routine,
                                         channels={write_channel_key: chan_out}),
                         append=True)

        # JSON metadata (named json_entry to avoid shadowing the json module).
        json_entry = {}
        jw.add_commons(json_entry, routine)
        jw.add_incx(json_entry, routine)
        jw.add_incy(json_entry, routine)
        jw.add_item(json_entry, jd.GENERATED_READ_VECTOR_X, helper_read_x)
        jw.add_item(json_entry, jd.GENERATED_READ_VECTOR_Y, helper_read_y)
        jw.add_item(json_entry, generated_write_key, helper_write)
        return json_entry

    def _codegen_dot(self, routine: fblas_routine.FBLASRoutine, id: int):
        """dot: reads vectors x and y, writes a scalar result."""
        return self._codegen_level1(
            routine, id,
            kernel_template="1/dot.cl",
            out_channel_base=gd.CHANNEL_OUT_SCALAR_BASE_NAME,
            out_channel_key="channel_out_scalar",
            write_template=gd.TEMPLATE_WRITE_SCALAR,
            write_channel_key="channel_in_scalar",
            write_helper_base=gd.HELPER_WRITE_SCALAR_BASE_NAME,
            generated_write_key=jd.GENERATED_WRITE_SCALAR)

    def _codegen_axpy(self, routine: fblas_routine.FBLASRoutine, id: int):
        """axpy: reads vectors x and y, writes a vector result."""
        return self._codegen_level1(
            routine, id,
            kernel_template="1/axpy.cl",
            out_channel_base=gd.CHANNEL_OUT_VECTOR_BASE_NAME,
            out_channel_key="channel_out_vector",
            write_template=gd.TEMPLATE_WRITE_VECTOR,
            write_channel_key="channel_in_vector",
            write_helper_base=gd.HELPER_WRITE_VECTOR_BASE_NAME,
            generated_write_key=jd.GENERATED_WRITE_VECTOR)
| [
"logging.basicConfig",
"logging.getLogger",
"codegen.json_writer.add_commons",
"codegen.json_writer.add_incy",
"jinja2.Environment",
"codegen.json_writer.add_item",
"codegen.json_writer.add_incx",
"codegen.json_writer.write_to_file",
"os.path.dirname",
"jinja2.make_logging_undefined",
"jinja2.Fi... | [((1100, 1177), 'codegen.json_writer.write_to_file', 'jw.write_to_file', (["(self._output_path + 'generated_routines.json')", 'json_content'], {}), "(self._output_path + 'generated_routines.json', json_content)\n", (1116, 1177), True, 'from codegen import json_writer as jw\n'), ((1558, 1603), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', ([], {'searchpath': 'templates'}), '(searchpath=templates)\n', (1581, 1603), False, 'import jinja2\n'), ((1613, 1634), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (1632, 1634), False, 'import logging\n'), ((1652, 1679), 'logging.getLogger', 'logging.getLogger', (['"""logger"""'], {}), "('logger')\n", (1669, 1679), False, 'import logging\n'), ((1697, 1764), 'jinja2.make_logging_undefined', 'jinja2.make_logging_undefined', ([], {'logger': 'logger', 'base': 'jinja2.Undefined'}), '(logger=logger, base=jinja2.Undefined)\n', (1726, 1764), False, 'import jinja2\n'), ((1780, 1831), 'jinja2.Environment', 'jinja2.Environment', ([], {'loader': 'loader', 'undefined': 'logger'}), '(loader=loader, undefined=logger)\n', (1798, 1831), False, 'import jinja2\n'), ((3886, 3915), 'codegen.json_writer.add_commons', 'jw.add_commons', (['json', 'routine'], {}), '(json, routine)\n', (3900, 3915), True, 'from codegen import json_writer as jw\n'), ((3924, 3950), 'codegen.json_writer.add_incx', 'jw.add_incx', (['json', 'routine'], {}), '(json, routine)\n', (3935, 3950), True, 'from codegen import json_writer as jw\n'), ((3959, 3985), 'codegen.json_writer.add_incy', 'jw.add_incy', (['json', 'routine'], {}), '(json, routine)\n', (3970, 3985), True, 'from codegen import json_writer as jw\n'), ((3994, 4059), 'codegen.json_writer.add_item', 'jw.add_item', (['json', 'jd.GENERATED_READ_VECTOR_X', 'helper_name_read_x'], {}), '(json, jd.GENERATED_READ_VECTOR_X, helper_name_read_x)\n', (4005, 4059), True, 'from codegen import json_writer as jw\n'), ((4068, 4133), 'codegen.json_writer.add_item', 'jw.add_item', (['json', 
'jd.GENERATED_READ_VECTOR_Y', 'helper_name_read_y'], {}), '(json, jd.GENERATED_READ_VECTOR_Y, helper_name_read_y)\n', (4079, 4133), True, 'from codegen import json_writer as jw\n'), ((4142, 4212), 'codegen.json_writer.add_item', 'jw.add_item', (['json', 'jd.GENERATED_WRITE_SCALAR', 'helper_name_write_scalar'], {}), '(json, jd.GENERATED_WRITE_SCALAR, helper_name_write_scalar)\n', (4153, 4212), True, 'from codegen import json_writer as jw\n'), ((6189, 6218), 'codegen.json_writer.add_commons', 'jw.add_commons', (['json', 'routine'], {}), '(json, routine)\n', (6203, 6218), True, 'from codegen import json_writer as jw\n'), ((6227, 6253), 'codegen.json_writer.add_incx', 'jw.add_incx', (['json', 'routine'], {}), '(json, routine)\n', (6238, 6253), True, 'from codegen import json_writer as jw\n'), ((6262, 6288), 'codegen.json_writer.add_incy', 'jw.add_incy', (['json', 'routine'], {}), '(json, routine)\n', (6273, 6288), True, 'from codegen import json_writer as jw\n'), ((6297, 6362), 'codegen.json_writer.add_item', 'jw.add_item', (['json', 'jd.GENERATED_READ_VECTOR_X', 'helper_name_read_x'], {}), '(json, jd.GENERATED_READ_VECTOR_X, helper_name_read_x)\n', (6308, 6362), True, 'from codegen import json_writer as jw\n'), ((6371, 6436), 'codegen.json_writer.add_item', 'jw.add_item', (['json', 'jd.GENERATED_READ_VECTOR_Y', 'helper_name_read_y'], {}), '(json, jd.GENERATED_READ_VECTOR_Y, helper_name_read_y)\n', (6382, 6436), True, 'from codegen import json_writer as jw\n'), ((6445, 6515), 'codegen.json_writer.add_item', 'jw.add_item', (['json', 'jd.GENERATED_WRITE_VECTOR', 'helper_name_write_vector'], {}), '(json, jd.GENERATED_WRITE_VECTOR, helper_name_write_vector)\n', (6456, 6515), True, 'from codegen import json_writer as jw\n'), ((1495, 1520), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1510, 1520), False, 'import os\n')] |
# coding=utf-8
"""Batch convert the world traj in actev to carla traj."""
import argparse
import os
from glob import glob
from tqdm import tqdm
import sys
if sys.version_info > (3, 0):
import subprocess as commands
else:
import commands
parser = argparse.ArgumentParser()
# Directory with world-coordinate pedestrian trajectory .txt files (one per video).
parser.add_argument("traj_world_path")
# Optional directory with world-coordinate vehicle trajectories (ActEV only).
parser.add_argument("--traj_vehicle_world_path", default=None)
# Output directory for the converted carla pedestrian trajectories.
parser.add_argument("save_carla_traj_path")
# Output directory for converted vehicle trajectories; required when
# --traj_vehicle_world_path is given.
parser.add_argument("--save_carla_vehicle_path", default=None)
# Per-scene ActEV calibration: rotation angles (presumably degrees — TODO
# confirm against plot_traj_carla.py), a uniform scale, and the carla-map
# origin [x, y, z]. Keys are 4-character ActEV scene ids (see get_scene).
calibrations = {
    "0000": {
        "world_rotate": 320,
        "carla_rotate": 130,
        "scale": 1.0,
        "origin": [3.5, -48.0, 0.3]
    },
    "0400": {
        "world_rotate": 100,
        "carla_rotate": 153,
        "scale": 1.0,
        "origin": [-10.0, 58.0, 0.5]
    },
    "0401": {
        "world_rotate": 120,
        "carla_rotate": 135,
        "scale": 1.0,
        "origin": [-48.0, 24.0, 0.5]
    },
    "0500": {
        "world_rotate": 90,
        "carla_rotate": 179,
        "scale": 1.0,
        "origin": [-65.5, -75.5, 0.1]
    },
}
# Zara: single calibration used when no vehicle trajectories are supplied.
calibration = {
    "world_rotate": 270,
    "carla_rotate": -3.04,
    "scale": 1.2,
    "origin": [-44.0511921243, -79.6225002047, 0.],
}
def get_scene(videoname_):
  """Extract the 4-character ActEV scene id from a videoname."""
  after_marker = videoname_.split("_S_")[-1]
  return after_marker.split("_")[0][:4]
if __name__ == "__main__":
args = parser.parse_args()
# all files
ped_traj_files = glob(os.path.join(args.traj_world_path, "*.txt"))
if args.traj_vehicle_world_path is not None:
assert args.save_carla_vehicle_path is not None
if not os.path.exists(args.save_carla_vehicle_path):
os.makedirs(args.save_carla_vehicle_path)
if not os.path.exists(args.save_carla_traj_path):
os.makedirs(args.save_carla_traj_path)
script_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "plot_traj_carla.py")
assert os.path.exists(script_path), script_path
for ped_traj_file in tqdm(ped_traj_files):
filename = os.path.splitext(os.path.basename(ped_traj_file))[0]
target_ped_file = os.path.join(
args.save_carla_traj_path, "%s.txt" % filename)
if args.traj_vehicle_world_path is None:
output = commands.getoutput("python3 %s %s 0 %f %f %f %f --world_rotate"
" %f --scale %f --save_carla_traj_file %s" % (
script_path, ped_traj_file,
calibration["origin"][0],
calibration["origin"][1],
calibration["origin"][2],
calibration["carla_rotate"],
calibration["world_rotate"],
calibration["scale"],
target_ped_file))
else:
scene = get_scene(filename)
if scene == "0002":
continue
vehicle_traj_file = os.path.join(args.traj_vehicle_world_path,
"%s.txt" % filename)
target_vehicle_file = os.path.join(args.save_carla_vehicle_path,
"%s.txt" % filename)
cmd = "python3 %s %s 0 %f %f %f %f --world_rotate" \
" %f --scale %f --save_carla_traj_file %s" \
" --vehicle_world_traj_file %s" \
" --save_vehicle_carla_traj_file %s" % (
script_path, ped_traj_file,
calibrations[scene]["origin"][0],
calibrations[scene]["origin"][1],
calibrations[scene]["origin"][2],
calibrations[scene]["carla_rotate"],
calibrations[scene]["world_rotate"],
calibrations[scene]["scale"],
target_ped_file,
vehicle_traj_file,
target_vehicle_file)
output = commands.getoutput("python3 %s %s 0 %f %f %f %f --world_rotate"
" %f --scale %f --save_carla_traj_file %s"
" --vehicle_world_traj_file %s --is_actev"
" --save_vehicle_carla_traj_file %s" % (
script_path, ped_traj_file,
calibrations[scene]["origin"][0],
calibrations[scene]["origin"][1],
calibrations[scene]["origin"][2],
calibrations[scene]["carla_rotate"],
calibrations[scene]["world_rotate"],
calibrations[scene]["scale"],
target_ped_file,
vehicle_traj_file,
target_vehicle_file))
| [
"os.path.exists",
"commands.getoutput",
"os.makedirs",
"argparse.ArgumentParser",
"tqdm.tqdm",
"os.path.join",
"os.path.realpath",
"os.path.basename"
] | [((253, 278), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (276, 278), False, 'import argparse\n'), ((1908, 1935), 'os.path.exists', 'os.path.exists', (['script_path'], {}), '(script_path)\n', (1922, 1935), False, 'import os\n'), ((1973, 1993), 'tqdm.tqdm', 'tqdm', (['ped_traj_files'], {}), '(ped_traj_files)\n', (1977, 1993), False, 'from tqdm import tqdm\n'), ((1451, 1494), 'os.path.join', 'os.path.join', (['args.traj_world_path', '"""*.txt"""'], {}), "(args.traj_world_path, '*.txt')\n", (1463, 1494), False, 'import os\n'), ((1709, 1750), 'os.path.exists', 'os.path.exists', (['args.save_carla_traj_path'], {}), '(args.save_carla_traj_path)\n', (1723, 1750), False, 'import os\n'), ((1756, 1794), 'os.makedirs', 'os.makedirs', (['args.save_carla_traj_path'], {}), '(args.save_carla_traj_path)\n', (1767, 1794), False, 'import os\n'), ((2085, 2145), 'os.path.join', 'os.path.join', (['args.save_carla_traj_path', "('%s.txt' % filename)"], {}), "(args.save_carla_traj_path, '%s.txt' % filename)\n", (2097, 2145), False, 'import os\n'), ((1606, 1650), 'os.path.exists', 'os.path.exists', (['args.save_carla_vehicle_path'], {}), '(args.save_carla_vehicle_path)\n', (1620, 1650), False, 'import os\n'), ((1658, 1699), 'os.makedirs', 'os.makedirs', (['args.save_carla_vehicle_path'], {}), '(args.save_carla_vehicle_path)\n', (1669, 1699), False, 'import os\n'), ((1848, 1874), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1864, 1874), False, 'import os\n'), ((2216, 2545), 'commands.getoutput', 'commands.getoutput', (["('python3 %s %s 0 %f %f %f %f --world_rotate %f --scale %f --save_carla_traj_file %s'\n % (script_path, ped_traj_file, calibration['origin'][0], calibration[\n 'origin'][1], calibration['origin'][2], calibration['carla_rotate'],\n calibration['world_rotate'], calibration['scale'], target_ped_file))"], {}), "(\n 'python3 %s %s 0 %f %f %f %f --world_rotate %f --scale %f --save_carla_traj_file %s'\n % (script_path, 
ped_traj_file, calibration['origin'][0], calibration[\n 'origin'][1], calibration['origin'][2], calibration['carla_rotate'],\n calibration['world_rotate'], calibration['scale'], target_ped_file))\n", (2234, 2545), False, 'import commands\n'), ((2982, 3045), 'os.path.join', 'os.path.join', (['args.traj_vehicle_world_path', "('%s.txt' % filename)"], {}), "(args.traj_vehicle_world_path, '%s.txt' % filename)\n", (2994, 3045), False, 'import os\n'), ((3113, 3176), 'os.path.join', 'os.path.join', (['args.save_carla_vehicle_path', "('%s.txt' % filename)"], {}), "(args.save_carla_vehicle_path, '%s.txt' % filename)\n", (3125, 3176), False, 'import os\n'), ((3899, 4398), 'commands.getoutput', 'commands.getoutput', (["('python3 %s %s 0 %f %f %f %f --world_rotate %f --scale %f --save_carla_traj_file %s --vehicle_world_traj_file %s --is_actev --save_vehicle_carla_traj_file %s'\n % (script_path, ped_traj_file, calibrations[scene]['origin'][0],\n calibrations[scene]['origin'][1], calibrations[scene]['origin'][2],\n calibrations[scene]['carla_rotate'], calibrations[scene]['world_rotate'\n ], calibrations[scene]['scale'], target_ped_file, vehicle_traj_file,\n target_vehicle_file))"], {}), "(\n 'python3 %s %s 0 %f %f %f %f --world_rotate %f --scale %f --save_carla_traj_file %s --vehicle_world_traj_file %s --is_actev --save_vehicle_carla_traj_file %s'\n % (script_path, ped_traj_file, calibrations[scene]['origin'][0],\n calibrations[scene]['origin'][1], calibrations[scene]['origin'][2],\n calibrations[scene]['carla_rotate'], calibrations[scene]['world_rotate'\n ], calibrations[scene]['scale'], target_ped_file, vehicle_traj_file,\n target_vehicle_file))\n", (3917, 4398), False, 'import commands\n'), ((2027, 2058), 'os.path.basename', 'os.path.basename', (['ped_traj_file'], {}), '(ped_traj_file)\n', (2043, 2058), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from django.conf.urls import patterns
from lpp_test import views
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 — this module presumably targets an older Django release.
urlpatterns = patterns(
    'lpp_test.views',
    # URL definitions: each tuple maps a regex to a view name, resolved
    # relative to the 'lpp_test.views' prefix above.
    (r'^$', 'hello'),
    (r'^test$', 'test'),
    (r'^init_business$', 'init_business'),
    (r'^init_host$', 'init_host'),
    (r'^create_business$', 'create_business'),
    (r'^delete_business$', 'delete_business'),
    (r'^search_host$', 'search_host'),
    (r'^delete_host', 'delete_host')
)
| [
"django.conf.urls.patterns"
] | [((776, 1076), 'django.conf.urls.patterns', 'patterns', (['"""lpp_test.views"""', "('^$', 'hello')", "('^test$', 'test')", "('^init_business$', 'init_business')", "('^init_host$', 'init_host')", "('^create_business$', 'create_business')", "('^delete_business$', 'delete_business')", "('^search_host$', 'search_host')", "('^delete_host', 'delete_host')"], {}), "('lpp_test.views', ('^$', 'hello'), ('^test$', 'test'), (\n '^init_business$', 'init_business'), ('^init_host$', 'init_host'), (\n '^create_business$', 'create_business'), ('^delete_business$',\n 'delete_business'), ('^search_host$', 'search_host'), ('^delete_host',\n 'delete_host'))\n", (784, 1076), False, 'from django.conf.urls import patterns\n')] |
# -*- coding:utf-8 -*-
#!/usr/bin/env python
import os, typing, datetime, glob, warnings, pathlib, pkg_resources
from setuptools import setup as setuptools_setup
from .cmd import bdist_app,cleanup
from ..utility.os import walk_relative_file
from ..utility.pkg import cov_to_program_name, cov_program_name_to_module_name, walk_requirements
def on_version(v:str, p:str = None, b:str = None) -> typing.Iterable[typing.Dict[str,str]]:
    '''setuptools version generator.

    Yields a single dict holding the assembled version string: base version
    *v* (default ``0.0.1``), an optional pre-release phase *p* ('a'/'alpha',
    'b'/'beta', 'c'/'rc'), and — only when a phase is present — an optional
    build tag *b* ('' / '_' / '-' expand to the current timestamp).
    '''
    phase_suffixes = {'a': 'a', 'alpha': 'a',
                      'b': 'b', 'beta': 'b',
                      'c': 'c', 'rc': 'c'}
    version = v or '0.0.1'
    if p is not None:
        # Unknown phases are silently ignored, as before.
        version += phase_suffixes.get(p, '')
    if p is not None and b is not None:
        if b in ('', '_', '-'):
            version += datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        else:
            version += b
    yield dict(version = version)
def on_description(description:str = None) -> typing.Iterable[typing.Dict[str,str]]:
    '''setuptools description generator.

    Yields the short description (when given) and, if a README.md exists in
    the working directory, a markdown long description read from it.
    '''
    if description:
        yield {'description': description}
    readme = pathlib.Path("README.md")
    if readme.exists():
        yield {
            'long_description_content_type': "text/markdown",
            'long_description': readme.read_text(encoding = 'utf-8'),
        }
def on_requirement(*req_dirs: os.PathLike) -> typing.Iterable[typing.Dict[str,str]]:
    '''setuptools requirements generator.

    Scans each directory in *req_dirs* plus ``<cwd>/req`` for requirement
    files: ``requirements.txt`` becomes ``install_requires``; any file named
    ``<extra>-<something>`` becomes an ``extras_require`` entry keyed by the
    part before the dash.  Yields a single dict of setup options.

    NOTE(review): os.listdir raises if ``<cwd>/req`` is missing, and
    ``name.split('-')`` assumes exactly one dash in extra file names —
    presumably intentional; verify against the expected project layout.
    '''
    setup_option = {}
    all_req_dir = *req_dirs, os.path.join(os.getcwd(), 'req')
    for req_dir in all_req_dir:
        for name in os.listdir(req_dir):
            pkgs = list(walk_requirements(os.path.join(req_dir, name)))
            if 'requirements.txt' == name:
                # On Windows, automatically add a pywin32 dependency
                # (used to build Windows services).
                if 'nt' == os.name and not any([ pkg.startswith('pywin32') for pkg in pkgs ]):
                    try:
                        dist = pkg_resources.get_distribution('pywin32')
                        pkgs.append(f'{dist.key} == {dist.version}')
                    # HACK: bare except — any lookup failure falls back to an
                    # unpinned pywin32 requirement.
                    except:
                        pkgs.append('pywin32')
                setup_option.update(install_requires = pkgs)
            else:
                basename, _ = name.split('-')
                setup_option.setdefault("extras_require", {})
                setup_option["extras_require"].update({ basename : pkgs })
    yield setup_option
def on_data_dirs(**data_dir_info:typing.Tuple) -> typing.Iterable[typing.Dict[str,str]]:
    '''setuptools data-files generator.

    Each keyword maps a target directory name to a tuple of
    ``(source_root, *glob_expressions)``; a default ``bin`` entry is always
    added.  Yields a single dict suitable for the ``data_files`` option.
    '''
    data_dir_info.setdefault('bin', ('bin', '*'))
    collected = {}
    for data_dir_name, (source_root, *expressions) in data_dir_info.items():
        if not expressions:
            warnings.warn(f"Invalid '{data_dir_name}' data directory parameter - Skip it")
            continue
        for relative_file in walk_relative_file(source_root, *expressions):
            target_dir = os.path.join(data_dir_name, os.path.dirname(relative_file)).strip('/')
            collected.setdefault(target_dir, set()).add(os.path.join(source_root, relative_file))
    yield dict(data_files = [ (name, list(files)) for name, files in collected.items() ])
def setup(*on_option_generators:typing.Iterable[typing.Dict[str,str]], **setup_option:typing.Any):
'''执行setup设置方法
'''
name = setup_option.get("name")
if not name:
# warnings.warn("Miss 'name' - Abort")
raise Exception("Miss 'name' - Abort")
# Merge option
for on_option_generator in on_option_generators:
for on_option in on_option_generator:
setup_option.update(on_option)
# Update cmdclass
setup_option.setdefault("cmdclass", {})
setup_option['cmdclass'].update(
bdist_app = bdist_app,
cleanup = cleanup
)
# Autogenerate entry_points
if os.path.exists(name):
setup_option.setdefault('entry_points', {})
setup_option['entry_points'].setdefault('console_scripts', [])
for file_path in glob.glob(f'{name}/*/__main__.py'):
name, *module_names, _, = file_path.split(os.sep)
program_name = cov_to_program_name(name, *module_names)
module_name = cov_program_name_to_module_name(program_name)
setup_option['entry_points']['console_scripts'].append(f"{program_name} = {module_name}.__main__:main")
# Autogenerate scripts
# p = pathlib.Path("binexe")
# if p.exists():
# setup_option.setdefault('scripts', [])
# for file_path in p.glob('*'):
# setup_option['scripts'].append(file_path.as_posix())
# Run setup
setuptools_setup(**setup_option) | [
"os.path.exists",
"os.listdir",
"setuptools.setup",
"os.path.join",
"os.getcwd",
"datetime.datetime.now",
"os.path.dirname",
"warnings.warn",
"pkg_resources.get_distribution",
"glob.glob"
] | [((1058, 1085), 'os.path.exists', 'os.path.exists', (['"""README.md"""'], {}), "('README.md')\n", (1072, 1085), False, 'import os, typing, datetime, glob, warnings, pathlib, pkg_resources\n'), ((3942, 3962), 'os.path.exists', 'os.path.exists', (['name'], {}), '(name)\n', (3956, 3962), False, 'import os, typing, datetime, glob, warnings, pathlib, pkg_resources\n'), ((4725, 4757), 'setuptools.setup', 'setuptools_setup', ([], {}), '(**setup_option)\n', (4741, 4757), True, 'from setuptools import setup as setuptools_setup\n'), ((1550, 1569), 'os.listdir', 'os.listdir', (['req_dir'], {}), '(req_dir)\n', (1560, 1569), False, 'import os, typing, datetime, glob, warnings, pathlib, pkg_resources\n'), ((4112, 4146), 'glob.glob', 'glob.glob', (['f"""{name}/*/__main__.py"""'], {}), "(f'{name}/*/__main__.py')\n", (4121, 4146), False, 'import os, typing, datetime, glob, warnings, pathlib, pkg_resources\n'), ((1478, 1489), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1487, 1489), False, 'import os, typing, datetime, glob, warnings, pathlib, pkg_resources\n'), ((2703, 2781), 'warnings.warn', 'warnings.warn', (['f"""Invalid \'{data_dir_name}\' data directory parameter - Skip it"""'], {}), '(f"Invalid \'{data_dir_name}\' data directory parameter - Skip it")\n', (2716, 2781), False, 'import os, typing, datetime, glob, warnings, pathlib, pkg_resources\n'), ((3168, 3215), 'os.path.join', 'os.path.join', (['data_dir_root', 'data_relative_file'], {}), '(data_dir_root, data_relative_file)\n', (3180, 3215), False, 'import os, typing, datetime, glob, warnings, pathlib, pkg_resources\n'), ((1613, 1640), 'os.path.join', 'os.path.join', (['req_dir', 'name'], {}), '(req_dir, name)\n', (1625, 1640), False, 'import os, typing, datetime, glob, warnings, pathlib, pkg_resources\n'), ((761, 784), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (782, 784), False, 'import os, typing, datetime, glob, warnings, pathlib, pkg_resources\n'), ((1893, 1934), 
'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""pywin32"""'], {}), "('pywin32')\n", (1923, 1934), False, 'import os, typing, datetime, glob, warnings, pathlib, pkg_resources\n'), ((2958, 2993), 'os.path.dirname', 'os.path.dirname', (['data_relative_file'], {}), '(data_relative_file)\n', (2973, 2993), False, 'import os, typing, datetime, glob, warnings, pathlib, pkg_resources\n')] |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses.py."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from rlax._src import losses
class L2LossTest(parameterized.TestCase):
  """Tests for losses.l2_loss across jit/no-jit and host/device inputs."""

  # The same four (compile_fn, place_fn) variants were previously repeated
  # verbatim on every test method; shared here instead (DRY).
  _VARIANTS = (
      ('JitOnp', jax.jit, lambda t: t),
      ('NoJitOnp', lambda fn: fn, lambda t: t),
      ('JitJnp', jax.jit, jax.device_put),
      ('NoJitJnp', lambda fn: fn, jax.device_put))

  def setUp(self):
    super(L2LossTest, self).setUp()
    # Inputs, expected losses (0.5 * x**2), and expected gradients (x).
    self.xs = jnp.array([-2, -1, -0.5, 0, 0.5, 1, 2])
    self.ys = jnp.array([2., 0.5, 0.125, 0, 0.125, 0.5, 2.])
    self.dys = jnp.array([-2, -1, -0.5, 0, 0.5, 1, 2])

  @parameterized.named_parameters(*_VARIANTS)
  def test_l2_loss_scalar(self, compile_fn, place_fn):
    """l2_loss on a scalar input."""
    # Optionally compile.
    l2_loss = compile_fn(losses.l2_loss)
    # Optionally convert to device array.
    x = place_fn(jnp.array(0.5))
    # Test output.
    np.testing.assert_allclose(l2_loss(x), 0.125)

  @parameterized.named_parameters(*_VARIANTS)
  def test_l2_loss_vector(self, compile_fn, place_fn):
    """l2_loss applied elementwise to a vector."""
    l2_loss = compile_fn(losses.l2_loss)
    xs = place_fn(self.xs)
    np.testing.assert_allclose(l2_loss(xs), self.ys)

  @parameterized.named_parameters(*_VARIANTS)
  def test_l2_regularizer(self, compile_fn, place_fn):
    """l2_loss(x) equals l2_loss(x, 0) when used as a regularizer."""
    l2_loss = compile_fn(losses.l2_loss)
    xs = place_fn(self.xs)
    np.testing.assert_allclose(l2_loss(xs), l2_loss(xs, jnp.zeros_like(xs)))

  @parameterized.named_parameters(*_VARIANTS)
  def test_gradients(self, compile_fn, place_fn):
    """Batched gradients of l2_loss match d/dx (0.5 * x**2) = x."""
    l2_loss = compile_fn(losses.l2_loss)
    xs = place_fn(self.xs)
    batch_grad_func = jax.vmap(jax.grad(l2_loss), (0))
    np.testing.assert_allclose(batch_grad_func(xs), self.dys)
class LogLossTest(parameterized.TestCase):
  """Tests for losses.log_loss."""

  def setUp(self):
    super(LogLossTest, self).setUp()
    # Predictions/labels and the corresponding expected log losses.
    self.predictions = jnp.array([1., 1., 0., 0., 0.5, 0.5])
    self.labels = jnp.array([1., 0., 0., 1., 1., 0])
    self.expected = jnp.array([0., np.inf, 0., np.inf, 0.6931472, 0.6931472])

  @parameterized.named_parameters(
      ('JitOnp', jax.jit, lambda t: t),
      ('NoJitOnp', lambda fn: fn, lambda t: t),
      ('JitJnp', jax.jit, jax.device_put),
      ('NoJitJnp', lambda fn: fn, jax.device_put))
  def test_log_loss_scalar(self, compile_fn, place_fn):
    """Scalar inputs produce the expected pointwise log loss."""
    log_loss = compile_fn(losses.log_loss)
    prediction = place_fn(self.predictions[2])
    label = place_fn(self.labels[2])
    np.testing.assert_allclose(
        log_loss(prediction, label), self.expected[2], atol=1e-4)

  @parameterized.named_parameters(
      ('JitOnp', jax.jit, lambda t: t),
      ('NoJitOnp', lambda fn: fn, lambda t: t),
      ('JitJnp', jax.jit, jax.device_put),
      ('NoJitJnp', lambda fn: fn, jax.device_put))
  def test_log_loss_vector(self, compile_fn, place_fn):
    """Vector inputs produce the expected elementwise log losses."""
    log_loss = compile_fn(losses.log_loss)
    predictions = place_fn(self.predictions)
    labels = place_fn(self.labels)
    np.testing.assert_allclose(
        log_loss(predictions, labels), self.expected, atol=1e-4)
if __name__ == '__main__':
  # Run the absl test runner when executed as a script.
  absltest.main()
| [
"numpy.testing.assert_allclose",
"absl.testing.absltest.main",
"absl.testing.parameterized.named_parameters",
"jax.numpy.array",
"jax.grad",
"jax.numpy.zeros_like"
] | [((1173, 1371), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (1203, 1371), False, 'from absl.testing import parameterized\n'), ((1657, 1855), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (1687, 1855), False, 'from absl.testing import parameterized\n'), ((2138, 2336), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (2168, 2336), False, 'from absl.testing import parameterized\n'), ((2643, 2841), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (2673, 2841), False, 'from 
absl.testing import parameterized\n'), ((3505, 3703), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (3535, 3703), False, 'from absl.testing import parameterized\n'), ((4080, 4278), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (4110, 4278), False, 'from absl.testing import parameterized\n'), ((4673, 4688), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4686, 4688), False, 'from absl.testing import absltest\n'), ((1013, 1052), 'jax.numpy.array', 'jnp.array', (['[-2, -1, -0.5, 0, 0.5, 1, 2]'], {}), '([-2, -1, -0.5, 0, 0.5, 1, 2])\n', (1022, 1052), True, 'import jax.numpy as jnp\n'), ((1067, 1115), 'jax.numpy.array', 'jnp.array', (['[2.0, 0.5, 0.125, 0, 0.125, 0.5, 2.0]'], {}), '([2.0, 0.5, 0.125, 0, 0.125, 0.5, 2.0])\n', (1076, 1115), True, 'import jax.numpy as jnp\n'), ((1129, 1168), 'jax.numpy.array', 'jnp.array', (['[-2, -1, -0.5, 0, 0.5, 1, 2]'], {}), '([-2, -1, -0.5, 0, 0.5, 1, 2])\n', (1138, 1168), True, 'import jax.numpy as jnp\n'), ((3167, 3211), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', 'self.dys'], {}), '(actual, self.dys)\n', (3193, 3211), True, 'import numpy as np\n'), ((3331, 3372), 'jax.numpy.array', 'jnp.array', (['[1.0, 1.0, 0.0, 0.0, 0.5, 0.5]'], {}), 
'([1.0, 1.0, 0.0, 0.0, 0.5, 0.5])\n', (3340, 3372), True, 'import jax.numpy as jnp\n'), ((3388, 3427), 'jax.numpy.array', 'jnp.array', (['[1.0, 0.0, 0.0, 1.0, 1.0, 0]'], {}), '([1.0, 0.0, 0.0, 1.0, 1.0, 0])\n', (3397, 3427), True, 'import jax.numpy as jnp\n'), ((3443, 3502), 'jax.numpy.array', 'jnp.array', (['[0.0, np.inf, 0.0, np.inf, 0.6931472, 0.6931472]'], {}), '([0.0, np.inf, 0.0, np.inf, 0.6931472, 0.6931472])\n', (3452, 3502), True, 'import jax.numpy as jnp\n'), ((1568, 1582), 'jax.numpy.array', 'jnp.array', (['(0.5)'], {}), '(0.5)\n', (1577, 1582), True, 'import jax.numpy as jnp\n'), ((3106, 3123), 'jax.grad', 'jax.grad', (['l2_loss'], {}), '(l2_loss)\n', (3114, 3123), False, 'import jax\n'), ((2618, 2636), 'jax.numpy.zeros_like', 'jnp.zeros_like', (['xs'], {}), '(xs)\n', (2632, 2636), True, 'import jax.numpy as jnp\n')] |
#!/usr/bin/env python
from distutils.core import setup
setup(name='oxford_term_dates',
version='1.3.0',
description='A Python library for translating between real dates and Oxford term dates',
author='IT Services, University of Oxford',
author_email='<EMAIL>',
url='https://github.com/ox-it/oxford-term-dates',
packages=['oxford_term_dates','oxford_term_dates.templatetags'],
classifiers=[
'Framework :: Django',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Academic Free License (AFL)',
'Intended Audience :: Education',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Education',
'Topic :: Internet',
],
)
| [
"distutils.core.setup"
] | [((57, 709), 'distutils.core.setup', 'setup', ([], {'name': '"""oxford_term_dates"""', 'version': '"""1.3.0"""', 'description': '"""A Python library for translating between real dates and Oxford term dates"""', 'author': '"""IT Services, University of Oxford"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/ox-it/oxford-term-dates"""', 'packages': "['oxford_term_dates', 'oxford_term_dates.templatetags']", 'classifiers': "['Framework :: Django', 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: Academic Free License (AFL)',\n 'Intended Audience :: Education', 'Operating System :: OS Independent',\n 'Programming Language :: Python', 'Topic :: Education', 'Topic :: Internet'\n ]"}), "(name='oxford_term_dates', version='1.3.0', description=\n 'A Python library for translating between real dates and Oxford term dates'\n , author='IT Services, University of Oxford', author_email='<EMAIL>',\n url='https://github.com/ox-it/oxford-term-dates', packages=[\n 'oxford_term_dates', 'oxford_term_dates.templatetags'], classifiers=[\n 'Framework :: Django', 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: Academic Free License (AFL)',\n 'Intended Audience :: Education', 'Operating System :: OS Independent',\n 'Programming Language :: Python', 'Topic :: Education',\n 'Topic :: Internet'])\n", (62, 709), False, 'from distutils.core import setup\n')] |
# Generated by Django 2.1.2 on 2018-10-12 14:18
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
import django_celery_beat.validators
import timezone_field.fields
class Migration(migrations.Migration):
replaces = [
('django_celery_beat', '0005_add_solarschedule_events_choices'),
('django_celery_beat', '0006_auto_20180210_1226'),
('django_celery_beat', '0006_auto_20180322_0932'),
('django_celery_beat', '0007_auto_20180521_0826'),
('django_celery_beat', '0008_auto_20180914_1922'),
]
dependencies = [
('django_celery_beat', '0004_auto_20170221_0000'),
]
operations = [
migrations.AlterField(
model_name='solarschedule',
name='event',
field=models.CharField(
choices=[('dawn_astronomical', 'dawn_astronomical'),
('dawn_civil', 'dawn_civil'),
('dawn_nautical', 'dawn_nautical'),
('dusk_astronomical', 'dusk_astronomical'),
('dusk_civil', 'dusk_civil'),
('dusk_nautical', 'dusk_nautical'),
('solar_noon', 'solar_noon'), ('sunrise', 'sunrise'),
('sunset', 'sunset')], max_length=24,
verbose_name='event'),
),
migrations.AlterModelOptions(
name='crontabschedule',
options={
'ordering': ['month_of_year', 'day_of_month', 'day_of_week',
'hour', 'minute', 'timezone'],
'verbose_name': 'crontab', 'verbose_name_plural': 'crontabs'},
),
migrations.AlterModelOptions(
name='crontabschedule',
options={
'ordering': ['month_of_year', 'day_of_month', 'day_of_week',
'hour', 'minute', 'timezone'],
'verbose_name': 'crontab', 'verbose_name_plural': 'crontabs'},
),
migrations.AddField(
model_name='crontabschedule',
name='timezone',
field=timezone_field.fields.TimeZoneField(default='UTC'),
),
migrations.AddField(
model_name='periodictask',
name='one_off',
field=models.BooleanField(default=False,
verbose_name='one-off task'),
),
migrations.AddField(
model_name='periodictask',
name='start_time',
field=models.DateTimeField(blank=True, null=True,
verbose_name='start_time'),
),
migrations.AlterField(
model_name='crontabschedule',
name='day_of_month',
field=models.CharField(default='*', max_length=124, validators=[
django_celery_beat.validators.day_of_month_validator],
verbose_name='day of month'),
),
migrations.AlterField(
model_name='crontabschedule',
name='day_of_week',
field=models.CharField(default='*', max_length=64, validators=[
django_celery_beat.validators.day_of_week_validator],
verbose_name='day of week'),
),
migrations.AlterField(
model_name='crontabschedule',
name='hour',
field=models.CharField(default='*', max_length=96, validators=[
django_celery_beat.validators.hour_validator],
verbose_name='hour'),
),
migrations.AlterField(
model_name='crontabschedule',
name='minute',
field=models.CharField(default='*', max_length=240, validators=[
django_celery_beat.validators.minute_validator],
verbose_name='minute'),
),
migrations.AlterField(
model_name='crontabschedule',
name='month_of_year',
field=models.CharField(default='*', max_length=64, validators=[
django_celery_beat.validators.month_of_year_validator],
verbose_name='month of year'),
),
]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.CharField",
"django.db.models.DateTimeField",
"django.db.models.BooleanField"
] | [((1396, 1621), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""crontabschedule"""', 'options': "{'ordering': ['month_of_year', 'day_of_month', 'day_of_week', 'hour',\n 'minute', 'timezone'], 'verbose_name': 'crontab', 'verbose_name_plural':\n 'crontabs'}"}), "(name='crontabschedule', options={'ordering': [\n 'month_of_year', 'day_of_month', 'day_of_week', 'hour', 'minute',\n 'timezone'], 'verbose_name': 'crontab', 'verbose_name_plural': 'crontabs'})\n", (1424, 1621), False, 'from django.db import migrations, models\n'), ((1719, 1944), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""crontabschedule"""', 'options': "{'ordering': ['month_of_year', 'day_of_month', 'day_of_week', 'hour',\n 'minute', 'timezone'], 'verbose_name': 'crontab', 'verbose_name_plural':\n 'crontabs'}"}), "(name='crontabschedule', options={'ordering': [\n 'month_of_year', 'day_of_month', 'day_of_week', 'hour', 'minute',\n 'timezone'], 'verbose_name': 'crontab', 'verbose_name_plural': 'crontabs'})\n", (1747, 1944), False, 'from django.db import migrations, models\n'), ((808, 1190), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('dawn_astronomical', 'dawn_astronomical'), ('dawn_civil', 'dawn_civil'),\n ('dawn_nautical', 'dawn_nautical'), ('dusk_astronomical',\n 'dusk_astronomical'), ('dusk_civil', 'dusk_civil'), ('dusk_nautical',\n 'dusk_nautical'), ('solar_noon', 'solar_noon'), ('sunrise', 'sunrise'),\n ('sunset', 'sunset')]", 'max_length': '(24)', 'verbose_name': '"""event"""'}), "(choices=[('dawn_astronomical', 'dawn_astronomical'), (\n 'dawn_civil', 'dawn_civil'), ('dawn_nautical', 'dawn_nautical'), (\n 'dusk_astronomical', 'dusk_astronomical'), ('dusk_civil', 'dusk_civil'),\n ('dusk_nautical', 'dusk_nautical'), ('solar_noon', 'solar_noon'), (\n 'sunrise', 'sunrise'), ('sunset', 'sunset')], max_length=24,\n verbose_name='event')\n", (824, 1190), False, 'from django.db import 
migrations, models\n'), ((2329, 2392), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""one-off task"""'}), "(default=False, verbose_name='one-off task')\n", (2348, 2392), False, 'from django.db import migrations, models\n'), ((2560, 2630), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""start_time"""'}), "(blank=True, null=True, verbose_name='start_time')\n", (2580, 2630), False, 'from django.db import migrations, models\n'), ((2806, 2957), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""*"""', 'max_length': '(124)', 'validators': '[django_celery_beat.validators.day_of_month_validator]', 'verbose_name': '"""day of month"""'}), "(default='*', max_length=124, validators=[\n django_celery_beat.validators.day_of_month_validator], verbose_name=\n 'day of month')\n", (2822, 2957), False, 'from django.db import migrations, models\n'), ((3116, 3259), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""*"""', 'max_length': '(64)', 'validators': '[django_celery_beat.validators.day_of_week_validator]', 'verbose_name': '"""day of week"""'}), "(default='*', max_length=64, validators=[django_celery_beat\n .validators.day_of_week_validator], verbose_name='day of week')\n", (3132, 3259), False, 'from django.db import migrations, models\n'), ((3416, 3545), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""*"""', 'max_length': '(96)', 'validators': '[django_celery_beat.validators.hour_validator]', 'verbose_name': '"""hour"""'}), "(default='*', max_length=96, validators=[django_celery_beat\n .validators.hour_validator], verbose_name='hour')\n", (3432, 3545), False, 'from django.db import migrations, models\n'), ((3704, 3838), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""*"""', 'max_length': '(240)', 'validators': '[django_celery_beat.validators.minute_validator]', 
'verbose_name': '"""minute"""'}), "(default='*', max_length=240, validators=[\n django_celery_beat.validators.minute_validator], verbose_name='minute')\n", (3720, 3838), False, 'from django.db import migrations, models\n'), ((4004, 4151), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""*"""', 'max_length': '(64)', 'validators': '[django_celery_beat.validators.month_of_year_validator]', 'verbose_name': '"""month of year"""'}), "(default='*', max_length=64, validators=[django_celery_beat\n .validators.month_of_year_validator], verbose_name='month of year')\n", (4020, 4151), False, 'from django.db import migrations, models\n')] |
# Columnar Transposition Hack per Cracking Codes with Python
# https://www.nostarch.com/crackingcodes/ (BSD Licensed)
import pyperclip
from j_detect_english import is_english
from g_decrypt_columnar_transposition_cipher import decrypt_message as decrypt
def hack_transposition(text):
print('Press Ctrl-C to quit at any time.')
print('Hacking...')
for key in range(1, len(text)):
print('Trying key #%s...' % (key))
print()
print('...')
decrypted_text = decrypt(key, text)
print()
print('...')
if is_english(decrypted_text):
print()
print('Possible encryption hack:')
print('Key %s: %s' % (key, decrypted_text[:100]))
print()
print('Enter D if done, anything else to continue the hack:')
response = input('>')
if response.strip().upper().startswith('D'):
return decrypted_text
return None
def main(text):
hacked_text = hack_transposition(text)
if hacked_text == None:
print('Failed to hack the Columnar Transposition Encryption')
else:
print('Copying hacked string to clipboard:')
print(hacked_text)
pyperclip.copy(hacked_text)
if __name__ == '__main__':
text = input('What would you like to decrypt? ')
main(text)
| [
"pyperclip.copy",
"j_detect_english.is_english",
"g_decrypt_columnar_transposition_cipher.decrypt_message"
] | [((482, 500), 'g_decrypt_columnar_transposition_cipher.decrypt_message', 'decrypt', (['key', 'text'], {}), '(key, text)\n', (489, 500), True, 'from g_decrypt_columnar_transposition_cipher import decrypt_message as decrypt\n'), ((538, 564), 'j_detect_english.is_english', 'is_english', (['decrypted_text'], {}), '(decrypted_text)\n', (548, 564), False, 'from j_detect_english import is_english\n'), ((1128, 1155), 'pyperclip.copy', 'pyperclip.copy', (['hacked_text'], {}), '(hacked_text)\n', (1142, 1155), False, 'import pyperclip\n')] |
# -*- coding: utf-8 -*-
"""
test queen
"""
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "3-clause BSD License"
__version__ = '1.0'
__date__ = "15 January 2015"
from nose.tools import assert_equal
import dis
from progressmonitor.formatter import (progressbar_formatter_factory,
nb_iterations_formatter_factory,
elapsed_time_formatter_factory,
remaining_time_formatter_factory)
from progressmonitor.rule import (periodic_rule_factory,
span_rule_factory, rate_rule_factory)
from progressmonitor.util import call_with
def test_fb_rate2span():
kwargs = {"rate": 0.1, "span":10}
r1 = call_with(rate_rule_factory, kwargs)
r2 = call_with(span_rule_factory, kwargs)
assert_equal(r1.__name__, r2.__name__)
def test_fb_span2period():
kwargs = {"period":1}
r1 = call_with(span_rule_factory, kwargs)
r2 = call_with(periodic_rule_factory, kwargs)
assert_equal(r1.__name__, r2.__name__)
def test_fb_pb2nbiter():
kwargs = {}
r1 = call_with(progressbar_formatter_factory, kwargs)
r2 = call_with(nb_iterations_formatter_factory, kwargs)
assert_equal(r1.__name__, r2.__name__)
def test_fb_remaining2elapsed():
kwargs = {}
r1 = call_with(remaining_time_formatter_factory, kwargs)
r2 = call_with(elapsed_time_formatter_factory, kwargs)
assert_equal(r1.__name__, r2.__name__)
| [
"nose.tools.assert_equal",
"progressmonitor.util.call_with"
] | [((751, 787), 'progressmonitor.util.call_with', 'call_with', (['rate_rule_factory', 'kwargs'], {}), '(rate_rule_factory, kwargs)\n', (760, 787), False, 'from progressmonitor.util import call_with\n'), ((797, 833), 'progressmonitor.util.call_with', 'call_with', (['span_rule_factory', 'kwargs'], {}), '(span_rule_factory, kwargs)\n', (806, 833), False, 'from progressmonitor.util import call_with\n'), ((838, 876), 'nose.tools.assert_equal', 'assert_equal', (['r1.__name__', 'r2.__name__'], {}), '(r1.__name__, r2.__name__)\n', (850, 876), False, 'from nose.tools import assert_equal\n'), ((940, 976), 'progressmonitor.util.call_with', 'call_with', (['span_rule_factory', 'kwargs'], {}), '(span_rule_factory, kwargs)\n', (949, 976), False, 'from progressmonitor.util import call_with\n'), ((986, 1026), 'progressmonitor.util.call_with', 'call_with', (['periodic_rule_factory', 'kwargs'], {}), '(periodic_rule_factory, kwargs)\n', (995, 1026), False, 'from progressmonitor.util import call_with\n'), ((1031, 1069), 'nose.tools.assert_equal', 'assert_equal', (['r1.__name__', 'r2.__name__'], {}), '(r1.__name__, r2.__name__)\n', (1043, 1069), False, 'from nose.tools import assert_equal\n'), ((1122, 1170), 'progressmonitor.util.call_with', 'call_with', (['progressbar_formatter_factory', 'kwargs'], {}), '(progressbar_formatter_factory, kwargs)\n', (1131, 1170), False, 'from progressmonitor.util import call_with\n'), ((1180, 1230), 'progressmonitor.util.call_with', 'call_with', (['nb_iterations_formatter_factory', 'kwargs'], {}), '(nb_iterations_formatter_factory, kwargs)\n', (1189, 1230), False, 'from progressmonitor.util import call_with\n'), ((1235, 1273), 'nose.tools.assert_equal', 'assert_equal', (['r1.__name__', 'r2.__name__'], {}), '(r1.__name__, r2.__name__)\n', (1247, 1273), False, 'from nose.tools import assert_equal\n'), ((1333, 1384), 'progressmonitor.util.call_with', 'call_with', (['remaining_time_formatter_factory', 'kwargs'], {}), '(remaining_time_formatter_factory, 
kwargs)\n', (1342, 1384), False, 'from progressmonitor.util import call_with\n'), ((1394, 1443), 'progressmonitor.util.call_with', 'call_with', (['elapsed_time_formatter_factory', 'kwargs'], {}), '(elapsed_time_formatter_factory, kwargs)\n', (1403, 1443), False, 'from progressmonitor.util import call_with\n'), ((1448, 1486), 'nose.tools.assert_equal', 'assert_equal', (['r1.__name__', 'r2.__name__'], {}), '(r1.__name__, r2.__name__)\n', (1460, 1486), False, 'from nose.tools import assert_equal\n')] |
import socket
import time
import random
import logging
from _thread import start_new_thread
from threading import Lock
import utils
class Channel:
MAX_CONNECTION = 100
BUFFER_SIZE = 65536
CHANNEL_PORT = 10000
CLIENT_PORTS = {
0: 10001,
1: 10002,
2: 10003
}
SERVER_PORTS = {
# Client listener port, raft vote listener port, raft operation listener port.
0: (11001, 12001, 13001),
1: (11002, 12002, 13002),
2: (11003, 12003, 13003),
}
@classmethod
def network_delay(cls):
# Network delay are applied when transmitting a message in the channel.
delay = random.uniform(1.0, 5.0)
time.sleep(delay)
def __init__(self):
# Set up the network configuration and its lock.
self.is_gate_open = [True, True, True]
self.lock = Lock()
# Set up the ports.
self.port = Channel.CHANNEL_PORT
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind((socket.gethostname(), self.port))
# Set up loggers.
log_file = f'channel.log'
# if os.path.exists(log_file):
# os.remove(log_file)
self.logger = logging.getLogger('Channel')
file_handler = logging.FileHandler(log_file)
formatter = logging.Formatter('%(asctime)s %(message)s', "%H:%M:%S")
file_handler.setFormatter(formatter)
self.logger.addHandler(file_handler)
self.logger.setLevel(logging.INFO)
self.logger.info("==============================================STARTING==============================================")
def threaded_on_receive(self, connection):
# Relay the message from the sender to the receiver.
header, sender, receiver, message = utils.receive_message(connection)
# print(header, sender, receiver, message)
# Based on the header and network configuration, decides whether to relay the message.
if header in ('Client-Request', 'Client-Response'): # Always relay messages between a client and a server.
relay = True
else: # Don't relay messages that involve an isolated server.
self.lock.acquire()
if self.is_gate_open[sender] and self.is_gate_open[receiver]:
relay = True
else:
relay = False
self.lock.release()
if relay:
Channel.network_delay()
if header == 'Client-Response': # Receiver is a client.
receiver_port = Channel.CLIENT_PORTS[receiver]
elif header in ('Client-Request', 'Client-Relay'): # Receiver is the server's client listener port.
receiver_port = Channel.SERVER_PORTS[receiver][0]
elif header in ('Vote-Request', 'Vote-Response'): # Receiver is the server's vote listener port.
receiver_port = Channel.SERVER_PORTS[receiver][1]
else: # Receiver is the server's operation listener port.
receiver_port = Channel.SERVER_PORTS[receiver][2]
try:
log_msg = f'{header} {sender} {receiver} {message} {receiver_port}'
self.logger.info(log_msg)
utils.send_message((header, sender, receiver, message), receiver_port)
except Exception as e:
self.logger.info(e)
def start_message_listener(self):
# Start the message listener for all incoming messages.
self.socket.listen(Channel.MAX_CONNECTION)
while True:
connection, (ip, port) = self.socket.accept()
start_new_thread(self.threaded_on_receive, (connection,))
def get_partition_config(self):
cur = [True, True, True]
config = input('\nHow do you partition? (use format: a;b-c, a and b in the same partition): ')
partitions = config.split("-")
# check if the input is valid:
self.lock.acquire()
seen = set()
for partition in partitions:
for node in partition.split(";"):
if node in ['0', '1', '2']:
seen.add(int(node))
else:
print('Config format is wrong')
self.lock.release()
return cur
if len(seen) < 3:
print("Config format in wrong")
self.lock.release()
return cur
# format is valid, check partition
if len(partitions) == 3:
# all are isolated
cur = [False, False, False]
elif len(partitions) == 2:
# one isolated
if len(partitions[0]) == 1:
cur[int(partitions[0])] = False
else:
cur[int(partitions[1])] = False
self.lock.release()
return cur
def configuration_change_handler(self):
# Get input from the user to change the network configuration for network partition.
while True:
self.is_gate_open = self.get_partition_config()
print(f"Configuration has changed to: {self.is_gate_open}")
def start(self):
# Start the listener for messages and user input handler.
start_new_thread(self.start_message_listener, ())
start_new_thread(self.configuration_change_handler, ())
# generate first blockchain
utils.read_first_blockchain()
while 1:
pass
if __name__ == '__main__':
channel = Channel()
channel.start()
| [
"logging.getLogger",
"utils.send_message",
"random.uniform",
"socket.socket",
"threading.Lock",
"logging.Formatter",
"time.sleep",
"utils.read_first_blockchain",
"logging.FileHandler",
"utils.receive_message",
"socket.gethostname",
"_thread.start_new_thread"
] | [((662, 686), 'random.uniform', 'random.uniform', (['(1.0)', '(5.0)'], {}), '(1.0, 5.0)\n', (676, 686), False, 'import random\n'), ((695, 712), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (705, 712), False, 'import time\n'), ((862, 868), 'threading.Lock', 'Lock', ([], {}), '()\n', (866, 868), False, 'from threading import Lock\n'), ((961, 1010), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (974, 1010), False, 'import socket\n'), ((1227, 1255), 'logging.getLogger', 'logging.getLogger', (['"""Channel"""'], {}), "('Channel')\n", (1244, 1255), False, 'import logging\n'), ((1279, 1308), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (1298, 1308), False, 'import logging\n'), ((1329, 1385), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(message)s"""', '"""%H:%M:%S"""'], {}), "('%(asctime)s %(message)s', '%H:%M:%S')\n", (1346, 1385), False, 'import logging\n'), ((1802, 1835), 'utils.receive_message', 'utils.receive_message', (['connection'], {}), '(connection)\n', (1823, 1835), False, 'import utils\n'), ((5232, 5281), '_thread.start_new_thread', 'start_new_thread', (['self.start_message_listener', '()'], {}), '(self.start_message_listener, ())\n', (5248, 5281), False, 'from _thread import start_new_thread\n'), ((5290, 5345), '_thread.start_new_thread', 'start_new_thread', (['self.configuration_change_handler', '()'], {}), '(self.configuration_change_handler, ())\n', (5306, 5345), False, 'from _thread import start_new_thread\n'), ((5391, 5420), 'utils.read_first_blockchain', 'utils.read_first_blockchain', ([], {}), '()\n', (5418, 5420), False, 'import utils\n'), ((3637, 3694), '_thread.start_new_thread', 'start_new_thread', (['self.threaded_on_receive', '(connection,)'], {}), '(self.threaded_on_receive, (connection,))\n', (3653, 3694), False, 'from _thread import start_new_thread\n'), ((1037, 1057), 'socket.gethostname', 
'socket.gethostname', ([], {}), '()\n', (1055, 1057), False, 'import socket\n'), ((3250, 3320), 'utils.send_message', 'utils.send_message', (['(header, sender, receiver, message)', 'receiver_port'], {}), '((header, sender, receiver, message), receiver_port)\n', (3268, 3320), False, 'import utils\n')] |
#!~/envs/udacity-python-env
import turtle
def draw_flower(some_turtle):
for i in range(1, 3):
some_turtle.forward(100)
some_turtle.right(60)
some_turtle.forward(100)
some_turtle.right(120)
def draw_art():
window = turtle.Screen()
window.bgcolor("grey")
# Create the turtle Brad - Draws a square
brad = turtle.Turtle()
brad.shape("turtle")
brad.speed(20)
brad.color("yellow")
# Put draw of square in loop to draw a flower
for i in range(0, 36):
draw_flower(brad)
brad.right(10)
brad.setheading(270)
brad.forward(400)
window.exitonclick()
draw_art()
| [
"turtle.Screen",
"turtle.Turtle"
] | [((259, 274), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (272, 274), False, 'import turtle\n'), ((360, 375), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (373, 375), False, 'import turtle\n')] |
from tornado import httpserver
from tornado import gen
from tornado.ioloop import IOLoop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write('Hello, world')
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/?", MainHandler)
]
tornado.web.Application.__init__(self, handlers)
def main():
app = Application()
app.listen(80)
IOLoop.instance().start()
if __name__ == '__main__':
main() | [
"tornado.ioloop.IOLoop.instance"
] | [((460, 477), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (475, 477), False, 'from tornado.ioloop import IOLoop\n')] |
#!/usr/bin/env python3
"""
Evolve network architecture on a classification dataset, while at the same time training the weights
with one of several learning algorithms.
"""
import joblib
import time
import torch.utils.data
import logging
import numpy as np
import copy
import os
import pickle
from networks import WeightLearningNetwork
from evolution import rank_by_dominance, reproduce_tournament
from datasets import load_preprocessed_dataset
from learning import train, test, train_and_evaluate, get_performance_value
import utils
# Set up parameters and output dir.
params = utils.load_params(mode='wlnn') # based on terminal input
params['script'] = 'run-wlnn-mnist.py'
writer, out_dir = utils.init_output(params, overwrite=params['overwrite_output'])
os.makedirs(os.path.join(out_dir, 'networks')) # dir to store all networks
if params['use_cuda'] and not torch.cuda.is_available():
logging.info('use_cuda was set but cuda is not available, running on cpu')
params['use_cuda'] = False
device = 'cuda' if params['use_cuda'] else 'cpu'
# Ensure deterministic computation.
utils.seed_all(0)
### Ensure that runs are reproducible even on GPU. Note, this slows down training!
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Load dataset.
train_images, train_labels, test_images, test_labels = load_preprocessed_dataset(
params['dataset'], flatten_images=True, use_torch=True)
train_dataset = torch.utils.data.TensorDataset(train_images, train_labels)
test_dataset = torch.utils.data.TensorDataset(test_images, test_labels)
# Create initial population.
# TODO: Make train_only_outputs a learning_rule.
train_only_outputs = (params['train_only_outputs'] or params['learning_rule'] == 'hebbian')
use_random_feedback = (params['learning_rule'] == 'feedback_alignment')
population = [
WeightLearningNetwork(params['num_inputs'], params['num_outputs'],
params['p_initial_connection_enabled'],
p_add_connection=params['p_add_connection'],
p_add_node=params['p_add_node'],
inherit_weights=params['inherit_weights'],
train_only_outputs=train_only_outputs,
use_random_feedback=use_random_feedback,
add_only_hidden_connections=True)
for _ in range(params['population_size'])]
# Add some nodes manually at the beginning.
for net in population:
for _ in range(net.get_num_connections()):
if np.random.rand() < 0.5:
net.add_node()
# Evaluate the networks before doing any evolution or learning.
for net in population:
net.create_torch_layers(device=device)
with joblib.Parallel(n_jobs=params['num_workers']) as parallel:
# Select champion based on training set for consistency with evolution loop.
objectives = parallel(joblib.delayed(test)(net, \
train_dataset, params, device=device) for net in population)
objectives = np.array(objectives)
rewards = -objectives[:, 0]
accs = objectives[:, 1]
best_index = rewards.argmax()
champion = {'net': copy.deepcopy(population[best_index]),
'reward': rewards[best_index],
'acc': accs[best_index],
'connections': population[best_index].get_num_connections()}
logging.info(f'Pre-evolution and training champion net on test set: '
f'reward: {champion["reward"]:.3f} '
f'(acc: {champion["acc"]:.3f})')
for net in population:
net.delete_torch_layers()
# Store the current champion network.
champion['net'].delete_torch_layers()
champion['net'].save(os.path.join(out_dir, 'champion_network.json'))
# Evolution loop.
generation = -1 # necessary for logging info when there are 0 generations
with joblib.Parallel(n_jobs=params['num_workers']) as parallel:
for generation in range(params['num_generations']):
start_time_generation = time.time()
# Evaluate fitness of all networks.
start_time_evaluation = time.time()
objectives = parallel(joblib.delayed(train_and_evaluate)(
net, train_dataset, test_dataset, params, verbose=0, save_net=(generation % 100 == 0),
filename=os.path.join(out_dir, 'networks', f'generation{generation}-net{i}.json'))
for i, net in enumerate(population))
objectives = np.array(objectives) # shape: population_size, 2
rewards = objectives[:, 0]
accs = objectives[:, 1]
complexities = np.array([net.get_num_connections() for net in population])
complexities = np.maximum(complexities, 1) # prevent 0 division
time_evaluation = time.time() - start_time_evaluation
# Pick best net from this generation (based on reward) and check
# if it's better than the previously observed best net (= champion).
start_time_champion_evaluation = time.time()
best_index = rewards.argmax()
if rewards[best_index] > champion['reward']:
# In contrast to run-wann-mnist.py, we don't have to check on the
# entire training set because the network was already evaluated on
# the complete set.
# TODO: Maybe train champion net on more epochs already here (it's
# done below right now) and compare against results of previous
# champion net. This would take quite a bit of time though because
# I probably need to do it at almost every generation.
champion = {'net': copy.deepcopy(population[best_index]),
'reward': rewards[best_index],
'acc': accs[best_index],
'connections': population[best_index].get_num_connections()}
# Save new champion net to file. Note that this net doesn't have weight_matrices when
# using multiple workers (weight_matrices is only created within the worker process).
champion['net'].delete_torch_layers()
champion['net'].save(os.path.join(out_dir, 'champion_network.json'))
time_champion_evaluation = time.time() - start_time_champion_evaluation
# Write metrics to log and tensorboard.
logging.info(f'{generation} - Best net: reward: {rewards[best_index]:.3f} '
f'(acc: {accs[best_index]:.3f}) - evaluation: {time_evaluation:.1f} s, '
f'champion evaluation: {time_champion_evaluation:.1f} s')
writer.add_scalar('best/reward', rewards[best_index], generation)
writer.add_scalar('best/acc', accs[best_index], generation)
if generation % 20 == 0:
if 'long_training_reward' not in champion:
# Train champion net for more epochs.
# TODO: Do this more elegantly. Maybe make an additional
# parameter num_epochs_long.
long_params = params.copy()
long_params['num_epochs'] = 10
champion['net'].create_torch_layers(device)
loss, acc = train(champion['net'], train_dataset, long_params, device=device)
champion['long_training_reward'] = - get_performance_value(loss, period='last_epoch')
champion['long_training_acc'] = get_performance_value(acc, period='last_epoch')
# Evaluate this long trained net on test set.
loss, acc = test(champion['net'], test_dataset, params, device=device)
champion['test_reward'] = -loss
champion['test_acc'] = acc
# Manually delete weight matrices, so they don't block memory
# (important on cuda).
champion['net'].delete_torch_layers()
utils.log_champion_info(champion)
utils.write_champion_info(writer, champion, generation)
utils.write_networks_stats(writer, population, generation)
utils.log_network_stats(population, writer, generation)
logging.info('')
# TODO: Is this necessary?
#writer.add_histogram('final_acc', accs, generation)
writer.add_histogram('population/acc', accs, generation)
writer.add_histogram('population/connections', [net.get_num_connections() for net
in population], generation)
# Store all accuracies and connections (for learning rate plots).
for i, (net, acc) in enumerate(zip(population, accs)):
writer.add_scalar(f'population/net{i}_acc', acc, generation)
writer.add_scalar(f'population/net{i}_connections', net.get_num_connections(), generation)
# Rank networks based on the evaluation metrics.
start_time_ranking = time.time()
# TODO: This is a dirty hack, I am using rewards for both mean_rewards
# and max_rewards for now. Think about how to make this better. Also,
# should maybe adapt parameters of how often complexity is used vs.
# reward.
ranks = rank_by_dominance(rewards, rewards, complexities,
p_complexity_objective=params['p_complexity_objective'])
time_ranking = time.time() - start_time_ranking
# Make new population by picking parent networks via tournament
# selection and mutating them.
start_time_reproduction = time.time()
new_population = reproduce_tournament(population, ranks, params['tournament_size'],
cull_ratio=params['cull_ratio'],
elite_ratio=params['elite_ratio'],
num_mutations=params['num_mutations_per_generation'])
population = new_population
time_reproduction = time.time() - start_time_reproduction
time_generation = time.time() - start_time_generation
writer.add_scalar('times/complete_generation', time_generation, generation)
writer.add_scalar('times/evaluation', time_evaluation, generation)
writer.add_scalar('times/champion_evaluation', time_champion_evaluation, generation)
writer.add_scalar('times/ranking', time_ranking, generation)
writer.add_scalar('times/reproduction', time_reproduction, generation)
# Log final results and close writer.
logging.info('\nResults at the end of evolution:')
utils.log_champion_info(champion)
utils.write_networks_stats(writer, population, generation)
utils.log_network_stats(population, writer, generation)
writer.close()
# Store performance summary.
utils.store_performance(objectives, out_dir=params['out_dir'])
| [
"numpy.random.rand",
"numpy.array",
"copy.deepcopy",
"utils.init_output",
"utils.store_performance",
"logging.info",
"utils.write_champion_info",
"utils.log_champion_info",
"utils.load_params",
"numpy.maximum",
"learning.get_performance_value",
"learning.train",
"networks.WeightLearningNetwo... | [((582, 612), 'utils.load_params', 'utils.load_params', ([], {'mode': '"""wlnn"""'}), "(mode='wlnn')\n", (599, 612), False, 'import utils\n'), ((697, 760), 'utils.init_output', 'utils.init_output', (['params'], {'overwrite': "params['overwrite_output']"}), "(params, overwrite=params['overwrite_output'])\n", (714, 760), False, 'import utils\n'), ((1092, 1109), 'utils.seed_all', 'utils.seed_all', (['(0)'], {}), '(0)\n', (1106, 1109), False, 'import utils\n'), ((1348, 1434), 'datasets.load_preprocessed_dataset', 'load_preprocessed_dataset', (["params['dataset']"], {'flatten_images': '(True)', 'use_torch': '(True)'}), "(params['dataset'], flatten_images=True, use_torch\n =True)\n", (1373, 1434), False, 'from datasets import load_preprocessed_dataset\n'), ((3350, 3491), 'logging.info', 'logging.info', (['f"""Pre-evolution and training champion net on test set: reward: {champion[\'reward\']:.3f} (acc: {champion[\'acc\']:.3f})"""'], {}), '(\n f"Pre-evolution and training champion net on test set: reward: {champion[\'reward\']:.3f} (acc: {champion[\'acc\']:.3f})"\n )\n', (3362, 3491), False, 'import logging\n'), ((10369, 10422), 'logging.info', 'logging.info', (['"""\nResults at the end of evolution:"""'], {}), '("""\nResults at the end of evolution:""")\n', (10381, 10422), False, 'import logging\n'), ((10420, 10453), 'utils.log_champion_info', 'utils.log_champion_info', (['champion'], {}), '(champion)\n', (10443, 10453), False, 'import utils\n'), ((10454, 10512), 'utils.write_networks_stats', 'utils.write_networks_stats', (['writer', 'population', 'generation'], {}), '(writer, population, generation)\n', (10480, 10512), False, 'import utils\n'), ((10513, 10568), 'utils.log_network_stats', 'utils.log_network_stats', (['population', 'writer', 'generation'], {}), '(population, writer, generation)\n', (10536, 10568), False, 'import utils\n'), ((10614, 10676), 'utils.store_performance', 'utils.store_performance', (['objectives'], {'out_dir': 
"params['out_dir']"}), "(objectives, out_dir=params['out_dir'])\n", (10637, 10676), False, 'import utils\n'), ((773, 806), 'os.path.join', 'os.path.join', (['out_dir', '"""networks"""'], {}), "(out_dir, 'networks')\n", (785, 806), False, 'import os\n'), ((899, 973), 'logging.info', 'logging.info', (['"""use_cuda was set but cuda is not available, running on cpu"""'], {}), "('use_cuda was set but cuda is not available, running on cpu')\n", (911, 973), False, 'import logging\n'), ((1844, 2204), 'networks.WeightLearningNetwork', 'WeightLearningNetwork', (["params['num_inputs']", "params['num_outputs']", "params['p_initial_connection_enabled']"], {'p_add_connection': "params['p_add_connection']", 'p_add_node': "params['p_add_node']", 'inherit_weights': "params['inherit_weights']", 'train_only_outputs': 'train_only_outputs', 'use_random_feedback': 'use_random_feedback', 'add_only_hidden_connections': '(True)'}), "(params['num_inputs'], params['num_outputs'], params[\n 'p_initial_connection_enabled'], p_add_connection=params[\n 'p_add_connection'], p_add_node=params['p_add_node'], inherit_weights=\n params['inherit_weights'], train_only_outputs=train_only_outputs,\n use_random_feedback=use_random_feedback, add_only_hidden_connections=True)\n", (1865, 2204), False, 'from networks import WeightLearningNetwork\n'), ((2728, 2773), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': "params['num_workers']"}), "(n_jobs=params['num_workers'])\n", (2743, 2773), False, 'import joblib\n'), ((3008, 3028), 'numpy.array', 'np.array', (['objectives'], {}), '(objectives)\n', (3016, 3028), True, 'import numpy as np\n'), ((3667, 3713), 'os.path.join', 'os.path.join', (['out_dir', '"""champion_network.json"""'], {}), "(out_dir, 'champion_network.json')\n", (3679, 3713), False, 'import os\n'), ((3813, 3858), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': "params['num_workers']"}), "(n_jobs=params['num_workers'])\n", (3828, 3858), False, 'import joblib\n'), ((3146, 3183), 
'copy.deepcopy', 'copy.deepcopy', (['population[best_index]'], {}), '(population[best_index])\n', (3159, 3183), False, 'import copy\n'), ((3960, 3971), 'time.time', 'time.time', ([], {}), '()\n', (3969, 3971), False, 'import time\n'), ((4049, 4060), 'time.time', 'time.time', ([], {}), '()\n', (4058, 4060), False, 'import time\n'), ((4409, 4429), 'numpy.array', 'np.array', (['objectives'], {}), '(objectives)\n', (4417, 4429), True, 'import numpy as np\n'), ((4632, 4659), 'numpy.maximum', 'np.maximum', (['complexities', '(1)'], {}), '(complexities, 1)\n', (4642, 4659), True, 'import numpy as np\n'), ((4936, 4947), 'time.time', 'time.time', ([], {}), '()\n', (4945, 4947), False, 'import time\n'), ((6257, 6465), 'logging.info', 'logging.info', (['f"""{generation} - Best net: reward: {rewards[best_index]:.3f} (acc: {accs[best_index]:.3f}) - evaluation: {time_evaluation:.1f} s, champion evaluation: {time_champion_evaluation:.1f} s"""'], {}), "(\n f'{generation} - Best net: reward: {rewards[best_index]:.3f} (acc: {accs[best_index]:.3f}) - evaluation: {time_evaluation:.1f} s, champion evaluation: {time_champion_evaluation:.1f} s'\n )\n", (6269, 6465), False, 'import logging\n'), ((8776, 8787), 'time.time', 'time.time', ([], {}), '()\n', (8785, 8787), False, 'import time\n'), ((9058, 9169), 'evolution.rank_by_dominance', 'rank_by_dominance', (['rewards', 'rewards', 'complexities'], {'p_complexity_objective': "params['p_complexity_objective']"}), "(rewards, rewards, complexities, p_complexity_objective=\n params['p_complexity_objective'])\n", (9075, 9169), False, 'from evolution import rank_by_dominance, reproduce_tournament\n'), ((9401, 9412), 'time.time', 'time.time', ([], {}), '()\n', (9410, 9412), False, 'import time\n'), ((9438, 9634), 'evolution.reproduce_tournament', 'reproduce_tournament', (['population', 'ranks', "params['tournament_size']"], {'cull_ratio': "params['cull_ratio']", 'elite_ratio': "params['elite_ratio']", 'num_mutations': 
"params['num_mutations_per_generation']"}), "(population, ranks, params['tournament_size'],\n cull_ratio=params['cull_ratio'], elite_ratio=params['elite_ratio'],\n num_mutations=params['num_mutations_per_generation'])\n", (9458, 9634), False, 'from evolution import rank_by_dominance, reproduce_tournament\n'), ((2541, 2557), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2555, 2557), True, 'import numpy as np\n'), ((4708, 4719), 'time.time', 'time.time', ([], {}), '()\n', (4717, 4719), False, 'import time\n'), ((6155, 6166), 'time.time', 'time.time', ([], {}), '()\n', (6164, 6166), False, 'import time\n'), ((7766, 7799), 'utils.log_champion_info', 'utils.log_champion_info', (['champion'], {}), '(champion)\n', (7789, 7799), False, 'import utils\n'), ((7812, 7867), 'utils.write_champion_info', 'utils.write_champion_info', (['writer', 'champion', 'generation'], {}), '(writer, champion, generation)\n', (7837, 7867), False, 'import utils\n'), ((7880, 7938), 'utils.write_networks_stats', 'utils.write_networks_stats', (['writer', 'population', 'generation'], {}), '(writer, population, generation)\n', (7906, 7938), False, 'import utils\n'), ((7954, 8009), 'utils.log_network_stats', 'utils.log_network_stats', (['population', 'writer', 'generation'], {}), '(population, writer, generation)\n', (7977, 8009), False, 'import utils\n'), ((8022, 8038), 'logging.info', 'logging.info', (['""""""'], {}), "('')\n", (8034, 8038), False, 'import logging\n'), ((9222, 9233), 'time.time', 'time.time', ([], {}), '()\n', (9231, 9233), False, 'import time\n'), ((9829, 9840), 'time.time', 'time.time', ([], {}), '()\n', (9838, 9840), False, 'import time\n'), ((9894, 9905), 'time.time', 'time.time', ([], {}), '()\n', (9903, 9905), False, 'import time\n'), ((2894, 2914), 'joblib.delayed', 'joblib.delayed', (['test'], {}), '(test)\n', (2908, 2914), False, 'import joblib\n'), ((5565, 5602), 'copy.deepcopy', 'copy.deepcopy', (['population[best_index]'], {}), '(population[best_index])\n', 
(5578, 5602), False, 'import copy\n'), ((6072, 6118), 'os.path.join', 'os.path.join', (['out_dir', '"""champion_network.json"""'], {}), "(out_dir, 'champion_network.json')\n", (6084, 6118), False, 'import os\n'), ((7074, 7139), 'learning.train', 'train', (["champion['net']", 'train_dataset', 'long_params'], {'device': 'device'}), "(champion['net'], train_dataset, long_params, device=device)\n", (7079, 7139), False, 'from learning import train, test, train_and_evaluate, get_performance_value\n'), ((7290, 7337), 'learning.get_performance_value', 'get_performance_value', (['acc'], {'period': '"""last_epoch"""'}), "(acc, period='last_epoch')\n", (7311, 7337), False, 'from learning import train, test, train_and_evaluate, get_performance_value\n'), ((7430, 7488), 'learning.test', 'test', (["champion['net']", 'test_dataset', 'params'], {'device': 'device'}), "(champion['net'], test_dataset, params, device=device)\n", (7434, 7488), False, 'from learning import train, test, train_and_evaluate, get_performance_value\n'), ((4091, 4125), 'joblib.delayed', 'joblib.delayed', (['train_and_evaluate'], {}), '(train_and_evaluate)\n', (4105, 4125), False, 'import joblib\n'), ((7193, 7241), 'learning.get_performance_value', 'get_performance_value', (['loss'], {'period': '"""last_epoch"""'}), "(loss, period='last_epoch')\n", (7214, 7241), False, 'from learning import train, test, train_and_evaluate, get_performance_value\n'), ((4247, 4319), 'os.path.join', 'os.path.join', (['out_dir', '"""networks"""', 'f"""generation{generation}-net{i}.json"""'], {}), "(out_dir, 'networks', f'generation{generation}-net{i}.json')\n", (4259, 4319), False, 'import os\n')] |
import time as t
import hashlib
class Calibrate:
""" Calibration class for CPU mining """
def __init__(self):
pass
def calibrate(self):
""" Calibrates the cpu power """
time_started = t.time()
for x in range(10000000):
hashlib.sha512('hash'.encode())
hashlib.blake2b('hash'.encode())
time_finished = t.time()
time_passed = time_finished - time_started
# hashes = 100000000 / time_passed
return time_passed
def run(self):
""" Runs the calibration """
cali = []
for x in range(5):
cal = self.calibrate()
cali.append(cal)
total = 0
for x in cali:
total = total + x
average = total / len(cali)
print('calibration done!')
print(average)
return average
class Minging:
""" CPU mining algorithm """
def __init__(self):
calibrate = Calibrate()
self.hashes_a_second = calibrate.run()
def calculate_difficulty(self):
""" Calculates block difficulty """
if self.hashes_a_second < 1:
return '000000000000000000'
if self.hashes_a_second > 1 and self.hashes_a_second < 4:
return '000000000000000'
if self.hashes_a_second > 4:
return '00000000000'
def run(self, previous_proof):
difficulty = self.calculate_difficulty()
start = t.time()
proof = 1
while True:
hashd = hashlib.sha256(str(proof**2 -previous_proof**2).encode()).hexdigest()
if hashd[:len(difficulty)] == difficulty:
end_time = t.time()
passed = end_time - start
print(passed)
return proof
else:
proof = proof + 1
# def random_question()
if __name__ == '__main__':
mining = Minging()
print(len('000000000000000000'))
print(mining.run(10)) | [
"time.time"
] | [((196, 204), 'time.time', 't.time', ([], {}), '()\n', (202, 204), True, 'import time as t\n'), ((322, 330), 'time.time', 't.time', ([], {}), '()\n', (328, 330), True, 'import time as t\n'), ((1205, 1213), 'time.time', 't.time', ([], {}), '()\n', (1211, 1213), True, 'import time as t\n'), ((1381, 1389), 'time.time', 't.time', ([], {}), '()\n', (1387, 1389), True, 'import time as t\n')] |
"""
This module use the networkx package to deal with graphs.
"""
from networkx import Graph, shortest_path
from networkx.algorithms.shortest_paths.unweighted import all_pairs_shortest_path
from networkx.algorithms.shortest_paths.unweighted import single_source_shortest_path
import constants as c
def build_networkXGraph_from_spaCy_depGraph(sentence):
"""
Given a spaCy-parsed sentence, return the relative networkXGraph.
"""
g = Graph()
tokens = list(sentence)
g.add_nodes_from(tokens)
_add_edges_from_spaCy_depGraph(g, sentence.root)
return g
def _add_edges_from_spaCy_depGraph(g, node):
for left_child in node.lefts:
g.add_edge(left_child, node)
_add_edges_from_spaCy_depGraph(g, left_child)
for right_child in node.rights:
g.add_edge(node, right_child)
_add_edges_from_spaCy_depGraph(g, right_child)
def find_shortest_paths(sentence):
g = build_networkXGraph_from_spaCy_depGraph(sentence)
all_shortest_paths = all_pairs_shortest_path(g)
return all_shortest_paths
def find_shortest_paths_from_source(sentence, start_token):
g = build_networkXGraph_from_spaCy_depGraph(sentence)
shortest_paths_from_source = single_source_shortest_path(g, start_token)
return shortest_paths_from_source
def filter_paths(paths_dict):
"""
Filter paths in the form provided by NetworkX (i.e.: {start_node_id: {end_node_id: [path]}})
:return: "happy" paths, i.e. paths which satisfy some requirements
"""
happy_paths = []
for start_node, end_nodes_dict in paths_dict.items():
for end_node, path in end_nodes_dict.items():
cur_path = paths_dict[start_node][end_node]
if satisfyRequirements(cur_path):
happy_paths.append(cur_path)
return happy_paths
def satisfyRequirements(path):
""" Method for check if the path between spaCy tokens is appropriate to be retrieved"""
if not hasVerb(path):
return False
if not hasConceptsAtTheEnds(path):
return False
if not isConceptDefinition(path):
return False
return True
def hasVerb(path):
return sum(1 for t in path if t.pos_ == c.VERB_POSTAG and t.dep_ != "auxpass" and t.dep_ != "aux") <= 1
def hasConceptsAtTheEnds(path):
return (path[0].ent_id_ != c.NULL_BABELNET_ID
or path[0].pos_ == c.PRON_POSTAG
or path[0].pos_ == c.PROPN_POSTAG) and \
path[-1].ent_id_ != c.NULL_BABELNET_ID
def isConceptDefinition(path):
return (path[0].dep_ in c.SUBJ_DEPTAGS) and \
(path[-1].dep_ in c.OBJ_DEPTAGS)
def extract_triple(path):
return (path[0], path[1:-1], path[-1])
| [
"networkx.algorithms.shortest_paths.unweighted.single_source_shortest_path",
"networkx.Graph",
"networkx.algorithms.shortest_paths.unweighted.all_pairs_shortest_path"
] | [((444, 451), 'networkx.Graph', 'Graph', ([], {}), '()\n', (449, 451), False, 'from networkx import Graph, shortest_path\n'), ((996, 1022), 'networkx.algorithms.shortest_paths.unweighted.all_pairs_shortest_path', 'all_pairs_shortest_path', (['g'], {}), '(g)\n', (1019, 1022), False, 'from networkx.algorithms.shortest_paths.unweighted import all_pairs_shortest_path\n'), ((1206, 1249), 'networkx.algorithms.shortest_paths.unweighted.single_source_shortest_path', 'single_source_shortest_path', (['g', 'start_token'], {}), '(g, start_token)\n', (1233, 1249), False, 'from networkx.algorithms.shortest_paths.unweighted import single_source_shortest_path\n')] |
""" import peewee, datetime, argparse
from database.database import db
from database.models import SQLAuthToken
from utils.security.auth import gen_account_keypair """
from utils.security.auth import gen_account_keypair
from database.database import PGDatabase
if __name__ == "__main__":
"""
parser = argparse.ArgumentParser(prog="launch", usage='%(prog)s [options] path', description="configure api launching")
parser.add_argument('--token-name', metavar="token_name", type=str, help="")
args = parser.parse_args()
datenow = datetime.datetime.utcnow()
rsa = gen_account_keypair()
try:
db.initialize(peewee.PostgresqlDatabase("quarkey", host="localhost", port=5432, user="postgres", password="<PASSWORD>", autocommit=True, autorollback=True))
db.connect()
except Exception as e:
print("Failed to connect to database")
exit(0)
q1 = SQLAuthToken.create(token_type=args.token_name, public_key=rsa[0], private_key=rsa[1], updated_at=datenow, created_at=datenow)
"""
db = PGDatabase()
conn = db.connect()
rsa = gen_account_keypair()
q1 = None
with conn.cursor() as cur:
q1 = cur.execute("INSERT INTO auth_token_rsa (type, public_key, private_key) VALUES (%s, %s, %s) RETURNING id", ("account_authentication", rsa[0], rsa[1]))
conn.commit()
print(q1) | [
"utils.security.auth.gen_account_keypair",
"database.database.PGDatabase"
] | [((1049, 1061), 'database.database.PGDatabase', 'PGDatabase', ([], {}), '()\n', (1059, 1061), False, 'from database.database import PGDatabase\n'), ((1097, 1118), 'utils.security.auth.gen_account_keypair', 'gen_account_keypair', ([], {}), '()\n', (1116, 1118), False, 'from utils.security.auth import gen_account_keypair\n')] |
import os
import sys
import numpy as np
import time
from utils_io import get_job_config
from utils.model_utils import read_data
from utils.args import parse_job_args
from fedsem import Fedsem_Trainer
from fedavg import Fedavg_Trainer
from fedprox import Fedprox_Trainer
from fedsgd import Fedsgd_Trainer
from fedbayes import Fedbayes_Sing_Trainer
from modelsaver import Model_Saver
def read_yamlconfig(args):
yaml_file = os.path.join("..", "configs", args.experiment, args.configuration)
job = get_job_config(yaml_file)
params = job['PARAMS']
rounds = params['num-rounds']
print("config rounds: ", rounds)
lr = params['lr']
print("config lr: ", lr )
epochs = params['epochs']
print("config epochs: ", epochs)
clients_per_round = params['clients-per-round']
print("config clients per round: ", clients_per_round)
return params
def main():
args = parse_job_args()
config = read_yamlconfig(args)
# changed 29/08/2021 the follow lines are for google cloud dir
base_dir = os.path.join(os.path.expanduser('~'), 'leaf')
train_data_dir = os.path.join(base_dir, 'data', args.dataset, 'data', 'train')
test_data_dir = os.path.join(base_dir, 'data', args.dataset, 'data', 'test')
users, groups, train_data, test_data = read_data(train_data_dir, test_data_dir)
exp_seeds, book_keep = config["exp-seeds"], [0.] * len(config["exp-seeds"])
for j, rnd_sed in enumerate(exp_seeds):
config["seed"] = rnd_sed
if args.experiment == 'fedavg':
trainer = Fedavg_Trainer(users, groups, train_data, test_data)
metric = trainer.begins(config, args)
trainer.ends()
elif args.experiment == 'fedprox':
trainer = Fedprox_Trainer(users, groups, train_data, test_data)
metric = trainer.begins(config, args)
trainer.ends()
elif args.experiment == 'fedcluster':
pass
elif args.experiment == 'fedsgd':
trainer = Fedsgd_Trainer(users, groups, train_data, test_data)
metric =trainer.begins(config, args)
trainer.ends()
elif args.experiment == 'fedbayes':
trainer = Fedbayes_Sing_Trainer(users, groups, train_data, test_data)
metric =trainer.begins(config, args)
trainer.ends()
elif args.experiment == 'modelsaver':
trainer = Model_Saver(users, groups, train_data, test_data)
metric = trainer.begins(config, args)
trainer.ends()
elif args.experiment == 'fedsem':
trainer = Fedsem_Trainer(users, groups, train_data, test_data)
metric = trainer.begins(config, args)
trainer.ends()
else:
print("Applications not defined. Please check configs directory if the name is right.")
break
book_keep[j] = metric
finals = np.array(book_keep) * 100
print(finals)
# print("{} runs - std: {}, med: {}".format(len(exp_seeds),
# np.var(finals),
# np.median(finals)))
main()
| [
"fedbayes.Fedbayes_Sing_Trainer",
"fedprox.Fedprox_Trainer",
"os.path.join",
"utils.args.parse_job_args",
"fedsgd.Fedsgd_Trainer",
"modelsaver.Model_Saver",
"utils.model_utils.read_data",
"numpy.array",
"fedsem.Fedsem_Trainer",
"utils_io.get_job_config",
"fedavg.Fedavg_Trainer",
"os.path.expan... | [((430, 496), 'os.path.join', 'os.path.join', (['""".."""', '"""configs"""', 'args.experiment', 'args.configuration'], {}), "('..', 'configs', args.experiment, args.configuration)\n", (442, 496), False, 'import os\n'), ((507, 532), 'utils_io.get_job_config', 'get_job_config', (['yaml_file'], {}), '(yaml_file)\n', (521, 532), False, 'from utils_io import get_job_config\n'), ((924, 940), 'utils.args.parse_job_args', 'parse_job_args', ([], {}), '()\n', (938, 940), False, 'from utils.args import parse_job_args\n'), ((1131, 1192), 'os.path.join', 'os.path.join', (['base_dir', '"""data"""', 'args.dataset', '"""data"""', '"""train"""'], {}), "(base_dir, 'data', args.dataset, 'data', 'train')\n", (1143, 1192), False, 'import os\n'), ((1213, 1273), 'os.path.join', 'os.path.join', (['base_dir', '"""data"""', 'args.dataset', '"""data"""', '"""test"""'], {}), "(base_dir, 'data', args.dataset, 'data', 'test')\n", (1225, 1273), False, 'import os\n'), ((1320, 1360), 'utils.model_utils.read_data', 'read_data', (['train_data_dir', 'test_data_dir'], {}), '(train_data_dir, test_data_dir)\n', (1329, 1360), False, 'from utils.model_utils import read_data\n'), ((1077, 1100), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1095, 1100), False, 'import os\n'), ((2969, 2988), 'numpy.array', 'np.array', (['book_keep'], {}), '(book_keep)\n', (2977, 2988), True, 'import numpy as np\n'), ((1590, 1642), 'fedavg.Fedavg_Trainer', 'Fedavg_Trainer', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, train_data, test_data)\n', (1604, 1642), False, 'from fedavg import Fedavg_Trainer\n'), ((1785, 1838), 'fedprox.Fedprox_Trainer', 'Fedprox_Trainer', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, train_data, test_data)\n', (1800, 1838), False, 'from fedprox import Fedprox_Trainer\n'), ((2043, 2095), 'fedsgd.Fedsgd_Trainer', 'Fedsgd_Trainer', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, 
train_data, test_data)\n', (2057, 2095), False, 'from fedsgd import Fedsgd_Trainer\n'), ((2250, 2309), 'fedbayes.Fedbayes_Sing_Trainer', 'Fedbayes_Sing_Trainer', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, train_data, test_data)\n', (2271, 2309), False, 'from fedbayes import Fedbayes_Sing_Trainer\n'), ((2454, 2503), 'modelsaver.Model_Saver', 'Model_Saver', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, train_data, test_data)\n', (2465, 2503), False, 'from modelsaver import Model_Saver\n'), ((2653, 2705), 'fedsem.Fedsem_Trainer', 'Fedsem_Trainer', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, train_data, test_data)\n', (2667, 2705), False, 'from fedsem import Fedsem_Trainer\n')] |
import argparse
import os
import re
def main(args):
folder_path = os.path.join('..', 'exp', args.dataset)
res = re.compile('Best val accuracy: ([.\d]+)')
best_acc = 0
best_model = None
for folder in os.listdir(folder_path):
train_hist_file = os.path.join(folder_path, folder, 'train_history.txt')
if not os.path.isfile(train_hist_file):
break
with open(train_hist_file, 'r') as f :
lines = f.readlines()
for l in lines:
if 'Best val accuracy:' in l:
m = res.match(l)
acc = float(m.groups()[0])
if best_acc < acc:
best_acc = acc
best_model = folder
print('Best model folder: %s \n Best accuracy: %f\n'%(best_model, best_acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='cub', type=str)
args = parser.parse_args()
main(args)
| [
"os.listdir",
"argparse.ArgumentParser",
"re.compile",
"os.path.join",
"os.path.isfile"
] | [((71, 110), 'os.path.join', 'os.path.join', (['""".."""', '"""exp"""', 'args.dataset'], {}), "('..', 'exp', args.dataset)\n", (83, 110), False, 'import os\n'), ((121, 163), 're.compile', 're.compile', (['"""Best val accuracy: ([.\\\\d]+)"""'], {}), "('Best val accuracy: ([.\\\\d]+)')\n", (131, 163), False, 'import re\n'), ((221, 244), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (231, 244), False, 'import os\n'), ((841, 866), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (864, 866), False, 'import argparse\n'), ((272, 326), 'os.path.join', 'os.path.join', (['folder_path', 'folder', '"""train_history.txt"""'], {}), "(folder_path, folder, 'train_history.txt')\n", (284, 326), False, 'import os\n'), ((342, 373), 'os.path.isfile', 'os.path.isfile', (['train_hist_file'], {}), '(train_hist_file)\n', (356, 373), False, 'import os\n')] |
import os.path
from setuptools import setup
version = "0.1"
install_requires = ["pyramid", "PyJWT", "cryptography", "requests", "zope.interface"]
tests_require = ["pytest", "pytest-flake8", "requests-mock", "WebTest"]
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, "README.rst")).read()
CHANGES = open(os.path.join(here, "CHANGES.rst")).read()
setup(
name="pyramid_iap",
version=version,
description="Google Cloud Identity-Aware Proxy authentication policy for Pyramid",
long_description=README + "\n\n" + CHANGES,
keywords='Pyramid JWT IAP authentication security',
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries :: Python Modules",
],
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/lrowe/pyramid_iap",
license="BSD",
packages=["pyramid_iap"],
package_dir={"": "src"},
include_package_data=True,
zip_safe=True,
install_requires=install_requires,
tests_require=tests_require,
extras_require={"tests": tests_require},
setup_requires=["pytest-runner"],
)
| [
"setuptools.setup"
] | [((385, 1414), 'setuptools.setup', 'setup', ([], {'name': '"""pyramid_iap"""', 'version': 'version', 'description': '"""Google Cloud Identity-Aware Proxy authentication policy for Pyramid"""', 'long_description': "(README + '\\n\\n' + CHANGES)", 'keywords': '"""Pyramid JWT IAP authentication security"""', 'classifiers': "['Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Libraries :: Python Modules']", 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/lrowe/pyramid_iap"""', 'license': '"""BSD"""', 'packages': "['pyramid_iap']", 'package_dir': "{'': 'src'}", 'include_package_data': '(True)', 'zip_safe': '(True)', 'install_requires': 'install_requires', 'tests_require': 'tests_require', 'extras_require': "{'tests': tests_require}", 'setup_requires': "['pytest-runner']"}), "(name='pyramid_iap', version=version, description=\n 'Google Cloud Identity-Aware Proxy authentication policy for Pyramid',\n long_description=README + '\\n\\n' + CHANGES, keywords=\n 'Pyramid JWT IAP authentication security', classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Libraries :: Python Modules'], author\n ='<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/lrowe/pyramid_iap', license='BSD', packages=[\n 'pyramid_iap'], package_dir={'': 'src'}, include_package_data=True,\n zip_safe=True, 
install_requires=install_requires, tests_require=\n tests_require, extras_require={'tests': tests_require}, setup_requires=\n ['pytest-runner'])\n", (390, 1414), False, 'from setuptools import setup\n')] |
import urllib2
import json
global base_url
global auth_key
global ticket
def get_ticket():
url = base_url + "/alfresco/service/api/login"
headers = {"Content-Type": "application/json"}
data = auth_key
try:
response = make_post_request(url, data, headers)
return json.load(response).get("data").get("ticket")
except urllib2.HTTPError as err:
print("Failed to Login ")
print(err.code)
def make_request(url, headers):
request = urllib2.Request(url)
for key, value in headers.items():
request.add_header(key, value)
return urllib2.urlopen(request)
def make_post_request(url, data, headers):
request = urllib2.Request(url)
for key, value in headers.items():
request.add_header(key, value)
return urllib2.urlopen(request, json.dumps(data))
| [
"urllib2.Request",
"json.dumps",
"urllib2.urlopen",
"json.load"
] | [((486, 506), 'urllib2.Request', 'urllib2.Request', (['url'], {}), '(url)\n', (501, 506), False, 'import urllib2\n'), ((596, 620), 'urllib2.urlopen', 'urllib2.urlopen', (['request'], {}), '(request)\n', (611, 620), False, 'import urllib2\n'), ((680, 700), 'urllib2.Request', 'urllib2.Request', (['url'], {}), '(url)\n', (695, 700), False, 'import urllib2\n'), ((815, 831), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (825, 831), False, 'import json\n'), ((297, 316), 'json.load', 'json.load', (['response'], {}), '(response)\n', (306, 316), False, 'import json\n')] |
from cjax.continuation.methods.predictor.base_predictor import Predictor
from cjax.utils.math_trees import pytree_element_add
class NaturalPredictor(Predictor):
"""Natural Predictor only updates continuation parameter"""
def __init__(self, concat_states, delta_s):
super().__init__(concat_states)
self.delta_s = delta_s
def _assign_states(self) -> None:
super()._assign_states()
def prediction_step(self):
"""Given current state predict next state.
Updates (state: problem parameters, bparam: continuation parameter) Tuple
"""
self._assign_states()
self._bparam = pytree_element_add(self._bparam, self.delta_s)
| [
"cjax.utils.math_trees.pytree_element_add"
] | [((649, 695), 'cjax.utils.math_trees.pytree_element_add', 'pytree_element_add', (['self._bparam', 'self.delta_s'], {}), '(self._bparam, self.delta_s)\n', (667, 695), False, 'from cjax.utils.math_trees import pytree_element_add\n')] |
from pydantic import EmailStr
from awesome_sso.mail.mailgun import MailGun
def test_send_message(mailgun: MailGun):
resp = mailgun.send_simple_message(
from_name="test",
from_email=EmailStr("<EMAIL>"),
to=[EmailStr("<EMAIL>")],
subject="test title",
text="test content",
)
resp.close()
def test_send_template(mailgun: MailGun):
resp = mailgun.send_template(
from_name="test",
from_email=EmailStr("<EMAIL>"),
to=[EmailStr("<EMAIL>")],
subject="test title",
template="test.alert",
data={
"title": "hello from unit test",
"content": "test content from unit test",
},
)
resp.close()
| [
"pydantic.EmailStr"
] | [((204, 223), 'pydantic.EmailStr', 'EmailStr', (['"""<EMAIL>"""'], {}), "('<EMAIL>')\n", (212, 223), False, 'from pydantic import EmailStr\n'), ((464, 483), 'pydantic.EmailStr', 'EmailStr', (['"""<EMAIL>"""'], {}), "('<EMAIL>')\n", (472, 483), False, 'from pydantic import EmailStr\n'), ((237, 256), 'pydantic.EmailStr', 'EmailStr', (['"""<EMAIL>"""'], {}), "('<EMAIL>')\n", (245, 256), False, 'from pydantic import EmailStr\n'), ((497, 516), 'pydantic.EmailStr', 'EmailStr', (['"""<EMAIL>"""'], {}), "('<EMAIL>')\n", (505, 516), False, 'from pydantic import EmailStr\n')] |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from onnxruntime.transformers.fusion_options import FusionOptions
from transformers.onnx import validate_model_outputs
from .convert import convert_to_onnx, parser_export
from .optimize_model import optimize, quantize, parser_optimize
SUPPORTED_MODEL_TYPE = {"bert", "distilbert", "albert", "roberta", "bart", "gpt2"}
def main():
parser = ArgumentParser(conflict_handler='resolve', parents=[parser_export(), parser_optimize()])
args = parser.parse_args()
args.output = args.output if args.output.suffix else args.output.joinpath("model.onnx")
if not args.output.parent.exists():
args.output.parent.mkdir(parents=True)
tokenizer, model, onnx_config, onnx_outputs = convert_to_onnx(
args.model_name_or_path,
args.output,
args.feature,
args.opset
)
validate_model_outputs(onnx_config, tokenizer, model, args.output, onnx_outputs, atol=args.atol)
if model.config.model_type not in SUPPORTED_MODEL_TYPE:
raise ValueError(f"{model.config.model_type} ({args.model_name_or_path}) is not supported for ONNX Runtime "
f"optimization. Supported model types are " + ", ".join(SUPPORTED_MODEL_TYPE))
optimization_options = FusionOptions.parse(args)
model_type = getattr(model.config, "model_type")
model_type = "bert" if "bert" in model_type else model_type
num_heads = getattr(model.config, "num_attention_heads", 0)
hidden_size = getattr(model.config, "hidden_size", 0)
args.optimized_output = optimize(
args.output,
model_type,
num_heads=num_heads,
hidden_size=hidden_size,
opt_level=args.opt_level,
optimization_options=optimization_options,
use_gpu=args.use_gpu,
only_onnxruntime=args.only_onnxruntime,
use_external_format=args.use_external_format
)
validate_model_outputs(onnx_config, tokenizer, model, args.optimized_output, onnx_outputs, atol=args.atol)
if args.quantize_dynamic:
args.quantized_output = quantize(args.optimized_output, use_external_format=args.use_external_format)
validate_model_outputs(onnx_config, tokenizer, model, args.quantized_output, onnx_outputs, atol=args.atol)
if __name__ == "__main__":
main()
| [
"onnxruntime.transformers.fusion_options.FusionOptions.parse",
"transformers.onnx.validate_model_outputs"
] | [((1473, 1573), 'transformers.onnx.validate_model_outputs', 'validate_model_outputs', (['onnx_config', 'tokenizer', 'model', 'args.output', 'onnx_outputs'], {'atol': 'args.atol'}), '(onnx_config, tokenizer, model, args.output,\n onnx_outputs, atol=args.atol)\n', (1495, 1573), False, 'from transformers.onnx import validate_model_outputs\n'), ((1880, 1905), 'onnxruntime.transformers.fusion_options.FusionOptions.parse', 'FusionOptions.parse', (['args'], {}), '(args)\n', (1899, 1905), False, 'from onnxruntime.transformers.fusion_options import FusionOptions\n'), ((2514, 2624), 'transformers.onnx.validate_model_outputs', 'validate_model_outputs', (['onnx_config', 'tokenizer', 'model', 'args.optimized_output', 'onnx_outputs'], {'atol': 'args.atol'}), '(onnx_config, tokenizer, model, args.optimized_output,\n onnx_outputs, atol=args.atol)\n', (2536, 2624), False, 'from transformers.onnx import validate_model_outputs\n'), ((2770, 2880), 'transformers.onnx.validate_model_outputs', 'validate_model_outputs', (['onnx_config', 'tokenizer', 'model', 'args.quantized_output', 'onnx_outputs'], {'atol': 'args.atol'}), '(onnx_config, tokenizer, model, args.quantized_output,\n onnx_outputs, atol=args.atol)\n', (2792, 2880), False, 'from transformers.onnx import validate_model_outputs\n')] |
from django.db import models
from django.utils.translation import ugettext as _
from django.contrib.postgres.fields import JSONField
from django.conf import settings
class Profile(models.Model):
mobile_number = JSONField(default={})
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='profile',
)
notify_by_email = models.BooleanField(default=True)
notify_by_sms = models.BooleanField(default=False)
| [
"django.db.models.OneToOneField",
"django.contrib.postgres.fields.JSONField",
"django.db.models.BooleanField"
] | [((217, 238), 'django.contrib.postgres.fields.JSONField', 'JSONField', ([], {'default': '{}'}), '(default={})\n', (226, 238), False, 'from django.contrib.postgres.fields import JSONField\n'), ((250, 350), 'django.db.models.OneToOneField', 'models.OneToOneField', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE', 'related_name': '"""profile"""'}), "(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n related_name='profile')\n", (270, 350), False, 'from django.db import models\n'), ((400, 433), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (419, 433), False, 'from django.db import models\n'), ((454, 488), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (473, 488), False, 'from django.db import models\n')] |
import pygame
from pygame.locals import *
from constants import *
from pacman import Pacman, LifeIcon
from nodes import NodeGroup
from ghosts import GhostGroup
from pellets import Pellets_Group
from sprites import Spritesheet
from maze import Maze
from welcome import Welcome
class GameController:
"""
This the class that controls our basic game loop, what this method does
is update what happens in game based on other events, and what just keeps
game running until we end it
"""
def __init__(self):
pygame.init()
self.nodes = None
self.pacman = None
self.ghosts = None
self.game = None
self.pellets_eaten = 0
self.screen = pygame.display.set_mode(SCREENSIZE, 0, 32)
self.background = None
self.set_background()
self.clock = pygame.time.Clock()
def set_background(self):
"""
We create a background and set it to the color BLACK that we defined in
the constants.py file.
"""
self.background = pygame.surface.Surface(SCREENSIZE).convert()
self.background.fill(BLACK)
def start_game(self):
self.sheet = Spritesheet()
self.nodes = NodeGroup("maze.txt")
self.pellets = Pellets_Group("maze.txt")
self.pacman = Pacman(self.nodes, self.sheet)
self.ghosts = GhostGroup(self.nodes, self.sheet)
self.life_icons = LifeIcon(self.sheet)
self.paused = False
self.maze = Maze(self.sheet)
self.maze.get_maze("maze")
self.maze.combine_maze(self.background)
def update(self):
"""
The update method is a method that we call once per frame of the game.
It's basically our game loop
"""
dt = self.clock.tick(30) / 1000.0
if not self.paused:
self.pacman.update(dt)
self.ghosts.update(dt, self.pacman)
self.check_updater()
self.render()
def check_ghost_collision(self):
self.ghosts.escape(self.pellets_eaten)
ghost = self.pacman.collide_ghost(self.ghosts.ghosts)
if ghost is not None:
if ghost.mode.name == "FEAR":
ghost.respawn()
elif ghost.mode.name != "SPAWN":
if self.pacman.lives == 0:
self.start_game()
else:
self.pacman.lives -= 1
self.restart_level()
def check_updater(self):
"""
This method checks for certain events.
Right now it is just checking to see when we exit out of the game.
:return:
"""
for event in pygame.event.get():
if event.type == QUIT:
exit()
elif event.type == KEYDOWN:
if event.key == K_SPACE:
self.paused = not self.paused
self.check_collision()
self.check_ghost_collision()
def render(self):
"""
The render method is the method we'll use to draw the images to the screen.
it uses the update method, it is consistently running until the
window is closed, right now it just keeps drawing what we want on screen
"""
self.screen.blit(self.background, (0, 0))
#self.nodes.render(self.screen)
self.pellets.render(self.screen)
self.pacman.render(self.screen)
self.ghosts.render(self.screen)
self.life_icons.render(self.screen, self.pacman.lives-1)
pygame.display.update()
def check_collision(self):
"""
This method finds the pellet collison and it has collided, it will
remove that pellet from the pellet list and then it will also update
the score accordingly.
"""
self.paclives = self.pacman.lives
pellete = self.pacman.collide_pellets(self.pellets.pellets_list)
if pellete:
self.pellets.pellets_list.remove(pellete)
# self.pacman.get_score(pellete.points)
self.pellets_eaten+=1
if pellete.name == "powerpellet":
self.pellets_eaten += 1
self.ghosts.engage_chase()
if self.pellets.is_empty():
self.start_game()
self.pacman.lives = self.paclives
else:
pass
def restart_level(self):
self.paused = True
self.pacman.reset()
self.ghosts = GhostGroup(self.nodes, self.sheet)
def get_score(self, points):
"""
Updates the score with given points and return the final score.
"""
self.pacman.score += points
return self.pacman.score
| [
"pygame.init",
"pacman.Pacman",
"pygame.event.get",
"pygame.surface.Surface",
"pygame.display.set_mode",
"pygame.time.Clock",
"maze.Maze",
"pacman.LifeIcon",
"sprites.Spritesheet",
"pellets.Pellets_Group",
"nodes.NodeGroup",
"pygame.display.update",
"ghosts.GhostGroup"
] | [((536, 549), 'pygame.init', 'pygame.init', ([], {}), '()\n', (547, 549), False, 'import pygame\n'), ((708, 750), 'pygame.display.set_mode', 'pygame.display.set_mode', (['SCREENSIZE', '(0)', '(32)'], {}), '(SCREENSIZE, 0, 32)\n', (731, 750), False, 'import pygame\n'), ((833, 852), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (850, 852), False, 'import pygame\n'), ((1175, 1188), 'sprites.Spritesheet', 'Spritesheet', ([], {}), '()\n', (1186, 1188), False, 'from sprites import Spritesheet\n'), ((1210, 1231), 'nodes.NodeGroup', 'NodeGroup', (['"""maze.txt"""'], {}), "('maze.txt')\n", (1219, 1231), False, 'from nodes import NodeGroup\n'), ((1255, 1280), 'pellets.Pellets_Group', 'Pellets_Group', (['"""maze.txt"""'], {}), "('maze.txt')\n", (1268, 1280), False, 'from pellets import Pellets_Group\n'), ((1303, 1333), 'pacman.Pacman', 'Pacman', (['self.nodes', 'self.sheet'], {}), '(self.nodes, self.sheet)\n', (1309, 1333), False, 'from pacman import Pacman, LifeIcon\n'), ((1356, 1390), 'ghosts.GhostGroup', 'GhostGroup', (['self.nodes', 'self.sheet'], {}), '(self.nodes, self.sheet)\n', (1366, 1390), False, 'from ghosts import GhostGroup\n'), ((1417, 1437), 'pacman.LifeIcon', 'LifeIcon', (['self.sheet'], {}), '(self.sheet)\n', (1425, 1437), False, 'from pacman import Pacman, LifeIcon\n'), ((1486, 1502), 'maze.Maze', 'Maze', (['self.sheet'], {}), '(self.sheet)\n', (1490, 1502), False, 'from maze import Maze\n'), ((2650, 2668), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2666, 2668), False, 'import pygame\n'), ((3496, 3519), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3517, 3519), False, 'import pygame\n'), ((4427, 4461), 'ghosts.GhostGroup', 'GhostGroup', (['self.nodes', 'self.sheet'], {}), '(self.nodes, self.sheet)\n', (4437, 4461), False, 'from ghosts import GhostGroup\n'), ((1045, 1079), 'pygame.surface.Surface', 'pygame.surface.Surface', (['SCREENSIZE'], {}), '(SCREENSIZE)\n', (1067, 1079), False, 'import pygame\n')] 
|
# Written by <NAME> 2021-09-23
import itertools
import random
import time
import phue as hue
max_number_of_exercises = 100
enable_hue = True
hue_bridge_ip = '10.0.0.169'
hue_light_name = 'Stue ved skyvedør høyre'
def input_integer_number(message):
while True:
try:
return int(input(message))
except:
pass
# Color space conversion from phue github https://github.com/studioimaginaire/phue/blob/master/examples/rgb_colors.py
def rgb_to_xy(red, green, blue):
""" conversion of RGB colors to CIE1931 XY colors
Formulas implemented from: https://gist.github.com/popcorn245/30afa0f98eea1c2fd34d
Args:
red (float): a number between 0.0 and 1.0 representing red in the RGB space
green (float): a number between 0.0 and 1.0 representing green in the RGB space
blue (float): a number between 0.0 and 1.0 representing blue in the RGB space
Returns:
xy (list): x and y
"""
# gamma correction
red = pow((red + 0.055) / (1.0 + 0.055), 2.4) if red > 0.04045 else (red / 12.92)
green = pow((green + 0.055) / (1.0 + 0.055), 2.4) if green > 0.04045 else (green / 12.92)
blue = pow((blue + 0.055) / (1.0 + 0.055), 2.4) if blue > 0.04045 else (blue / 12.92)
# convert rgb to xyz
x = red * 0.649926 + green * 0.103455 + blue * 0.197109
y = red * 0.234327 + green * 0.743075 + blue * 0.022598
z = green * 0.053077 + blue * 1.035763
# convert xyz to xy
x = x / (x + y + z)
y = y / (x + y + z)
return [x, y]
def connect_hue_bridge():
while True:
try:
hb = hue.Bridge(hue_bridge_ip)
hb.connect()
return hb
except hue.PhueRegistrationException as pre:
print("\nPlease connect to Philips Hue bridge before first use.")
print("Set Hue Bridge IP address and light name for the light to be controlled.")
print("Also put light in color-mode in your Hue-app.")
print("\nIf this is OK, press the button on you Hue bridge now, and within 30 s hit ENTER.")
print("\nNo Hue light available? Set enable_hue to False to get rid of this!")
input("Press ENTER to continue...")
print("\n")
except Exception as e:
print("Unknown error occurred..")
print("\nNo Hue light available? Set enable_hue to False to get rid of this!")
quit(0)
def main():
print("Python Multiplication Table Learner 1.0\n")
if enable_hue:
hb = connect_hue_bridge()
origxy = hb.get_light(hue_light_name, 'xy')
message = "Select number of exercises, maximum {}: ".format(max_number_of_exercises)
number_of_exercises = min(input_integer_number(message), max_number_of_exercises)
print("\n Ready!")
exercises = list(itertools.product(range(0, 10), repeat=2))
random.shuffle(exercises)
for ii, exercise in enumerate(exercises[:number_of_exercises]):
print("\n Exercise number {} of {}:".format(ii + 1, number_of_exercises))
answer = input_integer_number(" {} x {} = ".format(exercise[0], exercise[1]))
while answer != (exercise[0] * exercise[1]):
# command = {'bri': 254, 'hue': 8042, 'sat': 174}
hb.set_light(hue_light_name, 'xy', rgb_to_xy(1.0, 0, 0), transitiontime=5)
print(" Wrong!")
time.sleep(1)
hb.set_light(hue_light_name, 'xy', origxy, transitiontime=50)
answer = input_integer_number(" {} x {} = ".format(exercise[0], exercise[1]))
hb.set_light(hue_light_name, 'xy', rgb_to_xy(0.0, 1.0, 0), transitiontime=5)
print(" CORRECT!")
time.sleep(1)
hb.set_light(hue_light_name, 'xy', origxy, transitiontime=50)
if __name__ == "__main__":
main()
| [
"phue.Bridge",
"random.shuffle",
"time.sleep"
] | [((2881, 2906), 'random.shuffle', 'random.shuffle', (['exercises'], {}), '(exercises)\n', (2895, 2906), False, 'import random\n'), ((3698, 3711), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3708, 3711), False, 'import time\n'), ((1613, 1638), 'phue.Bridge', 'hue.Bridge', (['hue_bridge_ip'], {}), '(hue_bridge_ip)\n', (1623, 1638), True, 'import phue as hue\n'), ((3394, 3407), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3404, 3407), False, 'import time\n')] |
#!/bin/python
# Extracts messages.data into headers and the actual message
import binascii
import pprint
import click
def hexToStr(byte):
return binascii.hexlify(byte).decode()
def printByte(label, byte):
print(label + ':\t' + hexToStr(byte))
@click.command()
@click.argument('path')
def extractMessage(path):
with open(path, 'rb') as f:
firstLabels = ['constr', 'flags', 'id', 'ttl?', 'from', 'to', 'date?', '?']
for label in firstLabels:
byte = f.read(4)
printByte(label, byte)
byte = f.read(1)
messageLength = int.from_bytes(byte, 'little')
print('length:\t' + hexToStr(byte) + ' (' + str(messageLength) + ')')
print('message: ')
byte = f.read(messageLength)
print('\tbytes: ' + hexToStr(byte))
print('\tascii: ' + byte.decode('utf-8'))
finalLabels = ['media', 'magic?', '?', '?']
for label in finalLabels:
byte = f.read(4)
printByte(label, byte)
if __name__ == '__main__':
extractMessage()
| [
"click.argument",
"click.command",
"binascii.hexlify"
] | [((259, 274), 'click.command', 'click.command', ([], {}), '()\n', (272, 274), False, 'import click\n'), ((276, 298), 'click.argument', 'click.argument', (['"""path"""'], {}), "('path')\n", (290, 298), False, 'import click\n'), ((152, 174), 'binascii.hexlify', 'binascii.hexlify', (['byte'], {}), '(byte)\n', (168, 174), False, 'import binascii\n')] |
# polar.py Fast floating point cartesian to polar coordinate conversion
# Author: <NAME>
# 31st Oct 2015 Updated to match latest firmware
# 21st April 2015
# Now uses recently implemented FPU mnemonics
# Arctan is based on the following approximation applicable to octant zero where q = x/y :
# arctan(q) = q*pi/4- q*(q - 1)*(0.2447 + 0.0663*q)
# Arctan approximation: max error about 0.085 deg in my tests.
from math import pi
from array import array
consts = array('f', [0.0, 0.0, 1.0, pi, pi/2, -pi/2, pi/4, 0.2447, 0.0663])
# Entry:
# r0: array of real (x) values
# r1: array of imaginary (y) values
# r2: array element 0 = length of arrays following are constants
# ARM CPU register usage
# r3: Array length (integer)
# r4: Negate flag
# r5, r6: Temporary storage
# Returns:
# The real array holds magnitude values, the imaginary ones phase.
# Phase is in radians compatible with cPython's math.atan2()
@micropython.asm_thumb
def polar(r0, r1, r2): # Array length in r3: convert to integer
vldr(s15, [r2, 0])
vcvt_s32_f32(s15, s15)
vmov(r3, s15)
# Load constants
vldr(s0, [r2, 4]) # 0
vldr(s1, [r2, 8]) # 1
vldr(s2, [r2, 12]) # Pi
vldr(s3, [r2, 16]) # Pi/2
vldr(s4, [r2, 20]) # -Pi/2
vldr(s5, [r2, 24]) # Pi/4
vldr(s6, [r2, 28]) # 0.2447
vldr(s7, [r2, 32]) # 0.0663
b(START)
label(DOCALC)
vldr(s8, [r2, 4]) # c = 0.0
vldr(s14, [r0, 0]) # x
vldr(s15, [r1, 0]) # y
# Calculate magnitude
vmul(s10, s14, s14)
vmul(s9, s15, s15)
vadd(s10, s10, s9)
vsqrt(s10, s10)
vstr(s10, [r0, 0]) # real = hypot
# Start of arctan calculation
mov(r4, 0) # Negate flag
vcmp(s14, s0)
vmrs(APSR_nzcv, FPSCR) # transfer status to ARM status registers
bne(QUADCHECK) # Skip if not x == 0
vcmp(s15, s0)
vmrs(APSR_nzcv, FPSCR) # transfer status to ARM status registers
bne(P01)
vstr(s0, [r1,0]) # result = 0
b(Q0DONE)
label(P01)
vcmp(s15, s0)
vmrs(APSR_nzcv, FPSCR) # transfer status to ARM status registers
ite(ge)
vstr(s3, [r1,0]) # result = pi/2
vstr(s4, [r1,0]) # result = -pi/2
b(Q0DONE)
label(QUADCHECK)
vcmp(s15, s0) # compare y with 0
vmrs(APSR_nzcv, FPSCR)
bge(P02)
vneg(s15, s15) # y = -y
mov(r4, 1) # set negate flag
label(P02) # y now > 0
vcmp(s14, s0) # comp x with 0
vmrs(APSR_nzcv, FPSCR)
bge(P04)
# x < 0
vneg(s14, s14) # x = -x
vcmp(s14, s15) # comp x with y
vmrs(APSR_nzcv, FPSCR)
bgt(P03)
vmov(r5, s14) # swap x and y CONVOLUTED: need to implement vmov(Sd, Sm)
vmov(r6, s15)
vmov(s15, r5)
vmov(s14, r6)
vmov(r5, s3)
vmov(s8, r5) # c = pi/2
b(OCTZERO)
label(P03) # y < x
cmp(r4, 0)
ite(eq)
mov(r4, 1)
mov(r4, 0) # neg = not neg
vmov(r5, s2) # c = pi
vmov(s8, r5)
vneg(s8, s8) # c = -pi
b(OCTZERO)
label(P04) # x > 0
vcmp(s14, s15) # comp x with y
vmrs(APSR_nzcv, FPSCR)
bge(OCTZERO)
vmov(r5, s14) # swap x and y
vmov(r6, s15)
vmov(s15, r5)
vmov(s14, r6)
vmov(r5, s4) # c = -pi/2
vmov(s8, r5)
cmp(r4, 0)
ite(eq)
mov(r4, 1)
mov(r4, 0) # neg = not neg
# Octant zero
label(OCTZERO) # calculate r = x*pi/4 - x*(x - 1)*(0.2447 + 0.0663*x)
vdiv(s14, s15, s14) # x = y/x
vmul(s15, s7, s14) # s15 = 0.0663x
vadd(s15, s6, s15) # s15 = 0.2447 + 0.0663*x
vsub(s13, s14, s1) # s1 = x -1
vmul(s15, s15, s13) # s15 = (x - 1)*(0.2447 + 0.0663*x)
vmul(s15, s14, s15) # s15 = x*(x - 1)*(0.2447 + 0.0663*x)
vmul(s13, s14, s5) # s5 = x*pi/4
vsub(s15, s13, s15)
vadd(s15, s15, s8) # s15 += c
cmp(r4, 0)
it(ne)
vneg(s15, s15)
vstr(s15, [r1, 0]) # imag = angle
label(Q0DONE)
bx(lr) # ! DOCALC
label(START) # r0-> real r1-> imag r3 = length
bl(DOCALC)
add(r0, 4)
add(r1, 4)
sub(r3, 1)
bne(START)
def topolar(re, im, length):
consts[0] = length
polar(re, im, consts)
| [
"array.array"
] | [((462, 534), 'array.array', 'array', (['"""f"""', '[0.0, 0.0, 1.0, pi, pi / 2, -pi / 2, pi / 4, 0.2447, 0.0663]'], {}), "('f', [0.0, 0.0, 1.0, pi, pi / 2, -pi / 2, pi / 4, 0.2447, 0.0663])\n", (467, 534), False, 'from array import array\n')] |
#! /usr/bin/env python3
import rospy
import numpy as np
import random
from geometry_msgs.msg import Twist, Pose
from nav_msgs.msg import Odometry
from std_srvs.srv import SetBool, Trigger
from kr_tracker_msgs.msg import TrajectoryTrackerAction, TrajectoryTrackerGoal, CircleTrackerAction, CircleTrackerGoal
from kr_python_interface.mav_interface import KrMavInterface
def main():
rospy.init_node('mav_example', anonymous=True)
# Creating MAV objects
mav_namespace = 'dragonfly'
mav_id = 1
mav_obj = KrMavInterface('dragonfly', mav_id)
# Motor On / Take Off
mav_obj.motors_on()
mav_obj.take_off()
rospy.sleep(1)
#Send waypoint (open loop, have to sleep until finished)
mav_obj.send_wp(4.0, 0.0, 1.0, 0.0)
rospy.sleep(4)
#Send waypoint blocking
mav_obj.send_wp_block(0.0, 0.0, 1.0, 0.0, 0.5, 0.3, False) #x, y, z, yaw, vel, acc, relative
#Send random twist commands
for i in range(20):
#get current odometry
curr_odom = mav_obj.get_odom();
curr_position = curr_odom.pose.pose.position;
rospy.loginfo('pose %g %g %g', curr_position.x, curr_position.y, curr_position.z);
mav_obj.set_vel(random.uniform(0.1,1.0), random.uniform(0.1,1.0), 0, 0, 0, 0) #vx, vy, vz,
rospy.sleep(0.3)
#Send waypoint blocking
mav_obj.send_wp_block(0.0, 0.0, 1.0, 0.0, 1.0, 0.5, False) #x, y, z, yaw, vel, acc, relative
#Run circle tracker
mav_obj.hover()
goal = CircleTrackerGoal()
goal.Ax = -1.0
goal.Ay = -1.0
goal.T = 4.0
num_repetitions = 1
goal.duration = goal.T*num_repetitions
mav_obj.circle_tracker_client.cancel_all_goals()
rospy.sleep(0.1)
mav_obj.circle_tracker_client.send_goal(goal)
rospy.logwarn("Send circle")
success = mav_obj.transition_service_call('CircleTracker')
if not success:
rospy.logwarn("Failed to transition to circle tracker (is there an active goal?)")
rospy.logwarn("Waiting for circle to run")
mav_obj.circle_tracker_client.wait_for_result()
#Send waypoint blocking
mav_obj.send_wp_block(0.0, 0.0, 1.5, 0.0, 1.0, 0.5, False) #x, y, z, yaw, vel, acc, relative
#Send multiple waypoints by fitting traj
goal = TrajectoryTrackerGoal()
wp = Pose()
wp.position.x = 0.0
wp.position.y = 0.0
wp.position.z = 2.0
goal.waypoints.append(wp)
wp1 = Pose()
wp1.position.x = 0.0
wp1.position.y = 1.0
wp1.position.z = 1.5
goal.waypoints.append(wp1)
wp2 = Pose()
wp2.position.x = 2.0
wp2.position.y = 2.0
wp2.position.z = 1.0
goal.waypoints.append(wp2)
wp3 = Pose()
wp3.position.x = 3.0
wp3.position.y = 0.0
wp3.position.z = 0.5
goal.waypoints.append(wp3)
mav_obj.traj_tracker_client.send_goal(goal)
success = mav_obj.transition_service_call('TrajectoryTracker')
if not success:
rospy.logwarn("Failed to transition to trajectory tracker (is there an active goal?)")
rospy.logwarn("Waiting for traj to run")
mav_obj.traj_tracker_client.wait_for_result()
#Send waypoint blocking
mav_obj.send_wp_block(0.0, 0.0, 1.0, 0.0, 1.5, 0.5, False) #x, y, z, yaw, vel, acc, relative
# Land / Motors off
mav_obj.land()
rospy.sleep(3)
mav_obj.motors_off()
if __name__ == '__main__':
try :
main()
except rospy.ROSInterruptException :
pass
| [
"random.uniform",
"geometry_msgs.msg.Pose",
"rospy.logwarn",
"rospy.init_node",
"kr_python_interface.mav_interface.KrMavInterface",
"rospy.sleep",
"kr_tracker_msgs.msg.TrajectoryTrackerGoal",
"rospy.loginfo",
"kr_tracker_msgs.msg.CircleTrackerGoal"
] | [((385, 431), 'rospy.init_node', 'rospy.init_node', (['"""mav_example"""'], {'anonymous': '(True)'}), "('mav_example', anonymous=True)\n", (400, 431), False, 'import rospy\n'), ((513, 548), 'kr_python_interface.mav_interface.KrMavInterface', 'KrMavInterface', (['"""dragonfly"""', 'mav_id'], {}), "('dragonfly', mav_id)\n", (527, 548), False, 'from kr_python_interface.mav_interface import KrMavInterface\n'), ((620, 634), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (631, 634), False, 'import rospy\n'), ((735, 749), 'rospy.sleep', 'rospy.sleep', (['(4)'], {}), '(4)\n', (746, 749), False, 'import rospy\n'), ((1414, 1433), 'kr_tracker_msgs.msg.CircleTrackerGoal', 'CircleTrackerGoal', ([], {}), '()\n', (1431, 1433), False, 'from kr_tracker_msgs.msg import TrajectoryTrackerAction, TrajectoryTrackerGoal, CircleTrackerAction, CircleTrackerGoal\n'), ((1599, 1615), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (1610, 1615), False, 'import rospy\n'), ((1666, 1694), 'rospy.logwarn', 'rospy.logwarn', (['"""Send circle"""'], {}), "('Send circle')\n", (1679, 1694), False, 'import rospy\n'), ((1865, 1907), 'rospy.logwarn', 'rospy.logwarn', (['"""Waiting for circle to run"""'], {}), "('Waiting for circle to run')\n", (1878, 1907), False, 'import rospy\n'), ((2133, 2156), 'kr_tracker_msgs.msg.TrajectoryTrackerGoal', 'TrajectoryTrackerGoal', ([], {}), '()\n', (2154, 2156), False, 'from kr_tracker_msgs.msg import TrajectoryTrackerAction, TrajectoryTrackerGoal, CircleTrackerAction, CircleTrackerGoal\n'), ((2164, 2170), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (2168, 2170), False, 'from geometry_msgs.msg import Twist, Pose\n'), ((2274, 2280), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (2278, 2280), False, 'from geometry_msgs.msg import Twist, Pose\n'), ((2388, 2394), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (2392, 2394), False, 'from geometry_msgs.msg import Twist, Pose\n'), ((2502, 2508), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), 
'()\n', (2506, 2508), False, 'from geometry_msgs.msg import Twist, Pose\n'), ((2831, 2871), 'rospy.logwarn', 'rospy.logwarn', (['"""Waiting for traj to run"""'], {}), "('Waiting for traj to run')\n", (2844, 2871), False, 'import rospy\n'), ((3084, 3098), 'rospy.sleep', 'rospy.sleep', (['(3)'], {}), '(3)\n', (3095, 3098), False, 'import rospy\n'), ((1041, 1126), 'rospy.loginfo', 'rospy.loginfo', (['"""pose %g %g %g"""', 'curr_position.x', 'curr_position.y', 'curr_position.z'], {}), "('pose %g %g %g', curr_position.x, curr_position.y,\n curr_position.z)\n", (1054, 1126), False, 'import rospy\n'), ((1225, 1241), 'rospy.sleep', 'rospy.sleep', (['(0.3)'], {}), '(0.3)\n', (1236, 1241), False, 'import rospy\n'), ((1779, 1866), 'rospy.logwarn', 'rospy.logwarn', (['"""Failed to transition to circle tracker (is there an active goal?)"""'], {}), "(\n 'Failed to transition to circle tracker (is there an active goal?)')\n", (1792, 1866), False, 'import rospy\n'), ((2741, 2832), 'rospy.logwarn', 'rospy.logwarn', (['"""Failed to transition to trajectory tracker (is there an active goal?)"""'], {}), "(\n 'Failed to transition to trajectory tracker (is there an active goal?)')\n", (2754, 2832), False, 'import rospy\n'), ((1145, 1169), 'random.uniform', 'random.uniform', (['(0.1)', '(1.0)'], {}), '(0.1, 1.0)\n', (1159, 1169), False, 'import random\n'), ((1170, 1194), 'random.uniform', 'random.uniform', (['(0.1)', '(1.0)'], {}), '(0.1, 1.0)\n', (1184, 1194), False, 'import random\n')] |
from bacprop.bacnet.network import VirtualSensorNetwork
from bacprop.bacnet import network
from bacpypes.pdu import Address
from bacpypes.comm import service_map
from bacprop.bacnet.sensor import Sensor
from pytest_mock import MockFixture
import pytest
# Required for full coverage
network._debug = 1
class TestVirtualSensorNetwork:
def test_init_address(self, mocker: MockFixture) -> None:
mock_router = mocker.patch("bacprop.bacnet.network._VLANRouter")
VirtualSensorNetwork("0.0.0.0")
mock_router.assert_called_once_with(Address("0.0.0.0"), 0)
def test_init_router(self, mocker: MockFixture) -> None:
mocker.patch("bacprop.bacnet.network._VLANRouter")
network = VirtualSensorNetwork("0.0.0.0")
router_node = network.nodes[0]
# The router node on the network should be address 1
assert router_node.address == Address((1).to_bytes(4, "big"))
# pylint: disable=no-member
network._router.bind.assert_called_once_with(router_node, 1) # type: ignore
network._router.start.assert_called_once() # type: ignore
def test_create_sensor(self, mocker: MockFixture) -> None:
mocker.patch("bacprop.bacnet.network._VLANRouter")
network = VirtualSensorNetwork("0.0.0.0")
sensor = network.create_sensor(7)
sensor2 = network.create_sensor(8)
assert len(network.nodes) == 3
assert network.nodes[-1] == sensor2.get_node()
assert network.nodes[-2] == sensor.get_node()
assert sensor._vlan_address == Address((2).to_bytes(4, "big"))
assert sensor2._vlan_address == Address((3).to_bytes(4, "big"))
def test_create_sensor_exists(self, mocker: MockFixture) -> None:
mocker.patch("bacprop.bacnet.network._VLANRouter")
network = VirtualSensorNetwork("0.0.0.0")
network.create_sensor(7)
with pytest.raises(ValueError):
network.create_sensor(7)
def test_get_sensor(self, mocker: MockFixture) -> None:
mocker.patch("bacprop.bacnet.network._VLANRouter")
network = VirtualSensorNetwork("0.0.0.0")
sensor_created = network.create_sensor(7)
sensor_found = network.get_sensor(7)
assert sensor_created == sensor_found
def test_get_sensors(self, mocker: MockFixture) -> None:
mocker.patch("bacprop.bacnet.network._VLANRouter")
network = VirtualSensorNetwork("0.0.0.0")
for i in range(10):
network.create_sensor(i)
sensors = network.get_sensors()
for i in range(10):
assert type(sensors[i]) == Sensor
def test_run(self, mocker: MockFixture) -> None:
mock_run = mocker.patch("bacprop.bacnet.network.run")
network = VirtualSensorNetwork("0.0.0.0")
# Teardown
network._router.mux.close_socket()
service_map.clear()
network.run()
mock_run.assert_called_once()
def test_stop(self, mocker: MockFixture) -> None:
mock_stop = mocker.patch("bacprop.bacnet.network.stop")
network = VirtualSensorNetwork("0.0.0.0")
# Teardown
network._router.mux.close_socket()
service_map.clear()
network.stop()
mock_stop.assert_called_once()
| [
"bacprop.bacnet.network.get_sensor",
"bacpypes.comm.service_map.clear",
"bacprop.bacnet.network.VirtualSensorNetwork",
"bacprop.bacnet.network.create_sensor",
"bacpypes.pdu.Address",
"bacprop.bacnet.network._router.bind.assert_called_once_with",
"bacprop.bacnet.network._router.mux.close_socket",
"bacp... | [((481, 512), 'bacprop.bacnet.network.VirtualSensorNetwork', 'VirtualSensorNetwork', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (501, 512), False, 'from bacprop.bacnet.network import VirtualSensorNetwork\n'), ((719, 750), 'bacprop.bacnet.network.VirtualSensorNetwork', 'VirtualSensorNetwork', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (739, 750), False, 'from bacprop.bacnet.network import VirtualSensorNetwork\n'), ((968, 1028), 'bacprop.bacnet.network._router.bind.assert_called_once_with', 'network._router.bind.assert_called_once_with', (['router_node', '(1)'], {}), '(router_node, 1)\n', (1012, 1028), False, 'from bacprop.bacnet import network\n'), ((1053, 1095), 'bacprop.bacnet.network._router.start.assert_called_once', 'network._router.start.assert_called_once', ([], {}), '()\n', (1093, 1095), False, 'from bacprop.bacnet import network\n'), ((1253, 1284), 'bacprop.bacnet.network.VirtualSensorNetwork', 'VirtualSensorNetwork', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (1273, 1284), False, 'from bacprop.bacnet.network import VirtualSensorNetwork\n'), ((1303, 1327), 'bacprop.bacnet.network.create_sensor', 'network.create_sensor', (['(7)'], {}), '(7)\n', (1324, 1327), False, 'from bacprop.bacnet import network\n'), ((1346, 1370), 'bacprop.bacnet.network.create_sensor', 'network.create_sensor', (['(8)'], {}), '(8)\n', (1367, 1370), False, 'from bacprop.bacnet import network\n'), ((1812, 1843), 'bacprop.bacnet.network.VirtualSensorNetwork', 'VirtualSensorNetwork', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (1832, 1843), False, 'from bacprop.bacnet.network import VirtualSensorNetwork\n'), ((1853, 1877), 'bacprop.bacnet.network.create_sensor', 'network.create_sensor', (['(7)'], {}), '(7)\n', (1874, 1877), False, 'from bacprop.bacnet import network\n'), ((2094, 2125), 'bacprop.bacnet.network.VirtualSensorNetwork', 'VirtualSensorNetwork', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (2114, 2125), False, 'from bacprop.bacnet.network import VirtualSensorNetwork\n'), 
((2152, 2176), 'bacprop.bacnet.network.create_sensor', 'network.create_sensor', (['(7)'], {}), '(7)\n', (2173, 2176), False, 'from bacprop.bacnet import network\n'), ((2200, 2221), 'bacprop.bacnet.network.get_sensor', 'network.get_sensor', (['(7)'], {}), '(7)\n', (2218, 2221), False, 'from bacprop.bacnet import network\n'), ((2408, 2439), 'bacprop.bacnet.network.VirtualSensorNetwork', 'VirtualSensorNetwork', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (2428, 2439), False, 'from bacprop.bacnet.network import VirtualSensorNetwork\n'), ((2525, 2546), 'bacprop.bacnet.network.get_sensors', 'network.get_sensors', ([], {}), '()\n', (2544, 2546), False, 'from bacprop.bacnet import network\n'), ((2757, 2788), 'bacprop.bacnet.network.VirtualSensorNetwork', 'VirtualSensorNetwork', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (2777, 2788), False, 'from bacprop.bacnet.network import VirtualSensorNetwork\n'), ((2816, 2850), 'bacprop.bacnet.network._router.mux.close_socket', 'network._router.mux.close_socket', ([], {}), '()\n', (2848, 2850), False, 'from bacprop.bacnet import network\n'), ((2859, 2878), 'bacpypes.comm.service_map.clear', 'service_map.clear', ([], {}), '()\n', (2876, 2878), False, 'from bacpypes.comm import service_map\n'), ((2888, 2901), 'bacprop.bacnet.network.run', 'network.run', ([], {}), '()\n', (2899, 2901), False, 'from bacprop.bacnet import network\n'), ((3078, 3109), 'bacprop.bacnet.network.VirtualSensorNetwork', 'VirtualSensorNetwork', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (3098, 3109), False, 'from bacprop.bacnet.network import VirtualSensorNetwork\n'), ((3137, 3171), 'bacprop.bacnet.network._router.mux.close_socket', 'network._router.mux.close_socket', ([], {}), '()\n', (3169, 3171), False, 'from bacprop.bacnet import network\n'), ((3180, 3199), 'bacpypes.comm.service_map.clear', 'service_map.clear', ([], {}), '()\n', (3197, 3199), False, 'from bacpypes.comm import service_map\n'), ((3209, 3223), 'bacprop.bacnet.network.stop', 'network.stop', ([], {}), 
'()\n', (3221, 3223), False, 'from bacprop.bacnet import network\n'), ((557, 575), 'bacpypes.pdu.Address', 'Address', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (564, 575), False, 'from bacpypes.pdu import Address\n'), ((1892, 1917), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1905, 1917), False, 'import pytest\n'), ((1931, 1955), 'bacprop.bacnet.network.create_sensor', 'network.create_sensor', (['(7)'], {}), '(7)\n', (1952, 1955), False, 'from bacprop.bacnet import network\n'), ((2481, 2505), 'bacprop.bacnet.network.create_sensor', 'network.create_sensor', (['i'], {}), '(i)\n', (2502, 2505), False, 'from bacprop.bacnet import network\n')] |
import numpy as np
from touchstone.environments.make import make_vec_envs
NUM_ENVS = 2
if __name__ == '__main__':
env = make_vec_envs("Pendulum-v0", 42, NUM_ENVS)
np.random.seed(42)
state = env.reset()
for i in range(1000):
actions = env.action_space.sample()
out = env.step([actions for j in range(NUM_ENVS)])
# env.render()
assert abs(env.mean_reward_per_step) < 0.007
assert len(env.returns) == 1000
| [
"touchstone.environments.make.make_vec_envs",
"numpy.random.seed"
] | [((126, 168), 'touchstone.environments.make.make_vec_envs', 'make_vec_envs', (['"""Pendulum-v0"""', '(42)', 'NUM_ENVS'], {}), "('Pendulum-v0', 42, NUM_ENVS)\n", (139, 168), False, 'from touchstone.environments.make import make_vec_envs\n'), ((173, 191), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (187, 191), True, 'import numpy as np\n')] |
"""Package for playdeliver modules."""
from pkg_resources import get_distribution, DistributionNotFound
import os.path
try:
_dist = get_distribution('playdeliver')
# Normalize case for Windows systems
dist_loc = os.path.normcase(_dist.location)
here = os.path.normcase(__file__)
if not here.startswith(os.path.join(dist_loc, 'playdeliver')):
# not installed, but there is another version that *is*
raise DistributionNotFound
except DistributionNotFound:
__version__ = 'Please install this project with setup.py'
else:
__version__ = _dist.version
__all__ = ('client', 'file_util', 'image',
'listing', 'playdeliver', 'sync_command')
| [
"pkg_resources.get_distribution"
] | [((137, 168), 'pkg_resources.get_distribution', 'get_distribution', (['"""playdeliver"""'], {}), "('playdeliver')\n", (153, 168), False, 'from pkg_resources import get_distribution, DistributionNotFound\n')] |
"""
Consolidate Services
Description of all APIs # noqa: E501
The version of the OpenAPI document: version not set
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import argocd_python_client
from argocd_python_client.model.v1alpha1_application_destination import V1alpha1ApplicationDestination
from argocd_python_client.model.v1alpha1_application_source import V1alpha1ApplicationSource
from argocd_python_client.model.v1alpha1_info import V1alpha1Info
from argocd_python_client.model.v1alpha1_resource_ignore_differences import V1alpha1ResourceIgnoreDifferences
from argocd_python_client.model.v1alpha1_sync_policy import V1alpha1SyncPolicy
globals()['V1alpha1ApplicationDestination'] = V1alpha1ApplicationDestination
globals()['V1alpha1ApplicationSource'] = V1alpha1ApplicationSource
globals()['V1alpha1Info'] = V1alpha1Info
globals()['V1alpha1ResourceIgnoreDifferences'] = V1alpha1ResourceIgnoreDifferences
globals()['V1alpha1SyncPolicy'] = V1alpha1SyncPolicy
from argocd_python_client.model.v1alpha1_application_spec import V1alpha1ApplicationSpec
class TestV1alpha1ApplicationSpec(unittest.TestCase):
"""V1alpha1ApplicationSpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha1ApplicationSpec(self):
"""Test V1alpha1ApplicationSpec"""
# FIXME: construct object with mandatory attributes with example values
# model = V1alpha1ApplicationSpec() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
] | [((1552, 1567), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1565, 1567), False, 'import unittest\n')] |
#!bin/python3
import os
import sys
import time
import random
from datetime import datetime
book = sys.argv[1]
startChapter = sys.argv[2]
endChapter = sys.argv[3]
for it in range(int(startChapter), int(endChapter) + 1):
# Get random wait time
waitTime = random.randrange(2, 5)
# Get current time
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print(f'Waiting {waitTime} minutes from {current_time} to continue operation')
# Wait for random time
time.sleep(60 * waitTime)
# Read output and abort if necessary
result = os.popen(f'python3 lsbible.py {book}+{it}').read()
print(result)
if 'Aborting Process' in result or 'Error' in result:
print(f'Aborting Process Loop @: {book}+{it}')
sys.exit() | [
"random.randrange",
"time.sleep",
"datetime.datetime.now",
"os.popen",
"sys.exit"
] | [((263, 285), 'random.randrange', 'random.randrange', (['(2)', '(5)'], {}), '(2, 5)\n', (279, 285), False, 'import random\n'), ((319, 333), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (331, 333), False, 'from datetime import datetime\n'), ((492, 517), 'time.sleep', 'time.sleep', (['(60 * waitTime)'], {}), '(60 * waitTime)\n', (502, 517), False, 'import time\n'), ((762, 772), 'sys.exit', 'sys.exit', ([], {}), '()\n', (770, 772), False, 'import sys\n'), ((572, 615), 'os.popen', 'os.popen', (['f"""python3 lsbible.py {book}+{it}"""'], {}), "(f'python3 lsbible.py {book}+{it}')\n", (580, 615), False, 'import os\n')] |
from rest_framework import routers
from website.api import UserViewSet, PostViewSet, CommentViewSet, \
CategoryViewSet, CrowdViewSet
router = routers.SimpleRouter()
router.register(r'users', UserViewSet)
router.register(r'posts', PostViewSet)
router.register(r'categories', CategoryViewSet)
router.register(r'comments', CommentViewSet)
router.register(r'crowds', CrowdViewSet)
| [
"rest_framework.routers.SimpleRouter"
] | [((147, 169), 'rest_framework.routers.SimpleRouter', 'routers.SimpleRouter', ([], {}), '()\n', (167, 169), False, 'from rest_framework import routers\n')] |
import numpy as np
import asyncio
import time
import ntplib
import json
import os
import yaml
import websockets
import threading
import uuid
import logging
MODULE_DIR = os.path.dirname(__file__)
with open(os.path.join(MODULE_DIR, "hardwareconstants.yaml"), "r") as f:
constants = yaml.load(f, Loader=yaml.FullLoader)["liquidhandler"]
tc = constants["timings"]
def expected_timings(drop):
"""Estimate the duration (seconds) liquid aspiration will require for a given drop
Args:
drop (dict): dictionary of drop parameters
Returns:
float: duration, in seconds
"""
ac = tc["aspirate"] # aspiration constants
aspirate_duration = ac["preparetip"] + drop["volume"] / 100 + tc["travel"]
aspirate_duration += drop["pre_mix"][0] * (
ac["premix"]["a"] * drop["pre_mix"][1] + ac["premix"]["b"]
) # overhead time for aspirate+dispense cycles to mix solution prior to final aspiration
if drop["touch_tip"]:
aspirate_duration += ac["touchtip"]
if drop["slow_retract"]:
aspirate_duration += ac["slowretract"]
if drop["air_gap"]:
aspirate_duration += ac["airgap"]
if drop["slow_travel"]:
staging_duration = tc["travel_slow"]
dispense_duration = tc["dispensedelay_slow"]
else:
staging_duration = tc["travel"]
dispense_duration = tc["dispensedelay"]
return aspirate_duration, staging_duration, dispense_duration
class OT2:
def __init__(self):
self.server = OT2Server()
# self.server.start()
self.POLLINGRATE = constants["pollingrate"]
# self.DISPENSE_DELAY = constants[
# "dispense_delay"
# ] # time (seconds) between initiating a dispense and the completion of the dispense
# self.ASPIRATION_DELAY = constants[
# "aspiration_delay"
# ] # time (seconds) to perform an aspiration and stage the pipette
# self.STAGING_DELAY = constants[
# "staging_delay"
# ] # time (seconds) to move pipette into position for drop staging
self.CONSTANTS = constants["timings"]
def drop_perovskite(self, taskid=None, nist_time=None, **kwargs):
taskid = self.server.add_to_queue(
task="dispense_onto_chuck",
taskid=taskid,
nist_time=nist_time,
pipette="perovskite",
# height=height,
# rate=rate,
**kwargs,
)
return taskid
def drop_antisolvent(self, taskid=None, nist_time=None, **kwargs):
taskid = self.server.add_to_queue(
task="dispense_onto_chuck",
taskid=taskid,
nist_time=nist_time,
pipette="antisolvent",
**kwargs,
)
return taskid
def aspirate_for_spincoating(
self,
tray,
well,
volume,
pipette="perovskite",
slow_retract=True,
air_gap=True,
touch_tip=True,
pre_mix=(0, 0),
reuse_tip=False,
taskid=None,
nist_time=None,
**kwargs,
):
taskid = self.server.add_to_queue(
task="aspirate_for_spincoating",
taskid=taskid,
nist_time=nist_time,
tray=tray,
well=well,
volume=volume,
pipette=pipette,
slow_retract=slow_retract,
air_gap=air_gap,
touch_tip=touch_tip,
reuse_tip=reuse_tip,
pre_mix=pre_mix,
**kwargs,
)
return taskid
# def aspirate_both_for_spincoating(
# self,
# tray0,
# well0,
# volume0,
# slow_retract0=True,
# air_gap0=True,
# touch_tip0=True,
# pre_mix0=(0, 0),
# reuse_tip0=False,
# tray1,
# well1,
# volume1,
# slow_retract1=True,
# air_gap1=True,
# touch_tip1=True,
# pre_mix1=(0, 0),
# reuse_tip1=False,
# taskid=None,
# nist_time=None,
# **kwargs,
# ):
# taskid = self.server.add_to_queue(
# task="aspirate_both_for_spincoating",
# taskid=taskid,
# nist_time=nist_time,
# tray0=psk_tray,
# well0=psk_well,
# volume0=psk_volume,
# tray1=antisolvent_tray,
# well1=antisolvent_well,
# volume1=antisolvent_volume,
# slow_retract=slow_retract,
# air_gap=air_gap,
# touch_tip=touch_tip,
# **kwargs,
# )
# return taskid
def stage_perovskite(
self, taskid=None, nist_time=None, slow_travel=False, **kwargs
):
taskid = self.server.add_to_queue(
task="stage_for_dispense",
taskid=taskid,
nist_time=nist_time,
pipette="perovskite",
slow_travel=slow_travel,
**kwargs,
)
return taskid
def stage_antisolvent(
self, taskid=None, nist_time=None, slow_travel=False, **kwargs
):
taskid = self.server.add_to_queue(
task="stage_for_dispense",
taskid=taskid,
nist_time=nist_time,
pipette="antisolvent",
slow_travel=slow_travel,
**kwargs,
)
return taskid
def clear_chuck(self, taskid=None, nist_time=None, **kwargs):
taskid = self.server.add_to_queue(
task="clear_chuck",
taskid=taskid,
nist_time=nist_time,
**kwargs,
)
return taskid
def cleanup(self, taskid=None, nist_time=None, **kwargs):
taskid = self.server.add_to_queue(
task="cleanup",
taskid=taskid,
nist_time=nist_time,
**kwargs,
)
return taskid
def mark_completed(self):
self.server._start_directly()
self.server.mark_completed()
self.server.stop()
def wait_for_task_complete(self, taskid):
while taskid not in self.server.completed_tasks:
time.sleep(self.POLLINGRATE)
# while taskid not in self.server.completed_tasks:
# time.sleep(self.server.POLLINGRATE)
# while self.server.OT2_status == 0: # wait for task to be acknowledged by ot2
# time.sleep(self.INTERVAL)
# while self.server.OT2_status != 0: # wait for task to be marked complete by ot2
# time.sleep(self.INTERVAL)
def __del__(self):
self.server.stop()
class OT2Server:
def __init__(self):
self.__calibrate_time_to_nist()
self.connected = False
self.ip = constants["server"]["ip"]
self.port = constants["server"]["port"]
self.pending_tasks = []
self.completed_tasks = {}
self.POLLINGRATE = 1 # seconds between status checks to OT2
self.loop = asyncio.new_event_loop()
### Time Synchronization with NIST
def __calibrate_time_to_nist(self):
client = ntplib.NTPClient()
response = None
while response is None:
try:
response = client.request("europe.pool.ntp.org", version=3)
except:
pass
t_local = time.time()
self.__local_nist_offset = response.tx_time - t_local
def nist_time(self):
return time.time() + self.__local_nist_offset
### Server Methods
async def __connect_to_websocket(self):
try:
del self.websocket
except:
pass # if this is the first time, we wont have a websocket. thats fine
self.websocket = await websockets.connect(
self.uri, ping_interval=20, ping_timeout=300
)
def start(self, ip=None, port=None):
if ip is not None:
self.ip = ip
if port is not None:
self.port = port
self.uri = f"ws://{self.ip}:{self.port}"
flag = input("confirm that the Listener protocol is running on OT2 (y/n):")
if str.lower(flag) == "y":
def run_loop(loop):
asyncio.set_event_loop(loop)
loop.run_forever()
self.loop = asyncio.new_event_loop()
self.thread = threading.Thread(target=run_loop, args=(self.loop,))
self.thread.daemon = True
self.thread.start()
asyncio.run_coroutine_threadsafe(self.__connect_to_websocket(), self.loop)
while not hasattr(self, "websocket"):
time.sleep(0.2) # wait to connect
self.connected = True
self._worker = asyncio.run_coroutine_threadsafe(self.worker(), self.loop)
# self._checker = asyncio.run_coroutine_threadsafe(self.checker(), self.loop)
# self.loop.call_soon_threadsafe(self.worker)
# self.loop.call_soon_threadsafe(self.checker)
# self.loop.run_until_complete(self.__connect_to_websocket())
# self.thread.daemon = True
# self.thread.start()
# self.loop.run_forever()
# self._worker = self.loop.create_task(self.worker(), name="maestro_worker")
# self._checker = self.loop.create_task(
# self.checker(), name="maestro_checker"
# )
# def f():
# self.loop = asyncio.new_event_loop()
# self.loop.run_until_complete(self._start_workers())
# # self.loop.run_forever()
# self.thread = threading.Thread(target=f, args=())
# self.thread.run()
# self.loop.run_until_complete(self.__connect_to_websocket())
# self.loop.run_forever()
# self._worker = asyncio.create_task(self.worker(), name="maestro_worker")
# self._checker = asyncio.create_task(self.checker(), name="maestro_checker")
# self.thread = threading.Thread(target=self._start_workers, args=())
# self.thread.start()
# self._start_workers()
else:
print(
"User indicated that Listener protocol is not running - did not attempt to connect to OT2 websocket."
)
def _start_directly(self):
self.uri = f"ws://{self.ip}:{self.port}"
def run_loop(loop):
asyncio.set_event_loop(loop)
loop.run_forever()
self.thread = threading.Thread(target=run_loop, args=(self.loop,))
self.thread.daemon = True
self.thread.start()
asyncio.run_coroutine_threadsafe(self.__connect_to_websocket(), self.loop)
# self.loop.call_soon_threadsafe(self.__connect_to_websocket)
while not hasattr(self, "websocket"):
time.sleep(0.1) # wait to connect
self.connected = True
self._worker = asyncio.run_coroutine_threadsafe(self.worker(), self.loop)
def stop(self):
# self.mark_completed()
self.connected = False
time.sleep(1)
self._worker.cancel()
self.loop.call_soon_threadsafe(self.loop.stop)
# asyncio.gather(self._worker, self._checker)
# self.loop.close()
self.thread.join()
del self.websocket
def _update_completed_tasklist(self, tasklist):
for taskid, nisttime in tasklist.items():
# print(f"{taskid} completed at {nisttime}")
if taskid in self.pending_tasks:
self.pending_tasks.remove(taskid)
self.completed_tasks.update(tasklist)
async def worker(self):
while self.connected:
try:
response = await asyncio.wait_for(self.websocket.recv(), timeout=0.5)
ot2 = json.loads(response)
except asyncio.TimeoutError:
ot2 = {}
except websockets.exceptions.ConnectionClosed: # reconnect
del self.websocket
asyncio.run_coroutine_threadsafe(
self.__connect_to_websocket(), self.loop
)
# self.loop.call_soon_threadsafe(self.__connect_to_websocket)
while not hasattr(self, "websocket"):
time.sleep(0.1) # wait to connect
# print(f"maestro recieved {ot2}")
if "acknowledged" in ot2:
# print(f'{ot2["acknowledged"]} acknowledged by OT2')
self.pending_tasks.append(ot2["acknowledged"])
if "completed" in ot2:
self._update_completed_tasklist(ot2["completed"])
async def __add_task(self, task):
# print(task)
await self.websocket.send(json.dumps(task))
def _add_task(self, task):
asyncio.run_coroutine_threadsafe(self.__add_task(task), loop=self.loop)
# # asyncio.create_task(self.__add_task(task))
# asyncio.run_coroutine_threadsafe(self.__add_task(task), self.loop)
def add_to_queue(self, task, taskid=None, nist_time=None, *args, **kwargs):
if taskid is None:
taskid = str(uuid.uuid4())
if nist_time is None:
nist_time = self.nist_time()
task = {
"task": {
"task": task,
"taskid": taskid,
"nist_time": nist_time,
"args": args,
"kwargs": kwargs,
}
}
self._add_task(task)
return taskid
def status_update(self):
maestro = {"status": 0}
self._add_task(maestro)
def mark_completed(self):
maestro = {"complete": 0}
self._add_task(maestro)
| [
"json.loads",
"asyncio.new_event_loop",
"json.dumps",
"os.path.join",
"yaml.load",
"time.sleep",
"uuid.uuid4",
"os.path.dirname",
"ntplib.NTPClient",
"websockets.connect",
"threading.Thread",
"asyncio.set_event_loop",
"time.time"
] | [((170, 195), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (185, 195), False, 'import os\n'), ((206, 256), 'os.path.join', 'os.path.join', (['MODULE_DIR', '"""hardwareconstants.yaml"""'], {}), "(MODULE_DIR, 'hardwareconstants.yaml')\n", (218, 256), False, 'import os\n'), ((285, 321), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (294, 321), False, 'import yaml\n'), ((6921, 6945), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (6943, 6945), False, 'import asyncio\n'), ((7043, 7061), 'ntplib.NTPClient', 'ntplib.NTPClient', ([], {}), '()\n', (7059, 7061), False, 'import ntplib\n'), ((7270, 7281), 'time.time', 'time.time', ([], {}), '()\n', (7279, 7281), False, 'import time\n'), ((10406, 10458), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_loop', 'args': '(self.loop,)'}), '(target=run_loop, args=(self.loop,))\n', (10422, 10458), False, 'import threading\n'), ((10971, 10984), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (10981, 10984), False, 'import time\n'), ((6112, 6140), 'time.sleep', 'time.sleep', (['self.POLLINGRATE'], {}), '(self.POLLINGRATE)\n', (6122, 6140), False, 'import time\n'), ((7385, 7396), 'time.time', 'time.time', ([], {}), '()\n', (7394, 7396), False, 'import time\n'), ((7667, 7731), 'websockets.connect', 'websockets.connect', (['self.uri'], {'ping_interval': '(20)', 'ping_timeout': '(300)'}), '(self.uri, ping_interval=20, ping_timeout=300)\n', (7685, 7731), False, 'import websockets\n'), ((8213, 8237), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (8235, 8237), False, 'import asyncio\n'), ((8264, 8316), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_loop', 'args': '(self.loop,)'}), '(target=run_loop, args=(self.loop,))\n', (8280, 8316), False, 'import threading\n'), ((10323, 10351), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (10345, 10351), False, 
'import asyncio\n'), ((10732, 10747), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (10742, 10747), False, 'import time\n'), ((8124, 8152), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (8146, 8152), False, 'import asyncio\n'), ((8540, 8555), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (8550, 8555), False, 'import time\n'), ((11691, 11711), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (11701, 11711), False, 'import json\n'), ((12615, 12631), 'json.dumps', 'json.dumps', (['task'], {}), '(task)\n', (12625, 12631), False, 'import json\n'), ((13010, 13022), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13020, 13022), False, 'import uuid\n'), ((12166, 12181), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (12176, 12181), False, 'import time\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: generate inputs and targets for the DLRM benchmark
#
# Utility function(s) to download and pre-process public data sets
# - Criteo Kaggle Display Advertising Challenge Dataset
# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
import os
import sys
from multiprocessing import Manager, Process
import numpy as np
def processCriteoAdData(d_path, d_file, npzfile, i, convertDicts, pre_comp_counts):
# Process Kaggle Display Advertising Challenge or Terabyte Dataset
# by converting unicode strings in X_cat to integers and
# converting negative integer values in X_int.
#
# Loads data in the form "{kaggle|terabyte}_day_i.npz" where i is the day.
#
# Inputs:
# d_path (str): path for {kaggle|terabyte}_day_i.npz files
# i (int): splits in the dataset (typically 0 to 7 or 0 to 24)
# process data if not all files exist
filename_i = npzfile + "_{0}_processed.npz".format(i)
if os.path.exists(filename_i):
print("Using existing " + filename_i, end="\n")
else:
print("Not existing " + filename_i)
with np.load(npzfile + "_{0}.npz".format(i)) as data:
# Approach 2a: using pre-computed dictionaries
X_cat_t = np.zeros(data["X_cat_t"].shape)
for j in range(26):
for k, x in enumerate(data["X_cat_t"][j, :]):
X_cat_t[j, k] = convertDicts[j][x]
# continuous features
X_int = data["X_int"]
X_int[X_int < 0] = 0
# targets
y = data["y"]
np.savez_compressed(
filename_i,
# X_cat = X_cat,
X_cat=np.transpose(X_cat_t), # transpose of the data
X_int=X_int,
y=y,
)
print("Processed " + filename_i, end="\n")
# sanity check (applicable only if counts have been pre-computed & are re-computed)
# for j in range(26):
# if pre_comp_counts[j] != counts[j]:
# sys.exit("ERROR: Sanity check on counts has failed")
# print("\nSanity check on counts passed")
return
def concatCriteoAdData(
d_path,
d_file,
npzfile,
trafile,
days,
data_split,
randomize,
total_per_file,
total_count,
o_filename
):
# Concatenates different days and saves the result.
#
# Inputs:
# days (int): total number of days in the dataset (typically 7 or 24)
# d_path (str): path for {kaggle|terabyte}_day_i.npz files
# o_filename (str): output file name
#
# Output:
# o_file (str): output file path
print("Concatenating multiple days into %s.npz file" % str(d_path + o_filename))
# load and concatenate data
for i in range(days):
filename_i = npzfile + "_{0}_processed.npz".format(i)
with np.load(filename_i) as data:
if i == 0:
X_cat = data["X_cat"]
X_int = data["X_int"]
y = data["y"]
else:
X_cat = np.concatenate((X_cat, data["X_cat"]))
X_int = np.concatenate((X_int, data["X_int"]))
y = np.concatenate((y, data["y"]))
print("Loaded day:", i, "y = 1:", len(y[y == 1]), "y = 0:", len(y[y == 0]))
with np.load(d_path + d_file + "_fea_count.npz") as data:
counts = data["counts"]
print("Loaded counts!")
np.savez_compressed(
d_path + o_filename + ".npz",
X_cat=X_cat,
X_int=X_int,
y=y,
counts=counts,
)
return d_path + o_filename + ".npz"
def getCriteoAdData(
datafile,
o_filename,
max_ind_range=-1,
sub_sample_rate=0.0,
days=7,
data_split='train',
randomize='total',
dataset_multiprocessing=False,
):
# Passes through entire dataset and defines dictionaries for categorical
# features and determines the number of total categories.
#
# Inputs:
# datafile : path to downloaded raw data file
# o_filename (str): saves results under o_filename if filename is not ""
#
# Output:
# o_file (str): output file path
#split the datafile into path and filename
lstr = datafile.split("/")
d_path = "/".join(lstr[0:-1]) + "/"
d_file = lstr[-1].split(".")[0]
npzfile = d_path + ((d_file + "_day"))
trafile = d_path + ((d_file + "_fea"))
# count number of datapoints in training set
total_file = d_path + d_file + "_day_count.npz"
if os.path.exists(total_file):
with np.load(total_file) as data:
total_per_file = list(data["total_per_file"])
total_count = np.sum(total_per_file)
print("Skipping counts per file (already exist)")
else:
total_count = 0
total_per_file = []
# WARNING: The raw data consists of a single train.txt file
# Each line in the file is a sample, consisting of 13 continuous and
# 26 categorical features (an extra space indicates that feature is
# missing and will be interpreted as 0).
if os.path.exists(datafile):
print("Reading data from path=%s" % (datafile))
with open(str(datafile)) as f:
for _ in f:
total_count += 1
total_per_file.append(total_count)
# reset total per file due to split
num_data_per_split, extras = divmod(total_count, days)
total_per_file = [num_data_per_split] * days
for j in range(extras):
total_per_file[j] += 1
# split into days (simplifies code later on)
file_id = 0
boundary = total_per_file[file_id]
nf = open(npzfile + "_" + str(file_id), "w")
with open(str(datafile)) as f:
for j, line in enumerate(f):
if j == boundary:
nf.close()
file_id += 1
nf = open(npzfile + "_" + str(file_id), "w")
boundary += total_per_file[file_id]
nf.write(line)
nf.close()
else:
sys.exit("ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset")
# process a file worth of data and reinitialize data
# note that a file main contain a single or multiple splits
def process_one_file(
datfile,
npzfile,
split,
num_data_in_split,
dataset_multiprocessing,
convertDictsDay=None,
resultDay=None
):
if dataset_multiprocessing:
convertDicts_day = [{} for _ in range(26)]
with open(str(datfile)) as f:
y = np.zeros(num_data_in_split, dtype="i4") # 4 byte int
X_int = np.zeros((num_data_in_split, 13), dtype="i4") # 4 byte int
X_cat = np.zeros((num_data_in_split, 26), dtype="i4") # 4 byte int
if sub_sample_rate == 0.0:
rand_u = 1.0
else:
rand_u = np.random.uniform(low=0.0, high=1.0, size=num_data_in_split)
i = 0
percent = 0
for k, line in enumerate(f):
# process a line (data point)
line = line.split('\t')
# set missing values to zero
for j in range(len(line)):
if (line[j] == '') or (line[j] == '\n'):
line[j] = '0'
# sub-sample data by dropping zero targets, if needed
target = np.int32(line[0])
if target == 0 and \
(rand_u if sub_sample_rate == 0.0 else rand_u[k]) < sub_sample_rate:
continue
y[i] = target
X_int[i] = np.array(line[1:14], dtype=np.int32)
if max_ind_range > 0:
X_cat[i] = np.array(
list(map(lambda x: int(x, 16) % max_ind_range, line[14:])),
dtype=np.int32
)
else:
X_cat[i] = np.array(
list(map(lambda x: int(x, 16), line[14:])),
dtype=np.int32
)
# count uniques
if dataset_multiprocessing:
for j in range(26):
convertDicts_day[j][X_cat[i][j]] = 1
# debug prints
if float(i)/num_data_in_split*100 > percent+1:
percent = int(float(i)/num_data_in_split*100)
print(
"Load %d/%d (%d%%) Split: %d Label True: %d Stored: %d"
% (
i,
num_data_in_split,
percent,
split,
target,
y[i],
),
end="\n",
)
else:
for j in range(26):
convertDicts[j][X_cat[i][j]] = 1
# debug prints
print(
"Load %d/%d Split: %d Label True: %d Stored: %d"
% (
i,
num_data_in_split,
split,
target,
y[i],
),
end="\r",
)
i += 1
# store num_data_in_split samples or extras at the end of file
# count uniques
# X_cat_t = np.transpose(X_cat)
# for j in range(26):
# for x in X_cat_t[j,:]:
# convertDicts[j][x] = 1
# store parsed
filename_s = npzfile + "_{0}.npz".format(split)
if os.path.exists(filename_s):
print("\nSkip existing " + filename_s)
else:
np.savez_compressed(
filename_s,
X_int=X_int[0:i, :],
# X_cat=X_cat[0:i, :],
X_cat_t=np.transpose(X_cat[0:i, :]), # transpose of the data
y=y[0:i],
)
print("\nSaved " + npzfile + "_{0}.npz!".format(split))
if dataset_multiprocessing:
resultDay[split] = i
convertDictsDay[split] = convertDicts_day
return
else:
return i
# create all splits (reuse existing files if possible)
recreate_flag = False
convertDicts = [{} for _ in range(26)]
# WARNING: to get reproducable sub-sampling results you must reset the seed below
# np.random.seed(123)
# in this case there is a single split in each day
for i in range(days):
npzfile_i = npzfile + "_{0}.npz".format(i)
npzfile_p = npzfile + "_{0}_processed.npz".format(i)
if os.path.exists(npzfile_i):
print("Skip existing " + npzfile_i)
elif os.path.exists(npzfile_p):
print("Skip existing " + npzfile_p)
else:
recreate_flag = True
if recreate_flag:
if dataset_multiprocessing:
resultDay = Manager().dict()
convertDictsDay = Manager().dict()
processes = [Process(target=process_one_file,
name="process_one_file:%i" % i,
args=(npzfile + "_{0}".format(i),
npzfile,
i,
total_per_file[i],
dataset_multiprocessing,
convertDictsDay,
resultDay,
)
) for i in range(0, days)]
for process in processes:
process.start()
for process in processes:
process.join()
for day in range(days):
total_per_file[day] = resultDay[day]
print("Constructing convertDicts Split: {}".format(day))
convertDicts_tmp = convertDictsDay[day]
for i in range(26):
for j in convertDicts_tmp[i]:
convertDicts[i][j] = 1
else:
for i in range(days):
total_per_file[i] = process_one_file(
npzfile + "_{0}".format(i),
npzfile,
i,
total_per_file[i],
dataset_multiprocessing,
)
# report and save total into a file
total_count = np.sum(total_per_file)
if not os.path.exists(total_file):
np.savez_compressed(total_file, total_per_file=total_per_file)
print("Total number of samples:", total_count)
print("Divided into days/splits:\n", total_per_file)
# dictionary files
counts = np.zeros(26, dtype=np.int32)
if recreate_flag:
# create dictionaries
for j in range(26):
for i, x in enumerate(convertDicts[j]):
convertDicts[j][x] = i
dict_file_j = d_path + d_file + "_fea_dict_{0}.npz".format(j)
if not os.path.exists(dict_file_j):
np.savez_compressed(
dict_file_j,
unique=np.array(list(convertDicts[j]), dtype=np.int32)
)
counts[j] = len(convertDicts[j])
# store (uniques and) counts
count_file = d_path + d_file + "_fea_count.npz"
if not os.path.exists(count_file):
np.savez_compressed(count_file, counts=counts)
else:
# create dictionaries (from existing files)
for j in range(26):
with np.load(d_path + d_file + "_fea_dict_{0}.npz".format(j)) as data:
unique = data["unique"]
for i, x in enumerate(unique):
convertDicts[j][x] = i
# load (uniques and) counts
with np.load(d_path + d_file + "_fea_count.npz") as data:
counts = data["counts"]
# process all splits
if dataset_multiprocessing:
processes = [Process(target=processCriteoAdData,
name="processCriteoAdData:%i" % i,
args=(d_path,
d_file,
npzfile,
i,
convertDicts,
counts,
)
) for i in range(0, days)]
for process in processes:
process.start()
for process in processes:
process.join()
else:
for i in range(days):
processCriteoAdData(d_path, d_file, npzfile, i, convertDicts, counts)
o_file = concatCriteoAdData(
d_path,
d_file,
npzfile,
trafile,
days,
data_split,
randomize,
total_per_file,
total_count,
o_filename
)
return o_file
| [
"os.path.exists",
"numpy.transpose",
"multiprocessing.Process",
"numpy.int32",
"numpy.sum",
"numpy.zeros",
"numpy.random.uniform",
"numpy.array",
"numpy.concatenate",
"sys.exit",
"multiprocessing.Manager",
"numpy.savez_compressed",
"numpy.load"
] | [((1153, 1179), 'os.path.exists', 'os.path.exists', (['filename_i'], {}), '(filename_i)\n', (1167, 1179), False, 'import os\n'), ((3617, 3716), 'numpy.savez_compressed', 'np.savez_compressed', (["(d_path + o_filename + '.npz')"], {'X_cat': 'X_cat', 'X_int': 'X_int', 'y': 'y', 'counts': 'counts'}), "(d_path + o_filename + '.npz', X_cat=X_cat, X_int=X_int,\n y=y, counts=counts)\n", (3636, 3716), True, 'import numpy as np\n'), ((4732, 4758), 'os.path.exists', 'os.path.exists', (['total_file'], {}), '(total_file)\n', (4746, 4758), False, 'import os\n'), ((13260, 13282), 'numpy.sum', 'np.sum', (['total_per_file'], {}), '(total_per_file)\n', (13266, 13282), True, 'import numpy as np\n'), ((13538, 13566), 'numpy.zeros', 'np.zeros', (['(26)'], {'dtype': 'np.int32'}), '(26, dtype=np.int32)\n', (13546, 13566), True, 'import numpy as np\n'), ((3499, 3542), 'numpy.load', 'np.load', (["(d_path + d_file + '_fea_count.npz')"], {}), "(d_path + d_file + '_fea_count.npz')\n", (3506, 3542), True, 'import numpy as np\n'), ((4882, 4904), 'numpy.sum', 'np.sum', (['total_per_file'], {}), '(total_per_file)\n', (4888, 4904), True, 'import numpy as np\n'), ((5306, 5330), 'os.path.exists', 'os.path.exists', (['datafile'], {}), '(datafile)\n', (5320, 5330), False, 'import os\n'), ((11441, 11466), 'os.path.exists', 'os.path.exists', (['npzfile_i'], {}), '(npzfile_i)\n', (11455, 11466), False, 'import os\n'), ((13294, 13320), 'os.path.exists', 'os.path.exists', (['total_file'], {}), '(total_file)\n', (13308, 13320), False, 'import os\n'), ((13330, 13392), 'numpy.savez_compressed', 'np.savez_compressed', (['total_file'], {'total_per_file': 'total_per_file'}), '(total_file, total_per_file=total_per_file)\n', (13349, 13392), True, 'import numpy as np\n'), ((1435, 1466), 'numpy.zeros', 'np.zeros', (["data['X_cat_t'].shape"], {}), "(data['X_cat_t'].shape)\n", (1443, 1466), True, 'import numpy as np\n'), ((3052, 3071), 'numpy.load', 'np.load', (['filename_i'], {}), '(filename_i)\n', (3059, 3071), 
True, 'import numpy as np\n'), ((4773, 4792), 'numpy.load', 'np.load', (['total_file'], {}), '(total_file)\n', (4780, 4792), True, 'import numpy as np\n'), ((6390, 6576), 'sys.exit', 'sys.exit', (['"""ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset"""'], {}), "(\n 'ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset'\n )\n", (6398, 6576), False, 'import sys\n'), ((7058, 7097), 'numpy.zeros', 'np.zeros', (['num_data_in_split'], {'dtype': '"""i4"""'}), "(num_data_in_split, dtype='i4')\n", (7066, 7097), True, 'import numpy as np\n'), ((7132, 7177), 'numpy.zeros', 'np.zeros', (['(num_data_in_split, 13)'], {'dtype': '"""i4"""'}), "((num_data_in_split, 13), dtype='i4')\n", (7140, 7177), True, 'import numpy as np\n'), ((7212, 7257), 'numpy.zeros', 'np.zeros', (['(num_data_in_split, 26)'], {'dtype': '"""i4"""'}), "((num_data_in_split, 26), dtype='i4')\n", (7220, 7257), True, 'import numpy as np\n'), ((10362, 10388), 'os.path.exists', 'os.path.exists', (['filename_s'], {}), '(filename_s)\n', (10376, 10388), False, 'import os\n'), ((11529, 11554), 'os.path.exists', 'os.path.exists', (['npzfile_p'], {}), '(npzfile_p)\n', (11543, 11554), False, 'import os\n'), ((14176, 14202), 'os.path.exists', 'os.path.exists', (['count_file'], {}), '(count_file)\n', (14190, 14202), False, 'import os\n'), ((14216, 14262), 'numpy.savez_compressed', 'np.savez_compressed', (['count_file'], {'counts': 'counts'}), '(count_file, counts=counts)\n', (14235, 14262), True, 'import numpy as np\n'), ((14607, 14650), 'numpy.load', 'np.load', (["(d_path + d_file + '_fea_count.npz')"], {}), "(d_path + d_file + '_fea_count.npz')\n", (14614, 14650), True, 'import numpy as np\n'), ((14775, 14907), 'multiprocessing.Process', 'Process', ([], {'target': 'processCriteoAdData', 'name': 
"('processCriteoAdData:%i' % i)", 'args': '(d_path, d_file, npzfile, i, convertDicts, counts)'}), "(target=processCriteoAdData, name='processCriteoAdData:%i' % i, args\n =(d_path, d_file, npzfile, i, convertDicts, counts))\n", (14782, 14907), False, 'from multiprocessing import Manager, Process\n'), ((1866, 1887), 'numpy.transpose', 'np.transpose', (['X_cat_t'], {}), '(X_cat_t)\n', (1878, 1887), True, 'import numpy as np\n'), ((3252, 3290), 'numpy.concatenate', 'np.concatenate', (["(X_cat, data['X_cat'])"], {}), "((X_cat, data['X_cat']))\n", (3266, 3290), True, 'import numpy as np\n'), ((3315, 3353), 'numpy.concatenate', 'np.concatenate', (["(X_int, data['X_int'])"], {}), "((X_int, data['X_int']))\n", (3329, 3353), True, 'import numpy as np\n'), ((3374, 3404), 'numpy.concatenate', 'np.concatenate', (["(y, data['y'])"], {}), "((y, data['y']))\n", (3388, 3404), True, 'import numpy as np\n'), ((7383, 7443), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': 'num_data_in_split'}), '(low=0.0, high=1.0, size=num_data_in_split)\n', (7400, 7443), True, 'import numpy as np\n'), ((7896, 7913), 'numpy.int32', 'np.int32', (['line[0]'], {}), '(line[0])\n', (7904, 7913), True, 'import numpy as np\n'), ((8126, 8162), 'numpy.array', 'np.array', (['line[1:14]'], {'dtype': 'np.int32'}), '(line[1:14], dtype=np.int32)\n', (8134, 8162), True, 'import numpy as np\n'), ((13831, 13858), 'os.path.exists', 'os.path.exists', (['dict_file_j'], {}), '(dict_file_j)\n', (13845, 13858), False, 'import os\n'), ((11734, 11743), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (11741, 11743), False, 'from multiprocessing import Manager, Process\n'), ((11781, 11790), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (11788, 11790), False, 'from multiprocessing import Manager, Process\n'), ((10644, 10671), 'numpy.transpose', 'np.transpose', (['X_cat[0:i, :]'], {}), '(X_cat[0:i, :])\n', (10656, 10671), True, 'import numpy as np\n')] |
from functools import lru_cache
from pydantic import BaseSettings, Field
class Settings(BaseSettings):
LEVEL: str
PROJECT_TITLE: str = 'FastAPI with GraphQL and REST'
GRAPHQL_API: str = '/graphql'
REST_API: str = '/rest'
COMMON_API: str = '/api'
class Config:
env_file = ".env"
class DevelopSettings(Settings):
DB_URL: str = Field(env="DEVELOP_DB_URL")
class ProductSettings(Settings):
DB_URL: str = Field("PRODUCT_DB_URL")
@lru_cache
def get_settings():
return DevelopSettings() if Settings().LEVEL == 'DEVELOP' \
else ProductSettings() | [
"pydantic.Field"
] | [((371, 398), 'pydantic.Field', 'Field', ([], {'env': '"""DEVELOP_DB_URL"""'}), "(env='DEVELOP_DB_URL')\n", (376, 398), False, 'from pydantic import BaseSettings, Field\n'), ((457, 480), 'pydantic.Field', 'Field', (['"""PRODUCT_DB_URL"""'], {}), "('PRODUCT_DB_URL')\n", (462, 480), False, 'from pydantic import BaseSettings, Field\n')] |
"""
family_names wordlists
"""
import os
import csv
import json
from .. import word_list_counter
from .. import parse_assoc
from .. import dictionaries
NAME = 'family_name'
raw_nl_family_names_1_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'raw',
'DutchNameGenerator',
'MarkovDutchNameGenerator',
'LastNames.txt'
)
raw_nl_family_names_2_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'raw',
'family-names-in-the-netherlands',
'family_names.lst' # maybe filter out first names?
)
raw_us_family_names_1_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'raw',
'name-dataset',
'names_dataset',
'last_names.all.txt'
)
raw_association_multipliers_1_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'raw',
'association_multipliers',
'family_name.txt'
)
def get_wordlists(line_printer_cb):
word_list_nl = word_list_counter.WordListCounter()
word_list_int = word_list_counter.WordListCounter()
for path in [raw_nl_family_names_1_path, raw_nl_family_names_2_path]:
with open(path) as f:
data = f.readlines()
for row in data:
row = row.lower().strip()
if row not in dictionaries.popular_words.top_40:
word_list_nl.check_and_add(row)
line_printer_cb('main: {}'.format(word_list_nl.count))
# new line
line_printer_cb(None)
with open(raw_us_family_names_1_path) as f:
data = f.readlines()
for row in data:
row = row.lower().strip()
if row not in dictionaries.popular_words.top_40:
word_list_int.check_and_add(row)
line_printer_cb('international: {}'.format(word_list_int.count))
# new line
line_printer_cb(None)
return {
**{'main': word_list_nl.keys},
**{'int': word_list_int.keys},
**parse_assoc.read_assoc_data([raw_association_multipliers_1_path], line_printer_cb)
}
| [
"os.path.realpath"
] | [((241, 267), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (257, 267), False, 'import os\n'), ((424, 450), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (440, 450), False, 'import os\n'), ((623, 649), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (639, 649), False, 'import os\n'), ((803, 829), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (819, 829), False, 'import os\n')] |
#!/usr/bin/env python
#
# Author: <NAME> (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2020 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/pathos/blob/master/LICENSE
"""
This module contains map and pipe interfaces to python's multiprocessing module.
Pipe methods provided:
pipe - blocking communication pipe [returns: value]
apipe - asynchronous communication pipe [returns: object]
Map methods provided:
map - blocking and ordered worker pool [returns: list]
imap - non-blocking and ordered worker pool [returns: iterator]
uimap - non-blocking and unordered worker pool [returns: iterator]
amap - asynchronous worker pool [returns: object]
Usage
=====
A typical call to a pathos multiprocessing map will roughly follow this example:
>>> # instantiate and configure the worker pool
>>> from pathos.multiprocessing import ProcessPool
>>> pool = ProcessPool(nodes=4)
>>>
>>> # do a blocking map on the chosen function
>>> print(pool.map(pow, [1,2,3,4], [5,6,7,8]))
>>>
>>> # do a non-blocking map, then extract the results from the iterator
>>> results = pool.imap(pow, [1,2,3,4], [5,6,7,8])
>>> print("...")
>>> print(list(results))
>>>
>>> # do an asynchronous map, then get the results
>>> results = pool.amap(pow, [1,2,3,4], [5,6,7,8])
>>> while not results.ready():
... time.sleep(5); print(".", end=' ')
...
>>> print(results.get())
>>>
>>> # do one item at a time, using a pipe
>>> print(pool.pipe(pow, 1, 5))
>>> print(pool.pipe(pow, 2, 6))
>>>
>>> # do one item at a time, using an asynchronous pipe
>>> result1 = pool.apipe(pow, 1, 5)
>>> result2 = pool.apipe(pow, 2, 6)
>>> print(result1.get())
>>> print(result2.get())
Notes
=====
This worker pool leverages the python's multiprocessing module, and thus
has many of the limitations associated with that module. The function f and
the sequences in args must be serializable. The maps in this worker pool
have full functionality whether run from a script or in the python
interpreter, and work reliably for both imported and interactively-defined
functions. Unlike python's multiprocessing module, pathos.multiprocessing maps
can directly utilize functions that require multiple arguments.
"""
__all__ = ['ProcessPool','_ProcessPool']
#FIXME: probably not good enough... should store each instance with a uid
__STATE = _ProcessPool__STATE = {}
from pathos.abstract_launcher import AbstractWorkerPool
from pathos.helpers.mp_helper import starargs as star
from pathos.helpers import cpu_count, freeze_support, ProcessPool as Pool
try:
from itertools import izip as zip
except ImportError:
pass
# 'forward' compatibility
_ProcessPool = Pool
class ProcessPool(AbstractWorkerPool):
"""
Mapper that leverages python's multiprocessing.
"""
def __init__(self, *args, **kwds):
"""\nNOTE: if number of nodes is not given, will autodetect processors
"""
hasnodes = 'nodes' in kwds; arglen = len(args)
if 'ncpus' in kwds and (hasnodes or arglen):
msg = "got multiple values for keyword argument 'ncpus'"
raise TypeError(msg)
elif hasnodes: #XXX: multiple try/except is faster?
if arglen:
msg = "got multiple values for keyword argument 'nodes'"
raise TypeError(msg)
kwds['ncpus'] = kwds.pop('nodes')
elif arglen:
kwds['ncpus'] = args[0]
self.__nodes = kwds.get('ncpus', cpu_count())
# Create an identifier for the pool
self._id = 'pool'
# Create a new server if one isn't already initialized
self._serve()
return
if AbstractWorkerPool.__init__.__doc__: __init__.__doc__ = AbstractWorkerPool.__init__.__doc__ + __init__.__doc__
#def __exit__(self, *args):
# self._clear()
# return
def _serve(self, nodes=None): #XXX: should be STATE method; use id
"""Create a new server if one isn't already initialized"""
if nodes is None: nodes = self.__nodes
_pool = __STATE.get(self._id, None)
if not _pool or nodes != _pool.__nodes:
self._clear()
_pool = Pool(nodes)
_pool.__nodes = nodes
__STATE[self._id] = _pool
return _pool
def _clear(self): #XXX: should be STATE method; use id
"""Remove server with matching state"""
_pool = __STATE.get(self._id, None)
if _pool and self.__nodes == _pool.__nodes:
_pool.close()
_pool.join()
__STATE.pop(self._id, None)
return #XXX: return _pool?
clear = _clear
def map(self, f, *args, **kwds):
AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
_pool = self._serve()
return _pool.map(star(f), zip(*args)) # chunksize
map.__doc__ = AbstractWorkerPool.map.__doc__
def imap(self, f, *args, **kwds):
AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
_pool = self._serve()
return _pool.imap(star(f), zip(*args)) # chunksize
imap.__doc__ = AbstractWorkerPool.imap.__doc__
def uimap(self, f, *args, **kwds):
AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
_pool = self._serve()
return _pool.imap_unordered(star(f), zip(*args)) # chunksize
uimap.__doc__ = AbstractWorkerPool.uimap.__doc__
def amap(self, f, *args, **kwds): # register a callback ?
AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
_pool = self._serve()
return _pool.map_async(star(f), zip(*args)) # chunksize
amap.__doc__ = AbstractWorkerPool.amap.__doc__
########################################################################
# PIPES
def pipe(self, f, *args, **kwds):
#AbstractWorkerPool._AbstractWorkerPool__pipe(self, f, *args, **kwds)
_pool = self._serve()
return _pool.apply(f, args, kwds)
pipe.__doc__ = AbstractWorkerPool.pipe.__doc__
def apipe(self, f, *args, **kwds): # register a callback ?
#AbstractWorkerPool._AbstractWorkerPool__apipe(self, f, *args, **kwds)
_pool = self._serve()
return _pool.apply_async(f, args, kwds)
apipe.__doc__ = AbstractWorkerPool.apipe.__doc__
########################################################################
def __repr__(self):
mapargs = (self.__class__.__name__, self.ncpus)
return "<pool %s(ncpus=%s)>" % mapargs
def __get_nodes(self):
"""get the number of nodes used in the map"""
return self.__nodes
def __set_nodes(self, nodes):
"""set the number of nodes used in the map"""
self._serve(nodes)
self.__nodes = nodes
return
########################################################################
def restart(self, force=False):
"restart a closed pool"
_pool = __STATE.get(self._id, None)
if _pool and self.__nodes == _pool.__nodes:
RUN = 0
if not force:
assert _pool._state != RUN
# essentially, 'clear' and 'serve'
self._clear()
_pool = Pool(self.__nodes)
_pool.__nodes = self.__nodes
__STATE[self._id] = _pool
return _pool
def close(self):
"close the pool to any new jobs"
_pool = __STATE.get(self._id, None)
if _pool and self.__nodes == _pool.__nodes:
_pool.close()
return
def terminate(self):
"a more abrupt close"
_pool = __STATE.get(self._id, None)
if _pool and self.__nodes == _pool.__nodes:
_pool.terminate()
return
def join(self):
"cleanup the closed worker processes"
_pool = __STATE.get(self._id, None)
if _pool and self.__nodes == _pool.__nodes:
_pool.join()
return
# interface
ncpus = property(__get_nodes, __set_nodes)
nodes = property(__get_nodes, __set_nodes)
__state__ = __STATE
pass
# backward compatibility
from pathos.helpers import ThreadPool
from pathos.threading import ThreadPool as ThreadingPool
ProcessingPool = ProcessPool
# EOF
| [
"pathos.helpers.mp_helper.starargs",
"pathos.abstract_launcher.AbstractWorkerPool._AbstractWorkerPool__map",
"pathos.abstract_launcher.AbstractWorkerPool._AbstractWorkerPool__imap",
"itertools.izip",
"pathos.helpers.cpu_count",
"pathos.helpers.ProcessPool"
] | [((4991, 5058), 'pathos.abstract_launcher.AbstractWorkerPool._AbstractWorkerPool__map', 'AbstractWorkerPool._AbstractWorkerPool__map', (['self', 'f', '*args'], {}), '(self, f, *args, **kwds)\n', (5034, 5058), False, 'from pathos.abstract_launcher import AbstractWorkerPool\n'), ((5242, 5310), 'pathos.abstract_launcher.AbstractWorkerPool._AbstractWorkerPool__imap', 'AbstractWorkerPool._AbstractWorkerPool__imap', (['self', 'f', '*args'], {}), '(self, f, *args, **kwds)\n', (5286, 5310), False, 'from pathos.abstract_launcher import AbstractWorkerPool\n'), ((5498, 5566), 'pathos.abstract_launcher.AbstractWorkerPool._AbstractWorkerPool__imap', 'AbstractWorkerPool._AbstractWorkerPool__imap', (['self', 'f', '*args'], {}), '(self, f, *args, **kwds)\n', (5542, 5566), False, 'from pathos.abstract_launcher import AbstractWorkerPool\n'), ((5789, 5856), 'pathos.abstract_launcher.AbstractWorkerPool._AbstractWorkerPool__map', 'AbstractWorkerPool._AbstractWorkerPool__map', (['self', 'f', '*args'], {}), '(self, f, *args, **kwds)\n', (5832, 5856), False, 'from pathos.abstract_launcher import AbstractWorkerPool\n'), ((3799, 3810), 'pathos.helpers.cpu_count', 'cpu_count', ([], {}), '()\n', (3808, 3810), False, 'from pathos.helpers import cpu_count, freeze_support, ProcessPool as Pool\n'), ((4493, 4504), 'pathos.helpers.ProcessPool', 'Pool', (['nodes'], {}), '(nodes)\n', (4497, 4504), True, 'from pathos.helpers import cpu_count, freeze_support, ProcessPool as Pool\n'), ((5114, 5121), 'pathos.helpers.mp_helper.starargs', 'star', (['f'], {}), '(f)\n', (5118, 5121), True, 'from pathos.helpers.mp_helper import starargs as star\n'), ((5123, 5133), 'itertools.izip', 'zip', (['*args'], {}), '(*args)\n', (5126, 5133), True, 'from itertools import izip as zip\n'), ((5367, 5374), 'pathos.helpers.mp_helper.starargs', 'star', (['f'], {}), '(f)\n', (5371, 5374), True, 'from pathos.helpers.mp_helper import starargs as star\n'), ((5376, 5386), 'itertools.izip', 'zip', (['*args'], {}), '(*args)\n', 
(5379, 5386), True, 'from itertools import izip as zip\n'), ((5633, 5640), 'pathos.helpers.mp_helper.starargs', 'star', (['f'], {}), '(f)\n', (5637, 5640), True, 'from pathos.helpers.mp_helper import starargs as star\n'), ((5642, 5652), 'itertools.izip', 'zip', (['*args'], {}), '(*args)\n', (5645, 5652), True, 'from itertools import izip as zip\n'), ((5918, 5925), 'pathos.helpers.mp_helper.starargs', 'star', (['f'], {}), '(f)\n', (5922, 5925), True, 'from pathos.helpers.mp_helper import starargs as star\n'), ((5927, 5937), 'itertools.izip', 'zip', (['*args'], {}), '(*args)\n', (5930, 5937), True, 'from itertools import izip as zip\n'), ((7496, 7514), 'pathos.helpers.ProcessPool', 'Pool', (['self.__nodes'], {}), '(self.__nodes)\n', (7500, 7514), True, 'from pathos.helpers import cpu_count, freeze_support, ProcessPool as Pool\n')] |
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
import radical.utils as ru
from .base import LaunchMethod
# ------------------------------------------------------------------------------
#
class DPlace(LaunchMethod):
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
LaunchMethod.__init__(self, cfg, session)
# --------------------------------------------------------------------------
#
def _configure(self):
# dplace: job launcher for SGI systems (e.g. on Blacklight)
self.launch_command = ru.which('dplace')
# --------------------------------------------------------------------------
#
def construct_command(self, t, launch_script_hop):
slots = t['slots']
td = t['description']
task_exec = td['executable']
task_cores = td['cpu_processes'] # FIXME: also use cpu_threads
task_args = td.get('arguments') or []
task_argstr = self._create_arg_string(task_args)
if 'task_offsets' not in slots :
raise RuntimeError('insufficient information to launch via %s: %s'
% (self.name, slots))
# FIXME: This is broken due to changes lot structure
task_offsets = slots['task_offsets']
assert(len(task_offsets) == 1)
dplace_offset = task_offsets[0]
task_command = "%s %s" % (task_exec, task_argstr)
dplace_command = "%s -c %d-%d %s" % (self.launch_command, dplace_offset,
dplace_offset + task_cores - 1,
task_command)
return dplace_command, None
# ------------------------------------------------------------------------------
| [
"radical.utils.which"
] | [((649, 667), 'radical.utils.which', 'ru.which', (['"""dplace"""'], {}), "('dplace')\n", (657, 667), True, 'import radical.utils as ru\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 10:06:05 2021
@author: ngbla
"""
import os
import cv2
import numpy as np
# Command: pip install pillow
from PIL import Image
#Initialize names and path to empty list
names = []
path = []
# Get the names of all the users
for users in os.listdir("img_training"):
names.append(users)
# Get the path to all the images
for name in names:
for image in os.listdir("img_training/{}".format(name)):
path_string = os.path.join("img_training/{}".format(name), image)
path.append(path_string)
faces = []
ids = []
# For each image create a numpy array and add it to faces list
for img_path in path:
image = Image.open(img_path).convert("L")
imgNp = np.array(image, "uint8")
id = int(img_path.split("/")[2].split("_")[0])
faces.append(imgNp)
ids.append(id)
# Convert the ids to numpy array and add it to ids list
ids = np.array(ids)
print("[INFO] Created faces and names Numpy Arrays")
print("[INFO] Initializing the Classifier")
# Make sure contrib is installed
# The command is pip install opencv-contrib-python
# Call the recognizer
trainer = cv2.face.LBPHFaceRecognizer_create()
#or use EigenFaceRecognizer by replacing above line with
#trainer = cv2.face.EigenFaceRecognizer_create()
#or use FisherFaceRecognizer by replacing above line with
#trainer = cv2.face.FisherFaceRecognizer_create() names Numpy Arrays")
# Give the faces and ids numpy arrays
trainer.train(faces, ids)
# Write the generated model to a yml file
trainer.write("training.yml")
#trainer.write("trainingEigen.yml")
print("[INFO] Training Done")
| [
"numpy.array",
"os.listdir",
"cv2.face.LBPHFaceRecognizer_create",
"PIL.Image.open"
] | [((313, 339), 'os.listdir', 'os.listdir', (['"""img_training"""'], {}), "('img_training')\n", (323, 339), False, 'import os\n'), ((940, 953), 'numpy.array', 'np.array', (['ids'], {}), '(ids)\n', (948, 953), True, 'import numpy as np\n'), ((1170, 1206), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (1204, 1206), False, 'import cv2\n'), ((756, 780), 'numpy.array', 'np.array', (['image', '"""uint8"""'], {}), "(image, 'uint8')\n", (764, 780), True, 'import numpy as np\n'), ((709, 729), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (719, 729), False, 'from PIL import Image\n')] |
#%%
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt
import matplotlib as mpl
import random
import time
import copy
from matplotlib import animation, rc
from IPython.display import HTML
def _update_plot (i,fig,scat,qax) :
scat.set_offsets(P[i])
qax.set_offsets(P[i])
VVV=[list(x) for x in zip(*nV[i])]
qax.set_UVC(VVV[0],VVV[1])
# qax.quiver(PPP[0],PPP[1],VVV[0],VVV[1],angles='xy')
# print ('Frames:%d' %i)
return scat,qax
def psi(s,b):
a = pow((1+s**2),-b)
return a
def csm(X,V,k):
a=[0,0]
for i in range(N):
s=pow((X[k][0]-X[i][0])**2+(X[k][1]-X[i][1])**2,0.5)
ps = psi(s,beta)
a[0]+= ps * (V[i][0]-V[k][0])
a[1]+= ps * (V[i][1]-V[k][1])
a[0]=a[0]/N
a[1]=a[1]/N
return a
# def ode_rk(X,):
if __name__ == '__main__':
N=int(input("N=?"))
beta=float(input("beta=?"))
T=int(input("T=?"))
Pinit=[]
Vinit=[]
for n in range(N):
x = random.uniform(-100,100)
y = random.uniform(-100,100)
Pinit.append([x,y])
x = random.uniform(5,70)
xd=random.randint(0,1)
y = random.uniform(5,70)
yd=random.randint(0,1)
Vinit.append([x*(2*xd-1),y*(2*yd-1)])
P=[Pinit]
V=[Vinit]
nV=[[[0,0] for row in range(N)]]
print(P[0])
print(V[0])
for n in range(N):
s=pow(V[0][n][0]**2+V[0][n][1]**2,0.5)
if (s==0) :
s=1
nV[0][n][0]=V[0][n][0]/s
nV[0][n][1]=V[0][n][1]/s
# P 위치 V 초기가ㅄ 설정, nV:V를 normalize -> 이거 np 사용하면 더 간단해질 수도 있을 듯
h=0.025
for t in range(1,T):
# print ("start %dth loop" %t)
# print (P[0])
Pnow=copy.deepcopy(P[t-1])
Vnow=copy.deepcopy(V[t-1])
nVnow=[[0,0] for row in range(N)]
K1=[]
K2=[]
K3=[]
K4=[]
# K1-K4가 runge kutta 에서 그 h*k1-h*k4를 가ㄱ각 k별로 구해서 list로 만든 것.
Phk1=copy.deepcopy(Pnow)
Vhk1=copy.deepcopy(Vnow)
for n in range(N):
k1=csm(Pnow,Vnow,n)
k1[0]*=h
k1[1]*=h
Phk1[n][0]+=Vnow[n][0]*h/2
Phk1[n][1]+=Vnow[n][1]*h/2
Vhk1[n][0]+=k1[0]/2
Vhk1[n][1]+=k1[1]/2
K1.append([Vnow[n],k1])
#Vhk1 = y+h*k1/2
Phk2=copy.deepcopy(Pnow)
Vhk2=copy.deepcopy(Vnow)
for n in range(N):
k2=csm(Phk1,Vhk1,n)
k2[0]*=h
k2[1]*=h
Phk2[n][0]+=Vhk1[n][0]*h/2
Phk2[n][1]+=Vhk1[n][1]*h/2
Vhk2[n][0]+=k2[0]/2
Vhk2[n][1]+=k2[1]/2
K2.append([Vhk1[n],k2])
#Vhk2 = y+h*k2/2
Phk3=copy.deepcopy(Pnow)
Vhk3=copy.deepcopy(Vnow)
for n in range(N):
k3=csm(Phk2,Vhk2,n)
k3[0]*=h
k3[1]*=h
Phk3[n][0]+=Vhk2[n][0]*h
Phk3[n][1]+=Vhk2[n][1]*h
Vhk3[n][0]+=k3[0]
Vhk3[n][1]+=k3[1]
K3.append([Vhk2[n],k3])
#Vhk3 = y+h*k3
for n in range(N):
k4=csm(Phk3,Vhk3,n)
k4[0]*=h
k4[1]*=h
K4.append([Vhk3[n],k4])
for n in range(N):
Pnow[n][0]+=(K1[n][0][0]+2*K2[n][0][0]+2*K3[n][0][0]+K4[n][0][0])*h/6
Pnow[n][1]+=(K1[n][0][1]+2*K2[n][0][1]+2*K3[n][0][1]+K4[n][0][1])*h/6
Vnow[n][0]+=(K1[n][1][0]+2*K2[n][1][0]+2*K3[n][1][0]+K4[n][1][0])/6
Vnow[n][1]+=(K1[n][1][1]+2*K2[n][1][1]+2*K3[n][1][1]+K4[n][1][1])/6
s=pow(Vnow[n][0]**2+Vnow[n][1]**2,0.5)
if (s==0):
s=1
nVnow[n][0]=Vnow[n][0]/s
nVnow[n][1]=Vnow[n][1]/s
P.append(Pnow)
V.append(Vnow)
nV.append(nVnow)
# print(P[0])
# print ("end")
print (Pnow)
print (Vnow)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([-1000,1000])
ax.set_ylim([-1000,1000])
PP=[list(x) for x in zip(*P[0])]
VV=[list(x) for x in zip(*nV[0])]
scat=plt.scatter(PP[0],PP[1],s=20)
scat.set_alpha(0.2)
qax=ax.quiver(PP[0],PP[1],VV[0],VV[1],angles='xy',width=0.001,scale=70)
ani = animation.FuncAnimation(fig,_update_plot,fargs=(fig,scat,qax),frames=T-1,interval=10,save_count=T-1)
#interval이 너무 작으니깐 save가 안됨-파일을 열때 에러남.
plt.show()
ani.save('csm-ode45-simu.mp4')
print("DONE")
# %%
| [
"random.uniform",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"copy.deepcopy",
"random.randint",
"matplotlib.pyplot.show"
] | [((3950, 3962), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3960, 3962), True, 'import matplotlib.pyplot as plt\n'), ((4139, 4170), 'matplotlib.pyplot.scatter', 'plt.scatter', (['PP[0]', 'PP[1]'], {'s': '(20)'}), '(PP[0], PP[1], s=20)\n', (4150, 4170), True, 'import matplotlib.pyplot as plt\n'), ((4281, 4396), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', '_update_plot'], {'fargs': '(fig, scat, qax)', 'frames': '(T - 1)', 'interval': '(10)', 'save_count': '(T - 1)'}), '(fig, _update_plot, fargs=(fig, scat, qax), frames=T -\n 1, interval=10, save_count=T - 1)\n', (4304, 4396), False, 'from matplotlib import animation, rc\n'), ((4436, 4446), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4444, 4446), True, 'import matplotlib.pyplot as plt\n'), ((1010, 1035), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (1024, 1035), False, 'import random\n'), ((1047, 1072), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (1061, 1072), False, 'import random\n'), ((1121, 1142), 'random.uniform', 'random.uniform', (['(5)', '(70)'], {}), '(5, 70)\n', (1135, 1142), False, 'import random\n'), ((1153, 1173), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1167, 1173), False, 'import random\n'), ((1185, 1206), 'random.uniform', 'random.uniform', (['(5)', '(70)'], {}), '(5, 70)\n', (1199, 1206), False, 'import random\n'), ((1217, 1237), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1231, 1237), False, 'import random\n'), ((1752, 1775), 'copy.deepcopy', 'copy.deepcopy', (['P[t - 1]'], {}), '(P[t - 1])\n', (1765, 1775), False, 'import copy\n'), ((1787, 1810), 'copy.deepcopy', 'copy.deepcopy', (['V[t - 1]'], {}), '(V[t - 1])\n', (1800, 1810), False, 'import copy\n'), ((1992, 2011), 'copy.deepcopy', 'copy.deepcopy', (['Pnow'], {}), '(Pnow)\n', (2005, 2011), False, 'import copy\n'), ((2025, 2044), 'copy.deepcopy', 
'copy.deepcopy', (['Vnow'], {}), '(Vnow)\n', (2038, 2044), False, 'import copy\n'), ((2367, 2386), 'copy.deepcopy', 'copy.deepcopy', (['Pnow'], {}), '(Pnow)\n', (2380, 2386), False, 'import copy\n'), ((2400, 2419), 'copy.deepcopy', 'copy.deepcopy', (['Vnow'], {}), '(Vnow)\n', (2413, 2419), False, 'import copy\n'), ((2741, 2760), 'copy.deepcopy', 'copy.deepcopy', (['Pnow'], {}), '(Pnow)\n', (2754, 2760), False, 'import copy\n'), ((2774, 2793), 'copy.deepcopy', 'copy.deepcopy', (['Vnow'], {}), '(Vnow)\n', (2787, 2793), False, 'import copy\n')] |
from typing import List, Dict, Optional, Tuple
import pandas as pd
from datasets import load_dataset
from os.path import join
from datasets import DatasetDict
from nerblackbox.modules.datasets.formatter.base_formatter import (
BaseFormatter,
SENTENCES_ROWS,
)
SUCX_SUBSETS = [
"original_cased",
"original_lower",
"original_lower_mix",
"simple_cased",
"simple_lower",
"simple_lower_mix",
]
SUCX_NER_TAG_LIST = {
"original": [
"person",
"place",
"inst",
"work",
"product",
"animal",
"event",
"myth",
"other",
],
"simple": ["PER", "LOC", "ORG", "WRK", "OBJ", "PRS", "EVN", "PRS", "MSR", "TME"],
}
class SUCXFormatter(BaseFormatter):
def __init__(self, ner_dataset_subset: str):
ner_dataset = "sucx"
assert (
ner_dataset_subset in SUCX_SUBSETS
), f"ERROR! subset = {ner_dataset_subset} unknown."
tag_group = ner_dataset_subset.split("_")[0]
assert tag_group in [
"original",
"simple",
], f"ERROR! tag_group = {tag_group} should be original or simple."
ner_tag_list = SUCX_NER_TAG_LIST[tag_group]
super().__init__(ner_dataset, ner_tag_list, ner_dataset_subset)
self.ner_dataset_subset = ner_dataset_subset
####################################################################################################################
# ABSTRACT BASE METHODS
####################################################################################################################
def get_data(self, verbose: bool) -> None: # pragma: no cover
"""
I: get data
Args:
verbose: [bool]
"""
dataset = load_dataset("KBLab/sucx3_ner", self.ner_dataset_subset)
assert isinstance(
dataset, DatasetDict
), f"ERROR! type(dataset) = {type(dataset)} expected to be DatasetDict."
dataset["val"] = dataset["validation"]
for phase in ["train", "val", "test"]:
sentences_rows_formatted = []
for sample in dataset[phase]:
sentences_rows_formatted.append(
(" ".join(sample["ner_tags"]), " ".join(sample["tokens"]))
)
df = pd.DataFrame(sentences_rows_formatted)
assert (
len(df) == dataset[phase].num_rows
), f"ERROR! number of rows = {len(df)} != {dataset[phase].num_rows}"
file_path = join(self.dataset_path, f"{phase}_original.csv")
df.to_csv(file_path, sep="\t", header=False, index=False)
def create_ner_tag_mapping(self) -> Dict[str, str]:
"""
II: customize ner_training tag mapping if wanted
Returns:
ner_tag_mapping: [dict] w/ keys = tags in original data, values = tags in formatted data
"""
return dict()
def format_data(
self, shuffle: bool = True, write_csv: bool = True
) -> Optional[SENTENCES_ROWS]:
"""
III: format data
Args:
shuffle: whether to shuffle rows of dataset
write_csv: whether to write dataset to csv (should always be True except for testing)
Returns:
sentences_rows: only if write_csv = False
"""
self.set_original_file_paths()
for phase in ["train", "val", "test"]:
file_path_original = join(self.dataset_path, self.file_name[phase])
df = pd.read_csv(file_path_original, sep="\t", header=None)
_sentences_rows = [_np_array for _np_array in df.values]
sentences_rows = [
[[word, tag] for word, tag in zip(words.split(), tags.split())]
for tags, words in _sentences_rows
]
if shuffle:
sentences_rows = self._shuffle_dataset(phase, sentences_rows)
if write_csv: # pragma: no cover
self._write_formatted_csv(phase, sentences_rows)
else:
return sentences_rows
return None
def set_original_file_paths(self) -> None:
"""
III: format data
Changed Attributes:
file_paths: [Dict[str, str]], e.g. {'train': <path_to_train_csv>, 'val': ..}
Returns: -
"""
self.file_name = {
"train": "train_original.csv",
"val": "val_original.csv",
"test": "test_original.csv",
}
def _parse_row(self, _row: str) -> List[str]:
"""
III: format data
Args:
_row: e.g. "Det PER X B"
Returns:
_row_list: e.g. ["Det", "PER", "X", "B"]
"""
pass
def _format_original_file(self, _row_list: List[str]) -> Optional[List[str]]:
"""
III: format data
Args:
_row_list: e.g. ["B-product I-product I-product B-product Audi Coupé Quattro 20V"]
Returns:
_row_list_formatted: e.g. ["test", "B-PER"]
"""
pass
def resplit_data(
self, val_fraction: float = 0.0, write_csv: bool = True
) -> Optional[Tuple[pd.DataFrame, ...]]:
"""
IV: resplit data
Args:
val_fraction: [float], e.g. 0.3
write_csv: whether to write dataset to csv (should always be True except for testing)
Returns:
df_train: only if write_csv = False
df_val: only if write_csv = False
df_test: only if write_csv = False
"""
# train -> train
df_train = self._read_formatted_csvs(["train"])
# val -> val
df_val = self._read_formatted_csvs(["val"])
# test -> test
df_test = self._read_formatted_csvs(["test"])
if write_csv: # pragma: no cover
self._write_final_csv("train", df_train)
self._write_final_csv("val", df_val)
self._write_final_csv("test", df_test)
return None
else:
return df_train, df_val, df_test
| [
"pandas.DataFrame",
"os.path.join",
"datasets.load_dataset",
"pandas.read_csv"
] | [((1778, 1834), 'datasets.load_dataset', 'load_dataset', (['"""KBLab/sucx3_ner"""', 'self.ner_dataset_subset'], {}), "('KBLab/sucx3_ner', self.ner_dataset_subset)\n", (1790, 1834), False, 'from datasets import load_dataset\n'), ((2318, 2356), 'pandas.DataFrame', 'pd.DataFrame', (['sentences_rows_formatted'], {}), '(sentences_rows_formatted)\n', (2330, 2356), True, 'import pandas as pd\n'), ((2534, 2582), 'os.path.join', 'join', (['self.dataset_path', 'f"""{phase}_original.csv"""'], {}), "(self.dataset_path, f'{phase}_original.csv')\n", (2538, 2582), False, 'from os.path import join\n'), ((3457, 3503), 'os.path.join', 'join', (['self.dataset_path', 'self.file_name[phase]'], {}), '(self.dataset_path, self.file_name[phase])\n', (3461, 3503), False, 'from os.path import join\n'), ((3521, 3575), 'pandas.read_csv', 'pd.read_csv', (['file_path_original'], {'sep': '"""\t"""', 'header': 'None'}), "(file_path_original, sep='\\t', header=None)\n", (3532, 3575), True, 'import pandas as pd\n')] |
import os
import sys
sys.path.insert(1, f'{os.path.dirname(os.getcwd())}\\models\\')
import pandas as pd
import requests
from Mapper import df_ISO3_mapper
def get_PL_table(url):
x = requests.get(url)
league_table = pd.read_html(x.content)[0]
del league_table['Last 6']
return league_table
def save_csv(tab):
tab_path = f'{os.path.dirname(os.getcwd())}\\data\\Table\\table.csv'
tab.to_csv(tab_path, index=0, sep=',')
def collect(mapper):
print('Collecting current table...')
table_url = 'https://www.skysports.com/premier-league-table'
pl_table = get_PL_table(table_url)
pl_table = df_ISO3_mapper(pl_table, mapper)
save_csv(pl_table)
| [
"pandas.read_html",
"Mapper.df_ISO3_mapper",
"requests.get",
"os.getcwd"
] | [((193, 210), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (205, 210), False, 'import requests\n'), ((638, 670), 'Mapper.df_ISO3_mapper', 'df_ISO3_mapper', (['pl_table', 'mapper'], {}), '(pl_table, mapper)\n', (652, 670), False, 'from Mapper import df_ISO3_mapper\n'), ((230, 253), 'pandas.read_html', 'pd.read_html', (['x.content'], {}), '(x.content)\n', (242, 253), True, 'import pandas as pd\n'), ((61, 72), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (70, 72), False, 'import os\n'), ((372, 383), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (381, 383), False, 'import os\n')] |
from factoryModel.config import mt_config as confFile
from factoryModel.preprocessing import SentenceSplit, cleanData, TrainMaker, Embedding
from factoryModel.dataLoaders import textLoader
from factoryModel.models import ModelBuilding
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model
from factoryModel.utils.helperFunctions import save_clean_data
import numpy as np
# Path to the raw parallel-corpus file configured for this run.
FILE_PATH = confFile.DATA_PATH

# Preprocessing pipeline: sentence splitting, cleaning, tokenising/sequencing,
# and embedding-matrix construction.
ss = SentenceSplit(10000)
cd = cleanData()
tm = TrainMaker(confFile.MAX_NUM_WORDS)
em = Embedding()
tL = textLoader(preprocessors=[ss, cd, tm])

# Run the full preprocessing pipeline over the source document.
tokenizer_input, input_vocab_size, tokenizer_target, target_vocab_size, docArray, input_sequences, target_sequences, target_sequences_OneHot, target_sequences_input, max_len_input, max_len_target = tL.loadDoc(FILE_PATH)

word2idx_inputs = tokenizer_input.word_index
word2idx_target = tokenizer_target.word_index
# +1 because index 0 is reserved for padding by the tokenizer.
num_words_output = len(word2idx_target) + 1

# Build the pretrained embedding matrix for the encoder vocabulary.
embeddingMatrix = em.createEmbeddingMatrix(confFile.EMBEDDING_PATH, confFile.MAX_NUM_WORDS, word2idx_inputs, confFile.EMBEDDING_DIM)
num_words = min(confFile.MAX_NUM_WORDS, len(word2idx_inputs) + 1)

# Assemble and compile the encoder-decoder model.
Model = ModelBuilding()
model = Model.EncDecbuild(num_words, confFile.LATENT_DIM, confFile.EMBEDDING_DIM, embeddingMatrix, max_len_input, max_len_target, num_words_output)
model.compile(optimizer='adam', loss='categorical_crossentropy')

# BUG FIX: `mode` expects the string 'min' (or 'max'/'auto'); previously the
# builtin function `min` was passed, which Keras rejects with a warning and
# silently falls back to 'auto'.
checkpoint = ModelCheckpoint('model.h5', monitor='loss', verbose=1, save_best_only=True, mode='min')

# Initial zero states for the decoder (one row per training sequence).
z = np.zeros((len(input_sequences), confFile.LATENT_DIM))
"""
h = model.fit(
    [input_sequences, target_sequences_input, z, z], target_sequences_OneHot,
    batch_size=confFile.BATCH_SIZE,
    epochs=confFile.EPOCHS,
    callbacks = [checkpoint]
)
"""
model.save_weights("weights.h5")
model.load_weights("weights.h5")

# Persist every artefact the inference stage needs.
save_clean_data(embeddingMatrix, 'embeddingMatrix.pkl')
save_clean_data(num_words, 'num_words.pkl')
save_clean_data(num_words_output, 'num_words_output.pkl')
save_clean_data(tokenizer_input, 'tokenizer_input.pkl')
save_clean_data(input_vocab_size, 'input_vocab_size.pkl')
save_clean_data(tokenizer_target, 'tokenizer_target.pkl')
save_clean_data(target_vocab_size, 'target_vocab_size.pkl')
save_clean_data(input_sequences, 'input_sequences.pkl')
save_clean_data(target_sequences, 'target_sequences.pkl')
save_clean_data(target_sequences_input, 'target_sequences_input.pkl')
save_clean_data(word2idx_inputs, 'word2idx_inputs.pkl')
save_clean_data(word2idx_target, 'word2idx_target.pkl')
save_clean_data(max_len_input, 'max_len_input.pkl')
save_clean_data(max_len_target, 'max_len_target.pkl')

# Build the separate encoder/decoder models used at inference time.
infmodels = Model.encoderDecoderModel(max_len_input, confFile.LATENT_DIM)
| [
"factoryModel.preprocessing.Embedding",
"factoryModel.preprocessing.TrainMaker",
"factoryModel.utils.helperFunctions.save_clean_data",
"factoryModel.dataLoaders.textLoader",
"factoryModel.preprocessing.cleanData",
"tensorflow.keras.callbacks.ModelCheckpoint",
"factoryModel.models.ModelBuilding",
"fact... | [((459, 479), 'factoryModel.preprocessing.SentenceSplit', 'SentenceSplit', (['(10000)'], {}), '(10000)\n', (472, 479), False, 'from factoryModel.preprocessing import SentenceSplit, cleanData, TrainMaker, Embedding\n'), ((485, 496), 'factoryModel.preprocessing.cleanData', 'cleanData', ([], {}), '()\n', (494, 496), False, 'from factoryModel.preprocessing import SentenceSplit, cleanData, TrainMaker, Embedding\n'), ((502, 536), 'factoryModel.preprocessing.TrainMaker', 'TrainMaker', (['confFile.MAX_NUM_WORDS'], {}), '(confFile.MAX_NUM_WORDS)\n', (512, 536), False, 'from factoryModel.preprocessing import SentenceSplit, cleanData, TrainMaker, Embedding\n'), ((542, 553), 'factoryModel.preprocessing.Embedding', 'Embedding', ([], {}), '()\n', (551, 553), False, 'from factoryModel.preprocessing import SentenceSplit, cleanData, TrainMaker, Embedding\n'), ((560, 598), 'factoryModel.dataLoaders.textLoader', 'textLoader', ([], {'preprocessors': '[ss, cd, tm]'}), '(preprocessors=[ss, cd, tm])\n', (570, 598), False, 'from factoryModel.dataLoaders import textLoader\n'), ((1159, 1174), 'factoryModel.models.ModelBuilding', 'ModelBuilding', ([], {}), '()\n', (1172, 1174), False, 'from factoryModel.models import ModelBuilding\n'), ((1400, 1489), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""model.h5"""'], {'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': 'min'}), "('model.h5', monitor='loss', verbose=1, save_best_only=True,\n mode=min)\n", (1415, 1489), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((1810, 1865), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['embeddingMatrix', '"""embeddingMatrix.pkl"""'], {}), "(embeddingMatrix, 'embeddingMatrix.pkl')\n", (1825, 1865), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((1866, 1909), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['num_words', '"""num_words.pkl"""'], 
{}), "(num_words, 'num_words.pkl')\n", (1881, 1909), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((1910, 1967), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['num_words_output', '"""num_words_output.pkl"""'], {}), "(num_words_output, 'num_words_output.pkl')\n", (1925, 1967), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((1968, 2023), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['tokenizer_input', '"""tokenizer_input.pkl"""'], {}), "(tokenizer_input, 'tokenizer_input.pkl')\n", (1983, 2023), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((2023, 2080), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['input_vocab_size', '"""input_vocab_size.pkl"""'], {}), "(input_vocab_size, 'input_vocab_size.pkl')\n", (2038, 2080), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((2080, 2137), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['tokenizer_target', '"""tokenizer_target.pkl"""'], {}), "(tokenizer_target, 'tokenizer_target.pkl')\n", (2095, 2137), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((2137, 2196), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['target_vocab_size', '"""target_vocab_size.pkl"""'], {}), "(target_vocab_size, 'target_vocab_size.pkl')\n", (2152, 2196), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((2196, 2251), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['input_sequences', '"""input_sequences.pkl"""'], {}), "(input_sequences, 'input_sequences.pkl')\n", (2211, 2251), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((2251, 2308), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['target_sequences', '"""target_sequences.pkl"""'], {}), "(target_sequences, 
'target_sequences.pkl')\n", (2266, 2308), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((2308, 2377), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['target_sequences_input', '"""target_sequences_input.pkl"""'], {}), "(target_sequences_input, 'target_sequences_input.pkl')\n", (2323, 2377), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((2377, 2432), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['word2idx_inputs', '"""word2idx_inputs.pkl"""'], {}), "(word2idx_inputs, 'word2idx_inputs.pkl')\n", (2392, 2432), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((2432, 2487), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['word2idx_target', '"""word2idx_target.pkl"""'], {}), "(word2idx_target, 'word2idx_target.pkl')\n", (2447, 2487), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((2487, 2538), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['max_len_input', '"""max_len_input.pkl"""'], {}), "(max_len_input, 'max_len_input.pkl')\n", (2502, 2538), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n'), ((2539, 2592), 'factoryModel.utils.helperFunctions.save_clean_data', 'save_clean_data', (['max_len_target', '"""max_len_target.pkl"""'], {}), "(max_len_target, 'max_len_target.pkl')\n", (2554, 2592), False, 'from factoryModel.utils.helperFunctions import save_clean_data\n')] |
from ..api import _v1
from pathlib import Path
from app.error import Error
import pandas as pd
from app.components._data import dataframeHandler
import numpy as np
from sklearn.impute import KNNImputer
from sklearn import preprocessing
# Template placeholders: every value flagged below must be customised when
# deriving a new processor plugin from this example.
# Unique id of the plugin inside the internal API registry.
pluginId = "processorPluginExample"
# Plugin name; must exactly match the handler class name defined below
# (the registration code resolves the class via eval(pluginName)).
pluginName = "ProcessorPluginExample"
# Human-readable description shown for the plugin.
pluginDescription = "Plugin description"
# Label rendered for this plugin in the UI menu.
pluginInterfaceName = "Procesar..."
# Declared actions and their parameters; the UI renders a form from each
# Action's Param list (kinds seen here: select, number, string, file).
Actions = [ _v1.Action(
            name="exampleAction",
            description="example action",
            params=[
                _v1.Param(name="exampleSelect", kind="select", options=["option1", "option2", "option3"]),
                _v1.Param(name="exampleNumber", kind="number"),
                _v1.Param(name="exampleString", kind="string"),
                _v1.Param(name="exampleFile", kind="file"),
            ]),
            _v1.Action(
            name="exampleAction",
            description="example action",
            params=[
                _v1.Param(name="exampleSelect", kind="select", options=["option1", "option2", "option3"]),
                _v1.Param(name="exampleNumber", kind="number"),
                _v1.Param(name="exampleString", kind="string"),
                _v1.Param(name="exampleFile", kind="file"),
            ])
]
class ProcessorPluginExample:
    """Example processor plugin.

    Instances are callable: the framework invokes the instance with the
    incoming request, the request is dispatched to the handler matching the
    ``action`` query parameter, and the current dataframe page is returned.
    """

    def __init__(self):
        # Action name -> handler. "default" runs when no ``action`` query
        # parameter is present. New handlers must be added to this dict.
        self.actions = {
            "default": self.exampleActionHandler,
            "exampleAction": self.exampleActionHandler,
        }
        # Row window requested by the client; refreshed on every call.
        self.pagination = {
            "startRow": None,
            "endRow": None,
        }

    def exampleActionHandler(self, request):
        """Example handler: echoes the submitted form params and re-saves
        the cached dataframe unchanged."""
        df = dataframeHandler.getDataframe()
        column = request.form.get('column')
        axis = request.form.get('axis')
        # Parameters declared for this action in the Actions list above.
        sel_value = request.form.get('exampleSelect')
        num_value = request.form.get('exampleNumber')
        str_value = request.form.get('exampleString')
        file_value = request.files['exampleFile']
        # Demonstrate access to the params by printing them.
        print("exampleSelect: ", sel_value)
        print("exampleNumber: ", num_value)
        print("exampleString: ", str_value)
        print("exampleFile: ", file_value)
        # The dataframe must always be written back to the local cache so the
        # caller can return it.
        dataframeHandler.saveDataframe(df)

    # Request-parsing boilerplate; normally needs no changes.
    def _updatePagination(self, request: any):
        """Refresh self.pagination from the request's startRow/endRow args."""
        for key in ("startRow", "endRow"):
            raw = request.args.get(key)
            self.pagination[key] = None if raw is None else int(raw)

    # Dispatch boilerplate; normally needs no changes.
    def __call__(self, request: any):
        print("ProcessorPluginExample called")
        self._updatePagination(request)
        action = request.args.get("action")
        if action is None:
            action = "default"
        elif action not in self.actions:
            raise Error('Accion {} desconocida'.format(action))
        self.actions[action](request)
        return dataframeHandler.getAllData(self.pagination)
# Boilerplate registration: build the plugin descriptor and register it with
# the internal API. eval(pluginName) resolves the handler class declared
# above, which is why pluginName must match the class name exactly.
component = _v1.ProcessorPlugin(name=pluginName, description=pluginDescription, interfacename=pluginInterfaceName, actions=Actions, handler_class=eval(pluginName))
_v1.register_processor_plugin(component)
"app.components._data.dataframeHandler.getAllData",
"app.components._data.dataframeHandler.getDataframe",
"app.components._data.dataframeHandler.saveDataframe"
] | [((2221, 2252), 'app.components._data.dataframeHandler.getDataframe', 'dataframeHandler.getDataframe', ([], {}), '()\n', (2250, 2252), False, 'from app.components._data import dataframeHandler\n'), ((3105, 3139), 'app.components._data.dataframeHandler.saveDataframe', 'dataframeHandler.saveDataframe', (['df'], {}), '(df)\n', (3135, 3139), False, 'from app.components._data import dataframeHandler\n'), ((4068, 4112), 'app.components._data.dataframeHandler.getAllData', 'dataframeHandler.getAllData', (['self.pagination'], {}), '(self.pagination)\n', (4095, 4112), False, 'from app.components._data import dataframeHandler\n')] |
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail_blog.models import BlogPage, BlogIndexPage
# Wagtail admin edit panels for the blog models. Overriding content_panels
# controls which fields appear (and in what order) on each page's edit screen.
BlogIndexPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('headline'),
]

BlogPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('author'),
    FieldPanel('date'),
    FieldPanel('date_updated'),
    FieldPanel('content', classname="full"),
    # Images use the image-chooser widget rather than a plain field.
    ImageChooserPanel('image'),
    FieldPanel('tags'),
]
| [
"wagtail.admin.edit_handlers.FieldPanel",
"wagtail.images.edit_handlers.ImageChooserPanel"
] | [((238, 281), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {'classname': '"""full title"""'}), "('title', classname='full title')\n", (248, 281), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((287, 309), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""headline"""'], {}), "('headline')\n", (297, 309), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((346, 389), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {'classname': '"""full title"""'}), "('title', classname='full title')\n", (356, 389), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((395, 415), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""author"""'], {}), "('author')\n", (405, 415), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((421, 439), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""date"""'], {}), "('date')\n", (431, 439), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((445, 471), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""date_updated"""'], {}), "('date_updated')\n", (455, 471), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((477, 516), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""content"""'], {'classname': '"""full"""'}), "('content', classname='full')\n", (487, 516), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((522, 548), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""image"""'], {}), "('image')\n", (539, 548), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((554, 572), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""tags"""'], {}), "('tags')\n", (564, 572), False, 'from wagtail.admin.edit_handlers import FieldPanel\n')] |
# -*- coding: utf-8 -*-
import os
import time
import unittest
import inspect
from mock import patch
import requests
from configparser import ConfigParser
from kb_Amplicon.kb_AmpliconImpl import kb_Amplicon
from kb_Amplicon.kb_AmpliconServer import MethodContext
from installed_clients.authclient import KBaseAuth as _KBaseAuth
from kb_Amplicon.Utils.MDSUtils import MDSUtils
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.WorkspaceClient import Workspace
from installed_clients.AbstractHandleClient import AbstractHandle as HandleService
class kb_AmpliconTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """One-time fixture: read the deploy config, build service clients,
        create a scratch workspace, and upload a small dummy file to shock
        so the mocked ``file_to_shock`` calls have a canned record to return.
        """
        cls.token = os.environ.get('KB_AUTH_TOKEN', None)
        config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        for nameval in config.items('kb_Amplicon'):
            cls.cfg[nameval[0]] = nameval[1]

        # Getting username from Auth profile for token
        auth_service_url = cls.cfg['auth-service-url']
        auth_client = _KBaseAuth(auth_service_url)
        user_id = auth_client.get_user(cls.token)

        # WARNING: don't call any logging methods on the context object,
        # it'll result in a NoneType error
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': cls.token,
                        'user_id': user_id,
                        'provenance': [
                            {'service': 'kb_Amplicon',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []
                             }],
                        'authenticated': 1})
        # Clients for the workspace, the module under test, and file/handle
        # services used throughout the suite.
        cls.wsURL = cls.cfg['workspace-url']
        cls.wsClient = Workspace(cls.wsURL)
        cls.shockURL = cls.cfg['shock-url']
        cls.serviceImpl = kb_Amplicon(cls.cfg)
        cls.scratch = cls.cfg['scratch']
        cls.callback_url = os.environ['SDK_CALLBACK_URL']
        cls.dfu = DataFileUtil(cls.callback_url)
        cls.mds_util = MDSUtils(cls.cfg)
        cls.hs = HandleService(url=cls.cfg['handle-service-url'],
                            token=cls.token)

        # Millisecond timestamp keeps the workspace name unique per run.
        suffix = int(time.time() * 1000)
        cls.wsName = "test_kb_Amplicon_" + str(suffix)
        ret = cls.wsClient.create_workspace({'workspace': cls.wsName})
        cls.wsId = ret[0]

        # Upload a tiny file once; mock_file_to_shock returns this record.
        small_file = os.path.join(cls.scratch, 'test.txt')
        with open(small_file, "w") as f:
            f.write("empty content")
        cls.test_shock = cls.dfu.file_to_shock({'file_path': small_file, 'make_handle': True})
        # Track created nodes/handles so tearDownClass can remove them.
        cls.handles_to_delete = []
        cls.nodes_to_delete = []
        cls.handles_to_delete.append(cls.test_shock['handle']['hid'])
        cls.nodes_to_delete.append(cls.test_shock['shock_id'])
    @classmethod
    def tearDownClass(cls):
        """Remove everything setUpClass created: the scratch workspace, the
        shock nodes, and the handles. hasattr guards make this safe even if
        setUpClass failed part-way through."""
        if hasattr(cls, 'wsName'):
            cls.wsClient.delete_workspace({'workspace': cls.wsName})
            print('Test workspace was deleted')
        if hasattr(cls, 'nodes_to_delete'):
            for node in cls.nodes_to_delete:
                cls.delete_shock_node(node)
        if hasattr(cls, 'handles_to_delete'):
            cls.hs.delete_handles(cls.hs.hids_to_handles(cls.handles_to_delete))
            print('Deleted handles ' + str(cls.handles_to_delete))
@classmethod
def delete_shock_node(cls, node_id):
header = {'Authorization': 'Oauth {0}'.format(cls.token)}
requests.delete(cls.shockURL + '/node/' + node_id, headers=header,
allow_redirects=True)
print('Deleted shock node ' + node_id)
def getMDSUtil(self):
return self.__class__.mds_util
    def mock_file_to_shock(params):
        # NOTE: not an instance method despite living in the class body --
        # it is passed as side_effect to the patched
        # DataFileUtil.file_to_shock, so ``params`` receives that call's
        # argument dict and the canned shock record from setUpClass is
        # returned instead of performing a real upload.
        print('Mocking DataFileUtilClient.file_to_shock')
        print(params)
        return kb_AmpliconTest().test_shock
    def loadExpressionMatrix(self):
        """Save the fixture objects the MDS tests need: column/row attribute
        mappings, a 5x4 ExpressionMatrix, and an associated ExpressionMatrix,
        caching their refs on the class.

        NOTE(review): on the first call this method falls off the end and
        returns None; only repeat calls return the cached expr_matrix_ref.
        Callers ignore the return value, so this is harmless but surprising.
        """
        if hasattr(self.__class__, 'expr_matrix_ref'):
            return self.__class__.expr_matrix_ref

        # matrix_file_name = 'test_import.xlsx'

        # Attribute mapping for the matrix columns (4 instances x 3 attrs).
        col_attribute = {'attributes': [{'attribute': 'test_attribute_1',
                                         'attribute_ont_id': 'OBI_0500020',
                                         'source': 'upload',
                                         'unit': 'Hour',
                                         'unit_ont_id': 'UO_0000032'},
                                        {'attribute': 'test_attribute_2',
                                         'attribute_ont_id': 'CHEBI:9168',
                                         'source': 'upload',
                                         'unit': 'nanogram per milliliter',
                                         'unit_ont_id': 'UO_0000275'},
                                        {'attribute': 'test_attribute_3',
                                         'attribute_ont_id': 'CHEBI:9168',
                                         'source': 'upload',
                                         'unit': 'nanogram per milliliter',
                                         'unit_ont_id': 'UO_0000275'}],
                         'instances': {'instance_1': ['1', '5', '9'],
                                       'instance_2': ['2', '6', '10'],
                                       'instance_3': ['3', '7', '11'],
                                       'instance_4': ['4', '8', '12']},
                         'ontology_mapping_method': 'User Curation'}

        info = self.dfu.save_objects({
                            'id': self.wsId,
                            'objects': [{'type': 'KBaseExperiments.AttributeMapping',
                                         'data': col_attribute,
                                         'name': 'test_ExpressionMatrix_col_attribute_mapping'}]})[0]

        col_attributemapping_ref = "%s/%s/%s" % (info[6], info[0], info[4])
        self.__class__.col_attributemapping_ref = col_attributemapping_ref
        print('Loaded Col AttributeMapping: ' + col_attributemapping_ref)

        # Attribute mapping for the matrix rows (5 genes x 3 attrs).
        row_attribute = {'attributes': [{'attribute': 'test_attribute_1',
                                         'attribute_ont_id': 'OBI_0500020',
                                         'source': 'upload',
                                         'unit': 'Hour',
                                         'unit_ont_id': 'UO_0000032'},
                                        {'attribute': 'test_attribute_2',
                                         'attribute_ont_id': 'CHEBI:9168',
                                         'source': 'upload',
                                         'unit': 'nanogram per milliliter',
                                         'unit_ont_id': 'UO_0000275'},
                                        {'attribute': 'test_attribute_3',
                                         'attribute_ont_id': 'CHEBI:9168',
                                         'source': 'upload',
                                         'unit': 'nanogram per milliliter',
                                         'unit_ont_id': 'UO_0000275'}],
                         'instances': {'WRI_RS00010_CDS_1': ['1', '4', '7'],
                                       'WRI_RS00015_CDS_1': ['3', '4', '8'],
                                       'WRI_RS00025_CDS_1': ['3', '6', '7'],
                                       'WRI_RS00030_CDS_1': ['3', '6', '7'],
                                       'WRI_RS00035_CDS_1': ['3', '6', '7']},
                         'ontology_mapping_method': 'User Curation'}

        info = self.dfu.save_objects({
                            'id': self.wsId,
                            'objects': [{'type': 'KBaseExperiments.AttributeMapping',
                                         'data': row_attribute,
                                         'name': 'test_ExpressionMatrix_row_attribute_mapping'}]})[0]

        row_attributemapping_ref = "%s/%s/%s" % (info[6], info[0], info[4])
        self.__class__.row_attributemapping_ref = row_attributemapping_ref
        print('Loaded Row AttributeMapping: ' + row_attributemapping_ref)

        # Main 5x4 expression matrix, linked to both attribute mappings.
        matrix_data = {'attributes': {'Instrument': 'Old Faithful',
                                      'Scientist': '<NAME>'},
                       'col_attributemapping_ref': col_attributemapping_ref,
                       'col_mapping': {'instance_1': 'instance_1',
                                       'instance_2': 'instance_2',
                                       'instance_3': 'instance_3',
                                       'instance_4': 'instance_4'},
                       'col_normalization': 'test_col_normalization',
                       'data': {'col_ids': ['instance_1', 'instance_2', 'instance_3',
                                            'instance_4'],
                                'row_ids': ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1',
                                            'WRI_RS00025_CDS_1', 'WRI_RS00030_CDS_1',
                                            'WRI_RS00035_CDS_1'],
                                'values': [[1, 2, 3, 4],
                                           [50, 60, 70, 80],
                                           [9, 10, 11, 12],
                                           [9, 10, 11, 12],
                                           [9, 10, 11, 12]]},
                       'description': 'test_desc',
                       'row_attributemapping_ref': row_attributemapping_ref,
                       'row_mapping': {'WRI_RS00010_CDS_1': 'WRI_RS00010_CDS_1',
                                       'WRI_RS00015_CDS_1': 'WRI_RS00015_CDS_1',
                                       'WRI_RS00025_CDS_1': 'WRI_RS00025_CDS_1',
                                       'WRI_RS00030_CDS_1': 'WRI_RS00030_CDS_1',
                                       'WRI_RS00035_CDS_1': 'WRI_RS00035_CDS_1'},
                       'row_normalization': 'test_row_normalization',
                       'scale': 'log2',
                       'search_attributes': ['Scientist | <NAME>',
                                             'Instrument | Old Faithful']}

        info = self.dfu.save_objects({'id': self.wsId,
                                      'objects': [{'type': 'KBaseMatrices.ExpressionMatrix',
                                                   'data': matrix_data,
                                                   'name': 'test_ExpressionMatrix'}]})[0]

        expr_matrix_ref = "%s/%s/%s" % (info[6], info[0], info[4])
        self.__class__.expr_matrix_ref = expr_matrix_ref
        print('Loaded ExpressionMatrix: ' + expr_matrix_ref)

        # load associated matrix
        # Same shape/ids as the main matrix but with different values, used
        # by tests that pass associated_matrix_obj_ref.
        matrix_data = {'attributes': {'Instrument': 'Old Faithful',
                                      'Scientist': '<NAME>'},
                       'col_attributemapping_ref': col_attributemapping_ref,
                       'col_mapping': {'instance_1': 'instance_1',
                                       'instance_2': 'instance_2',
                                       'instance_3': 'instance_3',
                                       'instance_4': 'instance_4'},
                       'col_normalization': 'test_col_normalization',
                       'data': {'col_ids': ['instance_1', 'instance_2', 'instance_3',
                                            'instance_4'],
                                'row_ids': ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1',
                                            'WRI_RS00025_CDS_1', 'WRI_RS00030_CDS_1',
                                            'WRI_RS00035_CDS_1'],
                                'values': [[0.1, 0.2, 0.3, 0.4],
                                           [0.5, 0.6, 0.7, 0.8],
                                           [0.9, 1, 1.1, 1.2],
                                           [0.9, 1, 1.1, 1.2],
                                           [0.9, 1, 1.1, 1.2]]},
                       'description': 'test_desc',
                       'row_attributemapping_ref': row_attributemapping_ref,
                       'row_mapping': {'WRI_RS00010_CDS_1': 'WRI_RS00010_CDS_1',
                                       'WRI_RS00015_CDS_1': 'WRI_RS00015_CDS_1',
                                       'WRI_RS00025_CDS_1': 'WRI_RS00025_CDS_1',
                                       'WRI_RS00030_CDS_1': 'WRI_RS00030_CDS_1',
                                       'WRI_RS00035_CDS_1': 'WRI_RS00035_CDS_1'},
                       'row_normalization': 'test_row_normalization',
                       'scale': 'log2',
                       'search_attributes': ['Scientist | <NAME>',
                                             'Instrument | Old Faithful']}

        info = self.dfu.save_objects({
                            'id': self.wsId,
                            'objects': [{'type': 'KBaseMatrices.ExpressionMatrix',
                                         'data': matrix_data,
                                         'name': 'test_associated_ExpressionMatrix'}]})[0]

        asso_matrix_ref = "%s/%s/%s" % (info[6], info[0], info[4])
        self.__class__.asso_matrix_ref = asso_matrix_ref
        print('Loaded Associated ExpressionMatrix: ' + asso_matrix_ref)
def start_test(self):
testname = inspect.stack()[1][3]
print('\n*** starting test: ' + testname + ' **')
def test_init_ok(self):
self.start_test()
class_attri = ['ws_url', 'callback_url', 'token', 'scratch', 'dfu', 'working_dir',
'output_dir']
mds_util = self.getMDSUtil()
self.assertTrue(set(class_attri) <= set(mds_util.__dict__.keys()))
self.assertEqual(mds_util.scratch, self.cfg.get('scratch'))
    @patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
    def test_run_metaMDS_scale_by_attri_plot_associated_matrix_without_color(self, file_to_shock):
        """run_metaMDS over the col dimension with attribute-based marker
        sizing and an associated matrix, but no color attribute (the
        color_marker_by param is deliberately omitted)."""
        self.start_test()
        self.loadExpressionMatrix()

        # testing col dimension with linked matrix
        params = {'workspace_name': self.wsName,
                  'input_obj_ref': self.expr_matrix_ref,
                  'n_components': 3,
                  'max_iter': 20,
                  'plot_script': 'plot(my_data.mds,type="t",display="sites")',
                  'plot_type': 'ps',
                  'plot_name': '',
                  'attribute_mapping_obj_ref': self.col_attributemapping_ref,
                  'associated_matrix_obj_ref': self.asso_matrix_ref,
                  'scale_size_by': {'attribute_size': ["test_attribute_1"]},
                  # 'color_marker_by': {'attribute_color': ['test_attribute_2']},
                  'mds_matrix_name': 'output_mds_from_obj',
                  'dimension': 'col'}
        ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]

        self.assertTrue('report_name' in ret)
        self.assertTrue('report_ref' in ret)
        self.assertTrue('mds_ref' in ret)

        # Inspect the saved MDS object for the expected keys and id sets.
        pca_matrix_ref = ret.get('mds_ref')
        pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']

        expected_values = ['distance_matrix', 'mds_parameters', 'original_matrix_ref',
                           'rotation_matrix', 'site_ordination', 'species_ordination']
        self.assertTrue(set(expected_values) <= set(pca_data.keys()))

        expected_row_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1', 'WRI_RS00025_CDS_1',
                            'WRI_RS00030_CDS_1', 'WRI_RS00035_CDS_1']
        expected_col_ids = ['instance_1', 'instance_2', 'instance_3', 'instance_4']
        result_row_ids = [value[0] for value in pca_data.get('species_ordination').get('values')]
        result_col_ids = [value[0] for value in pca_data.get('site_ordination').get('values')]
        self.assertCountEqual(result_row_ids, expected_row_ids)
        self.assertCountEqual(result_col_ids, expected_col_ids)

        # The output directory should contain all plots/intermediate files.
        mds_dir = '/kb/module/work/tmp/mds_output'
        expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
                          'plotly_fig.html', 'site_ordination.csv', 'species_ordination.csv',
                          'test_ExpressionMatrix.csv',
                          'usr_plt_name.ps']
        self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))
    @patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
    def test_run_metaMDS_scale_by_attri_plot_associated_matrix(self, file_to_shock):
        """run_metaMDS over the col dimension with both attribute-based
        marker sizing and attribute-based coloring, plus an associated
        matrix."""
        self.start_test()
        self.loadExpressionMatrix()

        # testing col dimension with linked matrix
        params = {'workspace_name': self.wsName,
                  'input_obj_ref': self.expr_matrix_ref,
                  'n_components': 3,
                  'max_iter': 20,
                  'plot_script': 'plot(my_data.mds,type="t",display="sites")',
                  'plot_type': 'ps',
                  'plot_name': '',
                  'attribute_mapping_obj_ref': self.col_attributemapping_ref,
                  'associated_matrix_obj_ref': self.asso_matrix_ref,
                  'scale_size_by': {'attribute_size': ["test_attribute_1"]},
                  'color_marker_by': {'attribute_color': ['test_attribute_2']},
                  'mds_matrix_name': 'output_mds_from_obj',
                  'dimension': 'col'}
        ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]

        self.assertTrue('report_name' in ret)
        self.assertTrue('report_ref' in ret)
        self.assertTrue('mds_ref' in ret)

        # Inspect the saved MDS object for the expected keys and id sets.
        pca_matrix_ref = ret.get('mds_ref')
        pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']

        expected_values = ['distance_matrix', 'mds_parameters', 'original_matrix_ref',
                           'rotation_matrix', 'site_ordination', 'species_ordination']
        self.assertTrue(set(expected_values) <= set(pca_data.keys()))

        expected_row_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1', 'WRI_RS00025_CDS_1',
                            'WRI_RS00030_CDS_1', 'WRI_RS00035_CDS_1']
        expected_col_ids = ['instance_1', 'instance_2', 'instance_3', 'instance_4']
        result_row_ids = [value[0] for value in pca_data.get('species_ordination').get('values')]
        result_col_ids = [value[0] for value in pca_data.get('site_ordination').get('values')]
        self.assertCountEqual(result_row_ids, expected_row_ids)
        self.assertCountEqual(result_col_ids, expected_col_ids)

        # The output directory should contain all plots/intermediate files.
        mds_dir = '/kb/module/work/tmp/mds_output'
        expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
                          'plotly_fig.html', 'site_ordination.csv', 'species_ordination.csv',
                          'test_ExpressionMatrix.csv',
                          'usr_plt_name.ps']
        self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))
    @patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
    def test_run_metaMDS_with_linked_matrix_ok(self, file_to_shock):
        """run_metaMDS over the col dimension, sizing markers by a row of
        the associated matrix and coloring by a column attribute."""
        self.start_test()
        self.loadExpressionMatrix()

        # testing col dimension with linked matrix
        params = {'workspace_name': self.wsName,
                  'input_obj_ref': self.expr_matrix_ref,
                  'n_components': 3,
                  'max_iter': 20,
                  'plot_script': 'plot(my_data.mds,type="t",display="sites")',
                  'plot_type': 'ps',
                  'plot_name': '',
                  'attribute_mapping_obj_ref': self.col_attributemapping_ref,
                  'associated_matrix_obj_ref': self.asso_matrix_ref,
                  'scale_size_by': {'row_size': ['WRI_RS00010_CDS_1']},
                  'color_marker_by': {'attribute_color': ['test_attribute_2']},
                  'mds_matrix_name': 'output_mds_from_obj',
                  'dimension': 'col'}
        ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]

        self.assertTrue('report_name' in ret)
        self.assertTrue('report_ref' in ret)
        self.assertTrue('mds_ref' in ret)

        # Inspect the saved MDS object for the expected keys and id sets.
        pca_matrix_ref = ret.get('mds_ref')
        pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']

        expected_values = ['distance_matrix', 'mds_parameters', 'original_matrix_ref',
                           'rotation_matrix', 'site_ordination', 'species_ordination']
        self.assertTrue(set(expected_values) <= set(pca_data.keys()))

        expected_row_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1', 'WRI_RS00025_CDS_1',
                            'WRI_RS00030_CDS_1', 'WRI_RS00035_CDS_1']
        expected_col_ids = ['instance_1', 'instance_2', 'instance_3', 'instance_4']
        result_row_ids = [value[0] for value in pca_data.get('species_ordination').get('values')]
        result_col_ids = [value[0] for value in pca_data.get('site_ordination').get('values')]
        self.assertCountEqual(result_row_ids, expected_row_ids)
        self.assertCountEqual(result_col_ids, expected_col_ids)

        # The output directory should contain all plots/intermediate files.
        mds_dir = '/kb/module/work/tmp/mds_output'
        expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
                          'plotly_fig.html', 'site_ordination.csv', 'species_ordination.csv',
                          'test_ExpressionMatrix.csv',
                          'usr_plt_name.ps']
        self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))
    @patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
    def test_run_metaMDS_with_row_linked_matrix_ok(self, file_to_shock):
        """run_metaMDS over the row dimension: the row attribute mapping is
        used, markers are sized by a column of the associated matrix, and
        the expected row/col id sets are swapped relative to the col-dimension
        tests."""
        self.start_test()
        self.loadExpressionMatrix()

        # testing row dimension with linked matrix
        params = {'workspace_name': self.wsName,
                  'input_obj_ref': self.expr_matrix_ref,
                  'n_components': 3,
                  'max_iter': 20,
                  'plot_script': 'plot(my_data.mds,type="t",display="sites")',
                  'plot_type': 'ps',
                  'plot_name': '',
                  'attribute_mapping_obj_ref': self.row_attributemapping_ref,
                  'associated_matrix_obj_ref': self.asso_matrix_ref,
                  'scale_size_by': {'col_size': ['instance_2']},
                  'color_marker_by': {'attribute_color': ['test_attribute_2']},
                  'mds_matrix_name': 'output_mds_from_obj',
                  'dimension': 'row'}
        ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]

        self.assertTrue('report_name' in ret)
        self.assertTrue('report_ref' in ret)
        self.assertTrue('mds_ref' in ret)

        # Inspect the saved MDS object for the expected keys and id sets.
        pca_matrix_ref = ret.get('mds_ref')
        pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']

        expected_values = ['distance_matrix', 'mds_parameters', 'original_matrix_ref',
                           'rotation_matrix', 'site_ordination', 'species_ordination']
        self.assertTrue(set(expected_values) <= set(pca_data.keys()))

        # Row dimension: instances appear as species, genes as sites.
        expected_row_ids = ['instance_1', 'instance_2', 'instance_3', 'instance_4']
        expected_col_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1', 'WRI_RS00025_CDS_1',
                            'WRI_RS00030_CDS_1', 'WRI_RS00035_CDS_1']
        result_row_ids = [value[0] for value in pca_data.get('species_ordination').get('values')]
        result_col_ids = [value[0] for value in pca_data.get('site_ordination').get('values')]
        self.assertCountEqual(result_row_ids, expected_row_ids)
        self.assertCountEqual(result_col_ids, expected_col_ids)

        # The output directory should contain all plots/intermediate files.
        mds_dir = '/kb/module/work/tmp/mds_output'
        expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
                          'plotly_fig.html', 'site_ordination.csv', 'species_ordination.csv',
                          'test_ExpressionMatrix.csv',
                          'usr_plt_name.ps']
        self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))
    @patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
    def test_run_metaMDS_with_linked_matrix_ok_only_scale_size(self, file_to_shock):
        """run_metaMDS over the col dimension with marker sizing only: no
        attribute mapping and no color_marker_by are supplied."""
        self.start_test()
        self.loadExpressionMatrix()

        # testing only scale_size_by with linked matrix
        params = {'workspace_name': self.wsName,
                  'input_obj_ref': self.expr_matrix_ref,
                  'n_components': 3,
                  'max_iter': 20,
                  'plot_script': 'plot(my_data.mds,type="t",display="sites")',
                  'plot_type': 'ps',
                  'plot_name': '',
                  'associated_matrix_obj_ref': self.asso_matrix_ref,
                  'scale_size_by': {'row_size': ['WRI_RS00010_CDS_1']},
                  'mds_matrix_name': 'output_mds_from_obj',
                  'dimension': 'col'}
        ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]

        self.assertTrue('report_name' in ret)
        self.assertTrue('report_ref' in ret)
        self.assertTrue('mds_ref' in ret)

        # Inspect the saved MDS object for the expected keys and id sets.
        pca_matrix_ref = ret.get('mds_ref')
        pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']

        expected_values = ['distance_matrix', 'mds_parameters', 'original_matrix_ref',
                           'rotation_matrix', 'site_ordination', 'species_ordination']
        self.assertTrue(set(expected_values) <= set(pca_data.keys()))

        expected_row_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1', 'WRI_RS00025_CDS_1',
                            'WRI_RS00030_CDS_1', 'WRI_RS00035_CDS_1']
        expected_col_ids = ['instance_1', 'instance_2', 'instance_3', 'instance_4']
        result_row_ids = [value[0] for value in pca_data.get('species_ordination').get('values')]
        result_col_ids = [value[0] for value in pca_data.get('site_ordination').get('values')]
        self.assertCountEqual(result_row_ids, expected_row_ids)
        self.assertCountEqual(result_col_ids, expected_col_ids)

        # The output directory should contain all plots/intermediate files.
        mds_dir = '/kb/module/work/tmp/mds_output'
        expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
                          'plotly_fig.html', 'site_ordination.csv', 'species_ordination.csv',
                          'test_ExpressionMatrix.csv',
                          'usr_plt_name.ps']
        self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))
@patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
def test_run_metaMDS_ok_col_dimension(self, file_to_shock):
self.start_test()
self.loadExpressionMatrix()
# testing col dimension
params = {'workspace_name': self.wsName,
'input_obj_ref': self.expr_matrix_ref,
'n_components': 3,
'max_iter': 20,
'plot_script': 'plot(my_data.mds,type="t",display="sites")',
'plot_type': 'ps',
'plot_name': '',
'attribute_mapping_obj_ref': self.col_attributemapping_ref,
'scale_size_by': {'attribute_size': ["test_attribute_1"]},
'color_marker_by': {'attribute_color': ['test_attribute_2']},
'mds_matrix_name': 'output_mds_from_obj',
'dimension': 'col'}
ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]
self.assertTrue('report_name' in ret)
self.assertTrue('report_ref' in ret)
self.assertTrue('mds_ref' in ret)
pca_matrix_ref = ret.get('mds_ref')
pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']
expected_values = ['distance_matrix', 'mds_parameters', 'original_matrix_ref',
'rotation_matrix', 'site_ordination', 'species_ordination']
self.assertTrue(set(expected_values) <= set(pca_data.keys()))
expected_row_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1', 'WRI_RS00025_CDS_1',
'WRI_RS00030_CDS_1', 'WRI_RS00035_CDS_1']
expected_col_ids = ['instance_1', 'instance_2', 'instance_3', 'instance_4']
result_row_ids = [value[0] for value in pca_data.get('species_ordination').get('values')]
result_col_ids = [value[0] for value in pca_data.get('site_ordination').get('values')]
self.assertCountEqual(result_row_ids, expected_row_ids)
self.assertCountEqual(result_col_ids, expected_col_ids)
mds_dir = '/kb/module/work/tmp/mds_output'
expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
'plotly_fig.html', 'site_ordination.csv', 'species_ordination.csv',
'test_ExpressionMatrix.csv',
'usr_plt_name.ps']
self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))
@patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
def test_run_metaMDS_ok_row_dimension(self, file_to_shock):
self.start_test()
self.loadExpressionMatrix()
# testing row dimension
params = {'workspace_name': self.wsName,
'input_obj_ref': self.expr_matrix_ref,
'n_components': 3,
'max_iter': 20,
'plot_script': 'plot(my_data.mds,type="t",display="sites")',
'plot_type': 'ps',
'plot_name': '',
'attribute_mapping_obj_ref': self.row_attributemapping_ref,
'scale_size_by': {'attribute_size': ["test_attribute_1"]},
'color_marker_by': {'attribute_color': ['test_attribute_2']},
'mds_matrix_name': 'output_mds_from_obj',
'dimension': 'row'}
ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]
self.assertTrue('report_name' in ret)
self.assertTrue('report_ref' in ret)
self.assertTrue('mds_ref' in ret)
pca_matrix_ref = ret.get('mds_ref')
pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']
expected_values = ['distance_matrix', 'mds_parameters', 'original_matrix_ref',
'rotation_matrix', 'site_ordination', 'species_ordination']
self.assertTrue(set(expected_values) <= set(pca_data.keys()))
expected_row_ids = ['instance_1', 'instance_2', 'instance_3', 'instance_4']
expected_col_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1', 'WRI_RS00025_CDS_1',
'WRI_RS00030_CDS_1', 'WRI_RS00035_CDS_1']
result_row_ids = [value[0] for value in pca_data.get('species_ordination').get('values')]
result_col_ids = [value[0] for value in pca_data.get('site_ordination').get('values')]
self.assertCountEqual(result_row_ids, expected_row_ids)
self.assertCountEqual(result_col_ids, expected_col_ids)
mds_dir = '/kb/module/work/tmp/mds_output'
expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
'plotly_fig.html', 'site_ordination.csv', 'species_ordination.csv',
'test_ExpressionMatrix.csv',
'usr_plt_name.ps']
self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))
@patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
def test_run_metaMDS_with_linked_matrix_ok_only_color_by(self, file_to_shock):
self.start_test()
self.loadExpressionMatrix()
# testing only color_marker_by
params = {'workspace_name': self.wsName,
'input_obj_ref': self.expr_matrix_ref,
'n_components': 3,
'max_iter': 20,
'plot_script': 'plot(my_data.mds,type="t",display="sites")',
'plot_type': 'ps',
'plot_name': '',
'attribute_mapping_obj_ref': self.col_attributemapping_ref,
'scale_size_by': None,
'color_marker_by': {'attribute_color': ['test_attribute_2']},
'mds_matrix_name': 'output_mds_from_obj',
'dimension': 'col'}
ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]
self.assertTrue('report_name' in ret)
self.assertTrue('report_ref' in ret)
self.assertTrue('mds_ref' in ret)
pca_matrix_ref = ret.get('mds_ref')
pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']
expected_values = ['distance_matrix', 'mds_parameters', 'original_matrix_ref',
'rotation_matrix', 'site_ordination', 'species_ordination']
self.assertTrue(set(expected_values) <= set(pca_data.keys()))
expected_row_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1', 'WRI_RS00025_CDS_1',
'WRI_RS00030_CDS_1', 'WRI_RS00035_CDS_1']
expected_col_ids = ['instance_1', 'instance_2', 'instance_3', 'instance_4']
result_row_ids = [value[0] for value in pca_data.get('species_ordination').get('values')]
result_col_ids = [value[0] for value in pca_data.get('site_ordination').get('values')]
self.assertCountEqual(result_row_ids, expected_row_ids)
self.assertCountEqual(result_col_ids, expected_col_ids)
mds_dir = '/kb/module/work/tmp/mds_output'
expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
'plotly_fig.html', 'site_ordination.csv', 'species_ordination.csv',
'test_ExpressionMatrix.csv',
'usr_plt_name.ps']
self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))
@patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
def test_run_metaMDS_ok_without_grouping(self, file_to_shock):
self.start_test()
self.loadExpressionMatrix()
# testing without grouping
params = {'workspace_name': self.wsName,
'input_obj_ref': self.expr_matrix_ref,
'n_components': 3,
'max_iter': 20,
'plot_script': 'plot(my_data.mds,type="t",display="sites")',
'plot_type': 'ps',
'plot_name': '',
'mds_matrix_name': 'output_mds_from_obj'}
ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]
self.assertTrue('report_name' in ret)
self.assertTrue('report_ref' in ret)
self.assertTrue('mds_ref' in ret)
pca_matrix_ref = ret.get('mds_ref')
pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']
expected_values = ['distance_matrix', 'mds_parameters', 'original_matrix_ref',
'rotation_matrix', 'site_ordination', 'species_ordination']
self.assertTrue(set(expected_values) <= set(pca_data.keys()))
expected_row_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1', 'WRI_RS00025_CDS_1',
'WRI_RS00030_CDS_1', 'WRI_RS00035_CDS_1']
expected_col_ids = ['instance_1', 'instance_2', 'instance_3', 'instance_4']
result_row_ids = [value[0] for value in pca_data.get('species_ordination').get('values')]
result_col_ids = [value[0] for value in pca_data.get('site_ordination').get('values')]
self.assertCountEqual(result_row_ids, expected_row_ids)
self.assertCountEqual(result_col_ids, expected_col_ids)
mds_dir = '/kb/module/work/tmp/mds_output'
expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
'plotly_fig.html', 'site_ordination.csv', 'species_ordination.csv',
'test_ExpressionMatrix.csv',
'usr_plt_name.ps']
self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))
| [
"os.listdir",
"installed_clients.authclient.KBaseAuth",
"configparser.ConfigParser",
"inspect.stack",
"os.environ.get",
"installed_clients.WorkspaceClient.Workspace",
"kb_Amplicon.kb_AmpliconImpl.kb_Amplicon",
"os.path.join",
"mock.patch.object",
"requests.delete",
"kb_Amplicon.Utils.MDSUtils.MD... | [((13524, 13599), 'mock.patch.object', 'patch.object', (['DataFileUtil', '"""file_to_shock"""'], {'side_effect': 'mock_file_to_shock'}), "(DataFileUtil, 'file_to_shock', side_effect=mock_file_to_shock)\n", (13536, 13599), False, 'from mock import patch\n'), ((16098, 16173), 'mock.patch.object', 'patch.object', (['DataFileUtil', '"""file_to_shock"""'], {'side_effect': 'mock_file_to_shock'}), "(DataFileUtil, 'file_to_shock', side_effect=mock_file_to_shock)\n", (16110, 16173), False, 'from mock import patch\n'), ((18656, 18731), 'mock.patch.object', 'patch.object', (['DataFileUtil', '"""file_to_shock"""'], {'side_effect': 'mock_file_to_shock'}), "(DataFileUtil, 'file_to_shock', side_effect=mock_file_to_shock)\n", (18668, 18731), False, 'from mock import patch\n'), ((21193, 21268), 'mock.patch.object', 'patch.object', (['DataFileUtil', '"""file_to_shock"""'], {'side_effect': 'mock_file_to_shock'}), "(DataFileUtil, 'file_to_shock', side_effect=mock_file_to_shock)\n", (21205, 21268), False, 'from mock import patch\n'), ((23727, 23802), 'mock.patch.object', 'patch.object', (['DataFileUtil', '"""file_to_shock"""'], {'side_effect': 'mock_file_to_shock'}), "(DataFileUtil, 'file_to_shock', side_effect=mock_file_to_shock)\n", (23739, 23802), False, 'from mock import patch\n'), ((26127, 26202), 'mock.patch.object', 'patch.object', (['DataFileUtil', '"""file_to_shock"""'], {'side_effect': 'mock_file_to_shock'}), "(DataFileUtil, 'file_to_shock', side_effect=mock_file_to_shock)\n", (26139, 26202), False, 'from mock import patch\n'), ((28576, 28651), 'mock.patch.object', 'patch.object', (['DataFileUtil', '"""file_to_shock"""'], {'side_effect': 'mock_file_to_shock'}), "(DataFileUtil, 'file_to_shock', side_effect=mock_file_to_shock)\n", (28588, 28651), False, 'from mock import patch\n'), ((31025, 31100), 'mock.patch.object', 'patch.object', (['DataFileUtil', '"""file_to_shock"""'], {'side_effect': 'mock_file_to_shock'}), "(DataFileUtil, 
'file_to_shock', side_effect=mock_file_to_shock)\n", (31037, 31100), False, 'from mock import patch\n'), ((33464, 33539), 'mock.patch.object', 'patch.object', (['DataFileUtil', '"""file_to_shock"""'], {'side_effect': 'mock_file_to_shock'}), "(DataFileUtil, 'file_to_shock', side_effect=mock_file_to_shock)\n", (33476, 33539), False, 'from mock import patch\n'), ((684, 721), 'os.environ.get', 'os.environ.get', (['"""KB_AUTH_TOKEN"""', 'None'], {}), "('KB_AUTH_TOKEN', None)\n", (698, 721), False, 'import os\n'), ((744, 788), 'os.environ.get', 'os.environ.get', (['"""KB_DEPLOYMENT_CONFIG"""', 'None'], {}), "('KB_DEPLOYMENT_CONFIG', None)\n", (758, 788), False, 'import os\n'), ((827, 841), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (839, 841), False, 'from configparser import ConfigParser\n'), ((1104, 1132), 'installed_clients.authclient.KBaseAuth', '_KBaseAuth', (['auth_service_url'], {}), '(auth_service_url)\n', (1114, 1132), True, 'from installed_clients.authclient import KBaseAuth as _KBaseAuth\n'), ((1317, 1336), 'kb_Amplicon.kb_AmpliconServer.MethodContext', 'MethodContext', (['None'], {}), '(None)\n', (1330, 1336), False, 'from kb_Amplicon.kb_AmpliconServer import MethodContext\n'), ((1791, 1811), 'installed_clients.WorkspaceClient.Workspace', 'Workspace', (['cls.wsURL'], {}), '(cls.wsURL)\n', (1800, 1811), False, 'from installed_clients.WorkspaceClient import Workspace\n'), ((1882, 1902), 'kb_Amplicon.kb_AmpliconImpl.kb_Amplicon', 'kb_Amplicon', (['cls.cfg'], {}), '(cls.cfg)\n', (1893, 1902), False, 'from kb_Amplicon.kb_AmpliconImpl import kb_Amplicon\n'), ((2021, 2051), 'installed_clients.DataFileUtilClient.DataFileUtil', 'DataFileUtil', (['cls.callback_url'], {}), '(cls.callback_url)\n', (2033, 2051), False, 'from installed_clients.DataFileUtilClient import DataFileUtil\n'), ((2075, 2092), 'kb_Amplicon.Utils.MDSUtils.MDSUtils', 'MDSUtils', (['cls.cfg'], {}), '(cls.cfg)\n', (2083, 2092), False, 'from kb_Amplicon.Utils.MDSUtils import 
MDSUtils\n'), ((2110, 2175), 'installed_clients.AbstractHandleClient.AbstractHandle', 'HandleService', ([], {'url': "cls.cfg['handle-service-url']", 'token': 'cls.token'}), "(url=cls.cfg['handle-service-url'], token=cls.token)\n", (2123, 2175), True, 'from installed_clients.AbstractHandleClient import AbstractHandle as HandleService\n'), ((2423, 2460), 'os.path.join', 'os.path.join', (['cls.scratch', '"""test.txt"""'], {}), "(cls.scratch, 'test.txt')\n", (2435, 2460), False, 'import os\n'), ((3493, 3585), 'requests.delete', 'requests.delete', (["(cls.shockURL + '/node/' + node_id)"], {'headers': 'header', 'allow_redirects': '(True)'}), "(cls.shockURL + '/node/' + node_id, headers=header,\n allow_redirects=True)\n", (3508, 3585), False, 'import requests\n'), ((2229, 2240), 'time.time', 'time.time', ([], {}), '()\n', (2238, 2240), False, 'import time\n'), ((13074, 13089), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (13087, 13089), False, 'import inspect\n'), ((16070, 16089), 'os.listdir', 'os.listdir', (['mds_dir'], {}), '(mds_dir)\n', (16080, 16089), False, 'import os\n'), ((18628, 18647), 'os.listdir', 'os.listdir', (['mds_dir'], {}), '(mds_dir)\n', (18638, 18647), False, 'import os\n'), ((21165, 21184), 'os.listdir', 'os.listdir', (['mds_dir'], {}), '(mds_dir)\n', (21175, 21184), False, 'import os\n'), ((23699, 23718), 'os.listdir', 'os.listdir', (['mds_dir'], {}), '(mds_dir)\n', (23709, 23718), False, 'import os\n'), ((26099, 26118), 'os.listdir', 'os.listdir', (['mds_dir'], {}), '(mds_dir)\n', (26109, 26118), False, 'import os\n'), ((28548, 28567), 'os.listdir', 'os.listdir', (['mds_dir'], {}), '(mds_dir)\n', (28558, 28567), False, 'import os\n'), ((30997, 31016), 'os.listdir', 'os.listdir', (['mds_dir'], {}), '(mds_dir)\n', (31007, 31016), False, 'import os\n'), ((33436, 33455), 'os.listdir', 'os.listdir', (['mds_dir'], {}), '(mds_dir)\n', (33446, 33455), False, 'import os\n'), ((35618, 35637), 'os.listdir', 'os.listdir', (['mds_dir'], {}), 
'(mds_dir)\n', (35628, 35637), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Helper functions to organize CHDI imaging data
Created on Fri Jan 15 11:07:53 2016
@author: <NAME>
Python Version: Python 3.5.1 |Anaconda 2.4.1 (64-bit)
"""
import glob as gl
import pandas as pd
import numpy as np
import os
from functools import partial
def linear_pred(m, b, x):
    """
    Evaluate the linear model y = m * x + b.

    Parameters
    ----------
    m : float
        Slope.
    b : float
        Intercept.
    x : float or ndarray
        Point(s) at which the line is evaluated.

    Returns
    -------
    y : float or ndarray
        Predicted value(s).
    """
    return m * x + b
def scan_year(visit, studyid='TON'):
    """
    Retrieve the calendar year in which a scan was collected.

    Parameters
    ----------
    visit : str or int
        Visit number, either as an int or as a string whose last character
        is the visit number (e.g. 'Visit 2').
    studyid : str, optional
        Specifies the study from which files will be retrieved. Valid
        values are 'THD' and 'TON'.

    Returns
    -------
    sc_year : int
        Actual scan year.
    """
    # Strings encode the visit number in their last character.
    if type(visit) is str:
        visit = int(visit[-1:])
    # TRACK-ON ran 2012-2014; TRACK-HD ran 2008-2011.
    years = [2012, 2013, 2014] if studyid == 'TON' else [2008, 2009, 2010, 2011]
    return years[visit - 1]
# Define root directories of every type of scan for each study (TON or THD)
# For TON: mritype = (0: unknown; 1: sMRI; 2:fMRI; 3: DTI)
# For THD: mritype = (0,4: DTI; 1,2,3: sMRI)
# Maps study id -> mritype code -> root directory holding that scan type.
# NOTE(review): these are absolute paths on the original analysis host;
# adjust when running elsewhere.
rootdir_per_scan_type = dict(TON={0: '',
                                  3: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
                                      'TRACK/Imaging_data/TrackOn_DTI/DTI'),
                                  1: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
                                      'TRACK/Imaging_data/TrackOn_sMRI'),
                                  2: ('/data1/chdi_disks/Disk2/'
                                      'IBM SOW3-part2/TrackOn/fMRI')},
                             THD={0: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
                                      'TRACK/Imaging_data/TrackHD/DTI'),
                                  4: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
                                      'TRACK/Imaging_data/TrackHD/DTI'),
                                  1: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
                                      'TRACK/Imaging_data/TrackHD/sMRI'),
                                  2: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
                                      'TRACK/Imaging_data/TrackHD/sMRI'),
                                  3: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
                                      'TRACK/Imaging_data/TrackHD/sMRI')})
#rootdir_per_scan_type = dict(TON={2: ('/data1/chdi_disks/Disk4/TRACKON')})
class Subject:
    """
    Subject class that integrates all information about a subject (name,
    visits, eligibility data, imaging folders, analyses that have been
    performed) into a single object.

    Parameters
    ----------
    subjid : str, optional
        Subject ID.
    general_df : `pandas.core.frame.DataFrame` or dict, optional
        Dataframe loaded from general_ton.csv / general.csv, or a dict of
        both keyed by study id.
    mri_df : `pandas.core.frame.DataFrame`, optional
        Dataframe loaded from mri.csv.
    subj_df : `pandas.core.frame.DataFrame`, optional
        Dataframe loaded from subject.csv.
    visit_df_dict : dict of `pandas.core.frame.DataFrame`, optional
        Per-visit form dataframes keyed by form name (e.g. 'visit1',
        'visit2_ton').
    pheno_df : `pandas.core.frame.DataFrame`, optional
        Phenotypic variables per visit (e.g., one provided by SOW4).
    studyid : str, optional
        Study the subject belongs to: 'TON' (default) or 'THD'.
    """

    def get_general_info(self, general_df, studyid='TON'):
        """
        Retrieve general information about the subject from the general csv
        file of the corresponding study.

        Parameters
        ----------
        general_df : `pandas.core.frame.DataFrame` or `dict`
            Single dataframe with general csv file info or dictionary of
            dataframes from general.csv and general_ton.csv.
        studyid : str, optional
            Specifies the study from which files will be retrieved. Valid
            values are 'THD' and 'TON'.

        Raises
        ------
        ValueError
            If the subject id is missing from, or duplicated in, the
            general database.
        """
        if isinstance(general_df, dict):
            gen_df = general_df[studyid]
        else:
            gen_df = general_df
        if gen_df[gen_df.subjid == self.subjid].shape[0] != 1:
            # BUG FIX: added the missing space between 'not' and 'unique'.
            raise ValueError(('The subject ID you requested ({}) is not '
                              'unique in the general database').
                             format(self.subjid))
        sg = gen_df[gen_df.subjid == self.subjid].iloc[0]
        self.studyid = studyid
        # Column naming for group/eligibility differs between the studies.
        if studyid == 'TON':
            self.group = ['control', 'preHD'][sg.group - 1]
            self.inclusion_criteria = {'CAG_repeats': sg.ic4,
                                       'disease_burden': sg.ic5,
                                       'motorscores': sg.ic6,
                                       'good_age': sg.ic7}
            self.eligibility_criteria = sg[['ec1', 'ec2', 'ec3', 'ec4', 'ec5',
                                            'ec6', 'ec7', 'ec8', 'ec9', 'ec10',
                                            'ec11', 'ec12']].to_dict()
            self.eligible = sg.eligible
            hand_attr = 'handed'
        else:
            self.group = ['control', 'preHD', 'earlyHD'][sg.incl02]
            self.inclusion_criteria = {'CAG_repeats': sg.incl02c,
                                       'disease_burden': sg.incl02e,
                                       'motorscores': sg.incl02g,
                                       'good_age': not (sg.excl02 | sg.excl03)}
            self.exclusion_criteria = sg[['excl01', 'excl04', 'excl05',
                                          'excl06', 'excl07', 'excl08',
                                          'excl09', 'excl10', 'excl11',
                                          'excl12', 'excl13']].to_dict()
            hand_attr = 'handness'
        # Default missing handedness (4) and ethnicity (7) to 'unknown'.
        # (The original no-op fillna calls for 'sex'/'age' with NaN were
        # removed; avoiding inplace fillna on a row copy also silences
        # pandas chained-assignment warnings.)
        sg = sg.fillna({hand_attr: 4, 'ethnic': 7})
        ethnicity_dict = {1: 'caucassian', 11: 'african_black',
                          12: 'african_north', 13: 'asian_west',
                          14: 'asian_east', 15: 'mixed',
                          2: 'american_black', 3: 'american_latin',
                          6: 'other', 7: 'unknown'}
        self.demographics = {'age': sg.age,
                             'sex': sg.sex,
                             'ethnicity': ethnicity_dict[sg.ethnic],
                             'handness': ['right', 'left', 'mixed', 'unknown']
                                         [int(getattr(sg, hand_attr)) - 1]}

    def get_mri_info(self, mri_df):
        """
        Retrieve scan-related information from mri.csv and store it as a
        (subtype, visit)-indexed dataframe in ``self.mri``.

        Parameters
        ----------
        mri_df : `pandas.core.frame.DataFrame`
            Dataframe loaded from mri.csv.
        """
        temp = mri_df[mri_df.subjid == self.subjid].copy(deep=True)
        if self.studyid == 'TON':
            # For TON the dictionary is defined by `subtype` as reported in
            # the document (augmented to include extra DTI scans -- blank on
            # csv). NOTE: 'subytpe' is the misspelled column name used by
            # the source csv; kept on purpose.
            mri_type_dict = {1: 'T1W', 2: 'T1W Repeat', 3: 'T2W',
                             4: 'Resting State', 5: 'WM Task', 6: 'Motor Task',
                             7: 'Practice', 8: 'Field Map', 9: 'Generic',
                             10: 'NODDI', 11: 'CEST/MTR', 12: 'DTI'}
            temp.fillna(value={'subytpe': 12}, inplace=True)
            temp['subytpe'] = temp['subytpe'].astype(int)
        else:
            # For THD the dictionary is defined by inspection of `mritype` on
            # the mri.csv spreadsheet. As consistent as possible with TON.
            mri_type_dict = {0: 'DTI', 1: 'T1W', 2: 'T1W Repeat', 3: 'T2W',
                             4: 'DTI'}
            temp['subytpe'] = temp['mritype'].astype(int)
        temp.replace({'subytpe': mri_type_dict}, inplace=True)
        temp.set_index(['subytpe', 'visit'], inplace=True)
        temp.index.set_names('subtype', level=0, inplace=True)
        # Sort unconditionally: MultiIndex.is_lexsorted was removed in
        # pandas 2.0, and sorting an already-sorted index is a cheap no-op.
        temp = temp.sort_index()
        self.mri = temp
        return

    def get_subject_info(self, subj_df):
        """
        Retrieve general information of a participant that was not compiled
        at a specific visit, but rather once or in an annually updated
        manner (from subject.csv).

        Parameters
        ----------
        subj_df : `pandas.core.frame.DataFrame`
            Dataframe loaded from subject.csv.

        Raises
        ------
        ValueError
            If the subject has inconsistent site ids across studies.
        """
        ss = subj_df[subj_df.subjid == self.subjid]
        siteid = np.unique(ss.siteid.tolist())
        if len(siteid) != 1:
            # BUG FIX: the message was a tuple (stray comma), and tuples
            # have no .format method -- the original raised AttributeError
            # instead of the intended ValueError.
            raise ValueError(('Subject ID {} has different `site ids` for '
                              'TRACK and TrackOn').format(self.subjid))
        self.siteid = siteid[0]

    def __make_CAP_score_function(self, vd):
        """
        Estimate the visit_day -> CAP score transformation, given the
        TRACK-ON visit estimates of dbscore.

        Parameters
        ----------
        vd : dict of `pandas.core.frame.DataFrame`
            Dictionary of visit_ton dataframes.

        Returns
        -------
        CAP_dy_func : function
            Function that takes the day of a TRACK/TON visit and returns a
            CAP score (NaN when the fit could not be computed).
        """
        # TON forms in which this subject appears, in sorted order.
        tk = [vk for vk in sorted(vd.keys())
              if (('ton' in vk) and (self.subjid in vd[vk].subjid.values))]
        # BUG FIX: the fallback slope/intercept are now always defined;
        # previously fewer than two TON visits caused an UnboundLocalError
        # at the return statement.
        m = b = np.nan
        if len(tk) >= 2:
            dy_in = np.array([vd[vk][vd[vk]['subjid'] == self.subjid].visdy.iloc[0]
                              for vk in tk])
            db_in = np.array([vd[vk][vd[vk]['subjid'] == self.subjid].dbscore.iloc[0]
                              for vk in tk])
            try:
                ok_idx = ~np.isnan(db_in)
                x = dy_in[ok_idx]
                # Least-squares fit of dbscore against visit day.
                m, b = np.linalg.lstsq(np.stack((x, np.ones(x.shape)),
                                                 axis=1), db_in[ok_idx])[0]
            except Exception:
                # Degenerate fits (e.g. all-NaN dbscore) yield a NaN line.
                m = b = np.nan
        return partial(linear_pred, m, b)

    def get_pheno_vars(self, pheno_df):
        """
        Build a dataframe with phenotypic variables per visit and store it
        in ``self.pheno_df``.

        Parameters
        ----------
        pheno_df : `pandas.core.frame.DataFrame`
            Dataframe with phenotypic variables (e.g., one provided by SOW4).
        """
        df = pheno_df[pheno_df['subjid'] == self.subjid].copy(deep=True)
        # Map visit days back to visit form names (inverse of self.visdy).
        vd_to_vis = {dv: dk for dk, dv in self.visdy.items()}
        df['visit'] = df['visdy'].replace(vd_to_vis)
        df.set_index('visit', inplace=True)
        self.pheno_df = df

    def get_cog_motor_performance(self, visit_dict):
        """
        Collect the cognitive/motor task scores of this subject across all
        visits into ``self.cog_motor_performance`` (one row per visit,
        indexed by visit order; missing tasks are NaN).

        Parameters
        ----------
        visit_dict : dict of `pandas.core.frame.DataFrame`
            Per-visit form dataframes keyed by form name.
        """
        cog_motor_tasks = ['sdmt', 'stroop', 'paced_tap', 'indirect_circle_trace',
                           'map_search', 'cancelation', 'spot_change',
                           'mental_rotation', 'count_backwards', 'grip_var']
        field_list = cog_motor_tasks + ['visdy', 'visit']
        # BUG FIX: dict views have no .sort() in Python 3; sorting by the
        # reversed key string orders TRACK forms ('visitN') before TRACK-ON
        # forms ('visitN_ton'), each in numeric order.
        visits_used = sorted(self.visdy, key=lambda vk: vk[::-1])
        all_vis_dfs = []
        for v_idx, visit in enumerate(visits_used):
            visit_df = visit_dict[visit]
            fields_in_dict = [fn for fn in field_list if fn in visit_df.columns]
            nan_fields = [fn for fn in field_list if fn not in visit_df.columns]
            vis_dict = visit_df[visit_df['subjid'] == self.subjid].iloc[0][
                fields_in_dict].to_dict()
            # Tasks absent from this form are recorded as NaN.
            for field in nan_fields:
                vis_dict[field] = np.nan
            vis_dict['visit'] = visit
            vis_dict['v_idx'] = v_idx
            all_vis_dfs.append(vis_dict)
        out_df = pd.DataFrame(all_vis_dfs).set_index('v_idx')
        self.cog_motor_performance = out_df

    def __init__(self, subjid=None, general_df=None, mri_df=None,
                 subj_df=None, visit_df_dict=None, pheno_df=None,
                 studyid='TON'):
        # Each piece of information is optional; only the dataframes that
        # are provided get parsed.
        if subjid is not None:
            self.subjid = subjid
        if general_df is not None:
            self.get_general_info(general_df, studyid)
        if mri_df is not None:
            self.get_mri_info(mri_df)
        if subj_df is not None:
            self.get_subject_info(subj_df)
        if visit_df_dict is not None:
            self.CAP_from_visdy = self.__make_CAP_score_function(visit_df_dict)
            self.visdy = dict()
            self.CAP = dict()
            # BUG FIX: dict.iteritems() does not exist in Python 3.
            for vk, df in visit_df_dict.items():
                if self.subjid in df['subjid'].values:
                    vd = df[df['subjid'] == self.subjid]['visdy'].iloc[0]
                    self.visdy[vk] = vd
                    self.CAP[vk] = self.CAP_from_visdy(vd)
            self.get_cog_motor_performance(visit_df_dict)
        if pheno_df is not None:
            self.get_pheno_vars(pheno_df)

    def get_scan_dicom(self, mri_df=None, visit=None, subtype=None):
        """
        Retrieve list of dicom filenames (single dicom filename for each
        directory) where valid scans of the evaluated subject are located.

        Parameters
        ----------
        mri_df : `pandas.core.frame.DataFrame`, optional
            Dataframe loaded from mri.csv; defaults to ``self.mri``.
        visit : int, optional
            Integer value that specifies the visit number.
        subtype : str or list of str, optional
            Defines the type of image being queried (e.g., "T1W"). For more
            information, please refer to
            "TRACK-IDS-2015-10-R1-DataDictionary(1).pdf", section 4.15 (MRI).

        Returns
        -------
        dcm_list : list
            One dicom filename per directory with a valid scan; entries are
            '' for scans that could not be located.
        """
        if 'DTI' in subtype:
            # Extra DTI scans are labeled 'Generic' in the TON csv.
            # BUG FIX: list.extend('Generic') used to spill the string into
            # single characters; build a new list instead (this also avoids
            # mutating the caller's list).
            if isinstance(subtype, str):
                subtype = [subtype, 'Generic']
            else:
                subtype = list(subtype) + ['Generic']
        if mri_df is None:
            mri_df = self.mri
        if visit is not None:
            visit_str = 'Visit ' + str(visit)
        else:
            visit_str = None
        idx = pd.IndexSlice
        # Sort unconditionally (see get_mri_info).
        mri_df = mri_df.sort_index()
        used_df = mri_df.loc[idx[subtype, visit_str], :]
        dcm_list = []
        for (scandy, mritype, subjid, scandesc,
             scanid, scanstatus, this_vst) in zip(
                used_df['scandy'], used_df['mritype'],
                used_df['subjid'], used_df['scandesc'], used_df['scanid'],
                used_df['scanstatus'],
                used_df.index.get_level_values('visit')):
            try:
                scandesc = scandesc.replace(' ', '_')
            except AttributeError:
                # Missing descriptions come through as NaN floats.
                scandesc = 'NO_SCAN_DESCRIPTION'
            dirlist = gl.glob('/'.join([rootdir_per_scan_type[self.studyid][mritype],
                                        subjid, scandesc,
                                        str(scan_year(this_vst,
                                                      self.studyid)) + '*',
                                        'S' + str(scanid)]))
            cond = dirlist and scanstatus == 1
            if cond:
                dcm_long_list = gl.glob('/'.join([dirlist[0], '*.dcm']))
                cond = cond and dcm_long_list
            if cond:
                dcm_list.append(dcm_long_list[0])
            else:
                dcm_list.append('')
        return dcm_list

    def get_valid_visits(self, mri_df=None, subtype='T1W'):
        """
        Retrieve list of visits associated to a given subject where valid
        scans of a specific imaging subtype have been acquired. The output
        is an array of valid subject/visit pairs.

        Parameters
        ----------
        mri_df : `pandas.core.frame.DataFrame`, optional
            Dataframe loaded from mri.csv; defaults to ``self.mri``.
        subtype : str, optional
            Defines the type of image being queried. Default value is
            'T1W'. For more information, please refer to
            "TRACK-IDS-2015-10-R1-DataDictionary(1).pdf", section 4.15 (MRI).

        Returns
        -------
        subj_vst_array : ndarray
            Aggregate array of valid subject/visit pairs.
        """
        subjects_list = []
        visits_list = []
        if mri_df is None:
            mri_df = self.mri
        if subtype in mri_df.index.get_level_values('subtype'):
            used_df = mri_df.xs((subtype,), level=[0])
            for (scandy, mritype, subjid, scandesc,
                 scanid, scanstatus, this_vst) in zip(
                    used_df['scandy'], used_df['mritype'], used_df['subjid'],
                    used_df['scandesc'], used_df['scanid'], used_df['scanstatus'],
                    used_df.index.get_level_values('visit')):
                # Same NaN guard as in get_scan_dicom (consistency fix).
                try:
                    scandesc = scandesc.replace(' ', '_')
                except AttributeError:
                    scandesc = 'NO_SCAN_DESCRIPTION'
                dirlist = gl.glob('/'.join([rootdir_per_scan_type[self.studyid][mritype],
                                            subjid, scandesc,
                                            str(scan_year(this_vst,
                                                          self.studyid)) + '*',
                                            'S' + str(scanid)]))
                cond = dirlist and scanstatus == 1
                if cond:
                    dcm_long_list = gl.glob('/'.join([dirlist[0], '*.dcm']))
                    cond = cond and len(dcm_long_list) > 0
                if cond:
                    subjects_list.append(subjid)
                    vst_nmb = int(this_vst.split(' ')[-1])
                    visits_list.append(vst_nmb)
        # correct for redundant list of visits
        inds = np.unique(visits_list, return_index=True)[1]
        visits_list = np.array(visits_list)[inds].tolist()
        subjects_list = np.array(subjects_list)[inds].tolist()
        subj_vst_array = np.array([subjects_list, visits_list])
        return subj_vst_array
def make_Track_ON_subjects(datadir, load_full_visit_forms=False):
    """
    Create list and dict of subjects from TRACK-ON study,
    together with relevant data frames.

    Parameters
    ----------
    datadir : str
        Specifies directory with csv files.
    load_full_visit_forms : bool, optional
        Specifies if Visit_X.csv files should be loaded with all of their
        columns. Generally NOT recommended since it makes things SLOW.
        Defaults to False.

    Returns
    -------
    subject_list : list
        Contains subject id strings.
    subjects_dict : dict
        Keys: subject id, values: Subject object.
    gton : `pandas.core.frame.DataFrame`
        Includes contents from general_ton.csv.
    mri_TON : `pandas.core.frame.DataFrame`
        Contains rows of mri.csv belonging to the TRACK-ON study only.
    visit_df_dict : dict
        Per-visit form dataframes keyed by form name.
    """
    cvs_names = [file for file in os.listdir(datadir) if file.endswith('.csv')]
    visit_ton_forms = ['visit1_ton', 'visit2_ton', 'visit3_ton']
    visit_track_forms = ['visit1', 'visit2', 'visit3', 'visit4']
    used_csv_list = ['general_ton', 'mri', 'subject']
    # Rename maps: raw csv column name -> canonical cognitive/motor task
    # name (the names Subject.get_cog_motor_performance expects).
    THD_cog_motor_dict = {'sdmt_correct': 'sdmt',
                          'swr_correct': 'stroop',
                          'ptap_3hz_alltrials_self_intertapinterval_stddev': 'paced_tap',
                          'circle_indirect_alltrials_annulus_length': 'indirect_circle_trace',
                          'msearch_totalcorrect_1minute': 'map_search',
                          # 'cancelation': ''],
                          'spot_setsize5_k': 'spot_change',
                          'mrot_all_percentcor': 'mental_rotation',
                          'gripvarright': 'grip_var'}
    #'count_backwards': ''}
    TON_cog_motor_dict = {'sdmt_correct': 'sdmt',
                          'stroop_correct': 'stroop',
                          'ptap_3hz_all_self_iti_sd': 'paced_tap',
                          'circle_ind_all_annulus_l': 'indirect_circle_trace',
                          'msearch_totcorr_1min': 'map_search',
                          'cancel_digit_totalcorrect_90s': 'cancelation',
                          'spot_setsize5_k': 'spot_change',
                          'mrot_all_percentcor': 'mental_rotation',
                          'circle_cnt_direct_totalnumber': 'count_backwards',
                          'lhx_gf_rx_cvf': 'grip_var'}
    # Make a dictionary of dataframes, one for each csv file:
    df_dict = {cvs_n.split('.')[0]: pd.read_csv(os.path.join(datadir, cvs_n),
                                                sep='\t') for cvs_n in cvs_names
               if cvs_n.split('.')[0] in used_csv_list}
    pheno_fn = os.path.join(datadir, 'track_pheno_data.csv')
    if os.path.isfile(pheno_fn):
        df_dict['track_pheno_data'] = pd.read_csv(pheno_fn, sep=',')
    if not load_full_visit_forms:
        # Load only the columns needed downstream (ids, visit day, CAG,
        # dbscore and the cognitive/motor task scores).
        visit_df_dict = {}
        for v_t in visit_track_forms:
            csv_used = os.path.join(datadir, v_t + '.csv')
            if v_t == 'visit1':
                used_cols = ['subjid', 'studyid', 'visit',
                             'visdy', 'caglarger__value']
            else:
                used_cols = ['subjid', 'studyid', 'visit', 'visdy']
            used_cols.extend(THD_cog_motor_dict.keys())
            # Only request columns that actually exist in this file.
            with open(csv_used, 'r') as f:
                head = f.readline()
            cols_in_file = head.split('\t')
            ok_cols = [col for col in used_cols if col in cols_in_file]
            visit_df_dict[v_t] = pd.read_csv(csv_used, sep='\t',
                                             usecols=ok_cols)
            visit_df_dict[v_t].rename(columns=THD_cog_motor_dict,
                                      inplace=True)
        for visit_ton in visit_ton_forms:
            csv_ton_used = os.path.join(datadir, visit_ton + '.csv')
            used_cols = ['subjid', 'studyid', 'visdy', 'dbscore']
            used_cols.extend(TON_cog_motor_dict.keys())
            with open(csv_ton_used, 'r') as f:
                head = f.readline()
            cols_in_file = head.split('\t')
            ok_cols = [col for col in used_cols if col in cols_in_file]
            visit_df_dict[visit_ton] = pd.read_csv(csv_ton_used,
                                                   sep='\t',
                                                   usecols=ok_cols)
            visit_df_dict[visit_ton].rename(columns=TON_cog_motor_dict,
                                            inplace=True)
    else:
        # BUG FIX: list.extend returns None, so the original
        # `visit_track_forms.extend(visit_ton_forms)` left the name bound
        # to None and the comprehension below crashed; concatenate instead.
        long_visit_list = visit_track_forms + visit_ton_forms
        visit_df_dict = {cvs_n.split('.')[0]: pd.read_csv(os.path.join(datadir, cvs_n),
                         sep='\t') for cvs_n in cvs_names if cvs_n.split('.')[0]
                         in long_visit_list}
    gton = df_dict['general_ton']
    mri = df_dict['mri']
    subj_df = df_dict['subject']
    # None when track_pheno_data.csv is absent; Subject handles that.
    pheno_df = df_dict.get('track_pheno_data')
    mri_TON = mri[mri.studyid == 'TON']  # dframe with TRACK-ON scans only
    subjects_dict = dict()
    subject_list = list()
    for subj_name in gton['subjid']:
        subjects_dict[subj_name] = Subject(subj_name, gton, mri_TON, subj_df,
                                           visit_df_dict, pheno_df)
        subject_list.append(subj_name)
    return subject_list, subjects_dict, gton, mri_TON, visit_df_dict
def make_Track_subjects_subset(datadir, mristr=3., studyid='TON'):
"""
Create list and dict of subjects from a given TRACK study (Track HD or
Track On) and a specific field strength. It also outputs relevant data
frames.
Parameters
----------
datadir: str
Specifies directory with csv files.
mristr: float, optional
Specifies the field strength of the files to be retrieved.
studyid: str, optional
Specifies the study from which files will be retrieved. Valid values
are 'THD' and 'TON'.
Returns
-------
subject_list: list
Contains subject id strings.
subjects_dict: dict
Keys: subject id, values: Subject object.
gen_tk: `pandas.core.frame.DataFrame`
Includes contents from appropriate Track study general csv file.
mri_tk: `pandas.core.frame.DataFrame`
Contains rows of mri.csv associated to the specified Track study only.
"""
csv_names = [file for file in os.listdir(datadir) if file.endswith('.csv')]
used_csv_list = ['general_ton', 'general', 'mri', 'subject']
# Make a dictionary of dataframes, one for each csv file:
df_dict = {cvs_n.split('.')[0]: pd.read_csv(os.path.join(datadir, cvs_n),
sep='\t') for cvs_n in csv_names if cvs_n.split('.')[0]
in used_csv_list}
gen_tk = {key: df_dict[key] for key in used_csv_list[:2]}
gen_tk['TON'] = gen_tk.pop('general_ton')
gen_tk['THD'] = gen_tk.pop('general')
mri_tk = df_dict['mri']
# Retrieve info only from defined study of interest and field of strength
mri_tk = mri_tk[mri_tk.studyid == studyid]
mri_tk = mri_tk[mri_tk.mristr == mristr]
subj_df = df_dict['subject']
subjects_dict = dict()
subject_list = list()
subjects_ids = np.unique(mri_tk['subjid'])
for subj_name in subjects_ids:
subjects_dict[subj_name] = Subject(subj_name, gen_tk, mri_tk,
subj_df, studyid=studyid)
subject_list.append(subj_name)
return subject_list, subjects_dict, gen_tk, mri_tk
def make_Track_ON_subjs_n_visits(datadir, subtype='T1W'):
"""
Retrieve list of visits for which valid scans of an imaging subtype exist.
This search is performed for all subjects listed in `mri.csv`. The output
is an array of valid subject/visit pairs.
Parameters
----------
datadir: str
Specifies directory with csv files.
subtype : str, optional
String that defines the type of image being queried. Default value
is 'T1W'. For more information, please refer to
"TRACK-IDS-2015-10-R1-DataDictionary(1).pdf", section 4.15 (MRI)
Returns
-------
subj_vst_array : ndarray
Aggregate array of valid subject/visit pairs
"""
csv_names = [file for file in os.listdir(datadir) if file.endswith('.csv')]
used_csv_list = ['general_ton', 'mri', 'subject']
# Make a dictionary of dataframes, one for each csv file:
df_dict = {csv_n.split('.')[0]: pd.read_csv(os.path.join(datadir, csv_n),
sep='\t') for csv_n in csv_names if csv_n.split('.')[0]
in used_csv_list}
gton = df_dict['general_ton']
mri = df_dict['mri']
subj_df = df_dict['subject']
mri_TON = mri[mri.studyid == 'TON'] # dframe with TRACK-ON scans only
subj_visit_array = np.array([])
subjects_ids = np.unique(mri_TON['subjid'])
for subj_name in subjects_ids:
subj_obj = Subject(subj_name, gton, mri_TON, subj_df)
if subj_visit_array.size == 0:
subj_visit_array = subj_obj.get_valid_visits(subtype=subtype)
else:
new_subj_visit = subj_obj.get_valid_visits(subtype=subtype)
if new_subj_visit.size > 0:
subj_visit_array = np.concatenate((subj_visit_array,
new_subj_visit), axis=1)
return subj_visit_array
| [
"os.listdir",
"numpy.unique",
"pandas.read_csv",
"numpy.ones",
"os.path.join",
"os.path.isfile",
"numpy.array",
"functools.partial",
"numpy.isnan",
"numpy.concatenate",
"pandas.DataFrame"
] | [((20722, 20767), 'os.path.join', 'os.path.join', (['datadir', '"""track_pheno_data.csv"""'], {}), "(datadir, 'track_pheno_data.csv')\n", (20734, 20767), False, 'import os\n'), ((20775, 20799), 'os.path.isfile', 'os.path.isfile', (['pheno_fn'], {}), '(pheno_fn)\n', (20789, 20799), False, 'import os\n'), ((25366, 25393), 'numpy.unique', 'np.unique', (["mri_tk['subjid']"], {}), "(mri_tk['subjid'])\n", (25375, 25393), True, 'import numpy as np\n'), ((26941, 26953), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (26949, 26953), True, 'import numpy as np\n'), ((26973, 27001), 'numpy.unique', 'np.unique', (["mri_TON['subjid']"], {}), "(mri_TON['subjid'])\n", (26982, 27001), True, 'import numpy as np\n'), ((9556, 9582), 'functools.partial', 'partial', (['linear_pred', 'm', 'b'], {}), '(linear_pred, m, b)\n', (9563, 9582), False, 'from functools import partial\n'), ((17877, 17915), 'numpy.array', 'np.array', (['[subjects_list, visits_list]'], {}), '([subjects_list, visits_list])\n', (17885, 17915), True, 'import numpy as np\n'), ((20839, 20869), 'pandas.read_csv', 'pd.read_csv', (['pheno_fn'], {'sep': '""","""'}), "(pheno_fn, sep=',')\n", (20850, 20869), True, 'import pandas as pd\n'), ((9027, 9105), 'numpy.array', 'np.array', (["[vd[vk][vd[vk]['subjid'] == self.subjid].visdy.iloc[0] for vk in tk]"], {}), "([vd[vk][vd[vk]['subjid'] == self.subjid].visdy.iloc[0] for vk in tk])\n", (9035, 9105), True, 'import numpy as np\n'), ((9156, 9241), 'numpy.array', 'np.array', (["[vd[vk][vd[vk]['subjid'] == self.subjid].dbscore.iloc[0] for vk in tk]"], {}), "([vd[vk][vd[vk]['subjid'] == self.subjid].dbscore.iloc[0] for vk in tk]\n )\n", (9164, 9241), True, 'import numpy as np\n'), ((18795, 18814), 'os.listdir', 'os.listdir', (['datadir'], {}), '(datadir)\n', (18805, 18814), False, 'import os\n'), ((20573, 20601), 'os.path.join', 'os.path.join', (['datadir', 'cvs_n'], {}), '(datadir, cvs_n)\n', (20585, 20601), False, 'import os\n'), ((20992, 21027), 'os.path.join', 
'os.path.join', (['datadir', "(v_t + '.csv')"], {}), "(datadir, v_t + '.csv')\n", (21004, 21027), False, 'import os\n'), ((21544, 21592), 'pandas.read_csv', 'pd.read_csv', (['csv_used'], {'sep': '"""\t"""', 'usecols': 'ok_cols'}), "(csv_used, sep='\\t', usecols=ok_cols)\n", (21555, 21592), True, 'import pandas as pd\n'), ((21831, 21872), 'os.path.join', 'os.path.join', (['datadir', "(visit_ton + '.csv')"], {}), "(datadir, visit_ton + '.csv')\n", (21843, 21872), False, 'import os\n'), ((22231, 22283), 'pandas.read_csv', 'pd.read_csv', (['csv_ton_used'], {'sep': '"""\t"""', 'usecols': 'ok_cols'}), "(csv_ton_used, sep='\\t', usecols=ok_cols)\n", (22242, 22283), True, 'import pandas as pd\n'), ((24558, 24577), 'os.listdir', 'os.listdir', (['datadir'], {}), '(datadir)\n', (24568, 24577), False, 'import os\n'), ((24779, 24807), 'os.path.join', 'os.path.join', (['datadir', 'cvs_n'], {}), '(datadir, cvs_n)\n', (24791, 24807), False, 'import os\n'), ((26407, 26426), 'os.listdir', 'os.listdir', (['datadir'], {}), '(datadir)\n', (26417, 26426), False, 'import os\n'), ((26617, 26645), 'os.path.join', 'os.path.join', (['datadir', 'csv_n'], {}), '(datadir, csv_n)\n', (26629, 26645), False, 'import os\n'), ((9303, 9318), 'numpy.isnan', 'np.isnan', (['db_in'], {}), '(db_in)\n', (9311, 9318), True, 'import numpy as np\n'), ((11356, 11381), 'pandas.DataFrame', 'pd.DataFrame', (['all_vis_dfs'], {}), '(all_vis_dfs)\n', (11368, 11381), True, 'import pandas as pd\n'), ((17676, 17717), 'numpy.unique', 'np.unique', (['visits_list'], {'return_index': '(True)'}), '(visits_list, return_index=True)\n', (17685, 17717), True, 'import numpy as np\n'), ((22653, 22681), 'os.path.join', 'os.path.join', (['datadir', 'cvs_n'], {}), '(datadir, cvs_n)\n', (22665, 22681), False, 'import os\n'), ((27373, 27431), 'numpy.concatenate', 'np.concatenate', (['(subj_visit_array, new_subj_visit)'], {'axis': '(1)'}), '((subj_visit_array, new_subj_visit), axis=1)\n', (27387, 27431), True, 'import numpy as np\n'), 
((17747, 17768), 'numpy.array', 'np.array', (['visits_list'], {}), '(visits_list)\n', (17755, 17768), True, 'import numpy as np\n'), ((17812, 17835), 'numpy.array', 'np.array', (['subjects_list'], {}), '(subjects_list)\n', (17820, 17835), True, 'import numpy as np\n'), ((9397, 9413), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (9404, 9413), True, 'import numpy as np\n')] |
import sys
import os
import requests
from datetime import datetime, timedelta
import argparse
import json
def parseArgs():
parser = argparse.ArgumentParser()
parser.add_argument('--startdate', nargs='?', default=getTodayStr(), type=str, help="Provide a start date, for example: 2019-06-13. \nDefaults to today's date")
parser.add_argument('--enddate', nargs='?', default=getTodayStr(), type=str, help="Provide an end date, for example: 2019-06-13. \nDefaults to today's date")
parser.add_argument('--ap', nargs='?', type=str, help="Provide an access provider to filter by, for example: optimum")
parser.add_argument('--saltcall', action='store_true', help="Set this flag to run a salt call on gathered store numbers.")
parser.add_argument('--force', action='store_true', help="Set this flag to autoagree to all user prompts.")
parser.add_argument('--e6', action='store_true', help="Set this flag to filter by E6 stores.")
parser.add_argument('--e7', action='store_true', help="Set this flag to filter by E7 stores.")
parser.add_argument('--getstore', nargs='?', type=str, help="Prove a StoreNumber to retrieve.")
args = parser.parse_args()
if(args.getstore is not None):
print("Getting Store Number: {}").format(args.getstore)
return args
files = ["t128_aap.1.log", "t128_aap.log"]
patterns = ["Attempting to add config for stores", "Attempting to remove route filter for these stores"]
def getPath(fileName):
path = "/var/log/128technology/"
fullPath = path + fileName
return fullPath
def getTodayStr():
now = datetime.now()
date = now.strftime("%Y-%m-%d")
return date
def sortByDate(log):
ordered_data = sorted(log.items(), key = lambda x:datetime.strptime(x[1], '%Y-%m-%d'), reverse=False)
return ordered_data
def apiCall(storeNumber):
host = "https://aap.attucs.com/api.php?uname=128t&storeNumber="
host = host + str(storeNumber)
try:
r = requests.get(url = host)
data = r.json()
data = json.loads(data)
return data
except:
"ERROR Making API Call"
def openFiles():
logs = []
for file in files:
path = getPath(file)
with open(path) as f:
for line in f.readlines():
logs.append(str(line))
return logs
def filterByDate(fullLog, startDate, endDate):
startDate = datetime.strptime(startDate, "%Y-%m-%d")
endDate = datetime.strptime(endDate, "%Y-%m-%d")
logs = []
for line in fullLog:
try:
date = datetime.strptime(line[:10], "%Y-%m-%d")
if startDate <= date <= endDate:
logs.append(str(line))
except:
pass
return logs
def getMatches(log, pattern):
logs = []
for line in log:
if pattern in line:
logs.append(line)
return logs
def getStoreNumbers(line):
vals = line[line.find("(")+1:line.find(")")]
content = vals.split(",")
storeNumbers = []
for num in content:
num = num.strip('\'"')
try:
int(num)
storeNumber = str(num).replace(' ','')
storeNumbers.append(storeNumber)
except:
pass
return storeNumbers
def getAllStoreNumbersByDate(log):
logs = {}
for line in log:
storeNumbers = getStoreNumbers(line)
date = line[:10]
for storeNumber in storeNumbers:
if storeNumber not in logs.keys():
logs[storeNumber] = date
else:
oldDate = logs[storeNumber]
oldDate = datetime.strptime(oldDate, "%Y-%m-%d")
currentDate = datetime.strptime(date, "%Y-%m-%d")
if(currentDate > oldDate):
logs[storeNumber] = date
return logs
def updateE6(e6, e7):
log = e6
for storeNumber in log.keys():
if storeNumber in e7.keys():
log.pop(storeNumber, None)
return log
def proccessLog(log, ap):
dataLog = []
previousDate = None
for pair in log:
storeNumber = pair[0]
date = pair[1]
procData = apiCall(storeNumber)
try:
if (ap is None or ap.lower() in procData['AccessProvider'].lower()):
dataLog.append(procData)
if(date != previousDate):
print("{}: \n").format(date)
previousDate = date
routerName = "AAP" + str(procData['State']) + str(procData['StoreNumber']) + "P" + str(procData['Pod'])
print("Router: {}\n\t Store Number: {}\n\t Access Provider: {}\n\t Pod: {}\n\t State: {}\n\t LannerSN-A: {}\n\t LannerSN-B: {}\n").format(routerName, procData['StoreNumber'], procData['AccessProvider'], procData['Pod'], procData['State'], procData['LannerSN-A'], procData['LannerSN-B'])
except:
pass
return dataLog
def saltCall(procLog, force):
saltCMD = "salt-call t128_aap.run_raw_template udp-transform-dia "
numsToCall = ""
for data in procLog:
numsToCall += str(int(data['StoreNumber'])) + " "
saltCMD += numsToCall
print("Preparing to run the following bash script\n{}").format(saltCMD)
if force:
os.system(saltCMD)
elif (raw_input("Run above command? (y/n): ").lower().strip()[:1] == "y"):
os.system(saltCMD)
def buildOutput(args, e6, e7):
outputLog = []
if(args.getstore is not None):
try:
procData = apiCall(args.getstore)
routerName = "AAP" + str(procData['State']) + str(procData['StoreNumber']) + "P" + str(procData['Pod'])
print("\nRouter: {}\n\t Store Number: {}\n\t Access Provider: {}\n\t Pod: {}\n\t State: {}\n\t LannerSN-A: {}\n\t LannerSN-B: {}\n").format(routerName, procData['StoreNumber'], procData['AccessProvider'], procData['Pod'], procData['State'], procData['LannerSN-A'], procData['LannerSN-B'])
return
except Exception as e:
print("\nFailed to get StoreNumber!\nError: \n\t{}\n").format(e)
if(not args.e7):
print("*"*20)
print("{} Total E6 Stores").format(len(e6))
print("*"*20 + "\n")
outputLog += proccessLog(e6, args.ap)
if(not args.e6):
print("*"*20)
print("{} Total E7 Stores").format(len(e7))
print("*"*20 + "\n")
outputLog += proccessLog(e7, args.ap)
if(args.saltcall):
saltCall(outputLog, args.force)
def main():
args = parseArgs()
fullLog = openFiles()
filteredLog = filterByDate(fullLog, args.startdate, args.enddate)
e6FilteredLog = getMatches(filteredLog, patterns[0])
e7FilteredLog = getMatches(filteredLog, patterns[1])
e7FullLog = getMatches(fullLog, patterns[1])
e7FullSN = getAllStoreNumbersByDate(e7FullLog)
e6FilteredSN = getAllStoreNumbersByDate(e6FilteredLog)
e7FilteredSN = sortByDate(getAllStoreNumbersByDate(e7FilteredLog))
e6FilteredSN = sortByDate(updateE6(e6FilteredSN, e7FullSN))
buildOutput(args, e6FilteredSN, e7FilteredSN)
if __name__== "__main__":
main()
| [
"json.loads",
"argparse.ArgumentParser",
"datetime.datetime.strptime",
"requests.get",
"datetime.datetime.now",
"os.system"
] | [((137, 162), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (160, 162), False, 'import argparse\n'), ((1587, 1601), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1599, 1601), False, 'from datetime import datetime, timedelta\n'), ((2374, 2414), 'datetime.datetime.strptime', 'datetime.strptime', (['startDate', '"""%Y-%m-%d"""'], {}), "(startDate, '%Y-%m-%d')\n", (2391, 2414), False, 'from datetime import datetime, timedelta\n'), ((2429, 2467), 'datetime.datetime.strptime', 'datetime.strptime', (['endDate', '"""%Y-%m-%d"""'], {}), "(endDate, '%Y-%m-%d')\n", (2446, 2467), False, 'from datetime import datetime, timedelta\n'), ((1957, 1979), 'requests.get', 'requests.get', ([], {'url': 'host'}), '(url=host)\n', (1969, 1979), False, 'import requests\n'), ((2021, 2037), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2031, 2037), False, 'import json\n'), ((5213, 5231), 'os.system', 'os.system', (['saltCMD'], {}), '(saltCMD)\n', (5222, 5231), False, 'import os\n'), ((2539, 2579), 'datetime.datetime.strptime', 'datetime.strptime', (['line[:10]', '"""%Y-%m-%d"""'], {}), "(line[:10], '%Y-%m-%d')\n", (2556, 2579), False, 'from datetime import datetime, timedelta\n'), ((5319, 5337), 'os.system', 'os.system', (['saltCMD'], {}), '(saltCMD)\n', (5328, 5337), False, 'import os\n'), ((1730, 1765), 'datetime.datetime.strptime', 'datetime.strptime', (['x[1]', '"""%Y-%m-%d"""'], {}), "(x[1], '%Y-%m-%d')\n", (1747, 1765), False, 'from datetime import datetime, timedelta\n'), ((3584, 3622), 'datetime.datetime.strptime', 'datetime.strptime', (['oldDate', '"""%Y-%m-%d"""'], {}), "(oldDate, '%Y-%m-%d')\n", (3601, 3622), False, 'from datetime import datetime, timedelta\n'), ((3653, 3688), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (3670, 3688), False, 'from datetime import datetime, timedelta\n')] |
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import requests
from PyPowerFlex import base_client
from PyPowerFlex import exceptions
from PyPowerFlex import utils
LOG = logging.getLogger(__name__)
class SnapshotDef(dict):
"""PowerFlex definition of snapshot to create.
JSON-serializable, should be used as `snapshot_defs` list item
in `System.snapshot_volumes` method.
"""
def __init__(self, volume_id, name=None):
"""Initialize SnapshotDef object.
:type volume_id: str
:type name: str
"""
params = utils.prepare_params(
{
'volumeId': volume_id,
'snapshotName': name,
},
dump=False
)
super(SnapshotDef, self).__init__(**params)
class System(base_client.EntityRequest):
def __init__(self, token, configuration):
self.__api_version = None
super(System, self).__init__(token, configuration)
def api_version(self, cached=True):
"""Get PowerFlex API version.
:param cached: get version from cache or send API response
:type cached: bool
:rtype: str
"""
url = '/version'
if not self.__api_version or not cached:
r, response = self.send_get_request(url)
if r.status_code != requests.codes.ok:
exc = exceptions.PowerFlexFailQuerying('API version')
LOG.error(exc.message)
raise exc
pattern = re.compile(r'^\d+(\.\d+)*$')
if not pattern.match(response):
msg = (
'Failed to query PowerFlex API version. Invalid version '
'format: {response}.'.format(response=r.text)
)
LOG.error(msg)
raise exceptions.PowerFlexClientException(msg)
self.__api_version = response
return self.__api_version
def remove_cg_snapshots(self, system_id, cg_id, allow_ext_managed=None):
"""Remove PowerFlex ConsistencyGroup snapshots.
:type system_id: str
:type cg_id: str
:type allow_ext_managed: bool
:rtype: dict
"""
action = 'removeConsistencyGroupSnapshots'
params = dict(
snapGroupId=cg_id,
allowOnExtManagedVol=allow_ext_managed
)
r, response = self.send_post_request(self.base_action_url,
action=action,
entity=self.entity,
entity_id=system_id,
params=params)
if r.status_code != requests.codes.ok:
msg = ('Failed to remove consistency group snapshots from '
'PowerFlex {entity} with id {_id}. '
'Error: {response}'.format(entity=self.entity,
_id=system_id,
response=response))
LOG.error(msg)
raise exceptions.PowerFlexClientException(msg)
return response
def snapshot_volumes(self,
system_id,
snapshot_defs,
access_mode=None,
retention_period=None,
allow_ext_managed=None):
"""Create snapshots of PowerFlex volumes.
:type retention_period: str
:type access_mode: str
:type system_id: str
:type snapshot_defs: list[dict]
:type allow_ext_managed: bool
:rtype: dict
"""
action = 'snapshotVolumes'
params = dict(
snapshotDefs=snapshot_defs,
allowOnExtManagedVol=allow_ext_managed,
accessModeLimit=access_mode,
retentionPeriodInMin=retention_period
)
r, response = self.send_post_request(self.base_action_url,
action=action,
entity=self.entity,
entity_id=system_id,
params=params)
if r.status_code != requests.codes.ok:
msg = ('Failed to snapshot volumes on PowerFlex {entity} '
'with id {_id}.'
' Error: {response}'.format(entity=self.entity,
_id=system_id,
response=response))
LOG.error(msg)
raise exceptions.PowerFlexClientException(msg)
return response
| [
"logging.getLogger",
"PyPowerFlex.exceptions.PowerFlexClientException",
"re.compile",
"PyPowerFlex.utils.prepare_params",
"PyPowerFlex.exceptions.PowerFlexFailQuerying"
] | [((775, 802), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (792, 802), False, 'import logging\n'), ((1171, 1250), 'PyPowerFlex.utils.prepare_params', 'utils.prepare_params', (["{'volumeId': volume_id, 'snapshotName': name}"], {'dump': '(False)'}), "({'volumeId': volume_id, 'snapshotName': name}, dump=False)\n", (1191, 1250), False, 'from PyPowerFlex import utils\n'), ((2109, 2139), 're.compile', 're.compile', (['"""^\\\\d+(\\\\.\\\\d+)*$"""'], {}), "('^\\\\d+(\\\\.\\\\d+)*$')\n", (2119, 2139), False, 'import re\n'), ((3698, 3738), 'PyPowerFlex.exceptions.PowerFlexClientException', 'exceptions.PowerFlexClientException', (['msg'], {}), '(msg)\n', (3733, 3738), False, 'from PyPowerFlex import exceptions\n'), ((5238, 5278), 'PyPowerFlex.exceptions.PowerFlexClientException', 'exceptions.PowerFlexClientException', (['msg'], {}), '(msg)\n', (5273, 5278), False, 'from PyPowerFlex import exceptions\n'), ((1974, 2021), 'PyPowerFlex.exceptions.PowerFlexFailQuerying', 'exceptions.PowerFlexFailQuerying', (['"""API version"""'], {}), "('API version')\n", (2006, 2021), False, 'from PyPowerFlex import exceptions\n'), ((2421, 2461), 'PyPowerFlex.exceptions.PowerFlexClientException', 'exceptions.PowerFlexClientException', (['msg'], {}), '(msg)\n', (2456, 2461), False, 'from PyPowerFlex import exceptions\n')] |
from adrian.cgen import Include
stdlib = Include("stdlib.h")
stdint = Include("stdint.h")
stdio = Include("stdio.h")
assert_ = Include("assert.h")
| [
"adrian.cgen.Include"
] | [((43, 62), 'adrian.cgen.Include', 'Include', (['"""stdlib.h"""'], {}), "('stdlib.h')\n", (50, 62), False, 'from adrian.cgen import Include\n'), ((72, 91), 'adrian.cgen.Include', 'Include', (['"""stdint.h"""'], {}), "('stdint.h')\n", (79, 91), False, 'from adrian.cgen import Include\n'), ((100, 118), 'adrian.cgen.Include', 'Include', (['"""stdio.h"""'], {}), "('stdio.h')\n", (107, 118), False, 'from adrian.cgen import Include\n'), ((129, 148), 'adrian.cgen.Include', 'Include', (['"""assert.h"""'], {}), "('assert.h')\n", (136, 148), False, 'from adrian.cgen import Include\n')] |
from dataclasses import dataclass
import pygame
import pymunk
import pymunk.pygame_util
from . import style
from . import display
from . import structures
@dataclass(unsafe_hash=True)
class Game:
__metaclass__ = structures.IterableObject
name = ""
entities = {}
style = style.GGSTYLE()
space: pymunk.Space
_draw_options: pymunk.pygame_util.DrawOptions
screen: display.Screen
def __init__(self, name, width = 800, height = 600):
self.name = name
self.screen = display.Screen(width, height)
pygame.init()
pygame.display.set_caption(name)
self.space = pymunk.Space()
self._draw_options = pymunk.pygame_util.DrawOptions(self.screen.canvas)
self.clock = pygame.time.Clock()
self.particle_effects = {}
def run(self):
self.running = 1
while self.running == 1:
self._handle_quit()
self.screen.clear(self.style.BLACK)
hello = self.style.FONT.render("hi", False, style.GGSTYLE.GREEN)
self.entities.draw(self.screen)
self.screen.blit(hello, (self.screen.width / 2 - hello.get_rect().w / 2, self.scree.height / 2 - hello.get_rect().h / 2))
pygame.display.update()
self.clock.tick(60)
pygame.quit()
def _handle_quit(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = 0
def _update_space(self):
self.space.step(1/60)
def addobject(self, object):
self.entities.addobject(object)
def addtospace(self, body):
self.space.add(body) | [
"pygame.init",
"pymunk.pygame_util.DrawOptions",
"pygame.quit",
"pygame.event.get",
"dataclasses.dataclass",
"pygame.time.Clock",
"pymunk.Space",
"pygame.display.set_caption",
"pygame.display.update"
] | [((159, 186), 'dataclasses.dataclass', 'dataclass', ([], {'unsafe_hash': '(True)'}), '(unsafe_hash=True)\n', (168, 186), False, 'from dataclasses import dataclass\n'), ((567, 580), 'pygame.init', 'pygame.init', ([], {}), '()\n', (578, 580), False, 'import pygame\n'), ((589, 621), 'pygame.display.set_caption', 'pygame.display.set_caption', (['name'], {}), '(name)\n', (615, 621), False, 'import pygame\n'), ((643, 657), 'pymunk.Space', 'pymunk.Space', ([], {}), '()\n', (655, 657), False, 'import pymunk\n'), ((687, 737), 'pymunk.pygame_util.DrawOptions', 'pymunk.pygame_util.DrawOptions', (['self.screen.canvas'], {}), '(self.screen.canvas)\n', (717, 737), False, 'import pymunk\n'), ((759, 778), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (776, 778), False, 'import pygame\n'), ((1314, 1327), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1325, 1327), False, 'import pygame\n'), ((1407, 1425), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1423, 1425), False, 'import pygame\n'), ((1249, 1272), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1270, 1272), False, 'import pygame\n')] |
# TODO: move this to __init__.py? this was in a separate file because
# setup.py used to import porcupine but it doesn't do it anymore
import os
import platform
import appdirs
from porcupine import __author__ as _author
if platform.system() in {'Windows', 'Darwin'}:
# these platforms like path names like "Program Files" or
# "Application Support"
_appname = 'Porcupine'
else:
_appname = 'porcupine'
_author = _author.lower()
cachedir = appdirs.user_cache_dir(_appname, _author)
configdir = appdirs.user_config_dir(_appname, _author)
# this hack shouldn't be a problem because porcupine isn't distributed
# with tools like pyinstaller, and it doesn't need to be because people
# using porcupine have python installed anyway
installdir = os.path.dirname(os.path.abspath(__file__))
def makedirs():
all_paths = [cachedir, configdir, os.path.join(configdir, 'plugins')]
for path in all_paths:
os.makedirs(path, exist_ok=True)
| [
"appdirs.user_cache_dir",
"os.makedirs",
"appdirs.user_config_dir",
"os.path.join",
"platform.system",
"porcupine.__author__.lower",
"os.path.abspath"
] | [((464, 505), 'appdirs.user_cache_dir', 'appdirs.user_cache_dir', (['_appname', '_author'], {}), '(_appname, _author)\n', (486, 505), False, 'import appdirs\n'), ((518, 560), 'appdirs.user_config_dir', 'appdirs.user_config_dir', (['_appname', '_author'], {}), '(_appname, _author)\n', (541, 560), False, 'import appdirs\n'), ((228, 245), 'platform.system', 'platform.system', ([], {}), '()\n', (243, 245), False, 'import platform\n'), ((436, 451), 'porcupine.__author__.lower', '_author.lower', ([], {}), '()\n', (449, 451), True, 'from porcupine import __author__ as _author\n'), ((781, 806), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (796, 806), False, 'import os\n'), ((864, 898), 'os.path.join', 'os.path.join', (['configdir', '"""plugins"""'], {}), "(configdir, 'plugins')\n", (876, 898), False, 'import os\n'), ((935, 967), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (946, 967), False, 'import os\n')] |
from datetime import time
from vnpy.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
ArrayManager
)
from vnpy.app.cta_strategy.base import (
EngineType,
STOPORDER_PREFIX,
StopOrder,
StopOrderStatus,
)
from vnpy.app.cta_strategy.TSMtools import TSMArrayManager
class TSMyoORBStrategy(CtaTemplate):
""""""
author = "TheSuperMyo"
# OR及计数器
or_setup = 30
bar_counter = 0
a_up_setup_counter = 0
c_up_setup_counter = 0
a_down_setup_counter = 0
c_down_setup_counter = 0
stop_long_counter = 0
stop_short_counter = 0
or_h = 0
or_l = 0
or_r = 0
a_up = 0
a_down = 0
c_up = 0
c_down = 0
k1 = 1.1
k2 = 1.5
day_high = 0
day_low = 0
long_intra = 0
short_intra = 0
long_stop = 0
short_stop = 0
is_lock = 0
fixed_size = 1
# 针对不同交易时间的市场
open_time_night = time(hour=21,minute=0)# 商品夜盘
open_time_day_1 = time(hour=9,minute=0)# 商品
open_time_day_2 = time(hour=9,minute=30)# 股指
close_time_day = time(hour=15,minute=0)# 商品/股指(除了利率期货)
close_time_night_1 = time(hour=23,minute=0)# 其他夜盘商品
close_time_night_2 = time(hour=1,minute=0)# 工业金属
close_time_night_3 = time(hour=2,minute=30)# 黄金/白银/原油
break_time_start_1 = time(hour=10,minute=15)# 商品茶歇
break_time_start_2 = time(hour=11,minute=30)# 全体午休
break_time_end_1 = time(hour=10,minute=30)# 商品茶歇
break_time_end_2 = time(hour=13,minute=0)# 股指下午
break_time_end_3 = time(hour=13,minute=30)# 商品下午
parameters = ['or_setup', 'k1', 'k2', 'is_lock', 'fixed_size']
variables = ['a_up_setup_counter', 'c_up_setup_counter',\
'a_down_setup_counter', 'c_down_setup_counter', 'stop_long_counter',\
'stop_short_counter', 'a_up', 'a_down', 'c_up', 'c_down',\
'long_intra','short_intra','long_stop','short_stop']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super(TSMyoORBStrategy, self).__init__(
cta_engine, strategy_name, vt_symbol, setting
)
self.bg = BarGenerator(self.on_bar)
self.am = TSMArrayManager()
# 策略自身订单管理
self.active_orderids = []
self.bars = []
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log("策略初始化")
# 根据需要的历史数据长度设定
self.load_bar(5)
def on_start(self):
"""
Callback when strategy is started.
"""
self.write_log("策略启动")
def on_stop(self):
"""
Callback when strategy is stopped.
"""
self.write_log("策略停止")
def tick_filter(self, tick: TickData):
"""
过滤异常时间的tick
"""
pass
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
self.bg.update_tick(tick)
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
am = self.am
am.update_bar(bar)
if not am.inited:
return
self.cancel_all()
self.bars.append(bar)
if len(self.bars) <= 2:
return
else:
self.bars.pop(0)
last_bar = self.bars[-2]
# 今日开盘
if last_bar.datetime.date() != bar.datetime.date():
self.day_high = bar.high_price
self.day_low = bar.low_price
self.bar_counter = 0
self.a_up = 0
self.a_down = 0
self.c_up = 0
self.c_down = 0
else:
self.day_high = max(self.day_high, bar.high_price)
self.day_low = min(self.day_low, bar.low_price)
if self.bar_counter == self.or_setup:
self.or_h, self.or_l = am.donchian(self.or_setup, False)
self.or_r = self.or_h-self.or_l
self.a_up = self.or_h + self.k1*self.or_r
self.a_down = self.or_l - self.k1*self.or_r
self.c_up = self.or_h + self.k2*self.or_r
self.c_down = self.or_l + self.k2*self.or_r
if self.pos == 0 and self.a_up:
# 价格大于+A
if bar.close_price > self.a_up:
if self.a_up_setup_counter >= self.or_setup:
# 且持续一个OR,开多
if self.active_orderids:
self.write_log("存在活跃订单,无法挂单")
return
if self.is_lock:
orderids = self.buy(bar.close_price, self.fixed_size, lock=True)
else:
orderids = self.buy(bar.close_price, self.fixed_size, lock=False)
self.active_orderids.extend(orderids)
else:
self.a_up_setup_counter += 1
# 价格大于+C
if bar.close_price > self.c_up and self.day_low < self.a_down:
if self.c_up_setup_counter >= self.or_setup/2:
# 从低于-A反转且持续1/2个OR,开多
if self.active_orderids:
self.write_log("存在活跃订单,无法挂单")
return
if self.is_lock:
orderids = self.buy(bar.close_price, self.fixed_size, lock=True)
else:
orderids = self.buy(bar.close_price, self.fixed_size, lock=False)
self.active_orderids.extend(orderids)
else:
self.c_up_setup_counter += 1
else:
self.c_up_setup_counter = 0
else:
self.a_up_setup_counter = 0
# 价格小于-A
if bar.close_price < self.a_down:
if self.a_down_setup_counter >= self.or_setup:
# 且持续一个OR,开空
if self.active_orderids:
self.write_log("存在活跃订单,无法挂单")
return
if self.is_lock:
orderids = self.short(bar.close_price, self.fixed_size, lock=True)
else:
orderids = self.short(bar.close_price, self.fixed_size, lock=False)
self.active_orderids.extend(orderids)
else:
self.a_down_setup_counter += 1
# 价格小于-C
if bar.close_price < self.c_down and self.day_high > self.a_up:
if self.c_down_setup_counter >= self.or_setup/2:
# 从低于-A反转且持续1/2个OR,开空
if self.active_orderids:
self.write_log("存在活跃订单,无法挂单")
return
if self.is_lock:
orderids = self.short(bar.close_price, self.fixed_size, lock=True)
else:
orderids = self.short(bar.close_price, self.fixed_size, lock=False)
self.active_orderids.extend(orderids)
else:
self.c_down_setup_counter += 1
else:
self.c_down_setup_counter = 0
else:
self.a_down_setup_counter = 0
if self.pos > 0:
close_long = self.long_stop
if bar.close_price < self.long_intra:
if self.stop_long_counter >= self.or_setup:
# 一个OR不盈利平多
close_long = bar.close_price
else:
self.stop_long_counter = 0
if self.long_stop:
# 统一挂停止单平多
stop_long_price = max(close_long, self.long_stop)
if self.active_orderids:
self.write_log("存在活跃订单,无法挂单")
return
if self.is_lock:
orderids = self.sell(stop_long_price, self.fixed_size, stop=True, lock=True)
else:
orderids = self.sell(stop_long_price, self.fixed_size, stop=True, lock=False)
self.active_orderids.extend(orderids)
if self.pos < 0:
close_short = self.short_stop
if bar.close_price > self.short_intra:
if self.stop_short_counter >= self.or_setup:
# 一个OR不盈利平空
close_short = bar.close_price
else:
self.stop_short_counter = 0
if self.short_stop:
# 统一挂停止单平空
stop_short_price = min(close_short, self.short_stop)
if self.active_orderids:
self.write_log("存在活跃订单,无法挂单")
return
if self.is_lock:
orderids = self.cover(stop_short_price, self.fixed_size, stop=True, lock=True)
else:
orderids = self.cover(stop_short_price, self.fixed_size, stop=True, lock=False)
self.active_orderids.extend(orderids)
self.bar_counter += 1
self.put_event()
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
# 移除已成交或已撤销的订单
if not order.is_active() and order.vt_orderid in self.active_orderids:
self.active_orderids.remove(order.vt_orderid)
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
if self.pos > 0:
self.long_intra = trade.price
self.short_intra = 0
self.long_stop = self.or_l
self.short_stop = 0
elif self.pos < 0:
self.long_intra = 0
self.short_intra = trade.price
self.long_stop = 0
self.short_stop = self.or_h
elif self.pos == 0:
self.long_intra = 0
self.short_intra = 0
self.long_stop = 0
self.short_stop = 0
self.stop_short_counter = 0
self.stop_long_counter = 0
self.a_up_setup_counter = 0
self.c_up_setup_counter = 0
self.a_down_setup_counter = 0
self.c_down_setup_counter = 0
# 邮寄提醒
self.send_email(f"{trade.vt_symbol}在{trade.time}成交,价格{trade.price},方向{trade.direction}{trade.offset},数量{trade.volume}")
self.put_event()
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
# 刚刚生成的本地停止单
if stop_order.status == StopOrderStatus.WAITING:
return
# 撤销的本地停止单,从活跃列表移除
if stop_order.status == StopOrderStatus.CANCELLED:
if stop_order.stop_orderid in self.active_orderids:
self.active_orderids.remove(stop_order.stop_orderid)
# 触发的本地停止单,停止单移除,限价单加入
if stop_order.status == StopOrderStatus.TRIGGERED:
if stop_order.stop_orderid in self.active_orderids:
self.active_orderids.remove(stop_order.stop_orderid)
self.active_orderids.extend(stop_order.vt_orderids)
# 撤掉其他停止单
for other_orderids in self.active_orderids:
if other_orderids.startswith(STOPORDER_PREFIX):
self.cancel_order(other_orderids) | [
"vnpy.app.cta_strategy.TSMtools.TSMArrayManager",
"datetime.time",
"vnpy.app.cta_strategy.BarGenerator"
] | [((964, 987), 'datetime.time', 'time', ([], {'hour': '(21)', 'minute': '(0)'}), '(hour=21, minute=0)\n', (968, 987), False, 'from datetime import time\n'), ((1015, 1037), 'datetime.time', 'time', ([], {'hour': '(9)', 'minute': '(0)'}), '(hour=9, minute=0)\n', (1019, 1037), False, 'from datetime import time\n'), ((1063, 1086), 'datetime.time', 'time', ([], {'hour': '(9)', 'minute': '(30)'}), '(hour=9, minute=30)\n', (1067, 1086), False, 'from datetime import time\n'), ((1112, 1135), 'datetime.time', 'time', ([], {'hour': '(15)', 'minute': '(0)'}), '(hour=15, minute=0)\n', (1116, 1135), False, 'from datetime import time\n'), ((1175, 1198), 'datetime.time', 'time', ([], {'hour': '(23)', 'minute': '(0)'}), '(hour=23, minute=0)\n', (1179, 1198), False, 'from datetime import time\n'), ((1231, 1253), 'datetime.time', 'time', ([], {'hour': '(1)', 'minute': '(0)'}), '(hour=1, minute=0)\n', (1235, 1253), False, 'from datetime import time\n'), ((1284, 1307), 'datetime.time', 'time', ([], {'hour': '(2)', 'minute': '(30)'}), '(hour=2, minute=30)\n', (1288, 1307), False, 'from datetime import time\n'), ((1347, 1371), 'datetime.time', 'time', ([], {'hour': '(10)', 'minute': '(15)'}), '(hour=10, minute=15)\n', (1351, 1371), False, 'from datetime import time\n'), ((1402, 1426), 'datetime.time', 'time', ([], {'hour': '(11)', 'minute': '(30)'}), '(hour=11, minute=30)\n', (1406, 1426), False, 'from datetime import time\n'), ((1455, 1479), 'datetime.time', 'time', ([], {'hour': '(10)', 'minute': '(30)'}), '(hour=10, minute=30)\n', (1459, 1479), False, 'from datetime import time\n'), ((1508, 1531), 'datetime.time', 'time', ([], {'hour': '(13)', 'minute': '(0)'}), '(hour=13, minute=0)\n', (1512, 1531), False, 'from datetime import time\n'), ((1560, 1584), 'datetime.time', 'time', ([], {'hour': '(13)', 'minute': '(30)'}), '(hour=13, minute=30)\n', (1564, 1584), False, 'from datetime import time\n'), ((2164, 2189), 'vnpy.app.cta_strategy.BarGenerator', 'BarGenerator', (['self.on_bar'], 
{}), '(self.on_bar)\n', (2176, 2189), False, 'from vnpy.app.cta_strategy import CtaTemplate, StopOrder, TickData, BarData, TradeData, OrderData, BarGenerator, ArrayManager\n'), ((2208, 2225), 'vnpy.app.cta_strategy.TSMtools.TSMArrayManager', 'TSMArrayManager', ([], {}), '()\n', (2223, 2225), False, 'from vnpy.app.cta_strategy.TSMtools import TSMArrayManager\n')] |
from __future__ import print_function
import numpy as np
import yt
from hyperion.model import Model
import matplotlib as mpl
mpl.use('Agg')
import powderday.config as cfg
from powderday.grid_construction import yt_octree_generate
from powderday.find_order import find_order
import powderday.powderday_test_octree as pto
import powderday.hyperion_octree_stats as hos
from hyperion.dust import SphericalDust
from powderday.helpers import energy_density_absorbed_by_CMB
from powderday.analytics import dump_cell_info
def sph_m_gen(fname, field_add):
    """Build a Hyperion octree radiative-transfer model from an SPH snapshot.

    Parameters
    ----------
    fname : str
        Path of the hydro snapshot to load.
    field_add : callable
        Field-adding function passed through to ``yt_octree_generate``.

    Returns
    -------
    tuple
        ``(m, xcent, ycent, zcent, dx, dy, dz, reg, ds, boost)`` -- the
        configured :class:`hyperion.model.Model`, the grid centre (cm), the
        parent-grid extents (cm), the yt region and dataset, and the
        centring offset vector ``boost``.
    """
    refined, dustdens, fc1, fw1, reg, ds = yt_octree_generate(fname, field_add)

    # Cell edges in proper cm.  yt 4.x uses unyt's .to(); older yt exposes
    # convert_to_units().  NOTE(review): this only matches the literal dev
    # string '4.0.dev0'; released yt 4.x versions take the legacy branch --
    # confirm that convert_to_units() is still available there.
    if yt.__version__ == '4.0.dev0':
        xmin = (fc1[:, 0] - fw1[:, 0] / 2.).to('cm')
        xmax = (fc1[:, 0] + fw1[:, 0] / 2.).to('cm')
        ymin = (fc1[:, 1] - fw1[:, 1] / 2.).to('cm')
        ymax = (fc1[:, 1] + fw1[:, 1] / 2.).to('cm')
        zmin = (fc1[:, 2] - fw1[:, 2] / 2.).to('cm')
        zmax = (fc1[:, 2] + fw1[:, 2] / 2.).to('cm')
    else:
        xmin = (fc1[:, 0] - fw1[:, 0] / 2.).convert_to_units('cm')
        xmax = (fc1[:, 0] + fw1[:, 0] / 2.).convert_to_units('cm')
        ymin = (fc1[:, 1] - fw1[:, 1] / 2.).convert_to_units('cm')
        ymax = (fc1[:, 1] + fw1[:, 1] / 2.).convert_to_units('cm')
        zmin = (fc1[:, 2] - fw1[:, 2] / 2.).convert_to_units('cm')
        zmax = (fc1[:, 2] + fw1[:, 2] / 2.).convert_to_units('cm')

    # dx, dy, dz are the edges of the parent grid.
    dx = (np.max(xmax) - np.min(xmin)).value
    dy = (np.max(ymax) - np.min(ymin)).value
    dz = (np.max(zmax) - np.min(zmin)).value

    xcent = float(ds.quan(cfg.model.x_cent, "code_length").to('cm').value)
    ycent = float(ds.quan(cfg.model.y_cent, "code_length").to('cm').value)
    zcent = float(ds.quan(cfg.model.z_cent, "code_length").to('cm').value)

    boost = np.array([xcent, ycent, zcent])
    print('[sph_tributary] boost = ', boost)
    print('[sph_tributary] xmin (pc)= ', np.min(xmin.to('pc')))
    print('[sph_tributary] xmax (pc)= ', np.max(xmax.to('pc')))
    print('[sph_tributary] ymin (pc)= ', np.min(ymin.to('pc')))
    print('[sph_tributary] ymax (pc)= ', np.max(ymax.to('pc')))
    print('[sph_tributary] zmin (pc)= ', np.min(zmin.to('pc')))
    print('[sph_tributary] zmax (pc)= ', np.max(zmax.to('pc')))

    # Convert from z-first ordering (yt's default) to x-first ordering
    # (the script should work both ways).
    refined_array = np.squeeze(np.array(refined))
    order = find_order(refined_array)

    refined_reordered = []
    dustdens_reordered = np.zeros(len(order))
    for new_idx, old_idx in enumerate(order):
        refined_reordered.append(refined[old_idx])
        dustdens_reordered[new_idx] = dustdens[old_idx]
    refined = refined_reordered
    dustdens = dustdens_reordered

    # Hyperion octree sanity checks.
    max_level = hos.hyperion_octree_stats(refined)
    pto.test_octree(refined, max_level)

    dump_cell_info(refined, fc1, fw1, xmin, xmax, ymin, ymax, zmin, zmax)
    np.save('refined.npy', refined)
    np.save('density.npy', dustdens)

    # ========================================================================
    # Initialize Hyperion Model
    # ========================================================================

    m = Model()

    print('Setting Octree Grid with Parameters: ')
    # The octree is centred on the origin; the boost vector above records
    # the physical centre so positions can be shifted consistently.
    m.set_octree_grid(0, 0, 0, dx / 2, dy / 2, dz / 2, refined)

    # Specific energy absorbed from the CMB, applied uniformly to all cells.
    energy_density_absorbed = energy_density_absorbed_by_CMB()
    specific_energy = np.repeat(energy_density_absorbed.value, dustdens.shape)

    if cfg.par.PAH:
        # Load PAH fractions for usg, vsg, and big (grain sizes) and
        # normalise them so they sum to 1.
        frac = cfg.par.PAH_frac
        total = np.sum(list(frac.values()))
        frac = {k: v / total for k, v in frac.items()}

        for size in frac:
            d = SphericalDust(cfg.par.dustdir + '%s.hdf5' % size)
            if cfg.par.SUBLIMATION:
                d.set_sublimation_temperature(
                    'fast', temperature=cfg.par.SUBLIMATION_TEMPERATURE)
            m.add_density_grid(dustdens * frac[size], d,
                               specific_energy=specific_energy)
        m.set_enforce_energy_range(cfg.par.enforce_energy_range)
    else:
        d = SphericalDust(cfg.par.dustdir + cfg.par.dustfile)
        if cfg.par.SUBLIMATION:
            d.set_sublimation_temperature(
                'fast', temperature=cfg.par.SUBLIMATION_TEMPERATURE)
        m.add_density_grid(dustdens, d, specific_energy=specific_energy)

    m.set_specific_energy_type('additional')

    return m, xcent, ycent, zcent, dx, dy, dz, reg, ds, boost
| [
"powderday.helpers.energy_density_absorbed_by_CMB",
"numpy.repeat",
"powderday.find_order.find_order",
"matplotlib.use",
"hyperion.model.Model",
"hyperion.dust.SphericalDust",
"numpy.squeeze",
"numpy.max",
"numpy.array",
"powderday.powderday_test_octree.test_octree",
"numpy.min",
"powderday.hy... | [((126, 140), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (133, 140), True, 'import matplotlib as mpl\n'), ((596, 632), 'powderday.grid_construction.yt_octree_generate', 'yt_octree_generate', (['fname', 'field_add'], {}), '(fname, field_add)\n', (614, 632), False, 'from powderday.grid_construction import yt_octree_generate\n'), ((1775, 1806), 'numpy.array', 'np.array', (['[xcent, ycent, zcent]'], {}), '([xcent, ycent, zcent])\n', (1783, 1806), True, 'import numpy as np\n'), ((2377, 2394), 'numpy.array', 'np.array', (['refined'], {}), '(refined)\n', (2385, 2394), True, 'import numpy as np\n'), ((2415, 2440), 'numpy.squeeze', 'np.squeeze', (['refined_array'], {}), '(refined_array)\n', (2425, 2440), True, 'import numpy as np\n'), ((2458, 2483), 'powderday.find_order.find_order', 'find_order', (['refined_array'], {}), '(refined_array)\n', (2468, 2483), False, 'from powderday.find_order import find_order\n'), ((2815, 2849), 'powderday.hyperion_octree_stats.hyperion_octree_stats', 'hos.hyperion_octree_stats', (['refined'], {}), '(refined)\n', (2840, 2849), True, 'import powderday.hyperion_octree_stats as hos\n'), ((2856, 2891), 'powderday.powderday_test_octree.test_octree', 'pto.test_octree', (['refined', 'max_level'], {}), '(refined, max_level)\n', (2871, 2891), True, 'import powderday.powderday_test_octree as pto\n'), ((2896, 2965), 'powderday.analytics.dump_cell_info', 'dump_cell_info', (['refined', 'fc1', 'fw1', 'xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax'], {}), '(refined, fc1, fw1, xmin, xmax, ymin, ymax, zmin, zmax)\n', (2910, 2965), False, 'from powderday.analytics import dump_cell_info\n'), ((2962, 2993), 'numpy.save', 'np.save', (['"""refined.npy"""', 'refined'], {}), "('refined.npy', refined)\n", (2969, 2993), True, 'import numpy as np\n'), ((2997, 3029), 'numpy.save', 'np.save', (['"""density.npy"""', 'dustdens'], {}), "('density.npy', dustdens)\n", (3004, 3029), True, 'import numpy as np\n'), ((3231, 3238), 
'hyperion.model.Model', 'Model', ([], {}), '()\n', (3236, 3238), False, 'from hyperion.model import Model\n'), ((3488, 3520), 'powderday.helpers.energy_density_absorbed_by_CMB', 'energy_density_absorbed_by_CMB', ([], {}), '()\n', (3518, 3520), False, 'from powderday.helpers import energy_density_absorbed_by_CMB\n'), ((3543, 3599), 'numpy.repeat', 'np.repeat', (['energy_density_absorbed.value', 'dustdens.shape'], {}), '(energy_density_absorbed.value, dustdens.shape)\n', (3552, 3599), True, 'import numpy as np\n'), ((4359, 4408), 'hyperion.dust.SphericalDust', 'SphericalDust', (['(cfg.par.dustdir + cfg.par.dustfile)'], {}), '(cfg.par.dustdir + cfg.par.dustfile)\n', (4372, 4408), False, 'from hyperion.dust import SphericalDust\n'), ((1419, 1431), 'numpy.max', 'np.max', (['xmax'], {}), '(xmax)\n', (1425, 1431), True, 'import numpy as np\n'), ((1432, 1444), 'numpy.min', 'np.min', (['xmin'], {}), '(xmin)\n', (1438, 1444), True, 'import numpy as np\n'), ((1462, 1474), 'numpy.max', 'np.max', (['ymax'], {}), '(ymax)\n', (1468, 1474), True, 'import numpy as np\n'), ((1475, 1487), 'numpy.min', 'np.min', (['ymin'], {}), '(ymin)\n', (1481, 1487), True, 'import numpy as np\n'), ((1505, 1517), 'numpy.max', 'np.max', (['zmax'], {}), '(zmax)\n', (1511, 1517), True, 'import numpy as np\n'), ((1518, 1530), 'numpy.min', 'np.min', (['zmin'], {}), '(zmin)\n', (1524, 1530), True, 'import numpy as np\n'), ((3909, 3958), 'hyperion.dust.SphericalDust', 'SphericalDust', (["(cfg.par.dustdir + '%s.hdf5' % size)"], {}), "(cfg.par.dustdir + '%s.hdf5' % size)\n", (3922, 3958), False, 'from hyperion.dust import SphericalDust\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
import fixtures
from keystoneauth1 import session as ks_session
import mock
import six
from testtools import matchers
from ceilometerclient.apiclient import client as api_client
from ceilometerclient import client
from ceilometerclient import exc
from ceilometerclient import shell as ceilometer_shell
from ceilometerclient.tests.unit import utils
# Minimal keystone v2 auth environment consumed via make_env() in the tests.
FAKE_V2_ENV = {'OS_USERNAME': 'username',
               'OS_PASSWORD': 'password',
               'OS_TENANT_NAME': 'tenant_name',
               'OS_AUTH_URL': 'http://localhost:5000/v2.0'}
# Minimal keystone v3 auth environment (domain/project scoped variant).
FAKE_V3_ENV = {'OS_USERNAME': 'username',
               'OS_PASSWORD': 'password',
               'OS_USER_DOMAIN_NAME': 'domain_name',
               'OS_PROJECT_ID': '1234567890',
               'OS_AUTH_URL': 'http://localhost:5000/v3'}
class ShellTestBase(utils.BaseTestCase):
    """Shared helpers for exercising the ceilometer command-line shell."""
    @mock.patch('sys.stdout', new=six.StringIO())
    @mock.patch.object(ks_session, 'Session', mock.MagicMock())
    @mock.patch.object(client.client.HTTPClient,
                       'client_request', mock.MagicMock())
    def shell(self, argstr):
        """Run the CLI with *argstr* and return its captured stdout.

        A SystemExit with exit code 0 counts as a clean run; any other
        exit code fails the test.
        """
        try:
            _shell = ceilometer_shell.CeilometerShell()
            _shell.main(argstr.split())
        except SystemExit:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.assertEqual(0, exc_value.code)
        return sys.stdout.getvalue()
    # Patch os.environ to avoid required auth info.
    def make_env(self, env_version, exclude=None):
        """Install *env_version* (minus the *exclude* key) as os.environ."""
        env = dict((k, v) for k, v in env_version.items() if k != exclude)
        self.useFixture(fixtures.MonkeyPatch('os.environ', env))
class ShellHelpTest(ShellTestBase):
    """Checks on the CLI's help output."""
    RE_OPTIONS = re.DOTALL | re.MULTILINE

    def _check_patterns(self, text, patterns):
        # Every regex in *patterns* must match somewhere in *text*.
        for pattern in patterns:
            self.assertThat(text,
                            matchers.MatchesRegex(pattern, self.RE_OPTIONS))

    def test_help_unknown_command(self):
        self.assertRaises(exc.CommandError, self.shell, 'help foofoo')

    def test_help(self):
        expected = [
            '.*?^usage: ceilometer',
            '.*?^See "ceilometer help COMMAND" '
            'for help on a specific command',
        ]
        for argstr in ('--help', 'help'):
            self._check_patterns(self.shell(argstr), expected)

    def test_help_on_subcommand(self):
        expected = [
            '.*?^usage: ceilometer meter-list',
            ".*?^List the user's meter",
        ]
        for argstr in ('help meter-list',):
            self._check_patterns(self.shell(argstr), expected)

    def test_get_base_parser(self):
        # The default request timeout baked into the parser is 600 seconds.
        parser = ceilometer_shell.CeilometerShell().get_base_parser()
        self.assertEqual(600, parser.get_default('timeout'))
class ShellBashCompletionTest(ShellTestBase):
    """The bash-completion command should only list user-facing commands."""

    def test_bash_completion(self):
        words = self.shell("bash-completion").split(' ')
        # The completion helper itself must not be advertised.
        self.assertNotIn('bash_completion', words)
        pattern = r'[a-z0-9-]'
        for word in words:
            self.assertThat(word, matchers.MatchesRegex(pattern))
class ShellKeystoneV2Test(ShellTestBase):
    """CLI error handling when keystone v2 authentication fails."""
    @mock.patch.object(ks_session, 'Session')
    @mock.patch('ceilometerclient.v2.client.Client._get_redirect_client',
                mock.Mock(return_value=None))
    def test_debug_switch_raises_error(self, mock_ksclient):
        # With --debug the auth failure surfaces as a CommandError.
        mock_ksclient.side_effect = exc.HTTPUnauthorized
        self.make_env(FAKE_V2_ENV)
        args = ['--debug', 'event-list']
        self.assertRaises(exc.CommandError, ceilometer_shell.main, args)
    @mock.patch.object(ks_session, 'Session')
    @mock.patch('ceilometerclient.v2.client.Client._get_redirect_client',
                mock.Mock(return_value=None))
    def test_dash_d_switch_raises_error(self, mock_ksclient):
        # -d is the short spelling of --debug and behaves the same way.
        mock_ksclient.side_effect = exc.CommandError("FAIL")
        self.make_env(FAKE_V2_ENV)
        args = ['-d', 'event-list']
        self.assertRaises(exc.CommandError, ceilometer_shell.main, args)
    @mock.patch('sys.stderr')
    @mock.patch.object(ks_session, 'Session')
    def test_no_debug_switch_no_raises_errors(self, mock_ksclient, __):
        # Without --debug the CLI swallows the traceback and exits instead.
        mock_ksclient.side_effect = exc.HTTPUnauthorized("FAIL")
        self.make_env(FAKE_V2_ENV)
        args = ['event-list']
        self.assertRaises(SystemExit, ceilometer_shell.main, args)
class ShellKeystoneV3Test(ShellTestBase):
    """CLI error handling when keystone v3 authentication fails."""
    @mock.patch.object(ks_session, 'Session')
    @mock.patch('ceilometerclient.v2.client.Client._get_redirect_client',
                mock.Mock(return_value=None))
    def test_debug_switch_raises_error(self, mock_ksclient):
        # With --debug the auth failure surfaces as a CommandError.
        mock_ksclient.side_effect = exc.HTTPUnauthorized
        self.make_env(FAKE_V3_ENV)
        args = ['--debug', 'event-list']
        self.assertRaises(exc.CommandError, ceilometer_shell.main, args)
    # NOTE(review): unlike ShellKeystoneV2Test, this test does not patch
    # _get_redirect_client -- confirm whether that asymmetry is intentional.
    @mock.patch.object(ks_session, 'Session')
    def test_dash_d_switch_raises_error(self, mock_ksclient):
        # -d is the short spelling of --debug and behaves the same way.
        mock_ksclient.side_effect = exc.CommandError("FAIL")
        self.make_env(FAKE_V3_ENV)
        args = ['-d', 'event-list']
        self.assertRaises(exc.CommandError, ceilometer_shell.main, args)
    @mock.patch('sys.stderr')
    @mock.patch.object(ks_session, 'Session')
    def test_no_debug_switch_no_raises_errors(self, mock_ksclient, __):
        # Without --debug the CLI swallows the traceback and exits instead.
        mock_ksclient.side_effect = exc.HTTPUnauthorized("FAIL")
        self.make_env(FAKE_V3_ENV)
        args = ['event-list']
        self.assertRaises(SystemExit, ceilometer_shell.main, args)
class ShellTimeoutTest(ShellTestBase):
    """Validation and propagation of the --timeout CLI argument."""

    # Common prefix argparse prepends to every --timeout validation error.
    _ERR_PREFIX = 'ceilometer: error: argument --timeout: '

    @mock.patch('sys.stderr', new=six.StringIO())
    def _test_timeout(self, timeout, expected_msg):
        # The CLI must exit and print *expected_msg* as the last stderr line.
        args = ['--timeout', timeout, 'alarm-list']
        self.assertRaises(SystemExit, ceilometer_shell.main, args)
        last_line = sys.stderr.getvalue().splitlines()[-1]
        self.assertEqual(expected_msg, last_line)

    def test_timeout_invalid_value(self):
        self._test_timeout('abc', self._ERR_PREFIX + 'abc must be an integer')

    def test_timeout_negative_value(self):
        self._test_timeout('-1', self._ERR_PREFIX + '-1 must be greater than 0')

    def test_timeout_float_value(self):
        self._test_timeout('1.5', self._ERR_PREFIX + '1.5 must be an integer')

    def test_timeout_zero(self):
        self._test_timeout('0', self._ERR_PREFIX + '0 must be greater than 0')

    @mock.patch.object(ks_session, 'Session')
    @mock.patch('ceilometerclient.v2.client.Client._get_redirect_client',
                mock.Mock(return_value=None))
    def test_timeout_keystone_session(self, mocked_session):
        # A valid timeout must be forwarded to the keystone session.
        mocked_session.side_effect = exc.HTTPUnauthorized("FAIL")
        self.make_env(FAKE_V2_ENV)
        args = ['--debug', '--timeout', '5', 'alarm-list']
        self.assertRaises(exc.CommandError, ceilometer_shell.main, args)
        args, kwargs = mocked_session.call_args
        self.assertEqual(5, kwargs.get('timeout'))
class ShellInsecureTest(ShellTestBase):
    """--os-insecure must map onto the ``verify`` kwarg of both clients."""
    @mock.patch.object(api_client, 'HTTPClient')
    @mock.patch('ceilometerclient.v2.client.Client._get_redirect_client',
                mock.Mock(return_value=None))
    def test_insecure_true_ceilometer(self, mocked_client):
        # --os-insecure true -> verify=False on the ceilometer HTTP client.
        self.make_env(FAKE_V2_ENV)
        args = ['--debug', '--os-insecure', 'true', 'alarm-list']
        self.assertIsNone(ceilometer_shell.main(args))
        args, kwargs = mocked_client.call_args
        self.assertFalse(kwargs.get('verify'))
    @mock.patch.object(ks_session, 'Session')
    @mock.patch('ceilometerclient.v2.client.Client._get_redirect_client',
                mock.Mock(return_value=None))
    def test_insecure_true_keystone(self, mocked_session):
        # --os-insecure true -> verify=False on the keystone session.
        mocked_session.side_effect = exc.HTTPUnauthorized("FAIL")
        self.make_env(FAKE_V2_ENV)
        args = ['--debug', '--os-insecure', 'true', 'alarm-list']
        self.assertRaises(exc.CommandError, ceilometer_shell.main, args)
        args, kwargs = mocked_session.call_args
        self.assertFalse(kwargs.get('verify'))
    @mock.patch.object(api_client, 'HTTPClient')
    @mock.patch('ceilometerclient.v2.client.Client._get_redirect_client',
                mock.Mock(return_value=None))
    def test_insecure_false_ceilometer(self, mocked_client):
        # --os-insecure false -> verify=True on the ceilometer HTTP client.
        self.make_env(FAKE_V2_ENV)
        args = ['--debug', '--os-insecure', 'false', 'alarm-list']
        self.assertIsNone(ceilometer_shell.main(args))
        args, kwargs = mocked_client.call_args
        self.assertTrue(kwargs.get('verify'))
    @mock.patch.object(ks_session, 'Session')
    @mock.patch('ceilometerclient.v2.client.Client._get_redirect_client',
                mock.Mock(return_value=None))
    def test_insecure_false_keystone(self, mocked_session):
        # --os-insecure false -> verify=True on the keystone session.
        mocked_session.side_effect = exc.HTTPUnauthorized("FAIL")
        self.make_env(FAKE_V2_ENV)
        args = ['--debug', '--os-insecure', 'false', 'alarm-list']
        self.assertRaises(exc.CommandError, ceilometer_shell.main, args)
        args, kwargs = mocked_session.call_args
        self.assertTrue(kwargs.get('verify'))
class ShellEndpointTest(ShellTestBase):
    """Endpoint/token CLI flags must reach the client constructor."""

    @mock.patch('ceilometerclient.v2.client.Client')
    def _test_endpoint_and_token(self, token_name, endpoint_name, mocked):
        # Run the CLI with the given flag spellings and verify that both
        # values are handed to the v2 client.
        cli_args = ['--debug', token_name, 'fake-token',
                    endpoint_name, 'http://fake-url', 'alarm-list']
        self.assertIsNone(ceilometer_shell.main(cli_args))
        _, kwargs = mocked.call_args
        self.assertEqual('http://fake-url', kwargs.get('endpoint'))
        self.assertEqual('fake-token', kwargs.get('token'))

    def test_endpoint_and_token(self):
        # Every combination of the legacy and current flag names must work.
        for token_flag in ('--os-auth-token', '--os-token'):
            for endpoint_flag in ('--ceilometer-url', '--os-endpoint'):
                self._test_endpoint_and_token(token_flag, endpoint_flag)
class ShellAlarmUpdateRepeatAction(ShellTestBase):
    """alarm-*-update without --repeat-actions must leave it unset."""

    @mock.patch('ceilometerclient.v2.alarms.AlarmManager.update')
    @mock.patch('ceilometerclient.v2.client.Client._get_redirect_client',
                mock.Mock())
    def test_repeat_action_not_specified(self, mocked):
        self.make_env(FAKE_V2_ENV)
        update_commands = ('alarm-update',
                           'alarm-threshold-update',
                           'alarm-combination-update',
                           'alarm-event-update')
        for method in update_commands:
            ceilometer_shell.main(['--debug', method, '--state', 'alarm', '123'])
            _, kwargs = mocked.call_args
            self.assertIsNone(kwargs.get('repeat_actions'))
| [
"fixtures.MonkeyPatch",
"ceilometerclient.shell.CeilometerShell",
"mock.patch",
"testtools.matchers.MatchesRegex",
"ceilometerclient.shell.main",
"mock.Mock",
"sys.stderr.getvalue",
"ceilometerclient.exc.HTTPUnauthorized",
"ceilometerclient.exc.CommandError",
"mock.patch.object",
"sys.exc_info",... | [((3955, 3995), 'mock.patch.object', 'mock.patch.object', (['ks_session', '"""Session"""'], {}), "(ks_session, 'Session')\n", (3972, 3995), False, 'import mock\n'), ((4389, 4429), 'mock.patch.object', 'mock.patch.object', (['ks_session', '"""Session"""'], {}), "(ks_session, 'Session')\n", (4406, 4429), False, 'import mock\n'), ((4823, 4847), 'mock.patch', 'mock.patch', (['"""sys.stderr"""'], {}), "('sys.stderr')\n", (4833, 4847), False, 'import mock\n'), ((4853, 4893), 'mock.patch.object', 'mock.patch.object', (['ks_session', '"""Session"""'], {}), "(ks_session, 'Session')\n", (4870, 4893), False, 'import mock\n'), ((5213, 5253), 'mock.patch.object', 'mock.patch.object', (['ks_session', '"""Session"""'], {}), "(ks_session, 'Session')\n", (5230, 5253), False, 'import mock\n'), ((5647, 5687), 'mock.patch.object', 'mock.patch.object', (['ks_session', '"""Session"""'], {}), "(ks_session, 'Session')\n", (5664, 5687), False, 'import mock\n'), ((5961, 5985), 'mock.patch', 'mock.patch', (['"""sys.stderr"""'], {}), "('sys.stderr')\n", (5971, 5985), False, 'import mock\n'), ((5991, 6031), 'mock.patch.object', 'mock.patch.object', (['ks_session', '"""Session"""'], {}), "(ks_session, 'Session')\n", (6008, 6031), False, 'import mock\n'), ((7469, 7509), 'mock.patch.object', 'mock.patch.object', (['ks_session', '"""Session"""'], {}), "(ks_session, 'Session')\n", (7486, 7509), False, 'import mock\n'), ((8071, 8114), 'mock.patch.object', 'mock.patch.object', (['api_client', '"""HTTPClient"""'], {}), "(api_client, 'HTTPClient')\n", (8088, 8114), False, 'import mock\n'), ((8551, 8591), 'mock.patch.object', 'mock.patch.object', (['ks_session', '"""Session"""'], {}), "(ks_session, 'Session')\n", (8568, 8591), False, 'import mock\n'), ((9112, 9155), 'mock.patch.object', 'mock.patch.object', (['api_client', '"""HTTPClient"""'], {}), "(api_client, 'HTTPClient')\n", (9129, 9155), False, 'import mock\n'), ((9593, 9633), 'mock.patch.object', 'mock.patch.object', 
(['ks_session', '"""Session"""'], {}), "(ks_session, 'Session')\n", (9610, 9633), False, 'import mock\n'), ((10197, 10244), 'mock.patch', 'mock.patch', (['"""ceilometerclient.v2.client.Client"""'], {}), "('ceilometerclient.v2.client.Client')\n", (10207, 10244), False, 'import mock\n'), ((11050, 11110), 'mock.patch', 'mock.patch', (['"""ceilometerclient.v2.alarms.AlarmManager.update"""'], {}), "('ceilometerclient.v2.alarms.AlarmManager.update')\n", (11060, 11110), False, 'import mock\n'), ((1928, 1949), 'sys.stdout.getvalue', 'sys.stdout.getvalue', ([], {}), '()\n', (1947, 1949), False, 'import sys\n'), ((1509, 1525), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1523, 1525), False, 'import mock\n'), ((1617, 1633), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1631, 1633), False, 'import mock\n'), ((3378, 3412), 'ceilometerclient.shell.CeilometerShell', 'ceilometer_shell.CeilometerShell', ([], {}), '()\n', (3410, 3412), True, 'from ceilometerclient import shell as ceilometer_shell\n'), ((4086, 4114), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (4095, 4114), False, 'import mock\n'), ((4648, 4672), 'ceilometerclient.exc.CommandError', 'exc.CommandError', (['"""FAIL"""'], {}), "('FAIL')\n", (4664, 4672), False, 'from ceilometerclient import exc\n'), ((4520, 4548), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (4529, 4548), False, 'import mock\n'), ((5002, 5030), 'ceilometerclient.exc.HTTPUnauthorized', 'exc.HTTPUnauthorized', (['"""FAIL"""'], {}), "('FAIL')\n", (5022, 5030), False, 'from ceilometerclient import exc\n'), ((5344, 5372), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (5353, 5372), False, 'import mock\n'), ((5786, 5810), 'ceilometerclient.exc.CommandError', 'exc.CommandError', (['"""FAIL"""'], {}), "('FAIL')\n", (5802, 5810), False, 'from ceilometerclient import exc\n'), ((6140, 6168), 'ceilometerclient.exc.HTTPUnauthorized', 
'exc.HTTPUnauthorized', (['"""FAIL"""'], {}), "('FAIL')\n", (6160, 6168), False, 'from ceilometerclient import exc\n'), ((7728, 7756), 'ceilometerclient.exc.HTTPUnauthorized', 'exc.HTTPUnauthorized', (['"""FAIL"""'], {}), "('FAIL')\n", (7748, 7756), False, 'from ceilometerclient import exc\n'), ((7600, 7628), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (7609, 7628), False, 'import mock\n'), ((8205, 8233), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (8214, 8233), False, 'import mock\n'), ((8808, 8836), 'ceilometerclient.exc.HTTPUnauthorized', 'exc.HTTPUnauthorized', (['"""FAIL"""'], {}), "('FAIL')\n", (8828, 8836), False, 'from ceilometerclient import exc\n'), ((8682, 8710), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (8691, 8710), False, 'import mock\n'), ((9246, 9274), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (9255, 9274), False, 'import mock\n'), ((9851, 9879), 'ceilometerclient.exc.HTTPUnauthorized', 'exc.HTTPUnauthorized', (['"""FAIL"""'], {}), "('FAIL')\n", (9871, 9879), False, 'from ceilometerclient import exc\n'), ((9724, 9752), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (9733, 9752), False, 'import mock\n'), ((11201, 11212), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (11210, 11212), False, 'import mock\n'), ((1698, 1732), 'ceilometerclient.shell.CeilometerShell', 'ceilometer_shell.CeilometerShell', ([], {}), '()\n', (1730, 1732), True, 'from ceilometerclient import shell as ceilometer_shell\n'), ((1447, 1461), 'six.StringIO', 'six.StringIO', ([], {}), '()\n', (1459, 1461), False, 'import six\n'), ((2153, 2192), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""os.environ"""', 'env'], {}), "('os.environ', env)\n", (2173, 2192), False, 'import fixtures\n'), ((6377, 6391), 'six.StringIO', 'six.StringIO', ([], {}), '()\n', (6389, 6391), False, 'import six\n'), 
((8422, 8449), 'ceilometerclient.shell.main', 'ceilometer_shell.main', (['args'], {}), '(args)\n', (8443, 8449), True, 'from ceilometerclient import shell as ceilometer_shell\n'), ((9465, 9492), 'ceilometerclient.shell.main', 'ceilometer_shell.main', (['args'], {}), '(args)\n', (9486, 9492), True, 'from ceilometerclient import shell as ceilometer_shell\n'), ((10463, 10490), 'ceilometerclient.shell.main', 'ceilometer_shell.main', (['args'], {}), '(args)\n', (10484, 10490), True, 'from ceilometerclient import shell as ceilometer_shell\n'), ((11411, 11438), 'ceilometerclient.shell.main', 'ceilometer_shell.main', (['args'], {}), '(args)\n', (11432, 11438), True, 'from ceilometerclient import shell as ceilometer_shell\n'), ((1849, 1863), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1861, 1863), False, 'import sys\n'), ((3868, 3902), 'testtools.matchers.MatchesRegex', 'matchers.MatchesRegex', (['"""[a-z0-9-]"""'], {}), "('[a-z0-9-]')\n", (3889, 3902), False, 'from testtools import matchers\n'), ((2767, 2808), 'testtools.matchers.MatchesRegex', 'matchers.MatchesRegex', (['r', 'self.RE_OPTIONS'], {}), '(r, self.RE_OPTIONS)\n', (2788, 2808), False, 'from testtools import matchers\n'), ((3271, 3312), 'testtools.matchers.MatchesRegex', 'matchers.MatchesRegex', (['r', 'self.RE_OPTIONS'], {}), '(r, self.RE_OPTIONS)\n', (3292, 3312), False, 'from testtools import matchers\n'), ((6603, 6624), 'sys.stderr.getvalue', 'sys.stderr.getvalue', ([], {}), '()\n', (6622, 6624), False, 'import sys\n')] |
#!/usr/bin/env python
# coding: utf-8
import argparse
import os
import sys
from pathlib import Path
import matplotlib.pyplot as plt
import pytorch_lightning as pl
from src.config.config import SEED
from src.dataset.seg_datamodule import Lyft3DdetSegDatamodule
from src.modeling.seg_pl_model import LitModel
from src.utils.util import print_argparse_arguments, set_random_seed
def main(args: argparse.Namespace) -> None:
    """Train or evaluate the BEV segmentation model depending on --is_test.

    Seeds all RNGs, restricts the visible GPUs, builds the Lyft 3D-detection
    segmentation datamodule, then either evaluates a checkpoint
    (``trainer.test``) or fits a fresh ``LitModel``.
    """
    set_random_seed(SEED)
    # Restrict which GPUs torch/lightning can see; done before any model/trainer
    # is created so CUDA contexts only exist on the requested devices.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
    # mypy error due to pl.DataModule.transfer_batch_to_device
    det_dm = Lyft3DdetSegDatamodule(  # type: ignore[abstract]
        args.bev_data_dir,
        val_hosts=args.val_hosts,
        batch_size=args.batch_size,
        aug_mode=args.aug_mode,
        num_workers=args.num_workers,
        is_debug=args.is_debug,
    )
    det_dm.prepare_data()
    if args.is_test:
        # --test_with_val evaluates on the validation split, which is only
        # materialized by the "fit" stage setup.
        det_dm.setup(stage="test" if not args.test_with_val else "fit")
        print("\t\t ==== TEST MODE ====")
        print("load from: ", args.ckpt_path)
        model = LitModel.load_from_checkpoint(
            args.ckpt_path,
            # Predictions are written next to the checkpoint file.
            output_dir=str(Path(args.ckpt_path).parent),
            flip_tta=args.flip_tta,
            background_threshold=args.background_threshold,
        )
        # Check the image resolution. Train and test bev resolution should be the same.
        assert model.hparams.bev_config.voxel_size_xy == det_dm.bev_config.voxel_size_xy
        assert model.hparams.bev_config.voxel_size_z == det_dm.bev_config.voxel_size_z
        assert model.hparams.bev_config.box_scale == det_dm.bev_config.box_scale
        # Image size can be different between training and test.
        model.hparams.bev_config.image_size = det_dm.bev_config.image_size
        trainer = pl.Trainer(gpus=len(args.visible_gpus.split(",")))
        if args.test_with_val:
            trainer.test(model, test_dataloaders=det_dm.val_dataloader())
        else:
            trainer.test(model, datamodule=det_dm)
        # test_gt_path = os.path.join(os.path.dirname(det_dm.test_path), "gt.csv")
        # if os.path.exists(test_gt_path):
        #     print("test mode with validation chopped dataset, and check the metrics")
        #     print("validation ground truth path: ", test_gt_path)
    else:
        print("\t\t ==== TRAIN MODE ====")
        print(
            "training samples: {}, valid samples: {}".format(
                len(det_dm.train_dataset), len(det_dm.val_dataset)
            )
        )
        model = LitModel(
            det_dm.bev_config,
            det_dm.val_hosts,
            len(det_dm.train_dataset),
            lr=args.lr,
            aug_mode=args.aug_mode,
            backbone_name=args.backbone_name,
            optim_name=args.optim_name,
            ba_size=args.batch_size,  # NOTE: LitModel's parameter is named ba_size, not batch_size
            epochs=args.epochs,
        )
        # Keep the best-val_f1 checkpoint plus the last epoch.
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            monitor="val_f1",
            save_last=True,
            mode="max",
            verbose=True,
        )
        pl.trainer.seed_everything(seed=SEED)
        trainer = pl.Trainer(
            resume_from_checkpoint=args.resume_from_checkpoint,
            gpus=len(args.visible_gpus.split(",")),
            max_epochs=args.epochs,
            precision=args.precision,
            benchmark=True,
            deterministic=False,
            checkpoint_callback=checkpoint_callback,
        )
        # Run lr finder
        if args.find_lr:
            lr_finder = trainer.tuner.lr_find(model, datamodule=det_dm)
            lr_finder.plot(suggest=True)
            plt.show()
            sys.exit()
        # Run Training
        trainer.fit(model, datamodule=det_dm)
if __name__ == "__main__":
    # CLI for the BEV-segmentation training/evaluation script; defaults are
    # shown in --help thanks to ArgumentDefaultsHelpFormatter.
    parser = argparse.ArgumentParser(
        description="Run training for lyft 3d detection with bev image",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--bev_data_dir",
        default="/your/dataset/path",
        type=str,
        help="root directory path for bev, ",
    )
    parser.add_argument(
        "--optim_name",
        choices=["adam", "sgd"],
        default="adam",
        help="optimizer name",
    )
    parser.add_argument("--lr", default=1.0e-2, type=float, help="learning rate")
    parser.add_argument("--batch_size", type=int, default=96, help="batch size")
    parser.add_argument("--epochs", type=int, default=50, help="epochs for training")
    parser.add_argument(
        "--backbone_name",
        choices=[
            "efficientnet-b1",
            "efficientnet-b2",
            "timm-resnest50d",
            "timm-resnest269e",
        ],
        default="timm-resnest50d",
        help="backbone name",
    )
    parser.add_argument("--is_test", action="store_true", help="test mode")
    parser.add_argument(
        "--ckpt_path",
        type=str,
        default="./model.pth",
        help="path for model checkpoint at test mode",
    )
    parser.add_argument(
        "--test_with_val", action="store_true", help="test mode with validation data"
    )
    parser.add_argument(
        "--flip_tta", action="store_true", help="test time augmentation h/vflip"
    )
    parser.add_argument(
        "--precision",
        default=16,
        choices=[16, 32],
        type=int,
        help="float precision at training",
    )
    parser.add_argument(
        "--val_hosts",
        default=0,
        choices=[0, 1, 2, 3],
        type=int,
        help="validation hosts configuration for train/val split",
    )
    parser.add_argument(
        "--aug_mode",
        default=0,
        choices=[0, 1],
        type=int,
        help="augmentation mode",
    )
    parser.add_argument(
        "--background_threshold",
        default=200,
        type=int,
        help="background threshold for 2d predicted mask, only used at test mode",
    )
    parser.add_argument(
        "--visible_gpus",
        type=str,
        default="0",
        help="Select gpu ids with comma separated format",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="resume training from a specific checkpoint path",
    )
    parser.add_argument(
        "--find_lr",
        action="store_true",
        help="find lr with fast ai implementation",
    )
    parser.add_argument(
        "--num_workers",
        # Was default="16" (a string); argparse converted it via type=int, so
        # behavior is unchanged, but an int default matches every other option.
        default=16,
        type=int,
        help="number of cpus for DataLoader",
    )
    parser.add_argument("--is_debug", action="store_true", help="debug mode")
    args = parser.parse_args()
    if args.is_debug:
        DEBUG = True
        print("\t ---- DEBUG RUN ---- ")
        VAL_INTERVAL_SAMPLES = 5000
        args.batch_size = 16
    else:
        DEBUG = False
        print("\t ---- NORMAL RUN ---- ")
    print_argparse_arguments(args)
    main(args)
| [
"pytorch_lightning.callbacks.ModelCheckpoint",
"argparse.ArgumentParser",
"pathlib.Path",
"pytorch_lightning.trainer.seed_everything",
"src.dataset.seg_datamodule.Lyft3DdetSegDatamodule",
"src.utils.util.set_random_seed",
"sys.exit",
"src.utils.util.print_argparse_arguments",
"matplotlib.pyplot.show... | [((429, 450), 'src.utils.util.set_random_seed', 'set_random_seed', (['SEED'], {}), '(SEED)\n', (444, 450), False, 'from src.utils.util import print_argparse_arguments, set_random_seed\n'), ((587, 769), 'src.dataset.seg_datamodule.Lyft3DdetSegDatamodule', 'Lyft3DdetSegDatamodule', (['args.bev_data_dir'], {'val_hosts': 'args.val_hosts', 'batch_size': 'args.batch_size', 'aug_mode': 'args.aug_mode', 'num_workers': 'args.num_workers', 'is_debug': 'args.is_debug'}), '(args.bev_data_dir, val_hosts=args.val_hosts,\n batch_size=args.batch_size, aug_mode=args.aug_mode, num_workers=args.\n num_workers, is_debug=args.is_debug)\n', (609, 769), False, 'from src.dataset.seg_datamodule import Lyft3DdetSegDatamodule\n'), ((3761, 3915), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run training for lyft 3d detection with bev image"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Run training for lyft 3d detection with bev image', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (3784, 3915), False, 'import argparse\n'), ((6844, 6874), 'src.utils.util.print_argparse_arguments', 'print_argparse_arguments', (['args'], {}), '(args)\n', (6868, 6874), False, 'from src.utils.util import print_argparse_arguments, set_random_seed\n'), ((2902, 2994), 'pytorch_lightning.callbacks.ModelCheckpoint', 'pl.callbacks.ModelCheckpoint', ([], {'monitor': '"""val_f1"""', 'save_last': '(True)', 'mode': '"""max"""', 'verbose': '(True)'}), "(monitor='val_f1', save_last=True, mode='max',\n verbose=True)\n", (2930, 2994), True, 'import pytorch_lightning as pl\n'), ((3058, 3095), 'pytorch_lightning.trainer.seed_everything', 'pl.trainer.seed_everything', ([], {'seed': 'SEED'}), '(seed=SEED)\n', (3084, 3095), True, 'import pytorch_lightning as pl\n'), ((3615, 3625), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3623, 3625), True, 'import matplotlib.pyplot as plt\n'), ((3638, 3648), 
'sys.exit', 'sys.exit', ([], {}), '()\n', (3646, 3648), False, 'import sys\n'), ((1151, 1171), 'pathlib.Path', 'Path', (['args.ckpt_path'], {}), '(args.ckpt_path)\n', (1155, 1171), False, 'from pathlib import Path\n')] |
"""A simple library to ask the user for a password. Similar to getpass.getpass() but allows to specify a default mask (like '*' instead of blank)."""
__version__ = "0.5.5"
from sys import platform, stdin
# Provide a platform-appropriate getch(): read a single keypress without
# waiting for Enter and without echoing it.
if platform == "win32":
    from msvcrt import getch as __getch
    def getch():
        # msvcrt.getch() returns bytes; decode to match the unix variant.
        return __getch().decode()
else:
    # taken from https://stackoverflow.com/questions/1052107/reading-a-single-character-getch-style-in-python-is-not-working-in-unix
    from termios import tcgetattr, tcsetattr, TCSADRAIN
    from tty import setraw as tty_setraw
    def getch():
        # Switch the terminal to raw mode so one keypress is delivered
        # immediately, then always restore the previous settings.
        old_settings = tcgetattr(stdin)
        try:
            tty_setraw(stdin)
            char = stdin.read(1)
        finally:
            tcsetattr(stdin, TCSADRAIN, old_settings)
        return char
def get_password(prompt="Password: ", mask="*"):
    """Read a password from the terminal, echoing `mask` for each character.

    Enter finishes the entry; Backspace/Delete erase one character;
    Ctrl-C, Ctrl-D and Ctrl-Z abort the program.
    """
    ENTER = 13
    INTERRUPT_CODES = (3, 4, 26)  # Ctrl-C, Ctrl-D, Ctrl-Z
    ERASE_CODES = (8, 127)        # Backspace, Delete
    print(prompt, end="", flush=True)
    typed = []
    while True:
        char = getch()
        code = ord(char)
        if code == ENTER:
            print()
            return "".join(typed)
        if code in INTERRUPT_CODES:
            exit(0)
        if code in ERASE_CODES:
            if typed:
                print("\b \b", end="", flush=True)
                typed.pop()
        else:
            print(mask, end="", flush=True)
            typed.append(char)
# Tiny interactive demo: prompt for credentials and echo them back.
if __name__ == "__main__":
    username = input("Username: ")
    password = get_password(mask="#")
    print(f"Username: {username}; Password: {password}")
| [
"termios.tcsetattr",
"msvcrt.getch",
"termios.tcgetattr",
"sys.stdin.read",
"tty.setraw"
] | [((602, 618), 'termios.tcgetattr', 'tcgetattr', (['stdin'], {}), '(stdin)\n', (611, 618), False, 'from termios import tcgetattr, tcsetattr, TCSADRAIN\n'), ((644, 661), 'tty.setraw', 'tty_setraw', (['stdin'], {}), '(stdin)\n', (654, 661), True, 'from tty import setraw as tty_setraw\n'), ((681, 694), 'sys.stdin.read', 'stdin.read', (['(1)'], {}), '(1)\n', (691, 694), False, 'from sys import platform, stdin\n'), ((724, 765), 'termios.tcsetattr', 'tcsetattr', (['stdin', 'TCSADRAIN', 'old_settings'], {}), '(stdin, TCSADRAIN, old_settings)\n', (733, 765), False, 'from termios import tcgetattr, tcsetattr, TCSADRAIN\n'), ((304, 313), 'msvcrt.getch', '__getch', ([], {}), '()\n', (311, 313), True, 'from msvcrt import getch as __getch\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Copyright 2012-2021 Smartling, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this work except in compliance with the License.
* You may obtain a copy of the License in the LICENSE file, or at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from smartlingApiSdk.ApiV2 import ApiV2
class EstimatesApi(ApiV2):
    """Client for the Smartling Estimates API (fuzzy and cost estimate reports).

    Every method returns the ``(response, status)`` pair produced by the
    underlying ``ApiV2`` transport helpers.
    """

    def __init__(self, userIdentifier, userSecret, projectId, proxySettings=None, permanentHeaders={}, env='prod'):
        super().__init__(userIdentifier, userSecret, projectId, proxySettings, permanentHeaders=permanentHeaders, env=env)

    def getJobFuzzyEstimateReports(self, translationJobUid, reportStatus='', contentCoverage='', creatorUserUids=[], translationJobSchemaContents=[], tags=[], createdFrom='', createdTo='', limit=0, offset=0, **kwargs):
        """List fuzzy estimate reports for a translation job.
        GET /estimates-api/v2/projects/{projectId}/jobs/{translationJobUid}/reports/fuzzy
        https://api-reference.smartling.com/#operation/getJobFuzzyEstimateReports
        """
        payload = dict(
            reportStatus=reportStatus,
            contentCoverage=contentCoverage,
            creatorUserUids=creatorUserUids,
            translationJobSchemaContents=translationJobSchemaContents,
            tags=tags,
            createdFrom=createdFrom,
            createdTo=createdTo,
            limit=limit,
            offset=offset,
        )
        payload.update(kwargs)
        endpoint = self.urlHelper.getUrl('/estimates-api/v2/projects/{projectId}/jobs/{translationJobUid}/reports/fuzzy', translationJobUid=translationJobUid, **kwargs)
        return self.command('GET', endpoint, payload)

    def generateJobFuzzyEstimateReports(self, translationJobUid, contentType, tags, **kwargs):
        """Start generation of a fuzzy estimate report.
        POST /estimates-api/v2/projects/{projectId}/jobs/{translationJobUid}/reports/fuzzy
        https://api-reference.smartling.com/#operation/generateJobFuzzyEstimateReports
        """
        payload = dict(contentType=contentType, tags=tags)
        payload.update(kwargs)
        endpoint = self.urlHelper.getUrl('/estimates-api/v2/projects/{projectId}/jobs/{translationJobUid}/reports/fuzzy', translationJobUid=translationJobUid, **kwargs)
        return self.commandJson('POST', endpoint, payload)

    def getJobCostEstimateReports(self, translationJobUid, reportStatus='', contentCoverage='', creatorUserUids=[], translationJobSchemaContents=[], tags=[], createdFrom='', createdTo='', limit=0, offset=0, **kwargs):
        """List cost estimate reports for a translation job.
        GET /estimates-api/v2/projects/{projectId}/jobs/{translationJobUid}/reports/cost
        https://api-reference.smartling.com/#operation/getJobCostEstimateReports
        """
        payload = dict(
            reportStatus=reportStatus,
            contentCoverage=contentCoverage,
            creatorUserUids=creatorUserUids,
            translationJobSchemaContents=translationJobSchemaContents,
            tags=tags,
            createdFrom=createdFrom,
            createdTo=createdTo,
            limit=limit,
            offset=offset,
        )
        payload.update(kwargs)
        endpoint = self.urlHelper.getUrl('/estimates-api/v2/projects/{projectId}/jobs/{translationJobUid}/reports/cost', translationJobUid=translationJobUid, **kwargs)
        return self.command('GET', endpoint, payload)

    def generateJobCostEstimateReports(self, translationJobUid, contentType, tags, localeWorkflows, fuzzyProfileUid, **kwargs):
        """Start generation of a cost estimate report.
        POST /estimates-api/v2/projects/{projectId}/jobs/{translationJobUid}/reports/cost
        https://api-reference.smartling.com/#operation/generateJobCostEstimateReports
        """
        payload = dict(
            contentType=contentType,
            tags=tags,
            localeWorkflows=localeWorkflows,
            fuzzyProfileUid=fuzzyProfileUid,
        )
        payload.update(kwargs)
        endpoint = self.urlHelper.getUrl('/estimates-api/v2/projects/{projectId}/jobs/{translationJobUid}/reports/cost', translationJobUid=translationJobUid, **kwargs)
        return self.commandJson('POST', endpoint, payload)

    def getJobEstimateReportStatus(self, reportUid, reportStatus='', reportType='', **kwargs):
        """Fetch processing status of an estimate report.
        GET /estimates-api/v2/projects/{projectId}/reports/{reportUid}/status
        https://api-reference.smartling.com/#operation/getJobEstimateReportStatus
        """
        payload = dict(reportStatus=reportStatus, reportType=reportType)
        payload.update(kwargs)
        endpoint = self.urlHelper.getUrl('/estimates-api/v2/projects/{projectId}/reports/{reportUid}/status', reportUid=reportUid, **kwargs)
        return self.command('GET', endpoint, payload)

    def getJobEstimateReport(self, reportUid, reportStatus='', reportType='', **kwargs):
        """Fetch a single estimate report.
        GET /estimates-api/v2/projects/{projectId}/reports/{reportUid}
        https://api-reference.smartling.com/#operation/getJobEstimateReport
        """
        payload = dict(reportStatus=reportStatus, reportType=reportType)
        payload.update(kwargs)
        endpoint = self.urlHelper.getUrl('/estimates-api/v2/projects/{projectId}/reports/{reportUid}', reportUid=reportUid, **kwargs)
        return self.command('GET', endpoint, payload)

    def deleteJobEstimateReport(self, reportUid, **kwargs):
        """Delete an estimate report.
        DELETE /estimates-api/v2/projects/{projectId}/reports/{reportUid}
        https://api-reference.smartling.com/#operation/deleteJobEstimateReport
        """
        payload = dict(kwargs)
        endpoint = self.urlHelper.getUrl('/estimates-api/v2/projects/{projectId}/reports/{reportUid}', reportUid=reportUid, **kwargs)
        return self.command('DELETE', endpoint, payload)

    def modifyJobEstimateReportTags(self, reportUid, tags, **kwargs):
        """Replace the tags attached to an estimate report.
        PUT /estimates-api/v2/projects/{projectId}/reports/{reportUid}/tags
        https://api-reference.smartling.com/#operation/modifyJobEstimateReportTags
        """
        payload = dict(tags=tags)
        payload.update(kwargs)
        endpoint = self.urlHelper.getUrl('/estimates-api/v2/projects/{projectId}/reports/{reportUid}/tags', reportUid=reportUid, **kwargs)
        return self.commandJson('PUT', endpoint, payload)

    def exportJobEstimationReport(self, projectUid, reportUid, format, **kwargs):
        """Download an estimate report in the requested format.
        GET /estimates-api/v2/projects/{projectUid}/reports/{reportUid}/download
        https://api-reference.smartling.com/#operation/exportJobEstimationReport
        """
        payload = dict(format=format)
        payload.update(kwargs)
        endpoint = self.urlHelper.getUrl('/estimates-api/v2/projects/{projectUid}/reports/{reportUid}/download', projectUid=projectUid, reportUid=reportUid, **kwargs)
        return self.command('GET', endpoint, payload)
| [
"smartlingApiSdk.ApiV2.ApiV2.__init__"
] | [((863, 985), 'smartlingApiSdk.ApiV2.ApiV2.__init__', 'ApiV2.__init__', (['self', 'userIdentifier', 'userSecret', 'projectId', 'proxySettings'], {'permanentHeaders': 'permanentHeaders', 'env': 'env'}), '(self, userIdentifier, userSecret, projectId, proxySettings,\n permanentHeaders=permanentHeaders, env=env)\n', (877, 985), False, 'from smartlingApiSdk.ApiV2 import ApiV2\n')] |
# List all valid strings containing n opening and n closing parenthesis.
# Note that parens1 happens to be faster and more space efficient than parens2,
# which is faster than parens3. The slowest is parens4 only because it is not
# memoized.
def parens1(n):
    """Return all valid strings of n balanced parenthesis pairs (bottom-up DP).

    parens_of_length[k] holds every valid string with k pairs; each string of
    length `length` decomposes uniquely as "(" + inside + ")" + outside.

    Fix: the original used Python-2-only `xrange`, which raises NameError on
    Python 3; `range` is the direct replacement.
    """
    parens_of_length = [[""]]
    if n == 0:
        return parens_of_length[0]
    for length in range(1, n + 1):
        parens_of_length.append([])
        for i in range(length):
            for inside in parens_of_length[i]:
                for outside in parens_of_length[length - i - 1]:
                    parens_of_length[length].append("(" + inside + ")" + outside)
    return parens_of_length[n]
def parens2(n, open_count=0, close_count=0, memo=None):
    """Return all valid n-pair paren strings, built left to right.

    Memoizes on (characters remaining relative to n, opens used); a fresh
    memo dict is created for each top-level call.
    """
    if open_count + close_count == 2 * n:
        return [""]
    if memo is None:
        memo = {}
    key = (n - open_count - close_count, open_count)
    if key in memo:
        return memo[key]
    results = []
    if open_count < n:
        results.extend("(" + tail for tail in parens2(n, open_count + 1, close_count, memo))
    if close_count < open_count:
        results.extend(")" + tail for tail in parens2(n, open_count, close_count + 1, memo))
    memo[key] = results
    return results
def parens3(n):
    """Return all valid strings containing n balanced parenthesis pairs."""
    memo = {}
    return parens_memo3(n, 0, 0, memo)
def parens_memo3(n, open_count, close_count, memo):
    """Recursive helper: every valid completion given the parens placed so far.

    `memo` maps (characters remaining relative to n, opens used) to the list
    of suffixes; opens are tried before closes, so output is lexicographic
    with "(" < ")".
    """
    if open_count + close_count == 2 * n:
        return [""]
    key = (n - open_count - close_count, open_count)
    if key not in memo:
        with_open = (
            ["(" + rest for rest in parens_memo3(n, open_count + 1, close_count, memo)]
            if open_count < n
            else []
        )
        with_close = (
            [")" + rest for rest in parens_memo3(n, open_count, close_count + 1, memo)]
            if close_count < open_count
            else []
        )
        memo[key] = with_open + with_close
    return memo[key]
def parens4(n, partial="", open_count=0, close_count=0):
    """All valid n-pair paren strings, carrying the prefix built so far.

    Plain exhaustive recursion (no memoization) -- exponential, kept as the
    baseline the memoized variants are compared against.
    """
    if open_count + close_count == n * 2:
        return [partial]
    completions = []
    if open_count < n:
        completions.extend(parens4(n, partial + "(", open_count + 1, close_count))
    if close_count < open_count:
        completions.extend(parens4(n, partial + ")", open_count, close_count + 1))
    return completions
import unittest
class Test(unittest.TestCase):
    # Note the order-sensitivity: parens1 emits "()()" before "(())" while
    # parens2/3/4 emit the opposite order; the final assertion in each test
    # compares as sets to check the four implementations agree on content.
    def test_parens1(self):
        self.assertEqual(parens1(1), ["()"])
        self.assertEqual(parens1(2), ["()()", "(())"])
        self.assertEqual(parens1(3), ["()()()", "()(())", "(())()", "(()())",
                                      "((()))"])
    def test_parens2(self):
        self.assertEqual(parens2(1), ["()"])
        self.assertEqual(parens2(2), ["(())", "()()"])
        self.assertEqual(parens2(3), ["((()))", "(()())", "(())()", "()(())",
                                      "()()()"])
        self.assertEqual(set(parens1(7)), set(parens2(7)))
    def test_parens3(self):
        self.assertEqual(parens3(1), ["()"])
        self.assertEqual(parens3(2), ["(())", "()()"])
        self.assertEqual(parens3(3), ["((()))", "(()())", "(())()", "()(())",
                                      "()()()"])
        self.assertEqual(set(parens1(7)), set(parens3(7)))
    def test_parens4(self):
        self.assertEqual(parens4(1), ["()"])
        self.assertEqual(parens4(2), ["(())", "()()"])
        self.assertEqual(parens4(3), ["((()))", "(()())", "(())()", "()(())",
                                      "()()()"])
        self.assertEqual(set(parens1(7)), set(parens4(7)))
# Run the unittest suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main"
] | [((3115, 3130), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3128, 3130), False, 'import unittest\n')] |
# -*- coding: utf-8 -*-
"""
Calculations provided by aiida_ase3.
Register calculations via the "aiida.calculations" entry point in setup.json.
"""
from aiida.common import datastructures
from aiida.engine import CalcJob
from aiida.orm import SinglefileData, Str
from aiida.plugins import DataFactory
DiffParameters = DataFactory('ase3')
class Ase3Calculation(CalcJob):
    """AiiDA CalcJob plugin that runs a user-supplied ASE Python script.

    Currently only the "inout" operation mode is implemented: the provided
    input script is executed as-is and the declared output file is retrieved,
    nothing fancy -- thanks to Leopold for this idea. A future "gpaw-ready"
    mode could take care of a bunch of parsing and input/output options
    automatically.
    """
    @classmethod
    def define(cls, spec):
        """Define inputs and outputs of the calculation."""
        # yapf: disable
        super(Ase3Calculation, cls).define(spec)
        # set default values for AiiDA options
        spec.inputs['metadata']['options']['resources'].default = {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1,
        }
        spec.inputs['metadata']['options']['parser_name'].default = 'ase3'
        # new ports
        # NOTE(review): both metadata.options.output_filename ('aiida.out',
        # the scheduler stdout) and the 'output_filename' port ('aiida.txt')
        # exist; prepare_for_submission retrieves both.
        spec.input('metadata.options.output_filename', valid_type=str, default='aiida.out')
        spec.input('operation_mode', valid_type=Str, default=lambda: Str('inout'))
        spec.input('input_file', valid_type=SinglefileData, help='Input file which will be used', required=False)
        spec.input('output_filename', valid_type=Str, default=lambda: Str('aiida.txt'), help='AiiDA output file by default')
        # outputs
        spec.output('ase3_output', valid_type=SinglefileData, help='Output file which will be read in')
        # Error messages
        spec.exit_code(100, 'ERROR_MISSING_OUTPUT_FILES', message='Calculation did not produce all expected output files.')
    def prepare_for_submission(self, folder):
        """
        Create input files.

        TODO: Currently implemented only for input-output options

        :param folder: an `aiida.common.folders.Folder` where the plugin should temporarily place all files
            needed by the calculation.
        :return: `aiida.common.datastructures.CalcInfo` instance
        """
        # The job simply runs `python <input_file>` with stdout captured.
        codeinfo = datastructures.CodeInfo()
        codeinfo.code_uuid = self.inputs.code.uuid
        codeinfo.stdout_name = self.metadata.options.output_filename
        codeinfo.withmpi = self.inputs.metadata.options.withmpi
        codeinfo.cmdline_params = ['python', self.inputs.input_file.filename]
        # Prepare a `CalcInfo` to be returned to the engine
        calcinfo = datastructures.CalcInfo()
        calcinfo.codes_info = [codeinfo]
        # Copy the user's script into the working directory under its own name.
        calcinfo.local_copy_list = [
            (self.inputs.input_file.uuid, self.inputs.input_file.filename, self.inputs.input_file.filename),
        ]
        # Retrieve both the captured stdout and the declared output file.
        calcinfo.retrieve_list = [self.metadata.options.output_filename, self.inputs.output_filename.value]
        return calcinfo
| [
"aiida.common.datastructures.CalcInfo",
"aiida.orm.Str",
"aiida.plugins.DataFactory",
"aiida.common.datastructures.CodeInfo"
] | [((319, 338), 'aiida.plugins.DataFactory', 'DataFactory', (['"""ase3"""'], {}), "('ase3')\n", (330, 338), False, 'from aiida.plugins import DataFactory\n'), ((2369, 2394), 'aiida.common.datastructures.CodeInfo', 'datastructures.CodeInfo', ([], {}), '()\n', (2392, 2394), False, 'from aiida.common import datastructures\n'), ((2738, 2763), 'aiida.common.datastructures.CalcInfo', 'datastructures.CalcInfo', ([], {}), '()\n', (2761, 2763), False, 'from aiida.common import datastructures\n'), ((1445, 1457), 'aiida.orm.Str', 'Str', (['"""inout"""'], {}), "('inout')\n", (1448, 1457), False, 'from aiida.orm import SinglefileData, Str\n'), ((1643, 1659), 'aiida.orm.Str', 'Str', (['"""aiida.txt"""'], {}), "('aiida.txt')\n", (1646, 1659), False, 'from aiida.orm import SinglefileData, Str\n')] |
# Generated by Django 3.2.12 on 2022-04-08 12:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (Django 3.2.12, see file header): alters the
    # `webhook` foreign key on EventDelivery to point at webhook.Webhook with
    # CASCADE on delete. Do not hand-edit the operations.
    dependencies = [
        ("webhook", "0008_webhook_subscription_query"),
        ("core", "0004_delete_delivery_without_webhook"),
    ]
    operations = [
        migrations.AlterField(
            model_name="eventdelivery",
            name="webhook",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="webhook.webhook"
            ),
        ),
    ]
| [
"django.db.models.ForeignKey"
] | [((444, 533), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""webhook.webhook"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'webhook.webhook')\n", (461, 533), False, 'from django.db import migrations, models\n')] |
import json
from flask import Flask
from flask import render_template
import csv
import os
import pandas as pd
# Resolve the application's on-disk location so the CSV data files under
# static/ are found regardless of the current working directory.
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
APP_STATIC = os.path.join(APP_ROOT, 'static')
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the dashboard landing page."""
    return render_template('index.html', name='abc')
@app.route('/names')
def names():
    """Return all sample names ("BB_<SAMPLEID>"), sorted by id, as JSON."""
    metadata_df = pd.read_csv(os.path.join(APP_STATIC, 'metadata.csv'))
    sample_ids = metadata_df.sort_values(by='SAMPLEID')['SAMPLEID'].tolist()
    payload = [f'BB_{sample_id}' for sample_id in sample_ids]
    return app.response_class(
        response=json.dumps(payload),
        status=200,
        mimetype='application/json'
    )
@app.route('/otu')
def otu():
    """Return the OTU description column (second CSV column) as a JSON list."""
    with open(os.path.join(APP_STATIC, 'otu.csv')) as handle:
        descriptions = [record[1] for record in csv.reader(handle, delimiter=',')]
    return app.response_class(
        response=json.dumps(descriptions),
        status=200,
        mimetype='application/json'
    )
@app.route('/metadata/<sample>')
def metadata(sample):
    """Return the metadata record (ethnicity, gender, age, ...) for one sample.

    Accepts both "BB_940" and bare "940"; the CSV stores the bare id.

    Bug fix: the original left `sample` as the *list* returned by split('_')
    whenever the id contained no underscore, so `row[0] == sample` could
    never match and the endpoint silently returned {}.
    """
    parts = sample.split('_')
    sample_id = parts[1] if len(parts) > 1 else parts[0]
    data = {}
    with open(os.path.join(APP_STATIC, 'metadata.csv')) as csvfile:
        file_reader = csv.reader(csvfile, delimiter=',')
        for row in file_reader:
            if row[0] == sample_id:
                data['ETHNICITY'] = row[2]
                data['GENDER'] = row[3]
                data['AGE'] = row[4]
                data['BBTYPE'] = row[6]
                data['LOCATION'] = row[7]
                data['SAMPLEID'] = row[0]
    response = app.response_class(
        response=json.dumps(data),
        status=200,
        mimetype='application/json'
    )
    return response
@app.route('/wfreq/<sample>')
def wfreq(sample):
    """Return the washing-frequency column (6th CSV field) for one sample.

    Bug fixes: removed a leftover `import pdb; pdb.set_trace()` breakpoint
    that froze every request to this endpoint, and handled ids without an
    underscore (the original left `sample` as a list in that case, so no row
    ever matched).
    """
    parts = sample.split('_')
    sample_id = parts[1] if len(parts) > 1 else parts[0]
    data = []
    with open(os.path.join(APP_STATIC, 'metadata.csv')) as csvfile:
        file_reader = csv.reader(csvfile, delimiter=',')
        for row in file_reader:
            if row[0] == sample_id:
                data.append(row[5])
    response = app.response_class(
        response=json.dumps(data),
        status=200,
        mimetype='application/json'
    )
    return response
@app.route('/samples/<sample>')
def sample(sample):
    """Return the top-10 OTU ids and abundance values for one sample as JSON.

    Improvements: the bare `except:` (which hid every error, including
    KeyboardInterrupt) is narrowed to the KeyError pandas raises for an
    unknown column, and the DataFrame is sorted once instead of twice.
    """
    data = {}
    df = pd.read_csv(os.path.join(APP_STATIC, 'samples.csv'))
    selected_sample = sample.upper()
    try:
        ranked = df.sort_values(by=selected_sample, ascending=False)
        data['otu_ids'] = ranked['otu_id'].tolist()[:10]
        data['sample_values'] = ranked[selected_sample].tolist()[:10]
    except KeyError:
        # Unknown sample column: keep the response shape, just empty.
        data['sample_values'] = []
        data['otu_ids'] = []
    response = app.response_class(
        response=json.dumps(data),
        status=200,
        mimetype='application/json'
    )
    return response
# Start the Flask development server when run directly.
if __name__ == '__main__':
    app.run()
| [
"flask.render_template",
"flask.Flask",
"json.dumps",
"os.path.join",
"pdb.set_trace",
"os.path.abspath",
"csv.reader"
] | [((180, 212), 'os.path.join', 'os.path.join', (['APP_ROOT', '"""static"""'], {}), "(APP_ROOT, 'static')\n", (192, 212), False, 'import os\n'), ((219, 234), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (224, 234), False, 'from flask import Flask\n'), ((140, 165), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (155, 165), False, 'import os\n'), ((277, 318), 'flask.render_template', 'render_template', (['"""index.html"""'], {'name': '"""abc"""'}), "('index.html', name='abc')\n", (292, 318), False, 'from flask import render_template\n'), ((1862, 1877), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1875, 1877), False, 'import pdb\n'), ((376, 416), 'os.path.join', 'os.path.join', (['APP_STATIC', '"""metadata.csv"""'], {}), "(APP_STATIC, 'metadata.csv')\n", (388, 416), False, 'import os\n'), ((798, 832), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (808, 832), False, 'import csv\n'), ((1294, 1328), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (1304, 1328), False, 'import csv\n'), ((2065, 2099), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (2075, 2099), False, 'import csv\n'), ((2443, 2482), 'os.path.join', 'os.path.join', (['APP_STATIC', '"""samples.csv"""'], {}), "(APP_STATIC, 'samples.csv')\n", (2455, 2482), False, 'import os\n'), ((567, 583), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (577, 583), False, 'import json\n'), ((727, 762), 'os.path.join', 'os.path.join', (['APP_STATIC', '"""otu.csv"""'], {}), "(APP_STATIC, 'otu.csv')\n", (739, 762), False, 'import os\n'), ((950, 966), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (960, 966), False, 'import json\n'), ((1218, 1258), 'os.path.join', 'os.path.join', (['APP_STATIC', '"""metadata.csv"""'], {}), "(APP_STATIC, 'metadata.csv')\n", (1230, 1258), False, 'import os\n'), ((1691, 1707), 
'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1701, 1707), False, 'import json\n'), ((1989, 2029), 'os.path.join', 'os.path.join', (['APP_STATIC', '"""metadata.csv"""'], {}), "(APP_STATIC, 'metadata.csv')\n", (2001, 2029), False, 'import os\n'), ((2254, 2270), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2264, 2270), False, 'import json\n'), ((2875, 2891), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2885, 2891), False, 'import json\n')] |
"""Compile, run and lint files."""
import dataclasses
import logging
import os
import pathlib
import shlex
import sys
from functools import partial
from typing import List, Optional
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
from porcupine import get_tab_manager, menubar, tabs
from . import no_terminal, terminal
log = logging.getLogger(__name__)
@dataclasses.dataclass
class CommandsConfig:
    # Per-tab command templates; an empty string means "not configured".
    # Templates support the {file}/{no_ext}/{no_exts}/{python}/{exe}
    # placeholders expanded by get_command().
    compile: str = ""
    run: str = ""
    lint: str = ""
def get_command(
    tab: tabs.FileTab, which_command: Literal["compile", "run", "lint"], basename: str
) -> Optional[List[str]]:
    """Build the argv for *which_command* from the tab's command template.

    Returns None when no template is configured for that command.
    """
    assert os.sep not in basename, f"{basename!r} is not a basename"
    config = tab.settings.get("commands", CommandsConfig)
    assert isinstance(config, CommandsConfig)
    template = getattr(config, which_command)
    if not template.strip():
        return None
    path = pathlib.Path(basename)
    all_suffixes = "".join(path.suffixes)
    stem = path.stem
    on_windows = sys.platform == "win32"
    substitutions = {
        "file": basename,
        "no_ext": stem,
        "no_exts": basename[: -len(all_suffixes)] if all_suffixes else basename,
        "python": "py" if on_windows else "python3",
        "exe": f"{stem}.exe" if on_windows else f"./{stem}",
    }
    return [
        word.format(**substitutions)
        for word in shlex.split(template, posix=not on_windows)
    ]
def do_something(something: Literal["compile", "run", "compilerun", "lint"]) -> None:
    """Save the current file tab and run the requested command for it.

    "run" opens a terminal; "compilerun" compiles silently and runs only if
    compilation was started; "compile"/"lint" run without a terminal.
    """
    tab = get_tab_manager().select()
    assert isinstance(tab, tabs.FileTab)
    tab.save()
    if tab.path is None:
        # user cancelled a save as dialog
        return
    workingdir = tab.path.parent
    basename = tab.path.name
    if something == "run":
        command = get_command(tab, "run", basename)
        if command is not None:
            terminal.run_command(workingdir, command)
    elif something == "compilerun":
        # Closure passed to no_terminal so the run starts only after compiling.
        def run_after_compile() -> None:
            assert isinstance(tab, tabs.FileTab)
            command = get_command(tab, "run", basename)
            if command is not None:
                terminal.run_command(workingdir, command)
        compile_command = get_command(tab, "compile", basename)
        if compile_command is not None:
            no_terminal.run_command(workingdir, compile_command, run_after_compile)
    else:
        # "compile" or "lint": run quietly, no terminal window.
        command = get_command(tab, something, basename)
        if command is not None:
            no_terminal.run_command(workingdir, command)
def on_new_tab(tab: tabs.Tab) -> None:
    """Register the per-tab 'commands' option on every new file tab."""
    if not isinstance(tab, tabs.FileTab):
        return
    tab.settings.add_option("commands", CommandsConfig())
def setup() -> None:
    """Register the tab callback and the Run menu items."""
    get_tab_manager().add_tab_callback(on_new_tab)
    menubar.get_menu("Run").add_command(label="Compile", command=partial(do_something, "compile"))
    menubar.get_menu("Run").add_command(label="Run", command=partial(do_something, "run"))
    menubar.get_menu("Run").add_command(
        label="Compile and Run", command=partial(do_something, "compilerun")
    )
    # Bug fix: the Lint menu item previously invoked "compilerun" instead of "lint".
    menubar.get_menu("Run").add_command(label="Lint", command=partial(do_something, "lint"))
    # TODO: disable the menu items when they don't correspond to actual commands
    for label in {"Compile", "Run", "Compile and Run", "Lint"}:
        menubar.set_enabled_based_on_tab(
            f"Run/{label}", (lambda tab: isinstance(tab, tabs.FileTab))
        )
| [
"logging.getLogger",
"pathlib.Path",
"shlex.split",
"porcupine.menubar.get_menu",
"functools.partial",
"porcupine.get_tab_manager"
] | [((392, 419), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (409, 419), False, 'import logging\n'), ((999, 1021), 'pathlib.Path', 'pathlib.Path', (['basename'], {}), '(basename)\n', (1011, 1021), False, 'import pathlib\n'), ((953, 975), 'pathlib.Path', 'pathlib.Path', (['basename'], {}), '(basename)\n', (965, 975), False, 'import pathlib\n'), ((1384, 1436), 'shlex.split', 'shlex.split', (['template'], {'posix': "(sys.platform != 'win32')"}), "(template, posix=sys.platform != 'win32')\n", (1395, 1436), False, 'import shlex\n'), ((1561, 1578), 'porcupine.get_tab_manager', 'get_tab_manager', ([], {}), '()\n', (1576, 1578), False, 'from porcupine import get_tab_manager, menubar, tabs\n'), ((2747, 2764), 'porcupine.get_tab_manager', 'get_tab_manager', ([], {}), '()\n', (2762, 2764), False, 'from porcupine import get_tab_manager, menubar, tabs\n'), ((2799, 2822), 'porcupine.menubar.get_menu', 'menubar.get_menu', (['"""Run"""'], {}), "('Run')\n", (2815, 2822), False, 'from porcupine import get_tab_manager, menubar, tabs\n'), ((2860, 2892), 'functools.partial', 'partial', (['do_something', '"""compile"""'], {}), "(do_something, 'compile')\n", (2867, 2892), False, 'from functools import partial\n'), ((2898, 2921), 'porcupine.menubar.get_menu', 'menubar.get_menu', (['"""Run"""'], {}), "('Run')\n", (2914, 2921), False, 'from porcupine import get_tab_manager, menubar, tabs\n'), ((2955, 2983), 'functools.partial', 'partial', (['do_something', '"""run"""'], {}), "(do_something, 'run')\n", (2962, 2983), False, 'from functools import partial\n'), ((2989, 3012), 'porcupine.menubar.get_menu', 'menubar.get_menu', (['"""Run"""'], {}), "('Run')\n", (3005, 3012), False, 'from porcupine import get_tab_manager, menubar, tabs\n'), ((3067, 3102), 'functools.partial', 'partial', (['do_something', '"""compilerun"""'], {}), "(do_something, 'compilerun')\n", (3074, 3102), False, 'from functools import partial\n'), ((3113, 3136), 'porcupine.menubar.get_menu', 
'menubar.get_menu', (['"""Run"""'], {}), "('Run')\n", (3129, 3136), False, 'from porcupine import get_tab_manager, menubar, tabs\n'), ((3171, 3206), 'functools.partial', 'partial', (['do_something', '"""compilerun"""'], {}), "(do_something, 'compilerun')\n", (3178, 3206), False, 'from functools import partial\n')] |
from price_picker.common.database import CRUDMixin
from price_picker import db
class Shop(CRUDMixin, db.Model):
    """A shop; a default row named "Zentrale" is inserted lazily."""

    __tablename__ = 'shops'

    name = db.Column(db.String(128), primary_key=True, unique=True, default="Zentrale")

    @classmethod
    def query_factory_all(cls):
        """Return all shops ordered by name, seeding the default shop when the table is empty."""
        table_is_empty = cls.query.first() is None
        if table_is_empty:
            cls.create()
        return cls.query.order_by(cls.name)

    def __str__(self):
        return self.name

    __repr__ = __str__
| [
"price_picker.db.String"
] | [((181, 195), 'price_picker.db.String', 'db.String', (['(128)'], {}), '(128)\n', (190, 195), False, 'from price_picker import db\n')] |
import numpy as np
from lib.lif import LIF, ParamsLIF
from lib.causal import causaleffect
# Set x = 0, sigma = 10; sweep the weight vector W over wvals = 2..20.
sigma = 10
mu = 1
tau = 1
t = 500

params = ParamsLIF(sigma = sigma, mu = mu, tau = tau)
lif = LIF(params, t = t)
lif.x = 0

# Simulate for a range of W values.
N = 19
nsims = 1
wmax = 20
n = params.n
deltaT = 50

# Play with different correlation (c) values.
cvals = [0.01, 0.25, 0.5, 0.75, 0.99]

for c in cvals:
    print("Running simulations for c = %f"%c)
    outfile = './sweeps/param_w_N_%d_nsims_%d_c_%f_deltaT_counterfactual_simulations.npz'%(N, nsims, c)
    params.c = c
    lif.setup(params)
    lif.x = 0
    wvals = np.linspace(2, wmax, N)
    vs = np.zeros((N, N, nsims, n, lif.T), dtype=np.float16)
    # Bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the supported spelling.
    hs = np.zeros((N, N, nsims, n, lif.T), dtype=bool)
    us = np.zeros((N, N, nsims, n, lif.T), dtype=np.float16)
    for i, w0 in enumerate(wvals):
        for j, w1 in enumerate(wvals):
            print("Running %d simulations with w0=%f, w1=%f"%(nsims, w0, w1))
            lif.W = np.array([w0, w1])
            for k in range(nsims):
                (v, h, Cost, betas, u) = lif.simulate(deltaT)
                vs[i, j, k, :] = v
                hs[i, j, k, :] = h
                us[i, j, k, :] = u
    # Save output for this correlation value.
    np.savez(outfile, vs = vs, hs = hs, params = params, wvals = wvals,
             nsims = nsims, us = us)
"numpy.savez",
"lib.lif.ParamsLIF",
"lib.lif.LIF",
"numpy.array",
"numpy.linspace",
"numpy.zeros"
] | [((172, 210), 'lib.lif.ParamsLIF', 'ParamsLIF', ([], {'sigma': 'sigma', 'mu': 'mu', 'tau': 'tau'}), '(sigma=sigma, mu=mu, tau=tau)\n', (181, 210), False, 'from lib.lif import LIF, ParamsLIF\n'), ((223, 239), 'lib.lif.LIF', 'LIF', (['params'], {'t': 't'}), '(params, t=t)\n', (226, 239), False, 'from lib.lif import LIF, ParamsLIF\n'), ((625, 648), 'numpy.linspace', 'np.linspace', (['(2)', 'wmax', 'N'], {}), '(2, wmax, N)\n', (636, 648), True, 'import numpy as np\n'), ((655, 706), 'numpy.zeros', 'np.zeros', (['(N, N, nsims, n, lif.T)'], {'dtype': 'np.float16'}), '((N, N, nsims, n, lif.T), dtype=np.float16)\n', (663, 706), True, 'import numpy as np\n'), ((713, 761), 'numpy.zeros', 'np.zeros', (['(N, N, nsims, n, lif.T)'], {'dtype': 'np.bool'}), '((N, N, nsims, n, lif.T), dtype=np.bool)\n', (721, 761), True, 'import numpy as np\n'), ((768, 819), 'numpy.zeros', 'np.zeros', (['(N, N, nsims, n, lif.T)'], {'dtype': 'np.float16'}), '((N, N, nsims, n, lif.T), dtype=np.float16)\n', (776, 819), True, 'import numpy as np\n'), ((1190, 1269), 'numpy.savez', 'np.savez', (['outfile'], {'vs': 'vs', 'hs': 'hs', 'params': 'params', 'wvals': 'wvals', 'nsims': 'nsims', 'us': 'us'}), '(outfile, vs=vs, hs=hs, params=params, wvals=wvals, nsims=nsims, us=us)\n', (1198, 1269), True, 'import numpy as np\n'), ((978, 996), 'numpy.array', 'np.array', (['[w0, w1]'], {}), '([w0, w1])\n', (986, 996), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from argparse import ArgumentParser
import os
import sys
if __name__ == '__main__':
    # CLI: one required extension, an optional search root, a verbosity switch.
    parser = ArgumentParser(description='list all files with given '
                                       'extension in directory')
    parser.add_argument('--dir', default='.', help='directory to search')
    parser.add_argument('ext', help='extension to use')
    parser.add_argument('--verbose', action='store_true',
                        help='show progress info')
    opts = parser.parse_args()
    for current_dir, _, file_names in os.walk(opts.dir):
        if opts.verbose:
            # Progress goes to stderr so stdout stays a clean file list.
            print("### checking directory '{}'".format(current_dir),
                  file=sys.stderr)
        for name in file_names:
            if os.path.splitext(name)[1] == opts.ext:
                print(os.path.join(current_dir, name))
| [
"os.path.join",
"os.walk",
"os.path.splitext",
"argparse.ArgumentParser"
] | [((126, 204), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""list all files with given extension in directory"""'}), "(description='list all files with given extension in directory')\n", (140, 204), False, 'from argparse import ArgumentParser\n'), ((604, 624), 'os.walk', 'os.walk', (['options.dir'], {}), '(options.dir)\n', (611, 624), False, 'import os\n'), ((809, 836), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (825, 836), False, 'import os\n'), ((894, 928), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (906, 928), False, 'import os\n')] |
from redesigned_barnacle.buffer import CircularBuffer
from redesigned_barnacle.graph import Sparkline
from redesigned_barnacle.mock import MockFramebuffer
from unittest import TestCase
class SparkTest(TestCase):
    """Smoke test: a sparkline accepts a sample and renders without error."""

    def test_line(self):
        samples = CircularBuffer()
        sparkline = Sparkline(32, 64, samples)
        sparkline.push(16)
        # Drawing into a mock framebuffer at the origin must not raise.
        sparkline.draw(MockFramebuffer(), 0, 0)
"redesigned_barnacle.mock.MockFramebuffer",
"redesigned_barnacle.buffer.CircularBuffer",
"redesigned_barnacle.graph.Sparkline"
] | [((247, 263), 'redesigned_barnacle.buffer.CircularBuffer', 'CircularBuffer', ([], {}), '()\n', (261, 263), False, 'from redesigned_barnacle.buffer import CircularBuffer\n'), ((273, 295), 'redesigned_barnacle.graph.Sparkline', 'Sparkline', (['(32)', '(64)', 'buf'], {}), '(32, 64, buf)\n', (282, 295), False, 'from redesigned_barnacle.graph import Sparkline\n'), ((324, 341), 'redesigned_barnacle.mock.MockFramebuffer', 'MockFramebuffer', ([], {}), '()\n', (339, 341), False, 'from redesigned_barnacle.mock import MockFramebuffer\n')] |
import os
import sys
import codecs
from setuptools import setup
# Base test dependencies; Python 2 additionally needs the mock backport.
tests_require = ['pytest', 'pytest-mock']
if sys.version_info < (3, 0):
    tests_require += ['mock']
def read(fname):
    """Return the UTF-8 decoded contents of *fname*, resolved relative to this file.

    Bug fix: the original called ``codecs.open(...).read()`` and never closed
    the handle; a ``with`` block closes it deterministically.
    """
    file_path = os.path.join(os.path.dirname(__file__), fname)
    with codecs.open(file_path, encoding='utf-8') as handle:
        return handle.read()
# Distribution metadata; the long description is pulled from README.md via read().
setup(
    name='stingconf',
    version='0.0.3',
    author='',
    author_email='',
    maintainer='rsp9u',
    maintainer_email='<EMAIL>',
    license='MIT',
    url='https://github.com/rsp9u/stingconf',
    description='Layered configuration library',
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    zip_safe=False,
    packages=['stingconf'],
    install_requires=[
        'PyYAML',
    ],
    setup_requires=[
        'pytest-runner'
    ],
    tests_require=tests_require,
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
    ],
)
| [
"os.path.dirname",
"codecs.open"
] | [((231, 256), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (246, 256), False, 'import os\n'), ((276, 316), 'codecs.open', 'codecs.open', (['file_path'], {'encoding': '"""utf-8"""'}), "(file_path, encoding='utf-8')\n", (287, 316), False, 'import codecs\n')] |
from setuptools import find_packages, setup
# Read more here: https://pypi.org/project/twine/
# Distribution metadata for the simple_django_logger package.
setup(
    name='simple_django_logger',
    # packages=[
    #     'simple_django_logger',  # this must be the same as the name above
    #     'simple_django_logger.middleware',
    #     'simple_django_logger.migrations'],
    packages=find_packages(),
    include_package_data=True,
    version='3.1.0',
    description='A basic logger for Django',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/eshandas/simple_django_logger',  # use the URL to the github repo
    download_url='https://github.com/eshandas/simple_django_logger/archive/3.1.0.tar.gz',  # Create a tag in github
    keywords=['django', 'logger'],
    classifiers=[],
    install_requires=[
        'Django>=2.0',
        'requests>=2.0',
        'djangorestframework>=3.8',
        'user-agents>=1.1.0',
        'django-user-agents>=0.3.2'],
)
| [
"setuptools.find_packages"
] | [((333, 348), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (346, 348), False, 'from setuptools import find_packages, setup\n')] |
"""Strategic cache-control.
Module usage:
1. Plan your cache-control strategy. (ex: "long" caches content for up to 3600 seconds)
2. Set the strategy store on your app.state and add rules.
3. Set cache strategy as Depends to your path routing.
.. code-block: python
app = FastAPI()
strategy = StrategyStore()
strategy.add_rule("long", 3600)
strategy.add_rule("short", 300, 600)
app.state.cache_control_strategy = strategy
@app.get("/", dependencies=[Depends(cache_control_strategy("long"))]
async def hello():
return "hello world"
"""
import os
from dataclasses import dataclass, field
from typing import Callable, Dict
from fastapi import HTTPException, Request, Response
from . import CacheControl
from ..env_helper import filter_envmap, parse_as_dict
@dataclass
class StrategyStore(object):
    """Name-based cache-control strategy registry.

    Maps a rule name (e.g. "long", "short") to a CacheControl directive.
    """
    rules: Dict[str, CacheControl] = field(default_factory=dict)

    def get_rule(self, name: str) -> CacheControl:
        """Return the rule registered under *name*; raises KeyError if unknown."""
        return self.rules[name]

    def add_rule(self, name: str, max_age: int = None, s_maxage: int = None):
        """Register a new rule.

        Raises:
            ValueError: if *name* is already registered. (Was a bare
                ``Exception``; ValueError is a subclass, so any caller using
                ``except Exception`` still works.)
        """
        if name in self.rules:
            raise ValueError(f"Rule '{name}' already exists.")
        self.rules[name] = CacheControl(max_age, s_maxage)
def store_from_env(prefix: str = None) -> StrategyStore:
    """Build a strategy store from environment variables.

    Every environment variable whose name starts with *prefix* becomes one
    rule; its value is parsed as a dict of integer cache-control settings.

    :param prefix: environment-variable prefix (defaults to "CACHE_CONTROL_").
    """
    if prefix is None:
        prefix = "CACHE_CONTROL_"
    store = StrategyStore()
    matched = filter_envmap(prefix, os.environ)
    for rule_name, raw_value in matched.items():
        store.add_rule(rule_name, **parse_as_dict(raw_value, convert=int))
    return store
def cache_control_strategy(name: str) -> Callable:
    """Dependency-injection factory using a named cache-control strategy.

    If the named strategy does not exist, an HTTP 500 is raised.
    Current spec:
    - app.state must have a "cache_control_strategy" attribute.
    - every rule must be registered on the strategy object before requests.
    """
    async def _cache_control_strategy(request: Request, response: Response):
        # Only GET responses get a Cache-Control header here.
        if request.method == "GET":
            try:
                store: StrategyStore = request.app.state.cache_control_strategy
                cc = store.get_rule(name)
                response.headers[cc.header_name] = cc.header_value
            except KeyError:
                # Unknown rule name -> configuration error, surfaced as 500.
                raise HTTPException(status_code=500, detail="invalid-cache-control")
    return _cache_control_strategy
| [
"fastapi.HTTPException",
"dataclasses.field"
] | [((912, 939), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (917, 939), False, 'from dataclasses import dataclass, field\n'), ((2536, 2598), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(500)', 'detail': '"""invalid-cache-control"""'}), "(status_code=500, detail='invalid-cache-control')\n", (2549, 2598), False, 'from fastapi import HTTPException, Request, Response\n')] |
# Generated by Django 3.2.6 on 2021-08-30 14:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: alters Relationship.people to an explicit many-to-many
    # through the village_api.Relation model.

    dependencies = [
        ("village_api", "0007_auto_20210830_1327"),
    ]

    operations = [
        migrations.AlterField(
            model_name="relationship",
            name="people",
            field=models.ManyToManyField(
                related_name="relationships",
                through="village_api.Relation",
                to="village_api.Person",
            ),
        ),
    ]
| [
"django.db.models.ManyToManyField"
] | [((346, 460), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""relationships"""', 'through': '"""village_api.Relation"""', 'to': '"""village_api.Person"""'}), "(related_name='relationships', through=\n 'village_api.Relation', to='village_api.Person')\n", (368, 460), False, 'from django.db import migrations, models\n')] |
#-
# ==========================================================================
# Copyright (C) 1995 - 2006 Autodesk, Inc. and/or its licensors. All
# rights reserved.
#
# The coded instructions, statements, computer programs, and/or related
# material (collectively the "Data") in these files contain unpublished
# information proprietary to Autodesk, Inc. ("Autodesk") and/or its
# licensors, which is protected by U.S. and Canadian federal copyright
# law and by international treaties.
#
# The Data is provided for use exclusively by You. You have the right
# to use, modify, and incorporate this Data into other products for
# purposes authorized by the Autodesk software license agreement,
# without fee.
#
# The copyright notices in the Software and this entire statement,
# including the above license grant, this restriction and the
# following disclaimer, must be included in all copies of the
# Software, in whole or in part, and all derivative works of
# the Software, unless such copies or derivative works are solely
# in the form of machine-executable object code generated by a
# source language processor.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
# AUTODESK DOES NOT MAKE AND HEREBY DISCLAIMS ANY EXPRESS OR IMPLIED
# WARRANTIES INCLUDING, BUT NOT LIMITED TO, THE WARRANTIES OF
# NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR
# PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE, OR
# TRADE PRACTICE. IN NO EVENT WILL AUTODESK AND/OR ITS LICENSORS
# BE LIABLE FOR ANY LOST REVENUES, DATA, OR PROFITS, OR SPECIAL,
# DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES, EVEN IF AUTODESK
# AND/OR ITS LICENSORS HAS BEEN ADVISED OF THE POSSIBILITY
# OR PROBABILITY OF SUCH DAMAGES.
#
# ==========================================================================
#+
#
# Creation Date: 13 October 2006
#
# Author: mlausch
#
# Example Plugin: mathTableControl.py
#
# Creates a simple table control
#
# Example:
# import maya.cmds as cmds
# window = cmds.window(title="Math Table",widthHeight=(400,150))
# cmds.paneLayout()
# cmds.showWindow()
# cmds.spMathTableControl()
#
import math, sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.OpenMayaUI as OpenMayaUI
kPluginCmdName = "spMathTableControl"

# Command-line flag names (short and long forms).
kNopFlag = "-nop"
kNopLongFlag = "-noOperation"
kMultFlag = "-mul"
kMultLongFlag = "-multiplyVals"
kAddFlag = "-add"
kAddLongFlag = "-addVals"
kRedrawFlag = "-rd"
kRedrawFlagLong = "-redraw"


# Operations on cell coordinates; each must return a string.
# PEP 8 (E731): use def instead of assigning lambdas to names, so tracebacks
# and repr() show useful function names.
def kNop(x, y):
    """Default operation: echo the cell coordinates."""
    return "cell(%d,%d)" % (x, y)


def kMult(x, y):
    """Multiply row by column."""
    return str(x * y)


def kAdd(x, y):
    """Add row and column."""
    return str(x + y)


# Maps hashable MPx pointers to their Python control instances.
kPythonPtrTable = {}
class MathTableControlCmd(OpenMayaMPx.MPxControlCommand):
    """Maya command that creates and edits the math table control."""
    def __init__(self):
        OpenMayaMPx.MPxControlCommand.__init__(self)
    def makeControl(self):
        """Create the control with default settings (5x3 grid, no-op cells)."""
        control = MathTableControl(self)
        control.setOperation(kNop)
        control.setNumberOfRows(5)
        control.setNumberOfColumns(3)
        return OpenMayaMPx.asMPxPtr(control)
    def doEditFlags(self):
        """Apply edit-mode flags to the control instance backing this command."""
        theParser = self._parser()
        # Look up the Python-side control object by its hashable MPx pointer;
        # None if it was never registered (would fail below on attribute access).
        theControl = kPythonPtrTable.get(OpenMayaMPx.asHashable(self._control()), None)
        if theParser.isFlagSet(kNopFlag):
            theControl.setOperation(kNop)
        elif theParser.isFlagSet(kMultFlag):
            theControl.setOperation(kMult)
        elif theParser.isFlagSet(kAddFlag):
            theControl.setOperation(kAdd)
        elif theParser.isFlagSet(kRedrawFlag):
            theControl.redrawCells()
            theControl.redrawLabels()
        else:
            # No recognised flag: fall back to the base-class handling.
            OpenMayaMPx.MPxControlCommand.doEditFlags(self)
    def doQueryFlags(self):
        # No custom query flags; delegate to the base class.
        return OpenMayaMPx.MPxControlCommand.doQueryFlags(self)
    def appendSyntax(self):
        """Declare the command-line flags this command accepts."""
        theSyntax = self._syntax()
        theSyntax.addFlag(kNopFlag, kNopLongFlag)
        theSyntax.addFlag(kMultFlag, kMultLongFlag)
        theSyntax.addFlag(kAddFlag, kAddLongFlag)
        theSyntax.addFlag(kRedrawFlag, kRedrawFlagLong)
class MathTableControl(OpenMayaMPx.MPxUITableControl):
    """Table control whose cells show an operation applied to (row, column)."""
    def __init__(self, command):
        OpenMayaMPx.MPxUITableControl.__init__(self, command)
        # Callable (row, column) -> str used to fill each cell; None until set.
        self.__myOperation = None
        # Register self so the command can find this instance by MPx pointer.
        kPythonPtrTable[OpenMayaMPx.asHashable(self)] = self
    def __del__(self):
        del kPythonPtrTable[OpenMayaMPx.asHashable(self)]
    def cellString(self, row, column, isValidCell):
        """Return the text for a cell; an empty result marks the cell invalid."""
        result = ""
        if callable(self.__myOperation):
            result = self.__myOperation(row, column)
        # isValidCell is an output parameter written through MScriptUtil.
        OpenMaya.MScriptUtil.setBool(isValidCell, bool(result))
        return result
    def labelString(self, labelType, index):
        """Return the header text for a row or a column."""
        value = ""
        if labelType == OpenMayaMPx.MPxUITableControl.kRowLabel:
            value = "[Row %d]" % index
        elif labelType == OpenMayaMPx.MPxUITableControl.kColumnLabel:
            value = "[Col %d]" % index
        return value
    def setOperation(self, op):
        """Set the cell operation and refresh the table."""
        self.__myOperation = op
        self.redrawCells()
################################################################
def cmdCreator():
    """Factory passed to Maya when registering the control command."""
    return OpenMayaMPx.asMPxPtr(MathTableControlCmd())
def initializePlugin(mobject):
    """Maya entry point: register the control command with the plugin manager."""
    mplugin = OpenMayaMPx.MFnPlugin(mobject, "Autodesk", "1.0", "Any")
    try:
        mplugin.registerControlCommand(kPluginCmdName, cmdCreator)
    except Exception:
        # Was a bare except, which would also trap SystemExit/KeyboardInterrupt.
        # Report the failure and re-raise so Maya still sees it.
        sys.stderr.write("Failed to register command: %s\n" % kPluginCmdName)
        raise
def uninitializePlugin(mobject):
    """Maya exit point: deregister the control command."""
    mplugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        mplugin.deregisterControlCommand(kPluginCmdName)
    except Exception:
        # Was a bare except; narrowed to Exception, report, then re-raise.
        sys.stderr.write("Failed to unregister command: %s\n" % kPluginCmdName)
        raise
| [
"maya.OpenMayaMPx.MPxControlCommand.__init__",
"maya.OpenMayaMPx.MPxUITableControl.__init__",
"maya.OpenMayaMPx.MPxControlCommand.doEditFlags",
"maya.OpenMayaMPx.MFnPlugin",
"sys.stderr.write",
"maya.OpenMayaMPx.asMPxPtr",
"maya.OpenMayaMPx.MPxControlCommand.doQueryFlags",
"maya.OpenMayaMPx.asHashable... | [((5003, 5059), 'maya.OpenMayaMPx.MFnPlugin', 'OpenMayaMPx.MFnPlugin', (['mobject', '"""Autodesk"""', '"""1.0"""', '"""Any"""'], {}), "(mobject, 'Autodesk', '1.0', 'Any')\n", (5024, 5059), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n'), ((5272, 5302), 'maya.OpenMayaMPx.MFnPlugin', 'OpenMayaMPx.MFnPlugin', (['mobject'], {}), '(mobject)\n', (5293, 5302), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n'), ((2820, 2864), 'maya.OpenMayaMPx.MPxControlCommand.__init__', 'OpenMayaMPx.MPxControlCommand.__init__', (['self'], {}), '(self)\n', (2858, 2864), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n'), ((3033, 3062), 'maya.OpenMayaMPx.asMPxPtr', 'OpenMayaMPx.asMPxPtr', (['control'], {}), '(control)\n', (3053, 3062), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n'), ((3628, 3676), 'maya.OpenMayaMPx.MPxControlCommand.doQueryFlags', 'OpenMayaMPx.MPxControlCommand.doQueryFlags', (['self'], {}), '(self)\n', (3670, 3676), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n'), ((4019, 4072), 'maya.OpenMayaMPx.MPxUITableControl.__init__', 'OpenMayaMPx.MPxUITableControl.__init__', (['self', 'command'], {}), '(self, command)\n', (4057, 4072), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n'), ((4121, 4149), 'maya.OpenMayaMPx.asHashable', 'OpenMayaMPx.asHashable', (['self'], {}), '(self)\n', (4143, 4149), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n'), ((4206, 4234), 'maya.OpenMayaMPx.asHashable', 'OpenMayaMPx.asHashable', (['self'], {}), '(self)\n', (4228, 4234), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n'), ((5142, 5211), 'sys.stderr.write', 'sys.stderr.write', (["('Failed to register command: %s\\n' % kPluginCmdName)"], {}), "('Failed to register command: %s\\n' % kPluginCmdName)\n", (5158, 5211), False, 'import math, sys\n'), ((5375, 5446), 'sys.stderr.write', 'sys.stderr.write', (["('Failed to unregister command: %s\\n' % kPluginCmdName)"], {}), "('Failed to unregister command: %s\\n' % kPluginCmdName)\n", (5391, 5446), False, 'import 
math, sys\n'), ((3540, 3587), 'maya.OpenMayaMPx.MPxControlCommand.doEditFlags', 'OpenMayaMPx.MPxControlCommand.doEditFlags', (['self'], {}), '(self)\n', (3581, 3587), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n')] |
#! /usr/bin/env python
import sys
import pathogenseq as ps
import json
# CLI: <vcf/bcf file> <reference fasta> <output json>
infile = sys.argv[1]
ref = sys.argv[2]
outfile = sys.argv[3]

bcf = ps.bcf(infile)
stats = bcf.load_stats(convert=True, ref=ref)
# Total reference length, summed over all sequences in the fasta.
genome_len = sum(len(x) for x in ps.fasta(ref).fa_dict.values())

print("sample\tnRefHom\tnNonRefHom\tnHets\tnMissing")
for sample in stats["PSC"]:
    s = stats["PSC"][sample]
    s["id"] = sample
    tot_sum = s["nRefHom"] + s["nNonRefHom"] + s["nHets"]
    # Positions not covered by any genotype call.
    s["missing"] = genome_len - tot_sum
    print("%s\t%s\t%s\t%s\t%s" % (sample, s["nRefHom"], s["nNonRefHom"], s["nHets"], s["missing"]))

# NOTE(review): only the *last* sample's stats are written to outfile (same as
# the original behaviour); if all samples should be saved, move the dump into
# the loop with per-sample paths.
# Bug fix: open the output with a context manager so the handle is closed.
with open(outfile, "w") as out_fh:
    json.dump(s, out_fh)
| [
"pathogenseq.fasta",
"pathogenseq.bcf"
] | [((139, 153), 'pathogenseq.bcf', 'ps.bcf', (['infile'], {}), '(infile)\n', (145, 153), True, 'import pathogenseq as ps\n'), ((233, 246), 'pathogenseq.fasta', 'ps.fasta', (['ref'], {}), '(ref)\n', (241, 246), True, 'import pathogenseq as ps\n')] |
import re
import yaml
from mapproxy.wsgiapp import make_wsgi_app
import gws
import gws.config
import gws.tools.os2
import gws.tools.json2
import gws.types as t
class _Config:
    """Builder that accumulates a mapproxy YAML configuration.

    Objects (sources, grids, caches, layers) are registered via the kind
    methods; each gets a deterministic uid derived from a hash of its config,
    so identical configs are de-duplicated. as_dict() renders the final
    mapproxy config mapping.
    """
    def __init__(self):
        self.c = 0
        self.services = {
            'wms': {
                'image_formats': ['image/png'],
                'max_output_pixels': [9000, 9000]
            },
            'wmts': {
                'kvp': True,
                'restful': False
            }
        }
        self.globals = {
            # https://mapproxy.org/docs/1.11.0/configuration.html#id14
            # "By default MapProxy assumes lat/long (north/east) order for all geographic and x/y (east/north) order for all projected SRS."
            # we need to change that because our extents are always x/y (lon/lat) even if a CRS says otherwise
            'srs': {
                'axis_order_en': ['EPSG:4326']
            },
            'cache': {
                'base_dir': gws.MAPPROXY_CACHE_DIR,
                # Random suffixes keep lock dirs unique per process run.
                'lock_dir': gws.TMP_DIR + '/mpx/locks_' + gws.random_string(16),
                'tile_lock_dir': gws.TMP_DIR + '/mpx/tile_locks_' + gws.random_string(16),
                'concurrent_tile_creators': 1,
                'max_tile_limit': 5000,
            },
            'image': {
                'resampling_method': 'bicubic',
                'stretch_factor': 1.15,
                'max_shrink_factor': 4.0,
                'formats': {
                    'png8': {
                        'format': 'image/png',
                        'mode': 'P',
                        'colors': 256,
                        'transparent': True,
                        'resampling_method': 'bicubic',
                    },
                    'png24': {
                        'format': 'image/png',
                        'mode': 'RGBA',
                        'colors': 0,
                        'transparent': True,
                        'resampling_method': 'bicubic',
                    }
                }
            }
        }
        # uid -> {'kind': ..., 'c': ...} registry of all configured objects.
        self.cfg = {}
    def _add(self, kind, c):
        """Register config dict *c* under *kind*; return its deterministic uid."""
        # mpx doesn't like tuples
        for k, v in c.items():
            if isinstance(v, tuple):
                c[k] = list(v)
        uid = kind + '_' + gws.tools.json2.to_hash(c)
        # clients might add their hash params starting with '$'
        c = {
            k: v
            for k, v in c.items()
            if not k.startswith('$')
        }
        self.cfg[uid] = {'kind': kind, 'c': c}
        return uid
    def _items(self, kind):
        # Yield (uid, config) pairs for every registered object of *kind*.
        for k, v in self.cfg.items():
            if v['kind'] == kind:
                yield k, v['c']
    def cache(self, c):
        return self._add('cache', c)
    def source(self, c):
        return self._add('source', c)
    def grid(self, c):
        # self._transform_extent(c)
        return self._add('grid', c)
    def layer(self, c):
        c['title'] = ''
        return self._add('layer', c)
    def as_dict(self):
        """Render the accumulated configuration as a mapproxy config mapping."""
        d = {
            'services': self.services,
            'globals': self.globals,
        }
        kinds = ['source', 'grid', 'cache', 'layer']
        for kind in kinds:
            d[kind + 's'] = {
                key: c
                for key, c in self._items(kind)
            }
        # mapproxy expects layers as a list; sort for a stable output.
        d['layers'] = sorted(d['layers'].values(), key=lambda x: x['name'])
        return d
def create(root: t.IRootObject):
    """Collect the mapproxy configuration from all layers, or None if empty."""
    builder = _Config()
    layer: t.ILayer
    for layer in root.find_all('gws.ext.layer'):
        layer.mapproxy_config(builder)
    cfg = builder.as_dict()
    if not cfg['layers']:
        return
    # Gather every CRS used by maps plus those advertised by OWS services.
    crs_set = set()
    for m in root.find_all('gws.common.map'):
        crs_set.add(gws.get(m, 'crs'))
    for svc in root.find_all('gws.ext.ows.service'):
        crs_set.update(gws.get(svc, 'supported_crs', default=[]))
    cfg['services']['wms']['srs'] = sorted(crs_set)
    return cfg
def create_and_save(root: t.IRootObject, path):
    """Generate the mapproxy config and write it to *path*, validating first.

    The config is written to a throwaway '<path>.test.yaml' and loaded by
    mapproxy itself; only when that succeeds is the real file written.
    """
    probe_path = path + '.test.yaml'
    gws.tools.os2.unlink(probe_path)

    cfg = create(root)
    if not cfg:
        gws.log.warn('mapproxy: NO CONFIG')
        gws.tools.os2.unlink(path)
        return

    gws.write_file(probe_path, yaml.dump(cfg))

    # make sure the config is ok before starting the server!
    try:
        make_wsgi_app(probe_path)
    except Exception as e:
        raise gws.config.MapproxyConfigError(*e.args) from e

    gws.tools.os2.unlink(probe_path)
    gws.write_file(path, yaml.dump(cfg))
| [
"gws.tools.os2.unlink",
"gws.config.MapproxyConfigError",
"gws.random_string",
"yaml.dump",
"gws.log.warn",
"mapproxy.wsgiapp.make_wsgi_app",
"gws.get",
"gws.tools.json2.to_hash"
] | [((3969, 4000), 'gws.tools.os2.unlink', 'gws.tools.os2.unlink', (['test_path'], {}), '(test_path)\n', (3989, 4000), False, 'import gws\n'), ((4379, 4410), 'gws.tools.os2.unlink', 'gws.tools.os2.unlink', (['test_path'], {}), '(test_path)\n', (4399, 4410), False, 'import gws\n'), ((4049, 4084), 'gws.log.warn', 'gws.log.warn', (['"""mapproxy: NO CONFIG"""'], {}), "('mapproxy: NO CONFIG')\n", (4061, 4084), False, 'import gws\n'), ((4093, 4119), 'gws.tools.os2.unlink', 'gws.tools.os2.unlink', (['path'], {}), '(path)\n', (4113, 4119), False, 'import gws\n'), ((4166, 4180), 'yaml.dump', 'yaml.dump', (['cfg'], {}), '(cfg)\n', (4175, 4180), False, 'import yaml\n'), ((4261, 4285), 'mapproxy.wsgiapp.make_wsgi_app', 'make_wsgi_app', (['test_path'], {}), '(test_path)\n', (4274, 4285), False, 'from mapproxy.wsgiapp import make_wsgi_app\n'), ((4436, 4450), 'yaml.dump', 'yaml.dump', (['cfg'], {}), '(cfg)\n', (4445, 4450), False, 'import yaml\n'), ((2267, 2293), 'gws.tools.json2.to_hash', 'gws.tools.json2.to_hash', (['c'], {}), '(c)\n', (2290, 2293), False, 'import gws\n'), ((3685, 3702), 'gws.get', 'gws.get', (['p', '"""crs"""'], {}), "(p, 'crs')\n", (3692, 3702), False, 'import gws\n'), ((3774, 3813), 'gws.get', 'gws.get', (['p', '"""supported_crs"""'], {'default': '[]'}), "(p, 'supported_crs', default=[])\n", (3781, 3813), False, 'import gws\n'), ((4327, 4366), 'gws.config.MapproxyConfigError', 'gws.config.MapproxyConfigError', (['*e.args'], {}), '(*e.args)\n', (4357, 4366), False, 'import gws\n'), ((1055, 1076), 'gws.random_string', 'gws.random_string', (['(16)'], {}), '(16)\n', (1072, 1076), False, 'import gws\n'), ((1146, 1167), 'gws.random_string', 'gws.random_string', (['(16)'], {}), '(16)\n', (1163, 1167), False, 'import gws\n')] |
from flask import Flask
app = Flask(__name__)

# In-memory "database" of blog posts, keyed by integer post id.
posts = {
    0: {
        'title': 'Hello, world',
        'content': 'This is my first blog post!'
    }
}
@app.route('/')
def home():
    """Landing page: a plain-text greeting."""
    return 'Hello, world!'
# This route expects to be in the format of /post/0 (for example).
# Then it will pass 0 as argument to the post() function.
@app.route('/post/<int:post_id>')
def post(post_id):
    """
    This function runs when a user visits route such as:
    - /post/0
    - /post/2
    - /post/99
    But not:
    - /post/a
    - /post/something/else
    - /posts/1
    Then we get the 0 as a number (not a string!) as argument, so we can use it.
    """
    post = posts.get(post_id)  # Retrieve the post from our global posts dictionary by the ID passed in as argument.
    if post is None:
        # Bug fix: an unknown id used to crash with a TypeError (HTTP 500);
        # return a proper 404 response instead.
        return 'Post not found', 404
    return f"Post {post['title']}, content:\n\n{post['content']}"  # Return the title and content formatted a bit nicer.
# Run the Flask development server (debug mode) when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
| [
"flask.Flask"
] | [((32, 47), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (37, 47), False, 'from flask import Flask\n')] |
from datetime import date
ano_nascimento = int(input("Informe seu ano de nascimento: "))
ano_atual = date.today().year
idade = ano_atual - ano_nascimento
if(idade <= 9):
categoria = "MIRIM"
elif(idade <= 14):
categoria = "INFANTIL"
elif(idade <= 19):
categoria = "JUNIOR"
elif(idade <= 20):
categoria = "SENIOR"
else:
categoria = "MASTER"
print("Ano de nascimento: {0}.\nIdade: {1} anos.\nCategoria: {2}.".format(ano_nascimento, idade, categoria))
| [
"datetime.date.today"
] | [((103, 115), 'datetime.date.today', 'date.today', ([], {}), '()\n', (113, 115), False, 'from datetime import date\n')] |
from impala.dbapi import connect
import api.resources.configurator as Config
def create_connection():
impala_host, impala_port = Config.impala()
db = Config.db()
conn = connect(host=impala_host, port=int(impala_port),database=db)
return conn.cursor()
def execute_query(query,fetch=False):
impala_cursor = create_connection()
impala_cursor.execute(query)
return impala_cursor if not fetch else impala_cursor.fetchall()
def execute_query_as_list(query):
query_results = execute_query(query)
row_result = {}
results = []
for row in query_results:
x=0
for header in query_results.description:
row_result[header[0]] = row[x]
x +=1
results.append(row_result)
row_result = {}
return results
| [
"api.resources.configurator.db",
"api.resources.configurator.impala"
] | [((136, 151), 'api.resources.configurator.impala', 'Config.impala', ([], {}), '()\n', (149, 151), True, 'import api.resources.configurator as Config\n'), ((161, 172), 'api.resources.configurator.db', 'Config.db', ([], {}), '()\n', (170, 172), True, 'import api.resources.configurator as Config\n')] |
from spacenetutilities.labeltools import coreLabelTools
import json
import glob
import argparse
from datetime import datetime
import os
def modifyTimeField(geoJson, geoJsonNew, featureItemsToAdd=['ingest_tim', 'ingest_time', 'edit_date'], featureKeyListToRemove=[]):
now = datetime.today()
with open(geoJson) as json_data:
d = json.load(json_data)
featureList = d['features']
newFeatureList = []
for feature in featureList:
tmpFeature = dict(feature)
for featureKey in featureKeyListToRemove:
if featureKey in tmpFeature['properties']:
del tmpFeature['properties'][featureKey]
for featureKey in featureItemsToAdd:
if not (featureKey in tmpFeature['properties']):
print('inserting missing field')
print(now.isoformat())
tmpFeature['properties'][featureKey] = now.isoformat()
else:
if not tmpFeature['properties'][featureKey]:
print('filling empty field')
tmpFeature['properties'][featureKey] = now.isoformat()
newFeatureList.append(tmpFeature)
d['features']=newFeatureList
if os.path.exists(geoJsonNew):
os.remove(geoJsonNew)
with open(geoJsonNew, 'w') as json_data:
json.dump(d, json_data)
def removeIdFieldFromJsonEntries(geoJson, geoJsonNew,
                                 featureKeyListToRemove=('Id', 'id'),
                                 featureItemsToAdd=None):
    """Copy a GeoJSON file, stripping ID-like properties from every feature.

    Parameters
    ----------
    geoJson : str
        Path of the source GeoJSON file (read only).
    geoJsonNew : str
        Path of the destination file; overwritten if it already exists.
    featureKeyListToRemove : iterable of str
        Property names to delete from each feature's ``properties``.
    featureItemsToAdd : dict or None
        Extra key/value pairs merged into each *feature* (top level, not
        into ``properties``); None means add nothing.

    Note: the mutable default arguments of the original (``['Id', 'id']``,
    ``{}``) were replaced with a tuple and a None sentinel; behavior for
    callers is unchanged.
    """
    if featureItemsToAdd is None:
        featureItemsToAdd = {}
    with open(geoJson) as json_data:
        d = json.load(json_data)
    newFeatureList = []
    for feature in d['features']:
        tmpFeature = dict(feature)  # shallow copy; 'properties' is still shared
        props = tmpFeature['properties']
        for featureKey in featureKeyListToRemove:
            if featureKey in props:
                del props[featureKey]
        tmpFeature.update(featureItemsToAdd)
        newFeatureList.append(tmpFeature)
    d['features'] = newFeatureList
    # open(..., 'w') would truncate anyway; the explicit remove is kept for
    # parity with the original behavior.
    if os.path.exists(geoJsonNew):
        os.remove(geoJsonNew)
    with open(geoJsonNew, 'w') as json_data:
        json.dump(d, json_data)
def removeIdinGeoJSONFolder(folder, modifier='noid'):
    """Run removeIdFieldFromJsonEntries over every *.geojson file in *folder*.

    Each input ``x.geojson`` produces a sibling ``x<modifier>.geojson`` with
    the default ID fields stripped; the inputs are left untouched.
    """
    new_suffix = '{}.geojson'.format(modifier)
    for geojsonName in glob.glob(os.path.join(folder, '*.geojson')):
        removeIdFieldFromJsonEntries(
            geojsonName, geojsonName.replace('.geojson', new_suffix))
| [
"os.path.exists",
"os.path.join",
"json.load",
"datetime.datetime.today",
"json.dump",
"os.remove"
] | [((279, 295), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (293, 295), False, 'from datetime import datetime\n'), ((1207, 1233), 'os.path.exists', 'os.path.exists', (['geoJsonNew'], {}), '(geoJsonNew)\n', (1221, 1233), False, 'import os\n'), ((1945, 1971), 'os.path.exists', 'os.path.exists', (['geoJsonNew'], {}), '(geoJsonNew)\n', (1959, 1971), False, 'import os\n'), ((345, 365), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (354, 365), False, 'import json\n'), ((1243, 1264), 'os.remove', 'os.remove', (['geoJsonNew'], {}), '(geoJsonNew)\n', (1252, 1264), False, 'import os\n'), ((1318, 1341), 'json.dump', 'json.dump', (['d', 'json_data'], {}), '(d, json_data)\n', (1327, 1341), False, 'import json\n'), ((1507, 1527), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (1516, 1527), False, 'import json\n'), ((1981, 2002), 'os.remove', 'os.remove', (['geoJsonNew'], {}), '(geoJsonNew)\n', (1990, 2002), False, 'import os\n'), ((2056, 2079), 'json.dump', 'json.dump', (['d', 'json_data'], {}), '(d, json_data)\n', (2065, 2079), False, 'import json\n'), ((2165, 2198), 'os.path.join', 'os.path.join', (['folder', '"""*.geojson"""'], {}), "(folder, '*.geojson')\n", (2177, 2198), False, 'import os\n')] |
from os import times
import smtplib
from time import sleep
from getpass import getpass
import sys
class colors():
red = "\u001b[31m"
yel = "\u001b[33m"
gre = "\u001b[32m"
blu = "\u001b[34m"
pur = "\u001b[35m"
cya = "\u001b[36m"
whi = "\u001b[37m"
res = "\u001b[0m"
bred = "\u001b[31;1m"
byel = "\u001b[33;1m"
bgre = "\u001b[32;1m"
bblu = "\u001b[34;1m"
bpur = "\u001b[35;1m"
bcya = "\u001b[36;1m"
bwhi = "\u001b[37;1m"
class OptionNotValid(Exception):
### Exception to be raised if an invalid choice is made.
def __init__(self, prv):
self.prv = prv
self.message = ("The given option wasn't valid. Please try again, using the numbers provided.")
super().__init__(self.message)
mdbg = False
print(colors.bcya + "╔═════════════════════════════════════════════════════════╗")
print("║ ________ __ __ ║")
print("║ / ____/ ____ ____ ____/ ____ _____ _/ /____ _____ ║")
print("║ / /_ / / __ \/ __ \/ __ / __ `/ __ `/ __/ _ \/ ___/ ║")
print("║ / __/ / / /_/ / /_/ / /_/ / /_/ / /_/ / /_/ __(__ ) ║")
print("║ /_/ /_/\____/\____/\____/\__ /\____/\__/\___/____/ ║")
print("║ /____/ by simbyte ║")
print("╚═════════════════════════════════════════════════════════╝" + colors.res)
# Floodgates vA_0402-1
def flooder():
# This section verifies your provider to allow a proper SMTP connection.
print('''┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Please select your email provider from the following list; ┃
┃ ┃
┃ ▷ 1: Gmail ┃
┃ ▷ 2: Live/Outlook/Hotmail ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛''')
prov = input('> ').lower()
if prov == "1":
mail = smtplib.SMTP('smtp.gmail.com',587)
print(colors.bpur + "╼Set mail provider to Gmail.╾ " + colors.res)
print('''┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Please enter the email address you wish to send messages from. ┃
┃ ''' + colors.bred + '<< Warning >>' + colors.res + ''' It is recommended to use a burner address. ┃
┃ Your email address must use the provider listed above. ┃
┃ ''' + colors.bred + '<< Warning >>' + colors.res + ''' You will not be able to use this through a ┃
┃ Google account if you have Two-Step Verification, ┃
┃ or have not allowed less-secure apps. ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛''')
elif prov == "2":
mail = smtplib.SMTP('smtp-mail.outlook.com',587)
print(colors.bpur + "╼Set mail provider to Live.╾ " + colors.res)
print('''┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Please enter the email address you wish to send messages from. ┃
┃ ''' + colors.bred + '<< Warning >>' + colors.res + ''' It is recommended to use a burner address. ┃
┃ Your email address must use the provider listed above. ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛''')
else:
raise OptionNotValid(prov)
# This section gets the information of the user to send the messages.
email = input("> ")
while True:
# This is an escape sequence allowing someone to type an escape sequence and retry their password.
print('''┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Please enter the password to the email you provided. ┃
┃ ''' + colors.bred + '<< Warning >>' + colors.res + ''' This will NOT echo back. ┃
┃ Ensure you typed it correctly. If you believe you have ┃
┃ input it incorrectly, you can enter ;;ex1 at any point ┃
┃ to retry its entry. ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛''')
password = getpass("> ")
if password.find(";;ex1") == -1:
break
else:
pass
# This section verifies the target's info, the message, and the frequency of sending.
target = input("Please enter target email address: ")
print("Please enter the mail body you wish to send:")
message = input("")
getCountDelay = True
timesLooped = 0
while getCountDelay:
try:
print("Please enter the number of messages you wish to send:")
count = int(input("> "))
if count > 1:
print("Please enter the delay in-between sending you want, in seconds.")
print(colors.bred + '<< Warning >>' + colors.res + ' Numbers closer to 0 may get limited, and result in the blocking of your provided email address.')
print("The delay will be ignored if your message count is 1.")
delay = int(input("> "))
getCountDelay = False
else:
delay = 0
getCountDelay = False
except ValueError as e:
en = str(type(e).__name__)
print(colors.bred + 'Oops!' + colors.bblu + ' "' + str(e.__context__) + '" is not a valid number.' + colors.res)
print("Please try again. Make sure that your input is a number (e.g. 1, 2, 3, 4 etc.)")
print(colors.bcya + '<< Notice >>' + colors.res + ' Beginning to send messages. Please wait...' + colors.gre)
# Establishes the connection to the SMTP server for sending.
if mdbg:
mail.set_debuglevel(1)
mail.ehlo()
mail.starttls()
mail.login(email,password)
# Sends the message(s)!
for x in range(0,count):
mail.sendmail(email,target,message)
print(colors.res + "Messages sent: " + str(x+1) + colors.gre)
sleep(delay)
mail.close()
print(colors.bgre + "Success! All messages were successfully sent to the recipient you provided." + colors.res)
try:
sleep(1)
for i in range (0,2):
print('')
i += 1
print(colors.bgre + '''
d8b
88P
888
?88 d8P d8P d8888b 888 d8888b d8888b 88bd8b,d88b d8888b
d88 d8P' d8P d8b_,dp 88 d8P' ` d8P' ?88 88P'`?8P'?8 d8b_,dP
?8b ,88b ,88' 88b 88b 88b 88b d88 d88 d88 88 88b
`?888P'888P' ` ?888P' 88b`?888P` ?8888P d88' d88' d88 `?888P' ''' + colors.res)
# This is the main menu. This allows you to enable debugging if you need.
while True:
if not mdbg:
print('''┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ What do you want to do today? ┃
┃ ┃
┃ ▷ 1: Begin Flooding ┃
┃ ▷ 2: Toggle Debugging (OFF) ┃
┃ ▷ 3: Exit ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛''')
else:
print('''┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ What do you want to do today? ┃
┃ ┃
┃ ▷ 1: Begin Flooding ┃
┃ ▷ 2: Toggle Debugging (ON) ┃
┃ ▷ 3: Exit ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛''')
opt1 = input("> ")
if opt1 == "1":
flooder()
elif opt1 == "2":
if not mdbg:
mdbg = True
else:
mdbg = False
print("")
print(colors.bpur + "Toggled debugging to " + colors.bblu + str(mdbg) + colors.bpur + "!" + colors.res)
print("")
elif opt1 == "3":
print('''┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Are you sure you want to exit? ┃
┃ ┃
┃ ▷ 1: Yes ┃
┃ ▷ 2: No ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛''')
opt2 = input("> ")
if opt2 == "1":
sys.exit(colors.bgre + "Thank you for using Floodgates!" + colors.res)
elif opt2 == "2":
pass
else:
raise OptionNotValid(opt2)
else:
raise OptionNotValid(opt1)
except Exception as e:
en = str(type(e).__name__)
if en == "SMTPAuthenticationError":
print(colors.bred + '[FATAL] Something went wrong!' + colors.res)
print('The SMTP connection closed due to an authentication issue.')
print('Python specifically returned with this information;')
print(en + ": " + colors.bblu + str(e) + colors.res)
else:
print(colors.bred + '[FATAL] Something went wrong!' + colors.res)
print('Python reported an error with the following information:')
print(colors.yel + en + ": " + colors.bblu + str(e) + colors.res)
print('Try to run the script again. Make sure all values are valid.')
| [
"smtplib.SMTP",
"getpass.getpass",
"time.sleep",
"sys.exit"
] | [((6032, 6040), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (6037, 6040), False, 'from time import sleep\n'), ((1948, 1983), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(587)'], {}), "('smtp.gmail.com', 587)\n", (1960, 1983), False, 'import smtplib\n'), ((4043, 4056), 'getpass.getpass', 'getpass', (['"""> """'], {}), "('> ')\n", (4050, 4056), False, 'from getpass import getpass\n'), ((5876, 5888), 'time.sleep', 'sleep', (['delay'], {}), '(delay)\n', (5881, 5888), False, 'from time import sleep\n'), ((2734, 2776), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp-mail.outlook.com"""', '(587)'], {}), "('smtp-mail.outlook.com', 587)\n", (2746, 2776), False, 'import smtplib\n'), ((7934, 8004), 'sys.exit', 'sys.exit', (["(colors.bgre + 'Thank you for using Floodgates!' + colors.res)"], {}), "(colors.bgre + 'Thank you for using Floodgates!' + colors.res)\n", (7942, 8004), False, 'import sys\n')] |
#!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import wttest
from wtscenario import make_scenarios
# test_rollback_to_stable24.py
# Exercise a recno-counting bug in column store.
#
# Prior to August 2021 a cell for which there's a pending stable update was counted (in the
# column-store RTS code) as having RLE count 1 regardless of what the actual count was.
#
# In order to exploit this we have to do janky things with timestamps, but I think they're
# allowable.
#
# Construct a cell with RLE count of 3 by writing 3 copies of aaaaaa at timestamp 10.
# Then at the next key write bbbbbb at timestamp 10 and cccccc at timestamp 50.
# Evict the page to reconcile it and produce the RLE cell.
#
# Then post an update to the first key of the RLE cell at timestamp 30 (to dddddd), and roll
# back to 40.
#
# Reading at 40, we should at that point see dddddd and two aaaaaa's followed by bbbbbb, but
# with the bad counting we get a key error on the second key.
#
# This happens because it goes to process key 4 but thinks it's on key 2; it finds that it
# needs to roll back the value it's looking at (the cccccc from timestamp 50) but because it
# thinks it's on key to it asks the history store for key 2 and finds nothing. (The bbbbbb
# from timestamp 10 is in the history store, but under key 4; there's nothing in the history
# store for key 2.) So it issues a tombstone, and issues it for key 2, so key 2 improperly
# disappears.
#
# Run this test on rows as well as columns to help make sure the test itself is valid (and
# stays so over time...)
#
# Don't run it on FLCS because FLCS doesn't do RLE encoding so there's no point.
class test_rollback_to_stable24(wttest.WiredTigerTestCase):
    """Regression test for RLE recno miscounting in column-store RTS.

    Builds an RLE cell of run length 3, posts a stable update to its first
    key, rolls back to a stable timestamp, and verifies no key improperly
    disappears.  The row-store scenario validates the test logic itself.
    """
    conn_config = 'in_memory=false'
    # FLCS is deliberately absent: it does no RLE encoding, so the bug
    # cannot be exercised there (see file header comment).
    key_format_values = [
        ('column', dict(key_format='r')),
        ('row_integer', dict(key_format='i')),
    ]
    scenarios = make_scenarios(key_format_values)
    def test_rollback_to_stable24(self):
        """Run the RLE-count rollback scenario described in the file header."""
        # Create a table without logging.
        uri = "table:rollback_to_stable24"
        self.session.create(uri, 'key_format={},value_format=S'.format(self.key_format))
        # Pin oldest timestamp to 5.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(5))
        # Start stable timestamp at 5.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(5))
        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100
        s = self.conn.open_session()
        cursor = s.open_cursor(uri)
        # Write some keys at time 10: three copies of value_a become one
        # RLE cell with count 3 once the page is reconciled.
        s.begin_transaction()
        cursor[1] = value_a
        cursor[2] = value_a
        cursor[3] = value_a
        cursor[4] = value_b
        s.commit_transaction('commit_timestamp=' + self.timestamp_str(10))
        # Update key 4 at time 50 -- past the eventual stable point, so RTS
        # must roll this one back.
        s.begin_transaction()
        cursor[4] = value_c
        s.commit_transaction('commit_timestamp=' + self.timestamp_str(50))
        cursor.close()
        # Evict the page to force reconciliation.
        evict_cursor = self.session.open_cursor(uri, None, "debug=(release_evict)")
        s.begin_transaction()
        # Search the key to evict it.
        v = evict_cursor[1]
        self.assertEqual(v, value_a)
        self.assertEqual(evict_cursor.reset(), 0)
        s.rollback_transaction()
        evict_cursor.close()
        # Now update key 1 (the first key of the RLE cell) at time 30.
        cursor = s.open_cursor(uri)
        s.begin_transaction()
        cursor[1] = value_d
        s.commit_transaction('commit_timestamp=' + self.timestamp_str(30))
        cursor.close()
        # Roll back to 40: key 1's update at 30 survives, key 4's at 50 goes.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(40))
        self.conn.rollback_to_stable()
        # Now read at 40.  With the recno-counting bug, key 2 would get an
        # improper tombstone and vanish (see file header).
        cursor = s.open_cursor(uri)
        s.begin_transaction('read_timestamp=' + self.timestamp_str(40))
        self.assertEqual(cursor[1], value_d)
        self.assertEqual(cursor[2], value_a)
        self.assertEqual(cursor[3], value_a)
        self.assertEqual(cursor[4], value_b)
        s.rollback_transaction()
        cursor.close()
| [
"wtscenario.make_scenarios"
] | [((3127, 3160), 'wtscenario.make_scenarios', 'make_scenarios', (['key_format_values'], {}), '(key_format_values)\n', (3141, 3160), False, 'from wtscenario import make_scenarios\n')] |
from fastapi import APIRouter
from ormar import Model
from typing import Type, Dict, NewType
from pydantic import BaseModel
from fastapi_helpers.crud import BaseCrud
from typing import (
List, Dict, Optional, Union, TypeVar
)
from fastapi import (
APIRouter, Request, Depends,
)
from fastapi_helpers.crud import BaseCrud
from fastapi_helpers.routes.models import PaginateOptions, PaginateResult
from fastapi_helpers.routes.routers.DefaultModelRouter import DefaultModelRouter
# Path suffix shared by all single-item routes (read/update/delete).
ID_ROUTE_LABEL = "/{id}/"
# TypeVar bound to ormar.Model so the crud/router generics line up.
T = TypeVar("T", bound = Model)
# Module-level cache of generated pydantic schemas, keyed by model name,
# so repeated get_router calls for the same model reuse one schema.
pydantic_instances:Dict[str, Type[BaseModel]] ={}
def get_router(
    model: Type[Model],
    crud: BaseCrud[T],
    headers: Dict = None,
    model_in: Optional[Type[T]] = None,
    model_out: Optional[Type[T]] = None,
) -> DefaultModelRouter[T]:
    """Build a CRUD router (list/read/create/update/delete) for *model*.

    The pydantic schema generated from the ormar model is cached in the
    module-level ``pydantic_instances`` dict, so repeated calls for the
    same model reuse it.  ``model_in`` / ``model_out`` optionally override
    the generated input/output schemas; ``headers`` is forwarded to the
    router instance.
    """
    global pydantic_instances
    model_name = model.get_name()
    # Generate (and cache) the pydantic schema for this ormar model.
    if model_name not in pydantic_instances:
        pydantic_instances[model_name] = model.get_pydantic()
    pydantic_instance = pydantic_instances[model_name]
    # Distinct NewType names give each model its own schema identity in the
    # generated OpenAPI docs.
    ModelType = NewType(f"{model_name}", pydantic_instance)
    # Primary-key Python type, pulled from the ormar field descriptor.
    key_type = model.pk._field.__type__
    if model_in is None:
        ModelIn = NewType(f"{model_name}In", pydantic_instance)
    else:
        ModelIn = model_in
    if model_out is None:
        ModelOut = NewType(f"{model_name}Out", pydantic_instance)
    else:
        ModelOut = model_out
    KeyType = NewType(f"{model_name}_{key_type}", key_type)
    class ModelRouter(DefaultModelRouter):
        # Thin subclass whose only job is to pin the per-model NewType
        # annotations onto the generic DefaultModelRouter handlers, so that
        # FastAPI validation and OpenAPI docs see model-specific types.
        def __init__(
            self,
            model: ModelType,
            crud: BaseCrud,
            headers: Dict = None,
            response_model: Optional[ModelOut] = None,
        ) -> None:
            self.model = model
            self.crud = crud
            self.router = APIRouter()
            self.response_model = response_model
            # Route table: collection root plus the "/{id}/" item routes.
            self.router.add_api_route("/", self.read_list, methods=["GET"])
            self.router.add_api_route(ID_ROUTE_LABEL, self.read, methods=["GET"])
            self.router.add_api_route("/", self.create, methods=["POST"])
            self.router.add_api_route(ID_ROUTE_LABEL, self.update, methods=["PUT"])
            self.router.add_api_route(ID_ROUTE_LABEL, self.delete, methods=["DELETE"])
            self.headers = headers
        async def read_list(
            self,
            *,
            request: Request,
            options: PaginateOptions = Depends(),
        ) -> Union[Union[List[ModelOut], PaginateResult[ModelOut]], Dict]:
            return await super().read_list(
                request=request,
                options=options,
            )
        async def read(
            self,
            *,
            id: KeyType,
        ) -> Optional[Union[ModelOut, Dict]]:
            return await super().read(
                id=id,
            )
        async def create(
            self,
            *,
            model_in: ModelIn,
        ) -> Optional[Union[ModelOut, Dict]]:
            return await super().create(
                model_in=model_in,
            )
        async def update(
            self,
            *,
            id: KeyType,
            model_in: Union[ModelIn, Dict],
        ) -> Optional[Union[ModelOut, Dict]]:
            return await super().update(
                id=id,
                model_in=model_in,
            )
        async def delete(
            self,
            *,
            id: KeyType,
        ) -> Optional[Union[ModelOut, Dict]]:
            return await super().delete(
                id=id,
            )
    # When a custom output schema was supplied, pass it through as the
    # router's response_model; otherwise leave response_model as None.
    if model_out:
        return ModelRouter(model, crud, headers, ModelOut)
    return ModelRouter(model, crud, headers,)
"fastapi.APIRouter",
"fastapi.Depends",
"typing.NewType",
"typing.TypeVar"
] | [((517, 542), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': 'Model'}), "('T', bound=Model)\n", (524, 542), False, 'from typing import List, Dict, Optional, Union, TypeVar\n'), ((1040, 1083), 'typing.NewType', 'NewType', (['f"""{model_name}"""', 'pydantic_instance'], {}), "(f'{model_name}', pydantic_instance)\n", (1047, 1083), False, 'from typing import Type, Dict, NewType\n'), ((1402, 1447), 'typing.NewType', 'NewType', (['f"""{model_name}_{key_type}"""', 'key_type'], {}), "(f'{model_name}_{key_type}', key_type)\n", (1409, 1447), False, 'from typing import Type, Dict, NewType\n'), ((1168, 1213), 'typing.NewType', 'NewType', (['f"""{model_name}In"""', 'pydantic_instance'], {}), "(f'{model_name}In', pydantic_instance)\n", (1175, 1213), False, 'from typing import Type, Dict, NewType\n'), ((1297, 1343), 'typing.NewType', 'NewType', (['f"""{model_name}Out"""', 'pydantic_instance'], {}), "(f'{model_name}Out', pydantic_instance)\n", (1304, 1343), False, 'from typing import Type, Dict, NewType\n'), ((1785, 1796), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (1794, 1796), False, 'from fastapi import APIRouter, Request, Depends\n'), ((2416, 2425), 'fastapi.Depends', 'Depends', ([], {}), '()\n', (2423, 2425), False, 'from fastapi import APIRouter, Request, Depends\n')] |
from hlo import ShardingSpec, ShardingSpecType
from cluster_env import ClusterEnvironment
from common import compute_bytes
def test_tile():
    """Check ShardingSpec.tile layouts on a 2x3 device mesh.

    Verifies tile_assignment_dimensions, the device enumeration order, and
    replicate_on_last_tile_dim for 2-D and 3-D tensors under various
    (tensor dims -> mesh dims) mappings.
    """
    cluster_env = ClusterEnvironment([[0, 1, 2], [3, 4, 5]], [1,1], [1,1], None)
    # Fully tiled 2-D tensor: no replication dimension is appended.
    sharding = ShardingSpec.tile((12, 12), [0, 1], [0, 1], cluster_env)
    assert sharding.tile_assignment_dimensions == (2, 3)
    assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
    assert sharding.replicate_on_last_tile_dim == False
    # The same mapping with both lists reversed is equivalent.
    sharding = ShardingSpec.tile((12, 12), [1, 0], [1, 0], cluster_env)
    assert sharding.tile_assignment_dimensions == (2, 3)
    assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
    assert sharding.replicate_on_last_tile_dim == False
    # Transposed mapping: rows over mesh dim 1, columns over mesh dim 0,
    # so the device order interleaves across the two mesh rows.
    sharding = ShardingSpec.tile((12, 12), [0, 1], [1, 0], cluster_env)
    assert sharding.tile_assignment_dimensions == (3, 2)
    assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
    assert sharding.replicate_on_last_tile_dim == False
    # Partial mappings: the unused mesh dimension shows up as a trailing
    # replication dimension (replicate_on_last_tile_dim == True).
    sharding = ShardingSpec.tile((12, 12), [0], [0], cluster_env)
    assert sharding.tile_assignment_dimensions == (2, 1, 3)
    assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
    assert sharding.replicate_on_last_tile_dim == True
    sharding = ShardingSpec.tile((12, 12), [0], [1], cluster_env)
    assert sharding.tile_assignment_dimensions == (3, 1, 2)
    assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
    assert sharding.replicate_on_last_tile_dim == True
    sharding = ShardingSpec.tile((12, 12), [1], [1], cluster_env)
    assert sharding.tile_assignment_dimensions == (1, 3, 2)
    assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
    assert sharding.replicate_on_last_tile_dim == True
    sharding = ShardingSpec.tile((12, 12), [1], [0], cluster_env)
    assert sharding.tile_assignment_dimensions == (1, 2, 3)
    assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
    assert sharding.replicate_on_last_tile_dim == True
    # 3-D tensors: the untouched last tensor dimension keeps tile count 1.
    sharding = ShardingSpec.tile((12, 12, 12), [0, 1], [0, 1], cluster_env)
    assert sharding.tile_assignment_dimensions == (2, 3, 1)
    assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
    assert sharding.replicate_on_last_tile_dim == False
    sharding = ShardingSpec.tile((12, 12, 12), [0, 1], [1, 0], cluster_env)
    assert sharding.tile_assignment_dimensions == (3, 2, 1)
    assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
    assert sharding.replicate_on_last_tile_dim == False
    sharding = ShardingSpec.tile((12, 12, 12), [1], [0], cluster_env)
    assert sharding.tile_assignment_dimensions == (1, 2, 1, 3)
    assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
    assert sharding.replicate_on_last_tile_dim == True
assert sharding.replicate_on_last_tile_dim == True
def test_tile2():
    """Degenerate meshes: tiling along a size-1 mesh dim means replication.

    On a 1x4 (then a 4x1) mesh, mapping a tensor dim onto the size-4 mesh
    dim yields a real 4-way tiling, while mapping it onto the size-1 mesh
    dim collapses to a fully REPLICATED spec.
    """
    cluster_env = ClusterEnvironment([[0, 1, 2, 3]], [1,1], [1,1], None)
    sharding = ShardingSpec.tile((12, 12), [1], [1], cluster_env)
    assert sharding.tile_assignment_dimensions == (1, 4)
    assert sharding.tile_assignment_devices == (0, 1, 2, 3)
    assert sharding.replicate_on_last_tile_dim == False
    # Mesh dim 0 has size 1 here, so this tiling degenerates to replication.
    sharding = ShardingSpec.tile((12, 12), [1], [0], cluster_env)
    assert sharding.type == ShardingSpecType.REPLICATED
    # Same checks with the mesh transposed to 4x1.
    cluster_env = ClusterEnvironment([[0], [1], [2], [3]], [1,1], [1,1], None)
    sharding = ShardingSpec.tile((12, 12), [1], [0], cluster_env)
    assert sharding.tile_assignment_dimensions == (1, 4)
    assert sharding.tile_assignment_devices == (0, 1, 2, 3)
    assert sharding.replicate_on_last_tile_dim == False
    sharding = ShardingSpec.tile((12, 12), [1], [1], cluster_env)
    assert sharding.type == ShardingSpecType.REPLICATED
def test_tile3():
    """Print (not assert) the resharding cost from a split spec to a tiled
    spec on a 2x2 mesh.

    NOTE(review): this is a manual-inspection aid only -- it contains no
    assertions, so it never fails.
    """
    cluster_env = ClusterEnvironment([[0, 1], [2, 3]], [1,1], [1,1], None)
    shape = (12, 12)
    src = ShardingSpec.split(shape, 1, cluster_env)
    dst = ShardingSpec.tile(shape, [0], [0], cluster_env)
    print(src)
    print(dst)
    cost = cluster_env.resharding_cost(shape, src, dst)
    print(cost)
def assert_allclose(x, y):
    """Fail unless x matches y to within 1% relative error.

    The 1e-8 term keeps the division defined when y is zero.
    """
    relative_error = (x - y) / (y + 1e-8)
    assert abs(relative_error) < 0.01
def test_resharding_cost():
    """Check resharding costs between tilings on a 2x3 mesh.

    Identity reshards and pure re-labelings are expected to be free, while
    dropping a tiled mesh dimension costs an all-gather over that dimension.
    """
    cluster_env = ClusterEnvironment([[0, 1, 2], [3, 4, 5]], [1, 1], [1, 1], None)
    shape = (128, 128)
    # Identical specs: no communication.
    src = ShardingSpec.tile(shape, [0], [0], cluster_env)
    dst = ShardingSpec.tile(shape, [0], [0], cluster_env)
    cost = cluster_env.resharding_cost(shape, src, dst)
    assert_allclose(cost, 0)
    # Same mapping written with both lists reversed: still identical, free.
    src = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
    dst = ShardingSpec.tile(shape, [1, 0], [1, 0], cluster_env)
    cost = cluster_env.resharding_cost(shape, src, dst)
    assert_allclose(cost, 0)
    # Refining a 1-D tiling to a 2-D tiling: expected free.
    src = ShardingSpec.tile(shape, [0], [0], cluster_env)
    dst = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
    cost = cluster_env.resharding_cost(shape, src, dst)
    assert_allclose(cost, 0)
    # NOTE(review): this block is an exact duplicate of the previous one --
    # likely a copy/paste leftover; confirm whether a different case was
    # intended here.
    src = ShardingSpec.tile(shape, [0], [0], cluster_env)
    dst = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
    cost = cluster_env.resharding_cost(shape, src, dst)
    assert_allclose(cost, 0)
    # Coarsening 2-D -> 1-D tiling costs an all-gather along mesh dim 1.
    src = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
    dst = ShardingSpec.tile(shape, [0], [0], cluster_env)
    cost = cluster_env.resharding_cost(shape, src, dst)
    assert_allclose(cost, cluster_env.all_gather_cost(compute_bytes(shape), 1))
    # Going fully replicated costs all-gathers along both mesh dimensions.
    src = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
    dst = ShardingSpec.replicated(cluster_env)
    cost = cluster_env.resharding_cost(shape, src, dst)
    assert_allclose(cost, cluster_env.all_gather_cost(compute_bytes(shape), 0)
                        + cluster_env.all_gather_cost(compute_bytes(shape), 1))
def test_resharding_cost2():
    """On a 4x1 mesh, coarsening a fully tiled spec to a row tiling is free."""
    env = ClusterEnvironment([[0], [1], [2], [3]], [1, 1], [1, 1], None)
    tensor_shape = (128, 128)
    fully_tiled = ShardingSpec.tile(tensor_shape, [0, 1], [0, 1], env)
    row_tiled = ShardingSpec.tile(tensor_shape, [0], [0], env)
    assert_allclose(env.resharding_cost(tensor_shape, fully_tiled, row_tiled), 0)
if __name__ == "__main__":
    # Ad-hoc driver: run the checks directly.  test_tile3 is a print-only
    # manual inspection and stays disabled.
    test_tile()
    test_tile2()
    #test_tile3()
    test_resharding_cost()
    test_resharding_cost2()
| [
"hlo.ShardingSpec.tile",
"hlo.ShardingSpec.replicated",
"common.compute_bytes",
"hlo.ShardingSpec.split",
"cluster_env.ClusterEnvironment"
] | [((160, 224), 'cluster_env.ClusterEnvironment', 'ClusterEnvironment', (['[[0, 1, 2], [3, 4, 5]]', '[1, 1]', '[1, 1]', 'None'], {}), '([[0, 1, 2], [3, 4, 5]], [1, 1], [1, 1], None)\n', (178, 224), False, 'from cluster_env import ClusterEnvironment\n'), ((239, 295), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12)', '[0, 1]', '[0, 1]', 'cluster_env'], {}), '((12, 12), [0, 1], [0, 1], cluster_env)\n', (256, 295), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((491, 547), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12)', '[1, 0]', '[1, 0]', 'cluster_env'], {}), '((12, 12), [1, 0], [1, 0], cluster_env)\n', (508, 547), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((743, 799), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12)', '[0, 1]', '[1, 0]', 'cluster_env'], {}), '((12, 12), [0, 1], [1, 0], cluster_env)\n', (760, 799), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((995, 1045), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12)', '[0]', '[0]', 'cluster_env'], {}), '((12, 12), [0], [0], cluster_env)\n', (1012, 1045), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((1243, 1293), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12)', '[0]', '[1]', 'cluster_env'], {}), '((12, 12), [0], [1], cluster_env)\n', (1260, 1293), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((1491, 1541), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12)', '[1]', '[1]', 'cluster_env'], {}), '((12, 12), [1], [1], cluster_env)\n', (1508, 1541), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((1739, 1789), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12)', '[1]', '[0]', 'cluster_env'], {}), '((12, 12), [1], [0], cluster_env)\n', (1756, 1789), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((1987, 2047), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12, 12)', '[0, 1]', '[0, 1]', 'cluster_env'], {}), '((12, 12, 12), [0, 1], [0, 1], 
cluster_env)\n', (2004, 2047), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((2246, 2306), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12, 12)', '[0, 1]', '[1, 0]', 'cluster_env'], {}), '((12, 12, 12), [0, 1], [1, 0], cluster_env)\n', (2263, 2306), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((2505, 2559), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12, 12)', '[1]', '[0]', 'cluster_env'], {}), '((12, 12, 12), [1], [0], cluster_env)\n', (2522, 2559), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((2782, 2838), 'cluster_env.ClusterEnvironment', 'ClusterEnvironment', (['[[0, 1, 2, 3]]', '[1, 1]', '[1, 1]', 'None'], {}), '([[0, 1, 2, 3]], [1, 1], [1, 1], None)\n', (2800, 2838), False, 'from cluster_env import ClusterEnvironment\n'), ((2852, 2902), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12)', '[1]', '[1]', 'cluster_env'], {}), '((12, 12), [1], [1], cluster_env)\n', (2869, 2902), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((3092, 3142), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12)', '[1]', '[0]', 'cluster_env'], {}), '((12, 12), [1], [0], cluster_env)\n', (3109, 3142), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((3218, 3280), 'cluster_env.ClusterEnvironment', 'ClusterEnvironment', (['[[0], [1], [2], [3]]', '[1, 1]', '[1, 1]', 'None'], {}), '([[0], [1], [2], [3]], [1, 1], [1, 1], None)\n', (3236, 3280), False, 'from cluster_env import ClusterEnvironment\n'), ((3294, 3344), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12)', '[1]', '[0]', 'cluster_env'], {}), '((12, 12), [1], [0], cluster_env)\n', (3311, 3344), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((3534, 3584), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['(12, 12)', '[1]', '[1]', 'cluster_env'], {}), '((12, 12), [1], [1], cluster_env)\n', (3551, 3584), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((3679, 3737), 'cluster_env.ClusterEnvironment', 
'ClusterEnvironment', (['[[0, 1], [2, 3]]', '[1, 1]', '[1, 1]', 'None'], {}), '([[0, 1], [2, 3]], [1, 1], [1, 1], None)\n', (3697, 3737), False, 'from cluster_env import ClusterEnvironment\n'), ((3767, 3808), 'hlo.ShardingSpec.split', 'ShardingSpec.split', (['shape', '(1)', 'cluster_env'], {}), '(shape, 1, cluster_env)\n', (3785, 3808), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((3819, 3866), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0]', '[0]', 'cluster_env'], {}), '(shape, [0], [0], cluster_env)\n', (3836, 3866), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((4093, 4157), 'cluster_env.ClusterEnvironment', 'ClusterEnvironment', (['[[0, 1, 2], [3, 4, 5]]', '[1, 1]', '[1, 1]', 'None'], {}), '([[0, 1, 2], [3, 4, 5]], [1, 1], [1, 1], None)\n', (4111, 4157), False, 'from cluster_env import ClusterEnvironment\n'), ((4192, 4239), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0]', '[0]', 'cluster_env'], {}), '(shape, [0], [0], cluster_env)\n', (4209, 4239), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((4250, 4297), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0]', '[0]', 'cluster_env'], {}), '(shape, [0], [0], cluster_env)\n', (4267, 4297), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((4394, 4447), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0, 1]', '[0, 1]', 'cluster_env'], {}), '(shape, [0, 1], [0, 1], cluster_env)\n', (4411, 4447), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((4458, 4511), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[1, 0]', '[1, 0]', 'cluster_env'], {}), '(shape, [1, 0], [1, 0], cluster_env)\n', (4475, 4511), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((4608, 4655), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0]', '[0]', 'cluster_env'], {}), '(shape, [0], [0], cluster_env)\n', (4625, 4655), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((4666, 4719), 
'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0, 1]', '[0, 1]', 'cluster_env'], {}), '(shape, [0, 1], [0, 1], cluster_env)\n', (4683, 4719), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((4816, 4863), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0]', '[0]', 'cluster_env'], {}), '(shape, [0], [0], cluster_env)\n', (4833, 4863), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((4874, 4927), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0, 1]', '[0, 1]', 'cluster_env'], {}), '(shape, [0, 1], [0, 1], cluster_env)\n', (4891, 4927), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((5024, 5077), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0, 1]', '[0, 1]', 'cluster_env'], {}), '(shape, [0, 1], [0, 1], cluster_env)\n', (5041, 5077), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((5088, 5135), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0]', '[0]', 'cluster_env'], {}), '(shape, [0], [0], cluster_env)\n', (5105, 5135), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((5283, 5336), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0, 1]', '[0, 1]', 'cluster_env'], {}), '(shape, [0, 1], [0, 1], cluster_env)\n', (5300, 5336), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((5347, 5383), 'hlo.ShardingSpec.replicated', 'ShardingSpec.replicated', (['cluster_env'], {}), '(cluster_env)\n', (5370, 5383), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((5648, 5710), 'cluster_env.ClusterEnvironment', 'ClusterEnvironment', (['[[0], [1], [2], [3]]', '[1, 1]', '[1, 1]', 'None'], {}), '([[0], [1], [2], [3]], [1, 1], [1, 1], None)\n', (5666, 5710), False, 'from cluster_env import ClusterEnvironment\n'), ((5743, 5796), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0, 1]', '[0, 1]', 'cluster_env'], {}), '(shape, [0, 1], [0, 1], cluster_env)\n', (5760, 5796), False, 'from hlo import ShardingSpec, 
ShardingSpecType\n'), ((5807, 5854), 'hlo.ShardingSpec.tile', 'ShardingSpec.tile', (['shape', '[0]', '[0]', 'cluster_env'], {}), '(shape, [0], [0], cluster_env)\n', (5824, 5854), False, 'from hlo import ShardingSpec, ShardingSpecType\n'), ((5246, 5266), 'common.compute_bytes', 'compute_bytes', (['shape'], {}), '(shape)\n', (5259, 5266), False, 'from common import compute_bytes\n'), ((5494, 5514), 'common.compute_bytes', 'compute_bytes', (['shape'], {}), '(shape)\n', (5507, 5514), False, 'from common import compute_bytes\n'), ((5573, 5593), 'common.compute_bytes', 'compute_bytes', (['shape'], {}), '(shape)\n', (5586, 5593), False, 'from common import compute_bytes\n')] |
#!/usr/bin/env python3
import sys
import numpy as np
import cv2
import time
def get_time(start_time):
    """Return the whole milliseconds elapsed since *start_time* (an epoch stamp)."""
    elapsed_seconds = time.time() - start_time
    return int(elapsed_seconds * 1000)
def is_inside(inside, outside, limit_val=-1):
    """Return True when enough points of contour *inside* lie within *outside*.

    With the default limit_val (-1) a single contained point suffices;
    otherwise at least limit_val * len(inside) points must be contained.
    """
    needed = limit_val * len(inside)
    if limit_val < 0:
        needed = 1
    hits = 0
    for point in inside:
        # pointPolygonTest returns >= 0 for points on or inside the contour.
        if cv2.pointPolygonTest(outside, tuple(point[0]), False) >= 0:
            hits += 1
        if hits >= needed:
            return True
    return False
# Record the script start so later log lines can report elapsed time.
start_time = time.time()
# checking arguments
# Flags are expected as "-key" or "-key=value"; anything else aborts:
# exit code 2 for an argument without a leading dash, 3 for a malformed one.
arg = {}
for a in sys.argv[1:]:
    if (a[0] == "-"):
        a = a[1:]
        a = a.split("=")
        if len(a) == 2:
            arg[a[0]] = a[1]
        elif len(a) == 1:
            arg[a[0]] = ""
        else:
            sys.exit(3)
    else:
        sys.exit(2)
# The input image path is mandatory (exit code 1 when missing).
if "input" not in arg:
    sys.exit(1)
# Load the image given by -input=...; exit code 1 when it cannot be read.
input_name, input_ext = arg["input"].split(".")
img = cv2.imread(input_name + "." + input_ext)
if img is None:
    sys.exit(1)
# resizing image if bigger than max values
h, w, c = img.shape
max_width = 1920
max_height = 1080
if (w > max_width) or (h > max_height):
    # Scale down uniformly so both dimensions fit inside 1920x1080.
    ratio = min(max_width / w, max_height / h)
    new_size = (round(w * ratio), round(h * ratio))
    if "silent" not in arg:
        print("%-6s ms| Resizing image, new size: %dx%d, %.2f%%"%(get_time(start_time), new_size[0], new_size[1], ratio))
    img = cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)
# denoising the image
img_blur = cv2.fastNlMeansDenoisingColored(img, None, 15, 10, 7, 21)
# applying blur until desired values
# Repeatedly apply a 3x3 Gaussian blur until the size-normalized Laplacian
# variance (a sharpness measure) drops to the -blur-limit threshold.
blur_limit = float(15)
if "blur-limit" in arg:
    blur_limit = float(arg["blur-limit"])
ok = False
while ok == False:
    img_blur = cv2.GaussianBlur(img_blur, (3, 3), 0)
    detected_blur = cv2.Laplacian(img_blur, cv2.CV_64F).var() * 100000 / (img.shape[0] * img.shape[1])
    if "silent" not in arg:
        print("%-6s ms| Blur value: %.2f"%(get_time(start_time), detected_blur))
    if detected_blur <= blur_limit:
        ok = True
# grayscaling and thresholding the image
img_gray = cv2.cvtColor(img_blur, cv2.COLOR_BGR2GRAY)
# Otsu chooses the threshold; its value also seeds the Canny thresholds below.
thr = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[0]
# detecting edges and finding contours
img_edges = cv2.Canny(img_gray, thr, 0.5 * thr)
#kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,1))
#img_edges = cv2.morphologyEx(img_edges, cv2.MORPH_CLOSE, kernel)
cnt, hier = cv2.findContours(img_edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
ch = []
if True:
    # Keep convex hulls whose area is between 0.01% and 1% of the image.
    img_area = img_edges.shape[0] * img_edges.shape[1]
    cnt = [cv2.convexHull(cnt[i], False) for i in range(len(cnt))]
    good = []
    for c in cnt:
        if (img_area / 100) >= cv2.contourArea(c) >= (img_area / 10000):
            good.append(c)
    cnt = good
    # Pair each contour with its index and keep the 50 largest by area.
    ch = [[cnt[i], i] for i in range(len(cnt))]
    ch_top = sorted(ch, key=lambda x : cv2.contourArea(x[0]), reverse=True)[:50]
if "silent" not in arg:
    print("%-6s ms| Found %d contours."%(get_time(start_time), len(ch)))
img_filtered = img.copy()
possible = []
# A plate candidate must contain at least 3 smaller contours (presumably
# character outlines). Rejects are drawn in orange on the debug image.
for t in ch_top:
    inner = 0
    for b in ch:
        if is_inside(b[0], t[0]):
            inner += 1
        if inner >= 3:
            possible.append(t[1])
            break
    if inner < 3:
        # orange
        img_filtered = cv2.drawContours(img_filtered, [t[0]], -1, (0, 126, 255), 1)
ch = [ch[p] for p in possible]
#ch = [[cv2.convexHull(c[0]), c[1]] for c in ch]
plates = []
# Keep candidates that are nearly rectangular (min-area box within 20% of
# the hull area) and whose aspect ratio is within 30% of 520:110 —
# presumably the 520x110 mm licence plate format. Rejects drawn in red.
for c, idx in ch:
    og = c
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect)
    c = np.int0(box)
    if ((cv2.contourArea(c) / cv2.contourArea(og)) - 1) <= 0.2:
        desired = 520 / 110;
        current = max(rect[1]) / min(rect[1])
        margin = 0.3
        if desired * (1 - margin) <= current <= desired * (1 + margin):
            plates.append([c, og])
        else:
            # red
            img_filtered = cv2.drawContours(img_filtered, [c], -1, (0, 0, 255), 1)
    else:
        # red
        img_filtered = cv2.drawContours(img_filtered, [c], -1, (0, 0, 255), 1)
good = []
# Drop candidates fully nested inside another candidate (drawn turquoise).
for i in range(len(plates)):
    ok = True
    for j in range(len(plates)):
        if (i != j) and is_inside(plates[j][1], plates[i][1], 1):
            ok = False
            break
    if ok:
        good.append(plates[i])
    else:
        # turquoise
        img_filtered = cv2.drawContours(img_filtered, [plates[i][1]], -1, (255, 255, 0), 1)
plates = good
img_detected = img.copy()
candidates = []
index = 0
# For every surviving plate candidate, isolate its interior edge pixels and
# try to extract exactly six character contours from the cropped region.
for p, og in plates:
    # Mask everything outside the candidate, then erase the candidate's own
    # outline so only inner (character) edges remain.
    mask = np.zeros(img_gray.shape, np.uint8)
    img_masked = cv2.drawContours(mask, [p], 0, 255, -1,)
    img_masked = cv2.bitwise_and(img_edges, img_edges, mask=mask)
    cv2.drawContours(img_masked, [og], 0, 0, 2)
    #kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,2))
    #print(kernel)
    #img_masked = cv2.dilate(img_masked, kernel)
    x, y, w, h = cv2.boundingRect(p)
    crop_masked = img_masked[y:y+h, x:x+w]
    crop_detected = img_detected[y:y+h, x:x+w]
    cnt, hier = cv2.findContours(crop_masked, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)
    if hier is None:
        # purple
        img_filtered = cv2.drawContours(img_filtered, [p], -1, (255, 0, 255), 1)
        continue
    hier = hier[0]
    # Keep only contours that have a sibling in the hierarchy.
    ch = [[cnt[i], hier[i]] for i in range(len(cnt)) if (hier[i][0] != -1) or (hier[i][1] != -1)]
    for i in range(len(ch)):
        ch[i][0] = cv2.convexHull(ch[i][0], False)
    good = []
    # Discard contours mostly contained in another contour (80% overlap).
    for i in range(len(ch)):
        ok = True
        for j in range(len(ch)):
            if (i != j) and is_inside(ch[i][0], ch[j][0], 0.8):
                ok = False
                break
        if ok:
            good.append(ch[i])
    # Rank by area times bounding-box height and keep the best six.
    ch = sorted(good, key=lambda x : cv2.contourArea(x[0]) * cv2.boundingRect(x[0])[3], reverse=True)[:6]
    if (len(ch) >= 6):
        chars = []
        img_detected = cv2.drawContours(img_detected, [og], -1, (0, 255, 0), 2)
        cnt = [c[0] for c in ch]
        #crop_detected = cv2.drawContours(crop_detected, cnt, -1, (255, 0, 0), 1)
        num = -1
        for c in cnt:
            num += 1
            #box = cv2.boxPoints(cv2.minAreaRect(c))
            #box = np.int0(box)
            #crop_detected = cv2.drawContours(crop_detected, [box], -1, (255, 0, 0), 1)
            # Crop each character patch before drawing the blue marker box.
            x, y, w, h = cv2.boundingRect(c)
            chars.append([crop_detected.copy()[y:y+h, x:x+w], x])
            crop_detected = cv2.rectangle(crop_detected, (x,y), (x+w,y+h), (255, 0, 0), 1)
        # Sort the character crops left-to-right by their x coordinate.
        chars = sorted(chars, key=lambda x : x[1])
        candidates.append([c[0] for c in chars])
        index += 1
        #cv2.imshow("Last plate", crop_masked.astype(np.uint8))
    else:
        # yellow
        img_filtered = cv2.drawContours(img_filtered, [p], -1, (0, 255, 255), 1)
if "silent" not in arg:
    print("%-6s ms| %d plates found."%(get_time(start_time), index))
idx = 0
# Template alphabets: the first three characters of a plate are matched
# against letter templates, the remaining ones against digit templates.
t_num = "0123456789"
t_char = "abcdefghijklmnoprstuvwxyz"
for cnd in candidates:
    idx += 1
    plate = ""
    pos = 0
    for c in cnd:
        if pos > 2:
            templates = t_num
        else:
            templates = t_char
        pos += 1
        vals = []
        for t in templates:
            # Resize every template to the character crop, binarize both and
            # score with squared difference; the lowest score wins below.
            template = cv2.imread("templates/" + t + ".jpg")
            h, w, col = c.shape
            template = cv2.resize(template, (w, h), interpolation=cv2.INTER_AREA)
            t_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
            c_gray = cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)
            t_gray = cv2.adaptiveThreshold(t_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15, 0)
            c_gray = cv2.adaptiveThreshold(c_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15, 0)
            #template = cv2.threshold(template, 126, 255, cv2.THRESH_BINARY)[1]
            #cv2.imshow("org", c_gray.astype(np.uint8))
            #cv2.imshow("tmp", t_gray.astype(np.uint8))
            vals.append([t, cv2.matchTemplate(t_gray, c_gray, cv2.TM_SQDIFF)[0][0]])
        plate += sorted(vals, key=lambda x : x[1])[0][0]
    plate = plate.upper()
    # Format the result as three letters, a dash, then three digits.
    plate = plate[:3] + "-" + plate[3:]
    if "silent" not in arg:
        print("Plate " + str(idx) + " number:", plate)
    else:
        print(plate)
if "silent" not in arg:
    print("Executed in %d ms" % get_time(start_time))
if "no-image" not in arg:
    # Debug view: edge map beside the annotated candidates; when at least
    # one plate was read, the detection image is stacked underneath.
    concat = np.concatenate((cv2.cvtColor(img_edges, cv2.COLOR_GRAY2BGR), img_filtered), axis = 1)
    if (index > 0):
        concat2 = np.concatenate((img_detected, np.zeros(img_detected.shape, dtype = np.uint8)), axis = 1)
        concat = np.concatenate((concat, concat2), axis = 0)
    #cv2.imshow("First detected plate", crop_masked.astype(np.uint8))
    cv2.namedWindow("images", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("images", (1280, 720))
    cv2.imshow("images", concat)
    # Keep the window open until it is closed or the space bar (32) is hit.
    while cv2.getWindowProperty("images", cv2.WND_PROP_VISIBLE) >= 1:
        if cv2.waitKey(1000) == 32: # 27
            break
    cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.imshow",
"cv2.destroyAllWindows",
"sys.exit",
"cv2.Laplacian",
"cv2.resizeWindow",
"cv2.threshold",
"cv2.contourArea",
"cv2.minAreaRect",
"numpy.concatenate",
"cv2.matchTemplate",
"cv2.waitKey",
"cv2.drawContours",
"cv2.boxPoints",
"numpy.int0",
"cv2.cvtColor",
... | [((542, 553), 'time.time', 'time.time', ([], {}), '()\n', (551, 553), False, 'import time\n'), ((943, 983), 'cv2.imread', 'cv2.imread', (["(input_name + '.' + input_ext)"], {}), "(input_name + '.' + input_ext)\n", (953, 983), False, 'import cv2\n'), ((1506, 1563), 'cv2.fastNlMeansDenoisingColored', 'cv2.fastNlMeansDenoisingColored', (['img', 'None', '(15)', '(10)', '(7)', '(21)'], {}), '(img, None, 15, 10, 7, 21)\n', (1537, 1563), False, 'import cv2\n'), ((2094, 2136), 'cv2.cvtColor', 'cv2.cvtColor', (['img_blur', 'cv2.COLOR_BGR2GRAY'], {}), '(img_blur, cv2.COLOR_BGR2GRAY)\n', (2106, 2136), False, 'import cv2\n'), ((2268, 2303), 'cv2.Canny', 'cv2.Canny', (['img_gray', 'thr', '(0.5 * thr)'], {}), '(img_gray, thr, 0.5 * thr)\n', (2277, 2303), False, 'import cv2\n'), ((2443, 2510), 'cv2.findContours', 'cv2.findContours', (['img_edges', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(img_edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (2459, 2510), False, 'import cv2\n'), ((875, 886), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (883, 886), False, 'import sys\n'), ((1005, 1016), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1013, 1016), False, 'import sys\n'), ((1416, 1471), 'cv2.resize', 'cv2.resize', (['img', 'new_size'], {'interpolation': 'cv2.INTER_AREA'}), '(img, new_size, interpolation=cv2.INTER_AREA)\n', (1426, 1471), False, 'import cv2\n'), ((1737, 1774), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img_blur', '(3, 3)', '(0)'], {}), '(img_blur, (3, 3), 0)\n', (1753, 1774), False, 'import cv2\n'), ((2144, 2212), 'cv2.threshold', 'cv2.threshold', (['img_gray', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(img_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (2157, 2212), False, 'import cv2\n'), ((3500, 3518), 'cv2.minAreaRect', 'cv2.minAreaRect', (['c'], {}), '(c)\n', (3515, 3518), False, 'import cv2\n'), ((3530, 3549), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3543, 3549), False, 'import cv2\n'), 
((3558, 3570), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (3565, 3570), True, 'import numpy as np\n'), ((4517, 4551), 'numpy.zeros', 'np.zeros', (['img_gray.shape', 'np.uint8'], {}), '(img_gray.shape, np.uint8)\n', (4525, 4551), True, 'import numpy as np\n'), ((4569, 4608), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[p]', '(0)', '(255)', '(-1)'], {}), '(mask, [p], 0, 255, -1)\n', (4585, 4608), False, 'import cv2\n'), ((4627, 4675), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img_edges', 'img_edges'], {'mask': 'mask'}), '(img_edges, img_edges, mask=mask)\n', (4642, 4675), False, 'import cv2\n'), ((4681, 4724), 'cv2.drawContours', 'cv2.drawContours', (['img_masked', '[og]', '(0)', '(0)', '(2)'], {}), '(img_masked, [og], 0, 0, 2)\n', (4697, 4724), False, 'import cv2\n'), ((4874, 4893), 'cv2.boundingRect', 'cv2.boundingRect', (['p'], {}), '(p)\n', (4890, 4893), False, 'import cv2\n'), ((5001, 5073), 'cv2.findContours', 'cv2.findContours', (['crop_masked', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_TC89_KCOS'], {}), '(crop_masked, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)\n', (5017, 5073), False, 'import cv2\n'), ((8659, 8703), 'cv2.namedWindow', 'cv2.namedWindow', (['"""images"""', 'cv2.WINDOW_NORMAL'], {}), "('images', cv2.WINDOW_NORMAL)\n", (8674, 8703), False, 'import cv2\n'), ((8708, 8747), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""images"""', '(1280, 720)'], {}), "('images', (1280, 720))\n", (8724, 8747), False, 'import cv2\n'), ((8752, 8780), 'cv2.imshow', 'cv2.imshow', (['"""images"""', 'concat'], {}), "('images', concat)\n", (8762, 8780), False, 'import cv2\n'), ((8916, 8939), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8937, 8939), False, 'import cv2\n'), ((835, 846), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (843, 846), False, 'import sys\n'), ((2596, 2625), 'cv2.convexHull', 'cv2.convexHull', (['cnt[i]', '(False)'], {}), '(cnt[i], False)\n', (2610, 2625), False, 'import cv2\n'), ((3304, 3364), 'cv2.drawContours', 
'cv2.drawContours', (['img_filtered', '[t[0]]', '(-1)', '(0, 126, 255)', '(1)'], {}), '(img_filtered, [t[0]], -1, (0, 126, 255), 1)\n', (3320, 3364), False, 'import cv2\n'), ((4002, 4057), 'cv2.drawContours', 'cv2.drawContours', (['img_filtered', '[c]', '(-1)', '(0, 0, 255)', '(1)'], {}), '(img_filtered, [c], -1, (0, 0, 255), 1)\n', (4018, 4057), False, 'import cv2\n'), ((4347, 4415), 'cv2.drawContours', 'cv2.drawContours', (['img_filtered', '[plates[i][1]]', '(-1)', '(255, 255, 0)', '(1)'], {}), '(img_filtered, [plates[i][1]], -1, (255, 255, 0), 1)\n', (4363, 4415), False, 'import cv2\n'), ((5136, 5193), 'cv2.drawContours', 'cv2.drawContours', (['img_filtered', '[p]', '(-1)', '(255, 0, 255)', '(1)'], {}), '(img_filtered, [p], -1, (255, 0, 255), 1)\n', (5152, 5193), False, 'import cv2\n'), ((5378, 5409), 'cv2.convexHull', 'cv2.convexHull', (['ch[i][0]', '(False)'], {}), '(ch[i][0], False)\n', (5392, 5409), False, 'import cv2\n'), ((5837, 5893), 'cv2.drawContours', 'cv2.drawContours', (['img_detected', '[og]', '(-1)', '(0, 255, 0)', '(2)'], {}), '(img_detected, [og], -1, (0, 255, 0), 2)\n', (5853, 5893), False, 'import cv2\n'), ((6677, 6734), 'cv2.drawContours', 'cv2.drawContours', (['img_filtered', '[p]', '(-1)', '(0, 255, 255)', '(1)'], {}), '(img_filtered, [p], -1, (0, 255, 255), 1)\n', (6693, 6734), False, 'import cv2\n'), ((8536, 8577), 'numpy.concatenate', 'np.concatenate', (['(concat, concat2)'], {'axis': '(0)'}), '((concat, concat2), axis=0)\n', (8550, 8577), True, 'import numpy as np\n'), ((8793, 8846), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['"""images"""', 'cv2.WND_PROP_VISIBLE'], {}), "('images', cv2.WND_PROP_VISIBLE)\n", (8814, 8846), False, 'import cv2\n'), ((2716, 2734), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (2731, 2734), False, 'import cv2\n'), ((3899, 3954), 'cv2.drawContours', 'cv2.drawContours', (['img_filtered', '[c]', '(-1)', '(0, 0, 255)', '(1)'], {}), '(img_filtered, [c], -1, (0, 0, 255), 1)\n', (3915, 3954), 
False, 'import cv2\n'), ((6267, 6286), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (6283, 6286), False, 'import cv2\n'), ((6381, 6449), 'cv2.rectangle', 'cv2.rectangle', (['crop_detected', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(1)'], {}), '(crop_detected, (x, y), (x + w, y + h), (255, 0, 0), 1)\n', (6394, 6449), False, 'import cv2\n'), ((7159, 7196), 'cv2.imread', 'cv2.imread', (["('templates/' + t + '.jpg')"], {}), "('templates/' + t + '.jpg')\n", (7169, 7196), False, 'import cv2\n'), ((7252, 7310), 'cv2.resize', 'cv2.resize', (['template', '(w, h)'], {'interpolation': 'cv2.INTER_AREA'}), '(template, (w, h), interpolation=cv2.INTER_AREA)\n', (7262, 7310), False, 'import cv2\n'), ((7333, 7375), 'cv2.cvtColor', 'cv2.cvtColor', (['template', 'cv2.COLOR_BGR2GRAY'], {}), '(template, cv2.COLOR_BGR2GRAY)\n', (7345, 7375), False, 'import cv2\n'), ((7397, 7432), 'cv2.cvtColor', 'cv2.cvtColor', (['c', 'cv2.COLOR_BGR2GRAY'], {}), '(c, cv2.COLOR_BGR2GRAY)\n', (7409, 7432), False, 'import cv2\n'), ((7455, 7556), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['t_gray', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY_INV', '(15)', '(0)'], {}), '(t_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY_INV, 15, 0)\n', (7476, 7556), False, 'import cv2\n'), ((7573, 7674), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['c_gray', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY_INV', '(15)', '(0)'], {}), '(c_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY_INV, 15, 0)\n', (7594, 7674), False, 'import cv2\n'), ((8321, 8364), 'cv2.cvtColor', 'cv2.cvtColor', (['img_edges', 'cv2.COLOR_GRAY2BGR'], {}), '(img_edges, cv2.COLOR_GRAY2BGR)\n', (8333, 8364), False, 'import cv2\n'), ((8864, 8881), 'cv2.waitKey', 'cv2.waitKey', (['(1000)'], {}), '(1000)\n', (8875, 8881), False, 'import cv2\n'), ((120, 131), 'time.time', 'time.time', ([], {}), '()\n', (129, 131), False, 'import time\n'), ((805, 816), 'sys.exit', 
'sys.exit', (['(3)'], {}), '(3)\n', (813, 816), False, 'import sys\n'), ((2885, 2906), 'cv2.contourArea', 'cv2.contourArea', (['x[0]'], {}), '(x[0])\n', (2900, 2906), False, 'import cv2\n'), ((3581, 3599), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (3596, 3599), False, 'import cv2\n'), ((3602, 3621), 'cv2.contourArea', 'cv2.contourArea', (['og'], {}), '(og)\n', (3617, 3621), False, 'import cv2\n'), ((8460, 8504), 'numpy.zeros', 'np.zeros', (['img_detected.shape'], {'dtype': 'np.uint8'}), '(img_detected.shape, dtype=np.uint8)\n', (8468, 8504), True, 'import numpy as np\n'), ((1795, 1830), 'cv2.Laplacian', 'cv2.Laplacian', (['img_blur', 'cv2.CV_64F'], {}), '(img_blur, cv2.CV_64F)\n', (1808, 1830), False, 'import cv2\n'), ((5702, 5723), 'cv2.contourArea', 'cv2.contourArea', (['x[0]'], {}), '(x[0])\n', (5717, 5723), False, 'import cv2\n'), ((5726, 5748), 'cv2.boundingRect', 'cv2.boundingRect', (['x[0]'], {}), '(x[0])\n', (5742, 5748), False, 'import cv2\n'), ((7892, 7940), 'cv2.matchTemplate', 'cv2.matchTemplate', (['t_gray', 'c_gray', 'cv2.TM_SQDIFF'], {}), '(t_gray, c_gray, cv2.TM_SQDIFF)\n', (7909, 7940), False, 'import cv2\n')] |
# Part of code was adpated from https://github.com/r9y9/deepvoice3_pytorch/tree/master/compute_timestamp_ratio.py
# Copyright (c) 2017: <NAME>.
import argparse
import sys
import numpy as np
from hparams import hparams, hparams_debug_string
from deepvoice3_paddle.data import TextDataSource, MelSpecDataSource
from nnmnkwii.datasets import FileSourceDataset
from tqdm import trange
from deepvoice3_paddle import frontend
def build_parser():
    """Build the argument parser for the timestamp-ratio tool.

    Exposes an optional --hparams override string, a required --preset
    JSON path, and a positional data_root directory.
    """
    cli = argparse.ArgumentParser(
        description="Compute output/input timestamp ratio.")
    cli.add_argument(
        "--hparams", type=str, default="", help="Hyper parameters.")
    cli.add_argument(
        "--preset",
        type=str,
        required=True,
        help="Path of preset parameters (json).")
    cli.add_argument("data_root", type=str, help="path of the dataset.")
    return cli
if __name__ == "__main__":
    parser = build_parser()
    args, _ = parser.parse_known_args()
    data_root = args.data_root
    preset = args.preset
    # Load preset if specified
    if preset is not None:
        with open(preset) as f:
            hparams.parse_json(f.read())
    # Override hyper parameters
    hparams.parse(args.hparams)
    assert hparams.name == "deepvoice3"
    # Code below
    # Accumulate the text length and mel-spectrogram length of every sample.
    X = FileSourceDataset(TextDataSource(data_root))
    Mel = FileSourceDataset(MelSpecDataSource(data_root))
    in_sizes = []
    out_sizes = []
    for i in trange(len(X)):
        x, m = X[i], Mel[i]
        if X.file_data_source.multi_speaker:
            # Multi-speaker sources presumably yield (text, speaker_id)
            # pairs; only the text part is measured — TODO confirm.
            x = x[0]
        in_sizes.append(x.shape[0])
        out_sizes.append(m.shape[0])
    in_sizes = np.array(in_sizes)
    out_sizes = np.array(out_sizes)
    input_timestamps = np.sum(in_sizes)
    # Convert mel frames to decoder timesteps by dividing by the model's
    # outputs-per-step and downsampling factors.
    output_timestamps = np.sum(
        out_sizes) / hparams.outputs_per_step / hparams.downsample_step
    print(input_timestamps, output_timestamps,
          output_timestamps / input_timestamps)
    sys.exit(0)
| [
"argparse.ArgumentParser",
"deepvoice3_paddle.data.TextDataSource",
"deepvoice3_paddle.data.MelSpecDataSource",
"hparams.hparams.parse",
"numpy.array",
"numpy.sum",
"sys.exit"
] | [((456, 532), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute output/input timestamp ratio."""'}), "(description='Compute output/input timestamp ratio.')\n", (479, 532), False, 'import argparse\n'), ((1188, 1215), 'hparams.hparams.parse', 'hparams.parse', (['args.hparams'], {}), '(args.hparams)\n', (1201, 1215), False, 'from hparams import hparams, hparams_debug_string\n'), ((1635, 1653), 'numpy.array', 'np.array', (['in_sizes'], {}), '(in_sizes)\n', (1643, 1653), True, 'import numpy as np\n'), ((1670, 1689), 'numpy.array', 'np.array', (['out_sizes'], {}), '(out_sizes)\n', (1678, 1689), True, 'import numpy as np\n'), ((1714, 1730), 'numpy.sum', 'np.sum', (['in_sizes'], {}), '(in_sizes)\n', (1720, 1730), True, 'import numpy as np\n'), ((1935, 1946), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1943, 1946), False, 'import sys\n'), ((1300, 1325), 'deepvoice3_paddle.data.TextDataSource', 'TextDataSource', (['data_root'], {}), '(data_root)\n', (1314, 1325), False, 'from deepvoice3_paddle.data import TextDataSource, MelSpecDataSource\n'), ((1355, 1383), 'deepvoice3_paddle.data.MelSpecDataSource', 'MelSpecDataSource', (['data_root'], {}), '(data_root)\n', (1372, 1383), False, 'from deepvoice3_paddle.data import TextDataSource, MelSpecDataSource\n'), ((1755, 1772), 'numpy.sum', 'np.sum', (['out_sizes'], {}), '(out_sizes)\n', (1761, 1772), True, 'import numpy as np\n')] |
from google.cloud import storage
import os
client = storage.Client()
bucket = client.get_bucket('noah-water.appspot.com')
blobs = bucket.list_blobs(prefix='trends3/Part6')
# Grant write access on the bucket once before processing the blobs.
os.system("gsutil acl ch -u <EMAIL>:W gs://noah-water.appspot.com")
for blob in blobs:
    print(blob.name)
    # Make each blob readable, then import it into the Cloud SQL
    # precipitation_trend table of the prec_anomaly database.
    file_read_perm = "gsutil acl ch -u <EMAIL>:R gs://noah-water.appspot.com/" + blob.name
    os.system(file_read_perm)
    # print(file_read_perm)
    file_import = "gcloud sql import csv precipitation gs://noah-water.appspot.com/" + blob.name + " --database=prec_anomaly --table=precipitation_trend -q"
    os.system(file_import)
# print(file_import) | [
"google.cloud.storage.Client",
"os.system"
] | [((53, 69), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (67, 69), False, 'from google.cloud import storage\n'), ((176, 243), 'os.system', 'os.system', (['"""gsutil acl ch -u <EMAIL>:W gs://noah-water.appspot.com"""'], {}), "('gsutil acl ch -u <EMAIL>:W gs://noah-water.appspot.com')\n", (185, 243), False, 'import os\n'), ((371, 396), 'os.system', 'os.system', (['file_read_perm'], {}), '(file_read_perm)\n', (380, 396), False, 'import os\n'), ((577, 599), 'os.system', 'os.system', (['file_import'], {}), '(file_import)\n', (586, 599), False, 'import os\n')] |
from math import sin, cos, sqrt, atan2, radians
from random import *
from datetime import *
from dateutil.parser import parse
from enum import Enum
import csv
class DataAccess:
    """In-memory store of geo-tagged votes and events, seeded with demo data.

    Votes feed the hotspot heat map; events back the "surprise me" lookup.
    """

    # Values stored in a vote record's 'vote' field.
    DOWNVOTE = 0
    UPVOTE = 1

    # Categories an event may belong to.
    Type = Enum('Type', 'sport party animal hackathon culture food')

    def __init__(self):
        self.votes = []
        self.events = []
        self.addFakeEvents()
        self.addFakeVotes()

    def addVote(self, latitude, longitude, vote, place):
        """Append a single vote record, timestamped with the current time."""
        record = {
            'timestamp' : datetime.now(),
            'latitude' : latitude,
            'longitude' : longitude,
            'vote' : vote,
            'place': place
        }
        self.votes.append(record)

    def deleteAllVotes(self):
        """Remove every stored vote."""
        self.votes = []

    def deleteAllEvents(self):
        """Remove every stored event."""
        self.events = []

    def getHotspots(self):
        """Aggregate upvotes per place into heat-map records.

        Returns a list of dicts with rounded coordinates (the latest vote's
        coordinates win), an upvote count ('hotness') and a display color.
        """
        hotspots = {}
        for row in self.votes:
            if row['vote'] != self.UPVOTE:
                continue
            latitude = '{0:.3f}'.format(row['latitude'])
            longitude = '{0:.3f}'.format(row['longitude'])
            place = row['place']
            if place in hotspots:
                # Bump the count; coordinates are refreshed from this vote.
                hot = int(hotspots[place].split(',')[2]) + 1
                hotspots[place] = latitude + ',' + longitude + ',' + str(hot)
            else:
                hotspots[place] = latitude + ',' + longitude + ',' + '1'
        records = []
        for key, val in hotspots.items():
            location = val.split(',')
            hot = int(location[2])
            # Bucket the count into a color, hottest (dark red) first.
            if 250 <= hot:
                hot_color = '#700000'
            elif 150 <= hot <= 249:
                hot_color = '#f20000'
            elif 50 <= hot <= 149:
                hot_color = '#ff6600'
            else:
                hot_color = '#f7a61b'
            records.append({
                'latitude' : float(location[0]),
                'longitude' : float(location[1]),
                'hotness' : hot,
                'hotness_color': hot_color,
                'place': key
            })
        return records

    def addEvent(self, id, name, description, event_type, latitude, longitude, start_time, end_time):
        """Store an event; *event_type* must be a member of DataAccess.Type."""
        record = {
            'id': id,
            'name' : name,
            'description' : description,
            'type': event_type.name,
            'latitude' : latitude,
            'longitude' : longitude,
            'start_time' : start_time,
            'end_time' : end_time
        }
        self.events.append(record)

    def getEvents(self):
        """Return all stored events."""
        return self.events

    def getSurpriseEvent(self, longitude, latitude, range):
        """Pick a random event roughly *range* km from the given point.

        Prefers events whose distance is within +/-10% of *range*; when none
        qualify, falls back to the farthest event still inside *range*.
        Returns None when no event lies within *range* at all.
        """
        candidates = [x for x in self.events
                      if (range * 0.9) <= self.calculateDistance(
                          longitude,
                          latitude,
                          x['longitude'],
                          x['latitude'])
                      <= range * 1.1]
        if not candidates:
            event_distances = []
            for event in self.events:
                distance = self.calculateDistance(longitude, latitude, event['longitude'], event['latitude'])
                if distance > range:
                    continue
                event_distances.append({'distance': distance, 'event': event})
            # Fix: the original raised IndexError when nothing was in range.
            if not event_distances:
                return None
            return sorted(event_distances, key=lambda x: x['distance'], reverse=True)[0]['event']
        return choice(candidates)

    # src: https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude
    def calculateDistance(self, lon1, lat1, lon2, lat2):
        """Return the great-circle (haversine) distance in km between two points."""
        # approximate radius of earth in km
        R = 6373.0
        lat1r = radians(lat1)
        lon1r = radians(lon1)
        lat2r = radians(lat2)
        lon2r = radians(lon2)
        dlon = lon2r - lon1r
        dlat = lat2r - lat1r
        a = sin(dlat / 2)**2 + cos(lat1r) * cos(lat2r) * sin(dlon / 2)**2
        c = 2 * atan2(sqrt(a), sqrt(1 - a))
        # Fix: removed the leftover debug print of every computed distance.
        return R * c

    def addFakeVotes(self):
        """Seed the vote store from ./data/locations.csv.

        Each CSV row is (latitude, longitude, count, place); *count* upvotes
        are generated with a small random jitter around the coordinates.
        """
        locations = []
        with open('./data/locations.csv', 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                locations.append((
                    float(row[0]),
                    float(row[1]),
                    int(row[2]),
                    row[3]))
        for loc in locations:
            for i in range(loc[2]):
                self.addVote(
                    loc[0] + random() / 1000,
                    loc[1] + random() / 1000,
                    self.UPVOTE,
                    loc[3])

    def addFakeEvents(self):
        """Seed the event store from ./data/events.csv.

        Each CSV row is (name, description, type, latitude, longitude,
        start_time, end_time); ids are assigned sequentially from 1.
        """
        events = []
        with open('./data/events.csv', 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                events.append(row)
        for i in range(len(events)):
            self.addEvent(
                i + 1,
                events[i][0],
                events[i][1],
                self.Type[events[i][2]],
                float(events[i][3]),
                float(events[i][4]),
                parse(events[i][5]),
                parse(events[i][6])
            )
"dateutil.parser.parse",
"math.sqrt",
"math.radians",
"math.cos",
"enum.Enum",
"math.sin",
"csv.reader"
] | [((224, 281), 'enum.Enum', 'Enum', (['"""Type"""', '"""sport party animal hackathon culture food"""'], {}), "('Type', 'sport party animal hackathon culture food')\n", (228, 281), False, 'from enum import Enum\n'), ((3675, 3688), 'math.radians', 'radians', (['lat1'], {}), '(lat1)\n', (3682, 3688), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((3705, 3718), 'math.radians', 'radians', (['lon1'], {}), '(lon1)\n', (3712, 3718), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((3735, 3748), 'math.radians', 'radians', (['lat2'], {}), '(lat2)\n', (3742, 3748), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((3765, 3778), 'math.radians', 'radians', (['lon2'], {}), '(lon2)\n', (3772, 3778), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((4166, 4200), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (4176, 4200), False, 'import csv\n'), ((4779, 4813), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (4789, 4813), False, 'import csv\n'), ((3851, 3864), 'math.sin', 'sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (3854, 3864), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((3935, 3942), 'math.sqrt', 'sqrt', (['a'], {}), '(a)\n', (3939, 3942), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((3944, 3955), 'math.sqrt', 'sqrt', (['(1 - a)'], {}), '(1 - a)\n', (3948, 3955), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((5157, 5176), 'dateutil.parser.parse', 'parse', (['events[i][5]'], {}), '(events[i][5])\n', (5162, 5176), False, 'from dateutil.parser import parse\n'), ((5194, 5213), 'dateutil.parser.parse', 'parse', (['events[i][6]'], {}), '(events[i][6])\n', (5199, 5213), False, 'from dateutil.parser import parse\n'), ((3870, 3880), 'math.cos', 'cos', (['lat1r'], {}), '(lat1r)\n', (3873, 3880), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((3883, 3893), 'math.cos', 
'cos', (['lat2r'], {}), '(lat2r)\n', (3886, 3893), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((3896, 3909), 'math.sin', 'sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (3899, 3909), False, 'from math import sin, cos, sqrt, atan2, radians\n')] |
"""
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Pylint Rule Overrides
# Modules
import json
from json import JSONDecodeError
from github.GithubException import GithubException
from github.GithubException import BadAttributeException
from lib.constants import constants
# Globals
# Public Functions ------------------------------------------------------------>
def log_exception(logger, err): # pylint: disable=unused-variable
    """
    Log a general exception
    """
    details = (
        "Exception:",
        "Name: {0}.".format(type(err).__name__),
        "Message: {0}.".format(str(err))
    )
    logger.exception("\n".join(details))
def log_json_error(logger, err): # pylint: disable=unused-variable
    """
    Log a JSON decode exception
    """
    # See: https://docs.python.org/3/library/json.html
    if not isinstance(err, JSONDecodeError):
        logger.error(
            "Exception is not an instance of JSONDecodeError: {0}".format(
                type(err).__name__))
        log_exception(logger, err)
        return
    details = (
        "Exception:",
        "Name: {0}.".format(type(err).__name__),
        "Message: {0}.".format(err.msg),
        "Character Index: {0}.".format(err.pos),
        "Line Number: {0}.".format(err.lineno),
        "Column Number: {0}.".format(err.colno),
        "Error: {0}.".format(str(err))
    )
    logger.exception("\n".join(details))
    # The offending document is logged separately for easier inspection.
    logger.error("JSON Document:\n%s", err.doc)
def log_github_exception(logger, err): # pylint: disable=unused-variable
    """
    Log a GitHub exception
    """
    if not isinstance(err, GithubException):
        logger.error(
            "Exception is not an instance of GithubException: {0}".format(
                type(err).__name__))
        log_exception(logger, err)
        return
    # Pretty-print the API response payload carried by the exception.
    formatted_body = json.dumps(err.data, indent=constants.JSON_FORMAT_INDENT)
    details = (
        "Exception:",
        "Name: {0}.".format(type(err).__name__),
        "Message: {0}.".format(str(err)),
        "Status Code: {0}.".format(err.status),
        "Data: {0}.".format(formatted_body),
    )
    logger.exception("\n".join(details))
def log_bad_attribute_exception(logger, err): # pylint: disable=unused-variable
    """
    Log a GitHub bad attribute exception
    """
    if not isinstance(err, BadAttributeException):
        logger.error(
            "Exception is not an instance of BadAttributeException: {0}".format(
                type(err).__name__))
        log_exception(logger, err)
        return
    details = (
        "Exception:",
        "Name: {0}.".format(type(err).__name__),
        "Message: {0}.".format(str(err)),
        "Actual Value: {0}.".format(err.actual_value),
        "Expected Type: {0}.".format(err.expected_type),
        "Transformation Exception: {0}.".format(err.transformation_exception)
    )
    logger.exception("\n".join(details))
logger.exception(content)
| [
"json.dumps"
] | [((2768, 2825), 'json.dumps', 'json.dumps', (['err.data'], {'indent': 'constants.JSON_FORMAT_INDENT'}), '(err.data, indent=constants.JSON_FORMAT_INDENT)\n', (2778, 2825), False, 'import json\n')] |