code | apis | extract_api
|---|---|---|
from typing import Sequence, Union
import numpy as np
from scipy.ndimage.interpolation import rotate as np_rotate
from PIL.Image import Image
from torch import Tensor, tensor
from torchvision.transforms.functional import rotate
class ImageRotation(object):
def __init__(self, degree):
self.degree = degree
def __call__(self, img: Union[Image, Tensor, np.ndarray]):
if isinstance(img, np.ndarray):
img = np_rotate(img, angle=self.degree, reshape=False)
elif isinstance(img, Image):
img = img.rotate(self.degree)
elif isinstance(img, Tensor):
img = rotate(img, angle=self.degree)
else:
raise ValueError(f'Accepted types are: '
f'[ndarray, PIL Image, Tensor] {type(img)}')
return img
class PixelsPermutation(object):
def __init__(self, index_permutation: Sequence[int]):
self.permutation = index_permutation
def __call__(self, img: Union[Image, Tensor, np.ndarray]):
if isinstance(img, np.ndarray):
img = img.reshape(-1)[self.permutation].reshape(*img.shape)
        elif isinstance(img, Image):
            # Convert the PIL image to an ndarray before permuting its pixels.
            img = np.asarray(img)
            img = img.reshape(-1)[self.permutation].reshape(*img.shape)
            img = Image.fromarray(img)
elif isinstance(img, Tensor):
img = img.numpy()
img = img.reshape(-1)[self.permutation].reshape(*img.shape)
img = tensor(img)
else:
raise ValueError(f'Accepted types are: '
f'[ndarray, PIL Image, Tensor] {type(img)}')
return img
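# Illustrative usage sketch (added for clarity, not part of the original module):
# both transforms accept an ndarray, a PIL Image, or a torch Tensor and keep the
# input shape unchanged.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    img = rng.random((8, 8)).astype(np.float32)
    rotated = ImageRotation(degree=90)(img)
    shuffled = PixelsPermutation(rng.permutation(img.size))(img)
    print(rotated.shape, shuffled.shape)  # both remain (8, 8)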
| [
"torch.tensor",
"torchvision.transforms.functional.rotate",
"PIL.Image.Image.fromarray",
"scipy.ndimage.interpolation.rotate"
] | [((443, 491), 'scipy.ndimage.interpolation.rotate', 'np_rotate', (['img'], {'angle': 'self.degree', 'reshape': '(False)'}), '(img, angle=self.degree, reshape=False)\n', (452, 491), True, 'from scipy.ndimage.interpolation import rotate as np_rotate\n'), ((1291, 1311), 'PIL.Image.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (1306, 1311), False, 'from PIL.Image import Image\n'), ((627, 657), 'torchvision.transforms.functional.rotate', 'rotate', (['img'], {'angle': 'self.degree'}), '(img, angle=self.degree)\n', (633, 657), False, 'from torchvision.transforms.functional import rotate\n'), ((1470, 1481), 'torch.tensor', 'tensor', (['img'], {}), '(img)\n', (1476, 1481), False, 'from torch import Tensor, tensor\n')] |
# Course: CS261 - Data Structures
# Author: <NAME>
# Assignment: 06
# Description: Directed graph implementation.
from collections import deque
import heapq
class DirectedGraph:
"""
Class to implement directed weighted graph
- duplicate edges not allowed
- loops not allowed
- only positive edge weights
- vertex names are integers
"""
def __init__(self, start_edges=None):
"""
Store graph info as adjacency matrix
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
self.v_count = 0
self.adj_matrix = []
# populate graph with initial vertices and edges (if provided)
# before using, implement add_vertex() and add_edge() methods
if start_edges is not None:
v_count = 0
for u, v, _ in start_edges:
v_count = max(v_count, u, v)
for _ in range(v_count + 1):
self.add_vertex()
for u, v, weight in start_edges:
self.add_edge(u, v, weight)
def __str__(self):
"""
Return content of the graph in human-readable form
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
if self.v_count == 0:
return 'EMPTY GRAPH\n'
out = ' |'
out += ' '.join(['{:2}'.format(i) for i in range(self.v_count)]) + '\n'
out += '-' * (self.v_count * 3 + 3) + '\n'
for i in range(self.v_count):
row = self.adj_matrix[i]
out += '{:2} |'.format(i)
out += ' '.join(['{:2}'.format(w) for w in row]) + '\n'
out = f"GRAPH ({self.v_count} vertices):\n{out}"
return out
def add_vertex(self) -> int:
"""
Adds new vertex to the graph.
Returns new number of vertices in graph.
"""
# Extend matrix by one column.
for row in self.adj_matrix:
row.append(0)
# Extend matrix by one row.
self.adj_matrix.append([0] * (self.v_count + 1))
# Update vertex count.
self.v_count += 1
return self.v_count
def add_edge(self, src: int, dst: int, weight=1) -> None:
"""
Adds a new edge to the graph, connecting src vertex to dst vertex.
If src and dst point to the same vertex, or if weight is not positive, does nothing and returns.
If edge already exists, updates weight of edge.
"""
# Only update weight if src and dst exist and don't point to same vertex and weight is positive.
if self._is_valid_edge(src, dst) and weight >= 1:
self.adj_matrix[src][dst] = weight
def remove_edge(self, src: int, dst: int) -> None:
"""
Removes an edge between src and dst vertices.
If either vertex does not exist in the graph, or if there is no edge between them, does nothing and returns.
"""
# Only remove edge if vertices exist.
if self._is_valid_edge(src, dst):
self.adj_matrix[src][dst] = 0
def get_vertices(self) -> []:
"""Returns a list of vertices of the graph."""
return [_ for _ in range(self.v_count)]
def get_edges(self) -> []:
"""
Returns a list of 3-tuples containing the source vertex, destination vertex, and edge weight for all edges
in graph.
"""
edges: list = list()
for i in range(self.v_count):
for j in range(self.v_count):
# Edge exists between vertex i and j.
if self.adj_matrix[i][j] > 0:
edges.append((i, j, self.adj_matrix[i][j]))
return edges
def is_valid_path(self, path: []) -> bool:
"""
Return True if the provided path is valid.
An empty path or a path with a single vertex is considered valid.
"""
# Check if path is empty or contains only a single vertex.
if len(path) == 0:
return True
elif len(path) == 1:
if 0 <= path[0] < self.v_count:
return True
else:
return False
# Iterate through vertices in path, checking if they are adjacent to each other so that they form a path.
step: int = 0
while step < len(path) - 1:
src, dst = path[step], path[step + 1]
if not self.are_adjacent(src, dst):
return False
step += 1
return True
def dfs(self, v_start: int, v_end: int = None) -> []:
"""
Return list of vertices visited during DFS search from v_start vertex up to optional v_end vertex.
If v_start is not in the graph, returns empty list.
If v_end is not in the graph, will treat it as having no v_end parameter.
Vertices are picked in ascending order.
"""
visited: list = list()
# Check if v_start is in graph.
if not 0 <= v_start < self.v_count:
return visited
# Check if v_end is in graph.
if isinstance(v_end, int) and not 0 <= v_end < self.v_count:
v_end = None
# Traverse graph until we either reach v_end or traverse every vertex.
return self._dfs(v_start, v_end)
def bfs(self, v_start: int, v_end: int = None) -> []:
"""
Return list of vertices visited during BFS search from v_start vertex up to optional v_end vertex.
If v_start is not in the graph, returns empty list.
If v_end is not in the graph, will treat it as having no v_end parameter.
Vertices are picked in ascending order.
"""
visited: list = list()
# Check if v_start is in graph.
if not 0 <= v_start < self.v_count:
return visited
# Check if v_end is in graph.
if isinstance(v_end, int) and not 0 <= v_end < self.v_count:
v_end = None
# Traverse graph until we either reach v_end or traverse every vertex.
vertices: deque = deque()
vertices.appendleft(v_start)
while len(vertices) > 0:
v: int = vertices.pop()
if v not in visited:
# Add vertex to visited vertices.
visited.append(v)
# Stop if vertex is equal to v_end.
if v == v_end:
break
                # Enqueue all neighbors of the vertex in ascending order; FIFO popping visits them in ascending order.
for neighbor in self.neighbors(v):
vertices.appendleft(neighbor)
return visited
def has_cycle(self):
"""Return True if graph contains a cycle."""
# If any of the strongly connected components (SCC) of the graph contain more than one vertex, then that SCC
# contains a cycle and so does the graph.
for component in self.connected_components():
if len(component) > 1:
return True
return False
def dijkstra(self, src: int) -> []:
"""
Returns a list of distances of src vertex to every other vertex.
If a vertex is not reachable, then its distance is infinity.
"""
distances: list = list()
if self.is_empty() or not 0 <= src < self.v_count:
return distances
# Create priority queue containing first vertex with distance 0.
vertices: list = list()
heapq.heappush(vertices, (0, src))
visited: dict = dict()
# Iterate through priority queue, updating min distance for each vertex.
while vertices:
dist_v, v = heapq.heappop(vertices)
if v not in visited:
visited[v] = dist_v
for neighbor in self.neighbors(v):
d_neighbor: int = self.adj_matrix[v][neighbor]
heapq.heappush(vertices, (dist_v + d_neighbor, neighbor))
# Update distances with min distance for each vertex, or inf if they are not reachable.
for v in self.get_vertices():
dist: int = visited.get(v, float("inf"))
distances.append(dist)
return distances
def are_adjacent(self, src: int, dst: int) -> bool:
"""Returns True if src vertex has an outgoing edge that connects to dst vertex."""
# Check if vertices are valid.
if not self._is_valid_edge(src, dst):
return False
return self.adj_matrix[src][dst] > 0
def connected_components(self) -> []:
"""
Return a list of lists containing all strongly connected components (SCC) of the graph.
Uses Kosaraju's algorithm to detect all SCCs.
"""
components: list = list()
if self.is_empty():
return components
# Iterate through all vertices via DFS.
# The top_stack maintains a topological sorting of all visited vertices.
top_stack: deque = deque()
vertices: deque = deque()
for v in self.get_vertices():
vertices.appendleft(v)
_: list = self._dfs_complete(vertices, top_stack=top_stack)
# Reverse graph to perform second round of DFS.
d_reverse: DirectedGraph = self.reversed()
self.adj_matrix, d_reverse.adj_matrix = d_reverse.adj_matrix, self.adj_matrix
# Iterate through all vertices in reverse order via DFS.
components = self._dfs_complete(top_stack)
# Reverse graph again to return to original form.
self.adj_matrix = d_reverse.adj_matrix
return components
def reversed(self) -> "DirectedGraph":
"""Returns a new DirectedGraph with outgoing edges swapped with incoming and vice versa."""
# Initialize new empty digraph with similar number of vertices.
d_graph: DirectedGraph = DirectedGraph()
for _ in range(self.v_count):
d_graph.add_vertex()
# Reflect edges over matrix diagonal to reverse their orientation then add them to new digraph.
for i in range(self.v_count):
for j in range(self.v_count):
d_graph.adj_matrix[i][j] = self.adj_matrix[j][i]
return d_graph
def neighbors(self, v: int) -> []:
"""Return all vertices that vertex v has an outgoing edge to."""
neighbors: list = list()
for i in range(self.v_count):
if self.adj_matrix[v][i] > 0:
neighbors.append(i)
return neighbors
def is_empty(self) -> bool:
"""Return True if the graph contains no vertices."""
return self.v_count == 0
def _is_valid_edge(self, src: int, dst: int) -> bool:
"""
Returns True if an edge between a src and dst vertex is valid.
An edge is invalid if the src and dst point to the same vertex, or if either vertex is not on the graph.
"""
return src != dst and 0 <= src < self.v_count and 0 <= dst < self.v_count
def _dfs_complete(self, vertices: deque, top_stack: deque = None) -> []:
"""
Returns a list of weakly connected components using DFS traversal.
An optional top_stack parameter tracks the topological sorting of the graph and in turn ensures that
the returned components are strongly connected.
"""
components: list = list()
unvisited: list = [True] * self.v_count
while vertices:
v: int = vertices.popleft()
# Grab the next vertex that hasn't been visited.
while not unvisited[v] and vertices:
v = vertices.popleft()
            # Every vertex left in the queue has already been visited, so we can stop.
            if not unvisited[v]:
                break
component: list = self._dfs(v_start=v, unvisited=unvisited, top_stack=top_stack)
if len(component) > 0:
components.append(component)
return components
def _dfs(self, v_start: int, v_end: int = None, unvisited: list = None, top_stack: deque = None) -> []:
"""
Returns a list containing all vertices visited starting from the vertex v_start up to the optional vertex
v_end via DFS.
An optional list of unvisited vertices ensures vertices are visited exactly once during multiple calls to
this method.
An optional top_stack parameter maintains a topological sorting of all visited vertices.
"""
# The backstack holds any visited vertices in the order that they were visited.
backstack: deque = deque()
vertices: deque = deque()
visited: list = list()
if unvisited is None:
unvisited = [True] * self.v_count
vertices.appendleft(v_start)
while vertices:
v: int = vertices.popleft()
if unvisited[v]:
unvisited[v] = False
visited.append(v)
backstack.appendleft(v)
# Unroll backstack so that its top points to a vertex with at least one unvisited neighbor. Update
# top_stack in the process.
if top_stack is not None:
self._backtrack(unvisited, backstack, top_stack)
if v == v_end:
break
# Neighbors are pushed in descending order so that they are visited in ascending order.
for neighbor in reversed(self.neighbors(v)):
if unvisited[neighbor]:
vertices.appendleft(neighbor)
return visited
def _backtrack(self, unvisited: list, backstack: deque, top_stack: deque) -> None:
"""
While the vertex at the top of the backstack has no unvisited neighbors, pops the vertex and pushes it to the
top_stack.
This effectively rolls back the backstack so that either the stack is emptied or the top points to a vertex
that has unvisited neighbors.
The top_stack will contain the topological sorting of the graph in return.
"""
while backstack:
v: int = backstack[0]
v_unvisited: list = list()
for neighbor in self.neighbors(v):
if unvisited[neighbor]:
v_unvisited.append(neighbor)
if not v_unvisited:
top_stack.appendleft(backstack.popleft())
else:
break
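# Illustrative usage sketch (added for clarity, not part of the assignment solution):
if __name__ == "__main__":
    g = DirectedGraph(start_edges=[(0, 1, 10), (1, 2, 5), (2, 0, 7), (2, 3, 2)])
    print(g)               # adjacency-matrix view of the 4-vertex graph
    print(g.dfs(0))        # [0, 1, 2, 3]
    print(g.dijkstra(0))   # shortest-path distances from vertex 0: [0, 10, 15, 17]
    print(g.has_cycle())   # True, because of the cycle 0 -> 1 -> 2 -> 0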
| [
"heapq.heappop",
"heapq.heappush",
"collections.deque"
] | [((5981, 5988), 'collections.deque', 'deque', ([], {}), '()\n', (5986, 5988), False, 'from collections import deque\n'), ((7389, 7423), 'heapq.heappush', 'heapq.heappush', (['vertices', '(0, src)'], {}), '(vertices, (0, src))\n', (7403, 7423), False, 'import heapq\n'), ((8897, 8904), 'collections.deque', 'deque', ([], {}), '()\n', (8902, 8904), False, 'from collections import deque\n'), ((8931, 8938), 'collections.deque', 'deque', ([], {}), '()\n', (8936, 8938), False, 'from collections import deque\n'), ((12478, 12485), 'collections.deque', 'deque', ([], {}), '()\n', (12483, 12485), False, 'from collections import deque\n'), ((12513, 12520), 'collections.deque', 'deque', ([], {}), '()\n', (12518, 12520), False, 'from collections import deque\n'), ((7585, 7608), 'heapq.heappop', 'heapq.heappop', (['vertices'], {}), '(vertices)\n', (7598, 7608), False, 'import heapq\n'), ((7816, 7873), 'heapq.heappush', 'heapq.heappush', (['vertices', '(dist_v + d_neighbor, neighbor)'], {}), '(vertices, (dist_v + d_neighbor, neighbor))\n', (7830, 7873), False, 'import heapq\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2020/5/14 20:41
# @Author: Mecthew
import time
import numpy as np
import pandas as pd
import scipy
from sklearn.svm import LinearSVC
from sklearn.linear_model import logistic
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
import scipy.sparse as sp
from utils.logger import get_logger
logger = get_logger("INFO")
class SVM:
def __init__(self, **kwargs):
self.name = "SVM"
self._model = CalibratedClassifierCV(LinearSVC(C=1.0, max_iter=500, class_weight=None, random_state=666))
def fit(self, x_train, y_train):
self._model.fit(x_train, y_train)
def predict(self, x_test):
return self._model.predict_proba(x_test)
class LR:
def __init__(self, **kwargs):
self.name = "LR"
self._model = logistic.LogisticRegression(C=1.0, solver="liblinear", multi_class="auto",
class_weight=None, max_iter=100, random_state=666)
def fit(self, x_train, y_train):
self._model.fit(x_train, y_train)
def predict(self, x_test):
return self._model.predict_proba(x_test)
def prepredict(graph_df, train_indices, use_valid, use_ohe=False):
t1 = time.time()
fea_table = graph_df['fea_table'].set_index(keys="node_index")
train_indices = train_indices
if use_valid:
valid_indices = list(set(graph_df['train_indices']) - set(train_indices))
test_indices = graph_df['test_indices'] + valid_indices
else:
test_indices = graph_df['test_indices']
train_label = graph_df['train_label'].set_index('node_index').loc[train_indices][['label']]
x_train, y_train = fea_table.loc[train_indices].to_numpy(), train_label.to_numpy()
x_test = fea_table.loc[test_indices].to_numpy()
lr = LR()
lr.fit(x_train, y_train)
if use_ohe:
ohe = OneHotEncoder(handle_unknown="ignore").fit(y_train.reshape(-1, 1))
x_train_feat, x_test_feat = ohe.transform(np.argmax(lr.predict(x_train), axis=1).reshape(-1, 1)).toarray(), \
ohe.transform(np.argmax(lr.predict(x_test), axis=1).reshape(-1, 1)).toarray()
else:
x_train_feat, x_test_feat = lr.predict(x_train), \
lr.predict(x_test)
pre_feat = np.concatenate([x_train_feat, x_test_feat], axis=0)
total_indices = np.concatenate([train_indices, test_indices], axis=0)
train_predict = np.argmax(x_train_feat, axis=1)
train_acc = accuracy_score(y_true=y_train, y_pred=train_predict)
t2 = time.time()
logger.info("Time cost for training {}: {}s, train acc {}".format(lr.name, t2-t1, train_acc))
return pd.DataFrame(data=pre_feat, index=total_indices)
def lpa_predict(graph_df, n_class, train_indices, use_valid, max_iter=100, tol=1e-3, use_ohe=False):
t1 = time.time()
train_indices = train_indices
if use_valid:
valid_indices = list(set(graph_df['train_indices']) - set(train_indices))
test_indices = graph_df['test_indices'] + valid_indices
else:
test_indices = graph_df['test_indices']
train_label = graph_df['train_label'].set_index('node_index').loc[train_indices][['label']].to_numpy()
print("Train label shape {}".format(train_label.shape))
train_label = train_label.reshape(-1)
edges = graph_df['edge_file'][['src_idx', 'dst_idx', 'edge_weight']].to_numpy()
edge_index = edges[:, :2].astype(np.int).transpose() # transpose to (2, num_edges)
edge_weight = edges[:, 2].astype(np.float)
num_nodes = len(train_indices) + len(test_indices)
t2 = time.time()
total_indices = np.concatenate([train_indices, test_indices], axis=0)
adj = sp.coo_matrix((edge_weight, edge_index), shape=(num_nodes, num_nodes)).tocsr()
adj = adj[total_indices] # reorder
adj = adj[:, total_indices]
t3 = time.time()
logger.debug("Time cost for transform adj {}s".format(t3 - t2))
row_sum = np.array(adj.sum(axis=1), dtype=np.float)
d_inv = np.power(row_sum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
normal_adj = sp.diags(d_inv).dot(adj).tocsr().transpose()
Pll = normal_adj[:len(train_indices), :len(train_indices)].copy()
Plu = normal_adj[:len(train_indices), len(train_indices):].copy()
Pul = normal_adj[len(train_indices):, :len(train_indices)].copy()
Puu = normal_adj[len(train_indices):, len(train_indices):].copy()
label_mat = np.eye(n_class)[train_label]
label_mat_prob = label_mat.copy()
print("Pul shape {}, label_mat shape {}".format(Pul.shape, label_mat_prob.shape))
Pul_dot_lable_mat = Pul.dot(label_mat)
unlabel_mat = np.zeros(shape=(len(test_indices), n_class))
iter, changed = 0, np.inf
t4 = time.time()
logger.debug("Time cost for prepare matrix {}s".format(t4-t3))
while iter < max_iter and changed > tol:
if iter % 10 == 0:
logger.debug("---> Iteration %d/%d, changed: %f" % (iter, max_iter, changed))
iter += 1
pre_unlabel_mat = unlabel_mat
unlabel_mat = Puu.dot(unlabel_mat) + Pul_dot_lable_mat
label_mat_prob = Pll.dot(label_mat_prob) + Plu.dot(pre_unlabel_mat)
changed = np.abs(pre_unlabel_mat - unlabel_mat).sum()
logger.debug("Time cost for training lpa {}".format(time.time() - t4))
# preds = np.argmax(np.array(unlabel_mat), axis=1)
# unlabel_mat = np.eye(n_class)[preds]
train_acc = accuracy_score(y_true=train_label, y_pred=np.argmax(label_mat_prob, axis=1))
logger.info("LPA training acc {}".format(train_acc))
logger.info("Time cost for LPA {}s".format(time.time() - t1))
total_indices = np.concatenate([train_indices, test_indices], axis=0)
if use_ohe:
ohe = OneHotEncoder(handle_unknown="ignore").fit(train_label.reshape(-1, 1))
label_mat_ohe = ohe.transform(np.argmax(label_mat_prob, axis=1).reshape(-1, 1)).toarray()
unlabel_mat_ohe = ohe.transform(np.argmax(unlabel_mat, axis=1).reshape(-1, 1)).toarray()
lu_mat_ohe = np.concatenate([label_mat_ohe, unlabel_mat_ohe], axis=0)
return pd.DataFrame(data=lu_mat_ohe, index=total_indices), train_acc
else:
unlabel_mat_prob = unlabel_mat
lu_mat_prob = np.concatenate([label_mat_prob, unlabel_mat_prob], axis=0)
return pd.DataFrame(data=lu_mat_prob, index=total_indices), train_acc
def is_nonnegative_integer(x_feats):
is_nonnegative = (x_feats >= 0).all()
is_integer = True
for feat in x_feats:
feat_int_sum = np.array(feat, dtype=np.int).sum()
feat_sum = np.array(feat, dtype=np.float).sum()
is_integer = (feat_int_sum == feat_sum)
if is_integer is False:
break
return is_nonnegative and is_integer
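# Illustrative usage sketch (added for clarity; importing this module still requires
# its own utils.logger dependency used at the top of the file):
if __name__ == "__main__":
    feats = np.array([[0, 1, 2], [3, 4, 5]])
    print(is_nonnegative_integer(feats))        # True: non-negative integer features
    print(is_nonnegative_integer(feats - 0.5))  # False: entries are not integers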
| [
"sklearn.metrics.accuracy_score",
"numpy.abs",
"numpy.eye",
"scipy.sparse.diags",
"numpy.power",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.svm.LinearSVC",
"numpy.argmax",
"numpy.array",
"numpy.concatenate",
"scipy.sparse.coo_matrix",
"pandas.DataFrame",
"sklearn.linear_model.logistic.L... | [((452, 470), 'utils.logger.get_logger', 'get_logger', (['"""INFO"""'], {}), "('INFO')\n", (462, 470), False, 'from utils.logger import get_logger\n'), ((1327, 1338), 'time.time', 'time.time', ([], {}), '()\n', (1336, 1338), False, 'import time\n'), ((2410, 2461), 'numpy.concatenate', 'np.concatenate', (['[x_train_feat, x_test_feat]'], {'axis': '(0)'}), '([x_train_feat, x_test_feat], axis=0)\n', (2424, 2461), True, 'import numpy as np\n'), ((2482, 2535), 'numpy.concatenate', 'np.concatenate', (['[train_indices, test_indices]'], {'axis': '(0)'}), '([train_indices, test_indices], axis=0)\n', (2496, 2535), True, 'import numpy as np\n'), ((2557, 2588), 'numpy.argmax', 'np.argmax', (['x_train_feat'], {'axis': '(1)'}), '(x_train_feat, axis=1)\n', (2566, 2588), True, 'import numpy as np\n'), ((2605, 2657), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_train', 'y_pred': 'train_predict'}), '(y_true=y_train, y_pred=train_predict)\n', (2619, 2657), False, 'from sklearn.metrics import accuracy_score\n'), ((2667, 2678), 'time.time', 'time.time', ([], {}), '()\n', (2676, 2678), False, 'import time\n'), ((2789, 2837), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'pre_feat', 'index': 'total_indices'}), '(data=pre_feat, index=total_indices)\n', (2801, 2837), True, 'import pandas as pd\n'), ((2950, 2961), 'time.time', 'time.time', ([], {}), '()\n', (2959, 2961), False, 'import time\n'), ((3713, 3724), 'time.time', 'time.time', ([], {}), '()\n', (3722, 3724), False, 'import time\n'), ((3745, 3798), 'numpy.concatenate', 'np.concatenate', (['[train_indices, test_indices]'], {'axis': '(0)'}), '([train_indices, test_indices], axis=0)\n', (3759, 3798), True, 'import numpy as np\n'), ((3975, 3986), 'time.time', 'time.time', ([], {}), '()\n', (3984, 3986), False, 'import time\n'), ((4845, 4856), 'time.time', 'time.time', ([], {}), '()\n', (4854, 4856), False, 'import time\n'), ((5753, 5806), 'numpy.concatenate', 'np.concatenate', (['[train_indices, test_indices]'], {'axis': '(0)'}), '([train_indices, test_indices], axis=0)\n', (5767, 5806), True, 'import numpy as np\n'), ((912, 1041), 'sklearn.linear_model.logistic.LogisticRegression', 'logistic.LogisticRegression', ([], {'C': '(1.0)', 'solver': '"""liblinear"""', 'multi_class': '"""auto"""', 'class_weight': 'None', 'max_iter': '(100)', 'random_state': '(666)'}), "(C=1.0, solver='liblinear', multi_class='auto',\n class_weight=None, max_iter=100, random_state=666)\n", (939, 1041), False, 'from sklearn.linear_model import logistic\n'), ((4165, 4180), 'numpy.isinf', 'np.isinf', (['d_inv'], {}), '(d_inv)\n', (4173, 4180), True, 'import numpy as np\n'), ((4546, 4561), 'numpy.eye', 'np.eye', (['n_class'], {}), '(n_class)\n', (4552, 4561), True, 'import numpy as np\n'), ((6124, 6180), 'numpy.concatenate', 'np.concatenate', (['[label_mat_ohe, unlabel_mat_ohe]'], {'axis': '(0)'}), '([label_mat_ohe, unlabel_mat_ohe], axis=0)\n', (6138, 6180), True, 'import numpy as np\n'), ((6329, 6387), 'numpy.concatenate', 'np.concatenate', (['[label_mat_prob, unlabel_mat_prob]'], {'axis': '(0)'}), '([label_mat_prob, unlabel_mat_prob], axis=0)\n', (6343, 6387), True, 'import numpy as np\n'), ((589, 656), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(1.0)', 'max_iter': '(500)', 'class_weight': 'None', 'random_state': '(666)'}), '(C=1.0, max_iter=500, class_weight=None, random_state=666)\n', (598, 656), False, 'from sklearn.svm import LinearSVC\n'), ((3809, 3879), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', 
(['(edge_weight, edge_index)'], {'shape': '(num_nodes, num_nodes)'}), '((edge_weight, edge_index), shape=(num_nodes, num_nodes))\n', (3822, 3879), True, 'import scipy.sparse as sp\n'), ((4123, 4144), 'numpy.power', 'np.power', (['row_sum', '(-1)'], {}), '(row_sum, -1)\n', (4131, 4144), True, 'import numpy as np\n'), ((5575, 5608), 'numpy.argmax', 'np.argmax', (['label_mat_prob'], {'axis': '(1)'}), '(label_mat_prob, axis=1)\n', (5584, 5608), True, 'import numpy as np\n'), ((6196, 6246), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'lu_mat_ohe', 'index': 'total_indices'}), '(data=lu_mat_ohe, index=total_indices)\n', (6208, 6246), True, 'import pandas as pd\n'), ((6403, 6454), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'lu_mat_prob', 'index': 'total_indices'}), '(data=lu_mat_prob, index=total_indices)\n', (6415, 6454), True, 'import pandas as pd\n'), ((1972, 2010), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (1985, 2010), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((5300, 5337), 'numpy.abs', 'np.abs', (['(pre_unlabel_mat - unlabel_mat)'], {}), '(pre_unlabel_mat - unlabel_mat)\n', (5306, 5337), True, 'import numpy as np\n'), ((5400, 5411), 'time.time', 'time.time', ([], {}), '()\n', (5409, 5411), False, 'import time\n'), ((5714, 5725), 'time.time', 'time.time', ([], {}), '()\n', (5723, 5725), False, 'import time\n'), ((5837, 5875), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (5850, 5875), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((6617, 6645), 'numpy.array', 'np.array', (['feat'], {'dtype': 'np.int'}), '(feat, dtype=np.int)\n', (6625, 6645), True, 'import numpy as np\n'), ((6671, 6701), 'numpy.array', 'np.array', (['feat'], {'dtype': 'np.float'}), '(feat, dtype=np.float)\n', (6679, 6701), True, 'import numpy as np\n'), ((4204, 4219), 'scipy.sparse.diags', 'sp.diags', (['d_inv'], {}), '(d_inv)\n', (4212, 4219), True, 'import scipy.sparse as sp\n'), ((5946, 5979), 'numpy.argmax', 'np.argmax', (['label_mat_prob'], {'axis': '(1)'}), '(label_mat_prob, axis=1)\n', (5955, 5979), True, 'import numpy as np\n'), ((6046, 6076), 'numpy.argmax', 'np.argmax', (['unlabel_mat'], {'axis': '(1)'}), '(unlabel_mat, axis=1)\n', (6055, 6076), True, 'import numpy as np\n')] |
import os
import scrapy
from scrapy.crawler import CrawlerProcess
import requests
from disaster_data.sources.noaa_coast.utils import get_geoinfo, get_fgdcinfo
class NoaaImageryCollections(scrapy.Spider):
name = 'noaa-coast-imagery-collections'
start_urls = [
'https://coast.noaa.gov/htdata/raster2/index.html#imagery',
]
@classmethod
def crawl(cls, outfile='output.json', ids=None, items=False):
cls.ids = ids
cls.items = items
process = CrawlerProcess({
'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
'FEED_FORMAT': 'json',
'FEED_URI': outfile
})
process.crawl(cls)
# Blocked while crawling
process.start()
def parse(self, response):
"""
Generate a STAC Collection for each NOAA imagery project, optionally filtering by ID.
"""
dem_table, imagery_table = response.xpath('//*[@class="sortable"]')
imagery_head = imagery_table.xpath('.//thead//tr/th//text()').getall()
collections = []
collection_items = []
ret = {}
for row in imagery_table.xpath('.//tbody//tr'):
values = row.xpath('.//td')
id = values[-1].xpath('.//text()').get()
if self.ids:
if id not in self.ids:
continue
feature = {
"stac_version": "0.7.0",
"properties": {},
"assets": {},
"extent": {}
}
# Converting HTML table into STAC Item
for head, value in zip(imagery_head, values):
links = value.xpath('.//a/@href').getall()
data = value.xpath('.//text()').getall()
if head == 'Dataset Name':
feature['assets'].update({
"metadata_xml": {
"href": links[0],
"type": "xml"
},
"metadata_html": {
"href": links[1],
"type": "html"
}
})
elif head == 'https':
feature['assets'].update({
"assets_http": {
"href": links[0],
"type": "html"
}
})
elif head == 'ftp':
feature['assets'].update({
"assets_ftp": {
"href": links[0],
"type": "ftp"
}
})
elif head == 'DAV':
feature['assets'].update({
"asset_viewer": {
"href": links[0],
"type": "html"
}
})
elif head == 'Tile Index':
feature['assets'].update({
"tile_index": {
"href": links[0],
"type": "shp"
}
})
elif head == 'ID #':
feature.update({'id': int(data[0])})
# Geometry handling
geoinfo = get_geoinfo('/vsizip//vsicurl/{}/0tileindex.shp'.format(feature['assets']['tile_index']['href']))
feature.update(geoinfo['geometry'])
feature['extent'].update({'spatial': geoinfo['bbox']})
# FGDC metadata
fgdcinfo = get_fgdcinfo(feature['assets']['metadata_xml']['href'])
feature['extent'].update({'temporal': [
fgdcinfo['start_date'],
fgdcinfo['end_date'],
]})
feature.update({
'title': fgdcinfo['title'],
'description': fgdcinfo['description'],
'processing': fgdcinfo['processing'],
})
collections.append(feature)
# Scrape items
if self.items:
items_url = os.path.join(feature['assets']['assets_http']['href'], 'urllist{}.txt'.format(feature['id']))
collection_items.append(self.parse_collection_items(items_url))
ret.update({'collections': collections})
if self.items:
ret.update({'items': collection_items})
return ret
def parse_collection_items(self, file_list_url):
r = requests.get(file_list_url)
collection_items = r.content.decode('utf-8').splitlines()
        return ['/vsicurl/'+x for x in collection_items if x.endswith('.tif')]
| [
"scrapy.crawler.CrawlerProcess",
"requests.get",
"disaster_data.sources.noaa_coast.utils.get_fgdcinfo"
] | [((498, 634), 'scrapy.crawler.CrawlerProcess', 'CrawlerProcess', (["{'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',\n 'FEED_FORMAT': 'json', 'FEED_URI': outfile}"], {}), "({'USER_AGENT':\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)', 'FEED_FORMAT':\n 'json', 'FEED_URI': outfile})\n", (512, 634), False, 'from scrapy.crawler import CrawlerProcess\n'), ((4582, 4609), 'requests.get', 'requests.get', (['file_list_url'], {}), '(file_list_url)\n', (4594, 4609), False, 'import requests\n'), ((3673, 3728), 'disaster_data.sources.noaa_coast.utils.get_fgdcinfo', 'get_fgdcinfo', (["feature['assets']['metadata_xml']['href']"], {}), "(feature['assets']['metadata_xml']['href'])\n", (3685, 3728), False, 'from disaster_data.sources.noaa_coast.utils import get_geoinfo, get_fgdcinfo\n')] |
import pytest
from rlo import factory
@pytest.mark.parametrize("use_subtree_match_edges", [True, False])
@pytest.mark.parametrize("loss", ["pinball=0.6", "huber"])
def test_torch_model_from_config(use_subtree_match_edges, loss):
# Check we can construct a Model
config = {
"num_embeddings": 3,
"hidden_dim": 2,
"num_gnn_blocks": 5,
"output_hidden_dim": 2,
"simulation_depth_train": 10,
"lr": 0.01,
"loss": loss,
"repetition": 1,
"decoder_readout": "sum",
"graph_state_keep_prob": 0.9,
"output_keep_prob": 0.2,
"aggregation_over_edge_types": "sum",
"use_subtree_match_edges": use_subtree_match_edges,
}
factory.torch_model_from_config(config)
@pytest.mark.parametrize("use_subtree_match_edges", [True, False])
def test_torch_data_converter_from_config(use_subtree_match_edges):
# Check we can construct a DataConverter
config = {
"simulation_depth_train": 11,
"use_subtree_match_edges": use_subtree_match_edges,
"cost_normalization": "none",
}
factory.data_converter_from_config(config)
@pytest.mark.parametrize("use_subtree_match_edges", [True, False])
@pytest.mark.parametrize("loss", ["pinball=0.3", "huber"])
def test_torch_regressor_from_config(use_subtree_match_edges, loss):
# Check we can construct a TorchModelWrapper
config = {
"num_embeddings": 3,
"hidden_dim": 2,
"num_gnn_blocks": 5,
"output_hidden_dim": 2,
"lr": 0.01,
"loss": loss,
"repetition": 1,
"use_subtree_match_edges": use_subtree_match_edges,
"cost_normalization": "none",
"tensorflow": False,
"simulation_depth_eval": 10,
"decoder_readout": "sum",
"graph_state_keep_prob": 0.99,
"output_keep_prob": 0.2,
"aggregation_over_edge_types": "sum",
"simulation_depth_train": 10,
}
factory.single_regressor_from_config(config)
| [
"pytest.mark.parametrize",
"rlo.factory.single_regressor_from_config",
"rlo.factory.torch_model_from_config",
"rlo.factory.data_converter_from_config"
] | [((42, 107), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_subtree_match_edges"""', '[True, False]'], {}), "('use_subtree_match_edges', [True, False])\n", (65, 107), False, 'import pytest\n'), ((109, 166), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""loss"""', "['pinball=0.6', 'huber']"], {}), "('loss', ['pinball=0.6', 'huber'])\n", (132, 166), False, 'import pytest\n'), ((769, 834), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_subtree_match_edges"""', '[True, False]'], {}), "('use_subtree_match_edges', [True, False])\n", (792, 834), False, 'import pytest\n'), ((1155, 1220), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_subtree_match_edges"""', '[True, False]'], {}), "('use_subtree_match_edges', [True, False])\n", (1178, 1220), False, 'import pytest\n'), ((1222, 1279), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""loss"""', "['pinball=0.3', 'huber']"], {}), "('loss', ['pinball=0.3', 'huber'])\n", (1245, 1279), False, 'import pytest\n'), ((726, 765), 'rlo.factory.torch_model_from_config', 'factory.torch_model_from_config', (['config'], {}), '(config)\n', (757, 765), False, 'from rlo import factory\n'), ((1109, 1151), 'rlo.factory.data_converter_from_config', 'factory.data_converter_from_config', (['config'], {}), '(config)\n', (1143, 1151), False, 'from rlo import factory\n'), ((1959, 2003), 'rlo.factory.single_regressor_from_config', 'factory.single_regressor_from_config', (['config'], {}), '(config)\n', (1995, 2003), False, 'from rlo import factory\n')] |
# coding=utf-8
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need
# fine tuning.
base = 'Console'
executables = [
Executable('probe.py', copyDependentFiles=True)
]
includefiles = []
packages = ['pyprobe', 'psutil']
includes = []
setup(name='pyprobe',
version='1.0',
description='x',
options={
'build_exe': {
'include_files': includefiles,
'packages': packages,
'excludes': [],
'includes': ['requests']
},
'bdist_mac': {
'bundle_name': 'pyprobe'
}
},
executables=executables, requires=['requests', 'psutil'])
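# Usage note (added for clarity): with cx_Freeze installed, the frozen console build is
# typically produced from this directory with
#   python setup.py build        # build/exe.* containing probe and its dependencies
#   python setup.py bdist_mac    # the 'pyprobe' .app bundle on macOS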
| [
"cx_Freeze.Executable",
"cx_Freeze.setup"
] | [((860, 1144), 'cx_Freeze.setup', 'setup', ([], {'name': '"""pyprobe"""', 'version': '"""1.0"""', 'description': '"""x"""', 'options': "{'build_exe': {'include_files': includefiles, 'packages': packages,\n 'excludes': [], 'includes': ['requests']}, 'bdist_mac': {'bundle_name':\n 'pyprobe'}}", 'executables': 'executables', 'requires': "['requests', 'psutil']"}), "(name='pyprobe', version='1.0', description='x', options={'build_exe':\n {'include_files': includefiles, 'packages': packages, 'excludes': [],\n 'includes': ['requests']}, 'bdist_mac': {'bundle_name': 'pyprobe'}},\n executables=executables, requires=['requests', 'psutil'])\n", (865, 1144), False, 'from cx_Freeze import setup, Executable\n'), ((743, 790), 'cx_Freeze.Executable', 'Executable', (['"""probe.py"""'], {'copyDependentFiles': '(True)'}), "('probe.py', copyDependentFiles=True)\n", (753, 790), False, 'from cx_Freeze import setup, Executable\n')] |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import redirect, request, session
from werkzeug.exceptions import NotFound
from indico.core.db import db
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.events.static.models.static import StaticSite, StaticSiteState
from indico.modules.events.static.tasks import build_static_site
from indico.modules.events.static.views import WPStaticSites
from indico.web.flask.util import url_for
class RHStaticSiteBase(RHManageEventBase):
pass
class RHStaticSiteList(RHStaticSiteBase):
def _process(self):
static_sites = self.event.static_sites.order_by(StaticSite.requested_dt.desc()).all()
return WPStaticSites.render_template('static_sites.html', self.event, static_sites=static_sites)
class RHStaticSiteBuild(RHStaticSiteBase):
ALLOW_LOCKED = True
def _process(self):
static_site = StaticSite(creator=session.user, event=self.event)
db.session.add(static_site)
db.session.commit()
build_static_site.delay(static_site)
return redirect(url_for('.list', self.event))
class RHStaticSiteDownload(RHStaticSiteBase):
normalize_url_spec = {
'locators': {
lambda self: self.static_site
}
}
def _process_args(self):
RHStaticSiteBase._process_args(self)
self.static_site = StaticSite.get_one(request.view_args['id'])
def _process(self):
if self.static_site.state != StaticSiteState.success:
raise NotFound
return self.static_site.send()
| [
"indico.core.db.db.session.commit",
"indico.modules.events.static.tasks.build_static_site.delay",
"indico.modules.events.static.views.WPStaticSites.render_template",
"indico.modules.events.static.models.static.StaticSite.get_one",
"indico.modules.events.static.models.static.StaticSite.requested_dt.desc",
... | [((929, 1023), 'indico.modules.events.static.views.WPStaticSites.render_template', 'WPStaticSites.render_template', (['"""static_sites.html"""', 'self.event'], {'static_sites': 'static_sites'}), "('static_sites.html', self.event, static_sites\n =static_sites)\n", (958, 1023), False, 'from indico.modules.events.static.views import WPStaticSites\n'), ((1135, 1185), 'indico.modules.events.static.models.static.StaticSite', 'StaticSite', ([], {'creator': 'session.user', 'event': 'self.event'}), '(creator=session.user, event=self.event)\n', (1145, 1185), False, 'from indico.modules.events.static.models.static import StaticSite, StaticSiteState\n'), ((1194, 1221), 'indico.core.db.db.session.add', 'db.session.add', (['static_site'], {}), '(static_site)\n', (1208, 1221), False, 'from indico.core.db import db\n'), ((1230, 1249), 'indico.core.db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1247, 1249), False, 'from indico.core.db import db\n'), ((1258, 1294), 'indico.modules.events.static.tasks.build_static_site.delay', 'build_static_site.delay', (['static_site'], {}), '(static_site)\n', (1281, 1294), False, 'from indico.modules.events.static.tasks import build_static_site\n'), ((1606, 1649), 'indico.modules.events.static.models.static.StaticSite.get_one', 'StaticSite.get_one', (["request.view_args['id']"], {}), "(request.view_args['id'])\n", (1624, 1649), False, 'from indico.modules.events.static.models.static import StaticSite, StaticSiteState\n'), ((1319, 1347), 'indico.web.flask.util.url_for', 'url_for', (['""".list"""', 'self.event'], {}), "('.list', self.event)\n", (1326, 1347), False, 'from indico.web.flask.util import url_for\n'), ((876, 906), 'indico.modules.events.static.models.static.StaticSite.requested_dt.desc', 'StaticSite.requested_dt.desc', ([], {}), '()\n', (904, 906), False, 'from indico.modules.events.static.models.static import StaticSite, StaticSiteState\n')] |
from os import path
import autolens as al
import autolens.plot as aplt
from test_autogalaxy.simulators.imaging import instrument_util
test_path = path.join("{}".format(path.dirname(path.realpath(__file__))), "..", "..")
def pixel_scale_from_instrument(instrument):
"""
Returns the pixel scale from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
if instrument in "vro":
return (0.2, 0.2)
elif instrument in "euclid":
return (0.1, 0.1)
elif instrument in "hst":
return (0.05, 0.05)
elif instrument in "hst_up":
return (0.03, 0.03)
elif instrument in "ao":
return (0.01, 0.01)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def grid_from_instrument(instrument):
"""
Returns the `Grid` from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
if instrument in "vro":
return al.GridIterate.uniform(shape_2d=(80, 80), pixel_scales=0.2)
elif instrument in "euclid":
return al.GridIterate.uniform(shape_2d=(120, 120), pixel_scales=0.1)
elif instrument in "hst":
return al.GridIterate.uniform(shape_2d=(200, 200), pixel_scales=0.05)
elif instrument in "hst_up":
return al.GridIterate.uniform(shape_2d=(300, 300), pixel_scales=0.03)
elif instrument in "ao":
return al.GridIterate.uniform(shape_2d=(800, 800), pixel_scales=0.01)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def psf_from_instrument(instrument):
"""
Returns the *PSF* from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
if instrument in "vro":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.5, pixel_scales=0.2, renormalize=True
)
elif instrument in "euclid":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.1, pixel_scales=0.1, renormalize=True
)
elif instrument in "hst":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.05, pixel_scales=0.05, renormalize=True
)
elif instrument in "hst_up":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.05, pixel_scales=0.03, renormalize=True
)
elif instrument in "ao":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.025, pixel_scales=0.01, renormalize=True
)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def simulator_from_instrument(instrument):
"""
Returns the *Simulator* from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
grid = grid_from_instrument(instrument=instrument)
psf = psf_from_instrument(instrument=instrument)
if instrument in "vro":
return al.SimulatorImaging(
exposure_time=100.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "euclid":
return al.SimulatorImaging(
exposure_time=2260.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "hst":
return al.SimulatorImaging(
exposure_time=2000.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "hst_up":
return al.SimulatorImaging(
exposure_time=2000.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "ao":
return al.SimulatorImaging(
exposure_time=1000.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def simulate_imaging_from_instrument(instrument, dataset_name, galaxies):
# Simulate the imaging data, remembering that we use a special image which ensures edge-effects don't
    # degrade our modeling of the telescope optics (e.g. the PSF convolution).
grid = instrument_util.grid_from_instrument(instrument=instrument)
simulator = simulator_from_instrument(instrument=instrument)
# Use the input galaxies to setup a tracer, which will generate the image for the simulated imaging data.
tracer = al.Tracer.from_galaxies(galaxies=galaxies)
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
# Now, lets output this simulated imaging-data to the test_autoarray/simulator folder.
test_path = path.join(
"{}".format(path.dirname(path.realpath(__file__))), "..", ".."
)
dataset_path = path.join(test_path, "dataset", "imaging", dataset_name, instrument)
imaging.output_to_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
overwrite=True,
)
plotter = aplt.MatPlot2D(output=aplt.Output(path=dataset_path, format="png"))
plotter = aplt.MatPlot2D(output=aplt.Output(path=dataset_path, format="png"))
aplt.Imaging.subplot_imaging(imaging=imaging, plotter=plotter)
aplt.imaging.individual(
imaging=imaging,
image=True,
noise_map=True,
psf=True,
signal_to_noise_map=True,
plotter=plotter,
)
aplt.Tracer.subplot_tracer(tracer=tracer, grid=grid, plotter=plotter)
aplt.Tracer.figures(
tracer=tracer,
grid=grid,
image=True,
source_plane=True,
convergence=True,
potential=True,
deflections=True,
plotter=plotter,
)
def load_test_imaging(dataset_name, instrument, name=None):
pixel_scales = instrument_util.pixel_scale_from_instrument(instrument=instrument)
test_path = path.join(
"{}".format(path.dirname(path.realpath(__file__))), "..", ".."
)
dataset_path = path.join(test_path, "dataset", "imaging", dataset_name, instrument)
return al.Imaging.from_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
pixel_scales=pixel_scales,
name=name,
)
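# Illustrative usage sketch (added for clarity, not part of the original test utilities):
if __name__ == "__main__":
    # Pixel scales follow the instrument conventions defined above.
    print(pixel_scale_from_instrument(instrument="vro"))     # (0.2, 0.2)
    print(pixel_scale_from_instrument(instrument="euclid"))  # (0.1, 0.1)
    print(pixel_scale_from_instrument(instrument="ao"))      # (0.01, 0.01)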
| [
"autolens.Tracer.from_galaxies",
"autolens.plot.Tracer.figures",
"os.path.join",
"autolens.plot.Imaging.subplot_imaging",
"os.path.realpath",
"autolens.plot.imaging.individual",
"test_autogalaxy.simulators.imaging.instrument_util.pixel_scale_from_instrument",
"autolens.plot.Output",
"autolens.Simula... | [((5330, 5389), 'test_autogalaxy.simulators.imaging.instrument_util.grid_from_instrument', 'instrument_util.grid_from_instrument', ([], {'instrument': 'instrument'}), '(instrument=instrument)\n', (5366, 5389), False, 'from test_autogalaxy.simulators.imaging import instrument_util\n'), ((5585, 5627), 'autolens.Tracer.from_galaxies', 'al.Tracer.from_galaxies', ([], {'galaxies': 'galaxies'}), '(galaxies=galaxies)\n', (5608, 5627), True, 'import autolens as al\n'), ((5925, 5993), 'os.path.join', 'path.join', (['test_path', '"""dataset"""', '"""imaging"""', 'dataset_name', 'instrument'], {}), "(test_path, 'dataset', 'imaging', dataset_name, instrument)\n", (5934, 5993), False, 'from os import path\n'), ((6413, 6475), 'autolens.plot.Imaging.subplot_imaging', 'aplt.Imaging.subplot_imaging', ([], {'imaging': 'imaging', 'plotter': 'plotter'}), '(imaging=imaging, plotter=plotter)\n', (6441, 6475), True, 'import autolens.plot as aplt\n'), ((6483, 6609), 'autolens.plot.imaging.individual', 'aplt.imaging.individual', ([], {'imaging': 'imaging', 'image': '(True)', 'noise_map': '(True)', 'psf': '(True)', 'signal_to_noise_map': '(True)', 'plotter': 'plotter'}), '(imaging=imaging, image=True, noise_map=True, psf=\n True, signal_to_noise_map=True, plotter=plotter)\n', (6506, 6609), True, 'import autolens.plot as aplt\n'), ((6674, 6743), 'autolens.plot.Tracer.subplot_tracer', 'aplt.Tracer.subplot_tracer', ([], {'tracer': 'tracer', 'grid': 'grid', 'plotter': 'plotter'}), '(tracer=tracer, grid=grid, plotter=plotter)\n', (6700, 6743), True, 'import autolens.plot as aplt\n'), ((6751, 6900), 'autolens.plot.Tracer.figures', 'aplt.Tracer.figures', ([], {'tracer': 'tracer', 'grid': 'grid', 'image': '(True)', 'source_plane': '(True)', 'convergence': '(True)', 'potential': '(True)', 'deflections': '(True)', 'plotter': 'plotter'}), '(tracer=tracer, grid=grid, image=True, source_plane=True,\n convergence=True, potential=True, deflections=True, plotter=plotter)\n', (6770, 6900), True, 'import autolens.plot as aplt\n'), ((7064, 7130), 'test_autogalaxy.simulators.imaging.instrument_util.pixel_scale_from_instrument', 'instrument_util.pixel_scale_from_instrument', ([], {'instrument': 'instrument'}), '(instrument=instrument)\n', (7107, 7130), False, 'from test_autogalaxy.simulators.imaging import instrument_util\n'), ((7262, 7330), 'os.path.join', 'path.join', (['test_path', '"""dataset"""', '"""imaging"""', 'dataset_name', 'instrument'], {}), "(test_path, 'dataset', 'imaging', dataset_name, instrument)\n", (7271, 7330), False, 'from os import path\n'), ((1473, 1532), 'autolens.GridIterate.uniform', 'al.GridIterate.uniform', ([], {'shape_2d': '(80, 80)', 'pixel_scales': '(0.2)'}), '(shape_2d=(80, 80), pixel_scales=0.2)\n', (1495, 1532), True, 'import autolens as al\n'), ((2509, 2602), 'autolens.Kernel.from_gaussian', 'al.Kernel.from_gaussian', ([], {'shape_2d': '(31, 31)', 'sigma': '(0.5)', 'pixel_scales': '(0.2)', 'renormalize': '(True)'}), '(shape_2d=(31, 31), sigma=0.5, pixel_scales=0.2,\n renormalize=True)\n', (2532, 2602), True, 'import autolens as al\n'), ((3941, 4044), 'autolens.SimulatorImaging', 'al.SimulatorImaging', ([], {'exposure_time': '(100.0)', 'psf': 'psf', 'background_sky_level': '(1.0)', 'add_poisson_noise': '(True)'}), '(exposure_time=100.0, psf=psf, background_sky_level=1.0,\n add_poisson_noise=True)\n', (3960, 4044), True, 'import autolens as al\n'), ((189, 212), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (202, 212), False, 'from os import path\n'), 
((1583, 1644), 'autolens.GridIterate.uniform', 'al.GridIterate.uniform', ([], {'shape_2d': '(120, 120)', 'pixel_scales': '(0.1)'}), '(shape_2d=(120, 120), pixel_scales=0.1)\n', (1605, 1644), True, 'import autolens as al\n'), ((2675, 2768), 'autolens.Kernel.from_gaussian', 'al.Kernel.from_gaussian', ([], {'shape_2d': '(31, 31)', 'sigma': '(0.1)', 'pixel_scales': '(0.1)', 'renormalize': '(True)'}), '(shape_2d=(31, 31), sigma=0.1, pixel_scales=0.1,\n renormalize=True)\n', (2698, 2768), True, 'import autolens as al\n'), ((4155, 4259), 'autolens.SimulatorImaging', 'al.SimulatorImaging', ([], {'exposure_time': '(2260.0)', 'psf': 'psf', 'background_sky_level': '(1.0)', 'add_poisson_noise': '(True)'}), '(exposure_time=2260.0, psf=psf, background_sky_level=1.0,\n add_poisson_noise=True)\n', (4174, 4259), True, 'import autolens as al\n'), ((6045, 6082), 'os.path.join', 'path.join', (['dataset_path', '"""image.fits"""'], {}), "(dataset_path, 'image.fits')\n", (6054, 6082), False, 'from os import path\n'), ((6102, 6137), 'os.path.join', 'path.join', (['dataset_path', '"""psf.fits"""'], {}), "(dataset_path, 'psf.fits')\n", (6111, 6137), False, 'from os import path\n'), ((6163, 6204), 'os.path.join', 'path.join', (['dataset_path', '"""noise_map.fits"""'], {}), "(dataset_path, 'noise_map.fits')\n", (6172, 6204), False, 'from os import path\n'), ((6277, 6321), 'autolens.plot.Output', 'aplt.Output', ([], {'path': 'dataset_path', 'format': '"""png"""'}), "(path=dataset_path, format='png')\n", (6288, 6321), True, 'import autolens.plot as aplt\n'), ((6360, 6404), 'autolens.plot.Output', 'aplt.Output', ([], {'path': 'dataset_path', 'format': '"""png"""'}), "(path=dataset_path, format='png')\n", (6371, 6404), True, 'import autolens.plot as aplt\n'), ((7387, 7424), 'os.path.join', 'path.join', (['dataset_path', '"""image.fits"""'], {}), "(dataset_path, 'image.fits')\n", (7396, 7424), False, 'from os import path\n'), ((7444, 7479), 'os.path.join', 'path.join', (['dataset_path', '"""psf.fits"""'], {}), "(dataset_path, 'psf.fits')\n", (7453, 7479), False, 'from os import path\n'), ((7505, 7546), 'os.path.join', 'path.join', (['dataset_path', '"""noise_map.fits"""'], {}), "(dataset_path, 'noise_map.fits')\n", (7514, 7546), False, 'from os import path\n'), ((1692, 1754), 'autolens.GridIterate.uniform', 'al.GridIterate.uniform', ([], {'shape_2d': '(200, 200)', 'pixel_scales': '(0.05)'}), '(shape_2d=(200, 200), pixel_scales=0.05)\n', (1714, 1754), True, 'import autolens as al\n'), ((2836, 2931), 'autolens.Kernel.from_gaussian', 'al.Kernel.from_gaussian', ([], {'shape_2d': '(31, 31)', 'sigma': '(0.05)', 'pixel_scales': '(0.05)', 'renormalize': '(True)'}), '(shape_2d=(31, 31), sigma=0.05, pixel_scales=0.05,\n renormalize=True)\n', (2859, 2931), True, 'import autolens as al\n'), ((4367, 4471), 'autolens.SimulatorImaging', 'al.SimulatorImaging', ([], {'exposure_time': '(2000.0)', 'psf': 'psf', 'background_sky_level': '(1.0)', 'add_poisson_noise': '(True)'}), '(exposure_time=2000.0, psf=psf, background_sky_level=1.0,\n add_poisson_noise=True)\n', (4386, 4471), True, 'import autolens as al\n'), ((5858, 5881), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (5871, 5881), False, 'from os import path\n'), ((7195, 7218), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (7208, 7218), False, 'from os import path\n'), ((1805, 1867), 'autolens.GridIterate.uniform', 'al.GridIterate.uniform', ([], {'shape_2d': '(300, 300)', 'pixel_scales': '(0.03)'}), '(shape_2d=(300, 300), 
pixel_scales=0.03)\n', (1827, 1867), True, 'import autolens as al\n'), ((3002, 3097), 'autolens.Kernel.from_gaussian', 'al.Kernel.from_gaussian', ([], {'shape_2d': '(31, 31)', 'sigma': '(0.05)', 'pixel_scales': '(0.03)', 'renormalize': '(True)'}), '(shape_2d=(31, 31), sigma=0.05, pixel_scales=0.03,\n renormalize=True)\n', (3025, 3097), True, 'import autolens as al\n'), ((4582, 4686), 'autolens.SimulatorImaging', 'al.SimulatorImaging', ([], {'exposure_time': '(2000.0)', 'psf': 'psf', 'background_sky_level': '(1.0)', 'add_poisson_noise': '(True)'}), '(exposure_time=2000.0, psf=psf, background_sky_level=1.0,\n add_poisson_noise=True)\n', (4601, 4686), True, 'import autolens as al\n'), ((1914, 1976), 'autolens.GridIterate.uniform', 'al.GridIterate.uniform', ([], {'shape_2d': '(800, 800)', 'pixel_scales': '(0.01)'}), '(shape_2d=(800, 800), pixel_scales=0.01)\n', (1936, 1976), True, 'import autolens as al\n'), ((3164, 3260), 'autolens.Kernel.from_gaussian', 'al.Kernel.from_gaussian', ([], {'shape_2d': '(31, 31)', 'sigma': '(0.025)', 'pixel_scales': '(0.01)', 'renormalize': '(True)'}), '(shape_2d=(31, 31), sigma=0.025, pixel_scales=0.01,\n renormalize=True)\n', (3187, 3260), True, 'import autolens as al\n'), ((4793, 4897), 'autolens.SimulatorImaging', 'al.SimulatorImaging', ([], {'exposure_time': '(1000.0)', 'psf': 'psf', 'background_sky_level': '(1.0)', 'add_poisson_noise': '(True)'}), '(exposure_time=1000.0, psf=psf, background_sky_level=1.0,\n add_poisson_noise=True)\n', (4812, 4897), True, 'import autolens as al\n')] |
from .conftest import GoProCameraTest
from socket import timeout
from urllib import error
class GpControlSetTest(GoProCameraTest):
def test_gp_control_set(self):
# on success, this is an empty json blob
self.responses['/gp/gpControl/setting/foo/bar'] = '{}'
assert '{}' == self.goprocam.gpControlSet('foo', 'bar')
def test_gp_control_set_error(self):
assert isinstance(self.goprocam.gpControlSet('foo', 'bar'),
error.HTTPError)
def test_gp_control_set_timeout(self):
self.responses['/gp/gpControl/setting/foo/bar'] = timeout()
assert isinstance(self.goprocam.gpControlSet('foo', 'bar'), timeout)
| [
"socket.timeout"
] | [((600, 609), 'socket.timeout', 'timeout', ([], {}), '()\n', (607, 609), False, 'from socket import timeout\n')] |
from system.db import db
from telegram_bot.handlers.utils.decorators import remember_new_user, \
send_typing, write_logs
from telegram_bot.handlers.utils.menu_entries import MenuEntry
from telegram_bot.handlers.utils.reply_markup import create_main_reply_markup
from telegram_bot.models import User
@write_logs
@send_typing
@remember_new_user
def handle_detailed_mode_cmd(bot, update) -> int:
db.session.query(User).filter_by(
telegram_id=update.message.from_user.id
).update({
'simple_mode': False
})
db.session.commit()
bot.send_message(
chat_id=update.message.chat_id,
text='Switched to detailed mode',
reply_markup=create_main_reply_markup()
)
return MenuEntry.START_MENU.value
| [
"system.db.db.session.query",
"system.db.db.session.commit",
"telegram_bot.handlers.utils.reply_markup.create_main_reply_markup"
] | [((540, 559), 'system.db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (557, 559), False, 'from system.db import db\n'), ((685, 711), 'telegram_bot.handlers.utils.reply_markup.create_main_reply_markup', 'create_main_reply_markup', ([], {}), '()\n', (709, 711), False, 'from telegram_bot.handlers.utils.reply_markup import create_main_reply_markup\n'), ((403, 425), 'system.db.db.session.query', 'db.session.query', (['User'], {}), '(User)\n', (419, 425), False, 'from system.db import db\n')] |
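For context, a callback with this (bot, update) signature is normally registered through python-telegram-bot's dispatcher. The snippet below is an illustrative assumption rather than code from this project: the command name, token placeholder, and pre-v13 Updater style are all hypothetical.

from telegram.ext import Updater, CommandHandler

updater = Updater('<BOT_TOKEN>')  # placeholder token
updater.dispatcher.add_handler(CommandHandler('detailed', handle_detailed_mode_cmd))
updater.start_polling()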
# -*- coding: utf-8-*-
import os
import base64
import tempfile
import pypinyin
from aip import AipSpeech
from . import utils, config, constants
from robot import logging
from pathlib import Path
from pypinyin import lazy_pinyin
from pydub import AudioSegment
from abc import ABCMeta, abstractmethod
from .sdk import TencentSpeech, AliSpeech, XunfeiSpeech, atc
logger = logging.getLogger(__name__)
class AbstractTTS(object):
"""
Generic parent class for all TTS engines
"""
__metaclass__ = ABCMeta
@classmethod
def get_config(cls):
return {}
@classmethod
def get_instance(cls):
profile = cls.get_config()
instance = cls(**profile)
return instance
@abstractmethod
def get_speech(self, phrase):
pass
class HanTTS(AbstractTTS):
"""
HanTTS:https://github.com/junzew/HanTTS
    To use this module, first download the syllables.zip voice library from SourceForge:
    https://sourceforge.net/projects/hantts/files/?source=navbar
    and unzip it into the ~/.wukong directory.
"""
SLUG = "han-tts"
CHUNK = 1024
punctuation = [',', '。','?','!','“','”',';',':','(',")",":",";",",",".","?","!","\"","\'","(",")"]
def __init__(self, voice='syllables', **args):
super(self.__class__, self).__init__()
self.voice = voice
@classmethod
def get_config(cls):
# Try to get han-tts config from config
return config.get('han-tts', {})
def get_speech(self, phrase):
"""
Synthesize .wav from text
"""
src = os.path.join(constants.CONFIG_PATH, self.voice)
text = phrase
def preprocess(syllables):
temp = []
for syllable in syllables:
for p in self.punctuation:
syllable = syllable.replace(p, "")
if syllable.isdigit():
syllable = atc.num2chinese(syllable)
new_sounds = lazy_pinyin(syllable, style=pypinyin.TONE3)
for e in new_sounds:
temp.append(e)
else:
temp.append(syllable)
return temp
if not os.path.exists(src):
logger.error('{} 合成失败: 请先下载 syllables.zip (https://sourceforge.net/projects/hantts/files/?source=navbar) 并解压到 ~/.wukong 目录下'.format(self.SLUG))
return None
logger.debug("{} 合成中...".format(self.SLUG))
delay = 0
increment = 355 # milliseconds
pause = 500 # pause for punctuation
syllables = lazy_pinyin(text, style=pypinyin.TONE3)
syllables = preprocess(syllables)
# initialize to be complete silence, each character takes up ~500ms
result = AudioSegment.silent(duration=500*len(text))
for syllable in syllables:
path = os.path.join(src, syllable+".wav")
sound_file = Path(path)
# insert 500 ms silence for punctuation marks
if syllable in self.punctuation:
short_silence = AudioSegment.silent(duration=pause)
result = result.overlay(short_silence, position=delay)
delay += increment
continue
# skip sound file that doesn't exist
if not sound_file.is_file():
continue
segment = AudioSegment.from_wav(path)
result = result.overlay(segment, position=delay)
delay += increment
tmpfile = ''
with tempfile.NamedTemporaryFile() as f:
tmpfile = f.name
result.export(tmpfile, format="wav")
logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))
return tmpfile
class BaiduTTS(AbstractTTS):
"""
    Text-to-speech backed by Baidu's speech synthesis service.
    To use this module, first register a developer account at yuyin.baidu.com,
    then create a new application and copy its API Key and Secret Key from the
    "view key" page of the application console into config.yml:
...
baidu_yuyin:
appid: '9670645'
api_key: '<KEY>'
secret_key: '<KEY>'
dev_pid: 1936
per: 1
lan: 'zh'
...
"""
SLUG = "baidu-tts"
def __init__(self, appid, api_key, secret_key, per=1, lan='zh', **args):
super(self.__class__, self).__init__()
self.client = AipSpeech(appid, api_key, secret_key)
self.per, self.lan = str(per), lan
@classmethod
def get_config(cls):
# Try to get baidu_yuyin config from config
return config.get('baidu_yuyin', {})
def get_speech(self, phrase):
result = self.client.synthesis(phrase, self.lan, 1, {'per': self.per});
        # on success the raw audio bytes are returned; on error a dict is returned (see Baidu's error codes)
if not isinstance(result, dict):
tmpfile = utils.write_temp_file(result, '.mp3')
logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))
return tmpfile
else:
logger.critical('{} 合成失败!'.format(self.SLUG), exc_info=True)
class TencentTTS(AbstractTTS):
"""
    Tencent speech synthesis.
    region: service region; picking the one closest to you helps latency.
        Valid values: https://cloud.tencent.com/document/api/441/17365#.E5.9C.B0.E5.9F.9F.E5.88.97.E8.A1.A8
    voiceType:
        - 0: female voice 1, warm style (default)
        - 1: male voice 1, mature style
        - 2: male voice 2, mature style
    language:
        - 1: Chinese, at most 100 Chinese characters (punctuation counts as a character)
        - 2: English, at most 400 letters (punctuation counts as a letter)
"""
SLUG = "tencent-tts"
def __init__(self, appid, secretid, secret_key, region='ap-guangzhou', voiceType=0, language=1, **args):
super(self.__class__, self).__init__()
self.engine = TencentSpeech.tencentSpeech(secret_key, secretid)
self.region, self.voiceType, self.language = region, voiceType, language
@classmethod
def get_config(cls):
# Try to get tencent_yuyin config from config
return config.get('tencent_yuyin', {})
def get_speech(self, phrase):
result = self.engine.TTS(phrase, self.voiceType, self.language, self.region)
if 'Response' in result and 'Audio' in result['Response']:
audio = result['Response']['Audio']
data = base64.b64decode(audio)
tmpfile = utils.write_temp_file(data, '.wav')
logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))
return tmpfile
else:
logger.critical('{} 合成失败!'.format(self.SLUG), exc_info=True)
class XunfeiTTS(AbstractTTS):
"""
    Speech synthesis via the iFLYTEK (Xunfei) API.
"""
SLUG = "xunfei-tts"
def __init__(self, appid, api_key, api_secret, voice='xiaoyan'):
super(self.__class__, self).__init__()
self.appid, self.api_key, self.api_secret, self.voice_name = appid, api_key, api_secret, voice
@classmethod
def get_config(cls):
# Try to get xunfei_yuyin config from config
return config.get('xunfei_yuyin', {})
def get_speech(self, phrase):
return XunfeiSpeech.synthesize(phrase, self.appid, self.api_key, self.api_secret, self.voice_name)
class AliTTS(AbstractTTS):
"""
    Alibaba (Aliyun) TTS.
    voice: the speaker voice, defaults to xiaoyun.
    Full list of voices: https://help.aliyun.com/document_detail/84435.html?spm=a2c4g.11186623.2.24.67ce5275q2RGsT
"""
SLUG = "ali-tts"
def __init__(self, appKey, token, voice='xiaoyun', **args):
super(self.__class__, self).__init__()
self.appKey, self.token, self.voice = appKey, token, voice
@classmethod
def get_config(cls):
# Try to get ali_yuyin config from config
return config.get('ali_yuyin', {})
def get_speech(self, phrase):
tmpfile = AliSpeech.tts(self.appKey, self.token, self.voice, phrase)
if tmpfile is not None:
logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))
return tmpfile
else:
logger.critical('{} 合成失败!'.format(self.SLUG), exc_info=True)
def get_engine_by_slug(slug=None):
"""
Returns:
A TTS Engine implementation available on the current platform
Raises:
ValueError if no speaker implementation is supported on this platform
"""
if not slug or type(slug) is not str:
raise TypeError("无效的 TTS slug '%s'", slug)
selected_engines = list(filter(lambda engine: hasattr(engine, "SLUG") and
engine.SLUG == slug, get_engines()))
if len(selected_engines) == 0:
raise ValueError("错误:找不到名为 {} 的 TTS 引擎".format(slug))
else:
if len(selected_engines) > 1:
logger.warning("注意: 有多个 TTS 名称与指定的引擎名 {} 匹配").format(slug)
engine = selected_engines[0]
logger.info("使用 {} TTS 引擎".format(engine.SLUG))
return engine.get_instance()
def get_engines():
def get_subclasses(cls):
subclasses = set()
for subclass in cls.__subclasses__():
subclasses.add(subclass)
subclasses.update(get_subclasses(subclass))
return subclasses
return [engine for engine in
list(get_subclasses(AbstractTTS))
if hasattr(engine, 'SLUG') and engine.SLUG]
| [
"os.path.exists",
"aip.AipSpeech",
"pathlib.Path",
"os.path.join",
"pypinyin.lazy_pinyin",
"base64.b64decode",
"tempfile.NamedTemporaryFile",
"pydub.AudioSegment.silent",
"robot.logging.getLogger",
"pydub.AudioSegment.from_wav"
] | [((370, 397), 'robot.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (387, 397), False, 'from robot import logging\n'), ((1518, 1565), 'os.path.join', 'os.path.join', (['constants.CONFIG_PATH', 'self.voice'], {}), '(constants.CONFIG_PATH, self.voice)\n', (1530, 1565), False, 'import os\n'), ((2522, 2561), 'pypinyin.lazy_pinyin', 'lazy_pinyin', (['text'], {'style': 'pypinyin.TONE3'}), '(text, style=pypinyin.TONE3)\n', (2533, 2561), False, 'from pypinyin import lazy_pinyin\n'), ((4221, 4258), 'aip.AipSpeech', 'AipSpeech', (['appid', 'api_key', 'secret_key'], {}), '(appid, api_key, secret_key)\n', (4230, 4258), False, 'from aip import AipSpeech\n'), ((2148, 2167), 'os.path.exists', 'os.path.exists', (['src'], {}), '(src)\n', (2162, 2167), False, 'import os\n'), ((2804, 2840), 'os.path.join', 'os.path.join', (['src', "(syllable + '.wav')"], {}), "(src, syllable + '.wav')\n", (2816, 2840), False, 'import os\n'), ((2864, 2874), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2868, 2874), False, 'from pathlib import Path\n'), ((3314, 3341), 'pydub.AudioSegment.from_wav', 'AudioSegment.from_wav', (['path'], {}), '(path)\n', (3335, 3341), False, 'from pydub import AudioSegment\n'), ((3469, 3498), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (3496, 3498), False, 'import tempfile\n'), ((6000, 6023), 'base64.b64decode', 'base64.b64decode', (['audio'], {}), '(audio)\n', (6016, 6023), False, 'import base64\n'), ((3010, 3045), 'pydub.AudioSegment.silent', 'AudioSegment.silent', ([], {'duration': 'pause'}), '(duration=pause)\n', (3029, 3045), False, 'from pydub import AudioSegment\n'), ((1912, 1955), 'pypinyin.lazy_pinyin', 'lazy_pinyin', (['syllable'], {'style': 'pypinyin.TONE3'}), '(syllable, style=pypinyin.TONE3)\n', (1923, 1955), False, 'from pypinyin import lazy_pinyin\n')] |
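Engine selection in this module is slug-driven: get_engine_by_slug looks up the class whose SLUG matches the configured name and returns an instance built from config.yml. A minimal usage sketch — the slug and phrase are illustrative and assume the matching credentials (here baidu_yuyin) are present in the config:

engine = get_engine_by_slug('baidu-tts')  # any SLUG defined above: han-tts, tencent-tts, xunfei-tts, ali-tts
audio_path = engine.get_speech('hello from the robot')
if audio_path is not None:
    print('synthesized audio written to', audio_path)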
# -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable= no-member, arguments-differ, invalid-name
#
# Utilities for using pre-trained models.
import torch
from dgl.data.utils import _get_dgl_url, download
from .moleculenet import *
from .generative_models import *
from .property_prediction import *
from .reaction import *
__all__ = ['load_pretrained']
url = {**moleculenet_url, **generative_url, **property_url, **reaction_url}
def download_and_load_checkpoint(model_name, model, model_postfix,
local_pretrained_path='pre_trained.pth', log=True):
"""Download pretrained model checkpoint
The model will be loaded to CPU.
Parameters
----------
model_name : str
Name of the model
model : nn.Module
Instantiated model instance
model_postfix : str
Postfix for pretrained model checkpoint
local_pretrained_path : str
Local name for the downloaded model checkpoint
log : bool
Whether to print progress for model loading
Returns
-------
model : nn.Module
Pretrained model
"""
url_to_pretrained = _get_dgl_url(model_postfix)
local_pretrained_path = '_'.join([model_name, local_pretrained_path])
download(url_to_pretrained, path=local_pretrained_path, log=log)
checkpoint = torch.load(local_pretrained_path, map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
if log:
print('Pretrained model loaded')
return model
# pylint: disable=I1101
def load_pretrained(model_name, log=True):
"""Load a pretrained model
Parameters
----------
model_name : str
Currently supported options include
* ``'GCN_Tox21'``: A GCN-based model for molecular property prediction on Tox21
* ``'GAT_Tox21'``: A GAT-based model for molecular property prediction on Tox21
* ``'Weave_Tox21'``: A Weave model for molecular property prediction on Tox21
* ``'AttentiveFP_Aromaticity'``: An AttentiveFP model for predicting number of
aromatic atoms on a subset of Pubmed
* ``'DGMG_ChEMBL_canonical'``: A DGMG model trained on ChEMBL with a canonical
atom order
* ``'DGMG_ChEMBL_random'``: A DGMG model trained on ChEMBL for molecule generation
with a random atom order
* ``'DGMG_ZINC_canonical'``: A DGMG model trained on ZINC for molecule generation
with a canonical atom order
* ``'DGMG_ZINC_random'``: A DGMG model pre-trained on ZINC for molecule generation
with a random atom order
* ``'JTNN_ZINC'``: A JTNN model pre-trained on ZINC for molecule generation
* ``'wln_center_uspto'``: A WLN model pre-trained on USPTO for reaction prediction
* ``'wln_rank_uspto'``: A WLN model pre-trained on USPTO for candidate product ranking
* ``'gin_supervised_contextpred'``: A GIN model pre-trained with supervised learning
and context prediction
* ``'gin_supervised_infomax'``: A GIN model pre-trained with supervised learning
and deep graph infomax
* ``'gin_supervised_edgepred'``: A GIN model pre-trained with supervised learning
and edge prediction
* ``'gin_supervised_masking'``: A GIN model pre-trained with supervised learning
and attribute masking
* ``'GCN_canonical_BACE'``: A GCN model trained on BACE with canonical
featurization for atoms
* ``'GCN_attentivefp_BACE'``: A GCN model trained on BACE with attentivefp
featurization for atoms
* ``'GAT_canonical_BACE'``: A GAT model trained on BACE with canonical
featurization for atoms
* ``'GAT_attentivefp_BACE'``: A GAT model trained on BACE with attentivefp
featurization for atoms
* ``'Weave_canonical_BACE'``: A Weave model trained on BACE with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_BACE'``: A Weave model trained on BACE with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_BACE'``: An MPNN model trained on BACE with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_BACE'``: An MPNN model trained on BACE with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_BACE'``: An AttentiveFP model trained on BACE with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_BACE'``: An AttentiveFP model trained on BACE with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_BACE'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on BACE
* ``'gin_supervised_infomax_BACE'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on BACE
* ``'gin_supervised_edgepred_BACE'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on BACE
* ``'gin_supervised_masking_BACE'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on BACE
* ``'NF_canonical_BACE'``: An NF model trained on BACE with canonical
featurization for atoms
* ``'GCN_canonical_BBBP'``: A GCN model trained on BBBP with canonical
featurization for atoms
* ``'GCN_attentivefp_BBBP'``: A GCN model trained on BBBP with attentivefp
featurization for atoms
* ``'GAT_canonical_BBBP'``: A GAT model trained on BBBP with canonical
featurization for atoms
* ``'GAT_attentivefp_BBBP'``: A GAT model trained on BBBP with attentivefp
featurization for atoms
* ``'Weave_canonical_BBBP'``: A Weave model trained on BBBP with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_BBBP'``: A Weave model trained on BBBP with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_BBBP'``: An MPNN model trained on BBBP with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_BBBP'``: An MPNN model trained on BBBP with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_BBBP'``: An AttentiveFP model trained on BBBP with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_BBBP'``: An AttentiveFP model trained on BBBP with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_BBBP'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on BBBP
* ``'gin_supervised_infomax_BBBP'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on BBBP
* ``'gin_supervised_edgepred_BBBP'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on BBBP
* ``'gin_supervised_masking_BBBP'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on BBBP
* ``'NF_canonical_BBBP'``: An NF model pre-trained on BBBP with canonical
featurization for atoms
* ``'GCN_canonical_ClinTox'``: A GCN model trained on ClinTox with canonical
featurization for atoms
* ``'GCN_attentivefp_ClinTox'``: A GCN model trained on ClinTox with attentivefp
featurization for atoms
* ``'GAT_canonical_ClinTox'``: A GAT model trained on ClinTox with canonical
featurization for atoms
* ``'GAT_attentivefp_ClinTox'``: A GAT model trained on ClinTox with attentivefp
featurization for atoms
* ``'Weave_canonical_ClinTox'``: A Weave model trained on ClinTox with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_ClinTox'``: A Weave model trained on ClinTox with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_ClinTox'``: An MPNN model trained on ClinTox with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_ClinTox'``: An MPNN model trained on ClinTox with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_ClinTox'``: An AttentiveFP model trained on ClinTox with
canonical featurization for atoms and bonds
        * ``'AttentiveFP_attentivefp_ClinTox'``: An AttentiveFP model trained on ClinTox with
attentivefp featurization for atoms and bonds
* ``'GCN_canonical_ESOL'``: A GCN model trained on ESOL with canonical
featurization for atoms
* ``'GCN_attentivefp_ESOL'``: A GCN model trained on ESOL with attentivefp
featurization for atoms
* ``'GAT_canonical_ESOL'``: A GAT model trained on ESOL with canonical
featurization for atoms
* ``'GAT_attentivefp_ESOL'``: A GAT model trained on ESOL with attentivefp
featurization for atoms
* ``'Weave_canonical_ESOL'``: A Weave model trained on ESOL with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_ESOL'``: A Weave model trained on ESOL with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_ESOL'``: An MPNN model trained on ESOL with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_ESOL'``: An MPNN model trained on ESOL with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_ESOL'``: An AttentiveFP model trained on ESOL with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_ESOL'``: An AttentiveFP model trained on ESOL with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_ESOL'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on ESOL
* ``'gin_supervised_infomax_ESOL'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on ESOL
* ``'gin_supervised_edgepred_ESOL'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on ESOL
* ``'gin_supervised_masking_ESOL'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on ESOL
* ``'GCN_canonical_FreeSolv'``: A GCN model trained on FreeSolv with canonical
featurization for atoms
* ``'GCN_attentivefp_FreeSolv'``: A GCN model trained on FreeSolv with attentivefp
featurization for atoms
* ``'GAT_canonical_FreeSolv'``: A GAT model trained on FreeSolv with canonical
featurization for atoms
* ``'GAT_attentivefp_FreeSolv'``: A GAT model trained on FreeSolv with attentivefp
featurization for atoms
* ``'Weave_canonical_FreeSolv'``: A Weave model trained on FreeSolv with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_FreeSolv'``: A Weave model trained on FreeSolv with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_FreeSolv'``: An MPNN model trained on FreeSolv with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_FreeSolv'``: An MPNN model trained on FreeSolv with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_FreeSolv'``: An AttentiveFP model trained on FreeSolv with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_FreeSolv'``: An AttentiveFP model trained on FreeSolv with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_FreeSolv'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on FreeSolv
* ``'gin_supervised_infomax_FreeSolv'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on FreeSolv
* ``'gin_supervised_edgepred_FreeSolv'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on FreeSolv
* ``'gin_supervised_masking_FreeSolv'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on FreeSolv
* ``'GCN_canonical_HIV'``: A GCN model trained on HIV with canonical
featurization for atoms
* ``'GCN_attentivefp_HIV'``: A GCN model trained on HIV with attentivefp
featurization for atoms
        * ``'GAT_canonical_HIV'``: A GAT model trained on HIV with canonical
featurization for atoms
        * ``'GAT_attentivefp_HIV'``: A GAT model trained on HIV with attentivefp
featurization for atoms
* ``'Weave_canonical_HIV'``: A Weave model trained on HIV with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_HIV'``: A Weave model trained on HIV with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_HIV'``: An MPNN model trained on HIV with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_HIV'``: An MPNN model trained on HIV with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_HIV'``: An AttentiveFP model trained on HIV with canonical
featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_HIV'``: An AttentiveFP model trained on HIV with attentivefp
featurization for atoms and bonds
* ``'gin_supervised_contextpred_HIV'``: A GIN model pre-trained with supervised learning
and context prediction, and fine-tuned on HIV
* ``'gin_supervised_infomax_HIV'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on HIV
* ``'gin_supervised_edgepred_HIV'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on HIV
* ``'gin_supervised_masking_HIV'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on HIV
* ``'NF_canonical_HIV'``: An NF model trained on HIV with canonical
featurization for atoms
* ``'GCN_canonical_Lipophilicity'``: A GCN model trained on Lipophilicity with canonical
featurization for atoms
* ``'GCN_attentivefp_Lipophilicity'``: A GCN model trained on Lipophilicity with
attentivefp featurization for atoms
* ``'GAT_canonical_Lipophilicity'``: A GAT model trained on Lipophilicity with canonical
featurization for atoms
* ``'GAT_attentivefp_Lipophilicity'``: A GAT model trained on Lipophilicity with
attentivefp featurization for atoms
* ``'Weave_canonical_Lipophilicity'``: A Weave model trained on Lipophilicity with
canonical featurization for atoms and bonds
* ``'Weave_attentivefp_Lipophilicity'``: A Weave model trained on Lipophilicity with
attentivefp featurization for atoms and bonds
* ``'MPNN_canonical_Lipophilicity'``: An MPNN model trained on Lipophilicity with
canonical featurization for atoms and bonds
* ``'MPNN_attentivefp_Lipophilicity'``: An MPNN model trained on Lipophilicity with
attentivefp featurization for atoms and bonds
* ``'AttentiveFP_canonical_Lipophilicity'``: An AttentiveFP model trained on
Lipophilicity with canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_Lipophilicity'``: An AttentiveFP model trained on
Lipophilicity with attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_Lipophilicity'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on Lipophilicity
* ``'gin_supervised_infomax_Lipophilicity'``: A GIN model pre-trained with supervised
learning and infomax, and fine-tuned on Lipophilicity
* ``'gin_supervised_edgepred_Lipophilicity'``: A GIN model pre-trained with supervised
learning and edge prediction, and fine-tuned on Lipophilicity
* ``'gin_supervised_masking_Lipophilicity'``: A GIN model pre-trained with supervised
learning and masking, and fine-tuned on Lipophilicity
* ``'GCN_canonical_MUV'``: A GCN model trained on MUV with canonical
featurization for atoms
* ``'GCN_attentivefp_MUV'``: A GCN model trained on MUV with attentivefp
featurization for atoms
* ``'GAT_canonical_MUV'``: A GAT model trained on MUV with canonical
featurization for atoms
* ``'GAT_attentivefp_MUV'``: A GAT model trained on MUV with attentivefp
featurization for atoms
* ``'Weave_canonical_MUV'``: A Weave model trained on MUV with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_MUV'``: A Weave model trained on MUV with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_MUV'``: An MPNN model trained on MUV with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_MUV'``: An MPNN model trained on MUV with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_MUV'``: An AttentiveFP model trained on MUV with canonical
featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_MUV'``: An AttentiveFP model trained on MUV with attentivefp
featurization for atoms and bonds
* ``'gin_supervised_contextpred_MUV'``: A GIN model pre-trained with supervised learning
and context prediction, and fine-tuned on MUV
* ``'gin_supervised_infomax_MUV'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on MUV
* ``'gin_supervised_edgepred_MUV'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on MUV
* ``'gin_supervised_masking_MUV'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on MUV
* ``'GCN_canonical_PCBA'``: A GCN model trained on PCBA with canonical
featurization for atoms
* ``'GCN_attentivefp_PCBA'``: A GCN model trained on PCBA with attentivefp
featurization for atoms
* ``'GAT_canonical_PCBA'``: A GAT model trained on PCBA with canonical
featurization for atoms
* ``'GAT_attentivefp_PCBA'``: A GAT model trained on PCBA with attentivefp
featurization for atoms
* ``'Weave_canonical_PCBA'``: A Weave model trained on PCBA with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_PCBA'``: A Weave model trained on PCBA with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_PCBA'``: An MPNN model trained on PCBA with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_PCBA'``: An MPNN model trained on PCBA with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_PCBA'``: An AttentiveFP model trained on PCBA with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_PCBA'``: An AttentiveFP model trained on PCBA with
attentivefp featurization for atoms and bonds
* ``'GCN_canonical_SIDER'``: A GCN model trained on SIDER with canonical
featurization for atoms
* ``'GCN_attentivefp_SIDER'``: A GCN model trained on SIDER with attentivefp
featurization for atoms
* ``'GAT_canonical_SIDER'``: A GAT model trained on SIDER with canonical
featurization for atoms
* ``'GAT_attentivefp_SIDER'``: A GAT model trained on SIDER with attentivefp
featurization for atoms
* ``'Weave_canonical_SIDER'``: A Weave model trained on SIDER with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_SIDER'``: A Weave model trained on SIDER with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_SIDER'``: An MPNN model trained on SIDER with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_SIDER'``: An MPNN model trained on SIDER with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_SIDER'``: An AttentiveFP model trained on SIDER with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_SIDER'``: An AttentiveFP model trained on SIDER with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_SIDER'``: A GIN model pre-trained with supervised learning
and context prediction, and fine-tuned on SIDER
* ``'gin_supervised_infomax_SIDER'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on SIDER
* ``'gin_supervised_edgepred_SIDER'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on SIDER
* ``'gin_supervised_masking_SIDER'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on SIDER
* ``'NF_canonical_SIDER'``: An NF model trained on SIDER with canonical
featurization for atoms
* ``'GCN_canonical_Tox21'``: A GCN model trained on Tox21 with canonical
featurization for atoms
* ``'GCN_attentivefp_Tox21'``: A GCN model trained on Tox21 with attentivefp
featurization for atoms
* ``'GAT_canonical_Tox21'``: A GAT model trained on Tox21 with canonical
featurization for atoms
* ``'GAT_attentivefp_Tox21'``: A GAT model trained on Tox21 with attentivefp
featurization for atoms
* ``'Weave_canonical_Tox21'``: A Weave model trained on Tox21 with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_Tox21'``: A Weave model trained on Tox21 with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_Tox21'``: An MPNN model trained on Tox21 with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_Tox21'``: An MPNN model trained on Tox21 with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_Tox21'``: An AttentiveFP model trained on Tox21 with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_Tox21'``: An AttentiveFP model trained on Tox21 with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_Tox21'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on Tox21
* ``'gin_supervised_infomax_Tox21'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on Tox21
* ``'gin_supervised_edgepred_Tox21'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on Tox21
* ``'gin_supervised_masking_Tox21'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on Tox21
* ``'NF_canonical_Tox21'``: An NF model trained on Tox21 with canonical
featurization for atoms
* ``'GCN_canonical_ToxCast'``: A GCN model trained on ToxCast with canonical
featurization for atoms
* ``'GCN_attentivefp_ToxCast'``: A GCN model trained on ToxCast with attentivefp
featurization for atoms
* ``'GAT_canonical_ToxCast'``: A GAT model trained on ToxCast with canonical
featurization for atoms
* ``'GAT_attentivefp_ToxCast'``: A GAT model trained on ToxCast with attentivefp
featurization for atoms
* ``'Weave_canonical_ToxCast'``: A Weave model trained on ToxCast with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_ToxCast'``: A Weave model trained on ToxCast with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_ToxCast'``: An MPNN model trained on ToxCast with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_ToxCast'``: An MPNN model trained on ToxCast with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_ToxCast'``: An AttentiveFP model trained on ToxCast with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_ToxCast'``: An AttentiveFP model trained on ToxCast with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_ToxCast'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on ToxCast
* ``'gin_supervised_infomax_ToxCast'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on ToxCast
* ``'gin_supervised_edgepred_ToxCast'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on ToxCast
* ``'gin_supervised_masking_ToxCast'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on ToxCast
* ``'NF_canonical_ToxCast'``: An NF model trained on ToxCast with canonical
featurization for atoms and bonds
log : bool
Whether to print progress for model loading
Returns
-------
model
"""
if model_name not in url:
raise RuntimeError("Cannot find a pretrained model with name {}".format(model_name))
for func in [create_moleculenet_model, create_generative_model,
create_property_model, create_reaction_model]:
model = func(model_name)
if model is not None:
break
return download_and_load_checkpoint(model_name, model, url[model_name], log=log)
| [
"torch.load",
"dgl.data.utils.download",
"dgl.data.utils._get_dgl_url"
] | [((1234, 1261), 'dgl.data.utils._get_dgl_url', '_get_dgl_url', (['model_postfix'], {}), '(model_postfix)\n', (1246, 1261), False, 'from dgl.data.utils import _get_dgl_url, download\n'), ((1340, 1404), 'dgl.data.utils.download', 'download', (['url_to_pretrained'], {'path': 'local_pretrained_path', 'log': 'log'}), '(url_to_pretrained, path=local_pretrained_path, log=log)\n', (1348, 1404), False, 'from dgl.data.utils import _get_dgl_url, download\n'), ((1422, 1475), 'torch.load', 'torch.load', (['local_pretrained_path'], {'map_location': '"""cpu"""'}), "(local_pretrained_path, map_location='cpu')\n", (1432, 1475), False, 'import torch\n')] |
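A short usage sketch for the loader defined above. 'GCN_Tox21' is one of the names listed in the docstring; the import path assumes the public packaging used by DGL-LifeSci (dgllife.model):

from dgllife.model import load_pretrained

model = load_pretrained('GCN_Tox21')  # downloads the checkpoint on first use and loads it on CPU
model.eval()  # switch to inference mode before scoring molecules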
import numpy as np
import unittest
from chainer.dataset import DatasetMixin
from chainer import testing
from chainercv.utils import assert_is_bbox_dataset
from chainercv.utils import generate_random_bbox
class BboxDataset(DatasetMixin):
def __init__(self, options=(), empty_bbox=False):
self.options = options
self.empty_bbox = empty_bbox
def __len__(self):
return 10
def get_example(self, i):
img = np.random.randint(0, 256, size=(3, 48, 64))
if self.empty_bbox:
n_bbox = 0
else:
n_bbox = np.random.randint(10, 20)
bbox = generate_random_bbox(n_bbox, (48, 64), 5, 20)
label = np.random.randint(0, 20, size=n_bbox).astype(np.int32)
return (img, bbox, label) + self.options
class InvalidSampleSizeDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(
InvalidSampleSizeDataset, self).get_example(i)[:3]
return img, bbox
class InvalidImageDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(InvalidImageDataset, self).get_example(i)[:3]
return img[0], bbox, label
class InvalidBboxDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(InvalidBboxDataset, self).get_example(i)[:3]
bbox += 1000
return img, bbox, label
class InvalidLabelDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(InvalidLabelDataset, self).get_example(i)[:3]
label += 1000
return img, bbox, label
class MismatchLengthDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(
MismatchLengthDataset, self).get_example(i)[:3]
return img, bbox, label[1:]
@testing.parameterize(
{'dataset': BboxDataset(), 'valid': True},
{'dataset': BboxDataset(empty_bbox=True), 'valid': True},
{'dataset': BboxDataset(('option',)), 'valid': True},
{'dataset': InvalidSampleSizeDataset(), 'valid': False},
{'dataset': InvalidImageDataset(), 'valid': False},
{'dataset': InvalidBboxDataset(), 'valid': False},
{'dataset': InvalidLabelDataset(), 'valid': False},
{'dataset': MismatchLengthDataset(), 'valid': False},
)
class TestAssertIsBboxDataset(unittest.TestCase):
def test_assert_is_bbox_dataset(self):
if self.valid:
assert_is_bbox_dataset(self.dataset, 20)
else:
with self.assertRaises(AssertionError):
assert_is_bbox_dataset(self.dataset, 20)
testing.run_module(__name__, __file__)
| [
"chainercv.utils.generate_random_bbox",
"numpy.random.randint",
"chainer.testing.run_module",
"chainercv.utils.assert_is_bbox_dataset"
] | [((2565, 2603), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (2583, 2603), False, 'from chainer import testing\n'), ((451, 494), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': '(3, 48, 64)'}), '(0, 256, size=(3, 48, 64))\n', (468, 494), True, 'import numpy as np\n'), ((622, 667), 'chainercv.utils.generate_random_bbox', 'generate_random_bbox', (['n_bbox', '(48, 64)', '(5)', '(20)'], {}), '(n_bbox, (48, 64), 5, 20)\n', (642, 667), False, 'from chainercv.utils import generate_random_bbox\n'), ((581, 606), 'numpy.random.randint', 'np.random.randint', (['(10)', '(20)'], {}), '(10, 20)\n', (598, 606), True, 'import numpy as np\n'), ((2399, 2439), 'chainercv.utils.assert_is_bbox_dataset', 'assert_is_bbox_dataset', (['self.dataset', '(20)'], {}), '(self.dataset, 20)\n', (2421, 2439), False, 'from chainercv.utils import assert_is_bbox_dataset\n'), ((684, 721), 'numpy.random.randint', 'np.random.randint', (['(0)', '(20)'], {'size': 'n_bbox'}), '(0, 20, size=n_bbox)\n', (701, 721), True, 'import numpy as np\n'), ((2522, 2562), 'chainercv.utils.assert_is_bbox_dataset', 'assert_is_bbox_dataset', (['self.dataset', '(20)'], {}), '(self.dataset, 20)\n', (2544, 2562), False, 'from chainercv.utils import assert_is_bbox_dataset\n')] |
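The parameterized cases above reduce to one call per dataset, and the same check can be run directly against any dataset object; n_fg_class=20 matches the label range generated by BboxDataset:

dataset = BboxDataset()
assert_is_bbox_dataset(dataset, 20)  # raises AssertionError if images, bboxes or labels break the contract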
"""Provide default settgins"""
from pathlib import Path
BIOPIPEN_DIR = Path(__file__).parent.parent.resolve()
REPORT_DIR = BIOPIPEN_DIR / "reports"
SCRIPT_DIR = BIOPIPEN_DIR / "scripts"
| [
"pathlib.Path"
] | [((72, 86), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (76, 86), False, 'from pathlib import Path\n')] |
#!/usr/bin/python3
import shutil
import os
import base64
from time import sleep
import flask
import requests.exceptions
import blueprint
from flask_cors import CORS
from confhttpproxy import ProxyRouter, ProxyRouterException
from flask import Flask, jsonify
import rest_routes
from lmsrvcore.utilities.migrate import migrate_work_dir_structure_v2
from gtmcore.dispatcher import Dispatcher
from gtmcore.dispatcher.jobs import update_environment_repositories
from gtmcore.configuration import Configuration
from gtmcore.logging import LMLogger
from gtmcore.auth.identity import AuthenticationError, get_identity_manager_class
from gtmcore.labbook.lock import reset_all_locks
logger = LMLogger.get_logger()
def configure_chp(proxy_dict: dict, is_hub_client: bool) -> str:
"""Set up the configurable HTTP proxy (CHP)
Args:
proxy_dict: obtained from the config dict inside the config instance
is_hub_client: are we running on the hub? (also obtained from config instance)
Returns:
the final api_prefix used by the router
We define this as a function mostly so we can optionally wrap it in a try block below
"""
# /api by default
api_prefix = proxy_dict["labmanager_api_prefix"]
proxy_router = ProxyRouter.get_proxy(proxy_dict)
# Wait up to 10 seconds for the CHP to be available
for _ in range(20):
try:
# This property raises an exception if the underlying request doesn't yield a status code of 200
proxy_router.routes # noqa
except (requests.exceptions.ConnectionError, ProxyRouterException):
sleep(0.5)
continue
# If there was no exception, the CHP is up and responding
break
else:
# We exhausted our for-loop
logger.error("Could not reach router after 20 tries (10 seconds), proxy_router.add() will likely fail")
if is_hub_client:
# Use full route prefix, including run/<client_id> if running in the Hub
api_target = f"run/{os.environ['GIGANTUM_CLIENT_ID']}{api_prefix}"
api_prefix = f"/{api_target}"
# explicit routes for UI with full route prefix
proxy_router.add("http://localhost:10002", f"run/{os.environ['GIGANTUM_CLIENT_ID']}")
else:
api_target = "api"
proxy_router.add("http://localhost:10001", api_target)
logger.info(f"Proxy routes ({type(proxy_router)}): {proxy_router.routes}")
return api_prefix
def configure_default_server(config_instance: Configuration) -> None:
"""Function to check if a server has been configured, and if not, configure and select the default server"""
try:
# Load the server configuration. If you get a FileNotFoundError there is no configured server
config_instance.get_server_configuration()
except FileNotFoundError:
default_server = config_instance.config['core']['default_server']
logger.info(f"Configuring Client with default server via auto-discovery: {default_server}")
try:
server_id = config_instance.add_server(default_server)
config_instance.set_current_server(server_id)
# Migrate any user dirs if needed. Here we assume all projects belong to the default server, since
# at the time it was the only available server.
migrate_work_dir_structure_v2(server_id)
except Exception as err:
logger.exception(f"Failed to configure default server! Restart Client to try again: {err}")
# Re-raise the exception so the API doesn't come up
raise
# Start Flask Server Initialization and app configuration
app = Flask("lmsrvlabbook")
random_bytes = os.urandom(32)
app.config["SECRET_KEY"] = base64.b64encode(random_bytes).decode('utf-8')
app.config["LABMGR_CONFIG"] = config = Configuration(wait_for_cache=10)
configure_default_server(config)
app.config["ID_MGR_CLS"] = get_identity_manager_class(config)
# Set Debug mode
app.config['DEBUG'] = config.config["flask"]["DEBUG"]
app.register_blueprint(blueprint.complete_labbook_service)
# Set starting flags
# If flask is run in debug mode the service will restart when code is changed, and some tasks
# we only want to happen once (ON_FIRST_START)
# The WERKZEUG_RUN_MAIN environmental variable is set only when running under debugging mode
ON_FIRST_START = app.config['DEBUG'] is False or os.environ.get('WERKZEUG_RUN_MAIN') != 'true'
ON_RESTART = os.environ.get('WERKZEUG_RUN_MAIN') == 'true'
if os.environ.get('CIRCLECI') == 'true':
try:
url_prefix = configure_chp(config.config['proxy'], config.is_hub_client)
except requests.exceptions.ConnectionError:
url_prefix = config.config['proxy']["labmanager_api_prefix"]
else:
url_prefix = configure_chp(config.config['proxy'], config.is_hub_client)
# Add rest routes
app.register_blueprint(rest_routes.rest_routes, url_prefix=url_prefix)
if config.config["flask"]["allow_cors"]:
# Allow CORS
CORS(app, max_age=7200)
if ON_FIRST_START:
# Empty container-container share dir as it is ephemeral
share_dir = os.path.join(os.path.sep, 'mnt', 'share')
logger.info("Emptying container-container share folder: {}.".format(share_dir))
try:
for item in os.listdir(share_dir):
item_path = os.path.join(share_dir, item)
if os.path.isfile(item_path):
os.unlink(item_path)
else:
shutil.rmtree(item_path)
except Exception as e:
logger.error(f"Failed to empty share folder: {e}.")
raise
post_save_hook_code = """
import subprocess, os
def post_save_hook(os_path, model, contents_manager, **kwargs):
try:
client_ip = os.environ.get('GIGANTUM_CLIENT_IP')
if os.environ.get('HUB_CLIENT_ID'):
# Running in the Hub
service_route = "run/{}/api/savehook".format(os.environ.get('HUB_CLIENT_ID'))
else:
# Running locally
service_route = "api/savehook"
tokens = open('/home/giguser/jupyter_token').read().strip()
username, owner, lbname, jupyter_token = tokens.split(',')
url_args = "file={}&jupyter_token={}&email={}".format(os.path.basename(os_path), jupyter_token, os.environ['GIGANTUM_EMAIL'])
url = "http://{}:10001/{}/{}/{}/{}?{}".format(client_ip,service_route,username,owner,lbname,url_args)
subprocess.run(['wget', '--spider', url], cwd='/tmp')
except Exception as e:
print(e)
"""
os.makedirs(os.path.join(share_dir, 'jupyterhooks'))
with open(os.path.join(share_dir, 'jupyterhooks', '__init__.py'), 'w') as initpy:
initpy.write(post_save_hook_code)
# Reset distributed lock, if desired
if config.config["lock"]["reset_on_start"]:
logger.info("Resetting ALL distributed locks")
reset_all_locks(config.config['lock'])
# Create local data (for local dataset types) dir if it doesn't exist
local_data_dir = os.path.join(config.config['git']['working_directory'], 'local_data')
if os.path.isdir(local_data_dir) is False:
os.makedirs(local_data_dir, exist_ok=True)
logger.info(f'Created `local_data` dir for Local Filesystem Dataset Type: {local_data_dir}')
# Create certificates file directory for custom CA certificate support.
certificate_dir = os.path.join(config.config['git']['working_directory'], 'certificates', 'ssl')
if os.path.isdir(certificate_dir) is False:
os.makedirs(certificate_dir, exist_ok=True)
logger.info(f'Created `certificates` dir for SSL and custom CA certificates: {certificate_dir}')
# make sure temporary upload directory exists and is empty
tempdir = config.upload_dir
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
logger.info(f'Cleared upload temp dir: {tempdir}')
os.makedirs(tempdir)
# Start background startup tasks
d = Dispatcher()
# Make sure the queue is up before we start using RQ
for _ in range(20):
if d.ready_for_job(update_environment_repositories):
break
sleep(0.5)
else:
# We exhausted our for-loop
err_message = "Worker queue not ready after 20 tries (10 seconds) - fatal error"
logger.error(err_message)
raise RuntimeError(err_message)
# Run job to update Base images in the background
d.dispatch_task(update_environment_repositories, persist=True)
# Set auth error handler
@app.errorhandler(AuthenticationError)
def handle_auth_error(ex):
response = jsonify(ex.error)
response.status_code = ex.status_code
return response
# TEMPORARY KLUDGE
# Due to GitPython implementation, resources leak. This block deletes all GitPython instances at the end of the request
# Future work will remove GitPython, at which point this block should be removed.
@app.after_request
def cleanup_git(response):
loader = getattr(flask.request, 'labbook_loader', None)
if loader:
for key in loader.__dict__["_promise_cache"]:
try:
lb = loader.__dict__["_promise_cache"][key].value
lb.git.repo.__del__()
except AttributeError:
continue
return response
# TEMPORARY KLUDGE
def main(debug=False) -> None:
try:
# Run app on 0.0.0.0, assuming not an issue since it should be in a container
# Please note: Debug mode must explicitly be set to False when running integration
# tests, due to properties of Flask werkzeug dynamic package reloading.
if debug:
# This is to support integration tests, which will call main
# with debug=False in order to avoid runtime reloading of Python code
# which causes the interpreter to crash.
app.run(host="0.0.0.0", port=10001, debug=debug)
else:
# If debug arg is not explicitly given then it is loaded from config
app.run(host="0.0.0.0", port=10001)
except Exception as err:
logger.exception(err)
raise
if __name__ == '__main__':
main()
| [
"flask_cors.CORS",
"flask.Flask",
"base64.b64encode",
"time.sleep",
"flask.jsonify",
"os.path.exists",
"os.listdir",
"lmsrvcore.utilities.migrate.migrate_work_dir_structure_v2",
"os.path.isdir",
"os.unlink",
"gtmcore.labbook.lock.reset_all_locks",
"gtmcore.auth.identity.get_identity_manager_cl... | [((688, 709), 'gtmcore.logging.LMLogger.get_logger', 'LMLogger.get_logger', ([], {}), '()\n', (707, 709), False, 'from gtmcore.logging import LMLogger\n'), ((3657, 3678), 'flask.Flask', 'Flask', (['"""lmsrvlabbook"""'], {}), "('lmsrvlabbook')\n", (3662, 3678), False, 'from flask import Flask, jsonify\n'), ((3695, 3709), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (3705, 3709), False, 'import os\n'), ((3823, 3855), 'gtmcore.configuration.Configuration', 'Configuration', ([], {'wait_for_cache': '(10)'}), '(wait_for_cache=10)\n', (3836, 3855), False, 'from gtmcore.configuration import Configuration\n'), ((3916, 3950), 'gtmcore.auth.identity.get_identity_manager_class', 'get_identity_manager_class', (['config'], {}), '(config)\n', (3942, 3950), False, 'from gtmcore.auth.identity import AuthenticationError, get_identity_manager_class\n'), ((1256, 1289), 'confhttpproxy.ProxyRouter.get_proxy', 'ProxyRouter.get_proxy', (['proxy_dict'], {}), '(proxy_dict)\n', (1277, 1289), False, 'from confhttpproxy import ProxyRouter, ProxyRouterException\n'), ((4448, 4483), 'os.environ.get', 'os.environ.get', (['"""WERKZEUG_RUN_MAIN"""'], {}), "('WERKZEUG_RUN_MAIN')\n", (4462, 4483), False, 'import os\n'), ((4498, 4524), 'os.environ.get', 'os.environ.get', (['"""CIRCLECI"""'], {}), "('CIRCLECI')\n", (4512, 4524), False, 'import os\n'), ((4980, 5003), 'flask_cors.CORS', 'CORS', (['app'], {'max_age': '(7200)'}), '(app, max_age=7200)\n', (4984, 5003), False, 'from flask_cors import CORS\n'), ((5101, 5142), 'os.path.join', 'os.path.join', (['os.path.sep', '"""mnt"""', '"""share"""'], {}), "(os.path.sep, 'mnt', 'share')\n", (5113, 5142), False, 'import os\n'), ((6973, 7042), 'os.path.join', 'os.path.join', (["config.config['git']['working_directory']", '"""local_data"""'], {}), "(config.config['git']['working_directory'], 'local_data')\n", (6985, 7042), False, 'import os\n'), ((7341, 7419), 'os.path.join', 'os.path.join', (["config.config['git']['working_directory']", '"""certificates"""', '"""ssl"""'], {}), "(config.config['git']['working_directory'], 'certificates', 'ssl')\n", (7353, 7419), False, 'import os\n'), ((7728, 7751), 'os.path.exists', 'os.path.exists', (['tempdir'], {}), '(tempdir)\n', (7742, 7751), False, 'import os\n'), ((7847, 7867), 'os.makedirs', 'os.makedirs', (['tempdir'], {}), '(tempdir)\n', (7858, 7867), False, 'import os\n'), ((7914, 7926), 'gtmcore.dispatcher.Dispatcher', 'Dispatcher', ([], {}), '()\n', (7924, 7926), False, 'from gtmcore.dispatcher import Dispatcher\n'), ((8545, 8562), 'flask.jsonify', 'jsonify', (['ex.error'], {}), '(ex.error)\n', (8552, 8562), False, 'from flask import Flask, jsonify\n'), ((3737, 3767), 'base64.b64encode', 'base64.b64encode', (['random_bytes'], {}), '(random_bytes)\n', (3753, 3767), False, 'import base64\n'), ((4389, 4424), 'os.environ.get', 'os.environ.get', (['"""WERKZEUG_RUN_MAIN"""'], {}), "('WERKZEUG_RUN_MAIN')\n", (4403, 4424), False, 'import os\n'), ((5256, 5277), 'os.listdir', 'os.listdir', (['share_dir'], {}), '(share_dir)\n', (5266, 5277), False, 'import os\n'), ((6516, 6555), 'os.path.join', 'os.path.join', (['share_dir', '"""jupyterhooks"""'], {}), "(share_dir, 'jupyterhooks')\n", (6528, 6555), False, 'import os\n'), ((6838, 6876), 'gtmcore.labbook.lock.reset_all_locks', 'reset_all_locks', (["config.config['lock']"], {}), "(config.config['lock'])\n", (6853, 6876), False, 'from gtmcore.labbook.lock import reset_all_locks\n'), ((7050, 7079), 'os.path.isdir', 'os.path.isdir', 
(['local_data_dir'], {}), '(local_data_dir)\n', (7063, 7079), False, 'import os\n'), ((7098, 7140), 'os.makedirs', 'os.makedirs', (['local_data_dir'], {'exist_ok': '(True)'}), '(local_data_dir, exist_ok=True)\n', (7109, 7140), False, 'import os\n'), ((7427, 7457), 'os.path.isdir', 'os.path.isdir', (['certificate_dir'], {}), '(certificate_dir)\n', (7440, 7457), False, 'import os\n'), ((7476, 7519), 'os.makedirs', 'os.makedirs', (['certificate_dir'], {'exist_ok': '(True)'}), '(certificate_dir, exist_ok=True)\n', (7487, 7519), False, 'import os\n'), ((7761, 7783), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (7774, 7783), False, 'import shutil\n'), ((8095, 8105), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (8100, 8105), False, 'from time import sleep\n'), ((5303, 5332), 'os.path.join', 'os.path.join', (['share_dir', 'item'], {}), '(share_dir, item)\n', (5315, 5332), False, 'import os\n'), ((5348, 5373), 'os.path.isfile', 'os.path.isfile', (['item_path'], {}), '(item_path)\n', (5362, 5373), False, 'import os\n'), ((6571, 6625), 'os.path.join', 'os.path.join', (['share_dir', '"""jupyterhooks"""', '"""__init__.py"""'], {}), "(share_dir, 'jupyterhooks', '__init__.py')\n", (6583, 6625), False, 'import os\n'), ((1620, 1630), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (1625, 1630), False, 'from time import sleep\n'), ((3331, 3371), 'lmsrvcore.utilities.migrate.migrate_work_dir_structure_v2', 'migrate_work_dir_structure_v2', (['server_id'], {}), '(server_id)\n', (3360, 3371), False, 'from lmsrvcore.utilities.migrate import migrate_work_dir_structure_v2\n'), ((5391, 5411), 'os.unlink', 'os.unlink', (['item_path'], {}), '(item_path)\n', (5400, 5411), False, 'import os\n'), ((5446, 5470), 'shutil.rmtree', 'shutil.rmtree', (['item_path'], {}), '(item_path)\n', (5459, 5470), False, 'import shutil\n')] |
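configure_chp ultimately reduces to a couple of calls against the configurable HTTP proxy. A stripped-down sketch of those calls outside the startup flow, using only names that appear above (the local-client case, where the API is exposed under /api):

config = Configuration(wait_for_cache=10)
router = ProxyRouter.get_proxy(config.config['proxy'])
router.add("http://localhost:10001", "api")  # REST/GraphQL API behind /api
print(router.routes)  # the proxy's current routing table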
import click
import logging
import sys
from typing import Tuple
from kubails.commands import helpers
from kubails.services.config_store import ConfigStore
from kubails.services.service import Service
from kubails.resources.templates import SERVICE_TEMPLATES
from kubails.utils.command_helpers import log_command_args_factory
logger = logging.getLogger(__name__)
log_command_args = log_command_args_factory(logger, "Service '{}' args")
config_store = None
service_service = None
@click.group()
def service():
"""Manage the services for your project."""
global config_store
global service_service
config_store = ConfigStore()
service_service = Service()
@service.command()
@click.argument("service", nargs=-1)
@log_command_args
def start(service: Tuple[str]) -> None:
"""
Start up SERVICE locally.
If SERVICE is not specified, start all services.
"""
service_service.start(list(service))
@service.command()
@log_command_args
def destroy() -> None:
"""Teardown your local services."""
service_service.destroy()
@service.command()
@click.argument("service", nargs=-1)
@click.option("--tag")
@log_command_args
def lint(service: Tuple[str], tag: str) -> None:
"""
Lint SERVICE.
If SERVICE is not specified, lint all services.
"""
if not service_service.lint(list(service), tag):
sys.exit(1)
@service.command()
@click.argument("service", nargs=-1)
@click.option("--tag")
@log_command_args
def test(service: Tuple[str], tag: str) -> None:
"""
Test SERVICE.
If SERVICE is not specified, test all services.
"""
if not service_service.test(list(service), tag):
sys.exit(1)
@service.command()
@click.argument("service", nargs=-1)
@click.option("--tag")
@log_command_args
def ci(service: Tuple[str], tag: str) -> None:
"""
Run CI on SERVICE.
If SERVICE is not specified, run CI on all services.
"""
if not service_service.ci(list(service), tag):
sys.exit(1)
@service.command()
@click.argument("command", required=True)
@log_command_args
def make(command: str) -> None:
"""Execute a Make COMMAND on all your services."""
if not service_service.make(command):
sys.exit(1)
@service.command()
@click.option(
"--type", "service_type",
prompt=helpers.SERVICE_GENERATION_PROMPTS["without_index"]["service_type"],
type=click.Choice(SERVICE_TEMPLATES),
default=SERVICE_TEMPLATES[0],
help="The template to base the service off of."
)
@click.option(
"--subdomain",
prompt=helpers.SERVICE_GENERATION_PROMPTS["without_index"]["subdomain"],
default="",
help="The subdomain the service will have when deployed."
)
@click.option(
"--title",
prompt=helpers.SERVICE_GENERATION_PROMPTS["without_index"]["title"],
help="The title of the service."
)
@log_command_args
def generate(service_type: str, subdomain: str, title: str) -> None:
"""Generate a new service."""
helpers.generate_service(
service_service,
service_type=service_type,
subdomain=subdomain,
title=title,
)
@service.command()
@click.argument("service")
@click.option("--current-branch", default="master")
@log_command_args
def has_changed(current_branch: str, service: str) -> None:
"""Returns whether or not the given service has changed since the last build."""
if not config_store.is_changed_service(service, current_branch):
sys.exit(1)
############################################################
# Images sub-group
############################################################
@service.group()
def images():
"""Build and push Docker images for your services."""
pass
@images.command()
@click.argument("service", nargs=-1)
@click.option("--branch", help="The branch to tag the image with.")
@click.option("--commit", help="The commit to tag the image with.")
@log_command_args
def build(service: Tuple[str], branch: str, commit: str) -> None:
"""
Build the Docker image for SERVICE.
If SERVICE is not specified, build all services' Docker images.
"""
if not service_service.build(list(service), branch, commit):
sys.exit(1)
@images.command()
@click.argument("service", nargs=-1)
@click.option("--branch", help="The branch the image was tagged with.")
@click.option("--commit", help="The commit the image was tagged with.")
@log_command_args
def push(service: Tuple[str], branch: str, commit: str) -> None:
"""
Push the Docker image for SERVICE.
If SERVICE is not specified, push all services' Docker images.
"""
if not service_service.push(list(service), branch, commit):
sys.exit(1)
| [
"logging.getLogger",
"click.Choice",
"click.argument",
"click.group",
"click.option",
"kubails.services.service.Service",
"sys.exit",
"kubails.utils.command_helpers.log_command_args_factory",
"kubails.commands.helpers.generate_service",
"kubails.services.config_store.ConfigStore"
] | [((336, 363), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (353, 363), False, 'import logging\n'), ((383, 436), 'kubails.utils.command_helpers.log_command_args_factory', 'log_command_args_factory', (['logger', '"""Service \'{}\' args"""'], {}), '(logger, "Service \'{}\' args")\n', (407, 436), False, 'from kubails.utils.command_helpers import log_command_args_factory\n'), ((484, 497), 'click.group', 'click.group', ([], {}), '()\n', (495, 497), False, 'import click\n'), ((700, 735), 'click.argument', 'click.argument', (['"""service"""'], {'nargs': '(-1)'}), "('service', nargs=-1)\n", (714, 735), False, 'import click\n'), ((1089, 1124), 'click.argument', 'click.argument', (['"""service"""'], {'nargs': '(-1)'}), "('service', nargs=-1)\n", (1103, 1124), False, 'import click\n'), ((1126, 1147), 'click.option', 'click.option', (['"""--tag"""'], {}), "('--tag')\n", (1138, 1147), False, 'import click\n'), ((1397, 1432), 'click.argument', 'click.argument', (['"""service"""'], {'nargs': '(-1)'}), "('service', nargs=-1)\n", (1411, 1432), False, 'import click\n'), ((1434, 1455), 'click.option', 'click.option', (['"""--tag"""'], {}), "('--tag')\n", (1446, 1455), False, 'import click\n'), ((1700, 1735), 'click.argument', 'click.argument', (['"""service"""'], {'nargs': '(-1)'}), "('service', nargs=-1)\n", (1714, 1735), False, 'import click\n'), ((1737, 1758), 'click.option', 'click.option', (['"""--tag"""'], {}), "('--tag')\n", (1749, 1758), False, 'import click\n'), ((2014, 2054), 'click.argument', 'click.argument', (['"""command"""'], {'required': '(True)'}), "('command', required=True)\n", (2028, 2054), False, 'import click\n'), ((2499, 2680), 'click.option', 'click.option', (['"""--subdomain"""'], {'prompt': "helpers.SERVICE_GENERATION_PROMPTS['without_index']['subdomain']", 'default': '""""""', 'help': '"""The subdomain the service will have when deployed."""'}), "('--subdomain', prompt=helpers.SERVICE_GENERATION_PROMPTS[\n 'without_index']['subdomain'], default='', help=\n 'The subdomain the service will have when deployed.')\n", (2511, 2680), False, 'import click\n'), ((2690, 2821), 'click.option', 'click.option', (['"""--title"""'], {'prompt': "helpers.SERVICE_GENERATION_PROMPTS['without_index']['title']", 'help': '"""The title of the service."""'}), "('--title', prompt=helpers.SERVICE_GENERATION_PROMPTS[\n 'without_index']['title'], help='The title of the service.')\n", (2702, 2821), False, 'import click\n'), ((3120, 3145), 'click.argument', 'click.argument', (['"""service"""'], {}), "('service')\n", (3134, 3145), False, 'import click\n'), ((3147, 3197), 'click.option', 'click.option', (['"""--current-branch"""'], {'default': '"""master"""'}), "('--current-branch', default='master')\n", (3159, 3197), False, 'import click\n'), ((3714, 3749), 'click.argument', 'click.argument', (['"""service"""'], {'nargs': '(-1)'}), "('service', nargs=-1)\n", (3728, 3749), False, 'import click\n'), ((3751, 3817), 'click.option', 'click.option', (['"""--branch"""'], {'help': '"""The branch to tag the image with."""'}), "('--branch', help='The branch to tag the image with.')\n", (3763, 3817), False, 'import click\n'), ((3819, 3885), 'click.option', 'click.option', (['"""--commit"""'], {'help': '"""The commit to tag the image with."""'}), "('--commit', help='The commit to tag the image with.')\n", (3831, 3885), False, 'import click\n'), ((4201, 4236), 'click.argument', 'click.argument', (['"""service"""'], {'nargs': '(-1)'}), "('service', nargs=-1)\n", (4215, 4236), False, 'import 
click\n'), ((4238, 4308), 'click.option', 'click.option', (['"""--branch"""'], {'help': '"""The branch the image was tagged with."""'}), "('--branch', help='The branch the image was tagged with.')\n", (4250, 4308), False, 'import click\n'), ((4310, 4380), 'click.option', 'click.option', (['"""--commit"""'], {'help': '"""The commit the image was tagged with."""'}), "('--commit', help='The commit the image was tagged with.')\n", (4322, 4380), False, 'import click\n'), ((632, 645), 'kubails.services.config_store.ConfigStore', 'ConfigStore', ([], {}), '()\n', (643, 645), False, 'from kubails.services.config_store import ConfigStore\n'), ((668, 677), 'kubails.services.service.Service', 'Service', ([], {}), '()\n', (675, 677), False, 'from kubails.services.service import Service\n'), ((2956, 3062), 'kubails.commands.helpers.generate_service', 'helpers.generate_service', (['service_service'], {'service_type': 'service_type', 'subdomain': 'subdomain', 'title': 'title'}), '(service_service, service_type=service_type,\n subdomain=subdomain, title=title)\n', (2980, 3062), False, 'from kubails.commands import helpers\n'), ((1363, 1374), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1371, 1374), False, 'import sys\n'), ((1666, 1677), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1674, 1677), False, 'import sys\n'), ((1980, 1991), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1988, 1991), False, 'import sys\n'), ((2210, 2221), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2218, 2221), False, 'import sys\n'), ((2377, 2408), 'click.Choice', 'click.Choice', (['SERVICE_TEMPLATES'], {}), '(SERVICE_TEMPLATES)\n', (2389, 2408), False, 'import click\n'), ((3438, 3449), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3446, 3449), False, 'import sys\n'), ((4168, 4179), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4176, 4179), False, 'import sys\n'), ((4659, 4670), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4667, 4670), False, 'import sys\n')] |
from math import exp,sqrt
from random import randrange
class neurone:
def __init__(self,a,b):
self.a=a
self.b=b
def proceed(self,z):
t = z[0]*self.a + z[1]*self.b
return 1/(1+exp(-t))
n = 100
X_app = [(randrange(-500,501)/1000,randrange(-500,501)/1000) for i in range(n)]
Y_app = [1 if ((x[0]-0.3)+(x[1]-0.3))<0.2 else 0 for x in X_app]
a=1
Y_pred,Y_score = [None for i in range(1001)], [None for i in range(1001)]
for i in range(1001):
b=i/1000*4-1
ne = neurone(a,b)
Y_pred[i] = [ne.proceed(z) for z in X_app]
Y_score[i] = sum([abs(Y_pred[i][j]-Y_app[j]) for j in range(n)])
opt = min(Y_score)
print(Y_score) | [
"math.exp",
"random.randrange"
] | [((244, 264), 'random.randrange', 'randrange', (['(-500)', '(501)'], {}), '(-500, 501)\n', (253, 264), False, 'from random import randrange\n'), ((269, 289), 'random.randrange', 'randrange', (['(-500)', '(501)'], {}), '(-500, 501)\n', (278, 289), False, 'from random import randrange\n'), ((216, 223), 'math.exp', 'exp', (['(-t)'], {}), '(-t)\n', (219, 223), False, 'from math import exp, sqrt\n')] |
import json
import datetime
import mimetypes
from urllib.parse import urlparse
from arcgis import env
from arcgis.gis import GIS
from arcgis.gis import Item
from ._ref import reference
class JournalStoryMap(object):
"""
Represents a Journal Story Map
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
item Optional Item. The storymap item.
--------------- --------------------------------------------------------------------
gis Optional GIS. The connection to the Enterprise.
=============== ====================================================================
"""
_properties = None
_gis = None
_itemid = None
_item = None
def __init__(self, item=None, gis=None):
"""initializer"""
if gis is None:
self._gis = env.active_gis
else:
self._gis = gis
if item and isinstance(item, str):
self._item = gis.content.get(item)
self._itemid = self._item.itemid
self._properties = self._item.get_data()
elif item and isinstance(item, Item) and \
'MapJournal' in item.typeKeywords:
self._item = item
self._itemid = self._item.itemid
self._properties = self._item.get_data()
elif item and isinstance(item, Item) and \
'MapJournal' not in item.typeKeywords:
raise ValueError("Item is not a Journal Story Map")
else:
self._properties = reference['journal']
#----------------------------------------------------------------------
def __str__(self):
return json.dumps(self._properties)
#----------------------------------------------------------------------
def __repr__(self):
return self.__str__()
#----------------------------------------------------------------------
def _refresh(self):
if self._item:
self._properties = json.loads(self._item.get_data())
#----------------------------------------------------------------------
@property
def properties(self):
"""returns the storymap's JSON"""
return self._properties
#----------------------------------------------------------------------
def add(self, title,
url_or_item, content=None,
actions=None, visible=True,
alt_text="", display='stretch',
**kwargs):
"""
Adds a new section to the StoryMap
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
url_or_item Required string/Item. The web address to the resource or a Web Map
item.
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
**WebMap Options**
==================== ====================================================================
**Argument** **Description**
-------------------- --------------------------------------------------------------------
show_legend Optional boolean. If True, the legend will be visible.
-------------------- --------------------------------------------------------------------
show_default_legend Optional boolean. Shows the legend on default.
-------------------- --------------------------------------------------------------------
extent Optional dict/Envelope. The extent of the webmap.
-------------------- --------------------------------------------------------------------
layer_visibility Optional list. The visibility of the layers in a webmap. This is a
list of dictionaries where the syntax is as follows:
Syntax:
[
{
"id" : "<id>",
"visibility" : "<true/false>"
}
]
Example:
[
{
"id" : "csv_6005_0",
"visibility" : False,
},
{
"id" : "csv_6006_0",
"visibility" : True,
}
]
-------------------- --------------------------------------------------------------------
popup Optional dict. The popup definition for the webmap.
==================== ====================================================================
:return: Boolean
"""
if isinstance(url_or_item, Item):
show_legend = kwargs.pop("show_legend", False)
show_default_legend = kwargs.pop("show_default_legend", False)
extent = kwargs.pop("extent", None)
layer_visibility = kwargs.pop("layer_visibility", None)
popup = kwargs.pop("popup", None)
if layer_visibility:
layer_visibility = json.dumps(layer_visibility)
return self._add_webmap(item=url_or_item, title=title, content=content,
actions=actions, visible=visible, alt_text=alt_text,
display=display,
show_legend=show_legend,
show_default_legend=show_default_legend,
extent=extent,
layer_visibility=layer_visibility,
popup=popup)
elif isinstance(url_or_item, str):
mt = mimetypes.guess_type(url=url_or_item)
if mt[0].lower().find('video') > -1:
return self._add_video(url=url_or_item,
title=title,
content=content,
actions=actions,
visible=visible,
alt_text=alt_text,
display=display)
elif mt[0].lower().find('image') > -1:
return self._add_image(title=title, image=url_or_item,
content=content, actions=actions, visible=visible,
alt_text=alt_text, display=display)
else:
return self._add_webpage(title=title, url=url_or_item,
content=content, actions=actions, visible=visible,
alt_text=alt_text, display=display)
return False
#----------------------------------------------------------------------
def _add_webpage(self,
title,
url,
content=None,
actions=None,
visible=True,
alt_text="",
display='stretch'):
"""
Adds a webpage to the storymap
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
url Required string. The web address of the webpage
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
self._properties['values']['story']['sections'].append(
{
"title": title,
"content": content,
"contentActions": actions,
"creaDate": int(datetime.datetime.now().timestamp() * 1000),
"pubDate": int(datetime.datetime.now().timestamp() * 1000),
"status": visible,
"media": {
"type": "webpage",
"webpage": {
"url": url,
"type": "webpage",
"altText": alt_text,
"display": display,
"unload": True,
"hash": "5"
}
}
}
)
return True
#----------------------------------------------------------------------
def _add_video(self,
url,
title,
content,
actions=None,
visible=True,
alt_text="",
display='stretch'
):
"""
Adds a video section to the StoryMap.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
url Required string. The web address of the image
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
video = {
"title": title,
"content": content,
"contentActions": actions,
"creaDate": 1523450612336,
"pubDate": 1523450580000,
"status": visible,
"media": {
"type": "video",
"video": {
"url": url,
"type": "video",
"altText": alt_text,
"display": display
}
}
}
self._properties['values']['story']['sections'].append(video)
return True
#----------------------------------------------------------------------
def _add_webmap(self,
item,
title,
content,
actions=None,
visible=True,
alt_text="",
display='stretch',
show_legend=False,
show_default_legend=False,
extent=None,
layer_visibility=None,
popup=None
):
"""
Adds a WebMap to the Section.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
item Required string/Item. The webmap Item Id or Item of a webmap.
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
url Required string. The web address of the image
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if isinstance(item, Item):
item = item.itemid
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
wm = {
"title": title,
"content": content,
"contentActions": actions,
"creaDate": int(datetime.datetime.now().timestamp() * 1000),
"pubDate": int(datetime.datetime.now().timestamp() * 1000),
"status": visible,
"media": {
"type": "webmap",
"webmap": {
"id": item,
"extent": extent,
"layers": layer_visibility,
"popup": popup,
"overview": {
"enable": False,
"openByDefault": True
},
"legend": {
"enable": show_legend,
"openByDefault": show_default_legend
},
"geocoder": {
"enable": False
},
"altText": alt_text
}
}
}
self._properties['values']['story']['sections'].append(wm)
return True
#----------------------------------------------------------------------
def _add_image(self,
title,
image,
content=None,
actions=None,
visible=True,
alt_text=None,
display='fill'):
"""
Adds a new image section to the storymap
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
url Required string. The web address of the image
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
self._properties['values']['story']['sections'].append(
{
"title": title,
"content": content,
"contentActions": actions,
"creaDate": int(datetime.datetime.now().timestamp() * 1000),
"pubDate": int(datetime.datetime.now().timestamp() * 1000),
"status": visible,
"media": {
"type": "image",
"image": {
"url": image,
"type": "image",
"altText": alt_text,
"display": display
}
}
}
)
return True
#----------------------------------------------------------------------
def remove(self, index):
"""
Removes a section by index.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
index Required integer. The position of the section to remove.
=============== ====================================================================
:return: Boolean
"""
try:
item = self._properties['values']['story']['sections'][index]
self._properties['values']['story']['sections'].remove(item)
return True
except:
return False
#----------------------------------------------------------------------
def save(self, title=None, tags=None, description=None):
"""
Saves an Journal StoryMap to the GIS
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Optional string. The title of the StoryMap.
--------------- --------------------------------------------------------------------
tags Optional string. The tags of the StoryMap.
--------------- --------------------------------------------------------------------
description Optional string. The description of the StoryMap
=============== ====================================================================
:return: Boolean
"""
import uuid
if self._item:
p = {
'text' : json.dumps(self._properties)
}
if title:
p['title'] = title
if tags:
p['tags'] = tags
return self._item.update(item_properties=p)
else:
if title is None:
title = "Map Journal, %s" % uuid.uuid4().hex[:10]
if tags is None:
tags = "Story Map,Map Journal"
typeKeywords = ",".join(['JavaScript', 'layout-side', 'Map', 'MapJournal',
'Mapping Site', 'Online Map', 'Ready To Use',
'selfConfigured', 'Story Map', 'Story Maps',
'Web Map'])
item = self._gis.content.add(item_properties={
'title' : title,
'tags' : tags,
'text' : json.dumps(self._properties),
'typeKeywords' : typeKeywords,
'itemType' : 'text',
'type' : "Web Mapping Application",
})
parse = urlparse(self._gis._con.baseurl)
isinstance(self._gis, GIS)
if self._gis._portal.is_arcgisonline:
url = "%s://%s/apps/MapJournal/index.html?appid=%s" % (parse.scheme, parse.netloc, item.itemid)
else:
import os
wa = os.path.dirname(parse.path[1:])
url = "%s://%s/%s/sharing/rest/apps/MapJournal/index.html?appid=%s" % (parse.scheme, parse.netloc, wa, item.itemid)
return item.update(item_properties={
'url' : url
})
return False
#----------------------------------------------------------------------
def delete(self):
"""Deletes the saved item on ArcGIS Online/Portal"""
if self._item:
return self._item.delete()
return False
#----------------------------------------------------------------------
@property
def panel(self):
"""
Gets/Sets the panel state for the Journal Story Map
"""
return self._properties["values"]["settings"]["layout"]["id"]
#----------------------------------------------------------------------
@panel.setter
def panel(self, value):
"""
Gets/Sets the panel state for the Journal Story Map
"""
if value.lower() == "float":
self._properties["values"]["settings"]["layout"]["id"] = "float"
else:
self._properties["values"]["settings"]["layout"]["id"] = "side"
#----------------------------------------------------------------------
@property
def header(self):
"""gets/sets the headers for the Journal StoryMap"""
default = {
"social": {
"bitly": True,
"twitter": True,
"facebook": True
},
"logoURL": None,
"linkURL": "https://storymaps.arcgis.com",
"logoTarget": "",
"linkText": "A Story Map"
}
if 'header' in self._properties['values']['settings']:
return self._properties['values']['settings']['header']
else:
self._properties['values']['settings']['header'] = default
return default
#----------------------------------------------------------------------
@header.setter
def header(self, value):
""""""
if value is None:
default = {
"social": {
"bitly": True,
"twitter": True,
"facebook": True
},
"logoURL": None,
"linkURL": "https://storymaps.arcgis.com",
"logoTarget": "",
"linkText": "A Story Map"
}
self._properties['values']['settings']['header'] = default
else:
self._properties['values']['settings']['header'] = value
#----------------------------------------------------------------------
@property
def theme(self):
""""""
default = {
"colors": {
"text": "#FFFFFF",
"name": "float-default-1",
"softText": "#FFF",
"media": "#a0a0a0",
"themeMajor": "black",
"panel": "#000000",
"textLink": "#DDD",
"esriLogo": "white",
"dotNav": "#000000",
"softBtn": "#AAA"
},
"fonts": {
"sectionTitle": {
"value": "font-family:\'open_sansregular\', sans-serif;",
"id": "default"
},
"sectionContent": {
"value": "font-family:\'open_sansregular\', sans-serif;",
"id": "default"
}
}
}
if 'theme' in self._properties['values']['settings']:
return self._properties['values']['settings']['theme']
else:
self._properties['values']['settings']['theme'] = default
return self._properties['values']['settings']['theme']
return default
#----------------------------------------------------------------------
@theme.setter
def theme(self, value):
""""""
default = {
"colors": {
"text": "#FFFFFF",
"name": "float-default-1",
"softText": "#FFF",
"media": "#a0a0a0",
"themeMajor": "black",
"panel": "#000000",
"textLink": "#DDD",
"esriLogo": "white",
"dotNav": "#000000",
"softBtn": "#AAA"
},
"fonts": {
"sectionTitle": {
"value": "font-family:\'open_sansregular\', sans-serif;",
"id": "default"
},
"sectionContent": {
"value": "font-family:\'open_sansregular\', sans-serif;",
"id": "default"
}
}
}
if 'theme' in self._properties['values']['settings']:
self._properties['values']['settings']['theme'] = value
elif not 'theme' in self._properties['values']['settings']:
self._properties['values']['settings']['theme'] = value
elif value is None:
self._properties['values']['settings']['theme'] = default
| [
"urllib.parse.urlparse",
"json.dumps",
"uuid.uuid4",
"os.path.dirname",
"datetime.datetime.now",
"mimetypes.guess_type"
] | [((1826, 1854), 'json.dumps', 'json.dumps', (['self._properties'], {}), '(self._properties)\n', (1836, 1854), False, 'import json\n'), ((23626, 23658), 'urllib.parse.urlparse', 'urlparse', (['self._gis._con.baseurl'], {}), '(self._gis._con.baseurl)\n', (23634, 23658), False, 'from urllib.parse import urlparse\n'), ((6828, 6856), 'json.dumps', 'json.dumps', (['layer_visibility'], {}), '(layer_visibility)\n', (6838, 6856), False, 'import json\n'), ((7452, 7489), 'mimetypes.guess_type', 'mimetypes.guess_type', ([], {'url': 'url_or_item'}), '(url=url_or_item)\n', (7472, 7489), False, 'import mimetypes\n'), ((22576, 22604), 'json.dumps', 'json.dumps', (['self._properties'], {}), '(self._properties)\n', (22586, 22604), False, 'import json\n'), ((23925, 23956), 'os.path.dirname', 'os.path.dirname', (['parse.path[1:]'], {}), '(parse.path[1:])\n', (23940, 23956), False, 'import os\n'), ((23425, 23453), 'json.dumps', 'json.dumps', (['self._properties'], {}), '(self._properties)\n', (23435, 23453), False, 'import json\n'), ((16849, 16872), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16870, 16872), False, 'import datetime\n'), ((16921, 16944), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16942, 16944), False, 'import datetime\n'), ((22878, 22890), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (22888, 22890), False, 'import uuid\n'), ((10837, 10860), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10858, 10860), False, 'import datetime\n'), ((10913, 10936), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10934, 10936), False, 'import datetime\n'), ((20159, 20182), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20180, 20182), False, 'import datetime\n'), ((20235, 20258), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20256, 20258), False, 'import datetime\n')] |
import sys
import os
sys.path.append(os.getcwd())
import argparse
import numpy as np
import pmdarima
import torch
import torch.nn.functional as F
from torch import nn
from fusions.common_fusions import Stack
from unimodals.common_models import LSTMWithLinear
from datasets.stocks.get_data import get_dataloader
parser = argparse.ArgumentParser()
parser.add_argument('--input-stocks', metavar='input', help='input stocks')
parser.add_argument('--target-stock', metavar='target', help='target stock')
args = parser.parse_args()
print('Input: ' + args.input_stocks)
print('Target: ' + args.target_stock)
stocks = sorted(args.input_stocks.split(' '))
train_loader, val_loader, test_loader = get_dataloader(stocks, stocks, [args.target_stock], modality_first=True)
def baselines():
def best_constant(y_prev, y):
return float(nn.MSELoss()(torch.ones_like(y) * torch.mean(y), y))
def copy_last(y_prev, y):
return nn.MSELoss()(torch.cat([y_prev[-1:], y[:-1]]), y).item()
def arima(y_prev, y):
arr = y_prev.cpu()
arima = pmdarima.arima.auto_arima(arr)
pred = arima.predict(len(y))
return nn.MSELoss()(torch.tensor(pred, device='cuda').reshape(y.shape), y)
print('Best constant val MSE loss: ' + str(best_constant(train_loader.dataset.Y, val_loader.dataset.Y)))
print('Best constant test MSE loss: ' + str(best_constant(val_loader.dataset.Y, test_loader.dataset.Y)))
print('Copy-last val MSE loss: ' + str(copy_last(train_loader.dataset.Y, val_loader.dataset.Y)))
print('Copy-last test MSE loss: ' + str(copy_last(val_loader.dataset.Y, test_loader.dataset.Y)))
print('ARIMA val MSE loss: ' + str(arima(train_loader.dataset.Y, val_loader.dataset.Y)))
print('ARIMA test MSE loss: ' + str(arima(torch.cat([train_loader.dataset.Y, val_loader.dataset.Y]), test_loader.dataset.Y)))
baselines()
| [
"torch.ones_like",
"argparse.ArgumentParser",
"datasets.stocks.get_data.get_dataloader",
"torch.mean",
"os.getcwd",
"torch.nn.MSELoss",
"torch.tensor",
"torch.cat",
"pmdarima.arima.auto_arima"
] | [((323, 348), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (346, 348), False, 'import argparse\n'), ((692, 764), 'datasets.stocks.get_data.get_dataloader', 'get_dataloader', (['stocks', 'stocks', '[args.target_stock]'], {'modality_first': '(True)'}), '(stocks, stocks, [args.target_stock], modality_first=True)\n', (706, 764), False, 'from datasets.stocks.get_data import get_dataloader\n'), ((37, 48), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (46, 48), False, 'import os\n'), ((1062, 1092), 'pmdarima.arima.auto_arima', 'pmdarima.arima.auto_arima', (['arr'], {}), '(arr)\n', (1087, 1092), False, 'import pmdarima\n'), ((1145, 1157), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1155, 1157), False, 'from torch import nn\n'), ((838, 850), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (848, 850), False, 'from torch import nn\n'), ((851, 869), 'torch.ones_like', 'torch.ones_like', (['y'], {}), '(y)\n', (866, 869), False, 'import torch\n'), ((872, 885), 'torch.mean', 'torch.mean', (['y'], {}), '(y)\n', (882, 885), False, 'import torch\n'), ((936, 948), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (946, 948), False, 'from torch import nn\n'), ((949, 981), 'torch.cat', 'torch.cat', (['[y_prev[-1:], y[:-1]]'], {}), '([y_prev[-1:], y[:-1]])\n', (958, 981), False, 'import torch\n'), ((1158, 1191), 'torch.tensor', 'torch.tensor', (['pred'], {'device': '"""cuda"""'}), "(pred, device='cuda')\n", (1170, 1191), False, 'import torch\n'), ((1773, 1830), 'torch.cat', 'torch.cat', (['[train_loader.dataset.Y, val_loader.dataset.Y]'], {}), '([train_loader.dataset.Y, val_loader.dataset.Y])\n', (1782, 1830), False, 'import torch\n')] |
import os
from tests import PMGTestCase
from tests.fixtures import dbfixture, CommitteeQuestionData
class TestQuestionAnswer(PMGTestCase):
def setUp(self):
super(TestQuestionAnswer, self).setUp()
self.fx = dbfixture.data(CommitteeQuestionData,)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestQuestionAnswer, self).tearDown()
def test_get_minister_questions_combined(self):
response = self.client.get(
"minister-questions-combined/", base_url="http://api.pmg.test:5000/"
)
results = response.json["results"]
self.assertEqual(2, len(results))
questions = [result["question"] for result in results]
self.assertIn(
self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions,
)
self.assertIn(
self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions,
)
def test_get_minister_questions_combined_filter_by_year(self):
response = self.client.get(
"minister-questions-combined/?filter[year]=2018",
base_url="http://api.pmg.test:5000",
)
results = response.json["results"]
self.assertEqual(1, len(results))
questions = [result["question"] for result in results]
self.assertNotIn(
self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions,
)
self.assertIn(
self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions,
)
| [
"tests.fixtures.dbfixture.data"
] | [((229, 266), 'tests.fixtures.dbfixture.data', 'dbfixture.data', (['CommitteeQuestionData'], {}), '(CommitteeQuestionData)\n', (243, 266), False, 'from tests.fixtures import dbfixture, CommitteeQuestionData\n')] |
from datetime import datetime
# Rest framework
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.mixins import RetrieveModelMixin, ListModelMixin, UpdateModelMixin, CreateModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from rest_framework.permissions import IsAuthenticated
# Serializers
from csv_analyzer.apps.dataset.serializers import (
DataSetModelSerializer,
CreateDataSetModelSerializer,
FileDataSetModelSerializer,
)
# Models
from csv_analyzer.apps.dataset.models import DataSet
# Permissions
from csv_analyzer.apps.dataset.permissions.dataset import IsDataSetOwner
# Celery
from csv_analyzer.apps.dataset.tasks import populate_dataset_file
# MongoDB utils
from csv_analyzer.apps.mongodb.utils import MongoDBConnection
class DataSetViewSet(CreateModelMixin, ListModelMixin, RetrieveModelMixin, UpdateModelMixin, GenericViewSet):
permission_classes = (IsAuthenticated, IsDataSetOwner)
def get_queryset(self, *args, **kwargs):
# Using prefetch related to improve query performance.
return DataSet.objects.filter(owner=self.request.user).prefetch_related('files')
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
data = serializer.data
data['weather_date'] = self._get_data_set_weather_data(
from_date=request.GET.get('from_date'),
to_date=request.GET.get('to_date'),
data_set_id=str(instance.id)
)
return Response(data)
@staticmethod
def _get_data_set_weather_data(from_date, to_date, data_set_id):
"""
Get a data set's weather data.
:param from_date: String or None. Data Set from date filter. e.g. 2011-09-01
:param to_date: String or None. Data Set to date filter. e.g. 2011-09-21
:param data_set_id: String, Data Set Id.
:return: Dict with count of results and the data.
"""
mongo_client = MongoDBConnection()
mongo_query = {
'data_set_id': data_set_id,
}
if from_date or to_date:
mongo_query['date'] = {}
if from_date:
from_date = datetime.strptime(from_date, '%Y-%m-%d')
from_date = datetime.combine(from_date.date(), datetime.min.time())
mongo_query['date']['$gte'] = from_date
if to_date:
to_date = datetime.strptime(to_date, '%Y-%m-%d')
to_date = datetime.combine(to_date.date(), datetime.max.time())
mongo_query['date']['$lt'] = to_date
files_data = mongo_client.get_list(query=mongo_query)
return {
'count': len(files_data),
'data': files_data,
}
def get_serializer_class(self):
"""Return serializer based on action."""
if self.action == 'create':
return CreateDataSetModelSerializer
elif self.action == 'add_file':
return FileDataSetModelSerializer
return DataSetModelSerializer
def create(self, request, *args, **kwargs):
data = request.data.copy()
data.update({
'owner': request.user.id,
'is_analyzed': False,
})
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@action(detail=True, methods=["POST"], url_path='add-file')
def add_file(self, request, *args, **kwargs):
dataset = self.get_object()
serializer_class = self.get_serializer_class()
try:
data = request.data.copy()
except Exception:
data = request.data
data.update({
'data_set': dataset.id,
'is_analyzed': False,
})
serializer = serializer_class(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
populate_dataset_file.delay(dataset_file_id=serializer.instance.id)
dataset = self.get_object()
data = DataSetModelSerializer(dataset).data
return Response(data=data, status=status.HTTP_201_CREATED)
| [
"datetime.datetime.min.time",
"datetime.datetime.strptime",
"csv_analyzer.apps.mongodb.utils.MongoDBConnection",
"csv_analyzer.apps.dataset.tasks.populate_dataset_file.delay",
"rest_framework.response.Response",
"csv_analyzer.apps.dataset.serializers.DataSetModelSerializer",
"csv_analyzer.apps.dataset.m... | [((3654, 3712), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['POST']", 'url_path': '"""add-file"""'}), "(detail=True, methods=['POST'], url_path='add-file')\n", (3660, 3712), False, 'from rest_framework.decorators import action\n'), ((1623, 1637), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (1631, 1637), False, 'from rest_framework.response import Response\n'), ((2085, 2104), 'csv_analyzer.apps.mongodb.utils.MongoDBConnection', 'MongoDBConnection', ([], {}), '()\n', (2102, 2104), False, 'from csv_analyzer.apps.mongodb.utils import MongoDBConnection\n'), ((3573, 3647), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED', 'headers': 'headers'}), '(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n', (3581, 3647), False, 'from rest_framework.response import Response\n'), ((4204, 4271), 'csv_analyzer.apps.dataset.tasks.populate_dataset_file.delay', 'populate_dataset_file.delay', ([], {'dataset_file_id': 'serializer.instance.id'}), '(dataset_file_id=serializer.instance.id)\n', (4231, 4271), False, 'from csv_analyzer.apps.dataset.tasks import populate_dataset_file\n'), ((4377, 4428), 'rest_framework.response.Response', 'Response', ([], {'data': 'data', 'status': 'status.HTTP_201_CREATED'}), '(data=data, status=status.HTTP_201_CREATED)\n', (4385, 4428), False, 'from rest_framework.response import Response\n'), ((4325, 4356), 'csv_analyzer.apps.dataset.serializers.DataSetModelSerializer', 'DataSetModelSerializer', (['dataset'], {}), '(dataset)\n', (4347, 4356), False, 'from csv_analyzer.apps.dataset.serializers import DataSetModelSerializer, CreateDataSetModelSerializer, FileDataSetModelSerializer\n'), ((1147, 1194), 'csv_analyzer.apps.dataset.models.DataSet.objects.filter', 'DataSet.objects.filter', ([], {'owner': 'self.request.user'}), '(owner=self.request.user)\n', (1169, 1194), False, 'from csv_analyzer.apps.dataset.models import DataSet\n'), ((2306, 2346), 'datetime.datetime.strptime', 'datetime.strptime', (['from_date', '"""%Y-%m-%d"""'], {}), "(from_date, '%Y-%m-%d')\n", (2323, 2346), False, 'from datetime import datetime\n'), ((2538, 2576), 'datetime.datetime.strptime', 'datetime.strptime', (['to_date', '"""%Y-%m-%d"""'], {}), "(to_date, '%Y-%m-%d')\n", (2555, 2576), False, 'from datetime import datetime\n'), ((2410, 2429), 'datetime.datetime.min.time', 'datetime.min.time', ([], {}), '()\n', (2427, 2429), False, 'from datetime import datetime\n'), ((2636, 2655), 'datetime.datetime.max.time', 'datetime.max.time', ([], {}), '()\n', (2653, 2655), False, 'from datetime import datetime\n')] |
import csv
import os
import shutil
from datetime import datetime
from grid import *
#from cluster import *
from regions import *
start_time = datetime.now()
print("Allocating...")
#grid2
#gridSystem = GridSystem(-74.04, -73.775, 5, 40.63, 40.835, 5)
#gridname = "grid2"
#grid3
#gridSystem = GridSystem(-74.02, -73.938, 4, 40.7, 40.815, 6)
#gridname = "grid3"
#cluster1
#gridSystem = ClusterSystem("cluster1/clusters.csv")
#gridname = "cluster1"
gridSystem = RegionSystem("4year_features")
gridname = "region1"
invalids = 0
for y in ["FOIL2010", "FOIL2011", "FOIL2012", "FOIL2013"]:
for n in range(1,13):
filename = "../../new_chron/" + y + "/trip_data_" + str(n) + ".csv"
print("Reading file " + filename)
r = csv.reader(open(filename, "r"))
i = 0
header = True
for line in r:
if(header):
Trip.initHeader(line)
header = False
else:
trip = None
try:
trip = Trip(line)
except ValueError:
invalids += 1
if(trip!= None and (y!="FOIL" + str(trip.date.year) or n!= trip.date.month)):
trip.has_other_error = True
gridSystem.record(trip)
i += 1
if(i%1000000==0):
print("Read " + str(i) + " rows")
gridSystem.close()
end_time = datetime.now()
program_duration = end_time - start_time
print("Processing took " + str(program_duration))
| [
"datetime.datetime.now"
] | [((144, 158), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (156, 158), False, 'from datetime import datetime\n'), ((1233, 1247), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1245, 1247), False, 'from datetime import datetime\n')] |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.stateful."""
from absl.testing import absltest
from haiku._src import base
from haiku._src import module
from haiku._src import stateful
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
class StatefulTest(absltest.TestCase):
@test_utils.transform_and_run
def test_grad(self):
x = jnp.array(3.)
g = stateful.grad(SquareModule())(x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
def test_grad_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.grad() instead"):
stateful.grad(lambda x: x**2)(x)
@test_utils.transform_and_run
def test_value_and_grad(self):
x = jnp.array(2.)
y, g = stateful.value_and_grad(SquareModule())(x)
self.assertEqual(y, x ** 2)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
def test_value_and_grad_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.grad() instead"):
stateful.value_and_grad(lambda x: x**2)(x)
@test_utils.transform_and_run
def test_grad_aux(self):
o = object()
def f(x):
m = SquareModule()
return m(x), o
x = jnp.array(3.)
g, aux = stateful.grad(f, has_aux=True)(x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
self.assertIs(aux, o)
@test_utils.transform_and_run
def test_value_and_grad_aux(self):
o = object()
def f(x):
m = SquareModule()
return m(x), o
x = jnp.array(3.)
(y, aux), g = stateful.value_and_grad(f, has_aux=True)(x)
self.assertEqual(y, x ** 2)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
self.assertIs(aux, o)
def test_grad_and_jit(self):
def f(x):
g = stateful.grad(SquareModule())(x)
return g
x = jnp.array(3.)
f = transform.transform_with_state(f)
params, state = jax.jit(f.init)(None, x)
g, state = jax.jit(f.apply)(params, state, None, x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
def test_value_and_grad_and_jit(self):
def f(x):
y, g = stateful.value_and_grad(SquareModule())(x)
return y, g
x = jnp.array(3.)
f = transform.transform_with_state(f)
params, state = jax.jit(f.init)(None, x)
(y, g), state = jax.jit(f.apply)(params, state, None, x)
np.testing.assert_allclose(y, x ** 2, rtol=1e-3)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
@test_utils.transform_and_run
def test_jit(self):
mod = SquareModule()
x = jnp.array(2)
y = stateful.jit(mod)(x)
self.assertEqual(y, x ** 2)
def test_jit_no_transform(self):
x = jnp.array(2)
with self.assertRaises(ValueError, msg="Use jax.jit() instead"):
stateful.jit(lambda x: x**2)(x)
@test_utils.transform_and_run
def test_remat(self):
forward, backward = [], []
callback = _callback_prim(lambda: forward.append(None),
lambda: backward.append(None))
def test(remat):
x = jnp.array(3.)
mod = CountingModule()
self.assertEqual(mod.count, 0)
f = lambda x: callback(mod(x))
if remat:
f = stateful.remat(f)
y, g = stateful.value_and_grad(f)(x)
np.testing.assert_allclose(y, x ** 2, rtol=1e-3)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
self.assertEqual(mod.count, 1)
num_forward = len(forward)
num_backward = len(backward)
del forward[:], backward[:]
return num_forward, num_backward
# Sanity check.
self.assertEqual(test(remat=True), test(remat=True))
self.assertEqual(test(remat=False), test(remat=False))
# NOTE: JAX does not guarantee to execute primitives once and only once for
# a given function (we observe f=2,b=1 without remat and f=5,b=1 with
# remat), but we do expect that JAX will execute our primitive forward at
# least one more time with remat than without it.
num_forward_remat, num_backward_remat = test(remat=True)
num_forward_no_remat, num_backward_no_remat = test(remat=False)
self.assertGreater(num_forward_remat, num_forward_no_remat)
self.assertEqual(num_backward_remat, num_backward_no_remat)
def test_remat_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.remat() instead"):
stateful.remat(lambda x: x**2)(x)
def test_cond(self):
def f(x):
mod = SquareModule()
return stateful.cond(x == 2, x, mod, x, lambda x: mod(x + 1))
f = transform.transform_with_state(f)
for x, y in ((1, 4), (2, 4), (3, 16)):
x, y = map(jnp.array, (x, y))
params, state = f.init(None, x)
out, state = f.apply(params, state, None, x)
self.assertEqual(state, {"square_module": {"y": y}})
self.assertEqual(out, y)
def test_cond_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.cond() instead"):
stateful.cond(x == 2, x, lambda x: x**2, x, lambda x: (x + 1)**2)
def _callback_prim(forward, backward):
def f_impl(x):
forward()
return x
def b_impl(x):
backward()
return (x,)
prim = jax.core.Primitive("hk_callback")
prim.def_impl(f_impl)
prim.def_abstract_eval(f_impl)
jax.ad.deflinear(prim, b_impl)
return prim.bind
class CountingModule(module.Module):
@property
def count(self):
return base.get_state("count", [], init=jnp.zeros)
def __call__(self, x):
y = x ** 2
base.set_state("count", self.count + 1)
return y
class SquareModule(module.Module):
def __call__(self, x):
assert x.ndim == 0
p = base.get_parameter("p", [], jnp.int32, init=lambda *_: jnp.array(2))
y = x ** p
base.set_state("y", y)
return y
if __name__ == "__main__":
absltest.main()
| [
"haiku._src.stateful.grad",
"haiku._src.stateful.cond",
"jax.ad.deflinear",
"numpy.testing.assert_allclose",
"haiku._src.stateful.remat",
"absl.testing.absltest.main",
"jax.numpy.array",
"haiku._src.stateful.jit",
"haiku._src.base.set_state",
"jax.core.Primitive",
"haiku._src.stateful.value_and_... | [((5840, 5873), 'jax.core.Primitive', 'jax.core.Primitive', (['"""hk_callback"""'], {}), "('hk_callback')\n", (5858, 5873), False, 'import jax\n'), ((5933, 5963), 'jax.ad.deflinear', 'jax.ad.deflinear', (['prim', 'b_impl'], {}), '(prim, b_impl)\n', (5949, 5963), False, 'import jax\n'), ((6456, 6471), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (6469, 6471), False, 'from absl.testing import absltest\n'), ((1104, 1118), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (1113, 1118), True, 'import jax.numpy as jnp\n'), ((1163, 1212), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.0001)'}), '(g, 2 * x, rtol=0.0001)\n', (1189, 1212), True, 'import numpy as np\n'), ((1256, 1270), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (1265, 1270), True, 'import jax.numpy as jnp\n'), ((1453, 1467), 'jax.numpy.array', 'jnp.array', (['(2.0)'], {}), '(2.0)\n', (1462, 1467), True, 'import jax.numpy as jnp\n'), ((1557, 1606), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.0001)'}), '(g, 2 * x, rtol=0.0001)\n', (1583, 1606), True, 'import numpy as np\n'), ((1660, 1674), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (1669, 1674), True, 'import jax.numpy as jnp\n'), ((1940, 1954), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (1949, 1954), True, 'import jax.numpy as jnp\n'), ((2005, 2054), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.0001)'}), '(g, 2 * x, rtol=0.0001)\n', (2031, 2054), True, 'import numpy as np\n'), ((2236, 2250), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (2245, 2250), True, 'import jax.numpy as jnp\n'), ((2348, 2397), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.0001)'}), '(g, 2 * x, rtol=0.0001)\n', (2374, 2397), True, 'import numpy as np\n'), ((2535, 2549), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (2544, 2549), True, 'import jax.numpy as jnp\n'), ((2557, 2590), 'haiku._src.transform.transform_with_state', 'transform.transform_with_state', (['f'], {}), '(f)\n', (2587, 2590), False, 'from haiku._src import transform\n'), ((2696, 2744), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.001)'}), '(g, 2 * x, rtol=0.001)\n', (2722, 2744), True, 'import numpy as np\n'), ((2883, 2897), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (2892, 2897), True, 'import jax.numpy as jnp\n'), ((2905, 2938), 'haiku._src.transform.transform_with_state', 'transform.transform_with_state', (['f'], {}), '(f)\n', (2935, 2938), False, 'from haiku._src import transform\n'), ((3049, 3098), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', '(x ** 2)'], {'rtol': '(0.001)'}), '(y, x ** 2, rtol=0.001)\n', (3075, 3098), True, 'import numpy as np\n'), ((3102, 3150), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.001)'}), '(g, 2 * x, rtol=0.001)\n', (3128, 3150), True, 'import numpy as np\n'), ((3238, 3250), 'jax.numpy.array', 'jnp.array', (['(2)'], {}), '(2)\n', (3247, 3250), True, 'import jax.numpy as jnp\n'), ((3356, 3368), 'jax.numpy.array', 'jnp.array', (['(2)'], {}), '(2)\n', (3365, 3368), True, 'import jax.numpy as jnp\n'), ((4937, 4951), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (4946, 4951), True, 'import jax.numpy as jnp\n'), ((5203, 5236), 
'haiku._src.transform.transform_with_state', 'transform.transform_with_state', (['f'], {}), '(f)\n', (5233, 5236), False, 'from haiku._src import transform\n'), ((5540, 5554), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (5549, 5554), True, 'import jax.numpy as jnp\n'), ((6065, 6108), 'haiku._src.base.get_state', 'base.get_state', (['"""count"""', '[]'], {'init': 'jnp.zeros'}), "('count', [], init=jnp.zeros)\n", (6079, 6108), False, 'from haiku._src import base\n'), ((6154, 6193), 'haiku._src.base.set_state', 'base.set_state', (['"""count"""', '(self.count + 1)'], {}), "('count', self.count + 1)\n", (6168, 6193), False, 'from haiku._src import base\n'), ((6389, 6411), 'haiku._src.base.set_state', 'base.set_state', (['"""y"""', 'y'], {}), "('y', y)\n", (6403, 6411), False, 'from haiku._src import base\n'), ((1967, 1997), 'haiku._src.stateful.grad', 'stateful.grad', (['f'], {'has_aux': '(True)'}), '(f, has_aux=True)\n', (1980, 1997), False, 'from haiku._src import stateful\n'), ((2268, 2308), 'haiku._src.stateful.value_and_grad', 'stateful.value_and_grad', (['f'], {'has_aux': '(True)'}), '(f, has_aux=True)\n', (2291, 2308), False, 'from haiku._src import stateful\n'), ((2611, 2626), 'jax.jit', 'jax.jit', (['f.init'], {}), '(f.init)\n', (2618, 2626), False, 'import jax\n'), ((2651, 2667), 'jax.jit', 'jax.jit', (['f.apply'], {}), '(f.apply)\n', (2658, 2667), False, 'import jax\n'), ((2959, 2974), 'jax.jit', 'jax.jit', (['f.init'], {}), '(f.init)\n', (2966, 2974), False, 'import jax\n'), ((3004, 3020), 'jax.jit', 'jax.jit', (['f.apply'], {}), '(f.apply)\n', (3011, 3020), False, 'import jax\n'), ((3259, 3276), 'haiku._src.stateful.jit', 'stateful.jit', (['mod'], {}), '(mod)\n', (3271, 3276), False, 'from haiku._src import stateful\n'), ((3717, 3731), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (3726, 3731), True, 'import jax.numpy as jnp\n'), ((3929, 3978), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', '(x ** 2)'], {'rtol': '(0.001)'}), '(y, x ** 2, rtol=0.001)\n', (3955, 3978), True, 'import numpy as np\n'), ((3984, 4032), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.001)'}), '(g, 2 * x, rtol=0.001)\n', (4010, 4032), True, 'import numpy as np\n'), ((5630, 5699), 'haiku._src.stateful.cond', 'stateful.cond', (['(x == 2)', 'x', '(lambda x: x ** 2)', 'x', '(lambda x: (x + 1) ** 2)'], {}), '(x == 2, x, lambda x: x ** 2, x, lambda x: (x + 1) ** 2)\n', (5643, 5699), False, 'from haiku._src import stateful\n'), ((1346, 1377), 'haiku._src.stateful.grad', 'stateful.grad', (['(lambda x: x ** 2)'], {}), '(lambda x: x ** 2)\n', (1359, 1377), False, 'from haiku._src import stateful\n'), ((1750, 1791), 'haiku._src.stateful.value_and_grad', 'stateful.value_and_grad', (['(lambda x: x ** 2)'], {}), '(lambda x: x ** 2)\n', (1773, 1791), False, 'from haiku._src import stateful\n'), ((3444, 3474), 'haiku._src.stateful.jit', 'stateful.jit', (['(lambda x: x ** 2)'], {}), '(lambda x: x ** 2)\n', (3456, 3474), False, 'from haiku._src import stateful\n'), ((3862, 3879), 'haiku._src.stateful.remat', 'stateful.remat', (['f'], {}), '(f)\n', (3876, 3879), False, 'from haiku._src import stateful\n'), ((3893, 3919), 'haiku._src.stateful.value_and_grad', 'stateful.value_and_grad', (['f'], {}), '(f)\n', (3916, 3919), False, 'from haiku._src import stateful\n'), ((5028, 5060), 'haiku._src.stateful.remat', 'stateful.remat', (['(lambda x: x ** 2)'], {}), '(lambda x: x ** 2)\n', (5042, 5060), False, 'from haiku._src import 
stateful\n'), ((6356, 6368), 'jax.numpy.array', 'jnp.array', (['(2)'], {}), '(2)\n', (6365, 6368), True, 'import jax.numpy as jnp\n')] |
def run_rename(args):
from pocketbook.address_book import AddressBook
from pocketbook.key_store import KeyStore
address_book = AddressBook()
key_store = KeyStore()
# make sure that the new name is not present either as a key, or as an address
new_present = args.new in address_book.keys() or args.new in key_store.list_keys()
if new_present:
print('{} is already present, please choose a different destination name'.format(args.new))
return 1
# check the old address or key name
old_is_address = args.old in address_book.keys()
old_is_key = args.old in key_store.list_keys()
success = False
if old_is_address and old_is_key:
        raise RuntimeError('Data store corrupted, key looks like an address + key')
elif old_is_address:
success = address_book.rename(args.old, args.new)
elif old_is_key:
success = key_store.rename_key(args.old, args.new)
else:
print('{} doesn\'t appear to be a valid key or address name, please check and try again'.format(args.old))
return 1
if not success:
print('Failed to rename {} to {}'.format(args.old, args.new))
return 1
return 0
| [
"pocketbook.key_store.KeyStore",
"pocketbook.address_book.AddressBook"
] | [((140, 153), 'pocketbook.address_book.AddressBook', 'AddressBook', ([], {}), '()\n', (151, 153), False, 'from pocketbook.address_book import AddressBook\n'), ((170, 180), 'pocketbook.key_store.KeyStore', 'KeyStore', ([], {}), '()\n', (178, 180), False, 'from pocketbook.key_store import KeyStore\n')] |
from app.database import MOCK_PRODUCT_DATA
import re
from app.products.base_handler import BaseHandler
class ProductInfoHandler(BaseHandler):
"""
A class used to represent a mini-bot to handle product queries.
"""
def __init__(self) -> None:
super().__init__()
def create_match_paterns(self):
# Product-related patterns
self.price_pattern = re.compile(
r"(price|cost|how much|money)", re.IGNORECASE)
self.stock_pattern = re.compile(r"(stock|how many|amount)", re.IGNORECASE)
self.nutrition_pattern = re.compile(
r"(calories|protein|carbs|carbohydrates|sugar|fat|nutrition|nutritional|weight|health|healthy)", re.IGNORECASE)
def dispose(self):
super().dispose()
def handle_prod_intent(self, product: str, intent: str) -> str:
intent = intent.split("-")[1] # hardcoded to filter intent: product-<intent> Ex. product-price -> intent = price
request = None
cursor = self.db.execute_query(
"SELECT product.id FROM product WHERE product.name = ? OR product.names = ?",
params=tuple([product, product]))
data = cursor.fetchone()
if (not data):
return None
request = {"request": intent, "id": data[0]}
return self.handle_product_info(None, **request)
def handle(self, message: str, intent=None) -> str: # if 2 args => message = product_name
if intent is not None:
return self.handle_prod_intent(message, intent)
# Call parser
kwargs = self.parse(message=message)
# If there is a topic detected, we find the response
# By calling the handler with the message (for convenience) and its necessary arguments
response = None
if kwargs:
response = self.handle_product_info(message, **kwargs)
return response
def parse(self, message: str) -> dict:
request = None
# Check for keywords for prices
if self.nutrition_pattern.search(message):
request = "nutrition"
elif self.price_pattern.search(message):
request = "price"
elif self.stock_pattern.search(message):
request = "stock"
# If the request is truly about product
if request:
id = None
for prod in MOCK_PRODUCT_DATA:
prod_name = prod["name"]
prod_id = prod["id"]
prod_names = prod["names"]
if prod_name in message or prod_id in message or prod_names in message:
id = prod["id"]
return {"request": request, "id": id} if request else None
def handle_product_info(self, message=None, **kwargs) -> str:
# kwargs are arguments such as product_name, price, operators (<. >)
# This really depends on how you define your parser
prod_id = kwargs["id"]
# Get the product information
products = self.db.get_product("id", prod_id)
# Since id is unique, we can assume there is only one product
product = products[0]
reply = None
prod_msg_type = kwargs.get("request")
if prod_msg_type == "price":
reply = "%s cost $%s %s." % (
product['names'].capitalize(), product['price'], product['price_scale'])
elif prod_msg_type == "stock":
if product['in_stock']:
reply = "%s are in stock." % (product['names'].capitalize())
else:
reply = "%s are out of stock." % (
product['names'].capitalize())
elif prod_msg_type == "nutrition":
reply = "%s Nutrition Facts: Calories = %s, Protein = %s, Carbs = %s, Sugar = %s, Fat = %s." % (
product['name'].capitalize(), product['calories'], product['protein'], product['carbs'], product['sugar'], product['fat'])
return reply | [
"re.compile"
] | [((389, 445), 're.compile', 're.compile', (['"""(price|cost|how much|money)"""', 're.IGNORECASE'], {}), "('(price|cost|how much|money)', re.IGNORECASE)\n", (399, 445), False, 'import re\n'), ((489, 541), 're.compile', 're.compile', (['"""(stock|how many|amount)"""', 're.IGNORECASE'], {}), "('(stock|how many|amount)', re.IGNORECASE)\n", (499, 541), False, 'import re\n'), ((576, 707), 're.compile', 're.compile', (['"""(calories|protein|carbs|carbohydrates|sugar|fat|nutrition|nutritional|weight|health|healthy)"""', 're.IGNORECASE'], {}), "(\n '(calories|protein|carbs|carbohydrates|sugar|fat|nutrition|nutritional|weight|health|healthy)'\n , re.IGNORECASE)\n", (586, 707), False, 'import re\n')] |
import sys
import copy
import pprint
pp = pprint.PrettyPrinter(width=120)
import inspect
import numpy as np
from .. import __version__
from .. import qcvars
from ..driver.driver_helpers import print_variables
from ..exceptions import *
from ..molecule import Molecule
from ..pdict import PreservingDict
from .worker import psi4_subprocess
from .botanist import muster_inherited_options
def run_psi4(name, molecule, options, **kwargs):
#print('run_psi4 options <<<\n', options.print_changed(), '\n>>>')
#calledby = inspect.stack()
#print('CALLEDBY')
#for cur in calledby:
# print('CUR', cur[3])
if kwargs['ptype'] not in ['energy', 'properties', 'gradient', 'hessian']:
        raise ValidationError("""run_psi4: ptype not recognized: {}""".format(kwargs['ptype']))
jobrec = {}
jobrec['error'] = ''
jobrec['success'] = None
jobrec['return_output'] = True
prov = {}
prov['creator'] = 'QCDB'
prov['version'] = __version__
prov['routine'] = sys._getframe().f_code.co_name
jobrec['provenance'] = [prov]
jobrec['molecule'] = molecule.to_dict(np_out=False)
jobrec['method'] = name
jobrec['driver'] = kwargs['ptype']
jobrec['kwargs'] = kwargs
jobrec['options'] = copy.deepcopy(options)
jobrec['hooks'] = kwargs.get('hooks', {})
jobrec = psi4_driver(jobrec)
return jobrec
def psi4_driver(jobrec):
import json
try:
jobrec['molecule']
jobrec['method']
except KeyError as err:
#raise KeyError(
# 'Required fields missing from ({})'.format(jobrec.keys())) from err
jobrec['error'] += repr(err) + 'Required fields missing from ({})'.format(jobrec.keys())
return jobrec
#print('[1] PSI4 JOBREC PRE-PLANT (j@i) <<<')
#pp.pprint(jobrec)
#print('>>>')
psi4rec = psi4_plant(jobrec)
# test json roundtrip
jpsi4rec = json.dumps(psi4rec)
psi4rec = json.loads(jpsi4rec)
#print('[2] PSI4REC PRE-SUBPROCESS (x@i) <<<')
#pp.pprint(psi4rec)
#print('>>>\n')
psi4_subprocess(psi4rec) # updates psi4rec
#print('[3] PSI4REC POST-SUBPROCESS (x@io) <<<')
#pp.pprint(psi4rec)
#print('>>>\n')
psi4_harvest(jobrec, psi4rec) # updates jobrec
#print('[4] PSI4 JOBREC POST-HARVEST (j@io) <<<')
#pp.pprint(jobrec)
#print('>>>')
return jobrec
def psi4_plant(jobrec): # jobrec@i -> psi4@i
psi4rec = {}
psi4rec['json'] = {}
opts = jobrec['options']
# NOTE TODO very limited OPTIONS HANDSHAKE
muster_inherited_options(opts)
omem = opts.scroll['QCDB'].pop('MEMORY')
psi4rec['json']['memory'] = omem.value
psi4rec['json']['molecule'] = {'qm': jobrec['molecule']}
psi4rec['json']['driver'] = jobrec['driver']
mtd = jobrec['method']
psi4rec['json']['method'] = mtd[3:] if mtd.startswith('p4-') else mtd
#psi4rec['json']['args'] =
psi4rec['json']['kwargs'] = jobrec['kwargs']
#psi4rec['json']['scratch_location'] =
psi4rec['json']['return_output'] = True
#for hookkey, hookfunc in jobrec['hooks']['pre'].items():
# psi4rec['json']['in_' + hookkey] = hookfunc()
if opts.scroll['PSI4']['GRIDDAT'].value != '':
psi4rec['json']['infile_' + 'grid.dat'] = opts.scroll['PSI4']['GRIDDAT'].value
popts = {}
for k, v in opts.scroll['QCDB'].items():
if v.disputed():
popts[k] = v.value
for k, v in opts.scroll['PSI4'].items():
if v.disputed():
popts[k] = v.value
psi4rec['json']['options'] = popts
# Handle qcdb keywords implying cfour keyword values
# if core.get_option('CFOUR', 'TRANSLATE_PSI4'):
# harvester.muster_inherited_options(jobrec['options'])
# Handle conversion of qcdb keyword structure into psi4 format
# * psi wants python anyways, so no action needed
#psi4rec['command'] = ['psi4', '--json']
psi4rec['command'] = ['psi4', '--json', '--nthread', '6'] # TODO
return psi4rec
def psi4_harvest(jobrec, psi4rec):  # jobrec@i, psi4rec@i -> jobrec@io
"""Processes raw results from read-only `psi4rec` into QCAspect fields in returned `jobrec`."""
psi4rec = psi4rec['json'] # TODO NOT how this should be done figure out 1-tier/2-tier
try:
pass
#jobrec['molecule']['real']
#jobrec['do_gradient']
except KeyError as err:
raise KeyError(
'Required fields missing from ({})'.format(jobrec.keys())) from err
try:
psi4rec['raw_output']
#if jobrec['do_gradient'] is True:
# dftd3rec['dftd3_gradient']
except KeyError as err:
raise KeyError('Required fields missing from ({})'.format(
psi4rec.keys())) from err
if psi4rec['error']:
raise RuntimeError(psi4rec['error'])
#c4files = {}
for fl in psi4rec.keys():
if fl.startswith('outfile_'):
jobrec[fl] = psi4rec[fl]
#for fl in ['GRD', 'FCMFINAL', 'DIPOL']:
# field = 'output_' + fl.lower()
# if field in cfourrec:
# text += ' Cfour scratch file {} has been read\n'.format(fl)
# text += cfourrec[field]
# c4files[fl] = cfourrec[field]
# Absorb results into qcdb data structures
progvars = PreservingDict(psi4rec['psivars'])
import psi4
progarrs = {k: np.array(psi4.core.Matrix.from_serial(v)) for k, v in psi4rec['psiarrays'].items()}
progvars.update(progarrs)
qcvars.build_out(progvars)
calcinfo = qcvars.certify(progvars)
jobrec['raw_output'] = psi4rec['raw_output']
jobrec['qcvars'] = calcinfo
#prov = {}
#prov['creator'] = 'Psi4'
#prov['routine'] = sys._getframe().f_code.co_name
#prov['version'] = version
jobrec['provenance'].append(psi4rec['provenance'])
return jobrec
"""
Required Input Fields
---------------------
Optional Input Fields
---------------------
Output Fields
-------------
"""
| [
"json.loads",
"psi4.core.Matrix.from_serial",
"json.dumps",
"sys._getframe",
"pprint.PrettyPrinter",
"copy.deepcopy"
] | [((42, 73), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'width': '(120)'}), '(width=120)\n', (62, 73), False, 'import pprint\n'), ((1234, 1256), 'copy.deepcopy', 'copy.deepcopy', (['options'], {}), '(options)\n', (1247, 1256), False, 'import copy\n'), ((1881, 1900), 'json.dumps', 'json.dumps', (['psi4rec'], {}), '(psi4rec)\n', (1891, 1900), False, 'import json\n'), ((1915, 1935), 'json.loads', 'json.loads', (['jpsi4rec'], {}), '(jpsi4rec)\n', (1925, 1935), False, 'import json\n'), ((991, 1006), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (1004, 1006), False, 'import sys\n'), ((5316, 5347), 'psi4.core.Matrix.from_serial', 'psi4.core.Matrix.from_serial', (['v'], {}), '(v)\n', (5344, 5347), False, 'import psi4\n')] |
import unittest
from typing import Dict, List
from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS
from src.student_info import StudentInfo
class TestChallenge(unittest.TestCase):
def test_preprocess(self):
self.assertEqual(Challenge.preprocess_ans("a. 37th floor, b. 42nd floor, c. 39th floor, d. 40th floor"), 37423940)
self.assertEqual(Challenge.preprocess_ans("answer is 25 ducklings"), 25)
self.assertEqual(Challenge.preprocess_ans("4 c.1"), 41)
self.assertEqual(Challenge.preprocess_ans(a="abcd efgh. ij"), DEFAULT_EMPTY_ANS)
self.assertEqual(Challenge.preprocess_ans(a="5 blue. ", text_retain_dict={"blue":1}), 51)
self.assertEqual(Challenge.preprocess_ans(a="5 {blue}. __OR__ 6 {brown}", text_retain_dict={"blue":0, "brown":1}), 5061)
self.assertEqual(Challenge.preprocess_ans(a="5 blue. ", text_retain_dict={"blue":1}), 51)
def test_gold_loading(self):
g, challenge_wise_retaining = Challenge.load_gold_answers(fp="test_data/test_correct_answers.csv")
assert len(g) == 2
assert g["MC2"].challenge_name == "MC2"
assert not g["MC2"].student # Gold has no student name.
        # We did not pass any retaining text (i.e., all text except numbers is removed,
        # apart from the special retaining strings)
assert challenge_wise_retaining["MC2"][8] == {"blue":0, "red":1}
def test_student_ans_loading(self):
s: Dict[StudentInfo, List["Challenge"]] = Challenge.load_student_answers(fp="test_data/test_student_answers.csv", challenge_wise_retaining={})
assert len(s) == 5, f"There should be 5 student entries, assuming no repeated entries in the test file."
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"src.math_challenge.Challenge.preprocess_ans",
"src.math_challenge.Challenge.load_gold_answers",
"src.math_challenge.Challenge.load_student_answers"
] | [((1739, 1754), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1752, 1754), False, 'import unittest\n'), ((975, 1043), 'src.math_challenge.Challenge.load_gold_answers', 'Challenge.load_gold_answers', ([], {'fp': '"""test_data/test_correct_answers.csv"""'}), "(fp='test_data/test_correct_answers.csv')\n", (1002, 1043), False, 'from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS\n'), ((1492, 1596), 'src.math_challenge.Challenge.load_student_answers', 'Challenge.load_student_answers', ([], {'fp': '"""test_data/test_student_answers.csv"""', 'challenge_wise_retaining': '{}'}), "(fp='test_data/test_student_answers.csv',\n challenge_wise_retaining={})\n", (1522, 1596), False, 'from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS\n'), ((246, 337), 'src.math_challenge.Challenge.preprocess_ans', 'Challenge.preprocess_ans', (['"""a. 37th floor, b. 42nd floor, c. 39th floor, d. 40th floor"""'], {}), "(\n 'a. 37th floor, b. 42nd floor, c. 39th floor, d. 40th floor')\n", (270, 337), False, 'from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS\n'), ((369, 419), 'src.math_challenge.Challenge.preprocess_ans', 'Challenge.preprocess_ans', (['"""answer is 25 ducklings"""'], {}), "('answer is 25 ducklings')\n", (393, 419), False, 'from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS\n'), ((450, 483), 'src.math_challenge.Challenge.preprocess_ans', 'Challenge.preprocess_ans', (['"""4 c.1"""'], {}), "('4 c.1')\n", (474, 483), False, 'from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS\n'), ((514, 557), 'src.math_challenge.Challenge.preprocess_ans', 'Challenge.preprocess_ans', ([], {'a': '"""abcd efgh. ij"""'}), "(a='abcd efgh. ij')\n", (538, 557), False, 'from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS\n'), ((603, 671), 'src.math_challenge.Challenge.preprocess_ans', 'Challenge.preprocess_ans', ([], {'a': '"""5 blue. """', 'text_retain_dict': "{'blue': 1}"}), "(a='5 blue. ', text_retain_dict={'blue': 1})\n", (627, 671), False, 'from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS\n'), ((701, 804), 'src.math_challenge.Challenge.preprocess_ans', 'Challenge.preprocess_ans', ([], {'a': '"""5 {blue}. __OR__ 6 {brown}"""', 'text_retain_dict': "{'blue': 0, 'brown': 1}"}), "(a='5 {blue}. __OR__ 6 {brown}', text_retain_dict={\n 'blue': 0, 'brown': 1})\n", (725, 804), False, 'from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS\n'), ((830, 898), 'src.math_challenge.Challenge.preprocess_ans', 'Challenge.preprocess_ans', ([], {'a': '"""5 blue. """', 'text_retain_dict': "{'blue': 1}"}), "(a='5 blue. ', text_retain_dict={'blue': 1})\n", (854, 898), False, 'from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS\n')] |
import hlt
import logging
from collections import OrderedDict
# GAME START
game = hlt.Game("Spoof_v7")
logging.info('Starting my %s bot!', game._name)
TURN = 0
def navigate(ship, entity, multiplier = 1):
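    # Helper: steer toward the closest point of the target entity at a (scaled) speed,
    # ignoring other ships, and queue the resulting move command.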
navigate_command = ship.navigate(
ship.closest_point_to(entity),
game_map,
speed=int(hlt.constants.MAX_SPEED * multiplier),
ignore_ships=True)
if navigate_command:
command_queue.append(navigate_command)
def kamikazee(ship, entity):
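    # Helper: fly directly at the target entity at full speed without ignoring other ships.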
navigate_command = ship.navigate(
entity,
game_map,
speed=int(hlt.constants.MAX_SPEED),
ignore_ships=False)
if navigate_command:
command_queue.append(navigate_command)
while True:
# TURN START
TURN += 1
group_attack_limit = 3
attack_ship_modifier = .4
game_map = game.update_map()
command_queue = []
me = game_map.get_me()
enemies = [enemy for enemy in game_map.all_players() if enemy.id != me.id]
my_ships = me.all_ships()
my_docked_ships = [ship for ship in my_ships if ship.docking_status != ship.DockingStatus.UNDOCKED]
#planet_docking_status = []
enemy_ships = [ship for ship in game_map._all_ships() if ship not in my_ships]
docked_enemy_ships = [ship for ship in enemy_ships if ship.docking_status != ship.DockingStatus.UNDOCKED]
unowned_planets = [planet for planet in game_map.all_planets() if not planet.is_owned()]
my_planets = [planet for planet in game_map.all_planets() if planet.is_owned() and planet.owner.id == me.id]
enemy_planets = [planet for planet in game_map.all_planets() if planet.is_owned() and planet.owner.id != me.id]
targeted_planets = []
targeted_ships = []
# find center of enemy mass
planet_x = [planet.x for planet in enemy_planets]
ship_x = [ship.x for ship in enemy_ships]
planet_y = [planet.y for planet in enemy_planets]
ship_y = [ship.y for ship in enemy_ships]
x = planet_x + ship_x
y = planet_y + ship_y
enemy_centroid = hlt.entity.Position(0,0)
if len(x):
enemy_centroid = hlt.entity.Position(sum(x) / len(x), sum(y) / len(y))
entities_by_distance_to_enemy_centroid = OrderedDict(sorted(game_map.nearby_entities_by_distance(enemy_centroid).items(), key=lambda t: t[0]))
my_ships_by_distance_to_enemy_centroid = [entities_by_distance_to_enemy_centroid[distance][0]
for distance in entities_by_distance_to_enemy_centroid
if entities_by_distance_to_enemy_centroid[distance][0] in my_ships
and entities_by_distance_to_enemy_centroid[distance][0] not in my_docked_ships]
# adjust limits based on ship counts
my_ship_count = len(my_ships)
enemy_ship_count = len(enemy_ships)
if my_ship_count > 0 and enemy_ship_count > 0:
ratio = (my_ship_count / enemy_ship_count)
if ratio > 1:
group_attack_limit *= ratio
# logging.info('group attack limit: %s', group_attack_limit)
#logging.info(enemy_centroid)
    # find the undocked ships closest to the action and make them fighters first; the rest act as miners
attack_ships = my_ships_by_distance_to_enemy_centroid[0 : int(len(my_ships_by_distance_to_enemy_centroid) * attack_ship_modifier)]
# logging.info('Number of attack ships: %s', len(attack_ships))
# For every ship that I control
for ship in my_ships:
# If the ship is docked
if ship.docking_status != ship.DockingStatus.UNDOCKED:
# Skip this ship
continue
entities_by_distance = OrderedDict(sorted(game_map.nearby_entities_by_distance(ship).items(), key=lambda t: t[0]))
target_planets = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in game_map.all_planets() and entities_by_distance[distance][0] not in targeted_planets]
target_unowned_planets = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in unowned_planets and entities_by_distance[distance][0] not in targeted_planets]
target_enemy_planets = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in enemy_planets]
target_ships = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in enemy_ships]
target_docked_ships = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in docked_enemy_ships]
# if ship in attack_ships attack
if ship in attack_ships:
for enemy_ship in target_ships:
# if unowned planet is closer, then dock, otherwise attack
# if target_unowned_planets[0]:
# if ship.calculate_distance_between(target_unowned_planets[0]) < ship.calculate_distance_between(enemy_ship):
# if ship.can_dock(target_unowned_planets[0]):
# command_queue.append(ship.dock(target_unowned_planets[0]))
# else:
# navigate(ship, enemy_ship, 1)
# else:
# if enemy is targeted by n ships then get next closest ship
if enemy_ship in targeted_ships:
if targeted_ships.count(enemy_ship) >= group_attack_limit:
# logging.info('group attack limit met, trying next ship')
continue
targeted_ships.append(enemy_ship)
navigate(ship, enemy_ship, 1)
break
else:
for planet in target_planets:
# If we can dock, let's (try to) dock. If two ships try to dock at once, neither will be able to.
if ship.can_dock(planet) and planet in unowned_planets:
command_queue.append(ship.dock(planet))
elif ship.can_dock(planet) and planet in my_planets and not planet.is_full():
command_queue.append(ship.dock(planet))
# if planet is owned then attack
elif planet.is_owned() and planet in enemy_planets:
for enemy_ship in planet.all_docked_ships():
if enemy_ship:
navigate(ship, enemy_ship)
break
else:
targeted_planets.append(planet)
navigate(ship, planet)
break
# Send our set of commands to the Halite engine for this turn
game.send_command_queue(command_queue)
# TURN END
# GAME END
| [
"hlt.entity.Position",
"logging.info",
"hlt.Game"
] | [((83, 103), 'hlt.Game', 'hlt.Game', (['"""Spoof_v7"""'], {}), "('Spoof_v7')\n", (91, 103), False, 'import hlt\n'), ((104, 151), 'logging.info', 'logging.info', (['"""Starting my %s bot!"""', 'game._name'], {}), "('Starting my %s bot!', game._name)\n", (116, 151), False, 'import logging\n'), ((2005, 2030), 'hlt.entity.Position', 'hlt.entity.Position', (['(0)', '(0)'], {}), '(0, 0)\n', (2024, 2030), False, 'import hlt\n')] |
import pytest
from eph.horizons import *
@pytest.fixture(params=[
('earth', '399'),
('\'earth\'', '399'),
('Earth', '399'),
('399', '399'),
('\'399\'', '399'),
('pluto', 'pluto'),
])
def codify_obj_data(request):
return request.param
def test_codify_obj(codify_obj_data):
data, result = codify_obj_data
assert codify_obj(data) == result
@pytest.fixture(params=[
('earth', '@399'),
('\'earth\'', '@399'),
('\'@earth\'', '@earth'),
('399', '@399'),
('\'399\'', '@399'),
('\'@399\'', '@399'),
])
def codify_site_data(request):
return request.param
def test_codify_site(codify_site_data):
data, result = codify_site_data
assert codify_site(data) == result
@pytest.fixture(params=[
('399', 'earth'),
('299', 'venus'),
('@499', 'mars'),
('1@399', '1@399'),
('@earth', '@earth'),
])
def humanify_data(request):
return request.param
def test_humanify(humanify_data):
data, result = humanify_data
assert humanify(data) == result
@pytest.fixture(params=[
'2017-04-22 00:00',
Time('2017-4-22'),
])
def format_time_data(request):
return request.param
def test_format_time(format_time_data):
assert str(format_time(format_time_data)) == '2017-04-22 00:00'
@pytest.fixture(params=[
('COMMAND', 'COMMAND'),
('Command', 'COMMAND'),
('target', 'COMMAND'),
('OBJECT', 'COMMAND'),
('table-type', 'TABLE_TYPE'),
('key', None),
])
def transformkey_data(request):
return request.param
def test_transformkey(transformkey_data):
key, jplparam = transformkey_data
try:
assert transform_key(key) == jplparam
except Exception as e:
assert e.__class__ == JplBadParamError
@pytest.fixture(params=[
('COMMAND', 'earth', '399'),
('CENTER', '@399', '@399'),
('CENTER', '399', '@399'),
])
def transformvalue_data(request):
return request.param
def test_transformvalue(transformvalue_data):
key, value, result = transformvalue_data
assert transform_value(key, value) == result
@pytest.fixture(params=[
(('target', 'earth'), ('COMMAND', '399')),
(('Command', 'Earth'), ('COMMAND', '399')),
(('OBJECT', '399'), ('COMMAND', '399')),
(('Origin', 'earth'), ('CENTER', '@399')),
(('key', 'value'), (None, None)),
])
def transform_data(request):
return request.param
def test_transform(transform_data):
data, result = transform_data
key, value = data
try:
assert transform(key, value) == result
except Exception as e:
assert e.__class__ == JplBadParamError
@pytest.fixture(params=[
('START_TIME', True),
('object', True),
('key', False),
])
def is_jpl_param_data(request):
return request.param
def test_is_jpl_param(is_jpl_param_data):
key, result = is_jpl_param_data
assert is_jpl_param(key) == result
| [
"pytest.fixture"
] | [((45, 182), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[(\'earth\', \'399\'), ("\'earth\'", \'399\'), (\'Earth\', \'399\'), (\'399\', \'399\'), (\n "\'399\'", \'399\'), (\'pluto\', \'pluto\')]'}), '(params=[(\'earth\', \'399\'), ("\'earth\'", \'399\'), (\'Earth\',\n \'399\'), (\'399\', \'399\'), ("\'399\'", \'399\'), (\'pluto\', \'pluto\')])\n', (59, 182), False, 'import pytest\n'), ((381, 528), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[(\'earth\', \'@399\'), ("\'earth\'", \'@399\'), ("\'@earth\'", \'@earth\'), (\'399\',\n \'@399\'), ("\'399\'", \'@399\'), ("\'@399\'", \'@399\')]'}), '(params=[(\'earth\', \'@399\'), ("\'earth\'", \'@399\'), ("\'@earth\'",\n \'@earth\'), (\'399\', \'@399\'), ("\'399\'", \'@399\'), ("\'@399\'", \'@399\')])\n', (395, 528), False, 'import pytest\n'), ((736, 859), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('399', 'earth'), ('299', 'venus'), ('@499', 'mars'), ('1@399', '1@399'),\n ('@earth', '@earth')]"}), "(params=[('399', 'earth'), ('299', 'venus'), ('@499', 'mars'),\n ('1@399', '1@399'), ('@earth', '@earth')])\n", (750, 859), False, 'import pytest\n'), ((1283, 1454), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('COMMAND', 'COMMAND'), ('Command', 'COMMAND'), ('target', 'COMMAND'), (\n 'OBJECT', 'COMMAND'), ('table-type', 'TABLE_TYPE'), ('key', None)]"}), "(params=[('COMMAND', 'COMMAND'), ('Command', 'COMMAND'), (\n 'target', 'COMMAND'), ('OBJECT', 'COMMAND'), ('table-type',\n 'TABLE_TYPE'), ('key', None)])\n", (1297, 1454), False, 'import pytest\n'), ((1744, 1855), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('COMMAND', 'earth', '399'), ('CENTER', '@399', '@399'), ('CENTER', '399',\n '@399')]"}), "(params=[('COMMAND', 'earth', '399'), ('CENTER', '@399',\n '@399'), ('CENTER', '399', '@399')])\n", (1758, 1855), False, 'import pytest\n'), ((2071, 2313), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[(('target', 'earth'), ('COMMAND', '399')), (('Command', 'Earth'), (\n 'COMMAND', '399')), (('OBJECT', '399'), ('COMMAND', '399')), (('Origin',\n 'earth'), ('CENTER', '@399')), (('key', 'value'), (None, None))]"}), "(params=[(('target', 'earth'), ('COMMAND', '399')), ((\n 'Command', 'Earth'), ('COMMAND', '399')), (('OBJECT', '399'), (\n 'COMMAND', '399')), (('Origin', 'earth'), ('CENTER', '@399')), (('key',\n 'value'), (None, None))])\n", (2085, 2313), False, 'import pytest\n'), ((2604, 2683), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('START_TIME', True), ('object', True), ('key', False)]"}), "(params=[('START_TIME', True), ('object', True), ('key', False)])\n", (2618, 2683), False, 'import pytest\n')] |
import os
import shutil
def delete_configs(config, dataset, username):
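    # Remove a single config directory for this dataset, or every config directory
    # (except 'input' and 'factor') when config == 'all'.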
if config != 'all':
paths = [os.path.join('user_data', username, dataset, config)]
else:
paths = [os.path.join('user_data', username, dataset, d) for d in
os.listdir(os.path.join('user_data', username, dataset)) if
os.path.isdir(os.path.join('user_data', username, dataset, d)) and d != 'input' and d != 'factor']
for path in paths:
shutil.rmtree(path)
def delete_dataset(APP_ROOT, username, dataset):
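    # Remove the whole dataset directory belonging to the given user.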
path = os.path.join(APP_ROOT, 'user_data', username, dataset)
print('removing ...' + str(path))
shutil.rmtree(path)
| [
"os.path.join",
"shutil.rmtree"
] | [((558, 612), 'os.path.join', 'os.path.join', (['APP_ROOT', '"""user_data"""', 'username', 'dataset'], {}), "(APP_ROOT, 'user_data', username, dataset)\n", (570, 612), False, 'import os\n'), ((655, 674), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (668, 674), False, 'import shutil\n'), ((476, 495), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (489, 495), False, 'import shutil\n'), ((114, 166), 'os.path.join', 'os.path.join', (['"""user_data"""', 'username', 'dataset', 'config'], {}), "('user_data', username, dataset, config)\n", (126, 166), False, 'import os\n'), ((195, 242), 'os.path.join', 'os.path.join', (['"""user_data"""', 'username', 'dataset', 'd'], {}), "('user_data', username, dataset, d)\n", (207, 242), False, 'import os\n'), ((280, 324), 'os.path.join', 'os.path.join', (['"""user_data"""', 'username', 'dataset'], {}), "('user_data', username, dataset)\n", (292, 324), False, 'import os\n'), ((360, 407), 'os.path.join', 'os.path.join', (['"""user_data"""', 'username', 'dataset', 'd'], {}), "('user_data', username, dataset, d)\n", (372, 407), False, 'import os\n')] |
import os
import bz2
import json
import random
import pickle
from collections import defaultdict, Counter
from tqdm import tqdm
import torch
from data.crosswoz.data_process.dst.trade_preprocess import (
EXPERIMENT_DOMAINS,
Lang,
get_seq,
get_slot_information,
)
class CNEmbedding:
def __init__(self, vector_path):
self.word2vec = {}
with bz2.open(vector_path, "rt", encoding="utf8") as fin:
lines = fin.readlines()
        # the first line is metadata
for line in tqdm(lines[1:], desc="Generating pretrained embedding"):
line = line.strip()
tokens = line.split()
word = tokens[0]
vec = tokens[1:]
vec = [float(item) for item in vec]
self.word2vec[word] = vec
self.embed_size = 300
def emb(self, token, default="zero"):
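        # Look up the pretrained vector for a token; unknown tokens fall back to a
        # vector filled with zeros, random values, or None depending on `default`.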
get_default = {
"none": lambda: None,
"zero": lambda: 0.0,
"random": lambda: random.uniform(-0.1, 0.1),
}[default]
vec = self.word2vec.get(token, None)
if vec is None:
vec = [get_default()] * self.embed_size
return vec
def dump_pretrained_emb(orig_embedding_path, index2word, dump_path):
print("Dumping pretrained embeddings...")
embeddings = [CNEmbedding(orig_embedding_path)]
embedding = []
count = [0.0, 0.0]
for i in tqdm(range(len(index2word))):
w = index2word[i]
e = []
for emb in embeddings:
e += emb.emb(w, default="zero")
# stat embed existence
        count[1] += 1.0  # total number of words
        if w in embeddings[0].word2vec:
            count[0] += 1.0  # number of words covered by the embedding
# e += [0.] * 300
embedding.append(e)
with open(dump_path, "w") as f:
json.dump(embedding, f)
print(f"Word exists in embedding mat: {count[0] / count[1] * 100}")
def fix_general_label_error(belief_state):
"""
:param belief_state:
"belief_state": [
{
"slots": [
[
"餐馆-推荐菜",
"驴 杂汤"
]
]
},
{
"slots": [
[
"餐馆-人均消费",
"100 - 150 元"
]
]
}
]
:return:
"""
belief_state_dict = {
slot_value["slots"][0][0]: slot_value["slots"][0][1]
for slot_value in belief_state
}
return belief_state_dict
def read_langs(
file_name, gating_dict, slots, dataset, lang, mem_lang, load_lang, config
):
print(("Reading from {}".format(file_name)))
data = []
max_resp_len, max_value_len = 0, 0
    domain_counter = defaultdict(int)  # how many dialogues per domain
gate_counter = []
with open(file_name, "r", encoding="utf8") as f:
dials = json.load(f)
if config["debug"]:
dials = dials[:10]
# create vocab first
for dial_dict in dials: # 一个 dial_dict 就是一个对话,包括多轮
if not load_lang and (config["all_vocab"] or dataset == "train"):
for ti, turn in enumerate(dial_dict["dialogue"]):
                # build the utterance vocabulary
lang.index_words(turn["system_transcript"], "utter")
lang.index_words(turn["transcript"], "utter")
for dial_dict in dials:
dialog_history = ""
# Filtering and counting domains
for domain in dial_dict["domains"]:
if domain not in EXPERIMENT_DOMAINS:
continue
domain_counter[domain] += 1
# Reading data
for ti, turn in enumerate(dial_dict["dialogue"]):
turn_domain = turn["domain"]
            turn_id = turn["turn_idx"]  # always 0 in this dataset, which looks like a bug
turn_uttr = turn["system_transcript"] + " ; " + turn["transcript"]
turn_uttr_strip = turn_uttr.strip()
dialog_history += (
turn["system_transcript"] + " ; " + turn["transcript"] + " ; "
)
source_text = dialog_history.strip()
            # originally for the English dataset; e.g. {"餐馆-推荐菜": "驴 杂汤"}, the first entry of dev_dials.json
turn_belief_dict = fix_general_label_error(turn["belief_state"])
# List['domain-slot-value']
turn_belief_list = [
str(k) + "-" + str(v) for k, v in turn_belief_dict.items()
]
if not load_lang and (config["all_vocab"] or dataset == "train"):
                # build the slot-value vocabulary
mem_lang.index_words(turn_belief_dict, "belief")
class_label, generate_y, slot_mask, gating_label = [], [], [], []
            # each turn gets one value per ontology slot, so the lists below are as long as the ontology
            for slot in slots:  # ontology
                # only care about the ontology slots needed in this turn
                if slot in turn_belief_dict.keys():  # dialogue
                    generate_y.append(turn_belief_dict[slot])
                    # the ontology does contain "none" values
                    if turn_belief_dict[slot] == "none":  # a "none" value can only take this branch
gating_label.append(gating_dict["none"])
else:
gating_label.append(gating_dict["ptr"])
if max_value_len < len(turn_belief_dict[slot]):
max_value_len = len(turn_belief_dict[slot])
else:
generate_y.append("none")
gating_label.append(gating_dict["none"])
gate_counter.extend(gating_label)
            # the dialogue content can be reconstructed from ID and turn_idx
data_detail = {
"ID": dial_dict["dialogue_idx"],
"domains": dial_dict["domains"],
"turn_domain": turn_domain,
"turn_id": turn_id, # 好像都是 0
"dialog_history": source_text,
"turn_belief": turn_belief_list,
"gating_label": gating_label,
"turn_uttr": turn_uttr_strip, # 每一轮的系统和人的话语
"generate_y": generate_y,
}
data.append(data_detail)
if max_resp_len < len(source_text.split()):
                max_resp_len = len(source_text.split())  # dialogue count; system and user each count as one
    # add t{} to the lang file (not clear what this is for)
if "t{}".format(max_value_len - 1) not in mem_lang.word2index.keys():
for time_i in range(max_value_len):
mem_lang.index_words("t{}".format(time_i), "utter")
print("domain_counter", domain_counter)
print("gate counter", Counter(gate_counter))
return data, max_resp_len
def prepare_data_seq(config):
eval_batch = (
config["eval_batch_size"] if config["eval_batch_size"] else config["batch_size"]
)
train_file_path = config["train_dials"]
dev_file_path = config["dev_dials"]
test_file_path = config["test_dials"]
ontology_file_path = config["ontology"]
# load domain-slot pairs from ontology
ontology = json.load(open(ontology_file_path, "r", encoding="utf8"))
slots = get_slot_information(ontology)
gating_dict = {"ptr": 0, "none": 1}
# Vocabulary
lang_name = "lang-all.pkl" if config["all_vocab"] else "lang-train.pkl"
mem_lang_name = "mem-lang-all.pkl" if config["all_vocab"] else "mem-lang-train.pkl"
if config["debug"]:
lang_name = "debug-" + lang_name
mem_lang_name = "debug-" + mem_lang_name
lang_file_path = os.path.join(config["data_path"], lang_name)
mem_lang_file_path = os.path.join(config["data_path"], mem_lang_name)
load_lang = False
if (
os.path.exists(lang_file_path) and os.path.exists(mem_lang_file_path)
) and not config["clean_cache"]:
print("Loading saved lang files...")
load_lang = True
with open(lang_file_path, "rb") as f:
lang = pickle.load(f)
with open(mem_lang_file_path, "rb") as f:
mem_lang = pickle.load(f)
else:
lang, mem_lang = Lang(config), Lang(config)
# 都包含了 ontology 中的 domain 和 slot,之后分别包含 utterance 和 domain-slot-value
lang.index_words(slots, "slot")
mem_lang.index_words(slots, "slot")
    # build the dataloaders
pair_train, train_max_len = read_langs(
train_file_path, gating_dict, slots, "train", lang, mem_lang, load_lang, config
)
train_loader = get_seq(
pair_train,
lang,
mem_lang,
config["batch_size"],
config["n_gpus"],
shuffle=True,
config=config,
)
train_vocab_size = lang.n_words
pair_dev, dev_max_len = read_langs(
dev_file_path, gating_dict, slots, "dev", lang, mem_lang, load_lang, config
)
dev_loader = get_seq(
pair_dev,
lang,
mem_lang,
eval_batch,
config["n_gpus"],
shuffle=False,
config=config,
)
pair_test, test_max_len = read_langs(
test_file_path, gating_dict, slots, "tests", lang, mem_lang, load_lang, config
)
test_loader = get_seq(
pair_test,
lang,
mem_lang,
eval_batch,
config["n_gpus"],
shuffle=False,
config=config,
)
    # cache intermediate data
if (
not (os.path.exists(lang_file_path) and os.path.exists(mem_lang_file_path))
or config["clean_cache"]
):
print("Dumping lang files...")
with open(lang_file_path, "wb") as f:
pickle.dump(lang, f)
with open(mem_lang_file_path, "wb") as f:
pickle.dump(mem_lang, f)
emb_dump_path = os.path.join(config["data_path"], f"emb{len(lang.index2word)}")
if (not os.path.exists(emb_dump_path) or config["clean_cache"]) and config[
"load_embedding"
]:
dump_pretrained_emb(
config["orig_pretrained_embedding"], lang.index2word, emb_dump_path
)
max_dialogue_history_length = max(train_max_len, dev_max_len, test_max_len) + 1
print("Read %s pairs train" % len(pair_train))
print("Read %s pairs dev" % len(pair_dev))
print("Read %s pairs tests" % len(pair_test))
print("Vocab_size: %s " % lang.n_words)
print("Vocab_size Training %s" % train_vocab_size)
print("Vocab_size Belief %s" % mem_lang.n_words)
print("Max. length of dialog words for RNN: %s " % max_dialogue_history_length)
langs = [lang, mem_lang]
# dataloader, dataloader, dataloader, dataloader, List[Lang], List[Dict[str, str]], Dict[str, int], int
return train_loader, dev_loader, test_loader, langs, slots, gating_dict
def masked_cross_entropy_for_value(logits, target, mask):
# logits: b * |s| * m * |v|
# target: b * |s| * m
# mask: b * |s|
logits_flat = logits.view(-1, logits.size(-1))
# print(logits_flat.size())
log_probs_flat = torch.log(logits_flat)
# print("log_probs_flat", log_probs_flat)
target_flat = target.view(-1, 1)
# print("target_flat", target_flat)
losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
losses = losses_flat.view(*target.size()) # b * |s| * m
loss = masking(losses, mask)
return loss
def masking(losses, mask):
mask_ = []
batch_size = mask.size(0)
max_len = losses.size(2)
for si in range(mask.size(1)):
seq_range = torch.arange(0, max_len).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
if mask[:, si].is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = (
mask[:, si].unsqueeze(1).expand_as(seq_range_expand)
) # (bs, max_len)
mask_.append((seq_range_expand < seq_length_expand))
mask_ = torch.stack(mask_)
mask_ = mask_.transpose(0, 1) # (bs, num_slots, max_len)
if losses.is_cuda:
mask_ = mask_.cuda()
losses = losses * mask_.float()
loss = losses.sum() / (mask_.sum().float())
return loss
def reformat_belief_state(raw_state):
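    # Convert flat "domain-slot-value" strings back into the nested belief_state
    # structure of {"slots": [["domain-slot", "value"]]} entries.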
belief_state = []
for item in raw_state:
dsv_triple = item.split("-", 2)
domain = dsv_triple[0].strip()
slot = dsv_triple[1].strip()
value = dsv_triple[2].strip()
belief_state.append({"slots": [[domain + "-" + slot, value]]})
return belief_state
def compute_acc(gold, pred, slot_temp):
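    # Slot accuracy over the full ontology: every missed gold slot and every spurious
    # prediction counts as one error out of len(slot_temp) slots.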
    # TODO: why not just compute this directly with a set intersection?
miss_gold = 0
miss_slot = []
for g in gold:
if g not in pred:
miss_gold += 1
miss_slot.append(g.rsplit("-", 1)[0]) # g=domain-slot-value
wrong_pred = 0
for p in pred:
if p not in gold and p.rsplit("-", 1)[0] not in miss_slot:
wrong_pred += 1
acc_total = len(slot_temp)
    # slot_temp contains all 80 domain-slot pairs, but a single turn usually involves only a few, so this calculation seems questionable
acc = len(slot_temp) - miss_gold - wrong_pred
acc = acc / float(acc_total)
return acc
def compute_prf(gold, pred):
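    # Turn-level precision/recall/F1 between gold and predicted "domain-slot-value" triples;
    # an empty gold state scores 1.0 only when the prediction is also empty.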
tp, fp, fn = 0, 0, 0
if len(gold) != 0:
count = 1
for g in gold:
if g in pred:
tp += 1
else:
fn += 1
for p in pred:
if p not in gold:
fp += 1
precision = tp / float(tp + fp) if (tp + fp) != 0 else 0
recall = tp / float(tp + fn) if (tp + fn) != 0 else 0
f1 = (
2 * precision * recall / float(precision + recall)
if (precision + recall) != 0
else 0
)
else:
if not pred:
precision, recall, f1, count = 1, 1, 1, 1
else:
precision, recall, f1, count = 0, 0, 0, 1
return f1, recall, precision, count
def evaluate_metrics(all_prediction, from_which, slot_temp):
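    # Aggregate joint goal accuracy, per-turn slot accuracy and F1 over every dialogue turn.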
total, turn_acc, joint_acc, f1_pred, f1_count = 0, 0, 0, 0, 0
for d, v in all_prediction.items():
for t in range(len(v)):
cv = v[t]
if set(cv["turn_belief"]) == set(cv[from_which]):
joint_acc += 1
total += 1
# Compute prediction slot accuracy
temp_acc = compute_acc(
set(cv["turn_belief"]), set(cv[from_which]), slot_temp
)
turn_acc += temp_acc
# Compute prediction joint F1 score
temp_f1, temp_r, temp_p, count = compute_prf(
set(cv["turn_belief"]), set(cv[from_which])
)
f1_pred += temp_f1
f1_count += count
joint_acc_score = joint_acc / float(total) if total != 0 else 0
turn_acc_score = turn_acc / float(total) if total != 0 else 0
f1_score = f1_pred / float(f1_count) if f1_count != 0 else 0
return joint_acc_score, f1_score, turn_acc_score
| [
"os.path.exists",
"random.uniform",
"data.crosswoz.data_process.dst.trade_preprocess.Lang",
"torch.log",
"pickle.dump",
"torch.stack",
"os.path.join",
"tqdm.tqdm",
"pickle.load",
"collections.Counter",
"data.crosswoz.data_process.dst.trade_preprocess.get_seq",
"collections.defaultdict",
"dat... | [((2716, 2732), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2727, 2732), False, 'from collections import defaultdict, Counter\n'), ((7173, 7203), 'data.crosswoz.data_process.dst.trade_preprocess.get_slot_information', 'get_slot_information', (['ontology'], {}), '(ontology)\n', (7193, 7203), False, 'from data.crosswoz.data_process.dst.trade_preprocess import EXPERIMENT_DOMAINS, Lang, get_seq, get_slot_information\n'), ((7561, 7605), 'os.path.join', 'os.path.join', (["config['data_path']", 'lang_name'], {}), "(config['data_path'], lang_name)\n", (7573, 7605), False, 'import os\n'), ((7631, 7679), 'os.path.join', 'os.path.join', (["config['data_path']", 'mem_lang_name'], {}), "(config['data_path'], mem_lang_name)\n", (7643, 7679), False, 'import os\n'), ((8466, 8574), 'data.crosswoz.data_process.dst.trade_preprocess.get_seq', 'get_seq', (['pair_train', 'lang', 'mem_lang', "config['batch_size']", "config['n_gpus']"], {'shuffle': '(True)', 'config': 'config'}), "(pair_train, lang, mem_lang, config['batch_size'], config['n_gpus'],\n shuffle=True, config=config)\n", (8473, 8574), False, 'from data.crosswoz.data_process.dst.trade_preprocess import EXPERIMENT_DOMAINS, Lang, get_seq, get_slot_information\n'), ((8818, 8916), 'data.crosswoz.data_process.dst.trade_preprocess.get_seq', 'get_seq', (['pair_dev', 'lang', 'mem_lang', 'eval_batch', "config['n_gpus']"], {'shuffle': '(False)', 'config': 'config'}), "(pair_dev, lang, mem_lang, eval_batch, config['n_gpus'], shuffle=\n False, config=config)\n", (8825, 8916), False, 'from data.crosswoz.data_process.dst.trade_preprocess import EXPERIMENT_DOMAINS, Lang, get_seq, get_slot_information\n'), ((9129, 9228), 'data.crosswoz.data_process.dst.trade_preprocess.get_seq', 'get_seq', (['pair_test', 'lang', 'mem_lang', 'eval_batch', "config['n_gpus']"], {'shuffle': '(False)', 'config': 'config'}), "(pair_test, lang, mem_lang, eval_batch, config['n_gpus'], shuffle=\n False, config=config)\n", (9136, 9228), False, 'from data.crosswoz.data_process.dst.trade_preprocess import EXPERIMENT_DOMAINS, Lang, get_seq, get_slot_information\n'), ((10883, 10905), 'torch.log', 'torch.log', (['logits_flat'], {}), '(logits_flat)\n', (10892, 10905), False, 'import torch\n'), ((11763, 11781), 'torch.stack', 'torch.stack', (['mask_'], {}), '(mask_)\n', (11774, 11781), False, 'import torch\n'), ((1809, 1832), 'json.dump', 'json.dump', (['embedding', 'f'], {}), '(embedding, f)\n', (1818, 1832), False, 'import json\n'), ((2842, 2854), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2851, 2854), False, 'import json\n'), ((6675, 6696), 'collections.Counter', 'Counter', (['gate_counter'], {}), '(gate_counter)\n', (6682, 6696), False, 'from collections import defaultdict, Counter\n'), ((11048, 11102), 'torch.gather', 'torch.gather', (['log_probs_flat'], {'dim': '(1)', 'index': 'target_flat'}), '(log_probs_flat, dim=1, index=target_flat)\n', (11060, 11102), False, 'import torch\n'), ((379, 423), 'bz2.open', 'bz2.open', (['vector_path', '"""rt"""'], {'encoding': '"""utf8"""'}), "(vector_path, 'rt', encoding='utf8')\n", (387, 423), False, 'import bz2\n'), ((514, 569), 'tqdm.tqdm', 'tqdm', (['lines[1:]'], {'desc': '"""Generating pretrained embedding"""'}), "(lines[1:], desc='Generating pretrained embedding')\n", (518, 569), False, 'from tqdm import tqdm\n'), ((7719, 7749), 'os.path.exists', 'os.path.exists', (['lang_file_path'], {}), '(lang_file_path)\n', (7733, 7749), False, 'import os\n'), ((7754, 7788), 'os.path.exists', 'os.path.exists', 
(['mem_lang_file_path'], {}), '(mem_lang_file_path)\n', (7768, 7788), False, 'import os\n'), ((7961, 7975), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7972, 7975), False, 'import pickle\n'), ((8049, 8063), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8060, 8063), False, 'import pickle\n'), ((8099, 8111), 'data.crosswoz.data_process.dst.trade_preprocess.Lang', 'Lang', (['config'], {}), '(config)\n', (8103, 8111), False, 'from data.crosswoz.data_process.dst.trade_preprocess import EXPERIMENT_DOMAINS, Lang, get_seq, get_slot_information\n'), ((8113, 8125), 'data.crosswoz.data_process.dst.trade_preprocess.Lang', 'Lang', (['config'], {}), '(config)\n', (8117, 8125), False, 'from data.crosswoz.data_process.dst.trade_preprocess import EXPERIMENT_DOMAINS, Lang, get_seq, get_slot_information\n'), ((9531, 9551), 'pickle.dump', 'pickle.dump', (['lang', 'f'], {}), '(lang, f)\n', (9542, 9551), False, 'import pickle\n'), ((9614, 9638), 'pickle.dump', 'pickle.dump', (['mem_lang', 'f'], {}), '(mem_lang, f)\n', (9625, 9638), False, 'import pickle\n'), ((9323, 9353), 'os.path.exists', 'os.path.exists', (['lang_file_path'], {}), '(lang_file_path)\n', (9337, 9353), False, 'import os\n'), ((9358, 9392), 'os.path.exists', 'os.path.exists', (['mem_lang_file_path'], {}), '(mem_lang_file_path)\n', (9372, 9392), False, 'import os\n'), ((9736, 9765), 'os.path.exists', 'os.path.exists', (['emb_dump_path'], {}), '(emb_dump_path)\n', (9750, 9765), False, 'import os\n'), ((11371, 11395), 'torch.arange', 'torch.arange', (['(0)', 'max_len'], {}), '(0, max_len)\n', (11383, 11395), False, 'import torch\n'), ((999, 1024), 'random.uniform', 'random.uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (1013, 1024), False, 'import random\n')] |
from django.conf.urls import url
from django.views.generic import TemplateView
from reports import views
urlpatterns = [
url(r'balance/$', views.balance, name='report_balance'),
url(r'performance/$', views.performance, name='report_performance'),
url(r'last_activity/$', views.last_activity, name='last_activity'),
url(r'collection/$', views.CollectionListView.as_view(),
name='report_collection'),
url(r'saleschart/$', TemplateView.as_view(
template_name='reports/sales_chart.html'), name='chart_sales'),
url(r'paymentchart/$', TemplateView.as_view(
template_name='reports/payment_chart.html'), name='chart_payment'),
url(r'callchart/$', TemplateView.as_view(
template_name='reports/calls_chart.html'), name='chart_call'),
]
| [
"django.views.generic.TemplateView.as_view",
"django.conf.urls.url",
"reports.views.CollectionListView.as_view"
] | [((128, 182), 'django.conf.urls.url', 'url', (['"""balance/$"""', 'views.balance'], {'name': '"""report_balance"""'}), "('balance/$', views.balance, name='report_balance')\n", (131, 182), False, 'from django.conf.urls import url\n'), ((189, 255), 'django.conf.urls.url', 'url', (['"""performance/$"""', 'views.performance'], {'name': '"""report_performance"""'}), "('performance/$', views.performance, name='report_performance')\n", (192, 255), False, 'from django.conf.urls import url\n'), ((262, 327), 'django.conf.urls.url', 'url', (['"""last_activity/$"""', 'views.last_activity'], {'name': '"""last_activity"""'}), "('last_activity/$', views.last_activity, name='last_activity')\n", (265, 327), False, 'from django.conf.urls import url\n'), ((355, 389), 'reports.views.CollectionListView.as_view', 'views.CollectionListView.as_view', ([], {}), '()\n', (387, 389), False, 'from reports import views\n'), ((451, 513), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""reports/sales_chart.html"""'}), "(template_name='reports/sales_chart.html')\n", (471, 513), False, 'from django.views.generic import TemplateView\n'), ((572, 636), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""reports/payment_chart.html"""'}), "(template_name='reports/payment_chart.html')\n", (592, 636), False, 'from django.views.generic import TemplateView\n'), ((694, 756), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""reports/calls_chart.html"""'}), "(template_name='reports/calls_chart.html')\n", (714, 756), False, 'from django.views.generic import TemplateView\n')] |
from pycam.Utils.events import get_event_handler, get_mainloop
class ProgressContext:
def __init__(self, title):
self._title = title
self._progress = get_event_handler().get("progress")
def __enter__(self):
if self._progress:
self._progress.update(text=self._title, percent=0)
# start an indefinite pulse (until we receive more details)
self._progress.update()
else:
self._progress = None
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._progress:
self._progress.finish()
def update(self, *args, **kwargs):
mainloop = get_mainloop()
if mainloop is None:
return False
mainloop.update()
if self._progress:
return self._progress.update(*args, **kwargs)
else:
return False
def set_multiple(self, count, base_text=None):
if self._progress:
self._progress.set_multiple(count, base_text=base_text)
def update_multiple(self):
if self._progress:
self._progress.update_multiple()
| [
"pycam.Utils.events.get_mainloop",
"pycam.Utils.events.get_event_handler"
] | [((680, 694), 'pycam.Utils.events.get_mainloop', 'get_mainloop', ([], {}), '()\n', (692, 694), False, 'from pycam.Utils.events import get_event_handler, get_mainloop\n'), ((173, 192), 'pycam.Utils.events.get_event_handler', 'get_event_handler', ([], {}), '()\n', (190, 192), False, 'from pycam.Utils.events import get_event_handler, get_mainloop\n')] |
#! /usr/bin/env python
# License: Apache 2.0. See LICENSE file in root directory.
#
# For simple behaviors that can run syncronously, Python provides
# a simple way to implement this. Add the work of your behavior
# in the execute_cb callback
#
import rospy
import actionlib
import behavior_common.msg
import time
import random
from std_msgs.msg import Float64
from std_msgs.msg import UInt16
from std_msgs.msg import UInt32
from std_msgs.msg import Bool
from std_msgs.msg import Empty
# for talking
import actionlib
import actionlib.action_client
import audio_and_speech_common.msg
# for servos
#from sheldon_servos.head_servo_publishers import *
#from sheldon_servos.right_arm_servo_publishers import *
#from sheldon_servos.left_arm_servo_publishers import *
from sheldon_servos.standard_servo_positions import *
from sheldon_servos.set_servo_speed import *
from sheldon_servos.set_servo_torque import *
class BehaviorAction(object):
_feedback = behavior_common.msg.behaviorFeedback()
_result = behavior_common.msg.behaviorResult()
def __init__(self, name):
self._action_name = name
self._as = actionlib.SimpleActionServer(self._action_name, behavior_common.msg.behaviorAction, execute_cb=self.execute_cb, auto_start = False)
self._as.start()
rospy.loginfo('%s: Initializing Sleep behavior service' % (self._action_name))
# enable/disable microphone when robot is moving servos.
# (Note system_enable vs. speech_enable vs. user_enable)
self.mic_system_enable_pub = rospy.Publisher('/microphone/system_enable', Bool, queue_size=1)
def execute_cb(self, goal):
rospy.loginfo('%s: Executing behavior' % (self._action_name))
rospy.loginfo( "Param1: '%s'", goal.param1)
rospy.loginfo( "Param2: '%s'", goal.param2)
# =========== Behavior Implementation ==============
success = True
r = rospy.Rate(1.0)
pub_eye_cmd = rospy.Publisher('/head/eye_cmd', UInt16, queue_size=10)
pub_light_mode = rospy.Publisher('/arm_led_mode', UInt16, queue_size=10)
pub_ear_cmd = rospy.Publisher('/head/ear_cmd', UInt16, queue_size=10)
rospy.loginfo("Waiting for speech server (press ctrl-c to cancel at anytime)")
client = actionlib.SimpleActionClient("/speech_service", audio_and_speech_common.msg.speechAction)
client.wait_for_server()
goal = audio_and_speech_common.msg.speechGoal(text_to_speak="moving into shipping position")
client.send_goal(goal)
result = client.wait_for_result() # wait for speech to complete
rospy.loginfo("Speech goal returned result: %d", result)
# mute the microphone
self.mic_system_enable_pub.publish(False)
# Move head and arms to sleep position
SetServoTorque(0.8, all_servo_joints)
SetServoSpeed(0.5, head_joints)
SetServoSpeed(1.0, right_arm_joints)
SetServoSpeed(1.0, left_arm_joints)
# Move elbows at fast speed to lock
SetSingleServoSpeed(2.0, "right_arm_elbow_bend_joint")
SetSingleServoSpeed(2.0, "left_arm_elbow_bend_joint")
time.sleep(0.5)
all_sleep() # Move all servos to sleep position 1
time.sleep(2)
# lock arms
pub_right_arm_elbow_bend.publish(3.00)
pub_left_arm_elbow_bend.publish(3.13)
time.sleep(1)
# Move arms forward, so they point down after waist moves
#pub_right_arm_shoulder_rotate.publish(0.78)
#pub_left_arm_shoulder_rotate.publish(0.78)
# Turn off servo torque
rospy.loginfo("Turning off servo torque and eyes")
SetServoTorque(0.0, all_servo_joints)
pub_eye_cmd.publish(0) # 0 = Turn eyes off
pub_ear_cmd.publish(0) # 0 = Turn ear lights off
pub_light_mode.publish(0) # 0 = Turn lights off
# Move Waist into position
time.sleep(3)
waist_full_down()
time.sleep(5.0) # seconds
# Turn off servo torque
#SetServoTorque(0.0, all_servo_joints)
#time.sleep(5.0) # seconds
rospy.loginfo(' Ship Complete. Running until some other behavior preempts, to suppress Idle behavior...')
#rospy.loginfo('%s: Running behavior' % (self._action_name))
self._feedback.running = True
self._as.publish_feedback(self._feedback)
# Run forever to keep Idle behavior from running.
        # may be preempted by any other behavior (such as wake)
while True:
# check that preempt has not been requested by the client
if self._as.is_preempt_requested():
rospy.loginfo('%s: Behavior preempted' % self._action_name)
self._as.set_preempted()
success = True
break
r.sleep()
if success:
rospy.loginfo('%s: Behavior complete' % self._action_name)
self._as.set_succeeded(self._result)
# un-mute the microphone
self.mic_system_enable_pub.publish(True)
if __name__ == '__main__':
rospy.init_node('ship_behavior')
server = BehaviorAction(rospy.get_name())
rospy.spin()
| [
"rospy.init_node",
"actionlib.SimpleActionServer",
"time.sleep",
"rospy.Rate",
"rospy.spin",
"rospy.get_name",
"rospy.Publisher",
"rospy.loginfo",
"actionlib.SimpleActionClient"
] | [((5135, 5167), 'rospy.init_node', 'rospy.init_node', (['"""ship_behavior"""'], {}), "('ship_behavior')\n", (5150, 5167), False, 'import rospy\n'), ((5218, 5230), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (5228, 5230), False, 'import rospy\n'), ((1132, 1266), 'actionlib.SimpleActionServer', 'actionlib.SimpleActionServer', (['self._action_name', 'behavior_common.msg.behaviorAction'], {'execute_cb': 'self.execute_cb', 'auto_start': '(False)'}), '(self._action_name, behavior_common.msg.\n behaviorAction, execute_cb=self.execute_cb, auto_start=False)\n', (1160, 1266), False, 'import actionlib\n'), ((1297, 1373), 'rospy.loginfo', 'rospy.loginfo', (["('%s: Initializing Sleep behavior service' % self._action_name)"], {}), "('%s: Initializing Sleep behavior service' % self._action_name)\n", (1310, 1373), False, 'import rospy\n'), ((1545, 1609), 'rospy.Publisher', 'rospy.Publisher', (['"""/microphone/system_enable"""', 'Bool'], {'queue_size': '(1)'}), "('/microphone/system_enable', Bool, queue_size=1)\n", (1560, 1609), False, 'import rospy\n'), ((1665, 1724), 'rospy.loginfo', 'rospy.loginfo', (["('%s: Executing behavior' % self._action_name)"], {}), "('%s: Executing behavior' % self._action_name)\n", (1678, 1724), False, 'import rospy\n'), ((1736, 1778), 'rospy.loginfo', 'rospy.loginfo', (['"""Param1: \'%s\'"""', 'goal.param1'], {}), '("Param1: \'%s\'", goal.param1)\n', (1749, 1778), False, 'import rospy\n'), ((1788, 1830), 'rospy.loginfo', 'rospy.loginfo', (['"""Param2: \'%s\'"""', 'goal.param2'], {}), '("Param2: \'%s\'", goal.param2)\n', (1801, 1830), False, 'import rospy\n'), ((1931, 1946), 'rospy.Rate', 'rospy.Rate', (['(1.0)'], {}), '(1.0)\n', (1941, 1946), False, 'import rospy\n'), ((1970, 2025), 'rospy.Publisher', 'rospy.Publisher', (['"""/head/eye_cmd"""', 'UInt16'], {'queue_size': '(10)'}), "('/head/eye_cmd', UInt16, queue_size=10)\n", (1985, 2025), False, 'import rospy\n'), ((2059, 2114), 'rospy.Publisher', 'rospy.Publisher', (['"""/arm_led_mode"""', 'UInt16'], {'queue_size': '(10)'}), "('/arm_led_mode', UInt16, queue_size=10)\n", (2074, 2114), False, 'import rospy\n'), ((2145, 2200), 'rospy.Publisher', 'rospy.Publisher', (['"""/head/ear_cmd"""', 'UInt16'], {'queue_size': '(10)'}), "('/head/ear_cmd', UInt16, queue_size=10)\n", (2160, 2200), False, 'import rospy\n'), ((2218, 2296), 'rospy.loginfo', 'rospy.loginfo', (['"""Waiting for speech server (press ctrl-c to cancel at anytime)"""'], {}), "('Waiting for speech server (press ctrl-c to cancel at anytime)')\n", (2231, 2296), False, 'import rospy\n'), ((2314, 2408), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['"""/speech_service"""', 'audio_and_speech_common.msg.speechAction'], {}), "('/speech_service', audio_and_speech_common.msg\n .speechAction)\n", (2342, 2408), False, 'import actionlib\n'), ((2650, 2706), 'rospy.loginfo', 'rospy.loginfo', (['"""Speech goal returned result: %d"""', 'result'], {}), "('Speech goal returned result: %d', result)\n", (2663, 2706), False, 'import rospy\n'), ((3188, 3203), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3198, 3203), False, 'import time\n'), ((3274, 3287), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3284, 3287), False, 'import time\n'), ((3410, 3423), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3420, 3423), False, 'import time\n'), ((3637, 3687), 'rospy.loginfo', 'rospy.loginfo', (['"""Turning off servo torque and eyes"""'], {}), "('Turning off servo torque and eyes')\n", (3650, 3687), False, 'import rospy\n'), ((3943, 3956), 
'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3953, 3956), False, 'import time\n'), ((3991, 4006), 'time.sleep', 'time.sleep', (['(5.0)'], {}), '(5.0)\n', (4001, 4006), False, 'import time\n'), ((4141, 4258), 'rospy.loginfo', 'rospy.loginfo', (['""" Ship Complete. Running until some other behavior preempts, to suppress Idle behavior..."""'], {}), "(\n ' Ship Complete. Running until some other behavior preempts, to suppress Idle behavior...'\n )\n", (4154, 4258), False, 'import rospy\n'), ((5196, 5212), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (5210, 5212), False, 'import rospy\n'), ((4903, 4961), 'rospy.loginfo', 'rospy.loginfo', (["('%s: Behavior complete' % self._action_name)"], {}), "('%s: Behavior complete' % self._action_name)\n", (4916, 4961), False, 'import rospy\n'), ((4683, 4742), 'rospy.loginfo', 'rospy.loginfo', (["('%s: Behavior preempted' % self._action_name)"], {}), "('%s: Behavior preempted' % self._action_name)\n", (4696, 4742), False, 'import rospy\n')] |
from collections import defaultdict, namedtuple
import torch
# When using the sliding window trick for long sequences,
# we take the representation of each token with maximal context.
# Take average of the BERT embeddings of these BPE sub-tokens
# as the embedding for the word.
# Take *weighted* average of the word embeddings through all layers.
def extract_bert_ques_hidden_states(all_encoder_layers, max_doc_len, features, weighted_avg=False):
num_layers, batch_size, turn_size, num_chunk, max_token_len, bert_dim = all_encoder_layers.shape
out_features = torch.Tensor(num_layers, batch_size, turn_size, max_doc_len, bert_dim).fill_(0)
device = all_encoder_layers.get_device() if all_encoder_layers.is_cuda else None
if device is not None:
out_features = out_features.to(device)
token_count = []
# Map BERT tokens to doc words
for i, ex_feature in enumerate(features): # Example
ex_token_count = []
for t, para_feature in enumerate(ex_feature): # Turn
para_token_count = defaultdict(int)
for j, chunk_feature in enumerate(para_feature): # Chunk
for k in chunk_feature.token_is_max_context: # Token
if chunk_feature.token_is_max_context[k]:
doc_word_idx = chunk_feature.token_to_orig_map[k]
out_features[:, i, t, doc_word_idx] += all_encoder_layers[:, i, t, j, k]
para_token_count[doc_word_idx] += 1
ex_token_count.append(para_token_count)
token_count.append(ex_token_count)
for i, ex_token_count in enumerate(token_count):
for t, para_token_count in enumerate(ex_token_count):
for doc_word_idx, count in para_token_count.items():
out_features[:, i, t, doc_word_idx] /= count
# Average through all layers
if not weighted_avg:
out_features = torch.mean(out_features, 0)
return out_features
def extract_bert_ctx_hidden_states(all_encoder_layers, max_doc_len, features, weighted_avg=False):
num_layers, batch_size, num_chunk, max_token_len, bert_dim = all_encoder_layers.shape
out_features = torch.Tensor(num_layers, batch_size, max_doc_len, bert_dim).fill_(0)
device = all_encoder_layers.get_device() if all_encoder_layers.is_cuda else None
if device is not None:
out_features = out_features.to(device)
token_count = []
# Map BERT tokens to doc words
for i, ex_feature in enumerate(features): # Example
ex_token_count = defaultdict(int)
for j, chunk_feature in enumerate(ex_feature): # Chunk
for k in chunk_feature.token_is_max_context: # Token
if chunk_feature.token_is_max_context[k]:
doc_word_idx = chunk_feature.token_to_orig_map[k]
out_features[:, i, doc_word_idx] += all_encoder_layers[:, i, j, k]
ex_token_count[doc_word_idx] += 1
token_count.append(ex_token_count)
for i, ex_token_count in enumerate(token_count):
for doc_word_idx, count in ex_token_count.items():
out_features[:, i, doc_word_idx] /= count
# Average through all layers
if not weighted_avg:
out_features = torch.mean(out_features, 0)
return out_features
def convert_text_to_bert_features(text, bert_tokenizer, max_seq_length, doc_stride):
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
tok_to_orig_index = []
all_doc_tokens = []
for (i, token) in enumerate(text):
sub_tokens = bert_tokenizer.wordpiece_tokenizer.tokenize(token.lower())
for sub_ in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_)
# The -2 accounts for [CLS] and [SEP]
max_tokens_for_doc = max_seq_length - 2
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
    # of up to our max length with a stride of `doc_stride`.
_DocSpan = namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
out_features = []
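    # Build one BertInputFeatures record per sliding-window chunk.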
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = bert_tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
feature = BertInputFeatures(
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids)
out_features.append(feature)
return out_features
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
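    # Score each candidate span by min(left context, right context), with a small bonus for longer spans.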
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
class BertInputFeatures(object):
"""A single set of BERT features of data."""
def __init__(self,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids):
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
| [
"torch.mean",
"collections.namedtuple",
"collections.defaultdict",
"torch.Tensor"
] | [((4069, 4111), 'collections.namedtuple', 'namedtuple', (['"""DocSpan"""', "['start', 'length']"], {}), "('DocSpan', ['start', 'length'])\n", (4079, 4111), False, 'from collections import defaultdict, namedtuple\n'), ((1781, 1808), 'torch.mean', 'torch.mean', (['out_features', '(0)'], {}), '(out_features, 0)\n', (1791, 1808), False, 'import torch\n'), ((2384, 2400), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2395, 2400), False, 'from collections import defaultdict, namedtuple\n'), ((3018, 3045), 'torch.mean', 'torch.mean', (['out_features', '(0)'], {}), '(out_features, 0)\n', (3028, 3045), False, 'import torch\n'), ((568, 638), 'torch.Tensor', 'torch.Tensor', (['num_layers', 'batch_size', 'turn_size', 'max_doc_len', 'bert_dim'], {}), '(num_layers, batch_size, turn_size, max_doc_len, bert_dim)\n', (580, 638), False, 'import torch\n'), ((1012, 1028), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1023, 1028), False, 'from collections import defaultdict, namedtuple\n'), ((2036, 2095), 'torch.Tensor', 'torch.Tensor', (['num_layers', 'batch_size', 'max_doc_len', 'bert_dim'], {}), '(num_layers, batch_size, max_doc_len, bert_dim)\n', (2048, 2095), False, 'import torch\n')] |
from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('',
(r'^$', 'suggestions.views.list_all'),
(r'^post/$', 'suggestions.views.add_suggestion'),
(r'^vote/(?P<suggestion_id>.*)/$', 'suggestions.views.add_vote'),
(r'^unvote/(?P<suggestion_id>.*)/$', 'suggestions.views.remove_vote'),
(r'^close/(?P<suggestion_id>.*)/$', 'suggestions.views.close'),
)
| [
"django.conf.urls.defaults.patterns"
] | [((76, 389), 'django.conf.urls.defaults.patterns', 'patterns', (['""""""', "('^$', 'suggestions.views.list_all')", "('^post/$', 'suggestions.views.add_suggestion')", "('^vote/(?P<suggestion_id>.*)/$', 'suggestions.views.add_vote')", "('^unvote/(?P<suggestion_id>.*)/$', 'suggestions.views.remove_vote')", "('^close/(?P<suggestion_id>.*)/$', 'suggestions.views.close')"], {}), "('', ('^$', 'suggestions.views.list_all'), ('^post/$',\n 'suggestions.views.add_suggestion'), ('^vote/(?P<suggestion_id>.*)/$',\n 'suggestions.views.add_vote'), ('^unvote/(?P<suggestion_id>.*)/$',\n 'suggestions.views.remove_vote'), ('^close/(?P<suggestion_id>.*)/$',\n 'suggestions.views.close'))\n", (84, 389), False, 'from django.conf.urls.defaults import patterns, include, url\n')] |
import json
import os.path
class StorageHelper:
__document = None
__path = None
@staticmethod
def write(key, data):
StorageHelper.__init()
StorageHelper.__document[key] = json.dumps(data)
StorageHelper.__store()
@staticmethod
def read(key):
StorageHelper.__init()
data = StorageHelper.__document[key]
if data is None:
return None
return json.loads(data)
@staticmethod
def __init():
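        # Lazily resolve the storage path (LOCAL_STORAGE env var or /data) and load the JSON document on first use.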
if StorageHelper.__path is None:
if 'LOCAL_STORAGE' in os.environ:
StorageHelper.__path = os.environ['LOCAL_STORAGE'] + '/document.json'
else:
StorageHelper.__path = '/data/document.json'
if StorageHelper.__document is None:
if os.path.isfile(StorageHelper.__path) is False:
StorageHelper.__document = dict()
else:
file = open(StorageHelper.__path)
StorageHelper.__document = json.load(file)
@staticmethod
def __store():
with open(StorageHelper.__path, 'w+') as file:
json.dump(StorageHelper.__document, file)
| [
"json.load",
"json.loads",
"json.dumps",
"json.dump"
] | [((206, 222), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (216, 222), False, 'import json\n'), ((433, 449), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (443, 449), False, 'import json\n'), ((1129, 1170), 'json.dump', 'json.dump', (['StorageHelper.__document', 'file'], {}), '(StorageHelper.__document, file)\n', (1138, 1170), False, 'import json\n'), ((1008, 1023), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1017, 1023), False, 'import json\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base utilities for loading datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import time
import shutil
from six.moves import urllib
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
def retry(initial_delay,
max_delay,
factor=2.0,
jitter=0.25,
is_retriable=None):
"""Simple decorator for wrapping retriable functions.
Args:
initial_delay: the initial delay.
factor: each subsequent retry, the delay is multiplied by this value.
(must be >= 1).
jitter: to avoid lockstep, the returned delay is multiplied by a random
number between (1-jitter) and (1+jitter). To add a 20% jitter, set
jitter = 0.2. Must be < 1.
max_delay: the maximum delay allowed (actual max is
      max_delay * (1 + jitter)).
is_retriable: (optional) a function that takes an Exception as an argument
and returns true if retry should be applied.
"""
if factor < 1:
raise ValueError('factor must be >= 1; was %f' % (factor,))
if jitter >= 1:
raise ValueError('jitter must be < 1; was %f' % (jitter,))
# Generator to compute the individual delays
def delays():
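    # Exponential backoff: each yielded delay is randomly jittered, and the base delay grows by `factor`
    # until it exceeds max_delay.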
delay = initial_delay
while delay <= max_delay:
yield delay * random.uniform(1 - jitter, 1 + jitter)
delay *= factor
def wrap(fn):
"""Wrapper function factory invoked by decorator magic."""
def wrapped_fn(*args, **kwargs):
"""The actual wrapper function that applies the retry logic."""
for delay in delays():
try:
return fn(*args, **kwargs)
        except Exception as e:  # pylint: disable=broad-except
if is_retriable is None:
continue
if is_retriable(e):
time.sleep(delay)
else:
raise
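      # All retry delays are exhausted: make one final attempt and let any exception propagate.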
return fn(*args, **kwargs)
return wrapped_fn
return wrap
_RETRIABLE_ERRNOS = {
110, # Connection timed out [socket.py]
}
def _is_retriable(e):
return isinstance(e, IOError) and e.errno in _RETRIABLE_ERRNOS
@retry(initial_delay=1.0, max_delay=16.0, is_retriable=_is_retriable)
def urlretrieve_with_retry(url, filename=None):
return urllib.request.urlretrieve(url, filename)
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not os.path.exists(work_directory):
os.makedirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
temp_file_name, _ = urlretrieve_with_retry(source_url)
shutil.copy(temp_file_name, filepath)
size = os.path.getsize(filepath)
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
| [
"os.path.exists",
"os.path.getsize",
"collections.namedtuple",
"random.uniform",
"os.makedirs",
"os.path.join",
"time.sleep",
"six.moves.urllib.request.urlretrieve",
"shutil.copy"
] | [((954, 1007), 'collections.namedtuple', 'collections.namedtuple', (['"""Dataset"""', "['data', 'target']"], {}), "('Dataset', ['data', 'target'])\n", (976, 1007), False, 'import collections\n'), ((1019, 1086), 'collections.namedtuple', 'collections.namedtuple', (['"""Datasets"""', "['train', 'validation', 'test']"], {}), "('Datasets', ['train', 'validation', 'test'])\n", (1041, 1086), False, 'import collections\n'), ((3223, 3264), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'filename'], {}), '(url, filename)\n', (3249, 3264), False, 'from six.moves import urllib\n'), ((3736, 3774), 'os.path.join', 'os.path.join', (['work_directory', 'filename'], {}), '(work_directory, filename)\n', (3748, 3774), False, 'import os\n'), ((3653, 3683), 'os.path.exists', 'os.path.exists', (['work_directory'], {}), '(work_directory)\n', (3667, 3683), False, 'import os\n'), ((3693, 3720), 'os.makedirs', 'os.makedirs', (['work_directory'], {}), '(work_directory)\n', (3704, 3720), False, 'import os\n'), ((3786, 3810), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (3800, 3810), False, 'import os\n'), ((3883, 3920), 'shutil.copy', 'shutil.copy', (['temp_file_name', 'filepath'], {}), '(temp_file_name, filepath)\n', (3894, 3920), False, 'import shutil\n'), ((3936, 3961), 'os.path.getsize', 'os.path.getsize', (['filepath'], {}), '(filepath)\n', (3951, 3961), False, 'import os\n'), ((2184, 2222), 'random.uniform', 'random.uniform', (['(1 - jitter)', '(1 + jitter)'], {}), '(1 - jitter, 1 + jitter)\n', (2198, 2222), False, 'import random\n'), ((2773, 2790), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (2783, 2790), False, 'import time\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import sys
import argparse
import os
import re
import sqlite3
import requests
import bs4
from termcolor import colored
# Python2 compatibility
if sys.version_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf-8')
# Default database path is ~/.iSearch.
DEFAULT_PATH = os.path.join(os.path.expanduser('~'), '.iSearch')
CREATE_TABLE_WORD = '''
CREATE TABLE IF NOT EXISTS Word
(
name TEXT PRIMARY KEY,
expl TEXT,
pr INT DEFAULT 1,
aset CHAR[1],
addtime TIMESTAMP NOT NULL DEFAULT (DATETIME('NOW', 'LOCALTIME'))
)
'''
def get_text(url):
my_headers = {
'Accept': 'text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN, zh;q=0.8',
'Upgrade-Insecure-Requests': '1',
'Host': 'dict.youdao.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/48.0.2564.116 Safari/537.36'
}
res = requests.get(url, headers=my_headers)
data = res.text
soup = bs4.BeautifulSoup(data, 'html.parser')
expl = ''
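    # Scrape each section of the Youdao result page (Collins, phrases, synonyms, ...) and append it to expl.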
# -----------------collins-----------------------
collins = soup.find('div', id="collinsResult")
ls1 = []
if collins:
for s in collins.descendants:
if isinstance(s, bs4.element.NavigableString):
if s.strip():
ls1.append(s.strip())
if ls1[1].startswith('('):
# Phrase
expl = expl + ls1[0] + '\n'
line = ' '.join(ls1[2:])
else:
expl = expl + (' '.join(ls1[:2])) + '\n'
line = ' '.join(ls1[3:])
text1 = re.sub('例:', '\n\n例:', line)
text1 = re.sub(r'(\d+\. )', r'\n\n\1', text1)
text1 = re.sub(r'(\s+?→\s+)', r' → ', text1)
text1 = re.sub('(\")', '\'', text1)
text1 = re.sub('\s{10}\s+', '', text1)
expl += text1
# -----------------word_group--------------------
word_group = soup.find('div', id='word_group')
ls2 = []
if word_group:
for s in word_group.descendants:
if isinstance(s, bs4.element.NavigableString):
if s.strip():
ls2.append(s.strip())
text2 = ''
expl = expl + '\n\n' + '【词组】\n\n'
if len(ls2) < 3:
text2 = text2 + ls2[0] + ' ' + ls2[1] + '\n'
else:
for i, x in enumerate(ls2[:-3]):
if i % 2:
text2 = text2 + x + '\n'
else:
text2 = text2 + x + ' '
text2 = re.sub('(\")', '\'', text2)
expl += text2
# ------------------synonyms---------------------
synonyms = soup.find('div', id='synonyms')
ls3 = []
if synonyms:
for s in synonyms.descendants:
if isinstance(s, bs4.element.NavigableString):
if s.strip():
ls3.append(s.strip())
text3 = ''
tmp_flag = True
for i in ls3:
if '.' in i:
if tmp_flag:
tmp_flag = False
text3 = text3 + '\n' + i + '\n'
else:
text3 = text3 + '\n\n' + i + '\n'
else:
text3 = text3 + i
text3 = re.sub('(\")', '\'', text3)
expl = expl + '\n\n' + '【同近义词】\n'
expl += text3
# ------------------discriminate------------------
discriminate = soup.find('div', id='discriminate')
ls4 = []
if discriminate:
for s in discriminate.descendants:
if isinstance(s, bs4.element.NavigableString):
if s.strip():
ls4.append(s.strip())
expl = expl + '\n\n' + '【词语辨析】\n\n'
text4 = '-' * 40 + '\n' + format('↓ ' + ls4[0] + ' 的辨析 ↓', '^40s') + '\n' + '-' * 40 + '\n\n'
for x in ls4[1:]:
if x in '以上来源于':
break
if re.match(r'^[a-zA-Z]+$', x):
text4 = text4 + x + ' >> '
else:
text4 = text4 + x + '\n\n'
text4 = re.sub('(\")', '\'', text4)
expl += text4
# ------------------else------------------
# If no text found, then get other information
examples = soup.find('div', id='bilingual')
ls5 = []
if examples:
for s in examples.descendants:
if isinstance(s, bs4.element.NavigableString):
if s.strip():
ls5.append(s.strip())
text5 = '\n\n【双语例句】\n\n'
pt = re.compile(r'.*?\..*?\..*?|《.*》')
for word in ls5:
if not pt.match(word):
if word.endswith(('(', '。', '?', '!', '。”', ')')):
text5 = text5 + word + '\n\n'
continue
if u'\u4e00' <= word[0] <= u'\u9fa5':
if word != '更多双语例句':
text5 += word
else:
text5 = text5 + ' ' + word
text5 = re.sub('(\")', '\'', text5)
expl += text5
return expl
def colorful_print(raw):
'''print colorful text in terminal.'''
lines = raw.split('\n')
colorful = True
detail = False
for line in lines:
if line:
if colorful:
colorful = False
print(colored(line, 'white', 'on_green') + '\n')
continue
elif line.startswith('例'):
print(line + '\n')
continue
elif line.startswith('【'):
print(colored(line, 'white', 'on_green') + '\n')
detail = True
continue
if not detail:
print(colored(line + '\n', 'yellow'))
else:
print(colored(line, 'cyan') + '\n')
def normal_print(raw):
''' no colorful text, for output.'''
lines = raw.split('\n')
for line in lines:
if line:
print(line + '\n')
def search_online(word, printer=True):
'''search the word or phrase on http://dict.youdao.com.'''
    url = 'http://dict.youdao.com/w/%s' % word
expl = get_text(url)
if printer:
colorful_print(expl)
return expl
def search_database(word):
'''offline search.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
curs.execute(r'SELECT expl, pr FROM Word WHERE name LIKE "%s%%"' % word)
res = curs.fetchall()
if res:
print(colored(word + ' 在数据库中存在', 'white', 'on_green'))
print()
print(colored('★ ' * res[0][1], 'red'), colored('☆ ' * (5 - res[0][1]), 'yellow'), sep='')
colorful_print(res[0][0])
else:
print(colored(word + ' 不在本地,从有道词典查询', 'white', 'on_red'))
search_online(word)
input_msg = '若存入本地,请输入优先级(1~5) ,否则 Enter 跳过\n>>> '
if sys.version_info[0] == 2:
add_in_db_pr = raw_input(input_msg)
else:
add_in_db_pr = input(input_msg)
if add_in_db_pr and add_in_db_pr.isdigit():
if(int(add_in_db_pr) >= 1 and int(add_in_db_pr) <= 5):
add_word(word, int(add_in_db_pr))
print(colored('单词 {word} 已加入数据库中'.format(word=word), 'white', 'on_red'))
curs.close()
conn.close()
def add_word(word, default_pr):
'''add the word or phrase to database.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
curs.execute('SELECT expl, pr FROM Word WHERE name = "%s"' % word)
res = curs.fetchall()
if res:
print(colored(word + ' 在数据库中已存在,不需要添加', 'white', 'on_red'))
sys.exit()
try:
expl = search_online(word, printer=False)
curs.execute('insert into word(name, expl, pr, aset) values ("%s", "%s", %d, "%s")' % (
word, expl, default_pr, word[0].upper()))
except Exception as e:
print(colored('something\'s wrong, you can\'t add the word', 'white', 'on_red'))
print(e)
else:
conn.commit()
print(colored('%s has been inserted into database' % word, 'green'))
finally:
curs.close()
conn.close()
def delete_word(word):
'''delete the word or phrase from database.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
    # search first
curs.execute('SELECT expl, pr FROM Word WHERE name = "%s"' % word)
res = curs.fetchall()
if res:
try:
curs.execute('DELETE FROM Word WHERE name = "%s"' % word)
except Exception as e:
print(e)
else:
print(colored('%s has been deleted from database' % word, 'green'))
conn.commit()
finally:
curs.close()
conn.close()
else:
print(colored('%s not exists in the database' % word, 'white', 'on_red'))
def set_priority(word, pr):
'''
set the priority of the word.
    priority (from 1 to 5) is the importance of the word.
'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
curs.execute('SELECT expl, pr FROM Word WHERE name = "%s"' % word)
res = curs.fetchall()
if res:
try:
curs.execute('UPDATE Word SET pr= %d WHERE name = "%s"' % (pr, word))
except Exception as e:
print(colored('something\'s wrong, you can\'t reset priority', 'white', 'on_red'))
print(e)
else:
print(colored('the priority of %s has been reset to %s' % (word, pr), 'green'))
conn.commit()
finally:
curs.close()
conn.close()
else:
print(colored('%s not exists in the database' % word, 'white', 'on_red'))
def list_letter(aset, vb=False, output=False):
    '''list words by letter, from a-z (ignore case).'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
try:
if not vb:
curs.execute('SELECT name, pr FROM Word WHERE aset = "%s"' % aset)
else:
curs.execute('SELECT expl, pr FROM Word WHERE aset = "%s"' % aset)
except Exception as e:
print(colored('something\'s wrong, catlog is from A to Z', 'red'))
print(e)
else:
if not output:
print(colored(format(aset, '-^40s'), 'green'))
else:
print(format(aset, '-^40s'))
for line in curs.fetchall():
expl = line[0]
pr = line[1]
print('\n' + '=' * 40 + '\n')
if not output:
print(colored('★ ' * pr, 'red', ), colored('☆ ' * (5 - pr), 'yellow'), sep='')
colorful_print(expl)
else:
print('★ ' * pr + '☆ ' * (5 - pr))
normal_print(expl)
finally:
curs.close()
conn.close()
def list_priority(pr, vb=False, output=False):
'''
list words by priority, like this:
    1 : list words whose priority is 1,
    2+ : list words whose priority is larger than 2,
    3-4 : list words whose priority is from 3 to 4.
'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
try:
if not vb:
if len(pr) == 1:
curs.execute('SELECT name, pr FROM Word WHERE pr == %d ORDER by pr, name' % (int(pr[0])))
elif len(pr) == 2 and pr[1] == '+':
curs.execute('SELECT name, pr FROM Word WHERE pr >= %d ORDER by pr, name' % (int(pr[0])))
elif len(pr) == 3 and pr[1] == '-':
curs.execute('SELECT name, pr FROM Word WHERE pr >= %d AND pr<= % d ORDER by pr, name' % (
int(pr[0]), int(pr[2])))
else:
if len(pr) == 1:
curs.execute('SELECT expl, pr FROM Word WHERE pr == %d ORDER by pr, name' % (int(pr[0])))
elif len(pr) == 2 and pr[1] == '+':
curs.execute('SELECT expl, pr FROM Word WHERE pr >= %d ORDER by pr, name' % (int(pr[0])))
elif len(pr) == 3 and pr[1] == '-':
curs.execute('SELECT expl, pr FROM Word WHERE pr >= %d AND pr<= %d ORDER by pr, name' % (
int(pr[0]), int(pr[2])))
except Exception as e:
print(colored('something\'s wrong, priority must be 1-5', 'red'))
print(e)
else:
for line in curs.fetchall():
expl = line[0]
pr = line[1]
print('\n' + '=' * 40 + '\n')
if not output:
print(colored('★ ' * pr, 'red', ), colored('☆ ' * (5 - pr), 'yellow'), sep='')
colorful_print(expl)
else:
print('★ ' * pr + '☆ ' * (5 - pr))
normal_print(expl)
finally:
curs.close()
conn.close()
def list_latest(limit, vb=False, output=False):
    '''list words ordered by the time they were added to the database, most recent first.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
try:
if not vb:
curs.execute('SELECT name, pr, addtime FROM Word ORDER by datetime(addtime) DESC LIMIT %d' % limit)
else:
curs.execute('SELECT expl, pr, addtime FROM Word ORDER by datetime(addtime) DESC LIMIT %d' % limit)
except Exception as e:
print(e)
print(colored('something\'s wrong, please set the limit', 'red'))
else:
for line in curs.fetchall():
expl = line[0]
pr = line[1]
print('\n' + '=' * 40 + '\n')
if not output:
print(colored('★ ' * pr, 'red'), colored('☆ ' * (5 - pr), 'yellow'), sep='')
colorful_print(expl)
else:
print('★ ' * pr + '☆ ' * (5 - pr))
normal_print(expl)
finally:
curs.close()
conn.close()
def super_insert(input_file_path):
log_file_path = os.path.join(DEFAULT_PATH, 'log.txt')
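    # Words that fail to insert are written to log.txt so they can be retried later.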
baseurl = 'http://dict.youdao.com/w/'
word_list = open(input_file_path, 'r', encoding='utf-8')
log_file = open(log_file_path, 'w', encoding='utf-8')
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
for line in word_list.readlines():
word = line.strip()
print(word)
url = baseurl + word
expl = get_text(url)
try:
# insert into database.
curs.execute("INSERT INTO Word(name, expl, pr, aset) VALUES (\"%s\", \"%s\", %d, \"%s\")" \
% (word, expl, 1, word[0].upper()))
except Exception as e:
print(word, "can't insert into database")
# save the error in log file.
print(e)
log_file.write(word + '\n')
conn.commit()
curs.close()
conn.close()
log_file.close()
word_list.close()
def count_word(arg):
'''count the number of words'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
if arg[0].isdigit():
if len(arg) == 1:
curs.execute('SELECT count(*) FROM Word WHERE pr == %d' % (int(arg[0])))
elif len(arg) == 2 and arg[1] == '+':
curs.execute('SELECT count(*) FROM Word WHERE pr >= %d' % (int(arg[0])))
elif len(arg) == 3 and arg[1] == '-':
curs.execute('SELECT count(*) FROM Word WHERE pr >= %d AND pr<= % d' % (int(arg[0]), int(arg[2])))
elif arg[0].isalpha():
if arg == 'all':
curs.execute('SELECT count(*) FROM Word')
elif len(arg) == 1:
curs.execute('SELECT count(*) FROM Word WHERE aset == "%s"' % arg.upper())
res = curs.fetchall()
print(res[0][0])
curs.close()
conn.close()
def main():
parser = argparse.ArgumentParser(description='Search words')
parser.add_argument(dest='word', help='the word you want to search.', nargs='*')
parser.add_argument('-f', '--file', dest='file',
action='store', help='add words list from text file.')
parser.add_argument('-a', '--add', dest='add',
action='store', nargs='+', help='insert word into database.')
parser.add_argument('-d', '--delete', dest='delete',
action='store', nargs='+', help='delete word from database.')
parser.add_argument('-s', '--set', dest='set',
action='store', help='set priority.')
parser.add_argument('-v', '--verbose', dest='verbose',
action='store_true', help='verbose mode.')
parser.add_argument('-o', '--output', dest='output',
action='store_true', help='output mode.')
parser.add_argument('-p', '--priority', dest='priority',
action='store', help='list words by priority.')
parser.add_argument('-t', '--time', dest='time',
action='store', help='list words by time.')
parser.add_argument('-l', '--letter', dest='letter',
action='store', help='list words by letter.')
parser.add_argument('-c', '--count', dest='count',
action='store', help='count the word.')
args = parser.parse_args()
is_verbose = args.verbose
is_output = args.output
if args.add:
default_pr = 1 if not args.set else int(args.set)
add_word(' '.join(args.add), default_pr)
elif args.delete:
delete_word(' '.join(args.delete))
elif args.set:
number = args.set
if not number.isdigit():
print(colored('you forget to set the number', 'white', 'on_red'))
sys.exit()
priority = int(number)
if args.word:
set_priority(' '.join(args.word), priority)
else:
print(colored('please set the priority', 'white', 'on_red'))
elif args.letter:
list_letter(args.letter[0].upper(), is_verbose, is_output)
elif args.time:
limit = int(args.time)
list_latest(limit, is_verbose, is_output)
elif args.priority:
list_priority(args.priority, is_verbose, is_output)
elif args.file:
input_file_path = args.file
if input_file_path.endswith('.txt'):
super_insert(input_file_path)
elif input_file_path == 'default':
super_insert(os.path.join(DEFAULT_PATH, 'word_list.txt'))
else:
print(colored('please use a correct path of text file', 'white', 'on_red'))
elif args.count:
count_word(args.count)
elif args.word:
if not os.path.exists(os.path.join(DEFAULT_PATH, 'word.db')):
os.mkdir(DEFAULT_PATH)
with open(os.path.join(DEFAULT_PATH, 'word_list.txt'), 'w') as f:
pass
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
curs.execute(CREATE_TABLE_WORD)
conn.commit()
curs.close()
conn.close()
word = ' '.join(args.word)
search_database(word)
if __name__ == '__main__':
main()
| [
"sys.setdefaultencoding",
"termcolor.colored",
"argparse.ArgumentParser",
"re.compile",
"os.path.join",
"re.match",
"requests.get",
"bs4.BeautifulSoup",
"os.mkdir",
"sys.exit",
"re.sub",
"os.path.expanduser"
] | [((273, 304), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (295, 304), False, 'import sys\n'), ((373, 396), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (391, 396), False, 'import os\n'), ((1115, 1152), 'requests.get', 'requests.get', (['url'], {'headers': 'my_headers'}), '(url, headers=my_headers)\n', (1127, 1152), False, 'import requests\n'), ((1184, 1222), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['data', '"""html.parser"""'], {}), "(data, 'html.parser')\n", (1201, 1222), False, 'import bs4\n'), ((14042, 14079), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""log.txt"""'], {}), "(DEFAULT_PATH, 'log.txt')\n", (14054, 14079), False, 'import os\n'), ((15884, 15935), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Search words"""'}), "(description='Search words')\n", (15907, 15935), False, 'import argparse\n'), ((1796, 1824), 're.sub', 're.sub', (['"""例:"""', '"""\n\n例:"""', 'line'], {}), "('例:', '\\n\\n例:', line)\n", (1802, 1824), False, 'import re\n'), ((1841, 1881), 're.sub', 're.sub', (['"""(\\\\d+\\\\. )"""', '"""\\\\n\\\\n\\\\1"""', 'text1'], {}), "('(\\\\d+\\\\. )', '\\\\n\\\\n\\\\1', text1)\n", (1847, 1881), False, 'import re\n'), ((1895, 1933), 're.sub', 're.sub', (['"""(\\\\s+?→\\\\s+)"""', '""" → """', 'text1'], {}), "('(\\\\s+?→\\\\s+)', ' → ', text1)\n", (1901, 1933), False, 'import re\n'), ((1950, 1975), 're.sub', 're.sub', (['"""(")"""', '"""\'"""', 'text1'], {}), '(\'(")\', "\'", text1)\n', (1956, 1975), False, 'import re\n'), ((1994, 2026), 're.sub', 're.sub', (['"""\\\\s{10}\\\\s+"""', '""""""', 'text1'], {}), "('\\\\s{10}\\\\s+', '', text1)\n", (2000, 2026), False, 'import re\n'), ((2713, 2738), 're.sub', 're.sub', (['"""(")"""', '"""\'"""', 'text2'], {}), '(\'(")\', "\'", text2)\n', (2719, 2738), False, 'import re\n'), ((3419, 3444), 're.sub', 're.sub', (['"""(")"""', '"""\'"""', 'text3'], {}), '(\'(")\', "\'", text3)\n', (3425, 3444), False, 'import re\n'), ((4221, 4246), 're.sub', 're.sub', (['"""(")"""', '"""\'"""', 'text4'], {}), '(\'(")\', "\'", text4)\n', (4227, 4246), False, 'import re\n'), ((4669, 4703), 're.compile', 're.compile', (['""".*?\\\\..*?\\\\..*?|《.*》"""'], {}), "('.*?\\\\..*?\\\\..*?|《.*》')\n", (4679, 4703), False, 'import re\n'), ((5129, 5154), 're.sub', 're.sub', (['"""(")"""', '"""\'"""', 'text5'], {}), '(\'(")\', "\'", text5)\n', (5135, 5154), False, 'import re\n'), ((6421, 6458), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word.db"""'], {}), "(DEFAULT_PATH, 'word.db')\n", (6433, 6458), False, 'import os\n'), ((7519, 7556), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word.db"""'], {}), "(DEFAULT_PATH, 'word.db')\n", (7531, 7556), False, 'import os\n'), ((7768, 7778), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7776, 7778), False, 'import sys\n'), ((8390, 8427), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word.db"""'], {}), "(DEFAULT_PATH, 'word.db')\n", (8402, 8427), False, 'import os\n'), ((9162, 9199), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word.db"""'], {}), "(DEFAULT_PATH, 'word.db')\n", (9174, 9199), False, 'import os\n'), ((10003, 10040), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word.db"""'], {}), "(DEFAULT_PATH, 'word.db')\n", (10015, 10040), False, 'import os\n'), ((11274, 11311), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word.db"""'], {}), "(DEFAULT_PATH, 'word.db')\n", (11286, 11311), False, 'import os\n'), ((13078, 13115), 'os.path.join', 'os.path.join', 
(['DEFAULT_PATH', '"""word.db"""'], {}), "(DEFAULT_PATH, 'word.db')\n", (13090, 13115), False, 'import os\n'), ((14269, 14306), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word.db"""'], {}), "(DEFAULT_PATH, 'word.db')\n", (14281, 14306), False, 'import os\n'), ((15063, 15100), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word.db"""'], {}), "(DEFAULT_PATH, 'word.db')\n", (15075, 15100), False, 'import os\n'), ((4071, 4097), 're.match', 're.match', (['"""^[a-zA-Z]+$"""', 'x'], {}), "('^[a-zA-Z]+$', x)\n", (4079, 4097), False, 'import re\n'), ((6614, 6661), 'termcolor.colored', 'colored', (["(word + ' 在数据库中存在')", '"""white"""', '"""on_green"""'], {}), "(word + ' 在数据库中存在', 'white', 'on_green')\n", (6621, 6661), False, 'from termcolor import colored\n'), ((6693, 6725), 'termcolor.colored', 'colored', (["('★ ' * res[0][1])", '"""red"""'], {}), "('★ ' * res[0][1], 'red')\n", (6700, 6725), False, 'from termcolor import colored\n'), ((6729, 6770), 'termcolor.colored', 'colored', (["('☆ ' * (5 - res[0][1]))", '"""yellow"""'], {}), "('☆ ' * (5 - res[0][1]), 'yellow')\n", (6736, 6770), False, 'from termcolor import colored\n'), ((6836, 6886), 'termcolor.colored', 'colored', (["(word + ' 不在本地,从有道词典查询')", '"""white"""', '"""on_red"""'], {}), "(word + ' 不在本地,从有道词典查询', 'white', 'on_red')\n", (6843, 6886), False, 'from termcolor import colored\n'), ((7706, 7758), 'termcolor.colored', 'colored', (["(word + ' 在数据库中已存在,不需要添加')", '"""white"""', '"""on_red"""'], {}), "(word + ' 在数据库中已存在,不需要添加', 'white', 'on_red')\n", (7713, 7758), False, 'from termcolor import colored\n'), ((8168, 8229), 'termcolor.colored', 'colored', (["('%s has been inserted into database' % word)", '"""green"""'], {}), "('%s has been inserted into database' % word, 'green')\n", (8175, 8229), False, 'from termcolor import colored\n'), ((8929, 8995), 'termcolor.colored', 'colored', (["('%s not exists in the database' % word)", '"""white"""', '"""on_red"""'], {}), "('%s not exists in the database' % word, 'white', 'on_red')\n", (8936, 8995), False, 'from termcolor import colored\n'), ((9802, 9868), 'termcolor.colored', 'colored', (["('%s not exists in the database' % word)", '"""white"""', '"""on_red"""'], {}), "('%s not exists in the database' % word, 'white', 'on_red')\n", (9809, 9868), False, 'from termcolor import colored\n'), ((8030, 8101), 'termcolor.colored', 'colored', (['"""something\'s wrong, you can\'t add the word"""', '"""white"""', '"""on_red"""'], {}), '("something\'s wrong, you can\'t add the word", \'white\', \'on_red\')\n', (8037, 8101), False, 'from termcolor import colored\n'), ((8750, 8810), 'termcolor.colored', 'colored', (["('%s has been deleted from database' % word)", '"""green"""'], {}), "('%s has been deleted from database' % word, 'green')\n", (8757, 8810), False, 'from termcolor import colored\n'), ((9609, 9683), 'termcolor.colored', 'colored', (["('the priority of %s has been reset to %s' % (word, pr))", '"""green"""'], {}), "('the priority of %s has been reset to %s' % (word, pr), 'green')\n", (9616, 9683), False, 'from termcolor import colored\n'), ((10308, 10366), 'termcolor.colored', 'colored', (['"""something\'s wrong, catlog is from A to Z"""', '"""red"""'], {}), '("something\'s wrong, catlog is from A to Z", \'red\')\n', (10315, 10366), False, 'from termcolor import colored\n'), ((12407, 12464), 'termcolor.colored', 'colored', (['"""something\'s wrong, priority must be 1-5"""', '"""red"""'], {}), '("something\'s wrong, priority must be 1-5", \'red\')\n', (12414, 12464), False, 'from 
termcolor import colored\n'), ((13468, 13525), 'termcolor.colored', 'colored', (['"""something\'s wrong, please set the limit"""', '"""red"""'], {}), '("something\'s wrong, please set the limit", \'red\')\n', (13475, 13525), False, 'from termcolor import colored\n'), ((5830, 5860), 'termcolor.colored', 'colored', (["(line + '\\n')", '"""yellow"""'], {}), "(line + '\\n', 'yellow')\n", (5837, 5860), False, 'from termcolor import colored\n'), ((9479, 9552), 'termcolor.colored', 'colored', (['"""something\'s wrong, you can\'t reset priority"""', '"""white"""', '"""on_red"""'], {}), '("something\'s wrong, you can\'t reset priority", \'white\', \'on_red\')\n', (9486, 9552), False, 'from termcolor import colored\n'), ((10714, 10739), 'termcolor.colored', 'colored', (["('★ ' * pr)", '"""red"""'], {}), "('★ ' * pr, 'red')\n", (10721, 10739), False, 'from termcolor import colored\n'), ((10745, 10779), 'termcolor.colored', 'colored', (["('☆ ' * (5 - pr))", '"""yellow"""'], {}), "('☆ ' * (5 - pr), 'yellow')\n", (10752, 10779), False, 'from termcolor import colored\n'), ((12674, 12699), 'termcolor.colored', 'colored', (["('★ ' * pr)", '"""red"""'], {}), "('★ ' * pr, 'red')\n", (12681, 12699), False, 'from termcolor import colored\n'), ((12705, 12739), 'termcolor.colored', 'colored', (["('☆ ' * (5 - pr))", '"""yellow"""'], {}), "('☆ ' * (5 - pr), 'yellow')\n", (12712, 12739), False, 'from termcolor import colored\n'), ((13718, 13743), 'termcolor.colored', 'colored', (["('★ ' * pr)", '"""red"""'], {}), "('★ ' * pr, 'red')\n", (13725, 13743), False, 'from termcolor import colored\n'), ((13747, 13781), 'termcolor.colored', 'colored', (["('☆ ' * (5 - pr))", '"""yellow"""'], {}), "('☆ ' * (5 - pr), 'yellow')\n", (13754, 13781), False, 'from termcolor import colored\n'), ((17756, 17766), 'sys.exit', 'sys.exit', ([], {}), '()\n', (17764, 17766), False, 'import sys\n'), ((5454, 5488), 'termcolor.colored', 'colored', (['line', '"""white"""', '"""on_green"""'], {}), "(line, 'white', 'on_green')\n", (5461, 5488), False, 'from termcolor import colored\n'), ((5902, 5923), 'termcolor.colored', 'colored', (['line', '"""cyan"""'], {}), "(line, 'cyan')\n", (5909, 5923), False, 'from termcolor import colored\n'), ((17684, 17742), 'termcolor.colored', 'colored', (['"""you forget to set the number"""', '"""white"""', '"""on_red"""'], {}), "('you forget to set the number', 'white', 'on_red')\n", (17691, 17742), False, 'from termcolor import colored\n'), ((17908, 17961), 'termcolor.colored', 'colored', (['"""please set the priority"""', '"""white"""', '"""on_red"""'], {}), "('please set the priority', 'white', 'on_red')\n", (17915, 17961), False, 'from termcolor import colored\n'), ((5682, 5716), 'termcolor.colored', 'colored', (['line', '"""white"""', '"""on_green"""'], {}), "(line, 'white', 'on_green')\n", (5689, 5716), False, 'from termcolor import colored\n'), ((18452, 18495), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word_list.txt"""'], {}), "(DEFAULT_PATH, 'word_list.txt')\n", (18464, 18495), False, 'import os\n'), ((18529, 18597), 'termcolor.colored', 'colored', (['"""please use a correct path of text file"""', '"""white"""', '"""on_red"""'], {}), "('please use a correct path of text file', 'white', 'on_red')\n", (18536, 18597), False, 'from termcolor import colored\n'), ((18754, 18776), 'os.mkdir', 'os.mkdir', (['DEFAULT_PATH'], {}), '(DEFAULT_PATH)\n', (18762, 18776), False, 'import os\n'), ((18702, 18739), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word.db"""'], {}), "(DEFAULT_PATH, 
'word.db')\n", (18714, 18739), False, 'import os\n'), ((18911, 18948), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word.db"""'], {}), "(DEFAULT_PATH, 'word.db')\n", (18923, 18948), False, 'import os\n'), ((18799, 18842), 'os.path.join', 'os.path.join', (['DEFAULT_PATH', '"""word_list.txt"""'], {}), "(DEFAULT_PATH, 'word_list.txt')\n", (18811, 18842), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Title: GCN models
Description:
The original Graph convolutional network model and GCN layer.
Refer to: https://arxiv.org/abs/1609.02907
"""
# =======================================
# @author Zhibin.Lu
# @email <EMAIL>
# =======================================
import collections
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class GraphConvolutionLayer(nn.Module):
"""Original Graph Convolutional Layer
Reference GCN equation:
F = A(relu(AW))W
"""
def __init__(
self,
input_dim,
output_dim,
support,
act_func=None,
featureless=False,
dropout_rate=0.0,
bias=False,
):
super().__init__()
self.support = support
self.featureless = featureless
for i in range(len(self.support)):
setattr(
self,
"W{}".format(i),
nn.Parameter(torch.randn(input_dim, output_dim)),
)
if bias:
self.b = nn.Parameter(torch.zeros(1, output_dim))
self.act_func = act_func
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x):
if not self.featureless:
x = self.dropout(x)
for i in range(len(self.support)):
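            # Featureless input means X is the identity matrix, so XW reduces to the weight matrix itself.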
if self.featureless:
pre_sup = getattr(self, "W{}".format(i))
else:
pre_sup = x.mm(getattr(self, "W{}".format(i)))
if i == 0:
out = self.support[i].mm(pre_sup)
else:
out += self.support[i].mm(pre_sup)
if self.act_func is not None:
out = self.act_func(out)
self.embedding = out
return out
class GraphConvolutionLayer_NoActBtwLayer(nn.Module):
""" GraphConvolution Layer without the activation
function between 2 graph convolution layers.
No-activation-func GCN equation:
F = (relu(A(AW)))W
"""
def __init__(
self,
input_dim,
output_dim,
support,
act_func=None,
featureless=False,
dropout_rate=0.0,
bias=False,
):
super().__init__()
self.support = support
self.featureless = featureless
for i in range(len(self.support)):
setattr(
self,
"W{}".format(i),
nn.Parameter(torch.randn(input_dim, output_dim)),
)
if bias:
self.b = nn.Parameter(torch.zeros(1, output_dim))
self.act_func = act_func
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x):
if not self.featureless:
x = self.dropout(x)
for i in range(len(self.support)):
if self.featureless:
pre_sup = self.support[i]
else:
pre_sup = self.support[i].mm(x)
if self.act_func is not None:
pre_sup = self.act_func(pre_sup)
if i == 0:
out = pre_sup.mm(getattr(self, "W{}".format(i)))
else:
out += pre_sup.mm(getattr(self, "W{}".format(i)))
self.embedding = out
return out
class GCN_2Layers(nn.Module):
""" The 2-layer GCN
1. Original GCN model when mode is "only_gcn_act",
equation is A(relu(AW))W
2. No act func btw graph layer when mode is "only_fc_act",
equation is (relu(A(AW)))W
"""
def __init__(
self,
input_dim,
support,
hid_dim=200,
dropout_rate=0.0,
num_classes=10,
act_func=None,
mode="only_gcn_act",
):
super().__init__()
# GraphConvolution
if mode == "only_gcn_act": # original Text_GCN
# A(relu(AW))W
self.layer1 = GraphConvolutionLayer(
input_dim,
hid_dim,
support,
act_func=act_func,
featureless=True,
dropout_rate=dropout_rate,
)
self.layer2 = GraphConvolutionLayer(
hid_dim, num_classes, support, dropout_rate=dropout_rate
)
elif mode == "only_fc_act":
# (relu(A(AW)))W
self.layer1 = GraphConvolutionLayer_NoActBtwLayer(
input_dim,
hid_dim,
support,
featureless=True,
dropout_rate=dropout_rate,
)
self.layer2 = GraphConvolutionLayer_NoActBtwLayer(
hid_dim,
num_classes,
support,
act_func=act_func,
dropout_rate=dropout_rate,
)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
return out
| [
"torch.nn.Dropout",
"torch.zeros",
"torch.randn"
] | [((1233, 1257), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (1243, 1257), True, 'import torch.nn as nn\n'), ((2688, 2712), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (2698, 2712), True, 'import torch.nn as nn\n'), ((1148, 1174), 'torch.zeros', 'torch.zeros', (['(1)', 'output_dim'], {}), '(1, output_dim)\n', (1159, 1174), False, 'import torch\n'), ((2603, 2629), 'torch.zeros', 'torch.zeros', (['(1)', 'output_dim'], {}), '(1, output_dim)\n', (2614, 2629), False, 'import torch\n'), ((1045, 1079), 'torch.randn', 'torch.randn', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (1056, 1079), False, 'import torch\n'), ((2500, 2534), 'torch.randn', 'torch.randn', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (2511, 2534), False, 'import torch\n')] |
#Method-1 guess the number game
import random
number = random.randint(1,10)
guess = 0
count = 0
print("You can exit the game anytime. Just enter 'exit'.")
while guess != number and guess != "exit":
guess = input("Guess a number between 1 to 10 :- ")
if guess == "exit":
print("Closing the game...")
break
guess = int(guess)
count += 1
if guess < number:
print("Too low!")
elif guess > number:
print("Too high!")
else:
print("\nCongratulation, You got it!")
print("You have tried ", count ," times")
| [
"random.randint"
] | [((56, 77), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (70, 77), False, 'import random\n')] |
"""Solvates a host, inserts guest(s) into solvated host, equilibrates
"""
import os
import time
import tempfile
import numpy as np
from rdkit import Chem
from md import builders, minimizer
from fe import pdb_writer, free_energy
from ff import Forcefield
from ff.handlers.deserialize import deserialize_handlers
from timemachine.lib import custom_ops, LangevinIntegrator
from docking import report
def dock_and_equilibrate(
host_pdbfile,
guests_sdfile,
max_lambda,
insertion_steps,
eq_steps,
outdir,
fewer_outfiles=False,
constant_atoms=[],
):
"""Solvates a host, inserts guest(s) into solvated host, equilibrates
Parameters
----------
host_pdbfile: path to host pdb file to dock into
guests_sdfile: path to input sdf with guests to pose/dock
max_lambda: lambda value the guest should insert from or delete to
    (recommended: 1.0 for work calculation, 0.25 to stay close to original pose)
(must be =1 for work calculation to be applicable)
insertion_steps: how many steps to insert the guest over (recommended: 501)
eq_steps: how many steps of equilibration to do after insertion (recommended: 15001)
outdir: where to write output (will be created if it does not already exist)
fewer_outfiles: if True, will only write frames for the equilibration, not insertion
constant_atoms: atom numbers from the host_pdbfile to hold mostly fixed across the simulation
(1-indexed, like PDB files)
Output
------
A pdb & sdf file for the last step of insertion
(outdir/<guest_name>/<guest_name>_ins_<step>_[host.pdb/guest.sdf])
A pdb & sdf file every 1000 steps of equilibration
(outdir/<guest_name>/<guest_name>_eq_<step>_[host.pdb/guest.sdf])
stdout corresponding to the files written noting the lambda value and energy
stdout for each guest noting the work of transition, if applicable
stdout for each guest noting how long it took to run
Note
----
The work will not be calculated if the du_dl endpoints are not close to 0 or if any norm of
force per atom exceeds 20000 kJ/(mol*nm) [MAX_NORM_FORCE defined in docking/report.py]
"""
if not os.path.exists(outdir):
os.makedirs(outdir)
print(
f"""
HOST_PDBFILE = {host_pdbfile}
GUESTS_SDFILE = {guests_sdfile}
OUTDIR = {outdir}
MAX_LAMBDA = {max_lambda}
INSERTION_STEPS = {insertion_steps}
EQ_STEPS = {eq_steps}
"""
)
# Prepare host
# TODO: handle extra (non-transitioning) guests?
print("Solvating host...")
(
solvated_host_system,
solvated_host_coords,
_,
_,
host_box,
solvated_topology,
) = builders.build_protein_system(host_pdbfile)
_, solvated_host_pdb = tempfile.mkstemp(suffix=".pdb", text=True)
writer = pdb_writer.PDBWriter([solvated_topology], solvated_host_pdb)
writer.write_frame(solvated_host_coords)
writer.close()
solvated_host_mol = Chem.MolFromPDBFile(solvated_host_pdb, removeHs=False)
os.remove(solvated_host_pdb)
guest_ff_handlers = deserialize_handlers(
open(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"..",
"ff/params/smirnoff_1_1_0_ccc.py",
)
).read()
)
ff = Forcefield(guest_ff_handlers)
# Run the procedure
print("Getting guests...")
suppl = Chem.SDMolSupplier(guests_sdfile, removeHs=False)
for guest_mol in suppl:
start_time = time.time()
guest_name = guest_mol.GetProp("_Name")
guest_conformer = guest_mol.GetConformer(0)
orig_guest_coords = np.array(guest_conformer.GetPositions(), dtype=np.float64)
orig_guest_coords = orig_guest_coords / 10 # convert to md_units
minimized_coords = minimizer.minimize_host_4d(
[guest_mol], solvated_host_system, solvated_host_coords, ff, host_box
)
afe = free_energy.AbsoluteFreeEnergy(guest_mol, ff)
ups, sys_params, combined_masses, _ = afe.prepare_host_edge(
ff.get_ordered_params(), solvated_host_system, minimized_coords
)
combined_bps = []
for up, sp in zip(ups, sys_params):
combined_bps.append(up.bind(sp))
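        # Starting coordinates: minimized host atoms first, followed by the guest in its original pose.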
x0 = np.concatenate([minimized_coords, orig_guest_coords])
v0 = np.zeros_like(x0)
print(f"SYSTEM", f"guest_name: {guest_name}", f"num_atoms: {len(x0)}")
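        # Inflate the masses of the user-specified atoms so they stay essentially fixed during the simulation.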
for atom_num in constant_atoms:
combined_masses[atom_num - 1] += 50000
seed = 2021
intg = LangevinIntegrator(300.0, 1.5e-3, 1.0, combined_masses, seed).impl()
u_impls = []
for bp in combined_bps:
bp_impl = bp.bound_impl(precision=np.float32)
u_impls.append(bp_impl)
ctxt = custom_ops.Context(x0, v0, host_box, intg, u_impls)
# insert guest
insertion_lambda_schedule = np.linspace(max_lambda, 0.0, insertion_steps)
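        # Sweep lambda from max_lambda down to 0 to alchemically insert the guest over insertion_steps steps.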
calc_work = True
# collect a du_dl calculation once every other step
subsample_interval = 1
full_du_dls, _, _ = ctxt.multiple_steps(insertion_lambda_schedule, subsample_interval)
step = len(insertion_lambda_schedule) - 1
lamb = insertion_lambda_schedule[-1]
ctxt.step(lamb)
report.report_step(
ctxt,
step,
lamb,
host_box,
combined_bps,
u_impls,
guest_name,
insertion_steps,
"INSERTION",
)
if not fewer_outfiles:
host_coords = ctxt.get_x_t()[: len(solvated_host_coords)] * 10
guest_coords = ctxt.get_x_t()[len(solvated_host_coords) :] * 10
report.write_frame(
host_coords,
solvated_host_mol,
guest_coords,
guest_mol,
guest_name,
outdir,
str(step).zfill(len(str(insertion_steps))),
"ins",
)
if report.too_much_force(ctxt, lamb, host_box, combined_bps, u_impls):
print("Not calculating work (too much force)")
calc_work = False
continue
# Note: this condition only applies for ABFE, not RBFE
if abs(full_du_dls[0]) > 0.001 or abs(full_du_dls[-1]) > 0.001:
print("Not calculating work (du_dl endpoints are not ~0)")
calc_work = False
if calc_work:
work = np.trapz(full_du_dls, insertion_lambda_schedule[::subsample_interval])
print(f"guest_name: {guest_name}\tinsertion_work: {work:.2f}")
# equilibrate
for step in range(eq_steps):
ctxt.step(0.00)
if step % 1000 == 0:
report.report_step(
ctxt,
step,
0.00,
host_box,
combined_bps,
u_impls,
guest_name,
eq_steps,
"EQUILIBRATION",
)
if (not fewer_outfiles) or (step == eq_steps - 1):
host_coords = ctxt.get_x_t()[: len(solvated_host_coords)] * 10
guest_coords = ctxt.get_x_t()[len(solvated_host_coords) :] * 10
report.write_frame(
host_coords,
solvated_host_mol,
guest_coords,
guest_mol,
guest_name,
outdir,
str(step).zfill(len(str(eq_steps))),
"eq",
)
if step in (0, int(eq_steps / 2), eq_steps - 1):
if report.too_much_force(ctxt, 0.00, host_box, combined_bps, u_impls):
break
end_time = time.time()
print(f"{guest_name} took {(end_time - start_time):.2f} seconds")
def main():
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"-p",
"--host_pdbfile",
default="tests/data/hif2a_nowater_min.pdb",
help="host to dock into",
)
parser.add_argument(
"-s",
"--guests_sdfile",
default="tests/data/ligands_40__first-two-ligs.sdf",
help="guests to pose",
)
parser.add_argument(
"--max_lambda",
type=float,
default=1.0,
help=(
"lambda value the guest should insert from or delete to "
"(must be =1 for the work calculation to be applicable)"
),
)
parser.add_argument(
"--insertion_steps",
type=int,
default=501,
help="how many steps to take while phasing in each guest",
)
parser.add_argument(
"--eq_steps",
type=int,
default=15001,
help="equilibration length (1 step = 1.5 femtoseconds)",
)
parser.add_argument("-o", "--outdir", default="dock_equil_out", help="where to write output")
parser.add_argument("--fewer_outfiles", action="store_true", help="write fewer output pdb/sdf files")
parser.add_argument(
"-c",
"--constant_atoms_file",
help="file containing comma-separated atom numbers to hold ~fixed (1-indexed)",
)
args = parser.parse_args()
constant_atoms_list = []
if args.constant_atoms_file:
with open(args.constant_atoms_file, "r") as rfile:
for line in rfile.readlines():
atoms = [int(x.strip()) for x in line.strip().split(",")]
constant_atoms_list += atoms
dock_and_equilibrate(
args.host_pdbfile,
args.guests_sdfile,
args.max_lambda,
args.insertion_steps,
args.eq_steps,
args.outdir,
args.fewer_outfiles,
constant_atoms_list,
)
if __name__ == "__main__":
main()
| [
"rdkit.Chem.SDMolSupplier",
"os.remove",
"os.path.exists",
"rdkit.Chem.MolFromPDBFile",
"argparse.ArgumentParser",
"docking.report.too_much_force",
"numpy.linspace",
"fe.free_energy.AbsoluteFreeEnergy",
"numpy.concatenate",
"docking.report.report_step",
"timemachine.lib.custom_ops.Context",
"t... | [((2726, 2769), 'md.builders.build_protein_system', 'builders.build_protein_system', (['host_pdbfile'], {}), '(host_pdbfile)\n', (2755, 2769), False, 'from md import builders, minimizer\n'), ((2798, 2840), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".pdb"""', 'text': '(True)'}), "(suffix='.pdb', text=True)\n", (2814, 2840), False, 'import tempfile\n'), ((2854, 2914), 'fe.pdb_writer.PDBWriter', 'pdb_writer.PDBWriter', (['[solvated_topology]', 'solvated_host_pdb'], {}), '([solvated_topology], solvated_host_pdb)\n', (2874, 2914), False, 'from fe import pdb_writer, free_energy\n'), ((3003, 3057), 'rdkit.Chem.MolFromPDBFile', 'Chem.MolFromPDBFile', (['solvated_host_pdb'], {'removeHs': '(False)'}), '(solvated_host_pdb, removeHs=False)\n', (3022, 3057), False, 'from rdkit import Chem\n'), ((3062, 3090), 'os.remove', 'os.remove', (['solvated_host_pdb'], {}), '(solvated_host_pdb)\n', (3071, 3090), False, 'import os\n'), ((3357, 3386), 'ff.Forcefield', 'Forcefield', (['guest_ff_handlers'], {}), '(guest_ff_handlers)\n', (3367, 3386), False, 'from ff import Forcefield\n'), ((3455, 3504), 'rdkit.Chem.SDMolSupplier', 'Chem.SDMolSupplier', (['guests_sdfile'], {'removeHs': '(False)'}), '(guests_sdfile, removeHs=False)\n', (3473, 3504), False, 'from rdkit import Chem\n'), ((8057, 8136), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (8080, 8136), False, 'import argparse\n'), ((2202, 2224), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (2216, 2224), False, 'import os\n'), ((2234, 2253), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (2245, 2253), False, 'import os\n'), ((3554, 3565), 'time.time', 'time.time', ([], {}), '()\n', (3563, 3565), False, 'import time\n'), ((3855, 3956), 'md.minimizer.minimize_host_4d', 'minimizer.minimize_host_4d', (['[guest_mol]', 'solvated_host_system', 'solvated_host_coords', 'ff', 'host_box'], {}), '([guest_mol], solvated_host_system,\n solvated_host_coords, ff, host_box)\n', (3881, 3956), False, 'from md import builders, minimizer\n'), ((3990, 4035), 'fe.free_energy.AbsoluteFreeEnergy', 'free_energy.AbsoluteFreeEnergy', (['guest_mol', 'ff'], {}), '(guest_mol, ff)\n', (4020, 4035), False, 'from fe import pdb_writer, free_energy\n'), ((4322, 4375), 'numpy.concatenate', 'np.concatenate', (['[minimized_coords, orig_guest_coords]'], {}), '([minimized_coords, orig_guest_coords])\n', (4336, 4375), True, 'import numpy as np\n'), ((4389, 4406), 'numpy.zeros_like', 'np.zeros_like', (['x0'], {}), '(x0)\n', (4402, 4406), True, 'import numpy as np\n'), ((4847, 4898), 'timemachine.lib.custom_ops.Context', 'custom_ops.Context', (['x0', 'v0', 'host_box', 'intg', 'u_impls'], {}), '(x0, v0, host_box, intg, u_impls)\n', (4865, 4898), False, 'from timemachine.lib import custom_ops, LangevinIntegrator\n'), ((4959, 5004), 'numpy.linspace', 'np.linspace', (['max_lambda', '(0.0)', 'insertion_steps'], {}), '(max_lambda, 0.0, insertion_steps)\n', (4970, 5004), True, 'import numpy as np\n'), ((5346, 5461), 'docking.report.report_step', 'report.report_step', (['ctxt', 'step', 'lamb', 'host_box', 'combined_bps', 'u_impls', 'guest_name', 'insertion_steps', '"""INSERTION"""'], {}), "(ctxt, step, lamb, host_box, combined_bps, u_impls,\n guest_name, insertion_steps, 'INSERTION')\n", (5364, 5461), False, 'from docking import report\n'), ((6073, 6139), 'docking.report.too_much_force', 'report.too_much_force', 
(['ctxt', 'lamb', 'host_box', 'combined_bps', 'u_impls'], {}), '(ctxt, lamb, host_box, combined_bps, u_impls)\n', (6094, 6139), False, 'from docking import report\n'), ((7923, 7934), 'time.time', 'time.time', ([], {}), '()\n', (7932, 7934), False, 'import time\n'), ((6530, 6600), 'numpy.trapz', 'np.trapz', (['full_du_dls', 'insertion_lambda_schedule[::subsample_interval]'], {}), '(full_du_dls, insertion_lambda_schedule[::subsample_interval])\n', (6538, 6600), True, 'import numpy as np\n'), ((4614, 4675), 'timemachine.lib.LangevinIntegrator', 'LangevinIntegrator', (['(300.0)', '(0.0015)', '(1.0)', 'combined_masses', 'seed'], {}), '(300.0, 0.0015, 1.0, combined_masses, seed)\n', (4632, 4675), False, 'from timemachine.lib import custom_ops, LangevinIntegrator\n'), ((6813, 6924), 'docking.report.report_step', 'report.report_step', (['ctxt', 'step', '(0.0)', 'host_box', 'combined_bps', 'u_impls', 'guest_name', 'eq_steps', '"""EQUILIBRATION"""'], {}), "(ctxt, step, 0.0, host_box, combined_bps, u_impls,\n guest_name, eq_steps, 'EQUILIBRATION')\n", (6831, 6924), False, 'from docking import report\n'), ((7809, 7874), 'docking.report.too_much_force', 'report.too_much_force', (['ctxt', '(0.0)', 'host_box', 'combined_bps', 'u_impls'], {}), '(ctxt, 0.0, host_box, combined_bps, u_impls)\n', (7830, 7874), False, 'from docking import report\n'), ((3210, 3235), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3225, 3235), False, 'import os\n')] |
import unittest
from unittest import TestCase
from src.gifGenerator import GifGenerator
class TestGifGenerator(TestCase):
def setUp(self) -> None:
self.gg = GifGenerator()
def test_set_text_position(self):
position = (50, 90)
self.gg.setTextPosition(position)
self.assertEqual(self.gg.text_position, position)
def test_set_font(self):
self.assertTrue(True)
def test_load_image(self):
# path='test.png'
self.assertTrue(True)
def test_crop_images(self):
self.assertTrue(True)
def test_generate(self):
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"src.gifGenerator.GifGenerator"
] | [((659, 674), 'unittest.main', 'unittest.main', ([], {}), '()\n', (672, 674), False, 'import unittest\n'), ((173, 187), 'src.gifGenerator.GifGenerator', 'GifGenerator', ([], {}), '()\n', (185, 187), False, 'from src.gifGenerator import GifGenerator\n')] |
"""
pyexcel.sources.file
~~~~~~~~~~~~~~~~~~~
Representation of file sources
:copyright: (c) 2015-2016 by Onni Software Ltd.
:license: New BSD License
"""
from pyexcel import params
from pyexcel.factory import FileSource
from pyexcel.sources.rendererfactory import RendererFactory
from pyexcel.sources import renderer
RendererFactory.register_renderers(renderer.renderers)
try:
import pyexcel_text as text
RendererFactory.register_renderers(text.renderers)
except ImportError:
pass
file_types = tuple(RendererFactory.renderer_factories.keys())
class IOSource(FileSource):
"""
Get excel data from file source
"""
@classmethod
def can_i_handle(cls, action, file_type):
if action == params.WRITE_ACTION:
status = file_type in file_types
else:
status = False
return status
class SheetSource(IOSource):
"""Pick up 'file_name' field and do single sheet based read and write
"""
fields = [params.FILE_NAME]
targets = (params.SHEET,)
actions = (params.WRITE_ACTION,)
def __init__(self, file_name=None, **keywords):
self.file_name = file_name
self.keywords = keywords
self.file_type = file_name.split(".")[-1]
self.renderer = RendererFactory.get_renderer(self.file_type)
def write_data(self, sheet):
self.renderer.render_sheet_to_file(self.file_name,
sheet, **self.keywords)
class BookSource(SheetSource):
"""Pick up 'file_name' field and do multiple sheet based read and write
"""
targets = (params.BOOK,)
def write_data(self, book):
self.renderer.render_book_to_file(self.file_name, book,
**self.keywords)
class WriteOnlySheetSource(IOSource):
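    """Pick up 'file_type' field and do single sheet based write to memory
    """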
fields = [params.FILE_TYPE]
targets = (params.SHEET,)
actions = (params.WRITE_ACTION,)
def __init__(self, file_type=None, file_stream=None, **keywords):
self.renderer = RendererFactory.get_renderer(file_type)
if file_stream:
self.content = file_stream
else:
self.content = self.renderer.get_io()
self.file_type = file_type
self.keywords = keywords
def write_data(self, sheet):
self.renderer.render_sheet_to_stream(self.content,
sheet, **self.keywords)
class WriteOnlyBookSource(WriteOnlySheetSource):
"""
    Multiple sheet data source for writing back to memory
"""
targets = (params.BOOK,)
def write_data(self, book):
self.renderer.render_book_to_stream(self.content, book,
**self.keywords)
sources = (
WriteOnlySheetSource,
WriteOnlyBookSource,
SheetSource,
BookSource
)
| [
"pyexcel.sources.rendererfactory.RendererFactory.register_renderers",
"pyexcel.sources.rendererfactory.RendererFactory.renderer_factories.keys",
"pyexcel.sources.rendererfactory.RendererFactory.get_renderer"
] | [((358, 412), 'pyexcel.sources.rendererfactory.RendererFactory.register_renderers', 'RendererFactory.register_renderers', (['renderer.renderers'], {}), '(renderer.renderers)\n', (392, 412), False, 'from pyexcel.sources.rendererfactory import RendererFactory\n'), ((459, 509), 'pyexcel.sources.rendererfactory.RendererFactory.register_renderers', 'RendererFactory.register_renderers', (['text.renderers'], {}), '(text.renderers)\n', (493, 509), False, 'from pyexcel.sources.rendererfactory import RendererFactory\n'), ((565, 606), 'pyexcel.sources.rendererfactory.RendererFactory.renderer_factories.keys', 'RendererFactory.renderer_factories.keys', ([], {}), '()\n', (604, 606), False, 'from pyexcel.sources.rendererfactory import RendererFactory\n'), ((1337, 1381), 'pyexcel.sources.rendererfactory.RendererFactory.get_renderer', 'RendererFactory.get_renderer', (['self.file_type'], {}), '(self.file_type)\n', (1365, 1381), False, 'from pyexcel.sources.rendererfactory import RendererFactory\n'), ((2101, 2140), 'pyexcel.sources.rendererfactory.RendererFactory.get_renderer', 'RendererFactory.get_renderer', (['file_type'], {}), '(file_type)\n', (2129, 2140), False, 'from pyexcel.sources.rendererfactory import RendererFactory\n')] |
import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
VERSION = '0.1.0'
PACKAGE_NAME = 'MDF_DALEC_GRASS'
AUTHOR = '<NAME>'
AUTHOR_EMAIL = '<EMAIL>'
URL = 'https://github.com/vmyrgiotis/MDF_DALEC_GRASS'
LICENSE = 'MIT'
DESCRIPTION = 'A Bayesian model-data fusion algorithm for simulating carbon dynamics in grassland ecosystems'
LONG_DESCRIPTION = (HERE / "README.md").read_text()
LONG_DESC_TYPE = "text/markdown"
INSTALL_REQUIRES = ["numpy", "pandas","spotpy","sklearn","sentinelhub", "shapely", "datetime", "geopandas", "cdsapi"]
PYTHON_REQUIRES = '>=3.8'
setup(name=PACKAGE_NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESC_TYPE,
author=AUTHOR,
license=LICENSE,
author_email=AUTHOR_EMAIL,
url=URL,
install_requires=INSTALL_REQUIRES,
packages=find_packages()
)
| [
"setuptools.find_packages",
"pathlib.Path"
] | [((67, 89), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (79, 89), False, 'import pathlib\n'), ((874, 889), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (887, 889), False, 'from setuptools import setup, find_packages\n')] |
import setuptools
from croo import croo_args
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='croo',
version=croo_args.__version__,
scripts=['bin/croo'],
python_requires='>3.4.1',
author='<NAME>',
author_email='<EMAIL>',
description='CRomwell Output Organizer',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/ENCODE-DCC/croo',
packages=setuptools.find_packages(exclude=['examples', 'docs']),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
],
install_requires=['caper']
)
| [
"setuptools.find_packages"
] | [((484, 538), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'exclude': "['examples', 'docs']"}), "(exclude=['examples', 'docs'])\n", (508, 538), False, 'import setuptools\n')] |
from typing import Dict
import numpy as np
from ..envs.env import StructuralModel
from ..utils.lik_func import *
from ..utils.useful_class import ParameterGrid
class Estimator(ABC):
"""An Estimator takes in a (trained) solver and relevant params
and outputs estimated structural params
"""
def __init__(self, solver: Solver = None, estimator_params: dict = None):
self.solver = solver
self.env = solver.env
self.estimator_params = estimator_params
self.num_structural_params = self.env.env_params['num_structural_params']
self.estimated_params = None
@abstractmethod
def estimate(self) -> dict:
"""Outputs estimation using a dict (e.g. dict['k'] = 0.95)"""
"""How?"""
return self.estimator_params
class SMMEstimator(Estimator, ABC):
"""Estimator using Simulated Method of Moments"""
    def __init__(self,
                 data: np.ndarray = None,  # (nsamples, N, T) or (N, T); N: obs dim, T: eps length
                 solver: Solver = None,
                 estimator_params: dict = None):
        # The base Estimator takes no ``env`` argument; it reads the environment
        # from ``solver.env``, so only solver and estimator_params are forwarded.
        super().__init__(solver=solver, estimator_params=estimator_params)
self.data = data
self.estimator_params.setdefault("verbose", True)
self.estimator_params.setdefault("weight_matrix", "identity") # weight matrix type for GMM
self.estimator_params.setdefault("sample_size", 1000)
assert "grid" in self.estimator_params
        assert "n_moment" in self.estimator_params
self.estimator_params.setdefault("grid", ParameterGrid({'this_is_an_example': [0.1]}))
self.estimator_params.setdefault("n_moment", 1)
if self.estimator_params['weight_matrix'] not in ["identity"]:
raise ValueError(f"No weight matrix {self.estimator_params['weight_matrix']}")
if self.estimator_params['weight_matrix'] == 'identity':
self.weight_matrix = np.eye(self.estimator_params['n_moment'])
def estimate(self) -> Dict[str, float]:
"""Use SMM to estimate structural params
Returns a dict of estimated structural params"""
running_min_error = np.inf
running_best_param = None
for param_dict in self.estimator_params['grid']:
gmm_error = self._gmm_error(param_dict, self.data)
if gmm_error < running_min_error:
running_min_error = gmm_error
running_best_param = param_dict
return running_best_param
@staticmethod
def _data_moments(obs_vec: np.ndarray) -> np.ndarray:
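        """Return the stacked per-dimension [mean, variance] moments of an
        observation array of shape (N, T) or (nsamples, N, T)."""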
moments = []
if obs_vec.ndim == 2: # (N, T)
for i in range(obs_vec.shape[0]):
mean = obs_vec[i, :].mean()
moments = np.append(moments, mean)
variance = obs_vec[i, :].var()
moments = np.append(moments, variance)
else:
assert obs_vec.ndim == 3 # (nsample, N, T)
for i in range(obs_vec.shape[1]):
mean = obs_vec[:, i, :].mean(axis=1).mean()
moments = np.append(moments, mean)
variance = obs_vec[:, i, :].var(axis=1).mean()
moments = np.append(moments, variance)
return moments
def _gmm_error(self, param_dict: Dict[str, float], data_obs_vec: np.ndarray):
"""Perform GMM on a single param dict
:parameter: param_dict a dict like {'delta': 0.1, 'gamma': 1}
        :returns: a float measuring how much error this param_dict generates on simulated samples"""
sample_size = self.estimator_params['sample_size']
# use: param_dict, sample_size, self.weight_matrix, self.solver, self.env
sim_obs_vec = None
for n in range(sample_size):
obs_sample = self.solver.sample(
param_dict=param_dict) # np array of size (N, T); in WhitedBasicModel N=2 (k, i)
obs_sample = obs_sample.reshape(1, *obs_sample.shape) # obs_sample.shape = (1, N, T)
# some method to concat/aggregate samples
sim_obs_vec = obs_sample if sim_obs_vec is None else np.append(sim_obs_vec, obs_sample, axis=0)
moms_data = self._data_moments(data_obs_vec)
moms_model = self._data_moments(sim_obs_vec)
err = (moms_model - moms_data) / (moms_data + 1.e-9)
crit_val = err.T @ self.weight_matrix @ err
return crit_val
class LikelihoodEstimator(Estimator, ABC):
"""General likelihood estimator using some kind of given likelihood function"""
def __init__(self, solver: Solver = None, estimator_params: dict = None):
super().__init__(solver=solver, estimator_params=estimator_params)
assert "lik_func" in estimator_params # class LikFunc object (likelihood function) from utils.lik_func
self.lik_func = estimator_params['lik_func']
assert isinstance(self.lik_func, LikFunc)
# TODO: JZH
if __name__ == "__main__":
grid = {
'delta': [0.1, 0.2, 0.3],
'gamma': [1, 10]
}
pg = ParameterGrid(grid)
for g in pg:
print(g)
| [
"numpy.append",
"numpy.eye"
] | [((1986, 2027), 'numpy.eye', 'np.eye', (["self.estimator_params['n_moment']"], {}), "(self.estimator_params['n_moment'])\n", (1992, 2027), True, 'import numpy as np\n'), ((2800, 2824), 'numpy.append', 'np.append', (['moments', 'mean'], {}), '(moments, mean)\n', (2809, 2824), True, 'import numpy as np\n'), ((2898, 2926), 'numpy.append', 'np.append', (['moments', 'variance'], {}), '(moments, variance)\n', (2907, 2926), True, 'import numpy as np\n'), ((3129, 3153), 'numpy.append', 'np.append', (['moments', 'mean'], {}), '(moments, mean)\n', (3138, 3153), True, 'import numpy as np\n'), ((3243, 3271), 'numpy.append', 'np.append', (['moments', 'variance'], {}), '(moments, variance)\n', (3252, 3271), True, 'import numpy as np\n'), ((4172, 4214), 'numpy.append', 'np.append', (['sim_obs_vec', 'obs_sample'], {'axis': '(0)'}), '(sim_obs_vec, obs_sample, axis=0)\n', (4181, 4214), True, 'import numpy as np\n')] |
from django.db import models
from django.contrib.auth.models import User
class Link(models.Model):
url = models.URLField()
title = models.CharField(max_length=255)
reporter = models.ForeignKey(
User,
on_delete=models.SET_NULL,
related_name='reported_links',
null=True,
blank=False,
)
def __str__(self):
return '{self.title} ({self.url})'.format(self=self)
def get_num_of_positive_votes(self):
return self.votes.filter(positive=True).count()
def get_num_of_negative_votes(self):
return self.votes.filter(negative=True).count()
class LinkVote(models.Model):
class Meta:
unique_together = (
('link', 'voter'),
)
link = models.ForeignKey(
Link,
on_delete=models.CASCADE,
related_name='votes',
)
voter = models.ForeignKey(
User,
on_delete=models.SET_NULL,
related_name='votes',
null=True,
blank=False,
)
positive = models.BooleanField()
negative = models.BooleanField()
def __str__(self):
if self.positive:
vote = 'positive'
elif self.negative:
vote = 'negative'
else:
vote = 'neutral'
return '{vote} vote for {self.link} by {self.voter}'.format(
vote=vote, self=self)
| [
"django.db.models.URLField",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.ForeignKey"
] | [((111, 128), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (126, 128), False, 'from django.db import models\n'), ((141, 173), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (157, 173), False, 'from django.db import models\n'), ((189, 299), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.SET_NULL', 'related_name': '"""reported_links"""', 'null': '(True)', 'blank': '(False)'}), "(User, on_delete=models.SET_NULL, related_name=\n 'reported_links', null=True, blank=False)\n", (206, 299), False, 'from django.db import models\n'), ((753, 824), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Link'], {'on_delete': 'models.CASCADE', 'related_name': '"""votes"""'}), "(Link, on_delete=models.CASCADE, related_name='votes')\n", (770, 824), False, 'from django.db import models\n'), ((868, 968), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.SET_NULL', 'related_name': '"""votes"""', 'null': '(True)', 'blank': '(False)'}), "(User, on_delete=models.SET_NULL, related_name='votes',\n null=True, blank=False)\n", (885, 968), False, 'from django.db import models\n'), ((1027, 1048), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1046, 1048), False, 'from django.db import models\n'), ((1064, 1085), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1083, 1085), False, 'from django.db import models\n')] |
from prometheus_client import Counter
from raiden.utils.typing import TokenAmount
from raiden_libs.metrics import ( # noqa: F401, pylint: disable=unused-import
ERRORS_LOGGED,
EVENTS_EXCEPTIONS_RAISED,
EVENTS_PROCESSING_TIME,
MESSAGES_EXCEPTIONS_RAISED,
MESSAGES_PROCESSING_TIME,
REGISTRY,
ErrorCategory,
MetricsEnum,
collect_event_metrics,
collect_message_metrics,
get_metrics_for_label,
)
class Who(MetricsEnum):
US = "us"
THEY = "they"
REWARD_CLAIMS = Counter(
"economics_reward_claims_successful_total",
"The number of overall successful reward claims",
labelnames=[Who.label_name()],
registry=REGISTRY,
)
REWARD_CLAIMS_TOKEN = Counter(
"economics_reward_claims_token_total",
"The amount of token earned by reward claims",
labelnames=[Who.label_name()],
registry=REGISTRY,
)
def report_increased_reward_claims(amount: TokenAmount, who: Who) -> None:
get_metrics_for_label(REWARD_CLAIMS, who).inc()
get_metrics_for_label(REWARD_CLAIMS_TOKEN, who).inc(float(amount))
| [
"raiden_libs.metrics.get_metrics_for_label"
] | [((950, 991), 'raiden_libs.metrics.get_metrics_for_label', 'get_metrics_for_label', (['REWARD_CLAIMS', 'who'], {}), '(REWARD_CLAIMS, who)\n', (971, 991), False, 'from raiden_libs.metrics import ERRORS_LOGGED, EVENTS_EXCEPTIONS_RAISED, EVENTS_PROCESSING_TIME, MESSAGES_EXCEPTIONS_RAISED, MESSAGES_PROCESSING_TIME, REGISTRY, ErrorCategory, MetricsEnum, collect_event_metrics, collect_message_metrics, get_metrics_for_label\n'), ((1002, 1049), 'raiden_libs.metrics.get_metrics_for_label', 'get_metrics_for_label', (['REWARD_CLAIMS_TOKEN', 'who'], {}), '(REWARD_CLAIMS_TOKEN, who)\n', (1023, 1049), False, 'from raiden_libs.metrics import ERRORS_LOGGED, EVENTS_EXCEPTIONS_RAISED, EVENTS_PROCESSING_TIME, MESSAGES_EXCEPTIONS_RAISED, MESSAGES_PROCESSING_TIME, REGISTRY, ErrorCategory, MetricsEnum, collect_event_metrics, collect_message_metrics, get_metrics_for_label\n')] |
# modified from TikNib/tiknib/ida/fetch_funcdata_v7.5.py
import os
import sys
import string
from hashlib import sha1
from collections import defaultdict
import time
import pprint as pp
import idautils
import idc
import idaapi
import ida_pro
import ida_nalt
import ida_bytes
sys.path.append(os.path.abspath("./TikNib"))
from tiknib.utils import demangle, get_arch, init_idc, parse_fname, store_func_data
printset = set(string.printable)
isprintable = lambda x: set(x).issubset(printset)
# find consts
def get_consts(start_addr, end_addr):
consts = []
for h in idautils.Heads(start_addr, end_addr):
insn = DecodeInstruction(h)
if insn:
for op in insn.ops:
if op.type == idaapi.o_imm:
# get operand value
imm_value = op.value
# check if addres is loaded in idb
if not ida_bytes.is_loaded(imm_value):
consts.append(imm_value)
return consts
# find strings
def get_strings(start_addr, end_addr):
strings = []
for h in idautils.Heads(start_addr, end_addr):
for ref in idautils.DataRefsFrom(h):
t = idc.get_str_type(ref)
if isinstance(t, int) and t >= 0:
s = idc.get_strlit_contents(ref)
if isinstance(s, bytes):
s = s.decode()
if s and isprintable(s):
strings.append([h, s, t, ref])
return strings
# This function returns a caller map, and callee map for each function.
def get_call_graph():
callee_map = defaultdict(list)
caller_map = defaultdict(list)
for callee_ea in idautils.Functions():
callee = idaapi.get_func(callee_ea)
# TODO: Sometimes, IDA returns false result. so we need to check this
if not callee:
continue
callee_name = idc.get_func_name(callee_ea)
# TODO: check flow boolean 1
for caller_ea in CodeRefsTo(callee_ea, 1):
caller = idaapi.get_func(caller_ea)
# TODO: Sometimes, IDA returns false result. so we need to check
if not caller:
continue
caller_name = idc.get_func_name(caller_ea)
# TODO: check the correction - caller_ea -> callee_ea
callee_map[caller_name].append([callee_name, callee_ea])
caller_map[callee_name].append([caller_name, caller_ea])
return caller_map, callee_map
# This function returns edges, and updates caller_map, and callee_map
def get_bb_graph(caller_map, callee_map):
edge_map = {}
bb_callee_map = {}
for func_ea in idautils.Functions():
func = idaapi.get_func(func_ea)
if not func or func.start_ea == idaapi.BADADDR or func.end_ea == idaapi.BADADDR:
continue
# TODO: study how to use flags
graph = idaapi.FlowChart(func, flags=idaapi.FC_PREDS)
func_name = idc.get_func_name(func.start_ea)
edge_map[func_name] = []
bb_callee_map[func_name] = []
for bb in graph:
if bb.start_ea == idaapi.BADADDR or bb.end_ea == idaapi.BADADDR:
continue
for succbb in bb.succs():
edge_map[func_name].append((bb.id, succbb.id))
for callee_name, callee_ea in callee_map[func_name]:
# Get address where current function calls a callee.
if bb.start_ea <= callee_ea < bb.end_ea:
bb_callee_map[func_name].append((bb.id, callee_name, callee_ea))
return edge_map, bb_callee_map
def get_type(addr):
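    """Return [function type, return type, argument info] for the function at addr."""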
tif = idaapi.tinfo_t()
ida_nalt.get_tinfo(tif, addr)
funcdata = idaapi.func_type_data_t()
tif.get_func_details(funcdata)
func_type = idaapi.print_tinfo("", 0, 0, PRTYPE_1LINE, tif, "", "")
ret_type = idaapi.print_tinfo("", 0, 0, PRTYPE_1LINE, funcdata.rettype, "", "")
args = []
for i in range(funcdata.size()):
arg_type = idaapi.print_tinfo("", 0, 0, PRTYPE_1LINE, funcdata[i].type, "", "")
args.append([i, funcdata[i].name, arg_type, funcdata[i].argloc.atype()])
return [func_type, ret_type, args]
def get_bin_path():
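    """Return the path of the analyzed binary, falling back to the .idb path
    (with the extension stripped) if the original file is missing."""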
bin_path = ida_nalt.get_input_file_path()
if not os.path.exists(bin_path):
bin_path = idc.get_idb_path().replace(".idb", "")
return bin_path
def main():
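    """Extract per-function metadata (hashes, CFG, strings, consts, call graph)
    from the binary loaded in IDA and return it as a list of dicts."""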
# Get IDA default information
bin_path = get_bin_path()
with open(bin_path, "rb") as f:
bin_hash = sha1(f.read()).hexdigest()
img_base = idaapi.get_imagebase()
info = idaapi.get_inf_structure()
if info.is_64bit():
bits = 64
elif info.is_32bit():
bits = 32
else:
bits = 16
endian = "little"
if info.is_be():
endian = "big"
arch = "_".join([info.procName, str(bits), endian])
arch = get_arch(arch)
package = ""
compiler = ""
opti = ""
other_option = ""
bin_name = os.path.basename(bin_path)
# Parse option information
# package, compiler, arch, opti, bin_name = parse_fname(bin_path)
# if "_noinline" in bin_path:
# other_option = "noinline"
# elif "_pie" in bin_path:
# other_option = "pie"
# elif "_lto" in bin_path:
# other_option = "lto"
# else:
# other_option = "normal"
# Prepare default information for processing
caller_map, callee_map = get_call_graph()
edge_map, bb_callee_map = get_bb_graph(caller_map, callee_map)
# Now extract function information
func_data = []
for idx, addr in enumerate(list(idautils.Functions())):
function = idaapi.get_func(addr)
if (
not function
or function.start_ea == idaapi.BADADDR
or function.end_ea == idaapi.BADADDR
):
continue
# IDA's default function information
func_name = get_func_name(addr).strip()
demangled_name, demangled_full_name = demangle(func_name)
graph = idaapi.FlowChart(function, flags=idaapi.FC_PREDS)
data = idc.get_bytes(addr, function.size()) or ""
data_hash = sha1(data).hexdigest()
stack_size = get_frame_size(addr)
# Get imported callees. Note that the segment name is used because
# idaapi.get_import_module_name() sometimes returns bad results ...
imported_callees = []
if func_name in callee_map:
imported_callees = list(
filter(
lambda x: get_segm_name(x[1]) != get_segm_name(addr),
callee_map[func_name],
)
)
# Get type information from IDA
func_type, ret_type, args = get_type(addr)
# Prepare basic block information for feature extraction
func_strings = []
func_consts = []
bb_data = []
for bb in graph:
if bb.start_ea == idaapi.BADADDR or bb.end_ea == idaapi.BADADDR:
continue
bb_size = bb.end_ea - bb.start_ea
block_data = idc.get_bytes(bb.start_ea, bb_size) or b""
block_data_hash = sha1(block_data).hexdigest()
bb_strings = get_strings(bb.start_ea, bb.end_ea)
bb_consts = get_consts(bb.start_ea, bb.end_ea)
bb_callees = list(filter(lambda x: x[0] == bb.id, bb_callee_map[func_name]))
bb_data.append(
{
"size": bb_size,
"block_id": bb.id,
"startEA": bb.start_ea,
"endEA": bb.end_ea,
"type": bb.type,
"is_ret": idaapi.is_ret_block(bb.type),
"hash": block_data_hash,
"callees": bb_callees,
"strings": bb_strings,
"consts": bb_consts,
}
)
func_strings.extend(bb_strings)
func_consts.extend(bb_consts)
func_data.append(
{
"ida_idx": idx,
"seg_name": get_segm_name(addr),
"name": func_name,
"demangled_name": demangled_name,
"demangled_full_name": demangled_full_name,
"hash": data_hash,
"size": function.size(),
"startEA": function.start_ea,
"endEA": function.end_ea,
"cfg_size": graph.size,
"img_base": img_base,
"bin_path": bin_path,
"bin_hash": bin_hash,
"bin_offset": addr - img_base,
"stack_size": stack_size,
"package": package,
"compiler": compiler,
"arch": arch,
"opti": opti,
"others": other_option,
"bin_name": bin_name,
"func_type": func_type,
"ret_type": ret_type,
"args": args,
"callers": caller_map[func_name],
"callees": callee_map[func_name],
"imported_callees": imported_callees,
"cfg": edge_map[func_name],
"strings": func_strings,
"consts": func_consts,
"bb_data": bb_data,
}
)
return func_data
init_idc()
try:
func_data = main()
except:
import traceback
traceback.print_exc()
ida_pro.qexit(1)
else:
bin_path = get_bin_path()
store_func_data(bin_path, func_data)
ida_pro.qexit(0)
| [
"idc.get_idb_path",
"idautils.DataRefsFrom",
"idautils.Functions",
"hashlib.sha1",
"idc.get_func_name",
"os.path.exists",
"idc.get_str_type",
"idaapi.get_imagebase",
"tiknib.utils.store_func_data",
"ida_nalt.get_input_file_path",
"tiknib.utils.get_arch",
"traceback.print_exc",
"idaapi.get_fu... | [((9548, 9558), 'tiknib.utils.init_idc', 'init_idc', ([], {}), '()\n', (9556, 9558), False, 'from tiknib.utils import demangle, get_arch, init_idc, parse_fname, store_func_data\n'), ((312, 339), 'os.path.abspath', 'os.path.abspath', (['"""./TikNib"""'], {}), "('./TikNib')\n", (327, 339), False, 'import os\n'), ((600, 636), 'idautils.Heads', 'idautils.Heads', (['start_addr', 'end_addr'], {}), '(start_addr, end_addr)\n', (614, 636), False, 'import idautils\n'), ((1131, 1167), 'idautils.Heads', 'idautils.Heads', (['start_addr', 'end_addr'], {}), '(start_addr, end_addr)\n', (1145, 1167), False, 'import idautils\n'), ((1661, 1678), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1672, 1678), False, 'from collections import defaultdict\n'), ((1697, 1714), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1708, 1714), False, 'from collections import defaultdict\n'), ((1737, 1757), 'idautils.Functions', 'idautils.Functions', ([], {}), '()\n', (1755, 1757), False, 'import idautils\n'), ((2737, 2757), 'idautils.Functions', 'idautils.Functions', ([], {}), '()\n', (2755, 2757), False, 'import idautils\n'), ((3735, 3751), 'idaapi.tinfo_t', 'idaapi.tinfo_t', ([], {}), '()\n', (3749, 3751), False, 'import idaapi\n'), ((3757, 3786), 'ida_nalt.get_tinfo', 'ida_nalt.get_tinfo', (['tif', 'addr'], {}), '(tif, addr)\n', (3775, 3786), False, 'import ida_nalt\n'), ((3803, 3828), 'idaapi.func_type_data_t', 'idaapi.func_type_data_t', ([], {}), '()\n', (3826, 3828), False, 'import idaapi\n'), ((3882, 3937), 'idaapi.print_tinfo', 'idaapi.print_tinfo', (['""""""', '(0)', '(0)', 'PRTYPE_1LINE', 'tif', '""""""', '""""""'], {}), "('', 0, 0, PRTYPE_1LINE, tif, '', '')\n", (3900, 3937), False, 'import idaapi\n'), ((3954, 4022), 'idaapi.print_tinfo', 'idaapi.print_tinfo', (['""""""', '(0)', '(0)', 'PRTYPE_1LINE', 'funcdata.rettype', '""""""', '""""""'], {}), "('', 0, 0, PRTYPE_1LINE, funcdata.rettype, '', '')\n", (3972, 4022), False, 'import idaapi\n'), ((4328, 4358), 'ida_nalt.get_input_file_path', 'ida_nalt.get_input_file_path', ([], {}), '()\n', (4356, 4358), False, 'import ida_nalt\n'), ((4660, 4682), 'idaapi.get_imagebase', 'idaapi.get_imagebase', ([], {}), '()\n', (4680, 4682), False, 'import idaapi\n'), ((4695, 4721), 'idaapi.get_inf_structure', 'idaapi.get_inf_structure', ([], {}), '()\n', (4719, 4721), False, 'import idaapi\n'), ((4982, 4996), 'tiknib.utils.get_arch', 'get_arch', (['arch'], {}), '(arch)\n', (4990, 4996), False, 'from tiknib.utils import demangle, get_arch, init_idc, parse_fname, store_func_data\n'), ((5090, 5116), 'os.path.basename', 'os.path.basename', (['bin_path'], {}), '(bin_path)\n', (5106, 5116), False, 'import os\n'), ((9714, 9750), 'tiknib.utils.store_func_data', 'store_func_data', (['bin_path', 'func_data'], {}), '(bin_path, func_data)\n', (9729, 9750), False, 'from tiknib.utils import demangle, get_arch, init_idc, parse_fname, store_func_data\n'), ((9756, 9772), 'ida_pro.qexit', 'ida_pro.qexit', (['(0)'], {}), '(0)\n', (9769, 9772), False, 'import ida_pro\n'), ((1189, 1213), 'idautils.DataRefsFrom', 'idautils.DataRefsFrom', (['h'], {}), '(h)\n', (1210, 1213), False, 'import idautils\n'), ((1777, 1803), 'idaapi.get_func', 'idaapi.get_func', (['callee_ea'], {}), '(callee_ea)\n', (1792, 1803), False, 'import idaapi\n'), ((1954, 1982), 'idc.get_func_name', 'idc.get_func_name', (['callee_ea'], {}), '(callee_ea)\n', (1971, 1982), False, 'import idc\n'), ((2775, 2799), 'idaapi.get_func', 'idaapi.get_func', (['func_ea'], {}), 
'(func_ea)\n', (2790, 2799), False, 'import idaapi\n'), ((2971, 3016), 'idaapi.FlowChart', 'idaapi.FlowChart', (['func'], {'flags': 'idaapi.FC_PREDS'}), '(func, flags=idaapi.FC_PREDS)\n', (2987, 3016), False, 'import idaapi\n'), ((3038, 3070), 'idc.get_func_name', 'idc.get_func_name', (['func.start_ea'], {}), '(func.start_ea)\n', (3055, 3070), False, 'import idc\n'), ((4096, 4164), 'idaapi.print_tinfo', 'idaapi.print_tinfo', (['""""""', '(0)', '(0)', 'PRTYPE_1LINE', 'funcdata[i].type', '""""""', '""""""'], {}), "('', 0, 0, PRTYPE_1LINE, funcdata[i].type, '', '')\n", (4114, 4164), False, 'import idaapi\n'), ((4371, 4395), 'os.path.exists', 'os.path.exists', (['bin_path'], {}), '(bin_path)\n', (4385, 4395), False, 'import os\n'), ((5774, 5795), 'idaapi.get_func', 'idaapi.get_func', (['addr'], {}), '(addr)\n', (5789, 5795), False, 'import idaapi\n'), ((6116, 6135), 'tiknib.utils.demangle', 'demangle', (['func_name'], {}), '(func_name)\n', (6124, 6135), False, 'from tiknib.utils import demangle, get_arch, init_idc, parse_fname, store_func_data\n'), ((6153, 6202), 'idaapi.FlowChart', 'idaapi.FlowChart', (['function'], {'flags': 'idaapi.FC_PREDS'}), '(function, flags=idaapi.FC_PREDS)\n', (6169, 6202), False, 'import idaapi\n'), ((9627, 9648), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (9646, 9648), False, 'import traceback\n'), ((9654, 9670), 'ida_pro.qexit', 'ida_pro.qexit', (['(1)'], {}), '(1)\n', (9667, 9670), False, 'import ida_pro\n'), ((1232, 1253), 'idc.get_str_type', 'idc.get_str_type', (['ref'], {}), '(ref)\n', (1248, 1253), False, 'import idc\n'), ((2095, 2121), 'idaapi.get_func', 'idaapi.get_func', (['caller_ea'], {}), '(caller_ea)\n', (2110, 2121), False, 'import idaapi\n'), ((2283, 2311), 'idc.get_func_name', 'idc.get_func_name', (['caller_ea'], {}), '(caller_ea)\n', (2300, 2311), False, 'import idc\n'), ((5730, 5750), 'idautils.Functions', 'idautils.Functions', ([], {}), '()\n', (5748, 5750), False, 'import idautils\n'), ((1322, 1350), 'idc.get_strlit_contents', 'idc.get_strlit_contents', (['ref'], {}), '(ref)\n', (1345, 1350), False, 'import idc\n'), ((4417, 4435), 'idc.get_idb_path', 'idc.get_idb_path', ([], {}), '()\n', (4433, 4435), False, 'import idc\n'), ((6283, 6293), 'hashlib.sha1', 'sha1', (['data'], {}), '(data)\n', (6287, 6293), False, 'from hashlib import sha1\n'), ((7231, 7266), 'idc.get_bytes', 'idc.get_bytes', (['bb.start_ea', 'bb_size'], {}), '(bb.start_ea, bb_size)\n', (7244, 7266), False, 'import idc\n'), ((7305, 7321), 'hashlib.sha1', 'sha1', (['block_data'], {}), '(block_data)\n', (7309, 7321), False, 'from hashlib import sha1\n'), ((7827, 7855), 'idaapi.is_ret_block', 'idaapi.is_ret_block', (['bb.type'], {}), '(bb.type)\n', (7846, 7855), False, 'import idaapi\n'), ((938, 968), 'ida_bytes.is_loaded', 'ida_bytes.is_loaded', (['imm_value'], {}), '(imm_value)\n', (957, 968), False, 'import ida_bytes\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: MIT-0
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this
* software and associated documentation files (the "Software"), to deal in the Software
* without restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from functools import lru_cache
import json
from typing import List, Dict, Optional, Iterable, Any
from aws_lambda_powertools import Logger
import boto3
import botocore
from ..constants import AI_OPT_OUT_POLICY_NAME, AI_OPT_OUT_POLICY
from ..exceptions import OrganizationNotFoundException
logger = Logger(child=True)
__all__ = ["Organizations"]
class Organizations:
def __init__(self, session: boto3.Session, region: str) -> None:
self.client = session.client("organizations", region_name=region)
self.region = region
self._roots = []
self._accounts = []
def describe_organization(self) -> Dict[str, Any]:
"""
Describe the organization the account belongs to
"""
try:
response = self.client.describe_organization()
except self.client.exceptions.AWSOrganizationsNotInUseException:
raise OrganizationNotFoundException("Organization Not Found")
except botocore.exceptions.ClientError:
logger.exception(f"[{self.region} Unable to describe organization")
raise
return response["Organization"]
def list_accounts(self) -> List[Dict[str, str]]:
"""
List all of the accounts in an organization
"""
if self._accounts:
return self._accounts
accounts = []
paginator = self.client.get_paginator("list_accounts")
page_iterator = paginator.paginate(PaginationConfig={"PageSize": 20})
for page in page_iterator:
for account in page.get("Accounts", []):
if account.get("Status") != "ACTIVE":
continue
accounts.append(account)
self._accounts = accounts
return accounts
def list_policies(self, policy_type: str) -> List[Dict[str, str]]:
"""
List all of the policies in an organization
"""
policies = []
paginator = self.client.get_paginator("list_policies")
page_iterator = paginator.paginate(Filter=policy_type)
for page in page_iterator:
policies.extend(page.get("Policies", []))
return policies
def list_roots(self) -> List[Dict[str, str]]:
"""
List all the roots in an organization
"""
if self._roots:
return self._roots
roots = []
paginator = self.client.get_paginator("list_roots")
page_iterator = paginator.paginate()
for page in page_iterator:
roots.extend(page.get("Roots", []))
self._roots = roots
return roots
def enable_all_features(self) -> None:
"""
Enable all features in an organization
"""
logger.info(f"[{self.region}] Enabling all features in the organization")
try:
self.client.enable_all_features()
logger.debug(f"[{self.region}] Enabled all features in organization")
except botocore.exceptions.ClientError as error:
if (
error.response["Error"]["Code"]
!= "HandshakeConstraintViolationException"
):
logger.exception(
f"[{self.region}] Unable to enable all features in organization"
)
raise
def enable_aws_service_access(self, principals: Iterable[str]) -> None:
"""
Enable AWS service access in organization
"""
for principal in principals:
logger.info(f"[{self.region}] Enabling AWS service access for {principal}")
try:
self.client.enable_aws_service_access(ServicePrincipal=principal)
logger.debug(
f"[{self.region}] Enabled AWS service access for {principal}"
)
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] != "ServiceException":
logger.exception(
                        f"[{self.region}] Unable to enable AWS service access for {principal}"
)
raise error
def enable_all_policy_types(self) -> None:
"""
Enables all policy types in an organization
"""
logger.info(f"[{self.region}] Enabling all policy types in organization")
for root in self.list_roots():
root_id = root["Id"]
disabled_types = [
policy_type.get("Type")
for policy_type in root.get("PolicyTypes", [])
if policy_type.get("Status") != "ENABLED"
]
for disabled_type in disabled_types:
logger.info(
f"[{self.region}] Enabling policy type {disabled_type} on root {root_id}"
)
try:
self.client.enable_policy_type(
RootId=root_id, PolicyType=disabled_type
)
logger.debug(
f"[{self.region}] Enabled policy type {disabled_type} on root {root_id}"
)
except botocore.exceptions.ClientError as error:
if (
error.response["Error"]["Code"]
!= "PolicyTypeAlreadyEnabledException"
):
logger.exception(
f"[{self.region}] Unable to enable policy type"
)
raise error
logger.debug(f"[{self.region}] Enabled all policy types in organization")
def get_ai_optout_policy(self) -> str:
"""
Return the AI opt-out policy ID
"""
for policy in self.list_policies("AISERVICES_OPT_OUT_POLICY"):
if policy["Name"] == AI_OPT_OUT_POLICY_NAME:
logger.info(f"Found existing {AI_OPT_OUT_POLICY_NAME} policy")
return policy["Id"]
logger.info(
f"[{self.region}] {AI_OPT_OUT_POLICY_NAME} policy not found, creating"
)
try:
response = self.client.create_policy(
Content=json.dumps(AI_OPT_OUT_POLICY),
Description="Opt-out of all AI services",
Name=AI_OPT_OUT_POLICY_NAME,
Type="AISERVICES_OPT_OUT_POLICY",
)
policy_id = response.get("Policy", {}).get("PolicySummary", {}).get("Id")
logger.debug(
f"[{self.region}] Created policy {AI_OPT_OUT_POLICY_NAME} ({policy_id})"
)
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] == "DuplicatePolicyException":
return self.get_ai_optout_policy()
raise error
return policy_id
def attach_ai_optout_policy(self) -> None:
"""
Attach the AI opt-out policy to the root
"""
policy_id = self.get_ai_optout_policy()
if not policy_id:
logger.warn(
f"[{self.region}] Unable to find {AI_OPT_OUT_POLICY_NAME} policy"
)
return
for root in self.list_roots():
root_id = root["Id"]
logger.info(
f"[{self.region}] Attaching {AI_OPT_OUT_POLICY_NAME} ({policy_id}) to root {root_id}"
)
try:
self.client.attach_policy(PolicyId=policy_id, TargetId=root_id)
logger.debug(
f"[{self.region}] Attached {AI_OPT_OUT_POLICY_NAME} ({policy_id}) to root {root_id}"
)
except botocore.exceptions.ClientError as error:
if (
error.response["Error"]["Code"]
!= "DuplicatePolicyAttachmentException"
):
logger.exception(f"[{self.region}] Unable to attach policy")
raise error
def register_delegated_administrator(
self, account_id: str, principals: Iterable[str]
) -> None:
"""
Register a delegated administrator
"""
for principal in principals:
logger.info(
f"[{self.region}] Registering {account_id} as a delegated administrator for {principal}"
)
try:
self.client.register_delegated_administrator(
AccountId=account_id, ServicePrincipal=principal
)
logger.debug(
f"[{self.region}] Registered {account_id} as a delegated administrator for {principal}"
)
except botocore.exceptions.ClientError as error:
if (
error.response["Error"]["Code"]
!= "AccountAlreadyRegisteredException"
):
logger.exception(
f"[{self.region}] Unable to register {account_id} as a delegated administrator for {principal}"
)
raise error
@lru_cache
def get_account_id(self, name: str) -> Optional[str]:
"""
Return the Account ID for an account
"""
for account in self.list_accounts():
if account.get("Name") == name:
return account["Id"]
return None
| [
"json.dumps",
"aws_lambda_powertools.Logger"
] | [((1343, 1361), 'aws_lambda_powertools.Logger', 'Logger', ([], {'child': '(True)'}), '(child=True)\n', (1349, 1361), False, 'from aws_lambda_powertools import Logger\n'), ((7209, 7238), 'json.dumps', 'json.dumps', (['AI_OPT_OUT_POLICY'], {}), '(AI_OPT_OUT_POLICY)\n', (7219, 7238), False, 'import json\n')] |
import numpy
import pandas
import hts.hierarchy
from hts.functions import (
_create_bl_str_col,
get_agg_series,
get_hierarchichal_df,
to_sum_mat,
)
def test_sum_mat_uv(uv_tree):
mat, sum_mat_labels = to_sum_mat(uv_tree)
assert isinstance(mat, numpy.ndarray)
shp = mat.shape
assert shp[0] == uv_tree.num_nodes() + 1
assert shp[1] == uv_tree.leaf_sum()
def test_sum_mat_mv(mv_tree):
mat, sum_mat_labels = to_sum_mat(mv_tree)
assert isinstance(mat, numpy.ndarray)
shp = mat.shape
assert shp[0] == mv_tree.num_nodes() + 1
assert shp[1] == mv_tree.leaf_sum()
def test_sum_mat_hierarchical():
hierarchy = {"total": ["A", "B"], "A": ["A_X", "A_Y", "A_Z"], "B": ["B_X", "B_Y"]}
hier_df = pandas.DataFrame(
data={
"total": [],
"A": [],
"B": [],
"A_X": [],
"A_Y": [],
"A_Z": [],
"B_X": [],
"B_Y": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(hierarchy, hier_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1, 1], # total
[0, 0, 0, 1, 1], # B
[1, 1, 1, 0, 0], # A
[1, 0, 0, 0, 0], # A_X
[0, 1, 0, 0, 0], # A_Y
[0, 0, 1, 0, 0], # A_Z
[0, 0, 0, 1, 0], # B_X
[0, 0, 0, 0, 1],
]
) # B_Y
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
assert sum_mat_labels == ["total", "B", "A", "A_X", "A_Y", "A_Z", "B_X", "B_Y"]
def test_sum_mat_grouped():
hierarchy = {
"total": ["A", "B", "X", "Y"],
"A": ["A_X", "A_Y"],
"B": ["B_X", "B_Y"],
}
grouped_df = pandas.DataFrame(
data={
"total": [],
"A": [],
"B": [],
"X": [],
"Y": [],
"A_X": [],
"A_Y": [],
"B_X": [],
"B_Y": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(hierarchy, grouped_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1], # total
[0, 1, 0, 1], # Y
[1, 0, 1, 0], # X
[0, 0, 1, 1], # B
[1, 1, 0, 0], # A
[1, 0, 0, 0], # A_X
[0, 1, 0, 0], # A_Y
[0, 0, 1, 0], # B_X
[0, 0, 0, 1], # B_Y
]
)
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
assert sum_mat_labels == ["total", "Y", "X", "B", "A", "A_X", "A_Y", "B_X", "B_Y"]
def test_sum_mat_visnights_hier(visnights_hier):
hier_df = pandas.DataFrame(
data={
"total": [],
"VIC": [],
"QLD": [],
"SAU": [],
"WAU": [],
"OTH": [],
"NSW": [],
"NSW_Metro": [],
"NSW_NthCo": [],
"NSW_NthIn": [],
"NSW_SthCo": [],
"NSW_SthIn": [],
"OTH_Metro": [],
"OTH_NoMet": [],
"QLD_Cntrl": [],
"QLD_Metro": [],
"QLD_NthCo": [],
"SAU_Coast": [],
"SAU_Inner": [],
"SAU_Metro": [],
"VIC_EstCo": [],
"VIC_Inner": [],
"VIC_Metro": [],
"VIC_WstCo": [],
"WAU_Coast": [],
"WAU_Inner": [],
"WAU_Metro": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(visnights_hier, hier_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], # total
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], # VIC
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], # QLD
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], # SAU
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # OTH
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_Metro
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_NthCo
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_NthIn
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_SthCo
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_SthIn
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # OTH_Metro
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # OTH_NoMet
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU_Coast
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU_Inner
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # SAU_Coast
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], # SAU_Inner
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], # SAU_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], # QLD_Cntrl
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], # QLD_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], # QLD_NthCo
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], # VIC_EstCo
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], # VIC_Inner
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], # VIC_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], # VIC_WstCo
]
)
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
def test_demo_unique_constraint():
# Example https://otexts.com/fpp2/hts.html
# Does not work when you have elements that are named the same, but represent
# different levels in the hierarchy. See expected_sum_mat below for example.
hierarchy = {"total": ["A", "B"], "A": ["AA", "AB", "AC"], "B": ["BA", "BB"]}
hier_df = pandas.DataFrame(
data={
"total": [],
"A": [],
"B": [],
"AA": [],
"AB": [],
"AC": [],
"BA": [],
"BB": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(hierarchy, hier_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1, 1], # total
[0, 1, 0, 1, 1], # B, Incorrectly finds B in AB
[1, 1, 1, 1, 0], # A, Incorrectly finds A in BA
[1, 0, 0, 0, 0], # AA
[0, 1, 0, 0, 0], # AB
[0, 0, 1, 0, 0], # AC
[0, 0, 0, 1, 0], # BA
[0, 0, 0, 0, 1], # BB
]
)
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
def test_1lev():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B", "B"], "lev2": ["X", "Y", "X", "Y"],}
)
levels = get_agg_series(grouped_df, [["lev1"]])
expected_levels = ["A", "B"]
assert sorted(levels) == sorted(expected_levels)
levels = get_agg_series(grouped_df, [["lev2"]])
expected_levels = ["X", "Y"]
assert sorted(levels) == sorted(expected_levels)
def test_2lev():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B", "B"], "lev2": ["X", "Y", "X", "Y"],}
)
levels = get_agg_series(grouped_df, [["lev1", "lev2"]])
expected_levels = ["A_X", "A_Y", "B_X", "B_Y"]
assert sorted(levels) == sorted(expected_levels)
def test_hierarchichal():
hier_df = pandas.DataFrame(
data={"lev1": ["A", "A", "A", "B", "B"], "lev2": ["X", "Y", "Z", "X", "Y"],}
)
levels = get_agg_series(hier_df, [["lev1"], ["lev1", "lev2"]])
expected_levels = ["A", "B", "A_X", "A_Y", "A_Z", "B_X", "B_Y"]
assert sorted(levels) == sorted(expected_levels)
def test_grouped():
hier_df = pandas.DataFrame(
data={"lev1": ["A", "A", "A", "B", "B"], "lev2": ["X", "Y", "Z", "X", "Y"],}
)
hierarchy = [["lev1"], ["lev2"], ["lev1", "lev2"]]
levels = get_agg_series(hier_df, hierarchy)
expected_levels = ["A", "B", "X", "Y", "Z", "A_X", "A_Y", "A_Z", "B_X", "B_Y"]
assert sorted(levels) == sorted(expected_levels)
def test_grouped_create_df():
hier_df = pandas.DataFrame(
data={
"ds": ["2020-01", "2020-02"] * 5,
"lev1": ["A", "A", "A", "A", "A", "A", "B", "B", "B", "B"],
"lev2": ["X", "X", "Y", "Y", "Z", "Z", "X", "X", "Y", "Y"],
"val": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
}
)
level_names = ["lev1", "lev2"]
hierarchy = [["lev1"], ["lev2"]]
gts_df, sum_mat, sum_mat_labels = get_hierarchichal_df(
hier_df,
level_names=level_names,
hierarchy=hierarchy,
date_colname="ds",
val_colname="val",
)
expected_columns = [
"A_X",
"A_Y",
"A_Z",
"B_X",
"B_Y",
"A",
"B",
"X",
"Y",
"Z",
"total",
]
assert sorted(list(gts_df.columns)) == sorted(expected_columns)
def test_parent_child():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B"], "lev2": ["X", "Y", "Z"],}
)
levels = get_agg_series(grouped_df, [["lev1", "lev2"]])
expected_levels = ["A_X", "A_Y", "B_Z"]
assert sorted(levels) == sorted(expected_levels)
def test_create_bl_str_col():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B"], "lev2": ["X", "Y", "Z"],}
)
col = _create_bl_str_col(grouped_df, ["lev1", "lev2"])
assert col == ["A_X", "A_Y", "B_Z"]
| [
"hts.functions.get_hierarchichal_df",
"hts.functions._create_bl_str_col",
"hts.functions.get_agg_series",
"numpy.array",
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"hts.functions.to_sum_mat"
] | [((223, 242), 'hts.functions.to_sum_mat', 'to_sum_mat', (['uv_tree'], {}), '(uv_tree)\n', (233, 242), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((448, 467), 'hts.functions.to_sum_mat', 'to_sum_mat', (['mv_tree'], {}), '(mv_tree)\n', (458, 467), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((751, 864), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'total': [], 'A': [], 'B': [], 'A_X': [], 'A_Y': [], 'A_Z': [], 'B_X': [],\n 'B_Y': []}"}), "(data={'total': [], 'A': [], 'B': [], 'A_X': [], 'A_Y': [],\n 'A_Z': [], 'B_X': [], 'B_Y': []})\n", (767, 864), False, 'import pandas\n'), ((1083, 1099), 'hts.functions.to_sum_mat', 'to_sum_mat', (['tree'], {}), '(tree)\n', (1093, 1099), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((1124, 1277), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1, 1], [0, 0, 0, 1, 1], [1, 1, 1, 0, 0], [1, 0, 0, 0, 0], [0, 1,\n 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]]'], {}), '([[1, 1, 1, 1, 1], [0, 0, 0, 1, 1], [1, 1, 1, 0, 0], [1, 0, 0, 0,\n 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]])\n', (1135, 1277), False, 'import numpy\n'), ((1454, 1513), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['sum_mat', 'expected_sum_mat'], {}), '(sum_mat, expected_sum_mat)\n', (1486, 1513), False, 'import numpy\n'), ((1766, 1886), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'total': [], 'A': [], 'B': [], 'X': [], 'Y': [], 'A_X': [], 'A_Y': [],\n 'B_X': [], 'B_Y': []}"}), "(data={'total': [], 'A': [], 'B': [], 'X': [], 'Y': [],\n 'A_X': [], 'A_Y': [], 'B_X': [], 'B_Y': []})\n", (1782, 1886), False, 'import pandas\n'), ((2120, 2136), 'hts.functions.to_sum_mat', 'to_sum_mat', (['tree'], {}), '(tree)\n', (2130, 2136), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((2161, 2304), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 0], [1, \n 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 1, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1,\n 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (2172, 2304), False, 'import numpy\n'), ((2496, 2555), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['sum_mat', 'expected_sum_mat'], {}), '(sum_mat, expected_sum_mat)\n', (2528, 2555), False, 'import numpy\n'), ((2708, 3174), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'total': [], 'VIC': [], 'QLD': [], 'SAU': [], 'WAU': [], 'OTH': [], 'NSW':\n [], 'NSW_Metro': [], 'NSW_NthCo': [], 'NSW_NthIn': [], 'NSW_SthCo': [],\n 'NSW_SthIn': [], 'OTH_Metro': [], 'OTH_NoMet': [], 'QLD_Cntrl': [],\n 'QLD_Metro': [], 'QLD_NthCo': [], 'SAU_Coast': [], 'SAU_Inner': [],\n 'SAU_Metro': [], 'VIC_EstCo': [], 'VIC_Inner': [], 'VIC_Metro': [],\n 'VIC_WstCo': [], 'WAU_Coast': [], 'WAU_Inner': [], 'WAU_Metro': []}"}), "(data={'total': [], 'VIC': [], 'QLD': [], 'SAU': [], 'WAU':\n [], 'OTH': [], 'NSW': [], 'NSW_Metro': [], 'NSW_NthCo': [], 'NSW_NthIn':\n [], 'NSW_SthCo': [], 'NSW_SthIn': [], 'OTH_Metro': [], 'OTH_NoMet': [],\n 'QLD_Cntrl': [], 'QLD_Metro': [], 'QLD_NthCo': [], 'SAU_Coast': [],\n 'SAU_Inner': [], 'SAU_Metro': [], 'VIC_EstCo': [], 'VIC_Inner': [],\n 'VIC_Metro': [], 'VIC_WstCo': [], 'WAU_Coast': [], 'WAU_Inner': [],\n 'WAU_Metro': []})\n", (2724, 
3174), False, 'import pandas\n'), ((3606, 3622), 'hts.functions.to_sum_mat', 'to_sum_mat', (['tree'], {}), '(tree)\n', (3616, 3622), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((3647, 5443), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, \n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, \n 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0,\n 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, \n 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])\n', (3658, 5443), False, 'import numpy\n'), ((6000, 6059), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['sum_mat', 'expected_sum_mat'], {}), '(sum_mat, expected_sum_mat)\n', (6032, 6059), False, 'import numpy\n'), ((6403, 6511), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'total': [], 'A': [], 'B': [], 'AA': [], 'AB': [], 'AC': [], 'BA': [],\n 'BB': []}"}), "(data={'total': [], 'A': [], 'B': [], 'AA': [], 'AB': [],\n 'AC': [], 'BA': [], 'BB': []})\n", (6419, 6511), False, 'import pandas\n'), ((6730, 6746), 'hts.functions.to_sum_mat', 'to_sum_mat', (['tree'], {}), '(tree)\n', (6740, 6746), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((6771, 6924), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1, 1], [0, 1, 0, 1, 1], [1, 1, 1, 1, 0], [1, 0, 0, 0, 0], [0, 1,\n 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]]'], {}), '([[1, 1, 1, 1, 1], [0, 1, 0, 1, 1], [1, 1, 1, 1, 0], [1, 0, 0, 0,\n 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]])\n', (6782, 6924), False, 'import numpy\n'), ((7150, 7209), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['sum_mat', 'expected_sum_mat'], {}), '(sum_mat, expected_sum_mat)\n', (7182, 7209), False, 'import numpy\n'), ((7246, 7333), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'X', 'Y']}"}), "(data={'lev1': ['A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'X',\n 'Y']})\n", (7262, 7333), False, 'import pandas\n'), ((7359, 7397), 'hts.functions.get_agg_series', 'get_agg_series', (['grouped_df', "[['lev1']]"], {}), "(grouped_df, [['lev1']])\n", (7373, 7397), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((7498, 7536), 'hts.functions.get_agg_series', 'get_agg_series', (['grouped_df', "[['lev2']]"], {}), "(grouped_df, [['lev2']])\n", (7512, 7536), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((7659, 7746), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'X', 'Y']}"}), "(data={'lev1': ['A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'X',\n 'Y']})\n", (7675, 7746), False, 'import pandas\n'), ((7772, 7818), 'hts.functions.get_agg_series', 'get_agg_series', (['grouped_df', "[['lev1', 'lev2']]"], {}), "(grouped_df, [['lev1', 'lev2']])\n", (7786, 7818), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((7967, 8064), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'Z', 'X', 'Y']}"}), "(data={'lev1': ['A', 'A', 'A', 'B', 'B'], 'lev2': ['X', 'Y',\n 'Z', 'X', 'Y']})\n", (7983, 8064), False, 'import pandas\n'), ((8090, 8143), 'hts.functions.get_agg_series', 'get_agg_series', (['hier_df', "[['lev1'], ['lev1', 'lev2']]"], {}), "(hier_df, [['lev1'], ['lev1', 'lev2']])\n", (8104, 8143), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((8301, 8398), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'Z', 'X', 'Y']}"}), "(data={'lev1': ['A', 'A', 'A', 
'B', 'B'], 'lev2': ['X', 'Y',\n 'Z', 'X', 'Y']})\n", (8317, 8398), False, 'import pandas\n'), ((8479, 8513), 'hts.functions.get_agg_series', 'get_agg_series', (['hier_df', 'hierarchy'], {}), '(hier_df, hierarchy)\n', (8493, 8513), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((8696, 8921), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'ds': ['2020-01', '2020-02'] * 5, 'lev1': ['A', 'A', 'A', 'A', 'A', 'A',\n 'B', 'B', 'B', 'B'], 'lev2': ['X', 'X', 'Y', 'Y', 'Z', 'Z', 'X', 'X',\n 'Y', 'Y'], 'val': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}"}), "(data={'ds': ['2020-01', '2020-02'] * 5, 'lev1': ['A', 'A',\n 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B'], 'lev2': ['X', 'X', 'Y', 'Y',\n 'Z', 'Z', 'X', 'X', 'Y', 'Y'], 'val': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})\n", (8712, 8921), False, 'import pandas\n'), ((9098, 9215), 'hts.functions.get_hierarchichal_df', 'get_hierarchichal_df', (['hier_df'], {'level_names': 'level_names', 'hierarchy': 'hierarchy', 'date_colname': '"""ds"""', 'val_colname': '"""val"""'}), "(hier_df, level_names=level_names, hierarchy=hierarchy,\n date_colname='ds', val_colname='val')\n", (9118, 9215), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((9560, 9633), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'B'], 'lev2': ['X', 'Y', 'Z']}"}), "(data={'lev1': ['A', 'A', 'B'], 'lev2': ['X', 'Y', 'Z']})\n", (9576, 9633), False, 'import pandas\n'), ((9663, 9709), 'hts.functions.get_agg_series', 'get_agg_series', (['grouped_df', "[['lev1', 'lev2']]"], {}), "(grouped_df, [['lev1', 'lev2']])\n", (9677, 9709), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((9856, 9929), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'B'], 'lev2': ['X', 'Y', 'Z']}"}), "(data={'lev1': ['A', 'A', 'B'], 'lev2': ['X', 'Y', 'Z']})\n", (9872, 9929), False, 'import pandas\n'), ((9956, 10004), 'hts.functions._create_bl_str_col', '_create_bl_str_col', (['grouped_df', "['lev1', 'lev2']"], {}), "(grouped_df, ['lev1', 'lev2'])\n", (9974, 10004), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n')] |
import os
import warnings
warnings.simplefilter('always')
test_dir = os.path.dirname(os.path.abspath(__file__))
DATABASES = {
'default': {
'NAME': os.path.join(test_dir, 'db.sqlite'),
'ENGINE': 'django.db.backends.sqlite3',
},
}
USE_I18N = True
USE_L10N = True
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django_t10e',
'tests',
]
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
MIDDLEWARE_CLASSES = ()
TEMPLATE_DIRS = (
os.path.join(test_dir, 'templates'),
)
STATIC_URL = '/static/'
SECRET_KEY = '0'
SITE_ID = 1
| [
"warnings.simplefilter",
"os.path.abspath",
"os.path.join"
] | [((26, 57), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (47, 57), False, 'import warnings\n'), ((86, 111), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'import os\n'), ((549, 584), 'os.path.join', 'os.path.join', (['test_dir', '"""templates"""'], {}), "(test_dir, 'templates')\n", (561, 584), False, 'import os\n'), ((161, 196), 'os.path.join', 'os.path.join', (['test_dir', '"""db.sqlite"""'], {}), "(test_dir, 'db.sqlite')\n", (173, 196), False, 'import os\n')] |
import json
import logging
import unittest
from typing import Dict
from SPARQLWrapper import JSONLD
from obasparql.query_manager import QueryManager, QUERIES_TYPES, QUERY_TYPE_GET_ONE_USER
from obasparql.utils import generate_uri
from tests.settings import *
logger = logging.getLogger('testing')
graph_user = generate_uri(model_catalog_graph_base, "<EMAIL>")
class TestQueryManager(unittest.TestCase):
def setUp(self):
self.query_manager = QueryManager(queries_dir=model_catalog_queries,
context_dir=model_catalog_context,
endpoint=model_catalog_endpoint,
named_graph_base=model_catalog_graph_base,
uri_prefix=model_catalog_prefix)
def test_dispatch_sparqlquery(self):
endpoint = "http://dbpedia.org/sparql"
query_template = '''
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
CONSTRUCT {
<http://dbpedia.org/resource/Indemnity_Act_1717> ?predicate ?prop .
?prop a ?type .
?prop rdfs:label ?label
}
WHERE {
<http://dbpedia.org/resource/Indemnity_Act_1717> ?predicate ?prop
OPTIONAL {
?prop a ?type
OPTIONAL {
?prop rdfs:label ?label
}
}
}
'''
results = self.query_manager.dispatch_sparql_query(raw_sparql_query=query_template,
request_args={},
return_format=JSONLD)
self.assertIsNotNone(json.loads(results))
def test_dispatch_sparqlquery_model_catalog(self):
"""
        Testing retrieval of the resource United_States, which is a Region.
"""
owl_class_name = "Region"
owl_resource_iri = "https://w3id.org/okn/i/mint/United_States"
query_directory = owl_class_name
query_type = QUERY_TYPE_GET_ONE_USER
request_args: Dict[str, str] = {
"resource": owl_resource_iri,
"g": graph_user
}
query_template = getattr(self.query_manager, query_directory)[query_type]
results = self.query_manager.dispatch_sparql_query(raw_sparql_query=query_template,
request_args=request_args,
return_format=JSONLD)
self.assertIsNotNone(json.loads(results))
def test_framed_get_one(self):
owl_class_uri = "https://w3id.org/okn/o/sdm#Region"
owl_resource_uri = "https://w3id.org/okn/i/mint/Travis"
response = '''{
"@graph" : [ {
"@id" : "https://w3id.org/okn/i/mint/Texas",
"@type" : "https://w3id.org/okn/o/sdm#Region",
"label" : "Texas (USA)"
}, {
"@id" : "https://w3id.org/okn/i/mint/Travis",
"@type" : "https://w3id.org/okn/o/sdm#Region",
"label" : "Travis",
"description" : "Travis (Texas)",
"partOf" : "https://w3id.org/okn/i/mint/Texas"
} ],
"@context" : {
"label" : {
"@id" : "http://www.w3.org/2000/01/rdf-schema#label"
},
"partOf" : {
"@id" : "https://w3id.org/okn/o/sdm#partOf",
"@type" : "@id"
},
"description" : {
"@id" : "https://w3id.org/okn/o/sd#description"
},
"sd" : "https://w3id.org/okn/o/sd#",
"rdfs" : "http://www.w3.org/2000/01/rdf-schema#"
}
}'''
framed = self.query_manager.frame_results(response, owl_class_uri, owl_resource_uri)
self.assertEqual(owl_resource_uri, framed[0]["id"])
def test_framed_get_one_reflexive(self):
owl_class_uri = "https://w3id.org/okn/o/sdm#Region"
owl_resource_uri = "https://w3id.org/okn/i/mint/United_States"
response = '''{
"@graph" : [ {
"@id" : "https://w3id.org/okn/i/mint/Texas",
"@type" : "https://w3id.org/okn/o/sdm#Region",
"label" : "Texas (USA)",
"description" : "Texas is the second largest state in the United States by area (after Alaska) and population (after California). Located in the South Central region, Texas shares borders with the states of Louisiana to the east, Arkansas to the northeast, Oklahoma to the north, New Mexico to the west, and the Mexican states of Chihuahua, Coahuila, Nuevo Leon, and Tamaulipas to the southwest, and has a coastline with the Gulf of Mexico to the southeast.",
"geo" : "https://w3id.org/okn/i/mint/Texas_Shape",
"partOf" : "https://w3id.org/okn/i/mint/United_States"
}, {
"@id" : "https://w3id.org/okn/i/mint/Texas_Shape",
"@type" : "https://w3id.org/okn/o/sdm#GeoShape",
"label" : "Bounding box for Texas region"
}, {
"@id" : "https://w3id.org/okn/i/mint/United_States",
"@type" : "https://w3id.org/okn/o/sdm#Region",
"label" : "United States of America"
}, {
"@id" : "https://w3id.org/okn/o/sdm#Region",
"@type" : "http://www.w3.org/2002/07/owl#Class"
} ],
"@context" : {
"partOf" : {
"@id" : "https://w3id.org/okn/o/sdm#partOf",
"@type" : "@id"
},
"geo" : {
"@id" : "https://w3id.org/okn/o/sdm#geo",
"@type" : "@id"
},
"description" : {
"@id" : "https://w3id.org/okn/o/sd#description"
},
"label" : {
"@id" : "http://www.w3.org/2000/01/rdf-schema#label"
},
"rdfs" : "http://www.w3.org/2000/01/rdf-schema#"
}
}
'''
framed = self.query_manager.frame_results(response, owl_class_uri, owl_resource_uri)
self.assertEqual(owl_resource_uri, framed[0]["id"])
if __name__ == '__main__':
unittest.main()
| [
"logging.getLogger",
"json.loads",
"obasparql.query_manager.QueryManager",
"obasparql.utils.generate_uri",
"unittest.main"
] | [((271, 299), 'logging.getLogger', 'logging.getLogger', (['"""testing"""'], {}), "('testing')\n", (288, 299), False, 'import logging\n'), ((313, 362), 'obasparql.utils.generate_uri', 'generate_uri', (['model_catalog_graph_base', '"""<EMAIL>"""'], {}), "(model_catalog_graph_base, '<EMAIL>')\n", (325, 362), False, 'from obasparql.utils import generate_uri\n'), ((5630, 5645), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5643, 5645), False, 'import unittest\n'), ((458, 658), 'obasparql.query_manager.QueryManager', 'QueryManager', ([], {'queries_dir': 'model_catalog_queries', 'context_dir': 'model_catalog_context', 'endpoint': 'model_catalog_endpoint', 'named_graph_base': 'model_catalog_graph_base', 'uri_prefix': 'model_catalog_prefix'}), '(queries_dir=model_catalog_queries, context_dir=\n model_catalog_context, endpoint=model_catalog_endpoint,\n named_graph_base=model_catalog_graph_base, uri_prefix=model_catalog_prefix)\n', (470, 658), False, 'from obasparql.query_manager import QueryManager, QUERIES_TYPES, QUERY_TYPE_GET_ONE_USER\n'), ((1666, 1685), 'json.loads', 'json.loads', (['results'], {}), '(results)\n', (1676, 1685), False, 'import json\n'), ((2539, 2558), 'json.loads', 'json.loads', (['results'], {}), '(results)\n', (2549, 2558), False, 'import json\n')] |
import argparse
import logging
log = logging.getLogger(__name__)
def get_main_parser():
parser = argparse.ArgumentParser(prog="tpi")
return parser
def main(argv=None):
parser = get_main_parser()
args = parser.parse_args(argv)
log.debug(args)
| [
"logging.getLogger",
"argparse.ArgumentParser"
] | [((38, 65), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (55, 65), False, 'import logging\n'), ((104, 139), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""tpi"""'}), "(prog='tpi')\n", (127, 139), False, 'import argparse\n')] |
from flask import render_template, Blueprint
hello_blueprint = Blueprint('hello', __name__)
@hello_blueprint.route('/')
@hello_blueprint.route('/hello')
def index():
return render_template('index.html')
| [
"flask.render_template",
"flask.Blueprint"
] | [((64, 92), 'flask.Blueprint', 'Blueprint', (['"""hello"""', '__name__'], {}), "('hello', __name__)\n", (73, 92), False, 'from flask import render_template, Blueprint\n'), ((179, 208), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (194, 208), False, 'from flask import render_template, Blueprint\n')] |
"""Query graph utilities."""
import re
from bmt import Toolkit
BMT = Toolkit()
def get_subcategories(category):
"""Get sub-categories, according to the Biolink model."""
return [
descendant.replace("_", "")
for descendant in BMT.get_descendants(category, formatted=True, reflexive=True)
]
def camelcase_to_snakecase(string):
"""Convert CamelCase to snake_case."""
return re.sub(r"(?<!^)(?=[A-Z])", "_", string).lower()
def get_subpredicates(predicate):
"""Get sub-predicates, according to the Biolink model."""
curies = BMT.get_descendants(predicate, formatted=True, reflexive=True)
return [
"biolink:" + camelcase_to_snakecase(curie[8:])
for curie in curies
]
def normalize_qgraph(qgraph):
"""Normalize query graph."""
for node in qgraph["nodes"].values():
node["categories"] = [
descendant
for category in node.get("categories", None) or ["biolink:NamedThing"]
for descendant in get_subcategories(category)
]
if "biolink:SmallMolecule" in node["categories"]:
node["categories"].append("biolink:ChemicalSubstance")
node.pop("is_set", None)
for edge in qgraph["edges"].values():
edge["predicates"] = [
descendant
for predicate in edge.get("predicates", None) or ["biolink:related_to"]
for descendant in get_subpredicates(predicate)
]
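# Illustrative usage (a sketch; the exact expansion depends on the Biolink model
# version loaded by the installed `bmt` Toolkit):
#   qgraph = {
#       "nodes": {"n0": {"categories": ["biolink:Disease"]}},
#       "edges": {"e0": {"predicates": ["biolink:treats"]}},
#   }
#   normalize_qgraph(qgraph)
#   # afterwards n0["categories"] contains "biolink:Disease" plus all of its
#   # sub-categories, and e0["predicates"] contains "biolink:treats" plus all
#   # of its sub-predicates (snake_cased), expanded in place.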
| [
"re.sub",
"bmt.Toolkit"
] | [((71, 80), 'bmt.Toolkit', 'Toolkit', ([], {}), '()\n', (78, 80), False, 'from bmt import Toolkit\n'), ((413, 451), 're.sub', 're.sub', (['"""(?<!^)(?=[A-Z])"""', '"""_"""', 'string'], {}), "('(?<!^)(?=[A-Z])', '_', string)\n", (419, 451), False, 'import re\n')] |
from pathlib import Path
import shutil
import unittest
import numpy as np
import siml.optimize as optimize
import siml.setting as setting
class TestOptimize(unittest.TestCase):
def test_generate_dict(self):
main_setting = setting.MainSetting.read_settings_yaml(
Path('tests/data/deform/optuna.yml'))
objective = optimize.Objective(main_setting, None)
dict_replace_1 = {
'inputs': [{'name': 'abc', 'dim': 6}],
'n_node': 35,
'hidden_layers': 11,
'dropout': 0.01}
replaced_setting_1 = objective._generate_dict(
main_setting.optuna.setting, dict_replace_1)
dict_replace_2 = {
'inputs': [
{'name': 'elemental_strain', 'dim': 6},
{'name': 'something', 'dim': 100}],
'n_node': 135,
'hidden_layers': 111,
'dropout': 0.11}
replaced_setting_2 = objective._generate_dict(
main_setting.optuna.setting, dict_replace_2)
self.assertEqual(
replaced_setting_1['trainer']['inputs'][0]['name'],
'abc')
self.assertEqual(
replaced_setting_2['trainer']['inputs'][0]['name'],
'elemental_strain')
self.assertEqual(
replaced_setting_2['trainer']['inputs'][1]['name'],
'something')
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_nodes'], 135)
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_layers'], 111)
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_dropout'], 0.11)
def test_perform_study(self):
main_setting = setting.MainSetting.read_settings_yaml(
Path('tests/data/deform/optuna.yml'))
if main_setting.optuna.output_base_directory.exists():
shutil.rmtree(main_setting.optuna.output_base_directory)
study = optimize.Study(main_setting)
study.perform_study()
self.assertLess(
study.study.best_trial.value,
np.max([t.value for t in study.study.trials]))
def test_perform_study_step_by_step(self):
main_setting_yml = Path('tests/data/deform/optuna.yml')
main_setting = setting.MainSetting.read_settings_yaml(
main_setting_yml)
if main_setting.optuna.output_base_directory.exists():
shutil.rmtree(main_setting.optuna.output_base_directory)
db_setting = setting.DBSetting(use_sqlite=True)
study = optimize.Study(main_setting, db_setting, step_by_step=True)
for _ in range(3):
try:
study.perform_study()
except SystemExit:
continue
self.assertEqual(len(study.study.get_trials()), 3)
| [
"siml.setting.MainSetting.read_settings_yaml",
"pathlib.Path",
"numpy.max",
"shutil.rmtree",
"siml.optimize.Objective",
"siml.optimize.Study",
"siml.setting.DBSetting"
] | [((349, 387), 'siml.optimize.Objective', 'optimize.Objective', (['main_setting', 'None'], {}), '(main_setting, None)\n', (367, 387), True, 'import siml.optimize as optimize\n'), ((1979, 2007), 'siml.optimize.Study', 'optimize.Study', (['main_setting'], {}), '(main_setting)\n', (1993, 2007), True, 'import siml.optimize as optimize\n'), ((2239, 2275), 'pathlib.Path', 'Path', (['"""tests/data/deform/optuna.yml"""'], {}), "('tests/data/deform/optuna.yml')\n", (2243, 2275), False, 'from pathlib import Path\n'), ((2299, 2355), 'siml.setting.MainSetting.read_settings_yaml', 'setting.MainSetting.read_settings_yaml', (['main_setting_yml'], {}), '(main_setting_yml)\n', (2337, 2355), True, 'import siml.setting as setting\n'), ((2523, 2557), 'siml.setting.DBSetting', 'setting.DBSetting', ([], {'use_sqlite': '(True)'}), '(use_sqlite=True)\n', (2540, 2557), True, 'import siml.setting as setting\n'), ((2574, 2633), 'siml.optimize.Study', 'optimize.Study', (['main_setting', 'db_setting'], {'step_by_step': '(True)'}), '(main_setting, db_setting, step_by_step=True)\n', (2588, 2633), True, 'import siml.optimize as optimize\n'), ((291, 327), 'pathlib.Path', 'Path', (['"""tests/data/deform/optuna.yml"""'], {}), "('tests/data/deform/optuna.yml')\n", (295, 327), False, 'from pathlib import Path\n'), ((1793, 1829), 'pathlib.Path', 'Path', (['"""tests/data/deform/optuna.yml"""'], {}), "('tests/data/deform/optuna.yml')\n", (1797, 1829), False, 'from pathlib import Path\n'), ((1906, 1962), 'shutil.rmtree', 'shutil.rmtree', (['main_setting.optuna.output_base_directory'], {}), '(main_setting.optuna.output_base_directory)\n', (1919, 1962), False, 'import shutil\n'), ((2117, 2162), 'numpy.max', 'np.max', (['[t.value for t in study.study.trials]'], {}), '([t.value for t in study.study.trials])\n', (2123, 2162), True, 'import numpy as np\n'), ((2444, 2500), 'shutil.rmtree', 'shutil.rmtree', (['main_setting.optuna.output_base_directory'], {}), '(main_setting.optuna.output_base_directory)\n', (2457, 2500), False, 'import shutil\n')] |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http.response import HttpResponseRedirect, HttpResponseBadRequest
from django.urls.base import reverse
from django.utils import timezone
from django.views.generic import (
CreateView,
DayArchiveView,
DetailView,
RedirectView,
TemplateView,
UpdateView,
)
from qanda.forms import QuestionForm, AnswerForm, AnswerAcceptanceForm
from qanda.models import Question, Answer
from qanda.service.elasticsearch import search_for_questions
from django.shortcuts import render
# Creating my views here.
class SearchView(TemplateView):
template_name = 'qanda/search.html'
def get_context_data(self, **kwargs):
query = self.request.GET.get('q', None)
ctx = super().get_context_data(query=query, **kwargs)
if query:
results = search_for_questions(query)
ctx['hits'] = results
return ctx
class TodaysQuestionList(RedirectView):
def get_redirect_url(self, *args, **kwargs):
today = timezone.now()
return reverse(
'questions:daily_questions',
kwargs={
'day': today.day,
'month': today.month,
'year': today.year,
}
)
class DailyQuestionList(DayArchiveView):
queryset = Question.objects.all()
date_field = 'created'
month_format = '%m'
allow_empty = True
class UpdateAnswerAcceptanceView(LoginRequiredMixin, UpdateView):
form_class = AnswerAcceptanceForm
queryset = Answer.objects.all()
def get_success_url(self):
return self.object.question.get_absolute_url()
def form_invalid(self, form):
return HttpResponseRedirect(
redirect_to=self.object.question.get_absolute_url())
class AskQuestionView(LoginRequiredMixin, CreateView):
form_class = QuestionForm
template_name = 'qanda/ask.html'
def get_initial(self):
return {
'user': self.request.user.id
}
def form_valid(self, form):
action = self.request.POST.get('action')
        if action == 'SAVE':
            # save and redirect as usual
            return super().form_valid(form)
elif action == 'PREVIEW':
preview = Question(
question=form.cleaned_data['question'],
title=form.cleaned_data['title'])
ctx = self.get_context_data(preview=preview)
return self.render_to_response(context=ctx)
return HttpResponseBadRequest()
class QuestionDetailView(DetailView):
model = Question
ACCEPT_FORM = AnswerAcceptanceForm(initial={'accepted': True})
REJECT_FORM = AnswerAcceptanceForm(initial={'accepted': False})
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx.update({
'answer_form': AnswerForm(initial={
'user': self.request.user.id,
'question': self.object.id,
})
})
if self.object.can_accept_answers(self.request.user):
ctx.update({
'accept_form': self.ACCEPT_FORM,
'reject_form': self.REJECT_FORM,
})
return ctx
class CreateAnswerView(LoginRequiredMixin, CreateView):
form_class = AnswerForm
template_name = 'qanda/create_answer.html'
def get_initial(self):
return {
'question': self.get_question().id,
'user': self.request.user.id,
}
def get_context_data(self, **kwargs):
return super().get_context_data(question=self.get_question(),
**kwargs)
def get_success_url(self):
return self.object.question.get_absolute_url()
def form_valid(self, form):
action = self.request.POST.get('action')
        if action == 'SAVE':
            # save and redirect as usual
            return super().form_valid(form)
elif action == 'PREVIEW':
ctx = self.get_context_data(preview=form.cleaned_data['answer'])
return self.render_to_response(context=ctx)
return HttpResponseBadRequest()
def get_question(self):
return Question.objects.get(pk=self.kwargs['pk'])
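# Note on the SAVE/PREVIEW pattern used by AskQuestionView and CreateAnswerView
# (a description of the flow above, not additional behavior): the submit buttons
# post an 'action' value; 'SAVE' falls through to the normal CreateView
# save-and-redirect, while 'PREVIEW' re-renders the form template with a
# 'preview' object in the context so the user can review before saving.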
| [
"django.http.response.HttpResponseBadRequest",
"django.urls.base.reverse",
"qanda.models.Question.objects.get",
"qanda.service.elasticsearch.search_for_questions",
"qanda.forms.AnswerForm",
"qanda.forms.AnswerAcceptanceForm",
"django.utils.timezone.now",
"qanda.models.Question.objects.all",
"qanda.m... | [((1243, 1265), 'qanda.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (1263, 1265), False, 'from qanda.models import Question, Answer\n'), ((1456, 1476), 'qanda.models.Answer.objects.all', 'Answer.objects.all', ([], {}), '()\n', (1474, 1476), False, 'from qanda.models import Question, Answer\n'), ((2397, 2445), 'qanda.forms.AnswerAcceptanceForm', 'AnswerAcceptanceForm', ([], {'initial': "{'accepted': True}"}), "(initial={'accepted': True})\n", (2417, 2445), False, 'from qanda.forms import QuestionForm, AnswerForm, AnswerAcceptanceForm\n'), ((2462, 2511), 'qanda.forms.AnswerAcceptanceForm', 'AnswerAcceptanceForm', ([], {'initial': "{'accepted': False}"}), "(initial={'accepted': False})\n", (2482, 2511), False, 'from qanda.forms import QuestionForm, AnswerForm, AnswerAcceptanceForm\n'), ((1009, 1023), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1021, 1023), False, 'from django.utils import timezone\n'), ((1034, 1143), 'django.urls.base.reverse', 'reverse', (['"""questions:daily_questions"""'], {'kwargs': "{'day': today.day, 'month': today.month, 'year': today.year}"}), "('questions:daily_questions', kwargs={'day': today.day, 'month':\n today.month, 'year': today.year})\n", (1041, 1143), False, 'from django.urls.base import reverse\n'), ((2294, 2318), 'django.http.response.HttpResponseBadRequest', 'HttpResponseBadRequest', ([], {}), '()\n', (2316, 2318), False, 'from django.http.response import HttpResponseRedirect, HttpResponseBadRequest\n'), ((3700, 3724), 'django.http.response.HttpResponseBadRequest', 'HttpResponseBadRequest', ([], {}), '()\n', (3722, 3724), False, 'from django.http.response import HttpResponseRedirect, HttpResponseBadRequest\n'), ((3763, 3805), 'qanda.models.Question.objects.get', 'Question.objects.get', ([], {'pk': "self.kwargs['pk']"}), "(pk=self.kwargs['pk'])\n", (3783, 3805), False, 'from qanda.models import Question, Answer\n'), ((836, 863), 'qanda.service.elasticsearch.search_for_questions', 'search_for_questions', (['query'], {}), '(query)\n', (856, 863), False, 'from qanda.service.elasticsearch import search_for_questions\n'), ((2093, 2180), 'qanda.models.Question', 'Question', ([], {'question': "form.cleaned_data['question']", 'title': "form.cleaned_data['title']"}), "(question=form.cleaned_data['question'], title=form.cleaned_data[\n 'title'])\n", (2101, 2180), False, 'from qanda.models import Question, Answer\n'), ((2633, 2711), 'qanda.forms.AnswerForm', 'AnswerForm', ([], {'initial': "{'user': self.request.user.id, 'question': self.object.id}"}), "(initial={'user': self.request.user.id, 'question': self.object.id})\n", (2643, 2711), False, 'from qanda.forms import QuestionForm, AnswerForm, AnswerAcceptanceForm\n')] |
# 888 888
# 888 888
# 888 888
# .d8888b 88888b. 8888b. 88888b. .d88b. .d88b. 888 .d88b. .d88b.
# d88P" 888 "88b "88b 888 "88b d88P"88b d8P Y8b 888 d88""88b d88P"88b
# 888 888 888 .d888888 888 888 888 888 88888888 888 888 888 888 888
# Y88b. 888 888 888 888 888 888 Y88b 888 Y8b. 888 Y88..88P Y88b 888
# "Y8888P 888 888 "Y888888 888 888 "Y88888 "Y8888 888 "Y88P" "Y88888
# 888 888
# Y8b d88P Y8b d88P
# "Y88P" "Y88P"
#
# 210504 mh:
# * Adding support for minimum groups and project capabilities for read and owner Groups
# * Exception handling for root-groups to avoid duplicate groups and projects capabilities
# 210610 mh:
# * Adding RAW DBs and Datasets for Groups {env}:allprojects:{owner/read} and {env}:{group}:allprojects:{owner/read}
# * Adding functionality for updating dataset details (external id, description, etc) based on the config.yml
# 210910 pa:
# * extended acl_default_types by labels, relationships, functions
# * removed labels from acl_admin_types
# * functions don't have dataset scope
# 211013 pa:
# * renamed "adfs" to "aad" terminology => aad_mappings
# * for AAD 'root:client' and 'root:user' can be merged into 'root'
# 211014 pa:
# * adding new capabilities
# extractionpipelinesAcl
# extractionrunsAcl
# 211108 pa:
# * adding new capabilities
# entitymatchingAcl
# * refactor list of acl types which only support "all" scope
# acl_all_scope_only_types
# * support "labels" for non admin groups
# 211110 pa:
# * adding new capabilities
# sessionsAcl
# 220202 pa:
# * adding new capabilities
# typesAcl
# 220216 pa:
# * adding 'generate_special_groups()' to handle
# 'extractors' and 'transformations' and their 'aad_mappings'
# * configurable through `deploy --with-special-groups=[yes|no]` parameter
# * adding new capabilities:
# transformationsAcl (replacing the need for magic "transformations" CDF Group)
# 220404 pa:
#   * v1.4.0 limited dataset capabilities for 'owner' groups so that they cannot edit or create datasets
# * removed `datasets:write` capability
# * moved that capability to action_dimensions['admin']
# 220405 sd:
# * v1.5.0 added dry-run mode as global parameter for all commands
# 220405 pa:
# * v1.6.0
# * removed 'transformation' acl from 'acl_all_scope_only_types'
# as it now supports dataset scopes too!
# * refactor variable names to match the new documentation
# 1. group_types_dimensions > group_bootstrap_hierarchy
# 2. group_type > ns_name (namespace: src, ca, uc)
# 3. group_prefix > node_name (src:001:sap)
# 220406 pa/sd:
# * v1.7.0
# * added 'diagram' command which creates a Mermaid (diagram as code) output
# 220406 pa:
# * v1.7.1
# * started to use '# fmt:skip' to save intended multiline formatted and indented code
# from black auto-format
# 220420 pa:
# * v.1.9.2
# * fixed Poetry on Windows issues
# 220422 pa:
# * v1.10.0
# * issue #28 possibility to skip creation of RAW DBs
# * added '--with-raw-capability' parameter for 'deploy' and 'diagram' commands
# 220424 pa:
# * introduced CommandMode enums to support more detailed BootstrapCore initialization
# * started with validation-functions ('validate_config_is_cdf_project_in_mappings')
# * for 'diagram' command
# - made 'cognite' section optional
# - added support for parameter '--cdf-project' to explicit diagram a specific CDF Project
# - Added cdf-project name to diagram "IdP Groups for CDF: <>" subgraph title
# - renamed mermaid properties from 'name/short' to 'id_name/display'
# * documented config-deploy-example-v2.yml
# 220511 pa: v2.0.0 release :)
import logging
import time
# from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from itertools import islice
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, TypeVar
import click
import pandas as pd
import yaml
from click import Context
from cognite.client.data_classes import DataSet, Group
from cognite.client.data_classes.data_sets import DataSetUpdate
from cognite.extractorutils.configtools import CogniteClient
from dotenv import load_dotenv
# cli internal
from incubator.bootstrap_cli import __version__
from incubator.bootstrap_cli.configuration import (
BootstrapConfigError,
BootstrapDeleteConfig,
BootstrapDeployConfig,
BootstrapValidationError,
CommandMode,
SharedAccess,
YesNoType,
)
from incubator.bootstrap_cli.mermaid_generator.mermaid import (
AssymetricNode,
DottedEdge,
Edge,
GraphRegistry,
Node,
RoundedNode,
SubroutineNode,
TrapezNode,
)
# '''
# 888 888 888 .d888 d8b
# 888 888 888 d88P" Y8P
# 888 888 888 888
# .d88b. 888 .d88b. 88888b. 8888b. 888 .d8888b .d88b. 88888b. 888888 888 .d88b. .d8888b
# d88P"88b 888 d88""88b 888 "88b "88b 888 d88P" d88""88b 888 "88b 888 888 d88P"88b 88K
# 888 888 888 888 888 888 888 .d888888 888 888 888 888 888 888 888 888 888 888 "Y8888b.
# Y88b 888 888 Y88..88P 888 d88P 888 888 888 Y88b. Y88..88P 888 888 888 888 Y88b 888 X88
# "Y88888 888 "Y88P" 88888P" "Y888888 888 "Y8888P "Y88P" 888 888 888 888 "Y88888 88888P'
# 888 888
# Y8b d88P Y8b d88P
# "Y88P" "Y88P"
# '''
_logger = logging.getLogger(__name__)
# because within f'' strings no backslash-character is allowed
NEWLINE = "\n"
# capabilities (acl) which only support scope: {"all":{}}
acl_all_scope_only_types = set(
[
"projects",
"sessions",
"functions",
"entitymatching",
"types",
"threed",
]
)
# lookup of non-default actions per capability (acl) and role (owner/read/admin)
action_dimensions = {
# owner datasets might only need READ and OWNER
"owner": { # else ["READ","WRITE"]
"raw": ["READ", "WRITE", "LIST"],
"datasets": ["READ", "OWNER"],
"groups": ["LIST"],
"projects": ["LIST"],
"sessions": ["LIST", "CREATE"],
"threed": ["READ", "CREATE", "UPDATE", "DELETE"],
},
"read": { # else ["READ"]
"raw": ["READ", "LIST"],
"groups": ["LIST"],
"projects": ["LIST"],
"sessions": ["LIST"],
},
"admin": {
"datasets": ["READ", "WRITE", "OWNER"],
"groups": ["LIST", "READ", "CREATE", "UPDATE", "DELETE"],
"projects": ["READ", "UPDATE", "LIST"],
},
}
#
# GENERIC configurations
# extend when new capability (acl) is available
# check if action_dimensions must be extended with non-default capabilities:
# which are owner: ["READ","WRITE"]
# and read: ["READ"])
#
acl_default_types = [
"assets",
"datasets",
"entitymatching",
"events",
"extractionPipelines",
"extractionRuns",
"files",
"functions",
"groups",
"labels",
"projects",
"raw",
"relationships",
"sequences",
"sessions",
"timeSeries",
"transformations",
"types",
"threed",
]
# give precedence when merging over acl_default_types
acl_admin_types = list(action_dimensions["admin"].keys())
# '''
# 888888b. 888 888 .d8888b.
# 888 "88b 888 888 d88P Y88b
# 888 .88P 888 888 888 888
# 8888888K. .d88b. .d88b. 888888 .d8888b 888888 888d888 8888b. 88888b. 888 .d88b. 888d888 .d88b.
# 888 "Y88b d88""88b d88""88b 888 88K 888 888P" "88b 888 "88b 888 d88""88b 888P" d8P Y8b
# 888 888 888 888 888 888 888 "Y8888b. 888 888 .d888888 888 888 888 888 888 888 888 88888888
# 888 d88P Y88..88P Y88..88P Y88b. X88 Y88b. 888 888 888 888 d88P Y88b d88P Y88..88P 888 Y8b.
# 8888888P" "Y88P" "Y88P" "Y888 88888P' "Y888 888 "Y888888 88888P" "Y8888P" "Y88P" 888 "Y8888
# 888
# 888
# 888
# '''
# type-hint for BootstrapCore instance response
T_BootstrapCore = TypeVar("T_BootstrapCore", bound="BootstrapCore")
class BootstrapCore:
# CDF Group prefix, i.e. "cdf:", to make bootstrap created CDF Groups easy recognizable in Fusion
GROUP_NAME_PREFIX = ""
# mandatory for hierarchical-namespace
AGGREGATED_LEVEL_NAME = ""
    # RAW DB creation supports additional variants, for special purposes (like saving state-stores)
# - default-suffix is ':rawdb' with no variant-suffix (represented by "")
# - additional variant-suffixes can be added like this ["", ":state"]
RAW_VARIANTS = [""]
def __init__(self, configpath: str, command: CommandMode):
if command == CommandMode.DELETE:
self.config: BootstrapDeleteConfig = BootstrapDeleteConfig.from_yaml(configpath)
self.delete_or_deprecate: Dict[str, Any] = self.config.delete_or_deprecate
if not self.config.cognite:
                raise BootstrapConfigError("'cognite' section required in configuration")
elif command in (CommandMode.DEPLOY, CommandMode.DIAGRAM, CommandMode.PREPARE):
self.config: BootstrapDeployConfig = BootstrapDeployConfig.from_yaml(configpath)
self.bootstrap_config: BootstrapDeployConfig = self.config.bootstrap
self.idp_cdf_mappings = self.bootstrap_config.idp_cdf_mappings
# CogniteClient is optional for diagram
if command != CommandMode.DIAGRAM:
# mandatory section
if not self.config.cognite:
                    raise BootstrapConfigError("'cognite' section required in configuration")
#
# load 'bootstrap.features'
#
# unpack and process features
features = self.bootstrap_config.features
# [OPTIONAL] default: False
self.with_special_groups: bool = features.with_special_groups
# [OPTIONAL] default: True
self.with_raw_capability: bool = features.with_raw_capability
# [OPTIONAL] default: "allprojects"
BootstrapCore.AGGREGATED_LEVEL_NAME = features.aggregated_level_name
# [OPTIONAL] default: "cdf:"
# support for '' empty string
BootstrapCore.GROUP_NAME_PREFIX = f"{features.group_prefix}:" if features.group_prefix else ""
# [OPTIONAL] default: "dataset"
# support for '' empty string
BootstrapCore.DATASET_SUFFIX = f":{features.dataset_suffix}" if features.dataset_suffix else ""
# [OPTIONAL] default: "rawdb"
# support for '' empty string
BootstrapCore.RAW_SUFFIX = f":{features.rawdb_suffix}" if features.rawdb_suffix else ""
            # [OPTIONAL] default: ["", ":state"]
BootstrapCore.RAW_VARIANTS = [""] + [f":{suffix}" for suffix in features.rawdb_additional_variants]
self.deployed: Dict[str, Any] = {}
self.all_scope_ctx: Dict[str, Any] = {}
self.is_dry_run: bool = False
self.client: CogniteClient = None
self.cdf_project = None
# TODO debug
# print(f"self.config= {self.config}")
# TODO: support 'logger' section optional, provide default config for logger with console only
#
# Logger initialisation
#
# make sure the optional folders in logger.file.path exists
# to avoid: FileNotFoundError: [Errno 2] No such file or directory: '/github/workspace/logs/test-deploy.log'
if self.config.logger.file:
(Path.cwd() / self.config.logger.file.path).parent.mkdir(parents=True, exist_ok=True)
self.config.logger.setup_logging()
_logger.info("Starting CDF Bootstrap configuration")
# debug new features
if getattr(self, "bootstrap_config", False):
# TODO: not available for 'delete' but there must be aa smarter solution
_logger.debug(
"Features from yaml-config or defaults (can be overridden by cli-parameters!): "
f"{self.bootstrap_config.features=}"
)
#
# Cognite initialisation (optional for 'diagram')
#
if self.config.cognite:
self.client: CogniteClient = self.config.cognite.get_cognite_client( # noqa
client_name="inso-bootstrap-cli", token_custom_args=self.config.token_custom_args
)
self.cdf_project = self.client.config.project
_logger.info("Successful connection to CDF client")
@staticmethod
def acl_template(actions, scope):
return {"actions": actions, "scope": scope}
@staticmethod
def get_allprojects_name_template(ns_name=None):
return f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}" if ns_name else BootstrapCore.AGGREGATED_LEVEL_NAME
@staticmethod
def get_dataset_name_template():
return "{node_name}" + BootstrapCore.DATASET_SUFFIX
@staticmethod
def get_raw_dbs_name_template():
return "{node_name}" + BootstrapCore.RAW_SUFFIX + "{raw_variant}"
@staticmethod
def get_timestamp():
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def validate_config_length_limits(self):
"""
Validate features in config
"""
#
# CHECK 1 (availability)
#
if not self.AGGREGATED_LEVEL_NAME:
raise BootstrapValidationError(
"Features validation error: 'features.aggregated-level-name' is required, "
f"but provided as <{self.AGGREGATED_LEVEL_NAME}>"
)
#
# CHECK 2 (length limits)
#
# TODO: GROUP_NAME_LENGTH_LIMIT = ??
RAWDB_NAME_LENGTH_LIMIT = 32
DATASET_NAME_LENGTH_LIMIT = 50
DATASET_EXTERNALID_LENGTH_LIMIT = 255
# create all required scopes to check name lengths
all_scopes = {
# generate_target_raw_dbs -> returns a Set[str]
"raw": self.generate_target_raw_dbs(), # all raw_dbs
# generate_target_datasets -> returns a Dict[str, Any]
"datasets": self.generate_target_datasets(), # all datasets
}
errors = []
if self.with_raw_capability:
errors.extend(
[
("RAW DB", rawdb_name, len(rawdb_name), RAWDB_NAME_LENGTH_LIMIT)
for rawdb_name in all_scopes["raw"]
if len(rawdb_name) > RAWDB_NAME_LENGTH_LIMIT
]
)
errors.extend(
[
("DATA SET name", dataset_name, len(dataset_name), DATASET_NAME_LENGTH_LIMIT)
for dataset_name, dataset_details in all_scopes["datasets"].items()
if len(dataset_name) > DATASET_NAME_LENGTH_LIMIT
]
)
errors.extend(
[
(
"DATA SET external_id",
dataset_details["external_id"],
len(dataset_name),
DATASET_EXTERNALID_LENGTH_LIMIT,
)
for dataset_name, dataset_details in all_scopes["datasets"].items()
if len(dataset_details["external_id"]) > DATASET_EXTERNALID_LENGTH_LIMIT
]
)
if errors:
raise BootstrapValidationError(
"Features validation error(s):\n"
# RAW DB src:002:weather:rawdbiswaytoolongtofit : len(38) > 32
f"""{NEWLINE.join(
[
f'{scope_type} {scope_error} : len({scope_length}) > {max_length}'
for (scope_type, scope_error, scope_length, max_length) in errors
])}"""
)
# return self for chaining
return self
def validate_config_is_cdf_project_in_mappings(self):
# check if mapping exists for configured cdf-project
is_cdf_project_in_mappings = self.cdf_project in [mapping.cdf_project for mapping in self.idp_cdf_mappings]
if not is_cdf_project_in_mappings:
_logger.warning(f"No 'idp-cdf-mapping' found for CDF Project <{self.cdf_project}>")
# log or raise?
# raise ValueError(f'No mapping for CDF project {self.cdf_project}')
# return self for chaining
return self
def generate_default_action(self, action, acl_type):
return action_dimensions[action].get(acl_type, ["READ", "WRITE"] if action == "owner" else ["READ"])
def generate_admin_action(self, acl_admin_type):
return action_dimensions["admin"][acl_admin_type]
def get_ns_node_shared_access_by_name(self, node_name) -> SharedAccess:
for ns in self.bootstrap_config.namespaces:
for ns_node in ns.ns_nodes:
if node_name == ns_node.node_name:
return ns_node.shared_access
return SharedAccess([], [])
def get_group_raw_dbs_groupedby_action(self, action, ns_name, node_name=None):
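        # Collects the RAW DB names this group needs, grouped by action ('owner'/'read'):
        # node-level groups get their own RAW DBs, 'owner' groups additionally get the
        # RAW DBs of their configured shared_access nodes, and namespace-level groups get
        # the RAW DBs of every node in the namespace plus the aggregated-level RAW DBs.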
raw_db_names: Dict[str, Any] = {"owner": [], "read": []}
if node_name:
raw_db_names[action].extend(
# the dataset which belongs directly to this node_name
[
self.get_raw_dbs_name_template().format(node_name=node_name, raw_variant=raw_variant)
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
# for owner groups add "shared_owner_access" raw_dbs too
if action == "owner":
raw_db_names["owner"].extend(
[
self.get_raw_dbs_name_template().format(
node_name=shared_node.node_name, raw_variant=raw_variant
)
# find the group_config which matches the name,
# and check the "shared_access" groups list (else [])
for shared_node in self.get_ns_node_shared_access_by_name(node_name).owner
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
raw_db_names["read"].extend(
[
self.get_raw_dbs_name_template().format(
node_name=shared_node.node_name, raw_variant=raw_variant
)
# find the group_config which matches the name,
# and check the "shared_access" groups list (else [])
for shared_node in self.get_ns_node_shared_access_by_name(node_name).read
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
        else:  # handling the {ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}
raw_db_names[action].extend(
[
self.get_raw_dbs_name_template().format(node_name=ns_node.node_name, raw_variant=raw_variant)
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
for raw_variant in BootstrapCore.RAW_VARIANTS
]
                # adding the {ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME} rawdbs
+ [ # noqa
self.get_raw_dbs_name_template().format(
node_name=self.get_allprojects_name_template(ns_name=ns_name), raw_variant=raw_variant
)
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
# only owner-groups support "shared_access" rawdbs
if action == "owner":
raw_db_names["owner"].extend(
[
self.get_raw_dbs_name_template().format(
node_name=shared_access_node.node_name, raw_variant=raw_variant
)
# and check the "shared_access" groups list (else [])
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
for shared_access_node in ns_node.shared_access.owner
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
raw_db_names["read"].extend(
[
self.get_raw_dbs_name_template().format(
node_name=shared_access_node.node_name, raw_variant=raw_variant
)
# and check the "shared_access" groups list (else [])
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
for shared_access_node in ns_node.shared_access.read
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
# returns clear names grouped by action
return raw_db_names
def get_group_datasets_groupedby_action(self, action, ns_name, node_name=None):
dataset_names: Dict[str, Any] = {"owner": [], "read": []}
# for example fac:001:wasit, uc:002:meg, etc.
if node_name:
dataset_names[action].extend(
# the dataset which belongs directly to this node_name
[self.get_dataset_name_template().format(node_name=node_name)]
)
# for owner groups add "shared_access" datasets too
if action == "owner":
dataset_names["owner"].extend(
[
self.get_dataset_name_template().format(node_name=shared_node.node_name)
# find the group_config which matches the id,
# and check the "shared_access" groups list (else [])
for shared_node in self.get_ns_node_shared_access_by_name(node_name).owner
]
)
dataset_names["read"].extend(
[
self.get_dataset_name_template().format(node_name=shared_node.node_name)
# find the group_config which matches the id,
# and check the "shared_access" groups list (else [])
for shared_node in self.get_ns_node_shared_access_by_name(node_name).read
]
)
# for example src, fac, uc, ca
        else:  # handling the {ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}
dataset_names[action].extend(
[
# all datasets for each of the nodes of the given namespace
self.get_dataset_name_template().format(node_name=ns_node.node_name)
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
]
                # adding the {ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME} dataset
+ [ # noqa
self.get_dataset_name_template().format(
node_name=self.get_allprojects_name_template(ns_name=ns_name)
)
]
)
# only owner-groups support "shared_access" datasets
if action == "owner":
dataset_names["owner"].extend(
[
self.get_dataset_name_template().format(node_name=shared_access_node.node_name)
# and check the "shared_access" groups list (else [])
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
for shared_access_node in ns_node.shared_access.owner
]
)
dataset_names["read"].extend(
[
self.get_dataset_name_template().format(node_name=shared_access_node.node_name)
# and check the "shared_access" groups list (else [])
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
for shared_access_node in ns_node.shared_access.read
]
)
# returns clear names
return dataset_names
def dataset_names_to_ids(self, dataset_names):
return self.deployed["datasets"].query("name in @dataset_names")["id"].tolist()
def get_scope_ctx_groupedby_action(self, action, ns_name, node_name=None):
ds_by_action = self.get_group_datasets_groupedby_action(action, ns_name, node_name)
rawdbs_by_action = self.get_group_raw_dbs_groupedby_action(action, ns_name, node_name)
return {
action: {"raw": rawdbs_by_action[action], "datasets": ds_by_action[action]}
for action in ["owner", "read"]
} # fmt: skip
def generate_scope(self, acl_type, scope_ctx):
if acl_type == "raw":
# { "tableScope": { "dbsToTables": { "foo:db": {}, "bar:db": {} } }
return {"tableScope": {"dbsToTables": {raw: {} for raw in scope_ctx["raw"]}}}
elif acl_type == "datasets":
# { "idScope": { "ids": [ 2695894113527579, 4254268848874387 ] } }
return {"idScope": {"ids": self.dataset_names_to_ids(scope_ctx["datasets"])}}
# adding minimum projects and groups scopes for non-root groups
# TODO: adding documentation link
elif acl_type in acl_all_scope_only_types:
return {"all": {}}
elif acl_type == "groups":
return {"currentuserscope": {}}
else: # like 'assets', 'events', 'files', 'sequences', 'timeSeries', ..
# { "datasetScope": { "ids": [ 2695894113527579, 4254268848874387 ] } }
return {"datasetScope": {"ids": self.dataset_names_to_ids(scope_ctx["datasets"])}}
def generate_group_name_and_capabilities(
self, action: str = None, ns_name: str = None, node_name: str = None, root_account: str = None
) -> Tuple[str, List[Dict[str, Any]]]:
"""Create the group-name and its capabilities.
The function supports following levels expressed by parameter combinations:
- core: {action} + {ns_name} + {node_name}
- namespace: {action} + {ns_name}
- top-level: {action}
- root: {root_account}
Args:
action (str, optional):
One of the action_dimensions ["read", "owner"].
Defaults to None.
ns_name (str, optional):
Namespace like "src" or "uc".
Defaults to None.
node_name (str, optional):
Core group like "src:001:sap" or "uc:003:demand".
Defaults to None.
root_account (str, optional):
Name of the root-account.
Defaults to None.
Returns:
Tuple[str, List[Dict[str, Any]]]: group-name and list of capabilities
"""
capabilities = []
# detail level like cdf:src:001:public:read
if action and ns_name and node_name:
# group for each dedicated group-core id
group_name_full_qualified = f"{BootstrapCore.GROUP_NAME_PREFIX}{node_name}:{action}"
[
capabilities.append( # type: ignore
{
f"{acl_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_default_action(shared_action, acl_type),
scope=self.generate_scope(acl_type, scope_ctx),
)
}
)
for acl_type in acl_default_types
for shared_action, scope_ctx in self.get_scope_ctx_groupedby_action(action, ns_name, node_name).items()
# don't create empty scopes
                # checking one would suffice (both have the same length), but checking both is more explicit
if scope_ctx["raw"] and scope_ctx["datasets"]
]
# group-type level like cdf:src:all:read
elif action and ns_name:
# 'all' groups on group-type level
# (access to all datasets/ raw-dbs which belong to this group-type)
group_name_full_qualified = (
f"{BootstrapCore.GROUP_NAME_PREFIX}{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}"
)
[
capabilities.append( # type: ignore
{
f"{acl_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_default_action(shared_action, acl_type),
scope=self.generate_scope(acl_type, scope_ctx),
)
}
)
for acl_type in acl_default_types
for shared_action, scope_ctx in self.get_scope_ctx_groupedby_action(action, ns_name).items()
# don't create empty scopes
                # checking one would suffice (both have the same length), but checking both is more explicit
if scope_ctx["raw"] and scope_ctx["datasets"]
]
# top level like cdf:all:read
elif action:
# 'all' groups on action level (no limits to datasets or raw-dbs)
group_name_full_qualified = (
f"{BootstrapCore.GROUP_NAME_PREFIX}{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}"
)
[
capabilities.append( # type: ignore
{
f"{acl_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_default_action(action, acl_type),
# scope = { "all": {} }
# create scope for all raw_dbs and datasets
scope=self.generate_scope(acl_type, self.all_scope_ctx),
)
}
)
for acl_type in acl_default_types
]
# root level like cdf:root
elif root_account: # no parameters
# all (no limits)
group_name_full_qualified = f"{BootstrapCore.GROUP_NAME_PREFIX}{root_account}"
# all default ACLs
[
capabilities.append( # type: ignore
{
f"{acl_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_default_action("owner", acl_type),
scope={"all": {}},
)
}
)
# skipping admin types from default types to avoid duplicates
for acl_type in (set(acl_default_types) - set(acl_admin_types))
]
# plus admin ACLs
[
capabilities.append( # type: ignore
{
f"{acl_admin_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_admin_action(acl_admin_type),
scope={"all": {}},
)
}
)
for acl_admin_type in acl_admin_types
]
return group_name_full_qualified, capabilities
def get_group_ids_by_name(self, group_name: str) -> List[int]:
"""Lookup if CDF Group name exists (could be more than one!)
and return list of all CDF Group IDs
Args:
group_name (str): CDF Group name to check
Returns:
List[int]: of CDF Group IDs
"""
return self.deployed["groups"].query("name == @group_name")["id"].tolist()
# return self.deployed["groups"].query("name == @group_payload['name']")["id"].tolist()
# TODO 220203 pa: explicit providing 'group_name'
# to bypass a strange bug under Docker which throws a
# pandas.core.computation.ops.UndefinedVariableError:
# name 'str_0_0x900xd80x90xec0x870x7f0x00x0' is not defined
def create_group(
self,
group_name: str,
group_capabilities: Dict[str, Any] = None,
        idp_mapping: Tuple[str, str] = None,
) -> Group:
"""Creating a CDF Group
- with upsert support the same way Fusion updates CDF Groups
if a group with the same name exists:
1. a new group with the same name will be created
2. then the old group will be deleted (by its 'id')
        - with support for an explicitly given IdP mapping or an internal lookup from config
Args:
group_name (str): name of the CDF Group (always prefixed with GROUP_NAME_PREFIX)
group_capabilities (List[Dict[str, Any]], optional): Defining the CDF Group capabilities.
            idp_mapping (Tuple[str, str], optional):
                Tuple of ({IdP source ID}, {IdP source name})
                to link the CDF Group to
Returns:
Group: the new created CDF Group
"""
idp_source_id, idp_source_name = None, None
if idp_mapping:
# explicit given
# TODO: change from tuple to dataclass
if len(idp_mapping) != 2:
raise ValueError(f"Expected a tuple of length 2, got {idp_mapping=} instead")
idp_source_id, idp_source_name = idp_mapping
else:
# check lookup from provided config
mapping = self.bootstrap_config.get_idp_cdf_mapping_for_group(
cdf_project=self.cdf_project, cdf_group=group_name
)
# unpack
idp_source_id, idp_source_name = mapping.idp_source_id, mapping.idp_source_name
# check if group already exists, if yes it will be deleted after a new one is created
old_group_ids = self.get_group_ids_by_name(group_name)
new_group = Group(name=group_name, capabilities=group_capabilities)
if idp_source_id:
# inject (both will be pushed through the API call!)
new_group.source_id = idp_source_id # 'S-314159-1234'
new_group.source = idp_source_name # type: ignore
# print(f"group_create_object:<{group_create_object}>")
# overwrite new_group as it now contains id too
if self.is_dry_run:
_logger.info(f"Dry run - Creating group with name: <{new_group.name}>")
_logger.debug(f"Dry run - Creating group details: <{new_group}>")
else:
new_group = self.client.iam.groups.create(new_group)
# if the group name existed before, delete those groups now
# same upsert approach Fusion is using to update a CDF Group: create new with changes => then delete old one
if old_group_ids:
if self.is_dry_run:
_logger.info(f"Dry run - Deleting groups with ids: <{old_group_ids}>")
else:
self.client.iam.groups.delete(old_group_ids)
return new_group
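    # Illustrative call (a sketch only; the IdP source id and name are placeholders):
    #   self.create_group(
    #       group_name="cdf:all:read",
    #       group_capabilities=capabilities,
    #       idp_mapping=("S-314159-1234", "AAD_GROUP_ALL_READ"),
    #   )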
def process_group(
self, action: str = None, ns_name: str = None, node_name: str = None, root_account: str = None
) -> Group:
# to avoid complex upsert logic, all groups will be recreated and then the old ones deleted
# to be merged with existing code
# print(f"=== START: action<{action}> | ns_name<{ns_name}> | node_name<{node_name}> ===")
group_name, group_capabilities = self.generate_group_name_and_capabilities(
action, ns_name, node_name, root_account
)
group: Group = self.create_group(group_name, group_capabilities)
return group
def generate_target_datasets(self) -> Dict[str, Any]:
# list of all targets: autogenerated dataset names
target_datasets = {
# dictionary generator
# dataset_name : {Optional[dataset_description], Optional[dataset_metadata], ..}
# key:
(fq_ns_name := self.get_dataset_name_template().format(node_name=ns_node.node_name)):
# value
{
"description": ns_node.description,
"metadata": ns_node.metadata,
# if not explicit provided, same template as name
"external_id": ns_node.external_id or fq_ns_name,
}
for ns in self.bootstrap_config.namespaces
for ns_node in ns.ns_nodes
}
        # update target datasets to include the top-level '{AGGREGATED_LEVEL_NAME}' and '{ns_name}:{AGGREGATED_LEVEL_NAME}' datasets
target_datasets.update(
{ # dictionary generator
# key:
self.get_dataset_name_template().format(
node_name=f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}"
if ns_name
else BootstrapCore.AGGREGATED_LEVEL_NAME
):
# value
{
"description": f"Dataset for '{BootstrapCore.AGGREGATED_LEVEL_NAME}' Owner Groups",
# "metadata": "",
"external_id": f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}"
if ns_name
else BootstrapCore.AGGREGATED_LEVEL_NAME,
}
# creating 'all' at group type level + top-level
for ns_name in list([ns.ns_name for ns in self.bootstrap_config.namespaces]) + [""]
}
)
return target_datasets
def generate_missing_datasets(self) -> Tuple[List[str], List[str]]:
target_datasets = self.generate_target_datasets()
# TODO: SDK should do this fine, that was an older me still learning :)
def chunks(data, SIZE=10000):
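            # simple batching helper: yields the dict in slices of at most SIZE keys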
it = iter(data)
for i in range(0, len(data), SIZE):
yield {k: data[k] for k in islice(it, SIZE)}
# which targets are not already deployed?
missing_datasets = {
name: payload
for name, payload in target_datasets.items()
if name not in self.deployed["datasets"]["name"].tolist()
}
if missing_datasets:
# create all datasets which are not already deployed
# https://docs.cognite.com/api/v1/#operation/createDataSets
for chunked_missing_datasets in chunks(missing_datasets, 10):
datasets_to_be_created = [
DataSet(
name=name,
description=payload.get("description"),
external_id=payload.get("external_id"),
metadata=payload.get("metadata"),
write_protected=True,
)
for name, payload in chunked_missing_datasets.items()
]
if self.is_dry_run:
for data_set_to_be_created in datasets_to_be_created:
_logger.info(f"Dry run - Creating dataset with name: <{data_set_to_be_created.name}>")
_logger.debug(f"Dry run - Creating dataset: <{data_set_to_be_created}>")
else:
self.client.data_sets.create(datasets_to_be_created)
# which targets are already deployed?
existing_datasets = {
# dictionary generator
# key:
dataset_columns["name"]:
# value
# Merge dataset 'id' from CDF with dataset arguments from config.yml
dict(id=dataset_columns["id"], **target_datasets[dataset_columns["name"]])
for row_id, dataset_columns in self.deployed["datasets"].iterrows() # iterating pd dataframe
if dataset_columns["name"] in target_datasets.keys()
}
if existing_datasets:
# update datasets which are already deployed
# https://docs.cognite.com/api/v1/#operation/createDataSets
# TODO: description, metadata, externalId
for chunked_existing_datasets in chunks(existing_datasets, 10):
datasets_to_be_updated = [
DataSetUpdate(id=dataset["id"])
.name.set(name)
.description.set(dataset.get("description"))
.external_id.set(dataset.get("external_id"))
.metadata.set(dataset.get("metadata"))
for name, dataset in chunked_existing_datasets.items()
]
if self.is_dry_run:
for data_set_to_be_updated in datasets_to_be_updated:
_logger.info(f"Dry run - Updating dataset with name: <{data_set_to_be_updated.name}>")
_logger.debug(f"Dry run - Updating dataset: <{data_set_to_be_updated}>")
# _logger.info(f"Dry run - Updating dataset: <{data_set_to_be_updated}>")
else:
self.client.data_sets.update(datasets_to_be_updated)
return list(target_datasets.keys()), list(missing_datasets.keys())
def generate_target_raw_dbs(self) -> Set[str]:
# list of all targets: autogenerated raw_db names
target_raw_db_names = set(
[
self.get_raw_dbs_name_template().format(node_name=ns_node.node_name, raw_variant=raw_variant)
for ns in self.bootstrap_config.namespaces
for ns_node in ns.ns_nodes
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
target_raw_db_names.update(
# add RAW DBs for 'all' users
[
self.get_raw_dbs_name_template().format(
node_name=f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}"
if ns_name
else BootstrapCore.AGGREGATED_LEVEL_NAME,
raw_variant=raw_variant,
)
# creating allprojects at group type level + top-level
for ns_name in list([ns.ns_name for ns in self.bootstrap_config.namespaces]) + [""]
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
return target_raw_db_names
def generate_missing_raw_dbs(self) -> Tuple[List[str], List[str]]:
target_raw_db_names = self.generate_target_raw_dbs()
try:
# which targets are not already deployed?
missing_rawdbs = target_raw_db_names - set(self.deployed["raw_dbs"]["name"])
except Exception as exc:
_logger.info(f"Raw databases do not exist in CDF:\n{exc}")
missing_rawdbs = target_raw_db_names
if missing_rawdbs:
# create all raw_dbs which are not already deployed
if self.is_dry_run:
for raw_db in list(missing_rawdbs):
_logger.info(f"Dry run - Creating rawdb: <{raw_db}>")
else:
self.client.raw.databases.create(list(missing_rawdbs))
return target_raw_db_names, missing_rawdbs
"""
"Special CDF Groups" are groups which don't have capabilities but have an effect by their name only.
1. 'transformations' group: grants access to "Fusion > Integrate > Transformations"
    2. 'extractors' group: grants access to "Fusion > Integrate > Extract Data", which allows download of extractors
    Both of them are about to be deprecated in the near future (time of writing: Q4 '21).
- 'transformations' can already be replaced with dedicated 'transformationsAcl' capabilities
- 'extractors' only used to grant access to extractor-download page
"""
def generate_special_groups(self):
special_group_names = ["extractors", "transformations"]
_logger.info(f"Generating special groups:\n{special_group_names}")
for special_group_name in special_group_names:
self.create_group(group_name=special_group_name)
# generate all groups - iterating through the 3-level hierarchy
def generate_groups(self):
# permutate the combinations
for action in ["read", "owner"]: # action_dimensions w/o 'admin'
for ns in self.bootstrap_config.namespaces:
for ns_node in ns.ns_nodes:
# group for each dedicated group-type id
self.process_group(action, ns.ns_name, ns_node.node_name)
# 'all' groups on group-type level
# (access to all datasets/ raw-dbs which belong to this group-type)
self.process_group(action, ns.ns_name)
# 'all' groups on action level (no limits to datasets or raw-dbs)
self.process_group(action)
# creating CDF Group for root_account (highest admin-level)
for root_account in ["root"]:
self.process_group(root_account=root_account)
def load_deployed_config_from_cdf(self, groups_only=False) -> None:
"""Load CDF Groups, Datasets and RAW DBs as pd.DataFrames
and store them in 'self.deployed' dictionary.
Args:
groups_only (bool, optional): Limit to CDF Groups only (used by 'prepare' command). Defaults to False.
"""
NOLIMIT = -1
#
# Groups
#
groups_df = self.client.iam.groups.list(all=True).to_pandas()
available_group_columns = [
column
for column in groups_df.columns
if column in ["name", "id", "sourceId", "capabilities"]
] # fmt: skip
if groups_only:
#
# early exit
#
self.deployed = {"groups": groups_df[available_group_columns]}
return
#
# Data Sets
#
datasets_df = self.client.data_sets.list(limit=NOLIMIT).to_pandas()
if len(datasets_df) == 0:
# create an empty dataframe with columns, as SDK responded with no columns
datasets_df = pd.DataFrame(columns=["name", "id"])
else:
datasets_df = datasets_df[["name", "id"]]
#
# RAW DBs
#
rawdbs_df = self.client.raw.databases.list(limit=NOLIMIT).to_pandas()
if len(rawdbs_df) == 0:
# create an empty dataframe with columns, as SDK responded with no columns
rawdbs_df = pd.DataFrame(columns=["name"])
else:
rawdbs_df = rawdbs_df[["name"]]
# store DataFrames
# deployed: Dict[str, pd.DataFrame]
self.deployed = {
"groups": groups_df[available_group_columns],
"datasets": datasets_df,
"raw_dbs": rawdbs_df,
}
# prepare a yaml for "delete" job
def dump_delete_template_to_yaml(self) -> None:
# and reload again now with latest group config too
time.sleep(5) # wait for groups to be created!
self.load_deployed_config_from_cdf()
delete_template = yaml.dump(
{
"delete_or_deprecate": {
"raw_dbs": [],
"datasets": [],
"groups": [],
},
"latest_deployment": {
"raw_dbs": sorted(self.deployed["raw_dbs"].sort_values(["name"])["name"].tolist()),
# fillna('') because dataset names can be empty (NaN value)
"datasets": sorted(self.deployed["datasets"].fillna("").sort_values(["name"])["name"].tolist()),
# fillna('') because group names can be empty (NaN value)
"groups": sorted(self.deployed["groups"].fillna("").sort_values(["name"])["name"].tolist()),
},
# TODO: 220509 pa: this dict cannot support (possible) duplicate dataset names
# and why is this dumped anyway? Is this just for info?
"dataset_ids": {
row["name"]: row["id"] for i, row in sorted(self.deployed["datasets"][["name", "id"]].iterrows())
},
}
)
_logger.info(f"Delete template:\n{delete_template}")
# return delete_template
"""
### create / delete
* new in config
* delete removed from config
"""
def dry_run(self, dry_run: YesNoType) -> T_BootstrapCore:
self.is_dry_run = dry_run == YesNoType.yes
# return self for command chaining
return self
# '''
# oo.ooooo. oooo d8b .ooooo. oo.ooooo. .oooo. oooo d8b .ooooo.
# 888' `88b `888""8P d88' `88b 888' `88b `P )88b `888""8P d88' `88b
# 888 888 888 888ooo888 888 888 .oP"888 888 888ooo888
# 888 888 888 888 .o 888 888 d8( 888 888 888 .o
# 888bod8P' d888b `Y8bod8P' 888bod8P' `Y888""8o d888b `Y8bod8P'
# 888 888
# o888o o888o
# '''
def prepare(self, idp_source_id: str) -> None:
group_name = "cdf:bootstrap"
# group_name = f"{create_config.environment}:bootstrap"
group_capabilities = [
{"datasetsAcl": {"actions": ["READ", "WRITE", "OWNER"], "scope": {"all": {}}}},
{"rawAcl": {"actions": ["READ", "WRITE", "LIST"], "scope": {"all": {}}}},
{"groupsAcl": {"actions": ["LIST", "READ", "CREATE", "UPDATE", "DELETE"], "scope": {"all": {}}}},
{"projectsAcl": {"actions": ["READ", "UPDATE"], "scope": {"all": {}}}},
]
# TODO: replace with dataclass
idp_mapping = [
# sourceId
idp_source_id,
# sourceName
f"IdP Group ID: {idp_source_id}",
]
# load deployed groups with their ids and metadata
self.load_deployed_config_from_cdf(groups_only=True)
_logger.debug(f"GROUPS in CDF:\n{self.deployed['groups']}")
# allows idempotent creates, as it cleans up old groups with same names after creation
self.create_group(group_name=group_name, group_capabilities=group_capabilities, idp_mapping=idp_mapping)
if not self.is_dry_run:
_logger.info(f"Created CDF Group {group_name}")
_logger.info("Finished CDF Project Bootstrapper in 'prepare' mode ")
# '''
# .o8 oooo .
# "888 `888 .o8
# .oooo888 .ooooo. 888 .ooooo. .o888oo .ooooo.
# d88' `888 d88' `88b 888 d88' `88b 888 d88' `88b
# 888 888 888ooo888 888 888ooo888 888 888ooo888
# 888 888 888 .o 888 888 .o 888 . 888 .o
# `Y8bod88P" `Y8bod8P' o888o `Y8bod8P' "888" `Y8bod8P'
# '''
def delete(self):
# load deployed groups, datasets, raw_dbs with their ids and metadata
self.load_deployed_config_from_cdf()
# groups
group_names = self.delete_or_deprecate["groups"]
if group_names:
delete_group_ids = self.deployed["groups"].query("name in @group_names")["id"].tolist()
if delete_group_ids:
# only delete groups which exist
_logger.info(f"DELETE groups: {group_names}")
if not self.is_dry_run:
self.client.iam.groups.delete(delete_group_ids)
else:
_logger.info(f"Groups already deleted: {group_names}")
else:
_logger.info("No Groups to delete")
# raw_dbs
raw_db_names = self.delete_or_deprecate["raw_dbs"]
if raw_db_names:
delete_raw_db_names = list(set(raw_db_names).intersection(set(self.deployed["raw_dbs"]["name"])))
if delete_raw_db_names:
# only delete dbs which exist
# print("DELETE raw_dbs recursive with tables: ", raw_db_names)
_logger.info(f"DELETE raw_dbs recursive with tables: {raw_db_names}")
if not self.is_dry_run:
self.client.raw.databases.delete(delete_raw_db_names, recursive=True)
else:
# print(f"RAW DBs already deleted: {raw_db_names}")
_logger.info(f"RAW DBs already deleted: {raw_db_names}")
else:
_logger.info("No RAW Databases to delete")
# datasets cannot be deleted by design
        # deprecate/archive them by prefixing their name with "_DEPR_", setting
# "archive=true" and a "description" with timestamp of deprecation
dataset_names = self.delete_or_deprecate["datasets"]
if dataset_names:
# get datasets which exists by name
delete_datasets_df = self.deployed["datasets"].query("name in @dataset_names")
if not delete_datasets_df.empty:
for i, row in delete_datasets_df.iterrows():
_logger.info(f"DEPRECATE dataset: {row['name']}")
update_dataset = self.client.data_sets.retrieve(id=row["id"])
update_dataset.name = (
f"_DEPR_{update_dataset.name}"
if not update_dataset.name.startswith("_DEPR_")
else f"{update_dataset.name}"
) # don't stack the DEPR prefixes
update_dataset.description = "Deprecated {}".format(self.get_timestamp())
update_dataset.metadata = dict(update_dataset.metadata, archived=True) # or dict(a, **b)
update_dataset.external_id = f"_DEPR_{update_dataset.external_id}_[{self.get_timestamp()}]"
                    if self.is_dry_run:
                        _logger.info(f"Dry run - Deprecating dataset: <{update_dataset}>")
                    else:
                        self.client.data_sets.update(update_dataset)
else:
_logger.info("No Datasets to archive (and mark as deprecated)")
        # dump all configs to yaml, as copy/paste template for delete_or_deprecate step
self.dump_delete_template_to_yaml()
# TODO: write to file or standard output
_logger.info("Finished deleting CDF Groups, Datasets and RAW Databases")
# '''
# .o8 oooo
# "888 `888
# .oooo888 .ooooo. oo.ooooo. 888 .ooooo. oooo ooo
# d88' `888 d88' `88b 888' `88b 888 d88' `88b `88. .8'
# 888 888 888ooo888 888 888 888 888 888 `88..8'
# 888 888 888 .o 888 888 888 888 888 `888'
# `Y8bod88P" `Y8bod8P' 888bod8P' o888o `Y8bod8P' .8'
# 888 .o..P'
# o888o `Y8P'
# '''
def deploy(self, with_special_groups: YesNoType, with_raw_capability: YesNoType) -> None:
# store parameter as bool
# if provided they override configuration or defaults from yaml-config
if with_special_groups:
self.with_special_groups = with_special_groups == YesNoType.yes
if with_raw_capability:
self.with_raw_capability = with_raw_capability == YesNoType.yes
# debug new features and override with cli-parameters
_logger.info(f"From cli: {with_special_groups=} / {with_raw_capability=}")
_logger.info(f"Effective: {self.with_special_groups=} / {self.with_raw_capability=}")
# load deployed groups, datasets, raw_dbs with their ids and metadata
self.load_deployed_config_from_cdf()
_logger.debug(f"RAW_DBS in CDF:\n{self.deployed['raw_dbs']}")
_logger.debug(f"DATASETS in CDF:\n{self.deployed['datasets']}")
_logger.debug(f"GROUPS in CDF:\n{self.deployed['groups']}")
# run generate steps (only print results atm)
target_raw_dbs: List[str] = []
new_created_raw_dbs: List[str] = []
if self.with_raw_capability:
target_raw_dbs, new_created_raw_dbs = self.generate_missing_raw_dbs()
_logger.info(f"All RAW_DBS from config:\n{target_raw_dbs}")
_logger.info(f"New RAW_DBS to CDF:\n{new_created_raw_dbs}")
else:
# no RAW DBs means no access to RAW at all
# which means no 'rawAcl' capability to create
            # remove it from the default types
_logger.info("Creating no RAW_DBS and no 'rawAcl' capability")
acl_default_types.remove("raw")
target_datasets, new_created_datasets = self.generate_missing_datasets()
_logger.info(f"All DATASETS from config:\n{target_datasets}")
_logger.info(f"New DATASETS to CDF:\n{new_created_datasets}")
# store all raw_dbs and datasets in scope of this configuration
self.all_scope_ctx = {
"raw": target_raw_dbs, # all raw_dbs
"datasets": target_datasets, # all datasets
}
# reload deployed configs to be used as reference for group creation
time.sleep(5) # wait for datasets and raw_dbs to be created!
self.load_deployed_config_from_cdf()
# Special CDF Groups and their aad_mappings
if with_special_groups == YesNoType.yes:
self.generate_special_groups()
# CDF Groups from configuration
self.generate_groups()
if not self.is_dry_run:
_logger.info("Created new CDF Groups")
# and reload again now with latest group config too
        # dump all configs to yaml, as copy/paste template for delete_or_deprecate step
self.dump_delete_template_to_yaml()
_logger.info("Finished creating CDF Groups, Datasets and RAW Databases")
# _logger.info(f'Bootstrap Pipelines: created: {len(created)}, deleted: {len(delete_ids)}')
# '''
# .o8 o8o
# "888 `"'
# .oooo888 oooo .oooo. .oooooooo oooo d8b .oooo. ooo. .oo. .oo.
# d88' `888 `888 `P )88b 888' `88b `888""8P `P )88b `888P"Y88bP"Y88b
# 888 888 888 .oP"888 888 888 888 .oP"888 888 888 888
# 888 888 888 d8( 888 `88bod8P' 888 d8( 888 888 888 888
# `Y8bod88P" o888o `Y888""8o `8oooooo. d888b `Y888""8o o888o o888o o888o
# d" YD
# "Y88888P'
# '''
def diagram(
self,
to_markdown: YesNoType = YesNoType.no,
with_raw_capability: YesNoType = YesNoType.yes,
cdf_project: str = None,
) -> None:
"""Diagram mode used to document the given configuration as a Mermaid diagram.
Args:
to_markdown (YesNoType, optional):
- Encapsulate Mermaid diagram in Markdown syntax.
- Defaults to 'YesNoType.no'.
with_raw_capability (YesNoType, optional):
                - Create RAW DBs and 'rawAcl' capability. Defaults to 'YesNoType.yes'.
cdf_project (str, optional):
- Provide the CDF Project to use for the diagram 'idp-cdf-mappings'.
Example:
# requires a 'cognite' configuration section
➟ poetry run bootstrap-cli diagram configs/config-deploy-example-v2.yml | clip.exe
            # takes precedence over 'cognite.project' to select which CDF Project's 'bootstrap.idp-cdf-mappings' to diagram
# making a 'cognite' section optional
➟ poetry run bootstrap-cli diagram --cdf-project shiny-dev configs/config-deploy-example-v2.yml | clip.exe
            # takes precedence over the configuration 'bootstrap.features.with-raw-capability'
➟ poetry run bootstrap-cli diagram --with-raw-capability no --cdf-project shiny-prod configs/config-deploy-example-v2.yml
""" # noqa
diagram_cdf_project = cdf_project if cdf_project else self.cdf_project
# same handling as in 'deploy' command
# store parameter as bool
# if available it overrides configuration or defaults from yaml-config
if with_raw_capability:
self.with_raw_capability = with_raw_capability == YesNoType.yes
# debug new features and override with cli-parameters
_logger.info(f"From cli: {with_raw_capability=}")
_logger.info(f"Effective: {self.with_raw_capability=}")
# store all raw_dbs and datasets in scope of this configuration
self.all_scope_ctx = {
"owner": (
all_scopes := {
# generate_target_raw_dbs -> returns a Set[str]
"raw": list(self.generate_target_raw_dbs()), # all raw_dbs
# generate_target_datasets -> returns a Dict[str, Any]
"datasets": list(self.generate_target_datasets().keys()), # all datasets
}
),
# and copy the same to 'read'
"read": all_scopes,
}
def get_group_name_and_scopes(
action: str = None, ns_name: str = None, node_name: str = None, root_account: str = None
) -> Tuple[str, Dict[str, Any]]:
"""Adopted generate_group_name_and_capabilities() and get_scope_ctx_groupedby_action()
to respond with
- the full-qualified CDF Group name and
- all scopes sorted by action [read|owner] and [raw|datasets]
TODO: support 'root'
Args:
action (str, optional):
One of the action_dimensions ["read", "owner"].
Defaults to None.
ns_name (str, optional):
Namespace like "src" or "uc".
Defaults to None.
node_name (str, optional):
Core group like "src:001:sap" or "uc:003:demand".
Defaults to None.
root_account (str, optional):
Name of the root-account.
Defaults to None.
Returns:
Tuple[str, Dict[str, Any]]: (group_name, scope_ctx_by_action)
scope_ctx_by_action is a dictionary with the following structure:
{'owner': {
'raw': ['src:002:weather:rawdb', 'src:002:weather:rawdb:state'],
'datasets': ['src:002:weather:dataset']
},
'read': {
'raw': [],
'datasets': []
}}
"""
group_name_full_qualified, scope_ctx_by_action = None, None
# detail level like cdf:src:001:public:read
if action and ns_name and node_name:
group_name_full_qualified = f"{BootstrapCore.GROUP_NAME_PREFIX}{node_name}:{action}"
scope_ctx_by_action = self.get_scope_ctx_groupedby_action(action, ns_name, node_name)
# group-type level like cdf:src:all:read
elif action and ns_name:
# 'all' groups on group-type level
# (access to all datasets/ raw-dbs which belong to this group-type)
group_name_full_qualified = (
f"{BootstrapCore.GROUP_NAME_PREFIX}{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}"
)
scope_ctx_by_action = self.get_scope_ctx_groupedby_action(action, ns_name)
# top level like cdf:all:read
elif action:
# 'all' groups on action level (no limits to datasets or raw-dbs)
group_name_full_qualified = (
f"{BootstrapCore.GROUP_NAME_PREFIX}{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}"
)
# limit all_scopes to 'action'
scope_ctx_by_action = {action: self.all_scope_ctx[action]}
# root level like cdf:root
elif root_account: # no parameters
# all (no limits)
group_name_full_qualified = f"{BootstrapCore.GROUP_NAME_PREFIX}{root_account}"
return group_name_full_qualified, scope_ctx_by_action
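        # Mermaid subgraph containers; the enum value is the human-readable title used when rendering the diagram.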
class SubgraphTypes(str, Enum):
idp = "IdP Groups"
owner = "'Owner' Groups"
read = "'Read' Groups"
# OWNER
core_cdf_owner = "Node Level (Owner)"
ns_cdf_owner = "Namespace Level (Owner)"
scope_owner = "Scopes (Owner)"
# READ
core_cdf_read = "Node Level (Read)"
ns_cdf_read = "Namespace Level (Read)"
scope_read = "Scopes (Read)"
# TODO: refactoring required
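        # Render one CDF Group with its scopes as nodes and edges in the graph registry (the root account is skipped).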
def group_to_graph(
graph: GraphRegistry,
action: str = None,
ns_name: str = None,
node_name: str = None,
root_account: str = None,
) -> None:
if root_account:
return
group_name, scope_ctx_by_action = get_group_name_and_scopes(action, ns_name, node_name, root_account)
# check lookup from provided config
mapping = self.bootstrap_config.get_idp_cdf_mapping_for_group(
# diagram explicit given cdf_project, or configured in 'cognite' configuration section
cdf_project=diagram_cdf_project,
cdf_group=group_name,
)
# unpack
# idp_source_id, idp_source_name = self.aad_mapping_lookup.get(node_name, [None, None])
idp_source_id, idp_source_name = mapping.idp_source_id, mapping.idp_source_name
_logger.info(f"{ns_name=} : {group_name=} : {scope_ctx_by_action=} [{idp_source_name=}]")
# preload master subgraphs
core_cdf = graph.get_or_create(getattr(SubgraphTypes, f"core_cdf_{action}"))
ns_cdf_graph = graph.get_or_create(getattr(SubgraphTypes, f"ns_cdf_{action}"))
scope_graph = graph.get_or_create(getattr(SubgraphTypes, f"scope_{action}"))
#
# NODE - IDP GROUP
#
idp = graph.get_or_create(SubgraphTypes.idp)
if idp_source_name and (idp_source_name not in idp):
idp.elements.append(
TrapezNode(
id_name=idp_source_name,
display=idp_source_name,
comments=[f'IdP objectId: {idp_source_id}']
)
) # fmt: skip
graph.edges.append(
Edge(
id_name=idp_source_name,
dest=group_name,
annotation=None,
comments=[]
)
) # fmt: skip
# {'owner': {'raw': ['src:002:weather:rawdb', 'src:002:weather:rawdb:state'],
# 'datasets': ['src:002:weather:dataset']},
# 'read': {'raw': [], 'datasets': []}}
#
# NODE - CORE LEVEL
# 'cdf:src:001:public:read'
#
if action and ns_name and node_name:
core_cdf.elements.append(
RoundedNode(
id_name=group_name,
display=group_name,
comments=""
)
) # fmt: skip
#
# EDGE FROM PARENT 'src:all' to 'src:001:sap'
#
edge_type_cls = Edge if action == "owner" else DottedEdge
graph.edges.append(
edge_type_cls(
# link from all:{ns}
# multiline f-string split as it got too long
# TODO: refactor into string-templates
id_name=f"{BootstrapCore.GROUP_NAME_PREFIX}{ns_name}:"
f"{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}",
dest=group_name,
annotation="",
comments=[],
)
) # fmt: skip
# add core and all scopes
# shared_action: [read|owner]
for shared_action, scope_ctx in scope_ctx_by_action.items():
# scope_type: [raw|datasets]
# scopes: List[str]
for scope_type, scopes in scope_ctx.items():
if not self.with_raw_capability and scope_type == "raw":
continue # SKIP RAW
for scope_name in scopes:
#
# NODE DATASET or RAW scope
# 'src:001:sap:rawdb'
#
if scope_name not in scope_graph:
node_type_cls = SubroutineNode if scope_type == "raw" else AssymetricNode
scope_graph.elements.append(
node_type_cls(
id_name=f"{scope_name}__{action}__{scope_type}",
display=scope_name,
comments=""
)
) # fmt: skip
#
# EDGE FROM actual processed group-node to added scope
# cdf:src:001:sap:read to 'src:001:sap:rawdb'
#
edge_type_cls = Edge if shared_action == "owner" else DottedEdge
graph.edges.append(
edge_type_cls(
id_name=group_name,
dest=f"{scope_name}__{action}__{scope_type}",
annotation=shared_action,
comments=[],
)
) # fmt: skip
#
# NODE - NAMESPACE LEVEL
# 'src:all:read' or 'src:all:owner'
elif action and ns_name:
ns_cdf_graph.elements.append(
Node(
id_name=group_name,
display=group_name,
comments=""
)
) # fmt: skip
#
# EDGE FROM PARENT top LEVEL to NAMESPACE LEVEL
# 'all' to 'src:all'
#
edge_type_cls = Edge if action == "owner" else DottedEdge
graph.edges.append(
edge_type_cls(
id_name=f"{BootstrapCore.GROUP_NAME_PREFIX}{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}",
dest=group_name,
annotation="",
comments=[],
)
) # fmt: skip
# add namespace-node and all scopes
# shared_action: [read|owner]
for shared_action, scope_ctx in scope_ctx_by_action.items():
# scope_type: [raw|datasets]
# scopes: List[str]
for scope_type, scopes in scope_ctx.items():
if not self.with_raw_capability and scope_type == "raw":
continue # SKIP RAW
for scope_name in scopes:
# LIMIT only to direct scopes for readability
# which have for example 'src:all:' as prefix
if not scope_name.startswith(f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}:"):
continue
#
# NODE DATASET or RAW scope
# 'src:all:rawdb'
#
if scope_name not in scope_graph:
node_type_cls = SubroutineNode if scope_type == "raw" else AssymetricNode
scope_graph.elements.append(
node_type_cls(
id_name=f"{scope_name}__{action}__{scope_type}",
display=scope_name,
comments=""
)
) # fmt: skip
#
# EDGE FROM actual processed group-node to added scope
# cdf:src:all:read to 'src:all:rawdb'
#
edge_type_cls = Edge if shared_action == "owner" else DottedEdge
graph.edges.append(
edge_type_cls(
id_name=group_name,
dest=f"{scope_name}__{action}__{scope_type}",
annotation=shared_action,
comments=[],
)
) # fmt: skip
#
# NODE - TOP LEVEL
# like `cdf:all:read`
#
elif action:
ns_cdf_graph.elements.append(
Node(
id_name=group_name,
display=group_name,
comments=""
)
) # fmt: skip
# add namespace-node and all scopes
# shared_action: [read|owner]
for shared_action, scope_ctx in scope_ctx_by_action.items():
# scope_type: [raw|datasets]
# scopes: List[str]
for scope_type, scopes in scope_ctx.items():
if not self.with_raw_capability and scope_type == "raw":
continue # SKIP RAW
for scope_name in scopes:
# LIMIT only to direct scopes for readability
# which have for example 'src:all:' as prefix
if not scope_name.startswith(f"{BootstrapCore.AGGREGATED_LEVEL_NAME}:"):
continue
# _logger.info(f"> {action=} {shared_action=} process {scope_name=} : all {scopes=}")
#
# NODE DATASET or RAW scope
# 'all:rawdb'
#
if scope_name not in scope_graph:
# _logger.info(f">> add {scope_name=}__{action=}")
node_type_cls = SubroutineNode if scope_type == "raw" else AssymetricNode
scope_graph.elements.append(
node_type_cls(
id_name=f"{scope_name}__{action}__{scope_type}",
display=scope_name,
comments=""
)
) # fmt: skip
#
# EDGE FROM actual processed group-node to added scope
# cdf:all:read to 'all:rawdb'
#
edge_type_cls = Edge if shared_action == "owner" else DottedEdge
graph.edges.append(
edge_type_cls(
id_name=group_name,
dest=f"{scope_name}__{action}__{scope_type}",
annotation=shared_action,
comments=[],
)
) # fmt: skip
#
# finished inline helper-methods
# starting diagram logic
#
if not self.with_raw_capability:
# no RAW DBs means no access to RAW at all
# which means no 'rawAcl' capability to create
            # remove it from the default types
_logger.info("Without RAW_DBS and 'rawAcl' capability")
acl_default_types.remove("raw")
# sorting relationship output into potential subgraphs
graph = GraphRegistry()
# top subgraphs (three columns layout)
# provide Subgraphs with a 'subgraph_name' and a 'subgraph_short_name'
# using the SubgraphTypes enum 'name' (default) and 'value' properties
idp_group = graph.get_or_create(
SubgraphTypes.idp, f"{SubgraphTypes.idp.value} for CDF: '{diagram_cdf_project}'"
)
owner = graph.get_or_create(SubgraphTypes.owner, SubgraphTypes.owner.value)
read = graph.get_or_create(SubgraphTypes.read, SubgraphTypes.read.value)
# nested subgraphs
core_cdf_owner = graph.get_or_create(SubgraphTypes.core_cdf_owner, SubgraphTypes.core_cdf_owner.value)
ns_cdf_owner = graph.get_or_create(SubgraphTypes.ns_cdf_owner, SubgraphTypes.ns_cdf_owner.value)
core_cdf_read = graph.get_or_create(SubgraphTypes.core_cdf_read, SubgraphTypes.core_cdf_read.value)
ns_cdf_read = graph.get_or_create(SubgraphTypes.ns_cdf_read, SubgraphTypes.ns_cdf_read.value)
scope_owner = graph.get_or_create(SubgraphTypes.scope_owner, SubgraphTypes.scope_owner.value)
scope_read = graph.get_or_create(SubgraphTypes.scope_read, SubgraphTypes.scope_read.value)
# add the three top level groups to our graph
graph.elements.extend(
[
idp_group,
owner,
read,
# doc_group
]
)
# add/nest the owner-subgraphs to its parent subgraph
owner.elements.extend(
[
core_cdf_owner,
ns_cdf_owner,
scope_owner,
]
)
# add/nest the read-subgraphs to its parent subgraph
read.elements.extend(
[
core_cdf_read,
ns_cdf_read,
scope_read,
]
)
# permutate the combinations
for action in ["read", "owner"]: # action_dimensions w/o 'admin'
for ns in self.bootstrap_config.namespaces:
for ns_node in ns.ns_nodes:
# group for each dedicated group-type id
group_to_graph(graph, action, ns.ns_name, ns_node.node_name)
# 'all' groups on group-type level
# (access to all datasets/ raw-dbs which belong to this group-type)
group_to_graph(graph, action, ns.ns_name)
# 'all' groups on action level (no limits to datasets or raw-dbs)
group_to_graph(graph, action)
# all (no limits + admin)
# 211013 pa: for AAD root:client and root:user can be merged into 'root'
# for root_account in ["root:client", "root:user"]:
for root_account in ["root"]:
group_to_graph(graph, root_account=root_account)
mermaid_code = graph.to_mermaid()
_logger.info(f"Generated {len(mermaid_code)} characters")
markdown_wrapper_template = """
## auto-generated by bootstrap-cli
```mermaid
{mermaid_code}
```"""
        # print to stdout so that only the diagram can be piped to clipboard or file
print(
markdown_wrapper_template.format(mermaid_code=mermaid_code)
if to_markdown == YesNoType.yes
else mermaid_code
)
# '''
# 888 d8b 888
# 888 Y8P 888
# 888 888
# .d8888b 888 888 .d8888b 888 888
# d88P" 888 888 d88P" 888 .88P
# 888 888 888 888 888888K
# Y88b. 888 888 Y88b. 888 "88b
# "Y8888P 888 888 "Y8888P 888 888
# '''
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option(prog_name="bootstrap_cli", version=__version__)
@click.option(
"--cdf-project-name",
help="CDF Project to interact with CDF API, the 'BOOTSTRAP_CDF_PROJECT',"
"environment variable can be used instead. Required for OAuth2 and optional for api-keys.",
envvar="BOOTSTRAP_CDF_PROJECT",
)
# TODO: is cluster and alternative for host?
@click.option(
"--cluster",
default="westeurope-1",
help="The CDF cluster where CDF Project is hosted (e.g. greenfield, europe-west1-1),"
"Provide this or make sure to set the 'BOOTSTRAP_CDF_CLUSTER' environment variable. "
"Default: westeurope-1",
envvar="BOOTSTRAP_CDF_CLUSTER",
)
@click.option(
"--host",
default="https://bluefield.cognitedata.com/",
help="The CDF host where CDF Project is hosted (e.g. https://bluefield.cognitedata.com),"
"Provide this or make sure to set the 'BOOTSTRAP_CDF_HOST' environment variable."
"Default: https://bluefield.cognitedata.com/",
envvar="BOOTSTRAP_CDF_HOST",
)
@click.option(
"--api-key",
help="API key to interact with CDF API. Provide this or make sure to set the 'BOOTSTRAP_CDF_API_KEY',"
"environment variable if you want to authenticate with API keys.",
envvar="BOOTSTRAP_CDF_API_KEY",
)
@click.option(
"--client-id",
help="IdP Client ID to interact with CDF API. Provide this or make sure to set the "
"'BOOTSTRAP_IDP_CLIENT_ID' environment variable if you want to authenticate with OAuth2.",
envvar="BOOTSTRAP_IDP_CLIENT_ID",
)
@click.option(
"--client-secret",
help="IdP Client secret to interact with CDF API. Provide this or make sure to set the "
"'BOOTSTRAP_IDP_CLIENT_SECRET' environment variable if you want to authenticate with OAuth2.",
envvar="BOOTSTRAP_IDP_CLIENT_SECRET",
)
@click.option(
"--token-url",
help="IdP Token URL to interact with CDF API. Provide this or make sure to set the "
"'BOOTSTRAP_IDP_TOKEN_URL' environment variable if you want to authenticate with OAuth2.",
envvar="BOOTSTRAP_IDP_TOKEN_URL",
)
@click.option(
"--scopes",
help="IdP Scopes to interact with CDF API, relevant for OAuth2 authentication method. "
"The 'BOOTSTRAP_IDP_SCOPES' environment variable can be used instead.",
envvar="BOOTSTRAP_IDP_SCOPES",
)
@click.option(
"--audience",
help="IdP Audience to interact with CDF API, relevant for OAuth2 authentication method. "
"The 'BOOTSTRAP_IDP_AUDIENCE' environment variable can be used instead.",
envvar="BOOTSTRAP_IDP_AUDIENCE",
)
@click.option(
"--dotenv-path",
help="Provide a relative or absolute path to an .env file (for commandline usage only)",
)
@click.option(
"--debug",
is_flag=True,
help="Print debug information",
)
@click.option(
"--dry-run",
default="no",
type=click.Choice(["yes", "no"], case_sensitive=False),
help="Only logging planned CDF API action while doing nothing." " Defaults to 'no'",
)
@click.pass_context
def bootstrap_cli(
# click.core.Context
context: Context,
# cdf
cluster: str = "westeurope-1",
cdf_project_name: Optional[str] = None,
host: str = None,
api_key: Optional[str] = None,
# cdf idp
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
scopes: Optional[str] = None,
token_url: Optional[str] = None,
audience: Optional[str] = None,
# cli
# TODO: dotenv_path: Optional[click.Path] = None,
dotenv_path: Optional[str] = None,
debug: bool = False,
dry_run: str = "no",
) -> None:
# load .env from file if exists, use given dotenv_path if provided
load_dotenv(dotenv_path=dotenv_path)
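    # stash CLI and IdP parameters on the click context object for the subcommands (accessed via @click.pass_obj)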
context.obj = {
# cdf
"cluster": cluster,
"cdf_project_name": cdf_project_name,
"host": host,
"api_key": api_key,
# cdf idp
"client_id": client_id,
"client_secret": client_secret,
"scopes": scopes,
"token_url": token_url,
"audience": audience,
# cli
"dotenv_path": dotenv_path,
"debug": debug,
"dry_run": dry_run,
}
@click.command(help="Deploy a set of bootstrap from a config-file")
@click.argument(
"config_file",
default="./config-bootstrap.yml",
)
@click.option(
"--with-special-groups",
# having this as a flag is not working for gh-action 'actions.yml' manifest
# instead using explicit choice options
# is_flag=True,
# default="no",
type=click.Choice(["yes", "no"], case_sensitive=False),
help="Create special CDF Groups, which don't have capabilities (extractions, transformations). Defaults to 'no'",
)
@click.option(
"--with-raw-capability",
# default="yes", # default defined in 'configuration.BootstrapFeatures'
type=click.Choice(["yes", "no"], case_sensitive=False),
help="Create RAW DBs and 'rawAcl' capability. Defaults to 'yes'",
)
@click.pass_obj
def deploy(
# click.core.Context obj
obj: Dict,
config_file: str,
with_special_groups: YesNoType,
with_raw_capability: YesNoType,
) -> None:
click.echo(click.style("Deploying CDF Project bootstrap...", fg="red"))
if obj["debug"]:
# TODO not working yet :/
_logger.setLevel("DEBUG") # INFO/DEBUG
try:
(
BootstrapCore(config_file, command=CommandMode.DEPLOY)
.validate_config_length_limits()
.validate_config_is_cdf_project_in_mappings()
.dry_run(obj["dry_run"])
.deploy(
with_special_groups=with_special_groups,
with_raw_capability=with_raw_capability,
)
) # fmt:skip
click.echo(click.style("CDF Project bootstrap deployed", fg="blue"))
except BootstrapConfigError as e:
exit(e.message)
@click.command(
help="Prepare an elevated CDF Group 'cdf:bootstrap', using the same AAD Group link "
"as your initially provided 'oidc-admin-group'. "
"With additional capabilities to run the 'deploy' and 'delete' commands next. "
"The 'prepare' command is only required once per CDF Project."
)
@click.argument(
"config_file",
default="./config-bootstrap.yml",
)
# TODO: support '--idp-source-id' as an option too, to match v2 naming changes?
@click.option(
"--aad-source-id",
"--idp-source-id",
"idp_source_id", # explicit named variable for alternatives
required=True,
help="Provide the IdP Source ID to use for the 'cdf:bootstrap' Group. "
"Typically for a new project its the same configured for the initial provided "
"CDF Group named 'oidc-admin-group'. "
"The parameter option '--aad-source-id' will be deprecated in next major release",
)
@click.pass_obj
def prepare(
# click.core.Context obj
obj: Dict,
config_file: str,
idp_source_id: str,
dry_run: YesNoType = YesNoType.no,
) -> None:
click.echo(click.style("Prepare CDF Project ...", fg="red"))
if obj["debug"]:
# TODO not working yet :/
_logger.setLevel("DEBUG") # INFO/DEBUG
try:
(
BootstrapCore(config_file, command=CommandMode.PREPARE)
# .validate_config() # TODO
.dry_run(obj["dry_run"])
.prepare(idp_source_id=idp_source_id)
) # fmt:skip
click.echo(click.style("CDF Project bootstrap prepared for running 'deploy' command next.", fg="blue"))
except BootstrapConfigError as e:
exit(e.message)
@click.command(
help="Delete mode used to delete CDF Groups, Datasets and Raw Databases, "
"CDF Groups and RAW Databases will be deleted, while Datasets will be archived "
"and deprecated (as they cannot be deleted)."
)
@click.argument(
"config_file",
default="./config-bootstrap.yml",
)
@click.pass_obj
def delete(
# click.core.Context obj
obj: Dict,
config_file: str,
) -> None:
click.echo(click.style("Delete CDF Project ...", fg="red"))
if obj["debug"]:
# TODO not working yet :/
_logger.setLevel("DEBUG") # INFO/DEBUG
try:
(
BootstrapCore(config_file, command=CommandMode.DELETE)
# .validate_config() # TODO
.dry_run(obj["dry_run"]).delete()
)
click.echo(
click.style(
"CDF Project relevant groups and raw_dbs are deleted and/or datasets are archived and deprecated ",
fg="blue",
)
)
except BootstrapConfigError as e:
exit(e.message)
@click.command(help="Diagram mode used to document the given configuration as a Mermaid diagram")
@click.argument(
"config_file",
default="./config-bootstrap.yml",
)
@click.option(
"--markdown",
default="no",
type=click.Choice(["yes", "no"], case_sensitive=False),
help="Encapsulate Mermaid diagram in Markdown syntax. " "Defaults to 'no'",
)
@click.option(
"--with-raw-capability",
type=click.Choice(["yes", "no"], case_sensitive=False),
help="Create RAW DBs and 'rawAcl' capability. " "Defaults to 'yes'",
)
@click.option(
"--cdf-project",
help="[optional] Provide the CDF Project name to use for the diagram 'idp-cdf-mappings'.",
)
@click.pass_obj
def diagram(
# click.core.Context obj
obj: Dict,
config_file: str,
markdown: YesNoType,
with_raw_capability: YesNoType,
cdf_project: str,
) -> None:
# click.echo(click.style("Diagram CDF Project ...", fg="red"))
if obj["debug"]:
# TODO not working yet :/
_logger.setLevel("DEBUG") # INFO/DEBUG
try:
(
BootstrapCore(config_file, command=CommandMode.DIAGRAM)
.validate_config_length_limits()
.validate_config_is_cdf_project_in_mappings()
# .dry_run(obj['dry_run'])
.diagram(
to_markdown=markdown,
with_raw_capability=with_raw_capability,
cdf_project=cdf_project,
)
) # fmt:skip
# click.echo(
# click.style(
# "CDF Project relevant groups and raw_dbs are documented as Mermaid",
# fg="blue",
# )
# )
except BootstrapConfigError as e:
exit(e.message)
bootstrap_cli.add_command(deploy)
bootstrap_cli.add_command(prepare)
bootstrap_cli.add_command(delete)
bootstrap_cli.add_command(diagram)
def main() -> None:
# call click.pass_context
bootstrap_cli()
if __name__ == "__main__":
main()
| [
"logging.getLogger",
"click.Choice",
"incubator.bootstrap_cli.mermaid_generator.mermaid.RoundedNode",
"cognite.client.data_classes.Group",
"time.sleep",
"incubator.bootstrap_cli.configuration.BootstrapValidationError",
"incubator.bootstrap_cli.configuration.SharedAccess",
"incubator.bootstrap_cli.conf... | [((6062, 6089), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6079, 6089), False, 'import logging\n'), ((9008, 9057), 'typing.TypeVar', 'TypeVar', (['"""T_BootstrapCore"""'], {'bound': '"""BootstrapCore"""'}), "('T_BootstrapCore', bound='BootstrapCore')\n", (9015, 9057), False, 'from typing import Any, Dict, List, Optional, Set, Tuple, TypeVar\n'), ((81667, 81736), 'click.group', 'click.group', ([], {'context_settings': "{'help_option_names': ['-h', '--help']}"}), "(context_settings={'help_option_names': ['-h', '--help']})\n", (81678, 81736), False, 'import click\n'), ((81738, 81806), 'click.version_option', 'click.version_option', ([], {'prog_name': '"""bootstrap_cli"""', 'version': '__version__'}), "(prog_name='bootstrap_cli', version=__version__)\n", (81758, 81806), False, 'import click\n'), ((81808, 82047), 'click.option', 'click.option', (['"""--cdf-project-name"""'], {'help': '"""CDF Project to interact with CDF API, the \'BOOTSTRAP_CDF_PROJECT\',environment variable can be used instead. Required for OAuth2 and optional for api-keys."""', 'envvar': '"""BOOTSTRAP_CDF_PROJECT"""'}), '(\'--cdf-project-name\', help=\n "CDF Project to interact with CDF API, the \'BOOTSTRAP_CDF_PROJECT\',environment variable can be used instead. Required for OAuth2 and optional for api-keys."\n , envvar=\'BOOTSTRAP_CDF_PROJECT\')\n', (81820, 82047), False, 'import click\n'), ((82106, 82388), 'click.option', 'click.option', (['"""--cluster"""'], {'default': '"""westeurope-1"""', 'help': '"""The CDF cluster where CDF Project is hosted (e.g. greenfield, europe-west1-1),Provide this or make sure to set the \'BOOTSTRAP_CDF_CLUSTER\' environment variable. Default: westeurope-1"""', 'envvar': '"""BOOTSTRAP_CDF_CLUSTER"""'}), '(\'--cluster\', default=\'westeurope-1\', help=\n "The CDF cluster where CDF Project is hosted (e.g. greenfield, europe-west1-1),Provide this or make sure to set the \'BOOTSTRAP_CDF_CLUSTER\' environment variable. Default: westeurope-1"\n , envvar=\'BOOTSTRAP_CDF_CLUSTER\')\n', (82118, 82388), False, 'import click\n'), ((82413, 82733), 'click.option', 'click.option', (['"""--host"""'], {'default': '"""https://bluefield.cognitedata.com/"""', 'help': '"""The CDF host where CDF Project is hosted (e.g. https://bluefield.cognitedata.com),Provide this or make sure to set the \'BOOTSTRAP_CDF_HOST\' environment variable.Default: https://bluefield.cognitedata.com/"""', 'envvar': '"""BOOTSTRAP_CDF_HOST"""'}), '(\'--host\', default=\'https://bluefield.cognitedata.com/\', help=\n "The CDF host where CDF Project is hosted (e.g. https://bluefield.cognitedata.com),Provide this or make sure to set the \'BOOTSTRAP_CDF_HOST\' environment variable.Default: https://bluefield.cognitedata.com/"\n , envvar=\'BOOTSTRAP_CDF_HOST\')\n', (82425, 82733), False, 'import click\n'), ((82758, 82992), 'click.option', 'click.option', (['"""--api-key"""'], {'help': '"""API key to interact with CDF API. Provide this or make sure to set the \'BOOTSTRAP_CDF_API_KEY\',environment variable if you want to authenticate with API keys."""', 'envvar': '"""BOOTSTRAP_CDF_API_KEY"""'}), '(\'--api-key\', help=\n "API key to interact with CDF API. 
Provide this or make sure to set the \'BOOTSTRAP_CDF_API_KEY\',environment variable if you want to authenticate with API keys."\n , envvar=\'BOOTSTRAP_CDF_API_KEY\')\n', (82770, 82992), False, 'import click\n'), ((83006, 83250), 'click.option', 'click.option', (['"""--client-id"""'], {'help': '"""IdP Client ID to interact with CDF API. Provide this or make sure to set the \'BOOTSTRAP_IDP_CLIENT_ID\' environment variable if you want to authenticate with OAuth2."""', 'envvar': '"""BOOTSTRAP_IDP_CLIENT_ID"""'}), '(\'--client-id\', help=\n "IdP Client ID to interact with CDF API. Provide this or make sure to set the \'BOOTSTRAP_IDP_CLIENT_ID\' environment variable if you want to authenticate with OAuth2."\n , envvar=\'BOOTSTRAP_IDP_CLIENT_ID\')\n', (83018, 83250), False, 'import click\n'), ((83264, 83524), 'click.option', 'click.option', (['"""--client-secret"""'], {'help': '"""IdP Client secret to interact with CDF API. Provide this or make sure to set the \'BOOTSTRAP_IDP_CLIENT_SECRET\' environment variable if you want to authenticate with OAuth2."""', 'envvar': '"""BOOTSTRAP_IDP_CLIENT_SECRET"""'}), '(\'--client-secret\', help=\n "IdP Client secret to interact with CDF API. Provide this or make sure to set the \'BOOTSTRAP_IDP_CLIENT_SECRET\' environment variable if you want to authenticate with OAuth2."\n , envvar=\'BOOTSTRAP_IDP_CLIENT_SECRET\')\n', (83276, 83524), False, 'import click\n'), ((83538, 83782), 'click.option', 'click.option', (['"""--token-url"""'], {'help': '"""IdP Token URL to interact with CDF API. Provide this or make sure to set the \'BOOTSTRAP_IDP_TOKEN_URL\' environment variable if you want to authenticate with OAuth2."""', 'envvar': '"""BOOTSTRAP_IDP_TOKEN_URL"""'}), '(\'--token-url\', help=\n "IdP Token URL to interact with CDF API. Provide this or make sure to set the \'BOOTSTRAP_IDP_TOKEN_URL\' environment variable if you want to authenticate with OAuth2."\n , envvar=\'BOOTSTRAP_IDP_TOKEN_URL\')\n', (83550, 83782), False, 'import click\n'), ((83796, 84018), 'click.option', 'click.option', (['"""--scopes"""'], {'help': '"""IdP Scopes to interact with CDF API, relevant for OAuth2 authentication method. The \'BOOTSTRAP_IDP_SCOPES\' environment variable can be used instead."""', 'envvar': '"""BOOTSTRAP_IDP_SCOPES"""'}), '(\'--scopes\', help=\n "IdP Scopes to interact with CDF API, relevant for OAuth2 authentication method. The \'BOOTSTRAP_IDP_SCOPES\' environment variable can be used instead."\n , envvar=\'BOOTSTRAP_IDP_SCOPES\')\n', (83808, 84018), False, 'import click\n'), ((84032, 84262), 'click.option', 'click.option', (['"""--audience"""'], {'help': '"""IdP Audience to interact with CDF API, relevant for OAuth2 authentication method. The \'BOOTSTRAP_IDP_AUDIENCE\' environment variable can be used instead."""', 'envvar': '"""BOOTSTRAP_IDP_AUDIENCE"""'}), '(\'--audience\', help=\n "IdP Audience to interact with CDF API, relevant for OAuth2 authentication method. 
The \'BOOTSTRAP_IDP_AUDIENCE\' environment variable can be used instead."\n , envvar=\'BOOTSTRAP_IDP_AUDIENCE\')\n', (84044, 84262), False, 'import click\n'), ((84276, 84404), 'click.option', 'click.option', (['"""--dotenv-path"""'], {'help': '"""Provide a relative or absolute path to an .env file (for commandline usage only)"""'}), "('--dotenv-path', help=\n 'Provide a relative or absolute path to an .env file (for commandline usage only)'\n )\n", (84288, 84404), False, 'import click\n'), ((84407, 84476), 'click.option', 'click.option', (['"""--debug"""'], {'is_flag': '(True)', 'help': '"""Print debug information"""'}), "('--debug', is_flag=True, help='Print debug information')\n", (84419, 84476), False, 'import click\n'), ((85849, 85915), 'click.command', 'click.command', ([], {'help': '"""Deploy a set of bootstrap from a config-file"""'}), "(help='Deploy a set of bootstrap from a config-file')\n", (85862, 85915), False, 'import click\n'), ((85917, 85980), 'click.argument', 'click.argument', (['"""config_file"""'], {'default': '"""./config-bootstrap.yml"""'}), "('config_file', default='./config-bootstrap.yml')\n", (85931, 85980), False, 'import click\n'), ((87536, 87829), 'click.command', 'click.command', ([], {'help': '"""Prepare an elevated CDF Group \'cdf:bootstrap\', using the same AAD Group link as your initially provided \'oidc-admin-group\'. With additional capabilities to run the \'deploy\' and \'delete\' commands next. The \'prepare\' command is only required once per CDF Project."""'}), '(help=\n "Prepare an elevated CDF Group \'cdf:bootstrap\', using the same AAD Group link as your initially provided \'oidc-admin-group\'. With additional capabilities to run the \'deploy\' and \'delete\' commands next. The \'prepare\' command is only required once per CDF Project."\n )\n', (87549, 87829), False, 'import click\n'), ((87848, 87911), 'click.argument', 'click.argument', (['"""config_file"""'], {'default': '"""./config-bootstrap.yml"""'}), "('config_file', default='./config-bootstrap.yml')\n", (87862, 87911), False, 'import click\n'), ((88004, 88365), 'click.option', 'click.option', (['"""--aad-source-id"""', '"""--idp-source-id"""', '"""idp_source_id"""'], {'required': '(True)', 'help': '"""Provide the IdP Source ID to use for the \'cdf:bootstrap\' Group. Typically for a new project its the same configured for the initial provided CDF Group named \'oidc-admin-group\'. The parameter option \'--aad-source-id\' will be deprecated in next major release"""'}), '(\'--aad-source-id\', \'--idp-source-id\', \'idp_source_id\',\n required=True, help=\n "Provide the IdP Source ID to use for the \'cdf:bootstrap\' Group. Typically for a new project its the same configured for the initial provided CDF Group named \'oidc-admin-group\'. 
The parameter option \'--aad-source-id\' will be deprecated in next major release"\n )\n', (88016, 88365), False, 'import click\n'), ((89195, 89415), 'click.command', 'click.command', ([], {'help': '"""Delete mode used to delete CDF Groups, Datasets and Raw Databases, CDF Groups and RAW Databases will be deleted, while Datasets will be archived and deprecated (as they cannot be deleted)."""'}), "(help=\n 'Delete mode used to delete CDF Groups, Datasets and Raw Databases, CDF Groups and RAW Databases will be deleted, while Datasets will be archived and deprecated (as they cannot be deleted).'\n )\n", (89208, 89415), False, 'import click\n'), ((89427, 89490), 'click.argument', 'click.argument', (['"""config_file"""'], {'default': '"""./config-bootstrap.yml"""'}), "('config_file', default='./config-bootstrap.yml')\n", (89441, 89490), False, 'import click\n'), ((90238, 90344), 'click.command', 'click.command', ([], {'help': '"""Diagram mode used to document the given configuration as a Mermaid diagram"""'}), "(help=\n 'Diagram mode used to document the given configuration as a Mermaid diagram'\n )\n", (90251, 90344), False, 'import click\n'), ((90336, 90399), 'click.argument', 'click.argument', (['"""config_file"""'], {'default': '"""./config-bootstrap.yml"""'}), "('config_file', default='./config-bootstrap.yml')\n", (90350, 90399), False, 'import click\n'), ((90784, 90914), 'click.option', 'click.option', (['"""--cdf-project"""'], {'help': '"""[optional] Provide the CDF Project name to use for the diagram \'idp-cdf-mappings\'."""'}), '(\'--cdf-project\', help=\n "[optional] Provide the CDF Project name to use for the diagram \'idp-cdf-mappings\'."\n )\n', (90796, 90914), False, 'import click\n'), ((85364, 85400), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': 'dotenv_path'}), '(dotenv_path=dotenv_path)\n', (85375, 85400), False, 'from dotenv import load_dotenv\n'), ((17859, 17879), 'incubator.bootstrap_cli.configuration.SharedAccess', 'SharedAccess', (['[]', '[]'], {}), '([], [])\n', (17871, 17879), False, 'from incubator.bootstrap_cli.configuration import BootstrapConfigError, BootstrapDeleteConfig, BootstrapDeployConfig, BootstrapValidationError, CommandMode, SharedAccess, YesNoType\n'), ((35613, 35668), 'cognite.client.data_classes.Group', 'Group', ([], {'name': 'group_name', 'capabilities': 'group_capabilities'}), '(name=group_name, capabilities=group_capabilities)\n', (35618, 35668), False, 'from cognite.client.data_classes import DataSet, Group\n'), ((48460, 48473), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (48470, 48473), False, 'import time\n'), ((58467, 58480), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (58477, 58480), False, 'import time\n'), ((78101, 78116), 'incubator.bootstrap_cli.mermaid_generator.mermaid.GraphRegistry', 'GraphRegistry', ([], {}), '()\n', (78114, 78116), False, 'from incubator.bootstrap_cli.mermaid_generator.mermaid import AssymetricNode, DottedEdge, Edge, GraphRegistry, Node, RoundedNode, SubroutineNode, TrapezNode\n'), ((84551, 84600), 'click.Choice', 'click.Choice', (["['yes', 'no']"], {'case_sensitive': '(False)'}), "(['yes', 'no'], case_sensitive=False)\n", (84563, 84600), False, 'import click\n'), ((86825, 86884), 'click.style', 'click.style', (['"""Deploying CDF Project bootstrap..."""'], {'fg': '"""red"""'}), "('Deploying CDF Project bootstrap...', fg='red')\n", (86836, 86884), False, 'import click\n'), ((86209, 86258), 'click.Choice', 'click.Choice', (["['yes', 'no']"], {'case_sensitive': '(False)'}), "(['yes', 'no'], 
case_sensitive=False)\n", (86221, 86258), False, 'import click\n'), ((86509, 86558), 'click.Choice', 'click.Choice', (["['yes', 'no']"], {'case_sensitive': '(False)'}), "(['yes', 'no'], case_sensitive=False)\n", (86521, 86558), False, 'import click\n'), ((88625, 88673), 'click.style', 'click.style', (['"""Prepare CDF Project ..."""'], {'fg': '"""red"""'}), "('Prepare CDF Project ...', fg='red')\n", (88636, 88673), False, 'import click\n'), ((89623, 89670), 'click.style', 'click.style', (['"""Delete CDF Project ..."""'], {'fg': '"""red"""'}), "('Delete CDF Project ...', fg='red')\n", (89634, 89670), False, 'import click\n'), ((90471, 90520), 'click.Choice', 'click.Choice', (["['yes', 'no']"], {'case_sensitive': '(False)'}), "(['yes', 'no'], case_sensitive=False)\n", (90483, 90520), False, 'import click\n'), ((90657, 90706), 'click.Choice', 'click.Choice', (["['yes', 'no']"], {'case_sensitive': '(False)'}), "(['yes', 'no'], case_sensitive=False)\n", (90669, 90706), False, 'import click\n'), ((9716, 9759), 'incubator.bootstrap_cli.configuration.BootstrapDeleteConfig.from_yaml', 'BootstrapDeleteConfig.from_yaml', (['configpath'], {}), '(configpath)\n', (9747, 9759), False, 'from incubator.bootstrap_cli.configuration import BootstrapConfigError, BootstrapDeleteConfig, BootstrapDeployConfig, BootstrapValidationError, CommandMode, SharedAccess, YesNoType\n'), ((14345, 14503), 'incubator.bootstrap_cli.configuration.BootstrapValidationError', 'BootstrapValidationError', (['f"""Features validation error: \'features.aggregated-level-name\' is required, but provided as <{self.AGGREGATED_LEVEL_NAME}>"""'], {}), '(\n f"Features validation error: \'features.aggregated-level-name\' is required, but provided as <{self.AGGREGATED_LEVEL_NAME}>"\n )\n', (14369, 14503), False, 'from incubator.bootstrap_cli.configuration import BootstrapConfigError, BootstrapDeleteConfig, BootstrapDeployConfig, BootstrapValidationError, CommandMode, SharedAccess, YesNoType\n'), ((47609, 47645), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['name', 'id']"}), "(columns=['name', 'id'])\n", (47621, 47645), True, 'import pandas as pd\n'), ((47974, 48004), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['name']"}), "(columns=['name'])\n", (47986, 48004), True, 'import pandas as pd\n'), ((87412, 87468), 'click.style', 'click.style', (['"""CDF Project bootstrap deployed"""'], {'fg': '"""blue"""'}), "('CDF Project bootstrap deployed', fg='blue')\n", (87423, 87468), False, 'import click\n'), ((89036, 89132), 'click.style', 'click.style', (['"""CDF Project bootstrap prepared for running \'deploy\' command next."""'], {'fg': '"""blue"""'}), '("CDF Project bootstrap prepared for running \'deploy\' command next."\n , fg=\'blue\')\n', (89047, 89132), False, 'import click\n'), ((89992, 90124), 'click.style', 'click.style', (['"""CDF Project relevant groups and raw_dbs are deleted and/or datasets are archived and deprecated """'], {'fg': '"""blue"""'}), "(\n 'CDF Project relevant groups and raw_dbs are deleted and/or datasets are archived and deprecated '\n , fg='blue')\n", (90003, 90124), False, 'import click\n'), ((9903, 9970), 'incubator.bootstrap_cli.configuration.BootstrapConfigError', 'BootstrapConfigError', (['"""\'cognite\' section required in configuration"""'], {}), '("\'cognite\' section required in configuration")\n', (9923, 9970), False, 'from incubator.bootstrap_cli.configuration import BootstrapConfigError, BootstrapDeleteConfig, BootstrapDeployConfig, BootstrapValidationError, CommandMode, SharedAccess, 
YesNoType\n'), ((10109, 10152), 'incubator.bootstrap_cli.configuration.BootstrapDeployConfig.from_yaml', 'BootstrapDeployConfig.from_yaml', (['configpath'], {}), '(configpath)\n', (10140, 10152), False, 'from incubator.bootstrap_cli.configuration import BootstrapConfigError, BootstrapDeleteConfig, BootstrapDeployConfig, BootstrapValidationError, CommandMode, SharedAccess, YesNoType\n'), ((14079, 14093), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14091, 14093), False, 'from datetime import datetime\n'), ((67671, 67781), 'incubator.bootstrap_cli.mermaid_generator.mermaid.TrapezNode', 'TrapezNode', ([], {'id_name': 'idp_source_name', 'display': 'idp_source_name', 'comments': "[f'IdP objectId: {idp_source_id}']"}), "(id_name=idp_source_name, display=idp_source_name, comments=[\n f'IdP objectId: {idp_source_id}'])\n", (67681, 67781), False, 'from incubator.bootstrap_cli.mermaid_generator.mermaid import AssymetricNode, DottedEdge, Edge, GraphRegistry, Node, RoundedNode, SubroutineNode, TrapezNode\n'), ((67966, 68042), 'incubator.bootstrap_cli.mermaid_generator.mermaid.Edge', 'Edge', ([], {'id_name': 'idp_source_name', 'dest': 'group_name', 'annotation': 'None', 'comments': '[]'}), '(id_name=idp_source_name, dest=group_name, annotation=None, comments=[])\n', (67970, 68042), False, 'from incubator.bootstrap_cli.mermaid_generator.mermaid import AssymetricNode, DottedEdge, Edge, GraphRegistry, Node, RoundedNode, SubroutineNode, TrapezNode\n'), ((68618, 68682), 'incubator.bootstrap_cli.mermaid_generator.mermaid.RoundedNode', 'RoundedNode', ([], {'id_name': 'group_name', 'display': 'group_name', 'comments': '""""""'}), "(id_name=group_name, display=group_name, comments='')\n", (68629, 68682), False, 'from incubator.bootstrap_cli.mermaid_generator.mermaid import AssymetricNode, DottedEdge, Edge, GraphRegistry, Node, RoundedNode, SubroutineNode, TrapezNode\n'), ((10509, 10576), 'incubator.bootstrap_cli.configuration.BootstrapConfigError', 'BootstrapConfigError', (['"""\'cognite\' section required in configuration"""'], {}), '("\'cognite\' section required in configuration")\n', (10529, 10576), False, 'from incubator.bootstrap_cli.configuration import BootstrapConfigError, BootstrapDeleteConfig, BootstrapDeployConfig, BootstrapValidationError, CommandMode, SharedAccess, YesNoType\n'), ((71743, 71800), 'incubator.bootstrap_cli.mermaid_generator.mermaid.Node', 'Node', ([], {'id_name': 'group_name', 'display': 'group_name', 'comments': '""""""'}), "(id_name=group_name, display=group_name, comments='')\n", (71747, 71800), False, 'from incubator.bootstrap_cli.mermaid_generator.mermaid import AssymetricNode, DottedEdge, Edge, GraphRegistry, Node, RoundedNode, SubroutineNode, TrapezNode\n'), ((12493, 12503), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (12501, 12503), False, 'from pathlib import Path\n'), ((39550, 39566), 'itertools.islice', 'islice', (['it', 'SIZE'], {}), '(it, SIZE)\n', (39556, 39566), False, 'from itertools import islice\n'), ((74968, 75025), 'incubator.bootstrap_cli.mermaid_generator.mermaid.Node', 'Node', ([], {'id_name': 'group_name', 'display': 'group_name', 'comments': '""""""'}), "(id_name=group_name, display=group_name, comments='')\n", (74972, 75025), False, 'from incubator.bootstrap_cli.mermaid_generator.mermaid import AssymetricNode, DottedEdge, Edge, GraphRegistry, Node, RoundedNode, SubroutineNode, TrapezNode\n'), ((41808, 41839), 'cognite.client.data_classes.data_sets.DataSetUpdate', 'DataSetUpdate', ([], {'id': "dataset['id']"}), "(id=dataset['id'])\n", 
(41821, 41839), False, 'from cognite.client.data_classes.data_sets import DataSetUpdate\n')] |
from typing import List, Dict
from pyhcl.ir.low_ir import *
from pyhcl.ir.low_prim import *
from pyhcl.passes._pass import Pass
from pyhcl.passes.utils import get_binary_width
DEFAULT_READ_LATENCY = 0
DEFAULT_WRITE_LATENCY = 1
@dataclass
class ExpandMemory(Pass):
def run(self, c: Circuit):
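        # Collect DefMemPort names per memory, split into read ports (rw is True) and write ports.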
def get_mem_ports(stmts: List[Statement], writes: Dict[str, List[Statement]], reads: Dict[str, List[Statement]]):
for stmt in stmts:
if isinstance(stmt, DefMemPort):
if stmt.rw is True:
if stmt.mem.name in reads:
reads[stmt.mem.name] = reads[stmt.mem.name] + [stmt.name]
else:
reads[stmt.mem.name] = [stmt.name]
else:
if stmt.mem.name in writes:
writes[stmt.mem.name] = writes[stmt.mem.name] + [stmt.name]
else:
writes[stmt.mem.name] = [stmt.name]
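        # Lower a DefMemPort into explicit connects for the port's addr, en and clk subfields (plus mask for write ports).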
def expand_mem_port(stmts: List[Statement], target: Statement):
addr_width = IntWidth(get_binary_width(target.mem.typ.size))
# addr
stmts.append(Connect(
SubField(SubField(Reference(target.mem.name, UIntType(addr_width)),target.name, UIntType(addr_width)), 'addr', UIntType(addr_width)),
UIntLiteral(target.index.value, addr_width)))
# en
stmts.append(Connect(
SubField(SubField(Reference(target.mem.name, UIntType(IntWidth(1))),target.name, UIntType(IntWidth(1))), 'en', UIntType(IntWidth(1))),
UIntLiteral(1, IntWidth(1))))
# clk
stmts.append(Connect(
SubField(SubField(Reference(target.mem.name, ClockType()),target.name, ClockType()), 'clk', ClockType()),
target.clk))
# mask
if target.rw is False:
stmts.append(Connect(
SubField(SubField(Reference(target.mem.name, UIntType(IntWidth(1))),target.name, UIntType(IntWidth(1))), 'mask', UIntType(IntWidth(1))),
UIntLiteral(1, IntWidth(1))))
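        # Retarget a Connect whose sink or source references a memory port to the port's 'data' subfield.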
def expand_memory_e(s: Statement, ports: Dict[str, Statement]) -> Statement:
loc, expr = s.loc, s.expr
if isinstance(loc, Reference) and loc.name in ports:
loc = SubField(SubField(Reference(ports[loc.name].mem.name, loc.typ), loc.name, loc.typ), 'data', loc.typ)
elif isinstance(expr, Reference) and expr.name in ports:
expr = SubField(SubField(Reference(ports[expr.name].mem.name, expr.typ), expr.name, expr.typ), 'data', expr.typ)
return Connect(loc, expr, s.info, s.blocking, s.bidirection, s.mem)
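        # Rewrite a statement list: DefMemory becomes WDefMemory with its port names and default latencies, DefMemPorts are expanded, and Connects are retargeted.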
def expand_memory_s(stmts: List[Statement]) -> List[Statement]:
new_stmts: List[Statement] = []
writes: Dict[str, List[Statement]] = {}
reads: Dict[str, List[Statement]] = {}
            ports: Dict[str, Statement] = {}
get_mem_ports(stmts, writes, reads)
for stmt in stmts:
if isinstance(stmt, DefMemory):
new_stmts.append(WDefMemory(
stmt.name,
stmt.memType,
stmt.memType.typ,
stmt.memType.size,
DEFAULT_READ_LATENCY,
DEFAULT_WRITE_LATENCY,
reads[stmt.name],
writes[stmt.name]))
elif isinstance(stmt, DefMemPort):
expand_mem_port(new_stmts, stmt)
ports[stmt.name] = stmt
elif isinstance(stmt, Connect):
new_stmts.append(expand_memory_e(stmt, ports))
else:
new_stmts.append(stmt)
return new_stmts
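        # Rebuild a module with its statement block expanded.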
def expand_memory_m(m: DefModule) -> DefModule:
return Module(
m.name,
m.ports,
Block(expand_memory_s(m.body.stmts)),
m.typ,
m.info
)
new_modules = []
for m in c.modules:
if isinstance(m, Module):
new_modules.append(expand_memory_m(m))
else:
new_modules.append(m)
return Circuit(new_modules, c.main, c.info) | [
"pyhcl.passes.utils.get_binary_width"
] | [((1136, 1173), 'pyhcl.passes.utils.get_binary_width', 'get_binary_width', (['target.mem.typ.size'], {}), '(target.mem.typ.size)\n', (1152, 1173), False, 'from pyhcl.passes.utils import get_binary_width\n')] |
import logging
import time
import os
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
format = "%(asctime)s %(levelname)-10s %(message)s"
id = time.strftime("%Y%m%d-%H%M%S")
#These are the sequences need to get colored ouput
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def formatter_message(message, use_color = True):
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': YELLOW,
'ERROR': RED,
'PASS': GREEN
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color = True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
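        # Wrap the record's level name in ANSI color escape codes, then let the
        # standard Formatter render the record as usual.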
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format(self, record)
PASS_LEVEL_NUM = 45
logging.addLevelName(PASS_LEVEL_NUM, 'PASS')
def success(self, message, *args, **kws):
# Yes, logger takes its '*args' as 'args'.
self._log(PASS_LEVEL_NUM, message, args, **kws)
logging.Logger.success = success
def getLogger(name = 'clyde_log'):
    return logging.getLogger(name)
log = getLogger()
log.setLevel(logging.DEBUG)
# Make sure log directory exists
if not os.path.exists('log'):
os.makedirs('log')
# Log to file
formatter = logging.Formatter(format)
filehandler = logging.FileHandler("log/clyde_%s.log" % id, "w")
filehandler.setLevel(logging.INFO)
filehandler.setFormatter(formatter)
log.addHandler(filehandler)
COLOR_FORMAT = formatter_message(format, True)
color_formatter = ColoredFormatter(COLOR_FORMAT)
# Log to stdout too
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.DEBUG)
streamhandler.setFormatter(color_formatter)
log.addHandler(streamhandler)
| [
"logging.getLogger",
"os.path.exists",
"logging.StreamHandler",
"os.makedirs",
"logging.Formatter",
"time.strftime",
"logging.Formatter.format",
"logging.FileHandler",
"logging.Formatter.__init__",
"logging.addLevelName"
] | [((161, 191), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (174, 191), False, 'import time\n'), ((1155, 1199), 'logging.addLevelName', 'logging.addLevelName', (['PASS_LEVEL_NUM', '"""PASS"""'], {}), "(PASS_LEVEL_NUM, 'PASS')\n", (1175, 1199), False, 'import logging\n'), ((1612, 1637), 'logging.Formatter', 'logging.Formatter', (['format'], {}), '(format)\n', (1629, 1637), False, 'import logging\n'), ((1652, 1701), 'logging.FileHandler', 'logging.FileHandler', (["('log/clyde_%s.log' % id)", '"""w"""'], {}), "('log/clyde_%s.log' % id, 'w')\n", (1671, 1701), False, 'import logging\n'), ((1935, 1958), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1956, 1958), False, 'import logging\n'), ((1428, 1447), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1445, 1447), False, 'import logging\n'), ((1537, 1558), 'os.path.exists', 'os.path.exists', (['"""log"""'], {}), "('log')\n", (1551, 1558), False, 'import os\n'), ((1562, 1580), 'os.makedirs', 'os.makedirs', (['"""log"""'], {}), "('log')\n", (1573, 1580), False, 'import os\n'), ((775, 812), 'logging.Formatter.__init__', 'logging.Formatter.__init__', (['self', 'msg'], {}), '(self, msg)\n', (801, 812), False, 'import logging\n'), ((1090, 1128), 'logging.Formatter.format', 'logging.Formatter.format', (['self', 'record'], {}), '(self, record)\n', (1114, 1128), False, 'import logging\n')] |
from fastapi import Depends, HTTPException, Path, status
from pydantic import UUID4
from api.dependencies.database import get_repository
from db.errors import EntityDoesNotExist, ResourceIsNotDeployed
from db.repositories.user_resources import UserResourceRepository
from db.repositories.workspace_services import WorkspaceServiceRepository
from db.repositories.workspaces import WorkspaceRepository
from models.domain.user_resource import UserResource
from models.domain.workspace import Workspace
from models.domain.workspace_service import WorkspaceService
from resources import strings
def get_workspace_by_id(workspace_id: UUID4, workspaces_repo) -> Workspace:
try:
return workspaces_repo.get_workspace_by_id(workspace_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_DOES_NOT_EXIST)
async def get_workspace_by_id_from_path(workspace_id: UUID4 = Path(...), workspaces_repo=Depends(get_repository(WorkspaceRepository))) -> Workspace:
return get_workspace_by_id(workspace_id, workspaces_repo)
async def get_deployed_workspace_by_id_from_path(workspace_id: UUID4 = Path(...), workspaces_repo=Depends(get_repository(WorkspaceRepository))) -> Workspace:
try:
return workspaces_repo.get_deployed_workspace_by_id(workspace_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_DOES_NOT_EXIST)
except ResourceIsNotDeployed:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=strings.WORKSPACE_IS_NOT_DEPLOYED)
async def get_workspace_service_by_id_from_path(workspace_id: UUID4 = Path(...), service_id: UUID4 = Path(...), workspace_services_repo=Depends(get_repository(WorkspaceServiceRepository))) -> WorkspaceService:
try:
return workspace_services_repo.get_workspace_service_by_id(workspace_id, service_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_SERVICE_DOES_NOT_EXIST)
async def get_deployed_workspace_service_by_id_from_path(workspace_id: UUID4 = Path(...), service_id: UUID4 = Path(...), workspace_services_repo=Depends(get_repository(WorkspaceServiceRepository))) -> WorkspaceService:
try:
return workspace_services_repo.get_deployed_workspace_service_by_id(workspace_id, service_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_SERVICE_DOES_NOT_EXIST)
except ResourceIsNotDeployed:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=strings.WORKSPACE_SERVICE_IS_NOT_DEPLOYED)
async def get_user_resource_by_id_from_path(workspace_id: UUID4 = Path(...), service_id: UUID4 = Path(...), resource_id: UUID4 = Path(...), user_resource_repo=Depends(get_repository(UserResourceRepository))) -> UserResource:
try:
return user_resource_repo.get_user_resource_by_id(workspace_id, service_id, resource_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.USER_RESOURCE_DOES_NOT_EXIST)
| [
"api.dependencies.database.get_repository",
"fastapi.Path",
"fastapi.HTTPException"
] | [((946, 955), 'fastapi.Path', 'Path', (['...'], {}), '(...)\n', (950, 955), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((1168, 1177), 'fastapi.Path', 'Path', (['...'], {}), '(...)\n', (1172, 1177), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((1691, 1700), 'fastapi.Path', 'Path', (['...'], {}), '(...)\n', (1695, 1700), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((1722, 1731), 'fastapi.Path', 'Path', (['...'], {}), '(...)\n', (1726, 1731), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((2161, 2170), 'fastapi.Path', 'Path', (['...'], {}), '(...)\n', (2165, 2170), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((2192, 2201), 'fastapi.Path', 'Path', (['...'], {}), '(...)\n', (2196, 2201), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((2777, 2786), 'fastapi.Path', 'Path', (['...'], {}), '(...)\n', (2781, 2786), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((2808, 2817), 'fastapi.Path', 'Path', (['...'], {}), '(...)\n', (2812, 2817), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((2840, 2849), 'fastapi.Path', 'Path', (['...'], {}), '(...)\n', (2844, 2849), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((981, 1016), 'api.dependencies.database.get_repository', 'get_repository', (['WorkspaceRepository'], {}), '(WorkspaceRepository)\n', (995, 1016), False, 'from api.dependencies.database import get_repository\n'), ((1203, 1238), 'api.dependencies.database.get_repository', 'get_repository', (['WorkspaceRepository'], {}), '(WorkspaceRepository)\n', (1217, 1238), False, 'from api.dependencies.database import get_repository\n'), ((1765, 1807), 'api.dependencies.database.get_repository', 'get_repository', (['WorkspaceServiceRepository'], {}), '(WorkspaceServiceRepository)\n', (1779, 1807), False, 'from api.dependencies.database import get_repository\n'), ((2235, 2277), 'api.dependencies.database.get_repository', 'get_repository', (['WorkspaceServiceRepository'], {}), '(WorkspaceServiceRepository)\n', (2249, 2277), False, 'from api.dependencies.database import get_repository\n'), ((2878, 2916), 'api.dependencies.database.get_repository', 'get_repository', (['UserResourceRepository'], {}), '(UserResourceRepository)\n', (2892, 2916), False, 'from api.dependencies.database import get_repository\n'), ((788, 886), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_404_NOT_FOUND', 'detail': 'strings.WORKSPACE_DOES_NOT_EXIST'}), '(status_code=status.HTTP_404_NOT_FOUND, detail=strings.\n WORKSPACE_DOES_NOT_EXIST)\n', (801, 886), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((1383, 1481), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_404_NOT_FOUND', 'detail': 'strings.WORKSPACE_DOES_NOT_EXIST'}), '(status_code=status.HTTP_404_NOT_FOUND, detail=strings.\n WORKSPACE_DOES_NOT_EXIST)\n', (1396, 1481), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((1525, 1623), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_409_CONFLICT', 'detail': 'strings.WORKSPACE_IS_NOT_DEPLOYED'}), '(status_code=status.HTTP_409_CONFLICT, detail=strings.\n WORKSPACE_IS_NOT_DEPLOYED)\n', (1538, 1623), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((1978, 2084), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_404_NOT_FOUND', 'detail': 
'strings.WORKSPACE_SERVICE_DOES_NOT_EXIST'}), '(status_code=status.HTTP_404_NOT_FOUND, detail=strings.\n WORKSPACE_SERVICE_DOES_NOT_EXIST)\n', (1991, 2084), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((2457, 2563), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_404_NOT_FOUND', 'detail': 'strings.WORKSPACE_SERVICE_DOES_NOT_EXIST'}), '(status_code=status.HTTP_404_NOT_FOUND, detail=strings.\n WORKSPACE_SERVICE_DOES_NOT_EXIST)\n', (2470, 2563), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((2607, 2713), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_409_CONFLICT', 'detail': 'strings.WORKSPACE_SERVICE_IS_NOT_DEPLOYED'}), '(status_code=status.HTTP_409_CONFLICT, detail=strings.\n WORKSPACE_SERVICE_IS_NOT_DEPLOYED)\n', (2620, 2713), False, 'from fastapi import Depends, HTTPException, Path, status\n'), ((3087, 3189), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_404_NOT_FOUND', 'detail': 'strings.USER_RESOURCE_DOES_NOT_EXIST'}), '(status_code=status.HTTP_404_NOT_FOUND, detail=strings.\n USER_RESOURCE_DOES_NOT_EXIST)\n', (3100, 3189), False, 'from fastapi import Depends, HTTPException, Path, status\n')] |
import unittest
import payroll
class P2Test(unittest.TestCase):
def setUp(self):
self.emp = payroll.Employee('12-3456789', 'John', 'Doe', '123 Anystreet', 'Anytown', 'Anystate', '98765')
def testHourly(self):
rate = 35.5
self.emp.make_hourly(rate)
for d in range(10):
self.emp.classification.add_timecard(4.0 + d*0.5)
self.assertEqual(self.emp.classification.compute_pay(), 62.5*rate)
# def testSalaried(self):
# salary = 10100.0
# self.emp.make_salaried(salary)
# self.assertEqual(self.emp.classification.compute_pay(), round(salary/24, 2))
# def testCommissioned(self):
# salary = 50000.0
# rate = 25
# self.emp.make_commissioned(salary, rate)
# for d in range(5):
# self.emp.classification.add_receipt(400.0 + d*25)
# self.assertEqual(self.emp.classification.compute_pay(), round(salary/24+2250.0*rate/100.0, 2))
if __name__ == '__main__':
unittest.main() | [
"unittest.main"
] | [((1000, 1015), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1013, 1015), False, 'import unittest\n')] |
# Copyright 2017-2020 Palantir Technologies, Inc.
# Copyright 2021- Python Language Server Contributors.
import logging
from pylsp import hookimpl, _utils
log = logging.getLogger(__name__)
@hookimpl
def pylsp_hover(document, position):
code_position = _utils.position_to_jedi_linecolumn(document, position)
definitions = document.jedi_script().infer(**code_position)
word = document.word_at_position(position)
# Find first exact matching definition
definition = next((x for x in definitions if x.name == word), None)
# Ensure a definition is used if only one is available
# even if the word doesn't match. An example of this case is 'np'
# where 'numpy' doesn't match with 'np'. Same for NumPy ufuncs
if len(definitions) == 1:
definition = definitions[0]
if not definition:
return {'contents': ''}
# raw docstring returns only doc, without signature
doc = _utils.format_docstring(definition.docstring(raw=True))
# Find first exact matching signature
signature = next((x.to_string() for x in definition.get_signatures()
if x.name == word), '')
contents = []
if signature:
contents.append({
'language': 'python',
'value': signature,
})
if doc:
contents.append(doc)
if not contents:
return {'contents': ''}
return {'contents': contents}
| [
"logging.getLogger",
"pylsp._utils.position_to_jedi_linecolumn"
] | [((164, 191), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (181, 191), False, 'import logging\n'), ((261, 315), 'pylsp._utils.position_to_jedi_linecolumn', '_utils.position_to_jedi_linecolumn', (['document', 'position'], {}), '(document, position)\n', (295, 315), False, 'from pylsp import hookimpl, _utils\n')] |
import flask
import telebot
import words
from os import environ
from dotenv import load_dotenv
load_dotenv()
app = flask.Flask(__name__)
bot = telebot.TeleBot(environ.get("TG_TOKEN"), threaded=False)
WEBHOOK_URL_PATH = "/%s/" % (environ.get("TG_TOKEN"))
# # Remove webhook, it fails sometimes the set if there is a previous webhook
# bot.remove_webhook()
# time.sleep(1)
# # Set webhook
# bot.set_webhook(url=environ.get("WEBHOOK_URL") + WEBHOOK_URL_PATH)
@bot.message_handler(commands=['ping'])
def ping(message):
return bot.reply_to(message, "pong")
@bot.message_handler(commands=['start_game'])
def start_game(message):
if "group" in message.chat.type:
admins = bot.get_chat_administrators(message.chat.id)
w = words.Words()
for a in admins:
if message.from_user.id == a.user.id:
return bot.reply_to(message, w.start_game())
return bot.reply_to(message, "Only admins can do that!")
@bot.message_handler(commands=['ranks'])
def ranks(message):
w = words.Words()
return bot.reply_to(message, "`" + w.rankings() + "`", parse_mode="Markdown")
@bot.message_handler(commands=['ans'])
def answer(message):
if message.chat.id == message.from_user.id:
return bot.reply_to(message, "Sorry, its command work only on public chats.")
w = words.Words()
ans = message.text.split(' ')
if len(ans) == 2:
return bot.reply_to(message, w.check(message.from_user.first_name, ans[1]), parse_mode="Markdown")
return bot.reply_to(message, "Wrong command. You should use /ans <pkm_name>")
| [
"words.Words",
"flask.Flask",
"dotenv.load_dotenv"
] | [((74, 87), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (85, 87), False, 'from dotenv import load_dotenv\n'), ((94, 115), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (105, 115), False, 'import flask\n'), ((1004, 1017), 'words.Words', 'words.Words', ([], {}), '()\n', (1015, 1017), False, 'import words\n'), ((1304, 1317), 'words.Words', 'words.Words', ([], {}), '()\n', (1315, 1317), False, 'import words\n'), ((722, 735), 'words.Words', 'words.Words', ([], {}), '()\n', (733, 735), False, 'import words\n')] |
# -*- coding: utf-8 -*-
"""
Gist embedding plugin for Pelican
=================================
This plugin allows you to embed `Gists`_ into your posts.
.. _Gists: https://gist.github.com/
"""
from __future__ import unicode_literals
import hashlib
import logging
import os
import re
import codecs
import pygments
logger = logging.getLogger(__name__)
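# Matches gist embed tags of the form <p>[gist:id=<hex id>,file=<name>,filetype=<lang>]</p>;
# the file and filetype parts are optional.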
gist_regex = re.compile(
r'(<p>\[gist:id\=([0-9a-fA-F]+)(,file\=([^\],]+))?(,filetype\=([a-zA-Z]+))?\]</p>)')
gist_template = """<div class="gist">
<script src='{{script_url}}' crossorigin='anonymous'></script>
<noscript>
{{code}}
</noscript>
</div>"""
def gist_url(gist_id, filename=None):
url = "https://gist.githubusercontent.com/raw/{}".format(gist_id)
if filename is not None:
url += "/{}".format(filename)
return url
def script_url(gist_id, filename=None):
url = "https://gist.github.com/{}.js".format(gist_id)
if filename is not None:
url += "?file={}".format(filename)
return url
def cache_filename(base, gist_id, filename=None):
h = hashlib.md5()
h.update(str(gist_id).encode())
if filename is not None:
h.update(filename.encode())
return os.path.join(base, '{}.cache'.format(h.hexdigest()))
def get_cache(base, gist_id, filename=None):
cache_file = cache_filename(base, gist_id, filename)
if not os.path.exists(cache_file):
return None
with codecs.open(cache_file, 'rb', 'utf-8') as f:
return f.read()
def set_cache(base, gist_id, body, filename=None):
with codecs.open(cache_filename(base, gist_id, filename), 'wb', 'utf-8') as f:
f.write(body)
def fetch_gist(gist_id, filename=None):
"""Fetch a gist and return the contents as a string."""
import requests
url = gist_url(gist_id, filename)
response = requests.get(url)
if response.status_code != 200:
raise Exception('Got a bad status looking up gist.')
body = response.text
if not body:
raise Exception('Unable to get the gist contents.')
return body
def setup_gist(pelican):
"""Setup the default settings."""
pelican.settings.setdefault('GIST_CACHE_ENABLED', True)
pelican.settings.setdefault('GIST_CACHE_LOCATION',
'/tmp/gist-cache')
pelican.settings.setdefault('GIST_PYGMENTS_STYLE', 'default')
pelican.settings.setdefault('GIST_PYGMENTS_LINENUM', False)
# Make sure the gist cache directory exists
cache_base = pelican.settings.get('GIST_CACHE_LOCATION')
if not os.path.exists(cache_base):
os.makedirs(cache_base)
def render_code(code, filetype, pygments_style):
"""Renders a piece of code into HTML. Highlights syntax if filetype is specfied"""
if filetype:
lexer = pygments.lexers.get_lexer_by_name(filetype)
formatter = pygments.formatters.HtmlFormatter(style=pygments_style)
return pygments.highlight(code, lexer, formatter)
else:
return "<pre><code>{}</code></pre>".format(code)
def replace_gist_tags(generator):
"""Replace gist tags in the article content."""
from jinja2 import Template
template = Template(gist_template)
should_cache = generator.context.get('GIST_CACHE_ENABLED')
cache_location = generator.context.get('GIST_CACHE_LOCATION')
pygments_style = generator.context.get('GIST_PYGMENTS_STYLE')
body = None
for article in generator.articles:
for match in gist_regex.findall(article._content):
gist_id = match[1]
filename = None
filetype = None
if match[3]:
filename = match[3]
if match[5]:
filetype = match[5]
logger.info('[gist]: Found gist id {} with filename {} and filetype {}'.format(
gist_id,
filename,
filetype,
))
if should_cache:
body = get_cache(cache_location, gist_id, filename)
# Fetch the gist
if not body:
logger.info('[gist]: Gist did not exist in cache, fetching...')
body = fetch_gist(gist_id, filename)
if should_cache:
logger.info('[gist]: Saving gist to cache...')
set_cache(cache_location, gist_id, body, filename)
else:
logger.info('[gist]: Found gist in cache.')
# Create a context to render with
context = generator.context.copy()
context.update({
'script_url': script_url(gist_id, filename),
'code': render_code(body, filetype, pygments_style)
})
# Render the template
replacement = template.render(context)
article._content = article._content.replace(match[0], replacement)
def register():
"""Plugin registration."""
from pelican import signals
signals.initialized.connect(setup_gist)
signals.article_generator_finalized.connect(replace_gist_tags)
| [
"logging.getLogger",
"os.path.exists",
"hashlib.md5",
"pelican.signals.initialized.connect",
"re.compile",
"pelican.signals.article_generator_finalized.connect",
"os.makedirs",
"pygments.highlight",
"pygments.formatters.HtmlFormatter",
"jinja2.Template",
"requests.get",
"pygments.lexers.get_le... | [((327, 354), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (344, 354), False, 'import logging\n'), ((368, 478), 're.compile', 're.compile', (['"""(<p>\\\\[gist:id\\\\=([0-9a-fA-F]+)(,file\\\\=([^\\\\],]+))?(,filetype\\\\=([a-zA-Z]+))?\\\\]</p>)"""'], {}), "(\n '(<p>\\\\[gist:id\\\\=([0-9a-fA-F]+)(,file\\\\=([^\\\\],]+))?(,filetype\\\\=([a-zA-Z]+))?\\\\]</p>)'\n )\n", (378, 478), False, 'import re\n'), ((1070, 1083), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (1081, 1083), False, 'import hashlib\n'), ((1824, 1841), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1836, 1841), False, 'import requests\n'), ((3151, 3174), 'jinja2.Template', 'Template', (['gist_template'], {}), '(gist_template)\n', (3159, 3174), False, 'from jinja2 import Template\n'), ((4939, 4978), 'pelican.signals.initialized.connect', 'signals.initialized.connect', (['setup_gist'], {}), '(setup_gist)\n', (4966, 4978), False, 'from pelican import signals\n'), ((4984, 5046), 'pelican.signals.article_generator_finalized.connect', 'signals.article_generator_finalized.connect', (['replace_gist_tags'], {}), '(replace_gist_tags)\n', (5027, 5046), False, 'from pelican import signals\n'), ((1364, 1390), 'os.path.exists', 'os.path.exists', (['cache_file'], {}), '(cache_file)\n', (1378, 1390), False, 'import os\n'), ((1421, 1459), 'codecs.open', 'codecs.open', (['cache_file', '"""rb"""', '"""utf-8"""'], {}), "(cache_file, 'rb', 'utf-8')\n", (1432, 1459), False, 'import codecs\n'), ((2541, 2567), 'os.path.exists', 'os.path.exists', (['cache_base'], {}), '(cache_base)\n', (2555, 2567), False, 'import os\n'), ((2577, 2600), 'os.makedirs', 'os.makedirs', (['cache_base'], {}), '(cache_base)\n', (2588, 2600), False, 'import os\n'), ((2772, 2815), 'pygments.lexers.get_lexer_by_name', 'pygments.lexers.get_lexer_by_name', (['filetype'], {}), '(filetype)\n', (2805, 2815), False, 'import pygments\n'), ((2836, 2891), 'pygments.formatters.HtmlFormatter', 'pygments.formatters.HtmlFormatter', ([], {'style': 'pygments_style'}), '(style=pygments_style)\n', (2869, 2891), False, 'import pygments\n'), ((2907, 2949), 'pygments.highlight', 'pygments.highlight', (['code', 'lexer', 'formatter'], {}), '(code, lexer, formatter)\n', (2925, 2949), False, 'import pygments\n')] |
"""
A script that reads a file from the web and
returns all the words whose frequency lies strictly between the frequencies of the two words passed in
"""
import re
from nltk.corpus import stopwords
import requests
from operator import itemgetter
def run(url, word1, word2):
freq = {} # keep the freq of each word in the file
freq[word1] = 0;
freq[word2] = 0;
    stopLex = set(stopwords.words('english'))  # build a set of English stopwords
    success = False  # becomes True once the file has been fetched
for i in range(5): # try 5 times
try:
#use the browser to access the url
response = requests.get(url,headers = { 'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36', })
success = True # success
break # we got the file, break the loop
except: # browser.open() threw an exception, the attempt to get the response failed
print ('failed attempt',i)
# all five attempts failed, return None
if not success:
return None
readText = response.text # read in the text from the file
sentences = readText.split('.') # split the text into sentences
for sentence in sentences: # for each sentence
sentence=sentence.lower().strip() # loewr case and strip
sentence=re.sub('[^a-z]', ' ', sentence) # replace all non-letter characters with a space
words = sentence.split(' ') # split to get the words in the sentence
for word in words: # for each word in the sentence
if word == '' or word in stopLex:
continue # ignore empty words and stopwords
else:
freq[word] = freq.get(word, 0) + 1 # update the frequency of the word
wordList = set() # set to store all the unique words
for word in freq: # traversing through all keys in the dictionary
if freq[word1] < freq[word] and freq[word2] > freq[word]:
wordList.add(word) # adding word to the set
return wordList # return the set
if __name__=='__main__':
word1 = "park"
word2 = "amazon"
print(run('http://tedlappas.com/wp-content/uploads/2016/09/textfile.txt', word1, word2))
| [
"re.sub",
"requests.get"
] | [((1318, 1349), 're.sub', 're.sub', (['"""[^a-z]"""', '""" """', 'sentence'], {}), "('[^a-z]', ' ', sentence)\n", (1324, 1349), False, 'import re\n'), ((580, 734), 'requests.get', 'requests.get', (['url'], {'headers': "{'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'\n }"}), "(url, headers={'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'\n })\n", (592, 734), False, 'import requests\n')] |
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Helper functions for running models in a distributed setting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
def get_distribution_strategy(distribution_strategy="default",
num_gpus=0,
num_packs=-1):
"""Return a DistributionStrategy for running the model.
Args:
distribution_strategy: a string specifying which distribution strategy to
use. Accepted values are 'off', 'default', 'one_device', and 'mirrored'
case insensitive. 'off' means not to use Distribution Strategy; 'default'
means to choose from `MirroredStrategy`or `OneDeviceStrategy` according to
the number of GPUs.
num_gpus: Number of GPUs to run this model.
num_packs: Optional. Sets the `num_packs` in `tf.distribute.NcclAllReduce`.
Returns:
tf.distribute.DistibutionStrategy object.
Raises:
ValueError: if `distribution_strategy` is 'off' or 'one_device' and
`num_gpus` is larger than 1; or `num_gpus` is negative.
"""
if num_gpus < 0:
raise ValueError("`num_gpus` can not be negative.")
distribution_strategy = distribution_strategy.lower()
if distribution_strategy == "off":
if num_gpus > 1:
raise ValueError("When {} GPUs are specified, distribution_strategy "
"cannot be set to 'off'.".format(num_gpus))
return None
if (distribution_strategy == "one_device" or
(distribution_strategy == "default" and num_gpus <= 1)):
if num_gpus == 0:
return tf.distribute.OneDeviceStrategy("device:CPU:0")
else:
if num_gpus > 1:
raise ValueError("`OneDeviceStrategy` can not be used for more than "
"one device.")
return tf.distribute.OneDeviceStrategy("device:GPU:0")
if distribution_strategy in ("mirrored", "default"):
if num_gpus == 0:
assert distribution_strategy == "mirrored"
devices = ["device:CPU:0"]
else:
devices = ["device:GPU:%d" % i for i in range(num_gpus)]
cross_device_ops = None
if num_packs > -1:
cross_device_ops = tf.distribute.NcclAllReduce(num_packs=num_packs)
return tf.distribute.MirroredStrategy(devices=devices,
cross_device_ops=cross_device_ops)
def strategy_scope_context(strategy):
if strategy:
strategy_scope = strategy.scope()
else:
strategy_scope = DummyContextManager()
return strategy_scope
class DummyContextManager(object):
def __enter__(self):
pass
def __exit__(self, *args):
pass
| [
"tensorflow.distribute.MirroredStrategy",
"six.moves.range",
"tensorflow.distribute.NcclAllReduce",
"tensorflow.distribute.OneDeviceStrategy"
] | [((2941, 3028), 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {'devices': 'devices', 'cross_device_ops': 'cross_device_ops'}), '(devices=devices, cross_device_ops=\n cross_device_ops)\n', (2971, 3028), True, 'import tensorflow as tf\n'), ((2311, 2358), 'tensorflow.distribute.OneDeviceStrategy', 'tf.distribute.OneDeviceStrategy', (['"""device:CPU:0"""'], {}), "('device:CPU:0')\n", (2342, 2358), True, 'import tensorflow as tf\n'), ((2523, 2570), 'tensorflow.distribute.OneDeviceStrategy', 'tf.distribute.OneDeviceStrategy', (['"""device:GPU:0"""'], {}), "('device:GPU:0')\n", (2554, 2570), True, 'import tensorflow as tf\n'), ((2881, 2929), 'tensorflow.distribute.NcclAllReduce', 'tf.distribute.NcclAllReduce', ([], {'num_packs': 'num_packs'}), '(num_packs=num_packs)\n', (2908, 2929), True, 'import tensorflow as tf\n'), ((2787, 2802), 'six.moves.range', 'range', (['num_gpus'], {}), '(num_gpus)\n', (2792, 2802), False, 'from six.moves import range\n')] |
import os
# from signal import pause
from gpiozero import Button
from datetime import datetime
def take_screen():
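    # Busy-poll the GPIO button: while it is held down, a screenshot command is
    # issued on every pass through the loop.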
screen_btn = Button(2)
while True:
if screen_btn.is_pressed:
            timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
            cmd = "scrot -u -d 5 screen_{}.png".format(timestamp)  # -u: focused window, -d 5: delay in seconds
os.system(cmd)
#screen_btn.when_pressed=os.system(cmd)
#pause()
take_screen()
| [
"datetime.datetime.now",
"os.system",
"gpiozero.Button"
] | [((132, 141), 'gpiozero.Button', 'Button', (['(2)'], {}), '(2)\n', (138, 141), False, 'from gpiozero import Button\n'), ((221, 235), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (233, 235), False, 'from datetime import datetime\n'), ((326, 340), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (335, 340), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
@author: david
"""
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import PrecisionRecallDisplay, RocCurveDisplay
class ModelEvaluation:
    @staticmethod
    def evaluate(pipe, dades, objectiu, name, **evaluacio):
x = dades
y = objectiu
w = np.zeros(len(y))
pred = np.zeros(len(y))
classes = np.sort(np.unique(y))
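        # Inverse-frequency sample weights: each class sums to a total weight of 1,
        # so the weighted fit and the weighted confusion matrix treat classes equally.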
for c in classes:
w[y==c] = 1 / sum(y==c)
kFolds = evaluacio.get('kFold', 5)
use_weights = evaluacio.get('class_weighted', True)
kf = KFold(n_splits=kFolds)
for ind_train, ind_test in kf.split(y):
x_t, y_t, w_t = x[ind_train], y[ind_train], w[ind_train]
x_cv = x[ind_test]
if use_weights:
pipe.fit(x_t, y_t, model__sample_weight=w_t)
else:
pipe.fit(x_t, y_t)
pred[ind_test] = pipe.predict(x_cv)
pred = pipe.predict(dades)
plots = evaluacio.get('plot', [])
if not type(plots) == list:
plots = [plots]
for plot in plots:
if plot == 'confusion':
cm = confusion_matrix(y, pred)
plt.subplots(figsize=(10, 6))
sns.heatmap(cm, annot = True, fmt = 'g')
plt.xlabel("Predit")
plt.ylabel("Real")
plt.title(f"Matriu de Confusió pel model {name}")
plt.show()
elif plot == 'percentage':
cm = confusion_matrix(y, pred, sample_weight=w)
plt.subplots(figsize=(10, 6))
sns.heatmap(cm, annot = True, fmt = 'g')
plt.xlabel("Predit")
plt.ylabel("Real")
plt.title(f"Matriu dels percentatges pel model {name}")
plt.show()
elif plot == 'AUC':
plt.figure(figsize=(15,10))
ax = plt.gca()
for c in classes:
yi = np.copy(y)
yi[yi!=c] = -1
yi[yi==c] = 1
predi = np.copy(pred)
predi[predi!=c] = -1
predi[predi==c] = 1
PrecisionRecallDisplay.from_predictions(yi, predi, sample_weight=w,\
ax=ax, name=f'Precision-recall curve of class {c}')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend(loc="lower left")
plt.title('Precision-Recall Curve')
plt.show()
elif plot == 'ROC':
plt.figure(figsize=(15,10))
ax = plt.gca()
for c in classes:
yi = np.copy(y)
yi[yi!=c] = -1
yi[yi==c] = 1
predi = np.copy(pred)
predi[predi!=c] = -1
predi[predi==c] = 1
RocCurveDisplay.from_predictions(yi, predi, sample_weight=w,\
ax=ax, name=f'ROC curve of class {c}')
plt.xlabel('False Positive')
plt.ylabel('True Positive')
plt.legend(loc="lower right")
plt.title('ROC Curve')
plt.show()
else:
print(f'Plot for {plot} not implemented.')
scores = evaluacio.get('score', [])
        if not type(scores) == list:
scores = [scores]
for score in scores:
if score == 'all':
print(classification_report(y, pred))
elif score == 'accuracy':
print(f'Accuracy = {sum(y==pred) / len(y)} : {sum(y==pred)}/{len(y)}')
print(f'Macro accuracy = {sum([sum(c==pred[y==c]) / sum(y==c) for c in classes]) / len(classes)}')
elif score == 'class accuracy':
for c in classes:
ind = y==c
print(f'Accuracy of class {c} = {sum(c==pred[ind]) / sum(ind)} : {sum(c==pred[ind])}/{sum(ind)}')
else:
print(f'Score for {score} not implemented.') | [
"numpy.copy",
"numpy.unique",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"seaborn.heatmap",
"sklearn.metrics.RocCurveDisplay.from_predictions",
"matplotlib.pyplot.figure",
"sklearn.metrics.Pr... | [((701, 723), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'kFolds'}), '(n_splits=kFolds)\n', (706, 723), False, 'from sklearn.model_selection import KFold\n'), ((509, 521), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (518, 521), True, 'import numpy as np\n'), ((1287, 1312), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'pred'], {}), '(y, pred)\n', (1303, 1312), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((1329, 1358), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1341, 1358), True, 'import matplotlib.pyplot as plt\n'), ((1375, 1411), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'fmt': '"""g"""'}), "(cm, annot=True, fmt='g')\n", (1386, 1411), True, 'import seaborn as sns\n'), ((1432, 1452), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predit"""'], {}), "('Predit')\n", (1442, 1452), True, 'import matplotlib.pyplot as plt\n'), ((1469, 1487), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Real"""'], {}), "('Real')\n", (1479, 1487), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1553), 'matplotlib.pyplot.title', 'plt.title', (['f"""Matriu de Confusió pel model {name}"""'], {}), "(f'Matriu de Confusió pel model {name}')\n", (1513, 1553), True, 'import matplotlib.pyplot as plt\n'), ((1570, 1580), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1578, 1580), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1683), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'pred'], {'sample_weight': 'w'}), '(y, pred, sample_weight=w)\n', (1657, 1683), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((1700, 1729), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1712, 1729), True, 'import matplotlib.pyplot as plt\n'), ((1746, 1782), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'fmt': '"""g"""'}), "(cm, annot=True, fmt='g')\n", (1757, 1782), True, 'import seaborn as sns\n'), ((1803, 1823), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predit"""'], {}), "('Predit')\n", (1813, 1823), True, 'import matplotlib.pyplot as plt\n'), ((1840, 1858), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Real"""'], {}), "('Real')\n", (1850, 1858), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1930), 'matplotlib.pyplot.title', 'plt.title', (['f"""Matriu dels percentatges pel model {name}"""'], {}), "(f'Matriu dels percentatges pel model {name}')\n", (1884, 1930), True, 'import matplotlib.pyplot as plt\n'), ((1947, 1957), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1955, 1957), True, 'import matplotlib.pyplot as plt\n'), ((3702, 3732), 'sklearn.metrics.classification_report', 'classification_report', (['y', 'pred'], {}), '(y, pred)\n', (3723, 3732), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((2006, 2034), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (2016, 2034), True, 'import matplotlib.pyplot as plt\n'), ((2055, 2064), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2062, 2064), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2532), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (2522, 2532), True, 'import matplotlib.pyplot as plt\n'), ((2549, 2572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (2559, 2572), True, 'import 
matplotlib.pyplot as plt\n'), ((2589, 2617), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (2599, 2617), True, 'import matplotlib.pyplot as plt\n'), ((2634, 2669), 'matplotlib.pyplot.title', 'plt.title', (['"""Precision-Recall Curve"""'], {}), "('Precision-Recall Curve')\n", (2643, 2669), True, 'import matplotlib.pyplot as plt\n'), ((2686, 2696), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2694, 2696), True, 'import matplotlib.pyplot as plt\n'), ((2124, 2134), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (2131, 2134), True, 'import numpy as np\n'), ((2232, 2245), 'numpy.copy', 'np.copy', (['pred'], {}), '(pred)\n', (2239, 2245), True, 'import numpy as np\n'), ((2347, 2470), 'sklearn.metrics.PrecisionRecallDisplay.from_predictions', 'PrecisionRecallDisplay.from_predictions', (['yi', 'predi'], {'sample_weight': 'w', 'ax': 'ax', 'name': 'f"""Precision-recall curve of class {c}"""'}), "(yi, predi, sample_weight=w, ax=ax,\n name=f'Precision-recall curve of class {c}')\n", (2386, 2470), False, 'from sklearn.metrics import PrecisionRecallDisplay, RocCurveDisplay\n'), ((2745, 2773), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (2755, 2773), True, 'import matplotlib.pyplot as plt\n'), ((2794, 2803), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2801, 2803), True, 'import matplotlib.pyplot as plt\n'), ((3231, 3259), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive"""'], {}), "('False Positive')\n", (3241, 3259), True, 'import matplotlib.pyplot as plt\n'), ((3276, 3303), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive"""'], {}), "('True Positive')\n", (3286, 3303), True, 'import matplotlib.pyplot as plt\n'), ((3320, 3349), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3330, 3349), True, 'import matplotlib.pyplot as plt\n'), ((3366, 3388), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC Curve"""'], {}), "('ROC Curve')\n", (3375, 3388), True, 'import matplotlib.pyplot as plt\n'), ((3405, 3415), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3413, 3415), True, 'import matplotlib.pyplot as plt\n'), ((2863, 2873), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (2870, 2873), True, 'import numpy as np\n'), ((2971, 2984), 'numpy.copy', 'np.copy', (['pred'], {}), '(pred)\n', (2978, 2984), True, 'import numpy as np\n'), ((3086, 3190), 'sklearn.metrics.RocCurveDisplay.from_predictions', 'RocCurveDisplay.from_predictions', (['yi', 'predi'], {'sample_weight': 'w', 'ax': 'ax', 'name': 'f"""ROC curve of class {c}"""'}), "(yi, predi, sample_weight=w, ax=ax, name=\n f'ROC curve of class {c}')\n", (3118, 3190), False, 'from sklearn.metrics import PrecisionRecallDisplay, RocCurveDisplay\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/20 8:15
# @Author : HaiFeng
# @Email : <EMAIL>
from setuptools import setup
import os
this_directory = os.path.abspath(os.path.dirname(__file__))
# Read the contents of a file
def read_file(filename):
with open(os.path.join(this_directory, filename), encoding='utf-8') as f:
desc = f.read()
return desc
# Collect dependencies from a requirements file
def read_requirements(filename):
return [line.strip() for line in read_file(filename).splitlines()
if not line.startswith('#')]
long_description = read_file('readme.md')
long_description_content_type = 'text/markdown'  # the long description uses Markdown
# talib does not need to be included; os.system('pipreqs . --encoding=utf8 --force')  # generates requirements.txt
setup(
    name='hfpy',  # package name
    python_requires='>=3.6.0',  # required Python version
    version='0.2.2',  # package version
    description="Hai Feng Future Trading Platform with SE",  # short description shown on PyPI
    long_description=long_description,  # contents of the README
    long_description_content_type=long_description_content_type,  # the long description uses Markdown
    author="HaiFeng",  # author information
author_email='<EMAIL>',
url='https://github.com/haifengat/hf_at_py',
    # Specify the packages to include; find_packages() could also be used
# packages=find_packages(),
packages=['hfpy'],
    install_requires=read_requirements('requirements.txt'),  # dependencies that must be installed
include_package_data=True,
license="MIT License",
platforms="any",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| [
"os.path.dirname",
"os.path.join"
] | [((192, 217), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (207, 217), False, 'import os\n'), ((269, 307), 'os.path.join', 'os.path.join', (['this_directory', 'filename'], {}), '(this_directory, filename)\n', (281, 307), False, 'import os\n')] |
import math
import numpy as np
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import CharRNN
from data import TextDataset, TextConverter
class Trainer(object):
def __init__(self, args):
self.args = args
self.device = torch.device('cuda' if self.args.cuda else 'cpu')
self.convert = None
self.model = None
self.optimizer = None
self.criterion = self.get_loss
self.meter = AverageValueMeter()
self.train_loader = None
self.get_data()
self.get_model()
self.get_optimizer()
def get_data(self):
self.convert = TextConverter(self.args.txt, max_vocab=self.args.max_vocab)
dataset = TextDataset(self.args.txt, self.args.len, self.convert.text_to_arr)
self.train_loader = DataLoader(dataset, self.args.batch_size, shuffle=True, num_workers=self.args.num_workers)
def get_model(self):
self.model = CharRNN(self.convert.vocab_size, self.args.embed_dim, self.args.hidden_size, self.args.num_layers,
self.args.dropout, self.args.cuda).to(self.device)
if self.args.cuda:
cudnn.benchmark = True
def get_optimizer(self):
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
self.optimizer = ScheduledOptim(optimizer)
@staticmethod
def get_loss(score, label):
return nn.CrossEntropyLoss()(score, label.view(-1))
def save_checkpoint(self, epoch):
if (epoch + 1) % self.args.save_interval == 0:
model_out_path = self.args.save_file + "epoch_{}_model.pth".format(epoch + 1)
torch.save(self.model, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
def save(self):
model_out_path = self.args.save_file + "final_model.pth"
torch.save(self.model, model_out_path)
print("Final model saved to {}".format(model_out_path))
@staticmethod
def pick_top_n(predictions, top_n=5):
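        # Keep only the top_n most probable outputs, renormalize their probabilities,
        # and sample a single class index from that restricted distribution.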
top_predict_prob, top_predict_label = torch.topk(predictions, top_n, 1)
top_predict_prob /= torch.sum(top_predict_prob)
top_predict_prob = top_predict_prob.squeeze(0).cpu().numpy()
top_predict_label = top_predict_label.squeeze(0).cpu().numpy()
c = np.random.choice(top_predict_label, size=1, p=top_predict_prob)
return c
def train(self):
self.meter.reset()
self.model.train()
for x, y in tqdm(self.train_loader):
y = y.long()
x, y = x.to(self.device), y.to(self.device)
# Forward.
score, _ = self.model(x)
loss = self.criterion(score, y)
# Backward.
self.optimizer.zero_grad()
loss.backward()
# Clip gradient.
nn.utils.clip_grad_norm_(self.model.parameters(), 5)
self.optimizer.step()
self.meter.add(loss.item())
print('perplexity: {}'.format(np.exp(self.meter.value()[0])))
def test(self):
self.model.eval()
begin = np.array([i for i in self.args.begin])
begin = np.random.choice(begin, size=1)
text_len = self.args.predict_len
samples = [self.convert.word_to_int(c) for c in begin]
input_txt = torch.LongTensor(samples)[None]
input_txt = input_txt.to(self.device)
_, init_state = self.model(input_txt)
result = samples
model_input = input_txt[:, -1][:, None]
with torch.no_grad():
for i in range(text_len):
out, init_state = self.model(model_input, init_state)
prediction = self.pick_top_n(out.data)
model_input = torch.LongTensor(prediction)[None].to(self.device)
result.append(prediction[0])
print(self.convert.arr_to_text(result))
def predict(self):
self.model.eval()
samples = [self.convert.word_to_int(c) for c in self.args.begin]
input_txt = torch.LongTensor(samples)[None].to(self.device)
_, init_state = self.model(input_txt)
result = samples
model_input = input_txt[:, -1][:, None]
with torch.no_grad():
for i in range(self.args.predict_len):
out, init_state = self.model(model_input, init_state)
prediction = self.pick_top_n(out.data)
model_input = torch.LongTensor(prediction)[None].to(self.device)
result.append(prediction[0])
print(self.convert.arr_to_text(result))
def run(self):
for e in range(self.args.max_epoch):
print('===> EPOCH: {}/{}'.format(e + 1, self.args.max_epoch))
self.train()
self.test()
self.save_checkpoint(e)
self.save()
class AverageValueMeter(object):
"""
the meter tracker mainly focuses on mean and std
"""
def __init__(self):
super(AverageValueMeter, self).__init__()
self.n = None
self.sum = None
self.var = None
self.val = None
self.mean = None
self.std = None
self.reset()
def add(self, value, n=1):
self.val = value
self.sum += value
self.var += value * value
self.n += n
if self.n == 0:
self.mean, self.std = np.nan, np.nan
elif self.n == 1:
self.mean, self.std = self.sum, np.inf
else:
self.mean = self.sum / self.n
self.std = math.sqrt(
(self.var - self.n * self.mean * self.mean) / (self.n - 1.0))
def value(self):
return self.mean, self.std
def reset(self):
self.n = 0
self.sum = 0.0
self.var = 0.0
self.val = 0.0
self.mean = np.nan
self.std = np.nan
class ScheduledOptim(object):
"""A wrapper class for learning rate scheduling
"""
def __init__(self, optimizer):
self.optimizer = optimizer
self.lr = self.optimizer.param_groups[0]['lr']
self.current_steps = 0
def step(self):
"Step by the inner optimizer"
self.current_steps += 1
self.optimizer.step()
def zero_grad(self):
"Zero out the gradients by the inner optimizer"
self.optimizer.zero_grad()
def lr_multi(self, multi):
for param_group in self.optimizer.param_groups:
param_group['lr'] *= multi
self.lr = self.optimizer.param_groups[0]['lr']
def set_learning_rate(self, lr):
self.lr = lr
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
@property
def learning_rate(self):
return self.lr | [
"torch.nn.CrossEntropyLoss",
"data.TextConverter",
"numpy.random.choice",
"torch.topk",
"data.TextDataset",
"tqdm.tqdm",
"torch.LongTensor",
"model.CharRNN",
"math.sqrt",
"numpy.array",
"torch.sum",
"torch.save",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.device"
] | [((334, 383), 'torch.device', 'torch.device', (["('cuda' if self.args.cuda else 'cpu')"], {}), "('cuda' if self.args.cuda else 'cpu')\n", (346, 383), False, 'import torch\n'), ((709, 768), 'data.TextConverter', 'TextConverter', (['self.args.txt'], {'max_vocab': 'self.args.max_vocab'}), '(self.args.txt, max_vocab=self.args.max_vocab)\n', (722, 768), False, 'from data import TextDataset, TextConverter\n'), ((787, 854), 'data.TextDataset', 'TextDataset', (['self.args.txt', 'self.args.len', 'self.convert.text_to_arr'], {}), '(self.args.txt, self.args.len, self.convert.text_to_arr)\n', (798, 854), False, 'from data import TextDataset, TextConverter\n'), ((883, 978), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'self.args.batch_size'], {'shuffle': '(True)', 'num_workers': 'self.args.num_workers'}), '(dataset, self.args.batch_size, shuffle=True, num_workers=self.\n args.num_workers)\n', (893, 978), False, 'from torch.utils.data import DataLoader\n'), ((1929, 1967), 'torch.save', 'torch.save', (['self.model', 'model_out_path'], {}), '(self.model, model_out_path)\n', (1939, 1967), False, 'import torch\n'), ((2139, 2172), 'torch.topk', 'torch.topk', (['predictions', 'top_n', '(1)'], {}), '(predictions, top_n, 1)\n', (2149, 2172), False, 'import torch\n'), ((2201, 2228), 'torch.sum', 'torch.sum', (['top_predict_prob'], {}), '(top_predict_prob)\n', (2210, 2228), False, 'import torch\n'), ((2381, 2444), 'numpy.random.choice', 'np.random.choice', (['top_predict_label'], {'size': '(1)', 'p': 'top_predict_prob'}), '(top_predict_label, size=1, p=top_predict_prob)\n', (2397, 2444), True, 'import numpy as np\n'), ((2558, 2581), 'tqdm.tqdm', 'tqdm', (['self.train_loader'], {}), '(self.train_loader)\n', (2562, 2581), False, 'from tqdm import tqdm\n'), ((3165, 3203), 'numpy.array', 'np.array', (['[i for i in self.args.begin]'], {}), '([i for i in self.args.begin])\n', (3173, 3203), True, 'import numpy as np\n'), ((3220, 3251), 'numpy.random.choice', 'np.random.choice', (['begin'], {'size': '(1)'}), '(begin, size=1)\n', (3236, 3251), True, 'import numpy as np\n'), ((1488, 1509), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1507, 1509), False, 'from torch import nn\n'), ((1729, 1767), 'torch.save', 'torch.save', (['self.model', 'model_out_path'], {}), '(self.model, model_out_path)\n', (1739, 1767), False, 'import torch\n'), ((3376, 3401), 'torch.LongTensor', 'torch.LongTensor', (['samples'], {}), '(samples)\n', (3392, 3401), False, 'import torch\n'), ((3588, 3603), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3601, 3603), False, 'import torch\n'), ((4267, 4282), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4280, 4282), False, 'import torch\n'), ((1021, 1158), 'model.CharRNN', 'CharRNN', (['self.convert.vocab_size', 'self.args.embed_dim', 'self.args.hidden_size', 'self.args.num_layers', 'self.args.dropout', 'self.args.cuda'], {}), '(self.convert.vocab_size, self.args.embed_dim, self.args.hidden_size,\n self.args.num_layers, self.args.dropout, self.args.cuda)\n', (1028, 1158), False, 'from model import CharRNN\n'), ((5590, 5661), 'math.sqrt', 'math.sqrt', (['((self.var - self.n * self.mean * self.mean) / (self.n - 1.0))'], {}), '((self.var - self.n * self.mean * self.mean) / (self.n - 1.0))\n', (5599, 5661), False, 'import math\n'), ((4086, 4111), 'torch.LongTensor', 'torch.LongTensor', (['samples'], {}), '(samples)\n', (4102, 4111), False, 'import torch\n'), ((3798, 3826), 'torch.LongTensor', 'torch.LongTensor', (['prediction'], {}), '(prediction)\n', 
(3814, 3826), False, 'import torch\n'), ((4490, 4518), 'torch.LongTensor', 'torch.LongTensor', (['prediction'], {}), '(prediction)\n', (4506, 4518), False, 'import torch\n')] |
import pytest
from d3rlpy.models.torch.encoders import PixelEncoder
from d3rlpy.models.torch.encoders import PixelEncoderWithAction
from d3rlpy.models.torch.encoders import VectorEncoder
from d3rlpy.models.torch.encoders import VectorEncoderWithAction
from d3rlpy.encoders import create_encoder_factory
from d3rlpy.encoders import PixelEncoderFactory
from d3rlpy.encoders import VectorEncoderFactory
@pytest.mark.parametrize('observation_shape', [(4, 84, 84)])
@pytest.mark.parametrize('action_size', [None, 2])
@pytest.mark.parametrize('discrete_action', [False, True])
def test_pixel_encoder_factory(observation_shape, action_size,
discrete_action):
factory = PixelEncoderFactory()
encoder = factory.create(observation_shape, action_size, discrete_action)
if action_size is None:
assert isinstance(encoder, PixelEncoder)
else:
assert isinstance(encoder, PixelEncoderWithAction)
assert encoder.discrete_action == discrete_action
assert factory.get_type() == 'pixel'
params = factory.get_params()
new_factory = PixelEncoderFactory(**params)
assert new_factory.get_params() == params
@pytest.mark.parametrize('observation_shape', [(100, )])
@pytest.mark.parametrize('action_size', [None, 2])
@pytest.mark.parametrize('discrete_action', [False, True])
def test_vector_encoder_factory(observation_shape, action_size,
discrete_action):
factory = VectorEncoderFactory()
encoder = factory.create(observation_shape, action_size, discrete_action)
if action_size is None:
assert isinstance(encoder, VectorEncoder)
else:
assert isinstance(encoder, VectorEncoderWithAction)
assert encoder.discrete_action == discrete_action
assert factory.get_type() == 'vector'
params = factory.get_params()
new_factory = VectorEncoderFactory(**params)
assert new_factory.get_params() == params
@pytest.mark.parametrize('name', ['pixel', 'vector'])
def test_create_encoder_factory(name):
factory = create_encoder_factory(name)
if name == 'pixel':
assert isinstance(factory, PixelEncoderFactory)
elif name == 'vector':
assert isinstance(factory, VectorEncoderFactory)
| [
"d3rlpy.encoders.PixelEncoderFactory",
"pytest.mark.parametrize",
"d3rlpy.encoders.VectorEncoderFactory",
"d3rlpy.encoders.create_encoder_factory"
] | [((404, 463), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""observation_shape"""', '[(4, 84, 84)]'], {}), "('observation_shape', [(4, 84, 84)])\n", (427, 463), False, 'import pytest\n'), ((465, 514), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""action_size"""', '[None, 2]'], {}), "('action_size', [None, 2])\n", (488, 514), False, 'import pytest\n'), ((516, 573), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""discrete_action"""', '[False, True]'], {}), "('discrete_action', [False, True])\n", (539, 573), False, 'import pytest\n'), ((1180, 1234), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""observation_shape"""', '[(100,)]'], {}), "('observation_shape', [(100,)])\n", (1203, 1234), False, 'import pytest\n'), ((1237, 1286), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""action_size"""', '[None, 2]'], {}), "('action_size', [None, 2])\n", (1260, 1286), False, 'import pytest\n'), ((1288, 1345), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""discrete_action"""', '[False, True]'], {}), "('discrete_action', [False, True])\n", (1311, 1345), False, 'import pytest\n'), ((1959, 2011), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "['pixel', 'vector']"], {}), "('name', ['pixel', 'vector'])\n", (1982, 2011), False, 'import pytest\n'), ((700, 721), 'd3rlpy.encoders.PixelEncoderFactory', 'PixelEncoderFactory', ([], {}), '()\n', (719, 721), False, 'from d3rlpy.encoders import PixelEncoderFactory\n'), ((1101, 1130), 'd3rlpy.encoders.PixelEncoderFactory', 'PixelEncoderFactory', ([], {}), '(**params)\n', (1120, 1130), False, 'from d3rlpy.encoders import PixelEncoderFactory\n'), ((1474, 1496), 'd3rlpy.encoders.VectorEncoderFactory', 'VectorEncoderFactory', ([], {}), '()\n', (1494, 1496), False, 'from d3rlpy.encoders import VectorEncoderFactory\n'), ((1879, 1909), 'd3rlpy.encoders.VectorEncoderFactory', 'VectorEncoderFactory', ([], {}), '(**params)\n', (1899, 1909), False, 'from d3rlpy.encoders import VectorEncoderFactory\n'), ((2065, 2093), 'd3rlpy.encoders.create_encoder_factory', 'create_encoder_factory', (['name'], {}), '(name)\n', (2087, 2093), False, 'from d3rlpy.encoders import create_encoder_factory\n')] |
# -*- coding: utf-8 -*-
#import game
from glob import glob
file1 = glob("01_face_dataset.py")
file2 = glob("02_face_training.py")
import facedataset
import facetrain
import cv2
import numpy as np
import os
from PIL import Image
#facedataset.first()
#facetrain.second()
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascades/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath);
font = cv2.FONT_HERSHEY_SIMPLEX
#iniciate id counter
id = 0
# names related to ids: example ==> loze: id=1, etc
# 이런식으로 사용자의 이름을 사용자 수만큼 추가해준다.
names = ['None', 'kkh', 'kth', 'ldh']
# Initialize and start realtime video capture
cam = cv2.VideoCapture(0)
cam.set(3, 640) # set video widht
cam.set(4, 480) # set video height
# Define min window size to be recognized as a face
minW = 0.1*cam.get(3)
minH = 0.1*cam.get(4)
while True:
ret, img =cam.read()
#img = cv2.flip(img, -1) # Flip vertically
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor = 1.2,
minNeighbors = 5,
minSize = (int(minW), int(minH)),
)
for(x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
        # Check if confidence is less than 100 ==> "0" is perfect match
if (confidence < 100):
id = names[id]
confidence = " {0}%".format(round(100 - confidence))
#game.start()
else:
facedataset.first()
facetrain.second()
#exec(open(file1.read())
#exec(open(file2.read())
#game.start()
confidence = " {0}%".format(round(100 - confidence))
cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2)
cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1)
cv2.imshow('camera',img)
k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video
if k == 27:
break
# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"facetrain.second",
"cv2.face.LBPHFaceRecognizer_create",
"facedataset.first",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.waitKey",
"glob.glob"
] | [((67, 93), 'glob.glob', 'glob', (['"""01_face_dataset.py"""'], {}), "('01_face_dataset.py')\n", (71, 93), False, 'from glob import glob\n'), ((102, 129), 'glob.glob', 'glob', (['"""02_face_training.py"""'], {}), "('02_face_training.py')\n", (106, 129), False, 'from glob import glob\n'), ((283, 319), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (317, 319), False, 'import cv2\n'), ((438, 472), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['cascadePath'], {}), '(cascadePath)\n', (459, 472), False, 'import cv2\n'), ((712, 731), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (728, 731), False, 'import cv2\n'), ((2192, 2215), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2213, 2215), False, 'import cv2\n'), ((994, 1031), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1006, 1031), False, 'import cv2\n'), ((1985, 2010), 'cv2.imshow', 'cv2.imshow', (['"""camera"""', 'img'], {}), "('camera', img)\n", (1995, 2010), False, 'import cv2\n'), ((1233, 1291), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (1246, 1291), False, 'import cv2\n'), ((2018, 2033), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (2029, 2033), False, 'import cv2\n'), ((1607, 1626), 'facedataset.first', 'facedataset.first', ([], {}), '()\n', (1624, 1626), False, 'import facedataset\n'), ((1639, 1657), 'facetrain.second', 'facetrain.second', ([], {}), '()\n', (1655, 1657), False, 'import facetrain\n')] |
# This DAG is for running python scripts to generate static visualisation data
# from syncthing every month end
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from datetime import date, timedelta, datetime
import scripts.sync_school_data as sync_school_data
import scripts.process_raw_school_data as process_raw_school_data
import config.clix_config as clix_config
tools_modules_server_logs_datapath = clix_config.local_dst_state_data_logs
# --------------------------------------------------------------------------------
# set default arguments
# --------------------------------------------------------------------------------
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(1),
#'email': ['<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
'provide_context': True,
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
dag = DAG(
'clix_static_visuals_dag', default_args=default_args,
schedule_interval= '@monthly')
# --------------------------------------------------------------------------------
# Each state is synced independently. We have four states and syncthing data folders
# corresponding to those states are synced through sync_school_data
# --------------------------------------------------------------------------------
#sshHook = SSHHook(conn_id=<YOUR CONNECTION ID FROM THE UI>)
#dummy_operator = DummyOperator(task_id='dummy_task', retries=3, dag=dag)
list_of_state_vis = []
for each_state in clix_config.static_visuals_states:
src = clix_config.remote_src_static_vis + each_state
dst = clix_config.local_dst_static_vis + each_state
list_of_tasks_chunks = []
#sync_state_data = SSHExecuteOperator( task_id="task1",
#bash_command= rsync -avzhe ssh {0}@{1}:{2} {3}".format(user, ip, src, dst),
#ssh_hook=sshHook,
#dag=dag)
sync_state_data = PythonOperator(
task_id='sync_state_data_' + each_state,
python_callable=sync_school_data.rsync_data_ssh,
op_kwargs={'state': each_state, 'src': src, 'dst': dst, 'static_flag': True},
dag=dag, retries=0)
# For parallel processing of files in the list of schools updated
# we use three parallel tasks each taking the portion of the list
# of files. This is done instead of generating tasks dynamically.
# number of schools chunks is set to clix_config.num_school_chunks
# refer: https://stackoverflow.com/questions/55672724/airflow-creating-dynamic-tasks-from-xcom
for each in list(range(clix_config.num_school_chunks)):
if each_state == 'ts':
each_state_new = 'tg'
elif each_state == 'cg':
each_state_new = 'ct'
else:
each_state_new = each_state
process_state_raw_data = PythonOperator(
task_id='process_raw_state_data_' + str(each) + '_' + each_state_new,
python_callable=process_raw_school_data.process_school_data,
op_kwargs={'state': each_state_new, 'chunk': each},
dag=dag)
list_of_tasks_chunks.append(process_state_raw_data)
sync_state_data.set_downstream(process_state_raw_data)
combine_state_chunks = PythonOperator(
task_id='combine_chunks_' + each_state_new,
python_callable=process_raw_school_data.combine_chunks,
op_kwargs={'state': each_state_new},
dag=dag)
list_of_tasks_chunks >> combine_state_chunks
get_state_static_vis_data = PythonOperator(
task_id = 'get_static_vis_' + each_state_new,
python_callable = process_raw_school_data.get_state_static_vis_data,
op_kwargs = {'state': each_state_new, 'all_states_flag': False},
dag=dag)
list_of_state_vis.append(get_state_static_vis_data)
combine_state_chunks >> get_state_static_vis_data
get_static_vis_data_all = PythonOperator(
task_id = 'get_static_vis_data_allstates',
python_callable = process_raw_school_data.get_state_static_vis_data,
op_kwargs = {'state': None, 'all_states_flag': True},
dag=dag)
list_of_state_vis >> get_static_vis_data_all
| [
"datetime.timedelta",
"airflow.utils.dates.days_ago",
"airflow.operators.python_operator.PythonOperator",
"airflow.DAG"
] | [((1235, 1327), 'airflow.DAG', 'DAG', (['"""clix_static_visuals_dag"""'], {'default_args': 'default_args', 'schedule_interval': '"""@monthly"""'}), "('clix_static_visuals_dag', default_args=default_args, schedule_interval\n ='@monthly')\n", (1238, 1327), False, 'from airflow import DAG\n'), ((4152, 4344), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""get_static_vis_data_allstates"""', 'python_callable': 'process_raw_school_data.get_state_static_vis_data', 'op_kwargs': "{'state': None, 'all_states_flag': True}", 'dag': 'dag'}), "(task_id='get_static_vis_data_allstates', python_callable=\n process_raw_school_data.get_state_static_vis_data, op_kwargs={'state':\n None, 'all_states_flag': True}, dag=dag)\n", (4166, 4344), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((894, 925), 'airflow.utils.dates.days_ago', 'airflow.utils.dates.days_ago', (['(1)'], {}), '(1)\n', (922, 925), False, 'import airflow\n'), ((1051, 1071), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (1060, 1071), False, 'from datetime import date, timedelta, datetime\n'), ((2210, 2421), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': "('sync_state_data_' + each_state)", 'python_callable': 'sync_school_data.rsync_data_ssh', 'op_kwargs': "{'state': each_state, 'src': src, 'dst': dst, 'static_flag': True}", 'dag': 'dag', 'retries': '(0)'}), "(task_id='sync_state_data_' + each_state, python_callable=\n sync_school_data.rsync_data_ssh, op_kwargs={'state': each_state, 'src':\n src, 'dst': dst, 'static_flag': True}, dag=dag, retries=0)\n", (2224, 2421), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((3500, 3669), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': "('combine_chunks_' + each_state_new)", 'python_callable': 'process_raw_school_data.combine_chunks', 'op_kwargs': "{'state': each_state_new}", 'dag': 'dag'}), "(task_id='combine_chunks_' + each_state_new, python_callable=\n process_raw_school_data.combine_chunks, op_kwargs={'state':\n each_state_new}, dag=dag)\n", (3514, 3669), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((3777, 3983), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': "('get_static_vis_' + each_state_new)", 'python_callable': 'process_raw_school_data.get_state_static_vis_data', 'op_kwargs': "{'state': each_state_new, 'all_states_flag': False}", 'dag': 'dag'}), "(task_id='get_static_vis_' + each_state_new, python_callable=\n process_raw_school_data.get_state_static_vis_data, op_kwargs={'state':\n each_state_new, 'all_states_flag': False}, dag=dag)\n", (3791, 3983), False, 'from airflow.operators.python_operator import PythonOperator\n')] |
from flask import Flask, request, jsonify, render_template_string
import redis
import requests
import re
import json
import sys
app = Flask(__name__)
@app.route('/getid/<username>')
def getid(username):
red = redis.Redis(host="redis_users")
return red.get(username).decode()
@app.route('/useraction', methods=["POST"])
def useraction():
mode = request.form.get("mode")
username = request.form.get("username")
if mode == "register":
r = requests.get('http://redis_userdata:5000/adduser')
port = int(r.text)
red = redis.Redis(host="redis_users")
red.set(username, port)
return ""
elif mode == "adddata":
red = redis.Redis(host="redis_users")
port = red.get(username).decode()
requests.post(f"http://redis_userdata:5000/putuser/{port}", json={
request.form.get("key"): request.form.get("value")
})
return ""
elif mode == "getdata":
red = redis.Redis(host="redis_users")
port = red.get(username).decode()
r = requests.get(f"http://redis_userdata:5000/getuser/{port}")
return jsonify(r.json())
elif mode == "bioadd":
bio = request.form.get("bio")
bio = bio.replace(".", "").replace("_", "").\
replace("{", "").replace("}", "").\
replace("(", "").replace(")", "").\
replace("|", "")
bio = re.sub(r'\[\[([^\[\]]+)\]\]', r'{{data["\g<1>"]}}', bio)
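        # e.g. a stored bio of "I am [[name]]" becomes 'I am {{data["name"]}}' before rendering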
red = redis.Redis(host="redis_users")
port = red.get(username).decode()
requests.post(f"http://redis_userdata:5000/bio/{port}", json={
"bio": bio
})
return ""
elif mode == "bioget":
red = redis.Redis(host="redis_users")
port = red.get(username).decode()
r = requests.get(f"http://redis_userdata:5000/bio/{port}")
return r.text
elif mode == "keytransfer":
red = redis.Redis(host="redis_users")
port = red.get(username).decode()
red2 = redis.Redis(host="redis_userdata",
port=int(port))
red2.migrate(request.form.get("host"),
request.form.get("port"),
[request.form.get("key")],
0, 1000,
copy=True, replace=True)
return ""
@app.route("/render", methods=["POST"])
def render_bio():
data = request.json.get('data')
if data is None:
data = {}
return render_template_string(request.json.get('bio'), data=data)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| [
"requests.post",
"flask.Flask",
"requests.get",
"redis.Redis",
"flask.request.json.get",
"flask.request.form.get",
"re.sub"
] | [((134, 149), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (139, 149), False, 'from flask import Flask, request, jsonify, render_template_string\n'), ((215, 246), 'redis.Redis', 'redis.Redis', ([], {'host': '"""redis_users"""'}), "(host='redis_users')\n", (226, 246), False, 'import redis\n'), ((360, 384), 'flask.request.form.get', 'request.form.get', (['"""mode"""'], {}), "('mode')\n", (376, 384), False, 'from flask import Flask, request, jsonify, render_template_string\n'), ((400, 428), 'flask.request.form.get', 'request.form.get', (['"""username"""'], {}), "('username')\n", (416, 428), False, 'from flask import Flask, request, jsonify, render_template_string\n'), ((2395, 2419), 'flask.request.json.get', 'request.json.get', (['"""data"""'], {}), "('data')\n", (2411, 2419), False, 'from flask import Flask, request, jsonify, render_template_string\n'), ((468, 518), 'requests.get', 'requests.get', (['"""http://redis_userdata:5000/adduser"""'], {}), "('http://redis_userdata:5000/adduser')\n", (480, 518), False, 'import requests\n'), ((560, 591), 'redis.Redis', 'redis.Redis', ([], {'host': '"""redis_users"""'}), "(host='redis_users')\n", (571, 591), False, 'import redis\n'), ((2493, 2516), 'flask.request.json.get', 'request.json.get', (['"""bio"""'], {}), "('bio')\n", (2509, 2516), False, 'from flask import Flask, request, jsonify, render_template_string\n'), ((684, 715), 'redis.Redis', 'redis.Redis', ([], {'host': '"""redis_users"""'}), "(host='redis_users')\n", (695, 715), False, 'import redis\n'), ((967, 998), 'redis.Redis', 'redis.Redis', ([], {'host': '"""redis_users"""'}), "(host='redis_users')\n", (978, 998), False, 'import redis\n'), ((1053, 1111), 'requests.get', 'requests.get', (['f"""http://redis_userdata:5000/getuser/{port}"""'], {}), "(f'http://redis_userdata:5000/getuser/{port}')\n", (1065, 1111), False, 'import requests\n'), ((1186, 1209), 'flask.request.form.get', 'request.form.get', (['"""bio"""'], {}), "('bio')\n", (1202, 1209), False, 'from flask import Flask, request, jsonify, render_template_string\n'), ((1404, 1465), 're.sub', 're.sub', (['"""\\\\[\\\\[([^\\\\[\\\\]]+)\\\\]\\\\]"""', '"""{{data["\\\\g<1>"]}}"""', 'bio'], {}), '(\'\\\\[\\\\[([^\\\\[\\\\]]+)\\\\]\\\\]\', \'{{data["\\\\g<1>"]}}\', bio)\n', (1410, 1465), False, 'import re\n'), ((1475, 1506), 'redis.Redis', 'redis.Redis', ([], {'host': '"""redis_users"""'}), "(host='redis_users')\n", (1486, 1506), False, 'import redis\n'), ((1557, 1631), 'requests.post', 'requests.post', (['f"""http://redis_userdata:5000/bio/{port}"""'], {'json': "{'bio': bio}"}), "(f'http://redis_userdata:5000/bio/{port}', json={'bio': bio})\n", (1570, 1631), False, 'import requests\n'), ((845, 868), 'flask.request.form.get', 'request.form.get', (['"""key"""'], {}), "('key')\n", (861, 868), False, 'from flask import Flask, request, jsonify, render_template_string\n'), ((870, 895), 'flask.request.form.get', 'request.form.get', (['"""value"""'], {}), "('value')\n", (886, 895), False, 'from flask import Flask, request, jsonify, render_template_string\n'), ((1713, 1744), 'redis.Redis', 'redis.Redis', ([], {'host': '"""redis_users"""'}), "(host='redis_users')\n", (1724, 1744), False, 'import redis\n'), ((1799, 1853), 'requests.get', 'requests.get', (['f"""http://redis_userdata:5000/bio/{port}"""'], {}), "(f'http://redis_userdata:5000/bio/{port}')\n", (1811, 1853), False, 'import requests\n'), ((1922, 1953), 'redis.Redis', 'redis.Redis', ([], {'host': '"""redis_users"""'}), "(host='redis_users')\n", (1933, 1953), False, 'import 
redis\n'), ((2110, 2134), 'flask.request.form.get', 'request.form.get', (['"""host"""'], {}), "('host')\n", (2126, 2134), False, 'from flask import Flask, request, jsonify, render_template_string\n'), ((2157, 2181), 'flask.request.form.get', 'request.form.get', (['"""port"""'], {}), "('port')\n", (2173, 2181), False, 'from flask import Flask, request, jsonify, render_template_string\n'), ((2205, 2228), 'flask.request.form.get', 'request.form.get', (['"""key"""'], {}), "('key')\n", (2221, 2228), False, 'from flask import Flask, request, jsonify, render_template_string\n')] |
####################################################
# (C) <NAME>, 2016, All Rights Reserved
#
# File Name: app.py
#
# Creation Date: 28-12-2016
#
# Created By: <NAME>
#
# Purpose:
#
####################################################
from __future__ import print_function
from credentialfile import CredentialFile
from ststoken import StsToken
class App(object):
def __init__(self, options):
self.options = options
self.credential_file = CredentialFile(path=options['credential_file'], profile=options['profile_name'])
self.credential_file.back_fill_user_data()
self.sts = StsToken(self.credential_file)
self.sts.get_auth()
| [
"credentialfile.CredentialFile",
"ststoken.StsToken"
] | [((462, 547), 'credentialfile.CredentialFile', 'CredentialFile', ([], {'path': "options['credential_file']", 'profile': "options['profile_name']"}), "(path=options['credential_file'], profile=options['profile_name']\n )\n", (476, 547), False, 'from credentialfile import CredentialFile\n'), ((614, 644), 'ststoken.StsToken', 'StsToken', (['self.credential_file'], {}), '(self.credential_file)\n', (622, 644), False, 'from ststoken import StsToken\n')] |
import re
from backend.projectfiles.GenericProjectFile import GenericProjectFile
QUOTE = r'(?:["|\'])'
STRING = r'([\w\.\-\+]+)'
GAV_REGEXP = QUOTE + '(?:' + ":".join([STRING, STRING, STRING]) + ')' + QUOTE
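# Illustrative example: the pattern matches quoted GAV coordinates such as
# "org.slf4j:slf4j-api:1.7.30", capturing group, artifact and version separately.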
class GradleProjectFile(GenericProjectFile):
""" Gradle project file implementation to extract dependencies """
def extract(self):
dependencies = []
for line in self.result.iter_lines():
results = re.match('.*' + GAV_REGEXP + '.*', line)
if results:
group = results.group(1)
artifact = results.group(2)
version = results.group(3)
dependencies.append({'group': group,
'artifact': artifact,
'version': version,
'gav': ":".join([group, artifact, version])})
return dependencies
| [
"re.match"
] | [((445, 485), 're.match', 're.match', (["('.*' + GAV_REGEXP + '.*')", 'line'], {}), "('.*' + GAV_REGEXP + '.*', line)\n", (453, 485), False, 'import re\n')] |
from django.urls import path
from whitelist_api.views import AddTorrentInfoHash, RemoveTorrentInfoHash
app_name = 'whitelist_api'
urlpatterns = [
path('add-torrent-info-hash/', AddTorrentInfoHash.as_view()),
path('del-torrent-info-hash/', RemoveTorrentInfoHash.as_view()),
]
| [
"whitelist_api.views.RemoveTorrentInfoHash.as_view",
"whitelist_api.views.AddTorrentInfoHash.as_view"
] | [((184, 212), 'whitelist_api.views.AddTorrentInfoHash.as_view', 'AddTorrentInfoHash.as_view', ([], {}), '()\n', (210, 212), False, 'from whitelist_api.views import AddTorrentInfoHash, RemoveTorrentInfoHash\n'), ((250, 281), 'whitelist_api.views.RemoveTorrentInfoHash.as_view', 'RemoveTorrentInfoHash.as_view', ([], {}), '()\n', (279, 281), False, 'from whitelist_api.views import AddTorrentInfoHash, RemoveTorrentInfoHash\n')] |
from Stack import Stack
def main():
stack = Stack()
stack.push(0)
stack.push(1)
stack.push(2)
stack.push(3)
assert stack.data == [0, 1, 2, 3]
assert stack.capacity == 4
assert stack.size == 4
popped = stack.pop()
assert popped == 3
popped = stack.pop()
assert popped == 2
print(stack)
assert stack.data == [0, 1]
assert stack.capacity == 2
assert stack.size == 2
print("Expected:", "['0', '1'] Capacity: 2")
print("Output:", str(stack))
main() | [
"Stack.Stack"
] | [((50, 57), 'Stack.Stack', 'Stack', ([], {}), '()\n', (55, 57), False, 'from Stack import Stack\n')] |
import numpy as np
import pytest
from pandas.core.frame import DataFrame
from bender.importers import DataImporters
from bender.model_loaders import ModelLoaders
from bender.model_trainer.decision_tree import DecisionTreeClassifierTrainer
from bender.split_strategies import SplitStrategies
pytestmark = pytest.mark.asyncio
async def test_predict_data() -> None:
model, data_set = await (
DataImporters.literal(DataFrame({'x': [0, 1], 'y': [0, 1], 'output': [0, 1]}))
# No test set
.split(SplitStrategies.ratio(1))
.train(DecisionTreeClassifierTrainer(), input_features=['x', 'y'], target_feature='output')
.run()
)
test_data = DataFrame({'x': [2, -3, 4], 'y': [2, -3, 4]})
expected = [1, 0, 1]
_, _, result = await (ModelLoaders.literal(model).import_data(DataImporters.literal(test_data)).predict().run())
assert np.all(expected == result)
"""
Supervised Regression
Vector[float] -> float
.train(
RegresionModels.linear(),
input_features=["area", "location"], # floats
target_feature="price" # float
)
"""
"""
Supervised Classification
Vector[float / int / bool / str] -> str / bool / int
.train(
ClassificationModels.DecisionTree(),
input_features=["sepal_length", "sepal_width"], # float / int / bool / str
target_feature="class_name" # str / bool / int
)
# Should only be available for clustering / classification problems
.predict_probability(
labels={
"setosa": "is_setosa_probability",
"versicolor": "is_versicolor_probability",
}
)
"""
| [
"bender.model_trainer.decision_tree.DecisionTreeClassifierTrainer",
"bender.split_strategies.SplitStrategies.ratio",
"bender.importers.DataImporters.literal",
"bender.model_loaders.ModelLoaders.literal",
"numpy.all",
"pandas.core.frame.DataFrame"
] | [((686, 731), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'x': [2, -3, 4], 'y': [2, -3, 4]}"], {}), "({'x': [2, -3, 4], 'y': [2, -3, 4]})\n", (695, 731), False, 'from pandas.core.frame import DataFrame\n'), ((886, 912), 'numpy.all', 'np.all', (['(expected == result)'], {}), '(expected == result)\n', (892, 912), True, 'import numpy as np\n'), ((563, 594), 'bender.model_trainer.decision_tree.DecisionTreeClassifierTrainer', 'DecisionTreeClassifierTrainer', ([], {}), '()\n', (592, 594), False, 'from bender.model_trainer.decision_tree import DecisionTreeClassifierTrainer\n'), ((522, 546), 'bender.split_strategies.SplitStrategies.ratio', 'SplitStrategies.ratio', (['(1)'], {}), '(1)\n', (543, 546), False, 'from bender.split_strategies import SplitStrategies\n'), ((823, 855), 'bender.importers.DataImporters.literal', 'DataImporters.literal', (['test_data'], {}), '(test_data)\n', (844, 855), False, 'from bender.importers import DataImporters\n'), ((783, 810), 'bender.model_loaders.ModelLoaders.literal', 'ModelLoaders.literal', (['model'], {}), '(model)\n', (803, 810), False, 'from bender.model_loaders import ModelLoaders\n'), ((428, 483), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'x': [0, 1], 'y': [0, 1], 'output': [0, 1]}"], {}), "({'x': [0, 1], 'y': [0, 1], 'output': [0, 1]})\n", (437, 483), False, 'from pandas.core.frame import DataFrame\n')] |
#!/usr/bin/env python3
# Goshu IRC Bot
# written by <NAME> <<EMAIL>>
# licensed under the ISC license
"""extends several builtin functions and provides helper functions
The default Python library is extensive and well-stocked. There are some
times however, you wish a small task was taken care of for you. This module
if chock full of little extensions and helper functions I've needed while
writing Goshu.
Small, interesting, self-contained functions that can probably be reused
elsewhere.
"""
import collections.abc
import datetime
import imp
import json
import os
import re
import string
import sys
import urllib.parse
from girc.formatting import escape
from http_status import Status
from pyquery import PyQuery as pq
import importlib
import requests
import xml.sax.saxutils as saxutils
import yaml
valid_filename_chars = string.ascii_letters + string.digits + '#._- '
def true_or_false(in_str):
"""Returns True/False if string represents it, else None."""
in_str = in_str.lower()
if in_str.startswith(('true', 'y', '1', 'on')):
return True
elif in_str.startswith(('false', 'n', '0', 'off')):
return False
else:
return None
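# Example usage (illustrative):
#   true_or_false('Yes')   -> True
#   true_or_false('off')   -> False
#   true_or_false('maybe') -> None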
def split_num(line, chars=' ', maxsplits=1, empty=''):
"""/lazy/ wrapper, to stop us having to bounds-check when splitting.
Arguments:
line -- line to split
chars -- character(s) to split line on
maxsplits -- how many split items are returned
empty -- character to put in place of nothing
Returns:
line.split(chars, items); return value is padded until `maxsplits + 1` number of values
are present"""
line = line.split(chars, maxsplits)
while len(line) <= maxsplits:
line.append(empty)
return line
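# Example usage (illustrative):
#   split_num('JOIN #chan')  -> ['JOIN', '#chan']
#   split_num('JOIN')        -> ['JOIN', '']   (padded, so index 1 is always safe)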
def is_ok(func, prompt, blank='', clearline=False):
"""Prompt the user for yes/no and returns True/False
Arguments:
prompt -- Prompt for the user
blank -- If True, a blank response will return True, ditto for False, the default ''
will not accept blank responses and ask until the user gives an appropriate
response
Returns:
True if user accepts, False if user does not"""
while True:
ok = func(prompt).lower().strip()
if len(ok) > 0:
if ok[0] == 'y' or ok[0] == 't' or ok[0] == '1': # yes, true, 1
return True
elif ok[0] == 'n' or ok[0] == 'f' or ok[0] == '0': # no, false, 0
return False
else:
if blank is True:
return True
elif blank is False:
return False
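# Example usage (illustrative): is_ok(input, 'Delete logs? ') keeps prompting until the
# user answers yes/no, while is_ok(input, 'Delete logs? ', blank=False) treats an empty
# answer as a "no".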
def bytes_to_str(bytes, base=2, precision=0):
"""Convert number of bytes to a human-readable format
Arguments:
bytes -- number of bytes
base -- base 2 'regular' multiplexer, or base 10 'storage' multiplexer
precision -- number of decimal places to output
Returns:
Human-readable string such as '1.32M'
"""
if base == 2:
multiplexer = 1024
elif base == 10:
multiplexer = 1000
else:
return None # raise error
precision_string = '%.' + str(precision) + 'f'
mebi_convert = True
if bytes >= (multiplexer ** 4):
terabytes = float(bytes / (multiplexer ** 4))
output = (precision_string % terabytes) + 'T'
elif bytes >= (multiplexer ** 3):
gigabytes = float(bytes / (multiplexer ** 3))
output = (precision_string % gigabytes) + 'G'
elif bytes >= (multiplexer ** 2):
megabytes = float(bytes / (multiplexer ** 2))
output = (precision_string % megabytes) + 'M'
elif bytes >= (multiplexer ** 1):
kilobytes = float(bytes / (multiplexer ** 1))
output = (precision_string % kilobytes) + 'K'
else:
output = (precision_string % float(bytes)) + 'B'
mebi_convert = False
# mebibytes and gibibytes all those weird HDD manufacturer terms
if base == 10 and mebi_convert:
num, base = output[:-1], output[-1]
output = num + base.lower() + 'B'
return output
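# Example usage (illustrative):
#   bytes_to_str(1536, precision=1)           -> '1.5K'
#   bytes_to_str(1536, base=10, precision=1)  -> '1.5kB'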
def time_metric(secs=60, mins=0):
"""Returns user-readable string representing given number of seconds."""
if mins:
secs += (mins * 60)
time = ''
for metric_secs, metric_char in [[7 * 24 * 60 * 60, 'w'],
[24 * 60 * 60, 'd'],
[60 * 60, 'h'],
[60, 'm']]:
if secs > metric_secs:
time += '{}{}'.format(int(secs / metric_secs), metric_char)
secs -= int(secs / metric_secs) * metric_secs
if secs > 0:
time += '{}s'.format(secs)
return time
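# Example usage (illustrative): time_metric(secs=3725) -> '1h2m5s'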
def metric(num, metric_list=[[10 ** 9, 'B'], [10 ** 6, 'M'], [10 ** 3, 'k']], additive=False):
"""Returns user-readable string representing given value.
Arguments:
num is the base value we're converting.
metric_list is the list of data we're working off.
additive is whether we add the various values together, or separate them.
Return:
a string such as 345K or 23w6d2h53s"""
output = ''
for metric_count, metric_char in metric_list:
if num > metric_count:
if additive:
format_str = '{}{}'
else:
format_str = '{:.1f}{}'
num = (num / metric_count)
if not additive:
num = float(num)
output += format_str.format(num, metric_char)
if not additive:
break
# just in case no output
if output == '':
output = str(num)
return output
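# Example usage (illustrative): metric(2500000) -> '2.5M' with the default metric_list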
def get_url(url, **kwargs):
"""Gets a url, handles all the icky requests stuff."""
try:
if 'timeout' not in kwargs:
kwargs['timeout'] = 20
r = requests.get(url, **kwargs)
r.status = Status(r.status_code)
if not r.ok:
return 'HTTP Error - {code} {name} - {description}'.format(**{
'code': r.status.code,
'name': r.status.name,
'description': r.status.description
})
except requests.exceptions.Timeout:
return 'Connection timed out'
except requests.exceptions.RequestException as x:
return '{}'.format(x.__class__.__name__)
return r
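# Example usage (illustrative): r = get_url('https://example.com'); a str return value
# is an error message, otherwise r is the requests Response object.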
def format_extract(format_json, input_element, format=None, debug=False, fail='Failure'):
if not format:
if 'format' in format_json:
format = format_json['format']
else:
return 'No format for format_extract()'
if 'debug' in format_json:
debug = format_json['debug']
# format-specific settings
if format == 'json':
input_element = json.loads(input_element)
retrieve = json_return
elif format == 'xml':
# ignore xml namespaces
input_element = input_element.replace(' xmlns:', ' xmlnamespace:')
input_element = input_element.replace(' xmlns=', ' xmlnamespace=')
retrieve = xml_return
# format extraction - format kwargs
format_dict = {}
if 'response_dict' in format_json:
for name in format_json['response_dict']:
try:
if isinstance(format_json['response_dict'][name], collections.abc.Callable):
try:
format_dict[name] = format_json['response_dict'][name](format_json,
input_element)
except BaseException as x:
if debug:
return 'Unknown failure: {}'.format(x)
else:
return 'Code error'
else:
format_dict[name] = retrieve(input_element,
format_json['response_dict'][name])
if format_dict[name] is None:
return fail
except KeyError:
if debug:
return 'Fail on {}'.format(name)
else:
return fail
except IndexError:
if debug:
return 'Fail on {}'.format(name)
else:
return fail
try:
return format_json['response'].format(**format_dict)
except KeyError:
if debug:
return 'Fail on format() key'
else:
return fail
except IndexError:
if debug:
return 'Fail on format() index'
else:
return fail
def xml_return(input_xml, selector):
pq_xml = pq(input_xml)
if selector[0] == 'text':
return selector[1]
elif selector[0] == 'text.escape':
return escape(selector[1])
elif selector[0] == 'jquery':
return pq_xml(selector[1]).text()
elif selector[0] == 'jquery.attr':
return pq_xml(selector[1]).attr(selector[2])
def json_return(input_json, selector):
if selector[0] == 'text':
return selector[1]
elif selector[0] == 'text.escape':
return escape(selector[1])
elif selector[0] == 'json.lower':
if len(selector) > 2:
default = selector[2]
else:
default = ""
return str(json_element(input_json, selector[1], default=default)).lower()
elif selector[0] == 'json.quote_plus':
if len(selector) > 2:
default = selector[2]
else:
default = ""
return urllib.parse.quote_plus(str(json_element(input_json, selector[1],
default=default)))
elif selector[0] == 'json.num.metric':
if len(selector) > 2:
default = selector[2]
else:
default = 0
return metric(int(json_element(input_json, selector[1], default=default)))
elif selector[0] == 'json.datetime.fromtimestamp':
if len(selector) > 2:
default = selector[2]
else:
default = 0
ts = json_element(input_json, selector[1], default=default)
return datetime.datetime.fromtimestamp(ts).strftime(selector[2])
elif selector[0] == 'json.dict.returntrue':
keys = []
json_dict = json_element(input_json, selector[1])
for key in json_dict:
if json_dict[key]:
keys.append(key)
return selector[2].join(keys)
# before general json
else:
if len(selector) > 2:
default = selector[2]
else:
default = None
return escape(str(json_element(input_json, selector[1], default=default)))
def json_element(input_dict, query, default=None):
"""Runs through a data structure and returns the selected element."""
for element in query:
is_list_index = isinstance(element, int) and isinstance(input_dict, (list, tuple))
if is_list_index or element in input_dict:
input_dict = input_dict[element]
else:
return default
return input_dict
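# Example usage (illustrative):
#   json_element({'items': [{'title': 'first'}]}, ['items', 0, 'title'])   -> 'first'
#   json_element({'items': []}, ['missing', 'key'], default='n/a')         -> 'n/a'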
def filename_escape(unsafe, replace_char='_', valid_chars=valid_filename_chars):
"""Escapes a string to provide a safe local filename
Arguments:
unsafe -- Unsafe string to escape
replace_char -- Character to replace unsafe characters with
valid_chars -- Valid filename characters
Returns:
Safe local filename string
"""
if not unsafe:
return ''
safe = ''
for character in unsafe:
if character in valid_chars:
safe += character
else:
safe += replace_char
return safe
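# Example usage (illustrative): filename_escape('logs/chan?.txt') -> 'logs_chan_.txt'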
_unescape_map = {
    '&#39;': "'",
    '&apos;': "'",
    '&quot;': "'",
}
def html_unescape(input):
"""Turns any html-escaped characters back to their normal equivalents."""
output = saxutils.unescape(input)
for char in _unescape_map.keys():
output = output.replace(char, _unescape_map[char])
return output
def utf8_bom(input):
"""Strips BOM from a utf8 string, because open() leaves it in for some reason."""
output = input.replace('\ufeff', '')
return output
class JsonHandler:
def __init__(self, base, folder, attr=None, callback_name=None, ext=None, yaml=False):
if ext:
self.pattern = [x.format(ext) for x in ['*.{}.yaml', '*.{}.json', '*_{}.py']]
else:
self.pattern = ['*.yaml', '*.json', '*.py']
self.base = base
self.attr = attr
self.folder = folder
self.ext = ext
self.callback_name = callback_name
self.yaml = yaml
self.reload()
def spread_new_json(self, new_json):
if self.attr:
setattr(self.base, self.attr, new_json)
if self.callback_name:
getattr(self.base, self.callback_name, None)(new_json)
def reload(self):
new_json = {}
if not os.path.exists(self.folder):
self.spread_new_json(new_json)
return
# loading
folders_to_scan = [self.folder]
# loading list of folders that contain modules
for f in os.listdir(self.folder):
if f == 'disabled':
continue
full_name = os.path.join(self.folder, f)
if os.path.isdir(full_name):
folders_to_scan.append(full_name)
# loading actual modules
for folder in folders_to_scan:
for f in os.listdir(folder):
full_name = os.path.join(folder, f)
if os.path.isfile(full_name):
(extname, ext) = os.path.splitext(full_name)
if ext.lower() not in ['.json', '.yaml']:
continue
# check for loader-specific extension
if self.ext:
name, ext = os.path.splitext(extname)
pyfile = '{}_{}'.format('.'.join(name.split(os.sep)), self.ext)
# not really our module
if ext != os.extsep + self.ext:
continue
else:
name, ext = extname, ''
pyfile = '.'.join(name[2:].split(os.sep))
# NOTE: this is static, and that is bad
pyfile = pyfile.lstrip('..modules.')
# py file
if self.yaml:
try:
module = importlib.import_module(pyfile)
imp.reload(module) # so reloading works
# we should capture this and output errors to stderr
except:
pass
# yaml / json
with open(full_name, encoding='utf-8') as js_f:
if self.yaml:
try:
info = yaml.load(js_f.read(), Loader=yaml.FullLoader)
# we should capture this and output errors to stderr
except Exception as ex:
print('failed to load YAML file', full_name, ':', ex)
continue
else:
info = json.loads(js_f.read())
# set module name and info
if 'name' not in info:
new_name = name.split('/')[-1].split('\\')[-1]
info['name'] = [new_name]
new_json[info['name'][0]] = info
# set info on base object and / or call callback
self.spread_new_json(new_json)
# timedelta functions
_td_str_map = [
('d', 'days'),
('h', 'hours'),
('m', 'minutes'),
('s', 'seconds'),
]
_str_td = r''
for istr, td in _td_str_map:
_str_td += r'\s*(?:(?P<' + td + r'>[0-9]+)\s*' + istr + r')?'
_TD_STR_REGEX = re.compile(_str_td)
def timedelta_to_string(delta):
"""Converts a timedelta dict to a string."""
td_string = ''
for istr, td in _td_str_map:
if td in delta:
td_string += str(delta[td])
td_string += istr
return td_string
def string_to_timedelta(td_string):
"""Converts a string to a timedelta dict."""
match = _TD_STR_REGEX.match(td_string)
delta = {}
for istr, td in _td_str_map:
if match.group(td):
if '.' in match.group(td):
val = float(match.group(td))
else:
val = int(match.group(td))
delta[td] = val
return delta
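# Example usage (illustrative):
#   string_to_timedelta('1d 2h')                  -> {'days': 1, 'hours': 2}
#   timedelta_to_string({'days': 1, 'hours': 2})  -> '1d2h'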
# path
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
| [
"os.path.exists",
"json.loads",
"os.listdir",
"sys.path.insert",
"girc.formatting.escape",
"importlib.import_module",
"re.compile",
"datetime.datetime.fromtimestamp",
"imp.reload",
"os.path.join",
"os.path.splitext",
"requests.get",
"os.path.isfile",
"os.path.isdir",
"http_status.Status"... | [((15914, 15933), 're.compile', 're.compile', (['_str_td'], {}), '(_str_td)\n', (15924, 15933), False, 'import re\n'), ((8631, 8644), 'pyquery.PyQuery', 'pq', (['input_xml'], {}), '(input_xml)\n', (8633, 8644), True, 'from pyquery import PyQuery as pq\n'), ((11787, 11811), 'xml.sax.saxutils.unescape', 'saxutils.unescape', (['input'], {}), '(input)\n', (11804, 11811), True, 'import xml.sax.saxutils as saxutils\n'), ((5782, 5809), 'requests.get', 'requests.get', (['url'], {}), '(url, **kwargs)\n', (5794, 5809), False, 'import requests\n'), ((5829, 5850), 'http_status.Status', 'Status', (['r.status_code'], {}), '(r.status_code)\n', (5835, 5850), False, 'from http_status import Status\n'), ((6696, 6721), 'json.loads', 'json.loads', (['input_element'], {}), '(input_element)\n', (6706, 6721), False, 'import json\n'), ((13075, 13098), 'os.listdir', 'os.listdir', (['self.folder'], {}), '(self.folder)\n', (13085, 13098), False, 'import os\n'), ((16647, 16671), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (16662, 16671), False, 'import sys\n'), ((8757, 8776), 'girc.formatting.escape', 'escape', (['selector[1]'], {}), '(selector[1])\n', (8763, 8776), False, 'from girc.formatting import escape\n'), ((9097, 9116), 'girc.formatting.escape', 'escape', (['selector[1]'], {}), '(selector[1])\n', (9103, 9116), False, 'from girc.formatting import escape\n'), ((12852, 12879), 'os.path.exists', 'os.path.exists', (['self.folder'], {}), '(self.folder)\n', (12866, 12879), False, 'import os\n'), ((13182, 13210), 'os.path.join', 'os.path.join', (['self.folder', 'f'], {}), '(self.folder, f)\n', (13194, 13210), False, 'import os\n'), ((13226, 13250), 'os.path.isdir', 'os.path.isdir', (['full_name'], {}), '(full_name)\n', (13239, 13250), False, 'import os\n'), ((13396, 13414), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (13406, 13414), False, 'import os\n'), ((13444, 13467), 'os.path.join', 'os.path.join', (['folder', 'f'], {}), '(folder, f)\n', (13456, 13467), False, 'import os\n'), ((13487, 13512), 'os.path.isfile', 'os.path.isfile', (['full_name'], {}), '(full_name)\n', (13501, 13512), False, 'import os\n'), ((13551, 13578), 'os.path.splitext', 'os.path.splitext', (['full_name'], {}), '(full_name)\n', (13567, 13578), False, 'import os\n'), ((13802, 13827), 'os.path.splitext', 'os.path.splitext', (['extname'], {}), '(extname)\n', (13818, 13827), False, 'import os\n'), ((14447, 14478), 'importlib.import_module', 'importlib.import_module', (['pyfile'], {}), '(pyfile)\n', (14470, 14478), False, 'import importlib\n'), ((14507, 14525), 'imp.reload', 'imp.reload', (['module'], {}), '(module)\n', (14517, 14525), False, 'import imp\n'), ((10111, 10146), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (10142, 10146), False, 'import datetime\n')] |
from flask_wtf import FlaskForm
from wtforms import SubmitField, SelectField, IntegerField, FloatField, StringField
from wtforms.validators import DataRequired
import pandas as pd
uniq_vals = pd.read_csv("data/unique_cat_vals.csv", index_col=0)
class InputData(FlaskForm):
car = SelectField(label="Car", choices=uniq_vals.car.dropna().sort_values(), validators=[DataRequired()])
model = SelectField("Model", choices=uniq_vals.model.dropna().sort_values(), validators=[DataRequired()])
body = SelectField(label="Body", choices=uniq_vals.body.dropna().sort_values(), validators=[DataRequired()])
drive = SelectField("Drive", choices=uniq_vals.drive.dropna().sort_values(), validators=[DataRequired()])
engType = SelectField("Engine type: ", choices=uniq_vals.engType.dropna().sort_values(), validators=[DataRequired()])
engV = FloatField("Engine Volume", validators=[DataRequired()])
year = IntegerField("Year", validators=[DataRequired()])
mileage = IntegerField(label="Mileage", validators=[DataRequired()])
registration = SelectField(label="Registration", choices=uniq_vals.registration.dropna())
submit = SubmitField("Predict the price")
| [
"wtforms.validators.DataRequired",
"wtforms.SubmitField",
"pandas.read_csv"
] | [((193, 245), 'pandas.read_csv', 'pd.read_csv', (['"""data/unique_cat_vals.csv"""'], {'index_col': '(0)'}), "('data/unique_cat_vals.csv', index_col=0)\n", (204, 245), True, 'import pandas as pd\n'), ((1151, 1183), 'wtforms.SubmitField', 'SubmitField', (['"""Predict the price"""'], {}), "('Predict the price')\n", (1162, 1183), False, 'from wtforms import SubmitField, SelectField, IntegerField, FloatField, StringField\n'), ((369, 383), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (381, 383), False, 'from wtforms.validators import DataRequired\n'), ((479, 493), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (491, 493), False, 'from wtforms.validators import DataRequired\n'), ((592, 606), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (604, 606), False, 'from wtforms.validators import DataRequired\n'), ((702, 716), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (714, 716), False, 'from wtforms.validators import DataRequired\n'), ((824, 838), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (836, 838), False, 'from wtforms.validators import DataRequired\n'), ((892, 906), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (904, 906), False, 'from wtforms.validators import DataRequired\n'), ((953, 967), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (965, 967), False, 'from wtforms.validators import DataRequired\n'), ((1026, 1040), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1038, 1040), False, 'from wtforms.validators import DataRequired\n')] |
from wordcloud2 import wordcloud as W
import os
from PIL import Image
stwords = {"us", "will"}
print("==Obama's==")
cs = W.randomscheme() #:Set1_8
as_ = W.randomangles() #(0,90,45,-45)
dens = 0.5 #not too high
wca = W.wordcloud(
W.processtext(open(W.pkgdir(W.WordCloud)+"/res/Barack Obama's First Inaugural Address.txt").read(),
stopwords=set(W.stopwords_en).union(stwords)),
colors = cs,
angles = as_,
density = dens)
wca.generate()
#md# ### Then generate the wordcloud on the right
print("==Trump's==")
wcb = W.wordcloud(
W.processtext(open(W.pkgdir(W.WordCloud)+"/res/<NAME>ump's Inaugural Address.txt").read(),
stopwords=set(W.stopwords_en).union(stwords)),
mask = wca.getsvgmask(),
colors = cs,
angles = as_,
density = dens,
run = W.identity, #turn off the useless initimage! and placement! in advance
)
#md# Follow these steps to generate a wordcloud: initimage! -> placement! -> generate!
samewords = list(set(wca.getwords()).intersection(wcb.getwords()))
print(len(samewords), "same words")
for w in samewords:
wcb.setcolors(w, wca.getcolors(w))
wcb.setangles(w, wca.getangles(w))
wcb.initimages()
wcb.setstate(":placement!")
print("=ignore defferent words=")
with wcb.keep(samewords) as wcb:
assert set(wcb.getwords()) == set(samewords)
centers = wca.getpositions(samewords, type=W.Ju.getcenter)
wcb.setpositions(samewords, centers, type=W.Ju.setcenter_b) #manually initialize the position,
wcb.setstate(":placement!") #and set the state flag
wcb.generate(1000, patient=-1, retry=1) #patient=-1 means no teleport; retry=1 means no rescale
print("=pin same words=")
with wcb.pin(samewords):
wcb.placement()
    wcb.generate(1000, retry=1) #allow teleport but don't allow rescale
if wcb.getstate() != ":generate!":
print("=overall tuning=")
    wcb.generate(1000, patient=-1, retry=2) #allow rescale but don't allow teleport
ma = wca.paint()
mb = wcb.paint()
sp = ma.width//20
cmp = Image.new('RGBA', (ma.width*2+sp, ma.height))
cmp.paste(ma, (0, 0, ma.width, ma.height))
cmp.paste(mb, (ma.width+sp, 0, ma.width*2+sp, ma.height))
os.makedirs('address_compare', exist_ok=True)
print("results are saved in address_compare")
cmp.save("address_compare/compare.png")
gif = W.GIF("address_compare")
wca.record("Obama", gif)
wcb.record("Trump", gif)
W.gif_generate(gif, framerate=1)
#md# 
#md#  | [
"wordcloud2.wordcloud.GIF",
"os.makedirs",
"PIL.Image.new",
"wordcloud2.wordcloud.randomangles",
"wordcloud2.wordcloud.pkgdir",
"wordcloud2.wordcloud.randomscheme",
"wordcloud2.wordcloud.gif_generate"
] | [((123, 139), 'wordcloud2.wordcloud.randomscheme', 'W.randomscheme', ([], {}), '()\n', (137, 139), True, 'from wordcloud2 import wordcloud as W\n'), ((155, 171), 'wordcloud2.wordcloud.randomangles', 'W.randomangles', ([], {}), '()\n', (169, 171), True, 'from wordcloud2 import wordcloud as W\n'), ((2016, 2065), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(ma.width * 2 + sp, ma.height)'], {}), "('RGBA', (ma.width * 2 + sp, ma.height))\n", (2025, 2065), False, 'from PIL import Image\n'), ((2163, 2208), 'os.makedirs', 'os.makedirs', (['"""address_compare"""'], {'exist_ok': '(True)'}), "('address_compare', exist_ok=True)\n", (2174, 2208), False, 'import os\n'), ((2301, 2325), 'wordcloud2.wordcloud.GIF', 'W.GIF', (['"""address_compare"""'], {}), "('address_compare')\n", (2306, 2325), True, 'from wordcloud2 import wordcloud as W\n'), ((2376, 2408), 'wordcloud2.wordcloud.gif_generate', 'W.gif_generate', (['gif'], {'framerate': '(1)'}), '(gif, framerate=1)\n', (2390, 2408), True, 'from wordcloud2 import wordcloud as W\n'), ((254, 275), 'wordcloud2.wordcloud.pkgdir', 'W.pkgdir', (['W.WordCloud'], {}), '(W.WordCloud)\n', (262, 275), True, 'from wordcloud2 import wordcloud as W\n'), ((589, 610), 'wordcloud2.wordcloud.pkgdir', 'W.pkgdir', (['W.WordCloud'], {}), '(W.WordCloud)\n', (597, 610), True, 'from wordcloud2 import wordcloud as W\n')] |
# Copyright (c) <NAME>, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
import itk
import module_kits.itk_kit as itk_kit
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
class nbCurvesLevelSet(ScriptedConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
# setup defaults
self._config.propagationScaling = 1.0
self._config.advectionScaling = 1.0
self._config.curvatureScaling = 1.0
self._config.numberOfIterations = 500
configList = [
('Propagation scaling:', 'propagationScaling', 'base:float',
'text', 'Weight factor for the propagation term'),
('Advection scaling:', 'advectionScaling', 'base:float',
'text', 'Weight factor for the advection term'),
('Curvature scaling:', 'curvatureScaling', 'base:float',
'text', 'Weight factor for the curvature term'),
('Number of iterations:', 'numberOfIterations', 'base:int',
'text',
'Number of iterations that the algorithm should be run for')]
ScriptedConfigModuleMixin.__init__(
self, configList,
{'Module (self)' : self})
# create all pipeline thingies
self._createITKPipeline()
self.sync_module_logic_with_config()
def close(self):
self._destroyITKPipeline()
ScriptedConfigModuleMixin.close(self)
ModuleBase.close(self)
def execute_module(self):
self.get_output(0).Update()
def get_input_descriptions(self):
return ('Feature image (ITK)', 'Initial level set (ITK)' )
def set_input(self, idx, inputStream):
if idx == 0:
self._nbcLS.SetFeatureImage(inputStream)
else:
self._nbcLS.SetInput(inputStream)
def get_output_descriptions(self):
return ('Image Data (ITK)',)
def get_output(self, idx):
return self._nbcLS.GetOutput()
def config_to_logic(self):
self._nbcLS.SetPropagationScaling(
self._config.propagationScaling)
self._nbcLS.SetAdvectionScaling(
self._config.advectionScaling)
self._nbcLS.SetCurvatureScaling(
self._config.curvatureScaling)
def logic_to_config(self):
self._config.propagationScaling = self._nbcLS.\
GetPropagationScaling()
self._config.advectionScaling = self._nbcLS.GetAdvectionScaling()
self._config.curvatureScaling = self._nbcLS.GetCurvatureScaling()
# --------------------------------------------------------------------
# END OF API CALLS
# --------------------------------------------------------------------
def _createITKPipeline(self):
# input: smoothing.SetInput()
# output: thresholder.GetOutput()
if3 = itk.Image[itk.F, 3]
self._nbcLS = itk.NarrowBandCurvesLevelSetImageFilter[if3,if3].New()
#self._nbcLS.SetMaximumRMSError( 0.1 );
self._nbcLS.SetNumberOfIterations( 500 );
itk_kit.utils.setupITKObjectProgress(
self, self._nbcLS,
'NarrowBandCurvesLevelSetImageFilter',
'Evolving level set')
def _destroyITKPipeline(self):
"""Delete all bindings to components of the ITK pipeline.
"""
del self._nbcLS
| [
"module_base.ModuleBase.close",
"module_mixins.ScriptedConfigModuleMixin.__init__",
"module_kits.itk_kit.utils.setupITKObjectProgress",
"module_mixins.ScriptedConfigModuleMixin.close",
"module_base.ModuleBase.__init__"
] | [((336, 377), 'module_base.ModuleBase.__init__', 'ModuleBase.__init__', (['self', 'module_manager'], {}), '(self, module_manager)\n', (355, 377), False, 'from module_base import ModuleBase\n'), ((1208, 1285), 'module_mixins.ScriptedConfigModuleMixin.__init__', 'ScriptedConfigModuleMixin.__init__', (['self', 'configList', "{'Module (self)': self}"], {}), "(self, configList, {'Module (self)': self})\n", (1242, 1285), False, 'from module_mixins import ScriptedConfigModuleMixin\n'), ((1505, 1542), 'module_mixins.ScriptedConfigModuleMixin.close', 'ScriptedConfigModuleMixin.close', (['self'], {}), '(self)\n', (1536, 1542), False, 'from module_mixins import ScriptedConfigModuleMixin\n'), ((1551, 1573), 'module_base.ModuleBase.close', 'ModuleBase.close', (['self'], {}), '(self)\n', (1567, 1573), False, 'from module_base import ModuleBase\n'), ((3201, 3321), 'module_kits.itk_kit.utils.setupITKObjectProgress', 'itk_kit.utils.setupITKObjectProgress', (['self', 'self._nbcLS', '"""NarrowBandCurvesLevelSetImageFilter"""', '"""Evolving level set"""'], {}), "(self, self._nbcLS,\n 'NarrowBandCurvesLevelSetImageFilter', 'Evolving level set')\n", (3237, 3321), True, 'import module_kits.itk_kit as itk_kit\n')] |
"""
Sep 21 -- A few of the plots used in analysis, very far from a complete list, and probably most are too specific to be
useful again.
Moved useful functions from here.
"""
from __future__ import annotations
from typing import List, Callable, Optional, Union, TYPE_CHECKING
import numpy as np
from dat_analysis.analysis_tools.entropy import dat_integrated_sub_lin
from dat_analysis.plotting.plotly.hover_info import HoverInfo
if TYPE_CHECKING:
pass
def common_dat_hover_infos(datnum=True,
heater_bias=False,
fit_entropy_name: Optional[str] = None,
fit_entropy=False,
int_info_name: Optional[str] = None,
output_name: Optional[str] = None,
integrated_entropy=False,
sub_lin: bool = False,
sub_lin_width: Optional[Union[float, Callable]] = None,
int_info=False,
amplitude=False,
theta=False,
gamma=False,
) -> List[HoverInfo]:
"""
Returns a list of HoverInfos for the specified parameters. To do more complex things, append specific
HoverInfos before/after this.
Examples:
hover_infos = common_dat_hover_infos(datnum=True, amplitude=True, theta=True)
hover_group = HoverInfoGroup(hover_infos)
Args:
datnum ():
heater_bias ():
fit_entropy_name (): Name of saved fit_entropy if wanting fit_entropy
fit_entropy ():
int_info_name (): Name of int_info if wanting int_info or integrated_entropy
output_name (): Name of SE output to integrate (defaults to int_info_name)
integrated_entropy ():
sub_lin (): Whether to subtract linear term from integrated_info first
sub_lin_width (): Width of transition to avoid in determining linear terms
int_info (): amp/dT/sf from int_info
Returns:
List[HoverInfo]:
"""
hover_infos = []
if datnum:
hover_infos.append(HoverInfo(name='Dat', func=lambda dat: dat.datnum, precision='.d', units=''))
if heater_bias:
hover_infos.append(HoverInfo(name='Bias', func=lambda dat: dat.AWG.max(0) / 10, precision='.1f', units='nA'))
if fit_entropy:
hover_infos.append(HoverInfo(name='Fit Entropy',
func=lambda dat: dat.Entropy.get_fit(name=fit_entropy_name,
check_exists=True).best_values.dS,
precision='.2f', units='kB'), )
if integrated_entropy:
if output_name is None:
output_name = int_info_name
if sub_lin:
if sub_lin_width is None:
                raise ValueError(f'Must specify sub_lin_width if subtracting linear term from integrated entropy')
            elif not isinstance(sub_lin_width, Callable):
                fixed_width = sub_lin_width
                sub_lin_width = lambda _: fixed_width  # wrap a plain value in a function so it can be called like one
data = lambda dat: dat_integrated_sub_lin(dat, signal_width=sub_lin_width(dat), int_info_name=int_info_name,
output_name=output_name)
hover_infos.append(HoverInfo(name='Sub lin width', func=sub_lin_width, precision='.1f', units='mV'))
else:
data = lambda dat: dat.Entropy.get_integrated_entropy(
name=int_info_name,
data=dat.SquareEntropy.get_Outputs(
name=output_name).average_entropy_signal)
hover_infos.append(HoverInfo(name='Integrated Entropy',
func=lambda dat: np.nanmean(data(dat)[-10:]),
precision='.2f', units='kB'))
if int_info:
info = lambda dat: dat.Entropy.get_integration_info(name=int_info_name)
hover_infos.append(HoverInfo(name='SF amp',
func=lambda dat: info(dat).amp,
precision='.3f',
units='nA'))
hover_infos.append(HoverInfo(name='SF dT',
func=lambda dat: info(dat).dT,
precision='.3f',
units='mV'))
hover_infos.append(HoverInfo(name='SF',
func=lambda dat: info(dat).sf,
precision='.3f',
units=''))
return hover_infos
| [
"dat_analysis.plotting.plotly.hover_info.HoverInfo"
] | [((2160, 2236), 'dat_analysis.plotting.plotly.hover_info.HoverInfo', 'HoverInfo', ([], {'name': '"""Dat"""', 'func': '(lambda dat: dat.datnum)', 'precision': '""".d"""', 'units': '""""""'}), "(name='Dat', func=lambda dat: dat.datnum, precision='.d', units='')\n", (2169, 2236), False, 'from dat_analysis.plotting.plotly.hover_info import HoverInfo\n'), ((3407, 3492), 'dat_analysis.plotting.plotly.hover_info.HoverInfo', 'HoverInfo', ([], {'name': '"""Sub lin width"""', 'func': 'sub_lin_width', 'precision': '""".1f"""', 'units': '"""mV"""'}), "(name='Sub lin width', func=sub_lin_width, precision='.1f', units='mV'\n )\n", (3416, 3492), False, 'from dat_analysis.plotting.plotly.hover_info import HoverInfo\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 23:54:16 2021
@author: rolandvarga
"""
import gym
import numpy as np
import matplotlib.pyplot as plt
import time
from scipy.signal import savgol_filter
import pickle
#%matplotlib qt
#%matplotlib inline
# Set to 1 to repeat SARSA learning (With Intel Core i7-8750H it takes
# around 70 minutes), 0 for loading previous result
REPEAT_LEARNING = 0
# Parameter to set which tests to do
DO_TEST1 = 1 # Simulate the system once and plot the trajectory
DO_TEST2 = 0 # Simulate the system 1000 times and plot success-rate
# Set to 1 to plot a projection of the state-value function V
PLOT_STATEVALUE = 1
#%% Load previous result
if REPEAT_LEARNING == 0:
filename='train_6x6x20x60000.pickle'
with open(filename, 'rb') as f:
cell_nums, dhat, durations, Q, reward_set, rhat, start_time, end_time, states_high, max_steps = pickle.load(f)
#%% SARSA learning
env = gym.make('SphericalRobot-v0')
#Function to choose the next action
def choose_action(state, eps):
action=0
if np.random.uniform(0, 1) < eps:
# Select a random action
action = env.action_space.sample()
else:
# Choose greedy action
action = np.array(np.unravel_index(np.argmax(Q[state], axis=None), Q[state].shape))
# action = np.argmax(Q[state])
return action
#Convert continuous state-space to discrete
def discretize_state(observation_c, low, high, cell_nums):
# Initialize the discretized observation
observation_d = []
# Loop through and discretize all 3 states
for state,low_val,high_val,c_num in zip(observation_c,low,high,cell_nums):
# Define intervals for the possible values
bins = np.linspace(low_val,high_val,c_num+1,endpoint=True)
# Discretize with NumPy function
state_d = np.digitize(state, bins, right=True)
# Check if the discrete values are valid
assert state_d > 0 and state_d <= c_num
observation_d.append(state_d-1) # -1 to have values start at 0
return observation_d
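# Illustrative example: with low=-6, high=6 and c_num=6 the bin edges are
# [-6, -4, -2, 0, 2, 4, 6], so a raw value of 0.5 is digitized into bin 4 and
# returned as discrete cell 3.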
if REPEAT_LEARNING == 1:
# Learning parameters
epsilon = 0.3 # For start
total_episodes = 100
max_steps = 300
alpha = 0.1
gamma = 0.99
# The discretization of the states
states_high = np.array([6,6,2*np.pi/env.c]) # Set boundaries for the values
cell_nums = np.array([6,6,20]) # Set the number of discrete cells
#Initializing the Q-matrix
Q = np.ones(np.append(cell_nums,[3,3]))
#Function to update the Q-value
def update(state, state2, reward, action, action2):
predict = Q[state][action]
target = reward + gamma * Q[state2][action2]
Q[state][action] = Q[state][action] + alpha * (target - predict)
#Initializing the reward
# reward=0
reward_set = []
durations = []
start_time = time.time()
# Starting the SARSA learning
for episode in range(total_episodes):
t = 0
cumm_reward = 0
state1 = env.reset()
state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
action1 = choose_action(tuple(state1_d), epsilon)
states = [state1]
while t < max_steps:
# Visualizing the training, TODO
# env.render()
# Getting the next state
state2, reward, done, info = env.step(action1)
# Note: The 3rd state is the difference between the wheel angles
state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
-states_high, states_high, cell_nums)
state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
-states_high, states_high, cell_nums)
# Choosing the next action
action2 = choose_action(tuple(state2_d), epsilon)
# Updating the Q-value
update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))
# Update variables for next iteration
state1 = state2
action1 = action2
# Save state to be able to plot trajectories
states.append(state2)
            #Updating the respective values
            t += 1
            cumm_reward += reward
            #If at the end of the episode
if done:
break
reward_set.append(cumm_reward)
durations.append(t)
# plt.figure(0)
# x = np.array(states)[:,0]
# y = np.array(states)[:,1]
# plt.scatter(x, y)
# plt.xlim(-5, 5)
# plt.ylim(-5, 5)
# plt.show()
# Print time it took to run the learning
end_time = time.time()
print("--- %s seconds ---" % (end_time - start_time))
# Plot the filtered rewards during the learning
plt.figure(1)
#plt.plot(reward_set)
rhat = savgol_filter(reward_set, 501, 3) # window size 501, polynomial order 3
    plt.plot(rhat, label="Filtered reward")
#plt.ylim(-500, 500)
plt.xlabel(r"Episode [-]")
plt.ylabel(r"Reward [-]")
plt.legend()
plt.savefig('reward_learning.eps', format='eps', bbox_inches='tight')
plt.show()
# Plot the filtered episode lengths during the learning
plt.figure(2)
#plt.plot(durations)
dhat = savgol_filter(durations, 51, 3) # window size 51, polynomial order 3
plt.plot(dhat)
plt.show()
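    # A minimal sketch (not part of the original run) of how the results could be
    # pickled so that the REPEAT_LEARNING == 0 branch above can reload them later;
    # the tuple order mirrors the pickle.load unpacking at the top of the script:
    # with open('train_6x6x20x60000.pickle', 'wb') as f:
    #     pickle.dump((cell_nums, dhat, durations, Q, reward_set, rhat,
    #                  start_time, end_time, states_high, max_steps), f)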
#%% Test 1: Generate one trajectory
if DO_TEST1 == 1:
t = 0
cumm_reward = 0
state1 = env.reset()
state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
action1 = choose_action(tuple(state1_d), 0.0)
states = [state1]
actions = [action1]
while t < max_steps:
#Visualizing the training
# env.render()
#Getting the next state
state2, reward, done, info = env.step(action1)
state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
-states_high, states_high, cell_nums)
state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
-states_high, states_high, cell_nums)
#Choosing the next action
action2 = choose_action(tuple(state2_d), 0.0)
#Learning the Q-value
#update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))
state1 = state2
action1 = action2
states.append(state2)
actions.append(action2)
        #Updating the respective values
        t += 1
        cumm_reward += reward
        #If at the end of the episode
if done:
break
print(reward)
# Plot trajectory on 2D plot
plt.figure(3)
x = np.array(states)[:,0]
y = np.array(states)[:,1]
    plt.scatter(x, y, label="Robot position")
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.xticks(np.arange(-5, 6, 1))
plt.yticks(np.arange(-5, 6, 1))
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel(r"$x_1$ [m]")
plt.ylabel(r"$x_2$ [m]")
plt.legend()
plt.savefig('trajectory.eps', format='eps', bbox_inches='tight')
plt.show()
# Plot position states separately
plt.figure(4)
plt.plot(x, label="x1")
plt.plot(y, label="x2")
plt.xlabel(r"Time step [-]")
plt.ylabel(r"Coordinate [m]")
plt.legend()
plt.savefig('trajectory_plot.eps', format='eps', bbox_inches='tight')
plt.show()
#%% Test 2: Successful-unsuccessful tries
if DO_TEST2 == 1:
cumm_rewards = []
for k in range(1000):
t = 0
cumm_reward = 0
state1 = env.reset()
state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
action1 = choose_action(tuple(state1_d), 0.0)
while t < max_steps:
#Visualizing the training
# env.render()
#Getting the next state
state2, reward, done, info = env.step(action1)
state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
-states_high, states_high, cell_nums)
state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
-states_high, states_high, cell_nums)
#Choosing the next action
action2 = choose_action(tuple(state2_d), 0.0)
#Learning the Q-value
#update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))
state1 = state2
action1 = action2
#states.append(state2)
#actions.append(action2)
            #Updating the respective values
            t += 1
            cumm_reward += reward
            #If at the end of the episode
if done:
break
cumm_rewards.append(cumm_reward)
print("Average reward out of 1000 try: " + str(np.average(np.array(cumm_rewards))))
plt.figure(5)
plt.hist(cumm_rewards,np.array([-1000,0,1000]))
plt.show()
#%% Additional plot: State-value function
if PLOT_STATEVALUE == 1:
V = np.zeros([cell_nums[0],cell_nums[1]])
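    # V is a projection of the state-value function onto the (x1, x2) grid:
    # for each position cell we take the maximum of Q over the remaining
    # wheel-angle-difference state and both action indices.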
for k in range(V.shape[0]):
for l in range(V.shape[1]):
V[k,l]=np.amax(Q[k,l,:])
plt.figure(6)
plt.imshow(V, cmap='coolwarm', interpolation='nearest')
plt.colorbar()
plt.savefig('state_value.eps', format='eps', bbox_inches='tight')
plt.show()
| [
"matplotlib.pyplot.ylabel",
"scipy.signal.savgol_filter",
"numpy.array",
"gym.make",
"numpy.arange",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"numpy.... | [((973, 1002), 'gym.make', 'gym.make', (['"""SphericalRobot-v0"""'], {}), "('SphericalRobot-v0')\n", (981, 1002), False, 'import gym\n'), ((2376, 2411), 'numpy.array', 'np.array', (['[6, 6, 2 * np.pi / env.c]'], {}), '([6, 6, 2 * np.pi / env.c])\n', (2384, 2411), True, 'import numpy as np\n'), ((2458, 2478), 'numpy.array', 'np.array', (['[6, 6, 20]'], {}), '([6, 6, 20])\n', (2466, 2478), True, 'import numpy as np\n'), ((2965, 2976), 'time.time', 'time.time', ([], {}), '()\n', (2974, 2976), False, 'import time\n'), ((5005, 5016), 'time.time', 'time.time', ([], {}), '()\n', (5014, 5016), False, 'import time\n'), ((5136, 5149), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (5146, 5149), True, 'import matplotlib.pyplot as plt\n'), ((5187, 5220), 'scipy.signal.savgol_filter', 'savgol_filter', (['reward_set', '(501)', '(3)'], {}), '(reward_set, 501, 3)\n', (5200, 5220), False, 'from scipy.signal import savgol_filter\n'), ((5263, 5277), 'matplotlib.pyplot.plot', 'plt.plot', (['rhat'], {}), '(rhat)\n', (5271, 5277), True, 'import matplotlib.pyplot as plt\n'), ((5307, 5332), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode [-]"""'], {}), "('Episode [-]')\n", (5317, 5332), True, 'import matplotlib.pyplot as plt\n'), ((5338, 5362), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reward [-]"""'], {}), "('Reward [-]')\n", (5348, 5362), True, 'import matplotlib.pyplot as plt\n'), ((5368, 5380), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5378, 5380), True, 'import matplotlib.pyplot as plt\n'), ((5385, 5454), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""reward_learning.eps"""'], {'format': '"""eps"""', 'bbox_inches': '"""tight"""'}), "('reward_learning.eps', format='eps', bbox_inches='tight')\n", (5396, 5454), True, 'import matplotlib.pyplot as plt\n'), ((5459, 5469), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5467, 5469), True, 'import matplotlib.pyplot as plt\n'), ((5539, 5552), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (5549, 5552), True, 'import matplotlib.pyplot as plt\n'), ((5589, 5620), 'scipy.signal.savgol_filter', 'savgol_filter', (['durations', '(51)', '(3)'], {}), '(durations, 51, 3)\n', (5602, 5620), False, 'from scipy.signal import savgol_filter\n'), ((5662, 5676), 'matplotlib.pyplot.plot', 'plt.plot', (['dhat'], {}), '(dhat)\n', (5670, 5676), True, 'import matplotlib.pyplot as plt\n'), ((5681, 5691), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5689, 5691), True, 'import matplotlib.pyplot as plt\n'), ((7114, 7127), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (7124, 7127), True, 'import matplotlib.pyplot as plt\n'), ((7192, 7209), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (7203, 7209), True, 'import matplotlib.pyplot as plt\n'), ((7214, 7229), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-5)', '(5)'], {}), '(-5, 5)\n', (7222, 7229), True, 'import matplotlib.pyplot as plt\n'), ((7234, 7249), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-5)', '(5)'], {}), '(-5, 5)\n', (7242, 7249), True, 'import matplotlib.pyplot as plt\n'), ((7378, 7401), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x_1$ [m]"""'], {}), "('$x_1$ [m]')\n", (7388, 7401), True, 'import matplotlib.pyplot as plt\n'), ((7407, 7430), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$x_2$ [m]"""'], {}), "('$x_2$ [m]')\n", (7417, 7430), True, 'import matplotlib.pyplot as plt\n'), ((7436, 7448), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7446, 7448), True, 
'import matplotlib.pyplot as plt\n'), ((7453, 7517), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""trajectory.eps"""'], {'format': '"""eps"""', 'bbox_inches': '"""tight"""'}), "('trajectory.eps', format='eps', bbox_inches='tight')\n", (7464, 7517), True, 'import matplotlib.pyplot as plt\n'), ((7522, 7532), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7530, 7532), True, 'import matplotlib.pyplot as plt\n'), ((7580, 7593), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (7590, 7593), True, 'import matplotlib.pyplot as plt\n'), ((7598, 7621), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {'label': '"""x1"""'}), "(x, label='x1')\n", (7606, 7621), True, 'import matplotlib.pyplot as plt\n'), ((7626, 7649), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {'label': '"""x2"""'}), "(y, label='x2')\n", (7634, 7649), True, 'import matplotlib.pyplot as plt\n'), ((7654, 7681), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time step [-]"""'], {}), "('Time step [-]')\n", (7664, 7681), True, 'import matplotlib.pyplot as plt\n'), ((7687, 7715), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Coordinate [m]"""'], {}), "('Coordinate [m]')\n", (7697, 7715), True, 'import matplotlib.pyplot as plt\n'), ((7721, 7733), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7731, 7733), True, 'import matplotlib.pyplot as plt\n'), ((7738, 7807), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""trajectory_plot.eps"""'], {'format': '"""eps"""', 'bbox_inches': '"""tight"""'}), "('trajectory_plot.eps', format='eps', bbox_inches='tight')\n", (7749, 7807), True, 'import matplotlib.pyplot as plt\n'), ((7812, 7822), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7820, 7822), True, 'import matplotlib.pyplot as plt\n'), ((9488, 9501), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (9498, 9501), True, 'import matplotlib.pyplot as plt\n'), ((9558, 9568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9566, 9568), True, 'import matplotlib.pyplot as plt\n'), ((9645, 9683), 'numpy.zeros', 'np.zeros', (['[cell_nums[0], cell_nums[1]]'], {}), '([cell_nums[0], cell_nums[1]])\n', (9653, 9683), True, 'import numpy as np\n'), ((9802, 9815), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {}), '(6)\n', (9812, 9815), True, 'import matplotlib.pyplot as plt\n'), ((9820, 9875), 'matplotlib.pyplot.imshow', 'plt.imshow', (['V'], {'cmap': '"""coolwarm"""', 'interpolation': '"""nearest"""'}), "(V, cmap='coolwarm', interpolation='nearest')\n", (9830, 9875), True, 'import matplotlib.pyplot as plt\n'), ((9880, 9894), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (9892, 9894), True, 'import matplotlib.pyplot as plt\n'), ((9899, 9964), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""state_value.eps"""'], {'format': '"""eps"""', 'bbox_inches': '"""tight"""'}), "('state_value.eps', format='eps', bbox_inches='tight')\n", (9910, 9964), True, 'import matplotlib.pyplot as plt\n'), ((9969, 9979), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9977, 9979), True, 'import matplotlib.pyplot as plt\n'), ((926, 940), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (937, 940), False, 'import pickle\n'), ((1091, 1114), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1108, 1114), True, 'import numpy as np\n'), ((1762, 1818), 'numpy.linspace', 'np.linspace', (['low_val', 'high_val', '(c_num + 1)'], {'endpoint': '(True)'}), '(low_val, high_val, c_num + 1, endpoint=True)\n', (1773, 1818), True, 'import numpy as np\n'), 
((1882, 1918), 'numpy.digitize', 'np.digitize', (['state', 'bins'], {'right': '(True)'}), '(state, bins, right=True)\n', (1893, 1918), True, 'import numpy as np\n'), ((2569, 2597), 'numpy.append', 'np.append', (['cell_nums', '[3, 3]'], {}), '(cell_nums, [3, 3])\n', (2578, 2597), True, 'import numpy as np\n'), ((7136, 7152), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (7144, 7152), True, 'import numpy as np\n'), ((7166, 7182), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (7174, 7182), True, 'import numpy as np\n'), ((7265, 7284), 'numpy.arange', 'np.arange', (['(-5)', '(6)', '(1)'], {}), '(-5, 6, 1)\n', (7274, 7284), True, 'import numpy as np\n'), ((7301, 7320), 'numpy.arange', 'np.arange', (['(-5)', '(6)', '(1)'], {}), '(-5, 6, 1)\n', (7310, 7320), True, 'import numpy as np\n'), ((9528, 9554), 'numpy.array', 'np.array', (['[-1000, 0, 1000]'], {}), '([-1000, 0, 1000])\n', (9536, 9554), True, 'import numpy as np\n'), ((6218, 6273), 'numpy.array', 'np.array', (['[state1[0], state1[1], state1[2] - state1[3]]'], {}), '([state1[0], state1[1], state1[2] - state1[3]])\n', (6226, 6273), True, 'import numpy as np\n'), ((6383, 6438), 'numpy.array', 'np.array', (['[state2[0], state2[1], state2[2] - state2[3]]'], {}), '([state2[0], state2[1], state2[2] - state2[3]])\n', (6391, 6438), True, 'import numpy as np\n'), ((7326, 7335), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7333, 7335), True, 'import matplotlib.pyplot as plt\n'), ((9775, 9794), 'numpy.amax', 'np.amax', (['Q[k, l, :]'], {}), '(Q[k, l, :])\n', (9782, 9794), True, 'import numpy as np\n'), ((1282, 1312), 'numpy.argmax', 'np.argmax', (['Q[state]'], {'axis': 'None'}), '(Q[state], axis=None)\n', (1291, 1312), True, 'import numpy as np\n'), ((3658, 3713), 'numpy.array', 'np.array', (['[state1[0], state1[1], state1[2] - state1[3]]'], {}), '([state1[0], state1[1], state1[2] - state1[3]])\n', (3666, 3713), True, 'import numpy as np\n'), ((3830, 3885), 'numpy.array', 'np.array', (['[state2[0], state2[1], state2[2] - state2[3]]'], {}), '([state2[0], state2[1], state2[2] - state2[3]])\n', (3838, 3885), True, 'import numpy as np\n'), ((8403, 8458), 'numpy.array', 'np.array', (['[state1[0], state1[1], state1[2] - state1[3]]'], {}), '([state1[0], state1[1], state1[2] - state1[3]])\n', (8411, 8458), True, 'import numpy as np\n'), ((8576, 8631), 'numpy.array', 'np.array', (['[state2[0], state2[1], state2[2] - state2[3]]'], {}), '([state2[0], state2[1], state2[2] - state2[3]])\n', (8584, 8631), True, 'import numpy as np\n'), ((9453, 9475), 'numpy.array', 'np.array', (['cumm_rewards'], {}), '(cumm_rewards)\n', (9461, 9475), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 15:33:47 2019
@author: Bogoclu
"""
import typing
import multiprocessing as mp
import warnings
import numpy as np
from scipy import stats
from .space import FullSpace
from duqo.proba import DS, MC, SUSE, ISPUD, FORM
from duqo.doe.lhs import make_doe
def _check_obj_wgt(obj_weights, num_obj):
""" Check obj_wgt argument passed to CondMom """
if obj_weights is None:
return None
try:
_ = obj_weights[0]
except (TypeError, IndexError):
obj_weights = np.ones(num_obj) * obj_weights
if len(obj_weights) != num_obj:
msg = f"Mismatch between the number of entries ({len(obj_weights)} in "
msg += f"obj_wgt and the number of stochastic objectives ({num_obj})."
raise ValueError(msg)
return np.array(obj_weights).ravel()
def _check_std_inds(use_std, num_obj):
""" Check use_std argument passed to CondMom and
convert it to a slice definition
"""
if isinstance(use_std, bool):
inds = [use_std] * num_obj
if len(inds) != num_obj:
msg = "Mismatch between the number of entries in "
msg += "use_std and the number of stochastic objectives."
raise ValueError(msg)
return np.array(use_std, dtype=bool)
def _find_integrator_cls(integrator):
"""
Find the Integrator class as defined by the string integrator
"""
integrator = integrator.upper()
if integrator == "DS":
IntCls = DS
elif integrator == "MC":
IntCls = MC
elif integrator == "ISPUD":
IntCls = ISPUD
elif integrator == "FORM":
IntCls = FORM
elif integrator == "SUSE":
IntCls = SUSE
else:
msg = f"Requested integrator {integrator} is not found."
raise ValueError(msg)
return IntCls
def _make_chain(methods: list):
"""Makes the chain given a list of method names"""
try:
first = methods[0]
except TypeError:
raise TypeError(f"methods must be a list of strings or classes, not {type(methods)}")
try:
_ = first.upper()
except AttributeError:
return methods
return [_find_integrator_cls(name.upper()) for name in methods]
def _n_para_chk(num_parallel: int = None):
""" Check the num_parallel argument as passed to CondProb """
n_procs = max(1, mp.cpu_count()) # could cpu_count ever be < 1?
if num_parallel is None or num_parallel > n_procs:
print(f"Number of parallel processes was set to {n_procs}")
return n_procs
return num_parallel
def _default_init(targ_prob: float, acc_max: float, num_inp: int,
num_para: int):
"""Decide the default integrator chain methods and arguments depending
on the problem
Parameters
----------
targ_prob : float
target failure probability
acc_max : float
target tolerance for the estimation
num_inp : int
number of stochastic inputs of the constraints
num_para : int
number of parallel processes to use
Returns
-------
integrators : list
Integrator classes, that are to be initiated
int_args : dict
Keyword arguments to pass to integrators
"""
if targ_prob * acc_max >= 1e-5:
if targ_prob * acc_max >= 1e-4:
integrators = ["MC"]
else:
integrators = ["SUSE", "MC"]
int_args = {"num_starts": 1, "batch_size": 1e5}
elif num_inp < 15:
integrators = ["SUSE", "DS"]
int_args = {"num_starts": 1}
else:
integrators = ["SUSE"]
int_args = {"num_starts": num_para}
print("Using", integrators, "as default chain.")
return integrators, int_args
def _is_worker(workers, name):
""" check if name is in workers list of classes"""
for worker in workers:
wname = read_integrator_name(worker)
if name.upper() in wname.upper():
return True
return False
def read_integrator_name(worker):
""" read the name of the integrator instance worker """
name = str(worker).split(".")[-1]
return "".join([c for c in name if c.isalnum()])
class CondMom:
"""Class to estimate conditional means
full_space : FullSpace instance
The definition of the optimization and stochastic spaces
base_doe : int or np.ndarray
set if a new doe should be calculated or the same one should
be transformed during the optimization.
if array, it should have zero mean and unit variance
but the original marginal distributions and correlation.
it should have same number of columns as stochastic variables
used in the objective. If integer, a base_doe with that number of
samples will be created
doe_size : int
The size of the doe to use. If base_doe is a numpy array, this
has no effect and doesn't have to be passed.
obj_wgt : float or iterable of floats:
If not None, these weights will be used for combining the
estimated mean and the variance/std. dev. If iterable, it
must be the same length as the number of stochastic input
variables as used for the objective function.
If None, the variances are returned separetly
use_std : bool or iterable of bools
Flag to use standard deviation (True) or the variance for the
estimation. If iterable, it must be the same length as the number
of stochastic input variables as used for the objective function.
"""
def __init__(self, full_space: FullSpace, base_doe: typing.Union[bool, np.ndarray] = True,
doe_size: int = 100, obj_wgt: typing.Optional[typing.Union[float, list, np.ndarray]] = None,
use_std: typing.Union[bool, list] = False):
self.full_space = full_space
num_obj = len(self.full_space.obj_inds["sto"])
self._use_std = _check_std_inds(use_std, num_obj)
self._obj_wgt = _check_obj_wgt(obj_wgt, num_obj)
self._doe_size = None
self._base_doe = None
self.doe_size = doe_size
self.base_doe = base_doe
@property
def base_doe(self):
"""Base doe to use for the moment estimation
Don't set this to an array with truncnorm and lognormal distributions
in the MultiVariate if you don't know exactly what you are doing.
"""
return self._base_doe
@base_doe.setter
def base_doe(self, new_doe):
"""Base doe to use for the moment estimation
Don't set this to an array with truncnorm and lognormal distributions
in the MultiVariate if you don't know exactly what you are doing.
"""
# Sanity checks for base_doe. Using parameters with multiple valid types
# may be an antipattern but it makes configuration easier from
# the user point of view. Tolerate this for a better user experience.
if isinstance(new_doe, np.ndarray):
if self._is_valid_base(new_doe): # raises errors
self._base_doe = new_doe.copy() # Make our copy.
return
try:
make_base_doe = bool(new_doe)
except ValueError:
return
if make_base_doe:
# Prepare doe with zero mean and unit variance
doe = self.full_space.inp_space.sto_obj_base_doe(self.doe_size)
self._base_doe = doe
return
# if not bool(new_doe); remake new doe so set base_doe to None
self._base_doe = None
return
def _is_valid_base(self, new_doe):
# Assume numpy array
n_sto_obj_inps = len(self.full_space.inp_space.inds["sto_obj"])
if new_doe.shape[1] != n_sto_obj_inps:
msg = "base_doe must be one of None, bool or a 2d array "
msg += f"with shape (num_samples, num_stochastic_objective_variables={n_sto_obj_inps})."
raise TypeError(msg)
if max(abs(new_doe.mean(0).max()), abs(1 - new_doe.std(0).max())) > 0.5:
msg = "base_doe must have zero mean and unit variance."
raise ValueError(msg)
return True
@property
def doe_size(self):
"""Size of the base doe to use for the moment estimation"""
return self._doe_size
@doe_size.setter
def doe_size(self, new_size):
"""Size of the base doe to use for the moment estimation"""
self._doe_size = new_size
if self.base_doe is not None:
self.base_doe = new_size
@property
def obj_wgt(self):
"""Weights for the linear combination of cond. moments"""
return self._obj_wgt
@obj_wgt.setter
def obj_wgt(self, new_obj_wgt):
"""Weights for the linear combination of cond. moments"""
n_obj = len(self.full_space.obj_inds["sto"])
self._obj_wgt = _check_obj_wgt(new_obj_wgt, n_obj)
@property
def use_std(self):
"""Indexes to use std. dev. instead of variance"""
return self._use_std
@use_std.setter
def use_std(self, new_std):
"""Indexes to use std. dev. instead of variance"""
n_obj = len(self.full_space.obj_inds["sto"])
self._use_std = _check_std_inds(new_std, n_obj)
def gen_doe(self, x_opt):
"""Get DoE for the Moment estimation for x_opt"""
if x_opt.ndim == 1:
x_opt = x_opt.reshape((1, -1))
if self.base_doe is None:
return self.full_space.inp_space.sto_obj_doe(x_opt, self._doe_size)
mean, std = self.full_space.inp_space.opt_moms(x_opt)
names = self.full_space.inp_space.mulvar.names
names = [names[i] for i in self.full_space.inp_space.mv_inds("sto_obj")]
# Translating is not sufficient for lognormal and truncated normal
inds = [i for i, x in enumerate(names) if "log" in x or "trunc" in x]
if not inds:
return self.base_doe * std + mean
# Handle Lognormal
binds = np.ones(self.base_doe.shape[1], dtype=bool)
binds[inds] = False
base_doe = self.base_doe.copy()
base_doe[:, binds] = base_doe[:, binds] * std[binds] + mean[binds]
mean = mean[inds]
std = std[inds]
cur_mv = self.full_space.inp_space.opt_mulvar(x_opt, domain="sto_obj")
for ind in inds:
base_doe[:, ind] = cur_mv.dists[ind].marg.ppf(base_doe[:, ind])
return base_doe
def est_mom(self, x_opt):
""" Estimate conditional moments for a single optimization point x_opt
Conditional moments are E[Y | x_opt] and Var[Y | x_opt]
Parameters
----------
x_opt : numpy.ndarray
the coordinates of the optimization variables to compute
the moments
Returns
-------
mus : numpy.ndarray
Estimated means, or if obj_wgt was not None,
the combined mu + obj_wgt * sigma
sigmas : numpy.ndarray
Estimated variances or std. dev. depending on the settings.
only returned if obj_wgt is None.
"""
if x_opt.ndim == 1:
x_opt = x_opt.reshape((1, -1))
doe = self.gen_doe(x_opt)
res = self.full_space.sto_obj(doe, x_opt)
mus = np.mean(res, axis=0)
sigmas = np.zeros(mus.shape)
std_inds = self.use_std
sigmas[std_inds] = np.std(res[:, std_inds], axis=0, ddof=1)
var_inds = np.logical_not(std_inds)
sigmas[var_inds] = np.var(res[:, var_inds], axis=0, ddof=1)
if self.obj_wgt is None:
return mus, sigmas
return mus + self.obj_wgt * sigmas
class CondProba:
"""A chain of integtrators for the calculation of the probability
This starts with a fast integrator to get an initial guess. If the
guess is too far away from target_pf, this stops further calculations
and returns the failure probability. Used for accelerating the
optimization process. Chains with a single element are also possible.
Parameters
----------
num_inputs : int
Number of stochastic inputs used for the constraints
target_fail_prob : float
Target failure probability. If unsure, just set it sufficiently low
i.e. >=1e-6. Note that Numerical unstabilities start at 1e-9 due to
scipy stats returning nans and infs
num_parallel : int
Number of parallel computations, if the used integrator supports it.
If passed, the entry in call_args will override this.
methods : None or list of str
Names of the methods to use for the estimation. If None, a default
chain will be selected depending the problem definition, which is
recommended for new users.
Currently the following names are supported:
MC - Crude Monte Carlo
DS - Directional simulation
FORM - First order reliability method
ISPUD - Importance sampling using design point (MPP)
call_args : None or list
keyword argument dict to pass to the integrator calc_prob_fail
as call arguments. Any argument in this will override the
initialization arguments with the same name i.e. target_fp and
num_parallel
target_tol : float
Target tolerance for the failure probability. Also used
for stopping the chain, if the computed failure probability
is either smaller than target_fp * target_tol or larger than
target_fp / target_tol.
"""
def __init__(self, target_fail_prob: float, num_inputs: int, num_parallel: int = 4,
methods: typing.Optional[typing.Union[str, list]] = None, call_args: typing.Optional[dict] = None,
target_tol: float = 0.01):
self.n_inp = num_inputs
num_para = _n_para_chk(num_parallel)
cargs = {"num_parallel": num_para, "multi_region": True}
if methods is None:
methods, cargs = _default_init(target_fail_prob, target_tol,
num_inputs, num_para)
if call_args is None:
self.call_args = {**cargs}
else:
self.call_args = {**cargs, **call_args}
self._tar_fp = target_fail_prob
self._tar_tol = target_tol
self.workers = _make_chain(methods)
self._prob_tol()
if "doe" in self.call_args.keys():
doe = self.call_args["doe"]
if doe.shape[1] != self.n_inp:
msg = f"Shape mismatch between the number of inputs ({self.n_inp}) "
msg += f"and the DoE {doe.shape[1]}"
raise ValueError()
mu_max = np.max(np.mean(doe, axis=0))
sig_max = np.max(np.std(doe, axis=0))
if abs(mu_max) > 1e-10 or abs(sig_max - 1) > 1e-10:
msg = "Zero mean and unit variance is required for doe "
msg += "in call_args, found mean == {mu_max} and "
msg += "sigma == {sig_max} columns"
raise ValueError(msg)
elif _is_worker(self.workers, "ISPUD"):
margs = [stats.norm() for k in range(self.n_inp)]
self.call_args["doe"] = make_doe(100, margs, num_tries=1000)
self.call_args["post_proc"] = False
self.call_args["num_parallel"] = num_para
@property
def target_fail_prob(self):
"""target failure probability"""
return self._tar_fp
@target_fail_prob.setter
def target_fail_prob(self, new_fp):
"""target failure probability"""
if new_fp <= 0 or new_fp > 0.9:
msg = "Target failure probability should lie in the interval (0,0.9]"
raise ValueError(msg)
self._tar_fp = new_fp
self._prob_tol()
@property
def target_tol(self):
"""Target accuracy for failure probability estimation"""
return self._tar_tol
@target_tol.setter
def target_tol(self, new_tol):
"""Target accuracy for failure probability estimation"""
if new_tol <= 0 or new_tol > 0.9:
msg = "Target probability accuracy should lie in the interval (0,0.9]"
raise ValueError(msg)
self._tar_tol = new_tol
self._prob_tol()
def _prob_tol(self):
prob_tol = self._tar_fp * self._tar_tol
if _is_worker(self.workers, "MC") and prob_tol < 1e-6:
msg = "Crude Monte Carlo can be very inefficient for "
msg += "such low probabilities of failure."
warnings.warn(msg)
self.call_args["prob_tol"] = prob_tol
def calc_fail_prob(self, input_mv, constraints, const_args, verbose: int = 0):
""" Calculate failure probability using the worker chain
Parameters
----------
input_mv : MultiVar instance
Definition of the multivariate input
constraints : list
constraint functions to initialize the integrator
const_args : None or list
arguments to pass to the constraints
Returns:
--------
pof : float
probability of failure
feasible : bool
pof <= target_pf
"""
if not self.workers:
raise ValueError("No estimators defined")
for worker in self.workers:
estimator = worker(input_mv, constraints, const_args)
try:
pof = estimator.calc_fail_prob(**self.call_args)[0]
except ValueError:
if worker == self.workers[-1]:
print("Fatal error while calculating probability of failure with", worker)
print(input_mv)
print("Setting it to 100%.")
pof = 1.
continue
if verbose > 1:
name = read_integrator_name(worker)
print(f"{name} estimated the failure probability as {pof:.2e}.")
if pof > self._tar_fp:
prob_ratio = self._tar_fp / pof
else:
prob_ratio = pof / self._tar_fp
if prob_ratio <= self._tar_tol:
break
if verbose > 0:
try:
name = read_integrator_name(worker)
print(f"{name} estimated the failure probability as {pof:.2e}.")
except NameError:
pass
return pof, pof <= self._tar_fp
| [
"numpy.mean",
"numpy.ones",
"duqo.doe.lhs.make_doe",
"scipy.stats.norm",
"numpy.logical_not",
"multiprocessing.cpu_count",
"numpy.array",
"numpy.zeros",
"numpy.std",
"warnings.warn",
"numpy.var"
] | [((1245, 1274), 'numpy.array', 'np.array', (['use_std'], {'dtype': 'bool'}), '(use_std, dtype=bool)\n', (1253, 1274), True, 'import numpy as np\n'), ((2344, 2358), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (2356, 2358), True, 'import multiprocessing as mp\n'), ((9965, 10008), 'numpy.ones', 'np.ones', (['self.base_doe.shape[1]'], {'dtype': 'bool'}), '(self.base_doe.shape[1], dtype=bool)\n', (9972, 10008), True, 'import numpy as np\n'), ((11293, 11313), 'numpy.mean', 'np.mean', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (11300, 11313), True, 'import numpy as np\n'), ((11331, 11350), 'numpy.zeros', 'np.zeros', (['mus.shape'], {}), '(mus.shape)\n', (11339, 11350), True, 'import numpy as np\n'), ((11410, 11450), 'numpy.std', 'np.std', (['res[:, std_inds]'], {'axis': '(0)', 'ddof': '(1)'}), '(res[:, std_inds], axis=0, ddof=1)\n', (11416, 11450), True, 'import numpy as np\n'), ((11470, 11494), 'numpy.logical_not', 'np.logical_not', (['std_inds'], {}), '(std_inds)\n', (11484, 11494), True, 'import numpy as np\n'), ((11522, 11562), 'numpy.var', 'np.var', (['res[:, var_inds]'], {'axis': '(0)', 'ddof': '(1)'}), '(res[:, var_inds], axis=0, ddof=1)\n', (11528, 11562), True, 'import numpy as np\n'), ((808, 829), 'numpy.array', 'np.array', (['obj_weights'], {}), '(obj_weights)\n', (816, 829), True, 'import numpy as np\n'), ((16529, 16547), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (16542, 16547), False, 'import warnings\n'), ((541, 557), 'numpy.ones', 'np.ones', (['num_obj'], {}), '(num_obj)\n', (548, 557), True, 'import numpy as np\n'), ((14701, 14721), 'numpy.mean', 'np.mean', (['doe'], {'axis': '(0)'}), '(doe, axis=0)\n', (14708, 14721), True, 'import numpy as np\n'), ((14752, 14771), 'numpy.std', 'np.std', (['doe'], {'axis': '(0)'}), '(doe, axis=0)\n', (14758, 14771), True, 'import numpy as np\n'), ((15213, 15249), 'duqo.doe.lhs.make_doe', 'make_doe', (['(100)', 'margs'], {'num_tries': '(1000)'}), '(100, margs, num_tries=1000)\n', (15221, 15249), False, 'from duqo.doe.lhs import make_doe\n'), ((15136, 15148), 'scipy.stats.norm', 'stats.norm', ([], {}), '()\n', (15146, 15148), False, 'from scipy import stats\n')] |
import tensorflow as tf
import numpy as np
from dps.register import RegisterBank
from dps.env import TensorFlowEnv
from dps.utils import Param, Config
def build_env():
return PathDiscovery()
config = Config(
build_env=build_env,
curriculum=[
dict(shape=(2, 2), threshold=6),
dict(shape=(3, 3), threshold=4),
dict(shape=(4, 4), threshold=2)
],
env_name='path_discovery',
shape=(3, 3),
T=10,
stopping_criteria="reward_per_ep,max",
)
class PathDiscovery(TensorFlowEnv):
""" The top-left cell stored an integers which says which of the other 3 corners is the rewarding corner.
Agents use the "look" to see which integer is present at the current cell.
"""
T = Param()
shape = Param()
n_val = Param()
require_discovery = Param(True)
def __init__(self, **kwargs):
self.action_names = '^ > v < look'.split()
self.action_shape = (len(self.action_names),)
self.rb = RegisterBank('PathDiscoveryRB', 'x y vision action', 'discovered',
[0.0, 0.0, -1.0, 0.0, 0.0], 'x y')
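        # The register bank appears to track the agent position (x, y), the latest
        # 'vision' reading (initialised to -1.0, i.e. nothing observed yet), the last
        # action taken and a hidden 'discovered' flag; the trailing 'x y' presumably
        # selects the registers exposed as the environment's output.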
self.val_input = self._make_input(self.n_val)
self.test_input = self._make_input(self.n_val)
super(PathDiscovery, self).__init__()
def _make_input(self, batch_size):
start_x = np.random.randint(self.shape[0], size=(batch_size, 1))
start_y = np.random.randint(self.shape[1], size=(batch_size, 1))
grid = np.random.randint(3, size=(batch_size, np.product(self.shape)))
return np.concatenate([start_x, start_y, grid], axis=1).astype('f')
def _build_placeholders(self):
self.input = tf.placeholder(tf.float32, (None, 2+np.product(self.shape)))
def _make_feed_dict(self, n_rollouts, T, mode):
if mode == 'train':
inp = self._make_input(n_rollouts)
elif mode == 'val':
inp = self.val_input
elif mode == 'test':
inp = self.test_input
else:
raise Exception("Unknown mode: {}.".format(mode))
if n_rollouts is not None:
inp = inp[:n_rollouts, :]
return {self.input: inp}
def build_init(self, r):
return self.rb.wrap(x=self.input[:, 0:1], y=self.input[:, 1:2],
vision=r[:, 2:3], action=r[:, 3:4], discovered=r[:, 4:5])
def build_step(self, t, r, actions):
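        # One environment step: apply the chosen movement action, clip the position
        # to the grid, let a 'look' action read the integer stored at the current
        # cell, and mark the goal as discovered once the top-left cell has been read.
        # Reward is granted on reaching the corner selected by the top-left integer
        # (0: top-right, 1: bottom-left, 2: bottom-right) and, if require_discovery
        # is set, only after that integer has actually been looked at.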
x, y, vision, action, discovered = self.rb.as_tuple(r)
up, right, down, left, look = tf.split(actions, 5, axis=1)
new_y = (1 - down - up) * y + down * (y+1) + up * (y-1)
new_x = (1 - right - left) * x + right * (x+1) + left * (x-1)
new_y = tf.clip_by_value(new_y, 0.0, self.shape[0]-1)
new_x = tf.clip_by_value(new_x, 0.0, self.shape[1]-1)
idx = tf.cast(y * self.shape[1] + x, tf.int32)
new_vision = tf.reduce_sum(
tf.one_hot(tf.reshape(idx, (-1,)), np.product(self.shape)) * self.input[:, 2:],
axis=1, keepdims=True)
vision = (1 - look) * vision + look * new_vision
action = tf.cast(tf.reshape(tf.argmax(actions, axis=1), (-1, 1)), tf.float32)
top_left = tf.cast(tf.equal(idx, 0), tf.float32)
discovered = discovered + look * top_left
discovered = tf.minimum(discovered, 1.0)
new_registers = self.rb.wrap(x=new_x, y=new_y, vision=vision, action=action, discovered=discovered)
top_right = tf.cast(tf.equal(idx, self.shape[1]-1), tf.float32)
bottom_left = tf.cast(tf.equal(idx, (self.shape[0]-1) * self.shape[1]), tf.float32)
bottom_right = tf.cast(tf.equal(idx, self.shape[0] * self.shape[1] - 1), tf.float32)
reward = (
top_right * tf.cast(tf.equal(self.input[:, 2:3], 0), tf.float32) +
bottom_left * tf.cast(tf.equal(self.input[:, 2:3], 1), tf.float32) +
bottom_right * tf.cast(tf.equal(self.input[:, 2:3], 2), tf.float32)
)
if self.require_discovery:
reward = reward * discovered
return tf.fill((tf.shape(r)[0], 1), 0.0), reward, new_registers
| [
"numpy.product",
"tensorflow.equal",
"tensorflow.shape",
"dps.register.RegisterBank",
"tensorflow.split",
"dps.utils.Param",
"numpy.random.randint",
"tensorflow.argmax",
"tensorflow.clip_by_value",
"numpy.concatenate",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.minimum"
] | [((740, 747), 'dps.utils.Param', 'Param', ([], {}), '()\n', (745, 747), False, 'from dps.utils import Param, Config\n'), ((760, 767), 'dps.utils.Param', 'Param', ([], {}), '()\n', (765, 767), False, 'from dps.utils import Param, Config\n'), ((780, 787), 'dps.utils.Param', 'Param', ([], {}), '()\n', (785, 787), False, 'from dps.utils import Param, Config\n'), ((812, 823), 'dps.utils.Param', 'Param', (['(True)'], {}), '(True)\n', (817, 823), False, 'from dps.utils import Param, Config\n'), ((982, 1088), 'dps.register.RegisterBank', 'RegisterBank', (['"""PathDiscoveryRB"""', '"""x y vision action"""', '"""discovered"""', '[0.0, 0.0, -1.0, 0.0, 0.0]', '"""x y"""'], {}), "('PathDiscoveryRB', 'x y vision action', 'discovered', [0.0, \n 0.0, -1.0, 0.0, 0.0], 'x y')\n", (994, 1088), False, 'from dps.register import RegisterBank\n'), ((1329, 1383), 'numpy.random.randint', 'np.random.randint', (['self.shape[0]'], {'size': '(batch_size, 1)'}), '(self.shape[0], size=(batch_size, 1))\n', (1346, 1383), True, 'import numpy as np\n'), ((1402, 1456), 'numpy.random.randint', 'np.random.randint', (['self.shape[1]'], {'size': '(batch_size, 1)'}), '(self.shape[1], size=(batch_size, 1))\n', (1419, 1456), True, 'import numpy as np\n'), ((2497, 2525), 'tensorflow.split', 'tf.split', (['actions', '(5)'], {'axis': '(1)'}), '(actions, 5, axis=1)\n', (2505, 2525), True, 'import tensorflow as tf\n'), ((2678, 2725), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['new_y', '(0.0)', '(self.shape[0] - 1)'], {}), '(new_y, 0.0, self.shape[0] - 1)\n', (2694, 2725), True, 'import tensorflow as tf\n'), ((2740, 2787), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['new_x', '(0.0)', '(self.shape[1] - 1)'], {}), '(new_x, 0.0, self.shape[1] - 1)\n', (2756, 2787), True, 'import tensorflow as tf\n'), ((2801, 2841), 'tensorflow.cast', 'tf.cast', (['(y * self.shape[1] + x)', 'tf.int32'], {}), '(y * self.shape[1] + x, tf.int32)\n', (2808, 2841), True, 'import tensorflow as tf\n'), ((3278, 3305), 'tensorflow.minimum', 'tf.minimum', (['discovered', '(1.0)'], {}), '(discovered, 1.0)\n', (3288, 3305), True, 'import tensorflow as tf\n'), ((3176, 3192), 'tensorflow.equal', 'tf.equal', (['idx', '(0)'], {}), '(idx, 0)\n', (3184, 3192), True, 'import tensorflow as tf\n'), ((3444, 3476), 'tensorflow.equal', 'tf.equal', (['idx', '(self.shape[1] - 1)'], {}), '(idx, self.shape[1] - 1)\n', (3452, 3476), True, 'import tensorflow as tf\n'), ((3518, 3568), 'tensorflow.equal', 'tf.equal', (['idx', '((self.shape[0] - 1) * self.shape[1])'], {}), '(idx, (self.shape[0] - 1) * self.shape[1])\n', (3526, 3568), True, 'import tensorflow as tf\n'), ((3611, 3659), 'tensorflow.equal', 'tf.equal', (['idx', '(self.shape[0] * self.shape[1] - 1)'], {}), '(idx, self.shape[0] * self.shape[1] - 1)\n', (3619, 3659), True, 'import tensorflow as tf\n'), ((1551, 1599), 'numpy.concatenate', 'np.concatenate', (['[start_x, start_y, grid]'], {'axis': '(1)'}), '([start_x, start_y, grid], axis=1)\n', (1565, 1599), True, 'import numpy as np\n'), ((3098, 3124), 'tensorflow.argmax', 'tf.argmax', (['actions'], {'axis': '(1)'}), '(actions, axis=1)\n', (3107, 3124), True, 'import tensorflow as tf\n'), ((1511, 1533), 'numpy.product', 'np.product', (['self.shape'], {}), '(self.shape)\n', (1521, 1533), True, 'import numpy as np\n'), ((1705, 1727), 'numpy.product', 'np.product', (['self.shape'], {}), '(self.shape)\n', (1715, 1727), True, 'import numpy as np\n'), ((2901, 2923), 'tensorflow.reshape', 'tf.reshape', (['idx', '(-1,)'], {}), '(idx, (-1,))\n', (2911, 2923), True, 'import 
tensorflow as tf\n'), ((2925, 2947), 'numpy.product', 'np.product', (['self.shape'], {}), '(self.shape)\n', (2935, 2947), True, 'import numpy as np\n'), ((3888, 3919), 'tensorflow.equal', 'tf.equal', (['self.input[:, 2:3]', '(2)'], {}), '(self.input[:, 2:3], 2)\n', (3896, 3919), True, 'import tensorflow as tf\n'), ((3725, 3756), 'tensorflow.equal', 'tf.equal', (['self.input[:, 2:3]', '(0)'], {}), '(self.input[:, 2:3], 0)\n', (3733, 3756), True, 'import tensorflow as tf\n'), ((3806, 3837), 'tensorflow.equal', 'tf.equal', (['self.input[:, 2:3]', '(1)'], {}), '(self.input[:, 2:3], 1)\n', (3814, 3837), True, 'import tensorflow as tf\n'), ((4045, 4056), 'tensorflow.shape', 'tf.shape', (['r'], {}), '(r)\n', (4053, 4056), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
import requests
class Query:
    '''Implements queries to the GitHub API using GraphQL
'''
def __init__(self, token, max_page_size=100, min_page_size=5):
self._token = token
self._max_page_size = max_page_size
self._min_page_size = min_page_size
self.api_costs = {}
_MEMBERS = '''
organization(login: "{organization}") {{
team(slug: "{team}") {{
members(first: {max_page_size} {next}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
login
}}
}}
}}
}}
'''
def get_members(self, organization, team):
'''Get all team members for organization
Returns:
logins: a list of members' logins
'''
logins = []
not_end = True
query = Query._MEMBERS.format(organization=organization,
team=team,
max_page_size=self._max_page_size,
next='')
while not_end:
result = self._run(query)['organization']['team']
if result is None:
break
result = result['members']
not_end = result['pageInfo']['hasNextPage']
query = Query._MEMBERS.format(organization=organization,
team=team,
max_page_size=self._max_page_size,
next=f'after: "{result["pageInfo"]["endCursor"]}"')
logins += [node['login'] for node in result['nodes']]
return logins
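    # Note: get_labels, get_timeline and get_pull_requests below reuse the same
    # cursor-based pagination pattern (pageInfo.hasNextPage / pageInfo.endCursor)
    # shown in get_members above.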
_LABELS = '''
repository(owner: "yandex" name: "ClickHouse") {{
pullRequest(number: {number}) {{
labels(first: {max_page_size} {next}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
name
color
}}
}}
}}
}}
'''
def get_labels(self, pull_request):
        '''Fetches all labels for a given pull-request
Args:
pull_request: JSON object returned by `get_pull_requests()`
Returns:
labels: a list of JSON nodes with the name and color fields
'''
labels = [label for label in pull_request['labels']['nodes']]
not_end = pull_request['labels']['pageInfo']['hasNextPage']
query = Query._LABELS.format(number = pull_request['number'],
max_page_size = self._max_page_size,
next=f'after: "{pull_request["labels"]["pageInfo"]["endCursor"]}"')
while not_end:
result = self._run(query)['repository']['pullRequest']['labels']
not_end = result['pageInfo']['hasNextPage']
query = Query._LABELS.format(number=pull_request['number'],
max_page_size=self._max_page_size,
next=f'after: "{result["pageInfo"]["endCursor"]}"')
labels += [label for label in result['nodes']]
return labels
_TIMELINE = '''
repository(owner: "yandex" name: "ClickHouse") {{
pullRequest(number: {number}) {{
timeline(first: {max_page_size} {next}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
... on CrossReferencedEvent {{
isCrossRepository
source {{
... on PullRequest {{
number
baseRefName
merged
labels(first: {max_page_size}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
name
color
}}
}}
}}
}}
target {{
... on PullRequest {{
number
}}
}}
}}
}}
}}
}}
}}
'''
def get_timeline(self, pull_request):
        '''Fetches all cross-reference events from the pull-request's timeline
Args:
pull_request: JSON object returned by `get_pull_requests()`
Returns:
events: a list of JSON nodes for CrossReferenceEvent
'''
events = [event for event in pull_request['timeline']['nodes'] if event and event['source']]
not_end = pull_request['timeline']['pageInfo']['hasNextPage']
query = Query._TIMELINE.format(number = pull_request['number'],
max_page_size = self._max_page_size,
next=f'after: "{pull_request["timeline"]["pageInfo"]["endCursor"]}"')
while not_end:
result = self._run(query)['repository']['pullRequest']['timeline']
not_end = result['pageInfo']['hasNextPage']
query = Query._TIMELINE.format(number=pull_request['number'],
max_page_size=self._max_page_size,
next=f'after: "{result["pageInfo"]["endCursor"]}"')
events += [event for event in result['nodes'] if event and event['source']]
return events
_PULL_REQUESTS = '''
repository(owner: "yandex" name: "ClickHouse") {{
defaultBranchRef {{
name
target {{
... on Commit {{
history(first: {max_page_size} {next}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
oid
associatedPullRequests(first: {min_page_size}) {{
totalCount
nodes {{
... on PullRequest {{
number
author {{
login
}}
mergedBy {{
login
}}
url
baseRefName
baseRepository {{
nameWithOwner
}}
mergeCommit {{
oid
}}
labels(first: {min_page_size}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
name
color
}}
}}
timeline(first: {min_page_size}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
... on CrossReferencedEvent {{
isCrossRepository
source {{
... on PullRequest {{
number
baseRefName
merged
labels(first: 0) {{
nodes {{
name
}}
}}
}}
}}
target {{
... on PullRequest {{
number
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
'''
def get_pull_requests(self, before_commit, login):
        '''Get all merged pull-requests from the HEAD of the default branch down to the given commit (excluded)
        Args:
            before_commit (string-convertible): commit sha of the last commit (excluded)
login (string): filter pull-requests by user login
Returns:
pull_requests: a list of JSON nodes with pull-requests' details
'''
pull_requests = []
not_end = True
query = Query._PULL_REQUESTS.format(max_page_size=self._max_page_size,
min_page_size=self._min_page_size,
next='')
while not_end:
result = self._run(query)['repository']['defaultBranchRef']
default_branch_name = result['name']
result = result['target']['history']
not_end = result['pageInfo']['hasNextPage']
query = Query._PULL_REQUESTS.format(max_page_size=self._max_page_size,
min_page_size=self._min_page_size,
next=f'after: "{result["pageInfo"]["endCursor"]}"')
for commit in result['nodes']:
if str(commit['oid']) == str(before_commit):
not_end = False
break
# TODO: fetch all pull-requests that were merged in a single commit.
assert commit['associatedPullRequests']['totalCount'] <= self._min_page_size, \
f'there are {commit["associatedPullRequests"]["totalCount"]} pull-requests merged in commit {commit["oid"]}'
for pull_request in commit['associatedPullRequests']['nodes']:
if(pull_request['baseRepository']['nameWithOwner'] == 'yandex/ClickHouse' and
pull_request['baseRefName'] == default_branch_name and
pull_request['mergeCommit']['oid'] == commit['oid'] and
(not login or pull_request['author']['login'] == login)):
pull_requests.append(pull_request)
return pull_requests
_DEFAULT = '''
repository(owner: "yandex", name: "ClickHouse") {
defaultBranchRef {
name
}
}
'''
def get_default_branch(self):
'''Get short name of the default branch
Returns:
name (string): branch name
'''
return self._run(Query._DEFAULT)['repository']['defaultBranchRef']['name']
def _run(self, query):
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None,
):
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
headers = {'Authorization': f'bearer {self._token}'}
query = f'''
{{
{query}
rateLimit {{
cost
remaining
}}
}}
'''
request = requests_retry_session().post('https://api.github.com/graphql', json={'query': query}, headers=headers)
if request.status_code == 200:
result = request.json()
if 'errors' in result:
                raise Exception(f'Errors occurred: {result["errors"]}')
import inspect
caller = inspect.getouterframes(inspect.currentframe(), 2)[1][3]
if caller not in self.api_costs.keys():
self.api_costs[caller] = 0
self.api_costs[caller] += result['data']['rateLimit']['cost']
return result['data']
else:
import json
raise Exception(f'Query failed with code {request.status_code}:\n{json.dumps(request.json(), indent=4)}')
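# Example usage (all values below are hypothetical):
#
#   q = Query(token='<github-personal-access-token>')
#   default_branch = q.get_default_branch()
#   members = q.get_members('yandex', '<team-slug>')
#   pull_requests = q.get_pull_requests(before_commit='<last-released-commit-sha>', login=None)
#   for pull_request in pull_requests:
#       labels = q.get_labels(pull_request)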
| [
"inspect.currentframe",
"requests.adapters.HTTPAdapter",
"requests.Session",
"urllib3.util.retry.Retry"
] | [((13434, 13556), 'urllib3.util.retry.Retry', 'Retry', ([], {'total': 'retries', 'read': 'retries', 'connect': 'retries', 'backoff_factor': 'backoff_factor', 'status_forcelist': 'status_forcelist'}), '(total=retries, read=retries, connect=retries, backoff_factor=\n backoff_factor, status_forcelist=status_forcelist)\n', (13439, 13556), False, 'from urllib3.util.retry import Retry\n'), ((13669, 13699), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'retry'}), '(max_retries=retry)\n', (13680, 13699), False, 'from requests.adapters import HTTPAdapter\n'), ((13395, 13413), 'requests.Session', 'requests.Session', ([], {}), '()\n', (13411, 13413), False, 'import requests\n'), ((14419, 14441), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (14439, 14441), False, 'import inspect\n')] |
from typing import List
import matplotlib.pyplot as plt
class Mortgage:
"""
A mortgage overview of the total burden (incl. interest) and the monthly fees per fixed period
"""
def __init__(self, mortgage_amount, burden, periods, monthly_fees, name):
self.mortgage_amount = int(mortgage_amount)
self.burden = int(burden)
self.periods = periods.copy()
self.monthly_fees = [int(fee) for fee in monthly_fees]
self.name = name
def __add__(self, other):
if not other:
return self
mortgage_amount = self.mortgage_amount + other.mortgage_amount
burden = self.burden + other.burden
periods, monthly_fees = _align_mortgages(periods_a=self.periods,
periods_b=other.periods,
fees_a=self.monthly_fees,
fees_b=other.monthly_fees)
name = self.name
if other.name != self.name:
name += ' & ' + other.name
return Mortgage(mortgage_amount=mortgage_amount,
burden=burden,
periods=periods,
monthly_fees=monthly_fees,
name=name)
def __radd__(self, other):
return self + other
def __repr__(self):
text = (f'{self.name}: {format(self.mortgage_amount, ",d")} euro\n'
f'Total burden: {format(self.burden, ",d")} euro\n'
'Monthly fees:\n')
for period, fee in zip(self.periods, self.monthly_fees):
            text += f'- {period} months: {fee} euros\n'
return text
def plot(self, axes=None) -> plt.axes:
if axes is None:
fig, axes = plt.subplots(2, 1, figsize=(5, 8))
nr_periods = len(self.periods)
axes[0].bar(x=range(nr_periods), height=self.monthly_fees, tick_label=self.periods,
color='darkblue')
axes[0].set_xlabel('Period (months)')
axes[0].set_ylabel('Monthly fee\n', color='darkblue')
axes[0].set_title(f'Subsequent monthly fees\nover the specified periods\n\n{self}\n')
axes[1].bar(x=[0, 1], height=[self.mortgage_amount, self.burden], color='purple')
axes[1].set_ylabel('\nAmount (euro)', color='purple')
axes[1].set_xlabel('')
axes[1].set_xticks([0, 1])
axes[1].set_xticklabels([f'Mortgage\n{format(self.mortgage_amount, ",d")}',
f'Total burden\n{format(self.burden, ",d")}'])
plt.tight_layout()
return axes
def compare(self, others: list) -> plt.axes:
mortgages = [self] + others
nr_mortgages = len(mortgages)
fig, axes = plt.subplots(2, nr_mortgages, figsize=(nr_mortgages * 3, 8), sharey='row')
for col_axes, mortgage in zip(axes.T, mortgages):
mortgage.plot(axes=col_axes)
plt.tight_layout()
return axes
def _align_mortgages(periods_a: List[int],
periods_b: List[int],
fees_a: List[int],
fees_b: List[int]) -> (List[int], List[int]):
""" Align periods and fees of two mortgages and compute the exact fee for each period.
:param periods_a: periods for Mortgage a
:param periods_b: periods for Mortgage b
:param fees_a: monthly fees for Mortgage a
:param fees_b: monthly fees for Mortgage b
:return: tuple of aligned periods and fees for the combined Mortgages a and b
"""
periods_a, periods_b, fees_a, fees_b = \
periods_a.copy(), periods_b.copy(), fees_a.copy(), fees_b.copy()
if not periods_a:
if not periods_b:
return [], []
else:
return periods_b, fees_b
elif not periods_b:
return periods_a, fees_a
if periods_b[0] < periods_a[0]:
periods_a, periods_b = periods_b, periods_a
fees_a, fees_b = fees_b, fees_a
first_period_fee = ([periods_a[0]], [fees_a[0] + fees_b[0]])
if periods_a[0] == periods_b[0]:
recursive_result = _align_mortgages(periods_a[1:], periods_b[1:], fees_a[1:], fees_b[1:])
else:
periods_b[0] -= periods_a[0]
recursive_result = _align_mortgages(periods_a[1:], periods_b, fees_a[1:], fees_b)
return tuple(a + b for a, b in zip(first_period_fee, recursive_result))
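# Example (hypothetical figures): combining a long and a short fixed-fee part.
#
#   a = Mortgage(mortgage_amount=200_000, burden=260_000, periods=[120],
#                monthly_fees=[900], name='Annuity part')
#   b = Mortgage(mortgage_amount=50_000, burden=60_000, periods=[60],
#                monthly_fees=[450], name='Linear part')
#   combined = a + b
#   # combined.periods == [60, 60] and combined.monthly_fees == [1350, 900]:
#   # both parts are paid during the first 60 months, only part a afterwards.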
| [
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout"
] | [((2593, 2611), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2609, 2611), True, 'import matplotlib.pyplot as plt\n'), ((2776, 2850), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', 'nr_mortgages'], {'figsize': '(nr_mortgages * 3, 8)', 'sharey': '"""row"""'}), "(2, nr_mortgages, figsize=(nr_mortgages * 3, 8), sharey='row')\n", (2788, 2850), True, 'import matplotlib.pyplot as plt\n'), ((2958, 2976), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2974, 2976), True, 'import matplotlib.pyplot as plt\n'), ((1795, 1829), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(5, 8)'}), '(2, 1, figsize=(5, 8))\n', (1807, 1829), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import re
import shutil
import sys
import tarfile
import time
from enum import Enum
from zipfile import ZipFile
from utils.debug import debug
from utils.os_util import copy_dir
from utils.config import YAMLReader, ConfigParseError, ConfigurationError
from utils.wml import WMLWrapper, WMLWrapperError
from utils.cos import COSWrapper, COSWrapperError, BucketNotFoundError
class ExitCode(Enum):
"""
Defines the exit codes for this utility
"""
SUCCESS = 0
INCORRECT_INVOCATION = 1
ENV_ERROR = 2
CONFIGURATION_ERROR = 3
PRE_PROCESSING_FAILED = 4
TRAINING_FAILED = 5
DOWNLOAD_FAILED = 6
EXTRACTION_FAILED = 7
COPY_FAILED = 8
TRAINING_LOG_NAME = 'training-log.txt' # fixed; do not change
TRAINING_OUTPUT_ARCHIVE_NAME = 'model_training_output.tar.gz' # do not change
def print_banner(message):
print('# --------------------------------------------------------')
print('# {}'.format(message))
print('# --------------------------------------------------------')
# --------------------------------------------------------
# Process command line parameters
# --------------------------------------------------------
def process_cmd_parameters():
"""
Process command line parameters. This function terminates the
application if an invocation error was detected.
:returns: dict, containing two properties: 'config_file' and
'command'
:rtype: dict
"""
def display_usage():
print('--------------------------------------------------------'
'--------------------------------------------')
print('Train a MAX model using Watson Machine Learning. ')
print('\nUsage: {} <training_config_file> <command> \n'
.format(sys.argv[0]))
print('Valid commands:')
print(' clean '
'removes local model training artifacts')
print(' prepare '
'generates model training artifacts but skips model training')
print(' train '
'generates model training artifacts and trains the model')
print(' package '
'generates model training artifacts, trains the model, and '
'performs post processing')
print(' package <training_id> '
'monitors the training status and performs post processing')
print('--------------------------------------------------------'
'--------------------------------------------')
if len(sys.argv) <= 1:
# no arguments were provided; display usage information
display_usage()
sys.exit(ExitCode.SUCCESS.value)
if os.path.isfile(sys.argv[1]) is False:
print('Invocation error. "{}" is not a file.'.format(sys.argv[1]))
display_usage()
sys.exit(ExitCode.INCORRECT_INVOCATION.value)
if len(sys.argv) < 3:
print('Invocation error. You must specify a command.')
display_usage()
sys.exit(ExitCode.INCORRECT_INVOCATION.value)
cmd_parameters = {
'config_file': sys.argv[1],
'command': sys.argv[2].strip().lower(),
'training_id': None
}
if cmd_parameters['command'] not in ['clean',
'prepare',
'train',
'package']:
print('Invocation error. "{}" is not a valid command.'
.format(sys.argv[2]))
display_usage()
sys.exit(ExitCode.INCORRECT_INVOCATION.value)
if cmd_parameters['command'] == 'package':
# package accepts as optional parameter an existing training id
if len(sys.argv) == 4:
cmd_parameters['training_id'] = sys.argv[3]
return cmd_parameters
cmd_parameters = process_cmd_parameters()
# --------------------------------------------------------
# Verify that the required environment variables are set
# --------------------------------------------------------
def verify_env_settings():
print_banner('Checking environment variables ...')
var_missing = False
# WML environment variables
for var_name in ['ML_ENV', 'ML_APIKEY', 'ML_INSTANCE']:
if os.environ.get(var_name) is None:
print(' Error. Environment variable {} is not defined.'
.format(var_name))
var_missing = True
# Cloud Object Storage environment variables
for var_name in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']:
if os.environ.get(var_name) is None:
print(' Error. Environment variable {} is not defined.'
.format(var_name))
var_missing = True
if var_missing:
sys.exit(ExitCode.ENV_ERROR.value)
verify_env_settings()
# --------------------------------------------------------
# Process configuration file
# --------------------------------------------------------
print_banner('Validating configuration file "{}" ...'
.format(cmd_parameters['config_file']))
config = None
try:
r = YAMLReader(cmd_parameters['config_file'])
config = r.read()
except ConfigurationError as ce:
for missing_setting in ce.get_missing_settings():
print('Error. Configuration file "{}" does not'
' define setting "{}".'
.format(cmd_parameters['config_file'],
missing_setting.get('yaml_path')))
sys.exit(ExitCode.CONFIGURATION_ERROR.value)
except ConfigParseError as cpe:
print('Error. Configuration file "{}" is invalid: {}'
.format(cmd_parameters['config_file'],
str(cpe)))
sys.exit(ExitCode.CONFIGURATION_ERROR.value)
except FileNotFoundError:
print('Error. Configuration file "{}" was not found.'
.format(cmd_parameters['config_file']))
    sys.exit(ExitCode.CONFIGURATION_ERROR.value)
debug('Using the following configuration settings: ', config)
cw = None # COS wrapper handle
w = None # WML wrapper handle
training_guid = cmd_parameters.get('training_id', None)
if cmd_parameters['command'] == 'package' and training_guid is not None:
# monitor status of an existing training run; skip preparation steps
try:
# instantiate Cloud Object Storage wrapper
cw = COSWrapper(os.environ['AWS_ACCESS_KEY_ID'],
os.environ['AWS_SECRET_ACCESS_KEY'])
except COSWrapperError as cwe:
print('Error. Cloud Object Storage preparation failed: {}'.format(cwe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
print_banner('Verifying that "{}" is a valid training id ...'
.format(training_guid))
try:
# instantiate Watson Machine Learning wrapper
w = WMLWrapper(os.environ['ML_ENV'],
os.environ['ML_APIKEY'],
os.environ['ML_INSTANCE'])
# verify that the provided training id is valid
if not w.is_known_training_id(training_guid):
print('Error. "{}" is an unknown training id.'
.format(training_guid))
sys.exit(ExitCode.INCORRECT_INVOCATION.value)
except WMLWrapperError as wmle:
print(wmle)
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except Exception as ex:
print(' Exception type: {}'.format(type(ex)))
print(' Exception: {}'.format(ex))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
else:
# --------------------------------------------------------
# Remove existing model training artifacts
# --------------------------------------------------------
print_banner('Removing temporary work files ...')
for file in [config['model_code_archive']]:
if os.path.isfile(file):
os.remove(file)
# terminate if the "clean" command was specified
# when the utility was invoked
if cmd_parameters['command'] == 'clean':
print('Skipping model training.')
sys.exit(ExitCode.SUCCESS.value)
# --------------------------------------------------------
# Verify the Cloud Object Storage configuration:
# - the results bucket must exist
# --------------------------------------------------------
print_banner('Verifying Cloud Object Storage setup ...')
try:
# instantiate the Cloud Object Storage wrapper
cw = COSWrapper(os.environ['AWS_ACCESS_KEY_ID'],
os.environ['AWS_SECRET_ACCESS_KEY'])
print(' Verifying that training results bucket "{}" exists. '
' It will be created if necessary ...'
.format(config['results_bucket']))
# make sure the training results bucket exists;
# it can be empty, but doesn't have to be
cw.create_bucket(config['results_bucket'],
exist_ok=True)
print(' Verifying that training data bucket "{}" exists. '
' It will be created if necessary ...'
.format(config['training_bucket']))
# make sure the training data bucket exists;
cw.create_bucket(config['training_bucket'],
exist_ok=True)
    # if there are any initial_model artifacts in the training bucket
# remove them
im_object_list = cw.get_object_list(config['training_bucket'],
key_name_prefix='initial_model')
if len(im_object_list) > 0:
print(' Removing model artifacts from training bucket "{}" ... '
.format(config['training_bucket']))
cw.delete_objects(config['training_bucket'], im_object_list)
# is there training data in the bucket?
no_training_data = cw.is_bucket_empty(config['training_bucket'])
if config.get('local_data_dir') and \
os.path.isdir(config['local_data_dir']):
config['local_data_dir'] = \
os.path.abspath(config['local_data_dir'])
# add initial_model artifacts to bucket
if config.get('local_data_dir') and \
os.path.isdir(config['local_data_dir']):
initial_model_path = os.path.join(config['local_data_dir'],
'initial_model')
print(' Looking for model artifacts in "{}" ... '
.format(initial_model_path))
for file in glob.iglob(initial_model_path + '/**/*',
recursive=True):
if os.path.isfile(file):
print(' Uploading model artifact "{}" to '
'training data bucket "{}" ...'
.format(file[len(initial_model_path):].lstrip('/'),
config['training_bucket']))
cw.upload_file(file,
config['training_bucket'],
'initial_model',
file[len(initial_model_path):]
.lstrip('/'))
print(' Looking for training data in bucket "{}" ... '
.format(config['training_bucket']))
# if there's no training data in the training data bucket
# upload whatever is found locally
if no_training_data:
print(' No training data was found.')
if config.get('local_data_dir', None) is None:
# error. there is no local training data either;
# abort processing
print('Error. No local training data was found. '
'Please check your configuration settings.')
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# verify that local_data_dir is a directory
if not os.path.isdir(config['local_data_dir']):
print('Error. "{}" is not a directory or cannot be accessed.'
.format(config['local_data_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# upload training data from the local data directory
print(' Looking for training data in "{}" ... '
.format(config['local_data_dir']))
file_count = 0
ignore_list = []
ignore_list.append(os.path.join(config['local_data_dir'],
'README.md'))
for file in glob.iglob(config['local_data_dir'] + '/**/*',
recursive=True):
if file in ignore_list or file.startswith(initial_model_path):
continue
if os.path.isfile(file):
print(' Uploading "{}" to training data bucket "{}" ...'
.format(file[len(config['local_data_dir']):]
.lstrip('/'),
config['training_bucket']))
cw.upload_file(file,
config['training_bucket'],
config.get('training_data_key_prefix'),
file[len(config['local_data_dir']):]
.lstrip('/'))
file_count += 1
if file_count == 0:
print('Error. No local training data was found in "{}".'
.format(config['local_data_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
else:
print('Uploaded {} data files to training data bucket "{}".'
.format(file_count, config['training_bucket']))
else:
print(' Found data in training data bucket "{}". Skipping upload.'
.format(config['training_bucket']))
except ValueError as ve:
print('Error. {}'.format(ve))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except BucketNotFoundError as bnfe:
print('Error. {}'.format(bnfe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except FileNotFoundError as fnfe:
print('Error. {}'.format(fnfe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except COSWrapperError as cwe:
print('Error. Cloud Object Storage preparation failed: {}'.format(cwe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# --------------------------------------------------------
# Create model building ZIP
# --------------------------------------------------------
print_banner('Locating model building files ...')
#
# 1. Assure that the model building directory
# config['model_building_code_dir'] exists
# 2. If there are no files in config['model_building_code_dir']:
# - determine whether model-building code is stored in a COS bucket
# - download model-building code to config['model_building_code_dir']
# 3. ZIP files in config['model_building_code_dir']
try:
# task 1: make sure the specified model building code directory exists
os.makedirs(config['model_building_code_dir'], exist_ok=True)
except Exception as ex:
debug(' Exception type: {}'.format(type(ex)))
print('Error. Model building code preparation failed: {}'.format(ex))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
if len(os.listdir(config['model_building_code_dir'])) == 0:
# Task 2: try to download model building code from Cloud Object Storage
# bucket
#
print('No model building code was found in "{}".'
.format(config['model_building_code_dir']))
try:
if config.get('model_bucket') is None or \
cw.is_bucket_empty(config['model_bucket'],
config.get('model_key_prefix')):
print('Error. Model building code preparation failed: '
'No source code was found locally in "{}" or '
' in Cloud Object Storage.'
.format(config['model_building_code_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
print('Found model building code in bucket "{}".'
.format(config['model_bucket']))
for object_key in cw.get_object_list(config['model_bucket'],
config.get(
'model_key_prefix')):
cw.download_file(config['model_bucket'],
object_key,
config['model_building_code_dir'])
except BucketNotFoundError as bnfe:
print('Error. {}'.format(bnfe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except COSWrapperError as cwe:
print('Error. {}'.format(cwe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except Exception as ex:
debug(' Exception type: {}'.format(type(ex)))
print('Error. {}'.format(ex))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
print_banner('Packaging model building files in "{}" ...'
.format(config['model_building_code_dir']))
try:
shutil.make_archive(re.sub('.zip$', '', config['model_code_archive']),
'zip',
config['model_building_code_dir'])
except Exception as ex:
print('Error. Packaging failed: {}'.format(str(ex)))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
if os.path.isfile(config['model_code_archive']):
# display archive content
print('Model building package "{}" contains the following entries:'
.format(config['model_code_archive']))
with ZipFile(config['model_code_archive'], 'r') as archive:
for entry in sorted(archive.namelist()):
print(' {}'.format(entry))
# check archive size; WML limits size to 4MB
archive_size = os.path.getsize(config['model_code_archive'])
archive_size_limit = 1024 * 1024 * 4
if archive_size > archive_size_limit:
print('Error. Your model building code archive "{}" is too large '
              '({:.2f} MB). WML rejects archives larger than {} MB. '
'Please remove unnecessary files from the "{}" directory '
'and try again.'
.format(config['model_code_archive'],
archive_size / (1024 * 1024),
archive_size_limit / (1024 * 1024),
config['model_building_code_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# Status:
# - The model training job can now be started.
if cmd_parameters['command'] == 'prepare':
print('Skipping model training and post processing steps.')
sys.exit(ExitCode.SUCCESS.value)
# ---------------------------------------------------------
# Start model training
# --------------------------------------------------------
print_banner('Starting model training ...')
try:
# instantiate the WML client
w = WMLWrapper(os.environ['ML_ENV'],
os.environ['ML_APIKEY'],
os.environ['ML_INSTANCE'])
except WMLWrapperError as wmle:
print(wmle)
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# define training metadata
model_definition_metadata = {
w.get_client().repository.DefinitionMetaNames.NAME:
config['training_run_name'],
w.get_client().repository.DefinitionMetaNames.DESCRIPTION:
config['training_run_description'],
w.get_client().repository.DefinitionMetaNames.AUTHOR_NAME:
config['author_name'],
w.get_client().repository.DefinitionMetaNames.FRAMEWORK_NAME:
config['framework_name'],
w.get_client().repository.DefinitionMetaNames.FRAMEWORK_VERSION:
config['framework_version'],
w.get_client().repository.DefinitionMetaNames.RUNTIME_NAME:
config['runtime_name'],
w.get_client().repository.DefinitionMetaNames.RUNTIME_VERSION:
config['runtime_version'],
w.get_client().repository.DefinitionMetaNames.EXECUTION_COMMAND:
config['training_run_execution_command']
}
training_configuration_metadata = {
w.get_client().training.ConfigurationMetaNames.NAME:
config['training_run_name'],
w.get_client().training.ConfigurationMetaNames.AUTHOR_NAME:
config['author_name'],
w.get_client().training.ConfigurationMetaNames.DESCRIPTION:
config['training_run_description'],
w.get_client().training.ConfigurationMetaNames.COMPUTE_CONFIGURATION:
{'name': config['training_run_compute_configuration_name']},
w.get_client().training.ConfigurationMetaNames
.TRAINING_DATA_REFERENCE: {
'connection': {
'endpoint_url': config['cos_endpoint_url'],
'access_key_id': os.environ['AWS_ACCESS_KEY_ID'],
'secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY']
},
'source': {
'bucket': config['training_bucket'],
},
'type': 's3'
},
w.get_client().training.ConfigurationMetaNames
.TRAINING_RESULTS_REFERENCE: {
'connection': {
'endpoint_url': config['cos_endpoint_url'],
'access_key_id': os.environ['AWS_ACCESS_KEY_ID'],
'secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY']
},
'target': {
'bucket': config['results_bucket'],
},
'type': 's3'
}
}
print('Training configuration summary:')
print(' Training run name : {}'.format(config['training_run_name']))
print(' Training data bucket : {}'.format(config['training_bucket']))
print(' Results bucket : {}'.format(config['results_bucket']))
print(' Model-building archive: {}'.format(config['model_code_archive']))
try:
training_guid = w.start_training(config['model_code_archive'],
model_definition_metadata,
training_configuration_metadata)
except Exception as ex:
print('Error. Model training could not be started: {}'.format(ex))
sys.exit(ExitCode.TRAINING_FAILED.value)
print('Model training was started. Training id: {}'.format(training_guid))
# --------------------------------------------------------
# Monitor the training run until it completes
# successfully or throws an error
# --------------------------------------------------------
#
print('Checking model training status every {} seconds.'
' Press Ctrl+C once to stop monitoring or '
' press Ctrl+C twice to cancel training.'
.format(config['training_progress_monitoring_interval']))
print('Status - (p)ending (r)unning (e)rror (c)ompleted or canceled:')
try:
training_in_progress = True
while training_in_progress:
try:
# poll training status; ignore server errors (e.g. caused
# by temporary issues not specific to our training run)
status = w.get_training_status(training_guid,
ignore_server_error=True)
if status:
training_status = status.get('state') or '?'
else:
# unknown status; continue and leave it up to the user
# to terminate monitoring
training_status = '?'
# display training status indicator
# [p]ending
# [r]unning
# [c]ompleted
# [e]rror
# [?]
print(training_status[0:1], end='', flush=True)
if training_status == 'completed':
# training completed successfully
print('\nTraining completed.')
training_in_progress = False
elif training_status == 'error':
print('\nTraining failed.')
# training ended with error
training_in_progress = False
elif training_status == 'canceled':
print('\nTraining canceled.')
                # training was canceled
training_in_progress = False
else:
time.sleep(
int(config['training_progress_monitoring_interval']))
except KeyboardInterrupt:
print('\nTraining monitoring was stopped.')
try:
input('Press Ctrl+C again to cancel model training or '
'any other key to continue training.')
print('To resume monitoring, run "python {} {} {} {}"'
.format(sys.argv[0],
sys.argv[1],
'package',
training_guid))
sys.exit(ExitCode.SUCCESS.value)
except KeyboardInterrupt:
try:
w.cancel_training(training_guid)
print('\nModel training was canceled.')
except Exception as ex:
print('Model training could not be canceled: {}'
.format(ex))
debug(' Exception type: {}'.format(type(ex)))
debug(' Exception: {}'.format(ex))
sys.exit(ExitCode.TRAINING_FAILED.value)
except Exception as ex:
print('Error. Model training monitoring failed with an exception: {}'
.format(ex))
debug(' Exception type: {}'.format(type(ex)))
debug(' Exception: {}'.format(ex))
sys.exit(ExitCode.TRAINING_FAILED.value)
# Status:
# - The model training job completed.
# - The training log file TRAINING_LOG_NAME can now be downloaded from COS.
results_references = None
try:
# --------------------------------------------------------
# Identify where the training artifacts are stored on COS
# {
# 'bucket': 'ademoout3',
# 'model_location': 'training-BA8P0BgZg'
# }
# Re-try to fetch information multiple times in case the WML service
# encounters a temporary issue
max_tries = 5
ise = True
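    # 'ise' = ignore_server_error; on the final attempt it is set to False so a real failure surfaces.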
for count in range(max_tries):
results_references = \
w.get_training_results_references(training_guid,
ignore_server_error=ise)
if results_references:
# got a response; move on
break
if count + 1 == max_tries:
# last attempt; if it fails stop trying
ise = False
time.sleep(3)
# --------------------------------------------------------
# Download the training log file from the results
# bucket on COS to config['local_download_directory']
# --------------------------------------------------------
print_banner('Downloading training log file "{}" ...'
.format(TRAINING_LOG_NAME))
training_log = cw.download_file(results_references['bucket'],
TRAINING_LOG_NAME,
config['local_download_directory'],
results_references['model_location'])
if training_status in ['error', 'canceled']:
# Training ended with an error or was canceled.
# Notify the user where the training log file was stored and exit.
print('The training log file "{}" was saved in "{}".'
.format(TRAINING_LOG_NAME,
config['local_download_directory']))
sys.exit(ExitCode.TRAINING_FAILED.value)
except Exception as ex:
print('Error. Download of training log file "{}" failed: {}'
.format(TRAINING_LOG_NAME, ex))
sys.exit(ExitCode.DOWNLOAD_FAILED.value)
# terminate if the "train" command was specified
# when the utility was invoked
if cmd_parameters['command'] == 'train':
print('Skipping post-processing steps.')
sys.exit(ExitCode.SUCCESS.value)
# - If training completed successfully, the trained model archive
# TRAINING_OUTPUT_ARCHIVE_NAME can now be downloaded from COS.
try:
# --------------------------------------------------------
# Download the trained model archive from the results
# bucket on COS to LOCAL_DOWNLOAD_DIRECTORY
# --------------------------------------------------------
print_banner('Downloading trained model archive "{}" ...'
.format(TRAINING_OUTPUT_ARCHIVE_NAME))
training_output = cw.download_file(results_references['bucket'],
TRAINING_OUTPUT_ARCHIVE_NAME,
config['local_download_directory'],
results_references['model_location'])
except Exception as ex:
print('Error. Trained model archive "{}" could not be '
'downloaded from Cloud Object Storage bucket "{}": {}'
.format(TRAINING_OUTPUT_ARCHIVE_NAME,
results_references['bucket'],
ex))
sys.exit(ExitCode.DOWNLOAD_FAILED.value)
# Status:
# - The trained model archive and training log file were
# downloaded to the directory identified by
# config['local_download_directory'].
# --------------------------------------------------------
# Extract the downloaded model archive
# --------------------------------------------------------
archive = os.path.join(config['local_download_directory'],
TRAINING_OUTPUT_ARCHIVE_NAME)
print_banner('Extracting trained model artifacts from "{}" ...'
.format(archive))
extraction_ok = False
try:
if tarfile.is_tarfile(archive):
tf = tarfile.open(archive,
mode='r:gz')
for file in tf.getnames():
print(file)
tf.extractall(config['local_download_directory'])
print('Trained model artifacts are located in the "{}" directory.'
.format(config['local_download_directory']))
extraction_ok = True
else:
print('Error. The downloaded file "{}" is not a valid tar file.'
.format(archive))
except FileNotFoundError:
print('Error. "{}" was not found.'.format(archive))
except tarfile.TarError as te:
print(te)
if extraction_ok is False:
sys.exit(ExitCode.EXTRACTION_FAILED.value)
# Status:
# - The trained model archive was downloaded to LOCAL_DOWNLOAD_DIRECTORY.
#   The directory structure should look as follows:
# /trained_model/<framework-name-1>/<format>/<file-1>
# /trained_model/<framework-name-1>/<format>/<file-2>
# /trained_model/<framework-name-1>/<format-2>/<subdirectory>/<file-3>
# /trained_model/<framework-name-2>/<file-4>
# -------------------------------------------------------------------
# Copy the appropriate framework and format specific artifacts
# to the final destination, where the Docker build will pick them up
# -------------------------------------------------------------------
trained_model_path = config['trained_model_path']
trained_assets_dir = os.path.join(config['local_download_directory'],
trained_model_path)
print_banner('Copying trained model artifacts from "{}" to "{}" ...'
.format(trained_assets_dir,
config['docker_model_asset_directory']))
try:
copy_dir(trained_assets_dir,
config['docker_model_asset_directory'])
except Exception as ex:
print('Error. Trained model files could not be copied: {}'.format(str(ex)))
sys.exit(ExitCode.COPY_FAILED.value)
# Status:
# - The trained model artifacts were copied to the Docker image's asset
# directory, where the model-serving microservice will load them from.
print('Done')
sys.exit(ExitCode.SUCCESS.value)
| [
"tarfile.open",
"glob.iglob",
"zipfile.ZipFile",
"time.sleep",
"sys.exit",
"os.remove",
"os.listdir",
"tarfile.is_tarfile",
"os.path.isdir",
"utils.cos.COSWrapper",
"os.path.getsize",
"utils.wml.WMLWrapper",
"os.path.isfile",
"re.sub",
"utils.config.YAMLReader",
"os.makedirs",
"os.pa... | [((6583, 6644), 'utils.debug.debug', 'debug', (['"""Using the following configuration settings: """', 'config'], {}), "('Using the following configuration settings: ', config)\n", (6588, 6644), False, 'from utils.debug import debug\n'), ((30449, 30527), 'os.path.join', 'os.path.join', (["config['local_download_directory']", 'TRAINING_OUTPUT_ARCHIVE_NAME'], {}), "(config['local_download_directory'], TRAINING_OUTPUT_ARCHIVE_NAME)\n", (30461, 30527), False, 'import os\n'), ((32108, 32176), 'os.path.join', 'os.path.join', (["config['local_download_directory']", 'trained_model_path'], {}), "(config['local_download_directory'], trained_model_path)\n", (32120, 32176), False, 'import os\n'), ((32794, 32826), 'sys.exit', 'sys.exit', (['ExitCode.SUCCESS.value'], {}), '(ExitCode.SUCCESS.value)\n', (32802, 32826), False, 'import sys\n'), ((5781, 5822), 'utils.config.YAMLReader', 'YAMLReader', (["cmd_parameters['config_file']"], {}), "(cmd_parameters['config_file'])\n", (5791, 5822), False, 'from utils.config import YAMLReader, ConfigParseError, ConfigurationError\n'), ((18262, 18306), 'os.path.isfile', 'os.path.isfile', (["config['model_code_archive']"], {}), "(config['model_code_archive'])\n", (18276, 18306), False, 'import os\n'), ((28990, 29022), 'sys.exit', 'sys.exit', (['ExitCode.SUCCESS.value'], {}), '(ExitCode.SUCCESS.value)\n', (28998, 29022), False, 'import sys\n'), ((30682, 30709), 'tarfile.is_tarfile', 'tarfile.is_tarfile', (['archive'], {}), '(archive)\n', (30700, 30709), False, 'import tarfile\n'), ((31339, 31381), 'sys.exit', 'sys.exit', (['ExitCode.EXTRACTION_FAILED.value'], {}), '(ExitCode.EXTRACTION_FAILED.value)\n', (31347, 31381), False, 'import sys\n'), ((32394, 32462), 'utils.os_util.copy_dir', 'copy_dir', (['trained_assets_dir', "config['docker_model_asset_directory']"], {}), "(trained_assets_dir, config['docker_model_asset_directory'])\n", (32402, 32462), False, 'from utils.os_util import copy_dir\n'), ((3348, 3380), 'sys.exit', 'sys.exit', (['ExitCode.SUCCESS.value'], {}), '(ExitCode.SUCCESS.value)\n', (3356, 3380), False, 'import sys\n'), ((3389, 3416), 'os.path.isfile', 'os.path.isfile', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (3403, 3416), False, 'import os\n'), ((3534, 3579), 'sys.exit', 'sys.exit', (['ExitCode.INCORRECT_INVOCATION.value'], {}), '(ExitCode.INCORRECT_INVOCATION.value)\n', (3542, 3579), False, 'import sys\n'), ((3702, 3747), 'sys.exit', 'sys.exit', (['ExitCode.INCORRECT_INVOCATION.value'], {}), '(ExitCode.INCORRECT_INVOCATION.value)\n', (3710, 3747), False, 'import sys\n'), ((4227, 4272), 'sys.exit', 'sys.exit', (['ExitCode.INCORRECT_INVOCATION.value'], {}), '(ExitCode.INCORRECT_INVOCATION.value)\n', (4235, 4272), False, 'import sys\n'), ((5437, 5471), 'sys.exit', 'sys.exit', (['ExitCode.ENV_ERROR.value'], {}), '(ExitCode.ENV_ERROR.value)\n', (5445, 5471), False, 'import sys\n'), ((6140, 6184), 'sys.exit', 'sys.exit', (['ExitCode.CONFIGURATION_ERROR.value'], {}), '(ExitCode.CONFIGURATION_ERROR.value)\n', (6148, 6184), False, 'import sys\n'), ((6357, 6401), 'sys.exit', 'sys.exit', (['ExitCode.CONFIGURATION_ERROR.value'], {}), '(ExitCode.CONFIGURATION_ERROR.value)\n', (6365, 6401), False, 'import sys\n'), ((6540, 6581), 'sys.exit', 'sys.exit', (['ExitCode.INVOCATION_ERROR.value'], {}), '(ExitCode.INVOCATION_ERROR.value)\n', (6548, 6581), False, 'import sys\n'), ((6986, 7071), 'utils.cos.COSWrapper', 'COSWrapper', (["os.environ['AWS_ACCESS_KEY_ID']", "os.environ['AWS_SECRET_ACCESS_KEY']"], {}), "(os.environ['AWS_ACCESS_KEY_ID'], 
os.environ['AWS_SECRET_ACCESS_KEY']\n )\n", (6996, 7071), False, 'from utils.cos import COSWrapper, COSWrapperError, BucketNotFoundError\n'), ((7445, 7534), 'utils.wml.WMLWrapper', 'WMLWrapper', (["os.environ['ML_ENV']", "os.environ['ML_APIKEY']", "os.environ['ML_INSTANCE']"], {}), "(os.environ['ML_ENV'], os.environ['ML_APIKEY'], os.environ[\n 'ML_INSTANCE'])\n", (7455, 7534), False, 'from utils.wml import WMLWrapper, WMLWrapperError\n'), ((8430, 8450), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (8444, 8450), False, 'import os\n'), ((8664, 8696), 'sys.exit', 'sys.exit', (['ExitCode.SUCCESS.value'], {}), '(ExitCode.SUCCESS.value)\n', (8672, 8696), False, 'import sys\n'), ((9056, 9141), 'utils.cos.COSWrapper', 'COSWrapper', (["os.environ['AWS_ACCESS_KEY_ID']", "os.environ['AWS_SECRET_ACCESS_KEY']"], {}), "(os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY']\n )\n", (9066, 9141), False, 'from utils.cos import COSWrapper, COSWrapperError, BucketNotFoundError\n'), ((15737, 15798), 'os.makedirs', 'os.makedirs', (["config['model_building_code_dir']"], {'exist_ok': '(True)'}), "(config['model_building_code_dir'], exist_ok=True)\n", (15748, 15798), False, 'import os\n'), ((18712, 18757), 'os.path.getsize', 'os.path.getsize', (["config['model_code_archive']"], {}), "(config['model_code_archive'])\n", (18727, 18757), False, 'import os\n'), ((19600, 19632), 'sys.exit', 'sys.exit', (['ExitCode.SUCCESS.value'], {}), '(ExitCode.SUCCESS.value)\n', (19608, 19632), False, 'import sys\n'), ((19896, 19985), 'utils.wml.WMLWrapper', 'WMLWrapper', (["os.environ['ML_ENV']", "os.environ['ML_APIKEY']", "os.environ['ML_INSTANCE']"], {}), "(os.environ['ML_ENV'], os.environ['ML_APIKEY'], os.environ[\n 'ML_INSTANCE'])\n", (19906, 19985), False, 'from utils.wml import WMLWrapper, WMLWrapperError\n'), ((26658, 26698), 'sys.exit', 'sys.exit', (['ExitCode.TRAINING_FAILED.value'], {}), '(ExitCode.TRAINING_FAILED.value)\n', (26666, 26698), False, 'import sys\n'), ((27625, 27638), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (27635, 27638), False, 'import time\n'), ((28601, 28641), 'sys.exit', 'sys.exit', (['ExitCode.TRAINING_FAILED.value'], {}), '(ExitCode.TRAINING_FAILED.value)\n', (28609, 28641), False, 'import sys\n'), ((28778, 28818), 'sys.exit', 'sys.exit', (['ExitCode.DOWNLOAD_FAILED.value'], {}), '(ExitCode.DOWNLOAD_FAILED.value)\n', (28786, 28818), False, 'import sys\n'), ((30080, 30120), 'sys.exit', 'sys.exit', (['ExitCode.DOWNLOAD_FAILED.value'], {}), '(ExitCode.DOWNLOAD_FAILED.value)\n', (30088, 30120), False, 'import sys\n'), ((30724, 30758), 'tarfile.open', 'tarfile.open', (['archive'], {'mode': '"""r:gz"""'}), "(archive, mode='r:gz')\n", (30736, 30758), False, 'import tarfile\n'), ((32584, 32620), 'sys.exit', 'sys.exit', (['ExitCode.COPY_FAILED.value'], {}), '(ExitCode.COPY_FAILED.value)\n', (32592, 32620), False, 'import sys\n'), ((4939, 4963), 'os.environ.get', 'os.environ.get', (['var_name'], {}), '(var_name)\n', (4953, 4963), False, 'import os\n'), ((5238, 5262), 'os.environ.get', 'os.environ.get', (['var_name'], {}), '(var_name)\n', (5252, 5262), False, 'import os\n'), ((7214, 7260), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (7222, 7260), False, 'import sys\n'), ((7800, 7845), 'sys.exit', 'sys.exit', (['ExitCode.INCORRECT_INVOCATION.value'], {}), '(ExitCode.INCORRECT_INVOCATION.value)\n', (7808, 7845), False, 'import sys\n'), ((7910, 7956), 'sys.exit', 'sys.exit', 
(['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (7918, 7956), False, 'import sys\n'), ((8090, 8136), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (8098, 8136), False, 'import sys\n'), ((8464, 8479), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (8473, 8479), False, 'import os\n'), ((10523, 10562), 'os.path.isdir', 'os.path.isdir', (["config['local_data_dir']"], {}), "(config['local_data_dir'])\n", (10536, 10562), False, 'import os\n'), ((10621, 10662), 'os.path.abspath', 'os.path.abspath', (["config['local_data_dir']"], {}), "(config['local_data_dir'])\n", (10636, 10662), False, 'import os\n'), ((10774, 10813), 'os.path.isdir', 'os.path.isdir', (["config['local_data_dir']"], {}), "(config['local_data_dir'])\n", (10787, 10813), False, 'import os\n'), ((10848, 10903), 'os.path.join', 'os.path.join', (["config['local_data_dir']", '"""initial_model"""'], {}), "(config['local_data_dir'], 'initial_model')\n", (10860, 10903), False, 'import os\n'), ((11083, 11139), 'glob.iglob', 'glob.iglob', (["(initial_model_path + '/**/*')"], {'recursive': '(True)'}), "(initial_model_path + '/**/*', recursive=True)\n", (11093, 11139), False, 'import glob\n'), ((13107, 13169), 'glob.iglob', 'glob.iglob', (["(config['local_data_dir'] + '/**/*')"], {'recursive': '(True)'}), "(config['local_data_dir'] + '/**/*', recursive=True)\n", (13117, 13169), False, 'import glob\n'), ((14562, 14608), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (14570, 14608), False, 'import sys\n'), ((14697, 14743), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (14705, 14743), False, 'import sys\n'), ((14830, 14876), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (14838, 14876), False, 'import sys\n'), ((15000, 15046), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (15008, 15046), False, 'import sys\n'), ((15967, 16013), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (15975, 16013), False, 'import sys\n'), ((16026, 16071), 'os.listdir', 'os.listdir', (["config['model_building_code_dir']"], {}), "(config['model_building_code_dir'])\n", (16036, 16071), False, 'import os\n'), ((17961, 18010), 're.sub', 're.sub', (['""".zip$"""', '""""""', "config['model_code_archive']"], {}), "('.zip$', '', config['model_code_archive'])\n", (17967, 18010), False, 'import re\n'), ((18207, 18253), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (18215, 18253), False, 'import sys\n'), ((18484, 18526), 'zipfile.ZipFile', 'ZipFile', (["config['model_code_archive']", '"""r"""'], {}), "(config['model_code_archive'], 'r')\n", (18491, 18526), False, 'from zipfile import ZipFile\n'), ((19362, 19408), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (19370, 19408), False, 'import sys\n'), ((20091, 20137), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (20099, 20137), False, 'import sys\n'), ((23298, 23338), 'sys.exit', 'sys.exit', (['ExitCode.TRAINING_FAILED.value'], {}), '(ExitCode.TRAINING_FAILED.value)\n', 
(23306, 23338), False, 'import sys\n'), ((11195, 11215), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (11209, 11215), False, 'import os\n'), ((12359, 12405), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (12367, 12405), False, 'import sys\n'), ((12481, 12520), 'os.path.isdir', 'os.path.isdir', (["config['local_data_dir']"], {}), "(config['local_data_dir'])\n", (12494, 12520), False, 'import os\n'), ((12673, 12719), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (12681, 12719), False, 'import sys\n'), ((12986, 13037), 'os.path.join', 'os.path.join', (["config['local_data_dir']", '"""README.md"""'], {}), "(config['local_data_dir'], 'README.md')\n", (12998, 13037), False, 'import os\n'), ((13333, 13353), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (13347, 13353), False, 'import os\n'), ((14127, 14173), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (14135, 14173), False, 'import sys\n'), ((16779, 16825), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (16787, 16825), False, 'import sys\n'), ((17420, 17466), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (17428, 17466), False, 'import sys\n'), ((17561, 17607), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (17569, 17607), False, 'import sys\n'), ((17752, 17798), 'sys.exit', 'sys.exit', (['ExitCode.PRE_PROCESSING_FAILED.value'], {}), '(ExitCode.PRE_PROCESSING_FAILED.value)\n', (17760, 17798), False, 'import sys\n'), ((25913, 25945), 'sys.exit', 'sys.exit', (['ExitCode.SUCCESS.value'], {}), '(ExitCode.SUCCESS.value)\n', (25921, 25945), False, 'import sys\n'), ((26403, 26443), 'sys.exit', 'sys.exit', (['ExitCode.TRAINING_FAILED.value'], {}), '(ExitCode.TRAINING_FAILED.value)\n', (26411, 26443), False, 'import sys\n')] |
"""A simple Google-style logging wrapper."""
import logging
import time
import traceback
import os
import sys
import gflags as flags
FLAGS = flags.FLAGS
def format_message(record):
try:
record_message = "%s" % (record.msg % record.args)
except TypeError:
record_message = record.msg
return record_message
class GlogFormatter(logging.Formatter):
LEVEL_MAP = {
logging.FATAL: "F", # FATAL is alias of CRITICAL
logging.ERROR: "E",
logging.WARN: "W",
logging.INFO: "I",
logging.DEBUG: "D",
}
def __init__(self):
logging.Formatter.__init__(self)
def format(self, record):
try:
level = GlogFormatter.LEVEL_MAP[record.levelno]
except KeyError:
level = "?"
date = time.localtime(record.created)
date_usec = (record.created - int(record.created)) * 1e6
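        # Assemble a glog-style prefix, e.g. "I0415 12:03:44.000123 4567 module.py:42] message"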
record_message = "%c%02d%02d %02d:%02d:%02d.%06d %s %s:%d] %s" % (
level,
date.tm_mon,
date.tm_mday,
date.tm_hour,
date.tm_min,
date.tm_sec,
date_usec,
record.process if record.process is not None else "?????",
record.filename,
record.lineno,
format_message(record),
)
record.getMessage = lambda: record_message
return logging.Formatter.format(self, record)
class Logger(object):
def __init__(self, name, filename=None):
self.logger = logging.getLogger(name)
init(self.logger, filename)
self.debug = self.logger.debug
self.info = self.logger.info
self.warning = self.logger.warning
self.warn = self.logger.warning
self.error = self.logger.error
self.exception = self.logger.exception
self.fatal = self.logger.fatal
self.log = self.logger.log
def setLevel(self, newlevel):
setLevel(newlevel, self.logger)
debug = logging.debug
info = logging.info
warning = logging.warning
warn = logging.warning
error = logging.error
exception = logging.exception
fatal = logging.fatal
log = logging.log
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
WARN = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
_level_names = {
DEBUG: "DEBUG",
INFO: "INFO",
WARN: "WARN",
ERROR: "ERROR",
FATAL: "FATAL",
}
_level_letters = [name[0] for name in _level_names.values()]
GLOG_PREFIX_REGEX = (
(
r"""
(?x) ^
(?P<severity>[%s])
(?P<month>\d\d)(?P<day>\d\d)\s
(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)
\.(?P<microsecond>\d{6})\s+
(?P<process_id>-?\d+)\s
(?P<filename>[a-zA-Z<_][\w._<>-]+):(?P<line>\d+)
\]\s
"""
)
% "".join(_level_letters)
)
"""Regex you can use to parse glog line prefixes."""
global_logger = logging.getLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
stderr_handler = logging.StreamHandler(sys.stderr)
file_handlers = dict()
def setLevel(newlevel, logger=global_logger):
logger.setLevel(newlevel)
logger.debug("Log level set to %s", newlevel)
def init(logger=None, filename=None):
if logger is None:
logger = global_logger
logger.propagate = False
if filename is None:
handler = stderr_handler
elif filename == "stderr":
handler = stderr_handler
elif filename == "stdout":
handler = stdout_handler
elif filename in file_handlers:
handler = file_handlers[filename]
else:
handler = logging.FileHandler(filename)
file_handlers[filename] = handler
handler.setFormatter(GlogFormatter())
logger.addHandler(handler)
class CaptureWarningsFlag(flags.BooleanFlag):
def __init__(self):
flags.BooleanFlag.__init__(
self,
"glog_capture_warnings",
True,
"Redirect warnings to log.warn messages",
)
def Parse(self, arg):
flags.BooleanFlag.Parse(self, arg)
logging.captureWarnings(self.value)
flags.DEFINE_flag(CaptureWarningsFlag())
class VerbosityParser(flags.ArgumentParser):
"""Sneakily use gflags parsing to get a simple callback."""
def Parse(self, arg):
try:
intarg = int(arg)
# Look up the name for this level (DEBUG, INFO, etc) if it exists
try:
level = logging._levelNames.get(intarg, intarg)
except AttributeError: # This was renamed somewhere b/w 2.7 and 3.4
level = logging._levelToName.get(intarg, intarg)
except ValueError:
level = arg
setLevel(level)
return level
flags.DEFINE(
parser=VerbosityParser(),
serializer=flags.ArgumentSerializer(),
name="verbosity",
default=logging.INFO,
help="Logging verbosity",
)
init(global_logger)
# Define functions emulating C++ glog check-macros
# https://htmlpreview.github.io/?https://github.com/google/glog/master/doc/glog.html#check
def format_stacktrace(stack):
"""Print a stack trace that is easier to read.
* Reduce paths to basename component
* Truncates the part of the stack after the check failure
"""
lines = []
for _, f in enumerate(stack):
fname = os.path.basename(f[0])
line = "\t%s:%d\t%s" % (fname + "::" + f[2], f[1], f[3])
lines.append(line)
return lines
class FailedCheckException(AssertionError):
"""Exception with message indicating check-failure location and values."""
def check_failed(message):
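    """Log where a check failed, plus a trimmed stack trace, then re-raise."""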
stack = traceback.extract_stack()
stack = stack[0:-2]
stacktrace_lines = format_stacktrace(stack)
filename, line_num, _, _ = stack[-1]
try:
raise FailedCheckException(message)
except FailedCheckException:
log_record = global_logger.makeRecord(
"CRITICAL", 50, filename, line_num, message, None, None
)
stderr_handler.handle(log_record)
log_record = global_logger.makeRecord(
"DEBUG", 10, filename, line_num, "Check failed here:", None, None
)
stderr_handler.handle(log_record)
for line in stacktrace_lines:
log_record = global_logger.makeRecord(
"DEBUG", 10, filename, line_num, line, None, None
)
stderr_handler.handle(log_record)
raise
return
def check(condition, message=None):
"""Raise exception with message if condition is False."""
if not condition:
if message is None:
message = "Check failed."
check_failed(message)
def check_eq(obj1, obj2, message=None):
"""Raise exception with message if obj1 != obj2."""
if obj1 != obj2:
if message is None:
message = "Check failed: %s != %s" % (str(obj1), str(obj2))
check_failed(message)
def check_ne(obj1, obj2, message=None):
"""Raise exception with message if obj1 == obj2."""
if obj1 == obj2:
if message is None:
message = "Check failed: %s == %s" % (str(obj1), str(obj2))
check_failed(message)
def check_le(obj1, obj2, message=None):
"""Raise exception with message if not (obj1 <= obj2)."""
if obj1 > obj2:
if message is None:
message = "Check failed: %s > %s" % (str(obj1), str(obj2))
check_failed(message)
def check_ge(obj1, obj2, message=None):
"""Raise exception with message unless (obj1 >= obj2)."""
if obj1 < obj2:
if message is None:
message = "Check failed: %s < %s" % (str(obj1), str(obj2))
check_failed(message)
def check_lt(obj1, obj2, message=None):
"""Raise exception with message unless (obj1 < obj2)."""
if obj1 >= obj2:
if message is None:
message = "Check failed: %s >= %s" % (str(obj1), str(obj2))
check_failed(message)
def check_gt(obj1, obj2, message=None):
"""Raise exception with message unless (obj1 > obj2)."""
if obj1 <= obj2:
if message is None:
message = "Check failed: %s <= %s" % (str(obj1), str(obj2))
check_failed(message)
def check_notnone(obj, message=None):
"""Raise exception with message if obj is None."""
if obj is None:
if message is None:
message = "Check failed: Object is None."
check_failed(message)
| [
"logging.getLogger",
"logging.StreamHandler",
"traceback.extract_stack",
"logging.captureWarnings",
"gflags.ArgumentSerializer",
"logging.Formatter.format",
"gflags.BooleanFlag.__init__",
"logging._levelToName.get",
"logging.Formatter.__init__",
"os.path.basename",
"logging.FileHandler",
"logg... | [((2868, 2887), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2885, 2887), False, 'import logging\n'), ((2905, 2938), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (2926, 2938), False, 'import logging\n'), ((2956, 2989), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stderr'], {}), '(sys.stderr)\n', (2977, 2989), False, 'import logging\n'), ((5576, 5601), 'traceback.extract_stack', 'traceback.extract_stack', ([], {}), '()\n', (5599, 5601), False, 'import traceback\n'), ((605, 637), 'logging.Formatter.__init__', 'logging.Formatter.__init__', (['self'], {}), '(self)\n', (631, 637), False, 'import logging\n'), ((806, 836), 'time.localtime', 'time.localtime', (['record.created'], {}), '(record.created)\n', (820, 836), False, 'import time\n'), ((1385, 1423), 'logging.Formatter.format', 'logging.Formatter.format', (['self', 'record'], {}), '(self, record)\n', (1409, 1423), False, 'import logging\n'), ((1515, 1538), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1532, 1538), False, 'import logging\n'), ((3781, 3890), 'gflags.BooleanFlag.__init__', 'flags.BooleanFlag.__init__', (['self', '"""glog_capture_warnings"""', '(True)', '"""Redirect warnings to log.warn messages"""'], {}), "(self, 'glog_capture_warnings', True,\n 'Redirect warnings to log.warn messages')\n", (3807, 3890), True, 'import gflags as flags\n'), ((3981, 4015), 'gflags.BooleanFlag.Parse', 'flags.BooleanFlag.Parse', (['self', 'arg'], {}), '(self, arg)\n', (4004, 4015), True, 'import gflags as flags\n'), ((4024, 4059), 'logging.captureWarnings', 'logging.captureWarnings', (['self.value'], {}), '(self.value)\n', (4047, 4059), False, 'import logging\n'), ((4746, 4772), 'gflags.ArgumentSerializer', 'flags.ArgumentSerializer', ([], {}), '()\n', (4770, 4772), True, 'import gflags as flags\n'), ((5278, 5300), 'os.path.basename', 'os.path.basename', (['f[0]'], {}), '(f[0])\n', (5294, 5300), False, 'import os\n'), ((4403, 4442), 'logging._levelNames.get', 'logging._levelNames.get', (['intarg', 'intarg'], {}), '(intarg, intarg)\n', (4426, 4442), False, 'import logging\n'), ((3556, 3585), 'logging.FileHandler', 'logging.FileHandler', (['filename'], {}), '(filename)\n', (3575, 3585), False, 'import logging\n'), ((4548, 4588), 'logging._levelToName.get', 'logging._levelToName.get', (['intarg', 'intarg'], {}), '(intarg, intarg)\n', (4572, 4588), False, 'import logging\n')] |
import unittest
from typing import Any
from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task
from coiny.utils import NullCoinPrice
class HasJson:
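    """Minimal async stand-in for an HTTP response: a context manager whose json() coroutine returns canned data."""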
def __init__(self, data) -> None:
self.data = data
async def __aenter__(self):
return self
async def __aexit__(self, *args, **kwargs):
pass
async def json(self):
return self.data
class PriceTaskTests(unittest.IsolatedAsyncioTestCase):
async def test_price_task_empty_queue(self):
queue = CoinyQueue()
session = CoinySession()
result = await price_task(queue, session)
self.assertEqual(NullCoinPrice, result)
async def test_price_task_queue(self):
class NoGetSession(CoinySession):
"""HACK: Not a good idea to inherit from CoinySession"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.mock_url = ""
def get(
self, url: str, *, allow_redirects: bool = True, **kwargs: Any
) -> HasJson:
self.mock_url = f"called:{url}"
return HasJson({"mycoin": {"XYZ": 3.4}})
queue = CoinyQueue()
await queue.put(("mycoin", "XYZ", "https://myurl"))
async with NoGetSession() as session:
result = await price_task(queue, session)
expected = CoinPrice(fiat="XYZ", coin="mycoin", rate=3.4)
self.assertEqual(expected, result)
self.assertEqual("called:https://myurl", session.mock_url)
async def test_price_task_mock_eth(self):
mock_url = "https://run.mocky.io/v3/09750cfe-39a5-4d31-9651-2292765a8fe3"
# returns -> {"ethereum": {"eur": 3295.23}}
queue = CoinyQueue()
await queue.put(("ethereum", "eur", mock_url))
async with CoinySession() as session:
result = await price_task(queue, session)
expected = CoinPrice(fiat="eur", coin="ethereum", rate=3295.23)
self.assertEqual(expected, result)
async def test_price_task_mock_eth_invalid(self):
mock_url = "https://run.mocky.io/v3/09750cfe-39a5-4d31-9651-2292765a8fe3"
queue = CoinyQueue()
await queue.put(("bitcoin", "gbp", mock_url))
async with CoinySession() as session:
result = await price_task(queue, session)
self.assertEqual(NullCoinPrice, result)
async def test_price_task_real_eth(self):
queue = CoinyQueue()
await queue.put(("ethereum", "eur", price_now_url("ethereum", "eur")))
async with CoinySession() as session:
result = await price_task(queue, session)
# no way to test the live price of course
half_expected = CoinPrice(fiat="eur", coin="ethereum", rate=0.0)
self.assertEqual(half_expected.fiat, result.fiat)
self.assertEqual(half_expected.coin, result.coin)
__all__ = ["PriceTaskTests"]
| [
"coiny.core.CoinPrice",
"coiny.core.CoinyQueue",
"coiny.core.price_now_url",
"coiny.core.CoinySession",
"coiny.core.price_task"
] | [((534, 546), 'coiny.core.CoinyQueue', 'CoinyQueue', ([], {}), '()\n', (544, 546), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((565, 579), 'coiny.core.CoinySession', 'CoinySession', ([], {}), '()\n', (577, 579), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((1225, 1237), 'coiny.core.CoinyQueue', 'CoinyQueue', ([], {}), '()\n', (1235, 1237), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((1786, 1798), 'coiny.core.CoinyQueue', 'CoinyQueue', ([], {}), '()\n', (1796, 1798), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((2232, 2244), 'coiny.core.CoinyQueue', 'CoinyQueue', ([], {}), '()\n', (2242, 2244), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((2515, 2527), 'coiny.core.CoinyQueue', 'CoinyQueue', ([], {}), '()\n', (2525, 2527), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((603, 629), 'coiny.core.price_task', 'price_task', (['queue', 'session'], {}), '(queue, session)\n', (613, 629), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((1423, 1469), 'coiny.core.CoinPrice', 'CoinPrice', ([], {'fiat': '"""XYZ"""', 'coin': '"""mycoin"""', 'rate': '(3.4)'}), "(fiat='XYZ', coin='mycoin', rate=3.4)\n", (1432, 1469), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((1874, 1888), 'coiny.core.CoinySession', 'CoinySession', ([], {}), '()\n', (1886, 1888), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((1978, 2030), 'coiny.core.CoinPrice', 'CoinPrice', ([], {'fiat': '"""eur"""', 'coin': '"""ethereum"""', 'rate': '(3295.23)'}), "(fiat='eur', coin='ethereum', rate=3295.23)\n", (1987, 2030), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((2319, 2333), 'coiny.core.CoinySession', 'CoinySession', ([], {}), '()\n', (2331, 2333), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((2627, 2641), 'coiny.core.CoinySession', 'CoinySession', ([], {}), '()\n', (2639, 2641), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((2790, 2838), 'coiny.core.CoinPrice', 'CoinPrice', ([], {'fiat': '"""eur"""', 'coin': '"""ethereum"""', 'rate': '(0.0)'}), "(fiat='eur', coin='ethereum', rate=0.0)\n", (2799, 2838), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((1372, 1398), 'coiny.core.price_task', 'price_task', (['queue', 'session'], {}), '(queue, session)\n', (1382, 1398), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((1928, 1954), 'coiny.core.price_task', 'price_task', (['queue', 'session'], {}), '(queue, session)\n', (1938, 1954), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((2373, 2399), 'coiny.core.price_task', 'price_task', (['queue', 'session'], {}), '(queue, session)\n', (2383, 2399), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n'), ((2681, 2707), 'coiny.core.price_task', 'price_task', (['queue', 'session'], {}), '(queue, session)\n', (2691, 2707), False, 'from coiny.core import CoinPrice, 
CoinyQueue, CoinySession, price_now_url, price_task\n'), ((2572, 2604), 'coiny.core.price_now_url', 'price_now_url', (['"""ethereum"""', '"""eur"""'], {}), "('ethereum', 'eur')\n", (2585, 2604), False, 'from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task\n')] |
#!/usr/bin/env python
import math
import os
import numpy as np
import time
import sys
import copy
import rospy
import moveit_msgs.msg
import geometry_msgs.msg
import random
import csv
from sensor_msgs.msg import JointState
from gazebo_msgs.msg import LinkStates
from gazebo_msgs.msg import LinkState
from std_msgs.msg import Float64
from std_msgs.msg import String
from sensor_msgs.msg import Joy
import moveit_commander
from panda_rl.srv import StepAction, StepActionResponse
group_name = "panda_arm_hand"
move_group = moveit_commander.MoveGroupCommander(group_name)
quat_goal = np.array([1, 0, 0.0075, 0])
def vector2points(v, u):
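    # Displacement vector pointing from point v to point u, rounded to 5 decimal places.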
v = np.array(v)
u = np.array(u)
vector = u - v
vector = np.round(vector, 5)
return vector
def get_hand_position():
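    # Average the positions of Gazebo link indices 9 and 10 (presumably the two gripper finger links).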
msg = rospy.wait_for_message('/gazebo/link_states', LinkStates)
hand_positionx = (msg.pose[9].position.x + msg.pose[10].position.x) / 2
hand_positiony = (msg.pose[9].position.y + msg.pose[10].position.y) / 2
hand_positionz = (msg.pose[9].position.z + msg.pose[10].position.z) / 2
hand_position = [hand_positionx, hand_positiony, hand_positionz]
hand_position = np.round(hand_position, 5)
return hand_position
def get_hand_orientation():
msg = rospy.wait_for_message('/gazebo/link_states', LinkStates)
hand_orientation_x = (msg.pose[9].orientation.x + msg.pose[10].orientation.x) / 2
hand_orientation_y = (msg.pose[9].orientation.y + msg.pose[10].orientation.y) / 2
hand_orientation_z = (msg.pose[9].orientation.z + msg.pose[10].orientation.z) / 2
hand_orientation_w = (msg.pose[9].orientation.w + msg.pose[10].orientation.w) / 2
hand_orientation = [hand_orientation_x, hand_orientation_y, hand_orientation_z, hand_orientation_w]
hand_orientation = np.round(hand_orientation, 5)
return hand_orientation
def goal_distance(x, y):
x = np.array(x)
y = np.array(y)
distance = np.linalg.norm(x-y)
distance = np.round(distance, 5)
return distance
def take_action(msg):
done = False
goal = msg.goal
joint_state = move_group.get_current_joint_values()
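    # Apply the policy action to the first six arm joints; dividing by 20 keeps each increment small.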
joint_state[0] = joint_state[0] + (msg.action[0] / 20)
joint_state[1] = joint_state[1] + (msg.action[1] / 20)
joint_state[2] = joint_state[2] + (msg.action[2] / 20)
joint_state[3] = joint_state[3] + (msg.action[3] / 20)
joint_state[4] = joint_state[4] + (msg.action[4] / 20)
joint_state[5] = joint_state[5] + (msg.action[5] / 20)
joint_state[7] = 0.04
joint_state[8] = 0.04
if joint_state[0] < joint1_threshold_min or joint_state[0] > joint1_threshold_max \
or joint_state[1] < joint2_threshold_min or joint_state[1] > joint2_threshold_max \
or joint_state[2] < joint3_threshold_min or joint_state[2] > joint3_threshold_max \
or joint_state[3] < joint4_threshold_min or joint_state[3] > joint4_threshold_max \
or joint_state[4] < joint5_threshold_min or joint_state[4] > joint5_threshold_max \
or joint_state[5] < joint6_threshold_min or joint_state[5] > joint6_threshold_max:
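        # A joint limit would be violated: end the episode immediately with a fixed penalty.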
hand_position = get_hand_position()
vector = vector2points(hand_position, goal)
obs = joint_state[0:7]
obs = np.round(obs, 5)
obs = np.append(obs, vector)
done = True
reward = -50
return StepActionResponse(obs=obs, reward=reward, done=done)
else:
move_group.go(joint_state, wait=True)
move_group.stop()
joint_state = move_group.get_current_joint_values()
obs = joint_state[0:7]
obs = np.round(obs, 5)
hand_position = get_hand_position()
quat = get_hand_orientation()
quat_reward = np.linalg.norm(quat_goal - quat)
d = goal_distance(hand_position, goal)
vector = vector2points(hand_position, goal)
z = hand_position[2] - goal[2]
obs = np.append(obs, vector)
if d < 0.02 and z > 0:
reward = 0
print("Action: ", msg.action)
print("Handpos: ", hand_position)
print("Goal: ", goal)
print("Observation ", obs)
print("reward target reached: ", reward)
done = True
group_name_gripper = "hand"
move_group_gripper = moveit_commander.MoveGroupCommander(group_name_gripper)
joint_values = move_group_gripper.get_current_joint_values()
joint_values[0] = 0.02
joint_values[1] = 0.02
move_group_gripper.go(joint_values, wait=True)
move_group_gripper.stop()
return StepActionResponse(obs=obs, reward=reward, done=done)
        elif d > 0.08 and z < 0.05 or z < 0:  # encourages approaching from above: penalize when the end effector is still far from the goal but already at a similar height, or below it
reward = 5 * (-d - quat_reward)
return StepActionResponse(obs=obs, reward=reward, done=done)
else:
reward = (-d - quat_reward)
#print("Action: ", msg.action)
print("Handpos: ", hand_position)
print("Goal: ", goal)
#print("Observation ", obs)
print("reward: ", reward)
print("Distance", d)
return StepActionResponse(obs=obs, reward=reward, done=done)
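# Joint position limits (presumably the Panda's, in radians); actions that would exceed them end the episode with a penalty.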
joint1_threshold_min = -2.8973
joint2_threshold_min = -1.7628
joint3_threshold_min = -2.8973
joint4_threshold_min = -3.0718
joint5_threshold_min = -2.8973
joint6_threshold_min = -0.0175
joint1_threshold_max = 2.8973
joint2_threshold_max = 1.7628
joint3_threshold_max = 2.8973
joint4_threshold_max = -0.0698
joint5_threshold_max = 2.8973
joint6_threshold_max = 3.7525
rospy.init_node('step_service', anonymous=False)
print("step_nodeaktiv")
s = rospy.Service('step_env', StepAction, take_action)
rospy.spin()
| [
"panda_rl.srv.StepActionResponse",
"rospy.init_node",
"rospy.Service",
"rospy.wait_for_message",
"numpy.append",
"moveit_commander.MoveGroupCommander",
"numpy.array",
"rospy.spin",
"numpy.linalg.norm",
"numpy.round"
] | [((523, 570), 'moveit_commander.MoveGroupCommander', 'moveit_commander.MoveGroupCommander', (['group_name'], {}), '(group_name)\n', (558, 570), False, 'import moveit_commander\n'), ((583, 610), 'numpy.array', 'np.array', (['[1, 0, 0.0075, 0]'], {}), '([1, 0, 0.0075, 0])\n', (591, 610), True, 'import numpy as np\n'), ((5634, 5682), 'rospy.init_node', 'rospy.init_node', (['"""step_service"""'], {'anonymous': '(False)'}), "('step_service', anonymous=False)\n", (5649, 5682), False, 'import rospy\n'), ((5711, 5761), 'rospy.Service', 'rospy.Service', (['"""step_env"""', 'StepAction', 'take_action'], {}), "('step_env', StepAction, take_action)\n", (5724, 5761), False, 'import rospy\n'), ((5762, 5774), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (5772, 5774), False, 'import rospy\n'), ((646, 657), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (654, 657), True, 'import numpy as np\n'), ((666, 677), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (674, 677), True, 'import numpy as np\n'), ((710, 729), 'numpy.round', 'np.round', (['vector', '(5)'], {}), '(vector, 5)\n', (718, 729), True, 'import numpy as np\n'), ((785, 842), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/gazebo/link_states"""', 'LinkStates'], {}), "('/gazebo/link_states', LinkStates)\n", (807, 842), False, 'import rospy\n'), ((1160, 1186), 'numpy.round', 'np.round', (['hand_position', '(5)'], {}), '(hand_position, 5)\n', (1168, 1186), True, 'import numpy as np\n'), ((1251, 1308), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/gazebo/link_states"""', 'LinkStates'], {}), "('/gazebo/link_states', LinkStates)\n", (1273, 1308), False, 'import rospy\n'), ((1780, 1809), 'numpy.round', 'np.round', (['hand_orientation', '(5)'], {}), '(hand_orientation, 5)\n', (1788, 1809), True, 'import numpy as np\n'), ((1873, 1884), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1881, 1884), True, 'import numpy as np\n'), ((1893, 1904), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1901, 1904), True, 'import numpy as np\n'), ((1920, 1941), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - y)'], {}), '(x - y)\n', (1934, 1941), True, 'import numpy as np\n'), ((1955, 1976), 'numpy.round', 'np.round', (['distance', '(5)'], {}), '(distance, 5)\n', (1963, 1976), True, 'import numpy as np\n'), ((3211, 3227), 'numpy.round', 'np.round', (['obs', '(5)'], {}), '(obs, 5)\n', (3219, 3227), True, 'import numpy as np\n'), ((3242, 3264), 'numpy.append', 'np.append', (['obs', 'vector'], {}), '(obs, vector)\n', (3251, 3264), True, 'import numpy as np\n'), ((3321, 3374), 'panda_rl.srv.StepActionResponse', 'StepActionResponse', ([], {'obs': 'obs', 'reward': 'reward', 'done': 'done'}), '(obs=obs, reward=reward, done=done)\n', (3339, 3374), False, 'from panda_rl.srv import StepAction, StepActionResponse\n'), ((3564, 3580), 'numpy.round', 'np.round', (['obs', '(5)'], {}), '(obs, 5)\n', (3572, 3580), True, 'import numpy as np\n'), ((3685, 3717), 'numpy.linalg.norm', 'np.linalg.norm', (['(quat_goal - quat)'], {}), '(quat_goal - quat)\n', (3699, 3717), True, 'import numpy as np\n'), ((3871, 3893), 'numpy.append', 'np.append', (['obs', 'vector'], {}), '(obs, vector)\n', (3880, 3893), True, 'import numpy as np\n'), ((4260, 4315), 'moveit_commander.MoveGroupCommander', 'moveit_commander.MoveGroupCommander', (['group_name_gripper'], {}), '(group_name_gripper)\n', (4295, 4315), False, 'import moveit_commander\n'), ((4576, 4629), 'panda_rl.srv.StepActionResponse', 'StepActionResponse', ([], {'obs': 'obs', 'reward': 'reward', 'done': 'done'}), 
'(obs=obs, reward=reward, done=done)\n', (4594, 4629), False, 'from panda_rl.srv import StepAction, StepActionResponse\n'), ((4846, 4899), 'panda_rl.srv.StepActionResponse', 'StepActionResponse', ([], {'obs': 'obs', 'reward': 'reward', 'done': 'done'}), '(obs=obs, reward=reward, done=done)\n', (4864, 4899), False, 'from panda_rl.srv import StepAction, StepActionResponse\n'), ((5208, 5261), 'panda_rl.srv.StepActionResponse', 'StepActionResponse', ([], {'obs': 'obs', 'reward': 'reward', 'done': 'done'}), '(obs=obs, reward=reward, done=done)\n', (5226, 5261), False, 'from panda_rl.srv import StepAction, StepActionResponse\n')] |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Base command.
"""
import re
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Union
import numpy as np
from qiskit.pulse.exceptions import PulseError
from qiskit.pulse.channels import Channel
class MetaCount(ABCMeta):
"""Meta class to count class instances."""
def __new__(mcs, name, bases, namespace, **_):
new_cls = super(MetaCount, mcs).__new__(mcs, name, bases, namespace)
new_cls.instances_counter = 0
return new_cls
class Command(metaclass=MetaCount):
"""Abstract command class."""
# Counter for the number of instances in this class
prefix = 'c'
@abstractmethod
def __init__(self, duration: Union[int, np.integer] = None):
"""Create a new command.
Args:
duration: Duration of this command.
Raises:
PulseError: when duration is not number of points
"""
if isinstance(duration, (int, np.integer)):
self._duration = int(duration)
else:
raise PulseError('Pulse duration should be integer.')
self._name = Command.create_name()
@classmethod
def create_name(cls, name: str = None) -> str:
"""Autogenerate names for pulse commands."""
if name is None:
try:
name = '%s%i' % (cls.prefix, cls.instances_counter) # pylint: disable=E1101
except TypeError:
raise PulseError("prefix and counter must be non-None when name is None.")
else:
try:
name = str(name)
except Exception:
raise PulseError("The pulse command name should be castable to a string "
"(or None for autogenerate a name).")
name_format = re.compile('[a-zA-Z][a-zA-Z0-9_]*')
if name_format.match(name) is None:
raise PulseError("%s is an invalid OpenPulse command name." % name)
cls.instances_counter += 1 # pylint: disable=E1101
return name
@property
def duration(self) -> int:
"""Duration of this command."""
return self._duration
@property
def name(self) -> str:
"""Name of this command."""
return self._name
@abstractmethod
def to_instruction(self, command, *channels: List[Channel],
name: Optional[str] = None):
"""Create an instruction from command.
Returns:
Instruction
"""
pass
def __call__(self, *args, **kwargs):
"""Creates an Instruction obtained from call to `to_instruction` wrapped in a Schedule."""
return self.to_instruction(*args, **kwargs)
def __eq__(self, other: 'Command'):
"""Two Commands are the same if they are of the same type
        and have the same duration.
Args:
other: other Command
Returns:
bool: are self and other equal
"""
return (type(self) is type(other)) and (self.duration == other.duration)
def __hash__(self):
return hash((type(self), self.duration, self.name))
def __repr__(self):
return '%s(duration=%d, name="%s")' % (self.__class__.__name__,
self.duration,
self.name)
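# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original module): a
# minimal concrete subclass showing the auto-naming and duration validation
# defined above.  `SampleCommand` is a hypothetical name; a real command would
# build and return a pulse Instruction in `to_instruction`, omitted here.
class SampleCommand(Command):
    """Toy command used only to illustrate the abstract base class."""
    prefix = 'sample'
    def __init__(self, duration: int, name: str = None):
        super().__init__(duration=duration)
        self._name = SampleCommand.create_name(name)
    def to_instruction(self, command, *channels, name=None):
        raise NotImplementedError("sketch only; real commands return an Instruction here")
# Example: the first SampleCommand(10) is auto-named 'sample0', and two
# commands of the same type with equal durations compare equal.
# ---------------------------------------------------------------------------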
| [
"qiskit.pulse.exceptions.PulseError",
"re.compile"
] | [((1541, 1588), 'qiskit.pulse.exceptions.PulseError', 'PulseError', (['"""Pulse duration should be integer."""'], {}), "('Pulse duration should be integer.')\n", (1551, 1588), False, 'from qiskit.pulse.exceptions import PulseError\n'), ((2292, 2327), 're.compile', 're.compile', (['"""[a-zA-Z][a-zA-Z0-9_]*"""'], {}), "('[a-zA-Z][a-zA-Z0-9_]*')\n", (2302, 2327), False, 'import re\n'), ((2398, 2459), 'qiskit.pulse.exceptions.PulseError', 'PulseError', (["('%s is an invalid OpenPulse command name.' % name)"], {}), "('%s is an invalid OpenPulse command name.' % name)\n", (2408, 2459), False, 'from qiskit.pulse.exceptions import PulseError\n'), ((1942, 2010), 'qiskit.pulse.exceptions.PulseError', 'PulseError', (['"""prefix and counter must be non-None when name is None."""'], {}), "('prefix and counter must be non-None when name is None.')\n", (1952, 2010), False, 'from qiskit.pulse.exceptions import PulseError\n'), ((2127, 2239), 'qiskit.pulse.exceptions.PulseError', 'PulseError', (['"""The pulse command name should be castable to a string (or None for autogenerate a name)."""'], {}), "(\n 'The pulse command name should be castable to a string (or None for autogenerate a name).'\n )\n", (2137, 2239), False, 'from qiskit.pulse.exceptions import PulseError\n')] |
import unittest
import numpy as np
import pandas as pd
import mlsurvey as mls
class TestData(unittest.TestCase):
def test_to_dict_dict_should_be_set(self):
"""
        :test : mlsurvey.sl.models.DataPandas.to_dict()
:condition : x,y, y_pred data are filled.
:main_result : the dictionary generated is the same as expected
"""
x = np.array([[1, 2, 3], [4, 5, 6]])
y = np.array([0, 1])
y_pred = np.array([1, 0])
data_array = np.concatenate((x, np.array([y]).T, np.array([y_pred]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = mls.sl.models.DataPandas(df, df_contains='xyypred')
expected = {'df_contains': 'xyypred',
'y_col_name': 'target',
'y_pred_col_name': 'target_pred'}
result = data.to_dict()
self.assertDictEqual(expected, result)
def test_from_dict_df_empty(self):
"""
        :test : mlsurvey.sl.models.DataPandas.from_dict()
:condition : the input dict is set and an empty dataframe is given.
:main_result : a ModelError occurs
"""
df = pd.DataFrame(data=np.array([]))
d = None
input_dict = {'df_contains': 'xyypred',
'y_col_name': 'target',
'y_pred_col_name': 'target_pred'}
try:
d = mls.sl.models.DataPandas.from_dict(input_dict, df)
self.assertTrue(False)
except mls.exceptions.ModelError:
self.assertIsNone(d)
self.assertTrue(True)
def test_from_dict_dict_empty(self):
"""
        :test : mlsurvey.sl.models.DataPandas.from_dict()
        :condition : the input dict does not contain all keys and a full dataframe is given
:main_result : a ModelError occurs
"""
x = np.array([[1, 2], [3, 4]])
y = np.array([0, 1])
y_pred = np.array([1, 0])
data_array = np.concatenate((x, np.array([y]).T, np.array([y_pred]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = None
input_dict = {'df_contains': 'xyypred',
'y_pred_col_name': 'target_pred'}
try:
data = mls.sl.models.DataPandas.from_dict(input_dict, df)
self.assertTrue(False)
except mls.exceptions.ModelError:
self.assertIsNone(data)
self.assertTrue(True)
| [
"pandas.DataFrame",
"numpy.array",
"mlsurvey.sl.models.DataPandas",
"mlsurvey.sl.models.DataPandas.from_dict"
] | [((369, 401), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (377, 401), True, 'import numpy as np\n'), ((414, 430), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (422, 430), True, 'import numpy as np\n'), ((448, 464), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (456, 464), True, 'import numpy as np\n'), ((566, 595), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data_array'}), '(data=data_array)\n', (578, 595), True, 'import pandas as pd\n'), ((611, 662), 'mlsurvey.sl.models.DataPandas', 'mls.sl.models.DataPandas', (['df'], {'df_contains': '"""xyypred"""'}), "(df, df_contains='xyypred')\n", (635, 662), True, 'import mlsurvey as mls\n'), ((1822, 1848), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (1830, 1848), True, 'import numpy as np\n'), ((1861, 1877), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1869, 1877), True, 'import numpy as np\n'), ((1895, 1911), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (1903, 1911), True, 'import numpy as np\n'), ((2013, 2042), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data_array'}), '(data=data_array)\n', (2025, 2042), True, 'import pandas as pd\n'), ((1364, 1414), 'mlsurvey.sl.models.DataPandas.from_dict', 'mls.sl.models.DataPandas.from_dict', (['input_dict', 'df'], {}), '(input_dict, df)\n', (1398, 1414), True, 'import mlsurvey as mls\n'), ((2199, 2249), 'mlsurvey.sl.models.DataPandas.from_dict', 'mls.sl.models.DataPandas.from_dict', (['input_dict', 'df'], {}), '(input_dict, df)\n', (2233, 2249), True, 'import mlsurvey as mls\n'), ((1154, 1166), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1162, 1166), True, 'import numpy as np\n'), ((505, 518), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (513, 518), True, 'import numpy as np\n'), ((522, 540), 'numpy.array', 'np.array', (['[y_pred]'], {}), '([y_pred])\n', (530, 540), True, 'import numpy as np\n'), ((1952, 1965), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (1960, 1965), True, 'import numpy as np\n'), ((1969, 1987), 'numpy.array', 'np.array', (['[y_pred]'], {}), '([y_pred])\n', (1977, 1987), True, 'import numpy as np\n')] |
# encoding:utf-8
import requests
import base64
import time
'''
General-purpose text recognition (Baidu OCR "general_basic" endpoint)
'''
request_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic"
access_token = ''  # Baidu AI access token; see the Baidu AI OCR docs for how to obtain one
request_url = request_url + "?access_token=" + access_token
headers = {'content-type': 'application/x-www-form-urlencoded'}
for file_index in range(10000):
file_name = 'vcode_imgs/' + str(file_index) + '.png'
    with open(file_name, 'rb') as f_obj:
        img = base64.b64encode(f_obj.read())
params = {"image": img}
response = requests.post(request_url, data=params, headers=headers)
if response:
        # Crude parse of the raw JSON body: take the last field's quoted value, drop spaces, lower-case it
        answer = response.content.decode().split(",")[-1].split("\"")[-2].replace(' ', '').lower()
if len(answer) < 5:
with open('baidu_ocr_verify_response.json', 'a') as f:
f.write('{}:{}\n'.format(str(file_index) + '.png', answer))
else:
with open('baidu_ocr_verify_response.json', 'a') as f:
                f.write('{}:{}\n'.format(str(file_index) + '.png', 'recognition failed'))
            print('Recognition of file {}.png failed, please check it manually'.format(file_index))
time.sleep(0.2) | [
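# ---------------------------------------------------------------------------
# Hedged alternative (editor addition): the string splitting above is fragile.
# If the service returns the documented JSON payload, the text can be read from
# the parsed body instead.  The 'words_result'/'words' field names follow the
# general_basic documentation and are an assumption, not verified here.
def parse_words(response):
    """Return the first recognized line, lower-cased with spaces removed, or None."""
    body = response.json()
    words = body.get('words_result', [])
    if not words:
        return None
    return words[0].get('words', '').replace(' ', '').lower()
# ---------------------------------------------------------------------------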
"requests.post",
"time.sleep"
] | [((544, 600), 'requests.post', 'requests.post', (['request_url'], {'data': 'params', 'headers': 'headers'}), '(request_url, data=params, headers=headers)\n', (557, 600), False, 'import requests\n'), ((1111, 1126), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1121, 1126), False, 'import time\n')] |
from random import shuffle
import numpy as np
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
import cv2
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from PIL import Image
from .RepulsionLoss.my_repulsion_loss import repulsion
def preprocess_input(image):
image /= 255
mean=(0.406, 0.456, 0.485)
std=(0.225, 0.224, 0.229)
image -= mean
image /= std
return image
def calc_iou(a, b):
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 0])
ih = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 1])
iw = torch.clamp(iw, min=0)
ih = torch.clamp(ih, min=0)
ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih
ua = torch.clamp(ua, min=1e-8)
intersection = iw * ih
IoU = intersection / ua
return IoU
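# ---------------------------------------------------------------------------
# Editor note (not in the original file): calc_iou mixes two box layouts on
# purpose -- `a` (the anchors) is indexed as (y1, x1, y2, x2) while `b` (the
# ground-truth boxes) is (x1, y1, x2, y2), matching how anchors and labels are
# built elsewhere in this file.  Hedged sanity check:
#   calc_iou(torch.tensor([[0., 0., 2., 2.]]), torch.tensor([[0., 0., 2., 2.]]))
#   # -> tensor([[1.]])
# ---------------------------------------------------------------------------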
def get_target(anchor, bbox_annotation, classification, cuda):
IoU = calc_iou(anchor[:, :], bbox_annotation[:, :4])
IoU_max, IoU_argmax = torch.max(IoU, dim=1)
# compute the loss for classification
targets = torch.ones_like(classification) * -1
if cuda:
targets = targets.cuda()
targets[torch.lt(IoU_max, 0.4), :] = 0
positive_indices = torch.ge(IoU_max, 0.5)
num_positive_anchors = positive_indices.sum()
assigned_annotations = bbox_annotation[IoU_argmax, :]
targets[positive_indices, :] = 0
targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1
return targets, num_positive_anchors, positive_indices, assigned_annotations
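# ---------------------------------------------------------------------------
# Editor note (not in the original file): get_target implements the usual
# anchor-matching scheme --
#   row stays -1  : max IoU in [0.4, 0.5), ignored by the loss,
#   row is all 0  : background anchor (max IoU < 0.4),
#   one-hot row   : positive anchor (max IoU >= 0.5), class taken from the
#                   best-matching ground-truth box.
# The -1 rows are masked out later with torch.ne(targets, -1.0).
# ---------------------------------------------------------------------------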
def encode_bbox(assigned_annotations, positive_indices, anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y):
assigned_annotations = assigned_annotations[positive_indices, :]
anchor_widths_pi = anchor_widths[positive_indices]
anchor_heights_pi = anchor_heights[positive_indices]
anchor_ctr_x_pi = anchor_ctr_x[positive_indices]
anchor_ctr_y_pi = anchor_ctr_y[positive_indices]
gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]
gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]
gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths
gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights
# efficientdet style
gt_widths = torch.clamp(gt_widths, min=1)
gt_heights = torch.clamp(gt_heights, min=1)
targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi
targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi
targets_dw = torch.log(gt_widths / anchor_widths_pi)
targets_dh = torch.log(gt_heights / anchor_heights_pi)
targets = torch.stack((targets_dy, targets_dx, targets_dh, targets_dw))
targets = targets.t()
return targets
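# ---------------------------------------------------------------------------
# Hedged sketch (editor addition): the inverse of encode_bbox, handy for
# checking the (dy, dx, dh, dw) target ordering used above.  `decode_bbox` is a
# hypothetical helper, not part of the original file.
def decode_bbox(targets, anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y):
    dy, dx, dh, dw = targets[:, 0], targets[:, 1], targets[:, 2], targets[:, 3]
    ctr_x = dx * anchor_widths + anchor_ctr_x
    ctr_y = dy * anchor_heights + anchor_ctr_y
    widths = torch.exp(dw) * anchor_widths
    heights = torch.exp(dh) * anchor_heights
    return torch.stack((ctr_x - 0.5 * widths, ctr_y - 0.5 * heights,
                        ctr_x + 0.5 * widths, ctr_y + 0.5 * heights), dim=1)
# ---------------------------------------------------------------------------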
class FocalLoss(nn.Module):
def __init__(self):
super(FocalLoss, self).__init__()
def forward(self, classifications, regressions, anchors, annotations, alpha=0.25, gamma=2.0, cuda=True):
        # setup: dtype, batch size, and per-image loss accumulators
dtype = regressions.dtype
batch_size = classifications.shape[0]
classification_losses = []
regression_losses = []
repulsion_losses = []
        # take the anchors (prior boxes); they are stored as (y1, x1, y2, x2)
        anchor = anchors[0, :, :].to(dtype)
        # convert to center / width-height form
anchor_widths = anchor[:, 3] - anchor[:, 1]
anchor_heights = anchor[:, 2] - anchor[:, 0]
anchor_ctr_x = anchor[:, 1] + 0.5 * anchor_widths
anchor_ctr_y = anchor[:, 0] + 0.5 * anchor_heights
rep_target = []
rep_regres = []
for j in range(batch_size):
            # ground-truth boxes for this image
            bbox_annotation = annotations[j]
            # per-image classification and regression predictions
classification = classifications[j, :, :]
regression = regressions[j, :, :]
            # clamp predicted scores away from 0 and 1 for numerical stability
classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)
if len(bbox_annotation) == 0:
alpha_factor = torch.ones_like(classification) * alpha
if cuda:
alpha_factor = alpha_factor.cuda()
alpha_factor = 1. - alpha_factor
focal_weight = classification
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(torch.log(1.0 - classification))
cls_loss = focal_weight * bce
if cuda:
regression_losses.append(torch.tensor(0).to(dtype).cuda())
repulsion_losses.append(torch.tensor(0).to(dtype).cuda())
else:
regression_losses.append(torch.tensor(0).to(dtype))
repulsion_losses.append(torch.tensor(0).to(dtype))
classification_losses.append(cls_loss.sum())
continue
            # build classification targets and positive-anchor assignments
targets, num_positive_anchors, positive_indices, assigned_annotations = get_target(anchor, bbox_annotation, classification, cuda)
rep_target.append(bbox_annotation[:, 0:4])
rep_regres.append(anchor[positive_indices,:])
alpha_factor = torch.ones_like(targets) * alpha
if cuda:
alpha_factor = alpha_factor.cuda()
alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)
focal_weight = torch.where(torch.eq(targets, 1.), 1. - classification, classification)
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))
cls_loss = focal_weight * bce
zeros = torch.zeros_like(cls_loss)
if cuda:
zeros = zeros.cuda()
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, zeros)
            classification_losses.append(cls_loss.sum() / torch.clamp(num_positive_anchors.to(dtype), min=1.0))  # normalized by the number of positive anchors
            # smooth_l1 & repulsion_loss
if positive_indices.sum() > 0:
targets = encode_bbox(assigned_annotations, positive_indices, anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y)
                # print("Targets:", targets)  # targets has shape (n_positive, 4)
                regression_diff = torch.abs(targets - regression[positive_indices, :])
                # smooth L1 (Huber) loss
L1delta = 1.0 #0.5
regression_loss = torch.where(
torch.le(regression_diff, L1delta),
0.5 * torch.pow(regression_diff, 2),
L1delta * regression_diff - 0.5 * L1delta ** 2
)
regression_losses.append(regression_loss.sum())
else:
if cuda:
regression_losses.append(torch.tensor(0).to(dtype).cuda())
repulsion_losses.append(torch.tensor(0).to(dtype).cuda())
else:
regression_losses.append(torch.tensor(0).to(dtype))
repulsion_losses.append(torch.tensor(0).to(dtype))
c_loss = torch.stack(classification_losses).mean()
r_loss = torch.stack(regression_losses).mean()
# Repulsion
# rep_target = torch.tensor(rep_target, dtype=torch.float16)
# rep_regres = torch.tensor(rep_regres, dtype=torch.float16)
loss_RepGT = repulsion(rep_target, rep_regres) # anchor
repu_loss = loss_RepGT.mean() # nan problem
loss = c_loss + r_loss #+ repu_loss
return loss, c_loss, r_loss, repu_loss
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
class Generator(object):
def __init__(self,batch_size,
train_lines, image_size,
):
self.batch_size = batch_size
self.train_lines = train_lines
self.train_batches = len(train_lines)
self.image_size = image_size
def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5):
        '''Random preprocessing for real-time data augmentation.'''
line = annotation_line.split()
image = Image.open(line[0])
iw, ih = image.size
h, w = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
# resize image
new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
scale = rand(.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
# place image
dx = int(rand(0, w-nw))
dy = int(rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
flip = rand()<.5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)
x[..., 0] += hue*360
        # OpenCV float HSV uses hue in [0, 360]; wrap the shifted hue into that range
        x[..., 0][x[..., 0] > 360] -= 360
        x[..., 0][x[..., 0] < 0] += 360
x[..., 1] *= sat
x[..., 2] *= val
x[x[:,:, 0]>360, 0] = 360
x[:, :, 1:][x[:, :, 1:]>1] = 1
x[x<0] = 0
image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255
# correct boxes
box_data = np.zeros((len(box),5))
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
if flip: box[:, [0,2]] = w - box[:, [2,0]]
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
box_data = np.zeros((len(box),5))
box_data[:len(box)] = box
if len(box) == 0:
return image_data, []
if (box_data[:,:4]>0).any():
return image_data, box_data
else:
return image_data, []
def generate(self):
while True:
shuffle(self.train_lines)
lines = self.train_lines
inputs = []
targets = []
n = len(lines)
for i in range(len(lines)):
img,y = self.get_random_data(lines[i], self.image_size[0:2])
i = (i+1) % n
if len(y)!=0:
boxes = np.array(y[:,:4],dtype=np.float32)
y = np.concatenate([boxes,y[:,-1:]],axis=-1)
img = np.array(img,dtype = np.float32)
y = np.array(y,dtype = np.float32)
inputs.append(np.transpose(preprocess_input(img),(2,0,1)))
targets.append(y)
if len(targets) == self.batch_size:
tmp_inp = np.array(inputs)
tmp_targets = np.array(targets)
inputs = []
targets = []
yield tmp_inp, tmp_targets
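# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): each annotation line is expected to be
# "<image path> x1,y1,x2,y2,class ...".  The file name below is hypothetical.
#   lines = open('train_annotations.txt').read().splitlines()
#   gen = Generator(batch_size=4, train_lines=lines, image_size=(512, 512, 3))
#   images, boxes = next(gen.generate())
# ---------------------------------------------------------------------------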
| [
"numpy.random.rand",
"PIL.Image.new",
"torch.max",
"torch.pow",
"torch.eq",
"numpy.array",
"torch.unsqueeze",
"numpy.concatenate",
"torch.zeros_like",
"torch.le",
"torch.ones_like",
"torch.abs",
"random.shuffle",
"torch.ge",
"cv2.cvtColor",
"torch.lt",
"torch.clamp",
"PIL.Image.ope... | [((734, 756), 'torch.clamp', 'torch.clamp', (['iw'], {'min': '(0)'}), '(iw, min=0)\n', (745, 756), False, 'import torch\n'), ((766, 788), 'torch.clamp', 'torch.clamp', (['ih'], {'min': '(0)'}), '(ih, min=0)\n', (777, 788), False, 'import torch\n'), ((890, 916), 'torch.clamp', 'torch.clamp', (['ua'], {'min': '(1e-08)'}), '(ua, min=1e-08)\n', (901, 916), False, 'import torch\n'), ((1135, 1156), 'torch.max', 'torch.max', (['IoU'], {'dim': '(1)'}), '(IoU, dim=1)\n', (1144, 1156), False, 'import torch\n'), ((1365, 1387), 'torch.ge', 'torch.ge', (['IoU_max', '(0.5)'], {}), '(IoU_max, 0.5)\n', (1373, 1387), False, 'import torch\n'), ((2415, 2444), 'torch.clamp', 'torch.clamp', (['gt_widths'], {'min': '(1)'}), '(gt_widths, min=1)\n', (2426, 2444), False, 'import torch\n'), ((2462, 2492), 'torch.clamp', 'torch.clamp', (['gt_heights'], {'min': '(1)'}), '(gt_heights, min=1)\n', (2473, 2492), False, 'import torch\n'), ((2642, 2681), 'torch.log', 'torch.log', (['(gt_widths / anchor_widths_pi)'], {}), '(gt_widths / anchor_widths_pi)\n', (2651, 2681), False, 'import torch\n'), ((2699, 2740), 'torch.log', 'torch.log', (['(gt_heights / anchor_heights_pi)'], {}), '(gt_heights / anchor_heights_pi)\n', (2708, 2740), False, 'import torch\n'), ((2756, 2817), 'torch.stack', 'torch.stack', (['(targets_dy, targets_dx, targets_dh, targets_dw)'], {}), '((targets_dy, targets_dx, targets_dh, targets_dw))\n', (2767, 2817), False, 'import torch\n'), ((1214, 1245), 'torch.ones_like', 'torch.ones_like', (['classification'], {}), '(classification)\n', (1229, 1245), False, 'import torch\n'), ((8239, 8258), 'PIL.Image.open', 'Image.open', (['line[0]'], {}), '(line[0])\n', (8249, 8258), False, 'from PIL import Image\n'), ((8845, 8886), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (8854, 8886), False, 'from PIL import Image\n'), ((522, 553), 'torch.unsqueeze', 'torch.unsqueeze', (['a[:, 3]'], {'dim': '(1)'}), '(a[:, 3], dim=1)\n', (537, 553), False, 'import torch\n'), ((576, 603), 'torch.unsqueeze', 'torch.unsqueeze', (['a[:, 1]', '(1)'], {}), '(a[:, 1], 1)\n', (591, 603), False, 'import torch\n'), ((633, 664), 'torch.unsqueeze', 'torch.unsqueeze', (['a[:, 2]'], {'dim': '(1)'}), '(a[:, 2], dim=1)\n', (648, 664), False, 'import torch\n'), ((687, 714), 'torch.unsqueeze', 'torch.unsqueeze', (['a[:, 0]', '(1)'], {}), '(a[:, 0], 1)\n', (702, 714), False, 'import torch\n'), ((798, 863), 'torch.unsqueeze', 'torch.unsqueeze', (['((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]))'], {'dim': '(1)'}), '((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1)\n', (813, 863), False, 'import torch\n'), ((1310, 1332), 'torch.lt', 'torch.lt', (['IoU_max', '(0.4)'], {}), '(IoU_max, 0.4)\n', (1318, 1332), False, 'import torch\n'), ((3925, 3974), 'torch.clamp', 'torch.clamp', (['classification', '(0.0001)', '(1.0 - 0.0001)'], {}), '(classification, 0.0001, 1.0 - 0.0001)\n', (3936, 3974), False, 'import torch\n'), ((5808, 5834), 'torch.zeros_like', 'torch.zeros_like', (['cls_loss'], {}), '(cls_loss)\n', (5824, 5834), False, 'import torch\n'), ((7731, 7747), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7745, 7747), True, 'import numpy as np\n'), ((9584, 9618), 'cv2.cvtColor', 'cv2.cvtColor', (['x', 'cv2.COLOR_HSV2RGB'], {}), '(x, cv2.COLOR_HSV2RGB)\n', (9596, 9618), False, 'import cv2\n'), ((9725, 9747), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (9742, 9747), True, 'import numpy as np\n'), ((10519, 10544), 'random.shuffle', 
'shuffle', (['self.train_lines'], {}), '(self.train_lines)\n', (10526, 10544), False, 'from random import shuffle\n'), ((5262, 5286), 'torch.ones_like', 'torch.ones_like', (['targets'], {}), '(targets)\n', (5277, 5286), False, 'import torch\n'), ((5406, 5428), 'torch.eq', 'torch.eq', (['targets', '(1.0)'], {}), '(targets, 1.0)\n', (5414, 5428), False, 'import torch\n'), ((5501, 5523), 'torch.eq', 'torch.eq', (['targets', '(1.0)'], {}), '(targets, 1.0)\n', (5509, 5523), False, 'import torch\n'), ((5603, 5633), 'torch.pow', 'torch.pow', (['focal_weight', 'gamma'], {}), '(focal_weight, gamma)\n', (5612, 5633), False, 'import torch\n'), ((5928, 5951), 'torch.ne', 'torch.ne', (['targets', '(-1.0)'], {}), '(targets, -1.0)\n', (5936, 5951), False, 'import torch\n'), ((6413, 6465), 'torch.abs', 'torch.abs', (['(targets - regression[positive_indices, :])'], {}), '(targets - regression[positive_indices, :])\n', (6422, 6465), False, 'import torch\n'), ((7233, 7267), 'torch.stack', 'torch.stack', (['classification_losses'], {}), '(classification_losses)\n', (7244, 7267), False, 'import torch\n'), ((7292, 7322), 'torch.stack', 'torch.stack', (['regression_losses'], {}), '(regression_losses)\n', (7303, 7322), False, 'import torch\n'), ((9269, 9296), 'numpy.array', 'np.array', (['image', 'np.float32'], {}), '(image, np.float32)\n', (9277, 9296), True, 'import numpy as np\n'), ((10136, 10172), 'numpy.logical_and', 'np.logical_and', (['(box_w > 1)', '(box_h > 1)'], {}), '(box_w > 1, box_h > 1)\n', (10150, 10172), True, 'import numpy as np\n'), ((11006, 11037), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (11014, 11037), True, 'import numpy as np\n'), ((11059, 11088), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.float32'}), '(y, dtype=np.float32)\n', (11067, 11088), True, 'import numpy as np\n'), ((4057, 4088), 'torch.ones_like', 'torch.ones_like', (['classification'], {}), '(classification)\n', (4072, 4088), False, 'import torch\n'), ((4335, 4365), 'torch.pow', 'torch.pow', (['focal_weight', 'gamma'], {}), '(focal_weight, gamma)\n', (4344, 4365), False, 'import torch\n'), ((4407, 4438), 'torch.log', 'torch.log', (['(1.0 - classification)'], {}), '(1.0 - classification)\n', (4416, 4438), False, 'import torch\n'), ((6604, 6638), 'torch.le', 'torch.le', (['regression_diff', 'L1delta'], {}), '(regression_diff, L1delta)\n', (6612, 6638), False, 'import torch\n'), ((10863, 10899), 'numpy.array', 'np.array', (['y[:, :4]'], {'dtype': 'np.float32'}), '(y[:, :4], dtype=np.float32)\n', (10871, 10899), True, 'import numpy as np\n'), ((10922, 10965), 'numpy.concatenate', 'np.concatenate', (['[boxes, y[:, -1:]]'], {'axis': '(-1)'}), '([boxes, y[:, -1:]], axis=-1)\n', (10936, 10965), True, 'import numpy as np\n'), ((11297, 11313), 'numpy.array', 'np.array', (['inputs'], {}), '(inputs)\n', (11305, 11313), True, 'import numpy as np\n'), ((11348, 11365), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (11356, 11365), True, 'import numpy as np\n'), ((5665, 5690), 'torch.log', 'torch.log', (['classification'], {}), '(classification)\n', (5674, 5690), False, 'import torch\n'), ((5711, 5742), 'torch.log', 'torch.log', (['(1.0 - classification)'], {}), '(1.0 - classification)\n', (5720, 5742), False, 'import torch\n'), ((6666, 6695), 'torch.pow', 'torch.pow', (['regression_diff', '(2)'], {}), '(regression_diff, 2)\n', (6675, 6695), False, 'import torch\n'), ((4769, 4784), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4781, 4784), False, 'import torch\n'), 
((4840, 4855), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4852, 4855), False, 'import torch\n'), ((7116, 7131), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (7128, 7131), False, 'import torch\n'), ((7187, 7202), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (7199, 7202), False, 'import torch\n'), ((4590, 4605), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4602, 4605), False, 'import torch\n'), ((4668, 4683), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4680, 4683), False, 'import torch\n'), ((6937, 6952), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (6949, 6952), False, 'import torch\n'), ((7015, 7030), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (7027, 7030), False, 'import torch\n')] |