index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
69,091 | kkaris/paths_graph | refs/heads/master | /paths_graph/pg.py | import os
import logging
import itertools
from copy import deepcopy
import numpy as np
from collections import defaultdict
import networkx as nx
logger = logging.getLogger('paths_graph')
def get_reachable_sets(g, source, target, max_depth=10, signed=False):
    """Get sets of nodes reachable from source and target at different depths.

    Parameters
    ----------
    g : nx.DiGraph
        The underlying graph used for computing reachable node sets.
    source : str
        Name of source node.
    target : str
        Name of target node.
    max_depth : int
        Maximum path length (depth) over which to compute reachable sets.
    signed : boolean
        Whether the graph is signed. If True, sign information should be encoded
        in the 'sign' field of the edge data, with 0 indicating a positive edge
        and 1 indicating a negative edge.

    Returns
    -------
    tuple
        The first item in the tuple represents the nodes reachable from the
        source in the forward direction; the second represents the nodes
        reachable from the target in the backward direction. Each reachable
        set takes the form of a dict with integers representing different
        depths as keys, and sets of nodes as values. In the signed case the
        nodes consist of tuples (u, w), where u is the name of the node and w
        is the cumulative polarity, forwards or backwards, from the source or
        target, respectively. Note that if the source or target are not
        reachable by any path within the given maximum depth, both dicts are
        empty.
    """
    # Forward and backward level sets for signed and unsigned graphs.
    # In the signed case the endpoints are re-tagged with an initial (even)
    # cumulative polarity of 0.
    if signed:
        source = (source, 0)
        target = (target, 0)
        f_level = {0: set([source])}
        b_level = {0: set([target])}
    else:
        f_level = {0: set([source])}
        b_level = {0: set([target])}
    # A bit of trickery to avoid a duplicated for loop--may be too much!
    # Each entry: (direction label, level dict that is mutated in place,
    # function mapping a node to its ((u, v), neighbor) edge pairs).
    directions = (
        ('forward', f_level, lambda u: [((u, v), v) for v in g.successors(u)]),
        ('backward', b_level, lambda v: [((u, v), u) for u in g.predecessors(v)]))
    # Utility function to make code below more compact.
    # NOTE(review): this closure reads `reachable_node` from the enclosing
    # loop scope rather than taking it as a parameter -- fragile, but
    # correct as long as it is only called from the loops below.
    def _add_signed_edge(reachable_set, node_polarity, edge_polarity):
        cum_polarity = (node_polarity + edge_polarity) % 2
        reachable_set.add((reachable_node, cum_polarity))
    # Iterate over levels, once per direction
    for direction, level, edge_func in directions:
        visited = set([source]) if direction == 'forward' else set([target])
        for i in range(1, max_depth+1):
            reachable_set = set()
            # Signed graph: carry cumulative polarity along with the node
            if signed:
                for node, node_polarity in level[i-1]:
                    for (u, v), reachable_node in edge_func(node):
                        edge_dict = g.get_edge_data(u, v)
                        edge_polarity = edge_dict.get('sign')
                        # If this is a multidigraph, get_edge_data will return
                        # a dict keyed by integers, so we add one entry per
                        # parallel edge
                        if edge_polarity is None:
                            for edge_key, edge_data in edge_dict.items():
                                _add_signed_edge(reachable_set, node_polarity,
                                                 edge_data['sign'])
                        else:
                            _add_signed_edge(reachable_set, node_polarity,
                                             edge_polarity)
            # Unsigned graph
            else:
                for node in level[i-1]:
                    for (u, v), reachable_node in edge_func(node):
                        reachable_set.add(reachable_node)
            visited = visited | reachable_set
            # If the reachable set is empty then we can stop
            if not reachable_set:
                break
            else:
                level[i] = reachable_set
        # If we're going forward we make sure we visited the target; going
        # backward, that we visited the source. Otherwise there is no path
        # and both level dicts are returned empty.
        if (direction == 'forward' and target not in visited) or \
           (direction == 'backward' and source not in visited):
            return ({}, {})
    return (f_level, b_level)
class PathsGraph(object):
"""Class representing the Paths Graph data structure.
Instances of this class should generally not be created using the
`__init__` method but rather through the factory class method
:py:meth:`from_graph`.
Attributes
----------
source_name : str or tuple
String or tuple representing the source.
source_node : tuple
The source node as it is represented in the paths graph.
target_name : str or tuple
String or tuple representing the target.
target_node : tuple
The target node as it is represented in the paths graph.
target_polarity : int
Integer representing the sign of the desired paths at the target
node. 0 indicates a positive (even) polarity, 1 indicates a negative
(odd) polarity.
graph : networkx.DiGraph
Graph object representing the paths graph.
path_length : int
Length of the path connecting source and target.
"""
def __init__(self, source_name, target_name, graph, path_length, signed,
target_polarity):
self.source_name = source_name
self.target_name = target_name
self.signed = signed
self.target_polarity = target_polarity
if signed:
self.source_name = (source_name, 0)
self.source_node = (0, self.source_name)
self.target_name = (target_name, target_polarity)
self.target_node = (path_length, self.target_name)
else:
self.source_node = (0, source_name)
self.target_node = (path_length, target_name)
self.graph = graph
self.path_length = path_length
# Used in cycle-free path sampling
self._blacklist_by_path = {}
@classmethod
def from_graph(klass, g, source, target, length, fwd_reachset=None,
               back_reachset=None, signed=False, target_polarity=0):
    """Create a graph where all nodes lie on a path of the given length.

    Nodes in the graph account for the cumulative polarity from the source
    to the target, so that any path found from the source to the target
    will have the overall polarity as specified by the target_polarity
    argument.

    The function uses the forward and backward reach sets provided as
    arguments to efficiently identify the subset of nodes that are
    reachable from both the forward and backward directions. Pairs of nodes
    at neighboring levels are then checked against the original graph to
    determine which nodes are connected by edges with the appropriate
    direction and polarity. These nodes are then used to create a new
    graph, the "paths graph," which consists solely of these nodes and
    edges. This graph represents the superset of all possible paths from
    source to target of a given length and target polarity. Specific paths
    can then be obtained by sampling.

    Parameters
    ----------
    g : networkx.DiGraph
        The underlying graph on which paths will be generated.
    source : str
        Name of the source node.
    target : str
        Name of the target node.
    length : int
        Length of paths to compute.
    fwd_reachset : Optional[dict]
        Dictionary of sets representing the forward reachset computed over
        the original graph g up to a maximum depth greater than the
        requested path length. If not provided, the forward reach set is
        calculated up to the requested path length by calling
        paths_graph.get_reachable_sets.
    back_reachset : Optional[dict]
        Dictionary of sets representing the backward reachset computed over
        the original graph g up to a maximum depth greater than the
        requested path length. If not provided, the backward reach set is
        calculated up to the requested path length by calling
        paths_graph.get_reachable_sets.
    signed : bool
        Specifies whether the underlying graph and the corresponding
        f_level and b_level reachable sets have signed edges. If True,
        sign information should be encoded in the 'sign' field of the edge
        data, with 0 indicating a positive edge and 1 indicating a negative
        edge.
    target_polarity : 0 or 1
        Specifies the polarity of the target node: 0 indicates
        positive/activation, 1 indicates negative/inhibition.

    Returns
    -------
    PathsGraph
        Instance of PathsGraph class representing paths from source to
        target with a given length and overall polarity.
    """
    # If the reachable sets aren't provided by the user, compute them here
    # with a maximum depth given by the target path length.
    if fwd_reachset is None or back_reachset is None:
        (fwd_reachset, back_reachset) = get_reachable_sets(g, source,
                                           target, max_depth=length,
                                           signed=signed)
    # If either fwd_reachset or back_reachset is an empty dict (as they
    # would be if the nodes were unreachable from either direction) return
    # an empty paths graph.
    # NOTE(review): this early return constructs PathsGraph directly while
    # the later returns use `klass`; inconsistent but behaviorally the same
    # unless subclassed.
    if not fwd_reachset or not back_reachset:
        paths_graph = nx.DiGraph()
        return PathsGraph(source, target, paths_graph, length, signed,
                          target_polarity)
    # Otherwise, if the reachable sets are provided, use them after checking
    # if they have a depth at least equal to the given path length
    _check_reach_depth('forward', fwd_reachset, length)
    _check_reach_depth('backward', back_reachset, length)
    # Also, if the reachable sets do not have entries at the given length,
    # this means that either we are attempting to create a paths_graph for
    # a path longer than we generated reachable sets, or there is no path of
    # the given length (may depend on whether cycles were eliminated when
    # generating the reachable sets).
    if not (length in fwd_reachset and length in back_reachset):
        paths_graph = nx.DiGraph()
        return klass(source, target, paths_graph, length, signed,
                     target_polarity)
    # By default, we set the "adjusted backward reach set", aka
    # back_reachset_adj, to be the same as the original back_reachset; this
    # is only overriden if we have a signed graph and a negative target
    # polarity
    back_reachset_adj = back_reachset
    # Signed graphs
    if signed:
        level = {0: set([(source, 0)]),
                 length: set([(target, target_polarity)])}
        # If the target polarity is even (positive/neutral), then the
        # cumulative polarities in the forward direction will match those
        # in the reverse direction; if the target polarity is odd, then the
        # polarities will be opposite at each matching node. Thus we check
        # the target polarity and flip the polarities of the backward reach
        # set if appropriate.
        # NOTE(review): this loop assumes back_reachset has contiguous
        # integer keys 0..len-1 -- TODO confirm this holds when the BFS in
        # get_reachable_sets breaks early.
        if target_polarity == 1:
            back_reachset_adj = {}
            for i in range(0, len(back_reachset)):
                polar_set = set()
                for (u, w) in back_reachset[i]:
                    w_flipped = (w + 1) % 2
                    polar_set.add((u, w_flipped))
                back_reachset_adj[i] = polar_set
    # Unsigned graphs
    else:
        level = {0: set([source]), length: set([target])}
    # Next we calculate the subset of nodes at each level that are reachable
    # from both the forward and backward directions. Because the polarities
    # have already been set appropriately, we can do this with a simple
    # set intersection. We also make sure that the target doesn't reappear
    # anywhere except at the correct level.
    # NOTE(review): in the signed case the reach-set entries are
    # (name, polarity) tuples while `target` is a bare name, so the
    # set([target]) subtraction only has an effect for unsigned graphs --
    # TODO confirm intended.
    for i in range(1, length):
        f_reach_set = fwd_reachset[i]
        b_reach_set = back_reachset_adj[length - i]
        path_nodes = set(f_reach_set) & set(b_reach_set) - set([target])
        level[i] = path_nodes
    # Next we explicitly enumerate the path graph nodes by tagging each node
    # with its level in the path
    pg_nodes = {}
    for i in range(0, length+1):
        pg_nodes[i] = list(itertools.product([i], level[i]))
    # Finally we add edges between these nodes if they are found in the
    # original graph. Note that we have to check for an edge of the
    # appropriate polarity.
    pg_edges = []
    g_edges = []
    edge_weights = {}
    # Collect edge and edge weight info from the graph; signed edges are
    # keyed by (u, v, sign), unsigned by (u, v). Missing weights default
    # to 1.0.
    for u, v, data in g.edges(data=True):
        if signed:
            edge_key = (u, v, data['sign'])
        else:
            edge_key = (u, v)
        g_edges.append(edge_key)
        edge_weights[edge_key] = float(data.get('weight', 1.0))
    for i in range(0, length):
        actual_edges = []
        logger.info("paths_graph: identifying edges at level %d" % i)
        if signed:
            # This set stores the information for performing the set
            # intersection with the edges in the source graph
            possible_edges = set()
            # This dict stores the information for the actual edge, with
            # weight, as we will need to add it to the PG
            edge_lookup = {}
            for edge in itertools.product(pg_nodes[i], pg_nodes[i+1]):
                u_name, u_pol = edge[0][1]
                v_name, v_pol = edge[1][1]
                # If the polarity between neighboring nodes is the same,
                # then we need a positive edge
                required_sign = 0 if u_pol == v_pol else 1
                edge_key = (u_name, v_name, required_sign)
                possible_edges.add(edge_key)
                edge_lookup[edge_key] = edge
            for edge_key in possible_edges.intersection(g_edges):
                weighted_edge = edge_lookup[edge_key] + \
                                ({'weight': edge_weights[edge_key]},)
                actual_edges.append(weighted_edge)
        else:
            # Build a set representing possible edges between adjacent
            # levels
            num_possible_edges = len(pg_nodes[i]) * len(pg_nodes[i+1])
            logger.info("%d nodes at level %d, %d nodes at level %d,"
                        "%d possible edges" %
                        (len(pg_nodes[i]), i, len(pg_nodes[i+1]), i+1,
                         num_possible_edges))
            logger.info("%d edges in graph" % len(g_edges))
            # Choose the cheaper enumeration: candidate level pairs vs.
            # the full edge list of the original graph
            if num_possible_edges < len(g_edges):
                logger.info("Enumerating possible edges")
                possible_edges = set([(u[1], v[1])
                                      for u, v in
                                      itertools.product(pg_nodes[i],
                                                        pg_nodes[i+1])])
                # Actual edges are the ones contained in the original graph;
                # add to list with prepended depths
                logger.info("Enumerating actual edges")
                for u, v in possible_edges.intersection(g_edges):
                    actual_edges.append(((i, u), (i+1, v),
                                         {'weight': edge_weights[(u, v)]}))
            else:
                logger.info("Enumerating edges in graph")
                for u, v in g_edges:
                    if u in level[i] and v in level[i+1]:
                        actual_edges.append(((i, u), (i+1, v),
                                             {'weight': edge_weights[(u, v)]}))
        pg_edges.extend(actual_edges)
        logger.info("Done.")
    logger.info("Creating graph")
    paths_graph = nx.DiGraph()
    paths_graph.add_edges_from(pg_edges)
    logger.info("Paths graph for length %d has %d nodes" %
                (length, len(paths_graph)))
    return klass(source, target, paths_graph, length, signed,
                 target_polarity)
def enumerate_paths(self, names_only=True):
if not self.graph:
return tuple()
paths = [tuple(path) for path in nx.all_simple_paths(self.graph,
self.source_node, self.target_node)]
if names_only:
paths = self._name_paths(paths)
return tuple(paths)
def _get_path_counts(self):
"""Get a dictionary giving the number of paths through each node.
The entry for the source node gives the total number of paths in the
graph.
"""
if not self.graph:
return {}
# Group nodes by level
levels = defaultdict(list)
for node in self.graph.nodes():
levels[node[0]].append(node)
# Initialize the path count
path_counts = {}
path_counts[self.target_node] = 1
# Iterate over the levels going "backwards" from the target. This way
# the path count at each node reflects the number of paths passing
# through that node from source to target
for i in reversed(range(0, self.path_length)):
# Iterate over the nodes at this level
for node in levels[i]:
# The count for this node is the sum of the counts over all
# of its successors
path_counts[node] = \
sum([path_counts[succ]
for succ in self.graph.successors(node)])
return path_counts
def count_paths(self):
"""Count the total number of paths without enumerating them.
Returns
-------
int
The number of paths.
"""
path_counts = self._get_path_counts()
total_count = path_counts.get(self.source_node)
if total_count is None:
total_count = 0
return total_count
def _get_cf_path_counts(self):
    """Get the cycle-free path counts as a CycleFreePathCounts object.

    The raw per-node counts from _get_path_counts are adjusted using the
    blacklist information accumulated during cycle-free sampling. The
    entry for the source node gives the total number of cycle-free paths
    in the graph.
    """
    base_counts = self._get_path_counts()
    return CycleFreePathCounts(base_counts, self._blacklist_by_path)
def count_cf_paths(self):
    """Get the approximate number of cycle-free paths.

    The count at the source node, given an empty path prefix, is the
    blacklist-adjusted total.
    """
    cf_counts = self._get_cf_path_counts()
    return cf_counts.get(self.source_node, tuple())
def set_uniform_path_distribution(self):
    """Adjusts edge weights to allow uniform sampling of paths.

    Each edge (u, v) is weighted by the fraction of paths through u that
    continue via v, so a weighted random walk draws complete paths
    uniformly. Note that calling this method will over-write any existing
    edge weights in the graph.
    """
    counts = self._get_path_counts()
    new_weights = {}
    for node in self.graph.nodes():
        succs = list(self.graph.successors(node))
        # Terminal nodes (e.g. the target) have no outgoing edges
        if not succs:
            continue
        succ_counts = np.array([float(counts[s]) for s in succs])
        probs = succ_counts / np.sum(succ_counts)
        for s, p in zip(succs, probs):
            new_weights[(node, s)] = p
    nx.set_edge_attributes(self.graph, name='weight', values=new_weights)
@staticmethod
def _name_paths(paths):
return [tuple([node[1] for node in path]) for path in paths]
def sample_paths(self, num_samples, names_only=True):
"""Sample paths of the given length between source and target.
Parameters
----------
num_samples : int
The number of paths to sample.
names_only : boolean
Whether the paths should consist only of node names, or of node
tuples (e.g., including depth and polarity). Default is True
(only names).
Returns
-------
list of tuples
Each item in the list is a tuple of strings representing a path.
Note that the paths may not be unique.
"""
if not self.graph:
return tuple()
paths = []
while len(paths) < num_samples:
try:
path = self.sample_single_path(names_only=False)
paths.append(path)
except PathSamplingException:
pass
if names_only:
paths = self._name_paths(paths)
return tuple(paths)
def sample_single_path(self, names_only=True):
"""Sample a path between source and target.
Parameters
----------
names_only : boolean
Whether the paths should consist only of node names, or of node
tuples (e.g., including depth and polarity). Default is True
(only names).
Returns
-------
tuple
Tuple of nodes or node names representing a path.
"""
# Sample a path from the paths graph.
# If the path graph is empty, there are no paths
if not self.graph:
return tuple()
path = [self.source_node]
current = self.source_node
while current[1] != self.target_name:
next = self._successor(path, current)
path.append(next)
current = next
if names_only:
path = tuple([node[1] for node in path])
else:
path = tuple(path)
return path
def sample_cf_paths(self, num_samples, names_only=True):
"""Sample a set of cycle-free paths from source to target.
Parameters
----------
num_samples : int
The number of paths to sample.
names_only : boolean
Whether the paths should consist only of node names, or of node
tuples (e.g., including depth and polarity). Default is True
(only names).
Returns
-------
list of tuples
Each item in the list is a tuple of strings representing a path.
Note that the paths may not be unique.
"""
# First, implement successor enumeration with a blacklist
def _successor_blacklist(path, node):
out_edges = []
weights = []
if tuple(path) in self._blacklist_by_path:
for e in self.graph.out_edges(node, data=True):
if e[1] not in self._blacklist_by_path[tuple(path)]:
out_edges.append(e)
weights.append(e[2]['weight'])
else:
for e in self.graph.out_edges(node, data=True):
out_edges.append(e)
weights.append(e[2]['weight'])
# If there are no successors...
if not out_edges:
return None
# For determinism in testing
#if 'TEST_FLAG' in os.environ:
# out_edges.sort()
#weights = [t[2]['weight'] for t in out_edges]
# Normalize the weights to a proper probability distribution
p = np.array(weights) / np.sum(weights)
pred_idx = np.random.choice(len(out_edges), p=p)
return out_edges[pred_idx][1]
# Check to make sure we don't have an empty graph!
if not self.graph:
return tuple()
# Initialize
paths = []
# Repeat for as many samples as we want...
for samp_ix in range(num_samples):
# The path starts at the source node
path = [self.source_node]
current = self.source_node
# while we haven't reached the target...
while current[1] != self.target_name:
# ...enumerate the allowable successors for this node
next = _successor_blacklist(path, current)
# If next is None, this means that there were no
# non-blacklisted successors and hence there are no cycle-free
# paths that pass through the current node. In this case we
# remove the current node from the path (effectively
# backtracking a level) and continue after updating the
# blacklist
if next is None:
# We can pop the information for the partial path from the
# blacklist because we will never come here again
try:
self._blacklist_by_path.pop(tuple(path))
except KeyError:
pass
# Now, backtrack by resetting the path up a level
path = path[:-1]
tup_path = tuple(path)
# If we've walked all the way back to the source node
# and the path is no empty, then there are no cycle free
# paths
if not path:
return tuple()
# The node we're backtracking to is the new final node
# in the path
backtrack_node = path[-1]
# Remember to never come to the "current" node (i.e., the
# one that was the last in the path before we figured out
# that it was a dead end) from the backtrack node again
if tup_path in self._blacklist_by_path:
self._blacklist_by_path[tup_path].append(current)
else:
self._blacklist_by_path[tup_path] = [current]
# Now make the backtrack node the new current node and
# we can proceed normally!
current = backtrack_node
# Otherwise we check if the node we've chosen introduces a
# cycle; if so, add to our blacklists then backtrack
elif next[1] in [node[1] for node in path]:
tup_path = tuple(path)
if tup_path in self._blacklist_by_path:
self._blacklist_by_path[tup_path].append(next)
else:
self._blacklist_by_path[tup_path] = [next]
# If it doesn't make a cycle, then we add it to the path
else:
path.append(next)
current = next
if names_only:
path = tuple([node[1] for node in path])
else:
path = tuple(path)
paths.append(path)
return tuple(paths)
def _successor(self, path, node):
out_edges = list(self.graph.out_edges(node, data=True))
# For determinism in testing
if 'TEST_FLAG' in os.environ:
out_edges = sorted(out_edges)
weights = [t[2]['weight'] for t in out_edges]
# Normalize the weights to a proper probability distribution
p = np.array(weights) / np.sum(weights)
pred_idx = np.random.choice(len(out_edges), p=p)
return out_edges[pred_idx][1]
class CombinedPathsGraph(object):
    """Combine PathsGraphs for different lengths into a single super-PG.

    This is particularly useful for sampling paths where the sampled paths
    reflect the likelihood of drawing paths of particular lengths, given
    the weights on the edges.

    Parameters
    ----------
    pg_list : list of PathsGraph instances
        The per-length paths graphs to merge. NOTE(review): an empty list
        raises NameError below (loop variable `pg` never bound) -- TODO
        confirm callers always pass a non-empty list.

    Attributes
    ----------
    source_name : str or tuple
        Source name, taken from the last PG in the list.
    source_node : tuple
        Source node, taken from the last PG in the list.
    target_name : str or tuple
        Target name, taken from the last PG in the list.
    target_node : tuple
        Target node, taken from the last PG in the list.
    graph : networkx.DiGraph
        The merged graph containing the edges of all input PGs.
    """
    def __init__(self, pg_list):
        self.graph = nx.DiGraph()
        self.pg_list = pg_list
        # Merge all edges (with data) into a single graph
        for pg in pg_list:
            self.graph.add_edges_from(pg.graph.edges(data=True))
        # Add info from the last PG in the list (relies on the loop
        # variable `pg` still being bound after the loop)
        self.source_name = pg.source_name
        self.source_node = pg.source_node
        self.target_name = pg.target_name
        self.target_node = pg.target_node
        self.signed = pg.signed
        self.target_polarity = pg.target_polarity
        # Internally we create a PG wrapping the graph so as to re-use its
        # sampling method by composition
        self._pg = PathsGraph(pg.source_name, pg.target_name, self.graph,
                              None, pg.signed, pg.target_polarity)
        # Override the re-appending of node polarity by PathsGraph constructor
        self._pg.source_name = self.source_name
        self._pg.source_node = self.source_node
        self._pg.target_name = self.target_name
        self._pg.target_node = self.target_node

    def sample_paths(self, num_samples):
        """Sample paths from the combined paths graph.

        Parameters
        ----------
        num_samples : int
            The number of paths to sample.

        Returns
        -------
        list of tuples
            Each item in the list is a tuple of strings representing a path.
            Note that the paths may not be unique.
        """
        return self._pg.sample_paths(num_samples=num_samples)

    def count_paths(self):
        """Sum the (possibly cyclic) path counts over all component PGs."""
        total_paths = 0
        for pg in self.pg_list:
            total_paths += pg.count_paths()
        return total_paths

    def count_cf_paths(self):
        """Sum the cycle-free path counts over all non-empty component PGs."""
        total_paths = 0
        for pg in self.pg_list:
            if pg.graph:
                total_paths += pg.count_cf_paths()
        return total_paths

    def sample_cf_paths(self, num_samples):
        """Sample cycle-free paths from the combined paths graph.

        Parameters
        ----------
        num_samples : int
            The number of paths to sample.

        Returns
        -------
        list of tuples
            Each item in the list is a tuple of strings representing a path.
            Note that the paths may not be unique.
        """
        return self._pg.sample_cf_paths(num_samples=num_samples)
def _check_reach_depth(dir_name, reachset, length):
depth = max(reachset.keys())
if depth < length:
logger.warning("Insufficient depth: path length is %d "
"but %s reach set has maximum depth %d " %
(length, dir_name, depth))
# Raised (elsewhere in the package) when sampling cannot proceed; callers
# such as PathsGraph.sample_paths catch it and retry.
class PathSamplingException(Exception):
    """Indicates a problem with sampling, e.g. a dead-end in the Pre-CFPG."""
    pass
class CycleFreePathCounts(object):
    """Path counts corrected using per-prefix blacklist information.

    Starting from the raw (possibly cyclic) path counts, every blacklisted
    (prefix, node) pair discovered during sampling subtracts that node's
    path count from each node along the prefix, yielding per-prefix
    adjusted counts retrievable via :py:meth:`get`.
    """
    def __init__(self, initial_counts, blacklist):
        # initial_counts: node -> raw number of paths through the node
        self._initial_counts = initial_counts
        # _path_counts: node -> {path prefix (tuple) -> adjusted count}
        self._path_counts = {}
        for prefix, blocked_nodes in blacklist.items():
            for blocked in blocked_nodes:
                self._update(blocked, prefix)

    def get(self, node, prefix):
        """Return the count for ``node`` given the path ``prefix``.

        Falls back to the unadjusted initial count when no adjustment has
        been recorded for this (node, prefix) pair.
        """
        per_prefix = self._path_counts.get(node)
        if per_prefix is None or prefix not in per_prefix:
            return self._initial_counts[node]
        return per_prefix[prefix]

    def _update(self, node, prefix):
        # Subtract this node's raw count from every node along the prefix,
        # each keyed by the portion of the prefix preceding it
        delta = self._initial_counts[node]
        for end_ix in range(1, len(prefix) + 1):
            head = prefix[0:end_ix - 1]
            tail = prefix[end_ix - 1]
            adjusted = self.get(tail, head) - delta
            self._path_counts.setdefault(tail, {})[head] = adjusted
| {"/paths_graph/tests/test_pre_cfpg.py": ["/paths_graph/__init__.py"], "/paths_graph/pre_cfpg.py": ["/paths_graph/pg.py"], "/paths_graph/tests/test_paths_graph.py": ["/paths_graph/__init__.py"], "/paths_graph/tests/test_cfpg.py": ["/paths_graph/__init__.py"], "/paths_graph/cfpg.py": ["/paths_graph/__init__.py", "/paths_graph/pre_cfpg.py"], "/paths_graph/api.py": ["/paths_graph/pg.py", "/paths_graph/cfpg.py"], "/paths_graph/__init__.py": ["/paths_graph/pg.py", "/paths_graph/pre_cfpg.py", "/paths_graph/cfpg.py", "/paths_graph/paths_tree.py", "/paths_graph/api.py"]} |
69,092 | kkaris/paths_graph | refs/heads/master | /paths_graph/__init__.py | from .pg import PathsGraph, CombinedPathsGraph, get_reachable_sets
from .pre_cfpg import PreCFPG
from .cfpg import CFPG, CombinedCFPG
from .paths_tree import PathsTree
from .api import *
| {"/paths_graph/tests/test_pre_cfpg.py": ["/paths_graph/__init__.py"], "/paths_graph/pre_cfpg.py": ["/paths_graph/pg.py"], "/paths_graph/tests/test_paths_graph.py": ["/paths_graph/__init__.py"], "/paths_graph/tests/test_cfpg.py": ["/paths_graph/__init__.py"], "/paths_graph/cfpg.py": ["/paths_graph/__init__.py", "/paths_graph/pre_cfpg.py"], "/paths_graph/api.py": ["/paths_graph/pg.py", "/paths_graph/cfpg.py"], "/paths_graph/__init__.py": ["/paths_graph/pg.py", "/paths_graph/pre_cfpg.py", "/paths_graph/cfpg.py", "/paths_graph/paths_tree.py", "/paths_graph/api.py"]} |
69,093 | kkaris/paths_graph | refs/heads/master | /setup.py | import sys
from setuptools import setup
def main():
    """Register the paths_graph package with setuptools."""
    requirements = ['networkx', 'numpy']
    long_desc = ('The Paths Graph is a data structure derived from '
                 'a directed graph that can be used to represent '
                 'and analyze ensembles of directed (and possibly '
                 'signed) paths.')
    trove_classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
    ]
    setup(name='paths_graph',
          version='0.0.4',
          description='Algorithm for analyzing paths in directed graphs.',
          long_description=long_desc,
          author='John A. Bachman',
          author_email='john_bachman@hms.harvard.edu',
          url='https://github.com/johnbachman/paths_graph',
          classifiers=trove_classifiers,
          keywords=['graph', 'network', 'path', 'pathway', 'sampling',
                    'ensemble'],
          #project_urls={'Documentation': 'https://paths_graph.readthedocs.io'},
          packages=['paths_graph'],
          install_requires=requirements,
          #tests_require=['nose'],
          include_package_data=True,
          )


if __name__ == '__main__':
    main()
| {"/paths_graph/tests/test_pre_cfpg.py": ["/paths_graph/__init__.py"], "/paths_graph/pre_cfpg.py": ["/paths_graph/pg.py"], "/paths_graph/tests/test_paths_graph.py": ["/paths_graph/__init__.py"], "/paths_graph/tests/test_cfpg.py": ["/paths_graph/__init__.py"], "/paths_graph/cfpg.py": ["/paths_graph/__init__.py", "/paths_graph/pre_cfpg.py"], "/paths_graph/api.py": ["/paths_graph/pg.py", "/paths_graph/cfpg.py"], "/paths_graph/__init__.py": ["/paths_graph/pg.py", "/paths_graph/pre_cfpg.py", "/paths_graph/cfpg.py", "/paths_graph/paths_tree.py", "/paths_graph/api.py"]} |
69,094 | Athulus/rfidLock | refs/heads/master | /rfidDoor/__init__.py |
from .Door import Door
| {"/rfidDoor/__init__.py": ["/rfidDoor/Door.py"], "/rfidLock/test/TestDoorConnection.py": ["/rfidLock/__init__.py"], "/rfidLock/__init__.py": ["/rfidLock/MemberDatabase.py", "/rfidLock/DoorConnection.py"], "/rfidLock/test/TestMemberDatabase.py": ["/rfidLock/__init__.py"]} |
69,095 | Athulus/rfidLock | refs/heads/master | /rfidLock/test/TestDoorConnection.py | #!/usr/bin/python
import unittest
import sqlite3
import json
import mysql.connector
from context import rfidLock
from rfidLock import MemberDatabase, DoorConnection
from os import remove
from datetime import datetime, timedelta
class TestDoorConnectionUpdate(unittest.TestCase):
    """Checks that DoorConnection.update() syncs remote members to local."""
    # Scratch sqlite files backing the local and simulated-remote DBs
    db_path_local = u'/tmp/test_door_connection_update_local.db'
    db_path_remote = u'/tmp/test_door_connection_update_remote.db'

    def setUp(self):
        # Local DB starts empty; the remote gets two members whose tags
        # are valid for one more day
        self.db_local = sqlite3.connect(self.db_path_local)
        self.member_db_local = MemberDatabase(self.db_local, "?")
        self.member_db_local.create()
        self.db_remote = sqlite3.connect(self.db_path_remote)
        self.member_db_remote = MemberDatabase(self.db_remote, "?")
        self.member_db_remote.create()
        self.member_db_remote.add(b'test_data', u'John Smith', u'jsmith@hackrva.org', datetime.now() + timedelta(days = 1))
        self.member_db_remote.add(b'dope_data', u'Crystal Meth', u'cmeth@hackrva.org', datetime.now() + timedelta(days = 1))
        self.door_connection = DoorConnection(self.member_db_local, self.member_db_remote)

    def tearDown(self):
        # Close connections and delete the scratch DB files
        self.db_local.close()
        self.db_remote.close()
        remove(self.db_path_local)
        remove(self.db_path_remote)

    def test_update_duplicates(self):
        # update() should copy both remote members to local exactly once
        self.door_connection.update()
        self.assertEqual(len(self.member_db_local.list()), 2)
        self.assertTrue(self.member_db_local.have(b'test_data'))
        self.assertTrue(self.member_db_local.have(b'dope_data'))
class TestDoorConnectionCheckRequest(unittest.TestCase):
    """Checks DoorConnection.check_request() against remote and local DBs."""
    # Scratch sqlite files backing the local and simulated-remote DBs
    db_path_local = u'/tmp/test_door_connection_check_request_local.db'
    db_path_remote = u'/tmp/test_door_connection_check_request_remote.db'

    def setUp(self):
        # Local DB starts empty; the remote gets two members whose tags
        # are valid for one more day
        self.db_local = sqlite3.connect(self.db_path_local)
        self.member_db_local = MemberDatabase(self.db_local, "?")
        self.member_db_local.create()
        self.db_remote = sqlite3.connect(self.db_path_remote)
        self.member_db_remote = MemberDatabase(self.db_remote, u'?')
        self.member_db_remote.create()
        self.member_db_remote.add(b'test_data', u'John Smith', u'jsmith@hackrva.org', datetime.now() + timedelta(days = 1))
        self.member_db_remote.add(b'dope_data', u'Crystal Meth', u'cmeth@hackrva.org', datetime.now() + timedelta(days = 1))
        self.door_connection = DoorConnection(self.member_db_local, self.member_db_remote)

    def tearDown(self):
        # Close connections and delete the scratch DB files
        self.db_local.close()
        self.db_remote.close()
        remove(self.db_path_local)
        remove(self.db_path_remote)

    def test_remote_verifies(self):
        # Tags present on the remote should be accepted
        self.assertTrue(self.door_connection.check_request(b'test_data'))
        self.assertTrue(self.door_connection.check_request(b'dope_data'))

    def test_local_verifies_with_broken_remote(self):
        # After an explicit sync, requests succeed even when the remote
        # connection has been closed
        self.door_connection.update()
        self.db_remote.close()
        self.assertTrue(self.door_connection.check_request(b'test_data'))
        self.assertTrue(self.door_connection.check_request(b'dope_data'))

    def test_checking_syncs(self):
        # check_request() itself should sync members locally, so later
        # requests succeed after the remote goes away
        self.assertTrue(self.door_connection.check_request(b'test_data'))
        self.assertTrue(self.door_connection.check_request(b'dope_data'))
        self.db_remote.close()
        self.assertTrue(self.door_connection.check_request(b'test_data'))
        self.assertTrue(self.door_connection.check_request(b'dope_data'))
# Need a mysql.connector connection to test this one
class TestDoorConnectionRecover(unittest.TestCase):
    """Tests DoorConnection against a real MySQL remote, when available.

    If the MySQL connection described in test_db.json cannot be
    established, has_mysql is set False and each test degrades to a no-op.
    """
    db_path_local = u'/tmp/test_door_connection_check_recover_local.db'

    def setUp(self):
        config = {}
        with open(u'test_db.json') as config_file:
            config = json.load(config_file)
        self.db_local = sqlite3.connect(self.db_path_local)
        self.member_db_local = None
        self.has_mysql = True
        self.db_remote = None
        self.member_db_remote = None
        self.door_connection = None
        try:
            self.db_remote = mysql.connector.connect(**config)
        except Exception:
            # Fixed: previously a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; narrowed to Exception
            self.has_mysql = False
            print(u'MySQL DB Connection Failure')
        self.member_db_remote = MemberDatabase(self.db_remote, u'%s')
        if self.has_mysql:
            # Seed the remote with two members valid for one more day
            self.member_db_remote.create()
            self.member_db_remote.add(b'test_data', u'John Smith', u'jsmith@hackrva.org', datetime.now() + timedelta(days = 1))
            self.member_db_remote.add(b'dope_data', u'Crystal Meth', u'cmeth@hackrva.org', datetime.now() + timedelta(days = 1))
        self.member_db_local = MemberDatabase(self.db_local, u'?')
        self.member_db_local.create()
        self.door_connection = DoorConnection(self.member_db_local, self.member_db_remote)

    def tearDown(self):
        # Close the local connection and delete the scratch DB file; the
        # remote is torn down per-test via destroy()/close()
        self.db_local.close()
        remove(self.db_path_local)

    def test_mysql_remote_verifies(self):
        if self.has_mysql:
            self.assertTrue(self.door_connection.check_request(b'test_data'))
            self.assertTrue(self.door_connection.check_request(b'dope_data'))
            self.member_db_remote.destroy()
            self.db_remote.close()

    def test_local_verifies_with_broken_mysql_remote(self):
        if self.has_mysql:
            self.door_connection.update()
            self.member_db_remote.destroy()
            self.db_remote.close()
            self.assertTrue(self.door_connection.check_request(b'test_data'))
            self.assertTrue(self.door_connection.check_request(b'dope_data'))

    def test_checking_syncs_mysql(self):
        if self.has_mysql:
            self.assertTrue(self.door_connection.check_request(b'test_data'))
            self.assertTrue(self.door_connection.check_request(b'dope_data'))
            self.member_db_remote.destroy()
            self.db_remote.close()
            self.assertTrue(self.door_connection.check_request(b'test_data'))
            self.assertTrue(self.door_connection.check_request(b'dope_data'))
# Allow running this test module directly (python TestDoorConnection.py).
if __name__ == '__main__':
  unittest.main()
| {"/rfidDoor/__init__.py": ["/rfidDoor/Door.py"], "/rfidLock/test/TestDoorConnection.py": ["/rfidLock/__init__.py"], "/rfidLock/__init__.py": ["/rfidLock/MemberDatabase.py", "/rfidLock/DoorConnection.py"], "/rfidLock/test/TestMemberDatabase.py": ["/rfidLock/__init__.py"]} |
69,096 | Athulus/rfidLock | refs/heads/master | /rfidLock/DoorConnection.py | #!/usr/bin/python3
# TODO need to make things a bit more database agnostic if possible
import mysql.connector
from mysql.connector import errorcode
import time
from functools import partial
# remote_db = mysql.connector.connect(
# host = db_path,
# user = db_user,
# database = db_name,
# password = db_pass)
# local_db = sqlite3.connect(db_path)
class DoorConnection(object):
  """
  Contains the functionalities required for the Raspberry Pi, to access the
  user database.

  Works with the MemberDatabase class: requests are checked against the
  local database first, then the remote one, syncing the record locally on
  a remote hit.
  """
  def __init__(self, local_member_db, remote_member_db):
    self.local = local_member_db
    self.remote = remote_member_db
  def update(self):
    """Updates the local database to match the remote database"""
    self.local.mimic(self.remote)
  def check_request(self, card_data):
    """
    Whether the card_data's hash is in the local or remote databases

    First checks the local database, returns true if card_data's hash
    is available locally, otherwise checks the remote database
    and returns true if the hash is available remotely, and returns false
    otherwise.
    """
    try:
      # Check the local database first
      if self.local.have_current(card_data):
        # found locally
        return True
      if self.remote.have_current(card_data):
        # found remotely: cache the single record locally, then accept
        self.local.sync(self.remote, card_data)
        return True
      else:
        # reject
        return False
    except mysql.connector.errors.OperationalError as e:
      if e.errno == errorcode.CR_SERVER_GONE_ERROR:
        # Attempt to recover the remote connection, then retry once.
        if self.recover():
          # BUG FIX: previously called self.checkRequest(...), which does
          # not exist (the method is check_request), so every recovery
          # attempt raised AttributeError.
          return self.check_request(card_data)
      print(e)
      return False
    except mysql.connector.errors.DatabaseError as e:
      # Any other database failure is treated as a rejection.
      return False
  def recover(self):
    """
    Allows repairing the remote database connection in case it goes away

    Returns true if the connection is successfully reestablished.
    Note that this will only work for mysql.connector connections.
    """
    self.remote.reconnect()
    return self.remote.is_connected()
| {"/rfidDoor/__init__.py": ["/rfidDoor/Door.py"], "/rfidLock/test/TestDoorConnection.py": ["/rfidLock/__init__.py"], "/rfidLock/__init__.py": ["/rfidLock/MemberDatabase.py", "/rfidLock/DoorConnection.py"], "/rfidLock/test/TestMemberDatabase.py": ["/rfidLock/__init__.py"]} |
69,097 | Athulus/rfidLock | refs/heads/master | /rfidLock/__init__.py |
from .MemberDatabase import MemberDatabase
from .DoorConnection import DoorConnection
| {"/rfidDoor/__init__.py": ["/rfidDoor/Door.py"], "/rfidLock/test/TestDoorConnection.py": ["/rfidLock/__init__.py"], "/rfidLock/__init__.py": ["/rfidLock/MemberDatabase.py", "/rfidLock/DoorConnection.py"], "/rfidLock/test/TestMemberDatabase.py": ["/rfidLock/__init__.py"]} |
69,098 | Athulus/rfidLock | refs/heads/master | /rfidLock/test/TestMemberDatabase.py | #!/usr/bin/python
import unittest
import sqlite3
from context import rfidLock
from rfidLock import MemberDatabase
from os import remove
import random, string
from datetime import datetime, timedelta
class TestMemberDatabaseHash(unittest.TestCase):
  """Sanity checks for the static MemberDatabase.hash helper."""
  def test_hash_mutates(self):
    """Hashing must not return its input unchanged."""
    raw = "hello".encode()
    self.assertNotEqual(MemberDatabase.hash(raw), raw)
  def test_hash_does_not_collide(self):
    """2000 distinct random strings must produce 2000 distinct hashes."""
    alphabet = string.ascii_lowercase + string.ascii_uppercase + string.digits
    digests = {
      MemberDatabase.hash(''.join(random.sample(alphabet, 10)).encode())
      for i in range(0, 2000)
    }
    self.assertEqual(len(digests), 2000)
class TestMemberDatabaseCreate(unittest.TestCase):
  """Table-creation behaviour of MemberDatabase."""
  db_path = "/tmp/test_member_database_create.db"
  def setUp(self):
    self.db = sqlite3.connect(self.db_path)
    self.member_db = MemberDatabase(self.db, "?", "member_table_v")
  def tearDown(self):
    # close the connection and delete the object
    self.db.close()
    remove(self.db_path)
  def test_can_create(self):
    # Check that database creation doesn't cause an error
    self.member_db.create()
  def test_double_creation_fails(self):
    # Check that trying to create an existing database throws an error
    self.member_db.create()
    # BUG FIX: sqlite3 Connection objects do not expose DB-API exception
    # attributes (that is an optional DB-API extension sqlite3 does not
    # implement), so `self.db.OperationalError` raised AttributeError;
    # use the module-level sqlite3.OperationalError instead.
    with self.assertRaises(sqlite3.OperationalError):
      self.member_db.create()
class TestMemberDatabaseDestroy(unittest.TestCase):
  """Table-removal behaviour of MemberDatabase."""
  db_path = "/tmp/test_member_database_destroy.db"
  def setUp(self):
    self.db = sqlite3.connect(self.db_path)
    self.member_db = MemberDatabase(self.db, "?", "member_table_v")
  def tearDown(self):
    # Drop the connection and its backing file.
    self.db.close()
    remove(self.db_path)
  def test_creation_destruction_cycles_work(self):
    """Creating and destroying the table twice in a row must succeed."""
    for _ in range(2):
      self.member_db.create()
      self.member_db.destroy()
class TestMemberDatabaseAdd(unittest.TestCase):
  """Member-insertion behaviour of MemberDatabase."""
  db_path = "/tmp/test_member_database_add.db"
  def setUp(self):
    self.db = sqlite3.connect(self.db_path)
    self.member_db = MemberDatabase(self.db, "?", "member_table_v")
    self.member_db.create()
  def tearDown(self):
    # Drop the connection and its backing file.
    self.db.close()
    remove(self.db_path)
  def test_add_member_does_not_fail(self):
    """Adding a member with a future expiration must not raise."""
    expires = datetime.now() + timedelta(days = 1)
    self.member_db.add(b'test_data', "John Smith", "js@hackrva.org", expires)
class TestMemberDatabaseHave(unittest.TestCase):
  """Membership lookup via MemberDatabase.have."""
  # BUG FIX: this path was copy-pasted from TestMemberDatabaseAdd
  # ("..._add.db"), so two test classes shared (and could clobber) the
  # same backing file; give this class its own file.
  db_path = "/tmp/test_member_database_have.db"
  def setUp(self):
    self.db = sqlite3.connect(self.db_path)
    self.member_db = MemberDatabase(self.db, "?", "member_table_v")
    self.member_db.create()
    self.member_db.add(b'test_data', "John Smith", "js@hackrva.org", datetime.now() + timedelta(days = 1))
  def tearDown(self):
    # close the connection and delete the object
    self.db.close()
    remove(self.db_path)
  def test_checks_member_existence(self):
    self.assertTrue(self.member_db.have(b'test_data'))
  def test_checks_member_non_existence(self):
    self.assertFalse(self.member_db.have(b'bad_test_data'))
class TestMemberDatabaseHaveCurrent(unittest.TestCase):
  """Current-membership lookup via MemberDatabase.have_current."""
  # BUG FIX: this path was copy-pasted from TestMemberDatabaseAdd
  # ("..._add.db"), so several test classes shared the same backing file;
  # give this class its own file.
  db_path = "/tmp/test_member_database_have_current.db"
  def setUp(self):
    self.db = sqlite3.connect(self.db_path)
    self.member_db = MemberDatabase(self.db, "?", "member_table_v")
    self.member_db.create()
    self.member_db.add(b'test_data', "John Smith", "js@hackrva.org", datetime.now() + timedelta(days = 1))
  def tearDown(self):
    # close the connection and delete the object
    self.db.close()
    remove(self.db_path)
  def test_checks_member_existence(self):
    self.assertTrue(self.member_db.have_current(b'test_data'))
  def test_checks_member_non_existence(self):
    self.assertFalse(self.member_db.have_current(b'bad_test_data'))
class TestMemberDatabaseList(unittest.TestCase):
  """Listing behaviour of MemberDatabase."""
  db_path = "/tmp/test_member_database_list.db"
  def setUp(self):
    self.db = sqlite3.connect(self.db_path)
    self.member_db = MemberDatabase(self.db, "?", "member_table_v")
    self.member_db.create()
    self.member_db.add(b'test_data', "John Smith", "jsmith@hackrva.org", datetime.now() + timedelta(days = 1))
    self.member_db.add(b'othe_data', "Crystal Meth", "cmeth@hackrva.org", datetime.now() + timedelta(days = 1))
  def tearDown(self):
    # Drop the connection and its backing file.
    self.db.close()
    remove(self.db_path)
  def test_list_contains_users(self):
    """Both inserted members appear in list()."""
    self.assertEqual(len(self.member_db.list()), 2)
class TestMemberDatabaseClear(unittest.TestCase):
  """Clearing behaviour of MemberDatabase."""
  db_path = "/tmp/test_member_database_clear.db"
  def setUp(self):
    self.db = sqlite3.connect(self.db_path)
  def tearDown(self):
    self.db.close()
    remove(self.db_path)
  def test_clear_database(self):
    """clear() leaves an empty member table behind."""
    member_db = MemberDatabase(self.db, "?", "member_table_v")
    member_db.create()
    member_db.add(b'test_data', "John Smith", "jsmith@hackrva.org", datetime.now() + timedelta(days = 1))
    member_db.add(b'othe_data', "Crystal Meth", "cmeth@hackrva.org", datetime.now() + timedelta(days = 1))
    member_db.clear()
    self.assertEqual(len(member_db.list()), 0)
class TestMemberDatabaseMimic(unittest.TestCase):
  """mimic() must copy every record from a source database."""
  db_path1 = "/tmp/test_member_database_mimic1.db"
  db_path2 = "/tmp/test_member_database_mimic2.db"
  def setUp(self):
    self.db1 = sqlite3.connect(self.db_path1)
    self.db2 = sqlite3.connect(self.db_path2)
  def tearDown(self):
    self.db1.close()
    self.db2.close()
    remove(self.db_path1)
    remove(self.db_path2)
  def test_mimic_database(self):
    """A freshly mimicked database holds the source's two members."""
    source = MemberDatabase(self.db1, "?", "member_table_v")
    source.create()
    source.add(b'test_data', "John Smith", "jsmith@hackrva.org", datetime.now() + timedelta(days = 1))
    source.add(b'othe_data', "Crystal Meth", "cmeth@hackrva.org", datetime.now() + timedelta(days = 1))
    self.db1.commit()
    copy = MemberDatabase(self.db2, "?", "member_table_v")
    copy.create()
    copy.mimic(source)
    self.assertEqual(len(copy.list()), 2)
class TestMemberDatabaseSync(unittest.TestCase):
  """Single-record sync() behaviour of MemberDatabase."""
  db_path1 = "/tmp/test_member_database_sync1.db"
  db_path2 = "/tmp/test_member_database_sync2.db"
  def setUp(self):
    self.db1 = sqlite3.connect(self.db_path1)
    self.db2 = sqlite3.connect(self.db_path2)
  def tearDown(self):
    self.db1.close()
    self.db2.close()
    remove(self.db_path1)
    remove(self.db_path2)
  def _make_source(self):
    # Build and commit a source database with two current members.
    member_db1 = MemberDatabase(self.db1, "?", "member_table_v")
    member_db1.create()
    member_db1.add(b'test_data', "John Smith", "jsmith@hackrva.org", datetime.now() + timedelta(days = 1))
    member_db1.add(b'othe_data', "Crystal Meth", "cmeth@hackrva.org", datetime.now() + timedelta(days = 1))
    self.db1.commit()
    return member_db1
  def test_mimic_database(self):
    # Kept for backwards compatibility: this duplicated the Mimic test.
    member_db1 = self._make_source()
    member_db2 = MemberDatabase(self.db2, "?", "member_table_v")
    member_db2.create()
    member_db2.mimic(member_db1)
    self.assertTrue(member_db2.have_current(b'test_data'))
    self.assertTrue(member_db2.have_current(b'othe_data'))
  def test_sync_record(self):
    # COVERAGE FIX: the class is named ...Sync but previously only ever
    # called mimic(); actually exercise sync() on a single record.
    member_db1 = self._make_source()
    member_db2 = MemberDatabase(self.db2, "?", "member_table_v")
    member_db2.create()
    member_db2.sync(member_db1, b'test_data')
    self.assertTrue(member_db2.have_current(b'test_data'))
    self.assertFalse(member_db2.have_current(b'othe_data'))
class TestMemberDatabaseUseResource(unittest.TestCase):
  """use_resource() must scope lookups to rows matching one resource."""
  # Three sqlite files: one hand-built source that includes a `resource`
  # column, plus one destination per resource under test.
  db_path1 = "/tmp/test_member_database_use_resource1.db"
  db_path2 = "/tmp/test_member_database_use_resource2.db"
  db_path3 = "/tmp/test_member_database_use_resource3.db"
  def setUp(self):
    self.db1 = sqlite3.connect(self.db_path1)
    self.db2 = sqlite3.connect(self.db_path2)
    self.db3 = sqlite3.connect(self.db_path3)
  def tearDown(self):
    # Close all three connections and delete their backing files.
    self.db1.close()
    self.db2.close()
    self.db3.close()
    remove(self.db_path1)
    remove(self.db_path2)
    remove(self.db_path3)
  def test_use_resource(self):
    """Mimicking a resource-scoped view copies only matching rows."""
    cur = self.db1.cursor()
    # Build the source table by hand: MemberDatabase.create() does not
    # include the `resource` column this test depends on.
    cur.execute("""
    CREATE TABLE member_table_v (
      hash CHAR(24),
      name TEXT,
      email VARCHAR(254),
      expiration_date DATE,
      resource VARCHAR(255));
    """)
    # John Smith may use both the door and the laser; Crystal Meth only
    # the door. All expirations are one day in the future (i.e. current).
    cur.execute("""
    INSERT INTO member_table_v (hash, name, email, expiration_date, resource) VALUES
    (
      ?,
      'John Smith',
      'jsmith@hackrva.org',
      ?,
      'door'
    ),
    (
      ?,
      'John Smith',
      'jsmith@hackrva.org',
      ?,
      'laser'
    ),
    (
      ?,
      'Crystal Meth',
      'cmeth@hackrva.org',
      ?,
      'door'
    );
    """,
    (
      MemberDatabase.hash(b'test_data'),
      datetime.now() + timedelta(days = 1),
      MemberDatabase.hash(b'test_data'),
      datetime.now() + timedelta(days = 1),
      MemberDatabase.hash(b'othe_data'),
      datetime.now() + timedelta(days = 1),
    ))
    self.db1.commit()
    # Use laser resource: only John Smith's laser row should be copied.
    member_db1a = MemberDatabase(self.db1, "?", "member_table_v")
    member_db1a.use_resource('laser')
    member_db2 = MemberDatabase(self.db2, "?", "member_table_v")
    member_db2.create()
    member_db2.mimic(member_db1a)
    self.assertTrue(member_db2.have_current(b'test_data'))
    self.assertFalse(member_db2.have_current(b'othe_data'))
    # Use door resource: both members hold a door row.
    member_db1b = MemberDatabase(self.db1, "?", "member_table_v")
    member_db1b.use_resource('door')
    member_db3 = MemberDatabase(self.db3, "?", "member_table_v")
    member_db3.create()
    member_db3.mimic(member_db1b)
    self.assertTrue(member_db3.have_current(b'test_data'))
    self.assertTrue(member_db3.have_current(b'othe_data'))
# Allow running this test module directly (python TestMemberDatabase.py).
if __name__ == '__main__':
  unittest.main()
| {"/rfidDoor/__init__.py": ["/rfidDoor/Door.py"], "/rfidLock/test/TestDoorConnection.py": ["/rfidLock/__init__.py"], "/rfidLock/__init__.py": ["/rfidLock/MemberDatabase.py", "/rfidLock/DoorConnection.py"], "/rfidLock/test/TestMemberDatabase.py": ["/rfidLock/__init__.py"]} |
69,099 | Athulus/rfidLock | refs/heads/master | /setup.py | #!/usr/bin/python3
from setuptools import setup
from os import mkdir
# Package metadata and install configuration for the rfidLock project.
setup(
  name='rfidLock',
  version='0.1',
  packages = ['rfidLock', 'rfidDoor'],
  # Command-line entry points installed onto the PATH.
  scripts = [
    'bin/rfid_config',
    'bin/rfid_db_install',
    'bin/rfid_db_remove',
    'bin/rfid_door'],
  install_requires = [
    # "Must not install sqlite3 from pypi"
    'mysql-connector',
    'RPIO',
    'jinja2'],
  package_data = {
    # Ship the config/HTML templates alongside the package code.
    'rfidLock': ['templates/*.json', 'templates/*.html'],
  })
# TODO Create /etc/rfidlock/config.json
# TODO Install templates to /usr/share/rfidlock
| {"/rfidDoor/__init__.py": ["/rfidDoor/Door.py"], "/rfidLock/test/TestDoorConnection.py": ["/rfidLock/__init__.py"], "/rfidLock/__init__.py": ["/rfidLock/MemberDatabase.py", "/rfidLock/DoorConnection.py"], "/rfidLock/test/TestMemberDatabase.py": ["/rfidLock/__init__.py"]} |
69,100 | Athulus/rfidLock | refs/heads/master | /rfidDoor/Door.py | import RPIO
import serial
import time
# I haven't had access to the reader hardware so I can't really test this live
# Should create some stubbed out tests though
class Door(object):
  """Drives the physical door lock from an RFID reader and a lock button.

  Wires RPIO interrupts to the reader's serial line and a push button,
  and asks a DoorConnection whether a presented tag may open the door.
  """
  def __init__(
    self,
    door_connection,
    port = None, # serial port for the RFID reader; opened lazily (see below)
    lock_pin = 7, # 7 is set as the output to trigger locking the door
    unlock_pin = 8, # 8 is set as the output to trigger unlocking the door
    start_tx_pin = 17, # 17 is a gpio tied to the serial output of the rfid reader; the interrupt source
    lock_button_pin = 23 # 23 works as the input for the button
  ):
    # start_tx_pin and lock_button_pin set up as inputs, pulled up to avoid
    # false detection. start_tx_pin is used to determine when the reader
    # starts transmitting; pin 10 could not be configured as the serial
    # port and still allow interrupts.
    #
    # BUG FIX: the serial.Serial(...) default argument used to be
    # evaluated once at class-definition time, opening /dev/ttyAMA0 at
    # import even when a port was passed in; open it lazily instead.
    if port is None:
      port = serial.Serial("/dev/ttyAMA0", baudrate = 9600, timeout = 3.0)
    self.door_connection = door_connection
    self.lock_status = -1 # -1 until the first lock()/unlock() call
    self.port = port
    self.lock_pin = lock_pin
    self.unlock_pin = unlock_pin
    self.start_tx_pin = start_tx_pin
    self.lock_button_pin = lock_button_pin
    self.connection = door_connection # kept for backwards compatibility
  def run(self):
    """Install the GPIO callbacks and block waiting for interrupts."""
    RPIO.setmode(RPIO.BCM)
    # These threaded callback functions run in another thread when our
    # events are detected.
    RPIO.add_interrupt_callback(
      gpio_id=self.lock_button_pin,
      callback=self.lock_button_cb,
      edge='falling',
      debounce_timeout_ms=100,
      threaded_callback=False,
      pull_up_down=RPIO.PUD_UP)
    RPIO.add_interrupt_callback(
      gpio_id=self.start_tx_pin,
      callback=self.serial_cb,
      edge='both',
      threaded_callback=False,
      pull_up_down=RPIO.PUD_UP)
    RPIO.setup(self.lock_pin, RPIO.OUT, initial=RPIO.LOW)
    RPIO.setup(self.unlock_pin, RPIO.OUT, initial=RPIO.LOW)
    try:
      RPIO.wait_for_interrupts()
    except KeyboardInterrupt:
      pass # exit gracefully on CTRL+C
    finally:
      # BUG FIX: cleanup previously ran only on KeyboardInterrupt or a
      # normal return; any other exception left the GPIOs configured.
      # The finally also avoids the old double cleanup on CTRL+C.
      RPIO.cleanup()
  def lock(self):
    """Pulse the lock output for one second."""
    RPIO.output(self.lock_pin,True)
    time.sleep(1)
    RPIO.output(self.lock_pin,False)
    self.lock_status = 1
  def unlock(self):
    """Pulse the unlock output for one second."""
    RPIO.output(self.unlock_pin,True)
    time.sleep(1)
    RPIO.output(self.unlock_pin,False)
    self.lock_status = 0
  def lock_button_cb(self, gpio_id, value):
    """Button callback: send the lock signal to the door."""
    print("falling edge detected on 23")
    self.lock()
  def serial_cb(self, gpio_id, value):
    """Reader callback: read a tag and unlock if it verifies."""
    rcv = self.port.read(16)
    # NOTE(review): under Python 3 port.read returns bytes, so this
    # comparison should be against b'' — confirm the target interpreter.
    if rcv != '':
      # removing whitespace characters coming from the rfid reader
      x = rcv[1:13]
      print(x)
      # check db for user
      if self.door_connection.check_request(x):
        # BUG FIX: was the Python 2-only statement `print 'unlocking'`,
        # a SyntaxError under Python 3.
        print('unlocking')
        self.unlock()
| {"/rfidDoor/__init__.py": ["/rfidDoor/Door.py"], "/rfidLock/test/TestDoorConnection.py": ["/rfidLock/__init__.py"], "/rfidLock/__init__.py": ["/rfidLock/MemberDatabase.py", "/rfidLock/DoorConnection.py"], "/rfidLock/test/TestMemberDatabase.py": ["/rfidLock/__init__.py"]} |
69,101 | Athulus/rfidLock | refs/heads/master | /rfidLock/MemberDatabase.py | # This is mostly a CRUD package for accessing the database of users
# There are certain
import hashlib
import pdb
from base64 import b64encode
from contextlib import closing
from datetime import datetime
# Used to abstract database details a little bit more
class MemberDatabase(object):
  """
  An object used to abstract details of the member database from direct access.

  Internally, email addresses and hashes must be unique when added to the
  database.

  The RFID data is hashed before use to prevent abuse
  """
  def __init__(self, db, subs, table_name = "member_table"):
    """
    db - database object to use (any DB-API 2.0 connection)
    subs - parameter placeholder for the database in use ("?" for sqlite3,
           "%s" for mysql.connector)
    table_name - name of the member table to operate on
    """
    self.db = db
    self.subs = subs
    self.table_name = table_name
    try:
      # mysql.connector exposes autocommit control; sqlite3 does not.
      self.db.autocommit(True)
    except Exception:
      pass
    # Technically, emails may be 254 characters at most
    self.start_query = u"""
    CREATE TABLE {1} (
      hash CHAR(24),
      name TEXT,
      email VARCHAR(254),
      expiration_date DATE,
      CONSTRAINT pk_hash PRIMARY KEY(hash));
    """.format(subs, table_name)
    self.destroy_query = u"""
    DROP TABLE {1};
    """.format(subs, table_name)
    self.add_query = u"""
    INSERT INTO {1} (name, email, hash, expiration_date) VALUES ({0}, {0}, {0}, {0});
    """.format(subs, table_name)
    self.have_query = u"""
    SELECT COUNT(hash) FROM {1} WHERE hash={0};
    """.format(subs, table_name)
    self.have_current_query = u"""
    SELECT COUNT(hash) FROM {1} WHERE hash={0} AND expiration_date > {0};
    """.format(subs, table_name)
    self.list_query = u"""
    SELECT name, email, expiration_date FROM {1};
    """.format(subs, table_name)
    self.content_query = u"""
    SELECT hash, name, email, expiration_date FROM {1};
    """.format(subs, table_name)
    self.record_query = u"""
    SELECT hash, name, email, expiration_date FROM {1} WHERE hash={0};
    """.format(subs, table_name)
    self.clone_query = u"""
    INSERT INTO {1} (hash, name, email, expiration_date) VALUES ({0}, {0}, {0}, {0});
    """.format(subs, table_name)
  @staticmethod
  def hash(card_data):
    """Hashes the provided RFID data using MD5, returned base64-encoded."""
    m = hashlib.md5()
    m.update(card_data)
    # base64 keeps the digest printable: binary data is hard to work with
    # across Python versions.
    # BUG FIX: removed a leftover Python 2 debug `print` statement here
    # (and in have/have_current) that made this module a SyntaxError
    # under Python 3.
    return b64encode(m.digest()).decode()
  def use_resource(self, resource):
    """
    Restrict lookups to rows whose `resource` column matches.

    BUG FIXES: these rewritten queries previously compared against an
    unquoted resource name (a SQL error for values such as 'door'), and
    the have_current query referenced a non-existent `expire_date` column
    instead of `expiration_date`.
    """
    self.have_query = u"""
    SELECT COUNT(hash) FROM {1} WHERE hash={0} AND resource='{2}';
    """.format(self.subs, self.table_name, resource)
    self.have_current_query = u"""
    SELECT COUNT(hash) FROM {1} WHERE hash={0} AND expiration_date > {0} AND resource='{2}';
    """.format(self.subs, self.table_name, resource)
    self.list_query = u"""
    SELECT name, email, expiration_date FROM {1} WHERE resource='{2}';
    """.format(self.subs, self.table_name, resource)
    self.content_query = u"""
    SELECT hash, name, email, expiration_date FROM {1} WHERE resource='{2}';
    """.format(self.subs, self.table_name, resource)
    self.record_query = u"""
    SELECT hash, name, email, expiration_date FROM {1} WHERE hash={0} AND resource='{2}';
    """.format(self.subs, self.table_name, resource)
  def add(self, card_data, member_name, member_email, expiration):
    """Adds a new member to the list of members"""
    with closing(self.db.cursor()) as cur:
      cur.execute(self.add_query, (member_name, member_email, MemberDatabase.hash(card_data), expiration))
      self.db.commit()
  def have(self, card_data):
    """
    Uses the hash of the member's RFID data to check whether they have ever
    been a member.
    """
    with closing(self.db.cursor()) as cur:
      cur.execute(self.have_query, (MemberDatabase.hash(card_data), ))
      result = cur.fetchone()[0]
      self.db.commit()
    return result > 0
  def have_current(self, card_data):
    """
    Uses the member's RFID data to check whether they are a current member.
    """
    with closing(self.db.cursor()) as cur:
      cur.execute(self.have_current_query, (MemberDatabase.hash(card_data), datetime.now()))
      result = cur.fetchone()[0]
      self.db.commit()
    return result > 0
  def list(self):
    """Retrieves a list of all members and former members"""
    with closing(self.db.cursor()) as cur:
      cur.execute(self.list_query)
      return cur.fetchall()
  def create(self):
    """Creates the tables necessary for the membership system"""
    with closing(self.db.cursor()) as cur:
      cur.execute(self.start_query)
      self.db.commit()
  def destroy(self):
    """Removes the tables created for this system"""
    with closing(self.db.cursor()) as cur:
      cur.execute(self.destroy_query)
      self.db.commit()
  def clear(self):
    """Resets the contents of this database to be empty"""
    self.destroy()
    self.create()
  def mimic(self, other):
    """Makes this database identical to the provided database"""
    self.clear()
    with closing(self.db.cursor()) as cur, closing(other.db.cursor()) as othercur:
      othercur.execute(other.content_query)
      for entry in othercur:
        cur.execute(self.clone_query, entry)
      self.db.commit()
  def sync(self, other, card_data):
    """Updates a singular record from a different database"""
    with closing(self.db.cursor()) as cur, closing(other.db.cursor()) as othercur:
      othercur.execute(other.record_query, (MemberDatabase.hash(card_data), ))
      cur.execute(self.clone_query, othercur.fetchone())
      self.db.commit()
| {"/rfidDoor/__init__.py": ["/rfidDoor/Door.py"], "/rfidLock/test/TestDoorConnection.py": ["/rfidLock/__init__.py"], "/rfidLock/__init__.py": ["/rfidLock/MemberDatabase.py", "/rfidLock/DoorConnection.py"], "/rfidLock/test/TestMemberDatabase.py": ["/rfidLock/__init__.py"]} |
69,102 | Athulus/rfidLock | refs/heads/master | /rfidLock/SlackInviter.py | # Mostly for slack invite
import json
import pycurl
import traceback
from time import time
from sys import stderr, exc_info
class SlackInviter(object):
  """Sends Slack admin-invite requests via pycurl and records the outcome."""
  def __init__(self, site, token):
    # res/msg hold the outcome of the most recent invite attempt.
    self.res = False
    self.msg = None
    self.site = site
    self.token = token
  def handle_result(self, buf):
    """Parse the Slack API JSON reply from file-like *buf* into res/msg."""
    try:
      obj = json.load(buf)
      self.res = obj['ok']
      if not self.res:
        self.msg = obj['error']
    except Exception:
      # Narrowed from a bare except; log the parse failure and carry on.
      stderr.write("Failure in write function")
      typ, val, trace = exc_info()
      stderr.write(str(val))
      traceback.print_tb(trace, None, stderr)
  def invite(self, email, name):
    """POST an invite for email/name; return (ok, error_message)."""
    try:
      curl = pycurl.Curl()
      url = "https://" + self.site + "/api/users.admin.invite?t=" + str(int(time()))
      options = ["email=" + email, "token=" + self.token, "first_name=" + name]
      curl.setopt(pycurl.URL, url)
      curl.setopt(pycurl.POST, True)
      curl.setopt(pycurl.COPYPOSTFIELDS, "&".join(options))
      # BUG FIX: was the bare name `handle_result`, which is undefined at
      # call time (NameError); the bound method is self.handle_result.
      # NOTE(review): pycurl passes raw byte chunks to WRITEFUNCTION while
      # handle_result expects a file-like object for json.load — confirm
      # against the pycurl documentation.
      curl.setopt(pycurl.WRITEFUNCTION, self.handle_result)
      curl.perform() # perform blocks execution
      result = self.res
      self.res = False
      message = self.msg
      self.msg = None
      curl.close()
      return (result, message)
    except Exception:
      # Narrowed from a bare except; report the failure on stderr.
      typ, val, trace = exc_info()
      stderr.write(str(typ))
      stderr.write("Failure\r\n")
      stderr.write(str(val))
      traceback.print_tb(trace, None, stderr)
| {"/rfidDoor/__init__.py": ["/rfidDoor/Door.py"], "/rfidLock/test/TestDoorConnection.py": ["/rfidLock/__init__.py"], "/rfidLock/__init__.py": ["/rfidLock/MemberDatabase.py", "/rfidLock/DoorConnection.py"], "/rfidLock/test/TestMemberDatabase.py": ["/rfidLock/__init__.py"]} |
69,104 | nlefrancois6/Phys-321-Project | refs/heads/main | /tests/test_helper_functions.py | import unittest
import nose.tools as nt
import numpy as np
import helper_functions as hf
import pypico
class test_tools():
  """Regression tests for the helper_functions posterior and prior routines."""
  def setUp(self):
    # No shared fixtures are needed at the moment.
    pass
  def tearDown(self):
    pass
  def _load_wmap(self):
    # Load the WMAP TT spectrum columns: multipole, power, error.
    wmap = np.loadtxt('wmap_tt_spectrum_9yr_v5.txt')
    return wmap[:, 0], wmap[:, 1], wmap[:, 2]
  def test_WMAP_correlated_posterior(self):
    """Correlated posterior reproduces the reference value."""
    theta = np.asarray([70, 0.02, 0.1, 0.0, 0.05, 2e-9, 0.97, 0.07])
    multipole, power, errPower = self._load_wmap()
    pico = pypico.load_pico("jcset_py3.dat")
    p_log_post = hf.log_post_WMAP_correlated(theta, multipole, power, errPower)
    assert type(p_log_post) == np.float64
    nt.assert_equal(round(p_log_post, 1), -1412.6)
  def test_WMAP_uncorrelated_posterior(self):
    """Uncorrelated posterior reproduces the reference value."""
    theta = np.asarray([70, 0.02, 0.1, 0.0, 0.05, 2e-9, 0.97, 0.07])
    multipole, power, errPower = self._load_wmap()
    pico = pypico.load_pico("jcset_py3.dat")
    p_log_post = hf.log_post_WMAP_uncorrelated(theta, multipole, power, errPower)
    assert type(p_log_post) == np.float64
    nt.assert_equal(round(p_log_post, 1), -1428.6)
  def test_WMAP_prior(self):
    """Prior is flat for physical Omega_b and -inf outside [0, 1]."""
    H0 = 70
    h = H0 / 100
    for Omb, want in zip([-0.1, 0.5, 1.1], [-np.inf, 0.0, -np.inf]):
      theta = np.asarray([H0, Omb * h ** 2, 0.1, 0.0, 0.05, 2e-9, 0.97, 0.07])
      nt.assert_equal(want, hf.log_prior_WMAP(theta))
  def test_get_cov_model(self):
    """The covariance model returns a square matrix matching the data size."""
    multipole, power, errPower = self._load_wmap()
    C = hf.get_cov_model(errPower, 0.1)
    nt.assert_equal(C.shape[0], len(errPower))
    nt.assert_equal(C.shape[1], len(errPower))
  def test_sn_posterior(self):
    """Supernova posterior reproduces the reference value."""
    sn_z, sn_dm, sn_dm_err = np.loadtxt(
      "SCPUnion2.1_mu_vs_z.txt", delimiter="\t", skiprows=5,
      usecols = (1, 2, 3), unpack=True)
    theta = np.asarray([70, 0.7, 0.3])
    p_log_post = hf.log_post_sn(theta, sn_z, sn_dm, sn_dm_err)
    assert type(p_log_post) == np.float64
    nt.assert_equal(round(p_log_post, 1), -215.2)
  def test_sn_prior(self):
    """SN prior is flat for 0 <= Om0 <= 1 and -inf outside."""
    for Om0, want in zip([-0.1, 0.5, 1.1], [-np.inf, 0.0, -np.inf]):
      nt.assert_equal(want, hf.log_prior_sn(np.asarray([70, Om0, 0.3])))
| {"/tests/test_helper_functions.py": ["/helper_functions.py"]} |
69,105 | nlefrancois6/Phys-321-Project | refs/heads/main | /helper_functions.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 11:14:51 2021
@author: noahlefrancois
"""
import numpy as np
import pypico
#import time as t
import matplotlib.pyplot as plt
#import astropy.units as u
from astropy.cosmology import LambdaCDM
import emcee
#import corner
import os
import imageio
#import h5py
#Load the pico training data
# Module-level: loaded once at import so every call to get_PICO_spectrum
# can reuse the same trained emulator.
pico = pypico.load_pico("jcset_py3.dat")
def get_PICO_spectrum(pars):
    """
    Evaluate the PICO emulator of the CAMB model for one parameter set.
    Much faster than the full CAMB model.

    Input:
        pars (arr)- model parameters
    Output:
        tt (arr)- power spectrum values as a function of multipole moment
        according to a pico model with the given parameters
    """
    H0, ombh2, omch2, omk, tau, As, ns, alpha = pars
    # pico takes the cosmology as keyword arguments; alpha (the error
    # correlation parameter) is not part of the emulator's inputs.
    output = pico.get(force=True, As=As, ns=ns, tau=tau,
                      ombh2=ombh2, omch2=omch2, H0=H0, omk=omk)
    return output['dl_TT']
def get_cov_model(err, alpha):
    """
    Build the covariance matrix for a model in which each data point is
    correlated only with its immediate neighbours, scaled by alpha.

    Input:
        err (arr)- error bars from each point in the WMAP data
        alpha (float)- parameter controlling the correlation strength
    Output:
        C (arr)- covariance matrix of size [len(err),len(err)]
    """
    # Main diagonal: the plain variances.
    variance = err ** 2
    # k = +/-1 diagonals: alpha * |sigma_i * sigma_{i+1}|.
    neighbour = alpha * np.abs(err[:-1] * err[1:])
    return (np.diag(variance)
            + np.diag(neighbour, k=1)
            + np.diag(neighbour, k=-1))
def log_likelihood_WMAP(theta, multipole, p_data, err, covariance_model):
    """
    Log likelihood (-chi^2) of a PICO fit to the WMAP TT spectrum under
    either a neighbour-correlated or an uncorrelated error model.

    Input:
        theta (arr)- model params (last entry is the correlation alpha)
        multipole (arr)- multipole moment data from WMAP
        p_data (arr)- power spectrum data from WMAP
        err (arr)- error bars on WMAP data points
        covariance_model (str)- 'correlated' or 'uncorrelated'
    Output:
        -chi_sq (float), proportional to the log likelihood probability
    """
    # PICO extrapolates past the measured range; keep only the entries
    # that line up with the data (the model array starts at l = 2).
    p_model = get_PICO_spectrum(theta)[2:len(multipole) + 2]
    if covariance_model == 'correlated':
        alpha = theta[7]  # covariance scaling parameter
        residual = np.array([p_data - p_model])
        C_inv = np.linalg.inv(get_cov_model(err, alpha))
        # chi^2 = r . (C^-1 . r^T), a 1x1 matrix.
        chi_sq = np.dot(residual, np.dot(C_inv, residual.T))[0, 0]
    elif covariance_model == 'uncorrelated':
        delta = np.asarray(p_data) - np.asarray(p_model)
        chi_sq = sum(delta ** 2 / np.asarray(err) ** 2)
    return -chi_sq
def log_prior_WMAP(theta):
    """
    Evaluate the log prior probability for the WMAP fit.

    Input:
        theta (arr)- model params
    Output:
        0.0 if the params fall within the constraints, else -np.inf
    """
    H0, ombh2, omch2, omk, tau, As, ns, alpha = theta
    # Convert the physical densities to Omegas via h = H0/100.
    h_sq = (H0 / 100) ** 2
    Omb = ombh2 / h_sq
    Omde = omch2 / h_sq
    # Reject parameter sets outside the physical constraints.
    if not (0. <= Omb <= 1.):
        return -np.inf  # log(0)
    if not (0. < Omde < 1.):
        return -np.inf
    if not (-1. <= alpha <= 1.):
        return -np.inf
    # Flat prior: the constant doesn't matter since MCMCs only care
    # about ratios of probabilities.
    return 0.0
def log_post_WMAP_correlated(theta, multipole, p_data, err):
    """
    Evaluate the log posterior for the WMAP fit using the correlated
    error model.

    Input:
        theta (arr)- model params
        multipole (arr)- multipole moment data from WMAP
        p_data (arr)- power spectrum data from WMAP
        err (arr)- error bars on WMAP data points
    Output:
        log posterior, or -np.inf when theta violates the prior
    """
    prior = log_prior_WMAP(theta)
    if np.isfinite(prior):
        # Inside the constraints the prior is 0.0, so this reduces to
        # the likelihood under the correlated covariance model.
        return prior + log_likelihood_WMAP(theta, multipole, p_data, err,
                                           'correlated')
    return -np.inf
def log_post_WMAP_uncorrelated(theta, multipole, p_data, err):
    """
    Evaluate the log posterior for the WMAP fit using the uncorrelated
    error model.

    Input:
        theta (arr)- model params
        multipole (arr)- multipole moment data from WMAP
        p_data (arr)- power spectrum data from WMAP
        err (arr)- error bars on WMAP data points
    Output:
        log posterior, or -np.inf when theta violates the prior
    """
    prior = log_prior_WMAP(theta)
    if np.isfinite(prior):
        # Inside the constraints the prior is 0.0, so this reduces to
        # the likelihood under the uncorrelated error model.
        return prior + log_likelihood_WMAP(theta, multipole, p_data, err,
                                           'uncorrelated')
    return -np.inf
def plot_SNe_sample(x_data, flat_samples, ind):
    """
    Render one SNe MCMC sample's model curve and save it as an animation frame.

    Input:
        x_data (arr)- x-axis values from data
        flat_samples (arr)- parameters for each MCMC sample
        ind (int) - index for selecting one sample
    Output:
        fig_name (str) - filename the frame was saved under, for the animation
    """
    # Build the cosmology for this sample and evaluate the distance modulus
    H0, Om0, Ode0 = flat_samples[ind]
    model = LambdaCDM(H0=H0, Om0=Om0, Ode0=Ode0)
    mu_model = mu_func(x_data, model)
    # Overlay the curve nearly transparent so that many frames blend visually
    plt.plot(x_data, mu_model, alpha=0.01, color='red', zorder=2)
    # Persist the frame so the animation writer can pick it up later
    fig_name = 'frame' + str(ind) + '.png'
    plt.savefig(fig_name)
    return fig_name
def plot_WMAP_sample(x_data, flat_samples, ind):
    """
    Render one WMAP MCMC sample's model spectrum and save it as an animation frame.

    Input:
        x_data (arr)- x-axis values from data
        flat_samples (arr)- parameters for each MCMC sample
        ind (int) - index for selecting one sample
    Output:
        fig_name (str) - filename the frame was saved under, for the animation
    """
    # Evaluate the PICO power spectrum for this sample's parameters
    spectrum = get_PICO_spectrum(flat_samples[ind])
    # Skip the first two multipoles and trim to the length of the data
    spectrum = spectrum[2:len(x_data) + 2]
    # Overlay the curve nearly transparent so that many frames blend visually
    plt.plot(x_data, spectrum, alpha=0.01, color='red', zorder=2)
    # Persist the frame so the animation writer can pick it up later
    fig_name = 'frame' + str(ind) + '.png'
    plt.savefig(fig_name)
    return fig_name
def write_animation(fig_name_list, filename):
    """
    Stitch a series of .png frames into a .gif saved in the working directory,
    then delete the frame files.

    Input:
        fig_name_list (arr): all of the frame filenames, in display order
        filename (str): name under which the animation will be saved
    """
    # Append each frame to the gif in order
    with imageio.get_writer(filename, mode='I') as writer:
        for frame in fig_name_list:
            writer.append_data(imageio.imread(frame))
    # Remove the now-redundant frame files (set() guards against duplicates)
    for frame in set(fig_name_list):
        os.remove(frame)
    print('Animation saved as ', filename)
def MCMC_animation(flat_samples, x_data, y_data, y_err, dataset, filename, N_samples):
    """
    Create an animation of the first N_samples of the MCMC fit
    Input:
        flat_samples: for dataset=='SNe' this is an emcee sampler object
            (get_chain is called on it below); for 'WMAP' it is presumably
            already a flat array of samples — TODO confirm the intended type
        x_data (arr)
        y_data (arr)
        y_err (arr): error bars on y_data (currently unused: the errorbar
            calls below are commented out)
        dataset (str): controls whether to animate SNe or WMAP data
        filename (str): name under which the animation will be saved
        N_samples (int): number of samples in single chain
    """
    #Plot the original data for either SNe or WMAP
    plt.figure(figsize=(7,7))
    if dataset == 'SNe':
        # NOTE(review): only the SNe branch flattens the chain here — verify
        # that WMAP callers pass already-flattened samples
        flat_samples = flat_samples.get_chain(flat=True)[:N_samples]
        plt.plot(x_data, y_data,'.k')
        #plt.errorbar(x_data, y_data, yerr=y_err, linestyle = 'None', fmt='.k',mec='black',mfc='black',ecolor='grey',zorder=1)
        plt.xlabel(r'$z$')
        plt.ylabel(r'$m-M (Mag)$')
        plt.xscale('log')
        plt.title('SCP Union 2.1 SNe Ia Data')
    if dataset =='WMAP':
        plt.plot(x_data,y_data)
        #plt.errorbar(x_data,y_data,y_err,fmt='*')
        plt.xlabel('Multipole Moment')
        plt.ylabel('Power Spectrum')
        plt.title('WMAP Satellite 9-year CMB Data')
    #Plot each sample and save the plot frame as a .png
    fig_name_list = []
    for ind in range(N_samples):
        if dataset == 'SNe':
            fig_name = plot_SNe_sample(x_data, flat_samples, ind)
        if dataset == 'WMAP':
            fig_name = plot_WMAP_sample(x_data, flat_samples, ind)
        #Store the frame filename
        fig_name_list.append(fig_name)
    #Collect the .png frames and save them as a .gif animation
    write_animation(fig_name_list, filename)
def run_mcmc(log_posterior, args, ndim, nwalkers, initial_pos, backend_filename, do_burn_in, plot_convergence=True, num_iter=1000, burn_in=0,thin=0):
    """
    Function which will either run MCMC with unknown burn-in time just until convergence (do_burn_in=True)
    OR with known burn-in, thinning, and number of iterations.
    Input:
        log_posterior (func): log posterior probability function to evaluate
        args (arr): contains x_data, y_data, and y_err
        ndim (int): number of model parameters to fit
        nwalkers (int): number of emcee walkers to use
        initial_pos (list): initial position in parameter space for each walker
            (NOTE(review): only the else-branch uses this parameter; the
            burn-in branch relies on mcmc_burn_in reading a module-level
            `initial_pos` — confirm that global exists at call time)
        backend_filename (str): name for backend file of results
        do_burn_in (bool): controls whether to monitor convergence or specify number of iterations
        plot_convergence (bool): controls whether to plot the convergence
            figure (shadows the module-level plot_convergence() function
            inside this scope)
        num_iter (int): specified number of iterations if do_burn_in==False
        burn_in (int): number of steps to discard as burn-in
        thin (int): thinning rate for chain. If thin=n, keep only every nth sample
            (NOTE(review): the default thin=0 is passed straight to get_chain;
            emcee expects thin >= 1 — confirm callers always override it)
    Output:
        (sampler, flat_samples, burn_in, thin)
    """
    # Set up the backend to store chain results in case of crashing or infinite looping
    backend = emcee.backends.HDFBackend(backend_filename)
    backend.reset(nwalkers, ndim) #reset if it's already been created
    # Initialize the sampler
    sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=args, backend=backend)
    if(do_burn_in):
        #run until converged , with the option of plotting convergence
        autocorr, tau = mcmc_burn_in(sampler,plot_convergence)
        #calculate the burn-in and thin parameters
        burn_in = int(2 * np.max(tau))
        thin = int(0.5 * np.min(tau))
        print("Mean autocorrelation time: {0:.3f} steps".format(np.mean(tau)))
        print("burn-in: {0}".format(burn_in))
        print("thin: {0}".format(thin))
    else:
        #run with specified number of iterations
        # NOTE(review): run_mcmc returns the final state, not flat samples;
        # this assignment is immediately overwritten by get_chain below
        flat_samples = sampler.run_mcmc(initial_pos, num_iter, progress=True)
    flat_samples = sampler.get_chain(discard=burn_in, flat=True, thin=thin)
    return sampler, flat_samples , burn_in , thin
def plot_convergence(autocorr, index):
    """
    Plot the convergence diagnostic: autocorrelation time vs chain length.

    Input:
        autocorr (arr)- autocorrelation-time estimates as a function of chain length
        index (int)- number of autocorrelation-time measurements to plot
    """
    # Measurements were taken every 100 steps, so chain length = 100 * (k+1)
    chain_lengths = 100 * np.arange(1, index + 1)
    tau_estimates = autocorr[:index]
    # Reference line tau = N/100: the convergence criterion used during burn-in
    plt.plot(chain_lengths, chain_lengths / 100.0, "--k")
    plt.plot(chain_lengths, tau_estimates)
    plt.xlim(0, chain_lengths.max())
    plt.ylim(0, tau_estimates.max() + 0.1 * (tau_estimates.max() - tau_estimates.min()))
    plt.xlabel("number of steps")
    plt.ylabel(r"mean $\hat{\tau}$")
    plt.show()
def mcmc_burn_in(sampler,plot,max_n=10000000):
    """
    Note: The following code is adapted from an emcee tutorial
    Run mcmc until converged, or for at most max_n steps (default 10,000,000)
    Input:
        sampler (obj)- output of emcee with multiple walkers
        plot (bool)- whether to show the convergence-tracking figure at the end
        max_n (int)- hard cap on the number of sampling steps
    Output:
        autocorr (arr)- autocorrelation times as a function of chain length
        tau (float) - autocorrelation time
    NOTE(review): this function reads `initial_pos` from module scope — it
    raises NameError unless a global `initial_pos` exists; consider passing
    the initial walker positions in as a parameter.
    """
    # We'll track how the average autocorrelation time estimate changes
    index = 0
    autocorr = np.empty(max_n)
    # This will be useful to testing convergence
    old_tau = np.inf
    # Now we'll sample for up to max_n steps
    for sample in sampler.sample(initial_pos, iterations=max_n, store = True , progress=True):
        # Only check convergence every 100 steps
        if sampler.iteration % 100:
            continue
        # Compute the autocorrelation time so far
        # Using tol=0 means that we'll always get an estimate even if it isn't trustworthy
        tau = sampler.get_autocorr_time(tol=0)
        autocorr[index] = np.mean(tau)
        index += 1
        # Check convergence: chain at least 100*tau long AND tau changed by
        # less than 1% since the last check
        converged = np.all(tau * 100 < sampler.iteration)
        converged &= np.all(np.abs(old_tau - tau) / tau < 0.01)
        if converged:
            break
        old_tau = tau
    # Final estimate with the default tolerance (raises if the chain is short)
    tau = sampler.get_autocorr_time()
    if(plot):
        plot_convergence(autocorr,index)
    return autocorr, tau
def mu_func(z, cosmo):
    """
    Convert redshift to the distance modulus mu (aka m-M) for a given cosmology.

    Input:
        z (float): redshift value
        cosmo (obj): LambdaCDM cosmology model
    Output:
        mu (float): distance modulus, aka m-M
    """
    # Luminosity distance in the model universe (.value strips the unit;
    # the +25 offset assumes D_L is expressed in Mpc — TODO confirm)
    lum_dist = cosmo.luminosity_distance(z).value
    # Standard distance-modulus relation mu = 5*log10(D_L[Mpc]) + 25
    return 5 * np.log10(lum_dist) + 25
def log_likelihood_sn(theta, z, mu_data, mu_err):
    """
    Evaluate the log likelihood of an SNe Ia fit with an uncorrelated error
    model for the parameters in theta.

    Input:
        theta - array of model params (H0, Om0, Ode0)
        z - array of redshift data from SNe Ia
        mu_data - array of mu data from SNe Ia
        mu_err - array of error bars on mu_data
    Output:
        The Gaussian log likelihood (equal to -chi_sq up to a constant)
    """
    # Build the cosmology described by this parameter vector
    H0, Om0, Ode0 = theta
    cosmo = LambdaCDM(H0=H0, Om0=Om0, Ode0=Ode0)
    # Model prediction at the observed redshifts
    mu_model = mu_func(z, cosmo)
    variance = mu_err ** 2
    residual = mu_data - mu_model
    # The 2*pi normalisation term is constant w.r.t. theta and does not
    # change the shape of the likelihood surface
    return -0.5 * np.sum(residual ** 2 / variance + np.log(2 * np.pi * variance))
def log_prior_sn(theta):
    """
    Flat log prior over the SNe Ia model parameters.

    Input:
        theta (arr)- model params (H0, Om0, Ode0)
    Output:
        0.0 when the parameters fall inside the physical constraints,
        otherwise -np.inf (log of zero probability)
    """
    _, Om0, Ode0 = theta
    # Physically allowed region: matter density in [0, 1], dark energy in (0, 1)
    inside = (0. <= Om0 <= 1.) and (0. < Ode0 < 1.)
    # Uniform prior: the constant doesn't matter since MCMCs only care about
    # *ratios* of probabilities
    return 0.0 if inside else -np.inf
def log_post_sn(theta, z, mu_data, mu_err):
    """
    Evaluate the log posterior for SNe Ia data and model parameters.

    Input:
        theta (arr)- model params
        z (arr)- redshift data from SNe Ia
        mu_data (arr)- mu data from SNe Ia
        mu_err (arr)- error bars on mu_data
    Output:
        The log likelihood when the parameters satisfy the prior constraints,
        otherwise -np.inf
    """
    log_prior = log_prior_sn(theta)
    # Zero prior probability short-circuits the (expensive) likelihood call
    if not np.isfinite(log_prior):
        return -np.inf
    # Flat prior contributes 0.0 inside the constraints
    return log_prior + log_likelihood_sn(theta, z, mu_data, mu_err)
| {"/tests/test_helper_functions.py": ["/helper_functions.py"]} |
69,116 | chenrun666/JW_purchase | refs/heads/master | /bin/action.py | import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.select import Select
from conf.settings import *
from bin.log import logger
from bin.MyException import *
class Action(object):
    """Thin wrapper around a Selenium WebDriver offering logged, fail-soft page actions.

    Every action sets self.run_status = False on failure so the caller can
    decide whether to continue; timeouts additionally raise StopException.
    """
    def __init__(self, url):
        """
        Initialise the browser.

        Opens a Chrome WebDriver on *url* and prepares an explicit-wait helper
        (20 s timeout, 0.5 s poll).
        """
        # Current run status; execution should stop once a page action errors out
        self.run_status = True
        self.index_url = url
        try:
            # chrome_options = Options()
            # chrome_options.add_argument('--headless')
            self.driver = webdriver.Chrome()
            # self.driver = webdriver.Firefox()
            # self.driver = webdriver.PhantomJS()
            self.wait = WebDriverWait(self.driver, 20, 0.5)
            self.driver.get(self.index_url)
            time.sleep(3)
            # self.driver.set_window_size(500, 700)
            logger.info("初始化webdriver对象")
        except TimeoutException:
            logger.error("初始化超时")
            self.run_status = False
            raise StopException("初始化浏览器超时")
        except Exception as e:
            logger.error("初始化webdriver对象失败" + str(e))
            self.run_status = False
    # Type content into an <input> box
    def fill_input(self, content, xpath, single_input=False, el=None):
        """
        Locate the element matching *xpath* and type *content* into it.

        :param content: text (or iterable of characters) to type
        :param xpath: xpath expression locating the input element
        :param single_input: when True, send one character at a time with a pause
        :param el: optional parent element; when given, *xpath* is resolved
            relative to it instead of via the explicit wait
        :return: None; sets self.run_status = False on failure
        """
        try:
            if not el:
                input_content = self.wait.until(
                    EC.presence_of_element_located((
                        By.XPATH,
                        xpath
                    ))
                )
                if input_content.is_enabled():
                    # Clear first, then type character by character if requested
                    input_content.clear()
                    if single_input:
                        for item in content:
                            input_content.send_keys(item)
                            time.sleep(0.7)
                    else:
                        input_content.send_keys(content)
                else:
                    logger.debug(f"fill_input:{xpath}该元素不可操作")
                    self.run_status = False
            else:
                if not single_input:
                    el.find_element_by_xpath(xpath).send_keys(content)
                else:
                    for item in content:
                        el.find_element_by_xpath(xpath).send_keys(item)
                        time.sleep(0.3)
        except TimeoutException:
            logger.error("填写信息超时")
            self.run_status = False
            raise StopException("填写信息超时")
        except Exception as e:
            logger.error(f"定位{xpath}时,填写{content}时出错,错误信息:{str(e)}")
            self.run_status = False
    def click_btn(self, xpath, el=None):
        """Click the element matching *xpath* (relative to *el* when given)."""
        try:
            if not el:
                btn = self.wait.until(
                    EC.presence_of_element_located((
                        By.XPATH,
                        xpath
                    ))
                )
                if btn.is_enabled():
                    btn.click()
                    time.sleep(1)
                else:
                    logger.debug(f"click_btn:{xpath}该元素不可操作")
                    self.run_status = False
            else:
                el.find_element_by_xpath(xpath).click()
        except TimeoutException:
            logger.error(f"点击{xpath}超时")
            self.run_status = False
            raise StopException("点击超时")
        except Exception as e:
            logger.error(f"定位{xpath}时,点击click时出错,错误信息:{str(e)}")
            self.run_status = False
    def select_date(self, div_num, day):
        """
        Pick a day in the datepicker widget.

        :param div_num: month panel index relative to the current month
        :param day: day of month (leading zeros are stripped before lookup)
        :return: None; sets self.run_status = False on failure
        """
        try:
            a = self.wait.until(
                EC.presence_of_element_located((
                    By.XPATH,
                    f'//*[@id="datepicker"]/div/div[{div_num}]' # e.g. 2 = second month panel from the current one
                ))
            )
            # Strip a leading zero when day < 10 (link text has no padding)
            day = str(int(day))
            a.find_element_by_link_text(f"{day}").click()
            logger.info("选择出发日期")
            time.sleep(1)
        except Exception as e:
            logger.error(f"选择出发日期时发生错误,错误信息:{str(e)}")
            self.run_status = False
    def get_text(self, xpath, el=None):
        """Return the visible text of the element matching *xpath* (relative to *el* when given), or None on failure."""
        try:
            if not el:
                h1 = self.wait.until(
                    EC.presence_of_element_located((
                        By.XPATH,
                        xpath
                    ))
                )
                return h1.text
            else:
                t = el.find_element_by_xpath(xpath)
                return t.text
        except Exception as e:
            logger.error(f"获取页面文本值出错,错误信息为{str(e)}")
            self.run_status = False
    def scroll_screen(self, el=None):
        """Scroll to the bottom of the page, or scroll *el* (a WebElement or an xpath string) into view."""
        if not el:
            scroll_screen_js = 'window.scroll(0, document.body.scrollHeight)'
            self.driver.execute_script(scroll_screen_js)
        else:
            if isinstance(el, webdriver.remote.webelement.WebElement):
                self.driver.execute_script("arguments[0].scrollIntoView();", el)
                pass
            else:
                # el is an xpath string: locate it first, then scroll it into view
                element_obj = self.wait.until(
                    EC.presence_of_element_located((
                        By.XPATH,
                        el
                    ))
                )
                self.driver.execute_script("arguments[0].scrollIntoView();", element_obj)
    def get_ele_list(self, xpath):
        """Return the list of all elements matching *xpath*, or None on failure."""
        try:
            ele_list = self.wait.until(
                EC.presence_of_all_elements_located((
                    By.XPATH,
                    xpath
                ))
            )
            return ele_list
        except Exception as e:
            logger.error(f"获取元素列表失败,错误提示:" + str(e))
            self.run_status = False
| {"/bin/action.py": ["/conf/settings.py"], "/bin/jw_payment.py": ["/bin/action.py", "/conf/settings.py"]} |
69,117 | chenrun666/JW_purchase | refs/heads/master | /test/exception_test.py | from bin.MyException import StopException
# Smoke test: a raised StopException is caught by its own handler first,
# so the generic Exception branch below is never reached here.
try:
    raise StopException("stop")
except StopException as e:
    # StopException presumably stores its message on .msg (defined in
    # bin.MyException) — verify against that class
    print(e.msg)
except Exception as e:
    print(e)
| {"/bin/action.py": ["/conf/settings.py"], "/bin/jw_payment.py": ["/bin/action.py", "/conf/settings.py"]} |
69,118 | chenrun666/JW_purchase | refs/heads/master | /conf/settings.py | TEST = True
# TEST = False
CLIENTTYPE = "JW_WEB_CLIENT"
MACHINECODE = "JWceshi"
# Template of the fill-back payload reported to the server after a purchase attempt
BACKFILLINFO = {
    "accountPassword": "",
    "accountType": "",
    "accountUsername": "",
    "cardName": "",  # e.g. VCC-VCC
    "cardNumber": "",  # e.g. 5533970000008000
    "checkStatus": True,
    "clientType": "JW_WEB_CLIENT",
    "createTaskStatus": True,
    "linkEmail": "",
    "linkEmailPassword": "",
    "linkPhone": "",
    "machineCode": "JWceshi",
    "nameList": [],
    "payTaskId": None,  # e.g. 34212
    "pnr": "",
    "price": None,  # tax-included ticket total actually paid (excludes baggage)
    "baggagePrice": None,
    "sourceCur": "EUR",
    "status": None,
    "targetCur": "EUR"
}
# Status codes used in the "status" field above:
# BookingFail(301, "booking failed"),
# PriceVerifyFail(340, "failed, move on to the next rule"),  // specific to some clients
# BookingSuccess(350, "booking succeeded"),
# PayFail(401, "payment failed"),  // generic failure, e.g. login failure or page refresh errors
# PayFailForNoFlight(402, "no flight available, payment failed"),  // no-flight specific status
# PayFailForHighPrice(403, "price too high, payment failed"),  // paid price exceeds the target price
# PayFailForErrorAccount(404, "wrong login account, payment failed"),  // only when the account/password is definitely wrong
# PayFailAfterSubmitCard(440, "card submitted but fetching the ticket number failed"),  // page refresh error on the final step
# PaySuccess(450, "payment succeeded"),
#
# // Statuses used in the follow-up flow after a client returns 440:
# SearchPNRException(441, "ticket-number lookup raised an exception"),  // automatically re-issues the lookup task
# SearchPNRFail(442, "ticket-number lookup confirmed a failure"),
# SearchPNRToPerson(444, "ticket status needs manual confirmation"),  // client could not determine the ticket status
69,119 | chenrun666/JW_purchase | refs/heads/master | /bin/buy_post.py | """
"""
import requests
def get_flight():
    """Fetch the Vanilla Air flight-selection results page and print its raw HTML."""
    url = "https://www.vanilla-air.com/hk/booking/#/flight-select/?tripType=OW&origin=NRT&destination=CTS&outboundDate=2019-03-31&adults=1&children=0&infants=0&promoCode=&mode=searchResultInter"
    page = requests.get(url)
    print(page.text)
def get_traceID():
    """POST an (empty) booking-tracking payload to the Vanilla Air log endpoint and print the response."""
    url = "https://www.vanilla-air.com/api/booking/track/log.json?__ts=1551242320737&version=1.0"
    # Payload mirrors the browser's state-transition tracking request
    data = {
        "fromState": "^",
        "toState": "/flight-select/?tripType&origin&destination&outboundDate&returnDate&adults&children&infants&promoCode&mode&targetMonth&returnTargetMonth&2origin&2destination&3origin&3destination&4origin&4destination",
        "paymentMethod": "",
        "condition": {
            "adultCount": 1,
            "childCount": 0,
            "infantCount": 0,
            "couponCode": "",
            "currency": "",
            "tripType": ""
        },
        "flights": [
        ],
        "passengers": [
        ],
        "contact": {
            "name": "",
            "country": "",
            "zipCode": "",
            "phoneNumber": "",
            "email": "",
            "mailMagFlg": False,
            "preferredLanguage": "",
            "givenName": "",
            "surName": "",
            "password": "",
            "chkbxRegistMember": "",
            "dateOfBirth": "",
            "namePrefix": "",
            "prefecture": ""
        },
        "mode": {
            "pointCalculate": False,
            "priceOnly": False
        },
        "payment": {
            "paymentType": ""
        },
        "flightSummary": [
        ]
    }
    # Browser-like headers; the site expects the custom __locale/channel fields
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36",
        "Referer": "https://www.vanilla-air.com/hk/booking/",
        "__locale": "hk",
        "Accept": "application/json, text/plain, */*",
        "content-type": "application/json;charset=UTF-8",
        "channel": "pc"
    }
    # NOTE(review): a nested dict passed via data= is form-encoded, yet the
    # content-type header claims JSON — json=data may be what was intended.
    response = requests.post(url, data=data, headers=headers)
    print(response)
# Exercise the tracking-log POST when run as a script
if __name__ == '__main__':
    # get_flight()
    get_traceID()
| {"/bin/action.py": ["/conf/settings.py"], "/bin/jw_payment.py": ["/bin/action.py", "/conf/settings.py"]} |
69,120 | chenrun666/JW_purchase | refs/heads/master | /bin/jw_payment.py | """
target_url = https://www.vanilla-air.com/hk/
https://www.vanilla-air.com/api/booking/track/log.json?__ts=1551233518756&version=1.0
https://www.vanilla-air.com/hk/booking/#/flight-select/?tripType=OW&origin=NRT&destination=HKD&outboundDate=2019-03-22&adults=1&children=0&infants=0&promoCode=&mode=searchResultInter
rev
"""
import time
import datetime
from bin.log import logger
from bin.MyException import StopException
from bin.action import Action
from conf.settings import TEST, BACKFILLINFO
class Buy(Action):
    """Drives the Vanilla Air one-way ticket purchase flow end to end via Selenium."""
    def __init__(self, task):
        """
        Build the booking URL from the server task and initialise all state.

        :param task: purchase task dict (flight, passengers, contact, card and
            target-price details)
        """
        self.fill_back = BACKFILLINFO
        self.origin = task["depAirport"]
        self.destination = task["arrAirport"]
        self.outboundDate = task["depDate"]
        # Counters for adult / child / infant passengers
        self.adults = 0
        self.children = 0
        self.infants = 0
        self.now_year = int(datetime.datetime.now().year)
        self.passengers = task["passengerVOList"]
        # Sort passengers by birth year (oldest first)
        self.passengers.sort(key=lambda x: int(x["birthday"].split("-")[0]))
        for passenger in self.passengers:
            self.fill_back["nameList"].append(passenger["name"])
            # Age bands (year-based approximation): >=12 adult, 2-11 child, <2 infant
            if (self.now_year - int(passenger["birthday"].split("-")[0])) >= 12:
                self.adults += 1
            elif 2 <= (self.now_year - int(passenger["birthday"].split("-")[0])) < 12:
                self.children += 1
            else:
                self.infants += 1
        url = "https://www.vanilla-air.com/hk/booking/#/flight-select/?tripType=OW&origin={}&destination={}&outboundDate={}&adults={}&children={}&infants={}&promoCode=&mode=searchResultInter".format(
            self.origin, self.destination, self.outboundDate, self.adults, self.children, self.infants)
        # Flight info
        self.flight_num = task["depFlightNumber"]
        # Passenger details
        self.passengers_info = task["passengerVOList"]
        # Baggage weight per passenger
        self.luggages_weight = [item["baggageWeight"] for item in self.passengers]
        # Contact details
        self.contact = task["contactVO"]
        # Payment card details
        self.payment_card_info = task["payPaymentInfoVo"]["cardVO"]
        # Target (maximum acceptable) ticket price
        self.target_price = task["targetPrice"]
        # Pre-populate the fill-back payload reported back to the server
        self.fill_back["cardName"] = task["pnrVO"]["cardName"]
        self.fill_back["cardNumber"] = task["pnrVO"]["cardNumber"]
        self.fill_back["linkEmail"] = task["contactVO"]["linkEmail"]
        self.fill_back["linkEmailPassword"] = task["contactVO"]["linkEmailPassword"]
        self.fill_back["linkPhone"] = task["contactVO"]["linkPhone"]
        self.fill_back["sourceCur"] = task["sourceCurrency"]
        self.fill_back["targetCur"] = task["targetCurrency"]
        Action.__init__(self, url)
    def select_flight(self, flight_num):
        """
        Select the flight matching *flight_num* on the search-results page.

        Raises StopException and sets fill_back["status"] when the flight is
        missing (402) or the page price exceeds the target price (403).
        """
        try:
            flight_info_xpath = '/html/body/div[1]/div[2]/div/div/div[1]/div/div[1]/div[3]/div[2]/div/div[4]/div/div[2]/dl'
            dl_list = self.get_ele_list(flight_info_xpath)
            # Match the flight number against each result row
            for index, item in enumerate(dl_list):
                # Skip the header row, then read the flight number on the page
                if index == 0:
                    continue
                page_flight_num_xpath = './dt/span[2]'
                page_flight_num, page_flight_date = self.get_text(page_flight_num_xpath, item).split()
                if page_flight_num == flight_num:
                    # Click to select this flight
                    # Pick the plain-vanilla "simple" fare
                    simple_xpath = './dd[2]'
                    self.click_btn(simple_xpath, item)
                    time.sleep(2)
                    # Dismiss the pop-up tip
                    tip_xpath = './dd[2]/div/span/span[2]/div/div/div[1]/a'
                    self.click_btn(tip_xpath, item)
                    time.sleep(1)
                    # Read the fare and validate it against the target price
                    page_flight_price_xpath = './dd[2]/div/span/span[2]'
                    currency, page_flight_price = self.get_text(page_flight_price_xpath, item).split()
                    if float(page_flight_price) > self.target_price:  # page price above target: abort
                        logger.info(f"目标价格小于页面价格, 页面价格为:{page_flight_price}, 任务目标价格为:{self.target_price}")
                        self.fill_back["status"] = 403
                        raise StopException("目标价格小于页面价格")
                    # Fill back data
                    # ......
                    # Click next
                    next_step_xpath = '/html/body/div[1]/div[2]/div/div/div[1]/div/div[1]/div[4]/div[3]/div[2]/a'
                    self.click_btn(next_step_xpath)
                    time.sleep(2)
                    # Log the selection
                    logger.debug(f"选择航班结束, 航班号为{page_flight_num}")
                    break
            else:
                # for/else: the loop finished without a break → flight not found
                self.fill_back["status"] = 402
                logger.debug("没有找到航班")
                raise StopException("没有查询到航班")
        except StopException as e:
            logger.error(f"{e}导致程序中断")
            raise StopException(e)
        except Exception as e:
            logger.error(f"选择航班发生错误,错误提示:{e}")
            raise StopException(e)
    def fill_passenger_info(self):
        """
        Fill in every passenger's name, gender, date of birth and (for the
        first passenger) nationality, then proceed to the next step.
        """
        # Close the login prompt
        close_btn_xpath = '//*[@id="loginform-modal"]/div/div/div[1]/a'
        self.click_btn(close_btn_xpath)
        # Grab the input <div> block for each passenger
        form_passenger_xpath = '//ng-form[@name="passengersCtl.paxInfo"]/div[1]/div'
        form_passengers_obj = self.get_ele_list(form_passenger_xpath)
        for index, passenger in enumerate(self.passengers_info):
            # index: the form block belonging to this passenger
            # Name (task stores it as "SURNAME/GIVENNAME")
            firstname_xpath = './form//ul/li[1]/input'
            lastname_xpath = './form//ul/li[2]/input'
            self.fill_input(content=passenger["name"].split("/")[1], xpath=firstname_xpath,
                            el=form_passengers_obj[index])
            self.fill_input(content=passenger["name"].split("/")[0], xpath=lastname_xpath,
                            el=form_passengers_obj[index])
            man_xpath = './form/div[1]/div[4]/div[2]/div/div[2]/ul/li[3]/span[1]'
            female_xpath = './form/div[1]/div[4]/div[2]/div/div[2]/ul/li[3]/span[2]'
            if passenger["sex"] == "F":  # female
                self.click_btn(el=form_passengers_obj[index], xpath=female_xpath)
            else:
                self.click_btn(el=form_passengers_obj[index], xpath=man_xpath)
            # Enter the date of birth
            # Click the field before typing into it
            birthday_xpath = './form//ul/li[4]/input'
            self.click_btn(xpath=birthday_xpath, el=form_passengers_obj[index])
            birthday = "".join(passenger["birthday"].split("-"))
            self.fill_input(xpath=birthday_xpath, content=birthday,
                            el=form_passengers_obj[index], single_input=False)
            # Choose the country (first passenger only)
            # NOTE(review): nationality is hard-coded to "日本" (Japan)
            if index == 0:
                country_xpath = './form/div[2]/ul/li[1]/dl/dd/div[1]/label/input'
                self.fill_input(content="日本", xpath=country_xpath, el=form_passengers_obj[index])
        # All fields done: click next
        next_xpath = '//dd[@class="usr-action-box--jp-b next"]/button'
        self.click_btn(xpath=next_xpath)
        logger.debug("乘客信息填写完毕")
        time.sleep(3)
    def select_luggages(self):
        """
        Select checked-baggage weights for each passenger (rounding up to the
        next 5 kg option), or skip the step when nobody has baggage.
        """
        if sum(self.luggages_weight) > 0:
            # Someone has baggage: open the baggage selector
            add_luggage_xpath = '//div[@class="vnl-service-options-box vnl-service-options-box--jp-b baggage"]'
            self.click_btn(add_luggage_xpath)
            passenger_luggage_list_xpath = '//div[@class="vnl-seat-select-box"]/div/div[2]/div/div[2]/div'
            passenger_luggage_list = self.get_ele_list(passenger_luggage_list_xpath)
            # Pick a weight for each passenger in turn
            for index, passenger in enumerate(self.passengers):
                # Scroll this passenger's row into view
                self.scroll_screen(passenger_luggage_list[index])
                time.sleep(2)
                if passenger["baggageWeight"] == 0:
                    continue
                # Round the weight up to the next 5 kg increment
                quotient, residue = divmod(passenger["baggageWeight"], 5)
                if residue > 0:
                    quotient += 1
                # Index of the weight option's <li> tag
                if quotient < 4:
                    select_weight_index = 2
                else:
                    select_weight_index = quotient - 2
                # Lighter options are visible without expanding the "more" menu
                # Click the chosen weight
                if select_weight_index < 10:
                    weight_xpath = './div[@class="vnl-baggage-select-box--flight_baggage-one-person"]/div[2]/ol/li[{}]'
                    self.click_btn(xpath=weight_xpath.format(select_weight_index), el=passenger_luggage_list[index])
                else:
                    # Heavier options live behind the "more" menu
                    # Expand "more" first
                    more_xpath = './div[@class="vnl-baggage-select-box--flight_baggage-one-person"]/div[2]/ol/li[@class="more"]'
                    # Scroll the screen
                    # self.scroll_screen()
                    self.click_btn(more_xpath, el=passenger_luggage_list[index])
                    time.sleep(1)
                    select_weight_index -= 8
                    weight_xpath = './div[@class="vnl-baggage-select-box--flight_baggage-one-person"]/div[2]/ol/li[@class="more"]/div/ul/li[{}]'
                    self.click_btn(xpath=weight_xpath.format(select_weight_index), el=passenger_luggage_list[index])
                    pass
            else:
                # for/else: loop finished → click "done" to close the selector
                over_xpath = '//a[@ng-click="selectBaggageCtl.close()"]'
                self.click_btn(over_xpath)
                time.sleep(2)
        # No baggage: nothing to select, fall straight through
        # Click next
        next_xpath = '//a[@data-id="act-next"]'
        self.click_btn(next_xpath)
        logger.debug("选择行李完毕")
        time.sleep(2)
        # Decline the upsell dialog ("No thanks")
        no_thanks_js = "document.querySelector('body > div.vnl-modal-popup.vnl-modal-popup_wrap > div > div > div > div.vnl-popup_btn > a.no.action_close').click()"
        self.driver.execute_script(no_thanks_js)
    def fill_contact_info(self):
        """
        Fill in the contact email (entered twice) and phone number.
        """
        # Email and its confirmation field
        email_xpath = '//div[@class="vnl-payment-contact-information"]//div[@class="right"]/dl[1]/dd//input'
        self.fill_input(xpath=email_xpath, content=self.contact["linkEmail"])
        email_sure_xpath = '//div[@class="vnl-payment-contact-information"]//div[@class="right"]/dl[2]/dd//input'
        self.fill_input(xpath=email_sure_xpath, content=self.contact["linkEmail"])
        # Dialling code
        # NOTE(review): hard-coded to 100000 — confirm this is intentional
        area_code_xpath = '//div[@class="vnl-payment-contact-information"]//div[@class="left"]/dl[4]/dd//input'
        self.fill_input(xpath=area_code_xpath, content=100000)
        # Phone number
        phone_num_xpath = '//div[@class="vnl-payment-contact-information"]//div[@class="left"]/dl[5]/dd//input'
        self.fill_input(xpath=phone_num_xpath, content=self.contact["linkPhone"])
        logger.debug("联系人信息填写完毕")
    def select_payment_method(self):
        """
        Choose the payment method (VCC Visa) and proceed.
        """
        # Pay with VCC Visa
        vcc_xpath = '//div[@class="cardtype ng-scope"]/label[2]/span'
        self.click_btn(vcc_xpath)
        time.sleep(5)
        logger.debug("支付方式选择完毕")
        # Click next
        next_xpath = '//div[@class="vnl-payment-action"]/a[@data-id="act-next"]'
        self.click_btn(next_xpath)
        time.sleep(5)
    def sure_info(self):
        """
        Confirm the selections: read the baggage total off the summary page,
        record baggage and ticket prices in the fill-back payload, accept the
        agreement and continue to payment.
        """
        if sum(self.luggages_weight) > 0:
            # Baggage present: find its price in the fare breakdown
            detail_info_list_xpath = '//div[@data-id="flight_1"]//div[@class="vnl-fare-detail-summary-detail-box vnl-fare-detail-summary-detail__blk"][2]/div'
            detail_info_list = self.get_ele_list(detail_info_list_xpath)
            for item in detail_info_list:
                if "託運行李" in self.get_text(xpath='./p', el=item):
                    luggage_price_xpath = './/dd[@class="price ng-binding"]'
                    luggage_price = self.get_text(xpath=luggage_price_xpath, el=item).split()[1].replace(",",
                                                                                                         "")  # e.g. raw text 'HKD 1,976'
                    self.fill_back["baggagePrice"] = float(luggage_price)
                    break
        else:
            # No baggage: record a zero baggage price
            self.fill_back["baggagePrice"] = 0.0
        # Total price minus the baggage price gives the ticket price
        total_price_xpath = '//div[@class="vnl_itinerary_price-total_price"]/span/span'
        total_price = self.get_text(xpath=total_price_xpath).split()[1].replace(",", "")
        self.fill_back["price"] = float(total_price) - self.fill_back["baggagePrice"]
        # Accept the agreement
        sure_xpath = '//div[@class="reconfirm_agreement_check"]/p//span[1]'
        self.click_btn(sure_xpath)
        # Continue to payment
        pay_btn_xpath = '//a[@data-id="act-purchase"]'
        self.click_btn(pay_btn_xpath)
        logger.debug("同意协议点击无误")
        time.sleep(10)
    def fill_card_info(self):
        """
        Enter the payment-card number, holder name, expiry date and CVV.
        """
        card_num_xpath = '//*[@id="cardNumber"]'
        card_name_xpath = '//*[@id="cardholderName"]'
        month_xpath = '//*[@id="expiryMonth"]'
        year_xpath = '//*[@id="expiryYear"]'
        cvv_code_xpath = '//*[@id="securityCode"]'
        # Map each field's xpath to the value it should receive
        card_info_dict = {
            card_num_xpath: self.payment_card_info["cardNumber"],
            card_name_xpath: self.payment_card_info["lastName"] + "/" + self.payment_card_info["firstName"],
            month_xpath: self.payment_card_info["cardExpired"].split("-")[1],
            year_xpath: self.payment_card_info["cardExpired"].split("-")[0],
            cvv_code_xpath: self.payment_card_info["cvv"],
        }
        for k, v in card_info_dict.items():
            self.fill_input(content=v, xpath=k)
        logger.debug("填写卡号信息完成")
    def pay(self):
        """
        Submit the payment.
        """
        logger.debug("开始支付")
        pay_btn_xpath = '//*[@id="submitButton"]'
        self.click_btn(pay_btn_xpath)
        # TODO: retrieve the ticket number (PNR) after payment
    def __call__(self, *args, **kwargs):
        """Run the whole purchase flow; on StopException record a 401 fallback status."""
        try:
            # Select the flight
            self.select_flight(self.flight_num)
            # Fill in passenger details
            self.fill_passenger_info()
            # Select baggage
            self.select_luggages()
            # Fill in contact details
            self.fill_contact_info()
            # Choose the payment method
            self.select_payment_method()
            # Confirm the summary (baggage and prices)
            self.sure_info()
            # Enter the payment-card details
            self.fill_card_info()
            # Pay and (eventually) fetch the PNR
            self.pay()
        except StopException as e:
            # Default to generic payment failure if no status was set earlier
            if not self.fill_back["status"]:
                self.fill_back["status"] = 401
            logger.error(f"程序中断:错误提示 -》{e}")
            return
    def __del__(self):
        # Close the browser when the object is garbage collected
        # NOTE(review): raises AttributeError if __init__ failed before
        # self.driver was assigned
        self.driver.close()
if __name__ == '__main__':
    # In TEST mode load a canned task from disk instead of a real server task
    if TEST:
        import json
        with open("../files/fake_task.json", "r", encoding="utf-8") as f:
            task = f.read()
        task = json.loads(task)["data"]
    else:
        task = {}
    # Buy.__call__ drives the whole purchase flow
    buy = Buy(task)
    buy()
| {"/bin/action.py": ["/conf/settings.py"], "/bin/jw_payment.py": ["/bin/action.py", "/conf/settings.py"]} |
69,121 | chenrun666/JW_purchase | refs/heads/master | /test/select_luggage.py | weight = 26
# Baggage weight in 5 kg steps: s = full increments, y = leftover kilograms
s, y = divmod(weight, 5)
print(s, y) | {"/bin/action.py": ["/conf/settings.py"], "/bin/jw_payment.py": ["/bin/action.py", "/conf/settings.py"]} |
69,126 | Jared-Hood/Brawlhalla-Rank-Data | refs/heads/master | /BrawlhallaScraper.py | from time import sleep
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
def simple_get(url):
    """
    Perform an HTTP GET against *url* and return the response body.

    Returns None when the request fails or the response is not a
    successful HTML document (see is_good_response).
    """
    try:
        # closing() guarantees the streamed response is released
        with closing(get(url, stream=True)) as resp:
            return resp.content if is_good_response(resp) else None
    except RequestException as e:
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
def is_good_response(resp):
    """Return True when *resp* is a 200 response whose Content-Type mentions html."""
    content_type = resp.headers['Content-Type'].lower()
    if resp.status_code != 200 or content_type is None:
        return False
    return 'html' in content_type
def log_error(e):
    """Report a scraping error; currently just echoes it to stdout."""
    print(e)
#http://www.brawlhalla.com/rankings/1v1/
# Scrape every page of the 1v1 leaderboard and write one Elo value per line.
file = open("output.txt", 'w')
# Rough page count, used only for the progress estimate (last page was ~5233)
last_page_number = 5300
page_number = 0
while True:
    raw_html = simple_get('http://www.brawlhalla.com/rankings/1v1/' + str(page_number))
    # Robustness fix: simple_get returns None on a failed request, so guard
    # before len() — otherwise a network hiccup raises TypeError mid-scrape.
    # A page with no ranked rows is ~2.7 kB, so < 5000 bytes means we ran out.
    if raw_html is None or len(raw_html) < 5000:
        print("End of Scrape")
        print(raw_html)
        print(page_number)
        break
    html = BeautifulSoup(raw_html, 'html.parser')
    # Player rows alternate 'odd'/'even' CSS classes.
    # Bug fix: even_players previously also queried class_='odd', which
    # double-counted every odd row and skipped the even rows entirely
    # (verify class name against the live markup).
    odd_players = html.find_all('tr', class_='odd')
    even_players = html.find_all('tr', class_='even')
    # The 4th centred cell (index 3) of each row holds the player's rating
    for player in odd_players:
        player_data = player.find_all('td', class_='pcenter')
        if player_data:
            file.write(player_data[3].text + '\n')
    for player in even_players:
        player_data = player.find_all('td', class_='pcenter')
        if player_data:
            file.write(player_data[3].text + '\n')
    # Progress estimate, then move on to the next page (politely rate-limited)
    print(str(round(page_number / last_page_number, 4) * 100) + "%")
    page_number += 1
    sleep(0.2)
file.close() | {"/GraphData.py": ["/average.py"]} |
69,127 | Jared-Hood/Brawlhalla-Rank-Data | refs/heads/master | /average.py | def averages():
    """
    Tally the Elo values in output.txt (one integer per line) into rank tiers.

    Returns:
        (ranks, i): dict mapping tier name -> player count, and the total
        number of players counted
    """
    file = open("output.txt",'r')
    # NOTE(review): total is never used below
    total = 0.0
    i = 0
    # Tier thresholds (inclusive lower bound, exclusive upper bound)
    ranks = {
        "diamond": 0 ,# 2000+
        "platinum" :0 ,# 1679-1999
        "gold" : 0 ,# 1338-1678
        "silver" : 0 ,# 1086-1337
        "bronze" : 0 ,# 872-1085
        "tin" : 0 # <872
    }
    # NOTE: the ranges are mutually exclusive, so an if/elif chain (or bisect)
    # would avoid re-testing every condition per line
    for line in file:
        i += 1
        elo = int(line)
        if(elo>=2000):
            ranks["diamond"] += 1
        if(elo<2000 and elo >= 1679):
            ranks["platinum"] += 1
        if(elo<1679 and elo >=1338):
            ranks["gold"] += 1
        if(elo < 1338 and elo >= 1086):
            ranks["silver"] += 1
        if(elo < 1086 and elo >= 872):
            ranks["bronze"] += 1
        if(elo < 872):
            ranks["tin"] += 1
    file.close()
return ranks, i | {"/GraphData.py": ["/average.py"]} |
69,128 | Jared-Hood/Brawlhalla-Rank-Data | refs/heads/master | /GraphData.py | import matplotlib.pyplot as plt
import matplotlib.axes
import numpy as np
import average
# Cumulative percentage of players per rank tier; the leading 0 anchors the curve
data = [0]
playerrank_list, num_players = average.averages()
# averages() returns tiers in descending-Elo insertion order (diamond ... tin)
for tier in playerrank_list:
    data.append(round((playerrank_list[tier] / num_players) * 100, 4))
#Cumulative percentage for rankings
for i in range(1, len(data)):
    data[i] += data[i - 1]
# ranking cutoffs
# NOTE(review): these x-axis cutoffs do not match the tier thresholds used in
# average.averages() (2000/1679/1338/1086/872) — confirm which set is intended.
ranks = np.array([2300, 2000, 1700, 1500, 1200, 1000, 700])
plt.plot(ranks, data)
plt.xlabel("Rank")
plt.ylabel("Percentage of Players Above Rank")
plt.title("Cumulative Rank Percentages of Brawlhalla Players")  # typo fix: was "Cumalative"
# Annotate each point with its (rank, cumulative %) pair
for i_x, i_y in zip(ranks, data):
    plt.text(i_x, i_y, '({}, {})'.format(i_x, round(i_y, 2)))
plt.show()
| {"/GraphData.py": ["/average.py"]} |
69,129 | Jared-Hood/Brawlhalla-Rank-Data | refs/heads/master | /statistics.py | import math
def stdDev(file, average):
    """Return the population standard deviation of the integer values in *file*.

    Args:
        file: iterable of lines, each parseable as an int.
        average: precomputed mean of those values.

    Raises:
        ZeroDivisionError: if *file* yields no lines.
    """
    squared_deviations = [(int(line) - average) ** 2 for line in file]
    variance = sum(squared_deviations) / len(squared_deviations)
    return math.sqrt(variance)
# Compute summary statistics over the elo ratings in output.txt
# (one integer per line; the first line is assumed to be the maximum).
file = open("output.txt", 'r')
# statistics
# NOTE(review): assumes the first line holds the max elo — TODO confirm the
# producer of output.txt guarantees this; frequency[] is sized from it.
maxElo = int(file.__next__())
minElo = maxElo
average = 0
mode = 0
median = 0
Std = 0
frequency = [0] * (maxElo + 1)  # histogram indexed by elo value
lines = 0
# Single pass: histogram, running sum, and minimum. The first line (maxElo)
# is excluded because __next__ already consumed it.
for line in file:
    frequency[int(line)] += 1
    lines += 1
    average += int(line)
    if (int(line) < minElo):
        minElo = int(line)
average = average / lines
file.close()
file = open("output.txt", 'r')
Std = stdDev(file, average)
file.close()
file = open("output.txt", 'r')
iter2 = 0
# NOTE(review): this picks the middle line in *file order*, which is only the
# median if output.txt is sorted — TODO confirm, otherwise sort first.
for line in file:
    if (iter2 == lines // 2):
        median = int(line)
        break
    else:
        iter2 += 1
# Mode: find the largest frequency, then the first elo with that frequency.
modeNum = 0
for i in frequency:
    if (int(i) > modeNum):
        modeNum = int(i)
mode = frequency.index(modeNum)
print("Max Elo: " + str(maxElo))
print("Min Elo: " + str(minElo))
print("Mode: " + str(mode))
print("Number of occurances: " + str(modeNum))
print("Average: " + str(round(average, 2)))
print("Median: " + str(median))
print("Standard Deviation: " + str(round(Std, 2)))
file.close()
| {"/GraphData.py": ["/average.py"]} |
69,130 | OguzDegirmenci/git_first | refs/heads/master | /main.py | from email_receiver import getEmail
from email_sender import postEmail
def emoji():
    """Print a smiley face."""
    smiley = ":)"
    print(smiley)
def main():
    """Show the menu, read the user's choice, and dispatch to it.

    Choice 1 opens the inbox, choice 2 composes a new mail; anything
    else prints an error and re-prompts recursively.
    """
    for option in ("1 - Inbox", "2 - New"):
        print(option)
    choice = int(input("Seçim = "))
    actions = {1: getEmail, 2: postEmail}
    action = actions.get(choice)
    if action is not None:
        action()
    else:
        print("Yanlış tercih")
        main()
emoji() | {"/main.py": ["/email_receiver.py", "/email_sender.py"]} |
69,131 | OguzDegirmenci/git_first | refs/heads/master | /email_receiver.py | def getEmail():
print("Email received...") | {"/main.py": ["/email_receiver.py", "/email_sender.py"]} |
69,132 | OguzDegirmenci/git_first | refs/heads/master | /email_sender.py | def postEmail():
print("Email sent...") | {"/main.py": ["/email_receiver.py", "/email_sender.py"]} |
69,138 | shaoliangliang1996/beta3_IRT | refs/heads/master | /models/beta_irt.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import edward as ed
import six
import os
import sys
import re
import time
from hsvi.Hierarchi_klqp import Hierarchi_klqp
from edward.models import Normal,Beta,Gamma,TransformedDistribution,InverseGamma,RandomVariable
ds = tf.contrib.distributions
class Beta_IRT:
    """Beta item-response-theory model over TF1/Edward random variables.

    Builds a Beta observation model whose parameters derive from per-item
    ability (theta), difficulty (delta) and discrimination (a), and fits the
    variational posteriors with a hierarchical KL(q||p) scheme.

    :param M: number of items (rows of the observation matrix)
    :param C: number of respondents (columns of the observation matrix)
    :param theta_prior: prior of ability (RandomVariable, or constant to fix it)
    :param delta_prior: prior of difficulty
    :param a_prior: prior of discrimination (RandomVariable, or constant to fix it)
    """
    def __init__(self, M, C, theta_prior, delta_prior, a_prior):
        self.M = M
        self.C = C
        self.theta_prior = theta_prior  # prior of ability
        self.delta_prior = delta_prior  # prior of difficulty
        self.a_prior = a_prior  # prior of discrimination
        if isinstance(a_prior, ed.RandomVariable):
            # variational posterior of discrimination
            self.qa = Normal(loc=tf.Variable(tf.ones([M])), scale=tf.nn.softplus(tf.Variable(tf.ones([M])*.5)), name='qa')
        else:
            # discrimination held fixed at the given constant
            self.qa = a_prior
        # Local (per-observation) variational posteriors live in the 'local'
        # variable scope so Hierarchi_klqp can optimize them separately.
        with tf.variable_scope('local'):
            # variational posterior of ability (sigmoid keeps it in (0,1))
            if isinstance(self.theta_prior, RandomVariable):
                self.qtheta = TransformedDistribution(distribution=Normal(loc=tf.Variable(tf.random_normal([C])), scale=tf.nn.softplus(tf.Variable(tf.random_normal([C])))),\
                    bijector=ds.bijectors.Sigmoid(), sample_shape=[M], name='qtheta')
            else:
                self.qtheta = self.theta_prior
            # variational posterior of difficulty (sigmoid keeps it in (0,1))
            self.qdelta = TransformedDistribution(distribution=Normal(loc=tf.Variable(tf.random_normal([M])), scale=tf.nn.softplus(tf.Variable(tf.random_normal([M])))), \
                bijector=ds.bijectors.Sigmoid(), sample_shape=[C], name='qdelta')
        # Beta parameters of the observation model, derived from the posteriors.
        alpha = (tf.transpose(self.qtheta)/self.qdelta)**self.qa
        beta = ((1. - tf.transpose(self.qtheta))/(1. - self.qdelta))**self.qa
        # observed variable
        self.x = Beta(tf.transpose(alpha), tf.transpose(beta))

    def init_inference(self, data, n_iter=1000, n_print=100):
        """Configure the Hierarchi_klqp inference object for *data*.

        :param data: observed M x C response matrix fed to self.x
        :param n_iter: number of global optimization iterations
        :param n_print: progress print interval
        """
        # for discrimination a is latent variable
        if isinstance(self.a_prior, RandomVariable):
            if isinstance(self.theta_prior, RandomVariable):
                self.inference = Hierarchi_klqp(latent_vars={self.a_prior: self.qa}, data={self.x: data}, \
                    local_vars={self.theta_prior: self.qtheta, self.delta_prior: self.qdelta}, local_data={self.x: data})
            else:
                self.inference = Hierarchi_klqp(latent_vars={self.a_prior: self.qa}, data={self.x: data}, \
                    local_vars={self.delta_prior: self.qdelta}, local_data={self.x: data})
        # for discrimination a is constant
        else:
            self.inference = Hierarchi_klqp(latent_vars={self.theta_prior: self.qtheta, self.delta_prior: self.qdelta}, data={self.x: data})
        self.inference.initialize(auto_transform=False, n_iter=n_iter, n_print=n_print)

    def fit(self, local_iter=50):
        """Run the optimization: local updates nested inside each global step.

        :param local_iter: local-scope updates per global update (only used
            when discrimination is latent)
        """
        tf.global_variables_initializer().run()
        for jj in range(self.inference.n_iter):
            if isinstance(self.a_prior, ed.RandomVariable):
                for _ in range(local_iter):
                    self.inference.update(scope='local')
            info_dict = self.inference.update(scope='global')
            self.inference.print_progress(info_dict)
| {"/models/beta_irt.py": ["/hsvi/Hierarchi_klqp.py"]} |
69,139 | shaoliangliang1996/beta3_IRT | refs/heads/master | /hsvi/Hierarchi_klqp.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
import six
import os
#from edward.util import check_data, check_latent_vars
from edward.inferences import Inference
class Hierarchi_klqp(Inference):
    """Hierarchical KL(q||p) variational inference for Edward (TF1).

    Splits latent variables into a 'global' group and a 'local' group (whose
    TF variables live in the 'local' variable scope) and builds a separate
    reparameterized-ELBO loss and optimizer for each, so callers can interleave
    local and global gradient steps.
    """
    def __init__(self, latent_vars={}, data={}, local_data={}, local_vars=None, *args, **kwargs):
        # NOTE(review): mutable default arguments ({}) are shared across calls;
        # harmless only if never mutated in place — TODO confirm.
        super(Hierarchi_klqp, self).__init__(*args, **kwargs)
        self.latent_vars = latent_vars      # global prior -> posterior map
        self.data = data                    # observed vars for the global loss
        self.local_vars = local_vars        # local prior -> posterior map (or None)
        self.local_data = local_data        # observed vars for the local loss

    def initialize(self, scale={}, optimizer={}, clipping={}, constraints=None, *args, **kwargs):
        """Build losses, gradients and train ops for both scopes.

        :param scale: per-variable scaling of log-likelihood terms
        :param optimizer: optional {'global'/'local': (optimizer, step)} overrides
        :param clipping: optional {'global'/'local': (min, max)} gradient clipping
        :param constraints: optional extra penalty terms keyed by posterior
        """
        self.scale = scale
        self.optimizer = optimizer
        self.clipping = clipping
        self.constraints = constraints
        var_list = set()
        for v in tf.trainable_variables():
            var_list.add(v)
        # Partition trainable variables: everything in the 'local' scope is
        # optimized by the local train op, the rest by the global one.
        if not self.local_vars is None:
            local_var_list = set()
            for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="local"):
                local_var_list.add(v)
            var_list.difference_update(local_var_list)
            local_var_list = list(local_var_list)
        else:
            local_var_list = None
        var_list = list(var_list)
        self.loss, grads_and_vars, self.local_loss, grads_and_vars_local = self.build_loss_and_gradients(var_list, local_var_list)
        self.config_optimizer()
        self.train = self.optimizer['global'][0].apply_gradients(grads_and_vars, global_step=self.optimizer['global'][1])
        if not local_var_list is None:
            self.config_optimizer(scope='local')
            self.train_local = self.optimizer['local'][0].apply_gradients(grads_and_vars_local, global_step=self.optimizer['local'][1])
        super(Hierarchi_klqp, self).initialize(*args, **kwargs)

    def config_optimizer(self, scope='global'):
        """Ensure self.optimizer[scope] is a (optimizer, global_step) pair,
        creating an Adam optimizer with exponential LR decay when missing."""
        # Need default optimizer
        if not scope in self.optimizer:
            if scope == 'local':
                decay = (10000, 0.999)  # slower decay for the local scope
                with tf.variable_scope('local'):
                    global_step = tf.Variable(0, trainable=False, name="local_step")
            else:
                decay = (100, 0.9)
                global_step = tf.Variable(0, trainable=False, name="global_step")
            starter_learning_rate = 0.1
            learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                                       global_step,
                                                       decay[0], decay[1], staircase=True)
            self.optimizer[scope] = (tf.train.AdamOptimizer(learning_rate), global_step)
        # Need default global_step for train
        elif len(self.optimizer[scope]) == 1:
            if scope == 'local':
                with tf.variable_scope('local'):
                    global_step = tf.Variable(0, trainable=False, name="local_step")
            else:
                global_step = tf.Variable(np.int64(0), trainable=False, name="global_step")
            self.optimizer[scope].append(global_step)
        return

    def build_loss_and_gradients(self, var_list, local_var_list):
        """Return (global loss, grads, local loss, local grads); the local pair
        is (0., None) when there are no local variables."""
        loss, grads_and_vars = self.build_reparam_ELBO_and_grads(var_list)
        if not local_var_list is None:
            local_loss, grads_and_vars_local = self.build_reparam_ELBO_and_grads(local_var_list, scope='local')
        else:
            local_loss = 0.
            grads_and_vars_local = None
        return loss, grads_and_vars, local_loss, grads_and_vars_local

    def build_reparam_ELBO_and_grads(self, var_list, scope='global'):
        """Build the negative ELBO (KL minus expected log-likelihood) for one
        scope and its gradients w.r.t. *var_list*, with optional clipping."""
        ll = 0.
        kl = 0.
        if scope == 'global':
            data = self.data
            vars = self.latent_vars
        else:
            data = self.local_data
            vars = self.local_vars
        # Expected log-likelihood of observed variables (single-sample estimate).
        for x, qx in six.iteritems(data):
            ll += tf.reduce_mean(self.scale.get(x, 1.)*x.log_prob(qx))
        # KL(q||p) estimated as E_q[log q] - E_q[log p].
        for z, qz in six.iteritems(vars):
            kl += tf.reduce_mean(qz.log_prob(qz))-tf.reduce_mean(z.log_prob(qz))
        if not self.constraints is None:
            closs = 0.
            for qz in six.iterkeys(self.constraints):
                # NOTE(review): `self.vars` is never defined on this class —
                # this raises AttributeError whenever constraints are supplied;
                # presumably `vars` (the local name above) was intended.
                if qz in self.vars.values():
                    closs += self.constraints[qz]
            #print(closs)
            kl += closs
        loss = kl - ll
        grads = tf.gradients(loss, var_list)
        if scope in self.clipping:
            grads = [tf.clip_by_value(grd, self.clipping[scope][0], self.clipping[scope][1]) for grd in grads]
        grads_and_vars = list(zip(grads, var_list))
        return loss, grads_and_vars

    def update(self, feed_dict=None, scope='global'):
        """Run one train step for *scope*; returns {'t': step, 'loss': value}."""
        if feed_dict is None:
            feed_dict = {}
            # Feed any placeholder entries of local_data automatically.
            for key, value in six.iteritems(self.local_data):
                if isinstance(key, tf.Tensor) and "Placeholder" in key.op.type:
                    feed_dict[key] = value
        sess = ed.get_session()
        if scope == 'global':
            _, t, loss = sess.run([self.train, self.increment_t, self.loss], feed_dict)
            return {'t': t, 'loss': loss}
        if scope == 'local':
            _, local_loss = sess.run([self.train_local, self.local_loss], feed_dict)
            # NOTE(review): here 't' is the increment op itself (a tensor), not
            # an evaluated step count as in the global branch — TODO confirm.
            return {'t': self.increment_t, 'loss': local_loss}

    def print_progress(self, info_dict):
        """Print progress to output."""
        if self.n_print != 0:
            t = info_dict['t']
            if t == 1 or t % self.n_print == 0:
                self.progbar.update(t, {'Loss': info_dict['loss']})
| {"/models/beta_irt.py": ["/hsvi/Hierarchi_klqp.py"]} |
69,142 | Leeps-Lab/imperfect_monitoring | refs/heads/master | /models.py | from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
from otree_redwood.models import DecisionGroup
from otree_redwood.utils import DiscreteEventEmitter
import csv, random, math
from jsonfield import JSONField
from django.db.models import IntegerField
def parse_config(config_file):
    """Parse a per-round experiment configuration CSV.

    Each row of the file describes one round: payoff/probability matrices
    (read as floats), timing/count parameters (ints/floats), and display
    flags stored as the literal strings 'TRUE'/'FALSE'.

    Args:
        config_file: file name, resolved under imperfect_monitoring/configs/.

    Returns:
        list: one dict of parsed settings per CSV row (i.e. per round).
    """
    with open('imperfect_monitoring/configs/' + config_file) as f:
        rows = list(csv.DictReader(f))
    rounds = []
    for row in rows:
        config = {
            # Rows: AGood, ABad, BGood, BBad; columns: player 1, player 2.
            'payoff_matrix': [
                [float(row['pi1(AGood)']), float(row['pi2(AGood)'])], [float(row['pi1(ABad)']), float(row['pi2(ABad)'])],
                [float(row['pi1(BGood)']), float(row['pi2(BGood)'])], [float(row['pi1(BBad)']), float(row['pi2(BBad)'])]
            ],
            # Rows: AA, AB, BA, BB; columns: player 1, player 2.
            'probability_matrix': [
                [float(row['p1(AA)']), float(row['p2(AA)'])], [float(row['p1(AB)']), float(row['p2(AB)'])],
                [float(row['p1(BA)']), float(row['p2(BA)'])], [float(row['p1(BB)']), float(row['p2(BB)'])]
            ],
            'displayed_subperiods': int(row['displayed_subperiods']),
            'subperiod_length': int(row['subperiod_length']),
            'rest_length_seconds': int(row['rest_length_seconds']),
            'seconds_per_tick': float(row['seconds_per_tick']),
            'num_signals': int(row['num_signals']),
        }
        # Flag columns: anything other than the exact string 'TRUE' is False
        # (matches the original `True if x == 'TRUE' else False` behavior).
        for flag in ('display_average_a_graph', 'display_average_b_graph',
                     'display_average_ab_graph', 'display_payoff_matrix',
                     'display_score', 'enable_animations', 'use_single_button',
                     'public_monitoring', 'display_coordination_indicator'):
            config[flag] = row[flag] == 'TRUE'
        rounds.append(config)
    return rounds
class Constants(BaseConstants):
    """oTree app constants: URL slug, group size, and round-count ceiling."""
    name_in_url = 'imperfect_monitoring'
    players_per_group = 2
    # Upper bound; the effective number of rounds comes from the config file
    # (see Group.num_rounds).
    num_rounds = 100
class Subsession(BaseSubsession):
    """One round's subsession; players are re-paired randomly each round."""
    def before_session_starts(self):
        # Shuffle players into new random groups.
        self.group_randomly()
class Group(DecisionGroup):
    """A pair of players in the imperfect-monitoring game.

    Drives the period via a DiscreteEventEmitter: at each subperiod boundary
    it freezes decisions, draws noisy 'G'/'B' signals from the probability
    matrix, pays out per-signal payoffs, and pushes the results to clients.

    NOTE(review): every config accessor below re-reads and re-parses the CSV
    via parse_config — consider caching if this shows up in profiling.
    """
    # Realized signals per subperiod: {subperiod index (str): {participant code: 'GB...'}}
    subperiod_results = JSONField()

    def num_rounds(self):
        return len(parse_config(self.session.config['config_file']))

    def seconds_per_tick(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['seconds_per_tick']

    def subperiod_length(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['subperiod_length']

    def rest_length_seconds(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['rest_length_seconds']

    def displayed_subperiods(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['displayed_subperiods']

    def display_average_a_graph(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['display_average_a_graph']

    def display_average_b_graph(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['display_average_b_graph']

    def display_average_ab_graph(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['display_average_ab_graph']

    def display_payoff_matrix(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['display_payoff_matrix']

    def display_score(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['display_score']

    def enable_animations(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['enable_animations']

    def use_single_button(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['use_single_button']

    def public_monitoring(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['public_monitoring']

    def num_signals(self):
        return parse_config(self.session.config['config_file'])[self.round_number-1]['num_signals']

    def period_length(self):
        # Total period duration: signal time plus one rest interval per subperiod.
        rest_length_seconds = self.rest_length_seconds()
        seconds_per_tick = self.seconds_per_tick()
        num_signals = self.num_signals()
        subperiod_length = self.subperiod_length()
        num_subperiods = math.ceil(num_signals / subperiod_length)
        return (num_signals * seconds_per_tick) + (num_subperiods * rest_length_seconds)

    def when_all_players_ready(self):
        # Initialize results storage and start the subperiod timer.
        super().when_all_players_ready()
        if not self.subperiod_results:
            self.subperiod_results = {}
            self.save(update_fields=['subperiod_results'])
        emitter = DiscreteEventEmitter(
            (self.seconds_per_tick() * self.subperiod_length()) + self.rest_length_seconds(),
            self.period_length(),
            self,
            self.subperiod_start,
            True)
        emitter.start()

    def subperiod_start(self, current_interval, intervals):
        """Emitter callback: draw signals, pay players, broadcast the results."""
        self.refresh_from_db()
        # The final subperiod may be shorter if num_signals isn't a multiple
        # of subperiod_length.
        num_signals = min(self.num_signals() - current_interval * self.subperiod_length(), self.subperiod_length())
        msg = {}
        # if we're doing public monitoring, just calculate one set of signals
        # just use the first player's decision since public monitoring requires that probs are symmetric
        if self.public_monitoring():
            signals = self.calc_signals(num_signals, self.get_players()[0]);
        # "coordination indicator" is random number shown on everyone's screen to encourage coordination
        coord_indicator = random.randint(0, 100)
        for player in self.get_players():
            pcode = player.participant.code
            if not self.public_monitoring():
                # Private monitoring: each player gets independent signals.
                signals = self.calc_signals(num_signals, player)
            msg[pcode] = {
                'fixed_decision': self.group_decisions[pcode],
                'payoffs': self.calc_payoffs(player, current_interval, signals),
                'coordination_indicator': coord_indicator,
            }
        self.send('subperiod-start', msg)

    def calc_signals(self, num_signals, player):
        """Draw num_signals i.i.d. 'G'/'B' signals for *player*.

        The bad-signal probability is the expectation of the player's column
        of the probability matrix under both players' (mixed) decisions.
        """
        pcode = player.participant.code
        my_decision = self.group_decisions[pcode]
        other_decision = [self.group_decisions[c] for c in self.group_decisions if c != pcode][0]
        probability_matrix = parse_config(self.session.config['config_file'])[self.round_number-1]['probability_matrix']
        probabilities = [e[player.id_in_group - 1] for e in probability_matrix]
        prob = ((my_decision * other_decision * probabilities[0]) +
                (my_decision * (1 - other_decision) * probabilities[1]) +
                ((1 - my_decision) * other_decision * probabilities[2]) +
                ((1 - my_decision) * (1 - other_decision) * probabilities[3]))
        signals = []
        for i in range(num_signals):
            # NOTE(review): `prob` is treated as the probability of a bad signal.
            if random.random() <= prob:
                signals.append('B')
            else:
                signals.append('G')
        return signals

    def calc_payoffs(self, player, subperiod_num, signals):
        """Map each signal to a payoff given the player's decision, record the
        signals in subperiod_results, and credit the player's payoff."""
        payoff_matrix = parse_config(self.session.config['config_file'])[self.round_number-1]['payoff_matrix']
        pcode = player.participant.code
        payoffs = [e[player.id_in_group - 1] for e in payoff_matrix]
        my_decision = self.group_decisions[pcode]
        realized_payoffs = []
        # Payoff rows: [AGood, ABad, BGood, BBad]; decision 1 = A, 0 = B.
        for signal in signals:
            if signal == 'B' and my_decision == 1:
                realized_payoffs.append(payoffs[1])
            if signal == 'B' and my_decision == 0:
                realized_payoffs.append(payoffs[3])
            if signal == 'G' and my_decision == 1:
                realized_payoffs.append(payoffs[0])
            if signal == 'G' and my_decision == 0:
                realized_payoffs.append(payoffs[2])
            self.add_subperiod_result(pcode, signal, subperiod_num)
        player.payoff += sum(realized_payoffs)
        player.save()
        return realized_payoffs

    def add_subperiod_result(self, pcode, result, subperiod_num):
        """Append one signal character to this player's string for the subperiod."""
        subperiod_key = str(subperiod_num)
        if subperiod_key not in self.subperiod_results:
            self.subperiod_results[subperiod_key] = {}
        if pcode not in self.subperiod_results[subperiod_key]:
            self.subperiod_results[subperiod_key][pcode] = ''
        self.subperiod_results[subperiod_key][pcode] += result
        self.save(update_fields=['subperiod_results'])
class Player(BasePlayer):
    # Stores the randomly generated initial decision for this player so that
    # player.initial_decision() always returns the same value once drawn.
    _initial_decision = IntegerField(null=True)

    def initial_decision(self):
        """Return this player's starting decision (0 or 1), drawing it once.

        In single-button mode everyone starts at A (1); otherwise the first
        call draws uniformly from {0, 1} and persists the result so later
        calls return the same value.
        """
        # if we're in single-button mode, we always want to start in A
        if self.group.use_single_button():
            return 1
        self.refresh_from_db()
        # Compare against None explicitly: 0 is a valid stored decision. The
        # previous truthiness test (`if self._initial_decision:`) re-randomized
        # and re-saved every time a 0 had been drawn, so the "initial" decision
        # was not actually stable for those players.
        if self._initial_decision is not None:
            return self._initial_decision
        self._initial_decision = random.choice([0, 1])
        self.save(update_fields=['_initial_decision'])
        return self._initial_decision
| {"/views.py": ["/models.py"]} |
69,143 | Leeps-Lab/imperfect_monitoring | refs/heads/master | /views.py | from otree.api import Currency as c, currency_range
from . import models
from ._builtin import Page, WaitPage
from .models import Constants, parse_config
import math
class Instructions(Page):
    """Instructions page, shown only in the first round."""
    def is_displayed(self):
        return self.round_number == 1
    def vars_for_template(self):
        # Link to the externally hosted instructions document.
        return {
            'instructions_link': self.session.config['instructions_link'],
        }
class Decision(Page):
    """Main play page; hidden once the configured rounds are exhausted."""
    def is_displayed(self):
        return self.round_number <= self.group.num_rounds()
    def vars_for_template(self):
        # Expose this round's matrices to the template.
        return {
            "payoff_matrix": parse_config(self.session.config['config_file'])[self.round_number-1]['payoff_matrix'],
            "probability_matrix": parse_config(self.session.config['config_file'])[self.round_number-1]['probability_matrix'],
        }
class Results(Page):
    """Round results page; hidden once the configured rounds are exhausted."""
    def is_displayed(self):
        return self.round_number <= self.group.num_rounds()
def get_config_columns(group):
    """Return the per-round config values appended to every output row.

    Order matches the tail of get_output_table_header: [num_subperiods,
    seconds_per_tick, rest_length_seconds, payoff_matrix, probability_matrix].
    """
    num_signals = group.num_signals()
    subperiod_length = group.subperiod_length()
    # num_signals/subperiod_length are only needed to derive num_subperiods.
    num_subperiods = math.ceil(num_signals / subperiod_length)
    seconds_per_tick = group.seconds_per_tick()
    rest_length = group.rest_length_seconds()
    config = parse_config(group.session.config['config_file'])
    payoff_matrix = config[group.round_number - 1]['payoff_matrix']
    probability_matrix = config[group.round_number - 1]['probability_matrix']
    return [num_subperiods, seconds_per_tick, rest_length, payoff_matrix, probability_matrix]
def get_output_table_header(groups):
    """Return the column names for the subperiod results export.

    The *groups* argument is unused; it is kept for compatibility with the
    oTree export hook that calls this function.
    """
    header = ['timestamp_of_start', 'session_ID', 'period_id', 'pair_id']
    # One column per player for each per-player measurement.
    for stem in ('code', 'action', 'countGood', 'periodResult', 'avg_payoffs'):
        header.append('p1_' + stem)
        header.append('p2_' + stem)
    header.extend([
        'subperiod_length',
        'num_subperiods',
        'seconds_per_tick',
        'rest_length_seconds',
        'payoff_matrix(AGood, ABad, BGood, BBad)',
        'probability_matrix(AA, AB, BA, BB)',
    ])
    return header
def get_output_table(events):
    """Build one output row per 'subperiod-start' event for a single group.

    Each row follows get_output_table_header(); a blank row is appended as a
    separator. Returns [] when there are no events.
    """
    if not events:
        return []
    rows = []
    # All events belong to the same group/pair.
    p1, p2 = events[0].group.get_players()
    p1_code = p1.participant.code
    p2_code = p2.participant.code
    group = events[0].group
    config_columns = get_config_columns(group)
    subperiod_num = 0
    for event in events:
        if event.channel == 'subperiod-start':
            # Signal strings ('G'/'B' per tick) recorded for this subperiod.
            p1_result = group.subperiod_results[str(subperiod_num)][p1_code]
            p2_result = group.subperiod_results[str(subperiod_num)][p2_code]
            p1_payoffs = event.value[p1_code]['payoffs']
            p2_payoffs = event.value[p2_code]['payoffs']
            rows.append([
                event.timestamp,
                group.session.code,
                group.subsession_id,
                group.id_in_subsession,
                p1_code,
                p2_code,
                event.value[p1_code]['fixed_decision'],
                event.value[p2_code]['fixed_decision'],
                p1_result.count('G'),
                p2_result.count('G'),
                p1_result,
                p2_result,
                sum(p1_payoffs) / len(p1_payoffs),
                sum(p2_payoffs) / len(p2_payoffs),
                # Fills the 'subperiod_length' column with the realized count
                # (the last subperiod may be shorter than the configured length).
                len(p1_payoffs)
            ] + config_columns)
            subperiod_num += 1
    rows.append("")
    return rows
# Order in which oTree renders the pages each round.
page_sequence = [
    Instructions,
    Decision,
    Results,
]
| {"/views.py": ["/models.py"]} |
69,144 | ashishkashinath/NCBounds | refs/heads/master | /NCBounds/FeedForwardAnalyzer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the pybgl project.
# https://github.com/nokia/NCBounds
__author__ = "Anne Bouillard"
__maintainer__ = "Anne Bouillard"
__email__ = "anne.bouillard@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2019, Nokia"
__license__ = "BSD-3"
from NCBounds.Network import Network, topological_sort, ToyNetwork, ToyTreeNetwork
from NCBounds.ServiceCurve import ServiceCurve, convolution, delay, backlog
from NCBounds.ArrivalCurve import ArrivalCurve
import numpy as np
class FeedForwardAnalyzer:
    """
    Base class for feed-forward analyzers of a network. The network should be
    feed-forward. Subclasses implement a specific type of analysis:

    * SFA: separate flow analysis, that computes a residual service curve and
      output arrival curve for each server and each flow that crosses that server
    * Exact: computes the exact bounds based on [1] and [2]

    :param network: the network to analyze (a fresh empty ``Network`` when omitted)
    :type network: Network
    """
    def __init__(self, network=None):
        # The original default `network=Network()` was evaluated once at class
        # definition time and shared (mutable) across all instances; build the
        # default lazily instead. Passing a network explicitly is unchanged.
        self.network = Network() if network is None else network

    def delay(self, flow: int) -> float:
        """
        Computes a delay bound of a flow based on the chosen analysis.

        :param flow: flow for which the delay is computed
        :type flow: int
        :return: the delay of that flow
        :rtype: float
        :raises NotImplementedError: always; subclasses must override.
        """
        # `NotImplemented` is a sentinel value for binary dunder methods, not
        # an exception; raising it is a TypeError in Python 3.
        raise NotImplementedError

    def backlog(self, flow, server):
        """
        Computes a backlog bound of a flow at a server based on the chosen analysis.

        :param flow: flow for which the backlog is computed
        :type flow: int
        :param server: server at which the backlog is computed
        :type server: int
        :return: the backlog of flow and server
        :rtype: float
        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError
class SFAFeedForwardAnalyzer(FeedForwardAnalyzer):
    """Separate Flow Analysis: per-server residual service and output arrival
    curves, propagated in topological order through the feed-forward network."""

    @property
    def sfa_blind(self) -> tuple:
        """
        Computes the residual service curves and the arrival curves for every flow and every server.

        :return: tab_ac is the list of output arrival curves and tab_sc: the list of residual service curves,
            both indexed by flow * num_servers + server
        :rtype: tuple

        >>> toy = ToyNetwork()
        >>> toy_saf = SFAFeedForwardAnalyzer(toy)
        >>> toy_saf.sfa_blind
        ([<ArrivalCurve: 1.00 + 2.00 t>, <ArrivalCurve: 7.00 + 2.00 t>, <ArrivalCurve: 24.00 + 2.00 t>, \
<ArrivalCurve: 0.00 + 0.00 t>, <ArrivalCurve: 2.00 + 3.00 t>, <ArrivalCurve: 24.00 + 3.00 t>, \
<ArrivalCurve: 1.00 + 1.00 t>, <ArrivalCurve: 0.00 + 0.00 t>, <ArrivalCurve: 5.50 + 1.00 t>], \
[<ServiceCurve: 3.00 . (t - 3.00)+>, <ServiceCurve: 2.00 . (t - 8.50)+>, <ServiceCurve: 3.00 . (t - 12.17)+>, \
<ServiceCurve: inf . (t - 0.00)+>, <ServiceCurve: 3.00 . (t - 7.33)+>, <ServiceCurve: 4.00 . (t - 9.12)+>, \
<ServiceCurve: 2.00 . (t - 4.50)+>, <ServiceCurve: inf . (t - 0.00)+>, <ServiceCurve: 2.00 . (t - 27.50)+>])
        """
        n = self.network.num_servers
        m = self.network.num_flows
        tab_ac = (n * m) * [ArrivalCurve(0, 0)]
        tab_sc = (n * m) * [ServiceCurve(np.inf, 0)]
        # For each flow, index of its current position along its path.
        list_idx_flows = np.zeros(m, int)
        sort = topological_sort(self.network.adjacency_matrix)
        for h in sort:
            # Seed the arrival curve of each flow at its first server.
            for i in self.network.flows_in_servers[h]:
                if list_idx_flows[i] == 0:
                    tab_ac[i * n + h] = self.network.flows[i].acurve
            list_ac = [tab_ac[j * n + h] for j in self.network.flows_in_servers[h]]
            lr, loa = self.network.servers[h].list_residual_output(list_ac)
            for i in range(len(lr)):
                # i is the position within this server's flow list; j is the
                # actual flow index.
                j = self.network.flows_in_servers[h][i]
                tab_sc[j * n + h] = lr[i]
                # NOTE(review): the condition below indexes list_idx_flows and
                # self.network.flows with the *position* i rather than the flow
                # index j used everywhere else in this loop — presumably j was
                # intended; TODO confirm against [1]/[2] before changing.
                if list_idx_flows[i] < self.network.flows[i].length - 1:
                    tab_ac[j * n + self.network.flows[j].path[list_idx_flows[j] + 1]] = loa[i]
                    list_idx_flows[j] += 1
        return tab_ac, tab_sc

    def delay(self, flow) -> float:
        """
        Computes a worst-case delay bound of a flow in the network with the SFA method

        :param flow: flow under consideration
        :type flow: int
        :return: a delay upper bound for the flow
        :rtype: float

        >>> toy = ToyNetwork()
        >>> toy_saf = SFAFeedForwardAnalyzer(toy)
        >>> toy_saf.delay(0)
        24.166666666666664
        """
        sc = ServiceCurve(np.inf, 0)
        tab_ac, tab_sc = self.sfa_blind
        # Convolve the residual service curves along the flow's path.
        # (A leftover debug `print(h)` was removed here; it also made the
        # doctest above fail by printing before the returned value.)
        for h in self.network.flows[flow].path:
            sc = convolution(sc, tab_sc[flow * self.network.num_servers + h])
        return delay(self.network.flows[flow].acurve, sc)

    def backlog(self, flow, server):
        """
        Computes a worst-case backlog bound of a flow in a server in the network with the SFA method

        :param flow: flow under consideration
        :type flow: int
        :param server: server under consideration
        :type server: int
        :return: a backlog upper bound for the flow at the server
        :rtype: float

        >>> toy = ToyNetwork()
        >>> toy_saf = SFAFeedForwardAnalyzer(toy)
        >>> toy_saf.backlog(0, 2)
        48.33333333333333

        >>> tree = ToyTreeNetwork()
        >>> tree_sfa = SFAFeedForwardAnalyzer(tree)
        >>> tree_sfa.backlog(0, 2)
        38.2
        """
        tab_ac, tab_sc = self.sfa_blind
        return backlog(tab_ac[flow * self.network.num_servers + server],
                       tab_sc[flow * self.network.num_servers + server])
class PMOOFeedForwardAnalyzer(FeedForwardAnalyzer):
    """Pay Multiplexing Only Once analysis; requires the (trimmed) network to
    be a forest."""

    def pmoo_blind(self, flow):
        """
        Computes the residual service curves for a flow.

        :param flow: flow under consideration
        :type flow: Flow
        :return: the residual service service curve of foi
        :rtype: ServiceCurve
        :raises NameError: if the trimmed network is not a forest.
        """
        # Trim the network at the flow's last server.
        n = self.network.flows[flow].path[-1]
        nk = self.network.trim(n)
        if nk.is_forest:
            # R: minimum residual rate along the path of the flow of interest.
            res_rates = nk.residual_rate[flow]
            R = min([res_rates[j] for j in nk.flows[flow].path])
            # C: total burst of cross-traffic, each burst inflated by the
            # latencies along that cross-flow's path.
            C = sum([(nk.flows[i].acurve.sigma + nk.flows[i].acurve.rho *
                      sum([nk.servers[k].scurve.latency for k in nk.flows[i].path]))
                     for i in range(nk.num_flows) if not i == flow])
            # T: summed latency along the flow of interest's path.
            T = sum([nk.servers[k].scurve.latency for k in nk.flows[flow].path])
            return ServiceCurve(rate=R, latency=T + C / R)
        else:
            raise NameError("Network is not a forest, PMOO analysis impossible")

    def delay(self, flow):
        """Worst-case delay bound of *flow* from its PMOO residual service curve."""
        return delay(self.network.flows[flow].acurve, self.pmoo_blind(flow))

    def backlog(self, flow, server):
        """Worst-case backlog bound of *flow* at *server*, via PMOO on the
        network trimmed at *server*."""
        nk = self.network.trim(server)
        return backlog(self.network.flows[flow].acurve, PMOOFeedForwardAnalyzer(nk).pmoo_blind(flow))
class ExactFeedForwardAnalyzer(FeedForwardAnalyzer):
    """Exact worst-case analysis for tree (forest) topologies, based on the
    xi-coefficient computation of [1] and [2]."""

    def rstar(self, j, foi) -> float:
        """
        Computes the arrival rate for the flows of interests that cross server j

        :param j: number of the server
        :type j: int
        :param foi: list of the flows of interest
        :type foi: list
        :return: the arrival rate of the flows of interest at server j
        :rtype: float

        >>> tree = ToyTreeNetwork()
        >>> tree_exact = ExactFeedForwardAnalyzer(tree)
        >>> tree_exact.rstar(1, [0])
        2
        """
        list_j = [i for i in self.network.flows_in_servers[j] if i in foi]
        return sum(self.network.flows[i].acurve.rho for i in list_j)

    def xi_rate(self, foi, j, k) -> float:
        """
        The arrival rates of the flows not of interest crossing server j and ending at server k

        :param foi: list of the flows of interest
        :type foi: list
        :param j: number of the server
        :type j: int
        :param k: ending server
        :type k: int
        :return: The sum of the rates of the flows in flows_in_server[j] \\ foi ending at server k
        :rtype: float

        >>> tree = ToyTreeNetwork()
        >>> tree_exact = ExactFeedForwardAnalyzer(tree)
        >>> tree_exact.xi_rate([0], 1, 2)
        3
        """
        s = 0
        for i in self.network.flows_in_servers[j]:
            if i not in foi and self.network.flows[i].path[-1] == k:
                s += self.network.flows[i].acurve.rho
        return s

    def exact_xi(self, flows_interest, destination) -> np.ndarray:
        """
        Computes the xi coefficients to compute the worst-case delay bounds in a forest topology

        :param flows_interest: list of flows of interests
        :type flows_interest: list
        :param destination: root of the tree under analysis
        :type destination: int
        :return: a matrix of xi's
        :rtype: np.ndarray
        :raises NameError: if the network is not a forest.

        >>> tree = ToyTreeNetwork()
        >>> tree_exact = ExactFeedForwardAnalyzer(tree)
        >>> tree_exact.exact_xi([0], 2)
        array([[0.66666667, 0.66666667, 0.66666667],
               [0.        , 0.5       , 0.5       ],
               [0.        , 0.        , 0.5       ]])
        """
        if self.network.is_forest:
            xi = np.zeros((self.network.num_servers, self.network.num_servers))
            # Initialize at the root (destination), then walk towards the
            # leaves through the predecessor lists.
            j = destination
            xi[j, j] = self.rstar(j, flows_interest) / (self.network.servers[j].scurve.rate -
                                                        self.xi_rate(flows_interest, j, j))
            list_suc = self.network.predecessors[j]
            while not list_suc == []:
                j = list_suc[0]
                r1 = self.rstar(j, flows_interest)
                r2 = self.network.servers[j].scurve.rate
                path_d = self.network.path_dest(j, destination)
                r2 -= sum(self.xi_rate(flows_interest, j, l) for l in path_d)
                k = destination
                # Saturate: inherit the successor's xi while it exceeds the
                # current ratio, folding the corresponding rates into r1/r2.
                while xi[self.network.successors[j], k] > r1 / r2:
                    xi[j, k] = xi[self.network.successors[j], k]
                    r2 += self.xi_rate(flows_interest, j, k)
                    r1 += xi[self.network.successors[j], k] * self.xi_rate(flows_interest, j, k)
                    path_d = path_d[1:]
                    k = path_d[0]
                for k in path_d:
                    xi[j, k] = r1 / r2
                xi[j, j] = r1 / r2
                # Breadth-first continuation over the remaining predecessors.
                list_suc = list_suc[1:] + self.network.predecessors[j]
            return xi
        else:
            raise NameError("Network is not a forest, exact analysis impossible")

    def latency_term(self, foi, server, xi):
        """
        Computes the term of the exact performance involving latencies. xi is the matrix exact_xi precomputed
        """
        lat = 0.
        for j in range(server+1):
            if j in self.network.path_dest(j, server):
                lat += (sum(xi[j, l] * self.xi_rate(foi, j, l) for l in self.network.path_dest(j, server))) * \
                    self.network.servers[j].scurve.latency
            # NOTE(review): this condition is identical to the one above; the
            # two additions could share a single if. Left as-is (doc-only pass).
            if j in self.network.path_dest(j, server):
                lat += self.rstar(j, foi) * self.network.servers[j].scurve.latency
        return lat

    def backlog(self, flow, server):
        """
        Computes a worst-case backlog bound of a flow in a server in the network with the Exact method for trees

        :param flow: flow under consideration
        :type flow: int
        :param server: server under consideration
        :type server: int
        :return: a backlog upper bound for the flow at the server
        :rtype: float

        >>> tree = ToyTreeNetwork()
        >>> tree_exact = ExactFeedForwardAnalyzer(tree)
        >>> tree_exact.backlog(0, 2)
        23.5
        """
        tnet = self.network.trim(server)
        ffa = ExactFeedForwardAnalyzer(tnet)
        xi = ffa.exact_xi([flow], server)
        b = 0
        # Burst of the flow of interest plus xi-weighted bursts of cross-flows.
        for i in range(tnet.num_flows):
            if i == flow:
                b += tnet.flows[i].acurve.sigma
            else:
                if not tnet.flows[i].path == []:
                    b += xi[tnet.flows[i].path[0], tnet.flows[i].path[-1]] * \
                        tnet.flows[i].acurve.sigma
        b += self.latency_term([flow], server, xi)
        return b

    def delay(self, flow):
        """Worst-case delay bound of *flow*, derived from its backlog bound at
        the last server of its path."""
        start = self.network.flows[flow].path[0]
        end = self.network.flows[flow].path[-1]
        tnet = self.network.trim(end)
        ffa = ExactFeedForwardAnalyzer(tnet)
        xi = ffa.exact_xi([flow], end)
        bkl = self.backlog(flow, end)
        return (bkl - self.network.flows[flow].acurve.sigma * (1 - xi[start, end])) / self.network.flows[flow].acurve.rho
| {"/NCBounds/FeedForwardAnalyzer.py": ["/NCBounds/Network.py", "/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/two_rings.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/__init__.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Server.py": ["/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/ServiceCurve.py": ["/NCBounds/ArrivalCurve.py"], "/small_network_example.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Network.py": ["/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/Flow.py": ["/NCBounds/ArrivalCurve.py"], "/uniform_ring.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/FixPointAnalyzer.py": ["/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/Flow.py", "/NCBounds/Network.py", "/NCBounds/Server.py"]} |
69,145 | ashishkashinath/NCBounds | refs/heads/master | /two_rings.py | from NCBounds.ArrivalCurve import ArrivalCurve
from NCBounds.Flow import Flow
from NCBounds.ServiceCurve import ServiceCurve
from NCBounds.Server import Server
from NCBounds.Network import Network, Ring, TwoRings
from NCBounds.FeedForwardAnalyzer import FeedForwardAnalyzer, SFAFeedForwardAnalyzer, ExactFeedForwardAnalyzer
from NCBounds.FixPointAnalyzer import FixPointAnalyzer, SFAFixPointAnalyzer, ExactFixPointAnalyzer, \
GroupFixPointAnalyzer, LinearFixPointAnalyzer
import numpy as np
# Experiment script: build a "two rings" network, show its forest
# decomposition, then compare four fix-point analysis methods (SFA,
# flow-based, arc-based, combined linear) on delay bounds of flow 0.
n =5  # ring parameter: the network has 2n servers
R = 100  # NOTE(review): unused below -- looks like a leftover constant
u = 0.3  # utilization rate of the arrival curves
d = np.inf  # NOTE(review): unused below
T = 0.001  # server latency used in the comparison sweep further down
two_ring = TwoRings(n, ArrivalCurve(1, u), Server(ServiceCurve(n, 1)), Server(ServiceCurve(2 * n, 1)))
print("** The small Network description **")
print(two_ring)
print("\n ** Transformation into a forest (noy yet with the correct arrival curves)**")
# Decompose the cyclic network into a forest (arrival curves not yet fixed).
forest_two_ring = ExactFixPointAnalyzer(two_ring).nk2forest[0]
print(forest_two_ring)
print("\n The matrix of xi computed for flow 0 and server 4")
print(ExactFeedForwardAnalyzer(forest_two_ring).exact_xi([0],2 * (n - 1)))
print("\n \nComputing the equivalent forest network (with correct arrival curves) and performances")
print("\n\t*SFA method: each flow is decomposed into sub-paths of length 1")
sfa = SFAFixPointAnalyzer(two_ring)
print(sfa.ff_equiv)
print(sfa.backlog(0, 2*(n-1)))
print("\n\t*Linear-flows method: the network is decomposed into a tree, and an arrival curve is computed for each sub-path")
exact = ExactFixPointAnalyzer(two_ring)
print(exact.ff_equiv)
print(exact.backlog(0,2*(n-1 )))
print("\n\t*Linear-arc method: the network is decomposed into a tree, and an arrival curve is computed for each arc that has been removed")
group = GroupFixPointAnalyzer(two_ring)
print(group.ff_equiv)
#print(group.ff_equiv_bis)
#print(group.backlog_bis(0, 2*(n-1)))
print("\n\t*Linear method: the network is decomposed into a tree, and an arrival curve is computed for each arc that has been removed and each sub-path of the flows")
linear = LinearFixPointAnalyzer(two_ring)
# #print(linear.ff_equiv)
#print(linear.backlog(0, 2*(n-1)))
print("\n\nComparing the approaches")
k = 2* (n-1)  # index of the last server on flow 0's path
# Sweep the utilization u and write one data row per value: u followed by
# the delay bound of flow 0 under each of the four methods.  Stop early if
# the combined linear bound exceeds 5e4 (bounds blow up near saturation).
f = open('./two_ring_delay_3.data', 'w')
#f.write("# u\t SFA\t Exact\t Group \t Comby\n")
two_ring = TwoRings(n, ArrivalCurve(1, u), Server(ServiceCurve(n, T)), Server(ServiceCurve(2 * n, T)))
u = 0.01
lin = 0
while u < 1 and lin < 50000.:
    # Rebuild the network for the current utilization before each row.
    two_ring = TwoRings(n, ArrivalCurve(1, u), Server(ServiceCurve(n, T)), Server(ServiceCurve(2 * n, T)))
    f.write("%f\t" % u)
    f.write("%f\t" % SFAFixPointAnalyzer(two_ring).delay(0))
    f.write("%f\t" % ExactFixPointAnalyzer(two_ring).delay(0))
    f.write("%f\t" % GroupFixPointAnalyzer(two_ring).delay(0))
    lin = LinearFixPointAnalyzer(two_ring).delay(0)
    f.write("%f\n" % lin)
    print(u)
    u += 0.01
f.close()
# (earlier backlog-based sweep, kept commented out for reference)
# while u < 1 and lin < 50000.:
#     two_ring = TwoRings(n, ArrivalCurve(1, u), Server(ServiceCurve(n, T)), Server(ServiceCurve(2 * n, T)))
#     f.write("%f\t" % u)
#     f.write("%f\t" % SFAFixPointAnalyzer(two_ring).backlog(0, 2*(n-1 )))
#     f.write("%f\t" % ExactFixPointAnalyzer(two_ring).backlog(0, 2*(n-1 )))
#     f.write("%f\t" % GroupFixPointAnalyzer(two_ring).backlog(0, 2*(n-1 )))
#     lin = LinearFixPointAnalyzer(two_ring).backlog(0, 2*(n-1 ))
#     f.write("%f\n" % lin)
#     print(u)
#     u += 0.01
# f.close()
# Re-read the data file and plot one curve per analysis method.
with open('./two_ring_delay_3.data') as f:
    lines = f.readlines()
    u = [float(line.split()[0]) for line in lines]
    sfa = [float(line.split()[1]) for line in lines]
    exact = [float(line.split()[2]) for line in lines]
    group = [float(line.split()[3]) for line in lines]
    combi = [float(line.split()[4]) for line in lines]
f.close()  # NOTE(review): redundant -- the with-statement already closed f
import matplotlib.pyplot as pl
pl.plot(u,sfa, c='r', label='SFA')
pl.plot(u,exact, c='b', label='Flows')
pl.plot(u,group, c='y', label='Arcs')
pl.plot(u,combi, c='m', label='F+A')
pl.xlabel('Utilization rate')
pl.ylabel('Backlog bound')  # NOTE(review): file stores delay bounds; label looks wrong -- confirm
pl.legend()
pl.axis([0., 1, 1, 200])
#pl.semilogy()
pl.show()
| {"/NCBounds/FeedForwardAnalyzer.py": ["/NCBounds/Network.py", "/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/two_rings.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/__init__.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Server.py": ["/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/ServiceCurve.py": ["/NCBounds/ArrivalCurve.py"], "/small_network_example.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Network.py": ["/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/Flow.py": ["/NCBounds/ArrivalCurve.py"], "/uniform_ring.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/FixPointAnalyzer.py": ["/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/Flow.py", "/NCBounds/Network.py", "/NCBounds/Server.py"]} |
69,146 | ashishkashinath/NCBounds | refs/heads/master | /NCBounds/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the pybgl project.
# https://github.com/nokia/NCBounds
__author__ = "Anne Bouillard"
__maintainer__ = "Anne Bouillard"
__email__ = "anne.bouillard@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2019, Nokia"
__license__ = "BSD-3"
"""Top-level package for Network Calculus Bounds."""
from .ArrivalCurve import ArrivalCurve, ac_sum, ac_add, ac_sub
from .ServiceCurve import ServiceCurve, convolution, backlog, delay, deconvolution, residual_blind
from .Flow import Flow
from .Server import Server
from .Network import Network, ToyNetwork, Ring, ToyTreeNetwork, TwoRings
from .FeedForwardAnalyzer import FeedForwardAnalyzer, SFAFeedForwardAnalyzer, ExactFeedForwardAnalyzer
from .FixPointAnalyzer import FixPointAnalyzer, SFAFixPointAnalyzer, ExactFixPointAnalyzer, LinearFixPointAnalyzer
| {"/NCBounds/FeedForwardAnalyzer.py": ["/NCBounds/Network.py", "/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/two_rings.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/__init__.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Server.py": ["/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/ServiceCurve.py": ["/NCBounds/ArrivalCurve.py"], "/small_network_example.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Network.py": ["/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/Flow.py": ["/NCBounds/ArrivalCurve.py"], "/uniform_ring.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/FixPointAnalyzer.py": ["/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/Flow.py", "/NCBounds/Network.py", "/NCBounds/Server.py"]} |
69,147 | ashishkashinath/NCBounds | refs/heads/master | /NCBounds/Server.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the pybgl project.
# https://github.com/nokia/NCBounds
__author__ = "Anne Bouillard"
__maintainer__ = "Anne Bouillard"
__email__ = "anne.bouillard@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2019, Nokia"
__license__ = "BSD-3"
from NCBounds.ServiceCurve import ServiceCurve, deconvolution, residual_blind
from NCBounds.ArrivalCurve import ArrivalCurve, ac_sub, ac_sum
class Server:
    """
    A server of the network, characterized by its service curve.

    The default service policy is blind (arbitrary multiplexing); other
    service policies would be implemented as subclasses.

    :param scurve: the service curve offered by the server
    :type scurve: ServiceCurve

    >>> scurve = ServiceCurve(5.,4.)
    >>> Server(scurve)
    <Server: β(t) = 5.00 . (t - 4.00)+>
    <BLANKLINE>
    """
    def __init__(self, scurve=ServiceCurve()):
        self.scurve = scurve

    def __str__(self) -> str:
        return "β(t) = %s" % self.scurve

    def __repr__(self) -> str:
        return "<Server: %s>\n" % self.__str__()

    def list_residual_output(self, list_ac: list) -> tuple:
        """
        Compute, for each flow crossing this server, its residual service
        curve (blind multiplexing) and an arrival curve for its departure
        process, given the arrival curves of all crossing flows.

        :param list_ac: arrival curves of the flows crossing the server
        :type list_ac: list[ArrivalCurve]
        :return: the residual service curves and the output arrival curves,
            in the same order as ``list_ac``
        :rtype: (list(ServiceCurve), list(ArrivalCurve)

        >>> scurve = ServiceCurve(5., 4.)
        >>> server = Server(scurve)
        >>> list_ac = [ArrivalCurve(2., 1.), ArrivalCurve(3., 2.)]
        >>> server.list_residual_output(list_ac)
        ([<ServiceCurve: 3.00 . (t - 7.67)+>, <ServiceCurve: 4.00 . (t - 5.50)+>], [<ArrivalCurve: 9.67 + 1.00 t>, \
<ArrivalCurve: 14.00 + 2.00 t>])
        """
        total = ac_sum(list_ac)
        residuals = []
        outputs = []
        for ac_i in list_ac:
            # Cross traffic = aggregate minus the flow of interest.
            cross_traffic = ac_sub(total, ac_i)
            res = residual_blind(cross_traffic, self.scurve)
            residuals.append(res)
            outputs.append(deconvolution(ac_i, res))
        return residuals, outputs
# class FIFOServer(Server):
# def list_residual_output(self, list_ac):
# """
# Computes the residual service curves under the FIFO policy for every flow entering the server
# :param list_ac: list of arrival curves of the flows crossing the server
# :type list_ac: ArrivalCurve list
# :return: list of residual service curves for each flow and the list of output arrival curves
# :rtype: ServiceCurve list * ArrivalCurve list
# """
# list_res = []
# list_outac = []
# # if self.policy == BLIND:
# ac = ac_sum(list_ac)
# # print("sum ar", ac)
# for i in range(len(list_ac)):
# ac_cross = ac_sub(ac, list_ac[i])
# # print("ar cross", ac_cross)
# sc_res = residual_blind(ac_cross, self.scurve)
# list_res += [sc_res]
# list_outac += [deconvolution(list_ac[i], sc_res)]
# # print("********", list_res[i], list_outac[i])
# return list_res, list_outac
#
#
# class PriorityServer(Server):
# def __init__(self, scurve=ServiceCurve(), priority=list()):
# """
#
# :param scurve: service curve of the server
# :type scurve: ServiceCurve
# :param priority: priority order on the flows
# :type priority: int list
# """
# self.scurve = scurve
# self.priority = priority
#
# def list_residual_output(self, list_ac):
# """
# Computes the residual service curves under the FIFO policy for every flow entering the server
# :param list_ac: list of arrival curves of the flows crossing the server
# :type list_ac: ArrivalCurve list
# :return: list of residual service curves for each flow and the list of output arrival curves
# :rtype: ServiceCurve list * ArrivalCurve list
# """
# list_res = []
# list_outac = []
# # if self.policy == BLIND:
# ac = ac_sum(list_ac)
# # print("sum ar", ac)
# for i in range(len(list_ac)):
# ac_cross = ac_sub(ac, list_ac[i])
# # print("ar cross", ac_cross)
# sc_res = residual_blind(ac_cross, self.scurve)
# list_res += [sc_res]
# list_outac += [deconvolution(list_ac[i], sc_res)]
# # print("********", list_res[i], list_outac[i])
# return list_res, list_outac
| {"/NCBounds/FeedForwardAnalyzer.py": ["/NCBounds/Network.py", "/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/two_rings.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/__init__.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Server.py": ["/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/ServiceCurve.py": ["/NCBounds/ArrivalCurve.py"], "/small_network_example.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Network.py": ["/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/Flow.py": ["/NCBounds/ArrivalCurve.py"], "/uniform_ring.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/FixPointAnalyzer.py": ["/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/Flow.py", "/NCBounds/Network.py", "/NCBounds/Server.py"]} |
69,148 | ashishkashinath/NCBounds | refs/heads/master | /NCBounds/ServiceCurve.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the pybgl project.
# https://github.com/nokia/NCBounds
__author__ = "Anne Bouillard"
__maintainer__ = "Anne Bouillard"
__email__ = "anne.bouillard@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2019, Nokia"
__license__ = "BSD-3"
from NCBounds.ArrivalCurve import ArrivalCurve
from numpy import Inf
class ServiceCurve:
    """
    A rate-latency service curve :math:`\\beta: t \\mapsto R(t-T)_+`.

    In this module, an argument named ``sc1`` is written
    :math:`\\beta_1: t \\mapsto R_1(t-T_1)_+`, and similarly for other
    indices.

    :param rate: the service rate :math:`R`
    :param latency: the latency :math:`T`

    >>> service_curve = ServiceCurve(rate =5., latency=3.)
    """
    def __init__(self, rate: float = 0, latency: float = 0):
        self.rate = rate
        self.latency = latency

    def __str__(self):
        return "%.2f . (t - %.2f)+" % (self.rate, self.latency)

    def __repr__(self):
        return '<ServiceCurve: %s>' % self
def convolution(sc1: ServiceCurve, sc2: ServiceCurve) -> ServiceCurve:
    r"""
    (min,plus)-convolution of two rate-latency service curves:

    .. math:: \beta\gets\beta_1 * \beta_2

    The result is again rate-latency, :math:`\beta: t \mapsto R(t-T)_+`,
    with rate :math:`R = \min (R_1, R_2)` and latency :math:`T = T_1 + T_2`.

    :param sc1: first service curve
    :type sc1: ServiceCurve
    :param sc2: second service curve
    :type sc2: ServiceCurve
    :return: the convolution of the two service curves _sc1_ * _sc2_.
    :rtype: ServiceCurve

    >>> sc1 = ServiceCurve(5, 3)
    >>> sc2 = ServiceCurve(2, 2)
    >>> convolution(sc1, sc2)
    <ServiceCurve: 2.00 . (t - 5.00)+>
    """
    slower_rate = min(sc1.rate, sc2.rate)
    total_latency = sc1.latency + sc2.latency
    return ServiceCurve(rate=slower_rate, latency=total_latency)
def deconvolution(ac: ArrivalCurve, sc: ServiceCurve) -> ArrivalCurve:
    r"""
    (min,plus)-deconvolution of an arrival curve by a service curve:

    .. math:: \alpha'\gets\alpha \oslash \beta

    If :math:`R \geq \rho`, the result is
    :math:`\alpha': t \mapsto \sigma' + \rho' t` with
    :math:`\sigma' = \sigma + \rho T` and :math:`\rho' = \rho`;
    otherwise the output is unbounded, :math:`\alpha' = \infty`.

    :param ac: arrival curve of the flow
    :type ac: ArrivalCurve
    :param sc: service curve of the server
    :type sc: ServiceCurve
    :return: an arrival curve for the output flow
    :rtype: ArrivalCurve

    >>> ac = ArrivalCurve(2, 2)
    >>> sc = ServiceCurve(5, 3)
    >>> deconvolution(ac, sc)
    <ArrivalCurve: 8.00 + 2.00 t>
    >>> ac = ArrivalCurve(5, 3)
    >>> sc = ServiceCurve(2, 2)
    >>> deconvolution(ac, sc)
    <ArrivalCurve: inf + inf t>
    """
    if ac.rho <= sc.rate:
        # Stable case: the burst grows by rho * T, the rate is unchanged.
        return ArrivalCurve(sigma=ac.sigma + ac.rho * sc.latency, rho=ac.rho)
    return ArrivalCurve(sigma=Inf, rho=Inf)
def delay(ac: ArrivalCurve, sc: ServiceCurve) -> float:
    r"""
    Maximum delay bound for a flow with arrival curve *ac* traversing a
    server with service curve *sc*:

    .. math:: D \gets T + \frac{\sigma}{R}

    if :math:`R \geq \rho`, and :math:`D = +\infty` otherwise.

    :param ac: arrival curve
    :type ac: ArrivalCurve
    :param sc: service curve
    :type sc: ServiceCurve
    :return: a maximum delay bound for the flow
    :rtype: float

    >>> ac = ArrivalCurve(2, 2)
    >>> sc = ServiceCurve(5, 3)
    >>> delay(ac, sc)
    3.4
    >>> ac = ArrivalCurve(5, 3)
    >>> sc = ServiceCurve(2, 2)
    >>> delay(ac, sc)
    inf
    """
    if ac.rho <= sc.rate:
        return (ac.sigma + sc.rate * sc.latency) / sc.rate
    # Unstable server: the arrival rate exceeds the service rate.
    return Inf
def backlog(ac: ArrivalCurve, sc: ServiceCurve) -> float:
    r"""
    Maximum backlog bound for a flow with arrival curve *ac* traversing a
    server with service curve *sc*:

    .. math:: B \gets \sigma + \rho T

    if :math:`R \geq \rho`, and :math:`B = +\infty` otherwise.

    :param ac: arrival curve
    :type ac: ArrivalCurve
    :param sc: service curve
    :type sc: ServiceCurve
    :return: a maximum backlog bound for the flow
    :rtype: float

    >>> ac = ArrivalCurve(2., 2.)
    >>> sc = ServiceCurve(5., 3.)
    >>> backlog(ac, sc)
    8.0
    >>> ac = ArrivalCurve(5, 3)
    >>> sc = ServiceCurve(2, 2)
    >>> backlog(ac, sc)
    inf
    """
    if ac.rho <= sc.rate:
        return ac.sigma + ac.rho * sc.latency
    # Unstable server: the backlog grows without bound.
    return Inf
def residual_blind(ac: ArrivalCurve, sc: ServiceCurve) -> ServiceCurve:
    r"""
    Residual service curve left by cross traffic *ac* at a blind
    (arbitrary multiplexing) server with service curve *sc*:

    :math:`\beta_r: t \mapsto R_r(t-T_r)_+` where

    * :math:`R_r = R-\rho` and
    * :math:`T_r = \frac{\sigma + RT}{R-\rho}`

    if :math:`R > \rho`, and the null service curve otherwise.

    :param ac: arrival curve of the cross traffic
    :type ac: ArrivalCurve
    :param sc: service curve of the server
    :type sc: ServiceCurve
    :return: the residual service curve.
    :rtype: ServiceCurve

    >>> ac = ArrivalCurve(2., 2.)
    >>> sc = ServiceCurve(5., 3.)
    >>> residual_blind(ac, sc)
    <ServiceCurve: 3.00 . (t - 5.67)+>
    >>> ac = ArrivalCurve(5, 3)
    >>> sc = ServiceCurve(2, 2)
    >>> residual_blind(ac, sc)
    <ServiceCurve: 0.00 . (t - 0.00)+>
    """
    if ac.rho >= sc.rate:
        # Cross traffic saturates the server: no guaranteed service left
        # (this also avoids a division by zero below).
        return ServiceCurve(rate=0., latency=0.)
    spare_rate = sc.rate - ac.rho
    spare_latency = (ac.sigma + sc.rate * sc.latency) / spare_rate
    return ServiceCurve(rate=spare_rate, latency=spare_latency)
| {"/NCBounds/FeedForwardAnalyzer.py": ["/NCBounds/Network.py", "/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/two_rings.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/__init__.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Server.py": ["/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/ServiceCurve.py": ["/NCBounds/ArrivalCurve.py"], "/small_network_example.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Network.py": ["/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/Flow.py": ["/NCBounds/ArrivalCurve.py"], "/uniform_ring.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/FixPointAnalyzer.py": ["/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/Flow.py", "/NCBounds/Network.py", "/NCBounds/Server.py"]} |
69,149 | ashishkashinath/NCBounds | refs/heads/master | /NCBounds/ArrivalCurve.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the pybgl project.
# https://github.com/nokia/NCBounds
__author__ = "Anne Bouillard"
__maintainer__ = "Anne Bouillard"
__email__ = "anne.bouillard@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2019, Nokia"
__license__ = "BSD-3"
from numpy import Inf
from typing import List
class ArrivalCurve:
    """
    A token-bucket arrival curve
    :math:`\\alpha: t \\mapsto \\sigma + \\rho t`.

    :param sigma: burst of the arrival curve
    :param rho: long-term arrival rate

    >>> arrival_curve = ArrivalCurve(sigma =5., rho=3.)
    """
    def __init__(self, sigma: float = Inf, rho: float = Inf):
        self.sigma = sigma
        self.rho = rho

    def __str__(self) -> str:
        return "%.2f + %.2f t" % (self.sigma, self.rho)

    def __repr__(self):
        return '<ArrivalCurve: %s>' % self
def ac_add(ac1: ArrivalCurve, ac2: ArrivalCurve) -> ArrivalCurve:
    r"""
    Sum of two token-bucket arrival curves:

    .. math:: \alpha\gets\alpha_1 + \alpha_2

    :param ac1: first arrival curve
    :type ac1: ArrivalCurve
    :param ac2: second arrival curve
    :type ac2: ArrivalCurve
    :return: the sum of the two arrival curves
    :rtype: ArrivalCurve

    >>> ac1 = ArrivalCurve(5, 3)
    >>> ac2 = ArrivalCurve(2, 2)
    >>> ac_add(ac1, ac2)
    <ArrivalCurve: 7.00 + 5.00 t>
    """
    summed_sigma = ac1.sigma + ac2.sigma
    summed_rho = ac1.rho + ac2.rho
    return ArrivalCurve(sigma=summed_sigma, rho=summed_rho)
def ac_sum(list_ac: List[ArrivalCurve]) -> ArrivalCurve:
    r"""
    Sum all the arrival curves of *list_ac*:

    .. math:: \alpha \gets \sum_{i=1}^n \alpha_i

    :param list_ac: the arrival curves to aggregate
    :type list_ac: ArrivalCurve list
    :return: the aggregate arrival curve
    :rtype: ArrivalCurve

    >>> ac1 = ArrivalCurve(5, 3)
    >>> ac2 = ArrivalCurve(2, 2)
    >>> ac_sum([ac1, ac2])
    <ArrivalCurve: 7.00 + 5.00 t>
    """
    total_sigma = 0
    total_rho = 0
    for curve in list_ac:
        total_sigma += curve.sigma
        total_rho += curve.rho
    return ArrivalCurve(sigma=total_sigma, rho=total_rho)
def ac_sub(ac1: ArrivalCurve, ac2: ArrivalCurve) -> ArrivalCurve:
    r"""
    Difference of two token-bucket arrival curves:

    .. math:: \alpha\gets\alpha_1 - \alpha_2

    _Warning:_ should be used for removing a flow from the aggregation only

    :param ac1: first arrival curve
    :type ac1: ArrivalCurve
    :param ac2: second arrival curve
    :type ac2: ArrivalCurve
    :return: the difference between the two arrival curves
    :rtype: ArrivalCurve

    >>> ac1 = ArrivalCurve(5, 3)
    >>> ac2 = ArrivalCurve(2, 2)
    >>> ac_sub(ac1, ac2)
    <ArrivalCurve: 3.00 + 1.00 t>
    """
    # Subtracting an unbounded curve would yield NaN; keep the result
    # unbounded instead.
    ac2_unbounded = ac2.sigma == Inf or ac2.rho == Inf
    if ac2_unbounded:
        return ArrivalCurve(Inf, Inf)
    return ArrivalCurve(sigma=ac1.sigma - ac2.sigma, rho=ac1.rho - ac2.rho)
| {"/NCBounds/FeedForwardAnalyzer.py": ["/NCBounds/Network.py", "/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/two_rings.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/__init__.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Server.py": ["/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/ServiceCurve.py": ["/NCBounds/ArrivalCurve.py"], "/small_network_example.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Network.py": ["/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/Flow.py": ["/NCBounds/ArrivalCurve.py"], "/uniform_ring.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/FixPointAnalyzer.py": ["/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/Flow.py", "/NCBounds/Network.py", "/NCBounds/Server.py"]} |
69,150 | ashishkashinath/NCBounds | refs/heads/master | /small_network_example.py | from NCBounds.ArrivalCurve import ArrivalCurve
from NCBounds.Flow import Flow
from NCBounds.ServiceCurve import ServiceCurve
from NCBounds.Server import Server
from NCBounds.Network import Network, Ring, TwoRings
from NCBounds.FeedForwardAnalyzer import FeedForwardAnalyzer, SFAFeedForwardAnalyzer, ExactFeedForwardAnalyzer
from NCBounds.FixPointAnalyzer import FixPointAnalyzer, SFAFixPointAnalyzer, ExactFixPointAnalyzer, \
GroupFixPointAnalyzer, LinearFixPointAnalyzer
class SmallNetwork(Network):
    """A 4-server, 4-flow example network; *u* scales the arrival rates.

    Each flow has an arrival curve with burst 1 and rate ``25 * u``; each
    server has a rate-100, latency-1 service curve.
    """
    def __init__(self, u):
        rate = 25 * u
        paths = ([2, 3, 1], [3, 1, 2], [1, 0, 2], [1, 2, 3])
        flows = [Flow(ArrivalCurve(1, rate), path) for path in paths]
        servers = [Server(ServiceCurve(100, 1)) for _ in range(4)]
        super(SmallNetwork, self).__init__(flows, servers)
# Driver script: build the SmallNetwork example, show its forest
# decomposition, then compare four fix-point analysis methods on the
# backlog bound of flow 3 at server 3.
snk = SmallNetwork(0.5)
print("** The small Network description **")
print(snk)
print("\n ** Transformation into a forest (noy yet with the correct arrival curves)**")
# Forest decomposition of the cyclic network (arrival curves not yet fixed).
forest_snk = ExactFixPointAnalyzer(snk).nk2forest[0]
print(forest_snk)
print("\n The matrix of xi computed for flow 6 and server 3")
print(ExactFeedForwardAnalyzer(forest_snk).exact_xi([6],3))
print("\n \nComputing the equivalent forest network (with correct arrival curves) and performances")
print("\n\t*SFA method: each flow is decomposed into sub-paths of length 1")
sfa = SFAFixPointAnalyzer(snk)
print(sfa.ff_equiv)
print(sfa.backlog(3, 3))
print("\n\t*Linear-flows method: the network is decomposed into a tree, and an arrival curve is computed for each sub-path")
exact = ExactFixPointAnalyzer(snk)
print(exact.ff_equiv)
print(exact.backlog(3, 3))
print("\n\t*Linear-arc method: the network is decomposed into a tree, and an arrival curve is computed for each arc that has been removed")
group = GroupFixPointAnalyzer(snk)
print(group.ff_equiv)
print(group.backlog(3, 3))
print("\n\t*Linear method: the network is decomposed into a tree, and an arrival curve is computed for each arc that has been removed and each sub-path of the flows")
linear = LinearFixPointAnalyzer(snk)
#print(linear.ff_equiv)
print(linear.backlog(3, 3))
print("\n\nComparing the approaches")
# Sweep the utilization u from 0.01 to 1 and write one data row per value:
# u followed by the backlog bound of flow 3 at server 3 for each method.
f = open('./small_network.data', 'w')
#f.write("# u\t SFA\t Exact\t Group \t Comby\n")
u=0.01
while u < 1:
    snk = SmallNetwork(u)
    f.write("%f\t" % u)
    f.write("%f\t" % SFAFixPointAnalyzer(snk).backlog(3, 3))
    f.write("%f\t" % ExactFixPointAnalyzer(snk).backlog(3, 3))
    # NOTE(review): this sweep calls backlog_bis while the printout above
    # used group.backlog -- confirm which variant is intended.
    f.write("%f\t" % GroupFixPointAnalyzer(snk).backlog_bis(3, 3))
    f.write("%f\n" % LinearFixPointAnalyzer(snk).backlog(3, 3))
    u += 0.01
f.close()
# Re-read the data file and plot one curve per analysis method.
with open('./small_network.data') as f:
    lines = f.readlines()
    u = [float(line.split()[0]) for line in lines]
    sfa = [float(line.split()[1]) for line in lines]
    exact = [float(line.split()[2]) for line in lines]
    group = [float(line.split()[3]) for line in lines]
    combi = [float(line.split()[4]) for line in lines]
f.close()  # NOTE(review): redundant -- the with-statement already closed f
import matplotlib.pyplot as pl
pl.plot(u,sfa, c='r', label='SFA')
pl.plot(u,exact, c='b', label='Flows')
pl.plot(u,group, c='y', label='Arcs')
pl.plot(u,combi, c='m', label='F+A')
pl.xlabel('Utilization rate')
pl.ylabel('Backlog bound')
pl.legend()
pl.axis([0., 1, 0, 2000])
pl.show()
69,151 | ashishkashinath/NCBounds | refs/heads/master | /NCBounds/Network.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the pybgl project.
# https://github.com/nokia/NCBounds
__author__ = "Anne Bouillard"
__maintainer__ = "Anne Bouillard"
__email__ = "anne.bouillard@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2019, Nokia"
__license__ = "BSD-3"
import numpy as np
from NCBounds.Flow import *
from NCBounds.Server import *
from NCBounds.ArrivalCurve import ArrivalCurve
from NCBounds.Server import Server
def list_to_str(l: list) -> str:
    """Render *l* one item per line as ``  <idx>:<item>`` with the index
    right-aligned in a 4-character field."""
    rows = ["  %4i:%s" % (idx, item) for (idx, item) in enumerate(l)]
    return "\n".join(rows)
def print_list(l: list):
    """Print *l* via :func:`list_to_str`, one indexed item per line."""
    rendered = list_to_str(l)
    print(rendered)
def dfs(u, n, a, state, queue, sort) -> tuple:
    """
    Depth-first search from ``u`` in a graph that must be acyclic.

    Node states: 0 = white (undiscovered), 1 = gray (on the current DFS
    stack), 2 = black (fully explored).

    Args:
        u: source node of this exploration
        n: number of nodes in the graph
        a: adjacency matrix of the graph
        state: per-node DFS state (0/1/2 as above)
        queue: nodes currently stacked for analysis (gray nodes)
        sort: nodes in reverse order of finishing time (prepended on black)

    Returns: the new state after exploration from u, new queue, update order

    Raises:
        NameError: if a gray node is reached again, i.e. the graph has a
            cycle (feed-forward analysis is then impossible).
    """
    state[u] = 1          # mark u gray and push it onto the stack
    queue = [u] + queue
    while not queue == []:
        # Recurse into every white successor of u; reaching a gray
        # successor means a back edge, hence a cycle.
        for v in [x for x in range(n) if a[u][x] == 1]:
            if state[v] == 0:
                queue = [v] + queue
                state[v] = 1
                (state, queue, sort) = dfs(v, n, a, state, queue, sort)
            elif state[v] == 1:
                raise NameError("Network has cycles: feed-forward analysis impossible")
        # u is finished: record it in the order, mark black and pop it.
        # NOTE(review): this return exits on the first pass, so the
        # surrounding while behaves like a plain block -- confirm intent.
        sort = [u] + sort
        state[u] = 2
        queue = queue[1:]
        return state, queue, sort
def topological_sort(adjacency_matrix) -> list:
    """
    Topological sort of a directed graph given by its adjacency matrix.

    Args:
        adjacency_matrix: square adjacency matrix of the graph

    Returns: the topological order of the nodes, and an error if the graph has cycles.

    Raises:
        NameError: if the matrix is not square, or (propagated from
            :func:`dfs`) if the graph contains a cycle.
    """
    n, m = adjacency_matrix.shape
    if not (n == m):
        raise NameError("Adjacency matrix is not a square matrix")
    order = []
    state = np.zeros(n, int)
    # Repeatedly launch a DFS from a still-white node until all are black
    # (np.argmin returns the first index with the minimal state value).
    start = np.argmin(state)
    while state[start] == 0:
        (state, _queue, order) = dfs(start, n, adjacency_matrix, state, [], order)
        start = np.argmin(state)
    return order
class Network:
"""
Constructor for a network described by a list of flows and a list of servers
:param flows: list of flows circulating in the network
:type flows: List[Flow]
:param servers: list of servers of the network
:type servers: List[Server]
>>> flows = [Flow(ArrivalCurve(2, 1), [0,1 ]), Flow(ArrivalCurve(3, 2), [1, 0])]
>>> servers = [Server(ServiceCurve(5, 1)), Server(ServiceCurve(6, 2))]
>>> network = Network(flows, servers)
>>> network
<Network:
Flows:
0:α(t) = 2.00 + 1.00 t; π = [0, 1]
1:α(t) = 3.00 + 2.00 t; π = [1, 0]
Servers:
0:β(t) = 5.00 . (t - 1.00)+
1:β(t) = 6.00 . (t - 2.00)+>
>>> network.num_flows
2
>>> network.num_servers
2
>>> network.size
4
>>> toy = ToyNetwork()
>>> toy
<Network:
Flows:
0:α(t) = 1.00 + 2.00 t; π = [0, 1, 2]
1:α(t) = 2.00 + 3.00 t; π = [1, 2]
2:α(t) = 1.00 + 1.00 t; π = [0, 2]
Servers:
0:β(t) = 4.00 . (t - 2.00)+
1:β(t) = 5.00 . (t - 3.00)+
2:β(t) = 7.00 . (t - 1.00)+>
>>> toy.num_flows
3
>>> toy.num_servers
3
>>> toy.size
7
"""
def __init__(self, flows=None, servers=None):
    """
    Build a network from its flows and servers.

    :param flows: flows circulating in the network (default: none)
    :type flows: list[Flow] or None
    :param servers: servers of the network (default: none)
    :type servers: list[Server] or None
    """
    # Use None sentinels instead of mutable default arguments
    # (`flows=list()` was evaluated once at definition time, so all
    # no-arg instances shared the very same list objects).
    self.flows = [] if flows is None else flows
    self.servers = [] if servers is None else servers
    self.num_flows = len(self.flows)
    self.num_servers = len(self.servers)
    # Total number of (flow, server) traversals in the network.
    self.size = sum(f.length for f in self.flows)
    # Caches for lazily computed properties.
    self._flows_in_servers = None
    self._adjacency_matrix = None
def __str__(self) -> str:
    """Human-readable listing of the network's flows and servers."""
    flows_part = list_to_str(self.flows)
    servers_part = list_to_str(self.servers)
    return "Flows:\n%s\nServers:\n%s" % (flows_part, servers_part)
def __repr__(self) -> str:
    """Delimited representation wrapping :meth:`__str__`."""
    return "<Network:\n" + self.__str__() + ">"
@property
def index(self) -> np.ndarray:
    """
    Matrix assigning a unique consecutive index to each (flow, server)
    traversal, enumerated in flow order then path order; entries are -1
    where the flow does not cross the server.
    """
    mat = np.full((self.num_flows, self.num_servers), -1, dtype=int)
    counter = 0
    for i in range(self.num_flows):
        flow = self.flows[i]
        for j in range(flow.length):
            mat[i][flow.path[j]] = counter
            counter += 1
    return mat
@property
def matrix_topology(self) -> np.ndarray:
"""
Computes :math:`M` the matrix topology. The matrix has :attr:`num_flows` lines and :attr:`num_servers` columns,
and :math:`M_{i,j} = k` if server :math:`j` is the :math:`k`-th server crossed by flow :math:`i`.
:return: a matrix as defined above
:rtype: np.ndarray
>>> toy = ToyNetwork()
>>> toy.matrix_topology
array([[1, 2, 3],
[0, 1, 2],
[1, 0, 2]])
"""
mat = np.zeros((self.num_flows, self.num_servers), int)
for i in range(self.num_flows):
pf = self.flows[i].path
for j in range(len(pf)):
mat[i, pf[j]] = j + 1
return mat
@property
def flows_in_servers(self) -> list:
r"""
Returns the list of the list of the flows crossing each server:
.. math:: FiS[j] = \{i~|~j\in\pi_i\}
:return: the list of the list of the flows crossing each server
:rtype: list
>>> toy = ToyNetwork()
>>> toy.flows_in_servers
[[0, 2], [0, 1], [0, 1, 2]]
"""
if self._flows_in_servers is None:
fis = self.num_servers * [[]]
for i in range(self.num_flows):
for p in self.flows[i].path:
fis[p] = fis[p] + [i]
self._flows_in_servers = fis
return self._flows_in_servers
@property
def residual_rate(self) -> np.ndarray:
fis = self.flows_in_servers
mat = np.zeros((self.num_flows, self.num_servers), int)
for j in range(self.num_servers):
res_rate = self.servers[j].scurve.rate - sum(self.flows[i].acurve.rho for i in fis[j])
for i in fis[j]:
mat[i][j] = res_rate + self.flows[i].acurve.rho
return mat
@property
def adjacency_matrix(self) -> np.ndarray:
r"""
Constructs the adjacency matrix :math:`A` of the network:
.. math:: A[h,\ell] = 1 \text{ if }\exists i \text{ such that } \pi_i = \langle \ldots h, \ell, \ldots \rangle
and 0 otherwise
:return: the adjacency matrix of the network
:rtype: np.ndarray
>>> toy = ToyNetwork()
>>> toy.adjacency_matrix
array([[0, 1, 1],
[0, 0, 1],
[0, 0, 0]])
"""
if self._adjacency_matrix is None:
n = self.num_servers
adj = np.zeros((n, n), int)
for i in range(self.num_flows):
flow = self.flows[i]
for j in range(self.flows[i].length - 1):
adj[flow.path[j], flow.path[j+1]] = 1
self._adjacency_matrix = adj
return self._adjacency_matrix
@property
def successors(self) -> list:
r"""
Constructs the list :math:`Succ` of servers successors of each server:
.. math:: Succ[h] = \{\ell~|~A[h, \ell] = 1\}.
:return: the list of the lists of successors
:rtype: List(list)
>>> toy = ToyNetwork()
>>> toy.successors
[[1, 2], [2], []]
"""
succ = self.num_servers * [[]]
for i in range(self.num_servers):
succ[i] = [j for j in range(self.num_servers) if self.adjacency_matrix[i, j] == 1]
return succ
@property
def predecessors(self) -> list:
r"""
Constructs the list :math:`Pred` of servers predecessors of each server:
.. math:: Pred[\ell] = \{h~|~A[h, \ell] = 1\}.
:return: the list of the lists of predecessors
:rtype: List(list)
>>> toy = ToyNetwork()
>>> toy.predecessors
[[], [0], [0, 1]]
"""
prec = self.num_servers * [[]]
for i in range(self.num_servers):
prec[i] = [j for j in range(self.num_servers) if self.adjacency_matrix[j, i] == 1]
return prec
@property
def is_forest(self) -> bool:
"""
Checks that a feed-forward network has a forest topology, that is if each server has at most one successor
Caveat: if the network has cycle dependencies, then the algorithme can return True
:return: true if the network is a forest
:rtype: bool
>>> toy = ToyNetwork()
>>> toy.is_forest
False
>>> tree = ToyTreeNetwork()
>>> tree.is_forest
True
"""
for i in range(self.num_servers):
if len(self.successors[i]) >= 2:
return False
return True
def path_dest(self, j, dest) -> list:
"""
In a tree (0r a forest), constructs the path from server j to server dest, given in reversed order..
:param j: the server from where the path is computed
:type j: int
:param dest: the destination server
:type dest: int
:return: the list of servers from j to dest (including those servers) in the order [dest,...,j] and
the empty list if there is no path from j to dest.
:rtype: list
>>> toy = ToyTreeNetwork()
>>> toy.path_dest(0,2)
[2, 1, 0]
>>> toy.path_dest(2,1)
[]
"""
if self.is_forest:
p = [j]
k = j
while not (k == dest):
if self.successors[k] == []:
return []
else:
k = self.successors[k][0]
p = [k] + p
return p
else:
raise NameError("not a forest topology, path might be undefined or not unique")
def trim(self, server):
"""
In a forest, remove all servers that have a larger number than server
Caveat: Should be used for ordered forests
:param server: server that will become a root of the forest
:type server: int
:return: the trimmed network
:rtype: Network
>>> tree = ToyTreeNetwork()
>>> tree.trim(1)
<Network:
Flows:
0:α(t) = 1.00 + 2.00 t; π = [0, 1]
1:α(t) = 2.00 + 3.00 t; π = [1]
2:α(t) = 1.00 + 1.00 t; π = [0, 1]
Servers:
0:β(t) = 4.00 . (t - 2.00)+
1:β(t) = 8.00 . (t - 3.00)+>
"""
list_servers = self.servers[0:server+1]
list_path = self.num_flows * [[]]
for i in range(self.num_flows):
list_path[i] = [self.flows[i].path[p] for p in range(self.flows[i].length)
if self.flows[i].path[p] <= server]
list_flows = [Flow(self.flows[i].acurve, list_path[i]) for i in range(self.num_flows)]
return Network(list_flows, list_servers)
class Ring(Network):
    """
    Constructor for a ring network of size :math:`n` = :attr:`num_flows`: here are :math:`n` flows of length :math:`n`,
    starting one at each server. Every server has the same service curve/policy and every flows has the same arrival
    curve, and server :math:`i` is the successor of server :math:`i+1`, server :math:`n-1` is the successor of
    server :math:`0`.
    :param num_servers: number of servers in the ring-network
    :type num_servers: int
    :param acurve: arrival curve common to every flow
    :type acurve: ArrivalCurve
    :param server: server description common to every server
    :type server: Server
    >>> Ring(3, ArrivalCurve(2., 3.), Server(ServiceCurve(3, 4)))
    <Network:
    Flows:
    0:α(t) = 2.00 + 3.00 t; π = [0 1 2]
    1:α(t) = 2.00 + 3.00 t; π = [1 2 0]
    2:α(t) = 2.00 + 3.00 t; π = [2 0 1]
    Servers:
    0:β(t) = 3.00 . (t - 4.00)+
    1:β(t) = 3.00 . (t - 4.00)+
    2:β(t) = 3.00 . (t - 4.00)+>
    """
    def __init__(self, num_servers, acurve, server):
        # Flow ``start`` visits every server exactly once, wrapping around the
        # ring: path = [start, start+1, ..., n-1, 0, ..., start-1].
        paths = [np.concatenate([np.arange(start, num_servers), np.arange(0, start)])
                 for start in range(num_servers)]
        flows = [Flow(acurve, path) for path in paths]
        super(Ring, self).__init__(flows, num_servers * [server])
class ToyNetwork(Network):
    """
    Construction for a toy network used in the tests.
    >>> ToyNetwork()
    <Network:
    Flows:
    0:α(t) = 1.00 + 2.00 t; π = [0, 1, 2]
    1:α(t) = 2.00 + 3.00 t; π = [1, 2]
    2:α(t) = 1.00 + 1.00 t; π = [0, 2]
    Servers:
    0:β(t) = 4.00 . (t - 2.00)+
    1:β(t) = 5.00 . (t - 3.00)+
    2:β(t) = 7.00 . (t - 1.00)+>
    """
    def __init__(self):
        # Fixed three-flow / three-server instance used by the doctests.
        flows = [Flow(ArrivalCurve(1, 2), [0, 1, 2]),
                 Flow(ArrivalCurve(2, 3), [1, 2]),
                 Flow(ArrivalCurve(1, 1), [0, 2])]
        servers = [Server(ServiceCurve(4, 2)),
                   Server(ServiceCurve(5, 3)),
                   Server(ServiceCurve(7, 1))]
        super(ToyNetwork, self).__init__(flows, servers)
class ToyTreeNetwork(Network):
    """
    Construction for a toy network used in the tests with a tree topology.
    >>> ToyTreeNetwork()
    <Network:
    Flows:
    0:α(t) = 1.00 + 2.00 t; π = [0, 1, 2]
    1:α(t) = 2.00 + 3.00 t; π = [1, 2]
    2:α(t) = 1.00 + 1.00 t; π = [0, 1]
    Servers:
    0:β(t) = 4.00 . (t - 2.00)+
    1:β(t) = 8.00 . (t - 3.00)+
    2:β(t) = 7.00 . (t - 1.00)+>
    """
    def __init__(self):
        # Same shape as ToyNetwork but the third flow stops at server 1,
        # which makes the topology a tree.
        flows = [Flow(ArrivalCurve(1, 2), [0, 1, 2]),
                 Flow(ArrivalCurve(2, 3), [1, 2]),
                 Flow(ArrivalCurve(1, 1), [0, 1])]
        servers = [Server(ServiceCurve(4, 2)),
                   Server(ServiceCurve(8, 3)),
                   Server(ServiceCurve(7, 1))]
        super(ToyTreeNetwork, self).__init__(flows, servers)
class TwoRings(Network):
    """
    Constructor for a network made of two rings of ``num_servers_per_ring``
    servers that share one common server (number ``2*num_servers_per_ring - 2``,
    served by ``server2``); all other servers use ``server1`` and every flow
    has arrival curve ``acurve``.
    :param num_servers_per_ring: number of servers in each ring
    :type num_servers_per_ring: int
    :param acurve: arrival curve common to every flow
    :type acurve: ArrivalCurve
    :param server1: server description for the non-shared servers
    :type server1: Server
    :param server2: server description for the shared server
    :type server2: Server
    """
    def __init__(self, num_servers_per_ring, acurve, server1, server2):
        m = num_servers_per_ring
        center = 2 * m - 2  # index of the server shared by both rings
        # First ring: servers 0 .. m-2 plus the shared server.
        ring1 = [Flow(acurve,
                      np.concatenate([np.arange(i, m - 1), [center], np.arange(0, i)]))
                 for i in range(m)]
        # Second ring: servers m-1 .. 2m-3 plus the shared server.
        ring2 = [Flow(acurve,
                      np.concatenate([np.arange(m - 1 + i, center), [center],
                                      np.arange(m - 1, m - 1 + i)]))
                 for i in range(m)]
        super(TwoRings, self).__init__(ring1 + ring2, center * [server1] + [server2])
| {"/NCBounds/FeedForwardAnalyzer.py": ["/NCBounds/Network.py", "/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/two_rings.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/__init__.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Server.py": ["/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/ServiceCurve.py": ["/NCBounds/ArrivalCurve.py"], "/small_network_example.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Network.py": ["/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/Flow.py": ["/NCBounds/ArrivalCurve.py"], "/uniform_ring.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/FixPointAnalyzer.py": ["/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/Flow.py", "/NCBounds/Network.py", "/NCBounds/Server.py"]} |
69,152 | ashishkashinath/NCBounds | refs/heads/master | /NCBounds/Flow.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the pybgl project.
# https://github.com/nokia/NCBounds
__author__ = "Anne Bouillard"
__maintainer__ = "Anne Bouillard"
__email__ = "anne.bouillard@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2019, Nokia"
__license__ = "BSD-3"
from NCBounds.ArrivalCurve import ArrivalCurve
from typing import List, Tuple
class Flow:
    """
    Constructor for a flow. A flow is defined by
    :param acurve: the arrival curve for the flow
    :type acurve: ArrivalCurve
    :param path: the path it follows, that is the list of servers it crosses, represented by their number
    :type path: List[int]
    >>> acurve = ArrivalCurve(4,3)
    >>> path = [0,1,3]
    >>> Flow(acurve, path)
    <Flow: α(t) = 4.00 + 3.00 t; π = [0, 1, 3]>
    <BLANKLINE>
    >>> Flow(acurve, path).length
    3
    """
    def __init__(self, acurve=None, path=None):
        # ``None`` sentinels replace the original defaults
        # (``acurve=ArrivalCurve()``, ``path=list()``), which were evaluated once
        # at class-definition time: every default-constructed Flow shared the
        # SAME ArrivalCurve instance and the same mutable path list.
        self.acurve = ArrivalCurve() if acurve is None else acurve
        self.path = [] if path is None else path
        # Number of servers crossed by the flow.
        self.length = len(self.path)

    def __str__(self) -> str:
        return "α(t) = %s; π = %s" % (self.acurve, self.path)

    def __repr__(self) -> str:
        # Trailing newline is intentional (see the <BLANKLINE> in the doctest).
        return "<Flow: %s>\n" % self.__str__()
| {"/NCBounds/FeedForwardAnalyzer.py": ["/NCBounds/Network.py", "/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/two_rings.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/__init__.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Server.py": ["/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/ServiceCurve.py": ["/NCBounds/ArrivalCurve.py"], "/small_network_example.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Network.py": ["/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/Flow.py": ["/NCBounds/ArrivalCurve.py"], "/uniform_ring.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/FixPointAnalyzer.py": ["/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/Flow.py", "/NCBounds/Network.py", "/NCBounds/Server.py"]} |
69,153 | ashishkashinath/NCBounds | refs/heads/master | /uniform_ring.py | import numpy as np
from NCBounds.ArrivalCurve import ArrivalCurve
from NCBounds.Flow import Flow
from NCBounds.ServiceCurve import ServiceCurve
from NCBounds.Server import Server
from NCBounds.Network import Network, Ring, TwoRings
from NCBounds.FeedForwardAnalyzer import FeedForwardAnalyzer, SFAFeedForwardAnalyzer, ExactFeedForwardAnalyzer
from NCBounds.FixPointAnalyzer import FixPointAnalyzer, SFAFixPointAnalyzer, ExactFixPointAnalyzer, \
GroupFixPointAnalyzer, LinearFixPointAnalyzer
def backlogPMOO(n, R=None, T=None, b=None, r=None):
    """
    Computes the backlog bound of flow 0 at the last server with the PMOO method
    (based on the PMOO tandem approach) for a uniform ring of ``n`` servers.

    :param n: number of servers (and flows) in the ring
    :param R: server service rate (defaults to the module-level ``R``)
    :param T: server latency (defaults to the module-level ``T``)
    :param b: flow burst (defaults to the module-level ``b``)
    :param r: flow arrival rate (defaults to the module-level ``r``)
    :return: the backlog bound of flow 0 at its last server
    :rtype: float
    """
    # Backward-compatible generalization: the original read the constants from
    # module globals; explicit keyword arguments fall back to those globals.
    g = globals()
    R = g["R"] if R is None else R
    T = g["T"] if T is None else T
    b = g["b"] if b is None else b
    r = g["r"] if r is None else r
    C = np.zeros((n - 1, 1))
    for i in range(n - 1):
        C[i, 0] = (i + 1) * T + ((i + n - 1) * b + (i + 1) * (n - 1) * r * T) / (R - (n - 1) * r)
    a = r / (R - (n - 1) * r)
    I = np.identity(n - 1)
    U = np.ones((n - 1, n - 1))
    # Fix point of the per-server terms: matT = (I - aU)^{-1} C.
    A = np.linalg.inv(I - a * U)
    matT = np.dot(A, C)
    return b + r * (n * T + (2 * (n - 1) * b + n * (n - 1) * r * T) / (R - (n - 1) * r) + a * np.sum(matT))
def delayPMOO(n, R=None, T=None, b=None, r=None):
    """
    Computes the delay bound of flow 0 with the PMOO method
    (based on the PMOO tandem approach) for a uniform ring of ``n`` servers.

    :param n: number of servers (and flows) in the ring
    :param R: server service rate (defaults to the module-level ``R``)
    :param T: server latency (defaults to the module-level ``T``)
    :param b: flow burst (defaults to the module-level ``b``)
    :param r: flow arrival rate (defaults to the module-level ``r``)
    :return: the delay bound of flow 0
    :rtype: float
    """
    # Backward-compatible generalization: keyword arguments fall back to the
    # module-level constants the original version read implicitly.
    g = globals()
    R = g["R"] if R is None else R
    T = g["T"] if T is None else T
    b = g["b"] if b is None else b
    r = g["r"] if r is None else r
    C = np.zeros((n - 1, 1))
    for i in range(n - 1):
        C[i, 0] = (i + 1) * T + ((i + n - 1) * b + (i + 1) * (n - 1) * r * T) / (R - (n - 1) * r)
    a = r / (R - (n - 1) * r)
    I = np.identity(n - 1)
    U = np.ones((n - 1, n - 1))
    # Same fix point as backlogPMOO: matT = (I - aU)^{-1} C.
    A = np.linalg.inv(I - a * U)
    matT = np.dot(A, C)
    return (n * T + (2 * (n - 1) * b + n * (n - 1) * r * T) / (R - (n - 1) * r) + a * np.sum(matT)) + b / (R - (n - 1) * r)
def backlogLeBoudec(n, R=None, T=None, r=None):
    """
    Computes the backlog bound of flow 0 at the last server based on the results in Le Boudec, Thiran 2004.

    :param n: number of servers (and flows) in the ring
    :param R: server service rate (defaults to the module-level ``R``)
    :param T: server latency (defaults to the module-level ``T``)
    :param r: flow arrival rate (defaults to the module-level ``r``)
    :return: the backlog bound of flow 0 at its last server
    :rtype: float
    """
    # Backward-compatible generalization of the original globals-based version.
    g = globals()
    R = g["R"] if R is None else R
    T = g["T"] if T is None else T
    r = g["r"] if r is None else r
    return n * r / (R - n * r) * (n * n + n * R * T) + n * (1 + R * T)
# Experiment driver: sweep the utilization rate u of a uniform n-server ring,
# compute backlog bounds with each analysis method, dump them to a data file,
# then plot the curves.
n = 10
# r = 10
R = 100
u = 0.01
T = 0.001
b = 1
f = open('./uni_ring_%d.data' % n, 'w')
while u<1:
    # Per-flow rate so that total utilization at each server equals u.
    r = u*R/n
    ring = Ring(n, ArrivalCurve(b, r), Server(ServiceCurve(R,T)))
    f.write("%f\t" % u)
    f.write("%f\t" % SFAFixPointAnalyzer(ring).backlog(0, n-1))
    # PMOO is only stable below this utilization threshold; write "inf" beyond
    # (float('inf') still parses in the read-back loop below).
    if u< n/(2*(n-1)):
        f.write("%f\t" % backlogPMOO(n))
    else:
        f.write("inf\t")
    f.write("%f\t" % ExactFixPointAnalyzer(ring).backlog(0, n-1))
    f.write("%f\t" % GroupFixPointAnalyzer(ring).backlog(0, n-1))
    f.write("%f\t" % LinearFixPointAnalyzer(ring).backlog(0, n-1))
    f.write("%f\n" % backlogLeBoudec(n))
    print(R, u)
    u += 0.01
f.close()
# Read the file back: one column per method, first column is u.
with open('./uni_ring_%d.data' % n) as f:
    lines = f.readlines()
u = [float(line.split()[0]) for line in lines]
sfa = [float(line.split()[1]) for line in lines]
pmoo = [float(line.split()[2]) for line in lines]
exact = [float(line.split()[3]) for line in lines]
arc = [float(line.split()[4]) for line in lines]
linear = [float(line.split()[5]) for line in lines]
leboudec = [float(line.split()[6]) for line in lines]
# NOTE(review): redundant -- the `with` block above already closed the file.
f.close()
import matplotlib.pyplot as pl
# NOTE(review): 'SFA' and 'F+A' both use color 'r' -- probably unintended.
pl.plot(u,sfa, c='r', label='SFA')
pl.plot(u,exact, c='b', label='Flows')
pl.plot(u,pmoo, c='y', label='PMOO')
pl.plot(u,arc, c='m', label='Arcs')
pl.plot(u,linear, c='r', label='F+A')
pl.plot(u,leboudec, c='g', label='Leboudec')
pl.xlabel('Utilization rate')
# NOTE(review): the plotted values are backlog bounds, yet the label says
# 'Delay bound' -- confirm which quantity is intended.
pl.ylabel('Delay bound')
pl.legend()
pl.axis([0, 1, 0, 100])
#pl.semilogy()
pl.show()
| {"/NCBounds/FeedForwardAnalyzer.py": ["/NCBounds/Network.py", "/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/two_rings.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/__init__.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Server.py": ["/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/ServiceCurve.py": ["/NCBounds/ArrivalCurve.py"], "/small_network_example.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Network.py": ["/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/Flow.py": ["/NCBounds/ArrivalCurve.py"], "/uniform_ring.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/FixPointAnalyzer.py": ["/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/Flow.py", "/NCBounds/Network.py", "/NCBounds/Server.py"]} |
69,154 | ashishkashinath/NCBounds | refs/heads/master | /NCBounds/FixPointAnalyzer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the pybgl project.
# https://github.com/nokia/NCBounds
__author__ = "Anne Bouillard"
__maintainer__ = "Anne Bouillard"
__email__ = "anne.bouillard@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2019, Nokia"
__license__ = "BSD-3"
from NCBounds.FeedForwardAnalyzer import *
from NCBounds.Flow import *
from NCBounds.Network import Network, Ring
from NCBounds.Server import Server
import numpy as np
from cvxopt import matrix, solvers
from scipy.optimize import linprog
solvers.options['show_progress'] = False
def resoud(mat_a, vec_b) -> np.ndarray:
    """
    Solves the equation :math:`mat_a X = vec_b`.

    :param mat_a: matrix of the equation
    :type mat_a: np.ndarray
    :param vec_b: vector of the equation
    :type vec_b: np.ndarray
    :return: the solution of the linear equation, or a vector of ``+inf``
        (same length as ``vec_b``) when the matrix is singular
    :rtype: np.ndarray
    """
    try:
        # EAFP: np.linalg.solve raises LinAlgError on a singular matrix. This is
        # numerically more reliable than the original ``det != 0`` test, whose
        # determinant can under/overflow for larger matrices and wrongly report
        # a well-conditioned system as singular.
        return np.linalg.solve(mat_a, vec_b)
    except np.linalg.LinAlgError:
        return np.inf * np.ones(len(vec_b))
class FixPointAnalyzer:
    """
    Constructor for the fix point analyzer of a network. This network might contain cycles.
    This class regroups several subclasses, depending on the type of analysis to perform.
    Basically, it uses the Feed Forward analysis to obtain a fix-point equation to be solved. The different methods
    correspond to the different ways to obtain the equation.
    * SFA: uses the SFA Feed-forward analysis
    * Exact: uses the exact bounds after decomposing the network into a forest
    * Group: uses the exact bounds applied to groups of flows along the arcs that have been removed from the network
    * Combi: combines the last two methods (using some mathematical programming)
    :param network: the network to analyze
    :type network: Network
    """
    def __init__(self, network):
        self.network = network

    @property
    def fixpoint_matrix(self):
        r"""
        Compute the fix-point matrix to solve, represented by the tuple (mat_a, vec_b), based on the chosen analysis.
        :return: the matrix and the vector such that :math:`mat_a \sigma = vec_b`
        :rtype: tuple
        :raises NotImplementedError: always; subclasses must override.
        """
        # Fix: the original ``raise NotImplemented`` raises the NotImplemented
        # *sentinel*, which is not an exception and fails with a confusing
        # TypeError; NotImplementedError is the intended abstract-method marker.
        raise NotImplementedError

    @property
    def ff_equiv(self) -> "Network":
        r"""
        transforms a non feed-forward network into a feed-forward network by splitting the flows and computing the
        arrival curve of every splitted flow by the fixpoint method
        :return: a feed-forward network "equivalent" to the non feed-forward one.
        :rtype: Network
        :raises NotImplementedError: always; subclasses must override.
        """
        # The annotation is a string so the forward reference stays lazy.
        raise NotImplementedError

    def backlog(self, flow, server):
        """
        Computes a backlog bound of a flow at a server based on the chosen analysis.
        :param flow: flow for which the backlog is computed
        :type flow: int
        :param server: server at which the backlog is computed
        :type server: int
        :return: the backlog of flow and server
        :rtype: float
        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError
class SFAFixPointAnalyzer(FixPointAnalyzer):
    # Fix-point analyzer that derives the burst (sigma) fix-point equations from
    # the SFA feed-forward analysis, one unknown per (flow, server) crossing.
    @property
    def fixpoint_matrix(self):
        r"""
        Compute the fix-point matrix to solve with the SFA method, represented by the tuple (mat_a, vec_b).
        For example, with the blind multiplexing, the :math:`\sigma` after a server crossed by :math:`n` flows is
        given by:
        .. math:: \sigma'_1 = \sigma_1 + \rho_1\frac{\sum_{i=2}^n \sigma_i + RT}{R-\sum_{i=2}^n rho_i}.
        Computing these relations for all server and all flows crossing that server will lead to a system of linear
        equation, whose unknown are the :math:`\sigma`'s.
        :return: the matrix and the vector such that :math:`mat_a \sigma = vec_b`
        :rtype: tuple
        >>> toy = SFAFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
        >>> toy.fixpoint_matrix
        (array([[ 1.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ],
               [-1.  ,  1.  ,  0.  ,  0.  ,  0.  , -0.25,  0.  , -0.25,  0.  ],
               [ 0.  , -1.  ,  1.  , -0.25,  0.  ,  0.  ,  0.  ,  0.  , -0.25],
               [ 0.  ,  0.  ,  0.  ,  1.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ],
               [ 0.  , -0.25,  0.  , -1.  ,  1.  ,  0.  ,  0.  ,  0.  , -0.25],
               [ 0.  ,  0.  , -0.25,  0.  , -1.  ,  1.  , -0.25,  0.  ,  0.  ],
               [ 0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  1.  ,  0.  ,  0.  ],
               [ 0.  ,  0.  , -0.25,  0.  , -0.25,  0.  , -1.  ,  1.  ,  0.  ],
               [-0.25,  0.  ,  0.  ,  0.  ,  0.  , -0.25,  0.  , -1.  ,  1.  ]]), array([2., 6., 6., 2., 6., 6., 2., 6., 6.]))
        """
        # One unknown per (flow, server) crossing; index maps a crossing to its
        # row/column in the system.
        s = self.network.size
        flo = self.network.matrix_topology
        res = self.network.residual_rate
        index = self.network.index
        mat_a = np.zeros((s, s))
        vec_b = np.zeros(s)
        for i in range(self.network.num_flows):
            for j in range(self.network.flows[i].length):
                path = self.network.flows[i].path
                idx = index[i, path[j]]
                if flo[i][path[j]] == 1:
                    # First server of flow i: sigma is the flow's own burst.
                    vec_b[idx] = self.network.flows[i].acurve.sigma
                    mat_a[idx, idx] = 1.
                else:
                    # sigma after server path[j-1] = sigma before it, plus the
                    # rho_i-weighted contribution of all competing crossings.
                    mat_a[idx, idx] = 1.
                    mat_a[idx, index[i, path[j - 1]]] = -1.
                    for ic in range(self.network.num_flows):
                        if not (flo[ic, path[j - 1]] == 0. or ic == i):
                            mat_a[idx, index[ic, path[j - 1]]] = -self.network.flows[i].acurve.rho / res[i][path[j - 1]]
                    # Constant term: rho_i * R * T / residual rate at path[j-1].
                    vec_b[idx] = self.network.flows[i].acurve.rho \
                        * self.network.servers[path[j - 1]].scurve.rate \
                        * self.network.servers[path[j - 1]].scurve.latency / res[i][path[j - 1]]
        return mat_a, vec_b
    @property
    def ff_equiv(self) -> Network:
        """
        transforms a non feed-forward network into a feed-forward network by splitting the flows and computing the
        arrival curve of every splitted flow by the fixpoint method with SFA method
        :return: The equivalent network
        :rtype: Network
        >>> toy = SFAFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
        >>> toy.ff_equiv
        <Network:
        Flows:
        0:α(t) = 2.00 + 1.00 t; π = [0]
        1:α(t) = 24.40 + 1.00 t; π = [1]
        2:α(t) = 41.20 + 1.00 t; π = [2]
        3:α(t) = 2.00 + 1.00 t; π = [1]
        4:α(t) = 24.40 + 1.00 t; π = [2]
        5:α(t) = 41.20 + 1.00 t; π = [0]
        6:α(t) = 2.00 + 1.00 t; π = [2]
        7:α(t) = 24.40 + 1.00 t; π = [0]
        8:α(t) = 41.20 + 1.00 t; π = [1]
        Servers:
        0:β(t) = 6.00 . (t - 4.00)+
        1:β(t) = 6.00 . (t - 4.00)+
        2:β(t) = 6.00 . (t - 4.00)+>
        """
        # Solve the linear fix point; each crossing becomes a one-hop flow whose
        # burst is the corresponding solved sigma (inf when no finite solution).
        tab_sigma = resoud(self.fixpoint_matrix[0], self.fixpoint_matrix[1])
        num_split = [self.network.flows[i].length for i in range(self.network.num_flows)]
        s = self.network.size
        list_flows = []
        j = 0
        h = 0
        flow = self.network.flows[j]
        for i in range(s):
            if tab_sigma[i] >= 0:
                list_flows += [Flow(ArrivalCurve(tab_sigma[i], flow.acurve.rho), [flow.path[h]])]
            else:
                # Negative sigma means the fix point diverged: mark as unbounded.
                list_flows += [Flow(ArrivalCurve(np.inf, flow.acurve.rho), [flow.path[h]])]
            h += 1
            if h == num_split[j]:
                # Move on to the next original flow.
                h = 0
                j += 1
                if j < self.network.num_flows:
                    flow = self.network.flows[j]
        return Network(list_flows, self.network.servers)
    def _flow_decomp(self, flow, server):
        # Index, in the split (one-hop-per-crossing) network, of the sub-flow of
        # ``flow`` that crosses ``server``.
        i = 0
        f = 0
        while i < flow:
            f += self.network.flows[i].length
            i += 1
        i = 0
        while not self.network.flows[flow].path[i] == server:
            f += 1
            i += 1
        return f
    def backlog(self, flow, server):
        """
        Computes a backlog bound of a flow at a server based on the SFA analysis.
        :param flow: flow for which the backlog is computed
        :type flow: int
        :param server: server at which the backlog is computed
        :type server: int
        :return: the backlog of flow and server
        :rtype: float
        >>> toy = SFAFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
        >>> toy.backlog(0, 2)
        53.8
        """
        f = self._flow_decomp(flow, server)
        return SFAFeedForwardAnalyzer(self.ff_equiv).backlog(f, server)
    def delay(self, flow):
        """
        Computes a delay bound of a flow at a server based on the SFA analysis.
        :param flow: flow for which the delay is computed
        :type flow: int
        :return: the delay of flow
        :rtype: float
        """
        ffnet = SFAFeedForwardAnalyzer(self.ff_equiv)
        tab_ac, tab_sc = ffnet.sfa_blind
        i = 0
        f = 0
        while i < flow:
            f += self.network.flows[i].length
            i += 1
        # Convolve the residual service curves of every server on the flow's
        # path, then apply the (arrival curve, service curve) delay bound.
        sc = ServiceCurve(np.inf, 0)
        for i in range(len(self.network.flows[flow].path)):
            sc = convolution(sc,
                             tab_sc[self.network.flows[flow].path[i] +
                                    (f + i) * self.network.num_servers])
        #server = self.network.flows[flow].path[-1]
        #f = self._flow_decomp(flow, server)
        #return SFAFeedForwardAnalyzer(self.ff_equiv).delay(f)
        return delay(self.network.flows[flow].acurve, sc)
class ExactFixPointAnalyzer(FixPointAnalyzer):
    # Fix-point analyzer based on the exact feed-forward analysis of the forest
    # obtained by cutting the network's cyclic dependencies.
    @property
    def succ_forest(self):
        # succ_forest[i] is the unique successor kept for server i in the forest
        # transformation: the smallest successor whose number is larger than i,
        # or num_servers when none exists (i becomes a root).
        sf = np.zeros(self.network.num_servers, int)
        for i in range(self.network.num_servers):
            j = self.network.num_servers
            for k in self.network.successors[i]:
                if k > i:
                    j = min(k, j)
            sf[i] = j
        return sf
    @property
    def nk2forest(self) -> tuple:
        """
        Transforms the network into a forest by keeping one successor in the acyclic transformation: the one with
        the smallest number higher than this server
        :return: network with split flows so that the topology is a forest and the list of the number of the flow
        before first split for each flow
        :rtype: tuple
        >>> toy = ExactFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
        >>> toy.nk2forest
        (<Network:
        Flows:
        0:α(t) = 2.00 + 1.00 t; π = [0, 1, 2]
        1:α(t) = 2.00 + 1.00 t; π = [1, 2]
        2:α(t) = 2.00 + 1.00 t; π = [0]
        3:α(t) = 2.00 + 1.00 t; π = [2]
        4:α(t) = 2.00 + 1.00 t; π = [0, 1]
        Servers:
        0:β(t) = 6.00 . (t - 4.00)+
        1:β(t) = 6.00 . (t - 4.00)+
        2:β(t) = 6.00 . (t - 4.00)+>, [0, 1, 3])
        """
        flow_list = []
        list_prems = []   # index of the first sub-flow of each original flow
        pre = 0
        for flow in self.network.flows:
            i = 0
            list_prems += [pre]
            p = [flow.path[i]]
            while i < len(flow.path) - 1:
                if flow.path[i + 1] == self.succ_forest[flow.path[i]]:
                    # Edge kept in the forest: extend the current sub-path.
                    p += [flow.path[i + 1]]
                else:
                    # Edge removed by the forest transformation: cut the flow here.
                    pre += 1
                    flow_list += [Flow(flow.acurve, p)]
                    p = [flow.path[i + 1]]
                i += 1
            pre += 1
            flow_list += [Flow(flow.acurve, p)]
        return Network(flow_list, self.network.servers), list_prems
    @property
    def fixpoint_matrix(self) -> tuple:
        r"""
        Compute the fix-point matrix to solve with the Exact method, represented by the tuple (mat_a, vec_b).
        This make use of the matrix computing the :math:`xi` coefficients. The unknown are the :math:`\sigma` of the
        flows in the network transformed into a forest.
        :return: the matrix and the vector such that :math:`mat_a \sigma = vec_b`
        :rtype: tuple
        >>> toy = ExactFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
        >>> toy.fixpoint_matrix
        (array([[ 1.  ,  0.  ,  0.  ,  0.  ,  0.  ],
               [ 0.  ,  1.  ,  0.  ,  0.  ,  0.  ],
               [-0.25, -1.  ,  0.9 , -0.25, -0.25],
               [ 0.  ,  0.  ,  0.  ,  1.  ,  0.  ],
               [-0.25, -0.25, -0.07, -1.  ,  0.9 ]]), array([ 2.  ,  2.  , 14.4 ,  2.  , 10.08]))
        """
        forest, list_prems = self.nk2forest
        s = len(forest.flows)
        mat_a = np.zeros((s, s))
        vec_b = np.zeros(s)
        list_prems += [forest.num_flows]  # sentinel so list_prems[i] is always valid
        i = 0
        for h in range(s):
            if h == list_prems[i]:
                # Sub-flow h starts an original flow: its sigma is known.
                vec_b[h] = forest.flows[i].acurve.sigma
                mat_a[h, h] = 1.
                i += 1
            else:
                # Sub-flow h continues sub-flow h-1 across a removed edge: its
                # sigma is bounded using the exact xi coefficients of the forest
                # trimmed at the end of sub-flow h-1.
                ftrim = forest.trim(forest.flows[h-1].path[-1])
                # print (ftrim)
                ffa = ExactFeedForwardAnalyzer(ftrim)
                xi = ffa.exact_xi([h-1], forest.flows[h-1].path[-1])
                # print(xi)
                mat_a[h, h] = 1.
                mat_a[h, h - 1] = -1
                for h1 in range(s):
                    if not h - 1 == h1 and not ftrim.flows[h1].path == []:
                        mat_a[h, h1] -= xi[ftrim.flows[h1].path[0], ftrim.flows[h1].path[-1]]
                vec_b[h] = ffa.latency_term([h-1], forest.flows[h-1].path[-1], xi)
        return mat_a, vec_b
    @property
    def ff_equiv(self) -> Network:
        """
        transforms a non feed-forward network into a feed-forward network by splitting the flows and computing the
        arrival curve of every splitted flow by the fixpoint method with exact method
        :return: The equivalent network
        :rtype: Network
        >>> toy = ExactFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
        >>> toy.ff_equiv
        <Network:
        Flows:
        0:α(t) = 2.00 + 1.00 t; π = [0, 1, 2]
        1:α(t) = 2.00 + 1.00 t; π = [1, 2]
        2:α(t) = 23.89 + 1.00 t; π = [0]
        3:α(t) = 2.00 + 1.00 t; π = [2]
        4:α(t) = 16.39 + 1.00 t; π = [0, 1]
        Servers:
        0:β(t) = 6.00 . (t - 4.00)+
        1:β(t) = 6.00 . (t - 4.00)+
        2:β(t) = 6.00 . (t - 4.00)+>
        """
        # Solve for the sub-flow sigmas; a negative solution means the fix
        # point diverged and the burst is unbounded (inf).
        tab_sigma = resoud(self.fixpoint_matrix[0], self.fixpoint_matrix[1])
        forest = self.nk2forest[0]
        s = forest.num_flows
        list_flows = []
        for i in range(s):
            flow = forest.flows[i]
            if tab_sigma[i] >= 0:
                list_flows += [Flow(ArrivalCurve(tab_sigma[i], flow.acurve.rho), flow.path)]
            else:
                list_flows += [Flow(ArrivalCurve(np.inf, flow.acurve.rho), flow.path)]
        return Network(list_flows, self.network.servers)
    def _flow_decomp(self, flow, server):
        # Index, in the forest network, of the sub-flow of ``flow`` whose path
        # contains ``server``; raises if no sub-flow crosses it.
        ff_net, list_prems = self.nk2forest
        f = list_prems[flow]
        if flow == self.network.num_flows - 1:
            b = ff_net.num_flows
        else:
            b = list_prems[flow + 1]
        while f < b and server not in ff_net.flows[f].path:
            f += 1
        if f == b:
            raise NameError("flow does not cross the server")
        return f
    def backlog(self, flow, server):
        """
        Computes a backlog bound of a flow at a server based on the exact analysis.
        :param flow: flow for which the backlog is computed
        :type flow: int
        :param server: server at which the backlog is computed
        :type server: int
        :return: the backlog of flow and server
        :rtype: float
        >>> toy = ExactFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
        >>> toy.backlog(0, 2)
        31.069400630914828
        """
        f = self._flow_decomp(flow, server)
        # print(f)
        return ExactFeedForwardAnalyzer(self.ff_equiv).backlog(f, server)
    def delay(self, flow):
        """
        Computes a delay bound of a flow based on the exact analysis.
        WARNING: only for flows not cut into several subflows -> TODO
        :param flow: flow for which the delay is computed
        :type flow: int
        :return: the delay of flow
        :rtype: float
        """
        server = self.network.flows[flow].path[-1]
        f = self._flow_decomp(flow, server)
        # print(f)
        # print(ExactFeedForwardAnalyzer(self.ff_equiv).network)
        # print(f)
        return ExactFeedForwardAnalyzer(self.ff_equiv).delay(f)
class GroupFixPointAnalyzer(ExactFixPointAnalyzer):
@property
def _removed_edges(self) -> list:
"""
Compute the set of edges that are removed when transforming the network into a forest.
:return: the list of removed edges
:rtype: list
>>> toy = GroupFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
>>> toy._removed_edges
[(2, 0)]
"""
lre = set([])
for i in range(self.network.num_flows):
for h in range(self.network.flows[i].length - 1):
if not self.network.flows[i].path[h + 1] == self.succ_forest[self.network.flows[i].path[h]]:
lre.add((self.network.flows[i].path[h], self.network.flows[i].path[h + 1]))
return list(lre)
    @property
    def foi_group(self):
        """
        For each removed edge, constructs the set of flows of interest for the analysis, that is the set of flows that
        were going through that edge. These will be the set of flows of interest for further analysis (we want the
        global worst-case backlog of these flows).
        :return: the list of flows of interests for each removed edge
        :rtype: list
        >>> toy = GroupFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
        >>> toy.foi_group
        [[1, 3]]
        """
        forest, list_prems = self.nk2forest
        list_prems += [forest.num_flows]  # sentinel for the lookahead below
        # [[]] * n is safe here: the entries are rebound (not mutated) below.
        list_per_edge = len(self._removed_edges) * [[]]
        for f in range(len(self._removed_edges)):
            (i, j) = self._removed_edges[f]
            s = 1
            for h in range(forest.num_flows - 1):
                if h + 1 == list_prems[s]:
                    # Sub-flow h+1 starts a new original flow: no cut between h and h+1.
                    s += 1
                elif (i, j) == (forest.flows[h].path[-1], forest.flows[h + 1].path[0]):
                    # Sub-flows h and h+1 come from one original flow that was
                    # cut exactly on the removed edge (i, j).
                    list_per_edge[f] = list_per_edge[f] + [h]
        return list_per_edge
    @property
    def fixpoint_matrix(self):
        r"""
        Compute the fix-point matrix to solve with the Exact method, represented by the tuple (mat_a, vec_b).
        This make use of the matrix computing the :math:`xi` coefficients. The unknown are the :math:`\sigma` of the
        groups of flows, per removed edge in the network transformed into a forest.
        :return: the matrix and the vector such that :math:`mat_a \sigma = vec_b`
        :rtype: tuple
        >>> toy = GroupFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
        >>> toy.fixpoint_matrix
        (array([[0.72]]), array([18.]))
        """
        forest, list_prems = self.nk2forest
        redges = self._removed_edges
        rlist = self.foi_group
        #print(rlist)
        #print(list_prems)
        # One unknown (group sigma) per removed edge.
        s = len(redges)
        mat_a = np.zeros((s, s))
        vec_b = np.zeros(s)
        for h in range(s):
            tforest = forest.trim(redges[h][0])
            ffa = ExactFeedForwardAnalyzer(tforest)
            xi = ffa.exact_xi(rlist[h], redges[h][0])
            # print(xi)
            mat_a[h, h] = 1
            for e in range(s):
                # Worst xi coefficient among the sub-flows that continue after
                # a cut on removed edge e (hence the f + 1 indices).
                mat_a[h, e] -= max([0] + [xi[tforest.flows[f + 1].path[0],
                                             tforest.flows[f + 1].path[-1]]
                                          for f in rlist[e]])
            # Known bursts of the original (uncut) flows, weighted by xi for the
            # flows that are not in the group of interest.
            vec_b[h] = sum([xi[tforest.flows[f].path[0], tforest.flows[f].path[-1]]
                            * tforest.flows[f].acurve.sigma
                            for f in list_prems if not tforest.flows[f].path == []
                            and f not in rlist[h]])
            vec_b[h] += sum([tforest.flows[f].acurve.sigma
                             for f in list_prems if not tforest.flows[f].path == []
                             and f in rlist[h]])
            # NOTE(review): this adds edge h's latency term to EVERY component of
            # vec_b, unlike ExactFixPointAnalyzer.fixpoint_matrix which assigns
            # it to vec_b[h] only -- confirm whether ``vec_b[h] +=`` was intended
            # (indistinguishable in the s == 1 doctest above).
            vec_b += ffa.latency_term(rlist[h], redges[h][0], xi)
        #print(mat_a, vec_b)
        return mat_a, vec_b
    @property
    def ff_equiv(self) -> Network:
        """
        transforms a non feed-forward network into a feed-forward network by splitting the flows and computing the
        arrival curve of every splitted flow by the fixpoint method with exact method and grouping flows.
        :return: The equivalent network
        :rtype: Network
        >>> toy = GroupFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
        >>> toy.ff_equiv
        <Network:
        Flows:
        0:α(t) = 2.00 + 1.00 t; π = [0, 1, 2]
        1:α(t) = 2.00 + 1.00 t; π = [1, 2]
        2:α(t) = 30.53 + 1.00 t; π = [0]
        3:α(t) = 2.00 + 1.00 t; π = [2]
        4:α(t) = 30.53 + 1.00 t; π = [0, 1]
        Servers:
        0:β(t) = 6.00 . (t - 4.00)+
        1:β(t) = 6.00 . (t - 4.00)+
        2:β(t) = 6.00 . (t - 4.00)+>
        """
        # solve mat_a . sigma = vec_b for the group burst parameters
        tab_sigma = resoud(self.fixpoint_matrix[0], self.fixpoint_matrix[1])
        forest, list_prems = self.nk2forest
        s = forest.num_flows
        r = len(self._removed_edges)
        list_sigma = np.zeros(s)
        # the first segment of each original flow keeps its input burst sigma
        for i in range(self.network.num_flows):
            list_sigma[list_prems[i]] = self.network.flows[i].acurve.sigma
        # each segment after a removed edge gets the solved group sigma;
        # a negative solution means the fix point diverged -> infinite burst
        for i in range(r):
            for f in self.foi_group[i]:
                if tab_sigma[i] >= 0:
                    list_sigma[f + 1] = tab_sigma[i]
                else:
                    list_sigma[f + 1] = np.inf
        # rebuild the split flows with the new bursts (rates are unchanged)
        list_flows = []
        for i in range(s):
            flow = forest.flows[i]
            list_flows += [Flow(ArrivalCurve(list_sigma[i], flow.acurve.rho), flow.path)]
        return Network(list_flows, self.network.servers)
def backlog(self, flow, server):
"""
Computes a backlog bound of a flow at a server based on the exact analysis.
:param flow: flow for which the backlog is computed
:type flow: int
:param server: server at which the backlog is computed
:type server: int
:return: the backlog of flow and server
:rtype: float
>>> toy = GroupFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
>>> toy.backlog(0, 2)
33.5
"""
f = self._flow_decomp(flow, server)
return ExactFeedForwardAnalyzer(self.ff_equiv).backlog(f, server)
def delay(self, flow):
"""
Computes a delay bound of a flow based on the exact analysis.
WARNING: only for flows not cut into several subflows -> TODO
:param flow: flow for which the delay is computed
:type flow: int
:return: the delay of flow
:rtype: float
"""
server = self.network.flows[flow].path[-1]
f = self._flow_decomp(flow, server)
#print(ExactFeedForwardAnalyzer(self.ff_equiv).network)
# print(f)
return ExactFeedForwardAnalyzer(self.ff_equiv).delay(f)
class LinearFixPointAnalyzer(GroupFixPointAnalyzer):
    """Fix-point analyzer that bounds all burst parameters at once with a
    single (large) linear program, instead of solving the exact fix-point
    equations.  The unknowns are the sigma of every split flow plus one
    sigma per group of flows crossing a removed edge.
    """

    @property
    def matrix_diag(self):
        """Selector block: identity over the split flows, stacked over the
        removed-edge x flow incidence matrix (entry (a, f+1) = 1 when split
        flow f+1 belongs to the group of removed edge a).

        :rtype: np.ndarray of shape (num_flows + num_arcs, num_flows)
        """
        forest, prems = self.nk2forest
        num_f = forest.num_flows
        num_a = len(self._removed_edges)
        identity_block = np.eye(num_f)
        incidence = np.zeros((num_a, num_f))
        for arc, group in enumerate(self.foi_group):
            for flow in group:
                incidence[arc, flow + 1] = 1
        return np.block([[identity_block], [incidence]])

    @property
    def matrix_end(self):
        """Negated identity over all unknowns (split flows + removed edges)."""
        forest, prems = self.nk2forest
        num_a = len(self._removed_edges)
        return -1 * np.eye(num_a + forest.num_flows)

    @property
    def fixpoint_matrix_flows(self) -> tuple:
        """Constraint rows tying each split flow's sigma to the sigma of its
        predecessor segment in the forest decomposition.

        :return: (mat_a, vec_b) for the per-flow rows of the global LP
        :rtype: tuple
        """
        forest, list_prems = self.nk2forest
        s = len(forest.flows)
        mat_a = np.zeros((s, s))
        vec_b = np.zeros(s)
        list_prems += [forest.num_flows]
        i = 0
        for h in range(s):
            if h == list_prems[i]:
                # flow h is the first segment of original flow i: its burst
                # is known from the input arrival curve
                vec_b[h] = forest.flows[i].acurve.sigma
                i += 1
            else:
                # flow h continues flow h-1 after the last server of h-1
                ftrim = forest.trim(forest.flows[h - 1].path[-1])
                ffa = ExactFeedForwardAnalyzer(ftrim)
                xi = ffa.exact_xi([h - 1], forest.flows[h - 1].path[-1])
                mat_a[h, h - 1] = -1
                for h1 in range(s):
                    if not h - 1 == h1 and not ftrim.flows[h1].path == []:
                        mat_a[h, h1] -= xi[ftrim.flows[h1].path[0],
                                           ftrim.flows[h1].path[-1]]
                vec_b[h] = ffa.latency_term([h - 1], forest.flows[h - 1].path[-1], xi)
        return mat_a, vec_b

    @property
    def fixpoint_matrix_arcs(self):
        """Constraint rows for the sigma of the flow group crossing each
        removed edge.

        :return: (mat_a, vec_b) for the removed-edge rows of the global LP
        :rtype: tuple
        """
        forest, list_prems = self.nk2forest
        num_f = forest.num_flows
        redges = self._removed_edges
        rlist = self.foi_group
        num_a = len(redges)
        mat_a = np.zeros((num_a, num_f))
        vec_b = np.zeros(num_a)
        for h in range(num_a):
            tforest = forest.trim(redges[h][0])
            ffa = ExactFeedForwardAnalyzer(tforest)
            xi = ffa.exact_xi(rlist[h], redges[h][0])
            for e in range(num_f):
                if e in rlist[h]:
                    mat_a[h, e] = -1
                # BUGFIX: guard on flow e (the one indexed just below), not
                # flow h; h indexes removed edges, so flows[h] was the wrong
                # flow and empty-path flows[e] would crash the xi lookup.
                elif not tforest.flows[e].path == []:
                    mat_a[h, e] = -xi[tforest.flows[e].path[0],
                                      tforest.flows[e].path[-1]]
            vec_b[h] = ffa.latency_term(rlist[h], redges[h][0], xi)
        return mat_a, vec_b

    @property
    def matrix_f_and_a(self):
        """Stack the flow rows and arc rows into one matrix / one row vector."""
        flows_a, flows_b = self.fixpoint_matrix_flows
        arcs_a, arcs_b = self.fixpoint_matrix_arcs
        # compute each property once (they are expensive to build)
        return np.block([[flows_a], [arcs_a]]), np.block([[flows_b, arcs_b]])

    def the_big_matrix_and_vector(self):
        """Assemble the full LP constraint matrix and right-hand side.

        One block of (selector, fix-point row, negated identity) columns is
        laid out per unknown; see matrix_diag / matrix_end for the blocks.

        :return: (big_a, big_b) such that the LP feasible set is
            ``big_a . x <= big_b``
        :rtype: tuple
        """
        mat_a, vec_b = self.matrix_f_and_a
        matrix_diag = self.matrix_diag
        matrix_end = self.matrix_end
        n1, n2 = np.shape(matrix_diag)
        mat0 = np.zeros((n1 + 1, n2))
        end_mat = np.eye(n1)
        # first column block
        big_a = np.block([[np.block([[mat_a[0]], [matrix_diag]]),
                           np.block([[mat0] * (n1 - 1)]),
                           np.block([[end_mat[0]], [matrix_end]])]])
        big_b = np.zeros(n1 + n1 * (n1 + 1))
        big_b[n1] = vec_b[0, 0]
        # middle column blocks
        for i in range(n1 - 2):
            big_b[n1 + (i + 1) * (n1 + 1)] = vec_b[0, i + 1]
            a_next = np.block([np.block([[mat0] * (i + 1)]),
                               np.block([[mat_a[i + 1]], [matrix_diag]]),
                               np.block([[mat0] * (n1 - i - 2)]),
                               np.block([[end_mat[i + 1]], [matrix_end]])])
            big_a = np.concatenate((big_a, a_next), axis=0)
        # last column block
        a_next = np.block([[np.block([[mat0] * (n1 - 1)]),
                            np.block([[mat_a[n1 - 1]], [matrix_diag]]),
                            np.block([[end_mat[n1 - 1]], [matrix_end]])]])
        big_a = np.concatenate((big_a, a_next), axis=0)
        big_b[n1 + (n1 - 1) * (n1 + 1)] = vec_b[0, n1 - 1]
        big_a = np.block([[matrix_diag, np.zeros((n1, (n1 * n2))), matrix_end],
                          [np.zeros((n1 * (n1 + 1), n2)), big_a]])
        return big_a, big_b

    def _lp_bound(self, flow, server):
        """Solve the global LP for `flow` at `server`.

        :return: (backlog bound, xi coefficients, decomposed flow index);
            the bound is np.inf when the LP fails (unbounded/infeasible)
        """
        mat_a, vec_b = self.the_big_matrix_and_vector()
        n2 = np.shape(mat_a)[1]
        tforest = self.nk2forest[0].trim(server)
        tfa = ExactFeedForwardAnalyzer(tforest)
        c = np.zeros(n2)
        f = self._flow_decomp(flow, server)
        xi = tfa.exact_xi([f], server)
        # objective: maximize the weighted bursts (linprog minimizes, so
        # the coefficients are negated)
        for i in range(len(tforest.flows)):
            if i == f:
                c[i] = -1
            elif not tforest.flows[i].path == []:
                c[i] = -xi[tfa.network.flows[i].path[0],
                           tfa.network.flows[i].path[-1]]
        linear = linprog(c, mat_a, vec_b, options={'tol': 1e-7})
        if not linear.success:
            return np.inf, xi, f
        return -linear.fun + tfa.latency_term([f], server, xi), xi, f

    def backlog(self, flow, server):
        """Backlog bound of `flow` at `server` from the linear fix point.

        :rtype: float (np.inf when the LP is unbounded/infeasible)
        """
        return self._lp_bound(flow, server)[0]

    def delay(self, flow):
        """Delay bound of `flow`, evaluated at the last server of its path.

        :rtype: float (np.inf when the LP is unbounded/infeasible)
        """
        server = self.network.flows[flow].path[-1]
        start = self.network.flows[flow].path[0]
        bkl, xi, _ = self._lp_bound(flow, server)
        if bkl == np.inf:
            return np.inf
        acurve = self.network.flows[flow].acurve
        # convert the backlog bound into a delay bound through the flow's
        # own arrival curve
        return (bkl - acurve.sigma * (1 - xi[start, server])) / acurve.rho
| {"/NCBounds/FeedForwardAnalyzer.py": ["/NCBounds/Network.py", "/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/two_rings.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/__init__.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Server.py": ["/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/ServiceCurve.py": ["/NCBounds/ArrivalCurve.py"], "/small_network_example.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Network.py": ["/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/Flow.py": ["/NCBounds/ArrivalCurve.py"], "/uniform_ring.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/FixPointAnalyzer.py": ["/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/Flow.py", "/NCBounds/Network.py", "/NCBounds/Server.py"]} |
69,155 | ashishkashinath/NCBounds | refs/heads/master | /uniform_ring_stability.py | import numpy as np
from numpy import linalg as la
R = 100
def mat_exacte(n, R, u):
    """Build the (n-1)x(n-1) fix-point matrix of the Exact method for a
    uniform ring of n servers with rate R and utilization u."""
    rate = u * R / n
    rho = rate / (R - (n - 1) * rate)
    x = np.zeros(n)
    x[0] = rho
    for i in range(1, n):
        x[i] = (x[0] * (n - i) * rate + rate * sum(x[1:i])) / (R - rate)
    size = n - 1
    return np.array([[x[max(i + 1 - j, 0)] for j in range(size)]
                     for i in range(size)])
# For each ring size n (up to N servers), search for the largest utilization
# u that keeps the fix-point iteration stable: decrease u until the matrix's
# dominant eigenvalue drops below 1, record u, then move to the next n.
# NOTE(review): ``max(la.eigvals(mat))`` compares (possibly complex)
# eigenvalues directly; the stability criterion is presumably the spectral
# radius, i.e. max(abs(la.eigvals(mat))) — confirm.
N = 100
# Exact method: stability boundary per ring size
tabEXACT_u = np.ones(N+1)
tab_n = np.arange(N+1)
u = 1
n = 2
b = True
while b :
    r = u*R/n
    ρ = r / (R- (n-1)*r)
    mat = mat_exacte(n, R, u)
    s = max(la.eigvals(mat))
    if s < 1:
        # stable at this u: record it and try the next ring size
        tabEXACT_u[n] = u
        n += 1
    else:
        # unstable: lower the utilization slightly and retry
        u -= 0.0001
    if u < 0 or n > N:
        b = False
# SFA method: matrix is rho everywhere plus a unit subdiagonal
tabSFA_u = np.ones(N+1)
u = 1
n = 2
b = True
while b :
    r = u*R/n
    ρ = r / (R- (n-1)*r)
    mat = ρ * np.ones((n-1,n-1))
    for i in np.arange(n-2):
        mat[i+1,i] = 1
    s = max(la.eigvals(mat))
    if s < 1:
        tabSFA_u[n] = u
        n += 1
    else:
        u -= 0.001
    if u < 0 or n > N:
        b = False
# PMOO method: constant matrix of rho
tabPMOO_u = np.ones(N+1)
u = 1
n = 2
b = True
while b :
    r = u*R/n
    ρ = r / (R- (n-1)*r)
    mat = ρ * np.ones((n-1,n-1))
    s = max(la.eigvals(mat))
    if s < 1:
        tabPMOO_u[n] = u
        n += 1
    else:
        u -= 0.001
    if u < 0 or n > N:
        b = False
# Dump the three stability curves to a tab-separated data file:
# columns are n, Exact, PMOO, SFA.
f = open('./ring_stability.data', 'w')
n = 0
while n<N+1:
    f.write("%f\t" % n)
    f.write("%f\t" % tabEXACT_u[n])
    f.write("%f\t" % tabPMOO_u[n])
    f.write("%f\n" % tabSFA_u[n])
    n += 1
f.close()
# Read the file back into plain lists for plotting.
with open('./ring_stability.data') as f:
    lines = f.readlines()
    n = [float(line.split()[0]) for line in lines]
    exact = [float(line.split()[1]) for line in lines]
    pmoo = [float(line.split()[2]) for line in lines]
    sfa = [float(line.split()[3]) for line in lines]
# NOTE(review): the `with` block above already closed the file; this extra
# close() is redundant (harmless no-op on an already-closed file).
f.close()
import matplotlib.pyplot as pl
# Plot maximum stable utilization vs ring size for the three methods.
pl.plot(n,sfa, c='r', label='SFA')
pl.plot(n,exact, c='b', label='Exact')
pl.plot(n,pmoo, c='y', label='PMOO')
pl.xlabel('Number of servers')
pl.ylabel('Maximum utilization rate for stability')
pl.legend()
pl.axis([0, 100, 0, 1])
pl.show()
| {"/NCBounds/FeedForwardAnalyzer.py": ["/NCBounds/Network.py", "/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/two_rings.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/__init__.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Server.py": ["/NCBounds/ServiceCurve.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/ServiceCurve.py": ["/NCBounds/ArrivalCurve.py"], "/small_network_example.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/Network.py": ["/NCBounds/Flow.py", "/NCBounds/Server.py", "/NCBounds/ArrivalCurve.py"], "/NCBounds/Flow.py": ["/NCBounds/ArrivalCurve.py"], "/uniform_ring.py": ["/NCBounds/ArrivalCurve.py", "/NCBounds/Flow.py", "/NCBounds/ServiceCurve.py", "/NCBounds/Server.py", "/NCBounds/Network.py", "/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/FixPointAnalyzer.py"], "/NCBounds/FixPointAnalyzer.py": ["/NCBounds/FeedForwardAnalyzer.py", "/NCBounds/Flow.py", "/NCBounds/Network.py", "/NCBounds/Server.py"]} |
69,156 | thepavangollapalli-zz/qpacalc | refs/heads/master | /calc/models.py | from django.db import models
class Session(models.Model):
    """Maps a Django session key to the set of courses entered by one visitor."""
    # sid: the key issued by Django's session framework for this browser session
    sid = models.CharField(max_length=300)
    def __str__(self):
        return self.sid
# Create your models here.
class Course(models.Model):
    """A single course row belonging to a Session."""
    session = models.ForeignKey(Session, on_delete = models.CASCADE)
    # courseName: short course identifier entered by the user
    courseName = models.CharField(max_length = 10)
    units = models.IntegerField()
    # grade: letter grade string, e.g. "A"
    grade = models.CharField(max_length = 3)
    # qp: quality points (units * grade multiplier); recomputed and saved by
    # calculate.totalQP
    qp = models.IntegerField()
    def __str__(self):
        return self.courseName
69,157 | thepavangollapalli-zz/qpacalc | refs/heads/master | /calc/views.py | from django.shortcuts import render
from .models import Session, Course
from django.views import generic
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from .calculate import *
# Module-level state shared between the views below: CalcView fills these in
# on POST and index() renders then resets them.
# NOTE(review): module-level mutable state is shared by every request served
# by this process — confirm single-user use or move this into the session.
calculatedTargetQPA = 0
newGradeList = []
desiredQPA = 0
session = None
session_id = None
# Create your views here.
def index(request):
    """Render the main page: the session's courses, totals and QPA, plus any
    target-QPA results stashed in the module globals by CalcView."""
    global calculatedTargetQPA
    global newGradeList
    global desiredQPA
    global session
    global session_id
    request.session.save()
    (units, totalqp, qpa) = (0,0,0)
    # make sure this browser has a session key, and a Session row for it
    if not request.session.exists(request.session.session_key):
        request.session.create()
    if(not Session.objects.filter(sid=request.session.session_key).exists()):
        session = Session(sid=request.session.session_key)
        print("\n\nNEW SESSION", request.session.session_key)
        session.save()
    else:
        print("\n\nSESSION EXISTS", Session.objects.get(sid=request.session.session_key))
        session = Session.objects.get(sid=request.session.session_key)
    # keep the module-level session_id in sync with the current request
    if(request.session.session_key != session_id):
        print("\n\n***SESSION WILL BE CHANGED***")
        print("previous session: ",session_id)
        print("next session:", request.session.session_key)
        session_id = request.session.session_key
    courses = session.course_set.all()
    # totals are only meaningful once at least one course exists
    if(len(courses) > 0):
        units = totalUnits(session_id)
        totalqp = totalQP(session_id)
        qpa = calculateQPA(session_id)
    resultsDict = {"units": units, "totalqp": totalqp, "qpa": qpa}
    if(desiredQPA > 0):
        resultsDict["desiredQPA"] = desiredQPA
    if(calculatedTargetQPA > 0):
        resultsDict["calculatedTargetQPA"] = calculatedTargetQPA
    # NOTE(review): raiseToQPA may return None, which CalcView stores into
    # newGradeList; len(None) here would raise — confirm that code path.
    if(len(newGradeList) > 0):
        resultsDict["passingGradeList"] = formatNewGrades(newGradeList)
    context = {"courses": courses, "results": resultsDict}
    # one-shot values: clear the calculator results after rendering them once
    calculatedTargetQPA = 0
    newGradeList = []
    desiredQPA = 0
    return render(request, "calc/index.html", context)
class AddView(generic.View):
    """Course-entry form: GET renders it, POST stores the new course."""

    def get(self, request):
        global session_id
        session_id = request.session.session_key
        return render(request, "calc/add.html", {"what": None})

    def post(self, request):
        global session
        global session_id
        grade = request.POST["grade"]
        units = request.POST["units"]
        name = request.POST["course"]
        session.course_set.create(courseName=name, units=units,
                                  grade=grade, qp=0)
        # recompute & persist quality points for the whole session
        totalQP(session_id)
        return HttpResponseRedirect(reverse('calc:index'))
class CalcView(generic.View):
    """Target-QPA form: GET renders it, POST computes the required grades
    and stashes the results in the module-level globals for index()."""

    def get(self, request):
        return render(request, 'calc/calc.html')

    def post(self, request):
        global calculatedTargetQPA
        global newGradeList
        global desiredQPA
        sid = request.session.session_key
        desiredQPA = float(request.POST["qpa"])
        newGradeList = raiseToQPA(sid, desiredQPA)
        calculatedTargetQPA = calcDesiredQPA(sid, newGradeList, desiredQPA)
        return HttpResponseRedirect(reverse('calc:index'))
def formatNewGrades(gradeList):
    """Replace integer grade multipliers (4..0) in `gradeList` with letter
    grades ('A'..), in place; non-integer values are left untouched.
    Returns the same (mutated) dict."""
    for name, value in gradeList.items():
        if isinstance(value, int):
            gradeList[name] = chr(abs(value - 4) + 65)
    return gradeList
69,158 | thepavangollapalli-zz/qpacalc | refs/heads/master | /calc/urls.py | from django.conf.urls import url, include
from django.contrib import admin
from . import views
app_name="calc"
# Routes for the calculator app: the index page, the add-course form,
# and the target-QPA calculator.
urlpatterns = [
    url(r'^$', views.index, name="index"),
    url(r'add/', views.AddView.as_view(), name="add"),
    url(r'calc/', views.CalcView.as_view(), name="calc"),
]
69,159 | thepavangollapalli-zz/qpacalc | refs/heads/master | /calc/calculate.py | from .models import Course, Session
from decimal import *
# import copy
def totalUnits(session_id):
    """Sum the unit counts of every course in the given session."""
    session = Session.objects.get(sid=session_id)
    return sum(course.units for course in session.course_set.all())
def totalQP(session_id):
    """Recompute quality points for each course in the session and return
    the total.

    Side effect: updates and saves each course's ``qp`` field
    (units * grade multiplier).
    """
    # letter grade -> multiplier; anything else counts as 0
    multipliers = {"A": 4, "B": 3, "C": 2, "D": 1}
    session = Session.objects.get(sid=session_id)
    qp = 0
    for course in session.course_set.all():
        course.qp = course.units * multipliers.get(course.grade, 0)
        course.save()
        qp += course.qp
    return qp
def calculateQPA(session_id):
    """Total quality points divided by total units, truncated (ROUND_DOWN)
    to two decimal places."""
    units = totalUnits(session_id)
    points = totalQP(session_id)
    ratio = points / units
    return Decimal(str(ratio)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
def raiseToQPA(session_id, finalQPA):
    """
    Greedily raise course grades, one letter step at a time, until the
    session's QPA reaches ``finalQPA``.

    :param session_id: session key identifying the course set
    :param finalQPA: target QPA to reach
    :return: dict mapping course name -> grade multiplier (4 == A) that
        achieves the target, or None when even straight As cannot reach it
    """
    session = Session.objects.get(sid=session_id)
    currGrades = dict()
    for course in session.course_set.all():
        # letter grade -> multiplier: A=4, B=3, C=2, D=1, ...
        currGrades[course.courseName] = 4 - (ord(course.grade) - 65)
    changedCourse = dict()
    for course in currGrades:
        if currGrades[course] == 4:  # already an A, cannot raise further
            continue
        for _ in range(4 - currGrades[course]):
            currGrades[course] += 1
            # BUGFIX: the first raise must be counted as 1 (was recorded as
            # 0), otherwise the restore loop below under-reverts each course
            # by one grade step.
            changedCourse[course] = changedCourse.get(course, 0) + 1
            if testQPA(session_id, currGrades, finalQPA):
                return currGrades
    # target unreachable: restore the original grades before returning
    for course, steps in changedCourse.items():
        currGrades[course] -= steps
    return None
def calcDesiredQPA(session_id, gradeList, finalQPA):
    """
    Compute the QPA the session would have under the grade multipliers in
    ``gradeList`` (course name -> multiplier), rounded to two decimals.

    :param finalQPA: unused; kept for backward compatibility with callers
    :rtype: float
    """
    qp = 0
    units = 0
    session = Session.objects.get(sid=session_id)
    for course in session.course_set.all():
        qp += course.units * gradeList[course.courseName]
        units += course.units
    # debug print removed; quantize to two decimal places
    return float(Decimal(str(qp / units)).quantize(Decimal("0.01")))
def testQPA(session_id, gradeList, finalQPA):
    """Return True when `gradeList` achieves at least `finalQPA`."""
    return calcDesiredQPA(session_id, gradeList, finalQPA) >= finalQPA
| {"/calc/views.py": ["/calc/models.py", "/calc/calculate.py"], "/calc/calculate.py": ["/calc/models.py"], "/calc/admin.py": ["/calc/models.py"]} |
69,160 | thepavangollapalli-zz/qpacalc | refs/heads/master | /calc/admin.py | from django.contrib import admin
from .models import Course, Session
# Register your models here.
# Expose the calculator models in the Django admin site.
admin.site.register(Course)
admin.site.register(Session)
69,161 | thepavangollapalli-zz/qpacalc | refs/heads/master | /calc/migrations/0005_auto_20160112_1504.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-12 15:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header): creates the Session table
    # and attaches every existing Course to it via a foreign key.
    # NOTE(review): here Session uses 'id' as a CharField primary key, while
    # models.py declares a plain 'sid' field — presumably reconciled by a
    # later migration; verify.
    dependencies = [
        ('calc', '0004_course_qp'),
    ]
    operations = [
        migrations.CreateModel(
            name='Session',
            fields=[
                ('id', models.CharField(max_length=300, primary_key=True, serialize=False)),
            ],
        ),
        migrations.AddField(
            model_name='course',
            name='session',
            # default=1 backfills existing rows; preserve_default=False drops
            # the default afterwards
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='calc.Session'),
            preserve_default=False,
        ),
    ]
69,198 | vwhu/impedance.py | refs/heads/master | /impedance/tests/test_model_io.py | import numpy as np
from impedance.model_io import model_export, model_import
from impedance.circuits import CustomCircuit
def test_model_io():
    """A fitted model exported to JSON and re-imported must compare equal."""
    # load the example impedance spectrum shipped with the test data
    data = np.genfromtxt('./data/exampleData.csv', delimiter=',')
    frequencies = data[:, 0]
    Z = data[:, 1] + 1j*data[:, 2]

    randles = CustomCircuit(initial_guess=[.01, .005, .1, .005, .1, .001, 200],
                            circuit='R_0-p(R_1,C_1)-p(R_1,C_1)-W_1/W_2')
    randles.fit(frequencies, Z)
    print(randles)

    model_export(randles, './test_io.json')
    reloaded = model_import('./test_io.json')
    print(reloaded)

    assert randles == reloaded
| {"/impedance/tests/test_model_io.py": ["/impedance/model_io.py", "/impedance/circuits.py"], "/impedance/tests/test_fitting.py": ["/impedance/fitting.py"], "/impedance/tests/test_validation.py": ["/impedance/validation.py"], "/impedance/validation.py": ["/impedance/fitting.py"], "/impedance/circuits.py": ["/impedance/fitting.py", "/impedance/plotting.py"], "/impedance/tests/test_circuits.py": ["/impedance/circuits.py"], "/impedance/tests/test_plotting.py": ["/impedance/plotting.py"], "/impedance/model_io.py": ["/impedance/circuits.py"]} |
69,199 | vwhu/impedance.py | refs/heads/master | /impedance/tests/test_fitting.py | from impedance.fitting import buildCircuit, rmse
import numpy as np
# def test_residuals():
# pass
#
#
# def test_valid():
# pass
#
#
# def test_computeCircuit():
# pass
def test_buildCircuit():
    """buildCircuit must serialize a circuit description into the
    element-call string that is later eval()'d by computeCircuit."""
    # Test simple Randles circuit with CPE
    circuit = 'R_0-p(R_1, E_1/E_2)-W_1/W_2'
    params = [.1, .01, 15, .9, 1, 1000]
    frequencies = [1000.0, 5.0, 0.01]
    assert buildCircuit(circuit, frequencies, *params)[0].replace(' ', '') == \
        's([R([0.1],[1000.0,5.0,0.01]),' + \
        'p([R([0.01],[1000.0,5.0,0.01]),' + \
        'E([15.0,0.9],[1000.0,5.0,0.01])]),' + \
        'W([1.0,1000.0],[1000.0,5.0,0.01])])'
    # Test multiple parallel elements
    circuit = 'R_0-p(C_1,R_1,R_2)'
    params = [.1, .01, .2, .3]
    frequencies = [1000.0, 5.0, 0.01]
    assert buildCircuit(circuit, frequencies, *params)[0].replace(' ', '') == \
        's([R([0.1],[1000.0,5.0,0.01]),' + \
        'p([C([0.01],[1000.0,5.0,0.01]),' + \
        'R([0.2],[1000.0,5.0,0.01]),' + \
        'R([0.3],[1000.0,5.0,0.01])])])'
    # Test nested parallel groups
    circuit = 'R_0-p(p(R_1, C_1)-R_2, C_2)'
    params = [1, 2, 3, 4, 5]
    frequencies = [1000.0, 5.0, 0.01]
    assert buildCircuit(circuit, frequencies, *params)[0].replace(' ', '') == \
        's([R([1],[1000.0,5.0,0.01]),' + \
        'p([s([p([R([2],[1000.0,5.0,0.01]),' + \
        'C([3],[1000.0,5.0,0.01])]),' + \
        'R([4],[1000.0,5.0,0.01])]),' + \
        'C([5],[1000.0,5.0,0.01])])])'
def test_RMSE():
    """rmse is zero for identical vectors and matches a hand-computed case."""
    same = np.array([2 + 4*1j, 3 + 2*1j])
    assert rmse(same, np.array([2 + 4*1j, 3 + 2*1j])) == 0.0

    # element-wise difference is (-2+2j, -2+2j): rmse = sqrt(16/2) = 2*sqrt(2)
    left = np.array([2 + 4*1j, 1 + 4*1j])
    right = np.array([4 + 2*1j, 3 + 2*1j])
    assert np.isclose(rmse(left, right), 2*np.sqrt(2))
| {"/impedance/tests/test_model_io.py": ["/impedance/model_io.py", "/impedance/circuits.py"], "/impedance/tests/test_fitting.py": ["/impedance/fitting.py"], "/impedance/tests/test_validation.py": ["/impedance/validation.py"], "/impedance/validation.py": ["/impedance/fitting.py"], "/impedance/circuits.py": ["/impedance/fitting.py", "/impedance/plotting.py"], "/impedance/tests/test_circuits.py": ["/impedance/circuits.py"], "/impedance/tests/test_plotting.py": ["/impedance/plotting.py"], "/impedance/model_io.py": ["/impedance/circuits.py"]} |
69,200 | vwhu/impedance.py | refs/heads/master | /impedance/tests/test_validation.py | from impedance.validation import calc_mu, eval_linKK, residuals_linKK
import numpy as np
def test_eval_linKK():
    """eval_linKK must compute R_ohm + sum of R_k / (1 + j*f*tau_k), and
    residuals_linKK must return the |Z|-normalized real/imag/interleaved
    errors."""
    Rs = [1, 2, 3]
    ts = [.1, .2]
    f = np.array([.01, 1000])
    # series resistor plus two RC (K) elements, evaluated by hand
    Z = Rs[0] + (Rs[1]/(1 + ts[0]*1j*f)) + (Rs[2]/(1 + ts[1]*1j*f))
    assert (eval_linKK(Rs, ts, f) == Z).all()
    Z_data = Z + np.array([1 + 1j, 1 + 1j])
    # residuals are normalized by |Z_data|
    assert (residuals_linKK(Rs, ts, Z_data, f) ==
            (Z_data - Z).real/np.abs(Z_data)).all()
    assert (residuals_linKK(Rs, ts, Z_data, f, residuals='imag') ==
            (Z_data - Z).imag/np.abs(Z_data)).all()
    diff_real = (Z_data - Z).real/np.abs(Z_data)
    diff_imag = (Z_data - Z).imag/np.abs(Z_data)
    # 'both' interleaves the real and imaginary residuals
    assert (residuals_linKK(Rs, ts, Z_data, f, residuals='both') ==
            [diff_real[0], diff_imag[0], diff_real[1], diff_imag[1]]).all()
def test_calc_mu():
    """calc_mu = 1 - (negative resistor mass) / (positive resistor mass)."""
    balanced = [1, 2, 3, -3, -2, -1]
    assert calc_mu(balanced) == 0

    half_negative = [-1, 2, 4, -3, 4, -1]
    assert calc_mu(half_negative) == 0.5
| {"/impedance/tests/test_model_io.py": ["/impedance/model_io.py", "/impedance/circuits.py"], "/impedance/tests/test_fitting.py": ["/impedance/fitting.py"], "/impedance/tests/test_validation.py": ["/impedance/validation.py"], "/impedance/validation.py": ["/impedance/fitting.py"], "/impedance/circuits.py": ["/impedance/fitting.py", "/impedance/plotting.py"], "/impedance/tests/test_circuits.py": ["/impedance/circuits.py"], "/impedance/tests/test_plotting.py": ["/impedance/plotting.py"], "/impedance/model_io.py": ["/impedance/circuits.py"]} |
69,201 | vwhu/impedance.py | refs/heads/master | /impedance/validation.py | import numpy as np
from scipy.optimize import least_squares
from .fitting import rmse
def linKK(f, Z, c=0.85, max_M=50):
    """ A method for implementing the Lin-KK test for validating linearity [1]

    Parameters
    ----------
    f: np.ndarray
        measured frequencies
    Z: np.ndarray of complex numbers
        measured impedances
    c: np.float
        cutoff for mu
    max_M: int
        the maximum number of RC elements

    Returns
    -------
    mu: np.float
        under- or over-fitting measure
    residuals: np.ndarray of complex numbers
        the residuals of the fit at input frequencies
    Z_fit: np.ndarray of complex numbers
        impedance of fit at input frequencies

    Notes
    -----
    The lin-KK method from Schönleber et al. [1] is a quick test for checking
    the validity of EIS data. The validity of an impedance spectrum is
    analyzed by its reproducibility by a Kramers-Kronig (KK) compliant
    equivalent circuit. In particular, the model used in the lin-KK test is
    an ohmic resistor, :math:`R_{Ohm}`, and :math:`M` RC elements.

    .. math::

        \\hat Z = R_{Ohm} + \\sum_{k=1}^{M} \\frac{R_k}{1 + j \\omega \\tau_k}

    The :math:`M` time constants, :math:`\\tau_k`, are distributed
    logarithmically,

    .. math::
        \\tau_1 = \\frac{1}{\\omega_{max}} ; \\tau_M = \\frac{1}{\\omega_{min}}
        ; \\tau_k = 10^{\\log{(\\tau_{min}) + \\frac{k-1}{M-1}\\log{{(
        \\frac{\\tau_{max}}{\\tau_{min}}}})}}

    and are not fit during the test (only :math:`R_{Ohm}` and :math:`R_{k}`
    are free parameters).

    In order to prevent under- or over-fitting, Schönleber et al. propose
    using the ratio of positive resistor mass to negative resistor mass as
    a metric for finding the optimal number of RC elements.

    .. math::

        \\mu = 1 - \\frac{\\sum_{R_k \\ge 0} |R_k|}{\\sum_{R_k < 0} |R_k|}

    The argument :code:`c` defines the cutoff value for :math:`\\mu`. The
    algorithm starts at :code:`M = 3` and iterates up to :code:`max_M` until
    a :math:`\\mu < c` is reached. The default of 0.85 is simply a heuristic
    value based off of the experience of Schönleber et al.

    If the argument :code:`c` is :code:`None`, then the automatic
    determination of RC elements is turned off and the solution is
    calculated for :code:`max_M` RC elements. This manual mode should be
    used with caution as under- and over-fitting should be avoided.

    [1] Schönleber, M. et al. A Method for Improving the Robustness of
    linear Kramers-Kronig Validity Tests. Electrochimica Acta 131, 20–27 (2014)
    `doi: 10.1016/j.electacta.2014.01.034
    <https://doi.org/10.1016/j.electacta.2014.01.034>`_.
    """

    def get_tc_distribution(f, M):
        """ Returns the distribution of time constants for the linKK method """
        t_max = 1/np.min(f)
        t_min = 1/np.max(f)
        ts = np.zeros(shape=(M,))
        ts[0] = t_min
        ts[-1] = t_max
        if M > 1:
            for k in range(2, M):
                ts[k-1] = 10**(np.log10(t_min) +
                               ((k-1)/(M-1))*np.log10(t_max/t_min))
        ts *= 2*np.pi
        return ts

    if c is not None:
        # automatic mode: grow M until the under/over-fitting measure mu
        # drops below the cutoff c (or max_M is hit)
        M = 0
        mu = 1
        while mu > c and M <= max_M:
            M += 1
            ts = get_tc_distribution(f, M)
            p_values, mu = fitLinKK(f, ts, M, Z)

            if M % 10 == 0:
                print(M, mu, rmse(eval_linKK(p_values, ts, f), Z))
    else:
        # manual mode: fit exactly max_M RC elements
        M = max_M
        ts = get_tc_distribution(f, M)
        # BUGFIX: fitLinKK takes (f, ts, M, Z); the time-constant array was
        # previously omitted here, making manual (c=None) mode crash with a
        # TypeError.
        p_values, mu = fitLinKK(f, ts, M, Z)

    return M, mu, eval_linKK(p_values, ts, f), \
        residuals_linKK(p_values, ts, Z, f, residuals='real'), \
        residuals_linKK(p_values, ts, Z, f, residuals='imag')
def fitLinKK(f, ts, M, Z):
    """ Fits the linKK model using scipy.optimize.least_squares """
    real_part = np.real(Z)
    # start from the measured ohmic resistance and spread the remaining
    # resistance evenly over the M RC elements
    even_split = (max(real_part) - min(real_part)) / M
    initial_guess = np.append(min(real_part),
                              even_split * np.ones(shape=(M,)))

    result = least_squares(residuals_linKK, initial_guess, method='lm',
                           args=(ts, Z, f, 'both'),
                           ftol=1E-13, gtol=1E-10)
    p_values = result['x']
    return p_values, calc_mu(p_values[1:])
def eval_linKK(Rs, ts, f):
    """ Computes the impedance of the lin-KK circuit at frequencies `f`:
    an ohmic resistor Rs[0] in series with RC elements
    K(R_k, tau_k) = R_k / (1 + 1j*f*tau_k).

    Replaces the previous string-building + eval() implementation with a
    direct vectorized computation (same values, no dynamic code execution).
    """
    f = np.asarray(f)
    # start with the ohmic resistor at every frequency
    Z = np.full_like(f, Rs[0], dtype=complex)
    for Rk, tk in zip(Rs[1:], ts):
        Z = Z + Rk / (1 + 1j * f * tk)
    return Z
def residuals_linKK(Rs, ts, Z, f, residuals='real'):
    """ Calculates the residual between the data and a LinKK fit,
    normalized by |Z|.  `residuals` selects the real part, the imaginary
    part, or both interleaved (real0, imag0, real1, imag1, ...). """
    err = Z - eval_linKK(Rs, ts, f)
    magnitude = np.abs(Z)

    if residuals == 'real':
        return err.real / magnitude
    elif residuals == 'imag':
        return err.imag / magnitude
    elif residuals == 'both':
        interleaved = np.zeros(Z.size * 2, dtype=np.float64)
        interleaved[0::2] = err.real / magnitude
        interleaved[1::2] = err.imag / magnitude
        return interleaved
def calc_mu(Rs):
    """ Calculates mu for use in LinKK:
    1 - (negative resistor mass) / (positive resistor mass). """
    negative_mass = sum(-r for r in Rs if r < 0)
    positive_mass = sum(r for r in Rs if r >= 0)
    return 1 - negative_mass / positive_mass
| {"/impedance/tests/test_model_io.py": ["/impedance/model_io.py", "/impedance/circuits.py"], "/impedance/tests/test_fitting.py": ["/impedance/fitting.py"], "/impedance/tests/test_validation.py": ["/impedance/validation.py"], "/impedance/validation.py": ["/impedance/fitting.py"], "/impedance/circuits.py": ["/impedance/fitting.py", "/impedance/plotting.py"], "/impedance/tests/test_circuits.py": ["/impedance/circuits.py"], "/impedance/tests/test_plotting.py": ["/impedance/plotting.py"], "/impedance/model_io.py": ["/impedance/circuits.py"]} |
69,202 | vwhu/impedance.py | refs/heads/master | /impedance/fitting.py | from .circuit_elements import R, C, L, W, A, E, G, s, p # noqa: F401
import numpy as np
from scipy.optimize import curve_fit
def rmse(a, b):
    """
    A function which calculates the root mean squared error
    between two vectors.

    Notes
    ---------
    .. math::

        RMSE = \\sqrt{\\frac{1}{n}(a-b)^2}
    """
    # Euclidean norm of the difference, scaled by sqrt(vector length)
    return np.linalg.norm(a - b) / np.sqrt(len(a))
def circuit_fit(frequencies, impedances, circuit, initial_guess,
                method='lm', bounds=None, bootstrap=False):
    """ Main function for fitting an equivalent circuit to data
    Parameters
    -----------------
    frequencies : numpy array
        Frequencies
    impedances : numpy array of dtype 'complex128'
        Impedances
    circuit : string
        string defining the equivalent circuit to be fit
    initial_guess : list of floats
        initial guesses for the fit parameters
    method : {‘lm’, ‘trf’, ‘dogbox’}, optional
        Name of method to pass to scipy.optimize.curve_fit
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on parameters. Defaults to bounds on all
        parameters of 0 and np.inf, except the CPE alpha
        which has an upper bound of 1
    Returns
    ------------
    p_values : list of floats
        best fit parameters for specified equivalent circuit
    p_errors : list of floats
        one standard deviation error estimates for fit parameters
    Notes
    ---------
    Need to do a better job of handling errors in fitting.
    Currently, an error of -1 is returned.
    """
    # NOTE(review): the `method` and `bootstrap` arguments are accepted but
    # never used below (curve_fit is called without them) — confirm intent.
    circuit = circuit.replace('_', '')
    f = frequencies
    Z = impedances
    if bounds is None:
        # Build default bounds: every parameter in [0, inf), except the CPE
        # exponent (the element-letter/index pair 'E2') which is capped at 1.
        lb, ub = [], []
        # strip structural characters, leaving element letters and indices
        p_string = [x for x in circuit if x not in 'ps(),-/']
        for i, (a, b) in enumerate(zip(p_string[::2], p_string[1::2])):
            lb.append(0)
            if str(a+b) == "E2":
                ub.append(1)
            else:
                ub.append(np.inf)
        bounds = ((lb), (ub))
    # fit real and imaginary parts simultaneously by stacking them into one
    # real-valued target vector
    popt, pcov = curve_fit(wrapCircuit(circuit), f,
                           np.hstack([Z.real, Z.imag]), p0=initial_guess,
                           bounds=bounds, maxfev=100000, ftol=1E-13)
    # one-standard-deviation parameter uncertainties from the covariance
    perror = np.sqrt(np.diag(pcov))
    return popt, perror
def wrapCircuit(circuit):
    """Close over *circuit* so curve_fit sees a plain f(frequencies, *params)."""
    def wrapped(frequencies, *parameters):
        """Evaluate the circuit and return [Re(Z), Im(Z)] stacked.

        Parameters
        ----------
        frequencies : list of floats
        parameters : list of floats

        Returns
        -------
        array of floats
        """
        expression = buildCircuit(circuit, frequencies, *parameters,
                                  eval_string='', index=0)[0]
        z = eval(expression)
        return np.hstack([np.real(z), np.imag(z)])
    return wrapped
def computeCircuit(circuit, frequencies, *parameters):
    """Evaluate a circuit string at the given parameters and frequencies.

    Parameters
    ----------
    circuit : string
    frequencies : list/tuple/array of floats
    parameters : list/tuple/array of floats

    Returns
    -------
    array of complex numbers
    """
    expression, _ = buildCircuit(circuit, frequencies, *parameters,
                                 eval_string='', index=0)
    return eval(expression)
def buildCircuit(circuit, frequencies, *parameters, eval_string='', index=0):
    """ Recursive function that transforms a circuit, parameters, and
    frequencies into a Python expression string that can be evaluated.

    Parameters
    ----------
    circuit: str
        e.g. 'R0-p(R1,C1)' — '-' separates series elements, 'p(x,y)'
        groups parallel elements, '/' separates multi-parameter elements
    parameters: list/tuple/array of floats
        flat parameter list, consumed in element order starting at ``index``
    frequencies: list/tuple/array of floats

    Returns
    -------
    eval_string: str
        Python expression for calculating the resulting fit
    index: int
        Tracks parameter index through recursive calling of the function
    """
    # normalize containers so str() renders them as plain Python lists below
    parameters = np.array(parameters).tolist()
    frequencies = np.array(frequencies).tolist()
    circuit = circuit.replace(' ', '')

    def parse_circuit(circuit, parallel=False, series=False):
        """ Splits a circuit string by either dashes (series) or commas
        (parallel) outside of any parenthesis. Removes any leading 'p('
        or trailing ')' when in parallel mode. """
        assert parallel != series, \
            'Exactly one of parallel or series must be True'

        def count_parens(string):
            # (open, close) counts — used to detect splits inside a group
            return string.count('('), string.count(')')

        if parallel:
            special = ','
            # strip the enclosing p( ... ) wrapper before splitting
            if circuit.endswith(')') and circuit.startswith('p('):
                circuit = circuit[2:-1]
        if series:
            special = '-'

        split = circuit.split(special)
        result = []
        skipped = []
        for i, sub_str in enumerate(split):
            if i not in skipped:
                if '(' not in sub_str and ')' not in sub_str:
                    result.append(sub_str)
                else:
                    open_parens, closed_parens = count_parens(sub_str)
                    if open_parens == closed_parens:
                        result.append(sub_str)
                    else:
                        # the split cut through a p(...) group: re-join
                        # following pieces until parentheses balance again
                        uneven = True
                        while i < len(split) - 1 and uneven:
                            sub_str += special + split[i+1]
                            open_parens, closed_parens = count_parens(sub_str)
                            uneven = open_parens != closed_parens
                            i += 1
                            skipped.append(i)
                        result.append(sub_str)
        return result

    parallel = parse_circuit(circuit, parallel=True)
    series = parse_circuit(circuit, series=True)

    # NOTE(review): if the circuit splits into neither multiple parallel nor
    # multiple series parts, `split` is never bound and the loop below raises
    # NameError — confirm callers always pass a circuit with a separator.
    if parallel is not None and len(parallel) > 1:
        eval_string += "p(["
        split = parallel
    elif series is not None and len(series) > 1:
        eval_string += "s(["
        split = series

    for i, elem in enumerate(split):
        if ',' in elem or '-' in elem:
            # composite sub-circuit: recurse, threading eval_string and index
            eval_string, index = buildCircuit(elem, frequencies,
                                              *parameters,
                                              eval_string=eval_string,
                                              index=index)
        else:
            # leaf element: consume one parameter per '/'-separated part
            param_string = ""
            elem_number = len(elem.split("/"))
            param_string += str(parameters[index:index + elem_number])
            # emit e.g. R([0.1],[1.0, 10.0]) — elem[0] is the element letter
            new = elem[0] + '(' + param_string + ',' + str(frequencies) + ')'
            eval_string += new
            index += elem_number

        if i == len(split) - 1:
            eval_string += '])'
        else:
            eval_string += ','

    return eval_string, index
def calculateCircuitLength(circuit):
    """Count how many fit parameters the circuit string requires.

    Each occurrence of an element letter (R, E, W, C, L, A, G) contributes
    one parameter slot.
    """
    return sum(circuit.count(symbol) for symbol in ('R', 'E', 'W', 'C', 'L', 'A', 'G'))
| {"/impedance/tests/test_model_io.py": ["/impedance/model_io.py", "/impedance/circuits.py"], "/impedance/tests/test_fitting.py": ["/impedance/fitting.py"], "/impedance/tests/test_validation.py": ["/impedance/validation.py"], "/impedance/validation.py": ["/impedance/fitting.py"], "/impedance/circuits.py": ["/impedance/fitting.py", "/impedance/plotting.py"], "/impedance/tests/test_circuits.py": ["/impedance/circuits.py"], "/impedance/tests/test_plotting.py": ["/impedance/plotting.py"], "/impedance/model_io.py": ["/impedance/circuits.py"]} |
69,203 | vwhu/impedance.py | refs/heads/master | /impedance/plotting.py | import numpy as np
from matplotlib.ticker import ScalarFormatter
class FixedOrderFormatter(ScalarFormatter):
    """Formats axis ticks using scientific notation with a constant order of
    magnitude, instead of letting matplotlib pick one per view."""

    def __init__(self, order_of_mag=0, useOffset=True, useMathText=True):
        # remember the fixed exponent so _set_orderOfMagnitude can pin it
        self._order_of_mag = order_of_mag
        ScalarFormatter.__init__(self, useOffset=useOffset,
                                 useMathText=useMathText)

    def _set_orderOfMagnitude(self, range):
        """Over-riding this to avoid having orderOfMagnitude reset elsewhere.

        (`range` shadows the builtin, but the signature is dictated by the
        ScalarFormatter hook being overridden.)
        """
        self.orderOfMagnitude = self._order_of_mag
def plot_nyquist(ax, freq, Z, scale=1, units='Ohms', fmt='.-'):
    """ Convenience function for plotting Nyquist plots (Re(Z) vs -Im(Z)).

    Parameters
    ----------
    ax: matplotlib.axes.Axes
        axes on which to plot the nyquist plot
    freq: np.array of floats
        frequencies (currently unused by this function; kept for API symmetry)
    Z: np.array of complex numbers
        impedance data
    scale: float
        the scale for the axes
    units: string
        units for :math:`Z(\\omega)`
    fmt: string
        format string passed to matplotlib (e.g. '.-' or 'o')

    Returns
    -------
    ax: matplotlib.axes.Axes
    """
    # Nyquist convention: x = Re(Z), y = -Im(Z)
    ax.plot(np.real(Z), -np.imag(Z), fmt, lw=3)

    # Make the axes square
    ax.set_aspect('equal')

    # Set the labels to -imaginary vs real
    ax.set_xlabel(r'$Z^{\prime}(\omega)$ ' +
                  '$[{}]$'.format(units), fontsize=20)
    ax.set_ylabel(r'$-Z^{\prime\prime}(\omega)$ ' +
                  '$[{}]$'.format(units), fontsize=20)

    # Make the tick labels larger
    ax.tick_params(axis='both', which='major', labelsize=14)

    # Change the number of labels on each axis to five
    ax.locator_params(axis='x', nbins=5, tight=True)
    ax.locator_params(axis='y', nbins=5, tight=True)

    # Add a light grid
    # NOTE(review): `b=` was renamed `visible=` in newer matplotlib — confirm
    # the targeted matplotlib version before upgrading.
    ax.grid(b=True, which='major', axis='both', alpha=.5)

    # Change axis units to 10**log10(scale) and resize the offset text
    ax.xaxis.set_major_formatter(FixedOrderFormatter(-np.log10(scale)))
    ax.yaxis.set_major_formatter(FixedOrderFormatter(-np.log10(scale)))
    y_offset = ax.yaxis.get_offset_text()
    y_offset.set_size(18)
    t = ax.xaxis.get_offset_text()
    t.set_size(18)
    return ax
| {"/impedance/tests/test_model_io.py": ["/impedance/model_io.py", "/impedance/circuits.py"], "/impedance/tests/test_fitting.py": ["/impedance/fitting.py"], "/impedance/tests/test_validation.py": ["/impedance/validation.py"], "/impedance/validation.py": ["/impedance/fitting.py"], "/impedance/circuits.py": ["/impedance/fitting.py", "/impedance/plotting.py"], "/impedance/tests/test_circuits.py": ["/impedance/circuits.py"], "/impedance/tests/test_plotting.py": ["/impedance/plotting.py"], "/impedance/model_io.py": ["/impedance/circuits.py"]} |
69,204 | vwhu/impedance.py | refs/heads/master | /impedance/circuits.py | from .fitting import circuit_fit, buildCircuit, calculateCircuitLength
from .plotting import plot_nyquist
from .circuit_elements import R, C, L, W, A, E, G, s, p # noqa: F401
import numpy as np
import os
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt # noqa E402
class BaseCircuit:
""" Base class for equivalent circuit models """
def __init__(self, initial_guess, name=None, bounds=None):
""" Base constructor for any equivalent circuit model
Parameters
----------
initial_guess: numpy array
Initial guess of the circuit values
"""
# if supplied, check that initial_guess is valid and store
for i in initial_guess:
assert isinstance(i, (float, int, np.int32, np.float64)),\
'value {} in initial_guess is not a number'.format(i)
# initalize class attributes
self.initial_guess = initial_guess
self.name = name
# initialize fit parameters and confidence intervals
self.parameters_ = None
self.conf_ = None
def __eq__(self, other):
if self.__class__ == other.__class__:
matches = []
for key, value in self.__dict__.items():
if isinstance(value, np.ndarray):
matches.append((value == other.__dict__[key]).all())
else:
matches.append(value == other.__dict__[key])
return np.array(matches).all()
else:
raise TypeError('Comparing object is not of the same type.')
    def fit(self, frequencies, impedance, method='lm', bounds=None):
        """ Fit the circuit model.

        Parameters
        ----------
        frequencies: numpy array
            Frequencies
        impedance: numpy array of dtype 'complex128'
            Impedance values to fit
        method: {'lm', 'trf', 'dogbox'}, optional
            Name of method to pass to scipy.optimize.curve_fit
        bounds: 2-tuple of array_like, optional
            Lower and upper bounds on parameters. Defaults to bounds on all
            parameters of 0 and np.inf, except the CPE alpha
            which has an upper bound of 1

        Returns
        -------
        self: returns an instance of self

        Raises
        ------
        ValueError
            If no initial guess was supplied at construction time.
        """
        # Input validation:
        #   frequencies — ndarray of numbers
        #   impedance   — ndarray of complex numbers, same length
        assert isinstance(frequencies, np.ndarray),\
            'frequencies is not of type np.ndarray'
        assert isinstance(frequencies[0], (float, int, np.int32, np.float64)),\
            'frequencies does not contain a number'
        assert isinstance(impedance, np.ndarray),\
            'impedance is not of type np.ndarray'
        assert isinstance(impedance[0], (complex, np.complex128)),\
            'impedance does not contain complex numbers'
        assert len(frequencies) == len(impedance),\
            'mismatch in length of input frequencies and impedances'

        if self.initial_guess is not None:
            # delegate the optimization to fitting.circuit_fit
            parameters, conf = circuit_fit(frequencies, impedance,
                                           self.circuit, self.initial_guess,
                                           method=method, bounds=bounds)
            self.parameters_ = parameters
            if conf is not None:
                self.conf_ = conf
        else:
            # TODO auto calc guess
            raise ValueError('no initial guess supplied')

        return self
def _is_fit(self):
""" check if model has been fit (parameters_ is not None) """
if self.parameters_ is not None:
return True
else:
return False
    def predict(self, frequencies, use_initial=False):
        """ Predict impedance using a fit equivalent circuit model.

        Parameters
        ----------
        frequencies: numpy array
            Frequencies
        use_initial: boolean
            If true and a fit was already completed,
            use the initial parameters instead

        Returns
        -------
        impedance: numpy array of dtype 'complex128'
            Predicted impedance
        """
        # Input validation: frequencies must be an ndarray of numbers
        assert isinstance(frequencies, np.ndarray),\
            'frequencies is not of type np.ndarray'
        assert isinstance(frequencies[0], (float, int, np.int32, np.float64)),\
            'frequencies does not contain a number'

        if self._is_fit() and not use_initial:
            # build the circuit expression with fitted parameters and eval it
            return eval(buildCircuit(self.circuit, frequencies,
                                     *self.parameters_, eval_string='',
                                     index=0)[0])
        else:
            # fall back to the initial guess (also used before any fit)
            print("Simulating circuit based on initial parameters")
            return eval(buildCircuit(self.circuit, frequencies,
                                     *self.initial_guess, eval_string='',
                                     index=0)[0])
def get_param_names(self):
"""Converts circuit string to names"""
# parse the element names from the circuit string
names = self.circuit.replace('p', '').replace('(', '').replace(')', '')
names = names.replace(',', '-').replace('/', '-').split('-')
return names
def get_verbose_string(self):
""" Defines the pretty printing of all data in the circuit"""
names = self.get_param_names()
to_print = '\n-------------------------------\n' # noqa E222
to_print += 'Circuit: {}\n'.format(self.name)
to_print += 'Circuit string: {}\n'.format(self.circuit)
if self._is_fit():
to_print += "Fit: True\n"
else:
to_print += "Fit: False\n"
to_print += '\n-------------------------------\n'
to_print += 'Initial guesses:\n'
for name, param in zip(names, self.initial_guess):
to_print += '\t{} = {:.2e}\n'.format(name, param)
if self._is_fit():
to_print += '\n-------------------------------\n'
to_print += 'Fit parameters:\n'
for name, param, conf in zip(names, self.parameters_, self.conf_):
to_print += '\t{} = {:.2e}'.format(name, param)
to_print += '\t(+/- {:.2e})\n'.format(conf)
return to_print
def __str__(self):
""" Defines the pretty printing of the circuit """
names = self.get_param_names()
to_print = '\n-------------------------------\n' # noqa E222
to_print += 'Circuit: {}\n'.format(self.name)
to_print += 'Circuit string: {}\n'.format(self.circuit)
if self._is_fit():
to_print += "Fit: True\n"
else:
to_print += "Fit: False\n"
if self._is_fit():
to_print += '\n-------------------------------\n'
to_print += 'Fit parameters:\n'
for name, param, conf in zip(names, self.parameters_, self.conf_):
to_print += '\t{} = {:.2e}\n'.format(name, param)
else:
to_print += '\n-------------------------------\n'
to_print += 'Initial guesses:\n'
for name, param in zip(names, self.initial_guess):
to_print += '\t{} = {:.2e}\n'.format(name, param)
return to_print
def plot(self, ax=None, f_data=None, Z_data=None,
conf_bounds=None, scale=1, units='Ohms'):
""" a convenience method for plotting Nyquist plots
Parameters
----------
f_data: np.array of type float
Frequencies of input data (for Bode plots)
Z_data: np.array of type complex
Impedance data to plot
conf_bounds: {'error_bars', 'filled', 'filledx', 'filledy'}, optional
Include bootstrapped confidence bands (95%) on the predicted best
fit model shown as either error bars or a filled confidence area.
Confidence bands are estimated by simulating the spectra for 10000
randomly sampled parameter sets where each of the parameters is
sampled from a normal distribution
Returns
-------
ax: matplotlib.axes
axes of the created nyquist plot
"""
if ax is None:
fig, ax = plt.subplots(figsize=(5, 5))
if Z_data is not None:
ax = plot_nyquist(ax, f_data, Z_data,
scale=scale, units=units, fmt='s')
if self._is_fit():
if f_data is not None:
f_pred = f_data
else:
f_pred = np.logspace(5, -3)
Z_fit = self.predict(f_pred)
ax = plot_nyquist(ax, f_data, Z_fit,
scale=scale, units=units, fmt='s')
base_ylim, base_xlim = ax.get_ylim(), ax.get_xlim()
if conf_bounds is not None:
N = 10000
n = len(self.parameters_)
f_pred = np.logspace(np.log10(min(f_data)),
np.log10(max(f_data)),
num=100)
params = self.parameters_
confs = self.conf_
full_range = np.ndarray(shape=(N, len(f_pred)), dtype=complex)
for i in range(N):
self.parameters_ = params + \
confs*np.random.randn(n)
full_range[i, :] = self.predict(f_pred)
self.parameters_ = params
min_Zr, min_Zi = [], []
max_Zr, max_Zi = [], []
xerr, yerr = [], []
for i, Z in enumerate(Z_fit):
Zr, Zi = np.real(Z), np.imag(Z)
yr, yi = [], []
for run in full_range:
yi.append(run[i].imag)
yr.append(run[i].real)
min_Zr.append(1j*Zi + (Zr - 2*np.std(yr)))
max_Zr.append(1j*Zi + (Zr + 2*np.std(yr)))
min_Zi.append(Zr + 1j*(Zi - 2*np.std(yi)))
max_Zi.append(Zr + 1j*(Zi + 2*np.std(yi)))
xerr.append(2*np.std(yr))
yerr.append(2*np.std(yi))
conf_x, conf_y = False, False
if conf_bounds == 'error_bars':
ax.errorbar(Z_fit.real, -Z_fit.imag, xerr=xerr, yerr=yerr,
fmt='', color='#7f7f7f', zorder=-2)
elif conf_bounds == 'filled':
conf_x = True
conf_y = True
elif conf_bounds == 'filledx':
conf_x = True
elif conf_bounds == 'filledy':
conf_y = True
if conf_x:
ax.fill_betweenx(-np.imag(min_Zr), np.real(min_Zr),
np.real(max_Zr), alpha='.2',
color='#7f7f7f', zorder=-2)
if conf_y:
ax.fill_between(np.real(min_Zi), -np.imag(min_Zi),
-np.imag(max_Zi), alpha='.2',
color='#7f7f7f', zorder=-2)
ax.set_ylim(base_ylim)
ax.set_xlim(base_xlim)
return ax
class Randles(BaseCircuit):
    """ A Randles circuit model class """
    def __init__(self, CPE=False, **kwargs):
        """ Constructor for the Randles' circuit class.

        Parameters
        ----------
        initial_guess: numpy array
            Initial guess of the circuit values
        CPE: boolean
            Use a constant phase element instead of a capacitor
        """
        super().__init__(**kwargs)

        if CPE:
            self.name = 'Randles w/ CPE'
            self.circuit = 'R_0-p(R_1,E_1/E_2)-W_1/W_2'
        else:
            self.name = 'Randles'
            self.circuit = 'R_0-p(R_1,C_1)-W_1/W_2'

        # shared length validation, previously duplicated in both branches
        circuit_length = calculateCircuitLength(self.circuit)
        assert len(self.initial_guess) == circuit_length, \
            'Initial guess length needs to be equal to parameter length'
class CustomCircuit(BaseCircuit):
    """ An equivalent circuit model built from a user-supplied string. """
    def __init__(self, circuit, **kwargs):
        """ Constructor for a customizable equivalent circuit model.

        Parameters
        ----------
        initial_guess: numpy array
            Initial guess of the circuit values
        circuit: string
            A string that should be interpreted as an equivalent circuit

        Notes
        -----
        A custom circuit is defined as a string comprised of elements in series
        (separated by a `-`) and elements in parallel (grouped as (x,y)).
        Elements with two or more parameters are separated by a forward slash
        (`/`).

        Example:
            Randles circuit is given by 'R0-p(R1,C1)-W1/W2'
        """
        super().__init__(**kwargs)
        self.circuit = circuit

        circuit_length = calculateCircuitLength(self.circuit)
        # BUGFIX: the assertion message was a plain (non-f) string, so it
        # printed the literal text '{circuit_length}'; format the real value.
        assert len(self.initial_guess) == circuit_length, \
            'Initial guess length needs to be equal to {}'.format(
                circuit_length)
| {"/impedance/tests/test_model_io.py": ["/impedance/model_io.py", "/impedance/circuits.py"], "/impedance/tests/test_fitting.py": ["/impedance/fitting.py"], "/impedance/tests/test_validation.py": ["/impedance/validation.py"], "/impedance/validation.py": ["/impedance/fitting.py"], "/impedance/circuits.py": ["/impedance/fitting.py", "/impedance/plotting.py"], "/impedance/tests/test_circuits.py": ["/impedance/circuits.py"], "/impedance/tests/test_plotting.py": ["/impedance/plotting.py"], "/impedance/model_io.py": ["/impedance/circuits.py"]} |
69,205 | vwhu/impedance.py | refs/heads/master | /impedance/tests/test_circuits.py | from impedance.circuits import BaseCircuit, CustomCircuit, Randles
import matplotlib.pyplot as plt
import numpy as np
def test_BaseCircuit():
    """The base constructor should store the supplied initial guess verbatim."""
    initial_guess = [0.01, 0.02, 50]
    base_circuit = BaseCircuit(initial_guess)
    assert base_circuit.initial_guess == initial_guess
def test_Randles():
    """End-to-end test of the Randles model: fit, plot, predict, and input
    validation (bad types, mismatched lengths, wrong guess length)."""
    # check for proper functionality
    # get example data
    data = np.genfromtxt('./data/exampleData.csv', delimiter=',')
    frequencies = data[:, 0]
    Z = data[:, 1] + 1j*data[:, 2]

    randles = Randles(initial_guess=[.01, .005, .1, .0001, 200])
    # fit only the capacitive (Im(Z) < 0) part of the spectrum
    randles.fit(frequencies[np.imag(Z) < 0], Z[np.imag(Z) < 0])
    np.testing.assert_almost_equal(randles.parameters_,
                                   np.array([1.86146620e-02, 1.15477171e-02,
                                             1.33331949e+00, 6.31473571e-02,
                                             2.22407275e+02]), decimal=2)

    # check that plotting returns a plt.Axes() object
    fig, ax = plt.subplots()
    assert isinstance(randles.plot(ax, frequencies, Z), type(ax))
    assert isinstance(randles.plot(ax, frequencies, Z,
                                   conf_bounds='error_bars'), type(ax))

    # check that predicting impedance from fit works
    # BUGFIX: np.complex was deprecated (NumPy 1.20) and removed (1.24);
    # the builtin complex is the documented replacement
    assert np.isclose(randles.predict(np.array([10.0])),
                      complex(0.02495749, -0.00614842))

    # check that it rejects improper inputs - enforcing initial guess types
    try:
        r = Randles(initial_guess=['hi', 0.1])
    except(AssertionError):
        pass
    else:
        raise Exception('unhandled error occurred')

    # check that it rejects improper inputs - enforcing data types
    try:
        r = Randles(initial_guess=[.01, .005, .1, .0001, 200])
        r.fit(['hi', 'hello'], [0.5, 0.2])
    except(AssertionError):
        pass
    else:
        raise Exception('unhandled error occurred')

    # check that it rejects improper inputs - enforcing data lengths
    try:
        r = Randles(initial_guess=[.01, .005, .1, .0001, 200])
        r.fit(frequencies[np.imag(Z) < 0][:5], Z[np.imag(Z) < 0])
    except(AssertionError):
        pass
    else:
        raise Exception('unhandled error occurred')

    # check that it rejects improper inputs
    # enforcing the length of initial_guess
    try:
        r = Randles(initial_guess=[.01, .005, .1, .0001])
    except(AssertionError):
        pass
    else:
        raise Exception('unhandled error occurred')

    # check that it rejects missing input
    try:
        r = Randles()
    except(TypeError):
        pass
    else:
        raise Exception('unhandled error occured')
    return
def test_CustomCircuit():
    """CustomCircuit: parameter-name parsing, fit state, pretty-printing,
    and rejection of a wrong-length initial guess."""
    initial_guess = [.01, .005, .1, .005, .1, .001, 200]
    custom_string = 'R_0-p(R_1,C_1)-p(R_2,C_2)-W_1/W_2'
    custom_circuit = CustomCircuit(initial_guess=initial_guess,
                                   circuit=custom_string)

    # check get_param_names()
    assert custom_circuit.get_param_names() == \
        ['R_0', 'R_1', 'C_1', 'R_2', 'C_2', 'W_1', 'W_2']

    # check _is_fit()
    assert not custom_circuit._is_fit()

    # an unfitted, named circuit should pretty-print its initial guesses
    initial_guess = [.01, .005, .1]
    custom_string = 'R_0-p(R_1,C_1)'
    custom_circuit = CustomCircuit(initial_guess=initial_guess,
                                   circuit=custom_string, name='Test')

    line = '\n-------------------------------\n'
    assert str(custom_circuit) == line + \
        'Circuit: Test\n' + \
        'Circuit string: R_0-p(R_1,C_1)\n' + \
        'Fit: False\n' + line + \
        'Initial guesses:\n' + \
        '\tR_0 = 1.00e-02\n' + \
        '\tR_1 = 5.00e-03\n' + \
        '\tC_1 = 1.00e-01\n'

    # check that it rejects improper inputs
    # enforcing the length of initial_guess
    try:
        initial_guess = [.01, .005, .1, .005, .1, .001, 200]
        custom_string = 'R_0-p(R_1,E_1/E_2)-p(R_1,C_1)-W_1/W_2'
        custom_circuit = CustomCircuit(initial_guess=initial_guess,
                                       circuit=custom_string)
    except(AssertionError):
        pass
    else:
        raise Exception('unhandled error occurred')
    return
| {"/impedance/tests/test_model_io.py": ["/impedance/model_io.py", "/impedance/circuits.py"], "/impedance/tests/test_fitting.py": ["/impedance/fitting.py"], "/impedance/tests/test_validation.py": ["/impedance/validation.py"], "/impedance/validation.py": ["/impedance/fitting.py"], "/impedance/circuits.py": ["/impedance/fitting.py", "/impedance/plotting.py"], "/impedance/tests/test_circuits.py": ["/impedance/circuits.py"], "/impedance/tests/test_plotting.py": ["/impedance/plotting.py"], "/impedance/model_io.py": ["/impedance/circuits.py"]} |
69,206 | vwhu/impedance.py | refs/heads/master | /impedance/preprocessing.py | """
Methods for preprocessing impedance data from instrument files
"""
import numpy as np
def readFile(filename, type=None):
    """ A wrapper for reading in many common types of impedance files.

    Parameters
    ----------
    filename: string
        Filename to extract impedance data from
    type: string
        Type of instrument file ('gamry', 'autolab', 'parstat');
        None falls back to a plain CSV reader

    Returns
    -------
    frequencies : np.ndarray
        Array of frequencies
    impedance : np.ndarray of complex numbers
        Array of complex impedances
    """
    supported_types = ['gamry', 'autolab', 'parstat']

    if type is not None:
        assert type in supported_types,\
            '{} is not a supported type ({})'.format(type, supported_types)

    # dispatch to the parser for the requested instrument format
    readers = {
        'gamry': readGamry,
        'autolab': readAutolab,
        'parstat': readParstat,
        None: readCSV,
    }
    f, Z = readers[type](filename)
    return f, Z
def readGamry(filename):
    """ Function for reading the .DTA file from Gamry.

    Parameters
    ----------
    filename: string
        Filename of .DTA file to extract impedance data from

    Returns
    -------
    frequencies : np.ndarray
        Array of frequencies
    impedance : np.ndarray of complex numbers
        Array of complex impedances

    Raises
    ------
    ValueError
        If the file contains no ZCURVE table.
    """
    # Gamry .DTA files are Latin-1 encoded
    with open(filename, 'r', encoding='ISO-8859-1') as dta_file:
        lines = dta_file.readlines()

    # locate the (last) ZCURVE table header; data starts 3 lines below it
    start_line = None
    for i, line in enumerate(lines):
        if 'ZCURVE' in line:
            start_line = i
    if start_line is None:
        # BUGFIX: previously this fell through to a NameError on start_line;
        # fail with a clear, actionable message instead
        raise ValueError('no ZCURVE table found in {}'.format(filename))

    raw_data = lines[start_line + 3:]
    f, Z = [], []
    for line in raw_data:
        each = line.split()
        # column 2 = frequency, columns 3/4 = Re(Z)/Im(Z)
        f.append(float(each[2]))
        Z.append(complex(float(each[3]), float(each[4])))
    return np.array(f), np.array(Z)
def readAutolab(filename):
    """ Function for reading the .csv file from Autolab.

    Parameters
    ----------
    filename: string
        Filename of .csv file to extract impedance data from

    Returns
    -------
    frequencies : np.ndarray
        Array of frequencies
    impedance : np.ndarray of complex numbers
        Array of complex impedances
    """
    with open(filename, 'r') as csv_file:
        lines = csv_file.readlines()

    # first line is the column header
    raw_data = lines[1:]
    f, Z = [], []
    for line in raw_data:
        each = line.split(',')
        # BUGFIX: frequencies were appended as raw strings, producing a
        # string-dtype array; convert to float like the other readers do
        f.append(float(each[0]))
        Z.append(complex(float(each[1]), float(each[2])))
    return np.array(f), np.array(Z)
def readParstat(filename):
    """ Function for reading the .txt file from Parstat.

    Parameters
    ----------
    filename: string
        Filename of .txt file to extract impedance data from

    Returns
    -------
    frequencies : np.ndarray
        Array of frequencies
    impedance : np.ndarray of complex numbers
        Array of complex impedances
    """
    with open(filename, 'r') as txt_file:
        lines = txt_file.readlines()

    # first line is the column header
    raw_data = lines[1:]
    f, Z = [], []
    for line in raw_data:
        each = line.split()
        # BUGFIX: frequencies were appended as raw strings, producing a
        # string-dtype array; convert to float like the other readers do
        f.append(float(each[4]))
        Z.append(complex(float(each[6]), float(each[7])))
    return np.array(f), np.array(Z)
def readCSV(filename):
    """Read a plain CSV of (frequency, Re(Z), Im(Z)) columns."""
    data = np.genfromtxt(filename, delimiter=',')
    frequencies = data[:, 0]
    impedances = data[:, 1] + 1j * data[:, 2]
    return frequencies, impedances
| {"/impedance/tests/test_model_io.py": ["/impedance/model_io.py", "/impedance/circuits.py"], "/impedance/tests/test_fitting.py": ["/impedance/fitting.py"], "/impedance/tests/test_validation.py": ["/impedance/validation.py"], "/impedance/validation.py": ["/impedance/fitting.py"], "/impedance/circuits.py": ["/impedance/fitting.py", "/impedance/plotting.py"], "/impedance/tests/test_circuits.py": ["/impedance/circuits.py"], "/impedance/tests/test_plotting.py": ["/impedance/plotting.py"], "/impedance/model_io.py": ["/impedance/circuits.py"]} |
69,207 | vwhu/impedance.py | refs/heads/master | /impedance/tests/test_plotting.py | import os
import numpy as np
from impedance.plotting import plot_nyquist
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt # noqa E402
def test_plot_nyquist():
    """plot_nyquist should draw exactly (Re(Z), -Im(Z)) for the supplied data."""
    frequencies = [1000.0, 1.0, 0.01]
    Z = np.array([1, 2, 3]) + 1j*np.array([2, 3, 4])

    fig, ax = plt.subplots()
    ax = plot_nyquist(ax, frequencies, Z)

    # the first line on the axes must be the data we passed in
    xs, ys = ax.lines[0].get_xydata().T
    assert (xs == Z.real).all() and (ys == -Z.imag).all()
| {"/impedance/tests/test_model_io.py": ["/impedance/model_io.py", "/impedance/circuits.py"], "/impedance/tests/test_fitting.py": ["/impedance/fitting.py"], "/impedance/tests/test_validation.py": ["/impedance/validation.py"], "/impedance/validation.py": ["/impedance/fitting.py"], "/impedance/circuits.py": ["/impedance/fitting.py", "/impedance/plotting.py"], "/impedance/tests/test_circuits.py": ["/impedance/circuits.py"], "/impedance/tests/test_plotting.py": ["/impedance/plotting.py"], "/impedance/model_io.py": ["/impedance/circuits.py"]} |
69,208 | vwhu/impedance.py | refs/heads/master | /impedance/model_io.py | import json
from .circuits import CustomCircuit
import numpy as np
def model_export(model, filepath):
    """ Exports a model to JSON.

    Parameters
    ----------
    model: CustomCircuit
        Circuit model object
    filepath: Path String
        Destination for exporting model object
    """
    # common fields are always written; fit results only when present
    data_dict = {"Name": model.name,
                 "Circuit String": model.circuit,
                 "Initial Guess": model.initial_guess,
                 "Fit": model._is_fit()}
    if model._is_fit():
        data_dict["Parameters"] = list(model.parameters_)
        data_dict["Confidence"] = list(model.conf_)

    with open(filepath, 'w') as f:
        json.dump(data_dict, f)
def model_import(filepath):
    """ Imports a model from JSON.

    Parameters
    ----------
    filepath: Path String
        JSON file previously written by model_export

    Returns
    -------
    circuit_model: CustomCircuit
        Circuit model object; fitted parameters/confidences are restored
        when the exported model had been fit
    """
    # BUGFIX: use a context manager — the file handle was previously opened
    # and never closed
    with open(filepath, 'r') as json_data_file:
        json_data = json.load(json_data_file)

    model_name = json_data["Name"]
    # a model exported without a name round-trips through the string 'None'
    if model_name == 'None':
        model_name = None
    model_string = json_data["Circuit String"]
    model_initial_guess = json_data["Initial Guess"]

    circuit_model = CustomCircuit(initial_guess=model_initial_guess,
                                  circuit=model_string,
                                  name=model_name)

    if json_data["Fit"]:
        circuit_model.parameters_ = np.array(json_data["Parameters"])
        circuit_model.conf_ = np.array(json_data["Confidence"])

    return circuit_model
| {"/impedance/tests/test_model_io.py": ["/impedance/model_io.py", "/impedance/circuits.py"], "/impedance/tests/test_fitting.py": ["/impedance/fitting.py"], "/impedance/tests/test_validation.py": ["/impedance/validation.py"], "/impedance/validation.py": ["/impedance/fitting.py"], "/impedance/circuits.py": ["/impedance/fitting.py", "/impedance/plotting.py"], "/impedance/tests/test_circuits.py": ["/impedance/circuits.py"], "/impedance/tests/test_plotting.py": ["/impedance/plotting.py"], "/impedance/model_io.py": ["/impedance/circuits.py"]} |
69,213 | dohyeong123/secondproject | refs/heads/master | /blog/migrations/0004_auto_20190525_0627.py | # Generated by Django 2.2.1 on 2019-05-24 21:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: update the choice labels of
    blog.Twice.nationality (KR label changed to the long form)."""

    dependencies = [
        ('blog', '0003_delete_blog'),
    ]

    operations = [
        migrations.AlterField(
            model_name='twice',
            name='nationality',
            field=models.CharField(choices=[('KR', '대한민국'), ('JP', '일본'), ('TW', '대만')], max_length=20),
        ),
    ]
| {"/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"]} |
69,214 | dohyeong123/secondproject | refs/heads/master | /blog/models.py | from django.db import models
from django.utils.timezone import now
# Create your models here.
class Twice(models.Model):
    """A member of the group Twice: name, age, birth date and nationality."""

    # (stored code, human-readable label) pairs for the nationality field
    NATION_CHOICES = (
        ('KR', '한국'),
        ('JP', '일본'),
        ('TW', '대만'),
    )
    name = models.CharField(max_length=200)
    age = models.IntegerField(default=0)
    # defaults to the time the row is created
    birth = models.DateTimeField(default=now)
    nationality = models.CharField(choices=NATION_CHOICES, max_length=20)
    position = models.TextField()

    def __str__(self):
        return self.name
69,215 | dohyeong123/secondproject | refs/heads/master | /blog/views.py | from django.shortcuts import render
from .models import Twice
# Create your views here.
def twice(request):
    """Render home.html with the Twice members whose nationality is Japanese."""
    members = Twice.objects.filter(nationality='JP')
    return render(request, 'home.html', {'japanese': members})
| {"/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"]} |
69,216 | dohyeong123/secondproject | refs/heads/master | /blog/admin.py | from django.contrib import admin
from .models import Twice
# Register your models here.
admin.site.register(Twice) | {"/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"]} |
69,217 | dohyeong123/secondproject | refs/heads/master | /blog/migrations/0002_twice.py | # Generated by Django 2.2.1 on 2019-05-24 19:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the blog.Twice table."""

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Twice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('age', models.IntegerField(default=0)),
                ('birth', models.DateTimeField()),
                ('nationality', models.CharField(choices=[('KR', '한국'), ('JP', '일본'), ('TW', '대만')], max_length=20)),
                ('position', models.TextField()),
            ],
        ),
    ]
| {"/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"]} |
69,219 | BastianOpazo/dwy_proyecto | refs/heads/master | /myProyecto/miProyectodwy/models.py | from django.db import models
# Create your models here.
# creacion tablas para slider y misionyvision
class Slider1(models.Model):
    """Home-page slider image, keyed by a short string identifier."""
    ident = models.CharField(max_length=15, primary_key=True)
    imagen = models.ImageField(upload_to='autos', null=True)

    def __str__(self):
        return self.ident
# Crear tabla de insumos.
class Insumos(models.Model):
    """A supply/inventory item: name (primary key), unit price, description
    and units in stock."""
    nombre = models.CharField(max_length=120, primary_key=True)
    precio = models.IntegerField()
    descripcion = models.TextField()
    stock = models.IntegerField()

    def __str__(self):
        return self.nombre
# Crear tabla de misionyvision.
class MisionyVision(models.Model):
    """Mission / vision / values text blocks for the 'about' page."""
    ident = models.CharField(max_length=15, primary_key=True)
    mision = models.TextField()
    vision = models.TextField()
    # NOTE(review): default=1 on a TextField — presumably a placeholder
    # value coerced to '1'; confirm intent
    valores = models.TextField(null=False, default=1)

    def __str__(self):
        return self.ident
# Crear tabla de galeria.
class Galeria(models.Model):
    """A gallery image, keyed by a short string identifier."""
    ident = models.CharField(max_length=15, primary_key=True)
    imagengaleria = models.ImageField(upload_to='galeria', null=True)

    def __str__(self):
        return self.ident
| {"/myProyecto/miProyectodwy/admin.py": ["/myProyecto/miProyectodwy/models.py"], "/myProyecto/miProyectodwy/tests.py": ["/myProyecto/miProyectodwy/models.py"]} |
69,220 | BastianOpazo/dwy_proyecto | refs/heads/master | /myProyecto/miProyectodwy/admin.py | from django.contrib import admin
from .models import Slider1,Insumos,MisionyVision,Galeria
# Register your models here.
class InsumosAdmin(admin.ModelAdmin):
    """Admin list for Insumos: searchable by name/description, 10 per page."""
    list_display = ['nombre', 'precio', 'descripcion', 'stock']
    search_fields = ['nombre', 'descripcion']
    list_per_page = 10
class Slider1Admin(admin.ModelAdmin):
    """Admin list for Slider1 images: searchable by identifier, 3 per page."""
    list_display = ['ident', 'imagen']
    search_fields = ['ident']
    list_per_page = 3
class GaleriaAdmin(admin.ModelAdmin):
    """Admin list view for gallery images."""
    list_display = ['ident','imagengaleria']
    search_fields = ['ident']
    list_per_page = 10
# Register every model with the admin site; MisionyVision uses the
# default ModelAdmin since it has no custom list configuration.
admin.site.register(Slider1,Slider1Admin)
admin.site.register(Insumos, InsumosAdmin)
admin.site.register(MisionyVision)
admin.site.register(Galeria,GaleriaAdmin)
| {"/myProyecto/miProyectodwy/admin.py": ["/myProyecto/miProyectodwy/models.py"], "/myProyecto/miProyectodwy/tests.py": ["/myProyecto/miProyectodwy/models.py"]} |
69,221 | BastianOpazo/dwy_proyecto | refs/heads/master | /myProyecto/miProyectodwy/tests.py | from django.test import TestCase
import unittest
from .models import Insumos
# Create your tests here.
class TestBaseDatos(unittest.TestCase):
    """Smoke tests for basic Insumos persistence.

    Each test records success in a flag and asserts on it; the narrow
    ``except Exception`` (previously a bare ``except:``) keeps
    SystemExit/KeyboardInterrupt from being swallowed.
    """

    def test_guardar_insumo(self):
        # Saving a fully populated row should not raise.
        valor = 0
        try:
            insumo = Insumos(
                nombre="perfume",
                descripcion="200ml",
                precio=4000,
                stock=1
            )
            insumo.save()
            valor = 1
        except Exception:
            valor = 0
        self.assertEqual(valor,1)

    def test_Eliminar_insumo(self):
        # Deleting an instance that was never saved is expected to be a
        # harmless no-op that does not raise.
        valor = 0
        try:
            insumo = Insumos(
                nombre="alargador",
            )
            insumo.delete()
            valor = 1
        except Exception:
            valor = 0
        self.assertEqual(valor,1)
# Allow running this test module directly (outside the Django test runner).
if __name__ == "__main__":
    unittest.main()
| {"/myProyecto/miProyectodwy/admin.py": ["/myProyecto/miProyectodwy/models.py"], "/myProyecto/miProyectodwy/tests.py": ["/myProyecto/miProyectodwy/models.py"]} |
69,299 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/__init__.py | # # -*- coding: utf-8 -*-
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseForbidden
from tastypie.authorization import DjangoAuthorization
from tastypie.exceptions import ApiFieldError
from tastypie.resources import ModelResource
class BaseResource(ModelResource):
def apply_filters(self, request, applicable_filters):
if 'custom' in applicable_filters:
custom = applicable_filters.pop('custom')
else:
custom = None
semi_filtered = super(BaseResource, self).apply_filters(request, applicable_filters)
return semi_filtered.filter(custom).distinct() if custom else semi_filtered
@classmethod
def full_bundle(cls, obj, request=None, full=True):
self = cls()
bundle = self.build_bundle(obj=obj, request=request)
bundle.full = full
bundle = self.full_dehydrate(bundle)
return bundle | {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,300 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/network_monitor/forms.py | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
class FilterForm(forms.SelfHandlingForm):
    """Syslog filter dialog for the network-monitor panel.

    ``handle`` is currently a stub: it collects the submitted values but
    performs no real query and always returns an empty result list.
    The no-op ``__init__``/``clean`` pass-through overrides were removed.
    """
    addr = forms.ChoiceField(
        label=_('Addr'),
        required=True,
        choices=[('ShangHai', _('ShangHai')),
                 ('BeiJing', _('BeiJing')),
                 ('GuangZhou', _('GuangZhou'))])
    start_time = forms.DateTimeField(
        label=_('StartTime'),
        required=False
    )
    end_time = forms.DateTimeField(
        label=_('EndTime'),
        required=False,
    )
    priority = forms.ChoiceField(
        label=_('Priority'),
        required=False,
        choices=[('Any', _('Any')),
                 ('Emergency', _('Emergency')),
                 ('Alert', _('Alert')),
                 ('Critical', _('Critical')),
                 ('Error', _('Error')),
                 ('Warning', _('Warning')),
                 ('Notice', _('Notice')),
                 ('Informational', _('Informational')),
                 ('Debug', _('Debug'))])
    attack_type = forms.ChoiceField(
        label=_('Attack Type'),
        required=False,
        choices=[('Any', _('Any')),
                 ('SHELL', _('SHELL')),
                 ('DPATTACK', _('DPATTACK')),
                 ('FILTER', _('FILTER')),
                 ('DPURPF', _('DPURPF')),
                 ('ARP', _('ARP')),
                 ('WEB', _('WEB')),
                 ('SOCKET', _('SOCKET')),
                 ('CFGMAN', _('CFGMAN')),
                 ('CFM', _('CFM'))]
    )
    srcip = forms.CharField(max_length=15,
                            label=_('SrcIP'),
                            help_text="192.168.7.137",
                            required=False)
    destip = forms.CharField(max_length=15,
                             label=_('DestIP'),
                             help_text="192.168.202.1",
                             required=False)

    def handle(self, request, data):
        # NOTE(review): addr/start_time/end_time are collected by the form
        # but never placed in `meta` -- confirm whether that is intended.
        meta = {'priority': data['priority'],
                'attack_type': data['attack_type'],
                'srcip': data['srcip'],
                'destip': data['destip']}
        try:
            # Renamed from `filter`, which shadowed the builtin.
            results = []
            return results
        except Exception:
            # Fixed the copy-pasted message that referred to image creation.
            exceptions.handle(request, _('Unable to filter syslog messages.'))
class AddBlacklistForm(forms.SelfHandlingForm):
    """Dialog for adding an IP to a firewall blacklist.

    ``handle`` is a stub: it posts a placeholder success message and
    returns an empty list without contacting the firewall.
    """
    firewall_ip = forms.CharField(max_length=15,
                                  label=_('FirewallIP'),
                                  initial='192.168.202.1',
                                  help_text="192.168.202.1",
                                  required=True)
    ip = forms.CharField(max_length=15,
                         label=_('IP'),
                         help_text="192.168.7.137",
                         required=True)
    # Ban-duration choices; stored as opaque codes '1'..'4'.
    time = forms.ChoiceField(label=_("Old Time"),
                             choices=[('1', _("EveryTime")),
                                      ('2', _("30 min")),
                                      ('3', _("3 h")),
                                      ('4', _("1000 min")),],
                             initial= '1',
                             widget=forms.RadioSelect(attrs={'default': '1',}))

    def __init__(self, request, *args, **kwargs):
        # Pass-through; kept as an extension point.
        super(AddBlacklistForm, self).__init__(request, *args, **kwargs)

    def clean(self):
        return super(AddBlacklistForm, self).clean()

    def handle(self, request, data):
        # `meta` is an unused placeholder for the future firewall payload.
        meta = {}
        try:
            blacklist = []
            # Placeholder message; replace once the real call exists.
            messages.success(request, _('--------------------'))
            return blacklist
        except Exception:
            exceptions.handle(request, _('Unable to create new blacklist.'))
69,301 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/models.py | """
Stub file to work around django bug: https://code.djangoproject.com/ticket/7198
"""
from django.db import models
from django.utils import timezone
# Create your models here.
class Service(models.Model):
    """A named service (process) that can be monitored on a server."""
    name = models.CharField(max_length=32)
    create_time = models.DateTimeField(auto_now_add=True, editable=False)
    update_time = models.DateTimeField(editable=False)

    def save(self, *args, **kwargs):
        """On save, update timestamps: create_time only on first insert,
        update_time on every save (standard Django snippet)."""
        if not self.id:
            self.create_time = timezone.now()
        self.update_time = timezone.now()
        return super(Service, self).save(*args, **kwargs)

    def __str__(self):
        return self.name
class Server(models.Model):
    """A monitored host, with its SNMP/SSH access parameters and the set
    of services to watch on it."""
    SNMP_VERSION_CHOICES = (
        ('1', '1'),
        ('2c', '2c'),
        ('3', '3'),
    )
    name = models.CharField(max_length=64)
    ip = models.CharField(max_length=64)
    create_time = models.DateTimeField(auto_now_add=True, editable=False)
    update_time = models.DateTimeField(editable=False)
    snmp_version = models.CharField(max_length=2, choices=SNMP_VERSION_CHOICES, default='2c')
    # SNMP community string ("commit" appears to be a typo for "community").
    snmp_commit = models.CharField(max_length=32, default='public')
    ssh_name = models.CharField(max_length=32, default='root', null=True, blank=True)
    # NOTE(review): looks like a plaintext credential/key field -- confirm
    # whether this should be stored encrypted.
    ssh_key = models.CharField(max_length=32, null=True, blank=True)
    services = models.ManyToManyField(Service, blank=True)

    def save(self, *args, **kwargs):
        """On save, update timestamps: create_time only on first insert,
        update_time on every save."""
        if not self.id:
            self.create_time = timezone.now()
        self.update_time = timezone.now()
        return super(Server, self).save(*args, **kwargs)

    def __str__(self):
        return self.name
class ServerMonitorMessage(models.Model):
    """One monitoring sample for a server: CPU/RAM/disk usage percentages
    plus a process count and per-service status string."""
    server = models.ForeignKey(Server)
    time = models.DateTimeField(auto_now_add=True, editable=False)
    # Percentages with two decimals, e.g. 97.25.
    cpu_usage = models.DecimalField(max_digits=4, decimal_places=2)
    mem_usage = models.DecimalField(max_digits=4, decimal_places=2)
    disk_usage = models.DecimalField(max_digits=4, decimal_places=2)
    process_num = models.IntegerField()
    process_status = models.CharField(max_length=255)

    def save(self, *args, **kwargs):
        """Stamp ``time`` on first insert.

        Fix: the original assigned ``self.time = timezone.now()`` twice
        (a copy-paste of the create/update snippet where both targets
        became ``time``); a single guarded assignment is equivalent.
        """
        if not self.id:
            self.time = timezone.now()
        return super(ServerMonitorMessage, self).save(*args, **kwargs)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,302 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/regulation/panel.py | from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.monitor import dashboard
class Regulation(horizon.Panel):
    """Horizon panel for the "Regulation" view of the Monitor dashboard."""
    name = _("Regulation")
    slug = "regulation"


# Attach the panel to the Monitor dashboard at import time.
dashboard.Monitor.register(Regulation)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,303 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/resource.py | # # -*- coding: utf-8 -*-
import json
from django.contrib.contenttypes.models import ContentType
from django.utils.timezone import now
from tastypie.authentication import SessionAuthentication
from tastypie.authorization import DjangoAuthorization
from tastypie.authorization import Authorization
from tastypie import fields
from tastypie.resources import Resource
from tastypie.exceptions import ApiFieldError, Unauthorized
from tastypie.serializers import Serializer
from openstack_dashboard.dashboards.newtouch import BaseResource
from .models import ServerMonitorMessage,Service,Server
from tastypie.exceptions import BadRequest
class VerboseSerializer(Serializer):
    """Tastypie serializer that reports a readable message when loading
    JSON fails, instead of a generic 500."""

    def from_json(self, content):
        """Override of ``Serializer.from_json`` that wraps JSON parse
        errors in a ``BadRequest`` carrying the parser's message.

        Fix: the original used ``e.message``, an attribute removed from
        exceptions in Python 3; ``e`` itself formats to the same text.
        """
        try:
            return json.loads(content)
        except ValueError as e:
            raise BadRequest(u"Incorrect JSON format: Reason: \"{}\"".format(e))
class ServiceResource(BaseResource):
    """REST endpoint for Service rows (/api/v1/service/)."""
    class Meta:
        queryset = Service.objects.all()
        resource_name = 'service'
        # NOTE(review): Authorization() permits any caller -- confirm this
        # API is not reachable by untrusted users.
        authorization = Authorization()
        list_allowed_methods = ['post','get',]
        detail_allowed_methods = ['get',]
        always_return_data = True
class ServerResource(BaseResource):
    """REST endpoint for Server rows, embedding the related services."""
    services = fields.ManyToManyField(ServiceResource,'services')

    class Meta:
        queryset = Server.objects.all()
        resource_name = 'server'
        # NOTE(review): open authorization -- see ServiceResource.
        authorization = Authorization()
        list_allowed_methods = ['post','get', ]
        detail_allowed_methods = ['get',]
        always_return_data = True
class ServerMonitorMessageResource(BaseResource):
    """REST endpoint the SNMP daemon POSTs monitoring samples to
    (/api/v1/server_monitor/)."""
    server = fields.ForeignKey(ServerResource, 'server')

    class Meta:
        queryset = ServerMonitorMessage.objects.all()
        resource_name = 'server_monitor'
        # Session auth / DjangoAuthorization were tried and disabled;
        # currently any caller may post samples.
        authorization = Authorization()
        list_allowed_methods = ['post','get', 'patch']
        detail_allowed_methods = ['post','get']
        always_return_data = True
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,304 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/rules/panel.py | from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.newtouch import dashboard
class Rules(horizon.Panel):
    """Horizon panel for the "Rules" view of the Newtouch dashboard."""
    name = _("Rules")
    slug = "rules"


# Attach the panel to the Newtouch dashboard at import time.
dashboard.Newtouch.register(Rules)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,305 | junhuizx/horizon-newtouch | refs/heads/master | /snmp_daemon/snmp_daemon.py | import netsnmp
import requests
import threading
import time
import json
from keystoneclient.auth.identity import v2
from keystoneclient import session
from novaclient import client
BASE_API_URL = 'http://localhost'
AUTH_URL = 'http://localhost:5000/v2.0/'
USERNAME = 'admin'
PASSWORD = 'admin'
PROJECT_ID = 'admin'
## SNMP OID
# System Group
SYS = ".1.3.6.1.2.1.1"
SYS_DESCR = ".1.3.6.1.2.1.1.1"
SYS_OBJECT_ID = ".1.3.6.1.2.1.1.2"
SYS_UPTIME = ".1.3.6.1.2.1.1.3"
SYS_CONTACT = ".1.3.6.1.2.1.1.4"
SYS_NAME = ".1.3.6.1.2.1.1.5"
SYS_LOCATION = ".1.3.6.1.2.1.1.6"
HR_SW_RUNNAME = ".1.3.6.1.2.1.25.4.2.1.2"
HR_SW_RUNPARM = ".1.3.6.1.2.1.25.4.2.1.5"
# Interfaces Group
# CPU Group
CPU = ".1.3.6.1.4.1.2021.11"
USER_CPU = ".1.3.6.1.4.1.2021.11.9"
SYSTEM_CPU = ".1.3.6.1.4.1.2021.11.10"
IDLE_CPU = ".1.3.6.1.4.1.2021.11.11"
RAW_USER_CPU = ".1.3.6.1.4.1.2021.11.50"
RAW_NICE_CPU = ".1.3.6.1.4.1.2021.11.51"
RAW_SYSTEM_CPU = ".1.3.6.1.4.1.2021.11.52"
RAW_IDLE_CPU = ".1.3.6.1.4.1.2021.11.53"
# Memery Group
RAM = ".1.3.6.1.4.1.2021.4"
TOTAL_RAM = ".1.3.6.1.4.1.2021.4.5"
TOTAL_RAM_USED = ".1.3.6.1.4.1.2021.4.6"
TOTAL_RAM_FREE = ".1.3.6.1.4.1.2021.4.7"
# Disk Group
DISK = ".1.3.6.1.4.1.2021.9.1"
TOTAL_DISK = ".1.3.6.1.4.1.2021.9.1.6"
DISK_AVAIL = ".1.3.6.1.4.1.2021.9.1.7"
DISK_USED = ".1.3.6.1.4.1.2021.9.1.8"
def time2str(time):
    """Format an SNMP sysUpTime tick count as ``"D day HH:MM:SS.CC"``.

    ``time`` is an integer number of hundredths of a second (the
    parameter name shadows the ``time`` module inside this function
    only, and is kept for interface compatibility).

    Fix: the original used ``/``, which under Python 3 is float
    division and would corrupt the formatted fields; floor division
    via ``divmod`` keeps the arithmetic integral on both Python 2 and 3.
    """
    msecond = time % 100
    seconds = time // 100
    day, rem = divmod(seconds, 3600 * 24)
    hour, rem = divmod(rem, 3600)
    min, second = divmod(rem, 60)
    return ("%d day %02d:%02d:%02d.%02d") % (day, hour, min, second, msecond)
class Server(object):
    """One monitored host: wraps a net-snmp session plus the REST calls
    used to fetch the host's service list and push monitoring samples.

    The ``get_snmp_*`` methods walk an OID subtree and then pick values
    out of the result by position; this is fragile and depends on the
    agent returning the subtree in a fixed order.
    """
    # Class-level defaults; all are overwritten per instance in __init__.
    id = None
    name = None
    ip = None
    snmp_commit = None
    snmp_version = None
    services = None
    cpu_message = None

    def __init__(self, **kwargs):
        self.id = kwargs['id']
        self.name = kwargs['name']
        self.ip = kwargs['ip']
        # SNMP community string (field name is a typo for "community").
        self.snmp_commit = kwargs['snmp_commit']
        self.snmp_version = kwargs['snmp_version']
        # List of service resource URIs, e.g. "/dashboard/api/v1/service/1/".
        self.services = kwargs['services']
        # NOTE(review): Version is hard-coded to 2 even though
        # snmp_version is stored -- confirm v1/v3 hosts are not expected.
        self.session = netsnmp.Session(DestHost=self.ip, Version=2, Community=self.snmp_commit)

    def get_server_services(self):
        """Resolve each service URI through the REST API; return the
        list of service names to look for among running processes."""
        headers = {'content-type': 'application/json'}
        monitor_services = []
        for service in self.services:
            url = BASE_API_URL + str(service)
            re = requests.get(url, headers=headers)
            object = json.loads(re.text)
            monitor_services.append(str(object['name']))
        return monitor_services

    def get_snmp_sys_message(self):
        """Walk the system group; positions 2-5 are uptime, contact,
        name and location in the standard MIB-II ordering."""
        vars = netsnmp.VarList(netsnmp.Varbind(SYS))
        self.sys_message = self.session.walk(vars)
        self.sys_uptime = int(self.sys_message[2])
        self.sys_contact = self.sys_message[3]
        self.sys_name = self.sys_message[4]
        self.sys_location = self.sys_message[5]

    def get_snmp_cpu_message(self):
        """Walk the UCD CPU group; index 10 is ssCpuIdle, so usage is
        100 - idle."""
        vars = netsnmp.VarList(netsnmp.Varbind(CPU))
        self.cpu_message = self.session.walk(vars)
        self.cpu_usage = 100 - int(self.cpu_message[10])

    def get_snmp_mem_message(self):
        """Walk the UCD memory group (values in KB)."""
        vars = netsnmp.VarList(netsnmp.Varbind(RAM))
        self.mem_message = self.session.walk(vars)
        self.total_ram = int(self.mem_message[4])
        self.total_ram_used = int(self.mem_message[5])
        self.total_ram_free = int(self.mem_message[6])
        # NOTE(review): this computes 100 - used%, i.e. the *free*
        # percentage, yet is posted as mem_usage -- confirm intent.
        self.ram_usage = "%.2f" % (100.00 - self.total_ram_used * 100.00 / self.total_ram)

    def get_snmp_disk_message(self):
        """Walk the UCD disk table (first monitored partition)."""
        vars = netsnmp.VarList(netsnmp.Varbind(DISK))
        self.disk_message = self.session.walk(vars)
        self.total_disk = int(self.disk_message[5])
        self.total_avail_disk = int(self.disk_message[6])
        self.total_used_disk = int(self.disk_message[7])
        self.disk_usage = "%.2f" % (self.total_used_disk * 100.00 / self.total_disk)

    def get_snmp_process_messages(self):
        """Walk the host-resources running-process name/args tables and
        mark each monitored service 1/0 depending on whether any running
        process command line contains the service name (substring match)."""
        monitor_services = self.get_server_services()
        vars = netsnmp.VarList(netsnmp.Varbind(HR_SW_RUNNAME))
        self.process_name_message = self.session.walk(vars)
        vars = netsnmp.VarList(netsnmp.Varbind(HR_SW_RUNPARM))
        self.process_parm_message = self.session.walk(vars)
        self.process_num = len(self.process_name_message)
        self.process_message = []
        nvs = zip(self.process_name_message, self.process_parm_message)
        # Join "name args" per process; args may be None for kernel threads.
        for name,parm in nvs:
            if None != parm:
                process = name + " " + parm
            else:
                process = name
            self.process_message.append(process)
        service_stats = []
        service_status_tmp = 0
        for service in monitor_services:
            for process in self.process_message:
                if service in process:
                    service_status_tmp = 1
                    break
            service_stats.append(service_status_tmp)
            service_status_tmp = 0
        nvs2 = zip(monitor_services, service_stats)
        self.service_status = dict((service, status) for service, status in nvs2)

    def get_snmp_message(self):
        """Collect all SNMP metrics in one shot."""
        self.get_snmp_sys_message()
        self.get_snmp_cpu_message()
        self.get_snmp_mem_message()
        self.get_snmp_disk_message()
        self.get_snmp_process_messages()

    def post_snmp_messgae(self):
        """POST the collected sample to the server_monitor API.

        (Method name keeps the original "messgae" typo because the
        daemon's main loop calls it by this name.)
        """
        server = "/dashboard/api/v1/server/%s/" % (self.id)
        url = BASE_API_URL + "/dashboard/api/v1/server_monitor/"
        headers = {'content-type': 'application/json','X-HTTP-Method-Override': 'POST'}
        data = {"server": server,
                "cpu_usage": self.cpu_usage,
                "mem_usage": self.ram_usage,
                "disk_usage": self.disk_usage,
                "process_num": self.process_num,
                "process_status": self.service_status}
        re = requests.post(url, headers=headers, data=json.dumps(data))
        return re.text
def current_hypervisors_list():
    """Return the servers already registered with the dashboard API,
    decoded from the JSON list endpoint."""
    endpoint = BASE_API_URL + "/dashboard/api/v1/server/"
    response = requests.get(endpoint,
                            headers={'content-type': 'application/json'})
    return json.loads(response.text)
def add_new_hypervisor(hypervisor):
    """Register a newly discovered hypervisor with the dashboard API.

    Uses default SNMP settings (v2c, community "newtouch") and an empty
    service list; returns the raw ``requests`` response.
    """
    payload = {"name": hypervisor.hypervisor_hostname,
               "ip": hypervisor.host_ip,
               "snmp_version": "2c",
               "snmp_commit": "newtouch",
               "services":""}
    headers = {'content-type': 'application/json',
               'X-HTTP-Method-Override': 'POST'}
    endpoint = BASE_API_URL + "/dashboard/api/v1/server/"
    return requests.post(endpoint, headers=headers, data=json.dumps(payload))
def get_hypervisors_list():
    """Sync Nova's hypervisor list into the dashboard API and return the
    registered servers as ``Server`` objects.

    Any hypervisor Nova reports that is not yet registered (by IP) is
    added via ``add_new_hypervisor`` first.

    Fix: ``hypervisors`` was only assigned inside the ``try`` block, so
    if ``nova.hypervisors.list()`` raised, the silenced exception was
    followed by a NameError in the loop below; it is now initialized to
    an empty list so a failed Nova call degrades to "no new hosts".
    """
    server_list = []
    auth = v2.Password(auth_url=AUTH_URL,
                       username=USERNAME,
                       password=PASSWORD,
                       tenant_name=PROJECT_ID)
    sess = session.Session(auth=auth)
    nova = client.Client('1.1', session=sess)
    hypervisors = []
    try:
        hypervisors = nova.hypervisors.list()
    except Exception:
        # Best-effort: keep serving the already-registered servers.
        pass
    current_hypervisors = current_hypervisors_list()
    current_hypervisor_ip_list = []
    for current_hypervisor in current_hypervisors['objects']:
        current_hypervisor_ip_list.append(current_hypervisor['ip'])
    for hypervisor in hypervisors:
        if hypervisor.host_ip not in current_hypervisor_ip_list:
            add_new_hypervisor(hypervisor)
    # Re-fetch so newly added hosts are included.
    current_hypervisors = current_hypervisors_list()
    for current_hypervisor in current_hypervisors['objects']:
        server = Server(id = current_hypervisor['id'],
                        name=current_hypervisor['name'],
                        ip=current_hypervisor['ip'],
                        snmp_commit=current_hypervisor['snmp_commit'],
                        snmp_version=current_hypervisor['snmp_version'],
                        services = current_hypervisor['services'])
        server_list.append(server)
    return server_list
if __name__ == "__main__":
    # Poll loop: sample every registered server, then sleep. Every third
    # pass (count 2 -> 0) the server list is re-synced from Nova/the API.
    server_list = get_hypervisors_list()
    count = 2
    while True:
        if 0 == count:
            print '==================================='
            server_list = get_hypervisors_list()
            count = 2
        for server in server_list:
            try:
                print "Get SNMP message from %s(%s)" % (server.name, server.ip)
                server.get_snmp_message()
                server.post_snmp_messgae()
            except Exception:
                # Best-effort: one unreachable host must not stop the loop.
                pass
        count -= 1
        time.sleep(150)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,306 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/network_monitor/panel.py | from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.monitor import dashboard
class Network_Monitor(horizon.Panel):
    """Horizon panel for the "Network_Monitor" view of the Monitor
    dashboard."""
    name = _("Network_Monitor")
    slug = "network_monitor"


# Attach the panel to the Monitor dashboard at import time.
dashboard.Monitor.register(Network_Monitor)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,307 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/equipment_monitor/tags.py | from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from horizon import messages
from openstack_dashboard.dashboards.monitor import monitor
from openstack_dashboard.dashboards.monitor.equipment_monitor import tables as project_tables
from horizon.utils import functions as utils
class NodeListTab(tabs.TableTab):
    """Tab listing compute nodes, backed by ``monitor.node_list``."""
    name = _("NodeList")
    slug = "node_list"
    table_classes = (project_tables.NodeListTable,)
    template_name = ("horizon/common/_detail_table.html")
    # Loaded lazily when the tab is first selected.
    preload = False

    def get_node_list_data(self):
        """Return the node list, or an empty list with an error message
        on failure."""
        nodelist = []
        try:
            nodelist = monitor.node_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve hypervisor information.'))
        return nodelist
class EquipmentListTab(tabs.TableTab):
    """Tab listing monitored equipment, backed by the monitor API."""
    name = _("EquipmentList")
    slug = "equipment_list"
    table_classes = (project_tables.EquipmentListTable,)
    template_name = ("horizon/common/_detail_table.html")
    # Loaded lazily when the tab is first selected.
    preload = False

    def get_equipment_list_data(self):
        """Return the equipment list, or [] with an error message on
        failure."""
        try:
            # NOTE(review): the tab's slug is passed as `addr` -- confirm
            # the API really expects the slug here rather than a location.
            equipments = monitor.equipment_monitor_equipment_list(request = self.request,
                                                                 marker = None,
                                                                 paginate = False,
                                                                 addr = self.slug)
            return equipments
        except Exception:
            self._has_more = False
            error_message = _('Unable to get instances')
            exceptions.handle(self.request, error_message)
            return []
class EquipmentMonitorTabs(tabs.TabGroup):
    """Tab group combining the node and equipment list tabs; sticky so
    the selected tab survives page reloads."""
    slug = "equipment_monitor_tabs"
    tabs = (NodeListTab, EquipmentListTab, )
    sticky = True
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,308 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/network_monitor/views.py | from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
from horizon import forms
from horizon import tables
from horizon import messages
from openstack_dashboard.dashboards.monitor import monitor
from openstack_dashboard.dashboards.monitor.network_monitor import tags as project_tags
from openstack_dashboard.dashboards.monitor.network_monitor import forms as project_forms
from openstack_dashboard.dashboards.monitor.network_monitor import tables as project_tables
from openstack_dashboard.dashboards.project.images.images import forms as image_forms
class IndexView(tabs.TabbedTableView):
    """Landing page of the network-monitor panel: renders the tab group."""
    tab_group_class = project_tags.NetworkMonitorTabs
    template_name = 'monitor/network_monitor/index.html'
class EquipmentDetailView(tables.DataTableView):
    """Interface list for a single piece of equipment (URL kwarg
    ``equipment_id``)."""
    table_class = project_tables.InterfaceListTable
    template_name = 'monitor/network_monitor/equipment_detail.html'

    def get_data(self):
        interfaces = monitor.get_interface(self.request, self.kwargs["equipment_id"])
        return interfaces
class InterfaceDetailView(tables.DataTableView):
    """Paginated syslog listing for one interface.

    The interface is encoded in the URL as a "-"-separated string,
    e.g. "GigabitEthernet0-1-...".
    """
    table_class = project_tables.SyslogListTable
    template_name = 'monitor/network_monitor/interface_detail.html'

    def has_more_data(self, table):
        # _more is set by get_data() before the table is rendered.
        return self._more

    def get_data(self):
        interface = self.kwargs['interface'].split("-")
        filters = self.get_filters()
        marker = self.request.GET.get('marker')
        syslogs, self._more, count = monitor.syslog_list(self.request,
                                                         marker=marker,
                                                         paginate=True,
                                                         interface = self.kwargs['interface'],
                                                         filters = filters)
        # Surface the total log count, e.g. "GigabitEthernet0/1 has N Logs".
        messages.info(self.request, "%s has %s Logs" % (interface[0] + "/" + interface[1], count))
        return syslogs

    def get_filters(self):
        """Translate the table's filter widget state into an API filter
        dict (only filters the API itself understands are forwarded)."""
        filters = {}
        filter_field = self.table.get_filter_field()
        filter_string = self.table.get_filter_string()
        filter_action = self.table._meta._filter_action
        if filter_field and filter_string and (filter_action.is_api_filter(filter_field)):
            filters[filter_field] = filter_string
        return filters
class MessageDetailView(tables.DataTableView):
    """Detail table for a single syslog message (URL kwarg
    ``message_id``)."""
    table_class = project_tables.MessageDetailTable
    template_name = 'monitor/network_monitor/message_detail.html'

    def get_data(self):
        message_id = self.kwargs['message_id']
        message = monitor.logs_detail(self.request, message_id)
        return message
class NetworkMonitorFilterView(forms.ModalFormView):
    """Modal dialog presenting the syslog FilterForm; returns to the
    panel index on success."""
    form_class = project_forms.FilterForm
    template_name = 'monitor/network_monitor/filter.html'
    context_object_name = 'image'
    success_url = reverse_lazy("horizon:monitor:network_monitor:index")
class FilterOptClass(object):
    """Normalized filter options parsed from the filter form's POST data.

    Optional fields stay ``None`` when absent or set to the 'Any'
    sentinel, so downstream code can test them with simple truthiness.
    The constructor parameter was renamed from ``dict``, which shadowed
    the builtin.
    """
    attack_type = None
    addr = None
    priority = None
    destip = None
    srcip = None
    StartTime = None
    EndTime = None
    tag_list = None

    def __init__(self, post_dict):
        # NOTE(review): these keys ("StartTime"/"EndTime") do not match
        # the form field names (start_time/end_time) -- confirm against
        # the actual POST payload.
        self.addr = post_dict["addr"]
        # 'Any' means "do not filter on this field".
        if post_dict["attack_type"] != 'Any':
            self.attack_type = post_dict["attack_type"]
        if post_dict["priority"] != 'Any':
            self.priority = post_dict["priority"]
        if post_dict["StartTime"]:
            self.StartTime = post_dict["StartTime"]
        if post_dict["EndTime"]:
            self.EndTime = post_dict["EndTime"]
        if post_dict["destip"]:
            self.destip = post_dict["destip"]
        if post_dict["srcip"]:
            self.srcip = post_dict["srcip"]
        self.tag_list = self.get_tag_list()

    def get_tag_list(self):
        # Hard-coded device tag for now.
        return ['Newtouch-H3C', ]
def get_filter_opt(post_dict):
    """Build a FilterOptClass from raw POST data."""
    return FilterOptClass(post_dict)
class NetworkMonitorFilterActionView(tables.DataTableView):
    """Result table for a submitted syslog filter: parses the POSTed
    form values into FilterOptClass and queries the monitor API."""
    table_class = project_tables.FilterSyslogListTable
    template_name = 'monitor/network_monitor/interface_detail.html'

    def has_more_data(self, table):
        # _more is set by get_data() before the table is rendered.
        return self._more

    def get_data(self):
        filter_opt = get_filter_opt(self.request.POST)
        # marker is read but unused while paginate=False -- kept so
        # pagination can be re-enabled without further changes.
        marker = self.request.GET.get('marker')
        syslogs, self._more, count = monitor.filter_syslog_list(self.request,
                                                                marker=marker,
                                                                paginate=False,
                                                                opt = filter_opt)
        messages.info(self.request, "Find %s Logs" % (count))
        return syslogs
# class BlackListView(tables.DataTableView):
# table_class = project_tables.BlackListTable
# template_name = 'monitor/network_monitor/blacklist.html'
#
# def has_more_data(self, table):
# return self._more
#
# def get_data(self):
# self._more = False
# equipment_id = self.kwargs['equipment_id']
# # print equipment_id
# return []
class BlackListView(forms.ModalFormView):
    """Modal dialog for adding an address to the equipment blacklist."""
    context_object_name = 'blacklist'
    form_class = project_forms.AddBlacklistForm
    template_name = 'monitor/network_monitor/add_blacklist.html'
    success_url = reverse_lazy("horizon:monitor:network_monitor:index")
69,309 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/network_monitor/tags.py | from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from horizon import messages
from openstack_dashboard.dashboards.monitor import monitor
from openstack_dashboard.dashboards.monitor.network_monitor import tables
class AddrBaseTab(tabs.TableTab):
    """Base tab showing the equipment list for a single location.

    Subclasses only override ``name`` and ``slug``; ``slug`` doubles as
    the address filter passed to the monitor backend.
    """
    name = _("")
    slug = ""
    table_classes = (tables.EquipmentListTable,)
    template_name = ("horizon/common/_detail_table.html")
    preload = False

    def get_equipment_list_data(self):
        """Return equipment rows for this tab's address; [] on failure.

        A POST to this view is an "add to blacklist" request and is
        handled before the list is refreshed.
        """
        try:
            if self.request.method == "POST":
                result = monitor.add_blacklist(request=self.request)
                if result == 0:
                    messages.success(self.request, _("Add Blacklist ok"))
                elif result == 1:
                    messages.info(self.request, _("Alread in list"))
                else:
                    messages.info(self.request, _("Error"))
            return monitor.network_monitor_equipment_list(
                request=self.request,
                marker=None,
                paginate=False,
                addr=self.slug)
        except Exception:
            self._has_more = False
            exceptions.handle(self.request, _('Unable to get instances'))
            return []
class ShangHaiTab(AddrBaseTab):
    """Equipment tab for the ShangHai location."""
    slug = "shanghai"
    name = _("ShangHai")
class BeijingTab(AddrBaseTab):
    """Equipment tab for the BeiJing location."""
    slug = "beijing"
    name = _("BeiJing")
class GuangZhouTab(AddrBaseTab):
    """Equipment tab for the GuangZhou location."""
    slug = "guangzhou"
    name = _("GuangZhou")
class NetworkMonitorTabs(tabs.TabGroup):
    """Tab group with one equipment tab per monitored location."""
    slug = "network_monitor_tabs"
    sticky = True
    tabs = (ShangHaiTab, BeijingTab, GuangZhouTab, )
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,310 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/equipment_monitor/tables.py | from django.utils.translation import ugettext_lazy as _
from horizon import tables
class EquipmentListFilterAction(tables.FilterAction):
    """Client-side search box for the equipment tables."""
    name = "filter"
class NodeListTable(tables.DataTable):
    """Table listing monitored nodes (hostname, IP, status)."""
    id = tables.Column("id", verbose_name=_("ID"), hidden=True)
    hostname = tables.Column("hostname",
                             verbose_name=_("Hostname"),
                             link="#")
    ip = tables.Column('ip', verbose_name=_("Ip"))
    status = tables.Column('status', verbose_name=_("Status"))

    def get_object_id(self, datum):
        """Row key: the node's primary id rendered as a string."""
        return str(datum.id)

    class Meta:
        name = "node_list"
        verbose_name = _("NodeList")
        table_actions = (EquipmentListFilterAction, )
        multi_select = False
class EquipmentListTable(tables.DataTable):
    """Table of network equipment with health and connection metrics."""
    id = tables.Column("id", verbose_name=_("ID"), hidden=True)
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="#")
    ip = tables.Column('ip', verbose_name=_("Ip"))
    temperature = tables.Column("temperature", verbose_name=_("Temperature"))
    cpu_usage = tables.Column('cpu_usage', verbose_name=_("CpuUsage"))
    mem_usage = tables.Column('mem_usage', verbose_name=_("MemUsage"))
    max_connect_num = tables.Column('max_connect_num',
                                    verbose_name=_("MaxConnectNum"))
    cur_connect_num = tables.Column('cur_connect_num',
                                    verbose_name=_("CurConnectNum"))
    interface_num = tables.Column('interface_num',
                                  verbose_name=_("InterFaceNum"),
                                  link="horizon:monitor:equipment_monitor:interface")

    def get_object_id(self, datum):
        # Composite key so the interface link can carry both values.
        return "{0}-{1}".format(datum.id, datum.interface_num)

    class Meta:
        name = "equipment_list"
        verbose_name = _("Equipment List")
        table_actions = (EquipmentListFilterAction, )
        multi_select = False
class InterfaceListTable(tables.DataTable):
    """Per-interface SNMP counters (octets, discards, errors) table."""
    id = tables.Column("id", hidden=True)
    name = tables.Column("name", verbose_name=_("Name"))
    ip = tables.Column("ip", verbose_name=_("IP"))
    inoctets = tables.Column("inoctets", verbose_name=_("InOctets"))
    outoctets = tables.Column("outoctets", verbose_name=_("OutOctets"))
    indiscards = tables.Column("indiscards", verbose_name=_("InDiscards"))
    outdiscards = tables.Column("outdiscards", verbose_name=_("OutDiscards"))
    inerrors = tables.Column("inerrors", verbose_name=_("InErrors"))
    outerrors = tables.Column("outerrors", verbose_name=_("OutErrors"))
    status = tables.Column("status", verbose_name=_("Status"))

    class Meta:
        name = 'interface list'
        verbose_name = _("Interface List")
        multi_select = False
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,311 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/enabled/_50_newtouch.py | # The name of the dashboard to be added to HORIZON['dashboards']. Required.
# Horizon plugin registration file: declares the 'newtouch' dashboard.
DASHBOARD = 'newtouch'
# If set to True, this dashboard will not be added to the settings.
DISABLED = False
# A dictionary of exception classes to be added to HORIZON['exceptions'].
ADD_EXCEPTIONS = {}
# A list of applications to be added to INSTALLED_APPS.
ADD_INSTALLED_APPS = [
    'openstack_dashboard.dashboards.newtouch',
]
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,312 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/overview/panel.py | from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.newtouch import dashboard
class Overview(horizon.Panel):
    """Overview panel of the Newtouch dashboard."""
    slug = "overview"
    name = _("Overview")


dashboard.Newtouch.register(Overview)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,313 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/server/panel.py | from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.newtouch import dashboard
class Server(horizon.Panel):
    """Server panel of the Newtouch dashboard."""
    slug = "server"
    name = _("Server")


dashboard.Newtouch.register(Server)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,314 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/equipment_monitor/views.py | from horizon import tabs,tables
from openstack_dashboard.dashboards.monitor.equipment_monitor import tags as project_tags
from openstack_dashboard.dashboards.monitor.equipment_monitor import tables as project_tables
# from openstack_dashboard import api
from openstack_dashboard.dashboards.monitor import monitor
class IndexView(tabs.TabbedTableView):
    """Tabbed landing page for the equipment monitor panel."""
    tab_group_class = project_tags.EquipmentMonitorTabs
    template_name = 'monitor/equipment_monitor/index.html'
class InterfaceDetailView(tables.DataTableView):
    """Lists SNMP interface statistics for one piece of equipment."""
    table_class = project_tables.InterfaceListTable
    template_name = 'monitor/equipment_monitor/interface_list.html'

    def get_data(self):
        """Fetch interface rows for the equipment named in the URL."""
        return monitor.equipment_monitor_interface_list(
            self.request, self.kwargs["interface"])
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,315 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/event/panel.py | from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.newtouch import dashboard
class Event(horizon.Panel):
    """Event panel of the Newtouch dashboard."""
    slug = "event"
    name = _("Event")


dashboard.Newtouch.register(Event)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,316 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/network_monitor/urls.py | from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.monitor.network_monitor import views
# URL routes for the network_monitor panel.  The first argument is the
# legacy view-prefix accepted by django.conf.urls.patterns(); views are
# nonetheless referenced directly below.
urlpatterns = patterns('openstack_dashboard.dashboards.monitor.network_monitor.views',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^equipment/(?P<equipment_id>[^/]+)/$', views.EquipmentDetailView.as_view(), name='equipment'),
    url(r'^interface/(?P<interface>[^/]+)/$', views.InterfaceDetailView.as_view(), name='interface'),
    url(r'^detail/(?P<message_id>[^/]+)/$', views.MessageDetailView.as_view(), name='detail'),
    url(r'^filter/$', views.NetworkMonitorFilterView.as_view(), name='filter'),
    url(r'^filteraction/$', views.NetworkMonitorFilterActionView.as_view(), name='filteraction'),
    url(r'^blacklist/(?P<equipment_id>[^/]+)/$', views.BlackListView.as_view(), name='blacklist'),
    # url(r'^addblacklist/$', views.BlackListActionView.as_view(), name='addblacklist'),
    # url(r'^addblacklist/(?P<equipment_id>[^/]+)/$', views.BlackListView.as_view(), name='addblacklist'),
)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,317 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/equipment_monitor/panel.py | from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.monitor import dashboard
class Equipment_Monitor(horizon.Panel):
    """Equipment monitor panel of the Monitor dashboard."""
    slug = "equipment_monitor"
    name = _("Equipment_Monitor")


dashboard.Monitor.register(Equipment_Monitor)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,318 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/equipment_monitor/urls.py | from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.monitor.equipment_monitor import views
# URL routes for the equipment_monitor panel.
urlpatterns = patterns('',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^interface/(?P<interface>[^/]+)/$', views.InterfaceDetailView.as_view(), name='interface'),
)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,319 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/dashboard.py | from django.utils.translation import ugettext_lazy as _
import horizon
class ServerPanels(horizon.PanelGroup):
    """Sidebar group holding the server-related panels."""
    slug = "server"
    name = _("Server")
    panels = ('overview', 'server',)
class AlertPanels(horizon.PanelGroup):
    """Sidebar group holding the alerting-related panels."""
    slug = "alert"
    name = _("Alert")
    panels = ('rules', 'event',)
class Newtouch(horizon.Dashboard):
    """Top-level Newtouch dashboard grouping server and alert panels."""
    slug = "newtouch"
    name = _("Newtouch")
    panels = (ServerPanels, AlertPanels)
    # Slug of the panel shown when the dashboard itself is opened.
    default_panel = 'server'


horizon.register(Newtouch)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,320 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/server/forms.py | from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.dashboards.newtouch.models import Server,Service
def get_available_services():
    """Yield ``(name, name)`` choice pairs for every known Service."""
    for service in Service.objects.all():
        yield (service.name, service.name)
class ServerEditServicesForm(forms.SelfHandlingForm):
    """Modal form assigning monitored services to a server.

    Fix: the choice list is now rebuilt in ``__init__`` on every request.
    Previously ``choices=get_available_services()`` was evaluated once at
    import time, so services added later never appeared — and because the
    helper returns a generator, it was exhausted after its first
    iteration, leaving the field with no valid choices on subsequent
    validations.  A leftover Python-2 debug ``print`` was also removed.
    """
    services_available = forms.MultipleChoiceField(
        label=_('services_available'),
        widget=forms.CheckboxSelectMultiple,
        choices=())

    def __init__(self, request, *args, **kwargs):
        super(ServerEditServicesForm, self).__init__(request, *args, **kwargs)
        # Materialize the generator so choices survive repeated iteration.
        self.fields['services_available'].choices = \
            list(get_available_services())

    def handle(self, request, data):
        """Replace the server's service set with the selected services."""
        try:
            server = Server.objects.get(pk=self.initial['pk'])
            server.services.clear()
            for service in data['services_available']:
                server.services.add(Service.objects.get(name=service).id)
            server.save()
            message = _('Successfully Add Services %s') % (self.initial['pk'])
            messages.success(request, message)
        except Exception:
            exceptions.handle(request, _('Unable to Add Services.'))
        return True
class EditServerForm(forms.SelfHandlingForm):
    """Modal form editing a server's SNMP and SSH access settings."""
    snmp_version = forms.CharField(label=_("SNMP Version"),
                                   max_length=255)
    snmp_commit = forms.CharField(label=_("SNMP Commit"),
                                  max_length=255)
    ssh_name = forms.CharField(label=_("SSH Name"),
                               max_length=255,
                               required=False)
    ssh_key = forms.CharField(label=_("SSH Key"),
                              max_length=255,
                              required=False)

    def __init__(self, request, *args, **kwargs):
        super(EditServerForm, self).__init__(request, *args, **kwargs)

    def handle(self, request, data):
        """Persist the edited settings; report success or failure."""
        pk = self.initial['pk']
        snmp_version = data['snmp_version']
        snmp_commit = data['snmp_commit']
        ssh_name = data['ssh_name']
        ssh_key = data['ssh_key']
        try:
            Server.objects.filter(pk=pk).update(
                snmp_version=snmp_version,
                snmp_commit=snmp_commit,
                ssh_name=ssh_name,
                ssh_key=ssh_key)
            server_name = Server.objects.get(pk=pk).name
            messages.success(
                request, _('Successfully update Server %s') % (server_name))
        except Exception:
            exceptions.handle(request, _('Unable to update the Server.'))
        return True
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,321 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/server/urls.py | from django.conf.urls import patterns
from django.conf.urls import url
from tastypie.api import Api
from openstack_dashboard.dashboards.newtouch import resource
from .views import ServerListView, ServerDetailView, ServerEditView, ServerEditServicesView
# Tastypie REST API (v1) exposing the server-monitoring resources.
api = Api('v1')
api.register(resource.ServerMonitorMessageResource())
api.register(resource.ServerResource())
api.register(resource.ServiceResource())
# URL routes for the server panel.  The stop/start routes currently reuse
# ServerEditServicesView — presumably placeholders; confirm before use.
urlpatterns = patterns(
    '',
    url(r'^$', ServerListView.as_view(), name="index"),
    url(r'^(?P<pk>\d+)/$', ServerDetailView.as_view(), name='detail'),
    url(r'^(?P<pk>\d+)/edit/$', ServerEditView.as_view(), name='edit'),
    url(r'^(?P<pk>\d+)/add/$', ServerEditServicesView.as_view(), name='services'),
    # url(r'^(?P<pk>\d+)/services/(?P<service_id>\d+)/restart/$', server_service_restart, name='restart'),
    url(r'^(?P<pk>\d+)/services/stop/$', ServerEditServicesView.as_view(), name='stop'),
    url(r'^(?P<pk>\d+)/services/start/$', ServerEditServicesView.as_view(), name='start'),
)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,322 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/server/tables.py | import netsnmp
from django.template import defaultfilters as filters
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import redirect
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import messages
from openstack_dashboard.dashboards.newtouch.models import ServerMonitorMessage,Server,Service
# UCD-SNMP extension OIDs (.1.3.6.1.4.1.2021.*) — one per service.
# Walking one of these presumably triggers the matching restart script on
# the target host's SNMP agent; confirm against the agent configuration.
SNMP_RESTART_HTTPD = '.1.3.6.1.4.1.2021.18'
SNMP_RESTART_NOVA_API = '.1.3.6.1.4.1.2021.19'
SNMP_RESTART_NOVA_COMPUTE = '.1.3.6.1.4.1.2021.20'
SNMP_RESTART_MYSQL = '.1.3.6.1.4.1.2021.21'
SNMP_RESTART_GATOREDIS = '.1.3.6.1.4.1.2021.22'
def safe_unordered_list(value):
    """Render *value* as an HTML unordered list with autoescaping on."""
    return filters.unordered_list(value, autoescape=True)
def get_service_list(server):
    """Return the names of all services attached to *server*."""
    services = getattr(server, "services", None)
    return [svc.name for svc in services.all()]
def server_service_restart(request, **kwargs):
    """Restart ``kwargs['service']`` on ``kwargs['server_id']`` via SNMP.

    Walks a per-service UCD-SNMP extension OID on the target host; any
    unknown service name falls back to the HTTPD OID.  Index 9 of the
    walk result appears to be the script's output line shown to the
    user — TODO confirm against the agent's extension table layout.
    """
    server = Server.objects.get(pk=kwargs['server_id'])
    service = kwargs['service']
    session = netsnmp.Session(DestHost=server.ip, Version=2,
                              Community=server.snmp_commit)
    oid = {
        'nova-api': SNMP_RESTART_NOVA_API,
        'nova-compute': SNMP_RESTART_NOVA_COMPUTE,
        'mysql': SNMP_RESTART_MYSQL,
        'apache': SNMP_RESTART_HTTPD,
        'go_to_redis': SNMP_RESTART_GATOREDIS,
    }.get(service.name, SNMP_RESTART_HTTPD)
    vars = netsnmp.VarList(netsnmp.Varbind(oid))
    restart_message = session.walk(vars)
    messages.success(request,
                     _('%s') % (restart_message[9]))
def server_service_stop(**kwargs):
    # Placeholder: stopping a service is not implemented yet.  The lookup
    # below only validates that the server exists (raises DoesNotExist
    # otherwise); 'service' is read but unused.
    server = Server.objects.get(pk = kwargs['server_id'])
    service = kwargs['service']
def server_service_start(**kwargs):
    # Placeholder: starting a service is not implemented yet.  Mirrors
    # server_service_stop — only the existence lookup is performed.
    server = Server.objects.get(pk = kwargs['server_id'])
    service = kwargs['service']
class FilterAction(tables.FilterAction):
    """Client-side search box for the server tables."""
    name = "filter"
class EditActionLink(tables.LinkAction):
    """Row action opening the server-edit modal."""
    name = "edit_server"
    classes = ("ajax-modal",)
    verbose_name = _("Edit Server")
    url = "horizon:newtouch:server:edit"
class ServicesActionLink(tables.LinkAction):
    """Row action opening the service-assignment modal."""
    name = "edit_service"
    classes = ("ajax-modal",)
    verbose_name = _("Edit Service")
    url = "horizon:newtouch:server:services"
class RestartActionLink(tables.Action):
    """Row action restarting the selected service on the current server."""
    name = "restart_service"
    verbose_name = _("Restart Service")
    classes = ("btn-danger",)

    def single(self, table, request, id):
        # The server detail page URL ends with the server's pk.
        server_id = int(request.path.strip('/').split('/')[-1])
        server = Server.objects.get(pk=server_id)
        service = Service.objects.get(pk=id)
        try:
            server_service_restart(request, server_id=server_id,
                                   service=service)
            messages.success(
                request,
                _('Successfully Restart %s Service On %s(%s)')
                % (service.name, server.name, server.ip))
        except Exception:
            exceptions.handle(
                request,
                _('Unable to Restart %s Service On %s(%s)')
                % (service.name, server.name, server.ip))
        return redirect('horizon:newtouch:server:detail', server_id)
class StopActionLink(tables.Action):
    """Row action stopping the selected service on the current server.

    Fix: the server id was extracted with ``str.partition('/')``, which on
    a path like ``newtouch/server/3`` yields ``"server/3"`` and makes the
    following ``int()`` raise ValueError before the try block.  Use the
    same last-path-component extraction RestartActionLink already uses.
    """
    name = "stop_service"
    verbose_name = _("Stop Service")
    classes = ("btn-danger",)

    def single(self, table, request, id):
        # The server detail page URL ends with the server's pk.
        server_id = int(request.path.strip('/').split('/')[-1])
        server = Server.objects.get(pk=server_id)
        service = Service.objects.get(pk=id)
        try:
            server_service_stop(server_id=server_id, service=service)
            messages.success(
                request,
                _('Successfully Stop %s Service On %s(%s)')
                % (service.name, server.name, server.ip))
        except Exception:
            exceptions.handle(
                request,
                _('Unable to Stop %s Service On %s(%s)')
                % (service.name, server.name, server.ip))
        return redirect('horizon:newtouch:server:detail', server_id)
class StartActionLink(tables.Action):
    """Row action starting the selected service on the current server.

    Fix: same path-parsing bug as StopActionLink — ``partition('/')``
    produced a non-numeric server id (e.g. ``"server/3"``), so ``int()``
    raised ValueError outside the try block.  Extract the last path
    component instead, matching RestartActionLink.
    """
    name = "Start_service"
    verbose_name = _("Start Service")
    classes = ("btn-danger",)

    def single(self, table, request, id):
        # The server detail page URL ends with the server's pk.
        server_id = int(request.path.strip('/').split('/')[-1])
        server = Server.objects.get(pk=server_id)
        service = Service.objects.get(pk=id)
        try:
            server_service_start(server_id=server_id, service=service)
            messages.success(
                request,
                _('Successfully Start %s Service On %s(%s)')
                % (service.name, server.name, server.ip))
        except Exception:
            exceptions.handle(
                request,
                _('Unable to Start %s Service On %s(%s)')
                % (service.name, server.name, server.ip))
        return redirect('horizon:newtouch:server:detail', server_id)
class ServersTable(tables.DataTable):
    """Main server list with per-row edit/service actions."""
    name = tables.Column('name', verbose_name=_('Name'),
                         link="horizon:newtouch:server:detail",
                         form_field=forms.CharField(max_length=64))
    # NOTE(review): the 'ip' column is labelled "Description" — looks like
    # a copy/paste slip in the original; preserved as-is.
    ip = tables.Column('ip',
                       verbose_name=_('Description'),
                       form_field=forms.CharField(
                           widget=forms.Textarea(),
                           required=False))
    create_time = tables.Column('create_time', verbose_name=_('Create Time'))
    update_time = tables.Column('update_time', verbose_name=_('Update Time'))
    snmp_version = tables.Column('snmp_version', verbose_name=_('Snmp Version'))
    snmp_commit = tables.Column('snmp_commit', verbose_name=_('Snmp Commit'))
    # Renders each server's service names as a nested HTML list.
    services = tables.Column(get_service_list,
                             wrap_list=True,
                             filters=(safe_unordered_list,),
                             verbose_name=_('Services'))

    def render(self):
        # No-op override kept for interface compatibility.
        return super(ServersTable, self).render()

    class Meta:
        name = "server"
        verbose_name = _("Server")
        table_actions = (FilterAction,)
        row_actions = (EditActionLink, ServicesActionLink)
        multi_select = False
class ServerMessagesTable(tables.DataTable):
    """Time series of resource-usage samples for one server."""
    time = tables.Column('time', verbose_name=_("Time"))
    cpu_usage = tables.Column('cpu_usage', verbose_name=_("CPU Usgae"))
    mem_usage = tables.Column('mem_usage', verbose_name=_("Mem Usage"))
    disk_usage = tables.Column('disk_usage', verbose_name=_("Disk Usage"))
    process_num = tables.Column('process_num', verbose_name=_("Process Num"))

    class Meta:
        name = "server_messages"
        verbose_name = _("Server Messages")
        multi_select = False
class ServerServicesTable(tables.DataTable):
    """Services running on one server, with a restart row action."""
    server = tables.Column('server', hidden=True, verbose_name=_("Server"))
    name = tables.Column('name', verbose_name=_("Name"))
    status = tables.Column('status', verbose_name=_("Status"))

    class Meta:
        name = "server_services"
        verbose_name = _("Server Services")
        row_actions = (RestartActionLink, )
        multi_select = False
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,323 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/server/views.py | import datetime
import random
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render_to_response, render
# from django.views.generic import ListView,DetailView
import time
from openstack_dashboard.dashboards.newtouch.models import ServerMonitorMessage,Server,Service
from openstack_dashboard.dashboards.newtouch.server import tables as project_tables
from openstack_dashboard.dashboards.newtouch.server import forms as project_forms
from horizon import tables,forms
# Create your views here.
class ServerEditView(forms.ModalFormView):
    """Modal form for editing a server's SNMP and SSH access settings."""
    template_name = 'newtouch/server/edit.html'
    form_class = project_forms.EditServerForm
    success_url = reverse_lazy('horizon:newtouch:server:index')

    def get_initial(self):
        # Seed the form with the stored credentials of the selected server.
        pk = self.kwargs["pk"]
        server = Server.objects.get(pk=pk)
        initial = {'pk': pk}
        initial['snmp_commit'] = server.snmp_commit
        initial['snmp_version'] = server.snmp_version
        initial['ssh_name'] = server.ssh_name
        initial['ssh_key'] = server.ssh_key
        return initial

    def get_context_data(self, **kwargs):
        # Expose the primary key to the template.
        context = super(ServerEditView, self).get_context_data(**kwargs)
        context['pk'] = self.kwargs['pk']
        return context
class ServerEditServicesView(forms.ModalFormView):
    """Modal form for choosing which services are monitored on a server."""
    template_name = 'newtouch/server/edit_services.html'
    form_class = project_forms.ServerEditServicesForm
    success_url = reverse_lazy('horizon:newtouch:server:index')

    def get_initial(self):
        server = Server.objects.get(pk=self.kwargs["pk"])
        available = [service.name for service in server.services.all()]
        return {'pk': self.kwargs["pk"],
                'services_available': available}

    def get_context_data(self, **kwargs):
        context = super(ServerEditServicesView, self).get_context_data(**kwargs)
        pk = self.kwargs['pk']
        context['pk'] = pk
        # List the services currently attached, for display in the template.
        server = Server.objects.get(pk=pk)
        context['service_used'] = [service.name for service in server.services.all()]
        return context
class ServerDetailService(object):
    """Lightweight view-model for one monitored service on a server."""

    # Class-level defaults, overwritten per instance in __init__.
    id = None
    name = None
    status = None

    def __init__(self, **kwargs):
        # 'server' is the owning server's key; 'status' is the display
        # string ("UP"/"DOWN") built by the caller.
        for attr in ('id', 'name', 'status', 'server'):
            setattr(self, attr, kwargs[attr])

    def __str__(self):
        return self.name
class ServerDetailChart(object):
    """Container for one chart definition handed to the detail template."""

    def __init__(self, **kwargs):
        # 'type' is the chart type string, 'container' the target DOM id.
        self.type, self.data = kwargs['type'], kwargs['data']
        self.container, self.extra = kwargs['container'], kwargs['extra']

    def __str__(self):
        return self.type
class ServerListView(tables.DataTableView):
    """Index page listing every registered server."""
    template_name = 'newtouch/server/index.html'
    table_class = project_tables.ServersTable

    def get_data(self):
        # Every Server row backs one table row; no filtering is applied.
        return Server.objects.all()
class ServerDetailView(tables.MultiTableView):
    """Detail page for one server: service statuses, recent samples, and a
    CPU/memory/disk usage chart built from the last 24 monitor messages."""
    table_classes = (project_tables.ServerServicesTable, project_tables.ServerMessagesTable)
    template_name = 'newtouch/server/server_detail.html'
    failure_url = reverse_lazy('horizon:newtouch:server:index')

    def get_server_services_data(self, **kwargs):
        """Parse the newest monitor message's process_status into service rows.

        process_status looks like a stringified dict, e.g.
        "{u'nginx': 1, u'mysqld': 0}" — presumably the repr of a Python dict;
        the string surgery below depends on that exact format (TODO confirm
        against the agent that writes ServerMonitorMessage).
        """
        server_id = self.kwargs.get('pk')
        services = []
        # Newest message first; [0] raises IndexError if none exist yet.
        last_server_message = ServerMonitorMessage.objects.all().filter(server=Server.objects.get(pk=server_id)).order_by("-id")[0]
        service_status = last_server_message.process_status.lstrip("{").rstrip("}").split(',')
        for service in service_status:
            service = str(service)
            # Each entry is "<name>: <flag>"; split on the first colon.
            name, sep, status = service.partition(":")
            # Strip the u'...' repr decoration around the name.
            name = name.strip().lstrip("u").strip("'")
            id = Service.objects.get(name=name).id
            status = status.strip()
            # Flag '1' means the process was seen running.
            if(status == '1'):
                status = 'UP'
            else:
                status = 'DOWN'
            services.append(ServerDetailService(id = id,server = server_id,name= name, status=status))
        return services

    def get_server_messages_data(self, **kwargs):
        """Return the 20 most recent monitor messages for this server."""
        pk = self.kwargs.get('pk')
        server_messages = ServerMonitorMessage.objects.all().filter(server=Server.objects.get(pk=pk)).order_by("-id")
        if len(server_messages) > 20:
            server_messages = list(server_messages)[:20]
        return server_messages
    # def get_queryset(self, **kwargs):
    #     pk = self.kwargs.get('pk')
    #     server_detail_list = super(ServerDetailView, self).get_queryset()
    #     server_detail_list = server_detail_list.filter(server=Server.objects.get(pk=pk))
    #
    #     return server_detail_list
    #

    def get_context_data(self, **kwargs):
        """Build the usage line chart from the last 24 samples (oldest first)."""
        context = super(ServerDetailView, self).get_context_data(**kwargs)
        pk = self.kwargs.get('pk')
        monitor_message_list = ServerMonitorMessage.objects.all().filter(server=Server.objects.get(pk=pk))
        # Keep the 24 newest samples, in chronological order for the x axis.
        monitor_message_list = list(monitor_message_list)[-24:]
        tooltip_date = "%d %b %Y %H:%M:%S %p"
        extra_serie = {
            "tooltip": {"y_start": "There are ", "y_end": " calls"},
            "date_format": tooltip_date,
            # 'color': '#a4c639'
        }
        # Chart x values are epoch milliseconds, as the JS chart expects.
        xdata = [int(time.mktime(message.time.timetuple()) * 1000) for message in monitor_message_list]
        ydata = [str(message.cpu_usage) for message in monitor_message_list]
        ydata2 = [str(message.mem_usage) for message in monitor_message_list]
        ydata3 = [str(message.disk_usage) for message in monitor_message_list]
        data = {
            'x': xdata,
            'name1': 'cpu', 'y1': ydata, 'extra1': extra_serie,
            'name2': 'memory', 'y2': ydata2, 'extra2': extra_serie,
            'name3': 'disk', 'y3': ydata3, 'extra3': extra_serie,
        }
        extra = {
            'x_is_date': True,
            'x_axis_format': '%b %d %H:%M:%S',
            'tag_script_js': True,
            'jquery_on_ready': False,
        }
        chart1 = ServerDetailChart(type="lineChart",
                                   container="linechart_container",
                                   data=data,
                                   extra=extra)
        context["chart1"] = chart1
        return context
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,324 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/monitor.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import MySQLdb
import netsnmp
import telnetlib
from uuid import uuid4
from django.utils.translation import ugettext_lazy as _
from horizon.utils import functions as utils
from openstack_dashboard.api.nova import hypervisor_list
# Connection settings for the rsyslog and monitoring MySQL databases.
# NOTE(review): credentials are hard-coded; they should come from settings.
MYSQL_HOST = '192.168.164.129'
MYSQL_SYSLOG_DB_NAME = 'Syslog'
MYSQL_SYSLOG_DB_USER = 'rsyslog'
MYSQL_SYSLOG_DB_PASSWD = '123456'
MYSQL_MONITOR_DB_NAME = 'monitor'
MYSQL_MONITOR_DB_USER = 'monitor'
MYSQL_MONITOR_DB_PASSWD = '123456'
# Default SNMP polling target(s).
DestHostList = ["192.168.202.1"]
# Standard IF-MIB interface-table OIDs (1.3.6.1.2.1.2.2.1.*).
ifDescr = ".1.3.6.1.2.1.2.2.1.2"
ifType = ".1.3.6.1.2.1.2.2.1.3"
ifSpeed = ".1.3.6.1.2.1.2.2.1.5"
ifAdminStatus = ".1.3.6.1.2.1.2.2.1.7"
ifOperStatus = ".1.3.6.1.2.1.2.2.1.8"
# H3C (hh3c, enterprise 25506) vendor OIDs: entity CPU/memory/temperature
# and firewall connection counters.
hh3cEntityExtCpuUsage = ".1.3.6.1.4.1.25506.2.6.1.1.1.1.6"
hh3cEntityExtMemUsage = ".1.3.6.1.4.1.25506.2.6.1.1.1.1.8"
hh3cEntityExtTemperature = ".1.3.6.1.4.1.25506.2.6.1.1.1.1.12"
hh3cFWMaxConnNum = ".1.3.6.1.4.1.25506.2.88.1.1.1"
hh3cFWConnNumCurr = ".1.3.6.1.4.1.25506.2.88.1.2.1"
# IF-MIB traffic/error counters and the IP address table (used to map an
# interface index to its IP address).
ifNumber = ".1.3.6.1.2.1.2.1"
ifInOctets=".1.3.6.1.2.1.2.2.1.10"
ifOutOctets=".1.3.6.1.2.1.2.2.1.16"
ifInDiscards=".1.3.6.1.2.1.2.2.1.13"
ifOutDiscards=".1.3.6.1.2.1.2.2.1.19"
ifInErrors=".1.3.6.1.2.1.2.2.1.14"
ifOutErrors=".1.3.6.1.2.1.2.2.1.20"
ipAdEntAddr=".1.3.6.1.2.1.4.20.1.1"
ipAdEntIndex=".1.3.6.1.2.1.4.20.1.2"
def get_net_equipment_snmpmsg(host, commit):
    """Poll the H3C CPU and memory usage OIDs on *host* via SNMP v2.

    :param host: device address to query
    :param commit: SNMP community string
    :return: SnmpMessage whose .cpu/.mem hold the raw snmpwalk results
    """
    msg = SnmpMessage()
    for attr, oid in (('cpu', hh3cEntityExtCpuUsage),
                      ('mem', hh3cEntityExtMemUsage)):
        setattr(msg, attr, netsnmp.snmpwalk(oid,
                                            Version = 2,
                                            DestHost = host,
                                            Community = commit))
    return msg
def get_equipment(id):
    """Load one row from the ``equipments`` table and wrap it as Equipment.

    :param id: integer primary key of the equipment row
    :raises ValueError: if no row matches (previously this raised
        UnboundLocalError from returning an unassigned local)
    """
    monitordb = MySQLdb.connect(MYSQL_HOST,
                                # Bug fix: the DB *user* was passed as
                                # MYSQL_MONITOR_DB_NAME; it only worked because
                                # the two constants happen to be equal.
                                MYSQL_MONITOR_DB_USER,
                                MYSQL_MONITOR_DB_PASSWD,
                                MYSQL_MONITOR_DB_NAME)
    try:
        cursor = monitordb.cursor()
        # Parameterized query instead of "%d" string interpolation.
        cursor.execute("SELECT * FROM equipments WHERE id=%s", (id,))
        equipment = cursor.fetchone()
        cursor.close()
    finally:
        # Always release the connection, even if the query fails.
        monitordb.close()
    if equipment is None:
        raise ValueError("equipment id %r not found" % (id,))
    # Live usage figures are left blank here; callers fill them via SNMP.
    return Equipment(equipment[0], equipment[1],
                     equipment[2], equipment[3], "", "",
                     equipment[4], equipment[5])
class SnmpMessage(object):
    """Raw snmpwalk results (CPU and memory) for one device poll."""

    def __init__(self):
        # Both are populated later by get_net_equipment_snmpmsg().
        self.cpu = self.mem = None
class Equipment(object):
    """Row from the ``equipments`` table plus live SNMP usage figures."""

    def __init__(self, id, name, type,
                 ip, cpu_usage, mem_usage,
                 descrition, status):
        self.id = id
        self.name = name
        # Translated for display in the dashboard tables.
        self.type = _(type)
        self.ip = ip
        # cpu_usage/mem_usage arrive as bare numeric strings; render as "%".
        self.cpu_usage = cpu_usage + "%"
        self.mem_usage = mem_usage + "%"
        # (sic) misspelling of "description" kept: it matches the DB column
        # and the table definitions elsewhere in the dashboard.
        self.descrition = descrition
        self.status = status

    def __str__(self):
        return self.name
class EquipmentMonitor(object):
    """Live SNMP snapshot of one H3C network device.

    Attributes other than id/name/ip stay None until fill_snmp_data() runs.
    """

    id = None
    name = None
    temperature = None
    ip = None
    cpu_usage = None
    mem_usage = None
    max_connect_num = None
    cur_connect_num = None
    interface_num = None

    def __init__(self, id, name, ip):
        self.id = id
        self.name = name
        self.ip = ip

    def _walk(self, oid):
        # One SNMP v2 walk against this device; factored out of six
        # duplicated call sites in fill_snmp_data().
        # NOTE(review): community string is hard-coded; should be config.
        return netsnmp.snmpwalk(oid,
                                Version = 2,
                                DestHost = self.ip,
                                Community = "newtouch")

    def fill_snmp_data(self):
        """Populate temperature, usage and connection fields via SNMP.

        Index [2] picks the same entity-table entry the original code used
        (presumably the chassis entity — TODO confirm per device model).
        """
        self.temperature = self._walk(hh3cEntityExtTemperature)[2] + ' C'
        self.cpu_usage = self._walk(hh3cEntityExtCpuUsage)[2] + "%"
        self.mem_usage = self._walk(hh3cEntityExtMemUsage)[2] + "%"
        self.max_connect_num = self._walk(hh3cFWMaxConnNum)[0]
        self.cur_connect_num = self._walk(hh3cFWConnNumCurr)[0]
        self.interface_num = self._walk(ifNumber)[0]
def equipment_monitor_equipment_list(request = None, marker = None, paginate = False, addr = None):
    """Return an EquipmentMonitor (with live SNMP data) for every equipment row.

    request/marker/paginate/addr are accepted for API symmetry with the
    sibling list functions but are not used here.
    """
    equipments = []
    monitordb = MySQLdb.connect(MYSQL_HOST,
                                # Bug fix: DB user was passed as DB_NAME
                                # (same value today, wrong constant).
                                MYSQL_MONITOR_DB_USER,
                                MYSQL_MONITOR_DB_PASSWD,
                                MYSQL_MONITOR_DB_NAME)
    try:
        cursor = monitordb.cursor()
        row = cursor.execute("SELECT * FROM equipments")
        for result in cursor.fetchmany(row):
            equipment = EquipmentMonitor(result[0], result[1], result[3])
            equipment.fill_snmp_data()
            equipments.append(equipment)
        cursor.close()
    finally:
        # Always release the connection, even if an SNMP poll fails.
        monitordb.close()
    return equipments
def network_monitor_equipment_list(request = None, marker = None, paginate = False, addr = None):
    """List equipments (optionally filtered by *addr*) with live CPU/mem usage.

    :param addr: optional equipment address to filter on
    :return: list of Equipment objects
    """
    equipments = []
    monitordb = MySQLdb.connect(MYSQL_HOST,
                                # Bug fix: DB user was passed as DB_NAME
                                # (same value today, wrong constant).
                                MYSQL_MONITOR_DB_USER,
                                MYSQL_MONITOR_DB_PASSWD,
                                MYSQL_MONITOR_DB_NAME)
    try:
        cursor = monitordb.cursor()
        if addr:
            # Parameterized query: addr may originate from a request, so the
            # previous string interpolation was an SQL-injection vector.
            row = cursor.execute("SELECT * FROM equipments WHERE addr=%s", (addr,))
        else:
            row = cursor.execute("SELECT * FROM equipments")
        for equipment in cursor.fetchmany(row):
            snmpmsg = get_net_equipment_snmpmsg(equipment[3], "newtouch")
            equipments.append(Equipment(equipment[0],
                                        equipment[1],
                                        equipment[2],
                                        equipment[3],
                                        snmpmsg.cpu[2],
                                        snmpmsg.mem[2],
                                        equipment[4],
                                        equipment[5]))
        cursor.close()
    finally:
        monitordb.close()
    return equipments
class InterFace(object):
    """One physical interface on a network device, for the interface list."""

    # Static descriptions for the three known uplink/management ports.
    _DESCRIPTIONS = {
        'GigabitEthernet0/0': "Management(192.168.0.1/255.255.255.0)",
        'GigabitEthernet0/1': "Untrust(58.247.8.188/255.255.255.248)",
        'GigabitEthernet0/2': "Trust(192.168.202.1/255.255.0.0)",
    }

    def __init__(self, id, index, name, desthost, status):
        self.id = id
        self.name = name
        self.index = index
        self.desthost = desthost
        self.description = self._DESCRIPTIONS.get(name, '-')
        # ifOperStatus: 1 = up, 2 = down (IF-MIB).
        if '1' == status:
            self.status = _("Connected")
        elif '2' == status:
            self.status = _("Not Connected")
        else:
            self.status = _("Unknown")

    def fill_interface_ip(self):
        """Replace .desthost with this interface's own IP from the IP-MIB
        address table, or "-" when none is bound."""
        ip_index = netsnmp.snmpwalk(ipAdEntIndex,
                                    Version = 2,
                                    DestHost = self.desthost,
                                    Community = "newtouch")
        wanted = str(self.index + 1)
        if wanted in ip_index:
            addrs = netsnmp.snmpwalk(ipAdEntAddr,
                                     Version = 2,
                                     DestHost = self.desthost,
                                     Community = "newtouch")
            self.desthost = addrs[ip_index.index(wanted)]
        else:
            self.desthost = "-"
def get_interface(request, id):
    """Build InterFace objects for every real port of equipment *id*.

    NULL and Vlan pseudo-interfaces are skipped.
    """
    equipment = get_equipment(int(id))
    names = netsnmp.snmpwalk(ifDescr,
                             Version = 2,
                             DestHost = equipment.ip,
                             Community = "newtouch")
    statuses = netsnmp.snmpwalk(ifOperStatus,
                                Version = 2,
                                DestHost = equipment.ip,
                                Community = "newtouch")
    interfaces = []
    for name in names:
        # Guard clause: skip pseudo interfaces.
        if name.find("NULL") != -1 or name.find("Vlan") != -1:
            continue
        # Row id uses "-" instead of "/" so it survives use in URLs.
        tag, sep, tail = name.partition("/")
        pos = names.index(name)
        port = InterFace(tag + "-" + tail, pos, name,
                         equipment.ip, statuses[pos])
        port.fill_interface_ip()
        interfaces.append(port)
    return interfaces
class EquipmentMonitorInterFace(object):
    """Per-interface traffic/error counters for the equipment-monitor view."""

    def __init__(self, id, name, status, ip):
        # *id* is the interface's position in the ifDescr walk (0-based).
        self.id = id
        self.name = name
        # ifOperStatus: 1 = up, 2 = down (IF-MIB).
        if '1' == status:
            self.status = _("Connected")
        elif '2' == status:
            self.status = _("Not Connected")
        else:
            self.status = _("Unknown")
        self.equipment_ip = ip

    def _walk(self, oid):
        # One SNMP v2 walk against the owning device; factored out of eight
        # duplicated call sites below.
        # NOTE(review): community string is hard-coded; should be config.
        return netsnmp.snmpwalk(oid,
                                Version = 2,
                                DestHost = self.equipment_ip,
                                Community = "newtouch")

    def fill_snmp_data(self):
        """Fetch octet/discard/error counters for this interface index."""
        self.inoctets = self._walk(ifInOctets)[self.id]
        self.outoctets = self._walk(ifOutOctets)[self.id]
        self.indiscards = self._walk(ifInDiscards)[self.id]
        self.outdiscards = self._walk(ifOutDiscards)[self.id]
        self.inerrors = self._walk(ifInErrors)[self.id]
        self.outerrors = self._walk(ifOutErrors)[self.id]

    def fill_interface_ip(self):
        """Set .ip to the address bound to this interface, or "-" if none."""
        ip_index = self._walk(ipAdEntIndex)
        key = str(self.id + 1)
        if key in ip_index:
            self.ip = self._walk(ipAdEntAddr)[ip_index.index(key)]
        else:
            self.ip = "-"
def equipment_monitor_interface_list(request, interface):
    """Return counter-filled EquipmentMonitorInterFace objects for a device.

    :param interface: "<equipment_id>-<port>"; only the id part is used here
    """
    equipment_id, sep, num = interface.partition("-")
    equipment = get_equipment(int(equipment_id))
    names = netsnmp.snmpwalk(ifDescr,
                             Version = 2,
                             DestHost = equipment.ip,
                             Community = "newtouch")
    statuses = netsnmp.snmpwalk(ifOperStatus,
                                Version = 2,
                                DestHost = equipment.ip,
                                Community = "newtouch")
    interfaces = []
    for name in names:
        pos = names.index(name)
        item = EquipmentMonitorInterFace(pos, name, statuses[pos],
                                         equipment.ip)
        item.fill_snmp_data()
        item.fill_interface_ip()
        interfaces.append(item)
    return interfaces
class Logs(object):
    """One parsed syslog row, shaped for the syslog table views."""

    # RFC 5424 numeric severity -> display label.
    _PRIORITIES = {
        0: _("Emergency"),
        1: _("Alert"),
        2: _("Critical"),
        3: _("Error"),
        4: _("Warning"),
        5: _("Notice"),
        6: _("Informational"),
        7: _("Debug"),
    }

    def __init__(self, dict):
        self.id = dict['id']
        self.time = dict['time']
        self.type = dict['type']
        self.host = dict['host']
        # Unknown severities map to None, matching the old if/elif chain.
        self.priority = self._PRIORITIES.get(dict['priority'])
        self.interface = dict['interface']
        self.src_ip = dict['src_ip']
        self.dev_type = dict['dev_type']
        self.dest_ip = dict['dest_ip']
        self.message = dict['message']

    def __str__(self):
        return "%s-%s-%s" % (self.time, self.type, self.priority)
def get_syslogs_from_db(limit = None, marker = None,
                        system_tag = None, interface = None,
                        filters = None):
    '''
    Get syslogs from mysql.

    :param limit: maximum number of Logs objects to return
    :param marker: only rows with ID < marker are returned (pagination)
    :param system_tag: SysLogTag value to select (already quoted by callers)
    :param interface: "<name>-<port>" used to build a message filter
    :param filters: optional dict with one of 'time'/'id'/'type'/'priority'
    :return: (list of Logs, total matched row count)
    '''
    syslogs = []
    syslogdb = MySQLdb.connect(MYSQL_HOST,
                               MYSQL_SYSLOG_DB_USER,
                               MYSQL_SYSLOG_DB_PASSWD,
                               MYSQL_SYSLOG_DB_NAME)
    cursor = syslogdb.cursor()
    # Bug fix: filters defaults to None but was dereferenced unconditionally
    # (filters.has_key(...) raised AttributeError when called with defaults).
    # Also replaced the deprecated dict.has_key() with the `in` operator.
    filters = filters or {}
    # SECURITY NOTE(review): the clauses below are built by string
    # interpolation from request-supplied values; they should be converted
    # to parameterized queries.
    filter = ''
    if 'time' in filters:
        filter = 'AND ReceivedAt LIKE "%s%s%s" ' % ("%", filters['time'], "%")
    elif 'id' in filters:
        try:
            int(filters['id'])
            filter = 'AND ID = %s ' % (filters['id'])
        except ValueError:
            # Non-numeric id: match nothing rather than inject raw text.
            filter = 'AND ID = %s ' % ("0")
    elif 'type' in filters:
        filter = 'AND Message LIKE "%s%s%s" ' % ("%", filters['type'], "%")
    elif 'priority' in filters:
        filter = 'AND Priority LIKE "%s%s%s" ' % ("%", filters['priority'], "%")
    interface_name = ""
    if interface:
        interface_detail = interface.split("-")
        interface_name = ' AND (Message LIKE %s%s%s%s%s OR Message NOT LIKE %s%s%s ) ' % ("'%", interface_detail[0], "/", interface_detail[1], "%'",
                                                                                          "'%", interface_detail[0], "%'")
    if marker:
        sql = "SELECT * FROM SystemEvents WHERE (SysLogTag = {0} AND ID < {1} {2} {3}) ORDER BY id DESC"
        row = cursor.execute(sql.format(system_tag, int(marker), interface_name, filter))
    else:
        sql = "SELECT * FROM SystemEvents WHERE (SysLogTag = {0} {1} {2}) order by id DESC"
        row = cursor.execute(sql.format(system_tag, interface_name, filter))
    if row:
        results = cursor.fetchmany(row)
        for syslog in results:
            # Message layout: "<tag>:%%10<TYPE>/...;<key>=<val>;..."
            tag, sep, message = syslog[7].partition(":")
            if (-1 != tag.find("message repeated")):
                # Unwrap "message repeated N times: [ ... ]" envelopes.
                message = message.strip().lstrip("[").rstrip("]")
                tag, sep, message = message.partition(":")
            type = tag.strip().lstrip("%%10")
            type = type.split("/", 1)[0]
            dev_type_value = ""
            interface_type_value = ""
            srcip_type_value = ""
            destip_type_value = ""
            # Pull out the fields the tables display.  The atckType/srcMac/
            # destMac branches are kept (as no-ops) so matching precedence
            # is unchanged; their values were never used.
            for part in message.split(";"):
                if -1 != part.find("DEV_TYPE"):
                    dev_type_value = part.partition("=")[2].strip()
                elif -1 != part.find("atckType"):
                    pass
                elif -1 != part.find("rcvIfName"):
                    interface_type_value = part.partition("=")[2].strip()
                elif -1 != part.find("srcIPAddr"):
                    srcip_type_value = part.partition("=")[2].strip()
                elif -1 != part.find("srcMacAddr"):
                    pass
                elif -1 != part.find("destIPAddr"):
                    destip_type_value = part.partition("=")[2].strip()
                elif -1 != part.find("destMacAddr"):
                    pass
            syslog_dict = dict(id=syslog[0], time=syslog[2], priority=syslog[5],
                               host=syslog[6], message=syslog[7], type=type,
                               dev_type=dev_type_value,
                               interface=interface_type_value,
                               src_ip=srcip_type_value,
                               dest_ip=destip_type_value)
            syslogs.append(Logs(syslog_dict))
            if limit == len(syslogs):
                break
    cursor.close()
    syslogdb.close()
    return (syslogs, row)
def get_filter_syslogs_from_db(limit = None,
                               marker = None,
                               opt = None):
    '''
    Fetch syslog rows matching the advanced-filter options.

    :param limit: maximum number of Logs objects to return
    :param marker: only rows with ID < marker (pagination)
    :param opt: filter-form object exposing tag_list, priority,
                attack_type, StartTime and EndTime
    :return: (list of Logs, total matched row count)
    '''
    # Severity label -> RFC 5424 numeric level.
    priority_levels = {"Emergency": 0, "Alert": 1, "Critical": 2,
                       "Error": 3, "Warning": 4, "Notice": 5,
                       "Informational": 6, "Debug": 7}
    syslogs = []
    syslogdb = MySQLdb.connect(MYSQL_HOST,
                               MYSQL_SYSLOG_DB_USER,
                               MYSQL_SYSLOG_DB_PASSWD,
                               MYSQL_SYSLOG_DB_NAME)
    cursor = syslogdb.cursor()
    # Assemble the WHERE clause piece by piece.
    # NOTE(review): clauses are built by string interpolation from form
    # input and should be parameterized.
    tag_clause = ""
    for system_tag in opt.tag_list:
        if "" == tag_clause:
            tag_clause = "SysLogTag = \'%s\' " % (system_tag)
        else:
            tag_clause += "OR SysLogTag = \'%s\' " % (system_tag)
    priority_clause = ""
    if opt.priority and opt.priority in priority_levels:
        priority_clause = "AND Priority = %d " % (priority_levels[opt.priority])
    type_clause = ""
    if opt.attack_type:
        type_clause = "AND Message LIKE \'%s%s%s\' " % ('%', opt.attack_type, '%')
    start_clause = ""
    if opt.StartTime:
        start_clause = "AND ReceivedAt >= \'%s 00:00:01\' " % (opt.StartTime)
    end_clause = ""
    if opt.EndTime:
        end_clause = "AND ReceivedAt <= \'%s 24:00:00\' " % (opt.EndTime)
    marker_clause = ""
    if marker:
        marker_clause = "AND ID < %s ORDER BY id DESC" % (marker)
    sql_filter = ("SELECT * FROM SystemEvents WHERE " + tag_clause +
                  priority_clause + type_clause +
                  start_clause + end_clause + marker_clause)
    row = cursor.execute(sql_filter)
    if row:
        for syslog in cursor.fetchmany(row):
            # Message layout: "<tag>:%%10<TYPE>/...;<key>=<val>;..."
            tag, sep, message = syslog[7].partition(":")
            if (-1 != tag.find("message repeated")):
                # Unwrap "message repeated N times: [ ... ]" envelopes.
                message = message.strip().lstrip("[").rstrip("]")
                tag, sep, message = message.partition(":")
            type = tag.strip().lstrip("%%10")
            type = type.split("/", 1)[0]
            dev_type_value = ""
            interface_type_value = ""
            srcip_type_value = ""
            destip_type_value = ""
            # No-op branches keep the original matching precedence; their
            # values were parsed but never displayed.
            for part in message.split(";"):
                if -1 != part.find("DEV_TYPE"):
                    dev_type_value = part.partition("=")[2].strip()
                elif -1 != part.find("atckType"):
                    pass
                elif -1 != part.find("rcvIfName"):
                    interface_type_value = part.partition("=")[2].strip()
                elif -1 != part.find("srcIPAddr"):
                    srcip_type_value = part.partition("=")[2].strip()
                elif -1 != part.find("srcMacAddr"):
                    pass
                elif -1 != part.find("destIPAddr"):
                    destip_type_value = part.partition("=")[2].strip()
                elif -1 != part.find("destMacAddr"):
                    pass
            syslog_dict = dict(id=syslog[0], time=syslog[2], priority=syslog[5],
                               host=syslog[6], message=syslog[7], type=type,
                               dev_type=dev_type_value,
                               interface=interface_type_value,
                               src_ip=srcip_type_value,
                               dest_ip=destip_type_value)
            syslogs.append(Logs(syslog_dict))
            if limit == len(syslogs):
                break
    cursor.close()
    syslogdb.close()
    return (syslogs, row)
def syslog_list(request, marker = None,
                paginate = False, interface = None,
                filters = None):
    '''
    Fetch a (possibly paginated) page of firewall syslogs.

    :return: (list of Logs, has_more_data flag, total matched row count)
    '''
    limit = 200
    page_size = utils.get_page_size(request)
    # Ask for one extra row so we can tell whether another page exists.
    request_size = page_size + 1 if paginate else limit
    syslogs, count = get_syslogs_from_db(limit=request_size,
                                         marker = marker,
                                         system_tag="\'Newtouch-H3C\'",
                                         interface = interface,
                                         filters = filters)
    has_more_data = False
    if paginate and len(syslogs) > page_size:
        # Drop the sentinel row; its presence proves there is a next page.
        syslogs.pop(-1)
        has_more_data = True
    return (syslogs, has_more_data, count)
def filter_syslog_list(request,
                       marker = None,
                       paginate = False,
                       opt = None):
    """Paginated wrapper around get_filter_syslogs_from_db().

    :return: (list of Logs, has_more_data flag, total matched row count)
    """
    limit = 500
    page_size = utils.get_page_size(request)
    # Ask for one extra row so we can tell whether another page exists.
    request_size = page_size + 1 if paginate else limit
    syslogs, count = get_filter_syslogs_from_db(limit=request_size,
                                                marker = marker,
                                                opt = opt)
    has_more_data = False
    if paginate and len(syslogs) > page_size:
        # Drop the sentinel row; its presence proves there is a next page.
        syslogs.pop(-1)
        has_more_data = True
    return (syslogs, has_more_data, count)
class LogDetail(object):
    """Single syslog message body shown on the detail page."""

    def __init__(self, id, message):
        self.id, self.message = id, message
def logs_detail(request, id):
    """Load one syslog row by primary key and wrap it for the detail table."""
    syslogdb = MySQLdb.connect(MYSQL_HOST, MYSQL_SYSLOG_DB_USER,
                               MYSQL_SYSLOG_DB_PASSWD, MYSQL_SYSLOG_DB_NAME)
    cursor = syslogdb.cursor()
    cursor.execute("SELECT * FROM SystemEvents WHERE ID = {0}".format(id))
    result = cursor.fetchone()
    # Column 7 is the raw message; strip the "%%10" vendor prefix.
    message = [LogDetail(id, result[7].strip().lstrip("%%10"))]
    cursor.close()
    syslogdb.close()
    return message
class Node(object):
    """Compute-node row for the overview table, with a ping-based status."""

    id = None
    hostname = None
    ip = None
    cpu_usage = None
    mem_usage = None
    status = 0

    def __init__(self, id, hostname, host_ip):
        self.id = id
        self.hostname = hostname
        self.ip = host_ip

    def get_status(self):
        """Ping the node once and set .status to "Up"/"Down"/"Error"."""
        import re
        import subprocess
        regex = re.compile("100% packet loss")
        try:
            # Bug fix: the command was previously built as
            # "ping -c 1 -w 1" + ip with no separating space, producing
            # "...-w 1192.168.x.x".  An argument list (shell=False) fixes
            # that and also removes a shell-injection vector via the
            # address string.
            p = subprocess.Popen(["ping", "-c", "1", "-w", "1", self.ip],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            out = p.stdout.read()
            if len(regex.findall(out)) == 0:
                self.status = "Up"
            else:
                self.status = "Down"
        except Exception:
            # Best-effort: any failure to run/parse ping shows as "Error".
            self.status = "Error"
def node_list(request):
    """Return a Node (with ping status) for every hypervisor, sorted by hostname.

    Removed a leftover debug statement ("print hypervisors[0]") that wrote
    to stdout on every request.
    """
    hypervisors = hypervisor_list(request)
    hypervisors.sort(key=utils.natural_sort('hypervisor_hostname'))
    nodelist = []
    for hypervisor in hypervisors:
        node = Node(hypervisor.id, hypervisor.hypervisor_hostname,
                    hypervisor.host_ip)
        # One ping per node; this is synchronous and can be slow on big sites.
        node.get_status()
        nodelist.append(node)
    return nodelist
def add_blacklist(request):
    """Push a blacklist entry for an IP to the H3C firewall over telnet.

    :return: 0 = success, 1 = unknown firewall address, 2 = missing input
    Removed two leftover debug print statements.
    """
    form = request.POST
    if '' == form['firewall_ip'] or '' == form['ip']:
        return 2
    # Only the known firewall is accepted as a target.
    if '192.168.202.1' != form['firewall_ip']:
        return 1
    # form['time'] selects how long the entry lives (seconds); '1' means
    # permanent (no timeout argument).
    timeouts = {'2': '30', '3': '180', '4': '1000'}
    if '1' == form['time']:
        cmd = 'blacklist ip %s\n' % (form['ip'])
    elif form['time'] in timeouts:
        cmd = 'blacklist ip %s timeout %s\n' % (form['ip'], timeouts[form['time']])
    else:
        # Unknown selection: keep the original behavior of sending nothing.
        cmd = ''
    cmd = str(cmd)
    # CLI prompts that mark the end of each command's output.
    finish1 = '<Newtouch-H3C>'
    finish2 = '[Newtouch-H3C]'
    # SECURITY NOTE(review): the credential is hard-coded and sent over
    # unencrypted telnet; move it to configuration and prefer SSH.
    tn = telnetlib.Telnet(form['firewall_ip'])
    tn.read_until('Password:')
    tn.write('newtouch!@#123' + '\n')
    tn.read_until(finish1)
    tn.write('system-view\n')
    tn.read_until(finish2)
    tn.write(cmd)
    tn.write('quit\n')
    tn.write('quit\n')
    tn.close()
    return 0
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,325 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/dashboard.py | from django.utils.translation import ugettext_lazy as _
import horizon
class Monitor(horizon.Dashboard):
    """Top-level Horizon dashboard grouping the monitoring panels."""
    name = _("Monitor")
    slug = "monitor"
    panels = ('overview', 'regulation', 'network_monitor', 'equipment_monitor')  # Add your panels here.
    default_panel = 'overview'  # Specify the slug of the dashboard's default panel.


# Make the dashboard visible in Horizon's site navigation.
horizon.register(Monitor)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,326 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/monitor/network_monitor/tables.py | from django.utils.translation import ugettext_lazy as _
from django.template import defaultfilters as filters
from horizon import tables
class NetworkMonitorFilterAction(tables.LinkAction):
    """Table action linking to the advanced syslog filter form."""
    name = "filter"
    verbose_name = _("Filter")
    url = "horizon:monitor:network_monitor:filter"
    # classes = ("",)
    icon = "search"
class FilterSyslogFilterAction(tables.FilterAction):
    """Plain client-side text filter for the filtered-syslog table."""
    name = "filter"
class SyslogFilterAction(tables.FilterAction):
    """Server-side syslog filter offering ID/time/type/priority criteria."""
    name = "filter"
    filter_type = "server"
    # (field, label, server-side) triples consumed by the filter widget.
    filter_choices = (('id', _("ID ="), True),
                      ('time', _('Time ='), True),
                      ('type', _('Type ='), True),
                      ('priority', _('Priority ='), True),)
class BlacklistAction(tables.LinkAction):
    """Row action linking to the add-blacklist form for an equipment."""
    name = "add_blacklist"
    verbose_name = _("Add Blacklist")
    url = "horizon:monitor:network_monitor:blacklist"
    # classes = ("ajax-modal",)
class SettingAction(tables.LinkAction):
    """Placeholder row action for basic equipment information (no URL yet)."""
    name = "basic_information"
    verbose_name = _("Basic Information")
    url = "#"
class InterfaceAction(tables.LinkAction):
    """Placeholder row action for per-interface settings (no URL yet)."""
    name = "interface_setting"
    verbose_name = _("Interface Setting")
    url = "#"
class EquipmentListTable(tables.DataTable):
    """Network-equipment overview table (one row per device)."""
    id = tables.Column("id", verbose_name=_("ID"), hidden=True)
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:monitor:network_monitor:equipment")
    type = tables.Column("type", verbose_name=_("Type"))
    # (sic) "descrition" matches the attribute name on the Equipment model.
    descrition = tables.Column("descrition", verbose_name=_("Descrition"))
    ip = tables.Column('ip', verbose_name=_("Ip"))
    cpu_usage = tables.Column('cpu_usage', verbose_name=_("CpuUsage"))
    mem_usage = tables.Column('mem_usage', verbose_name=_("MemUsage"))
    status = tables.Column("status", verbose_name=_("Status"))

    def get_object_id(self, datum):
        # Rows are Equipment objects, not dicts, so supply the id explicitly.
        return "%s" % (datum.id)

    class Meta:
        name = "equipment_list"
        verbose_name = _("Equipment List")
        row_actions = (BlacklistAction, SettingAction, InterfaceAction)
        table_actions = (NetworkMonitorFilterAction, )
        multi_select = False
class InterfaceListTable(tables.DataTable):
    """Interfaces of one device on the network-monitor equipment page."""
    id = tables.Column("id", hidden=True)
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:monitor:network_monitor:interface")
    description = tables.Column("description", verbose_name=_("Description"))
    desthost = tables.Column("desthost", verbose_name=_("DestHost"))
    status = tables.Column("status", verbose_name=_("Status"))

    def get_object_id(self, obj):
        # Encode both the interface id and host so detail links can resolve
        # the row unambiguously.
        return "%s-%s" % (obj.id, obj.desthost)

    class Meta:
        name = 'interface list'
        verbose_name = _("InterfaceList")
        multi_select = False
class SyslogListTable(tables.DataTable):
    """Firewall syslog table with server-side filtering."""
    id = tables.Column("id",
                       verbose_name=_('Id'),
                       link="horizon:monitor:network_monitor:detail",
                       filters=(filters.title,))
    time = tables.Column("time",
                         verbose_name=_('Time'),
                         filters=(filters.title,))
    type = tables.Column("type",
                         verbose_name=_('Type'),
                         filters=(filters.title,))
    priority = tables.Column("priority",
                             verbose_name=_('Priority'),
                             filters=(filters.title,))
    dev_type = tables.Column("dev_type", verbose_name=_('DevType'))
    interface = tables.Column("interface", verbose_name=_('Interface'))
    src_ip = tables.Column("src_ip", verbose_name=_('SrcIP'))
    dest_ip = tables.Column("dest_ip", verbose_name=_('DestIP'))

    def get_object_id(self, obj):
        # Rows are Logs objects; use the syslog row id.
        return "%s" % (obj.id)

    class Meta:
        name = "syslogs"
        verbose_name = _("Syslogs")
        table_actions = (SyslogFilterAction, )
        multi_select = False
class FilterSyslogListTable(tables.DataTable):
    """Syslog table shown for results of the advanced filter form."""
    id = tables.Column("id",
                       verbose_name=_('Id'),
                       link="horizon:monitor:network_monitor:detail")
    time = tables.Column("time",
                         verbose_name=_('Time'))
    type = tables.Column("type",
                         verbose_name=_('Type'))
    priority = tables.Column("priority",
                             verbose_name=_('Priority'))
    dev_type = tables.Column("dev_type", verbose_name=_('DevType'))
    interface = tables.Column("interface", verbose_name=_('Interface'))
    src_ip = tables.Column("src_ip", verbose_name=_('SrcIP'))
    dest_ip = tables.Column("dest_ip", verbose_name=_('DestIP'))

    def get_object_id(self, obj):
        # Rows are Logs objects; use the syslog row id.
        return "%s" % (obj.id)

    class Meta:
        name = "syslogs"
        verbose_name = _("Syslogs")
        table_actions = (FilterSyslogFilterAction, )
        multi_select = False
class MessageDetailTable(tables.DataTable):
    """Detail view table showing the full message text of one syslog entry."""
    id = tables.Column("id", verbose_name=_('Id'), hidden=True)
    message = tables.Column("message", verbose_name=_("Message"))
    class Meta:
        name = "message_detail"
        verbose_name = _("MessageDetail")
class BlackListTable(tables.DataTable):
    """Read-only listing of blacklisted addresses.

    Columns pass the verbose name positionally (second argument of
    ``tables.Column``), matching the style used above.
    """
    id = tables.Column('id', _("ID"))
    ip = tables.Column('ip', _("IP"))
    # `type` shadows the builtin; the attribute name matches the data field.
    type = tables.Column('type', _("Type"))
    start_time = tables.Column('start_time', _("StartTime"))
    end_time = tables.Column('end_time', _("EndTime"))
    class Meta:
        name = 'blacklist'
        verbose_name = _("Blacklist")
        # No table-level actions for this table.
        table_actions = ()
69,327 | junhuizx/horizon-newtouch | refs/heads/master | /openstack_dashboard/dashboards/newtouch/rules/tests.py | from horizon.test import helpers as test
class EventTests(test.TestCase):
    """Placeholder unit tests for the rules panel."""

    def test_me(self):
        # Sanity check that the test harness itself runs; assertEqual gives a
        # useful diff on failure, unlike assertTrue on a boolean expression.
        self.assertEqual(1 + 1, 2)
| {"/openstack_dashboard/dashboards/newtouch/resource.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/models.py"], "/openstack_dashboard/dashboards/newtouch/rules/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/overview/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/event/panel.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py"], "/openstack_dashboard/dashboards/newtouch/server/urls.py": ["/openstack_dashboard/dashboards/newtouch/__init__.py", "/openstack_dashboard/dashboards/newtouch/server/views.py"]} |
69,349 | wilfoderek/Human-Violence-Detection | refs/heads/master | /training/train.py | import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import LSTM,GRU,Dropout,Dense
from keras.optimizers import Adam
from keras.losses import binary_crossentropy
from keras.metrics import binary_accuracy
## LOAD THE DATA HERE
# NOTE(review): X_train/y_train/X_test/y_test are referenced below but never
# defined in this script - the data-loading code above must populate them.
model=Sequential()
# Stacked-LSTM binary classifier over sequence input.
model.add(LSTM(50,activation='relu',return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(100,activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1000,activation='sigmoid'))
model.add(Dropout(0.2))
# Single sigmoid unit -> probability of the positive class.
model.add(Dense(1,activation='sigmoid'))
# NOTE(review): `lr` is the legacy Keras argument name (newer releases use
# `learning_rate`) - confirm against the pinned Keras version.
model.compile(optimizer=Adam(lr=0.001,decay=1e-5),loss=binary_crossentropy,metrics=[binary_accuracy])
history=model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=20,batch_size=32,shuffle=True)
# Save the model
| {"/main.py": ["/yolo3/yolo.py", "/tools/processing.py", "/tools/coord_in_box.py", "/deepsort/detection.py", "/training/data_preprocessing.py"], "/yolo3/yolo.py": ["/tools/utils.py"]} |
69,350 | wilfoderek/Human-Violence-Detection | refs/heads/master | /tools/processing.py | import math
import time
import numpy as np
from scipy.ndimage.filters import gaussian_filter
import cv2
from tools import utils
# Keypoint names in the channel order produced by the COCO pose model used in
# extract_parts: 18 body parts plus a trailing 'background' channel (19 total,
# matching the heatmap depth below).
COCO_BODY_PARTS=['nose','neck',
                 'right_shoulder','right_elbow','right_wrist',
                 'left_shoulder','left_elbow','left_wrist',
                 'right_hip','right_knee','right_ankle',
                 'left_hip','left_knee','left_ankle',
                 'right_eye','left_eye','right_ear','left_ear','background'
                 ]
def extract_parts(input_image,params,model,model_params):
    """Run multi-scale pose estimation on one image and group keypoints by person.

    Parameters
    ----------
    input_image : np.ndarray
        BGR image, indexed (height, width, channels).
    params : dict
        Inference parameters; this function reads 'scale_search', 'thre1'
        (peak threshold) and 'thre2' (PAF score threshold).
    model :
        Pose network whose ``predict`` returns [PAFs, heatmaps].
    model_params : dict
        Reads 'boxsize', 'stride' and 'padValue'.

    Returns
    -------
    dict
        {'person1': {part_name: (x, y) array, ...}, 'person2': ...};
        parts not detected for a person map to (-1, -1).
    """
    multiplier=[x*model_params['boxsize']/input_image.shape[0] for x in params['scale_search']]
    # Body parts location heatmap, one per part (19)
    heatmap_avg=np.zeros((input_image.shape[0],input_image.shape[1],19))
    # Part affinities, one per limb (38)
    paf_avg=np.zeros((input_image.shape[0],input_image.shape[1],38))
    # start=time.time()
    # Stage 1: run the network at each scale and average heatmaps/PAFs.
    for scale in multiplier:
        image_to_test=cv2.resize(input_image,(0,0),fx=scale,fy=scale,interpolation=cv2.INTER_CUBIC)
        image_to_test_padded,pad=utils.pad_right_down_corner(image_to_test,model_params['stride'],
                                                             model_params['padValue'])
        # required shape (1, width, height, channels)
        input_img=np.transpose(np.float32(image_to_test_padded[:,:,:,np.newaxis]),(3,0,1,2))
        # start1=time.time()
        output_blobs=model.predict(input_img)
        # extract outputs, resize, and remove padding
        heatmap=np.squeeze(output_blobs[1]) # output 1 is heatmaps
        heatmap=cv2.resize(heatmap,(0,0),fx=model_params['stride'],fy=model_params['stride'],
                           interpolation=cv2.INTER_CUBIC)
        heatmap=heatmap[:image_to_test_padded.shape[0]-pad[2],:image_to_test_padded.shape[1]-pad[3],:]
        heatmap=cv2.resize(heatmap,(input_image.shape[1],input_image.shape[0]),interpolation=cv2.INTER_CUBIC)
        paf=np.squeeze(output_blobs[0]) # output 0 is PAFs
        paf=cv2.resize(paf,(0,0),fx=model_params['stride'],fy=model_params['stride'],
                       interpolation=cv2.INTER_CUBIC)
        paf=paf[:image_to_test_padded.shape[0]-pad[2],:image_to_test_padded.shape[1]-pad[3],:]
        paf=cv2.resize(paf,(input_image.shape[1],input_image.shape[0]),interpolation=cv2.INTER_CUBIC)
        heatmap_avg=heatmap_avg+heatmap
        paf_avg=paf_avg+paf
        # print('Net took {} seconds'.format(time.time()-start1))
    # 'Loop 1 took {} seconds'.format(time.time()-start))
    heatmap_avg=heatmap_avg/len(multiplier)
    paf_avg=paf_avg/len(multiplier)
    all_peaks=[]
    peak_counter=0
    # start=time.time()
    # Stage 2: non-maximum suppression on each part heatmap to find peaks.
    # Each peak becomes a tuple (x, y, score, global_id).
    for part in range(18):
        hmap_ori=heatmap_avg[:,:,part]
        hmap=gaussian_filter(hmap_ori,sigma=3)
        # Find the pixel that has maximum value compared to those around it
        hmap_left=np.zeros(hmap.shape)
        hmap_left[1:,:]=hmap[:-1,:]
        hmap_right=np.zeros(hmap.shape)
        hmap_right[:-1,:]=hmap[1:,:]
        hmap_up=np.zeros(hmap.shape)
        hmap_up[:,1:]=hmap[:,:-1]
        hmap_down=np.zeros(hmap.shape)
        hmap_down[:,:-1]=hmap[:,1:]
        # reduce needed because there are > 2 arguments
        peaks_binary=np.logical_and.reduce(
            (hmap>=hmap_left,hmap>=hmap_right,hmap>=hmap_up,hmap>=hmap_down,hmap>params['thre1']))
        peaks=list(zip(np.nonzero(peaks_binary)[1],np.nonzero(peaks_binary)[0])) # note reverse
        peaks_with_score=[x+(hmap_ori[x[1],x[0]],) for x in peaks] # add a third element to tuple with score
        idx=range(peak_counter,peak_counter+len(peaks))
        peaks_with_score_and_id=[peaks_with_score[i]+(idx[i],) for i in range(len(idx))]
        all_peaks.append(peaks_with_score_and_id)
        peak_counter+=len(peaks)
    # print('Loop 2 took {} seconds'.format(time.time()-start))
    connection_all=[]
    special_k=[]
    mid_num=10
    # start=time.time()
    # Stage 3: for every limb, score candidate part-pair connections by
    # integrating the PAF along the segment between the two peaks.
    for k in range(len(utils.hmapIdx)):
        score_mid=paf_avg[:,:,[x-19 for x in utils.hmapIdx[k]]]
        cand_a=all_peaks[utils.limbSeq[k][0]-1]
        cand_b=all_peaks[utils.limbSeq[k][1]-1]
        n_a=len(cand_a)
        n_b=len(cand_b)
        if n_a!=0 and n_b!=0:
            connection_candidate=[]
            for i in range(n_a):
                for j in range(n_b):
                    vec=np.subtract(cand_b[j][:2],cand_a[i][:2])
                    norm=math.sqrt(vec[0]*vec[0]+vec[1]*vec[1])
                    # failure case when 2 body parts overlaps
                    if norm==0:
                        continue
                    vec=np.divide(vec,norm)
                    # Sample mid_num points along the candidate limb segment.
                    startend=list(zip(np.linspace(cand_a[i][0],cand_b[j][0],num=mid_num),
                                      np.linspace(cand_a[i][1], cand_b[j][1],num=mid_num)))
                    vec_x=np.array(
                        [score_mid[int(round(startend[I][1])),int(round(startend[I][0])),0]
                         for I in range(len(startend))])
                    vec_y=np.array(
                        [score_mid[int(round(startend[I][1])),int(round(startend[I][0])),1]
                         for I in range(len(startend))])
                    score_midpts=np.multiply(vec_x,vec[0])+np.multiply(vec_y,vec[1])
                    score_with_dist_prior=sum(score_midpts)/len(score_midpts)+ min(0.5*input_image.shape[0]/norm-1,0)
                    criterion1=len(np.nonzero(score_midpts>params['thre2'])[0])>0.8*len(score_midpts)
                    criterion2=score_with_dist_prior>0
                    if criterion1 and criterion2:
                        connection_candidate.append([i,j,score_with_dist_prior,
                                                     score_with_dist_prior+cand_a[i][2]+cand_b[j][2]])
            # Greedily keep the best-scoring, non-conflicting connections.
            connection_candidate=sorted(connection_candidate,key=lambda x: x[2],reverse=True)
            connection=np.zeros((0,5))
            for c in range(len(connection_candidate)):
                i,j,s=connection_candidate[c][0:3]
                if i not in connection[:,3] and j not in connection[:,4]:
                    connection=np.vstack([connection,[cand_a[i][3],cand_b[j][3],s,i,j]])
                    if len(connection)>=min(n_a,n_b):
                        break
            connection_all.append(connection)
        else:
            special_k.append(k)
            connection_all.append([])
    # print('Loop 3 took {} seconds'.format(time.time()-start))
    # Stage 4: merge limb connections into per-person part assemblies.
    # last number in each row is the total parts number of that person
    # the second last number in each row is the score of the overall configuration
    subset=np.empty((0,20))
    candidate=np.array([item for sublist in all_peaks for item in sublist])
    # start=time.time()
    for k in range(len(utils.hmapIdx)):
        if k not in special_k:
            part_as=connection_all[k][:,0]
            part_bs=connection_all[k][:,1]
            index_a,index_b=np.array(utils.limbSeq[k])-1
            for i in range(len(connection_all[k])): # = 1:size(temp,1)
                found=0
                subset_idx=[-1,-1]
                for j in range(len(subset)): # 1:size(subset,1):
                    if subset[j][index_a]==part_as[i] or subset[j][index_b]==part_bs[i]:
                        subset_idx[found]=j
                        found+=1
                if found==1:
                    j=subset_idx[0]
                    if subset[j][index_b]!=part_bs[i]:
                        subset[j][index_b]=part_bs[i]
                        subset[j][-1]+=1
                        subset[j][-2]+=candidate[part_bs[i].astype(int),2]+connection_all[k][i][2]
                elif found==2: # if found 2 and disjoint, merge them
                    j1,j2=subset_idx
                    membership=((subset[j1]>=0).astype(int)+(subset[j2]>=0).astype(int))[:-2]
                    if len(np.nonzero(membership==2)[0])==0: # merge
                        subset[j1][:-2]+=(subset[j2][:-2]+1)
                        subset[j1][-2:]+=subset[j2][-2:]
                        subset[j1][-2]+=connection_all[k][i][2]
                        subset=np.delete(subset,j2, 0)
                    else: # as like found == 1
                        subset[j1][index_b]=part_bs[i]
                        subset[j1][-1]+=1
                        subset[j1][-2]+=candidate[part_bs[i].astype(int),2]+connection_all[k][i][2]
                # if find no partA in the subset, create a new subset
                elif not found and k<17:
                    row=-1*np.ones(20)
                    row[index_a]=part_as[i]
                    row[index_b]=part_bs[i]
                    row[-1]=2
                    row[-2]=sum(candidate[connection_all[k][i,:2].astype(int),2])+connection_all[k][i][2]
                    subset=np.vstack([subset,row])
    # print('Loop 4 took {} seconds'.format(time.time()-start))
    # delete some rows of subset which has few parts occur
    delete_idx=[]
    for i in range(len(subset)):
        if subset[i][-1]<4 or subset[i][-2]/subset[i][-1]<0.4:
            delete_idx.append(i)
    subset=np.delete(subset,delete_idx,axis=0)
    # Stage 5: map peak ids back to coordinates and build the output dict.
    # A sentinel (-1,-1,-1,-1) entry handles unassigned (-1) part indices.
    coord_id=[]
    for i in all_peaks:
        for j in i:
            coord_id.append(j)
    coord_id.append((-1,-1,-1,-1))
    coord_id=np.array(coord_id,'int64')[:,[0,1,3]]
    temp=coord_id[np.array(subset[:,:18],'int64'),:2]
    person_dict={}
    for i in range(temp.shape[0]):
        for j in range(18):
            if 'person'+str(i+1) not in person_dict:
                person_dict['person'+str(i+1)]={}
            person_dict['person'+str(i+1)][COCO_BODY_PARTS[j]]=temp[i,j,:]
    return person_dict
def non_max_suppression(boxes,max_bbox_overlap,scores=None):
    """Suppress overlapping detections via greedy non-maximum suppression.

    Parameters
    ----------
    boxes : np.ndarray
        Array of shape (n, 4) with boxes in (x, y, width, height) format.
    max_bbox_overlap : float
        Boxes overlapping a selected box by more than this fraction (of the
        other box's area) are suppressed.
    scores : Optional[np.ndarray]
        Detector confidence per box. When given, higher-scoring boxes are
        preferred; otherwise boxes are prioritised by their bottom edge.

    Returns
    -------
    list
        Indices of the boxes that survive suppression.
    """
    if len(boxes)==0:
        return []
    # Fix: `np.float` alias was removed in NumPy 1.24; builtin float is the
    # documented replacement and behaves identically here.
    boxes=boxes.astype(float)
    pick=[]
    x1=boxes[:,0]
    y1=boxes[:,1]
    x2=boxes[:,2]+boxes[:,0]
    y2=boxes[:,3]+boxes[:,1]
    area=(x2-x1+1)*(y2-y1+1)
    if scores is not None:
        idxs=np.argsort(scores)
    else:
        idxs=np.argsort(y2)
    while len(idxs)>0:
        # Pick the highest-priority remaining box (argsort is ascending,
        # so it sits at the end of `idxs`).
        last=len(idxs)-1
        i=idxs[last]
        pick.append(i)
        # Intersection of the picked box with every other remaining box.
        xx1=np.maximum(x1[i],x1[idxs[:last]])
        yy1=np.maximum(y1[i],y1[idxs[:last]])
        xx2=np.minimum(x2[i],x2[idxs[:last]])
        yy2=np.minimum(y2[i],y2[idxs[:last]])
        w=np.maximum(0,xx2-xx1+1)
        h=np.maximum(0,yy2-yy1+1)
        overlap=(w*h)/area[idxs[:last]]
        # Drop the picked box and everything overlapping it too much.
        idxs=np.delete(
            idxs,np.concatenate(
                ([last],np.where(overlap>max_bbox_overlap)[0])))
    return pick
69,351 | wilfoderek/Human-Violence-Detection | refs/heads/master | /deepsort/nn_matching.py | import numpy as np
def _pdist(a,b):
#Compute pair-wise squared distance between points in `a` and `b`.
a,b=np.asarray(a),np.asarray(b)
if len(a)==0 or len(b)==0:
return np.zeros((len(a),len(b)))
a2,b2=np.square(a).sum(axis=1),np.square(b).sum(axis=1)
r2=-2.*np.dot(a,b.T)+a2[:,None]+b2[None,:]
r2=np.clip(r2,0.,float(np.inf))
return r2
def _cosine_distance(a,b,data_is_normalized=False):
#Compute pair-wise cosine distance between points in `a` and `b`.
if not data_is_normalized:
a=np.asarray(a)/np.linalg.norm(a,axis=1,keepdims=True)
b=np.asarray(b)/np.linalg.norm(b,axis=1,keepdims=True)
return 1.-np.dot(a, b.T)
def _nn_euclidean_distance(x,y):
    """Nearest-neighbor metric (squared Euclidean): for each point in ``y``,
    the smallest distance to any sample in ``x``, floored at zero."""
    nearest = _pdist(x, y).min(axis=0)
    return np.maximum(0.0, nearest)
def _nn_cosine_distance(x,y):
    """Nearest-neighbor metric (cosine): for each point in ``y``, the
    smallest cosine distance to any sample in ``x``."""
    return _cosine_distance(x, y).min(axis=0)
class NearestNeighborDistanceMetric(object):
    """
    A nearest neighbor distance metric that, for each target, returns
    the closest distance to any sample that has been observed so far.

    Parameters
    ----------
    metric : str
        Either "euclidean" or "cosine".
    matching_threshold: float
        The matching threshold. Samples with larger distance are considered an
        invalid match.
    budget : Optional[int]
        If not None, fix samples per class to at most this number. Removes
        the oldest samples when the budget is reached.
    """

    def __init__(self, metric, matching_threshold, budget=None):
        if metric == "euclidean":
            chosen = _nn_euclidean_distance
        elif metric == "cosine":
            chosen = _nn_cosine_distance
        else:
            raise ValueError(
                "Invalid metric; must be either 'euclidean' or 'cosine'")
        self._metric = chosen
        self.matching_threshold = matching_threshold
        self.budget = budget
        # Maps target id -> list of feature vectors observed so far.
        self.samples = {}

    def partial_fit(self, features, targets, active_targets):
        """Add new (feature, target) observations and drop inactive targets."""
        for feature, target in zip(features, targets):
            bucket = self.samples.setdefault(target, [])
            bucket.append(feature)
            if self.budget is not None:
                # Keep only the most recent `budget` samples per target.
                self.samples[target] = bucket[-self.budget:]
        self.samples = {tgt: self.samples[tgt] for tgt in active_targets}

    def distance(self, features, targets):
        """Return a (len(targets), len(features)) cost matrix of
        nearest-neighbor distances between stored samples and features."""
        cost_matrix = np.zeros((len(targets), len(features)))
        for row, target in enumerate(targets):
            cost_matrix[row, :] = self._metric(self.samples[target], features)
        return cost_matrix
| {"/main.py": ["/yolo3/yolo.py", "/tools/processing.py", "/tools/coord_in_box.py", "/deepsort/detection.py", "/training/data_preprocessing.py"], "/yolo3/yolo.py": ["/tools/utils.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.