hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
574cf324dd7e3124ebf51b524833be21ee9c36b2 | 2,144 | py | Python | OOP Week 2/6310545566_Week12/run_board3.py | HelloYeew/helloyeew-lab-computer-programming-i | 60b05072f32f23bab4a336b506ba7f66e52c045d | [
"MIT"
] | null | null | null | OOP Week 2/6310545566_Week12/run_board3.py | HelloYeew/helloyeew-lab-computer-programming-i | 60b05072f32f23bab4a336b506ba7f66e52c045d | [
"MIT"
] | null | null | null | OOP Week 2/6310545566_Week12/run_board3.py | HelloYeew/helloyeew-lab-computer-programming-i | 60b05072f32f23bab4a336b506ba7f66e52c045d | [
"MIT"
] | null | null | null | from player import *
from board import *
from cell import *

# Driver script for the Week-12 board-game lab: builds a two-player board
# from 'board1.txt' and exercises Player.move, Player.obtain_cell_status and
# Player.randomize_dice, printing the state after every step so the output
# can be compared against the sample transcript.
a = Player("A")
b = Player("B")
board = Board('board1.txt')
board.add_player(a)
board.add_player(b)
### For testing function move
print(">>> A moves")
print(a)
# Set player A to move 4 steps
a.current_move = 4
print(a)
a.move(board)
print(a)
print()
print('>>> B moves 1 step and obtains new cell status')
print(b)
# Set player B to move 1 step
b.current_move = 1
print(b)
b.move(board)
print(b)
print('>>> Print B\'s cell 1 status.')
cell = board.access_cell(b.current_pos)
print(cell)
### For testing function obtain_cell_status
print('>>> B obtains current cell 1 status')
b.obtain_cell_status(board)
print(b)
# B is still at cell 1. Function obtain_cell_status does not move B.
# To update move=4 obtained from cell 1, we need to call function move again.
print('>>> After obtaining cell 1 status and moving B:')
b.move(board)
print(b)
print()
print('>>> A moves 5 steps and obtains new cell status')
print(a)
# Set player A to move 5 steps
a.current_move = 5
print(a)
a.move(board)
print(a)
a.obtain_cell_status(board)
print(a)
print('>>> Print current A\'s cell')
cell = board.access_cell(a.current_pos)
print(cell)
### For testing function randomize_dice
print()
print('>>> B randomizes dice, moves, and obtain new cell status')
print(b)
b.randomize_dice() # randomizes dice. Your randomized value may not be 6, like the sample output.
print(b)
b.move(board) # move
print(b)
b.obtain_cell_status(board) # obtain new cell status
print(b)
print('>>> Print current B\'s cell')
cell = board.access_cell(b.current_pos)
print(cell)
print('>>> B moves more than what is inside the board. B will move to the last cell.')
b.current_move = 30
print(b)
b.move(board)
print(b)
winning_cell_id = 19 # Fill ? yourself
winning_cell = board.access_cell(winning_cell_id)
print(len(winning_cell.get_occupy_list_str())) # The result is zero although b is at the winning cell.
# This happens because function move does not add the player to occupy_list of the cell.
# We must explicitly update occupy_list after running function move.
print(board.check_winner())
print(board.get_winner()) | 25.831325 | 102 | 0.734142 |
8de08f30f4be7057951699a66441357c473c84d6 | 3,695 | py | Python | module/malware/30.py | 5l1v3r1/saydog-framework | 92d6d2a80958ecf5724c95d1d3c76d8ca95db8d6 | [
"Xnet",
"TCP-wrappers",
"X11"
] | 2 | 2020-09-26T16:43:11.000Z | 2021-02-09T21:46:08.000Z | module/malware/30.py | 5l1v3r1/saydog-framework | 92d6d2a80958ecf5724c95d1d3c76d8ca95db8d6 | [
"Xnet",
"TCP-wrappers",
"X11"
] | null | null | null | module/malware/30.py | 5l1v3r1/saydog-framework | 92d6d2a80958ecf5724c95d1d3c76d8ca95db8d6 | [
"Xnet",
"TCP-wrappers",
"X11"
] | 1 | 2022-03-19T06:40:56.000Z | 2022-03-19T06:40:56.000Z | import os,sys
import time
u='\033[4m'
w='\x1b[00m'
r='\x1b[91m'
b='\x1b[36;1m'
y='\x1b[33m'
def exit():
    """Report a user-requested abort and terminate the process with status 1."""
    # NOTE: intentionally shadows the builtin exit(); kept for existing callers.
    notices = (
        r + '[!]' + w + ' The user forces it to stop',
        r + '[!]' + w + ' Exiting program',
    )
    for notice in notices:
        print(notice)
    sys.exit(1)
def corrupt():
    """Tell the user that the entered command was not recognised."""
    message = r + '[?]' + w + ' Command not found, please type help'
    print(message)
########### 256 ############
def thezoo():
    """Interactive prompt for downloading the 'EquationGroup' sample from theZoo.

    Loops reading commands until the user types 'back' (return to caller) or
    'exit' (terminate).  ``name``/``output`` are module-level globals set via
    the 'set name' / 'set output' commands and consumed by 'run'/'go'/'create'.
    """
    while True:
        try:
            global name
            global output
            names = 'EquationGroup'
            mg2 = input(w+'saydog('+r+'malware/'+names+w+') > ')
            if mg2 == 'help':
                # Print the command menu for this sub-shell.
                print('')
                print('Malware name: '+names)
                print('-------')
                print('command example')
                print('------- -------')
                print('set name [new name] set name saydog')
                print('set output [path] set output /sdcard')
                print('show show')
                print('run, go, create create')
                print('')
            elif mg2 == 'exit':
                exit()
            elif mg2 == 'back':
                # Leave only this sub-shell, not the whole program.
                sys.exit(0)
            elif mg2 == 'clear':
                os.system('clear')
            elif 'set name' in mg2:
                # Last whitespace-separated token becomes the file basename.
                name = mg2.split()[(-1)]
                print('name > '+name)
            elif 'set output' in mg2:
                output = mg2.split()[(-1)]
                print('output > '+output)
            elif mg2 == 'show':
                print('')
                print('-------------------')
                print('name : '+name)
                print('output : '+output)
                print('-------------------')
                print('')
            elif mg2 == 'run' or mg2 == 'go' or mg2 == 'create':
                time.sleep(1)
                print(y+'[-]'+w+' Generate malware '+names)
                time.sleep(2)
                print(y+'[-]'+w+' please wait for a minute ...')
                # SECURITY NOTE(review): ``name`` and ``output`` are
                # user-controlled and interpolated directly into a shell
                # command -- this allows shell injection.  Consider
                # subprocess.run([...], shell=False) with an argument list.
                os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.md5 -O '+output+'/'+name+'.md5')
                os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.pass -O '+output+'/'+name+'.pass')
                os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.sha256 -O '+output+'/'+name+'.sha256')
                os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.zip -O '+output+'/'+name+'.zip')
                print(w+'\033[41m success \033[00m file saved as '+output)
            else:
                corrupt()
        except NameError:
            # 'show'/'run' reached before 'set name'/'set output'.
            print(r+'[!] Error: '+w+'[name] or [output] not found')
        except KeyboardInterrupt:
            exit()
thezoo() | 49.932432 | 178 | 0.347226 |
e912162e98a071826f62ad98af891120325dbaa8 | 2,327 | py | Python | lab1/domanda3.py | D33pBlue/Algoritmi | 76088f40b49d51416919515a274ba076ae6a197e | [
"MIT"
] | null | null | null | lab1/domanda3.py | D33pBlue/Algoritmi | 76088f40b49d51416919515a274ba076ae6a197e | [
"MIT"
] | 1 | 2018-03-17T19:15:41.000Z | 2018-03-17T19:18:24.000Z | lab1/domanda3.py | D33pBlue/Algoritmi | 76088f40b49d51416919515a274ba076ae6a197e | [
"MIT"
] | null | null | null | from domanda1 import *
import random
import time
class DPATrial:
    """Helper for the DPA random-graph process.

    Maintains a multiset of node numbers in which each node appears once per
    incoming edge (plus once for itself), so drawing from the pool samples
    nodes with probability proportional to in-degree.
    """

    def __init__(self, m):
        self.numNodes = m
        # Seed pool: each of the first m nodes appears m times.
        self.nodeNumbers = [node for node in range(m) for _ in range(m)]

    def runTrial(self, m):
        """Pick m (possibly repeated) neighbors for a new node and grow the pool."""
        random.shuffle(self.nodeNumbers)
        V = [self.nodeNumbers.pop() for _ in range(m)]
        # The new node enters the pool once for itself and once per chosen
        # neighbor, mirroring the preferential-attachment update.
        self.nodeNumbers.append(self.numNodes)
        self.nodeNumbers.extend(V)
        self.numNodes += 1
        return V
"""
Implementazione dell'algoritmo DPA per la generazione di un grafo casuale
"""
def DPA_graph(m, n):
    """Generate a random directed graph with the DPA process.

    Starts from a complete digraph on the first m nodes (no self-loops),
    then adds nodes m..n-1, each wired to m neighbors drawn by the trial.
    """
    graph = {v: [u for u in range(m) if u != v] for v in range(m)}
    trial = DPATrial(m)
    for new_node in range(m, n):
        graph[new_node] = list(trial.runTrial(m))
    return graph
"""
Restituisce la distribuzione del grado uscente del grafo, come
dizionario avente per chiavi il grado e per valori la probabilita'
"""
def outdegree_dist(graph):
    """Return the out-degree distribution of ``graph``.

    The result maps each out-degree to the fraction of vertices having that
    out-degree; for a non-empty graph the values sum to 1.
    """
    nvertex = float(len(graph))
    outdegree = {}
    for neighbors in graph.values():
        deg = len(neighbors)
        # Division stays inside the loop so an empty graph returns {}
        # without ever dividing by zero.
        outdegree[deg] = outdegree.get(deg, 0.0) + 1.0 / nvertex
    return outdegree
"""
Genera il grafico delle due distribuzioni, mostrandole assieme per
poterle confrontare
"""
def compare_dists(dist1,dist2):
    """Scatter-plot two degree distributions on shared log-log axes.

    NOTE(review): ``plt`` is assumed to come from the star-import of
    ``domanda1`` (matplotlib.pyplot) -- confirm.  Labels are assigned but no
    legend is drawn before ``plt.show()``.
    """
    xs = dist1.keys()
    ys = [dist1[v] for v in xs]
    plt.xscale('log')
    plt.yscale('log')
    plt.scatter(xs,ys,label="dist1")
    xs = dist2.keys()
    ys = [dist2[v] for v in xs]
    plt.scatter(xs,ys,label="dist2")
    plt.show()
if __name__ == '__main__':
    # NOTE(review): the ``print`` statements below are Python 2 syntax; this
    # script will not run under Python 3 as written.
    graph_cit = load_adj_list('Cit-HepTh.txt',directed=True)
    # inddist1 = indegree_dist(graph_cit)
    outdist1 = outgoing = outdist1 = outdegree_dist(graph_cit)
    n = len(graph_cit.keys())
    # m = expected out-degree of the citation graph, rounded to an int.
    m = 0.0
    for o in outdist1.keys():
        m += o*outdist1[o]
    m = int(round(m))
    print "m=",m,"n=",n
    t = time.time()
    graph_dpa = DPA_graph(m,n)
    print "Grafo generato in",time.time()-t,"s"
    inddist2 = indegree_dist(graph_dpa)
    # compare_dists(inddist1,inddist2)
    plot_dist(inddist2)
| 25.855556 | 73 | 0.588311 |
12bdc0e0b4fc10f76ab0c7e32c691033a994b60e | 8,789 | py | Python | grblas/_automethods.py | vishalbelsare/grblas | d181ac4cb495d1bc806253137f53a42d65693b5e | [
"Apache-2.0"
] | 16 | 2020-07-28T22:14:56.000Z | 2022-03-26T14:45:41.000Z | grblas/_automethods.py | vishalbelsare/grblas | d181ac4cb495d1bc806253137f53a42d65693b5e | [
"Apache-2.0"
] | 75 | 2020-07-27T04:06:16.000Z | 2022-03-30T19:23:08.000Z | grblas/_automethods.py | vishalbelsare/grblas | d181ac4cb495d1bc806253137f53a42d65693b5e | [
"Apache-2.0"
] | 4 | 2020-07-23T19:04:13.000Z | 2022-03-18T00:34:14.000Z | """ Define functions to use as property methods on expressions.
These will automatically compute the value and avoid the need for `.new()`.
To automatically create the functions, run:
```python
common = {
"_name_html",
"_nvals",
"gb_obj",
"isclose",
"isequal",
"name",
"nvals",
"to_pygraphblas",
"wait",
}
scalar = {
"__array__",
"__bool__",
"__complex__",
"__eq__",
"__float__",
"__index__",
"__int__",
"__invert__",
"__neg__",
"is_empty",
"value",
}
vector_matrix = {
"S",
"V",
"__and__",
"__contains__",
"__getitem__",
"__iter__",
"__matmul__",
"__or__",
"__rand__",
"__rmatmul__",
"__ror__",
"_carg",
"apply",
"ewise_add",
"ewise_mult",
"ss",
"to_values",
}
vector = {
"inner",
"outer",
"reduce",
"vxm",
}
matrix = {
"T",
"kronecker",
"mxm",
"mxv",
"reduce_columns",
"reduce_rows",
"reduce_columnwise",
"reduce_rowwise",
"reduce_scalar",
}
common_raises = set()
scalar_raises = {
"__and__",
"__matmul__",
"__or__",
"__rand__",
"__rmatmul__",
"__ror__",
}
vector_matrix_raises = {
"__array__",
"__bool__",
}
has_defaults = {
"__eq__",
}
# no inplace math for expressions
bad_sugar = {
"__iadd__",
"__ifloordiv__",
"__imod__",
"__imul__",
"__ipow__",
"__isub__",
"__itruediv__",
"__ixor__",
"__ior__",
"__iand__",
"__imatmul__",
}
# Copy the result of this below
for name in sorted(common | scalar | vector_matrix | vector | matrix):
print(f"def {name}(self):")
if name in has_defaults:
print(f" return self._get_value({name!r}, default{name})\n\n")
else:
print(f" return self._get_value({name!r})\n\n")
for name in sorted(bad_sugar):
print(f"def {name}(self, other):")
print(f' raise TypeError(f"{name!r} not supported for {{type(self).__name__}}")\n\n')
# Copy to scalar.py and infix.py
print(" _get_value = _automethods._get_value")
for name in sorted(common | scalar):
print(f" {name} = wrapdoc(Scalar.{name})(property(_automethods.{name}))")
if name == "name":
print(" name = name.setter(_automethods._set_name)")
print(" # These raise exceptions")
for name in sorted(common_raises | scalar_raises):
print(f" {name} = wrapdoc(Scalar.{name})(Scalar.{name})")
print()
# Copy to vector.py and infix.py
print(" _get_value = _automethods._get_value")
for name in sorted(common | vector_matrix | vector):
print(f" {name} = wrapdoc(Vector.{name})(property(_automethods.{name}))")
if name == "name":
print(" name = name.setter(_automethods._set_name)")
print(" # These raise exceptions")
for name in sorted(common_raises | vector_matrix_raises):
print(f" {name} = wrapdoc(Vector.{name})(Vector.{name})")
for name in sorted(bad_sugar):
print(f" {name} = _automethods.{name}")
print()
# Copy to matrix.py and infix.py
print(" _get_value = _automethods._get_value")
for name in sorted(common | vector_matrix | matrix):
print(f" {name} = wrapdoc(Matrix.{name})(property(_automethods.{name}))")
if name == "name":
print(" name = name.setter(_automethods._set_name)")
print(" # These raise exceptions")
for name in sorted(common_raises | vector_matrix_raises):
print(f" {name} = wrapdoc(Matrix.{name})(Matrix.{name})")
for name in sorted(bad_sugar):
print(f" {name} = _automethods.{name}")
```
"""
from . import config
def _get_value(self, attr=None, default=None):
    """Compute (and cache) the expression's value, or one attribute of it.

    When autocompute is off, falls back to binding ``default`` (a
    descriptor) to ``self``; with no fallback a TypeError is raised.
    """
    if config.get("autocompute"):
        # Lazily materialize the result and cache it on the expression.
        value = self._value
        if value is None:
            value = self.new()
            self._value = value
        return value if attr is None else getattr(value, attr)
    if default is not None:
        # ``default`` is an unbound function; bind it via the descriptor
        # protocol so it behaves like a method on self.
        return default.__get__(self)
    raise TypeError(
        f"{attr} not enabled for objects of type {type(self)}. "
        f"Use `.new()` to create a new {self.output_type.__name__}.\n\n"
        "Hint: use `grblas.config.set(autocompute=True)` to enable "
        "automatic computation of expressions."
    )
def _set_name(self, name):
self._get_value().name = name
def default__eq__(self, other):
    """Fallback ``__eq__`` used when autocompute is disabled: always raises."""
    message = (
        f"__eq__ not enabled for objects of type {type(self)}. "
        f"Use `.new()` to create a new {self.output_type.__name__}, then use `.isequal` method.\n\n"
        "Hint: use `grblas.config.set(autocompute=True)` to enable "
        "automatic computation of expressions."
    )
    raise TypeError(message)
# Paste here
# ---------------------------------------------------------------------------
# Auto-generated accessor bodies (see the generator script in the module
# docstring).  Each function defers to ``_get_value`` so that touching the
# attribute on an expression triggers (cached) auto-computation.  Edit the
# generator and re-paste rather than editing these definitions by hand.
# ---------------------------------------------------------------------------
def S(self):
    return self._get_value("S")

def T(self):
    return self._get_value("T")

def V(self):
    return self._get_value("V")

def __and__(self):
    return self._get_value("__and__")

def __array__(self):
    return self._get_value("__array__")

def __bool__(self):
    return self._get_value("__bool__")

def __complex__(self):
    return self._get_value("__complex__")

def __contains__(self):
    return self._get_value("__contains__")

def __eq__(self):
    # The only accessor with a fallback: raises a tailored TypeError when
    # autocompute is off instead of the generic message.
    return self._get_value("__eq__", default__eq__)

def __float__(self):
    return self._get_value("__float__")

def __getitem__(self):
    return self._get_value("__getitem__")

def __index__(self):
    return self._get_value("__index__")

def __int__(self):
    return self._get_value("__int__")

def __invert__(self):
    return self._get_value("__invert__")

def __iter__(self):
    return self._get_value("__iter__")

def __matmul__(self):
    return self._get_value("__matmul__")

def __neg__(self):
    return self._get_value("__neg__")

def __or__(self):
    return self._get_value("__or__")

def __rand__(self):
    return self._get_value("__rand__")

def __rmatmul__(self):
    return self._get_value("__rmatmul__")

def __ror__(self):
    return self._get_value("__ror__")

def _carg(self):
    return self._get_value("_carg")

def _name_html(self):
    return self._get_value("_name_html")

def _nvals(self):
    return self._get_value("_nvals")

def apply(self):
    return self._get_value("apply")

def ewise_add(self):
    return self._get_value("ewise_add")

def ewise_mult(self):
    return self._get_value("ewise_mult")

def gb_obj(self):
    return self._get_value("gb_obj")

def inner(self):
    return self._get_value("inner")

def is_empty(self):
    return self._get_value("is_empty")

def isclose(self):
    return self._get_value("isclose")

def isequal(self):
    return self._get_value("isequal")

def kronecker(self):
    return self._get_value("kronecker")

def mxm(self):
    return self._get_value("mxm")

def mxv(self):
    return self._get_value("mxv")

def name(self):
    return self._get_value("name")

def nvals(self):
    return self._get_value("nvals")

def outer(self):
    return self._get_value("outer")

def reduce(self):
    return self._get_value("reduce")

def reduce_columns(self):
    return self._get_value("reduce_columns")

def reduce_columnwise(self):
    return self._get_value("reduce_columnwise")

def reduce_rows(self):
    return self._get_value("reduce_rows")

def reduce_rowwise(self):
    return self._get_value("reduce_rowwise")

def reduce_scalar(self):
    return self._get_value("reduce_scalar")

def ss(self):
    return self._get_value("ss")

def to_pygraphblas(self):
    return self._get_value("to_pygraphblas")

def to_values(self):
    return self._get_value("to_values")

def value(self):
    return self._get_value("value")

def vxm(self):
    return self._get_value("vxm")

def wait(self):
    return self._get_value("wait")
# ---------------------------------------------------------------------------
# Auto-generated: in-place math is not supported on expressions, so every
# augmented-assignment dunder raises immediately.
# ---------------------------------------------------------------------------
def __iadd__(self, other):
    raise TypeError(f"'__iadd__' not supported for {type(self).__name__}")

def __iand__(self, other):
    raise TypeError(f"'__iand__' not supported for {type(self).__name__}")

def __ifloordiv__(self, other):
    raise TypeError(f"'__ifloordiv__' not supported for {type(self).__name__}")

def __imatmul__(self, other):
    raise TypeError(f"'__imatmul__' not supported for {type(self).__name__}")

def __imod__(self, other):
    raise TypeError(f"'__imod__' not supported for {type(self).__name__}")

def __imul__(self, other):
    raise TypeError(f"'__imul__' not supported for {type(self).__name__}")

def __ior__(self, other):
    raise TypeError(f"'__ior__' not supported for {type(self).__name__}")

def __ipow__(self, other):
    raise TypeError(f"'__ipow__' not supported for {type(self).__name__}")

def __isub__(self, other):
    raise TypeError(f"'__isub__' not supported for {type(self).__name__}")

def __itruediv__(self, other):
    raise TypeError(f"'__itruediv__' not supported for {type(self).__name__}")

def __ixor__(self, other):
    raise TypeError(f"'__ixor__' not supported for {type(self).__name__}")
| 20.631455 | 100 | 0.656616 |
5f1fbb8f563b9b5cf0bb7496ee223845c1da1c0e | 10,912 | py | Python | hanlp/components/parsers/parse_alg.py | antfootAlex/HanLP | e8044b27ae1de54b9070db08549853d3ca8271e2 | [
"Apache-2.0"
] | 3 | 2022-03-22T05:47:50.000Z | 2022-03-22T05:47:58.000Z | hanlp/components/parsers/parse_alg.py | hushaoyun/HanLP | 967b52404c9d0adbc0cff2699690c127ecfca36e | [
"Apache-2.0"
] | null | null | null | hanlp/components/parsers/parse_alg.py | hushaoyun/HanLP | 967b52404c9d0adbc0cff2699690c127ecfca36e | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-04-02 23:20
from collections import defaultdict
import hanlp.utils.span_util
from hanlp.components.parsers.chu_liu_edmonds import decode_mst
import numpy as np
class Tarjan:
    """Computes Tarjan's algorithm for finding strongly connected components (cycles) of a graph"""

    def __init__(self, prediction, tokens):
        """Build the dependency graph and run Tarjan's SCC search.

        Parameters
        ----------
        prediction : numpy.ndarray
            a predicted dependency tree where prediction[dep_idx] = head_idx
        tokens : numpy.ndarray
            the tokens we care about (i.e. exclude _GO, _EOS, and _PAD)
        """
        self._edges = defaultdict(set)
        self._vertices = set((0,))  # 0 is the artificial ROOT vertex
        for dep, head in enumerate(prediction[tokens]):
            self._vertices.add(dep + 1)
            self._edges[head].add(dep + 1)
        self._indices = {}
        self._lowlinks = {}
        self._onstack = defaultdict(lambda: False)
        self._SCCs = []

        index = 0
        stack = []
        for v in self.vertices:
            if v not in self.indices:
                self.strongconnect(v, index, stack)

    # =============================================================
    def strongconnect(self, v, index, stack):
        """Recursive core of Tarjan's algorithm rooted at vertex ``v``.

        Appends each completed strongly connected component to ``self._SCCs``.

        NOTE(review): ``index`` is passed by value, so increments made inside
        recursive calls do not propagate back to the caller; this quirk is
        inherited from the upstream (Dozat) implementation -- confirm that
        duplicate discovery indices cannot mis-group components here.

        Args:
            v: vertex to expand
            index: next discovery index to assign
            stack: shared DFS stack of in-progress vertices

        Returns:
        """
        self._indices[v] = index
        self._lowlinks[v] = index
        index += 1
        stack.append(v)
        self._onstack[v] = True
        for w in self.edges[v]:
            if w not in self.indices:
                self.strongconnect(w, index, stack)
                self._lowlinks[v] = min(self._lowlinks[v], self._lowlinks[w])
            elif self._onstack[w]:
                self._lowlinks[v] = min(self._lowlinks[v], self._indices[w])

        if self._lowlinks[v] == self._indices[v]:
            # v is the root of an SCC: pop the stack down to v inclusive.
            self._SCCs.append(set())
            while stack[-1] != v:
                w = stack.pop()
                self._onstack[w] = False
                self._SCCs[-1].add(w)
            w = stack.pop()
            self._onstack[w] = False
            self._SCCs[-1].add(w)
        return

    # ======================
    @property
    def edges(self):
        # head -> set of dependents
        return self._edges

    @property
    def vertices(self):
        return self._vertices

    @property
    def indices(self):
        # vertex -> discovery index
        return self._indices

    @property
    def SCCs(self):
        # list of sets; components with len > 1 contain cycles
        return self._SCCs
class UnionFind(object):
    """Disjoint-set forest with path compression and union by rank."""

    def __init__(self, n) -> None:
        super().__init__()
        # Each element starts as its own root with rank ("height") 0.
        self.parent = list(range(n))
        self.height = [0] * n

    def find(self, x):
        """Return the representative root of ``x``, compressing the path."""
        root = self.parent[x]
        if root == x:
            return x
        root = self.find(root)
        self.parent[x] = root
        return root

    def unite(self, x, y):
        """Merge the sets containing ``x`` and ``y`` (no-op if already joined)."""
        root_x = self.find(x)
        root_y = self.find(y)
        if root_x == root_y:
            return
        # Attach the shallower tree under the deeper one to bound depth.
        if self.height[root_x] < self.height[root_y]:
            self.parent[root_x] = root_y
        else:
            self.parent[root_y] = root_x
            if self.height[root_x] == self.height[root_y]:
                self.height[root_x] += 1

    def same(self, x, y):
        """True iff ``x`` and ``y`` belong to the same set."""
        return self.find(x) == self.find(y)
def tarjan(parse_probs, length, tokens_to_keep, ensure_tree=True):
    """Adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/models/nn.py

    Greedily pick a head per token, then (optionally) repair the result into
    a well-formed tree: enforce a unique root and break each cycle by
    re-attaching its least costly member.

    Args:
        parse_probs(NDArray): seq_len x seq_len, the probability of arcs
        length(NDArray): sentence length including ROOT
        tokens_to_keep(NDArray): mask matrix
        ensure_tree: when False, just argmax over the masked probabilities
            (output may contain cycles or several roots). (Default value = True)

    Returns:
        1-D array mapping each position to its predicted head index.
    """
    if ensure_tree:
        parse_preds, parse_probs, tokens = unique_root(parse_probs, tokens_to_keep, length)
        # remove cycles
        tarjan = Tarjan(parse_preds, tokens)
        for SCC in tarjan.SCCs:
            if len(SCC) > 1:
                # Collect every vertex reachable from the cycle: none of them
                # may become the new head, or the cycle would re-form.
                dependents = set()
                to_visit = set(SCC)
                while len(to_visit) > 0:
                    node = to_visit.pop()
                    if not node in dependents:
                        dependents.add(node)
                        to_visit.update(tarjan.edges[node])
                # The indices of the nodes that participate in the cycle
                cycle = np.array(list(SCC))
                # The probabilities of the current heads
                old_heads = parse_preds[cycle]
                old_head_probs = parse_probs[cycle, old_heads]
                # Set the probability of depending on a non-head to zero
                non_heads = np.array(list(dependents))
                parse_probs[np.repeat(cycle, len(non_heads)), np.repeat([non_heads], len(cycle), axis=0).flatten()] = 0
                # Get new potential heads and their probabilities
                new_heads = np.argmax(parse_probs[cycle][:, tokens], axis=1) + 1
                new_head_probs = parse_probs[cycle, new_heads] / old_head_probs
                # Select the most probable change
                change = np.argmax(new_head_probs)
                changed_cycle = cycle[change]
                old_head = old_heads[change]
                new_head = new_heads[change]
                # Make the change
                parse_preds[changed_cycle] = new_head
                tarjan.edges[new_head].add(changed_cycle)
                tarjan.edges[old_head].remove(changed_cycle)
        return parse_preds
    else:
        # block and pad heads
        parse_probs = parse_probs * tokens_to_keep
        parse_preds = np.argmax(parse_probs, axis=1)
        return parse_preds
def chu_liu_edmonds(parse_probs, length):
    """Decode the maximum spanning tree of an arc-score matrix.

    Args:
        parse_probs: (seq_len, seq_len) matrix scoring arc head -> dependent
            at [dep, head]; transposed because ``decode_mst`` expects
            (head, dep) orientation.
        length: number of positions (including ROOT) to decode.

    Returns:
        1-D array of head indices; ROOT's own head (index 0) is forced to 0.
    """
    # BUG FIX: the original called decode_mst(hanlp.utils.span_util.T, ...),
    # leaving ``parse_probs`` unused -- almost certainly a mangled automated
    # rewrite of ``parse_probs.T``.
    tree = decode_mst(parse_probs.T, length, False)[0]
    tree[0] = 0  # ROOT points to itself by convention
    return tree
def unique_root(parse_probs, tokens_to_keep: np.ndarray, length):
    """Mask self-loops/padding and force exactly one token to attach to ROOT.

    Returns the adjusted greedy head predictions, the masked probability
    matrix, and the token index range 1..length-1.

    NOTE(review): the divisions by ``old_head_probs`` / ``root_probs``
    presume strictly positive probabilities -- confirm upstream guarantees.
    """
    I = np.eye(len(tokens_to_keep))
    # block loops and pad heads
    if tokens_to_keep.ndim == 1:
        tokens_to_keep = np.expand_dims(tokens_to_keep, -1)
    parse_probs = parse_probs * tokens_to_keep * (1 - I)
    parse_preds = np.argmax(parse_probs, axis=1)
    tokens = np.arange(1, length)
    roots = np.where(parse_preds[tokens] == 0)[0] + 1
    # ensure at least one root
    if len(roots) < 1:
        # The current root probabilities
        root_probs = parse_probs[tokens, 0]
        # The current head probabilities
        old_head_probs = parse_probs[tokens, parse_preds[tokens]]
        # Get new potential root probabilities
        new_root_probs = root_probs / old_head_probs
        # Select the most probable root
        new_root = tokens[np.argmax(new_root_probs)]
        # Make the change
        parse_preds[new_root] = 0
    # ensure at most one root
    elif len(roots) > 1:
        # The probabilities of the current heads
        root_probs = parse_probs[roots, 0]
        # Set the probability of depending on the root zero
        parse_probs[roots, 0] = 0
        # Get new potential heads and their probabilities
        new_heads = np.argmax(parse_probs[roots][:, tokens], axis=1) + 1
        new_head_probs = parse_probs[roots, new_heads] / root_probs
        # Select the most probable root (the one whose reattachment costs most)
        new_root = roots[np.argmin(new_head_probs)]
        # Make the change
        parse_preds[roots] = new_heads
        parse_preds[new_root] = 0
    return parse_preds, parse_probs, tokens
def dfs(graph, start, end):
    """Yield every cycle-free path from ``start`` to ``end``, start excluded.

    ``graph`` maps a node to an iterable of successors.  Paths are produced
    in depth-first (LIFO) order; each yielded path is the list of visited
    nodes after ``start``, ending with ``end``.
    """
    stack = [(start, [])]
    while stack:
        node, path = stack.pop()
        if path and node == end:
            yield path
            continue
        for successor in graph[node]:
            if successor not in path:
                stack.append((successor, path + [successor]))
def mst_then_greedy(arc_scores, rel_scores, mask, root_rel_idx, rel_idx=None):
    """Decode a dependency tree via MST, then add secondary arcs greedily.

    Args:
        arc_scores: (seq_len, seq_len) raw arc logits at [dep, head].
        rel_scores: (seq_len, seq_len, n_rels) relation logits.
        mask: per-token keep mask; ``sum(mask) + 1`` gives the decoded length
            including ROOT.
        root_rel_idx: index of the root relation label.
        rel_idx: if given, this relation label is disallowed on tree arcs
            before secondary-arc extraction.

    Returns:
        (tree, parse_graph) -- the MST head array plus the augmented graph
        from ``add_secondary_arcs_by_scores``.
    """
    from scipy.special import softmax
    from scipy.special import expit as sigmoid
    length = sum(mask) + 1
    mask = mask[:length]
    arc_scores = arc_scores[:length, :length]
    arc_pred = arc_scores > 0
    arc_probs = sigmoid(arc_scores)
    rel_scores = rel_scores[:length, :length, :]
    rel_probs = softmax(rel_scores, -1)
    if not any(arc_pred[:, 0][1:]):  # no root
        # Pick the token most likely to carry the root relation and force it.
        root = np.argmax(rel_probs[1:, 0, root_rel_idx]) + 1
        arc_probs[root, 0] = 1
    parse_preds, parse_probs, tokens = unique_root(arc_probs, mask, length)
    root = adjust_root_score(arc_scores, parse_preds, root_rel_idx, rel_scores)
    tree = chu_liu_edmonds(arc_scores, length)
    if rel_idx is not None:  # Unknown DEPREL label: 'ref'
        rel_scores[np.arange(len(tree)), tree, rel_idx] = -float('inf')
    return tree, add_secondary_arcs_by_scores(arc_scores, rel_scores, tree, root_rel_idx)
def adjust_root_score(arc_scores, parse_preds, root_rel_idx, rel_scores=None):
    """Force the predicted root token(s) to win the ROOT column, in place.

    Every other token's ROOT-attachment score is floored; the root token's
    is raised to the ceiling.  When ``rel_scores`` is given, the root
    relation label is likewise pinned.  Returns the root index array.
    """
    root = np.where(parse_preds[1:] == 0)[0] + 1
    # Floor is computed BEFORE the column is overwritten, ceiling after --
    # order matters and matches the original behavior.
    floor = min(np.min(arc_scores), -1000)
    arc_scores[:, 0] = floor
    ceiling = max(np.max(arc_scores), 1000)
    arc_scores[root, 0] = ceiling
    if rel_scores is not None:
        rel_scores[:, :, root_rel_idx] = -float('inf')
        rel_scores[root, 0, root_rel_idx] = float('inf')
    return root
def add_secondary_arcs_by_scores(arc_scores, rel_scores, tree, root_rel_idx, arc_preds=None):
    """Thin wrapper: derive boolean arc predictions and argmax relation
    predictions from raw scores, then delegate to
    ``add_secondary_arcs_by_preds``.
    """
    tree = tree if isinstance(tree, np.ndarray) else np.array(tree)
    if arc_preds is None:
        arc_preds = arc_scores > 0
    rel_pred = np.argmax(rel_scores, axis=-1)
    return add_secondary_arcs_by_preds(arc_scores, arc_preds, rel_pred, tree, root_rel_idx)
def add_secondary_arcs_by_preds(arc_scores, arc_preds, rel_preds, tree, root_rel_idx=None):
dh = np.argwhere(arc_preds)
sdh = sorted([(arc_scores[x[0], x[1]], list(x)) for x in dh], reverse=True)
graph = [[] for _ in range(len(tree))]
for d, h in enumerate(tree):
if d:
graph[h].append(d)
for s, (d, h) in sdh:
if not d or not h or d in graph[h]:
continue
try:
path = next(dfs(graph, d, h))
except StopIteration:
# no path from d to h
graph[h].append(d)
parse_graph = [[] for _ in range(len(tree))]
num_root = 0
for h in range(len(tree)):
for d in graph[h]:
rel = rel_preds[d, h]
if h == 0 and root_rel_idx is not None:
rel = root_rel_idx
assert num_root == 0
num_root += 1
parse_graph[d].append((h, rel))
parse_graph[d] = sorted(parse_graph[d])
return parse_graph
def adjust_root_score_then_add_secondary_arcs(arc_scores, rel_scores, tree, root_rel_idx):
    """Trim score matrices to the tree length, block ROOT as a head target,
    and extract secondary arcs.

    Args:
        arc_scores: (seq_len, seq_len) arc logits at [dep, head].
        rel_scores: (seq_len, seq_len, n_rels) relation logits (mutated:
            the root relation channel is set to -inf).
        tree: decoded head indices, one per position.
        root_rel_idx: index of the root relation label.

    Returns:
        The augmented parse graph from ``add_secondary_arcs_by_scores``.
    """
    # BUG FIX: the original guard was ``len(arc_scores) != tree`` (int
    # compared to a list/array), which is always truthy; compare lengths as
    # clearly intended.  Slicing to an equal length was a no-op anyway, so
    # behavior is unchanged for well-formed inputs.
    if len(arc_scores) != len(tree):
        arc_scores = arc_scores[:len(tree), :len(tree)]
        rel_scores = rel_scores[:len(tree), :len(tree), :]
    parse_preds = arc_scores > 0
    # adjust_root_score(arc_scores, parse_preds, rel_scores)
    parse_preds[:, 0] = False  # set heads to False
    rel_scores[:, :, root_rel_idx] = -float('inf')
    return add_secondary_arcs_by_scores(arc_scores, rel_scores, tree, root_rel_idx, parse_preds)
| 34.86262 | 119 | 0.591459 |
bfc78aba7a65c5242a9f8828e068975eece9e10b | 7,921 | py | Python | tail/modecoupling.py | ickc/TAIL | 3b4e49d6d5a4c2b57f7de5cbfcb2441e405efbeb | [
"BSD-3-Clause"
] | 1 | 2020-12-10T22:58:02.000Z | 2020-12-10T22:58:02.000Z | tail/modecoupling.py | ickc/TAIL | 3b4e49d6d5a4c2b57f7de5cbfcb2441e405efbeb | [
"BSD-3-Clause"
] | 1 | 2017-04-24T09:31:29.000Z | 2017-04-24T09:31:29.000Z | tail/modecoupling.py | ickc/TAIL | 3b4e49d6d5a4c2b57f7de5cbfcb2441e405efbeb | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from numba import jit, prange
from scipy.fftpack import fft2, next_fast_len
from dautil.util import zero_padding
from tail.numba_wrap import fftfreq
from tail.util import fill_nan, norm_fft, normalize_row
@jit(nopython=True, nogil=True, parallel=True)
def _bin_psd2(pixel_size, l_max, mask):
    '''Bin the 2-D auto power spectrum of ``mask`` (an FFT array) into
    integer multipoles 0..l_max-1.

    Identical to ``_bin_psd2_cross`` except that mask1 == mask2, so the
    per-pixel power reduces to |mask|^2.
    '''
    N = mask.shape[0]
    freq = fftfreq(N, pixel_size)
    n = l_max + 1
    # Index l_max acts as an overflow bin and is dropped before returning.
    psd_1d = np.zeros(n)
    hit = np.zeros(n, dtype=np.int64)
    pi_2 = np.pi * 2.
    for i in prange(N):
        freq_i = freq[i]
        for j in range(N):
            freq_j = freq[j]
            # Radial multipole l = 2*pi*|k|, rounded to the nearest integer.
            l = int(round(pi_2 * np.sqrt(freq_i * freq_i + freq_j * freq_j)))
            idx = l if l < l_max else l_max
            hit[idx] += 1
            # psd_2d
            mask_ij = mask[i, j]
            real = mask_ij.real
            imag = mask_ij.imag
            psd_1d[idx] += real * real + imag * imag
    psd_1d = psd_1d[:-1]
    hit = hit[:-1]
    # Average per bin; empty bins become NaN and are then interpolated over.
    for i in range(l_max):
        hit_ = hit[i]
        psd_1d[i] = psd_1d[i] / hit_ if hit_ > 0 else np.nan
    fill_nan(psd_1d)
    return psd_1d
@jit(nopython=True, nogil=True, parallel=True)
def _bin_psd2_cross(pixel_size, l_max, mask1, mask2):
    '''Bin the 2-D cross power spectrum of two FFT arrays into integer
    multipoles 0..l_max-1 (bins 2d fft to 1d integers).

    Per pixel the cross power is Re(mask1 * conj(mask2)).
    '''
    N = mask1.shape[0]
    freq = fftfreq(N, pixel_size)
    n = l_max + 1
    # Index l_max acts as an overflow bin and is dropped before returning.
    psd_1d = np.zeros(n)
    hit = np.zeros(n, dtype=np.int64)
    pi_2 = np.pi * 2.
    for i in prange(N):
        freq_i = freq[i]
        for j in range(N):
            freq_j = freq[j]
            l = int(round(pi_2 * np.sqrt(freq_i * freq_i + freq_j * freq_j)))
            idx = l if l < l_max else l_max
            hit[idx] += 1
            # psd_2d: real part of mask1 * conj(mask2)
            mask1_ij = mask1[i, j]
            mask2_ij = mask2[i, j]
            psd_1d[idx] += mask1_ij.real * mask2_ij.real + mask1_ij.imag * mask2_ij.imag
    psd_1d = psd_1d[:-1]
    hit = hit[:-1]
    # Average per bin; empty bins become NaN and are then interpolated over.
    for i in range(l_max):
        hit_ = hit[i]
        psd_1d[i] = psd_1d[i] / hit_ if hit_ > 0 else np.nan
    fill_nan(psd_1d)
    return psd_1d
def _get_W(l_max, pixel_size, mask1, mask2=None, l_min=1):
    '''if ``mask2 is None``, get auto-psd of ``mask1``,
    else cross-psd of ``mask1`` and ``mask2``.
    return the 1d-spectrum, binned to integers up to (but not include) ``l_max``

    ``l_min`` controls the zero-padded grid size so the FFT resolves
    multipoles down to l_min.
    '''
    def _get_fft(mask, n_x):
        # Zero-pad to n_x x n_x, FFT, and apply the normalization convention.
        mask = zero_padding(mask, (n_x, n_x))
        return fft2(mask) * norm_fft(mask)

    # Grid must be at least as large as the mask and fine enough for l_min;
    # round up to an FFT-friendly size.
    n_x = max(int(round(np.pi / (pixel_size * l_min))), mask1.shape[0])
    n_x = next_fast_len(n_x)
    mask1_fft = _get_fft(mask1, n_x)
    mask2_fft = None if mask2 is None else _get_fft(mask2, n_x)
    W = _bin_psd2(pixel_size, l_max, mask1_fft) if mask2_fft is None else \
        _bin_psd2_cross(pixel_size, l_max, mask1_fft, mask2_fft)
    return W
@jit(nopython=True, nogil=True)
def _J_t(k1, k2, k3):
    '''See Eq. A10 from MASTER paper
    it actually returns J_t * pi / 2 because overall scale doesn't matter

    Returns 0 when (k1, k2, k3) cannot form a triangle (temp <= 0).
    '''
    k1_2 = k1 * k1
    k2_2 = k2 * k2
    k3_2 = k3 * k3
    # temp is 16 * (triangle area)^2 by Heron's-formula expansion.
    temp = 2 * (k1_2 * k2_2 + k2_2 * k3_2 + k3_2 * k1_2) - k1_2 * k1_2 - k2_2 * k2_2 - k3_2 * k3_2
    # factor of 2 / pi ignored
    # return 2. / (np.pi * np.sqrt(temp)) if temp > 0 else 0.
    return 1. / np.sqrt(temp) if temp > 0 else 0.
@jit(nopython=True, nogil=True)
def _get_alpha(k1, k2, k3):
    '''return the angle in [0, pi], corresponds to k1
    made in the triangle of k1, k2, k3
    essentially just cosine rule
    '''
    return np.arccos((k2 * k2 + k3 * k3 - k1 * k1) / (2 * k2 * k3))
def _get_J_p(Mtype, pure='hybrid'):
    '''Return the jitted polarization kernel J_p(k1, k2, k3) for a
    mode-coupling matrix type.

    supported cases:
    ('EEEE', 'hybrid'),
    ('BBBB', 'hybrid'),
    ('TETE', 'hybrid'),
    ('TBTB', 'hybrid'),
    ('EBEB', 'hybrid'),
    ('EBEB', 'pseudo')
    To include other cases, port them from commit 70fba3c.
    '''
    @jit(nopython=True, nogil=True)
    def tete(k1, k2, k3):
        alpha3 = _get_alpha(k3, k1, k2)
        return np.cos(2. * alpha3)

    @jit(nopython=True, nogil=True)
    def eeee(k1, k2, k3):
        alpha3 = _get_alpha(k3, k1, k2)
        temp = np.cos(2. * alpha3)
        return temp * temp

    @jit(nopython=True, nogil=True)
    def ebeb_pseudo(k1, k2, k3):
        alpha3 = _get_alpha(k3, k1, k2)
        return np.cos(4. * alpha3)

    @jit(nopython=True, nogil=True)
    def tbtb(k1, k2, k3):
        alpha1 = _get_alpha(k1, k2, k3)
        alpha3 = _get_alpha(k3, k1, k2)
        k3_k1 = k3 / k1
        temp = np.cos(2. * alpha3) + 2. * k3_k1 * np.cos(alpha3 - alpha1) + k3_k1 * k3_k1 * np.cos(2. * alpha1)
        return temp

    @jit(nopython=True, nogil=True)
    def bbbb(k1, k2, k3):
        # Same bracket as tbtb, squared.
        alpha1 = _get_alpha(k1, k2, k3)
        alpha3 = _get_alpha(k3, k1, k2)
        k3_k1 = k3 / k1
        temp = np.cos(2. * alpha3) + 2. * k3_k1 * np.cos(alpha3 - alpha1) + k3_k1 * k3_k1 * np.cos(2. * alpha1)
        return temp * temp

    @jit(nopython=True, nogil=True)
    def ebeb(k1, k2, k3):
        alpha1 = _get_alpha(k1, k2, k3)
        alpha3 = _get_alpha(k3, k1, k2)
        alpha31 = alpha3 - alpha1
        # NOTE: alpha1/alpha3 are doubled in place below.
        alpha1 *= 2.
        alpha3 *= 2.
        k3_k1 = k3 / k1
        k3_k1_2 = k3_k1 * k3_k1
        k3_k1 *= 2.
        temp = np.cos(alpha3)
        temp *= temp + k3_k1 * np.cos(alpha31) + k3_k1_2 * np.cos(alpha1)
        temp2 = np.sin(alpha3)
        temp2 *= temp2 + k3_k1 * np.sin(alpha31) - k3_k1_2 * np.sin(alpha1)
        return temp - temp2

    # Dispatch on matrix type; only EBEB distinguishes hybrid vs pseudo.
    if Mtype == 'EEEE':
        return eeee
    elif Mtype == 'BBBB':
        return bbbb
    elif Mtype == 'TETE':
        return tete
    elif Mtype == 'TBTB':
        return tbtb
    elif Mtype == 'EBEB':
        if pure == 'hybrid':
            return ebeb
        else:
            return ebeb_pseudo
def _get_M_gen(Mtype, pure='hybrid'):
    '''Return a compiled function ``_get_M(W, l_max, dl)`` that computes the
    binned coupling matrix for the given ``Mtype``.

    ``pure`` is forwarded to ``_get_J_p`` and selects between the 'hybrid'
    and 'pseudo' EBEB kernels.
    '''
    if Mtype == 'TTTT':
        _J = _J_t
    else:
        # BUG FIX: ``pure`` was previously hard-coded to 'hybrid' here,
        # silently ignoring the caller's choice, e.g. ('EBEB', 'pseudo').
        _J_p = _get_J_p(Mtype, pure=pure)

        @jit(nopython=True, nogil=True)
        def _J(k1, k2, k3):
            return _J_t(k1, k2, k3) * _J_p(k1, k2, k3)

    @jit(nopython=True, nogil=True)
    def simps(W, k1, k2):
        '''integrate W * J * k3 for k3 in (k3_min, k3_max)
        using Simpson's rule.
        1st term of Simpson's rule put at k3_min,
        hence the first non-zero terms are 4, then 2, ...
        which equals to 2 * (2 - i % 2)
        '''
        k3_min = np.abs(k1 - k2)
        k3_max = k1 + k2
        result = 0.
        for i, k3 in enumerate(range(k3_min + 1, k3_max)):
            result += (2 - i % 2) * _J(k1, k2, k3) * W[k3] * k3
        # factor of 2 / 3 ignored
        # return result / 1.5
        return result

    @jit(nopython=True, nogil=True, parallel=True)
    def _get_M(W, l_max, dl):
        '''Note that the middle of the l-bin is biased by 0.5.
        e.g. dl = 10. first bin is [0, 10), middle is chosen as 5,
        but it should be 4.5 instead.
        '''
        # ``bin_width`` is actually half the bin width (the bin-center offset)
        bin_width = dl // 2
        n = l_max // dl
        M = np.empty((n, n))
        for i in prange(n):
            k1 = bin_width + dl * i
            for j in range(n):
                k2 = bin_width + dl * j
                # factor of 2 pi ignored
                # M[i, j] = 2. * np.pi * k2 * simps(W, k1, k2)
                M[i, j] = k2 * simps(W, k1, k2)
        # from all the factors ignored above, it should return this instead
        # return M * (8. / 3.)
        return M
    return _get_M
def calc_M(mask1, mask2, Mtype, pure, pixel_size=0.0005817764173314432, l_max=3000, dl=10, normalize=True):
    '''Compute the (binned) coupling matrix M for the given pair of masks.

    assume ``l_max % dl == 0``, any excess will be included. e.g. if l_max=3001, dl=10, then
    the last bin is [3000, 3010)
    For no binning, set ``dl = 1``.
    '''
    # k3 < k1_max + k2_max = 2 * l_max - dl - dl % 2
    W = _get_W(2 * l_max - dl - dl % 2, pixel_size, mask1, mask2=mask2)
    get_M = _get_M_gen(Mtype, pure=pure)
    M = get_M(W, l_max, dl)
    if normalize:
        # normalize_row is expected to modify M in place (return value ignored)
        normalize_row(M)
    return M
| 29.337037 | 111 | 0.550309 |
36a24eae5aaf60ceb72a84728799a20ce4d64699 | 65,572 | py | Python | plasmapy/physics/transport/collisions.py | pep8speaks/PlasmaPy | d00f32b0266f8f39ff1ce4a1b38afbc1b9c7b733 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2019-04-10T06:00:46.000Z | 2019-04-10T06:00:46.000Z | plasmapy/physics/transport/collisions.py | pep8speaks/PlasmaPy | d00f32b0266f8f39ff1ce4a1b38afbc1b9c7b733 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | plasmapy/physics/transport/collisions.py | pep8speaks/PlasmaPy | d00f32b0266f8f39ff1ce4a1b38afbc1b9c7b733 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | """Functions to calculate transport coefficients.
This module includes a number of functions for handling Coulomb collisions
spanning weakly coupled (low density) to strongly coupled (high density)
regimes.
Coulomb collisions
==================
Coulomb collisions are collisions where the interaction force is conveyed
via the electric field, instead of any kind of contact force. They usually
result in relatively small deflections of particle trajectories. However,
given that there are many charged particles in a plasma, one has to take
into account the cumulative effects of many such collisions.
Coulomb logarithms
==================
Please see the documentation for the `Coulomb logarithm <Coulomb_logarithm>`_
for a review of the many ways in which one can define and calculate
that quantity.
Collision rates
===============
The module gathers a few functions helpful for calculating collision
rates between particles. The most general of these is `collision_frequency`,
while if you need average values for a Maxwellian distribution, try
out `collision_rate_electron_ion` and `collision_rate_ion_ion`. These
use `collision_frequency` under the hood.
Macroscopic properties
======================
These include:
* `Spitzer_resistivity`
* `mobility`
* `Knudsen_number`
* `coupling_parameter`
"""
# python modules
from astropy import units as u
import numpy as np
import warnings
# plasmapy modules
from plasmapy import utils
from plasmapy.constants import (c, m_e, k_B, e, eps0, pi, hbar)
from plasmapy import atomic
from plasmapy.physics import parameters
from plasmapy.physics.quantum import (Wigner_Seitz_radius,
thermal_deBroglie_wavelength,
chemical_potential)
from plasmapy.mathematics import Fermi_integral
from plasmapy.utils import check_quantity, _check_relativistic
__all__ = [
"Coulomb_logarithm",
"impact_parameter_perp",
"impact_parameter",
"collision_frequency",
"Coulomb_cross_section",
"fundamental_electron_collision_freq",
"fundamental_ion_collision_freq",
"mean_free_path",
"Spitzer_resistivity",
"mobility",
"Knudsen_number",
"coupling_parameter",
]
@utils.check_quantity(T={"units": u.K, "can_be_negative": False},
n_e={"units": u.m ** -3})
@atomic.particle_input
def Coulomb_logarithm(T,
n_e,
particles: (atomic.Particle, atomic.Particle),
z_mean=np.nan * u.dimensionless_unscaled,
V=np.nan * u.m / u.s,
method="classical"):
r"""
Estimates the Coulomb logarithm.
Parameters
----------
T : ~astropy.units.Quantity
Temperature in units of temperature or energy per particle,
which is assumed to be equal for both the test particle and
the target particle.
n_e : ~astropy.units.Quantity
The electron density in units convertible to per cubic meter.
particles : tuple
A tuple containing string representations of the test particle
(listed first) and the target particle (listed second).
z_mean : ~astropy.units.Quantity, optional
The average ionization (arithmetic mean) for a plasma where the
a macroscopic description is valid. This is used to recover the
average ion density (given the average ionization and electron
density) for calculating the ion sphere radius for non-classical
impact parameters.
V : ~astropy.units.Quantity, optional
The relative velocity between particles. If not provided,
thermal velocity is assumed: :math:`\mu V^2 \sim 2 k_B T`
where `mu` is the reduced mass.
method: str, optional
Selects which theory to use when calculating the Coulomb
logarithm. Defaults to classical method.
Returns
-------
lnLambda : float or numpy.ndarray
An estimate of the Coulomb logarithm that is accurate to
roughly its reciprocal.
Raises
------
ValueError
If the mass or charge of either particle cannot be found, or
any of the inputs contain incorrect values.
UnitConversionError
If the units on any of the inputs are incorrect.
If the n_e, T, or V are not Quantities.
PhysicsError
If the result is smaller than 1.
RelativityError
If the input velocity is same or greater than the speed
of light.
Warns
-----
~astropy.units.UnitsWarning
If units are not provided, SI units are assumed
~plasmapy.utils.RelativityWarning
If the input velocity is greater than 5% of the speed of
light.
Notes
-----
The classical Coulomb logarithm is given by
.. math::
\ln{\Lambda} \equiv \ln\left( \frac{b_{max}}{b_{min}} \right)
where :math:`b_{min}` and :math:`b_{max}` are the inner and outer
impact parameters for Coulomb collisions [1]_.
The outer impact parameter is given by the Debye length:
:math:`b_{min} = \lambda_D` which is a function of electron
temperature and electron density. At distances greater than the
Debye length, electric fields from other particles will be
screened out due to electrons rearranging themselves.
The choice of inner impact parameter is either the distance of closest
approach for a 90 degree Coulomb collision or the thermal deBroglie
wavelength, whichever is larger. This is because Coulomb-style collisions
cannot occur for impact parameters shorter than the deBroglie
wavelength because quantum effects will change the fundamental
nature of the collision [2]_, [3]_.
Errors associated with the classical Coulomb logarithm are of order its
inverse. If the Coulomb logarithm is of order unity, then the
assumptions made in the standard analysis of Coulomb collisions
are invalid.
For dense plasmas where the classical Coulomb logarithm breaks
down there are various extended methods. These can be found
in D.O. Gericke et al's paper, which has a table summarizing
the methods [4]_. The GMS-1 through GMS-6 methods correspond
to the methods found it that table.
It should be noted that GMS-4 thru GMS-6 modify the Coulomb
logarithm to the form:
.. math::
\ln{\Lambda} \equiv 0.5 \ln\left(1 + \frac{b_{max}^2}{b_{min}^2} \right)
This means the Coulomb logarithm will not break down for Lambda < 0,
which occurs for dense, cold plasmas.
Methods
---
Classical
classical Landau-Spitzer approach. Fails for large coupling
parameter where Lambda can become less than zero.
GMS-1
1st method listed in Table 1 of reference [3]
Landau-Spitzer, but with interpolated bmin instead of bmin
selected between deBroglie wavelength and distance of closest
approach. Fails for large coupling
parameter where Lambda can become less than zero.
GMS-2
2nd method listed in Table 1 of reference [3]
Another Landau-Spitzer like approach, but now bmax is also
being interpolated. The interpolation is between the Debye
length and the ion sphere radius, allowing for descriptions
of dilute plasmas. Fails for large coupling
parameter where Lambda can become less than zero.
3rd method listed in Table 1 of reference [3]
classical Landau-Spitzer fails for argument of Coulomb logarithm
Lambda < 0, therefore a clamp is placed at Lambda_min = 2
GMS-4
4th method listed in Table 1 of reference [3]
Spitzer-like extension to Coulomb logarithm by noting that
Coulomb collisions take hyperbolic trajectories. Removes
divergence for small bmin issue in classical Landau-Spitzer
approach, so bmin can be zero. Also doesn't break down as
Lambda < 0 is now impossible, even when coupling parameter is large.
GMS-5
5th method listed in Table 1 of reference [3]
Similar to GMS-4, but setting bmin as distance of closest approach
and bmax interpolated between Debye length and ion sphere radius.
Lambda < 0 impossible.
GMS-6
6th method listed in Table 1 of reference [3]
Similar to GMS-4 and GMS-5, but using interpolation methods
for both bmin and bmax.
Examples
--------
>>> from astropy import units as u
>>> n = 1e19*u.m**-3
>>> T = 1e6*u.K
>>> particles = ('e', 'p')
>>> Coulomb_logarithm(T, n, particles)
14.545527226436974
>>> Coulomb_logarithm(T, n, particles, V=1e6 * u.m / u.s)
11.363478214139432
References
----------
.. [1] Physics of Fully Ionized Gases, L. Spitzer (1962)
.. [2] Francis, F. Chen. Introduction to plasma physics and controlled
fusion 3rd edition. Ch 5 (Springer 2015).
.. [3] Comparison of Coulomb Collision Rates in the Plasma Physics
and Magnetically Confined Fusion Literature, W. Fundamenski and
O.E. Garcia, EFDA–JET–R(07)01
(http://www.euro-fusionscipub.org/wp-content/uploads/2014/11/EFDR07001.pdf)
.. [4] Dense plasma temperature equilibration in the binary collision
approximation. D. O. Gericke et. al. PRE, 65, 036418 (2002).
DOI: 10.1103/PhysRevE.65.036418
"""
# fetching impact min and max impact parameters
bmin, bmax = impact_parameter(T=T,
n_e=n_e,
particles=particles,
z_mean=z_mean,
V=V,
method=method)
if method in ("classical", "GMS-1", "GMS-2"):
ln_Lambda = np.log(bmax / bmin)
elif method == "GMS-3":
ln_Lambda = np.log(bmax / bmin)
if np.any(ln_Lambda < 2):
if np.isscalar(ln_Lambda.value):
ln_Lambda = 2 * u.dimensionless_unscaled
else:
ln_Lambda[ln_Lambda < 2] = 2 * u.dimensionless_unscaled
elif method in ("GMS-4", "GMS-5", "GMS-6"):
ln_Lambda = 0.5 * np.log(1 + bmax ** 2 / bmin ** 2)
else:
raise ValueError("Unknown method! Choose from 'classical' and 'GMS-N', N from 1 to 6.")
# applying dimensionless units
ln_Lambda = ln_Lambda.to(u.dimensionless_unscaled).value
if np.any(ln_Lambda < 2) and method in ["classical", "GMS-1", "GMS-2"]:
warnings.warn(f"Coulomb logarithm is {ln_Lambda} and {method} relies on weak coupling.",
utils.CouplingWarning)
elif np.any(ln_Lambda < 4):
warnings.warn(f"Coulomb logarithm is {ln_Lambda}, you might have strong coupling effects",
utils.CouplingWarning)
return ln_Lambda
@atomic.particle_input
def _boilerPlate(T, particles: (atomic.Particle, atomic.Particle), V):
    """
    Validate common inputs for functions in collisions.py and derive
    shared quantities: converts T to kelvin, collects the particles'
    masses and absolute charges, computes the reduced mass of the
    two-particle system, and fills in a thermal velocity when V is
    not supplied.
    """
    # temperature may be given as energy per particle; normalize to kelvin
    T = T.to(u.K, equivalencies=u.temperature_energy())
    masses = []
    charges = []
    for particle in particles:
        masses.append(particle.mass)
        charges.append(np.abs(particle.charge))
    # reduced mass of the 2 particle collision system
    reduced_mass = atomic.reduced_mass(*particles)
    # fall back to the thermal velocity when V is missing/NaN
    V = _replaceNanVwithThermalV(V, T, reduced_mass)
    _check_relativistic(V, 'V')
    return T, masses, charges, reduced_mass, V
def _replaceNanVwithThermalV(V, T, m):
    """
    Get thermal velocity of system if no velocity is given, for a given mass.
    Handles vector checks for V, you must already know that T and m are okay.

    ``V`` may be None, a scalar Quantity, or an array Quantity; NaN entries
    (the sentinel for "not provided") are replaced by the thermal speed
    computed from ``T`` and mass ``m``.
    """
    if np.any(V == 0):
        raise utils.PhysicsError("You cannot have a collision for zero velocity!")
    # getting thermal velocity of system if no velocity is given
    if V is None:
        V = parameters.thermal_speed(T, mass=m)
    elif np.any(np.isnan(V)):
        if np.isscalar(V.value) and np.isscalar(T.value):
            # scalar V, scalar T: replace wholesale
            V = parameters.thermal_speed(T, mass=m)
        elif np.isscalar(V.value):
            # scalar (NaN) V with array T: V becomes an array of thermal speeds
            V = parameters.thermal_speed(T, mass=m)
        elif np.isscalar(T.value):
            # array V, scalar T: fill only the NaN entries
            # (copy first so the caller's array is not mutated)
            V = V.copy()
            V[np.isnan(V)] = parameters.thermal_speed(T, mass=m)
        else:
            # array V, array T: fill NaN entries from the matching T entries
            V = V.copy()
            V[np.isnan(V)] = parameters.thermal_speed(T[np.isnan(V)], mass=m)
    return V
@check_quantity(T={"units": u.K, "can_be_negative": False})
@atomic.particle_input
def impact_parameter_perp(T,
                          particles: (atomic.Particle, atomic.Particle),
                          V=np.nan * u.m / u.s):
    r"""Distance of closest approach for a 90 degree Coulomb collision.
    Parameters
    ----------
    T : ~astropy.units.Quantity
        Temperature in units of temperature or energy per particle,
        which is assumed to be equal for both the test particle and
        the target particle
    particles : tuple
        A tuple containing string representations of the test particle
        (listed first) and the target particle (listed second)
    V : ~astropy.units.Quantity, optional
        The relative velocity between particles. If not provided,
        thermal velocity is assumed: :math:`\mu V^2 \sim 2 k_B T`
        where `mu` is the reduced mass.
    Returns
    -------
    impact_parameter_perp : float or numpy.ndarray
        The distance of closest approach for a 90 degree Coulomb collision.
    Raises
    ------
    ValueError
        If the mass or charge of either particle cannot be found, or
        any of the inputs contain incorrect values.
    UnitConversionError
        If the units on any of the inputs are incorrect
    TypeError
        If T, or V are not Quantities.
    RelativityError
        If the input velocity is same or greater than the speed
        of light.
    Warns
    -----
    ~astropy.units.UnitsWarning
        If units are not provided, SI units are assumed
    ~plasmapy.utils.RelativityWarning
        If the input velocity is greater than 5% of the speed of
        light.
    Notes
    -----
    The distance of closest approach, impact_parameter_perp, is given by [1]_
    .. math::
        b_{\perp} = \frac{Z_1 Z_2}{4 \pi \epsilon_0 m v^2}
    where :math:`Z_1` and :math:`Z_2` are the particles' charges and
    :math:`m` the reduced mass of the pair.
    Examples
    --------
    >>> from astropy import units as u
    >>> T = 1e6*u.K
    >>> particles = ('e', 'p')
    >>> impact_parameter_perp(T, particles)
    <Quantity 8.35505011e-12 m>
    References
    ----------
    .. [1] Francis, F. Chen. Introduction to plasma physics and controlled
       fusion 3rd edition. Ch 5 (Springer 2015).
    """
    # boiler plate checks
    T, masses, charges, reduced_mass, V = _boilerPlate(T=T,
                                                       particles=particles,
                                                       V=V)
    # Corresponds to a deflection of 90 degrees, which is valid when
    # classical effects dominate.
    # !!!Note: an average ionization parameter will have to be
    # included here in the future
    bPerp = (charges[0] * charges[1] / (4 * pi * eps0 * reduced_mass * V ** 2))
    return bPerp.to(u.m)
@check_quantity(T={"units": u.K, "can_be_negative": False},
                n_e={"units": u.m ** -3}
                )
def impact_parameter(T,
                     n_e,
                     particles,
                     z_mean=np.nan * u.dimensionless_unscaled,
                     V=np.nan * u.m / u.s,
                     method="classical"):
    r"""Impact parameters for classical and quantum Coulomb collision
    Parameters
    ----------
    T : ~astropy.units.Quantity
        Temperature in units of temperature or energy per particle,
        which is assumed to be equal for both the test particle and
        the target particle
    n_e : ~astropy.units.Quantity
        The electron density in units convertible to per cubic meter.
    particles : tuple
        A tuple containing string representations of the test particle
        (listed first) and the target particle (listed second)
    z_mean : ~astropy.units.Quantity, optional
        The average ionization (arithmetic mean) for a plasma where the
        a macroscopic description is valid. This is used to recover the
        average ion density (given the average ionization and electron
        density) for calculating the ion sphere radius for non-classical
        impact parameters.
    V : ~astropy.units.Quantity, optional
        The relative velocity between particles. If not provided,
        thermal velocity is assumed: :math:`\mu V^2 \sim 2 k_B T`
        where `mu` is the reduced mass.
    method: str, optional
        Selects which theory to use when calculating the Coulomb
        logarithm. Defaults to classical method.
    Returns
    -------
    bmin, bmax : tuple of floats
        The minimum and maximum impact parameters (distances) for a
        Coulomb collision.
    Raises
    ------
    ValueError
        If the mass or charge of either particle cannot be found, or
        any of the inputs contain incorrect values.
    UnitConversionError
        If the units on any of the inputs are incorrect
    TypeError
        If the n_e, T, or V are not Quantities.
    RelativityError
        If the input velocity is same or greater than the speed
        of light.
    Warns
    -----
    ~astropy.units.UnitsWarning
        If units are not provided, SI units are assumed
    ~plasmapy.utils.RelativityWarning
        If the input velocity is greater than 5% of the speed of
        light.
    Notes
    -----
    The minimum and maximum impact parameters may be calculated in a
    variety of ways. The maximum impact parameter is typically
    the Debye length.
    For quantum plasmas the maximum impact parameter can be the
    quadratic sum of the debye length and ion radius (Wigner_Seitz) [1]_
    .. math::
        b_{max} = \left(\lambda_{De}^2 + a_i^2\right)^{1/2}
    The minimum impact parameter is typically some combination of the
    thermal deBroglie wavelength and the distance of closest approach
    for a 90 degree Coulomb collision. A quadratic sum is used for
    all GMS methods, except for GMS-5, where b_min is simply set to
    the distance of closest approach [1]_.
    .. math::
        b_{min} = \left(\Lambda_{deBroglie}^2 + \rho_{\perp}^2\right)^{1/2}
    Examples
    --------
    >>> from astropy import units as u
    >>> n = 1e19*u.m**-3
    >>> T = 1e6*u.K
    >>> particles = ('e', 'p')
    >>> impact_parameter(T, n, particles)
    (<Quantity 1.05163088e-11 m>, <Quantity 2.18225522e-05 m>)
    >>> impact_parameter(T, n, particles, V=1e6 * u.m / u.s)
    (<Quantity 2.53401778e-10 m>, <Quantity 2.18225522e-05 m>)
    References
    ----------
    .. [1] Dense plasma temperature equilibration in the binary collision
       approximation. D. O. Gericke et. al. PRE, 65, 036418 (2002).
       DOI: 10.1103/PhysRevE.65.036418
    """
    # boiler plate checks
    T, masses, charges, reduced_mass, V = _boilerPlate(T=T,
                                                       particles=particles,
                                                       V=V)
    # catching error where mean charge state is not given for non-classical
    # methods that require the ion density
    if method in ("GMS-2", "GMS-5", "GMS-6"):
        if np.isnan(z_mean):
            raise ValueError("Must provide a z_mean for GMS-2, GMS-5, and "
                             "GMS-6 methods.")
    # Debye length
    lambdaDe = parameters.Debye_length(T, n_e)
    # deBroglie wavelength
    lambdaBroglie = hbar / (2 * reduced_mass * V)
    # distance of closest approach in 90 degree Coulomb collision
    bPerp = impact_parameter_perp(T=T,
                                  particles=particles,
                                  V=V)
    # obtaining minimum and maximum impact parameters depending on which
    # method is requested
    if method == "classical":
        bmax = lambdaDe
        # Coulomb-style collisions will not happen for impact parameters
        # shorter than either of these two impact parameters, so we choose
        # the larger of these two possibilities. That is, between the
        # deBroglie wavelength and the distance of closest approach.
        # ARRAY NOTES
        # T and V should be guaranteed to be same size inputs from _boilerplate
        # therefore, lambdaBroglie and bPerp are either both scalar or both array
        # if np.isscalar(bPerp.value) and np.isscalar(lambdaBroglie.value): # both scalar
        try:  # assume both scalar
            if bPerp > lambdaBroglie:
                bmin = bPerp
            else:
                bmin = lambdaBroglie
        # else: # both lambdaBroglie and bPerp are arrays
        except ValueError:  # both lambdaBroglie and bPerp are arrays
            bmin = lambdaBroglie
            bmin[bPerp > lambdaBroglie] = bPerp[bPerp > lambdaBroglie]
    elif method == "GMS-1":
        # 1st method listed in Table 1 of reference [1]
        # This is just another form of the classical Landau-Spitzer
        # approach, but bmin is interpolated between the deBroglie
        # wavelength and distance of closest approach.
        bmax = lambdaDe
        bmin = (lambdaBroglie ** 2 + bPerp ** 2) ** (1 / 2)
    elif method == "GMS-2":
        # 2nd method listed in Table 1 of reference [1]
        # Another Landau-Spitzer like approach, but now bmax is also
        # being interpolated. The interpolation is between the Debye
        # length and the ion sphere radius, allowing for descriptions
        # of dilute plasmas.
        # Mean ion density.
        n_i = n_e / z_mean
        # mean ion sphere radius.
        ionRadius = Wigner_Seitz_radius(n_i)
        bmax = (lambdaDe ** 2 + ionRadius ** 2) ** (1 / 2)
        bmin = (lambdaBroglie ** 2 + bPerp ** 2) ** (1 / 2)
    elif method == "GMS-3":
        # 3rd method listed in Table 1 of reference [1]
        # same as GMS-1, but not Lambda has a clamp at Lambda_min = 2
        # where Lambda is the argument to the Coulomb logarithm.
        # NOTE: GMS-1, GMS-3 and GMS-4 yield identical (bmin, bmax) here;
        # they differ only in how Coulomb_logarithm post-processes them.
        bmax = lambdaDe
        bmin = (lambdaBroglie ** 2 + bPerp ** 2) ** (1 / 2)
    elif method == "GMS-4":
        # 4th method listed in Table 1 of reference [1]
        bmax = lambdaDe
        bmin = (lambdaBroglie ** 2 + bPerp ** 2) ** (1 / 2)
    elif method == "GMS-5":
        # 5th method listed in Table 1 of reference [1]
        # Mean ion density.
        n_i = n_e / z_mean
        # mean ion sphere radius.
        ionRadius = Wigner_Seitz_radius(n_i)
        bmax = (lambdaDe ** 2 + ionRadius ** 2) ** (1 / 2)
        bmin = bPerp
    elif method == "GMS-6":
        # 6th method listed in Table 1 of reference [1]
        # Mean ion density.
        n_i = n_e / z_mean
        # mean ion sphere radius.
        ionRadius = Wigner_Seitz_radius(n_i)
        bmax = (lambdaDe ** 2 + ionRadius ** 2) ** (1 / 2)
        bmin = (lambdaBroglie ** 2 + bPerp ** 2) ** (1 / 2)
    else:
        raise ValueError(f"Method {method} not found!")
    # ARRAY NOTES
    # it could be that bmin and bmax have different sizes. If Te is a scalar,
    # T and V will be scalar from _boilerplate, so bmin will scalar. However
    # if n_e is an array, than bmax will be an array. if this is the case,
    # do we want to extend the scalar bmin to equal the length of bmax? Sure.
    if np.isscalar(bmin.value) and not np.isscalar(bmax.value):
        bmin = np.repeat(bmin, len(bmax))
    return bmin.to(u.m), bmax.to(u.m)
@check_quantity(T={"units": u.K, "can_be_negative": False},
                n={"units": u.m ** -3}
                )
def collision_frequency(T,
                        n,
                        particles,
                        z_mean=np.nan * u.dimensionless_unscaled,
                        V=np.nan * u.m / u.s,
                        method="classical"):
    r"""Collision frequency of particles in a plasma.
    Parameters
    ----------
    T : ~astropy.units.Quantity
        Temperature in units of temperature.
        This should be the electron temperature for electron-electron
        and electron-ion collisions, and the ion temperature for
        ion-ion collisions.
    n : ~astropy.units.Quantity
        The density in units convertible to per cubic meter.
        This should be the electron density for electron-electron collisions,
        and the ion density for electron-ion and ion-ion collisions.
    particles : tuple
        A tuple containing string representations of the test particle
        (listed first) and the target particle (listed second)
    z_mean : ~astropy.units.Quantity, optional
        The average ionization (arithmetic mean) for a plasma where the
        a macroscopic description is valid. This is used to recover the
        average ion density (given the average ionization and electron
        density) for calculating the ion sphere radius for non-classical
        impact parameters.
    V : ~astropy.units.Quantity, optional
        The relative velocity between particles. If not provided,
        thermal velocity is assumed: :math:`\mu V^2 \sim 2 k_B T`
        where `mu` is the reduced mass.
    method: str, optional
        Selects which theory to use when calculating the Coulomb
        logarithm. Defaults to classical method.
    Returns
    -------
    freq : float or numpy.ndarray
        The collision frequency of particles in a plasma.
    Raises
    ------
    ValueError
        If the mass or charge of either particle cannot be found, or
        any of the inputs contain incorrect values.
    UnitConversionError
        If the units on any of the inputs are incorrect
    TypeError
        If the n_e, T, or V are not Quantities.
    RelativityError
        If the input velocity is same or greater than the speed
        of light.
    Warns
    -----
    ~astropy.units.UnitsWarning
        If units are not provided, SI units are assumed
    ~plasmapy.utils.RelativityWarning
        If the input velocity is greater than 5% of the speed of
        light.
    Notes
    -----
    The collision frequency is given by [1]_
    .. math::
        \nu = n \sigma v \ln{\Lambda}
    where n is the particle density, :math:`\sigma` is the collisional
    cross-section, :math:`v` is the inter-particle velocity (typically
    taken as the thermal velocity), and :math:`\ln{\Lambda}` is the Coulomb
    logarithm accounting for small angle collisions.
    See eq (2.14) in [2]_.
    Examples
    --------
    >>> from astropy import units as u
    >>> n = 1e19*u.m**-3
    >>> T = 1e6*u.K
    >>> particles = ('e', 'p')
    >>> collision_frequency(T, n, particles)
    <Quantity 702505.15998601 Hz>
    References
    ----------
    .. [1] Francis, F. Chen. Introduction to plasma physics and controlled
       fusion 3rd edition. Ch 5 (Springer 2015).
    .. [2] http://homepages.cae.wisc.edu/~callen/chap2.pdf
    """
    # boiler plate checks
    T, masses, charges, reduced_mass, V_r = _boilerPlate(T=T,
                                                         particles=particles,
                                                         V=V)
    # using a more descriptive name for the thermal velocity using
    # reduced mass
    V_reduced = V_r
    if particles[0] in ('e', 'e-') and particles[1] in ('e', 'e-'):
        # electron-electron collision
        # if a velocity was passed, we use that instead of the reduced
        # thermal velocity
        V = _replaceNanVwithThermalV(V, T, reduced_mass)
        # impact parameter for 90 degree collision
        bPerp = impact_parameter_perp(T=T,
                                      particles=particles,
                                      V=V_reduced)
    elif particles[0] in ('e', 'e-') or particles[1] in ('e', 'e-'):
        # electron-ion collision
        # Need to manually pass electron thermal velocity to obtain
        # correct perpendicular collision radius: we ignore the reduced
        # velocity and use the electron thermal velocity instead
        V = _replaceNanVwithThermalV(V, T, m_e)
        # need to also correct mass in collision radius from reduced
        # mass to electron mass
        bPerp = impact_parameter_perp(T=T,
                                      particles=particles,
                                      V=V) * reduced_mass / m_e
    else:
        # ion-ion collision
        # if a velocity was passed, we use that instead of the reduced
        # thermal velocity
        V = _replaceNanVwithThermalV(V, T, reduced_mass)
        bPerp = impact_parameter_perp(T=T,
                                      particles=particles,
                                      V=V)
    # Coulomb logarithm: this call was previously duplicated verbatim in
    # every branch above, so it is computed once here instead.
    # NOTE(review): V is deliberately left as the NaN sentinel (thermal
    # velocity) rather than the possibly user-supplied V, matching the
    # original behavior — confirm this is intended.
    cou_log = Coulomb_logarithm(T,
                                n,
                                particles,
                                z_mean,
                                V=np.nan * u.m / u.s,
                                method=method)
    # collisional cross section
    sigma = Coulomb_cross_section(bPerp)
    # collision frequency where Coulomb logarithm accounts for
    # small angle collisions, which are more frequent than large
    # angle collisions.
    # (a leftover debug ``print`` of the inputs was removed here)
    freq = n * sigma * V * cou_log
    return freq.to(u.Hz)
@check_quantity(impact_param={'units': u.m, 'can_be_negative': False})
def Coulomb_cross_section(impact_param: u.m):
    r"""Cross section for a large angle Coulomb collision.
    Parameters
    ----------
    impact_param : ~astropy.units.Quantity
        Impact parameter for the collision.
    Returns
    -------
    ~astropy.units.Quantity
        The Coulomb collision cross section area.
    Notes
    -----
    The collisional cross-section (see [1]_ for a graphical demonstration)
    for a 90 degree Coulomb collision is obtained by
    .. math::
        \sigma = \pi (2 * \rho_{\perp})^2
    where :math:`\rho_{\perp}` is the distance of closest approach for
    a 90 degree Coulomb collision. This function is a generalization of that
    calculation. Please note that it is not guaranteed to return the correct
    results for small angle collisions.
    Examples
    --------
    >>> Coulomb_cross_section(7e-10*u.m)
    <Quantity 6.1575216e-18 m2>
    >>> Coulomb_cross_section(0.5*u.m)
    <Quantity 3.14159265 m2>
    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Cross_section_(physics)#Collision_among_gas_particles
    """
    # area of a disk whose radius is twice the impact parameter
    effective_radius = 2 * impact_param
    return np.pi * effective_radius ** 2
@utils.check_quantity(
    T_e={'units': u.K, 'can_be_negative': False},
    n_e={'units': u.m ** -3, 'can_be_negative': False}
)
def fundamental_electron_collision_freq(T_e,
                                        n_e,
                                        ion_particle,
                                        coulomb_log=None,
                                        V=None,
                                        coulomb_log_method="classical"):
    r"""
    Average momentum relaxation rate for a slowly flowing Maxwellian distribution of electrons.
    [3]_ provides a derivation of this as an average collision frequency between electrons
    and ions for a Maxwellian distribution. It is thus a special case of the collision
    frequency with an averaging factor, and is on many occasions in transport theory
    the most relevant collision frequency that has to be considered. It is heavily
    related to diffusion and resistivity in plasmas.
    Parameters
    ----------
    T_e : ~astropy.units.Quantity
        The electron temperature of the Maxwellian test electrons
    n_e : ~astropy.units.Quantity
        The number density of the Maxwellian test electrons
    ion_particle: str
        String signifying a particle type of the field ions, including charge
        state information.
    V : ~astropy.units.Quantity, optional
        The relative velocity between particles. If not provided,
        thermal velocity is assumed: :math:`\mu V^2 \sim 2 k_B T`
        where `mu` is the reduced mass.
    coulomb_log : float or dimensionless ~astropy.units.Quantity, optional
        Option to specify a Coulomb logarithm of the electrons on the ions.
        If not specified, the Coulomb log will is calculated using the
        `~plasmapy.physics.transport.Coulomb_logarithm` function.
    coulomb_log_method : string, optional
        Method used for Coulomb logarithm calculation (see that function
        for more documentation). Choose from "classical" or "GMS-1" to "GMS-6".
    Returns
    -------
    ~astropy.units.Quantity
        The average momentum relaxation rate in units of 1 / s.
    Notes
    -----
    Equations (2.17) and (2.120) in [3]_ provide the original source used
    to implement this formula, however, the simplest form that connects our average
    collision frequency to the general collision frequency is is this (from 2.17):
    .. math::
        \nu_e = \frac{4}{3 \sqrt{\pi}} \nu(v_{Te})
    Where :math:`\nu` is the general collision frequency and :math:`v_{Te}`
    is the electron thermal velocity (the average, for a Maxwellian distribution).
    This implementation of the average collision frequency is is equivalent to:
    * 1/tau_e from ref [1]_ eqn (2.5e) pp. 215,
    * nu_e from ref [2]_ pp. 33,
    References
    ----------
    .. [1] Braginskii, S. I. "Transport processes in a plasma." Reviews of
       plasma physics 1 (1965): 205.
    .. [2] Huba, J. D. "NRL (Naval Research Laboratory) Plasma Formulary,
       revised." Naval Research Lab. Report NRL/PU/6790-16-614 (2016).
       https://www.nrl.navy.mil/ppd/content/nrl-plasma-formulary
    .. [3] J.D. Callen, Fundamentals of Plasma Physics draft material,
       Chapter 2, http://homepages.cae.wisc.edu/~callen/chap2.pdf
    Examples
    --------
    >>> from astropy import units as u
    >>> fundamental_electron_collision_freq(0.1 * u.eV, 1e6 / u.m ** 3, 'p')
    <Quantity 0.00180172 1 / s>
    >>> fundamental_electron_collision_freq(1e6 * u.K, 1e6 / u.m ** 3, 'p')
    <Quantity 1.07222852e-07 1 / s>
    >>> fundamental_electron_collision_freq(100 * u.eV, 1e20 / u.m ** 3, 'p')
    <Quantity 3936037.8595928 1 / s>
    >>> fundamental_electron_collision_freq(100 * u.eV, 1e20 / u.m ** 3, 'p', coulomb_log_method = 'GMS-1')
    <Quantity 3872922.52743562 1 / s>
    >>> fundamental_electron_collision_freq(0.1 * u.eV, 1e6 / u.m ** 3, 'p', V = c / 100)
    <Quantity 4.41166015e-07 1 / s>
    >>> fundamental_electron_collision_freq(100 * u.eV, 1e20 / u.m ** 3, 'p', coulomb_log = 20)
    <Quantity 5812633.74935003 1 / s>
    See Also
    --------
    collision_frequency
    fundamental_ion_collision_freq
    """
    T_e = T_e.to(u.K, equivalencies=u.temperature_energy())
    # specify to use electron thermal velocity (most probable), not based on reduced mass
    V = _replaceNanVwithThermalV(V, T_e, m_e)
    particles = [ion_particle, 'e-']
    Z_i = atomic.integer_charge(ion_particle)
    nu = collision_frequency(T_e,
                             n_e,
                             particles,
                             z_mean=Z_i,
                             V=V,
                             method=coulomb_log_method
                             )
    # Maxwellian averaging factor 4 / (3 sqrt(pi)), see eqn (2.17) in ref [3]
    coeff = 4 / np.sqrt(np.pi) / 3
    # accounting for when a Coulomb logarithm value is passed
    if np.any(coulomb_log):
        # NOTE(review): V is passed as NaN here (thermal velocity sentinel),
        # not the V used above — confirm this asymmetry is intended.
        cLog = Coulomb_logarithm(T_e,
                                 n_e,
                                 particles,
                                 z_mean=Z_i,
                                 V=np.nan * u.m / u.s,
                                 method=coulomb_log_method)
        # dividing out by typical Coulomb logarithm value implicit in
        # the collision frequency calculation and replacing with
        # the user defined Coulomb logarithm value
        nu_mod = nu * coulomb_log / cLog
        nu_e = coeff * nu_mod
    else:
        nu_e = coeff * nu
    return nu_e.to(1 / u.s)
@utils.check_quantity(
    T_i={'units': u.K, 'can_be_negative': False},
    n_i={'units': u.m ** -3, 'can_be_negative': False}
)
def fundamental_ion_collision_freq(T_i,
                                   n_i,
                                   ion_particle,
                                   coulomb_log=None,
                                   V=None,
                                   coulomb_log_method="classical"):
    r"""
    Average momentum relaxation rate for a slowly flowing Maxwellian distribution of ions.

    [3]_ provides a derivation of this as an average collision frequency between ions
    and ions for a Maxwellian distribution. It is thus a special case of the collision
    frequency with an averaging factor.

    Parameters
    ----------
    T_i : ~astropy.units.Quantity
        The temperature of the Maxwellian test ions
    n_i : ~astropy.units.Quantity
        The number density of the Maxwellian test ions
    ion_particle: str
        String signifying a particle type of the test and field ions,
        including charge state information. This function assumes the test
        and field ions are the same species.
    V : ~astropy.units.Quantity, optional
        The relative velocity between particles. If not provided,
        thermal velocity is assumed: :math:`\mu V^2 \sim 2 k_B T`
        where `mu` is the reduced mass.
    coulomb_log : float or dimensionless ~astropy.units.Quantity, optional
        Option to specify a Coulomb logarithm of the test ions on the
        field ions. If not specified, the Coulomb log will be calculated
        using the ~plasmapy.physics.transport.Coulomb_logarithm function.
    coulomb_log_method : str, optional
        Method used for Coulomb logarithm calculation (see that function
        for more documentation). Choose from "classical" or "GMS-1" to "GMS-6".

    Notes
    -----
    Equations (2.36) and (2.122) in [3]_ provide the original source used
    to implement this formula, however, in our implementation we use the very
    same process that leads to the fundamental electron collision rate (2.17),
    gaining simply a different coefficient:

    .. math::
        \nu_i = \frac{8}{3 * 4 * \sqrt{\pi}} \nu(v_{Ti})

    Where :math:`\nu` is the general collision frequency and :math:`v_{Ti}`
    is the ion thermal velocity (the average, for a Maxwellian distribution).

    Note that in the derivation, it is assumed that electrons are present
    in such numbers as to establish quasineutrality, but the effects of the
    test ions colliding with them are not considered here. This is a very
    typical approximation in transport theory.

    This result is an ion momentum relaxation rate, and is used in many
    classical transport expressions. It is equivalent to:
    * 1/tau_i from ref [1]_, equation (2.5i) pp. 215,
    * nu_i from ref [2]_ pp. 33,

    References
    ----------
    .. [1] Braginskii, S. I. "Transport processes in a plasma." Reviews of
       plasma physics 1 (1965): 205.
    .. [2] Huba, J. D. "NRL (Naval Research Laboratory) Plasma Formulary,
       revised." Naval Research Lab. Report NRL/PU/6790-16-614 (2016).
       https://www.nrl.navy.mil/ppd/content/nrl-plasma-formulary
    .. [3] J.D. Callen, Fundamentals of Plasma Physics draft material,
       Chapter 2, http://homepages.cae.wisc.edu/~callen/chap2.pdf

    Examples
    --------
    >>> from astropy import units as u
    >>> fundamental_ion_collision_freq(0.1 * u.eV, 1e6 / u.m ** 3, 'p')
    <Quantity 2.97315582e-05 1 / s>
    >>> fundamental_ion_collision_freq(1e6 * u.K, 1e6 / u.m ** 3, 'p')
    <Quantity 1.78316012e-09 1 / s>
    >>> fundamental_ion_collision_freq(100 * u.eV, 1e20 / u.m ** 3, 'p')
    <Quantity 66411.80316364 1 / s>
    >>> fundamental_ion_collision_freq(100 * u.eV, 1e20 / u.m ** 3, 'p', coulomb_log_method='GMS-1')
    <Quantity 66407.00859126 1 / s>
    >>> fundamental_ion_collision_freq(100 * u.eV, 1e20 / u.m ** 3, 'p', V = c / 100)
    <Quantity 6.53577473 1 / s>
    >>> fundamental_ion_collision_freq(100 * u.eV, 1e20 / u.m ** 3, 'p', coulomb_log=20)
    <Quantity 95918.76240877 1 / s>

    See Also
    --------
    collision_frequency
    fundamental_electron_collision_freq
    """
    # allow the temperature to be supplied as an energy per particle
    T_i = T_i.to(u.K, equivalencies=u.temperature_energy())
    m_i = atomic.particle_mass(ion_particle)
    # ion-ion collisions: the test and field particles are the same species
    particles = [ion_particle, ion_particle]
    # specify to use ion thermal velocity (most probable), not based on reduced mass
    V = _replaceNanVwithThermalV(V, T_i, m_i)
    Z_i = atomic.integer_charge(ion_particle)
    nu = collision_frequency(T_i,
                             n_i,
                             particles,
                             z_mean=Z_i,
                             V=V,
                             method=coulomb_log_method)
    # factor of 4 due to reduced mass in bperp and the rest is
    # due to differences in definitions of collisional frequency
    coeff = np.sqrt(8 / np.pi) / 3 / 4
    # accounting for when a Coulomb logarithm value is passed
    if np.any(coulomb_log):
        cLog = Coulomb_logarithm(T_i,
                                 n_i,
                                 particles,
                                 z_mean=Z_i,
                                 V=np.nan * u.m / u.s,
                                 method=coulomb_log_method)
        # dividing out by typical Coulomb logarithm value implicit in
        # the collision frequency calculation and replacing with
        # the user defined Coulomb logarithm value
        nu_mod = nu * coulomb_log / cLog
        nu_i = coeff * nu_mod
    else:
        nu_i = coeff * nu
    return nu_i.to(1 / u.s)
@check_quantity(T={"units": u.K, "can_be_negative": False},
                n_e={"units": u.m ** -3}
                )
def mean_free_path(T,
                   n_e,
                   particles,
                   z_mean=np.nan * u.dimensionless_unscaled,
                   V=np.nan * u.m / u.s,
                   method="classical"):
    r"""Collisional mean free path (m)

    Parameters
    ----------
    T : ~astropy.units.Quantity
        Temperature in units of temperature or energy per particle,
        which is assumed to be equal for both the test particle and
        the target particle
    n_e : ~astropy.units.Quantity
        The electron density in units convertible to per cubic meter.
    particles : tuple
        A tuple containing string representations of the test particle
        (listed first) and the target particle (listed second)
    z_mean : ~astropy.units.Quantity, optional
        The average ionization (arithmetic mean) for a plasma where a
        macroscopic description is valid. Used to recover the average
        ion density for calculating the ion sphere radius for
        non-classical impact parameters.
    V : ~astropy.units.Quantity, optional
        The relative velocity between particles. If not provided, the
        thermal velocity is assumed: :math:`\mu V^2 \sim 2 k_B T`
        where :math:`\mu` is the reduced mass.
    method : str, optional
        Selects which theory to use when calculating the Coulomb
        logarithm. Defaults to classical method.

    Returns
    -------
    mfp : float or numpy.ndarray
        The collisional mean free path for particles in a plasma.

    Raises
    ------
    ValueError
        If the mass or charge of either particle cannot be found, or
        any of the inputs contain incorrect values.
    UnitConversionError
        If the units on any of the inputs are incorrect
    TypeError
        If the n_e, T, or V are not Quantities.
    RelativityError
        If the input velocity is same or greater than the speed
        of light.

    Warns
    -----
    ~astropy.units.UnitsWarning
        If units are not provided, SI units are assumed
    ~plasmapy.utils.RelativityWarning
        If the input velocity is greater than 5% of the speed of
        light.

    Notes
    -----
    The collisional mean free path is given by [1]_

    .. math::
        \lambda_{mfp} = \frac{v}{\nu}

    where :math:`v` is the inter-particle velocity (typically taken to be
    the thermal velocity) and :math:`\nu` is the collision frequency.

    Examples
    --------
    >>> from astropy import units as u
    >>> n = 1e19*u.m**-3
    >>> T = 1e6*u.K
    >>> particles = ('e', 'p')
    >>> mean_free_path(T, n, particles)
    <Quantity 7.8393631 m>
    >>> mean_free_path(T, n, particles, V=1e6 * u.m / u.s)
    <Quantity 0.00852932 m>

    References
    ----------
    .. [1] Francis, F. Chen. Introduction to plasma physics and controlled
       fusion 3rd edition. Ch 5 (Springer 2015).
    """
    # Evaluate the collision frequency first, before the boiler-plate call
    # below, so the electron-ion case does not pick up a reduced-mass thermal
    # velocity; collision_frequency performs its own input validation.
    nu = collision_frequency(T=T,
                             n=n_e,
                             particles=particles,
                             z_mean=z_mean,
                             V=V,
                             method=method)
    # The boiler-plate helper is used here only to recover the relative
    # velocity V (filling in the thermal speed when V was not supplied).
    T, masses, charges, reduced_mass, V = _boilerPlate(T=T,
                                                       particles=particles,
                                                       V=V)
    # mean free path: distance covered at speed V between two collisions
    lambda_mfp = V / nu
    return lambda_mfp.to(u.m)
@check_quantity(T={"units": u.K, "can_be_negative": False},
                n={"units": u.m ** -3}
                )
def Spitzer_resistivity(T,
                        n,
                        particles,
                        z_mean=np.nan * u.dimensionless_unscaled,
                        V=np.nan * u.m / u.s,
                        method="classical"):
    r"""Spitzer resistivity of a plasma

    Parameters
    ----------
    T : ~astropy.units.Quantity
        Temperature in units of temperature.
        This should be the electron temperature for electron-electron
        and electron-ion collisions, and the ion temperature for
        ion-ion collisions.
    n : ~astropy.units.Quantity
        The density in units convertible to per cubic meter.
        This should be the electron density for electron-electron collisions,
        and the ion density for electron-ion and ion-ion collisions.
    particles : tuple
        A tuple containing string representations of the test particle
        (listed first) and the target particle (listed second)
    z_mean : ~astropy.units.Quantity, optional
        The average ionization (arithmetic mean) for a plasma where a
        macroscopic description is valid. This is used to recover the
        average ion density (given the average ionization and electron
        density) for calculating the ion sphere radius for non-classical
        impact parameters.
    V : ~astropy.units.Quantity, optional
        The relative velocity between particles. If not provided,
        thermal velocity is assumed: :math:`\mu V^2 \sim 2 k_B T`
        where `mu` is the reduced mass.
    method: str, optional
        Selects which theory to use when calculating the Coulomb
        logarithm. Defaults to classical method.

    Returns
    -------
    spitzer : float or numpy.ndarray
        The resistivity of the plasma in Ohm meters.

    Raises
    ------
    ValueError
        If the mass or charge of either particle cannot be found, or
        any of the inputs contain incorrect values.
    UnitConversionError
        If the units on any of the inputs are incorrect
    TypeError
        If the n, T, or V are not Quantities.
    RelativityError
        If the input velocity is same or greater than the speed
        of light.

    Warns
    -----
    ~astropy.units.UnitsWarning
        If units are not provided, SI units are assumed
    ~plasmapy.utils.RelativityWarning
        If the input velocity is greater than 5% of the speed of
        light.

    Notes
    -----
    The Spitzer resistivity is given by [1]_ [2]_

    .. math::
        \eta = \frac{m}{n Z_1 Z_2 q_e^2} \nu_{1,2}

    where :math:`m` is the ion mass or the reduced mass, :math:`n` is the
    ion density, :math:`Z` is the particle charge state, :math:`q_e` is the
    charge of an electron, :math:`\nu_{1,2}` is the collisional frequency
    between particle species 1 and 2.

    Typically, particle species 1 and 2 are selected to be an electron
    and an ion, since electron-ion collisions are inelastic and therefore
    produce resistivity in the plasma.

    Examples
    --------
    >>> from astropy import units as u
    >>> n = 1e19*u.m**-3
    >>> T = 1e6*u.K
    >>> particles = ('e', 'p')
    >>> Spitzer_resistivity(T, n, particles)
    <Quantity 2.4916169e-06 m Ohm>
    >>> Spitzer_resistivity(T, n, particles, V=1e6 * u.m / u.s)
    <Quantity 0.00041583 m Ohm>

    References
    ----------
    .. [1] Francis, F. Chen. Introduction to plasma physics and controlled
       fusion 3rd edition. Ch 5 (Springer 2015).
    .. [2] http://homepages.cae.wisc.edu/~callen/chap2.pdf
    """
    # collisional frequency
    freq = collision_frequency(T=T,
                               n=n,
                               particles=particles,
                               z_mean=z_mean,
                               V=V,
                               method=method)
    # boiler plate checks
    # fetching additional parameters
    T, masses, charges, reduced_mass, V = _boilerPlate(T=T,
                                                       particles=particles,
                                                       V=V)
    if np.isnan(z_mean):
        # eta = m * nu / (n * Z1 * Z2 * q_e**2); the charges returned above
        # already carry the elementary charge, so their product supplies
        # the Z1 * Z2 * q_e**2 factor.
        # NOTE(review): for an ('e', 'p') pair a signed product would be
        # negative while the documented example output is positive --
        # presumably _boilerPlate returns charge magnitudes; confirm
        # against its implementation.
        spitzer = freq * reduced_mass / (n * charges[0] * charges[1])
    else:
        spitzer = freq * reduced_mass / (n * (z_mean * e) ** 2)
    return spitzer.to(u.Ohm * u.m)
@check_quantity(T={"units": u.K, "can_be_negative": False},
                n_e={"units": u.m ** -3}
                )
def mobility(T,
             n_e,
             particles,
             z_mean=np.nan * u.dimensionless_unscaled,
             V=np.nan * u.m / u.s,
             method="classical"):
    r"""Electrical mobility (m^2/(V s))

    Parameters
    ----------
    T : ~astropy.units.Quantity
        Temperature in units of temperature or energy per particle,
        which is assumed to be equal for both the test particle and
        the target particle
    n_e : ~astropy.units.Quantity
        The electron density in units convertible to per cubic meter.
    particles : tuple
        A tuple containing string representations of the test particle
        (listed first) and the target particle (listed second)
    z_mean : ~astropy.units.Quantity, optional
        The average ionization (arithmetic mean) for a plasma where a
        macroscopic description is valid. This is used to recover the
        average ion density (given the average ionization and electron
        density) for calculating the ion sphere radius for non-classical
        impact parameters. It is also used to obtain the average mobility
        of a plasma with multiple charge state species. When z_mean
        is not given, the average charge between the two particles is
        used instead.
    V : ~astropy.units.Quantity, optional
        The relative velocity between particles. If not provided,
        thermal velocity is assumed: :math:`\mu V^2 \sim 2 k_B T`
        where `mu` is the reduced mass.
    method: str, optional
        Selects which theory to use when calculating the Coulomb
        logarithm. Defaults to classical method.

    Returns
    -------
    mobility_value : float or numpy.ndarray
        The electrical mobility of particles in a collisional plasma.

    Raises
    ------
    ValueError
        If the mass or charge of either particle cannot be found, or
        any of the inputs contain incorrect values.
    UnitConversionError
        If the units on any of the inputs are incorrect
    TypeError
        If the n_e, T, or V are not Quantities.
    RelativityError
        If the input velocity is same or greater than the speed
        of light.

    Warns
    -----
    ~astropy.units.UnitsWarning
        If units are not provided, SI units are assumed
    ~plasmapy.utils.RelativityWarning
        If the input velocity is greater than 5% of the speed of
        light.

    Notes
    -----
    The mobility is given by [1]_

    .. math::
        \mu = \frac{q}{m \nu}

    where :math:`q` is the particle charge, :math:`m` is the particle mass
    and :math:`\nu` is the collisional frequency of the particle in the
    plasma.

    The mobility describes the forced diffusion of a particle in a collisional
    plasma which is under the influence of an electric field. The mobility
    is essentially the ratio of drift velocity due to collisions and the
    electric field driving the forced diffusion.

    Examples
    --------
    >>> from astropy import units as u
    >>> n = 1e19*u.m**-3
    >>> T = 1e6*u.K
    >>> particles = ('e', 'p')
    >>> mobility(T, n, particles)
    <Quantity 250500.35318738 m2 / (s V)>
    >>> mobility(T, n, particles, V=1e6 * u.m / u.s)
    <Quantity 1500.97042427 m2 / (s V)>

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Electrical_mobility#Mobility_in_gas_phase
    """
    # collision frequency for the species pair; mobility = q / (m * nu)
    freq = collision_frequency(T=T,
                               n=n_e,
                               particles=particles,
                               z_mean=z_mean,
                               V=V,
                               method=method)
    # boiler plate checks
    # we do this after collision_frequency since collision_frequency
    # already has a boiler_plate check and we are doing this just
    # to recover the charges, mass, etc.
    T, masses, charges, reduced_mass, V = _boilerPlate(T=T,
                                                       particles=particles,
                                                       V=V)
    if np.isnan(z_mean):
        # NOTE(review): arithmetic mean of the two charges; for an
        # electron/ion pair a signed mean would largely cancel, yet the
        # documented example yields a nonzero positive mobility --
        # presumably _boilerPlate returns charge magnitudes; confirm
        # against its implementation.
        z_val = (charges[0] + charges[1]) / 2
    else:
        z_val = z_mean * e
    mobility_value = z_val / (reduced_mass * freq)
    return mobility_value.to(u.m ** 2 / (u.V * u.s))
@check_quantity(T={"units": u.K, "can_be_negative": False},
                n_e={"units": u.m ** -3}
                )
def Knudsen_number(characteristic_length,
                   T,
                   n_e,
                   particles,
                   z_mean=np.nan * u.dimensionless_unscaled,
                   V=np.nan * u.m / u.s,
                   method="classical"):
    r"""Knudsen number (dimless)

    Ratio of the collisional mean free path to a characteristic scale
    length of the system; it indicates whether collisional effects are
    important on that scale.

    Parameters
    ----------
    characteristic_length : ~astropy.units.Quantity
        Rough order-of-magnitude estimate of the relevant size of the system.
    T : ~astropy.units.Quantity
        Temperature in units of temperature or energy per particle,
        which is assumed to be equal for both the test particle and
        the target particle
    n_e : ~astropy.units.Quantity
        The electron density in units convertible to per cubic meter.
    particles : tuple
        A tuple containing string representations of the test particle
        (listed first) and the target particle (listed second)
    z_mean : ~astropy.units.Quantity, optional
        The average ionization (arithmetic mean) for a plasma where a
        macroscopic description is valid. Used to recover the average
        ion density for calculating the ion sphere radius for
        non-classical impact parameters.
    V : ~astropy.units.Quantity, optional
        The relative velocity between particles. If not provided, the
        thermal velocity is assumed: :math:`\mu V^2 \sim 2 k_B T`
        where :math:`\mu` is the reduced mass.
    method : str, optional
        Selects which theory to use when calculating the Coulomb
        logarithm. Defaults to classical method.

    Returns
    -------
    knudsen_param : float or numpy.ndarray
        The dimensionless Knudsen number.

    Raises
    ------
    ValueError
        If the mass or charge of either particle cannot be found, or
        any of the inputs contain incorrect values.
    UnitConversionError
        If the units on any of the inputs are incorrect
    TypeError
        If the n_e, T, or V are not Quantities.
    RelativityError
        If the input velocity is same or greater than the speed
        of light.

    Warns
    -----
    ~astropy.units.UnitsWarning
        If units are not provided, SI units are assumed
    ~plasmapy.utils.RelativityWarning
        If the input velocity is greater than 5% of the speed of
        light.

    Notes
    -----
    The Knudsen number is given by [1]_

    .. math::
        Kn = \frac{\lambda_{mfp}}{L}

    where :math:`\lambda_{mfp}` is the collisional mean free path for
    particles in a plasma and :math:`L` is the characteristic scale
    length of interest, typically the plasma size or the size of a
    diagnostic (such as the length or radius of a Langmuir probe tip).

    Examples
    --------
    >>> from astropy import units as u
    >>> L = 1e-3 * u.m
    >>> n = 1e19*u.m**-3
    >>> T = 1e6*u.K
    >>> particles = ('e', 'p')
    >>> Knudsen_number(L, T, n, particles)
    <Quantity 7839.36310417>
    >>> Knudsen_number(L, T, n, particles, V=1e6 * u.m / u.s)
    <Quantity 8.52931736>

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Knudsen_number
    """
    # mean free path between collisions for the given species pair
    lambda_mfp = mean_free_path(T=T,
                                n_e=n_e,
                                particles=particles,
                                z_mean=z_mean,
                                V=V,
                                method=method)
    ratio = lambda_mfp / characteristic_length
    return ratio.to(u.dimensionless_unscaled)
@check_quantity(T={"units": u.K, "can_be_negative": False},
                n_e={"units": u.m ** -3}
                )
def coupling_parameter(T,
                       n_e,
                       particles,
                       z_mean=np.nan * u.dimensionless_unscaled,
                       V=np.nan * u.m / u.s,
                       method="classical"):
    r"""Coupling parameter.

    Coupling parameter compares Coulomb energy to kinetic energy (typically)
    thermal. Classical plasmas are weakly coupled Gamma << 1, whereas dense
    plasmas tend to have significant to strong coupling Gamma >= 1.

    Parameters
    ----------
    T : ~astropy.units.Quantity
        Temperature in units of temperature or energy per particle,
        which is assumed to be equal for both the test particle and
        the target particle
    n_e : ~astropy.units.Quantity
        The electron density in units convertible to per cubic meter.
    particles : tuple
        A tuple containing string representations of the test particle
        (listed first) and the target particle (listed second)
    z_mean : ~astropy.units.Quantity, optional
        The average ionization (arithmetic mean) for a plasma where a
        macroscopic description is valid. This is used to recover the
        average ion density (given the average ionization and electron
        density) for calculating the ion sphere radius for non-classical
        impact parameters.
    V : ~astropy.units.Quantity, optional
        The relative velocity between particles. If not provided,
        thermal velocity is assumed: :math:`\mu V^2 \sim 2 k_B T`
        where `mu` is the reduced mass.
    method: str, optional
        Selects which theory to use when calculating the Coulomb
        logarithm. Defaults to classical method.

    Returns
    -------
    coupling : float or numpy.ndarray
        The coupling parameter for a plasma.

    Raises
    ------
    ValueError
        If the mass or charge of either particle cannot be found, or
        any of the inputs contain incorrect values.
    UnitConversionError
        If the units on any of the inputs are incorrect
    TypeError
        If the n_e, T, or V are not Quantities.
    RelativityError
        If the input velocity is same or greater than the speed
        of light.

    Warns
    -----
    ~astropy.units.UnitsWarning
        If units are not provided, SI units are assumed
    ~plasmapy.utils.RelativityWarning
        If the input velocity is greater than 5% of the speed of
        light.

    Notes
    -----
    The coupling parameter is given by

    .. math::
        \Gamma = \frac{E_{Coulomb}}{E_{Kinetic}}

    The Coulomb energy is given by

    .. math::
        E_{Coulomb} = \frac{Z_1 Z_2 q_e^2}{4 \pi \epsilon_0 r}

    where :math:`r` is the Wigner-Seitz radius, and 1 and 2 refer to
    particle species 1 and 2 between which we want to determine the
    coupling.

    In the classical case the kinetic energy is simply the thermal energy

    .. math::
        E_{kinetic} = k_B T_e

    The quantum case is more complex. The kinetic energy is dominated by
    the Fermi energy, modulated by a correction factor based on the
    ideal chemical potential. This is obtained more precisely
    by taking the thermal kinetic energy and dividing by
    the degeneracy parameter, modulated by the Fermi integral [1]_

    .. math::
        E_{kinetic} = 2 k_B T_e / \chi f_{3/2} (\mu_{ideal} / k_B T_e)

    where :math:`\chi` is the degeneracy parameter, :math:`f_{3/2}` is the
    Fermi integral, and :math:`\mu_{ideal}` is the ideal chemical
    potential.

    The degeneracy parameter is given by

    .. math::
        \chi = n_e \Lambda_{deBroglie} ^ 3

    where :math:`n_e` is the electron density and :math:`\Lambda_{deBroglie}`
    is the thermal deBroglie wavelength.

    See equations 1.2, 1.3 and footnote 5 in [2]_ for details on the ideal
    chemical potential.

    Examples
    --------
    >>> from astropy import units as u
    >>> n = 1e19*u.m**-3
    >>> T = 1e6*u.K
    >>> particles = ('e', 'p')
    >>> coupling_parameter(T, n, particles)
    <Quantity 5.80330315e-05>
    >>> coupling_parameter(T, n, particles, V=1e6 * u.m / u.s)
    <Quantity 5.80330315e-05>

    References
    ----------
    .. [1] Dense plasma temperature equilibration in the binary collision
       approximation. D. O. Gericke et. al. PRE, 65, 036418 (2002).
       DOI: 10.1103/PhysRevE.65.036418
    .. [2] Bonitz, Michael. Quantum kinetic theory. Stuttgart: Teubner, 1998.
    """
    # boiler plate checks
    T, masses, charges, reduced_mass, V = _boilerPlate(T=T,
                                                       particles=particles,
                                                       V=V)
    # The average ionization (to recover the ion density) and the squared
    # charge in the Coulomb energy both depend on whether z_mean was
    # supplied, so the two previously-duplicated np.isnan(z_mean) branches
    # are merged into one.
    if np.isnan(z_mean):
        # using mean charge to get average ion density.
        # If you are running this, you should strongly consider giving
        # a value of z_mean as an argument instead.
        Z1 = np.abs(atomic.integer_charge(particles[0]))
        Z2 = np.abs(atomic.integer_charge(particles[1]))
        Z = (Z1 + Z2) / 2
        # getting ion density from electron density
        n_i = n_e / Z
        charge_sq = charges[0] * charges[1]
    else:
        # getting ion density from electron density
        n_i = n_e / z_mean
        charge_sq = (z_mean * e) ** 2
    # getting Wigner-Seitz radius based on ion density
    radius = Wigner_Seitz_radius(n_i)
    # Coulomb potential energy between particles
    coulombEnergy = charge_sq / (4 * np.pi * eps0 * radius)
    if method == "classical":
        # classical thermal kinetic energy
        kineticEnergy = k_B * T
    elif method == "quantum":
        # quantum kinetic energy for dense plasmas: thermal energy divided
        # by the degeneracy parameter chi = n_e * lambda_deBroglie ** 3,
        # modulated by the Fermi integral of the ideal chemical potential
        lambda_deBroglie = thermal_deBroglie_wavelength(T)
        chemicalPotential = chemical_potential(n_e, T)
        fermiIntegral = Fermi_integral(chemicalPotential.si.value, 1.5)
        denom = (n_e * lambda_deBroglie ** 3) * fermiIntegral
        kineticEnergy = 2 * k_B * T / denom
        if np.all(np.imag(kineticEnergy) == 0):
            kineticEnergy = np.real(kineticEnergy)
        else:  # coveralls: ignore
            raise ValueError("Kinetic energy should not be imaginary."
                             "Something went horribly wrong.")
    coupling = coulombEnergy / kineticEnergy
    return coupling.to(u.dimensionless_unscaled)
| 36.550725 | 107 | 0.614088 |
0f34ca172e9437c1a2f288d343c81965f753ed44 | 3,044 | py | Python | segmentation_models_pytorch/common/blocks.py | vinnamkim/segmentation_models.pytorch | f967ded34df6fb536e8e8cba9b6491ae63b939f5 | [
"MIT"
] | null | null | null | segmentation_models_pytorch/common/blocks.py | vinnamkim/segmentation_models.pytorch | f967ded34df6fb536e8e8cba9b6491ae63b939f5 | [
"MIT"
] | null | null | null | segmentation_models_pytorch/common/blocks.py | vinnamkim/segmentation_models.pytorch | f967ded34df6fb536e8e8cba9b6491ae63b939f5 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
class ZeroCenter(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
"""x : [B, C, H, W]"""
return x.sub_(x.flatten(1).mean(1, keepdim=True).unsqueeze(-1).unsqueeze(-1))
EPS = 1e-5
class ZeroNorm(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
"""x : [B, C, H, W]"""
"""x_mean : [B, 1, 1, 1]"""
return F.layer_norm(x, x.size()[1:], None, None, EPS)
class Conv2dReLU(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding=0,
stride=1, use_batchnorm=True, center='before', **batchnorm_params):
super().__init__()
layers = [
nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=not (use_batchnorm)
)
]
if use_batchnorm == 'inplace':
try:
from inplace_abn import InPlaceABN
except ImportError:
raise RuntimeError("In order to use `use_batchnorm='inplace'` inplace_abn package must be installed. To install see: https://github.com/mapillary/inplace_abn")
layers.append(InPlaceABN(out_channels, activation='leaky_relu', activation_param=0.0, **batchnorm_params))
elif use_batchnorm:
if center == 'before':
layers.append(ZeroCenter())
layers.append(nn.ReLU(inplace=True))
layers.append(nn.BatchNorm2d(out_channels, **batchnorm_params))
elif center == 'after':
layers.append(nn.BatchNorm2d(out_channels, **batchnorm_params))
layers.append(nn.ReLU(inplace=False))
layers.append(ZeroCenter())
elif center == 'norm':
layers.append(nn.BatchNorm2d(out_channels, **batchnorm_params))
layers.append(nn.ReLU(inplace=True))
layers.append(ZeroNorm())
else:
layers.append(nn.BatchNorm2d(out_channels, **batchnorm_params))
layers.append(nn.ReLU(inplace=True))
else:
layers.append(nn.ReLU(inplace=True))
self.block = nn.Sequential(*layers)
def forward(self, x):
return self.block(x)
class SCSEModule(nn.Module):
def __init__(self, ch, re=16):
super().__init__()
self.cSE = nn.Sequential(nn.AdaptiveAvgPool2d(1),
nn.Conv2d(ch, ch//re, 1),
nn.ReLU(inplace=True),
nn.Conv2d(ch//re, ch, 1),
nn.Sigmoid()
)
self.sSE = nn.Sequential(nn.Conv2d(ch, ch, 1),
nn.Sigmoid())
def forward(self, x):
return x * self.cSE(x) + x * self.sSE(x)
| 35.811765 | 175 | 0.524967 |
245554ca3f1836c597210282087c81a43378a5cc | 179 | py | Python | models/elm.py | likeand/ml | fa54cebeb9998d8aa1241445b4b9492695bb4073 | [
"MIT"
] | 1 | 2020-09-14T06:32:20.000Z | 2020-09-14T06:32:20.000Z | models/elm.py | likeand/ml | fa54cebeb9998d8aa1241445b4b9492695bb4073 | [
"MIT"
] | null | null | null | models/elm.py | likeand/ml | fa54cebeb9998d8aa1241445b4b9492695bb4073 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Date : 2020/5/31
# @Author: Luokun
# @Email : olooook@outlook.com
import numpy as np
class ELM:
"""
Extreme learning machine(极限学习机)
"""
| 13.769231 | 35 | 0.586592 |
2cf1f1ec562d20749a8ef0298d55afa9d1576843 | 593 | py | Python | app/pokemon/migrations/0004_auto_20201114_2342.py | innacroft/PokemonService | 3dade01c3fe5d5bc56ff631f69a5548fafe4d076 | [
"MIT"
] | null | null | null | app/pokemon/migrations/0004_auto_20201114_2342.py | innacroft/PokemonService | 3dade01c3fe5d5bc56ff631f69a5548fafe4d076 | [
"MIT"
] | null | null | null | app/pokemon/migrations/0004_auto_20201114_2342.py | innacroft/PokemonService | 3dade01c3fe5d5bc56ff631f69a5548fafe4d076 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-11-14 23:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pokemon', '0003_auto_20201114_2333'),
]
operations = [
migrations.AddField(
model_name='pokemon',
name='height',
field=models.IntegerField(default=0, verbose_name='Height points'),
),
migrations.AddField(
model_name='pokemon',
name='width',
field=models.IntegerField(default=0, verbose_name='Width points'),
),
]
| 24.708333 | 79 | 0.591906 |
5dc67b983976e6e03a32e00a0265a29b1b558a24 | 1,725 | py | Python | onlinepayments/sdk/domain/get_payment_products_response.py | wl-online-payments-direct/sdk-python2 | eac2d5ad1945700cdfdffe7ff7da161eb8bfaf04 | [
"Apache-2.0"
] | null | null | null | onlinepayments/sdk/domain/get_payment_products_response.py | wl-online-payments-direct/sdk-python2 | eac2d5ad1945700cdfdffe7ff7da161eb8bfaf04 | [
"Apache-2.0"
] | null | null | null | onlinepayments/sdk/domain/get_payment_products_response.py | wl-online-payments-direct/sdk-python2 | eac2d5ad1945700cdfdffe7ff7da161eb8bfaf04 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This class was auto-generated.
#
from onlinepayments.sdk.data_object import DataObject
from onlinepayments.sdk.domain.payment_product import PaymentProduct
class GetPaymentProductsResponse(DataObject):
"""
| The response contains an array of payment products that match the filters supplied in the request.
"""
__payment_products = None
@property
def payment_products(self):
"""
| Array containing payment products and their characteristics
Type: list[:class:`onlinepayments.sdk.domain.payment_product.PaymentProduct`]
"""
return self.__payment_products
@payment_products.setter
def payment_products(self, value):
self.__payment_products = value
def to_dictionary(self):
dictionary = super(GetPaymentProductsResponse, self).to_dictionary()
if self.payment_products is not None:
dictionary['paymentProducts'] = []
for element in self.payment_products:
if element is not None:
dictionary['paymentProducts'].append(element.to_dictionary())
return dictionary
def from_dictionary(self, dictionary):
super(GetPaymentProductsResponse, self).from_dictionary(dictionary)
if 'paymentProducts' in dictionary:
if not isinstance(dictionary['paymentProducts'], list):
raise TypeError('value \'{}\' is not a list'.format(dictionary['paymentProducts']))
self.payment_products = []
for element in dictionary['paymentProducts']:
value = PaymentProduct()
self.payment_products.append(value.from_dictionary(element))
return self
| 35.9375 | 104 | 0.671884 |
87a71ca46b239d4272834ca71bbd5110b0b3c18b | 5,963 | py | Python | app.py | vanandsh/dash_live_demo | 3eae09e7b9146316081df61a8c7b6a3a276ecc61 | [
"MIT"
] | null | null | null | app.py | vanandsh/dash_live_demo | 3eae09e7b9146316081df61a8c7b6a3a276ecc61 | [
"MIT"
] | null | null | null | app.py | vanandsh/dash_live_demo | 3eae09e7b9146316081df61a8c7b6a3a276ecc61 | [
"MIT"
] | null | null | null | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import altair as alt
#import utility as util
#from ./src import utility as util
app = dash.Dash(__name__, assets_folder='assets')  # serves static files from ./assets
server = app.server  # underlying Flask server, exposed for deployment (e.g. gunicorn)
app.title = 'Dash app with pure Altair HTML'
import altair as alt
import vega_datasets
def mds_special():
    """Return an Altair theme configuration dict implementing the MDS style.

    The dict follows the Vega-Lite ``config`` layout: chart title, default
    view size, and x/y axis styling (fonts, grid, ticks, placeholder titles).
    """
    font = "Arial"
    axis_color = "#000000"
    grid_color = "#DEDDDD"

    title_config = {
        "fontSize": 24,
        "font": font,
        "anchor": "start",  # equivalent of left-aligned
        "fontColor": "#000000",
    }
    view_config = {"height": 300, "width": 400}
    axis_x_config = {
        "domain": True,
        "gridColor": grid_color,
        "domainWidth": 1,
        "grid": False,
        "labelFont": font,
        "labelFontSize": 12,
        "labelAngle": 0,
        "tickColor": axis_color,
        "tickSize": 5,  # default, kept explicit to show it can be changed
        "titleFont": font,
        "titleFontSize": 16,
        "titlePadding": 10,
        "title": "X Axis Title (units)",
    }
    axis_y_config = {
        "domain": False,
        "grid": True,
        "gridColor": grid_color,
        "gridWidth": 1,
        "labelFont": font,
        "labelFontSize": 14,
        "labelAngle": 0,
        "titleFont": font,
        "titleFontSize": 16,
        "titlePadding": 10,
        "title": "Y Axis Title (units)",
    }
    return {
        "config": {
            "title": title_config,
            "view": view_config,
            "axisX": axis_x_config,
            "axisY": axis_y_config,
        }
    }
# register the custom theme under a chosen name
alt.themes.register('mds_special', mds_special)
# enable the newly registered theme globally for charts built below
alt.themes.enable('mds_special')
#alt.themes.enable('none') # to return to default
def make_plot(x_axis = 'Displacement', y_axis = 'Horsepower'):
    """Build a themed, interactive Altair scatter of two cars-dataset columns.

    Both columns are encoded as quantitative (":Q") fields and echoed in
    the tooltip; the chart title names the plotted pair.
    """
    # Make sure the custom MDS theme is active for this chart.
    alt.themes.enable('mds_special')

    x_field = x_axis + ':Q'
    y_field = y_axis + ':Q'

    base = alt.Chart(vega_datasets.data.cars.url).mark_point(size=90)
    chart = base.encode(
        alt.X(x_field, title=x_axis),
        alt.Y(y_field, title=y_axis),
        tooltip=[x_field, y_field],
    )
    chart = chart.properties(
        title=x_axis + ' vs. ' + y_axis, width=500, height=350
    )
    return chart.interactive()
app.layout = html.Div([
### ADD CONTENT HERE like: html.H1('text'),
# html.H1('This is my first dashboard'),
# html.H2('This is a subtitle'),
# html.H3('Here is an image'),
# html.Img(src='https://upload.wikimedia.org/wikipedia/commons/thumb/b/b7/Unico_Anello.png/1920px-Unico_Anello.png',
# width='10%'),
html.H3('Dynamic Plot:'),
html.Iframe(
sandbox='allow-scripts',
id='plot',
height='500',
width='750',
style={'border-width': '0'},
################ The magic happens here
#srcDoc=open('./Lecture1_charts/horsepower_vs_displacement.html').read()
srcDoc=make_plot().to_html()
################ The magic happens here
),
# dcc.Markdown('''
# ## Markdown Section
# '''
# ),
# html.H3("Dropdown!"),
# html.H3("Slider Bar!"),
# dcc.Slider(
# min=0,
# max=9,
# marks={i: 'Label {}'.format(i) for i in range(10)},
# value=5,
# ),
# html.Br(),
# html.Br(),
# html.Br(),
# html.Br(),
### Add Tabs to the top of the page
# dcc.Tabs(id='tabs', value='tab1', children=[
# dcc.Tab(label='Lecture 1', value='tab-1'),
# dcc.Tab(label='Lecture 2', value='tab-2'),
# dcc.Tab(label='Lecture 3', value='tab-3'),
# dcc.Tab(label='Lecture 4', value='tab-4'),
# ]),
dcc.Markdown('''
### X-axis
'''
),
dcc.Dropdown(
id='dd-chart_x',
options=[
{'label': 'Fuel Efficiency', 'value': 'Miles_per_Gallon'},
{'label': 'Cylinders', 'value': 'Cylinders'},
{'label': 'Engine Displacement', 'value': 'Displacement'},
# Missing option here
],
value='Displacement',
style=dict(width='45%',
verticalAlign="middle")
),
dcc.Markdown('''
### Y-axis
'''
),
dcc.Dropdown(
id='dd-chart_y',
options=[
{'label': 'Fuel Efficiency', 'value': 'Miles_per_Gallon'},
{'label': 'Cylinders', 'value': 'Cylinders'},
{'label': 'Engine Displacement', 'value': 'Displacement'},
# Missing option here
],
value='Cylinders',
style=dict(width='45%',
verticalAlign="middle")
),
])
@app.callback(
    dash.dependencies.Output('plot', 'srcDoc'),
    [dash.dependencies.Input('dd-chart_x', 'value'),
     dash.dependencies.Input('dd-chart_y', 'value')])
def update_plot(xaxis_column_name, yaxis_column_name):
    """Re-render the Altair chart whenever either axis dropdown changes.

    Receives the selected column names for X and Y and returns the new
    chart as an HTML document for the plot iframe's srcDoc.
    """
    return make_plot(xaxis_column_name, yaxis_column_name).to_html()
if __name__ == '__main__':
    # Start the Dash development server (debug=True enables hot reload).
    app.run_server(debug=True)
| 31.219895 | 121 | 0.533959 |
374d98af4201b4130e19522a682682be318589c7 | 5,244 | py | Python | rivalcfg/devices/rival310.py | Clueninja/rivalcfg | f7e2a3480c5f0b9a0b992ba5af7ff2025b2af346 | [
"WTFPL"
] | 604 | 2016-03-31T12:22:26.000Z | 2022-03-31T18:51:50.000Z | rivalcfg/devices/rival310.py | Clueninja/rivalcfg | f7e2a3480c5f0b9a0b992ba5af7ff2025b2af346 | [
"WTFPL"
] | 162 | 2016-04-17T10:58:08.000Z | 2022-03-11T18:59:18.000Z | rivalcfg/devices/rival310.py | Clueninja/rivalcfg | f7e2a3480c5f0b9a0b992ba5af7ff2025b2af346 | [
"WTFPL"
] | 89 | 2016-04-10T08:56:58.000Z | 2022-03-18T21:04:10.000Z | from .. import usbhid
# Wire-format description of the RGB-gradient command payload shared by
# both LED settings below.  All offsets are byte positions within the
# command buffer.
# fmt: off
_RGBGRADIENT_HEADER = {
    "header_length": 26,  # Length of the header excluding command / LED ID
    "led_id_offsets": [0],  # Offset of the "led_id" fields
    "duration_offset": 1,  # Offset of the "duration" field
    "duration_length": 2,  # Length of the "duration" field (in Bytes)
    "repeat_offset": 17,  # Offset of the "repeat" flag
    "triggers_offset": 21,  # Offset of the "triggers" field (buttons mask)
    "color_count_offset": 25,  # Offset of the "color_count" field
}
# fmt: on

# Default LED effect: a 1 s red->green->blue gradient.
_DEFAULT_RGBGRADIENT = (
    "rgbgradient(duration=1000; colors=0%: #ff0000, 33%: #00ff00, 66%: #0000ff)"
)

# Device profile consumed by the rivalcfg framework: USB ids of the
# supported hardware revisions plus the CLI-exposed settings and the raw
# HID commands that implement them.
profile = {
    "name": "SteelSeries Rival 310",
    "models": [
        {
            "name": "SteelSeries Rival 310",
            "vendor_id": 0x1038,
            "product_id": 0x1720,
            "endpoint": 0,
        },
        {
            "name": "SteelSeries Rival 310 CS:GO Howl Edition",
            "vendor_id": 0x1038,
            "product_id": 0x171E,
            "endpoint": 0,
        },
        {
            "name": "SteelSeries Rival 310 PUBG Edition",
            "vendor_id": 0x1038,
            "product_id": 0x1736,
            "endpoint": 0,
        },
    ],
    "settings": {
        # DPI presets: user range 100-12000 DPI mapped onto one device byte.
        "sensitivity1": {
            "label": "Sensibility preset 1",
            "description": "Set sensitivity preset 1 (DPI)",
            "cli": ["-s", "--sensitivity1"],
            "report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
            "command": [0x53, 0x00, 0x01],
            "command_suffix": [0x00, 0x42],
            "value_type": "range",
            "input_range": [100, 12000, 100],
            "output_range": [0x00, 0x77, 1],
            "default": 800,
        },
        "sensitivity2": {
            "label": "Sensibility preset 2",
            "description": "Set sensitivity preset 2 (DPI)",
            "cli": ["-S", "--sensitivity2"],
            "report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
            "command": [0x53, 0x00, 0x02],
            "command_suffix": [0x00, 0x42],
            "value_type": "range",
            "input_range": [100, 12000, 100],
            "output_range": [0x00, 0x77, 1],
            "default": 1600,
        },
        "polling_rate": {
            "label": "Polling rate",
            "description": "Set polling rate (Hz)",
            "cli": ["-p", "--polling-rate"],
            "report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
            "command": [0x54, 0x00],
            "value_type": "choice",
            "choices": {
                125: 0x04,
                250: 0x03,
                500: 0x02,
                1000: 0x01,
            },
            "default": 1000,
        },
        # The two LED zones share the same command (0x5B) and are selected
        # by "led_id": 0x00 = logo, 0x01 = wheel.
        "logo_color": {
            "label": "Logo LED colors and effects",
            "description": "Set the colors and the effects of the logo LED",
            "cli": ["-c", "--logo-color"],
            "report_type": usbhid.HID_REPORT_TYPE_FEATURE,
            "command": [0x5B, 0x00],
            "value_type": "rgbgradient",
            "rgbgradient_header": _RGBGRADIENT_HEADER,
            "led_id": 0x00,
            "default": _DEFAULT_RGBGRADIENT,
        },
        "wheel_color": {
            "label": "Wheel LED colors and effects",
            "description": "Set the colors and the effects of the wheel LED",
            "cli": ["-C", "--wheel-color"],
            "report_type": usbhid.HID_REPORT_TYPE_FEATURE,
            "command": [0x5B, 0x00],
            "value_type": "rgbgradient",
            "rgbgradient_header": _RGBGRADIENT_HEADER,
            "led_id": 0x01,
            "default": _DEFAULT_RGBGRADIENT,
        },
        "buttons_mapping": {
            "label": "Buttons mapping",
            "description": "Set the mapping of the buttons",
            "cli": ["-b", "--buttons"],
            "report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
            "command": [0x31, 0x00],
            "value_type": "buttons",
            # Each button occupies a 5-byte field at the given offset.
            "buttons": {
                "Button1": {"id": 0x01, "offset": 0x00, "default": "button1"},
                "Button2": {"id": 0x02, "offset": 0x05, "default": "button2"},
                "Button3": {"id": 0x03, "offset": 0x0A, "default": "button3"},
                "Button4": {"id": 0x04, "offset": 0x0F, "default": "button4"},
                "Button5": {"id": 0x05, "offset": 0x14, "default": "button5"},
                "Button6": {"id": 0x06, "offset": 0x19, "default": "dpi"},
            },
            # fmt: off
            "button_disable":     0x00,
            "button_keyboard":    0x51,
            "button_multimedia":  0x61,
            "button_dpi_switch":  0x30,
            "button_scroll_up":   0x31,
            "button_scroll_down": 0x32,
            # fmt: on
            "button_field_length": 5,
            "default": "buttons(button1=button1; button2=button2; button3=button3; button4=button4; button5=button5; button6=dpi; layout=qwerty)",
        },
    },
    # Persist the current configuration to the mouse's onboard memory.
    "save_command": {
        "report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
        "command": [0x59, 0x00],
    },
    # Query the firmware version (device answers with 2 bytes).
    "firmware_version": {
        "report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
        "command": [0x90, 0x00],
        "response_length": 2,
    },
}
| 36.929577 | 146 | 0.502288 |
a04f72af495a456f87194a084e2ad71a152aacbe | 380 | py | Python | Quad_Code_share/ADC.py | EvanHope/SeniorProject | 0980f7187d49988d2d81e34506fa97e7c8cca3a4 | [
"MIT"
] | null | null | null | Quad_Code_share/ADC.py | EvanHope/SeniorProject | 0980f7187d49988d2d81e34506fa97e7c8cca3a4 | [
"MIT"
] | null | null | null | Quad_Code_share/ADC.py | EvanHope/SeniorProject | 0980f7187d49988d2d81e34506fa97e7c8cca3a4 | [
"MIT"
] | 1 | 2021-12-02T21:39:18.000Z | 2021-12-02T21:39:18.000Z | import sys, time
import numpy as np
import navio.adc
import navio.util
# Sample channel 4 of the Navio ADC once every 0.25 s for 1000 iterations
# (~4 minutes) and print the elapsed time (ms) and the mean reading.
# NOTE(review): this is Python 2 code (print statements) and uses
# time.clock(), which was removed in Python 3.8; kept as-is.
navio.util.check_apm()
adc = navio.adc.ADC()
data = np.zeros(1000)
start_time = time.clock()*1000.0
for i in range(0,1000):
	analog_val = adc.read(4)*0.001  # scaled by 0.001 — presumably mV -> V; TODO confirm
	data[i] = analog_val
	print analog_val
	time.sleep(0.25)
end_time = time.clock()*1000.0
average = np.mean(data)
print end_time - start_time, average
| 19 | 36 | 0.728947 |
14d63e648b7dec620431a5b5dc4834c6aca0d6f4 | 5,308 | py | Python | lstm.py | sankalpg10/NLP-Sarcasm-Detection | 7012aa28b51046f8bafb7e24a52f58c88535824e | [
"MIT"
] | 1 | 2021-02-22T19:08:38.000Z | 2021-02-22T19:08:38.000Z | lstm.py | sankalpg10/NLP-Sarcasm-Detection | 7012aa28b51046f8bafb7e24a52f58c88535824e | [
"MIT"
] | null | null | null | lstm.py | sankalpg10/NLP-Sarcasm-Detection | 7012aa28b51046f8bafb7e24a52f58c88535824e | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
from keras.layers import Dropout
import re
import pickle
import csv
from sklearn.metrics import classification_report, confusion_matrix
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
# Load the train/valid/test splits; each CSV has two header-less columns:
# the headline text ("headline") and its label ("y").
X_train_or = pd.read_csv('data/train.csv',names=["headline","y"])
X_valid_or = pd.read_csv('data/valid.csv',names=["headline","y"])
X_test_or = pd.read_csv('data/test.csv',names=["headline","y"])

# NLTK helpers consumed by preprocess() below.
stop_words = set(stopwords.words('english'))
tokenizer = RegexpTokenizer(r'\w+')  # NOTE(review): rebound later to the Keras Tokenizer
lemmatizer = WordNetLemmatizer()
stemmer = PorterStemmer()  # unused: the stemming line in preprocess() is commented out
def preprocess(data):
    """Clean the ``headline`` column of *data* in place and return it.

    For each headline: strip everything but letters, digits and
    apostrophes, drop English stopwords, and lemmatize the surviving
    words.  Empty headlines are replaced by NaN.  The output format
    matches the historical behaviour exactly: each cleaned headline is
    a space-joined string with a leading space (or "" when no word
    survives the filtering).

    Relies on the module-level ``stop_words`` set and ``lemmatizer``.
    """
    cleaned = []
    # Iterate the column directly instead of positional .iloc indexing;
    # also avoids shadowing the builtin `iter` as the old loop did.
    for raw_text in data['headline']:
        if len(raw_text) > 0:
            # Collapse any run of disallowed characters into one space.
            ascii_data = re.sub(r"[^A-Za-z0-9']+", ' ', raw_text)
            words = [w for w in ascii_data.split() if w not in stop_words]
            lemmas = [lemmatizer.lemmatize(w) for w in words]
            # One join instead of quadratic `s = s + " " + w`; the
            # leading space per word reproduces the legacy format.
            cleaned.append("".join(" " + w for w in lemmas))
        else:
            # np.nan (not the NumPy-2.0-removed np.NaN alias).
            cleaned.append(np.nan)
    data['headline'] = cleaned
    return data
# print(X_train_or.iloc[1,0])
# Clean all three splits (preprocess mutates in place and returns).
X_train_pre = preprocess(X_train_or)
X_valid_pre = preprocess(X_valid_or)
X_test_pre = preprocess(X_test_or)
# X_train_pre = X_train_or
# X_valid_pre = X_valid_or
# X_test_pre = X_test_or
# print(X_train_pre.iloc[1,0])
# The maximum number of words to be used. (most frequent)
MAX_NB_WORDS = 50000
# Max number of words kept per headline (shorter ones are padded).
MAX_SEQUENCE_LENGTH = 64
# Embedding vector size; this is fixed.
EMBEDDING_DIM = 100
# Fit the vocabulary on the training split only, then map each split's
# headlines to padded integer sequences.
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(X_train_pre['headline'].values)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
X_train = tokenizer.texts_to_sequences(X_train_pre['headline'].values) # Every word got a new number
# print(X_train[1])
X_train = pad_sequences(X_train, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of train data tensor:', X_train.shape)
# print(X_train[1])
X_test = tokenizer.texts_to_sequences(X_test_pre['headline'].values) # Every word got a new number
# print(X_test[1])
X_test = pad_sequences(X_test, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of test data tensor:', X_test.shape)
# print(X_test[1])
X_valid = tokenizer.texts_to_sequences(X_valid_pre['headline'].values) # Every word got a new number
# print(X_valid[1])
X_valid = pad_sequences(X_valid, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of valid data tensor:', X_valid.shape)
# print(X_valid[1])
# One-hot encode the binary labels (2 output columns for the softmax head).
Y_train = pd.get_dummies(X_train_pre['y']).values
print('Shape of train label tensor:', Y_train.shape)
Y_test = pd.get_dummies(X_test_pre['y']).values
print('Shape of test label tensor:', Y_test.shape)
Y_valid = pd.get_dummies(X_valid_pre['y']).values
print('Shape of valid label tensor:', Y_valid.shape)
# Embedding -> spatial dropout -> LSTM(196) -> 2-way softmax classifier.
model = Sequential()
model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X_train.shape[1]))
model.add(SpatialDropout1D(0.4))
model.add(LSTM(196, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
epochs = 2
batch_size = 64
# Early stopping watches validation loss (patience 3 > 2 epochs, so it
# will not trigger with the current settings).
history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, validation_data=(X_valid, Y_valid),callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])
name = "2_64"
# NOTE(review): np.save on a Keras History object relies on pickling the
# whole object; saving history.history (a plain dict) would be safer.
np.save("history_"+name+".npy",history)
# NOTE(review): the file handle passed to pickle.dump is never closed.
pickle.dump(model, open(name, 'wb'))
# Plot training & validation accuracy values per epoch.
plt.figure(1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.savefig("Accuracyvsepoch_"+name+".png")
# plt.show()
# Plot training & validation loss values per epoch.
plt.figure(2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.savefig("lossvsepoch_"+name+".png")
# plt.show()
# # Make predictions
# Predict class probabilities on the test split and report quality.
y_pred = model.predict(X_test, batch_size=128, verbose=2)
print(type(y_pred))
print(y_pred.shape)
# print("Accuracy of test data is: ",accuracy(y_pred,Y_valid))
# argmax turns one-hot labels / softmax rows back into class indices.
cm=confusion_matrix(np.argmax(Y_test, axis=1),np.argmax(y_pred, axis=1))
print("\n\nConfusion Matrix\n")
print(cm)
cr=classification_report(np.argmax(Y_test, axis=1),np.argmax(y_pred, axis=1))
print("\n\nClassification Report\n")
print(cr)
# Persist the raw probability rows for later inspection.
with open("predictions_"+name+".csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows(y_pred)
| 33.175 | 187 | 0.727581 |
cefdb0f2c6ef1e75aa5c9e8174aaa1ca6224df36 | 771 | py | Python | src/optoforce/optoforce_ros-indigo-devel/test/rosbag_utilities.py | Slifer64/ur_ws | 314e66df22e81342a2baa82185d24b3badf77654 | [
"MIT"
] | null | null | null | src/optoforce/optoforce_ros-indigo-devel/test/rosbag_utilities.py | Slifer64/ur_ws | 314e66df22e81342a2baa82185d24b3badf77654 | [
"MIT"
] | null | null | null | src/optoforce/optoforce_ros-indigo-devel/test/rosbag_utilities.py | Slifer64/ur_ws | 314e66df22e81342a2baa82185d24b3badf77654 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import roslib
#roslib.load_manifest('kdl')
import PyKDL as kdl
import rosbag
import os
import sys,getopt
import matplotlib.pyplot as plt
import matplotlib
def extract_data_from_bag(bag_name, list_topics):
    """Read *bag_name* and collect a (times, messages) series per topic.

    Returns a dict mapping each topic in *list_topics* to a pair of
    parallel lists ``[times, msgs]``, where times are seconds relative to
    the first matching message.  ``data['tinit']`` holds that first
    rosbag timestamp and ``data['tfinal']`` the last relative time (both
    None when the bag contains no matching messages).
    """
    bag = rosbag.Bag(bag_name)
    try:
        data = dict()
        data['tinit'] = None
        data['tfinal'] = None
        for topic in list_topics:
            data[topic] = [[], []]
        for topic, msg, t in bag.read_messages(topics=list_topics):
            # `is None`, not truthiness: a zero timestamp is falsy but
            # valid, and the old check would reset tinit on every message.
            if data['tinit'] is None:
                data['tinit'] = t
            t_cur = (t - data['tinit']).to_sec()
            data['tfinal'] = t_cur
            data[topic][0].append(t_cur)
            data[topic][1].append(msg)
    finally:
        # Always release the bag's file handle, even if reading raises.
        bag.close()
    return data
| 21.416667 | 69 | 0.634241 |
cb7f9dbd8b7cd03fa7b44383f9cd728a7b2c3041 | 1,222 | py | Python | tests/test_preparation.py | kyusque/percival | 31f204f7988c6acff6a489a2ffc698d19a259e09 | [
"MIT"
] | null | null | null | tests/test_preparation.py | kyusque/percival | 31f204f7988c6acff6a489a2ffc698d19a259e09 | [
"MIT"
] | 2 | 2019-09-05T10:31:09.000Z | 2020-01-07T04:58:46.000Z | tests/test_preparation.py | kyusque/percival | 31f204f7988c6acff6a489a2ffc698d19a259e09 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from percival.preparation.domain.value_objects import Smiles
from percival.preparation.domain.molecule import MoleculeFactory, Molecule
from percival.preparation.service.input_generator import InputGenerator
import unittest
class PreparationTest(unittest.TestCase):
    """Unit tests for molecule creation, conformers and input generation."""

    def test_mol_factory(self):
        """Propane ("CCC") expands to 11 atoms once hydrogens are added."""
        molecule = MoleculeFactory.create_molecule_from_smiles(Smiles("CCC"))
        self.assertEqual(11, molecule.mol.GetNumAtoms())

    def test_mol_conformer(self):
        """A conformer set serializes to SDF ending in the "$$$$" record mark."""
        molecule = MoleculeFactory.create_molecule_from_smiles(Smiles("C"))
        conformers = molecule.generate_conformers()
        sdf_lines = conformers.to_sdf().split("\n")
        self.assertEqual("$$$$", sdf_lines[-1])

    def test_InputGenerator_gjf(self):
        """A generated Gaussian input starts with its %chk checkpoint line."""
        molecule = MoleculeFactory.create_molecule_from_smiles(Smiles("C"))
        conformers = molecule.generate_conformers()
        first_conformer = conformers.to_list()[0]
        gjf_lines = InputGenerator.generate_gjf(first_conformer, "test").split("\n")
        self.assertEqual("%chk=test", gjf_lines[0])
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| 34.914286 | 83 | 0.697218 |
e14d2ef8e493f16d989d69f70cf6f956fd5c768d | 3,265 | py | Python | visualization/owned_listener.py | suhasini-gesis/IWAAN | 343b48908198019e9be25332639cded204f8e7b4 | [
"MIT"
] | null | null | null | visualization/owned_listener.py | suhasini-gesis/IWAAN | 343b48908198019e9be25332639cded204f8e7b4 | [
"MIT"
] | null | null | null | visualization/owned_listener.py | suhasini-gesis/IWAAN | 343b48908198019e9be25332639cded204f8e7b4 | [
"MIT"
] | null | null | null | import datetime
import numpy as np
import pandas as pd
import plotly
from plotly import graph_objs
from IPython.display import display, Markdown as md
class OwnedListener():
    """Track how many surviving tokens a given editor owns over time.

    Built from a token-action dataframe (one row per token revision with
    at least `token_id`, `rev_time`, `o_editor` and `action` columns —
    schema inferred from usage; confirm against the caller) and plots
    either the absolute or relative ownership with Plotly.
    """

    def __init__(self, df, editor):
        # Per-token chronological history, keyed by token_id.
        self.df = df.sort_values(['token_id', 'rev_time'], ascending=True).set_index('token_id')
        self.editor = editor
        # Distinct days on which `editor` originated at least one token...
        self.days = df.loc[df['o_editor'] == editor, 'rev_time'
                        ].dt.to_period('D').unique()
        # ...plus today, so the series always extends to the present.
        today = pd.Period(datetime.datetime.today(), freq='D')
        self.days = pd.Series(np.append(self.days, today)).sort_values(ascending=False)
        self.df['rev_time'] = pd.to_datetime(self.df['rev_time']).dt.tz_localize(None)
        if len(self.days) > 0:
            # Shift each day boundary forward by one day so comparisons
            # below include the whole day's revisions.
            self.days = self.days.dt.to_timestamp('D') + pd.DateOffset(1)
            _all = []
            _abs = []
            df = self.df
            # Days are sorted descending, so each iteration narrows df to
            # revisions up to that cutoff (progressively, for speed).
            for rev_time in self.days:
                df = df[df['rev_time'] <= rev_time]
                # Last recorded action per token decides whether it survives.
                last_action = df.groupby('token_id').last()
                surv = last_action[last_action['action'] != 'out']
                _abs.append(sum(surv['o_editor'] == self.editor))
                _all.append(len(surv))
            # Per-day summary: tokens owned by editor, total surviving
            # tokens, and the ownership percentage.
            self.summ = pd.DataFrame({
                   'day': self.days,
                   'abs': _abs,
                   'all': _all
                })
            self.summ['res'] = 100 * self.summ['abs'] / self.summ['all']
        else:
            self.summ = pd.DataFrame([], columns = ['day', 'abs', 'all', 'res'])

    def listen(self, _range1, _range2, granularity, trace):
        """Plot ownership between the dates `_range1` and `_range2`.

        `granularity` is a period label whose first letter is a pandas
        offset alias (e.g. "Daily" -> "D"); `trace` selects absolute
        counts ('Tokens Owned') or percentage ('Tokens Owned (%)').
        """
        df = self.summ
        if len(df) == 0:
            display(md("***It is not possible to plot the tokens owned because this editor has never owned any token.***"))
            return
        df = df[(df.day.dt.date >= _range1) &
            (df.day.dt.date <= _range2 + datetime.timedelta(days=1))].copy()
        self.traces = []
        if trace == 'Tokens Owned':
            _range = None
            # Bucket the days at the requested granularity, keeping the
            # first sample per bucket.
            df['time'] = df['day'].dt.to_period(granularity[0]).dt.to_timestamp(granularity[0])
            df = df[~df.duplicated(subset='time', keep='first')]
            _y = df['abs']
        elif trace == 'Tokens Owned (%)':
            _range = [0, 100]  # percentages: pin the y-axis to 0-100
            df['time'] = df['day'].dt.to_period(granularity[0]).dt.to_timestamp(granularity[0])
            df = df[~df.duplicated(subset='time', keep='first')]
            _y = df['res']
        self.traces.append(
            graph_objs.Scatter(
                x=df['time'], y=_y,
                name=trace,
                marker=dict(color='rgba(255, 0, 0, .5)'))
        )
        layout = graph_objs.Layout(hovermode='closest',
                                   xaxis=dict(title=granularity, ticklen=5,
                                              zeroline=True, gridwidth=2),
                                   yaxis=dict(
                                       ticklen=5, gridwidth=2, range=_range),
                                   legend=dict(x=0.5, y=1.2),
                                   showlegend=True, barmode='group')
        # Keep the plotted frame around for inspection by callers.
        self.df_plotted = df
        plotly.offline.init_notebook_mode(connected=True)
        plotly.offline.iplot({"data": self.traces, "layout": layout})
| 37.102273 | 123 | 0.508729 |
5f843d72c1a07110c59dde129199a7ceb98990f0 | 17,828 | py | Python | pinnwand/http.py | millefalcon/pinnwand | 5f4319cfe37f06b71a46838a145d1f8858884bc3 | [
"MIT"
] | null | null | null | pinnwand/http.py | millefalcon/pinnwand | 5f4319cfe37f06b71a46838a145d1f8858884bc3 | [
"MIT"
] | null | null | null | pinnwand/http.py | millefalcon/pinnwand | 5f4319cfe37f06b71a46838a145d1f8858884bc3 | [
"MIT"
] | null | null | null | import json
import logging
import secrets
import docutils.core
from typing import Any, List
from urllib.parse import urljoin
import tornado.web
from tornado.escape import url_escape
from pinnwand import database, path, utility, error, configuration
log = logging.getLogger(__name__)
class Base(tornado.web.RequestHandler):
    """Base handler shared by all routes: renders styled error pages."""

    def write_error(self, status_code: int, **kwargs: Any) -> None:
        """Render ``error.html`` for any error response.

        404s get a descriptive message, validation failures render as
        400 with the exception's text, and everything else falls through
        to a generic unknown-error page.
        """
        if status_code == 404:
            self.render(
                "error.html",
                text="That page does not exist",
                status_code=404,
                pagetitle="error",
            )
            return

        # ``exc_info`` is only supplied when an exception triggered the
        # error; guard against direct ``send_error`` calls.
        exc_info = kwargs.get("exc_info")
        if exc_info is not None and issubclass(
            exc_info[0], error.ValidationError
        ):
            # issubclass (rather than ``==``) also matches subclasses of
            # ValidationError.
            self.set_status(400)
            self.render(
                "error.html",
                text=str(exc_info[1]),
                status_code=400,
                pagetitle="error",
            )
        else:
            self.render(
                "error.html",
                text="unknown error",
                status_code=500,
                pagetitle="error",
            )

    async def get(self) -> None:
        """Unknown paths resolve to a 404."""
        raise tornado.web.HTTPError(404)

    async def post(self) -> None:
        """POST is rejected on handlers that do not override it."""
        raise tornado.web.HTTPError(405)
class CreatePaste(Base):
    """The index page: shows the new-paste form with a list of all
    available lexers from Pygments."""

    async def get(self, lexers: str = "") -> None:
        """Render the new paste form, optionally with lexers preselected
        from the URL ("+"-separated, e.g. ``/+python+text``).

        Unknown lexers yield a 404.
        """
        lexers_available = utility.list_languages()
        lexers_selected = [
            lexer for lexer in lexers.split("+") if lexer.strip()
        ]
        # Our default lexer is just that, text
        if not lexers_selected:
            lexers_selected = ["text"]
        # Make sure all lexers are available
        if not all(lexer in lexers_available for lexer in lexers_selected):
            log.debug("CreatePaste.get: non-existent lexer requested")
            raise tornado.web.HTTPError(404)
        await self.render(
            "create.html",
            lexers=lexers_selected,
            lexers_available=lexers_available,
            pagetitle="Create new paste",
            message=None,
            paste=None,
        )

    async def post(self) -> None:
        # This is a historical endpoint to create pastes, pastes are marked as
        # old-web and will get a warning on top of them to remove any access to
        # this route.
        # pinnwand has since evolved with an API which should be used and a
        # multi-file paste.
        # See the 'CreateAction' for the new-style creation of pastes.
        lexer = self.get_body_argument("lexer")
        raw = self.get_body_argument("code")
        expiry = self.get_body_argument("expiry")
        if lexer not in utility.list_languages():
            log.info("Paste.post: a paste was submitted with an invalid lexer")
            raise tornado.web.HTTPError(400)
        # Guard against empty strings
        if not raw:
            return self.redirect(f"/+{lexer}")
        if expiry not in utility.expiries:
            log.info("Paste.post: a paste was submitted with an invalid expiry")
            raise tornado.web.HTTPError(400)
        paste = database.Paste(utility.expiries[expiry], "deprecated-web")
        file = database.File(raw, lexer)
        file.slug = paste.slug # XXX fix, this can duplicate!!!
        paste.files.append(file)
        with database.session() as session:
            session.add(paste)
            session.commit()
        # The removal cookie is set for the specific path of the paste it is
        # related to
        self.set_cookie(
            "removal", str(paste.removal), path=f"/{paste.slug}"
        )
        # Send the client to the paste
        self.redirect(f"/{paste.slug}")

    def check_xsrf_cookie(self) -> None:
        """The CSRF token check is disabled. While it would be better if it
        was on the impact is both small (someone could make a paste in
        a users name which could allow pinnwand to be used as a vector for
        exfiltration from other XSS) and some command line utilities
        POST directly to this endpoint without using the JSON endpoint."""
        return
class CreateAction(Base):
    """Form target for the new-style, multi-file paste creation."""

    def post(self) -> None:  # type: ignore
        """Create a paste from parallel lexer/raw/filename form lists.

        All three lists must be present, non-empty, and of equal length;
        every lexer must exist and every raw body must be non-empty,
        otherwise a 400 is raised.  On success sets the removal cookie
        and redirects to the new paste.
        """
        expiry = self.get_body_argument("expiry")
        if expiry not in utility.expiries:
            log.info(
                "CreateAction.post: a paste was submitted with an invalid expiry"
            )
            raise tornado.web.HTTPError(400)
        # "long" present means the user opted out of auto-scaling.
        auto_scale = self.get_body_argument("long", None) is None
        lexers = self.get_body_arguments("lexer")
        raws = self.get_body_arguments("raw")
        filenames = self.get_body_arguments("filename")
        if not all([lexers, raws, filenames]):
            # Prevent empty argument lists from making it through
            raise tornado.web.HTTPError(400)
        with database.session() as session:
            paste = database.Paste(utility.expiries[expiry], "web", auto_scale)
            if any(len(L) != len(lexers) for L in [lexers, raws, filenames]):
                log.info("CreateAction.post: mismatching argument lists")
                raise tornado.web.HTTPError(400)
            for (lexer, raw, filename) in zip(lexers, raws, filenames):
                if lexer not in utility.list_languages():
                    log.info("CreateAction.post: a file had an invalid lexer")
                    raise tornado.web.HTTPError(400)
                if not raw:
                    log.info("CreateAction.post: a file had an empty raw")
                    raise tornado.web.HTTPError(400)
                paste.files.append(
                    database.File(
                        raw, lexer, filename if filename else None, auto_scale
                    )
                )
            session.add(paste)
            session.commit()
        # The removal cookie is set for the specific path of the paste it is
        # related to
        self.set_cookie(
            "removal", str(paste.removal), path=f"/{paste.slug}"
        )
        # Send the client to the paste
        self.redirect(f"/{paste.slug}")
class RepastePaste(Base):
    """A special case of the paste page: prefill the create form with an
    existing paste's content so it can be submitted again."""

    async def get(self, slug: str) -> None:  # type: ignore
        """Render the create form seeded with the paste at *slug*, or 404."""
        with database.session() as session:
            query = session.query(database.Paste)
            paste = query.filter(database.Paste.slug == slug).first()

            if paste is None:
                raise tornado.web.HTTPError(404)

            lexers_available = utility.list_languages()

        await self.render(
            "create.html",
            lexers=["text"],  # XXX make this majority of file lexers?
            lexers_available=lexers_available,
            pagetitle="repaste",
            message=None,
            paste=paste,
        )
class ShowPaste(Base):
    """Display a single paste with all of its files."""

    async def get(self, slug: str) -> None:  # type: ignore
        """Render the paste identified by *slug*, or 404 if unknown."""
        with database.session() as session:
            query = session.query(database.Paste)
            paste = query.filter(database.Paste.slug == slug).first()

            if paste is None:
                raise tornado.web.HTTPError(404)

            # Visitors holding the matching removal cookie may delete it.
            removal_cookie = self.get_cookie("removal")

            self.render(
                "show.html",
                paste=paste,
                pagetitle=f"View paste {paste.slug}",
                can_delete=removal_cookie == str(paste.removal),
                linenos=False,
            )
class RedirectShowPaste(Base):
    """Redirect the legacy /show/<slug> URL to the canonical /<slug>."""

    async def get(self, slug: str) -> None:  # type: ignore
        """Look up *slug* and redirect to its canonical URL, or 404."""
        with database.session() as session:
            query = session.query(database.Paste)
            paste = query.filter(database.Paste.slug == slug).first()

            if paste is None:
                raise tornado.web.HTTPError(404)

            self.redirect(f"/{paste.slug}")
class RawFile(Base):
    """Serve a single file's contents as plain text."""

    async def get(self, file_id: str) -> None:  # type: ignore
        """Write the raw text of the file with slug *file_id*, or 404."""
        with database.session() as session:
            query = session.query(database.File)
            file = query.filter(database.File.slug == file_id).first()

            if file is None:
                raise tornado.web.HTTPError(404)

            self.set_header("Content-Type", "text/plain; charset=utf-8")
            self.write(file.raw)
class DownloadFile(Base):
    """Serve a single file's contents as a plain-text download."""

    async def get(self, file_id: str) -> None:  # type: ignore
        """Send the file with slug *file_id* as an attachment, or 404."""
        with database.session() as session:
            query = session.query(database.File)
            file = query.filter(database.File.slug == file_id).first()

            if file is None:
                raise tornado.web.HTTPError(404)

            self.set_header("Content-Type", "text/plain; charset=utf-8")
            # Content-Disposition: attachment makes browsers save the file.
            self.set_header(
                "Content-Disposition", f"attachment; filename={file.slug}"
            )
            self.write(file.raw)
class RemovePaste(Base):
    """Remove a paste."""

    async def get(self, removal: str) -> None:  # type: ignore
        """Delete the paste matching *removal* and return to the index.

        Possession of the removal id is what authorizes the deletion;
        unknown ids yield a 404.
        """
        with database.session() as session:
            query = session.query(database.Paste)
            paste = query.filter(database.Paste.removal == removal).first()

            if paste is None:
                log.info("RemovePaste.get: someone visited with invalid id")
                raise tornado.web.HTTPError(404)

            session.delete(paste)
            session.commit()

        self.redirect("/")
class APIShow(Base):
    """JSON endpoint exposing a paste's metadata and its first file."""

    async def get(self, slug: str) -> None:  # type: ignore
        """Write the paste at *slug* as a JSON object, or 404."""
        with database.session() as session:
            query = session.query(database.Paste)
            paste = query.filter(database.Paste.slug == slug).first()

            if paste is None:
                raise tornado.web.HTTPError(404)

            # Only the first file is exposed here.
            first_file = paste.files[0]
            self.write(
                {
                    "paste_id": paste.slug,
                    "raw": first_file.raw,
                    "fmt": first_file.fmt,
                    "lexer": first_file.lexer,
                    "expiry": paste.exp_date.isoformat(),
                    "filename": first_file.filename,
                }
            )
class APINew(Base):
    """JSON endpoint for creating a single-file paste."""

    def check_xsrf_cookie(self) -> None:
        # API clients do not carry the CSRF cookie; skip the check.
        return

    async def get(self) -> None:
        # Creation is POST-only.
        raise tornado.web.HTTPError(405)

    async def post(self) -> None:
        """Create a paste from form fields and return its URLs as JSON.

        Requires ``lexer``, ``code`` and ``expiry``; ``filename`` is
        optional.  Invalid input yields a 400.
        """
        lexer = self.get_body_argument("lexer")
        raw = self.get_body_argument("code")
        expiry = self.get_body_argument("expiry")
        filename = self.get_body_argument("filename", None)
        if not raw:
            log.info("APINew.post: a paste was submitted without content")
            raise tornado.web.HTTPError(400)
        if lexer not in utility.list_languages():
            log.info("APINew.post: a paste was submitted with an invalid lexer")
            raise tornado.web.HTTPError(400)
        if expiry not in utility.expiries:
            log.info(
                "APINew.post: a paste was submitted with an invalid expiry"
            )
            raise tornado.web.HTTPError(400)
        paste = database.Paste(utility.expiries[expiry], "deprecated-api")
        paste.files.append(database.File(raw, lexer, filename))
        with database.session() as session:
            session.add(paste)
            session.commit()
            req_url = self.request.full_url()
            # Point the paste URL at the specific file via a fragment.
            location = paste.slug
            if filename:
                location += "#" + url_escape(filename)
            self.write(
                {
                    "paste_id": paste.slug,
                    "removal_id": paste.removal,
                    "paste_url": urljoin(req_url, f"/{location}"),
                    "raw_url": urljoin(req_url, f"/raw/{paste.files[0].slug}"),
                }
            )
class APIRemove(Base):
    """JSON endpoint for removing a paste by its removal id."""

    def check_xsrf_cookie(self) -> None:
        # API clients do not carry the CSRF cookie; skip the check.
        return

    async def post(self) -> None:
        """Delete the paste whose removal id matches ``removal_id``.

        Responds 400 (empty body) for unknown ids; on success writes a
        one-element JSON list confirming the removal.
        """
        with database.session() as session:
            paste = (
                session.query(database.Paste)
                .filter(
                    database.Paste.removal
                    == self.get_body_argument("removal_id")
                )
                .first()
            )
            if not paste:
                self.set_status(400)
                return
            session.delete(paste)
            session.commit()
            # this is set this way because tornado tries to protect us
            # by not allowing lists to be returned, looking at this code
            # it really shouldn't be a list but we have to keep it for
            # backwards compatibility
            self.set_header("Content-Type", "application/json")
            self.write(
                json.dumps([{"paste_id": paste.slug, "status": "removed"}])
            )
class APILexers(Base):
    """JSON endpoint listing every lexer pinnwand accepts."""

    async def get(self) -> None:
        """Write the available lexers as JSON."""
        lexers = utility.list_languages()
        self.write(lexers)
class APIExpiries(Base):
    """JSON endpoint listing the available expiry choices."""

    async def get(self) -> None:
        """Write each expiry name mapped to its stringified duration."""
        expiries = {}
        for name, delta in utility.expiries.items():
            expiries[name] = str(delta)
        self.write(expiries)
class CurlCreate(Base):
    """Plain-text paste creation endpoint aimed at curl users.

    Unlike the JSON API, validation errors are reported as readable
    plain-text messages with a 400 status.
    """

    def check_xsrf_cookie(self) -> None:
        # curl clients do not carry the CSRF cookie; skip the check.
        return

    async def post(self) -> None:
        """Create a single-file paste and respond with its URLs in text.

        ``lexer`` defaults to "text", ``expiry`` to "1day"; ``raw`` is
        required.  Also sets the removal cookie for the paste's path.
        """
        lexer = self.get_body_argument("lexer", "text")
        raw = self.get_body_argument("raw", None)
        expiry = self.get_body_argument("expiry", "1day")

        self.set_header("Content-Type", "text/plain")

        if lexer not in utility.list_languages():
            log.info(
                "CurlCreate.post: a paste was submitted with an invalid lexer"
            )
            self.set_status(400)
            self.write("Invalid `lexer` supplied.\n")
            return

        # Guard against empty strings
        if not raw:
            log.info("CurlCreate.post: a paste was submitted without raw")
            self.set_status(400)
            self.write("Invalid `raw` supplied.\n")
            return

        if expiry not in utility.expiries:
            # Fixed log message: this branch previously reported
            # "without raw" although it validates the expiry.
            log.info(
                "CurlCreate.post: a paste was submitted with an invalid expiry"
            )
            self.set_status(400)
            self.write("Invalid `expiry` supplied.\n")
            return

        paste = database.Paste(utility.expiries[expiry], "curl")
        file = database.File(raw, lexer)
        paste.files.append(file)

        with database.session() as session:
            session.add(paste)
            session.commit()

            # The removal cookie is set for the specific path of the paste it is
            # related to
            self.set_cookie(
                "removal", str(paste.removal), path=f"/{paste.slug}"
            )

            url_request = self.request.full_url()
            url_paste = urljoin(url_request, f"/{paste.slug}")
            url_removal = urljoin(url_request, f"/remove/{paste.removal}")
            url_raw = urljoin(url_request, f"/raw/{file.slug}")

            self.write(
                f"Paste URL: {url_paste}\nRaw URL: {url_raw}\nRemoval URL: {url_removal}\n"
            )
class RestructuredTextPage(Base):
    """Serve a reStructuredText file from the page directory as HTML."""

    def initialize(self, file: str) -> None:
        """Remember which .rst file this handler instance serves."""
        self.file = file

    async def get(self) -> None:
        """Convert the configured .rst file to HTML and render it, or 404."""
        page_path = path.page / self.file

        try:
            with open(page_path) as f:
                html = docutils.core.publish_parts(
                    f.read(), writer_name="html"
                )["html_body"]
        except FileNotFoundError:
            raise tornado.web.HTTPError(404)

        # Use the file's stem (name without extension) as the page title.
        self.render(
            "restructuredtextpage.html",
            html=html,
            pagetitle=page_path.stem,
        )
def make_application() -> tornado.web.Application:
pages: List[Any] = [
(r"/", CreatePaste),
(r"/\+(.*)", CreatePaste),
(r"/create", CreateAction),
(r"/show/([A-Z2-7]+)(?:#.+)?", RedirectShowPaste),
(r"/repaste/([A-Z2-7]+)(?:#.+)?", RepastePaste),
(r"/raw/([A-Z2-7]+)(?:#.+)?", RawFile),
(r"/download/([A-Z2-7]+)(?:#.+)?", DownloadFile),
(r"/remove/([A-Z2-7]+)", RemovePaste),
]
pages += [
(r"/about", RestructuredTextPage, {"file": f"{file}.rst"})
for file in configuration.page_list
]
pages += [
(r"/removal", RestructuredTextPage, {"file": "removal.rst"}),
(r"/expiry", RestructuredTextPage, {"file": "expiry.rst"}),
(r"/json/new", APINew),
(r"/json/remove", APIRemove),
(r"/json/show/([A-Z2-7]+)(?:#.+)?", APIShow),
(r"/json/lexers", APILexers),
(r"/json/expiries", APIExpiries),
(r"/curl", CurlCreate),
(
r"/static/(.*)",
tornado.web.StaticFileHandler,
{"path": path.static},
),
(r"/(.*)(?:#.+)?", ShowPaste),
]
app = tornado.web.Application(
pages,
template_path=path.template,
default_handler_class=Base,
xsrf_cookies=True,
cookie_secret=secrets.token_hex(),
)
app.configuration = configuration # type: ignore
return app
| 32.064748 | 97 | 0.541956 |
5bfa904763d72f715cbc674e44bf3bf937c51c19 | 4,654 | py | Python | API_FULL/api_python/resources/Tabla6/P1.py | JoseMiguelGutierrezGuevara/ASR-Preguntas | 4123c744f71fda99256c2fcc10f66e0208fbfcbd | [
"MIT"
] | null | null | null | API_FULL/api_python/resources/Tabla6/P1.py | JoseMiguelGutierrezGuevara/ASR-Preguntas | 4123c744f71fda99256c2fcc10f66e0208fbfcbd | [
"MIT"
] | 1 | 2021-05-09T02:45:44.000Z | 2021-05-09T02:45:44.000Z | API_FULL/api_python/resources/Tabla6/P1.py | jmiguelgg/ASR-Preguntas | 4123c744f71fda99256c2fcc10f66e0208fbfcbd | [
"MIT"
] | null | null | null | import json
import os
import time
import telnetlib
from ..Util.Notifications import Notifications
from flask import Flask, request, jsonify, url_for
from werkzeug.datastructures import ImmutableMultiDict
from flask_restful import Resource, Api
ALOWED_EXTENSIONS = set(['txt'])
user = "humberto"
password = "123456"
show1 = "conf t"
show2 = "show interfaces"
salir = "exit"
espacio = " "
class T6_P1(Resource):
    """Flask-RESTful resource: collect interface statistics (via telnet) from
    the routers listed in an uploaded file and notify the requester."""

    def post(self):
        data = ImmutableMultiDict(request.form)
        # NOTE(review): the return value of to_dict() is discarded; this was
        # probably meant to be `data = data.to_dict(flat=False)` — confirm.
        data.to_dict(flat=False)
        file = request.files['file']  # text file with one router IP per line
        emails = [data['email']]
        numbers = [data['number']]
        resptPP = getStadistics(file)  # telnet into each router, scrape stats
        message = message_formater(resptPP)
        notify_email(emails,message)
        # WhatsApp notification currently disabled.
        #notify_whatsapp(numbers,message)
        return jsonify(resptPP)
def allowed_file(file_name):
    """Return True if *file_name* has an extension in ALOWED_EXTENSIONS.

    Bug fix: the original used ``raise`` instead of ``return``, which made
    every call fail with a TypeError (a bool is not an exception).
    """
    return '.' in file_name and file_name.rsplit('.', 1)[1] in ALOWED_EXTENSIONS
def file_json_IP(file):
    """Build the per-host result skeleton: one empty record per IP line in *file*."""
    records = []
    for line in file:
        records.append({
            'ip': line.rstrip('\n'),
            'fecha': '',
            'hostname': '',
            'info': [],
        })
    return records
def getStadistics(file):
json_build = file_json_IP(file)
counter = 0
for ip in json_build:
#Codigo
flag = False
try:
tn = telnetlib.Telnet(ip['ip'],23,1)
flag = True
except:
print('No fue posible la conexion con : ' + ip['ip'])
if flag:
tn.read_until(b"Username: ")
tn.write(user.encode('ascii') + b"\n")
if password:
tn.read_until(b"Password: ")
tn.write(password.encode('ascii') + b"\n")
tn.write(show1.encode('ascii') + b"\n")
tn.write(salir.encode('ascii') + b"\n")
tn.write(show2.encode('ascii') + b"\n")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"\n")
tn.write(salir.encode('ascii') + b"\n")
archivo = open(ip['ip'], "w+")
archivo.write(tn.read_all().decode('ascii'))
archivo.close()
archivo = open(ip['ip'], "r")
b = 0
while b != 2:
linea = archivo.readline()
b = b + 1
pos = linea.find("#")
json_build[counter]['hostname'] = linea[0:pos]
n_lineas = len(archivo.readlines())
archivo.seek(0)
puntero = 0
while puntero != n_lineas:
linea = archivo.readline()
posicion_int = linea.find("FastEthernet")
if posicion_int >= 0:
json_build[counter]['info'].append({'data':linea})
posicion_int = linea.find("Serial")
if posicion_int >= 0:
json_build[counter]['info'].append({'data':linea})
posicion_input = linea.find("packets input")
if posicion_input >= 0:
json_build[counter]['info'].append({'data':linea})
posicion_output = linea.find("packets output")
if posicion_output >= 0:
json_build[counter]['info'].append({'data':linea})
puntero = puntero + 1
archivo.close()
os.remove(ip['ip'])
t = time.localtime()
current_time = time.strftime("%d/%m/%y %H:%M:%S", t)
json_build[counter]['fecha'] = current_time
counter += 1
return json_build
def message_formater(json_resp):
    """Render the notification body listing responding and silent hosts."""
    # A host "responded" when its info list is non-empty.
    responders = [host for host in json_resp if len(host['info']) > 0]
    silent = [host for host in json_resp if len(host['info']) == 0]

    parts = ['Se obtuvo informacion de los host:\n']
    for host in responders:
        parts.append(host['ip'] + ' - fecha de respuesta: ' + host['fecha'] + '\n\t')
    parts.append('\nLas host que no respondieron son:\n')
    for host in silent:
        parts.append(host['ip'] + ' - fecha de respuesta: ' + host['fecha'] + '\n')
    return ''.join(parts)
def notify_email(emails, message):
    """Send *message* to every address in *emails* via the Notifications helper."""
    notify = Notifications()
    # Subject line is fixed; body is the formatted host report.
    notify.sendEmail(emails,'Trafico de los routers',message)
def notify_whatsapp(numbers, message):
    """Send *message* to every phone number in *numbers* via WhatsApp."""
    notify = Notifications()
    notify.sendWhatsApp(message,numbers)
| 35.526718 | 88 | 0.544263 |
58b1a85b4be68c3d4439163b63b933ca261e3e17 | 2,353 | py | Python | allesina_stability_function.py | galeanojav/Stability_equations | 22e881569f46e7c02a0985bae8b9bd7757bad764 | [
"MIT"
] | null | null | null | allesina_stability_function.py | galeanojav/Stability_equations | 22e881569f46e7c02a0985bae8b9bd7757bad764 | [
"MIT"
] | null | null | null | allesina_stability_function.py | galeanojav/Stability_equations | 22e881569f46e7c02a0985bae8b9bd7757bad764 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 10:36:42 2018
@author: javiergaleano
"""
# Sommers's stability
import numpy as np
import matplotlib.pyplot as plt
def random_matrix(S, center, sigma, ty=None):
    """Build an S x S random community matrix.

    Parameters
    ----------
    S : int
        Matrix dimension (S x S).
    center : float
        Value placed on every diagonal entry.
    sigma : float
        Scale of the off-diagonal distribution.
    ty : str, optional
        None          -> off-diagonals ~ Normal(0, sigma)
        'halfnorm'    -> off-diagonals ~ |Normal(0, sigma)|
        'exponential' -> off-diagonals ~ Exponential(scale=sigma)

    Bug fix: the original used ``if ... / if ... else``, so the ``else``
    branch overwrote the 'exponential' draw with normal samples; an ``elif``
    chain restores the intended behaviour.
    """
    # Matrix with "center" on the diagonal (float32, as in the original).
    diago = np.full_like(np.arange(S), center, dtype=np.float32)
    diagonal = np.diagflat(diago)

    n_off = S * S - S  # number of off-diagonal entries
    if ty == 'exponential':
        matriz = np.random.exponential(sigma, size=n_off)
    elif ty == 'halfnorm':
        matriz = np.abs(np.random.normal(loc=0.0, scale=sigma, size=n_off))
    else:
        matriz = np.random.normal(loc=0.0, scale=sigma, size=n_off)

    # Fill the off-diagonal positions row by row from the sampled values.
    k = 0
    for i in range(S):
        for j in range(S):
            if i != j:
                diagonal[i][j] = matriz[k]
                k += 1
    return diagonal
def plot_eig(autovalor):
    """
    Scatter-plot eigenvalues in the complex plane.

    X axis: real part; Y axis: imaginary part. The figure is created here but
    displayed by the caller (via plt.show()).
    """
    fig, ax1 = plt.subplots(1, 1)
    # Split each complex eigenvalue into its real/imaginary coordinates.
    X1=[]
    Y1=[]
    for i in autovalor:
        X1.append(i.real)
        Y1.append(i.imag)
    # Low alpha so dense eigenvalue clouds remain readable.
    ax1.scatter(X1,Y1,alpha=0.3)
    ax1.set(xlabel="Real", ylabel="Im")
    return
S = 1000 # number of species
center = -10.0
sigma = 1/S
diagonal = random_matrix(S,center,sigma, ty='halfnorm')
### mean and var of the matrix
media1 = np.mean(diagonal, dtype = np.float64)
varianza1 = np.var(diagonal,dtype=np.float64)
print(media1, varianza1)
### calculating eigenvalues and plotting
autovalor1 = np.linalg.eigvals(diagonal)
plot_eig(autovalor1)
plt.show()
#mean, var, skew, kurt = halfnorm.stats(moments='mvsk')
#x = np.linspace(halfnorm.ppf(0.01), halfnorm.ppf(0.99), 100)
#ax.plot(x, halfnorm.pdf(x), 'ro', lw=5, alpha=0.6, label='halfnorm pdf')
#r = halfnorm.rvs(size=10000)
#ax.hist(r, normed=True, histtype='stepfilled', bins = 20, alpha=0.2)
#ax.legend(loc='best', frameon=False)
#plt.show()
| 22.409524 | 80 | 0.613685 |
66b34181aa9970b2e5472d08ab4c8da63703431b | 354 | py | Python | openfda/spl/extract_one.py | hobochili/openfda | 9958c4bc3d04d2e9cfc75f9cd894ad07a45e9141 | [
"CC0-1.0"
] | null | null | null | openfda/spl/extract_one.py | hobochili/openfda | 9958c4bc3d04d2e9cfc75f9cd894ad07a45e9141 | [
"CC0-1.0"
] | 2 | 2021-03-25T23:41:00.000Z | 2021-06-02T01:34:44.000Z | openfda/spl/extract_one.py | hobochili/openfda | 9958c4bc3d04d2e9cfc75f9cd894ad07a45e9141 | [
"CC0-1.0"
] | 1 | 2021-01-18T01:34:45.000Z | 2021-01-18T01:34:45.000Z | #!/usr/local/bin/python
import extract
import sys
tree = extract.parse_xml(sys.argv[1])
methods_to_call = []
for method in dir(extract):
if method.find('Extract') == 0:
methods_to_call.append(method)
methods_to_call.sort()
for method in methods_to_call:
print method.replace('Extract', '') + ':', eval('extract.%(method)s(tree)' % locals())
| 20.823529 | 88 | 0.700565 |
f79cc4760079a4ff4a1278a20afa4118be8a303a | 18,149 | py | Python | quiz/quiz.py | keanemind/Keane-Cogs | c0a04d2b5c12ca8f3fbf2eaff9df5c220ab5bc93 | [
"MIT"
] | null | null | null | quiz/quiz.py | keanemind/Keane-Cogs | c0a04d2b5c12ca8f3fbf2eaff9df5c220ab5bc93 | [
"MIT"
] | 5 | 2017-06-17T05:21:50.000Z | 2018-04-17T04:33:47.000Z | quiz/quiz.py | keanemind/Keane-Cogs | c0a04d2b5c12ca8f3fbf2eaff9df5c220ab5bc93 | [
"MIT"
] | null | null | null | """A trivia cog that uses Open Trivia Database."""
import os
import html
import asyncio
import time
import datetime
import random
import math
import aiohttp
import discord
from discord.ext import commands
from __main__ import send_cmd_help
from .utils import checks
from .utils.dataIO import dataIO
SAVE_FILEPATH = "data/KeaneCogs/quiz/quiz.json"
class Quiz:
"""Play a kahoot-like trivia game with questions from Open Trivia Database."""
def __init__(self, bot):
self.bot = bot
self.save_file = dataIO.load_json(SAVE_FILEPATH)
self.playing_channels = {}
self.timeout = 20
self.game_tasks = []
self.starter_task = bot.loop.create_task(self.start_loop())
@commands.group(pass_context=True, no_pm=True)
async def quiz(self, ctx):
"""Play a kahoot-like trivia game with questions from Open Trivia Database.
In this game, you will compete with other players to correctly answer each
question as quickly as you can. You have 10 seconds to type the answer
choice before time runs out. Only your first answer will be registered.
The longer you take to say the right answer, the fewer points you get.
If you get it wrong, you get no points.
"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@quiz.command(name="play", pass_context=True)
async def quiz_play(self, ctx):
"""Create or join a quiz game."""
channel = ctx.message.channel
player = ctx.message.author
if channel.id not in self.playing_channels:
self.playing_channels[channel.id] = {"Start":datetime.datetime.utcnow(),
"Started":False,
"Players":{player.id:0},
"Answers":{}
}
return await self.bot.say("{} is starting a quiz game! It will start "
"in 20 seconds. Use `{}quiz play` to join."
.format(player.display_name, ctx.prefix))
channelinfo = self.playing_channels[channel.id]
if player.id in channelinfo["Players"]:
await self.bot.say("You are already in the game.")
elif channelinfo["Started"]:
await self.bot.say("A quiz game is already underway.")
else:
channelinfo["Players"][player.id] = 0
await self.bot.say("{} joined the game.".format(player.display_name))
async def start_loop(self):
"""Starts quiz games when the timeout period ends."""
while True:
await asyncio.sleep(1)
for channelid in list(self.playing_channels):
channelinfo = self.playing_channels[channelid]
since_start = (datetime.datetime.utcnow() - channelinfo["Start"]).total_seconds()
if since_start > self.timeout and not channelinfo["Started"]:
if len(channelinfo["Players"]) > 1:
channel = self.bot.get_channel(channelid)
self.game_tasks.append(self.bot.loop.create_task(self.game(channel)))
channelinfo["Started"] = True
else:
await self.bot.send_message(self.bot.get_channel(channelid),
"Nobody else joined the quiz game.")
self.playing_channels.pop(channelid)
    async def on_message(self, message):
        """discord.py event hook: record a player's first a/b/c/d answer.

        Only players registered in the channel's game are considered, only
        their first answer is kept, and the arrival time is stored so that
        faster correct answers score more points.
        """
        authorid = message.author.id
        channelid = message.channel.id
        choice = message.content.lower()
        if channelid in self.playing_channels:
            channelinfo = self.playing_channels[channelid]
            if (authorid in channelinfo["Players"]
                    and authorid not in channelinfo["Answers"]
                    and choice in {"a", "b", "c", "d"}):
                channelinfo["Answers"][authorid] = {"Choice":choice,
                                                    "Time":time.perf_counter()}
async def game(self, channel):
"""Runs a quiz game on a channel."""
self.add_server(channel.server)
try:
category = await self.category_selector()
category_name = await self.category_name(category)
response = await self.get_questions(channel.server, category=category)
except RuntimeError:
await self.bot.send_message(channel, "An error occurred in retrieving questions. "
"Please try again.")
self.playing_channels.pop(channel.id)
raise
channelinfo = self.playing_channels[channel.id]
# Introduction
intro = ("Welcome to the quiz game! Your category is {}.\n"
"Remember to answer correctly as quickly as you can. "
"Only your first answer will be registered by the game. "
"You have 10 seconds per question.\n"
"The game will begin shortly.".format(category_name))
await self.bot.send_message(channel, intro)
await asyncio.sleep(4)
# Question and Answer
afk_questions = 0
for index, dictionary in enumerate(response["results"]):
answers = [dictionary["correct_answer"]] + dictionary["incorrect_answers"]
# Display question and countdown
if len(answers) == 2: # true/false question
answers = ["True", "False", "", ""]
else:
answers = [html.unescape(answer) for answer in answers]
random.shuffle(answers)
message = "```\n"
message += "{} ({}/20)\n".format(html.unescape(dictionary["question"]), index + 1)
message += "A. {}\n".format(answers[0])
message += "B. {}\n".format(answers[1])
message += "C. {}\n".format(answers[2])
message += "D. {}\n".format(answers[3])
message += "```"
message_obj = await self.bot.send_message(channel, message)
await self.bot.add_reaction(message_obj, "0⃣")
channelinfo["Answers"].clear() # clear the previous question's answers
start_time = time.perf_counter()
numbers = ["1⃣", "2⃣", "3⃣", "4⃣", "5⃣", "6⃣", "7⃣", "8⃣", "9⃣", "🔟"]
for i in range(10):
if len(channelinfo["Answers"]) == len(channelinfo["Players"]):
break
await asyncio.sleep(1)
await self.bot.add_reaction(message_obj, numbers[i])
# Organize answers
user_answers = channelinfo["Answers"] # snapshot channelinfo["Answers"] at this point in time
# to ignore new answers that are added to it
answerdict = {["a", "b", "c", "d"][num]: answers[num] for num in range(4)}
# Check for AFK
if len(user_answers) < 2:
afk_questions += 1
if afk_questions == 3:
await self.bot.send_message(channel, "The game has been cancelled due "
"to lack of participation.")
self.playing_channels.pop(channel.id)
return
else:
afk_questions = 0
# Find and display correct answer
correct_letter = ""
for letter, answer in answerdict.items():
if answer == html.unescape(dictionary["correct_answer"]):
correct_letter = letter
break
assert answerdict[correct_letter] == html.unescape(dictionary["correct_answer"])
message = "Correct answer:```{}. {}```".format(correct_letter.upper(),
answerdict[correct_letter])
await self.bot.send_message(channel, message)
# Sort player IDs by answer time
playerids = sorted(user_answers,
key=lambda playerid: user_answers[playerid]["Time"])
# Assign scores
first = True
for playerid in playerids:
if user_answers[playerid]["Choice"] == correct_letter:
time_taken = user_answers[playerid]["Time"] - start_time
assert time_taken > 0
# the 20 in the formula below is 2 * 10s (max answer time)
points = round(1000 * (1 - (time_taken / 20)))
# The first correct answer gets a bonus 250 points
if first:
points += 250
first = False
channelinfo["Players"][playerid] += points
# Display top 5 players and their points
message = self.scoreboard(channel)
await self.bot.send_message(channel, "Scoreboard:\n" + message)
await asyncio.sleep(4)
if index < 19:
await self.bot.send_message(channel, "Next question...")
await asyncio.sleep(1)
# Ending and Results
await self.end_game(channel)
async def end_game(self, channel):
"""Ends a quiz game."""
# non-linear credit earning .0002x^{2.9} where x is score/100
# leaderboard with credits earned
channelinfo = self.playing_channels[channel.id]
idlist = sorted(channelinfo["Players"],
key=(lambda idnum: channelinfo["Players"][idnum]),
reverse=True)
winner = channel.server.get_member(idlist[0])
await self.bot.send_message(channel, "Game over! {} won!".format(winner.mention))
bank = self.bot.get_cog("Economy").bank
leaderboard = "```py\n"
max_credits = self.calculate_credits(channelinfo["Players"][idlist[0]])
end_len = len(str(max_credits)) + 1 # the 1 is for a space between a max length name and the score
rank_len = len(str(len(channelinfo["Players"])))
rank = 1
no_account = False
for playerid in idlist:
player = channel.server.get_member(playerid)
account_exists = bank.account_exists(player) # how does this know what server it's called in???
if account_exists:
if len(player.display_name) > 25 - rank_len - end_len:
name = player.display_name[:22 - rank_len - end_len] + "..."
else:
name = player.display_name
else:
if len(player.display_name) > 24 - rank_len - end_len:
name = player.display_name[:21 - rank_len - end_len] + "...*"
else:
name = player.display_name + "*"
leaderboard += str(rank)
leaderboard += " " * (1 + rank_len - len(str(rank)))
leaderboard += name
creds = self.calculate_credits(channelinfo["Players"][playerid])
creds_str = str(creds)
leaderboard += " " * (26 - rank_len - 1 - len(name) - len(creds_str))
leaderboard += creds_str + "\n"
if account_exists:
bank.deposit_credits(player, creds)
else:
no_account = True
rank += 1
if not no_account:
leaderboard += "```"
else:
leaderboard += ("* because you do not have a bank account, "
"you did not get to keep the credits you won.```\n")
await self.bot.send_message(channel, "Credits earned:\n" + leaderboard)
self.playing_channels.pop(channel.id)
def scoreboard(self, channel):
"""Returns a scoreboard string to be sent to the text channel."""
channelinfo = self.playing_channels[channel.id]
scoreboard = "```py\n"
idlist = sorted(channelinfo["Players"],
key=(lambda idnum: channelinfo["Players"][idnum]),
reverse=True)
max_score = channelinfo["Players"][idlist[0]]
end_len = len(str(max_score)) + 1
rank = 1
for playerid in idlist[:5]:
player = channel.server.get_member(playerid)
if len(player.display_name) > 24 - end_len:
name = player.display_name[:21 - end_len] + "..."
else:
name = player.display_name
scoreboard += str(rank) + " " + name
score_str = str(channelinfo["Players"][playerid])
scoreboard += " " * (24 - len(name) - len(score_str))
scoreboard += score_str + "\n"
rank += 1
scoreboard += "```"
return scoreboard
def calculate_credits(self, score):
"""Calculates credits earned from a score."""
adjusted = score / 100
if adjusted < 156.591:
result = .0002 * (adjusted**2.9)
else:
result = (.6625 * math.exp(.0411 * adjusted)) + 50
return round(result)
# OpenTriviaDB API functions
async def get_questions(self, server, category=None, difficulty=None):
"""Gets questions, resetting a token or getting a new one if necessary."""
parameters = {"amount": 20}
if category:
parameters["category"] = category
if difficulty:
parameters["difficulty"] = difficulty
for _ in range(3):
parameters["token"] = await self.get_token(server)
async with aiohttp.get("https://opentdb.com/api.php",
params=parameters) as response:
response_json = await response.json()
response_code = response_json["response_code"]
if response_code == 0:
return response_json
elif response_code == 1:
raise RuntimeError("Question retrieval unsuccessful. Response "
"code from OTDB: 1")
elif response_code == 2:
raise RuntimeError("Question retrieval unsuccessful. Response "
"code from OTDB: 2")
elif response_code == 3:
# Token expired. Obtain new one.
print("Response code from OTDB: 3")
self.save_file["Servers"][server.id]["Token"] = ""
dataIO.save_json(SAVE_FILEPATH, self.save_file)
elif response_code == 4:
# Token empty. Reset it.
print("Response code from OTDB: 4")
await self.reset_token(server)
raise RuntimeError("Failed to retrieve questions.")
async def get_token(self, server):
"""Gets the provided server's token, or generates
and saves one if one doesn't exist."""
if self.save_file["Servers"][server.id]["Token"]:
token = self.save_file["Servers"][server.id]["Token"]
else:
async with aiohttp.get("https://opentdb.com/api_token.php",
params={"command": "request"}) as response:
response_json = await response.json()
token = response_json["token"]
self.save_file["Servers"][server.id]["Token"] = token
dataIO.save_json(SAVE_FILEPATH, self.save_file)
return token
async def reset_token(self, server):
"""Resets the provided server's token."""
token = self.save_file["Servers"][server.id]["Token"]
async with aiohttp.get("https://opentdb.com/api_token.php",
params={"command": "reset", "token": token}) as response:
response_code = (await response.json())["response_code"]
if response_code != 0:
raise RuntimeError("Token reset was unsuccessful. Response code from "
"OTDB: {}".format(response_code))
async def category_selector(self):
"""Chooses a random category that has enough questions."""
for _ in range(10):
category = random.randint(9, 32)
async with aiohttp.get("https://opentdb.com/api_count.php",
params={"category": category}) as response:
response_json = await response.json()
assert response_json["category_id"] == category
if response_json["category_question_count"]["total_question_count"] > 39:
return category
raise RuntimeError("Failed to select a category.")
async def category_name(self, idnum):
"""Finds a category's name from its number."""
async with aiohttp.get("https://opentdb.com/api_category.php") as response:
response_json = await response.json()
for cat_dict in response_json["trivia_categories"]:
if cat_dict["id"] == idnum:
return cat_dict["name"]
raise RuntimeError("Failed to find category's name.")
# Other functions
    def add_server(self, server):
        """Adds the server to the file if it isn't already in it."""
        if server.id not in self.save_file["Servers"]:
            # Each server stores its own OpenTriviaDB session token;
            # it stays empty until first requested in get_token().
            self.save_file["Servers"][server.id] = {"Token": ""}
            dataIO.save_json(SAVE_FILEPATH, self.save_file)
        return
    def __unload(self):
        # Called by Red when the cog is unloaded: cancel the game-starter loop
        # and every in-flight game task so they don't outlive the cog.
        self.starter_task.cancel()
        for task in self.game_tasks:
            task.cancel()
def dir_check():
    """Creates a folder and save file for the cog if they don't exist."""
    if not os.path.exists("data/KeaneCogs/quiz"):
        print("Creating data/KeaneCogs/quiz folder...")
        os.makedirs("data/KeaneCogs/quiz")
    if not dataIO.is_valid_json(SAVE_FILEPATH):
        print("Creating default quiz.json...")
        # Top-level schema: {"Servers": {server_id: {"Token": str}}}
        dataIO.save_json(SAVE_FILEPATH, {"Servers": {}})
def setup(bot):
    """Creates a Quiz object."""
    dir_check()  # ensure the data folder and save file exist before loading
    bot.add_cog(Quiz(bot))
| 43.627404 | 107 | 0.551601 |
be43fd0b89a13298e2a312e28c84b431a770e615 | 340 | py | Python | pirates/npc/DistributedBomberZombieAI.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 81 | 2018-04-08T18:14:24.000Z | 2022-01-11T07:22:15.000Z | pirates/npc/DistributedBomberZombieAI.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 4 | 2018-09-13T20:41:22.000Z | 2022-01-08T06:57:00.000Z | pirates/npc/DistributedBomberZombieAI.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 26 | 2018-05-26T12:49:27.000Z | 2021-09-11T09:11:59.000Z | from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
class DistributedBomberZombieAI(DistributedObjectAI):
    """AI-side distributed object for the bomber zombie NPC.

    Only wires up the Panda3D distributed-object machinery; no
    bomber-specific behaviour is defined in this class.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBomberZombieAI')

    def __init__(self, air):
        # `air` is the AI repository this distributed object belongs to.
        DistributedObjectAI.__init__(self, air)
f8a12363aa921f7d8482baa192a6b7c26431c7c6 | 482 | py | Python | python/exercicios mundo 2/ex36_45.py/ex004.py | LEXW3B/PYTHON | 1ae54ea709c008bd7fab7602e034773610e7985e | [
"MIT"
] | 1 | 2022-01-05T08:51:16.000Z | 2022-01-05T08:51:16.000Z | python/exercicios mundo 2/ex36_45.py/ex004.py | LEXW3B/PYTHON | 1ae54ea709c008bd7fab7602e034773610e7985e | [
"MIT"
] | null | null | null | python/exercicios mundo 2/ex36_45.py/ex004.py | LEXW3B/PYTHON | 1ae54ea709c008bd7fab7602e034773610e7985e | [
"MIT"
] | null | null | null | #38-escreva um programa que leia dois numeros inteiros e compare os, mostrando na tela uma menssagem.
#-o primeiro valor e maior
#-o segundo valor e maior
#-nao existe valor maior, os dois sao iguais.
n1 = int(input('digite um numero: '))
n2 = int(input('digite um numero: '))
if n1 > n2:
print('O primeiro valor é maior.')
elif n2 > n1:
print('O segundo valor é maior.')
else:
print('Não existe valor maior, os dois são iguais.')
#FIM//A\\ | 34.428571 | 101 | 0.649378 |
8494656e895f75c1119dd1503cae11b193ac86ac | 1,278 | py | Python | config/urls.py | RohitRepo/onepunch | 6c0eca944e63e74f0a94c3d5cfdbecc5e96541de | [
"BSD-3-Clause"
] | null | null | null | config/urls.py | RohitRepo/onepunch | 6c0eca944e63e74f0a94c3d5cfdbecc5e96541de | [
"BSD-3-Clause"
] | null | null | null | config/urls.py | RohitRepo/onepunch | 6c0eca944e63e74f0a94c3d5cfdbecc5e96541de | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("onepunch.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request),
url(r'^403/$', default_views.permission_denied),
url(r'^404/$', default_views.page_not_found),
url(r'^500/$', default_views.server_error),
]
| 35.5 | 91 | 0.70579 |
73e8691d8fe72d8b259780d25b967a8fc4ff64b1 | 476 | py | Python | 2-mouth02/day09/read.py | gary-gggggg/gary | d8ba30ea4bc2b662a2d6a87d247f813e5680d63e | [
"Apache-2.0"
] | 4 | 2021-02-01T10:28:11.000Z | 2021-02-01T10:34:40.000Z | 2-mouth02/day09/read.py | gary-gggggg/gary | d8ba30ea4bc2b662a2d6a87d247f813e5680d63e | [
"Apache-2.0"
] | null | null | null | 2-mouth02/day09/read.py | gary-gggggg/gary | d8ba30ea4bc2b662a2d6a87d247f813e5680d63e | [
"Apache-2.0"
] | null | null | null | """pymysql数据读操作
"""
import pymysql
db_dic = {
"host": "localhost",
"port": 3306,
"user": "root",
"password": "123456",
"database": "gary",
"charset": "utf8"
}
# 链接数据库
db = pymysql.connect(**db_dic) # 一个链接口
# 创建游标 游标对象:执行sql得到结果的对象
cur = db.cursor() # 打开完成
# 操作数据
sql="select name, grade from school where grade>%s;"
cur.execute(sql,[60])
#迭代获取查询结果
# for res in cur:
# print(res)
one=cur.fetchmany(3)
print(one)
# 关闭数据库
cur.close()
db.close() | 15.354839 | 52 | 0.615546 |
06eca3be10ebb8f32ee2c9ff05fd6471592bd754 | 1,892 | py | Python | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/linux_configuration_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2022-03-30T22:39:15.000Z | 2022-03-30T22:39:15.000Z | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/linux_configuration_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/linux_configuration_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LinuxConfiguration(Model):
"""Specifies the Linux operating system settings on the virtual machine.
<br><br>For a list of supported Linux distributions, see [Linux on
Azure-Endorsed
Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
<br><br> For running non-endorsed distributions, see [Information for
Non-Endorsed
Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
:param disable_password_authentication: Specifies whether password
authentication should be disabled.
:type disable_password_authentication: bool
:param ssh: Specifies the ssh key configuration for a Linux OS.
:type ssh: ~azure.mgmt.compute.v2016_03_30.models.SshConfiguration
"""
_attribute_map = {
'disable_password_authentication': {'key': 'disablePasswordAuthentication', 'type': 'bool'},
'ssh': {'key': 'ssh', 'type': 'SshConfiguration'},
}
def __init__(self, *, disable_password_authentication: bool=None, ssh=None, **kwargs) -> None:
super(LinuxConfiguration, self).__init__(**kwargs)
self.disable_password_authentication = disable_password_authentication
self.ssh = ssh
| 47.3 | 166 | 0.688161 |
4acc19edf282eb5b236597954e9a151b39a88cd2 | 640 | py | Python | InvenTree/stock/migrations/0043_auto_20200525_0420.py | ArakniD/InvenTree | 0ebf2ebd832b2d736e895abe054ca56bfd1cc477 | [
"MIT"
] | 656 | 2017-03-29T22:06:14.000Z | 2022-03-30T11:23:52.000Z | InvenTree/stock/migrations/0043_auto_20200525_0420.py | ArakniD/InvenTree | 0ebf2ebd832b2d736e895abe054ca56bfd1cc477 | [
"MIT"
] | 1,545 | 2017-04-10T23:26:04.000Z | 2022-03-31T18:32:10.000Z | InvenTree/stock/migrations/0043_auto_20200525_0420.py | fablabbcn/InvenTree | 1d7ea7716cc96c6ffd151c822b01cd1fb5dcfecd | [
"MIT"
] | 196 | 2017-03-28T03:06:21.000Z | 2022-03-28T11:53:29.000Z | # Generated by Django 3.0.5 on 2020-05-25 04:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('part', '0042_auto_20200518_0900'),
('stock', '0042_auto_20200523_0121'),
]
operations = [
migrations.AlterField(
model_name='stockitem',
name='part',
field=models.ForeignKey(help_text='Base part', limit_choices_to={'active': True, 'virtual': False}, on_delete=django.db.models.deletion.CASCADE, related_name='stock_items', to='part.Part', verbose_name='Base Part'),
),
]
| 30.47619 | 227 | 0.65625 |
15d02a7b7e23602640e33d3c326f60148380329f | 1,966 | py | Python | Conet/posting/models.py | JJack27/CMPUT404-Project | 24bb293ce4da1643f51bb8162bc9372a139df1b3 | [
"MIT"
] | 1 | 2021-04-26T18:08:34.000Z | 2021-04-26T18:08:34.000Z | Conet/posting/models.py | JJack27/CMPUT404-Project | 24bb293ce4da1643f51bb8162bc9372a139df1b3 | [
"MIT"
] | 4 | 2020-02-11T23:59:31.000Z | 2021-06-10T21:19:04.000Z | Conet/posting/models.py | JJack27/CMPUT404-Project | 24bb293ce4da1643f51bb8162bc9372a139df1b3 | [
"MIT"
] | 2 | 2019-03-29T20:16:01.000Z | 2019-04-03T21:03:32.000Z | import uuid
from django.db import models
from Accounts.models import Author
#from django.apps import apps
# Create your models here.
content_type_choice = (
('text/plain', 'text/plain'),
('text/markdown', 'text/markdown'),
('application/base64', 'application/base64'),
('image/png;base64', 'image/png;base64'),
('image/jpeg;base64', 'image/jpeg;base64'),
)
class Post(models.Model):
    """A post authored by an :class:`Accounts.models.Author`.

    Content may be plain text, markdown or base64-encoded data (see
    ``content_type_choice``); visibility is controlled per-post via
    ``visibility``/``visibleTo``.
    """
    # (stored value, label) pairs for the visibility policy of a post.
    visible_type_choice = (
        ('PRIVATE', 'private to visibleTo list'),
        ('FRIENDS', 'private to my friends'),
        ('FOAF', 'private to friends of friends'),
        ('SERVERONLY', 'private to only firends on local server'),
        ('PUBLIC', 'public'),
    )
    # Random UUID primary key; not editable through forms/admin.
    postid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    title = models.CharField(max_length=128)
    # URLs of where the post was fetched from / originally created.
    # NOTE(review): presumably used for cross-server federation — confirm.
    source = models.URLField(null=True)
    origin = models.URLField(null=True)
    description = models.CharField(max_length=200)
    postauthor = models.ForeignKey(Author, on_delete=models.CASCADE, related_name="postauthor")
    contentType = models.CharField(max_length=32, choices=content_type_choice, default='text/plain')
    content = models.TextField(blank=True)
    # Free-form category string (no normalization at the model level).
    categories = models.CharField(max_length=250)
    # auto_now=True: updated on every save, not only on creation.
    published = models.DateTimeField(auto_now=True)
    visibility = models.CharField(max_length=10, choices=visible_type_choice, default='PUBLIC')
    # Recipient list for 'PRIVATE' posts, stored as raw text.
    visibleTo = models.TextField(blank=True)
    unlisted = models.BooleanField(default=False)
class Comment(models.Model):
    """A comment attached to a :class:`Post`, written by an Author."""
    # Random UUID primary key; not editable through forms/admin.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    commentauthor = models.ForeignKey(Author, on_delete=models.CASCADE, related_name='author')
    post = models.ForeignKey(Post, on_delete=models.CASCADE,related_name='post')
    comment = models.CharField(max_length=500)
    contentType = models.CharField(max_length=32, choices=content_type_choice, default='text/plain')
    # auto_now=True: updated on every save, not only on creation.
    published = models.DateTimeField(auto_now=True)
| 40.958333 | 100 | 0.72177 |
a43eabc66867e9950ef82949e00bf9dddba4a323 | 1,455 | py | Python | wouso/core/qpool/admin.py | AlexandruGhergut/wouso | f26244ff58ae626808ae8c58ccc93d21f9f2666f | [
"Apache-2.0"
] | null | null | null | wouso/core/qpool/admin.py | AlexandruGhergut/wouso | f26244ff58ae626808ae8c58ccc93d21f9f2666f | [
"Apache-2.0"
] | null | null | null | wouso/core/qpool/admin.py | AlexandruGhergut/wouso | f26244ff58ae626808ae8c58ccc93d21f9f2666f | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from wouso.core.qpool import models
from django.forms import ModelForm
# Inline editors embedded in the Question admin page (see QuestionAdmin).
class AnswersInline(admin.TabularInline):
    # Edit a question's answers directly on the question page.
    model = models.Answer
    extra = 0
    verbose_name = "Answer"
    verbose_name_plural = "Answers"
class TagsInline(admin.TabularInline):
    # Edit the Question<->Tag M2M rows via the auto-generated through model.
    model = models.Question.tags.through
    extra = 0
    verbose_name = "Tag"
    verbose_name_plural = "Tags"
class CategoryInline(admin.TabularInline):
    # NOTE(review): `models.Question.category` is a field attribute, not a
    # model class — TabularInline.model normally expects a model (compare
    # TagsInline's `.through`). Verify this inline actually renders.
    model = models.Question.category
    extra = 0
    verbose_name = "Category"
    verbose_name_plural = "Categories"
class ScheduleInline(admin.TabularInline):
    # Edit the days a question is scheduled for.
    model = models.Schedule
    extra = 0
    verbose_name = "Day"
class QuestionForm(ModelForm):
    """ModelForm used by QuestionAdmin for editing questions."""
    class Meta:
        model = models.Question
    def clean(self):
        # No extra validation yet; returns cleaned_data unchanged.
        # TODO: should check question type against number of
        # correct answers selected in AnswerInline
        return self.cleaned_data
class QuestionAdmin(admin.ModelAdmin):
    """Admin page for questions with inline answers, tags and schedule."""
    inlines = [AnswersInline,TagsInline,ScheduleInline]
    form = QuestionForm
    # Tags are edited through TagsInline, so hide the raw M2M widget.
    exclude = ('tags',)
    list_display = ('text', 'tags_nice', 'scheduled', 'category', 'active', 'id', 'proposed_by', 'endorsed_by')
    list_filter = ('active', 'category', 'tags')
class Questions2(admin.ModelAdmin):
    """Minimal admin changelist showing only the question text."""
    # Bug fix: ('text') is just a parenthesized string, not a tuple —
    # Django's list_display expects a sequence of field names, and a bare
    # string would be treated as the sequence 't', 'e', 'x', 't'
    # (admin.E107 under the system checks). The trailing comma makes it
    # a one-element tuple.
    list_display = ('text',)
# Register the qpool models with the default admin site.
# NOTE(review): Questions2 is defined above but never registered — confirm
# whether it is intentionally unused.
admin.site.register(models.Question, QuestionAdmin)
admin.site.register(models.Tag)
admin.site.register(models.Category)
admin.site.register(models.Schedule)
| 28.529412 | 111 | 0.716838 |
37037d62c2358cc09cbdbfc771ca742430d38d69 | 3,490 | py | Python | python/GafferImageUI/RampUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 561 | 2016-10-18T04:30:48.000Z | 2022-03-30T06:52:04.000Z | python/GafferImageUI/RampUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 1,828 | 2016-10-14T19:01:46.000Z | 2022-03-30T16:07:19.000Z | python/GafferImageUI/RampUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 120 | 2016-10-18T15:19:13.000Z | 2021-12-20T16:28:23.000Z | ##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import imath
import Gaffer
import GafferImage
## A function suitable as the postCreator in a NodeMenu.append() call. It
# sets the ramp position for the node to cover the entire format.
def postCreate( node, menu ) :

	# Stretch the ramp horizontally across the full default format,
	# centered vertically, so a freshly created node fills the frame.
	defaultFormat = GafferImage.FormatPlug.getDefaultFormat( node.scriptNode().context() )
	window = defaultFormat.getDisplayWindow()
	midY = window.size().y * .5
	node["startPosition"].setValue( imath.V2f( 0, midY ) )
	node["endPosition"].setValue( imath.V2f( window.size().x, midY ) )
Gaffer.Metadata.registerNode(
GafferImage.Ramp,
"description",
"""
Outputs an image of a color gradient interpolated using the ramp plug.
""",
plugs = {
"format" : [
"description",
"""
The resolution and aspect ratio of the image.
""",
],
"ramp" : [
"description",
"""
The gradient of colour used to draw the ramp.
""",
],
"startPosition" : [
"description",
"""
2d position for the start of the ramp color interpolation.
""",
],
"endPosition" : [
"description",
"""
2d position for the end of the ramp color interpolation.
""",
],
"layer" : [
"description",
"""
The layer to generate. The output channels will
be named ( layer.R, layer.G, layer.B and layer.A ).
""",
"stringPlugValueWidget:placeholderText", "[RGBA]",
],
"transform" : [
"description",
"""
A transformation applied to the entire ramp.
The translate and pivot values are specified in pixels,
and the rotate value is specified in degrees.
""",
"plugValueWidget:type", "GafferUI.LayoutPlugValueWidget",
"layout:section", "Transform",
],
}
)
| 27.054264 | 97 | 0.666762 |
166f27ae819911d9834ce03269a394029e0d202f | 19,903 | py | Python | aasaan/communication/api.py | deepakkt/aasaan | 77ef72e785e6ae562f51ae64fa9d85faf860c315 | [
"MIT"
] | null | null | null | aasaan/communication/api.py | deepakkt/aasaan | 77ef72e785e6ae562f51ae64fa9d85faf860c315 | [
"MIT"
] | 15 | 2020-06-05T19:26:26.000Z | 2022-03-11T23:33:53.000Z | aasaan/communication/api.py | deepakkt/aasaan | 77ef72e785e6ae562f51ae64fa9d85faf860c315 | [
"MIT"
] | null | null | null | import requests
from sys import modules
from functools import partial
from django.core.exceptions import ValidationError, ObjectDoesNotExist, ImproperlyConfigured
from django.core.mail import get_connection
from django.core.mail.message import EmailMultiAlternatives
from django.conf import settings
from .settings import smscountry_parms as base_smscountry_parms
from .settings import COMMUNICATION_CONTEXTS, COMMUNICATION_TYPES, RECIPIENT_VISIBILITY
from . import settings as comm_settings
from .models import Payload, PayloadDetail, CommunicationProfile
from contacts.models import RoleGroup
import sendgrid
import markdown
# django_rq is an optional dependency: it is only needed when
# settings.ASYNC hands sends off to a worker queue (see send_communication).
try:
    import django_rq
except (ImportError, ImproperlyConfigured):
    pass
def _get_param(param_name):
    """Fetch a named setting from the communication app's settings module.

    Raises ValidationError with a pointer to where the setting should be
    defined when it is missing.
    """
    try:
        param_value = getattr(comm_settings, param_name)
    except AttributeError:
        # Bug fix: getattr() raises AttributeError for a missing module
        # attribute; the previous `except KeyError` never fired, so a
        # missing setting surfaced as a raw AttributeError instead of
        # this friendly message.
        raise ValidationError("Trying to locate '%s' parameter but could not find it. "
                              "Please define the parameter in settings.py in the communication app" % param_name)
    return param_value
class MessageAdapter(object):
    """Template-method base class for sending a staged ``Payload``.

    Subclasses implement the channel-specific hooks (``get_connection``,
    ``load_communication_settings``, ``setup_initial_adapter_message``,
    ``setup_final_adapter_message`` and ``send_adapter_communication``);
    ``send_message`` drives the workflow and records per-recipient and
    overall status back onto the payload rows.
    """

    def __init__(self, *args, **kwargs):
        self._validate_message_key(*args, **kwargs)
        self._setup_profile(*args, **kwargs)
        self.get_connection()

    def _validate_message_key(self, *args, **kwargs):
        """Resolve ``message_key`` to a pending Payload that has recipients.

        Raises ValidationError for a missing/unknown key, an empty
        recipient list, or a payload that is not in 'pending' state.
        """
        self.message_key = kwargs.get('message_key')
        if not self.message_key:
            raise ValidationError('Message key cannot be empty')
        try:
            self.message = Payload.objects.get(communication_hash=self.message_key)
        except ObjectDoesNotExist:
            raise ValidationError("Message key '%s' not found" % (self.message_key))
        if self.message.recipient_count() == 0:
            raise ValidationError("No recipients staged for message key '%s'"
                                  %(self.message_key))
        if not self.message.is_status_pending():
            raise ValidationError("%s is not 'pending'. Restage a fresh message" % self.message)
        self.message_recipients = PayloadDetail.objects.filter(communication=self.message)
        self.communication_type = self.message.communication_type

    def _setup_profile(self, *args, **kwargs):
        """Select the named profile for this channel, falling back to the
        channel's default profile when the name is absent or unknown."""
        profile_name = kwargs.get('profile_name')
        try:
            default_profile = CommunicationProfile.objects.get(communication_type=self.communication_type,
                                                               default=True)
        except ObjectDoesNotExist:
            raise ValidationError('No default email profile found. Mark one profile as default.')
        if profile_name:
            try:
                self.profile = CommunicationProfile.objects.get(communication_type=self.communication_type,
                                                                profile_name=profile_name)
            except ObjectDoesNotExist:
                self.profile = default_profile
        else:
            self.profile = default_profile

    def get_connection(self):
        """
        inheriting class to set this up. assign self.connection at end of function
        """
        pass

    def close_connection(self):
        """
        Close connection here
        """
        pass

    def validate_message(self):
        """
        Place validations prior to sending the message here
        """
        pass

    def load_communication_settings(self):
        """
        Load adapter specific settings here. One important variable
        the loader needs to set is self.loop_individual_recipient.
        Set other settings here which can be used by methods later
        in the workflow.

        Subclasses MUST also set self.message_send_exceptions to the
        exception type(s) their send_adapter_communication raises; the
        base value of None would make the except clause in send_message
        fail with a TypeError.
        """
        self.message_send_exceptions = None

    def setup_initial_adapter_message(self):
        """
        This is the hook to setup the communication adapter specific message
        object
        """
        self.adapter_message = None

    def setup_final_adapter_message(self, *args, **kwargs):
        """
        Use this hook to setup the final message that will go out through the
        communication channel
        """
        pass

    def send_adapter_communication(self):
        """
        Setup the actual logic to send the message via the adapter
        """
        pass

    def send_message(self):
        """Send the staged payload; return the final communication status.

        Sends either once per recipient (loop_individual_recipient) or a
        single message to all recipients, recording success/error on each
        PayloadDetail row and a summary on the Payload.
        """
        # Bug fix: the previous guards wrapped attribute lookups in
        # `except NameError`, which an attribute lookup never raises —
        # a missing attribute surfaced as a raw AttributeError and the
        # friendly errors were unreachable. hasattr() makes them fire.
        if not hasattr(self, 'connection'):
            raise ValidationError("No valid connection present to send message")
        self.load_communication_settings()
        self.validate_message()
        if not hasattr(self, 'loop_individual_recipient'):
            raise ValidationError("'loop_individual_recipient' flag was not found. The default adapter "
                                  "expects the inheritor to set this flag. Set this flag in the "
                                  "'load_communication_settings' method")
        self.setup_initial_adapter_message()
        self.message.set_in_progress()
        self.message.save()
        error_free = True
        raised_exception = None
        if self.loop_individual_recipient:
            # one adapter send per recipient; failures are recorded per row
            # and do not stop the remaining recipients
            for each_recipient in self.message_recipients:
                self.setup_final_adapter_message(recipient=each_recipient)
                try:
                    self.send_adapter_communication()
                    each_recipient.set_success()
                    each_recipient.set_send_time()
                    each_recipient.communication_status_message = 'Success!'
                    each_recipient.save()
                except self.message_send_exceptions as e:
                    raised_exception = e
                    each_recipient.set_error()
                    each_recipient.communication_status_message = 'Error: %s' % (e.args[0])
                    each_recipient.save()
                    error_free = False
        else:
            # one adapter send covering all recipients; all rows share the
            # same outcome
            self.setup_final_adapter_message()
            try:
                self.send_adapter_communication()
                for each_recipient in self.message_recipients:
                    each_recipient.set_success()
                    each_recipient.set_send_time()
                    each_recipient.save()
            except self.message_send_exceptions as e:
                error_free = False
                raised_exception = e
                for each_recipient in self.message_recipients:
                    each_recipient.set_error()
                    each_recipient.communication_status_message = 'Error: %s' % (e.args[0])
                    each_recipient.save()
        if error_free:
            self.message.set_success()
            self.message.communication_status_message = 'Message appears to be successfully sent!'
        else:
            self.message.set_error()
            self.message.communication_status_message = raised_exception.args[0]
        self.message.save()
        return self.message.communication_status
class EmailMessageAdapter(MessageAdapter):
    """Send a staged payload over SMTP using Django's email framework."""

    def get_connection(self):
        """Build an SMTP connection object from the selected profile."""
        try:
            connection = get_connection(host=self.profile.smtp_server,
                                        port=self.profile.smtp_port,
                                        username=self.profile.user_name,
                                        password=self.profile.password,
                                        use_tls=self.profile.use_tls,
                                        use_ssl=self.profile.use_ssl)
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed
            raise ValidationError('Could not setup SMTP connection. Check connectivity or credentials.')
        self.connection = connection

    def close_connection(self):
        self.connection.close()

    def load_communication_settings(self):
        """Derive per-send settings: visibility, body subtype, munging."""
        super(EmailMessageAdapter, self).load_communication_settings()
        self.recipient_visibility = self.message.recipient_visibility
        self.default_email_type = _get_param('default_email_type').lower()
        # send one message per recipient only when every recipient must be
        # hidden from the others (the last RECIPIENT_VISIBILITY choice)
        self.loop_individual_recipient = (
            self.recipient_visibility == comm_settings.RECIPIENT_VISIBILITY[-1][0])
        self.message_send_exceptions = ValidationError
        # render markdown to HTML only when the configured default body
        # type asks for it; otherwise pass the body through untouched
        self._message_munge = markdown.markdown if \
            self.default_email_type == comm_settings.DEFAULT_EMAIL_TYPE[0][0] \
            else (lambda x: x)

    def setup_initial_adapter_message(self):
        """Create the Django message with subject/body/sender filled in."""
        self.adapter_message = EmailMultiAlternatives()
        self.adapter_message.subject = self.message.communication_title
        self.adapter_message.from_email = "%s <%s>" % (self.profile.sender_name,
                                                       self.profile.user_name)
        self.adapter_message.body = self._message_munge(self.message.communication_message)
        self.adapter_message.content_subtype = self.default_email_type
        self.adapter_message.connection = self.connection

    def setup_final_adapter_message(self, *args, **kwargs):
        """Fill in recipients; pass ``recipient=`` for per-recipient sends."""
        recipient = kwargs.get('recipient')
        if recipient:
            # Bug fix: EmailMessage.to must be a sequence of addresses.
            # Assigning the bare address string made Django join its
            # individual characters with ", " when building the header.
            self.adapter_message.to = [recipient.communication_recipient]
            return
        if self.recipient_visibility == "BCC":
            self.adapter_message.bcc = [x.communication_recipient for
                                        x in self.message_recipients]
        elif self.recipient_visibility == "TO/CC":
            self.adapter_message.to = [self.message_recipients[0].communication_recipient]
            self.adapter_message.cc = [x.communication_recipient for
                                       x in self.message_recipients[1:]]
        elif self.recipient_visibility == "TO/BCC":
            self.adapter_message.to = [self.message_recipients[0].communication_recipient]
            self.adapter_message.bcc = [x.communication_recipient for
                                        x in self.message_recipients[1:]]

    def send_adapter_communication(self):
        try:
            self.adapter_message.send()
        except Exception:
            # narrowed from a bare `except:`
            raise ValidationError('Error sending message ==> %s' % (self.message))
class SendGridMessageAdapter(EmailMessageAdapter):
    # Sends via the legacy sendgrid (v2) web API instead of SMTP; reuses
    # EmailMessageAdapter.load_communication_settings for visibility and
    # markdown munging, overriding only the connection/message plumbing.
    def get_connection(self):
        # NOTE(review): SendGridClient here is constructed with only the
        # user name — verify whether an API key/password argument is also
        # required by the installed sendgrid version.
        try:
            connection = sendgrid.SendGridClient(self.profile.user_name,
                                                 raise_errors=True)
        except (sendgrid.SendGridClientError, sendgrid.SendGridServerError):
            raise ValidationError('Could not setup SendGrid connection. Check connectivity or credentials.')
        self.connection = connection
    def close_connection(self):
        # The web API is stateless; nothing to close.
        pass
    def load_communication_settings(self):
        super(SendGridMessageAdapter, self).load_communication_settings()
        # replace the parent's ValidationError with sendgrid's own errors
        self.message_send_exceptions = (sendgrid.SendGridClientError, sendgrid.SendGridServerError)
    def setup_initial_adapter_message(self):
        # Build a sendgrid Mail with both HTML (possibly markdown-rendered)
        # and plain-text bodies.
        self.adapter_message = sendgrid.Mail()
        self.adapter_message.set_from('%s <%s>' %(self.profile.sender_name,
                                                  self.profile.sender_id))
        self.adapter_message.set_subject(self.message.communication_title)
        self.adapter_message.set_html(self._message_munge(self.message.communication_message))
        self.adapter_message.set_text(self.message.communication_message)
    def setup_final_adapter_message(self, *args, **kwargs):
        recipient = kwargs.get('recipient')
        if recipient:
            # Rebuild the Mail from scratch per recipient so earlier
            # recipients do not accumulate on the same message object.
            self.setup_initial_adapter_message()
            self.adapter_message.add_to(recipient.communication_recipient)
            return
        if self.recipient_visibility == "BCC":
            self.adapter_message.add_bcc([x.communication_recipient for
                                          x in self.message_recipients])
        elif self.recipient_visibility == "TO/CC":
            self.adapter_message.add_to(self.message_recipients[0].communication_recipient)
            self.adapter_message.add_cc([x.communication_recipient for
                                         x in self.message_recipients[1:]])
        elif self.recipient_visibility == "TO/BCC":
            self.adapter_message.add_to(self.message_recipients[0].communication_recipient)
            self.adapter_message.add_bcc([x.communication_recipient for
                                          x in self.message_recipients[1:]])
    def send_adapter_communication(self):
        self.connection.send(self.adapter_message)
class SMSCountryMessageAdapter(MessageAdapter):
    """Send a staged payload as SMS through the SMSCountry HTTP API."""

    def get_connection(self):
        # The "connection" is simply the API endpoint URL; credentials
        # travel with every request in the query parameters.
        self.connection = _get_param('smscountry_api_url')
        params = base_smscountry_parms.copy()
        params['user'] = self.profile.user_name
        params['passwd'] = self.profile.password
        self.adapter_message = params

    def validate_message(self):
        """Reject messages longer than the configured SMS limit."""
        body_length = len(self.message.communication_message)
        if body_length <= self.allowed_sms_length:
            return
        raise ValidationError('SMS length exceeds allowed limit. '
                              'Allowed ==> %d, '
                              'Provided ==> %d' % (self.allowed_sms_length,
                                                   body_length))

    def load_communication_settings(self):
        self.allowed_sms_length = _get_param('sms_length_limit')
        # SMS always goes out one recipient at a time.
        self.loop_individual_recipient = True
        self.message_send_exceptions = ValidationError

    def setup_initial_adapter_message(self):
        self.adapter_message['message'] = self.message.communication_message

    def setup_final_adapter_message(self, *args, **kwargs):
        current_recipient = kwargs.get('recipient')
        self.adapter_message['mobilenumber'] = current_recipient.communication_recipient

    def send_adapter_communication(self):
        response = requests.get(self.connection, params=self.adapter_message)
        # The gateway signals success with a response body starting "OK:".
        if response.text[:3] != "OK:":
            raise ValidationError('SMS send failed for %s. '
                                  'Recipient at failure was \'%s\'' % (self.message,
                                                                       self.adapter_message['mobilenumber']))
class PushoverMessageAdapter(MessageAdapter):
    """Send a staged payload as Pushover notifications, one per user key."""

    def get_connection(self):
        # The "connection" is just the Pushover API endpoint URL.
        self.connection = _get_param('pushover_api_url')

    def load_communication_settings(self):
        # Each recipient has an individual user key, so send one at a time.
        self.loop_individual_recipient = True
        self.message_send_exceptions = ValidationError

    def setup_initial_adapter_message(self):
        # The profile's user_name holds the application token.
        self.adapter_message = {
            'message': self.message.communication_message,
            'token': self.profile.user_name,
        }

    def setup_final_adapter_message(self, *args, **kwargs):
        current_recipient = kwargs.get('recipient')
        self.adapter_message['user'] = current_recipient.communication_recipient

    def send_adapter_communication(self):
        response = requests.post(self.connection, params=self.adapter_message)
        if response.status_code != 200:
            raise ValidationError('Could not push for user %s. Error message received was'
                                  '"%s"' %(self.adapter_message['user'], response.text))
def stage_communication(communication_type="EMail", role_groups=None,
                        communication_recipients=None, communication_context="Communication",
                        communication_title="", communication_message="",
                        recipient_visibility=""):
    """Create a pending Payload plus one PayloadDetail per recipient.

    Recipients come from the members of the named ``role_groups`` (using
    the contact field appropriate for the channel) and/or the explicit
    ``communication_recipients`` list, de-duplicated. Returns the new
    payload's communication_hash, which send_communication() consumes.

    Raises ValidationError on missing recipients/title/message or on an
    unknown type/context/visibility value.
    """
    if not (role_groups or communication_recipients):
        raise ValidationError("Specify at least one recipient")
    if communication_type not in [x[0] for x in COMMUNICATION_TYPES]:
        raise ValidationError("Invalid communication type '%s'" % communication_type)
    if communication_context not in [x[0] for x in COMMUNICATION_CONTEXTS]:
        raise ValidationError("Invalid communication context '%s'" % communication_context)
    if not (communication_title and communication_message):
        raise ValidationError("Specify message and title")
    # default to the last (most private) visibility choice when unspecified
    if not recipient_visibility:
        recipient_visibility = RECIPIENT_VISIBILITY[-1][0]
    else:
        if recipient_visibility not in [x[0] for x in RECIPIENT_VISIBILITY]:
            raise ValidationError("Invalid recipient visibility '%s'" % recipient_visibility)
    message = Payload()
    message.communication_title = communication_title
    message.communication_message = communication_message
    message.communication_type = communication_type
    message.communication_context = communication_context
    message.recipient_visibility = recipient_visibility
    message.save()
    contact_set = []
    # map each channel to the Contact field that holds its address
    # (EMail/SendGrid -> primary_email, SMS -> primary_mobile,
    # Pushover -> pushover_token); relies on COMMUNICATION_TYPES order
    contact_field_map = dict(zip([x[0] for x in COMMUNICATION_TYPES],
                                 ['primary_email', 'primary_mobile', 'primary_email', 'pushover_token']))
    for each_role in (role_groups or []):
        curr_role_group = RoleGroup.objects.get(role_name=each_role)
        # filter(bool, ...) drops contacts with an empty address field
        contact_set.extend(filter(bool, [getattr(x, contact_field_map[communication_type])
                                         for x in curr_role_group.contacts]))
    contact_set.extend(communication_recipients or [])
    # de-duplicate and stage one detail row per distinct address
    for each_contact in sorted(list(set(contact_set))):
        new_recipient = PayloadDetail()
        new_recipient.communication = message
        new_recipient.communication_recipient = each_contact
        new_recipient.save()
    return message.communication_hash
def send_communication(communication_type="EMail",
                       message_key="", *args, **kwargs):
    """Dispatch a previously staged payload through its channel adapter.

    Looks up the adapter class named in the 'communication_dispatcher'
    setting, instantiates it with the message key and either sends
    synchronously or enqueues the send on RQ when settings.ASYNC is set.
    Returns the resulting status; raises ValidationError when no adapter
    exists or a synchronous send does not report success.
    """
    this_module = modules[__name__]
    api_full_name = _get_param('communication_dispatcher')[communication_type]
    api_name = api_full_name.split('.')[-1]
    # Bug fix: getattr() without a default raises AttributeError for a
    # missing name, so the explicit "no API defined" error below was
    # unreachable; passing None as the default lets it fire.
    message_api = getattr(this_module, api_name, None)
    if not message_api:
        raise ValidationError("No API has been defined for communication type '%s'"
                              % (communication_type))
    message_container = message_api(message_key=message_key)
    if settings.ASYNC:
        # hand the send to a worker; the outcome is not known yet
        django_rq.enqueue(message_container.send_message)
        message_status = "Complete"
    else:
        message_status = message_container.send_message()
    # NOTE(review): in the async branch message_status is the literal
    # "Complete" — confirm this equals COMMUNICATION_STATUS[2][0],
    # otherwise every async send raises below.
    if message_status != comm_settings.COMMUNICATION_STATUS[2][0]:
        # (fixed missing space between "payload" and "for" in the message)
        raise ValidationError("Message send for key '%s' does not report success. "
                              "Review communication status inside payload "
                              "for error details" % (message_key))
    return message_status
# setup canned functions for various scenarios: each partial pre-binds the
# channel (and, for *_transactional variants, a fixed context/title) so
# callers only supply recipients and the message body.
stage_pushover = partial(stage_communication, communication_type="Pushover",
                         communication_context = "Transactional",
                         communication_title = "Pushover Notification")
stage_email_transactional = partial(stage_communication, communication_type="EMail",
                                    communication_context = "Transactional",
                                    communication_title = "Email Notification")
stage_email = partial(stage_communication, communication_type="EMail",
                      communication_context = "Communication")
stage_sendgrid = partial(stage_communication, communication_type="SendGrid",
                         communication_context = "Communication")
stage_sendgrid_transactional = partial(stage_communication, communication_type="SendGrid",
                                       communication_context = "Transactional",
                                       communication_title = "Sendgrid Notification")
stage_sms = partial(stage_communication, communication_type="SMS",
                    communication_context = "Communication")
stage_sms_transactional = partial(stage_communication, communication_type="SMS",
                                  communication_context = "Transactional",
                                  communication_title = "SMS Notification")
| 42.346809 | 113 | 0.641562 |
b22af5fb06bb9f4ac6c404e8c4e6c0610c0754ba | 3,650 | py | Python | 2019/exercise3_R/reinforcement_learning/agent/dqn_agent.py | f2010126/DL_Labs | ee81d8aa6027846fc32c98feb9079211c59aa0e9 | [
"BSD-3-Clause"
] | null | null | null | 2019/exercise3_R/reinforcement_learning/agent/dqn_agent.py | f2010126/DL_Labs | ee81d8aa6027846fc32c98feb9079211c59aa0e9 | [
"BSD-3-Clause"
] | null | null | null | 2019/exercise3_R/reinforcement_learning/agent/dqn_agent.py | f2010126/DL_Labs | ee81d8aa6027846fc32c98feb9079211c59aa0e9 | [
"BSD-3-Clause"
] | null | null | null | import tensorflow as tf
import numpy as np
import torch
import torch.optim as optim

from dqn.replay_buffer import ReplayBuffer
def soft_update(target, source, tau):
    """Polyak-average the source network's weights into the target network.

    Each target parameter becomes ``(1 - tau) * target + tau * source``,
    so the target tracks the source slowly (speed controlled by tau).
    """
    for dst, src in zip(target.parameters(), source.parameters()):
        blended = dst.data * (1.0 - tau) + src.data * tau
        dst.data.copy_(blended)
class DQNAgent:

    def __init__(self, Q, Q_target, num_actions, gamma=0.95, batch_size=64, epsilon=0.1, tau=0.01, lr=1e-4, history_length=0):
        """
        Q-Learning agent for off-policy TD control using Function Approximation.
        Finds the optimal greedy policy while following an epsilon-greedy policy.
        Args:
           Q: Action-Value function estimator (Neural Network)
           Q_target: Slowly updated target network to calculate the targets.
           num_actions: Number of actions of the environment.
           gamma: discount factor of future rewards.
           batch_size: Number of samples per batch.
           tau: indicates the speed of adjustment of the slowly updated target network.
           epsilon: Chance to sample a random action. Float betwen 0 and 1.
           lr: learning rate of the optimizer
        """
        # setup networks (NOTE(review): assumes a CUDA device is available)
        self.Q = Q.cuda()
        self.Q_target = Q_target.cuda()
        # start the target network as an exact copy of the online network
        self.Q_target.load_state_dict(self.Q.state_dict())
        # define replay buffer
        self.replay_buffer = ReplayBuffer(history_length)
        # parameters
        self.batch_size = batch_size
        self.gamma = gamma
        self.tau = tau
        self.epsilon = epsilon
        self.loss_function = torch.nn.MSELoss()
        self.optimizer = optim.Adam(self.Q.parameters(), lr=lr)
        self.num_actions = num_actions

    def train(self, state, action, next_state, reward, terminal):
        """
        This method stores a transition to the replay buffer and updates the Q networks.
        """
        # TODO:
        # 1. add current transition to replay buffer
        # 2. sample next batch and perform batch update:
        #       2.1 compute td targets and loss
        #              td_target =  reward + discount * max_a Q_target(next_state_batch, a)
        #       2.2 update the Q network
        #       2.3 call soft update for target network
        #           soft_update(self.Q_target, self.Q, self.tau)

    def act(self, state, deterministic):
        """
        This method creates an epsilon-greedy policy based on the Q-function approximator and epsilon (probability to select a random action)
        Args:
            state: current state input
            deterministic:  if True, the agent should execute the argmax action (False in training, True in evaluation)
        Returns:
            action id
        """
        # Bug fix: the previous version had comment-only if/else branches,
        # which is a SyntaxError — the module could not even be imported.
        r = np.random.uniform()
        if deterministic or r > self.epsilon:
            # greedy action: argmax over the online network's action values
            state_tensor = torch.from_numpy(
                np.asarray(state, dtype=np.float32)).unsqueeze(0).cuda()
            with torch.no_grad():
                action_id = int(torch.argmax(self.Q(state_tensor)).item())
        else:
            # uniform random exploration over all actions.
            # NOTE(review): for CarRacing, uniform sampling explores poorly;
            # per the original exercise hint, prefer biased probabilities
            # (favouring acceleration / going straight) that sum to 1.
            action_id = np.random.randint(self.num_actions)
        return action_id

    def save(self, file_name):
        torch.save(self.Q.state_dict(), file_name)

    def load(self, file_name):
        self.Q.load_state_dict(torch.load(file_name))
        self.Q_target.load_state_dict(torch.load(file_name))
| 40.555556 | 161 | 0.633151 |
effa28cc6cfb1b871a263cc249fb5ad03d2d17ef | 1,963 | py | Python | firmwire/vendor/mtk/mtk_task.py | j4s0n/FirmWire | d3a20e2429cb4827f538d1a16163afde8b45826b | [
"BSD-3-Clause"
] | null | null | null | firmwire/vendor/mtk/mtk_task.py | j4s0n/FirmWire | d3a20e2429cb4827f538d1a16163afde8b45826b | [
"BSD-3-Clause"
] | null | null | null | firmwire/vendor/mtk/mtk_task.py | j4s0n/FirmWire | d3a20e2429cb4827f538d1a16163afde8b45826b | [
"BSD-3-Clause"
] | null | null | null | ## Copyright (c) 2022, Team FirmWire
## SPDX-License-Identifier: BSD-3-Clause
import inspect
from struct import unpack
# Byte layout of the in-memory task structure. The TASK_*_OFFSET names are
# discovered by MtkTask.__init__ via the module globals, so the naming
# pattern (TASK_<field>_OFFSET) is load-bearing.
TASK_STRUCT_SIZE = 0x20
TASK_NAME_PTR_OFFSET = 0
TASK_SCHED_PRIO_OFFSET = 8
TASK_STACKSIZE_OFFSET = 0xC
TASK_MAIN_FN_OFFSET = 0x10
class TaskEntry:
    # Placeholder: constructed with a parent task and an offset but stores
    # nothing yet. NOTE(review): confirm whether this stub is still needed.
    def __init__(self, parent_task, offset):
        pass
class MtkTask:
    """Struct-like view over a raw MediaTek task record.

    Fields are exposed as attributes (``name_ptr``, ``sched_prio``,
    ``stacksize``, ``main_fn``) derived from the module-level
    TASK_*_OFFSET constants; reads/writes go through ``self.data`` as
    little-endian 32-bit integers.
    """
    def __getattribute__(self, name):
        # '__dict__' and 'entries' must bypass the field machinery to
        # avoid infinite recursion when looking up self.entries below.
        if name == "__dict__" or name == "entries":
            return super().__getattribute__(name)
        elif name in self.entries:
            # field access: decode a little-endian u32 at the field offset
            off = self.entries[name]
            return unpack("<I", self.data[off : off + 4])[0]
        return super().__getattribute__(name)
    def __setattr__(self, name, value):
        if name == "entries":
            super().__setattr__(name, value)
        elif name in self.entries:
            # field write: encode into the backing bytearray
            off = self.entries[name]
            self.set_int(off, value)
        else:
            super().__setattr__(name, value)
    def set_int(self, off, raw):
        # store `raw` as a little-endian u32 at byte offset `off`
        self.data[off : off + 4] = raw.to_bytes(4, byteorder="little")
    def __init__(self, raw_bytes=None, name_ptr=None, create_fn=None, sched_prio=None):
        """
        Creates a task data based on the input.
        If raw_bytes is not set, all non specified fields are initialized to 0
        """
        glb = inspect.stack()[0].frame.f_globals # get globals on file scope
        # collect TASK_<field>_OFFSET globals and map 'field' -> offset
        offsets = [
            g for g in filter(lambda x: x[0:5] == "TASK_" and x[-7:] == "_OFFSET", glb)
        ]
        self.entries = {o[5:-7].lower(): glb[o] for o in offsets}
        self.data = (
            bytearray(TASK_STRUCT_SIZE) if raw_bytes is None else bytearray(raw_bytes)
        )
        # convenience initializers for the commonly set fields
        if name_ptr:
            self.set_int(TASK_NAME_PTR_OFFSET, name_ptr)
        if create_fn:
            self.set_int(TASK_MAIN_FN_OFFSET, create_fn)
        if sched_prio:
            self.set_int(TASK_SCHED_PRIO_OFFSET, sched_prio)
if __name__ == "__main__":
    # Smoke test: construct an empty task record.
    # Bug fix: the class is named MtkTask; `Task()` raised a NameError.
    t = MtkTask()
| 29.742424 | 87 | 0.613347 |
e11cd57261f64cfb9f7914fcca7a10a5b3762f5e | 83 | py | Python | re_gen/__init__.py | lahwran/regex_generator | 4494287fcb2a9c1686a0f6381f8347f45cc15d78 | [
"MIT"
] | 1 | 2017-05-25T10:32:04.000Z | 2017-05-25T10:32:04.000Z | re_gen/__init__.py | lahwran/regex_generator | 4494287fcb2a9c1686a0f6381f8347f45cc15d78 | [
"MIT"
] | null | null | null | re_gen/__init__.py | lahwran/regex_generator | 4494287fcb2a9c1686a0f6381f8347f45cc15d78 | [
"MIT"
] | null | null | null | # Copyright (c) 2012
# Licensed under the terms of the MIT license; see LICENSE.txt | 41.5 | 62 | 0.759036 |
3e3fa0570fdaf3ce7fc9c53fcb46ca418066aa8a | 10,311 | py | Python | tests/test_models/test_video_interpolator/test_basic_interpolator.py | wangruohui/mmediting | 6577d307caf9edfb34c6e46547994e6314fffc37 | [
"Apache-2.0"
] | 45 | 2022-03-05T06:54:34.000Z | 2022-03-30T02:15:42.000Z | tests/test_models/test_video_interpolator/test_basic_interpolator.py | wangruohui/mmediting | 6577d307caf9edfb34c6e46547994e6314fffc37 | [
"Apache-2.0"
] | 1 | 2022-03-25T14:04:39.000Z | 2022-03-31T04:48:38.000Z | tests/test_models/test_video_interpolator/test_basic_interpolator.py | wangruohui/mmediting | 6577d307caf9edfb34c6e46547994e6314fffc37 | [
"Apache-2.0"
] | 1 | 2022-03-24T05:07:29.000Z | 2022-03-24T05:07:29.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import mmcv
import pytest
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.losses import L1Loss
from mmedit.models.registry import COMPONENTS
@COMPONENTS.register_module()
class InterpolateExample(nn.Module):
    """Toy interpolation network for exercising BasicInterpolator.

    Convolves only the first frame of the input stack, producing a
    single-frame (n, c, h, w) output.
    """

    def __init__(self):
        super().__init__()
        self.layer = nn.Conv2d(
            in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        first_frame = x[:, 0]
        return self.layer(first_frame)

    def init_weights(self, pretrained=None):
        # Nothing to initialise for this toy module.
        pass
@COMPONENTS.register_module()
class InterpolateExample2(nn.Module):
    """Toy interpolation network for exercising BasicInterpolator.

    Like InterpolateExample, but keeps a time axis: the convolved first
    frame is returned with shape (n, 1, c, h, w).
    """

    def __init__(self):
        super().__init__()
        self.layer = nn.Conv2d(
            in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        out = self.layer(x[:, 0])
        return out.unsqueeze(1)

    def init_weights(self, pretrained=None):
        # Nothing to initialise for this toy module.
        pass
def test_basic_interpolator():
    """End-to-end checks for ``BasicInterpolator``.

    Covers: construction, train/test forward passes (CPU and, if
    available, GPU), ``forward_dummy``, ``train_step``, metric evaluation
    with image saving, 5D model outputs, and the ``merge_frames`` /
    ``split_frames`` helpers.
    """
    model_cfg = dict(
        type='BasicInterpolator',
        generator=dict(type='InterpolateExample'),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))

    train_cfg = None
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'BasicInterpolator'
    assert isinstance(restorer.generator, InterpolateExample)
    assert isinstance(restorer.pixel_loss, L1Loss)

    # prepare data: two input frames, one target (interpolated) frame
    inputs = torch.rand(1, 2, 3, 20, 20)
    target = torch.rand(1, 3, 20, 20)
    data_batch = {'inputs': inputs, 'target': target}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(params=restorer.parameters()))
    }

    # test forward train
    outputs = restorer(**data_batch, test_mode=False)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['losses'], dict)
    assert isinstance(outputs['losses']['loss_pix'], torch.FloatTensor)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['inputs'], data_batch['inputs'])
    assert torch.equal(outputs['results']['target'], data_batch['target'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 20, 20)

    # test forward_test
    with torch.no_grad():
        restorer.val_step(data_batch)
        outputs = restorer(**data_batch, test_mode=True)
    assert torch.equal(outputs['inputs'], data_batch['inputs'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 20, 20)
    assert outputs['output'].max() <= 1.
    assert outputs['output'].min() >= 0.

    # test forward_dummy
    with torch.no_grad():
        output = restorer.forward_dummy(data_batch['inputs'])
    assert torch.is_tensor(output)
    assert output.size() == (1, 3, 20, 20)

    # test train_step
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['inputs'], data_batch['inputs'])
    assert torch.equal(outputs['results']['target'], data_batch['target'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 20, 20)

    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer['generator'] = obj_from_dict(
            optim_cfg, torch.optim, dict(params=restorer.parameters()))
        data_batch = {'inputs': inputs.cuda(), 'target': target.cuda()}

        # test forward train
        outputs = restorer(**data_batch, test_mode=False)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['losses'], dict)
        assert isinstance(outputs['losses']['loss_pix'],
                          torch.cuda.FloatTensor)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['inputs'],
                           data_batch['inputs'].cpu())
        assert torch.equal(outputs['results']['target'],
                           data_batch['target'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 20, 20)

        # forward_test
        with torch.no_grad():
            restorer.val_step(data_batch)
            outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['inputs'], data_batch['inputs'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 20, 20)
        assert outputs['output'].max() <= 1.
        assert outputs['output'].min() >= 0.

        # train_step
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['inputs'],
                           data_batch['inputs'].cpu())
        assert torch.equal(outputs['results']['target'],
                           data_batch['target'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 20, 20)

    # test with metric and save image
    test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
    test_cfg = mmcv.Config(test_cfg)

    data_batch = {
        'inputs': inputs,
        'target': target,
        'meta': [{
            'key': '000001/0000',
            'target_path': 'fake_path/fake_name.png'
        }]
    }

    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    with pytest.raises(AssertionError):
        # evaluation with metrics must have target images
        restorer(inputs=inputs, test_mode=True)

    with tempfile.TemporaryDirectory() as tmpdir:
        outputs = restorer(
            **data_batch,
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=None)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        outputs = restorer(
            **data_batch,
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=100)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        outputs = restorer(
            inputs=inputs,
            target=target,
            meta=[{
                'key':
                    '000001/0000',
                'inputs_path':
                    ['fake_path/fake_name.png', 'fake_path/fake_name.png']
            }],
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=100)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        with pytest.raises(ValueError):
            # iteration should be number or None
            restorer(
                **data_batch,
                test_mode=True,
                save_image=True,
                save_path=tmpdir,
                iteration='100')

    # test forward_test when output.shape==5
    model_cfg = dict(
        type='BasicInterpolator',
        generator=dict(type='InterpolateExample2'),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
    train_cfg = None
    test_cfg = None
    restorer = build_model(
        model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    # BUGFIX: the previous TemporaryDirectory is deleted when its `with`
    # block exits, so saving images below needs a fresh, still-existing
    # directory (the original code reused the stale `tmpdir` name here).
    with tempfile.TemporaryDirectory() as tmpdir:
        outputs = restorer(
            inputs=inputs,
            target=target.unsqueeze(1),
            meta=[{
                'key':
                    '000001/0000',
                'inputs_path':
                    ['fake_path/fake_name.png', 'fake_path/fake_name.png']
            }],
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=100)
        outputs = restorer(
            inputs=inputs,
            target=target.unsqueeze(1),
            meta=[{
                'key':
                    '000001/0000',
                'inputs_path':
                    ['fake_path/fake_name.png', 'fake_path/fake_name.png']
            }],
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=None)
        with pytest.raises(ValueError):
            # iteration should be number or None
            restorer(
                **data_batch,
                test_mode=True,
                save_image=True,
                save_path=tmpdir,
                iteration='100')

    # test merge_frames
    input_tensors = torch.rand(2, 2, 3, 256, 256)
    output_tensors = torch.rand(2, 1, 3, 256, 256)
    result = restorer.merge_frames(input_tensors, output_tensors)
    assert isinstance(result, list)
    assert len(result) == 5
    assert result[0].shape == (256, 256, 3)

    # test split_frames
    tensors = torch.rand(1, 10, 3, 256, 256)
    result = restorer.split_frames(tensors)
    assert isinstance(result, torch.Tensor)
    assert result.shape == (9, 2, 3, 256, 256)

    # test evaluate 5d output
    test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
    test_cfg = mmcv.Config(test_cfg)
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    output = torch.rand(1, 2, 3, 256, 256)
    target = torch.rand(1, 2, 3, 256, 256)
    restorer.evaluate(output, target)
| 35.071429 | 78 | 0.600427 |
2ba692d802bf9f06f9a5b92c9b00d7c5597ce6eb | 3,447 | py | Python | huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/list_retirable_grants_request.py | githubmilesma/huaweicloud-sdk-python-v3 | 9d9449ed68a609ca65f0aa50b5b2a1c28445bf03 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/list_retirable_grants_request.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/list_retirable_grants_request.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
import pprint
import re
import six
class ListRetirableGrantsRequest:
    """Request model for the KMS ``list-retirable-grants`` API.

    Generated-SDK style model: ``openapi_types`` maps attribute names to
    their declared types and ``attribute_map`` maps them to the JSON keys
    used on the wire.
    """

    # attributes whose values are masked as "****" in to_dict()
    sensitive_list = []

    openapi_types = {
        'version_id': 'str',
        'body': 'ListRetirableGrantsRequestBody'
    }

    attribute_map = {
        'version_id': 'version_id',
        'body': 'body'
    }

    def __init__(self, version_id=None, body=None):
        """ListRetirableGrantsRequest - a model defined in huaweicloud sdk"""
        self._version_id = None
        self._body = None
        self.discriminator = None
        self.version_id = version_id
        if body is not None:
            self.body = body

    @property
    def version_id(self):
        """Gets the version_id of this ListRetirableGrantsRequest.

        :return: The version_id of this ListRetirableGrantsRequest.
        :rtype: str
        """
        return self._version_id

    @version_id.setter
    def version_id(self, version_id):
        """Sets the version_id of this ListRetirableGrantsRequest.

        :param version_id: The version_id of this ListRetirableGrantsRequest.
        :type: str
        """
        self._version_id = version_id

    @property
    def body(self):
        """Gets the body of this ListRetirableGrantsRequest.

        :return: The body of this ListRetirableGrantsRequest.
        :rtype: ListRetirableGrantsRequestBody
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this ListRetirableGrantsRequest.

        :param body: The body of this ListRetirableGrantsRequest.
        :type: ListRetirableGrantsRequestBody
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict, recursively serializing
        nested models (anything exposing ``to_dict``) and masking
        attributes listed in ``sensitive_list``."""
        result = {}
        # NOTE: modernized from six.iteritems() — plain dict.items() is
        # equivalent on Python 3 and drops the py2 compatibility shim.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListRetirableGrantsRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 25.917293 | 77 | 0.561648 |
a7deb4a0a74056224e56cd2f3bd938e8e247d036 | 4,261 | py | Python | numba/tests/test_recursion.py | mawanda-jun/numba | 8c6658375c1f8fe50e1a5ccd11d4e7bf5a8053de | [
"BSD-2-Clause",
"Apache-2.0"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/tests/test_recursion.py | olivier-be/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | [
"AML"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/tests/test_recursion.py | olivier-be/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | [
"AML"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | from __future__ import print_function, division, absolute_import
import math
import warnings
from numba import jit
from numba import unittest_support as unittest
from numba.errors import TypingError, NumbaWarning
from .support import TestCase, tag
class TestSelfRecursion(TestCase):
    """Tests for compiling directly (self-)recursive functions."""

    def setUp(self):
        # Avoid importing this module at toplevel, as it triggers compilation
        # and can therefore fail
        from . import recursion_usecases
        self.mod = recursion_usecases

    def check_fib(self, cfunc):
        # Every fibonacci variant under test must give fib(10) == 55.
        self.assertPreciseEqual(cfunc(10), 55)

    @tag('important')
    def test_global_explicit_sig(self):
        self.check_fib(self.mod.fib1)

    def test_inner_explicit_sig(self):
        self.check_fib(self.mod.fib2)

    @tag('important')
    def test_global_implicit_sig(self):
        self.check_fib(self.mod.fib3)

    def test_runaway(self):
        # Unbounded recursion must surface as a typing error rather than
        # hanging the compiler.
        with self.assertRaises(TypingError) as raises:
            self.mod.runaway_self(123)
        self.assertIn("cannot type infer runaway recursion",
                      str(raises.exception))

    def test_type_change(self):
        # Compiled result must match the pure-Python result for a
        # recursion whose argument types change between levels.
        pfunc = self.mod.make_type_change_self()
        cfunc = self.mod.make_type_change_self(jit(nopython=True))
        args = 13, 0.125
        self.assertPreciseEqual(pfunc(*args), cfunc(*args))

    def test_raise(self):
        # Exceptions raised inside a recursive function must propagate.
        with self.assertRaises(ValueError) as raises:
            self.mod.raise_self(3)
        self.assertEqual(str(raises.exception), "raise_self")

    def test_optional_return(self):
        pfunc = self.mod.make_optional_return_case()
        cfunc = self.mod.make_optional_return_case(jit(nopython=True))
        for arg in (0, 5, 10, 15):
            self.assertEqual(pfunc(arg), cfunc(arg))

    def test_growing_return_tuple(self):
        # A recursion whose inferred return tuple keeps growing cannot
        # reach a fixed point and must fail type inference.
        cfunc = self.mod.make_growing_tuple_case(jit(nopython=True))
        with self.assertRaises(TypingError) as raises:
            cfunc(100)
        self.assertIn(
            "Return type of recursive function does not converge",
            str(raises.exception),
        )
class TestMutualRecursion(TestCase):
    """Tests for compiling mutually recursive functions."""

    def setUp(self):
        from . import recursion_usecases
        self.mod = recursion_usecases

    def test_mutual_1(self):
        expect = math.factorial(10)
        self.assertPreciseEqual(self.mod.outer_fac(10), expect)

    def test_mutual_2(self):
        # Compare the compiled pair of mutually recursive functions
        # against their pure-Python counterparts over several inputs.
        pfoo, pbar = self.mod.make_mutual2()
        cfoo, cbar = self.mod.make_mutual2(jit(nopython=True))
        for x in [-1, 0, 1, 3]:
            self.assertPreciseEqual(pfoo(x=x), cfoo(x=x))
            self.assertPreciseEqual(pbar(y=x, z=1), cbar(y=x, z=1))

    def test_runaway(self):
        # Unbounded mutual recursion must surface as a typing error.
        with self.assertRaises(TypingError) as raises:
            self.mod.runaway_mutual(123)
        self.assertIn("cannot type infer runaway recursion",
                      str(raises.exception))

    @tag('important')
    def test_type_change(self):
        pfunc = self.mod.make_type_change_mutual()
        cfunc = self.mod.make_type_change_mutual(jit(nopython=True))
        args = 13, 0.125
        self.assertPreciseEqual(pfunc(*args), cfunc(*args))

    def test_four_level(self):
        # Four functions recursing through each other.
        pfunc = self.mod.make_four_level()
        cfunc = self.mod.make_four_level(jit(nopython=True))
        arg = 7
        self.assertPreciseEqual(pfunc(arg), cfunc(arg))

    def test_inner_error(self):
        # nopython mode
        cfunc = self.mod.make_inner_error(jit(nopython=True))
        with self.assertRaises(TypingError) as raises:
            cfunc(2)
        errmsg = 'Unknown attribute \'ndim\''
        self.assertIn(errmsg, str(raises.exception))
        # objectmode
        # error is never triggered, the function returns normally
        cfunc = self.mod.make_inner_error(jit)
        pfunc = self.mod.make_inner_error()
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=NumbaWarning)
            got = cfunc(6)
        self.assertEqual(got, pfunc(6))

    def test_raise(self):
        cfunc = self.mod.make_raise_mutual()#jit(nopython=True))
        with self.assertRaises(ValueError) as raises:
            cfunc(2)
        self.assertEqual(str(raises.exception), "raise_mutual")
# Allow running this test module directly with `python <module>`.
if __name__ == '__main__':
    unittest.main()
| 32.776923 | 77 | 0.652664 |
bed13f8236aaefcf8dfd8df84d4d351c9660400b | 23,495 | py | Python | tests/data/metrics/conftest.py | nickaustinlee/labelbox-python | 45eb808165849e5a55fb6869ca5cc415d74772ce | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/data/metrics/conftest.py | nickaustinlee/labelbox-python | 45eb808165849e5a55fb6869ca5cc415d74772ce | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/data/metrics/conftest.py | nickaustinlee/labelbox-python | 45eb808165849e5a55fb6869ca5cc415d74772ce | [
"Apache-2.0",
"MIT"
] | null | null | null | from io import BytesIO
from types import SimpleNamespace
import pytest
import numpy as np
from PIL import Image
import base64
class NameSpace(SimpleNamespace):
    """One metric test case: raw predictions, a Labelbox-style label
    export built around a fixed data row, and the expected metric score."""

    def __init__(self, predictions, labels, expected, classifications=None):
        label_export = {
            'DataRow ID': 'ckppihxc10005aeyjen11h7jh',
            'Labeled Data': "https://.jpg",
            'Label': {
                'objects': labels,
                'classifications': classifications or []
            }
        }
        super().__init__(predictions=predictions,
                         labels=label_export,
                         expected=expected)
@pytest.fixture
def polygon_pair():
    """Unit-square label vs a half-square prediction (expected IOU 0.5)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    unit_square = [{'x': x, 'y': y} for x, y in ((0, 0), (1, 0), (1, 1), (0, 1))]
    half_square = [{'x': x, 'y': y}
                   for x, y in ((0, 0), (1, 0), (1, 0.5), (0, 0.5))]
    return NameSpace(
        labels=[{
            'featureId': 'ckppivl7p0006aeyj92cezr9d',
            'schemaId': schema_id,
            'polygon': unit_square
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'polygon': half_square
        }],
        expected=0.5)
@pytest.fixture
def box_pair():
    """Identical label and prediction bounding boxes (expected IOU 1.0)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    bbox = {'top': 1099, 'left': 2010, 'height': 690, 'width': 591}
    return NameSpace(
        labels=[{
            'featureId': 'ckppivl7p0006aeyj92cezr9d',
            'schemaId': schema_id,
            'bbox': dict(bbox)
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'bbox': dict(bbox)
        }],
        expected=1.0)
@pytest.fixture
def unmatched_prediction():
    """One prediction overlapping the label plus one far-away extra
    prediction; the unmatched extra drags the expected score to 0.25."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'

    def poly(points):
        return [{'x': x, 'y': y} for x, y in points]

    return NameSpace(
        labels=[{
            'featureId': 'ckppivl7p0006aeyj92cezr9d',
            'schemaId': schema_id,
            'polygon': poly(((0, 0), (1, 0), (1, 1), (0, 1)))
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'polygon': poly(((0, 0), (1, 0), (1, 0.5), (0, 0.5)))
        }, {
            'uuid': 'd0ba2520-02e9-47d4-8736-088bbdbabbc3',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'polygon': poly(((10, 10), (11, 10), (11, 1.5), (10, 1.5)))
        }],
        expected=0.25)
@pytest.fixture
def unmatched_label():
    """Two labels but only one overlapping prediction; the unmatched
    label drags the expected score to 0.25."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'

    def poly(points):
        return [{'x': x, 'y': y} for x, y in points]

    return NameSpace(
        labels=[{
            'featureId': '1234567890111213141516171',
            'schemaId': schema_id,
            'polygon': poly(((0, 0), (1, 0), (1, 1), (0, 1)))
        }, {
            'featureId': 'ckppiw3bs0007aeyjs3pvrqzi',
            'schemaId': schema_id,
            'polygon': poly(((10, 10), (11, 10), (11, 11), (10, 11)))
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'polygon': poly(((0, 0), (1, 0), (1, 0.5), (0, 0.5)))
        }],
        expected=0.25)
def create_mask_url(indices, h, w, value):
    """Build an (h, w, 3) uint8 RGB mask, paint `value` at each index in
    `indices`, and return the raw bytes base64-encoded as a str."""
    canvas = np.zeros((h, w, 3), dtype=np.uint8)
    for position in indices:
        canvas[position] = value
    return base64.b64encode(canvas.tobytes()).decode('utf-8')
@pytest.fixture
def mask_pair():
    """Two-pixel label mask vs a one-pixel prediction mask
    (expected IOU 0.5)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    return NameSpace(
        labels=[{
            'featureId': '1234567890111213141516171',
            'schemaId': schema_id,
            'instanceURI': create_mask_url([(0, 0), (0, 1)], 32, 32,
                                           (255, 255, 255))
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'mask': {
                'instanceURI': create_mask_url([(0, 0)], 32, 32, (1, 1, 1)),
                'colorRGB': (1, 1, 1)
            }
        }],
        expected=0.5)
@pytest.fixture
def matching_radio():
    """Radio classification where label and prediction pick the same
    answer (expected score 1)."""
    radio_schema = 'ckrm02no8000008l3arwp6h4f'
    answer_schema = 'ckppid25v0000aeyjmxfwlc7t'
    return NameSpace(
        labels=[],
        classifications=[{
            'featureId': '1234567890111213141516171',
            'schemaId': radio_schema,
            'answer': {
                'schemaId': answer_schema
            }
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': radio_schema,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'answer': {
                'schemaId': answer_schema
            }
        }],
        expected=1.)
@pytest.fixture
def empty_radio_label():
    """Radio prediction with no corresponding label (expected score 0)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    return NameSpace(
        labels=[],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'answer': {
                'schemaId': schema_id
            }
        }],
        expected=0)
@pytest.fixture
def empty_radio_prediction():
    """Radio label with no corresponding prediction (expected score 0)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    return NameSpace(
        labels=[],
        classifications=[{
            'featureId': '1234567890111213141516171',
            'schemaId': schema_id,
            'answer': {
                'schemaId': schema_id
            }
        }],
        predictions=[],
        expected=0)
@pytest.fixture
def matching_checklist():
    """Checklist where label and prediction select the same three answers
    (expected score 1). The original literal repeated the 'schemaId' key
    with the same value; Python keeps only one, as written here."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    answer_ids = [
        'ckppid25v0000aeyjmxfwlc7t',
        'ckppide010001aeyj0yhiaghc',
        'ckppidq4u0002aeyjmcc4toxw',
    ]

    def answers(ids):
        return [{'schemaId': i} for i in ids]

    return NameSpace(
        labels=[],
        classifications=[{
            'featureId': '1234567890111213141516171',
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'answers': answers(answer_ids)
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'answers': answers(answer_ids)
        }],
        expected=1.)
@pytest.fixture
def partially_matching_checklist_1():
return NameSpace(labels=[],
classifications=[{
'featureId':
'1234567890111213141516171',
'schemaId':
'ckppid25v0000aeyjmxfwlc7t',
'uuid':
'76e0dcea-fe46-43e5-95f5-a5e3f378520a',
'schemaId':
'ckppid25v0000aeyjmxfwlc7t',
'answers': [{
'schemaId': 'ckppid25v0000aeyjmxfwlc7t',
}, {
'schemaId': 'ckppide010001aeyj0yhiaghc'
}, {
'schemaId': 'ckppidq4u0002aeyjmcc4toxw'
}, {
'schemaId': 'ckppie29m0003aeyjk1ixzcom'
}]
}],
predictions=[{
'uuid':
'76e0dcea-fe46-43e5-95f5-a5e3f378520a',
'schemaId':
'ckppid25v0000aeyjmxfwlc7t',
'dataRow': {
'id': 'ckppihxc10005aeyjen11h7jh'
},
'answers': [{
'schemaId': 'ckppid25v0000aeyjmxfwlc7t',
}, {
'schemaId': 'ckppide010001aeyj0yhiaghc'
}, {
'schemaId': 'ckppidq4u0002aeyjmcc4toxw'
}, {
'schemaId': 'ckppiebx80004aeyjuwvos69e'
}]
}],
expected=0.6)
@pytest.fixture
def partially_matching_checklist_2():
    """Label checklist of two answers vs a prediction checklist of four,
    sharing the first two (expected 0.5)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'

    def answers(ids):
        return [{'schemaId': i} for i in ids]

    return NameSpace(
        labels=[],
        classifications=[{
            'featureId': '1234567890111213141516171',
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'answers': answers([
                'ckppid25v0000aeyjmxfwlc7t',
                'ckppide010001aeyj0yhiaghc',
            ])
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'answers': answers([
                'ckppid25v0000aeyjmxfwlc7t',
                'ckppide010001aeyj0yhiaghc',
                'ckppidq4u0002aeyjmcc4toxw',
                'ckppiebx80004aeyjuwvos69e',
            ])
        }],
        expected=0.5)
@pytest.fixture
def partially_matching_checklist_3():
    """Label checklist of four answers vs a prediction checklist of two,
    sharing the first two (expected 0.5)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'

    def answers(ids):
        return [{'schemaId': i} for i in ids]

    return NameSpace(
        labels=[],
        classifications=[{
            'featureId': '1234567890111213141516171',
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'answers': answers([
                'ckppid25v0000aeyjmxfwlc7t',
                'ckppide010001aeyj0yhiaghc',
                'ckppidq4u0002aeyjmcc4toxw',
                'ckppiebx80004aeyjuwvos69e',
            ])
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'answers': answers([
                'ckppid25v0000aeyjmxfwlc7t',
                'ckppide010001aeyj0yhiaghc',
            ])
        }],
        expected=0.5)
@pytest.fixture
def empty_checklist_label():
    """Checklist prediction with no corresponding label
    (expected score 0)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    return NameSpace(
        labels=[],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'answers': [{
                'schemaId': schema_id
            }]
        }],
        expected=0)
@pytest.fixture
def empty_checklist_prediction():
    """Checklist label with no corresponding prediction
    (expected score 0)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    # NOTE: the classification entry carries a 'dataRow' key (unusual for
    # a label export) — preserved as-is from the original fixture data.
    return NameSpace(
        labels=[],
        classifications=[{
            'featureId': '1234567890111213141516171',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'answers': [{
                'schemaId': schema_id
            }]
        }],
        predictions=[],
        expected=0)
@pytest.fixture
def matching_text():
    """Free-text classification with identical answers
    (expected score 1.0)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    return NameSpace(
        labels=[],
        classifications=[{
            'featureId': '1234567890111213141516171',
            'schemaId': schema_id,
            'answer': 'test'
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'answer': 'test'
        }],
        expected=1.0)
@pytest.fixture
def not_matching_text():
    """Free-text classification with different answers
    (expected score 0)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    return NameSpace(
        labels=[],
        classifications=[{
            'featureId': '1234567890111213141516171',
            'schemaId': schema_id,
            'answer': 'test'
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'answer': 'not_test'
        }],
        expected=0.)
@pytest.fixture
def test_box_with_subclass():
    """Identical boxes carrying an identical text subclassification
    (expected score 1.0)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    bbox = {'top': 1099, 'left': 2010, 'height': 690, 'width': 591}
    return NameSpace(
        labels=[{
            'featureId': 'ckppivl7p0006aeyj92cezr9d',
            'schemaId': schema_id,
            'bbox': dict(bbox),
            'classifications': [{
                'schemaId': schema_id,
                'answer': 'test'
            }]
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'bbox': dict(bbox),
            'classifications': [{
                'schemaId': schema_id,
                'answer': 'test'
            }]
        }],
        expected=1.0)
@pytest.fixture
def test_box_with_wrong_subclass():
    """Identical boxes but differing text subclassifications
    (expected score 0.5)."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    bbox = {'top': 1099, 'left': 2010, 'height': 690, 'width': 591}
    return NameSpace(
        labels=[{
            'featureId': 'ckppivl7p0006aeyj92cezr9d',
            'schemaId': schema_id,
            'bbox': dict(bbox),
            'classifications': [{
                'schemaId': schema_id,
                'answer': 'test'
            }]
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'schemaId': schema_id,
            'bbox': dict(bbox),
            'classifications': [{
                'schemaId': schema_id,
                'answer': 'not_test'
            }]
        }],
        expected=0.5)
@pytest.fixture
def line_pair():
    """Slightly offset polylines; expected score reflects the partial
    agreement between them."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'

    def line(points):
        return [{'x': x, 'y': y} for x, y in points]

    return NameSpace(
        labels=[{
            'featureId': 'ckppivl7p0006aeyj92cezr9d',
            'schemaId': schema_id,
            'line': line(((0, 100), (0, 0)))
        }],
        predictions=[{
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'line': line(((5, 95), (0, 0)))
        }],
        expected=0.9496975567603978)
@pytest.fixture
def point_pair():
    """Label point at the origin vs a prediction offset to (5, 5);
    expected score reflects their proximity."""
    schema_id = 'ckppid25v0000aeyjmxfwlc7t'
    return NameSpace(
        labels=[{
            'featureId': 'ckppivl7p0006aeyj92cezr9d',
            'schemaId': schema_id,
            'point': {
                'x': 0,
                'y': 0
            }
        }],
        predictions=[{
            'dataRow': {
                'id': 'ckppihxc10005aeyjen11h7jh'
            },
            'uuid': '76e0dcea-fe46-43e5-95f5-a5e3f378520a',
            'schemaId': schema_id,
            'point': {
                'x': 5,
                'y': 5
            }
        }],
        expected=0.879113232477017)
| 34.653392 | 78 | 0.328325 |
bcbb72650e609ba968c7c0b6c7e63a33a6f1bbaa | 213 | py | Python | refugio/apps/usuario/urls.py | sebas095/DjangoCF | 42c359da6a92a093d17d6b8ca8dd1d2d7a161983 | [
"MIT"
] | null | null | null | refugio/apps/usuario/urls.py | sebas095/DjangoCF | 42c359da6a92a093d17d6b8ca8dd1d2d7a161983 | [
"MIT"
] | null | null | null | refugio/apps/usuario/urls.py | sebas095/DjangoCF | 42c359da6a92a093d17d6b8ca8dd1d2d7a161983 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from .views import RegistroUsuario, UserAPI
# URL routes for the "usuario" app: the registration page and the user
# API endpoint, each served by a class-based view.
urlpatterns = [
    url(r'^registrar', RegistroUsuario.as_view(), name='registrar'),
    url(r'^api', UserAPI.as_view(), name='api'),
]
aaaf12aa48354dee2789996d4a998518ecbd9a3f | 3,675 | py | Python | local_server/test/unit/data_common/fbs/test_matrix.py | prete/cellxgene | 11acea86c4b3df334300fac7e9e034c1e61e67bc | [
"MIT"
] | 3 | 2019-11-11T15:41:07.000Z | 2020-12-14T08:47:35.000Z | local_server/test/unit/data_common/fbs/test_matrix.py | prete/cellxgene | 11acea86c4b3df334300fac7e9e034c1e61e67bc | [
"MIT"
] | null | null | null | local_server/test/unit/data_common/fbs/test_matrix.py | prete/cellxgene | 11acea86c4b3df334300fac7e9e034c1e61e67bc | [
"MIT"
] | 1 | 2021-05-12T15:15:05.000Z | 2021-05-12T15:15:05.000Z | import unittest
import pandas as pd
import numpy as np
from scipy import sparse
import local_server.test.unit.decode_fbs as decode_fbs
from local_server.data_common.fbs.matrix import encode_matrix_fbs, decode_matrix_fbs
class FbsTests(unittest.TestCase):
    """Test Case for Matrix FBS data encode/decode """

    def test_encode_boundary(self):
        """ test various boundary checks """
        # row indexing is unsupported
        with self.assertRaises(ValueError):
            encode_matrix_fbs(matrix=pd.DataFrame(), row_idx=[])
        # matrix must be 2D
        with self.assertRaises(ValueError):
            encode_matrix_fbs(matrix=np.zeros((3, 2, 1)))
        with self.assertRaises(ValueError):
            encode_matrix_fbs(matrix=np.ones((10,)))

    def fbs_checks(self, fbs, dims, expected_types, expected_column_idx):
        """Decode `fbs` and verify its dimensions, per-column container
        type / dtype, and (optionally) the set of column index labels.

        `expected_types` is one (container_type, dtype_or_None) pair per
        column; a dtype of None skips the dtype check for that column.
        """
        d = decode_fbs.decode_matrix_FBS(fbs)
        self.assertEqual(d["n_rows"], dims[0])
        self.assertEqual(d["n_cols"], dims[1])
        self.assertIsNone(d["row_idx"])
        self.assertEqual(len(d["columns"]), dims[1])
        for i in range(0, len(d["columns"])):
            self.assertEqual(len(d["columns"][i]), dims[0])
            self.assertIsInstance(d["columns"][i], expected_types[i][0])
            if expected_types[i][1] is not None:
                self.assertEqual(d["columns"][i].dtype, expected_types[i][1])
        if expected_column_idx is not None:
            self.assertSetEqual(set(expected_column_idx), set(d["col_idx"]))

    def test_encode_DataFrame(self):
        # Mixed-dtype DataFrame: numeric columns come back as ndarrays
        # (with converted integer dtypes per expected_types), categorical
        # columns as plain lists.
        df = pd.DataFrame(
            data={
                "a": np.zeros((10,), dtype=np.float32),
                "b": np.ones((10,), dtype=np.int64),
                "c": np.array([i for i in range(0, 10)], dtype=np.uint16),
                "d": pd.Series(["x", "y", "z", "x", "y", "z", "a", "x", "y", "z"], dtype="category"),
            }
        )
        expected_types = ((np.ndarray, np.float32), (np.ndarray, np.int32), (np.ndarray, np.uint32), (list, None))
        fbs = encode_matrix_fbs(matrix=df, row_idx=None, col_idx=df.columns)
        self.fbs_checks(fbs, (10, 4), expected_types, ["a", "b", "c", "d"])

    def test_encode_ndarray(self):
        # Plain 2D ndarray: every column keeps the source dtype.
        arr = np.zeros((3, 2), dtype=np.float32)
        expected_types = ((np.ndarray, np.float32), (np.ndarray, np.float32), (np.ndarray, np.float32))
        fbs = encode_matrix_fbs(matrix=arr, row_idx=None, col_idx=None)
        self.fbs_checks(fbs, (3, 2), expected_types, None)

    def test_encode_sparse(self):
        # scipy CSC sparse input is densified into int columns.
        csc = sparse.csc_matrix(np.array([[0, 1, 2], [3, 0, 4]]))
        expected_types = ((np.ndarray, np.int32), (np.ndarray, np.int32), (np.ndarray, np.int32))
        fbs = encode_matrix_fbs(matrix=csc, row_idx=None, col_idx=None)
        self.fbs_checks(fbs, (2, 3), expected_types, None)

    def test_roundtrip(self):
        # encode followed by decode must preserve shape, column set and
        # per-column values.
        dfSrc = pd.DataFrame(
            data={
                "a": np.zeros((10,), dtype=np.float32),
                "b": np.ones((10,), dtype=np.int64),
                "c": np.array([i for i in range(0, 10)], dtype=np.uint16),
                "d": pd.Series(["x", "y", "z", "x", "y", "z", "a", "x", "y", "z"], dtype="category"),
            }
        )
        dfDst = decode_matrix_fbs(encode_matrix_fbs(matrix=dfSrc, col_idx=dfSrc.columns))
        self.assertEqual(dfSrc.shape, dfDst.shape)
        self.assertEqual(set(dfSrc.columns), set(dfDst.columns))
        for c in dfSrc.columns:
            self.assertTrue(c in dfDst.columns)
            if isinstance(dfSrc[c], pd.Series):
                self.assertTrue(np.all(dfSrc[c] == dfDst[c]))
            else:
                self.assertEqual(dfSrc[c], dfDst[c])
| 44.277108 | 114 | 0.58966 |
e64481d931dc07a2d19b379d1ad93faafdfe07ab | 7,407 | py | Python | SCRIPTS/DATA_PRESENT/Figure3C.py | huckgroup/formose-2021 | f1c5e809e0cbbbc744a4fe636069cdfc83ad6091 | [
"BSD-3-Clause"
] | null | null | null | SCRIPTS/DATA_PRESENT/Figure3C.py | huckgroup/formose-2021 | f1c5e809e0cbbbc744a4fe636069cdfc83ad6091 | [
"BSD-3-Clause"
] | null | null | null | SCRIPTS/DATA_PRESENT/Figure3C.py | huckgroup/formose-2021 | f1c5e809e0cbbbc744a4fe636069cdfc83ad6091 | [
"BSD-3-Clause"
] | null | null | null | '''
Example reaction networks determined from amplitude data, specifically
illustrating how the reaction pathways change as the formaldehyde
concentration is varied. Figure 2B.
'''
import sys
import pickle
import pandas as pd
import networkx as nx
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
# add the SCRIPTS directory to the system path
# so that its contents can be imported
script_dir = Path(__file__).parents[1].as_posix()
sys.path.append(script_dir)
# get the repository directory for file output
repository_dir = Path(__file__).parents[2]
from NorthNet import Classes
from helpers.layout import graphviz_layout
from helpers import chem_info as info_params
from helpers.network_load_helper import convert_to_networkx
from NorthNet.network_visualisation import coordinates as c_ops
data_folder = repository_dir/'DATA'
determined_params_dir = data_folder/'DERIVED_PARAMETERS'
plot_folder = repository_dir/'PLOTS'
exp_info_dir = repository_dir/"EXPERIMENT_INFO/Experiment_parameters.csv"
reaction_list_directory = Path(repository_dir/'REACTION_LISTS')
# load in experiment information.
exp_info = pd.read_csv(exp_info_dir, index_col = 0)
# sequences of data set keys
series_seqs = pd.read_csv(repository_dir/'EXPERIMENT_INFO/Series_info.csv', index_col = 0)
# average data for sizing nodes
average_data = pd.read_csv(determined_params_dir/'AverageData.csv', index_col = 0)
average_data = average_data.dropna(axis = 1)
# amplitude data for sizing nodes
amplitude_data = pd.read_csv(determined_params_dir/'AmplitudeData.csv', index_col = 0)
amplitude_data = amplitude_data.dropna(axis = 1)
# loading in the formose reaction as a NorthNet Network Object
formose_file = repository_dir/'FORMOSE_REACTION/FormoseReactionNetwork.pickle'
with open(formose_file, 'rb') as f:
FormoseNetwork = pickle.load(f)
# Select the series and the two data sets (second and last entries)
# whose reaction networks will be compared side by side.
series_sel = 'Formaldehyde_2_series'
file_name = 'Figure3C'

# get the experiment codes for the series
data_keys = series_seqs.loc[series_sel]
data_set_selections = list(data_keys.dropna())
data_set_selections = [data_set_selections[1], data_set_selections[-1]]
# (removed a leftover debug `print(...)` followed by `quit()`, which
# unconditionally terminated the script before any network was built
# or any figure was saved)
# Load the reaction lists determined for the selected modulated data
# sets and convert each into a networkx graph; dictionary insertion
# ordering preserves the order of data_set_selections.
networks = {}
for e in exp_info.index:
    for d in data_set_selections:
        fname = '{}_reaction_list.txt'.format(d)
        # Read every reaction line. The previous idiom
        # (`for line in f: lines = f.readlines()`) consumed the first
        # line before collecting the rest, silently dropping one
        # reaction from every list and leaving `line` unused.
        with open(reaction_list_directory/fname, 'r') as f:
            lines = f.readlines()
        rxns = [FormoseNetwork.NetworkReactions[l.strip('\n')] for l in lines]
        # NOTE(review): networks[d] is rebuilt on every pass of the
        # outer loop, so only the final experiment index `e` survives
        # as the network name -- confirm this is intentional.
        n_net = Classes.Network(rxns, e, '')
        networks[d] = convert_to_networkx(n_net)
# remove secondary reactant nodes (formaldehyde, water, hydroxide)
node_removals = ['C=O', 'O', '[OH-]']
for n in networks:
    [networks[n].remove_node(node) for node in node_removals
    if node in networks[n].nodes]
# create a network merging all of the networks
F = nx.DiGraph()
for n in networks:
    F = nx.compose(F,networks[n])
# create a layout for F (this will be the layout for each plotted network)
pos = graphviz_layout(F, render_engine = 'neato')
# use F to process coordinate system
c_ops.set_network_coords(F,pos)
c_ops.normalise_network_coordinates(F)
# get line plot for merged network
base_net_plot = c_ops.get_network_lineplot(F)
# create new position container from F
pos_norm = {n:F.nodes[n]['pos'] for n in F}
for n in networks:
    c_ops.set_network_coords(networks[n],pos_norm)
'''Add colouring information into the networks'''
for n in networks:
    for node in networks[n].nodes:
        if '>>' in node:
            # nodes containing '>>' are reaction SMILES; drawn black
            networks[n].nodes[node]['color'] = "#000000"
        else:
            networks[n].nodes[node]['color'] = info_params.colour_assignments[node]
for n in networks:
    for edge in networks[n].edges:
        # NOTE(review): `col` is only rebound when an edge endpoint
        # contains '>>'; an edge with no reaction endpoint silently
        # reuses the previous edge's colour (and the very first such
        # edge would raise NameError) -- relies on every edge touching
        # a reaction node. Confirm.
        for e in edge:
            if '>>' in e:
                col = info_params.reaction_colours[e]
        networks[n].edges[edge]['color'] = col
'''Add sizing information into networks'''
reaction_node_size = 10
compound_node_size = 40
for n in networks:
    for node in networks[n].nodes:
        if '>>' in node:
            networks[n].nodes[node]['size'] = reaction_node_size
        else:
            networks[n].nodes[node]['size'] = compound_node_size
'''Plotting series in four panels'''
# NOTE(review): despite the banner above, only two panels are drawn
# (ncols = 2, one per selected data set).
fig_width = 14/2.54 # cm conversion to inches for plt
fig_height = 8/2.54 # cm conversion to inches for plt
base_linew = 0.5
fig,ax = plt.subplots(ncols = 2,
                      figsize = (fig_width, fig_height))
axes = ax.flatten()
for c,n in enumerate(networks):
    # invisible trace of the merged network keeps identical axis limits
    # in both panels
    axes[c].plot(base_net_plot[0],base_net_plot[1],
                c = '#ffffff',
                linewidth = base_linew,
                zorder = 0, alpha = 0.0)
    # draw each reaction edge as an arrow in its assigned colour
    for e in networks[n].edges:
        arrow = FancyArrowPatch(networks[n].nodes[e[0]]['pos'],
                                networks[n].nodes[e[1]]['pos'],
                                arrowstyle='-|>',
                                path = None,
                                connectionstyle='Arc',
                                facecolor = networks[n].edges[e]['color'],
                                edgecolor = networks[n].edges[e]['color'],
                                linewidth = 1,
                                mutation_scale = 5,
                                shrinkA = 5,
                                shrinkB = 3,
                                alpha = 1,
                                zorder = 1)
        axes[c].add_patch(arrow)
    # build node scatter
    compound_nodes_x = []
    compound_nodes_y = []
    compound_node_colours = []
    compound_node_sizes = []
    reaction_nodes_x = []
    reaction_nodes_y = []
    for node in networks[n].nodes:
        if '>>' in node:
            reaction_nodes_x.append(networks[n].nodes[node]['pos'][0])
            reaction_nodes_y.append(networks[n].nodes[node]['pos'][1])
        else:
            compound_nodes_x.append(networks[n].nodes[node]['pos'][0])
            compound_nodes_y.append(networks[n].nodes[node]['pos'][1])
            compound_node_colours.append(networks[n].nodes[node]['color'])
            compound_node_sizes.append(networks[n].nodes[node]['size'])
    # plot solid scatter for compounds
    axes[c].scatter(compound_nodes_x, compound_nodes_y,
                    facecolors = compound_node_colours,
                    s = compound_node_sizes,
                    zorder = 2,
                    edgecolors = 'None',
                    alpha = 1)
    # reaction nodes as small black diamonds
    axes[c].scatter(reaction_nodes_x, reaction_nodes_y,
                    c = '#000000',
                    s = reaction_node_size,
                    marker = 'D',
                    edgecolors = 'None',
                    zorder = 2,
                    alpha = 1)
    axes[c].set_axis_off()
    # optional annotations: overlay compound numbers where assigned
    for node in networks[n].nodes:
        if node in info_params.compound_numbering:
            number = info_params.compound_numbering[node]
            axes[c].annotate(number, xy = networks[n].nodes[node]['pos'],
                            ha = 'center', va = 'center',
                            fontsize = 6)
fig.tight_layout()
plt.savefig(plot_folder/'{}.png'.format(file_name), dpi = 600)
plt.savefig(plot_folder/'{}.svg'.format(file_name))
plt.close()
| 35.271429 | 90 | 0.6371 |
59f40c52f2e688e9d51a91c17579a6ef9f16bdc4 | 21,464 | py | Python | nova/objects/fields.py | bopopescu/Trusted-Platform-Module-nova | 20d28ef29daf6fd7a67b37b87ec2561c34b4230b | [
"Apache-2.0"
] | 5 | 2016-04-28T16:20:38.000Z | 2021-04-25T11:19:03.000Z | nova/objects/fields.py | bopopescu/Trusted-Platform-Module-nova | 20d28ef29daf6fd7a67b37b87ec2561c34b4230b | [
"Apache-2.0"
] | null | null | null | nova/objects/fields.py | bopopescu/Trusted-Platform-Module-nova | 20d28ef29daf6fd7a67b37b87ec2561c34b4230b | [
"Apache-2.0"
] | 5 | 2020-04-08T20:24:45.000Z | 2020-10-05T19:02:13.000Z | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields
import six
# TODO(berrange) Temporary import for Arch class
from nova.compute import arch
# TODO(berrange) Temporary import for CPU* classes
from nova.compute import cpumodel
# TODO(berrange) Temporary import for HVType class
from nova.compute import hv_type
# TODO(berrange) Temporary import for VMMode class
from nova.compute import vm_mode
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
# Import field errors from oslo.versionedobjects
KeyTypeError = fields.KeyTypeError
ElementTypeError = fields.ElementTypeError
# Import fields from oslo.versionedobjects
BooleanField = fields.BooleanField
UnspecifiedDefault = fields.UnspecifiedDefault
IntegerField = fields.IntegerField
UUIDField = fields.UUIDField
FloatField = fields.FloatField
StringField = fields.StringField
SensitiveStringField = fields.SensitiveStringField
EnumField = fields.EnumField
DateTimeField = fields.DateTimeField
DictOfStringsField = fields.DictOfStringsField
DictOfNullableStringsField = fields.DictOfNullableStringsField
DictOfIntegersField = fields.DictOfIntegersField
ListOfStringsField = fields.ListOfStringsField
SetOfIntegersField = fields.SetOfIntegersField
ListOfSetsOfIntegersField = fields.ListOfSetsOfIntegersField
ListOfDictOfNullableStringsField = fields.ListOfDictOfNullableStringsField
DictProxyField = fields.DictProxyField
ObjectField = fields.ObjectField
ListOfObjectsField = fields.ListOfObjectsField
VersionPredicateField = fields.VersionPredicateField
FlexibleBooleanField = fields.FlexibleBooleanField
DictOfListOfStringsField = fields.DictOfListOfStringsField
IPAddressField = fields.IPAddressField
IPV4AddressField = fields.IPV4AddressField
IPV6AddressField = fields.IPV6AddressField
IPNetworkField = fields.IPNetworkField
IPV4NetworkField = fields.IPV4NetworkField
IPV6NetworkField = fields.IPV6NetworkField
AutoTypedField = fields.AutoTypedField
BaseEnumField = fields.BaseEnumField
# NOTE(danms): These are things we need to import for some of our
# own implementations below, our tests, or other transitional
# bits of code. These should be removable after we finish our
# conversion
Enum = fields.Enum
Field = fields.Field
FieldType = fields.FieldType
Set = fields.Set
Dict = fields.Dict
List = fields.List
Object = fields.Object
IPAddress = fields.IPAddress
IPV4Address = fields.IPV4Address
IPV6Address = fields.IPV6Address
IPNetwork = fields.IPNetwork
IPV4Network = fields.IPV4Network
IPV6Network = fields.IPV6Network
class Architecture(Enum):
    """Enum field type for guest CPU architectures, validated against arch.ALL."""
    # TODO(berrange): move all constants out of 'nova.compute.arch'
    # into fields on this class
    def __init__(self, **kwargs):
        super(Architecture, self).__init__(valid_values=arch.ALL, **kwargs)
    def coerce(self, obj, attr, value):
        # Normalise aliases/casing first; unknown names become ValueError.
        try:
            canonical = arch.canonicalize(value)
        except exception.InvalidArchitectureName:
            raise ValueError(_("Architecture name '%s' is not valid") % value)
        return super(Architecture, self).coerce(obj, attr, canonical)
class BlockDeviceDestinationType(Enum):
"""Represents possible destination_type values for a BlockDeviceMapping."""
LOCAL = 'local'
VOLUME = 'volume'
ALL = (LOCAL, VOLUME)
def __init__(self):
super(BlockDeviceDestinationType, self).__init__(
valid_values=BlockDeviceDestinationType.ALL)
class BlockDeviceSourceType(Enum):
"""Represents the possible source_type values for a BlockDeviceMapping."""
BLANK = 'blank'
IMAGE = 'image'
SNAPSHOT = 'snapshot'
VOLUME = 'volume'
ALL = (BLANK, IMAGE, SNAPSHOT, VOLUME)
def __init__(self):
super(BlockDeviceSourceType, self).__init__(
valid_values=BlockDeviceSourceType.ALL)
class BlockDeviceType(Enum):
"""Represents possible device_type values for a BlockDeviceMapping."""
CDROM = 'cdrom'
DISK = 'disk'
FLOPPY = 'floppy'
FS = 'fs'
LUN = 'lun'
ALL = (CDROM, DISK, FLOPPY, FS, LUN)
def __init__(self):
super(BlockDeviceType, self).__init__(
valid_values=BlockDeviceType.ALL)
class ConfigDrivePolicy(Enum):
OPTIONAL = "optional"
MANDATORY = "mandatory"
ALL = (OPTIONAL, MANDATORY)
def __init__(self):
super(ConfigDrivePolicy, self).__init__(
valid_values=ConfigDrivePolicy.ALL)
class CPUAllocationPolicy(Enum):
DEDICATED = "dedicated"
SHARED = "shared"
ALL = (DEDICATED, SHARED)
def __init__(self):
super(CPUAllocationPolicy, self).__init__(
valid_values=CPUAllocationPolicy.ALL)
class CPUThreadAllocationPolicy(Enum):
# prefer (default): The host may or may not have hyperthreads. This
# retains the legacy behavior, whereby siblings are preferred when
# available. This is the default if no policy is specified.
PREFER = "prefer"
# isolate: The host may or many not have hyperthreads. If hyperthreads are
# present, each vCPU will be placed on a different core and no vCPUs from
# other guests will be able to be placed on the same core, i.e. one
# thread sibling is guaranteed to always be unused. If hyperthreads are
# not present, each vCPU will still be placed on a different core and
# there are no thread siblings to be concerned with.
ISOLATE = "isolate"
# require: The host must have hyperthreads. Each vCPU will be allocated on
# thread siblings.
REQUIRE = "require"
ALL = (PREFER, ISOLATE, REQUIRE)
def __init__(self):
super(CPUThreadAllocationPolicy, self).__init__(
valid_values=CPUThreadAllocationPolicy.ALL)
class CPUMode(Enum):
# TODO(berrange): move all constants out of 'nova.compute.cpumodel'
# into fields on this class
def __init__(self, **kwargs):
super(CPUMode, self).__init__(
valid_values=cpumodel.ALL_CPUMODES, **kwargs)
class CPUMatch(Enum):
# TODO(berrange): move all constants out of 'nova.compute.cpumodel'
# into fields on this class
def __init__(self, **kwargs):
super(CPUMatch, self).__init__(
valid_values=cpumodel.ALL_MATCHES, **kwargs)
class CPUFeaturePolicy(Enum):
# TODO(berrange): move all constants out of 'nova.compute.cpumodel'
# into fields on this class
def __init__(self, **kwargs):
super(CPUFeaturePolicy, self).__init__(
valid_values=cpumodel.ALL_POLICIES, **kwargs)
class DiskBus(Enum):
FDC = "fdc"
IDE = "ide"
SATA = "sata"
SCSI = "scsi"
USB = "usb"
VIRTIO = "virtio"
XEN = "xen"
LXC = "lxc"
UML = "uml"
ALL = (FDC, IDE, SATA, SCSI, USB, VIRTIO, XEN, LXC, UML)
def __init__(self):
super(DiskBus, self).__init__(
valid_values=DiskBus.ALL)
class FirmwareType(Enum):
UEFI = "uefi"
BIOS = "bios"
ALL = (UEFI, BIOS)
def __init__(self):
super(FirmwareType, self).__init__(
valid_values=FirmwareType.ALL)
class HVType(Enum):
# TODO(berrange): move all constants out of 'nova.compute.hv_type'
# into fields on this class
def __init__(self):
super(HVType, self).__init__(
valid_values=hv_type.ALL)
def coerce(self, obj, attr, value):
try:
value = hv_type.canonicalize(value)
except exception.InvalidHypervisorVirtType:
msg = _("Hypervisor virt type '%s' is not valid") % value
raise ValueError(msg)
return super(HVType, self).coerce(obj, attr, value)
class ImageSignatureHashType(Enum):
# Represents the possible hash methods used for image signing
def __init__(self):
self.hashes = ('SHA-224', 'SHA-256', 'SHA-384', 'SHA-512')
super(ImageSignatureHashType, self).__init__(
valid_values=self.hashes
)
class ImageSignatureKeyType(Enum):
# Represents the possible keypair types used for image signing
def __init__(self):
self.key_types = (
'DSA', 'ECC_SECT571K1', 'ECC_SECT409K1', 'ECC_SECT571R1',
'ECC_SECT409R1', 'ECC_SECP521R1', 'ECC_SECP384R1', 'RSA-PSS'
)
super(ImageSignatureKeyType, self).__init__(
valid_values=self.key_types
)
class OSType(Enum):
LINUX = "linux"
WINDOWS = "windows"
ALL = (LINUX, WINDOWS)
def __init__(self):
super(OSType, self).__init__(
valid_values=OSType.ALL)
def coerce(self, obj, attr, value):
# Some code/docs use upper case or initial caps
# so canonicalize to all lower case
value = value.lower()
return super(OSType, self).coerce(obj, attr, value)
class ResourceClass(Enum):
    """Classes of resources provided to consumers."""
    VCPU = 'VCPU'
    MEMORY_MB = 'MEMORY_MB'
    DISK_GB = 'DISK_GB'
    PCI_DEVICE = 'PCI_DEVICE'
    SRIOV_NET_VF = 'SRIOV_NET_VF'
    NUMA_SOCKET = 'NUMA_SOCKET'
    NUMA_CORE = 'NUMA_CORE'
    NUMA_THREAD = 'NUMA_THREAD'
    NUMA_MEMORY_MB = 'NUMA_MEMORY_MB'
    IPV4_ADDRESS = 'IPV4_ADDRESS'
    # The ordering here is relevant. If you must add a value, only
    # append: index()/from_index() expose positions in this tuple, so
    # inserting or reordering entries would corrupt stored indexes.
    ALL = (VCPU, MEMORY_MB, DISK_GB, PCI_DEVICE, SRIOV_NET_VF, NUMA_SOCKET,
           NUMA_CORE, NUMA_THREAD, NUMA_MEMORY_MB, IPV4_ADDRESS)
    def __init__(self):
        super(ResourceClass, self).__init__(
            valid_values=ResourceClass.ALL)
    @classmethod
    def index(cls, value):
        """Return an index into the Enum given a value."""
        return cls.ALL.index(value)
    @classmethod
    def from_index(cls, index):
        """Return the Enum value at a given index."""
        return cls.ALL[index]
class RNGModel(Enum):
VIRTIO = "virtio"
ALL = (VIRTIO,)
def __init__(self):
super(RNGModel, self).__init__(
valid_values=RNGModel.ALL)
class SCSIModel(Enum):
    """Enum field type for SCSI controller models exposed to guests."""
    BUSLOGIC = "buslogic"
    IBMVSCSI = "ibmvscsi"
    LSILOGIC = "lsilogic"
    LSISAS1068 = "lsisas1068"
    LSISAS1078 = "lsisas1078"
    VIRTIO_SCSI = "virtio-scsi"
    VMPVSCSI = "vmpvscsi"
    ALL = (BUSLOGIC, IBMVSCSI, LSILOGIC, LSISAS1068,
           LSISAS1078, VIRTIO_SCSI, VMPVSCSI)
    def __init__(self):
        super(SCSIModel, self).__init__(valid_values=SCSIModel.ALL)
    def coerce(self, obj, attr, value):
        # Accept the legacy spellings seen in the vmware_adaptertype
        # image property by mapping them onto their canonical names
        # before the usual enum validation.
        lowered = value.lower()
        legacy_aliases = {
            "lsilogicsas": SCSIModel.LSISAS1068,
            "paravirtual": SCSIModel.VMPVSCSI,
        }
        lowered = legacy_aliases.get(lowered, lowered)
        return super(SCSIModel, self).coerce(obj, attr, lowered)
class VideoModel(Enum):
CIRRUS = "cirrus"
QXL = "qxl"
VGA = "vga"
VMVGA = "vmvga"
XEN = "xen"
ALL = (CIRRUS, QXL, VGA, VMVGA, XEN)
def __init__(self):
super(VideoModel, self).__init__(
valid_values=VideoModel.ALL)
class VIFModel(Enum):
    # Mapping from legacy VIF-model spellings (as seen in old image
    # properties) to their canonical network_model names; applied in
    # coerce() below.
    LEGACY_VALUES = {"virtuale1000":
                     network_model.VIF_MODEL_E1000,
                     "virtuale1000e":
                     network_model.VIF_MODEL_E1000E,
                     "virtualpcnet32":
                     network_model.VIF_MODEL_PCNET,
                     "virtualsriovethernetcard":
                     network_model.VIF_MODEL_SRIOV,
                     "virtualvmxnet":
                     network_model.VIF_MODEL_VMXNET,
                     "virtualvmxnet3":
                     network_model.VIF_MODEL_VMXNET3,
                     }
    def __init__(self):
        super(VIFModel, self).__init__(
            valid_values=network_model.VIF_MODEL_ALL)
    # NOTE(review): _get_legacy is never called within this class and
    # just returns its argument -- looks like dead code; confirm there
    # are no external callers before removing.
    def _get_legacy(self, value):
        return value
    def coerce(self, obj, attr, value):
        # Some compat for strings we'd see in the legacy
        # hw_vif_model image property
        value = value.lower()
        value = VIFModel.LEGACY_VALUES.get(value, value)
        return super(VIFModel, self).coerce(obj, attr, value)
class VMMode(Enum):
# TODO(berrange): move all constants out of 'nova.compute.vm_mode'
# into fields on this class
def __init__(self):
super(VMMode, self).__init__(
valid_values=vm_mode.ALL)
def coerce(self, obj, attr, value):
try:
value = vm_mode.canonicalize(value)
except exception.InvalidVirtualMachineMode:
msg = _("Virtual machine mode '%s' is not valid") % value
raise ValueError(msg)
return super(VMMode, self).coerce(obj, attr, value)
class WatchdogAction(Enum):
NONE = "none"
PAUSE = "pause"
POWEROFF = "poweroff"
RESET = "reset"
ALL = (NONE, PAUSE, POWEROFF, RESET)
def __init__(self):
super(WatchdogAction, self).__init__(
valid_values=WatchdogAction.ALL)
class MonitorMetricType(Enum):
CPU_FREQUENCY = "cpu.frequency"
CPU_USER_TIME = "cpu.user.time"
CPU_KERNEL_TIME = "cpu.kernel.time"
CPU_IDLE_TIME = "cpu.idle.time"
CPU_IOWAIT_TIME = "cpu.iowait.time"
CPU_USER_PERCENT = "cpu.user.percent"
CPU_KERNEL_PERCENT = "cpu.kernel.percent"
CPU_IDLE_PERCENT = "cpu.idle.percent"
CPU_IOWAIT_PERCENT = "cpu.iowait.percent"
CPU_PERCENT = "cpu.percent"
NUMA_MEM_BW_MAX = "numa.membw.max"
NUMA_MEM_BW_CURRENT = "numa.membw.current"
ALL = (
CPU_FREQUENCY,
CPU_USER_TIME,
CPU_KERNEL_TIME,
CPU_IDLE_TIME,
CPU_IOWAIT_TIME,
CPU_USER_PERCENT,
CPU_KERNEL_PERCENT,
CPU_IDLE_PERCENT,
CPU_IOWAIT_PERCENT,
CPU_PERCENT,
NUMA_MEM_BW_MAX,
NUMA_MEM_BW_CURRENT,
)
def __init__(self):
super(MonitorMetricType, self).__init__(
valid_values=MonitorMetricType.ALL)
class HostStatus(Enum):
UP = "UP" # The nova-compute is up.
DOWN = "DOWN" # The nova-compute is forced_down.
MAINTENANCE = "MAINTENANCE" # The nova-compute is disabled.
UNKNOWN = "UNKNOWN" # The nova-compute has not reported.
NONE = "" # No host or nova-compute.
ALL = (UP, DOWN, MAINTENANCE, UNKNOWN, NONE)
def __init__(self):
super(HostStatus, self).__init__(
valid_values=HostStatus.ALL)
class PciDeviceStatus(Enum):
AVAILABLE = "available"
CLAIMED = "claimed"
ALLOCATED = "allocated"
REMOVED = "removed" # The device has been hot-removed and not yet deleted
DELETED = "deleted" # The device is marked not available/deleted.
UNCLAIMABLE = "unclaimable"
UNAVAILABLE = "unavailable"
ALL = (AVAILABLE, CLAIMED, ALLOCATED, REMOVED, DELETED, UNAVAILABLE,
UNCLAIMABLE)
def __init__(self):
super(PciDeviceStatus, self).__init__(
valid_values=PciDeviceStatus.ALL)
class PciDeviceType(Enum):
# NOTE(jaypipes): It's silly that the word "type-" is in these constants,
# but alas, these were the original constant strings used...
STANDARD = "type-PCI"
SRIOV_PF = "type-PF"
SRIOV_VF = "type-VF"
ALL = (STANDARD, SRIOV_PF, SRIOV_VF)
def __init__(self):
super(PciDeviceType, self).__init__(
valid_values=PciDeviceType.ALL)
class DiskFormat(Enum):
RBD = "rbd"
LVM = "lvm"
QCOW2 = "qcow2"
RAW = "raw"
PLOOP = "ploop"
VHD = "vhd"
VMDK = "vmdk"
VDI = "vdi"
ISO = "iso"
ALL = (RBD, LVM, QCOW2, RAW, PLOOP, VHD, VMDK, VDI, ISO)
def __init__(self):
super(DiskFormat, self).__init__(
valid_values=DiskFormat.ALL)
class NotificationPriority(Enum):
AUDIT = 'audit'
CRITICAL = 'critical'
DEBUG = 'debug'
INFO = 'info'
ERROR = 'error'
SAMPLE = 'sample'
WARN = 'warn'
ALL = (AUDIT, CRITICAL, DEBUG, INFO, ERROR, SAMPLE, WARN)
def __init__(self):
super(NotificationPriority, self).__init__(
valid_values=NotificationPriority.ALL)
class NotificationPhase(Enum):
START = 'start'
END = 'end'
ERROR = 'error'
ALL = (START, END, ERROR)
def __init__(self):
super(NotificationPhase, self).__init__(
valid_values=NotificationPhase.ALL)
class NotificationAction(Enum):
UPDATE = 'update'
ALL = (UPDATE,)
def __init__(self):
super(NotificationAction, self).__init__(
valid_values=NotificationAction.ALL)
class IPV4AndV6Address(IPAddress):
@staticmethod
def coerce(obj, attr, value):
result = IPAddress.coerce(obj, attr, value)
if result.version != 4 and result.version != 6:
raise ValueError(_('Network "%(val)s" is not valid '
'in field %(attr)s') %
{'val': value, 'attr': attr})
return result
class NetworkModel(FieldType):
@staticmethod
def coerce(obj, attr, value):
if isinstance(value, network_model.NetworkInfo):
return value
elif isinstance(value, six.string_types):
# Hmm, do we need this?
return network_model.NetworkInfo.hydrate(value)
else:
raise ValueError(_('A NetworkModel is required in field %s') %
attr)
@staticmethod
def to_primitive(obj, attr, value):
return value.json()
@staticmethod
def from_primitive(obj, attr, value):
return network_model.NetworkInfo.hydrate(value)
def stringify(self, value):
return 'NetworkModel(%s)' % (
','.join([str(vif['id']) for vif in value]))
class NonNegativeFloat(FieldType):
@staticmethod
def coerce(obj, attr, value):
v = float(value)
if v < 0:
raise ValueError(_('Value must be >= 0 for field %s') % attr)
return v
class NonNegativeInteger(FieldType):
@staticmethod
def coerce(obj, attr, value):
v = int(value)
if v < 0:
raise ValueError(_('Value must be >= 0 for field %s') % attr)
return v
class ArchitectureField(BaseEnumField):
AUTO_TYPE = Architecture()
class BlockDeviceDestinationTypeField(BaseEnumField):
AUTO_TYPE = BlockDeviceDestinationType()
class BlockDeviceSourceTypeField(BaseEnumField):
AUTO_TYPE = BlockDeviceSourceType()
class BlockDeviceTypeField(BaseEnumField):
AUTO_TYPE = BlockDeviceType()
class ConfigDrivePolicyField(BaseEnumField):
AUTO_TYPE = ConfigDrivePolicy()
class CPUAllocationPolicyField(BaseEnumField):
AUTO_TYPE = CPUAllocationPolicy()
class CPUThreadAllocationPolicyField(BaseEnumField):
AUTO_TYPE = CPUThreadAllocationPolicy()
class CPUModeField(BaseEnumField):
AUTO_TYPE = CPUMode()
class CPUMatchField(BaseEnumField):
AUTO_TYPE = CPUMatch()
class CPUFeaturePolicyField(BaseEnumField):
AUTO_TYPE = CPUFeaturePolicy()
class DiskBusField(BaseEnumField):
AUTO_TYPE = DiskBus()
class FirmwareTypeField(BaseEnumField):
AUTO_TYPE = FirmwareType()
class HVTypeField(BaseEnumField):
AUTO_TYPE = HVType()
class ImageSignatureHashTypeField(BaseEnumField):
AUTO_TYPE = ImageSignatureHashType()
class ImageSignatureKeyTypeField(BaseEnumField):
AUTO_TYPE = ImageSignatureKeyType()
class OSTypeField(BaseEnumField):
AUTO_TYPE = OSType()
class ResourceClassField(BaseEnumField):
AUTO_TYPE = ResourceClass()
def index(self, value):
"""Return an index into the Enum given a value."""
return self._type.index(value)
def from_index(self, index):
"""Return the Enum value at a given index."""
return self._type.from_index(index)
class RNGModelField(BaseEnumField):
AUTO_TYPE = RNGModel()
class SCSIModelField(BaseEnumField):
AUTO_TYPE = SCSIModel()
class VideoModelField(BaseEnumField):
AUTO_TYPE = VideoModel()
class VIFModelField(BaseEnumField):
AUTO_TYPE = VIFModel()
class VMModeField(BaseEnumField):
AUTO_TYPE = VMMode()
class WatchdogActionField(BaseEnumField):
AUTO_TYPE = WatchdogAction()
class MonitorMetricTypeField(BaseEnumField):
AUTO_TYPE = MonitorMetricType()
class PciDeviceStatusField(BaseEnumField):
AUTO_TYPE = PciDeviceStatus()
class PciDeviceTypeField(BaseEnumField):
AUTO_TYPE = PciDeviceType()
class DiskFormatField(BaseEnumField):
AUTO_TYPE = DiskFormat()
class NotificationPriorityField(BaseEnumField):
AUTO_TYPE = NotificationPriority()
class NotificationPhaseField(BaseEnumField):
AUTO_TYPE = NotificationPhase()
class NotificationActionField(BaseEnumField):
AUTO_TYPE = NotificationAction()
class IPV4AndV6AddressField(AutoTypedField):
AUTO_TYPE = IPV4AndV6Address()
class ListOfIntegersField(AutoTypedField):
AUTO_TYPE = List(fields.Integer())
class NonNegativeFloatField(AutoTypedField):
AUTO_TYPE = NonNegativeFloat()
class NonNegativeIntegerField(AutoTypedField):
AUTO_TYPE = NonNegativeInteger()
| 27.16962 | 79 | 0.673779 |
1f743c02283dc30da9d8d27f91bfea97189e3052 | 306 | py | Python | setup.py | sjwo/asv_sim | fd5383e1e8325d342f14fe0f1ba97d6b2e28d220 | [
"BSD-2-Clause"
] | null | null | null | setup.py | sjwo/asv_sim | fd5383e1e8325d342f14fe0f1ba97d6b2e28d220 | [
"BSD-2-Clause"
] | 2 | 2018-05-23T13:35:17.000Z | 2018-05-23T14:32:47.000Z | setup.py | sjwo/asv_sim | fd5383e1e8325d342f14fe0f1ba97d6b2e28d220 | [
"BSD-2-Clause"
] | 5 | 2018-04-05T19:59:50.000Z | 2021-07-07T18:11:58.000Z |
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
# (catkin derives the distutils metadata from the ROS package manifest
# so package information lives in one place)
setup_args = generate_distutils_setup(
    packages=['asv_sim'],
    package_dir={'': 'src'})
setup(**setup_args)
| 23.538462 | 61 | 0.761438 |
17b1b68a63f762b26923d9170b4529614760ea67 | 2,393 | py | Python | test/test_app.py | GermanG/puppetboard | b6fb68a3c1d7b056da6485659a998caaa92d1a29 | [
"Apache-2.0"
] | null | null | null | test/test_app.py | GermanG/puppetboard | b6fb68a3c1d7b056da6485659a998caaa92d1a29 | [
"Apache-2.0"
] | null | null | null | test/test_app.py | GermanG/puppetboard | b6fb68a3c1d7b056da6485659a998caaa92d1a29 | [
"Apache-2.0"
] | null | null | null | from bs4 import BeautifulSoup
from puppetboard import app
from . import MockDbQuery
def test_first_test():
    # Sanity check: the puppetboard application module imported correctly.
    assert app is not None
def test_no_env(client, mock_puppetdb_environments):
    # Requesting an environment that PuppetDB does not know about must 404.
    rv = client.get('/nonexistent/')
    assert rv.status_code == 404
def test_offline_mode(client, mocker,
                      mock_puppetdb_environments,
                      mock_puppetdb_default_nodes):
    """All assets must be served locally (no '//' URLs) when OFFLINE_MODE is on.

    The offline copies of linked stylesheets must also actually resolve.
    """
    # Save and restore the flag so a failing assertion cannot leak
    # offline mode into other tests (the original never restored it).
    previous_mode = app.app.config.get('OFFLINE_MODE')
    app.app.config['OFFLINE_MODE'] = True
    try:
        query_data = {
            'nodes': [[{'count': 10}]],
            'resources': [[{'count': 40}]],
        }

        dbquery = MockDbQuery(query_data)
        mocker.patch.object(app.puppetdb, '_query', side_effect=dbquery.get)
        rv = client.get('/')
        soup = BeautifulSoup(rv.data, 'html.parser')
        assert soup.title.contents[0] == 'Puppetboard'

        # Stylesheet links must be relative; offline copies must be servable.
        for link in soup.find_all('link'):
            assert "//" not in link['href']
            if 'offline' in link['href']:
                rv = client.get(link['href'])
                assert rv.status_code == 200

        # Scripts with a src attribute must be relative too.
        for script in soup.find_all('script'):
            if "src" in script.attrs:
                assert "//" not in script['src']

        # rv is either the index page or the last offline asset fetched.
        assert rv.status_code == 200
    finally:
        app.app.config['OFFLINE_MODE'] = previous_mode
def test_offline_static(client):
    """Bundled offline stylesheets are served with a CSS content type."""
    stylesheet_paths = (
        '/offline/css/google_fonts.css',
        '/offline/Semantic-UI-2.1.8/semantic.min.css',
    )
    for path in stylesheet_paths:
        response = client.get(path)
        assert 'Content-Type' in response.headers
        assert 'text/css' in response.headers['Content-Type']
        assert response.status_code == 200
def test_health_status(client):
    # The /status endpoint is a plain liveness probe: 200 with body "OK".
    rv = client.get('/status')
    assert rv.status_code == 200
    assert rv.data.decode('utf-8') == 'OK'
def test_custom_title(client, mocker,
                      mock_puppetdb_environments,
                      mock_puppetdb_default_nodes):
    """The page <title> honours a custom PAGE_TITLE configuration value."""
    default_title = app.app.config['PAGE_TITLE']
    custom_title = 'Dev - Puppetboard'
    app.app.config['PAGE_TITLE'] = custom_title
    try:
        query_data = {
            'nodes': [[{'count': 10}]],
            'resources': [[{'count': 40}]],
        }

        dbquery = MockDbQuery(query_data)
        mocker.patch.object(app.puppetdb, '_query', side_effect=dbquery.get)
        rv = client.get('/')
        soup = BeautifulSoup(rv.data, 'html.parser')
        assert soup.title.contents[0] == custom_title
    finally:
        # Restore the global state even when an assertion above fails;
        # the original only restored it on the success path, leaking
        # the custom title into subsequent tests.
        app.app.config['PAGE_TITLE'] = default_title
| 26.88764 | 72 | 0.630589 |
2799095c7e83debaf6c6d5e5823b5ad7491d173a | 1,431 | py | Python | tests/utils.py | anshul217/django-rest-framework-mongoengine | 2fe6de53907b31a5e8b742e4c6b728942b5fa4f0 | [
"MIT"
] | 594 | 2015-01-02T19:51:28.000Z | 2022-03-27T05:16:19.000Z | tests/utils.py | anshul217/django-rest-framework-mongoengine | 2fe6de53907b31a5e8b742e4c6b728942b5fa4f0 | [
"MIT"
] | 224 | 2015-01-05T14:06:20.000Z | 2022-03-27T07:58:06.000Z | tests/utils.py | anshul217/django-rest-framework-mongoengine | 2fe6de53907b31a5e8b742e4c6b728942b5fa4f0 | [
"MIT"
] | 181 | 2015-01-06T07:47:22.000Z | 2022-02-17T11:59:43.000Z | import pytest
from rest_framework.exceptions import ValidationError
def dedent(blocktext, indent=12):
    """Strip a fixed leading indent from a triple-quoted test fixture.

    Drops the first and last lines (the fragment after the opening
    quotes and the line holding the closing quotes) and removes the
    first ``indent`` characters from every remaining line.  The
    default of 12 matches the historical hard-coded indent width.
    """
    return '\n'.join(line[indent:] for line in blocktext.splitlines()[1:-1])
def get_items(mapping_or_list_of_two_tuples):
    """Normalise test data to an iterable of (value, expected) pairs.

    Tests may supply either a ``{value: expected}`` dict or an already
    paired ``[(value, expected), ...]`` list; dicts are converted to
    their items view, anything else is passed through untouched.
    """
    data = mapping_or_list_of_two_tuples
    if not isinstance(data, dict):
        return data
    return data.items()
class FieldTest():
    """
    Base class for testing valid and invalid input values.

    Subclasses provide ``self.field`` plus ``valid_inputs``,
    ``invalid_inputs`` and ``outputs`` (dicts or pair lists).
    """
    def test_valid_inputs(self):
        """
        Ensure that valid values return the expected validated data.
        """
        for given, expected in get_items(self.valid_inputs):
            assert self.field.run_validation(given) == expected
    def test_invalid_inputs(self):
        """
        Ensure that invalid values raise the expected validation error.
        """
        for given, failure_fragment in get_items(self.invalid_inputs):
            with pytest.raises(ValidationError) as exc_info:
                self.field.run_validation(given)
            assert failure_fragment in exc_info.value.detail[0]
    def test_outputs(self):
        """
        Ensure values are rendered to the expected representation.
        """
        for raw_value, expected in get_items(self.outputs):
            assert self.field.to_representation(raw_value) == expected
| 34.902439 | 80 | 0.69392 |
605f633553961d1ce2a9563fd5908327a4211782 | 8,242 | py | Python | sdgym/synthesizers/tablegan.py | patricebechard/SDGym | 827272b877661d65cc91dc055799c53682834c41 | [
"MIT"
] | null | null | null | sdgym/synthesizers/tablegan.py | patricebechard/SDGym | 827272b877661d65cc91dc055799c53682834c41 | [
"MIT"
] | null | null | null | sdgym/synthesizers/tablegan.py | patricebechard/SDGym | 827272b877661d65cc91dc055799c53682834c41 | [
"MIT"
] | 1 | 2020-09-22T14:54:48.000Z | 2020-09-22T14:54:48.000Z | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import torch
from torch.nn import (
BatchNorm2d, Conv2d, ConvTranspose2d, LeakyReLU, Module, ReLU, Sequential, Sigmoid, Tanh, init)
from torch.nn.functional import binary_cross_entropy_with_logits
from torch.optim import Adam
from torch.utils.data import DataLoader, TensorDataset
from sdgym.constants import CATEGORICAL
from sdgym.synthesizers.base import BaseSynthesizer
from sdgym.synthesizers.utils import TableganTransformer
class Discriminator(Module):
    """GAN discriminator: scores a batch of square table-images.

    The convolutional stack is built externally (see determine_layers)
    and passed in, so this class is a thin ``Sequential`` wrapper.
    (Removed a commented-out ``self.layers`` assignment left over from
    an earlier revision.)
    """

    def __init__(self, meta, side, layers):
        super(Discriminator, self).__init__()
        self.meta = meta    # per-column metadata; stored but unused here
        self.side = side    # width/height of the square input image
        self.seq = Sequential(*layers)

    def forward(self, input):
        """Run the layer stack over `input` and return its output."""
        return self.seq(input)
class Generator(Module):
    """GAN generator: maps noise vectors to square table-images.

    The transposed-convolution stack is built externally (see
    determine_layers) and passed in.  (Removed a commented-out
    ``self.layers`` assignment left over from an earlier revision.)
    """

    def __init__(self, meta, side, layers):
        super(Generator, self).__init__()
        self.meta = meta    # per-column metadata; stored but unused here
        self.side = side    # width/height of the generated image
        self.seq = Sequential(*layers)

    def forward(self, input_):
        """Run the layer stack over `input_` and return its output."""
        return self.seq(input_)
class Classifier(Module):
    """Auxiliary classifier predicting the label cell of a table-image.

    Only meaningful when the last column in ``meta`` is a binary
    CATEGORICAL column named 'label'; otherwise ``self.valid`` is set
    False so callers can skip it.
    """
    def __init__(self, meta, side, layers, device):
        super(Classifier, self).__init__()
        self.meta = meta
        self.side = side
        self.seq = Sequential(*layers)
        self.valid = True
        if meta[-1]['name'] != 'label' or meta[-1]['type'] != CATEGORICAL or meta[-1]['size'] != 2:
            self.valid = False
        # Mask that zeroes the label cell so the classifier cannot read
        # the answer directly from its input image.
        masking = np.ones((1, 1, side, side), dtype='float32')
        index = len(self.meta) - 1
        # (row, column) of the label cell assuming columns are laid out
        # row-major in the side x side image.
        self.r = index // side
        self.c = index % side
        masking[0, 0, self.r, self.c] = 0
        self.masking = torch.from_numpy(masking).to(device)
    def forward(self, input):
        # Extract the ground-truth label: (v + 1) / 2 maps -1 -> 0 and
        # 1 -> 1 (assumes cells are scaled to [-1, 1] -- TODO confirm
        # against the transformer used in fit).
        label = (input[:, :, self.r, self.c].view(-1) + 1) / 2
        input = input * self.masking.expand(input.size())
        return self.seq(input).view(-1), label
def determine_layers(side, random_dim, num_channels):
    """Build the discriminator/generator/classifier layer lists.

    The channel/size pyramid halves the spatial size (and doubles the
    channel count) until the feature map is 3 or smaller, capped at four
    levels; the three networks mirror that pyramid.
    """
    assert 4 <= side <= 32
    layer_dims = [(1, side), (num_channels, side // 2)]
    while layer_dims[-1][1] > 3 and len(layer_dims) < 4:
        channels, size = layer_dims[-1]
        layer_dims.append((channels * 2, size // 2))

    # Discriminator: strided conv blocks, then a 1-channel conv + sigmoid.
    layers_D = []
    for (in_ch, _), (out_ch, _) in zip(layer_dims, layer_dims[1:]):
        layers_D.extend([
            Conv2d(in_ch, out_ch, 4, 2, 1, bias=False),
            BatchNorm2d(out_ch),
            LeakyReLU(0.2, inplace=True),
        ])
    layers_D.extend([
        Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0),
        Sigmoid(),
    ])

    # Generator: transposed convs walking the pyramid in reverse, then tanh.
    layers_G = [
        ConvTranspose2d(
            random_dim, layer_dims[-1][0], layer_dims[-1][1], 1, 0, output_padding=0, bias=False)
    ]
    for (in_ch, _), (out_ch, _) in zip(reversed(layer_dims), reversed(layer_dims[:-1])):
        layers_G.extend([
            BatchNorm2d(in_ch),
            ReLU(True),
            ConvTranspose2d(in_ch, out_ch, 4, 2, 1, output_padding=0, bias=True),
        ])
    layers_G.append(Tanh())

    # Classifier: same trunk as the discriminator but no final sigmoid
    # (it is trained with a with-logits loss).
    layers_C = []
    for (in_ch, _), (out_ch, _) in zip(layer_dims, layer_dims[1:]):
        layers_C.extend([
            Conv2d(in_ch, out_ch, 4, 2, 1, bias=False),
            BatchNorm2d(out_ch),
            LeakyReLU(0.2, inplace=True),
        ])
    layers_C.append(Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0))
    return layers_D, layers_G, layers_C
def weights_init(m):
    """DCGAN-style initializer for ``Module.apply``.

    Conv-like layers get weights ~ N(0, 0.02); batch-norm layers get
    weights ~ N(1, 0.02) and zero bias.  Other modules are untouched.
    """
    classname = m.__class__.__name__
    if 'Conv' in classname:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in classname:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0)
class TableganSynthesizer(BaseSynthesizer):
    """GAN-based tabular synthesizer.

    Rows are reshaped by ``TableganTransformer`` into ``side x side``
    single-channel "images" and a DCGAN-style generator/discriminator pair
    is trained on them, together with an auxiliary classifier on the label
    column when one is present (see ``Classifier.valid``).
    """
    def __init__(self,
                 random_dim=100,
                 num_channels=64,
                 l2scale=1e-5,
                 batch_size=500,
                 epochs=300):
        # random_dim: size of the generator's latent noise vector.
        # num_channels: base channel count for the convolutional stacks.
        # l2scale: Adam weight decay (L2 regularization strength).
        self.random_dim = random_dim
        self.num_channels = num_channels
        self.l2scale = l2scale
        self.batch_size = batch_size
        self.epochs = epochs
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    def fit(self, data, categorical_columns=tuple(), ordinal_columns=tuple()):
        """Train the GAN on ``data`` (a 2-D array of rows)."""
        # Pick the smallest supported square side that fits every column.
        sides = [4, 8, 16, 24, 32]
        for i in sides:
            if i * i >= data.shape[1]:
                self.side = i
                break
        self.transformer = TableganTransformer(self.side)
        self.transformer.fit(data, categorical_columns, ordinal_columns)
        data = self.transformer.transform(data)
        data = torch.from_numpy(data.astype('float32')).to(self.device)
        dataset = TensorDataset(data)
        loader = DataLoader(dataset, batch_size=self.batch_size, shuffle=True, drop_last=True)
        layers_D, layers_G, layers_C = determine_layers(
            self.side, self.random_dim, self.num_channels)
        self.generator = Generator(self.transformer.meta, self.side, layers_G).to(self.device)
        discriminator = Discriminator(self.transformer.meta, self.side, layers_D).to(self.device)
        classifier = Classifier(
            self.transformer.meta, self.side, layers_C, self.device).to(self.device)
        optimizer_params = dict(lr=2e-4, betas=(0.5, 0.9), eps=1e-3, weight_decay=self.l2scale)
        optimizerG = Adam(self.generator.parameters(), **optimizer_params)
        optimizerD = Adam(discriminator.parameters(), **optimizer_params)
        optimizerC = Adam(classifier.parameters(), **optimizer_params)
        self.generator.apply(weights_init)
        discriminator.apply(weights_init)
        classifier.apply(weights_init)
        for i in range(self.epochs):
            # NOTE: the loop target shadows the outer ``data`` tensor.
            for id_, data in enumerate(loader):
                real = data[0].to(self.device)
                noise = torch.randn(self.batch_size, self.random_dim, 1, 1, device=self.device)
                fake = self.generator(noise)
                # --- discriminator step: log-loss on real vs fake batches
                # (1e-4 guards the log against zero sigmoid outputs) ---
                optimizerD.zero_grad()
                y_real = discriminator(real)
                y_fake = discriminator(fake)
                loss_d = (
                    -(torch.log(y_real + 1e-4).mean()) - (torch.log(1. - y_fake + 1e-4).mean()))
                loss_d.backward()
                optimizerD.step()
                # --- generator step: adversarial loss plus a penalty matching
                # the per-feature mean/std of fake and real batches; the first
                # backward retains the graph so the second can reuse it ---
                noise = torch.randn(self.batch_size, self.random_dim, 1, 1, device=self.device)
                fake = self.generator(noise)
                optimizerG.zero_grad()
                y_fake = discriminator(fake)
                loss_g = -(torch.log(y_fake + 1e-4).mean())
                loss_g.backward(retain_graph=True)
                loss_mean = torch.norm(torch.mean(fake, dim=0) - torch.mean(real, dim=0), 1)
                loss_std = torch.norm(torch.std(fake, dim=0) - torch.std(real, dim=0), 1)
                loss_info = loss_mean + loss_std
                loss_info.backward()
                optimizerG.step()
                # --- classifier step: only when the table has a usable binary
                # label column; trains the classifier on real rows and pushes
                # the generator toward label-consistent fakes ---
                noise = torch.randn(self.batch_size, self.random_dim, 1, 1, device=self.device)
                fake = self.generator(noise)
                if classifier.valid:
                    real_pre, real_label = classifier(real)
                    fake_pre, fake_label = classifier(fake)
                    loss_cc = binary_cross_entropy_with_logits(real_pre, real_label)
                    loss_cg = binary_cross_entropy_with_logits(fake_pre, fake_label)
                    optimizerG.zero_grad()
                    loss_cg.backward()
                    optimizerG.step()
                    optimizerC.zero_grad()
                    loss_cc.backward()
                    optimizerC.step()
                    loss_c = (loss_cc, loss_cg)
                else:
                    loss_c = None
                if((id_ + 1) % 50 == 0):
                    print("epoch", i + 1, "step", id_ + 1, loss_d, loss_g, loss_c)
    def sample(self, n):
        """Generate ``n`` synthetic rows by sampling the trained generator."""
        self.generator.eval()
        # Round the batch count up so at least n rows are produced.
        steps = n // self.batch_size + 1
        data = []
        for i in range(steps):
            noise = torch.randn(self.batch_size, self.random_dim, 1, 1, device=self.device)
            fake = self.generator(noise)
            data.append(fake.detach().cpu().numpy())
        data = np.concatenate(data, axis=0)
        return self.transformer.inverse_transform(data[:n])
| 35.834783 | 99 | 0.581412 |
95f44926652a51cb3fc2a5554d8079384eaaf951 | 1,192 | py | Python | configs/detection/tfa/voc/split2/tfa_r101_fpn_voc-split2_5shot-fine-tuning.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
] | 376 | 2021-11-23T13:29:57.000Z | 2022-03-30T07:22:14.000Z | configs/detection/tfa/voc/split2/tfa_r101_fpn_voc-split2_5shot-fine-tuning.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
] | 51 | 2021-11-23T14:45:08.000Z | 2022-03-30T03:37:15.000Z | configs/detection/tfa/voc/split2/tfa_r101_fpn_voc-split2_5shot-fine-tuning.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
] | 56 | 2021-11-23T14:02:27.000Z | 2022-03-31T09:01:50.000Z | _base_ = [
'../../../_base_/datasets/fine_tune_based/few_shot_voc.py',
'../../../_base_/schedules/schedule.py', '../../tfa_r101_fpn.py',
'../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
# FewShotVOCDefaultDataset predefine ann_cfg for model reproducibility.
data = {
    'train': {
        'type': 'FewShotVOCDefaultDataset',
        'ann_cfg': [{'method': 'TFA', 'setting': 'SPLIT2_5SHOT'}],
        'num_novel_shots': 5,
        'num_base_shots': 5,
        'classes': 'ALL_CLASSES_SPLIT2',
    },
    'val': {'classes': 'ALL_CLASSES_SPLIT2'},
    'test': {'classes': 'ALL_CLASSES_SPLIT2'},
}
# Evaluate base and novel splits separately every 20k iterations.
evaluation = {
    'interval': 20000,
    'class_splits': ['BASE_CLASSES_SPLIT2', 'NOVEL_CLASSES_SPLIT2'],
}
checkpoint_config = {'interval': 20000}
optimizer = {'lr': 0.005}
lr_config = {'warmup_iters': 10, 'step': [18000]}
runner = {'max_iters': 20000}
# base model needs to be initialized with following script:
# tools/detection/misc/initialize_bbox_head.py
# please refer to configs/detection/tfa/README.md for more details.
load_from = ('work_dirs/tfa_r101_fpn_voc-split2_base-training/'
             'base_model_random_init_bbox_head.pth')
| 37.25 | 71 | 0.708893 |
36a0345af3ae1e7cbde7c542ecdd9729984003cf | 6,221 | py | Python | 1-9/9. float_range/test_float_range.py | dcragusa/PythonMorsels | 5f75b51a68769036e4004e9ccdada6b220124ab6 | [
"MIT"
] | 1 | 2021-11-30T05:03:24.000Z | 2021-11-30T05:03:24.000Z | 1-9/9. float_range/test_float_range.py | dcragusa/PythonMorsels | 5f75b51a68769036e4004e9ccdada6b220124ab6 | [
"MIT"
] | null | null | null | 1-9/9. float_range/test_float_range.py | dcragusa/PythonMorsels | 5f75b51a68769036e4004e9ccdada6b220124ab6 | [
"MIT"
] | 2 | 2021-04-18T05:26:43.000Z | 2021-11-28T18:46:43.000Z | import sys
import unittest
from collections.abc import Generator
from timeit import default_timer
from float_range import float_range
class FloatRangeTests(unittest.TestCase):
    """Tests for float_range: a lazy, length-aware, reversible, comparable
    range-like object that accepts float start/stop/step."""
    def test_has_iterability(self):
        # Iterating yields arithmetic progressions, including float steps.
        self.assertEqual(list(float_range(1, 11, 2)), [1, 3, 5, 7, 9])
        self.assertEqual(
            list(float_range(0.5, 7, 0.75)),
            [0.5, 1.25, 2.0, 2.75, 3.5, 4.25, 5.0, 5.75, 6.5]
        )
    def test_optional_step(self):
        # Step defaults to 1 when omitted.
        self.assertEqual(list(float_range(1, 6, 1)), [1, 2, 3, 4, 5])
        self.assertEqual(list(float_range(1, 6)), [1, 2, 3, 4, 5])
        self.assertEqual(
            list(float_range(0.5, 6)),
            [0.5, 1.5, 2.5, 3.5, 4.5, 5.5]
        )
    def test_optional_start(self):
        # A single argument means stop; start defaults to 0.
        self.assertEqual(list(float_range(0, 6)), [0, 1, 2, 3, 4, 5])
        self.assertEqual(list(float_range(6)), [0, 1, 2, 3, 4, 5])
        self.assertEqual(
            list(float_range(4.2)),
            [0, 1, 2, 3, 4]
        )
    def test_fractional_step_size(self):
        self.assertEqual(
            list(float_range(1, 6, 0.5)),
            [1, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5]
        )
        self.assertEqual(
            list(float_range(1, 5.6, 0.5)),
            [1, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5]
        )
    def test_negative_step(self):
        # A negative step with start < stop yields an empty range.
        with self.assertRaises(StopIteration):
            # Should be empty so StopIteration should be raised
            next(iter(float_range(1, 6, -1)))
        self.assertEqual(list(float_range(5, 0, -1)), [5, 4, 3, 2, 1])
        self.assertEqual(
            list(float_range(0.5, 6)),
            [0.5, 1.5, 2.5, 3.5, 4.5, 5.5]
        )
        self.assertEqual(
            list(float_range(6, 1, -0.5)),
            [6, 5.5, 5.0, 4.5, 4.0, 3.5, 3.0, 2.5, 2.0, 1.5]
        )
    def test_no_arguments(self):
        with self.assertRaises(TypeError):
            float_range()
    def test_too_many_arguments(self):
        with self.assertRaises(TypeError):
            float_range(0, 5, 1, 1)
        with self.assertRaises(TypeError):
            float_range(0, 5, 1, 1, 1)
    def test_no_memory_used(self):
        """Make sure float_range response isn't a giant list of numbers."""
        response = float_range(0, 1024, 2**-4)
        if isinstance(response, Generator):
            # For a generator, measure the size of its frame locals
            # instead of the (tiny) generator object itself.
            next(response)
            size = sum(
                sys.getsizeof(obj)
                for obj in response.gi_frame.f_locals.values()
            )
        else:
            size = sys.getsizeof(response)
        self.assertLess(size, 8000, 'Too much memory used')
        self.assertNotEqual(type(response), list)
        self.assertNotEqual(type(response), tuple)
    # @unittest.expectedFailure
    def test_has_length(self):
        # len() must be computed, not counted: timing may not grow with size.
        with Timer() as small:
            self.assertEqual(len(float_range(10)), 10)
        with Timer() as big:
            self.assertEqual(len(float_range(10000)), 10000)
        self.assertLess(
            big.elapsed,
            small.elapsed*2,
            "Timing shouldn't grow with size",
        )
        self.assertEqual(len(float_range(100)), 100)
        self.assertEqual(len(float_range(1, 100)), 99)
        self.assertEqual(len(float_range(1, 11, 2)), 5)
        self.assertEqual(len(float_range(0.5, 7, 0.75)), 9)
        self.assertEqual(len(float_range(1000000)), 1000000)
        self.assertEqual(len(float_range(11, 1.2, -2)), 5)
        self.assertEqual(len(float_range(11, 1.2, 2)), 0)
        # The object must be re-iterable (not an exhaustible generator).
        r = float_range(1, 6, 0.5)
        self.assertEqual(
            list(r),
            [1, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5]
        )
        self.assertEqual(
            list(r),
            [1, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5]
        )
    # @unittest.expectedFailure
    def test_reversed(self):
        with Timer() as small:
            self.assertEqual(len(float_range(10)), 10)
        with Timer() as big:
            self.assertEqual(len(float_range(10000)), 10000)
        self.assertLess(
            big.elapsed,
            small.elapsed*2,
            "Timing shouldn't grow with size",
        )
        r = reversed(float_range(0.5, 7, 0.75))
        self.assertEqual(
            list(r),
            [6.5, 5.75, 5.0, 4.25, 3.5, 2.75, 2.0, 1.25, 0.5]
        )
        # Reversal must be lazy: taking the first reversed element of a
        # million-element range should not materialize it.
        big_num = 1000000
        self.assertEqual(next(reversed(float_range(big_num))), big_num-1)
    # @unittest.expectedFailure
    def test_equality(self):
        # Equality must also be O(1)-ish, not an element-by-element walk.
        with Timer() as small:
            self.assertEqual(float_range(0, 9.5, 1), float_range(0, 10, 1))
        with Timer() as big:
            self.assertEqual(
                float_range(0, 10000.5, 1),
                float_range(0, 10000.2, 1),
            )
        self.assertLess(
            big.elapsed,
            small.elapsed*2,
            "Timing shouldn't grow with size",
        )
        self.assertEqual(float_range(0, 5, 0.5), float_range(0, 5, 0.5))
        self.assertEqual(float_range(5, 5), float_range(10, 10))
        self.assertEqual(float_range(5, 11, 5), float_range(5, 12, 5))
        self.assertEqual(float_range(10), float_range(0, 10))
        self.assertNotEqual(
            float_range(0, 2**10, 2**-10),
            float_range(0, 2**10+1, 2**-10),
        )
        # Must also compare equal with built-in range objects (both ways).
        self.assertEqual(float_range(1000000), range(1000000))
        self.assertEqual(range(1000000), float_range(1000000))
        self.assertFalse(float_range(0, 5, 0.5) != float_range(0, 5, 0.5))
        # Comparison against arbitrary objects should defer to the other
        # operand (NotImplemented), so reflected __eq__ can win.
        class EqualToEverything:
            def __eq__(self, other):
                return True
        self.assertEqual(float_range(1000000), EqualToEverything())
        self.assertEqual(float_range(0, 5, 3), float_range(0, 4, 3))
        self.assertEqual(float_range(0, 0.3, 0.5), float_range(0, 0.4, 1.5))
        self.assertNotEqual(float_range(0, 11, 0.5), float_range(0, 11, 1.5))
class Timer:
    """Context manager that records wall-clock time spent in a ``with`` block.

    After exit, ``start``, ``end`` and ``elapsed`` are available on the
    instance.
    """

    def __enter__(self):
        # Stamp the start time and hand the timer itself to the caller.
        self.start = default_timer()
        return self

    def __exit__(self, *exc_info):
        # Stamp the end time and cache the total duration.
        self.end = default_timer()
        self.elapsed = self.end - self.start
if __name__ == "__main__":
    # Run the suite with verbose per-test output when executed directly.
    unittest.main(verbosity=2)
| 34.561111 | 77 | 0.551519 |
a2910a53900e4c20c9ab1ed85bef5ac8ec5a1425 | 316 | py | Python | fbauth/__init__.py | hellhound/django-fbauth-templatetag | 0b8ab91f2421f61b92c33cf2dcd9c829d65590ae | [
"BSD-3-Clause"
] | null | null | null | fbauth/__init__.py | hellhound/django-fbauth-templatetag | 0b8ab91f2421f61b92c33cf2dcd9c829d65590ae | [
"BSD-3-Clause"
] | 2 | 2020-02-12T00:11:50.000Z | 2020-06-05T17:52:45.000Z | fbauth/__init__.py | hellhound/django-fbauth-templatetag | 0b8ab91f2421f61b92c33cf2dcd9c829d65590ae | [
"BSD-3-Clause"
] | 2 | 2015-08-11T16:58:59.000Z | 2021-01-04T08:18:37.000Z | # -*- coding:utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Public API of this package module: just the version string.
__all__ = ['VERSION']
# Resolve the installed distribution's version at import time; fall back to
# 'unknown' when the metadata is unavailable (e.g. running from a source
# checkout).  NOTE(review): pkg_resources is deprecated in modern
# setuptools — consider importlib.metadata once Python 2 support is dropped.
try:
    import pkg_resources
    VERSION = pkg_resources.get_distribution(
        'django-fbauth-templatetag').version
except Exception:
    VERSION = 'unknown'
# App-config hook read by older Django versions to locate the AppConfig.
default_app_config = 'fbauth.apps.FBAuthConfig'
| 22.571429 | 56 | 0.734177 |
d1e781cc90238c916db5bbb1e5010059de9ad780 | 10,725 | py | Python | qa/rpc-tests/listtransactions.py | mirzaei-ce/core-hellebit | bd56d29dffb10313b587785151126e2a365c17ed | [
"MIT"
] | null | null | null | qa/rpc-tests/listtransactions.py | mirzaei-ce/core-hellebit | bd56d29dffb10313b587785151126e2a365c17ed | [
"MIT"
] | null | null | null | qa/rpc-tests/listtransactions.py | mirzaei-ce/core-hellebit | bd56d29dffb10313b587785151126e2a365c17ed | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import HellebitTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction
import cStringIO
import binascii
def txFromHex(hexstring):
    """Deserialize a hex-encoded raw transaction into a CTransaction."""
    raw = binascii.unhexlify(hexstring)
    tx = CTransaction()
    tx.deserialize(cStringIO.StringIO(raw))
    return tx
def check_array_result(object_array, to_match, expected):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.  Raises AssertionError when a matched object misses an expected
    pair, or when nothing matched at all.
    """
    num_matched = 0
    for item in object_array:
        # Evaluate every to_match key (not short-circuiting) so a missing
        # key raises KeyError just like a direct lookup would.
        mismatches = [key for key, value in to_match.items() if item[key] != value]
        if mismatches:
            continue
        # Every matched item must carry all the expected key/value pairs.
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
        num_matched += 1
    if num_matched == 0:
        raise AssertionError("No objects matched %s"%(str(to_match)))
class ListTransactionsTest(HellebitTestFramework):
    """Exercise the listtransactions RPC across two nodes.

    Covers plain sends, send-to-self, sendmany with accounts, watch-only
    addresses, and BIP125 (opt-in replace-by-fee) signalling for both sent
    and received transactions.
    """

    def run_test(self):
        """Main test path: listtransactions categories, amounts, accounts."""
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
        # mine a block, confirmations should change:
        self.nodes[0].generate(1)
        self.sync_all()
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
        # send-to-self shows up as both a send and a receive entry:
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"send"},
                           {"amount":Decimal("-0.2")})
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"receive"},
                           {"amount":Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        send_to = { self.nodes[0].getnewaddress() : 0.11,
                    self.nodes[1].getnewaddress() : 0.22,
                    self.nodes[0].getaccountaddress("from1") : 0.33,
                    self.nodes[1].getaccountaddress("toself") : 0.44 }
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.33")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.33")},
                           {"txid":txid, "account" : "from1"} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.44")},
                           {"txid":txid, "account" : ""} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.44")},
                           {"txid":txid, "account" : "toself"} )
        # Watch-only: import a multisig redeem script and verify the payment
        # only appears when include_watchonly is requested.
        multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
        self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
        txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
        self.nodes[1].generate(1)
        self.sync_all()
        assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
        check_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
                           {"category":"receive","amount":Decimal("0.1")},
                           {"txid":txid, "account" : "watchonly"} )
        self.run_rbf_opt_in_test()

    # Check that the opt-in-rbf flag works properly, for sent and received
    # transactions.
    def run_rbf_opt_in_test(self):
        """Verify the bip125-replaceable field for direct and inherited RBF."""
        # Check whether a transaction signals opt-in RBF itself
        # (any input sequence below 0xfffffffe, per BIP125).
        def is_opt_in(node, txid):
            rawtx = node.getrawtransaction(txid, 1)
            for x in rawtx["vin"]:
                if x["sequence"] < 0xfffffffe:
                    return True
            return False
        # Find an unconfirmed output matching a certain txid
        def get_unconfirmed_utxo_entry(node, txid_to_match):
            utxo = node.listunspent(0, 0)
            for i in utxo:
                if i["txid"] == txid_to_match:
                    return i
            return None
        # 1. Chain a few transactions that don't opt-in.
        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        assert(not is_opt_in(self.nodes[0], txid_1))
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        # Tx2 will build off txid_1, still not opting in to RBF.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
        # Create tx2 using createrawtransaction
        inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.999}
        tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
        # ...and check the result
        assert(not is_opt_in(self.nodes[1], txid_2))
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        # Tx3 will opt-in to RBF by setting nSequence to 0 on its input.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
        inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[1].getnewaddress(): 0.998}
        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx3_modified = txFromHex(tx3)
        tx3_modified.vin[0].nSequence = 0
        tx3 = binascii.hexlify(tx3_modified.serialize()).decode('utf-8')
        tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
        assert(is_opt_in(self.nodes[0], txid_3))
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        # Tx4 will chain off tx3. Doesn't signal itself, but depends on one
        # that does, so it is replaceable by inheritance.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
        inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.997}
        tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
        assert(not is_opt_in(self.nodes[1], txid_4))
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        # Replace tx3, and check that tx4 becomes unknown
        tx3_b = tx3_modified
        # Bump the fee by 0.004 BTC. Use integer satoshi arithmetic: the
        # previous float subtraction turned nValue into a float, which only
        # serialized through struct.pack via deprecated implicit truncation.
        tx3_b.vout[0].nValue -= int(0.004 * 100000000)
        tx3_b = binascii.hexlify(tx3_b.serialize()).decode('utf-8')
        tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
        assert(is_opt_in(self.nodes[0], txid_3b))
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        # Check gettransaction as well:
        for n in self.nodes[0:2]:
            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
        # After mining a transaction, it's no longer BIP125-replaceable
        self.nodes[0].generate(1)
        assert(txid_3b not in self.nodes[0].getrawmempool())
        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
    # Entry point: run the functional test through the framework harness.
    ListTransactionsTest().main()
| 49.197248 | 112 | 0.597762 |
d88e91b462c18511fcb9705abf3bb11457c02caa | 601 | py | Python | src/main.py | h3nnn4n/odeen | 3a7fe0b717bbdc9d04a9e78f51ec09846c381b6d | [
"MIT"
] | null | null | null | src/main.py | h3nnn4n/odeen | 3a7fe0b717bbdc9d04a9e78f51ec09846c381b6d | [
"MIT"
] | null | null | null | src/main.py | h3nnn4n/odeen | 3a7fe0b717bbdc9d04a9e78f51ec09846c381b6d | [
"MIT"
] | null | null | null | from de import DE
from problem_data import ProblemData
from config import Config
def test(self):
    """Before-eval callback: print the candidate score and the population best."""
    current_score = self.score
    best_score = self.config.population.best_score
    print('%6.2f %6.2f' % (current_score, best_score))
if __name__ == "__main__":
    # Build a run configuration: 5-dimensional Rosenbrock with a budget of
    # 10000 function evaluations.
    # NOTE(review): `size` presumably sets the DE population size — confirm
    # against the Config class.
    config = Config()
    config.size = 100
    config.dimensions = 5
    config.set_function_evaluations_budget(10000)
    problem_data = ProblemData(
        pname='Rosenbrock',
        n_dimensions=config.dimensions
    )
    config.problem = problem_data
    # Install the progress-printing callback, run, and report the best score.
    de = DE(config=config)
    de.set_before_eval_callback(test)
    de.run()
    print(de.population.best_score)
| 20.033333 | 49 | 0.667221 |
71c7397a9aa9b39fdf9e024d5ca5dfdc737b974f | 1,820 | py | Python | 0673.GCBA-HOTEL_STAFF.py | alphacastio/connectors-gcba | d1b97fb851463694ea844b3b81402c3ea747863b | [
"MIT"
] | 1 | 2021-11-19T21:37:01.000Z | 2021-11-19T21:37:01.000Z | 0673.GCBA-HOTEL_STAFF.py | alphacastio/connectors-gcba | d1b97fb851463694ea844b3b81402c3ea747863b | [
"MIT"
] | null | null | null | 0673.GCBA-HOTEL_STAFF.py | alphacastio/connectors-gcba | d1b97fb851463694ea844b3b81402c3ea747863b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[9]:
import requests
import pandas as pd
from lxml import etree
from bs4 import BeautifulSoup
import datetime
import io
import numpy as np
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[10]:
url1 = "https://www.estadisticaciudad.gob.ar/eyc/wp-content/uploads/2020/11/Eoh_PnoA_0811.xlsx"
df1 = pd.read_excel(url1)
df1[:2] = df1[:2].ffill(1)
df1.columns = "Personal No Asalariado - " + df1.iloc[1] + " - " + df1.iloc[2]
df1 = df1.drop(df1.columns[[1]], axis = 1)
df1 = df1.drop(index=1)
df1 = df1.drop(index=0)
df1 = df1.drop(index=2)
df1 = df1.dropna(subset = [df1.columns[3]])
#df1 = df1.iloc[2: , 3:-2]
#df1 = df1[~df1.iloc[:, 0].astype(str).str.isdigit()]
df1 = df1[df1.columns.dropna()]
df1.index = pd.date_range(start='1/1/2008', periods=len(df1), freq = "QS")
df1.index.name = "Date"
#df1 = df1[df1.columns.drop(list(df1.filter(regex='Participación')))]
df1
# In[11]:
url2 = "https://www.estadisticaciudad.gob.ar/eyc/wp-content/uploads/2018/05/Eoh_PA_0811.xlsx"
df2 = pd.read_excel(url2)
df2[:2] = df2[:2].ffill(1)
df2.columns = "Personal Asalariado - " + df2.iloc[1] + " - " + df2.iloc[2]
df2 = df2.drop(df2.columns[[1]], axis = 1)
df2 = df2.drop(index=1)
df2 = df2.drop(index=0)
df2 = df2.drop(index=2)
df2 = df2.dropna(subset = [df2.columns[3]])
#df2 = df2.iloc[2: , 3:-2]
#df2 = df2[~df2.iloc[:, 0].astype(str).str.isdigit()]
df2 = df2[df2.columns.dropna()]
df2.index = pd.date_range(start='1/1/2008', periods=len(df2), freq = "QS")
df2.index.name = "Date"
df3 = df1.merge(df2, right_index=True, left_index=True)
alphacast.datasets.dataset(7432).upload_data_from_df(df3,
deleteMissingFromDB = True, onConflictUpdateDB = True, uploadIndex=True)
| 27.575758 | 95 | 0.686813 |
1a39ee4b48491a7784db9b304b583e188e934161 | 6,927 | py | Python | src/datadog_api_client/v2/model/logs_list_request_page.py | rchenzheng/datadog-api-client-python | 2e86ac098c6f0c7fdd90ed218224587c0f8eafef | [
"Apache-2.0"
] | null | null | null | src/datadog_api_client/v2/model/logs_list_request_page.py | rchenzheng/datadog-api-client-python | 2e86ac098c6f0c7fdd90ed218224587c0f8eafef | [
"Apache-2.0"
] | null | null | null | src/datadog_api_client/v2/model/logs_list_request_page.py | rchenzheng/datadog-api-client-python | 2e86ac098c6f0c7fdd90ed218224587c0f8eafef | [
"Apache-2.0"
] | null | null | null | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v2.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class LogsListRequestPage(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum-constrained attributes on this model.
    allowed_values = {}
    # Server-side constraint: "limit" may not exceed 1000.
    validations = {
        ("limit",): {
            "inclusive_maximum": 1000,
        },
    }
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "cursor": (str,),  # noqa: E501
            "limit": (int,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    attribute_map = {
        "cursor": "cursor",  # noqa: E501
        "limit": "limit",  # noqa: E501
    }
    _composed_schemas = {}
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_spec_property_naming",
            "_path_to_item",
            "_configuration",
            "_visited_composed_classes",
        ]
    )
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """LogsListRequestPage - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            cursor (str): List following results with a cursor provided in the previous query.. [optional]  # noqa: E501
            limit (int): Maximum number of logs in the response.. [optional] if omitted the server will use the default value of 10  # noqa: E501
        """
        # Pop framework-internal keyword arguments before treating the rest
        # as model attributes.
        _check_type = kwargs.pop("_check_type", True)
        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
        _path_to_item = kwargs.pop("_path_to_item", ())
        _configuration = kwargs.pop("_configuration", None)
        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
                % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # Unknown keys are dropped only when the configuration opts in via
        # discard_unknown_keys and the model accepts no additional properties.
        for var_name, var_value in kwargs.items():
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 40.508772 | 145 | 0.582503 |
93603490c0301a2984a79b3845ee7df0864b79bf | 4,122 | py | Python | tests/Traffic/t_intersection/ppo_continuous.py | maxiaoba/rlk | 3e23473f6bbc59552b6b2bcd97245e024d7ca95d | [
"MIT"
] | 1 | 2021-09-28T21:16:54.000Z | 2021-09-28T21:16:54.000Z | tests/Traffic/t_intersection/ppo_continuous.py | maxiaoba/rlkit | 3e23473f6bbc59552b6b2bcd97245e024d7ca95d | [
"MIT"
] | null | null | null | tests/Traffic/t_intersection/ppo_continuous.py | maxiaoba/rlkit | 3e23473f6bbc59552b6b2bcd97245e024d7ca95d | [
"MIT"
] | null | null | null | import gym
from torch import nn as nn
from rlkit.torch.vpg.ppo import PPOTrainer
from rlkit.torch.policies.tanh_gaussian_policy import TanhGaussianPolicy, MakeDeterministic
from rlkit.torch.networks import Mlp
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.torch_rl_algorithm import TorchOnlineRLAlgorithm
def experiment(variant):
    """Build a PPO agent for the traffic environment and run online training.

    Reads the experiment name from the module-level ``args`` namespace
    created in the ``__main__`` block below.

    Parameters
    ----------
    variant : dict
        Hyper-parameter configuration with ``policy_kwargs``,
        ``trainer_kwargs`` and ``algorithm_kwargs`` entries.
    """
    from traffic.make_env import make_env
    # Separate env instances so exploration rollouts cannot leak state
    # into evaluation rollouts.
    expl_env = make_env(args.exp_name)
    eval_env = make_env(args.exp_name)
    obs_dim = eval_env.observation_space.low.size
    action_dim = eval_env.action_space.low.size
    policy = TanhGaussianPolicy(
        obs_dim=obs_dim,
        action_dim=action_dim,
        **variant['policy_kwargs'],
    )
    vf = Mlp(
        hidden_sizes=[32, 32],
        input_size=obs_dim,
        output_size=1,
    )
    vf_criterion = nn.MSELoss()
    # Evaluation uses the deterministic mean action; exploration samples
    # from the stochastic policy.
    eval_policy = MakeDeterministic(policy)
    expl_policy = policy
    eval_path_collector = MdpPathCollector(
        eval_env,
        eval_policy,
    )
    expl_path_collector = MdpPathCollector(
        expl_env,
        expl_policy,
    )
    trainer = PPOTrainer(
        policy=policy,
        value_function=vf,
        vf_criterion=vf_criterion,
        **variant['trainer_kwargs']
    )
    algorithm = TorchOnlineRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        **variant['algorithm_kwargs']
    )
    # Move all networks to the configured device (CPU by default).
    algorithm.to(ptu.device)
    algorithm.train()
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    # Experiment / logging configuration.
    parser.add_argument('--exp_name', type=str, default='t_intersection_cont')
    parser.add_argument('--log_dir', type=str, default='PPO')
    # Hyper parameters; ``None`` means "use the built-in default below".
    parser.add_argument('--lr', type=float, default=None)
    parser.add_argument('--bs', type=int, default=None)
    parser.add_argument('--epoch', type=int, default=None)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--snapshot_mode', type=str, default="gap_and_last")
    parser.add_argument('--snapshot_gap', type=int, default=500)
    args = parser.parse_args()
    import os.path as osp
    pre_dir = './Data/'+args.exp_name
    # Encode any overridden hyper parameters into the run directory name.
    # NOTE(review): ``if args.lr`` also drops an explicit 0.0 -- assumed
    # intentional since a zero learning rate would disable training anyway.
    main_dir = args.log_dir\
            +(('lr'+str(args.lr)) if args.lr else '')\
            +(('bs'+str(args.bs)) if args.bs else '')
    log_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed))
    # noinspection PyTypeChecker
    variant = dict(
        algorithm_kwargs=dict(
            num_epochs=(args.epoch if args.epoch else 1000),
            num_eval_steps_per_epoch=500,
            num_train_loops_per_epoch=1,
            num_trains_per_train_loop=1,
            num_expl_steps_per_train_loop=(args.bs if args.bs else 500),
            max_path_length=100,
            save_best=True,
        ),
        trainer_kwargs=dict(
            discount=0.99,
            max_path_length=100,
            policy_lr=(args.lr if args.lr else 1e-4),
            vf_lr=(args.lr if args.lr else 1e-3),
        ),
        policy_kwargs=dict(
            hidden_sizes=[32,32],
        ),
    )
    import os
    # Idiomatic, race-free replacement for the isdir()+makedirs() check.
    os.makedirs(log_dir, exist_ok=True)
    # Persist the exact configuration and command line for reproducibility.
    with open(osp.join(log_dir,'variant.json'),'w') as out_json:
        import json
        json.dump(variant,out_json,indent=2)
    import sys
    cmd_input = 'python ' + ' '.join(sys.argv) + '\n'
    with open(osp.join(log_dir, 'cmd_input.txt'), 'a') as f:
        f.write(cmd_input)
    setup_logger(args.exp_name+'/'+main_dir, variant=variant,
        snapshot_mode=args.snapshot_mode, snapshot_gap=args.snapshot_gap,
        log_dir=log_dir)
    import numpy as np
    import torch
    # Seed numpy and torch so runs are repeatable.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # ptu.set_gpu_mode(True) # optionally set the GPU (default=False)
    experiment(variant)
| 34.638655 | 91 | 0.660116 |
b79672e8b0d30ae04431efd48d6c07629595f9a8 | 458 | py | Python | jobsapp/migrations/0017_auto_20210622_2334.py | Sukriti-sood/Recruit-pursuit | 00c75d9101b94d8066c2648af3dd7a646d869df7 | [
"MIT"
] | null | null | null | jobsapp/migrations/0017_auto_20210622_2334.py | Sukriti-sood/Recruit-pursuit | 00c75d9101b94d8066c2648af3dd7a646d869df7 | [
"MIT"
] | 3 | 2021-10-07T14:21:52.000Z | 2021-10-19T16:44:09.000Z | jobsapp/migrations/0017_auto_20210622_2334.py | Sukriti-sood/Recruit-pursuit | 00c75d9101b94d8066c2648af3dd7a646d869df7 | [
"MIT"
] | 1 | 2021-10-12T08:49:30.000Z | 2021-10-12T08:49:30.000Z | # Generated by Django 3.1.2 on 2021-06-22 23:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('jobsapp', '0016_auto_20210622_2325'),
]
operations = [
migrations.RenameModel(
old_name='skillset',
new_name='skillset_table',
),
migrations.AlterModelTable(
name='skillset_table',
table='skillset_table',
),
]
| 20.818182 | 47 | 0.585153 |
89e040e98ef0c255b70f07cd75066534f9c2be52 | 417 | py | Python | myvenv/Scripts/pip-script.py | IngMachine/compiladores | d8cd2cde29af09188037e7627fc63403a322f5c7 | [
"Apache-2.0"
] | null | null | null | myvenv/Scripts/pip-script.py | IngMachine/compiladores | d8cd2cde29af09188037e7627fc63403a322f5c7 | [
"Apache-2.0"
] | null | null | null | myvenv/Scripts/pip-script.py | IngMachine/compiladores | d8cd2cde29af09188037e7627fc63403a322f5c7 | [
"Apache-2.0"
] | null | null | null | #!C:\Users\Lenovo\PycharmProjects\CompiladorWeb\myvenv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.3','console_scripts','pip'
__requires__ = 'pip==9.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.3', 'console_scripts', 'pip')()
)
| 32.076923 | 73 | 0.673861 |
3744089e6937ae1df575e71530c9908949c35829 | 21,211 | py | Python | tests/milvus_doc_test/markdown-link-extractor.py | ReigenAraka/milvus | b2f19ace0e1dcd431a512141f42b748581d4b92d | [
"Apache-2.0"
] | 1 | 2020-04-26T08:16:43.000Z | 2020-04-26T08:16:43.000Z | tests/milvus_doc_test/markdown-link-extractor.py | ReigenAraka/milvus | b2f19ace0e1dcd431a512141f42b748581d4b92d | [
"Apache-2.0"
] | 1 | 2019-11-22T07:07:47.000Z | 2019-11-22T07:07:47.000Z | tests/milvus_doc_test/markdown-link-extractor.py | ReigenAraka/milvus | b2f19ace0e1dcd431a512141f42b748581d4b92d | [
"Apache-2.0"
] | 1 | 2021-04-30T18:54:18.000Z | 2021-04-30T18:54:18.000Z | # -*- coding: utf-8 -*-
# Using Python 3.x
import urllib.request
import urllib.error
from pathlib import Path
import requests
import json
from urllib.parse import urlparse
import markdown
import os
from os.path import join, getsize
from bs4 import BeautifulSoup
import re
from sys import platform
import argparse
class LinksFromMarkdown(object):
    """Walk a repository, pull every hyperlink out of its markdown files and
    dump the result to a JSON file in the report directory.

    Parameters
    ----------
    repository : str
        Root directory scanned for ``*.md`` files (any case of extension).
    """
    def __init__(self, repository):
        # NOTE(review): historical attribute name kept for backward
        # compatibility even though it stores the repository path.
        self.dictionary = repository
    def extract_links_from_markdown(self, repository):
        """Extract all ``<a href>`` targets from every markdown file under
        *repository* and write them to ``extracted_links.json``.

        Returns the path of the written JSON file, which maps each markdown
        file path to the list of links found in it.
        """
        # Bug fix: the original if/elif chain left link_file/dirName unbound
        # on any platform other than linux/linux2/darwin/win32.
        if platform == "win32":
            link_file = "..\\link_reports\\" + "extracted_links.json"
            dirName = "..\\link_reports"
        else:
            # linux, darwin and any other POSIX-like platform.
            link_file = "../link_reports/" + "extracted_links.json"
            dirName = "../link_reports"
        try:
            # Create target Directory
            os.mkdir(dirName)
            print("Directory ", dirName, " Created ")
        except FileExistsError:
            print("Directory ", dirName, " already exists")
        # Collect every markdown file, whatever the case of its extension.
        md_files = []
        for root, _dirs, files in os.walk(repository):
            for file in files:
                if file.lower().endswith(".md"):
                    md_files.append(os.path.join(root, file))
        # Convert each markdown file to HTML and harvest the link targets.
        a_href_list = []
        for md_file in md_files:
            with open(md_file, "r", encoding="utf-8") as f:
                html = markdown.markdown(f.read())
            soup = BeautifulSoup(html, "lxml")
            a_href_list.append([x.get('href') for x in soup.find_all("a")])
        # Map every markdown file to the links extracted from it.
        dictionary = dict(zip(md_files, a_href_list))
        with open(link_file, "w+", encoding="utf-8") as f:
            json.dump(dictionary, f)
        return link_file
class CheckExtractedLinksFromMarkdown(object):
    """Validate the links collected by ``LinksFromMarkdown`` and write a
    detailed HTML report with one result row per link.

    Fixes over the original implementation:
    * ``report_name`` / ``row_code`` can no longer be referenced while
      unbound (unknown platforms, 3xx status codes and unsupported link
      types such as ``mailto:`` are now handled explicitly).
    * HTTP 299 is treated as success and 599 as failure (the exclusive
      ``range()`` upper bounds previously skipped both).
    * Relative ``..\\`` markdown links resolve against the linking file's
      own directory instead of one directory too high.
    * Regex metacharacters in ``#anchor`` names are escaped.
    * The anchor-check failure row no longer emits a doubled ``</td></tr>``.
    * A stray ``</table>`` is no longer written for files with no links.
    """
    def __init__(self, link_file):
        # Path of the JSON file produced by LinksFromMarkdown.
        self.link_file = link_file
    @staticmethod
    def _pass_row(cell, status, key):
        # Green table row for a link that validated successfully.
        return ('<tr class="success" bgcolor="#32CD32"><td>' + cell
                + '</td><td>' + status + '</td><td>' + key + '</td></tr>')
    @staticmethod
    def _fail_row(cell, status, key):
        # Red table row for a link that failed validation.
        return ('<tr class="fail" bgcolor="#FF0000"><td>' + cell
                + '</td><td>' + status + '</td><td>' + key + '</td></tr>')
    @staticmethod
    def _clickable(link):
        # Render an absolute URL as a clickable anchor.
        return '<a href="' + link + '">' + link + '</a>'
    def _check_http_link(self, link, key):
        """Return a result row for an absolute http(s) link."""
        try:
            status_code = requests.get(link, timeout=60).status_code
            if status_code in range(200, 300):  # 2xx -> success family
                if "#" not in link:
                    return self._pass_row(self._clickable(link), str(status_code), key)
                return self._check_http_anchor(link, key, status_code)
            # Anything else (unfollowed redirect, 4xx, 5xx) is a failure.
            return self._fail_row(self._clickable(link), str(status_code), key)
        except requests.exceptions.Timeout as timeout_error:
            print(timeout_error)
            return self._fail_row(self._clickable(link), str(timeout_error), key)
        except requests.exceptions.ConnectionError as connection_error:
            print(connection_error)
            return self._fail_row(self._clickable(link), str(connection_error), key)
        except requests.exceptions.HTTPError as http_error:
            print(http_error)
            return self._fail_row(self._clickable(link), str(http_error), key)
    def _check_http_anchor(self, link, key, status_code):
        """Verify that the ``#fragment`` of an http(s) link exists on the page."""
        parsed = urlparse(link)
        try:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
            req = urllib.request.Request(
                url=str(parsed.scheme + "://" + parsed.netloc + parsed.path),
                headers=headers)
            html_code = urllib.request.urlopen(req, data=None).read()
            soup = BeautifulSoup(html_code.decode("utf-8"), "lxml")
            fragment = str(parsed.fragment)
            # The anchor may be the id of an <a>, a heading of any level,
            # or a <div>.
            tags = ("a", "h1", "h2", "h3", "h4", "h5", "h6", "div")
            if any(soup.find(tag, {"id": fragment}) is not None for tag in tags):
                return self._pass_row(self._clickable(link), str(status_code), key)
            return self._fail_row(
                self._clickable(link),
                str(status_code) + " The URL looks good but the anchor link does not work or is not using an anchor tag.",
                key)
        except urllib.error.HTTPError as http_error:
            return self._fail_row(
                self._clickable(link),
                str(status_code) + " " + str(http_error) + " The URL looks good but the page then returns an HTTP error.",
                key)
        except urllib.error.URLError as url_error:
            return self._fail_row(
                self._clickable(link),
                str(status_code) + " " + str(url_error) + " The URL looks good but the page then returns a URL error.",
                key)
    def _check_md_link(self, link, key):
        """Return a result row for a relative link to another markdown file."""
        # Relative markdown links resolve against the directory of the file
        # that contains them; normpath collapses any ".." components.
        linked_md = os.path.normpath(os.path.join(os.path.dirname(key), link))
        if Path(linked_md).is_file():
            return self._pass_row(link, "The file link looks good.", key)
        return self._fail_row(link, "The file link is broken.", key)
    def _check_inline_anchor(self, link, key):
        """Return a result row for a ``#fragment`` link into the same file."""
        with open(key, "r", encoding="utf-8") as f:
            md_text = f.read()
        fragment = link[1:]
        # Escape the fragment so regex metacharacters cannot break the search.
        reg = re.compile("#\\s*" + re.escape(fragment))
        # Valid if an explicit <a name="..."> exists, or the heading text
        # occurs exactly twice (once in a table of contents, once as the
        # heading itself) -- heuristic kept from the original implementation.
        if '<a name="' + fragment + '">' in md_text or len(re.findall(reg, md_text)) == 2:
            return self._pass_row(link, "The anchor link looks good.", key)
        return self._fail_row(link, "The anchor link is broken.", key)
    def check_extracted_links(self, link_file):
        """Check every link recorded in *link_file* and write the HTML report.

        Returns the path of the generated report file.
        """
        if platform == "win32":
            report_name = "..\\link_reports\\" + "link_validation_report.html"
        else:
            # linux, darwin and any other POSIX-like platform.
            report_name = "../link_reports/" + "link_validation_report.html"
        with open(link_file, "r", encoding="utf-8") as f:
            link_dict = json.loads(f.read())
        # Always start from a fresh report file.
        if Path(report_name).is_file():
            os.remove(report_name)
        with open(report_name, "w+", encoding="utf-8") as f:
            f.write("""<!DOCTYPE html><html><head><meta charset="UTF-8"><title>Link Validation Detailed Report</title></head><body><h1>Link Validation Detailed Report</h1>""")
        invalid_counter = 0
        for key in link_dict.keys():
            links = link_dict.get(key)
            if links == []:
                with open(report_name, "a", encoding="utf-8") as f:
                    f.write("""<h2>Checking links in """ + key)
                    f.write("""<p style="color:green">This markdown file does not contain any links.</p>""")
            else:
                with open(report_name, "a", encoding="utf-8") as f:
                    f.write("""<h2>Checking links in """ + key)
                    f.write("""<table border="1"><tr><th>Link</th><th>Status</th><th>Markdown File</th></tr>""")
                for link in links:
                    if not isinstance(link, str):
                        # Non-string entries indicate a malformed markdown file.
                        invalid_counter = invalid_counter + 1
                        row_code = self._fail_row(
                            "Invalid Link Number " + str(invalid_counter),
                            "This link is not string, which indicates that your MD file may not be well-formed.",
                            key)
                    elif link.startswith("http://") or link.startswith("https://"):
                        row_code = self._check_http_link(link, key)
                    elif link.lower().endswith(".md"):
                        row_code = self._check_md_link(link, key)
                    elif link.startswith("#"):
                        row_code = self._check_inline_anchor(link, key)
                    else:
                        # mailto:, images, etc. previously reused the prior
                        # link's row (unbound ``row_code`` bug); report them.
                        row_code = self._fail_row(link, "Unsupported link type, not checked.", key)
                    with open(report_name, "a", encoding="utf-8") as f:
                        f.write(row_code)
                # Close the table for this markdown file.
                with open(report_name, "a", encoding="utf-8") as f:
                    f.write("</table>")
            print("Completed link checking for " + key)
        with open(report_name, "a", encoding="utf-8") as f:
            f.write("</body></html>")
        print("Completed link checking for all markdown files")
        return report_name
class GenerateReportSummary(object):
    """Condense the detailed link report into a summary HTML page.

    Parameters
    ----------
    report_name : str
        Path of the detailed report produced by
        ``CheckExtractedLinksFromMarkdown.check_extracted_links``.
    """
    def __init__(self, report_name):
        self.report_name = report_name
    def generate_report_summary(self, report_name):
        """Parse *report_name*, count pass/fail rows and write the summary page."""
        # Bug fix: the original if/elif chain left ``summary_name`` unbound
        # on any platform other than linux/linux2/darwin/win32.
        if platform == "win32":
            summary_name = "..\\link_reports\\" + "link_validation_summary.html"
        else:
            # linux, darwin and any other POSIX-like platform.
            summary_name = "../link_reports/" + "link_validation_summary.html"
        # Use BeautifulSoup to read this report and return statistics
        with open(report_name, "r", encoding="utf-8") as f:
            html_code = f.read()
        soup = BeautifulSoup(html_code, "lxml")
        failed_links_rows = soup.find_all("tr", {"class": "fail"})
        fail_count = len(failed_links_rows)
        success_links_rows = soup.find_all("tr", {"class": "success"})
        pass_count = len(success_links_rows)
        # Strip the red background so the summary table uses default styling.
        for failed_links_row in failed_links_rows:
            del failed_links_row.attrs["bgcolor"]
        # Bug fix: guard against an empty report (no links at all), which
        # previously raised ZeroDivisionError when computing the pass rate.
        total = pass_count + fail_count
        pass_rate = str(float(pass_count / total) * 100) if total else "100.0"
        # NOTE(review): ``set(failed_links_rows)`` below relies on bs4 Tag
        # objects being hashable and deduplicates equal rows -- confirm this
        # behaves as intended with the installed bs4 version.
        # Write report summary to another HTML file
        with open(summary_name, "w+", encoding="utf-8") as f:
            f.write(
                """<!DOCTYPE html><html><head><meta charset="UTF-8"><title>Link Validation Report Summary</title></head><body><h1>Link Validation Report Summary</h1>""")
            f.write("""<p><strong>The number of failed links:</strong> """ + str(fail_count) + """. <strong>The number of passed links:</strong> """ + str(pass_count) + """ <strong>Pass rate:</strong> """ + pass_rate + '%')
            f.write("""<p>Click the button to sort the table by parent page:</p>
            <p><button onclick="sortTable()">Sort</button></p>""")
            f.write("""<script>
            function sortTable() {
              var table, rows, switching, i, x, y, shouldSwitch;
              table = document.getElementById("myTable");
              switching = true;
              /*Make a loop that will continue until
              no switching has been done:*/
              while (switching) {
                //start by saying: no switching is done:
                switching = false;
                rows = table.rows;
                /*Loop through all table rows (except the
                first, which contains table headers):*/
                for (i = 1; i < (rows.length - 1); i++) {
                  //start by saying there should be no switching:
                  shouldSwitch = false;
                  /*Get the two elements you want to compare,
                  one from current row and one from the next:*/
                  x = rows[i].getElementsByTagName("TD")[0];
                  y = rows[i + 1].getElementsByTagName("TD")[0];
                  //check if the two rows should switch place:
                  if (x.innerHTML.toLowerCase() > y.innerHTML.toLowerCase()) {
                    //if so, mark as a switch and break the loop:
                    shouldSwitch = true;
                    break;
                  }
                }
                if (shouldSwitch) {
                  /*If a switch has been marked, make the switch
                  and mark that a switch has been done:*/
                  rows[i].parentNode.insertBefore(rows[i + 1], rows[i]);
                  switching = true;
                }
              }
            }
            </script>""")
            f.write(
                """<table id="myTable" border="1"><tr><th>Failed Links</th><th>Status Code</th><th>Parent Page</th></tr>""")
            for failed_link in set(failed_links_rows):
                f.write(str(failed_link))
            f.write(
                """</table><p>""" + """Refer to <a href=\"""" + report_name + """\">this link</a> for detailed report.""" + """</p></body></html>""")
# Create the parser
# Command line interface: a single positional argument pointing at the
# repository that should be scanned for markdown files.
my_parser = argparse.ArgumentParser(description='Check the links for all markdown files of a folder')
# Add the arguments
my_parser.add_argument('Path',
                       metavar='path',
                       type=str,
                       help='The path to the repository that contains all markdown files.')
# Execute the parse_args() method
args = my_parser.parse_args()
repository = args.Path
# Get link JSON file
# Stage 1: walk the repository and dump every markdown link to JSON.
LinksFromMarkdown_Milvus = LinksFromMarkdown(repository)
link_file = LinksFromMarkdown_Milvus.extract_links_from_markdown(repository)
# Generate link validation report
# Stage 2: validate each extracted link and write the detailed HTML report.
CheckExtractedLinksFromMarkdown_Milvus = CheckExtractedLinksFromMarkdown(link_file)
report_name = CheckExtractedLinksFromMarkdown_Milvus.check_extracted_links(link_file)
# Generate report summary
# Stage 3: condense the detailed report into a summary page.
GenerateReportSummary_Milvus = GenerateReportSummary(report_name)
GenerateReportSummary_Milvus.generate_report_summary(report_name)
47ed43d295f5c11f85b434badd9967e22d0479fd | 5,595 | py | Python | nuitka/tools/release/Documentation.py | Mortal/Nuitka | 5150eeff7ff845ed4993c773449cd81b7f127c6b | [
"Apache-2.0"
] | null | null | null | nuitka/tools/release/Documentation.py | Mortal/Nuitka | 5150eeff7ff845ed4993c773449cd81b7f127c6b | [
"Apache-2.0"
] | null | null | null | nuitka/tools/release/Documentation.py | Mortal/Nuitka | 5150eeff7ff845ed4993c773449cd81b7f127c6b | [
"Apache-2.0"
] | null | null | null | # Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Generation of Nuitka documentation.
"""
from __future__ import print_function
import os
import subprocess
import sys
def makeLogoImages():
    """Render the SVG logos to PNG, optimize them, and refresh site favicons."""
    logo_variants = ("Vertical", "Symbol", "Horizontal")
    # Rasterize each SVG logo to a PNG with a transparent background.
    for variant in logo_variants:
        assert os.system(
            "convert -background none doc/Logo/Nuitka-Logo-%s.svg doc/images/Nuitka-Logo-%s.png" % (variant, variant)
        ) == 0
    # Losslessly shrink the rendered images.
    for variant in logo_variants:
        assert os.system("optipng -o2 doc/images/Nuitka-Logo-%s.png" % variant) == 0
    # When the website checkout is present next door, refresh its favicons
    # and Apple touch icons from the symbol logo.
    if os.path.exists("../nikola-site"):
        favicon_targets = (
            ("32x32", "favicon.ico"),
            ("32x32", "favicon.png"),
            ("72x72", "apple-touch-icon-ipad.png"),
            ("144x144", "apple-touch-icon-ipad3.png"),
            ("57x57", "apple-touch-icon-iphone.png"),
            ("114x114", "apple-touch-icon-iphone4.png"),
        )
        for size, filename in favicon_targets:
            assert os.system(
                "convert -resize %s doc/Logo/Nuitka-Logo-Symbol.svg ../nikola-site/files/%s" % (size, filename)
            ) == 0
def checkRstLint(document):
    """Lint *document* as restructured text and abort on any real problem."""
    import restructuredtext_lint  # @UnresolvedImport pylint:disable=I0021,import-error
    print("Checking %r for proper restructed text ..." % document)
    all_results = restructuredtext_lint.lint(open(document).read(), document)
    # Duplicate implicit target warnings are expected and harmless.
    problems = [
        result
        for result in all_results
        if not result.message.startswith("Duplicate implicit target name:")
    ]
    for problem in problems:
        print(problem)
    if problems:
        sys.exit("Error, no lint clean rest.")
    print("OK.")
def makeManpages():
    """Generate the nuitka and nuitka-run manpages via help2man for both
    Python 2 and Python 3 entry points."""
    if not os.path.exists("man"):
        os.mkdir("man")
    def makeManpage(python, suffix):
        # Render both manpages with help2man, driving the actual scripts so
        # the documented options always match the implementation.
        assert subprocess.call(
            """\
help2man -n 'the Python compiler' --no-discard-stderr --no-info \
    --include doc/nuitka-man-include.txt \
    '%s ./bin/nuitka' >doc/nuitka%s.1""" % (python, suffix),
            shell = True
        ) == 0
        assert subprocess.call(
            """\
help2man -n 'the Python compiler' --no-discard-stderr --no-info \
    --include doc/nuitka-man-include.txt \
    '%s ./bin/nuitka-run' >doc/nuitka%s-run.1""" % (python, suffix),
            shell = True
        ) == 0
        # Post-process the generated roff: turn option paragraph headers
        # into ".SS" subsections and repair a help2man mangling of the
        # "--g++-only" option name.
        for manpage in ("doc/nuitka%s.1" % suffix, "doc/nuitka%s-run.1" %suffix):
            manpage_contents = open(manpage).readlines()
            new_contents = []
            mark = False
            for count, line in enumerate(manpage_contents):
                # "mark" means the previous line was an ".IP" introducing a
                # heading line that ends with ":".
                if mark:
                    line = ".SS " + line + ".BR\n"
                    mark = False
                elif line == ".IP\n" and manpage_contents[ count + 1 ].endswith(":\n"):
                    # NOTE(review): indexes count + 1 without a bounds check;
                    # a trailing ".IP" line would raise IndexError -- confirm
                    # help2man never ends its output that way.
                    mark = True
                    continue
                if line == r"\fB\-\-g\fR++\-only" + '\n':
                    line = r"\fB\-\-g\++\-only\fR" + '\n'
                new_contents.append(line)
            open(manpage, 'w').writelines(new_contents)
    makeManpage("python2", "")
    makeManpage("python3", '3')
def createRstPDF(document, args):
    """Render *document* to PDF with rst2pdf, passing *args* through."""
    # On Windows rst2pdf is not on PATH, so use its absolute location.
    if os.name != "nt":
        rst2pdf_binary = "rst2pdf"
    else:
        rst2pdf_binary = r"C:\Python27_32\Scripts\rst2pdf.exe"
    command = "%s %s %s" % (rst2pdf_binary, ' '.join(args), document)
    assert subprocess.call(command, shell = True) == 0, document
def createReleaseDocumentation():
    """Lint the top level documents, then build their PDFs and the manpages."""
    checkReleaseDocumentation()
    for document in ("README.rst", "Developer_Manual.rst", "Changelog.rst"):
        options = []
        # The changelog gets plain styling; everything else uses the page
        # style sheet plus headers and footers.
        if document != "Changelog.rst":
            options += [
                "-s doc/page-styles.txt",
                '--header="###Title### - ###Section###"',
                '--footer="###Title### - page ###Page### - ###Section###"',
            ]
        createRstPDF(document, options)
    # Manpage generation needs help2man, which is not available on Windows.
    if os.name != "nt":
        makeManpages()
def checkReleaseDocumentation():
    """Run the rest lint check over all top level documents."""
    documents = ("README.rst", "Developer_Manual.rst", "Changelog.rst")
    for document in documents:
        checkRstLint(document)
| 33.502994 | 119 | 0.598928 |
bf5f2c27af77bee633c5516b06bd06cfd97a8463 | 8,306 | py | Python | tests/test_app.py | yougov/influxproxy | 107440472c82522c8289ddd5dc69886758668415 | [
"MIT"
] | 1 | 2017-03-06T07:22:16.000Z | 2017-03-06T07:22:16.000Z | tests/test_app.py | yougov/influxproxy | 107440472c82522c8289ddd5dc69886758668415 | [
"MIT"
] | null | null | null | tests/test_app.py | yougov/influxproxy | 107440472c82522c8289ddd5dc69886758668415 | [
"MIT"
] | null | null | null | import json
from unittest.mock import patch
from .base import AppTestCase, asynctest
from influxproxy.configuration import config
from influxproxy.drivers import MalformedDataError
DB_USER = 'testing'
DB_CONF = config['databases'][DB_USER]
class PingTest(AppTestCase):
@asynctest
async def receives_a_pong(self):
response = await self.client.get('/ping')
self.assertEqual(response.status, 200)
content = await response.text()
expected = 'pong'
self.assertEqual(content, expected)
class PreflightTest(AppTestCase):
def setUp(self):
super().setUp()
self.user = DB_USER
self.public_key = DB_CONF['public_key']
self.headers = {
'Origin': DB_CONF['allow_from'][0],
'Access-Control-Request-Method': 'POST',
}
async def do_preflight(self, headers=None):
url = '/metric/{}/{}'.format(self.user, self.public_key)
if headers is not None:
self.headers.update(headers)
return await self.client.options(url, headers=self.headers)
@asynctest
async def sends_a_metric_preflight(self):
response = await self.do_preflight()
self.assertEqual(response.status, 200)
self.assert_control(response, 'Allow-Origin', DB_CONF['allow_from'][0])
self.assert_control(response, 'Allow-Methods', 'POST')
self.assert_control(
response, 'Allow-Headers', 'Content-Type')
self.assert_control(
response, 'Max-Age', str(config['preflight_expiration']))
@asynctest
async def sends_a_metric_preflight_to_generic_database(self):
self.user = 'udp'
self.public_key = config['databases']['udp']['public_key']
origin = 'http://some-unregistered-website.com'
self.headers['Origin'] = origin
response = await self.do_preflight()
self.assertEqual(response.status, 200)
self.assert_control(response, 'Allow-Origin', '*')
self.assert_control(response, 'Allow-Methods', 'POST')
self.assert_control(
response, 'Allow-Headers', 'Content-Type')
self.assert_control(
response, 'Max-Age', str(config['preflight_expiration']))
@asynctest
async def cannot_accept_preflight_if_origin_not_expected(self):
response = await self.do_preflight(headers={
'Origin': 'some-bogus_origin',
})
self.assertEqual(response.status, 403)
@asynctest
async def cannot_accept_preflight_if_wrong_database(self):
self.user = 'bogus-user'
response = await self.do_preflight()
self.assertEqual(response.status, 401)
@asynctest
async def cannot_accept_preflight_if_wrong_public_key(self):
self.public_key = 'bogus-key'
response = await self.do_preflight()
self.assertEqual(response.status, 401)
@asynctest
async def cannot_accept_preflight_if_method_not_expected(self):
response = await self.do_preflight(headers={
'Access-Control-Request-Method': 'GET',
})
self.assertEqual(response.status, 405)
@asynctest
async def cannot_accept_preflight_if_missing_origin(self):
del self.headers['Origin']
response = await self.do_preflight()
self.assertEqual(response.status, 400)
@asynctest
async def cannot_accept_preflight_if_missing_method(self):
del self.headers['Access-Control-Request-Method']
response = await self.do_preflight()
self.assertEqual(response.status, 400)
class MetricPostTest(AppTestCase):
def setUp(self):
super().setUp()
self.origin = DB_CONF['allow_from'][0]
self.points = ['point1', 'point2']
self.data = json.dumps(self.points).encode('utf-8')
self.headers = {
'Content-Type': 'application/json',
'Origin': self.origin,
}
self.set_auth(DB_USER, DB_CONF['public_key'])
def set_auth(self, user, public_key):
self.user = user
self.public_key = public_key
def set_origin(self, origin):
self.origin = origin
self.headers['Origin'] = origin
async def send_metric(self, headers=None):
url = '/metric/{}/{}'.format(self.user, self.public_key)
return await self.client.post(
url, data=self.data, headers=self.headers)
@asynctest
async def sends_metric_to_driver(self):
with patch('influxproxy.app.InfluxDriver') as MockDriver:
driver = MockDriver.return_value
response = await self.send_metric()
self.assertEqual(response.status, 204)
self.assert_control(response, 'Allow-Origin', self.origin)
MockDriver.assert_called_once_with(udp_port=DB_CONF['udp_port'])
driver.write.assert_called_once_with(DB_USER, self.points)
@asynctest
async def sends_metric_to_generic_database(self):
with patch('influxproxy.app.InfluxDriver') as MockDriver:
self.user = 'udp'
self.public_key = config['databases']['udp']['public_key']
origin = 'http://some-unregistered-website.com'
self.headers['Origin'] = origin
driver = MockDriver.return_value
response = await self.send_metric()
self.assertEqual(response.status, 204)
self.assert_control(response, 'Allow-Origin', '*')
MockDriver.assert_called_once_with(
udp_port=config['databases']['udp']['udp_port'])
driver.write.assert_called_once_with(self.user, self.points)
@asynctest
async def cant_send_metric_if_wrong_public_key(self):
with patch('influxproxy.app.InfluxDriver') as MockDriver:
self.set_auth(DB_USER, 'bogus-key')
driver = MockDriver.return_value
response = await self.send_metric()
self.assertEqual(response.status, 401)
self.assertFalse(driver.write.called)
@asynctest
async def cant_send_metric_if_wrong_origin(self):
with patch('influxproxy.app.InfluxDriver') as MockDriver:
self.set_origin('bogus-origin')
driver = MockDriver.return_value
response = await self.send_metric()
self.assertEqual(response.status, 403)
self.assertFalse(driver.write.called)
@asynctest
async def cant_send_metric_if_database_not_found(self):
    # An unknown database name is treated as an auth failure (401), not 404.
    with patch('influxproxy.app.InfluxDriver') as MockDriver:
        self.set_auth('bogus-db', DB_CONF['public_key'])
        driver = MockDriver.return_value

        response = await self.send_metric()

        self.assertEqual(response.status, 401)
        self.assertFalse(driver.write.called)
@asynctest
async def cant_send_metric_if_bad_metric_format(self):
    # A MalformedDataError from the driver maps to a 400 client error.
    with patch('influxproxy.app.InfluxDriver') as MockDriver:
        driver = MockDriver.return_value
        driver.write.side_effect = MalformedDataError('oops...')

        response = await self.send_metric()

        self.assertEqual(response.status, 400)
@asynctest
async def cant_send_metric_if_backend_fails(self):
    # Any unexpected backend exception maps to a 500 server error.
    with patch('influxproxy.app.InfluxDriver') as MockDriver:
        driver = MockDriver.return_value
        driver.write.side_effect = RuntimeError('oops...')

        response = await self.send_metric()

        self.assertEqual(response.status, 500)
class ManualTest(AppTestCase):
    """Tests for the optional /manual-test debugging page."""

    @asynctest
    async def loads_manual_test_page(self):
        # Page is served (200, HTML body) when enabled in the config.
        response = await self.client.get('/manual-test')
        content = await response.text()

        self.assertEqual(response.status, 200)
        self.assertIn('<body', content)

    @asynctest
    async def cannot_load_manual_test_if_not_configured(self):
        # Disabling manual_test_page in the config hides the route (404).
        with patch.dict(config, {'manual_test_page': False}):
            response = await self.client.get('/manual-test')

            self.assertEqual(response.status, 404)
class StaticTest(AppTestCase):
    """Tests for static asset serving."""

    @asynctest
    async def loads_js_file(self):
        # A bundled static JS file is served verbatim.
        response = await self.client.get('/static/js/jquery-3.1.0.min.js')
        content = await response.text()

        self.assertEqual(response.status, 200)
        self.assertIn('jQuery', content)
| 32.83004 | 79 | 0.650614 |
9013eb62561242237c334e0cdd5700a7880b13f0 | 2,727 | py | Python | starepandas/tools/temporal_conversions.py | SpatioTemporal/STAREPandas | cb33d0cd8c61d960cd7e5070d8b56d1a8805edae | [
"MIT"
] | 1 | 2021-01-03T18:17:01.000Z | 2021-01-03T18:17:01.000Z | starepandas/tools/temporal_conversions.py | SpatioTemporal/STAREPandas | cb33d0cd8c61d960cd7e5070d8b56d1a8805edae | [
"MIT"
] | 66 | 2020-09-09T21:15:14.000Z | 2022-03-25T21:30:53.000Z | starepandas/tools/temporal_conversions.py | SpatioTemporal/STAREPandas | cb33d0cd8c61d960cd7e5070d8b56d1a8805edae | [
"MIT"
] | 2 | 2020-07-28T14:17:32.000Z | 2021-01-21T19:24:23.000Z | import astropy.time
import pystare
def tivs_from_timeseries(series, scale='utc', format='datetime64', forward_res=48, reverse_res=48):
    """ Converts a timeseries to temporal index values.

    A timeseries is to be understood as either

    - a pandas.Series of dtype('<M8[ns]') as retrieved by pandas.to_datetime() or
    - a pandas.DatetimeArray
    - a 1D numpy.array of dtype('<M8[ns]')

    The forward_res and reverse_res are STARE temporal resolutions. Their ranges are as follows

    ..tabularcolumns::

    +-------------+----------------------------+
    | Resolutions | Unit                       |
    +=============+============================+
    | 48-39       | Millisecond                |
    +-------------+----------------------------+
    | 38-33       | Second                     |
    +-------------+----------------------------+
    | 32-27       | Minute                     |
    +-------------+----------------------------+
    | 26-22       | Hour                       |
    +-------------+----------------------------+
    | 21-19       | Day-of-week                |
    +-------------+----------------------------+
    | 18-17       | Week-of-month              |
    +-------------+----------------------------+
    | 16-13       | Month-of-year              |
    +-------------+----------------------------+
    | 12-00       | Year                       |
    +-------------+----------------------------+

    Parameters
    -----------
    series: array-like
        the series to be converted to tivs
    scale: str
        time scale (e.g., UTC, TAI, UT1, TDB).
        c.f. `astropy.time#scale <https://docs.astropy.org/en/stable/time/index.html#time-scale>`_
    format: str
        time format. c.f. `astropy.time#format <https://docs.astropy.org/en/stable/time/index.html#format>`_
    forward_res: int. Valid range is 0..48
        The forward resolution (c.f pystare.coarsest_resolution_finer_or_equal_ms())
    reverse_res: int. Valid range is 0..48
        The reverse resolution (c.f. pystare.coarsest_resolution_finer_or_equal_ms())

    Returns
    ----------
    tivs: numpy.array
        STARE temporal index values

    Raises
    ----------
    ValueError
        If *series* is not of dtype datetime64[ns].

    Examples
    ------------
    >>> import pandas
    >>> import starepandas
    >>> dates = ['2021-09-03', '2021-07-17 11:16']
    >>> dates = pandas.to_datetime(dates)
    >>> starepandas.tivs_from_timeseries(dates)
    array([2276059438861267137, 2275939265676325057])
    """
    # Fix: previously raised a bare ValueError() with no message, leaving the
    # caller with no hint of what was wrong; also use `!=` rather than `not ==`.
    if series.dtype != '<M8[ns]':
        raise ValueError(
            "expected a datetime64[ns] series (e.g. from pandas.to_datetime()); "
            "got dtype {}".format(series.dtype)
        )
    times = astropy.time.Time(series, scale=scale, format=format)
    tivs = pystare.from_julian_date(times.jd1, times.jd2, scale=scale, forward_res=forward_res, reverse_res=reverse_res)
    return tivs
| 39.521739 | 120 | 0.482215 |
4594701a8fa8cf014031023783fa8e259a1c535f | 1,801 | py | Python | src/lumigo_tracer/extension/sampler.py | MattBillock/python_tracer | 0bd8ee0400687390820fc0e6c848f04ad71a8a4d | [
"Apache-2.0"
] | null | null | null | src/lumigo_tracer/extension/sampler.py | MattBillock/python_tracer | 0bd8ee0400687390820fc0e6c848f04ad71a8a4d | [
"Apache-2.0"
] | null | null | null | src/lumigo_tracer/extension/sampler.py | MattBillock/python_tracer | 0bd8ee0400687390820fc0e6c848f04ad71a8a4d | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from typing import Optional, List, Union, Dict
from dataclasses import dataclass
import signal
from lumigo_tracer.extension.extension_utils import get_current_cpu_time
DEFAULT_SAMPLING_INTERVAL = 500
@dataclass
class CpuSample:
    """A single CPU-usage measurement over a wall-clock interval."""

    start_time: datetime
    end_time: datetime
    cpu_time: float

    def dump(self) -> Dict[str, Union[float, int]]:
        """Serialize the sample; timestamps become integer epoch milliseconds."""
        def to_ms(moment: datetime) -> int:
            return int(moment.timestamp() * 1000)

        return {
            "start_time": to_ms(self.start_time),
            "end_time": to_ms(self.end_time),
            "cpu_time": self.cpu_time,
        }
class Sampler:
    """Periodically samples process CPU time using a SIGALRM interval timer."""

    def __init__(self):
        # Previous sample's CPU clock reading and wall-clock time;
        # None until the first call to sample().
        self.cpu_last_sample_value: Optional[float] = None
        self.cpu_last_sample_time: Optional[datetime] = None
        self.cpu_samples: List[CpuSample] = []

    def start_sampling(self, interval_ms: int = DEFAULT_SAMPLING_INTERVAL):
        # Reset history, take a baseline sample, then deliver SIGALRM to
        # self.sample every interval_ms milliseconds (setitimer takes seconds).
        self.cpu_samples = []
        self.sample()
        signal.signal(signal.SIGALRM, self.sample)
        signal.setitimer(signal.ITIMER_REAL, interval_ms / 1000, interval_ms / 1000)

    def stop_sampling(self):
        # Cancel the timer, restore the default SIGALRM handler, and take a
        # final closing sample so the last interval is recorded.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, signal.SIG_DFL)
        self.sample()

    def get_samples(self) -> List[CpuSample]:
        return self.cpu_samples

    def sample(self, *args):
        # Signal handler / manual entry point: record the CPU time consumed
        # since the previous call as a CpuSample. *args absorbs the
        # (signum, frame) pair passed when invoked as a SIGALRM handler.
        now = datetime.now()
        current_cpu = get_current_cpu_time()
        if self.cpu_last_sample_time and self.cpu_last_sample_value and current_cpu:
            self.cpu_samples.append(
                CpuSample(
                    start_time=self.cpu_last_sample_time,
                    end_time=now,
                    cpu_time=current_cpu - self.cpu_last_sample_value,
                )
            )
        self.cpu_last_sample_time = now
        self.cpu_last_sample_value = current_cpu
25d62da64c8583675cd96ec2da27f258feaa8342 | 5,235 | py | Python | python/stepspy-current/demo/demo_dynamic.py | changgang/steps | 9b8ea474581885129d1c1a1c3ad40bc8058a7e0a | [
"MIT"
] | 29 | 2019-10-30T07:04:10.000Z | 2022-02-22T06:34:32.000Z | python/stepspy-current/demo/demo_dynamic.py | cuihantao/steps | 60327bf42299cb7117ed5907a931583d7cdf590d | [
"MIT"
] | 1 | 2021-09-25T15:29:59.000Z | 2022-01-05T14:04:18.000Z | python/stepspy-current/demo/demo_dynamic.py | changgang/steps | 9b8ea474581885129d1c1a1c3ad40bc8058a7e0a | [
"MIT"
] | 8 | 2019-12-20T16:13:46.000Z | 2022-03-20T14:58:23.000Z | from stepspy import STEPS, POUCH_CSV
# Demo script: power-flow and dynamic simulation of the IEEE 39-bus system
# with the STEPS toolkit.

# --- toolkit setup ---
simulator = STEPS(is_default = False, log_file = 'test.log')
simulator.info()
simulator.set_toolkit_log_file("newtest.log", log_file_append_mode=False)
simulator.set_parallel_thread_number(1)
simulator.set_dynamic_model_database_capacity(10000000)
max_bus = simulator.get_allowed_maximum_bus_number()
info = "The default maximum bus number is: "+str(max_bus)
print(info)
simulator.set_allowed_maximum_bus_number(10000)
max_bus = simulator.get_allowed_maximum_bus_number()
info = "The default maximum bus number is changed to: "+str(max_bus)
print(info)

# --- load and check network data, export network matrices ---
simulator.load_powerflow_data('IEEE39.raw','PSS/E')
simulator.check_powerflow_data()
simulator.check_network_connectivity()
simulator.build_dynamic_network_Y_matrix()
simulator.save_dynamic_network_Y_matrix('ymatrix_dyn.csv')
simulator.build_network_Z_matrix()
simulator.save_network_Z_matrix('zmatrix_dyn.csv')
nbus = simulator.get_bus_count()
print(nbus)
nline = simulator.get_line_count()
print(nline)
ntrans = simulator.get_transformer_count()
print(ntrans)
nload = simulator.get_load_count()
print(nload)

# --- enumerate devices (bus 0 acts as the "all devices" wildcard) ---
print("here goes all buses")
buses = simulator.get_all_buses()
for bus in buses:
    busname = simulator.get_bus_data(bus, "string", "bus name")
    basevoltage = simulator.get_bus_data(bus, "double", "base voltage in kV")
    print(bus, busname, basevoltage)
print("here goes all lines")
lines = simulator.get_lines_at_bus(0)
for line in lines:
    status_send = simulator.get_line_data(line, "bool", "sending side breaker status")
    status_recv = simulator.get_line_data(line, "bool", "receiving side breaker status")
    r1 = simulator.get_line_data(line, "double", "r1_pu")
    x1 = simulator.get_line_data(line, "double", "x1_pu")
    g1 = simulator.get_line_data(line, "double", "g1_pu")
    b1 = simulator.get_line_data(line, "double", "b1_pu")
    print(line, status_send, status_recv, r1, x1, g1, b1)
print("here goes all transformer")
transes = simulator.get_transformers_at_bus(0)
for trans in transes:
    status_primary = simulator.get_transformer_data(trans, "bool", "primary", "status")
    status_secondary = simulator.get_transformer_data(trans, "bool", "secondary", "status")
    status_tertiary = simulator.get_transformer_data(trans, "bool", "tertiary", "status")
    gm = simulator.get_transformer_data(trans, "double", "transformer", "gm_pu")
    bm = simulator.get_transformer_data(trans, "double", "transformer", "bm_pu")
    print(trans, status_primary, status_secondary, status_tertiary, gm, bm)

# --- solve power flow (Newton-Raphson, flat start) and export results ---
print("here goes solving powerflow")
simulator.set_powerflow_solver_parameter('bool','flat start logic', True)
simulator.solve_powerflow('NR')
simulator.save_powerflow_result('pfresult.csv')
simulator.save_network_matrix('ymatrix.csv')
simulator.save_jacobian_matrix('jacobian.csv')

# --- load dynamic models and inspect generator controllers ---
print("here goes running dynamic simulation")
simulator.set_dynamic_model_database_capacity(1000000)
simulator.load_dynamic_data('IEEE39.dyr','psse')
simulator.check_missing_models()
simulator.check_dynamic_data()
simulator.check_least_dynamic_time_constants()
print("here goes generator dynamic data")
gens = simulator.get_generators_at_bus(0)
for gen in gens:
    gen_model = simulator.get_generator_related_model_name(gen, "GEN")
    avr_model = simulator.get_generator_related_model_name(gen, "avr")
    pss_model = simulator.get_generator_related_model_name(gen, "pss")
    gov_model = simulator.get_generator_related_model_name(gen, "gov")
    pmax = simulator.get_generator_related_model_data(gen, "gov", 'pmax')
    pmin = simulator.get_generator_related_model_data(gen, "gov", 'pmin')
    mbase = simulator.get_generator_data(gen, 'd', "mbase_MVA")
    print(gen, mbase, gen_model, avr_model, pss_model, gov_model, pmax, pmin)
    data = simulator.get_generator_related_model_parameter_pair(gen, "gen")
    print(gen_model, data)

# --- configure the dynamic solver ---
simulator.set_dynamic_simulator_parameter('b','bin export logic',False)
simulator.set_dynamic_simulator_parameter('b','csv export logic',True)
simulator.set_dynamic_simulator_parameter('d','ITERATION ACCELERATOR',1.0)
simulator.set_dynamic_simulator_parameter('d','MAX POWER IMBALANCE IN MVA',0.1)
simulator.set_dynamic_simulator_parameter('i','MAX DAE ITERATION',3)
simulator.set_dynamic_simulator_parameter('i','MIN DAE ITERATION',3)
simulator.set_dynamic_simulator_parameter('i','MAX NETWORK ITERATION',100)
simulator.set_dynamic_simulator_parameter('i','MAX UPDATE ITERATION',3)
simulator.set_dynamic_simulator_parameter('b','AUTOMATIC ACCELERATOR TUNE LOGIC',False)
simulator.set_dynamic_simulator_parameter('b','ANGLE STABILITY SURVEILLANCE LOGIC',False)
simulator.set_dynamic_simulator_parameter('d','ANGLE STABILITY THRESHOLD IN DEG',360.0)
simulator.set_dynamic_simulation_time_step(0.01)
simulator.set_dynamic_simulator_output_file('ieee39')
simulator.prepare_meters('all')

# --- run: 1.0 s steady state, 0.1 s three-phase fault at bus 15, clear,
# --- then continue to 5.0 s ---
simulator.start_dynamic_simulation()
simulator.run_dynamic_simulation_to_time(1.0)
simulator.set_bus_fault(15, 'three phase fault',[0.0, -2e2])
simulator.run_dynamic_simulation_to_time(1.1)
simulator.clear_bus_fault(15, 'three phase fault')
simulator.run_dynamic_simulation_to_time(5.0)
simulator.stop_dynamic_simulation()
6f370f809021da0ae597c2af2beefff3aefd3585 | 820 | py | Python | api/models/initiate_database.py | ethanlong1/xsshunter | ba359c804d6ea9a0a5091546aa5d41caae1d5a4c | [
"MIT"
] | 1,199 | 2016-05-30T06:54:37.000Z | 2022-03-31T13:02:44.000Z | api/models/initiate_database.py | ethanlong1/xsshunter | ba359c804d6ea9a0a5091546aa5d41caae1d5a4c | [
"MIT"
] | 27 | 2016-05-31T08:11:37.000Z | 2021-11-04T14:12:09.000Z | api/models/initiate_database.py | ethanlong1/xsshunter | ba359c804d6ea9a0a5091546aa5d41caae1d5a4c | [
"MIT"
] | 285 | 2016-05-30T12:02:24.000Z | 2022-03-12T15:55:28.000Z | import yaml
try:
with open( '../config.yaml', 'r' ) as f:
settings = yaml.safe_load( f )
except IOError:
print "INITIATEDB: Error reading config.yaml, have you created one? (Hint: Try running ./generate_config.py)"
exit()
from sqlalchemy import create_engine
engine = create_engine('postgresql://' + settings["postgreql_username"] + ':' + settings["postgreql_password"] + '@localhost/' + settings["postgres_db"] + '?client_encoding=utf8', pool_recycle=60, encoding='utf8')
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
from sqlalchemy import Column, Integer, String, func, update, Text, Binary, Boolean, BigInteger, event, select, exc
from sqlalchemy.orm import sessionmaker, scoped_session
Session = scoped_session(sessionmaker(bind=engine))
session = Session()
| 48.235294 | 213 | 0.746341 |
8033c07715fb158414f30948eb5169d3b072a0f6 | 28,696 | py | Python | utils/aux_functions.py | shhommychon/WrongMaskTheFace | 9950988e6fa2ec395af8c2ef0682d47139402181 | [
"MIT"
] | null | null | null | utils/aux_functions.py | shhommychon/WrongMaskTheFace | 9950988e6fa2ec395af8c2ef0682d47139402181 | [
"MIT"
] | null | null | null | utils/aux_functions.py | shhommychon/WrongMaskTheFace | 9950988e6fa2ec395af8c2ef0682d47139402181 | [
"MIT"
] | null | null | null | # Author: aqeelanwar
# Created: 27 April,2020, 10:21 PM
# Email: aqeel.anwar@gatech.edu
from configparser import ConfigParser
import cv2, math, os
from PIL import Image, ImageDraw
from tqdm import tqdm
from utils.read_cfg import read_cfg
from utils.fit_ellipse import *
import random
from utils.create_mask import texture_the_mask, color_the_mask
from imutils import face_utils
import requests
from zipfile import ZipFile
from tqdm import tqdm
import bz2, shutil
def download_dlib_model():
    """Download and extract dlib's 68-point landmark model into dlib_models/.

    Fetches the .bz2 archive from dlib.net, streams it to disk, decompresses
    it to shape_predictor_68_face_landmarks.dat, then deletes the archive.
    """
    print_orderly("Get dlib model", 60)
    dlib_model_link = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
    print("Downloading dlib model...")
    with requests.get(dlib_model_link, stream=True) as r:
        print("Zip file size: ", np.round(len(r.content) / 1024 / 1024, 2), "MB")
        destination = (
            "dlib_models" + os.path.sep + "shape_predictor_68_face_landmarks.dat.bz2"
        )
        # Create the target directory on first use.
        if not os.path.exists(destination.rsplit(os.path.sep, 1)[0]):
            os.mkdir(destination.rsplit(os.path.sep, 1)[0])
        print("Saving dlib model...")
        with open(destination, "wb") as fd:
            for chunk in r.iter_content(chunk_size=32678):
                fd.write(chunk)

    print("Extracting dlib model...")
    with bz2.BZ2File(destination) as fr, open(
        "dlib_models/shape_predictor_68_face_landmarks.dat", "wb"
    ) as fw:
        shutil.copyfileobj(fr, fw)
    print("Saved: ", destination)
    print_orderly("done", 60)

    os.remove(destination)
def get_line(face_landmark, image, type="eye", debug=False):
    """Fit a reference line across the face, plus its perpendicular.

    Depending on *type*, two anchor points are chosen (eye centres shifted to
    the requested facial level, lip extremes, or the nose bridge) and a
    straight line through them is sampled across the image width, together
    with the perpendicular line through their midpoint.

    Parameters
    ----------
    face_landmark : dict
        face_recognition-style landmark dict ("left_eye", "nose_bridge",
        "bottom_lip", ...).
    image : numpy.ndarray
        Face image; its width bounds the sampled lines.
    type : str
        One of "eye", "nose_mid", "nose_tip", "bottom_lip", "top_lip",
        "perp_line", "nose_long".  (NOTE(review): shadows the builtin `type`.)
    debug : bool
        When True, draw the perpendicular line and pop up the image.

    Returns
    -------
    (eye_line, perp_line, left_point, right_point, mid_point)
        Two sampled lines (lists of (x, y) tuples from fit_line) plus the
        anchor points and midpoint used to build them.
    """
    pil_image = Image.fromarray(image)
    d = ImageDraw.Draw(pil_image)
    left_eye = face_landmark["left_eye"]
    right_eye = face_landmark["right_eye"]
    left_eye_mid = np.mean(np.array(left_eye), axis=0)
    right_eye_mid = np.mean(np.array(right_eye), axis=0)
    eye_line_mid = (left_eye_mid + right_eye_mid) / 2

    if type == "eye":
        # Line through the two eye centres.
        left_point = left_eye_mid
        right_point = right_eye_mid
        mid_point = eye_line_mid

    elif type == "nose_mid":
        # Eye line shifted down by half the nose-bridge length.
        nose_length = (
            face_landmark["nose_bridge"][-1][1] - face_landmark["nose_bridge"][0][1]
        )
        left_point = [left_eye_mid[0], left_eye_mid[1] + nose_length / 2]
        right_point = [right_eye_mid[0], right_eye_mid[1] + nose_length / 2]
        # mid_point = (
        #     face_landmark["nose_bridge"][-1][1] + face_landmark["nose_bridge"][0][1]
        # ) / 2
        mid_pointY = (
            face_landmark["nose_bridge"][-1][1] + face_landmark["nose_bridge"][0][1]
        ) / 2
        mid_pointX = (
            face_landmark["nose_bridge"][-1][0] + face_landmark["nose_bridge"][0][0]
        ) / 2
        mid_point = (mid_pointX, mid_pointY)

    elif type == "nose_tip":
        # Eye line shifted down by the full nose-bridge length.
        nose_length = (
            face_landmark["nose_bridge"][-1][1] - face_landmark["nose_bridge"][0][1]
        )
        left_point = [left_eye_mid[0], left_eye_mid[1] + nose_length]
        right_point = [right_eye_mid[0], right_eye_mid[1] + nose_length]
        # NOTE(review): here mid_point is a scalar Y value, unlike the tuples
        # produced by the other branches — callers of this type ignore it.
        mid_point = (
            face_landmark["nose_bridge"][-1][1] + face_landmark["nose_bridge"][0][1]
        ) / 2

    elif type == "bottom_lip":
        # Eye line shifted down to the lowest point of the bottom lip.
        bottom_lip = face_landmark["bottom_lip"]
        bottom_lip_mid = np.max(np.array(bottom_lip), axis=0)
        shiftY = bottom_lip_mid[1] - eye_line_mid[1]
        left_point = [left_eye_mid[0], left_eye_mid[1] + shiftY]
        right_point = [right_eye_mid[0], right_eye_mid[1] + shiftY]
        mid_point = bottom_lip_mid

    elif type == "top_lip":
        # Same construction as "bottom_lip" but anchored on the top lip.
        bottom_lip = face_landmark["top_lip"]  # only this line is changed
        bottom_lip_mid = np.max(np.array(bottom_lip), axis=0)
        shiftY = bottom_lip_mid[1] - eye_line_mid[1]
        left_point = [left_eye_mid[0], left_eye_mid[1] + shiftY]
        right_point = [right_eye_mid[0], right_eye_mid[1] + shiftY]
        mid_point = bottom_lip_mid

    elif type == "perp_line":
        # Vertical face line from the top of the nose bridge to the lip centre.
        bottom_lip = face_landmark["bottom_lip"]
        bottom_lip_mid = np.mean(np.array(bottom_lip), axis=0)

        left_point = eye_line_mid
        # NOTE(review): the assignment above is immediately overwritten —
        # dead store kept for fidelity.
        left_point = face_landmark["nose_bridge"][0]
        right_point = bottom_lip_mid

        mid_point = bottom_lip_mid

    elif type == "nose_long":
        # Line along the nose bridge itself.
        nose_bridge = face_landmark["nose_bridge"]
        left_point = [nose_bridge[0][0], nose_bridge[0][1]]
        right_point = [nose_bridge[-1][0], nose_bridge[-1][1]]

        mid_point = left_point

    # d.line(eye_mid, width=5, fill='red')
    y = [left_point[1], right_point[1]]
    x = [left_point[0], right_point[0]]
    # cv2.imshow('h', image)
    # cv2.waitKey(0)
    eye_line = fit_line(x, y, image)
    d.line(eye_line, width=5, fill="blue")
    # Perpendicular Line through the midpoint:
    # (midX, midY) and (midX - y2 + y1, midY + x2 - x1)
    y = [
        (left_point[1] + right_point[1]) / 2,
        (left_point[1] + right_point[1]) / 2 + right_point[0] - left_point[0],
    ]
    x = [
        (left_point[0] + right_point[0]) / 2,
        (left_point[0] + right_point[0]) / 2 - right_point[1] + left_point[1],
    ]
    perp_line = fit_line(x, y, image)
    if debug:
        d.line(perp_line, width=5, fill="red")
        pil_image.show()
    return eye_line, perp_line, left_point, right_point, mid_point
def get_points_on_chin(line, face_landmark, chin_type="chin"):
    """Collect the intersections of *line* with consecutive chin-contour segments.

    Walks the chin contour pairwise and returns every (x, y) point at which
    *line* crosses one of its segments, in contour order.
    """
    contour = face_landmark[chin_type]
    hits = []
    for first, second in zip(contour, contour[1:]):
        segment = ([first[0], first[1]], [second[0], second[1]])
        found, x, y = line_intersection(line, segment)
        if found:
            hits.append((x, y))
    return hits
def plot_lines(face_line, image, debug=False):
    """Debug helper: overlay *face_line* on a copy of *image* and display it.

    Does nothing unless debug=True.  Fix: previously the PIL conversion
    (Image.fromarray) ran unconditionally even when nothing was drawn,
    wasting a full image copy on every call; all work is now guarded.
    """
    if debug:
        pil_image = Image.fromarray(image)
        d = ImageDraw.Draw(pil_image)
        d.line(face_line, width=4, fill="white")
        pil_image.show()
def line_intersection(line1, line2):
    """Intersect the line through line1's end points with segment line2.

    *line1* is any sequence of points (only its first and last are used);
    *line2* is a pair of points.  Returns ``(hit, x, y)`` where *hit* is True
    when the infinite line through line1 crosses line2 within line2's
    bounding box (with a 1-pixel tolerance).  Parallel lines yield
    ``(False, [], [])`` — the original "empty" sentinel is preserved.
    """
    (ax, ay), (bx, by) = (line1[0][0], line1[0][1]), (line1[-1][0], line1[-1][1])
    (cx, cy), (ex, ey) = line2

    def cross(p, q):
        # 2-D cross product (determinant of the 2x2 matrix [p; q]).
        return p[0] * q[1] - p[1] * q[0]

    xdiff = (ax - bx, cx - ex)
    ydiff = (ay - by, cy - ey)
    denom = cross(xdiff, ydiff)
    if denom == 0:
        return False, [], []

    dets = (cross((ax, ay), (bx, by)), cross((cx, cy), (ex, ey)))
    x = cross(dets, xdiff) / denom
    y = cross(dets, ydiff) / denom

    lo_x, hi_x = min(cx, ex), max(cx, ex)
    lo_y, hi_y = min(cy, ey), max(cy, ey)
    hit = (lo_x - 1 <= x <= hi_x + 1) and (lo_y - 1 <= y <= hi_y + 1)
    return hit, x, y
def fit_line(x, y, image):
    """Least-squares line through the points (x, y), sampled across the image.

    Returns a list of 50 (x, y) tuples spanning x = 0 .. image.shape[1].
    A perfectly vertical point pair is nudged by 0.1 px so polyfit stays
    well-conditioned (note: this mutates the caller's x list, as before).
    """
    if x[0] == x[1]:
        x[0] += 0.1
    line_fn = np.poly1d(np.polyfit(x, y, 1))
    xs = np.linspace(0, image.shape[1], 50)
    return [(px, py) for px, py in zip(xs, line_fn(xs))]
def get_six_points(face_landmark, image):
    """Locate the six anchor points for warping a mask onto the lower face.

    Returns (six_points, angle): a float32 array of the points
    [face_a, face_b, face_c, face_f, face_e, face_d] (left/mid/right at the
    nose level, then right/mid/left at the chin level) and the face-rotation
    angle in degrees derived from the nose line vs. the perpendicular line.
    """
    _, perp_line1, _, _, m = get_line(face_landmark, image, type="nose_mid")
    face_b = m

    perp_line, _, _, _, _ = get_line(face_landmark, image, type="perp_line")
    points1 = get_points_on_chin(perp_line1, face_landmark)
    points = get_points_on_chin(perp_line, face_landmark)
    # Bottom-of-chin point: average both perpendiculars' chin hits when
    # available, otherwise fall back to whichever one intersected.
    if not points1:
        face_e = tuple(np.asarray(points[0]))
    elif not points:
        face_e = tuple(np.asarray(points1[0]))
    else:
        face_e = tuple((np.asarray(points[0]) + np.asarray(points1[0])) / 2)
    # face_e = points1[0]
    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="nose_long")

    angle = get_angle(perp_line, nose_mid_line)
    # print("angle: ", angle)
    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="nose_tip")
    points = get_points_on_chin(nose_mid_line, face_landmark)
    if len(points) < 2:
        # Nose-level line missed the detected chin: extrapolate the chin as
        # an ellipse, then finally fall back to the raw chin end points.
        face_landmark = get_face_ellipse(face_landmark)
        # print("extrapolating chin")
        points = get_points_on_chin(
            nose_mid_line, face_landmark, chin_type="chin_extrapolated"
        )
        if len(points) < 2:
            points = []
            points.append(face_landmark["chin"][0])
            points.append(face_landmark["chin"][-1])
    face_a = points[0]
    face_c = points[-1]
    # cv2.imshow('j', image)
    # cv2.waitKey(0)
    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="bottom_lip")
    points = get_points_on_chin(nose_mid_line, face_landmark)
    face_d = points[0]
    face_f = points[-1]

    six_points = np.float32([face_a, face_b, face_c, face_f, face_e, face_d])

    return six_points, angle
def get_six_points_chin(face_landmark, image):
    """Incorrectly mask over chin.

    Same construction as get_six_points(), then shifts the anchor points
    downward so the warped mask covers only the chin (a deliberately wrong
    mask placement for training data generation).
    """
    _, perp_line1, _, _, m = get_line(face_landmark, image, type="nose_mid")
    face_b = m

    perp_line, _, _, _, _ = get_line(face_landmark, image, type="perp_line")
    points1 = get_points_on_chin(perp_line1, face_landmark)
    points = get_points_on_chin(perp_line, face_landmark)
    if not points1:
        face_e = tuple(np.asarray(points[0]))
    elif not points:
        face_e = tuple(np.asarray(points1[0]))
    else:
        face_e = tuple((np.asarray(points[0]) + np.asarray(points1[0])) / 2)
    # face_e = points1[0]
    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="nose_long")

    angle = get_angle(perp_line, nose_mid_line)
    # print("angle: ", angle)
    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="nose_tip")
    points = get_points_on_chin(nose_mid_line, face_landmark)
    if len(points) < 2:
        face_landmark = get_face_ellipse(face_landmark)
        # print("extrapolating chin")
        points = get_points_on_chin(
            nose_mid_line, face_landmark, chin_type="chin_extrapolated"
        )
        if len(points) < 2:
            points = []
            points.append(face_landmark["chin"][0])
            points.append(face_landmark["chin"][-1])
    face_a = points[0]
    face_c = points[-1]

    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="bottom_lip")
    points = get_points_on_chin(nose_mid_line, face_landmark)
    face_d = points[0]
    face_f = points[-1]

    six_points = np.float32([face_a, face_b, face_c, face_f, face_e, face_d])

    # Collapse the top edge toward the lip line and pull the middle row
    # toward the chin so the mask sits over the chin only.
    six_points[1][0], six_points[1][1] = (six_points[5][0] + six_points[3][0]) / 2, (six_points[5][1] + six_points[3][1]) / 2
    six_points[1][0], six_points[1][1] = (six_points[1][0]*5 + six_points[4][0]) / 6, (six_points[1][1]*5 + six_points[4][1]) / 6
    six_points[0][0], six_points[0][1], six_points[2][0], six_points[2][1] = six_points[5][0], six_points[5][1], six_points[3][0], six_points[3][1]
    new_d_x, new_f_x = (six_points[5][0] + six_points[4][0]*2) / 3, (six_points[3][0] + six_points[4][0]*2) / 3
    new_d_y, new_f_y = (six_points[5][1] + six_points[4][1]*3) / 4, (six_points[3][1] + six_points[4][1]*3) / 4
    six_points[5][0], six_points[5][1], six_points[3][0], six_points[3][1] = new_d_x, new_d_y, new_f_x, new_f_y

    return six_points, angle
def get_six_points_nose(face_landmark, image):
    """Incorrectly mask over nose.

    Same construction as get_six_points(), but the top anchor is raised
    toward the eyes and the bottom anchor toward the upper lip, then the
    rows are flattened so the warped mask sits over the nose only
    (a deliberately wrong mask placement for training data generation).
    """
    _, perp_line1, _, _, _ = get_line(face_landmark, image, type="nose_mid")
    # Top anchor starts at the midpoint between the two eye centres.
    face_b = (np.mean(np.array(face_landmark["left_eye"]), axis=0) + np.mean(np.array(face_landmark["right_eye"]), axis=0)) / 2

    perp_line, _, _, _, _ = get_line(face_landmark, image, type="perp_line")
    points1 = get_points_on_chin(perp_line1, face_landmark)
    points = get_points_on_chin(perp_line, face_landmark)
    if not points1:
        face_e = tuple(np.asarray(points[0]))
    elif not points:
        face_e = tuple(np.asarray(points1[0]))
    else:
        face_e = tuple((np.asarray(points[0]) + np.asarray(points1[0])) / 2)

    _, _, _, _, m = get_line(face_landmark, image, type="nose_mid")
    nose_mid_e = m
    _, _, _, _, m = get_line(face_landmark, image, type="top_lip")
    top_lip_e = m

    # Squeeze the vertical extent around the nose.
    face_b = (nose_mid_e + face_b) / 2
    face_e = (nose_mid_e + top_lip_e) / 2
    # face_e = points1[0]
    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="nose_long")

    angle = get_angle(perp_line, nose_mid_line)
    # print("angle: ", angle)
    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="nose_tip")
    points = get_points_on_chin(nose_mid_line, face_landmark)
    if len(points) < 2:
        face_landmark = get_face_ellipse(face_landmark)
        # print("extrapolating chin")
        points = get_points_on_chin(
            nose_mid_line, face_landmark, chin_type="chin_extrapolated"
        )
        if len(points) < 2:
            points = []
            points.append(face_landmark["chin"][0])
            points.append(face_landmark["chin"][-1])
    face_a = points[0]
    face_c = points[-1]
    # cv2.imshow('j', image)
    # cv2.waitKey(0)
    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="bottom_lip")
    points = get_points_on_chin(nose_mid_line, face_landmark)
    face_d = points[0]
    face_f = points[-1]

    six_points = np.float32([face_a, face_b, face_c, face_f, face_e, face_d])
    # Flatten both rows: side points share the Y of their row's mid point.
    six_points[0][1] = six_points[1][1]
    six_points[2][1] = six_points[1][1]
    six_points[5][1] = six_points[4][1]
    six_points[3][1] = six_points[4][1]

    return six_points, angle
def get_six_points_eye(face_landmark, image):
    """Incorrectly mask over eye.

    Same construction as get_six_points(), but the top anchor is placed at
    the eyebrow midpoint and the bottom anchor at the nose middle, then the
    rows are flattened so the warped mask covers the eye region
    (a deliberately wrong mask placement for training data generation).
    """
    _, perp_line1, _, _, _ = get_line(face_landmark, image, type="nose_mid")
    # Top anchor: midpoint between the two eyebrow centres.
    face_b = (np.mean(np.array(face_landmark["left_eyebrow"]), axis=0) + np.mean(np.array(face_landmark["right_eyebrow"]), axis=0)) / 2

    perp_line, _, _, _, _ = get_line(face_landmark, image, type="perp_line")
    points1 = get_points_on_chin(perp_line1, face_landmark)
    points = get_points_on_chin(perp_line, face_landmark)
    if not points1:
        face_e = tuple(np.asarray(points[0]))
    elif not points:
        face_e = tuple(np.asarray(points1[0]))
    else:
        face_e = tuple((np.asarray(points[0]) + np.asarray(points1[0])) / 2)

    # Bottom anchor: the nose-middle point overrides the chin-based face_e.
    _, _, _, _, m = get_line(face_landmark, image, type="nose_mid")
    face_e = m
    # face_e = points1[0]
    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="nose_long")

    angle = get_angle(perp_line, nose_mid_line)
    # print("angle: ", angle)
    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="nose_tip")
    points = get_points_on_chin(nose_mid_line, face_landmark)
    if len(points) < 2:
        face_landmark = get_face_ellipse(face_landmark)
        # print("extrapolating chin")
        points = get_points_on_chin(
            nose_mid_line, face_landmark, chin_type="chin_extrapolated"
        )
        if len(points) < 2:
            points = []
            points.append(face_landmark["chin"][0])
            points.append(face_landmark["chin"][-1])
    face_a = points[0]
    face_c = points[-1]
    # cv2.imshow('j', image)
    # cv2.waitKey(0)
    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="bottom_lip")
    points = get_points_on_chin(nose_mid_line, face_landmark)
    face_d = points[0]
    face_f = points[-1]

    six_points = np.float32([face_a, face_b, face_c, face_f, face_e, face_d])
    # Flatten both rows: side points share the Y of their row's mid point.
    six_points[0][1] = six_points[1][1]
    six_points[2][1] = six_points[1][1]
    six_points[5][1] = six_points[4][1]
    six_points[3][1] = six_points[4][1]

    return six_points, angle
def get_angle(line1, line2):
    """Signed angle in degrees between *line2* and *line1*.

    Each line is a sequence of points; only the first and last points are
    used.  Each line's direction is normalized into [0, 180) before the
    difference is taken, mirroring the original behavior.

    Improvement: the identical normalization code that was duplicated for
    both lines is factored into a single private helper.
    """
    def _line_angle(line):
        # Angle of the segment line[0] -> line[-1], folded into [0, 180).
        delta_y = line[-1][1] - line[0][1]
        delta_x = line[-1][0] - line[0][0]
        angle = math.degrees(math.atan2(delta_y, delta_x))
        if delta_x < 0:
            angle = angle + 180
        if angle < 0:
            angle += 360
        if angle > 180:
            angle -= 180
        return angle

    return _line_angle(line2) - _line_angle(line1)
def mask_face(image, face_location, six_points, angle, args, type="surgical"):
    """Warp a mask template onto the face and blend it into the image.

    Parameters
    ----------
    image : numpy.ndarray
        Full BGR image containing the face.
    face_location : tuple
        (top, right, bottom, left) face bounding box.
    six_points : numpy.ndarray
        The six warp anchors from get_six_points*().
    angle : float
        Face rotation angle; beyond +/-13 degrees a side-specific template
        ("_left"/"_right" variant) is used.
    args : argparse.Namespace
        Needs .pattern/.pattern_weight and .color/.color_weight.
    type : str
        Mask template name; "empty"/"inpaint" remove the face region instead.
        (NOTE(review): shadows the builtin `type` and is mutated below.)

    Returns
    -------
    (out_img, mask)
        The masked image and the warped alpha mask.
    """
    debug = False

    # Find the face angle
    threshold = 13
    if angle < -threshold:
        type += "_right"
    elif angle > threshold:
        type += "_left"

    face_height = face_location[2] - face_location[0]
    face_width = face_location[1] - face_location[3]
    # image = image_raw[
    #     face_location[0]-int(face_width/2): face_location[2]+int(face_width/2),
    #     face_location[3]-int(face_height/2): face_location[1]+int(face_height/2),
    #     :,
    # ]
    # cv2.imshow('win', image)
    # cv2.waitKey(0)
    # Read appropriate mask image
    w = image.shape[0]
    h = image.shape[1]
    if not "empty" in type and not "inpaint" in type:
        cfg = read_cfg(config_filename="masks/masks.cfg", mask_type=type, verbose=False)
    else:
        # "empty"/"inpaint" still need a template's geometry for the warp;
        # reuse the surgical_blue config of the matching orientation.
        # NOTE(review): `str` shadows the builtin str here.
        if "left" in type:
            str = "surgical_blue_left"
        elif "right" in type:
            str = "surgical_blue_right"
        else:
            str = "surgical_blue"
        cfg = read_cfg(config_filename="masks/masks.cfg", mask_type=str, verbose=False)
    img = cv2.imread(cfg.template, cv2.IMREAD_UNCHANGED)

    # Process the mask if necessary
    if args.pattern:
        # Apply pattern to mask
        img = texture_the_mask(img, args.pattern, args.pattern_weight)

    if args.color:
        # Apply color to mask
        img = color_the_mask(img, args.color, args.color_weight)

    mask_line = np.float32(
        [cfg.mask_a, cfg.mask_b, cfg.mask_c, cfg.mask_f, cfg.mask_e, cfg.mask_d]
    )
    # Warp the mask template's six anchors onto the face's six points.
    M, mask = cv2.findHomography(mask_line, six_points)
    dst_mask = cv2.warpPerspective(img, M, (h, w))
    dst_mask_points = cv2.perspectiveTransform(mask_line.reshape(-1, 1, 2), M)
    mask = dst_mask[:, :, 3]
    face_height = face_location[2] - face_location[0]
    face_width = face_location[1] - face_location[3]
    image_face = image[
        face_location[0] + int(face_height / 2) : face_location[2],
        face_location[3] : face_location[1],
        :,
    ]

    # NOTE(review): the crop above is immediately discarded — brightness and
    # saturation are matched against the whole image instead.
    image_face = image

    # Adjust Brightness
    mask_brightness = get_avg_brightness(img)
    img_brightness = get_avg_brightness(image_face)
    delta_b = 1 + (img_brightness - mask_brightness) / 255
    dst_mask = change_brightness(dst_mask, delta_b)

    # Adjust Saturation
    mask_saturation = get_avg_saturation(img)
    img_saturation = get_avg_saturation(image_face)
    delta_s = 1 - (img_saturation - mask_saturation) / 255
    dst_mask = change_saturation(dst_mask, delta_s)

    # Apply mask: background where the alpha is empty, warped mask elsewhere.
    mask_inv = cv2.bitwise_not(mask)
    img_bg = cv2.bitwise_and(image, image, mask=mask_inv)
    img_fg = cv2.bitwise_and(dst_mask, dst_mask, mask=mask)
    out_img = cv2.add(img_bg, img_fg[:, :, 0:3])
    if "empty" in type or "inpaint" in type:
        out_img = img_bg
    # Plot key points
    if "inpaint" in type:
        out_img = cv2.inpaint(out_img, mask, 3, cv2.INPAINT_TELEA)
        # dst_NS = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)

    if debug:
        for i in six_points:
            cv2.circle(out_img, (i[0], i[1]), radius=4, color=(0, 0, 255), thickness=-1)

        for i in dst_mask_points:
            cv2.circle(
                out_img, (i[0][0], i[0][1]), radius=4, color=(0, 255, 0), thickness=-1
            )

    return out_img, mask
def draw_landmarks(face_landmarks, image):
    """Render every facial-feature polyline in *face_landmarks* on *image* and show it."""
    canvas = Image.fromarray(image)
    pen = ImageDraw.Draw(canvas)
    for feature_points in face_landmarks.values():
        pen.line(feature_points, width=5, fill="white")
    canvas.show()
def get_face_ellipse(face_landmark):
    """Fit an ellipse to the chin contour and append an extrapolated chin arc.

    Stores the sampled ellipse points under the "chin_extrapolated" key of
    *face_landmark* and returns the (mutated) dict.  Used when a face line
    misses the detected chin contour.
    """
    chin_points = face_landmark["chin"]
    x = np.asarray([p[0] for p in chin_points])
    y = np.asarray([p[1] for p in chin_points])

    coeffs = fitEllipse(x, y)
    center = ellipse_center(coeffs)
    phi = ellipse_angle_of_rotation(coeffs)
    a, b = ellipse_axis_length(coeffs)

    # Sample slightly more than a full ellipse (2.2*pi) in 0.2-rad steps.
    arc = 2.2
    R = np.arange(0, arc * np.pi, 0.2)
    xx = center[0] + a * np.cos(R) * np.cos(phi) - b * np.sin(R) * np.sin(phi)
    yy = center[1] + a * np.cos(R) * np.sin(phi) + b * np.sin(R) * np.cos(phi)

    face_landmark["chin_extrapolated"] = list(zip(xx, yy))
    return face_landmark
def get_avg_brightness(img):
    """Return the mean of the HSV value (brightness) channel of a BGR image."""
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Channel order after the conversion is (hue, saturation, value).
    value_channel = cv2.split(hsv)[2]
    return np.mean(value_channel)
def get_avg_saturation(img):
    """Return the mean of the HSV saturation channel of a BGR image.

    Bug fix: this previously returned ``np.mean(v)`` (the *value*/brightness
    channel), making it identical to ``get_avg_brightness`` and defeating the
    saturation-matching step in the mask compositing above.
    """
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(img_hsv)
    return np.mean(s)
def change_brightness(img, value=1.0):
    """Scale the HSV value (brightness) channel of a BGR image by *value*.

    Results above 255 are clipped; the image is returned in BGR.
    """
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    scaled = val * value
    scaled[scaled > 255] = 255  # clip before narrowing back to uint8
    scaled = np.asarray(scaled, dtype=np.uint8)
    recombined = cv2.merge((hue, sat, scaled))
    return cv2.cvtColor(recombined, cv2.COLOR_HSV2BGR)
def change_saturation(img, value=1.0):
    """Scale the HSV saturation channel of a BGR image by *value*.

    Results above 255 are clipped; the image is returned in BGR.
    """
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    scaled = sat * value
    scaled[scaled > 255] = 255  # clip before narrowing back to uint8
    scaled = np.asarray(scaled, dtype=np.uint8)
    recombined = cv2.merge((hue, scaled, val))
    return cv2.cvtColor(recombined, cv2.COLOR_HSV2BGR)
def check_path(path):
    """Classify *path*, returning an (is_directory, is_file, is_other) triple.

    Exactly one of the three booleans is True.
    """
    if os.path.isdir(path):
        return True, False, False
    if os.path.isfile(path):
        return False, True, False
    return False, False, True
def shape_to_landmarks(shape):
    """Convert a 68-point dlib landmark array into a named-feature dict.

    *shape* is indexable by the standard dlib 68-landmark indices; each
    feature maps to a list of (x, y) tuples. The lip entries deliberately
    repeat the mouth-corner points (48, 54, 60, 64) so each lip forms a
    closed contour.
    """
    # Standard dlib 68-point indexing for each facial feature, in the same
    # key order the original dict was built in.
    feature_indices = {
        "left_eyebrow": range(17, 22),
        "right_eyebrow": range(22, 27),
        "nose_bridge": range(27, 31),
        "nose_tip": range(31, 36),
        "left_eye": range(36, 42),
        "right_eye": range(42, 48),
        "top_lip": [48, 49, 50, 51, 52, 53, 54, 60, 61, 62, 63, 64],
        "bottom_lip": [54, 55, 56, 57, 58, 59, 48, 64, 65, 66, 67, 60],
        "chin": range(0, 17),
    }
    return {
        feature: [tuple(shape[i]) for i in indices]
        for feature, indices in feature_indices.items()
    }
def rect_to_bb(rect):
    """Convert a dlib rectangle to a (top, right, bottom, left) tuple.

    Bug fix: the first element was ``x1`` (left) instead of ``y1`` (top).
    The callers in this file require the (top, right, bottom, left) layout:
    ``mask_image`` computes ``face_height = face_location[2] -
    face_location[0]`` and the compositing code slices columns with
    ``face_location[3]:face_location[1]``.
    """
    x1 = rect.left()
    x2 = rect.right()
    y1 = rect.top()
    y2 = rect.bottom()
    return (y1, x2, y2, x1)
def mask_image(image_path, args):
    """Detect faces in the image at *image_path* and composite a mask on each.

    Args:
        image_path: path readable by ``cv2.imread``.
        args: namespace carrying the face detector/predictor and mask options
            (mask_type, wear_type, color, pattern, code, verbose, ...).
            NOTE: may be mutated (args.color / args.pattern / args.code_count).

    Returns:
        Tuple ``(masked_images, mask, mask_binary_array, original_image)``:
        the masked BGR images, the mask-type name(s) applied, the binary mask
        images, and an untouched copy of the input image.
    """
    # Read the image
    image = cv2.imread(image_path)
    original_image = image.copy()
    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = image  # the detector is run on the color image, not grayscale
    face_locations = args.detector(gray, 1)
    mask_type = args.mask_type
    verbose = args.verbose
    if args.code:
        # Pick a pre-configured (type, color, texture) combination at random
        # and count how often each combination has been used.
        ind = random.randint(0, len(args.code_count) - 1)
        mask_dict = args.mask_dict_of_dict[ind]
        mask_type = mask_dict["type"]
        args.color = mask_dict["color"]
        args.pattern = mask_dict["texture"]
        args.code_count[ind] += 1
    elif mask_type == "random":
        available_mask_types = get_available_mask_types()
        mask_type = random.choice(available_mask_types)
    if verbose:
        tqdm.write("Faces found: {:2d}".format(len(face_locations)))
    # Process each face in the image
    masked_images = []
    mask_binary_array = []
    mask = []
    for (i, face_location) in enumerate(face_locations):
        shape = args.predictor(gray, face_location)
        shape = face_utils.shape_to_np(shape)
        face_landmarks = shape_to_landmarks(shape)
        face_location = rect_to_bb(face_location)
        # draw_landmarks(face_landmarks, image)
        # Choose the six anchor points according to where the mask is worn.
        if args.wear_type == "normal":
            six_points_on_face, angle = get_six_points(face_landmarks, image)
        elif args.wear_type == "chin_mask":
            six_points_on_face, angle = get_six_points_chin(face_landmarks, image)
        elif args.wear_type == "nose_mask":
            six_points_on_face, angle = get_six_points_nose(face_landmarks, image)
        elif args.wear_type == "eye_mask":
            six_points_on_face, angle = get_six_points_eye(face_landmarks, image)
        else:
            raise ValueError(args.wear_type)
        mask = []
        if mask_type != "all":
            # Reuse the image masked for the previous face so all faces end
            # up masked in the same output image.
            if len(masked_images) > 0:
                image = masked_images.pop(0)
            image, mask_binary = mask_face(
                image, face_location, six_points_on_face, angle, args, type=mask_type
            )
            # compress to face tight
            # NOTE(review): face_height/face_width are computed but unused here.
            face_height = face_location[2] - face_location[0]
            face_width = face_location[1] - face_location[3]
            masked_images.append(image)
            mask_binary_array.append(mask_binary)
            mask.append(mask_type)
        else:
            # "all": produce one output image per available mask type.
            available_mask_types = get_available_mask_types()
            for m in range(len(available_mask_types)):
                # After the first face, keep compositing onto the image that
                # already carries mask type m.
                if len(masked_images) == len(available_mask_types):
                    image = masked_images.pop(m)
                img, mask_binary = mask_face(
                    image,
                    face_location,
                    six_points_on_face,
                    angle,
                    args,
                    type=available_mask_types[m],
                )
                masked_images.insert(m, img)
                mask_binary_array.insert(m, mask_binary)
            mask = available_mask_types
    cc = 1  # NOTE(review): dead assignment, apparently leftover debug code
    return masked_images, mask, mask_binary_array, original_image
def is_image(path):
    """Return True if *path* ends in a supported image extension (png/jpg).

    Prints a hint and returns False for unsupported extensions; returns
    False (without printing) when *path* is not a sliceable string.

    Fix: the bare ``except:`` also swallowed ``SystemExit`` and
    ``KeyboardInterrupt``; only ``TypeError`` (non-string input) is a
    legitimate failure of the slicing below.
    """
    try:
        extensions = path[-4:]
        image_extensions = ["png", "PNG", "jpg", "JPG"]
        if extensions[1:] in image_extensions:
            return True
        print("Please input image file. png / jpg")
        return False
    except TypeError:
        # *path* does not support slicing (e.g. None or a number).
        return False
def get_available_mask_types(config_filename="masks/masks.cfg"):
    """Return mask type names from the masks config file.

    Section names containing "left" or "right" are half-mask variants and
    are excluded. Option-name case is preserved (optionxform = str).
    """
    parser = ConfigParser()
    parser.optionxform = str
    parser.read(config_filename)
    return [
        section
        for section in parser.sections()
        if "left" not in section and "right" not in section
    ]
def print_orderly(str, n):
    """Print *str* centered between two hyphen rules of roughly width *n*.

    (Parameter name shadows the builtin ``str``; kept for call compatibility.)
    """
    # print("")
    pad = "-" * int((n - len(str)) / 2)
    banner = pad + " " + str + " " + pad
    rule = "-" * len(banner)
    print(rule)
    print(banner)
    print(rule)
def display_MaskTheFace():
    """Print the ASCII-art banner stored in utils/display.txt."""
    with open("utils/display.txt", "r") as banner_file:
        for row in banner_file:
            print(row, end="")
| 33.328688 | 147 | 0.610712 |
2e762d8520c06786fc05f3c727d1aae434f8d89d | 71 | py | Python | integration/keeper_secrets_manager_ansible/tests/__init__.py | inna-btc/secrets-manager | 5c65fea092e80b25d2466b395fa03eabd6a98f9b | [
"MIT"
] | 9 | 2022-01-10T18:39:45.000Z | 2022-03-06T03:51:41.000Z | integration/keeper_secrets_manager_ansible/tests/__init__.py | inna-btc/secrets-manager | 5c65fea092e80b25d2466b395fa03eabd6a98f9b | [
"MIT"
] | 10 | 2022-01-27T00:51:05.000Z | 2022-03-30T08:42:01.000Z | integration/keeper_secrets_manager_ansible/tests/__init__.py | Keeper-Security/secrets-manager | 0044dec7f323ae2e531f52ef2435bd7205949fe9 | [
"MIT"
] | 6 | 2021-12-17T18:59:26.000Z | 2022-03-28T16:47:28.000Z | # This needs to be here or pytest does like to find the test framework
| 35.5 | 70 | 0.774648 |
512d7b449fae9a57b2159a00897df95363f9111f | 5,749 | py | Python | nasbench/lib/model_spec.py | Naruu/nasbench | b94247037ee470418a3e56dcb83814e9be83f3a8 | [
"Apache-2.0"
] | 621 | 2018-12-21T18:59:46.000Z | 2022-03-26T04:33:15.000Z | nasbench/lib/model_spec.py | Naruu/nasbench | b94247037ee470418a3e56dcb83814e9be83f3a8 | [
"Apache-2.0"
] | 29 | 2019-03-02T19:36:00.000Z | 2022-01-13T00:37:58.000Z | nasbench/lib/model_spec.py | Naruu/nasbench | b94247037ee470418a3e56dcb83814e9be83f3a8 | [
"Apache-2.0"
] | 111 | 2018-12-21T19:32:13.000Z | 2022-03-31T01:47:22.000Z | # Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model specification for module connectivity individuals.
This module handles pruning the unused parts of the computation graph but should
avoid creating any TensorFlow models (this is done inside model_builder.py).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from nasbench.lib import graph_util
import numpy as np
# Graphviz is optional and only required for visualization.
try:
import graphviz # pylint: disable=g-import-not-at-top
except ImportError:
pass
class ModelSpec(object):
    """Model specification given adjacency matrix and labeling."""

    def __init__(self, matrix, ops, data_format='channels_last'):
        """Initialize the module spec.

        Args:
          matrix: ndarray or nested list with shape [V, V] for the adjacency matrix.
          ops: V-length list of labels for the base ops used. The first and last
            elements are ignored because they are the input and output vertices
            which have no operations. The elements are retained to keep consistent
            indexing.
          data_format: channels_last or channels_first.

        Raises:
          ValueError: invalid matrix or ops
        """
        if not isinstance(matrix, np.ndarray):
            matrix = np.array(matrix)
        shape = np.shape(matrix)
        if len(shape) != 2 or shape[0] != shape[1]:
            raise ValueError('matrix must be square')
        if shape[0] != len(ops):
            raise ValueError('length of ops must match matrix dimensions')
        if not is_upper_triangular(matrix):
            raise ValueError('matrix must be upper triangular')
        # Both the original and pruned matrices are deep copies of the matrix and
        # ops so any changes to those after initialization are not recognized by the
        # spec.
        self.original_matrix = copy.deepcopy(matrix)
        self.original_ops = copy.deepcopy(ops)
        self.matrix = copy.deepcopy(matrix)
        self.ops = copy.deepcopy(ops)
        # False once _prune determines input and output are disconnected.
        self.valid_spec = True
        self._prune()
        self.data_format = data_format

    def _prune(self):
        """Prune the extraneous parts of the graph.

        General procedure:
          1) Remove parts of graph not connected to input.
          2) Remove parts of graph not connected to output.
          3) Reorder the vertices so that they are consecutive after steps 1 and 2.

        These 3 steps can be combined by deleting the rows and columns of the
        vertices that are not reachable from both the input and output (in reverse).
        """
        num_vertices = np.shape(self.original_matrix)[0]
        # DFS forward from input (vertex 0). Because the matrix is upper
        # triangular, edges only go from lower to higher indices.
        visited_from_input = set([0])
        frontier = [0]
        while frontier:
            top = frontier.pop()
            for v in range(top + 1, num_vertices):
                if self.original_matrix[top, v] and v not in visited_from_input:
                    visited_from_input.add(v)
                    frontier.append(v)
        # DFS backward from output (last vertex), following edges in reverse.
        visited_from_output = set([num_vertices - 1])
        frontier = [num_vertices - 1]
        while frontier:
            top = frontier.pop()
            for v in range(0, top):
                if self.original_matrix[v, top] and v not in visited_from_output:
                    visited_from_output.add(v)
                    frontier.append(v)
        # Any vertex that isn't connected to both input and output is extraneous to
        # the computation graph.
        extraneous = set(range(num_vertices)).difference(
            visited_from_input.intersection(visited_from_output))
        # If the non-extraneous graph is less than 2 vertices, the input is not
        # connected to the output and the spec is invalid.
        if len(extraneous) > num_vertices - 2:
            self.matrix = None
            self.ops = None
            self.valid_spec = False
            return
        # Delete extraneous rows/columns; iterate indices in reverse so that
        # earlier deletions do not shift the positions still to be deleted.
        self.matrix = np.delete(self.matrix, list(extraneous), axis=0)
        self.matrix = np.delete(self.matrix, list(extraneous), axis=1)
        for index in sorted(extraneous, reverse=True):
            del self.ops[index]

    def hash_spec(self, canonical_ops):
        """Computes the isomorphism-invariant graph hash of this spec.

        Args:
          canonical_ops: list of operations in the canonical ordering which they
            were assigned (i.e. the order provided in the config['available_ops']).

        Returns:
          MD5 hash of this spec which can be used to query the dataset.
        """
        # Invert the operations back to integer label indices used in graph gen.
        # Input and output vertices use the sentinel labels -1 and -2.
        labeling = [-1] + [canonical_ops.index(op) for op in self.ops[1:-1]] + [-2]
        return graph_util.hash_module(self.matrix, labeling)

    def visualize(self):
        """Creates a dot graph. Can be visualized in colab directly."""
        num_vertices = np.shape(self.matrix)[0]
        g = graphviz.Digraph()
        g.node(str(0), 'input')
        for v in range(1, num_vertices - 1):
            g.node(str(v), self.ops[v])
        g.node(str(num_vertices - 1), 'output')
        for src in range(num_vertices - 1):
            for dst in range(src + 1, num_vertices):
                if self.matrix[src, dst]:
                    g.edge(str(src), str(dst))
        return g
def is_upper_triangular(matrix):
    """True if matrix is 0 on diagonal and below (strictly upper triangular).

    Replaces the original O(V^2) Python double loop with a single vectorized
    check: np.tril extracts the diagonal and everything below it, and the
    matrix qualifies exactly when that part contains no nonzero entry.
    """
    return not np.tril(matrix).any()
| 34.42515 | 80 | 0.693512 |
e44247ed546240a29e1cfe54cf53d93aca171a13 | 1,309 | py | Python | poc/classes/ContextInstanceBlock.py | bookofproofs/fpl | 527b43b0f8bb3d459ee906e5ed8524a676ce3a2c | [
"MIT"
] | 4 | 2021-11-08T10:09:46.000Z | 2021-11-13T22:25:46.000Z | poc/classes/ContextInstanceBlock.py | bookofproofs/fpl | 527b43b0f8bb3d459ee906e5ed8524a676ce3a2c | [
"MIT"
] | 1 | 2020-09-04T13:02:09.000Z | 2021-06-16T07:07:44.000Z | poc/classes/ContextInstanceBlock.py | bookofproofs/fpl | 527b43b0f8bb3d459ee906e5ed8524a676ce3a2c | [
"MIT"
] | 1 | 2021-11-08T10:10:12.000Z | 2021-11-08T10:10:12.000Z | """
This file was generated by the tool TatSuSAG (the TatSu syntax analyser generator)
Changes to this file may cause incorrect behavior and will be lost if the code is regenerated.
"""
from poc.classes.AuxISourceAnalyser import AuxISourceAnalyser
from poc.classes.AuxInterpretation import AuxInterpretation
from poc.classes.AuxRuleDependencies import AuxRuleDependencies
from poc.classes.AuxSTVarSpecList import AuxSTVarSpecList
class ContextInstanceBlock(AuxInterpretation):
    """Interpretation context for the FPL ``InstanceBlock`` grammar rule.

    Aggregates the previously parsed child rules of the block and captures
    its (optional) variable specification list.
    """

    def __init__(self, i: AuxISourceAnalyser):
        super().__init__(i.ast_info, i.errors)
        # specification list is optional in the grammar and we initialize it in any case
        self.variable_spec = AuxSTVarSpecList()
        self.aggregate_previous_rules(i.parse_list,
                                      AuxRuleDependencies.dep["InstanceBlock"], self.rule_aggregator)

    def rule_aggregator(self, rule: str, parsing_info: AuxInterpretation):
        """Consume one previously parsed rule while aggregating this block."""
        if rule == "LeftBrace":
            # The opening brace marks the start of the block: stop collecting.
            self.stop_aggregation = True
        elif rule == "VariableSpecificationList":
            self.variable_spec = parsing_info.variable_spec  # noqa

    @staticmethod
    def dispatch(i: AuxISourceAnalyser, parsing_info: AuxInterpretation):
        """Create a new interpretation for this rule and push it onto the parse list."""
        new_info = ContextInstanceBlock(i)
        i.parse_list.append(new_info)
| 39.666667 | 101 | 0.734912 |
7c6c1926d4a8383848446bf10e2488be460d90f6 | 2,482 | py | Python | api/desecapi/tests/test_throttling.py | unuseless/desec-stack | 399b64ea87775585b825ecdabcf5ea917269dc90 | [
"MIT"
] | null | null | null | api/desecapi/tests/test_throttling.py | unuseless/desec-stack | 399b64ea87775585b825ecdabcf5ea917269dc90 | [
"MIT"
] | null | null | null | api/desecapi/tests/test_throttling.py | unuseless/desec-stack | 399b64ea87775585b825ecdabcf5ea917269dc90 | [
"MIT"
] | null | null | null | from unittest import mock
import time
from django.core.cache import cache
from django.test import TestCase, override_settings
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.test import APIRequestFactory
def override_rates(rates):
    """Settings override that enables ScopedRatesThrottle with *rates*
    configured for the 'test_scope' throttle scope."""
    throttle_settings = {
        'DEFAULT_THROTTLE_CLASSES': ['desecapi.throttling.ScopedRatesThrottle'],
        'DEFAULT_THROTTLE_RATES': {'test_scope': rates},
    }
    return override_settings(REST_FRAMEWORK=throttle_settings)
class MockView(APIView):
    """Minimal DRF view used only to exercise ScopedRatesThrottle in tests."""

    throttle_scope = 'test_scope'

    @property
    def throttle_classes(self):
        # Need to import here so that the module is only loaded once the settings override is in effect
        from desecapi.throttling import ScopedRatesThrottle
        return (ScopedRatesThrottle,)

    def get(self, request):
        """Trivial GET handler; only the throttling behavior is under test."""
        return Response('foo')
class ThrottlingTestCase(TestCase):
    """
    Based on DRF's test_throttling.py.
    """
    def setUp(self):
        super().setUp()
        self.factory = APIRequestFactory()

    def _test_requests_are_throttled(self, rates, counts):
        """Assert throttling behavior for the given rate strings.

        *counts* is a list of (delay, count) pairs: after advancing the mocked
        clock by the cumulative delay (seconds), *count* requests must succeed
        and the next one must be rejected with HTTP 429.
        """
        cache.clear()  # throttle history is kept in the cache; start fresh
        request = self.factory.get('/')
        with override_rates(rates):
            view = MockView.as_view()

            sum_delay = 0
            for delay, count in counts:
                sum_delay += delay
                # Mock the throttle's clock instead of sleeping.
                with mock.patch('desecapi.throttling.ScopedRatesThrottle.timer', return_value=time.time() + sum_delay):
                    for _ in range(count):
                        response = view(request)
                        self.assertEqual(response.status_code, status.HTTP_200_OK)
                    # One request beyond the allowance must be throttled.
                    response = view(request)
                    self.assertEqual(response.status_code, status.HTTP_429_TOO_MANY_REQUESTS)

    def test_requests_are_throttled_4sec(self):
        self._test_requests_are_throttled(['4/sec'], [(0, 4), (1, 4)])

    def test_requests_are_throttled_4min(self):
        self._test_requests_are_throttled(['4/min'], [(0, 4)])

    def test_requests_are_throttled_multiple(self):
        # With several rates, the most restrictive one wins.
        self._test_requests_are_throttled(['5/s', '4/day'], [(0, 4)])
        self._test_requests_are_throttled(['4/s', '5/day'], [(0, 4)])

    def test_requests_are_throttled_multiple_cascade(self):
        # We test that we can do 4 requests in the first second and only 2 in the second second
        self._test_requests_are_throttled(['4/s', '6/day'], [(0, 4), (1, 2)])
| 37.044776 | 119 | 0.660355 |
f0642460a1e8663ed08e2f4d4cb3d6ca752b3924 | 1,653 | py | Python | lib/model/__init__.py | fretboardfreak/space | 76dd5ea96f2daf9a44cd041bd5adf315d1b8fd1c | [
"Apache-2.0"
] | 1 | 2015-04-13T00:24:01.000Z | 2015-04-13T00:24:01.000Z | lib/model/__init__.py | fretboardfreak/space | 76dd5ea96f2daf9a44cd041bd5adf315d1b8fd1c | [
"Apache-2.0"
] | 17 | 2015-04-13T00:57:16.000Z | 2015-04-13T05:00:34.000Z | lib/model/__init__.py | fretboardfreak/space | 76dd5ea96f2daf9a44cd041bd5adf315d1b8fd1c | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Space Object Model"""
# order imports by least dependent to most dependent
from .coord import Coord, SystemCoord
from .resources import (Resources, ALL_RESOURCES, ORE, METAL, THORIUM,
HYDROCARBON, DEUTERIUM, SUN, ELECTRICITY, TRADE_RATIO,
NotSufficientResourcesError)
from .building import (Mine, SolarPowerPlant, ALL_BUILDINGS, get_building,
get_all_building_names, get_all_building_abbr)
from .planet import Planet
from .system import System
from .galaxy import Galaxy
from .user import User
from .query import ModelQueryMixin
# Public API of the model package.
# Fix: __all__ must contain attribute *names* (strings). The previous code
# listed the objects themselves, which makes `from ... import *` raise
# TypeError ("Item in __all__ must be str").
__all__ = [
    'Coord', 'SystemCoord',
    'Resources', 'ALL_RESOURCES', 'ORE', 'METAL', 'THORIUM',
    'HYDROCARBON', 'DEUTERIUM', 'SUN', 'ELECTRICITY',
    'TRADE_RATIO', 'NotSufficientResourcesError',
    'Mine', 'SolarPowerPlant', 'ALL_BUILDINGS', 'get_building',
    'get_all_building_names', 'get_all_building_abbr',
    'Planet', 'System', 'Galaxy', 'User', 'ModelQueryMixin',
]
| 35.934783 | 78 | 0.739867 |
eace6f43e82e18048a51115f0e578e7ea97b155c | 771 | py | Python | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/install/cmd/shutdown/errors.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | 9 | 2019-11-22T04:58:40.000Z | 2022-02-26T16:47:28.000Z | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/install/cmd/shutdown/errors.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | null | null | null | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/install/cmd/shutdown/errors.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | 8 | 2017-09-27T10:31:18.000Z | 2022-01-08T10:30:46.000Z | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: errors.py
import mcl.status
# Error codes for the shutdown command. All command-specific codes are
# offsets from the framework's base error number so they do not collide
# with other MCL status codes.
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 1
ERR_NOT_IMPLEMENTED = mcl.status.framework.ERR_START + 2
ERR_SHUTDOWN_FAILED = mcl.status.framework.ERR_START + 3
ERR_INVALID_TYPE = mcl.status.framework.ERR_START + 4
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
ERR_MARSHAL_FAILED: 'Marshaling data failed',
ERR_NOT_IMPLEMENTED: 'Not implemented on this platform',
ERR_SHUTDOWN_FAILED: 'Shutdown failed',
ERR_INVALID_TYPE: 'Invalid shutdown type'
} | 42.833333 | 67 | 0.77821 |
51c2ffa36cc5cd0dd18397dec77efc3f270a50f0 | 1,399 | py | Python | pyro/distributions/testing/naive_dirichlet.py | cnheider/pyro | 60bcab73ada30c2b3f05d525690c9664ff6fc22e | [
"MIT"
] | null | null | null | pyro/distributions/testing/naive_dirichlet.py | cnheider/pyro | 60bcab73ada30c2b3f05d525690c9664ff6fc22e | [
"MIT"
] | null | null | null | pyro/distributions/testing/naive_dirichlet.py | cnheider/pyro | 60bcab73ada30c2b3f05d525690c9664ff6fc22e | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import torch
from pyro.distributions.torch import Beta, Dirichlet, Gamma
from pyro.distributions.util import copy_docs_from
@copy_docs_from(Dirichlet)
class NaiveDirichlet(Dirichlet):
    """
    Implementation of ``Dirichlet`` via ``Gamma``.

    This naive implementation has stochastic reparameterized gradients, which
    have higher variance than PyTorch's ``Dirichlet`` implementation.
    """

    def __init__(self, alpha):
        super(NaiveDirichlet, self).__init__(alpha)
        # One Gamma(alpha_i, 1) per component; normalizing the draws across
        # the last dimension yields a Dirichlet(alpha) sample.
        self._gamma = Gamma(alpha, torch.ones_like(alpha))

    def rsample(self, sample_shape=torch.Size()):
        gamma_draws = self._gamma.rsample(sample_shape)
        normalizer = gamma_draws.sum(-1, True)
        return gamma_draws / normalizer
@copy_docs_from(Beta)
class NaiveBeta(Beta):
    """
    Implementation of ``Beta`` via ``Gamma``.

    This naive implementation has stochastic reparameterized gradients, which
    have higher variance than PyTorch's ``Beta`` implementation.
    """

    def __init__(self, alpha, beta):
        super(NaiveBeta, self).__init__(alpha, beta)
        # Stack (alpha, beta) so a single Gamma distribution produces both
        # draws; Beta(a, b) = X / (X + Y) with X ~ Gamma(a, 1), Y ~ Gamma(b, 1).
        stacked_params = torch.stack([alpha, beta], -1)
        self._gamma = Gamma(stacked_params, torch.ones_like(stacked_params))

    def rsample(self, sample_shape=torch.Size()):
        gamma_draws = self._gamma.rsample(sample_shape)
        normalized = gamma_draws / gamma_draws.sum(-1, True)
        # The first of the two normalized components is the Beta sample.
        return normalized[..., 0]
| 32.534884 | 77 | 0.699071 |
c78a85346446b614c99d3a2fa9e167d792012318 | 5,118 | py | Python | networking_nec/nwa/l2/rpc/nwa_l2_server_callback.py | nec-openstack/networking-nec | 4a8410422b02c3df8eb4bcf9fe325354dc659185 | [
"Apache-2.0"
] | null | null | null | networking_nec/nwa/l2/rpc/nwa_l2_server_callback.py | nec-openstack/networking-nec | 4a8410422b02c3df8eb4bcf9fe325354dc659185 | [
"Apache-2.0"
] | null | null | null | networking_nec/nwa/l2/rpc/nwa_l2_server_callback.py | nec-openstack/networking-nec | 4a8410422b02c3df8eb4bcf9fe325354dc659185 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015-2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import api as db_api
from neutron.db import models_v2
from neutron import manager
from neutron.plugins.ml2 import db as db_ml2
from neutron.plugins.ml2 import models as models_ml2
from neutron_lib import constants
from oslo_log import helpers
from oslo_log import log as logging
import oslo_messaging
from sqlalchemy.orm import exc as sa_exc
from networking_nec._i18n import _LE
LOG = logging.getLogger(__name__)
class NwaL2ServerRpcCallback(object):
    """Server-side RPC endpoint serving network lookups and port/segment
    updates for the NWA L2 agent."""

    target = oslo_messaging.Target(version='1.0')

    def get_nwa_network_by_port_id(self, rpc_context, **kwargs):
        """Return the network dict that the port 'port_id' belongs to."""
        plugin = manager.NeutronManager.get_plugin()
        port_id = kwargs.get('port_id')
        port = plugin.get_port(rpc_context, port_id)
        network = plugin.get_network(rpc_context, port['network_id'])
        return network

    def get_nwa_network_by_subnet_id(self, rpc_context, **kwargs):
        """Return the network dict that the subnet 'subnet_id' belongs to."""
        plugin = manager.NeutronManager.get_plugin()
        subnet_id = kwargs.get('subnet_id')
        subnet = plugin.get_subnet(rpc_context, subnet_id)
        network = plugin.get_network(rpc_context, subnet['network_id'])
        return network

    def get_nwa_network(self, rpc_context, **kwargs):
        """Return the network dict for 'network_id'."""
        plugin = manager.NeutronManager.get_plugin()
        net_id = kwargs.get('network_id')
        network = plugin.get_network(rpc_context, net_id)
        return network

    def get_nwa_networks(self, rpc_context, **kwargs):
        """Return all network dicts visible in this context."""
        plugin = manager.NeutronManager.get_plugin()
        networks = plugin.get_networks(rpc_context)
        return networks

    @helpers.log_method_call
    def update_port_state_with_notifier(self, rpc_context, **kwargs):
        """Record the dynamic segment's VLAN id, activate the port, and
        notify agents of the port update. Always returns {}."""
        port_id = kwargs.get('port_id')
        network_id = kwargs.get('network_id')
        network_type = kwargs.get('network_type')
        segmentation_id = kwargs.get('segmentation_id')
        physical_network = kwargs.get('physical_network')
        # 1 update segment: store the segmentation id on the matching
        # dynamic segment, if one exists (missing segment is not an error).
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            try:
                query = (session.query(models_ml2.NetworkSegment).
                         filter_by(network_id=network_id))
                query = query.filter_by(physical_network=physical_network)
                query = query.filter_by(is_dynamic=True)
                record = query.one()
                record.segmentation_id = segmentation_id
            except sa_exc.NoResultFound:
                pass
        # 2 change port state to ACTIVE
        plugin = manager.NeutronManager.get_plugin()
        plugin.update_port_status(
            rpc_context,
            port_id,
            constants.PORT_STATUS_ACTIVE
        )
        # 3 search the db for the port by port_id prefix
        session = db_api.get_session()
        port = None
        with session.begin(subtransactions=True):
            try:
                port_db = (session.query(models_v2.Port).
                           enable_eagerloads(False).
                           filter(models_v2.Port.id.startswith(port_id)).
                           one())
                port = plugin._make_port_dict(port_db)
            except sa_exc.NoResultFound:
                LOG.error(_LE("Can't find port with port_id %s"),
                          port_id)
            except sa_exc.MultipleResultsFound:
                LOG.error(_LE("Multiple ports have port_id starting with %s"),
                          port_id)
        # 4 send notifier (only if the port lookup succeeded)
        if port is not None:
            LOG.debug("notifier port_update %(net_type)s, %(seg_id)s, "
                      "%(physnet)s",
                      {'net_type': network_type,
                       'seg_id': segmentation_id,
                       'physnet': physical_network})
            plugin.notifier.port_update(
                rpc_context, port,
                network_type,
                segmentation_id,
                physical_network
            )
        return {}

    def release_dynamic_segment_from_agent(self, context, **kwargs):
        """Delete the dynamic segment for (network_id, physical_network),
        if present."""
        network_id = kwargs.get('network_id')
        physical_network = kwargs.get('physical_network')
        session = db_api.get_session()
        del_segment = db_ml2.get_dynamic_segment(
            session, network_id, physical_network=physical_network,
        )
        if del_segment:
            LOG.debug("release_dynamic_segment segment_id=%s",
                      del_segment['id'])
            db_ml2.delete_network_segment(session, del_segment['id'])
| 36.820144 | 78 | 0.631301 |
91492c8c308eeabb0b278dd0e0b5ad51f87494db | 546 | py | Python | tools/scitools/conf/understand/python/python3/strop.py | brucegua/moocos | 575c161cfa35e220f10d042e2e5ca18773691695 | [
"Apache-2.0"
] | 1 | 2020-01-20T21:26:46.000Z | 2020-01-20T21:26:46.000Z | tools/scitools/conf/understand/python/python3/strop.py | brucegua/moocos | 575c161cfa35e220f10d042e2e5ca18773691695 | [
"Apache-2.0"
] | null | null | null | tools/scitools/conf/understand/python/python3/strop.py | brucegua/moocos | 575c161cfa35e220f10d042e2e5ca18773691695 | [
"Apache-2.0"
] | null | null | null | def atof(): pass
def atoi(): pass
def atol(): pass
def capitalize(): pass
def count(): pass
def expandtabs(): pass
def find(): pass
def join(): pass
def joinfields(): pass
def lower(): pass
def lstrip(): pass
def maketrans(): pass
def replace(): pass
def rfind(): pass
def rstrip(): pass
def split(): pass
def splitfields(): pass
def strip(): pass
def swapcase(): pass
def translate(): pass
def upper(): pass
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
whitespace = '\t\n\x0b\x0c\r '
| 21.84 | 41 | 0.681319 |
5d049c537c7af46841c4ee070a1d8dcd9c0019f5 | 8,867 | py | Python | generalized_lloyd_quantization/null_uniform.py | spencerkent/generalized-lloyd-quantization | 6d27b1b1a16a128104f224b06ee8361f6ed600d9 | [
"BSD-3-Clause"
] | 20 | 2018-11-25T07:12:43.000Z | 2022-02-14T23:40:23.000Z | generalized_lloyd_quantization/null_uniform.py | spencerkent/generalized-lloyd-quantization | 6d27b1b1a16a128104f224b06ee8361f6ed600d9 | [
"BSD-3-Clause"
] | null | null | null | generalized_lloyd_quantization/null_uniform.py | spencerkent/generalized-lloyd-quantization | 6d27b1b1a16a128104f224b06ee8361f6ed600d9 | [
"BSD-3-Clause"
] | 5 | 2018-11-25T07:12:48.000Z | 2021-08-01T11:27:58.000Z | """
Most basic null-model alternative to Lloyd quantization, uniformly spaced bins.
We can place assignment points uniformly in an n-dimensional space and then
apply the quantizations based on nearest neighbor assignments. The one choice
that remains to be specified is the precise offset or 'phase' of the assignment
points. One thing that makes some sense is to place an assignment point directly
on the mode, median, or mean of the distribution, which we give the option of
in the function below
"""
from itertools import product as cartesian_product
import numpy as np
from scipy.spatial.distance import cdist as scipy_distance
import hdmedians
def compute_quantization(samples, binwidth, placement_scheme='on_mode'):
    """
    Calculates the assignment points for uniformly-spaced quantization bins

    The problem we need to solve is: given that we have bins with uniform spacing
    (and therefore fixed width), how should they be aligned? Should we place an
    assignment point directly on the mean of the distribution? On the mode of
    the distribution? On the median? On the value zero? This function calculates
    the assignment points based on one of these choices.

    Parameters
    ----------
    samples : ndarray (d, n) or (d,)
        This is an array of d samples of an n-dimensional random variable
        that we wish to find the uniform quantizer for. If these are scalar random
        variables, we will accept a 1d array as input.
    binwidth : ndarray (n, ) or float
        The width of the quantization bins in each dimension. If the input is
        multivariate (samples.ndim = 2), then we must specify a binwidth for each
        of the dimensions.
    placement_scheme : str, optional
        Determines where we place one of the assignment points. It can be one of
        {'on_mode', 'on_median', 'on_mean', 'on_zero'}.
        'on_mode': estimating the distribution from a histogram, take the mode
                   of this estimate and place a point directly on this value.
        'on_median': place a point directly on the median of these values.
        'on_mean': place a point directly on the mean of these values.
        'on_zero': place a point directly on the value 0.0.
        Default 'on_mode'.

    Returns
    -------
    assignment_pts : ndarray (m, n) or (m,)
        The converged assignment points
    cluster_assignments : ndarray (d, )
        For each sample, the index of the codeword to which uniform quantization
        assigns this datapoint. We can compute the actual quantized values outside
        this function by invoking `assignment_pts[cluster_assignments]`
    MSE : float
        The mean squared error (the mean l2-normed-squared to be precise) for the
        returned quantization.
    shannon_entropy : float
        The (empirical) Shannon entropy for this code. We can say that assuming
        we use a lossless binary source code, that our expected codeword length
        is precisely this value.
    """
    if samples.ndim == 2:
        # Multivariate input: one binwidth per dimension is required.
        assert type(binwidth) == np.ndarray
        assert len(binwidth) == samples.shape[1]
        if placement_scheme == 'on_mode':
            # An n-dimensional 100-bin histogram needs plenty of samples to be
            # a usable density estimate.
            assert samples.shape[0] > 1000, (
                'Cannot accurately estimate the mode of the ' +
                'distribution with so few samples. Try another placement scheme')
    # First, pick the "anchor" point that one assignment point will sit on.
    if placement_scheme == 'on_mode':
        if samples.ndim == 1:
            # numpy's histogramdd() is slow on 1d samples for some reason so we
            # use good old-fashioned histogram()
            counts, hist_bin_edges = np.histogram(samples, 100)
            hist_bin_centers = (hist_bin_edges[:-1] + hist_bin_edges[1:]) / 2
            largest_count = np.argmax(counts)
            anchored_pt = hist_bin_centers[largest_count]  # the mode of the dist
        else:
            counts, hist_bin_edges = np.histogramdd(samples, 100)
            hist_bin_centers = [(hist_bin_edges[x][:-1] + hist_bin_edges[x][1:]) / 2
                                for x in range(len(hist_bin_edges))]
            largest_count = np.unravel_index(np.argmax(counts), counts.shape)
            anchored_pt = np.array(
                [hist_bin_centers[coord_idx][largest_count[coord_idx]]
                 for coord_idx in range(counts.ndim)])
            #^ the mode of the dist, in n dimensions
    elif placement_scheme == 'on_median':
        if samples.ndim == 1:
            anchored_pt = np.median(samples)
        else:
            # the geometric median is a high-dimensional generalization of the median.
            # It minimizes the sum of distances, NOT the sum of squared distances,
            # which makes it *different from the multvariate mean, or centroid*. You
            # can verify this for yourself on synthetic data.
            anchored_pt = np.array(hdmedians.geomedian(samples, axis=0))
    elif placement_scheme == 'on_mean':
        anchored_pt = np.mean(samples, axis=0)
    elif placement_scheme == 'on_zero':
        if samples.ndim == 1:
            anchored_pt = 0.0
        else:
            anchored_pt = np.zeros((samples.shape[1], ))
    else:
        raise KeyError('Unrecognized placement scheme ' + placement_scheme)
    # The anchor must lie strictly inside the sample range so bins on both
    # sides of it actually cover data.
    max_val_each_dim = np.max(samples, axis=0)
    min_val_each_dim = np.min(samples, axis=0)
    assert np.all(anchored_pt < max_val_each_dim)
    assert np.all(anchored_pt >= min_val_each_dim)
    # Count whole binwidths that fit below and above the anchor in each dim;
    # the grid is then anchor +/- integer multiples of binwidth.
    num_pts_lower = np.floor((anchored_pt - min_val_each_dim) / binwidth)
    num_pts_higher = np.floor((max_val_each_dim - anchored_pt) / binwidth)
    num_a_pts_each_dim = num_pts_lower + num_pts_higher + 1
    if samples.ndim == 1:
        assignment_pts = np.linspace(anchored_pt - num_pts_lower * binwidth,
                                     anchored_pt + num_pts_higher * binwidth,
                                     num_a_pts_each_dim)
        if placement_scheme == 'on_zero':
            # for some reason there seems to be a numerical issue with linspace
            # keeping the anchored point exactly on zero - it can drift to like 1e-14.
            # since we clearly want the point exactly on zero I'm going to correct
            # this before we return
            assignment_pts[np.argmin(np.abs(assignment_pts))] = 0.0
    else:
        # careful, this can get huge in high dimensions.
        assignment_pts = np.array(list(cartesian_product(
            *[np.linspace(anchored_pt[x] - num_pts_lower[x] * binwidth[x],
                          anchored_pt[x] + num_pts_higher[x] * binwidth[x],
                          num_a_pts_each_dim[x]) for x in range(samples.shape[1])])))
        if placement_scheme == 'on_zero':
            # See above for r.e. this correction
            assignment_pts[np.argmin(np.linalg.norm(assignment_pts, axis=1))] = \
                np.zeros((samples.shape[1], ))
    # Assign every sample to its nearest grid point and score the code.
    quantized_code, cluster_assignments = quantize(samples, assignment_pts, True)
    if samples.ndim == 1:
        MSE = np.mean(np.square(quantized_code - samples))
    else:
        MSE = np.mean(np.sum(np.square(quantized_code - samples), axis=1))
    cword_probs = calculate_assignment_probabilites(cluster_assignments,
                                                    assignment_pts.shape[0])
    assert np.isclose(np.sum(cword_probs), 1.0)
    nonzero_prob_pts = np.where(cword_probs != 0)  # avoid log2(0)
    shannon_entropy = -1 * np.sum(
        cword_probs[nonzero_prob_pts] * np.log2(cword_probs[nonzero_prob_pts]))
    return assignment_pts, cluster_assignments, MSE, shannon_entropy
def quantize(raw_vals, assignment_vals, return_cluster_assignments=False):
    """Map each sample in `raw_vals` to its nearest assignment point.

    Parameters
    ----------
    raw_vals : ndarray (d,) or (d, n)
        Scalar or n-dimensional samples to be quantized.
    assignment_vals : ndarray (m,) or (m, n)
        The codebook of assignment points.
    return_cluster_assignments : bool, optional
        When True, also return the codeword index chosen for each sample.

    Returns
    -------
    quantized values (and, optionally, the per-sample codeword indices).
    """
    if raw_vals.ndim != 1:
        # Vector samples: brute-force nearest neighbor over the full pairwise
        # euclidean distance matrix.  Fine for small codebooks (the low-fidelity
        # regime); a KD-tree/Ball-tree search would scale better.
        labels = np.argmin(scipy_distance(raw_vals, assignment_vals,
                                          metric='euclidean'), axis=1)
    elif len(assignment_vals) == 1:
        # Degenerate scalar codebook: every sample maps to the single codeword.
        labels = np.zeros(len(raw_vals), dtype='int')
    else:
        # Scalar samples: decision boundaries are the midpoints between
        # consecutive assignment points, and digitize() resolves each sample
        # with a logarithmic-time interval search over the sorted edges.
        boundaries = (assignment_vals[:-1] + assignment_vals[1:]) / 2
        labels = np.digitize(raw_vals, boundaries)
    if return_cluster_assignments:
        return assignment_vals[labels], labels
    return assignment_vals[labels]
def calculate_assignment_probabilites(assignments, num_clusters):
    """Return the empirical probability of each of the `num_clusters` codewords.

    Parameters
    ----------
    assignments : ndarray (d,)
        Integer codeword index assigned to each of the d samples; values are
        expected to lie in [0, num_clusters).
    num_clusters : int
        Total number of codewords.  Codewords that received no samples get
        probability 0.

    Returns
    -------
    empirical_density : ndarray (num_clusters,)
        Fraction of samples assigned to each codeword (sums to 1).
    """
    # np.bincount counts integer labels directly -- simpler and faster than the
    # previous hand-rolled histogram over midpoint bin edges, with identical
    # results for in-range integer assignments.
    assignment_counts = np.bincount(np.asarray(assignments, dtype=int),
                                    minlength=num_clusters)
    return assignment_counts / np.sum(assignment_counts)
| 47.92973 | 80 | 0.700688 |
d617b1b70031eb6c50b040554b584e5619d66304 | 560 | py | Python | tests/test_plot.py | pmacosta/putil | 416cea52df8221981727e25d133e9b4e3f464798 | [
"MIT"
] | 6 | 2015-12-15T04:09:08.000Z | 2020-02-21T01:40:57.000Z | tests/test_plot.py | pmacosta/putil | 416cea52df8221981727e25d133e9b4e3f464798 | [
"MIT"
] | null | null | null | tests/test_plot.py | pmacosta/putil | 416cea52df8221981727e25d133e9b4e3f464798 | [
"MIT"
] | 2 | 2016-01-21T23:29:17.000Z | 2020-02-21T01:41:05.000Z | # test_plot.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,W0611
# Putil imports
from tests.plot.ccontracts import TestContracts
from tests.plot.basic_source import TestBasicSource
from tests.plot.csv_source import TestCsvSource
from tests.plot.series import TestSeries
from tests.plot.panel import TestPanel
from tests.plot.figure import TestFigure
from tests.plot.functions import TestDataSource, TestParameterizedColorSpace
from tests.plot.fixtures import default_panel, default_series, default_source
| 37.333333 | 77 | 0.844643 |
956eed64ff312be30605fbb486c8883a2a55e7fe | 5,115 | py | Python | alipay/aop/api/domain/SearchOrderDetailDataBrandItems.py | alipay/alipay-sdk-python-all | 1b63620431d982d30d39ee0adc4b92463cbcee3c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/SearchOrderDetailDataBrandItems.py | alipay/alipay-sdk-python-all | 1b63620431d982d30d39ee0adc4b92463cbcee3c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/SearchOrderDetailDataBrandItems.py | alipay/alipay-sdk-python-all | 1b63620431d982d30d39ee0adc4b92463cbcee3c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.SearchOrderBrandDetail import SearchOrderBrandDetail
class SearchOrderDetailDataBrandItems(object):
    """Alipay API model: one brand-item entry of a search-order detail payload.

    Follows the generated Alipay SDK pattern: private attributes exposed via
    properties, `to_alipay_dict` for request serialization, and the
    `from_alipay_dict` factory for response deserialization.
    """

    def __init__(self):
        # All fields default to None until populated through the property
        # setters or from_alipay_dict().
        self._biz_id = None
        self._box_status = None
        self._brand_box_keywords = None
        self._brand_template_id = None
        self._channel = None
        self._data = None
        self._merchant_type = None
        self._template_id = None

    @property
    def biz_id(self):
        return self._biz_id

    @biz_id.setter
    def biz_id(self, value):
        self._biz_id = value

    @property
    def box_status(self):
        return self._box_status

    @box_status.setter
    def box_status(self, value):
        self._box_status = value

    @property
    def brand_box_keywords(self):
        return self._brand_box_keywords

    @brand_box_keywords.setter
    def brand_box_keywords(self, value):
        self._brand_box_keywords = value

    @property
    def brand_template_id(self):
        return self._brand_template_id

    @brand_template_id.setter
    def brand_template_id(self, value):
        self._brand_template_id = value

    @property
    def channel(self):
        return self._channel

    @channel.setter
    def channel(self, value):
        self._channel = value

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        # Coerce each element to a SearchOrderBrandDetail model.  Non-list
        # values are silently ignored (self._data is left unchanged).
        if isinstance(value, list):
            self._data = list()
            for i in value:
                if isinstance(i, SearchOrderBrandDetail):
                    self._data.append(i)
                else:
                    self._data.append(SearchOrderBrandDetail.from_alipay_dict(i))

    @property
    def merchant_type(self):
        return self._merchant_type

    @merchant_type.setter
    def merchant_type(self, value):
        self._merchant_type = value

    @property
    def template_id(self):
        return self._template_id

    @template_id.setter
    def template_id(self, value):
        self._template_id = value

    def to_alipay_dict(self):
        """Serialize the truthy fields into a plain dict for the Alipay API.

        Nested models are recursed via their own to_alipay_dict(); note that
        falsy values (None, 0, '') are omitted entirely, per SDK convention.
        """
        params = dict()
        if self.biz_id:
            if hasattr(self.biz_id, 'to_alipay_dict'):
                params['biz_id'] = self.biz_id.to_alipay_dict()
            else:
                params['biz_id'] = self.biz_id
        if self.box_status:
            if hasattr(self.box_status, 'to_alipay_dict'):
                params['box_status'] = self.box_status.to_alipay_dict()
            else:
                params['box_status'] = self.box_status
        if self.brand_box_keywords:
            if hasattr(self.brand_box_keywords, 'to_alipay_dict'):
                params['brand_box_keywords'] = self.brand_box_keywords.to_alipay_dict()
            else:
                params['brand_box_keywords'] = self.brand_box_keywords
        if self.brand_template_id:
            if hasattr(self.brand_template_id, 'to_alipay_dict'):
                params['brand_template_id'] = self.brand_template_id.to_alipay_dict()
            else:
                params['brand_template_id'] = self.brand_template_id
        if self.channel:
            if hasattr(self.channel, 'to_alipay_dict'):
                params['channel'] = self.channel.to_alipay_dict()
            else:
                params['channel'] = self.channel
        if self.data:
            # data is a list of models: serialize each element in place first.
            if isinstance(self.data, list):
                for i in range(0, len(self.data)):
                    element = self.data[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.data[i] = element.to_alipay_dict()
            if hasattr(self.data, 'to_alipay_dict'):
                params['data'] = self.data.to_alipay_dict()
            else:
                params['data'] = self.data
        if self.merchant_type:
            if hasattr(self.merchant_type, 'to_alipay_dict'):
                params['merchant_type'] = self.merchant_type.to_alipay_dict()
            else:
                params['merchant_type'] = self.merchant_type
        if self.template_id:
            if hasattr(self.template_id, 'to_alipay_dict'):
                params['template_id'] = self.template_id.to_alipay_dict()
            else:
                params['template_id'] = self.template_id
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = SearchOrderDetailDataBrandItems()
        if 'biz_id' in d:
            o.biz_id = d['biz_id']
        if 'box_status' in d:
            o.box_status = d['box_status']
        if 'brand_box_keywords' in d:
            o.brand_box_keywords = d['brand_box_keywords']
        if 'brand_template_id' in d:
            o.brand_template_id = d['brand_template_id']
        if 'channel' in d:
            o.channel = d['channel']
        if 'data' in d:
            o.data = d['data']
        if 'merchant_type' in d:
            o.merchant_type = d['merchant_type']
        if 'template_id' in d:
            o.template_id = d['template_id']
        return o
| 32.373418 | 87 | 0.590225 |
776ff0d708c1c6215155d41a73acf82085b6bd91 | 806 | py | Python | manage.py | newnone/Jotto-Web | 52d5d0bfca0a102567bbd4c6d5d3077e21eac4c2 | [
"MIT"
] | null | null | null | manage.py | newnone/Jotto-Web | 52d5d0bfca0a102567bbd4c6d5d3077e21eac4c2 | [
"MIT"
] | null | null | null | manage.py | newnone/Jotto-Web | 52d5d0bfca0a102567bbd4c6d5d3077e21eac4c2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Django command-line entry point: point at this project's settings module
    # (unless the environment already overrides it) and dispatch to the
    # management command runner.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "JottoWeb.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The import can fail for reasons other than Django being absent.
        # Probe for Django itself so the real cause isn't masked (this matters
        # on Python 2, which has no implicit exception chaining).
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        else:
            raise
    execute_from_command_line(sys.argv)
| 35.043478 | 77 | 0.64268 |
8d373d5184777a819cdca64943ef81521e6eec3d | 2,878 | py | Python | main.py | rohinidas0709/bandit.rl | 2f5fcb86fd9ea7cfaa50e7629d91abf614d6bb52 | [
"MIT"
] | null | null | null | main.py | rohinidas0709/bandit.rl | 2f5fcb86fd9ea7cfaa50e7629d91abf614d6bb52 | [
"MIT"
] | null | null | null | main.py | rohinidas0709/bandit.rl | 2f5fcb86fd9ea7cfaa50e7629d91abf614d6bb52 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
from agent import RandomAgent, GreedyAgent, EpsilonGreedyAgent
from bandit import Bandit
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
NUM_ARMS = 10
NUM_GAMES = 2000
NUM_STEPS = 1000
def _run_simulation(make_agent, games, desc):
    """Shared driver for all agent simulations.

    Plays a fresh agent (built by the zero-argument factory `make_agent`)
    against every game, pooling the chosen actions and averaging the
    per-timestep reward over all games.  Extracted because the three
    simulate_* functions below were byte-for-byte copies of each other.

    Returns a dict with keys "actions" (flat list over all games) and
    "rewards" (ndarray of length NUM_STEPS, average reward per step).
    """
    data = {
        "actions": [],
        "rewards": np.zeros(NUM_STEPS)
    }
    for g in tqdm(range(NUM_GAMES), desc=desc):
        agent = make_agent()
        actions, rewards = agent.play(games[g])
        data["actions"].extend(actions)
        data["rewards"] += rewards
    # Convert sum to average reward per step.
    data["rewards"] /= NUM_GAMES
    return data


def simulate_random_agent(games):
    """Simulate the uniformly-random baseline agent over all games."""
    return _run_simulation(lambda: RandomAgent(NUM_ARMS, NUM_STEPS),
                           games, "Random Agent")


def simulate_greedy_agent(games):
    """Simulate the purely greedy (exploit-only) agent over all games."""
    return _run_simulation(lambda: GreedyAgent(NUM_ARMS, NUM_STEPS),
                           games, "Greedy Agent")


def simulate_epsilon_greedy_agent(games, epsilon):
    """Simulate an epsilon-greedy agent that explores with probability `epsilon`."""
    return _run_simulation(lambda: EpsilonGreedyAgent(epsilon, NUM_ARMS, NUM_STEPS),
                           games, f"Epsilon Greedy Agent ({epsilon})")
if __name__ == "__main__":
    games = [Bandit(NUM_ARMS) for _ in range(NUM_GAMES)]

    # Violin plot: each arm's reward distribution pooled across every game.
    reward_distribution = [[] for _ in range(NUM_ARMS)]
    for game in games:
        arm_rewards = game.get_rewards()
        for arm in range(NUM_ARMS):
            reward_distribution[arm].append(arm_rewards[arm])
    plt.violinplot(reward_distribution, range(NUM_ARMS), showmeans=True)
    plt.xticks(range(NUM_ARMS))
    plt.xlabel("Actions")
    plt.ylabel("Reward Distribution")
    plt.show()

    # Learning curves: average reward per timestep for each agent policy,
    # plotted in the same order (and colors) as before.
    curves = [
        (simulate_random_agent(games), "black"),
        (simulate_greedy_agent(games), "green"),
        (simulate_epsilon_greedy_agent(games, 0.1), "blue"),
        (simulate_epsilon_greedy_agent(games, 0.01), "red"),
    ]
    timesteps = range(NUM_STEPS)
    for agent_data, line_color in curves:
        plt.plot(timesteps, agent_data["rewards"], color=line_color, linewidth=0.5)
    plt.ylim(bottom=0)
    plt.legend(["Random", "Greedy", "Epsilon Greedy (0.1)", "Epsilon Greedy (0.01)"])
    plt.xlabel("Timesteps")
    plt.ylabel("Average Reward")
    plt.show()
01803470c264f953770dddc11bc29240e3541b78 | 37,464 | py | Python | bot/cogs/Fun.py | ConchDev/Conchbot-Rewrite | 4c7593287d95e0146aaca549d727654720034c2f | [
"MIT"
] | 6 | 2021-05-15T02:20:27.000Z | 2021-06-21T18:14:40.000Z | bot/cogs/Fun.py | ConchDev/Conchbot-Rewrite | 4c7593287d95e0146aaca549d727654720034c2f | [
"MIT"
] | 3 | 2021-05-14T01:28:32.000Z | 2021-11-16T01:25:22.000Z | bot/cogs/Fun.py | ConchDev/Conchbot-Rewrite | 4c7593287d95e0146aaca549d727654720034c2f | [
"MIT"
] | 7 | 2021-05-14T02:26:01.000Z | 2021-12-20T22:58:25.000Z | import json
import os
import random
import typing
import urllib.request

import aiohttp
import asyncpraw
import dbl
import discord
import DiscordUtils
import httpx
from aiohttp import request
from aiohttp_requests import requests
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from dotenv import load_dotenv
from prsaw import RandomStuff
import os
import urllib
import aiosqlite
from bot.cogs.utils.embed import Embeds
import datetime
load_dotenv('.env')
reddit = asyncpraw.Reddit(client_id = os.getenv("redditid"),
client_secret = os.getenv("redditsecret"),
username = "UnsoughtConch",
password = os.getenv('redditpassword'),
user_agent = "ConchBotPraw")
rs = RandomStuff(async_mode=True, api_key = os.getenv("aiapikey"))
dbltoken = os.getenv('DBLTOKEN')
class Fun(commands.Cog):
'''
The fun category is where most fun commands are. ConchBot is all about its fun commands, so most commands will be here.
'''
    def __init__(self, client):
        """Store the bot reference, the top.gg client, and per-channel snipe caches."""
        self.client = client
        # top.gg (Discord Bot List) client; used for vote checks in on_message.
        self.dbl = dbl.DBLClient(self.client, dbltoken)
        # Per-channel caches consumed by snipe-style commands.
        self.delete_snipes = dict()
        self.edit_snipes = dict()
        self.delete_snipes_attachments = dict()
        # NOTE(review): captured once at cog load, not per use -- confirm a
        # startup timestamp is the intent here.
        self.time = datetime.datetime.utcnow().strftime('%Y:%m:%d %H:%M:%S')
async def category_convert(self, category):
cat = category.lower()
categories = ['education', 'diy', 'recreational', 'social', 'charity', 'cooking', 'relaxation', 'music', 'busywork']
alias1 = ['edu', '', 'recreation', '', '', 'baking', 'relax', '', 'work']
alias2 = ['educational', 'rec', '', '', '', 'relaxational', '', '']
if cat in categories:
return cat
elif cat in alias1:
index = alias1.index(cat)
return categories[index]
elif cat in alias2:
index = alias2.index(cat)
return categories[index]
else:
return False
async def create_gofile_folder(self, user_id):
db = await aiosqlite.connect('./bot/db/config.db')
cursor = await db.cursor()
await cursor.execute(f"SELECT user_id FROM gofile WHERE user_id = {user_id}")
result = await cursor.fetchone()
if result is not None:
await cursor.close()
await db.close()
return True
else:
await cursor.execute(f"INSERT INTO gofile (user_id) VALUES ({user_id})")
await db.commit()
await cursor.close()
await db.close()
folderid = os.getenv("GOFILE_FOLDER_ID")
token = os.getenv("GOFILE_TOKEN")
async with aiohttp.ClientSession() as session:
async with session.put("https://api.gofile.io/createFolder", data=f'parentFolderId={str(folderid)}&token={str(token)}&folderName={str(user_id)}') as resp:
data = json.loads(await resp.read())
status = data['status']
dat = str(data['data']) + "ee"
if str(status) == "ok":
print(dat)
print(str(data))
return True
else:
print(status)
return False
@commands.Cog.listener()
    async def on_message_delete(self, message):
        """Cache the most recently deleted message (and its attachments) per channel,
        for use by snipe-style commands."""
        self.delete_snipes[message.channel] = message
        self.delete_snipes_attachments[message.channel] = message.attachments
@commands.Cog.listener()
    async def on_message_edit(self, before, after):
        """Cache the latest (before, after) edit pair per channel, for edit-snipe commands."""
        self.edit_snipes[after.channel] = (before, after)
@commands.Cog.listener()
async def on_message(self, message):
if message.author.bot:
return
if message.channel.name == "conchchat":
try:
flag = False
votes = await self.dbl.get_bot_upvotes()
for item in votes:
if int(item['id']) == int(message.author.id):
flag = True
break
if flag is True:
await message.channel.trigger_typing()
aimsg = rs.get_ai_response(message.content)
message = aimsg["message"]
await message.reply(message)
else:
await message.channel.trigger_typing()
aimsg = rs.get_ai_response(message.content)
message = aimsg["message"]
await message.reply(f"{message}\n\n*Consider voting for me on Top.gg! (<https://bit.ly/2PiLbwh>) "
"It only takes a second of your time and you won't see this message anymore!*")
except AttributeError:
await message.channel.trigger_typing()
aimsg = await rs.get_ai_response(message.content)
message = aimsg
await message.reply(message)
except httpx.ReadTimeout:
await message.channel.send("It seems my API has timed out. Please give me a few minutes, and if the problem "
"continues, please contact UnsoughtConch via my `cb support` command.")
else:
pass
try:
if message.guild.id == 724050498847506433:
if "retard" in message.content.lower():
await message.add_reaction("☹")
except:
pass
if message.content == f"<@!{self.client.user.id}>":
await message.channel.send("My prefix is `cb `")
@commands.command(aliases=["chatbot"], description="Set up an AI chat channel in your server!")
@commands.has_permissions(manage_guild=True)
    async def ai(self, ctx):
        """Walk a server admin through setting up the 'conchchat' AI channel,
        optionally creating the channel on their behalf."""
        await ctx.send("You can set up a chatbot channel by naming any channel 'conchchat,' or I can do it for you! "
                       "would you like me to do it for you? `Yes` or `no`.")
        # NOTE(review): wait_for raises asyncio.TimeoutError after 30s; it is
        # not caught here -- confirm a global command error handler covers it.
        msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author, timeout=30)
        if "yes" in msg.content.lower():
            await ctx.send("What category would you like this channel in? Channel categories ***must be the exact "
                           "name, capitalization and all.***")
            msg0 = await self.client.wait_for('message', check=lambda message: message.author == ctx.author, timeout=30)
            # Exact-name lookup; yields None (channel created outside any
            # category) when the reply doesn't match a category name.
            category = discord.utils.get(ctx.guild.categories, name=msg0.content)
            try:
                channel = await ctx.guild.create_text_channel('conchchat', category=category)
            except:
                # presumably discord.Forbidden; note the reply text says
                # "manage guild" but channel creation needs Manage Channels --
                # verify the intended permission.
                await ctx.send("I'm sorry, but I do not have the `manage guild` requirement needed to create channels.")
                return
            await ctx.send(f"Done! The channel `conchchat` was created in the category `{msg0.content}`")
        elif "no" in msg.content.lower():
            await ctx.send("Okay. Cancelling...")
        else:
            await ctx.send("That's not a valid option.")
@commands.command(description="Shorten a link!")
@commands.cooldown(1, 5, BucketType.user)
async def shorten(self, ctx, *, url):
o = urllib.parse.quote(url, safe='/ :')
async with aiohttp.ClientSession() as session:
async with session.post('https://tinyuid.com/api/v1/shorten', json={'url':o}) as resp:
e = await resp.json()
return await ctx.send(f"<{e['result_url']}>")
@commands.command(description="Get a meme from (almost) any Reddit subreddit!")
@commands.cooldown(1, 10, commands.BucketType.user)
    async def reddit(self, ctx, subreddit):
        """Fetch a random post from the top 50 of *subreddit* and show it as an embed.

        NSFW-flagged posts are only shown in NSFW-marked channels.
        """
        message = await ctx.send("This may take a hot minute... Sit tight!")
        try:
            # Shadows the parameter with the asyncpraw Subreddit object.
            subreddit = await reddit.subreddit(subreddit)
            # asyncpraw's .top() yields an async generator; materialize it.
            top = subreddit.top(limit=50)
            all_subs = []
            async for submission in top:
                all_subs.append(submission)
            ransub = random.choice(all_subs)
            if ransub.over_18:
                if ctx.channel.is_nsfw() == True:
                    pass
                else:
                    await ctx.send("Looks like that post is marked over 18, meaning you need to be in an NSFW marked"
                                   " channel to look at that post.")
                    return
            if ransub.is_self:
                # Self (text) post: show title + body text.
                embed = discord.Embed(title=f"{ransub.author}'s Post", colour=ctx.author.colour)
                embed.add_field(name=ransub.title, value=ransub.selftext)
                embed.set_footer(text=f"❤ {ransub.ups} | 💬 {ransub.num_comments}")
            else:
                # Link/image post: embed the URL as an image.
                embed = discord.Embed(title=ransub.title, colour=ctx.author.colour, url=ransub.url)
                embed.set_footer(text=f"Posted by {ransub.author} on Reddit. | ❤ {ransub.ups} | 💬 {ransub.num_comments}")
                embed.set_image(url=ransub.url)
            await message.delete()
            await ctx.send(embed=embed)
        except:
            # Bare except: any failure (nonexistent/private subreddit, API
            # error) is reported with the same generic message.
            await ctx.send("Something went wrong. This may be the fact that the subreddit does not exist or is locked.")
@commands.command(description="It's This for That is a fun API and website! It gives startup ideas.")
@commands.cooldown(1, 10, commands.BucketType.user)
async def itft(self, ctx):
async with aiohttp.ClientSession() as session:
async with session.get('http://itsthisforthat.com/api.php?json') as thing:
try:
load = await thing.read()
jdata = json.loads(load)
embed = discord.Embed(title="Wait, what does your startup do?", colour=ctx.author.colour)
embed.add_field(name="So, basically, it's like a", value=f"**{jdata['this']}**", inline=False)
embed.add_field(name="For", value=f"**{jdata['that']}**", inline=False)
embed.set_footer(text="ItsThisForThat API | itsthisforthat.com")
await ctx.send(embed=embed)
except:
await ctx.send("Woops! Something went wrong.")
@commands.group(invoke_without_command=True, description="Surf the FBI watchlist!")
    async def fbi(self, ctx):
        """Prompt for a page number, then browse that page of the FBI Most Wanted
        API in a reaction-controlled paginated embed."""
        await ctx.send("What page?")
        # NOTE(review): asyncio.TimeoutError (10s) and non-numeric replies
        # (ValueError from int()) are not caught here -- confirm the global
        # command error handler deals with them.
        msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author, timeout=10)
        page = int(msg.content)
        async with aiohttp.ClientSession() as session:
            async with session.get("https://api.fbi.gov/wanted/v1/list", params={'page': page}) as response:
                data = json.loads(await response.read())
                embeds = []
                try:
                    # One embed per wanted entry on this page.
                    for item in data["items"]:
                        embed = discord.Embed(title=f"FBI Wanted | {item['title']}")
                        embed.add_field(name="Details:", value=item['details'])
                        embed.add_field(name="Warning Message:", value=item['warning_message'])
                        embed.add_field(name="Reward:", value=item['reward_text'])
                        embed.add_field(name="UID:", value=item['uid'])
                        embed.set_footer(text="Data from FBI API | For more info on an entry, use 'cb fbi details {UID}'")
                        embeds.append(embed)
                    paginator = DiscordUtils.Pagination.CustomEmbedPaginator(ctx, remove_reactions=True)
                    paginator.add_reaction('⏪', "back")
                    paginator.add_reaction('⏩', "next")
                    await paginator.run(embeds)
                except IndexError:
                    # presumably raised by the paginator on an empty page --
                    # verify which call actually throws here.
                    return await ctx.send("Page not available or the number you inputed is doesn't exist")
@fbi.command(description="View the specific details of a person on the FBI watchlist via a UID!\n[value] value is optional.")
@commands.cooldown(1, 10, commands.BucketType.user)
async def details(self, ctx, uid, value=None):
async with aiohttp.ClientSession() as session:
async with session.get(f"https://api.fbi.gov/@wanted-person/{uid}") as response:
data = json.loads(await response.read())
details = data["details"]
title = data["title"]
desc = data["description"]
reward = data["reward_text"]
warnmsg = data["warning_message"]
sex = data["sex"]
if value is None:
pass
else:
embed = discord.Embed(title=f"FBI Wanted | {title}", colour=discord.Colour.red(),
description=f"Published on {data['publication']}", url=data['url'])
try:
embed.add_field(name=value, value=data[value])
embed.set_footer(text="Data from FBI API | https://api.fbi.gov.docs")
await ctx.send(embed=embed)
return
except:
await ctx.send("That's an invalid value. Use 'cb help fbi' for more details.")
return
return
embed = discord.Embed(title=f"FBI Wanted | {title}", colour=discord.Colour.red(),
description=f"Published on {data['publication']}", url=data["url"])
if details is not None:
embed.add_field(name="Details:",value=details, inline=False)
else:
pass
if desc is not None:
embed.add_field(name="Description", value=desc)
else:
pass
if warnmsg is not None:
embed.add_field(name="Warning Message:", value=warnmsg, inline=False)
else:
pass
if reward is not None:
embed.add_field(name="Reward:", value=reward)
else:
pass
if sex is not None:
embed.add_field(name="Sex:", value=sex, inline=False)
else:
pass
embed.set_thumbnail(url="https://upload.wikimedia.org/wikipedia/commons/thumb/d/da/Seal_of_the_Federal_Bureau_of_Investigation.svg/300px-Seal_of_the_Federal_Bureau_of_Investigation.svg.png")
try:
embed.set_image(url = data["images"][0]["large"])
except:
pass
await ctx.send(embed=embed)
@commands.command(description="View COVID statistics for any country!")
@commands.cooldown(1, 10, commands.BucketType.user)
async def covid(self, ctx, country):
async with aiohttp.ClientSession() as session:
async with session.get("https://covid-api.mmediagroup.fr/v1/cases") as response:
data = json.loads(await response.read())
try:
embed = discord.Embed(title=f"COVID-19 in {country}", colour=discord.Colour.gold(),)
embed.add_field(name="Cases:", value=data[country]['All']['confirmed'])
embed.add_field(name="Recovered Cases:", value=data[country]['All']['recovered'])
embed.add_field(name="Deaths:", value=data[country]['All']['deaths'])
embed.add_field(name="Country Population:", value=data[country]['All']['population'])
embed.add_field(name="Life Expectancy:", value=data[country]['All']['life_expectancy'])
embed.set_footer(text="Stats brought to you by M-Media-Group's COVID-19 API")
await ctx.send(embed=embed)
except:
await ctx.send("Country not found. Country names ***are case-sensitive***.")
@commands.command(description="Get a joke from the r/jokes subreddit!")
@commands.cooldown(1, 10, commands.BucketType.user)
async def joke(self, ctx):
msg = await ctx.send("Grabbing your joke...")
subreddit = await reddit.subreddit("jokes")
top = subreddit.top(limit=50)
all_subs = []
async for submission in top:
all_subs.append(submission)
ransub = random.choice(all_subs)
embed = discord.Embed(name=f"{ransub.author}'s Joke", colour=ctx.author.colour)
embed.add_field(name=ransub.title, value=ransub.selftext)
embed.set_footer(text=f"❤ {ransub.ups} | 💬 {ransub.num_comments}")
await msg.delete()
await ctx.send(embed=embed)
@commands.command(aliases=['repeat'], description="Make the bot repeat a word or phrase.")
@commands.cooldown(1, 3, commands.BucketType.user)
async def echo(self, ctx, channel:discord.TextChannel, *, msg):
if channel is None:
await ctx.send(msg)
else:
await channel.send(msg)
@commands.command(name='8ball', description="Ask the 8-ball a question and receive an answer!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def _8ball(self, ctx, *, msg):
responses = ['As I see it, yes.',
'Ask again later.',
'Better not tell you now.',
'Cannot predict now.',
'Concentrate and ask again.',
'Don’t count on it.',
'It is certain.',
'It is decidedly so.',
'Most likely.',
'My reply is no.',
'My sources say no.',
'Outlook not so good.',
'Outlook good.',
'Reply hazy, try again.',
'Signs point to yes.',
'Very doubtful.',
'Without a doubt.',
'Yes.',
'Yes – definitely.',
'You may rely on it.']
embed = discord.Embed(
title="Magic 8 Ball",
colour=discord.Colour.blurple()
)
embed.add_field(name="Question:", value=msg)
embed.add_field(name="Answer:", value=random.choice(responses))
await ctx.send(embed=embed)
@commands.command()
@commands.is_owner()
async def hehewed(self, ctx):
    """Owner-only: post the two reaction-role instruction embeds.

    First embed covers ping/access roles, second covers color roles.
    Channel and role IDs are hard-coded for one specific guild.
    """
    embed = discord.Embed(title="**__Ping and Access Roles__**", description="Get your roles by reacting to the corresponding reaction.", color=discord.Color.red())
    embed.add_field(name="**Announcement Ping**", value="React with 📣 to get pings for server announcements!", inline=False)
    embed.add_field(name="**Vote Ping**", value=f"React with 🗳 to get pings when there's a new vote in {self.client.get_channel(726098004708163594).mention}!", inline=False)
    embed.add_field(name="**Revive Ping**", value="React with 💀 to get pinged when the chat is dead!", inline=False)
    embed.add_field(name="**NSFW Access**", value=f"React with 🔞 to get access to {self.client.get_channel(814256958806556723).mention}!", inline=False)
    embed.set_footer(text="React with a listed reaction to get that role.")
    await ctx.send(embed=embed)
    embed = discord.Embed(title="**__Color Roles__**", color=discord.Color.teal(), description="Make your name a pretty color!")
    embed.add_field(name="**Red**", value=f"React with 🔴 to get a {ctx.guild.get_role(857816667190198282).mention} name!", inline=False)
    embed.add_field(name="**Orange**", value=f"React with 🟠 to get an {ctx.guild.get_role(857816821242789929).mention} name!", inline=False)
    embed.add_field(name="**Yellow**", value=f"React with 🟡 to get a {ctx.guild.get_role(857816873848012820).mention} name!", inline=False)
    embed.add_field(name="**Green**", value=f"React with 🟢 to get a {ctx.guild.get_role(857816937152380948).mention} name!", inline=False)
    embed.add_field(name="**Blue**", value=f"React with 🔵 to get a {ctx.guild.get_role(857816980887044157).mention} name!", inline=False)
    embed.add_field(name="**Purple**", value=f"React with 🟣 to get a {ctx.guild.get_role(857817018039009290).mention} name!", inline=False)
    embed.add_field(name="**White**", value=f"React with ⚪ to get a {ctx.guild.get_role(857817054706663494).mention} name!", inline=False)
    embed.add_field(name="**Black**", value=f"React with ⚫ to get a {ctx.guild.get_role(857817144989712434).mention} name!", inline=False)
    embed.set_footer(text="React with a listed reaction to get that color name.")
    await ctx.send(embed=embed)
@commands.command(aliases=["LMGTFY"], description="Make a Google link with the specified query.")
@commands.cooldown(1, 3, commands.BucketType.user)
async def google(self, ctx, *, query):
    """Turn *query* into a Google search URL and send it."""
    terms = query.lower().replace(' ', '+')
    await ctx.send("https://www.google.com/search?q=" + terms)
@commands.command(aliases=['chances', 'odds', 'odd'], description="Rate the chances of something happening!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def chance(self, ctx, *, msg):
    """Reply with a random 0-10 rating of how likely *msg* is."""
    chancenum = random.randint(0, 10)
    embed = discord.Embed(
        title="What are the Chances?",
        colour = ctx.author.colour
    )
    embed.add_field(name="Question:", value=msg)
    embed.add_field(name="The chances are...", value=chancenum)
    await ctx.send(embed=embed)
@commands.command(aliases=['avatar'], description="Show someone's profile picture!\n[member] value is optional.")
@commands.cooldown(1, 3, commands.BucketType.user)
async def pfp(self, ctx, member: discord.Member=None):
    """Send an embed with *member*'s avatar; defaults to the invoker.

    The original duplicated the embed-building code in both branches;
    defaulting ``member`` to ``ctx.author`` collapses them with
    identical behavior.
    """
    if member is None:
        member = ctx.author
    embed = discord.Embed(
        title=f"{member}'s Avatar",
        colour=member.colour
    )
    embed.set_image(url=member.avatar_url)
    await ctx.send(embed=embed)
@commands.group(invoke_without_command=True, description="Returns a random activity for when you're bored!")
@commands.cooldown(1, 5, BucketType.user)
async def bored(self, ctx):
    """Fetch a random activity from BoredAPI and present it as an embed.

    NOTE(review): ``requests.get`` is awaited here, so ``requests`` must be
    an async HTTP client alias (not the classic requests library) — confirm
    against the module-level import.
    """
    response = await requests.get('https://boredapi.com/api/activity')
    json = await response.json()
    embed = discord.Embed(title="I'm Bored", color=discord.Color.random())
    embed.add_field(name="If you're bored, you should...", value=json["activity"])
    if json['link']:
        embed.add_field(name="I can find a link to this project at...", value=json['link'], inline=False)
    if int(json['participants']) == 1:
        people = "1 person."
    else:
        people = f"{json['participants']}"
    # API price is a 0-1 scale; presumably *10 turns it into rough dollars — verify.
    embed.add_field(name="This might cost...", value="$" + str(int(json['price'])*10), inline=False)
    embed.add_field(name="The amount of people needed for this project is...", value=people, inline=False)
    embed.set_footer(text=f"Type: {json['type']} | Key: {json['key']} | Provided by BoredAPI")
    await ctx.send(embed=embed)
@commands.group(invoke_without_command=True, description="Make a QR code!")
async def qr(self, ctx, value):
    """Send a link to a generated QR code encoding *value*."""
    encoded = urllib.parse.quote(value)
    await ctx.send('https://api.qrserver.com/v1/create-qr-code/?data=' + encoded)
@qr.command(description="Read a QR Code!")
async def read(self, ctx, image=None):
    """Decode a QR code from a URL argument or a single message attachment.

    Bug fix: the "no attachments" check read ``ctx.messsage`` (typo), which
    raised AttributeError whenever the command was invoked without an image
    URL; it now reads ``ctx.message``. The bare ``except`` around the JSON
    parse is also narrowed to ``Exception``.
    """
    if image is not None:
        url = urllib.parse.quote(image)
    else:
        if len(ctx.message.attachments) > 1:
            return await ctx.send("We can only decode one QR code at a time.")
        elif len(ctx.message.attachments) < 1:
            return await ctx.send("You have to add some type of QR code for us to decode.")
        url = urllib.parse.quote(ctx.message.attachments[0].url)
    async with aiohttp.ClientSession() as cs:
        async with cs.get(f'https://api.qrserver.com/v1/read-qr-code/?fileurl={url}') as r:
            try:
                res = await r.json()
            except Exception:
                return await ctx.send("Your QR code has to be an attachment or a URL.")
            await ctx.send(res[0]['symbol'][0]['data'])
@bored.command(description="Search for a specific activity via an activity key.")
@commands.cooldown(1, 5, BucketType.user)
async def key(self, ctx, key):
    """Look up one BoredAPI activity by its numeric key and embed it."""
    response = await requests.get(f'http://www.boredapi.com/api/activity?key={key}')
    json = await response.json()
    # An unknown key returns a JSON body without "activity" -> KeyError below.
    try:
        embed = discord.Embed(title="I'm Bored", color=discord.Color.random())
        embed.add_field(name="If you're bored, you should...", value=json["activity"])
        if json['link']:
            embed.add_field(name="I can find a link to this project at...", value=json['link'], inline=False)
        if int(json['participants']) == 1:
            people = "1 person."
        else:
            people = f"{json['participants']}"
        embed.add_field(name="This might cost...", value="$" + str(int(json['price'])*10), inline=False)
        embed.add_field(name="The amount of people needed for this project is...", value=people, inline=False)
        embed.set_footer(text=f"Type: {json['type']} | Key: {json['key']} | Provided by BoredAPI")
        await ctx.send(embed=embed)
    except KeyError:
        await ctx.send("No activity found with that key.")
@bored.command(description="Search for activities by category.\n[category] value is optional, returns a list of categories if none.")
@commands.cooldown(1, 5, BucketType.user)
async def category(self, ctx, category=None):
    """Fetch a random BoredAPI activity of the given category.

    With no argument, lists the valid categories instead. Relies on the
    sibling ``self.category_convert`` helper to normalize the name.
    """
    if not category:
        embed = discord.Embed(title="List of Categories", color=discord.Color.random(), description="Education\nRecreational\nSocial\nDIY\nCharity\nCooking\nRelaxation\nMusic\nBusywork")
        return await ctx.send(embed=embed)
    category = await self.category_convert(category)
    if not category:
        return await ctx.send("That category does not exist.")
    response = await requests.get(f'https://www.boredapi.com/api/activity?type={category}')
    json = await response.json()
    try:
        embed = discord.Embed(title="I'm Bored", color=discord.Color.random())
        embed.add_field(name="If you're bored, you should...", value=json["activity"])
        if json['link']:
            embed.add_field(name="I can find a link to this project at...", value=json['link'], inline=False)
        if int(json['participants']) == 1:
            people = "1 person."
        else:
            people = f"{json['participants']}"
        embed.add_field(name="This might cost...", value="$" + str(int(json['price'])*10), inline=False)
        embed.add_field(name="The amount of people needed for this project is...", value=people, inline=False)
        embed.set_footer(text=f"Type: {json['type']} | Key: {json['key']} | Provided by BoredAPI")
        await ctx.send(embed=embed)
    except KeyError:
        return await ctx.send("That category does not exist.")
@commands.group(name='snipe', description="Get the most recently deleted message in a channel!")
async def snipe_group(self, ctx):
    """Show the last deleted message cached for this channel.

    Reads ``self.delete_snipes`` (channel -> message), populated by a
    message-delete listener elsewhere in the cog.
    """
    if ctx.invoked_subcommand is None:
        try:
            sniped_message = self.delete_snipes[ctx.channel]
        except KeyError:
            await ctx.send('There are no deleted messages in this channel to snipe!')
        else:
            result = discord.Embed(
                color=discord.Color.red(),
                description=sniped_message.content,
                timestamp=sniped_message.created_at
            )
            result.set_author(name=sniped_message.author.display_name, icon_url=sniped_message.author.avatar_url)
            try:
                # Best-effort: include the first cached attachment, if any.
                result.set_image(url=self.delete_snipes_attachments[ctx.channel][0].url)
            except:
                pass
            await ctx.send(embed=result)
@snipe_group.command(name='edit', description="Get the most recently edited message in the channel, before and after.")
async def snipe_edit(self, ctx):
    """Show the last edit in this channel as a before/after embed.

    Reads ``self.edit_snipes`` (channel -> (before, after) message pair).
    """
    try:
        before, after = self.edit_snipes[ctx.channel]
    except KeyError:
        await ctx.send('There are no message edits in this channel to snipe!')
    else:
        result = discord.Embed(
            color=discord.Color.red(),
            timestamp=after.edited_at
        )
        result.add_field(name='Before', value=before.content, inline=False)
        result.add_field(name='After', value=after.content, inline=False)
        result.set_author(name=after.author.display_name, icon_url=after.author.avatar_url)
        await ctx.send(embed=result)
@commands.group()
async def gofile(self, ctx):
    """Parent command group for gofile subcommands; does nothing by itself."""
    pass
@gofile.command(disabled=True)
async def upload(self, ctx, url):
    """Create (or fetch) the invoker's gofile folder and report its status.

    NOTE(review): the ``url`` argument is accepted but never used, and
    ``disabled=True`` is not a standard commands.Command kwarg — confirm
    both against the intended gofile upload flow.
    """
    status = await self.create_gofile_folder(ctx.author.id)
    return await ctx.send(status)
@commands.command(aliases=["encoder"], description="Encode something into binary or base64.")
async def encode(self, ctx, type, *, code):
    """Encode *code* as binary or base64 via some-random-api.ml.

    The two branches of the original differed only in request URL, JSON
    result key and embed title, so they are now driven by a lookup table
    instead of duplicated code. Behavior is unchanged.
    """
    # type -> (endpoint URL, key of the result in the JSON, embed title)
    endpoints = {
        "binary": (f"https://some-random-api.ml/binary?text={code}", "binary", "Binary Encoder"),
        "base64": (f"https://some-random-api.ml/base64?encode={code}", "base64", "Base64 Encoder"),
    }
    type = type.lower()
    if type not in endpoints:
        return await ctx.send("Use binary or base64")
    url, key, title = endpoints[type]
    async with aiohttp.ClientSession() as encodeSession:
        async with encodeSession.get(url) as encoder:
            if encoder.status == 200:
                api = await encoder.json()
                encoded = api[key]
                embed = discord.Embed(title=title)
                embed.add_field(name="Input", value=f"```{code}```", inline=False)
                embed.add_field(name="Output", value=f"```{encoded}```", inline=False)
                embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar_url)
                await ctx.send(embed=embed)
            else:
                embed = Embeds().OnApiError(command_name=ctx.command.qualified_name, status=encoder.status)
                await ctx.send(embed=embed)
@commands.command(aliases=["decoder"], description="Decode a binary or base64 string.")
async def decode(self, ctx, type, *, code):
    """Decode a binary or base64 string via some-random-api.ml.

    Mirrors ``encode``: the duplicated binary/base64 branches are collapsed
    into a lookup table (both decode endpoints return the plain text under
    the "text" key). Behavior is unchanged.
    """
    # type -> (endpoint URL, embed title); both responses use the "text" key.
    endpoints = {
        "binary": (f"https://some-random-api.ml/binary?decode={code}", "Binary Decoder"),
        "base64": (f"https://some-random-api.ml/base64?decode={code}", "Base64 Decoder"),
    }
    type = type.lower()
    if type not in endpoints:
        return await ctx.send("Use binary or base64")
    url, title = endpoints[type]
    async with aiohttp.ClientSession() as decodeSession:
        async with decodeSession.get(url) as decoder:
            if decoder.status == 200:
                api = await decoder.json()
                decoded = api["text"]
                embed = discord.Embed(title=title)
                embed.add_field(name="Input", value=f"```{code}```", inline=False)
                embed.add_field(name="Output", value=f"```{decoded}```", inline=False)
                embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar_url)
                await ctx.send(embed=embed)
            else:
                embed = Embeds().OnApiError(command_name=ctx.command.qualified_name, status=decoder.status)
                await ctx.send(embed=embed)
@commands.command(description="Get lyrics of a specific song!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def lyrics(self, ctx, *, search):
    """Look up song lyrics on some-random-api.ml and send them as an embed."""
    search = search.replace(' ', '%20')
    search_web = f"https://some-random-api.ml/lyrics?title={search}"
    await ctx.channel.trigger_typing()
    async with request("GET", search_web, headers={}) as response:
        if response.status == 200:
            api = await response.json()
            title = api["title"]
            author = api["author"]
            lyrics = api["lyrics"]
            embed = discord.Embed(title=f"{title} by {author}", description=lyrics)
            try:
                await ctx.send(embed=embed)
            except:
                # Sending failed (presumably the lyrics exceed the embed size
                # limit): fall back to a two-page reaction paginator with the
                # lyrics split in half.
                paginator = DiscordUtils.Pagination.CustomEmbedPaginator(ctx, remove_reactions=True)
                paginator.add_reaction('◀', 'back')
                paginator.add_reaction('▶', 'next')
                embed1 = discord.Embed(title=f"{title} by {author} | Page 1", description=lyrics[:int(len(lyrics)/2)])
                embed2 = discord.Embed(title=f"{title} by {author} | Page 2", description=lyrics[int(len(lyrics)/2):])
                embeds = [embed1, embed2]
                await paginator.run(embeds)
        else:
            embed = Embeds().OnApiError(command_name=ctx.command.qualified_name, status=response.status)
            await ctx.send(embed=embed)
@commands.command(description="Define a word!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def define(self, ctx, word):
    """Fetch a dictionary definition and show it in a two-page paginator."""
    word_lowered = word.lower()
    word_link = f"https://some-random-api.ml/dictionary?word={word_lowered}"
    async with request("GET", word_link, headers={}) as response:
        if response.status == 200:
            api = await response.json()
            word_name = api["word"]
            word_definition = api["definition"]
            # Always paginated in two halves, even for short definitions.
            paginator = DiscordUtils.Pagination.CustomEmbedPaginator(ctx, remove_reactions=True)
            paginator.add_reaction('◀', 'back')
            paginator.add_reaction('▶', 'next')
            embed1 = discord.Embed(title=f"{word_name} | Page 1", description=word_definition[:int(len(word_definition)/2)])
            embed2 = discord.Embed(title=f"{word_name} | Page 2", description=word_definition[int(len(word_definition)/2):])
            embeds = [embed1, embed2]
            await paginator.run(embeds)
        else:
            embed = Embeds().OnApiError(command_name=ctx.command.qualified_name, status=response.status)
            await ctx.send(embed=embed)
@commands.command(description="Returns a real-looking Discord bot token.")
@commands.cooldown(1, 5, commands.BucketType.user)
async def token(self, ctx):
    """Send a fake but realistic-looking bot token from some-random-api.ml.

    Bug fix: on a non-200 response the original sent the error embed and
    then still executed ``await ctx.send(bottoken)`` with ``bottoken``
    never assigned, raising NameError; the error path now returns early.
    """
    token_web = "https://some-random-api.ml/bottoken"
    async with ctx.typing():
        async with request("GET", token_web, headers={}) as response:
            if response.status != 200:
                embed = Embeds().OnApiError(command_name=ctx.command.qualified_name, status=response.status)
                return await ctx.send(embed=embed)
            api = await response.json()
            bottoken = api["token"]
    await ctx.send(bottoken)
def setup(client):
    """discord.py extension entry point: register the Fun cog on the bot."""
    client.add_cog(Fun(client))
| 49.885486 | 206 | 0.572229 |
95f2aeb800877141d64a614fb212d2e122c1cc85 | 9,231 | py | Python | HW2/scripts/utils_rbf.py | UltronAI/Stochastic-Process | edbfe0eb2b62758e093306d18ba5a96415572be1 | [
"MIT"
] | null | null | null | HW2/scripts/utils_rbf.py | UltronAI/Stochastic-Process | edbfe0eb2b62758e093306d18ba5a96415572be1 | [
"MIT"
] | null | null | null | HW2/scripts/utils_rbf.py | UltronAI/Stochastic-Process | edbfe0eb2b62758e093306d18ba5a96415572be1 | [
"MIT"
] | null | null | null | import numpy as np
import numpy.linalg as la
import random
def A(_):
return min(1, _)
def FindClosest(Mu, j1):
mu1 = Mu[j1, :]
d_min = 999999
j2 = j1
for i in range(Mu.shape[0]):
if i == j1:
continue
mu2 = Mu[i, :]
d = la.norm(mu1 - mu2)
if d < d_min:
d_min = d
j2 = i
return j2
def Generate1(X):
    """Propose one center from a Gaussian fit to the data.

    The data mean is perturbed by a uniform offset ``delta`` whose sign is
    chosen with probability 1/2, and one sample is drawn from
    N(mean ± delta, cov(X)). Returns a (1, d) array.
    """
    d = X.shape[1]
    mean = X.mean(axis = 0)
    var = np.cov(X, rowvar=False)
    delta = np.random.rand(1, d)
    u = np.random.rand()
    if u > 0.5:
        return np.random.multivariate_normal((mean + delta).reshape(d), var).reshape(1, d)
    else:
        return np.random.multivariate_normal((mean - delta).reshape(d), var).reshape(1, d)
def Generate2(X):
    """Draw one candidate center uniformly from the inflated data box.

    Coordinate i is sampled from [min_i - t*delta_i, max_i + t*delta_i],
    where min_i/max_i are the per-feature bounds of X and delta the spread.
    Returns a (1, d) array.

    Bug fix: the bounds were computed with ``X.min(axis = 1)`` (per-sample),
    which indexes the wrong axis of an (N, d) data matrix and raises when
    N < d; they are now per-feature (``axis=0``), consistent with
    Generate1 and Update1.
    """
    x_min = X.min(axis = 0)
    x_max = X.max(axis = 0)
    delta = x_max - x_min
    d = X.shape[1]
    t = 0.5
    mu = np.zeros((1, d))
    for i in range(d):
        mu[0, i] = random.uniform(x_min[i] - t * delta[i], x_max[i] + t * delta[i])
    return mu
def InitMu(k, X):
    """Stack k candidate centers drawn by Generate2 into a (k, d) array."""
    centers = np.zeros((k, X.shape[1]))
    for row in range(k):
        centers[row, :] = Generate2(X)
    return centers
def C(c, method = "AIC", N = np.e**2, k = 0, d = 0):
    """Model-complexity penalty used by the RJMCMC acceptance ratios.

    "AIC": c + 1. "RJSA": total parameter count k(c+1) + c(1+d), plus 1.
    Anything else: BIC-style (c + 1) * log(N) / 2.
    """
    if method == "AIC":
        return c + 1
    if method == "RJSA":
        return k * (c + 1) + c * (1 + d) + 1
    return (c + 1) * np.log(N) / 2
def Linear(s):
    """Linear RBF kernel: identity on the distance."""
    out = s
    return out
def Cubic(s):
    """Cubic RBF kernel: s^3."""
    cubed = s ** 3
    return cubed
def Pow4(s):
    """Quartic RBF kernel: s^4."""
    quartic = s ** 4
    return quartic
def ThinPlateSpline(s):
    """Thin-plate-spline kernel: s^2 log(s) (undefined at s = 0)."""
    r2 = s ** 2
    return r2 * np.log(s)
def Multiquadric(s):
    """Multiquadric kernel sqrt(s^2 + lambda) with fixed lambda = 0.5."""
    shift = 0.5
    return (s ** 2 + shift) ** 0.5
def Gauss(s):
    """Gaussian kernel exp(-lambda s^2) with fixed width 2.5 (lambda = 1/2.5^2)."""
    scale = 1 / 2.5 ** 2
    exponent = -scale * s ** 2
    return np.exp(exponent)
def CubicGauss(s):
    """Cubic-exponential kernel exp(-lambda s^3) with fixed lambda = 0.5."""
    scale = 0.5
    exponent = -scale * s ** 3
    return np.exp(exponent)
def Phi(X, Mu, phi = "Gauss"):
    """Build the (N, k) kernel matrix Phi[i, j] = phi(||X_i - Mu_j||).

    ``phi`` names one of the kernel functions defined above; unknown names
    fall back to Gauss, matching the original behavior.

    Bug fix: the dispatch string for the linear kernel was misspelled
    ("Liearn"), so passing phi="Linear" silently fell through to the Gauss
    default; the lookup now matches the documented kernel name.
    """
    N = X.shape[0]
    k = Mu.shape[0]
    out = np.empty((N, k))
    # Name -> kernel function; anything unknown resolves to Gauss.
    kernels = {
        "Linear": Linear,
        "Cubic": Cubic,
        "ThinPlateSpline": ThinPlateSpline,
        "Multiquadric": Multiquadric,
        "CubicGauss": CubicGauss,
        "Pow4": Pow4,
    }
    kernel = kernels.get(phi, Gauss)
    for i in range(N):
        s = la.norm(X[i, :] - Mu, axis = 1)
        out[i, :] = kernel(s)
    return out
def D(X, Mu, phi = "Gauss"):
    """Design matrix [1 | X | Phi(X, Mu)]; kernel columns only when k > 0."""
    ones = np.ones((X.shape[0], 1))
    parts = [ones, X]
    if Mu.shape[0] > 0:
        parts.append(Phi(X, Mu, phi))
    return np.concatenate(parts, axis = 1)
def P(X, Mu, phi = "Gauss"):
    """Residual projection matrix I - D (D^T D)^+ D^T for the design D(X, Mu)."""
    design = D(X, Mu, phi)
    hat = design.dot(la.pinv(design.T.dot(design))).dot(design.T)
    return np.identity(X.shape[0]) - hat
def S(X):
    """Volume of the inflated bounding box that candidate centers live in.

    Product over features of (1 + 2t) * delta_i with t = 0.5, where delta_i
    is the per-feature range of X; used as the proposal-volume factor in the
    birth/death acceptance ratios.

    Bug fix: the bounds were taken with ``X.min(axis = 1)`` (per-sample)
    instead of per-feature (``axis=0``); for (N, d) data that indexed the
    wrong axis (and can return 0 or raise for N < d), zeroing the
    birth-move acceptance ratio.
    """
    d = X.shape[1]
    x_min = X.min(axis = 0)
    x_max = X.max(axis = 0)
    delta = x_max - x_min
    s = 1
    t = 0.5
    for i in range(d):
        s *= (1 + 2 * t) * delta[i]
    return s
def Birth(X, Mu, y, mu, phi = "Gauss"):
    """RJMCMC birth-move acceptance ratio for inserting center ``mu``.

    Compares residual projections of each output column before/after the
    insertion, scaled by the proposal volume S(X), the complexity penalty
    C(..., "RJSA") and the new model order k + 1.
    """
    N = X.shape[0]
    c = y.shape[1]
    k = Mu.shape[0]
    d = mu.shape[1]
    C_ = C(c, "RJSA", k = k, d = d)
    S_ = S(X)
    out = 1
    # Special-case k == 0: concatenate needs matching shapes, so flatten first.
    if k == 0:
        Mu = Mu.reshape(0)
        mu = mu.reshape(d)
        Mu_ = np.concatenate((Mu, mu)).reshape(1, d)
    else:
        Mu_ = np.concatenate((Mu, mu))
    P_ = P(X, Mu, phi)
    P_1 = P(X, Mu_, phi)
    # Likelihood ratio: product over output columns of (old/new residual power)^(N/2).
    for i in range(c):
        out *= (y[:, i].T.dot(P_).dot(y[:, i]) / y[:, i].T.dot(P_1).dot(y[:, i])) ** (N/2)
    out *= S_ * np.exp(-C_) / (k + 1)
    return out
def Death(X, Mu, y, j, phi = "Gauss"):
    """RJMCMC death-move acceptance ratio for removing center ``j``.

    Inverse of Birth: exp(+C) and k/S replace exp(-C) and S/(k+1).
    """
    N = X.shape[0]
    c = y.shape[1]
    k = Mu.shape[0]
    d = Mu.shape[1]
    C_ = C(c, "RJSA", k = k, d = d)
    S_ = S(X)
    out = 1
    # Candidate center set with row j removed.
    Mu_ = np.concatenate((Mu[:j, :], Mu[j + 1:, :]))
    P_ = P(X, Mu, phi)
    P_1 = P(X, Mu_, phi)
    for i in range(c):
        out *= (y[:, i].T.dot(P_).dot(y[:, i]) / y[:, i].T.dot(P_1).dot(y[:, i])) ** (N/2)
    out *= k * np.exp(C_) / S_
    return out
def Split(X, Mu, y, s, j, mu1, mu2, phi = "Gauss"):
    """RJMCMC split-move acceptance ratio: replace center j by mu1 and mu2.

    ``s`` is the proposal-density factor supplied by the caller.
    """
    N = X.shape[0]
    c = y.shape[1]
    k = Mu.shape[0]
    d = Mu.shape[1]
    C_ = C(c, "RJSA", k = k, d = d)
    out = 1
    # Candidate set: row j replaced by the two children mu1, mu2.
    Mu_ = np.concatenate((Mu[:j, :], mu1, mu2, Mu[j + 1:, :]))
    P_ = P(X, Mu, phi)
    P_1 = P(X, Mu_, phi)
    for i in range(c):
        out *= (y[:, i].T.dot(P_).dot(y[:, i]) / y[:, i].T.dot(P_1).dot(y[:, i])) ** (N/2)
    out *= k * s * np.exp(C_) / (k + 1)
    return out
def Merge(X, Mu, y, s, j1, j2, mu, phi = "Gauss"):
    """RJMCMC merge-move acceptance ratio: replace centers j1, j2 by ``mu``.

    ``s`` is the proposal-density factor; the merged center is appended at
    the end of the remaining rows.
    """
    N = X.shape[0]
    c = y.shape[1]
    k = Mu.shape[0]
    d = Mu.shape[1]
    C_ = C(c, "RJSA", k = k, d = d)
    out = 1
    # Normalize index order so the slicing below removes both rows correctly.
    [j1, j2] = [j2, j1] if j1 > j2 else [j1, j2]
    Mu_ = np.concatenate((Mu[:j1, :], Mu[j1 + 1 : j2, :], Mu[j2 + 1:, :], mu))
    P_ = P(X, Mu, phi)
    P_1 = P(X, Mu_, phi)
    for i in range(c):
        out *= (y[:, i].T.dot(P_).dot(y[:, i]) / y[:, i].T.dot(P_1).dot(y[:, i])) ** (N/2)
    out *= k * np.exp(C_) / (s * (k - 1))
    return out
def Update1(X, Mu, y, mu):
    """Global proposal: draw a fresh center uniformly from the inflated data box.

    NOTE(review): the incoming ``mu`` is immediately overwritten and never
    read, and ``y`` is unused — presumably the signature is kept only for
    symmetry with Update2; confirm.
    """
    d = Mu.shape[1]
    Min = X.min(axis = 0)
    Max = X.max(axis = 0)
    W = Max - Min
    t = 0.5
    mu = np.zeros((1, d))
    for i in range(d):
        mu[0, i] = random.uniform(Min[i] - t * W[i], Max[i] + t * W[i])
    return mu
def Update2(X, Mu, y, mu):
    """Local proposal: random-walk step around the current center ``mu``.

    NOTE(review): ``np.cov(mu)`` on a 1-D vector yields a scalar variance of
    the coordinates, so the proposal covariance ``I * sigma_`` is isotropic —
    confirm this is the intended walk. ``X``, ``Mu`` (beyond d) and ``y``
    are otherwise unused.
    """
    d = Mu.shape[1]
    I = np.identity(d)
    mean = mu
    sigma_ = np.cov(mu)
    sigma = I * sigma_
    mu = np.random.multivariate_normal(mean, sigma).reshape(1, d)
    return mu
def Update(X, Mu, y, phi = "Gauss"):
    """One Metropolis sweep updating every center in ``Mu`` in place.

    For each center, a global (Update1) or local (Update2) proposal is made
    with probability 1/2 each and accepted with probability A(ratio), where
    the ratio compares residual projections before/after the move.

    Bug fix: the proposal used ``Mu_ = Mu``, which *aliases* the array, so
    the in-place write ``Mu_[j, :] = mu_`` also mutated ``Mu`` and both
    projections in the acceptance ratio came from the same already-updated
    centers (ratio always 1, every move accepted). The proposal now goes
    into an explicit copy. Unused locals k and d were dropped.
    """
    N = X.shape[0]
    c = y.shape[1]
    threshold = 0.5  # probability of the global (Update1) proposal
    for j in range(Mu.shape[0]):
        mu = Mu[j, :]
        w = np.random.rand()
        mu_ = Update1(X, Mu, y, mu) if w <= threshold else Update2(X, Mu, y, mu)
        Mu_ = Mu.copy()
        Mu_[j, :] = mu_
        P_ = P(X, Mu, phi)
        P_1 = P(X, Mu_, phi)
        RJSA = 1
        for i in range(c):
            RJSA *= (y[:, i].T.dot(P_).dot(y[:, i]) / y[:, i].T.dot(P_1).dot(y[:, i])) ** (N / 2)
        u = np.random.rand()
        if u <= A(RJSA):
            Mu = Mu_
    return Mu
def Alpha(X, Mu, y, phi = "Gauss"):
    """Least-squares weights (D^T D)^+ D^T y for the design matrix D(X, Mu)."""
    design = D(X, Mu, phi)
    gram = design.T.dot(design)
    return la.pinv(gram).dot(design.T).dot(y)
def Tao(X, Mu, y, phi = "Gauss"):
    """Diagonal noise-variance estimate: tao[t, t] = y_t^T P y_t / N.

    Improvement: the original also computed ``D(X, Mu, phi)`` into a local
    that was never read; that pinv-sized amount of wasted work is removed
    (D is pure, so dropping the call changes nothing observable).
    """
    N = X.shape[0]
    c = y.shape[1]
    tao = np.zeros((c, c))
    P_ = P(X, Mu, phi)
    for t in range(c):
        tao[t, t] = 1 / N * y[:, t].T.dot(P_).dot(y[:, t])
    return tao
def Predict(X, Mu, alpha, tao, c, phi = "Gauss"):
    """Model output D(X, Mu) . alpha plus fresh per-row Gaussian noise.

    Noise rows are drawn from N(0, tao^2), so repeated calls with the same
    arguments give different results.
    """
    N = X.shape[0]
    D_ = D(X, Mu, phi)
    n = np.zeros((N, c))
    for t in range(N):
        nt = np.random.multivariate_normal(np.zeros(c), tao ** 2).reshape(1, c)
        n[t, :] = nt
    predict = D_.dot(alpha) + n
    return predict
def Loss(X, Mu, y, alpha, tao, phi = "Gauss"):
    """Mean squared error between a model prediction and y.

    NOTE(review): Predict adds fresh Gaussian noise on every call, so this
    loss is stochastic — confirm that is intended where losses are compared.
    """
    predict = Predict(X, Mu, alpha, tao, y.shape[1], phi)
    mse = ((predict - y) ** 2).mean()
    return mse # 0.5 * np.sum((predict - y) ** 2)
def T(i):
    """Annealing temperature schedule: 2/i, with a large sentinel at i <= 0."""
    if i > 0:
        return 2 / i
    return 666
def Mk(T, X, Mu, y, phi = "Gauss"):
    """Log model score (up to constants) used by SA3's acceptance test.

    Sum of -(N/2) log(residual power) per output column minus the parameter
    count. NOTE(review): the accumulator starts at 1 and is then *added* to —
    apparently a leftover from a product formulation; the +1 offset cancels
    in SA3's ratio, so it is harmless there, but confirm it is intended.
    ``T`` is accepted but unused.
    """
    N = X.shape[0]
    c = y.shape[1]
    k = Mu.shape[0]
    d = X.shape[1]
    out = 1
    P_ = P(X, Mu, phi)
    for i in range(c):
        out += np.log(y[:, i].T.dot(P_).dot(y[:, i])) * (-N / 2)
    out += -(k * (c + 1) + c * (d + 1))
    return out
def Pi(T, f):
    """Boltzmann weight exp(-f / T) of energy f at temperature T."""
    exponent = -f / T
    return np.exp(exponent)
def SA1(X, Mu, y, alpha, tao, iter, phi = "Gauss"):
    """Simulated-annealing sweep: greedily accept proposals that do not
    decrease the Boltzmann weight of the loss at temperature T(iter).

    Bug fix: ``Mu_ = Mu`` aliased the array, so the proposed move was
    written into ``Mu`` itself and both losses were evaluated on the same
    mutated centers; the proposal now goes into an explicit copy. Unused
    locals (N, c, sigma) were dropped.
    """
    k = Mu.shape[0]
    threshold = 0.5  # probability of the global (Update1) proposal
    T_ = T(iter)
    for j in range(k):
        mu = Mu[j, :]
        w = np.random.rand()
        mu_ = Update1(X, Mu, y, mu) if w <= threshold else Update2(X, Mu, y, mu)
        Mu_ = Mu.copy()
        Mu_[j, :] = mu_
        loss_ = Loss(X, Mu, y, alpha, tao, phi)
        loss_1 = Loss(X, Mu_, y, alpha, tao, phi)
        if Pi(T_, loss_) <= Pi(T_, loss_1):
            Mu = Mu_
    return Mu
def SA2(X, y, iter, Mu, Mu_old, alpha, alpha_old, tao, tao_old, phi = "Gauss"):
    """Annealed accept/reject between the current and previous center sets.

    Accepts Mu with probability min(1, Pi(loss) / Pi(loss_old)) at
    temperature T(iter); otherwise keeps Mu_old.
    """
    N = X.shape[0]
    c = y.shape[1]
    k = Mu.shape[0]
    u = np.random.rand()
    T_ = T(iter)
    loss = Loss(X, Mu, y, alpha, tao, phi)
    loss_old = Loss(X, Mu_old, y, alpha_old, tao_old, phi)
    if u <= A(Pi(T_, loss) / Pi(T_, loss_old)):
        return Mu
    else:
        return Mu_old
def SA3(X, y, iter, Mu, Mu_old, phi = "Gauss"):
    """Annealed accept/reject using the model score Mk instead of the loss.

    Higher Mk is better, hence the negated energies in the Pi ratio.
    """
    u = np.random.rand()
    T_ = T(iter)
    Mk_ = Mk(T_, X, Mu, y, phi)
    Mk_old = Mk(T_, X, Mu_old, y, phi)
    if u <= A(Pi(T_, -Mk_) / Pi(T_, -Mk_old)):
        return Mu
    else:
        return Mu_old
def AIC(X, y, Mu, Mu_old, alpha, alpha_old, tao, tao_old, phi = "Gauss"):
    """Pick the center set (new vs old) with the lower penalized squared loss.

    The penalty is the parameter count k(c+1) + c(1+d) + 1, multiplied by
    the (stochastic) squared loss of the corresponding model.
    """
    c = y.shape[1]
    d = Mu.shape[1]
    k = Mu.shape[0]
    k_old = Mu_old.shape[0]
    # Evaluate the new model first, then the old one (order preserved for RNG).
    loss = Loss(X, Mu, y, alpha, tao, phi)
    loss_old = Loss(X, Mu_old, y, alpha_old, tao_old, phi)
    penalty = k * (c + 1) + c * (1 + d) + 1
    penalty_old = k_old * (c + 1) + c * (1 + d) + 1
    if penalty * loss ** 2 < penalty_old * loss_old ** 2:
        return Mu
    return Mu_old
a393acfc6e9b6e96032b163b6abbbaec1d0db366 | 3,224 | py | Python | targets/saturn/base.py | timvideos/litex-buildenv | a103774342c0265458979a79082b233d9ce08edb | [
"BSD-2-Clause"
] | 198 | 2018-01-17T05:39:54.000Z | 2022-03-15T08:59:16.000Z | targets/saturn/base.py | timvideos/litex-buildenv | a103774342c0265458979a79082b233d9ce08edb | [
"BSD-2-Clause"
] | 610 | 2017-12-31T01:32:32.000Z | 2022-03-19T22:07:28.000Z | targets/saturn/base.py | timvideos/litex-buildenv | a103774342c0265458979a79082b233d9ce08edb | [
"BSD-2-Clause"
] | 85 | 2018-01-13T05:51:38.000Z | 2022-02-11T18:54:14.000Z | # Support for the Numato Saturn (http://numato.com/product/saturn-spartan-6-fpga-development-board-with-ddr-sdram)
from migen import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litex.soc.interconnect import wishbone
from litedram.modules import MT46H32M16
from litedram.phy import s6ddrphy
from litedram.core import ControllerSettings
from targets.utils import dict_set_max
#from gateware import cas
from gateware import info
from gateware import spi_flash
from fractions import Fraction
from .crg import _CRG
class BaseSoC(SoCSDRAM):
    """SoC for the Numato Saturn: Spartan-6 + half-rate DDR2 (MT46H32M16).

    Wires up the clock/reset generator, the S6 DDR PHY and SDRAM controller,
    an info CSR block, and (when the CPU supports it) a memory-mapped debug
    bus.
    """

    # Map the (not yet instantiated) SPI flash into the SoC address space.
    mem_map = {**SoCSDRAM.mem_map, **{
        'spiflash': 0x20000000,
    }}

    def __init__(self, platform, **kwargs):
        # mor1kx needs a larger boot ROM than the default CPU.
        if kwargs.get('cpu_type', None) == 'mor1kx':
            dict_set_max(kwargs, 'integrated_rom_size', 0x10000)
        else:
            dict_set_max(kwargs, 'integrated_rom_size', 0x8000)
        dict_set_max(kwargs, 'integrated_sram_size', 0x4000)

        # 31.25 MHz system clock (expressed exactly as a Fraction).
        sys_clk_freq = (31 + Fraction(1, 4))*1000*1000
        # SoCSDRAM ---------------------------------------------------------------------------------
        SoCSDRAM.__init__(self, platform, clk_freq=sys_clk_freq, **kwargs)

        # CRG --------------------------------------------------------------------------------------
        self.submodules.crg = _CRG(platform, sys_clk_freq)

        # DDR2 SDRAM -------------------------------------------------------------------------------
        if True:
            sdram_module = MT46H32M16(sys_clk_freq, "1:2")
            # Half-rate PHY; bitslip/alignment values are board-specific tuning.
            self.submodules.ddrphy = s6ddrphy.S6HalfRateDDRPHY(
                platform.request("ddram"),
                memtype = sdram_module.memtype,
                rd_bitslip = 2,
                wr_bitslip = 3,
                dqs_ddr_alignment="C1")
            self.add_csr("ddrphy")
            controller_settings = ControllerSettings(
                with_bandwidth=True)
            self.register_sdram(
                self.ddrphy,
                geom_settings = sdram_module.geom_settings,
                timing_settings = sdram_module.timing_settings,
                controller_settings=controller_settings)
            # Feed the PHY the 4x write/read strobes generated by the CRG.
            self.comb += [
                self.ddrphy.clk4x_wr_strb.eq(self.crg.clk4x_wr_strb),
                self.ddrphy.clk4x_rd_strb.eq(self.crg.clk4x_rd_strb),
            ]

        # Basic peripherals ------------------------------------------------------------------------
        # info module
        self.submodules.info = info.Info(platform, self.__class__.__name__)
        self.add_csr("info")
        # control and status module
        #self.submodules.cas = cas.ControlAndStatus(platform, sys_clk_freq)
        # NOTE(review): "cas" CSR registered while the module above is commented out — verify.
        self.add_csr("cas")

        # Add debug interface if the CPU has one ---------------------------------------------------
        if hasattr(self.cpu, "debug_bus"):
            self.register_mem(
                name="vexriscv_debug",
                address=0xf00f0000,
                interface=self.cpu.debug_bus,
                size=0x100)

        # Memory mapped SPI Flash ------------------------------------------------------------------
        # TODO: Add SPI Flash support here.


# Default SoC class picked up by the build scripts.
SoC = BaseSoC
| 37.488372 | 114 | 0.540943 |
d9560ad9f8f7507ac8cd08f3b96d401ab0ba741b | 1,482 | py | Python | neural_compressor/ux/web/service/model.py | intel/neural-compressor | 16a4a12045fcb468da4d33769aff2c1a5e2ba6ba | [
"Apache-2.0"
] | 172 | 2021-09-14T18:34:17.000Z | 2022-03-30T06:49:53.000Z | neural_compressor/ux/web/service/model.py | intel/neural-compressor | 16a4a12045fcb468da4d33769aff2c1a5e2ba6ba | [
"Apache-2.0"
] | 40 | 2021-09-14T02:26:12.000Z | 2022-03-29T08:34:04.000Z | neural_compressor/ux/web/service/model.py | intel/neural-compressor | 16a4a12045fcb468da4d33769aff2c1a5e2ba6ba | [
"Apache-2.0"
] | 33 | 2021-09-15T07:27:25.000Z | 2022-03-25T08:30:57.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workload service."""
from neural_compressor.ux.components.db_manager.db_operations import ModelAPIInterface
from neural_compressor.ux.utils.exceptions import NotFoundException
from neural_compressor.ux.web.communication import MessageQueue
from neural_compressor.ux.web.service.response_generator import Response, ResponseGenerator
mq = MessageQueue()
class ModelService:
    """Workload related services."""

    @classmethod
    def get_model(cls, data: dict) -> Response:
        """Get config file for requested Workload.

        Looks up the model details via the DB API and streams the model
        file back as a download attachment; raises NotFoundException when
        no file path is recorded for the model.
        """
        model_data = ModelAPIInterface.get_model_details(data)
        model_path = model_data.get("path", None)
        if model_path is None:
            raise NotFoundException("Unable to find model file")
        return ResponseGenerator.serve_from_filesystem(
            path=model_path,
            as_attachment=True,
        )
| 35.285714 | 91 | 0.738192 |
83f2ff3bfc12b38dd32b1b6e0d5e1aaaad239919 | 979 | py | Python | netket/utils/optional_deps.py | rbktech/netket | 847e120cad48f9c92d394e2078370e452f268a3d | [
"Apache-2.0"
] | null | null | null | netket/utils/optional_deps.py | rbktech/netket | 847e120cad48f9c92d394e2078370e452f268a3d | [
"Apache-2.0"
] | 8 | 2022-01-17T17:24:53.000Z | 2022-03-28T17:31:04.000Z | netket/utils/optional_deps.py | inailuig/netket | ab57a6fb019edb9ac298969950724781f2ae2b22 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Feature-detection flags: each optional third-party dependency is probed
# once at import time so callers can branch on a cheap module-level boolean
# instead of wrapping their own try/except around the import.
try:
    import torch  # type: ignore

    torch_available = True
except ImportError:
    torch_available = False

try:
    import tensorboardX  # type: ignore

    tensorboard_available = True
except ImportError:
    tensorboard_available = False

try:
    import backpack  # type: ignore

    backpack_available = True
except ImportError:
    backpack_available = False
| 26.459459 | 74 | 0.745659 |
ef04867202c8d50203b07058cddb42b7b40e2584 | 26,378 | py | Python | python/pyspark/mllib/clustering.py | tophua/spark1.52 | 464f406d04329634d5a8a0d6956d6d803a59d897 | [
"Apache-2.0"
] | 53 | 2016-04-22T03:57:05.000Z | 2020-08-11T02:54:15.000Z | python/pyspark/mllib/clustering.py | tophua/spark1.52 | 464f406d04329634d5a8a0d6956d6d803a59d897 | [
"Apache-2.0"
] | null | null | null | python/pyspark/mllib/clustering.py | tophua/spark1.52 | 464f406d04329634d5a8a0d6956d6d803a59d897 | [
"Apache-2.0"
] | 42 | 2016-04-22T03:56:50.000Z | 2020-11-23T09:32:25.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import array as pyarray
if sys.version > '3':
xrange = range
basestring = str
from math import exp, log
from numpy import array, random, tile
from collections import namedtuple
from pyspark import SparkContext
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import JavaModelWrapper, callMLlibFunc, callJavaFunc, _py2java, _java2py
from pyspark.mllib.linalg import SparseVector, _convert_to_vector, DenseVector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat.distribution import MultivariateGaussian
from pyspark.mllib.util import Saveable, Loader, inherit_doc, JavaLoader, JavaSaveable
from pyspark.streaming import DStream
__all__ = ['KMeansModel', 'KMeans', 'GaussianMixtureModel', 'GaussianMixture',
'PowerIterationClusteringModel', 'PowerIterationClustering',
'StreamingKMeans', 'StreamingKMeansModel',
'LDA', 'LDAModel']
@inherit_doc
class KMeansModel(Saveable, Loader):
"""A clustering model derived from the k-means method.
>>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
>>> model = KMeans.train(
... sc.parallelize(data), 2, maxIterations=10, runs=30, initializationMode="random",
... seed=50, initializationSteps=5, epsilon=1e-4)
>>> model.predict(array([0.0, 0.0])) == model.predict(array([1.0, 1.0]))
True
>>> model.predict(array([8.0, 9.0])) == model.predict(array([9.0, 8.0]))
True
>>> model.k
2
>>> model.computeCost(sc.parallelize(data))
2.0000000000000004
>>> model = KMeans.train(sc.parallelize(data), 2)
>>> sparse_data = [
... SparseVector(3, {1: 1.0}),
... SparseVector(3, {1: 1.1}),
... SparseVector(3, {2: 1.0}),
... SparseVector(3, {2: 1.1})
... ]
>>> model = KMeans.train(sc.parallelize(sparse_data), 2, initializationMode="k-means||",
... seed=50, initializationSteps=5, epsilon=1e-4)
>>> model.predict(array([0., 1., 0.])) == model.predict(array([0, 1.1, 0.]))
True
>>> model.predict(array([0., 0., 1.])) == model.predict(array([0, 0, 1.1]))
True
>>> model.predict(sparse_data[0]) == model.predict(sparse_data[1])
True
>>> model.predict(sparse_data[2]) == model.predict(sparse_data[3])
True
>>> isinstance(model.clusterCenters, list)
True
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = KMeansModel.load(sc, path)
>>> sameModel.predict(sparse_data[0]) == model.predict(sparse_data[0])
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
"""
def __init__(self, centers):
self.centers = centers
@property
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return self.centers
@property
def k(self):
"""Total number of clusters."""
return len(self.centers)
def predict(self, x):
"""Find the cluster to which x belongs in this model."""
best = 0
best_distance = float("inf")
if isinstance(x, RDD):
return x.map(self.predict)
x = _convert_to_vector(x)
for i in xrange(len(self.centers)):
distance = x.squared_distance(self.centers[i])
if distance < best_distance:
best = i
best_distance = distance
return best
def computeCost(self, rdd):
"""
Return the K-means cost (sum of squared distances of points to
their nearest center) for this model on the given data.
"""
cost = callMLlibFunc("computeCostKmeansModel", rdd.map(_convert_to_vector),
[_convert_to_vector(c) for c in self.centers])
return cost
def save(self, sc, path):
java_centers = _py2java(sc, [_convert_to_vector(c) for c in self.centers])
java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel(java_centers)
java_model.save(sc._jsc.sc(), path)
@classmethod
def load(cls, sc, path):
java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel.load(sc._jsc.sc(), path)
return KMeansModel(_java2py(sc, java_model.clusterCenters()))
class KMeans(object):
    """Entry point for training :class:`KMeansModel` instances.

    Training is delegated to the JVM-side MLlib implementation; only the
    resulting cluster centers are kept on the Python side.
    """
    @classmethod
    def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode="k-means||",
              seed=None, initializationSteps=5, epsilon=1e-4):
        """Train a k-means clustering model.

        :param rdd: RDD of data points (anything accepted by ``_convert_to_vector``).
        :param k: Number of clusters to create.
        :param maxIterations: Maximum number of iterations. Default 100.
        :param runs: Number of parallel runs of the algorithm. Default 1.
        :param initializationMode: Either "k-means||" (default) or "random".
        :param seed: Random seed for center initialization; ``None`` for a random seed.
        :param initializationSteps: Number of steps in the k-means|| initialization.
        :param epsilon: Distance threshold within which centers are considered converged.
        """
        # Train on the JVM, then pull the centers back as NumPy arrays.
        model = callMLlibFunc("trainKMeansModel", rdd.map(_convert_to_vector), k, maxIterations,
                              runs, initializationMode, seed, initializationSteps, epsilon)
        centers = callJavaFunc(rdd.context, model.clusterCenters)
        return KMeansModel([c.toArray() for c in centers])
@inherit_doc
class GaussianMixtureModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""
.. note:: Experimental
A clustering model derived from the Gaussian Mixture Model method.
>>> from pyspark.mllib.linalg import Vectors, DenseMatrix
>>> from numpy.testing import assert_equal
>>> from shutil import rmtree
>>> import os, tempfile
>>> clusterdata_1 = sc.parallelize(array([-0.1,-0.05,-0.01,-0.1,
... 0.9,0.8,0.75,0.935,
... -0.83,-0.68,-0.91,-0.76 ]).reshape(6, 2))
>>> model = GaussianMixture.train(clusterdata_1, 3, convergenceTol=0.0001,
... maxIterations=50, seed=10)
>>> labels = model.predict(clusterdata_1).collect()
>>> labels[0]==labels[1]
False
>>> labels[1]==labels[2]
True
>>> labels[4]==labels[5]
True
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = GaussianMixtureModel.load(sc, path)
>>> assert_equal(model.weights, sameModel.weights)
>>> mus, sigmas = list(
... zip(*[(g.mu, g.sigma) for g in model.gaussians]))
>>> sameMus, sameSigmas = list(
... zip(*[(g.mu, g.sigma) for g in sameModel.gaussians]))
>>> mus == sameMus
True
>>> sigmas == sameSigmas
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
>>> data = array([-5.1971, -2.5359, -3.8220,
... -5.2211, -5.0602, 4.7118,
... 6.8989, 3.4592, 4.6322,
... 5.7048, 4.6567, 5.5026,
... 4.5605, 5.2043, 6.2734])
>>> clusterdata_2 = sc.parallelize(data.reshape(5,3))
>>> model = GaussianMixture.train(clusterdata_2, 2, convergenceTol=0.0001,
... maxIterations=150, seed=10)
>>> labels = model.predict(clusterdata_2).collect()
>>> labels[0]==labels[1]
True
>>> labels[2]==labels[3]==labels[4]
True
"""
@property
def weights(self):
"""
Weights for each Gaussian distribution in the mixture, where weights[i] is
the weight for Gaussian i, and weights.sum == 1.
"""
return array(self.call("weights"))
@property
def gaussians(self):
"""
Array of MultivariateGaussian where gaussians[i] represents
the Multivariate Gaussian (Normal) Distribution for Gaussian i.
"""
return [
MultivariateGaussian(gaussian[0], gaussian[1])
for gaussian in zip(*self.call("gaussians"))]
@property
def k(self):
"""Number of gaussians in mixture."""
return len(self.weights)
def predict(self, x):
"""
Find the cluster to which the points in 'x' has maximum membership
in this model.
:param x: RDD of data points.
:return: cluster_labels. RDD of cluster labels.
"""
if isinstance(x, RDD):
cluster_labels = self.predictSoft(x).map(lambda z: z.index(max(z)))
return cluster_labels
else:
raise TypeError("x should be represented by an RDD, "
"but got %s." % type(x))
def predictSoft(self, x):
"""
Find the membership of each point in 'x' to all mixture components.
:param x: RDD of data points.
:return: membership_matrix. RDD of array of double values.
"""
if isinstance(x, RDD):
means, sigmas = zip(*[(g.mu, g.sigma) for g in self.gaussians])
membership_matrix = callMLlibFunc("predictSoftGMM", x.map(_convert_to_vector),
_convert_to_vector(self.weights), means, sigmas)
return membership_matrix.map(lambda x: pyarray.array('d', x))
else:
raise TypeError("x should be represented by an RDD, "
"but got %s." % type(x))
@classmethod
def load(cls, sc, path):
"""Load the GaussianMixtureModel from disk.
:param sc: SparkContext
:param path: str, path to where the model is stored.
"""
model = cls._load_java(sc, path)
wrapper = sc._jvm.GaussianMixtureModelWrapper(model)
return cls(wrapper)
class GaussianMixture(object):
    """
    .. note:: Experimental
    Learning algorithm for Gaussian Mixtures using the expectation-maximization algorithm.
    :param data: RDD of data points
    :param k: Number of components
    :param convergenceTol: Threshold value to check the convergence criteria. Defaults to 1e-3
    :param maxIterations: Number of iterations. Default to 100
    :param seed: Random Seed
    :param initialModel: GaussianMixtureModel for initializing learning
    """
    @classmethod
    def train(cls, rdd, k, convergenceTol=1e-3, maxIterations=100, seed=None, initialModel=None):
        """Train a Gaussian Mixture clustering model.

        Returns a :class:`GaussianMixtureModel` wrapping the JVM-side model.
        """
        # When no initial model is supplied these stay None and the JVM side
        # picks its own starting parameters.
        initialModelWeights = None
        initialModelMu = None
        initialModelSigma = None
        if initialModel is not None:
            # The warm-start model must have the same number of components as
            # the mixture being trained.
            if initialModel.k != k:
                raise Exception("Mismatched cluster count, initialModel.k = %s, however k = %s"
                                % (initialModel.k, k))
            initialModelWeights = list(initialModel.weights)
            initialModelMu = [initialModel.gaussians[i].mu for i in range(initialModel.k)]
            initialModelSigma = [initialModel.gaussians[i].sigma for i in range(initialModel.k)]
        java_model = callMLlibFunc("trainGaussianMixtureModel", rdd.map(_convert_to_vector),
                                   k, convergenceTol, maxIterations, seed,
                                   initialModelWeights, initialModelMu, initialModelSigma)
        return GaussianMixtureModel(java_model)
class PowerIterationClusteringModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""
.. note:: Experimental
Model produced by [[PowerIterationClustering]].
>>> data = [(0, 1, 1.0), (0, 2, 1.0), (0, 3, 1.0), (1, 2, 1.0), (1, 3, 1.0),
... (2, 3, 1.0), (3, 4, 0.1), (4, 5, 1.0), (4, 15, 1.0), (5, 6, 1.0),
... (6, 7, 1.0), (7, 8, 1.0), (8, 9, 1.0), (9, 10, 1.0), (10, 11, 1.0),
... (11, 12, 1.0), (12, 13, 1.0), (13, 14, 1.0), (14, 15, 1.0)]
>>> rdd = sc.parallelize(data, 2)
>>> model = PowerIterationClustering.train(rdd, 2, 100)
>>> model.k
2
>>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
>>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
True
>>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
True
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = PowerIterationClusteringModel.load(sc, path)
>>> sameModel.k
2
>>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
>>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
True
>>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
"""
@property
def k(self):
"""
Returns the number of clusters.
"""
return self.call("k")
def assignments(self):
"""
Returns the cluster assignments of this model.
"""
return self.call("getAssignments").map(
lambda x: (PowerIterationClustering.Assignment(*x)))
@classmethod
def load(cls, sc, path):
model = cls._load_java(sc, path)
wrapper = sc._jvm.PowerIterationClusteringModelWrapper(model)
return PowerIterationClusteringModel(wrapper)
class PowerIterationClustering(object):
    """
    .. note:: Experimental
    Power Iteration Clustering (PIC), a scalable graph clustering algorithm
    developed by [[http://www.icml2010.org/papers/387.pdf Lin and Cohen]].
    From the abstract: PIC finds a very low-dimensional embedding of a
    dataset using truncated power iteration on a normalized pair-wise
    similarity matrix of the data.
    """
    @classmethod
    def train(cls, rdd, k, maxIterations=100, initMode="random"):
        """
        :param rdd: an RDD of (i, j, s,,ij,,) tuples representing the
            affinity matrix, which is the matrix A in the PIC paper.
            The similarity s,,ij,, must be nonnegative.
            This is a symmetric matrix and hence s,,ij,, = s,,ji,,.
            For any (i, j) with nonzero similarity, there should be
            either (i, j, s,,ij,,) or (j, i, s,,ji,,) in the input.
            Tuples with i = j are ignored, because we assume
            s,,ij,, = 0.0.
        :param k: Number of clusters.
        :param maxIterations: Maximum number of iterations of the
            PIC algorithm.
        :param initMode: Initialization mode.
        """
        # k and maxIterations are coerced to int so callers may pass floats
        # or numeric strings; training itself happens on the JVM side.
        model = callMLlibFunc("trainPowerIterationClusteringModel",
                              rdd.map(_convert_to_vector), int(k), int(maxIterations), initMode)
        return PowerIterationClusteringModel(model)
    class Assignment(namedtuple("Assignment", ["id", "cluster"])):
        """
        Represents an (id, cluster) tuple.
        """
class StreamingKMeansModel(KMeansModel):
"""
.. note:: Experimental
Clustering model which can perform an online update of the centroids.
The update formula for each centroid is given by
* c_t+1 = ((c_t * n_t * a) + (x_t * m_t)) / (n_t + m_t)
* n_t+1 = n_t * a + m_t
where
* c_t: Centroid at the n_th iteration.
* n_t: Number of samples (or) weights associated with the centroid
at the n_th iteration.
* x_t: Centroid of the new data closest to c_t.
* m_t: Number of samples (or) weights of the new data closest to c_t
* c_t+1: New centroid.
* n_t+1: New number of weights.
* a: Decay Factor, which gives the forgetfulness.
Note that if a is set to 1, it is the weighted mean of the previous
and new data. If it set to zero, the old centroids are completely
forgotten.
:param clusterCenters: Initial cluster centers.
:param clusterWeights: List of weights assigned to each cluster.
>>> initCenters = [[0.0, 0.0], [1.0, 1.0]]
>>> initWeights = [1.0, 1.0]
>>> stkm = StreamingKMeansModel(initCenters, initWeights)
>>> data = sc.parallelize([[-0.1, -0.1], [0.1, 0.1],
... [0.9, 0.9], [1.1, 1.1]])
>>> stkm = stkm.update(data, 1.0, u"batches")
>>> stkm.centers
array([[ 0., 0.],
[ 1., 1.]])
>>> stkm.predict([-0.1, -0.1])
0
>>> stkm.predict([0.9, 0.9])
1
>>> stkm.clusterWeights
[3.0, 3.0]
>>> decayFactor = 0.0
>>> data = sc.parallelize([DenseVector([1.5, 1.5]), DenseVector([0.2, 0.2])])
>>> stkm = stkm.update(data, 0.0, u"batches")
>>> stkm.centers
array([[ 0.2, 0.2],
[ 1.5, 1.5]])
>>> stkm.clusterWeights
[1.0, 1.0]
>>> stkm.predict([0.2, 0.2])
0
>>> stkm.predict([1.5, 1.5])
1
"""
def __init__(self, clusterCenters, clusterWeights):
super(StreamingKMeansModel, self).__init__(centers=clusterCenters)
self._clusterWeights = list(clusterWeights)
@property
def clusterWeights(self):
"""Return the cluster weights."""
return self._clusterWeights
@ignore_unicode_prefix
def update(self, data, decayFactor, timeUnit):
"""Update the centroids, according to data
:param data: Should be a RDD that represents the new data.
:param decayFactor: forgetfulness of the previous centroids.
:param timeUnit: Can be "batches" or "points". If points, then the
decay factor is raised to the power of number of new
points and if batches, it is used as it is.
"""
if not isinstance(data, RDD):
raise TypeError("Data should be of an RDD, got %s." % type(data))
data = data.map(_convert_to_vector)
decayFactor = float(decayFactor)
if timeUnit not in ["batches", "points"]:
raise ValueError(
"timeUnit should be 'batches' or 'points', got %s." % timeUnit)
vectorCenters = [_convert_to_vector(center) for center in self.centers]
updatedModel = callMLlibFunc(
"updateStreamingKMeansModel", vectorCenters, self._clusterWeights,
data, decayFactor, timeUnit)
self.centers = array(updatedModel[0])
self._clusterWeights = list(updatedModel[1])
return self
class StreamingKMeans(object):
    """
    .. note:: Experimental
    Provides methods to set k, decayFactor, timeUnit to configure the
    KMeans algorithm for fitting and predicting on incoming dstreams.
    More details on how the centroids are updated are provided under the
    docs of StreamingKMeansModel.
    :param k: int, number of clusters
    :param decayFactor: float, forgetfulness of the previous centroids.
    :param timeUnit: can be "batches" or "points". If points, then the
        decayfactor is raised to the power of no. of new points.
    """
    def __init__(self, k=2, decayFactor=1.0, timeUnit="batches"):
        self._k = k
        self._decayFactor = decayFactor
        if timeUnit not in ["batches", "points"]:
            raise ValueError(
                "timeUnit should be 'batches' or 'points', got %s." % timeUnit)
        self._timeUnit = timeUnit
        # No model until setInitialCenters/setRandomCenters is called;
        # _validate enforces this before any train/predict call.
        self._model = None
    def latestModel(self):
        """Return the latest model"""
        return self._model
    def _validate(self, dstream):
        # Guard shared by trainOn/predictOn/predictOnValues: a model must
        # exist and the argument must really be a DStream.
        if self._model is None:
            raise ValueError(
                "Initial centers should be set either by setInitialCenters "
                "or setRandomCenters.")
        if not isinstance(dstream, DStream):
            raise TypeError(
                "Expected dstream to be of type DStream, "
                "got type %s" % type(dstream))
    def setK(self, k):
        """Set number of clusters."""
        self._k = k
        return self
    def setDecayFactor(self, decayFactor):
        """Set decay factor."""
        self._decayFactor = decayFactor
        return self
    def setHalfLife(self, halfLife, timeUnit):
        """
        Set number of batches after which the centroids of that
        particular batch has half the weightage.
        """
        self._timeUnit = timeUnit
        # Choose the decay factor so that weight halves after halfLife units:
        # exp(log(0.5) / halfLife) ** halfLife == 0.5.
        self._decayFactor = exp(log(0.5) / halfLife)
        return self
    def setInitialCenters(self, centers, weights):
        """
        Set initial centers. Should be set before calling trainOn.
        """
        self._model = StreamingKMeansModel(centers, weights)
        return self
    def setRandomCenters(self, dim, weight, seed):
        """
        Set the initial centres to be random samples from
        a gaussian population with constant weights.
        """
        rng = random.RandomState(seed)
        clusterCenters = rng.randn(self._k, dim)
        clusterWeights = tile(weight, self._k)
        self._model = StreamingKMeansModel(clusterCenters, clusterWeights)
        return self
    def trainOn(self, dstream):
        """Train the model on the incoming dstream."""
        self._validate(dstream)
        def update(rdd):
            self._model.update(rdd, self._decayFactor, self._timeUnit)
        dstream.foreachRDD(update)
    def predictOn(self, dstream):
        """
        Make predictions on a dstream.
        Returns a transformed dstream object
        """
        self._validate(dstream)
        return dstream.map(lambda x: self._model.predict(x))
    def predictOnValues(self, dstream):
        """
        Make predictions on a keyed dstream.
        Returns a transformed dstream object.
        """
        self._validate(dstream)
        return dstream.mapValues(lambda x: self._model.predict(x))
class LDAModel(JavaModelWrapper):
""" A clustering model derived from the LDA method.
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology
- "word" = "term": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over words representing some concept
References:
- Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
>>> from pyspark.mllib.linalg import Vectors
>>> from numpy.testing import assert_almost_equal, assert_equal
>>> data = [
... [1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],
... ]
>>> rdd = sc.parallelize(data)
>>> model = LDA.train(rdd, k=2)
>>> model.vocabSize()
2
>>> topics = model.topicsMatrix()
>>> topics_expect = array([[0.5, 0.5], [0.5, 0.5]])
>>> assert_almost_equal(topics, topics_expect, 1)
>>> import os, tempfile
>>> from shutil import rmtree
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = LDAModel.load(sc, path)
>>> assert_equal(sameModel.topicsMatrix(), model.topicsMatrix())
>>> sameModel.vocabSize() == model.vocabSize()
True
>>> try:
... rmtree(path)
... except OSError:
... pass
"""
def topicsMatrix(self):
"""Inferred topics, where each topic is represented by a distribution over terms."""
return self.call("topicsMatrix").toArray()
def vocabSize(self):
"""Vocabulary size (number of terms or terms in the vocabulary)"""
return self.call("vocabSize")
def save(self, sc, path):
"""Save the LDAModel on to disk.
:param sc: SparkContext
:param path: str, path to where the model needs to be stored.
"""
if not isinstance(sc, SparkContext):
raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
if not isinstance(path, basestring):
raise TypeError("path should be a basestring, got type %s" % type(path))
self._java_model.save(sc._jsc.sc(), path)
@classmethod
def load(cls, sc, path):
"""Load the LDAModel from disk.
:param sc: SparkContext
:param path: str, path to where the model is stored.
"""
if not isinstance(sc, SparkContext):
raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
if not isinstance(path, basestring):
raise TypeError("path should be a basestring, got type %s" % type(path))
java_model = sc._jvm.org.apache.spark.mllib.clustering.DistributedLDAModel.load(
sc._jsc.sc(), path)
return cls(java_model)
class LDA(object):
    """Entry point for training Latent Dirichlet Allocation topic models."""
    @classmethod
    def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0,
              topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer="em"):
        """Train a LDA model.
        :param rdd: RDD of data points
        :param k: Number of clusters you want
        :param maxIterations: Number of iterations. Default to 20
        :param docConcentration: Concentration parameter (commonly named "alpha")
            for the prior placed on documents' distributions over topics ("theta").
        :param topicConcentration: Concentration parameter (commonly named "beta" or "eta")
            for the prior placed on topics' distributions over terms.
        :param seed: Random Seed
        :param checkpointInterval: Period (in iterations) between checkpoints.
        :param optimizer: LDAOptimizer used to perform the actual calculation.
            Currently "em", "online" are supported. Default to "em".
        """
        # Training runs on the JVM; the returned handle is wrapped in LDAModel.
        model = callMLlibFunc("trainLDAModel", rdd, k, maxIterations,
                              docConcentration, topicConcentration, seed,
                              checkpointInterval, optimizer)
        return LDAModel(model)
def _test():
    """Run this module's doctests against a local SparkContext.

    Exits the process with a non-zero status if any doctest fails.
    """
    import doctest
    import pyspark.mllib.clustering
    globs = pyspark.mllib.clustering.__dict__.copy()
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    try:
        (failure_count, test_count) = doctest.testmod(
            globs=globs, optionflags=doctest.ELLIPSIS)
    finally:
        # Always release the JVM-backed context, even if testmod itself raises.
        globs['sc'].stop()
    if failure_count:
        # sys.exit instead of the site-module builtin exit(), which is not
        # guaranteed to exist (e.g. under `python -S` or frozen interpreters).
        sys.exit(-1)
if __name__ == "__main__":
_test()
| 36.892308 | 99 | 0.60759 |
0b6e9dac0bef8f79e10bfcf972fe89c8eb9fcc4b | 18,901 | py | Python | graphene/types/schema.py | DoctorJohn/graphene | 05d96a983374f368bb1a2124be5878545ba3d2a9 | [
"MIT"
] | null | null | null | graphene/types/schema.py | DoctorJohn/graphene | 05d96a983374f368bb1a2124be5878545ba3d2a9 | [
"MIT"
] | null | null | null | graphene/types/schema.py | DoctorJohn/graphene | 05d96a983374f368bb1a2124be5878545ba3d2a9 | [
"MIT"
] | null | null | null | import inspect
from functools import partial
from graphql import (
default_type_resolver,
get_introspection_query,
graphql,
graphql_sync,
introspection_types,
parse,
print_schema,
subscribe,
GraphQLArgument,
GraphQLBoolean,
GraphQLEnumValue,
GraphQLField,
GraphQLFloat,
GraphQLID,
GraphQLInputField,
GraphQLInt,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLSchema,
GraphQLString,
Undefined,
)
from ..utils.str_converters import to_camel_case
from ..utils.get_unbound_function import get_unbound_function
from .definitions import (
GrapheneEnumType,
GrapheneGraphQLType,
GrapheneInputObjectType,
GrapheneInterfaceType,
GrapheneObjectType,
GrapheneScalarType,
GrapheneUnionType,
)
from .dynamic import Dynamic
from .enum import Enum
from .field import Field
from .inputobjecttype import InputObjectType
from .interface import Interface
from .objecttype import ObjectType
from .resolver import get_default_resolver
from .scalars import ID, Boolean, Float, Int, Scalar, String
from .structures import List, NonNull
from .union import Union
from .utils import get_field_as
introspection_query = get_introspection_query()
IntrospectionSchema = introspection_types["__Schema"]
def assert_valid_root_type(type_):
    """Assert that *type_* may serve as a schema root (query/mutation/subscription).

    ``None`` is accepted (the root operation is simply absent); otherwise the
    value must be either a Graphene ``ObjectType`` subclass or an already-built
    ``GraphQLObjectType`` instance.
    """
    if type_ is None:
        return
    valid = (inspect.isclass(type_) and issubclass(type_, ObjectType)) or isinstance(
        type_, GraphQLObjectType
    )
    assert valid, f"Type {type_} is not a valid ObjectType."
def is_graphene_type(type_):
    """Return ``True`` if *type_* is a Graphene type, ``False`` otherwise.

    Recognizes the ``List``/``NonNull`` structure wrappers (instances) as well
    as subclasses of the core Graphene type classes.
    """
    if isinstance(type_, (List, NonNull)):
        return True
    if inspect.isclass(type_) and issubclass(
        type_, (ObjectType, InputObjectType, Scalar, Interface, Union, Enum)
    ):
        return True
    # Previously this fell through and returned None implicitly; an explicit
    # False keeps the boolean contract honest while staying falsy for callers.
    return False
def is_type_of_from_possible_types(possible_types, root, _info):
    """Report whether *root* is an instance of one of *possible_types*.

    Bound via ``functools.partial`` as an ``is_type_of`` callback, so the
    GraphQL resolve-info argument is accepted but ignored.
    """
    matches = isinstance(root, possible_types)
    return matches
class TypeMap(dict):
def __init__(
self,
query=None,
mutation=None,
subscription=None,
types=None,
auto_camelcase=True,
):
assert_valid_root_type(query)
assert_valid_root_type(mutation)
assert_valid_root_type(subscription)
if types is None:
types = []
for type_ in types:
assert is_graphene_type(type_)
self.auto_camelcase = auto_camelcase
create_graphql_type = self.add_type
self.query = create_graphql_type(query) if query else None
self.mutation = create_graphql_type(mutation) if mutation else None
self.subscription = create_graphql_type(subscription) if subscription else None
self.types = [create_graphql_type(graphene_type) for graphene_type in types]
def add_type(self, graphene_type):
if inspect.isfunction(graphene_type):
graphene_type = graphene_type()
if isinstance(graphene_type, List):
return GraphQLList(self.add_type(graphene_type.of_type))
if isinstance(graphene_type, NonNull):
return GraphQLNonNull(self.add_type(graphene_type.of_type))
try:
name = graphene_type._meta.name
except AttributeError:
raise TypeError(f"Expected Graphene type, but received: {graphene_type}.")
graphql_type = self.get(name)
if graphql_type:
return graphql_type
if issubclass(graphene_type, ObjectType):
graphql_type = self.create_objecttype(graphene_type)
elif issubclass(graphene_type, InputObjectType):
graphql_type = self.create_inputobjecttype(graphene_type)
elif issubclass(graphene_type, Interface):
graphql_type = self.create_interface(graphene_type)
elif issubclass(graphene_type, Scalar):
graphql_type = self.create_scalar(graphene_type)
elif issubclass(graphene_type, Enum):
graphql_type = self.create_enum(graphene_type)
elif issubclass(graphene_type, Union):
graphql_type = self.construct_union(graphene_type)
else:
raise TypeError(f"Expected Graphene type, but received: {graphene_type}.")
self[name] = graphql_type
return graphql_type
@staticmethod
def create_scalar(graphene_type):
# We have a mapping to the original GraphQL types
# so there are no collisions.
_scalars = {
String: GraphQLString,
Int: GraphQLInt,
Float: GraphQLFloat,
Boolean: GraphQLBoolean,
ID: GraphQLID,
}
if graphene_type in _scalars:
return _scalars[graphene_type]
return GrapheneScalarType(
graphene_type=graphene_type,
name=graphene_type._meta.name,
description=graphene_type._meta.description,
serialize=getattr(graphene_type, "serialize", None),
parse_value=getattr(graphene_type, "parse_value", None),
parse_literal=getattr(graphene_type, "parse_literal", None),
)
@staticmethod
def create_enum(graphene_type):
values = {}
for name, value in graphene_type._meta.enum.__members__.items():
description = getattr(value, "description", None)
deprecation_reason = getattr(value, "deprecation_reason", None)
if not description and callable(graphene_type._meta.description):
description = graphene_type._meta.description(value)
if not deprecation_reason and callable(
graphene_type._meta.deprecation_reason
):
deprecation_reason = graphene_type._meta.deprecation_reason(value)
values[name] = GraphQLEnumValue(
value=value.value,
description=description,
deprecation_reason=deprecation_reason,
)
type_description = (
graphene_type._meta.description(None)
if callable(graphene_type._meta.description)
else graphene_type._meta.description
)
return GrapheneEnumType(
graphene_type=graphene_type,
values=values,
name=graphene_type._meta.name,
description=type_description,
)
def create_objecttype(self, graphene_type):
create_graphql_type = self.add_type
def interfaces():
interfaces = []
for graphene_interface in graphene_type._meta.interfaces:
interface = create_graphql_type(graphene_interface)
assert interface.graphene_type == graphene_interface
interfaces.append(interface)
return interfaces
if graphene_type._meta.possible_types:
is_type_of = partial(
is_type_of_from_possible_types, graphene_type._meta.possible_types
)
else:
is_type_of = graphene_type.is_type_of
return GrapheneObjectType(
graphene_type=graphene_type,
name=graphene_type._meta.name,
description=graphene_type._meta.description,
fields=partial(self.create_fields_for_type, graphene_type),
is_type_of=is_type_of,
interfaces=interfaces,
)
def create_interface(self, graphene_type):
resolve_type = (
partial(
self.resolve_type, graphene_type.resolve_type, graphene_type._meta.name
)
if graphene_type.resolve_type
else None
)
return GrapheneInterfaceType(
graphene_type=graphene_type,
name=graphene_type._meta.name,
description=graphene_type._meta.description,
fields=partial(self.create_fields_for_type, graphene_type),
resolve_type=resolve_type,
)
def create_inputobjecttype(self, graphene_type):
return GrapheneInputObjectType(
graphene_type=graphene_type,
name=graphene_type._meta.name,
description=graphene_type._meta.description,
out_type=graphene_type._meta.container,
fields=partial(
self.create_fields_for_type, graphene_type, is_input_type=True
),
)
def construct_union(self, graphene_type):
create_graphql_type = self.add_type
def types():
union_types = []
for graphene_objecttype in graphene_type._meta.types:
object_type = create_graphql_type(graphene_objecttype)
assert object_type.graphene_type == graphene_objecttype
union_types.append(object_type)
return union_types
resolve_type = (
partial(
self.resolve_type, graphene_type.resolve_type, graphene_type._meta.name
)
if graphene_type.resolve_type
else None
)
return GrapheneUnionType(
graphene_type=graphene_type,
name=graphene_type._meta.name,
description=graphene_type._meta.description,
types=types,
resolve_type=resolve_type,
)
def get_name(self, name):
if self.auto_camelcase:
return to_camel_case(name)
return name
def create_fields_for_type(self, graphene_type, is_input_type=False):
create_graphql_type = self.add_type
fields = {}
for name, field in graphene_type._meta.fields.items():
if isinstance(field, Dynamic):
field = get_field_as(field.get_type(self), _as=Field)
if not field:
continue
field_type = create_graphql_type(field.type)
if is_input_type:
_field = GraphQLInputField(
field_type,
default_value=field.default_value,
out_name=name,
description=field.description,
)
else:
args = {}
for arg_name, arg in field.args.items():
arg_type = create_graphql_type(arg.type)
processed_arg_name = arg.name or self.get_name(arg_name)
args[processed_arg_name] = GraphQLArgument(
arg_type,
out_name=arg_name,
description=arg.description,
default_value=Undefined
if isinstance(arg.type, NonNull)
else arg.default_value,
)
_field = GraphQLField(
field_type,
args=args,
resolve=field.get_resolver(
self.get_resolver_for_type(
graphene_type, f"resolve_{name}", name, field.default_value
)
),
subscribe=field.get_resolver(
self.get_resolver_for_type(
graphene_type,
f"subscribe_{name}",
name,
field.default_value,
)
),
deprecation_reason=field.deprecation_reason,
description=field.description,
)
field_name = field.name or self.get_name(name)
fields[field_name] = _field
return fields
def get_resolver_for_type(self, graphene_type, func_name, name, default_value):
if not issubclass(graphene_type, ObjectType):
return
resolver = getattr(graphene_type, func_name, None)
if not resolver:
# If we don't find the resolver in the ObjectType class, then try to
# find it in each of the interfaces
interface_resolver = None
for interface in graphene_type._meta.interfaces:
if name not in interface._meta.fields:
continue
interface_resolver = getattr(interface, func_name, None)
if interface_resolver:
break
resolver = interface_resolver
# Only if is not decorated with classmethod
if resolver:
return get_unbound_function(resolver)
default_resolver = (
graphene_type._meta.default_resolver or get_default_resolver()
)
return partial(default_resolver, name, default_value)
def resolve_type(self, resolve_type_func, type_name, root, info, _type):
type_ = resolve_type_func(root, info)
if not type_:
return_type = self[type_name]
return default_type_resolver(root, info, return_type)
if inspect.isclass(type_) and issubclass(type_, ObjectType):
graphql_type = self.get(type_._meta.name)
assert graphql_type, f"Can't find type {type_._meta.name} in schema"
assert (
graphql_type.graphene_type == type_
), f"The type {type_} does not match with the associated graphene type {graphql_type.graphene_type}."
return graphql_type
return type_
class Schema:
    """Schema Definition.
    A Graphene Schema can execute operations (query, mutation, subscription) against the defined
    types. For advanced purposes, the schema can be used to lookup type definitions and answer
    questions about the types through introspection.
    Args:
        query (Optional[Type[ObjectType]]): Root query *ObjectType*. Describes entry point for fields to *read*
            data in your Schema.
        mutation (Optional[Type[ObjectType]]): Root mutation *ObjectType*. Describes entry point for
            fields to *create, update or delete* data in your API.
        subscription (Optional[Type[ObjectType]]): Root subscription *ObjectType*. Describes entry point
            for fields to receive continuous updates.
        types (Optional[List[Type[ObjectType]]]): List of any types to include in schema that
            may not be introspected through root types.
        directives (List[GraphQLDirective], optional): List of custom directives to include in the
            GraphQL schema. Defaults to only include directives defined by GraphQL spec (@include
            and @skip) [GraphQLIncludeDirective, GraphQLSkipDirective].
        auto_camelcase (bool): Fieldnames will be transformed in Schema's TypeMap from snake_case
            to camelCase (preferred by GraphQL standard). Default True.
    """
    def __init__(
        self,
        query=None,
        mutation=None,
        subscription=None,
        types=None,
        directives=None,
        auto_camelcase=True,
    ):
        # Keep references to the graphene root types for later inspection.
        self.query = query
        self.mutation = mutation
        self.subscription = subscription
        # TypeMap translates the graphene type definitions into graphql-core types.
        type_map = TypeMap(
            query, mutation, subscription, types, auto_camelcase=auto_camelcase
        )
        # The underlying graphql-core schema that actually executes operations.
        self.graphql_schema = GraphQLSchema(
            type_map.query,
            type_map.mutation,
            type_map.subscription,
            type_map.types,
            directives,
        )
    def __str__(self):
        # Render the schema as GraphQL SDL text.
        return print_schema(self.graphql_schema)
    def __getattr__(self, type_name):
        """
        This function let the developer select a type in a given schema
        by accessing its attrs.
        Example: using schema.Query for accessing the "Query" type in the Schema
        """
        _type = self.graphql_schema.get_type(type_name)
        if _type is None:
            raise AttributeError(f'Type "{type_name}" not found in the Schema')
        if isinstance(_type, GrapheneGraphQLType):
            # Unwrap graphene-backed GraphQL types to the original graphene class.
            return _type.graphene_type
        return _type
    def lazy(self, _type):
        # Defer the type lookup until the returned callable is invoked.
        # NOTE(review): `get_type` is not defined on Schema, so this lookup goes
        # through __getattr__ above -- confirm that resolution path is intended.
        return lambda: self.get_type(_type)
    def execute(self, *args, **kwargs):
        """Execute a GraphQL query on the schema.
        Use the `graphql_sync` function from `graphql-core` to provide the result
        for a query string. Most of the time this method will be called by one of the Graphene
        :ref:`Integrations` via a web request.
        Args:
            request_string (str or Document): GraphQL request (query, mutation or subscription)
                as string or parsed AST form from `graphql-core`.
            root_value (Any, optional): Value to use as the parent value object when resolving
                root types.
            context_value (Any, optional): Value to be made available to all resolvers via
                `info.context`. Can be used to share authorization, dataloaders or other
                information needed to resolve an operation.
            variable_values (dict, optional): If variables are used in the request string, they can
                be provided in dictionary form mapping the variable name to the variable value.
            operation_name (str, optional): If multiple operations are provided in the
                request_string, an operation name must be provided for the result to be provided.
            middleware (List[SupportsGraphQLMiddleware]): Supply request level middleware as
                defined in `graphql-core`.
        Returns:
            :obj:`ExecutionResult` containing any data and errors for the operation.
        """
        # Translate graphene-style aliases (root/context/...) to graphql-core names.
        kwargs = normalize_execute_kwargs(kwargs)
        return graphql_sync(self.graphql_schema, *args, **kwargs)
    async def execute_async(self, *args, **kwargs):
        """Execute a GraphQL query on the schema asynchronously.
        Same as `execute`, but uses `graphql` instead of `graphql_sync`.
        """
        kwargs = normalize_execute_kwargs(kwargs)
        return await graphql(self.graphql_schema, *args, **kwargs)
    async def subscribe(self, query, *args, **kwargs):
        # Parse the subscription document, then delegate to graphql-core's
        # subscribe() with the same normalized keyword aliases as execute().
        document = parse(query)
        kwargs = normalize_execute_kwargs(kwargs)
        return await subscribe(self.graphql_schema, document, *args, **kwargs)
    def introspect(self):
        # Run the standard introspection query; surface the first error, if any.
        introspection = self.execute(introspection_query)
        if introspection.errors:
            raise introspection.errors[0]
        return introspection.data
def normalize_execute_kwargs(kwargs):
    """Translate graphene-style keyword aliases into the names graphql() expects.

    An alias is only applied when the canonical keyword is absent, so an
    explicitly supplied canonical value always wins over its alias.
    """
    alias_pairs = (
        ("root", "root_value"),
        ("context", "context_value"),
        ("variables", "variable_values"),
        ("operation", "operation_name"),
    )
    for alias, canonical in alias_pairs:
        if alias in kwargs and canonical not in kwargs:
            kwargs[canonical] = kwargs.pop(alias)
    return kwargs
| 37.726547 | 113 | 0.631871 |
090b31d40a84304bdcdf44e5401a77e6a5b93b69 | 662 | py | Python | legacy-code/metric.py | developbiao/pythonbasics | a7549786629e820646dcde5bb9f1aad4331de9be | [
"MIT"
] | 1 | 2019-06-13T15:33:57.000Z | 2019-06-13T15:33:57.000Z | legacy-code/metric.py | developbiao/pythonbasics | a7549786629e820646dcde5bb9f1aad4331de9be | [
"MIT"
] | null | null | null | legacy-code/metric.py | developbiao/pythonbasics | a7549786629e820646dcde5bb9f1aad4331de9be | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
import time, functools
def metric(func):
    """Decorator that reports how long each call of *func* takes.

    After every invocation it prints '<name> executed in <ms> ms' and returns
    the wrapped function's result unchanged.
    """
    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def decorator(*args, **kw):
        # perf_counter() is monotonic and high-resolution; time.time() is
        # wall-clock and can jump backwards on clock adjustments, which would
        # corrupt the elapsed measurement.
        begin_time = time.perf_counter()
        retval = func(*args, **kw)
        spend_time = (time.perf_counter() - begin_time) * 1000
        print('%s executed in %.2f ms' % (func.__name__, spend_time))
        return retval
    return decorator
# Tests: call the decorated functions and sanity-check their results.
@metric
def fast(x, y):
    time.sleep(0.0012)
    return x + y
@metric
def slow(x, y, z):
    time.sleep(0.1234)
    return x * y * z
f = fast(11, 22)
s = slow(11, 22, 33)
if f != 33:
    print('测试失败!')
elif s != 7986:
    print('测试失败!')
# Fixed misspelled status message (was "Programing is runing!").
print('Programming is running!')
| 19.470588 | 69 | 0.577039 |
2a3b0ee01439d0a9403a0f13d9ba7391258bae5d | 5,706 | py | Python | extensions/skyportal/skyportal/handlers/api/db_stats.py | profjsb/fritz | 59fed7bad6662291692889105ad3875a99c17b58 | [
"BSD-3-Clause"
] | null | null | null | extensions/skyportal/skyportal/handlers/api/db_stats.py | profjsb/fritz | 59fed7bad6662291692889105ad3875a99c17b58 | [
"BSD-3-Clause"
] | null | null | null | extensions/skyportal/skyportal/handlers/api/db_stats.py | profjsb/fritz | 59fed7bad6662291692889105ad3875a99c17b58 | [
"BSD-3-Clause"
] | null | null | null | import datetime
from astropy.time import Time
from penquins import Kowalski
from baselayer.app.access import permissions
from baselayer.app.env import load_env
from ..base import BaseHandler
from ...models import (
DBSession,
Obj,
Source,
Candidate,
User,
Token,
Group,
Spectrum,
CronJobRun,
)
# Application configuration, loaded once at import time.
_, cfg = load_env()
# Module-level Kowalski API client shared by the handlers below.
kowalski = Kowalski(
    token=cfg["app.kowalski.token"],
    protocol=cfg["app.kowalski.protocol"],
    host=cfg["app.kowalski.host"],
    port=int(cfg["app.kowalski.port"]),
)
class StatsHandler(BaseHandler):
    """Admin-only API handler returning a summary of DB and ingestion statistics."""
    @permissions(["System admin"])
    def get(self):
        """
        ---
        description: Retrieve basic DB statistics
        tags:
          - system_info
        responses:
          200:
            content:
              application/json:
                schema:
                  allOf:
                    - $ref: '#/components/schemas/Success'
                    - type: object
                      properties:
                        data:
                          type: object
                          properties:
                            Number of candidates:
                              type: integer
                              description: Number of rows in candidates table
                            Number of objs:
                              type: integer
                              description: Number of rows in objs table
                            Number of sources:
                              type: integer
                              description: Number of rows in sources table
                            Number of photometry:
                              type: integer
                              description: Number of rows in photometry table
                            Number of spectra:
                              type: integer
                              description: Number of rows in spectra table
                            Number of groups:
                              type: integer
                              description: Number of rows in groups table
                            Number of users:
                              type: integer
                              description: Number of rows in users table
                            Number of tokens:
                              type: integer
                              description: Number of rows in tokens table
                            Oldest candidate creation datetime:
                              type: string
                              description: |
                                Datetime string corresponding to created_at column of
                                the oldest row in the candidates table.
                            Newest candidate creation datetime:
                              type: string
                              description: |
                                Datetime string corresponding to created_at column of
                                the newest row in the candidates table.
        """
        # Exact per-table row counts via SQLAlchemy COUNT queries.
        data = {}
        data["Number of candidates"] = Candidate.query.count()
        data["Number of sources"] = Source.query.count()
        data["Number of objs"] = Obj.query.count()
        # Photometry can be huge: use Postgres' reltuples planner estimate
        # instead of a full COUNT(*) table scan.
        data["Number of photometry (approx)"] = list(
            DBSession().execute(
                "SELECT reltuples::bigint FROM pg_catalog.pg_class WHERE relname = 'photometry'"
            )
        )[0][0]
        data["Number of spectra"] = Spectrum.query.count()
        data["Number of groups"] = Group.query.count()
        data["Number of users"] = User.query.count()
        data["Number of tokens"] = Token.query.count()
        # Oldest / newest candidate timestamps (None when the table is empty).
        cand = Candidate.query.order_by(Candidate.created_at).first()
        data["Oldest candidate creation datetime"] = (
            cand.created_at if cand is not None else None
        )
        cand = Candidate.query.order_by(Candidate.created_at.desc()).first()
        data["Newest candidate creation datetime"] = (
            cand.created_at if cand is not None else None
        )
        # Oldest candidate whose obj has not been saved as a source yet.
        cand = (
            DBSession()
            .query(Candidate)
            .filter(Candidate.obj_id.notin_(DBSession.query(Source.obj_id)))
            .order_by(Candidate.created_at)
            .first()
        )
        data["Oldest unsaved candidate creation datetime"] = (
            cand.created_at if cand is not None else None
        )
        # Most recent run (time, exit status, output) of each distinct cron script.
        data["Latest cron job run times & statuses"] = []
        cron_job_scripts = DBSession().query(CronJobRun.script).distinct().all()
        for script in cron_job_scripts:
            cron_job_run = (
                CronJobRun.query.filter(CronJobRun.script == script[0])
                .order_by(CronJobRun.created_at.desc())
                .first()
            )
            data["Latest cron job run times & statuses"].append(
                {
                    "summary": f"{script[0]} ran at {cron_job_run.created_at} with exit status {cron_job_run.exit_status}",
                    "output": cron_job_run.output,
                }
            )
        # Count today's ZTF alerts in Kowalski: documents whose Julian date is
        # after 0h UTC of the current day.
        utc_now = datetime.datetime.utcnow()
        jd_start = Time(datetime.datetime(utc_now.year, utc_now.month, utc_now.day)).jd
        k_query = {
            "query_type": "count_documents",
            "query": {
                "catalog": "ZTF_alerts",
                "filter": {
                    "candidate.jd": {
                        "$gt": jd_start,
                    }
                },
            },
        }
        response = kowalski.query(query=k_query)
        data["Number of alerts ingested since 0h UTC today"] = response.get("data")
        return self.success(data=data)
| 38.816327 | 123 | 0.489485 |
9e6b4ce8794204d0264ce5f9b46e161ed62103ac | 4,568 | py | Python | huaweicloud-sdk-servicestage/huaweicloudsdkservicestage/v2/model/list_applications_request.py | githubmilesma/huaweicloud-sdk-python-v3 | 9d9449ed68a609ca65f0aa50b5b2a1c28445bf03 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-servicestage/huaweicloudsdkservicestage/v2/model/list_applications_request.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-servicestage/huaweicloudsdkservicestage/v2/model/list_applications_request.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
import pprint
import re
import six
class ListApplicationsRequest:
    """Request model for listing applications (huaweicloud SDK generated style).

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """
    sensitive_list = []
    openapi_types = {
        'limit': 'int',
        'offset': 'int',
        'order_by': 'str',
        'order': 'str'
    }
    attribute_map = {
        'limit': 'limit',
        'offset': 'offset',
        'order_by': 'order_by',
        'order': 'order'
    }
    def __init__(self, limit=None, offset=None, order_by=None, order=None):
        """ListApplicationsRequest - a model defined in huaweicloud sdk"""
        self._limit = None
        self._offset = None
        self._order_by = None
        self._order = None
        self.discriminator = None
        # Route each supplied value through its property setter; None values
        # leave the private attribute at its default of None.
        for name, value in (('limit', limit), ('offset', offset),
                            ('order_by', order_by), ('order', order)):
            if value is not None:
                setattr(self, name, value)
    @property
    def limit(self):
        """limit value of this ListApplicationsRequest."""
        return self._limit
    @limit.setter
    def limit(self, limit):
        self._limit = limit
    @property
    def offset(self):
        """offset value of this ListApplicationsRequest."""
        return self._offset
    @offset.setter
    def offset(self, offset):
        self._offset = offset
    @property
    def order_by(self):
        """order_by value of this ListApplicationsRequest."""
        return self._order_by
    @order_by.setter
    def order_by(self, order_by):
        self._order_by = order_by
    @property
    def order(self):
        """order value of this ListApplicationsRequest."""
        return self._order
    @order.setter
    def order(self, order):
        self._order = order
    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            elif attr in self.sensitive_list:
                # Mask values flagged as sensitive.
                result[attr] = "****"
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Equal when the other object is the same model with equal state."""
        if not isinstance(other, ListApplicationsRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
6bc017941448bdab58272ade3059d0e8decec799 | 2,238 | py | Python | tests/test_generate_copy_without_render.py | pokoli/cookiecutter | 394c02dc893c358f2faa37b2c6635ba7c881b96f | [
"BSD-3-Clause"
] | 2 | 2019-06-03T12:44:14.000Z | 2021-09-01T08:28:45.000Z | tests/test_generate_copy_without_render.py | pokoli/cookiecutter | 394c02dc893c358f2faa37b2c6635ba7c881b96f | [
"BSD-3-Clause"
] | 2 | 2015-03-14T09:07:54.000Z | 2015-03-29T18:00:32.000Z | tests/test_generate_copy_without_render.py | pokoli/cookiecutter | 394c02dc893c358f2faa37b2c6635ba7c881b96f | [
"BSD-3-Clause"
] | 1 | 2021-01-29T10:04:27.000Z | 2021-01-29T10:04:27.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_generate_copy_without_render
---------------------------------
"""
from __future__ import unicode_literals
import os
import pytest
from cookiecutter import generate
from cookiecutter import utils
@pytest.fixture(scope='function')
def remove_test_dir(request):
    """Register a finalizer that removes the directory the test creates."""
    def _cleanup():
        target = 'test_copy_without_render'
        if os.path.exists(target):
            utils.rmtree(target)
    request.addfinalizer(_cleanup)
@pytest.mark.usefixtures('clean_system', 'remove_test_dir')
def test_generate_copy_without_render_extensions():
    """Paths matched by `_copy_without_render` are copied verbatim (Jinja tags
    left intact) while every other file is rendered normally."""
    generate.generate_files(
        context={
            'cookiecutter': {
                'repo_name': 'test_copy_without_render',
                'render_test': 'I have been rendered!',
                '_copy_without_render': [
                    '*not-rendered',
                    'rendered/not_rendered.yml',
                    '*.txt',
                ]}
        },
        repo_dir='tests/test-generate-copy-without-render'
    )
    dir_contents = os.listdir('test_copy_without_render')
    # Directory names matching a copy pattern keep their raw template name.
    assert '{{cookiecutter.repo_name}}-not-rendered' in dir_contents
    assert 'test_copy_without_render-rendered' in dir_contents
    # *.txt is excluded from rendering: template tags must survive.
    with open('test_copy_without_render/README.txt') as f:
        assert '{{cookiecutter.render_test}}' in f.read()
    # .rst is not excluded, so it must be rendered.
    with open('test_copy_without_render/README.rst') as f:
        assert 'I have been rendered!' in f.read()
    with open('test_copy_without_render/'
              'test_copy_without_render-rendered/'
              'README.txt') as f:
        assert '{{cookiecutter.render_test}}' in f.read()
    with open('test_copy_without_render/'
              'test_copy_without_render-rendered/'
              'README.rst') as f:
        assert 'I have been rendered' in f.read()
    with open('test_copy_without_render/'
              '{{cookiecutter.repo_name}}-not-rendered/'
              'README.rst') as f:
        assert '{{cookiecutter.render_test}}' in f.read()
    # An explicitly listed path ('rendered/not_rendered.yml') is copied verbatim.
    with open('test_copy_without_render/rendered/not_rendered.yml') as f:
        assert '{{cookiecutter.render_test}}' in f.read()
| 31.083333 | 73 | 0.632261 |
92f83cca10a838610bec194007348ef75d2555fa | 94 | py | Python | exotic_bay/apps.py | ansfarooq7/exotic_bay_project | 170bbc1178d94e83bc2cc048f85568da0001eb78 | [
"MIT"
] | 2 | 2020-07-30T14:49:14.000Z | 2020-08-13T19:35:05.000Z | exotic_bay/apps.py | ansfarooq7/exotic_bay_project | 170bbc1178d94e83bc2cc048f85568da0001eb78 | [
"MIT"
] | 8 | 2020-03-04T17:04:07.000Z | 2022-02-10T12:07:25.000Z | exotic_bay/apps.py | ansfarooq7/exotic_bay_project | 170bbc1178d94e83bc2cc048f85568da0001eb78 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class ExoticBayConfig(AppConfig):
    """Django application configuration for the ``exotic_bay`` app."""
    name = 'exotic_bay'
| 15.666667 | 33 | 0.765957 |
73b90773872f9e426fa66c8731a4cda42b3774fd | 951 | py | Python | GeeksforGeeks/Rotate by 90 degree.py | rayvantsahni/Competitive-Programming-Codes | 39ba91b69ad8ce7dce554f7817c2f0d5545ef471 | [
"MIT"
] | 1 | 2021-07-05T14:01:36.000Z | 2021-07-05T14:01:36.000Z | GeeksforGeeks/Rotate by 90 degree.py | rayvantsahni/Competitive-Programming-and-Interview-Prep | 39ba91b69ad8ce7dce554f7817c2f0d5545ef471 | [
"MIT"
] | null | null | null | GeeksforGeeks/Rotate by 90 degree.py | rayvantsahni/Competitive-Programming-and-Interview-Prep | 39ba91b69ad8ce7dce554f7817c2f0d5545ef471 | [
"MIT"
] | null | null | null |
#User function Template for python3
class Solution:
    """In-place 90-degree anticlockwise rotation of an n x n matrix."""
    def rotateby90(self, a, n):
        """Rotate matrix ``a`` anticlockwise by 90 degrees, in place.

        Reversing every row and then transposing is equivalent to one
        anticlockwise quarter turn, and needs no extra matrix storage.
        """
        for row in a:
            row[:] = row[::-1]
        for i in range(n):
            for j in range(i + 1, n):
                a[i][j], a[j][i] = a[j][i], a[i][j]
#{
# Driver Code Starts
#Initial Template for Python 3
# Reads T test cases; for each, reads n and an n*n matrix supplied as one
# whitespace-separated line, rotates it in place, and prints it row by row.
if __name__ == '__main__':
    t = int (input ())
    for _ in range (t):
        n = int(input())
        matrix = [[0 for j in range(n)] for i in range(n)]
        line1 = [int(x) for x in input().strip().split()]
        k=0
        # Fill the matrix row-major from the flat input list.
        for i in range(n):
            for j in range (n):
                matrix[i][j]=line1[k]
                k+=1
        obj = Solution()
        obj.rotateby90(matrix,n)
        # Print the rotated matrix, one row per line.
        for i in range(n):
            for j in range(n):
                print(matrix[i][j],end=" ")
            print()
# } Driver Code Ends
| 23.775 | 59 | 0.477392 |
a7b400809a58163ad1951472bc600ac95ee7fd98 | 742 | py | Python | branch/migrations/0001_initial.py | caioaraujo/bakery_payments_api_v2 | ade365bd2aa9561182be982286caa72923f36e13 | [
"MIT"
] | 4 | 2019-06-01T23:51:20.000Z | 2021-02-24T11:23:31.000Z | branch/migrations/0001_initial.py | caioaraujo/bakery_payments_api_v2 | ade365bd2aa9561182be982286caa72923f36e13 | [
"MIT"
] | 8 | 2020-06-13T23:10:46.000Z | 2022-02-28T13:58:02.000Z | branch/migrations/0001_initial.py | caioaraujo/bakery_payments_api_v2 | ade365bd2aa9561182be982286caa72923f36e13 | [
"MIT"
] | 1 | 2022-03-22T04:54:35.000Z | 2022-03-22T04:54:35.000Z | # Generated by Django 2.1.5 on 2019-01-13 03:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the `branch` table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Branch',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100)),
                ('current_balance', models.FloatField()),
                # null=True: the previous balance may be unset.
                ('previous_balance', models.FloatField(null=True)),
            ],
            options={
                'verbose_name': 'Branch',
                'verbose_name_plural': 'Branches',
                'db_table': 'branch',
            },
        ),
    ]
| 25.586207 | 76 | 0.520216 |
5baf80b2397975c3cb75b676568e5eef4b1a47f9 | 838 | py | Python | mysite/mysite/urls.py | zach-king/Pynny | 3dac355092084385c673fd94ef9693f046a835a0 | [
"MIT"
] | null | null | null | mysite/mysite/urls.py | zach-king/Pynny | 3dac355092084385c673fd94ef9693f046a835a0 | [
"MIT"
] | 42 | 2017-09-05T02:38:08.000Z | 2021-06-10T18:32:57.000Z | mysite/mysite/urls.py | zcking/Pynny | 3dac355092084385c673fd94ef9693f046a835a0 | [
"MIT"
] | null | null | null | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Top-level route table: dispatch to the Django admin or the pynny app.
urlpatterns = [
    url(r'^admin/', admin.site.urls), # /admin/*
    url(r'^pynny/', include('pynny.urls')), # /pynny/*
]
| 36.434783 | 79 | 0.690931 |
abb50e36c72ba7dd91a19a1f9285756b4e92d5ef | 15,194 | py | Python | Day5Labs/nso_api.py | rlachman/nso-5-day-training-1 | f0a71ec9bad89d0287d9cf1a22b5ecc9d416fc95 | [
"Apache-2.0"
] | null | null | null | Day5Labs/nso_api.py | rlachman/nso-5-day-training-1 | f0a71ec9bad89d0287d9cf1a22b5ecc9d416fc95 | [
"Apache-2.0"
] | null | null | null | Day5Labs/nso_api.py | rlachman/nso-5-day-training-1 | f0a71ec9bad89d0287d9cf1a22b5ecc9d416fc95 | [
"Apache-2.0"
] | 2 | 2018-07-26T17:36:59.000Z | 2021-08-02T07:15:52.000Z | import ncs
import socket
"""
This File provides servel functions that give example sof executing common tasks using the NSO Maagic API
These a written for use on a local NSO instance and are intended to be used for demonstration purposes.
"""
def create_session():
    """
    Example of how to create a read session into NSO.

    A session allows reading data from NSO and executing actions; it does
    not create a transaction, so nothing can be committed through it.
    """
    with ncs.maapi.Maapi() as m:
        with ncs.maapi.Session(m, 'admin', 'python', groups=['ncsadmin']):
            # root is the top of the data tree; the session ends (and this
            # handle becomes unusable) when the with-blocks exit.
            root = ncs.maagic.get_root(m)
def create_transaction():
    """
    Example of how to create a transaction into NSO.

    The transaction is created with ncs.maapi.single_write_trans and
    committed with the transaction object's apply() method.
    """
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t:
        root = ncs.maagic.get_root(t)
        # Nothing was modified, so this apply() commits an empty change set.
        t.apply()
def navigate_config(device_name):
    """Demonstrate how to explore a device's config tree in the Maagic API.

    Prints ``dir()`` of successively deeper nodes of the device configuration
    so you can see which children each level exposes.
    """
    with ncs.maapi.Maapi() as m:
        with ncs.maapi.Session(m, 'admin', 'python', groups=['ncsadmin']):
            root = ncs.maagic.get_root(m)
            cfg = root.devices.device[device_name].config
            # Walk config -> ip -> dhcp -> snooping, dumping each level.
            for node in (cfg, cfg.ip, cfg.ip.dhcp, cfg.ip.dhcp.snooping):
                print(dir(node))
def change_config_hostname(device_name, new_hostname="new_host_name"):
    """Set the configured hostname of the named device and commit the change.

    Generalized from the original example, which always wrote the literal
    "new_host_name"; that value is kept as the default so existing callers
    see identical behavior.

    Steps:
      1. create a write transaction
      2. index the NSO device list (root.devices.device) by the device name --
         the list behaves like a dict keyed on the device name
      3. assign the device's config.hostname attribute
      4. apply() the transaction to commit

    Args:
        device_name: name (list key) of the device in NSO.
        new_hostname: hostname to configure on the device.
    """
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t:
        root = ncs.maagic.get_root(t)
        device = root.devices.device[device_name]
        device.config.hostname = new_hostname
        t.apply()
def delete_data(device_name):
    """
    Example of how to delete data (config or NSO data) via the ``del`` operator.

    Note: ``del`` on a plain Python variable that merely points at an NCS node
    only removes the pointer; deleting through the maagic path, as below,
    removes the data itself.
    """
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t:
        root = ncs.maagic.get_root(t)
        # Removes the hostname leaf from the device's configuration.
        del root.devices.device[device_name].config.hostname
        t.apply()
def create_list_item(device_name=None, vlan_id="200"):
    """Add a new item to a list resource (here: create a VLAN on a device).

    The IOS YANG model contains many lists; adding a VLAN is adding an item
    to one of them, via the maagic list's .create() method.

    Fixed from the original example, which indexed
    ``root.devices.device.config`` without a device key (the device list must
    be indexed by name first) and hard-coded VLAN "200".

    Args:
        device_name: name of the device to configure. Kept optional so the
            original zero-argument call signature still works syntactically,
            but a missing name now fails fast with a clear error instead of
            an AttributeError deep inside maagic.
        vlan_id: VLAN id to create (string, per the IOS YANG model).

    Raises:
        ValueError: if device_name is not supplied.
    """
    if device_name is None:
        raise ValueError("device_name is required to create a VLAN")
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t:
        root = ncs.maagic.get_root(t)
        root.devices.device[device_name].config.interface.vlan.create(vlan_id)
        t.apply()
def add_device(device_name):
    """
    Take a device hostname, resolve its IP, and add the device to NSO.

    Three separate transactions are used because of sequencing and NSO's
    default admin-state of locked:
      1. create the device entry with its address in the CDB
      2. set port / NED / authgroup details and unlock the device
      3. fetch SSH host keys, sync-from, then southbound-lock the device
    """
    # getaddrinfo returns a list of (family, type, proto, canonname, sockaddr)
    # tuples; [0][4][0] extracts the address from the first result's sockaddr.
    ip_addr = socket.getaddrinfo(device_name,0,0,0,0)
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t:
        root = ncs.maagic.get_root(t)
        root.devices.device.create(device_name)
        root.devices.device[device_name].address = ip_addr[0][4][0]
        t.apply()
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t2:
        root = ncs.maagic.get_root(t2)
        root.devices.device[device_name].port = 22
        root.devices.device[device_name].device_type.cli.create()
        root.devices.device[device_name].device_type.cli.ned_id = "ios-id:cisco-ios"
        root.devices.device[device_name].device_type.cli.protocol = "ssh"
        root.devices.device[device_name].authgroup = "root_user"
        root.devices.device[device_name].state.admin_state = "unlocked"
        t2.apply()
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t3:
        root = ncs.maagic.get_root(t3)
        root.devices.device[device_name].ssh.fetch_host_keys()
        root.devices.device[device_name].sync_from()
        root.devices.device[device_name].state.admin_state = "southbound-locked"
        t3.apply()
def iterate_devices():
    """
    Loop over every device in NSO and print per-device information.

    Prints each device's name and hardware platform model, then the name of
    every extended ACL configured on it.  Note how the configuration is
    navigated as a Python object (config -> ip -> access-list -> extended),
    closely mirroring the IOS CLI hierarchy.

    A read-only session would have been sufficient here; a write transaction
    is shown for illustration, and since apply() is never called, nothing
    would be committed even if config were modified.
    """
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t:
        root = ncs.maagic.get_root(t)
        for box in root.devices.device:
            print(box.name,": ", box.platform.model)
            for acl in root.devices.device[box.name].config.ip.access_list.extended.ext_named_acl:
                print(acl.name)
def show_commands(command, device_name):
    """
    Run an arbitrary 'show ...' command on a device and print the raw output.

    Args:
        command: the show-command arguments, e.g. "cdp neighbors" or "ip int br".
        device_name: the device's name in NSO.

    Steps:
      1. open a MAAPI read session (an action needs no transaction)
      2. point at the device in the device list
      3. build the action input via live_status.ios_stats__exec.show.get_input()
      4. place the command string into the input's args leaf-list
      5. invoke the show action with that input
      6. print the .result text returned by the action
    """
    with ncs.maapi.Maapi() as m:
        with ncs.maapi.Session(m, 'admin', 'python'):
            root = ncs.maagic.get_root(m)
            device = root.devices.device[device_name]
            input1 = device.live_status.ios_stats__exec.show.get_input()
            input1.args = [command]
            output = device.live_status.ios_stats__exec.show(input1).result
            print(output)
def clear_commands(command, device_name):
    """
    Run a 'clear ...' command on a device and print the raw output.

    Mirrors show_commands() exactly, but invokes the `clear` live-status
    action instead of `show`.
    """
    with ncs.maapi.Maapi() as m:
        with ncs.maapi.Session(m, 'admin', 'python'):
            root = ncs.maagic.get_root(m)
            device = root.devices.device[device_name]
            input1 = device.live_status.ios_stats__exec.clear.get_input()
            input1.args = [command]
            output = device.live_status.ios_stats__exec.clear(input1).result
            print(output)
def using_leaflists_data(device_group):
    """Show one scenario where a YANG leaf-list type is consumed from Python.

    Iterates the leaf-list of device names in the given device group and
    looks each name up in the device list to print that device's address.
    """
    with ncs.maapi.single_write_trans('ncsadmin', 'python', groups=['ncsadmin'], db=ncs.RUNNING, ip='127.0.0.1', port=ncs.NCS_PORT, proto=ncs.PROTO_TCP) as trans:
        root = ncs.maagic.get_root(trans)
        group = root.devices.device_group[device_group].device_name
        for box in group:
            # Was a Python-2 `print type(box)` statement, which is a
            # SyntaxError under Python 3 (the rest of the file uses print()).
            print(type(box))
            print(root.devices.device[box].address)
def check_in_string(ip):
    """Search every device's extended ACLs for a rule containing *ip*.

    Prints one line per matching ACL.  Note this is a plain substring match,
    so e.g. "10.1.1.1" also matches a rule containing "10.1.1.10".
    """
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t:
        root = ncs.maagic.get_root(t)
        for box in root.devices.device:
            # Hoist the ACL list lookup so it is resolved once per device.
            acls = root.devices.device[box.name].config.ip.access_list.extended.ext_named_acl
            for acl in acls:
                for rule in acls[acl.name].ext_access_list_rule:
                    if ip in rule.rule:
                        # Fixed message: the original had no space after the IP
                        # and printed the maagic object repr instead of the
                        # ACL's name.
                        print(ip + " is in acl " + str(acl.name))
def work_with_boolean(device_name):
    """Show leaves of YANG type boolean, which accept Python True/False.

    Configures dot1Q VLAN 10 as native on GigabitEthernet0/1 and commits it.
    Also demonstrates binding a maagic subtree to a local name.
    """
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t:
        root = ncs.maagic.get_root(t)
        dot_one_q_config = root.devices.device[device_name].config.interface.GigabitEthernet["0/1"].dot1Q
        dot_one_q_config.vlan_id = 10
        dot_one_q_config.native = True  # boolean leaf: plain True/False assignment
        # The original example never committed: without apply() the write
        # transaction is simply discarded when the context manager exits.
        t.apply()
def check_if_interface_exists(device_name, interface_type, interface_number):
    """Check whether a given interface exists on a device.

    Uses the maagic list's dict-like membership test (``in``).

    NOTE(review): sibling examples reach interfaces via device.config.interface;
    this one (like print_interfaces) uses device.interface -- confirm the
    intended path against the NED model.
    """
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t:
        root = ncs.maagic.get_root(t)
        device = root.devices.device[device_name]
        # Was a Python-2 print statement (SyntaxError under Python 3).
        print(type(device.interface[interface_type]))
        if interface_number in device.interface[interface_type]:
            print("Interface is on the device!")
        else:
            print("Interface is not on the device!")
def print_interfaces(device_name, interface_type):
    """Print the name of each interface of the given type on the device."""
    with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t:
        root = ncs.maagic.get_root(t)
        device = root.devices.device[device_name]
        for interface in device.interface[interface_type]:
            # Was a Python-2 `print interface.name` statement, which is a
            # SyntaxError under Python 3.
            print(interface.name)
# python example for service compliance
# NOTE: this is a pasted example that runs at import time, not a function.
with ncs.maapi.single_write_trans('ncsadmin','python',['ncsadmin'],ip='127.0.0.1', port=ncs.NCS_PORT,path=None, src_ip='127.0.0.1', src_port=0, proto=ncs.PROTO_TCP) as t:
    root = ncs.maagic.get_root(t)
    service = root.services.your_service_name["instance_name"]
    compliance_input = service.check_sync.get_input()
    compliance_input.outformat = 'native'
    # Was "each.check_sync(...)": `each` is undefined; the service instance
    # bound above is the object that owns the check-sync action.  Also removed
    # a stray markdown ``` fence that made the module unparsable.
    for device in service.check_sync(compliance_input).native.device:
        print(device, " ", device.data)
# using a rest api call in a flask app to send any command to NSO device
# see OS-Upgrades/client-os-upgrader for full context
import ncs
from flask import Flask, render_template, redirect, url_for,request
from flask import make_response
from flask_cors import CORS
import json
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Validate device CLI credentials by attempting an SSH hop from NSO.

    On POST: builds an 'ssh -l <user> ... | prompts <pass>' exec command, runs
    it via live-status exec on the jump switch, and answers "valid" if the
    captured output ends with a privileged prompt ('#'), else "invalid".
    """
    # SECURITY NOTE(review): username/password come straight from the form and
    # are concatenated into a device CLI command -- sanitize/escape these
    # inputs to prevent command injection on the network element.
    message = None
    if request.method == 'POST':
        username = request.form['user']
        password = request.form['pass']
        command = 'ssh -l ' + username + ' rtp5-itnlab-dt-gw1 | prompts ' + password
        with ncs.maapi.single_write_trans('ncsadmin', 'python', groups=['ncsadmin']) as tran:
            root = ncs.maagic.get_root(tran)
            device = root.ncs__devices.device['rtp5-itnlab-dt-sw1']
            # NOTE(review): the input object is built from the `show` action
            # but passed to `any` below -- confirm which action is intended.
            input1 = device.live_status.ios_stats__exec.show.get_input()
            input1.args = [command]
            try:
                output = device.live_status.ios_stats__exec.any(input1).result
            except Exception:
                # Any failure collapses to a sentinel; 'Error'[-1] != '#',
                # so the check below reports "invalid".
                output = 'Error'
            finally:
                tran.apply()
    # NOTE(review): on a plain GET, `output` is never assigned and this line
    # raises UnboundLocalError -- the check likely belongs in the POST branch.
    if output[len(output)-1] == '#':
        result = 'valid'
    else:
        result = 'invalid'
    resp = make_response(result)
    resp.headers['Content-Type'] = "application/json"
    return resp
# need to clean this up
# import excel into a yang model list
import ncs
import pyExcel
def importExcel():
    """
    Import OS-standards rows from an Excel sheet into the
    nso_os_upgrader OS_standards YANG list.

    Rows whose Maestro_key is a header value ('Maestro_key'), a placeholder
    ('tbd') or missing are skipped; every other row becomes one list entry
    keyed by Maestro_key.
    """
    # Fixed: 'os' was used below (os.getcwd / os.sep) but never imported in
    # this snippet; import it locally so the function is self-contained.
    import os

    # LOG = logging.getLogger("ErrorLogging")
    # init_logger("ErrorLogging")
    with ncs.maapi.single_write_trans('ncsadmin', 'python', groups=['ncsadmin']) as tran:
        root = ncs.maagic.get_root(tran)
        # The spreadsheet is expected in the current working directory.
        filePath = os.getcwd() + os.sep
        standardDevList = pyExcel.read_excel(filePath)
        os_standards = root.nso_os_upgrader__os_standards.OS_standards
        for row in standardDevList:
            if row.Maestro_key in ("Maestro_key", "tbd"):
                # Header row or placeholder -- nothing to import.
                continue
            if str(row.Maestro_key) == "None":
                # Missing key -- skip, a list entry cannot be created.
                continue
            entry = os_standards.create([row.Maestro_key])
            entry.nso_os_upgrader__Device = row.Device
            entry.nso_os_upgrader__Function_Role = row.Function_Role
            entry.nso_os_upgrader__IOS_Version = row.IOS_Version
            entry.nso_os_upgrader__Feature_Set = row.Feature_Set
            entry.nso_os_upgrader__Recommended_Rommon_Version = row.Recommended_Rommon_Version
            entry.nso_os_upgrader__Recommended_OS_File = row.Recommended_OS_File
            entry.nso_os_upgrader__Recommended_OS_MD5_Hash = row.Recommended_OS_MD5_Hash
            entry.nso_os_upgrader__Limited_Deploy_IOS = row.Limited_Deploy_IOS_File
            entry.nso_os_upgrader__Limited_Deploy_IOS_MD5_Hash = row.Limited_Deploy_IOS_MD5_Hash
            entry.nso_os_upgrader__Accepted_OS = row.Acceptable_IOS_File
            entry.nso_os_upgrader__Accepted_OS_MD5_Hash = row.Accepted_OS_MD5_Hash
            entry.nso_os_upgrader__Recommended_ROMMON = row.Recommended_ROMMON
            entry.nso_os_upgrader__Recommended_ROMMON_MD5_Hash = row.Recommended_ROMMON_MD5_Hash
            entry.nso_os_upgrader__FPD_File_for_Recommended_IOS = row.FPD_File_for_Recommended_IOS
            entry.nso_os_upgrader__FPD_File_for_Recommended_IOS_MD5_Hash = row.FPD_File_for_Recommended_IOS_MD5_Hash
            entry.nso_os_upgrader__FPD_File_for_LD_IOS = row.FPD_File_for_LD_IOS
            entry.nso_os_upgrader__FPD_File_for_LD_IOS_MD5_Hash = row.FPD_File_for_LD_MD5_Hash
            entry.nso_os_upgrader__FCS_Date = row.FCS_Date
            entry.nso_os_upgrader__GD_Date = row.GD_Date
            entry.nso_os_upgrader__Comments = row.Comments
        tran.apply()
| 47.779874 | 170 | 0.68843 |
e817386a112b36973e008b9047dc37f42dc52b12 | 5,100 | py | Python | dist_utils.py | DonaldChung-HK/st2-auth-backend-keystone | 4778319a019f1060df6fcaec12e1001f98922c51 | [
"Apache-2.0"
] | 16 | 2015-09-05T16:05:36.000Z | 2022-02-22T12:48:58.000Z | dist_utils.py | DonaldChung-HK/st2-auth-backend-keystone | 4778319a019f1060df6fcaec12e1001f98922c51 | [
"Apache-2.0"
] | 19 | 2016-02-26T23:36:30.000Z | 2021-03-25T14:28:12.000Z | dist_utils.py | DonaldChung-HK/st2-auth-backend-keystone | 4778319a019f1060df6fcaec12e1001f98922c51 | [
"Apache-2.0"
] | 26 | 2016-03-29T18:47:46.000Z | 2021-03-25T08:35:03.000Z | # -*- coding: utf-8 -*-
# NOTE: This file is auto-generated - DO NOT EDIT MANUALLY
# Instead copy from https://github.com/StackStorm/st2/blob/master/scripts/dist_utils.py
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import re
import sys
from distutils.version import StrictVersion
# NOTE: This script can't rely on any 3rd party dependency so we need to use this code here
#
# TODO: Why can't this script rely on 3rd party dependencies? Is it because it has to import
# from pip?
#
# TODO: Dear future developer, if you are back here fixing a bug with how we parse
# requirements files, please look into using the packaging package on PyPI:
# https://packaging.pypa.io/en/latest/requirements/
# and specifying that in the `setup_requires` argument to `setuptools.setup()`
# for subpackages.
# At the very least we can vendorize some of their code instead of reimplementing
# each piece of their code every time our parsing breaks.
# Python 2/3 shim: ``text_type`` is the native unicode string type.
PY3 = sys.version_info[0] == 3
text_type = str if PY3 else unicode  # NOQA

# One-liner a user can run to obtain pip when it is missing.
GET_PIP = 'curl https://bootstrap.pypa.io/get-pip.py | python'

# Public API of this helper module.
__all__ = [
    'check_pip_is_installed',
    'check_pip_version',
    'fetch_requirements',
    'apply_vagrant_workaround',
    'get_version_string',
    'parse_version_string',
]
def check_pip_is_installed():
    """
    Verify that pip can be imported, exiting the process if it cannot.

    :return: ``True`` when pip is available.
    """
    try:
        import pip  # NOQA
    except ImportError as e:
        # pip is missing: tell the user how to get it, then bail out.
        for line in ('Failed to import pip: %s' % (text_type(e)),
                     '',
                     'Download pip:\n%s' % (GET_PIP)):
            print(line)
        sys.exit(1)
    return True
def check_pip_version(min_version='6.0.0'):
    """
    Ensure that a minimum supported version of pip is installed.

    :param min_version: lowest acceptable pip version, as a dotted string.
    :return: ``True`` when the installed pip satisfies ``min_version``;
             otherwise an upgrade hint is printed and the process exits.
    """
    # Fixed: the original relied on distutils.version.StrictVersion, and the
    # distutils package was removed from the standard library in Python 3.12
    # (PEP 632). Compare numeric version tuples instead. The pip-availability
    # check is inlined here (same effect as check_pip_is_installed()).
    try:
        import pip
    except ImportError as e:
        print('Failed to import pip: %s' % (e,))
        print('')
        print('Download pip:\n%s' % (GET_PIP))
        sys.exit(1)

    def _version_tuple(version):
        # Use the numeric prefix of each dotted component so strings such as
        # '21.2.4' or '20.0b1' still order sensibly.
        parts = []
        for component in version.split('.'):
            match = re.match(r'\d+', component)
            parts.append(int(match.group(0)) if match else 0)
        return tuple(parts)

    installed = _version_tuple(pip.__version__)
    required = _version_tuple(min_version)
    # Pad to equal length so that '6.0' compares equal to '6.0.0'.
    width = max(len(installed), len(required))
    installed += (0,) * (width - len(installed))
    required += (0,) * (width - len(required))

    if installed < required:
        print("Upgrade pip, your version '{0}' "
              "is outdated. Minimum required version is '{1}':\n{2}".format(pip.__version__,
                                                                            min_version,
                                                                            GET_PIP))
        sys.exit(1)
    return True
def fetch_requirements(requirements_file_path):
    """
    Parse a pip requirements file.

    :param requirements_file_path: path to the requirements file.
    :return: tuple ``(reqs, links)`` -- requirement names (environment
             markers stripped) and VCS links found in the file.
    """
    vcs_prefixes = ('git+', 'svn+', 'hg+', 'bzr+')

    def _get_link(line):
        # Return (link, egg name) for a VCS requirement line, (None, None)
        # for a plain requirement.
        for prefix in vcs_prefixes:
            if not (line.startswith(prefix) or line.startswith('-e %s' % (prefix))):
                continue
            req_name = re.findall('.*#egg=(.+)([&|@]).*$', line)
            if not req_name:
                req_name = re.findall('.*#egg=(.+?)$', line)
            else:
                req_name = req_name[0]
            if not req_name:
                raise ValueError('Line "%s" is missing "#egg=<package name>"' % (line))
            link = line.replace('-e ', '').strip()
            return link, req_name[0]
        return None, None

    links = []
    reqs = []
    with open(requirements_file_path, 'r') as fp:
        for raw_line in fp.readlines():
            line = raw_line.strip()
            if not line or line.startswith('#'):
                # Skip blanks and comments.
                continue
            link, req_name = _get_link(line=line)
            if link:
                links.append(link)
            else:
                req_name = line
                if ';' in req_name:
                    # Drop environment markers, e.g. "foo; python_version<'3'".
                    req_name = req_name.split(';')[0].strip()
            reqs.append(req_name)
    return (reqs, links)
def apply_vagrant_workaround():
    """
    Work around a setuptools issue inside Vagrant shared folders.

    Hard links are not supported on vagrant/virtualbox shared (nfs) mounts,
    which makes ``setup.py sdist`` fail; deleting ``os.link`` forces a copy
    instead. Only applied when running as the ``vagrant`` user.
    """
    if os.environ.get('USER', None) != 'vagrant':
        return
    del os.link
def get_version_string(init_file):
    """
    Extract the ``__version__`` string from a Python init file.

    :param init_file: path of a file containing ``__version__ = '...'``.
    :return: the version string.
    :raises RuntimeError: when no version assignment is found.
    """
    with open(init_file, 'r') as fp:
        match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                          fp.read(), re.M)
    if match:
        return match.group(1)
    raise RuntimeError('Unable to find version string in %s.' % (init_file))


# alias for get_version_string
parse_version_string = get_version_string
| 30.357143 | 98 | 0.606275 |
262385c1ba5a967b0e9274c9754763f58ade0d0e | 1,894 | py | Python | ant/fs/test/test.py | ddboline/Garmin-Forerunner-610-Extractor_fork | 3cea481bd103a1f20a3ff17f307d2a3a68068ba5 | [
"MIT"
] | 3 | 2016-07-19T12:04:46.000Z | 2019-08-03T21:17:28.000Z | ant/fs/test/test.py | ddboline/Garmin-Forerunner-610-Extractor_fork | 3cea481bd103a1f20a3ff17f307d2a3a68068ba5 | [
"MIT"
] | null | null | null | ant/fs/test/test.py | ddboline/Garmin-Forerunner-610-Extractor_fork | 3cea481bd103a1f20a3ff17f307d2a3a68068ba5 | [
"MIT"
] | 2 | 2019-02-18T08:57:30.000Z | 2019-08-03T20:54:37.000Z | #from ant.base import Message
from ant.easy.node import Node, Message
from ant.easy.channel import Channel
from ant.fs.manager import Application
import array
import logging
import struct
import sys
import threading
import traceback
class App(Application):
    """ANT-FS demo application: opens a search channel, links to a device,
    authenticates with a stored passkey and downloads the file directory."""

    def setup_channel(self, channel):
        """Configure and open the ANT channel used for ANT-FS search."""
        # Fixed throughout this class: Python 2 print statements (syntax
        # errors under Python 3) converted to print() calls.
        print("on setup channel")
        channel.set_period(4096)
        channel.set_search_timeout(255)
        channel.set_rf_freq(50)
        channel.set_search_waveform([0x53, 0x00])
        channel.set_id(0, 0x01, 0)
        print("Open channel...")
        channel.open()
        channel.request_message(Message.ID.RESPONSE_CHANNEL_STATUS)

    def on_link(self, beacon):
        """Beacon seen in link state -- establish the link."""
        print("on link")
        self.link()

    def on_authentication(self, beacon):
        """Authenticate with the device using a previously paired passkey."""
        print("on authentication")
        serial = self.authentication_serial()
        # passkey = self.authentication_pair("Friendly little name")
        # NOTE(review): hard-coded passkey from an earlier pairing session.
        passkey = array.array('B', [234, 85, 223, 166, 87, 48, 71, 153])
        self.authentication_passkey(passkey)
        # print("Link", serial, "-", info, "-", beacon)

    def on_transport(self, beacon):
        """Transport state reached -- download and display the directory."""
        print("on transport")
        d = self.download_directory()
        print(d, d.get_version(), d._time_format, d._current_system_time, d._last_modified)
        print(d._files)
def main():
    """Set up file logging, then run the ANT-FS application until it stops
    or is interrupted."""
    app = None
    try:
        # Set up logging
        logger = logging.getLogger("garmin")
        logger.setLevel(logging.DEBUG)
        handler = logging.FileHandler("test.log", "w")
        # handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            fmt='%(threadName)-10s %(asctime)s %(name)-15s %(levelname)-8s %(message)s'))
        logger.addHandler(handler)
        app = App()
        app.start()
    except (Exception, KeyboardInterrupt):
        traceback.print_exc()
        # Fixed: Python 2 print statement, and 'app' was unbound here if
        # App() itself raised, turning the error report into a NameError.
        print("Interrupted")
        if app is not None:
            app.stop()
        sys.exit(1)
16af6f87963dbfdc4559c47c7c5d3d1a2e615879 | 3,246 | py | Python | ief_core/models/iefs/logcellkill.py | zeshanmh/ief | 1b7dbd340ecb8ccf40d22de989e3bc3d92135a45 | [
"MIT"
] | 5 | 2021-04-11T04:49:24.000Z | 2022-03-28T18:43:45.000Z | ief_core/models/iefs/logcellkill.py | clinicalml/ief | 97bcaad85ec820fbe062a86c6c500a308904f029 | [
"MIT"
] | 1 | 2021-12-13T06:33:16.000Z | 2021-12-16T02:04:14.000Z | ief_core/models/iefs/logcellkill.py | clinicalml/ief | 97bcaad85ec820fbe062a86c6c500a308904f029 | [
"MIT"
] | 1 | 2022-02-01T03:10:16.000Z | 2022-02-01T03:10:16.000Z | import torch
import math
import numpy as np
import pickle
from torch import nn
import torch.functional as F
from pyro.distributions import Normal, Independent, Categorical, LogNormal
import sys, os
from torch.autograd import grad
class LogCellKill(nn.Module):
def __init__(self, dim_stochastic, dim_treat, dim_hidden = 300, mtype='logcellkill', response_only=False, alpha1_type='linear'):
super(LogCellKill, self).__init__()
self.dim_stochastic = dim_stochastic
self.dim_treat = dim_treat
self.mtype = mtype
self.rho = nn.Parameter(torch.Tensor(dim_stochastic,))
if alpha1_type == 'linear' or alpha1_type == 'quadratic' or alpha1_type == 'nl':
self.scale = nn.Parameter(torch.Tensor(dim_stochastic,))
self.controlfxn = nn.Linear(dim_treat-1, dim_stochastic)
elif alpha1_type == 'linear_fix':
self.scale = nn.Parameter(torch.Tensor(1,))
self.controlfxn = nn.Linear(dim_treat-1, 1)
# elif alpha1_type == 'nl':
# self.scale = nn.Parameter(torch.Tensor(dim_stochastic,))
# omodel = nn.Sequential(nn.Linear(dim_treat-1, dim_hidden),nn.ReLU(True))
# self.controlfxn = nn.Sequential(omodel, nn.Linear(dim_hidden, dim_stochastic))
self.response_only = response_only
if not response_only:
self.inpfxn = nn.Linear(dim_stochastic, dim_stochastic)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.scale, 0.1)
nn.init.constant_(self.rho, 0.5)
def forward(self, inp, con):
tvals = con[...,[0]]
a = con[...,1:]
te = torch.tanh(self.controlfxn(a))
scale = torch.sigmoid(self.scale)*2
#self.debug = [te, growth_term, scale, self.bias]
if self.mtype == 'unused':
# 73
growth_term = torch.sigmoid(self.rho)*torch.log(inp**2+1e-3)
out = inp*(1-growth_term-te*torch.exp(-tvals*scale))
# 66
growth_term = torch.sigmoid(self.rho)*torch.log(inp**2+1e-3)
out = inp*(1-growth_term*0.-te*torch.exp(-tvals*scale))
# 70
growth_term = torch.sigmoid(self.rho)*torch.nn.functional.softplus(inp)
out = inp*(1-growth_term-te*torch.exp(-tvals*scale))
elif self.mtype=='logcellkill':
growth_term = torch.sigmoid(self.rho)*torch.log(inp**2+1e-3)
out = inp*(1-growth_term-te*torch.exp(-tvals*scale))
elif self.mtype=='logcellkill_1':
growth_term = torch.sigmoid(self.rho)*torch.nn.functional.softplus(inp)
out = inp*(1-growth_term*0.-te*torch.exp(-tvals*scale))
# out = (1-growth_term*0.-te*torch.exp(-tvals*scale))
else:
growth_term = torch.sigmoid(self.rho)*torch.nn.functional.softplus(inp)
out = inp*(1-growth_term-te*torch.exp(-tvals*scale))
if self.response_only:
return out
else:
return self.inpfxn(inp) + out | 47.043478 | 132 | 0.582255 |
0e4e30b2180fcae15b7a47f17ab897311e2a895b | 1,171 | py | Python | PyMess/MAG/SaveAllDip.py | mattkjames7/PyMess | f2c68285a7845a24d98284e20ed4292ed5e58138 | [
"MIT"
] | null | null | null | PyMess/MAG/SaveAllDip.py | mattkjames7/PyMess | f2c68285a7845a24d98284e20ed4292ed5e58138 | [
"MIT"
] | null | null | null | PyMess/MAG/SaveAllDip.py | mattkjames7/PyMess | f2c68285a7845a24d98284e20ed4292ed5e58138 | [
"MIT"
] | null | null | null | import numpy as np
from ._SaveDip import _SaveDip
from .DataAvailability import DataAvailability
def SaveAllDip(Minute=False,StartI=0,EndI=None):
    '''
    Save all magnetometer data rotated into a coordinate system useful
    for studying waves, with components in the poloidal, toroidal and
    parallel directions.

    Inputs:
        Minute: Set to True to use minute resolution data, or False for
            full time resolution data.
        StartI: Index of the first available date to convert.
        EndI: Index after the last date to convert; None converts every
            available date from StartI onwards.
    '''
    # Fixed: the original docstring documented parameters (res, ModelParams,
    # Ab, DetectGaps) that do not exist in this signature and omitted
    # StartI/EndI.
    dates = DataAvailability(Minute, Type='MSO')
    nf = np.size(dates)
    if EndI is None:
        EndI = nf
    for i in range(StartI, EndI):
        print('Converting File {0} of {1} ({2})'.format(i + 1, nf, dates[i]))
        _SaveDip(dates[i], Minute)
| 34.441176 | 67 | 0.747225 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.