max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
selenium_cmd/decorators.py | patkle/selenium-cmd | 1 | 6613351 | def exception_printer(func):
def decorator(*a, **kw):
try:
return func(*a, **kw)
except Exception as e:
print(e)
return decorator
def decorate_do_methods(decorator):
"""apply decorator to all methods starting with do_"""
def decorate(cls):
for attr in cls.__dict__:
if not attr.startswith('do_'):
continue
if callable(getattr(cls, attr)):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
return decorate
| def exception_printer(func):
def decorator(*a, **kw):
try:
return func(*a, **kw)
except Exception as e:
print(e)
return decorator
def decorate_do_methods(decorator):
"""apply decorator to all methods starting with do_"""
def decorate(cls):
for attr in cls.__dict__:
if not attr.startswith('do_'):
continue
if callable(getattr(cls, attr)):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
return decorate
| en | 0.818582 | apply decorator to all methods starting with do_ | 3.432251 | 3 |
helloworld/helloworld.py | vasantskr/pipelines-python-django | 0 | 6613352 | <filename>helloworld/helloworld.py<gh_stars>0
# new helloword program
class helloworld:
def write_output(Str):
return Str
if __name__ == "__main__":
p=write_output("hello")
print(p) | <filename>helloworld/helloworld.py<gh_stars>0
# new helloword program
class helloworld:
def write_output(Str):
return Str
if __name__ == "__main__":
p=write_output("hello")
print(p) | en | 0.532524 | # new helloword program | 2.576366 | 3 |
db/inventory.py | jwestgard/aws-invalign | 0 | 6613353 | <reponame>jwestgard/aws-invalign<filename>db/inventory.py
class Inventory():
def __init__(self, path):
self.invpath = path
self.filename = os.path.basename(invpath)
self.lines = []
with open(path) as handle:
for n, line in enumerate(handle.readlines(), 1):
md5, path = line.strip().split(None, 1)
file_signature = (md5, path, self.invpath, n)
self.lines.append(file_signature)
| class Inventory():
def __init__(self, path):
self.invpath = path
self.filename = os.path.basename(invpath)
self.lines = []
with open(path) as handle:
for n, line in enumerate(handle.readlines(), 1):
md5, path = line.strip().split(None, 1)
file_signature = (md5, path, self.invpath, n)
self.lines.append(file_signature) | none | 1 | 2.921839 | 3 | |
PHPUnserialize.py | videlalvaro/InspectorD | 2 | 6613354 | <filename>PHPUnserialize.py
import types, string, re
"""
Unserialize class for the PHP serialization format.
@version v0.4 BETA
@author <NAME>; scott at hurring dot com
@copyright Copyright (c) 2005 <NAME>
@license http://opensource.org/licenses/gpl-license.php GNU Public License
$Id: PHPUnserialize.py,v 1.1 2006/01/08 21:53:19 shurring Exp $
Most recent version can be found at:
http://hurring.com/code/python/phpserialize/
Usage:
# Create an instance of the unserialize engine
u = PHPUnserialize()
# unserialize some string into python data
data = u.unserialize(serialized_string)
Please see README.txt for more information.
"""
class PHPUnserialize(object):
"""
Class to unserialize something from the PHP Serialize format.
Usage:
u = PHPUnserialize()
data = u.unserialize(serialized_string)
"""
def __init__(self):
pass
def session_decode(self, data):
"""Thanks to <NAME> for suggesting the addition
of session_encode
"""
session = {}
while len(data) > 0:
(offset, key) = self.read_until(data, 0, '|')
if offset > 0:
(dtype, dataoffset, value) = self._unserialize(data, offset+1)
data = data[offset+1+dataoffset:]
session[key] = value
else:
return session
return session
def unserialize(self, data):
return self._unserialize(data, 0)[2]
def _unserialize(self, data, offset=0):
"""
Find the next token and unserialize it.
Recurse on array.
offset = raw offset from start of data
return (type, offset, value)
"""
buf = []
dtype = string.lower(data[offset:offset+1])
#print "# dtype =", dtype
# 't:' = 2 chars
dataoffset = offset + 2
typeconvert = lambda x : x
chars = datalength = 0
# int => Integer
if dtype == 'i':
typeconvert = lambda x : int(x)
(chars, readdata) = self.read_until(data, dataoffset, ';')
# +1 for end semicolon
dataoffset += chars + 1
# bool => Boolean
elif dtype == 'b':
typeconvert = lambda x : (int(x) == 1)
(chars, readdata) = self.read_until(data, dataoffset, ';')
# +1 for end semicolon
dataoffset += chars + 1
# double => Floating Point
elif dtype == 'd':
typeconvert = lambda x : float(x)
(chars, readdata) = self.read_until(data, dataoffset, ';')
# +1 for end semicolon
dataoffset += chars + 1
# n => None
elif dtype == 'n':
readdata = None
# s => String
elif dtype == 's':
(chars, stringlength) = self.read_until(data, dataoffset, ':')
# +2 for colons around length field
dataoffset += chars + 2
# +1 for start quote
(chars, readdata) = self.read_chars(data, dataoffset+1, int(stringlength))
# +2 for endquote semicolon
dataoffset += chars + 2
if chars != int(stringlength) != int(readdata):
raise Exception("String length mismatch")
# array => Dict
# If you originally serialized a Tuple or List, it will
# be unserialized as a Dict. PHP doesn't have tuples or lists,
# only arrays - so everything has to get converted into an array
# when serializing and the original type of the array is lost
elif dtype == 'a':
readdata = {}
# How many keys does this list have?
(chars, keys) = self.read_until(data, dataoffset, ':')
# +2 for colons around length field
dataoffset += chars + 2
# Loop through and fetch this number of key/value pairs
for i in range(0, int(keys)):
# Read the key
(ktype, kchars, key) = self._unserialize(data, dataoffset)
dataoffset += kchars
#print "Key(%i) = (%s, %i, %s) %i" % (i, ktype, kchars, key, dataoffset)
# Read value of the key
(vtype, vchars, value) = self._unserialize(data, dataoffset)
dataoffset += vchars
#print "Value(%i) = (%s, %i, %s) %i" % (i, vtype, vchars, value, dataoffset)
# Set the list element
readdata[key] = value
# +1 for end semicolon
dataoffset += 1
#chars = int(dataoffset) - start
# I don't know how to unserialize this
else:
raise Exception("Unknown / Unhandled data type (%s)!" % dtype)
return (dtype, dataoffset-offset, typeconvert(readdata))
def read_until(self, data, offset, stopchar):
"""
Read from data[offset] until you encounter some char 'stopchar'.
"""
buf = []
char = data[offset:offset+1]
i = 2
while char != stopchar:
# Consumed all the characters and havent found ';'
if i+offset > len(data):
raise Exception("Invalid")
buf.append(char)
char = data[offset+(i-1):offset+i]
i += 1
# (chars_read, data)
return (len(buf), "".join(buf))
def read_chars(self, data, offset, length):
"""
Read 'length' number of chars from data[offset].
"""
buf = []
# Account for the starting quote char
#offset += 1
for i in range(0, length):
char = data[offset+(i-1):offset+i]
buf.append(char)
# (chars_read, data)
return (len(buf), "".join(buf))
| <filename>PHPUnserialize.py
import types, string, re
"""
Unserialize class for the PHP serialization format.
@version v0.4 BETA
@author <NAME>; scott at hurring dot com
@copyright Copyright (c) 2005 <NAME>
@license http://opensource.org/licenses/gpl-license.php GNU Public License
$Id: PHPUnserialize.py,v 1.1 2006/01/08 21:53:19 shurring Exp $
Most recent version can be found at:
http://hurring.com/code/python/phpserialize/
Usage:
# Create an instance of the unserialize engine
u = PHPUnserialize()
# unserialize some string into python data
data = u.unserialize(serialized_string)
Please see README.txt for more information.
"""
class PHPUnserialize(object):
"""
Class to unserialize something from the PHP Serialize format.
Usage:
u = PHPUnserialize()
data = u.unserialize(serialized_string)
"""
def __init__(self):
pass
def session_decode(self, data):
"""Thanks to <NAME> for suggesting the addition
of session_encode
"""
session = {}
while len(data) > 0:
(offset, key) = self.read_until(data, 0, '|')
if offset > 0:
(dtype, dataoffset, value) = self._unserialize(data, offset+1)
data = data[offset+1+dataoffset:]
session[key] = value
else:
return session
return session
def unserialize(self, data):
return self._unserialize(data, 0)[2]
def _unserialize(self, data, offset=0):
"""
Find the next token and unserialize it.
Recurse on array.
offset = raw offset from start of data
return (type, offset, value)
"""
buf = []
dtype = string.lower(data[offset:offset+1])
#print "# dtype =", dtype
# 't:' = 2 chars
dataoffset = offset + 2
typeconvert = lambda x : x
chars = datalength = 0
# int => Integer
if dtype == 'i':
typeconvert = lambda x : int(x)
(chars, readdata) = self.read_until(data, dataoffset, ';')
# +1 for end semicolon
dataoffset += chars + 1
# bool => Boolean
elif dtype == 'b':
typeconvert = lambda x : (int(x) == 1)
(chars, readdata) = self.read_until(data, dataoffset, ';')
# +1 for end semicolon
dataoffset += chars + 1
# double => Floating Point
elif dtype == 'd':
typeconvert = lambda x : float(x)
(chars, readdata) = self.read_until(data, dataoffset, ';')
# +1 for end semicolon
dataoffset += chars + 1
# n => None
elif dtype == 'n':
readdata = None
# s => String
elif dtype == 's':
(chars, stringlength) = self.read_until(data, dataoffset, ':')
# +2 for colons around length field
dataoffset += chars + 2
# +1 for start quote
(chars, readdata) = self.read_chars(data, dataoffset+1, int(stringlength))
# +2 for endquote semicolon
dataoffset += chars + 2
if chars != int(stringlength) != int(readdata):
raise Exception("String length mismatch")
# array => Dict
# If you originally serialized a Tuple or List, it will
# be unserialized as a Dict. PHP doesn't have tuples or lists,
# only arrays - so everything has to get converted into an array
# when serializing and the original type of the array is lost
elif dtype == 'a':
readdata = {}
# How many keys does this list have?
(chars, keys) = self.read_until(data, dataoffset, ':')
# +2 for colons around length field
dataoffset += chars + 2
# Loop through and fetch this number of key/value pairs
for i in range(0, int(keys)):
# Read the key
(ktype, kchars, key) = self._unserialize(data, dataoffset)
dataoffset += kchars
#print "Key(%i) = (%s, %i, %s) %i" % (i, ktype, kchars, key, dataoffset)
# Read value of the key
(vtype, vchars, value) = self._unserialize(data, dataoffset)
dataoffset += vchars
#print "Value(%i) = (%s, %i, %s) %i" % (i, vtype, vchars, value, dataoffset)
# Set the list element
readdata[key] = value
# +1 for end semicolon
dataoffset += 1
#chars = int(dataoffset) - start
# I don't know how to unserialize this
else:
raise Exception("Unknown / Unhandled data type (%s)!" % dtype)
return (dtype, dataoffset-offset, typeconvert(readdata))
def read_until(self, data, offset, stopchar):
"""
Read from data[offset] until you encounter some char 'stopchar'.
"""
buf = []
char = data[offset:offset+1]
i = 2
while char != stopchar:
# Consumed all the characters and havent found ';'
if i+offset > len(data):
raise Exception("Invalid")
buf.append(char)
char = data[offset+(i-1):offset+i]
i += 1
# (chars_read, data)
return (len(buf), "".join(buf))
def read_chars(self, data, offset, length):
"""
Read 'length' number of chars from data[offset].
"""
buf = []
# Account for the starting quote char
#offset += 1
for i in range(0, length):
char = data[offset+(i-1):offset+i]
buf.append(char)
# (chars_read, data)
return (len(buf), "".join(buf))
| en | 0.669042 | Unserialize class for the PHP serialization format. @version v0.4 BETA @author <NAME>; scott at hurring dot com @copyright Copyright (c) 2005 <NAME> @license http://opensource.org/licenses/gpl-license.php GNU Public License $Id: PHPUnserialize.py,v 1.1 2006/01/08 21:53:19 shurring Exp $ Most recent version can be found at: http://hurring.com/code/python/phpserialize/ Usage: # Create an instance of the unserialize engine u = PHPUnserialize() # unserialize some string into python data data = u.unserialize(serialized_string) Please see README.txt for more information. Class to unserialize something from the PHP Serialize format. Usage: u = PHPUnserialize() data = u.unserialize(serialized_string) Thanks to <NAME> for suggesting the addition of session_encode Find the next token and unserialize it. Recurse on array. offset = raw offset from start of data return (type, offset, value) #print "# dtype =", dtype # 't:' = 2 chars # int => Integer # +1 for end semicolon # bool => Boolean # +1 for end semicolon # double => Floating Point # +1 for end semicolon # n => None # s => String # +2 for colons around length field # +1 for start quote # +2 for endquote semicolon # array => Dict # If you originally serialized a Tuple or List, it will # be unserialized as a Dict. PHP doesn't have tuples or lists, # only arrays - so everything has to get converted into an array # when serializing and the original type of the array is lost # How many keys does this list have? # +2 for colons around length field # Loop through and fetch this number of key/value pairs # Read the key #print "Key(%i) = (%s, %i, %s) %i" % (i, ktype, kchars, key, dataoffset) # Read value of the key #print "Value(%i) = (%s, %i, %s) %i" % (i, vtype, vchars, value, dataoffset) # Set the list element # +1 for end semicolon #chars = int(dataoffset) - start # I don't know how to unserialize this Read from data[offset] until you encounter some char 'stopchar'. 
# Consumed all the characters and havent found ';' # (chars_read, data) Read 'length' number of chars from data[offset]. # Account for the starting quote char #offset += 1 # (chars_read, data) | 2.993464 | 3 |
graph-breadth-first/graph_breadth_first/graph_breadth_first.py | doaa-1996/Data-structures-and-algorithms1 | 1 | 6613355 |
class Graph:
def __init__(self):
self.graph = {}
def addEdge(self, u, v):
if u in self.graph:
self.graph[u].append(v)
else:
self.graph[u] = [v]
def BFS(self, s):
visited = [False] * len(self.graph)
que = [s]
visited[s] = True
output=[]
while len(que) > 0:
s = que.pop(0)
output.append(s)
for node in self.graph[s]:
if visited[node] == False:
que.append(node)
visited[node] = True
return output
g = Graph()
g.addEdge(0,1)
g.addEdge(0,2)
g.addEdge(0,3)
g.addEdge(1,4)
g.addEdge(2,3)
g.addEdge(3,4)
g.addEdge(3,5)
g.addEdge(4,0)
g.addEdge(5,2)
print(g.BFS(0))
|
class Graph:
def __init__(self):
self.graph = {}
def addEdge(self, u, v):
if u in self.graph:
self.graph[u].append(v)
else:
self.graph[u] = [v]
def BFS(self, s):
visited = [False] * len(self.graph)
que = [s]
visited[s] = True
output=[]
while len(que) > 0:
s = que.pop(0)
output.append(s)
for node in self.graph[s]:
if visited[node] == False:
que.append(node)
visited[node] = True
return output
g = Graph()
g.addEdge(0,1)
g.addEdge(0,2)
g.addEdge(0,3)
g.addEdge(1,4)
g.addEdge(2,3)
g.addEdge(3,4)
g.addEdge(3,5)
g.addEdge(4,0)
g.addEdge(5,2)
print(g.BFS(0))
| none | 1 | 3.808645 | 4 | |
tests/algorithm_parallel_tests/multi_runs_test.py | buctlab/NIO | 4 | 6613356 | import unittest
import multiprocessing
import benchmarks
import algorithms
from numpy import random, zeros, mean, std, median, min, max
import pandas as pd
import inspect
import logging
import os
logging.basicConfig()
logger = logging.getLogger('MultipleTest')
logger.setLevel('INFO')
class MultiRunsTest(unittest.TestCase):
def setUp(self):
self.runs = 30
self.iterations = 200
self.Rand = random.RandomState(seed=1)
self.path = r"output/HighDim/multipleTests"
if not os.path.exists(self.path):
os.makedirs(self.path)
self.benchmarks = list()
self.benchnames = list()
self.algorithms = list()
self.algonames = list()
self.results = None
self.eval = None
for name, algorithm in inspect.getmembers(algorithms):
# if inspect.isclass(algorithm) and name not in ['Algorithm', 'KrillHerdBase', 'GeneticAlgorithm' ]:
if inspect.isclass(algorithm) and name in ['CuckooSearch']:
self.algorithms.append(algorithm)
self.algonames.append(name)
def save_res(self, path):
if not os.path.exists(path):
os.makedirs(path)
for bench_index, row in self.results.iterrows():
multi_runs = pd.DataFrame(columns=self.algonames, index=list(range(1, self.runs + 1)))
for alg_index in self.algonames:
res = row[alg_index] # array length = self.runs
for r in range(self.runs):
try:
multi_runs.loc[r + 1, alg_index] = res[r].get()
except ValueError as e:
logger.error(alg_index + ": " + str(e))
multi_runs.loc['mean'] = multi_runs.apply(mean)
multi_runs.loc['std'] = multi_runs.apply(std)
multi_runs.loc['median'] = multi_runs.iloc[:self.runs, :].apply(median)
multi_runs.loc['best'] = multi_runs.iloc[:self.runs, :].apply(min)
multi_runs.loc['worst'] = multi_runs.iloc[:self.runs, :].apply(max)
# logger.info("\n{}".format(multi_runs))
csv_path = "{path}/{bench}.csv".format(path=path, bench=bench_index)
multi_runs.to_csv(csv_path)
logger.info("Success Generate {test}: {file}".format(test=self._testMethodName, file=csv_path))
def test_2d_benchmarks(self):
dim = 2
self.benchmarks = list()
self.benchnames = list()
for name, benchmark in inspect.getmembers(benchmarks):
if inspect.isclass(benchmark) and name != 'Benchmark':
# if inspect.isclass(benchmark) and name == 'Michalewicz':
self.benchmarks.append(benchmark)
self.benchnames.append(name)
self.results = pd.DataFrame(columns=self.algonames, index=self.benchnames, dtype=object)
multiprocessing.freeze_support()
self.pool = multiprocessing.Pool()
for i in range(len(self.benchmarks)):
benchmark = self.benchmarks[i](dimension=dim)
for j in range(len(self.algorithms)):
res = zeros(shape=self.runs, dtype=object)
for r in range(self.runs):
alg = self.algorithms[j](func=benchmark, seed=self.Rand.randint(0, 10 * self.runs),
iterations=self.iterations)
res[r] = self.pool.apply_async(func=alg.run_return_best_val)
self.results.iloc[i, j] = res # array length = self.runs
self.pool.close()
self.pool.join()
path = self.path + r"/{dim}D".format(dim=dim)
self.save_res(path)
def test_5d_michalewicz(self):
dim = 5
self.benchmarks = [benchmarks.Michalewicz]
self.benchnames = ['Michalewicz']
print(self.algonames)
self.results = pd.DataFrame(columns=self.algonames, index=self.benchnames, dtype=object)
multiprocessing.freeze_support()
self.pool = multiprocessing.Pool()
for i in range(len(self.benchmarks)):
benchmark = self.benchmarks[i](dimension=dim)
for j in range(len(self.algorithms)):
res = zeros(shape=self.runs, dtype=object)
for r in range(self.runs):
alg = self.algorithms[j](func=benchmark, seed=self.Rand.randint(0, 10 * self.runs),
iterations=self.iterations)
res[r] = self.pool.apply_async(func=alg.run_return_best_val)
self.results.iloc[i, j] = res # array length = self.runs
self.pool.close()
self.pool.join()
path = self.path + r"/{dim}D".format(dim=dim)
self.save_res(path)
def test_10d_michalewicz(self):
dim = 10
self.benchmarks = [benchmarks.Michalewicz]
self.benchnames = ['Michalewicz']
self.results = pd.DataFrame(columns=self.algonames, index=self.benchnames, dtype=object)
multiprocessing.freeze_support()
self.pool = multiprocessing.Pool()
for i in range(len(self.benchmarks)):
benchmark = self.benchmarks[i](dimension=dim)
for j in range(len(self.algorithms)):
res = zeros(shape=self.runs, dtype=object)
for r in range(self.runs):
alg = self.algorithms[j](func=benchmark, seed=self.Rand.randint(0, 10 * self.runs),
iterations=self.iterations)
res[r] = self.pool.apply_async(func=alg.run_return_best_val)
self.results.iloc[i, j] = res # array length = self.runs
self.pool.close()
self.pool.join()
path = self.path + r"/{dim}D".format(dim=dim)
self.save_res(path)
def test_100d_benchmarks(self):
dim = 100
high_dim_bench = ['Ackley', 'Griewank', 'Rastrigin', 'Rosenbrock', 'Schwefel', 'Sphere', 'Stybtang']
# high_dim_bench = ['Ackley']
self.benchmarks = list()
self.benchnames = list()
for name, benchmark in inspect.getmembers(benchmarks):
if inspect.isclass(benchmark) and name in high_dim_bench:
self.benchmarks.append(benchmark)
self.benchnames.append(name)
print(self.algonames)
self.results = pd.DataFrame(columns=self.algonames, index=self.benchnames, dtype=object)
multiprocessing.freeze_support()
self.pool = multiprocessing.Pool()
for i in range(len(self.benchmarks)):
benchmark = self.benchmarks[i](dimension=dim)
for j in range(len(self.algorithms)):
res = zeros(shape=self.runs, dtype=object)
for r in range(self.runs):
alg = self.algorithms[j](func=benchmark, seed=self.Rand.randint(0, 10 * self.runs),
iterations=self.iterations)
res[r] = self.pool.apply_async(func=alg.run_return_best_val)
self.results.iloc[i, j] = res
self.pool.close()
self.pool.join()
path = self.path + r"/{dim}D_CS".format(dim=dim)
self.save_res(path)
def test_50d_benchmarks(self):
dim = 50
high_dim_bench = ['Ackley', 'Griewank', 'Rastrigin', 'Rosenbrock', 'Schwefel', 'Sphere', 'Stybtang']
# high_dim_bench = ['Ackley']
self.benchmarks = list()
self.benchnames = list()
for name, benchmark in inspect.getmembers(benchmarks):
if inspect.isclass(benchmark) and name in high_dim_bench:
self.benchmarks.append(benchmark)
self.benchnames.append(name)
print(self.algonames)
self.results = pd.DataFrame(columns=self.algonames, index=self.benchnames, dtype=object)
multiprocessing.freeze_support()
self.pool = multiprocessing.Pool()
for i in range(len(self.benchmarks)):
benchmark = self.benchmarks[i](dimension=dim)
for j in range(len(self.algorithms)):
res = zeros(shape=self.runs, dtype=object)
for r in range(self.runs):
alg = self.algorithms[j](func=benchmark, seed=self.Rand.randint(0, 10 * self.runs),
iterations=self.iterations)
res[r] = self.pool.apply_async(func=alg.run_return_best_val)
self.results.iloc[i, j] = res
self.pool.close()
self.pool.join()
path = self.path + r"/{dim}D_CS".format(dim=dim)
self.save_res(path)
if __name__ == '__main__':
unittest.main()
| import unittest
import multiprocessing
import benchmarks
import algorithms
from numpy import random, zeros, mean, std, median, min, max
import pandas as pd
import inspect
import logging
import os
logging.basicConfig()
logger = logging.getLogger('MultipleTest')
logger.setLevel('INFO')
class MultiRunsTest(unittest.TestCase):
def setUp(self):
self.runs = 30
self.iterations = 200
self.Rand = random.RandomState(seed=1)
self.path = r"output/HighDim/multipleTests"
if not os.path.exists(self.path):
os.makedirs(self.path)
self.benchmarks = list()
self.benchnames = list()
self.algorithms = list()
self.algonames = list()
self.results = None
self.eval = None
for name, algorithm in inspect.getmembers(algorithms):
# if inspect.isclass(algorithm) and name not in ['Algorithm', 'KrillHerdBase', 'GeneticAlgorithm' ]:
if inspect.isclass(algorithm) and name in ['CuckooSearch']:
self.algorithms.append(algorithm)
self.algonames.append(name)
def save_res(self, path):
if not os.path.exists(path):
os.makedirs(path)
for bench_index, row in self.results.iterrows():
multi_runs = pd.DataFrame(columns=self.algonames, index=list(range(1, self.runs + 1)))
for alg_index in self.algonames:
res = row[alg_index] # array length = self.runs
for r in range(self.runs):
try:
multi_runs.loc[r + 1, alg_index] = res[r].get()
except ValueError as e:
logger.error(alg_index + ": " + str(e))
multi_runs.loc['mean'] = multi_runs.apply(mean)
multi_runs.loc['std'] = multi_runs.apply(std)
multi_runs.loc['median'] = multi_runs.iloc[:self.runs, :].apply(median)
multi_runs.loc['best'] = multi_runs.iloc[:self.runs, :].apply(min)
multi_runs.loc['worst'] = multi_runs.iloc[:self.runs, :].apply(max)
# logger.info("\n{}".format(multi_runs))
csv_path = "{path}/{bench}.csv".format(path=path, bench=bench_index)
multi_runs.to_csv(csv_path)
logger.info("Success Generate {test}: {file}".format(test=self._testMethodName, file=csv_path))
def test_2d_benchmarks(self):
dim = 2
self.benchmarks = list()
self.benchnames = list()
for name, benchmark in inspect.getmembers(benchmarks):
if inspect.isclass(benchmark) and name != 'Benchmark':
# if inspect.isclass(benchmark) and name == 'Michalewicz':
self.benchmarks.append(benchmark)
self.benchnames.append(name)
self.results = pd.DataFrame(columns=self.algonames, index=self.benchnames, dtype=object)
multiprocessing.freeze_support()
self.pool = multiprocessing.Pool()
for i in range(len(self.benchmarks)):
benchmark = self.benchmarks[i](dimension=dim)
for j in range(len(self.algorithms)):
res = zeros(shape=self.runs, dtype=object)
for r in range(self.runs):
alg = self.algorithms[j](func=benchmark, seed=self.Rand.randint(0, 10 * self.runs),
iterations=self.iterations)
res[r] = self.pool.apply_async(func=alg.run_return_best_val)
self.results.iloc[i, j] = res # array length = self.runs
self.pool.close()
self.pool.join()
path = self.path + r"/{dim}D".format(dim=dim)
self.save_res(path)
def test_5d_michalewicz(self):
dim = 5
self.benchmarks = [benchmarks.Michalewicz]
self.benchnames = ['Michalewicz']
print(self.algonames)
self.results = pd.DataFrame(columns=self.algonames, index=self.benchnames, dtype=object)
multiprocessing.freeze_support()
self.pool = multiprocessing.Pool()
for i in range(len(self.benchmarks)):
benchmark = self.benchmarks[i](dimension=dim)
for j in range(len(self.algorithms)):
res = zeros(shape=self.runs, dtype=object)
for r in range(self.runs):
alg = self.algorithms[j](func=benchmark, seed=self.Rand.randint(0, 10 * self.runs),
iterations=self.iterations)
res[r] = self.pool.apply_async(func=alg.run_return_best_val)
self.results.iloc[i, j] = res # array length = self.runs
self.pool.close()
self.pool.join()
path = self.path + r"/{dim}D".format(dim=dim)
self.save_res(path)
def test_10d_michalewicz(self):
dim = 10
self.benchmarks = [benchmarks.Michalewicz]
self.benchnames = ['Michalewicz']
self.results = pd.DataFrame(columns=self.algonames, index=self.benchnames, dtype=object)
multiprocessing.freeze_support()
self.pool = multiprocessing.Pool()
for i in range(len(self.benchmarks)):
benchmark = self.benchmarks[i](dimension=dim)
for j in range(len(self.algorithms)):
res = zeros(shape=self.runs, dtype=object)
for r in range(self.runs):
alg = self.algorithms[j](func=benchmark, seed=self.Rand.randint(0, 10 * self.runs),
iterations=self.iterations)
res[r] = self.pool.apply_async(func=alg.run_return_best_val)
self.results.iloc[i, j] = res # array length = self.runs
self.pool.close()
self.pool.join()
path = self.path + r"/{dim}D".format(dim=dim)
self.save_res(path)
def test_100d_benchmarks(self):
dim = 100
high_dim_bench = ['Ackley', 'Griewank', 'Rastrigin', 'Rosenbrock', 'Schwefel', 'Sphere', 'Stybtang']
# high_dim_bench = ['Ackley']
self.benchmarks = list()
self.benchnames = list()
for name, benchmark in inspect.getmembers(benchmarks):
if inspect.isclass(benchmark) and name in high_dim_bench:
self.benchmarks.append(benchmark)
self.benchnames.append(name)
print(self.algonames)
self.results = pd.DataFrame(columns=self.algonames, index=self.benchnames, dtype=object)
multiprocessing.freeze_support()
self.pool = multiprocessing.Pool()
for i in range(len(self.benchmarks)):
benchmark = self.benchmarks[i](dimension=dim)
for j in range(len(self.algorithms)):
res = zeros(shape=self.runs, dtype=object)
for r in range(self.runs):
alg = self.algorithms[j](func=benchmark, seed=self.Rand.randint(0, 10 * self.runs),
iterations=self.iterations)
res[r] = self.pool.apply_async(func=alg.run_return_best_val)
self.results.iloc[i, j] = res
self.pool.close()
self.pool.join()
path = self.path + r"/{dim}D_CS".format(dim=dim)
self.save_res(path)
def test_50d_benchmarks(self):
dim = 50
high_dim_bench = ['Ackley', 'Griewank', 'Rastrigin', 'Rosenbrock', 'Schwefel', 'Sphere', 'Stybtang']
# high_dim_bench = ['Ackley']
self.benchmarks = list()
self.benchnames = list()
for name, benchmark in inspect.getmembers(benchmarks):
if inspect.isclass(benchmark) and name in high_dim_bench:
self.benchmarks.append(benchmark)
self.benchnames.append(name)
print(self.algonames)
self.results = pd.DataFrame(columns=self.algonames, index=self.benchnames, dtype=object)
multiprocessing.freeze_support()
self.pool = multiprocessing.Pool()
for i in range(len(self.benchmarks)):
benchmark = self.benchmarks[i](dimension=dim)
for j in range(len(self.algorithms)):
res = zeros(shape=self.runs, dtype=object)
for r in range(self.runs):
alg = self.algorithms[j](func=benchmark, seed=self.Rand.randint(0, 10 * self.runs),
iterations=self.iterations)
res[r] = self.pool.apply_async(func=alg.run_return_best_val)
self.results.iloc[i, j] = res
self.pool.close()
self.pool.join()
path = self.path + r"/{dim}D_CS".format(dim=dim)
self.save_res(path)
if __name__ == '__main__':
unittest.main()
| en | 0.38904 | # if inspect.isclass(algorithm) and name not in ['Algorithm', 'KrillHerdBase', 'GeneticAlgorithm' ]: # array length = self.runs # logger.info("\n{}".format(multi_runs)) # if inspect.isclass(benchmark) and name == 'Michalewicz': # array length = self.runs # array length = self.runs # array length = self.runs # high_dim_bench = ['Ackley'] # high_dim_bench = ['Ackley'] | 2.477264 | 2 |
Helpers/ProcessController.py | CDboyOne/IHGNN | 5 | 6613357 | <reponame>CDboyOne/IHGNN
from typing import Any, List, Dict, Set, Tuple, Union, Optional, Iterator, Iterable
import math
class ProcessController:
StartEpoch: int
CurrentEpoch: int
EndEpoch: int
EpochCount: int
_start_test_epoch: int
_test_frequency: int
_start_store_epoch: Optional[int]
_store_frequency: Optional[int]
_test_count: int
_train_time_list: List[float]
_test_time_list: List[float]
def __init__(self,
epoch_count: int,
start_epoch: int,
start_test_epoch: int,
test_frequency: int,
start_store_epoch: int = None,
store_frequency: int = None) -> None:
'''初始化一个流程控制器。
参数:
epoch_count: ...
start_epoch: 初始 epoch,从 1 开始。
start_test_epoch: 开始测试的 epoch,从 1 开始。
test_frequency: 控制每多少个 epoch 测试一次。
start_store_epoch: 开始存档的 epoch,从 1 开始。为 None 则不存档。
store_frequency: 控制每多少个 epoch 存档一次。为 None 则不存档。
'''
self.StartEpoch = start_epoch
self.EpochCount = epoch_count
self.EndEpoch = start_epoch + epoch_count
self._start_test_epoch = start_test_epoch
self._test_frequency = test_frequency
self._test_count = 1 + (epoch_count - start_test_epoch) / test_frequency
self._train_time_list = []
self._test_time_list = []
if start_store_epoch is None or store_frequency is None:
self._start_store_epoch = self._store_frequency = None
else:
self._start_store_epoch = start_store_epoch
self._store_frequency = store_frequency
def __len__(self) -> int: return self.EpochCount
def __iter__(self) -> Iterator[int]:
self.CurrentEpoch = self.StartEpoch - 1
return self
def __next__(self) -> int:
self.CurrentEpoch += 1
if self.CurrentEpoch == self.EndEpoch: raise StopIteration()
else: return self.CurrentEpoch
def ShouldTest(self) -> bool:
epoch = self.CurrentEpoch + 1
start_test = self._start_test_epoch
return (epoch - self.StartEpoch >= start_test) and ((self.CurrentEpoch - start_test) % self._test_frequency == 0 or (epoch == self.EndEpoch))
def ShouldStore(self) -> bool:
if self._start_store_epoch is None: return False
epoch = self.CurrentEpoch + 1
start_store = self._start_store_epoch
return (epoch - self.StartEpoch >= start_store) and ((self.CurrentEpoch - start_store) % self._store_frequency == 0 or epoch == self.EndEpoch)
def AddTrainTime(self, time: float) -> None: self._train_time_list.append(time)
def AddTestTime(self, time: float) -> None: self._test_time_list.append(time)
def GetRemainingTime(self) -> float:
if len(self._train_time_list) >= 2:
avg_epoch_time = (self._train_time_list[-1] + self._train_time_list[-2]) / 2
elif len(self._train_time_list) == 1:
avg_epoch_time = self._train_time_list[0]
else:
return float('nan')
if len(self._test_time_list) >= 2:
avg_test_time = (self._test_time_list[-1] + self._test_time_list[-2]) / 2
elif len(self._test_time_list) == 1:
avg_test_time = self._test_time_list[0]
else:
avg_test_time = avg_epoch_time * 2
remain_epoch_time = avg_epoch_time * (self.EndEpoch - self.CurrentEpoch)
remain_test_time = avg_test_time * (self._test_count - len(self._test_time_list))
return remain_epoch_time + remain_test_time
def GetRemainingTimeString(self) -> float:
time = self.GetRemainingTime()
if math.isnan(time):
return '暂无法计算'
elif time >= 3600:
h = time // 3600
m = time / 60 - 60 * h
return f'{int(h)} h {int(m)} m'
elif time >= 60:
return f'{int(time/60)} m'
else:
return f'{int(time)} s'
if __name__ == '__main__':
pc = ProcessController(20, 5, 7, 2)
for epoch in pc:
print(f'Epoch {epoch}')
if pc.ShouldTest(): print(' Test!') | from typing import Any, List, Dict, Set, Tuple, Union, Optional, Iterator, Iterable
import math
class ProcessController:
StartEpoch: int
CurrentEpoch: int
EndEpoch: int
EpochCount: int
_start_test_epoch: int
_test_frequency: int
_start_store_epoch: Optional[int]
_store_frequency: Optional[int]
_test_count: int
_train_time_list: List[float]
_test_time_list: List[float]
def __init__(self,
epoch_count: int,
start_epoch: int,
start_test_epoch: int,
test_frequency: int,
start_store_epoch: int = None,
store_frequency: int = None) -> None:
'''初始化一个流程控制器。
参数:
epoch_count: ...
start_epoch: 初始 epoch,从 1 开始。
start_test_epoch: 开始测试的 epoch,从 1 开始。
test_frequency: 控制每多少个 epoch 测试一次。
start_store_epoch: 开始存档的 epoch,从 1 开始。为 None 则不存档。
store_frequency: 控制每多少个 epoch 存档一次。为 None 则不存档。
'''
self.StartEpoch = start_epoch
self.EpochCount = epoch_count
self.EndEpoch = start_epoch + epoch_count
self._start_test_epoch = start_test_epoch
self._test_frequency = test_frequency
self._test_count = 1 + (epoch_count - start_test_epoch) / test_frequency
self._train_time_list = []
self._test_time_list = []
if start_store_epoch is None or store_frequency is None:
self._start_store_epoch = self._store_frequency = None
else:
self._start_store_epoch = start_store_epoch
self._store_frequency = store_frequency
def __len__(self) -> int: return self.EpochCount
def __iter__(self) -> Iterator[int]:
self.CurrentEpoch = self.StartEpoch - 1
return self
def __next__(self) -> int:
self.CurrentEpoch += 1
if self.CurrentEpoch == self.EndEpoch: raise StopIteration()
else: return self.CurrentEpoch
def ShouldTest(self) -> bool:
epoch = self.CurrentEpoch + 1
start_test = self._start_test_epoch
return (epoch - self.StartEpoch >= start_test) and ((self.CurrentEpoch - start_test) % self._test_frequency == 0 or (epoch == self.EndEpoch))
def ShouldStore(self) -> bool:
if self._start_store_epoch is None: return False
epoch = self.CurrentEpoch + 1
start_store = self._start_store_epoch
return (epoch - self.StartEpoch >= start_store) and ((self.CurrentEpoch - start_store) % self._store_frequency == 0 or epoch == self.EndEpoch)
def AddTrainTime(self, time: float) -> None: self._train_time_list.append(time)
def AddTestTime(self, time: float) -> None: self._test_time_list.append(time)
def GetRemainingTime(self) -> float:
if len(self._train_time_list) >= 2:
avg_epoch_time = (self._train_time_list[-1] + self._train_time_list[-2]) / 2
elif len(self._train_time_list) == 1:
avg_epoch_time = self._train_time_list[0]
else:
return float('nan')
if len(self._test_time_list) >= 2:
avg_test_time = (self._test_time_list[-1] + self._test_time_list[-2]) / 2
elif len(self._test_time_list) == 1:
avg_test_time = self._test_time_list[0]
else:
avg_test_time = avg_epoch_time * 2
remain_epoch_time = avg_epoch_time * (self.EndEpoch - self.CurrentEpoch)
remain_test_time = avg_test_time * (self._test_count - len(self._test_time_list))
return remain_epoch_time + remain_test_time
def GetRemainingTimeString(self) -> float:
time = self.GetRemainingTime()
if math.isnan(time):
return '暂无法计算'
elif time >= 3600:
h = time // 3600
m = time / 60 - 60 * h
return f'{int(h)} h {int(m)} m'
elif time >= 60:
return f'{int(time/60)} m'
else:
return f'{int(time)} s'
if __name__ == '__main__':
pc = ProcessController(20, 5, 7, 2)
for epoch in pc:
print(f'Epoch {epoch}')
if pc.ShouldTest(): print(' Test!') | zh | 0.891856 | 初始化一个流程控制器。
参数:
epoch_count: ...
start_epoch: 初始 epoch,从 1 开始。
start_test_epoch: 开始测试的 epoch,从 1 开始。
test_frequency: 控制每多少个 epoch 测试一次。
start_store_epoch: 开始存档的 epoch,从 1 开始。为 None 则不存档。
store_frequency: 控制每多少个 epoch 存档一次。为 None 则不存档。 | 3.054096 | 3 |
train.py | butyr/pix2pix-tensorflow | 0 | 6613358 | import datetime
import os
import pathlib
import tensorflow as tf
from src.container import ExperimentLogging, Model
from src.dataloader import Dataloader
from src.loss import DiscriminatorLoss, GeneratorLoss
from src.models import PatchGAN, UNet
from src.trainer import Trainer
def main():
# The facade training set consist of 400 images
BUFFER_SIZE = 400
# The batch size of 1 produced better results for the U-Net in the original pix2pix experiment
BATCH_SIZE = 1
# Each image is 256x256 in size
IMG_WIDTH = 256
IMG_HEIGHT = 256
dataset_name = "facades"
_URL = f"http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/{dataset_name}.tar.gz"
path_to_zip = tf.keras.utils.get_file(
fname=f"{dataset_name}.tar.gz", origin=_URL, extract=True
)
path_to_zip = pathlib.Path(path_to_zip)
PATH = path_to_zip.parent / dataset_name
log_dir = "logs/"
summary_writer = tf.summary.create_file_writer(
log_dir + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
)
checkpoint_dir = "./training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
trainer = Trainer(
generator=Model(
network=UNet(num_channels=3),
optimizer=tf.keras.optimizers.Adam(2e-4, beta_1=0.5),
loss=GeneratorLoss(lam=100),
),
discriminator=Model(
network=PatchGAN(),
optimizer=tf.keras.optimizers.Adam(2e-4, beta_1=0.5),
loss=DiscriminatorLoss(),
),
logging=ExperimentLogging(
summary_writer=summary_writer,
checkpoint_dir=checkpoint_dir,
checkpoint_prefix=checkpoint_prefix,
),
)
dataloader = Dataloader(
PATH,
BUFFER_SIZE,
BATCH_SIZE,
IMG_WIDTH,
IMG_HEIGHT,
)
train_dataset, test_dataset = dataloader.build_train(), dataloader.build_test()
trainer.fit(train_dataset, test_dataset, steps=40000)
if __name__ == "__main__":
main()
| import datetime
import os
import pathlib
import tensorflow as tf
from src.container import ExperimentLogging, Model
from src.dataloader import Dataloader
from src.loss import DiscriminatorLoss, GeneratorLoss
from src.models import PatchGAN, UNet
from src.trainer import Trainer
def main():
# The facade training set consist of 400 images
BUFFER_SIZE = 400
# The batch size of 1 produced better results for the U-Net in the original pix2pix experiment
BATCH_SIZE = 1
# Each image is 256x256 in size
IMG_WIDTH = 256
IMG_HEIGHT = 256
dataset_name = "facades"
_URL = f"http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/{dataset_name}.tar.gz"
path_to_zip = tf.keras.utils.get_file(
fname=f"{dataset_name}.tar.gz", origin=_URL, extract=True
)
path_to_zip = pathlib.Path(path_to_zip)
PATH = path_to_zip.parent / dataset_name
log_dir = "logs/"
summary_writer = tf.summary.create_file_writer(
log_dir + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
)
checkpoint_dir = "./training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
trainer = Trainer(
generator=Model(
network=UNet(num_channels=3),
optimizer=tf.keras.optimizers.Adam(2e-4, beta_1=0.5),
loss=GeneratorLoss(lam=100),
),
discriminator=Model(
network=PatchGAN(),
optimizer=tf.keras.optimizers.Adam(2e-4, beta_1=0.5),
loss=DiscriminatorLoss(),
),
logging=ExperimentLogging(
summary_writer=summary_writer,
checkpoint_dir=checkpoint_dir,
checkpoint_prefix=checkpoint_prefix,
),
)
dataloader = Dataloader(
PATH,
BUFFER_SIZE,
BATCH_SIZE,
IMG_WIDTH,
IMG_HEIGHT,
)
train_dataset, test_dataset = dataloader.build_train(), dataloader.build_test()
trainer.fit(train_dataset, test_dataset, steps=40000)
if __name__ == "__main__":
main()
| en | 0.898013 | # The facade training set consist of 400 images # The batch size of 1 produced better results for the U-Net in the original pix2pix experiment # Each image is 256x256 in size | 2.107941 | 2 |
open this for text file manipulation/searchKeywords.py | ayandeephazra/Natural_Language_Processing_Research | 1 | 6613359 | # Open one of the files,
list = []
import os
# This is the path where all the files are stored.
folder_path = 'C:\\Users\\<NAME>\\PycharmProjects\\pythonProject1\\files'
for data_file in sorted(os.listdir(folder_path)):
print(data_file)
list.append(data_file)
for i in range(len(list)):
f = open("files/" + str(list[i]), "r", encoding="utf-8")
content = f.read()
if content.find('hardness') != -1:
print(content, list[i])
| # Open one of the files,
list = []
import os
# This is the path where all the files are stored.
folder_path = 'C:\\Users\\<NAME>\\PycharmProjects\\pythonProject1\\files'
for data_file in sorted(os.listdir(folder_path)):
print(data_file)
list.append(data_file)
for i in range(len(list)):
f = open("files/" + str(list[i]), "r", encoding="utf-8")
content = f.read()
if content.find('hardness') != -1:
print(content, list[i])
| en | 0.935305 | # Open one of the files, # This is the path where all the files are stored. | 3.263999 | 3 |
UserInput/UserInput.py | roniwinik/Drivers | 48 | 6613360 | <filename>UserInput/UserInput.py<gh_stars>10-100
import InstrumentDriver
class Driver(InstrumentDriver.InstrumentWorker):
""" This class implements a user input driver"""
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation"""
# proceed depending on quantity
if quant.name == 'Set value':
# get value from user dialog
newValue = self.getValueFromUserDialog(value,
title='User input - Set value')
return newValue
else:
return quant.getValue()
def performGetValue(self, quant, options={}):
"""Perform the Get Value instrument operation"""
# proceed depending on quantity
if quant.name == 'Get value':
# get value from user dialog
newValue = self.getValueFromUserDialog(quant.getValue(),
title='User input - Get value')
return newValue
else:
return quant.getValue()
| <filename>UserInput/UserInput.py<gh_stars>10-100
import InstrumentDriver
class Driver(InstrumentDriver.InstrumentWorker):
""" This class implements a user input driver"""
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation"""
# proceed depending on quantity
if quant.name == 'Set value':
# get value from user dialog
newValue = self.getValueFromUserDialog(value,
title='User input - Set value')
return newValue
else:
return quant.getValue()
def performGetValue(self, quant, options={}):
"""Perform the Get Value instrument operation"""
# proceed depending on quantity
if quant.name == 'Get value':
# get value from user dialog
newValue = self.getValueFromUserDialog(quant.getValue(),
title='User input - Get value')
return newValue
else:
return quant.getValue()
| en | 0.717602 | This class implements a user input driver Perform the Set Value instrument operation # proceed depending on quantity # get value from user dialog Perform the Get Value instrument operation # proceed depending on quantity # get value from user dialog | 3.352913 | 3 |
textomatic/app/keys.py | dankilman/textomatic | 26 | 6613361 | <filename>textomatic/app/keys.py
from prompt_toolkit.key_binding import KeyBindings
from textomatic import context
kb = KeyBindings()
cmd_kb = KeyBindings()
kb.add("c-c")(lambda e: e.app.exit())
kb.add("tab")(lambda e: e.app.layout.focus_next())
kb.add("s-tab")(lambda e: e.app.layout.focus_previous())
@kb.add("c-t")
def change_boxes_orientation(_):
ctx = context.get()
ctx.box_veritcal_orientation = not ctx.box_veritcal_orientation
@kb.add("c-o")
def exit_and_print_output(_):
ctx = context.get()
ctx.print_output_on_exit = True
ctx.app.exit()
@kb.add("c-p")
def copy_output_to_clipboard(_):
ctx = context.get()
ctx.app.clipboard.set_text(ctx.output_buffer.text)
ctx.copied_to_clipboard = True
def off():
ctx.copied_to_clipboard = False
ctx.app.loop.call_later(0.1, off)
@kb.add("f1")
def toggle_help(_):
ctx = context.get()
ctx.display_help = not ctx.display_help
@kb.add("c-l")
def toggle_live(_):
ctx = context.get()
ctx.live = not ctx.live
@kb.add("c-r")
def run1(_):
ctx = context.get()
ctx.process_fn("run")
cmd_kb.add("c-m")(lambda _: run1(_))
| <filename>textomatic/app/keys.py
from prompt_toolkit.key_binding import KeyBindings
from textomatic import context
kb = KeyBindings()
cmd_kb = KeyBindings()
kb.add("c-c")(lambda e: e.app.exit())
kb.add("tab")(lambda e: e.app.layout.focus_next())
kb.add("s-tab")(lambda e: e.app.layout.focus_previous())
@kb.add("c-t")
def change_boxes_orientation(_):
ctx = context.get()
ctx.box_veritcal_orientation = not ctx.box_veritcal_orientation
@kb.add("c-o")
def exit_and_print_output(_):
ctx = context.get()
ctx.print_output_on_exit = True
ctx.app.exit()
@kb.add("c-p")
def copy_output_to_clipboard(_):
ctx = context.get()
ctx.app.clipboard.set_text(ctx.output_buffer.text)
ctx.copied_to_clipboard = True
def off():
ctx.copied_to_clipboard = False
ctx.app.loop.call_later(0.1, off)
@kb.add("f1")
def toggle_help(_):
ctx = context.get()
ctx.display_help = not ctx.display_help
@kb.add("c-l")
def toggle_live(_):
ctx = context.get()
ctx.live = not ctx.live
@kb.add("c-r")
def run1(_):
ctx = context.get()
ctx.process_fn("run")
cmd_kb.add("c-m")(lambda _: run1(_))
| none | 1 | 2.261671 | 2 | |
TwoSum.py | abeecode/Leetcode-Solution | 0 | 6613362 | # coding=utf-8
'''
@Time : 2019-04-08 16:06
@Author : Bxf
@FileName : TwoSum.py
@License : (C)LINKINGMED,2019
@Contact : <EMAIL>, 17603912161
@==============================@
@ ___ ___ @
@ / _ | / _ )___ ___ @
@ / __ |/ _ / -_) -_) @
@ /_/ |_/____/\__/\__/ @
@ ABee @
@==============================@
'''
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
if len(nums) < 2:
pass
else:
for item in nums:
if (target - item) in nums:
if item != target - item:
return [nums.index(item), nums.index(target - item)]
else:
nums_n = nums[nums.index(item) + 1:]
if target - item in nums_n:
return [nums.index(item), nums.index(item) + nums_n.index(target - item) + 1]
return 0
if __name__ == '__main__':
s = Solution()
ret = s.twoSum([2, 6, 3], 6)
print(ret)
| # coding=utf-8
'''
@Time : 2019-04-08 16:06
@Author : Bxf
@FileName : TwoSum.py
@License : (C)LINKINGMED,2019
@Contact : <EMAIL>, 17603912161
@==============================@
@ ___ ___ @
@ / _ | / _ )___ ___ @
@ / __ |/ _ / -_) -_) @
@ /_/ |_/____/\__/\__/ @
@ ABee @
@==============================@
'''
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
if len(nums) < 2:
pass
else:
for item in nums:
if (target - item) in nums:
if item != target - item:
return [nums.index(item), nums.index(target - item)]
else:
nums_n = nums[nums.index(item) + 1:]
if target - item in nums_n:
return [nums.index(item), nums.index(item) + nums_n.index(target - item) + 1]
return 0
if __name__ == '__main__':
s = Solution()
ret = s.twoSum([2, 6, 3], 6)
print(ret)
| fr | 0.344682 | # coding=utf-8 @Time : 2019-04-08 16:06 @Author : Bxf @FileName : TwoSum.py @License : (C)LINKINGMED,2019 @Contact : <EMAIL>, 17603912161 @==============================@ @ ___ ___ @ @ / _ | / _ )___ ___ @ @ / __ |/ _ / -_) -_) @ @ /_/ |_/____/\__/\__/ @ @ ABee @ @==============================@ :type nums: List[int] :type target: int :rtype: List[int] | 3.640877 | 4 |
tests.py | huongdo108/image-classification-LENET-VGG-RESNET | 0 | 6613363 | <filename>tests.py
import os
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
def plot_images(images, n_rows=1):
fig, axs = plt.subplots(n_rows, images.size(0) // n_rows)
for ax, img in zip(axs.flat, images):
ax.matshow(img[0].cpu().numpy(), cmap=plt.cm.Greys)
ax.set_xticks([])
ax.set_yticks([])
ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
plt.tight_layout(w_pad=0)
def set_weights_lenet(net):
for param in net.parameters():
if param.shape == torch.Size([16, 1, 5, 5]):
param.data[:8] = 1/25
param.data[8:] = -1/25
elif param.shape == torch.Size([16]):
param.data[:] = 0
elif param.shape == torch.Size([32, 16, 5, 5]):
param.data[:16] = -1/200
param.data[16:] = 1/200
elif param.shape == torch.Size([32]):
param.data[:] = 0
elif param.shape == torch.Size([120, 512]):
param.data[:60] = 1/235.52
param.data[60:] = -1/235.52
elif param.shape == torch.Size([120]):
param.data[:] = 0
elif param.shape == torch.Size([84, 120]):
param.data[:42] = 1/60
param.data[42:] = -1/60
elif param.shape == torch.Size([84]):
param.data[:] = 0
elif param.shape == torch.Size([10, 84]):
param.data[:5] = 1/42
param.data[5:] = -1/42
elif param.shape == torch.Size([10]):
param.data[:] = 0
def test_LeNet5(LeNet5):
x = torch.ones(1, 1, 28, 28)
x[0, 0, :14] = -1
net = LeNet5()
set_weights_lenet(net)
y = net(x)
expected = torch.Tensor([1., 1., 1., 1., 1., -1., -1., -1., -1., -1.])
print('y:', y)
print('expected:', expected)
assert torch.allclose(y, expected), "y does not match expected value."
print('Success')
def set_weights(module, weight):
if isinstance(module, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
if module.bias is not None:
module.bias.data.fill_(0)
module.weight.data.fill_(weight)
return
for child in module.children():
set_weights(child, weight)
def disable_batch_norm(module):
if isinstance(module, nn.BatchNorm2d):
module.momentum = 0
module.weight.data.fill_(1)
module.bias.data.fill_(0)
module.running_var.fill_(1)
module.running_mean.fill_(0)
return
for child in module.children():
disable_batch_norm(child)
def set_batch_norm(block):
for child in block.children():
if isinstance(child, nn.BatchNorm2d):
child.momentum = 0
child.weight.data.fill_(1)
child.bias.data.fill_(0)
child.running_var.fill_(0.25)
child.running_mean.fill_(0)
elif isinstance(child, nn.Sequential):
set_batch_norm(child)
def set_weights_vgg(net, n_channels):
for param in net.parameters():
if param.shape == torch.Size([n_channels, 1, 3, 3]):
param.data[:1] = 1/9
param.data[1:] = -1/9
elif param.shape == torch.Size([n_channels]):
param.data[:] = 0
elif param.shape == torch.Size([n_channels, n_channels, 3, 3]):
param.data[:1] = 1/9
param.data[1:] = -1/9
elif param.shape == torch.Size([2*n_channels, n_channels, 3, 3]):
param.data[:2] = 1/9
param.data[2:] = -1/9
elif param.shape == torch.Size([2*n_channels]):
param.data[:] = 0
elif param.shape == torch.Size([2*n_channels, 2*n_channels, 3, 3]):
param.data[:2] = 1/18
param.data[2:] = -1/18
elif param.shape == torch.Size([3*n_channels, 2*n_channels, 3, 3]):
param.data[:3] = 1/18
param.data[3:] = -1/18
elif param.shape == torch.Size([3*n_channels]):
param.data[:] = 0
elif param.shape == torch.Size([2*n_channels, 3*n_channels, 1, 1]):
param.data[:2] = 1/3
param.data[2:] = -1/3
elif param.shape == torch.Size([n_channels, 2*n_channels, 1, 1]):
param.data[:] = 1/2
param.data[1:] = -1/2
elif param.shape == torch.Size([10, n_channels]):
param.data[:5] = 1
param.data[5:] = -1
elif param.shape == torch.Size([10]):
param.data[:] = 0
def test_vgg_net(VGGNet):
x = torch.ones(1, 1, 28, 28)
x[0, 0, :14] = -1
n_channels = 16
net = VGGNet(n_channels)
net.eval()
set_weights_vgg(net, n_channels)
disable_batch_norm(net)
y = net(x)
expected = 8.0032 * torch.ones(10)
expected[5:] = -8.0032
print('y:', y)
print('expected:', expected)
assert torch.allclose(y, expected), "y does not match expected value."
print('Success')
def test_Block(Block):
# Simplest case
batch_size = 1
x = torch.ones(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=1)
block.eval()
disable_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 1, 3, 3]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[26, 36, 26],
[36, 50, 36],
[26, 36, 26]
]).reshape((batch_size, 1, 3, 3))
npt.assert_allclose(y, expected, atol=1e-03, err_msg="y does not match expected value.")
# Increase the number of channels
batch_size = 1
x = torch.ones(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=2)
block.eval()
disable_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 2, 3, 3]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[51, 71, 51],
[71, 99, 71],
[51, 71, 51]
]).reshape(1, 1, 3, 3)
expected = np.tile(expected, (1, 2, 1, 1))
npt.assert_allclose(y, expected, atol=1e-03, err_msg="y does not match expected value.")
# stride=2
batch_size = 1
x = torch.ones(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=1, stride=2)
block.eval()
disable_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 1, 2, 2]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[17, 17],
[17, 17],
]).reshape(1, 1, 2, 2)
npt.assert_allclose(y, expected, atol=1e-03, err_msg="y does not match expected value.")
# Increase the number of channels and stride=2
batch_size = 1
x = torch.ones(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=2, stride=2)
block.eval()
disable_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 2, 2, 2]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[33, 33],
[33, 33],
]).reshape(1, 1, 2, 2)
expected = np.tile(expected, (1, 2, 1, 1))
npt.assert_allclose(y, expected, atol=1e-03, err_msg="y does not match expected value.")
print('Success')
def test_Block_relu(Block):
# check relus
batch_size = 1
x = torch.tensor([
[-1., 1., -1.],
[1., -1., 1.],
[-1., 1., -1.],
]).view(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=1)
block.eval()
disable_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 1, 3, 3]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[0, 1, 0],
[1, 0, 1],
[0, 1, 0]
]).reshape(1, 1, 3, 3)
npt.assert_allclose(y, expected, err_msg="y does not match expected value.")
print('Success')
def test_Block_batch_norm(Block):
# Two batch norms in the non-skip part
batch_size = 1
x = torch.ones(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=1)
block.eval()
set_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 1, 3, 3]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[101, 141, 101],
[141, 197, 141],
[101, 141, 101]
]).reshape((batch_size, 1, 3, 3))
npt.assert_allclose(y, expected, atol=1e-02, err_msg="y does not match expected value.")
print('Success')
| <filename>tests.py
import os
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
def plot_images(images, n_rows=1):
fig, axs = plt.subplots(n_rows, images.size(0) // n_rows)
for ax, img in zip(axs.flat, images):
ax.matshow(img[0].cpu().numpy(), cmap=plt.cm.Greys)
ax.set_xticks([])
ax.set_yticks([])
ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
plt.tight_layout(w_pad=0)
def set_weights_lenet(net):
for param in net.parameters():
if param.shape == torch.Size([16, 1, 5, 5]):
param.data[:8] = 1/25
param.data[8:] = -1/25
elif param.shape == torch.Size([16]):
param.data[:] = 0
elif param.shape == torch.Size([32, 16, 5, 5]):
param.data[:16] = -1/200
param.data[16:] = 1/200
elif param.shape == torch.Size([32]):
param.data[:] = 0
elif param.shape == torch.Size([120, 512]):
param.data[:60] = 1/235.52
param.data[60:] = -1/235.52
elif param.shape == torch.Size([120]):
param.data[:] = 0
elif param.shape == torch.Size([84, 120]):
param.data[:42] = 1/60
param.data[42:] = -1/60
elif param.shape == torch.Size([84]):
param.data[:] = 0
elif param.shape == torch.Size([10, 84]):
param.data[:5] = 1/42
param.data[5:] = -1/42
elif param.shape == torch.Size([10]):
param.data[:] = 0
def test_LeNet5(LeNet5):
x = torch.ones(1, 1, 28, 28)
x[0, 0, :14] = -1
net = LeNet5()
set_weights_lenet(net)
y = net(x)
expected = torch.Tensor([1., 1., 1., 1., 1., -1., -1., -1., -1., -1.])
print('y:', y)
print('expected:', expected)
assert torch.allclose(y, expected), "y does not match expected value."
print('Success')
def set_weights(module, weight):
if isinstance(module, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
if module.bias is not None:
module.bias.data.fill_(0)
module.weight.data.fill_(weight)
return
for child in module.children():
set_weights(child, weight)
def disable_batch_norm(module):
if isinstance(module, nn.BatchNorm2d):
module.momentum = 0
module.weight.data.fill_(1)
module.bias.data.fill_(0)
module.running_var.fill_(1)
module.running_mean.fill_(0)
return
for child in module.children():
disable_batch_norm(child)
def set_batch_norm(block):
for child in block.children():
if isinstance(child, nn.BatchNorm2d):
child.momentum = 0
child.weight.data.fill_(1)
child.bias.data.fill_(0)
child.running_var.fill_(0.25)
child.running_mean.fill_(0)
elif isinstance(child, nn.Sequential):
set_batch_norm(child)
def set_weights_vgg(net, n_channels):
for param in net.parameters():
if param.shape == torch.Size([n_channels, 1, 3, 3]):
param.data[:1] = 1/9
param.data[1:] = -1/9
elif param.shape == torch.Size([n_channels]):
param.data[:] = 0
elif param.shape == torch.Size([n_channels, n_channels, 3, 3]):
param.data[:1] = 1/9
param.data[1:] = -1/9
elif param.shape == torch.Size([2*n_channels, n_channels, 3, 3]):
param.data[:2] = 1/9
param.data[2:] = -1/9
elif param.shape == torch.Size([2*n_channels]):
param.data[:] = 0
elif param.shape == torch.Size([2*n_channels, 2*n_channels, 3, 3]):
param.data[:2] = 1/18
param.data[2:] = -1/18
elif param.shape == torch.Size([3*n_channels, 2*n_channels, 3, 3]):
param.data[:3] = 1/18
param.data[3:] = -1/18
elif param.shape == torch.Size([3*n_channels]):
param.data[:] = 0
elif param.shape == torch.Size([2*n_channels, 3*n_channels, 1, 1]):
param.data[:2] = 1/3
param.data[2:] = -1/3
elif param.shape == torch.Size([n_channels, 2*n_channels, 1, 1]):
param.data[:] = 1/2
param.data[1:] = -1/2
elif param.shape == torch.Size([10, n_channels]):
param.data[:5] = 1
param.data[5:] = -1
elif param.shape == torch.Size([10]):
param.data[:] = 0
def test_vgg_net(VGGNet):
x = torch.ones(1, 1, 28, 28)
x[0, 0, :14] = -1
n_channels = 16
net = VGGNet(n_channels)
net.eval()
set_weights_vgg(net, n_channels)
disable_batch_norm(net)
y = net(x)
expected = 8.0032 * torch.ones(10)
expected[5:] = -8.0032
print('y:', y)
print('expected:', expected)
assert torch.allclose(y, expected), "y does not match expected value."
print('Success')
def test_Block(Block):
# Simplest case
batch_size = 1
x = torch.ones(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=1)
block.eval()
disable_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 1, 3, 3]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[26, 36, 26],
[36, 50, 36],
[26, 36, 26]
]).reshape((batch_size, 1, 3, 3))
npt.assert_allclose(y, expected, atol=1e-03, err_msg="y does not match expected value.")
# Increase the number of channels
batch_size = 1
x = torch.ones(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=2)
block.eval()
disable_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 2, 3, 3]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[51, 71, 51],
[71, 99, 71],
[51, 71, 51]
]).reshape(1, 1, 3, 3)
expected = np.tile(expected, (1, 2, 1, 1))
npt.assert_allclose(y, expected, atol=1e-03, err_msg="y does not match expected value.")
# stride=2
batch_size = 1
x = torch.ones(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=1, stride=2)
block.eval()
disable_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 1, 2, 2]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[17, 17],
[17, 17],
]).reshape(1, 1, 2, 2)
npt.assert_allclose(y, expected, atol=1e-03, err_msg="y does not match expected value.")
# Increase the number of channels and stride=2
batch_size = 1
x = torch.ones(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=2, stride=2)
block.eval()
disable_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 2, 2, 2]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[33, 33],
[33, 33],
]).reshape(1, 1, 2, 2)
expected = np.tile(expected, (1, 2, 1, 1))
npt.assert_allclose(y, expected, atol=1e-03, err_msg="y does not match expected value.")
print('Success')
def test_Block_relu(Block):
# check relus
batch_size = 1
x = torch.tensor([
[-1., 1., -1.],
[1., -1., 1.],
[-1., 1., -1.],
]).view(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=1)
block.eval()
disable_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 1, 3, 3]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[0, 1, 0],
[1, 0, 1],
[0, 1, 0]
]).reshape(1, 1, 3, 3)
npt.assert_allclose(y, expected, err_msg="y does not match expected value.")
print('Success')
def test_Block_batch_norm(Block):
# Two batch norms in the non-skip part
batch_size = 1
x = torch.ones(batch_size, 1, 3, 3)
block = Block(in_channels=1, out_channels=1)
block.eval()
set_batch_norm(block)
set_weights(block, 1)
y = block(x)
assert y.shape == torch.Size([batch_size, 1, 3, 3]), "Bad shape of y: y.shape={}".format(y.shape)
y = y.cpu().data.numpy()
expected = np.array([
[101, 141, 101],
[141, 197, 141],
[101, 141, 101]
]).reshape((batch_size, 1, 3, 3))
npt.assert_allclose(y, expected, atol=1e-02, err_msg="y does not match expected value.")
print('Success')
| en | 0.752575 | # Simplest case # Increase the number of channels # stride=2 # Increase the number of channels and stride=2 # check relus # Two batch norms in the non-skip part | 2.360725 | 2 |
Rishav/cormen/Sorting/bubblesort.py | saisankargochhayat/algo_quest | 3 | 6613364 | class BubbleSort:
def __init__(self,a):
self.a = a
def result(self):
for i in range(len(self.a)):
for j in range(i+1,len(self.a)):
if self.a[i] > self.a[j]:
self.a[i],self.a[j] = self.a[j],self.a[i]
return self.a
| class BubbleSort:
def __init__(self,a):
self.a = a
def result(self):
for i in range(len(self.a)):
for j in range(i+1,len(self.a)):
if self.a[i] > self.a[j]:
self.a[i],self.a[j] = self.a[j],self.a[i]
return self.a
| none | 1 | 3.696391 | 4 | |
tests/workflowTests/oldFit.py | MarkTravers/magLabUtilities | 0 | 6613365 | <reponame>MarkTravers/magLabUtilities
#!python3
import numpy as np
from magLabUtilities.signalutilities.signals import SignalThread, Signal, SignalBundle
from magLabUtilities.datafileutilities.timeDomain import importFromXlsx
from magLabUtilities.signalutilities.interpolation import legendre
from magLabUtilities.signalutilities.hysteresis import HysteresisSignalBundle, XExpGedney060820
from magLabUtilities.optimizerutilities.costFunctions import rmsNdNorm
from magLabUtilities.optimizerutilities.testCases import GridNode
from magLabUtilities.optimizerutilities.gradientDescent import GradientDescent
from magLabUtilities.signalutilities.calculus import finiteDiffDerivative, integralIndexQuadrature
from magLabUtilities.uiutilities.plotting.hysteresis import MofHXofMPlotter
from datetime import datetime
import json
class CostEvaluator:
def __init__(self, dataFP, tuneHistoryFP):
self.fp = dataFP
self.refBundle = HysteresisSignalBundle(importFromXlsx(self.fp, '21k', 2, 'C,D', dataColumnNames=['H','M']))
self.refBundle.signals['M'].independentThread.data = legendre(self.refBundle.signals['M'].independentThread.data, integrationWindowSize=100, stepSize=25, legendreOrder=2)
self.refBundle.signals['M'].dependentThread.data = legendre(self.refBundle.signals['M'].dependentThread.data, integrationWindowSize=100, stepSize=25, legendreOrder=2)
self.refBundle.signals['H'].independentThread.data = legendre(self.refBundle.signals['H'].independentThread.data, integrationWindowSize=100, stepSize=25, legendreOrder=2)
self.refBundle.signals['H'].dependentThread.data = legendre(self.refBundle.signals['H'].dependentThread.data, integrationWindowSize=100, stepSize=25, legendreOrder=2)
self.pMAmpIndex = np.argmax(self.refBundle.signals['M'].independentThread.data[0:int(self.refBundle.signals['M'].independentThread.data.shape[0]/2)])
self.nMAmpIndex = np.argmin(self.refBundle.signals['M'].independentThread.data)
# Take the derivative of the data
xThread = SignalThread(finiteDiffDerivative( \
fNum=self.refBundle.signals['M'].independentThread.data, \
fDenom=self.refBundle.signals['H'].independentThread.data, \
windowRadius=1, \
discontinuousPoints=[self.pMAmpIndex, self.nMAmpIndex], \
differenceMode='centralDifference'))
self.refBundle.addSignal('X', Signal.fromThreadPair(xThread, self.refBundle.signals['M'].dependentThread))
self.tuneHistoryFP = tuneHistoryFP
self.plotter = MofHXofMPlotter()
self.plotter.addMofHPlot(self.refBundle, 'Data')
self.plotter.addXofMPlot(self.refBundle, 'Data')
def runCostFunction(self, gridNode:GridNode) -> GridNode:
hCoercive = gridNode.coordList[0]
xInit = gridNode.coordList[1]
mSat = gridNode.coordList[2]
hCoop = gridNode.coordList[3]
hAnh = gridNode.coordList[4]
xcPow = gridNode.coordList[5]
virginGen = XExpGedney060820(xInit=xInit, hCoercive=hCoercive, mSat=mSat, hCoop=hCoop, hAnh=hAnh, xcPow=xcPow, mRev=-2500.0, virginMTolerance=10000)
pRevGen = XExpGedney060820(xInit=xInit, hCoercive=hCoercive, mSat=mSat, hCoop=hCoop, hAnh=hAnh, xcPow=xcPow, mRev=self.refBundle.signals['M'].independentThread.data[self.pMAmpIndex], virginMTolerance=10000)
nRevGen = XExpGedney060820(xInit=xInit, hCoercive=hCoercive, mSat=mSat, hCoop=hCoop, hAnh=hAnh, xcPow=xcPow, mRev=self.refBundle.signals['M'].independentThread.data[self.nMAmpIndex], virginMTolerance=10000)
virginM = Signal.fromThreadPair(SignalThread(self.refBundle.signals['M'].independentThread.data[0:self.pMAmpIndex]), SignalThread(self.refBundle.signals['M'].dependentThread.data[0:self.pMAmpIndex]))
virginX = virginGen.evaluate(mSignal=virginM)
pRevM = Signal.fromThreadPair(SignalThread(self.refBundle.signals['M'].independentThread.data[self.pMAmpIndex:self.nMAmpIndex]), SignalThread(self.refBundle.signals['M'].dependentThread.data[self.pMAmpIndex:self.nMAmpIndex]))
pRevX = pRevGen.evaluate(mSignal=pRevM)
nRevM = Signal.fromThreadPair(SignalThread(self.refBundle.signals['M'].independentThread.data[self.nMAmpIndex:]), SignalThread(self.refBundle.signals['M'].dependentThread.data[self.nMAmpIndex:]))
nRevX = nRevGen.evaluate(mSignal=nRevM)
testBundle = HysteresisSignalBundle.fromSignalBundleSequence([virginX, pRevX, nRevX])
# Take the integral of the model
hThread = SignalThread(integralIndexQuadrature(1.0 / testBundle.signals['X'].independentThread.data, testBundle.signals['M'].independentThread.data))
testBundle.addSignal('H', Signal.fromThreadPair(hThread, testBundle.signals['M'].dependentThread))
refMatrix = self.refBundle.sample(tThread=self.refBundle.signals['M'].dependentThread, signalInterpList=[('M','nearestPoint'),('H','nearestPoint')])
testMatrix = testBundle.sample(tThread=self.refBundle.signals['M'].dependentThread, signalInterpList=[('M','nearestPoint'),('H','nearestPoint')])
tWeightMatrix = np.vstack([self.refBundle.signals['M'].dependentThread.data, np.hstack([np.zeros(5), np.ones((195)), np.ones((800))])])
gridNode.loss = rmsNdNorm(refMatrix, testMatrix, tWeightMatrix)
gridNode.data = testBundle
return gridNode
def gradientStep(self, newCenterGridNode):
self.plotter.addMofHPlot(newCenterGridNode.data, 'Model')
self.plotter.addXofMPlot(newCenterGridNode.data, 'Model')
with open(tuneHistoryFP, 'a') as tuneHistoryFile:
tuneHistoryFile.write(str(datetime.fromtimestamp(datetime.timestamp(datetime.now()))) + '\n')
tuneHistoryFile.write(str(newCenterGridNode.coordList) + '\n')
tuneHistoryFile.write('Error: %s\n' % str(newCenterGridNode.loss))
# tuneHistoryFile.write(json.dumps(newCenterGridNode.data) + '\n')
print(newCenterGridNode.loss)
print('Switching to node: %s' % str(newCenterGridNode.coordList))
if __name__ == '__main__':
parameterDefs = {
'hCoercive': {'initialValue':605.0, 'limits':[0.0,10000.0]},
'xInit': {'initialValue':61.25, 'limits':[60.0,90.0]},
'mSat': {'initialValue':1.65e6, 'limits':[1.0e6,2.0e6]},
'hCoop': {'initialValue':1190.0, 'limits':[100.0,10000.0]},
'hAnh': {'initialValue':5200.0, 'limits':[100.0,10000.0]},
'xcPow': {'initialValue':2.0, 'limits':[0.0,10.0]}
}
gradientDescentConfig = {
'hCoercive': {'localNeighborSteps':[-15.0, 0.0, 15.0]},
'xInit': {'localNeighborSteps':[-0.25, 0.0, 0.25]},
'mSat': {'localNeighborSteps':[0.001e6, 0.0, 0.001e6]},
'hCoop': {'localNeighborSteps':[-15.0, 0.0, 15.0]},
'hAnh': {'localNeighborSteps':[-15.0, 0.0, 15.0]},
'xcPow': {'localNeighborSteps':[-0.1, 0.0, 0.1]}
}
fp = './tests/workflowTests/datafiles/testLoops.xlsx'
tuneHistoryFP = './tests/workflowTests/datafiles/tuneHistory00.txt'
costEvaluator = CostEvaluator(fp, tuneHistoryFP)
tuner = GradientDescent(parameterList, costEvaluator.runCostFunction, costEvaluator.gradientStep)
tuner.tune(numIterations=np.infty, maxThreads=8)
print('done')
| #!python3
import numpy as np
from magLabUtilities.signalutilities.signals import SignalThread, Signal, SignalBundle
from magLabUtilities.datafileutilities.timeDomain import importFromXlsx
from magLabUtilities.signalutilities.interpolation import legendre
from magLabUtilities.signalutilities.hysteresis import HysteresisSignalBundle, XExpGedney060820
from magLabUtilities.optimizerutilities.costFunctions import rmsNdNorm
from magLabUtilities.optimizerutilities.testCases import GridNode
from magLabUtilities.optimizerutilities.gradientDescent import GradientDescent
from magLabUtilities.signalutilities.calculus import finiteDiffDerivative, integralIndexQuadrature
from magLabUtilities.uiutilities.plotting.hysteresis import MofHXofMPlotter
from datetime import datetime
import json
class CostEvaluator:
def __init__(self, dataFP, tuneHistoryFP):
self.fp = dataFP
self.refBundle = HysteresisSignalBundle(importFromXlsx(self.fp, '21k', 2, 'C,D', dataColumnNames=['H','M']))
self.refBundle.signals['M'].independentThread.data = legendre(self.refBundle.signals['M'].independentThread.data, integrationWindowSize=100, stepSize=25, legendreOrder=2)
self.refBundle.signals['M'].dependentThread.data = legendre(self.refBundle.signals['M'].dependentThread.data, integrationWindowSize=100, stepSize=25, legendreOrder=2)
self.refBundle.signals['H'].independentThread.data = legendre(self.refBundle.signals['H'].independentThread.data, integrationWindowSize=100, stepSize=25, legendreOrder=2)
self.refBundle.signals['H'].dependentThread.data = legendre(self.refBundle.signals['H'].dependentThread.data, integrationWindowSize=100, stepSize=25, legendreOrder=2)
self.pMAmpIndex = np.argmax(self.refBundle.signals['M'].independentThread.data[0:int(self.refBundle.signals['M'].independentThread.data.shape[0]/2)])
self.nMAmpIndex = np.argmin(self.refBundle.signals['M'].independentThread.data)
# Take the derivative of the data
xThread = SignalThread(finiteDiffDerivative( \
fNum=self.refBundle.signals['M'].independentThread.data, \
fDenom=self.refBundle.signals['H'].independentThread.data, \
windowRadius=1, \
discontinuousPoints=[self.pMAmpIndex, self.nMAmpIndex], \
differenceMode='centralDifference'))
self.refBundle.addSignal('X', Signal.fromThreadPair(xThread, self.refBundle.signals['M'].dependentThread))
self.tuneHistoryFP = tuneHistoryFP
self.plotter = MofHXofMPlotter()
self.plotter.addMofHPlot(self.refBundle, 'Data')
self.plotter.addXofMPlot(self.refBundle, 'Data')
def runCostFunction(self, gridNode:GridNode) -> GridNode:
hCoercive = gridNode.coordList[0]
xInit = gridNode.coordList[1]
mSat = gridNode.coordList[2]
hCoop = gridNode.coordList[3]
hAnh = gridNode.coordList[4]
xcPow = gridNode.coordList[5]
virginGen = XExpGedney060820(xInit=xInit, hCoercive=hCoercive, mSat=mSat, hCoop=hCoop, hAnh=hAnh, xcPow=xcPow, mRev=-2500.0, virginMTolerance=10000)
pRevGen = XExpGedney060820(xInit=xInit, hCoercive=hCoercive, mSat=mSat, hCoop=hCoop, hAnh=hAnh, xcPow=xcPow, mRev=self.refBundle.signals['M'].independentThread.data[self.pMAmpIndex], virginMTolerance=10000)
nRevGen = XExpGedney060820(xInit=xInit, hCoercive=hCoercive, mSat=mSat, hCoop=hCoop, hAnh=hAnh, xcPow=xcPow, mRev=self.refBundle.signals['M'].independentThread.data[self.nMAmpIndex], virginMTolerance=10000)
virginM = Signal.fromThreadPair(SignalThread(self.refBundle.signals['M'].independentThread.data[0:self.pMAmpIndex]), SignalThread(self.refBundle.signals['M'].dependentThread.data[0:self.pMAmpIndex]))
virginX = virginGen.evaluate(mSignal=virginM)
pRevM = Signal.fromThreadPair(SignalThread(self.refBundle.signals['M'].independentThread.data[self.pMAmpIndex:self.nMAmpIndex]), SignalThread(self.refBundle.signals['M'].dependentThread.data[self.pMAmpIndex:self.nMAmpIndex]))
pRevX = pRevGen.evaluate(mSignal=pRevM)
nRevM = Signal.fromThreadPair(SignalThread(self.refBundle.signals['M'].independentThread.data[self.nMAmpIndex:]), SignalThread(self.refBundle.signals['M'].dependentThread.data[self.nMAmpIndex:]))
nRevX = nRevGen.evaluate(mSignal=nRevM)
testBundle = HysteresisSignalBundle.fromSignalBundleSequence([virginX, pRevX, nRevX])
# Take the integral of the model
hThread = SignalThread(integralIndexQuadrature(1.0 / testBundle.signals['X'].independentThread.data, testBundle.signals['M'].independentThread.data))
testBundle.addSignal('H', Signal.fromThreadPair(hThread, testBundle.signals['M'].dependentThread))
refMatrix = self.refBundle.sample(tThread=self.refBundle.signals['M'].dependentThread, signalInterpList=[('M','nearestPoint'),('H','nearestPoint')])
testMatrix = testBundle.sample(tThread=self.refBundle.signals['M'].dependentThread, signalInterpList=[('M','nearestPoint'),('H','nearestPoint')])
tWeightMatrix = np.vstack([self.refBundle.signals['M'].dependentThread.data, np.hstack([np.zeros(5), np.ones((195)), np.ones((800))])])
gridNode.loss = rmsNdNorm(refMatrix, testMatrix, tWeightMatrix)
gridNode.data = testBundle
return gridNode
def gradientStep(self, newCenterGridNode):
self.plotter.addMofHPlot(newCenterGridNode.data, 'Model')
self.plotter.addXofMPlot(newCenterGridNode.data, 'Model')
with open(tuneHistoryFP, 'a') as tuneHistoryFile:
tuneHistoryFile.write(str(datetime.fromtimestamp(datetime.timestamp(datetime.now()))) + '\n')
tuneHistoryFile.write(str(newCenterGridNode.coordList) + '\n')
tuneHistoryFile.write('Error: %s\n' % str(newCenterGridNode.loss))
# tuneHistoryFile.write(json.dumps(newCenterGridNode.data) + '\n')
print(newCenterGridNode.loss)
print('Switching to node: %s' % str(newCenterGridNode.coordList))
if __name__ == '__main__':
parameterDefs = {
'hCoercive': {'initialValue':605.0, 'limits':[0.0,10000.0]},
'xInit': {'initialValue':61.25, 'limits':[60.0,90.0]},
'mSat': {'initialValue':1.65e6, 'limits':[1.0e6,2.0e6]},
'hCoop': {'initialValue':1190.0, 'limits':[100.0,10000.0]},
'hAnh': {'initialValue':5200.0, 'limits':[100.0,10000.0]},
'xcPow': {'initialValue':2.0, 'limits':[0.0,10.0]}
}
gradientDescentConfig = {
'hCoercive': {'localNeighborSteps':[-15.0, 0.0, 15.0]},
'xInit': {'localNeighborSteps':[-0.25, 0.0, 0.25]},
'mSat': {'localNeighborSteps':[0.001e6, 0.0, 0.001e6]},
'hCoop': {'localNeighborSteps':[-15.0, 0.0, 15.0]},
'hAnh': {'localNeighborSteps':[-15.0, 0.0, 15.0]},
'xcPow': {'localNeighborSteps':[-0.1, 0.0, 0.1]}
}
fp = './tests/workflowTests/datafiles/testLoops.xlsx'
tuneHistoryFP = './tests/workflowTests/datafiles/tuneHistory00.txt'
costEvaluator = CostEvaluator(fp, tuneHistoryFP)
tuner = GradientDescent(parameterList, costEvaluator.runCostFunction, costEvaluator.gradientStep)
tuner.tune(numIterations=np.infty, maxThreads=8)
print('done') | en | 0.518156 | #!python3 # Take the derivative of the data # Take the integral of the model # tuneHistoryFile.write(json.dumps(newCenterGridNode.data) + '\n') | 1.773846 | 2 |
python/testData/findUsages/NonGlobalUsages.py | jnthn/intellij-community | 2 | 6613366 | <caret>a = 0
def b():
a = 1
print a
| <caret>a = 0
def b():
a = 1
print a
| none | 1 | 1.835614 | 2 | |
face_enhancer/enhance.py | Rainweic/dancingycy | 0 | 6613367 | <reponame>Rainweic/dancingycy<filename>face_enhancer/enhance.py<gh_stars>0
import model
import dataset
import cv2
from trainer import Trainer
import os
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
from skimage.io import imsave
from imageio import get_writer
from pathlib import Path
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
image_transforms = transforms.Compose([
Image.fromarray,
transforms.ToTensor(),
transforms.Normalize([.5, .5, .5], [.5, .5, .5]),
])
device = torch.device('cuda')
def load_models(directory):
generator = model.GlobalGenerator(n_downsampling=2, n_blocks=6)
gen_name = os.path.join(directory, 'final_generator.pth')
if os.path.isfile(gen_name):
gen_dict = torch.load(gen_name)
generator.load_state_dict(gen_dict)
return generator.to(device)
def torch2numpy(tensor):
generated = tensor.detach().cpu().permute(1, 2, 0).numpy()
generated[generated < -1] = -1
generated[generated > 1] = 1
generated = (generated + 1) / 2 * 255
return generated.astype(np.uint8)
def get_full_sample(fake_img, head_pos):
crop_size = 48
size = fake_img.shape[:-1]
left = int(head_pos[0] - crop_size / 2) # don't suppose left will go out of bound eh?
left = left if left >= 0 else 0
left = size[1] - crop_size if left + crop_size > size[1] else left
top = int(head_pos[1] - crop_size / 2)
top = top if top >= 0 else 0
top = size[0] - crop_size if top + crop_size > size[0] else top
fake_head = image_transforms(fake_img[top: top + crop_size, left: left + crop_size, :])
return fake_head, top, top + crop_size, left, left + crop_size
if __name__ == '__main__':
torch.backends.cudnn.benchmark = True
dataset_dir = '../data/face' # save test_sync in this folder
pose_name = '../data/source/pose_source_norm.npy' # coordinate save every heads
ckpt_dir = '../checkpoints/face'
result_dir = './results'
save_dir = '../results/full_fake/'
target_dir = Path('../results/target/test_latest/images')
target_synth_paths = sorted(target_dir.glob('*synthesized*'))
if not os.path.exists(save_dir):
print('generate %s'%save_dir)
os.mkdir(save_dir)
else:
print(save_dir, 'is existing...')
image_folder = dataset.ImageFolderDataset(dataset_dir, cache=os.path.join(dataset_dir, 'local.db'), is_test=True)
face_dataset = dataset.FaceCropDataset(image_folder, pose_name, image_transforms, crop_size=48)
pose = np.load(pose_name)
length = len(face_dataset)
print('Picture number',len(target_synth_paths))
generator = load_models(os.path.join(ckpt_dir))
for i in tqdm(range(len(target_synth_paths))):#length
print(pose[i])
fake_full = cv2.imread(str(target_synth_paths[i]))
fake_head, top, bottom, left, right = get_full_sample(fake_full, pose[i])
with torch.no_grad():
fake_head.unsqueeze_(0)
fake_head = fake_head.to(device)
residual = generator(fake_head)
enhanced = fake_head + residual
enhanced.squeeze_()
enhanced = torch2numpy(enhanced)
fake_full_old = fake_full.copy()
fake_full[top: bottom, left: right, :] = enhanced
b, g, r = cv2.split(fake_full)
fake_full = cv2.merge([r, g, b])
cv2.imwrite(save_dir+ '{:05}.png'.format(i),fake_full[:,:,::-1])
| import model
import dataset
import cv2
from trainer import Trainer
import os
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
from skimage.io import imsave
from imageio import get_writer
from pathlib import Path
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
image_transforms = transforms.Compose([
Image.fromarray,
transforms.ToTensor(),
transforms.Normalize([.5, .5, .5], [.5, .5, .5]),
])
device = torch.device('cuda')
def load_models(directory):
generator = model.GlobalGenerator(n_downsampling=2, n_blocks=6)
gen_name = os.path.join(directory, 'final_generator.pth')
if os.path.isfile(gen_name):
gen_dict = torch.load(gen_name)
generator.load_state_dict(gen_dict)
return generator.to(device)
def torch2numpy(tensor):
generated = tensor.detach().cpu().permute(1, 2, 0).numpy()
generated[generated < -1] = -1
generated[generated > 1] = 1
generated = (generated + 1) / 2 * 255
return generated.astype(np.uint8)
def get_full_sample(fake_img, head_pos):
crop_size = 48
size = fake_img.shape[:-1]
left = int(head_pos[0] - crop_size / 2) # don't suppose left will go out of bound eh?
left = left if left >= 0 else 0
left = size[1] - crop_size if left + crop_size > size[1] else left
top = int(head_pos[1] - crop_size / 2)
top = top if top >= 0 else 0
top = size[0] - crop_size if top + crop_size > size[0] else top
fake_head = image_transforms(fake_img[top: top + crop_size, left: left + crop_size, :])
return fake_head, top, top + crop_size, left, left + crop_size
if __name__ == '__main__':
torch.backends.cudnn.benchmark = True
dataset_dir = '../data/face' # save test_sync in this folder
pose_name = '../data/source/pose_source_norm.npy' # coordinate save every heads
ckpt_dir = '../checkpoints/face'
result_dir = './results'
save_dir = '../results/full_fake/'
target_dir = Path('../results/target/test_latest/images')
target_synth_paths = sorted(target_dir.glob('*synthesized*'))
if not os.path.exists(save_dir):
print('generate %s'%save_dir)
os.mkdir(save_dir)
else:
print(save_dir, 'is existing...')
image_folder = dataset.ImageFolderDataset(dataset_dir, cache=os.path.join(dataset_dir, 'local.db'), is_test=True)
face_dataset = dataset.FaceCropDataset(image_folder, pose_name, image_transforms, crop_size=48)
pose = np.load(pose_name)
length = len(face_dataset)
print('Picture number',len(target_synth_paths))
generator = load_models(os.path.join(ckpt_dir))
for i in tqdm(range(len(target_synth_paths))):#length
print(pose[i])
fake_full = cv2.imread(str(target_synth_paths[i]))
fake_head, top, bottom, left, right = get_full_sample(fake_full, pose[i])
with torch.no_grad():
fake_head.unsqueeze_(0)
fake_head = fake_head.to(device)
residual = generator(fake_head)
enhanced = fake_head + residual
enhanced.squeeze_()
enhanced = torch2numpy(enhanced)
fake_full_old = fake_full.copy()
fake_full[top: bottom, left: right, :] = enhanced
b, g, r = cv2.split(fake_full)
fake_full = cv2.merge([r, g, b])
cv2.imwrite(save_dir+ '{:05}.png'.format(i),fake_full[:,:,::-1]) | en | 0.891454 | # don't suppose left will go out of bound eh? # save test_sync in this folder # coordinate save every heads #length | 2.117274 | 2 |
rest_example/applications/views.py | narnikgamarnikus/rest_example | 0 | 6613368 | <filename>rest_example/applications/views.py<gh_stars>0
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.mixins import (
CreateModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
DestroyModelMixin,
)
from rest_framework.viewsets import GenericViewSet
from rest_framework.status import HTTP_200_OK
from rest_framework.response import Response
from rest_example.applications.models import Application
from rest_example.applications.serializers import ApplicationSerializer
from rest_example.applications.permissions import ApplicationAPIKeyPermission
class TestApplicationView(APIView):
"""
View to testing applications app response.
* Requires api_key authentication.
* Only authorized users are able to access this view.
"""
permission_classes = [ApplicationAPIKeyPermission]
def get(self, request, format=None):
"""
Return application by api_key in HEADERS.
"""
api_key = request.META.get("HTTP_API_KEY", None)
obj = get_object_or_404(Application, api_key=api_key)
serializer = ApplicationSerializer(obj)
return Response(serializer.data, status=HTTP_200_OK)
class ApplicationViewSet(
GenericViewSet,
CreateModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
DestroyModelMixin,
):
"""
A viewset that provides `retrieve`, `create`, 'update' actions.
"""
queryset = Application.objects.all()
serializer_class = ApplicationSerializer
permission_classes = [ApplicationAPIKeyPermission]
def get_object(self):
api_key = self.request.META.get("HTTP_API_KEY", None)
obj = get_object_or_404(Application, api_key=api_key)
self.check_object_permissions(self.request, obj)
return obj
def list(self, request):
obj = self.get_object()
serializer = ApplicationSerializer(obj)
return Response(serializer.data, status=HTTP_200_OK)
| <filename>rest_example/applications/views.py<gh_stars>0
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.mixins import (
CreateModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
DestroyModelMixin,
)
from rest_framework.viewsets import GenericViewSet
from rest_framework.status import HTTP_200_OK
from rest_framework.response import Response
from rest_example.applications.models import Application
from rest_example.applications.serializers import ApplicationSerializer
from rest_example.applications.permissions import ApplicationAPIKeyPermission
class TestApplicationView(APIView):
"""
View to testing applications app response.
* Requires api_key authentication.
* Only authorized users are able to access this view.
"""
permission_classes = [ApplicationAPIKeyPermission]
def get(self, request, format=None):
"""
Return application by api_key in HEADERS.
"""
api_key = request.META.get("HTTP_API_KEY", None)
obj = get_object_or_404(Application, api_key=api_key)
serializer = ApplicationSerializer(obj)
return Response(serializer.data, status=HTTP_200_OK)
class ApplicationViewSet(
GenericViewSet,
CreateModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
DestroyModelMixin,
):
"""
A viewset that provides `retrieve`, `create`, 'update' actions.
"""
queryset = Application.objects.all()
serializer_class = ApplicationSerializer
permission_classes = [ApplicationAPIKeyPermission]
def get_object(self):
api_key = self.request.META.get("HTTP_API_KEY", None)
obj = get_object_or_404(Application, api_key=api_key)
self.check_object_permissions(self.request, obj)
return obj
def list(self, request):
obj = self.get_object()
serializer = ApplicationSerializer(obj)
return Response(serializer.data, status=HTTP_200_OK)
| en | 0.747697 | View to testing applications app response. * Requires api_key authentication. * Only authorized users are able to access this view. Return application by api_key in HEADERS. A viewset that provides `retrieve`, `create`, 'update' actions. | 2.238823 | 2 |
ojo/worker.py | ofreshy/ojo | 0 | 6613369 | import logging
def make_worker(name, service, work_q, done_q, error_q):
"""
Closure to make a worker with given params
:param name: worker name for context, e.g. worker-rar-1
:param service: the service that is used for doing the job
:param work_q: q to get work from
:param done_q: q to put done jobs on
:param error_q: q for jobs that terminated with error
:return: worker with given params, ready to consume jobs
"""
def worker():
while True:
job = work_q.get()
try:
if job is None:
logging.info("{name} is done! ".format(name=name))
break
logging.info("{name} got job {job} ".format(name=name, job=job))
done_job = service.do_job(job)
if done_job.has_error():
logging.error("{name} done job with error {job} ".format(name=name, job=done_job))
error_q.put(done_job)
else:
done_q.put(done_job)
logging.info("{name} done job successfully {job} ".format(name=name, job=job))
except Exception as e:
logging.error("{name} got exception from job {e} ".format(name=name, job=job, e=e))
job.add_error(e)
error_q.put(job)
finally:
work_q.task_done()
return worker
def make_error_worker(name, error_service, error_q):
"""
Closure to make the end of line worker - for jobs the terminated with errors
:param name: for context, such as error-worker-1
:param error_service: the error service that handles the errors
:param error_q: to consume jobs from
:return: error worker ready to work
"""
def worker():
while True:
job = error_q.get()
try:
if job is None:
logging.info("{name} is done! ".format(name=name))
break
logging.info("{name} got job {job} ".format(name=name, job=job))
error_service.do_job(job)
except Exception as e:
logging.error("{name} got exception from job {e} ".format(name=name, job=job, e=e))
finally:
error_q.task_done()
return worker
| import logging
def make_worker(name, service, work_q, done_q, error_q):
"""
Closure to make a worker with given params
:param name: worker name for context, e.g. worker-rar-1
:param service: the service that is used for doing the job
:param work_q: q to get work from
:param done_q: q to put done jobs on
:param error_q: q for jobs that terminated with error
:return: worker with given params, ready to consume jobs
"""
def worker():
while True:
job = work_q.get()
try:
if job is None:
logging.info("{name} is done! ".format(name=name))
break
logging.info("{name} got job {job} ".format(name=name, job=job))
done_job = service.do_job(job)
if done_job.has_error():
logging.error("{name} done job with error {job} ".format(name=name, job=done_job))
error_q.put(done_job)
else:
done_q.put(done_job)
logging.info("{name} done job successfully {job} ".format(name=name, job=job))
except Exception as e:
logging.error("{name} got exception from job {e} ".format(name=name, job=job, e=e))
job.add_error(e)
error_q.put(job)
finally:
work_q.task_done()
return worker
def make_error_worker(name, error_service, error_q):
"""
Closure to make the end of line worker - for jobs the terminated with errors
:param name: for context, such as error-worker-1
:param error_service: the error service that handles the errors
:param error_q: to consume jobs from
:return: error worker ready to work
"""
def worker():
while True:
job = error_q.get()
try:
if job is None:
logging.info("{name} is done! ".format(name=name))
break
logging.info("{name} got job {job} ".format(name=name, job=job))
error_service.do_job(job)
except Exception as e:
logging.error("{name} got exception from job {e} ".format(name=name, job=job, e=e))
finally:
error_q.task_done()
return worker
| en | 0.89252 | Closure to make a worker with given params :param name: worker name for context, e.g. worker-rar-1 :param service: the service that is used for doing the job :param work_q: q to get work from :param done_q: q to put done jobs on :param error_q: q for jobs that terminated with error :return: worker with given params, ready to consume jobs Closure to make the end of line worker - for jobs the terminated with errors :param name: for context, such as error-worker-1 :param error_service: the error service that handles the errors :param error_q: to consume jobs from :return: error worker ready to work | 3.527053 | 4 |
trailblazer/exc.py | jemten/trailblazer | 0 | 6613370 | <reponame>jemten/trailblazer
# -*- coding: utf-8 -*-
class TrailblazerError(Exception):
def __init__(self, message):
self.message = message
class MissingFileError(TrailblazerError):
pass
class MipStartError(TrailblazerError):
pass
class ConfigError(TrailblazerError):
def __init__(self, message, errors=None):
self.message = message
self.errors = errors
| # -*- coding: utf-8 -*-
class TrailblazerError(Exception):
def __init__(self, message):
self.message = message
class MissingFileError(TrailblazerError):
pass
class MipStartError(TrailblazerError):
pass
class ConfigError(TrailblazerError):
def __init__(self, message, errors=None):
self.message = message
self.errors = errors | en | 0.769321 | # -*- coding: utf-8 -*- | 2.307053 | 2 |
tests/test_grizzly/steps/scenario/test_response.py | boffman/grizzly | 0 | 6613371 | <filename>tests/test_grizzly/steps/scenario/test_response.py
from typing import List, cast
import pytest
from parse import compile
from behave.model import Table, Row
from grizzly.context import GrizzlyContext
from grizzly.types import RequestMethod, ResponseTarget
from grizzly.tasks import RequestTask, WaitTask
from grizzly.steps import * # pylint: disable=unused-wildcard-import # noqa: F403
from grizzly_extras.transformer import TransformerContentType
from ....fixtures import GrizzlyFixture, BehaveFixture
def test_parse_negative() -> None:
p = compile(
'value {condition:Condition} world',
extra_types=dict(
Condition=parse_condition,
),
)
assert p.parse('value is world')['condition']
assert not p.parse('value is not world')['condition']
assert p.parse('value equals world') is None
def test_parse_response_target() -> None:
p = compile(
'save response {target:ResponseTarget}',
extra_types=dict(
ResponseTarget=parse_response_target,
),
)
assert p.parse('save response metadata')['target'] == ResponseTarget.METADATA
assert p.parse('save response payload')['target'] == ResponseTarget.PAYLOAD
assert p.parse('save response test') is None
with pytest.raises(ValueError):
parse_response_target('asdf')
def test_parse_response_content_type() -> None:
p = compile(
'content type is "{content_type:TransformerContentType}"',
extra_types=dict(
TransformerContentType=TransformerContentType.from_string,
),
)
tests = [
(TransformerContentType.JSON, ['json', 'application/json']),
(TransformerContentType.XML, ['xml', 'application/xml']),
(TransformerContentType.PLAIN, ['plain', 'text/plain']),
]
for test_type, values in tests:
for value in values:
assert p.parse(f'content type is "{value}"')['content_type'] == test_type
with pytest.raises(ValueError) as e:
p.parse('content type is "image/png"')
assert 'is an unknown response content type' in str(e)
def test_step_response_save_matches_metadata(grizzly_fixture: GrizzlyFixture) -> None:
behave = grizzly_fixture.behave
grizzly = grizzly_fixture.grizzly
request = cast(RequestTask, grizzly.scenario.tasks[0])
with pytest.raises(ValueError) as ve:
step_response_save_matches(behave, ResponseTarget.METADATA, '', '', '')
assert 'variable "" has not been declared' in str(ve)
assert len(request.response.handlers.metadata) == 0
assert len(request.response.handlers.payload) == 0
with pytest.raises(ValueError) as ve:
step_response_save_matches(behave, ResponseTarget.METADATA, '$.test.value', '.*ary$', 'test')
assert 'variable "test" has not been declared' in str(ve)
try:
grizzly.state.variables['test'] = 'none'
with pytest.raises(ValueError) as ve:
step_response_save_matches(behave, ResponseTarget.METADATA, '$.test.value', '.*ary$', 'test')
assert 'content type is not set for latest request' in str(ve)
request.response.content_type = TransformerContentType.JSON
step_response_save_matches(behave, ResponseTarget.METADATA, '$.test.value', '.*ary$', 'test')
assert len(request.response.handlers.metadata) == 1
assert len(request.response.handlers.payload) == 0
finally:
request.response.content_type = TransformerContentType.UNDEFINED
del grizzly.state.variables['test']
def test_step_response_save_matches_payload(grizzly_fixture: GrizzlyFixture) -> None:
behave = grizzly_fixture.behave
grizzly = cast(GrizzlyContext, behave.grizzly)
request = cast(RequestTask, grizzly.scenario.tasks[0])
with pytest.raises(ValueError) as ve:
step_response_save_matches(behave, ResponseTarget.PAYLOAD, '', '', '')
assert 'variable "" has not been declared' in str(ve)
assert len(request.response.handlers.metadata) == 0
assert len(request.response.handlers.payload) == 0
with pytest.raises(ValueError) as ve:
step_response_save_matches(behave, ResponseTarget.PAYLOAD, '$.test.value', '.*ary$', 'test')
assert 'variable "test" has not been declared' in str(ve)
try:
grizzly.state.variables['test'] = 'none'
with pytest.raises(ValueError) as ve:
step_response_save_matches(behave, ResponseTarget.PAYLOAD, '$.test.value', '.*ary$', 'test')
assert 'content type is not set for latest request' in str(ve)
request.response.content_type = TransformerContentType.JSON
step_response_save_matches(behave, ResponseTarget.PAYLOAD, '$.test.value', '.*ary$', 'test')
assert len(request.response.handlers.metadata) == 0
assert len(request.response.handlers.payload) == 1
finally:
request.response.content_type = TransformerContentType.UNDEFINED
del grizzly.state.variables['test']
def test_step_response_save_metadata(grizzly_fixture: GrizzlyFixture) -> None:
    """step_response_save on METADATA: requires a declared variable and a request
    content type before a metadata handler is registered."""
    behave = grizzly_fixture.behave
    grizzly = cast(GrizzlyContext, behave.grizzly)
    request = cast(RequestTask, grizzly.scenario.tasks[0])

    # empty variable name -> rejected, no handlers registered
    with pytest.raises(ValueError) as ve:
        step_response_save(behave, ResponseTarget.METADATA, '', '')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'variable "" has not been declared' in str(ve.value)

    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 0

    # undeclared variable -> rejected
    with pytest.raises(ValueError) as ve:
        step_response_save(behave, ResponseTarget.METADATA, '$.test.value', 'test')
    assert 'variable "test" has not been declared' in str(ve.value)

    try:
        grizzly.state.variables['test'] = 'none'

        # declared variable, but no content type on the request -> rejected
        with pytest.raises(ValueError) as ve:
            step_response_save(behave, ResponseTarget.METADATA, '$.test.value', 'test')
        assert 'content type is not set for latest request' in str(ve.value)

        # with a content type set, exactly one metadata handler is registered
        request.response.content_type = TransformerContentType.JSON
        step_response_save(behave, ResponseTarget.METADATA, '$.test.value', 'test')
        assert len(request.response.handlers.metadata) == 1
        assert len(request.response.handlers.payload) == 0
    finally:
        # restore global state so other tests are unaffected
        request.response.content_type = TransformerContentType.UNDEFINED
        del grizzly.state.variables['test']
def test_step_response_save_payload(grizzly_fixture: GrizzlyFixture) -> None:
    """step_response_save on PAYLOAD: requires a declared variable and a request
    content type before a payload handler is registered."""
    behave = grizzly_fixture.behave
    grizzly = cast(GrizzlyContext, behave.grizzly)
    request = cast(RequestTask, grizzly.scenario.tasks[0])

    # empty variable name -> rejected, no handlers registered
    with pytest.raises(ValueError) as ve:
        step_response_save(behave, ResponseTarget.PAYLOAD, '', '')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'variable "" has not been declared' in str(ve.value)

    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 0

    # undeclared variable -> rejected
    with pytest.raises(ValueError) as ve:
        step_response_save(behave, ResponseTarget.PAYLOAD, '$.test.value', 'test')
    assert 'variable "test" has not been declared' in str(ve.value)

    try:
        grizzly.state.variables['test'] = 'none'

        # declared variable, but no content type on the request -> rejected
        with pytest.raises(ValueError) as ve:
            step_response_save(behave, ResponseTarget.PAYLOAD, '$.test.value', 'test')
        assert 'content type is not set for latest request' in str(ve.value)

        # with a content type set, exactly one payload handler is registered
        request.response.content_type = TransformerContentType.JSON
        step_response_save(behave, ResponseTarget.PAYLOAD, '$.test.value', 'test')
        assert len(request.response.handlers.metadata) == 0
        assert len(request.response.handlers.payload) == 1
    finally:
        # restore global state so other tests are unaffected
        request.response.content_type = TransformerContentType.UNDEFINED
        del grizzly.state.variables['test']
def test_step_response_validate_metadata(grizzly_fixture: GrizzlyFixture) -> None:
    """step_response_validate on METADATA: rejects an empty expression and needs a
    content type before a metadata handler is registered."""
    behave = grizzly_fixture.behave
    grizzly = cast(GrizzlyContext, behave.grizzly)
    request = cast(RequestTask, grizzly.scenario.tasks[0])

    with pytest.raises(ValueError) as ve:
        step_response_validate(behave, ResponseTarget.METADATA, '', True, '')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'expression is empty' in str(ve.value)
    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 0

    # a content type is required before a handler can be registered
    with pytest.raises(ValueError) as ve:
        step_response_validate(behave, ResponseTarget.METADATA, '$.test.value', True, '.*test')
    assert 'content type is not set for latest request' in str(ve.value)

    request.response.content_type = TransformerContentType.JSON
    step_response_validate(behave, ResponseTarget.METADATA, '$.test.value', True, '.*test')
    assert len(request.response.handlers.metadata) == 1
    assert len(request.response.handlers.payload) == 0
def test_step_response_validate_payload(grizzly_fixture: GrizzlyFixture) -> None:
    """step_response_validate on PAYLOAD: rejects an empty expression and registers
    exactly one payload handler once a content type is set."""
    behave = grizzly_fixture.behave
    grizzly = cast(GrizzlyContext, behave.grizzly)
    request = cast(RequestTask, grizzly.scenario.tasks[0])

    with pytest.raises(ValueError) as ve:
        step_response_validate(behave, ResponseTarget.PAYLOAD, '', True, '')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'expression is empty' in str(ve.value)
    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 0

    request.response.content_type = TransformerContentType.JSON
    step_response_validate(behave, ResponseTarget.PAYLOAD, '$.test.value', True, '.*test')
    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 1
def test_step_response_allow_status_codes(behave_fixture: BehaveFixture) -> None:
    """step_response_allow_status_codes updates the latest request's allowed codes;
    a leading '-' removes a code from the default list."""
    behave = behave_fixture.context
    grizzly = cast(GrizzlyContext, behave.grizzly)

    with pytest.raises(AssertionError) as ae:
        step_response_allow_status_codes(behave, '-200')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'there are no requests in the scenario' in str(ae.value)

    request = RequestTask(RequestMethod.SEND, name='test', endpoint='/api/test')
    grizzly.scenarios.create(behave_fixture.create_scenario('test'))
    grizzly.scenario.tasks.add(request)

    # '-200' removes the default 200 from the allowed list
    step_response_allow_status_codes(behave, '-200')
    assert request.response.status_codes == []

    step_response_allow_status_codes(behave, '200,302')
    assert request.response.status_codes == [200, 302]
def test_step_response_allow_status_codes_table(behave_fixture: BehaveFixture) -> None:
    """Table variant: one 'status' row per request, matched in scenario task order."""
    behave = behave_fixture.context
    grizzly = cast(GrizzlyContext, behave.grizzly)

    # the step refuses to run without a data table
    with pytest.raises(AssertionError) as ae:
        step_response_allow_status_codes_table(behave)
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'step data table is mandatory' in str(ae.value)

    rows: List[Row] = []
    rows.append(Row(['test'], ['-200,400']))
    rows.append(Row(['test'], ['302']))
    behave.table = Table(['test'], rows=rows)

    with pytest.raises(AssertionError) as ae:
        step_response_allow_status_codes_table(behave)
    assert 'there are no requests in the scenario' in str(ae.value)

    request = RequestTask(RequestMethod.SEND, name='test', endpoint='/api/test')
    grizzly.scenarios.create(behave_fixture.create_scenario('test'))
    grizzly.scenario.tasks.add(request)

    # more rows in the data table than there are requests
    with pytest.raises(AssertionError) as ae:
        step_response_allow_status_codes_table(behave)
    assert 'data table has more rows than there are requests' in str(ae.value)

    request = RequestTask(RequestMethod.GET, name='test-get', endpoint='/api/test')
    grizzly.scenario.tasks.add(request)

    # data table does not have the required "status" column
    with pytest.raises(AssertionError) as ae:
        step_response_allow_status_codes_table(behave)
    assert 'data table does not have column "status"' in str(ae.value)

    request = RequestTask(RequestMethod.GET, name='no-code', endpoint='/api/test')
    grizzly.scenario.tasks.add(request, pos=0)

    rows = []
    # expected table, row order matches task order:
    # | status   |
    # | -200,400 |  # name=test
    # | 302      |  # name=test-get
    column_name = 'status'
    rows.append(Row([column_name], ['-200,400']))
    rows.append(Row([column_name], ['302']))
    behave.table = Table([column_name], rows=rows)

    step_response_allow_status_codes_table(behave)
    # task 0 ('no-code') has no row and keeps the default; the others get theirs
    assert cast(RequestTask, grizzly.scenario.tasks[0]).response.status_codes == [200]
    assert cast(RequestTask, grizzly.scenario.tasks[1]).response.status_codes == [400]
    assert cast(RequestTask, grizzly.scenario.tasks[2]).response.status_codes == [200, 302]
def test_step_response_content_type(behave_fixture: BehaveFixture) -> None:
    """step_response_content_type applies to the latest request task only and
    refuses TransformerContentType.UNDEFINED."""
    behave = behave_fixture.context
    grizzly = cast(GrizzlyContext, behave.grizzly)

    with pytest.raises(AssertionError) as ae:
        step_response_content_type(behave, TransformerContentType.JSON)
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'There are no requests in the scenario' in str(ae.value)

    grizzly.scenario.tasks.add(WaitTask(time=1.0))

    with pytest.raises(AssertionError) as ae:
        step_response_content_type(behave, TransformerContentType.JSON)
    assert 'Latest task in scenario is not a request' in str(ae.value)

    request = RequestTask(RequestMethod.POST, 'test-request', endpoint='queue:INCOMMING.MESSAGE')
    assert request.response.content_type == TransformerContentType.UNDEFINED
    grizzly.scenario.tasks.add(request)

    # every concrete content type can be set via the step
    for content_type in TransformerContentType:
        if content_type == TransformerContentType.UNDEFINED:
            continue
        step_response_content_type(behave, content_type)
        assert request.response.content_type == content_type

    with pytest.raises(AssertionError) as ae:
        step_response_content_type(behave, TransformerContentType.UNDEFINED)
    assert 'It is not allowed to set UNDEFINED with this step' in str(ae.value)

    # a content_type argument embedded in the endpoint is parsed and stripped
    request = RequestTask(RequestMethod.POST, 'test-request', endpoint='queue:INCOMING.MESSAGE | content_type="application/xml"')
    assert request.response.content_type == TransformerContentType.XML
    assert request.endpoint == 'queue:INCOMING.MESSAGE'
    grizzly.scenario.tasks.add(request)

    for content_type in TransformerContentType:
        if content_type == TransformerContentType.UNDEFINED:
            continue
        step_response_content_type(behave, content_type)
        assert request.response.content_type == content_type
# tests/test_grizzly/steps/scenario/test_response.py
from typing import List, cast
import pytest
from parse import compile
from behave.model import Table, Row
from grizzly.context import GrizzlyContext
from grizzly.types import RequestMethod, ResponseTarget
from grizzly.tasks import RequestTask, WaitTask
from grizzly.steps import * # pylint: disable=unused-wildcard-import # noqa: F403
from grizzly_extras.transformer import TransformerContentType
from ....fixtures import GrizzlyFixture, BehaveFixture
def test_parse_negative() -> None:
    """parse_condition maps 'is' to truthy and 'is not' to falsy; any other
    word makes the pattern fail to match."""
    parser = compile(
        'value {condition:Condition} world',
        extra_types=dict(Condition=parse_condition),
    )

    assert parser.parse('value is world')['condition']
    assert not parser.parse('value is not world')['condition']
    assert parser.parse('value equals world') is None
def test_parse_response_target() -> None:
    """parse_response_target accepts 'metadata'/'payload' and raises on anything else."""
    parser = compile(
        'save response {target:ResponseTarget}',
        extra_types=dict(ResponseTarget=parse_response_target),
    )

    assert parser.parse('save response metadata')['target'] == ResponseTarget.METADATA
    assert parser.parse('save response payload')['target'] == ResponseTarget.PAYLOAD
    assert parser.parse('save response test') is None

    with pytest.raises(ValueError):
        parse_response_target('asdf')
def test_parse_response_content_type() -> None:
    """TransformerContentType.from_string accepts both the short and MIME forms,
    and raises for unknown content types."""
    p = compile(
        'content type is "{content_type:TransformerContentType}"',
        extra_types=dict(
            TransformerContentType=TransformerContentType.from_string,
        ),
    )

    tests = [
        (TransformerContentType.JSON, ['json', 'application/json']),
        (TransformerContentType.XML, ['xml', 'application/xml']),
        (TransformerContentType.PLAIN, ['plain', 'text/plain']),
    ]

    for test_type, values in tests:
        for value in values:
            assert p.parse(f'content type is "{value}"')['content_type'] == test_type

    with pytest.raises(ValueError) as e:
        p.parse('content type is "image/png"')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'is an unknown response content type' in str(e.value)
def test_step_response_save_matches_metadata(grizzly_fixture: GrizzlyFixture) -> None:
    """step_response_save_matches on METADATA: requires a declared variable and a
    request content type before a metadata handler is registered."""
    behave = grizzly_fixture.behave
    grizzly = grizzly_fixture.grizzly
    request = cast(RequestTask, grizzly.scenario.tasks[0])

    # empty variable name -> rejected, and no handlers are registered
    with pytest.raises(ValueError) as ve:
        step_response_save_matches(behave, ResponseTarget.METADATA, '', '', '')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'variable "" has not been declared' in str(ve.value)

    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 0

    # variable name that has not been declared -> rejected
    with pytest.raises(ValueError) as ve:
        step_response_save_matches(behave, ResponseTarget.METADATA, '$.test.value', '.*ary$', 'test')
    assert 'variable "test" has not been declared' in str(ve.value)

    try:
        grizzly.state.variables['test'] = 'none'

        # declared variable, but the request has no content type yet -> rejected
        with pytest.raises(ValueError) as ve:
            step_response_save_matches(behave, ResponseTarget.METADATA, '$.test.value', '.*ary$', 'test')
        assert 'content type is not set for latest request' in str(ve.value)

        # with a content type set, exactly one metadata handler is registered
        request.response.content_type = TransformerContentType.JSON
        step_response_save_matches(behave, ResponseTarget.METADATA, '$.test.value', '.*ary$', 'test')
        assert len(request.response.handlers.metadata) == 1
        assert len(request.response.handlers.payload) == 0
    finally:
        # restore global state so other tests are unaffected
        request.response.content_type = TransformerContentType.UNDEFINED
        del grizzly.state.variables['test']
def test_step_response_save_matches_payload(grizzly_fixture: GrizzlyFixture) -> None:
    """step_response_save_matches on PAYLOAD: requires a declared variable and a
    request content type before a payload handler is registered."""
    behave = grizzly_fixture.behave
    grizzly = cast(GrizzlyContext, behave.grizzly)
    request = cast(RequestTask, grizzly.scenario.tasks[0])

    # empty variable name -> rejected, and no handlers are registered
    with pytest.raises(ValueError) as ve:
        step_response_save_matches(behave, ResponseTarget.PAYLOAD, '', '', '')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'variable "" has not been declared' in str(ve.value)

    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 0

    # variable name that has not been declared -> rejected
    with pytest.raises(ValueError) as ve:
        step_response_save_matches(behave, ResponseTarget.PAYLOAD, '$.test.value', '.*ary$', 'test')
    assert 'variable "test" has not been declared' in str(ve.value)

    try:
        grizzly.state.variables['test'] = 'none'

        # declared variable, but the request has no content type yet -> rejected
        with pytest.raises(ValueError) as ve:
            step_response_save_matches(behave, ResponseTarget.PAYLOAD, '$.test.value', '.*ary$', 'test')
        assert 'content type is not set for latest request' in str(ve.value)

        # with a content type set, exactly one payload handler is registered
        request.response.content_type = TransformerContentType.JSON
        step_response_save_matches(behave, ResponseTarget.PAYLOAD, '$.test.value', '.*ary$', 'test')
        assert len(request.response.handlers.metadata) == 0
        assert len(request.response.handlers.payload) == 1
    finally:
        # restore global state so other tests are unaffected
        request.response.content_type = TransformerContentType.UNDEFINED
        del grizzly.state.variables['test']
def test_step_response_save_metadata(grizzly_fixture: GrizzlyFixture) -> None:
    """step_response_save on METADATA: requires a declared variable and a request
    content type before a metadata handler is registered."""
    behave = grizzly_fixture.behave
    grizzly = cast(GrizzlyContext, behave.grizzly)
    request = cast(RequestTask, grizzly.scenario.tasks[0])

    # empty variable name -> rejected, no handlers registered
    with pytest.raises(ValueError) as ve:
        step_response_save(behave, ResponseTarget.METADATA, '', '')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'variable "" has not been declared' in str(ve.value)

    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 0

    # undeclared variable -> rejected
    with pytest.raises(ValueError) as ve:
        step_response_save(behave, ResponseTarget.METADATA, '$.test.value', 'test')
    assert 'variable "test" has not been declared' in str(ve.value)

    try:
        grizzly.state.variables['test'] = 'none'

        # declared variable, but no content type on the request -> rejected
        with pytest.raises(ValueError) as ve:
            step_response_save(behave, ResponseTarget.METADATA, '$.test.value', 'test')
        assert 'content type is not set for latest request' in str(ve.value)

        # with a content type set, exactly one metadata handler is registered
        request.response.content_type = TransformerContentType.JSON
        step_response_save(behave, ResponseTarget.METADATA, '$.test.value', 'test')
        assert len(request.response.handlers.metadata) == 1
        assert len(request.response.handlers.payload) == 0
    finally:
        # restore global state so other tests are unaffected
        request.response.content_type = TransformerContentType.UNDEFINED
        del grizzly.state.variables['test']
def test_step_response_save_payload(grizzly_fixture: GrizzlyFixture) -> None:
    """step_response_save on PAYLOAD: requires a declared variable and a request
    content type before a payload handler is registered."""
    behave = grizzly_fixture.behave
    grizzly = cast(GrizzlyContext, behave.grizzly)
    request = cast(RequestTask, grizzly.scenario.tasks[0])

    # empty variable name -> rejected, no handlers registered
    with pytest.raises(ValueError) as ve:
        step_response_save(behave, ResponseTarget.PAYLOAD, '', '')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'variable "" has not been declared' in str(ve.value)

    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 0

    # undeclared variable -> rejected
    with pytest.raises(ValueError) as ve:
        step_response_save(behave, ResponseTarget.PAYLOAD, '$.test.value', 'test')
    assert 'variable "test" has not been declared' in str(ve.value)

    try:
        grizzly.state.variables['test'] = 'none'

        # declared variable, but no content type on the request -> rejected
        with pytest.raises(ValueError) as ve:
            step_response_save(behave, ResponseTarget.PAYLOAD, '$.test.value', 'test')
        assert 'content type is not set for latest request' in str(ve.value)

        # with a content type set, exactly one payload handler is registered
        request.response.content_type = TransformerContentType.JSON
        step_response_save(behave, ResponseTarget.PAYLOAD, '$.test.value', 'test')
        assert len(request.response.handlers.metadata) == 0
        assert len(request.response.handlers.payload) == 1
    finally:
        # restore global state so other tests are unaffected
        request.response.content_type = TransformerContentType.UNDEFINED
        del grizzly.state.variables['test']
def test_step_response_validate_metadata(grizzly_fixture: GrizzlyFixture) -> None:
    """step_response_validate on METADATA: rejects an empty expression and needs a
    content type before a metadata handler is registered."""
    behave = grizzly_fixture.behave
    grizzly = cast(GrizzlyContext, behave.grizzly)
    request = cast(RequestTask, grizzly.scenario.tasks[0])

    with pytest.raises(ValueError) as ve:
        step_response_validate(behave, ResponseTarget.METADATA, '', True, '')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'expression is empty' in str(ve.value)
    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 0

    # a content type is required before a handler can be registered
    with pytest.raises(ValueError) as ve:
        step_response_validate(behave, ResponseTarget.METADATA, '$.test.value', True, '.*test')
    assert 'content type is not set for latest request' in str(ve.value)

    request.response.content_type = TransformerContentType.JSON
    step_response_validate(behave, ResponseTarget.METADATA, '$.test.value', True, '.*test')
    assert len(request.response.handlers.metadata) == 1
    assert len(request.response.handlers.payload) == 0
def test_step_response_validate_payload(grizzly_fixture: GrizzlyFixture) -> None:
    """step_response_validate on PAYLOAD: rejects an empty expression and registers
    exactly one payload handler once a content type is set."""
    behave = grizzly_fixture.behave
    grizzly = cast(GrizzlyContext, behave.grizzly)
    request = cast(RequestTask, grizzly.scenario.tasks[0])

    with pytest.raises(ValueError) as ve:
        step_response_validate(behave, ResponseTarget.PAYLOAD, '', True, '')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'expression is empty' in str(ve.value)
    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 0

    request.response.content_type = TransformerContentType.JSON
    step_response_validate(behave, ResponseTarget.PAYLOAD, '$.test.value', True, '.*test')
    assert len(request.response.handlers.metadata) == 0
    assert len(request.response.handlers.payload) == 1
def test_step_response_allow_status_codes(behave_fixture: BehaveFixture) -> None:
    """step_response_allow_status_codes updates the latest request's allowed codes;
    a leading '-' removes a code from the default list."""
    behave = behave_fixture.context
    grizzly = cast(GrizzlyContext, behave.grizzly)

    with pytest.raises(AssertionError) as ae:
        step_response_allow_status_codes(behave, '-200')
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'there are no requests in the scenario' in str(ae.value)

    request = RequestTask(RequestMethod.SEND, name='test', endpoint='/api/test')
    grizzly.scenarios.create(behave_fixture.create_scenario('test'))
    grizzly.scenario.tasks.add(request)

    # '-200' removes the default 200 from the allowed list
    step_response_allow_status_codes(behave, '-200')
    assert request.response.status_codes == []

    step_response_allow_status_codes(behave, '200,302')
    assert request.response.status_codes == [200, 302]
def test_step_response_allow_status_codes_table(behave_fixture: BehaveFixture) -> None:
    """Table variant: one 'status' row per request, matched in scenario task order."""
    behave = behave_fixture.context
    grizzly = cast(GrizzlyContext, behave.grizzly)

    # the step refuses to run without a data table
    with pytest.raises(AssertionError) as ae:
        step_response_allow_status_codes_table(behave)
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'step data table is mandatory' in str(ae.value)

    rows: List[Row] = []
    rows.append(Row(['test'], ['-200,400']))
    rows.append(Row(['test'], ['302']))
    behave.table = Table(['test'], rows=rows)

    with pytest.raises(AssertionError) as ae:
        step_response_allow_status_codes_table(behave)
    assert 'there are no requests in the scenario' in str(ae.value)

    request = RequestTask(RequestMethod.SEND, name='test', endpoint='/api/test')
    grizzly.scenarios.create(behave_fixture.create_scenario('test'))
    grizzly.scenario.tasks.add(request)

    # more rows in the data table than there are requests
    with pytest.raises(AssertionError) as ae:
        step_response_allow_status_codes_table(behave)
    assert 'data table has more rows than there are requests' in str(ae.value)

    request = RequestTask(RequestMethod.GET, name='test-get', endpoint='/api/test')
    grizzly.scenario.tasks.add(request)

    # data table does not have the required "status" column
    with pytest.raises(AssertionError) as ae:
        step_response_allow_status_codes_table(behave)
    assert 'data table does not have column "status"' in str(ae.value)

    request = RequestTask(RequestMethod.GET, name='no-code', endpoint='/api/test')
    grizzly.scenario.tasks.add(request, pos=0)

    rows = []
    # expected table, row order matches task order:
    # | status   |
    # | -200,400 |  # name=test
    # | 302      |  # name=test-get
    column_name = 'status'
    rows.append(Row([column_name], ['-200,400']))
    rows.append(Row([column_name], ['302']))
    behave.table = Table([column_name], rows=rows)

    step_response_allow_status_codes_table(behave)
    # task 0 ('no-code') has no row and keeps the default; the others get theirs
    assert cast(RequestTask, grizzly.scenario.tasks[0]).response.status_codes == [200]
    assert cast(RequestTask, grizzly.scenario.tasks[1]).response.status_codes == [400]
    assert cast(RequestTask, grizzly.scenario.tasks[2]).response.status_codes == [200, 302]
def test_step_response_content_type(behave_fixture: BehaveFixture) -> None:
    """step_response_content_type applies to the latest request task only and
    refuses TransformerContentType.UNDEFINED."""
    behave = behave_fixture.context
    grizzly = cast(GrizzlyContext, behave.grizzly)

    with pytest.raises(AssertionError) as ae:
        step_response_content_type(behave, TransformerContentType.JSON)
    # assert on the exception itself, not the ExceptionInfo wrapper
    assert 'There are no requests in the scenario' in str(ae.value)

    grizzly.scenario.tasks.add(WaitTask(time=1.0))

    with pytest.raises(AssertionError) as ae:
        step_response_content_type(behave, TransformerContentType.JSON)
    assert 'Latest task in scenario is not a request' in str(ae.value)

    request = RequestTask(RequestMethod.POST, 'test-request', endpoint='queue:INCOMMING.MESSAGE')
    assert request.response.content_type == TransformerContentType.UNDEFINED
    grizzly.scenario.tasks.add(request)

    # every concrete content type can be set via the step
    for content_type in TransformerContentType:
        if content_type == TransformerContentType.UNDEFINED:
            continue
        step_response_content_type(behave, content_type)
        assert request.response.content_type == content_type

    with pytest.raises(AssertionError) as ae:
        step_response_content_type(behave, TransformerContentType.UNDEFINED)
    assert 'It is not allowed to set UNDEFINED with this step' in str(ae.value)

    # a content_type argument embedded in the endpoint is parsed and stripped
    request = RequestTask(RequestMethod.POST, 'test-request', endpoint='queue:INCOMING.MESSAGE | content_type="application/xml"')
    assert request.response.content_type == TransformerContentType.XML
    assert request.endpoint == 'queue:INCOMING.MESSAGE'
    grizzly.scenario.tasks.add(request)

    for content_type in TransformerContentType:
        if content_type == TransformerContentType.UNDEFINED:
            continue
        step_response_content_type(behave, content_type)
        assert request.response.content_type == content_type
| en | 0.544006 | # pylint: disable=unused-wildcard-import # noqa: F403 # more rows in data table then there are requests # data table column "code" does not exist | status | | -200,400 | # name=test | 302 | # name=test-get | 1.939658 | 2 |
crawler/gather/spiders/bilibili.py | shifei123/test | 283 | 6613372 | # -*- coding: utf-8 -*-
from scrapy import Spider, Request
from ..items import ChannelItem, RoomItem
import json
class BilibiliSpider(Spider):
    """Scrapy spider for live.bilibili.com: collects the live channel list
    from the top navigation and, per channel, pages through the JSON room
    listing until an empty page is returned."""
    name = 'bilibili'
    allowed_domains = ['bilibili.com']
    start_urls = [
        'http://live.bilibili.com/area/live'
    ]
    # 'SITE' descriptor is consumed downstream (not a standard scrapy setting)
    custom_settings = {
        'SITE': {
            'code': 'bilibili',
            'name': '哔哩哔哩',
            'description': '哔哩哔哩-关注ACG直播互动平台',
            'url': 'http://live.bilibili.com',
            'image': 'http://static.hdslb.com/live-static/common/images/logo/logo-150-cyan.png',
            'show_seq': 3
        }
    }

    def parse(self, response):
        """Yield one ChannelItem per navigation entry, then schedule the first
        JSON room-list page for each channel via parse_room_list."""
        panel_class = ['live-top-nav-panel', 'live-top-hover-panel']
        panel_xpath = ['contains(@class, "{}")'.format(pclass) for pclass in panel_class]
        room_query_list = []
        # slice [1:-2] drops anchors that are not channel links
        for a_element in response.xpath('//div[{}]/a'.format(' and '.join(panel_xpath)))[1:-2]:
            div_element = a_element.xpath('div[@class="nav-item"]')[0]
            url = a_element.xpath('@href').extract_first()
            if '/pages/area/' in url:
                # area pages: the channel short name is the last CSS class of the icon
                i_class = div_element.xpath('i/@class').extract_first().split(' ')
                short = i_class[-1]
                url = self.custom_settings['SITE']['url'] + '/' + short
            else:
                # otherwise the short name is the last path segment of the href
                short = url[url.rfind('/') + 1:]
            name = div_element.xpath('text()').extract_first()
            yield ChannelItem({'short': short, 'name': name, 'url': response.urljoin(url)})
            url = 'http://live.bilibili.com/area/liveList?area={}&order=online'.format(short)
            # queries are collected first so items are emitted before requests
            room_query_list.append({'url': url, 'channel': short, 'page': 1})
        for room_query in room_query_list:
            # channel short name and page counter travel via request meta
            yield Request('{}&page=1'.format(room_query['url']), callback=self.parse_room_list, meta=room_query)

    def parse_room_list(self, response):
        """Yield a RoomItem per room in one JSON page; keep following the next
        page until the API returns an empty list."""
        room_list = json.loads(response.text)['data']
        if isinstance(room_list, list):
            for rjson in room_list:
                # only emit rooms whose online count is an int (skips other values)
                if isinstance(rjson['online'], int):
                    yield RoomItem({
                        'office_id': str(rjson['roomid']),
                        'name': rjson['title'],
                        'image': rjson['cover'],
                        'url': response.urljoin(rjson['link']),
                        'online': rjson['online'],
                        'host': rjson['uname'],
                        'channel': response.meta['channel']
                    })
            if len(room_list) > 0:
                # non-empty page: request the next one with an incremented counter
                next_meta = dict(response.meta, page=response.meta['page'] + 1)
                yield Request('{}&page={}'.format(next_meta['url'], str(next_meta['page'])),
                              callback=self.parse_room_list, meta=next_meta)
| # -*- coding: utf-8 -*-
from scrapy import Spider, Request
from ..items import ChannelItem, RoomItem
import json
class BilibiliSpider(Spider):
    """Scrapy spider for live.bilibili.com: collects the live channel list
    from the top navigation and, per channel, pages through the JSON room
    listing until an empty page is returned."""
    name = 'bilibili'
    allowed_domains = ['bilibili.com']
    start_urls = [
        'http://live.bilibili.com/area/live'
    ]
    # 'SITE' descriptor is consumed downstream (not a standard scrapy setting)
    custom_settings = {
        'SITE': {
            'code': 'bilibili',
            'name': '哔哩哔哩',
            'description': '哔哩哔哩-关注ACG直播互动平台',
            'url': 'http://live.bilibili.com',
            'image': 'http://static.hdslb.com/live-static/common/images/logo/logo-150-cyan.png',
            'show_seq': 3
        }
    }

    def parse(self, response):
        """Yield one ChannelItem per navigation entry, then schedule the first
        JSON room-list page for each channel via parse_room_list."""
        panel_class = ['live-top-nav-panel', 'live-top-hover-panel']
        panel_xpath = ['contains(@class, "{}")'.format(pclass) for pclass in panel_class]
        room_query_list = []
        # slice [1:-2] drops anchors that are not channel links
        for a_element in response.xpath('//div[{}]/a'.format(' and '.join(panel_xpath)))[1:-2]:
            div_element = a_element.xpath('div[@class="nav-item"]')[0]
            url = a_element.xpath('@href').extract_first()
            if '/pages/area/' in url:
                # area pages: the channel short name is the last CSS class of the icon
                i_class = div_element.xpath('i/@class').extract_first().split(' ')
                short = i_class[-1]
                url = self.custom_settings['SITE']['url'] + '/' + short
            else:
                # otherwise the short name is the last path segment of the href
                short = url[url.rfind('/') + 1:]
            name = div_element.xpath('text()').extract_first()
            yield ChannelItem({'short': short, 'name': name, 'url': response.urljoin(url)})
            url = 'http://live.bilibili.com/area/liveList?area={}&order=online'.format(short)
            # queries are collected first so items are emitted before requests
            room_query_list.append({'url': url, 'channel': short, 'page': 1})
        for room_query in room_query_list:
            # channel short name and page counter travel via request meta
            yield Request('{}&page=1'.format(room_query['url']), callback=self.parse_room_list, meta=room_query)

    def parse_room_list(self, response):
        """Yield a RoomItem per room in one JSON page; keep following the next
        page until the API returns an empty list."""
        room_list = json.loads(response.text)['data']
        if isinstance(room_list, list):
            for rjson in room_list:
                # only emit rooms whose online count is an int (skips other values)
                if isinstance(rjson['online'], int):
                    yield RoomItem({
                        'office_id': str(rjson['roomid']),
                        'name': rjson['title'],
                        'image': rjson['cover'],
                        'url': response.urljoin(rjson['link']),
                        'online': rjson['online'],
                        'host': rjson['uname'],
                        'channel': response.meta['channel']
                    })
            if len(room_list) > 0:
                # non-empty page: request the next one with an incremented counter
                next_meta = dict(response.meta, page=response.meta['page'] + 1)
                yield Request('{}&page={}'.format(next_meta['url'], str(next_meta['page'])),
                              callback=self.parse_room_list, meta=next_meta)
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.297408 | 2 |
py_tdlib/constructors/temporary_password_state.py | Mr-TelegramBot/python-tdlib | 24 | 6613373 | <filename>py_tdlib/constructors/temporary_password_state.py
from ..factory import Type
class temporaryPasswordState(Type):
    """TDLib ``temporaryPasswordState`` constructor (fields filled in at runtime).

    NOTE(review): field semantics assumed from the TDLib API -- confirm.
    """
    # whether a temporary password is currently set
    has_password = None  # type: "Bool"
    # remaining validity period, in seconds
    valid_for = None  # type: "int32"
| <filename>py_tdlib/constructors/temporary_password_state.py
from ..factory import Type
class temporaryPasswordState(Type):
    """TDLib ``temporaryPasswordState`` constructor (fields filled in at runtime).

    NOTE(review): field semantics assumed from the TDLib API -- confirm.
    """
    # whether a temporary password is currently set
    has_password = None  # type: "Bool"
    # remaining validity period, in seconds
    valid_for = None  # type: "int32"
| en | 0.498859 | # type: "Bool" # type: "int32" | 2.243431 | 2 |
tests/unit_tests/fixtures/swift_object.py | Barometre-de-la-Science-Ouverte/bso3-process-publication | 0 | 6613374 | <filename>tests/unit_tests/fixtures/swift_object.py<gh_stars>0
import json
import swift
from config.path_config import CONFIG_PATH_OVH
# Shared fixtures for the unit tests: a configured Swift client plus the
# local publication files used by the upload tests.
with open(CONFIG_PATH_OVH, 'r') as config_file:  # close the handle instead of leaking it
    config_harvester = json.load(config_file)

_swift = swift.Swift(config_harvester)

# Locally downloaded publications (metadata JSON + PDF pairs).
local_dir = [
    './tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.json',
    './tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.pdf',
    './tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.json',
    './tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.pdf'
]

# (local_path, remote_object_key) pairs; keys are prefixed with the first
# four byte-pairs of the UUID.
files_to_upload = [
    ('./tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.json', '9f/ea/8e/3a/9fea8e3a-344c-4552-874c-6852074bcdd1.json'),
    ('./tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.json', 'dc/d4/1f/3e/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.json'),
]
import json
import swift
from config.path_config import CONFIG_PATH_OVH
# Shared fixtures for the unit tests: a configured Swift client plus the
# local publication files used by the upload tests.
with open(CONFIG_PATH_OVH, 'r') as config_file:  # close the handle instead of leaking it
    config_harvester = json.load(config_file)

_swift = swift.Swift(config_harvester)

# Locally downloaded publications (metadata JSON + PDF pairs).
local_dir = [
    './tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.json',
    './tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.pdf',
    './tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.json',
    './tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.pdf'
]

# (local_path, remote_object_key) pairs; keys are prefixed with the first
# four byte-pairs of the UUID.
files_to_upload = [
    ('./tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.json', '9f/ea/8e/3a/9fea8e3a-344c-4552-874c-6852074bcdd1.json'),
    ('./tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.json', 'dc/d4/1f/3e/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.json'),
]
clumper/clump.py | sidvecious/clumper | 0 | 6613375 | <reponame>sidvecious/clumper<gh_stars>0
class Clumper:
    """A small chainable collection of dictionaries ("rows").

    Every transforming method returns a new Clumper and leaves the source
    collection untouched. (The source span accidentally contained this class
    twice; consolidated into a single, corrected definition.)
    """

    def __init__(self, blob):
        # blob: list of dict rows
        self.blob = blob

    def keep(self, *funcs):
        """Keep only rows for which every predicate in *funcs* is truthy."""
        data = self.blob
        for func in funcs:
            data = [d for d in data if func(d)]
        return Clumper(data)

    def head(self, n):
        """First *n* rows (fewer if the collection is shorter).

        Fix: the previous index-based version raised IndexError when
        n exceeded the collection length.
        """
        return Clumper(self.blob[:n])

    def tail(self, n):
        """Last *n* rows in their original order.

        Fix: the previous version returned the rows reversed
        ([blob[-1], blob[-2], ...]).
        """
        return Clumper(self.blob[-n:] if n > 0 else [])

    def select(self, *keys):
        """Project each row down to the given keys (KeyError if a key is missing)."""
        return Clumper([{k: d[k] for k in keys} for d in self.blob])

    def mutate(self, **kwargs):
        """Add/overwrite one key per keyword; each value is func(row).

        Later functions see keys added by earlier ones. Fix: rows are copied
        first, so the source Clumper's dictionaries are no longer mutated.
        """
        data = [dict(d) for d in self.blob]
        for key, func in kwargs.items():
            for row in data:
                row[key] = func(row)
        return Clumper(data)

    def sort(self, key, reverse=False):
        """Return a new Clumper with rows ordered by *key*."""
        return Clumper(sorted(self.blob, key=key, reverse=reverse))

    def collect(self):
        """Return the underlying list of rows."""
        return self.blob
UTILS/RANSX/MasterPlot.py | mmicromegas/ransX | 4 | 6613376 | <reponame>mmicromegas/ransX<filename>UTILS/RANSX/MasterPlot.py
from EQUATIONS.ContinuityEquationWithMassFlux import ContinuityEquationWithMassFlux
from EQUATIONS.ContinuityEquationWithFavrianDilatation import ContinuityEquationWithFavrianDilatation
from EQUATIONS.MomentumEquationX import MomentumEquationX
from EQUATIONS.MomentumEquationY import MomentumEquationY
from EQUATIONS.MomentumEquationZ import MomentumEquationZ
from EQUATIONS.ReynoldsStressXXequation import ReynoldsStressXXequation
from EQUATIONS.ReynoldsStressYYequation import ReynoldsStressYYequation
from EQUATIONS.ReynoldsStressZZequation import ReynoldsStressZZequation
from EQUATIONS.TurbulentKineticEnergyEquation import TurbulentKineticEnergyEquation
from EQUATIONS.TurbulentKineticEnergyEquationRadial import TurbulentKineticEnergyEquationRadial
from EQUATIONS.TurbulentKineticEnergyEquationHorizontal import TurbulentKineticEnergyEquationHorizontal
from EQUATIONS.InternalEnergyEquation import InternalEnergyEquation
from EQUATIONS.InternalEnergyFluxEquation import InternalEnergyFluxEquation
from EQUATIONS.InternalEnergyVarianceEquation import InternalEnergyVarianceEquation
from EQUATIONS.KineticEnergyEquation import KineticEnergyEquation
from EQUATIONS.TotalEnergyEquation import TotalEnergyEquation
from EQUATIONS.EntropyEquation import EntropyEquation
from EQUATIONS.EntropyFluxEquation import EntropyFluxEquation
from EQUATIONS.EntropyVarianceEquation import EntropyVarianceEquation
from EQUATIONS.PressureEquation import PressureEquation
from EQUATIONS.PressureFluxXequation import PressureFluxXequation
from EQUATIONS.PressureFluxYequation import PressureFluxYequation
from EQUATIONS.PressureFluxZequation import PressureFluxZequation
from EQUATIONS.PressureVarianceEquation import PressureVarianceEquation
from EQUATIONS.TemperatureEquation import TemperatureEquation
from EQUATIONS.TemperatureFluxEquation import TemperatureFluxEquation
from EQUATIONS.TemperatureVarianceEquation import TemperatureVarianceEquation
from EQUATIONS.EnthalpyEquation import EnthalpyEquation
from EQUATIONS.EnthalpyFluxEquation import EnthalpyFluxEquation
from EQUATIONS.EnthalpyVarianceEquation import EnthalpyVarianceEquation
from EQUATIONS.DensityVarianceEquation import DensityVarianceEquation
from EQUATIONS.TurbulentMassFluxEquation import TurbulentMassFluxEquation
from EQUATIONS.DensitySpecificVolumeCovarianceEquation import DensitySpecificVolumeCovarianceEquation
from EQUATIONS.XtransportEquation import XtransportEquation
from EQUATIONS.XfluxXequation import XfluxXequation
from EQUATIONS.XfluxYequation import XfluxYequation
from EQUATIONS.XfluxZequation import XfluxZequation
from EQUATIONS.XvarianceEquation import XvarianceEquation
from EQUATIONS.Xdiffusivity import Xdiffusivity
from EQUATIONS.XdamkohlerNumber import XdamkohlerNumber
from EQUATIONS.AbarTransportEquation import AbarTransportEquation
from EQUATIONS.ZbarTransportEquation import ZbarTransportEquation
from EQUATIONS.AbarFluxTransportEquation import AbarFluxTransportEquation
from EQUATIONS.ZbarFluxTransportEquation import ZbarFluxTransportEquation
from EQUATIONS.TemperatureDensity import TemperatureDensity
from EQUATIONS.PressureInternalEnergy import PressureInternalEnergy
from EQUATIONS.NuclearEnergyProduction import NuclearEnergyProduction
from EQUATIONS.Gravity import Gravity
from EQUATIONS.TemperatureGradients import TemperatureGradients
from EQUATIONS.Degeneracy import Degeneracy
from EQUATIONS.VelocitiesMeanExp import VelocitiesMeanExp
from EQUATIONS.VelocitiesMLTturb import VelocitiesMLTturb
from EQUATIONS.RelativeRMSflct import RelativeRMSflct
from EQUATIONS.AbarZbar import AbarZbar
from EQUATIONS.BruntVaisalla import BruntVaisalla
from EQUATIONS.Buoyancy import Buoyancy
# import classes for hydrodynamic stellar structure equations
from EQUATIONS.HsseContinuityEquation import HsseContinuityEquation
from EQUATIONS.HsseMomentumEquationX import HsseMomentumEquationX
from EQUATIONS.HsseTemperatureEquation import HsseTemperatureEquation
from EQUATIONS.HsseLuminosityEquation import HsseLuminosityEquation
from EQUATIONS.HsseXtransportEquation import HsseXtransportEquation
# from class for full turbulence velocity field hypothesis
from EQUATIONS.FullTurbulenceVelocityFieldHypothesisX import FullTurbulenceVelocityFieldHypothesisX
from EQUATIONS.FullTurbulenceVelocityFieldHypothesisY import FullTurbulenceVelocityFieldHypothesisY
from EQUATIONS.FullTurbulenceVelocityFieldHypothesisZ import FullTurbulenceVelocityFieldHypothesisZ
from EQUATIONS.UxfpdIdentity import UxfpdIdentity
from EQUATIONS.UyfpdIdentity import UyfpdIdentity
from EQUATIONS.UzfpdIdentity import UzfpdIdentity
from EQUATIONS.DivuDilatation import DivuDilatation
import matplotlib.pyplot as plt
class MasterPlot():
    def __init__(self, params):
        """Store the parameter/configuration object used by all exec* plot drivers."""
        self.params = params
def execRho(self, bconv, tconv):
params = self.params
# instantiate
ransCONT = ContinuityEquationWithFavrianDilatation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot density
ransCONT.plot_rho(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('rho')['xbl'],
params.getForEqs('rho')['xbr'],
params.getForEqs('rho')['ybu'],
params.getForEqs('rho')['ybd'],
params.getForEqs('rho')['ilg'])
# ransCONT.plot_mm_vs_MM(params.getForProp('prop')['laxis'],
# params.getForEqs('rho')['xbl'],
# params.getForEqs('rho')['xbr'],
# params.getForEqs('rho')['ybu'],
# params.getForEqs('rho')['ybd'],
# params.getForEqs('rho')['ilg'])
def execContEq(self, bconv, tconv):
params = self.params
# instantiate
ransCONT = ContinuityEquationWithFavrianDilatation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation
ransCONT.plot_continuity_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('conteq')['xbl'],
params.getForEqs('conteq')['xbr'],
params.getForEqs('conteq')['ybu'],
params.getForEqs('conteq')['ybd'],
params.getForEqs('conteq')['ilg'])
def execContEqBar(self):
params = self.params
# instantiate
ransCONT = ContinuityEquationWithFavrianDilatation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation integral budget
ransCONT.plot_continuity_equation_integral_budget(params.getForProp('prop')['laxis'],
params.getForEqsBar('conteqBar')['xbl'],
params.getForEqsBar('conteqBar')['xbr'],
params.getForEqsBar('conteqBar')['ybu'],
params.getForEqsBar('conteqBar')['ybd'])
def execContFddEq(self, bconv, tconv):
params = self.params
# instantiate
ransCONTfdd = ContinuityEquationWithMassFlux(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation
ransCONTfdd.plot_continuity_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('conteqfdd')['xbl'],
params.getForEqs('conteqfdd')['xbr'],
params.getForEqs('conteqfdd')['ybu'],
params.getForEqs('conteqfdd')['ybd'],
params.getForEqs('conteqfdd')['ilg'])
# ransCONTfdd.plot_Frho_space_time(params.getForProp('prop')['laxis'],
# bconv, tconv,
# params.getForEqs('conteqfdd')['xbl'],
# params.getForEqs('conteqfdd')['xbr'],
# params.getForEqs('conteqfdd')['ybu'],
# params.getForEqs('conteqfdd')['ybd'],
# params.getForEqs('conteqfdd')['ilg'])
def execContFddEqBar(self):
params = self.params
# instantiate
ransCONTfdd = ContinuityEquationWithMassFlux(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation integral budget
ransCONTfdd.plot_continuity_equation_integral_budget(params.getForProp('prop')['laxis'],
params.getForEqsBar('conteqfddBar')['xbl'],
params.getForEqsBar('conteqfddBar')['xbr'],
params.getForEqsBar('conteqfddBar')['ybu'],
params.getForEqsBar('conteqfddBar')['ybd'])
def execHssContEq(self, bconv, tconv):
params = self.params
# instantiate
ranshssecont = HsseContinuityEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
# plot continuity equation
ranshssecont.plot_continuity_equation(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# plot continuity equation alternative
ranshssecont.plot_continuity_equation_2(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# plot continuity equation alternative simplified
ranshssecont.plot_continuity_equation_3(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# plot continuity equation alternative simplified - cracking on velocities
# ranshssecont.plot_velocities(params.getForProp('prop')['laxis'],\
# params.getForEqs('cteqhsse')['xbl'],\
# params.getForEqs('cteqhsse')['xbr'],\
# params.getForEqs('cteqhsse')['ybu'],\
# params.getForEqs('cteqhsse')['ybd'],\
# params.getForEqs('cteqhsse')['ilg'])
ranshssecont.plot_dilatation_flux(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# ranshssecont.plot_mass_flux_acceleration(params.getForProp('prop')['laxis'],\
# params.getForEqs('cteqhsse')['xbl'],\
# params.getForEqs('cteqhsse')['xbr'],\
# params.getForEqs('cteqhsse')['ybu'],\
# params.getForEqs('cteqhsse')['ybd'],\
# params.getForEqs('cteqhsse')['ilg'])
def execHssMomxEq(self, bconv, tconv):
params = self.params
# instantiate
ranshssemomx = HsseMomentumEquationX(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
# plot hsse momentm equation
ranshssemomx.plot_momentum_equation_x(params.getForProp('prop')['laxis'],
params.getForEqs('mxeqhsse')['xbl'],
params.getForEqs('mxeqhsse')['xbr'],
params.getForEqs('mxeqhsse')['ybu'],
params.getForEqs('mxeqhsse')['ybd'],
params.getForEqs('mxeqhsse')['ilg'])
# plot hsse momentm equation alternative
ranshssemomx.plot_momentum_equation_x_2(params.getForProp('prop')['laxis'],
params.getForEqs('mxeqhsse')['xbl'],
params.getForEqs('mxeqhsse')['xbr'],
params.getForEqs('mxeqhsse')['ybu'],
params.getForEqs('mxeqhsse')['ybd'],
params.getForEqs('mxeqhsse')['ilg'])
# plot hsse momentm equation alternative simplified
ranshssemomx.plot_momentum_equation_x_3(params.getForProp('prop')['laxis'],
params.getForEqs('mxeqhsse')['xbl'],
params.getForEqs('mxeqhsse')['xbr'],
params.getForEqs('mxeqhsse')['ybu'],
params.getForEqs('mxeqhsse')['ybd'],
params.getForEqs('mxeqhsse')['ilg'])
def execHssTempEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ranshssetemp = HsseTemperatureEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
tke_diss, bconv, tconv,
params.getForProp('prop')['prefix'])
# plot hsse temperature equation
ranshssetemp.plot_tt_equation(params.getForProp('prop')['laxis'],
params.getForEqs('tpeqhsse')['xbl'],
params.getForEqs('tpeqhsse')['xbr'],
params.getForEqs('tpeqhsse')['ybu'],
params.getForEqs('tpeqhsse')['ybd'],
params.getForEqs('tpeqhsse')['ilg'])
# plot hsse temperature equation alternative
ranshssetemp.plot_tt_equation_2(params.getForProp('prop')['laxis'],
params.getForEqs('tpeqhsse')['xbl'],
params.getForEqs('tpeqhsse')['xbr'],
params.getForEqs('tpeqhsse')['ybu'],
params.getForEqs('tpeqhsse')['ybd'],
params.getForEqs('tpeqhsse')['ilg'])
# plot hsse temperature equation alternative simplified
ranshssetemp.plot_tt_equation_3(params.getForProp('prop')['laxis'],
params.getForEqs('tpeqhsse')['xbl'],
params.getForEqs('tpeqhsse')['xbr'],
params.getForEqs('tpeqhsse')['ybu'],
params.getForEqs('tpeqhsse')['ybd'],
params.getForEqs('tpeqhsse')['ilg'])
def execHssLumiEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ranshsselumi = HsseLuminosityEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
tke_diss, bconv, tconv,
params.getForProp('prop')['prefix'])
# plot hsse luminosity equation
# ranshsselumi.plot_luminosity_equation(params.getForProp('prop')['laxis'],
# params.getForEqs('lueqhsse')['xbl'],
# params.getForEqs('lueqhsse')['xbr'],
# params.getForEqs('lueqhsse')['ybu'],
# params.getForEqs('lueqhsse')['ybd'],
# params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation exact
ranshsselumi.plot_luminosity_equation_exact(params.getForProp('prop')['laxis'],
params.getForEqs('lueqhsse')['xbl'],
params.getForEqs('lueqhsse')['xbr'],
params.getForEqs('lueqhsse')['ybu'],
params.getForEqs('lueqhsse')['ybd'],
params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation exact 2
ranshsselumi.plot_luminosity_equation_exact2(params.getForProp('prop')['laxis'],
params.getForEqs('lueqhsse')['xbl'],
params.getForEqs('lueqhsse')['xbr'],
params.getForEqs('lueqhsse')['ybu'],
params.getForEqs('lueqhsse')['ybd'],
params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation alternative
# ranshsselumi.plot_luminosity_equation_2(params.getForProp('prop')['laxis'],
# params.getForEqs('lueqhsse')['xbl'],
# params.getForEqs('lueqhsse')['xbr'],
# params.getForEqs('lueqhsse')['ybu'],
# params.getForEqs('lueqhsse')['ybd'],
# params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation alternative simplified
# ranshsselumi.plot_luminosity_equation_3(params.getForProp('prop')['laxis'],
# params.getForEqs('lueqhsse')['xbl'],
# params.getForEqs('lueqhsse')['xbr'],
# params.getForEqs('lueqhsse')['ybu'],
# params.getForEqs('lueqhsse')['ybd'],
# params.getForEqs('lueqhsse')['ilg'])
def execHssCompEq(self, inuc, element, x, bconv, tconv):
params = self.params
# instantiate
ranshssecomp = HsseXtransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ranshssecomp.plot_Xtransport_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXrho(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransXtra = XtransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['plabel'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransXtra.plot_Xrho(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
# ransXtra.plot_X(params.getForProp('prop')['laxis'], \
# params.getForEqs(x)['xbl'], \
# params.getForEqs(x)['xbr'], \
# params.getForEqs(x)['ybu'], \
# params.getForEqs(x)['ybd'], \
# params.getForEqs(x)['ilg'])
# ransXtra.plot_gradX(params.getForProp('prop')['laxis'],\
# params.getForEqs(x)['xbl'],\
# params.getForEqs(x)['xbr'],\
# params.getForEqs(x)['ybu'],\
# params.getForEqs(x)['ybd'],\
# params.getForEqs(x)['ilg'])
def execX(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransXtra = XtransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['plabel'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
if params.getForProp('prop')['plabel'] == "oburn":
ransXtra.plot_X_with_MM(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
else:
ransXtra.plot_X(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
#ransXtra.plot_X_space_time(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
#ransXtra.plot_rhoX_space_time(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
# ransXtra.plot_Xm_with_MM(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
def execXtrsEq(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransXtra = XtransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['plabel'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransXtra.plot_Xtransport_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXtrsEqBar(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransXtra = XtransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['plabel'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot X transport equation integral budget
ransXtra.plot_Xtransport_equation_integral_budget(params.getForProp('prop')['laxis'],
params.getForEqsBar(x)['xbl'],
params.getForEqsBar(x)['xbr'],
params.getForEqsBar(x)['ybu'],
params.getForEqsBar(x)['ybd'])
def execXflxx(self, inuc, element, x, bconv, tconv, tke_diss, tauL, cnvz_in_hp):
params = self.params
# instantiate
ransXflxx = XfluxXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, tke_diss, tauL, cnvz_in_hp,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# ransXflxx.plot_XfluxX(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
ransXflxx.plot_alphaX(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
# ransXflxx.plot_XfluxxX(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
# ransXflxx.plot_XfluxXRogers1989(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
# ransXflxx.plot_Xflux_gradient(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
# ransXflxx.plot_XfluxX2(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
def execXflxXeq(self, inuc, element, x, bconv, tconv, tke_diss, tauL, cnvz_in_hp):
params = self.params
# instantiate
ransXflxx = XfluxXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, tke_diss, tauL, cnvz_in_hp,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransXflxx.plot_XfluxX_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
# ransXflxx.plot_XfluxX_equation2(params.getForProp('prop')['laxis'], \
# params.getForEqs(x)['xbl'], \
# params.getForEqs(x)['xbr'], \
# params.getForEqs(x)['ybu'], \
# params.getForEqs(x)['ybd'], \
# params.getForEqs(x)['ilg'])
def execXflxy(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
params = self.params
# instantiate
ransXflxy = XfluxYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, bconv, tconv, tke_diss, tauL,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransXflxy.plot_XfluxY(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXflxYeq(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
params = self.params
# instantiate
ransXflxy = XfluxYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, bconv, tconv, tke_diss, tauL,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransXflxy.plot_XfluxY_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXflxz(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
params = self.params
# instantiate
ransXflxz = XfluxZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, bconv, tconv, tke_diss, tauL,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransXflxz.plot_XfluxZ(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXflxZeq(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
params = self.params
# instantiate
ransXflxz = XfluxZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, bconv, tconv, tke_diss, tauL,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransXflxz.plot_XfluxZ_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXvar(self, inuc, element, x, bconv, tconv):
params = self.params
tauL = 1.
# instantiate
ransXvar = XvarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, tauL, bconv, tconv,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransXvar.plot_Xvariance(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXvarEq(self, inuc, element, x, tauL, bconv, tconv):
params = self.params
# instantiate
ransXvar = XvarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, tauL, bconv, tconv,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransXvar.plot_Xvariance_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execDiff(self, inuc, element, x, lc, uconv, bconv, tconv, tke_diss, tauL, super_ad_i, super_ad_o, cnvz_in_hp):
params = self.params
# instantiate
ransXdiff = Xdiffusivity(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
inuc, element, lc, uconv, bconv, tconv, cnvz_in_hp,
tke_diss, tauL, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# ransXdiff.plot_X_Ediffusivity(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
ransXdiff.plot_X_Ediffusivity2(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXda(self, inuc, element, x, bconv, tconv):
params = self.params
# instantiate
ransXda = XdamkohlerNumber(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, bconv, tconv,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransXda.plot_Xda(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execTke(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransTke = TurbulentKineticEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
kolmdissrate, bconv, tconv,
super_ad_i, super_ad_o,
params.getForProp('prop')['prefix'])
# plot turbulent kinetic energy
ransTke.plot_tke(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('tkie')['xbl'],
params.getForEqs('tkie')['xbr'],
params.getForEqs('tkie')['ybu'],
params.getForEqs('tkie')['ybd'],
params.getForEqs('tkie')['ilg'])
#ransTke.plot_TKE_space_time(params.getForProp('prop')['laxis'],
# params.getForEqs('tkeeq')['xbl'],
# params.getForEqs('tkeeq')['xbr'],
# params.getForEqs('tkeeq')['ybu'],
# params.getForEqs('tkeeq')['ybd'],
# params.getForEqs('tkeeq')['ilg'])
# plot turbulent kinetic energy evolution
# ransTke.plot_tke_evolution()
# plot evolution of convection boundaries
# ransTke.plot_conv_bndry_location()
def execTkeEq(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransTke = TurbulentKineticEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
kolmdissrate, bconv, tconv,
super_ad_i, super_ad_o,
params.getForProp('prop')['prefix'])
# plot turbulent kinetic energy equation
ransTke.plot_tke_equation(params.getForProp('prop')['laxis'],
params.getForEqs('tkeeq')['xbl'],
params.getForEqs('tkeeq')['xbr'],
params.getForEqs('tkeeq')['ybu'],
params.getForEqs('tkeeq')['ybd'],
params.getForEqs('tkeeq')['ilg'])
def execTkeEqBar(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransTke = TurbulentKineticEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
kolmdissrate, bconv, tconv,
super_ad_i, super_ad_o,
params.getForProp('prop')['prefix'])
# plot turbulent kinetic energy equation
ransTke.plot_tke_equation_integral_budget(params.getForProp('prop')['laxis'],
params.getForEqs('tkeeqBar')['xbl'],
params.getForEqs('tkeeqBar')['xbr'],
params.getForEqs('tkeeqBar')['ybu'],
params.getForEqs('tkeeqBar')['ybd'])
def execTkeRadial(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransTkeR = TurbulentKineticEnergyEquationRadial(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
kolmdissrate, bconv, tconv,
super_ad_i, super_ad_o,
params.getForProp('prop')['prefix'])
# plot turbulent kinetic energy
ransTkeR.plot_tkeRadial(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('tkieR')['xbl'],
params.getForEqs('tkieR')['xbr'],
params.getForEqs('tkieR')['ybu'],
params.getForEqs('tkieR')['ybd'],
params.getForEqs('tkieR')['ilg'])
#ransTkeR.plot_TKEradial_space_time(params.getForProp('prop')['laxis'],
# params.getForEqs('tkeReq')['xbl'],
# params.getForEqs('tkeReq')['xbr'],
# params.getForEqs('tkeReq')['ybu'],
# params.getForEqs('tkeReq')['ybd'],
# params.getForEqs('tkeReq')['ilg'])
# plot turbulent kinetic energy evolution
# ransTke.plot_tke_evolution()
# plot evolution of convection boundaries
# ransTke.plot_conv_bndry_location()
def execTkeEqRadial(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransTkeR = TurbulentKineticEnergyEquationRadial(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
kolmdissrate, bconv, tconv,
super_ad_i, super_ad_o,
params.getForProp('prop')['prefix'])
# plot turbulent kinetic energy equation
ransTkeR.plot_tkeRadial_equation(params.getForProp('prop')['laxis'],
params.getForEqs('tkeReq')['xbl'],
params.getForEqs('tkeReq')['xbr'],
params.getForEqs('tkeReq')['ybu'],
params.getForEqs('tkeReq')['ybd'],
params.getForEqs('tkeReq')['ilg'])
def execTkeEqRadialBar(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransTkeR = TurbulentKineticEnergyEquationRadial(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
kolmdissrate, bconv, tconv,
super_ad_i, super_ad_o,
params.getForProp('prop')['prefix'])
# plot turbulent kinetic energy equation
ransTkeR.plot_tkeRadial_equation_integral_budget(params.getForProp('prop')['laxis'],
params.getForEqs('tkeReqBar')['xbl'],
params.getForEqs('tkeReqBar')['xbr'],
params.getForEqs('tkeReqBar')['ybu'],
params.getForEqs('tkeReqBar')['ybd'])
def execTkeHorizontal(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransTkeH = TurbulentKineticEnergyEquationHorizontal(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
kolmdissrate, bconv, tconv,
super_ad_i, super_ad_o,
params.getForProp('prop')['prefix'])
# plot turbulent kinetic energy
ransTkeH.plot_tkeHorizontal(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('tkieH')['xbl'],
params.getForEqs('tkieH')['xbr'],
params.getForEqs('tkieH')['ybu'],
params.getForEqs('tkieH')['ybd'],
params.getForEqs('tkieH')['ilg'])
#ransTkeH.plot_TKEhorizontal_space_time(params.getForProp('prop')['laxis'],
# params.getForEqs('tkeHeq')['xbl'],
# params.getForEqs('tkeHeq')['xbr'],
# params.getForEqs('tkeHeq')['ybu'],
# params.getForEqs('tkeHeq')['ybd'],
# params.getForEqs('tkeHeq')['ilg'])
# plot turbulent kinetic energy evolution
# ransTke.plot_tke_evolution()
# plot evolution of convection boundaries
# ransTke.plot_conv_bndry_location()
def execTkeEqHorizontal(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransTkeH = TurbulentKineticEnergyEquationHorizontal(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
kolmdissrate, bconv, tconv,
super_ad_i, super_ad_o,
params.getForProp('prop')['prefix'])
# plot turbulent kinetic energy equation
ransTkeH.plot_tkeHorizontal_equation(params.getForProp('prop')['laxis'],
params.getForEqs('tkeHeq')['xbl'],
params.getForEqs('tkeHeq')['xbr'],
params.getForEqs('tkeHeq')['ybu'],
params.getForEqs('tkeHeq')['ybd'],
params.getForEqs('tkeHeq')['ilg'])
def execTkeEqHorizontalBar(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransTkeH = TurbulentKineticEnergyEquationHorizontal(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
kolmdissrate, bconv, tconv,
super_ad_i, super_ad_o,
params.getForProp('prop')['prefix'])
# plot turbulent kinetic energy equation
ransTkeH.plot_tkeHorizontal_equation_integral_budget(params.getForProp('prop')['laxis'],
params.getForEqs('tkeHeqBar')['xbl'],
params.getForEqs('tkeHeqBar')['xbr'],
params.getForEqs('tkeHeqBar')['ybu'],
params.getForEqs('tkeHeqBar')['ybd'])
def execMomx(self, bconv, tconv):
params = self.params
# instantiate
ransMomx = MomentumEquationX(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransMomx.plot_momentum_x(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('momex')['xbl'],
params.getForEqs('momex')['xbr'],
params.getForEqs('momex')['ybu'],
params.getForEqs('momex')['ybd'],
params.getForEqs('momex')['ilg'])
def execMomxEq(self, bconv, tconv):
params = self.params
# instantiate
ransMomx = MomentumEquationX(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransMomx.plot_momentum_equation_x(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('momxeq')['xbl'],
params.getForEqs('momxeq')['xbr'],
params.getForEqs('momxeq')['ybu'],
params.getForEqs('momxeq')['ybd'],
params.getForEqs('momxeq')['ilg'])
def execMomy(self, bconv, tconv):
params = self.params
# instantiate
ransMomy = MomentumEquationY(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransMomy.plot_momentum_y(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('momey')['xbl'],
params.getForEqs('momey')['xbr'],
params.getForEqs('momey')['ybu'],
params.getForEqs('momey')['ybd'],
params.getForEqs('momey')['ilg'])
def execMomyEq(self, bconv, tconv):
params = self.params
# instantiate
ransMomy = MomentumEquationY(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransMomy.plot_momentum_equation_y(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('momyeq')['xbl'],
params.getForEqs('momyeq')['xbr'],
params.getForEqs('momyeq')['ybu'],
params.getForEqs('momyeq')['ybd'],
params.getForEqs('momyeq')['ilg'])
def execMomz(self, bconv, tconv):
params = self.params
# instantiate
ransMomz = MomentumEquationZ(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransMomz.plot_momentum_z(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('momez')['xbl'],
params.getForEqs('momez')['xbr'],
params.getForEqs('momez')['ybu'],
params.getForEqs('momez')['ybd'],
params.getForEqs('momez')['ilg'])
def execMomzEq(self, bconv, tconv):
params = self.params
# instantiate
ransMomz = MomentumEquationZ(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransMomz.plot_momentum_equation_z(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('momzeq')['xbl'],
params.getForEqs('momzeq')['xbr'],
params.getForEqs('momzeq')['ybu'],
params.getForEqs('momzeq')['ybd'],
params.getForEqs('momzeq')['ilg'])
def execEi(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransEi = InternalEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransEi.plot_ei(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('eint')['xbl'],
params.getForEqs('eint')['xbr'],
params.getForEqs('eint')['ybu'],
params.getForEqs('eint')['ybd'],
params.getForEqs('eint')['ilg'])
def execEiEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransEi = InternalEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransEi.plot_ei_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('eieq')['xbl'],
params.getForEqs('eieq')['xbr'],
params.getForEqs('eieq')['ybu'],
params.getForEqs('eieq')['ybd'],
params.getForEqs('eieq')['ilg'])
def execEiFlx(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransEiFlx = InternalEnergyFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransEiFlx.plot_fei(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('eintflx')['xbl'],
params.getForEqs('eintflx')['xbr'],
params.getForEqs('eintflx')['ybu'],
params.getForEqs('eintflx')['ybd'],
params.getForEqs('eintflx')['ilg'])
def execEiFlxEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransEiFlx = InternalEnergyFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransEiFlx.plot_fei_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('eiflxeq')['xbl'],
params.getForEqs('eiflxeq')['xbr'],
params.getForEqs('eiflxeq')['ybu'],
params.getForEqs('eiflxeq')['ybd'],
params.getForEqs('eiflxeq')['ilg'])
ransEiFlx.plot_fei_equation2(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('eiflxeq')['xbl'],
params.getForEqs('eiflxeq')['xbr'],
params.getForEqs('eiflxeq')['ybu'],
params.getForEqs('eiflxeq')['ybd'],
params.getForEqs('eiflxeq')['ilg'])
def execHHflx(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransHHflx = EnthalpyFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransHHflx.plot_fhh(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('enthflx')['xbl'],
params.getForEqs('enthflx')['xbr'],
params.getForEqs('enthflx')['ybu'],
params.getForEqs('enthflx')['ybd'],
params.getForEqs('enthflx')['ilg'])
def execHHflxEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransHHflx = EnthalpyFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransHHflx.plot_fhh_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('hhflxeq')['xbl'],
params.getForEqs('hhflxeq')['xbr'],
params.getForEqs('hhflxeq')['ybu'],
params.getForEqs('hhflxeq')['ybd'],
params.getForEqs('hhflxeq')['ilg'])
def execHHvar(self, bconv, tconv):
params = self.params
tke_diss = 0.
tauL = 1.
# instantiate
ransHHvar = EnthalpyVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransHHvar.plot_sigma_hh(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('enthvar')['xbl'],
params.getForEqs('enthvar')['xbr'],
params.getForEqs('enthvar')['ybu'],
params.getForEqs('enthvar')['ybd'],
params.getForEqs('enthvar')['ilg'])
def execHHvarEq(self, tke_diss, tauL, bconv, tconv):
params = self.params
# instantiate
ransHHvar = EnthalpyVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransHHvar.plot_sigma_hh_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('hhvareq')['xbl'],
params.getForEqs('hhvareq')['xbr'],
params.getForEqs('hhvareq')['ybu'],
params.getForEqs('hhvareq')['ybd'],
params.getForEqs('hhvareq')['ilg'])
def execEiVar(self, bconv, tconv):
params = self.params
tke_diss = 0.
tauL = 1.
# instantiate
ransEiVar = InternalEnergyVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransEiVar.plot_sigma_ei(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('eintvar')['xbl'],
params.getForEqs('eintvar')['xbr'],
params.getForEqs('eintvar')['ybu'],
params.getForEqs('eintvar')['ybd'],
params.getForEqs('eintvar')['ilg'])
def execEiVarEq(self, tke_diss, tauL, bconv, tconv):
params = self.params
# instantiate
ransEiVar = InternalEnergyVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransEiVar.plot_sigma_ei_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('eivareq')['xbl'],
params.getForEqs('eivareq')['xbr'],
params.getForEqs('eivareq')['ybu'],
params.getForEqs('eivareq')['ybd'],
params.getForEqs('eivareq')['ilg'])
def execSS(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransSS = EntropyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransSS.plot_ss(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('entr')['xbl'],
params.getForEqs('entr')['xbr'],
params.getForEqs('entr')['ybu'],
params.getForEqs('entr')['ybd'],
params.getForEqs('entr')['ilg'])
def execSSeq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransSS = EntropyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransSS.plot_ss_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('sseq')['xbl'],
params.getForEqs('sseq')['xbr'],
params.getForEqs('sseq')['ybu'],
params.getForEqs('sseq')['ybd'],
params.getForEqs('sseq')['ilg'])
def execSSflx(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransSSflx = EntropyFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransSSflx.plot_fss(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('entrflx')['xbl'],
params.getForEqs('entrflx')['xbr'],
params.getForEqs('entrflx')['ybu'],
params.getForEqs('entrflx')['ybd'],
params.getForEqs('entrflx')['ilg'])
def execSSflxEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransSSflx = EntropyFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransSSflx.plot_fss_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ssflxeq')['xbl'],
params.getForEqs('ssflxeq')['xbr'],
params.getForEqs('ssflxeq')['ybu'],
params.getForEqs('ssflxeq')['ybd'],
params.getForEqs('ssflxeq')['ilg'])
ransSSflx.plot_fss_equation2(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ssflxeq')['xbl'],
params.getForEqs('ssflxeq')['xbr'],
params.getForEqs('ssflxeq')['ybu'],
params.getForEqs('ssflxeq')['ybd'],
params.getForEqs('ssflxeq')['ilg'])
def execSSvar(self, bconv, tconv):
params = self.params
tke_diss = 0.
tauL = 1.
# instantiate
ransSSvar = EntropyVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransSSvar.plot_sigma_ss(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('entrvar')['xbl'],
params.getForEqs('entrvar')['xbr'],
params.getForEqs('entrvar')['ybu'],
params.getForEqs('entrvar')['ybd'],
params.getForEqs('entrvar')['ilg'])
def execSSvarEq(self, tke_diss, tauL, bconv, tconv):
params = self.params
# instantiate
ransSSvar = EntropyVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransSSvar.plot_sigma_ss_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ssvareq')['xbl'],
params.getForEqs('ssvareq')['xbr'],
params.getForEqs('ssvareq')['ybu'],
params.getForEqs('ssvareq')['ybd'],
params.getForEqs('ssvareq')['ilg'])
def execDDvar(self, bconv, tconv):
params = self.params
tauL = 1.
# instantiate
ransDDvar = DensityVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
tauL,
params.getForProp('prop')['prefix'])
ransDDvar.plot_sigma_dd(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('densvar')['xbl'],
params.getForEqs('densvar')['xbr'],
params.getForEqs('densvar')['ybu'],
params.getForEqs('densvar')['ybd'],
params.getForEqs('densvar')['ilg'])
def execDDvarEq(self, tauL, bconv, tconv):
params = self.params
# instantiate
ransSSvar = DensityVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
tauL,
params.getForProp('prop')['prefix'])
ransSSvar.plot_sigma_dd_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ddvareq')['xbl'],
params.getForEqs('ddvareq')['xbr'],
params.getForEqs('ddvareq')['ybu'],
params.getForEqs('ddvareq')['ybd'],
params.getForEqs('ddvareq')['ilg'])
def execTMSflx(self, bconv, tconv, lc):
params = self.params
# instantiate
ransTMSflx = TurbulentMassFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
lc)
ransTMSflx.plot_a(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('tmsflx')['xbl'],
params.getForEqs('tmsflx')['xbr'],
params.getForEqs('tmsflx')['ybu'],
params.getForEqs('tmsflx')['ybd'],
params.getForEqs('tmsflx')['ilg'])
def execAeq(self, bconv, tconv, lc):
params = self.params
# instantiate
ransTMSflx = TurbulentMassFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
lc)
ransTMSflx.plot_a_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('aeq')['xbl'],
params.getForEqs('aeq')['xbr'],
params.getForEqs('aeq')['ybu'],
params.getForEqs('aeq')['ybd'],
params.getForEqs('aeq')['ilg'])
def execDSVC(self, bconv, tconv):
params = self.params
# instantiate
ransDSVC = DensitySpecificVolumeCovarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransDSVC.plot_b(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('dsvc')['xbl'],
params.getForEqs('dsvc')['xbr'],
params.getForEqs('dsvc')['ybu'],
params.getForEqs('dsvc')['ybd'],
params.getForEqs('dsvc')['ilg'])
def execBeq(self, bconv, tconv):
params = self.params
# instantiate
ransDSVC = DensitySpecificVolumeCovarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransDSVC.plot_b_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('beq')['xbl'],
params.getForEqs('beq')['xbr'],
params.getForEqs('beq')['ybu'],
params.getForEqs('beq')['ybd'],
params.getForEqs('beq')['ilg'])
def execRhoTemp(self, bconv, tconv):
params = self.params
# instantiate
ransTempRho = TemperatureDensity(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransTempRho.plot_ttdd(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ttdd')['xbl'],
params.getForEqs('ttdd')['xbr'],
params.getForEqs('ttdd')['ybu'],
params.getForEqs('ttdd')['ybd'],
params.getForEqs('ttdd')['ilg'])
def execPressEi(self, bconv, tconv):
params = self.params
# instantiate
ransPressEi = PressureInternalEnergy(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransPressEi.plot_ppei(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppei')['xbl'],
params.getForEqs('ppei')['xbr'],
params.getForEqs('ppei')['ybu'],
params.getForEqs('ppei')['ybd'],
params.getForEqs('ppei')['ilg'])
def execEnuc(self, bconv, tconv):
params = self.params
# instantiate
ransEnuc = NuclearEnergyProduction(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# ransEnuc.plot_enuc(params.getForProp('prop')['laxis'],
# bconv, tconv,
# params.getForEqs('enuc')['xbl'],
# params.getForEqs('enuc')['xbr'],
# params.getForEqs('enuc')['ybu'],
# params.getForEqs('enuc')['ybd'],
# params.getForEqs('enuc')['ilg'])
ransEnuc.plot_enuc2(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('enuc')['xbl'],
params.getForEqs('enuc')['xbr'],
params.getForEqs('enuc')['ybu'],
params.getForEqs('enuc')['ybd'],
params.getForEqs('enuc')['ilg'])
# ransEnuc.plot_enuc_per_volume(params.getForProp('prop')['laxis'], \
# params.getForEqs('enuc')['xbl'], \
# params.getForEqs('enuc')['xbr'], \
# params.getForEqs('enuc')['ybu'], \
# params.getForEqs('enuc')['ybd'], \
# params.getForEqs('enuc')['ilg'])
def execGrav(self, bconv, tconv):
params = self.params
# instantiate
ransGrav = Gravity(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransGrav.plot_grav(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('grav')['xbl'],
params.getForEqs('grav')['xbr'],
params.getForEqs('grav')['ybu'],
params.getForEqs('grav')['ybd'],
params.getForEqs('grav')['ilg'])
def execNablas(self, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransNablas = TemperatureGradients(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransNablas.plot_nablas(params.getForProp('prop')['laxis'],
bconv, tconv, super_ad_i, super_ad_o,
params.getForEqs('nablas')['xbl'],
params.getForEqs('nablas')['xbr'],
params.getForEqs('nablas')['ybu'],
params.getForEqs('nablas')['ybd'],
params.getForEqs('nablas')['ilg'])
#ransNablas.plot_nablas2(params.getForProp('prop')['laxis'],
# bconv, tconv, super_ad_i, super_ad_o,
# params.getForEqs('nablas')['xbl'],
# params.getForEqs('nablas')['xbr'],
# params.getForEqs('nablas')['ybu'],
# params.getForEqs('nablas')['ybd'],
# params.getForEqs('nablas')['ilg'])
def execDegeneracy(self):
params = self.params
# instantiate
ransDeg = Degeneracy(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransDeg.plot_degeneracy(params.getForProp('prop')['laxis'],
params.getForEqs('psi')['xbl'],
params.getForEqs('psi')['xbr'],
params.getForEqs('psi')['ybu'],
params.getForEqs('psi')['ybd'],
params.getForEqs('psi')['ilg'])
def execVelocitiesMeanExp(self, bconv, tconv):
params = self.params
# instantiate
ransVelmeanExp = VelocitiesMeanExp(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransVelmeanExp.plot_velocities(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('velbgr')['xbl'],
params.getForEqs('velbgr')['xbr'],
params.getForEqs('velbgr')['ybu'],
params.getForEqs('velbgr')['ybd'],
params.getForEqs('velbgr')['ilg'])
def execVelocitiesMLTturb(self, bconv, tconv, uconv, super_ad_i, super_ad_o, ):
params = self.params
# instantiate
ransVelMLTturb = VelocitiesMLTturb(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
bconv, tconv, uconv, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransVelMLTturb.plot_velocities(params.getForProp('prop')['laxis'],
params.getForEqs('velmlt')['xbl'],
params.getForEqs('velmlt')['xbr'],
params.getForEqs('velmlt')['ybu'],
params.getForEqs('velmlt')['ybd'],
params.getForEqs('velmlt')['ilg'])
def execBruntV(self, bconv, tconv):
params = self.params
# instantiate
ransBruntV = BruntVaisalla(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransBruntV.plot_bruntvaisalla(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('nsq')['xbl'],
params.getForEqs('nsq')['xbr'],
params.getForEqs('nsq')['ybu'],
params.getForEqs('nsq')['ybd'],
params.getForEqs('nsq')['ilg'])
# ransBruntV.plot_ri(params.getForProp('prop')['laxis'],
# bconv, tconv,
# params.getForEqs('nsq')['xbl'],
# params.getForEqs('nsq')['xbr'],
# params.getForEqs('nsq')['ybu'],
# params.getForEqs('nsq')['ybd'],
# params.getForEqs('nsq')['ilg'])
def execBuoyancy(self, bconv, tconv):
params = self.params
# instantiate
ransBuo = Buoyancy(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransBuo.plot_buoyancy(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('buo')['xbl'],
params.getForEqs('buo')['xbr'],
params.getForEqs('buo')['ybu'],
params.getForEqs('buo')['ybd'],
params.getForEqs('buo')['ilg'])
def execRelativeRmsFlct(self, bconv, tconv):
params = self.params
# instantiate
ransRms = RelativeRMSflct(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransRms.plot_relative_rms_flct(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('relrmsflct')['xbl'],
params.getForEqs('relrmsflct')['xbr'],
params.getForEqs('relrmsflct')['ybu'],
params.getForEqs('relrmsflct')['ybd'],
params.getForEqs('relrmsflct')['ilg'])
# ransRms.plot_relative_rms_flct2(params.getForProp('prop')['laxis'],
# bconv, tconv,
# params.getForEqs('relrmsflct')['xbl'],
# params.getForEqs('relrmsflct')['xbr'],
# params.getForEqs('relrmsflct')['ybu'],
# params.getForEqs('relrmsflct')['ybd'],
# params.getForEqs('relrmsflct')['ilg'])
def execAbarZbar(self, bconv, tconv):
params = self.params
# instantiate
ransAZ = AbarZbar(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransAZ.plot_abarzbar(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('abzb')['xbl'],
params.getForEqs('abzb')['xbr'],
params.getForEqs('abzb')['ybu'],
params.getForEqs('abzb')['ybd'],
params.getForEqs('abzb')['ilg'])
def execKe(self, bconv, tconv):
params = self.params
kolmrate = 0.
# instantiate
ransKe = KineticEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot kinetic energy
ransKe.plot_ke(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('kine')['xbl'],
params.getForEqs('kine')['xbr'],
params.getForEqs('kine')['ybu'],
params.getForEqs('kine')['ybd'],
params.getForEqs('kine')['ilg'])
def execKeEq(self, kolmrate, bconv, tconv):
params = self.params
# instantiate
ransKe = KineticEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot kinetic energy equation
ransKe.plot_ke_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('kieq')['xbl'],
params.getForEqs('kieq')['xbr'],
params.getForEqs('kieq')['ybu'],
params.getForEqs('kieq')['ybd'],
params.getForEqs('kieq')['ilg'])
def execTe(self, bconv, tconv):
params = self.params
kolmrate = 0.
# instantiate
ransTe = TotalEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot total energy
ransTe.plot_et(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('toe')['xbl'],
params.getForEqs('toe')['xbr'],
params.getForEqs('toe')['ybu'],
params.getForEqs('toe')['ybd'],
params.getForEqs('toe')['ilg'])
def execTeEq(self, kolmrate, bconv, tconv):
params = self.params
# instantiate
ransTe = TotalEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot total energy equation
ransTe.plot_et_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('teeq')['xbl'],
params.getForEqs('teeq')['xbr'],
params.getForEqs('teeq')['ybu'],
params.getForEqs('teeq')['ybd'],
params.getForEqs('teeq')['ilg'])
def execRxx(self, bconv, tconv):
params = self.params
kolmrate = 0.
# instantiate
ransRxx = ReynoldsStressXXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress rxx
ransRxx.plot_rxx(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('rxx')['xbl'],
params.getForEqs('rxx')['xbr'],
params.getForEqs('rxx')['ybu'],
params.getForEqs('rxx')['ybd'],
params.getForEqs('rxx')['ilg'])
def execRxxEq(self, kolmrate, bconv, tconv):
params = self.params
# instantiate
ransRxx = ReynoldsStressXXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress rxx
ransRxx.plot_rxx_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('rexxeq')['xbl'],
params.getForEqs('rexxeq')['xbr'],
params.getForEqs('rexxeq')['ybu'],
params.getForEqs('rexxeq')['ybd'],
params.getForEqs('rexxeq')['ilg'])
def execRyy(self, bconv, tconv):
params = self.params
kolmrate = 0.
# instantiate
ransRyy = ReynoldsStressYYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress ryy
ransRyy.plot_ryy(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ryy')['xbl'],
params.getForEqs('ryy')['xbr'],
params.getForEqs('ryy')['ybu'],
params.getForEqs('ryy')['ybd'],
params.getForEqs('ryy')['ilg'])
def execRyyEq(self, kolmrate, bconv, tconv):
params = self.params
# instantiate
ransRyy = ReynoldsStressYYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress ryy
ransRyy.plot_ryy_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('reyyeq')['xbl'],
params.getForEqs('reyyeq')['xbr'],
params.getForEqs('reyyeq')['ybu'],
params.getForEqs('reyyeq')['ybd'],
params.getForEqs('reyyeq')['ilg'])
def execRzz(self, bconv, tconv):
params = self.params
kolmrate = 0.
# instantiate
ransRzz = ReynoldsStressZZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress rzz
ransRzz.plot_rzz(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('rzz')['xbl'],
params.getForEqs('rzz')['xbr'],
params.getForEqs('rzz')['ybu'],
params.getForEqs('rzz')['ybd'],
params.getForEqs('rzz')['ilg'])
def execRzzEq(self, kolmrate, bconv, tconv):
params = self.params
# instantiate
ransRzz = ReynoldsStressZZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress rzz
ransRzz.plot_rzz_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('rezzeq')['xbl'],
params.getForEqs('rezzeq')['xbr'],
params.getForEqs('rezzeq')['ybu'],
params.getForEqs('rezzeq')['ybd'],
params.getForEqs('rezzeq')['ilg'])
def execAbar(self, bconv, tconv):
params = self.params
# instantiate
ransAbar = AbarTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot abar
ransAbar.plot_abar(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('abar')['xbl'],
params.getForEqs('abar')['xbr'],
params.getForEqs('abar')['ybu'],
params.getForEqs('abar')['ybd'],
params.getForEqs('abar')['ilg'])
def execAbarEq(self, bconv, tconv):
params = self.params
# instantiate
ransAbar = AbarTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot abar equation
ransAbar.plot_abar_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('abreq')['xbl'],
params.getForEqs('abreq')['xbr'],
params.getForEqs('abreq')['ybu'],
params.getForEqs('abreq')['ybd'],
params.getForEqs('abreq')['ilg'])
def execFabarx(self, bconv, tconv):
params = self.params
# instantiate
ransFabarx = AbarFluxTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot fabarx
ransFabarx.plot_abarflux(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('abflx')['xbl'],
params.getForEqs('abflx')['xbr'],
params.getForEqs('abflx')['ybu'],
params.getForEqs('abflx')['ybd'],
params.getForEqs('abflx')['ilg'])
def execFabarxEq(self, bconv, tconv):
params = self.params
# instantiate
ransFabarx = AbarFluxTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot fabarx equation
ransFabarx.plot_abarflux_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('fabxeq')['xbl'],
params.getForEqs('fabxeq')['xbr'],
params.getForEqs('fabxeq')['ybu'],
params.getForEqs('fabxeq')['ybd'],
params.getForEqs('fabxeq')['ilg'])
def execZbar(self, bconv, tconv):
params = self.params
# instantiate
ransZbar = ZbarTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# plot zbar
ransZbar.plot_zbar(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('zbar')['xbl'],
params.getForEqs('zbar')['xbr'],
params.getForEqs('zbar')['ybu'],
params.getForEqs('zbar')['ybd'],
params.getForEqs('zbar')['ilg'])
def execZbarEq(self, bconv, tconv):
params = self.params
# instantiate
ransZbar = ZbarTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# plot zbar equation
ransZbar.plot_zbar_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('zbreq')['xbl'],
params.getForEqs('zbreq')['xbr'],
params.getForEqs('zbreq')['ybu'],
params.getForEqs('zbreq')['ybd'],
params.getForEqs('zbreq')['ilg'])
def execFzbarx(self, bconv, tconv):
params = self.params
# instantiate
ransFzbarx = ZbarFluxTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# plot fzbarx
ransFzbarx.plot_zbarflux(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('zbflx')['xbl'],
params.getForEqs('zbflx')['xbr'],
params.getForEqs('zbflx')['ybu'],
params.getForEqs('zbflx')['ybd'],
params.getForEqs('zbflx')['ilg'])
def execFzbarxEq(self, bconv, tconv):
params = self.params
# instantiate
ransFzbarx = ZbarFluxTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# plot fzbarx equation
ransFzbarx.plot_zbarflux_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('fzbxeq')['xbl'],
params.getForEqs('fzbxeq')['xbr'],
params.getForEqs('fzbxeq')['ybu'],
params.getForEqs('fzbxeq')['ybd'],
params.getForEqs('fzbxeq')['ilg'])
def execPP(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransPP = PressureEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPP.plot_pp(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('press')['xbl'],
params.getForEqs('press')['xbr'],
params.getForEqs('press')['ybu'],
params.getForEqs('press')['ybd'],
params.getForEqs('press')['ilg'])
# ransPP.plot_dAdt(params.getForProp('prop')['laxis'], \
# params.getForEqs('press')['xbl'], \
# params.getForEqs('press')['xbr'], \
# params.getForEqs('press')['ybu'], \
# params.getForEqs('press')['ybd'], \
# params.getForEqs('press')['ilg'])
def execPPeq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransPP = PressureEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPP.plot_pp_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppeq')['xbl'],
params.getForEqs('ppeq')['xbr'],
params.getForEqs('ppeq')['ybu'],
params.getForEqs('ppeq')['ybd'],
params.getForEqs('ppeq')['ilg'])
def execPPxflx(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransPPxflx = PressureFluxXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPxflx.plot_fppx(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('pressxflx')['xbl'],
params.getForEqs('pressxflx')['xbr'],
params.getForEqs('pressxflx')['ybu'],
params.getForEqs('pressxflx')['ybd'],
params.getForEqs('pressxflx')['ilg'])
def execPPxflxEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransPPxflx = PressureFluxXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPxflx.plot_fppx_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppxflxeq')['xbl'],
params.getForEqs('ppxflxeq')['xbr'],
params.getForEqs('ppxflxeq')['ybu'],
params.getForEqs('ppxflxeq')['ybd'],
params.getForEqs('ppxflxeq')['ilg'])
def execPPyflx(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransPPyflx = PressureFluxYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPyflx.plot_fppy(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('pressyflx')['xbl'],
params.getForEqs('pressyflx')['xbr'],
params.getForEqs('pressyflx')['ybu'],
params.getForEqs('pressyflx')['ybd'],
params.getForEqs('pressyflx')['ilg'])
def execPPyflxEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransPPyflx = PressureFluxYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPyflx.plot_fppy_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppyflxeq')['xbl'],
params.getForEqs('ppyflxeq')['xbr'],
params.getForEqs('ppyflxeq')['ybu'],
params.getForEqs('ppyflxeq')['ybd'],
params.getForEqs('ppyflxeq')['ilg'])
def execPPzflx(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransPPzflx = PressureFluxZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPzflx.plot_fppz(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('presszflx')['xbl'],
params.getForEqs('presszflx')['xbr'],
params.getForEqs('presszflx')['ybu'],
params.getForEqs('presszflx')['ybd'],
params.getForEqs('presszflx')['ilg'])
def execPPzflxEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransPPzflx = PressureFluxZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPzflx.plot_fppz_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppzflxeq')['xbl'],
params.getForEqs('ppzflxeq')['xbr'],
params.getForEqs('ppzflxeq')['ybu'],
params.getForEqs('ppzflxeq')['ybd'],
params.getForEqs('ppzflxeq')['ilg'])
def execPPvar(self, bconv, tconv):
params = self.params
tke_diss = 0.
tauL = 1.
# instantiate
ransPPvar = PressureVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransPPvar.plot_sigma_pp(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('pressvar')['xbl'],
params.getForEqs('pressvar')['xbr'],
params.getForEqs('pressvar')['ybu'],
params.getForEqs('pressvar')['ybd'],
params.getForEqs('pressvar')['ilg'])
def execPPvarEq(self, tke_diss, tauL, bconv, tconv):
params = self.params
# instantiate
ransPPvar = PressureVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransPPvar.plot_sigma_pp_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppvareq')['xbl'],
params.getForEqs('ppvareq')['xbr'],
params.getForEqs('ppvareq')['ybu'],
params.getForEqs('ppvareq')['ybd'],
params.getForEqs('ppvareq')['ilg'])
def execTT(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransTT = TemperatureEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransTT.plot_tt(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('temp')['xbl'],
params.getForEqs('temp')['xbr'],
params.getForEqs('temp')['ybu'],
params.getForEqs('temp')['ybd'],
params.getForEqs('temp')['ilg'])
def execTTeq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransTT = TemperatureEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransTT.plot_tt_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('tteq')['xbl'],
params.getForEqs('tteq')['xbr'],
params.getForEqs('tteq')['ybu'],
params.getForEqs('tteq')['ybd'],
params.getForEqs('tteq')['ilg'])
def execTTvar(self, bconv, tconv):
params = self.params
tke_diss = 0.
tauL = 1.
# instantiate
ransTTvar = TemperatureVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransTTvar.plot_sigma_tt(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('tempvar')['xbl'],
params.getForEqs('tempvar')['xbr'],
params.getForEqs('tempvar')['ybu'],
params.getForEqs('tempvar')['ybd'],
params.getForEqs('tempvar')['ilg'])
def execTTvarEq(self, tke_diss, tauL, bconv, tconv):
params = self.params
# instantiate
ransTTvar = TemperatureVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransTTvar.plot_sigma_tt_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ttvareq')['xbl'],
params.getForEqs('ttvareq')['xbr'],
params.getForEqs('ttvareq')['ybu'],
params.getForEqs('ttvareq')['ybd'],
params.getForEqs('ttvareq')['ilg'])
def execTTflx(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransTTflx = TemperatureFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransTTflx.plot_ftt(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('tempflx')['xbl'],
params.getForEqs('tempflx')['xbr'],
params.getForEqs('tempflx')['ybu'],
params.getForEqs('tempflx')['ybd'],
params.getForEqs('tempflx')['ilg'])
def execTTflxEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransTTflx = TemperatureFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransTTflx.plot_ftt_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ttflxeq')['xbl'],
params.getForEqs('ttflxeq')['xbr'],
params.getForEqs('ttflxeq')['ybu'],
params.getForEqs('ttflxeq')['ybd'],
params.getForEqs('ttflxeq')['ilg'])
def execHH(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransHH = EnthalpyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransHH.plot_hh(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('enth')['xbl'],
params.getForEqs('enth')['xbr'],
params.getForEqs('enth')['ybu'],
params.getForEqs('enth')['ybd'],
params.getForEqs('enth')['ilg'])
def execHHeq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransHH = EnthalpyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransHH.plot_hh_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('hheq')['xbl'],
params.getForEqs('hheq')['xbr'],
params.getForEqs('hheq')['ybu'],
params.getForEqs('hheq')['ybd'],
params.getForEqs('hheq')['ilg'])
def execFtvfhX(self, bconv, tconv):
params = self.params
# instantiate
ransFtvfhX = FullTurbulenceVelocityFieldHypothesisX(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
ransFtvfhX.plot_ftvfhX_equation(params.getForProp('prop')['laxis'],
params.getForEqs('ftvfh_x')['xbl'],
params.getForEqs('ftvfh_x')['xbr'],
params.getForEqs('ftvfh_x')['ybu'],
params.getForEqs('ftvfh_x')['ybd'],
params.getForEqs('ftvfh_x')['ilg'])
def execFtvfhY(self, bconv, tconv):
params = self.params
# instantiate
ransFtvfhY = FullTurbulenceVelocityFieldHypothesisY(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
ransFtvfhY.plot_ftvfhY_equation(params.getForProp('prop')['laxis'],
params.getForEqs('ftvfh_y')['xbl'],
params.getForEqs('ftvfh_y')['xbr'],
params.getForEqs('ftvfh_y')['ybu'],
params.getForEqs('ftvfh_y')['ybd'],
params.getForEqs('ftvfh_y')['ilg'])
def execFtvfhZ(self, bconv, tconv):
params = self.params
# instantiate
ransFtvfhZ = FullTurbulenceVelocityFieldHypothesisZ(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
ransFtvfhZ.plot_ftvfhZ_equation(params.getForProp('prop')['laxis'],
params.getForEqs('ftvfh_z')['xbl'],
params.getForEqs('ftvfh_z')['xbr'],
params.getForEqs('ftvfh_z')['ybu'],
params.getForEqs('ftvfh_z')['ybd'],
params.getForEqs('ftvfh_z')['ilg'])
def execUxfpd(self, bconv, tconv):
params = self.params
# instantiate
ransUxfpd = UxfpdIdentity(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
ransUxfpd.plot_uxfpd_identity(params.getForProp('prop')['laxis'],
params.getForEqs('uxfpd')['xbl'],
params.getForEqs('uxfpd')['xbr'],
params.getForEqs('uxfpd')['ybu'],
params.getForEqs('uxfpd')['ybd'],
params.getForEqs('uxfpd')['ilg'])
def execUyfpd(self, bconv, tconv):
params = self.params
# instantiate
ransUyfpd = UyfpdIdentity(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
ransUyfpd.plot_uyfpd_identity(params.getForProp('prop')['laxis'],
params.getForEqs('uyfpd')['xbl'],
params.getForEqs('uyfpd')['xbr'],
params.getForEqs('uyfpd')['ybu'],
params.getForEqs('uyfpd')['ybd'],
params.getForEqs('uyfpd')['ilg'])
def execUzfpd(self, bconv, tconv):
params = self.params
# instantiate
ransUzfpd = UzfpdIdentity(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
ransUzfpd.plot_uzfpd_identity(params.getForProp('prop')['laxis'],
params.getForEqs('uzfpd')['xbl'],
params.getForEqs('uzfpd')['xbr'],
params.getForEqs('uzfpd')['ybu'],
params.getForEqs('uzfpd')['ybd'],
params.getForEqs('uzfpd')['ilg'])
def execDivu(self, bconv, tconv):
params = self.params
# instantiate
ransDivu = DivuDilatation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
ransDivu.plot_divu(params.getForProp('prop')['laxis'],
params.getForEqs('divu')['xbl'],
params.getForEqs('divu')['xbr'],
params.getForEqs('divu')['ybu'],
params.getForEqs('divu')['ybd'],
params.getForEqs('divu')['ilg'])
#ransDivu.plot_divu_space_time(params.getForProp('prop')['laxis'],
# bconv, tconv,
# params.getForEqs('conteqfdd')['xbl'],
# params.getForEqs('conteqfdd')['xbr'],
# params.getForEqs('conteqfdd')['ybu'],
# params.getForEqs('conteqfdd')['ybd'],
# params.getForEqs('conteqfdd')['ilg'])
def SetMatplotlibParams(self):
""" This routine sets some standard values for matplotlib """
""" to obtain publication-quality figures """
# plt.rc('text',usetex=True)
# plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
plt.rc('font', **{'family': 'serif', 'serif': ['Times New Roman']})
plt.rc('font', size=16.)
plt.rc('lines', linewidth=2, markeredgewidth=2., markersize=12)
plt.rc('axes', linewidth=1.5)
plt.rcParams['xtick.major.size'] = 8.
plt.rcParams['xtick.minor.size'] = 4.
plt.rcParams['figure.subplot.bottom'] = 0.15
plt.rcParams['figure.subplot.left'] = 0.17
plt.rcParams['figure.subplot.right'] = 0.85
plt.rcParams.update({'figure.max_open_warning': 0})
| from EQUATIONS.ContinuityEquationWithMassFlux import ContinuityEquationWithMassFlux
from EQUATIONS.ContinuityEquationWithFavrianDilatation import ContinuityEquationWithFavrianDilatation
from EQUATIONS.MomentumEquationX import MomentumEquationX
from EQUATIONS.MomentumEquationY import MomentumEquationY
from EQUATIONS.MomentumEquationZ import MomentumEquationZ
from EQUATIONS.ReynoldsStressXXequation import ReynoldsStressXXequation
from EQUATIONS.ReynoldsStressYYequation import ReynoldsStressYYequation
from EQUATIONS.ReynoldsStressZZequation import ReynoldsStressZZequation
from EQUATIONS.TurbulentKineticEnergyEquation import TurbulentKineticEnergyEquation
from EQUATIONS.TurbulentKineticEnergyEquationRadial import TurbulentKineticEnergyEquationRadial
from EQUATIONS.TurbulentKineticEnergyEquationHorizontal import TurbulentKineticEnergyEquationHorizontal
from EQUATIONS.InternalEnergyEquation import InternalEnergyEquation
from EQUATIONS.InternalEnergyFluxEquation import InternalEnergyFluxEquation
from EQUATIONS.InternalEnergyVarianceEquation import InternalEnergyVarianceEquation
from EQUATIONS.KineticEnergyEquation import KineticEnergyEquation
from EQUATIONS.TotalEnergyEquation import TotalEnergyEquation
from EQUATIONS.EntropyEquation import EntropyEquation
from EQUATIONS.EntropyFluxEquation import EntropyFluxEquation
from EQUATIONS.EntropyVarianceEquation import EntropyVarianceEquation
from EQUATIONS.PressureEquation import PressureEquation
from EQUATIONS.PressureFluxXequation import PressureFluxXequation
from EQUATIONS.PressureFluxYequation import PressureFluxYequation
from EQUATIONS.PressureFluxZequation import PressureFluxZequation
from EQUATIONS.PressureVarianceEquation import PressureVarianceEquation
from EQUATIONS.TemperatureEquation import TemperatureEquation
from EQUATIONS.TemperatureFluxEquation import TemperatureFluxEquation
from EQUATIONS.TemperatureVarianceEquation import TemperatureVarianceEquation
from EQUATIONS.EnthalpyEquation import EnthalpyEquation
from EQUATIONS.EnthalpyFluxEquation import EnthalpyFluxEquation
from EQUATIONS.EnthalpyVarianceEquation import EnthalpyVarianceEquation
from EQUATIONS.DensityVarianceEquation import DensityVarianceEquation
from EQUATIONS.TurbulentMassFluxEquation import TurbulentMassFluxEquation
from EQUATIONS.DensitySpecificVolumeCovarianceEquation import DensitySpecificVolumeCovarianceEquation
from EQUATIONS.XtransportEquation import XtransportEquation
from EQUATIONS.XfluxXequation import XfluxXequation
from EQUATIONS.XfluxYequation import XfluxYequation
from EQUATIONS.XfluxZequation import XfluxZequation
from EQUATIONS.XvarianceEquation import XvarianceEquation
from EQUATIONS.Xdiffusivity import Xdiffusivity
from EQUATIONS.XdamkohlerNumber import XdamkohlerNumber
from EQUATIONS.AbarTransportEquation import AbarTransportEquation
from EQUATIONS.ZbarTransportEquation import ZbarTransportEquation
from EQUATIONS.AbarFluxTransportEquation import AbarFluxTransportEquation
from EQUATIONS.ZbarFluxTransportEquation import ZbarFluxTransportEquation
from EQUATIONS.TemperatureDensity import TemperatureDensity
from EQUATIONS.PressureInternalEnergy import PressureInternalEnergy
from EQUATIONS.NuclearEnergyProduction import NuclearEnergyProduction
from EQUATIONS.Gravity import Gravity
from EQUATIONS.TemperatureGradients import TemperatureGradients
from EQUATIONS.Degeneracy import Degeneracy
from EQUATIONS.VelocitiesMeanExp import VelocitiesMeanExp
from EQUATIONS.VelocitiesMLTturb import VelocitiesMLTturb
from EQUATIONS.RelativeRMSflct import RelativeRMSflct
from EQUATIONS.AbarZbar import AbarZbar
from EQUATIONS.BruntVaisalla import BruntVaisalla
from EQUATIONS.Buoyancy import Buoyancy
# import classes for hydrodynamic stellar structure equations
from EQUATIONS.HsseContinuityEquation import HsseContinuityEquation
from EQUATIONS.HsseMomentumEquationX import HsseMomentumEquationX
from EQUATIONS.HsseTemperatureEquation import HsseTemperatureEquation
from EQUATIONS.HsseLuminosityEquation import HsseLuminosityEquation
from EQUATIONS.HsseXtransportEquation import HsseXtransportEquation
# from class for full turbulence velocity field hypothesis
from EQUATIONS.FullTurbulenceVelocityFieldHypothesisX import FullTurbulenceVelocityFieldHypothesisX
from EQUATIONS.FullTurbulenceVelocityFieldHypothesisY import FullTurbulenceVelocityFieldHypothesisY
from EQUATIONS.FullTurbulenceVelocityFieldHypothesisZ import FullTurbulenceVelocityFieldHypothesisZ
from EQUATIONS.UxfpdIdentity import UxfpdIdentity
from EQUATIONS.UyfpdIdentity import UyfpdIdentity
from EQUATIONS.UzfpdIdentity import UzfpdIdentity
from EQUATIONS.DivuDilatation import DivuDilatation
import matplotlib.pyplot as plt
class MasterPlot():
    def __init__(self, params):
        """Store the parameter container used by all exec* plotting methods."""
        self.params = params
def execRho(self, bconv, tconv):
params = self.params
# instantiate
ransCONT = ContinuityEquationWithFavrianDilatation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot density
ransCONT.plot_rho(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('rho')['xbl'],
params.getForEqs('rho')['xbr'],
params.getForEqs('rho')['ybu'],
params.getForEqs('rho')['ybd'],
params.getForEqs('rho')['ilg'])
# ransCONT.plot_mm_vs_MM(params.getForProp('prop')['laxis'],
# params.getForEqs('rho')['xbl'],
# params.getForEqs('rho')['xbr'],
# params.getForEqs('rho')['ybu'],
# params.getForEqs('rho')['ybd'],
# params.getForEqs('rho')['ilg'])
def execContEq(self, bconv, tconv):
params = self.params
# instantiate
ransCONT = ContinuityEquationWithFavrianDilatation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation
ransCONT.plot_continuity_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('conteq')['xbl'],
params.getForEqs('conteq')['xbr'],
params.getForEqs('conteq')['ybu'],
params.getForEqs('conteq')['ybd'],
params.getForEqs('conteq')['ilg'])
def execContEqBar(self):
params = self.params
# instantiate
ransCONT = ContinuityEquationWithFavrianDilatation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation integral budget
ransCONT.plot_continuity_equation_integral_budget(params.getForProp('prop')['laxis'],
params.getForEqsBar('conteqBar')['xbl'],
params.getForEqsBar('conteqBar')['xbr'],
params.getForEqsBar('conteqBar')['ybu'],
params.getForEqsBar('conteqBar')['ybd'])
def execContFddEq(self, bconv, tconv):
params = self.params
# instantiate
ransCONTfdd = ContinuityEquationWithMassFlux(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation
ransCONTfdd.plot_continuity_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('conteqfdd')['xbl'],
params.getForEqs('conteqfdd')['xbr'],
params.getForEqs('conteqfdd')['ybu'],
params.getForEqs('conteqfdd')['ybd'],
params.getForEqs('conteqfdd')['ilg'])
# ransCONTfdd.plot_Frho_space_time(params.getForProp('prop')['laxis'],
# bconv, tconv,
# params.getForEqs('conteqfdd')['xbl'],
# params.getForEqs('conteqfdd')['xbr'],
# params.getForEqs('conteqfdd')['ybu'],
# params.getForEqs('conteqfdd')['ybd'],
# params.getForEqs('conteqfdd')['ilg'])
def execContFddEqBar(self):
params = self.params
# instantiate
ransCONTfdd = ContinuityEquationWithMassFlux(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation integral budget
ransCONTfdd.plot_continuity_equation_integral_budget(params.getForProp('prop')['laxis'],
params.getForEqsBar('conteqfddBar')['xbl'],
params.getForEqsBar('conteqfddBar')['xbr'],
params.getForEqsBar('conteqfddBar')['ybu'],
params.getForEqsBar('conteqfddBar')['ybd'])
def execHssContEq(self, bconv, tconv):
params = self.params
# instantiate
ranshssecont = HsseContinuityEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
# plot continuity equation
ranshssecont.plot_continuity_equation(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# plot continuity equation alternative
ranshssecont.plot_continuity_equation_2(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# plot continuity equation alternative simplified
ranshssecont.plot_continuity_equation_3(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# plot continuity equation alternative simplified - cracking on velocities
# ranshssecont.plot_velocities(params.getForProp('prop')['laxis'],\
# params.getForEqs('cteqhsse')['xbl'],\
# params.getForEqs('cteqhsse')['xbr'],\
# params.getForEqs('cteqhsse')['ybu'],\
# params.getForEqs('cteqhsse')['ybd'],\
# params.getForEqs('cteqhsse')['ilg'])
ranshssecont.plot_dilatation_flux(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# ranshssecont.plot_mass_flux_acceleration(params.getForProp('prop')['laxis'],\
# params.getForEqs('cteqhsse')['xbl'],\
# params.getForEqs('cteqhsse')['xbr'],\
# params.getForEqs('cteqhsse')['ybu'],\
# params.getForEqs('cteqhsse')['ybd'],\
# params.getForEqs('cteqhsse')['ilg'])
def execHssMomxEq(self, bconv, tconv):
params = self.params
# instantiate
ranshssemomx = HsseMomentumEquationX(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
# plot hsse momentm equation
ranshssemomx.plot_momentum_equation_x(params.getForProp('prop')['laxis'],
params.getForEqs('mxeqhsse')['xbl'],
params.getForEqs('mxeqhsse')['xbr'],
params.getForEqs('mxeqhsse')['ybu'],
params.getForEqs('mxeqhsse')['ybd'],
params.getForEqs('mxeqhsse')['ilg'])
# plot hsse momentm equation alternative
ranshssemomx.plot_momentum_equation_x_2(params.getForProp('prop')['laxis'],
params.getForEqs('mxeqhsse')['xbl'],
params.getForEqs('mxeqhsse')['xbr'],
params.getForEqs('mxeqhsse')['ybu'],
params.getForEqs('mxeqhsse')['ybd'],
params.getForEqs('mxeqhsse')['ilg'])
# plot hsse momentm equation alternative simplified
ranshssemomx.plot_momentum_equation_x_3(params.getForProp('prop')['laxis'],
params.getForEqs('mxeqhsse')['xbl'],
params.getForEqs('mxeqhsse')['xbr'],
params.getForEqs('mxeqhsse')['ybu'],
params.getForEqs('mxeqhsse')['ybd'],
params.getForEqs('mxeqhsse')['ilg'])
def execHssTempEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ranshssetemp = HsseTemperatureEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
tke_diss, bconv, tconv,
params.getForProp('prop')['prefix'])
# plot hsse temperature equation
ranshssetemp.plot_tt_equation(params.getForProp('prop')['laxis'],
params.getForEqs('tpeqhsse')['xbl'],
params.getForEqs('tpeqhsse')['xbr'],
params.getForEqs('tpeqhsse')['ybu'],
params.getForEqs('tpeqhsse')['ybd'],
params.getForEqs('tpeqhsse')['ilg'])
# plot hsse temperature equation alternative
ranshssetemp.plot_tt_equation_2(params.getForProp('prop')['laxis'],
params.getForEqs('tpeqhsse')['xbl'],
params.getForEqs('tpeqhsse')['xbr'],
params.getForEqs('tpeqhsse')['ybu'],
params.getForEqs('tpeqhsse')['ybd'],
params.getForEqs('tpeqhsse')['ilg'])
# plot hsse temperature equation alternative simplified
ranshssetemp.plot_tt_equation_3(params.getForProp('prop')['laxis'],
params.getForEqs('tpeqhsse')['xbl'],
params.getForEqs('tpeqhsse')['xbr'],
params.getForEqs('tpeqhsse')['ybu'],
params.getForEqs('tpeqhsse')['ybd'],
params.getForEqs('tpeqhsse')['ilg'])
def execHssLumiEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ranshsselumi = HsseLuminosityEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
tke_diss, bconv, tconv,
params.getForProp('prop')['prefix'])
# plot hsse luminosity equation
# ranshsselumi.plot_luminosity_equation(params.getForProp('prop')['laxis'],
# params.getForEqs('lueqhsse')['xbl'],
# params.getForEqs('lueqhsse')['xbr'],
# params.getForEqs('lueqhsse')['ybu'],
# params.getForEqs('lueqhsse')['ybd'],
# params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation exact
ranshsselumi.plot_luminosity_equation_exact(params.getForProp('prop')['laxis'],
params.getForEqs('lueqhsse')['xbl'],
params.getForEqs('lueqhsse')['xbr'],
params.getForEqs('lueqhsse')['ybu'],
params.getForEqs('lueqhsse')['ybd'],
params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation exact 2
ranshsselumi.plot_luminosity_equation_exact2(params.getForProp('prop')['laxis'],
params.getForEqs('lueqhsse')['xbl'],
params.getForEqs('lueqhsse')['xbr'],
params.getForEqs('lueqhsse')['ybu'],
params.getForEqs('lueqhsse')['ybd'],
params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation alternative
# ranshsselumi.plot_luminosity_equation_2(params.getForProp('prop')['laxis'],
# params.getForEqs('lueqhsse')['xbl'],
# params.getForEqs('lueqhsse')['xbr'],
# params.getForEqs('lueqhsse')['ybu'],
# params.getForEqs('lueqhsse')['ybd'],
# params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation alternative simplified
# ranshsselumi.plot_luminosity_equation_3(params.getForProp('prop')['laxis'],
# params.getForEqs('lueqhsse')['xbl'],
# params.getForEqs('lueqhsse')['xbr'],
# params.getForEqs('lueqhsse')['ybu'],
# params.getForEqs('lueqhsse')['ybd'],
# params.getForEqs('lueqhsse')['ilg'])
def execHssCompEq(self, inuc, element, x, bconv, tconv):
params = self.params
# instantiate
ranshssecomp = HsseXtransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ranshssecomp.plot_Xtransport_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXrho(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the mean partial density (rho*X) profile for one isotope.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY); dead commented-out
    # plot variants removed
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXtra = XtransportEquation(prop['eht_data'], prop['plabel'], prop['ig'],
                                  prop['fext'], inuc, element, bconv, tconv,
                                  super_ad_i, super_ad_o, prop['intc'],
                                  prop['nsdim'], prop['prefix'])
    ransXtra.plot_Xrho(prop['laxis'], eqs['xbl'], eqs['xbr'],
                       eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execX(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the mean mass-fraction X profile for one isotope.

    For the "oburn" setup the plot_X_with_MM variant is used;
    otherwise the plain plot_X.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY); dead commented-out
    # plot variants removed
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXtra = XtransportEquation(prop['eht_data'], prop['plabel'], prop['ig'],
                                  prop['fext'], inuc, element, bconv, tconv,
                                  super_ad_i, super_ad_o, prop['intc'],
                                  prop['nsdim'], prop['prefix'])
    if prop['plabel'] == "oburn":
        ransXtra.plot_X_with_MM(prop['laxis'], eqs['xbl'], eqs['xbr'],
                                eqs['ybu'], eqs['ybd'], eqs['ilg'])
    else:
        ransXtra.plot_X(prop['laxis'], eqs['xbl'], eqs['xbr'],
                        eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execXtrsEq(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the X transport equation budget for one isotope.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXtra = XtransportEquation(prop['eht_data'], prop['plabel'], prop['ig'],
                                  prop['fext'], inuc, element, bconv, tconv,
                                  super_ad_i, super_ad_o, prop['intc'],
                                  prop['nsdim'], prop['prefix'])
    ransXtra.plot_Xtransport_equation(prop['laxis'], eqs['xbl'], eqs['xbr'],
                                      eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execXtrsEqBar(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the integral budget (bar chart) of the X transport equation.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation integral-budget limit table
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY); note this uses the
    # getForEqsBar limit table, not getForEqs
    prop = self.params.getForProp('prop')
    eqsbar = self.params.getForEqsBar(x)
    ransXtra = XtransportEquation(prop['eht_data'], prop['plabel'], prop['ig'],
                                  prop['fext'], inuc, element, bconv, tconv,
                                  super_ad_i, super_ad_o, prop['intc'],
                                  prop['nsdim'], prop['prefix'])
    ransXtra.plot_Xtransport_equation_integral_budget(prop['laxis'],
                                                      eqsbar['xbl'], eqsbar['xbr'],
                                                      eqsbar['ybu'], eqsbar['ybd'])
def execXflxx(self, inuc, element, x, bconv, tconv, tke_diss, tauL, cnvz_in_hp):
    """Plot the alphaX profile from the radial X flux equation machinery.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param bconv, tconv: convection zone boundaries
    :param tke_diss: turbulent kinetic energy dissipation rate
    :param tauL: dissipation timescale
    :param cnvz_in_hp: convection-zone extent in pressure scale heights
    """
    # hoist repeated dictionary getter calls (DRY); dead commented-out
    # plot variants removed
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXflxx = XfluxXequation(prop['eht_data'], prop['ig'], prop['ieos'],
                               prop['fext'], inuc, element, bconv, tconv,
                               tke_diss, tauL, cnvz_in_hp, prop['intc'],
                               prop['nsdim'], prop['prefix'])
    ransXflxx.plot_alphaX(prop['laxis'], eqs['xbl'], eqs['xbr'],
                          eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execXflxXeq(self, inuc, element, x, bconv, tconv, tke_diss, tauL, cnvz_in_hp):
    """Plot the radial X flux equation budget for one isotope.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param bconv, tconv: convection zone boundaries
    :param tke_diss: turbulent kinetic energy dissipation rate
    :param tauL: dissipation timescale
    :param cnvz_in_hp: convection-zone extent in pressure scale heights
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXflxx = XfluxXequation(prop['eht_data'], prop['ig'], prop['ieos'],
                               prop['fext'], inuc, element, bconv, tconv,
                               tke_diss, tauL, cnvz_in_hp, prop['intc'],
                               prop['nsdim'], prop['prefix'])
    ransXflxx.plot_XfluxX_equation(prop['laxis'], eqs['xbl'], eqs['xbr'],
                                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execXflxy(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
    """Plot the Y (horizontal) component of the X flux for one isotope.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param bconv, tconv: convection zone boundaries
    :param tke_diss: turbulent kinetic energy dissipation rate
    :param tauL: dissipation timescale
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXflxy = XfluxYequation(prop['eht_data'], prop['ig'], inuc, element,
                               bconv, tconv, tke_diss, tauL,
                               prop['intc'], prop['prefix'])
    ransXflxy.plot_XfluxY(prop['laxis'], eqs['xbl'], eqs['xbr'],
                          eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execXflxYeq(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
    """Plot the Y (horizontal) X flux equation budget for one isotope.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param bconv, tconv: convection zone boundaries
    :param tke_diss: turbulent kinetic energy dissipation rate
    :param tauL: dissipation timescale
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXflxy = XfluxYequation(prop['eht_data'], prop['ig'], inuc, element,
                               bconv, tconv, tke_diss, tauL,
                               prop['intc'], prop['prefix'])
    ransXflxy.plot_XfluxY_equation(prop['laxis'], eqs['xbl'], eqs['xbr'],
                                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execXflxz(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
    """Plot the Z (horizontal) component of the X flux for one isotope.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param bconv, tconv: convection zone boundaries
    :param tke_diss: turbulent kinetic energy dissipation rate
    :param tauL: dissipation timescale
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXflxz = XfluxZequation(prop['eht_data'], prop['ig'], inuc, element,
                               bconv, tconv, tke_diss, tauL,
                               prop['intc'], prop['prefix'])
    ransXflxz.plot_XfluxZ(prop['laxis'], eqs['xbl'], eqs['xbr'],
                          eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execXflxZeq(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
    """Plot the Z (horizontal) X flux equation budget for one isotope.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param bconv, tconv: convection zone boundaries
    :param tke_diss: turbulent kinetic energy dissipation rate
    :param tauL: dissipation timescale
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXflxz = XfluxZequation(prop['eht_data'], prop['ig'], inuc, element,
                               bconv, tconv, tke_diss, tauL,
                               prop['intc'], prop['prefix'])
    ransXflxz.plot_XfluxZ_equation(prop['laxis'], eqs['xbl'], eqs['xbr'],
                                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execXvar(self, inuc, element, x, bconv, tconv):
    """Plot the X variance profile for one isotope.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param bconv, tconv: convection zone boundaries
    """
    # tauL is a placeholder here; the variance profile plot does not
    # depend on the dissipation timescale
    tauL = 1.
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXvar = XvarianceEquation(prop['eht_data'], prop['ig'], inuc, element,
                                 tauL, bconv, tconv, prop['intc'],
                                 prop['nsdim'], prop['prefix'])
    ransXvar.plot_Xvariance(prop['laxis'], eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execXvarEq(self, inuc, element, x, tauL, bconv, tconv):
    """Plot the X variance equation budget for one isotope.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param tauL: dissipation timescale
    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXvar = XvarianceEquation(prop['eht_data'], prop['ig'], inuc, element,
                                 tauL, bconv, tconv, prop['intc'],
                                 prop['nsdim'], prop['prefix'])
    ransXvar.plot_Xvariance_equation(prop['laxis'], eqs['xbl'], eqs['xbr'],
                                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execDiff(self, inuc, element, x, lc, uconv, bconv, tconv, tke_diss, tauL, super_ad_i, super_ad_o, cnvz_in_hp):
    """Plot the effective eddy diffusivity comparison for one isotope.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param lc: characteristic length scale
    :param uconv: convective velocity scale
    :param bconv, tconv: convection zone boundaries
    :param tke_diss: turbulent kinetic energy dissipation rate
    :param tauL: dissipation timescale
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    :param cnvz_in_hp: convection-zone extent in pressure scale heights
    """
    # hoist repeated dictionary getter calls (DRY); dead commented-out
    # plot variant removed
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXdiff = Xdiffusivity(prop['eht_data'], prop['ig'], prop['fext'],
                             prop['ieos'], inuc, element, lc, uconv,
                             bconv, tconv, cnvz_in_hp, tke_diss, tauL,
                             super_ad_i, super_ad_o, prop['intc'],
                             prop['prefix'])
    ransXdiff.plot_X_Ediffusivity2(prop['laxis'], eqs['xbl'], eqs['xbr'],
                                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execXda(self, inuc, element, x, bconv, tconv):
    """Plot the Damkohler number profile for one isotope.

    :param inuc: isotope index in the data set
    :param element: isotope name (used for labels)
    :param x: key into the per-equation plot-limit table
    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs(x)
    ransXda = XdamkohlerNumber(prop['eht_data'], prop['ig'], inuc, element,
                               bconv, tconv, prop['intc'], prop['prefix'])
    ransXda.plot_Xda(prop['laxis'], eqs['xbl'], eqs['xbr'],
                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTke(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the turbulent kinetic energy profile.

    :param kolmdissrate: Kolmogorov dissipation rate
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY); dead commented-out
    # plot variants removed
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('tkie')
    ransTke = TurbulentKineticEnergyEquation(prop['eht_data'], prop['ig'],
                                             prop['intc'], prop['nsdim'],
                                             kolmdissrate, bconv, tconv,
                                             super_ad_i, super_ad_o,
                                             prop['prefix'])
    ransTke.plot_tke(prop['laxis'], bconv, tconv,
                     eqs['xbl'], eqs['xbr'],
                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEq(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the turbulent kinetic energy equation budget.

    :param kolmdissrate: Kolmogorov dissipation rate
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('tkeeq')
    ransTke = TurbulentKineticEnergyEquation(prop['eht_data'], prop['ig'],
                                             prop['intc'], prop['nsdim'],
                                             kolmdissrate, bconv, tconv,
                                             super_ad_i, super_ad_o,
                                             prop['prefix'])
    ransTke.plot_tke_equation(prop['laxis'], eqs['xbl'], eqs['xbr'],
                              eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEqBar(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the integral budget (bar chart) of the TKE equation.

    :param kolmdissrate: Kolmogorov dissipation rate
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('tkeeqBar')
    ransTke = TurbulentKineticEnergyEquation(prop['eht_data'], prop['ig'],
                                             prop['intc'], prop['nsdim'],
                                             kolmdissrate, bconv, tconv,
                                             super_ad_i, super_ad_o,
                                             prop['prefix'])
    ransTke.plot_tke_equation_integral_budget(prop['laxis'],
                                              eqs['xbl'], eqs['xbr'],
                                              eqs['ybu'], eqs['ybd'])
def execTkeRadial(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the radial component of the turbulent kinetic energy.

    :param kolmdissrate: Kolmogorov dissipation rate
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY); dead commented-out
    # plot variants removed
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('tkieR')
    ransTkeR = TurbulentKineticEnergyEquationRadial(prop['eht_data'], prop['ig'],
                                                    prop['intc'], prop['nsdim'],
                                                    kolmdissrate, bconv, tconv,
                                                    super_ad_i, super_ad_o,
                                                    prop['prefix'])
    ransTkeR.plot_tkeRadial(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEqRadial(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the radial TKE equation budget.

    :param kolmdissrate: Kolmogorov dissipation rate
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('tkeReq')
    ransTkeR = TurbulentKineticEnergyEquationRadial(prop['eht_data'], prop['ig'],
                                                    prop['intc'], prop['nsdim'],
                                                    kolmdissrate, bconv, tconv,
                                                    super_ad_i, super_ad_o,
                                                    prop['prefix'])
    ransTkeR.plot_tkeRadial_equation(prop['laxis'], eqs['xbl'], eqs['xbr'],
                                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEqRadialBar(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the integral budget (bar chart) of the radial TKE equation.

    :param kolmdissrate: Kolmogorov dissipation rate
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('tkeReqBar')
    ransTkeR = TurbulentKineticEnergyEquationRadial(prop['eht_data'], prop['ig'],
                                                    prop['intc'], prop['nsdim'],
                                                    kolmdissrate, bconv, tconv,
                                                    super_ad_i, super_ad_o,
                                                    prop['prefix'])
    ransTkeR.plot_tkeRadial_equation_integral_budget(prop['laxis'],
                                                     eqs['xbl'], eqs['xbr'],
                                                     eqs['ybu'], eqs['ybd'])
def execTkeHorizontal(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the horizontal component of the turbulent kinetic energy.

    :param kolmdissrate: Kolmogorov dissipation rate
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY); dead commented-out
    # plot variants removed
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('tkieH')
    ransTkeH = TurbulentKineticEnergyEquationHorizontal(prop['eht_data'], prop['ig'],
                                                        prop['intc'], prop['nsdim'],
                                                        kolmdissrate, bconv, tconv,
                                                        super_ad_i, super_ad_o,
                                                        prop['prefix'])
    ransTkeH.plot_tkeHorizontal(prop['laxis'], bconv, tconv,
                                eqs['xbl'], eqs['xbr'],
                                eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEqHorizontal(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the horizontal TKE equation budget.

    :param kolmdissrate: Kolmogorov dissipation rate
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('tkeHeq')
    ransTkeH = TurbulentKineticEnergyEquationHorizontal(prop['eht_data'], prop['ig'],
                                                        prop['intc'], prop['nsdim'],
                                                        kolmdissrate, bconv, tconv,
                                                        super_ad_i, super_ad_o,
                                                        prop['prefix'])
    ransTkeH.plot_tkeHorizontal_equation(prop['laxis'], eqs['xbl'], eqs['xbr'],
                                         eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEqHorizontalBar(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the integral budget (bar chart) of the horizontal TKE equation.

    :param kolmdissrate: Kolmogorov dissipation rate
    :param bconv, tconv: convection zone boundaries
    :param super_ad_i, super_ad_o: inner/outer super-adiabatic layer bounds
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('tkeHeqBar')
    ransTkeH = TurbulentKineticEnergyEquationHorizontal(prop['eht_data'], prop['ig'],
                                                        prop['intc'], prop['nsdim'],
                                                        kolmdissrate, bconv, tconv,
                                                        super_ad_i, super_ad_o,
                                                        prop['prefix'])
    ransTkeH.plot_tkeHorizontal_equation_integral_budget(prop['laxis'],
                                                         eqs['xbl'], eqs['xbr'],
                                                         eqs['ybu'], eqs['ybd'])
def execMomx(self, bconv, tconv):
    """Plot the mean radial (x) momentum profile.

    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('momex')
    ransMomx = MomentumEquationX(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['nsdim'], prop['prefix'])
    ransMomx.plot_momentum_x(prop['laxis'], bconv, tconv,
                             eqs['xbl'], eqs['xbr'],
                             eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execMomxEq(self, bconv, tconv):
    """Plot the radial (x) momentum equation budget.

    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('momxeq')
    ransMomx = MomentumEquationX(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['nsdim'], prop['prefix'])
    ransMomx.plot_momentum_equation_x(prop['laxis'], bconv, tconv,
                                      eqs['xbl'], eqs['xbr'],
                                      eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execMomy(self, bconv, tconv):
    """Plot the mean y momentum profile.

    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('momey')
    ransMomy = MomentumEquationY(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['prefix'])
    ransMomy.plot_momentum_y(prop['laxis'], bconv, tconv,
                             eqs['xbl'], eqs['xbr'],
                             eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execMomyEq(self, bconv, tconv):
    """Plot the y momentum equation budget.

    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('momyeq')
    ransMomy = MomentumEquationY(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['prefix'])
    ransMomy.plot_momentum_equation_y(prop['laxis'], bconv, tconv,
                                      eqs['xbl'], eqs['xbr'],
                                      eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execMomz(self, bconv, tconv):
    """Plot the mean z momentum profile.

    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('momez')
    ransMomz = MomentumEquationZ(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['prefix'])
    ransMomz.plot_momentum_z(prop['laxis'], bconv, tconv,
                             eqs['xbl'], eqs['xbr'],
                             eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execMomzEq(self, bconv, tconv):
    """Plot the z momentum equation budget.

    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('momzeq')
    ransMomz = MomentumEquationZ(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['prefix'])
    ransMomz.plot_momentum_equation_z(prop['laxis'], bconv, tconv,
                                      eqs['xbl'], eqs['xbr'],
                                      eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEi(self, bconv, tconv):
    """Plot the mean internal energy profile.

    :param bconv, tconv: convection zone boundaries
    """
    # dissipation term is irrelevant for the plain profile plot
    tke_diss = 0.
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('eint')
    ransEi = InternalEnergyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                                    prop['intc'], tke_diss, prop['prefix'])
    ransEi.plot_ei(prop['laxis'], bconv, tconv,
                   eqs['xbl'], eqs['xbr'],
                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEiEq(self, tke_diss, bconv, tconv):
    """Plot the internal energy equation budget.

    :param tke_diss: turbulent kinetic energy dissipation rate
    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('eieq')
    ransEi = InternalEnergyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                                    prop['intc'], tke_diss, prop['prefix'])
    ransEi.plot_ei_equation(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEiFlx(self, bconv, tconv):
    """Plot the internal energy flux profile.

    :param bconv, tconv: convection zone boundaries
    """
    # dissipation term is irrelevant for the plain profile plot
    tke_diss = 0.
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('eintflx')
    ransEiFlx = InternalEnergyFluxEquation(prop['eht_data'], prop['ig'],
                                           prop['intc'], tke_diss,
                                           prop['prefix'])
    ransEiFlx.plot_fei(prop['laxis'], bconv, tconv,
                       eqs['xbl'], eqs['xbr'],
                       eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEiFlxEq(self, tke_diss, bconv, tconv):
    """Plot both variants of the internal energy flux equation budget.

    :param tke_diss: turbulent kinetic energy dissipation rate
    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY); both plot variants
    # share the same limits table ('eiflxeq')
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('eiflxeq')
    ransEiFlx = InternalEnergyFluxEquation(prop['eht_data'], prop['ig'],
                                           prop['intc'], tke_diss,
                                           prop['prefix'])
    ransEiFlx.plot_fei_equation(prop['laxis'], bconv, tconv,
                                eqs['xbl'], eqs['xbr'],
                                eqs['ybu'], eqs['ybd'], eqs['ilg'])
    ransEiFlx.plot_fei_equation2(prop['laxis'], bconv, tconv,
                                 eqs['xbl'], eqs['xbr'],
                                 eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execHHflx(self, bconv, tconv):
    """Plot the enthalpy flux profile.

    :param bconv, tconv: convection zone boundaries
    """
    # dissipation term is irrelevant for the plain profile plot
    tke_diss = 0.
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('enthflx')
    ransHHflx = EnthalpyFluxEquation(prop['eht_data'], prop['ig'],
                                     prop['ieos'], prop['intc'],
                                     tke_diss, prop['prefix'])
    ransHHflx.plot_fhh(prop['laxis'], bconv, tconv,
                       eqs['xbl'], eqs['xbr'],
                       eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execHHflxEq(self, tke_diss, bconv, tconv):
    """Plot the enthalpy flux equation budget.

    :param tke_diss: turbulent kinetic energy dissipation rate
    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('hhflxeq')
    ransHHflx = EnthalpyFluxEquation(prop['eht_data'], prop['ig'],
                                     prop['ieos'], prop['intc'],
                                     tke_diss, prop['prefix'])
    ransHHflx.plot_fhh_equation(prop['laxis'], bconv, tconv,
                                eqs['xbl'], eqs['xbr'],
                                eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execHHvar(self, bconv, tconv):
    """Plot the enthalpy variance profile.

    :param bconv, tconv: convection zone boundaries
    """
    # dissipation terms are irrelevant for the plain profile plot
    tke_diss = 0.
    tauL = 1.
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('enthvar')
    ransHHvar = EnthalpyVarianceEquation(prop['eht_data'], prop['ig'],
                                         prop['ieos'], prop['intc'],
                                         tke_diss, tauL, prop['prefix'])
    ransHHvar.plot_sigma_hh(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execHHvarEq(self, tke_diss, tauL, bconv, tconv):
    """Plot the enthalpy variance equation budget.

    :param tke_diss: turbulent kinetic energy dissipation rate
    :param tauL: dissipation timescale
    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('hhvareq')
    ransHHvar = EnthalpyVarianceEquation(prop['eht_data'], prop['ig'],
                                         prop['ieos'], prop['intc'],
                                         tke_diss, tauL, prop['prefix'])
    ransHHvar.plot_sigma_hh_equation(prop['laxis'], bconv, tconv,
                                     eqs['xbl'], eqs['xbr'],
                                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEiVar(self, bconv, tconv):
    """Plot the internal energy variance profile.

    :param bconv, tconv: convection zone boundaries
    """
    # dissipation terms are irrelevant for the plain profile plot
    tke_diss = 0.
    tauL = 1.
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('eintvar')
    ransEiVar = InternalEnergyVarianceEquation(prop['eht_data'], prop['ig'],
                                               prop['ieos'], prop['intc'],
                                               tke_diss, tauL, prop['prefix'])
    ransEiVar.plot_sigma_ei(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEiVarEq(self, tke_diss, tauL, bconv, tconv):
    """Plot the internal energy variance equation budget.

    :param tke_diss: turbulent kinetic energy dissipation rate
    :param tauL: dissipation timescale
    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('eivareq')
    ransEiVar = InternalEnergyVarianceEquation(prop['eht_data'], prop['ig'],
                                               prop['ieos'], prop['intc'],
                                               tke_diss, tauL, prop['prefix'])
    ransEiVar.plot_sigma_ei_equation(prop['laxis'], bconv, tconv,
                                     eqs['xbl'], eqs['xbr'],
                                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSS(self, bconv, tconv):
    """Plot the mean entropy profile.

    :param bconv, tconv: convection zone boundaries
    """
    # dissipation term is irrelevant for the plain profile plot
    tke_diss = 0.
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('entr')
    ransSS = EntropyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                             prop['intc'], prop['nsdim'], tke_diss,
                             prop['prefix'])
    ransSS.plot_ss(prop['laxis'], bconv, tconv,
                   eqs['xbl'], eqs['xbr'],
                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSSeq(self, tke_diss, bconv, tconv):
    """Plot the entropy equation budget.

    :param tke_diss: turbulent kinetic energy dissipation rate
    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('sseq')
    ransSS = EntropyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                             prop['intc'], prop['nsdim'], tke_diss,
                             prop['prefix'])
    ransSS.plot_ss_equation(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSSflx(self, bconv, tconv):
    """Plot the entropy flux profile.

    :param bconv, tconv: convection zone boundaries
    """
    # dissipation term is irrelevant for the plain profile plot
    tke_diss = 0.
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('entrflx')
    ransSSflx = EntropyFluxEquation(prop['eht_data'], prop['ig'],
                                    prop['intc'], tke_diss, prop['prefix'])
    ransSSflx.plot_fss(prop['laxis'], bconv, tconv,
                       eqs['xbl'], eqs['xbr'],
                       eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSSflxEq(self, tke_diss, bconv, tconv):
    """Plot both variants of the entropy flux equation budget.

    :param tke_diss: turbulent kinetic energy dissipation rate
    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY); both plot variants
    # share the same limits table ('ssflxeq')
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('ssflxeq')
    ransSSflx = EntropyFluxEquation(prop['eht_data'], prop['ig'],
                                    prop['intc'], tke_diss, prop['prefix'])
    ransSSflx.plot_fss_equation(prop['laxis'], bconv, tconv,
                                eqs['xbl'], eqs['xbr'],
                                eqs['ybu'], eqs['ybd'], eqs['ilg'])
    ransSSflx.plot_fss_equation2(prop['laxis'], bconv, tconv,
                                 eqs['xbl'], eqs['xbr'],
                                 eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSSvar(self, bconv, tconv):
    """Plot the entropy variance profile.

    :param bconv, tconv: convection zone boundaries
    """
    # dissipation terms are irrelevant for the plain profile plot
    tke_diss = 0.
    tauL = 1.
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('entrvar')
    ransSSvar = EntropyVarianceEquation(prop['eht_data'], prop['ig'],
                                        prop['intc'], prop['nsdim'],
                                        tke_diss, tauL, prop['prefix'])
    ransSSvar.plot_sigma_ss(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSSvarEq(self, tke_diss, tauL, bconv, tconv):
    """Plot the entropy variance equation budget.

    :param tke_diss: turbulent kinetic energy dissipation rate
    :param tauL: dissipation timescale
    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('ssvareq')
    ransSSvar = EntropyVarianceEquation(prop['eht_data'], prop['ig'],
                                        prop['intc'], prop['nsdim'],
                                        tke_diss, tauL, prop['prefix'])
    ransSSvar.plot_sigma_ss_equation(prop['laxis'], bconv, tconv,
                                     eqs['xbl'], eqs['xbr'],
                                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execDDvar(self, bconv, tconv):
    """Plot the density variance profile.

    :param bconv, tconv: convection zone boundaries
    """
    # timescale is irrelevant for the plain profile plot
    tauL = 1.
    # hoist repeated dictionary getter calls (DRY)
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('densvar')
    ransDDvar = DensityVarianceEquation(prop['eht_data'], prop['ig'],
                                        prop['intc'], tauL, prop['prefix'])
    ransDDvar.plot_sigma_dd(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execDDvarEq(self, tauL, bconv, tconv):
    """Plot the density variance equation budget.

    :param tauL: dissipation timescale
    :param bconv, tconv: convection zone boundaries
    """
    # hoist repeated dictionary getter calls (DRY); local renamed from the
    # copy-pasted 'ransSSvar' — it holds a DensityVarianceEquation
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('ddvareq')
    ransDDvar = DensityVarianceEquation(prop['eht_data'], prop['ig'],
                                        prop['intc'], tauL, prop['prefix'])
    ransDDvar.plot_sigma_dd_equation(prop['laxis'], bconv, tconv,
                                     eqs['xbl'], eqs['xbr'],
                                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTMSflx(self, bconv, tconv, lc):
params = self.params
# instantiate
ransTMSflx = TurbulentMassFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
lc)
ransTMSflx.plot_a(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('tmsflx')['xbl'],
params.getForEqs('tmsflx')['xbr'],
params.getForEqs('tmsflx')['ybu'],
params.getForEqs('tmsflx')['ybd'],
params.getForEqs('tmsflx')['ilg'])
def execAeq(self, bconv, tconv, lc):
params = self.params
# instantiate
ransTMSflx = TurbulentMassFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
lc)
ransTMSflx.plot_a_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('aeq')['xbl'],
params.getForEqs('aeq')['xbr'],
params.getForEqs('aeq')['ybu'],
params.getForEqs('aeq')['ybd'],
params.getForEqs('aeq')['ilg'])
def execDSVC(self, bconv, tconv):
params = self.params
# instantiate
ransDSVC = DensitySpecificVolumeCovarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransDSVC.plot_b(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('dsvc')['xbl'],
params.getForEqs('dsvc')['xbr'],
params.getForEqs('dsvc')['ybu'],
params.getForEqs('dsvc')['ybd'],
params.getForEqs('dsvc')['ilg'])
def execBeq(self, bconv, tconv):
params = self.params
# instantiate
ransDSVC = DensitySpecificVolumeCovarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransDSVC.plot_b_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('beq')['xbl'],
params.getForEqs('beq')['xbr'],
params.getForEqs('beq')['ybu'],
params.getForEqs('beq')['ybd'],
params.getForEqs('beq')['ilg'])
def execRhoTemp(self, bconv, tconv):
params = self.params
# instantiate
ransTempRho = TemperatureDensity(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransTempRho.plot_ttdd(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ttdd')['xbl'],
params.getForEqs('ttdd')['xbr'],
params.getForEqs('ttdd')['ybu'],
params.getForEqs('ttdd')['ybd'],
params.getForEqs('ttdd')['ilg'])
def execPressEi(self, bconv, tconv):
params = self.params
# instantiate
ransPressEi = PressureInternalEnergy(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransPressEi.plot_ppei(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppei')['xbl'],
params.getForEqs('ppei')['xbr'],
params.getForEqs('ppei')['ybu'],
params.getForEqs('ppei')['ybd'],
params.getForEqs('ppei')['ilg'])
def execEnuc(self, bconv, tconv):
params = self.params
# instantiate
ransEnuc = NuclearEnergyProduction(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# ransEnuc.plot_enuc(params.getForProp('prop')['laxis'],
# bconv, tconv,
# params.getForEqs('enuc')['xbl'],
# params.getForEqs('enuc')['xbr'],
# params.getForEqs('enuc')['ybu'],
# params.getForEqs('enuc')['ybd'],
# params.getForEqs('enuc')['ilg'])
ransEnuc.plot_enuc2(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('enuc')['xbl'],
params.getForEqs('enuc')['xbr'],
params.getForEqs('enuc')['ybu'],
params.getForEqs('enuc')['ybd'],
params.getForEqs('enuc')['ilg'])
# ransEnuc.plot_enuc_per_volume(params.getForProp('prop')['laxis'], \
# params.getForEqs('enuc')['xbl'], \
# params.getForEqs('enuc')['xbr'], \
# params.getForEqs('enuc')['ybu'], \
# params.getForEqs('enuc')['ybd'], \
# params.getForEqs('enuc')['ilg'])
def execGrav(self, bconv, tconv):
params = self.params
# instantiate
ransGrav = Gravity(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransGrav.plot_grav(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('grav')['xbl'],
params.getForEqs('grav')['xbr'],
params.getForEqs('grav')['ybu'],
params.getForEqs('grav')['ybd'],
params.getForEqs('grav')['ilg'])
def execNablas(self, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransNablas = TemperatureGradients(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransNablas.plot_nablas(params.getForProp('prop')['laxis'],
bconv, tconv, super_ad_i, super_ad_o,
params.getForEqs('nablas')['xbl'],
params.getForEqs('nablas')['xbr'],
params.getForEqs('nablas')['ybu'],
params.getForEqs('nablas')['ybd'],
params.getForEqs('nablas')['ilg'])
#ransNablas.plot_nablas2(params.getForProp('prop')['laxis'],
# bconv, tconv, super_ad_i, super_ad_o,
# params.getForEqs('nablas')['xbl'],
# params.getForEqs('nablas')['xbr'],
# params.getForEqs('nablas')['ybu'],
# params.getForEqs('nablas')['ybd'],
# params.getForEqs('nablas')['ilg'])
def execDegeneracy(self):
params = self.params
# instantiate
ransDeg = Degeneracy(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransDeg.plot_degeneracy(params.getForProp('prop')['laxis'],
params.getForEqs('psi')['xbl'],
params.getForEqs('psi')['xbr'],
params.getForEqs('psi')['ybu'],
params.getForEqs('psi')['ybd'],
params.getForEqs('psi')['ilg'])
def execVelocitiesMeanExp(self, bconv, tconv):
params = self.params
# instantiate
ransVelmeanExp = VelocitiesMeanExp(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransVelmeanExp.plot_velocities(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('velbgr')['xbl'],
params.getForEqs('velbgr')['xbr'],
params.getForEqs('velbgr')['ybu'],
params.getForEqs('velbgr')['ybd'],
params.getForEqs('velbgr')['ilg'])
def execVelocitiesMLTturb(self, bconv, tconv, uconv, super_ad_i, super_ad_o, ):
params = self.params
# instantiate
ransVelMLTturb = VelocitiesMLTturb(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
bconv, tconv, uconv, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransVelMLTturb.plot_velocities(params.getForProp('prop')['laxis'],
params.getForEqs('velmlt')['xbl'],
params.getForEqs('velmlt')['xbr'],
params.getForEqs('velmlt')['ybu'],
params.getForEqs('velmlt')['ybd'],
params.getForEqs('velmlt')['ilg'])
def execBruntV(self, bconv, tconv):
params = self.params
# instantiate
ransBruntV = BruntVaisalla(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransBruntV.plot_bruntvaisalla(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('nsq')['xbl'],
params.getForEqs('nsq')['xbr'],
params.getForEqs('nsq')['ybu'],
params.getForEqs('nsq')['ybd'],
params.getForEqs('nsq')['ilg'])
# ransBruntV.plot_ri(params.getForProp('prop')['laxis'],
# bconv, tconv,
# params.getForEqs('nsq')['xbl'],
# params.getForEqs('nsq')['xbr'],
# params.getForEqs('nsq')['ybu'],
# params.getForEqs('nsq')['ybd'],
# params.getForEqs('nsq')['ilg'])
def execBuoyancy(self, bconv, tconv):
params = self.params
# instantiate
ransBuo = Buoyancy(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransBuo.plot_buoyancy(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('buo')['xbl'],
params.getForEqs('buo')['xbr'],
params.getForEqs('buo')['ybu'],
params.getForEqs('buo')['ybd'],
params.getForEqs('buo')['ilg'])
def execRelativeRmsFlct(self, bconv, tconv):
params = self.params
# instantiate
ransRms = RelativeRMSflct(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransRms.plot_relative_rms_flct(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('relrmsflct')['xbl'],
params.getForEqs('relrmsflct')['xbr'],
params.getForEqs('relrmsflct')['ybu'],
params.getForEqs('relrmsflct')['ybd'],
params.getForEqs('relrmsflct')['ilg'])
# ransRms.plot_relative_rms_flct2(params.getForProp('prop')['laxis'],
# bconv, tconv,
# params.getForEqs('relrmsflct')['xbl'],
# params.getForEqs('relrmsflct')['xbr'],
# params.getForEqs('relrmsflct')['ybu'],
# params.getForEqs('relrmsflct')['ybd'],
# params.getForEqs('relrmsflct')['ilg'])
def execAbarZbar(self, bconv, tconv):
params = self.params
# instantiate
ransAZ = AbarZbar(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransAZ.plot_abarzbar(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('abzb')['xbl'],
params.getForEqs('abzb')['xbr'],
params.getForEqs('abzb')['ybu'],
params.getForEqs('abzb')['ybd'],
params.getForEqs('abzb')['ilg'])
def execKe(self, bconv, tconv):
params = self.params
kolmrate = 0.
# instantiate
ransKe = KineticEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot kinetic energy
ransKe.plot_ke(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('kine')['xbl'],
params.getForEqs('kine')['xbr'],
params.getForEqs('kine')['ybu'],
params.getForEqs('kine')['ybd'],
params.getForEqs('kine')['ilg'])
def execKeEq(self, kolmrate, bconv, tconv):
params = self.params
# instantiate
ransKe = KineticEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot kinetic energy equation
ransKe.plot_ke_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('kieq')['xbl'],
params.getForEqs('kieq')['xbr'],
params.getForEqs('kieq')['ybu'],
params.getForEqs('kieq')['ybd'],
params.getForEqs('kieq')['ilg'])
def execTe(self, bconv, tconv):
params = self.params
kolmrate = 0.
# instantiate
ransTe = TotalEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot total energy
ransTe.plot_et(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('toe')['xbl'],
params.getForEqs('toe')['xbr'],
params.getForEqs('toe')['ybu'],
params.getForEqs('toe')['ybd'],
params.getForEqs('toe')['ilg'])
def execTeEq(self, kolmrate, bconv, tconv):
params = self.params
# instantiate
ransTe = TotalEnergyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot total energy equation
ransTe.plot_et_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('teeq')['xbl'],
params.getForEqs('teeq')['xbr'],
params.getForEqs('teeq')['ybu'],
params.getForEqs('teeq')['ybd'],
params.getForEqs('teeq')['ilg'])
def execRxx(self, bconv, tconv):
params = self.params
kolmrate = 0.
# instantiate
ransRxx = ReynoldsStressXXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress rxx
ransRxx.plot_rxx(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('rxx')['xbl'],
params.getForEqs('rxx')['xbr'],
params.getForEqs('rxx')['ybu'],
params.getForEqs('rxx')['ybd'],
params.getForEqs('rxx')['ilg'])
def execRxxEq(self, kolmrate, bconv, tconv):
params = self.params
# instantiate
ransRxx = ReynoldsStressXXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress rxx
ransRxx.plot_rxx_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('rexxeq')['xbl'],
params.getForEqs('rexxeq')['xbr'],
params.getForEqs('rexxeq')['ybu'],
params.getForEqs('rexxeq')['ybd'],
params.getForEqs('rexxeq')['ilg'])
def execRyy(self, bconv, tconv):
params = self.params
kolmrate = 0.
# instantiate
ransRyy = ReynoldsStressYYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress ryy
ransRyy.plot_ryy(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ryy')['xbl'],
params.getForEqs('ryy')['xbr'],
params.getForEqs('ryy')['ybu'],
params.getForEqs('ryy')['ybd'],
params.getForEqs('ryy')['ilg'])
def execRyyEq(self, kolmrate, bconv, tconv):
params = self.params
# instantiate
ransRyy = ReynoldsStressYYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress ryy
ransRyy.plot_ryy_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('reyyeq')['xbl'],
params.getForEqs('reyyeq')['xbr'],
params.getForEqs('reyyeq')['ybu'],
params.getForEqs('reyyeq')['ybd'],
params.getForEqs('reyyeq')['ilg'])
def execRzz(self, bconv, tconv):
params = self.params
kolmrate = 0.
# instantiate
ransRzz = ReynoldsStressZZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress rzz
ransRzz.plot_rzz(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('rzz')['xbl'],
params.getForEqs('rzz')['xbr'],
params.getForEqs('rzz')['ybu'],
params.getForEqs('rzz')['ybd'],
params.getForEqs('rzz')['ilg'])
def execRzzEq(self, kolmrate, bconv, tconv):
params = self.params
# instantiate
ransRzz = ReynoldsStressZZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
-kolmrate,
params.getForProp('prop')['prefix'])
# plot reynolds stress rzz
ransRzz.plot_rzz_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('rezzeq')['xbl'],
params.getForEqs('rezzeq')['xbr'],
params.getForEqs('rezzeq')['ybu'],
params.getForEqs('rezzeq')['ybd'],
params.getForEqs('rezzeq')['ilg'])
def execAbar(self, bconv, tconv):
params = self.params
# instantiate
ransAbar = AbarTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot abar
ransAbar.plot_abar(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('abar')['xbl'],
params.getForEqs('abar')['xbr'],
params.getForEqs('abar')['ybu'],
params.getForEqs('abar')['ybd'],
params.getForEqs('abar')['ilg'])
def execAbarEq(self, bconv, tconv):
params = self.params
# instantiate
ransAbar = AbarTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot abar equation
ransAbar.plot_abar_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('abreq')['xbl'],
params.getForEqs('abreq')['xbr'],
params.getForEqs('abreq')['ybu'],
params.getForEqs('abreq')['ybd'],
params.getForEqs('abreq')['ilg'])
def execFabarx(self, bconv, tconv):
params = self.params
# instantiate
ransFabarx = AbarFluxTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot fabarx
ransFabarx.plot_abarflux(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('abflx')['xbl'],
params.getForEqs('abflx')['xbr'],
params.getForEqs('abflx')['ybu'],
params.getForEqs('abflx')['ybd'],
params.getForEqs('abflx')['ilg'])
def execFabarxEq(self, bconv, tconv):
params = self.params
# instantiate
ransFabarx = AbarFluxTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot fabarx equation
ransFabarx.plot_abarflux_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('fabxeq')['xbl'],
params.getForEqs('fabxeq')['xbr'],
params.getForEqs('fabxeq')['ybu'],
params.getForEqs('fabxeq')['ybd'],
params.getForEqs('fabxeq')['ilg'])
def execZbar(self, bconv, tconv):
params = self.params
# instantiate
ransZbar = ZbarTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# plot zbar
ransZbar.plot_zbar(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('zbar')['xbl'],
params.getForEqs('zbar')['xbr'],
params.getForEqs('zbar')['ybu'],
params.getForEqs('zbar')['ybd'],
params.getForEqs('zbar')['ilg'])
def execZbarEq(self, bconv, tconv):
params = self.params
# instantiate
ransZbar = ZbarTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# plot zbar equation
ransZbar.plot_zbar_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('zbreq')['xbl'],
params.getForEqs('zbreq')['xbr'],
params.getForEqs('zbreq')['ybu'],
params.getForEqs('zbreq')['ybd'],
params.getForEqs('zbreq')['ilg'])
def execFzbarx(self, bconv, tconv):
params = self.params
# instantiate
ransFzbarx = ZbarFluxTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# plot fzbarx
ransFzbarx.plot_zbarflux(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('zbflx')['xbl'],
params.getForEqs('zbflx')['xbr'],
params.getForEqs('zbflx')['ybu'],
params.getForEqs('zbflx')['ybd'],
params.getForEqs('zbflx')['ilg'])
def execFzbarxEq(self, bconv, tconv):
params = self.params
# instantiate
ransFzbarx = ZbarFluxTransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# plot fzbarx equation
ransFzbarx.plot_zbarflux_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('fzbxeq')['xbl'],
params.getForEqs('fzbxeq')['xbr'],
params.getForEqs('fzbxeq')['ybu'],
params.getForEqs('fzbxeq')['ybd'],
params.getForEqs('fzbxeq')['ilg'])
def execPP(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransPP = PressureEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPP.plot_pp(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('press')['xbl'],
params.getForEqs('press')['xbr'],
params.getForEqs('press')['ybu'],
params.getForEqs('press')['ybd'],
params.getForEqs('press')['ilg'])
# ransPP.plot_dAdt(params.getForProp('prop')['laxis'], \
# params.getForEqs('press')['xbl'], \
# params.getForEqs('press')['xbr'], \
# params.getForEqs('press')['ybu'], \
# params.getForEqs('press')['ybd'], \
# params.getForEqs('press')['ilg'])
def execPPeq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransPP = PressureEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPP.plot_pp_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppeq')['xbl'],
params.getForEqs('ppeq')['xbr'],
params.getForEqs('ppeq')['ybu'],
params.getForEqs('ppeq')['ybd'],
params.getForEqs('ppeq')['ilg'])
def execPPxflx(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransPPxflx = PressureFluxXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPxflx.plot_fppx(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('pressxflx')['xbl'],
params.getForEqs('pressxflx')['xbr'],
params.getForEqs('pressxflx')['ybu'],
params.getForEqs('pressxflx')['ybd'],
params.getForEqs('pressxflx')['ilg'])
def execPPxflxEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransPPxflx = PressureFluxXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPxflx.plot_fppx_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppxflxeq')['xbl'],
params.getForEqs('ppxflxeq')['xbr'],
params.getForEqs('ppxflxeq')['ybu'],
params.getForEqs('ppxflxeq')['ybd'],
params.getForEqs('ppxflxeq')['ilg'])
def execPPyflx(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransPPyflx = PressureFluxYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPyflx.plot_fppy(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('pressyflx')['xbl'],
params.getForEqs('pressyflx')['xbr'],
params.getForEqs('pressyflx')['ybu'],
params.getForEqs('pressyflx')['ybd'],
params.getForEqs('pressyflx')['ilg'])
def execPPyflxEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransPPyflx = PressureFluxYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPyflx.plot_fppy_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppyflxeq')['xbl'],
params.getForEqs('ppyflxeq')['xbr'],
params.getForEqs('ppyflxeq')['ybu'],
params.getForEqs('ppyflxeq')['ybd'],
params.getForEqs('ppyflxeq')['ilg'])
def execPPzflx(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransPPzflx = PressureFluxZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPzflx.plot_fppz(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('presszflx')['xbl'],
params.getForEqs('presszflx')['xbr'],
params.getForEqs('presszflx')['ybu'],
params.getForEqs('presszflx')['ybd'],
params.getForEqs('presszflx')['ilg'])
def execPPzflxEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransPPzflx = PressureFluxZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransPPzflx.plot_fppz_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppzflxeq')['xbl'],
params.getForEqs('ppzflxeq')['xbr'],
params.getForEqs('ppzflxeq')['ybu'],
params.getForEqs('ppzflxeq')['ybd'],
params.getForEqs('ppzflxeq')['ilg'])
def execPPvar(self, bconv, tconv):
params = self.params
tke_diss = 0.
tauL = 1.
# instantiate
ransPPvar = PressureVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransPPvar.plot_sigma_pp(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('pressvar')['xbl'],
params.getForEqs('pressvar')['xbr'],
params.getForEqs('pressvar')['ybu'],
params.getForEqs('pressvar')['ybd'],
params.getForEqs('pressvar')['ilg'])
def execPPvarEq(self, tke_diss, tauL, bconv, tconv):
params = self.params
# instantiate
ransPPvar = PressureVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransPPvar.plot_sigma_pp_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ppvareq')['xbl'],
params.getForEqs('ppvareq')['xbr'],
params.getForEqs('ppvareq')['ybu'],
params.getForEqs('ppvareq')['ybd'],
params.getForEqs('ppvareq')['ilg'])
def execTT(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransTT = TemperatureEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransTT.plot_tt(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('temp')['xbl'],
params.getForEqs('temp')['xbr'],
params.getForEqs('temp')['ybu'],
params.getForEqs('temp')['ybd'],
params.getForEqs('temp')['ilg'])
def execTTeq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransTT = TemperatureEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransTT.plot_tt_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('tteq')['xbl'],
params.getForEqs('tteq')['xbr'],
params.getForEqs('tteq')['ybu'],
params.getForEqs('tteq')['ybd'],
params.getForEqs('tteq')['ilg'])
def execTTvar(self, bconv, tconv):
params = self.params
tke_diss = 0.
tauL = 1.
# instantiate
ransTTvar = TemperatureVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransTTvar.plot_sigma_tt(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('tempvar')['xbl'],
params.getForEqs('tempvar')['xbr'],
params.getForEqs('tempvar')['ybu'],
params.getForEqs('tempvar')['ybd'],
params.getForEqs('tempvar')['ilg'])
def execTTvarEq(self, tke_diss, tauL, bconv, tconv):
params = self.params
# instantiate
ransTTvar = TemperatureVarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss, tauL,
params.getForProp('prop')['prefix'])
ransTTvar.plot_sigma_tt_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ttvareq')['xbl'],
params.getForEqs('ttvareq')['xbr'],
params.getForEqs('ttvareq')['ybu'],
params.getForEqs('ttvareq')['ybd'],
params.getForEqs('ttvareq')['ilg'])
def execTTflx(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransTTflx = TemperatureFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransTTflx.plot_ftt(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('tempflx')['xbl'],
params.getForEqs('tempflx')['xbr'],
params.getForEqs('tempflx')['ybu'],
params.getForEqs('tempflx')['ybd'],
params.getForEqs('tempflx')['ilg'])
def execTTflxEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransTTflx = TemperatureFluxEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
tke_diss,
params.getForProp('prop')['prefix'])
ransTTflx.plot_ftt_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('ttflxeq')['xbl'],
params.getForEqs('ttflxeq')['xbr'],
params.getForEqs('ttflxeq')['ybu'],
params.getForEqs('ttflxeq')['ybd'],
params.getForEqs('ttflxeq')['ilg'])
def execHH(self, bconv, tconv):
params = self.params
tke_diss = 0.
# instantiate
ransHH = EnthalpyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransHH.plot_hh(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('enth')['xbl'],
params.getForEqs('enth')['xbr'],
params.getForEqs('enth')['ybu'],
params.getForEqs('enth')['ybd'],
params.getForEqs('enth')['ilg'])
def execHHeq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ransHH = EnthalpyEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
tke_diss,
params.getForProp('prop')['prefix'])
ransHH.plot_hh_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('hheq')['xbl'],
params.getForEqs('hheq')['xbr'],
params.getForEqs('hheq')['ybu'],
params.getForEqs('hheq')['ybd'],
params.getForEqs('hheq')['ilg'])
def execFtvfhX(self, bconv, tconv):
params = self.params
# instantiate
ransFtvfhX = FullTurbulenceVelocityFieldHypothesisX(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
ransFtvfhX.plot_ftvfhX_equation(params.getForProp('prop')['laxis'],
params.getForEqs('ftvfh_x')['xbl'],
params.getForEqs('ftvfh_x')['xbr'],
params.getForEqs('ftvfh_x')['ybu'],
params.getForEqs('ftvfh_x')['ybd'],
params.getForEqs('ftvfh_x')['ilg'])
def execFtvfhY(self, bconv, tconv):
params = self.params
# instantiate
ransFtvfhY = FullTurbulenceVelocityFieldHypothesisY(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
ransFtvfhY.plot_ftvfhY_equation(params.getForProp('prop')['laxis'],
params.getForEqs('ftvfh_y')['xbl'],
params.getForEqs('ftvfh_y')['xbr'],
params.getForEqs('ftvfh_y')['ybu'],
params.getForEqs('ftvfh_y')['ybd'],
params.getForEqs('ftvfh_y')['ilg'])
def execFtvfhZ(self, bconv, tconv):
params = self.params
# instantiate
ransFtvfhZ = FullTurbulenceVelocityFieldHypothesisZ(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
ransFtvfhZ.plot_ftvfhZ_equation(params.getForProp('prop')['laxis'],
params.getForEqs('ftvfh_z')['xbl'],
params.getForEqs('ftvfh_z')['xbr'],
params.getForEqs('ftvfh_z')['ybu'],
params.getForEqs('ftvfh_z')['ybd'],
params.getForEqs('ftvfh_z')['ilg'])
def execUxfpd(self, bconv, tconv):
    """Plot the ux-fpd identity.

    :param bconv: bottom boundary of the convection zone (passed to the plotter)
    :param tconv: top boundary of the convection zone (passed to the plotter)
    """
    # Hoist the repeated params.getForProp/getForEqs lookups.
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('uxfpd')
    # instantiate
    ransUxfpd = UxfpdIdentity(
        prop['eht_data'], prop['ig'], prop['fext'], prop['ieos'],
        prop['intc'], prop['prefix'], bconv, tconv)
    ransUxfpd.plot_uxfpd_identity(
        prop['laxis'], eqs['xbl'], eqs['xbr'], eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execUyfpd(self, bconv, tconv):
    """Plot the uy-fpd identity.

    :param bconv: bottom boundary of the convection zone (passed to the plotter)
    :param tconv: top boundary of the convection zone (passed to the plotter)
    """
    # Hoist the repeated params.getForProp/getForEqs lookups.
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('uyfpd')
    # instantiate
    ransUyfpd = UyfpdIdentity(
        prop['eht_data'], prop['ig'], prop['fext'], prop['ieos'],
        prop['intc'], prop['prefix'], bconv, tconv)
    ransUyfpd.plot_uyfpd_identity(
        prop['laxis'], eqs['xbl'], eqs['xbr'], eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execUzfpd(self, bconv, tconv):
    """Plot the uz-fpd identity.

    :param bconv: bottom boundary of the convection zone (passed to the plotter)
    :param tconv: top boundary of the convection zone (passed to the plotter)
    """
    # Hoist the repeated params.getForProp/getForEqs lookups.
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('uzfpd')
    # instantiate
    ransUzfpd = UzfpdIdentity(
        prop['eht_data'], prop['ig'], prop['fext'], prop['ieos'],
        prop['intc'], prop['prefix'], bconv, tconv)
    ransUzfpd.plot_uzfpd_identity(
        prop['laxis'], eqs['xbl'], eqs['xbr'], eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execDivu(self, bconv, tconv):
    """Plot the velocity divergence (dilatation).

    :param bconv: bottom boundary of the convection zone (passed to the plotter)
    :param tconv: top boundary of the convection zone (passed to the plotter)
    """
    # Hoist the repeated params.getForProp/getForEqs lookups; the dead
    # commented-out plot_divu_space_time block has been removed.
    prop = self.params.getForProp('prop')
    eqs = self.params.getForEqs('divu')
    # instantiate
    ransDivu = DivuDilatation(
        prop['eht_data'], prop['ig'], prop['fext'], prop['ieos'],
        prop['intc'], prop['prefix'], bconv, tconv)
    ransDivu.plot_divu(
        prop['laxis'], eqs['xbl'], eqs['xbr'], eqs['ybu'], eqs['ybd'], eqs['ilg'])
def SetMatplotlibParams(self):
    """Set standard matplotlib values to obtain publication-quality figures.

    Fix: the original had two adjacent triple-quoted strings; only the first
    was the docstring — the second was a discarded no-op expression. They are
    merged into this single docstring.
    """
    # plt.rc('text',usetex=True)
    # plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
    plt.rc('font', **{'family': 'serif', 'serif': ['Times New Roman']})
    plt.rc('font', size=16.)
    plt.rc('lines', linewidth=2, markeredgewidth=2., markersize=12)
    plt.rc('axes', linewidth=1.5)
    plt.rcParams['xtick.major.size'] = 8.
    plt.rcParams['xtick.minor.size'] = 4.
    plt.rcParams['figure.subplot.bottom'] = 0.15
    plt.rcParams['figure.subplot.left'] = 0.17
    plt.rcParams['figure.subplot.right'] = 0.85
    # Suppress the "too many open figures" warning for long analysis runs.
    plt.rcParams.update({'figure.max_open_warning': 0})
luminosity equation # ranshsselumi.plot_luminosity_equation(params.getForProp('prop')['laxis'], # params.getForEqs('lueqhsse')['xbl'], # params.getForEqs('lueqhsse')['xbr'], # params.getForEqs('lueqhsse')['ybu'], # params.getForEqs('lueqhsse')['ybd'], # params.getForEqs('lueqhsse')['ilg']) # plot hsse luminosity equation exact # plot hsse luminosity equation exact 2 # plot hsse luminosity equation alternative # ranshsselumi.plot_luminosity_equation_2(params.getForProp('prop')['laxis'], # params.getForEqs('lueqhsse')['xbl'], # params.getForEqs('lueqhsse')['xbr'], # params.getForEqs('lueqhsse')['ybu'], # params.getForEqs('lueqhsse')['ybd'], # params.getForEqs('lueqhsse')['ilg']) # plot hsse luminosity equation alternative simplified # ranshsselumi.plot_luminosity_equation_3(params.getForProp('prop')['laxis'], # params.getForEqs('lueqhsse')['xbl'], # params.getForEqs('lueqhsse')['xbr'], # params.getForEqs('lueqhsse')['ybu'], # params.getForEqs('lueqhsse')['ybd'], # params.getForEqs('lueqhsse')['ilg']) # instantiate # instantiate # ransXtra.plot_X(params.getForProp('prop')['laxis'], \ # params.getForEqs(x)['xbl'], \ # params.getForEqs(x)['xbr'], \ # params.getForEqs(x)['ybu'], \ # params.getForEqs(x)['ybd'], \ # params.getForEqs(x)['ilg']) # ransXtra.plot_gradX(params.getForProp('prop')['laxis'],\ # params.getForEqs(x)['xbl'],\ # params.getForEqs(x)['xbr'],\ # params.getForEqs(x)['ybu'],\ # params.getForEqs(x)['ybd'],\ # params.getForEqs(x)['ilg']) # instantiate #ransXtra.plot_X_space_time(params.getForProp('prop')['laxis'], # params.getForEqs(x)['xbl'], # params.getForEqs(x)['xbr'], # params.getForEqs(x)['ybu'], # params.getForEqs(x)['ybd'], # params.getForEqs(x)['ilg']) #ransXtra.plot_rhoX_space_time(params.getForProp('prop')['laxis'], # params.getForEqs(x)['xbl'], # params.getForEqs(x)['xbr'], # params.getForEqs(x)['ybu'], # params.getForEqs(x)['ybd'], # params.getForEqs(x)['ilg']) # ransXtra.plot_Xm_with_MM(params.getForProp('prop')['laxis'], # 
params.getForEqs(x)['xbl'], # params.getForEqs(x)['xbr'], # params.getForEqs(x)['ybu'], # params.getForEqs(x)['ybd'], # params.getForEqs(x)['ilg']) # instantiate # instantiate # plot X transport equation integral budget # instantiate # ransXflxx.plot_XfluxX(params.getForProp('prop')['laxis'], # params.getForEqs(x)['xbl'], # params.getForEqs(x)['xbr'], # params.getForEqs(x)['ybu'], # params.getForEqs(x)['ybd'], # params.getForEqs(x)['ilg']) # ransXflxx.plot_XfluxxX(params.getForProp('prop')['laxis'], # params.getForEqs(x)['xbl'], # params.getForEqs(x)['xbr'], # params.getForEqs(x)['ybu'], # params.getForEqs(x)['ybd'], # params.getForEqs(x)['ilg']) # ransXflxx.plot_XfluxXRogers1989(params.getForProp('prop')['laxis'], # params.getForEqs(x)['xbl'], # params.getForEqs(x)['xbr'], # params.getForEqs(x)['ybu'], # params.getForEqs(x)['ybd'], # params.getForEqs(x)['ilg']) # ransXflxx.plot_Xflux_gradient(params.getForProp('prop')['laxis'], # params.getForEqs(x)['xbl'], # params.getForEqs(x)['xbr'], # params.getForEqs(x)['ybu'], # params.getForEqs(x)['ybd'], # params.getForEqs(x)['ilg']) # ransXflxx.plot_XfluxX2(params.getForProp('prop')['laxis'], # params.getForEqs(x)['xbl'], # params.getForEqs(x)['xbr'], # params.getForEqs(x)['ybu'], # params.getForEqs(x)['ybd'], # params.getForEqs(x)['ilg']) # instantiate # ransXflxx.plot_XfluxX_equation2(params.getForProp('prop')['laxis'], \ # params.getForEqs(x)['xbl'], \ # params.getForEqs(x)['xbr'], \ # params.getForEqs(x)['ybu'], \ # params.getForEqs(x)['ybd'], \ # params.getForEqs(x)['ilg']) # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # ransXdiff.plot_X_Ediffusivity(params.getForProp('prop')['laxis'], # params.getForEqs(x)['xbl'], # params.getForEqs(x)['xbr'], # params.getForEqs(x)['ybu'], # params.getForEqs(x)['ybd'], # params.getForEqs(x)['ilg']) # instantiate # instantiate # plot turbulent kinetic energy #ransTke.plot_TKE_space_time(params.getForProp('prop')['laxis'], # 
params.getForEqs('tkeeq')['xbl'], # params.getForEqs('tkeeq')['xbr'], # params.getForEqs('tkeeq')['ybu'], # params.getForEqs('tkeeq')['ybd'], # params.getForEqs('tkeeq')['ilg']) # plot turbulent kinetic energy evolution # ransTke.plot_tke_evolution() # plot evolution of convection boundaries # ransTke.plot_conv_bndry_location() # instantiate # plot turbulent kinetic energy equation # instantiate # plot turbulent kinetic energy equation # instantiate # plot turbulent kinetic energy #ransTkeR.plot_TKEradial_space_time(params.getForProp('prop')['laxis'], # params.getForEqs('tkeReq')['xbl'], # params.getForEqs('tkeReq')['xbr'], # params.getForEqs('tkeReq')['ybu'], # params.getForEqs('tkeReq')['ybd'], # params.getForEqs('tkeReq')['ilg']) # plot turbulent kinetic energy evolution # ransTke.plot_tke_evolution() # plot evolution of convection boundaries # ransTke.plot_conv_bndry_location() # instantiate # plot turbulent kinetic energy equation # instantiate # plot turbulent kinetic energy equation # instantiate # plot turbulent kinetic energy #ransTkeH.plot_TKEhorizontal_space_time(params.getForProp('prop')['laxis'], # params.getForEqs('tkeHeq')['xbl'], # params.getForEqs('tkeHeq')['xbr'], # params.getForEqs('tkeHeq')['ybu'], # params.getForEqs('tkeHeq')['ybd'], # params.getForEqs('tkeHeq')['ilg']) # plot turbulent kinetic energy evolution # ransTke.plot_tke_evolution() # plot evolution of convection boundaries # ransTke.plot_conv_bndry_location() # instantiate # plot turbulent kinetic energy equation # instantiate # plot turbulent kinetic energy equation # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # 
instantiate # ransEnuc.plot_enuc(params.getForProp('prop')['laxis'], # bconv, tconv, # params.getForEqs('enuc')['xbl'], # params.getForEqs('enuc')['xbr'], # params.getForEqs('enuc')['ybu'], # params.getForEqs('enuc')['ybd'], # params.getForEqs('enuc')['ilg']) # ransEnuc.plot_enuc_per_volume(params.getForProp('prop')['laxis'], \ # params.getForEqs('enuc')['xbl'], \ # params.getForEqs('enuc')['xbr'], \ # params.getForEqs('enuc')['ybu'], \ # params.getForEqs('enuc')['ybd'], \ # params.getForEqs('enuc')['ilg']) # instantiate # instantiate #ransNablas.plot_nablas2(params.getForProp('prop')['laxis'], # bconv, tconv, super_ad_i, super_ad_o, # params.getForEqs('nablas')['xbl'], # params.getForEqs('nablas')['xbr'], # params.getForEqs('nablas')['ybu'], # params.getForEqs('nablas')['ybd'], # params.getForEqs('nablas')['ilg']) # instantiate # instantiate # instantiate # instantiate # ransBruntV.plot_ri(params.getForProp('prop')['laxis'], # bconv, tconv, # params.getForEqs('nsq')['xbl'], # params.getForEqs('nsq')['xbr'], # params.getForEqs('nsq')['ybu'], # params.getForEqs('nsq')['ybd'], # params.getForEqs('nsq')['ilg']) # instantiate # instantiate # ransRms.plot_relative_rms_flct2(params.getForProp('prop')['laxis'], # bconv, tconv, # params.getForEqs('relrmsflct')['xbl'], # params.getForEqs('relrmsflct')['xbr'], # params.getForEqs('relrmsflct')['ybu'], # params.getForEqs('relrmsflct')['ybd'], # params.getForEqs('relrmsflct')['ilg']) # instantiate # instantiate # plot kinetic energy # instantiate # plot kinetic energy equation # instantiate # plot total energy # instantiate # plot total energy equation # instantiate # plot reynolds stress rxx # instantiate # plot reynolds stress rxx # instantiate # plot reynolds stress ryy # instantiate # plot reynolds stress ryy # instantiate # plot reynolds stress rzz # instantiate # plot reynolds stress rzz # instantiate # plot abar # instantiate # plot abar equation # instantiate # plot fabarx # instantiate # plot fabarx equation # 
instantiate # plot zbar # instantiate # plot zbar equation # instantiate # plot fzbarx # instantiate # plot fzbarx equation # instantiate # ransPP.plot_dAdt(params.getForProp('prop')['laxis'], \ # params.getForEqs('press')['xbl'], \ # params.getForEqs('press')['xbr'], \ # params.getForEqs('press')['ybu'], \ # params.getForEqs('press')['ybd'], \ # params.getForEqs('press')['ilg']) # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate # instantiate #ransDivu.plot_divu_space_time(params.getForProp('prop')['laxis'], # bconv, tconv, # params.getForEqs('conteqfdd')['xbl'], # params.getForEqs('conteqfdd')['xbr'], # params.getForEqs('conteqfdd')['ybu'], # params.getForEqs('conteqfdd')['ybd'], # params.getForEqs('conteqfdd')['ilg']) This routine sets some standard values for matplotlib to obtain publication-quality figures # plt.rc('text',usetex=True) # plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) | 1.738269 | 2 |
python/periodic-web-scrapper/scraper/RequestScraper.py | MarioCodes/ProyectosClaseDAM | 0 | 6613377 | '''
Created on Mar 22, 2018
@author: msanchez
'''
import re
from bs4 import BeautifulSoup, Tag
class RequestScraper(object):
    """Extract headline anchor tags from a full, unfiltered HTTP response.

    Attributes:
        web: complete requests response; contains the HTTP status code and the
            whole unfiltered content.
        soup: BeautifulSoup built from the unfiltered response body.
    """
    def __init__(self, web):
        self.web = web
        # Parse the raw response body once; all queries run against this soup.
        self.soup = BeautifulSoup(web.content, "html.parser")
    def scrap_news(self):
        """Search the response for news headers, filter out broken or
        undesired HTML tags, and return only the wanted <a> elements.

        :return: raw <a> HTML elements, one per headline
        :rtype: list
        """
        news_html_elements = self.__get_all_news_headers()
        # Re-parse the stringified result list so we can iterate its children.
        news_soup = BeautifulSoup(str(news_html_elements), "html.parser")
        wanted_tags = list()
        for new in news_soup:
            # len == 1 keeps single-child Tag nodes; len == 2 entries are the
            # commas from the stringified list, and the Tag check drops
            # NavigableString/None entries.
            if len(new) == 1 and isinstance(new, Tag):  # len == 2 are all commas & out none types
                line_soup = BeautifulSoup(str(new), "html.parser")
                a_tag = line_soup.a
                wanted_tags.append(a_tag)
        return wanted_tags
    def __get_all_news_headers(self):
        # Headlines live in <h2> elements whose id starts with "titulo".
        return self.__search_element_by_aprox_attribute("h2", "id", "titulo")
    def __search_element_by_aprox_attribute(self, element, attribute, value):
        # NOTE(review): `value + "*"` is a regex, not a glob — the "*" only
        # quantifies the last character of `value` (e.g. "titulo*" also
        # matches "titul"). Works as a loose prefix match in practice;
        # confirm intent before tightening.
        return self.soup.find_all(element, {attribute : re.compile(value + "*") })
| '''
Created on Mar 22, 2018
@author: msanchez
'''
import re
from bs4 import BeautifulSoup, Tag
class RequestScraper(object):
''' Gets as parameter a whole unfiltered request (full HTML page). Gives back a list with raw HTML <a> tags. One tag for every headline.
Attributes:
web Complete request. contains HTTP response code and whole unfiltered content.
soup First BeautifulSoup created with the contents of the unfiltered web.
'''
def __init__(self, web):
self.web = web
self.soup = BeautifulSoup(web.content, "html.parser")
def scrap_news(self):
''' Searches for news headers in the whole requests, filters out broken or undesired HTML tags and gives back a list with only wanted ones.
:return Raw <a> HTML elements:
:rtype list:
'''
news_html_elements = self.__get_all_news_headers()
news_soup = BeautifulSoup(str(news_html_elements), "html.parser")
wanted_tags = list()
for new in news_soup:
if len(new) == 1 and isinstance(new, Tag): # len == 2 are all commas & out none types
line_soup = BeautifulSoup(str(new), "html.parser")
a_tag = line_soup.a
wanted_tags.append(a_tag)
return wanted_tags
def __get_all_news_headers(self):
return self.__search_element_by_aprox_attribute("h2", "id", "titulo")
def __search_element_by_aprox_attribute(self, element, attribute, value):
return self.soup.find_all(element, {attribute : re.compile(value + "*") })
| en | 0.738925 | Created on Mar 22, 2018 @author: msanchez Gets as parameter a whole unfiltered request (full HTML page). Gives back a list with raw HTML <a> tags. One tag for every headline. Attributes: web Complete request. contains HTTP response code and whole unfiltered content. soup First BeautifulSoup created with the contents of the unfiltered web. Searches for news headers in the whole requests, filters out broken or undesired HTML tags and gives back a list with only wanted ones. :return Raw <a> HTML elements: :rtype list: # len == 2 are all commas & out none types | 3.458522 | 3 |
webenmr/lib/convrdc.py | andreagia/WEBNMR | 0 | 6613378 | #!/usr/bin/env python
'''
This program attempts to convert XPLOR residual dipolar coupling (RDC) restraints into AMBER format
XPLOR:
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) (resid 200 and name Y ) ( resid 13 and name C ) 0.2400 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y ) ( resid 13 and name CA ) 0.4300 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y )( resid 13 and name CB ) 0.1000 0.2000
AMBER:
&align
num_datasets=2,
dcut= -1.0, freezemol= .false.,
ndip= 10, dwt= 5*0.1, 5*0.1
gigj= 5*-3.1631,5*-3.1631,
dij= 5*1.041,5*1.041,
s11= -4.236,-4.236
s12= 56.860,56.860
s13= -34.696,-34.696
s22= -27.361,-27.361
s23= -12.867,-12.867
dataset=1,
id(1)=20, jd(1)=19, dobsl(1)=-2.13, dobsu(1)=-2.13,
id(2)=31, jd(2)=30, dobsl(2)= 1.10, dobsu(2)= 1.10,
id(3)=43, jd(3)=42, dobsl(3)=-5.54, dobsu(3)=-5.54,
...
...
&end
'''
import sys
import os
import commands
from optparse import OptionParser
from xml_parser import *
from normalize_tbl import normalize
from constants import convtable
def searchres(nres, lpdb):
    """Return the residue name for residue number *nres* from PDB lines.

    Returns None implicitly when no ATOM record carries that residue number.
    """
    wanted = int(nres)
    for record in lpdb:
        # Only ATOM records carry residue information.
        if not record.strip().lower().startswith('atom'):
            continue
        fields = record.split()
        # Column 4 is the residue number, column 3 the residue name.
        if int(fields[4]) == wanted:
            return fields[3]
def searchC(outx):
    """Collect alignment-tensor components from XPLOR output lines.

    For each 'XDIPO_RDC>frun' marker, scan forward to the next line starting
    with 'C1=' and keep every second token (the numeric values). Returns a
    list with one token list per marker that was followed by a 'C1=' line.
    """
    tensors = []
    i = 0
    n = len(outx)
    while i < n:
        if outx[i].strip().startswith('XDIPO_RDC>frun'):
            # Advance to the matching 'C1=' line (or run off the end).
            i += 1
            while i < n and not outx[i].strip().startswith('C1='):
                i += 1
            if i < n:
                # Tokens alternate label/value; keep the values only.
                tensors.append(outx[i].split()[1::2])
        i += 1
    return tensors
def convert(pdb, new, wd):
    """Convert XPLOR RDC restraint tables into an AMBER &align block.

    pdb: path to the PDB file used to map residue/atom names to atom numbers.
    new: parsed input.xml node tree (provides protocol.xrdc entries).
    wd:  working directory containing the restraint tables.
    Returns the output as a list of lines.

    NOTE: this is Python 2 code (old `except IOError, (...)` syntax,
    dict.has_key) and shells out to a local XPLOR-NIH install.
    """
    # Collect the RDC table filenames declared in the XML protocol; a single
    # entry is a scalar node rather than a list, hence the branch.
    if new.calculation.protocol.xrdc:
        xfiles=[]
        if len(new.calculation.protocol.xrdc)==1:
            xfiles.append(new.calculation.protocol.xrdc.attrib_.xrdc_file)
        else:
            for i in range(len(new.calculation.protocol.xrdc)):
                xfiles.append(new.calculation.protocol.xrdc[i].attrib_.xrdc_file)
    else:
        sys.exit('%s: RDC not found\n' % sys.argv[0])
    try:
        lpdb=open(pdb, 'r').readlines()
    except IOError, (errno, strerror):
        sys.exit('%s: IOError(%s): %s %s\n' % (sys.argv[0], errno, pdb, strerror))
    # Map "resnum:atomname" -> atom serial number from the PDB ATOM records.
    numMap = {}
    for l in lpdb:
        if l.strip().lower().startswith('atom'):
            ls=l.split()
            k='%s:%s' % (ls[4],ls[2])
            numMap[k]=ls[1]
    # Run XPLOR to compute the alignment tensor(s); searchC extracts the
    # C1..C5 components from its textual output.
    cmd=' /opt/local_prog/xplor-nih-2.22/bin/xplor tensor.inp'
    outx=commands.getoutput(cmd)
    outx=outx.split('\n')
    #outx=open('xplor.outx').readlines()
    c=searchC(outx)
    # Build the AMBER &align header. out[3] is a placeholder for ndip and is
    # patched with the real restraint count at the end.
    out=[' &align\n']
    out.append(' num_datasets=%d,\n' % len(xfiles))
    out.append(' dcut=-1.0, freezemol=.false.,\n')
    out.append(' ndip=10,')
    # NOTE(review): dcut appears twice in the template (here and above) —
    # confirm AMBER tolerates/expects the duplicate.
    out.append(' dcut=-1.0,dwt=92*0.1,\n')
    out.append(' gigj=92*-3.163,\n')
    out.append(' dij=92*1.01,\n')
    # Accumulate the tensor components, one comma-separated value per dataset.
    s11=' s11='
    s12=' s12='
    s13=' s13='
    s22=' s22='
    s23=' s23='
    for i in range(len(c)):
        s11='%s%s,' % (s11, c[i][0])
        s12='%s%s,' % (s12, c[i][1])
        s13='%s%s,' % (s13, c[i][2])
        s22='%s%s,' % (s22, c[i][3])
        s23='%s%s,' % (s23, c[i][4])
    out.append('%s\n' % s11)
    out.append('%s\n' % s12)
    out.append('%s\n' % s13)
    out.append('%s\n' % s22)
    out.append('%s\n' % s23)
    counter=0
    nrdc=0
    for xfile in xfiles:
        counter+=1
        # Normalize each XPLOR table before parsing its assign statements.
        nxfile=os.path.join(wd, 'rdc_%d_web_enmr_normalized.tbl' % counter)
        xfile=os.path.join(wd, xfile)
        try:
            normalize(xfile, nxfile, new, wd)
        except:
            sys.exit('%s: unable to normalize %s tbl file\n' % (sys.argv[0], xfile))
        try:
            xp=open(nxfile,'r').readlines()
        except IOError, (errno, strerror):
            sys.exit('%s: IOError(%s): %s %s\n' % (sys.argv[0], errno, nxfile, strerror))
        out.append(' dataset=%d,\n' % counter)
        for l in xp:
            if l.strip().startswith('assign'):
                nrdc+=1
                ls=l.split()
                # Fixed token offsets rely on the normalized assign layout:
                # ls[31]/ls[34] are resid/atom of the first selection,
                # ls[38]/ls[41] of the second, ls[43] the observed value.
                res=searchres(ls[31], lpdb)
                kk='%s:%s' % (res, ls[34])
                # Translate XPLOR atom names to AMBER names where needed.
                if convtable.has_key(kk):
                    ls[34]=convtable[kk].split(':')[1]
                k='%s:%s' % (ls[31], ls[34])
                natm1=numMap[k]
                res=searchres(ls[38], lpdb)
                kk='%s:%s' % (res, ls[41])
                if convtable.has_key(kk):
                    ls[41]=convtable[kk].split(':')[1]
                k='%s:%s' % (ls[38], ls[41])
                natm2=numMap[k]
                # Lower and upper observed bounds are identical (exact value).
                out.append(' id(%s)=%s, jd(%s)=%s, dobsl(%s)=%s, dobsu(%s)=%s, \n' %
                           (nrdc, natm1, nrdc, natm2, nrdc, ls[43], nrdc, ls[43]))
    # Patch the ndip placeholder with the true number of restraints.
    out[3]=' ndip=%d,' % nrdc
    out.append(' &end')
    return out
if __name__ == '__main__':
    # Command-line driver: parse options, load the XML job description, run
    # the conversion, and write the AMBER restraint file.
    usage = "usage: %prog -w working_directory -p pdb_filename -o out_filename"
    parser = OptionParser(usage)
    parser.add_option("-w", "--wdir", dest="wd",
                      help="Working directory", metavar="WORKDIR")
    parser.add_option("-p", "--pdbfile", dest="pdbfile",
                      help="PDB filename", metavar="FILE")
    parser.add_option("-o", "--outfile", dest="outfile",
                      help="Output filename", metavar="FILE")
    (options, args) = parser.parse_args()
    if not options.wd:
        parser.error("Working directory is required")
    # All relative filenames are resolved against the working directory.
    wd=os.path.abspath(options.wd)+'/'
    if options.pdbfile:
        pdbfile=os.path.join(wd, options.pdbfile)
    else:
        parser.error("PDB filename is required")
    if options.outfile:
        outfile=os.path.join(wd, options.outfile)
    else:
        parser.error("Output filename is required")
    # Parse the job description (input.xml) into the node tree convert() expects.
    xml_input=os.path.join(wd,'input.xml')
    doc = etree.parse(xml_input)
    ndoc = etree.tostring(doc)
    new=parse_node(etree.fromstring(ndoc))
    out=convert(pdbfile, new, wd)
    fout=open(outfile,'w')
    fout.writelines(out)
    fout.close()
'''
This program attempts to convert XPLOR Pseudocontact shift restraints in AMBER format
XPLOR:
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) (resid 200 and name Y ) ( resid 13 and name C ) 0.2400 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y ) ( resid 13 and name CA ) 0.4300 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y )( resid 13 and name CB ) 0.1000 0.2000
AMBER:
&align
num_datasets=2,
dcut= -1.0, freezemol= .false.,
ndip= 10, dwt= 5*0.1, 5*0.1
gigj= 5*-3.1631,5*-3.1631,
dij= 5*1.041,5*1.041,
s11= -4.236,-4.236
s12= 56.860,56.860
s13= -34.696,-34.696
s22= -27.361,-27.361
s23= -12.867,-12.867
dataset=1,
id(1)=20, jd(1)=19, dobsl(1)=-2.13, dobsu(1)=-2.13,
id(2)=31, jd(2)=30, dobsl(2)= 1.10, dobsu(2)= 1.10,
id(3)=43, jd(3)=42, dobsl(3)=-5.54, dobsu(3)=-5.54,
...
...
&end
'''
import sys
import os
import commands
from optparse import OptionParser
from xml_parser import *
from normalize_tbl import normalize
from constants import convtable
def searchres(nres, lpdb):
for l in lpdb:
if l.strip().lower().startswith('atom'):
s=l.split()
if int(nres)==int(s[4]):
return s[3]
def searchC(outx):
i=0
c=[]
while i<len(outx):
if outx[i].strip().startswith('XDIPO_RDC>frun'):
while i<len(outx):
i+=1
if i>=len(outx):
break
if outx[i].strip().startswith('C1='):
t=[]
l=outx[i].split()
for x in range(1,len(l),2):
t.append(l[x])
c.append(t)
break
i+=1
return c
def convert(pdb, new, wd):
if new.calculation.protocol.xrdc:
xfiles=[]
if len(new.calculation.protocol.xrdc)==1:
xfiles.append(new.calculation.protocol.xrdc.attrib_.xrdc_file)
else:
for i in range(len(new.calculation.protocol.xrdc)):
xfiles.append(new.calculation.protocol.xrdc[i].attrib_.xrdc_file)
else:
sys.exit('%s: RDC not found\n' % sys.argv[0])
try:
lpdb=open(pdb, 'r').readlines()
except IOError, (errno, strerror):
sys.exit('%s: IOError(%s): %s %s\n' % (sys.argv[0], errno, pdb, strerror))
numMap = {}
for l in lpdb:
if l.strip().lower().startswith('atom'):
ls=l.split()
k='%s:%s' % (ls[4],ls[2])
numMap[k]=ls[1]
cmd=' /opt/local_prog/xplor-nih-2.22/bin/xplor tensor.inp'
outx=commands.getoutput(cmd)
outx=outx.split('\n')
#outx=open('xplor.outx').readlines()
c=searchC(outx)
out=[' &align\n']
out.append(' num_datasets=%d,\n' % len(xfiles))
out.append(' dcut=-1.0, freezemol=.false.,\n')
out.append(' ndip=10,')
out.append(' dcut=-1.0,dwt=92*0.1,\n')
out.append(' gigj=92*-3.163,\n')
out.append(' dij=92*1.01,\n')
s11=' s11='
s12=' s12='
s13=' s13='
s22=' s22='
s23=' s23='
for i in range(len(c)):
s11='%s%s,' % (s11, c[i][0])
s12='%s%s,' % (s12, c[i][1])
s13='%s%s,' % (s13, c[i][2])
s22='%s%s,' % (s22, c[i][3])
s23='%s%s,' % (s23, c[i][4])
out.append('%s\n' % s11)
out.append('%s\n' % s12)
out.append('%s\n' % s13)
out.append('%s\n' % s22)
out.append('%s\n' % s23)
counter=0
nrdc=0
for xfile in xfiles:
counter+=1
nxfile=os.path.join(wd, 'rdc_%d_web_enmr_normalized.tbl' % counter)
xfile=os.path.join(wd, xfile)
try:
normalize(xfile, nxfile, new, wd)
except:
sys.exit('%s: unable to normalize %s tbl file\n' % (sys.argv[0], xfile))
try:
xp=open(nxfile,'r').readlines()
except IOError, (errno, strerror):
sys.exit('%s: IOError(%s): %s %s\n' % (sys.argv[0], errno, nxfile, strerror))
out.append(' dataset=%d,\n' % counter)
for l in xp:
if l.strip().startswith('assign'):
nrdc+=1
ls=l.split()
res=searchres(ls[31], lpdb)
kk='%s:%s' % (res, ls[34])
if convtable.has_key(kk):
ls[34]=convtable[kk].split(':')[1]
k='%s:%s' % (ls[31], ls[34])
natm1=numMap[k]
res=searchres(ls[38], lpdb)
kk='%s:%s' % (res, ls[41])
if convtable.has_key(kk):
ls[41]=convtable[kk].split(':')[1]
k='%s:%s' % (ls[38], ls[41])
natm2=numMap[k]
out.append(' id(%s)=%s, jd(%s)=%s, dobsl(%s)=%s, dobsu(%s)=%s, \n' %
(nrdc, natm1, nrdc, natm2, nrdc, ls[43], nrdc, ls[43]))
out[3]=' ndip=%d,' % nrdc
out.append(' &end')
return out
if __name__ == '__main__':
usage = "usage: %prog -w working_directory -p pdb_filename -o out_filename"
parser = OptionParser(usage)
parser.add_option("-w", "--wdir", dest="wd",
help="Working directory", metavar="WORKDIR")
parser.add_option("-p", "--pdbfile", dest="pdbfile",
help="PDB filename", metavar="FILE")
parser.add_option("-o", "--outfile", dest="outfile",
help="Output filename", metavar="FILE")
(options, args) = parser.parse_args()
if not options.wd:
parser.error("Working directory is required")
wd=os.path.abspath(options.wd)+'/'
if options.pdbfile:
pdbfile=os.path.join(wd, options.pdbfile)
else:
parser.error("PDB filename is required")
if options.outfile:
outfile=os.path.join(wd, options.outfile)
else:
parser.error("Output filename is required")
xml_input=os.path.join(wd,'input.xml')
doc = etree.parse(xml_input)
ndoc = etree.tostring(doc)
new=parse_node(etree.fromstring(ndoc))
out=convert(pdbfile, new, wd)
fout=open(outfile,'w')
fout.writelines(out)
fout.close() | en | 0.439307 | #!/usr/bin/env python This program attempts to convert XPLOR Pseudocontact shift restraints in AMBER format XPLOR: assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) (resid 200 and name Y ) ( resid 13 and name C ) 0.2400 0.2000 assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y ) ( resid 13 and name CA ) 0.4300 0.2000 assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y )( resid 13 and name CB ) 0.1000 0.2000 AMBER: &align num_datasets=2, dcut= -1.0, freezemol= .false., ndip= 10, dwt= 5*0.1, 5*0.1 gigj= 5*-3.1631,5*-3.1631, dij= 5*1.041,5*1.041, s11= -4.236,-4.236 s12= 56.860,56.860 s13= -34.696,-34.696 s22= -27.361,-27.361 s23= -12.867,-12.867 dataset=1, id(1)=20, jd(1)=19, dobsl(1)=-2.13, dobsu(1)=-2.13, id(2)=31, jd(2)=30, dobsl(2)= 1.10, dobsu(2)= 1.10, id(3)=43, jd(3)=42, dobsl(3)=-5.54, dobsu(3)=-5.54, ... ... &end #outx=open('xplor.outx').readlines() | 2.335728 | 2 |
resource_rich/applications/edge_bridge.py | MBradbury/iot-trust-task-alloc | 8 | 6613379 | #!/usr/bin/env python3
import logging
import asyncio
import signal
from datetime import datetime, timezone
import os
from config import edge_marker, application_edge_marker, serial_sep, edge_server_port
# Root logging at INFO; the bridge's own logger is raised to DEBUG so its
# per-line forwarding messages are visible.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("edge-bridge")
logger.setLevel(logging.DEBUG)
class NodeSerialBridge:
    """Bridge between an edge sensor node on a serial line and local
    application processes connected over TCP.

    Serial output prefixed with ``application_edge_marker`` is forwarded to
    the matching registered application; lines applications send back are
    written to the node via the pyterm subprocess's stdin.
    """

    def __init__(self):
        self.proc = None          # pyterm subprocess wrapping the serial line
        self.server = None        # asyncio TCP server for local applications
        self.applications = {}    # application name -> StreamWriter

    async def start(self):
        """Spawn the serial terminal and start the local application server."""
        # Start processing serial output from the edge sensor node.
        self.proc = await asyncio.create_subprocess_shell(
            os.path.expanduser("~/pi-client/tools/pyterm") + " -b 115200 -p /dev/ttyUSB0",
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE)
        await self._inform_edge_bridge_started()
        # Start a server that applications can connect to.
        self.server = await asyncio.start_server(
            self._handle_application_conn,
            'localhost',
            edge_server_port)
        addr = self.server.sockets[0].getsockname()
        logger.info(f'Serving on {addr}')

    async def stop(self):
        """Stop the server, notify the node, then terminate the serial process."""
        # Stop the server first so applications cannot communicate with us.
        if self.server is not None:
            self.server.close()
            await self.server.wait_closed()
        # If we are being stopped, inform the sensor node application.
        await self._inform_edge_bridge_stopped()
        if self.proc is not None:
            self.proc.terminate()
            await self.proc.wait()
            self.proc = None

    async def _process_serial_output(self, now, line):
        """Forward one application-marked serial line (marker stripped) to
        the application it names, timestamped with *now*."""
        logger.debug(f"process_edge_output: {line}")
        # NOTE(review): assumes the line contains serial_sep — a malformed
        # line raises ValueError here; confirm the node guarantees the format.
        application_name, payload = line.split(serial_sep, 1)
        try:
            # Find the application to send to.
            writer = self.applications[application_name]
            # Send the payload along with the timestamp it was received at.
            writer.write(f"{now.isoformat()}{serial_sep}{payload}\n".encode('utf-8'))
            await writer.drain()
        except KeyError:
            logger.warning(f"Unable to find local application {application_name} to forward message to")

    async def _run_serial(self):
        """Consume the node's serial output, dispatching marked lines."""
        loop = asyncio.get_event_loop()
        async for output in self.proc.stdout:
            # Exit if the event loop has stopped.
            if not loop.is_running():
                break
            line = output.decode('utf-8').rstrip()
            if line.startswith(application_edge_marker):
                # Application message: timestamp and forward.
                now = datetime.now(timezone.utc)
                await self._process_serial_output(now, line[len(application_edge_marker):])
            elif line.startswith(edge_marker):
                # Edge control message: none are expected inbound.
                logger.warning(f"Don't know what to do with {line}")
            else:
                # Regular node log output: pass straight through.
                print(line, flush=True)

    async def _run_applications(self):
        """Serve application connections until cancelled."""
        async with self.server:
            await self.server.serve_forever()

    async def run(self):
        """Run the serial reader and the application server concurrently."""
        t1 = asyncio.create_task(self._run_serial())
        t2 = asyncio.create_task(self._run_applications())
        await asyncio.gather(t1, t2)

    async def _handle_application_conn(self, reader, writer):
        """Register one application connection and relay its lines to serial.

        The first line an application sends is its name; subsequent lines are
        written verbatim to the sensor node's stdin.
        """
        application_name = None
        try:
            addr = writer.get_extra_info('peername')
            logger.info(f"Connected to {addr}")
            application_name = (await reader.readline()).decode("utf-8").rstrip()
            logger.info(f"Application {application_name} is running on {addr}")
            self.applications[application_name] = writer
            # Read lines from the application and forward onto the serial line.
            while not reader.at_eof():
                line = await reader.readline()
                self.proc.stdin.write(line)
                await self.proc.stdin.drain()
        finally:
            # Bug fix: the original `del self.applications[application_name]`
            # raised NameError when the peer disconnected before sending its
            # name; pop() with a default also tolerates a missing entry.
            self.applications.pop(application_name, None)

    async def _inform_edge_bridge_started(self):
        """Tell the sensor node that the bridge has started."""
        line = f"{edge_marker}start\n".encode("utf-8")
        self.proc.stdin.write(line)
        await self.proc.stdin.drain()
        logger.debug("Sent start event")

    async def _inform_edge_bridge_stopped(self):
        """Tell the sensor node that the bridge is stopping."""
        line = f"{edge_marker}stop\n".encode("utf-8")
        self.proc.stdin.write(line)
        await self.proc.stdin.drain()
        logger.debug("Sent stop event")
async def do_run(service):
    """Start *service*, then run it to completion."""
    for step in (service.start, service.run):
        await step()
async def shutdown(signal, loop, services):
    """Cleanup tasks tied to the service's shutdown.

    NOTE(review): the *signal* parameter shadows the imported signal module
    inside this function.
    """
    logger.info(f"Received exit signal {signal.name}...")
    logger.info(f"Stopping services tasks...")
    # Stop every service first so each can clean up; return_exceptions=True
    # collects failures instead of aborting the gather.
    await asyncio.gather(*[service.stop() for service in services], return_exceptions=True)
    # Cancel every remaining task except this shutdown coroutine itself.
    tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    for task in tasks:
        task.cancel()
    logger.info(f"Cancelling {len(tasks)} outstanding tasks...")
    # Wait for the cancellations to settle; CancelledError is swallowed here.
    await asyncio.gather(*tasks, return_exceptions=True)
    logger.info(f"Finished cancelling tasks!")
    loop.stop()
def exception_handler(loop, context, services):
    """Event-loop exception hook: currently only logs the failure *context*.

    *services* is unused until the TODO below is implemented.
    """
    logger.info(f"Exception raised: {context}")
    # TODO: Gracefully stop services and notify sensor node we have shutdown
    #loop.create_task(asyncio.gather(*[service.stop() for service in services], return_exceptions=True))
def main(service):
    """Install signal handlers and run *service* on the event loop until exit."""
    logger.info("Starting edge serial bridge")
    loop = asyncio.get_event_loop()
    # May want to catch other signals too
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        # Each signal schedules a graceful shutdown of the service.
        # (sig=sig binds the loop variable at definition time.)
        loop.add_signal_handler(sig, lambda sig=sig: asyncio.create_task(shutdown(sig, loop, [service])))
    loop.set_exception_handler(lambda l, c: exception_handler(l, c, [service]))
    try:
        loop.run_until_complete(do_run(service))
    finally:
        loop.close()
        logger.info("Successfully shutdown the edge serial bridge.")
if __name__ == "__main__":
    # Build the bridge and block in main() until shutdown.
    main(NodeSerialBridge())
| #!/usr/bin/env python3
import logging
import asyncio
import signal
from datetime import datetime, timezone
import os
from config import edge_marker, application_edge_marker, serial_sep, edge_server_port
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("edge-bridge")
logger.setLevel(logging.DEBUG)
class NodeSerialBridge:
    """Bridge between a sensor node on a serial line and local TCP applications.

    Serial lines prefixed with application_edge_marker are routed to the named
    local application; lines the applications send are written back to the
    sensor node's stdin. Any other serial output is printed as a log line.
    """

    def __init__(self):
        self.proc = None         # pyterm subprocess wrapping the serial line
        self.server = None       # asyncio TCP server for local applications
        self.applications = {}   # application name -> asyncio StreamWriter

    async def start(self):
        """Open the serial line via pyterm and start the application server."""
        # Start processing serial output from edge sensor node
        self.proc = await asyncio.create_subprocess_shell(
            os.path.expanduser("~/pi-client/tools/pyterm") + " -b 115200 -p /dev/ttyUSB0",
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE)
        await self._inform_edge_bridge_started()
        # Start a server that applications can connect to
        self.server = await asyncio.start_server(
            self._handle_application_conn,
            'localhost',
            edge_server_port)
        addr = self.server.sockets[0].getsockname()
        logger.info(f'Serving on {addr}')

    async def stop(self):
        """Stop the server, notify the sensor node, then close the serial line."""
        # Stop the server, so applications cannot communicate with us
        if self.server is not None:
            self.server.close()
            await self.server.wait_closed()
        # Guarded on self.proc: the original wrote to proc.stdin
        # unconditionally and raised AttributeError when stop() ran without a
        # prior (successful) start().
        if self.proc is not None:
            # If we are being stopped, then inform the sensor node application
            await self._inform_edge_bridge_stopped()
            # Stop the serial line
            self.proc.terminate()
            await self.proc.wait()
            self.proc = None

    async def _process_serial_output(self, now: datetime, line: str):
        """Forward one serial message ("<app><serial_sep><payload>") to its application."""
        logger.debug(f"process_edge_output: {line}")
        # Split only on the first separator so the payload may itself contain it.
        application_name, payload = line.split(serial_sep, 1)
        try:
            # Writer registered by _handle_application_conn.
            writer = self.applications[application_name]
            # Relay "<iso-timestamp><serial_sep><payload>\n" to the application.
            writer.write(f"{now.isoformat()}{serial_sep}{payload}\n".encode('utf-8'))
            await writer.drain()
        except KeyError:
            # No such application currently connected; drop the message.
            logger.warning(f"Unable to find local application {application_name} to forward message to")

    async def _run_serial(self):
        """Read serial output line by line and dispatch it by marker prefix."""
        loop = asyncio.get_event_loop()
        async for output in self.proc.stdout:
            # Exit if the event loop has stopped
            if not loop.is_running():
                break
            line = output.decode('utf-8').rstrip()
            # Application message: strip the marker and route the remainder
            if line.startswith(application_edge_marker):
                now = datetime.now(timezone.utc)
                await self._process_serial_output(now, line[len(application_edge_marker):])
            # Edge (bridge-level) message: no handler implemented yet
            elif line.startswith(edge_marker):
                logger.warning(f"Don't know what to do with {line}")
            # Regular log line from the node
            else:
                print(line, flush=True)

    async def _run_applications(self):
        """Serve local application connections until the server is closed."""
        async with self.server:
            await self.server.serve_forever()

    async def run(self):
        """Run the serial reader and the application server concurrently."""
        t1 = asyncio.create_task(self._run_serial())
        t2 = asyncio.create_task(self._run_applications())
        await asyncio.gather(t1, t2)

    async def _handle_application_conn(self, reader, writer):
        """Handle one application connection: register it, then relay its lines to serial."""
        application_name = None
        try:
            addr = writer.get_extra_info('peername')
            logger.info(f"Connected to {addr}")
            # The first line the application sends is its name.
            application_name = (await reader.readline()).decode("utf-8").rstrip()
            logger.info(f"Application {application_name} is running on {addr}")
            self.applications[application_name] = writer
            # Read lines from the application and forward onto the serial line
            while not reader.at_eof():
                line = await reader.readline()
                self.proc.stdin.write(line)
                await self.proc.stdin.drain()
        finally:
            # Unregister only if the handshake completed; the original `del`
            # raised NameError/KeyError when the connection dropped earlier.
            if application_name is not None:
                self.applications.pop(application_name, None)

    async def _inform_edge_bridge_started(self):
        """Notify the sensor node over serial that the bridge has started."""
        line = f"{edge_marker}start\n".encode("utf-8")
        self.proc.stdin.write(line)
        await self.proc.stdin.drain()
        logger.debug("Sent start event")

    async def _inform_edge_bridge_stopped(self):
        """Notify the sensor node over serial that the bridge is stopping."""
        line = f"{edge_marker}stop\n".encode("utf-8")
        self.proc.stdin.write(line)
        await self.proc.stdin.drain()
        logger.debug("Sent stop event")
async def do_run(service):
    """Start *service*, then run it to completion."""
    for step in (service.start, service.run):
        await step()
async def shutdown(signal, loop, services):
    """Cleanup tasks tied to the service's shutdown.

    NOTE(review): the *signal* parameter shadows the imported signal module
    inside this function.
    """
    logger.info(f"Received exit signal {signal.name}...")
    logger.info(f"Stopping services tasks...")
    # Stop every service first so each can clean up; return_exceptions=True
    # collects failures instead of aborting the gather.
    await asyncio.gather(*[service.stop() for service in services], return_exceptions=True)
    # Cancel every remaining task except this shutdown coroutine itself.
    tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    for task in tasks:
        task.cancel()
    logger.info(f"Cancelling {len(tasks)} outstanding tasks...")
    # Wait for the cancellations to settle; CancelledError is swallowed here.
    await asyncio.gather(*tasks, return_exceptions=True)
    logger.info(f"Finished cancelling tasks!")
    loop.stop()
def exception_handler(loop, context, services):
    """Event-loop exception hook: currently only logs the failure *context*.

    *services* is unused until the TODO below is implemented.
    """
    logger.info(f"Exception raised: {context}")
    # TODO: Gracefully stop services and notify sensor node we have shutdown
    #loop.create_task(asyncio.gather(*[service.stop() for service in services], return_exceptions=True))
def main(service):
    """Install signal handlers and run *service* on the event loop until exit."""
    logger.info("Starting edge serial bridge")
    loop = asyncio.get_event_loop()
    # May want to catch other signals too
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        # Each signal schedules a graceful shutdown of the service.
        # (sig=sig binds the loop variable at definition time.)
        loop.add_signal_handler(sig, lambda sig=sig: asyncio.create_task(shutdown(sig, loop, [service])))
    loop.set_exception_handler(lambda l, c: exception_handler(l, c, [service]))
    try:
        loop.run_until_complete(do_run(service))
    finally:
        loop.close()
        logger.info("Successfully shutdown the edge serial bridge.")
if __name__ == "__main__":
    # Build the bridge and block in main() until shutdown.
    main(NodeSerialBridge())
| en | 0.833603 | #!/usr/bin/env python3 # Start processing serial output from edge sensor node # Start a server that applications can connect to # Stop the server, so applications cannot communicate with us # If we are being stopped, then inform the sensor node application # Stop the serial line # Find application to send to # Send the payload and the created timestamp # Exit if the event loop has stopped # Application message # Edge message # Regular log # Read lines from the application and forward onto the serial line Cleanup tasks tied to the service's shutdown. # TODO: Gracefully stop services and notify sensor node we have shutdown #loop.create_task(asyncio.gather(*[service.stop() for service in services], return_exceptions=True)) # May want to catch other signals too | 2.488842 | 2 |
libraries/utils.py | mapto/sprks | 1 | 6613380 | import random
import hashlib
import time
import datetime
class hash_utils:
    """Small hashing helpers.

    NOTE(review): unsalted SHA-224 is not a safe password hash; consider
    hashlib.pbkdf2_hmac (or a dedicated library) for real deployments.
    """

    @classmethod
    def hash_password(cls, password):
        """
        Return the hex SHA-224 digest of *password* for database storage.

        Accepts str or bytes; str input is encoded as UTF-8, because hashlib
        requires bytes on Python 3 (the original raised TypeError on str).
        """
        if isinstance(password, str):
            password = password.encode('utf-8')
        return hashlib.sha224(password).hexdigest()

    @classmethod
    def random_hex(cls):
        """
        Return a pseudo-random 56-character hex string.

        Built by SHA-224 hashing the current GMT time plus a random integer.
        Not cryptographically strong; use the secrets module for
        security-sensitive tokens.
        """
        random.seed()
        seed_text = time.asctime(time.gmtime()) + str(random.randint(1, 100000))
        return hashlib.sha224(seed_text.encode('utf-8')).hexdigest()
class date_utils:
    """Date parsing helpers."""

    @classmethod
    def iso8601_to_date(cls, datestamp):
        """
        Converts ISO8601 date (YYYY-MM-DD) to datetime.date object.
        """
        parsed = datetime.datetime.strptime(datestamp, '%Y-%m-%d')
        return parsed.date()
import hashlib
import time
import datetime
class hash_utils:
    """Small hashing helpers.

    NOTE(review): unsalted SHA-224 is not a safe password hash; consider
    hashlib.pbkdf2_hmac (or a dedicated library) for real deployments.
    """

    @classmethod
    def hash_password(cls, password):
        """
        Return the hex SHA-224 digest of *password* for database storage.

        Accepts str or bytes; str input is encoded as UTF-8, because hashlib
        requires bytes on Python 3 (the original raised TypeError on str).
        """
        if isinstance(password, str):
            password = password.encode('utf-8')
        return hashlib.sha224(password).hexdigest()

    @classmethod
    def random_hex(cls):
        """
        Return a pseudo-random 56-character hex string.

        Built by SHA-224 hashing the current GMT time plus a random integer.
        Not cryptographically strong; use the secrets module for
        security-sensitive tokens.
        """
        random.seed()
        seed_text = time.asctime(time.gmtime()) + str(random.randint(1, 100000))
        return hashlib.sha224(seed_text.encode('utf-8')).hexdigest()
class date_utils:
    """Date parsing helpers."""

    @classmethod
    def iso8601_to_date(cls, datestamp):
        """
        Converts ISO8601 date (YYYY-MM-DD) to datetime.date object.
        """
        parsed = datetime.datetime.strptime(datestamp, '%Y-%m-%d')
        return parsed.date()
main.py | Batoch/PrometheeXmessenger | 1 | 6613381 | import threading
import atexit, random
from flask import Flask, request
from bot import Bot
from promethee import *
from dialog import *
from datetime import datetime
POOL_TIME = 3600 # polling interval between grade scans, in seconds
# Module-level state shared between the Flask handlers and the polling thread.
nomfichierabo = "listeabo"  # pickle file holding the list of subscribed students
studenttocheck = []  # list of dicts with keys "numero", "idmes", "nom", "prenom"
listname = []  # known student full names (upper-cased in doStuffStart)
listenumero = []  # promethee numbers, parallel to listname
# lock to control access to the shared state above
dataLock = threading.Lock()
# handle of the background polling thread (replaced by a threading.Timer once started)
yourThread = threading.Thread()
def create_app():
    """Create and return the Flask app for the Messenger grade-notification bot.

    Side effects at call time: logs in to promethee, loads (or creates) the
    pickled subscriber list and starts a background polling thread that
    re-arms itself every POOL_TIME seconds.
    """
    app = Flask(__name__)

    ACCESS_TOKEN = '<PASSWORD>'
    VERIFY_TOKEN = '<PASSWORD>'
    bot = Bot(ACCESS_TOKEN)

    def interrupt():
        # Cancel the pending polling timer; registered with atexit below.
        global yourThread
        yourThread.cancel()

    def doStuff():
        # Periodic job: check every subscribed student for new grades and
        # send a Messenger notification for each one found.
        global yourThread
        global studenttocheck
        with dataLock:
            print(studenttocheck)
            for student in studenttocheck:
                print(str(datetime.now())[11:-7] + " Scan de " + student["prenom"])
                result = checknewnote(student["numero"])
                if result == 0:
                    # No new grade for this student.
                    print("Pas de nouvelle note")
                elif result == -1:
                    # checknewnote signals failure with -1.
                    print("Erreur lors du check de " + student["prenom"])
                else:
                    # result maps course name -> new grade.
                    for i in result:
                        send_message(student["idmes"], "Nouvelle note en " + i + " : " + result[i])
                        print("Nouvelle note en " + i + " : " + result[i])
        # Re-arm the timer for the next poll.
        yourThread = threading.Timer(POOL_TIME, doStuff, ())
        yourThread.start()

    def doStuffStart():
        # One-time initialisation: log in to promethee, build the student
        # name/number lists and load (or create) the pickled subscriber list.
        global listname
        global listenumero
        global studenttocheck
        global yourThread
        login()
        falselisteeleves()
        # Lists populated by the promethee module (star import).
        listname = listenom1a + listenom2a
        listenumero = listenumero1a + listenumero2a
        # Load the subscriber list, creating the pickle file on first run.
        if is_non_zero_file(nomfichierabo) == 0:
            with open(nomfichierabo, 'wb') as file:
                pickler = pickle.Pickler(file)
                pickler.dump(studenttocheck)
            print("Nouveau fichier creer")
        else:
            with open(nomfichierabo, 'rb') as file:
                depickler = pickle.Unpickler(file)
                studenttocheck = depickler.load()
        # Names are matched case-insensitively, so normalise them once.
        for i in range(len(listname)):
            listname[i] = listname[i].upper()
        # Start the first polling timer.
        yourThread = threading.Timer(POOL_TIME, doStuff, ())
        yourThread.start()

    @app.route("/", methods=['GET', 'POST'])
    def receive_message():
        """Webhook endpoint: GET is Facebook's verification, POST delivers events."""
        if request.method == 'GET':
            # Before allowing people to message your bot, Facebook has
            # implemented a verify token that confirms all requests that your
            # bot receives came from Facebook.
            token_sent = request.args.get("hub.verify_token")
            return verify_fb_token(token_sent)
        # If the request was not GET, it must be POST: handle incoming messages.
        else:
            # get whatever message a user sent the bot
            output = request.get_json()
            for event in output['entry']:
                messaging = event['messaging']
                for message in messaging:
                    if message.get('message'):
                        # Facebook Messenger ID for user so we know where to send response back to
                        recipient_id = message['sender']['id']
                        # get_user_info returns a dict: stringify it before
                        # concatenating (the original str + dict raised
                        # TypeError on every incoming message).
                        print("Nouveau message de : " + str(bot.get_user_info(message['sender']['id'])) + " :")
                        if message['message'].get('text'):
                            print(message['message'].get('text'))
                            response_sent_text = get_message(message)
                            send_message(recipient_id, response_sent_text)
                        # if user sends us a GIF, photo, video, or any other non-text item
                        if message['message'].get('attachments'):
                            response_sent_nontext = "Merci"
                            send_message(recipient_id, response_sent_nontext)
        return "Message Processed"

    def verify_fb_token(token_sent):
        # Compare the token sent by Facebook with our verify token; if they
        # match allow the request, otherwise report an error.
        if token_sent == VERIFY_TOKEN:
            return request.args.get("hub.challenge")
        return 'Invalid verification token'

    def get_message(message):
        """Build the reply for one incoming Messenger *message*.

        Unknown senders are matched by profile name against the student list
        and subscribed; known senders get the dialogquerry-driven behaviour.
        """
        if not next((item for item in studenttocheck if item["idmes"] == message['sender']['id']),
                    None):  # If the person is not in the list
            # Look up who the sender is from their Facebook profile name.
            first_name = bot.get_user_info(message['sender']['id'])["first_name"]
            last_name = bot.get_user_info(message['sender']['id'])["last_name"]
            name = last_name + " " + first_name
            if name.upper() in listname:
                promethee_number = listenumero[listname.index(name.upper())]
                studenttocheck.append({"numero": promethee_number, "idmes": message['sender']['id'], "nom": last_name,
                                       "prenom": first_name})
                # Persist the updated subscriber list.
                with open(nomfichierabo, 'wb') as file:
                    pickler = pickle.Pickler(file)
                    pickler.dump(studenttocheck)
                response = "Je t'ai ajouté à la liste mon petit " + first_name
            else:
                response = "Je ne sais pas qui t'es, j'ai demandé à baptiste de la faire manuellement"
                # Alert the maintainer that a manual subscription is needed.
                bot.send_text_message('2621783057950447',
                                      "Aledfrr j'arrive pas a ajouter " + name + " " + message['sender']['id'])
        else:
            action, response = dialogquerry(message['message'].get('text'))
            # NOTE(review): the sample_responses lists below are assigned but
            # never returned; only *response* from dialogquerry is sent back.
            if action == "Menu":
                sample_responses = [
                    "Send 'note' to get your grade (you must be in the database) or send 'Quit' to get removed from the database."]
            elif action == "Note":
                notes = noteseleve(
                    next((item for item in studenttocheck if item["idmes"] == message['sender']['id']), None)["numero"])
                answer = "Notes de " + str(bot.get_user_info(message['sender']['id'])["first_name"]) + ":\n"
                for i in notes.keys():
                    answer = answer + i + " : " + str(notes[i]) + "\n"
                response = response + " :\n" + answer
                notes.clear()
            elif action == "QUIT":
                sample_responses = ["Je suis triste, à bientot"]
                # Remove the person from the list if they are subscribed.
                if next((item for item in studenttocheck if item["idmes"] == message['sender']['id']), None) in studenttocheck:
                    studenttocheck.remove(next((item for item in studenttocheck if item["idmes"] == message['sender']['id']), None))
                    with open(nomfichierabo, 'wb') as file:
                        pickler = pickle.Pickler(file)
                        pickler.dump(studenttocheck)
                else:
                    bot.send_text_message('2621783057950447', "Aledfrr j'arrive pas a supprimer " + bot.get_user_info(message['sender']['id'])["first_name"] + " " + message['sender']['id'])
            elif action == "Default Fallback Intent":
                if message['sender']['id'] == "3190229520988487":
                    response = random.choice(["Ferme la grosse merde", "Je te déteste Arthur", "Get cancer and die plz", "Puterelle",
                                              "Tu vois, ça, c’est la raison pour laquelle les gens parlent mal de toi quand t’es pas là",
                                              "Je trouve ça absolument génial, cette manière bien à toi que tu as de dire des trucs absolument évidents avec la sincère conviction que tu as découvert quelque chose",
                                              "C’est bon, tu as terminé ?", "Tu n’es vraiment pas assez beau pour pouvoir te permettre d’être aussi bête",
                                              "Est-ce que tu te rends compte que les gens ne font que te tolérer ?"])
                else:
                    sample_responses = ["Je suis un bot", "C'est moi qui envoie les messages", "Bonjour a toi"]
        return response

    def send_message(recipient_id, reply):
        # Send *reply* to the user via PyMessenger.
        bot.send_text_message(recipient_id, reply)
        print("Envoi du message: " + reply)
        return "success"

    # Initiate
    doStuffStart()
    # When you kill Flask (SIGTERM), clear the trigger for the next thread
    atexit.register(interrupt)
    return app
# Build the app at import time and start Flask's development server.
app = create_app()
app.run()
| import threading
import atexit, random
from flask import Flask, request
from bot import Bot
from promethee import *
from dialog import *
from datetime import datetime
POOL_TIME = 3600 # polling interval between grade scans, in seconds
# Module-level state shared between the Flask handlers and the polling thread.
nomfichierabo = "listeabo"  # pickle file holding the list of subscribed students
studenttocheck = []  # list of dicts with keys "numero", "idmes", "nom", "prenom"
listname = []  # known student full names (upper-cased in doStuffStart)
listenumero = []  # promethee numbers, parallel to listname
# lock to control access to the shared state above
dataLock = threading.Lock()
# handle of the background polling thread (replaced by a threading.Timer once started)
yourThread = threading.Thread()
def create_app():
    """Create and return the Flask app for the Messenger grade-notification bot.

    Side effects at call time: logs in to promethee, loads (or creates) the
    pickled subscriber list and starts a background polling thread that
    re-arms itself every POOL_TIME seconds.
    """
    app = Flask(__name__)

    ACCESS_TOKEN = '<PASSWORD>'
    VERIFY_TOKEN = '<PASSWORD>'
    bot = Bot(ACCESS_TOKEN)

    def interrupt():
        # Cancel the pending polling timer; registered with atexit below.
        global yourThread
        yourThread.cancel()

    def doStuff():
        # Periodic job: check every subscribed student for new grades and
        # send a Messenger notification for each one found.
        global yourThread
        global studenttocheck
        with dataLock:
            print(studenttocheck)
            for student in studenttocheck:
                print(str(datetime.now())[11:-7] + " Scan de " + student["prenom"])
                result = checknewnote(student["numero"])
                if result == 0:
                    # No new grade for this student.
                    print("Pas de nouvelle note")
                elif result == -1:
                    # checknewnote signals failure with -1.
                    print("Erreur lors du check de " + student["prenom"])
                else:
                    # result maps course name -> new grade.
                    for i in result:
                        send_message(student["idmes"], "Nouvelle note en " + i + " : " + result[i])
                        print("Nouvelle note en " + i + " : " + result[i])
        # Re-arm the timer for the next poll.
        yourThread = threading.Timer(POOL_TIME, doStuff, ())
        yourThread.start()

    def doStuffStart():
        # One-time initialisation: log in to promethee, build the student
        # name/number lists and load (or create) the pickled subscriber list.
        global listname
        global listenumero
        global studenttocheck
        global yourThread
        login()
        falselisteeleves()
        # Lists populated by the promethee module (star import).
        listname = listenom1a + listenom2a
        listenumero = listenumero1a + listenumero2a
        # Load the subscriber list, creating the pickle file on first run.
        if is_non_zero_file(nomfichierabo) == 0:
            with open(nomfichierabo, 'wb') as file:
                pickler = pickle.Pickler(file)
                pickler.dump(studenttocheck)
            print("Nouveau fichier creer")
        else:
            with open(nomfichierabo, 'rb') as file:
                depickler = pickle.Unpickler(file)
                studenttocheck = depickler.load()
        # Names are matched case-insensitively, so normalise them once.
        for i in range(len(listname)):
            listname[i] = listname[i].upper()
        # Start the first polling timer.
        yourThread = threading.Timer(POOL_TIME, doStuff, ())
        yourThread.start()

    @app.route("/", methods=['GET', 'POST'])
    def receive_message():
        """Webhook endpoint: GET is Facebook's verification, POST delivers events."""
        if request.method == 'GET':
            # Before allowing people to message your bot, Facebook has
            # implemented a verify token that confirms all requests that your
            # bot receives came from Facebook.
            token_sent = request.args.get("hub.verify_token")
            return verify_fb_token(token_sent)
        # If the request was not GET, it must be POST: handle incoming messages.
        else:
            # get whatever message a user sent the bot
            output = request.get_json()
            for event in output['entry']:
                messaging = event['messaging']
                for message in messaging:
                    if message.get('message'):
                        # Facebook Messenger ID for user so we know where to send response back to
                        recipient_id = message['sender']['id']
                        # get_user_info returns a dict: stringify it before
                        # concatenating (the original str + dict raised
                        # TypeError on every incoming message).
                        print("Nouveau message de : " + str(bot.get_user_info(message['sender']['id'])) + " :")
                        if message['message'].get('text'):
                            print(message['message'].get('text'))
                            response_sent_text = get_message(message)
                            send_message(recipient_id, response_sent_text)
                        # if user sends us a GIF, photo, video, or any other non-text item
                        if message['message'].get('attachments'):
                            response_sent_nontext = "Merci"
                            send_message(recipient_id, response_sent_nontext)
        return "Message Processed"

    def verify_fb_token(token_sent):
        # Compare the token sent by Facebook with our verify token; if they
        # match allow the request, otherwise report an error.
        if token_sent == VERIFY_TOKEN:
            return request.args.get("hub.challenge")
        return 'Invalid verification token'

    def get_message(message):
        """Build the reply for one incoming Messenger *message*.

        Unknown senders are matched by profile name against the student list
        and subscribed; known senders get the dialogquerry-driven behaviour.
        """
        if not next((item for item in studenttocheck if item["idmes"] == message['sender']['id']),
                    None):  # If the person is not in the list
            # Look up who the sender is from their Facebook profile name.
            first_name = bot.get_user_info(message['sender']['id'])["first_name"]
            last_name = bot.get_user_info(message['sender']['id'])["last_name"]
            name = last_name + " " + first_name
            if name.upper() in listname:
                promethee_number = listenumero[listname.index(name.upper())]
                studenttocheck.append({"numero": promethee_number, "idmes": message['sender']['id'], "nom": last_name,
                                       "prenom": first_name})
                # Persist the updated subscriber list.
                with open(nomfichierabo, 'wb') as file:
                    pickler = pickle.Pickler(file)
                    pickler.dump(studenttocheck)
                response = "Je t'ai ajouté à la liste mon petit " + first_name
            else:
                response = "Je ne sais pas qui t'es, j'ai demandé à baptiste de la faire manuellement"
                # Alert the maintainer that a manual subscription is needed.
                bot.send_text_message('2621783057950447',
                                      "Aledfrr j'arrive pas a ajouter " + name + " " + message['sender']['id'])
        else:
            action, response = dialogquerry(message['message'].get('text'))
            # NOTE(review): the sample_responses lists below are assigned but
            # never returned; only *response* from dialogquerry is sent back.
            if action == "Menu":
                sample_responses = [
                    "Send 'note' to get your grade (you must be in the database) or send 'Quit' to get removed from the database."]
            elif action == "Note":
                notes = noteseleve(
                    next((item for item in studenttocheck if item["idmes"] == message['sender']['id']), None)["numero"])
                answer = "Notes de " + str(bot.get_user_info(message['sender']['id'])["first_name"]) + ":\n"
                for i in notes.keys():
                    answer = answer + i + " : " + str(notes[i]) + "\n"
                response = response + " :\n" + answer
                notes.clear()
            elif action == "QUIT":
                sample_responses = ["Je suis triste, à bientot"]
                # Remove the person from the list if they are subscribed.
                if next((item for item in studenttocheck if item["idmes"] == message['sender']['id']), None) in studenttocheck:
                    studenttocheck.remove(next((item for item in studenttocheck if item["idmes"] == message['sender']['id']), None))
                    with open(nomfichierabo, 'wb') as file:
                        pickler = pickle.Pickler(file)
                        pickler.dump(studenttocheck)
                else:
                    bot.send_text_message('2621783057950447', "Aledfrr j'arrive pas a supprimer " + bot.get_user_info(message['sender']['id'])["first_name"] + " " + message['sender']['id'])
            elif action == "Default Fallback Intent":
                if message['sender']['id'] == "3190229520988487":
                    response = random.choice(["Ferme la grosse merde", "Je te déteste Arthur", "Get cancer and die plz", "Puterelle",
                                              "Tu vois, ça, c’est la raison pour laquelle les gens parlent mal de toi quand t’es pas là",
                                              "Je trouve ça absolument génial, cette manière bien à toi que tu as de dire des trucs absolument évidents avec la sincère conviction que tu as découvert quelque chose",
                                              "C’est bon, tu as terminé ?", "Tu n’es vraiment pas assez beau pour pouvoir te permettre d’être aussi bête",
                                              "Est-ce que tu te rends compte que les gens ne font que te tolérer ?"])
                else:
                    sample_responses = ["Je suis un bot", "C'est moi qui envoie les messages", "Bonjour a toi"]
        return response

    def send_message(recipient_id, reply):
        # Send *reply* to the user via PyMessenger.
        bot.send_text_message(recipient_id, reply)
        print("Envoi du message: " + reply)
        return "success"

    # Initiate
    doStuffStart()
    # When you kill Flask (SIGTERM), clear the trigger for the next thread
    atexit.register(interrupt)
    return app
# Build the app at import time and start Flask's development server.
app = create_app()
app.run()
| en | 0.621107 | # Seconds # variables that are accessible from anywhere # lock to control access to variable # thread handler # Do your stuff with commonDataStruct Here # Set the next thread to happen # Do initialisation stuff here # Ouverture de la liste des personnes # Ouverture des fichier contenant les personnes # Create your thread # print("Message recu") Before allowing people to message your bot, Facebook has implemented a verify token
that confirms all requests that your bot receives came from Facebook. # if the request was not get, it must be POST and we can just proceed with sending a message back to user # get whatever message a user sent the bot # Facebook Messenger ID for user so we know where to send response back to # if user sends us a GIF, photo,video, or any other non-text item # take token sent by facebook and verify it matches the verify token you sent # if they match, allow the request, else return an error # chooses the message to send to the user # If the person is not in the list # We look for who is the person: # despicable : # nomfichier = "notes/" + str(4379) # # if is_non_zero_file(nomfichier) == 0: # nouveaufichier = 1 # else: # # Ouverture des fichier contenant les notes # with open(nomfichier, 'rb') as fichier: # mon_depickler = pickle.Unpickler(fichier) # notesfichier = mon_depickler.load() # fichier.close() # # sample_responses = [str(notesfichier)] # Remove the person from the list if exists # print(bot.get_user_info(message['sender']['id'])) # return selected item to the user # return random.choice(sample_responses) # uses PyMessenger to send response to user # sends user the text message provided via input response parameter # Initiate # When you kill Flask (SIGTERM), clear the trigger for the next thread | 2.6642 | 3 |
RestApi.py | onyxcherry/IntegracjaPayUAPI | 0 | 6613382 | <reponame>onyxcherry/IntegracjaPayUAPI<gh_stars>0
#!/usr/bin/env python
import json
import os
import sys
import requests
# Directory containing this file; secrets.json and config.json live beside it.
real_dir = os.path.dirname(os.path.abspath(__file__))
with open(real_dir + '/secrets.json', 'r') as secrets:
    json_data = json.load(secrets)
    # OAuth client credentials for the PayU API.
    client_id = json_data['oauth_creds']['posId']
    client_secret = json_data['oauth_creds']['client_secret']
with open(real_dir + '/config.json', 'r') as config:
    json_data = json.load(config)
    # PayU endpoint URLs used by RestApi below.
    order_url = json_data['urls']['order_url']
    authorize_url = json_data['urls']['authorize_url']
    notify_url = json_data['urls']['notify_url']
class RestApi:
    """Minimal PayU REST API client: OAuth, order creation, order status.

    NOTE(review): errors terminate the process via sys.exit(); drastic for
    library code, but preserved here because callers rely on it.
    """

    def __init__(self):
        pass

    def authorize(self):
        """Obtain an OAuth access token via the client-credentials flow.

        Returns the access token string; exits the process on any failure.
        """
        authorize_data = {
            'grant_type': 'client_credentials',
            'client_id': f'{client_id}',
            'client_secret': f'{client_secret}'
        }
        authorize_response = requests.post(authorize_url, data=authorize_data)
        json_response = json.loads(authorize_response.text)
        if authorize_response.status_code == 200:
            # .get avoids the KeyError the original raised when the field was
            # missing, which made the "No access token" branch unreachable.
            access_token = json_response.get('access_token')
            if not access_token:
                sys.exit('No access token in OAuth response.')
        else:
            sys.exit('Status code different than 200.')
        return access_token

    def send_request(self, access_token, pycoins_unitprice,
                     pycoins_quantity, insurance_unitprice, total_amount):
        """Create a PayU order and return (redirect_link, order_id).

        Exits the process when the API does not answer 302 with a SUCCESS
        status in the body.
        """
        order_headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {access_token}',
        }
        data = {
            'notifyUrl': f'{notify_url}',
            'customerIp': '127.0.0.1',
            'merchantPosId': f'{client_id}',
            'description': 'PyCoins',
            'currencyCode': 'PLN',
            'totalAmount': f'{total_amount}',
            'buyer': {
                'email': '<EMAIL>',
                'phone': '654111654',
                'firstName': 'John',
                'lastName': 'Doe',
                'language': 'pl'
            },
            'settings': {
                'invoiceDisabled': 'true'
            },
            'products': [
                {
                    'name': 'PyCoins',
                    'unitPrice': f'{pycoins_unitprice}',
                    'quantity': f'{pycoins_quantity}'
                },
                {
                    'name': 'Insurance',
                    'unitPrice': f'{insurance_unitprice}',
                    'quantity': '1'
                }
            ]
        }
        response = requests.post(order_url, headers=order_headers,
                                 data=json.dumps(data), allow_redirects=False)
        # Note that PayU REST API sends json data with statusCode,
        # redirectUri and orderId (with HTTP 302 Found status code)
        # and then redirects to redirectUri, so success arrives as a 302.
        if response.status_code == 302:
            response_to_json = json.loads(response.text)
            json_statusCode = response_to_json['status']['statusCode']
            if json_statusCode == 'SUCCESS':
                redirect_link = response_to_json['redirectUri']
                order_id = response_to_json['orderId']
                return redirect_link, order_id
            else:
                sys.exit('Status code different than SUCCESS.')
        else:
            sys.exit('Status code different 302.')

    def get_order_status(self, access_token, order_id):
        """Return True when the order is reported SUCCESS/COMPLETED, else False."""
        headers = {'Authorization': f'Bearer {access_token}'}
        url = order_url + '/' + order_id
        response = requests.get(url, headers=headers)
        json_data = json.loads(response.text)
        status_code = json_data['status']['statusCode']
        order_status = json_data['orders'][0]['status']
        if status_code == 'SUCCESS' and order_status == 'COMPLETED':
            return True
        return False
| #!/usr/bin/env python
import json
import os
import sys
import requests
# Directory containing this file; secrets.json and config.json live beside it.
real_dir = os.path.dirname(os.path.abspath(__file__))
with open(real_dir + '/secrets.json', 'r') as secrets:
    json_data = json.load(secrets)
    # OAuth client credentials for the PayU API.
    client_id = json_data['oauth_creds']['posId']
    client_secret = json_data['oauth_creds']['client_secret']
with open(real_dir + '/config.json', 'r') as config:
    json_data = json.load(config)
    # PayU endpoint URLs used by RestApi below.
    order_url = json_data['urls']['order_url']
    authorize_url = json_data['urls']['authorize_url']
    notify_url = json_data['urls']['notify_url']
class RestApi:
    """Minimal PayU REST API client: OAuth, order creation, order status.

    NOTE(review): errors terminate the process via sys.exit(); drastic for
    library code, but preserved here because callers rely on it.
    """

    def __init__(self):
        pass

    def authorize(self):
        """Obtain an OAuth access token via the client-credentials flow.

        Returns the access token string; exits the process on any failure.
        """
        authorize_data = {
            'grant_type': 'client_credentials',
            'client_id': f'{client_id}',
            'client_secret': f'{client_secret}'
        }
        authorize_response = requests.post(authorize_url, data=authorize_data)
        json_response = json.loads(authorize_response.text)
        if authorize_response.status_code == 200:
            # .get avoids the KeyError the original raised when the field was
            # missing, which made the "No access token" branch unreachable.
            access_token = json_response.get('access_token')
            if not access_token:
                sys.exit('No access token in OAuth response.')
        else:
            sys.exit('Status code different than 200.')
        return access_token

    def send_request(self, access_token, pycoins_unitprice,
                     pycoins_quantity, insurance_unitprice, total_amount):
        """Create a PayU order and return (redirect_link, order_id).

        Exits the process when the API does not answer 302 with a SUCCESS
        status in the body.
        """
        order_headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {access_token}',
        }
        data = {
            'notifyUrl': f'{notify_url}',
            'customerIp': '127.0.0.1',
            'merchantPosId': f'{client_id}',
            'description': 'PyCoins',
            'currencyCode': 'PLN',
            'totalAmount': f'{total_amount}',
            'buyer': {
                'email': '<EMAIL>',
                'phone': '654111654',
                'firstName': 'John',
                'lastName': 'Doe',
                'language': 'pl'
            },
            'settings': {
                'invoiceDisabled': 'true'
            },
            'products': [
                {
                    'name': 'PyCoins',
                    'unitPrice': f'{pycoins_unitprice}',
                    'quantity': f'{pycoins_quantity}'
                },
                {
                    'name': 'Insurance',
                    'unitPrice': f'{insurance_unitprice}',
                    'quantity': '1'
                }
            ]
        }
        response = requests.post(order_url, headers=order_headers,
                                 data=json.dumps(data), allow_redirects=False)
        # Note that PayU REST API sends json data with statusCode,
        # redirectUri and orderId (with HTTP 302 Found status code)
        # and then redirects to redirectUri, so success arrives as a 302.
        if response.status_code == 302:
            response_to_json = json.loads(response.text)
            json_statusCode = response_to_json['status']['statusCode']
            if json_statusCode == 'SUCCESS':
                redirect_link = response_to_json['redirectUri']
                order_id = response_to_json['orderId']
                return redirect_link, order_id
            else:
                sys.exit('Status code different than SUCCESS.')
        else:
            sys.exit('Status code different 302.')

    def get_order_status(self, access_token, order_id):
        """Return True when the order is reported SUCCESS/COMPLETED, else False."""
        headers = {'Authorization': f'Bearer {access_token}'}
        url = order_url + '/' + order_id
        response = requests.get(url, headers=headers)
        json_data = json.loads(response.text)
        status_code = json_data['status']['statusCode']
        order_status = json_data['orders'][0]['status']
        if status_code == 'SUCCESS' and order_status == 'COMPLETED':
            return True
        return False
mcstasscript/tests/test_Configurator.py | PaNOSC-ViNYL/McStasScript | 3 | 6613383 | import os
import unittest
from mcstasscript.interface.functions import Configurator
def setup_expected_file(test_name):
    """Compute the config-file path for *test_name* and delete any stale copy.

    The configuration file is named "<test_name>.yaml" and lives one
    directory above this test module. The path is returned so callers can
    check for its (non-)existence afterwards.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    yaml_path = os.path.join(this_dir, "..", test_name + ".yaml")
    if os.path.isfile(yaml_path):
        os.remove(yaml_path)
    return yaml_path
def setup_configurator(test_name):
    """Remove any leftover config file, then build a Configurator for it."""
    setup_expected_file(test_name)
    fresh_configurator = Configurator(test_name)
    return fresh_configurator
class TestConfigurator(unittest.TestCase):
    """
    Tests for configurator class that handles yaml configuration file

    The original tests repeated the same create/mutate/re-read/cleanup
    boilerplate in every method; that boilerplate now lives in small private
    helpers. Every public test method and the behavior it verifies is
    unchanged.
    """

    # Name of the temporary configuration file used by all tests.
    TEST_NAME = "test_configuration"

    def _fresh_configurator(self):
        """Return a Configurator backed by a guaranteed-fresh config file.

        A cleanup is registered so the file is removed even when an
        assertion fails (previously a failing test leaked the file).
        """
        self.addCleanup(setup_expected_file, self.TEST_NAME)
        return setup_configurator(self.TEST_NAME)

    def _check_path_setter(self, setter_name, config_key):
        """Call the named path setter and verify the value is persisted.

        Args:
            setter_name: str, name of the setter method on Configurator.
            config_key: str, key under the "paths" section that the setter
                is expected to update.
        """
        my_configurator = self._fresh_configurator()
        this_dir = os.path.dirname(os.path.abspath(__file__))
        dummy_mcstas_path = os.path.join(this_dir, "dummy_mcstas")
        getattr(my_configurator, setter_name)(dummy_mcstas_path)
        new_config = my_configurator._read_yaml()
        self.assertEqual(new_config["paths"][config_key], dummy_mcstas_path)

    def test_simple_initialize(self):
        """
        Tests that initialization happens, new configuration file should be
        written.
        """
        expected_file = setup_expected_file(self.TEST_NAME)
        self.addCleanup(setup_expected_file, self.TEST_NAME)
        # check the file did not exist before testing
        self.assertFalse(os.path.isfile(expected_file))
        # initialize the configurator
        Configurator(self.TEST_NAME)
        # check a new configuration file was made
        self.assertTrue(os.path.isfile(expected_file))

    def test_default_config(self):
        """
        This tests confirms the content of the default configuration file
        """
        expected_file = setup_expected_file(self.TEST_NAME)
        self.addCleanup(setup_expected_file, self.TEST_NAME)
        # check the file did not exist before testing
        self.assertFalse(os.path.isfile(expected_file))
        my_configurator = Configurator(self.TEST_NAME)
        default_config = my_configurator._read_yaml()
        run = "/Applications/McStas-2.5.app/Contents/Resources/mcstas/2.5/bin/"
        mcstas = "/Applications/McStas-2.5.app/Contents/Resources/mcstas/2.5/"
        mxrun = "/Applications/McXtrace-1.5.app" \
                + "/Contents/Resources/mcxtrace/1.5/mxrun"
        mcxtrace = "/Applications/McXtrace-1.5.app" \
                   + "/Contents/Resources/mcxtrace/1.5/"
        self.assertEqual(default_config["paths"]["mcrun_path"], run)
        self.assertEqual(default_config["paths"]["mcstas_path"], mcstas)
        self.assertEqual(default_config["paths"]["mxrun_path"], mxrun)
        self.assertEqual(default_config["paths"]["mcxtrace_path"], mcxtrace)
        self.assertEqual(default_config["other"]["characters_per_line"], 85)

    def test_yaml_write(self):
        """
        This test checks that writing to the configuration file works
        """
        my_configurator = self._fresh_configurator()
        config = my_configurator._read_yaml()
        config["new_field"] = 123
        config["paths"]["new_path"] = "/test/path/"
        my_configurator._write_yaml(config)
        new_config = my_configurator._read_yaml()
        self.assertEqual(new_config["other"]["characters_per_line"], 85)
        self.assertEqual(new_config["new_field"], 123)
        self.assertEqual(new_config["paths"]["new_path"], "/test/path/")

    def test_set_mcrun_path(self):
        """
        This test checks that setting the mcrun path works
        """
        self._check_path_setter("set_mcrun_path", "mcrun_path")

    def test_set_mcstas_path(self):
        """
        This test checks that setting the mcstas path works
        """
        self._check_path_setter("set_mcstas_path", "mcstas_path")

    def test_set_mxrun_path(self):
        """
        This test checks that setting the mxrun path works
        """
        self._check_path_setter("set_mxrun_path", "mxrun_path")

    def test_set_mcxtrace_path(self):
        """
        This test checks that setting the mcxtrace path works
        """
        self._check_path_setter("set_mcxtrace_path", "mcxtrace_path")

    def test_set_line_length(self):
        """
        This test checks that setting the line length works
        """
        my_configurator = self._fresh_configurator()
        my_configurator.set_line_length(123)
        new_config = my_configurator._read_yaml()
        self.assertEqual(new_config["other"]["characters_per_line"], 123)
# Allow running this test module directly (in addition to test discovery).
if __name__ == '__main__':
    unittest.main()
| import os
import unittest
from mcstasscript.interface.functions import Configurator
def setup_expected_file(test_name):
    """Compute the config-file path for *test_name* and delete any stale copy.

    The configuration file is named "<test_name>.yaml" and lives one
    directory above this test module. The path is returned so callers can
    check for its (non-)existence afterwards.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    yaml_path = os.path.join(this_dir, "..", test_name + ".yaml")
    if os.path.isfile(yaml_path):
        os.remove(yaml_path)
    return yaml_path
def setup_configurator(test_name):
    """Remove any leftover config file, then build a Configurator for it."""
    setup_expected_file(test_name)
    fresh_configurator = Configurator(test_name)
    return fresh_configurator
class TestConfigurator(unittest.TestCase):
    """
    Tests for configurator class that handles yaml configuration file

    The original tests repeated the same create/mutate/re-read/cleanup
    boilerplate in every method; that boilerplate now lives in small private
    helpers. Every public test method and the behavior it verifies is
    unchanged.
    """

    # Name of the temporary configuration file used by all tests.
    TEST_NAME = "test_configuration"

    def _fresh_configurator(self):
        """Return a Configurator backed by a guaranteed-fresh config file.

        A cleanup is registered so the file is removed even when an
        assertion fails (previously a failing test leaked the file).
        """
        self.addCleanup(setup_expected_file, self.TEST_NAME)
        return setup_configurator(self.TEST_NAME)

    def _check_path_setter(self, setter_name, config_key):
        """Call the named path setter and verify the value is persisted.

        Args:
            setter_name: str, name of the setter method on Configurator.
            config_key: str, key under the "paths" section that the setter
                is expected to update.
        """
        my_configurator = self._fresh_configurator()
        this_dir = os.path.dirname(os.path.abspath(__file__))
        dummy_mcstas_path = os.path.join(this_dir, "dummy_mcstas")
        getattr(my_configurator, setter_name)(dummy_mcstas_path)
        new_config = my_configurator._read_yaml()
        self.assertEqual(new_config["paths"][config_key], dummy_mcstas_path)

    def test_simple_initialize(self):
        """
        Tests that initialization happens, new configuration file should be
        written.
        """
        expected_file = setup_expected_file(self.TEST_NAME)
        self.addCleanup(setup_expected_file, self.TEST_NAME)
        # check the file did not exist before testing
        self.assertFalse(os.path.isfile(expected_file))
        # initialize the configurator
        Configurator(self.TEST_NAME)
        # check a new configuration file was made
        self.assertTrue(os.path.isfile(expected_file))

    def test_default_config(self):
        """
        This tests confirms the content of the default configuration file
        """
        expected_file = setup_expected_file(self.TEST_NAME)
        self.addCleanup(setup_expected_file, self.TEST_NAME)
        # check the file did not exist before testing
        self.assertFalse(os.path.isfile(expected_file))
        my_configurator = Configurator(self.TEST_NAME)
        default_config = my_configurator._read_yaml()
        run = "/Applications/McStas-2.5.app/Contents/Resources/mcstas/2.5/bin/"
        mcstas = "/Applications/McStas-2.5.app/Contents/Resources/mcstas/2.5/"
        mxrun = "/Applications/McXtrace-1.5.app" \
                + "/Contents/Resources/mcxtrace/1.5/mxrun"
        mcxtrace = "/Applications/McXtrace-1.5.app" \
                   + "/Contents/Resources/mcxtrace/1.5/"
        self.assertEqual(default_config["paths"]["mcrun_path"], run)
        self.assertEqual(default_config["paths"]["mcstas_path"], mcstas)
        self.assertEqual(default_config["paths"]["mxrun_path"], mxrun)
        self.assertEqual(default_config["paths"]["mcxtrace_path"], mcxtrace)
        self.assertEqual(default_config["other"]["characters_per_line"], 85)

    def test_yaml_write(self):
        """
        This test checks that writing to the configuration file works
        """
        my_configurator = self._fresh_configurator()
        config = my_configurator._read_yaml()
        config["new_field"] = 123
        config["paths"]["new_path"] = "/test/path/"
        my_configurator._write_yaml(config)
        new_config = my_configurator._read_yaml()
        self.assertEqual(new_config["other"]["characters_per_line"], 85)
        self.assertEqual(new_config["new_field"], 123)
        self.assertEqual(new_config["paths"]["new_path"], "/test/path/")

    def test_set_mcrun_path(self):
        """
        This test checks that setting the mcrun path works
        """
        self._check_path_setter("set_mcrun_path", "mcrun_path")

    def test_set_mcstas_path(self):
        """
        This test checks that setting the mcstas path works
        """
        self._check_path_setter("set_mcstas_path", "mcstas_path")

    def test_set_mxrun_path(self):
        """
        This test checks that setting the mxrun path works
        """
        self._check_path_setter("set_mxrun_path", "mxrun_path")

    def test_set_mcxtrace_path(self):
        """
        This test checks that setting the mcxtrace path works
        """
        self._check_path_setter("set_mcxtrace_path", "mcxtrace_path")

    def test_set_line_length(self):
        """
        This test checks that setting the line length works
        """
        my_configurator = self._fresh_configurator()
        my_configurator.set_line_length(123)
        new_config = my_configurator._read_yaml()
        self.assertEqual(new_config["other"]["characters_per_line"], 123)
# Allow running this test module directly (in addition to test discovery).
if __name__ == '__main__':
    unittest.main()
| en | 0.796466 | Tests for configurator class that handles yaml configuration file Tests that initialization happens, new configuration file should be written. # check the file did not exist before testing # initialize the configurator # check a new configuration file was made # remove the testing configuration file This tests confirms the content of the default configuration file # check the file did not exist before testing # remove the testing configuration file This test checks that writing to the configuration file works # remove the testing configuration file This test checks that setting the mcrun path works # remove the testing configuration file This test checks that setting the mcstas path works # remove the testing configuration file This test checks that setting the mxrun path works # remove the testing configuration file This test checks that setting the mcxtrace path works # remove the testing configuration file This test checks that setting the line length works # remove the testing configuration file | 3.064822 | 3 |
Configuration/Eras/python/Era_Run2_25ns_peripheralPbPb_cff.py | ckamtsikis/cmssw | 852 | 6613384 | import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run2_25ns_cff import Run2_25ns
from Configuration.Eras.Modifier_peripheralPbPb_cff import peripheralPbPb
Run2_25ns_peripheralPbPb = cms.ModifierChain(Run2_25ns, peripheralPbPb)
| import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run2_25ns_cff import Run2_25ns
from Configuration.Eras.Modifier_peripheralPbPb_cff import peripheralPbPb
Run2_25ns_peripheralPbPb = cms.ModifierChain(Run2_25ns, peripheralPbPb)
| none | 1 | 1.118177 | 1 | |
src/leak_finder.py | Letractively/leak-finder-for-javascript | 0 | 6613385 | <gh_stars>0
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."
"""Tool for finding possibly leaking JavaScript objects based on heap snapshots.
How does it work?
Since JavaScript is a garbage collected language, all objects have one or
several retaining paths which keep the object alive.
This tool finds objects which are only retained (kept alive) by a specified set
of data structures ("bad stop nodes"). Typically those are data structures of a
JavaScript library (e.g., Closure). If an object is only kept alive by such data
structures, the user code doesn't have a pointer to the object, and it can be
considered a "leak". (E.g., the user code forgot to call a function which was
supposed to remove the object from the data structures.)
Example (Closure):
goog.Disposable implements a monitoring mode which gathers all created but not
yet disposed instances of goog.Disposable (and its subclasses) into an array
goog.Disposable.instances_. This array will keep the objects alive. However, if
an object is only kept alive by this array, it is likely a leak, since the user
code doesn't contain any pointers to the object, and the user cannot call
dispose() on it.
Closure contains other data structures (especially for storing event handlers
under goog.events) which keep objects alive. If all retaining paths of an object
go through goog.Disposable.instances_ or goog.events, the object is likely a
leak.
However, if we find a retaining path that goes through a Window object without
going through these bad stop nodes, the object is not a leak.
"""
import simplejson
import stacktrace
class Error(Exception):
  """Base error for leak-finder failures (e.g., malformed heap snapshots)."""
  pass
class Node(object):
  """One node of the parsed heap snapshot graph.

  Attributes:
    node_id: int, identifier for the node.
    type_string: str, describes the type of the node.
    class_name: str, class of the JavaScript object this node stands for.
    edges_to: [Edge], edges ending at this node (its retainers).
    edges_from: [Edge], edges starting at this node (what it references).
    string: str, for string nodes, the string the node represents;
        empty string for non-string nodes.
    js_name: str, how to refer to this node in JavaScript.
  """

  def __init__(self, node_id, type_string, class_name):
    """Creates a Node.

    Args:
      node_id: int, identifier for the node.
      type_string: str, the type of the node.
      class_name: str, class of the JavaScript object this node stands for.
    """
    self.node_id = node_id
    self.type_string = type_string
    self.class_name = class_name
    self.string = ''
    self.js_name = ''
    self.edges_to = []
    self.edges_from = []

  def AddEdgeTo(self, edge):
    """Records an edge whose end point is this node.

    Args:
      edge: Edge, an edge ending at this node.
    """
    self.edges_to.append(edge)

  def AddEdgeFrom(self, edge):
    """Records an edge whose start point is this node.

    Args:
      edge: Edge, an edge starting at this node.
    """
    self.edges_from.append(edge)

  def __str__(self):
    # Object nodes display their class; all other nodes display their type.
    if self.type_string == 'object':
      label = self.class_name
    else:
      label = self.type_string
    return 'Node(' + str(self.node_id) + ' ' + label + ')'

  def ToJavaScript(self):
    """Returns how to refer to this node in JavaScript code."""
    return self.js_name or str(self)
class Edge(object):
  """One edge of the parsed heap snapshot graph.

  Attributes:
    from_node_id: int, id of the start node. Used while the corresponding
        Node object has not yet been constructed.
    to_node_id: int, id of the end node. Used while the corresponding Node
        object has not yet been constructed.
    from_node: Node, the start point once resolved.
    to_node: Node, the end point once resolved.
    type_string: str, the type of the edge.
    name_string: str, the JavaScript attribute name this edge represents.
  """

  def __init__(self, from_node_id, to_node_id, type_string, name_string):
    """Creates an Edge.

    Args:
      from_node_id: int, id of the start node.
      to_node_id: int, id of the end node.
      type_string: str, the type of the edge.
      name_string: str, the JavaScript attribute name this edge represents.
    """
    self.type_string = type_string
    self.name_string = name_string
    self.from_node_id = from_node_id
    self.to_node_id = to_node_id
    # Placeholders until the Node objects have been constructed and linked.
    self.from_node = {}
    self.to_node = {}

  def SetFromNode(self, node):
    """Resolves the start point; returns self for chaining."""
    self.from_node = node
    return self

  def SetToNode(self, node):
    """Resolves the end point; returns self for chaining."""
    self.to_node = node
    return self

  def __str__(self):
    return 'Edge(' + self.type_string + ' ' + self.name_string + ')'

  def ToJavaScript(self):
    """Returns the JavaScript accessor syntax for this edge."""
    if self.type_string == 'property':
      return '.' + self.name_string
    if self.type_string == 'element':
      return '[' + self.name_string + ']'
    return str(self)
class LeakNode(object):
  """A heap object suspected of leaking.

  Attributes:
    node: Node, represents the leaked JavaScript object.
    description: str, human-readable description of the leak.
    how_to_find_node: str, JavaScript expression which evaluates to the
        leaked JavaScript object.
    stack: Stack, creation stack trace of the object, or None if it cannot
        be retrieved or has not yet been retrieved.
  """

  def __init__(self, node, description, how_to_find_node, stacktrace_suffix):
    """Creates a LeakNode.

    Args:
      node: Node, represents the leaked JavaScript object.
      description: str, human-readable description of the leak.
      how_to_find_node: str, JavaScript expression which evaluates to the
          leaked JavaScript object.
      stacktrace_suffix: str, member name appended to the object expression
          for reading the stored stack trace (e.g., ".stack").
    """
    self.node = node
    self.description = description
    self.how_to_find_node = how_to_find_node
    self.stack = None
    self._stacktrace_suffix = stacktrace_suffix

  def RetrieveStackTrace(self, inspector_client=None):
    """Fetches the creation stack trace and stores it on this LeakNode.

    Args:
      inspector_client: RemoteInspectorClient used to evaluate the full
          stack string remotely. When None, the (possibly truncated) value
          already present in the snapshot is used instead.
    """
    if not self._stacktrace_suffix:
      # No stack trace information.
      self.stack = stacktrace.Stack('')
      return
    trace_text = None
    if inspector_client:
      # Heap snapshots keep only the first 1000 characters of each string,
      # so evaluate the expression remotely to get the full stack trace.
      trace_text = inspector_client.EvaluateJavaScript(
          self.how_to_find_node + self._stacktrace_suffix)
    else:
      # Fall back to the string stored in the snapshot, if the object has
      # a member with the configured suffix name.
      for edge in self.node.edges_from:
        if edge.name_string == self._stacktrace_suffix:
          trace_text = edge.to_node.string
          break
    if trace_text:
      self.stack = stacktrace.Stack(trace_text)

  def __str__(self):
    stack_text = ''
    if self.stack:
      stack_text = 'Stack:\n %s' % '\n '.join(self.stack.frames)
    return '%s\nClass: %s\nObject: %s\n%s' % (
        self.description, self.node.class_name, self.how_to_find_node,
        stack_text)
class Snapshotter(object):
  """Reads a heap snapshot from a chromium process and parses it.
  The heap snapshot JSON format is defined by HeapSnapshotJSONSerializer in v8.
  Attributes:
    _node_dict: {int -> Node}, maps integer ids to Node objects.
    _node_list: [int], the raw node data of the heap snapshot.
    _edge_list: [int], the raw edge data of the heap snapshot.
    _node_types: [str], the possible node types in the heap snapshot.
    _edge_types: [str], the possible edge types in the heap snapshot.
    _node_fields: [str], the fields present in the heap snapshot for each node.
    _edge_fields: [str], the fields present in the heap snapshot for each edge.
    _node_type_ix: int, index of the node type field.
    _node_name_ix: int, index of the node name field.
    _node_id_ix: int, index of the node id field.
    _node_edges_start_ix: int, index of the "edge start index for a node" field.
    _node_edge_count_ix: int, index of the node edge count field.
    _node_edge_count_format: bool, defines if the snapshot uses edges_start or
        edge_count.
    _node_field_count: int, number of node fields.
    _edge_type_ix: int, index of the edge type field.
    _edge_name_or_ix_ix: int, index of the edge name field.
    _edge_to_node_ix: int, index of the "to node for an edge" field.
    _edge_field_count: int, number of edge fields.
  """
  def __init__(self):
    # Populated by GetSnapshot(); maps node id -> Node.
    self._node_dict = {}
  def GetSnapshot(self, inspector_client):
    """Reads a heap snapshot from a chromium process and returns the data.
    Args:
      inspector_client: RemoteInspectorClient, the client to be used for taking
          the heap snapshot.
    Returns:
      set(Node), the Node objects in the snapshot or None if the snapshot
          couldn't be read.
    Raises:
      KeyError: The snapshot doesn't contain the required data fields.
      ValueError: The snapshot cannot be parsed.
      Error: The snapshot format cannot be parsed (e.g., too new version).
    """
    self._ReadSnapshot(inspector_client)
    self._ParseSnapshot()
    return self._node_dict.values()
  def _FindField(self, field_name, fields_array):
    """Finds field indices based on the snapshot meta information.
    Args:
      field_name: str, the field to find in fields_array.
      fields_array: [str], array of available fields.
    Returns:
      int, the first index of field_name in fields_array.
    Raises:
      Error: field_name doesn't occur in fields_array.
    """
    if field_name not in fields_array:
      raise Error('Cannot find field %s from the snapshot' % field_name)
    return fields_array.index(field_name)
  def _ReadSnapshot(self, inspector_client):
    """Reads a heap snapshot from a chromium process and stores the data.
    The snapshot contains a list of integers describing nodes (types, names,
    etc.) and a list of integers describing edges (types, the node the edge
    points to, etc.) and a string table. All strings are expressed as indices to
    the string table.
    In addition, the snapshot contains meta information describing the data
    fields for nodes and the data fields for edges.
    Args:
      inspector_client: RemoteInspectorClient, the client to be used for taking
          the heap snapshot.
    Raises:
      KeyError: The snapshot doesn't contain the required data fields.
      ValueError: The snapshot cannot be parsed.
      Error: The snapshot format is not supported (e.g., too new version).
    """
    raw_data = inspector_client.HeapSnapshot(include_summary=False)['raw_data']
    heap = simplejson.loads(raw_data)
    self._node_list = heap['nodes']
    self._edge_list = heap['edges']
    self._strings = heap['strings']
    # The first (and only) element holds the list of allowed type names.
    self._node_types = heap['snapshot']['meta']['node_types'][0]
    self._edge_types = heap['snapshot']['meta']['edge_types'][0]
    node_fields = heap['snapshot']['meta']['node_fields']
    edge_fields = heap['snapshot']['meta']['edge_fields']
    # Find the indices of the required node and edge fields.
    self._node_type_ix = self._FindField('type', node_fields)
    self._node_name_ix = self._FindField('name', node_fields)
    self._node_id_ix = self._FindField('id', node_fields)
    # Support 2 different snapshot formats:
    # - Define where edges for a given node start in the edge array as
    #   edges_index.
    # - Define how many edges a given node has as edge_count.
    if 'edges_index' in node_fields:
      self._node_edges_start_ix = node_fields.index('edges_index')
      self._node_edge_count_format = False
    else:
      self._node_edge_count_ix = self._FindField('edge_count', node_fields)
      self._node_edge_count_format = True
    self._node_field_count = len(node_fields)
    self._edge_type_ix = self._FindField('type', edge_fields)
    self._edge_name_or_ix_ix = self._FindField('name_or_index',
                                               edge_fields)
    self._edge_to_node_ix = self._FindField('to_node', edge_fields)
    self._edge_field_count = len(edge_fields)
  def _ConstructorName(self, type_string, node_name_ix):
    """Returns the constructor name for a node.
    Args:
      type_string: str, type of the node.
      node_name_ix: int, index of the strings array element which contains the
          name of the node, if the type of the node is 'object'. Otherwise, an
          arbitrary value.
    Returns:
      str, the constructor name for the node.
    """
    if type_string == 'object':
      return self._strings[int(node_name_ix)]
    return '(%s)' % type_string
  @staticmethod
  def _IsNodeTypeUninteresting(type_string):
    """Helper function for filtering out nodes from the heap snapshot.
    Args:
      type_string: str, type of the node.
    Returns:
      bool, True if the node is of an uninteresting type and shouldn't be
          included in the heap snapshot analysis.
    """
    uninteresting_types = ('hidden', 'code', 'number', 'native', 'synthetic')
    return type_string in uninteresting_types
  @staticmethod
  def _IsEdgeTypeUninteresting(edge_type_string):
    """Helper function for filtering out edges from the heap snapshot.
    Args:
      edge_type_string: str, type of the edge.
    Returns:
      bool, True if the edge is of an uninteresting type and shouldn't be
          included in the heap snapshot analysis.
    """
    uninteresting_types = ('weak', 'hidden', 'internal')
    return edge_type_string in uninteresting_types
  def _ReadNodeFromIndex(self, ix, edges_start):
    """Reads the data for a node from the heap snapshot.
    If the index contains an interesting node, constructs a Node object and adds
    it to self._node_dict.
    Args:
      ix: int, index into the self._node_list array.
      edges_start: int, if self._node_edge_count_format is True, the index of
          the edge array where the edges for the node start.
    Returns:
      int, if self._node_edge_count_format is True, the edge start index for the
          next node.
    Raises:
      Error: The node list of the snapshot is malformed.
    """
    if ix + self._node_field_count > len(self._node_list):
      raise Error('Snapshot node list too short')
    type_ix = self._node_list[ix + self._node_type_ix]
    type_string = self._node_types[int(type_ix)]
    # edges_end is noninclusive (the index of the first edge that is not part of
    # this node).
    if self._node_edge_count_format:
      # edge_count format: this node's edges occupy the next edge_count
      # records after the running cursor passed in via edges_start.
      edge_count = self._node_list[ix + self._node_edge_count_ix]
      edges_end = edges_start + edge_count * self._edge_field_count
    else:
      # edges_start is the start point of this node's edges in the edge
      # array. The end point of this node's edges is the start point of the next
      # node minus 1.
      edges_start = self._node_list[ix + self._node_edges_start_ix]
      next_edges_start = ix + self._node_edges_start_ix + self._node_field_count
      if next_edges_start < len(self._node_list):
        edges_end = self._node_list[next_edges_start]
      else:
        # Last node: its edges run to the end of the edge list.
        edges_end = len(self._edge_list)
    if Snapshotter._IsNodeTypeUninteresting(type_string):
      # Skip the node but still advance the edge cursor past its edges.
      return edges_end
    name_ix = self._node_list[ix + self._node_name_ix]
    node_id = self._node_list[ix + self._node_id_ix]
    ctor_name = self._ConstructorName(type_string, name_ix)
    n = Node(node_id, type_string, ctor_name)
    if type_string == 'string':
      # For string nodes the name index points at the represented string.
      n.string = self._strings[int(name_ix)]
    for edge_ix in xrange(edges_start, edges_end, self._edge_field_count):
      edge = self._ReadEdgeFromIndex(node_id, edge_ix)
      if edge:
        # The edge will be associated with the other endpoint when all the data
        # has been read.
        n.AddEdgeFrom(edge)
    self._node_dict[node_id] = n
    return edges_end
  def _ReadEdgeFromIndex(self, node_id, edge_ix):
    """Reads the data for an edge from the heap snapshot.
    Args:
      node_id: int, id of the node which is the starting point of the edge.
      edge_ix: int, index into the self._edge_list array.
    Returns:
      Edge, if the index contains an interesting edge, otherwise None.
    Raises:
      Error: The node list of the snapshot is malformed.
    """
    if edge_ix + self._edge_field_count > len(self._edge_list):
      raise Error('Snapshot edge list too short')
    edge_type_ix = self._edge_list[edge_ix + self._edge_type_ix]
    edge_type_string = self._edge_types[int(edge_type_ix)]
    if Snapshotter._IsEdgeTypeUninteresting(edge_type_string):
      return None
    child_name_or_ix = self._edge_list[edge_ix + self._edge_name_or_ix_ix]
    child_node_ix = self._edge_list[edge_ix + self._edge_to_node_ix]
    # The child_node_ix is an index into the node list. Read the actual
    # node information.
    child_node_type_ix = self._node_list[child_node_ix + self._node_type_ix]
    child_node_type_string = self._node_types[int(child_node_type_ix)]
    child_node_id = self._node_list[child_node_ix + self._node_id_ix]
    if Snapshotter._IsNodeTypeUninteresting(child_node_type_string):
      # Drop edges pointing at nodes that were filtered from the graph.
      return None
    child_name_string = ''
    # For element nodes, the child has no name (only an index).
    if (edge_type_string == 'element' or
        int(child_name_or_ix) >= len(self._strings)):
      child_name_string = str(child_name_or_ix)
    else:
      child_name_string = self._strings[int(child_name_or_ix)]
    return Edge(node_id, child_node_id, edge_type_string, child_name_string)
  def _ParseSnapshot(self):
    """Parses the stored JSON snapshot data.
    Fills in self._node_dict with Node objects constructed based on the heap
    snapshot. The Node objects contain the associated Edge objects.
    """
    # First pass: build Node objects (each records its outgoing edges).
    edge_start_ix = 0
    for ix in xrange(0, len(self._node_list), self._node_field_count):
      edge_start_ix = self._ReadNodeFromIndex(ix, edge_start_ix)
    # Add pointers to the endpoints to the edges, and associate the edges with
    # the "to" nodes.
    for node_id in self._node_dict:
      n = self._node_dict[node_id]
      for e in n.edges_from:
        self._node_dict[e.to_node_id].AddEdgeTo(e)
        e.SetFromNode(n)
        e.SetToNode(self._node_dict[e.to_node_id])
class LeakFinder(object):
  """Finds potentially leaking JavaScript objects based on a heap snapshot."""

  def __init__(self, containers, bad_stop_nodes, stacktrace_prefix,
               stacktrace_suffix):
    """Initializes the LeakFinder object.

    Potentially leaking Node objects are children of the nodes described by
    containers which are only retained by the nodes described by
    bad_stop_nodes.

    Args:
      containers: [str], describes the container JavaScript objects. E.g.,
          ['foo.bar', 'mylibrary.all_objects_array']. Only objects in the
          containers are investigated as potential leaks.
      bad_stop_nodes: [str], describes bad stop nodes which don't contribute
          to valid retaining paths. E.g., ['foo.baz',
          'mylibrary.secondary_array']. A retaining path is bad if it goes
          through one of the bad nodes. If all the retaining paths of an
          object are bad, the object is considered a leak.
      stacktrace_prefix: str, prefix to add to the container name for
          retrieving the stack trace. Useful e.g., if the JavaScript is in a
          different frame.
      stacktrace_suffix: str, name of the member variable where the stack
          trace is stored.
    """
    # Store the dotted descriptions as lists of edge names:
    # 'foo.bar' -> ['foo', 'bar'].
    self._container_description = [c.split('.') for c in containers]
    self._bad_stop_node_description = [b.split('.') for b in bad_stop_nodes]
    self._stacktrace_prefix = stacktrace_prefix
    self._stacktrace_suffix = stacktrace_suffix

  def FindLeaks(self, nodes):
    """Finds Node objects which are potentially leaking.

    Args:
      nodes: set(Node), Node objects in the snapshot.

    Yields:
      LeakNode objects representing the potential leaks.

    Raises:
      Error: Cannot find the Nodes needed by the leak detection algorithm.
    """
    # The retaining paths are computed until meeting one of these nodes.
    stop_nodes = set()
    # A retaining path is bad if it ends at one of these nodes. These are
    # closure data structures. If all retaining paths end in bad stop nodes,
    # the node is probably a leak.
    bad_stop_nodes = set()
    containers = set()
    found_container_edges = set()
    # Find container nodes and stopper nodes based on the descriptions.
    for node in nodes:
      # Window objects are good stop nodes. If a retaining path goes through a
      # Window object without going through any bad stop nodes, the retaining
      # path is good, and the object is not a leak.
      if node.class_name == 'Window' or node.class_name.startswith('Window / '):
        stop_nodes.add(node)
        node.js_name = 'window'
        continue
      for edges in self._bad_stop_node_description:
        if LeakFinder._IsRetainedByEdges(node, edges):
          stop_nodes.add(node)
          bad_stop_nodes.add(node)
          node.js_name = '.'.join(edges)
          break
      for edges in self._container_description:
        if LeakFinder._IsRetainedByEdges(node, edges):
          # Containers are also stop nodes: a path ending at a container
          # (without a good node in between) does not make the object live.
          containers.add(node)
          stop_nodes.add(node)
          bad_stop_nodes.add(node)
          node.container_name = '.'.join(edges)
          found_container_edges.add(node.container_name)
          node.js_name = '.'.join(edges)
          break
    # Check that we found all the containers.
    for edges in self._container_description:
      edge_description = '.'.join(edges)
      if edge_description not in found_container_edges:
        raise Error('Container not found: %s' % edge_description)
    # Find objects such that they are in the specified containers and all
    # retaining paths contain either the container or the specified bad stop
    # objects.
    for container in containers:
      for edge in container.edges_from:
        if edge.type_string != 'element':
          continue
        found_good_path = False
        node = edge.to_node
        for path in LeakFinder._FindRetainingPaths(node, [node], stop_nodes):
          # If the last node on the path is in bad_stop_nodes, the path is
          # bad, otherwise it's good (it may end in a good stop node or in a
          # node which doesn't have parents).
          if not path[-1] in bad_stop_nodes:
            found_good_path = True
            # All the objects on the known good path are known to be
            # non-leaks. Utilize this information when finding paths for other
            # objects: as soon as we find a path which hits one of them, we
            # know the object is not leaked. (Renamed from 'node' to avoid
            # shadowing the candidate object above.)
            for good_node in path:
              stop_nodes.add(good_node)
            break
        if not found_good_path:
          node_description = '%s%s[%s]' % (self._stacktrace_prefix,
                                           container.container_name,
                                           edge.name_string)
          leak = LeakNode(edge.to_node, 'Leak', node_description,
                          self._stacktrace_suffix)
          yield leak

  @staticmethod
  def _IsRetainedByEdges(node, edge_names):
    """Returns True if the node is retained by edges called edge_names.

    E.g., _IsRetainedByEdges(node, ['foo', 'bar', 'baz']) returns True if node
    represents obj.foo.bar.baz for some object obj.

    Args:
      node: Node, the Node which might be retained by edges called edge_names.
      edge_names: [str], the wanted edge names.

    Returns:
      bool, True if a retaining path with the given edge_names was found,
      False otherwise.
    """
    if not edge_names:
      return True
    # Match the last edge name against this node's incoming edges, then
    # recurse towards the root with the remaining prefix.
    edge_name = edge_names[-1]
    for edge in node.edges_to:
      if (edge.name_string == edge_name and
          LeakFinder._IsRetainedByEdges(edge.from_node, edge_names[:-1])):
        return True
    return False

  @staticmethod
  def _RetainingPathToString(path):
    """Constructs a textual representation of the path.

    Args:
      path: [Node], a retaining path (leaf first, root last).

    Returns:
      str, representation of the path.
    """
    parent_node = None
    result = ''
    # Walk from the root towards the leaf, rendering each step as the edge
    # expression that leads from the parent to the child.
    for node in path[::-1]:
      if parent_node:
        result += LeakFinder._NodeRelationshipToString(parent_node, node)
      else:
        result += node.ToJavaScript()
      parent_node = node
    return result

  @staticmethod
  def _NodeRelationshipToString(node_from, node_to):
    """Constructs a textual representation of a relationship between two nodes.

    Args:
      node_from: Node, the retaining node.
      node_to: Node, the retained node.

    Returns:
      str, representation of the relationship ('' if no edge connects them).
    """
    for edge in node_to.edges_to:
      if edge.from_node_id == node_from.node_id:
        return edge.ToJavaScript()
    return ''

  @staticmethod
  def _FindRetainingPaths(node, visited, stop_nodes, max_depth=30):
    """Finds retaining paths for a Node.

    Args:
      node: Node, the Node to find the retaining paths for.
      visited: [Node], the visited path so far.
      stop_nodes: set(Node), nodes which terminate the path (we don't care how
          they are retained).
      max_depth: int, the maximum length of retaining paths to search. The
          search will be terminated when at least one path exceeding max_depth
          is found.

    Yields:
      [Node], retaining paths. NOTE: the live 'visited' list is yielded;
      callers must consume (or copy) the path before advancing the generator,
      because backtracking mutates it.
    """
    if len(visited) > max_depth:
      return
    if not node.edges_to or node in stop_nodes:
      yield visited
      return
    for edge in node.edges_to:
      if edge.from_node not in visited:
        visited.append(edge.from_node)
        # Bug fix: forward max_depth so a caller-supplied limit is honored at
        # every recursion level (previously deeper calls silently fell back
        # to the default of 30).
        for path in LeakFinder._FindRetainingPaths(edge.from_node, visited,
                                                   stop_nodes, max_depth):
          yield path
        visited.pop()
| #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."
"""Tool for finding possibly leaking JavaScript objects based on heap snapshots.
How does it work?
Since JavaScript is a garbage collected language, all objects have one or
several retaining paths which keep the object alive.
This tool finds objects which are only retained (kept alive) by a specified set
of data structures ("bad stop nodes"). Typically those are data structures of a
JavaScript library (e.g., Closure). If an object is only kept alive by such data
structures, the user code doesn't have a pointer to the object, and it can be
considered a "leak". (E.g., the user code forgot to call a function which was
supposed to remove the object from the data structures.)
Example (Closure):
goog.Disposable implements a monitoring mode which gathers all created but not
yet disposed instances of goog.Disposable (and its subclasses) into an array
goog.Disposable.instances_. This array will keep the objects alive. However, if
an object is only kept alive by this array, it is likely a leak, since the user
code doesn't contain any pointers to the object, and the user cannot call
dispose() on it.
Closure contains other data structures (especially for storing event handlers
under goog.events) which keep objects alive. If all retaining paths of an object
go through goog.Disposable.instances_ or goog.events, the object is likely a
leak.
However, if we find a retaining path that goes through a Window object without
going through these bad stop nodes, the object is not a leak.
"""
import simplejson
import stacktrace
class Error(Exception):
  """Generic error raised by the heap snapshot analysis tool."""
  pass
class Node(object):
  """Represents a single node (a JavaScript heap object) in the snapshot.

  Attributes:
    node_id: int, snapshot-assigned identifier of the node.
    type_string: str, the node type (e.g., 'object', 'string').
    class_name: str, constructor/class name of the JavaScript object this
        node represents.
    edges_to: [Edge], incoming edges (this node is the destination).
    edges_from: [Edge], outgoing edges (this node is the source).
    string: str, the text of a string node; empty for all other node types.
    js_name: str, a JavaScript expression for reaching this node, if known.
  """
  def __init__(self, node_id, type_string, class_name):
    """Initializes the Node.

    Args:
      node_id: int, identifier for the Node.
      type_string: str, the type of the node.
      class_name: str, the class of the JavaScript object this Node
          represents.
    """
    self.node_id = node_id
    self.type_string = type_string
    self.class_name = class_name
    self.edges_to = []
    self.edges_from = []
    self.string = ''
    self.js_name = ''
  def AddEdgeTo(self, edge):
    """Registers an incoming edge (an Edge whose end point this Node is)."""
    self.edges_to.append(edge)
  def AddEdgeFrom(self, edge):
    """Registers an outgoing edge (an Edge whose start point this Node is)."""
    self.edges_from.append(edge)
  def __str__(self):
    # Object nodes are identified by their class; everything else by type.
    detail = self.class_name if self.type_string == 'object' else self.type_string
    return 'Node(%s %s)' % (self.node_id, detail)
  def ToJavaScript(self):
    """Returns the JavaScript name of this node, falling back to str(self)."""
    return self.js_name or str(self)
class Edge(object):
  """Represents a single edge (a reference) in the heap snapshot graph.

  Attributes:
    from_node_id: int, snapshot id of the edge's source node. Used before the
        corresponding Node object has been constructed.
    to_node_id: int, snapshot id of the edge's destination node. Used before
        the corresponding Node object has been constructed.
    from_node: Node, the resolved source node of this Edge.
    to_node: Node, the resolved destination node of this Edge.
    type_string: str, the edge type (e.g., 'property', 'element').
    name_string: str, the JavaScript attribute name (or element index) that
        this Edge represents.
  """
  def __init__(self, from_node_id, to_node_id, type_string, name_string):
    """Initializes the Edge.

    Args:
      from_node_id: int, snapshot id of the source node.
      to_node_id: int, snapshot id of the destination node.
      type_string: str, the type of the Edge.
      name_string: str, the attribute name (or index) the Edge represents.
    """
    self.from_node_id = from_node_id
    self.to_node_id = to_node_id
    # Endpoint Node objects are resolved later, once all nodes exist; until
    # then these hold empty placeholders.
    self.from_node = {}
    self.to_node = {}
    self.type_string = type_string
    self.name_string = name_string
  def SetFromNode(self, node):
    """Records the resolved source Node; returns self for chaining."""
    self.from_node = node
    return self
  def SetToNode(self, node):
    """Records the resolved destination Node; returns self for chaining."""
    self.to_node = node
    return self
  def __str__(self):
    return 'Edge(%s %s)' % (self.type_string, self.name_string)
  def ToJavaScript(self):
    """Returns a JavaScript-like rendering of the access this edge models."""
    if self.type_string == 'property':
      return '.%s' % self.name_string
    if self.type_string == 'element':
      return '[%s]' % self.name_string
    return str(self)
class LeakNode(object):
  """Wraps a Node that was identified as a potential memory leak.

  Attributes:
    node: Node, represents the leaked JavaScript object.
    description: str, human-readable description of the leak.
    how_to_find_node: str, JavaScript expression which evaluates to the
        leaked JavaScript object.
    stack: Stack, the creation stack trace of the JavaScript object, or None
        if the stack trace cannot be retrieved or has not yet been retrieved.
  """
  def __init__(self, node, description, how_to_find_node, stacktrace_suffix):
    """Initializes the LeakNode.

    Args:
      node: Node, represents the leaked JavaScript object.
      description: str, human-readable description of the leak.
      how_to_find_node: str, JavaScript expression which evaluates to the
          leaked JavaScript object.
      stacktrace_suffix: str, member-variable suffix (e.g., '.stack') under
          which the creation stack trace is stored on the leaked object.
    """
    self.node = node
    self.description = description
    self.how_to_find_node = how_to_find_node
    self._stacktrace_suffix = stacktrace_suffix
    self.stack = None
  def RetrieveStackTrace(self, inspector_client=None):
    """Retrieves the creation stack trace and stores it into this LeakNode.

    Args:
      inspector_client: RemoteInspectorClient, client to use for retrieving
          the full stack trace. If None, a possibly shortened value is read
          from the snapshot itself.
    """
    if not self._stacktrace_suffix:
      # No stack trace information is recorded for this object.
      self.stack = stacktrace.Stack('')
      return
    if inspector_client:
      # The heap snapshot contains only the first 1000 characters of each
      # string, so evaluate the expression remotely to obtain the full
      # stack trace text.
      raw_trace = inspector_client.EvaluateJavaScript(
          self.how_to_find_node + self._stacktrace_suffix)
    else:
      # Look for a member edge holding the stack trace string in the
      # snapshot itself.
      raw_trace = None
      for edge in self.node.edges_from:
        if edge.name_string == self._stacktrace_suffix:
          raw_trace = edge.to_node.string
          break
    if raw_trace:
      self.stack = stacktrace.Stack(raw_trace)
  def __str__(self):
    stack_text = ''
    if self.stack:
      stack_text = 'Stack:\n %s' % '\n '.join(self.stack.frames)
    return '%s\nClass: %s\nObject: %s\n%s' % (
        self.description, self.node.class_name, self.how_to_find_node,
        stack_text)
class Snapshotter(object):
  """Reads a heap snapshot from a chromium process and parses it.

  The heap snapshot JSON format is defined by HeapSnapshotJSONSerializer in
  v8.

  Attributes:
    _node_dict: {int -> Node}, maps integer ids to Node objects.
    _node_list: [int], the raw node data of the heap snapshot.
    _edge_list: [int], the raw edge data of the heap snapshot.
    _strings: [str], the string table of the snapshot; node and edge names
        are stored as indices into this table.
    _node_types: [str], the possible node types in the heap snapshot.
    _edge_types: [str], the possible edge types in the heap snapshot.
    _node_type_ix: int, index of the node type field.
    _node_name_ix: int, index of the node name field.
    _node_id_ix: int, index of the node id field.
    _node_edges_start_ix: int, index of the "edge start index for a node"
        field (only set when the snapshot uses the edges_index format).
    _node_edge_count_ix: int, index of the node edge count field (only set
        when the snapshot uses the edge_count format).
    _node_edge_count_format: bool, True if the snapshot uses edge_count,
        False if it uses edges_index.
    _node_field_count: int, number of fields per node record.
    _edge_type_ix: int, index of the edge type field.
    _edge_name_or_ix_ix: int, index of the edge name field.
    _edge_to_node_ix: int, index of the "to node for an edge" field.
    _edge_field_count: int, number of fields per edge record.
  """
  def __init__(self):
    self._node_dict = {}
  def GetSnapshot(self, inspector_client):
    """Reads a heap snapshot from a chromium process and returns the data.

    Args:
      inspector_client: RemoteInspectorClient, the client used for taking the
          heap snapshot.

    Returns:
      set(Node), the Node objects in the snapshot or None if the snapshot
      couldn't be read.

    Raises:
      KeyError: The snapshot doesn't contain the required data fields.
      ValueError: The snapshot cannot be parsed.
      Error: The snapshot format cannot be parsed (e.g., too new version).
    """
    self._ReadSnapshot(inspector_client)
    self._ParseSnapshot()
    return self._node_dict.values()
  def _FindField(self, field_name, fields_array):
    """Finds field indices based on the snapshot meta information.

    Args:
      field_name: str, the field to find in fields_array.
      fields_array: [str], array of available fields.

    Returns:
      int, the first index of field_name in fields_array.

    Raises:
      Error: field_name doesn't occur in fields_array.
    """
    if field_name not in fields_array:
      raise Error('Cannot find field %s from the snapshot' % field_name)
    return fields_array.index(field_name)
  def _ReadSnapshot(self, inspector_client):
    """Reads a heap snapshot from a chromium process and stores the data.

    The snapshot contains a list of integers describing nodes (types, names,
    etc.) and a list of integers describing edges (types, the node the edge
    points to, etc.) and a string table. All strings are expressed as indices
    to the string table.

    In addition, the snapshot contains meta information describing the data
    fields for nodes and the data fields for edges.

    Args:
      inspector_client: RemoteInspectorClient, the client used for taking the
          heap snapshot.

    Raises:
      KeyError: The snapshot doesn't contain the required data fields.
      ValueError: The snapshot cannot be parsed.
      Error: The snapshot format is not supported (e.g., too new version).
    """
    raw_data = inspector_client.HeapSnapshot(include_summary=False)['raw_data']
    heap = simplejson.loads(raw_data)
    self._node_list = heap['nodes']
    self._edge_list = heap['edges']
    self._strings = heap['strings']
    # The type enumerations are stored as the first element of the
    # node_types / edge_types meta arrays.
    self._node_types = heap['snapshot']['meta']['node_types'][0]
    self._edge_types = heap['snapshot']['meta']['edge_types'][0]
    node_fields = heap['snapshot']['meta']['node_fields']
    edge_fields = heap['snapshot']['meta']['edge_fields']
    # Find the indices of the required node and edge fields.
    self._node_type_ix = self._FindField('type', node_fields)
    self._node_name_ix = self._FindField('name', node_fields)
    self._node_id_ix = self._FindField('id', node_fields)
    # Support 2 different snapshot formats:
    # - Define where edges for a given node start in the edge array as
    #   edges_index.
    # - Define how many edges a given node has as edge_count.
    if 'edges_index' in node_fields:
      self._node_edges_start_ix = node_fields.index('edges_index')
      self._node_edge_count_format = False
    else:
      self._node_edge_count_ix = self._FindField('edge_count', node_fields)
      self._node_edge_count_format = True
    self._node_field_count = len(node_fields)
    self._edge_type_ix = self._FindField('type', edge_fields)
    self._edge_name_or_ix_ix = self._FindField('name_or_index',
                                               edge_fields)
    self._edge_to_node_ix = self._FindField('to_node', edge_fields)
    self._edge_field_count = len(edge_fields)
  def _ConstructorName(self, type_string, node_name_ix):
    """Returns the constructor name for a node.

    Args:
      type_string: str, type of the node.
      node_name_ix: int, index of the strings array element which contains
          the name of the node, if the type of the node is 'object'.
          Otherwise, an arbitrary value.

    Returns:
      str, the constructor name for the node.
    """
    if type_string == 'object':
      return self._strings[int(node_name_ix)]
    return '(%s)' % type_string
  @staticmethod
  def _IsNodeTypeUninteresting(type_string):
    """Helper function for filtering out nodes from the heap snapshot.

    Args:
      type_string: str, type of the node.

    Returns:
      bool, True if the node is of an uninteresting type and shouldn't be
      included in the heap snapshot analysis.
    """
    uninteresting_types = ('hidden', 'code', 'number', 'native', 'synthetic')
    return type_string in uninteresting_types
  @staticmethod
  def _IsEdgeTypeUninteresting(edge_type_string):
    """Helper function for filtering out edges from the heap snapshot.

    Args:
      edge_type_string: str, type of the edge.

    Returns:
      bool, True if the edge is of an uninteresting type and shouldn't be
      included in the heap snapshot analysis.
    """
    uninteresting_types = ('weak', 'hidden', 'internal')
    return edge_type_string in uninteresting_types
  def _ReadNodeFromIndex(self, ix, edges_start):
    """Reads the data for a node from the heap snapshot.

    If the index contains an interesting node, constructs a Node object and
    adds it to self._node_dict.

    Args:
      ix: int, index into the self._node_list array.
      edges_start: int, if self._node_edge_count_format is True, the index of
          the edge array where the edges for the node start.

    Returns:
      int, if self._node_edge_count_format is True, the edge start index for
      the next node.

    Raises:
      Error: The node list of the snapshot is malformed.
    """
    if ix + self._node_field_count > len(self._node_list):
      raise Error('Snapshot node list too short')
    type_ix = self._node_list[ix + self._node_type_ix]
    type_string = self._node_types[int(type_ix)]
    # edges_end is noninclusive (the index of the first edge that is not part
    # of this node).
    if self._node_edge_count_format:
      edge_count = self._node_list[ix + self._node_edge_count_ix]
      edges_end = edges_start + edge_count * self._edge_field_count
    else:
      # edges_start is the start point of this node's edges in the edge
      # array. The end point of this node's edges is the start point of the
      # next node minus 1.
      edges_start = self._node_list[ix + self._node_edges_start_ix]
      next_edges_start = ix + self._node_edges_start_ix + self._node_field_count
      if next_edges_start < len(self._node_list):
        edges_end = self._node_list[next_edges_start]
      else:
        # Last node record: its edges run to the end of the edge array.
        edges_end = len(self._edge_list)
    if Snapshotter._IsNodeTypeUninteresting(type_string):
      # Skip the node but still report where the next node's edges begin.
      return edges_end
    name_ix = self._node_list[ix + self._node_name_ix]
    node_id = self._node_list[ix + self._node_id_ix]
    ctor_name = self._ConstructorName(type_string, name_ix)
    n = Node(node_id, type_string, ctor_name)
    if type_string == 'string':
      # For string nodes, the name index points at the string's own text.
      n.string = self._strings[int(name_ix)]
    for edge_ix in xrange(edges_start, edges_end, self._edge_field_count):
      edge = self._ReadEdgeFromIndex(node_id, edge_ix)
      if edge:
        # The edge will be associated with the other endpoint when all the
        # data has been read.
        n.AddEdgeFrom(edge)
    self._node_dict[node_id] = n
    return edges_end
  def _ReadEdgeFromIndex(self, node_id, edge_ix):
    """Reads the data for an edge from the heap snapshot.

    Args:
      node_id: int, id of the node which is the starting point of the edge.
      edge_ix: int, index into the self._edge_list array.

    Returns:
      Edge, if the index contains an interesting edge, otherwise None.

    Raises:
      Error: The edge list of the snapshot is malformed.
    """
    if edge_ix + self._edge_field_count > len(self._edge_list):
      raise Error('Snapshot edge list too short')
    edge_type_ix = self._edge_list[edge_ix + self._edge_type_ix]
    edge_type_string = self._edge_types[int(edge_type_ix)]
    if Snapshotter._IsEdgeTypeUninteresting(edge_type_string):
      return None
    child_name_or_ix = self._edge_list[edge_ix + self._edge_name_or_ix_ix]
    child_node_ix = self._edge_list[edge_ix + self._edge_to_node_ix]
    # The child_node_ix is an index into the node list. Read the actual
    # node information.
    child_node_type_ix = self._node_list[child_node_ix + self._node_type_ix]
    child_node_type_string = self._node_types[int(child_node_type_ix)]
    child_node_id = self._node_list[child_node_ix + self._node_id_ix]
    if Snapshotter._IsNodeTypeUninteresting(child_node_type_string):
      return None
    child_name_string = ''
    # For element nodes, the child has no name (only an index).
    if (edge_type_string == 'element' or
        int(child_name_or_ix) >= len(self._strings)):
      child_name_string = str(child_name_or_ix)
    else:
      child_name_string = self._strings[int(child_name_or_ix)]
    return Edge(node_id, child_node_id, edge_type_string, child_name_string)
  def _ParseSnapshot(self):
    """Parses the stored JSON snapshot data.

    Fills in self._node_dict with Node objects constructed based on the heap
    snapshot. The Node objects contain the associated Edge objects.
    """
    edge_start_ix = 0
    for ix in xrange(0, len(self._node_list), self._node_field_count):
      edge_start_ix = self._ReadNodeFromIndex(ix, edge_start_ix)
    # Add pointers to the endpoints to the edges, and associate the edges
    # with the "to" nodes.
    for node_id in self._node_dict:
      n = self._node_dict[node_id]
      for e in n.edges_from:
        self._node_dict[e.to_node_id].AddEdgeTo(e)
        e.SetFromNode(n)
        e.SetToNode(self._node_dict[e.to_node_id])
class LeakFinder(object):
"""Finds potentially leaking JavaScript objects based on a heap snapshot."""
def __init__(self, containers, bad_stop_nodes, stacktrace_prefix,
stacktrace_suffix):
"""Initializes the LeakFinder object.
Potentially leaking Node objects the are children of the nodes described by
containers which are only retained by the nodes described by bad_stop_nodes.
Args:
containers: [str], describes the container JavaScript objects E.g.,
['foo.bar', 'mylibrary.all_objects_array']. Only objects in the
containers are investigated as potential leaks.
bad_stop_nodes: [str], describes bad stop nodes which don't contribute to
valid retaining paths. E.g., ['foo.baz', 'mylibrary.secondary_array'].
A retaining path is bad if it goes through one of the bad nodes. If
all the retaining paths of an object are bad, the object is considered
a leak.
stacktrace_prefix: str, prefix to add to the container name for retrieving
the stack trace. Useful e.g., if the JavaScript is in different frame.
stacktrace_suffix: str, name of the member variable where the stack trace
is stored.
"""
self._container_description = [c.split('.') for c in containers]
self._bad_stop_node_description = [b.split('.') for b in bad_stop_nodes]
self._stacktrace_prefix = stacktrace_prefix
self._stacktrace_suffix = stacktrace_suffix
def FindLeaks(self, nodes):
"""Finds Node objects which are potentially leaking.
Args:
nodes: set(Node), Node objects in the snapshot.
Yields:
LeakNode objects representing the potential leaks.
Raises:
Error: Cannot find the Nodes needed by the leak detection algorithm.
"""
# The retaining paths are computed until meeting one of these nodes.
stop_nodes = set()
# A retaining path is bad if it ends to one of these nodes. These are
# closure data structures. If all retaining paths end in bad stop nodes, the
# node is probably a leak.
bad_stop_nodes = set()
containers = set()
found_container_edges = set()
# Find container nodes and stopper nodes based on the descriptions.
for node in nodes:
# Window objects are good stop nodes. If a retaining path goes through a
# Window object without going through any bad stop nodes, the retaining
# path is good, and the object is not a leak.
if node.class_name == 'Window' or node.class_name.startswith('Window / '):
stop_nodes.add(node)
node.js_name = 'window'
continue
for edges in self._bad_stop_node_description:
if LeakFinder._IsRetainedByEdges(node, edges):
stop_nodes.add(node)
bad_stop_nodes.add(node)
node.js_name = '.'.join(edges)
break
for edges in self._container_description:
if LeakFinder._IsRetainedByEdges(node, edges):
containers.add(node)
stop_nodes.add(node)
bad_stop_nodes.add(node)
node.container_name = '.'.join(edges)
found_container_edges.add(node.container_name)
node.js_name = '.'.join(edges)
break
# Check that we found all the containers.
for edges in self._container_description:
edge_description = '.'.join(edges)
if edge_description not in found_container_edges:
raise Error('Container not found: %s' % edge_description)
# Find objects such that they are in the specified containers and all
# retaining paths contain either the container or the specified bad stop
# objects.
for container in containers:
for edge in container.edges_from:
if edge.type_string != 'element':
continue
found_good_path = False
node = edge.to_node
for path in LeakFinder._FindRetainingPaths(node, [node], stop_nodes):
# If the last node on the path is in bad_stop_nodes, the path is bad,
# otherwise it's good (it may end in a good stop node or in a node
# which doesn't have parents).
if not path[-1] in bad_stop_nodes:
found_good_path = True
# All the objects on the known good path are known to be non-leaks.
# Utilize this information when finding paths for other objects: As
# soon as we find a path which hits one of them, we know the object
# is not leaked.
for node in path:
stop_nodes.add(node)
break
if not found_good_path:
node_description = '%s%s[%s]' % (self._stacktrace_prefix,
container.container_name,
edge.name_string)
leak = LeakNode(edge.to_node, 'Leak', node_description,
self._stacktrace_suffix)
yield leak
@staticmethod
def _IsRetainedByEdges(node, edge_names):
"""Returns True if the node is retained by edges called edge_names.
E.g., _IsRetainedByEdges(node, ['foo', 'bar', 'baz']) returns True if node
represents obj.foo.bar.baz for some object obj.
Args:
node: Node, the Node which migt be retained by edges called edge_names.
edge_names: [str], the wanted edge names.
Returns:
bool, True if a retaining path with the given edge_names was found, False
otherwise.
"""
if not edge_names:
return True
edge_name = edge_names[-1]
for edge in node.edges_to:
if (edge.name_string == edge_name and
LeakFinder._IsRetainedByEdges(edge.from_node, edge_names[:-1])):
return True
return False
@staticmethod
def _RetainingPathToString(path):
"""Constructs out a textual representation of the path.
Args:
path: [Node]
Returns:
str, representation of the path.
"""
parent_node = None
result = ''
for node in path[::-1]:
if parent_node:
result += LeakFinder._NodeRelationshipToString(parent_node, node)
else:
result += node.ToJavaScript()
parent_node = node
return result
@staticmethod
def _NodeRelationshipToString(node_from, node_to):
"""Constructs a textual representation of a relationship between two nodes.
Args:
node_from: Node
node_to: Node
Returns:
str, representation of the relationship.
"""
for edge in node_to.edges_to:
if edge.from_node_id == node_from.node_id:
return edge.ToJavaScript()
return ''
@staticmethod
def _FindRetainingPaths(node, visited, stop_nodes, max_depth=30):
"""Finds retaining paths for a Node.
Args:
node: Node, the Node to find the retaining paths for.
visited: [Node], the visited path so far.
stop_nodes: set(Node), nodes which terminate the path (we don't care how
they are retained)
max_depth: int, the maximum length of retaining paths to search. The
search will be terminated when at least one path exceeding max_depth
is found.
Yields:
[Node], retaining paths.
"""
if len(visited) > max_depth:
return
if not node.edges_to or node in stop_nodes:
yield visited
return
for edge in node.edges_to:
if edge.from_node not in visited:
visited.append(edge.from_node)
for path in LeakFinder._FindRetainingPaths(edge.from_node, visited,
stop_nodes):
yield path
visited.pop() | en | 0.828028 | #!/usr/bin/env python # Copyright 2012 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License." Tool for finding possibly leaking JavaScript objects based on heap snapshots. How does it work? Since JavaScript is a garbage collected language, all objects have one or several retaining paths which keep the object alive. This tool finds objects which are only retained (kept alive) by a specified set of data structures ("bad stop nodes"). Typically those are data structures of a JavaScript library (e.g., Closure). If an object is only kept alive by such data structures, the user code doesn't have a pointer to the object, and it can be considered a "leak". (E.g., the user code forgot to call a function which was supposed to remove the object from the data structures.) Example (Closure): goog.Disposable implements a monitoring mode which gathers all created but not yet disposed instances of goog.Disposable (and its subclasses) into an array goog.Disposable.instances_. This array will keep the objects alive. However, if an object is only kept alive by this array, it is likely a leak, since the user code doesn't contain any pointers to the object, and the user cannot call dispose() on it. Closure contains other data structures (especially for storing event handlers under goog.events) which keep objects alive. If all retaining paths of an object go through goog.Disposable.instances_ or goog.events, the object is likely a leak. 
However, if we find a retaining path that goes through a Window object without going through these bad stop nodes, the object is not a leak. Data structure for representing a node in the heap snapshot. Attributes: node_id: int, identifier for the node. type_string: str, describes the type of the node. class_name: str, describes the class of the JavaScript object represented by this Node. edges_to: [Edge], edges whose end point this Node is. edges_from: [Edge], edges whose start point this Node is. string: str, for string Nodes, contains the string the Node represents. Empty string for non-string nodes. js_name: str, how to refer to this node in JavaScript. Initializes the Node object. Args: node_id: int, identifier for the Node. type_string: str, the type of the node. class_name: str, the class of the JavaScript object this Node represents. Associates an Edge with the Node (the end point). Args: edge: Edge, an edge whose end point this Node is. Associates an Edge with the Node (the start point). Args: edge: Edge, an edge whose start point this Node is. Data structure for representing an edge in the heap snapshot. Attributes: from_node_id: int, id of the node which is the start point of this Edge. Used when the corresponding Node object is not yet contstructed. to_node_id: int, id of the node which is the end point of this Edge. Used when the corresponding Node object is not yet contstructed. from_node: Node, the start point of this Edge. to_node: Node, the end point of this Edge. type_string: str, the type of the Edge. name_string: str, the JavaScript attribute name this Edge represents. Initializes the Edge object. Args: from_node_id: int, id of the node which is the start point of this Edge. Used when the corresponding Node object is not yet contstructed. to_node_id: int, id of the node which is the end point of this Edge. Used when the corresponding Node object is not yet contstructed. type_string: str, the type of the Edge. 
name_string: str, the JavaScript attribute name this Edge represents. Data structure for representing a potentially leaked heap object. Attributes: node: Node, represents the leaked JavaScript object. description: str, human-readable desription of the leak. how_to_find_node: str, JavaScript expression which evaluates to the leaked JavaScript object. stack: Stack, the creation stack trace of the JavaScript object, or None if the stack trace cannot be retrieved or has not yet been retrieved. Initializes the LeakNode object. Args: node: Node, represents the leaked JavaScript object. description: str, human-readable desription of the leak. how_to_find_node: str, JavaScript expression which evaluates to the leaked JavaScript object. stacktrace_suffix: str, appended to the leaked objects for referring the member variable where the stack trace is stored. E.g., ".stack". Retrieves the creation stack trace and stores it into this LeakNode. Args: inspector_client: RemoteInspectorClient, client to use for retrieving the full stack trace. If None, we will retrieve a possibly shortened value from the snapshot. # No stack trace information. # The heap snapshot contains only the first 1000 characters of each # string. As we store the creation stack trace in objects as strings, we # will need to evaluate this string using the remote inspector client to # get the full stack trace. # See if the object contains a stack trace. Reads a heap snapshot from a chromium process and parses it. The heap snapshot JSON format is defined by HeapSnapshotJSONSerializer in v8. Attributes: _node_dict: {int -> Node}, maps integer ids to Node objects. _node_list: [int], the raw node data of the heap snapshot. _edge_list: [int], the raw edge data of the heap snapshot. _node_types: [str], the possible node types in the heap snapshot. _edge_types: [str], the possible edge types in the heap snapshot. _node_fields: [str], the fields present in the heap snapshot for each node. 
_edge_fields: [str], the fields present in the heap snapshot for each node. _node_type_ix: int, index of the node type field. _node_name_ix: int, index of the node name field. _node_id_ix: int, index of the node id field. _node_edges_start_ix: int, index of the "edge start index for a node" field. _node_edge_count_ix: int, index of the node edge count field. _node_edge_count_format: bool, defines if the snapshot uses edges_start or edge_count. _node_field_count: int, number of node fields. _edge_type_ix: int, index of the edge type field. _edge_name_or_ix_ix: int, index of the edge name field. _edge_to_node_ix: int, index of the "to node for an edge" field. _edge_field_count: int, number of edge fields. Reads a heap snapshot from a chromium process and returns the data. Args: inspector_client: RemoteInspectorClient, the client to used for taking the heap snapshot. Returns: set(Node), the Node objects in the snapshot or None if the snapshot couldn't be read. Raises: KeyError: The snapshot doesn't contain the required data fields. ValueError: The snaphost cannot be parsed. Error: The snapshot format cannot be parsed (e.g., too new version). Finds field indices based on the snapshot meta information. Args: field_name: str, the field to find in fields_array. fields_array: [str], array of available fields. Returns: int, the first index of field_name in fields_array. Raises: Error: field_name doesn't occur in fields_array. Reads a heap snapshot from a chromium process and stores the data. The snapshot contains a list of integers describing nodes (types, names, etc.) and a list of integers describing edges (types, the node the edge points to, etc.) and a string table. All strings are expressed as indices to the string table. In addition, the snapshot contains meta information describing the data fields for nodes and the data fields for edges. Args: inspector_client: RemoteInspectorClient, the client to used for taking the heap snapshot. 
Raises: KeyError: The snapshot doesn't contain the required data fields. ValueError: The snaphost cannot be parsed. Error: The snapshot format is not supported (e.g., too new version). # Find the indices of the required node and edge fields. # Support 2 different snapshot formats: # - Define where edges for a given node start in the edge array as # edges_index. # - Define how many edges a given node has as edge_count. Returns the constructor name for a node. Args: type_string: str, type of the node. node_name_ix: int, index of the strings array element which contains the name of the node, if the type of the node is 'object'. Otherwise, an arbitrary value. Returns: str, the constructor name for the node. Helper function for filtering out nodes from the heap snapshot. Args: type_string: str, type of the node. Returns: bool, True if the node is of an uninteresting type and shouldn't be included in the heap snapshot analysis. Helper function for filtering out edges from the heap snapshot. Args: edge_type_string: str, type of the edge. Returns: bool, True if the edge is of an uninteresting type and shouldn't be included in the heap snapshot analysis. Reads the data for a node from the heap snapshot. If the index contains an interesting node, constructs a Node object and adds it to self._node_dict. Args: ix: int, index into the self._node_list array. edges_start: int, if self._node_edge_count_format is True, the index of the edge array where the edges for the node start. Returns: int, if self._node_edge_count_format is True, the edge start index for the next node. Raises: Error: The node list of the snapshot is malformed. # edges_end is noninclusive (the index of the first edge that is not part of # this node). # edges_start is the start point of this node's edges in the edge # array. The end point of this node's edges is the start point of the next # node minus 1. # The edge will be associated with the other endpoint when all the data # has been read. 
Reads the data for an edge from the heap snapshot. Args: node_id: int, id of the node which is the starting point of the edge. edge_ix: int, index into the self._edge_list array. Returns: Edge, if the index contains an interesting edge, otherwise None. Raises: Error: The node list of the snapshot is malformed. # The child_node_ix is an index into the node list. Read the actual # node information. # For element nodes, the child has no name (only an index). Parses the stored JSON snapshot data. Fills in self._node_dict with Node objects constructed based on the heap snapshot. The Node objects contain the associated Edge objects. # Add pointers to the endpoints to the edges, and associate the edges with # the "to" nodes. Finds potentially leaking JavaScript objects based on a heap snapshot. Initializes the LeakFinder object. Potentially leaking Node objects the are children of the nodes described by containers which are only retained by the nodes described by bad_stop_nodes. Args: containers: [str], describes the container JavaScript objects E.g., ['foo.bar', 'mylibrary.all_objects_array']. Only objects in the containers are investigated as potential leaks. bad_stop_nodes: [str], describes bad stop nodes which don't contribute to valid retaining paths. E.g., ['foo.baz', 'mylibrary.secondary_array']. A retaining path is bad if it goes through one of the bad nodes. If all the retaining paths of an object are bad, the object is considered a leak. stacktrace_prefix: str, prefix to add to the container name for retrieving the stack trace. Useful e.g., if the JavaScript is in different frame. stacktrace_suffix: str, name of the member variable where the stack trace is stored. Finds Node objects which are potentially leaking. Args: nodes: set(Node), Node objects in the snapshot. Yields: LeakNode objects representing the potential leaks. Raises: Error: Cannot find the Nodes needed by the leak detection algorithm. 
# The retaining paths are computed until meeting one of these nodes. # A retaining path is bad if it ends to one of these nodes. These are # closure data structures. If all retaining paths end in bad stop nodes, the # node is probably a leak. # Find container nodes and stopper nodes based on the descriptions. # Window objects are good stop nodes. If a retaining path goes through a # Window object without going through any bad stop nodes, the retaining # path is good, and the object is not a leak. # Check that we found all the containers. # Find objects such that they are in the specified containers and all # retaining paths contain either the container or the specified bad stop # objects. # If the last node on the path is in bad_stop_nodes, the path is bad, # otherwise it's good (it may end in a good stop node or in a node # which doesn't have parents). # All the objects on the known good path are known to be non-leaks. # Utilize this information when finding paths for other objects: As # soon as we find a path which hits one of them, we know the object # is not leaked. Returns True if the node is retained by edges called edge_names. E.g., _IsRetainedByEdges(node, ['foo', 'bar', 'baz']) returns True if node represents obj.foo.bar.baz for some object obj. Args: node: Node, the Node which migt be retained by edges called edge_names. edge_names: [str], the wanted edge names. Returns: bool, True if a retaining path with the given edge_names was found, False otherwise. Constructs out a textual representation of the path. Args: path: [Node] Returns: str, representation of the path. Constructs a textual representation of a relationship between two nodes. Args: node_from: Node node_to: Node Returns: str, representation of the relationship. Finds retaining paths for a Node. Args: node: Node, the Node to find the retaining paths for. visited: [Node], the visited path so far. 
stop_nodes: set(Node), nodes which terminate the path (we don't care how they are retained) max_depth: int, the maximum length of retaining paths to search. The search will be terminated when at least one path exceeding max_depth is found. Yields: [Node], retaining paths. | 2.342574 | 2 |
IoT/topology.py | Wales-Wyf/Distributed-Algorithm-Simulation-Platform-DSP--2.0 | 1 | 6613386 | <reponame>Wales-Wyf/Distributed-Algorithm-Simulation-Platform-DSP--2.0
# coding=utf-8
import copy
import socket
def topology():
# 确定立方体结构
m = 2
n = 5
l = 5
co = []
adjID = []
IP = []
PORT = []
datalist = []
# localIP = socket.gethostbyname(socket.gethostname())
localIP = "localhost"
# 定义每个节点IP与端口(PORT[0]~PORT[5]为通信端口,PORT[6]为交互端口)
for i in range(m*n*l):
IP.append(localIP)
PORT.append([])
datalist.append([])
for j in range(7):
PORT[i].append(10000+7*i+j)
# 确定立方体结构每个节点的邻接节点与对应路由关系
for i in range(m*n*l):
co.append([i/(m*n),(i%(m*n))/n,(i%(m*n))%n])
for i in range(m*n*l):
tmp0 = copy.copy(co[i])
tmp = []
for j in range(3):
tmp1 = copy.copy(tmp0)
tmp2 = copy.copy(tmp0)
tmp1[2-j] += 1
tmp2[2-j] -= 1
if tmp1 in co:
tmp.append(co.index(tmp1))
if tmp2 in co:
tmp.append(co.index(tmp2))
adjID.append(tmp)
for i in range(len(adjID)):
for j in range(len(adjID[i])):
adjID[i][j] += 1
# 返回[邻接节点ID数组,路由表数组,IP数组,端口数组],其中数组的含义是涵盖了全部n个节点的相应信息
return [IP, PORT, adjID, datalist]
| # coding=utf-8
import copy
import socket
def topology():
# 确定立方体结构
m = 2
n = 5
l = 5
co = []
adjID = []
IP = []
PORT = []
datalist = []
# localIP = socket.gethostbyname(socket.gethostname())
localIP = "localhost"
# 定义每个节点IP与端口(PORT[0]~PORT[5]为通信端口,PORT[6]为交互端口)
for i in range(m*n*l):
IP.append(localIP)
PORT.append([])
datalist.append([])
for j in range(7):
PORT[i].append(10000+7*i+j)
# 确定立方体结构每个节点的邻接节点与对应路由关系
for i in range(m*n*l):
co.append([i/(m*n),(i%(m*n))/n,(i%(m*n))%n])
for i in range(m*n*l):
tmp0 = copy.copy(co[i])
tmp = []
for j in range(3):
tmp1 = copy.copy(tmp0)
tmp2 = copy.copy(tmp0)
tmp1[2-j] += 1
tmp2[2-j] -= 1
if tmp1 in co:
tmp.append(co.index(tmp1))
if tmp2 in co:
tmp.append(co.index(tmp2))
adjID.append(tmp)
for i in range(len(adjID)):
for j in range(len(adjID[i])):
adjID[i][j] += 1
# 返回[邻接节点ID数组,路由表数组,IP数组,端口数组],其中数组的含义是涵盖了全部n个节点的相应信息
return [IP, PORT, adjID, datalist] | zh | 0.574756 | # coding=utf-8 # 确定立方体结构 # localIP = socket.gethostbyname(socket.gethostname()) # 定义每个节点IP与端口(PORT[0]~PORT[5]为通信端口,PORT[6]为交互端口) # 确定立方体结构每个节点的邻接节点与对应路由关系 # 返回[邻接节点ID数组,路由表数组,IP数组,端口数组],其中数组的含义是涵盖了全部n个节点的相应信息 | 3.003437 | 3 |
pacote-download/pythonProject/exercicios_python_guanabara/ex091.py | oliveirajonathas/python_estudos | 0 | 6613387 | <filename>pacote-download/pythonProject/exercicios_python_guanabara/ex091.py
"""
Crie um programa onde 4 jogadores joguem um dado e tenham resultados aleatórios. Guarde esses resultados em um dicioná-
rio. No final, coloque esse dicionário em ordem, sabendo que o vencedor tirou o maior número no dado.
obs.: o professor explica como colocar um dicionário em ordem na aula de exercício.
"""
from random import randint
from time import sleep
from operator import itemgetter
jogadas = list()
jogadores = dict()
print('Valores Sorteados:')
for i in range(1, 5):
jogadores[f'jogador{i}'] = randint(1, 6)
for k, v in jogadores.items():
print(f'O {k} tirou {v} no dado.')
sleep(1)
print('*'*30)
print('RANKING DOS JOGADORES')
ranking = sorted(jogadores.items(), key=itemgetter(1), reverse=True)
for i, v in enumerate(ranking):
print(f'{i+1}º lugar: {v[0]} com {v[1]} pontos!')
sleep(1)
| <filename>pacote-download/pythonProject/exercicios_python_guanabara/ex091.py
"""
Crie um programa onde 4 jogadores joguem um dado e tenham resultados aleatórios. Guarde esses resultados em um dicioná-
rio. No final, coloque esse dicionário em ordem, sabendo que o vencedor tirou o maior número no dado.
obs.: o professor explica como colocar um dicionário em ordem na aula de exercício.
"""
from random import randint
from time import sleep
from operator import itemgetter
jogadas = list()
jogadores = dict()
print('Valores Sorteados:')
for i in range(1, 5):
jogadores[f'jogador{i}'] = randint(1, 6)
for k, v in jogadores.items():
print(f'O {k} tirou {v} no dado.')
sleep(1)
print('*'*30)
print('RANKING DOS JOGADORES')
ranking = sorted(jogadores.items(), key=itemgetter(1), reverse=True)
for i, v in enumerate(ranking):
print(f'{i+1}º lugar: {v[0]} com {v[1]} pontos!')
sleep(1)
| pt | 0.986675 | Crie um programa onde 4 jogadores joguem um dado e tenham resultados aleatórios. Guarde esses resultados em um dicioná- rio. No final, coloque esse dicionário em ordem, sabendo que o vencedor tirou o maior número no dado. obs.: o professor explica como colocar um dicionário em ordem na aula de exercício. | 3.789671 | 4 |
models/sergiy_m8m10_zzzz_02_28_mip_8_10_z16192_m8_finetune_n5/sergiy_m8m10_template/sergiy_trans_minnie_v1_m8m10/main.py | emitch/SEAMLeSS | 4 | 6613388 | <filename>models/sergiy_m8m10_zzzz_02_28_mip_8_10_z16192_m8_finetune_n5/sergiy_m8m10_template/sergiy_trans_minnie_v1_m8m10/main.py
import torch
from alignermodule import Aligner
from rollback_pyramid import RollbackPyramid
# Build one Aligner network per MIP level (8..10) and register each with a
# rollback pyramid so the pyramid can process images at those levels.
aligners = {}
pyramid = RollbackPyramid()
for m in [8, 9, 10]:
    # Aligner with feature-map widths [2, 16, 16, 16, 16, 2] and kernel
    # size 7, moved to the GPU.  Assumes CUDA is available -- TODO confirm.
    aligners[m] = Aligner(fms=[2, 16, 16, 16, 16, 2], k=7).cuda()
    # Load the pretrained weights for this MIP level from the checkpoint dir.
    aligners[m].load_state_dict(torch.load('./checkpoints/barak_aligner_mip{}.pth.tar'.format(m)))
    pyramid.set_mip_processor(aligners[m], m)
| <filename>models/sergiy_m8m10_zzzz_02_28_mip_8_10_z16192_m8_finetune_n5/sergiy_m8m10_template/sergiy_trans_minnie_v1_m8m10/main.py
import torch
from alignermodule import Aligner
from rollback_pyramid import RollbackPyramid
aligners = {}
pyramid = RollbackPyramid()
for m in [8, 9, 10]:
aligners[m] = Aligner(fms=[2, 16, 16, 16, 16, 2], k=7).cuda()
aligners[m].load_state_dict(torch.load('./checkpoints/barak_aligner_mip{}.pth.tar'.format(m)))
pyramid.set_mip_processor(aligners[m], m)
| none | 1 | 2.046793 | 2 | |
EcoFin/dataDownload/core.py | LucaCamerani/EcoFin-library | 9 | 6613389 | """
core.py
Created by <NAME> at 31/08/2020, University of Milano-Bicocca.
(<EMAIL>)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
from collections import namedtuple
import numpy as np
import pandas as pd
import requests
from EcoFin.dataDownload import functions as fc
from EcoFin.dataDownload import shared
try:
from urllib.parse import quote as urlencode
except ImportError:
from urllib import quote as urlencode
class TickerCore():
    """Downloads and parses market data for a single ticker.

    Heavy lifting (JSON parsing, adjusting, caching) is delegated to
    ``EcoFin.dataDownload.functions`` (``fc``) and the shared session cache
    in ``EcoFin.dataDownload.shared``.
    """

    def __init__(self, ticker):
        self.ticker = ticker.upper()
        self.history = None          # cached price history (set by getHistory)
        self.baseUrl = shared.baseUrl
        self.fundamentals = False

        # Lazily-populated metadata placeholders.
        self._info = None
        self._sustainability = None
        self._recommendations = None
        self._majorHolders = None
        self._institutionalHolders = None
        self._ISIN = None
        self._calendar = None
        self._expirations = {}

        # Yearly ("Y") and quarterly ("Q") statement placeholders.
        self._earnings = {
            "Y": fc.emptyDataSerie(),
            "Q": fc.emptyDataSerie()}
        self._financials = {
            "Y": fc.emptyDataSerie(),
            "Q": fc.emptyDataSerie()}
        self._balancesheet = {
            "Y": fc.emptyDataSerie(),
            "Q": fc.emptyDataSerie()}
        self._cashflow = {
            "Y": fc.emptyDataSerie(),
            "Q": fc.emptyDataSerie()}

    def getHistory(self, interval="1d",
                   start=None, end=None, actions=True,
                   autoAdjust=True, backAdjust=False,
                   proxy=None, rounding=True, **kwargs):
        """
        Download, clean and return the OHLCV history for this ticker.

        :Parameters:
            interval : str
                Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
                Intraday data cannot extend last 60 days
            start: str
                Download start date string (YYYY-MM-DD) or datetime.
            end: str
                Download end date string (YYYY-MM-DD) or datetime.
            actions: bool
                Keep the Dividends / Stock Splits columns in the result?
                Default is True
            autoAdjust: bool
                Adjust all OHLC automatically? Default is True
            backAdjust: bool
                Back-adjusted data to mimic true historical prices
            proxy: str
                Optional. Proxy server URL scheme. Default is None
            rounding: bool
                Round values to the precision suggested by the provider?
                Default is True
            **kwargs: dict
                debug: bool
                    Optional. If passed as False, will suppress
                    error message printing to console.
        """
        params = {"period1": start, "period2": end}
        params["events"] = "div,splits"

        # Normalize the proxy argument into the format requests expects.
        if proxy is not None:
            if isinstance(proxy, dict) and "https" in proxy:
                proxy = proxy["https"]
            proxy = {"https": proxy}

        # Fetch the chart JSON, going through the session cache when enabled.
        url = "{}/v8/finance/chart/{}".format(self.baseUrl, self.ticker)
        key = '{}?{}'.format(url, '&'.join(['{}={}'.format(k, d) for k, d in params.items()]))
        if shared.show_url: print('Connection request: {}'.format(key))
        if shared.use_cache & shared.session_cache.keyExists(key):
            data = shared.session_cache.read(key)
        else:
            data = requests.get(url=url, params=params, proxies=proxy)
            if "Server" in data.text:
                raise RuntimeError("Data provider is currently down!")
            data = data.json()
            shared.session_cache.add(key=key, var=data)

        # Report provider-side errors; failed downloads yield an empty serie.
        debug_mode = True
        if "debug" in kwargs and isinstance(kwargs["debug"], bool):
            debug_mode = kwargs["debug"]

        err_msg = "No data found for this date range"
        if "chart" in data and data["chart"]["error"]:
            err_msg = data["chart"]["error"]["description"]
            shared.DFS[self.ticker] = fc.emptyDataSerie()
            shared.ERRORS[self.ticker] = err_msg
            if "many" not in kwargs and debug_mode:
                print('- %s: %s' % (self.ticker, err_msg))
            return shared.DFS[self.ticker]

        elif "chart" not in data or data["chart"]["result"] is None or not data["chart"]["result"]:
            shared.DFS[self.ticker] = fc.emptyDataSerie()
            shared.ERRORS[self.ticker] = err_msg
            if "many" not in kwargs and debug_mode:
                print('- %s: %s' % (self.ticker, err_msg))
            return shared.DFS[self.ticker]

        # Parse the raw quotes into a DataFrame.
        try:
            quotes = fc.parseQuotes(data["chart"]["result"][0])
        except Exception:
            shared.DFS[self.ticker] = fc.emptyDataSerie()
            shared.ERRORS[self.ticker] = err_msg
            if "many" not in kwargs and debug_mode:
                print('- %s: %s' % (self.ticker, err_msg))
            return shared.DFS[self.ticker]

        # Work around a provider quirk: 60m bars may be returned for a 30m
        # request, so re-aggregate into proper 30-minute bars.
        if interval.lower() == "30m":
            quotes2 = quotes.resample('30T')
            quotes = pd.DataFrame(index=quotes2.last().index, data={
                'Open': quotes2['Open'].first(),
                'High': quotes2['High'].max(),
                'Low': quotes2['Low'].min(),
                'Close': quotes2['Close'].last(),
                'Adj Close': quotes2['Adj Close'].last(),
                'Volume': quotes2['Volume'].sum()
            })

            try:
                quotes['Dividends'] = quotes2['Dividends'].max()
            except Exception:
                pass

            try:
                # BUGFIX: previously copied quotes2['Dividends'] into the
                # 'Stock Splits' column.
                quotes['Stock Splits'] = quotes2['Stock Splits'].max()
            except Exception:
                pass

        if autoAdjust:
            quotes = fc.autoAdjust(quotes)
        elif backAdjust:
            quotes = fc.backAdjust(quotes)

        if rounding:
            quotes = np.round(quotes, data["chart"]["result"][0]["meta"]["priceHint"])
        quotes['Volume'] = quotes['Volume'].fillna(0).astype(np.int64)

        quotes.dropna(inplace=True)

        # Corporate actions (dividends and splits).
        dividends, splits = fc.parseEvents(data["chart"]["result"][0])

        # Combine quotes and actions into one frame.
        df = pd.concat([quotes, dividends, splits], axis=1, sort=True)
        df["Dividends"].fillna(0, inplace=True)
        df["Stock Splits"].fillna(0, inplace=True)

        # Localize timestamps to the exchange timezone, then keep dates only.
        df.index = df.index.tz_localize("UTC").tz_convert(
            data["chart"]["result"][0]["meta"]["exchangeTimezoneName"])
        df.index = pd.to_datetime(df.index.date)

        df.index.name = "Date"

        self.history = df.copy()

        if not actions:
            df.drop(columns=["Dividends", "Stock Splits"], inplace=True)

        return df.drop_duplicates()

    def getDividends(self, proxy=None):
        """Return the non-zero dividend payments as a Series."""
        if self.history is None:
            # NOTE(review): getHistory has no 'period' parameter; this keyword
            # is absorbed by **kwargs -- confirm the intended behavior.
            self.getHistory(period="max", proxy=proxy)
        dividends = self.history["Dividends"]
        return dividends[dividends != 0]

    def getSplits(self, proxy=None):
        """Return the non-zero stock splits as a Series."""
        if self.history is None:
            self.getHistory(period="max", proxy=proxy)
        splits = self.history["Stock Splits"]
        return splits[splits != 0]

    def getEvents(self, proxy=None):
        """Return all rows with a dividend or a split (zeros elsewhere)."""
        if self.history is None:
            self.getHistory(period="max", proxy=proxy)
        actions = self.history[["Dividends", "Stock Splits"]]
        return actions[actions != 0].dropna(how='all').fillna(0)

    def getInfo(self):
        """Fetch basic instrument metadata and return it as an Info namedtuple.

        On any failure (network error, bad payload) a namedtuple with all
        fields set to None is returned instead of raising.
        """
        url = "{}/quote/{}".format(self.baseUrl, self.ticker)
        if shared.show_url: print('Connection request: {}'.format(url))

        try:
            if shared.use_cache & shared.session_cache.keyExists(url):
                data = shared.session_cache.read(url)
            else:
                data = requests.get(url=url)
                if "Server" in data.text:
                    raise RuntimeError("Data provider is currently down!")
                data = data.json()
                shared.session_cache.add(key=url, var=data)
        except Exception:
            # Fall back to an all-None record (keep the exact key layout the
            # success path produces).  Narrowed from a bare 'except:' so that
            # KeyboardInterrupt/SystemExit still propagate.
            data = {"index": 0,
                    "Ticker": None,
                    "ISIN": None,
                    "Long_Name": None,
                    "Website": None,
                    "Region": None,
                    "Quote_Type": None,
                    "Currency": None,
                    "Exchange": None}

        return namedtuple('Info', ['ticker', 'ISIN', 'longName',
                                   'website', 'region', 'quoteType',
                                   'currency', 'exchange'])(**{
            "ticker": data['Ticker'],
            "ISIN": data['ISIN'],
            "longName": data['Long_Name'],
            "website": data['Website'],
            "region": data['Region'],
            "quoteType": data['Quote_Type'],
            "currency": data['Currency'],
            "exchange": data['Exchange']
        })
| """
core.py
Created by <NAME> at 31/08/2020, University of Milano-Bicocca.
(<EMAIL>)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
from collections import namedtuple
import numpy as np
import pandas as pd
import requests
from EcoFin.dataDownload import functions as fc
from EcoFin.dataDownload import shared
try:
from urllib.parse import quote as urlencode
except ImportError:
from urllib import quote as urlencode
class TickerCore():
def __init__(self, ticker):
self.ticker = ticker.upper()
self.history = None
self.baseUrl = shared.baseUrl
self.fundamentals = False
self._info = None
self._sustainability = None
self._recommendations = None
self._majorHolders = None
self._institutionalHolders = None
self._ISIN = None
self._calendar = None
self._expirations = {}
self._earnings = {
"Y": fc.emptyDataSerie(),
"Q": fc.emptyDataSerie()}
self._financials = {
"Y": fc.emptyDataSerie(),
"Q": fc.emptyDataSerie()}
self._balancesheet = {
"Y": fc.emptyDataSerie(),
"Q": fc.emptyDataSerie()}
self._cashflow = {
"Y": fc.emptyDataSerie(),
"Q": fc.emptyDataSerie()}
def getHistory(self, interval="1d",
start=None, end=None, actions=True,
autoAdjust=True, backAdjust=False,
proxy=None, rounding=True, **kwargs):
"""
:Parameters:
period : str
Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
Either Use period parameter or use start and end
interval : str
Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
Intraday data cannot extend last 60 days
start: str
Download start date string (YYYY-MM-DD) or datetime.
Default is 1900-01-01
end: str
Download end date string (YYYY-MM-DD) or datetime.
Default is now
prepost : bool
Include Pre and Post market data in results?
Default is False
autoAdjust: bool
Adjust all OHLC automatically? Default is True
backAdjust: bool
Back-adjusted data to mimic true historical prices
proxy: str
Optional. Proxy server URL scheme. Default is None
rounding: bool
Round values to 2 decimal places?
Optional. Default is False = precision suggested by Yahoo!
tz: str
Optional timezone locale for dates.
(default data is returned as non-localized dates)
**kwargs: dict
debug: bool
Optional. If passed as False, will suppress
error message printing to console.
"""
params = {"period1": start, "period2": end}
params["events"] = "div,splits"
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
# Getting data from json
url = "{}/v8/finance/chart/{}".format(self.baseUrl, self.ticker)
key = '{}?{}'.format(url, '&'.join(['{}={}'.format(k, d) for k, d in params.items()]))
if shared.show_url: print('Connection request: {}'.format(key))
if shared.use_cache & shared.session_cache.keyExists(key):
data = shared.session_cache.read(key)
else:
data = requests.get(url=url, params=params, proxies=proxy)
if "Server" in data.text:
raise RuntimeError("Data provider is currently down!")
data = data.json()
shared.session_cache.add(key=key, var=data)
# Clean up errors
debug_mode = True
if "debug" in kwargs and isinstance(kwargs["debug"], bool):
debug_mode = kwargs["debug"]
err_msg = "No data found for this date range"
if "chart" in data and data["chart"]["error"]:
err_msg = data["chart"]["error"]["description"]
shared.DFS[self.ticker] = fc.emptyDataSerie()
shared.ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared.DFS[self.ticker]
elif "chart" not in data or data["chart"]["result"] is None or not data["chart"]["result"]:
shared.DFS[self.ticker] = fc.emptyDataSerie()
shared.ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared.DFS[self.ticker]
# parse quotes
try:
quotes = fc.parseQuotes(data["chart"]["result"][0])
except Exception:
shared.DFS[self.ticker] = fc.emptyDataSerie()
shared.ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared.DFS[self.ticker]
# 2) fix weired bug with Yahoo! - returning 60m for 30m bars
if interval.lower() == "30m":
quotes2 = quotes.resample('30T')
quotes = pd.DataFrame(index=quotes2.last().index, data={
'Open': quotes2['Open'].first(),
'High': quotes2['High'].max(),
'Low': quotes2['Low'].min(),
'Close': quotes2['Close'].last(),
'Adj Close': quotes2['Adj Close'].last(),
'Volume': quotes2['Volume'].sum()
})
try:
quotes['Dividends'] = quotes2['Dividends'].max()
except Exception:
pass
try:
quotes['Stock Splits'] = quotes2['Dividends'].max()
except Exception:
pass
if autoAdjust:
quotes = fc.autoAdjust(quotes)
elif backAdjust:
quotes = fc.backAdjust(quotes)
if rounding:
quotes = np.round(quotes, data["chart"]["result"][0]["meta"]["priceHint"])
quotes['Volume'] = quotes['Volume'].fillna(0).astype(np.int64)
quotes.dropna(inplace=True)
# actions
dividends, splits = fc.parseEvents(data["chart"]["result"][0])
# combine
df = pd.concat([quotes, dividends, splits], axis=1, sort=True)
df["Dividends"].fillna(0, inplace=True)
df["Stock Splits"].fillna(0, inplace=True)
# index eod/intraday
df.index = df.index.tz_localize("UTC").tz_convert(
data["chart"]["result"][0]["meta"]["exchangeTimezoneName"])
df.index = pd.to_datetime(df.index.date)
df.index.name = "Date"
self.history = df.copy()
if not actions:
df.drop(columns=["Dividends", "Stock Splits"], inplace=True)
return df.drop_duplicates()
def getDividends(self, proxy=None):
if self.history is None:
self.getHistory(period="max", proxy=proxy)
dividends = self.history["Dividends"]
return dividends[dividends != 0]
def getSplits(self, proxy=None):
if self.history is None:
self.getHistory(period="max", proxy=proxy)
splits = self.history["Stock Splits"]
return splits[splits != 0]
def getEvents(self, proxy=None):
if self.history is None:
self.getHistory(period="max", proxy=proxy)
actions = self.history[["Dividends", "Stock Splits"]]
return actions[actions != 0].dropna(how='all').fillna(0)
def getInfo(self):
url = "{}/quote/{}".format(self.baseUrl, self.ticker)
if shared.show_url: print('Connection request: {}'.format(url))
try:
if shared.use_cache & shared.session_cache.keyExists(url):
data = shared.session_cache.read(url)
else:
data = requests.get(url=url)
if "Server" in data.text:
raise RuntimeError("Data provider is currently down!")
data = data.json()
shared.session_cache.add(key=url, var=data)
except:
data = {"index": 0,
"Ticker": None,
"ISIN": None,
"Long_Name": None,
"Website": None,
"Region": None,
"Quote_Type": None,
"Currency": None,
"Exchange": None}
return namedtuple('Info', ['ticker', 'ISIN', 'longName',
'website', 'region', 'quoteType',
'currency', 'exchange'])(**{
"ticker": data['Ticker'],
"ISIN": data['ISIN'],
"longName": data['Long_Name'],
"website": data['Website'],
"region": data['Region'],
"quoteType": data['Quote_Type'],
"currency": data['Currency'],
"exchange": data['Exchange']
})
| en | 0.68538 | core.py Created by <NAME> at 31/08/2020, University of Milano-Bicocca. (<EMAIL>) All rights reserved. This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library), and is released under the "BSD Open Source License". :Parameters: period : str Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max Either Use period parameter or use start and end interval : str Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo Intraday data cannot extend last 60 days start: str Download start date string (YYYY-MM-DD) or datetime. Default is 1900-01-01 end: str Download end date string (YYYY-MM-DD) or datetime. Default is now prepost : bool Include Pre and Post market data in results? Default is False autoAdjust: bool Adjust all OHLC automatically? Default is True backAdjust: bool Back-adjusted data to mimic true historical prices proxy: str Optional. Proxy server URL scheme. Default is None rounding: bool Round values to 2 decimal places? Optional. Default is False = precision suggested by Yahoo! tz: str Optional timezone locale for dates. (default data is returned as non-localized dates) **kwargs: dict debug: bool Optional. If passed as False, will suppress error message printing to console. # setup proxy in requests format # Getting data from json # Clean up errors # parse quotes # 2) fix weired bug with Yahoo! - returning 60m for 30m bars # actions # combine # index eod/intraday | 2.511901 | 3 |
termenu/ansi.py | elifiner/termenu | 23 | 6613390 | from __future__ import print_function
import errno
import sys
import re
import os
# Base ANSI color codes: add 30 for a foreground code, 40 for a background code.
COLORS = dict(black=0, red=1, green=2, yellow=3, blue=4, magenta=5, cyan=6, white=7, default=9)
def write(text):
    """Write *text* to stdout via the raw file descriptor, retrying on EAGAIN.

    The text is UTF-8 encoded once up front and the remaining *bytes* are
    retried; the previous version sliced the character string by the byte
    count returned from os.write, which mis-slices multi-byte UTF-8
    characters on a partial write.
    """
    data = text.encode("utf8")
    written = 0
    fd = sys.stdout.fileno()
    while written < len(data):
        try:
            written += os.write(fd, data[written:])
        except OSError as e:
            # Non-blocking stdout may refuse the write; retry until it drains.
            if e.errno != errno.EAGAIN:
                raise
def up(n=1):
    """Move the cursor n rows up."""
    write("\x1b[%dA" % n)
def down(n=1):
    """Move the cursor n rows down."""
    write("\x1b[%dB" % n)
def forward(n=1):
    """Move the cursor n columns forward (right)."""
    write("\x1b[%dC" % n)
def back(n=1):
    """Move the cursor n columns back (left)."""
    write("\x1b[%dD" % n)
def move_horizontal(column=1):
    """Move the cursor to the given 1-based column on the current row."""
    write("\x1b[%dG" % column)
def move(row, column):
    """Move the cursor to the given 1-based row and column."""
    write("\x1b[%d;%dH" % (row, column))
def clear_screen():
    """Clear the entire screen."""
    write("\x1b[2J")
def clear_eol():
    """Clear from the cursor to the end of the current line."""
    write("\x1b[0K")
def clear_line():
    """Clear the entire current line."""
    write("\x1b[2K")
def save_position():
    """Save the current cursor position (restore with restore_position)."""
    write("\x1b[s")
def restore_position():
    """Restore the cursor position saved by save_position."""
    write("\x1b[u")
def hide_cursor():
    """Make the cursor invisible."""
    write("\x1b[?25l")
def show_cursor():
    """Make the cursor visible again."""
    write("\x1b[?25h")
def colorize(string, color, background=None, bright=False):
color = 30 + COLORS.get(color, COLORS["default"])
background = 40 + COLORS.get(background, COLORS["default"])
return "\x1b[0;%d;%d;%dm%s\x1b[0;m" % (int(bright), color, background, string)
def highlight(string, background):
# adds background to a string, even if it's already colorized
background = 40 + COLORS.get(background, COLORS["default"])
bkcmd = "\x1b[%dm" % background
stopcmd = "\x1b[m"
return bkcmd + string.replace(stopcmd, stopcmd + bkcmd) + stopcmd
ANSI_COLOR_REGEX = "\x1b\[(\d+)?(;\d+)*;?m"
def decolorize(string):
return re.sub(ANSI_COLOR_REGEX, "", string)
class ansistr(str):
def __init__(self, s):
if not isinstance(s, str):
s = str(s)
self.__str = s
self.__parts = [m.span() for m in re.finditer("(%s)|(.)" % ANSI_COLOR_REGEX, s)]
self.__len = sum(1 if p[1]-p[0]==1 else 0 for p in self.__parts)
def __len__(self):
return self.__len
def __getslice__(self, i, j):
parts = []
count = 0
for start, end in self.__parts:
if end - start == 1:
count += 1
if i <= count < j:
parts.append(self.__str[start:end])
else:
parts.append(self.__str[start:end])
return ansistr("".join(parts))
def __add__(self, s):
return ansistr(self.__str + s)
def decolorize(self):
return decolorize(self.__str)
if __name__ == "__main__":
# Print all colors
colors = [name for name, color in sorted(COLORS.items(), key=lambda v: v[1])]
for bright in [False, True]:
for background in colors:
for color in colors:
print(colorize("Hello World!", color, background, bright))
| from __future__ import print_function
import errno
import sys
import re
import os
COLORS = dict(black=0, red=1, green=2, yellow=3, blue=4, magenta=5, cyan=6, white=7, default=9)
def write(text):
written = 0
fd = sys.stdout.fileno()
while written < len(text):
remains = text[written:].encode("utf8")
try:
written += os.write(fd, remains)
except OSError as e:
if e.errno != errno.EAGAIN:
raise
def up(n=1):
write("\x1b[%dA" % n)
def down(n=1):
write("\x1b[%dB" % n)
def forward(n=1):
write("\x1b[%dC" % n)
def back(n=1):
write("\x1b[%dD" % n)
def move_horizontal(column=1):
write("\x1b[%dG" % column)
def move(row, column):
write("\x1b[%d;%dH" % (row, column))
def clear_screen():
write("\x1b[2J")
def clear_eol():
write("\x1b[0K")
def clear_line():
write("\x1b[2K")
def save_position():
write("\x1b[s")
def restore_position():
write("\x1b[u")
def hide_cursor():
write("\x1b[?25l")
def show_cursor():
write("\x1b[?25h")
def colorize(string, color, background=None, bright=False):
color = 30 + COLORS.get(color, COLORS["default"])
background = 40 + COLORS.get(background, COLORS["default"])
return "\x1b[0;%d;%d;%dm%s\x1b[0;m" % (int(bright), color, background, string)
def highlight(string, background):
# adds background to a string, even if it's already colorized
background = 40 + COLORS.get(background, COLORS["default"])
bkcmd = "\x1b[%dm" % background
stopcmd = "\x1b[m"
return bkcmd + string.replace(stopcmd, stopcmd + bkcmd) + stopcmd
ANSI_COLOR_REGEX = "\x1b\[(\d+)?(;\d+)*;?m"
def decolorize(string):
return re.sub(ANSI_COLOR_REGEX, "", string)
class ansistr(str):
def __init__(self, s):
if not isinstance(s, str):
s = str(s)
self.__str = s
self.__parts = [m.span() for m in re.finditer("(%s)|(.)" % ANSI_COLOR_REGEX, s)]
self.__len = sum(1 if p[1]-p[0]==1 else 0 for p in self.__parts)
def __len__(self):
return self.__len
def __getslice__(self, i, j):
parts = []
count = 0
for start, end in self.__parts:
if end - start == 1:
count += 1
if i <= count < j:
parts.append(self.__str[start:end])
else:
parts.append(self.__str[start:end])
return ansistr("".join(parts))
def __add__(self, s):
return ansistr(self.__str + s)
def decolorize(self):
return decolorize(self.__str)
if __name__ == "__main__":
# Print all colors
colors = [name for name, color in sorted(COLORS.items(), key=lambda v: v[1])]
for bright in [False, True]:
for background in colors:
for color in colors:
print(colorize("Hello World!", color, background, bright))
| en | 0.879895 | # adds background to a string, even if it's already colorized # Print all colors | 2.911074 | 3 |
tests/test_cpu_decrement_instructions.py | Hexadorsimal/pynes | 1 | 6613391 | import unittest
from nes.processors.cpu import Cpu
from nes.bus import Bus
from nes.bus.devices.memory import Ram
class CpuDecrementInstructionsTestCase(unittest.TestCase):
def setUp(self):
bus = Bus()
bus.attach_device('RAM', Ram(256), 0, 256)
self.cpu = Cpu(bus)
def test_dec(self):
self.cpu.write(0x0000, 0x01)
instruction = self.cpu.decode(0xCE)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.read(0x0000), 0x00)
self.assertTrue(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
def test_dex(self):
self.cpu.x.value = 0x01
instruction = self.cpu.decode(0xCA)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.x.value, 0x00)
self.assertTrue(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
def test_dey(self):
self.cpu.y.value = 0x01
instruction = self.cpu.decode(0x88)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.y.value, 0x00)
self.assertTrue(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
if __name__ == '__main__':
unittest.main()
| import unittest
from nes.processors.cpu import Cpu
from nes.bus import Bus
from nes.bus.devices.memory import Ram
class CpuDecrementInstructionsTestCase(unittest.TestCase):
def setUp(self):
bus = Bus()
bus.attach_device('RAM', Ram(256), 0, 256)
self.cpu = Cpu(bus)
def test_dec(self):
self.cpu.write(0x0000, 0x01)
instruction = self.cpu.decode(0xCE)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.read(0x0000), 0x00)
self.assertTrue(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
def test_dex(self):
self.cpu.x.value = 0x01
instruction = self.cpu.decode(0xCA)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.x.value, 0x00)
self.assertTrue(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
def test_dey(self):
self.cpu.y.value = 0x01
instruction = self.cpu.decode(0x88)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.y.value, 0x00)
self.assertTrue(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
if __name__ == '__main__':
unittest.main()
| none | 1 | 2.881451 | 3 | |
runlast.py | AshwinHebbar314/stockdatausingapi | 0 | 6613392 | #%%
import datetime
# %%
f = open('dataset.txt', 'r')
op = eval(f.readline())
output = open("outputdata.csv" , 'w')
#%%
idxs = "Date,"
for i in range(0,5):
idxs += str(op[i]['Meta Data']['2. Symbol']) + ','
output.writelines(idxs.strip(',') + '\n')
#%%
for j in range(200):
edxs = ""
for i in range(0,5):
try:
edxs += str(op[i]['Weekly Time Series'][str(datetime.date(2021,9,10)-datetime.timedelta(7*j))]['4. close']) + ","
except KeyError:
edxs += 'NaN' + ','
#print(str(datetime.date(2021,9,10)-datetime.timedelta(7*j)))
outstr = str(datetime.date(2021,9,10)-datetime.timedelta(7*j))+ ',' + edxs.strip(',') + '\n'
output.writelines(outstr)
# %%
f.close()
output.close()
# %%
| #%%
import datetime
# %%
f = open('dataset.txt', 'r')
op = eval(f.readline())
output = open("outputdata.csv" , 'w')
#%%
idxs = "Date,"
for i in range(0,5):
idxs += str(op[i]['Meta Data']['2. Symbol']) + ','
output.writelines(idxs.strip(',') + '\n')
#%%
for j in range(200):
edxs = ""
for i in range(0,5):
try:
edxs += str(op[i]['Weekly Time Series'][str(datetime.date(2021,9,10)-datetime.timedelta(7*j))]['4. close']) + ","
except KeyError:
edxs += 'NaN' + ','
#print(str(datetime.date(2021,9,10)-datetime.timedelta(7*j)))
outstr = str(datetime.date(2021,9,10)-datetime.timedelta(7*j))+ ',' + edxs.strip(',') + '\n'
output.writelines(outstr)
# %%
f.close()
output.close()
# %%
| ru | 0.076159 | #%% # %% #%% #%% #print(str(datetime.date(2021,9,10)-datetime.timedelta(7*j))) # %% # %% | 2.405749 | 2 |
tests/test_delete.py | thomasborgen/storage-bucket | 4 | 6613393 | import uuid
import pytest
from google.api_core.exceptions import NotFound
from storage_bucket.delete import delete_bucket
def test_delete_bucket_function(deletable_bucket):
"""Delete bucket returns None."""
assert delete_bucket( # type: ignore
storage_bucket_name=deletable_bucket,
) is None
def test_delete_bucket_function_raises():
"""Does not exist raises NotFound exception."""
with pytest.raises(NotFound):
delete_bucket(storage_bucket_name=uuid.uuid1().hex)
| import uuid
import pytest
from google.api_core.exceptions import NotFound
from storage_bucket.delete import delete_bucket
def test_delete_bucket_function(deletable_bucket):
"""Delete bucket returns None."""
assert delete_bucket( # type: ignore
storage_bucket_name=deletable_bucket,
) is None
def test_delete_bucket_function_raises():
"""Does not exist raises NotFound exception."""
with pytest.raises(NotFound):
delete_bucket(storage_bucket_name=uuid.uuid1().hex)
| en | 0.768657 | Delete bucket returns None. # type: ignore Does not exist raises NotFound exception. | 2.314495 | 2 |
datatype_tester_files/datatype_tester.py | lineality/csv_to_dynamodb_aws_lambda_python_env | 0 | 6613394 | <filename>datatype_tester_files/datatype_tester.py
## Helper Colab cell to load a .csv and create a draft of a metadata-csv
# import library
import pandas as pd
import glob
# get name of file from user
# name_of_csv = input("What is the name of your .csv?")
# helper function
def make_metadata_csv(name_of_csv):
'''
This function makes a metadata_ file
with three kinds of data:
1. the pandas datatype for each column
2. the AWS dynamoDB data type for each column
3. an example of the data for each column
Requires: pandas as pd
'''
# load file into pandas dataframe
df = pd.read_csv( name_of_csv )
# extract list of column names from .csv
column_name_list = list(df.columns)
# # inspection
# print(column_name_list)
# make empty list for datatypes, the same size as the name-list
pandas_dtypes_list = [None]*len(column_name_list)
AWS_dtypes_list = [None]*len(column_name_list)
# make a list of example items for inspection
example_item_list = list(df.loc[0])
# extract the datatype of each column as recognized by pandas
for index, column_name in enumerate(column_name_list):
pandas_dtypes_list[index] = str(df[column_name].dtypes)
# # inspection
# print(pandas_dtypes_list)
# conversion_dictionary
conversion_dict = {
"object" : 'S',
"int64" : 'N',
"float64" : 'N',
"datetime64" : 'S',
"bool" : 'BOOL',
}
# convert to AWS-DynamoDB datatypes
# look up each item in converstion dictionary
# and put AWS value in new list
for index, column_name in enumerate(pandas_dtypes_list):
AWS_dtypes_list[index] = conversion_dict[column_name]
# make a dictionary of lists
type_dict = {'column_name': column_name_list,
'AWS_column_dtype': AWS_dtypes_list,
'pandas_column_dtype': pandas_dtypes_list,
'example_item_list': example_item_list,
}
# make a new pandas dataframe based on the dictionary of lists
df_meta = pd.DataFrame(type_dict)
# make file name for csv meta_data file (for AWS or for normal OS)
# slice to remove the first /tmp/ name
# new_name_of_csv = "/tmp/metadata_" + name_of_csv[5:]
# # for normal OS or python notebook
new_name_of_csv = "metadata_" + name_of_csv
'''
Below are two different versions of formatted output
in terms of the structure
of the resulting .csv file
'''
# # saving the dataframe
# df_meta.to_csv( new_name_of_csv )
# # saving the dataframe (alternate version)
df_meta.to_csv( new_name_of_csv , header=True, index=False)
# delete dataframes to save memory
#del df
#del df_meta
# end program
return None
# Helper Function
def make_primary_key_warning_flag_list(file_name):
# load file into pandas dataframe
df = pd.read_csv( file_name )
#############
# Make Flags
#############
mixed_datatype_flag = False
missing_data_flag = False
duplicate_data_flag = False
############################
# check mixed_datatype_flag
############################
if str(df.iloc[:, 0].dtype) == 'object':
mixed_datatype_flag = True
# # for terminal or inspection
# print( mixed_datatype_flag )
############################
# check missing_data_flag
############################
# check is na
if df.iloc[:, 0].isna().sum() != 0:
missing_data_flag = True
# check is null
if df.iloc[:, 0].isnull().sum() != 0:
missing_data_flag = True
# # for terminal or inspection
# print( missing_data_flag )
############################
# check duplicate_data_flag
############################
if ( df.iloc[:, 0].value_counts().sum() == len(df.iloc[:, 0].value_counts()) ) == False:
duplicate_data_flag = True
# # for terminal or inspection
# print( duplicate_data_flag )
#####################
# Make list of flags
#####################
warning_flag_list = []
if mixed_datatype_flag == True:
warning_flag_list.append( 'mixed_datatype_flag' )
if missing_data_flag == True:
warning_flag_list.append( 'missing_data_flag' )
if duplicate_data_flag == True:
warning_flag_list.append( 'duplicate_data_flag' )
###############################
# return list of warning flags
###############################
return print(warning_flag_list)
# in the case of a long wait, give the user some idea
# of the progress through the files (crude but works)
progress_counter = 0
# inspection
# print("progress Counter:")
# TODO
# import glob
list_of_csv_files = glob.glob('*.csv', recursive = True)
# iterate through all .rds files in directory
the_path = "."
for filename in list_of_csv_files:
# inspection
# print(filename)
# find AWS data types
make_metadata_csv(filename)
# make
make_primary_key_warning_flag_list(file_name)
# Show Progress:
progress_counter += 1
print(f"{ progress_counter }/{ len(list_of_csv_files) }")
# list of metadata files
list_of_metadata_files = glob.glob('metadata_*.csv', recursive = True)
# Yay!!
print("All Done!!")
# may take extra time to print
# print( "List of new files =
", glob.glob('metadata_*.csv', recursive = True) )
| <filename>datatype_tester_files/datatype_tester.py
## Helper Colab cell to load a .csv and create a draft of a metadata-csv
# import library
import pandas as pd
import glob
# get name of file from user
# name_of_csv = input("What is the name of your .csv?")
# helper function
def make_metadata_csv(name_of_csv):
'''
This function makes a metadata_ file
with three kinds of data:
1. the pandas datatype for each column
2. the AWS dynamoDB data type for each column
3. an example of the data for each column
Requires: pandas as pd
'''
# load file into pandas dataframe
df = pd.read_csv( name_of_csv )
# extract list of column names from .csv
column_name_list = list(df.columns)
# # inspection
# print(column_name_list)
# make empty list for datatypes, the same size as the name-list
pandas_dtypes_list = [None]*len(column_name_list)
AWS_dtypes_list = [None]*len(column_name_list)
# make a list of example items for inspection
example_item_list = list(df.loc[0])
# extract the datatype of each column as recognized by pandas
for index, column_name in enumerate(column_name_list):
pandas_dtypes_list[index] = str(df[column_name].dtypes)
# # inspection
# print(pandas_dtypes_list)
# conversion_dictionary
conversion_dict = {
"object" : 'S',
"int64" : 'N',
"float64" : 'N',
"datetime64" : 'S',
"bool" : 'BOOL',
}
# convert to AWS-DynamoDB datatypes
# look up each item in converstion dictionary
# and put AWS value in new list
for index, column_name in enumerate(pandas_dtypes_list):
AWS_dtypes_list[index] = conversion_dict[column_name]
# make a dictionary of lists
type_dict = {'column_name': column_name_list,
'AWS_column_dtype': AWS_dtypes_list,
'pandas_column_dtype': pandas_dtypes_list,
'example_item_list': example_item_list,
}
# make a new pandas dataframe based on the dictionary of lists
df_meta = pd.DataFrame(type_dict)
# make file name for csv meta_data file (for AWS or for normal OS)
# slice to remove the first /tmp/ name
# new_name_of_csv = "/tmp/metadata_" + name_of_csv[5:]
# # for normal OS or python notebook
new_name_of_csv = "metadata_" + name_of_csv
'''
Below are two different versions of formatted output
in terms of the structure
of the resulting .csv file
'''
# # saving the dataframe
# df_meta.to_csv( new_name_of_csv )
# # saving the dataframe (alternate version)
df_meta.to_csv( new_name_of_csv , header=True, index=False)
# delete dataframes to save memory
#del df
#del df_meta
# end program
return None
# Helper Function
def make_primary_key_warning_flag_list(file_name):
# load file into pandas dataframe
df = pd.read_csv( file_name )
#############
# Make Flags
#############
mixed_datatype_flag = False
missing_data_flag = False
duplicate_data_flag = False
############################
# check mixed_datatype_flag
############################
if str(df.iloc[:, 0].dtype) == 'object':
mixed_datatype_flag = True
# # for terminal or inspection
# print( mixed_datatype_flag )
############################
# check missing_data_flag
############################
# check is na
if df.iloc[:, 0].isna().sum() != 0:
missing_data_flag = True
# check is null
if df.iloc[:, 0].isnull().sum() != 0:
missing_data_flag = True
# # for terminal or inspection
# print( missing_data_flag )
############################
# check duplicate_data_flag
############################
if ( df.iloc[:, 0].value_counts().sum() == len(df.iloc[:, 0].value_counts()) ) == False:
duplicate_data_flag = True
# # for terminal or inspection
# print( duplicate_data_flag )
#####################
# Make list of flags
#####################
warning_flag_list = []
if mixed_datatype_flag == True:
warning_flag_list.append( 'mixed_datatype_flag' )
if missing_data_flag == True:
warning_flag_list.append( 'missing_data_flag' )
if duplicate_data_flag == True:
warning_flag_list.append( 'duplicate_data_flag' )
###############################
# return list of warning flags
###############################
return print(warning_flag_list)
# in the case of a long wait, give the user some idea
# of the progress through the files (crude but works)
progress_counter = 0
# inspection
# print("progress Counter:")
# TODO
# import glob
list_of_csv_files = glob.glob('*.csv', recursive = True)
# iterate through all .rds files in directory
the_path = "."
for filename in list_of_csv_files:
# inspection
# print(filename)
# find AWS data types
make_metadata_csv(filename)
# make
make_primary_key_warning_flag_list(file_name)
# Show Progress:
progress_counter += 1
print(f"{ progress_counter }/{ len(list_of_csv_files) }")
# list of metadata files
list_of_metadata_files = glob.glob('metadata_*.csv', recursive = True)
# Yay!!
print("All Done!!")
# may take extra time to print
# print( "List of new files =
", glob.glob('metadata_*.csv', recursive = True) )
| en | 0.557539 | ## Helper Colab cell to load a .csv and create a draft of a metadata-csv # import library # get name of file from user # name_of_csv = input("What is the name of your .csv?") # helper function This function makes a metadata_ file with three kinds of data: 1. the pandas datatype for each column 2. the AWS dynamoDB data type for each column 3. an example of the data for each column Requires: pandas as pd # load file into pandas dataframe # extract list of column names from .csv # # inspection # print(column_name_list) # make empty list for datatypes, the same size as the name-list # make a list of example items for inspection # extract the datatype of each column as recognized by pandas # # inspection # print(pandas_dtypes_list) # conversion_dictionary # convert to AWS-DynamoDB datatypes # look up each item in converstion dictionary # and put AWS value in new list # make a dictionary of lists # make a new pandas dataframe based on the dictionary of lists # make file name for csv meta_data file (for AWS or for normal OS) # slice to remove the first /tmp/ name # new_name_of_csv = "/tmp/metadata_" + name_of_csv[5:] # # for normal OS or python notebook Below are two different versions of formatted output in terms of the structure of the resulting .csv file # # saving the dataframe # df_meta.to_csv( new_name_of_csv ) # # saving the dataframe (alternate version) # delete dataframes to save memory #del df #del df_meta # end program # Helper Function # load file into pandas dataframe ############# # Make Flags ############# ############################ # check mixed_datatype_flag ############################ # # for terminal or inspection # print( mixed_datatype_flag ) ############################ # check missing_data_flag ############################ # check is na # check is null # # for terminal or inspection # print( missing_data_flag ) ############################ # check duplicate_data_flag ############################ # # for terminal or inspection # 
print( duplicate_data_flag ) ##################### # Make list of flags ##################### ############################### # return list of warning flags ############################### # in the case of a long wait, give the user some idea # of the progress through the files (crude but works) # inspection # print("progress Counter:") # TODO # import glob # iterate through all .rds files in directory # inspection # print(filename) # find AWS data types # make # Show Progress: # list of metadata files # Yay!! # may take extra time to print # print( "List of new files = | 3.766201 | 4 |
storjlib/__init__.py | Storj/storjlib | 0 | 6613395 | <reponame>Storj/storjlib
from . version import __version__ # NOQA
from . import log # NOQA
from . import common # NOQA
from . import contract # NOQA
from . import heartbeat # NOQA
from . import challenge # NOQA
from . import api # NOQA
from . import util # NOQA
from . import store # NOQA
from . import config # NOQA
| from . version import __version__ # NOQA
from . import log # NOQA
from . import common # NOQA
from . import contract # NOQA
from . import heartbeat # NOQA
from . import challenge # NOQA
from . import api # NOQA
from . import util # NOQA
from . import store # NOQA
from . import config # NOQA | ur | 0.236289 | # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA | 1.080756 | 1 |
euler/problem_9.py | paulliwali/python-exercises | 0 | 6613396 | <filename>euler/problem_9.py<gh_stars>0
import math
def pythagorean(a, b):
return math.sqrt(a**2 + b**2)
def required_for_thousand(a, b):
return (1000 - a - b)
def find_triplet():
for a in range(1, 1000):
for b in range(1, 1000):
print(a, b)
c = pythagorean(a, b)
if c == required_for_thousand(a, b):
return (a, b, c)
triplet = find_triplet()
answer = triplet[0] * triplet[1] * triplet[2]
print(answer)
| <filename>euler/problem_9.py<gh_stars>0
import math
def pythagorean(a, b):
return math.sqrt(a**2 + b**2)
def required_for_thousand(a, b):
return (1000 - a - b)
def find_triplet():
for a in range(1, 1000):
for b in range(1, 1000):
print(a, b)
c = pythagorean(a, b)
if c == required_for_thousand(a, b):
return (a, b, c)
triplet = find_triplet()
answer = triplet[0] * triplet[1] * triplet[2]
print(answer)
| none | 1 | 3.846712 | 4 | |
experiments/citi_ner/data_utils.py | JinqiaoGit/DeBERTa-NER | 0 | 6613397 | from typing import Dict, List, Tuple
import torch
def get_word2ix(trainset: List[Tuple[List[str], List[str]]]) -> Dict[str, int]:
"""
generate one-hot code of tokens
:param trainset: a list of tuple contains tokens and labels
:return: a dict contains the map between token and index
"""
# set <PAD> label as idx 0
word_to_ix: Dict[str, int] = {"<PAD>": 0}
for sentence, _ in trainset:
for word in sentence:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
return word_to_ix
def prepare_sequence(seq: List[str], to_ix: Dict[str, int], device='cpu') -> torch.Tensor:
"""
convert sequential word to the index in one-hot dictionary.
"""
idxs = [[to_ix[w] for w in seq]]
return torch.tensor(idxs, dtype=torch.long, device=device)
def data_refactor():
pass
| from typing import Dict, List, Tuple
import torch
def get_word2ix(trainset: List[Tuple[List[str], List[str]]]) -> Dict[str, int]:
"""
generate one-hot code of tokens
:param trainset: a list of tuple contains tokens and labels
:return: a dict contains the map between token and index
"""
# set <PAD> label as idx 0
word_to_ix: Dict[str, int] = {"<PAD>": 0}
for sentence, _ in trainset:
for word in sentence:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
return word_to_ix
def prepare_sequence(seq: List[str], to_ix: Dict[str, int], device='cpu') -> torch.Tensor:
"""
convert sequential word to the index in one-hot dictionary.
"""
idxs = [[to_ix[w] for w in seq]]
return torch.tensor(idxs, dtype=torch.long, device=device)
def data_refactor():
pass
| en | 0.820712 | generate one-hot code of tokens :param trainset: a list of tuple contains tokens and labels :return: a dict contains the map between token and index # set <PAD> label as idx 0 convert sequential word to the index in one-hot dictionary. | 2.969758 | 3 |
pautils/managers.py | simodalla/pympa-utils | 0 | 6613398 | <gh_stars>0
from django.db.models import Q, Manager
from django.utils.timezone import now
class PublishedManager(Manager):
def get_queryset(self):
qs = super(PublishedManager, self).get_queryset().filter(
Q(published_from__lte=now()),
Q(published_to__isnull=True) | Q(published_to__gte=now()),
)
return qs
| from django.db.models import Q, Manager
from django.utils.timezone import now
class PublishedManager(Manager):
def get_queryset(self):
qs = super(PublishedManager, self).get_queryset().filter(
Q(published_from__lte=now()),
Q(published_to__isnull=True) | Q(published_to__gte=now()),
)
return qs | none | 1 | 2.180881 | 2 | |
movie_geeks_django/awards/nested_serializers.py | maciejKusy/movie_geeks_django | 0 | 6613399 | from rest_framework import serializers
from .models import FilmAwardReceived
class ReceivedReceivedAwardSerializerForDisplayInLists(serializers.ModelSerializer):
"""
Serves to serialize/deserialize the FilmAwardReceived objects for lists of awards - only basic information
exposed to user.
"""
name = serializers.CharField()
awarded_for = serializers.CharField()
class Meta:
model = FilmAwardReceived
fields = ["id", "name", "awarded_for"]
| from rest_framework import serializers
from .models import FilmAwardReceived
class ReceivedReceivedAwardSerializerForDisplayInLists(serializers.ModelSerializer):
"""
Serves to serialize/deserialize the FilmAwardReceived objects for lists of awards - only basic information
exposed to user.
"""
name = serializers.CharField()
awarded_for = serializers.CharField()
class Meta:
model = FilmAwardReceived
fields = ["id", "name", "awarded_for"]
| en | 0.856731 | Serves to serialize/deserialize the FilmAwardReceived objects for lists of awards - only basic information exposed to user. | 2.63592 | 3 |
src/get_post.py | gadavivi/polly-kubeless | 1 | 6613400 | <filename>src/get_post.py<gh_stars>1-10
import json
import boto3
import os
from boto3.dynamodb.conditions import Key, Attr
def get_post(event, context):
postId = event['extensions']['request'].query['postId']
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
if postId == '*':
result = table.scan()
else:
result = table.query(
KeyConditionExpression=Key('id').eq(postId)
)
response = {
"statusCode": 200,
"headers": {
"Access-Control-Allow-Origin": "*"
},
"body": json.dumps(result['Items'])
}
return response | <filename>src/get_post.py<gh_stars>1-10
import json
import boto3
import os
from boto3.dynamodb.conditions import Key, Attr
def get_post(event, context):
postId = event['extensions']['request'].query['postId']
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
if postId == '*':
result = table.scan()
else:
result = table.query(
KeyConditionExpression=Key('id').eq(postId)
)
response = {
"statusCode": 200,
"headers": {
"Access-Control-Allow-Origin": "*"
},
"body": json.dumps(result['Items'])
}
return response | none | 1 | 2.166581 | 2 | |
client/src/main/python/slipstream/Client.py | slipstream/SlipStreamClient | 0 | 6613401 | """
SlipStream Client
=====
Copyright (C) 2013 SixSq Sarl (sixsq.com)
=====
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import time
import subprocess
from multiprocessing.dummy import Pool as ThreadPool
from SlipStreamHttpClient import SlipStreamHttpClient
from exceptions.Exceptions import NotYetSetException
from exceptions.Exceptions import TimeoutException
from exceptions.Exceptions import ClientError
from exceptions.Exceptions import AbortException
from NodeDecorator import NodeDecorator
import slipstream.util as util
class Client(object):
    """Client-side facade for interacting with a SlipStream server:
    runtime-parameter get/set, run launching and state queries, and
    local shell command execution."""

    # Local scratch and reports directories (re-exported from slipstream.util
    # so callers can reach them through the Client class).
    TMPDIR = util.TMPDIR
    REPORTSDIR = util.REPORTSDIR
    WINDOWS_REPORTSDIR = util.WINDOWS_REPORTSDIR
    # Module categories (re-exported from NodeDecorator).
    IMAGE = NodeDecorator.IMAGE
    DEPLOYMENT = NodeDecorator.DEPLOYMENT

    # Maximum accepted runtime-parameter value length.
    VALUE_LENGTH_LIMIT = 4096  # from RuntimeParameter class on server
    def __init__(self, ch):
        """Build a client around the given configuration holder.

        :param ch: configuration holder; expected to expose `context`,
                   `set()`, `assignConfigAndOptions()` and optionally
                   `endpoint`/`serviceurl` (see ConfigHolder).
        """
        # Behavioural defaults; assignConfigAndOptions() below presumably
        # overrides them from the user's configuration/options -- TODO confirm
        # against ConfigHolder.
        self.no_block = True
        self.ignoreAbort = False
        self.timeout = 30
        self.verboseLevel = 1
        self.verboseThreshold = 1

        # Derive 'serviceurl' from 'endpoint' when only the latter is present.
        if not hasattr(ch, 'serviceurl') and hasattr(ch, 'endpoint'):
            ch.set('serviceurl', ch.endpoint)
        self.ch = ch
        ch.assignConfigAndOptions(self)

        self.context = ch.context
        self.httpClient = SlipStreamHttpClient(ch)
    def login(self, username, password):
        """Authenticate against the SlipStream server with the given
        credentials (delegates to the HTTP client)."""
        self.httpClient.login(username, password)
    def logout(self):
        """Terminate the current server session (delegates to the HTTP
        client)."""
        self.httpClient.logout()
    def _loadModule(self, moduleName):
        """Dynamically load and return the named Python module (delegates
        to slipstream.util.loadModule)."""
        return util.loadModule(moduleName)
    def getRuntimeParameter(self, key):
        """Return the value of runtime parameter `key`.

        The key is first qualified with the current node instance name
        (see _qualifyKey).  In non-blocking mode (self.no_block) a single
        fetch is done, returning None when the parameter is not yet set.
        Otherwise the server is polled every 5 seconds until the parameter
        is set, raising TimeoutException once self.timeout seconds have
        been spent waiting (a timeout of 0 means wait forever).
        """
        value = None
        _key = self._qualifyKey(key)

        if self.no_block:
            value = self._getRuntimeParameter(_key)
        else:
            timer = 0
            while True:
                value = self._getRuntimeParameter(_key)
                if value is not None:
                    break
                if self.timeout != 0 and timer >= self.timeout:
                    raise TimeoutException(
                        "Exceeded timeout limit of %s waiting for key '%s' "
                        "to be set" % (self.timeout, _key))
                print >> sys.stderr, "Waiting for %s" % _key
                # NOTE(review): the waiting message goes to stderr but it is
                # stdout that gets flushed -- presumably to push out earlier
                # buffered output; confirm intent.
                sys.stdout.flush()
                sleepTime = 5
                time.sleep(sleepTime)
                timer += sleepTime
        return value
    def launchDeployment(self, params):
        """
        Launch a new deployment run on the server.

        @param params: launch parameters, passed through to the HTTP client.
        @return: Run location
        @rtype: {str}
        """
        return self.httpClient.launchDeployment(params)
def is_run_aborted(self, run_uuid):
try:
self.httpClient.getRunState(run_uuid, ignoreAbort=False)
return False
except AbortException:
return True
    def getRunState(self, uuid, ignoreAbort=True):
        """Return the state of run `uuid` (delegates to the HTTP client).

        With ignoreAbort=False the HTTP client raises AbortException for
        an aborted run (see is_run_aborted).
        """
        return self.httpClient.getRunState(uuid, ignoreAbort=ignoreAbort)
    def _qualifyKey(self, key):
        """Qualify `key`, if not already done, with the right nodename.

        Fully qualified keys have the form '<nodename>.<index>:<property>'.
        Keys for reserved/special node names and orchestrators are returned
        unchanged; node-level properties ('multiplicity', 'ids') are
        qualified with the bare node name (no instance index).
        """

        # Properties that belong to the node as a whole, not to a single
        # node instance.
        node_level_properties = ['multiplicity', 'ids']

        _key = key

        # Is the key namespaced (i.e. contains node/key separator: ':')?
        if NodeDecorator.NODE_PROPERTY_SEPARATOR in _key:
            # Is this a reserved or special nodename?  Those are never
            # qualified further.
            for reserved in NodeDecorator.reservedNodeNames:
                if _key.startswith(reserved + NodeDecorator.NODE_PROPERTY_SEPARATOR):
                    return _key

            # Get node (instance) name and the key parts.
            parts = _key.split(NodeDecorator.NODE_PROPERTY_SEPARATOR)
            nodenamePart = parts[0]
            # NOTE(review): with more than one separator in the key, parts
            # beyond index 1 are silently dropped -- confirm keys never
            # contain multiple separators.
            propertyPart = parts[1]  # safe since we've done the test in the if above

            # Is this an orchestrator? We don't qualify orchestrator
            # parameter names.
            if NodeDecorator.is_orchestrator_name(nodenamePart):
                return _key
            # Is the nodename in the form: <nodename>.<index>? If not, make it so
            # such that <nodename>:<property> -> <nodename>.1:<property>
            parts = nodenamePart.split(NodeDecorator.NODE_MULTIPLICITY_SEPARATOR)
            nodename = parts[0]
            # multiplicity parameter should NOT be qualified: make an exception
            # for node-level properties.
            if len(parts) == 1 and propertyPart not in node_level_properties:
                _key = nodename + \
                    NodeDecorator.NODE_MULTIPLICITY_SEPARATOR + \
                    NodeDecorator.nodeMultiplicityStartIndex + \
                    NodeDecorator.NODE_PROPERTY_SEPARATOR + \
                    propertyPart
            return _key

        # Un-namespaced key: qualify with this client's own node instance
        # name (or bare node name for node-level properties).
        if _key not in node_level_properties:
            _key = self._getNodeName() + NodeDecorator.NODE_PROPERTY_SEPARATOR + _key
        else:
            parts = self._getNodeName().split(NodeDecorator.NODE_MULTIPLICITY_SEPARATOR)
            nodename = parts[0]
            _key = nodename + NodeDecorator.NODE_PROPERTY_SEPARATOR + _key

        return _key
    def setNodeName(self, value):
        # Record this node instance's name in the local context.
        self.context[NodeDecorator.NODE_INSTANCE_NAME_KEY] = value
    def _getNodeName(self):
        # Name of the node instance this client acts for.
        return self.context[NodeDecorator.NODE_INSTANCE_NAME_KEY]
    def _getRuntimeParameter(self, key):
        # Context-only keys are answered locally, without a server call.
        special_keys = [NodeDecorator.NODE_INSTANCE_NAME_KEY]
        if key in special_keys:
            return self.context[key]
        try:
            return self.httpClient.getRuntimeParameter(key, self.ignoreAbort)
        except NotYetSetException:
            # The parameter exists but has no value yet.
            return None
    def setRuntimeParameter(self, key, value):
        _key = self._qualifyKey(key)
        # Strip ANSI escape sequences before length-checking and uploading.
        stripped_value = util.removeASCIIEscape(value)
        if stripped_value and len(stripped_value) > self.VALUE_LENGTH_LIMIT:
            raise ClientError("value exceeds maximum length of %d characters" % self.VALUE_LENGTH_LIMIT)
        self.httpClient.setRuntimeParameter(_key, stripped_value)
    def cancel_abort(self):
        # Clear both the global abort flag and this node's abort flag.
        # Global abort
        self.httpClient.unset_runtime_parameter(NodeDecorator.globalNamespacePrefix + NodeDecorator.ABORT_KEY,
                                                ignore_abort=True)
        _key = self._qualifyKey(NodeDecorator.ABORT_KEY)
        self.httpClient.unset_runtime_parameter(_key, ignore_abort=True)
    def executScript(self, script):
        # NOTE: the historical typo in the name ("executScript") is kept
        # for backwards compatibility with existing callers.
        return self._systemCall(script, retry=False)
    def _systemCall(self, cmd, retry=True):
        """
        Execute system call and return stdout.
        Raise an exception if the command fails
        """
        self._printStep('Executing command: %s' % cmd)
        p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, close_fds=True)
        (child_stdin, child_stdout) = (p.stdin, p.stdout)
        child_stdin = child_stdin
        stdout = []
        # Stream the child's stdout, echoing each chunk while collecting
        # it for the caller.
        while True:
            out = child_stdout.readlines(1)
            if not out:
                break
            stdout.extend(out)
            sys.stdout.writelines(out)
        returnCode = p.wait()
        if returnCode:
            if retry:
                # A single retry, then give up.
                return self._systemCall(cmd, False)
            else:
                raise ClientError("Error executing command '%s', with error "
                                  "code: %s" % (cmd, returnCode))
        return stdout
    def _printStep(self, message):
        util.printStep(message)
    def _printDetail(self, message):
        # Honour the configured verbosity before printing.
        util.printDetail(message, self.verboseLevel, self.verboseThreshold)
    def getCategory(self):
        # Category of the current run (e.g. image build vs deployment).
        return self.httpClient.get_run_category()
    def fail(self, message):
        # Flag this node as aborted with the given message.
        abort = self._qualifyKey(NodeDecorator.ABORT_KEY)
        self.httpClient.setRuntimeParameter(abort, message)
    def complete_state(self):
        # Advance this node's state machine on the server.
        nodeName = self._getNodeName()
        self.httpClient.complete_state(nodeName)
    def terminateRun(self):
        # Delete the run resource on the server.
        self.httpClient._httpDelete(self.httpClient.run_url)
    def getGlobalAbortMessage(self):
        return self.httpClient.getGlobalAbortMessage()
    def get_server_configuration(self):
        return self.httpClient.get_server_configuration()
    def _get_params_list(self, compname, key):
        # Expand `compname` into fully qualified '<comp>.<id>:<key>'
        # parameter names, one per running instance id.
        ids_param = '%s:ids' % compname
        ids = self.httpClient.getRuntimeParameter(ids_param).split(',')
        return ['%s.%s:%s' % (compname, i, key) for i in ids]
    def _get_rtp(self, param):
        # Fetch a single runtime parameter with a fresh client; worker
        # function for the thread pool in get_rtp_all().
        client = Client(self.ch)
        client.timeout_raise = False
        try:
            self._printDetail("%s : Get RPT." % param)
            t0 = time.time()
            val = client.getRuntimeParameter(param)
            self._printDetail("%s : Time to get RTP %s sec." %
                              (param, (time.time() - t0)))
            return val
        except TimeoutException as ex:
            print >> sys.stderr, ex.arg
            return ''
    def get_rtp_all(self, compname, key):
        "Get RTP `key` from all instances of `compname`."
        POOL_MAX = 9
        params = self._get_params_list(compname, key)
        nparams = len(params)
        pool_size = min(POOL_MAX, nparams)
        self._printDetail("Get %s RTP instances with pool size: %s" %
                          (nparams, pool_size))
        pool = ThreadPool(pool_size)
        results = pool.map(self._get_rtp, params)
        # Normalise None results to empty strings.
        results = [v or '' for v in results]
        pool.close()
        pool.join()
        return zip(params, results)
    def get_session(self):
        return self.httpClient.get_session()
def get_api(self):
return self.httpClient.get_api() | """
SlipStream Client
=====
Copyright (C) 2013 SixSq Sarl (sixsq.com)
=====
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import time
import subprocess
from multiprocessing.dummy import Pool as ThreadPool
from SlipStreamHttpClient import SlipStreamHttpClient
from exceptions.Exceptions import NotYetSetException
from exceptions.Exceptions import TimeoutException
from exceptions.Exceptions import ClientError
from exceptions.Exceptions import AbortException
from NodeDecorator import NodeDecorator
import slipstream.util as util
class Client(object):
    """Client wrapper around SlipStreamHttpClient.

    Provides run-parameter access, state handling and local process
    execution for node instances taking part in a SlipStream run.
    """
    TMPDIR = util.TMPDIR
    REPORTSDIR = util.REPORTSDIR
    WINDOWS_REPORTSDIR = util.WINDOWS_REPORTSDIR
    IMAGE = NodeDecorator.IMAGE
    DEPLOYMENT = NodeDecorator.DEPLOYMENT
    VALUE_LENGTH_LIMIT = 4096  # from RuntimeParameter class on server
    def __init__(self, ch):
        self.no_block = True
        self.ignoreAbort = False
        self.timeout = 30
        self.verboseLevel = 1
        self.verboseThreshold = 1
        # Older config holders expose `endpoint` rather than `serviceurl`.
        if not hasattr(ch, 'serviceurl') and hasattr(ch, 'endpoint'):
            ch.set('serviceurl', ch.endpoint)
        self.ch = ch
        ch.assignConfigAndOptions(self)
        self.context = ch.context
        self.httpClient = SlipStreamHttpClient(ch)
    def login(self, username, password):
        self.httpClient.login(username, password)
    def logout(self):
        self.httpClient.logout()
    def _loadModule(self, moduleName):
        return util.loadModule(moduleName)
    def getRuntimeParameter(self, key):
        # Fetch a runtime parameter; when blocking (no_block is False),
        # poll every 5 seconds until the key is set or self.timeout is
        # exceeded (timeout of 0 means wait forever).
        value = None
        _key = self._qualifyKey(key)
        if self.no_block:
            value = self._getRuntimeParameter(_key)
        else:
            timer = 0
            while True:
                value = self._getRuntimeParameter(_key)
                if value is not None:
                    break
                if self.timeout != 0 and timer >= self.timeout:
                    raise TimeoutException(
                        "Exceeded timeout limit of %s waiting for key '%s' "
                        "to be set" % (self.timeout, _key))
                print >> sys.stderr, "Waiting for %s" % _key
                sys.stdout.flush()
                sleepTime = 5
                time.sleep(sleepTime)
                timer += sleepTime
        return value
    def launchDeployment(self, params):
        """Start a new run on the server.

        @return: Run location
        @rtype: {str}
        """
        return self.httpClient.launchDeployment(params)
    def is_run_aborted(self, run_uuid):
        # A run counts as aborted when querying its state with
        # ignoreAbort=False raises AbortException.
        try:
            self.httpClient.getRunState(run_uuid, ignoreAbort=False)
            return False
        except AbortException:
            return True
    def getRunState(self, uuid, ignoreAbort=True):
        # Thin proxy to the HTTP client; the abort flag is ignored by default.
        return self.httpClient.getRunState(uuid, ignoreAbort=ignoreAbort)
    def _qualifyKey(self, key):
        """Qualify the key, if not already done, with the right nodename"""
        # Parameters that live on the node itself, not on a node instance.
        node_level_properties = ['multiplicity', 'ids']
        _key = key
        # Is the key namespaced (i.e. contains node/key separator: ':')?
        if NodeDecorator.NODE_PROPERTY_SEPARATOR in _key:
            # Is this a reserved or special nodename?
            for reserved in NodeDecorator.reservedNodeNames:
                if _key.startswith(reserved + NodeDecorator.NODE_PROPERTY_SEPARATOR):
                    return _key
            # Get node (instance) name and the key parts.
            parts = _key.split(NodeDecorator.NODE_PROPERTY_SEPARATOR)
            nodenamePart = parts[0]
            propertyPart = parts[1]  # safe since we've done the test in the if above
            # Is this an orchestrator? We don't qualify orchestrator
            # parameter names.
            if NodeDecorator.is_orchestrator_name(nodenamePart):
                return _key
            # Is the nodename in the form: <nodename>.<index>? If not, make it so
            # such that <nodename>:<property> -> <nodename>.1:<property
            parts = nodenamePart.split(NodeDecorator.NODE_MULTIPLICITY_SEPARATOR)
            nodename = parts[0]
            # multiplicity parameter should NOT be qualified make an exception
            if len(parts) == 1 and propertyPart not in node_level_properties:
                _key = nodename + \
                    NodeDecorator.NODE_MULTIPLICITY_SEPARATOR + \
                    NodeDecorator.nodeMultiplicityStartIndex + \
                    NodeDecorator.NODE_PROPERTY_SEPARATOR + \
                    propertyPart
            return _key
        # Unqualified key: prefix it with this node instance's name
        # (node-level properties get the bare node name instead).
        if _key not in node_level_properties:
            _key = self._getNodeName() + NodeDecorator.NODE_PROPERTY_SEPARATOR + _key
        else:
            parts = self._getNodeName().split(NodeDecorator.NODE_MULTIPLICITY_SEPARATOR)
            nodename = parts[0]
            _key = nodename + NodeDecorator.NODE_PROPERTY_SEPARATOR + _key
        return _key
    def setNodeName(self, value):
        # Record this node instance's name in the local context.
        self.context[NodeDecorator.NODE_INSTANCE_NAME_KEY] = value
    def _getNodeName(self):
        # Name of the node instance this client acts for.
        return self.context[NodeDecorator.NODE_INSTANCE_NAME_KEY]
    def _getRuntimeParameter(self, key):
        # Context-only keys are answered locally, without a server call.
        special_keys = [NodeDecorator.NODE_INSTANCE_NAME_KEY]
        if key in special_keys:
            return self.context[key]
        try:
            return self.httpClient.getRuntimeParameter(key, self.ignoreAbort)
        except NotYetSetException:
            # The parameter exists but has no value yet.
            return None
    def setRuntimeParameter(self, key, value):
        _key = self._qualifyKey(key)
        # Strip ANSI escape sequences before length-checking and uploading.
        stripped_value = util.removeASCIIEscape(value)
        if stripped_value and len(stripped_value) > self.VALUE_LENGTH_LIMIT:
            raise ClientError("value exceeds maximum length of %d characters" % self.VALUE_LENGTH_LIMIT)
        self.httpClient.setRuntimeParameter(_key, stripped_value)
    def cancel_abort(self):
        # Clear both the global abort flag and this node's abort flag.
        # Global abort
        self.httpClient.unset_runtime_parameter(NodeDecorator.globalNamespacePrefix + NodeDecorator.ABORT_KEY,
                                                ignore_abort=True)
        _key = self._qualifyKey(NodeDecorator.ABORT_KEY)
        self.httpClient.unset_runtime_parameter(_key, ignore_abort=True)
    def executScript(self, script):
        # NOTE: the historical typo in the name ("executScript") is kept
        # for backwards compatibility with existing callers.
        return self._systemCall(script, retry=False)
    def _systemCall(self, cmd, retry=True):
        """
        Execute system call and return stdout.
        Raise an exception if the command fails
        """
        self._printStep('Executing command: %s' % cmd)
        p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, close_fds=True)
        (child_stdin, child_stdout) = (p.stdin, p.stdout)
        child_stdin = child_stdin
        stdout = []
        # Stream the child's stdout, echoing each chunk while collecting
        # it for the caller.
        while True:
            out = child_stdout.readlines(1)
            if not out:
                break
            stdout.extend(out)
            sys.stdout.writelines(out)
        returnCode = p.wait()
        if returnCode:
            if retry:
                # A single retry, then give up.
                return self._systemCall(cmd, False)
            else:
                raise ClientError("Error executing command '%s', with error "
                                  "code: %s" % (cmd, returnCode))
        return stdout
    def _printStep(self, message):
        util.printStep(message)
    def _printDetail(self, message):
        # Honour the configured verbosity before printing.
        util.printDetail(message, self.verboseLevel, self.verboseThreshold)
    def getCategory(self):
        # Category of the current run (e.g. image build vs deployment).
        return self.httpClient.get_run_category()
    def fail(self, message):
        # Flag this node as aborted with the given message.
        abort = self._qualifyKey(NodeDecorator.ABORT_KEY)
        self.httpClient.setRuntimeParameter(abort, message)
    def complete_state(self):
        # Advance this node's state machine on the server.
        nodeName = self._getNodeName()
        self.httpClient.complete_state(nodeName)
    def terminateRun(self):
        # Delete the run resource on the server.
        self.httpClient._httpDelete(self.httpClient.run_url)
    def getGlobalAbortMessage(self):
        return self.httpClient.getGlobalAbortMessage()
    def get_server_configuration(self):
        return self.httpClient.get_server_configuration()
    def _get_params_list(self, compname, key):
        # Expand `compname` into fully qualified '<comp>.<id>:<key>'
        # parameter names, one per running instance id.
        ids_param = '%s:ids' % compname
        ids = self.httpClient.getRuntimeParameter(ids_param).split(',')
        return ['%s.%s:%s' % (compname, i, key) for i in ids]
    def _get_rtp(self, param):
        # Fetch a single runtime parameter with a fresh client; worker
        # function for the thread pool in get_rtp_all().
        client = Client(self.ch)
        client.timeout_raise = False
        try:
            self._printDetail("%s : Get RPT." % param)
            t0 = time.time()
            val = client.getRuntimeParameter(param)
            self._printDetail("%s : Time to get RTP %s sec." %
                              (param, (time.time() - t0)))
            return val
        except TimeoutException as ex:
            print >> sys.stderr, ex.arg
            return ''
    def get_rtp_all(self, compname, key):
        "Get RTP `key` from all instances of `compname`."
        POOL_MAX = 9
        params = self._get_params_list(compname, key)
        nparams = len(params)
        pool_size = min(POOL_MAX, nparams)
        self._printDetail("Get %s RTP instances with pool size: %s" %
                          (nparams, pool_size))
        pool = ThreadPool(pool_size)
        results = pool.map(self._get_rtp, params)
        # Normalise None results to empty strings.
        results = [v or '' for v in results]
        pool.close()
        pool.join()
        return zip(params, results)
    def get_session(self):
        return self.httpClient.get_session()
def get_api(self):
return self.httpClient.get_api() | en | 0.767875 | SlipStream Client ===== Copyright (C) 2013 SixSq Sarl (sixsq.com) ===== Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # from RuntimeParameter class on server @return: Run location @rtype: {str} Qualify the key, if not already done, with the right nodename # Is the key namespaced (i.e. contains node/key separator: ':')? # Is this a reserved or special nodename? # Get node (instance) name and the key parts. # safe since we've done the test in the if above # Is this an orchestrator? We don't qualify orchestrator # parameter names. # Is the nodename in the form: <nodename>.<index>? If not, make it so # such that <nodename>:<property> -> <nodename>.1:<property # multiplicity parameter should NOT be qualified make an exception # Global abort Execute system call and return stdout. Raise an exception if the command fails | 1.904411 | 2 |
pysnooper-binary-search-example.py | jinhopark8345/pysnooper-study | 0 | 6613402 | <reponame>jinhopark8345/pysnooper-study<gh_stars>0
import pysnooper
import random
import logging
import threading
import time
from typing import List
def thread_function(name):
    # Demo worker: log start, simulate 2 seconds of work, log finish.
    logging.info("Thread %s: starting", name)
    time.sleep(2)
    logging.info("Thread %s: finishing", name)
class Solution:
    @pysnooper.snoop(thread_info=True)
    def binary_search(self, nums: List[int], target: int) -> int:
        """Return the index of `target` in sorted `nums`, or -1.

        On a hit, a demo worker thread is started and joined so that
        pysnooper's thread_info output shows more than one thread.
        """
        left, right = 0, len(nums) - 1
        while left <= right:
            # BUG FIX: the original computed `mid = left + (right - left)`,
            # which is simply `right` -- a linear scan from the right, not
            # a binary search.  Use the overflow-safe midpoint instead.
            mid = left + (right - left) // 2
            midnum = nums[mid]
            if midnum == target:
                fmt = "%(asctime)s: %(message)s"
                logging.basicConfig(format=fmt, level=logging.INFO,
                                    datefmt="%H:%M:%S")
                worker = threading.Thread(target=thread_function, args=(1,))
                worker.start()
                worker.join()
                return mid
            elif midnum < target:
                left = mid + 1
            else:
                right = mid - 1
        return -1


Solution().binary_search([1, 2, 3, 4, 5], 2)
| import pysnooper
import random
import logging
import threading
import time
from typing import List
def thread_function(name):
    # Demo worker: log start, simulate 2 seconds of work, log finish.
    logging.info("Thread %s: starting", name)
    time.sleep(2)
    logging.info("Thread %s: finishing", name)
class Solution:
    @pysnooper.snoop(thread_info=True)
    def binary_search(self, nums: List[int], target: int) -> int:
        """Return the index of `target` in sorted `nums`, or -1.

        On a hit, a demo worker thread is started and joined so that
        pysnooper's thread_info output shows more than one thread.
        """
        left, right = 0, len(nums) - 1
        while left <= right:
            # BUG FIX: the original computed `mid = left + (right - left)`,
            # which is simply `right` -- a linear scan from the right, not
            # a binary search.  Use the overflow-safe midpoint instead.
            mid = left + (right - left) // 2
            midnum = nums[mid]
            if midnum == target:
                fmt = "%(asctime)s: %(message)s"
                logging.basicConfig(format=fmt, level=logging.INFO,
                                    datefmt="%H:%M:%S")
                worker = threading.Thread(target=thread_function, args=(1,))
                worker.start()
                worker.join()
                return mid
            elif midnum < target:
                left = mid + 1
            else:
                right = mid - 1
        return -1
Solution().binary_search([1,2,3,4,5], 2) | en | 0.597051 | # @pysnooper.snoop(thread_info=True) # logging.info("Main : before creating thread") # logging.info("Main : before running thread") # logging.info("Main : wait for the thread to finish") # x.join() # logging.info("Main : all done") | 3.392725 | 3 |
nidmviewerfsl/lib/slicertools.py | incf-nidash/nidmresults-fslhtml | 2 | 6613403 | # ==============================================================================
#
# The following functions are designed to resize SPM nifti maps to align with
# the given SPM template nifti using FSL and nilearn commands and output a
# slice image of the two niftis overlaid.
#
# ==============================================================================
#
# Authors: <NAME>, <NAME> (29/11/2017)
import subprocess
import os
import shutil
import random
import shlex
import numpy as np
import nibabel as nib
import wget
from nibabel.processing import resample_from_to
from queries.querytools import run_query
def nif_dim(nifti, k):
    """Return a dimension of a nifti image, read with nibabel.

    `k` is 'x', 'y' or 'z' for the voxel counts, or 'pix' for the first
    pixel dimension.  Raises ValueError for any other `k` (the original
    called the undefined name `error()`, which crashed with a NameError).
    """
    # Map the requested axis onto its (header field, index) pair.
    lookup = {'x': ('dim', 1), 'y': ('dim', 2), 'z': ('dim', 3),
              'pix': ('pixdim', 1)}
    if k not in lookup:
        # Validate before touching the file so a bad argument fails fast.
        raise ValueError('Enter a valid dimension... x, y, z or pix')
    field, index = lookup[k]
    header = nib.load(nifti).header
    return header[field][index]
def resize_exc_set(exc_set, template, tempDir):
    """Resample an SPM excursion set onto the grid of an SPM template.

    The resampled image is written to tempDir/resizedExcSet.nii.gz.
    """
    resampled = resample_from_to(nib.load(exc_set), nib.load(template))
    nib.save(resampled, os.path.join(tempDir, "resizedExcSet.nii.gz"))
def get_val(niftiFilename, minOrMax):
    """Return the minimum or maximum non-zero voxel value of an image."""
    img = nib.load(niftiFilename)
    # Replace NaNs with zero before comparing values.
    voxels = np.nan_to_num(img.get_data())
    # Only non-zero voxels are of interest.
    nonzero = voxels[voxels.nonzero()]
    return nonzero.min() if minOrMax == 'min' else nonzero.max()
def overlay(exc_set, template, o_exc_set, tempDir):
    """Overlay `exc_set` onto `template` with FSL's `overlay` tool.

    The colour range comes from the original (unresized) excursion set
    `o_exc_set`; the result is written to tempDir/outputTemp.nii.gz.
    """
    minZ = str(get_val(o_exc_set, 'min'))
    maxZ = str(get_val(o_exc_set, 'max'))
    overlayCommand = "overlay 1 1 " + template + " -a " + exc_set + " " + \
                     minZ + " " + maxZ + " " + \
                     os.path.join(tempDir, "outputTemp.nii.gz")
    # BUG FIX: the original executed the command twice (check_call
    # followed by a second Popen + wait).  Run it once; check_call
    # already waits and raises on a non-zero exit code.
    subprocess.check_call(shlex.split(overlayCommand), shell=False)
def get_slice_image_from_nifti(tempDir, outputName):
    """Render tempDir/outputTemp.nii.gz as a PNG of slices via FSL `slicer`."""
    slicerCommand = "slicer '" + os.path.join(tempDir, "outputTemp.nii.gz") + \
                    "' -s 0.72 -S 2 750 '" + outputName + "'"
    # BUG FIX: the original executed the command twice (check_call
    # followed by a second Popen + wait).  Run it once; check_call
    # already waits and raises on a non-zero exit code.
    subprocess.check_call(shlex.split(slicerCommand), shell=False)
def generate_slice_image(exc_set, SPMorFSL):
    """Produce a PNG of slices of `exc_set` overlaid on the matching
    standard-space template and return the PNG's path.

    `SPMorFSL` selects the template: 'FSL' uses the MNI152 brain from
    $FSLDIR; anything else uses (and, if needed, downloads) the SPM T1
    template.
    """
    tempFolder = 'temp_NIDM_viewer' + str(random.randint(0, 999999))
    os.mkdir(tempFolder)
    try:
        FSLDIR = os.environ['FSLDIR']
        # Keep the original name: the colour range and the output file
        # name are always derived from the unmodified excursion set.
        o_exc_set = exc_set
        n_exc = nib.load(exc_set)
        if SPMorFSL == 'FSL':
            # Pick the FSL MNI template matching the voxel size.
            if nif_dim(exc_set, 'pix') == 1:
                template = os.path.join(FSLDIR, 'data', 'standard',
                                        'MNI152_T1_1mm_brain.nii.gz')
            else:
                template = os.path.join(FSLDIR, 'data', 'standard',
                                        'MNI152_T1_2mm_brain.nii.gz')
        else:
            # SPM maps may contain NaNs; zero them before slicing.
            d = n_exc.get_data()
            exc_set_nonan = nib.Nifti1Image(np.nan_to_num(d),
                                            n_exc.affine,
                                            header=n_exc.header)
            nib.save(exc_set_nonan, os.path.join(tempFolder,
                                                 'excset_nonan.nii.gz'))
            exc_set = os.path.join(tempFolder, 'excset_nonan.nii.gz')
            # Use the SPM template shipped next to the package.
            template = os.path.join(
                os.path.split(
                    os.path.split(
                        os.path.split(os.path.realpath(__file__))[0])[0])[0],
                'templates', 'T1.nii')
            # Download the SPM T1 template on first use.
            if not os.path.isfile(template):
                if not os.path.isdir(os.path.split(template)[0]):
                    os.mkdir(os.path.split(template)[0])
                wget.download("https://github.com/spm/spm12/blob/master/tool"
                              "box/OldNorm/T1.nii?raw=true", template)
        n_tem = nib.load(template)
        # Resample the excursion set when its affine differs from the
        # template's, then slice the resampled copy.
        if (n_tem.affine != n_exc.affine).any():
            resize_exc_set(exc_set, template, tempFolder)
            exc_set = os.path.join(tempFolder, 'resizedExcSet.nii.gz')
        overlay(exc_set, template, o_exc_set, tempFolder)
        get_slice_image_from_nifti(tempFolder, o_exc_set.replace(
            '.nii', '').replace('.gz', '')+'.png')
    finally:
        # ROBUSTNESS FIX: always remove the scratch directory, even when
        # any step above raises (the original leaked it on failure).
        shutil.rmtree(tempFolder)
    return(o_exc_set.replace('.nii', '').replace('.gz', '')+'.png')
| # ==============================================================================
#
# The following functions are designed to resize SPM nifti maps to align with
# the given SPM template nifti using FSL and nilearn commands and output a
# slice image of the two niftis overlaid.
#
# ==============================================================================
#
# Authors: <NAME>, <NAME> (29/11/2017)
import subprocess
import os
import shutil
import random
import shlex
import numpy as np
import nibabel as nib
import wget
from nibabel.processing import resample_from_to
from queries.querytools import run_query
def nif_dim(nifti, k):
    """Return a dimension of a nifti image, read with nibabel.

    `k` is 'x', 'y' or 'z' for the voxel counts, or 'pix' for the first
    pixel dimension.  Raises ValueError for any other `k` (the original
    called the undefined name `error()`, which crashed with a NameError).
    """
    # Map the requested axis onto its (header field, index) pair.
    lookup = {'x': ('dim', 1), 'y': ('dim', 2), 'z': ('dim', 3),
              'pix': ('pixdim', 1)}
    if k not in lookup:
        # Validate before touching the file so a bad argument fails fast.
        raise ValueError('Enter a valid dimension... x, y, z or pix')
    field, index = lookup[k]
    header = nib.load(nifti).header
    return header[field][index]
def resize_exc_set(exc_set, template, tempDir):
    """Resample an SPM excursion set onto the grid of an SPM template.

    The resampled image is written to tempDir/resizedExcSet.nii.gz.
    """
    resampled = resample_from_to(nib.load(exc_set), nib.load(template))
    nib.save(resampled, os.path.join(tempDir, "resizedExcSet.nii.gz"))
def get_val(niftiFilename, minOrMax):
    """Return the minimum or maximum non-zero voxel value of an image."""
    img = nib.load(niftiFilename)
    # Replace NaNs with zero before comparing values.
    voxels = np.nan_to_num(img.get_data())
    # Only non-zero voxels are of interest.
    nonzero = voxels[voxels.nonzero()]
    return nonzero.min() if minOrMax == 'min' else nonzero.max()
def overlay(exc_set, template, o_exc_set, tempDir):
    """Overlay `exc_set` onto `template` with FSL's `overlay` tool.

    The colour range comes from the original (unresized) excursion set
    `o_exc_set`; the result is written to tempDir/outputTemp.nii.gz.
    """
    minZ = str(get_val(o_exc_set, 'min'))
    maxZ = str(get_val(o_exc_set, 'max'))
    overlayCommand = "overlay 1 1 " + template + " -a " + exc_set + " " + \
                     minZ + " " + maxZ + " " + \
                     os.path.join(tempDir, "outputTemp.nii.gz")
    # BUG FIX: the original executed the command twice (check_call
    # followed by a second Popen + wait).  Run it once; check_call
    # already waits and raises on a non-zero exit code.
    subprocess.check_call(shlex.split(overlayCommand), shell=False)
def get_slice_image_from_nifti(tempDir, outputName):
    """Render tempDir/outputTemp.nii.gz as a PNG of slices via FSL `slicer`."""
    slicerCommand = "slicer '" + os.path.join(tempDir, "outputTemp.nii.gz") + \
                    "' -s 0.72 -S 2 750 '" + outputName + "'"
    # BUG FIX: the original executed the command twice (check_call
    # followed by a second Popen + wait).  Run it once; check_call
    # already waits and raises on a non-zero exit code.
    subprocess.check_call(shlex.split(slicerCommand), shell=False)
def generate_slice_image(exc_set, SPMorFSL):
    """Produce a PNG of slices of `exc_set` overlaid on the matching
    standard-space template and return the PNG's path.

    `SPMorFSL` selects the template: 'FSL' uses the MNI152 brain from
    $FSLDIR; anything else uses (and, if needed, downloads) the SPM T1
    template.
    """
    tempFolder = 'temp_NIDM_viewer' + str(random.randint(0, 999999))
    os.mkdir(tempFolder)
    try:
        FSLDIR = os.environ['FSLDIR']
        # Keep the original name: the colour range and the output file
        # name are always derived from the unmodified excursion set.
        o_exc_set = exc_set
        n_exc = nib.load(exc_set)
        if SPMorFSL == 'FSL':
            # Pick the FSL MNI template matching the voxel size.
            if nif_dim(exc_set, 'pix') == 1:
                template = os.path.join(FSLDIR, 'data', 'standard',
                                        'MNI152_T1_1mm_brain.nii.gz')
            else:
                template = os.path.join(FSLDIR, 'data', 'standard',
                                        'MNI152_T1_2mm_brain.nii.gz')
        else:
            # SPM maps may contain NaNs; zero them before slicing.
            d = n_exc.get_data()
            exc_set_nonan = nib.Nifti1Image(np.nan_to_num(d),
                                            n_exc.affine,
                                            header=n_exc.header)
            nib.save(exc_set_nonan, os.path.join(tempFolder,
                                                 'excset_nonan.nii.gz'))
            exc_set = os.path.join(tempFolder, 'excset_nonan.nii.gz')
            # Use the SPM template shipped next to the package.
            template = os.path.join(
                os.path.split(
                    os.path.split(
                        os.path.split(os.path.realpath(__file__))[0])[0])[0],
                'templates', 'T1.nii')
            # Download the SPM T1 template on first use.
            if not os.path.isfile(template):
                if not os.path.isdir(os.path.split(template)[0]):
                    os.mkdir(os.path.split(template)[0])
                wget.download("https://github.com/spm/spm12/blob/master/tool"
                              "box/OldNorm/T1.nii?raw=true", template)
        n_tem = nib.load(template)
        # Resample the excursion set when its affine differs from the
        # template's, then slice the resampled copy.
        if (n_tem.affine != n_exc.affine).any():
            resize_exc_set(exc_set, template, tempFolder)
            exc_set = os.path.join(tempFolder, 'resizedExcSet.nii.gz')
        overlay(exc_set, template, o_exc_set, tempFolder)
        get_slice_image_from_nifti(tempFolder, o_exc_set.replace(
            '.nii', '').replace('.gz', '')+'.png')
    finally:
        # ROBUSTNESS FIX: always remove the scratch directory, even when
        # any step above raises (the original leaked it on failure).
        shutil.rmtree(tempFolder)
    return(o_exc_set.replace('.nii', '').replace('.gz', '')+'.png')
| en | 0.647066 | # ============================================================================== # # The following functions are designed to resize SPM nifti maps to align with # the given SPM template nifti using FSL and nilearn commands and output a # slice image of the two niftis overlaid. # # ============================================================================== # # Authors: <NAME>, <NAME> (29/11/2017) # Retrieve the k dimension of a nifti using nibabel. # Retrieve image header. # This function resizes an SPM excursion set to an SPM template if # necessary. # Load the images # Resample if necessary # Retrieve the min or max values of the image. # Retrieve image data. # Ensure there are no NaN's # We are only interested in non-zero values. # Overlay exc_set onto template. The output is saved as outputTemp # Get min and max values of the original excursion set. # Place the template onto the excursion set using overlay # Get Slices. Slices are saved as slices.png. # Make a copy of the original name of the excursion set. # Load the excursion set. # If we are looking at FSL data use the FSL template. # NaN values. # Save the result. # Use the SPM template. # Check whether the template already is downloaded. # Load the template. # If the images are different sizes/ have different affine # matrices, resize the excursion set. # Check which is bigger and resize if necessary # If we've resized the excursion set we want to look at the resized # file. # Overlay niftis # Get the slices image | 2.365702 | 2 |
packit_app/table_elements.py | horsewithnoname1985/packit-app | 0 | 6613404 | <gh_stars>0
from abc import ABC
from collections import OrderedDict
from packit_app import table_fields as tf
class TableDataElement(ABC):
    """Base class for one row of a database table.

    Subclasses fill ``column_types`` with an ordered mapping from column
    name to value.
    """

    def __init__(self) -> None:
        # Ordered so columns serialize in their declaration order.
        self.column_types: OrderedDict = OrderedDict()

    def get_as_dict(self) -> OrderedDict:
        """Return the column-name -> value mapping of this element."""
        return self.column_types
class Gender(TableDataElement):
    """Table element holding a single gender-name column."""

    def __init__(self, gender: tf.GenderName = tf.GenderName("male")) -> None:
        super().__init__()
        column = tf.GenderName.column_name
        self.column_types[column] = gender.field[column]
class Male(Gender):
    """Gender preset for "male"."""

    def __init__(self):
        super().__init__(tf.GenderName("male"))
class Female(Gender):
    """Gender preset for "female"."""

    def __init__(self):
        super().__init__(tf.GenderName("female"))
class User(TableDataElement):
    """Table element for a user row: username plus gender id."""

    # Class-level placeholders — presumably legacy attributes; kept
    # unchanged so existing readers of User.name / User.gender still work.
    name = ""
    gender = None

    def __init__(self, username: tf.Username = tf.Username(""),
                 gender_id: tf.GenderID = tf.GenderID(1)) -> None:
        super().__init__()
        columns = self.column_types
        columns[tf.Username.column_name] = username.field[tf.Username.column_name]
        columns[tf.GenderID.column_name] = gender_id.field[tf.GenderID.column_name]
# TODO: Update column_types
class DefaultClothingElement(TableDataElement):
    """Associates a clothing item with the gender it is a default for."""

    def __init__(self, gender="", clothing_item=""):
        super().__init__()
        self.column_types['gender'] = gender
        self.column_types['clothing_item'] = clothing_item
class Trip(TableDataElement):
    """Table element describing one trip and its planning parameters."""

    def __init__(self, destination, start_date, end_date, day_average_temp,
                 day_max_temp, day_min_temp, night_average_indoor_temp,
                 sport_days, no_sport_days, transit_days):
        super().__init__()
        # (field, value) pairs in column declaration order.
        values = [
            (tf.TripDestination, destination),
            (tf.TripDateStart, start_date),
            (tf.TripDateEnd, end_date),
            (tf.TripTemperatureDayAverage, day_average_temp),
            (tf.TripTemperatureDayMax, day_max_temp),
            (tf.TripTemperatureDayMin, day_min_temp),
            (tf.TripTemperatureNightIndoorAverage, night_average_indoor_temp),
            (tf.TripDaysWithSports, sport_days),
            (tf.TripDaysWithoutSports, no_sport_days),
            (tf.TripDaysInTransit, transit_days),
        ]
        for field, value in values:
            self.column_types[field.column_name] = value
class Garment(TableDataElement):
    """One garment type, tied to a gender, optionally a default item."""

    def __init__(self, gender_id: tf.GenderID = tf.GenderID(),
                 name: tf.GarmentName = tf.GarmentName(), is_default=False):
        super().__init__()
        columns = self.column_types
        columns[tf.GenderID.column_name] = gender_id.get_value()
        columns[tf.GarmentName.column_name] = name.get_value()
        columns[tf.GarmentIsDefault.column_name] = is_default
class UserTripGarmentAmount(TableDataElement):
    """Quantity of one garment a user takes on one trip (starts at 0)."""

    def __init__(self, user_id: tf.UserID = tf.UserID(),
                 trip_id: tf.TripID = tf.TripID(),
                 garment_id: tf.GarmentID = tf.GarmentID()):
        super().__init__()
        id_fields = ((tf.UserID, user_id), (tf.TripID, trip_id),
                     (tf.GarmentID, garment_id))
        for field, value in id_fields:
            self.column_types[field.column_name] = value.get_value()
        # The total is filled in later; start from zero.
        self.column_types[tf.TripGarmentTotalQuantity.column_name] = 0.0
class UserGarmentSetting(TableDataElement):
    """Per-user packing quantities for one garment, one column per
    day/night temperature range and activity type; all start at 0.0."""

    def __init__(self, user_id: tf.UserID = tf.UserID(),
                 garment_id: tf.GarmentID = tf.GarmentID()):
        super().__init__()
        self.column_types[tf.UserID.column_name] = user_id.get_value()
        self.column_types[tf.GarmentID.column_name] = garment_id.get_value()
        # Quantity buckets in column declaration order.
        for quantity_field in (
                tf.QuantityDayBelow0, tf.QuantityDay0To10,
                tf.QuantityDay10To20, tf.QuantityDayAbove20,
                tf.QuantityNightBelow20, tf.QuantityNightAbove20,
                tf.QuantityNoSportsDay, tf.QuantitySportsDay,
                tf.QuantityTransitDay):
            self.column_types[quantity_field.column_name] = 0.0
| from abc import ABC
from collections import OrderedDict
from packit_app import table_fields as tf
class TableDataElement(ABC):
    """Base class for one row of a database table.

    Subclasses fill ``column_types`` with an ordered mapping from column
    name to value.
    """

    def __init__(self) -> None:
        # Ordered so columns serialize in their declaration order.
        self.column_types: OrderedDict = OrderedDict()

    def get_as_dict(self) -> OrderedDict:
        """Return the column-name -> value mapping of this element."""
        return self.column_types
class Gender(TableDataElement):
    """Table element holding a single gender-name column."""

    def __init__(self, gender: tf.GenderName = tf.GenderName("male")) -> None:
        super().__init__()
        column = tf.GenderName.column_name
        self.column_types[column] = gender.field[column]
class Male(Gender):
    """Gender preset for "male"."""

    def __init__(self):
        super().__init__(tf.GenderName("male"))
class Female(Gender):
    """Gender preset for "female"."""

    def __init__(self):
        super().__init__(tf.GenderName("female"))
class User(TableDataElement):
    """Table element for a user row: username plus gender id."""

    # Class-level placeholders — presumably legacy attributes; kept
    # unchanged so existing readers of User.name / User.gender still work.
    name = ""
    gender = None

    def __init__(self, username: tf.Username = tf.Username(""),
                 gender_id: tf.GenderID = tf.GenderID(1)) -> None:
        super().__init__()
        columns = self.column_types
        columns[tf.Username.column_name] = username.field[tf.Username.column_name]
        columns[tf.GenderID.column_name] = gender_id.field[tf.GenderID.column_name]
# TODO: Update column_types
class DefaultClothingElement(TableDataElement):
    """Associates a clothing item with the gender it is a default for."""

    def __init__(self, gender="", clothing_item=""):
        super().__init__()
        self.column_types['gender'] = gender
        self.column_types['clothing_item'] = clothing_item
class Trip(TableDataElement):
    """Table element describing one trip and its planning parameters."""

    def __init__(self, destination, start_date, end_date, day_average_temp,
                 day_max_temp, day_min_temp, night_average_indoor_temp,
                 sport_days, no_sport_days, transit_days):
        super().__init__()
        # (field, value) pairs in column declaration order.
        values = [
            (tf.TripDestination, destination),
            (tf.TripDateStart, start_date),
            (tf.TripDateEnd, end_date),
            (tf.TripTemperatureDayAverage, day_average_temp),
            (tf.TripTemperatureDayMax, day_max_temp),
            (tf.TripTemperatureDayMin, day_min_temp),
            (tf.TripTemperatureNightIndoorAverage, night_average_indoor_temp),
            (tf.TripDaysWithSports, sport_days),
            (tf.TripDaysWithoutSports, no_sport_days),
            (tf.TripDaysInTransit, transit_days),
        ]
        for field, value in values:
            self.column_types[field.column_name] = value
class Garment(TableDataElement):
def __init__(self, gender_id: tf.GenderID = tf.GenderID(),
name: tf.GarmentName = tf.GarmentName(), is_default=False):
super(Garment, self).__init__()
self.column_types[tf.GenderID.column_name] = gender_id.get_value()
self.column_types[tf.GarmentName.column_name] = name.get_value()
self.column_types[tf.GarmentIsDefault.column_name] = is_default
class UserTripGarmentAmount(TableDataElement):
def __init__(self, user_id: tf.UserID = tf.UserID(),
trip_id: tf.TripID = tf.TripID(),
garment_id: tf.GarmentID = tf.GarmentID()):
super(UserTripGarmentAmount, self).__init__()
self.column_types[tf.UserID.column_name] = user_id.get_value()
self.column_types[tf.TripID.column_name] = trip_id.get_value()
self.column_types[tf.GarmentID.column_name] = garment_id.get_value()
self.column_types[tf.TripGarmentTotalQuantity.column_name] = 0.0
class UserGarmentSetting(TableDataElement):
def __init__(self, user_id: tf.UserID = tf.UserID(),
garment_id: tf.GarmentID = tf.GarmentID()):
super(UserGarmentSetting, self).__init__()
self.column_types[tf.UserID.column_name] = user_id.get_value()
self.column_types[tf.GarmentID.column_name] = garment_id.get_value()
self.column_types[tf.QuantityDayBelow0.column_name] = 0.0
self.column_types[tf.QuantityDay0To10.column_name] = 0.0
self.column_types[tf.QuantityDay10To20.column_name] = 0.0
self.column_types[tf.QuantityDayAbove20.column_name] = 0.0
self.column_types[tf.QuantityNightBelow20.column_name] = 0.0
self.column_types[tf.QuantityNightAbove20.column_name] = 0.0
self.column_types[tf.QuantityNoSportsDay.column_name] = 0.0
self.column_types[tf.QuantitySportsDay.column_name] = 0.0
self.column_types[tf.QuantityTransitDay.column_name] = 0.0 | en | 0.211983 | # TODO: Update column_types | 2.707997 | 3 |
01-Exercicios/Aula001/Ex3.py | AmandaRH07/Python_Entra21 | 0 | 6613405 | #--- Exercício 3 - Variáveis
#--- Imprima dois parágrafos do último livro que você leu
#--- A impressão deve conter informações do livro, que deverão estar em variáveis
#--- As informações do Livro serão:
#--- Título
#--- Edição
#--- Autor
#--- Data de publicação
#--- Os parágrafos devem estar formatados conforme a formatação do livro
titulo = "A Revolução dos Bichos"
edicao = "1ª edição"
autor = "<NAME>"
publicacao = "17/08/1945"
print("Título: {}".format(titulo))
print("Edição: {}".format(edicao))
print("Autor: {}".format(autor))
print("Data de publicação: {}".format(publicacao))
print(f"""
Realmente, era uma discussão violenta. Gritos, socos na mesa, olhares suspeitos, furiosas negativas. A
origem do caso, ao que parecia, fora o fato de Napoleão e o Sr. Pilkington haverem, ao mesmo tempo, jogado
um ás de espadas.\n
Doze vozes gritavam cheias de ódio e eram todas iguais. Não havia dúvida, agora, quanto ao que sucedera
à fisionomia dos porcos. As criaturas de fora olhavam de um porco para um homem, de um homem para um porco e
de um porco para um homem outra vez; mas já se tornara impossível distinguir quem era homem, quem era porco.""")
| #--- Exercício 3 - Variáveis
#--- Imprima dois parágrafos do último livro que você leu
#--- A impressão deve conter informações do livro, que deverão estar em variáveis
#--- As informações do Livro serão:
#--- Título
#--- Edição
#--- Autor
#--- Data de publicação
#--- Os parágrafos devem estar formatados conforme a formatação do livro
titulo = "A Revolução dos Bichos"
edicao = "1ª edição"
autor = "<NAME>"
publicacao = "17/08/1945"
print("Título: {}".format(titulo))
print("Edição: {}".format(edicao))
print("Autor: {}".format(autor))
print("Data de publicação: {}".format(publicacao))
print(f"""
Realmente, era uma discussão violenta. Gritos, socos na mesa, olhares suspeitos, furiosas negativas. A
origem do caso, ao que parecia, fora o fato de Napoleão e o Sr. Pilkington haverem, ao mesmo tempo, jogado
um ás de espadas.\n
Doze vozes gritavam cheias de ódio e eram todas iguais. Não havia dúvida, agora, quanto ao que sucedera
à fisionomia dos porcos. As criaturas de fora olhavam de um porco para um homem, de um homem para um porco e
de um porco para um homem outra vez; mas já se tornara impossível distinguir quem era homem, quem era porco.""")
| pt | 0.996743 | #--- Exercício 3 - Variáveis #--- Imprima dois parágrafos do último livro que você leu #--- A impressão deve conter informações do livro, que deverão estar em variáveis #--- As informações do Livro serão: #--- Título #--- Edição #--- Autor #--- Data de publicação #--- Os parágrafos devem estar formatados conforme a formatação do livro Realmente, era uma discussão violenta. Gritos, socos na mesa, olhares suspeitos, furiosas negativas. A origem do caso, ao que parecia, fora o fato de Napoleão e o Sr. Pilkington haverem, ao mesmo tempo, jogado um ás de espadas.\n Doze vozes gritavam cheias de ódio e eram todas iguais. Não havia dúvida, agora, quanto ao que sucedera à fisionomia dos porcos. As criaturas de fora olhavam de um porco para um homem, de um homem para um porco e de um porco para um homem outra vez; mas já se tornara impossível distinguir quem era homem, quem era porco. | 4.002765 | 4 |
source-code-from-author-book/Listings-for-Second-Edition/listing_3_25.py | robrac/algorithms-exercises-with-python | 0 | 6613406 | <reponame>robrac/algorithms-exercises-with-python
def add(self,item):
current = self.head
previous = None
stop = False
while current != None and not stop:
if current.getData() > item:
stop = True
else:
previous = current
current = current.getNext()
temp = Node(item)
if previous == None:
temp.setNext(self.head)
self.head = temp
else:
temp.setNext(current)
previous.setNext(temp)
| def add(self,item):
current = self.head
previous = None
stop = False
while current != None and not stop:
if current.getData() > item:
stop = True
else:
previous = current
current = current.getNext()
temp = Node(item)
if previous == None:
temp.setNext(self.head)
self.head = temp
else:
temp.setNext(current)
previous.setNext(temp) | none | 1 | 3.682831 | 4 | |
SupervisedBasic/knn.py | jayshonzs/ESL | 9 | 6613407 | '''
Created on 2014-4-29
@author: xiajie
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
def loaddata():
data1 = np.genfromtxt('sdata1.txt')
data2 = np.genfromtxt('sdata2.txt')
return data1, data2
def cookdata(data1, data2):
X = np.concatenate((data1, data2), axis=0)
Y = np.zeros(len(data1)+len(data2))
for i in range(len(data1)):
Y[i] = 1
return X, Y
def distance(x1, x2):
return np.linalg.norm((x1-x2))
def knn(X, Y, k, x0):
min_y = Y.min()
max_y = Y.max()
dist = [(distance(x0, X[i]), i) for i in range(len(X))]
dist.sort()
#print dist
sumofv = 0
for j in range(k):
item = dist[j]
sumofv += Y[item[1]]
if float(sumofv)/k >= (min_y+max_y)/2.0:
return max_y
else:
return min_y
def drawclass(X, Y, resolution):
mycm = mpl.cm.get_cmap('Paired')
one_min, one_max = X[:, 0].min()-0.1, X[:, 0].max()+0.1
two_min, two_max = X[:, 1].min()-0.1, X[:, 1].max()+0.1
xx1, xx2 = np.meshgrid(np.arange(one_min, one_max, (one_max-one_min)/resolution),
np.arange(two_min, two_max, (two_max-two_min)/resolution))
inputs = np.c_[xx1.ravel(), xx2.ravel()]
z = []
for i in range(len(inputs)):
res = knn(X, Y, 15, inputs[i])
z.append(res)
result = np.array(z).reshape(xx1.shape)
plt.contourf(xx1, xx2, result, cmap=mycm)
plt.scatter(X[:,0], X[:,1], s=30, c=Y, cmap=mycm)
plt.show()
if __name__ == '__main__':
data1, data2 = loaddata()
X, Y = cookdata(data1, data2)
drawclass(X, Y, 50) | '''
Created on 2014-4-29
@author: xiajie
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
def loaddata():
data1 = np.genfromtxt('sdata1.txt')
data2 = np.genfromtxt('sdata2.txt')
return data1, data2
def cookdata(data1, data2):
X = np.concatenate((data1, data2), axis=0)
Y = np.zeros(len(data1)+len(data2))
for i in range(len(data1)):
Y[i] = 1
return X, Y
def distance(x1, x2):
return np.linalg.norm((x1-x2))
def knn(X, Y, k, x0):
min_y = Y.min()
max_y = Y.max()
dist = [(distance(x0, X[i]), i) for i in range(len(X))]
dist.sort()
#print dist
sumofv = 0
for j in range(k):
item = dist[j]
sumofv += Y[item[1]]
if float(sumofv)/k >= (min_y+max_y)/2.0:
return max_y
else:
return min_y
def drawclass(X, Y, resolution):
mycm = mpl.cm.get_cmap('Paired')
one_min, one_max = X[:, 0].min()-0.1, X[:, 0].max()+0.1
two_min, two_max = X[:, 1].min()-0.1, X[:, 1].max()+0.1
xx1, xx2 = np.meshgrid(np.arange(one_min, one_max, (one_max-one_min)/resolution),
np.arange(two_min, two_max, (two_max-two_min)/resolution))
inputs = np.c_[xx1.ravel(), xx2.ravel()]
z = []
for i in range(len(inputs)):
res = knn(X, Y, 15, inputs[i])
z.append(res)
result = np.array(z).reshape(xx1.shape)
plt.contourf(xx1, xx2, result, cmap=mycm)
plt.scatter(X[:,0], X[:,1], s=30, c=Y, cmap=mycm)
plt.show()
if __name__ == '__main__':
data1, data2 = loaddata()
X, Y = cookdata(data1, data2)
drawclass(X, Y, 50) | en | 0.577482 | Created on 2014-4-29 @author: xiajie #print dist | 2.274113 | 2 |
test/train_and_user_similarity.py | victusfate/concierge | 14 | 6613408 | import pandas as pd
import time
import os
from concierge import data_io
from concierge import constants
from concierge.collaborative_filter import CollaborativeFilter
from river import metrics
import redis
cache = redis.Redis(host=constants.REDIS_HOST, port=6379, db=0)
# df = data_io.load_dataset(',',constants.PLACE_RATINGS_FILE)
# max_ts,dataset = CollaborativeFilter.df_to_timestamp_and_dataset(df)
# tf = CollaborativeFilter(constants.CF_PLACE,CollaborativeFilter.fm_model(),metrics.MAE() + metrics.RMSE())
# tf.timestamp = max_ts
# # # cf.data_stats(dataset)
# tLearnStart = time.time()
# tf.learn(dataset,max_ts)
# # cf.evaluate(dataset)
# tLearnEnd = time.time()
# print('tLearn',tLearnEnd-tLearnStart)
file_path = os.path.join('/tmp/',constants.CF_PLACE)
# tf.save_to_file(file_path)
cf = CollaborativeFilter(constants.CF_PLACE)
cf.load_from_file(file_path)
# similar users
user_id = '128x9v1'
selected_user_ids = [
'15clr1r', # Pat
'1obrasa', # Mitch
'12exuzi', # Peter
'88i0z7', # Matthew
'163hup1', # <NAME>.
'15u27fv', # <NAME>.
'tf40jt', # Alex
'7pungc', # Rijul
'1q9v9dh', # Jake
'1mkkm8z', # Kingsley
'chroyy', # Fedor
'1wblyoh', # Lina
'ck3th0', # Wes
'1jkh13k', # <NAME>.
'z960qo' # <NAME>
]
t1 = time.time()
similarity_scores = cf.user_rankings(user_id,selected_user_ids)
t2 = time.time()
print('similarity',similarity_scores)
print('delta time',t2-t1)
t3 = time.time()
similarity_scores = cf.user_rankings2(user_id,selected_user_ids)
t4 = time.time()
print('similarity',similarity_scores)
print('delta time',t4-t3) | import pandas as pd
import time
import os
from concierge import data_io
from concierge import constants
from concierge.collaborative_filter import CollaborativeFilter
from river import metrics
import redis
cache = redis.Redis(host=constants.REDIS_HOST, port=6379, db=0)
# df = data_io.load_dataset(',',constants.PLACE_RATINGS_FILE)
# max_ts,dataset = CollaborativeFilter.df_to_timestamp_and_dataset(df)
# tf = CollaborativeFilter(constants.CF_PLACE,CollaborativeFilter.fm_model(),metrics.MAE() + metrics.RMSE())
# tf.timestamp = max_ts
# # # cf.data_stats(dataset)
# tLearnStart = time.time()
# tf.learn(dataset,max_ts)
# # cf.evaluate(dataset)
# tLearnEnd = time.time()
# print('tLearn',tLearnEnd-tLearnStart)
file_path = os.path.join('/tmp/',constants.CF_PLACE)
# tf.save_to_file(file_path)
cf = CollaborativeFilter(constants.CF_PLACE)
cf.load_from_file(file_path)
# similar users
user_id = '128x9v1'
selected_user_ids = [
'15clr1r', # Pat
'1obrasa', # Mitch
'12exuzi', # Peter
'88i0z7', # Matthew
'163hup1', # <NAME>.
'15u27fv', # <NAME>.
'tf40jt', # Alex
'7pungc', # Rijul
'1q9v9dh', # Jake
'1mkkm8z', # Kingsley
'chroyy', # Fedor
'1wblyoh', # Lina
'ck3th0', # Wes
'1jkh13k', # <NAME>.
'z960qo' # <NAME>
]
t1 = time.time()
similarity_scores = cf.user_rankings(user_id,selected_user_ids)
t2 = time.time()
print('similarity',similarity_scores)
print('delta time',t2-t1)
t3 = time.time()
similarity_scores = cf.user_rankings2(user_id,selected_user_ids)
t4 = time.time()
print('similarity',similarity_scores)
print('delta time',t4-t3) | en | 0.322561 | # df = data_io.load_dataset(',',constants.PLACE_RATINGS_FILE) # max_ts,dataset = CollaborativeFilter.df_to_timestamp_and_dataset(df) # tf = CollaborativeFilter(constants.CF_PLACE,CollaborativeFilter.fm_model(),metrics.MAE() + metrics.RMSE()) # tf.timestamp = max_ts # # # cf.data_stats(dataset) # tLearnStart = time.time() # tf.learn(dataset,max_ts) # # cf.evaluate(dataset) # tLearnEnd = time.time() # print('tLearn',tLearnEnd-tLearnStart) # tf.save_to_file(file_path) # similar users # Pat # Mitch # Peter # Matthew # <NAME>. # <NAME>. # Alex # Rijul # Jake # Kingsley # Fedor # Lina # Wes # <NAME>. # <NAME> | 2.196445 | 2 |
concatenator_app/__init__.py | gsingers/qiacuity-concatenator | 0 | 6613409 | <filename>concatenator_app/__init__.py
import os
from flask import Flask
from flask import render_template
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
from itertools import zip_longest
UPLOAD_FOLDER = './data/uploads'
RESULTS_FOLDER = './data/results'
COMPLETED_FOLDER = './data/completed'
ALLOWED_EXTENSIONS = {'csv'}
def create_app(test_config=None):
app = Flask(__name__, instance_relative_config=True)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
upload = os.environ.get("UPLOAD_FOLDER", UPLOAD_FOLDER)
results = os.environ.get("RESULTS_FOLDER", RESULTS_FOLDER)
completed = os.environ.get("COMPLETED_FOLDER", COMPLETED_FOLDER)
app.config['UPLOAD_FOLDER'] = upload
app.config['RESULTS_FOLDER'] = results
app.config['COMPLETED_FOLDER'] = completed
app.config['ALLOWED_EXTENSIONS'] = ALLOWED_EXTENSIONS
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError as ose:
pass
try:
os.makedirs(UPLOAD_FOLDER)
except OSError:
pass
try:
os.makedirs(RESULTS_FOLDER)
except OSError:
pass
try:
os.makedirs(COMPLETED_FOLDER)
except OSError:
pass
from . import concatenate
from . import files
app.register_blueprint(concatenate.bp)
app.register_blueprint(files.bp)
@app.route("/")
def home():
files = os.listdir(app.config["UPLOAD_FOLDER"])
files.sort()
results = os.listdir(app.config["RESULTS_FOLDER"])
results.sort()
#print(files, results)
completed_folders = os.listdir(app.config["COMPLETED_FOLDER"])
completed_folders.sort()
return render_template("index.jinja2", zipped_files=zip_longest(files, results, completed_folders, fillvalue=""))
return app
| <filename>concatenator_app/__init__.py
import os
from flask import Flask
from flask import render_template
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
from itertools import zip_longest
UPLOAD_FOLDER = './data/uploads'
RESULTS_FOLDER = './data/results'
COMPLETED_FOLDER = './data/completed'
ALLOWED_EXTENSIONS = {'csv'}
def create_app(test_config=None):
app = Flask(__name__, instance_relative_config=True)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
upload = os.environ.get("UPLOAD_FOLDER", UPLOAD_FOLDER)
results = os.environ.get("RESULTS_FOLDER", RESULTS_FOLDER)
completed = os.environ.get("COMPLETED_FOLDER", COMPLETED_FOLDER)
app.config['UPLOAD_FOLDER'] = upload
app.config['RESULTS_FOLDER'] = results
app.config['COMPLETED_FOLDER'] = completed
app.config['ALLOWED_EXTENSIONS'] = ALLOWED_EXTENSIONS
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError as ose:
pass
try:
os.makedirs(UPLOAD_FOLDER)
except OSError:
pass
try:
os.makedirs(RESULTS_FOLDER)
except OSError:
pass
try:
os.makedirs(COMPLETED_FOLDER)
except OSError:
pass
from . import concatenate
from . import files
app.register_blueprint(concatenate.bp)
app.register_blueprint(files.bp)
@app.route("/")
def home():
files = os.listdir(app.config["UPLOAD_FOLDER"])
files.sort()
results = os.listdir(app.config["RESULTS_FOLDER"])
results.sort()
#print(files, results)
completed_folders = os.listdir(app.config["COMPLETED_FOLDER"])
completed_folders.sort()
return render_template("index.jinja2", zipped_files=zip_longest(files, results, completed_folders, fillvalue=""))
return app
| en | 0.420752 | # load the instance config, if it exists, when not testing # load the test config if passed in # ensure the instance folder exists #print(files, results) | 2.433423 | 2 |
src/semantickit/relatedness/lesk.py | dhchenx/semantic-kit | 1 | 6613410 | <filename>src/semantickit/relatedness/lesk.py<gh_stars>1-10
from nltk.corpus import wordnet
def lesk(context_sentence, ambiguous_word, pos=None, synsets=None):
context = set(context_sentence)
if synsets is None:
synsets = wordnet.synsets(ambiguous_word)
if pos:
synsets = [ss for ss in synsets if str(ss.pos()) == pos]
if not synsets:
return None
max_len, sense = max(
(len(context.intersection(ss.definition().split())), ss) for ss in synsets
)
return max_len,sense
| <filename>src/semantickit/relatedness/lesk.py<gh_stars>1-10
from nltk.corpus import wordnet
def lesk(context_sentence, ambiguous_word, pos=None, synsets=None):
context = set(context_sentence)
if synsets is None:
synsets = wordnet.synsets(ambiguous_word)
if pos:
synsets = [ss for ss in synsets if str(ss.pos()) == pos]
if not synsets:
return None
max_len, sense = max(
(len(context.intersection(ss.definition().split())), ss) for ss in synsets
)
return max_len,sense
| none | 1 | 2.534321 | 3 | |
PyQTST/Moldata.py | Linqiaosong/PyQTST | 4 | 6613411 | # encoding=utf-8
c=299792458
kB=1.3806503E-23
h=6.6260696E-34
L=6.02214179E+23
R=kB*L
pi=3.14159265358979
def kcal2kj(kcal):
return float(kcal)*4.1840
def ev2kj(ev):
return float(ev)*96.485
def eh2kj(eh):
return float(eh)*2625.49962
def c2k(oc):
return float(oc)+273.15
def f2k(of):
return (float(of)-32)/1.8+273.15
def hz2cm(hz):
return hz/(100*c)
class Moldata:
u0k=0.0
gtk=0.0
q=1.0
eunit='kj/mol'
def __init__(
self,
U0K=0.0,
GTK=0.0,
Q=1.0,
EUnit='kj/mol'
):
self.eunit=str.lower(EUnit)
self.q=float(Q)
if self.eunit=='kj/mol':
self.u0k=float(U0K)
self.gtk=float(GTK)
elif self.eunit=='kcal/mol':
self.u0k=kcal2kj(float(U0K))
self.gtk=kcal2kj(float(GTK))
elif self.eunit=='ev':
self.u0k=ev2kj(float(U0K))
self.gtk=ev2kj(float(GTK))
elif self.eunit=='eh':
self.u0k=eh2kj(float(U0K))
self.gtk=eh2kj(float(GTK))
else:
print("The unit of energy was not defined!")
exit()
def __add__(self,other):
result=Moldata(U0K=self.u0k+other.u0k,GTK=self.gtk+other.gtk,Q=self.q*other.q,EUnit='kj/mol')
return result
def get_u0k(self):
return self.u0k
def get_gtk(self):
return self.gtk
def get_q(self):
return self.q
if __name__ == "__main__":
A=Moldata(1,1,1)
B=Moldata(2,2,2)
C=A+B
| # encoding=utf-8
c=299792458
kB=1.3806503E-23
h=6.6260696E-34
L=6.02214179E+23
R=kB*L
pi=3.14159265358979
def kcal2kj(kcal):
return float(kcal)*4.1840
def ev2kj(ev):
return float(ev)*96.485
def eh2kj(eh):
return float(eh)*2625.49962
def c2k(oc):
return float(oc)+273.15
def f2k(of):
return (float(of)-32)/1.8+273.15
def hz2cm(hz):
return hz/(100*c)
class Moldata:
u0k=0.0
gtk=0.0
q=1.0
eunit='kj/mol'
def __init__(
self,
U0K=0.0,
GTK=0.0,
Q=1.0,
EUnit='kj/mol'
):
self.eunit=str.lower(EUnit)
self.q=float(Q)
if self.eunit=='kj/mol':
self.u0k=float(U0K)
self.gtk=float(GTK)
elif self.eunit=='kcal/mol':
self.u0k=kcal2kj(float(U0K))
self.gtk=kcal2kj(float(GTK))
elif self.eunit=='ev':
self.u0k=ev2kj(float(U0K))
self.gtk=ev2kj(float(GTK))
elif self.eunit=='eh':
self.u0k=eh2kj(float(U0K))
self.gtk=eh2kj(float(GTK))
else:
print("The unit of energy was not defined!")
exit()
def __add__(self,other):
result=Moldata(U0K=self.u0k+other.u0k,GTK=self.gtk+other.gtk,Q=self.q*other.q,EUnit='kj/mol')
return result
def get_u0k(self):
return self.u0k
def get_gtk(self):
return self.gtk
def get_q(self):
return self.q
if __name__ == "__main__":
A=Moldata(1,1,1)
B=Moldata(2,2,2)
C=A+B
| en | 0.70014 | # encoding=utf-8 | 2.783525 | 3 |
tests/modules.py | Bamuir3/CCF | 0 | 6613412 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import tempfile
import http
import subprocess
import os
import glob
import infra.network
import infra.path
import infra.proc
import infra.notification
import infra.net
import infra.e2e_args
import suite.test_requirements as reqs
import ccf.proposal_generator
from loguru import logger as LOG
THIS_DIR = os.path.dirname(__file__)
MODULE_PATH_1 = "/app/foo.js"
MODULE_RETURN_1 = "Hello world!"
MODULE_CONTENT_1 = f"""
export function foo() {{
return "{MODULE_RETURN_1}";
}}
"""
MODULE_PATH_2 = "/app/bar.js"
MODULE_CONTENT_2 = """
import {foo} from "./foo.js"
export function bar() {
return foo();
}
"""
# For the purpose of resolving relative import paths,
# app script modules are currently assumed to be located at /.
# This will likely change.
APP_SCRIPT = """
return {
["POST test_module"] = [[
import {bar} from "./app/bar.js";
export default function()
{
return bar();
}
]]
}
"""
# Eventually, the npm app will contain these modules as well
# together with an API description.
NPM_APP_SCRIPT = """
return {
["POST npm/partition"] = [[
import {partition} from "./my-npm-app/src/endpoints.js";
export default () => partition();
]],
["POST npm/proto"] = [[
import {proto} from "./my-npm-app/src/endpoints.js";
export default () => proto();
]],
["GET npm/crypto"] = [[
import {crypto} from "./my-npm-app/src/endpoints.js";
export default () => crypto();
]]
}
"""
def make_module_set_proposal(path, content, network):
primary, _ = network.find_nodes()
with tempfile.NamedTemporaryFile("w") as f:
f.write(content)
f.flush()
proposal_body, _ = ccf.proposal_generator.set_module(path, f.name)
proposal = network.consortium.get_any_active_member().propose(
primary, proposal_body
)
network.consortium.vote_using_majority(primary, proposal)
@reqs.description("Test module set and remove")
def test_module_set_and_remove(network, args):
primary, _ = network.find_nodes()
LOG.info("Member makes a module update proposal")
make_module_set_proposal(MODULE_PATH_1, MODULE_CONTENT_1, network)
with primary.client(
f"member{network.consortium.get_any_active_member().member_id}"
) as c:
r = c.post("/gov/read", {"table": "ccf.modules", "key": MODULE_PATH_1})
assert r.status_code == http.HTTPStatus.OK, r.status_code
assert r.body["js"] == MODULE_CONTENT_1, r.body
LOG.info("Member makes a module remove proposal")
proposal_body, _ = ccf.proposal_generator.remove_module(MODULE_PATH_1)
proposal = network.consortium.get_any_active_member().propose(
primary, proposal_body
)
network.consortium.vote_using_majority(primary, proposal)
with primary.client(
f"member{network.consortium.get_any_active_member().member_id}"
) as c:
r = c.post("/gov/read", {"table": "ccf.modules", "key": MODULE_PATH_1})
assert r.status_code == http.HTTPStatus.BAD_REQUEST, r.status_code
return network
@reqs.description("Test module import")
def test_module_import(network, args):
primary, _ = network.find_nodes()
# Add modules
make_module_set_proposal(MODULE_PATH_1, MODULE_CONTENT_1, network)
make_module_set_proposal(MODULE_PATH_2, MODULE_CONTENT_2, network)
# Update JS app which imports module
with tempfile.NamedTemporaryFile("w") as f:
f.write(APP_SCRIPT)
f.flush()
network.consortium.set_js_app(remote_node=primary, app_script_path=f.name)
with primary.client("user0") as c:
r = c.post("/app/test_module", {})
assert r.status_code == http.HTTPStatus.OK, r.status_code
assert r.body == MODULE_RETURN_1
return network
@reqs.description("Test Node.js/npm app")
def test_npm_app(network, args):
primary, _ = network.find_nodes()
LOG.info("Building npm app")
app_dir = os.path.join(THIS_DIR, "npm-app")
subprocess.run(["npm", "ci"], cwd=app_dir, check=True)
subprocess.run(["npm", "run", "build"], cwd=app_dir, check=True)
LOG.info("Deploying npm app modules")
kv_prefix = "/my-npm-app"
dist_dir = os.path.join(app_dir, "dist")
for module_path in glob.glob(os.path.join(dist_dir, "**", "*.js"), recursive=True):
module_name = os.path.join(kv_prefix, os.path.relpath(module_path, dist_dir))
proposal_body, _ = ccf.proposal_generator.set_module(module_name, module_path)
proposal = network.consortium.get_any_active_member().propose(
primary, proposal_body
)
network.consortium.vote_using_majority(primary, proposal)
LOG.info("Deploying endpoint script")
with tempfile.NamedTemporaryFile("w") as f:
f.write(NPM_APP_SCRIPT)
f.flush()
network.consortium.set_js_app(remote_node=primary, app_script_path=f.name)
LOG.info("Calling npm app endpoints")
with primary.client("user0") as c:
body = [1, 2, 3, 4]
r = c.post("/app/npm/partition", body)
assert r.status_code == http.HTTPStatus.OK, r.status_code
assert r.body == [[1, 3], [2, 4]], r.body
r = c.post("/app/npm/proto", body)
assert r.status_code == http.HTTPStatus.OK, r.status_code
# CCF does not support binary responses yet.
pb = bytes.fromhex(r.body)
# We could now decode the protobuf message but given all the machinery
# involved to make it happen (code generation with protoc) we'll leave it at that.
assert len(pb) == 14, len(pb)
r = c.get("/app/npm/crypto")
assert r.status_code == http.HTTPStatus.OK, r.status_code
assert r.body["available"], r.body
def run(args):
hosts = ["localhost"] * (3 if args.consensus == "pbft" else 2)
with infra.network.network(
hosts, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
network.start_and_join(args)
network = test_module_set_and_remove(network, args)
network = test_module_import(network, args)
network = test_npm_app(network, args)
if __name__ == "__main__":
args = infra.e2e_args.cli_args()
args.package = "libjs_generic"
run(args)
| # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import tempfile
import http
import subprocess
import os
import glob
import infra.network
import infra.path
import infra.proc
import infra.notification
import infra.net
import infra.e2e_args
import suite.test_requirements as reqs
import ccf.proposal_generator
from loguru import logger as LOG
THIS_DIR = os.path.dirname(__file__)
MODULE_PATH_1 = "/app/foo.js"
MODULE_RETURN_1 = "Hello world!"
MODULE_CONTENT_1 = f"""
export function foo() {{
return "{MODULE_RETURN_1}";
}}
"""
MODULE_PATH_2 = "/app/bar.js"
MODULE_CONTENT_2 = """
import {foo} from "./foo.js"
export function bar() {
return foo();
}
"""
# For the purpose of resolving relative import paths,
# app script modules are currently assumed to be located at /.
# This will likely change.
APP_SCRIPT = """
return {
["POST test_module"] = [[
import {bar} from "./app/bar.js";
export default function()
{
return bar();
}
]]
}
"""
# Eventually, the npm app will contain these modules as well
# together with an API description.
NPM_APP_SCRIPT = """
return {
["POST npm/partition"] = [[
import {partition} from "./my-npm-app/src/endpoints.js";
export default () => partition();
]],
["POST npm/proto"] = [[
import {proto} from "./my-npm-app/src/endpoints.js";
export default () => proto();
]],
["GET npm/crypto"] = [[
import {crypto} from "./my-npm-app/src/endpoints.js";
export default () => crypto();
]]
}
"""
def make_module_set_proposal(path, content, network):
primary, _ = network.find_nodes()
with tempfile.NamedTemporaryFile("w") as f:
f.write(content)
f.flush()
proposal_body, _ = ccf.proposal_generator.set_module(path, f.name)
proposal = network.consortium.get_any_active_member().propose(
primary, proposal_body
)
network.consortium.vote_using_majority(primary, proposal)
@reqs.description("Test module set and remove")
def test_module_set_and_remove(network, args):
primary, _ = network.find_nodes()
LOG.info("Member makes a module update proposal")
make_module_set_proposal(MODULE_PATH_1, MODULE_CONTENT_1, network)
with primary.client(
f"member{network.consortium.get_any_active_member().member_id}"
) as c:
r = c.post("/gov/read", {"table": "ccf.modules", "key": MODULE_PATH_1})
assert r.status_code == http.HTTPStatus.OK, r.status_code
assert r.body["js"] == MODULE_CONTENT_1, r.body
LOG.info("Member makes a module remove proposal")
proposal_body, _ = ccf.proposal_generator.remove_module(MODULE_PATH_1)
proposal = network.consortium.get_any_active_member().propose(
primary, proposal_body
)
network.consortium.vote_using_majority(primary, proposal)
with primary.client(
f"member{network.consortium.get_any_active_member().member_id}"
) as c:
r = c.post("/gov/read", {"table": "ccf.modules", "key": MODULE_PATH_1})
assert r.status_code == http.HTTPStatus.BAD_REQUEST, r.status_code
return network
@reqs.description("Test module import")
def test_module_import(network, args):
primary, _ = network.find_nodes()
# Add modules
make_module_set_proposal(MODULE_PATH_1, MODULE_CONTENT_1, network)
make_module_set_proposal(MODULE_PATH_2, MODULE_CONTENT_2, network)
# Update JS app which imports module
with tempfile.NamedTemporaryFile("w") as f:
f.write(APP_SCRIPT)
f.flush()
network.consortium.set_js_app(remote_node=primary, app_script_path=f.name)
with primary.client("user0") as c:
r = c.post("/app/test_module", {})
assert r.status_code == http.HTTPStatus.OK, r.status_code
assert r.body == MODULE_RETURN_1
return network
@reqs.description("Test Node.js/npm app")
def test_npm_app(network, args):
primary, _ = network.find_nodes()
LOG.info("Building npm app")
app_dir = os.path.join(THIS_DIR, "npm-app")
subprocess.run(["npm", "ci"], cwd=app_dir, check=True)
subprocess.run(["npm", "run", "build"], cwd=app_dir, check=True)
LOG.info("Deploying npm app modules")
kv_prefix = "/my-npm-app"
dist_dir = os.path.join(app_dir, "dist")
for module_path in glob.glob(os.path.join(dist_dir, "**", "*.js"), recursive=True):
module_name = os.path.join(kv_prefix, os.path.relpath(module_path, dist_dir))
proposal_body, _ = ccf.proposal_generator.set_module(module_name, module_path)
proposal = network.consortium.get_any_active_member().propose(
primary, proposal_body
)
network.consortium.vote_using_majority(primary, proposal)
LOG.info("Deploying endpoint script")
with tempfile.NamedTemporaryFile("w") as f:
f.write(NPM_APP_SCRIPT)
f.flush()
network.consortium.set_js_app(remote_node=primary, app_script_path=f.name)
LOG.info("Calling npm app endpoints")
with primary.client("user0") as c:
body = [1, 2, 3, 4]
r = c.post("/app/npm/partition", body)
assert r.status_code == http.HTTPStatus.OK, r.status_code
assert r.body == [[1, 3], [2, 4]], r.body
r = c.post("/app/npm/proto", body)
assert r.status_code == http.HTTPStatus.OK, r.status_code
# CCF does not support binary responses yet.
pb = bytes.fromhex(r.body)
# We could now decode the protobuf message but given all the machinery
# involved to make it happen (code generation with protoc) we'll leave it at that.
assert len(pb) == 14, len(pb)
r = c.get("/app/npm/crypto")
assert r.status_code == http.HTTPStatus.OK, r.status_code
assert r.body["available"], r.body
def run(args):
    """Spin up a local test network and run every module/app test step."""
    # PBFT consensus needs three nodes; other variants get two.
    node_count = 3 if args.consensus == "pbft" else 2
    hosts = ["localhost"] * node_count
    with infra.network.network(
        hosts, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
    ) as network:
        network.start_and_join(args)
        # Each step receives the network and hands it back for the next one.
        for step in (test_module_set_and_remove, test_module_import, test_npm_app):
            network = step(network, args)
if __name__ == "__main__":
    args = infra.e2e_args.cli_args()
    # This suite always runs against the generic JavaScript app package.
    args.package = "libjs_generic"
    run(args)
| en | 0.714635 | # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache 2.0 License. export function foo() {{ return "{MODULE_RETURN_1}"; }} import {foo} from "./foo.js" export function bar() { return foo(); } # For the purpose of resolving relative import paths, # app script modules are currently assumed to be located at /. # This will likely change. return { ["POST test_module"] = [[ import {bar} from "./app/bar.js"; export default function() { return bar(); } ]] } # Eventually, the npm app will contain these modules as well # together with an API description. return { ["POST npm/partition"] = [[ import {partition} from "./my-npm-app/src/endpoints.js"; export default () => partition(); ]], ["POST npm/proto"] = [[ import {proto} from "./my-npm-app/src/endpoints.js"; export default () => proto(); ]], ["GET npm/crypto"] = [[ import {crypto} from "./my-npm-app/src/endpoints.js"; export default () => crypto(); ]] } # Add modules # Update JS app which imports module # CCF does not support binary responses yet. # We could now decode the protobuf message but given all the machinery # involved to make it happen (code generation with protoc) we'll leave it at that. | 2.118577 | 2 |
python/68.text-justification.py | kadaliao/leetcode | 0 | 6613413 | #
# @lc app=leetcode.cn id=68 lang=python3
#
# [68] 文本左右对齐
#
# https://leetcode-cn.com/problems/text-justification/description/
#
# algorithms
# Hard (47.11%)
# Total Accepted: 29.1K
# Total Submissions: 57.2K
# Testcase Example: '["This", "is", "an", "example", "of", "text", "justification."]\n16'
#
# 给定一个单词数组和一个长度 maxWidth,重新排版单词,使其成为每行恰好有 maxWidth 个字符,且左右两端对齐的文本。
#
# 你应该使用“贪心算法”来放置给定的单词;也就是说,尽可能多地往每行中放置单词。必要时可用空格 ' ' 填充,使得每行恰好有 maxWidth 个字符。
#
# 要求尽可能均匀分配单词间的空格数量。如果某一行单词间的空格不能均匀分配,则左侧放置的空格数要多于右侧的空格数。
#
# 文本的最后一行应为左对齐,且单词之间不插入额外的空格。
#
# 说明:
#
#
# 单词是指由非空格字符组成的字符序列。
# 每个单词的长度大于 0,小于等于 maxWidth。
# 输入单词数组 words 至少包含一个单词。
#
#
# 示例:
#
# 输入:
# words = ["This", "is", "an", "example", "of", "text", "justification."]
# maxWidth = 16
# 输出:
# [
# "This is an",
# "example of text",
# "justification. "
# ]
#
#
# 示例 2:
#
# 输入:
# words = ["What","must","be","acknowledgment","shall","be"]
# maxWidth = 16
# 输出:
# [
# "What must be",
# "acknowledgment ",
# "shall be "
# ]
# 解释: 注意最后一行的格式应为 "shall be " 而不是 "shall be",
# 因为最后一行应为左对齐,而不是左右两端对齐。
# 第二行同样为左对齐,这是因为这行只包含一个单词。
#
#
# 示例 3:
#
# 输入:
# words =
# ["Science","is","what","we","understand","well","enough","to","explain",
# "to","a","computer.","Art","is","everything","else","we","do"]
# maxWidth = 20
# 输出:
# [
# "Science is what we",
# "understand well",
# "enough to explain to",
# "a computer. Art is",
# "everything else we",
# "do "
# ]
from typing import List
class Solution:
    def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
        """Greedily pack words into lines of exactly maxWidth characters.

        Interior lines are fully justified; extra spaces go to the leftmost
        gaps first. The last line (and any single-word line) is
        left-justified and padded on the right.
        """
        lines = []
        cursor, total = 0, len(words)
        while True:
            start = cursor
            width = 0
            # Take as many words as fit, counting one mandatory space per gap.
            while cursor < total and width + len(words[cursor]) + (cursor - start) <= maxWidth:
                width += len(words[cursor])
                cursor += 1
            if cursor == total:
                # Last line: single spaces between words, pad on the right.
                tail = " ".join(words[start:])
                lines.append(tail.ljust(maxWidth))
                break
            gaps = cursor - start - 1
            padding = maxWidth - width
            if gaps == 0:
                # Only one word fits: left-justify it.
                lines.append(words[start].ljust(maxWidth))
                continue
            # Distribute padding: the first `extra` gaps get one more space.
            base, extra = divmod(padding, gaps)
            pieces = []
            for offset, word in enumerate(words[start:cursor]):
                pieces.append(word)
                if offset < gaps:
                    pieces.append(" " * (base + (1 if offset < extra else 0)))
            lines.append("".join(pieces))
        return lines
| #
# @lc app=leetcode.cn id=68 lang=python3
#
# [68] 文本左右对齐
#
# https://leetcode-cn.com/problems/text-justification/description/
#
# algorithms
# Hard (47.11%)
# Total Accepted: 29.1K
# Total Submissions: 57.2K
# Testcase Example: '["This", "is", "an", "example", "of", "text", "justification."]\n16'
#
# 给定一个单词数组和一个长度 maxWidth,重新排版单词,使其成为每行恰好有 maxWidth 个字符,且左右两端对齐的文本。
#
# 你应该使用“贪心算法”来放置给定的单词;也就是说,尽可能多地往每行中放置单词。必要时可用空格 ' ' 填充,使得每行恰好有 maxWidth 个字符。
#
# 要求尽可能均匀分配单词间的空格数量。如果某一行单词间的空格不能均匀分配,则左侧放置的空格数要多于右侧的空格数。
#
# 文本的最后一行应为左对齐,且单词之间不插入额外的空格。
#
# 说明:
#
#
# 单词是指由非空格字符组成的字符序列。
# 每个单词的长度大于 0,小于等于 maxWidth。
# 输入单词数组 words 至少包含一个单词。
#
#
# 示例:
#
# 输入:
# words = ["This", "is", "an", "example", "of", "text", "justification."]
# maxWidth = 16
# 输出:
# [
# "This is an",
# "example of text",
# "justification. "
# ]
#
#
# 示例 2:
#
# 输入:
# words = ["What","must","be","acknowledgment","shall","be"]
# maxWidth = 16
# 输出:
# [
# "What must be",
# "acknowledgment ",
# "shall be "
# ]
# 解释: 注意最后一行的格式应为 "shall be " 而不是 "shall be",
# 因为最后一行应为左对齐,而不是左右两端对齐。
# 第二行同样为左对齐,这是因为这行只包含一个单词。
#
#
# 示例 3:
#
# 输入:
# words =
# ["Science","is","what","we","understand","well","enough","to","explain",
# "to","a","computer.","Art","is","everything","else","we","do"]
# maxWidth = 20
# 输出:
# [
# "Science is what we",
# "understand well",
# "enough to explain to",
# "a computer. Art is",
# "everything else we",
# "do "
# ]
from typing import List
class Solution:
    def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
        """Greedy text justification: each output line is exactly maxWidth wide.

        Interior lines are fully justified with surplus spaces assigned to the
        leftmost gaps; the final line and single-word lines are left-aligned.
        """
        out = []
        i, n = 0, len(words)
        while True:
            line_start = i
            used = 0
            # Greedily collect words; (i - line_start) counts mandatory gaps.
            while i < n and used + len(words[i]) + (i - line_start) <= maxWidth:
                used += len(words[i])
                i += 1
            if i == n:
                # Final line is left-justified with single spaces.
                last = " ".join(words[line_start:])
                out.append(last.ljust(maxWidth))
                break
            gap_count = i - line_start - 1
            spare = maxWidth - used
            if gap_count == 0:
                # A lone word gets all the padding on its right.
                out.append(words[line_start].ljust(maxWidth))
                continue
            even, leftover = divmod(spare, gap_count)
            chunks = []
            for k, word in enumerate(words[line_start:i]):
                chunks.append(word)
                if k < gap_count:
                    # Leftmost `leftover` gaps receive one extra space.
                    chunks.append(" " * (even + (1 if k < leftover else 0)))
            out.append("".join(chunks))
        return out
| zh | 0.278075 | # # @lc app=leetcode.cn id=68 lang=python3 # # [68] 文本左右对齐 # # https://leetcode-cn.com/problems/text-justification/description/ # # algorithms # Hard (47.11%) # Total Accepted: 29.1K # Total Submissions: 57.2K # Testcase Example: '["This", "is", "an", "example", "of", "text", "justification."]\n16' # # 给定一个单词数组和一个长度 maxWidth,重新排版单词,使其成为每行恰好有 maxWidth 个字符,且左右两端对齐的文本。 # # 你应该使用“贪心算法”来放置给定的单词;也就是说,尽可能多地往每行中放置单词。必要时可用空格 ' ' 填充,使得每行恰好有 maxWidth 个字符。 # # 要求尽可能均匀分配单词间的空格数量。如果某一行单词间的空格不能均匀分配,则左侧放置的空格数要多于右侧的空格数。 # # 文本的最后一行应为左对齐,且单词之间不插入额外的空格。 # # 说明: # # # 单词是指由非空格字符组成的字符序列。 # 每个单词的长度大于 0,小于等于 maxWidth。 # 输入单词数组 words 至少包含一个单词。 # # # 示例: # # 输入: # words = ["This", "is", "an", "example", "of", "text", "justification."] # maxWidth = 16 # 输出: # [ # "This is an", # "example of text", # "justification. " # ] # # # 示例 2: # # 输入: # words = ["What","must","be","acknowledgment","shall","be"] # maxWidth = 16 # 输出: # [ # "What must be", # "acknowledgment ", # "shall be " # ] # 解释: 注意最后一行的格式应为 "shall be " 而不是 "shall be", # 因为最后一行应为左对齐,而不是左右两端对齐。 # 第二行同样为左对齐,这是因为这行只包含一个单词。 # # # 示例 3: # # 输入: # words = # ["Science","is","what","we","understand","well","enough","to","explain", # "to","a","computer.","Art","is","everything","else","we","do"] # maxWidth = 20 # 输出: # [ # "Science is what we", # "understand well", # "enough to explain to", # "a computer. Art is", # "everything else we", # "do " # ] # blank 返回长度为 n 的由空格组成的字符串 # 当前行的第一个单词在 words 的位置 # 统计当前行单词长度之和 # 循环确定当前行可以放多少单词,单词之间有一个空格 # 当前行是最后一行:单词左对齐,且单词之间只有一个空格,在行尾填充剩余空格 # 当前行只有一个单词:该单词左对齐,在行末填充空格 # 当前行多个单词 # 拼接额外加一个空格的单词 | 3.112056 | 3 |
fasttrips/utils/widgets.py | janzill/fast-trips | 21 | 6613414 | import os
import ipywidgets as widgets
| import os
import ipywidgets as widgets
| none | 1 | 1.063928 | 1 | |
tao/workspace2.bzl | JamesTheZ/BladeDISC | 328 | 6613415 | <reponame>JamesTheZ/BladeDISC
def _tao_bridge_repositories():
    # Register sibling source trees as local Bazel repositories so the
    # tao_bridge build can depend on them without fetching anything remote.
    native.local_repository(
        name = "org_tensorflow",
        path = "../tf_community/",
    )
    native.local_repository(
        name = "org_third_party",
        path = "../third_party/",
    )
    native.local_repository(
        name = "org_tao_compiler",
        path = "../tao_compiler/",
    )
def workspace():
    # Entry point invoked from the WORKSPACE file to wire up all local repos.
    _tao_bridge_repositories()
# Alias so it can be loaded without assigning to a different symbol to prevent
# shadowing previous loads and trigger a buildifier warning.
tao_bridge_workspace2 = workspace
def _tao_bridge_repositories():
    # Register sibling source trees as local Bazel repositories so the
    # tao_bridge build can depend on them without fetching anything remote.
    native.local_repository(
        name = "org_tensorflow",
        path = "../tf_community/",
    )
    native.local_repository(
        name = "org_third_party",
        path = "../third_party/",
    )
    native.local_repository(
        name = "org_tao_compiler",
        path = "../tao_compiler/",
    )
def workspace():
    # Entry point invoked from the WORKSPACE file to wire up all local repos.
    _tao_bridge_repositories()
# Alias so it can be loaded without assigning to a different symbol to prevent
# shadowing previous loads and trigger a buildifier warning.
tao_bridge_workspace2 = workspace
docs/gen_attr_table.py | DocOtak/gsw-xarray | 7 | 6613416 | from sphinx.util import progress_message
from gsw_xarray._names import _names
from gsw_xarray._attributes import _func_attrs
list_table = ""
def _add_attrs(list_table, attrs, label):
if (label_value := attrs.get(label)) is not None:
list_table += f" * {label}: ``{label_value}``\n"
return list_table
# Build one reST section per gsw function: heading, output count, and one
# numbered entry per output with its known attributes.
with progress_message("Generating gsw attribute table"):
    for name, result_name in _names.items():
        list_table += f"{name}\n{'-' * len(name)}\n"
        if isinstance(result_name, tuple):
            # Multi-output function: _func_attrs[name] is indexed per output.
            list_table += f"Has {len(result_name)} outputs\n\n"
            for i, result in enumerate(result_name):
                list_table += f"#. **{result}**\n\n"
                attrs = _func_attrs[name][i]
                list_table = _add_attrs(list_table, attrs, "standard_name")
                list_table = _add_attrs(list_table, attrs, "units")
                list_table = _add_attrs(list_table, attrs, "reference_scale")
                list_table += "\n"
        else:
            # Single-output function: attrs dict applies directly.
            attrs = _func_attrs[name]
            list_table += "Has 1 output\n\n"
            list_table += f"#. **{result_name}**\n\n"
            list_table = _add_attrs(list_table, attrs, "standard_name")
            list_table = _add_attrs(list_table, attrs, "units")
            list_table = _add_attrs(list_table, attrs, "reference_scale")
            list_table += "\n"
# Emit the assembled reST for inclusion by the Sphinx docs build.
with open("_attr_table.rst", "w", encoding="utf8") as f:
    f.write(list_table)
| from sphinx.util import progress_message
from gsw_xarray._names import _names
from gsw_xarray._attributes import _func_attrs
list_table = ""
def _add_attrs(list_table, attrs, label):
if (label_value := attrs.get(label)) is not None:
list_table += f" * {label}: ``{label_value}``\n"
return list_table
# Build one reST section per gsw function: heading, output count, and one
# numbered entry per output with its known attributes.
with progress_message("Generating gsw attribute table"):
    for name, result_name in _names.items():
        list_table += f"{name}\n{'-' * len(name)}\n"
        if isinstance(result_name, tuple):
            # Multi-output function: _func_attrs[name] is indexed per output.
            list_table += f"Has {len(result_name)} outputs\n\n"
            for i, result in enumerate(result_name):
                list_table += f"#. **{result}**\n\n"
                attrs = _func_attrs[name][i]
                list_table = _add_attrs(list_table, attrs, "standard_name")
                list_table = _add_attrs(list_table, attrs, "units")
                list_table = _add_attrs(list_table, attrs, "reference_scale")
                list_table += "\n"
        else:
            # Single-output function: attrs dict applies directly.
            attrs = _func_attrs[name]
            list_table += "Has 1 output\n\n"
            list_table += f"#. **{result_name}**\n\n"
            list_table = _add_attrs(list_table, attrs, "standard_name")
            list_table = _add_attrs(list_table, attrs, "units")
            list_table = _add_attrs(list_table, attrs, "reference_scale")
            list_table += "\n"
# Emit the assembled reST for inclusion by the Sphinx docs build.
with open("_attr_table.rst", "w", encoding="utf8") as f:
    f.write(list_table)
f.write(list_table)
| none | 1 | 2.341115 | 2 | |
Python Files/picklemulti.py | Nmane1612/Nihar-Mane | 3 | 6613417 | <gh_stars>1-10
import pickle
class Human:
    """A person record populated interactively from stdin prompts."""

    def __init__(self):
        # Both values are kept as raw strings, exactly as typed.
        self.name = input("Enter your name : ")
        self.age = input("Enter your age : ")

    def disp(self):
        """Print a one-line greeting built from the stored name and age."""
        print("Hello {}, You are {} year old!".format(self.name, self.age))
num=int(input("Enter the number of people to be entered : "))
# Serialize each Human back-to-back into one binary file; pickle supports
# multiple consecutive objects in a single stream.
with open("HumanMulti.dat", "w+b") as f:
    for i in range(num):
        insaan=Human()
        pickle.dump(insaan,f)
# Read objects back until the stream is exhausted; pickle.load raises
# EOFError once there is no further object to read.
with open("HumanMulti.dat", "rb") as f:
    while True:
        try:
            maanav=pickle.load(f)
            maanav.disp()
        except EOFError:
            print("Done with object")
            break
| import pickle
class Human:
    """A person record populated interactively from stdin prompts."""

    def __init__(self):
        # Values are stored as the raw strings the user typed.
        self.name = input("Enter your name : ")
        self.age = input("Enter your age : ")

    def disp(self):
        """Print a greeting composed from the stored name and age."""
        print("Hello {}, You are {} year old!".format(self.name, self.age))
num=int(input("Enter the number of people to be entered : "))
# Serialize each Human back-to-back into one binary file; pickle supports
# multiple consecutive objects in a single stream.
with open("HumanMulti.dat", "w+b") as f:
    for i in range(num):
        insaan=Human()
        pickle.dump(insaan,f)
# Read objects back until the stream is exhausted; pickle.load raises
# EOFError once there is no further object to read.
with open("HumanMulti.dat", "rb") as f:
    while True:
        try:
            maanav=pickle.load(f)
            maanav.disp()
        except EOFError:
            print("Done with object")
            break
requirements/competency-questions/sparql_query.py | IDLabResearch/montolo-voc | 0 | 6613418 | <filename>requirements/competency-questions/sparql_query.py
import sys
import re
import os
import ssl
import json
import urllib.parse
import urllib.request
# Usage: run every *.sparql file in a directory against a SPARQL endpoint and
# write each result set next to its query as <query>.out.json.
if len(sys.argv) != 3:
    print(f"Usage: python3 {sys.argv[0]} endpoint_url sparql_directory")
    exit()
sparql_endpoint = sys.argv[1]
sparql_dir = sys.argv[2]
files = [f for f in os.listdir(sparql_dir) if re.match(r'.*\.sparql', f)]
for sparql_file in files:
    with open(sparql_dir + "/" + sparql_file, "r") as f:
        query = f.read()
    # URL-encode the query for use as a GET parameter.
    query = urllib.parse.quote_plus(query)
    # NOTE(review): this disables TLS certificate verification, so the
    # endpoint is not authenticated — acceptable only for trusted/local use.
    context = ssl._create_unverified_context()
    request = urllib.request.Request(sparql_endpoint + "?query=" + query)
    request.add_header("Accept", "application/sparql-results+json")
    result = urllib.request.urlopen(request, context=context).read()
    obj = json.loads(result)
    # Pretty-print the JSON results for stable, reviewable output files.
    with open(sparql_dir + "/" + sparql_file + ".out.json", "w") as out:
        out.write(json.dumps(obj, indent=4, sort_keys=True))
| <filename>requirements/competency-questions/sparql_query.py
import sys
import re
import os
import ssl
import json
import urllib.parse
import urllib.request
# Usage: run every *.sparql file in a directory against a SPARQL endpoint and
# write each result set next to its query as <query>.out.json.
if len(sys.argv) != 3:
    print(f"Usage: python3 {sys.argv[0]} endpoint_url sparql_directory")
    exit()
sparql_endpoint = sys.argv[1]
sparql_dir = sys.argv[2]
files = [f for f in os.listdir(sparql_dir) if re.match(r'.*\.sparql', f)]
for sparql_file in files:
    with open(sparql_dir + "/" + sparql_file, "r") as f:
        query = f.read()
    # URL-encode the query for use as a GET parameter.
    query = urllib.parse.quote_plus(query)
    # NOTE(review): this disables TLS certificate verification, so the
    # endpoint is not authenticated — acceptable only for trusted/local use.
    context = ssl._create_unverified_context()
    request = urllib.request.Request(sparql_endpoint + "?query=" + query)
    request.add_header("Accept", "application/sparql-results+json")
    result = urllib.request.urlopen(request, context=context).read()
    obj = json.loads(result)
    # Pretty-print the JSON results for stable, reviewable output files.
    with open(sparql_dir + "/" + sparql_file + ".out.json", "w") as out:
        out.write(json.dumps(obj, indent=4, sort_keys=True))
| none | 1 | 3.111379 | 3 | |
luna_ml/api/__init__.py | luna-ml/luna-ml | 5 | 6613419 | import yaml
# make pyyaml don't include tag when dump. such as "!!python/object:api.project_yaml"
# see https://stackoverflow.com/a/48823424/2952665
def noop(self, *args, **kw):
    """Do-nothing stand-in for Emitter.process_tag; accepts any arguments."""
    return None
yaml.emitter.Emitter.process_tag = noop
| import yaml
# make pyyaml don't include tag when dump. such as "!!python/object:api.project_yaml"
# see https://stackoverflow.com/a/48823424/2952665
def noop(self, *args, **kw):
    """Do-nothing stand-in for Emitter.process_tag; accepts any arguments."""
    return None
yaml.emitter.Emitter.process_tag = noop
| en | 0.900233 | # make pyyaml don't include tag when dump. such as "!!python/object:api.project_yaml" # see https://stackoverflow.com/a/48823424/2952665 | 1.574114 | 2 |
start_server.py | neurorishika/babySSH | 0 | 6613420 | from util import *
import atexit
import babydes as bd
import diffiehellman as dh
import hash as hsh
import numpy as np
import os
import rsa
import sys
import time
attributions = """
=============================================================================================
______ _ _ ___ _ _ _ _ _ _
| ___ \ (_) | | / _ \| | | | (_) | | | (_)
| |_/ / __ ___ _ ___ ___| |_/ /_\ \ |_| |_ _ __ _| |__ _ _| |_ _ ___ _ __ ___
| __/ '__/ _ \| |/ _ \/ __| __| _ | __| __| '__| | '_ \| | | | __| |/ _ \| '_ \/ __|
| | | | | (_) | | __/ (__| |_| | | | |_| |_| | | | |_) | |_| | |_| | (_) | | | \__ \
\_| |_| \___/| |\___|\___|\__\_| |_/\__|\__|_| |_|_.__/ \__,_|\__|_|\___/|_| |_|___/
_/ |
|__/
=============================================================================================
Front-End and Messaging Protocol: <NAME>
<NAME> : <NAME>
RSA : <NAME>
Client Authentication: <NAME>
BabyDES: All Team Members
=============================================================================================
"""
def exit_handler():
    """Remove eavesdropper log files (if any) and print credits on shutdown."""
    for log_name in ('hacker.log', 'hacker_decompressed.log'):
        if os.path.exists(log_name):
            os.remove(log_name)
    print(attributions)
atexit.register(exit_handler)
## Setup Messaging Protocol ##
# Listener #
def listen():
    """Block until the client drops a message file, then decode and return it.

    Polls for {IP}/port{port}/client2server.message every 0.5 s, reads its
    first line, deletes the file, decompresses the payload and — once a
    shared bDES key exists — decrypts it and verifies its MAC. Terminates
    the process via quit() on a MAC mismatch.
    """
    keep_listening = True
    # Busy-wait (with sleep) until the client writes its message file.
    while keep_listening:
        if os.path.exists(f"{IP}/port{port}/client2server.message"):
            with open(f"{IP}/port{port}/client2server.message", "r") as f:
                message = f.readline()
            keep_listening = False
            # Delete the file so the channel is free for the next message.
            os.remove(f"{IP}/port{port}/client2server.message")
        time.sleep(0.5)
    print(f"Message Received: '{message}'")
    message = decompress(message)
    if showcompression:
        print(f"Decompressed Message: '{message}'")
    # bDESkey is None until the Diffie-Hellman exchange has completed.
    if bDESkey is not None:
        message = bd.bDES_decryption(message,bDESkey)
        # Wire format after decryption: "<plaintext>||<MAC>".
        message,mac = message.split("||")
        print(f"Decrypted Message: {message}")
        if hsh.MAC(message,bDESkey) == mac:
            print(f"Message is authentic")
        else:
            # MAC mismatch means tampering: abort the whole server.
            print(f"Message is not authentic. You are under attack.")
            quit()
    return message
# Messenger #
def reply(message):
    """Encrypt (when a shared key exists), compress, and send *message*.

    Writes to {IP}/port{port}/server2client.message once the previous reply
    has been consumed; gives up after ~60 s if the client never reads it.
    """
    message = str(message)
    if bDESkey is not None:
        # Append the MAC before encrypting: "<plaintext>||<MAC>".
        message = message+"||"+hsh.MAC(message,bDESkey)
        print(f"Encrypting reply: '{message}'...")
        message = bd.bDES_encryption(message,bDESkey)
    if showcompression:
        print(f"Compressing reply: '{message}'...")
    message = compress(message)
    print(f"Sending Reply: '{message}'...",end='')
    t0= time.time()
    busy = True
    # Wait for the channel file to be free (client deletes it after reading).
    while busy:
        if not os.path.exists(f"{IP}/port{port}/server2client.message"):
            with open(f"{IP}/port{port}/server2client.message", "w") as f:
                f.write(message)
            busy = False
        time.sleep(1)
        if time.time()-t0 > 60:
            # Timeout: give up without logging to the hacker channel.
            print("Sending Failed. Client Unresponsive.")
            return
    # Mirror the wire traffic into the demo eavesdropper log.
    hackerlog(message)
    print("Sent")
## Set up Client System Structure ##
digits =4
bDESkey = None
password = "<PASSWORD>"
keyexchanged = False
authenticated = False
showcompression = True
# Default user@server port to connect #
if len(sys.argv)==2:
IP = sys.argv[1]
port = "22"
user = "root"
elif len(sys.argv)==3:
IP = sys.argv[1]
port = sys.argv[2]
user = "root"
elif len(sys.argv)==4:
IP = sys.argv[1]
port = sys.argv[2]
user = sys.argv[3]
else:
IP = "192.168.0.1"
port = "22"
user = "root"
# Setup the Server Structure
if not os.path.exists(f'{IP}'):
os.makedirs(f'{IP}/port{port}')
os.makedirs(f'{IP}/{user}')
os.makedirs(f'{IP}/{user}/.ssh/')
else:
if not os.path.exists(f'{IP}/port{port}'):
os.makedirs(f'{IP}/port{port}')
if not os.path.exists(f'{IP}/{user}'):
os.makedirs(f'{IP}/{user}/.ssh/')
# Set up SSH data #
if not os.path.exists(f'{IP}/{user}/.ssh/authorized_keys'):
f = open(f"{IP}/{user}/.ssh/authorized_keys", "w")
f.close()
# Set up RSA keypairs for the first time #
if not (os.path.exists(f'{IP}/{user}/.ssh/id_rsa.pub') and os.path.exists(f'{IP}/{user}/.ssh/id_rsa')):
print("Generating server RSA Keys...", end="")
pub,pri = rsa.generate_RSA_keys(digits)
with open(f"{IP}/{user}/.ssh/id_rsa", "w") as f:
f.write(f"{pri[0]}:{pri[1]}")
with open(f"{IP}/{user}/.ssh/id_rsa.pub", "w") as f:
f.write(f"{pub[0]}:{pub[1]}")
print("Generated")
# Clean up the channel for communication
for i in os.listdir(f'{IP}/port{port}'):
os.remove(f'{IP}/port{port}/{i}')
## Initialize the Server ##
startmessage = f"""
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
__ __ _ _ ____ _ _____ _____ _ _
\ \ / / | | | | | _ \ | | / ____/ ____| | | |
\ \ /\ / /__| | ___ ___ _ __ ___ ___ | |_ ___ | |_) | __ _| |__ _ _| (___| (___ | |__| |
\ \/ \/ / _ \ |/ __/ _ \| '_ ` _ \ / _ \ | __/ _ \ | _ < / _` | '_ \| | | |\___ \\___ \ | __ |
\ /\ / __/ | (_| (_) | | | | | | __/ | || (_) | | |_) | (_| | |_) | |_| |____) |___) | | | |
\/ \/ \___|_|\___\___/|_| |_| |_|\___| \__\___/ |____/ \__,_|_.__/ \__, |_____/_____/|_| |_|
__/ |
|___/
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
BabySSH Server Started on IP {IP}. Listening to Port {port}.
KEYEX_ALGORITHM:DIFFIEHELLMAN
PUBKEYENC_ALGORITHM:RSA
SYMKEYENC_ALGORITHM:BABYDES
HASH_ALGORITHM:KNUTHVARIANTDIVISION
MAC_ALGORITHM:SIMPLIFIEDHMAC
COMPRESSION:ZLIB""".format(IP=IP,port=port)
print(startmessage)
# Main request loop: each client message is one protocol step.
while True:
    message = listen()
    if keyexchanged:
        # Once the channel is encrypted, stop echoing compression details.
        showcompression = False
    # respond to public key request #
    if message == "request:publickey":
        print("Sending Server Public Key.")
        with open(f'{IP}/{user}/.ssh/id_rsa.pub', "r") as f:
            pubkey_server = f.read()
        reply(pubkey_server)
    # respond to key exchange request #
    elif message == "init:keyexchange":
        # Diffie-Hellman: send (p, g, server public value), receive client's.
        p = dh.getSafePrime(digits)
        g = dh.getPrimitiveRoot(p)
        print("Safe prime and primitive root generated.")
        privatekey_server = np.random.randint(2,high=p-1)
        print("Private key generated.")
        pubm_server = dh.genPublicMessage(p,g,privatekey_server)
        reply(f"{p}:{g}:{pubm_server}")
        pubm_client = int(listen())
        sharedkey = dh.genSharedKey(p,pubm_client,privatekey_server)
        print("Shared key generated.")
        # Derive the 9-bit bDES key deterministically from the shared secret
        # (client performs the same seeded draw), then reseed from entropy.
        np.random.seed(sharedkey)
        bDESkey = list(np.random.choice([False,True],size=9))
        np.random.seed()
        keyexchanged = True
        print(f"Key exchanged. Shared bDES Key is: {bool2str(bDESkey)}. Channel is now secure.")
    # respond to authentication request #
    elif message[:13] == "authenticate:" and keyexchanged:
        pubkey_client = message[13:]
        with open(f'{IP}/{user}/.ssh/authorized_keys', "r") as f:
            authorized_keys = f.readlines()
        if not (pubkey_client in authorized_keys):
            # Unknown key: ask for the password before enrolling it.
            reply(f"Key is not authorized")
            if password == listen():
                with open(f'{IP}/{user}/.ssh/authorized_keys', "a") as f:
                    f.write(pubkey_client)
            else:
                # NOTE(review): after "Incorrect Password" control still falls
                # through to "Key is authorized" and the challenge below —
                # authentication proceeds despite the failed password.
                reply("Incorrect Password")
        reply("Key is authorized")
        # Challenge-response: encrypt a random code with the client's RSA key;
        # the client must return the hash of (code + bDES-key-as-int).
        pubkey_client = pubkey_client.split(":")
        pubkey_client = int(pubkey_client[0]),int(pubkey_client[1])
        authentication_code = np.random.randint(10**(digits-1), 10**digits)
        print("Authentication message generated.")
        authentication_code_enc = rsa.RSA_encrypt(authentication_code, pubkey_client)
        print("Authentication message encrpyted.")
        reply(f"{authentication_code_enc}")
        authentication_code += sum([int(bDESkey[i])*2**i for i in range(len(bDESkey))])
        hashed_code = hsh.hash_it(authentication_code)
        print("Hash generated.")
        if str(hashed_code) == listen():
            print("Hash matches.")
            reply("Authentication successful")
            authenticated = True
        else:
            print("Hash dont match.")
            reply("Authentication failed")
    # respond to exit request #
    elif message == "exit":
        # Reset all session state so a new client can connect cleanly.
        bDESkey = None
        keyexchanged = False
        authenticated = False
        showcompression = True
    # respond to general CLI request #
    elif keyexchanged and authenticated:
        # NOTE(review): executes the client's text verbatim in a shell —
        # intentional here (this *is* the toy SSH service), but unsafe
        # anywhere else.
        with os.popen(message) as process:
            output = process.read()
        if output == "":
            reply("NULL OUTPUT. Possible Error.")
        else:
            reply(output)
    else:
        reply("ERROR. Message not understood.")
| from util import *
import atexit
import babydes as bd
import diffiehellman as dh
import hash as hsh
import numpy as np
import os
import rsa
import sys
import time
attributions = """
=============================================================================================
______ _ _ ___ _ _ _ _ _ _
| ___ \ (_) | | / _ \| | | | (_) | | | (_)
| |_/ / __ ___ _ ___ ___| |_/ /_\ \ |_| |_ _ __ _| |__ _ _| |_ _ ___ _ __ ___
| __/ '__/ _ \| |/ _ \/ __| __| _ | __| __| '__| | '_ \| | | | __| |/ _ \| '_ \/ __|
| | | | | (_) | | __/ (__| |_| | | | |_| |_| | | | |_) | |_| | |_| | (_) | | | \__ \
\_| |_| \___/| |\___|\___|\__\_| |_/\__|\__|_| |_|_.__/ \__,_|\__|_|\___/|_| |_|___/
_/ |
|__/
=============================================================================================
Front-End and Messaging Protocol: <NAME>
<NAME> : <NAME>
RSA : <NAME>
Client Authentication: <NAME>
BabyDES: All Team Members
=============================================================================================
"""
def exit_handler():
    """Remove eavesdropper log files (if any) and print credits on shutdown."""
    for log_name in ('hacker.log', 'hacker_decompressed.log'):
        if os.path.exists(log_name):
            os.remove(log_name)
    print(attributions)
atexit.register(exit_handler)
## Setup Messaging Protocol ##
# Listener #
def listen():
    """Block until the client drops a message file, then decode and return it.

    Polls for {IP}/port{port}/client2server.message every 0.5 s, reads its
    first line, deletes the file, decompresses the payload and — once a
    shared bDES key exists — decrypts it and verifies its MAC. Terminates
    the process via quit() on a MAC mismatch.
    """
    keep_listening = True
    # Busy-wait (with sleep) until the client writes its message file.
    while keep_listening:
        if os.path.exists(f"{IP}/port{port}/client2server.message"):
            with open(f"{IP}/port{port}/client2server.message", "r") as f:
                message = f.readline()
            keep_listening = False
            # Delete the file so the channel is free for the next message.
            os.remove(f"{IP}/port{port}/client2server.message")
        time.sleep(0.5)
    print(f"Message Received: '{message}'")
    message = decompress(message)
    if showcompression:
        print(f"Decompressed Message: '{message}'")
    # bDESkey is None until the Diffie-Hellman exchange has completed.
    if bDESkey is not None:
        message = bd.bDES_decryption(message,bDESkey)
        # Wire format after decryption: "<plaintext>||<MAC>".
        message,mac = message.split("||")
        print(f"Decrypted Message: {message}")
        if hsh.MAC(message,bDESkey) == mac:
            print(f"Message is authentic")
        else:
            # MAC mismatch means tampering: abort the whole server.
            print(f"Message is not authentic. You are under attack.")
            quit()
    return message
# Messenger #
def reply(message):
    """Encrypt (when a shared key exists), compress, and send *message*.

    Writes to {IP}/port{port}/server2client.message once the previous reply
    has been consumed; gives up after ~60 s if the client never reads it.
    """
    message = str(message)
    if bDESkey is not None:
        # Append the MAC before encrypting: "<plaintext>||<MAC>".
        message = message+"||"+hsh.MAC(message,bDESkey)
        print(f"Encrypting reply: '{message}'...")
        message = bd.bDES_encryption(message,bDESkey)
    if showcompression:
        print(f"Compressing reply: '{message}'...")
    message = compress(message)
    print(f"Sending Reply: '{message}'...",end='')
    t0= time.time()
    busy = True
    # Wait for the channel file to be free (client deletes it after reading).
    while busy:
        if not os.path.exists(f"{IP}/port{port}/server2client.message"):
            with open(f"{IP}/port{port}/server2client.message", "w") as f:
                f.write(message)
            busy = False
        time.sleep(1)
        if time.time()-t0 > 60:
            # Timeout: give up without logging to the hacker channel.
            print("Sending Failed. Client Unresponsive.")
            return
    # Mirror the wire traffic into the demo eavesdropper log.
    hackerlog(message)
    print("Sent")
## Set up Client System Structure ##
digits =4
bDESkey = None
password = "<PASSWORD>"
keyexchanged = False
authenticated = False
showcompression = True
# Default user@server port to connect #
if len(sys.argv)==2:
IP = sys.argv[1]
port = "22"
user = "root"
elif len(sys.argv)==3:
IP = sys.argv[1]
port = sys.argv[2]
user = "root"
elif len(sys.argv)==4:
IP = sys.argv[1]
port = sys.argv[2]
user = sys.argv[3]
else:
IP = "192.168.0.1"
port = "22"
user = "root"
# Setup the Server Structure
if not os.path.exists(f'{IP}'):
os.makedirs(f'{IP}/port{port}')
os.makedirs(f'{IP}/{user}')
os.makedirs(f'{IP}/{user}/.ssh/')
else:
if not os.path.exists(f'{IP}/port{port}'):
os.makedirs(f'{IP}/port{port}')
if not os.path.exists(f'{IP}/{user}'):
os.makedirs(f'{IP}/{user}/.ssh/')
# Set up SSH data #
if not os.path.exists(f'{IP}/{user}/.ssh/authorized_keys'):
f = open(f"{IP}/{user}/.ssh/authorized_keys", "w")
f.close()
# Set up RSA keypairs for the first time #
if not (os.path.exists(f'{IP}/{user}/.ssh/id_rsa.pub') and os.path.exists(f'{IP}/{user}/.ssh/id_rsa')):
print("Generating server RSA Keys...", end="")
pub,pri = rsa.generate_RSA_keys(digits)
with open(f"{IP}/{user}/.ssh/id_rsa", "w") as f:
f.write(f"{pri[0]}:{pri[1]}")
with open(f"{IP}/{user}/.ssh/id_rsa.pub", "w") as f:
f.write(f"{pub[0]}:{pub[1]}")
print("Generated")
# Clean up the channel for communication
for i in os.listdir(f'{IP}/port{port}'):
os.remove(f'{IP}/port{port}/{i}')
## Initialize the Server ##
startmessage = f"""
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
__ __ _ _ ____ _ _____ _____ _ _
\ \ / / | | | | | _ \ | | / ____/ ____| | | |
\ \ /\ / /__| | ___ ___ _ __ ___ ___ | |_ ___ | |_) | __ _| |__ _ _| (___| (___ | |__| |
\ \/ \/ / _ \ |/ __/ _ \| '_ ` _ \ / _ \ | __/ _ \ | _ < / _` | '_ \| | | |\___ \\___ \ | __ |
\ /\ / __/ | (_| (_) | | | | | | __/ | || (_) | | |_) | (_| | |_) | |_| |____) |___) | | | |
\/ \/ \___|_|\___\___/|_| |_| |_|\___| \__\___/ |____/ \__,_|_.__/ \__, |_____/_____/|_| |_|
__/ |
|___/
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
BabySSH Server Started on IP {IP}. Listening to Port {port}.
KEYEX_ALGORITHM:DIFFIEHELLMAN
PUBKEYENC_ALGORITHM:RSA
SYMKEYENC_ALGORITHM:BABYDES
HASH_ALGORITHM:KNUTHVARIANTDIVISION
MAC_ALGORITHM:SIMPLIFIEDHMAC
COMPRESSION:ZLIB""".format(IP=IP,port=port)
print(startmessage)
while True:
message = listen()
if keyexchanged:
showcompression = False
# respond to public key request #
if message == "request:publickey":
print("Sending Server Public Key.")
with open(f'{IP}/{user}/.ssh/id_rsa.pub', "r") as f:
pubkey_server = f.read()
reply(pubkey_server)
# respond to key exchange request #
elif message == "init:keyexchange":
p = dh.getSafePrime(digits)
g = dh.getPrimitiveRoot(p)
print("Safe prime and primitive root generated.")
privatekey_server = np.random.randint(2,high=p-1)
print("Private key generated.")
pubm_server = dh.genPublicMessage(p,g,privatekey_server)
reply(f"{p}:{g}:{pubm_server}")
pubm_client = int(listen())
sharedkey = dh.genSharedKey(p,pubm_client,privatekey_server)
print("Shared key generated.")
np.random.seed(sharedkey)
bDESkey = list(np.random.choice([False,True],size=9))
np.random.seed()
keyexchanged = True
print(f"Key exchanged. Shared bDES Key is: {bool2str(bDESkey)}. Channel is now secure.")
# respond to authentication request #
elif message[:13] == "authenticate:" and keyexchanged:
pubkey_client = message[13:]
with open(f'{IP}/{user}/.ssh/authorized_keys', "r") as f:
authorized_keys = f.readlines()
if not (pubkey_client in authorized_keys):
reply(f"Key is not authorized")
if password == listen():
with open(f'{IP}/{user}/.ssh/authorized_keys', "a") as f:
f.write(pubkey_client)
else:
reply("Incorrect Password")
reply("Key is authorized")
pubkey_client = pubkey_client.split(":")
pubkey_client = int(pubkey_client[0]),int(pubkey_client[1])
authentication_code = np.random.randint(10**(digits-1), 10**digits)
print("Authentication message generated.")
authentication_code_enc = rsa.RSA_encrypt(authentication_code, pubkey_client)
print("Authentication message encrpyted.")
reply(f"{authentication_code_enc}")
authentication_code += sum([int(bDESkey[i])*2**i for i in range(len(bDESkey))])
hashed_code = hsh.hash_it(authentication_code)
print("Hash generated.")
if str(hashed_code) == listen():
print("Hash matches.")
reply("Authentication successful")
authenticated = True
else:
print("Hash dont match.")
reply("Authentication failed")
# respond to exit request #
elif message == "exit":
bDESkey = None
keyexchanged = False
authenticated = False
showcompression = True
# respond to general CLI request #
elif keyexchanged and authenticated:
with os.popen(message) as process:
output = process.read()
if output == "":
reply("NULL OUTPUT. Possible Error.")
else:
reply(output)
else:
reply("ERROR. Message not understood.")
| en | 0.457489 | ============================================================================================= ______ _ _ ___ _ _ _ _ _ _ | ___ \ (_) | | / _ \| | | | (_) | | | (_) | |_/ / __ ___ _ ___ ___| |_/ /_\ \ |_| |_ _ __ _| |__ _ _| |_ _ ___ _ __ ___ | __/ '__/ _ \| |/ _ \/ __| __| _ | __| __| '__| | '_ \| | | | __| |/ _ \| '_ \/ __| | | | | | (_) | | __/ (__| |_| | | | |_| |_| | | | |_) | |_| | |_| | (_) | | | \__ \ \_| |_| \___/| |\___|\___|\__\_| |_/\__|\__|_| |_|_.__/ \__,_|\__|_|\___/|_| |_|___/ _/ | |__/ ============================================================================================= Front-End and Messaging Protocol: <NAME> <NAME> : <NAME> RSA : <NAME> Client Authentication: <NAME> BabyDES: All Team Members ============================================================================================= ## Setup Messaging Protocol ## # Listener # # Messenger # ## Set up Client System Structure ## # Default user@server port to connect # # Setup the Server Structure # Set up SSH data # # Set up RSA keypairs for the first time # # Clean up the channel for communication ## Initialize the Server ## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ __ __ _ _ ____ _ _____ _____ _ _ \ \ / / | | | | | _ \ | | / ____/ ____| | | | \ \ /\ / /__| | ___ ___ _ __ ___ ___ | |_ ___ | |_) | __ _| |__ _ _| (___| (___ | |__| | \ \/ \/ / _ \ |/ __/ _ \| '_ ` _ \ / _ \ | __/ _ \ | _ < / _` | '_ \| | | |\___ \\___ \ | __ | \ /\ / __/ | (_| (_) | | | | | | __/ | || (_) | | |_) | (_| | |_) | |_| |____) |___) | | | | \/ \/ \___|_|\___\___/|_| |_| |_|\___| \__\___/ |____/ \__,_|_.__/ \__, |_____/_____/|_| |_| __/ | |___/ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ BabySSH Server Started on IP {IP}. Listening to Port {port}. 
KEYEX_ALGORITHM:DIFFIEHELLMAN PUBKEYENC_ALGORITHM:RSA SYMKEYENC_ALGORITHM:BABYDES HASH_ALGORITHM:KNUTHVARIANTDIVISION MAC_ALGORITHM:SIMPLIFIEDHMAC COMPRESSION:ZLIB # respond to public key request # # respond to key exchange request # # respond to authentication request # # respond to exit request # # respond to general CLI request # | 1.655936 | 2 |
test/fddb/test_darknet.py | the-house-of-black-and-white/pyWiderFace | 17 | 6613421 | <gh_stars>10-100
import logging
import sys
import unittest
from shutil import rmtree
import os
from morghulis.fddb import FDDB
from morghulis.fddb.darknet_exporter import DarknetExporter
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
log = logging.getLogger(__name__)
FDDB_DIR = os.path.dirname(__file__) + '/FDDB_sample/'
TMP_DIR = '/opt/project/.tmp/fddb/darknet'
class FDDBDarknetTests(unittest.TestCase):
    """Smoke tests for exporting the bundled FDDB sample in darknet format."""

    def setUp(self):
        # Always begin from a clean output directory so runs are repeatable.
        rmtree(TMP_DIR, ignore_errors=True)
        dataset = FDDB(FDDB_DIR)
        self.fddb = dataset
        self.darknetExporter = DarknetExporter(dataset)

    def test_sanity(self):
        # Passes as long as the export completes without raising.
        self.darknetExporter.export(TMP_DIR)
| import logging
import sys
import unittest
from shutil import rmtree
import os
from morghulis.fddb import FDDB
from morghulis.fddb.darknet_exporter import DarknetExporter
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
log = logging.getLogger(__name__)
FDDB_DIR = os.path.dirname(__file__) + '/FDDB_sample/'
TMP_DIR = '/opt/project/.tmp/fddb/darknet'
class FDDBDarknetTests(unittest.TestCase):
    # Exercises the darknet exporter against the bundled FDDB sample data.
    def setUp(self):
        # Remove any output left over from a previous run before exporting.
        rmtree(TMP_DIR, ignore_errors=True)
        self.fddb = FDDB(FDDB_DIR)
        self.darknetExporter = DarknetExporter(self.fddb)
    def test_sanity(self):
        # Smoke test: the export should complete without raising.
        self.darknetExporter.export(TMP_DIR)
matrix_add.py | WinCanton/Python-Morsels | 0 | 6613422 | def matrix_add(list1, list2):
"""
Accepts two lists-of-lists of numbers and returns one
list-of-lists with each of the corresponding numbers in the
two given lists-of-lists added together.
Example:
>>> matrix1 = [[1, -2], [-3, 4]]
>>> matrix2 = [[2, -1], [0, -1]]
>>> add(matrix1, matrix2)
[[3, -3], [-3, 3]]
>>> matrix1 = [[1, -2, 3], [-4, 5, -6], [7, -8, 9]]
>>> matrix2 = [[1, 1, 0], [1, -2, 3], [-2, 2, -2]]
>>> add(matrix1, matrix2)
[[2, -1, 3], [-3, 3, -3], [5, -6, 7]]
"""
result = []
for each in range(len(list1)):
inner_list = []
for item in range(len(list1[each])):
inner_list.append(list1[each][item] + list2[each][item])
result.append(inner_list)
return result
if __name__ == "__main__":
    # Demo: print the element-wise sum of two 3x3 matrices.
    #matrix1 = [[1, -2], [-3, 4]]
    #matrix2 = [[2, -1], [0, -1]]
    first = [[1, -2, 3], [-4, 5, -6], [7, -8, 9]]
    second = [[1, 1, 0], [1, -2, 3], [-2, 2, -2]]
    print(matrix_add(first, second))
def matrix_add(list1, list2):
    """
    Return a new list-of-lists in which each entry is the sum of the
    corresponding entries of the two given lists-of-lists.

    Example:

    >>> matrix_add([[1, -2], [-3, 4]], [[2, -1], [0, -1]])
    [[3, -3], [-3, 3]]
    """
    summed_rows = []
    for row_index, row in enumerate(list1):
        other_row = list2[row_index]
        # Build each output row by pairing entries positionally.
        summed_rows.append(
            [value + other_row[col] for col, value in enumerate(row)]
        )
    return summed_rows
if __name__ == "__main__":
    # Smoke test: print the element-wise sum of two 3x3 matrices.
    #matrix1 = [[1, -2], [-3, 4]]
    #matrix2 = [[2, -1], [0, -1]]
    matrix1 = [[1, -2, 3], [-4, 5, -6], [7, -8, 9]]
    matrix2 = [[1, 1, 0], [1, -2, 3], [-2, 2, -2]]
    print(matrix_add(matrix1, matrix2))
| en | 0.558325 | Accepts two lists-of-lists of numbers and returns one list-of-lists with each of the corresponding numbers in the two given lists-of-lists added together. Example: >>> matrix1 = [[1, -2], [-3, 4]] >>> matrix2 = [[2, -1], [0, -1]] >>> add(matrix1, matrix2) [[3, -3], [-3, 3]] >>> matrix1 = [[1, -2, 3], [-4, 5, -6], [7, -8, 9]] >>> matrix2 = [[1, 1, 0], [1, -2, 3], [-2, 2, -2]] >>> add(matrix1, matrix2) [[2, -1, 3], [-3, 3, -3], [5, -6, 7]] #matrix1 = [[1, -2], [-3, 4]] #matrix2 = [[2, -1], [0, -1]] | 4.295842 | 4 |
convokit/model/conversation.py | KaminskyJ/Cornell-Conversational-Analysis-Toolkit | 0 | 6613423 | from typing import Dict, List, Callable, Generator, Optional
from .utterance import Utterance
from .user import User
from .corpusUtil import warn
from .corpusObject import CorpusObject
from collections import defaultdict
from .utteranceNode import UtteranceNode
class Conversation(CorpusObject):
    """Represents a discrete subset of utterances in the dataset, connected by a
    reply-to chain.
    :param owner: The Corpus that this Conversation belongs to
    :param cid: The unique ID of this Conversation
    :param utterances: A list of the IDs of the Utterances in this Conversation
    :param meta: Table of initial values for conversation-level metadata
    :ivar meta: A dictionary-like view object providing read-write access to
        conversation-level metadata. For utterance-level metadata, use
        Utterance.meta. For user-level metadata, use User.meta. For corpus-level
        metadata, use Corpus.meta.
    """
    def __init__(self, owner, id: Optional[str] = None,
                 utterances: Optional[List[str]] = None,
                 meta: Optional[Dict] = None):
        super().__init__(obj_type="conversation", owner=owner, id=id, meta=meta)
        self._owner = owner
        # IDs (not objects) of this Conversation's Utterances; the actual
        # Utterance objects live in, and are fetched from, the owner Corpus.
        self._utterance_ids = utterances
        # Lazily-built cache of user identifiers (see iter_users / get_user_ids).
        self._user_ids = None
        # Root of the reply-to tree; built on demand by initialize_tree_structure().
        self.tree: Optional[UtteranceNode] = None
    def get_utterance_ids(self) -> List[str]:
        """Produces a list of the unique IDs of all utterances in the
        Conversation, which can be used in calls to get_utterance() to retrieve
        specific utterances. Provides no ordering guarantees for the list.
        :return: a list of IDs of Utterances in the Conversation
        """
        # we construct a new list instead of returning self._utterance_ids in
        # order to prevent the user from accidentally modifying the internal
        # ID list (since lists are mutable)
        return [ut_id for ut_id in self._utterance_ids]
    def get_utterance(self, ut_id: str) -> Utterance:
        """Looks up the Utterance associated with the given ID. Raises a
        KeyError if no utterance by that ID exists.
        :return: the Utterance with the given ID
        """
        # delegate to the owner Corpus since Conversation does not itself own
        # any Utterances
        return self._owner.get_utterance(ut_id)
    def iter_utterances(self, selector: Callable[[Utterance], bool] = lambda utt: True) -> Generator[Utterance, None, None]:
        """Generator allowing iteration over all utterances in the Conversation.
        Provides no ordering guarantees.
        :param selector: predicate deciding which utterances are yielded;
            all utterances pass by default
        :return: Generator that produces Utterances
        """
        for ut_id in self._utterance_ids:
            utt = self._owner.get_utterance(ut_id)
            if selector(utt):
                yield utt
    def get_usernames(self) -> List[str]:
        """Produces a list of names of all users in the Conversation, which can
        be used in calls to get_user() to retrieve specific users. Provides no
        ordering guarantees for the list.
        :return: a list of usernames
        """
        warn("This function is deprecated and will be removed in a future release. Use get_user_ids() instead.")
        if self._user_ids is None:
            # first call to get_usernames or iter_users; precompute cached list
            # of usernames
            self._user_ids = set()
            for ut_id in self._utterance_ids:
                ut = self._owner.get_utterance(ut_id)
                self._user_ids.add(ut.user.name)
        return list(self._user_ids)
    def get_user_ids(self) -> List[str]:
        """Produces a list of ids of all users in the Conversation, which can
        be used in calls to get_user() to retrieve specific users. Provides no
        ordering guarantees for the list.
        :return: a list of usernames
        """
        # NOTE(review): this caches ut.user.name while iter_users caches
        # ut.user.id -- presumably the two are interchangeable in this
        # codebase; confirm, otherwise the shared _user_ids cache is
        # inconsistent depending on which method is called first.
        if self._user_ids is None:
            # first call to get_usernames or iter_users; precompute cached list
            # of usernames
            self._user_ids = set()
            for ut_id in self._utterance_ids:
                ut = self._owner.get_utterance(ut_id)
                self._user_ids.add(ut.user.name)
        return list(self._user_ids)
    def get_user(self, username: str) -> User:
        """Looks up the User with the given name. Raises a KeyError if no user
        with that name exists.
        :return: the User with the given username
        """
        # delegate to the owner Corpus since Conversation does not itself own
        # any Utterances
        return self._owner.get_user(username)
    def iter_users(self) -> Generator[User, None, None]:
        """Generator allowing iteration over all users in the Conversation.
        Provides no ordering guarantees.
        :return: Generator that produces Users.
        """
        if self._user_ids is None:
            # first call to get_ids or iter_users; precompute cached list of usernames
            self._user_ids = set()
            for ut_id in self._utterance_ids:
                ut = self._owner.get_utterance(ut_id)
                self._user_ids.add(ut.user.id)
        for user_id in self._user_ids:
            yield self._owner.get_user(user_id)
    def check_integrity(self, verbose=True):
        """Checks that the reply-to chain forms a valid tree: no referenced
        utterance is missing and exactly one utterance replies to None.
        :param verbose: whether to print/warn diagnostic details
        :return: True if the Conversation passes the check, False otherwise
        """
        if verbose: print("Checking reply-to chain of Conversation", self.id)
        utt_reply_tos = {utt.id: utt.reply_to for utt in self.iter_utterances()}
        target_utt_ids = set(list(utt_reply_tos.values()))
        speaker_utt_ids = set(list(utt_reply_tos.keys()))
        root_utt_id = target_utt_ids - speaker_utt_ids # There should only be 1 root_utt_id: None
        if len(root_utt_id) != 1:
            if verbose:
                for utt_id in root_utt_id:
                    if utt_id is not None:
                        warn("ERROR: Missing utterance {}".format(utt_id))
            return False
        # sanity check
        utts_replying_to_none = 0
        for utt in self.iter_utterances():
            if utt.reply_to is None:
                utts_replying_to_none += 1
        if utts_replying_to_none > 1:
            if verbose: warn("ERROR: Found more than one Utterance replying to None.")
            return False
        if verbose: print("No issues found.\n")
        return True
    def initialize_tree_structure(self):
        """Builds the UtteranceNode tree for this Conversation from the
        reply-to chain and stores its root in self.tree. Raises ValueError
        if the chain does not form a valid tree.
        """
        if not self.check_integrity(verbose=False):
            raise ValueError("Conversation {} reply-to chain does not form a valid tree.".format(self.id))
        root_node_id = None
        # Find root node
        for utt in self.iter_utterances():
            if utt.reply_to is None:
                root_node_id = utt.id
        # Map each utterance id to the ids of its direct replies.
        parent_to_children_ids = defaultdict(list)
        for utt in self.iter_utterances():
            parent_to_children_ids[utt.reply_to].append(utt.id)
        wrapped_utts = {utt.id: UtteranceNode(utt) for utt in self.iter_utterances()}
        for parent_id, wrapped_utt in wrapped_utts.items():
            wrapped_utt.set_children([wrapped_utts[child_id] for child_id in parent_to_children_ids[parent_id]])
        self.tree = wrapped_utts[root_node_id]
    def traverse(self, traversal_type: str, as_utterance: bool = True):
        """
        Traverse through the Conversation tree structure in a breadth-first search ('bfs'), depth-first search (dfs),
        pre-order ('preorder'), or post-order ('postorder') way.
        :param traversal_type: dfs, bfs, preorder, or postorder
        :param as_utterance: whether the iterator should yield the utterance (True) or the utterance node (False)
        :return: an iterator of the utterances or utterance nodes
        """
        if self.tree is None:
            self.initialize_tree_structure()
        if self.tree is None:
            raise ValueError("Failed to traverse because Conversation reply-to chain does not form a valid tree.")
        traversals = {'bfs': self.tree.bfs_traversal,
                      'dfs': self.tree.dfs_traversal,
                      'preorder': self.tree.pre_order,
                      'postorder': self.tree.post_order}
        for utt_node in traversals[traversal_type]():
            yield utt_node.utt if as_utterance else utt_node
    def get_subtree(self, root_utt_id):
        """
        Get the utterance node of the specified input id
        :param root_utt_id: id of the root node that the subtree starts from
        :return: UtteranceNode object
        """
        if self.tree is None:
            self.initialize_tree_structure()
        if self.tree is None:
            raise ValueError("Failed to traverse because Conversation reply-to chain does not form a valid tree.")
        # BFS until the requested node is found; returns None if absent.
        for utt_node in self.tree.bfs_traversal():
            if utt_node.utt.id == root_utt_id:
                return utt_node
    def _print_convo_helper(self, root: str, indent: int, reply_to_dict: Dict[str, str],
                            utt_info_func: Callable[[Utterance], str]) -> None:
        """
        Helper function for print_conversation_structure()
        """
        print(" "*indent + utt_info_func(self.get_utterance(root)))
        children_utt_ids = [k for k, v in reply_to_dict.items() if v == root]
        for child_utt_id in children_utt_ids:
            self._print_convo_helper(root=child_utt_id, indent=indent+4,
                                     reply_to_dict=reply_to_dict, utt_info_func=utt_info_func)
    def print_conversation_structure(self, utt_info_func: Callable[[Utterance], str] = lambda utt: utt.user.id) -> None:
        """
        Prints an indented representation of utterances in the Conversation with conversation reply-to structure
        determining the indented level. The details of each utterance to be printed can be configured.
        :param utt_info_func: callable function taking an utterance as input and returning a string of the desired
            utterance information. By default, this is a lambda function returning the utterance's user's id
        :return: None. Prints to stdout.
        """
        if not self.check_integrity(verbose=False):
            raise ValueError("Could not print conversation structure: The utterance reply-to chain is broken. "
                             "Try check_integrity() to diagnose the problem.")
        root_utt_id = [utt for utt in self.iter_utterances() if utt.reply_to is None][0].id
        reply_to_dict = {utt.id: utt.reply_to for utt in self.iter_utterances()}
        self._print_convo_helper(root=root_utt_id, indent=0, reply_to_dict=reply_to_dict, utt_info_func=utt_info_func)
    def get_chronological_utterance_list(self, selector: Callable[[Utterance], bool] = lambda utt: True):
        """
        Get the utterances in the conversation sorted in increasing order of timestamp
        :param selector: function for which utterances should be included; all utterances are included by default
        :return: list of utterances, sorted by timestamp
        """
        return sorted([utt for utt in self.iter_utterances(selector)], key=lambda utt: utt.timestamp)
    def _get_path_from_leaf_to_root(self, leaf_utt: Utterance, root_utt: Utterance) -> List[Utterance]:
        """
        Helper function for get_root_to_leaf_paths, which returns the path for a given leaf_utt and root_utt
        """
        if leaf_utt == root_utt:
            return [leaf_utt]
        path = [leaf_utt]
        root_id = root_utt.id
        # Walk upward through reply_to links until the root is reached,
        # then reverse so the returned path runs root -> leaf.
        while leaf_utt.reply_to != root_id:
            path.append(self.get_utterance(leaf_utt.reply_to))
            leaf_utt = path[-1]
        path.append(root_utt)
        return path[::-1]
    def get_root_to_leaf_paths(self) -> List[List[Utterance]]:
        """
        Get the paths (stored as a list of lists of utterances) from the root to each of the leaves
        in the conversational tree
        :return: list of root-to-leaf paths, each a list of Utterances
        """
        if not self.check_integrity(verbose=False):
            raise ValueError("Conversation failed integrity check. "
                             "It is either missing an utterance in the reply-to chain and/or has multiple root nodes. "
                             "Run check_integrity() to diagnose issues.")
        utt_reply_tos = {utt.id: utt.reply_to for utt in self.iter_utterances()}
        target_utt_ids = set(list(utt_reply_tos.values()))
        speaker_utt_ids = set(list(utt_reply_tos.keys()))
        root_utt_id = target_utt_ids - speaker_utt_ids # There should only be 1 root_utt_id: None
        assert len(root_utt_id) == 1
        root_utt = [utt for utt in self.iter_utterances() if utt.reply_to is None][0]
        # Leaves are utterances that nothing replies to.
        leaf_utt_ids = speaker_utt_ids - target_utt_ids
        paths = [self._get_path_from_leaf_to_root(self.get_utterance(leaf_utt_id), root_utt)
                 for leaf_utt_id in leaf_utt_ids]
        return paths
    def __hash__(self):
        # Hashing is delegated to the CorpusObject base class.
        return super().__hash__()
    def __eq__(self, other):
        # Conversations are equal when their ids and utterance-id sets match.
        if not isinstance(other, Conversation):
            return False
        return self.id == other.id and set(self._utterance_ids) == set(other._utterance_ids)
    def __str__(self):
        return "Conversation('id': {}, 'utterances': {}, 'meta': {})".format(repr(self.id), self._utterance_ids, self.meta)
| from typing import Dict, List, Callable, Generator, Optional
from .utterance import Utterance
from .user import User
from .corpusUtil import warn
from .corpusObject import CorpusObject
from collections import defaultdict
from .utteranceNode import UtteranceNode
class Conversation(CorpusObject):
    """Represents a discrete subset of utterances in the dataset, connected by a
    reply-to chain.
    :param owner: The Corpus that this Conversation belongs to
    :param cid: The unique ID of this Conversation
    :param utterances: A list of the IDs of the Utterances in this Conversation
    :param meta: Table of initial values for conversation-level metadata
    :ivar meta: A dictionary-like view object providing read-write access to
        conversation-level metadata. For utterance-level metadata, use
        Utterance.meta. For user-level metadata, use User.meta. For corpus-level
        metadata, use Corpus.meta.
    """
    def __init__(self, owner, id: Optional[str] = None,
                 utterances: Optional[List[str]] = None,
                 meta: Optional[Dict] = None):
        super().__init__(obj_type="conversation", owner=owner, id=id, meta=meta)
        self._owner = owner
        # IDs (not objects) of this Conversation's Utterances; the actual
        # Utterance objects live in, and are fetched from, the owner Corpus.
        self._utterance_ids = utterances
        # Lazily-built cache of user identifiers (see iter_users / get_user_ids).
        self._user_ids = None
        # Root of the reply-to tree; built on demand by initialize_tree_structure().
        self.tree: Optional[UtteranceNode] = None
    def get_utterance_ids(self) -> List[str]:
        """Produces a list of the unique IDs of all utterances in the
        Conversation, which can be used in calls to get_utterance() to retrieve
        specific utterances. Provides no ordering guarantees for the list.
        :return: a list of IDs of Utterances in the Conversation
        """
        # we construct a new list instead of returning self._utterance_ids in
        # order to prevent the user from accidentally modifying the internal
        # ID list (since lists are mutable)
        return [ut_id for ut_id in self._utterance_ids]
    def get_utterance(self, ut_id: str) -> Utterance:
        """Looks up the Utterance associated with the given ID. Raises a
        KeyError if no utterance by that ID exists.
        :return: the Utterance with the given ID
        """
        # delegate to the owner Corpus since Conversation does not itself own
        # any Utterances
        return self._owner.get_utterance(ut_id)
    def iter_utterances(self, selector: Callable[[Utterance], bool] = lambda utt: True) -> Generator[Utterance, None, None]:
        """Generator allowing iteration over all utterances in the Conversation.
        Provides no ordering guarantees.
        :param selector: predicate deciding which utterances are yielded;
            all utterances pass by default
        :return: Generator that produces Utterances
        """
        for ut_id in self._utterance_ids:
            utt = self._owner.get_utterance(ut_id)
            if selector(utt):
                yield utt
    def get_usernames(self) -> List[str]:
        """Produces a list of names of all users in the Conversation, which can
        be used in calls to get_user() to retrieve specific users. Provides no
        ordering guarantees for the list.
        :return: a list of usernames
        """
        warn("This function is deprecated and will be removed in a future release. Use get_user_ids() instead.")
        if self._user_ids is None:
            # first call to get_usernames or iter_users; precompute cached list
            # of usernames
            self._user_ids = set()
            for ut_id in self._utterance_ids:
                ut = self._owner.get_utterance(ut_id)
                self._user_ids.add(ut.user.name)
        return list(self._user_ids)
    def get_user_ids(self) -> List[str]:
        """Produces a list of ids of all users in the Conversation, which can
        be used in calls to get_user() to retrieve specific users. Provides no
        ordering guarantees for the list.
        :return: a list of usernames
        """
        # NOTE(review): this caches ut.user.name while iter_users caches
        # ut.user.id -- presumably the two are interchangeable in this
        # codebase; confirm, otherwise the shared _user_ids cache is
        # inconsistent depending on which method is called first.
        if self._user_ids is None:
            # first call to get_usernames or iter_users; precompute cached list
            # of usernames
            self._user_ids = set()
            for ut_id in self._utterance_ids:
                ut = self._owner.get_utterance(ut_id)
                self._user_ids.add(ut.user.name)
        return list(self._user_ids)
    def get_user(self, username: str) -> User:
        """Looks up the User with the given name. Raises a KeyError if no user
        with that name exists.
        :return: the User with the given username
        """
        # delegate to the owner Corpus since Conversation does not itself own
        # any Utterances
        return self._owner.get_user(username)
    def iter_users(self) -> Generator[User, None, None]:
        """Generator allowing iteration over all users in the Conversation.
        Provides no ordering guarantees.
        :return: Generator that produces Users.
        """
        if self._user_ids is None:
            # first call to get_ids or iter_users; precompute cached list of usernames
            self._user_ids = set()
            for ut_id in self._utterance_ids:
                ut = self._owner.get_utterance(ut_id)
                self._user_ids.add(ut.user.id)
        for user_id in self._user_ids:
            yield self._owner.get_user(user_id)
    def check_integrity(self, verbose=True):
        """Checks that the reply-to chain forms a valid tree: no referenced
        utterance is missing and exactly one utterance replies to None.
        :param verbose: whether to print/warn diagnostic details
        :return: True if the Conversation passes the check, False otherwise
        """
        if verbose: print("Checking reply-to chain of Conversation", self.id)
        utt_reply_tos = {utt.id: utt.reply_to for utt in self.iter_utterances()}
        target_utt_ids = set(list(utt_reply_tos.values()))
        speaker_utt_ids = set(list(utt_reply_tos.keys()))
        root_utt_id = target_utt_ids - speaker_utt_ids # There should only be 1 root_utt_id: None
        if len(root_utt_id) != 1:
            if verbose:
                for utt_id in root_utt_id:
                    if utt_id is not None:
                        warn("ERROR: Missing utterance {}".format(utt_id))
            return False
        # sanity check
        utts_replying_to_none = 0
        for utt in self.iter_utterances():
            if utt.reply_to is None:
                utts_replying_to_none += 1
        if utts_replying_to_none > 1:
            if verbose: warn("ERROR: Found more than one Utterance replying to None.")
            return False
        if verbose: print("No issues found.\n")
        return True
    def initialize_tree_structure(self):
        """Builds the UtteranceNode tree for this Conversation from the
        reply-to chain and stores its root in self.tree. Raises ValueError
        if the chain does not form a valid tree.
        """
        if not self.check_integrity(verbose=False):
            raise ValueError("Conversation {} reply-to chain does not form a valid tree.".format(self.id))
        root_node_id = None
        # Find root node
        for utt in self.iter_utterances():
            if utt.reply_to is None:
                root_node_id = utt.id
        # Map each utterance id to the ids of its direct replies.
        parent_to_children_ids = defaultdict(list)
        for utt in self.iter_utterances():
            parent_to_children_ids[utt.reply_to].append(utt.id)
        wrapped_utts = {utt.id: UtteranceNode(utt) for utt in self.iter_utterances()}
        for parent_id, wrapped_utt in wrapped_utts.items():
            wrapped_utt.set_children([wrapped_utts[child_id] for child_id in parent_to_children_ids[parent_id]])
        self.tree = wrapped_utts[root_node_id]
    def traverse(self, traversal_type: str, as_utterance: bool = True):
        """
        Traverse through the Conversation tree structure in a breadth-first search ('bfs'), depth-first search (dfs),
        pre-order ('preorder'), or post-order ('postorder') way.
        :param traversal_type: dfs, bfs, preorder, or postorder
        :param as_utterance: whether the iterator should yield the utterance (True) or the utterance node (False)
        :return: an iterator of the utterances or utterance nodes
        """
        if self.tree is None:
            self.initialize_tree_structure()
        if self.tree is None:
            raise ValueError("Failed to traverse because Conversation reply-to chain does not form a valid tree.")
        traversals = {'bfs': self.tree.bfs_traversal,
                      'dfs': self.tree.dfs_traversal,
                      'preorder': self.tree.pre_order,
                      'postorder': self.tree.post_order}
        for utt_node in traversals[traversal_type]():
            yield utt_node.utt if as_utterance else utt_node
    def get_subtree(self, root_utt_id):
        """
        Get the utterance node of the specified input id
        :param root_utt_id: id of the root node that the subtree starts from
        :return: UtteranceNode object
        """
        if self.tree is None:
            self.initialize_tree_structure()
        if self.tree is None:
            raise ValueError("Failed to traverse because Conversation reply-to chain does not form a valid tree.")
        # BFS until the requested node is found; returns None if absent.
        for utt_node in self.tree.bfs_traversal():
            if utt_node.utt.id == root_utt_id:
                return utt_node
    def _print_convo_helper(self, root: str, indent: int, reply_to_dict: Dict[str, str],
                            utt_info_func: Callable[[Utterance], str]) -> None:
        """
        Helper function for print_conversation_structure()
        """
        print(" "*indent + utt_info_func(self.get_utterance(root)))
        children_utt_ids = [k for k, v in reply_to_dict.items() if v == root]
        for child_utt_id in children_utt_ids:
            self._print_convo_helper(root=child_utt_id, indent=indent+4,
                                     reply_to_dict=reply_to_dict, utt_info_func=utt_info_func)
    def print_conversation_structure(self, utt_info_func: Callable[[Utterance], str] = lambda utt: utt.user.id) -> None:
        """
        Prints an indented representation of utterances in the Conversation with conversation reply-to structure
        determining the indented level. The details of each utterance to be printed can be configured.
        :param utt_info_func: callable function taking an utterance as input and returning a string of the desired
            utterance information. By default, this is a lambda function returning the utterance's user's id
        :return: None. Prints to stdout.
        """
        if not self.check_integrity(verbose=False):
            raise ValueError("Could not print conversation structure: The utterance reply-to chain is broken. "
                             "Try check_integrity() to diagnose the problem.")
        root_utt_id = [utt for utt in self.iter_utterances() if utt.reply_to is None][0].id
        reply_to_dict = {utt.id: utt.reply_to for utt in self.iter_utterances()}
        self._print_convo_helper(root=root_utt_id, indent=0, reply_to_dict=reply_to_dict, utt_info_func=utt_info_func)
    def get_chronological_utterance_list(self, selector: Callable[[Utterance], bool] = lambda utt: True):
        """
        Get the utterances in the conversation sorted in increasing order of timestamp
        :param selector: function for which utterances should be included; all utterances are included by default
        :return: list of utterances, sorted by timestamp
        """
        return sorted([utt for utt in self.iter_utterances(selector)], key=lambda utt: utt.timestamp)
    def _get_path_from_leaf_to_root(self, leaf_utt: Utterance, root_utt: Utterance) -> List[Utterance]:
        """
        Helper function for get_root_to_leaf_paths, which returns the path for a given leaf_utt and root_utt
        """
        if leaf_utt == root_utt:
            return [leaf_utt]
        path = [leaf_utt]
        root_id = root_utt.id
        # Walk upward through reply_to links until the root is reached,
        # then reverse so the returned path runs root -> leaf.
        while leaf_utt.reply_to != root_id:
            path.append(self.get_utterance(leaf_utt.reply_to))
            leaf_utt = path[-1]
        path.append(root_utt)
        return path[::-1]
    def get_root_to_leaf_paths(self) -> List[List[Utterance]]:
        """
        Get the paths (stored as a list of lists of utterances) from the root to each of the leaves
        in the conversational tree
        :return: list of root-to-leaf paths, each a list of Utterances
        """
        if not self.check_integrity(verbose=False):
            raise ValueError("Conversation failed integrity check. "
                             "It is either missing an utterance in the reply-to chain and/or has multiple root nodes. "
                             "Run check_integrity() to diagnose issues.")
        utt_reply_tos = {utt.id: utt.reply_to for utt in self.iter_utterances()}
        target_utt_ids = set(list(utt_reply_tos.values()))
        speaker_utt_ids = set(list(utt_reply_tos.keys()))
        root_utt_id = target_utt_ids - speaker_utt_ids # There should only be 1 root_utt_id: None
        assert len(root_utt_id) == 1
        root_utt = [utt for utt in self.iter_utterances() if utt.reply_to is None][0]
        # Leaves are utterances that nothing replies to.
        leaf_utt_ids = speaker_utt_ids - target_utt_ids
        paths = [self._get_path_from_leaf_to_root(self.get_utterance(leaf_utt_id), root_utt)
                 for leaf_utt_id in leaf_utt_ids]
        return paths
    def __hash__(self):
        # Hashing is delegated to the CorpusObject base class.
        return super().__hash__()
    def __eq__(self, other):
        # Conversations are equal when their ids and utterance-id sets match.
        if not isinstance(other, Conversation):
            return False
        return self.id == other.id and set(self._utterance_ids) == set(other._utterance_ids)
    def __str__(self):
        return "Conversation('id': {}, 'utterances': {}, 'meta': {})".format(repr(self.id), self._utterance_ids, self.meta)
| en | 0.774729 | Represents a discrete subset of utterances in the dataset, connected by a reply-to chain. :param owner: The Corpus that this Conversation belongs to :param cid: The unique ID of this Conversation :param utterances: A list of the IDs of the Utterances in this Conversation :param meta: Table of initial values for conversation-level metadata :ivar meta: A dictionary-like view object providing read-write access to conversation-level metadata. For utterance-level metadata, use Utterance.meta. For user-level metadata, use User.meta. For corpus-level metadata, use Corpus.meta. Produces a list of the unique IDs of all utterances in the Conversation, which can be used in calls to get_utterance() to retrieve specific utterances. Provides no ordering guarantees for the list. :return: a list of IDs of Utterances in the Conversation # we construct a new list instead of returning self._utterance_ids in # order to prevent the user from accidentally modifying the internal # ID list (since lists are mutable) Looks up the Utterance associated with the given ID. Raises a KeyError if no utterance by that ID exists. :return: the Utterance with the given ID # delegate to the owner Corpus since Conversation does not itself own # any Utterances Generator allowing iteration over all utterances in the Conversation. Provides no ordering guarantees. :return: Generator that produces Users Produces a list of names of all users in the Conversation, which can be used in calls to get_user() to retrieve specific users. Provides no ordering guarantees for the list. :return: a list of usernames # first call to get_usernames or iter_users; precompute cached list # of usernames Produces a list of ids of all users in the Conversation, which can be used in calls to get_user() to retrieve specific users. Provides no ordering guarantees for the list. 
:return: a list of usernames # first call to get_usernames or iter_users; precompute cached list # of usernames Looks up the User with the given name. Raises a KeyError if no user with that name exists. :return: the User with the given username # delegate to the owner Corpus since Conversation does not itself own # any Utterances Generator allowing iteration over all users in the Conversation. Provides no ordering guarantees. :return: Generator that produces Users. # first call to get_ids or iter_users; precompute cached list of usernames # There should only be 1 root_utt_id: None # sanity check # Find root node Traverse through the Conversation tree structure in a breadth-first search ('bfs'), depth-first search (dfs), pre-order ('preorder'), or post-order ('postorder') way. :param traversal_type: dfs, bfs, preorder, or postorder :param as_utterance: whether the iterator should yield the utterance (True) or the utterance node (False) :return: an iterator of the utterances or utterance nodes Get the utterance node of the specified input id :param root_utt_id: id of the root node that the subtree starts from :return: UtteranceNode object Helper function for print_conversation_structure() Prints an indented representation of utterances in the Conversation with conversation reply-to structure determining the indented level. The details of each utterance to be printed can be configured. :param utt_info_func: callable function taking an utterance as input and returning a string of the desired utterance information. By default, this is a lambda function returning the utterance's user's id :return: None. Prints to stdout. 
Get the utterances in the conversation sorted in increasing order of timestamp :param selector: function for which utterances should be included; all utterances are included by default :return: list of utterances, sorted by timestamp Helper function for get_root_to_leaf_paths, which returns the path for a given leaf_utt and root_utt Get the paths (stored as a list of lists of utterances) from the root to each of the leaves in the conversational tree :return: # There should only be 1 root_utt_id: None | 2.72912 | 3 |
extras.py | okasen/django-profanity-filter | 0 | 6613424 | <filename>extras.py
import re
import os
import inflection
class ProfanityFilter:
    """Detects and censors profane words in text.

    The profanity list is loaded from ``wordlist.txt`` and the list of
    false positives from ``goodlist.txt``, both located next to this
    module. Callers may replace the profanity list entirely
    (``define_words``) or extend it (``append_words``).
    """

    def __init__(self, **kwargs):
        # When non-empty, this list fully replaces the bundled word list.
        self._custom_censor_list = kwargs.get('custom_censor_list', [])
        # Additional words used in conjunction with the active word list.
        self._extra_censor_list = kwargs.get('extra_censor_list', [])
        # Bundled profanity list -- loaded from file, not user-modified.
        self._censor_list = []
        # Character used to replace censored words.
        self._censor_char = "*"
        # The word-list files live next to this module.
        self._BASE_DIR = os.path.abspath(os.path.dirname(__file__))
        self._words_file = os.path.join(self._BASE_DIR, 'wordlist.txt')
        self._false_file = os.path.join(self._BASE_DIR, 'goodlist.txt')
        self._load_words()
        self._load_false_positives()

    def _load_words(self):
        """ Loads the list of profane words from file. """
        with open(self._words_file, 'r') as f:
            self._censor_list = [line.strip() for line in f]

    def _load_false_positives(self):
        """ Loads the list of false positive words from file. """
        with open(self._false_file, 'r') as f:
            self._false_list = [line.strip() for line in f]

    def define_words(self, word_list):
        """ Define a custom list of profane words, replacing the default list. """
        self._custom_censor_list = word_list

    def append_words(self, word_list):
        """ Extends the profane word list with word_list """
        self._extra_censor_list.extend(word_list)

    def set_censor(self, character):
        """ Replaces the original censor character '*' with character """
        # Accept ints for convenience, e.g. set_censor(0) -> "0".
        if isinstance(character, int):
            character = str(character)
        self._censor_char = character

    def has_bad_word(self, text):
        """ Returns True if text contains profanity, False otherwise """
        return self.censor(text) != text

    def get_custom_censor_list(self):
        """ Returns the list of custom profane words """
        return self._custom_censor_list

    def get_extra_censor_list(self):
        """ Returns the list of custom, additional, profane words """
        return self._extra_censor_list

    def get_profane_words(self):
        """ Gets all profane words, including their pluralized forms """
        # A non-empty custom list fully replaces the bundled word list;
        # list() copies so the caller's list is never mutated below.
        if self._custom_censor_list:
            profane_words = list(self._custom_censor_list)
        else:
            profane_words = list(self._censor_list)
        profane_words.extend(self._extra_censor_list)
        profane_words.extend([inflection.pluralize(word) for word in profane_words])
        # Deduplicate (pluralize may return the word unchanged).
        return list(set(profane_words))

    def get_false_positives(self):
        """ Returns a copy of the known false-positive words """
        return list(self._false_list)

    def restore_words(self):
        """ Clears all custom censor lists """
        self._custom_censor_list = []
        self._extra_censor_list = []

    def censor(self, input_text):
        """ Returns input_text with any profane words censored """
        res = input_text
        for word in self.get_profane_words():
            # Build the replacement from the raw word length *before*
            # wrapping the word in \b anchors; the original code used the
            # fragile "len(pattern) - 4" arithmetic to undo the anchors.
            replacement = self._censor_char * len(word)
            regex = re.compile(r'\b%s\b' % word, re.IGNORECASE)
            res = regex.sub(replacement, res)
        return res

    def is_clean(self, input_text):
        """ Returns True if input_text doesn't contain any profane words, False otherwise. """
        return not self.has_bad_word(input_text)

    def is_profane(self, input_text):
        """ Returns True if input_text contains any profane words, False otherwise. """
        return self.has_bad_word(input_text)

    def has_bad_word_nospace(self, input_text):
        """ Returns True if text contains profanity even when the words are
        run together without spaces, False otherwise """
        # Count raw substring hits (no word boundaries) so concatenated
        # text such as "xxbadwordxx" is still detected.
        profanity_count = 0
        input_text_lowered = input_text.lower()
        for word in self.get_profane_words():
            if re.search(word, input_text_lowered):
                profanity_count = profanity_count + 1
        # Cancel hits caused by benign words that merely *contain* a bad
        # word, e.g. "basement" contains "semen": each false positive
        # found removes one count.
        # NOTE(review): this search uses the original-case text while the
        # profanity search uses the lowered text -- presumably intentional,
        # but worth confirming against the good-list contents.
        for word in self.get_false_positives():
            if re.search(word, input_text):
                profanity_count = profanity_count - 1
        return profanity_count > 0

    def is_profane_nospace(self, input_text):
        """ same function as is_profane, but works with unspaced text """
        return self.has_bad_word_nospace(input_text)
| <filename>extras.py
import re
import os
import inflection
class ProfanityFilter:
def __init__(self, **kwargs):
# If defined, use this instead of _censor_list
self._custom_censor_list = kwargs.get('custom_censor_list', [])
# Words to be used in conjunction with _censor_list
self._extra_censor_list = kwargs.get('extra_censor_list', [])
# What to be censored -- should not be modified by user
self._censor_list = []
# What to censor the words with
self._censor_char = "*"
# Where to find the censored words
self._BASE_DIR = os.path.abspath(os.path.dirname(__file__))
self._words_file = os.path.join(self._BASE_DIR, 'wordlist.txt')
self._false_file = os.path.join(self._BASE_DIR, 'goodlist.txt')
self._load_words()
self._load_false_positives()
def _load_words(self):
""" Loads the list of profane words from file. """
with open(self._words_file, 'r') as f:
self._censor_list = [line.strip() for line in f.readlines()]
def _load_false_positives(self):
""" Loads the list of false positive words from file. """
with open(self._false_file, 'r') as f:
self._false_list = [line.strip() for line in f.readlines()]
def define_words(self, word_list):
""" Define a custom list of profane words. """
self._custom_censor_list = word_list
def append_words(self, word_list):
""" Extends the profane word list with word_list """
self._extra_censor_list.extend(word_list)
def set_censor(self, character):
""" Replaces the original censor character '*' with character """
if isinstance(character, int):
character = str(character)
self._censor_char = character
def has_bad_word(self, text):
""" Returns True if text contains profanity, False otherwise """
return self.censor(text) != text
def get_custom_censor_list(self):
""" Returns the list of custom profane words """
return self._custom_censor_list
def get_extra_censor_list(self):
""" Returns the list of custom, additional, profane words """
return self._extra_censor_list
def get_profane_words(self):
""" Gets all profane words """
profane_words = []
if self._custom_censor_list:
profane_words = [w for w in self._custom_censor_list] # Previous versions of Python don't have list.copy()
else:
profane_words = [w for w in self._censor_list]
profane_words.extend(self._extra_censor_list)
profane_words.extend([inflection.pluralize(word) for word in profane_words])
profane_words = list(set(profane_words))
return profane_words
def get_false_positives(self):
false_positives = [w for w in self._false_list]
return false_positives
def restore_words(self):
""" Clears all custom censor lists """
self._custom_censor_list = []
self._extra_censor_list = []
def censor(self, input_text):
""" Returns input_text with any profane words censored """
bad_words = self.get_profane_words()
res = input_text
for word in bad_words:
word = r'\b%s\b' % word # Apply word boundaries to the bad word
regex = re.compile(word, re.IGNORECASE)
res = regex.sub(self._censor_char * (len(word) - 4), res)
return res
def is_clean(self, input_text):
""" Returns True if input_text doesn't contain any profane words, False otherwise. """
return not self.has_bad_word(input_text)
def is_profane(self, input_text):
""" Returns True if input_text contains any profane words, False otherwise. """
return self.has_bad_word(input_text)
def has_bad_word_nospace(self, input_text):
""" Returns True if text contains profanity, False otherwise """
bad_words = self.get_profane_words()
false_positives = self.get_false_positives()
profanity_count = 0
input_text_lowered = input_text.lower()
for word in bad_words:
if re.search(word, input_text_lowered):
profanity_count = profanity_count + 1
for word in false_positives:
if re.search(word, input_text):
profanity_count = profanity_count - 1
""" what this is doing is making removing any counts for false
positives, e.g. basement would trigger the counter to go up
(ba semen t) so the check would then find the word basement and
reduce the count back down """
if profanity_count > 0:
return True
elif profanity_count <= 0:
return False
def is_profane_nospace(self, input_text):
""" same function as is profane, but works with unspaced text """
return self.has_bad_word_nospace(input_text)
| en | 0.756969 | # If defined, use this instead of _censor_list # Words to be used in conjunction with _censor_list # What to be censored -- should not be modified by user # What to censor the words with # Where to find the censored words Loads the list of profane words from file. Loads the list of false positive words from file. Define a custom list of profane words. Extends the profane word list with word_list Replaces the original censor character '*' with character Returns True if text contains profanity, False otherwise Returns the list of custom profane words Returns the list of custom, additional, profane words Gets all profane words # Previous versions of Python don't have list.copy() Clears all custom censor lists Returns input_text with any profane words censored # Apply word boundaries to the bad word Returns True if input_text doesn't contain any profane words, False otherwise. Returns True if input_text contains any profane words, False otherwise. Returns True if text contains profanity, False otherwise what this is doing is making removing any counts for false positives, e.g. basement would trigger the counter to go up (ba semen t) so the check would then find the word basement and reduce the count back down same function as is profane, but works with unspaced text | 3.134732 | 3 |
Simulation/simulation.py | the-mr-music/pcb-toaster | 1 | 6613425 | <filename>Simulation/simulation.py
#!/usr/bin/env python3
# Simple simulation of the hotplate
# Created out of curiosity whilst the pcb was in manufacturing
# Even though it is only a rough model, the results are suprisingly accurate
import matplotlib.pyplot as plt
from numpy import arange
import numpy as np
import time
#from simple_pid import PID
# d_T = Q[J] / (kg * c_alu)
c_alu = 896 # J/(kg * K)
m_iron = 0.5 # Masse in Kilo
Pmax_iron = 1250 # Power in Watts
p_step_max = 50
p_step_pwr = Pmax_iron / p_step_max
sim_seconds = 250
iron_temp = np.zeros(sim_seconds)
for i in range(0, 10):
iron_temp[i] = 20 # °C in the beginning
iron_pwr = np.zeros(sim_seconds)
ttmp = np.zeros(sim_seconds)
target_temp = 150
temp_ambient = 110
cooling_factor = 0.01 # Cooling per delta_T to ambient temp per sec
#pid = PID(1, 0.02, 0.05)
#pid.sample_time = 1
#pid.setpoint = 50
#pid.output_limits = (0, 100) #Pmax_iron)
errorSum = 0
last_error = 0
k_p = 6
k_i = 0.01
k_d = 0
k_d_plot = np.zeros(sim_seconds)
# One iteration equals one second
for i in range(10, sim_seconds-10):
ttmp[i] = target_temp
error = target_temp - iron_temp[i-1]
errorSum = errorSum + error * k_i
d_input = iron_temp[i-1] - iron_temp[i-2]
k_d_plot[i] = k_d * d_input
if errorSum > p_step_max:
errorSum = p_step_max
last_error = error
output = int(k_p * error + errorSum - k_d * d_input)
if output > p_step_max:
output = p_step_max
if output < 0:
output = 0 # We can not cool
iron_pwr[i] = output * p_step_pwr
d_T = iron_pwr[i] / ( m_iron * c_alu )
# iron_temp[i+4] = iron_temp[i-1] + d_T
iron_temp[i+10] = iron_temp[i+9] + d_T
iron_temp[i+10] = iron_temp[i+10] - cooling_factor * (iron_temp[i+10] - temp_ambient)
# print(iron_pwr[i])
#if iron_temp[i-1] > 100 and target_temp < 235:
# print("Entering Reflow Zone")
# target_temp = 235
#if i == 30:
# print("Entering Soak Zone")
# target_temp = 150
#if i == 120:
# print("Entering Reflow Zone")
# target_temp = 235
#if i == 210:
# print("Cooling down")
# target_temp = 25
print("Max iron temp: " + str(np.amax(iron_temp)))
t = arange(sim_seconds)
plt.plot(t, ttmp, 'b')
plt.plot(t, iron_temp, 'r')
plt.plot(t, iron_pwr, 'g')
plt.plot(t, k_d_plot, 'orange')
#plt.ylabel('some numbers')
plt.show()
| <filename>Simulation/simulation.py
#!/usr/bin/env python3
# Simple simulation of the hotplate
# Created out of curiosity whilst the pcb was in manufacturing
# Even though it is only a rough model, the results are suprisingly accurate
import matplotlib.pyplot as plt
from numpy import arange
import numpy as np
import time
#from simple_pid import PID
# d_T = Q[J] / (kg * c_alu)
c_alu = 896 # J/(kg * K)
m_iron = 0.5 # Masse in Kilo
Pmax_iron = 1250 # Power in Watts
p_step_max = 50
p_step_pwr = Pmax_iron / p_step_max
sim_seconds = 250
iron_temp = np.zeros(sim_seconds)
for i in range(0, 10):
iron_temp[i] = 20 # °C in the beginning
iron_pwr = np.zeros(sim_seconds)
ttmp = np.zeros(sim_seconds)
target_temp = 150
temp_ambient = 110
cooling_factor = 0.01 # Cooling per delta_T to ambient temp per sec
#pid = PID(1, 0.02, 0.05)
#pid.sample_time = 1
#pid.setpoint = 50
#pid.output_limits = (0, 100) #Pmax_iron)
errorSum = 0
last_error = 0
k_p = 6
k_i = 0.01
k_d = 0
k_d_plot = np.zeros(sim_seconds)
# One iteration equals one second
for i in range(10, sim_seconds-10):
ttmp[i] = target_temp
error = target_temp - iron_temp[i-1]
errorSum = errorSum + error * k_i
d_input = iron_temp[i-1] - iron_temp[i-2]
k_d_plot[i] = k_d * d_input
if errorSum > p_step_max:
errorSum = p_step_max
last_error = error
output = int(k_p * error + errorSum - k_d * d_input)
if output > p_step_max:
output = p_step_max
if output < 0:
output = 0 # We can not cool
iron_pwr[i] = output * p_step_pwr
d_T = iron_pwr[i] / ( m_iron * c_alu )
# iron_temp[i+4] = iron_temp[i-1] + d_T
iron_temp[i+10] = iron_temp[i+9] + d_T
iron_temp[i+10] = iron_temp[i+10] - cooling_factor * (iron_temp[i+10] - temp_ambient)
# print(iron_pwr[i])
#if iron_temp[i-1] > 100 and target_temp < 235:
# print("Entering Reflow Zone")
# target_temp = 235
#if i == 30:
# print("Entering Soak Zone")
# target_temp = 150
#if i == 120:
# print("Entering Reflow Zone")
# target_temp = 235
#if i == 210:
# print("Cooling down")
# target_temp = 25
print("Max iron temp: " + str(np.amax(iron_temp)))
t = arange(sim_seconds)
plt.plot(t, ttmp, 'b')
plt.plot(t, iron_temp, 'r')
plt.plot(t, iron_pwr, 'g')
plt.plot(t, k_d_plot, 'orange')
#plt.ylabel('some numbers')
plt.show()
| en | 0.669548 | #!/usr/bin/env python3 # Simple simulation of the hotplate # Created out of curiosity whilst the pcb was in manufacturing # Even though it is only a rough model, the results are suprisingly accurate #from simple_pid import PID # d_T = Q[J] / (kg * c_alu) # J/(kg * K) # Masse in Kilo # Power in Watts # °C in the beginning # Cooling per delta_T to ambient temp per sec #pid = PID(1, 0.02, 0.05) #pid.sample_time = 1 #pid.setpoint = 50 #pid.output_limits = (0, 100) #Pmax_iron) # One iteration equals one second # We can not cool # iron_temp[i+4] = iron_temp[i-1] + d_T # print(iron_pwr[i]) #if iron_temp[i-1] > 100 and target_temp < 235: # print("Entering Reflow Zone") # target_temp = 235 #if i == 30: # print("Entering Soak Zone") # target_temp = 150 #if i == 120: # print("Entering Reflow Zone") # target_temp = 235 #if i == 210: # print("Cooling down") # target_temp = 25 #plt.ylabel('some numbers') | 2.950969 | 3 |
simulator/web/new.py | ondiiik/meteoink | 2 | 6613426 | <reponame>ondiiik/meteoink
from config import connection, Connection
from log import dump_exception
from web import bssid2bytes
def page(web):
try:
bssid = bssid2bytes(web.args['bssid'])
for c in connection:
if c.bssid == bssid:
from .set import page
page(web)
return
connection.append(Connection(int(web.args['location']), web.args['ssid'], web.args['psw'], bssid))
Connection.flush()
except Exception as e:
dump_exception('WEB error:', e)
yield web.index
| from config import connection, Connection
from log import dump_exception
from web import bssid2bytes
def page(web):
try:
bssid = bssid2bytes(web.args['bssid'])
for c in connection:
if c.bssid == bssid:
from .set import page
page(web)
return
connection.append(Connection(int(web.args['location']), web.args['ssid'], web.args['psw'], bssid))
Connection.flush()
except Exception as e:
dump_exception('WEB error:', e)
yield web.index | none | 1 | 2.581363 | 3 | |
ginga/trcalc.py | kyraikeda/ginga | 76 | 6613427 | #
# trcalc.py -- transformation calculations for image data
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
import math
import numpy as np
# interpolation methods available to callers; extended at import time
# below if the optional opencv or pillow back ends are importable
interpolation_methods = ['basic']

# preferred back end as selected via use(); None means auto-select
_use = None
def use(pkgname):
    """Explicitly select the back end used to accelerate transformations.

    Only 'opencv' and 'pillow' are recognized; any other value is
    silently ignored (leaving the current selection in place).
    """
    global _use
    if pkgname in ('opencv', 'pillow'):
        _use = pkgname
have_opencv = False
try:
    # optional opencv package speeds up certain operations, especially
    # rotation
    import cv2

    # map of our interpolation method names to OpenCv resize constants
    cv2_resize = {
        'nearest': cv2.INTER_NEAREST,
        'linear': cv2.INTER_LINEAR,
        'area': cv2.INTER_AREA,
        'bicubic': cv2.INTER_CUBIC,
        'lanczos': cv2.INTER_LANCZOS4,
    }

    have_opencv = True
    # advertise the extra interpolation methods, if not yet added
    if 'nearest' not in interpolation_methods:
        interpolation_methods = list(set(['basic'] +
                                         list(cv2_resize.keys())))
        interpolation_methods.sort()
except ImportError:
    # opencv not installed; other back ends may still be available
    pass

have_pillow = False
try:
    # do we have Python Imaging Library available?
    import PIL.Image as PILimage

    # map of our interpolation method names to PIL resampling filters
    pil_resize = {
        'nearest': PILimage.NEAREST,
        'linear': PILimage.BILINEAR,
        'area': PILimage.HAMMING,
        'bicubic': PILimage.BICUBIC,
        'lanczos': PILimage.LANCZOS,
    }

    have_pillow = True
    # advertise the extra interpolation methods, if not yet added
    if 'nearest' not in interpolation_methods:
        interpolation_methods = list(set(['basic'] +
                                         list(pil_resize.keys())))
        interpolation_methods.sort()
except ImportError:
    # pillow not installed; other back ends may still be available
    pass

# For testing
#have_opencv = False
#have_pillow = False
def get_center(data_np):
    """Return the integer (x, y) center of the 2D array `data_np`."""
    ht, wd = data_np.shape[:2]
    return (wd // 2, ht // 2)
def rotate_pt(x_arr, y_arr, theta_deg, xoff=0, yoff=0):
    """
    Rotate an array of points (x_arr, y_arr) by theta_deg offsetted
    from a center point by (xoff, yoff).
    """
    # TODO: use opencv acceleration if available
    theta_rad = np.radians(theta_deg)
    cos_t, sin_t = np.cos(theta_rad), np.sin(theta_rad)

    # translate to the rotation center, rotate, translate back
    dx = x_arr - xoff
    dy = y_arr - yoff
    rx = (dx * cos_t) - (dy * sin_t) + xoff
    ry = (dx * sin_t) + (dy * cos_t) + yoff

    return np.asarray((rx, ry))


rotate_arr = rotate_pt
def rotate_coord(coord, thetas, offsets):
    """Rotate a sequence of coordinates by thetas[0] degrees about the
    point given by `offsets`.  Coordinates may be 2D or higher; axes
    beyond the first two are passed through unchanged.
    """
    coord_t = np.asarray(coord).T
    # TODO: handle dimensional rotation N>2
    xy = rotate_pt(coord_t[0], coord_t[1], thetas[0],
                   xoff=offsets[0], yoff=offsets[1])

    if len(coord_t) <= 2:
        return xy.T

    # just copy unrotated Z (and higher) coords
    res = np.asarray([xy[0], xy[1]] + list(coord_t[2:]))
    return res.T
def rotate_clip(data_np, theta_deg, rotctr_x=None, rotctr_y=None,
                out=None, logger=None):
    """
    Rotate numpy array `data_np` by `theta_deg` around rotation center
    (rotctr_x, rotctr_y).  If the rotation center is omitted it defaults
    to the center of the array.

    No adjustment is done to the data array beforehand, so the result will
    be clipped according to the size of the array (the output array will be
    the same size as the input array).

    If `out` is not None, the result is also written into it (it must
    have the same shape as `data_np`) and `out` is returned.
    """
    # If there is no rotation, then we are done
    if math.fmod(theta_deg, 360.0) == 0.0:
        return data_np

    ht, wd = data_np.shape[:2]
    dtype = data_np.dtype

    if rotctr_x is None:
        rotctr_x = wd // 2
    if rotctr_y is None:
        rotctr_y = ht // 2

    if dtype == np.uint8 and have_opencv and _use in (None, 'opencv'):
        if logger is not None:
            logger.debug("rotating with OpenCv")
        # opencv is fastest
        M = cv2.getRotationMatrix2D((rotctr_x, rotctr_y), theta_deg, 1)
        newdata = cv2.warpAffine(data_np, M, (wd, ht))
        new_ht, new_wd = newdata.shape[:2]
        assert (wd == new_wd) and (ht == new_ht), \
            Exception("rotated cutout is %dx%d original=%dx%d" % (
                new_wd, new_ht, wd, ht))
        newdata = newdata.astype(dtype, copy=False)

        if out is not None:
            out[:, :, ...] = newdata
            newdata = out

    elif dtype == np.uint8 and have_pillow and _use in (None, 'pillow'):
        if logger is not None:
            logger.debug("rotating with pillow")
        img = PILimage.fromarray(data_np)
        img_rot = img.rotate(theta_deg, resample=False, expand=False,
                             center=(rotctr_x, rotctr_y))
        newdata = np.array(img_rot, dtype=data_np.dtype)
        new_ht, new_wd = newdata.shape[:2]
        assert (wd == new_wd) and (ht == new_ht), \
            Exception("rotated cutout is %dx%d original=%dx%d" % (
                new_wd, new_ht, wd, ht))

        # bug fix: this branch previously ignored `out`, unlike the
        # opencv and numpy paths; honor it here the same way
        if out is not None:
            out[:, :, ...] = newdata
            newdata = out

    else:
        if logger is not None:
            logger.debug("rotating with numpy")
        yi, xi = np.mgrid[0:ht, 0:wd]
        # translate indices to the rotation center
        xi -= rotctr_x
        yi -= rotctr_y
        cos_t = np.cos(np.radians(theta_deg))
        sin_t = np.sin(np.radians(theta_deg))

        # map each output pixel back to its source pixel
        ap = (xi * cos_t) - (yi * sin_t) + rotctr_x
        bp = (xi * sin_t) + (yi * cos_t) + rotctr_y

        # Optimizations to reuse existing intermediate arrays
        np.rint(ap, out=ap)
        ap = ap.astype(int, copy=False)
        ap.clip(0, wd - 1, out=ap)
        np.rint(bp, out=bp)
        bp = bp.astype(int, copy=False)
        bp.clip(0, ht - 1, out=bp)

        if out is not None:
            out[:, :, ...] = data_np[bp, ap]
            newdata = out
        else:
            newdata = data_np[bp, ap]
            new_ht, new_wd = newdata.shape[:2]

            assert (wd == new_wd) and (ht == new_ht), \
                Exception("rotated cutout is %dx%d original=%dx%d" % (
                    new_wd, new_ht, wd, ht))

    return newdata
def rotate(data_np, theta_deg, rotctr_x=None, rotctr_y=None, pad=20,
           logger=None):
    """Rotate `data_np` by `theta_deg` degrees, first padding the array
    out to a square large enough that no part of the image is lost at
    any rotation angle.  Returns a new (larger) array.
    """
    # a rotation by a multiple of 360 deg is a no-op
    if math.fmod(theta_deg, 360.0) == 0.0:
        return data_np

    ht, wd = data_np.shape[:2]
    octr_x, octr_y = wd // 2, ht // 2

    # a square whose side is the image diagonal (plus padding) can hold
    # the image at any rotation angle
    side = int(math.sqrt(wd ** 2 + ht ** 2) + pad)
    dims = (side, side) + data_np.shape[2:]
    nctr_x, nctr_y = side // 2, side // 2

    # extents of the original image about its center, limited by the
    # new array's center
    l_x, r_x = min(octr_x, nctr_x), min(wd - octr_x, nctr_x)
    b_y, t_y = min(octr_y, nctr_y), min(ht - octr_y, nctr_y)

    # paste the old image into the middle of a new blank array
    # TODO: fill with a different value?
    newdata = np.zeros(dims, dtype=data_np.dtype)
    newdata[nctr_y - b_y:nctr_y + t_y,
            nctr_x - l_x:nctr_x + r_x] = data_np[octr_y - b_y:octr_y + t_y,
                                                 octr_x - l_x:octr_x + r_x]

    # now rotate with clipping as usual
    return rotate_clip(newdata, theta_deg,
                       rotctr_x=rotctr_x, rotctr_y=rotctr_y, out=newdata)
def get_scaled_cutout_wdht_view(shp, x1, y1, x2, y2, new_wd, new_ht):
    """Return a numpy fancy-index view selecting a nearest-neighbor
    resampling of the region (x1, y1)-(x2, y2) of an array of shape
    `shp` to size (new_wd, new_ht), plus the actual scales used.
    """
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    # dimensions of the NON-scaled cutout
    old_wd, old_ht = max(x2 - x1 + 1, 1), max(y2 - y1 + 1, 1)

    # inverse scale factors: map each output pixel back to a source pixel
    iscale_x = 0.0 if new_wd == 0 else float(old_wd) / float(new_wd)
    iscale_y = 0.0 if new_ht == 0 else float(old_ht) / float(new_ht)

    max_x, max_y = shp[1] - 1, shp[0] - 1

    # generate the source index for each output pixel, clipped to bounds
    xi = np.clip(x1 + np.arange(0, new_wd) * iscale_x,
                 0, max_x).astype(int, copy=False)
    yi = np.clip(y1 + np.arange(0, new_ht) * iscale_y,
                 0, max_y).astype(int, copy=False)
    wd, ht = xi.size, yi.size

    # bounds check against shape (to protect future data access)
    if new_wd > 0 and xi[-1] > max_x:
        raise ValueError("X index (%d) exceeds shape bounds (%d)" % (xi[-1], max_x))
    if new_ht > 0 and yi[-1] > max_y:
        raise ValueError("Y index (%d) exceeds shape bounds (%d)" % (yi[-1], max_y))

    view = np.ix_(yi, xi)

    # actual scale used (vs. desired)
    scale_x, scale_y = float(wd) / old_wd, float(ht) / old_ht

    # return view + actual scale factors used
    return (view, (scale_x, scale_y))
def get_scaled_cutout_wdhtdp_view(shp, p1, p2, new_dims):
    """
    Like get_scaled_cutout_wdht, but returns the view/slice to extract
    from an image instead of the extraction itself.
    """
    x1, y1, z1 = p1
    x2, y2, z2 = p2
    new_wd, new_ht, new_dp = new_dims
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    z1, z2, new_wd, new_ht = int(z1), int(z2), int(new_wd), int(new_ht)

    # dimensions of the NON-scaled cutout
    old_wd = max(x2 - x1 + 1, 1)
    old_ht = max(y2 - y1 + 1, 1)
    old_dp = max(z2 - z1 + 1, 1)

    max_x, max_y, max_z = shp[1] - 1, shp[0] - 1, shp[2] - 1

    # inverse scale factors: map each output pixel back to a source pixel
    iscale_x = 0.0 if new_wd == 0 else float(old_wd) / float(new_wd)
    iscale_y = 0.0 if new_ht == 0 else float(old_ht) / float(new_ht)
    iscale_z = 0.0 if new_dp == 0 else float(old_dp) / float(new_dp)

    # generate the source index for each output pixel, clipped to bounds
    xi = np.clip(x1 + np.arange(0, new_wd) * iscale_x,
                 0, max_x).astype(int, copy=False)
    yi = np.clip(y1 + np.arange(0, new_ht) * iscale_y,
                 0, max_y).astype(int, copy=False)
    zi = np.clip(z1 + np.arange(0, new_dp) * iscale_z,
                 0, max_z).astype(int, copy=False)
    wd, ht, dp = xi.size, yi.size, zi.size

    # bounds check against shape (to protect future data access)
    if new_wd > 0 and xi[-1] > max_x:
        raise ValueError("X index (%d) exceeds shape bounds (%d)" % (xi[-1], max_x))
    if new_ht > 0 and yi[-1] > max_y:
        raise ValueError("Y index (%d) exceeds shape bounds (%d)" % (yi[-1], max_y))
    if new_dp > 0 and zi[-1] > max_z:
        raise ValueError("Z index (%d) exceeds shape bounds (%d)" % (zi[-1], max_z))

    view = np.ix_(yi, xi, zi)

    # actual scale used (vs. desired)
    scale_x = float(wd) / old_wd
    scale_y = float(ht) / old_ht
    scale_z = float(dp) / old_dp

    # return view + actual scale factors used
    return (view, (scale_x, scale_y, scale_z))
def get_scaled_cutout_wdht(data_np, x1, y1, x2, y2, new_wd, new_ht,
                           interpolation='basic', logger=None,
                           dtype=None):
    """Extract a region of the `data_np` defined by corners (x1, y1) and
    (x2, y2) and resample it to fit dimensions (new_wd, new_ht).

    `interpolation` describes the method of interpolation used, where the
    default "basic" is nearest neighbor.  If `logger` is not `None` it will
    be used for logging messages.  If `dtype` is defined then the output
    array will be converted to that type; the default is the same as the
    input type.

    Returns a tuple of the resampled array and the (x, y) scale factors
    actually used.
    """
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    new_wd, new_ht = int(new_wd), int(new_ht)
    # NOTE: removed unused `rdim`/`open_cl_ok` locals (dead code left over
    # from a removed OpenCL back end)

    if dtype is None:
        dtype = data_np.dtype

    if have_opencv and _use in (None, 'opencv'):
        if logger is not None:
            logger.debug("resizing with OpenCv")
        # opencv is fastest and supports many methods
        if interpolation == 'basic':
            interpolation = 'nearest'
        method = cv2_resize[interpolation]

        cutout = data_np[y1:y2 + 1, x1:x2 + 1]
        if cutout.dtype not in (np.uint8, np.uint16):
            # special hack for OpenCv resize on certain numpy array types
            cutout = cutout.astype(np.float64)
        newdata = cv2.resize(cutout, (new_wd, new_ht),
                             interpolation=method)

        old_wd, old_ht = max(x2 - x1 + 1, 1), max(y2 - y1 + 1, 1)
        ht, wd = newdata.shape[:2]
        scale_x, scale_y = float(wd) / old_wd, float(ht) / old_ht

    elif data_np.dtype == np.uint8 and have_pillow and _use in (None, 'pillow'):
        if logger is not None:
            logger.info("resizing with pillow")
        if interpolation == 'basic':
            interpolation = 'nearest'
        method = pil_resize[interpolation]
        img = PILimage.fromarray(data_np[y1:y2 + 1, x1:x2 + 1])
        img_siz = img.resize((new_wd, new_ht), resample=method)
        newdata = np.array(img_siz, dtype=dtype)

        old_wd, old_ht = max(x2 - x1 + 1, 1), max(y2 - y1 + 1, 1)
        ht, wd = newdata.shape[:2]
        scale_x, scale_y = float(wd) / old_wd, float(ht) / old_ht

    elif interpolation not in ('basic', 'nearest'):
        raise ValueError("Interpolation method not supported: '%s'" % (
            interpolation))

    else:
        if logger is not None:
            logger.debug('resizing by slicing')
        view, (scale_x, scale_y) = get_scaled_cutout_wdht_view(data_np.shape,
                                                               x1, y1, x2, y2,
                                                               new_wd, new_ht)
        newdata = data_np[view]

    newdata = newdata.astype(dtype, copy=False)
    return newdata, (scale_x, scale_y)
def get_scaled_cutout_wdhtdp(data_np, p1, p2, new_dims, logger=None):
    """Cut out the 3D region of `data_np` bounded by corner points `p1`
    and `p2` and resample it (by slicing) to `new_dims`.  Returns the
    new array and the actual scale factors used.
    """
    if logger is not None:
        logger.debug('resizing by slicing')
    view, scales = get_scaled_cutout_wdhtdp_view(data_np.shape, p1, p2,
                                                 new_dims)
    return data_np[view], scales
def get_scaled_cutout_basic_view(shp, p1, p2, scales):
    """
    Like get_scaled_cutout_basic, but returns the view/slice to extract
    from an image, instead of the extraction itself.
    """
    x1, y1 = int(p1[0]), int(p1[1])
    x2, y2 = int(p2[0]), int(p2[1])
    scale_x, scale_y = scales[:2]

    # size of the NON-scaled cutout and the desired output size
    old_wd = max(x2 - x1 + 1, 1)
    old_ht = max(y2 - y1 + 1, 1)
    new_wd, new_ht = int(scale_x * old_wd), int(scale_y * old_ht)

    if len(scales) == 2:
        # 2D case
        return get_scaled_cutout_wdht_view(shp, x1, y1, x2, y2,
                                           new_wd, new_ht)

    # 3D case: include the depth axis
    z1, z2, scale_z = p1[2], p2[2], scales[2]
    new_dp = int(scale_z * max(z2 - z1 + 1, 1))
    return get_scaled_cutout_wdhtdp_view(shp, p1, p2,
                                         (new_wd, new_ht, new_dp))
def get_scaled_cutout_basic(data_np, x1, y1, x2, y2, scale_x, scale_y,
                            interpolation='basic', logger=None,
                            dtype=None):
    """Extract a region of `data_np` defined by corners (x1, y1) and
    (x2, y2) and scale it by factors (scale_x, scale_y).

    `interpolation` describes the method of interpolation used, where the
    default "basic" is nearest neighbor.  If `logger` is not `None` it will
    be used for logging messages.  If `dtype` is defined then the output
    array will be converted to that type; the default is the same as the
    input type.

    Returns a tuple of the resampled array and the (x, y) scale factors
    actually used.
    """
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    # NOTE: removed unused `rdim`/`open_cl_ok` locals (dead code left over
    # from a removed OpenCL back end)

    if dtype is None:
        dtype = data_np.dtype

    if have_opencv and _use in (None, 'opencv'):
        if logger is not None:
            logger.debug("resizing with OpenCv")
        # opencv is fastest
        if interpolation == 'basic':
            interpolation = 'nearest'
        method = cv2_resize[interpolation]

        # (corner coords already converted to int above)
        cutout = data_np[y1:y2 + 1, x1:x2 + 1]
        if cutout.dtype not in (np.uint8, np.uint16):
            # special hack for OpenCv resize on certain numpy array types
            cutout = cutout.astype(np.float64)
        newdata = cv2.resize(cutout, None,
                             fx=scale_x, fy=scale_y,
                             interpolation=method)

        old_wd, old_ht = max(x2 - x1 + 1, 1), max(y2 - y1 + 1, 1)
        ht, wd = newdata.shape[:2]
        scale_x, scale_y = float(wd) / old_wd, float(ht) / old_ht

    elif data_np.dtype == np.uint8 and have_pillow and _use in (None, 'pillow'):
        if logger is not None:
            logger.info("resizing with pillow")
        if interpolation == 'basic':
            interpolation = 'nearest'
        method = pil_resize[interpolation]
        img = PILimage.fromarray(data_np[y1:y2 + 1, x1:x2 + 1])
        old_wd, old_ht = max(x2 - x1 + 1, 1), max(y2 - y1 + 1, 1)
        new_wd, new_ht = int(scale_x * old_wd), int(scale_y * old_ht)
        img_siz = img.resize((new_wd, new_ht), resample=method)
        newdata = np.array(img_siz, dtype=dtype)

        ht, wd = newdata.shape[:2]
        scale_x, scale_y = float(wd) / old_wd, float(ht) / old_ht

    elif interpolation not in ('basic', 'nearest'):
        raise ValueError("Interpolation method not supported: '%s'" % (
            interpolation))

    else:
        if logger is not None:
            logger.debug('resizing by slicing')
        view, scales = get_scaled_cutout_basic_view(data_np.shape,
                                                    (x1, y1), (x2, y2),
                                                    (scale_x, scale_y))
        scale_x, scale_y = scales
        newdata = data_np[view]

    newdata = newdata.astype(dtype, copy=False)
    return newdata, (scale_x, scale_y)
def get_scaled_cutout_basic2(data_np, p1, p2, scales,
                             interpolation='basic', logger=None):
    """Like get_scaled_cutout_basic, but takes corner points and scale
    factors as sequences, supporting 2D or 3D data for the 'basic' and
    'view' methods; other interpolation methods are 2D only.
    """
    if interpolation in ('basic', 'view'):
        # nearest-neighbor resampling via a fancy-index view
        if logger is not None:
            logger.debug('resizing by slicing')
        view, oscales = get_scaled_cutout_basic_view(data_np.shape,
                                                     p1, p2, scales)
        return data_np[view], oscales

    # interpolated resampling is only supported for 2D data
    if len(scales) != 2:
        raise ValueError("Interpolation method not supported: '%s'" % (
            interpolation))
    return get_scaled_cutout_basic(data_np, p1[0], p1[1], p2[0], p2[1],
                                   scales[0], scales[1],
                                   interpolation=interpolation,
                                   logger=logger)
def transform(data_np, flip_x=False, flip_y=False, swap_xy=False):
    """Return `data_np` with any combination of flips and an axis swap
    applied; flips are applied before the swap.  The result may be a
    view of the input.
    """
    res = data_np
    if flip_y:
        res = np.flipud(res)
    if flip_x:
        res = np.fliplr(res)
    return res.swapaxes(0, 1) if swap_xy else res
def calc_image_merge_clip(p1, p2, dst, q1, q2):
"""
p1 (x1, y1, z1) and p2 (x2, y2, z2) define the extent of the (non-scaled)
data shown. The image, defined by region q1, q2 is to be placed at dst
in the image (destination may be outside of the actual data array).
Refines the modified points (q1', q2') defining the clipped rectangle
needed to be cut from the source array and scaled.
"""
x1, y1 = p1[:2]
x2, y2 = p2[:2]
dst_x, dst_y = dst[:2]
a1, b1 = q1[:2]
a2, b2 = q2[:2]
src_wd, src_ht = a2 - a1, b2 - b1
# Trim off parts of srcarr that would be "hidden"
# to the left and above the dstarr edge.
ex = y1 - dst_y
if ex > 0:
src_ht -= ex
dst_y += ex
b1 += ex
ex = x1 - dst_x
if ex > 0:
src_wd -= ex
dst_x += ex
a1 += ex
# Trim off parts of srcarr that would be "hidden"
# to the right and below dstarr edge.
ex = dst_y + src_ht - y2
if ex > 0:
src_ht -= ex
b2 -= ex
ex = dst_x + src_wd - x2
if ex > 0:
src_wd -= ex
a2 -= ex
if len(p1) > 2:
# 3D image
z1, z2, dst_z, c1, c2 = p1[2], p2[2], dst[2], q1[2], q2[2]
src_dp = c2 - c1
ex = z1 - dst_z
if ex > 0:
src_dp -= ex
dst_z += ex
c1 += ex
ex = dst_z + src_dp - z2
if ex > 0:
src_dp -= ex
c2 -= ex
return ((dst_x, dst_y, dst_z), (a1, b1, c1), (a2, b2, c2))
else:
return ((dst_x, dst_y), (a1, b1), (a2, b2))
def overlay_image_2d_pil(dstarr, pos, srcarr, dst_order='RGBA',
                         src_order='RGBA',
                         alpha=1.0, copy=False, fill=False, flipy=False):
    """Overlay 2D image `srcarr` onto `dstarr` at `pos` using PIL's
    paste.  If `copy` is True a new array is returned; otherwise
    `dstarr` is updated in place (and None is returned).
    """
    dst_x, dst_y = int(round(pos[0])), int(round(pos[1]))

    if flipy:
        srcarr = np.flipud(srcarr)

    # match the channel ordering of the destination
    if dst_order != src_order:
        srcarr = reorder_image(dst_order, srcarr, src_order)

    img_dst = PILimage.fromarray(dstarr)
    img_src = PILimage.fromarray(srcarr)

    # use the source's own alpha band as the paste mask, if it has one
    mask = img_src if 'A' in src_order else None
    img_dst.paste(img_src, (dst_x, dst_y), mask=mask)

    res_arr = np.array(img_dst, dtype=dstarr.dtype)
    if copy:
        return res_arr

    # overwrite the destination in place
    dstarr[:, :, :] = res_arr
def overlay_image_2d_np(dstarr, pos, srcarr, dst_order='RGBA',
                        src_order='RGBA',
                        alpha=1.0, copy=False, fill=False, flipy=False):
    """Overlay 2D image `srcarr` onto `dstarr` at position `pos` (x, y),
    alpha-blending where applicable, and return the destination array.

    Both arrays are (ht, wd, channels) with integer dtypes (np.iinfo is
    applied to both).  `dst_order`/`src_order` name the channel layout
    (e.g. 'RGBA'); the destination alpha, if present, must be channel 0
    or 3.  `alpha` is the constant opacity used when the source has no
    alpha channel.  If `copy` is True, blending happens on a copy and the
    caller's `dstarr` is untouched.  If `fill` is True the destination
    alpha is set to fully opaque under the overlaid region.  `flipy`
    flips the source vertically before overlaying.  The source is
    clipped to the destination bounds.
    """
    dst_ht, dst_wd, dst_ch = dstarr.shape
    dst_type = dstarr.dtype
    # maximum channel value for the dtype (e.g. 255 for uint8)
    dst_max_val = np.iinfo(dst_type).max
    src_ht, src_wd, src_ch = srcarr.shape
    src_type = srcarr.dtype
    src_max_val = np.iinfo(src_type).max
    dst_x, dst_y = int(round(pos[0])), int(round(pos[1]))

    if flipy:
        srcarr = np.flipud(srcarr)

    # Trim off parts of srcarr that would be "hidden"
    # to the left and above the dstarr edge.
    if dst_y < 0:
        dy = abs(dst_y)
        srcarr = srcarr[dy:, :, :]
        src_ht -= dy
        dst_y = 0

    if dst_x < 0:
        dx = abs(dst_x)
        srcarr = srcarr[:, dx:, :]
        src_wd -= dx
        dst_x = 0

    # Trim off parts of srcarr that would be "hidden"
    # to the right and below the dstarr edge.
    # (any surplus rows/cols left by the coarse slice are ignored later,
    # since all subsequent accesses use [:src_ht, :src_wd])
    ex = dst_y + src_ht - dst_ht
    if ex > 0:
        srcarr = srcarr[:dst_ht, :, :]
        src_ht -= ex

    ex = dst_x + src_wd - dst_wd
    if ex > 0:
        srcarr = srcarr[:, :dst_wd, :]
        src_wd -= ex

    if src_wd <= 0 or src_ht <= 0:
        # nothing to do
        return dstarr

    if copy:
        # blend into a copy so the caller's array is unmodified
        dstarr = np.copy(dstarr, order='C')

    da_idx = -1
    # color channels of the destination to blend (excludes alpha)
    slc = slice(0, 3)
    if 'A' in dst_order:
        da_idx = dst_order.index('A')
        # Currently we assume that alpha channel is in position 0 or 3 in dstarr
        if da_idx == 0:
            slc = slice(1, 4)
        elif da_idx != 3:
            raise ValueError("Alpha channel not in expected position (0 or 4) in dstarr")

    # fill alpha channel in destination in the area we will be dropping
    # the image
    if fill and (da_idx >= 0):
        dstarr[dst_y:dst_y + src_ht, dst_x:dst_x + src_wd, da_idx] = dst_max_val

    # if overlay source contains an alpha channel, extract it
    # and use it, otherwise use scalar keyword parameter
    if (src_ch > 3) and ('A' in src_order):
        sa_idx = src_order.index('A')
        alpha = srcarr[:src_ht, :src_wd, sa_idx]
        if np.all(np.isclose(alpha, src_max_val)):
            # optimization to avoid blending if all alpha elements are max
            alpha = 1.0
        else:
            # normalize alpha to [0, 1] and broadcast over the color planes
            alpha = alpha / float(src_max_val)
            alpha = np.dstack((alpha, alpha, alpha))

    # reorder srcarr if necessary to match dstarr for alpha merge
    get_order = dst_order
    if ('A' in dst_order) and ('A' not in src_order):
        get_order = dst_order.replace('A', '')
    if get_order != src_order:
        srcarr = reorder_image(get_order, srcarr, src_order)

    # define the two subarrays we are blending
    _dst = dstarr[dst_y:dst_y + src_ht, dst_x:dst_x + src_wd, slc]
    _src = srcarr[:src_ht, :src_wd, slc]

    if np.isscalar(alpha) and alpha == 1.0:
        # optimization to avoid alpha blending
        # Place our srcarr into this dstarr at dst offsets
        _dst[:, :, :] = _src

    else:
        # calculate alpha blending
        #   Co = CaAa + CbAb(1 - Aa)
        _dst[:, :, :] = (alpha * _src) + (1.0 - alpha) * _dst

    return dstarr
def overlay_image_2d(dstarr, pos, srcarr, dst_order='RGBA',
                     src_order='RGBA',
                     alpha=1.0, copy=False, fill=False, flipy=False):
    """Overlay 2D image `srcarr` onto 2D image `dstarr` at `pos` (x, y).

    Currently always delegates to the numpy implementation; the pillow
    path below is deliberately disabled pending more testing.
    """
    # NOTE: not tested yet thoroughly enough to use
    # if have_pillow:
    #     return overlay_image_2d_pil(dstarr, pos, srcarr, dst_order=dst_order,
    #                                 src_order=src_order, alpha=alpha,
    #                                 copy=copy, fill=fill, flipy=flipy)
    return overlay_image_2d_np(dstarr, pos, srcarr, dst_order=dst_order,
                               src_order=src_order, alpha=alpha,
                               copy=copy, fill=fill, flipy=flipy)
def overlay_image_3d(dstarr, pos, srcarr, dst_order='RGBA', src_order='RGBA',
                     alpha=1.0, copy=False, fill=True, flipy=False):
    """Overlay 3D image `srcarr` onto 3D image `dstarr` at `pos` (x, y, z).

    If `srcarr` carries an alpha channel it is used for blending,
    otherwise the scalar `alpha` (0..1) applies.  `fill` makes the
    destination alpha opaque over the drop area; `flipy` flips the
    source vertically first; `copy` operates on (and returns) a copy
    of `dstarr` instead of modifying it in place.

    Returns the destination array (the copy, if `copy` is True).
    """
    dst_x, dst_y, dst_z = [int(round(pos[n])) for n in range(3)]
    dst_ht, dst_wd, dst_dp, dst_ch = dstarr.shape
    dst_type = dstarr.dtype
    dst_max_val = np.iinfo(dst_type).max
    src_ht, src_wd, src_dp, src_ch = srcarr.shape
    src_type = srcarr.dtype
    src_max_val = np.iinfo(src_type).max
    if flipy:
        srcarr = np.flipud(srcarr)
    # Trim off parts of srcarr that would be "hidden"
    # to the left and above the dstarr edge.
    if dst_y < 0:
        dy = abs(dst_y)
        srcarr = srcarr[dy:, :, :, :]
        src_ht -= dy
        dst_y = 0
    if dst_x < 0:
        dx = abs(dst_x)
        srcarr = srcarr[:, dx:, :, :]
        src_wd -= dx
        dst_x = 0
    if dst_z < 0:
        dz = abs(dst_z)
        srcarr = srcarr[:, :, dz:, :]
        src_dp -= dz
        dst_z = 0
    # Trim off parts of srcarr that would be "hidden"
    # to the right and below the dstarr edge.
    ex = dst_y + src_ht - dst_ht
    if ex > 0:
        srcarr = srcarr[:dst_ht, :, :, :]
        src_ht -= ex
    ex = dst_x + src_wd - dst_wd
    if ex > 0:
        srcarr = srcarr[:, :dst_wd, :, :]
        src_wd -= ex
    ex = dst_z + src_dp - dst_dp
    if ex > 0:
        srcarr = srcarr[:, :, :dst_dp, :]
        src_dp -= ex
    if src_wd <= 0 or src_ht <= 0 or src_dp <= 0:
        # completely clipped--nothing to do
        return dstarr
    if copy:
        dstarr = np.copy(dstarr, order='C')
    da_idx = -1
    slc = slice(0, 3)
    if 'A' in dst_order:
        da_idx = dst_order.index('A')
        # Currently we assume that alpha channel is in position 0 or 3 in dstarr
        if da_idx == 0:
            slc = slice(1, 4)
        elif da_idx != 3:
            # bug fix: message used to say "(0 or 4)", which does not match
            # the positions actually accepted above
            raise ValueError("Alpha channel not in expected position (0 or 3) in dstarr")
    # fill alpha channel in destination in the area we will be dropping
    # the image
    if fill and (da_idx >= 0):
        dstarr[dst_y:dst_y + src_ht, dst_x:dst_x + src_wd,
               dst_z:dst_z + src_dp, da_idx] = dst_max_val
    # if overlay source contains an alpha channel, extract it
    # and use it, otherwise use scalar keyword parameter
    if (src_ch > 3) and ('A' in src_order):
        sa_idx = src_order.index('A')
        alpha = srcarr[:src_ht, :src_wd, :src_dp, sa_idx]
        if np.all(np.isclose(alpha, src_max_val)):
            # optimization to avoid blending if all alpha elements are max
            alpha = 1.0
        else:
            # normalize to 0..1 (alpha already holds the trimmed slice;
            # the old code redundantly re-sliced srcarr here)
            alpha = alpha / float(src_max_val)
            # replicate across the 3 color planes
            alpha = np.concatenate([alpha[..., np.newaxis]] * 3, axis=-1)
    # reorder srcarr if necessary to match dstarr for alpha merge
    get_order = dst_order
    if ('A' in dst_order) and ('A' not in src_order):
        get_order = dst_order.replace('A', '')
    if get_order != src_order:
        srcarr = reorder_image(get_order, srcarr, src_order)
    # define the two subarrays we are blending
    _dst = dstarr[dst_y:dst_y + src_ht, dst_x:dst_x + src_wd,
                  dst_z:dst_z + src_dp, slc]
    _src = srcarr[:src_ht, :src_wd, :src_dp, slc]
    if np.isscalar(alpha) and alpha == 1.0:
        # optimization to avoid alpha blending
        # Place our srcarr into this dstarr at dst offsets
        _dst[:, :, :, :] = _src
    else:
        # alpha blending: Co = CaAa + CbAb(1 - Aa)
        _dst[:, :, :, :] = (alpha * _src) + (1.0 - alpha) * _dst
    return dstarr
def overlay_image(dstarr, pos, srcarr, **kwargs):
    """Overlay `srcarr` onto `dstarr` at `pos`, dispatching to the 2D
    or 3D implementation based on the rank of `srcarr`."""
    if srcarr.ndim > 3:
        return overlay_image_3d(dstarr, pos, srcarr, **kwargs)
    return overlay_image_2d(dstarr, pos, srcarr, **kwargs)
def reorder_image(dst_order, src_arr, src_order):
    """Reorder the color planes of `src_arr` (ordered per `src_order`,
    e.g. 'RGB') to match `dst_order` (e.g. 'BGRA').

    If `dst_order` requires an alpha plane that `src_arr` lacks, a fully
    opaque one is synthesized (2D images only).  Raises ValueError if a
    non-alpha channel is missing, or the channel depth cannot match
    `src_order`.  Returns a C-contiguous array.
    """
    # channels live on the LAST axis; using shape[2] (as before) reads
    # the z-depth for 4D volume images instead of the channel count
    depth = src_arr.shape[-1]
    if depth != len(src_order):
        if len(dst_order.replace('A', '')) != len(src_order.replace('A', '')):
            raise ValueError("src_order (%s) does not match array depth (%d)" % (
                src_order, depth))
    if dst_order == src_order:
        # nothing to reorder
        return np.ascontiguousarray(src_arr)
    missing = set(dst_order) - set(src_order)
    if len(missing) == 0:
        # we don't have to add an alpha plane, just create a reindexed copy
        idx = np.array([src_order.index(c) for c in dst_order])
        return np.ascontiguousarray(src_arr[..., idx])
    if missing != set(['A']):
        raise ValueError("source array missing channels ({}) needed in "
                         "destination array ({})".format(src_order, dst_order))
    # dst order requires an alpha channel that the source lacks;
    # synthesize a fully opaque plane and splice it in
    indexes = [src_order.index(c) for c in dst_order.replace('A', '')]
    bands = [src_arr[..., idx, np.newaxis] for idx in indexes]
    ht, wd = src_arr.shape[:2]
    dst_type = src_arr.dtype
    dst_max_val = np.iinfo(dst_type).max
    alpha = np.full((ht, wd, 1), dst_max_val, dtype=dst_type)
    bands.insert(dst_order.index('A'), alpha)
    return np.concatenate(bands, axis=-1)
def strip_z(pts):
    """Return `pts` with any Z (third) component removed.

    Points with only X, Y components are returned unchanged (as an
    ndarray).
    """
    arr = np.asarray(pts)
    if arr.shape[-1] <= 2:
        return arr
    return np.asarray((arr.T[0], arr.T[1])).T
def pad_z(pts, value=0.0, dtype=np.float32):
    """Return `pts` with a Z (third) component appended if missing.

    Missing Z values are filled with `value`; the result has `dtype`.
    Handles both a single (x, y) point and an array of points.
    """
    arr = np.asarray(pts, dtype=dtype)
    if arr.shape[-1] >= 3:
        return arr
    if arr.ndim < 2:
        # a single point
        return np.asarray((arr[0], arr[1], value), dtype=arr.dtype)
    zs = np.full(len(arr), value, dtype=arr.dtype)
    return np.asarray((arr.T[0], arr.T[1], zs)).T
def get_bounds(pts):
    """Return (min_pt, max_pt) bounding the set of points `pts`."""
    axes = np.asarray(pts).T
    mins = [np.min(axis_vals) for axis_vals in axes]
    maxs = [np.max(axis_vals) for axis_vals in axes]
    return np.asarray((mins, maxs))
def sort_xy(x1, y1, x2, y2):
    """Sort bounding box parameters so the minimum corner comes first.

    Returns (xmin, ymin, xmax, ymax).
    """
    (xmin, ymin), (xmax, ymax) = get_bounds(((x1, y1), (x2, y2)))
    return (xmin, ymin, xmax, ymax)
def fill_array(dstarr, order, r, g, b, a):
    """Fill `dstarr` in place with a single color.

    `order` names the color planes in the array (e.g. 'RGBA');
    (r, g, b, a) are expected in the range 0..1 and are scaled to the
    integer range of the array's dtype.  Works for 2D or 3D arrays.
    """
    maxv = np.iinfo(dstarr.dtype).max
    plane_val = dict(R=int(maxv * r), G=int(maxv * g),
                     B=int(maxv * b), A=int(maxv * a))
    color = tuple(plane_val[c] for c in order)
    if dstarr.dtype == np.uint8 and len(color) == 4:
        # optimization: write all four 8-bit planes as one 32-bit value
        packed = np.array(color, dtype=np.uint8).view(np.uint32)[0]
        dstarr.view(np.uint32)[..., :] = packed
    else:
        dstarr[..., :] = color
def make_filled_array(shp, dtype, order, r, g, b, a):
    """Return a new array of shape `shp` filled with a single color.

    `order` names the color planes; (r, g, b, a) are expected in the
    range 0..1 and are scaled to the integer range of `dtype`.
    `shp` may describe a 2D or 3D array.
    """
    maxv = np.iinfo(dtype).max
    plane_val = dict(R=int(maxv * r), G=int(maxv * g),
                     B=int(maxv * b), A=int(maxv * a))
    color = tuple(plane_val[c] for c in order)
    if dtype == np.uint8 and len(color) == 4:
        # optimization for 32-bit RGBA: fill via a uint32 view
        packed = np.array(color, dtype=dtype).view(np.uint32)
        res = np.zeros(shp, dtype=dtype)
        res.view(np.uint32)[:] = packed
        return res
    return np.full(shp, color, dtype=dtype)
def add_alpha(arr, alpha=None):
    """Return `arr` with an alpha plane appended if it has none.

    A 2D (monochrome) array first gains a depth axis.  The new alpha
    plane is zero unless `alpha` is given.  Arrays that already carry
    an alpha plane are returned unchanged.
    """
    if arr.ndim == 2:
        arr = arr[..., np.newaxis]
    if arr.shape[2] not in (1, 3):
        # already has an alpha plane; leave as-is
        return arr
    fill = 0 if alpha is None else alpha
    a_plane = np.full(arr.shape[:2], fill, dtype=arr.dtype)
    return np.dstack((arr, a_plane))
def get_minmax_dtype(dtype):
    """Return the (min, max) representable values for numpy `dtype`."""
    if issubclass(dtype.type, np.integer):
        limits = np.iinfo(dtype)
    else:
        limits = np.finfo(dtype)
    return limits.min, limits.max
def check_native_byteorder(data_np):
    """Return True if `data_np` has a byte order *opposite* to this
    machine's native order (i.e. the data would need a byteswap).

    Dtypes with no explicit byte-order marker return False.
    """
    marker = str(data_np.dtype)[:1]
    if sys.byteorder == 'little':
        return marker == '>'
    return marker == '<'
def cutout_data(data, x1, y1, x2, y2, xstep=1, ystep=1, z=None,
                astype=None):
    """Cut out a rectangular region of `data`.

    (x1, y1) is the minimum corner; (x2, y2) is *one greater* than the
    maximum corner (exclusive, like Python slicing).  `xstep`/`ystep`
    are skip intervals within the region; `z` selects a depth plane of
    a color image; `astype` converts the result to another dtype.
    """
    res = data[y1:y2:ystep, x1:x2:xstep]
    if z is not None and res.ndim > 2:
        res = res[..., z]
    if astype:
        res = res.astype(astype, copy=False)
    return res
def cutout_adjust(data, x1, y1, x2, y2, xstep=1, ystep=1, z=0, astype=None):
    """Like `cutout_data`, but first shifts the requested rectangle so it
    lies inside the data area, preserving its width and height where
    possible -- e.g. (-2, -2, 5, 5) becomes (0, 0, 7, 7).

    Returns (cutout, x1, y1, x2, y2) with the adjusted coordinates.
    """
    height, width = data.shape[:2]
    wd, ht = x2 - x1, y2 - y1
    if x1 < 0:
        x1, x2 = 0, wd
    elif x2 >= width:
        x2 = width
        x1 = x2 - wd
    if y1 < 0:
        y1, y2 = 0, ht
    elif y2 >= height:
        y2 = height
        y1 = y2 - ht
    cut = cutout_data(data, x1, y1, x2, y2, xstep=xstep, ystep=ystep,
                      z=z, astype=astype)
    return (cut, x1, y1, x2, y2)
def cutout_radius(data, x, y, radius, xstep=1, ystep=1, astype=None):
    """Cut out a square of half-width `radius` centered on (x, y),
    adjusted to lie within the data area (see `cutout_adjust`)."""
    x1, y1 = x - radius, y - radius
    x2, y2 = x + radius + 1, y + radius + 1
    return cutout_adjust(data, x1, y1, x2, y2,
                         xstep=xstep, ystep=ystep, astype=astype)
def guess_order(shape):
    """Guess the color-plane ordering of an image from its shape.

    2D arrays (or depth 1) are monochrome ('M'); depth 2 is monochrome
    plus alpha ('MA'); depth 3 is 'RGB'; depth 4 is 'RGBA'.

    Raises ValueError for any other depth (previously this fell through
    and raised an obscure UnboundLocalError).
    """
    if len(shape) <= 2:
        order = 'M'
    else:
        depth = shape[-1]
        if depth == 1:
            order = 'M'
        elif depth == 2:
            order = 'MA'
        elif depth == 3:
            order = 'RGB'
        elif depth == 4:
            order = 'RGBA'
        else:
            raise ValueError("Cannot guess channel order for depth %d" % depth)
    return order
def get_aspect(shape):
    """Return the aspect ratio (width / height) for an image shape."""
    ht, wd = shape[0], shape[1]
    return wd / ht
def calc_aspect_str(wd, ht):
    """Return the aspect ratio of `wd` x `ht` as a reduced "x:y" string."""
    divisor = np.gcd(wd, ht)
    return "{}:{}".format(int(wd // divisor), int(ht // divisor))
| #
# trcalc.py -- transformation calculations for image data
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
import math
import numpy as np
# interpolation methods always available; the optional backends imported
# below extend this list when present
interpolation_methods = ['basic']

# preferred backend set via use(); None means "first available"
_use = None
def use(pkgname):
    """Select a preferred backend ('opencv' or 'pillow') for image
    transformations.  Unrecognized names are silently ignored.
    """
    global _use
    if pkgname in ('opencv', 'pillow'):
        _use = pkgname
have_opencv = False
try:
    # optional opencv package speeds up certain operations, especially
    # rotation
    import cv2

    # mapping of our interpolation method names onto OpenCV constants
    cv2_resize = {
        'nearest': cv2.INTER_NEAREST,
        'linear': cv2.INTER_LINEAR,
        'area': cv2.INTER_AREA,
        'bicubic': cv2.INTER_CUBIC,
        'lanczos': cv2.INTER_LANCZOS4,
    }

    have_opencv = True
    # advertise the extra interpolation methods this backend provides
    if 'nearest' not in interpolation_methods:
        interpolation_methods = list(set(['basic'] +
                                         list(cv2_resize.keys())))
        interpolation_methods.sort()
except ImportError:
    pass
have_pillow = False
try:
    # do we have Python Imaging Library available?
    import PIL.Image as PILimage

    # mapping of our interpolation method names onto Pillow constants
    pil_resize = {
        'nearest': PILimage.NEAREST,
        'linear': PILimage.BILINEAR,
        'area': PILimage.HAMMING,
        'bicubic': PILimage.BICUBIC,
        'lanczos': PILimage.LANCZOS,
    }

    have_pillow = True
    # advertise the extra interpolation methods this backend provides
    if 'nearest' not in interpolation_methods:
        interpolation_methods = list(set(['basic'] +
                                         list(pil_resize.keys())))
        interpolation_methods.sort()
except ImportError:
    pass
# For testing
#have_opencv = False
#have_pillow = False
def get_center(data_np):
    """Return the integer (x, y) center pixel of a 2D+ array."""
    ht, wd = data_np.shape[:2]
    return (wd // 2, ht // 2)
def rotate_pt(x_arr, y_arr, theta_deg, xoff=0, yoff=0):
    """Rotate points (x_arr, y_arr) by `theta_deg` degrees about the
    center (xoff, yoff).

    Returns an array of the rotated (x, y) coordinates.
    """
    # TODO: use opencv acceleration if available
    theta = np.radians(theta_deg)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    dx = x_arr - xoff
    dy = y_arr - yoff
    x_rot = (dx * cos_t) - (dy * sin_t) + xoff
    y_rot = (dx * sin_t) + (dy * cos_t) + yoff
    return np.asarray((x_rot, y_rot))


# older alias, kept for backward compatibility
rotate_arr = rotate_pt
def rotate_coord(coord, thetas, offsets):
    """Rotate coordinates `coord` by thetas[0] degrees in the XY plane
    about center (offsets[0], offsets[1]).

    Any additional dimensions (e.g. Z) are passed through unrotated.
    """
    arr_t = np.asarray(coord).T
    # TODO: handle dimensional rotation N>2
    rotated = rotate_pt(arr_t[0], arr_t[1], thetas[0],
                        xoff=offsets[0], yoff=offsets[1])
    if len(arr_t) > 2:
        # just copy unrotated Z (and beyond) coords
        rotated = np.asarray([rotated[0], rotated[1]] + list(arr_t[2:]))
    return rotated.T
def rotate_clip(data_np, theta_deg, rotctr_x=None, rotctr_y=None,
                out=None, logger=None):
    """
    Rotate numpy array `data_np` by `theta_deg` around rotation center
    (rotctr_x, rotctr_y).  If the rotation center is omitted it defaults
    to the center of the array.
    No adjustment is done to the data array beforehand, so the result will
    be clipped according to the size of the array (the output array will be
    the same size as the input array).

    `out`, if not None, receives the result and is returned; `logger`
    receives debug messages about the backend used.
    """
    # If there is no rotation, then we are done
    if math.fmod(theta_deg, 360.0) == 0.0:
        return data_np
    ht, wd = data_np.shape[:2]
    dtype = data_np.dtype
    if rotctr_x is None:
        rotctr_x = wd // 2
    if rotctr_y is None:
        rotctr_y = ht // 2
    # backend selection: OpenCV (uint8 only), then pillow (uint8 only),
    # then pure numpy
    if dtype == np.uint8 and have_opencv and _use in (None, 'opencv'):
        if logger is not None:
            logger.debug("rotating with OpenCv")
        # opencv is fastest
        M = cv2.getRotationMatrix2D((rotctr_x, rotctr_y), theta_deg, 1)
        newdata = cv2.warpAffine(data_np, M, (wd, ht))
        new_ht, new_wd = newdata.shape[:2]
        # NOTE(review): asserts are stripped under "python -O"; this
        # sanity check would be safer as an explicit raise
        assert (wd == new_wd) and (ht == new_ht), \
            Exception("rotated cutout is %dx%d original=%dx%d" % (
                new_wd, new_ht, wd, ht))
        newdata = newdata.astype(dtype, copy=False)
        if out is not None:
            out[:, :, ...] = newdata
            newdata = out
    elif dtype == np.uint8 and have_pillow and _use in (None, 'pillow'):
        if logger is not None:
            logger.debug("rotating with pillow")
        img = PILimage.fromarray(data_np)
        img_rot = img.rotate(theta_deg, resample=False, expand=False,
                             center=(rotctr_x, rotctr_y))
        newdata = np.array(img_rot, dtype=data_np.dtype)
        new_ht, new_wd = newdata.shape[:2]
        assert (wd == new_wd) and (ht == new_ht), \
            Exception("rotated cutout is %dx%d original=%dx%d" % (
                new_wd, new_ht, wd, ht))
    else:
        if logger is not None:
            logger.debug("rotating with numpy")
        # pure numpy fallback: inverse-map every output pixel to its
        # nearest source pixel
        yi, xi = np.mgrid[0:ht, 0:wd]
        xi -= rotctr_x
        yi -= rotctr_y
        cos_t = np.cos(np.radians(theta_deg))
        sin_t = np.sin(np.radians(theta_deg))
        ap = (xi * cos_t) - (yi * sin_t) + rotctr_x
        bp = (xi * sin_t) + (yi * cos_t) + rotctr_y
        #ap = np.rint(ap).clip(0, wd-1).astype(int)
        #bp = np.rint(bp).clip(0, ht-1).astype(int)
        # Optimizations to reuse existing intermediate arrays
        np.rint(ap, out=ap)
        ap = ap.astype(int, copy=False)
        ap.clip(0, wd - 1, out=ap)
        np.rint(bp, out=bp)
        bp = bp.astype(int, copy=False)
        bp.clip(0, ht - 1, out=bp)
        if out is not None:
            out[:, :, ...] = data_np[bp, ap]
            newdata = out
        else:
            newdata = data_np[bp, ap]
            new_ht, new_wd = newdata.shape[:2]
            assert (wd == new_wd) and (ht == new_ht), \
                Exception("rotated cutout is %dx%d original=%dx%d" % (
                    new_wd, new_ht, wd, ht))
    return newdata
def rotate(data_np, theta_deg, rotctr_x=None, rotctr_y=None, pad=20,
           logger=None):
    """Rotate `data_np` by `theta_deg` without clipping the corners:
    the data is first centered on a larger square canvas whose side is
    the data's diagonal plus `pad`, then rotated with `rotate_clip`.

    Returns the enlarged, rotated array.
    """
    # If there is no rotation, then we are done
    if math.fmod(theta_deg, 360.0) == 0.0:
        return data_np
    ht, wd = data_np.shape[:2]
    ocx, ocy = wd // 2, ht // 2
    # Make a square with room to rotate
    side = int(math.sqrt(wd**2 + ht**2) + pad)
    new_wd = new_ht = side
    dims = (new_ht, new_wd) + data_np.shape[2:]
    # Find center of new data array
    ncx, ncy = new_wd // 2, new_ht // 2
    # Overlay the old image on the new (blank) image
    ldx, rdx = min(ocx, ncx), min(wd - ocx, ncx)
    bdy, tdy = min(ocy, ncy), min(ht - ocy, ncy)
    # TODO: fill with a different value?
    newdata = np.zeros(dims, dtype=data_np.dtype)
    newdata[ncy - bdy:ncy + tdy, ncx - ldx:ncx + rdx] = \
        data_np[ocy - bdy:ocy + tdy, ocx - ldx:ocx + rdx]
    # Now rotate with clip as usual
    newdata = rotate_clip(newdata, theta_deg,
                          rotctr_x=rotctr_x, rotctr_y=rotctr_y,
                          out=newdata)
    return newdata
def get_scaled_cutout_wdht_view(shp, x1, y1, x2, y2, new_wd, new_ht):
    """Return a numpy index expression that cuts the (inclusive) region
    (x1, y1)-(x2, y2) out of an array of shape `shp` and resamples it,
    by nearest-neighbor index selection, to (new_wd, new_ht).

    Returns (view, (scale_x, scale_y)) where the scales are the ones
    actually achieved (vs. the ones implied by the requested sizes).
    """
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    # calculate dimensions of NON-scaled cutout
    old_wd, old_ht = max(x2 - x1 + 1, 1), max(y2 - y1 + 1, 1)
    if new_wd == 0:
        iscale_x = 0.0
    else:
        iscale_x = float(old_wd) / float(new_wd)
    if new_ht == 0:
        iscale_y = 0.0
    else:
        iscale_y = float(old_ht) / float(new_ht)
    max_x, max_y = shp[1] - 1, shp[0] - 1
    # Make indexes and scale them
    # Is there a more efficient way to do this?
    xi = np.clip(x1 + np.arange(0, new_wd) * iscale_x,
                 0, max_x).astype(int, copy=False)
    yi = np.clip(y1 + np.arange(0, new_ht) * iscale_y,
                 0, max_y).astype(int, copy=False)
    wd, ht = xi.size, yi.size
    # bounds check against shape (to protect future data access)
    if new_wd > 0:
        xi_max = xi[-1]
        if xi_max > max_x:
            raise ValueError("X index (%d) exceeds shape bounds (%d)" % (xi_max, max_x))
    if new_ht > 0:
        yi_max = yi[-1]
        if yi_max > max_y:
            raise ValueError("Y index (%d) exceeds shape bounds (%d)" % (yi_max, max_y))
    view = np.ix_(yi, xi)
    # Calculate actual scale used (vs. desired)
    scale_x = float(wd) / old_wd
    scale_y = float(ht) / old_ht
    # return view + actual scale factors used
    return (view, (scale_x, scale_y))
def get_scaled_cutout_wdhtdp_view(shp, p1, p2, new_dims):
    """
    Like get_scaled_cutout_wdht, but works in 3D: returns the view/slice
    to extract from an image instead of the extraction itself.

    `p1` and `p2` are (x, y, z) corners (inclusive) and `new_dims` is
    the desired (wd, ht, dp).  Returns (view, actual_scales).
    """
    x1, y1, z1 = p1
    x2, y2, z2 = p2
    new_wd, new_ht, new_dp = new_dims
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    z1, z2, new_wd, new_ht = int(z1), int(z2), int(new_wd), int(new_ht)
    # calculate dimensions of NON-scaled cutout
    old_wd = max(x2 - x1 + 1, 1)
    old_ht = max(y2 - y1 + 1, 1)
    old_dp = max(z2 - z1 + 1, 1)
    max_x, max_y, max_z = shp[1] - 1, shp[0] - 1, shp[2] - 1
    # Make indexes and scale them
    # Is there a more efficient way to do this?
    if new_wd == 0:
        iscale_x = 0.0
    else:
        iscale_x = float(old_wd) / float(new_wd)
    if new_ht == 0:
        iscale_y = 0.0
    else:
        iscale_y = float(old_ht) / float(new_ht)
    if new_dp == 0:
        iscale_z = 0.0
    else:
        iscale_z = float(old_dp) / float(new_dp)
    xi = np.clip(x1 + np.arange(0, new_wd) * iscale_x,
                 0, max_x).astype(int, copy=False)
    yi = np.clip(y1 + np.arange(0, new_ht) * iscale_y,
                 0, max_y).astype(int, copy=False)
    zi = np.clip(z1 + np.arange(0, new_dp) * iscale_z,
                 0, max_z).astype(int, copy=False)
    wd, ht, dp = xi.size, yi.size, zi.size
    # bounds check against shape (to protect future data access)
    if new_wd > 0:
        xi_max = xi[-1]
        if xi_max > max_x:
            raise ValueError("X index (%d) exceeds shape bounds (%d)" % (xi_max, max_x))
    if new_ht > 0:
        yi_max = yi[-1]
        if yi_max > max_y:
            raise ValueError("Y index (%d) exceeds shape bounds (%d)" % (yi_max, max_y))
    if new_dp > 0:
        zi_max = zi[-1]
        if zi_max > max_z:
            raise ValueError("Z index (%d) exceeds shape bounds (%d)" % (zi_max, max_z))
    view = np.ix_(yi, xi, zi)
    # Calculate actual scale used (vs. desired)
    scale_x = float(wd) / old_wd
    scale_y = float(ht) / old_ht
    scale_z = float(dp) / old_dp
    # return view + actual scale factors used
    return (view, (scale_x, scale_y, scale_z))
def get_scaled_cutout_wdht(data_np, x1, y1, x2, y2, new_wd, new_ht,
                           interpolation='basic', logger=None,
                           dtype=None):
    """Extract a region of the `data_np` defined by corners (x1, y1) and
    (x2, y2) and resample it to fit dimensions (new_wd, new_ht).

    `interpolation` describes the method of interpolation used, where the
    default "basic" is nearest neighbor.  If `logger` is not `None` it will
    be used for logging messages.  If `dtype` is defined then the output
    array will be converted to that type; the default is the same as the
    input type.

    Note that (x2, y2) lie *inside* the cutout region, unlike Python
    slicing.  Returns (newdata, (scale_x, scale_y)) where the scales are
    the ones actually achieved.
    """
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    new_wd, new_ht = int(new_wd), int(new_ht)
    if dtype is None:
        dtype = data_np.dtype
    # NOTE: the unused rdim/open_cl_ok locals from an abandoned OpenCL
    # path have been removed
    if have_opencv and _use in (None, 'opencv'):
        if logger is not None:
            logger.debug("resizing with OpenCv")
        # opencv is fastest and supports many methods
        if interpolation == 'basic':
            interpolation = 'nearest'
        method = cv2_resize[interpolation]
        cutout = data_np[y1:y2 + 1, x1:x2 + 1]
        if cutout.dtype not in (np.uint8, np.uint16):
            # special hack for OpenCv resize on certain numpy array types
            cutout = cutout.astype(np.float64)
        newdata = cv2.resize(cutout, (new_wd, new_ht),
                             interpolation=method)
        old_wd, old_ht = max(x2 - x1 + 1, 1), max(y2 - y1 + 1, 1)
        ht, wd = newdata.shape[:2]
        scale_x, scale_y = float(wd) / old_wd, float(ht) / old_ht
    elif data_np.dtype == np.uint8 and have_pillow and _use in (None, 'pillow'):
        if logger is not None:
            # use debug level, consistent with the other backends
            logger.debug("resizing with pillow")
        if interpolation == 'basic':
            interpolation = 'nearest'
        method = pil_resize[interpolation]
        img = PILimage.fromarray(data_np[y1:y2 + 1, x1:x2 + 1])
        img_siz = img.resize((new_wd, new_ht), resample=method)
        newdata = np.array(img_siz, dtype=dtype)
        old_wd, old_ht = max(x2 - x1 + 1, 1), max(y2 - y1 + 1, 1)
        ht, wd = newdata.shape[:2]
        scale_x, scale_y = float(wd) / old_wd, float(ht) / old_ht
    elif interpolation not in ('basic', 'nearest'):
        raise ValueError("Interpolation method not supported: '%s'" % (
            interpolation))
    else:
        if logger is not None:
            logger.debug('resizing by slicing')
        view, (scale_x, scale_y) = get_scaled_cutout_wdht_view(data_np.shape,
                                                               x1, y1, x2, y2,
                                                               new_wd, new_ht)
        newdata = data_np[view]
    newdata = newdata.astype(dtype, copy=False)
    return newdata, (scale_x, scale_y)
def get_scaled_cutout_wdhtdp(data_np, p1, p2, new_dims, logger=None):
    """Cut out the 3D region of `data_np` bounded by corners `p1`, `p2`
    and resample it (by slicing) to `new_dims` (wd, ht, dp).

    Returns (newdata, (scale_x, scale_y, scale_z)).
    """
    if logger is not None:
        logger.debug('resizing by slicing')
    view, scales = get_scaled_cutout_wdhtdp_view(data_np.shape, p1, p2,
                                                 new_dims)
    return data_np[view], scales
def get_scaled_cutout_basic_view(shp, p1, p2, scales):
    """
    Like get_scaled_cutout_basic, but returns the view/slice to extract
    from an image, instead of the extraction itself.

    `scales` has 2 elements (scale_x, scale_y) for a 2D cutout or 3
    (with scale_z) for a 3D one.  Returns (view, actual_scales).
    """
    x1, y1, x2, y2 = int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1])
    scale_x, scale_y = scales[:2]
    # calculate dimensions of NON-scaled cutout
    old_wd, old_ht = max(x2 - x1 + 1, 1), max(y2 - y1 + 1, 1)
    new_wd, new_ht = int(scale_x * old_wd), int(scale_y * old_ht)
    if len(scales) == 2:
        return get_scaled_cutout_wdht_view(shp, x1, y1, x2, y2, new_wd, new_ht)
    # 3D: scale the depth axis as well
    z1, z2, scale_z = p1[2], p2[2], scales[2]
    old_dp = max(z2 - z1 + 1, 1)
    new_dp = int(scale_z * old_dp)
    return get_scaled_cutout_wdhtdp_view(shp, p1, p2, (new_wd, new_ht, new_dp))
def get_scaled_cutout_basic(data_np, x1, y1, x2, y2, scale_x, scale_y,
                            interpolation='basic', logger=None,
                            dtype=None):
    """Extract the region of `data_np` bounded (inclusively) by corners
    (x1, y1) and (x2, y2) and resample it by factors (scale_x, scale_y).

    `interpolation` selects the method ("basic" = nearest neighbor);
    `dtype` optionally converts the output type.  Returns
    (newdata, (scale_x, scale_y)) with the scales actually achieved.
    """
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    if dtype is None:
        dtype = data_np.dtype
    # NOTE: the unused rdim/open_cl_ok locals and a duplicate int()
    # conversion of the coordinates have been removed
    if have_opencv and _use in (None, 'opencv'):
        if logger is not None:
            logger.debug("resizing with OpenCv")
        # opencv is fastest
        if interpolation == 'basic':
            interpolation = 'nearest'
        method = cv2_resize[interpolation]
        cutout = data_np[y1:y2 + 1, x1:x2 + 1]
        if cutout.dtype not in (np.uint8, np.uint16):
            # special hack for OpenCv resize on certain numpy array types
            cutout = cutout.astype(np.float64)
        newdata = cv2.resize(cutout, None,
                             fx=scale_x, fy=scale_y,
                             interpolation=method)
        old_wd, old_ht = max(x2 - x1 + 1, 1), max(y2 - y1 + 1, 1)
        ht, wd = newdata.shape[:2]
        scale_x, scale_y = float(wd) / old_wd, float(ht) / old_ht
    elif data_np.dtype == np.uint8 and have_pillow and _use in (None, 'pillow'):
        if logger is not None:
            # use debug level, consistent with the other backends
            logger.debug("resizing with pillow")
        if interpolation == 'basic':
            interpolation = 'nearest'
        method = pil_resize[interpolation]
        img = PILimage.fromarray(data_np[y1:y2 + 1, x1:x2 + 1])
        old_wd, old_ht = max(x2 - x1 + 1, 1), max(y2 - y1 + 1, 1)
        new_wd, new_ht = int(scale_x * old_wd), int(scale_y * old_ht)
        img_siz = img.resize((new_wd, new_ht), resample=method)
        newdata = np.array(img_siz, dtype=dtype)
        ht, wd = newdata.shape[:2]
        scale_x, scale_y = float(wd) / old_wd, float(ht) / old_ht
    elif interpolation not in ('basic', 'nearest'):
        raise ValueError("Interpolation method not supported: '%s'" % (
            interpolation))
    else:
        if logger is not None:
            logger.debug('resizing by slicing')
        view, scales = get_scaled_cutout_basic_view(data_np.shape,
                                                    (x1, y1), (x2, y2),
                                                    (scale_x, scale_y))
        scale_x, scale_y = scales
        newdata = data_np[view]
    newdata = newdata.astype(dtype, copy=False)
    return newdata, (scale_x, scale_y)
def get_scaled_cutout_basic2(data_np, p1, p2, scales,
                             interpolation='basic', logger=None):
    """Like `get_scaled_cutout_basic`, but takes corner points `p1`/`p2`
    and a `scales` tuple, and supports 3D (x, y, z) cutouts.

    Interpolation methods other than 'basic'/'view' are delegated to
    `get_scaled_cutout_basic` (2D only); otherwise the cutout is made
    by simple slicing.  Returns (newdata, actual_scales).
    """
    if interpolation not in ('basic', 'view'):
        if len(scales) != 2:
            # interpolated resize is only implemented for 2D data
            raise ValueError("Interpolation method not supported: '%s'" % (
                interpolation))
        return get_scaled_cutout_basic(data_np, p1[0], p1[1],
                                       p2[0], p2[1],
                                       scales[0], scales[1],
                                       interpolation=interpolation,
                                       logger=logger)
    if logger is not None:
        logger.debug('resizing by slicing')
    view, oscales = get_scaled_cutout_basic_view(data_np.shape,
                                                 p1, p2, scales)
    newdata = data_np[view]
    return newdata, oscales
def transform(data_np, flip_x=False, flip_y=False, swap_xy=False):
    """Apply flip/swap transforms to `data_np`, in the order: flip Y,
    flip X, then swap the X/Y axes.

    Returns the transformed array (possibly a view of the input).
    """
    res = data_np
    if flip_y:
        res = np.flipud(res)
    if flip_x:
        res = np.fliplr(res)
    if swap_xy:
        res = res.swapaxes(0, 1)
    return res
def calc_image_merge_clip(p1, p2, dst, q1, q2):
    """
    p1 (x1, y1, z1) and p2 (x2, y2, z2) define the extent of the (non-scaled)
    data shown.  The image, defined by region q1, q2 is to be placed at dst
    in the image (destination may be outside of the actual data array).

    Returns the modified points (dst', q1', q2') defining the clipped
    rectangle needed to be cut from the source array and scaled.
    """
    x1, y1 = p1[:2]
    x2, y2 = p2[:2]
    dst_x, dst_y = dst[:2]
    a1, b1 = q1[:2]
    a2, b2 = q2[:2]
    src_wd, src_ht = a2 - a1, b2 - b1
    # Trim off parts of srcarr that would be "hidden"
    # to the left and above the dstarr edge.
    ex = y1 - dst_y
    if ex > 0:
        src_ht -= ex
        dst_y += ex
        b1 += ex
    ex = x1 - dst_x
    if ex > 0:
        src_wd -= ex
        dst_x += ex
        a1 += ex
    # Trim off parts of srcarr that would be "hidden"
    # to the right and below dstarr edge.
    ex = dst_y + src_ht - y2
    if ex > 0:
        src_ht -= ex
        b2 -= ex
    ex = dst_x + src_wd - x2
    if ex > 0:
        src_wd -= ex
        a2 -= ex
    if len(p1) > 2:
        # 3D image: clip the Z (depth) axis the same way
        z1, z2, dst_z, c1, c2 = p1[2], p2[2], dst[2], q1[2], q2[2]
        src_dp = c2 - c1
        ex = z1 - dst_z
        if ex > 0:
            src_dp -= ex
            dst_z += ex
            c1 += ex
        ex = dst_z + src_dp - z2
        if ex > 0:
            src_dp -= ex
            c2 -= ex
        return ((dst_x, dst_y, dst_z), (a1, b1, c1), (a2, b2, c2))
    else:
        return ((dst_x, dst_y), (a1, b1), (a2, b2))
def overlay_image_2d_pil(dstarr, pos, srcarr, dst_order='RGBA',
                         src_order='RGBA',
                         alpha=1.0, copy=False, fill=False, flipy=False):
    """Overlay `srcarr` onto `dstarr` at `pos` (x, y) using Pillow's
    paste().

    NOTE: the `alpha` and `fill` parameters are currently ignored by
    this backend; blending uses only the source's own alpha channel
    (if present), via the paste mask.
    """
    dst_x, dst_y = int(round(pos[0])), int(round(pos[1]))
    if flipy:
        srcarr = np.flipud(srcarr)
    if dst_order != src_order:
        # match the destination's channel ordering before pasting
        srcarr = reorder_image(dst_order, srcarr, src_order)
    img_dst = PILimage.fromarray(dstarr)
    img_src = PILimage.fromarray(srcarr)
    mask = img_src
    if 'A' not in src_order:
        # no alpha channel in the source--paste without a mask
        mask = None
    img_dst.paste(img_src, (dst_x, dst_y), mask=mask)
    res_arr = np.array(img_dst, dtype=dstarr.dtype)
    if copy:
        return res_arr
    # write the result back into the caller's array
    dstarr[:, :, :] = res_arr
def overlay_image_2d_np(dstarr, pos, srcarr, dst_order='RGBA',
                        src_order='RGBA',
                        alpha=1.0, copy=False, fill=False, flipy=False):
    """Overlay 2D image `srcarr` onto 2D image `dstarr` at `pos` (x, y).

    If `srcarr` carries an alpha channel it is used for blending,
    otherwise the scalar `alpha` (0..1) applies.  `fill` makes the
    destination alpha opaque over the drop area; `flipy` flips the
    source vertically first; `copy` operates on (and returns) a copy
    of `dstarr` instead of modifying it in place.

    Returns the destination array (the copy, if `copy` is True).
    """
    dst_ht, dst_wd, dst_ch = dstarr.shape
    dst_type = dstarr.dtype
    dst_max_val = np.iinfo(dst_type).max
    src_ht, src_wd, src_ch = srcarr.shape
    src_type = srcarr.dtype
    src_max_val = np.iinfo(src_type).max
    dst_x, dst_y = int(round(pos[0])), int(round(pos[1]))
    if flipy:
        srcarr = np.flipud(srcarr)
    # Trim off parts of srcarr that would be "hidden"
    # to the left and above the dstarr edge.
    if dst_y < 0:
        dy = abs(dst_y)
        srcarr = srcarr[dy:, :, :]
        src_ht -= dy
        dst_y = 0
    if dst_x < 0:
        dx = abs(dst_x)
        srcarr = srcarr[:, dx:, :]
        src_wd -= dx
        dst_x = 0
    # Trim off parts of srcarr that would be "hidden"
    # to the right and below the dstarr edge.
    ex = dst_y + src_ht - dst_ht
    if ex > 0:
        srcarr = srcarr[:dst_ht, :, :]
        src_ht -= ex
    ex = dst_x + src_wd - dst_wd
    if ex > 0:
        srcarr = srcarr[:, :dst_wd, :]
        src_wd -= ex
    if src_wd <= 0 or src_ht <= 0:
        # completely clipped--nothing to do
        return dstarr
    if copy:
        dstarr = np.copy(dstarr, order='C')
    da_idx = -1
    slc = slice(0, 3)
    if 'A' in dst_order:
        da_idx = dst_order.index('A')
        # Currently we assume that alpha channel is in position 0 or 3 in dstarr
        if da_idx == 0:
            slc = slice(1, 4)
        elif da_idx != 3:
            # bug fix: message used to say "(0 or 4)", which does not match
            # the positions actually accepted above
            raise ValueError("Alpha channel not in expected position (0 or 3) in dstarr")
    # fill alpha channel in destination in the area we will be dropping
    # the image
    if fill and (da_idx >= 0):
        dstarr[dst_y:dst_y + src_ht, dst_x:dst_x + src_wd, da_idx] = dst_max_val
    # if overlay source contains an alpha channel, extract it
    # and use it, otherwise use scalar keyword parameter
    if (src_ch > 3) and ('A' in src_order):
        sa_idx = src_order.index('A')
        alpha = srcarr[:src_ht, :src_wd, sa_idx]
        if np.all(np.isclose(alpha, src_max_val)):
            # optimization to avoid blending if all alpha elements are max
            alpha = 1.0
        else:
            # normalize to 0..1 and replicate across the 3 color planes
            alpha = alpha / float(src_max_val)
            alpha = np.dstack((alpha, alpha, alpha))
    # reorder srcarr if necessary to match dstarr for alpha merge
    get_order = dst_order
    if ('A' in dst_order) and ('A' not in src_order):
        get_order = dst_order.replace('A', '')
    if get_order != src_order:
        srcarr = reorder_image(get_order, srcarr, src_order)
    # define the two subarrays we are blending
    _dst = dstarr[dst_y:dst_y + src_ht, dst_x:dst_x + src_wd, slc]
    _src = srcarr[:src_ht, :src_wd, slc]
    if np.isscalar(alpha) and alpha == 1.0:
        # optimization to avoid alpha blending
        # Place our srcarr into this dstarr at dst offsets
        _dst[:, :, :] = _src
    else:
        # alpha blending: Co = CaAa + CbAb(1 - Aa)
        _dst[:, :, :] = (alpha * _src) + (1.0 - alpha) * _dst
    return dstarr
def overlay_image_2d(dstarr, pos, srcarr, dst_order='RGBA',
                     src_order='RGBA',
                     alpha=1.0, copy=False, fill=False, flipy=False):
    """Overlay 2D image `srcarr` onto 2D image `dstarr` at `pos` (x, y).

    Currently always delegates to the numpy implementation; the pillow
    path below is deliberately disabled pending more testing.
    """
    # NOTE: not tested yet thoroughly enough to use
    # if have_pillow:
    #     return overlay_image_2d_pil(dstarr, pos, srcarr, dst_order=dst_order,
    #                                 src_order=src_order, alpha=alpha,
    #                                 copy=copy, fill=fill, flipy=flipy)
    return overlay_image_2d_np(dstarr, pos, srcarr, dst_order=dst_order,
                               src_order=src_order, alpha=alpha,
                               copy=copy, fill=fill, flipy=flipy)
def overlay_image_3d(dstarr, pos, srcarr, dst_order='RGBA', src_order='RGBA',
                     alpha=1.0, copy=False, fill=True, flipy=False):
    """Overlay 3D image `srcarr` onto 3D image `dstarr` at `pos` (x, y, z).

    If `srcarr` carries an alpha channel it is used for blending,
    otherwise the scalar `alpha` (0..1) applies.  `fill` makes the
    destination alpha opaque over the drop area; `flipy` flips the
    source vertically first; `copy` operates on (and returns) a copy
    of `dstarr` instead of modifying it in place.

    Returns the destination array (the copy, if `copy` is True).
    """
    dst_x, dst_y, dst_z = [int(round(pos[n])) for n in range(3)]
    dst_ht, dst_wd, dst_dp, dst_ch = dstarr.shape
    dst_type = dstarr.dtype
    dst_max_val = np.iinfo(dst_type).max
    src_ht, src_wd, src_dp, src_ch = srcarr.shape
    src_type = srcarr.dtype
    src_max_val = np.iinfo(src_type).max
    if flipy:
        srcarr = np.flipud(srcarr)
    # Trim off parts of srcarr that would be "hidden"
    # to the left and above the dstarr edge.
    if dst_y < 0:
        dy = abs(dst_y)
        srcarr = srcarr[dy:, :, :, :]
        src_ht -= dy
        dst_y = 0
    if dst_x < 0:
        dx = abs(dst_x)
        srcarr = srcarr[:, dx:, :, :]
        src_wd -= dx
        dst_x = 0
    if dst_z < 0:
        dz = abs(dst_z)
        srcarr = srcarr[:, :, dz:, :]
        src_dp -= dz
        dst_z = 0
    # Trim off parts of srcarr that would be "hidden"
    # to the right and below the dstarr edge.
    ex = dst_y + src_ht - dst_ht
    if ex > 0:
        srcarr = srcarr[:dst_ht, :, :, :]
        src_ht -= ex
    ex = dst_x + src_wd - dst_wd
    if ex > 0:
        srcarr = srcarr[:, :dst_wd, :, :]
        src_wd -= ex
    ex = dst_z + src_dp - dst_dp
    if ex > 0:
        srcarr = srcarr[:, :, :dst_dp, :]
        src_dp -= ex
    if src_wd <= 0 or src_ht <= 0 or src_dp <= 0:
        # completely clipped--nothing to do
        return dstarr
    if copy:
        dstarr = np.copy(dstarr, order='C')
    da_idx = -1
    slc = slice(0, 3)
    if 'A' in dst_order:
        da_idx = dst_order.index('A')
        # Currently we assume that alpha channel is in position 0 or 3 in dstarr
        if da_idx == 0:
            slc = slice(1, 4)
        elif da_idx != 3:
            # bug fix: message used to say "(0 or 4)", which does not match
            # the positions actually accepted above
            raise ValueError("Alpha channel not in expected position (0 or 3) in dstarr")
    # fill alpha channel in destination in the area we will be dropping
    # the image
    if fill and (da_idx >= 0):
        dstarr[dst_y:dst_y + src_ht, dst_x:dst_x + src_wd,
               dst_z:dst_z + src_dp, da_idx] = dst_max_val
    # if overlay source contains an alpha channel, extract it
    # and use it, otherwise use scalar keyword parameter
    if (src_ch > 3) and ('A' in src_order):
        sa_idx = src_order.index('A')
        alpha = srcarr[:src_ht, :src_wd, :src_dp, sa_idx]
        if np.all(np.isclose(alpha, src_max_val)):
            # optimization to avoid blending if all alpha elements are max
            alpha = 1.0
        else:
            # normalize to 0..1 (alpha already holds the trimmed slice;
            # the old code redundantly re-sliced srcarr here)
            alpha = alpha / float(src_max_val)
            # replicate across the 3 color planes
            alpha = np.concatenate([alpha[..., np.newaxis]] * 3, axis=-1)
    # reorder srcarr if necessary to match dstarr for alpha merge
    get_order = dst_order
    if ('A' in dst_order) and ('A' not in src_order):
        get_order = dst_order.replace('A', '')
    if get_order != src_order:
        srcarr = reorder_image(get_order, srcarr, src_order)
    # define the two subarrays we are blending
    _dst = dstarr[dst_y:dst_y + src_ht, dst_x:dst_x + src_wd,
                  dst_z:dst_z + src_dp, slc]
    _src = srcarr[:src_ht, :src_wd, :src_dp, slc]
    if np.isscalar(alpha) and alpha == 1.0:
        # optimization to avoid alpha blending
        # Place our srcarr into this dstarr at dst offsets
        _dst[:, :, :, :] = _src
    else:
        # alpha blending: Co = CaAa + CbAb(1 - Aa)
        _dst[:, :, :, :] = (alpha * _src) + (1.0 - alpha) * _dst
    return dstarr
def overlay_image(dstarr, pos, srcarr, **kwargs):
    """Overlay `srcarr` onto `dstarr` at position `pos`.

    Dispatches to the 3D routine when `srcarr` has more than three
    dimensions, otherwise to the 2D routine.  All keyword arguments are
    passed through unchanged.
    """
    if len(srcarr.shape) > 3:
        return overlay_image_3d(dstarr, pos, srcarr, **kwargs)
    return overlay_image_2d(dstarr, pos, srcarr, **kwargs)
def reorder_image(dst_order, src_arr, src_order):
    """Return `src_arr` (color planes laid out as `src_order`) rearranged
    to match `dst_order`.

    If `dst_order` requires an alpha ('A') plane that the source lacks,
    an opaque alpha plane (dtype max value) is synthesized.  Raises
    ``ValueError`` if the orders are inconsistent with the array depth or
    the source is missing a non-alpha channel the destination needs.
    """
    n_chan = src_arr.shape[2]
    if (n_chan != len(src_order) and
            len(dst_order.replace('A', '')) != len(src_order.replace('A', ''))):
        raise ValueError("src_order (%s) does not match array depth (%d)" % (
            src_order, n_chan))

    if dst_order == src_order:
        # already in the requested order
        return np.ascontiguousarray(src_arr)

    extra = set(dst_order) - set(src_order)
    if not extra:
        # pure permutation of existing channels: fancy-index the last axis
        sel = np.array([src_order.index(ch) for ch in dst_order])
        return np.ascontiguousarray(src_arr[..., sel])

    if extra != {'A'}:
        raise ValueError("source array missing channels ({}) needed in "
                         "destination array ({})".format(src_order, dst_order))

    # destination wants an alpha plane the source lacks: gather the color
    # planes in destination order, then insert a fully-opaque alpha plane
    planes = [src_arr[..., src_order.index(ch), np.newaxis]
              for ch in dst_order.replace('A', '')]
    ht, wd = src_arr.shape[:2]
    opaque = np.iinfo(src_arr.dtype).max
    alpha_plane = np.full((ht, wd, 1), opaque, dtype=src_arr.dtype)
    planes.insert(dst_order.index('A'), alpha_plane)
    return np.concatenate(planes, axis=-1)
def strip_z(pts):
    """Return `pts` with any Z component removed.

    Input with more than two components per point is reduced to (x, y);
    2-component input is returned as-is (as an ndarray).
    """
    arr = np.asarray(pts)
    if arr.shape[-1] > 2:
        # keep only the x and y component planes
        arr = np.stack((arr.T[0], arr.T[1]), axis=-1)
    return arr
def pad_z(pts, value=0.0, dtype=np.float32):
    """Return `pts` with a Z component added if it is missing.

    The new Z values default to `value` (0.0); the result is converted
    to `dtype` (default float32).  Handles both a single point and an
    array of points.
    """
    arr = np.asarray(pts, dtype=dtype)
    if arr.shape[-1] >= 3:
        return arr
    if len(arr.shape) < 2:
        # single (x, y) point
        return np.asarray((arr[0], arr[1], value), dtype=arr.dtype)
    # array of points: append a constant Z column
    zs = np.full(len(arr), value, dtype=arr.dtype)
    return np.asarray((arr.T[0], arr.T[1], zs)).T
def get_bounds(pts):
    """Return the (minimum point, maximum point) bounding a set of points.

    The result is a 2-row array: row 0 holds the per-component minima,
    row 1 the per-component maxima.
    """
    components = np.asarray(pts).T
    lo = [comp.min() for comp in components]
    hi = [comp.max() for comp in components]
    return np.asarray((lo, hi))
def sort_xy(x1, y1, x2, y2):
    """Sort bounding-box parameters so the first corner is the minimum
    and the second is the maximum: returns (xmin, ymin, xmax, ymax).
    """
    (xmin, ymin), (xmax, ymax) = get_bounds(((x1, y1), (x2, y2)))
    return (xmin, ymin, xmax, ymax)
def fill_array(dstarr, order, r, g, b, a):
    """Fill `dstarr` in place with a single color.

    `order` names the color planes in the array (e.g. 'RGBA'); each of
    (r, g, b, a) is expected in 0..1 and is scaled to the dtype's max
    value.  `dstarr` can be a 2D or 3D array.  Returns None.
    """
    # TODO: can we make this more efficient?
    dtype = dstarr.dtype
    top = np.iinfo(dtype).max
    chan = {'R': int(top * r), 'G': int(top * g),
            'B': int(top * b), 'A': int(top * a)}
    color = tuple(chan[ch] for ch in order)
    if dtype == np.uint8 and len(color) == 4:
        # optimization: write all four 8-bit channels as one 32-bit word
        word = np.array(color, dtype=dtype).view(np.uint32)[0]
        dstarr.view(np.uint32)[..., :] = word
    else:
        dstarr[..., :] = color
def make_filled_array(shp, dtype, order, r, g, b, a):
    """Return a new array of shape `shp` filled with a single color.

    `order` names the color planes (e.g. 'RGBA'); each of (r, g, b, a)
    is expected in 0..1 and is scaled to the dtype's max value.  `shp`
    can define a 2D or 3D array.
    """
    # TODO: can we make this more efficient?
    top = np.iinfo(dtype).max
    chan = {'R': int(top * r), 'G': int(top * g),
            'B': int(top * b), 'A': int(top * a)}
    color = tuple(chan[ch] for ch in order)
    if dtype == np.uint8 and len(color) == 4:
        # optimization when dealing with 32-bit RGBA arrays: fill via a
        # uint32 view so all four channels are written in one pass
        word = np.array(color, dtype=dtype).view(np.uint32)
        out = np.zeros(shp, dtype=dtype)
        out.view(np.uint32)[:] = word
        return out
    return np.full(shp, color, dtype=dtype)
def add_alpha(arr, alpha=None):
    """Return `arr` with an alpha plane appended if it lacks one.

    A 2D array is first promoted to 3D (single plane).  Arrays with 1 or
    3 channels get an extra alpha plane filled with `alpha` (or 0 when
    `alpha` is None); arrays that already have 2 or 4 channels are
    returned unchanged.
    """
    if len(arr.shape) == 2:
        arr = arr[..., np.newaxis]
    if arr.shape[2] in (1, 3):
        fill = 0 if alpha is None else alpha
        a_plane = np.full(arr.shape[:2], fill, dtype=arr.dtype)
        arr = np.dstack((arr, a_plane))
    return arr
def get_minmax_dtype(dtype):
    """Return the (min, max) representable values for numpy `dtype`.

    Uses integer limits for integer dtypes and floating-point limits
    otherwise.
    """
    if issubclass(dtype.type, np.integer):
        return np.iinfo(dtype).min, np.iinfo(dtype).max
    info = np.finfo(dtype)
    return info.min, info.max
def check_native_byteorder(data_np):
    """Return True if `data_np`'s dtype declares an explicit byte order
    opposite to this machine's native order (i.e. the data would need
    byte-swapping); False for native or order-free dtypes.

    NOTE(review): despite the name, this returns True for *non*-native
    byte order -- confirm against callers.
    """
    dt_str = str(data_np.dtype)
    if sys.byteorder == 'little':
        return dt_str.startswith('>')
    return dt_str.startswith('<')
def cutout_data(data, x1, y1, x2, y2, xstep=1, ystep=1, z=None,
                astype=None):
    """Cut out a data area based on bounded coordinates.

    Parameters
    ----------
    x1, y1 : int
        Coordinates defining the minimum corner to be cut out
    x2, y2 : int
        Coordinates *one greater* than the maximum corner
    xstep, ystep : int
        Step values for skip intervals in the cutout region
    z : int
        Value for a depth (slice) component for color images
    astype : numpy dtype or None
        If given, the result is converted to this dtype

    Note that the coordinates for `x2`, `y2` are *outside* the
    cutout region, similar to slicing parameters in Python.
    """
    cut = data[y1:y2:ystep, x1:x2:xstep]
    if z is not None and len(cut.shape) > 2:
        # select a single depth slice of a color image
        cut = cut[..., z]
    if astype:
        cut = cut.astype(astype, copy=False)
    return cut
def cutout_adjust(data, x1, y1, x2, y2, xstep=1, ystep=1, z=0, astype=None):
    """Like `cutout_data`, but shifts the region (x1, y1, x2, y2) to lie
    inside the data area if it does not already.

    The width and height of the region are preserved, so e.g.
    (-2, -2, 5, 5) could become (0, 0, 7, 7).  Returns the tuple
    (cutout, x1, y1, x2, y2) with the adjusted coordinates.
    """
    height, width = data.shape[:2]
    wd = x2 - x1
    ht = y2 - y1
    # shift horizontally to fit inside [0, width)
    if x1 < 0:
        x1, x2 = 0, wd
    elif x2 >= width:
        x2 = width
        x1 = x2 - wd
    # shift vertically to fit inside [0, height)
    if y1 < 0:
        y1, y2 = 0, ht
    elif y2 >= height:
        y2 = height
        y1 = y2 - ht
    cut = cutout_data(data, x1, y1, x2, y2, xstep=xstep, ystep=ystep,
                      z=z, astype=astype)
    return (cut, x1, y1, x2, y2)
def cutout_radius(data, x, y, radius, xstep=1, ystep=1, astype=None):
    """Cut out a square region of half-width `radius` centered at (x, y),
    adjusted to fit inside the data area (see `cutout_adjust`).
    """
    x1, y1 = x - radius, y - radius
    x2, y2 = x + radius + 1, y + radius + 1
    return cutout_adjust(data, x1, y1, x2, y2,
                         xstep=xstep, ystep=ystep, astype=astype)
def guess_order(shape):
    """Guess the color plane order string from an image array shape.

    Shapes with rank <= 2 are monochrome ('M'); otherwise the last
    dimension's size selects the order: 1 -> 'M', 2 -> 'MA',
    3 -> 'RGB', 4 -> 'RGBA'.

    Raises
    ------
    ValueError
        If the channel depth is not 1-4.  (Previously an unrecognized
        depth left the result unbound and raised a confusing
        UnboundLocalError.)
    """
    if len(shape) <= 2:
        return 'M'
    depth = shape[-1]
    orders = {1: 'M', 2: 'MA', 3: 'RGB', 4: 'RGBA'}
    try:
        return orders[depth]
    except KeyError:
        raise ValueError("Cannot guess channel order for depth %d" % depth)
def get_aspect(shape):
    """Return the width/height aspect ratio for an array `shape`
    (shape[0] is height, shape[1] is width).
    """
    ht, wd = shape[0], shape[1]
    return wd / ht
def calc_aspect_str(wd, ht):
    """Return the aspect ratio of `wd` x `ht`, reduced to lowest terms,
    as a string of the form "x:y".
    """
    g = np.gcd(wd, ht)
    return "{}:{}".format(int(wd // g), int(ht // g))
| en | 0.752169 | # # trcalc.py -- transformation calculations for image data # # This is open-source software licensed under a BSD license. # Please see the file LICENSE.txt for details. # # optional opencv package speeds up certain operations, especially # rotation # do we have Python Imaging Library available? # For testing #have_opencv = False #have_pillow = False ## ctr_x = wd * 0.5 ## ctr_y = ht * 0.5 Rotate an array of points (x_arr, y_arr) by theta_deg offsetted from a center point by (xoff, yoff). # TODO: use opencv acceleration if available # TODO: handle dimensional rotation N>2 # just copy unrotated Z coords Rotate numpy array `data_np` by `theta_deg` around rotation center (rotctr_x, rotctr_y). If the rotation center is omitted it defaults to the center of the array. No adjustment is done to the data array beforehand, so the result will be clipped according to the size of the array (the output array will be the same size as the input array). # If there is no rotation, then we are done # opencv is fastest #ap = np.rint(ap).clip(0, wd-1).astype(int) #bp = np.rint(bp).clip(0, ht-1).astype(int) # Optomizations to reuse existing intermediate arrays # If there is no rotation, then we are done # Make a square with room to rotate # Find center of new data array # Overlay the old image on the new (blank) image # TODO: fill with a different value? # Now rotate with clip as usual # calculate dimensions of NON-scaled cutout # Make indexes and scale them # Is there a more efficient way to do this? # bounds check against shape (to protect future data access) # Calculate actual scale used (vs. desired) # return view + actual scale factors used Like get_scaled_cutout_wdht, but returns the view/slice to extract from an image instead of the extraction itself. # calculate dimensions of NON-scaled cutout # Make indexes and scale them # Is there a more efficient way to do this? # bounds check against shape (to protect future data access) # Calculate actual scale used (vs. 
desired) # return view + actual scale factors used Extract a region of the `data_np` defined by corners (x1, y1) and (x2, y2) and resample it to fit dimensions (new_wd, new_ht). `interpolation` describes the method of interpolation used, where the default "basic" is nearest neighbor. If `logger` is not `None` it will be used for logging messages. If `dtype` is defined then the output array will be converted to that type; the default is the same as the input type. # opencv is fastest and supports many methods # special hack for OpenCv resize on certain numpy array types Like get_scaled_cutout_basic, but returns the view/slice to extract from an image, instead of the extraction itself # calculate dimensions of NON-scaled cutout # opencv is fastest # special hack for OpenCv resize on certain numpy array types # Do transforms as necessary p1 (x1, y1, z1) and p2 (x2, y2, z2) define the extent of the (non-scaled) data shown. The image, defined by region q1, q2 is to be placed at dst in the image (destination may be outside of the actual data array). Refines the modified points (q1', q2') defining the clipped rectangle needed to be cut from the source array and scaled. # Trim off parts of srcarr that would be "hidden" # to the left and above the dstarr edge. # Trim off parts of srcarr that would be "hidden" # to the right and below dstarr edge. # 3D image # Trim off parts of srcarr that would be "hidden" # to the left and above the dstarr edge. # Trim off parts of srcarr that would be "hidden" # to the right and below the dstarr edge. 
# nothing to do # Currently we assume that alpha channel is in position 0 or 3 in dstarr # fill alpha channel in destination in the area we will be dropping # the image # if overlay source contains an alpha channel, extract it # and use it, otherwise use scalar keyword parameter # optimization to avoid blending if all alpha elements are max # reorder srcarr if necessary to match dstarr for alpha merge # define the two subarrays we are blending # optimization to avoid alpha blending # Place our srcarr into this dstarr at dst offsets # calculate alpha blending # Co = CaAa + CbAb(1 - Aa) # NOTE: not tested yet thoroughly enough to use # if have_pillow: # return overlay_image_2d_pil(dstarr, pos, srcarr, dst_order=dst_order, # src_order=src_order, alpha=alpha, # copy=copy, fill=fill, flipy=flipy) # Trim off parts of srcarr that would be "hidden" # to the left and above the dstarr edge. # Trim off parts of srcarr that would be "hidden" # to the right and below the dstarr edge. # nothing to do # Currently we assume that alpha channel is in position 0 or 3 in dstarr # fill alpha channel in destination in the area we will be dropping # the image # if overlay source contains an alpha channel, extract it # and use it, otherwise use scalar keyword parameter # optimization to avoid blending if all alpha elements are max # reorder srcarr if necessary to match dstarr for alpha merge # define the two subarrays we are blending # optimization to avoid alpha blending # Place our srcarr into this dstarr at dst offsets # calculate alpha blending # Co = CaAa + CbAb(1 - Aa) Reorder src_arr, with order of color planes in src_order, as dst_order. # <-- we don't have to add an alpha plane, just create a new view # <-- dst order requires missing alpha channel Strips a Z component from `pts` if it is present. Adds a Z component from `pts` if it is missing. The value defaults to `value` (0.0) Return the minimum point and maximum point bounding a set of points. 
Sort a set of bounding box parameters. Fill array dstarr with a color value. order defines the color planes in the array. (r, g, b, a) are expected to be in the range 0..1 and are scaled to the appropriate values. dstarr can be a 2D or 3D array. # TODO: can we make this more efficient? # optimiztion Return a filled array with a color value. order defines the color planes in the array. (r, g, b, a) are expected to be in the range 0..1 and are scaled to the appropriate values. shp can define a 2D or 3D array. # TODO: can we make this more efficient? # optimization when dealing with 32-bit RGBA arrays Takes an array and adds an alpha layer to it if it doesn't already exist. Cut out data area based on bounded coordinates. Parameters ---------- x1, y1 : int Coordinates defining the minimum corner to be cut out x2, y2 : int Coordinates *one greater* than the maximum corner xstep, ystep : int Step values for skip intervals in the cutout region z : int Value for a depth (slice) component for color images astype : Note that the coordinates for `x2`, `y2` are *outside* the cutout region, similar to slicing parameters in Python. Like `cutout_data`, but adjusts coordinates `x1`, `y1`, `x2`, `y2` to be inside the data area if they are not already. It tries to preserve the width and height of the region, so e.g. (-2, -2, 5, 5) could become (0, 0, 7, 7) # calculate the aspect ratio given by width and height and make # string of the form "x:y" | 2.547146 | 3 |
install/core/python/tank/util/__init__.py | JoanAzpeitia/lp_sg | 0 | 6613428 | # Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
from .shotgun import register_publish
from .shotgun import resolve_publish_path
from .shotgun import find_publish
from .shotgun import download_url
from .shotgun import create_event_log_entry
from .shotgun import get_entity_type_display_name
from .shotgun import get_published_file_entity_type
from .environment import append_path_to_env_var
from .environment import prepend_path_to_env_var
from .login import get_shotgun_user
from .login import get_current_user
# DO keep the following two log_user_*_metric to preserve retro
# compatibility and prevent exception in legacy engine code.
from .metrics import log_user_activity_metric
from .metrics import log_user_attribute_metric
from .metrics import EventMetric
from .shotgun_path import ShotgunPath
from . import filesystem
from .local_file_storage import LocalFileStorageManager
from .errors import PublishResolveError
from .errors import UnresolvableCoreConfigurationError, ShotgunAttachmentDownloadError
from .errors import EnvironmentVariableFileLookupError, ShotgunPublishError
from .errors import PublishResolveError
from .errors import PublishPathNotDefinedError, PublishPathNotSupported
from .user_settings import UserSettings
| # Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
from .shotgun import register_publish
from .shotgun import resolve_publish_path
from .shotgun import find_publish
from .shotgun import download_url
from .shotgun import create_event_log_entry
from .shotgun import get_entity_type_display_name
from .shotgun import get_published_file_entity_type
from .environment import append_path_to_env_var
from .environment import prepend_path_to_env_var
from .login import get_shotgun_user
from .login import get_current_user
# DO keep the following two log_user_*_metric to preserve retro
# compatibility and prevent exception in legacy engine code.
from .metrics import log_user_activity_metric
from .metrics import log_user_attribute_metric
from .metrics import EventMetric
from .shotgun_path import ShotgunPath
from . import filesystem
from .local_file_storage import LocalFileStorageManager
from .errors import PublishResolveError
from .errors import UnresolvableCoreConfigurationError, ShotgunAttachmentDownloadError
from .errors import EnvironmentVariableFileLookupError, ShotgunPublishError
from .errors import PublishResolveError
from .errors import PublishPathNotDefinedError, PublishPathNotSupported
from .user_settings import UserSettings
| en | 0.836288 | # Copyright (c) 2013 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. # DO keep the following two log_user_*_metric to preserve retro # compatibility and prevent exception in legacy engine code. | 1.098968 | 1 |
src/problem/admin.py | PLUS-POSTECH/study.plus.or.kr | 7 | 6613429 | from django.contrib import admin
from website.actions import ExportCsvMixin
from .models import Problem, ProblemAttachment, ProblemInstance, ProblemList, ProblemAuthLog, ProblemQuestion
@admin.register(Problem)
class ProblemAdmin(admin.ModelAdmin, ExportCsvMixin):
list_display = ('title', 'categories_title', 'author', 'description')
readonly_fields = ('last_modified', )
actions = ['export_as_csv']
@admin.register(ProblemAttachment)
class ProblemAttachmentAdmin(admin.ModelAdmin):
list_display = ('filename', )
@admin.register(ProblemInstance)
class ProblemInstanceAdmin(admin.ModelAdmin):
list_display = ('pk', 'problem')
@admin.register(ProblemList)
class ProblemListAdmin(admin.ModelAdmin):
list_display = ('title', 'session', 'description')
@admin.register(ProblemAuthLog)
class ProblemAuthLogAdmin(admin.ModelAdmin, ExportCsvMixin):
list_display = ('user', 'problem_instance', 'auth_key', 'datetime')
actions = ['export_as_csv']
@admin.register(ProblemQuestion)
class ProblemQuestionAdmin(admin.ModelAdmin):
list_display = ('user', 'problem_instance', 'question', 'answer', 'datetime')
| from django.contrib import admin
from website.actions import ExportCsvMixin
from .models import Problem, ProblemAttachment, ProblemInstance, ProblemList, ProblemAuthLog, ProblemQuestion
@admin.register(Problem)
class ProblemAdmin(admin.ModelAdmin, ExportCsvMixin):
list_display = ('title', 'categories_title', 'author', 'description')
readonly_fields = ('last_modified', )
actions = ['export_as_csv']
@admin.register(ProblemAttachment)
class ProblemAttachmentAdmin(admin.ModelAdmin):
list_display = ('filename', )
@admin.register(ProblemInstance)
class ProblemInstanceAdmin(admin.ModelAdmin):
list_display = ('pk', 'problem')
@admin.register(ProblemList)
class ProblemListAdmin(admin.ModelAdmin):
list_display = ('title', 'session', 'description')
@admin.register(ProblemAuthLog)
class ProblemAuthLogAdmin(admin.ModelAdmin, ExportCsvMixin):
list_display = ('user', 'problem_instance', 'auth_key', 'datetime')
actions = ['export_as_csv']
@admin.register(ProblemQuestion)
class ProblemQuestionAdmin(admin.ModelAdmin):
list_display = ('user', 'problem_instance', 'question', 'answer', 'datetime')
| none | 1 | 1.912741 | 2 | |
adversarial_robustness/datasets/svhn.py | dtak/adversarial_robustness | 50 | 6613430 | <reponame>dtak/adversarial_robustness
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
import tensorflow as tf
from IPython.display import display, Image
from scipy import ndimage
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
import scipy.io
import random
from adversarial_robustness.dataset import *
class SVHN(Dataset):
def __init__(self, include_train=True, data_dir=default_data_dir, **kwargs):
self.X, self.y, self.Xv, self.yv, self.Xt, self.yt = load_svhn(
include_train=include_train, data_dir=data_dir)
self.feature_names = [str(i) for i in range(32*32)]
self.label_names = [str(i) for i in range(10)]
self.image_shape = (32, 32)
def load_svhn(include_train=True, data_dir=default_data_dir):
f1 = data_dir + '/SVHN.pickle'
f2 = data_dir + '/SVHN1.pickle'
f3 = data_dir + '/SVHN2.pickle'
f4 = data_dir + '/SVHN3.pickle'
if not os.path.exists(f1):
print('Dataset not found, downloading and preprocessing...')
download_and_preprocess_svhn(data_dir)
classes = np.array([0,1,2,3,4,5,6,7,8,9])
limit = 200000
with open(f1, 'rb') as f:
save = pickle.load(f)
train_labels = save['train_labels'][:limit]
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save
def reformat(dataset, labels):
dataset = dataset.reshape(
(-1, 32, 32, 1)).astype(np.float32)
labels = labels.astype(np.int32)
return dataset, labels
Xv, yv = reformat(valid_dataset, valid_labels)
Xt, yt = reformat(test_dataset, test_labels)
if include_train:
with open(f2, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset1'][:limit]
del save
X, y = reformat(train_dataset, train_labels)
else:
X, y = Xv, yv
return X, y, Xv, yv, Xt, yt
def download_and_preprocess_svhn(data_dir):
"""
Adapted from https://github.com/hangyao/street_view_house_numbers/blob/master/1_preprocess_single.ipynb
"""
f1 = data_dir + '/SVHN.pickle'
f2 = data_dir + '/SVHN1.pickle'
f3 = data_dir + '/SVHN2.pickle'
f4 = data_dir + '/SVHN3.pickle'
url = 'http://ufldl.stanford.edu/housenumbers/'
def maybe_download(filename, force=False):
"""Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename)
print('\nDownload Complete!')
statinfo = os.stat(filename)
return filename
train_matfile = maybe_download('svhn/train_32x32.mat')
test_matfile = maybe_download('svhn/test_32x32.mat')
extra_matfile = maybe_download('svhn/extra_32x32.mat')
train_data = scipy.io.loadmat('svhn/train_32x32.mat', variable_names='X').get('X')
train_labels = scipy.io.loadmat('svhn/train_32x32.mat', variable_names='y').get('y')
test_data = scipy.io.loadmat('svhn/test_32x32.mat', variable_names='X').get('X')
test_labels = scipy.io.loadmat('svhn/test_32x32.mat', variable_names='y').get('y')
extra_data = scipy.io.loadmat('svhn/extra_32x32.mat', variable_names='X').get('X')
extra_labels = scipy.io.loadmat('svhn/extra_32x32.mat', variable_names='y').get('y')
print(train_data.shape, train_labels.shape)
print(test_data.shape, test_labels.shape)
print(extra_data.shape, extra_labels.shape)
train_labels[train_labels == 10] = 0
test_labels[test_labels == 10] = 0
extra_labels[extra_labels == 10] = 0
random.seed()
n_labels = 10
valid_index = []
valid_index2 = []
train_index = []
train_index2 = []
for i in np.arange(n_labels):
valid_index.extend(np.where(train_labels[:,0] == (i))[0][:400].tolist())
train_index.extend(np.where(train_labels[:,0] == (i))[0][400:].tolist())
valid_index2.extend(np.where(extra_labels[:,0] == (i))[0][:200].tolist())
train_index2.extend(np.where(extra_labels[:,0] == (i))[0][200:].tolist())
random.shuffle(valid_index)
random.shuffle(train_index)
random.shuffle(valid_index2)
random.shuffle(train_index2)
valid_data = np.concatenate((extra_data[:,:,:,valid_index2], train_data[:,:,:,valid_index]), axis=3).transpose((3,0,1,2))
valid_labels = np.concatenate((extra_labels[valid_index2,:], train_labels[valid_index,:]), axis=0)[:,0]
train_data_t = np.concatenate((extra_data[:,:,:,train_index2], train_data[:,:,:,train_index]), axis=3).transpose((3,0,1,2))
train_labels_t = np.concatenate((extra_labels[train_index2,:], train_labels[train_index,:]), axis=0)[:,0]
test_data = test_data.transpose((3,0,1,2))
test_labels = test_labels[:,0]
print(train_data_t.shape, train_labels_t.shape)
print(test_data.shape, test_labels.shape)
print(valid_data.shape, valid_labels.shape)
image_size = 32 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def im2gray(image):
'''Normalize images'''
image = image.astype(float)
# Use the Conversion Method in This Paper:
# [http://www.eyemaginary.com/Rendering/TurnColorsGray.pdf]
image_gray = np.dot(image, [[0.2989],[0.5870],[0.1140]])
return image_gray
train_data_c = im2gray(train_data_t)[:,:,:,0]
test_data_c = im2gray(test_data)[:,:,:,0]
valid_data_c = im2gray(valid_data)[:,:,:,0]
print(train_data_c.shape, train_labels_t.shape)
print(test_data_c.shape, test_labels.shape)
print(valid_data_c.shape, valid_labels.shape)
def GCN(image, min_divisor=1e-4):
"""Global Contrast Normalization"""
imsize = image.shape[0]
mean = np.mean(image, axis=(1,2), dtype=float)
std = np.std(image, axis=(1,2), dtype=float, ddof=1)
std[std < min_divisor] = 1.
image_GCN = np.zeros(image.shape, dtype=float)
for i in np.arange(imsize):
image_GCN[i,:,:] = (image[i,:,:] - mean[i]) / std[i]
return image_GCN
train_data_GCN = GCN(train_data_c)
test_data_GCN = GCN(test_data_c)
valid_data_GCN = GCN(valid_data_c)
print(train_data_GCN.shape, train_labels_t.shape)
print(test_data_GCN.shape, test_labels.shape)
print(valid_data_GCN.shape, valid_labels.shape)
pickle_file = f1
try:
f = open(pickle_file, 'wb')
save = {
#'train_dataset': train_data_GCN,
'train_labels': train_labels_t,
'valid_dataset': valid_data_GCN,
'valid_labels': valid_labels,
'test_dataset': test_data_GCN,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
pickle_file = f2
try:
f = open(pickle_file, 'wb')
save = { 'train_dataset1': train_data_GCN[:200000], }
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
pickle_file = f3
try:
f = open(pickle_file, 'wb')
save = { 'train_dataset2': train_data_GCN[200000:400000], }
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
pickle_file = f4
try:
f = open(pickle_file, 'wb')
save = { 'train_dataset3': train_data_GCN[400000:], }
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
if __name__ == '__main__':
import pdb
dataset = SVHN()
pdb.set_trace()
pass
| from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
import tensorflow as tf
from IPython.display import display, Image
from scipy import ndimage
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
import scipy.io
import random
from adversarial_robustness.dataset import *
class SVHN(Dataset):
def __init__(self, include_train=True, data_dir=default_data_dir, **kwargs):
self.X, self.y, self.Xv, self.yv, self.Xt, self.yt = load_svhn(
include_train=include_train, data_dir=data_dir)
self.feature_names = [str(i) for i in range(32*32)]
self.label_names = [str(i) for i in range(10)]
self.image_shape = (32, 32)
def load_svhn(include_train=True, data_dir=default_data_dir):
f1 = data_dir + '/SVHN.pickle'
f2 = data_dir + '/SVHN1.pickle'
f3 = data_dir + '/SVHN2.pickle'
f4 = data_dir + '/SVHN3.pickle'
if not os.path.exists(f1):
print('Dataset not found, downloading and preprocessing...')
download_and_preprocess_svhn(data_dir)
classes = np.array([0,1,2,3,4,5,6,7,8,9])
limit = 200000
with open(f1, 'rb') as f:
save = pickle.load(f)
train_labels = save['train_labels'][:limit]
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save
def reformat(dataset, labels):
dataset = dataset.reshape(
(-1, 32, 32, 1)).astype(np.float32)
labels = labels.astype(np.int32)
return dataset, labels
Xv, yv = reformat(valid_dataset, valid_labels)
Xt, yt = reformat(test_dataset, test_labels)
if include_train:
with open(f2, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset1'][:limit]
del save
X, y = reformat(train_dataset, train_labels)
else:
X, y = Xv, yv
return X, y, Xv, yv, Xt, yt
def download_and_preprocess_svhn(data_dir):
"""
Adapted from https://github.com/hangyao/street_view_house_numbers/blob/master/1_preprocess_single.ipynb
"""
f1 = data_dir + '/SVHN.pickle'
f2 = data_dir + '/SVHN1.pickle'
f3 = data_dir + '/SVHN2.pickle'
f4 = data_dir + '/SVHN3.pickle'
url = 'http://ufldl.stanford.edu/housenumbers/'
def maybe_download(filename, force=False):
"""Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename)
print('\nDownload Complete!')
statinfo = os.stat(filename)
return filename
train_matfile = maybe_download('svhn/train_32x32.mat')
test_matfile = maybe_download('svhn/test_32x32.mat')
extra_matfile = maybe_download('svhn/extra_32x32.mat')
train_data = scipy.io.loadmat('svhn/train_32x32.mat', variable_names='X').get('X')
train_labels = scipy.io.loadmat('svhn/train_32x32.mat', variable_names='y').get('y')
test_data = scipy.io.loadmat('svhn/test_32x32.mat', variable_names='X').get('X')
test_labels = scipy.io.loadmat('svhn/test_32x32.mat', variable_names='y').get('y')
extra_data = scipy.io.loadmat('svhn/extra_32x32.mat', variable_names='X').get('X')
extra_labels = scipy.io.loadmat('svhn/extra_32x32.mat', variable_names='y').get('y')
print(train_data.shape, train_labels.shape)
print(test_data.shape, test_labels.shape)
print(extra_data.shape, extra_labels.shape)
train_labels[train_labels == 10] = 0
test_labels[test_labels == 10] = 0
extra_labels[extra_labels == 10] = 0
random.seed()
n_labels = 10
valid_index = []
valid_index2 = []
train_index = []
train_index2 = []
for i in np.arange(n_labels):
valid_index.extend(np.where(train_labels[:,0] == (i))[0][:400].tolist())
train_index.extend(np.where(train_labels[:,0] == (i))[0][400:].tolist())
valid_index2.extend(np.where(extra_labels[:,0] == (i))[0][:200].tolist())
train_index2.extend(np.where(extra_labels[:,0] == (i))[0][200:].tolist())
random.shuffle(valid_index)
random.shuffle(train_index)
random.shuffle(valid_index2)
random.shuffle(train_index2)
valid_data = np.concatenate((extra_data[:,:,:,valid_index2], train_data[:,:,:,valid_index]), axis=3).transpose((3,0,1,2))
valid_labels = np.concatenate((extra_labels[valid_index2,:], train_labels[valid_index,:]), axis=0)[:,0]
train_data_t = np.concatenate((extra_data[:,:,:,train_index2], train_data[:,:,:,train_index]), axis=3).transpose((3,0,1,2))
train_labels_t = np.concatenate((extra_labels[train_index2,:], train_labels[train_index,:]), axis=0)[:,0]
test_data = test_data.transpose((3,0,1,2))
test_labels = test_labels[:,0]
print(train_data_t.shape, train_labels_t.shape)
print(test_data.shape, test_labels.shape)
print(valid_data.shape, valid_labels.shape)
image_size = 32 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def im2gray(image):
'''Normalize images'''
image = image.astype(float)
# Use the Conversion Method in This Paper:
# [http://www.eyemaginary.com/Rendering/TurnColorsGray.pdf]
image_gray = np.dot(image, [[0.2989],[0.5870],[0.1140]])
return image_gray
train_data_c = im2gray(train_data_t)[:,:,:,0]
test_data_c = im2gray(test_data)[:,:,:,0]
valid_data_c = im2gray(valid_data)[:,:,:,0]
print(train_data_c.shape, train_labels_t.shape)
print(test_data_c.shape, test_labels.shape)
print(valid_data_c.shape, valid_labels.shape)
def GCN(image, min_divisor=1e-4):
    """Apply Global Contrast Normalization to a batch of images.

    Each image (axis 0 indexes the batch, axes 1-2 are height/width) is
    shifted to zero mean and scaled to unit sample standard deviation.

    Args:
        image: array of shape (num_images, height, width).
        min_divisor: standard deviations below this threshold are replaced
            by 1.0 so near-constant images are not blown up by a tiny divisor.

    Returns:
        float array with the same shape as ``image``.
    """
    mean = np.mean(image, axis=(1, 2), dtype=float)
    std = np.std(image, axis=(1, 2), dtype=float, ddof=1)
    # Guard against division by (numerically) zero contrast.
    std[std < min_divisor] = 1.
    # Broadcast the per-image statistics over the spatial axes instead of
    # looping over the batch one image at a time.
    return (image - mean[:, None, None]) / std[:, None, None]
train_data_GCN = GCN(train_data_c)
test_data_GCN = GCN(test_data_c)
valid_data_GCN = GCN(valid_data_c)
print(train_data_GCN.shape, train_labels_t.shape)
print(test_data_GCN.shape, test_labels.shape)
print(valid_data_GCN.shape, valid_labels.shape)
pickle_file = f1
try:
f = open(pickle_file, 'wb')
save = {
#'train_dataset': train_data_GCN,
'train_labels': train_labels_t,
'valid_dataset': valid_data_GCN,
'valid_labels': valid_labels,
'test_dataset': test_data_GCN,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
pickle_file = f2
try:
f = open(pickle_file, 'wb')
save = { 'train_dataset1': train_data_GCN[:200000], }
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
pickle_file = f3
try:
f = open(pickle_file, 'wb')
save = { 'train_dataset2': train_data_GCN[200000:400000], }
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
pickle_file = f4
try:
f = open(pickle_file, 'wb')
save = { 'train_dataset3': train_data_GCN[400000:], }
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
if __name__ == '__main__':
import pdb
dataset = SVHN()
pdb.set_trace()
pass | en | 0.631245 | Adapted from https://github.com/hangyao/street_view_house_numbers/blob/master/1_preprocess_single.ipynb Download a file if not present, and make sure it's the right size. # Pixel width and height. # Number of levels per pixel. Normalize images # Use the Conversion Method in This Paper: # [http://www.eyemaginary.com/Rendering/TurnColorsGray.pdf] Global Contrast Normalization #'train_dataset': train_data_GCN, | 2.217383 | 2 |
SIC_AI_Coding_Exercises/SIC_AI_Chapter_07_Coding_Exercises/ex_0615.py | BattlePlot/SIC-Artificial-Intelligence | 7 | 6613431 | <gh_stars>1-10
"""Coding Practice #0615."""
import cv2
# 1. Working with image data:
# 1.1. Open an image in color (BGR) and show it.
img_bgr = cv2.imread('picture_Butterfly.jpg', 1) # Open as a color image.
print("Shape of the image read in color : ", img_bgr.shape) # Height, width, channel.
cv2.imshow("A Color Image", img_bgr)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
# 1.2. Convert to B/W (gray scale) and show it.
img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
cv2.imshow("In Gray Scale", img_gray)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
# 1.3. Convert to HSV and split the channels: Hue, Saturation, Value.
img_hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
h = img_hsv[:, :, 0]
s = img_hsv[:, :, 1]
v = img_hsv[:, :, 2]
hue = cv2.merge([h, h, h])
sat = cv2.merge([s, s, s])
val = cv2.merge([v, v, v])
cv2.imshow("The Hue Channel", hue)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
cv2.imshow("The Saturation Channel", sat)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
cv2.imshow("The Value Channel", val)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
| """Coding Practice #0615."""
import cv2
# 1. Working with image data:
# 1.1. Open an image in color (BGR) and show it.
img_bgr = cv2.imread('picture_Butterfly.jpg', 1) # Open as a color image.
print("Shape of the image read in color : ", img_bgr.shape) # Height, width, channel.
cv2.imshow("A Color Image", img_bgr)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
# 1.2. Convert to B/W (gray scale) and show it.
img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
cv2.imshow("In Gray Scale", img_gray)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
# 1.3. Convert to HSV and split the channels: Hue, Saturation, Value.
img_hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
h = img_hsv[:, :, 0]
s = img_hsv[:, :, 1]
v = img_hsv[:, :, 2]
hue = cv2.merge([h, h, h])
sat = cv2.merge([s, s, s])
val = cv2.merge([v, v, v])
cv2.imshow("The Hue Channel", hue)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
cv2.imshow("The Saturation Channel", sat)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
cv2.imshow("The Value Channel", val)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window. | en | 0.912215 | Coding Practice #0615. # 1. Working with image data: # 1.1. Open an image in color (BGR) and show it. # Open as a color image. # Height, width, channel. # Wait until a key is pressed. # Close the open window. # 1.2. Convert to B/W (gray scale) and show it. # Wait until a key is pressed. # Close the open window. # 1.3. Convert to HSV and split the channels: Hue, Saturation, Value. # Wait until a key is pressed. # Close the open window. # Wait until a key is pressed. # Close the open window. # Wait until a key is pressed. # Close the open window. | 3.959209 | 4 |
plan/admin.py | 18892021125/NWU-ACM-MIS-backend | 5 | 6613432 | import smtplib
from django.core.mail import send_mail
from django.contrib import admin
from django.shortcuts import HttpResponseRedirect
from markdownx.admin import MarkdownxModelAdmin
from NWU_ACM_MIS import settings
from member.models import Member
from plan.models import Plan, Announcement
mail_content_template = '''
我们发布了一个新的计划: {}
计划时间:
开始: {}
结束: {}
以下是详细信息:
{}
欢迎参加,
{}
'''
@admin.register(Plan)
class PlanAdmin(admin.ModelAdmin):
    """Admin for Plan with bulk member selection and e-mail notification.

    The change form exposes three extra submit buttons (handled in
    ``response_change``): ``_addallads`` / ``_addallnovices`` add whole
    member groups to the plan, and ``_sendemail`` mails the plan details
    to every selected member.
    """
    list_display = ('name', 'typ', 'clock_in', 'clock_out', 'has_sent')
    ordering = ('-clock_in',)
    # Field layout used when creating a new plan (no notification section yet).
    add_fieldsets = (
        (None, {'fields': (
            'name',
            'typ',
            'detail',
            'clock_in',
            'clock_out',
        )}),
    )
    # The change form additionally shows the e-mail notification section.
    fieldsets = add_fieldsets + (
        ('邮件通知', {'fields': (
            'members',
            'has_sent',
        )}),
    )
    readonly_fields = ('has_sent',)
    autocomplete_fields = ('members',)

    def get_fieldsets(self, request, obj=None):
        """Hide the notification section on the "add" form."""
        if not obj:
            return self.add_fieldsets
        return super().get_fieldsets(request, obj)

    def response_change(self, request, obj: Plan):
        """Handle the custom submit buttons on the change form."""
        if '_addallads' in request.POST:
            obj.save()
            obj.members.add(*Member.objects.filter(role=Member.Role.AD))
            self.message_user(request, '添加了所有的现役队员')
            return HttpResponseRedirect('.')
        if '_addallnovices' in request.POST:
            obj.save()
            obj.members.add(*Member.objects.filter(role=Member.Role.NOVICE))
            self.message_user(request, '添加了所有的萌新队员')
            return HttpResponseRedirect('.')
        if '_sendemail' in request.POST:
            # NOTE(review): has_sent is flagged before any mail goes out, so
            # it stays True even if some (or all) deliveries fail below.
            obj.has_sent = True
            obj.save()
            failed_members = []
            mail_content = mail_content_template.format(
                obj.name, obj.clock_in, obj.clock_out, obj.detail,
                settings.PROJECT_VERBOSE_NAME
            )
            # Send one mail per member so a single bad address does not abort
            # the whole batch; collect failures for the status message.
            for member in obj.members.all():
                try:
                    send_mail(
                        obj.name,
                        f'您好,{member.realname}' + mail_content,
                        settings.EMAIL_FROM,
                        recipient_list=[member.user.email, ],
                        fail_silently=False,
                    )
                except smtplib.SMTPException:
                    failed_members.append(member)
            # Fix: original said '己发送邮件' (己 = "self"); the intended
            # character is 已 ("already sent").
            message = '已发送邮件'
            if failed_members:
                message += f', 其中给{failed_members}发送时失败'
            self.message_user(request, message)
            return HttpResponseRedirect('.')
        return super().response_change(request, obj)
@admin.register(Announcement)
class AnnouncementAdmin(MarkdownxModelAdmin):
    # Announcements are written in Markdown; MarkdownxModelAdmin supplies the
    # live-preview editor for the 'content' field.
    list_display = ('title', 'created_date', 'changed_date')
    ordering = ('-created_date',)  # newest announcements first
    fields = ('title', 'content', 'created_date', 'changed_date')
    # Timestamps are maintained automatically and must not be edited by hand.
    readonly_fields = ('created_date', 'changed_date')
| import smtplib
from django.core.mail import send_mail
from django.contrib import admin
from django.shortcuts import HttpResponseRedirect
from markdownx.admin import MarkdownxModelAdmin
from NWU_ACM_MIS import settings
from member.models import Member
from plan.models import Plan, Announcement
mail_content_template = '''
我们发布了一个新的计划: {}
计划时间:
开始: {}
结束: {}
以下是详细信息:
{}
欢迎参加,
{}
'''
@admin.register(Plan)
class PlanAdmin(admin.ModelAdmin):
list_display = ('name', 'typ', 'clock_in', 'clock_out', 'has_sent')
ordering = ('-clock_in',)
add_fieldsets = (
(None, {'fields': (
'name',
'typ',
'detail',
'clock_in',
'clock_out',
)}),
)
fieldsets = add_fieldsets + (
('邮件通知', {'fields': (
'members',
'has_sent',
)}),
)
readonly_fields = ('has_sent',)
autocomplete_fields = ('members',)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super().get_fieldsets(request, obj)
def response_change(self, request, obj:Plan):
if '_addallads' in request.POST:
obj.save()
obj.members.add(*Member.objects.filter(role=Member.Role.AD))
self.message_user(request, '添加了所有的现役队员')
return HttpResponseRedirect('.')
if '_addallnovices' in request.POST:
obj.save()
obj.members.add(*Member.objects.filter(role=Member.Role.NOVICE))
self.message_user(request, '添加了所有的萌新队员')
return HttpResponseRedirect('.')
if '_sendemail' in request.POST:
obj.has_sent = True
obj.save()
failed_members = []
mail_content = mail_content_template.format(
obj.name, obj.clock_in, obj.clock_out, obj.detail,
settings.PROJECT_VERBOSE_NAME
)
for member in obj.members.all():
try:
send_mail(
obj.name,
f'您好,{member.realname}' + mail_content,
settings.EMAIL_FROM,
recipient_list=[member.user.email, ],
fail_silently=False,
)
except smtplib.SMTPException:
failed_members.append(member)
message = '己发送邮件'
if failed_members:
message += f', 其中给{failed_members}发送时失败'
self.message_user(request, message)
return HttpResponseRedirect('.')
return super().response_change(request, obj)
@admin.register(Announcement)
class AnnouncementAdmin(MarkdownxModelAdmin):
list_display = ('title', 'created_date', 'changed_date')
ordering = ('-created_date',)
fields = ('title', 'content', 'created_date', 'changed_date')
readonly_fields = ('created_date', 'changed_date')
| zh | 0.997055 | 我们发布了一个新的计划: {} 计划时间: 开始: {} 结束: {} 以下是详细信息: {} 欢迎参加, {} | 1.965657 | 2 |
stippler.py | alexemm/tsp-art | 0 | 6613433 | <gh_stars>0
from argparse import ArgumentParser
from collections import OrderedDict
from os import makedirs
from os.path import exists
from typing import Optional
import numpy as np
from PIL import ImageDraw, ImageFont
from image_tools import image_to_array, load_image, array_to_image
def get_probability_matrix(arr, darkness: bool = False):
brightness_matr = arr if darkness else 255. - arr
return brightness_matr / brightness_matr.sum()
def determine_datapoint_from_int(i, prev_shape):
x = int(i % prev_shape[1])
y = int(i / prev_shape[1])
return x, y
def choose_k_points(prob_arr_2d, k, prev_shape):
chosen_data_values = np.random.choice(range(prob_arr_2d.size), size=k, p=prob_arr_2d.flatten(), replace=False)
chosen_data_points = np.array([determine_datapoint_from_int(i, prev_shape) for i in chosen_data_values])
return chosen_data_points
def create_dotted_array(chosen_points, shape, darkness: bool = False):
dotted_arr = np.zeros(shape) + 255 * (1 - darkness)
for point in chosen_points:
dotted_arr[point[1], point[0]] = 255 * darkness
return dotted_arr
def find_closest_point(nodes, node):
deltas = nodes - node
dist_2 = np.einsum('ij,ij->i', deltas, deltas)
return np.argmin(dist_2)
def tractor_beam(arr, k, iterations: Optional[int] = None, intermediate_steps: Optional[int] = None,
darkness: bool = False):
iterations = iterations or 10000
intermediate_result_steps = intermediate_steps or (iterations + 1)
prob_matr = get_probability_matrix(arr, darkness)
prev_shape = prob_matr.shape
prob_matr = prob_matr.flatten()
p = choose_k_points(prob_matr, k, prev_shape)
n = np.zeros(len(p))
intermediates = OrderedDict() if intermediate_steps is not None else None
for iteration in range(iterations):
# 1. Select tractor beam point
w = choose_k_points(prob_matr, 1, prev_shape)[0]
# 2. Find closest point to tractor beam point
i = find_closest_point(p, w)
# 3. Increment counter for point
n[i] += 1
# 4. Adjust point
p[i][0] = int(round(1. / (n[i] + 1.) * w[0] + n[i] / float(n[i] + 1.) * p[i][0]))
p[i][1] = int(round(1. / (n[i] + 1.) * w[1] + n[i] / float(n[i] + 1.) * p[i][1]))
# Print iterations
if iteration % 100 == 0 or iterations / 2 == iteration:
print(iteration)
# Store side steps for visualization later
if iteration % intermediate_result_steps == 0 and intermediate_steps is not None:
intermediates[iteration] = p.copy()
intermediates[iterations] = p
return p, intermediates
def write_tsp_file(output_file, nodes):
with open(output_file, 'w') as f:
name: str = output_file.split('/')[-1].split('.')[0]
lines = get_header(name, len(nodes))
lines += ["%i %i %i\n" % (i, point[0], point[1]) for i, point in enumerate(nodes)]
f.writelines(lines)
def get_header(name, dim):
lines = ["NAME: %s\n" % name, "TYPE: TSP\n", "DIMENSION: %i\n" % dim, "EDGE_WEIGHT_TYPE: EUC_2D\n",
"NODE_COORD_SECTION\n"]
return lines
def define_arguments() -> ArgumentParser:
parser = ArgumentParser()
parser.add_argument("input_file", help="File input")
parser.add_argument("k", help="Number of dots in image", type=int)
parser.add_argument("out_dir", help="Output directory")
parser.add_argument("-it", "--iterations", help="Number of iterations to clean image. Default is 10000", type=int)
parser.add_argument("-st", "--steps", help="Number of intermediate steps which will be printed in to an image",
type=int)
parser.add_argument("--darkness", help="Option for drawing white lines on black background", action="store_true")
# parser.add_argument("--seed", help="Random seed")
return parser
def add_iteration_text_to_corner(im, text, darkness: bool = False):
draw = ImageDraw.Draw(im)
font = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 20)
draw.text((0, 0), text, 255 * darkness, font=font)
return im
def create_timelapse_gif(out_name, intermediate_steps, shape, darkness: bool = False):
im = array_to_image(create_dotted_array(intermediate_steps[0], shape, darkness))
im_arr = [
add_iteration_text_to_corner(array_to_image(create_dotted_array(st, shape, darkness)), "Iteration: " + str(it),
darkness) for it, st in intermediate_steps.items()]
im.save(out_name, save_all=True, append_images=im_arr, loop=0, duration=500)
def stippling(im_arr: np.ndarray, k: int, filename: str, out_dir: str, iterations: Optional[int], steps: Optional[int],
darkness: bool = False):
nodes, intermediate_nodes = tractor_beam(im_arr, k, iterations, steps, darkness)
output_directory: str = out_dir + filename.split(".")[0] + "_" + str(k) + '/' # for all output
if not exists(output_directory):
makedirs(output_directory)
# Save picture
array_to_image(create_dotted_array(nodes, im_arr.shape, darkness)).save(
output_directory + str(k) + "dotted_" + filename)
# Save tsp-file
write_tsp_file(output_directory + filename.split('.')[0] + '_' + str(k) + '.tsp', nodes)
# Show all the intermediate steps
if steps is not None:
create_timelapse_gif(output_directory + "dotted_" + filename.split('.')[0] + '.gif', intermediate_nodes,
im_arr.shape, darkness)
return nodes
def main(parsed_arguments):
args = parsed_arguments
filename = args.input_file.split('/')[-1]
im_arr = image_to_array(load_image(args.input_file))
return stippling(im_arr, args.k, filename, args.out_dir, args.iterations, args.steps, args.darkness)
if __name__ == '__main__':
main(define_arguments().parse_args())
| from argparse import ArgumentParser
from collections import OrderedDict
from os import makedirs
from os.path import exists
from typing import Optional
import numpy as np
from PIL import ImageDraw, ImageFont
from image_tools import image_to_array, load_image, array_to_image
def get_probability_matrix(arr, darkness: bool = False):
brightness_matr = arr if darkness else 255. - arr
return brightness_matr / brightness_matr.sum()
def determine_datapoint_from_int(i, prev_shape):
x = int(i % prev_shape[1])
y = int(i / prev_shape[1])
return x, y
def choose_k_points(prob_arr_2d, k, prev_shape):
chosen_data_values = np.random.choice(range(prob_arr_2d.size), size=k, p=prob_arr_2d.flatten(), replace=False)
chosen_data_points = np.array([determine_datapoint_from_int(i, prev_shape) for i in chosen_data_values])
return chosen_data_points
def create_dotted_array(chosen_points, shape, darkness: bool = False):
dotted_arr = np.zeros(shape) + 255 * (1 - darkness)
for point in chosen_points:
dotted_arr[point[1], point[0]] = 255 * darkness
return dotted_arr
def find_closest_point(nodes, node):
deltas = nodes - node
dist_2 = np.einsum('ij,ij->i', deltas, deltas)
return np.argmin(dist_2)
def tractor_beam(arr, k, iterations: Optional[int] = None, intermediate_steps: Optional[int] = None,
darkness: bool = False):
iterations = iterations or 10000
intermediate_result_steps = intermediate_steps or (iterations + 1)
prob_matr = get_probability_matrix(arr, darkness)
prev_shape = prob_matr.shape
prob_matr = prob_matr.flatten()
p = choose_k_points(prob_matr, k, prev_shape)
n = np.zeros(len(p))
intermediates = OrderedDict() if intermediate_steps is not None else None
for iteration in range(iterations):
# 1. Select tractor beam point
w = choose_k_points(prob_matr, 1, prev_shape)[0]
# 2. Find closest point to tractor beam point
i = find_closest_point(p, w)
# 3. Increment counter for point
n[i] += 1
# 4. Adjust point
p[i][0] = int(round(1. / (n[i] + 1.) * w[0] + n[i] / float(n[i] + 1.) * p[i][0]))
p[i][1] = int(round(1. / (n[i] + 1.) * w[1] + n[i] / float(n[i] + 1.) * p[i][1]))
# Print iterations
if iteration % 100 == 0 or iterations / 2 == iteration:
print(iteration)
# Store side steps for visualization later
if iteration % intermediate_result_steps == 0 and intermediate_steps is not None:
intermediates[iteration] = p.copy()
intermediates[iterations] = p
return p, intermediates
def write_tsp_file(output_file, nodes):
with open(output_file, 'w') as f:
name: str = output_file.split('/')[-1].split('.')[0]
lines = get_header(name, len(nodes))
lines += ["%i %i %i\n" % (i, point[0], point[1]) for i, point in enumerate(nodes)]
f.writelines(lines)
def get_header(name, dim):
lines = ["NAME: %s\n" % name, "TYPE: TSP\n", "DIMENSION: %i\n" % dim, "EDGE_WEIGHT_TYPE: EUC_2D\n",
"NODE_COORD_SECTION\n"]
return lines
def define_arguments() -> ArgumentParser:
parser = ArgumentParser()
parser.add_argument("input_file", help="File input")
parser.add_argument("k", help="Number of dots in image", type=int)
parser.add_argument("out_dir", help="Output directory")
parser.add_argument("-it", "--iterations", help="Number of iterations to clean image. Default is 10000", type=int)
parser.add_argument("-st", "--steps", help="Number of intermediate steps which will be printed in to an image",
type=int)
parser.add_argument("--darkness", help="Option for drawing white lines on black background", action="store_true")
# parser.add_argument("--seed", help="Random seed")
return parser
def add_iteration_text_to_corner(im, text, darkness: bool = False):
draw = ImageDraw.Draw(im)
font = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 20)
draw.text((0, 0), text, 255 * darkness, font=font)
return im
def create_timelapse_gif(out_name, intermediate_steps, shape, darkness: bool = False):
im = array_to_image(create_dotted_array(intermediate_steps[0], shape, darkness))
im_arr = [
add_iteration_text_to_corner(array_to_image(create_dotted_array(st, shape, darkness)), "Iteration: " + str(it),
darkness) for it, st in intermediate_steps.items()]
im.save(out_name, save_all=True, append_images=im_arr, loop=0, duration=500)
def stippling(im_arr: np.ndarray, k: int, filename: str, out_dir: str, iterations: Optional[int], steps: Optional[int],
darkness: bool = False):
nodes, intermediate_nodes = tractor_beam(im_arr, k, iterations, steps, darkness)
output_directory: str = out_dir + filename.split(".")[0] + "_" + str(k) + '/' # for all output
if not exists(output_directory):
makedirs(output_directory)
# Save picture
array_to_image(create_dotted_array(nodes, im_arr.shape, darkness)).save(
output_directory + str(k) + "dotted_" + filename)
# Save tsp-file
write_tsp_file(output_directory + filename.split('.')[0] + '_' + str(k) + '.tsp', nodes)
# Show all the intermediate steps
if steps is not None:
create_timelapse_gif(output_directory + "dotted_" + filename.split('.')[0] + '.gif', intermediate_nodes,
im_arr.shape, darkness)
return nodes
def main(parsed_arguments):
args = parsed_arguments
filename = args.input_file.split('/')[-1]
im_arr = image_to_array(load_image(args.input_file))
return stippling(im_arr, args.k, filename, args.out_dir, args.iterations, args.steps, args.darkness)
if __name__ == '__main__':
main(define_arguments().parse_args()) | en | 0.571202 | # 1. Select tractor beam point # 2. Find closest point to tractor beam point # 3. Increment counter for point # 4. Adjust point # Print iterations # Store side steps for visualization later # parser.add_argument("--seed", help="Random seed") # for all output # Save picture # Save tsp-file # Show all the intermediate steps | 2.260006 | 2 |
medusa/storage/azure_blobs_storage/azcli.py | elsmorian/cassandra-medusa | 187 | 6613434 | import io
import json
import logging
import os
import subprocess
import sys
import uuid
from retrying import retry
from medusa.storage.abstract_storage import AbstractStorage
class AzCli(object):
    """Context manager wrapping the Azure ``az`` CLI for blob up-/downloads.

    On entry it loads credentials from the configured key file and exposes
    them to the spawned subprocesses through environment variables; on exit
    the credential variables are dropped again.
    """

    def __init__(self, storage):
        # `storage` is the storage backend; its config supplies the container
        # (bucket) name and the path to the credentials key file.
        self._config = storage.config
        self.storage = storage

    @property
    def bucket_name(self):
        # Name of the Azure blob container backing this storage.
        return self._config.bucket_name

    def __enter__(self):
        """Load Azure credentials and prepare the subprocess environment."""
        with io.open(os.path.expanduser(self._config.key_file), 'r', encoding='utf-8') as json_fi:
            credentials = json.load(json_fi)
        # Two supported credential formats: a single connection string, or a
        # storage-account / key pair.
        if 'connection_string' in credentials:
            self._env = dict(
                os.environ,
                AZURE_STORAGE_CONNECTION_STRING=credentials['connection_string']
            )
        else:
            self._env = dict(
                os.environ,
                AZURE_STORAGE_ACCOUNT=credentials['storage_account'],
                AZURE_STORAGE_KEY=credentials['key']
            )
        self._az_cli_path = self.find_az_cli()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Reset to a credential-free environment; never suppress exceptions.
        self._env = dict(os.environ)
        return False

    @staticmethod
    def find_az_cli():
        """
        Construct the AZ command line with parameters and variables
        Also includes a lookup for the AZ binary, in case we are running
        under a venv
        """
        az_bin = 'az'
        binary_paths = ['/usr/bin', '/usr/local/bin']
        # Search sys.path first so an `az` shim installed in a virtualenv wins
        # over the system-wide binary.
        paths = sys.path + binary_paths
        for path in paths:
            if not path:
                continue
            tpath = '/'.join([path, 'az'])
            if os.path.exists(tpath) and os.path.isfile(tpath) and os.access(tpath, os.X_OK):
                az_bin = tpath
                break
        return az_bin

    def cp_upload(self, *, srcs, bucket_name, dest, max_retries=5):
        """Upload each file in `srcs` to `dest` in container `bucket_name`.

        Returns the list of resulting blob objects.
        NOTE(review): `max_retries` is unused here; retries are governed by
        the @retry decorator on upload_file (fixed at 5 attempts) — confirm.
        """
        job_id = str(uuid.uuid4())
        azcli_output = "/tmp/azcli_{0}.output".format(job_id)
        objects = []
        # Az cli expects the client to provide the MD5 hash of the upload
        for src in srcs:
            cmd = [self._az_cli_path, "storage", "blob", "upload", "-f", str(src), "-c", bucket_name, "-n", dest,
                   "--content-md5", AbstractStorage.generate_md5_hash(src)]
            objects.append(self.upload_file(cmd, dest, azcli_output))
        return objects

    def cp_download(self, *, src, bucket_name, dest, max_retries=5):
        """Download blob `src` from container `bucket_name` into directory `dest`.

        NOTE(review): this always returns an empty list, and `max_retries` is
        unused (retries come from @retry on download_file) — confirm callers
        ignore both.
        """
        job_id = str(uuid.uuid4())
        azcli_output = "/tmp/azcli_{0}.output".format(job_id)
        objects = []
        dest_path = os.path.join(str(dest), str(src).split("/")[-1])
        cmd = [self._az_cli_path, "storage", "blob", "download", "-f", dest_path, "-c", bucket_name, "-n", str(src)]
        self.download_file(cmd, dest, azcli_output)
        return objects

    @retry(stop_max_attempt_number=5, wait_fixed=5000)
    def upload_file(self, cmd, dest, azcli_output):
        """Run one `az` upload command, capturing its output in `azcli_output`.

        On success the output file is removed and the uploaded blob object is
        returned; on failure IOError is raised (retried up to 5 times).
        """
        logging.debug(" ".join(cmd))
        with open(azcli_output, "w") as output:
            process = subprocess.Popen(
                cmd,
                env=self._env,
                bufsize=0,
                stdout=output,
                stderr=subprocess.STDOUT,
                universal_newlines=True,
            )
            if process.wait() == 0:
                obj = self.get_blob(dest)
                os.remove(azcli_output)
                return obj
        raise IOError(
            "az cli cp failed. Max attempts exceeded. Check {} for more informations.".format(
                azcli_output
            )
        )

    @retry(stop_max_attempt_number=5, wait_fixed=5000)
    def download_file(self, cmd, dest, azcli_output):
        """Run one `az` download command, capturing its output in `azcli_output`.

        The output file is removed on success; IOError is raised on failure
        (retried up to 5 times by the decorator).
        """
        logging.debug(" ".join(cmd))
        with open(azcli_output, "w") as output:
            process = subprocess.Popen(
                cmd,
                env=self._env,
                bufsize=0,
                stdout=output,
                stderr=subprocess.STDOUT,
                universal_newlines=True,
            )
            if process.wait() == 0:
                os.remove(azcli_output)
                return
        raise IOError(
            "az cli cp failed. Max attempts exceeded. Check {} for more informations.".format(
                azcli_output
            )
        )

    @retry(stop_max_attempt_number=10, wait_fixed=1000)
    def get_blob(self, blob_name):
        # This needs to be retried as AZ is eventually consistent
        obj = self.storage.get_blob(blob_name)
        if obj is None:
            raise IOError("Failed to find uploaded object {} in Azure".format(blob_name))
        return obj
| import io
import json
import logging
import os
import subprocess
import sys
import uuid
from retrying import retry
from medusa.storage.abstract_storage import AbstractStorage
class AzCli(object):
def __init__(self, storage):
self._config = storage.config
self.storage = storage
@property
def bucket_name(self):
return self._config.bucket_name
def __enter__(self):
with io.open(os.path.expanduser(self._config.key_file), 'r', encoding='utf-8') as json_fi:
credentials = json.load(json_fi)
if 'connection_string' in credentials:
self._env = dict(
os.environ,
AZURE_STORAGE_CONNECTION_STRING=credentials['connection_string']
)
else:
self._env = dict(
os.environ,
AZURE_STORAGE_ACCOUNT=credentials['storage_account'],
AZURE_STORAGE_KEY=credentials['key']
)
self._az_cli_path = self.find_az_cli()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._env = dict(os.environ)
return False
@staticmethod
def find_az_cli():
"""
Construct the AZ command line with parameters and variables
Also includes a lookup for the AZ binary, in case we are running
under a venv
"""
az_bin = 'az'
binary_paths = ['/usr/bin', '/usr/local/bin']
paths = sys.path + binary_paths
for path in paths:
if not path:
continue
tpath = '/'.join([path, 'az'])
if os.path.exists(tpath) and os.path.isfile(tpath) and os.access(tpath, os.X_OK):
az_bin = tpath
break
return az_bin
def cp_upload(self, *, srcs, bucket_name, dest, max_retries=5):
job_id = str(uuid.uuid4())
azcli_output = "/tmp/azcli_{0}.output".format(job_id)
objects = []
# Az cli expects the client to provide the MD5 hash of the upload
for src in srcs:
cmd = [self._az_cli_path, "storage", "blob", "upload", "-f", str(src), "-c", bucket_name, "-n", dest,
"--content-md5", AbstractStorage.generate_md5_hash(src)]
objects.append(self.upload_file(cmd, dest, azcli_output))
return objects
def cp_download(self, *, src, bucket_name, dest, max_retries=5):
job_id = str(uuid.uuid4())
azcli_output = "/tmp/azcli_{0}.output".format(job_id)
objects = []
dest_path = os.path.join(str(dest), str(src).split("/")[-1])
cmd = [self._az_cli_path, "storage", "blob", "download", "-f", dest_path, "-c", bucket_name, "-n", str(src)]
self.download_file(cmd, dest, azcli_output)
return objects
@retry(stop_max_attempt_number=5, wait_fixed=5000)
def upload_file(self, cmd, dest, azcli_output):
logging.debug(" ".join(cmd))
with open(azcli_output, "w") as output:
process = subprocess.Popen(
cmd,
env=self._env,
bufsize=0,
stdout=output,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
if process.wait() == 0:
obj = self.get_blob(dest)
os.remove(azcli_output)
return obj
raise IOError(
"az cli cp failed. Max attempts exceeded. Check {} for more informations.".format(
azcli_output
)
)
@retry(stop_max_attempt_number=5, wait_fixed=5000)
def download_file(self, cmd, dest, azcli_output):
logging.debug(" ".join(cmd))
with open(azcli_output, "w") as output:
process = subprocess.Popen(
cmd,
env=self._env,
bufsize=0,
stdout=output,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
if process.wait() == 0:
os.remove(azcli_output)
return
raise IOError(
"az cli cp failed. Max attempts exceeded. Check {} for more informations.".format(
azcli_output
)
)
@retry(stop_max_attempt_number=10, wait_fixed=1000)
def get_blob(self, blob_name):
# This needs to be retried as AZ is eventually consistent
obj = self.storage.get_blob(blob_name)
if obj is None:
raise IOError("Failed to find uploaded object {} in Azure".format(blob_name))
return obj
| en | 0.926068 | Construct the AZ command line with parameters and variables Also includes a lookup for the AZ binary, in case we are running under a venv # Az cli expects the client to provide the MD5 hash of the upload # This needs to be retried as AZ is eventually consistent | 1.869282 | 2 |
{{ cookiecutter.repo_name }}/main.py | boukepostma/cookiecutter-data-science | 2 | 6613435 | <filename>{{ cookiecutter.repo_name }}/main.py
"""
Main script of the project to run the entire pipeline. Example of docstring in Google style.
"""
def main(arg: bool = True) -> bool:
    """Placeholder entry point for the project pipeline.

    Args:
        arg: Example flag that is simply echoed back to the caller.

    Returns:
        The value of ``arg``, unchanged.
    """
    result = arg
    return result
if __name__ == "__main__":
main()
| <filename>{{ cookiecutter.repo_name }}/main.py
"""
Main script of the project to run the entire pipeline. Example of docstring in Google style.
"""
def main(arg: bool = True) -> bool:
"""Summary line.
Extended description of function.
Args:
arg (bool): Description of arg
Returns:
bool: Description of return value
"""
return arg
if __name__ == "__main__":
main()
| en | 0.705676 | Main script of the project to run the entire pipeline. Example of docstring in Google style. Summary line. Extended description of function. Args: arg (bool): Description of arg Returns: bool: Description of return value | 1.633174 | 2 |
fullcyclepy/hw/wincam.py | dfoderick/fullcyclemining | 26 | 6613436 | <gh_stars>10-100
'''reads the camera'''
#image gets written in parent (startup directory!)
import cv2
# Grab the first attached camera (device index 0).
CAM = cv2.VideoCapture(0)
cv2.namedWindow("test")
COUNTER = 0  # sequence number for saved snapshots
while True:
    RET, FRAME = CAM.read()
    # NOTE(review): FRAME may be None when RET is False; imshow is called
    # before the RET check and may then raise — confirm intended order.
    cv2.imshow("test", FRAME)
    if not RET:
        # Camera produced no frame (unplugged / busy): stop the loop.
        break
    k = cv2.waitKey(1)
    if k%256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break
    elif k%256 == 32:
        # SPACE pressed: save the current frame to a numbered PNG.
        IMAGENAME = "fullcycle_{}.png".format(COUNTER)
        cv2.imwrite(IMAGENAME, FRAME)
        print("{} written!".format(IMAGENAME))
        COUNTER += 1
# Release the device and close the preview window on exit.
CAM.release()
cv2.destroyAllWindows()
| '''reads the camera'''
#image gets written in parent (startup directory!)
import cv2
CAM = cv2.VideoCapture(0)
cv2.namedWindow("test")
COUNTER = 0
while True:
RET, FRAME = CAM.read()
cv2.imshow("test", FRAME)
if not RET:
break
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k%256 == 32:
# SPACE pressed
IMAGENAME = "fullcycle_{}.png".format(COUNTER)
cv2.imwrite(IMAGENAME, FRAME)
print("{} written!".format(IMAGENAME))
COUNTER += 1
CAM.release()
cv2.destroyAllWindows() | en | 0.877288 | reads the camera #image gets written in parent (startup directory!) # ESC pressed # SPACE pressed | 3.009754 | 3 |
mccli/mccli.py | giffels/mccli | 4 | 6613437 | <reponame>giffels/mccli
#!/usr/bin/env python3
import click
from .ssh_wrapper import ssh_wrap, scp_wrap
from .init_utils import valid_mc_url, init_endpoint, init_token, init_user, init_cache, augmented_scp_command
from .scp_utils import parse_scp_args
from .click_utils import SshUsageCommand, ScpUsageCommand, tuple_to_list, basic_options, extended_options
from .info_utils import get_all_info
from .logging import logger
@click.group(invoke_without_command=False, add_help_option=False)
@basic_options
def cli(**kwargs):
    """
    SSH client wrapper with OIDC-based authentication
    """
    # Group entry point only: the shared options declared by @basic_options
    # are handled by the subcommands, so nothing happens here.
    pass
@cli.command(name="info", add_help_option=False, short_help="get info about service")
@basic_options
@click.argument("hostname", required=False)
def info(mc_endpoint, verify, no_cache, token, oa_account, iss, dry_run, hostname):
"""Shows various information about the user and the service:
If an Access Token is provided, show information in the token,
as well as info about the user retrieved from the token issuer.
If HOSTNAME is provided, show information about SSH service
running on HOSTNAME: supported OIDC providers, service
description and login info.
If both token and HOSTNAME are provided, also show authorisation
information if issuer of token is supported on the service, as well
as the status of the local account on service.
"""
try:
if not no_cache:
init_cache()
if hostname is None:
raise Exception("No HOSTNAME provided.")
if mc_endpoint:
mc_url = valid_mc_url(mc_endpoint, verify)
else:
mc_url = init_endpoint([hostname], verify)
except Exception as e:
mc_url = None
logger.warning(e)
logger.warning("Cannot show service-related information.")
try:
at, _ = init_token(token, oa_account, iss, mc_url, verify, validate_length=False)
except Exception as e:
at = None
logger.warning(e)
logger.warning("Cannot show token information")
info_string = get_all_info(mc_url, at, verify)
if info_string:
click.echo(info_string)
else:
logger.error("No information available: please provide a hostname and/or an Access Token.\n" + \
"Try 'mccli info --help' for usage information.")
@cli.command(name="ssh", short_help="remote login client",
cls=SshUsageCommand, context_settings={
"ignore_unknown_options": True,
"allow_extra_args": True
})
@extended_options
@click.argument("ssh_command", nargs=-1, required=True, type=click.UNPROCESSED, callback=tuple_to_list)
def ssh(mc_endpoint, verify, no_cache, token, oa_account, iss, dry_run, ssh_command):
"""Connects and logs into HOSTNAME via SSH by using the provided OIDC
Access Token to authenticate.
If a COMMAND is specified, it is executed on the remote host instead
of a login shell.
The remote user must not be specified, since it will be obtained from
the motley_cue service. Any specified username will be ignored.
When no Access Token source is specified, the service on the remote host
is queried for supported issuers; if only one issuer is supported,
this is used to retrieve the token from the oidc-agent.
"""
try:
if not no_cache:
init_cache()
if mc_endpoint:
mc_url = valid_mc_url(mc_endpoint, verify)
else:
mc_url = init_endpoint(ssh_command, verify)
at, str_get_at = init_token(token, oa_account, iss, mc_url, verify)
username = init_user(mc_url, at, verify)
ssh_wrap(ssh_command, username, at,
str_get_token=str_get_at, dry_run=dry_run)
except Exception as e:
logger.error(e)
@cli.command(name="scp", short_help="secure file copy",
cls=ScpUsageCommand, context_settings={
"ignore_unknown_options": True,
"allow_extra_args": True
})
@extended_options
@click.argument("scp_command", nargs=-1, required=True, type=click.UNPROCESSED, callback=tuple_to_list)
def scp(mc_endpoint, verify, no_cache, token, oa_account, iss, dry_run, scp_command):
"""Copies files between hosts on a network over SSH using the provided
OIDC Access Token to authenticate.
The SOURCE and TARGET may be specified as a local pathname, a remote host
with optional path in the form [user@]host:[path], or a URI in the form
scp://[user@]host[:port][/path]
The remote user should not be specified, since it will be obtained from
the motley_cue service. If you specify a username for a host, then it
will be used; it will be assumed that this specific host does not use
motley_cue, and by extension, token authentication; you will have to handle
authentication for this host on your own.
When no Access Token source is specified, the remote host is queried for
supported issuers; if only one issuer is supported, this is used to
retrieve the token from the oidc-agent.
"""
try:
if not no_cache:
init_cache()
if mc_endpoint:
mc_url = valid_mc_url(mc_endpoint, verify)
at, str_get_at = init_token(token, oa_account, iss, mc_url, verify)
username = init_user(mc_url, at, verify)
scp_wrap(scp_command, username=username, tokens=at,
str_get_tokens=str_get_at, dry_run=dry_run)
else:
scp_args = parse_scp_args(scp_command)
if scp_args.no_mc():
logger.warning("No motley_cue handling will be done. "
"Either all specified paths are local, "
"or users are specified for remotes.")
scp_wrap(scp_command, dry_run=dry_run)
elif scp_args.single_mc():
logger.info("Only one host with motley_cue detected. Easy.")
mc_url = init_endpoint([scp_args.mc_host], verify)
at, str_get_at = init_token(token, oa_account, iss, mc_url, verify)
username = init_user(mc_url, at, verify)
scp_wrap(scp_command, username=username, tokens=at,
str_get_tokens=str_get_at,
num_prompts=scp_args.num_prompts, dry_run=dry_run)
elif scp_args.multiple_mc():
logger.info("Multiple hosts with motley_cue detected, "
"your commandline will be augmented with usernames. ")
new_scp_command, tokens, str_get_tokens = \
augmented_scp_command(scp_args, token, oa_account, iss, verify)
scp_wrap(new_scp_command, tokens=tokens,
str_get_tokens=str_get_tokens, dry_run=dry_run)
else:
raise Exception("Something went wrong when trying to find out "
"which paths are remote and which are local.")
except Exception as e:
logger.error(e)
@cli.command(name="sftp", short_help="secure file transfer")
def sftp():
"""
--- Not implemented ---
"""
logger.error("Not implemented.")
if __name__ == '__main__':
cli()
| #!/usr/bin/env python3
import click
from .ssh_wrapper import ssh_wrap, scp_wrap
from .init_utils import valid_mc_url, init_endpoint, init_token, init_user, init_cache, augmented_scp_command
from .scp_utils import parse_scp_args
from .click_utils import SshUsageCommand, ScpUsageCommand, tuple_to_list, basic_options, extended_options
from .info_utils import get_all_info
from .logging import logger
@click.group(invoke_without_command=False, add_help_option=False)
@basic_options
def cli(**kwargs):
"""
SSH client wrapper with OIDC-based authentication
"""
pass
@cli.command(name="info", add_help_option=False, short_help="get info about service")
@basic_options
@click.argument("hostname", required=False)
def info(mc_endpoint, verify, no_cache, token, oa_account, iss, dry_run, hostname):
"""Shows various information about the user and the service:
If an Access Token is provided, show information in the token,
as well as info about the user retrieved from the token issuer.
If HOSTNAME is provided, show information about SSH service
running on HOSTNAME: supported OIDC providers, service
description and login info.
If both token and HOSTNAME are provided, also show authorisation
information if issuer of token is supported on the service, as well
as the status of the local account on service.
"""
try:
if not no_cache:
init_cache()
if hostname is None:
raise Exception("No HOSTNAME provided.")
if mc_endpoint:
mc_url = valid_mc_url(mc_endpoint, verify)
else:
mc_url = init_endpoint([hostname], verify)
except Exception as e:
mc_url = None
logger.warning(e)
logger.warning("Cannot show service-related information.")
try:
at, _ = init_token(token, oa_account, iss, mc_url, verify, validate_length=False)
except Exception as e:
at = None
logger.warning(e)
logger.warning("Cannot show token information")
info_string = get_all_info(mc_url, at, verify)
if info_string:
click.echo(info_string)
else:
logger.error("No information available: please provide a hostname and/or an Access Token.\n" + \
"Try 'mccli info --help' for usage information.")
@cli.command(name="ssh", short_help="remote login client",
cls=SshUsageCommand, context_settings={
"ignore_unknown_options": True,
"allow_extra_args": True
})
@extended_options
@click.argument("ssh_command", nargs=-1, required=True, type=click.UNPROCESSED, callback=tuple_to_list)
def ssh(mc_endpoint, verify, no_cache, token, oa_account, iss, dry_run, ssh_command):
"""Connects and logs into HOSTNAME via SSH by using the provided OIDC
Access Token to authenticate.
If a COMMAND is specified, it is executed on the remote host instead
of a login shell.
The remote user must not be specified, since it will be obtained from
the motley_cue service. Any specified username will be ignored.
When no Access Token source is specified, the service on the remote host
is queried for supported issuers; if only one issuer is supported,
this is used to retrieve the token from the oidc-agent.
"""
try:
if not no_cache:
init_cache()
if mc_endpoint:
mc_url = valid_mc_url(mc_endpoint, verify)
else:
mc_url = init_endpoint(ssh_command, verify)
at, str_get_at = init_token(token, oa_account, iss, mc_url, verify)
username = init_user(mc_url, at, verify)
ssh_wrap(ssh_command, username, at,
str_get_token=str_get_at, dry_run=dry_run)
except Exception as e:
logger.error(e)
@cli.command(name="scp", short_help="secure file copy",
cls=ScpUsageCommand, context_settings={
"ignore_unknown_options": True,
"allow_extra_args": True
})
@extended_options
@click.argument("scp_command", nargs=-1, required=True, type=click.UNPROCESSED, callback=tuple_to_list)
def scp(mc_endpoint, verify, no_cache, token, oa_account, iss, dry_run, scp_command):
"""Copies files between hosts on a network over SSH using the provided
OIDC Access Token to authenticate.
The SOURCE and TARGET may be specified as a local pathname, a remote host
with optional path in the form [user@]host:[path], or a URI in the form
scp://[user@]host[:port][/path]
The remote user should not be specified, since it will be obtained from
the motley_cue service. If you specify a username for a host, then it
will be used; it will be assumed that this specific host does not use
motley_cue, and by extension, token authentication; you will have to handle
authentication for this host on your own.
When no Access Token source is specified, the remote host is queried for
supported issuers; if only one issuer is supported, this is used to
retrieve the token from the oidc-agent.
"""
try:
if not no_cache:
init_cache()
if mc_endpoint:
mc_url = valid_mc_url(mc_endpoint, verify)
at, str_get_at = init_token(token, oa_account, iss, mc_url, verify)
username = init_user(mc_url, at, verify)
scp_wrap(scp_command, username=username, tokens=at,
str_get_tokens=str_get_at, dry_run=dry_run)
else:
scp_args = parse_scp_args(scp_command)
if scp_args.no_mc():
logger.warning("No motley_cue handling will be done. "
"Either all specified paths are local, "
"or users are specified for remotes.")
scp_wrap(scp_command, dry_run=dry_run)
elif scp_args.single_mc():
logger.info("Only one host with motley_cue detected. Easy.")
mc_url = init_endpoint([scp_args.mc_host], verify)
at, str_get_at = init_token(token, oa_account, iss, mc_url, verify)
username = init_user(mc_url, at, verify)
scp_wrap(scp_command, username=username, tokens=at,
str_get_tokens=str_get_at,
num_prompts=scp_args.num_prompts, dry_run=dry_run)
elif scp_args.multiple_mc():
logger.info("Multiple hosts with motley_cue detected, "
"your commandline will be augmented with usernames. ")
new_scp_command, tokens, str_get_tokens = \
augmented_scp_command(scp_args, token, oa_account, iss, verify)
scp_wrap(new_scp_command, tokens=tokens,
str_get_tokens=str_get_tokens, dry_run=dry_run)
else:
raise Exception("Something went wrong when trying to find out "
"which paths are remote and which are local.")
except Exception as e:
logger.error(e)
@cli.command(name="sftp", short_help="secure file transfer")
def sftp():
"""
--- Not implemented ---
"""
logger.error("Not implemented.")
if __name__ == '__main__':
cli() | en | 0.87078 | #!/usr/bin/env python3 SSH client wrapper with OIDC-based authentication Shows various information about the user and the service: If an Access Token is provided, show information in the token, as well as info about the user retrieved from the token issuer. If HOSTNAME is provided, show information about SSH service running on HOSTNAME: supported OIDC providers, service description and login info. If both token and HOSTNAME are provided, also show authorisation information if issuer of token is supported on the service, as well as the status of the local account on service. Connects and logs into HOSTNAME via SSH by using the provided OIDC Access Token to authenticate. If a COMMAND is specified, it is executed on the remote host instead of a login shell. The remote user must not be specified, since it will be obtained from the motley_cue service. Any specified username will be ignored. When no Access Token source is specified, the service on the remote host is queried for supported issuers; if only one issuer is supported, this is used to retrieve the token from the oidc-agent. Copies files between hosts on a network over SSH using the provided OIDC Access Token to authenticate. The SOURCE and TARGET may be specified as a local pathname, a remote host with optional path in the form [user@]host:[path], or a URI in the form scp://[user@]host[:port][/path] The remote user should not be specified, since it will be obtained from the motley_cue service. If you specify a username for a host, then it will be used; it will be assumed that this specific host does not use motley_cue, and by extension, token authentication; you will have to handle authentication for this host on your own. When no Access Token source is specified, the remote host is queried for supported issuers; if only one issuer is supported, this is used to retrieve the token from the oidc-agent. --- Not implemented --- | 2.29379 | 2 |
Chapter_10/ch10_ex2a.py | pauldevos/Mastering-Object-Oriented-Python-Second-Edition | 108 | 6613438 | <reponame>pauldevos/Mastering-Object-Oriented-Python-Second-Edition
#!/usr/bin/env python3.7
"""
Mastering Object-Oriented Python 2e
Code Examples for Mastering Object-Oriented Python 2nd Edition
Chapter 10. Example 2. YAML (part a)
"""
# Persistence Classes
# ========================================
# A detail class for micro-blog posts
import datetime
from typing import List, Optional, Dict, Any
from dataclasses import dataclass
from pathlib import Path
from Chapter_10.ch10_ex1 import Post, Blog, travel, rst_render
from Chapter_10.ch10_ex2 import Suit, Card, FaceCard, AceCard
# YAML
# ===================
import yaml
# Example 1: That's it.
# ######################
# Start with original definitions
test_yaml = """
>>> text = yaml.dump(travel)
>>> print(text)
!!python/object:Chapter_10.ch10_ex1.Blog
entries:
- !!python/object:Chapter_10.ch10_ex1.Post
date: 2013-11-14 17:25:00
rst_text: "Some embarrassing revelation. Including \\u2639 and \\u2693\\uFE0E"
tags:
- '#RedRanger'
- '#Whitby42'
- '#ICW'
title: Hard Aground
- !!python/object:Chapter_10.ch10_ex1.Post
date: 2013-11-18 15:30:00
rst_text: Some witty epigram. Including < & > characters.
tags:
- '#RedRanger'
- '#Whitby42'
- '#Mistakes'
title: Anchor Follies
title: Travel
<BLANKLINE>
>>> copy = yaml.load(text)
>>> print(type(copy), copy.title)
<class 'Chapter_10.ch10_ex1.Blog'> Travel
>>> for p in copy.entries:
... print(p.date.year, p.date.month, p.date.day, p.title, p.tags)
2013 11 14 Hard Aground ['#RedRanger', '#Whitby42', '#ICW']
2013 11 18 Anchor Follies ['#RedRanger', '#Whitby42', '#Mistakes']
>>> text2 = yaml.dump(travel, allow_unicode=True)
>>> print(text2)
!!python/object:Chapter_10.ch10_ex1.Blog
entries:
- !!python/object:Chapter_10.ch10_ex1.Post
date: 2013-11-14 17:25:00
rst_text: Some embarrassing revelation. Including ☹ and ⚓︎
tags:
- '#RedRanger'
- '#Whitby42'
- '#ICW'
title: Hard Aground
- !!python/object:Chapter_10.ch10_ex1.Post
date: 2013-11-18 15:30:00
rst_text: Some witty epigram. Including < & > characters.
tags:
- '#RedRanger'
- '#Whitby42'
- '#Mistakes'
title: Anchor Follies
title: Travel
<BLANKLINE>
"""
with (Path.cwd()/"data"/"ch10.yaml").open("w", encoding="UTF-8") as target:
yaml.dump(travel, target)
# Example 2: Cards
# ###################
deck = [AceCard("A", Suit.Clubs), Card("2", Suit.Hearts), FaceCard("K", Suit.Diamonds)]
test_yaml_dump = """
>>> text = yaml.dump(deck, allow_unicode=True)
>>> print(text)
- !!python/object:Chapter_10.ch10_ex2.AceCard
hard: 1
rank: A
soft: 11
suit: !!python/object/apply:Chapter_10.ch10_ex2.Suit
- ♣
- !!python/object:Chapter_10.ch10_ex2.Card
hard: 2
rank: '2'
soft: 2
suit: !!python/object/apply:Chapter_10.ch10_ex2.Suit
- ♥
- !!python/object:Chapter_10.ch10_ex2.FaceCard
hard: 10
rank: K
soft: 10
suit: !!python/object/apply:Chapter_10.ch10_ex2.Suit
- ♦
<BLANKLINE>
"""
__test__ = {name: value for name, value in locals().items() if name.startswith("test_")}
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=False)
| #!/usr/bin/env python3.7
"""
Mastering Object-Oriented Python 2e
Code Examples for Mastering Object-Oriented Python 2nd Edition
Chapter 10. Example 2. YAML (part a)
"""
# Persistence Classes
# ========================================
# A detail class for micro-blog posts
import datetime
from typing import List, Optional, Dict, Any
from dataclasses import dataclass
from pathlib import Path
from Chapter_10.ch10_ex1 import Post, Blog, travel, rst_render
from Chapter_10.ch10_ex2 import Suit, Card, FaceCard, AceCard
# YAML
# ===================
import yaml
# Example 1: That's it.
# ######################
# Start with original definitions
test_yaml = """
>>> text = yaml.dump(travel)
>>> print(text)
!!python/object:Chapter_10.ch10_ex1.Blog
entries:
- !!python/object:Chapter_10.ch10_ex1.Post
date: 2013-11-14 17:25:00
rst_text: "Some embarrassing revelation. Including \\u2639 and \\u2693\\uFE0E"
tags:
- '#RedRanger'
- '#Whitby42'
- '#ICW'
title: Hard Aground
- !!python/object:Chapter_10.ch10_ex1.Post
date: 2013-11-18 15:30:00
rst_text: Some witty epigram. Including < & > characters.
tags:
- '#RedRanger'
- '#Whitby42'
- '#Mistakes'
title: Anchor Follies
title: Travel
<BLANKLINE>
>>> copy = yaml.load(text)
>>> print(type(copy), copy.title)
<class 'Chapter_10.ch10_ex1.Blog'> Travel
>>> for p in copy.entries:
... print(p.date.year, p.date.month, p.date.day, p.title, p.tags)
2013 11 14 Hard Aground ['#RedRanger', '#Whitby42', '#ICW']
2013 11 18 Anchor Follies ['#RedRanger', '#Whitby42', '#Mistakes']
>>> text2 = yaml.dump(travel, allow_unicode=True)
>>> print(text2)
!!python/object:Chapter_10.ch10_ex1.Blog
entries:
- !!python/object:Chapter_10.ch10_ex1.Post
date: 2013-11-14 17:25:00
rst_text: Some embarrassing revelation. Including ☹ and ⚓︎
tags:
- '#RedRanger'
- '#Whitby42'
- '#ICW'
title: Hard Aground
- !!python/object:Chapter_10.ch10_ex1.Post
date: 2013-11-18 15:30:00
rst_text: Some witty epigram. Including < & > characters.
tags:
- '#RedRanger'
- '#Whitby42'
- '#Mistakes'
title: Anchor Follies
title: Travel
<BLANKLINE>
"""
with (Path.cwd()/"data"/"ch10.yaml").open("w", encoding="UTF-8") as target:
yaml.dump(travel, target)
# Example 2: Cards
# ###################
deck = [AceCard("A", Suit.Clubs), Card("2", Suit.Hearts), FaceCard("K", Suit.Diamonds)]
test_yaml_dump = """
>>> text = yaml.dump(deck, allow_unicode=True)
>>> print(text)
- !!python/object:Chapter_10.ch10_ex2.AceCard
hard: 1
rank: A
soft: 11
suit: !!python/object/apply:Chapter_10.ch10_ex2.Suit
- ♣
- !!python/object:Chapter_10.ch10_ex2.Card
hard: 2
rank: '2'
soft: 2
suit: !!python/object/apply:Chapter_10.ch10_ex2.Suit
- ♥
- !!python/object:Chapter_10.ch10_ex2.FaceCard
hard: 10
rank: K
soft: 10
suit: !!python/object/apply:Chapter_10.ch10_ex2.Suit
- ♦
<BLANKLINE>
"""
__test__ = {name: value for name, value in locals().items() if name.startswith("test_")}
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=False) | en | 0.519426 | #!/usr/bin/env python3.7 Mastering Object-Oriented Python 2e Code Examples for Mastering Object-Oriented Python 2nd Edition Chapter 10. Example 2. YAML (part a) # Persistence Classes # ======================================== # A detail class for micro-blog posts # YAML # =================== # Example 1: That's it. # ###################### # Start with original definitions >>> text = yaml.dump(travel) >>> print(text) !!python/object:Chapter_10.ch10_ex1.Blog entries: - !!python/object:Chapter_10.ch10_ex1.Post date: 2013-11-14 17:25:00 rst_text: "Some embarrassing revelation. Including \\u2639 and \\u2693\\uFE0E" tags: - '#RedRanger' - '#Whitby42' - '#ICW' title: Hard Aground - !!python/object:Chapter_10.ch10_ex1.Post date: 2013-11-18 15:30:00 rst_text: Some witty epigram. Including < & > characters. tags: - '#RedRanger' - '#Whitby42' - '#Mistakes' title: Anchor Follies title: Travel <BLANKLINE> >>> copy = yaml.load(text) >>> print(type(copy), copy.title) <class 'Chapter_10.ch10_ex1.Blog'> Travel >>> for p in copy.entries: ... print(p.date.year, p.date.month, p.date.day, p.title, p.tags) 2013 11 14 Hard Aground ['#RedRanger', '#Whitby42', '#ICW'] 2013 11 18 Anchor Follies ['#RedRanger', '#Whitby42', '#Mistakes'] >>> text2 = yaml.dump(travel, allow_unicode=True) >>> print(text2) !!python/object:Chapter_10.ch10_ex1.Blog entries: - !!python/object:Chapter_10.ch10_ex1.Post date: 2013-11-14 17:25:00 rst_text: Some embarrassing revelation. Including ☹ and ⚓︎ tags: - '#RedRanger' - '#Whitby42' - '#ICW' title: Hard Aground - !!python/object:Chapter_10.ch10_ex1.Post date: 2013-11-18 15:30:00 rst_text: Some witty epigram. Including < & > characters. 
tags: - '#RedRanger' - '#Whitby42' - '#Mistakes' title: Anchor Follies title: Travel <BLANKLINE> # Example 2: Cards # ################### >>> text = yaml.dump(deck, allow_unicode=True) >>> print(text) - !!python/object:Chapter_10.ch10_ex2.AceCard hard: 1 rank: A soft: 11 suit: !!python/object/apply:Chapter_10.ch10_ex2.Suit - ♣ - !!python/object:Chapter_10.ch10_ex2.Card hard: 2 rank: '2' soft: 2 suit: !!python/object/apply:Chapter_10.ch10_ex2.Suit - ♥ - !!python/object:Chapter_10.ch10_ex2.FaceCard hard: 10 rank: K soft: 10 suit: !!python/object/apply:Chapter_10.ch10_ex2.Suit - ♦ <BLANKLINE> | 3.224543 | 3 |
utils/usergrid-util-python/usergrid_tools/general/url_tester.py | snoopdave/incubator-usergrid | 788 | 6613439 | <gh_stars>100-1000
# */
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing,
# * software distributed under the License is distributed on an
# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# * KIND, either express or implied. See the License for the
# * specific language governing permissions and limitations
# * under the License.
# */
import datetime
import time
import numpy
import requests
__author__ = '<EMAIL>'
# This will call a URL over and over to check the latency of the call
def total_milliseconds(td):
return (td.microseconds + td.seconds * 1000000) / 1000
url_template = "{protocol}://{host}:{port}/{org}/{app}/{collection}?ql={ql}&client_id={client_id}&client_secret={client_secret}"
environments = {
'local': {
'protocol': 'http',
'host': 'localhost',
'port': 8080,
'org': 'myOrg',
'app': 'myApp',
'collection': 'myEntities',
'ql': 'select *',
'client_id': '<<client_id>>',
'client_secret': '<<client_secret>>'
}
}
ENV = 'local'
data = environments.get(ENV)
if data is None:
print 'didn\'t find map entry for data'
exit(1)
x = 0
SLEEP = .5
count_under_one = 0.0
count_over = 0.0
percent_under_one = 100.0
total_time = 0
print url_template.format(**data)
response_times = []
while True:
x += 1
target_url = url_template.format(**data)
r = requests.get(url=target_url)
response_time = total_milliseconds(r.elapsed)
total_time += response_time
# print '%s / %s' % (r.elapsed, total_milliseconds(r.elapsed))
the_date = datetime.datetime.utcnow()
if r.status_code != 200:
print '%s | %s: %s in %s | %s' % (the_date, x, r.status_code, response_time, r.text)
else:
response_times.append(response_time)
if response_time < 2000:
count_under_one += 1
elif response_time > 10000:
count_over += 1
percent_under_one = round(100 * (count_under_one / x), 2)
percent_over = round(100 * (count_over / x), 2)
# print '%s | %s: %s in %s | Count: %s | Avg: %s | under 2s: %s / %s%% | over 10s: %s / %s%%' % (
# the_date, x, r.status_code, response_time, len(r.json().get('entities')), (total_time / x), count_under_one,
# percent_under_one, count_over, percent_over)
print '%s | %s: %s in %s | Count: %s | Avg: %s | 99th: %s | 90th: %s | 50th: %s | 75th: %s | 25th: %s' % (
the_date, x, r.status_code, response_time, r.json().get('count'), (total_time / x),
numpy.percentile(response_times, 99),
numpy.percentile(response_times, 90),
numpy.percentile(response_times, 75),
numpy.percentile(response_times, 50),
numpy.percentile(response_times, 25))
time.sleep(SLEEP)
| # */
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing,
# * software distributed under the License is distributed on an
# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# * KIND, either express or implied. See the License for the
# * specific language governing permissions and limitations
# * under the License.
# */
import datetime
import time
import numpy
import requests
__author__ = '<EMAIL>'
# This will call a URL over and over to check the latency of the call
def total_milliseconds(td):
return (td.microseconds + td.seconds * 1000000) / 1000
url_template = "{protocol}://{host}:{port}/{org}/{app}/{collection}?ql={ql}&client_id={client_id}&client_secret={client_secret}"
environments = {
'local': {
'protocol': 'http',
'host': 'localhost',
'port': 8080,
'org': 'myOrg',
'app': 'myApp',
'collection': 'myEntities',
'ql': 'select *',
'client_id': '<<client_id>>',
'client_secret': '<<client_secret>>'
}
}
ENV = 'local'
data = environments.get(ENV)
if data is None:
print 'didn\'t find map entry for data'
exit(1)
x = 0
SLEEP = .5
count_under_one = 0.0
count_over = 0.0
percent_under_one = 100.0
total_time = 0
print url_template.format(**data)
response_times = []
while True:
x += 1
target_url = url_template.format(**data)
r = requests.get(url=target_url)
response_time = total_milliseconds(r.elapsed)
total_time += response_time
# print '%s / %s' % (r.elapsed, total_milliseconds(r.elapsed))
the_date = datetime.datetime.utcnow()
if r.status_code != 200:
print '%s | %s: %s in %s | %s' % (the_date, x, r.status_code, response_time, r.text)
else:
response_times.append(response_time)
if response_time < 2000:
count_under_one += 1
elif response_time > 10000:
count_over += 1
percent_under_one = round(100 * (count_under_one / x), 2)
percent_over = round(100 * (count_over / x), 2)
# print '%s | %s: %s in %s | Count: %s | Avg: %s | under 2s: %s / %s%% | over 10s: %s / %s%%' % (
# the_date, x, r.status_code, response_time, len(r.json().get('entities')), (total_time / x), count_under_one,
# percent_under_one, count_over, percent_over)
print '%s | %s: %s in %s | Count: %s | Avg: %s | 99th: %s | 90th: %s | 50th: %s | 75th: %s | 25th: %s' % (
the_date, x, r.status_code, response_time, r.json().get('count'), (total_time / x),
numpy.percentile(response_times, 99),
numpy.percentile(response_times, 90),
numpy.percentile(response_times, 75),
numpy.percentile(response_times, 50),
numpy.percentile(response_times, 25))
time.sleep(SLEEP) | en | 0.755305 | # */ # * Licensed to the Apache Software Foundation (ASF) under one # * or more contributor license agreements. See the NOTICE file # * distributed with this work for additional information # * regarding copyright ownership. The ASF licenses this file # * to you under the Apache License, Version 2.0 (the # * "License"); you may not use this file except in compliance # * with the License. You may obtain a copy of the License at # * # * http://www.apache.org/licenses/LICENSE-2.0 # * # * Unless required by applicable law or agreed to in writing, # * software distributed under the License is distributed on an # * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # * KIND, either express or implied. See the License for the # * specific language governing permissions and limitations # * under the License. # */ # This will call a URL over and over to check the latency of the call # print '%s / %s' % (r.elapsed, total_milliseconds(r.elapsed)) # print '%s | %s: %s in %s | Count: %s | Avg: %s | under 2s: %s / %s%% | over 10s: %s / %s%%' % ( # the_date, x, r.status_code, response_time, len(r.json().get('entities')), (total_time / x), count_under_one, # percent_under_one, count_over, percent_over) | 2.22928 | 2 |
lib/data/datasets/hpatches.py | lin-zju/descriptor-space | 0 | 6613440 | <reponame>lin-zju/descriptor-space<gh_stars>0
"""
Microsoft COCO dataset adapted for descriptor training. Random homography is
applied to every training example.
The images are resized to 240x320
"""
import torch
import random
import glob
import os
import numpy as np
from skimage import io
import cv2
from torch.utils.data import Dataset
from lib.utils.homography import sample_homography, find_map_and_mask, refine_homography
from lib.utils.keypoint import sample_uniform_keypoints, sample_harris_keypoints
from lib.utils.convert import normalize_torch, gray2RGB
from lib.utils.misc import resize_max_length, compute_scale
class Hpatches(Dataset):
def __init__(self, root, mode, transforms=None, size=640, num_kps=1000):
"""
:param mode: either 'test' or 'val'
"""
Dataset.__init__(self)
self.img_paths = self.get_img_paths(root)
self.transforms = transforms
self.size = size
self.num_kps = num_kps
self.mode = mode
def __getitem__(self, index):
"""
:return:
data:
img0: (3, H, W), float
img1: (3, H, W), float
targets:
kps0: (N, 2), long Tensor
kps1: (N, 2), long Tensor
map: (H, W, 2), long Tensor
mask: (H, W), byte Tensor
H: (3, 3), numpy, float
"""
img0_path, img1_path, H_path = self.img_paths[index]
# load images
img0, scale_h0, scale_w0 = self.read_img(img0_path, self.size)
img1, scale_h1, scale_w1 = self.read_img(img1_path, self.size)
# load H. Note scale changes might be applied
scale_ratio = scale_h0, scale_w0, scale_h1, scale_w1
H = self.read_H(H_path, scale_ratio)
# find flow map and mask
# map: int, mask: boolean
h0, w0 = img0.shape[:2]
h1, w1 = img1.shape[:2]
map, mask = find_map_and_mask(h0, w0, h1, w1, H)
# find keypoints
# kps: int, (N, 2)
kps0, kps1 = sample_harris_keypoints(img0, img1, H, self.num_kps)
# we should assume that only data augmentation are applied
if self.transforms:
img0 = self.transforms(img0)
img1 = self.transforms(img1)
# to tensor
img0 = torch.from_numpy(img0).permute(2, 0, 1).float()
img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
# to range (0, 1)
img0 = img0 / 255.0
img1 = img1 / 255.0
# normalize
img0 = normalize_torch(img0)
img1 = normalize_torch(img1)
# compute relative scale
right_scale = compute_scale(np.linalg.inv(H), h1, w1)
right_scale = 1.0 / right_scale
left_scale = compute_scale(H, h0, w0)
left_scale = 1.0 / left_scale
# keypoints
kps0 = torch.from_numpy(kps0).float()
kps1 = torch.from_numpy(kps1).float()
map = torch.from_numpy(map).float()
mask = torch.from_numpy(mask.astype(int)).byte()
left_scale = torch.from_numpy(left_scale).float()
right_scale = torch.from_numpy(right_scale).float()
data = {
'img0': img0,
'img1': img1,
'left_scale': left_scale,
'right_scale': right_scale
}
targets = {
'kps0': kps0,
'kps1': kps1,
'map': map,
'mask': mask,
'H': H,
}
return data, targets
def __len__(self):
return len(self.img_paths)
@staticmethod
def get_img_paths(root):
"""
Retrieve image paths for all images.
"""
filelist = []
for subdir in os.scandir(root):
subdir = subdir.path
ref = os.path.join(subdir, '1.ppm')
for i in range(2, 6 + 1):
H = os.path.join(subdir, 'H_1_{}'.format(i))
other = os.path.join(subdir, '{}.ppm'.format(i))
filelist.append((ref, other, H))
return filelist
@staticmethod
def read_img(img_path, scale=None):
"""
Read an image, and resize it
:param img_path: path to image
:param scale: either None, int or tuple. For tuple, this should be (w, h)
:return:
img: the resized image
scale_h, scale_w: scale changes
"""
img = gray2RGB(io.imread(img_path))
oh, ow = img.shape[:2]
if isinstance(scale, int):
img = resize_max_length(img, scale)
elif isinstance(scale, tuple):
img = cv2.resize(img, scale)
h, w = img.shape[:2]
scale_h, scale_w = h / oh, w / ow
return img, scale_h, scale_w
@staticmethod
def read_H(H_path, scale_ratio):
"""
Read the homography
:param H_path: the path
:param scale_ratio:
scale_h0, scale_w0, scale_h1, scale_w1.
This will be used to rescale H.
:return: H, numpy, (3, 3)
"""
scale_h0, scale_w0, scale_h1, scale_w1 = scale_ratio
H = np.loadtxt(H_path).astype(np.float32)
H = np.diag([scale_w1, scale_h1, 1.0]).dot(H).dot(np.linalg.inv(np.diag([scale_w0, scale_h0, 1.0])))
return H
class HpatchesViewpoint(Hpatches):
    def __init__(self, *args, **kargs):
        """Viewpoint-change subset of HPatches (the 'v_*' sequences)."""
        Hpatches.__init__(self, *args, **kargs)
        # keep only viewpoint-change sequences
        self.img_paths = sorted([x for x in self.img_paths if '/v' in x[0]])
        # Deterministic val/test split: 95 pairs for validation, the rest for
        # testing. Use a private Random instance so constructing the dataset
        # does not reseed / mutate the global `random` module state;
        # Random(233).shuffle yields the same permutation as the original
        # random.seed(233); random.shuffle.
        random.Random(233).shuffle(self.img_paths)
        if self.mode == 'val':
            self.img_paths = self.img_paths[:95]
        elif self.mode == 'test':
            self.img_paths = self.img_paths[95:]
        elif self.mode == 'all':
            pass
        # NOTE(review): any other mode value silently behaves like 'all'; a
        # commented-out assert in the original suggests validation was
        # intended -- confirm before tightening.
class HpatchesIllum(Hpatches):
    def __init__(self, *args, **kargs):
        """Illumination-change subset of HPatches (the 'i_*' sequences)."""
        Hpatches.__init__(self, *args, **kargs)
        # keep only illumination-change sequences
        self.img_paths = list(filter(lambda entry: '/i' in entry[0], self.img_paths))
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from lib.utils.convert import tonumpyimg, tonumpy
    from lib.utils.visualize import draw_match

    # Visual sanity check: draw the ground-truth keypoint matches per pair.
    # `mode` is a required constructor argument; the original call omitted it
    # and raised a TypeError.
    ds = Hpatches('data/HPATCHES', 'all')
    for data, targets in ds:
        left, right = data['img0'], data['img1']
        mask = targets['mask']
        kps0, kps1 = targets['kps0'], targets['kps1']
        left, right, mask = [tonumpyimg(x) for x in [left, right, mask]]
        # (the original also warped `left` by H but never displayed it)
        match = draw_match(left, kps0.numpy(), right, kps1.numpy())
        plt.imshow(match)
        plt.show()
| """
Microsoft COCO dataset adapted for descriptor training. Random homography is
applied to every training example.
The images are resized to 240x320
"""
import torch
import random
import glob
import os
import numpy as np
from skimage import io
import cv2
from torch.utils.data import Dataset
from lib.utils.homography import sample_homography, find_map_and_mask, refine_homography
from lib.utils.keypoint import sample_uniform_keypoints, sample_harris_keypoints
from lib.utils.convert import normalize_torch, gray2RGB
from lib.utils.misc import resize_max_length, compute_scale
class Hpatches(Dataset):
    def __init__(self, root, mode, transforms=None, size=640, num_kps=1000):
        """
        HPatches pairs dataset: each item is a sequence's reference image,
        one of its other images, and the ground-truth homography.

        :param root: dataset root with one sub-directory per sequence
        :param mode: split identifier (e.g. 'test' or 'val'); stored as-is
            here and interpreted by subclasses
        :param transforms: optional augmentation applied to the raw images
        :param size: maximum edge length the images are resized to
        :param num_kps: number of Harris keypoints sampled per pair
        """
        Dataset.__init__(self)
        self.img_paths = self.get_img_paths(root)
        self.transforms = transforms
        self.size = size
        self.num_kps = num_kps
        self.mode = mode

    def __getitem__(self, index):
        """
        :return:
            data:
                img0: (3, H, W), float
                img1: (3, H, W), float
            targets:
                kps0: (N, 2), long Tensor
                kps1: (N, 2), long Tensor
                map: (H, W, 2), long Tensor
                mask: (H, W), byte Tensor
                H: (3, 3), numpy, float
        """
        img0_path, img1_path, H_path = self.img_paths[index]
        # load images
        img0, scale_h0, scale_w0 = self.read_img(img0_path, self.size)
        img1, scale_h1, scale_w1 = self.read_img(img1_path, self.size)
        # load H. Note scale changes might be applied
        scale_ratio = scale_h0, scale_w0, scale_h1, scale_w1
        H = self.read_H(H_path, scale_ratio)
        # find flow map and mask
        # map: int, mask: boolean
        h0, w0 = img0.shape[:2]
        h1, w1 = img1.shape[:2]
        map, mask = find_map_and_mask(h0, w0, h1, w1, H)
        # find keypoints
        # kps: int, (N, 2)
        kps0, kps1 = sample_harris_keypoints(img0, img1, H, self.num_kps)
        # we should assume that only data augmentation are applied
        if self.transforms:
            img0 = self.transforms(img0)
            img1 = self.transforms(img1)
        # to tensor
        img0 = torch.from_numpy(img0).permute(2, 0, 1).float()
        img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
        # to range (0, 1)
        img0 = img0 / 255.0
        img1 = img1 / 255.0
        # normalize
        img0 = normalize_torch(img0)
        img1 = normalize_torch(img1)
        # compute relative scale
        right_scale = compute_scale(np.linalg.inv(H), h1, w1)
        right_scale = 1.0 / right_scale
        left_scale = compute_scale(H, h0, w0)
        left_scale = 1.0 / left_scale
        # keypoints
        kps0 = torch.from_numpy(kps0).float()
        kps1 = torch.from_numpy(kps1).float()
        map = torch.from_numpy(map).float()
        mask = torch.from_numpy(mask.astype(int)).byte()
        left_scale = torch.from_numpy(left_scale).float()
        right_scale = torch.from_numpy(right_scale).float()
        data = {
            'img0': img0,
            'img1': img1,
            'left_scale': left_scale,
            'right_scale': right_scale
        }
        targets = {
            'kps0': kps0,
            'kps1': kps1,
            'map': map,
            'mask': mask,
            'H': H,
        }
        return data, targets

    def __len__(self):
        # one dataset item per (reference, other, homography) triple
        return len(self.img_paths)

    @staticmethod
    def get_img_paths(root):
        """
        Retrieve (reference, other, homography) path triples for all sequences.
        Non-directory entries under *root* are skipped (the original treated
        stray files as sequence directories and produced bogus paths), and
        sequences are sorted so the ordering is deterministic across
        platforms (os.scandir order is arbitrary).
        """
        filelist = []
        for entry in sorted(os.scandir(root), key=lambda e: e.path):
            if not entry.is_dir():
                # ignore stray files (e.g. READMEs) in the dataset root
                continue
            subdir = entry.path
            ref = os.path.join(subdir, '1.ppm')
            for i in range(2, 6 + 1):
                H = os.path.join(subdir, 'H_1_{}'.format(i))
                other = os.path.join(subdir, '{}.ppm'.format(i))
                filelist.append((ref, other, H))
        return filelist

    @staticmethod
    def read_img(img_path, scale=None):
        """
        Read an image, and resize it
        :param img_path: path to image
        :param scale: either None, int or tuple. For tuple, this should be (w, h)
        :return:
            img: the resized image
            scale_h, scale_w: scale changes
        """
        img = gray2RGB(io.imread(img_path))
        oh, ow = img.shape[:2]
        if isinstance(scale, int):
            img = resize_max_length(img, scale)
        elif isinstance(scale, tuple):
            img = cv2.resize(img, scale)
        h, w = img.shape[:2]
        scale_h, scale_w = h / oh, w / ow
        return img, scale_h, scale_w

    @staticmethod
    def read_H(H_path, scale_ratio):
        """
        Read the homography
        :param H_path: the path
        :param scale_ratio:
            scale_h0, scale_w0, scale_h1, scale_w1.
            This will be used to rescale H.
        :return: H, numpy, (3, 3)
        """
        scale_h0, scale_w0, scale_h1, scale_w1 = scale_ratio
        H = np.loadtxt(H_path).astype(np.float32)
        # Map resized-img0 coordinates back to the original frame, apply H,
        # then map into the resized-img1 frame. diag([1/s, ...]) is exactly
        # the inverse of diag([s, ...]), so no explicit matrix inversion is
        # needed.
        H = np.diag([scale_w1, scale_h1, 1.0]).dot(H).dot(
            np.diag([1.0 / scale_w0, 1.0 / scale_h0, 1.0]))
        return H
class HpatchesViewpoint(Hpatches):
    def __init__(self, *args, **kargs):
        """Viewpoint-change subset of HPatches (the 'v_*' sequences)."""
        Hpatches.__init__(self, *args, **kargs)
        # keep only viewpoint-change sequences
        self.img_paths = sorted([x for x in self.img_paths if '/v' in x[0]])
        # Deterministic val/test split: 95 pairs for validation, the rest for
        # testing. Use a private Random instance so constructing the dataset
        # does not reseed / mutate the global `random` module state;
        # Random(233).shuffle yields the same permutation as the original
        # random.seed(233); random.shuffle.
        random.Random(233).shuffle(self.img_paths)
        if self.mode == 'val':
            self.img_paths = self.img_paths[:95]
        elif self.mode == 'test':
            self.img_paths = self.img_paths[95:]
        elif self.mode == 'all':
            pass
        # NOTE(review): any other mode value silently behaves like 'all'; a
        # commented-out assert in the original suggests validation was
        # intended -- confirm before tightening.
class HpatchesIllum(Hpatches):
    def __init__(self, *args, **kargs):
        """Illumination-change subset of HPatches (the 'i_*' sequences)."""
        Hpatches.__init__(self, *args, **kargs)
        # keep only illumination-change sequences
        self.img_paths = list(filter(lambda entry: '/i' in entry[0], self.img_paths))
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from lib.utils.convert import tonumpyimg, tonumpy
    from lib.utils.visualize import draw_match

    # Visual sanity check: draw the ground-truth keypoint matches per pair.
    # `mode` is a required constructor argument; the original call omitted it
    # and raised a TypeError.
    ds = Hpatches('data/HPATCHES', 'all')
    for data, targets in ds:
        left, right = data['img0'], data['img1']
        mask = targets['mask']
        kps0, kps1 = targets['kps0'], targets['kps1']
        left, right, mask = [tonumpyimg(x) for x in [left, right, mask]]
        # (the original also warped `left` by H but never displayed it)
        match = draw_match(left, kps0.numpy(), right, kps1.numpy())
        plt.imshow(match)
        plt.show()
tests/integration/session_api_tests/reset_host_weights_test.py | gglin001/popart | 61 | 6613441 | <gh_stars>10-100
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import numpy as np
import os
import popart
import pytest
from tempfile import TemporaryDirectory
# `import test_util` requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent))
import test_util as tu
def test_reset_host_weights_with_extra_tensor_in_onnx_model():
    """
    1. Create a training session, and a corresponding validation session
    2. The training session must contain some feature that means when writing
    the ONNX model back to the host, it contains extra initializers compared
    with the original (builder-generated) model. In this case we achieve this
    by using an SGD optimizer with momentum.
    3. Try resetting the weights of the validation session using the ONNX model
    with the additional momentum tensor (call resetHostWeights)
    4. Observe that a PopART exception is thrown
    5. Try again, but with ignoreWeightsInModelWithoutCorrespondingHostWeight.
    6. Observe that it succeeds
    """
    def getModelWithRandomWeights():
        # Tiny model: loss = L1(i0 @ w0), with one randomly initialised
        # 2x2 weight tensor w0.
        builder = popart.Builder()
        dShape = [2, 2]
        i0 = builder.addInputTensor(popart.TensorInfo("FLOAT", dShape))
        wData = np.random.rand(*dShape).astype(np.float32)
        w0 = builder.addInitializedInputTensor(wData)
        o = builder.aiOnnx.matmul([i0, w0])
        loss = builder.aiGraphcore.l1loss([o], 0.1)
        builder.addOutputTensor(loss)
        return builder
    device = tu.create_test_device()
    tr_builder = getModelWithRandomWeights()
    o = tr_builder.getOutputTensorIds()[0]
    # 1. & 2.
    # Training
    # Momentum makes the session keep an accumulator tensor per weight, which
    # becomes an extra initializer in the ONNX model streamed back to host.
    tr_opt = popart.SGD({"defaultMomentum": (0.01, True)})
    tr_sess = popart.TrainingSession(fnModel=tr_builder.getModelProto(),
                                     dataFlow=popart.DataFlow(1, []),
                                     loss=o,
                                     optimizer=tr_opt,
                                     deviceInfo=device)
    tr_sess.prepareDevice()
    with TemporaryDirectory() as tmpdir:
        tmpfile = os.path.join(tmpdir, "tr_model.onnx")
        tr_sess.modelToHost(tmpfile)
        # Validation (with different model proto weights)
        va_builder = getModelWithRandomWeights()
        va_opts = popart.SessionOptions()
        va_opts.constantWeights = False
        va_sess = popart.InferenceSession(fnModel=va_builder.getModelProto(),
                                          dataFlow=popart.DataFlow(1, [o]),
                                          deviceInfo=device,
                                          userOptions=va_opts)
        va_sess.prepareDevice()
        # 3. Try reset validation weights with training weights
        wId = [
            w for w in va_builder.getInputTensorIds()
            if va_builder.isInitializer(w)
        ][0]
        # The accumulator tensor exists in the saved training model but has
        # no corresponding host weight in the inference session.
        missing_tensor_name = popart.reservedAcclPrefix() + wId
        with pytest.raises(popart.popart_exception) as e_info:
            va_sess.resetHostWeights(tmpfile)
        # 4.
        assert e_info.value.args[
            0] == "resetWeights, no tensor '" + missing_tensor_name + "' in tensors"
        # 5. & 6. Try again, but this time ignore the missing tensor
        va_sess.resetHostWeights(
            tmpfile, ignoreWeightsInModelWithoutCorrespondingHostWeight=True)
| # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import numpy as np
import os
import popart
import pytest
from tempfile import TemporaryDirectory
# `import test_util` requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent))
import test_util as tu
def test_reset_host_weights_with_extra_tensor_in_onnx_model():
    """
    1. Create a training session, and a corresponding validation session
    2. The training session must contain some feature that means when writing
    the ONNX model back to the host, it contains extra initializers compared
    with the original (builder-generated) model. In this case we achieve this
    by using an SGD optimizer with momentum.
    3. Try resetting the weights of the validation session using the ONNX model
    with the additional momentum tensor (call resetHostWeights)
    4. Observe that a PopART exception is thrown
    5. Try again, but with ignoreWeightsInModelWithoutCorrespondingHostWeight.
    6. Observe that it succeeds
    """
    def getModelWithRandomWeights():
        # Tiny model: loss = L1(i0 @ w0), with one randomly initialised
        # 2x2 weight tensor w0.
        builder = popart.Builder()
        dShape = [2, 2]
        i0 = builder.addInputTensor(popart.TensorInfo("FLOAT", dShape))
        wData = np.random.rand(*dShape).astype(np.float32)
        w0 = builder.addInitializedInputTensor(wData)
        o = builder.aiOnnx.matmul([i0, w0])
        loss = builder.aiGraphcore.l1loss([o], 0.1)
        builder.addOutputTensor(loss)
        return builder
    device = tu.create_test_device()
    tr_builder = getModelWithRandomWeights()
    o = tr_builder.getOutputTensorIds()[0]
    # 1. & 2.
    # Training
    # Momentum makes the session keep an accumulator tensor per weight, which
    # becomes an extra initializer in the ONNX model streamed back to host.
    tr_opt = popart.SGD({"defaultMomentum": (0.01, True)})
    tr_sess = popart.TrainingSession(fnModel=tr_builder.getModelProto(),
                                     dataFlow=popart.DataFlow(1, []),
                                     loss=o,
                                     optimizer=tr_opt,
                                     deviceInfo=device)
    tr_sess.prepareDevice()
    with TemporaryDirectory() as tmpdir:
        tmpfile = os.path.join(tmpdir, "tr_model.onnx")
        tr_sess.modelToHost(tmpfile)
        # Validation (with different model proto weights)
        va_builder = getModelWithRandomWeights()
        va_opts = popart.SessionOptions()
        va_opts.constantWeights = False
        va_sess = popart.InferenceSession(fnModel=va_builder.getModelProto(),
                                          dataFlow=popart.DataFlow(1, [o]),
                                          deviceInfo=device,
                                          userOptions=va_opts)
        va_sess.prepareDevice()
        # 3. Try reset validation weights with training weights
        wId = [
            w for w in va_builder.getInputTensorIds()
            if va_builder.isInitializer(w)
        ][0]
        # The accumulator tensor exists in the saved training model but has
        # no corresponding host weight in the inference session.
        missing_tensor_name = popart.reservedAcclPrefix() + wId
        with pytest.raises(popart.popart_exception) as e_info:
            va_sess.resetHostWeights(tmpfile)
        # 4.
        assert e_info.value.args[
            0] == "resetWeights, no tensor '" + missing_tensor_name + "' in tensors"
        # 5. & 6. Try again, but this time ignore the missing tensor
        va_sess.resetHostWeights(
            tmpfile, ignoreWeightsInModelWithoutCorrespondingHostWeight=True)
deepts/utils.py | haohy/deepts_torch | 0 | 6613442 |
import os
import numpy as np
def set_logging():
    """Configure root logging at INFO level and return the logging module."""
    import logging
    log_format = '[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_format)
    return logging
|
import os
import numpy as np
def set_logging():
    """Configure root logging at INFO level and return the logging module."""
    import logging
    fmt = '[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s'
    logging.basicConfig(level=logging.INFO, format=fmt)
    return logging
| none | 1 | 2.760568 | 3 | |
PyOS-session-data/Apps/File Browser.py | CoderDuino/PyOS-3 | 0 | 6613443 | <filename>PyOS-session-data/Apps/File Browser.py
import Legacy
# Thin launcher: the actual file-browser UI lives in the legacy app script.
Legacy.runlegacyapp("Files-legacy.py")
| <filename>PyOS-session-data/Apps/File Browser.py
import Legacy
# Thin launcher: the actual file-browser UI lives in the legacy app script.
Legacy.runlegacyapp("Files-legacy.py")
| none | 1 | 1.354924 | 1 | |
survae/transforms/bijections/functional/mixtures/__init__.py | alisiahkoohi/survae_flows | 262 | 6613444 | from .params import *
from .gaussian_mixture import *
from .logistic_mixture import *
from .logistic_mixture_censored import *
| from .params import *
from .gaussian_mixture import *
from .logistic_mixture import *
from .logistic_mixture_censored import *
| none | 1 | 1.106269 | 1 | |
ops_automation_sdk/api/menu/update_menu_pb2.py | easyopsapis/easyops-api-python | 5 | 6613445 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: update_menu.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='update_menu.proto',
package='menu',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11update_menu.proto\x12\x04menu\" \n\x12UpdateMenuResponse\x12\n\n\x02id\x18\x01 \x01(\t\"u\n\x19UpdateMenuResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12&\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x18.menu.UpdateMenuResponseb\x06proto3')
)
_UPDATEMENURESPONSE = _descriptor.Descriptor(
name='UpdateMenuResponse',
full_name='menu.UpdateMenuResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='menu.UpdateMenuResponse.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=27,
serialized_end=59,
)
_UPDATEMENURESPONSEWRAPPER = _descriptor.Descriptor(
name='UpdateMenuResponseWrapper',
full_name='menu.UpdateMenuResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='menu.UpdateMenuResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='menu.UpdateMenuResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='menu.UpdateMenuResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='menu.UpdateMenuResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=178,
)
_UPDATEMENURESPONSEWRAPPER.fields_by_name['data'].message_type = _UPDATEMENURESPONSE
DESCRIPTOR.message_types_by_name['UpdateMenuResponse'] = _UPDATEMENURESPONSE
DESCRIPTOR.message_types_by_name['UpdateMenuResponseWrapper'] = _UPDATEMENURESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UpdateMenuResponse = _reflection.GeneratedProtocolMessageType('UpdateMenuResponse', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMENURESPONSE,
'__module__' : 'update_menu_pb2'
# @@protoc_insertion_point(class_scope:menu.UpdateMenuResponse)
})
_sym_db.RegisterMessage(UpdateMenuResponse)
UpdateMenuResponseWrapper = _reflection.GeneratedProtocolMessageType('UpdateMenuResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMENURESPONSEWRAPPER,
'__module__' : 'update_menu_pb2'
# @@protoc_insertion_point(class_scope:menu.UpdateMenuResponseWrapper)
})
_sym_db.RegisterMessage(UpdateMenuResponseWrapper)
# @@protoc_insertion_point(module_scope)
| # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: update_menu.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='update_menu.proto',
package='menu',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11update_menu.proto\x12\x04menu\" \n\x12UpdateMenuResponse\x12\n\n\x02id\x18\x01 \x01(\t\"u\n\x19UpdateMenuResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12&\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x18.menu.UpdateMenuResponseb\x06proto3')
)
_UPDATEMENURESPONSE = _descriptor.Descriptor(
name='UpdateMenuResponse',
full_name='menu.UpdateMenuResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='menu.UpdateMenuResponse.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=27,
serialized_end=59,
)
_UPDATEMENURESPONSEWRAPPER = _descriptor.Descriptor(
name='UpdateMenuResponseWrapper',
full_name='menu.UpdateMenuResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='menu.UpdateMenuResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='menu.UpdateMenuResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='menu.UpdateMenuResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='menu.UpdateMenuResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=178,
)
_UPDATEMENURESPONSEWRAPPER.fields_by_name['data'].message_type = _UPDATEMENURESPONSE
DESCRIPTOR.message_types_by_name['UpdateMenuResponse'] = _UPDATEMENURESPONSE
DESCRIPTOR.message_types_by_name['UpdateMenuResponseWrapper'] = _UPDATEMENURESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UpdateMenuResponse = _reflection.GeneratedProtocolMessageType('UpdateMenuResponse', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMENURESPONSE,
'__module__' : 'update_menu_pb2'
# @@protoc_insertion_point(class_scope:menu.UpdateMenuResponse)
})
_sym_db.RegisterMessage(UpdateMenuResponse)
UpdateMenuResponseWrapper = _reflection.GeneratedProtocolMessageType('UpdateMenuResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMENURESPONSEWRAPPER,
'__module__' : 'update_menu_pb2'
# @@protoc_insertion_point(class_scope:menu.UpdateMenuResponseWrapper)
})
_sym_db.RegisterMessage(UpdateMenuResponseWrapper)
# @@protoc_insertion_point(module_scope)
| en | 0.407126 | # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: update_menu.proto # @@protoc_insertion_point(imports) # @@protoc_insertion_point(class_scope:menu.UpdateMenuResponse) # @@protoc_insertion_point(class_scope:menu.UpdateMenuResponseWrapper) # @@protoc_insertion_point(module_scope) | 1.126353 | 1 |
src/evidencegraph/decode.py | PietroTotis/evidencegraph | 0 | 6613446 | # -*- coding: utf-8 -*-
'''
@author: <NAME>
'''
import networkx as nx
import copy
from operator import itemgetter
from .depparse.graph import Digraph as DepDigraph
from collections import defaultdict
from .argtree import ArgTree
def multidigraph_to_digraph(g, field='weight', func=max):
    '''
    Returns a DiGraph from a MultiDiGraph. If there are multiple edges
    from one node to the other, the edge is chosen which has the
    minimal or maximal value (per `func`) in some data field.

    >>> g = nx.MultiDiGraph()
    >>> g.add_edge(1, 2, weight=0.2, count=1000)
    >>> g.add_edge(1, 2, weight=0.8, count=0)
    >>> g.add_edge(2, 1, weight=0.4, count=1000)
    >>> g.add_edge(2, 1, weight=0.6, count=0)
    >>> d = multidigraph_to_digraph(g)
    >>> isinstance(d, nx.DiGraph)
    True
    >>> list(d.edges(data=True)) == [(1, 2, {'count': 0, 'weight': 0.8}), (2, 1, {'count': 0, 'weight': 0.6})]
    True
    '''
    f = nx.DiGraph()
    # share the graph-level attribute dict with the source graph
    f.graph = g.graph
    for n in g.nodes():
        for m in g.succ[n].keys():
            # pick the best parallel edge according to `field`
            ds = g.succ[n][m].values()
            d = func(ds, key=itemgetter(field))
            f.add_edge(n, m, **d)
    return f
def nxdigraph_to_depdigraph(g, field="weight"):
"""
Returns a depparse.Digraph from an nx.DiGraph.
`field` is the name of the data field to get the score from.
>>> g = nx.DiGraph()
>>> g.add_edge(1, 2, weight=0.3)
>>> g.add_edge(2, 3, weight=0.8)
>>> g.add_edge(3, 1, weight=0.1)
>>> g.add_edge(2, 1, weight=0.5)
>>> d = nxdigraph_to_depdigraph(g)
>>> list(d.iteredges())
[(1, 2), (2, 3), (2, 1), (3, 1), ('root', 1), ('root', 2), ('root', 3)]
"""
succs = defaultdict(list)
weights = {}
for s, t, d in g.edges(data=True):
w = d[field]
succs[s].append(t)
succs[t]
weights[(s, t)] = w
succs['root'] = list(succs.keys())
weights.update({('root', n): 0 for n in succs.keys()})
return DepDigraph(succs, get_score=lambda s, t: weights[(s, t)])
def find_mst(weg, from_root=False, field="weight"):
"""
Returns the ArgTree that is the minimum spanning tree
for the given nx.MultiDiGraph (such as e.g. an WeightedEvidenceGraph).
>>> g = nx.MultiDiGraph()
>>> g.add_edge(1, 2, weight=0.3)
>>> g.add_edge(2, 3, weight=0.8)
>>> g.add_edge(2, 3, weight=0.7)
>>> g.add_edge(3, 1, weight=0.1)
>>> g.add_edge(2, 1, weight=0.5)
>>> mst = find_mst(g)
>>> mst.edges(data=True)
[(1, 2, {'weight': 0.3}), (2, 3, {'weight': 0.8})]
"""
# make digraph of multidigraph
deg = multidigraph_to_digraph(weg, field=field)
# reverse graph if needed
g = deg if from_root else deg.reverse()
# convert to depparse digraph
dd = nxdigraph_to_depdigraph(g, field=field)
# find mst
m = dd.mst()
# convert depparse mst to ArgTree
out = ArgTree()
out.graph = copy.deepcopy(g.graph)
out.add_nodes_from([copy.deepcopy((n, d)) for n, d in g.nodes(data=True)])
for s, t in m.iteredges():
if s == 'root':
# don't add the root link
continue
out.add_edge(s, t, g.succ[s][t])
return out if from_root else out.reverse(copy=False)
| # -*- coding: utf-8 -*-
'''
@author: <NAME>
'''
import networkx as nx
import copy
from operator import itemgetter
from .depparse.graph import Digraph as DepDigraph
from collections import defaultdict
from .argtree import ArgTree
def multidigraph_to_digraph(g, field='weight', func=max):
    '''
    Returns a DiGraph from a MultiDiGraph. If there are multiple edges
    from one node to the other, the edge is chosen which has the
    minimal or maximal value (per `func`) in some data field.

    >>> g = nx.MultiDiGraph()
    >>> g.add_edge(1, 2, weight=0.2, count=1000)
    >>> g.add_edge(1, 2, weight=0.8, count=0)
    >>> g.add_edge(2, 1, weight=0.4, count=1000)
    >>> g.add_edge(2, 1, weight=0.6, count=0)
    >>> d = multidigraph_to_digraph(g)
    >>> isinstance(d, nx.DiGraph)
    True
    >>> list(d.edges(data=True)) == [(1, 2, {'count': 0, 'weight': 0.8}), (2, 1, {'count': 0, 'weight': 0.6})]
    True
    '''
    f = nx.DiGraph()
    # share the graph-level attribute dict with the source graph
    f.graph = g.graph
    for n in g.nodes():
        for m in g.succ[n].keys():
            # pick the best parallel edge according to `field`
            ds = g.succ[n][m].values()
            d = func(ds, key=itemgetter(field))
            f.add_edge(n, m, **d)
    return f
def nxdigraph_to_depdigraph(g, field="weight"):
"""
Returns a depparse.Digraph from an nx.DiGraph.
`field` is the name of the data field to get the score from.
>>> g = nx.DiGraph()
>>> g.add_edge(1, 2, weight=0.3)
>>> g.add_edge(2, 3, weight=0.8)
>>> g.add_edge(3, 1, weight=0.1)
>>> g.add_edge(2, 1, weight=0.5)
>>> d = nxdigraph_to_depdigraph(g)
>>> list(d.iteredges())
[(1, 2), (2, 3), (2, 1), (3, 1), ('root', 1), ('root', 2), ('root', 3)]
"""
succs = defaultdict(list)
weights = {}
for s, t, d in g.edges(data=True):
w = d[field]
succs[s].append(t)
succs[t]
weights[(s, t)] = w
succs['root'] = list(succs.keys())
weights.update({('root', n): 0 for n in succs.keys()})
return DepDigraph(succs, get_score=lambda s, t: weights[(s, t)])
def find_mst(weg, from_root=False, field="weight"):
"""
Returns the ArgTree that is the minimum spanning tree
for the given nx.MultiDiGraph (such as e.g. an WeightedEvidenceGraph).
>>> g = nx.MultiDiGraph()
>>> g.add_edge(1, 2, weight=0.3)
>>> g.add_edge(2, 3, weight=0.8)
>>> g.add_edge(2, 3, weight=0.7)
>>> g.add_edge(3, 1, weight=0.1)
>>> g.add_edge(2, 1, weight=0.5)
>>> mst = find_mst(g)
>>> mst.edges(data=True)
[(1, 2, {'weight': 0.3}), (2, 3, {'weight': 0.8})]
"""
# make digraph of multidigraph
deg = multidigraph_to_digraph(weg, field=field)
# reverse graph if needed
g = deg if from_root else deg.reverse()
# convert to depparse digraph
dd = nxdigraph_to_depdigraph(g, field=field)
# find mst
m = dd.mst()
# convert depparse mst to ArgTree
out = ArgTree()
out.graph = copy.deepcopy(g.graph)
out.add_nodes_from([copy.deepcopy((n, d)) for n, d in g.nodes(data=True)])
for s, t in m.iteredges():
if s == 'root':
# don't add the root link
continue
out.add_edge(s, t, g.succ[s][t])
return out if from_root else out.reverse(copy=False)
| en | 0.621079 | # -*- coding: utf-8 -*- @author: <NAME> Returns a DiGraph from a MultiDiGraph. If there are multiple edges from one node to the other, the edges is chosen which has the minimal or maximal value in some data field. >>> g = nx.MultiDiGraph() >>> g.add_edge(1, 2, weight=0.2, count=1000) >>> g.add_edge(1, 2, weight=0.8, count=0) >>> g.add_edge(2, 1, weight=0.4, count=1000) >>> g.add_edge(2, 1, weight=0.6, count=0) >>> d = multidigraph_to_digraph(g) >>> isinstance(d, nx.DiGraph) True >>> d.edges(data=True) == [(1, 2, {'count': 0, 'weight': 0.8}), (2, 1, {'count': 0, 'weight': 0.6})] True # pick best edge Returns a depparse.Digraph from an nx.DiGraph. `field` is the name of the data field to get the score from. >>> g = nx.DiGraph() >>> g.add_edge(1, 2, weight=0.3) >>> g.add_edge(2, 3, weight=0.8) >>> g.add_edge(3, 1, weight=0.1) >>> g.add_edge(2, 1, weight=0.5) >>> d = nxdigraph_to_depdigraph(g) >>> list(d.iteredges()) [(1, 2), (2, 3), (2, 1), (3, 1), ('root', 1), ('root', 2), ('root', 3)] Returns the ArgTree that is the minimum spanning tree for the given nx.MultiDiGraph (such as e.g. an WeightedEvidenceGraph). >>> g = nx.MultiDiGraph() >>> g.add_edge(1, 2, weight=0.3) >>> g.add_edge(2, 3, weight=0.8) >>> g.add_edge(2, 3, weight=0.7) >>> g.add_edge(3, 1, weight=0.1) >>> g.add_edge(2, 1, weight=0.5) >>> mst = find_mst(g) >>> mst.edges(data=True) [(1, 2, {'weight': 0.3}), (2, 3, {'weight': 0.8})] # make digraph of multidigraph # reverse graph if needed # convert to depparse digraph # find mst # convert depparse mst to ArgTree # don't add the root link | 3.015419 | 3 |
flask/tests/test_auth.py | Oluwasegun-AA/pipit | 0 | 6613447 | from tests.testBase import TestBase
from tests.mockData import loginData, signupData
class TestThis(TestBase):
    """Smoke tests for the authentication API endpoints."""

    def test_Login(self):
        """A valid login payload should be accepted with HTTP 200."""
        response = self.post('/api/v1/auth/login/', loginData)
        self.assertEqual(response['status'], 200)

    def test_signup(self):
        """A valid signup payload should return 200 and echo the username."""
        response = self.post('/api/v1/auth/signup/', signupData)
        self.assertEqual(response['status'], 200)
        self.assertEqual(response['data']['username'], 'testUsername')
| from tests.testBase import TestBase
from tests.mockData import loginData, signupData
class TestThis(TestBase):
    """Smoke tests for the authentication API endpoints."""
    def test_Login(self):
        """POST a valid login payload; expect a 200 status in the response dict."""
        res = self.post('/api/v1/auth/login/', loginData)
        self.assertEqual(res['status'], 200)
    def test_signup(self):
        """POST a valid signup payload; expect 200 and the created username echoed back."""
        res = self.post('/api/v1/auth/signup/', signupData)
        self.assertEqual(res['status'], 200)
        self.assertEqual(res['data']['username'], 'testUsername')
| none | 1 | 2.661438 | 3 | |
ancilla/ancilla/foundation/utils/delegate.py | frenzylabs/ancilla | 7 | 6613448 | '''
delegate.py
ancilla
Created by <NAME> (<EMAIL>) on 01/29/20
Copyright 2019 FrenzyLabs, LLC.
'''
class DelegatedAttribute:
    """Data descriptor that forwards attribute access to a delegate object.

    Declaring ``x = DelegatedAttribute('inner', 'value')`` on a class makes
    ``obj.x`` read, write, and delete ``obj.inner.value`` instead.
    """

    def __init__(self, delegate_name, attr_name):
        # attr_name: the attribute looked up on the delegate object
        self.attr_name = attr_name
        # delegate_name: the attribute on the owner instance that holds the delegate
        self.delegate_name = delegate_name

    def delegate(self, instance):
        """Return the delegate object this descriptor forwards to."""
        return getattr(instance, self.delegate_name)

    def __get__(self, instance, owner):
        if instance is None:
            # class-level access exposes the descriptor itself
            return self
        return getattr(self.delegate(instance), self.attr_name)

    def __set__(self, instance, value):
        setattr(self.delegate(instance), self.attr_name, value)

    def __delete__(self, instance):
        delattr(self.delegate(instance), self.attr_name)

    def __str__(self):
        # NOTE(review): deliberately (?) renders as an empty string — confirm
        # callers rely on this before changing it.
        return ""
| '''
delegate.py
ancilla
Created by <NAME> (<EMAIL>) on 01/29/20
Copyright 2019 FrenzyLabs, LLC.
'''
class DelegatedAttribute:
    """Data descriptor that forwards get/set/delete to a named delegate.

    Declaring ``x = DelegatedAttribute('inner', 'value')`` on a class makes
    ``obj.x`` operate on ``obj.inner.value`` instead.
    """
    def __init__(self, delegate_name, attr_name):
        # attribute looked up on the delegate object
        self.attr_name = attr_name
        # attribute on the owner instance that holds the delegate
        self.delegate_name = delegate_name
    def __get__(self, instance, owner):
        if instance is None:
            # class-level access returns the descriptor itself
            return self
        else:
            # return instance.delegate.attr
            return getattr(self.delegate(instance), self.attr_name)
    def __set__(self, instance, value):
        # instance.delegate.attr = value
        setattr(self.delegate(instance), self.attr_name, value)
    def __delete__(self, instance):
        # del instance.delegate.attr
        delattr(self.delegate(instance), self.attr_name)
    def delegate(self, instance):
        """Return the delegate object this descriptor forwards to."""
        return getattr(instance, self.delegate_name)
    def __str__(self):
        # renders as an empty string
        return ""
| en | 0.616777 | delegate.py ancilla Created by <NAME> (<EMAIL>) on 01/29/20 Copyright 2019 FrenzyLabs, LLC. # return instance.delegate.attr # instance.delegate.attr = value | 2.615297 | 3 |
qittle/http/session/abc.py | exthrempty/qittle | 2 | 6613449 | import typing
from abc import ABC, abstractmethod
from qittle.http.client import ABCHTTPClient
HttpClient = typing.Type[ABCHTTPClient]
class ABCSessionManager(ABC):
    """Abstract async context manager that yields an HTTP client.

    Subclasses implement ``__aenter__``/``__aexit__``; the ``http_client``
    property stores the HTTP client class the manager works with.
    """

    # HTTP client class (typing.Type[ABCHTTPClient]) used by this manager
    _http_client: HttpClient

    @property
    def http_client(self) -> HttpClient:
        """The configured HTTP client class."""
        return self._http_client

    @http_client.setter
    def http_client(self, client: HttpClient):
        self._http_client = client

    @abstractmethod
    async def __aenter__(self) -> ABCHTTPClient:
        ...

    @abstractmethod
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        ...
| import typing
from abc import ABC, abstractmethod
from qittle.http.client import ABCHTTPClient
HttpClient = typing.Type[ABCHTTPClient]
class ABCSessionManager(ABC):
    """Abstract async context manager that yields an HTTP client.

    Subclasses implement ``__aenter__``/``__aexit__``; the ``http_client``
    property stores the HTTP client class the manager works with.
    """
    # HTTP client class (typing.Type[ABCHTTPClient]) used by this manager
    _http_client: HttpClient
    @property
    def http_client(self) -> HttpClient:
        """The configured HTTP client class."""
        return self._http_client
    @http_client.setter
    def http_client(self, http_client: HttpClient):
        self._http_client = http_client
    @abstractmethod
    async def __aenter__(self) -> ABCHTTPClient:
        """Enter the session scope; implementations return an HTTP client instance."""
        pass
    @abstractmethod
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Exit hook of the async context manager."""
        pass
| none | 1 | 2.883383 | 3 | |
WeatherReport.py | Manishgithub2021/Career | 0 | 6613450 | from tkinter import *
from datetime import *
from PIL import Image,ImageTk
from ApnaDhaba import *
# Timestamps for the report header.
now = datetime.now()
current_time = now.strftime("%H:%M:%S")  # NOTE(review): appears unused in this script
today = date.today()
# Main application window.
root=Tk()
root.geometry("600x788")
root.title('Weather Report')
root.iconbitmap('weather.ico')
# Static report labels (the forecast text is hard-coded for Kolkata).
TitleLabel=Label(text="Weather Report-Kolkata",font=("Arial Bold", 25))
DateLabel=Label(text=f"{today}",font=("Arial Bold", 15))
TextLabel=Label(text="Today's temperature in Kolkata is 28°c. Day's maximum temperature would hover at 37°c\nwhile minimum temperature is predicted to be 27°c",font=("Helvetica", 15))
TitleLabel.pack()
DateLabel.pack()
TextLabel.pack(pady=30)
# Illustration image; the module-level 'photo' name keeps a reference so the
# PhotoImage isn't garbage-collected while the label shows it (tkinter caveat).
image=Image.open('Summer.jpeg')
image = image. resize((200, 250), Image. ANTIALIAS)
photo=ImageTk.PhotoImage(image)
Label(image=photo).pack()
Label(text="Temperature outside is very hot\nDo you want to order something!!!!!!",font=("Times", "24", "bold italic")).pack()
# Container for the Yes/No buttons created later in the script.
Frame1=Frame(root)
Frame1.pack()
def NoDisplay():
    """Handler for the "No" button: dismiss the prompt by removing both buttons."""
    for button in (ButtonNo, ButtonYes):
        button.destroy()
def YesDisplay():
    """Handler for the "Yes" button: open the restaurant order window.

    Removes only the "No" button; the "Yes" button stays in place.
    NOTE(review): this spawns a second Tk() root and a nested mainloop while
    the weather window is still alive — Toplevel(root) is the conventional
    fix; behavior kept as-is here.
    """
    ButtonNo.destroy()
    order_window = Tk()
    order_window.geometry("600x788")
    order_window.title(f"APNA RESTAURANT {date.today()}")
    order_window.iconbitmap('RESTAURANT.ico')
    # Menu items shown in the order window, in display order.
    for item in ("Regular size pizza", "Chicken Crunchy Burger", "200 ml Pepsi"):
        Label(order_window, text=item, font=("Arial Bold", 10)).pack()
    pizza_image = Image.open('pizza.jpg')
    pizza_photo = ImageTk.PhotoImage(pizza_image)
    Label(order_window, image=pizza_photo).pack()
    order_window.mainloop()
# Choice buttons: "Yes" opens the order window, "No" dismisses the prompt.
ButtonYes=Button(Frame1,text="Yes",font=("Arial Bold", 15),command=YesDisplay)
ButtonNo=Button(Frame1,text="No",font=("Arial Bold", 15),command=NoDisplay)
ButtonYes.pack(side='left',padx=5 ,pady=10)
ButtonNo.pack(padx=5 ,pady=10)
root.mainloop() | from tkinter import *
from datetime import *
from PIL import Image,ImageTk
from ApnaDhaba import *
# Timestamps for the report header.
now = datetime.now()
current_time = now.strftime("%H:%M:%S")  # NOTE(review): appears unused in this script
today = date.today()
# Main application window.
root=Tk()
root.geometry("600x788")
root.title('Weather Report')
root.iconbitmap('weather.ico')
# Static report labels (the forecast text is hard-coded for Kolkata).
TitleLabel=Label(text="Weather Report-Kolkata",font=("Arial Bold", 25))
DateLabel=Label(text=f"{today}",font=("Arial Bold", 15))
TextLabel=Label(text="Today's temperature in Kolkata is 28°c. Day's maximum temperature would hover at 37°c\nwhile minimum temperature is predicted to be 27°c",font=("Helvetica", 15))
TitleLabel.pack()
DateLabel.pack()
TextLabel.pack(pady=30)
# Illustration image; the module-level 'photo' name keeps a reference so the
# PhotoImage isn't garbage-collected while the label shows it (tkinter caveat).
image=Image.open('Summer.jpeg')
image = image. resize((200, 250), Image. ANTIALIAS)
photo=ImageTk.PhotoImage(image)
Label(image=photo).pack()
Label(text="Temperature outside is very hot\nDo you want to order something!!!!!!",font=("Times", "24", "bold italic")).pack()
# Container for the Yes/No buttons created below.
Frame1=Frame(root)
Frame1.pack()
def NoDisplay():
    """Handler for the "No" button: remove both choice buttons."""
    ButtonNo.destroy()
    ButtonYes.destroy()
def YesDisplay():
    """Handler for the "Yes" button: open the restaurant order window.

    NOTE(review): this spawns a second Tk() root and a nested mainloop while
    the weather window is still alive; Toplevel(root) is the conventional
    approach — confirm before changing.
    """
    ButtonNo.destroy()
    root1 = Tk()
    root1.geometry("600x788")
    root1.title(f"APNA RESTAURANT {date.today()}")
    root1.iconbitmap('RESTAURANT.ico')
    Label(root1, text="Regular size pizza", font=("Arial Bold", 10)).pack()
    Label(root1, text="Chicken Crunchy Burger", font=("Arial Bold", 10)).pack()
    Label(root1, text="200 ml Pepsi", font=("Arial Bold", 10)).pack()
    imageburger=Image.open('pizza.jpg')
    photoburger=ImageTk.PhotoImage(imageburger)
    Label(root1,image=photoburger).pack()
    root1.mainloop()
# Choice buttons: "Yes" opens the order window, "No" dismisses the prompt.
ButtonYes=Button(Frame1,text="Yes",font=("Arial Bold", 15),command=YesDisplay)
ButtonNo=Button(Frame1,text="No",font=("Arial Bold", 15),command=NoDisplay)
ButtonYes.pack(side='left',padx=5 ,pady=10)
ButtonNo.pack(padx=5 ,pady=10)
root.mainloop() | none | 1 | 3.485921 | 3 |